author     Ingo Molnar <mingo@elte.hu>    2009-06-17 12:52:15 +0200
committer  Ingo Molnar <mingo@elte.hu>    2009-06-17 12:56:49 +0200
commit     eadb8a091b27a840de7450f84ecff5ef13476424 (patch)
tree       58c3782d40def63baa8167f3d31e3048cb4c7660 /drivers
parent     73874005cd8800440be4299bd095387fff4b90ac (diff)
parent     65795efbd380a832ae508b04dba8f8e53f0b84d9 (diff)
Merge branch 'linus' into tracing/hw-breakpoints
Conflicts:
	arch/x86/Kconfig
	arch/x86/kernel/traps.c
	arch/x86/power/cpu.c
	arch/x86/power/cpu_32.c
	kernel/Makefile

Semantic conflict:
	arch/x86/kernel/hw_breakpoint.c

Merge reason: Resolve the conflicts, move from put_cpu_no_sched() to
put_cpu() in arch/x86/kernel/hw_breakpoint.c.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers') -- columns: file mode, path, lines changed
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/acpica/Makefile27
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h3
-rw-r--r--drivers/acpi/acpica/aclocal.h11
-rw-r--r--drivers/acpi/acpica/acnamesp.h13
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/dsobject.c5
-rw-r--r--drivers/acpi/acpica/dsopcode.c17
-rw-r--r--drivers/acpi/acpica/dswstate.c4
-rw-r--r--drivers/acpi/acpica/evregion.c12
-rw-r--r--drivers/acpi/acpica/evxfevnt.c4
-rw-r--r--drivers/acpi/acpica/exconfig.c125
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdump.c6
-rw-r--r--drivers/acpi/acpica/exfldio.c20
-rw-r--r--drivers/acpi/acpica/exmutex.c45
-rw-r--r--drivers/acpi/acpica/exstore.c4
-rw-r--r--drivers/acpi/acpica/hwregs.c4
-rw-r--r--drivers/acpi/acpica/nsalloc.c14
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c9
-rw-r--r--drivers/acpi/acpica/nspredef.c7
-rw-r--r--drivers/acpi/acpica/nssearch.c4
-rw-r--r--drivers/acpi/acpica/nswalk.c69
-rw-r--r--drivers/acpi/acpica/nsxfname.c150
-rw-r--r--drivers/acpi/acpica/nsxfobj.c9
-rw-r--r--drivers/acpi/acpica/rscalc.c5
-rw-r--r--drivers/acpi/acpica/rsxface.c8
-rw-r--r--drivers/acpi/acpica/tbfadt.c16
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c23
-rw-r--r--drivers/acpi/acpica/utdebug.c8
-rw-r--r--drivers/acpi/acpica/utdelete.c21
-rw-r--r--drivers/acpi/acpica/utmisc.c20
-rw-r--r--drivers/acpi/acpica/utmutex.c26
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/pci_bind.c24
-rw-r--r--drivers/acpi/pci_irq.c5
-rw-r--r--drivers/acpi/processor_core.c2
-rw-r--r--drivers/acpi/processor_idle.c50
-rw-r--r--drivers/acpi/processor_perflib.c12
-rw-r--r--drivers/acpi/processor_throttling.c25
-rw-r--r--drivers/acpi/video.c56
-rw-r--r--drivers/ata/Kconfig9
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c159
-rw-r--r--drivers/ata/ata_piix.c20
-rw-r--r--drivers/ata/libata-acpi.c4
-rw-r--r--drivers/ata/libata-core.c11
-rw-r--r--drivers/ata/libata-eh.c2
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/ata/libata-sff.c20
-rw-r--r--drivers/ata/pata_ali.c17
-rw-r--r--drivers/ata/pata_efar.c17
-rw-r--r--drivers/ata/pata_legacy.c2
-rw-r--r--drivers/ata/pata_netcell.c13
-rw-r--r--drivers/ata/pata_palmld.c150
-rw-r--r--drivers/ata/sata_nv.c131
-rw-r--r--drivers/ata/sata_sil.c2
-rw-r--r--drivers/ata/sata_sx4.c11
-rw-r--r--drivers/base/bus.c6
-rw-r--r--drivers/base/core.c62
-rw-r--r--drivers/base/dd.c6
-rw-r--r--drivers/base/driver.c4
-rw-r--r--drivers/base/firmware_class.c156
-rw-r--r--drivers/base/node.c4
-rw-r--r--drivers/base/platform.c43
-rw-r--r--drivers/base/power/main.c98
-rw-r--r--drivers/base/sys.c24
-rw-r--r--drivers/block/DAC960.c10
-rw-r--r--drivers/block/Kconfig6
-rw-r--r--drivers/block/amiflop.c54
-rw-r--r--drivers/block/aoe/aoechr.c7
-rw-r--r--drivers/block/aoe/aoecmd.c7
-rw-r--r--drivers/block/ataflop.c66
-rw-r--r--drivers/block/brd.c7
-rw-r--r--drivers/block/cciss.c928
-rw-r--r--drivers/block/cciss.h34
-rw-r--r--drivers/block/cciss_cmd.h2
-rw-r--r--drivers/block/cciss_scsi.c109
-rw-r--r--drivers/block/cpqarray.c20
-rw-r--r--drivers/block/floppy.c85
-rw-r--r--drivers/block/hd.c106
-rw-r--r--drivers/block/loop.c37
-rw-r--r--drivers/block/mg_disk.c509
-rw-r--r--drivers/block/nbd.c23
-rw-r--r--drivers/block/paride/pcd.c29
-rw-r--r--drivers/block/paride/pd.c22
-rw-r--r--drivers/block/paride/pf.c47
-rw-r--r--drivers/block/pktcdvd.c17
-rw-r--r--drivers/block/ps3disk.c42
-rw-r--r--drivers/block/ps3vram.c168
-rw-r--r--drivers/block/sunvdc.c14
-rw-r--r--drivers/block/swim.c48
-rw-r--r--drivers/block/swim3.c107
-rw-r--r--drivers/block/sx8.c17
-rw-r--r--drivers/block/ub.c54
-rw-r--r--drivers/block/viodasd.c12
-rw-r--r--drivers/block/virtio_blk.c120
-rw-r--r--drivers/block/xd.c41
-rw-r--r--drivers/block/xen-blkfront.c58
-rw-r--r--drivers/block/xsysace.c46
-rw-r--r--drivers/block/z2ram.c19
-rw-r--r--drivers/bluetooth/dtl1_cs.c2
-rw-r--r--drivers/bluetooth/hci_ldisc.c5
-rw-r--r--drivers/bluetooth/hci_vhci.c90
-rw-r--r--drivers/cdrom/cdrom.c4
-rw-r--r--drivers/cdrom/gdrom.c36
-rw-r--r--drivers/cdrom/viocd.c35
-rw-r--r--drivers/char/Kconfig17
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/intel-agp.c16
-rw-r--r--drivers/char/amiserial.c2
-rw-r--r--drivers/char/bfin_jtag_comm.c365
-rw-r--r--drivers/char/cyclades.c290
-rw-r--r--drivers/char/epca.c17
-rw-r--r--drivers/char/hpet.c4
-rw-r--r--drivers/char/hvc_iucv.c204
-rw-r--r--drivers/char/hvcs.c6
-rw-r--r--drivers/char/hw_random/Kconfig14
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/core.c1
-rw-r--r--drivers/char/hw_random/mxc-rnga.c247
-rw-r--r--drivers/char/hw_random/omap-rng.c2
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c26
-rw-r--r--drivers/char/hw_random/via-rng.c15
-rw-r--r--drivers/char/hw_random/virtio-rng.c30
-rw-r--r--drivers/char/ip2/i2lib.c4
-rw-r--r--drivers/char/ip2/ip2main.c4
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c13
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c4
-rw-r--r--drivers/char/isicom.c19
-rw-r--r--drivers/char/istallion.c8
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/misc.c15
-rw-r--r--drivers/char/moxa.c5
-rw-r--r--drivers/char/mxser.c14
-rw-r--r--drivers/char/n_hdlc.c4
-rw-r--r--drivers/char/n_tty.c29
-rw-r--r--drivers/char/pcmcia/synclink_cs.c11
-rw-r--r--drivers/char/ps3flash.c296
-rw-r--r--drivers/char/pty.c56
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/char/raw.c8
-rw-r--r--drivers/char/rocket.c19
-rw-r--r--drivers/char/selection.c2
-rw-r--r--drivers/char/stallion.c6
-rw-r--r--drivers/char/synclink.c9
-rw-r--r--drivers/char/synclink_gt.c86
-rw-r--r--drivers/char/synclinkmp.c9
-rw-r--r--drivers/char/sysrq.c2
-rw-r--r--drivers/char/tpm/tpm_bios.c3
-rw-r--r--drivers/char/tty_audit.c10
-rw-r--r--drivers/char/tty_io.c124
-rw-r--r--drivers/char/tty_ioctl.c91
-rw-r--r--drivers/char/tty_ldisc.c556
-rw-r--r--drivers/char/tty_port.c47
-rw-r--r--drivers/char/viotape.c2
-rw-r--r--drivers/char/virtio_console.c26
-rw-r--r--drivers/char/vt.c21
-rw-r--r--drivers/clocksource/Makefile2
-rw-r--r--drivers/clocksource/acpi_pm.c1
-rw-r--r--drivers/clocksource/sh_cmt.c116
-rw-r--r--drivers/clocksource/sh_mtu2.c357
-rw-r--r--drivers/clocksource/sh_tmu.c461
-rw-r--r--drivers/connector/Kconfig2
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c5
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c5
-rw-r--r--drivers/crypto/Kconfig8
-rw-r--r--drivers/crypto/hifn_795x.c8
-rw-r--r--drivers/crypto/ixp4xx_crypto.c33
-rw-r--r--drivers/crypto/padlock-aes.c15
-rw-r--r--drivers/crypto/talitos.c713
-rw-r--r--drivers/dma/fsldma.c71
-rw-r--r--drivers/dma/ioat_dma.c2
-rw-r--r--drivers/edac/Kconfig34
-rw-r--r--drivers/edac/Makefile9
-rw-r--r--drivers/edac/amd64_edac.c3354
-rw-r--r--drivers/edac/amd64_edac.h644
-rw-r--r--drivers/edac/amd64_edac_dbg.c255
-rw-r--r--drivers/edac/amd64_edac_err_types.c161
-rw-r--r--drivers/edac/amd64_edac_inj.c185
-rw-r--r--drivers/edac/amd8111_edac.c4
-rw-r--r--drivers/edac/amd8131_edac.c2
-rw-r--r--drivers/edac/e752x_edac.c2
-rw-r--r--drivers/edac/edac_core.h9
-rw-r--r--drivers/eisa/eisa.ids5
-rw-r--r--drivers/eisa/pci_eisa.c2
-rw-r--r--drivers/eisa/virtual_root.c2
-rw-r--r--drivers/firewire/Makefile8
-rw-r--r--drivers/firewire/core-card.c (renamed from drivers/firewire/fw-card.c)25
-rw-r--r--drivers/firewire/core-cdev.c (renamed from drivers/firewire/fw-cdev.c)13
-rw-r--r--drivers/firewire/core-device.c (renamed from drivers/firewire/fw-device.c)154
-rw-r--r--drivers/firewire/core-iso.c (renamed from drivers/firewire/fw-iso.c)6
-rw-r--r--drivers/firewire/core-topology.c (renamed from drivers/firewire/fw-topology.c)24
-rw-r--r--drivers/firewire/core-transaction.c (renamed from drivers/firewire/fw-transaction.c)42
-rw-r--r--drivers/firewire/core.h293
-rw-r--r--drivers/firewire/fw-device.h202
-rw-r--r--drivers/firewire/fw-topology.h77
-rw-r--r--drivers/firewire/fw-transaction.h446
-rw-r--r--drivers/firewire/ohci.c (renamed from drivers/firewire/fw-ohci.c)19
-rw-r--r--drivers/firewire/ohci.h (renamed from drivers/firewire/fw-ohci.h)6
-rw-r--r--drivers/firewire/sbp2.c (renamed from drivers/firewire/fw-sbp2.c)64
-rw-r--r--drivers/firmware/dmi_scan.c1
-rw-r--r--drivers/firmware/memmap.c16
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpu/drm/Kconfig21
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/drm_bufs.c11
-rw-r--r--drivers/gpu/drm/drm_crtc.c7
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c109
-rw-r--r--drivers/gpu/drm/drm_debugfs.c12
-rw-r--r--drivers/gpu/drm/drm_drv.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c79
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_hashtab.c4
-rw-r--r--drivers/gpu/drm/drm_irq.c8
-rw-r--r--drivers/gpu/drm/drm_mm.c169
-rw-r--r--drivers/gpu/drm/drm_modes.c18
-rw-r--r--drivers/gpu/drm/drm_stub.c19
-rw-r--r--drivers/gpu/drm/drm_sysfs.c14
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c79
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h51
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c221
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c166
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c190
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h636
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c20
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c188
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h118
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c231
-rw-r--r--drivers/gpu/drm/i915/intel_display.c671
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c1
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c34
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c34
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c163
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c248
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c4
-rw-r--r--drivers/gpu/drm/radeon/Kconfig34
-rw-r--r--drivers/gpu/drm/radeon/Makefile12
-rw-r--r--drivers/gpu/drm/radeon/ObjectID.h578
-rw-r--r--drivers/gpu/drm/radeon/atom-bits.h48
-rw-r--r--drivers/gpu/drm/radeon/atom-names.h100
-rw-r--r--drivers/gpu/drm/radeon/atom-types.h42
-rw-r--r--drivers/gpu/drm/radeon/atom.c1215
-rw-r--r--drivers/gpu/drm/radeon/atom.h149
-rw-r--r--drivers/gpu/drm/radeon/atombios.h4785
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c695
-rw-r--r--drivers/gpu/drm/radeon/r100.c1524
-rw-r--r--drivers/gpu/drm/radeon/r300.c1116
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h52
-rw-r--r--drivers/gpu/drm/radeon/r420.c223
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h749
-rw-r--r--drivers/gpu/drm/radeon/r520.c234
-rw-r--r--drivers/gpu/drm/radeon/r600.c169
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c42
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h114
-rw-r--r--drivers/gpu/drm/radeon/radeon.h793
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c249
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h405
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c1298
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c133
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c390
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c833
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c2481
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c603
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c249
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c252
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c813
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c692
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c217
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c1708
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c825
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c387
-rw-r--r--drivers/gpu/drm/radeon/radeon_fixed.h50
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c233
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c287
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c209
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c158
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c295
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c1276
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c1288
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h398
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c511
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h45
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h3570
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c485
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c653
-rw-r--r--drivers/gpu/drm/radeon/rs400.c411
-rw-r--r--drivers/gpu/drm/radeon/rs600.c324
-rw-r--r--drivers/gpu/drm/radeon/rs690.c181
-rw-r--r--drivers/gpu/drm/radeon/rs780.c102
-rw-r--r--drivers/gpu/drm/radeon/rv515.c504
-rw-r--r--drivers/gpu/drm/radeon/rv770.c124
-rw-r--r--drivers/gpu/drm/ttm/Makefile8
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c150
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c1698
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c561
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c454
-rw-r--r--drivers/gpu/drm/ttm/ttm_global.c114
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c234
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c50
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c635
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c6
-rw-r--r--drivers/hid/Kconfig92
-rw-r--r--drivers/hid/Makefile10
-rw-r--r--drivers/hid/hid-apple.c4
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-debug.c23
-rw-r--r--drivers/hid/hid-drff.c8
-rw-r--r--drivers/hid/hid-gaff.c8
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/hid-lgff.c10
-rw-r--r--drivers/hid/hid-ntrig.c222
-rw-r--r--drivers/hid/hid-sjoy.c180
-rw-r--r--drivers/hid/hid-tmff.c17
-rw-r--r--drivers/hid/hid-wacom.c259
-rw-r--r--drivers/hid/hid-zpff.c7
-rw-r--r--drivers/hid/hidraw.c5
-rw-r--r--drivers/hid/usbhid/hid-core.c7
-rw-r--r--drivers/hid/usbhid/hiddev.c7
-rw-r--r--drivers/hwmon/Kconfig18
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/f71882fg.c250
-rw-r--r--drivers/hwmon/hp_accel.c20
-rw-r--r--drivers/hwmon/hwmon.c29
-rw-r--r--drivers/hwmon/ibmaem.c1
-rw-r--r--drivers/hwmon/lis3lv02d.c187
-rw-r--r--drivers/hwmon/lis3lv02d.h29
-rw-r--r--drivers/hwmon/lis3lv02d_spi.c1
-rw-r--r--drivers/hwmon/lm78.c2
-rw-r--r--drivers/hwmon/max6650.c86
-rw-r--r--drivers/hwmon/sht15.c10
-rw-r--r--drivers/hwmon/tmp401.c690
-rw-r--r--drivers/hwmon/w83627ehf.c10
-rw-r--r--drivers/i2c/busses/Kconfig21
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-at91.c8
-rw-r--r--drivers/i2c/busses/i2c-au1550.c2
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c62
-rw-r--r--drivers/i2c/busses/i2c-highlander.c2
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c6
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c2
-rw-r--r--drivers/i2c/busses/i2c-ocores.c13
-rw-r--r--drivers/i2c/busses/i2c-omap.c41
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c10
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c8
-rw-r--r--drivers/i2c/busses/i2c-pxa.c31
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c52
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c2
-rw-r--r--drivers/i2c/busses/i2c-stu300.c1029
-rw-r--r--drivers/i2c/busses/i2c-versatile.c6
-rw-r--r--drivers/i2c/busses/i2c-viapro.c4
-rw-r--r--drivers/i2c/busses/i2c-voodoo3.c1
-rw-r--r--drivers/i2c/chips/Kconfig15
-rw-r--r--drivers/i2c/chips/Makefile1
-rw-r--r--drivers/i2c/i2c-core.c43
-rw-r--r--drivers/ide/Kconfig2
-rw-r--r--drivers/ide/alim15x3.c10
-rw-r--r--drivers/ide/at91_ide.c7
-rw-r--r--drivers/ide/au1xxx-ide.c8
-rw-r--r--drivers/ide/buddha.c9
-rw-r--r--drivers/ide/cmd640.c7
-rw-r--r--drivers/ide/cs5520.c4
-rw-r--r--drivers/ide/delkin_cb.c6
-rw-r--r--drivers/ide/falconide.c9
-rw-r--r--drivers/ide/gayle.c9
-rw-r--r--drivers/ide/hpt366.c25
-rw-r--r--drivers/ide/icside.c77
-rw-r--r--drivers/ide/ide-4drives.c6
-rw-r--r--drivers/ide/ide-atapi.c187
-rw-r--r--drivers/ide/ide-cd.c152
-rw-r--r--drivers/ide/ide-cd.h4
-rw-r--r--drivers/ide/ide-cs.c6
-rw-r--r--drivers/ide/ide-disk.c86
-rw-r--r--drivers/ide/ide-dma.c23
-rw-r--r--drivers/ide/ide-eh.c14
-rw-r--r--drivers/ide/ide-floppy.c32
-rw-r--r--drivers/ide/ide-gd.c14
-rw-r--r--drivers/ide/ide-generic.c7
-rw-r--r--drivers/ide/ide-h8300.c10
-rw-r--r--drivers/ide/ide-io.c138
-rw-r--r--drivers/ide/ide-ioctls.c1
-rw-r--r--drivers/ide/ide-iops.c47
-rw-r--r--drivers/ide/ide-legacy.c7
-rw-r--r--drivers/ide/ide-lib.c29
-rw-r--r--drivers/ide/ide-park.c7
-rw-r--r--drivers/ide/ide-pci-generic.c11
-rw-r--r--drivers/ide/ide-pm.c44
-rw-r--r--drivers/ide/ide-pnp.c6
-rw-r--r--drivers/ide/ide-probe.c108
-rw-r--r--drivers/ide/ide-tape.c828
-rw-r--r--drivers/ide/ide-taskfile.c23
-rw-r--r--drivers/ide/ide.c10
-rw-r--r--drivers/ide/ide_platform.c11
-rw-r--r--drivers/ide/macide.c9
-rw-r--r--drivers/ide/palm_bk3710.c6
-rw-r--r--drivers/ide/pdc202xx_new.c26
-rw-r--r--drivers/ide/pdc202xx_old.c106
-rw-r--r--drivers/ide/piix.c1
-rw-r--r--drivers/ide/pmac.c13
-rw-r--r--drivers/ide/q40ide.c11
-rw-r--r--drivers/ide/rapide.c8
-rw-r--r--drivers/ide/scc_pata.c6
-rw-r--r--drivers/ide/setup-pci.c85
-rw-r--r--drivers/ide/sgiioc4.c7
-rw-r--r--drivers/ide/siimage.c4
-rw-r--r--drivers/ide/sl82c105.c9
-rw-r--r--drivers/ide/tc86c001.c2
-rw-r--r--drivers/ide/tx4938ide.c5
-rw-r--r--drivers/ide/tx4939ide.c7
-rw-r--r--drivers/ide/via82cxxx.c2
-rw-r--r--drivers/idle/i7300_idle.c6
-rw-r--r--drivers/ieee1394/csr1212.c2
-rw-r--r--drivers/ieee1394/dv1394.c5
-rw-r--r--drivers/ieee1394/eth1394.c16
-rw-r--r--drivers/ieee1394/ieee1394_core.h6
-rw-r--r--drivers/ieee1394/nodemgr.c5
-rw-r--r--drivers/ieee1394/sbp2.c8
-rw-r--r--drivers/ieee802154/Kconfig22
-rw-r--r--drivers/ieee802154/Makefile3
-rw-r--r--drivers/ieee802154/fakehard.c270
-rw-r--r--drivers/infiniband/core/sysfs.c2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cq.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c32
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes_pSeries.h28
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c10
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c112
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c6
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.h2
-rw-r--r--drivers/infiniband/hw/ehca/hcp_phyp.c11
-rw-r--r--drivers/infiniband/hw/ehca/hcp_phyp.h2
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.c19
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c17
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c16
-rw-r--r--drivers/infiniband/hw/mthca/mthca_profile.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c14
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c31
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c6
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c10
-rw-r--r--drivers/input/input.c7
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/misc/Kconfig2
-rw-r--r--drivers/input/misc/pcspkr.c1
-rw-r--r--drivers/input/serio/Kconfig2
-rw-r--r--drivers/input/serio/ambakmi.c4
-rw-r--r--drivers/input/serio/libps2.c2
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c2
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/input/xen-kbdfront.c8
-rw-r--r--drivers/isdn/Kconfig2
-rw-r--r--drivers/isdn/capi/capiutil.c67
-rw-r--r--drivers/isdn/capi/kcapi.c8
-rw-r--r--drivers/isdn/divert/isdn_divert.c2
-rw-r--r--drivers/isdn/gigaset/Kconfig8
-rw-r--r--drivers/isdn/gigaset/asyncdata.c5
-rw-r--r--drivers/isdn/gigaset/common.c12
-rw-r--r--drivers/isdn/gigaset/ev-layer.c4
-rw-r--r--drivers/isdn/gigaset/gigaset.h5
-rw-r--r--drivers/isdn/gigaset/i4l.c12
-rw-r--r--drivers/isdn/gigaset/interface.c3
-rw-r--r--drivers/isdn/gigaset/isocdata.c6
-rw-r--r--drivers/isdn/gigaset/proc.c2
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c62
-rw-r--r--drivers/isdn/hardware/avm/b1.c2
-rw-r--r--drivers/isdn/hardware/avm/b1dma.c2
-rw-r--r--drivers/isdn/hardware/avm/c4.c4
-rw-r--r--drivers/isdn/hardware/avm/t1isa.c2
-rw-r--r--drivers/isdn/hardware/mISDN/Kconfig11
-rw-r--r--drivers/isdn/hardware/mISDN/hfc_multi.h47
-rw-r--r--drivers/isdn/hardware/mISDN/hfc_multi_8xx.h167
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c614
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c105
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c4
-rw-r--r--drivers/isdn/hisax/hfc_pci.c41
-rw-r--r--drivers/isdn/hisax/hisax.h2
-rw-r--r--drivers/isdn/hysdn/hycapi.c4
-rw-r--r--drivers/isdn/i4l/Kconfig2
-rw-r--r--drivers/isdn/i4l/isdn_net.c6
-rw-r--r--drivers/isdn/i4l/isdn_tty.c2
-rw-r--r--drivers/isdn/mISDN/core.c8
-rw-r--r--drivers/isdn/mISDN/dsp.h19
-rw-r--r--drivers/isdn/mISDN/dsp_audio.c5
-rw-r--r--drivers/isdn/mISDN/dsp_cmx.c115
-rw-r--r--drivers/isdn/mISDN/dsp_core.c74
-rw-r--r--drivers/isdn/mISDN/dsp_dtmf.c3
-rw-r--r--drivers/isdn/mISDN/dsp_ecdis.h2
-rw-r--r--drivers/isdn/mISDN/dsp_pipeline.c16
-rw-r--r--drivers/isdn/mISDN/dsp_tones.c23
-rw-r--r--drivers/isdn/mISDN/hwchannel.c4
-rw-r--r--drivers/isdn/mISDN/l1oip.h2
-rw-r--r--drivers/isdn/mISDN/l1oip_codec.c1
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c71
-rw-r--r--drivers/isdn/mISDN/layer2.c37
-rw-r--r--drivers/isdn/mISDN/layer2.h2
-rw-r--r--drivers/isdn/mISDN/socket.c45
-rw-r--r--drivers/isdn/mISDN/tei.c102
-rw-r--r--drivers/isdn/mISDN/timerdev.c2
-rw-r--r--drivers/leds/leds-h1940.c2
-rw-r--r--drivers/leds/leds-s3c24xx.c1
-rw-r--r--drivers/lguest/Kconfig2
-rw-r--r--drivers/lguest/core.c30
-rw-r--r--drivers/lguest/hypercalls.c14
-rw-r--r--drivers/lguest/interrupts_and_traps.c57
-rw-r--r--drivers/lguest/lg.h28
-rw-r--r--drivers/lguest/lguest_device.c41
-rw-r--r--drivers/lguest/lguest_user.c127
-rw-r--r--drivers/lguest/page_tables.c396
-rw-r--r--drivers/lguest/segments.c2
-rw-r--r--drivers/lguest/x86/core.c19
-rw-r--r--drivers/macintosh/therm_adt746x.c88
-rw-r--r--drivers/macintosh/therm_pm72.c95
-rw-r--r--drivers/macintosh/therm_windtunnel.c126
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c129
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c103
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c109
-rw-r--r--drivers/md/bitmap.c17
-rw-r--r--drivers/md/dm-exception-store.c2
-rw-r--r--drivers/md/dm-ioctl.c1
-rw-r--r--drivers/md/dm-log.c3
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-table.c38
-rw-r--r--drivers/md/dm.c6
-rw-r--r--drivers/md/linear.c2
-rw-r--r--drivers/md/md.c33
-rw-r--r--drivers/md/multipath.c4
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c8
-rw-r--r--drivers/md/raid5.c38
-rw-r--r--drivers/media/Kconfig10
-rw-r--r--drivers/media/common/tuners/tuner-simple.c44
-rw-r--r--drivers/media/common/tuners/tuner-types.c59
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c58
-rw-r--r--drivers/media/common/tuners/xc5000.c264
-rw-r--r--drivers/media/dvb/b2c2/flexcop-common.h8
-rw-r--r--drivers/media/dvb/b2c2/flexcop-fe-tuner.c790
-rw-r--r--drivers/media/dvb/b2c2/flexcop-i2c.c2
-rw-r--r--drivers/media/dvb/b2c2/flexcop-misc.c20
-rw-r--r--drivers/media/dvb/bt8xx/bt878.c8
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c121
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c14
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c42
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.h4
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c10
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig3
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c94
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c31
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-common.c7
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h8
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h4
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c325
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.h1
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.c8
-rw-r--r--drivers/media/dvb/firewire/firedtv-1394.c4
-rw-r--r--drivers/media/dvb/firewire/firedtv-dvb.c2
-rw-r--r--drivers/media/dvb/firewire/firedtv-rc.c4
-rw-r--r--drivers/media/dvb/frontends/Kconfig22
-rw-r--r--drivers/media/dvb/frontends/Makefile4
-rw-r--r--drivers/media/dvb/frontends/af9013.c2
-rw-r--r--drivers/media/dvb/frontends/au8522_dig.c98
-rw-r--r--drivers/media/dvb/frontends/cx24116.c2
-rw-r--r--drivers/media/dvb/frontends/drx397xD.c4
-rw-r--r--drivers/media/dvb/frontends/isl6423.c308
-rw-r--r--drivers/media/dvb/frontends/isl6423.h63
-rw-r--r--drivers/media/dvb/frontends/lgdt3305.c17
-rw-r--r--drivers/media/dvb/frontends/lgs8gxx.c10
-rw-r--r--drivers/media/dvb/frontends/lnbp21.c2
-rw-r--r--drivers/media/dvb/frontends/mt312.c2
-rw-r--r--drivers/media/dvb/frontends/nxt200x.c6
-rw-r--r--drivers/media/dvb/frontends/or51132.c2
-rw-r--r--drivers/media/dvb/frontends/stv0900_priv.h2
-rw-r--r--drivers/media/dvb/frontends/stv090x.c4299
-rw-r--r--drivers/media/dvb/frontends/stv090x.h106
-rw-r--r--drivers/media/dvb/frontends/stv090x_priv.h269
-rw-r--r--drivers/media/dvb/frontends/stv090x_reg.h2373
-rw-r--r--drivers/media/dvb/frontends/stv6110x.c373
-rw-r--r--drivers/media/dvb/frontends/stv6110x.h71
-rw-r--r--drivers/media/dvb/frontends/stv6110x_priv.h75
-rw-r--r--drivers/media/dvb/frontends/stv6110x_reg.h82
-rw-r--r--drivers/media/dvb/frontends/tda10048.c312
-rw-r--r--drivers/media/dvb/frontends/tda10048.h21
-rw-r--r--drivers/media/dvb/siano/Makefile2
-rw-r--r--drivers/media/dvb/siano/sms-cards.c188
-rw-r--r--drivers/media/dvb/siano/sms-cards.h64
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c468
-rw-r--r--drivers/media/dvb/siano/smscoreapi.h488
-rw-r--r--drivers/media/dvb/siano/smsdvb.c372
-rw-r--r--drivers/media/dvb/siano/smsendian.c102
-rw-r--r--drivers/media/dvb/siano/smsendian.h32
-rw-r--r--drivers/media/dvb/siano/smsir.c301
-rw-r--r--drivers/media/dvb/siano/smsir.h93
-rw-r--r--drivers/media/dvb/siano/smssdio.c357
-rw-r--r--drivers/media/dvb/siano/smsusb.c75
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c124
-rw-r--r--drivers/media/dvb/ttpci/av7110_hw.c2
-rw-r--r--drivers/media/dvb/ttpci/av7110_v4l.c2
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c2
-rw-r--r--drivers/media/dvb/ttpci/budget.c85
-rw-r--r--drivers/media/radio/dsbr100.c109
-rw-r--r--drivers/media/radio/radio-mr800.c1
-rw-r--r--drivers/media/radio/radio-sf16fmi.c16
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c22
-rw-r--r--drivers/media/radio/radio-si470x.c1
-rw-r--r--drivers/media/video/Kconfig24
-rw-r--r--drivers/media/video/Makefile79
-rw-r--r--drivers/media/video/adv7343.c534
-rw-r--r--drivers/media/video/adv7343_regs.h185
-rw-r--r--drivers/media/video/au0828/au0828-cards.c4
-rw-r--r--drivers/media/video/au0828/au0828-core.c17
-rw-r--r--drivers/media/video/au0828/au0828-video.c8
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c14
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c21
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c6
-rw-r--r--drivers/media/video/cx18/cx18-audio.c44
-rw-r--r--drivers/media/video/cx18/cx18-av-core.c374
-rw-r--r--drivers/media/video/cx18/cx18-av-firmware.c82
-rw-r--r--drivers/media/video/cx18/cx18-av-vbi.c4
-rw-r--r--drivers/media/video/cx18/cx18-cards.c63
-rw-r--r--drivers/media/video/cx18/cx18-controls.c6
-rw-r--r--drivers/media/video/cx18/cx18-driver.c100
-rw-r--r--drivers/media/video/cx18/cx18-driver.h22
-rw-r--r--drivers/media/video/cx18/cx18-dvb.c54
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c7
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.c114
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.h2
-rw-r--r--drivers/media/video/cx18/cx18-queue.c85
-rw-r--r--drivers/media/video/cx18/cx18-streams.c44
-rw-r--r--drivers/media/video/cx18/cx18-streams.h20
-rw-r--r--drivers/media/video/cx18/cx18-version.h2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c1
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c8
-rw-r--r--drivers/media/video/cx231xx/cx231xx-i2c.c32
-rw-r--r--drivers/media/video/cx231xx/cx231xx-input.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-vbi.c1
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h2
-rw-r--r--drivers/media/video/cx23885/cimax2.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c1
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c121
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c92
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c123
-rw-r--r--drivers/media/video/cx23885/cx23885-i2c.c12
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c14
-rw-r--r--drivers/media/video/cx23885/cx23885.h21
-rw-r--r--drivers/media/video/cx88/Makefile2
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c7
-rw-r--r--drivers/media/video/cx88/cx88-cards.c108
-rw-r--r--drivers/media/video/cx88/cx88-core.c27
-rw-r--r--drivers/media/video/cx88/cx88-dsp.c312
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c1
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c13
-rw-r--r--drivers/media/video/cx88/cx88-input.c6
-rw-r--r--drivers/media/video/cx88/cx88-tvaudio.c115
-rw-r--r--drivers/media/video/cx88/cx88-video.c16
-rw-r--r--drivers/media/video/cx88/cx88.h12
-rw-r--r--drivers/media/video/dabusb.c6
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c5
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c222
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c58
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c21
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c25
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c8
-rw-r--r--drivers/media/video/em28xx/em28xx-reg.h16
-rw-r--r--drivers/media/video/em28xx/em28xx.h9
-rw-r--r--drivers/media/video/gspca/finepix.c1
-rw-r--r--drivers/media/video/gspca/gspca.c199
-rw-r--r--drivers/media/video/gspca/gspca.h6
-rw-r--r--drivers/media/video/gspca/m5602/Makefile3
-rw-r--r--drivers/media/video/gspca/m5602/m5602_bridge.h26
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c44
-rw-r--r--drivers/media/video/gspca/m5602/m5602_mt9m111.c400
-rw-r--r--drivers/media/video/gspca/m5602/m5602_mt9m111.h805
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov7660.c227
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov7660.h279
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.c222
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.h57
-rw-r--r--drivers/media/video/gspca/m5602/m5602_po1030.c494
-rw-r--r--drivers/media/video/gspca/m5602/m5602_po1030.h439
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.c391
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.h93
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k83a.c473
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k83a.h280
-rw-r--r--drivers/media/video/gspca/m5602/m5602_sensor.h9
-rw-r--r--drivers/media/video/gspca/mr97310a.c8
-rw-r--r--drivers/media/video/gspca/ov519.c520
-rw-r--r--drivers/media/video/gspca/ov534.c277
-rw-r--r--drivers/media/video/gspca/sonixb.c2
-rw-r--r--drivers/media/video/gspca/sonixj.c66
-rw-r--r--drivers/media/video/gspca/spca500.c33
-rw-r--r--drivers/media/video/gspca/spca505.c14
-rw-r--r--drivers/media/video/gspca/spca508.c1934
-rw-r--r--drivers/media/video/gspca/spca561.c105
-rw-r--r--drivers/media/video/gspca/sq905.c1
-rw-r--r--drivers/media/video/gspca/sq905c.c1
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c76
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h10
-rw-r--r--drivers/media/video/gspca/sunplus.c33
-rw-r--r--drivers/media/video/gspca/t613.c2
-rw-r--r--drivers/media/video/gspca/vc032x.c22
-rw-r--r--drivers/media/video/gspca/zc3xx.c22
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c2
-rw-r--r--drivers/media/video/hexium_gemini.c2
-rw-r--r--drivers/media/video/hexium_orion.c2
-rw-r--r--drivers/media/video/ir-kbd-i2c.c222
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c9
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c36
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c2
-rw-r--r--drivers/media/video/ivtv/ivtv-queue.c3
-rw-r--r--drivers/media/video/mt9m001.c108
-rw-r--r--drivers/media/video/mt9m111.c73
-rw-r--r--drivers/media/video/mt9t031.c135
-rw-r--r--drivers/media/video/mt9v022.c138
-rw-r--r--drivers/media/video/mx1_camera.c50
-rw-r--r--drivers/media/video/mx3_camera.c46
-rw-r--r--drivers/media/video/mxb.c4
-rw-r--r--drivers/media/video/ov511.c45
-rw-r--r--drivers/media/video/ov511.h3
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-devattr.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-devattr.h23
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h3
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c74
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.c51
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-sysfs.c22
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c6
-rw-r--r--drivers/media/video/pwc/pwc-if.c6
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c2
-rw-r--r--drivers/media/video/pxa_camera.c126
-rw-r--r--drivers/media/video/s2255drv.c110
-rw-r--r--drivers/media/video/saa7134/Kconfig1
-rw-r--r--drivers/media/video/saa7134/Makefile3
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c450
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c18
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c26
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c14
-rw-r--r--drivers/media/video/saa7134/saa7134-i2c.c33
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c118
-rw-r--r--drivers/media/video/saa7134/saa7134-ts.c122
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c10
-rw-r--r--drivers/media/video/saa7134/saa7134.h29
-rw-r--r--drivers/media/video/se401.c882
-rw-r--r--drivers/media/video/se401.h7
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c27
-rw-r--r--drivers/media/video/soc_camera.c106
-rw-r--r--drivers/media/video/stk-webcam.c4
-rw-r--r--drivers/media/video/tda7432.c14
-rw-r--r--drivers/media/video/tea6415c.c1
-rw-r--r--drivers/media/video/tea6420.c1
-rw-r--r--drivers/media/video/ths7303.c151
-rw-r--r--drivers/media/video/tuner-core.c33
-rw-r--r--drivers/media/video/tveeprom.c6
-rw-r--r--drivers/media/video/tvp514x.c2
-rw-r--r--drivers/media/video/usbvideo/konicawc.c4
-rw-r--r--drivers/media/video/usbvideo/quickcam_messenger.c4
-rw-r--r--drivers/media/video/usbvision/usbvision-core.c14
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c4
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c35
-rw-r--r--drivers/media/video/uvc/uvc_driver.c68
-rw-r--r--drivers/media/video/uvc/uvc_queue.c14
-rw-r--r--drivers/media/video/uvc/uvc_status.c21
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c39
-rw-r--r--drivers/media/video/uvc/uvc_video.c17
-rw-r--r--drivers/media/video/uvc/uvcvideo.h5
-rw-r--r--drivers/media/video/v4l2-common.c4
-rw-r--r--drivers/media/video/v4l2-device.c31
-rw-r--r--drivers/media/video/videobuf-core.c6
-rw-r--r--drivers/media/video/videobuf-dma-contig.c108
-rw-r--r--drivers/media/video/videobuf-dma-sg.c19
-rw-r--r--drivers/media/video/vino.c6
-rw-r--r--drivers/media/video/zoran/zoran_card.c4
-rw-r--r--drivers/media/video/zr364xx.c6
-rw-r--r--drivers/memstick/core/mspro_block.c19
-rw-r--r--drivers/message/fusion/lsi/mpi_history.txt6
-rw-r--r--drivers/message/fusion/mptbase.c1570
-rw-r--r--drivers/message/fusion/mptbase.h180
-rw-r--r--drivers/message/fusion/mptctl.c692
-rw-r--r--drivers/message/fusion/mptdebug.h3
-rw-r--r--drivers/message/fusion/mptfc.c15
-rw-r--r--drivers/message/fusion/mptlan.c4
-rw-r--r--drivers/message/fusion/mptsas.c3136
-rw-r--r--drivers/message/fusion/mptsas.h41
-rw-r--r--drivers/message/fusion/mptscsih.c1329
-rw-r--r--drivers/message/fusion/mptscsih.h7
-rw-r--r--drivers/message/fusion/mptspi.c71
-rw-r--r--drivers/message/i2o/i2o_block.c43
-rw-r--r--drivers/mfd/htc-pasic3.c4
-rw-r--r--drivers/mfd/pcf50633-core.c4
-rw-r--r--drivers/mfd/t7l66xb.c5
-rw-r--r--drivers/mfd/tc6387xb.c5
-rw-r--r--drivers/mfd/tc6393xb.c5
-rw-r--r--drivers/mfd/wm8350-core.c8
-rw-r--r--drivers/mfd/wm8400-core.c2
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/c2port/core.c2
-rw-r--r--drivers/misc/cb710/Kconfig25
-rw-r--r--drivers/misc/cb710/Makefile8
-rw-r--r--drivers/misc/cb710/core.c357
-rw-r--r--drivers/misc/cb710/debug.c119
-rw-r--r--drivers/misc/cb710/sgbuf2.c150
-rw-r--r--drivers/misc/eeprom/Kconfig14
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/max6875.c (renamed from drivers/i2c/chips/max6875.c)2
-rw-r--r--drivers/misc/enclosure.c6
-rw-r--r--drivers/misc/sgi-gru/grufile.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c2
-rw-r--r--drivers/misc/sgi-xp/xpnet.c3
-rw-r--r--drivers/mmc/card/block.c28
-rw-r--r--drivers/mmc/card/queue.c11
-rw-r--r--drivers/mmc/core/core.c107
-rw-r--r--drivers/mmc/host/Kconfig30
-rw-r--r--drivers/mmc/host/Makefile5
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h33
-rw-r--r--drivers/mmc/host/atmel-mci.c12
-rw-r--r--drivers/mmc/host/cb710-mmc.c804
-rw-r--r--drivers/mmc/host/cb710-mmc.h104
-rw-r--r--drivers/mmc/host/mmc_spi.c23
-rw-r--r--drivers/mmc/host/mmci.c4
-rw-r--r--drivers/mmc/host/mvsdio.c35
-rw-r--r--drivers/mmc/host/mxcmmc.c45
-rw-r--r--drivers/mmc/host/omap.c5
-rw-r--r--drivers/mmc/host/omap_hsmmc.c8
-rw-r--r--drivers/mmc/host/pxamci.c46
-rw-r--r--drivers/mmc/host/s3cmci.c5
-rw-r--r--drivers/mmc/host/sdhci-of.c9
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c168
-rw-r--r--drivers/mmc/host/sdhci.c58
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/tmio_mmc.c180
-rw-r--r--drivers/mmc/host/tmio_mmc.h77
-rw-r--r--drivers/mtd/Kconfig3
-rw-r--r--drivers/mtd/devices/Kconfig2
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c43
-rw-r--r--drivers/mtd/nand/Kconfig4
-rw-r--r--drivers/mtd/nand/davinci_nand.c7
-rw-r--r--drivers/mtd/nand/mxc_nand.c43
-rw-r--r--drivers/mtd/onenand/omap2.c1
-rw-r--r--drivers/net/3c501.c65
-rw-r--r--drivers/net/3c503.c36
-rw-r--r--drivers/net/3c505.c217
-rw-r--r--drivers/net/3c507.c55
-rw-r--r--drivers/net/3c509.c80
-rw-r--r--drivers/net/3c515.c126
-rw-r--r--drivers/net/3c523.c91
-rw-r--r--drivers/net/3c527.c48
-rw-r--r--drivers/net/3c59x.c216
-rw-r--r--drivers/net/7990.c2
-rw-r--r--drivers/net/8139cp.c34
-rw-r--r--drivers/net/8139too.c210
-rw-r--r--drivers/net/82596.c14
-rw-r--r--drivers/net/8390.c10
-rw-r--r--drivers/net/8390p.c19
-rw-r--r--drivers/net/Kconfig76
-rw-r--r--drivers/net/Makefile9
-rw-r--r--drivers/net/a2065.c2
-rw-r--r--drivers/net/acenic.c1
-rw-r--r--drivers/net/appletalk/ipddp.c43
-rw-r--r--drivers/net/appletalk/ltpc.c2
-rw-r--r--drivers/net/arm/at91_ether.c2
-rw-r--r--drivers/net/arm/ep93xx_eth.c4
-rw-r--r--drivers/net/arm/ether3.c2
-rw-r--r--drivers/net/arm/ixp4xx_eth.c34
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/atl1c/atl1c_main.c73
-rw-r--r--drivers/net/atl1e/atl1e.h1
-rw-r--r--drivers/net/atl1e/atl1e_main.c16
-rw-r--r--drivers/net/atlx/atl1.c16
-rw-r--r--drivers/net/atlx/atlx.h6
-rw-r--r--drivers/net/au1000_eth.c2
-rw-r--r--drivers/net/b44.c4
-rw-r--r--drivers/net/b44.h3
-rw-r--r--drivers/net/benet/be.h14
-rw-r--r--drivers/net/benet/be_main.c169
-rw-r--r--drivers/net/bfin_mac.c264
-rw-r--r--drivers/net/bmac.c16
-rw-r--r--drivers/net/bnx2.c237
-rw-r--r--drivers/net/bnx2.h20
-rw-r--r--drivers/net/bnx2x.h15
-rw-r--r--drivers/net/bnx2x_fw_file_hdr.h37
-rw-r--r--drivers/net/bnx2x_init.h605
-rw-r--r--drivers/net/bnx2x_init_ops.h442
-rw-r--r--drivers/net/bnx2x_init_values.h16322
-rw-r--r--drivers/net/bnx2x_main.c359
-rw-r--r--drivers/net/bonding/bond_3ad.c16
-rw-r--r--drivers/net/bonding/bond_3ad.h4
-rw-r--r--drivers/net/bonding/bond_main.c826
-rw-r--r--drivers/net/bonding/bond_sysfs.c463
-rw-r--r--drivers/net/bonding/bonding.h10
-rw-r--r--drivers/net/can/Kconfig62
-rw-r--r--drivers/net/can/Makefile7
-rw-r--r--drivers/net/can/dev.c657
-rw-r--r--drivers/net/can/sja1000/Makefile11
-rw-r--r--drivers/net/can/sja1000/ems_pci.c320
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c412
-rw-r--r--drivers/net/can/sja1000/sja1000.c637
-rw-r--r--drivers/net/can/sja1000/sja1000.h181
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c235
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c165
-rw-r--r--drivers/net/cassini.c2
-rw-r--r--drivers/net/chelsio/common.h2
-rw-r--r--drivers/net/chelsio/cphy.h51
-rw-r--r--drivers/net/chelsio/cxgb2.c36
-rw-r--r--drivers/net/chelsio/mv88e1xxx.c5
-rw-r--r--drivers/net/chelsio/mv88x201x.c50
-rw-r--r--drivers/net/chelsio/my3126.c14
-rw-r--r--drivers/net/chelsio/sge.c5
-rw-r--r--drivers/net/chelsio/subr.c46
-rw-r--r--drivers/net/cnic.c2717
-rw-r--r--drivers/net/cnic.h299
-rw-r--r--drivers/net/cnic_defs.h580
-rw-r--r--drivers/net/cnic_if.h299
-rw-r--r--drivers/net/cpmac.c33
-rw-r--r--drivers/net/cs89x0.c2
-rw-r--r--drivers/net/cxgb3/Makefile2
-rw-r--r--drivers/net/cxgb3/adapter.h8
-rw-r--r--drivers/net/cxgb3/ael1002.c958
-rw-r--r--drivers/net/cxgb3/aq100x.c355
-rw-r--r--drivers/net/cxgb3/common.h67
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c160
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c27
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.h3
-rw-r--r--drivers/net/cxgb3/sge.c82
-rw-r--r--drivers/net/cxgb3/t3_hw.c94
-rw-r--r--drivers/net/cxgb3/version.h4
-rw-r--r--drivers/net/cxgb3/vsc8211.c70
-rw-r--r--drivers/net/davinci_emac.c2830
-rw-r--r--drivers/net/de600.c25
-rw-r--r--drivers/net/de620.c63
-rw-r--r--drivers/net/declance.c5
-rw-r--r--drivers/net/defxx.c2
-rw-r--r--drivers/net/depca.c8
-rw-r--r--drivers/net/dl2k.c8
-rw-r--r--drivers/net/dm9000.c32
-rw-r--r--drivers/net/e100.c203
-rw-r--r--drivers/net/e1000/e1000_main.c49
-rw-r--r--drivers/net/e1000e/82571.c99
-rw-r--r--drivers/net/e1000e/defines.h25
-rw-r--r--drivers/net/e1000e/e1000.h63
-rw-r--r--drivers/net/e1000e/es2lan.c3
-rw-r--r--drivers/net/e1000e/ethtool.c46
-rw-r--r--drivers/net/e1000e/hw.h20
-rw-r--r--drivers/net/e1000e/ich8lan.c448
-rw-r--r--drivers/net/e1000e/lib.c38
-rw-r--r--drivers/net/e1000e/netdev.c280
-rw-r--r--drivers/net/e1000e/param.c2
-rw-r--r--drivers/net/e1000e/phy.c699
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c8
-rw-r--r--drivers/net/enic/enic_main.c2
-rw-r--r--drivers/net/eql.c1
-rw-r--r--drivers/net/ethoc.c6
-rw-r--r--drivers/net/ewrk3.c2
-rw-r--r--drivers/net/fec.c901
-rw-r--r--drivers/net/fec.h127
-rw-r--r--drivers/net/fec_mpc52xx.c180
-rw-r--r--drivers/net/fec_mpc52xx_phy.c26
-rw-r--r--drivers/net/forcedeth.c255
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c108
-rw-r--r--drivers/net/fs_enet/fs_enet.h5
-rw-r--r--drivers/net/fs_enet/mac-fec.c34
-rw-r--r--drivers/net/fs_enet/mii-bitbang.c29
-rw-r--r--drivers/net/fs_enet/mii-fec.c32
-rw-r--r--drivers/net/fsl_pq_mdio.c59
-rw-r--r--drivers/net/gianfar.c123
-rw-r--r--drivers/net/gianfar.h6
-rw-r--r--drivers/net/hamachi.c5
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/bpqether.c4
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/hp100.c4
-rw-r--r--drivers/net/hplance.c21
-rw-r--r--drivers/net/ibm_newemac/core.c2
-rw-r--r--drivers/net/ibmlana.c5
-rw-r--r--drivers/net/ibmveth.c40
-rw-r--r--drivers/net/ifb.c1
-rw-r--r--drivers/net/igb/e1000_82575.h1
-rw-r--r--drivers/net/igb/e1000_defines.h3
-rw-r--r--drivers/net/igb/e1000_mbx.c8
-rw-r--r--drivers/net/igb/e1000_phy.h2
-rw-r--r--drivers/net/igb/e1000_regs.h1
-rw-r--r--drivers/net/igb/igb.h15
-rw-r--r--drivers/net/igb/igb_ethtool.c37
-rw-r--r--drivers/net/igb/igb_main.c148
-rw-r--r--drivers/net/igbvf/ethtool.c36
-rw-r--r--drivers/net/igbvf/igbvf.h8
-rw-r--r--drivers/net/igbvf/netdev.c23
-rw-r--r--drivers/net/ioc3-eth.c2
-rw-r--r--drivers/net/ipg.h2
-rw-r--r--drivers/net/irda/Kconfig45
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/au1k_ir.c22
-rw-r--r--drivers/net/irda/bfin_sir.c820
-rw-r--r--drivers/net/irda/bfin_sir.h148
-rw-r--r--drivers/net/irda/donauboe.c8
-rw-r--r--drivers/net/irda/irda-usb.c44
-rw-r--r--drivers/net/irda/kingsun-sir.c5
-rw-r--r--drivers/net/irda/ks959-sir.c5
-rw-r--r--drivers/net/irda/ksdazzle-sir.c5
-rw-r--r--drivers/net/irda/mcs7780.c6
-rw-r--r--drivers/net/irda/pxaficp_ir.c16
-rw-r--r--drivers/net/irda/sa1100_ir.c18
-rw-r--r--drivers/net/irda/sir_dev.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/iseries_veth.c17
-rw-r--r--drivers/net/ixgb/ixgb_hw.c20
-rw-r--r--drivers/net/ixgb/ixgb_hw.h14
-rw-r--r--drivers/net/ixgb/ixgb_main.c20
-rw-r--r--drivers/net/ixgb/ixgb_osdep.h2
-rw-r--r--drivers/net/ixgbe/Makefile2
-rw-r--r--drivers/net/ixgbe/ixgbe.h161
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c330
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c1489
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c316
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c119
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c961
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c556
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h67
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c1708
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c166
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h5
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h333
-rw-r--r--drivers/net/ixp2000/ixpdev.c19
-rw-r--r--drivers/net/jazzsonic.c19
-rw-r--r--drivers/net/jme.c1
-rw-r--r--drivers/net/korina.c39
-rw-r--r--drivers/net/ks8842.c732
-rw-r--r--drivers/net/lasi_82596.c6
-rw-r--r--drivers/net/lib82596.c23
-rw-r--r--drivers/net/lib8390.c2
-rw-r--r--drivers/net/ll_temac.h374
-rw-r--r--drivers/net/ll_temac_main.c969
-rw-r--r--drivers/net/ll_temac_mdio.c120
-rw-r--r--drivers/net/loopback.c22
-rw-r--r--drivers/net/mac8390.c31
-rw-r--r--drivers/net/mac89x0.c4
-rw-r--r--drivers/net/macb.c20
-rw-r--r--drivers/net/mace.c18
-rw-r--r--drivers/net/macmace.c18
-rw-r--r--drivers/net/macvlan.c35
-rw-r--r--drivers/net/mdio.c431
-rw-r--r--drivers/net/meth.c55
-rw-r--r--drivers/net/mii.c91
-rw-r--r--drivers/net/mipsnet.c15
-rw-r--r--drivers/net/mlx4/Makefile2
-rw-r--r--drivers/net/mlx4/en_cq.c7
-rw-r--r--drivers/net/mlx4/en_ethtool.c (renamed from drivers/net/mlx4/en_params.c)67
-rw-r--r--drivers/net/mlx4/en_main.c68
-rw-r--r--drivers/net/mlx4/en_netdev.c201
-rw-r--r--drivers/net/mlx4/en_rx.c139
-rw-r--r--drivers/net/mlx4/en_tx.c120
-rw-r--r--drivers/net/mlx4/eq.c8
-rw-r--r--drivers/net/mlx4/main.c14
-rw-r--r--drivers/net/mlx4/mlx4_en.h49
-rw-r--r--drivers/net/mlx4/mr.c13
-rw-r--r--drivers/net/mlx4/profile.c2
-rw-r--r--drivers/net/mv643xx_eth.c139
-rw-r--r--drivers/net/mvme147.c17
-rw-r--r--drivers/net/myri10ge/myri10ge.c70
-rw-r--r--drivers/net/myri_sbus.c2
-rw-r--r--drivers/net/ne2k-pci.c2
-rw-r--r--drivers/net/ne3210.c4
-rw-r--r--drivers/net/netx-eth.c17
-rw-r--r--drivers/net/netxen/netxen_nic.h624
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c241
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c106
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h8
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c737
-rw-r--r--drivers/net/netxen/netxen_nic_hw.h76
-rw-r--r--drivers/net/netxen/netxen_nic_init.c541
-rw-r--r--drivers/net/netxen/netxen_nic_main.c411
-rw-r--r--drivers/net/netxen/netxen_nic_niu.c341
-rw-r--r--drivers/net/netxen/netxen_nic_phan_reg.h27
-rw-r--r--drivers/net/ni65.c2
-rw-r--r--drivers/net/niu.c66
-rw-r--r--drivers/net/niu.h4
-rw-r--r--drivers/net/ns83820.c10
-rw-r--r--drivers/net/pasemi_mac.c58
-rw-r--r--drivers/net/pasemi_mac.h1
-rw-r--r--drivers/net/pci-skeleton.c19
-rw-r--r--drivers/net/pcmcia/3c574_cs.c2
-rw-r--r--drivers/net/pcmcia/3c589_cs.c4
-rw-r--r--drivers/net/pcmcia/axnet_cs.c2
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c2
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c2
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c2
-rw-r--r--drivers/net/pcnet32.c5
-rw-r--r--drivers/net/phy/marvell.c1
-rw-r--r--drivers/net/phy/mdio_bus.c29
-rw-r--r--drivers/net/phy/phy_device.c163
-rw-r--r--drivers/net/plip.c6
-rw-r--r--drivers/net/ppp_generic.c1
-rw-r--r--drivers/net/pppol2tp.c14
-rw-r--r--drivers/net/ps3_gelic_net.c8
-rw-r--r--drivers/net/qla3xxx.c1
-rw-r--r--drivers/net/qlge/qlge.h31
-rw-r--r--drivers/net/qlge/qlge_ethtool.c6
-rw-r--r--drivers/net/qlge/qlge_main.c136
-rw-r--r--drivers/net/qlge/qlge_mpi.c64
-rw-r--r--drivers/net/r6040.c18
-rw-r--r--drivers/net/r8169.c331
-rw-r--r--drivers/net/rionet.c14
-rw-r--r--drivers/net/rrunner.c2
-rw-r--r--drivers/net/s2io-regs.h5
-rw-r--r--drivers/net/s2io.c44
-rw-r--r--drivers/net/s2io.h9
-rw-r--r--drivers/net/sb1250-mac.c37
-rw-r--r--drivers/net/sfc/Kconfig2
-rw-r--r--drivers/net/sfc/boards.c2
-rw-r--r--drivers/net/sfc/efx.c29
-rw-r--r--drivers/net/sfc/ethtool.c19
-rw-r--r--drivers/net/sfc/falcon.c137
-rw-r--r--drivers/net/sfc/falcon_hwdefs.h3
-rw-r--r--drivers/net/sfc/falcon_xmac.c2
-rw-r--r--drivers/net/sfc/mdio_10g.c385
-rw-r--r--drivers/net/sfc/mdio_10g.h282
-rw-r--r--drivers/net/sfc/net_driver.h34
-rw-r--r--drivers/net/sfc/rx.c26
-rw-r--r--drivers/net/sfc/selftest.c22
-rw-r--r--drivers/net/sfc/selftest.h2
-rw-r--r--drivers/net/sfc/sfe4001.c3
-rw-r--r--drivers/net/sfc/tenxpress.c251
-rw-r--r--drivers/net/sfc/tx.c7
-rw-r--r--drivers/net/sfc/xenpack.h62
-rw-r--r--drivers/net/sfc/xfp_phy.c55
-rw-r--r--drivers/net/sgiseeq.c18
-rw-r--r--drivers/net/sh_eth.c499
-rw-r--r--drivers/net/sh_eth.h278
-rw-r--r--drivers/net/sis190.c59
-rw-r--r--drivers/net/sis900.c2
-rw-r--r--drivers/net/skfp/h/smt.h2
-rw-r--r--drivers/net/skfp/skfddi.c158
-rw-r--r--drivers/net/skge.c2
-rw-r--r--drivers/net/sky2.c1
-rw-r--r--drivers/net/smc-mca.c4
-rw-r--r--drivers/net/smc911x.c23
-rw-r--r--drivers/net/smc9194.c2
-rw-r--r--drivers/net/smc91x.h5
-rw-r--r--drivers/net/smsc911x.c63
-rw-r--r--drivers/net/sonic.c2
-rw-r--r--drivers/net/starfire.c2
-rw-r--r--drivers/net/sun3_82586.c2
-rw-r--r--drivers/net/sun3lance.c21
-rw-r--r--drivers/net/sundance.c53
-rw-r--r--drivers/net/sunhme.c2
-rw-r--r--drivers/net/tc35815.c13
-rw-r--r--drivers/net/tehuti.c14
-rw-r--r--drivers/net/tg3.c107
-rw-r--r--drivers/net/tg3.h6
-rw-r--r--drivers/net/tlan.c2
-rw-r--r--drivers/net/tokenring/3c359.c4
-rw-r--r--drivers/net/tokenring/lanstreamer.c4
-rw-r--r--drivers/net/tokenring/olympic.c4
-rw-r--r--drivers/net/tokenring/smctr.c2
-rw-r--r--drivers/net/tokenring/tms380tr.c2
-rw-r--r--drivers/net/tulip/Kconfig12
-rw-r--r--drivers/net/tulip/de2104x.c15
-rw-r--r--drivers/net/tulip/de4x5.c12
-rw-r--r--drivers/net/tulip/dmfe.c2
-rw-r--r--drivers/net/tulip/uli526x.c2
-rw-r--r--drivers/net/tulip/winbond-840.c3
-rw-r--r--drivers/net/tun.c111
-rw-r--r--drivers/net/ucc_geth.c211
-rw-r--r--drivers/net/ucc_geth.h35
-rw-r--r--drivers/net/ucc_geth_ethtool.c2
-rw-r--r--drivers/net/usb/Kconfig8
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/cdc_ether.c33
-rw-r--r--drivers/net/usb/dm9601.c12
-rw-r--r--drivers/net/usb/hso.c53
-rw-r--r--drivers/net/usb/int51x1.c253
-rw-r--r--drivers/net/usb/kaweth.c33
-rw-r--r--drivers/net/usb/rtl8150.c9
-rw-r--r--drivers/net/usb/smsc95xx.c4
-rw-r--r--drivers/net/usb/usbnet.c39
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/net/via-rhine.c58
-rw-r--r--drivers/net/via-velocity.c22
-rw-r--r--drivers/net/via-velocity.h1
-rw-r--r--drivers/net/virtio_net.c69
-rw-r--r--drivers/net/vxge/vxge-config.c12
-rw-r--r--drivers/net/vxge/vxge-main.c6
-rw-r--r--drivers/net/vxge/vxge-traffic.c6
-rw-r--r--drivers/net/wan/cycx_x25.c6
-rw-r--r--drivers/net/wan/dlci.c6
-rw-r--r--drivers/net/wan/hdlc_fr.c1
-rw-r--r--drivers/net/wan/ixp4xx_hss.c15
-rw-r--r--drivers/net/wan/pc300_drv.c20
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wan/wanxl.c2
-rw-r--r--drivers/net/wimax/i2400m/control.c124
-rw-r--r--drivers/net/wimax/i2400m/driver.c45
-rw-r--r--drivers/net/wimax/i2400m/fw.c58
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h9
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h50
-rw-r--r--drivers/net/wimax/i2400m/netdev.c4
-rw-r--r--drivers/net/wimax/i2400m/op-rfkill.c4
-rw-r--r--drivers/net/wimax/i2400m/rx.c15
-rw-r--r--drivers/net/wimax/i2400m/sdio-fw.c109
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c47
-rw-r--r--drivers/net/wimax/i2400m/sdio.c68
-rw-r--r--drivers/net/wimax/i2400m/tx.c75
-rw-r--r--drivers/net/wimax/i2400m/usb.c40
-rw-r--r--drivers/net/wireless/Kconfig23
-rw-r--r--drivers/net/wireless/Makefile8
-rw-r--r--drivers/net/wireless/adm8211.c14
-rw-r--r--drivers/net/wireless/airo.c32
-rw-r--r--drivers/net/wireless/arlan-main.c2
-rw-r--r--drivers/net/wireless/at76c50x-usb.c31
-rw-r--r--drivers/net/wireless/ath/Kconfig8
-rw-r--r--drivers/net/wireless/ath/Makefile6
-rw-r--r--drivers/net/wireless/ath/ar9170/Kconfig (renamed from drivers/net/wireless/ar9170/Kconfig)1
-rw-r--r--drivers/net/wireless/ath/ar9170/Makefile (renamed from drivers/net/wireless/ar9170/Makefile)0
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h (renamed from drivers/net/wireless/ar9170/ar9170.h)76
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.c (renamed from drivers/net/wireless/ar9170/cmd.c)0
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.h (renamed from drivers/net/wireless/ar9170/cmd.h)0
-rw-r--r--drivers/net/wireless/ath/ar9170/eeprom.h (renamed from drivers/net/wireless/ar9170/eeprom.h)0
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h (renamed from drivers/net/wireless/ar9170/hw.h)23
-rw-r--r--drivers/net/wireless/ath/ar9170/led.c (renamed from drivers/net/wireless/ar9170/led.c)17
-rw-r--r--drivers/net/wireless/ath/ar9170/mac.c (renamed from drivers/net/wireless/ar9170/mac.c)86
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c (renamed from drivers/net/wireless/ar9170/main.c)1411
-rw-r--r--drivers/net/wireless/ath/ar9170/phy.c (renamed from drivers/net/wireless/ar9170/phy.c)6
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c (renamed from drivers/net/wireless/ar9170/usb.c)195
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.h (renamed from drivers/net/wireless/ar9170/usb.h)11
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig (renamed from drivers/net/wireless/ath5k/Kconfig)6
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile (renamed from drivers/net/wireless/ath5k/Makefile)1
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h (renamed from drivers/net/wireless/ath5k/ath5k.h)46
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c (renamed from drivers/net/wireless/ath5k/attach.c)1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c (renamed from drivers/net/wireless/ath5k/base.c)347
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h (renamed from drivers/net/wireless/ath5k/base.h)13
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c (renamed from drivers/net/wireless/ath5k/caps.c)0
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c (renamed from drivers/net/wireless/ath5k/debug.c)0
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h (renamed from drivers/net/wireless/ath5k/debug.h)0
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c (renamed from drivers/net/wireless/ath5k/desc.c)0
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h (renamed from drivers/net/wireless/ath5k/desc.h)0
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c (renamed from drivers/net/wireless/ath5k/dma.c)2
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c (renamed from drivers/net/wireless/ath5k/eeprom.c)73
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h (renamed from drivers/net/wireless/ath5k/eeprom.h)46
-rw-r--r--drivers/net/wireless/ath/ath5k/gpio.c (renamed from drivers/net/wireless/ath5k/gpio.c)0
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c (renamed from drivers/net/wireless/ath5k/initvals.c)8
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c (renamed from drivers/net/wireless/ath5k/led.c)10
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c (renamed from drivers/net/wireless/ath5k/pcu.c)10
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c (renamed from drivers/net/wireless/ath5k/phy.c)541
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c (renamed from drivers/net/wireless/ath5k/qcu.c)7
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h (renamed from drivers/net/wireless/ath5k/reg.h)9
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c (renamed from drivers/net/wireless/ath5k/reset.c)100
-rw-r--r--drivers/net/wireless/ath/ath5k/rfbuffer.h (renamed from drivers/net/wireless/ath5k/rfbuffer.h)0
-rw-r--r--drivers/net/wireless/ath/ath5k/rfgain.h (renamed from drivers/net/wireless/ath5k/rfgain.h)0
-rw-r--r--drivers/net/wireless/ath/ath5k/rfkill.c121
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig (renamed from drivers/net/wireless/ath9k/Kconfig)1
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile (renamed from drivers/net/wireless/ath9k/Makefile)1
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c (renamed from drivers/net/wireless/ath9k/ahb.c)0
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c (renamed from drivers/net/wireless/ath9k/ani.c)8
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h (renamed from drivers/net/wireless/ath9k/ani.h)0
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h (renamed from drivers/net/wireless/ath9k/ath9k.h)122
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c (renamed from drivers/net/wireless/ath9k/beacon.c)133
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c (renamed from drivers/net/wireless/ath9k/calib.c)129
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h (renamed from drivers/net/wireless/ath9k/calib.h)33
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c (renamed from drivers/net/wireless/ath9k/debug.c)158
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h (renamed from drivers/net/wireless/ath9k/debug.h)35
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c (renamed from drivers/net/wireless/ath9k/eeprom.c)75
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h (renamed from drivers/net/wireless/ath9k/eeprom.h)4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c (renamed from drivers/net/wireless/ath9k/hw.c)269
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h (renamed from drivers/net/wireless/ath9k/hw.h)95
-rw-r--r--drivers/net/wireless/ath/ath9k/initvals.h (renamed from drivers/net/wireless/ath9k/initvals.h)0
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c (renamed from drivers/net/wireless/ath9k/mac.c)63
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h (renamed from drivers/net/wireless/ath9k/mac.h)0
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c (renamed from drivers/net/wireless/ath9k/main.c)780
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c (renamed from drivers/net/wireless/ath9k/pci.c)15
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.c (renamed from drivers/net/wireless/ath9k/phy.c)11
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h (renamed from drivers/net/wireless/ath9k/phy.h)5
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c (renamed from drivers/net/wireless/ath9k/rc.c)96
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h (renamed from drivers/net/wireless/ath9k/rc.h)4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c (renamed from drivers/net/wireless/ath9k/recv.c)279
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h (renamed from drivers/net/wireless/ath9k/reg.h)0
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c (renamed from drivers/net/wireless/ath9k/virtual.c)0
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c (renamed from drivers/net/wireless/ath9k/xmit.c)87
-rw-r--r--drivers/net/wireless/ath/main.c22
-rw-r--r--drivers/net/wireless/ath/regd.c (renamed from drivers/net/wireless/ath9k/regd.c)276
-rw-r--r--drivers/net/wireless/ath/regd.h (renamed from drivers/net/wireless/ath9k/regd.h)40
-rw-r--r--drivers/net/wireless/ath/regd_common.h (renamed from drivers/net/wireless/ath9k/regd_common.h)0
-rw-r--r--drivers/net/wireless/atmel.c2
-rw-r--r--drivers/net/wireless/atmel_cs.c2
-rw-r--r--drivers/net/wireless/b43/Kconfig9
-rw-r--r--drivers/net/wireless/b43/Makefile2
-rw-r--r--drivers/net/wireless/b43/b43.h25
-rw-r--r--drivers/net/wireless/b43/dma.c2
-rw-r--r--drivers/net/wireless/b43/leds.c9
-rw-r--r--drivers/net/wireless/b43/main.c232
-rw-r--r--drivers/net/wireless/b43/main.h1
-rw-r--r--drivers/net/wireless/b43/phy_a.c4
-rw-r--r--drivers/net/wireless/b43/phy_common.c17
-rw-r--r--drivers/net/wireless/b43/phy_common.h6
-rw-r--r--drivers/net/wireless/b43/phy_g.c4
-rw-r--r--drivers/net/wireless/b43/phy_lp.c2
-rw-r--r--drivers/net/wireless/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/b43/pio.c2
-rw-r--r--drivers/net/wireless/b43/rfkill.c171
-rw-r--r--drivers/net/wireless/b43/rfkill.h47
-rw-r--r--drivers/net/wireless/b43/xmit.c5
-rw-r--r--drivers/net/wireless/b43legacy/Kconfig10
-rw-r--r--drivers/net/wireless/b43legacy/Makefile2
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h15
-rw-r--r--drivers/net/wireless/b43legacy/leds.c10
-rw-r--r--drivers/net/wireless/b43legacy/main.c346
-rw-r--r--drivers/net/wireless/b43legacy/pio.c2
-rw-r--r--drivers/net/wireless/b43legacy/rfkill.c173
-rw-r--r--drivers/net/wireless/b43legacy/rfkill.h54
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.h4
-rw-r--r--drivers/net/wireless/hostap/Kconfig8
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c8
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c57
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c4
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_tx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig9
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c412
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c95
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c177
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c593
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1280
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h35
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1189
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h107
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c193
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h69
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c153
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h253
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c233
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h39
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rfkill.c145
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rfkill.h48
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c70
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c221
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c47
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c1460
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig23
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/bus.h57
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c409
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.h31
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c920
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h419
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debug.h124
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c453
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.c187
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.h114
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.c388
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.h100
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c464
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.h236
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h346
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h457
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c680
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c162
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c1431
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.h60
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c516
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.h67
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c492
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h744
-rw-r--r--drivers/net/wireless/iwmc3200wifi/wext.c723
-rw-r--r--drivers/net/wireless/libertas/11d.c26
-rw-r--r--drivers/net/wireless/libertas/11d.h29
-rw-r--r--drivers/net/wireless/libertas/README12
-rw-r--r--drivers/net/wireless/libertas/assoc.c758
-rw-r--r--drivers/net/wireless/libertas/assoc.h13
-rw-r--r--drivers/net/wireless/libertas/cmd.c42
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c17
-rw-r--r--drivers/net/wireless/libertas/debugfs.c8
-rw-r--r--drivers/net/wireless/libertas/defs.h21
-rw-r--r--drivers/net/wireless/libertas/dev.h9
-rw-r--r--drivers/net/wireless/libertas/host.h5
-rw-r--r--drivers/net/wireless/libertas/hostcmd.h69
-rw-r--r--drivers/net/wireless/libertas/if_cs.c34
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c176
-rw-r--r--drivers/net/wireless/libertas/if_sdio.h10
-rw-r--r--drivers/net/wireless/libertas/if_spi.c147
-rw-r--r--drivers/net/wireless/libertas/if_spi.h3
-rw-r--r--drivers/net/wireless/libertas/if_usb.c11
-rw-r--r--drivers/net/wireless/libertas/main.c44
-rw-r--r--drivers/net/wireless/libertas/rx.c48
-rw-r--r--drivers/net/wireless/libertas/scan.c63
-rw-r--r--drivers/net/wireless/libertas/tx.c8
-rw-r--r--drivers/net/wireless/libertas/types.h152
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c3
-rw-r--r--drivers/net/wireless/libertas_tf/main.c56
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c105
-rw-r--r--drivers/net/wireless/mwl8k.c20
-rw-r--r--drivers/net/wireless/p54/p54.h74
-rw-r--r--drivers/net/wireless/p54/p54common.c348
-rw-r--r--drivers/net/wireless/p54/p54spi.c175
-rw-r--r--drivers/net/wireless/p54/p54usb.c314
-rw-r--r--drivers/net/wireless/p54/p54usb.h16
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c8
-rw-r--r--drivers/net/wireless/ray_cs.c8
-rw-r--r--drivers/net/wireless/rndis_wlan.c1108
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig31
-rw-r--r--drivers/net/wireless/rt2x00/Makefile2
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c35
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c35
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c3078
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h1945
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h57
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c89
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c134
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c69
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h67
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c88
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c16
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c48
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h53
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c38
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c34
-rw-r--r--drivers/net/wireless/rtl818x/Makefile2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c33
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h7
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c78
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.c218
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.h57
-rw-r--r--drivers/net/wireless/strip.c4
-rw-r--r--drivers/net/wireless/wavelan.c10
-rw-r--r--drivers/net/wireless/wavelan_cs.c7
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig11
-rw-r--r--drivers/net/wireless/wl12xx/Makefile4
-rw-r--r--drivers/net/wireless/wl12xx/acx.c689
-rw-r--r--drivers/net/wireless/wl12xx/acx.h1245
-rw-r--r--drivers/net/wireless/wl12xx/boot.c295
-rw-r--r--drivers/net/wireless/wl12xx/boot.h40
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c353
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h265
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c508
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.h33
-rw-r--r--drivers/net/wireless/wl12xx/event.c127
-rw-r--r--drivers/net/wireless/wl12xx/event.h121
-rw-r--r--drivers/net/wireless/wl12xx/init.c200
-rw-r--r--drivers/net/wireless/wl12xx/init.h40
-rw-r--r--drivers/net/wireless/wl12xx/main.c1358
-rw-r--r--drivers/net/wireless/wl12xx/ps.c151
-rw-r--r--drivers/net/wireless/wl12xx/ps.h36
-rw-r--r--drivers/net/wireless/wl12xx/reg.h745
-rw-r--r--drivers/net/wireless/wl12xx/rx.c208
-rw-r--r--drivers/net/wireless/wl12xx/rx.h122
-rw-r--r--drivers/net/wireless/wl12xx/spi.c358
-rw-r--r--drivers/net/wireless/wl12xx/spi.h109
-rw-r--r--drivers/net/wireless/wl12xx/tx.c557
-rw-r--r--drivers/net/wireless/wl12xx/tx.h215
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.c709
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h165
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h409
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h156
-rw-r--r--drivers/net/wireless/wl3501_cs.c1
-rw-r--r--drivers/net/wireless/zd1201.c8
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c86
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h2
-rw-r--r--drivers/net/xen-netfront.c10
-rw-r--r--drivers/net/yellowfin.c3
-rw-r--r--drivers/of/Kconfig14
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/base.c25
-rw-r--r--drivers/of/of_mdio.c139
-rw-r--r--drivers/oprofile/cpu_buffer.c8
-rw-r--r--drivers/parisc/eisa.c2
-rw-r--r--drivers/parisc/iosapic.c6
-rw-r--r--drivers/parisc/sba_iommu.c6
-rw-r--r--drivers/parport/parport_gsc.c8
-rw-r--r--drivers/parport/parport_pc.c1802
-rw-r--r--drivers/parport/share.c13
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/hotplug/acpiphp.h1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c63
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c54
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c4
-rw-r--r--drivers/pci/htirq.c4
-rw-r--r--drivers/pci/intel-iommu.c9
-rw-r--r--drivers/pci/intr_remapping.c54
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/pci/pcie/portdrv_core.c2
-rw-r--r--drivers/pci/probe.c2
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/ds.c20
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c9
-rw-r--r--drivers/pcmcia/pxa2xx_stargate2.c174
-rw-r--r--drivers/platform/x86/Kconfig14
-rw-r--r--drivers/platform/x86/acer-wmi.c52
-rw-r--r--drivers/platform/x86/asus-laptop.c6
-rw-r--r--drivers/platform/x86/dell-laptop.c101
-rw-r--r--drivers/platform/x86/eeepc-laptop.c136
-rw-r--r--drivers/platform/x86/hp-wmi.c102
-rw-r--r--drivers/platform/x86/sony-laptop.c193
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c876
-rw-r--r--drivers/platform/x86/toshiba_acpi.c160
-rw-r--r--drivers/pnp/pnpacpi/core.c8
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c2
-rw-r--r--drivers/pnp/resource.c18
-rw-r--r--drivers/ps3/ps3-sys-manager.c2
-rw-r--r--drivers/ps3/ps3av.c8
-rw-r--r--drivers/ps3/ps3av_cmd.c3
-rw-r--r--drivers/rapidio/rio-scan.c6
-rw-r--r--drivers/regulator/Kconfig26
-rw-r--r--drivers/regulator/Makefile3
-rw-r--r--drivers/regulator/da903x.c4
-rw-r--r--drivers/regulator/fixed.c18
-rw-r--r--drivers/regulator/lp3971.c562
-rw-r--r--drivers/regulator/max1586.c282
-rw-r--r--drivers/regulator/pcf50633-regulator.c2
-rw-r--r--drivers/regulator/userspace-consumer.c200
-rw-r--r--drivers/regulator/virtual.c1
-rw-r--r--drivers/regulator/wm8350-regulator.c1
-rw-r--r--drivers/regulator/wm8400-regulator.c8
-rw-r--r--drivers/rtc/Kconfig2
-rw-r--r--drivers/rtc/rtc-ep93xx.c149
-rw-r--r--drivers/rtc/rtc-pl030.c4
-rw-r--r--drivers/rtc/rtc-pl031.c5
-rw-r--r--drivers/s390/block/dasd.c180
-rw-r--r--drivers/s390/block/dasd_devmap.c1
-rw-r--r--drivers/s390/block/dasd_diag.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c158
-rw-r--r--drivers/s390/block/dasd_fba.c35
-rw-r--r--drivers/s390/block/dasd_int.h16
-rw-r--r--drivers/s390/block/dcssblk.c138
-rw-r--r--drivers/s390/block/xpram.c131
-rw-r--r--drivers/s390/char/con3215.c221
-rw-r--r--drivers/s390/char/con3270.c50
-rw-r--r--drivers/s390/char/fs3270.c16
-rw-r--r--drivers/s390/char/monreader.c140
-rw-r--r--drivers/s390/char/monwriter.c98
-rw-r--r--drivers/s390/char/raw3270.c100
-rw-r--r--drivers/s390/char/raw3270.h12
-rw-r--r--drivers/s390/char/sclp.c248
-rw-r--r--drivers/s390/char/sclp.h23
-rw-r--r--drivers/s390/char/sclp_cmd.c42
-rw-r--r--drivers/s390/char/sclp_con.c139
-rw-r--r--drivers/s390/char/sclp_rw.c20
-rw-r--r--drivers/s390/char/sclp_rw.h12
-rw-r--r--drivers/s390/char/sclp_vt220.c118
-rw-r--r--drivers/s390/char/tape.h3
-rw-r--r--drivers/s390/char/tape_34xx.c7
-rw-r--r--drivers/s390/char/tape_3590.c7
-rw-r--r--drivers/s390/char/tape_block.c26
-rw-r--r--drivers/s390/char/tape_core.c74
-rw-r--r--drivers/s390/char/tty3270.c57
-rw-r--r--drivers/s390/char/vmlogrdr.c39
-rw-r--r--drivers/s390/char/vmur.c42
-rw-r--r--drivers/s390/char/vmwatchdog.c81
-rw-r--r--drivers/s390/cio/ccwgroup.c78
-rw-r--r--drivers/s390/cio/chsc.c3
-rw-r--r--drivers/s390/cio/chsc.h1
-rw-r--r--drivers/s390/cio/chsc_sch.c32
-rw-r--r--drivers/s390/cio/cio.c6
-rw-r--r--drivers/s390/cio/cmf.c5
-rw-r--r--drivers/s390/cio/css.c157
-rw-r--r--drivers/s390/cio/css.h10
-rw-r--r--drivers/s390/cio/device.c260
-rw-r--r--drivers/s390/cio/device.h3
-rw-r--r--drivers/s390/cio/device_fsm.c96
-rw-r--r--drivers/s390/cio/device_ops.c50
-rw-r--r--drivers/s390/cio/io_sch.h1
-rw-r--r--drivers/s390/cio/qdio_main.c46
-rw-r--r--drivers/s390/cio/qdio_perf.c12
-rw-r--r--drivers/s390/cio/qdio_perf.h10
-rw-r--r--drivers/s390/kvm/kvm_virtio.c43
-rw-r--r--drivers/s390/net/Kconfig14
-rw-r--r--drivers/s390/net/claw.c72
-rw-r--r--drivers/s390/net/ctcm_main.c43
-rw-r--r--drivers/s390/net/lcs.c94
-rw-r--r--drivers/s390/net/lcs.h4
-rw-r--r--drivers/s390/net/netiucv.c166
-rw-r--r--drivers/s390/net/qeth_core_main.c58
-rw-r--r--drivers/s390/net/qeth_core_mpc.c2
-rw-r--r--drivers/s390/net/qeth_core_mpc.h2
-rw-r--r--drivers/s390/net/qeth_l2_main.c88
-rw-r--r--drivers/s390/net/qeth_l3_main.c85
-rw-r--r--drivers/s390/net/smsgiucv.c63
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c63
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c10
-rw-r--r--drivers/s390/scsi/zfcp_def.h7
-rw-r--r--drivers/s390/scsi/zfcp_erp.c8
-rw-r--r--drivers/s390/scsi/zfcp_ext.h1
-rw-r--r--drivers/s390/scsi/zfcp_fc.c9
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c29
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c13
-rw-r--r--drivers/sbus/char/jsflash.c26
-rw-r--r--drivers/sbus/char/openprom.c2
-rw-r--r--drivers/scsi/3w-9xxx.c3
-rw-r--r--drivers/scsi/3w-xxxx.c5
-rw-r--r--drivers/scsi/3w-xxxx.h2
-rw-r--r--drivers/scsi/Kconfig42
-rw-r--r--drivers/scsi/Makefile4
-rw-r--r--drivers/scsi/NCR_D700.c2
-rw-r--r--drivers/scsi/aha1740.c2
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h155
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h1509
-rw-r--r--drivers/scsi/bnx2i/Kconfig7
-rw-r--r--drivers/scsi/bnx2i/Makefile3
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h771
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2405
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c438
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2064
-rw-r--r--drivers/scsi/bnx2i/bnx2i_sysfs.c142
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i.h1
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c26
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c23
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.h3
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c6
-rw-r--r--drivers/scsi/dpt/osd_util.h2
-rw-r--r--drivers/scsi/eata.c24
-rw-r--r--drivers/scsi/fcoe/fcoe.c292
-rw-r--r--drivers/scsi/fcoe/fcoe.h1
-rw-r--r--drivers/scsi/fcoe/libfcoe.c31
-rw-r--r--drivers/scsi/fnic/Makefile15
-rw-r--r--drivers/scsi/fnic/cq_desc.h78
-rw-r--r--drivers/scsi/fnic/cq_enet_desc.h167
-rw-r--r--drivers/scsi/fnic/cq_exch_desc.h182
-rw-r--r--drivers/scsi/fnic/fcpio.h780
-rw-r--r--drivers/scsi/fnic/fnic.h265
-rw-r--r--drivers/scsi/fnic/fnic_attrs.c56
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c742
-rw-r--r--drivers/scsi/fnic/fnic_io.h67
-rw-r--r--drivers/scsi/fnic/fnic_isr.c332
-rw-r--r--drivers/scsi/fnic/fnic_main.c943
-rw-r--r--drivers/scsi/fnic/fnic_res.c444
-rw-r--r--drivers/scsi/fnic/fnic_res.h197
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c1850
-rw-r--r--drivers/scsi/fnic/rq_enet_desc.h58
-rw-r--r--drivers/scsi/fnic/vnic_cq.c85
-rw-r--r--drivers/scsi/fnic/vnic_cq.h121
-rw-r--r--drivers/scsi/fnic/vnic_cq_copy.h62
-rw-r--r--drivers/scsi/fnic/vnic_dev.c690
-rw-r--r--drivers/scsi/fnic/vnic_dev.h161
-rw-r--r--drivers/scsi/fnic/vnic_devcmd.h281
-rw-r--r--drivers/scsi/fnic/vnic_intr.c60
-rw-r--r--drivers/scsi/fnic/vnic_intr.h118
-rw-r--r--drivers/scsi/fnic/vnic_nic.h69
-rw-r--r--drivers/scsi/fnic/vnic_resource.h61
-rw-r--r--drivers/scsi/fnic/vnic_rq.c196
-rw-r--r--drivers/scsi/fnic/vnic_rq.h235
-rw-r--r--drivers/scsi/fnic/vnic_scsi.h99
-rw-r--r--drivers/scsi/fnic/vnic_stats.h68
-rw-r--r--drivers/scsi/fnic/vnic_wq.c182
-rw-r--r--drivers/scsi/fnic/vnic_wq.h175
-rw-r--r--drivers/scsi/fnic/vnic_wq_copy.c117
-rw-r--r--drivers/scsi/fnic/vnic_wq_copy.h128
-rw-r--r--drivers/scsi/fnic/wq_enet_desc.h96
-rw-r--r--drivers/scsi/gdth_proc.c5
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c434
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h40
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c469
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h4
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c2
-rw-r--r--drivers/scsi/ibmvscsi/viosrp.h68
-rw-r--r--drivers/scsi/ipr.c5
-rw-r--r--drivers/scsi/libfc/fc_exch.c4
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c6
-rw-r--r--drivers/scsi/libiscsi.c468
-rw-r--r--drivers/scsi/libiscsi_tcp.c18
-rw-r--r--drivers/scsi/libsas/sas_expander.c16
-rw-r--r--drivers/scsi/libsas/sas_host_smp.c49
-rw-r--r--drivers/scsi/libsrp.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h123
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c250
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h63
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c275
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c1365
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h142
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2141
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c5626
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h54
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c674
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c206
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c51
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c956
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c6683
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h467
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c62
-rw-r--r--drivers/scsi/megaraid.h2
-rw-r--r--drivers/scsi/megaraid/mbox_defs.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h7
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c32
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c363
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c59
-rw-r--r--drivers/scsi/mvsas.c3222
-rw-r--r--drivers/scsi/mvsas/Kconfig42
-rw-r--r--drivers/scsi/mvsas/Makefile32
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c793
-rw-r--r--drivers/scsi/mvsas/mv_64xx.h151
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c672
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h222
-rw-r--r--drivers/scsi/mvsas/mv_chips.h280
-rw-r--r--drivers/scsi/mvsas/mv_defs.h502
-rw-r--r--drivers/scsi/mvsas/mv_init.c703
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2154
-rw-r--r--drivers/scsi/mvsas/mv_sas.h406
-rw-r--r--drivers/scsi/osd/Kbuild25
-rwxr-xr-xdrivers/scsi/osd/Makefile37
-rw-r--r--drivers/scsi/osd/osd_initiator.c155
-rw-r--r--drivers/scsi/osd/osd_uld.c66
-rw-r--r--drivers/scsi/qla1280.c387
-rw-r--r--drivers/scsi/qla1280.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c227
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h45
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h43
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c206
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c55
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c240
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c244
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c118
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c294
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c47
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_error.c21
-rw-r--r--drivers/scsi/scsi_lib.c101
-rw-r--r--drivers/scsi/scsi_scan.c5
-rw-r--r--drivers/scsi/scsi_tgt_lib.c2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c175
-rw-r--r--drivers/scsi/scsi_transport_sas.c4
-rw-r--r--drivers/scsi/sd.c71
-rw-r--r--drivers/scsi/sd_dif.c2
-rw-r--r--drivers/scsi/sg.c17
-rw-r--r--drivers/scsi/sr.c17
-rw-r--r--drivers/scsi/st.c8
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c66
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c49
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.h2
-rw-r--r--drivers/scsi/u14-34f.c22
-rw-r--r--drivers/serial/8250.c22
-rw-r--r--drivers/serial/8250_gsc.c4
-rw-r--r--drivers/serial/8250_pci.c3
-rw-r--r--drivers/serial/Kconfig10
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/amba-pl010.c4
-rw-r--r--drivers/serial/amba-pl011.c40
-rw-r--r--drivers/serial/atmel_serial.c8
-rw-r--r--drivers/serial/bfin_5xx.c77
-rw-r--r--drivers/serial/bfin_sport_uart.c58
-rw-r--r--drivers/serial/icom.c22
-rw-r--r--drivers/serial/imx.c309
-rw-r--r--drivers/serial/jsm/jsm.h1
-rw-r--r--drivers/serial/jsm/jsm_tty.c14
-rw-r--r--drivers/serial/mpc52xx_uart.c2
-rw-r--r--drivers/serial/of_serial.c4
-rw-r--r--drivers/serial/serial_cs.c22
-rw-r--r--drivers/serial/sh-sci.c388
-rw-r--r--drivers/serial/sh-sci.h42
-rw-r--r--drivers/serial/timbuart.c526
-rw-r--r--drivers/serial/timbuart.h58
-rw-r--r--drivers/sh/intc.c11
-rw-r--r--drivers/spi/Kconfig13
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/amba-pl022.c1866
-rw-r--r--drivers/spi/spi_bfin5xx.c4
-rw-r--r--drivers/spi/spi_mpc83xx.c6
-rw-r--r--drivers/spi/spi_s3c24xx_gpio.c1
-rw-r--r--drivers/ssb/embedded.c1
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/agnx/pci.c15
-rw-r--r--drivers/staging/at76_usb/at76_usb.c7
-rw-r--r--drivers/staging/et131x/et131x_netdev.c4
-rw-r--r--drivers/staging/go7007/go7007.txt4
-rw-r--r--drivers/staging/panel/lcd-panel-cgram.txt2
-rw-r--r--drivers/staging/rt2860/common/mlme.c2
-rw-r--r--drivers/staging/rt2870/common/mlme.c2
-rw-r--r--drivers/staging/rt3070/common/mlme.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/uc2322/aten2011.c4
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c8
-rw-r--r--drivers/thermal/thermal_sys.c12
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/atm/ueagle-atm.c9
-rw-r--r--drivers/usb/class/cdc-acm.c506
-rw-r--r--drivers/usb/class/cdc-acm.h7
-rw-r--r--drivers/usb/class/usblp.c6
-rw-r--r--drivers/usb/class/usbtmc.c6
-rw-r--r--drivers/usb/core/Kconfig16
-rw-r--r--drivers/usb/core/Makefile4
-rw-r--r--drivers/usb/core/config.c192
-rw-r--r--drivers/usb/core/driver.c56
-rw-r--r--drivers/usb/core/endpoint.c160
-rw-r--r--drivers/usb/core/file.c13
-rw-r--r--drivers/usb/core/hcd-pci.c244
-rw-r--r--drivers/usb/core/hcd.c220
-rw-r--r--drivers/usb/core/hcd.h55
-rw-r--r--drivers/usb/core/hub.c134
-rw-r--r--drivers/usb/core/hub.h3
-rw-r--r--drivers/usb/core/inode.c5
-rw-r--r--drivers/usb/core/message.c194
-rw-r--r--drivers/usb/core/sysfs.c12
-rw-r--r--drivers/usb/core/urb.c12
-rw-r--r--drivers/usb/core/usb.c87
-rw-r--r--drivers/usb/core/usb.h13
-rw-r--r--drivers/usb/gadget/Kconfig53
-rw-r--r--drivers/usb/gadget/Makefile8
-rw-r--r--drivers/usb/gadget/at91_udc.c10
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c25
-rw-r--r--drivers/usb/gadget/audio.c302
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c6
-rw-r--r--drivers/usb/gadget/f_audio.c707
-rw-r--r--drivers/usb/gadget/f_phonet.c21
-rw-r--r--drivers/usb/gadget/f_rndis.c4
-rw-r--r--drivers/usb/gadget/file_storage.c93
-rw-r--r--drivers/usb/gadget/fsl_mx3_udc.c95
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c (renamed from drivers/usb/gadget/fsl_usb2_udc.c)69
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h18
-rw-r--r--drivers/usb/gadget/gadget_chips.h8
-rw-r--r--drivers/usb/gadget/goku_udc.c6
-rw-r--r--drivers/usb/gadget/imx_udc.c14
-rw-r--r--drivers/usb/gadget/inode.c14
-rw-r--r--drivers/usb/gadget/langwell_udc.c3373
-rw-r--r--drivers/usb/gadget/langwell_udc.h228
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c71
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h2
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c3269
-rw-r--r--drivers/usb/gadget/u_audio.c319
-rw-r--r--drivers/usb/gadget/u_audio.h56
-rw-r--r--drivers/usb/gadget/u_ether.c2
-rw-r--r--drivers/usb/gadget/u_serial.c1
-rw-r--r--drivers/usb/host/Kconfig20
-rw-r--r--drivers/usb/host/Makefile2
-rw-r--r--drivers/usb/host/ehci-au1xxx.c1
-rw-r--r--drivers/usb/host/ehci-fsl.c1
-rw-r--r--drivers/usb/host/ehci-hcd.c47
-rw-r--r--drivers/usb/host/ehci-hub.c4
-rw-r--r--drivers/usb/host/ehci-ixp4xx.c1
-rw-r--r--drivers/usb/host/ehci-orion.c3
-rw-r--r--drivers/usb/host/ehci-pci.c27
-rw-r--r--drivers/usb/host/ehci-ppc-of.c1
-rw-r--r--drivers/usb/host/ehci-ps3.c8
-rw-r--r--drivers/usb/host/ehci-q.c19
-rw-r--r--drivers/usb/host/ehci-sched.c8
-rw-r--r--drivers/usb/host/ehci.h1
-rw-r--r--drivers/usb/host/fhci-dbg.c2
-rw-r--r--drivers/usb/host/hwa-hc.c21
-rw-r--r--drivers/usb/host/isp1760-hcd.c24
-rw-r--r--drivers/usb/host/ohci-dbg.c31
-rw-r--r--drivers/usb/host/ohci-ep93xx.c13
-rw-r--r--drivers/usb/host/ohci-hcd.c38
-rw-r--r--drivers/usb/host/ohci-pci.c24
-rw-r--r--drivers/usb/host/ohci-ps3.c7
-rw-r--r--drivers/usb/host/pci-quirks.c123
-rw-r--r--drivers/usb/host/r8a66597-hcd.c62
-rw-r--r--drivers/usb/host/r8a66597.h38
-rw-r--r--drivers/usb/host/uhci-hcd.c23
-rw-r--r--drivers/usb/host/uhci-q.c2
-rw-r--r--drivers/usb/host/xhci-dbg.c485
-rw-r--r--drivers/usb/host/xhci-ext-caps.h145
-rw-r--r--drivers/usb/host/xhci-hcd.c1274
-rw-r--r--drivers/usb/host/xhci-hub.c308
-rw-r--r--drivers/usb/host/xhci-mem.c769
-rw-r--r--drivers/usb/host/xhci-pci.c166
-rw-r--r--drivers/usb/host/xhci-ring.c1648
-rw-r--r--drivers/usb/host/xhci.h1157
-rw-r--r--drivers/usb/misc/iowarrior.c6
-rw-r--r--drivers/usb/misc/legousbtower.c6
-rw-r--r--drivers/usb/misc/sisusbvga/Kconfig2
-rw-r--r--drivers/usb/misc/usbtest.c39
-rw-r--r--drivers/usb/mon/mon_text.c2
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/blackfin.c11
-rw-r--r--drivers/usb/musb/cppi_dma.c34
-rw-r--r--drivers/usb/musb/cppi_dma.h6
-rw-r--r--drivers/usb/musb/davinci.c54
-rw-r--r--drivers/usb/musb/musb_core.c228
-rw-r--r--drivers/usb/musb/musb_core.h22
-rw-r--r--drivers/usb/musb/musb_gadget.c45
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c45
-rw-r--r--drivers/usb/musb/musb_host.c273
-rw-r--r--drivers/usb/musb/musb_host.h1
-rw-r--r--drivers/usb/musb/musb_virthub.c35
-rw-r--r--drivers/usb/musb/omap2430.c71
-rw-r--r--drivers/usb/musb/tusb6010.c70
-rw-r--r--drivers/usb/otg/Kconfig14
-rw-r--r--drivers/usb/otg/Makefile1
-rw-r--r--drivers/usb/otg/langwell_otg.c1915
-rw-r--r--drivers/usb/otg/nop-usb-xceiv.c25
-rw-r--r--drivers/usb/otg/twl4030-usb.c28
-rw-r--r--drivers/usb/serial/aircable.c5
-rw-r--r--drivers/usb/serial/belkin_sa.c13
-rw-r--r--drivers/usb/serial/bus.c27
-rw-r--r--drivers/usb/serial/ch341.c46
-rw-r--r--drivers/usb/serial/console.c6
-rw-r--r--drivers/usb/serial/cp210x.c253
-rw-r--r--drivers/usb/serial/cyberjack.c26
-rw-r--r--drivers/usb/serial/cypress_m8.c92
-rw-r--r--drivers/usb/serial/digi_acceleport.c95
-rw-r--r--drivers/usb/serial/empeg.c14
-rw-r--r--drivers/usb/serial/ftdi_sio.c337
-rw-r--r--drivers/usb/serial/ftdi_sio.h13
-rw-r--r--drivers/usb/serial/garmin_gps.c217
-rw-r--r--drivers/usb/serial/generic.c189
-rw-r--r--drivers/usb/serial/io_edgeport.c39
-rw-r--r--drivers/usb/serial/io_tables.h12
-rw-r--r--drivers/usb/serial/io_ti.c27
-rw-r--r--drivers/usb/serial/ipaq.c13
-rw-r--r--drivers/usb/serial/ipw.c18
-rw-r--r--drivers/usb/serial/ir-usb.c6
-rw-r--r--drivers/usb/serial/iuu_phoenix.c108
-rw-r--r--drivers/usb/serial/keyspan.c26
-rw-r--r--drivers/usb/serial/keyspan.h20
-rw-r--r--drivers/usb/serial/keyspan_pda.c52
-rw-r--r--drivers/usb/serial/kl5kusb105.c45
-rw-r--r--drivers/usb/serial/kobil_sct.c19
-rw-r--r--drivers/usb/serial/mct_u232.c50
-rw-r--r--drivers/usb/serial/mos7720.c12
-rw-r--r--drivers/usb/serial/mos7840.c406
-rw-r--r--drivers/usb/serial/navman.c3
-rw-r--r--drivers/usb/serial/omninet.c25
-rw-r--r--drivers/usb/serial/opticon.c17
-rw-r--r--drivers/usb/serial/option.c109
-rw-r--r--drivers/usb/serial/oti6858.c64
-rw-r--r--drivers/usb/serial/pl2303.c89
-rw-r--r--drivers/usb/serial/sierra.c528
-rw-r--r--drivers/usb/serial/spcp8x5.c90
-rw-r--r--drivers/usb/serial/symbolserial.c17
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c16
-rw-r--r--drivers/usb/serial/usb-serial.c198
-rw-r--r--drivers/usb/serial/usb_debug.c41
-rw-r--r--drivers/usb/serial/visor.c19
-rw-r--r--drivers/usb/serial/whiteheat.c39
-rw-r--r--drivers/usb/storage/initializers.c14
-rw-r--r--drivers/usb/storage/option_ms.c124
-rw-r--r--drivers/usb/storage/scsiglue.c4
-rw-r--r--drivers/usb/storage/sierra_ms.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/video/Kconfig15
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/acornfb.c38
-rw-r--r--drivers/video/amba-clcd.c4
-rw-r--r--drivers/video/atmel_lcdfb.c12
-rw-r--r--drivers/video/aty/aty128fb.c2
-rw-r--r--drivers/video/aty/radeon_pm.c3
-rw-r--r--drivers/video/bf54x-lq043fb.c15
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c15
-rw-r--r--drivers/video/bw2.c20
-rw-r--r--drivers/video/carminefb.c2
-rw-r--r--drivers/video/cg14.c19
-rw-r--r--drivers/video/cg3.c20
-rw-r--r--drivers/video/cg6.c25
-rw-r--r--drivers/video/chipsfb.c1
-rw-r--r--drivers/video/console/vgacon.c5
-rw-r--r--drivers/video/cyber2000fb.c9
-rw-r--r--drivers/video/efifb.c5
-rw-r--r--drivers/video/fbmem.c31
-rw-r--r--drivers/video/hitfb.c4
-rw-r--r--drivers/video/igafb.c8
-rw-r--r--drivers/video/intelfb/intelfbdrv.c5
-rw-r--r--drivers/video/leo.c14
-rw-r--r--drivers/video/logo/Makefile12
-rw-r--r--drivers/video/logo/logo.c15
-rw-r--r--drivers/video/mb862xx/mb862xxfb.c2
-rw-r--r--drivers/video/modedb.c8
-rw-r--r--drivers/video/mx3fb.c4
-rw-r--r--drivers/video/offb.c8
-rw-r--r--drivers/video/omap/dispc.c14
-rw-r--r--drivers/video/omap/hwa742.c26
-rw-r--r--drivers/video/omap/rfbi.c8
-rw-r--r--drivers/video/p9100.c20
-rw-r--r--drivers/video/pm2fb.c2
-rw-r--r--drivers/video/ps3fb.c272
-rw-r--r--drivers/video/pxa168fb.c803
-rw-r--r--drivers/video/pxa168fb.h558
-rw-r--r--drivers/video/s1d13xxxfb.c341
-rw-r--r--drivers/video/s3c-fb.c65
-rw-r--r--drivers/video/s3c2410fb.c67
-rw-r--r--drivers/video/s3c2410fb.h5
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c7
-rw-r--r--drivers/video/sis/sis_main.c4
-rw-r--r--drivers/video/stifb.c2
-rw-r--r--drivers/video/tcx.c27
-rw-r--r--drivers/video/tdfxfb.c1
-rw-r--r--drivers/video/uvesafb.c10
-rw-r--r--drivers/video/vesafb.c15
-rw-r--r--drivers/video/xen-fbfront.c8
-rw-r--r--drivers/video/xilinxfb.c290
-rw-r--r--drivers/virtio/virtio.c29
-rw-r--r--drivers/virtio/virtio_balloon.c27
-rw-r--r--drivers/virtio/virtio_pci.c307
-rw-r--r--drivers/virtio/virtio_ring.c102
-rw-r--r--drivers/vlynq/Kconfig20
-rw-r--r--drivers/vlynq/Makefile5
-rw-r--r--drivers/vlynq/vlynq.c814
-rw-r--r--drivers/w1/Kconfig2
-rw-r--r--drivers/w1/masters/Kconfig6
-rw-r--r--drivers/watchdog/Kconfig12
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/iop_wdt.c2
-rw-r--r--drivers/watchdog/orion_wdt.c (renamed from drivers/watchdog/orion5x_wdt.c)120
-rw-r--r--drivers/xen/Kconfig20
-rw-r--r--drivers/xen/Makefile4
-rw-r--r--drivers/xen/events.c20
-rw-r--r--drivers/xen/evtchn.c507
-rw-r--r--drivers/xen/manage.c25
-rw-r--r--drivers/xen/sys-hypervisor.c445
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c61
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c2
-rw-r--r--drivers/xen/xenfs/super.c19
2050 files changed, 233193 insertions, 65973 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 00cf9553f74..a442c8f29fc 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -104,6 +104,8 @@ source "drivers/auxdisplay/Kconfig"
source "drivers/uio/Kconfig"
+source "drivers/vlynq/Kconfig"
+
source "drivers/xen/Kconfig"
source "drivers/staging/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 1266ead6ace..00b44f4ccf0 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -105,5 +105,7 @@ obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_OF) += of/
obj-$(CONFIG_SSB) += ssb/
obj-$(CONFIG_VIRTIO) += virtio/
+obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
+obj-y += ieee802154/
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 17e50824a6f..72ac28da14e 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -5,40 +5,43 @@
ccflags-y := -Os
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
-obj-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
+# use acpi.o to put all files here into acpi.o modparam namespace
+obj-y += acpi.o
+
+acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \
dsinit.o
-obj-y += evevent.o evregion.o evsci.o evxfevnt.o \
+acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
evgpe.o evgpeblk.o
-obj-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
+acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o
-obj-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o
+acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o
-obj-$(ACPI_FUTURE_USAGE) += hwtimer.o
+acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
-obj-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \
+acpi-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \
nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \
nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \
nsparse.o nspredef.o
-obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
+acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
-obj-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \
+acpi-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \
psopcode.o psscope.o psutils.o psxface.o
-obj-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
+acpi-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
rscalc.o rsirq.o rsmemory.o rsutils.o
-obj-$(ACPI_FUTURE_USAGE) += rsdump.o
+acpi-$(ACPI_FUTURE_USAGE) += rsdump.o
-obj-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
+acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
-obj-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
+acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
utstate.o utmutex.o utobject.o utresrc.o utlock.o
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 07e20135f01..0bba148a2c6 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -139,7 +139,7 @@ acpi_status acpi_ev_initialize_op_regions(void);
acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
u32 function,
- acpi_physical_address address,
+ u32 region_offset,
u32 bit_width, acpi_integer * value);
acpi_status
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 16e5210ae93..3d87362d17e 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -362,9 +362,6 @@ extern u8 acpi_gbl_method_executing;
extern u8 acpi_gbl_abort_method;
extern u8 acpi_gbl_db_terminate_threads;
-ACPI_EXTERN int optind;
-ACPI_EXTERN char *optarg;
-
ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 772ee5c4ccc..ee986edfa0d 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -205,6 +205,7 @@ struct acpi_namespace_node {
#define ANOBJ_METHOD_LOCAL 0x08 /* Node is a method local */
#define ANOBJ_SUBTREE_HAS_INI 0x10 /* Used to optimize device initialization */
#define ANOBJ_EVALUATED 0x20 /* Set on first evaluation of node */
+#define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */
#define ANOBJ_IS_EXTERNAL 0x08 /* i_aSL only: This object created via External() */
#define ANOBJ_METHOD_NO_RETVAL 0x10 /* i_aSL only: Method has no return value */
@@ -787,7 +788,15 @@ struct acpi_bit_register_info {
/* For control registers, both ignored and reserved bits must be preserved */
-#define ACPI_PM1_CONTROL_IGNORED_BITS 0x0201 /* Bits 9, 0(SCI_EN) */
+/*
+ * For PM1 control, the SCI enable bit (bit 0, SCI_EN) is defined by the
+ * ACPI specification to be a "preserved" bit - "OSPM always preserves this
+ * bit position", section 4.7.3.2.1. However, on some machines the OS must
+ * write a one to this bit after resume for the machine to work properly.
+ * To enable this, we no longer attempt to preserve this bit. No machines
+ * are known to fail if the bit is not preserved. (May 2009)
+ */
+#define ACPI_PM1_CONTROL_IGNORED_BITS 0x0200 /* Bit 9 */
#define ACPI_PM1_CONTROL_RESERVED_BITS 0xC1F8 /* Bits 14-15, 3-8 */
#define ACPI_PM1_CONTROL_PRESERVED_BITS \
(ACPI_PM1_CONTROL_IGNORED_BITS | ACPI_PM1_CONTROL_RESERVED_BITS)
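To illustrate the effect of shrinking the ignored-bit mask above: writes to PM1 control are merged against the preserved mask, so once SCI_EN (bit 0) is no longer part of it, an OS write can actually reach that bit after resume. The helper below is a minimal standalone sketch assuming the usual read-merge-write pattern; it reuses the mask values from the hunk but is not the ACPICA register-write path.

#include <stdint.h>
#include <stdio.h>

/* Mask values copied from the hunk above (illustrative copy). */
#define ACPI_PM1_CONTROL_IGNORED_BITS   0x0200  /* Bit 9 only; SCI_EN (bit 0) no longer ignored */
#define ACPI_PM1_CONTROL_RESERVED_BITS  0xC1F8  /* Bits 14-15, 3-8 */
#define ACPI_PM1_CONTROL_PRESERVED_BITS \
        (ACPI_PM1_CONTROL_IGNORED_BITS | ACPI_PM1_CONTROL_RESERVED_BITS)

/*
 * Hypothetical helper: merge a requested value into the current register
 * contents, keeping preserved bits unchanged. With SCI_EN removed from the
 * ignored set, bit 0 of 'requested' now makes it through to the hardware.
 */
static uint16_t pm1_control_merge(uint16_t current, uint16_t requested)
{
        return (uint16_t)((current & ACPI_PM1_CONTROL_PRESERVED_BITS) |
                          (requested & ~ACPI_PM1_CONTROL_PRESERVED_BITS));
}

int main(void)
{
        /*
         * After resume: register reads back with SCI_EN clear, OS asks for it
         * set. Under the old 0x0201 ignored mask bit 0 would have been
         * stripped; with the new mask the write takes effect.
         */
        printf("0x%04X\n", pm1_control_merge(0x0000, 0x0001));  /* prints 0x0001 */
        return 0;
}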
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 46cb5b46d28..94cdc2b8cb9 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -99,10 +99,19 @@ acpi_ns_walk_namespace(acpi_object_type type,
acpi_walk_callback user_function,
void *context, void **return_value);
-struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
- *parent, struct acpi_namespace_node
+struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
+ *parent,
+ struct acpi_namespace_node
*child);
+struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
+ struct
+ acpi_namespace_node
+ *parent,
+ struct
+ acpi_namespace_node
+ *child);
+
/*
* nsparse - table parsing
*/
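A brief usage sketch of the split interface declared above: the untyped walker visits every child of a parent node, while the typed variant filters on an object type. The prototypes are from the hunk, but the NULL-to-start iteration convention and the wrapper functions below are assumptions about the caller pattern, and the fragment presumes the ACPICA internal headers.

static void walk_all_children(struct acpi_namespace_node *parent)
{
        struct acpi_namespace_node *child = NULL;

        /* Passing a NULL child is assumed to start the walk; each call
         * returns the next peer until the list is exhausted. */
        while ((child = acpi_ns_get_next_node(parent, child)) != NULL) {
                /* visit 'child' (any object type) */
        }
}

static void walk_device_children(struct acpi_namespace_node *parent)
{
        struct acpi_namespace_node *dev = NULL;

        while ((dev = acpi_ns_get_next_node_typed(ACPI_TYPE_DEVICE,
                                                  parent, dev)) != NULL) {
                /* visit only ACPI_TYPE_DEVICE children */
        }
}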
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index ff851c5df69..067f967eb38 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -483,7 +483,7 @@ typedef enum {
#define AML_METHOD_ARG_COUNT 0x07
#define AML_METHOD_SERIALIZED 0x08
-#define AML_METHOD_SYNCH_LEVEL 0xF0
+#define AML_METHOD_SYNC_LEVEL 0xF0
/* METHOD_FLAGS_ARG_COUNT is not used internally, define additional flags */
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index dab3f48f0b4..02e6caad4a7 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -734,7 +734,8 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
/* Local ID (0-7) is (AML opcode - base AML_LOCAL_OP) */
- obj_desc->reference.value = opcode - AML_LOCAL_OP;
+ obj_desc->reference.value =
+ ((u32)opcode) - AML_LOCAL_OP;
obj_desc->reference.class = ACPI_REFCLASS_LOCAL;
#ifndef ACPI_NO_METHOD_EXECUTION
@@ -754,7 +755,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
/* Arg ID (0-6) is (AML opcode - base AML_ARG_OP) */
- obj_desc->reference.value = opcode - AML_ARG_OP;
+ obj_desc->reference.value = ((u32)opcode) - AML_ARG_OP;
obj_desc->reference.class = ACPI_REFCLASS_ARG;
#ifndef ACPI_NO_METHOD_EXECUTION
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index b4c87b5053e..584d766e6f1 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -1386,14 +1386,19 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
case AML_BREAK_POINT_OP:
- /* Call up to the OS service layer to handle this */
-
- status =
- acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
- "Executed AML Breakpoint opcode");
+ /*
+ * Set the single-step flag. This will cause the debugger (if present)
+ * to break to the console within the AML debugger at the start of the
+ * next AML instruction.
+ */
+ ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
+ ACPI_DEBUGGER_EXEC(acpi_os_printf
+ ("**break** Executed AML BreakPoint opcode\n"));
- /* If and when it returns, all done. */
+ /* Call to the OSL in case OS wants a piece of the action */
+ status = acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
+ "Executed AML Breakpoint opcode");
break;
case AML_BREAK_OP:
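For reference, ACPI_DEBUGGER_EXEC() as used above only takes effect in debugger-enabled builds; otherwise the breakpoint opcode reduces to the acpi_os_signal() call alone. A sketch of the wrapper's assumed shape (a conditional definition in an ACPICA macro header, not quoted from this tree):

#ifdef ACPI_DEBUGGER
#define ACPI_DEBUGGER_EXEC(a)   a       /* debugger built in: execute the statement */
#else
#define ACPI_DEBUGGER_EXEC(a)           /* debugger not built: expands to nothing */
#endif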
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 40f92bf7dce..e46c821cf57 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -102,7 +102,7 @@ acpi_ds_result_pop(union acpi_operand_object **object,
/* Return object of the top element and clean that top element result stack */
walk_state->result_count--;
- index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
+ index = (u32)walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
*object = state->results.obj_desc[index];
if (!*object) {
@@ -186,7 +186,7 @@ acpi_ds_result_push(union acpi_operand_object * object,
/* Assign the address of object to the top free element of result stack */
- index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
+ index = (u32)walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
state->results.obj_desc[index] = object;
walk_state->result_count++;
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 538d6326455..98c7f9c6265 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -275,7 +275,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
*
* PARAMETERS: region_obj - Internal region object
* Function - Read or Write operation
- * Address - Where in the space to read or write
+ * region_offset - Where in the region to read or write
* bit_width - Field width in bits (8, 16, 32, or 64)
* Value - Pointer to in or out value, must be
* full 64-bit acpi_integer
@@ -290,7 +290,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
u32 function,
- acpi_physical_address address,
+ u32 region_offset,
u32 bit_width, acpi_integer * value)
{
acpi_status status;
@@ -396,7 +396,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
"Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
&region_obj->region.handler->address_space, handler,
- ACPI_FORMAT_NATIVE_UINT(address),
+ ACPI_FORMAT_NATIVE_UINT(region_obj->region.address +
+ region_offset),
acpi_ut_get_region_name(region_obj->region.
space_id)));
@@ -412,8 +413,9 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
/* Call the handler */
- status = handler(function, address, bit_width, value,
- handler_desc->address_space.context,
+ status = handler(function,
+ (region_obj->region.address + region_offset),
+ bit_width, value, handler_desc->address_space.context,
region_obj2->extra.region_context);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index d0a080747ec..4721f58fe42 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -51,7 +51,7 @@
ACPI_MODULE_NAME("evxfevnt")
/* Local prototypes */
-acpi_status
+static acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block, void *context);
@@ -785,7 +785,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
* block device. NULL if the GPE is one of the FADT-defined GPEs.
*
******************************************************************************/
-acpi_status
+static acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block, void *context)
{
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 3deb20a126b..277fd609611 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -47,6 +47,7 @@
#include "acnamesp.h"
#include "actables.h"
#include "acdispat.h"
+#include "acevents.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exconfig")
@@ -57,6 +58,10 @@ acpi_ex_add_table(u32 table_index,
struct acpi_namespace_node *parent_node,
union acpi_operand_object **ddb_handle);
+static acpi_status
+acpi_ex_region_read(union acpi_operand_object *obj_desc,
+ u32 length, u8 *buffer);
+
/*******************************************************************************
*
* FUNCTION: acpi_ex_add_table
@@ -91,6 +96,7 @@ acpi_ex_add_table(u32 table_index,
/* Init the table handle */
+ obj_desc->common.flags |= AOPOBJ_DATA_VALID;
obj_desc->reference.class = ACPI_REFCLASS_TABLE;
*ddb_handle = obj_desc;
@@ -229,6 +235,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
walk_state);
if (ACPI_FAILURE(status)) {
(void)acpi_ex_unload_table(ddb_handle);
+
+ acpi_ut_remove_reference(ddb_handle);
return_ACPI_STATUS(status);
}
}
@@ -254,6 +262,47 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
/*******************************************************************************
*
+ * FUNCTION: acpi_ex_region_read
+ *
+ * PARAMETERS: obj_desc - Region descriptor
+ * Length - Number of bytes to read
+ * Buffer - Pointer to where to put the data
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Read data from an operation region. The read starts from the
+ * beginning of the region.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
+{
+ acpi_status status;
+ acpi_integer value;
+ u32 region_offset = 0;
+ u32 i;
+
+ /* Bytewise reads */
+
+ for (i = 0; i < length; i++) {
+ status = acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
+ region_offset, 8,
+ &value);
+ if (ACPI_FAILURE(status)) {
+ return status;
+ }
+
+ *buffer = (u8)value;
+ buffer++;
+ region_offset++;
+ }
+
+ return AE_OK;
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ex_load_op
*
* PARAMETERS: obj_desc - Region or Buffer/Field where the table will be
@@ -314,18 +363,23 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
}
}
- /*
- * Map the table header and get the actual table length. The region
- * length is not guaranteed to be the same as the table length.
- */
- table = acpi_os_map_memory(obj_desc->region.address,
- sizeof(struct acpi_table_header));
+ /* Get the table header first so we can get the table length */
+
+ table = ACPI_ALLOCATE(sizeof(struct acpi_table_header));
if (!table) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
+ status =
+ acpi_ex_region_read(obj_desc,
+ sizeof(struct acpi_table_header),
+ ACPI_CAST_PTR(u8, table));
length = table->length;
- acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+ ACPI_FREE(table);
+
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
/* Must have at least an ACPI table header */
@@ -334,10 +388,19 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
}
/*
- * The memory region is not guaranteed to remain stable and we must
- * copy the table to a local buffer. For example, the memory region
- * is corrupted after suspend on some machines. Dynamically loaded
- * tables are usually small, so this overhead is minimal.
+ * The original implementation simply mapped the table, with no copy.
+ * However, the memory region is not guaranteed to remain stable and
+ * we must copy the table to a local buffer. For example, the memory
+ * region is corrupted after suspend on some machines. Dynamically
+ * loaded tables are usually small, so this overhead is minimal.
+ *
+ * The latest implementation (5/2009) does not use a mapping at all.
+ * We use the low-level operation region interface to read the table
+ * instead of the obvious optimization of using a direct mapping.
+ * This maintains a consistent use of operation regions across the
+ * entire subsystem. This is important if additional processing must
+ * be performed in the (possibly user-installed) operation region
+ * handler. For example, acpi_exec and ASLTS depend on this.
*/
/* Allocate a buffer for the table */
@@ -347,17 +410,16 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_NO_MEMORY);
}
- /* Map the entire table and copy it */
+ /* Read the entire table */
- table = acpi_os_map_memory(obj_desc->region.address, length);
- if (!table) {
+ status = acpi_ex_region_read(obj_desc, length,
+ ACPI_CAST_PTR(u8,
+ table_desc.pointer));
+ if (ACPI_FAILURE(status)) {
ACPI_FREE(table_desc.pointer);
- return_ACPI_STATUS(AE_NO_MEMORY);
+ return_ACPI_STATUS(status);
}
- ACPI_MEMCPY(table_desc.pointer, table, length);
- acpi_os_unmap_memory(table, length);
-
table_desc.address = obj_desc->region.address;
break;
@@ -454,6 +516,10 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(status);
}
+	/* Remove the reference added by acpi_ex_store above */
(test)
+
+ acpi_ut_remove_reference(ddb_handle);
+
/* Invoke table handler if present */
if (acpi_gbl_table_handler) {
@@ -495,13 +561,18 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
/*
* Validate the handle
- * Although the handle is partially validated in acpi_ex_reconfiguration(),
+ * Although the handle is partially validated in acpi_ex_reconfiguration()
* when it calls acpi_ex_resolve_operands(), the handle is more completely
* validated here.
+ *
+ * Handle must be a valid operand object of type reference. Also, the
+ * ddb_handle must still be marked valid (table has not been previously
+ * unloaded)
*/
if ((!ddb_handle) ||
(ACPI_GET_DESCRIPTOR_TYPE(ddb_handle) != ACPI_DESC_TYPE_OPERAND) ||
- (ddb_handle->common.type != ACPI_TYPE_LOCAL_REFERENCE)) {
+ (ddb_handle->common.type != ACPI_TYPE_LOCAL_REFERENCE) ||
+ (!(ddb_handle->common.flags & AOPOBJ_DATA_VALID))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -509,6 +580,12 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
table_index = table_desc->reference.value;
+ /* Ensure the table is still loaded */
+
+ if (!acpi_tb_is_table_loaded(table_index)) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
/* Invoke table handler if present */
if (acpi_gbl_table_handler) {
@@ -530,8 +607,10 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
(void)acpi_tb_release_owner_id(table_index);
acpi_tb_set_table_loaded_flag(table_index, FALSE);
- /* Table unloaded, remove a reference to the ddb_handle object */
-
- acpi_ut_remove_reference(ddb_handle);
+ /*
+ * Invalidate the handle. We do this because the handle may be stored
+ * in a named object and may not be actually deleted until much later.
+ */
+ ddb_handle->common.flags &= ~AOPOBJ_DATA_VALID;
return_ACPI_STATUS(AE_OK);
}
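Condensed into a standalone shape, the pattern acpi_ex_load_op() now follows with the new helper is: read just the table header through the operation region, size a buffer from the header's length field, then read the whole table the same way. The sketch below strips the error reporting and object bookkeeping; only acpi_ex_region_read(), struct acpi_table_header and the allocation macros come from the hunks above, and the condensed form is illustrative rather than ACPICA code.

/* Illustrative condensation of the two-pass read in acpi_ex_load_op() above. */
static acpi_status load_table_from_region(union acpi_operand_object *region_obj,
                                          struct acpi_table_header **out_table)
{
        struct acpi_table_header header;
        struct acpi_table_header *table;
        acpi_status status;
        u32 length;

        /* Pass 1: header only, to learn the real table length */
        status = acpi_ex_region_read(region_obj, (u32)sizeof(header),
                                     (u8 *)&header);
        if (ACPI_FAILURE(status)) {
                return status;
        }

        length = header.length;
        if (length < sizeof(struct acpi_table_header)) {
                return AE_INVALID_TABLE_LENGTH;
        }

        /* Pass 2: the full table into a locally owned buffer */
        table = ACPI_ALLOCATE(length);
        if (!table) {
                return AE_NO_MEMORY;
        }

        status = acpi_ex_region_read(region_obj, length, (u8 *)table);
        if (ACPI_FAILURE(status)) {
                ACPI_FREE(table);
                return status;
        }

        *out_table = table;
        return AE_OK;
}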
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index a57ad2564ab..02b25d233d9 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -502,7 +502,7 @@ acpi_ex_create_method(u8 * aml_start,
* ACPI 2.0: sync_level = sync_level in method declaration
*/
obj_desc->method.sync_level = (u8)
- ((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4);
+ ((method_flags & AML_METHOD_SYNC_LEVEL) >> 4);
}
/* Attach the new object to the method Node */
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 89d141fdae0..ec524614e70 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -120,9 +120,11 @@ static struct acpi_exdump_info acpi_ex_dump_event[2] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "OsSemaphore"}
};
-static struct acpi_exdump_info acpi_ex_dump_method[8] = {
+static struct acpi_exdump_info acpi_ex_dump_method[9] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
- {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.method_flags), "Method Flags"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count),
+ "Parameter Count"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 99cee61e655..d4075b82102 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -222,7 +222,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
{
acpi_status status;
union acpi_operand_object *rgn_desc;
- acpi_physical_address address;
+ u32 region_offset;
ACPI_FUNCTION_TRACE(ex_access_region);
@@ -243,7 +243,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
* 3) The current offset into the field
*/
rgn_desc = obj_desc->common_field.region_obj;
- address = rgn_desc->region.address +
+ region_offset =
obj_desc->common_field.base_byte_offset + field_datum_byte_offset;
if ((function & ACPI_IO_MASK) == ACPI_READ) {
@@ -260,16 +260,18 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
obj_desc->common_field.access_byte_width,
obj_desc->common_field.base_byte_offset,
field_datum_byte_offset, ACPI_CAST_PTR(void,
- address)));
+ (rgn_desc->
+ region.
+ address +
+ region_offset))));
/* Invoke the appropriate address_space/op_region handler */
- status = acpi_ev_address_space_dispatch(rgn_desc, function,
- address,
- ACPI_MUL_8(obj_desc->
- common_field.
- access_byte_width),
- value);
+ status =
+ acpi_ev_address_space_dispatch(rgn_desc, function, region_offset,
+ ACPI_MUL_8(obj_desc->common_field.
+ access_byte_width),
+ value);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index d301c1f363e..2f0114202b0 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -83,6 +83,15 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
if (obj_desc->mutex.prev) {
(obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
+
+ /*
+ * Migrate the previous sync level associated with this mutex to the
+ * previous mutex on the list so that it may be preserved. This handles
+ * the case where several mutexes have been acquired at the same level,
+ * but are not released in the opposite order.
+ */
+ (obj_desc->mutex.prev)->mutex.original_sync_level =
+ obj_desc->mutex.original_sync_level;
} else {
thread->acquired_mutex_list = obj_desc->mutex.next;
}
@@ -349,6 +358,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
+ u8 previous_sync_level;
ACPI_FUNCTION_TRACE(ex_release_mutex);
@@ -373,11 +383,12 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
walk_state->thread->thread_id)
&& (obj_desc != acpi_gbl_global_lock_mutex)) {
ACPI_ERROR((AE_INFO,
- "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
- (unsigned long)walk_state->thread->thread_id,
+ "Thread %p cannot release Mutex [%4.4s] acquired by thread %p",
+ ACPI_CAST_PTR(void, walk_state->thread->thread_id),
acpi_ut_get_node_name(obj_desc->mutex.node),
- (unsigned long)obj_desc->mutex.owner_thread->
- thread_id));
+ ACPI_CAST_PTR(void,
+ obj_desc->mutex.owner_thread->
+ thread_id)));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
@@ -391,10 +402,14 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
}
/*
- * The sync level of the mutex must be less than or equal to the current
- * sync level
+ * The sync level of the mutex must be equal to the current sync level. In
+ * other words, the current level means that at least one mutex at that
+ * level is currently being held. Attempting to release a mutex of a
+ * different level can only mean that the mutex ordering rule is being
+ * violated. This behavior is clarified in the ACPI 4.0 specification.
*/
- if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
+ if (obj_desc->mutex.sync_level !=
+ walk_state->thread->current_sync_level) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
acpi_ut_get_node_name(obj_desc->mutex.node),
@@ -403,14 +418,24 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
+ /*
+ * Get the previous sync_level from the head of the acquired mutex list.
+ * This handles the case where several mutexes at the same level have been
+ * acquired, but are not released in reverse order.
+ */
+ previous_sync_level =
+ walk_state->thread->acquired_mutex_list->mutex.original_sync_level;
+
status = acpi_ex_release_mutex_object(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
if (obj_desc->mutex.acquisition_depth == 0) {
- /* Restore the original sync_level */
+ /* Restore the previous sync_level */
- walk_state->thread->current_sync_level =
- obj_desc->mutex.original_sync_level;
+ walk_state->thread->current_sync_level = previous_sync_level;
}
return_ACPI_STATUS(status);
}
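The exmutex.c changes above revolve around one idea: when several mutexes share a sync level, the level to restore on release is taken from the head of the thread's acquired-mutex list, and an out-of-order unlink migrates its saved level to the neighbouring entry. A compact userspace sketch of that bookkeeping, using invented types (struct amutex, struct thread_state) rather than the ACPICA objects:

#include <stdio.h>

struct amutex {
        int sync_level;            /* level declared for this mutex        */
        int original_sync_level;   /* thread level saved when acquired     */
        struct amutex *next;       /* toward older acquisitions            */
};

struct thread_state {
        int current_sync_level;
        struct amutex *acquired;   /* head = most recently acquired mutex  */
};

static void acquire(struct thread_state *t, struct amutex *m)
{
        m->original_sync_level = t->current_sync_level;
        t->current_sync_level = m->sync_level;
        m->next = t->acquired;
        t->acquired = m;
}

static void unlink_mutex(struct thread_state *t, struct amutex *m)
{
        struct amutex **pp = &t->acquired, *prev = NULL;

        while (*pp && *pp != m) {
                prev = *pp;
                pp = &(*pp)->next;
        }
        if (!*pp)
                return;
        *pp = m->next;
        /* Migrate the saved level so out-of-order releases keep it alive. */
        if (prev)
                prev->original_sync_level = m->original_sync_level;
}

static int release(struct thread_state *t, struct amutex *m)
{
        int previous;

        if (m->sync_level != t->current_sync_level)
                return -1;                  /* SyncLevel mismatch (ordering) */

        /* The previous level always comes from the head of the list. */
        previous = t->acquired->original_sync_level;
        unlink_mutex(t, m);
        t->current_sync_level = previous;
        return 0;
}

int main(void)
{
        struct thread_state t = { .current_sync_level = 0 };
        struct amutex a = { .sync_level = 5 }, b = { .sync_level = 5 };

        acquire(&t, &a);
        acquire(&t, &b);
        release(&t, &a);            /* released out of order, level stays 5 */
        release(&t, &b);
        printf("final sync level: %d\n", t.current_sync_level);
        return 0;
}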
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 90d606196c9..6efd07a4f77 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -193,10 +193,12 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
case ACPI_REFCLASS_TABLE:
+ /* Case for ddb_handle */
+
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
"Table Index 0x%X\n",
source_desc->reference.value));
- break;
+ return;
default:
break;
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 7b2fb602b5c..23d5505cb1f 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -81,9 +81,9 @@ acpi_status acpi_hw_clear_acpi_status(void)
ACPI_FUNCTION_TRACE(hw_clear_acpi_status);
- ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %0llX\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %8.8X%8.8X\n",
ACPI_BITMASK_ALL_FIXED_STATUS,
- acpi_gbl_xpm1a_status.address));
+ ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index aceb9311196..efc971ab7d6 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -334,9 +334,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
/* Get the next node in this scope (NULL if none) */
- child_node =
- acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
- child_node);
+ child_node = acpi_ns_get_next_node(parent_node, child_node);
if (child_node) {
/* Found a child node - detach any attached object */
@@ -345,8 +343,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
/* Check if this node has any children */
- if (acpi_ns_get_next_node
- (ACPI_TYPE_ANY, child_node, NULL)) {
+ if (child_node->child) {
/*
* There is at least one child of this node,
* visit the node
@@ -432,9 +429,7 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
* Get the next child of this parent node. When child_node is NULL,
* the first child of the parent is returned
*/
- child_node =
- acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
- child_node);
+ child_node = acpi_ns_get_next_node(parent_node, child_node);
if (deletion_node) {
acpi_ns_delete_children(deletion_node);
@@ -452,8 +447,7 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
/* Check if this node has any children */
- if (acpi_ns_get_next_node
- (ACPI_TYPE_ANY, child_node, NULL)) {
+ if (child_node->child) {
/*
* There is at least one child of this node,
* visit the node
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index ae3dc10a7e8..af8e6bcee07 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -149,7 +149,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
name_buffer = ACPI_ALLOCATE_ZEROED(size);
if (!name_buffer) {
- ACPI_ERROR((AE_INFO, "Allocation failure"));
+ ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size));
return_PTR(NULL);
}
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 3eb20bfda9d..60f3af08d28 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -213,6 +213,15 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
return_VOID;
}
+ if (node->flags & ANOBJ_ALLOCATED_BUFFER) {
+
+ /* Free the dynamic aml buffer */
+
+ if (obj_desc->common.type == ACPI_TYPE_METHOD) {
+ ACPI_FREE(obj_desc->method.aml_start);
+ }
+ }
+
/* Clear the entry in all cases */
node->object = NULL;
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index d9e8cbc6e67..7f8e066b12a 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -144,7 +144,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
pathname = acpi_ns_get_external_pathname(node);
if (!pathname) {
- pathname = ACPI_CAST_PTR(char, predefined->info.name);
+ return AE_OK; /* Could not get pathname, ignore */
}
/*
@@ -230,10 +230,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
}
exit:
- if (pathname != predefined->info.name) {
- ACPI_FREE(pathname);
- }
-
+ ACPI_FREE(pathname);
return (status);
}
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index f9b4f51bf8f..7e865639a92 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -45,6 +45,10 @@
#include "accommon.h"
#include "acnamesp.h"
+#ifdef ACPI_ASL_COMPILER
+#include "amlcode.h"
+#endif
+
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nssearch")
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 83e3aa6d4b9..35539df5c75 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -52,8 +52,7 @@ ACPI_MODULE_NAME("nswalk")
*
* FUNCTION: acpi_ns_get_next_node
*
- * PARAMETERS: Type - Type of node to be searched for
- * parent_node - Parent node whose children we are
+ * PARAMETERS: parent_node - Parent node whose children we are
* getting
* child_node - Previous child that was found.
* The NEXT child will be returned
@@ -66,27 +65,68 @@ ACPI_MODULE_NAME("nswalk")
* within Scope is returned.
*
******************************************************************************/
-struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
- *parent_node, struct acpi_namespace_node
+struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
+ *parent_node,
+ struct acpi_namespace_node
*child_node)
{
- struct acpi_namespace_node *next_node = NULL;
-
ACPI_FUNCTION_ENTRY();
if (!child_node) {
/* It's really the parent's _scope_ that we want */
- next_node = parent_node->child;
+ return parent_node->child;
}
- else {
- /* Start search at the NEXT node */
-
- next_node = acpi_ns_get_next_valid_node(child_node);
+ /*
+ * Get the next node.
+ *
+ * If we are at the end of this peer list, return NULL
+ */
+ if (child_node->flags & ANOBJ_END_OF_PEER_LIST) {
+ return NULL;
}
+ /* Otherwise just return the next peer */
+
+ return child_node->peer;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_next_node_typed
+ *
+ * PARAMETERS: Type - Type of node to be searched for
+ * parent_node - Parent node whose children we are
+ * getting
+ * child_node - Previous child that was found.
+ * The NEXT child will be returned
+ *
+ * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if
+ * none is found.
+ *
+ * DESCRIPTION: Return the next peer node within the namespace. If Handle
+ * is valid, Scope is ignored. Otherwise, the first node
+ * within Scope is returned.
+ *
+ ******************************************************************************/
+
+struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
+ struct
+ acpi_namespace_node
+ *parent_node,
+ struct
+ acpi_namespace_node
+ *child_node)
+{
+ struct acpi_namespace_node *next_node = NULL;
+
+ ACPI_FUNCTION_ENTRY();
+
+ next_node = acpi_ns_get_next_node(parent_node, child_node);
+
/* If any type is OK, we are done */
if (type == ACPI_TYPE_ANY) {
@@ -186,9 +226,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
/* Get the next node in this scope. Null if not found */
status = AE_OK;
- child_node =
- acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
- child_node);
+ child_node = acpi_ns_get_next_node(parent_node, child_node);
if (child_node) {
/* Found next child, get the type if we are not searching for ANY */
@@ -269,8 +307,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
* function has specified that the maximum depth has been reached.
*/
if ((level < max_depth) && (status != AE_CTRL_DEPTH)) {
- if (acpi_ns_get_next_node
- (ACPI_TYPE_ANY, child_node, NULL)) {
+ if (child_node->child) {
/* There is at least one child of this node, visit it */
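The split above separates the cheap untyped walk (just follow the peer list) from the type-filtered walk. A toy illustration of the same shape, assuming a sibling-linked tree whose last peer is marked with a flag instead of a NULL pointer (all names and the flag value are invented for the sketch):

#include <stdio.h>
#include <stddef.h>

#define END_OF_PEER_LIST 0x01              /* illustrative flag value */

struct node {
        int type;
        unsigned char flags;
        struct node *child;                /* first child, or NULL */
        struct node *peer;                 /* next sibling (or back-link) */
};

/* Untyped: first child of parent, or the next sibling of child. */
static struct node *get_next_node(struct node *parent, struct node *child)
{
        if (!child)
                return parent->child;
        if (child->flags & END_OF_PEER_LIST)
                return NULL;               /* peer points back at the parent */
        return child->peer;
}

/* Typed: run the untyped walk and skip nodes of the wrong type. */
static struct node *get_next_node_typed(int type, struct node *parent,
                                        struct node *child)
{
        struct node *next = get_next_node(parent, child);

        while (next && next->type != type)
                next = get_next_node(parent, next);
        return next;
}

int main(void)
{
        struct node c2 = { .type = 2, .flags = END_OF_PEER_LIST };
        struct node c1 = { .type = 1, .peer = &c2 };
        struct node root = { .child = &c1 };

        printf("first type-2 child: %p\n",
               (void *)get_next_node_typed(2, &root, NULL));
        return 0;
}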
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 9589fea2499..f23593d6add 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -45,6 +45,8 @@
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
+#include "acparser.h"
+#include "amlcode.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsxfname")
@@ -358,3 +360,151 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
}
ACPI_EXPORT_SYMBOL(acpi_get_object_info)
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_install_method
+ *
+ * PARAMETERS: Buffer - An ACPI table containing one control method
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a control method into the namespace. If the method
+ * name already exists in the namespace, it is overwritten. The
+ * input buffer must contain a valid DSDT or SSDT containing a
+ * single control method.
+ *
+ ******************************************************************************/
+acpi_status acpi_install_method(u8 *buffer)
+{
+ struct acpi_table_header *table =
+ ACPI_CAST_PTR(struct acpi_table_header, buffer);
+ u8 *aml_buffer;
+ u8 *aml_start;
+ char *path;
+ struct acpi_namespace_node *node;
+ union acpi_operand_object *method_obj;
+ struct acpi_parse_state parser_state;
+ u32 aml_length;
+ u16 opcode;
+ u8 method_flags;
+ acpi_status status;
+
+ /* Parameter validation */
+
+ if (!buffer) {
+ return AE_BAD_PARAMETER;
+ }
+
+ /* Table must be a DSDT or SSDT */
+
+ if (!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) &&
+ !ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
+ return AE_BAD_HEADER;
+ }
+
+ /* First AML opcode in the table must be a control method */
+
+ parser_state.aml = buffer + sizeof(struct acpi_table_header);
+ opcode = acpi_ps_peek_opcode(&parser_state);
+ if (opcode != AML_METHOD_OP) {
+ return AE_BAD_PARAMETER;
+ }
+
+ /* Extract method information from the raw AML */
+
+ parser_state.aml += acpi_ps_get_opcode_size(opcode);
+ parser_state.pkg_end = acpi_ps_get_next_package_end(&parser_state);
+ path = acpi_ps_get_next_namestring(&parser_state);
+ method_flags = *parser_state.aml++;
+ aml_start = parser_state.aml;
+ aml_length = ACPI_PTR_DIFF(parser_state.pkg_end, aml_start);
+
+ /*
+ * Allocate resources up-front. We don't want to have to delete a new
+ * node from the namespace if we cannot allocate memory.
+ */
+ aml_buffer = ACPI_ALLOCATE(aml_length);
+ if (!aml_buffer) {
+ return AE_NO_MEMORY;
+ }
+
+ method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
+ if (!method_obj) {
+ ACPI_FREE(aml_buffer);
+ return AE_NO_MEMORY;
+ }
+
+ /* Lock namespace for acpi_ns_lookup, we may be creating a new node */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ /* The lookup either returns an existing node or creates a new one */
+
+ status =
+ acpi_ns_lookup(NULL, path, ACPI_TYPE_METHOD, ACPI_IMODE_LOAD_PASS1,
+ ACPI_NS_DONT_OPEN_SCOPE | ACPI_NS_ERROR_IF_FOUND,
+ NULL, &node);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+ if (ACPI_FAILURE(status)) { /* ns_lookup */
+ if (status != AE_ALREADY_EXISTS) {
+ goto error_exit;
+ }
+
+ /* Node existed previously, make sure it is a method node */
+
+ if (node->type != ACPI_TYPE_METHOD) {
+ status = AE_TYPE;
+ goto error_exit;
+ }
+ }
+
+ /* Copy the method AML to the local buffer */
+
+ ACPI_MEMCPY(aml_buffer, aml_start, aml_length);
+
+ /* Initialize the method object with the new method's information */
+
+ method_obj->method.aml_start = aml_buffer;
+ method_obj->method.aml_length = aml_length;
+
+ method_obj->method.param_count = (u8)
+ (method_flags & AML_METHOD_ARG_COUNT);
+
+ method_obj->method.method_flags = (u8)
+ (method_flags & ~AML_METHOD_ARG_COUNT);
+
+ if (method_flags & AML_METHOD_SERIALIZED) {
+ method_obj->method.sync_level = (u8)
+ ((method_flags & AML_METHOD_SYNC_LEVEL) >> 4);
+ }
+
+ /*
+ * Now that it is complete, we can attach the new method object to
+ * the method Node (detaches/deletes any existing object)
+ */
+ status = acpi_ns_attach_object(node, method_obj, ACPI_TYPE_METHOD);
+
+ /*
+ * Flag indicates AML buffer is dynamic, must be deleted later.
+ * Must be set only after attach above.
+ */
+ node->flags |= ANOBJ_ALLOCATED_BUFFER;
+
+ /* Remove local reference to the method object */
+
+ acpi_ut_remove_reference(method_obj);
+ return status;
+
+error_exit:
+
+ ACPI_FREE(aml_buffer);
+ ACPI_FREE(method_obj);
+ return status;
+}
+ACPI_EXPORT_SYMBOL(acpi_install_method)
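One detail worth pulling out of the new acpi_install_method() is its ordering: every allocation that can fail happens before the namespace is touched, so a failure needs only a free, never a namespace rollback. A hedged userspace sketch of that pattern (install(), struct method_obj and the stand-in byte buffer are invented for illustration, not ACPICA code):

#include <stdlib.h>
#include <string.h>

struct method_obj { unsigned char *aml; size_t len; };

static int install(const unsigned char *aml, size_t len,
                   struct method_obj **out)
{
        /* 1. Allocate everything that can fail before touching shared state. */
        unsigned char *copy = malloc(len);
        struct method_obj *obj = calloc(1, sizeof(*obj));

        if (!copy || !obj)
                goto error_exit;

        /* 2. Only now "install": nothing left that can fail, no rollback. */
        memcpy(copy, aml, len);
        obj->aml = copy;
        obj->len = len;
        *out = obj;
        return 0;

error_exit:
        free(copy);
        free(obj);
        return -1;
}

int main(void)
{
        const unsigned char aml[] = { 0xA4, 0x00 };   /* stand-in bytes */
        struct method_obj *m;

        if (install(aml, sizeof(aml), &m))
                return 1;
        free(m->aml);
        free(m);
        return 0;
}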
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 1c7efc15225..4071bad4458 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -162,6 +162,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_type)
acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
{
struct acpi_namespace_node *node;
+ struct acpi_namespace_node *parent_node;
acpi_status status;
if (!ret_handle) {
@@ -189,12 +190,12 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
/* Get the parent entry */
- *ret_handle =
- acpi_ns_convert_entry_to_handle(acpi_ns_get_parent_node(node));
+ parent_node = acpi_ns_get_parent_node(node);
+ *ret_handle = acpi_ns_convert_entry_to_handle(parent_node);
/* Return exception if parent is null */
- if (!acpi_ns_get_parent_node(node)) {
+ if (!parent_node) {
status = AE_NULL_ENTRY;
}
@@ -268,7 +269,7 @@ acpi_get_next_object(acpi_object_type type,
/* Internal function does the real work */
- node = acpi_ns_get_next_node(type, parent_node, child_node);
+ node = acpi_ns_get_next_node_typed(type, parent_node, child_node);
if (!node) {
status = AE_NOT_FOUND;
goto unlock_and_exit;
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 88b5a2c4814..3c4dcc3d106 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -547,7 +547,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
if (!package_element ||
(package_element->common.type != ACPI_TYPE_PACKAGE)) {
- return_ACPI_STATUS (AE_AML_OPERAND_TYPE);
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
/*
@@ -593,9 +593,6 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
} else {
temp_size_needed +=
acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
- if (!temp_size_needed) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
}
} else {
/*
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 69a2aa5b5d8..395212bcd19 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -338,13 +338,17 @@ acpi_resource_to_address64(struct acpi_resource *resource,
switch (resource->type) {
case ACPI_RESOURCE_TYPE_ADDRESS16:
- address16 = (struct acpi_resource_address16 *)&resource->data;
+ address16 =
+ ACPI_CAST_PTR(struct acpi_resource_address16,
+ &resource->data);
ACPI_COPY_ADDRESS(out, address16);
break;
case ACPI_RESOURCE_TYPE_ADDRESS32:
- address32 = (struct acpi_resource_address32 *)&resource->data;
+ address32 =
+ ACPI_CAST_PTR(struct acpi_resource_address32,
+ &resource->data);
ACPI_COPY_ADDRESS(out, address32);
break;
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 71e655d14cb..82b02dcb942 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -284,9 +284,9 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_WARNING((AE_INFO,
"FADT (revision %u) is longer than ACPI 2.0 version, "
- "truncating length 0x%X to 0x%zX",
- table->revision, (unsigned)length,
- sizeof(struct acpi_table_fadt)));
+ "truncating length 0x%X to 0x%X",
+ table->revision, length,
+ (u32)sizeof(struct acpi_table_fadt)));
}
/* Clear the entire local FADT */
@@ -441,7 +441,7 @@ static void acpi_tb_convert_fadt(void)
&acpi_gbl_FADT,
fadt_info_table
[i].length),
- address32);
+ (u64) address32);
}
}
}
@@ -469,7 +469,6 @@ static void acpi_tb_convert_fadt(void)
static void acpi_tb_validate_fadt(void)
{
char *name;
- u32 *address32;
struct acpi_generic_address *address64;
u8 length;
u32 i;
@@ -505,15 +504,12 @@ static void acpi_tb_validate_fadt(void)
for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
/*
- * Generate pointers to the 32-bit and 64-bit addresses, get the
- * register length (width), and the register name
+ * Generate pointer to the 64-bit address, get the register
+ * length (width) and the register name
*/
address64 = ACPI_ADD_PTR(struct acpi_generic_address,
&acpi_gbl_FADT,
fadt_info_table[i].address64);
- address32 =
- ACPI_ADD_PTR(u32, &acpi_gbl_FADT,
- fadt_info_table[i].address32);
length =
*ACPI_ADD_PTR(u8, &acpi_gbl_FADT,
fadt_info_table[i].length);
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index f865d5a096d..63e82329a9e 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -472,7 +472,7 @@ acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index)
* lock may block, and also since the execution of a namespace walk
* must be allowed to use the interpreter.
*/
- acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
+ (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
status = acpi_ut_acquire_write_lock(&acpi_gbl_namespace_rw_lock);
acpi_ns_delete_namespace_by_owner(owner_id);
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 919624f123d..0f0c64bf8ac 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -676,6 +676,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
{
u16 reference_count;
union acpi_operand_object *next_object;
+ acpi_status status;
/* Save fields from destination that we don't want to overwrite */
@@ -768,6 +769,28 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
}
break;
+ /*
+ * For Mutex and Event objects, we cannot simply copy the underlying
+ * OS object. We must create a new one.
+ */
+ case ACPI_TYPE_MUTEX:
+
+ status = acpi_os_create_mutex(&dest_desc->mutex.os_mutex);
+ if (ACPI_FAILURE(status)) {
+ return status;
+ }
+ break;
+
+ case ACPI_TYPE_EVENT:
+
+ status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0,
+ &dest_desc->event.
+ os_semaphore);
+ if (ACPI_FAILURE(status)) {
+ return status;
+ }
+ break;
+
default:
/* Nothing to do for other simple objects */
break;
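The new Mutex/Event cases above make the point that an operand copy cannot simply duplicate the embedded OS synchronization object; the copy must get a freshly created one. A small POSIX sketch of the same rule (struct event_obj is invented; build with -lpthread):

#include <pthread.h>

struct event_obj {
        int counter;
        pthread_mutex_t os_mutex;      /* OS-level object: cannot be memcpy'd */
};

/* A plain struct copy would alias the same OS mutex in both objects. */
static int copy_event_obj(const struct event_obj *src, struct event_obj *dst)
{
        *dst = *src;                              /* copy the plain fields  */
        /* ...but create a brand-new OS object for the copy. */
        return pthread_mutex_init(&dst->os_mutex, NULL);
}

int main(void)
{
        struct event_obj a = { .counter = 3 };
        struct event_obj b;

        pthread_mutex_init(&a.os_mutex, NULL);
        copy_event_obj(&a, &b);
        pthread_mutex_destroy(&a.os_mutex);
        pthread_mutex_destroy(&b.os_mutex);
        return 0;
}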
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 38821f53042..527d729f681 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -179,9 +179,9 @@ acpi_debug_print(u32 requested_debug_level,
if (thread_id != acpi_gbl_prev_thread_id) {
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf
- ("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
- (unsigned long)acpi_gbl_prev_thread_id,
- (unsigned long)thread_id);
+ ("\n**** Context Switch from TID %p to TID %p ****\n\n",
+ ACPI_CAST_PTR(void, acpi_gbl_prev_thread_id),
+ ACPI_CAST_PTR(void, thread_id));
}
acpi_gbl_prev_thread_id = thread_id;
@@ -194,7 +194,7 @@ acpi_debug_print(u32 requested_debug_level,
acpi_os_printf("%8s-%04ld ", module_name, line_number);
if (ACPI_LV_THREADS & acpi_dbg_level) {
- acpi_os_printf("[%04lX] ", (unsigned long)thread_id);
+ acpi_os_printf("[%p] ", ACPI_CAST_PTR(void, thread_id));
}
acpi_os_printf("[%02ld] %-22.22s: ",
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index a5ee23bc4f5..bc171031508 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -75,6 +75,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
union acpi_operand_object *handler_desc;
union acpi_operand_object *second_desc;
union acpi_operand_object *next_desc;
+ union acpi_operand_object **last_obj_ptr;
ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);
@@ -223,6 +224,26 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
*/
handler_desc = object->region.handler;
if (handler_desc) {
+ next_desc =
+ handler_desc->address_space.region_list;
+ last_obj_ptr =
+ &handler_desc->address_space.region_list;
+
+ /* Remove the region object from the handler's list */
+
+ while (next_desc) {
+ if (next_desc == object) {
+ *last_obj_ptr =
+ next_desc->region.next;
+ break;
+ }
+
+ /* Walk the handler's list of region objects */
+
+ last_obj_ptr = &next_desc->region.next;
+ next_desc = next_desc->region.next;
+ }
+
if (handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 1c9e250caef..fbe782348b0 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -1033,11 +1033,12 @@ acpi_error(const char *module_name, u32 line_number, const char *format, ...)
{
va_list args;
- acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
+ acpi_os_printf("ACPI Error: ");
va_start(args, format);
acpi_os_vprintf(format, args);
- acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+ acpi_os_printf(" %8.8X %s-%u\n", ACPI_CA_VERSION, module_name,
+ line_number);
va_end(args);
}
@@ -1047,12 +1048,12 @@ acpi_exception(const char *module_name,
{
va_list args;
- acpi_os_printf("ACPI Exception (%s-%04d): %s, ", module_name,
- line_number, acpi_format_exception(status));
+ acpi_os_printf("ACPI Exception: %s, ", acpi_format_exception(status));
va_start(args, format);
acpi_os_vprintf(format, args);
- acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+ acpi_os_printf(" %8.8X %s-%u\n", ACPI_CA_VERSION, module_name,
+ line_number);
va_end(args);
}
@@ -1061,11 +1062,12 @@ acpi_warning(const char *module_name, u32 line_number, const char *format, ...)
{
va_list args;
- acpi_os_printf("ACPI Warning (%s-%04d): ", module_name, line_number);
+ acpi_os_printf("ACPI Warning: ");
va_start(args, format);
acpi_os_vprintf(format, args);
- acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+ acpi_os_printf(" %8.8X %s-%u\n", ACPI_CA_VERSION, module_name,
+ line_number);
va_end(args);
}
@@ -1074,10 +1076,6 @@ acpi_info(const char *module_name, u32 line_number, const char *format, ...)
{
va_list args;
- /*
- * Removed module_name, line_number, and acpica version, not needed
- * for info output
- */
acpi_os_printf("ACPI: ");
va_start(args, format);
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 26c93a748e6..80bb6515411 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -230,17 +230,18 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) {
ACPI_ERROR((AE_INFO,
- "Mutex [%s] already acquired by this thread [%X]",
+ "Mutex [%s] already acquired by this thread [%p]",
acpi_ut_get_mutex_name
(mutex_id),
- this_thread_id));
+ ACPI_CAST_PTR(void,
+ this_thread_id)));
return (AE_ALREADY_ACQUIRED);
}
ACPI_ERROR((AE_INFO,
- "Invalid acquire order: Thread %X owns [%s], wants [%s]",
- this_thread_id,
+ "Invalid acquire order: Thread %p owns [%s], wants [%s]",
+ ACPI_CAST_PTR(void, this_thread_id),
acpi_ut_get_mutex_name(i),
acpi_ut_get_mutex_name(mutex_id)));
@@ -251,24 +252,24 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
#endif
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
- "Thread %lX attempting to acquire Mutex [%s]\n",
- (unsigned long)this_thread_id,
+ "Thread %p attempting to acquire Mutex [%s]\n",
+ ACPI_CAST_PTR(void, this_thread_id),
acpi_ut_get_mutex_name(mutex_id)));
status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
ACPI_WAIT_FOREVER);
if (ACPI_SUCCESS(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
- "Thread %lX acquired Mutex [%s]\n",
- (unsigned long)this_thread_id,
+ "Thread %p acquired Mutex [%s]\n",
+ ACPI_CAST_PTR(void, this_thread_id),
acpi_ut_get_mutex_name(mutex_id)));
acpi_gbl_mutex_info[mutex_id].use_count++;
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
- "Thread %lX could not acquire Mutex [%X]",
- (unsigned long)this_thread_id, mutex_id));
+ "Thread %p could not acquire Mutex [%X]",
+ ACPI_CAST_PTR(void, this_thread_id), mutex_id));
}
return (status);
@@ -293,9 +294,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
ACPI_FUNCTION_NAME(ut_release_mutex);
this_thread_id = acpi_os_get_thread_id();
- ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
- "Thread %lX releasing Mutex [%s]\n",
- (unsigned long)this_thread_id,
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %p releasing Mutex [%s]\n",
+ ACPI_CAST_PTR(void, this_thread_id),
acpi_ut_get_mutex_name(mutex_id)));
if (mutex_id > ACPI_MAX_MUTEX) {
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index e8f7b64e92d..ae862f1798d 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -312,7 +312,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
end:
if (result)
printk(KERN_WARNING PREFIX
- "Transitioning device [%s] to D%d\n",
+ "Device [%s] failed to transition to D%d\n",
device->pnp.bus_id, state);
else {
device->power.state = state;
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 95650f83ce2..bc46de3d967 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -116,9 +116,6 @@ int acpi_pci_bind(struct acpi_device *device)
struct acpi_pci_data *pdata;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_handle handle;
- struct pci_dev *dev;
- struct pci_bus *bus;
-
if (!device || !device->parent)
return -EINVAL;
@@ -176,20 +173,9 @@ int acpi_pci_bind(struct acpi_device *device)
* Locate matching device in PCI namespace. If it doesn't exist
* this typically means that the device isn't currently inserted
* (e.g. docking station, port replicator, etc.).
- * We cannot simply search the global pci device list, since
- * PCI devices are added to the global pci list when the root
- * bridge start ops are run, which may not have happened yet.
*/
- bus = pci_find_bus(data->id.segment, data->id.bus);
- if (bus) {
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->devfn == PCI_DEVFN(data->id.device,
- data->id.function)) {
- data->dev = dev;
- break;
- }
- }
- }
+ data->dev = pci_get_slot(pdata->bus,
+ PCI_DEVFN(data->id.device, data->id.function));
if (!data->dev) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device %04x:%02x:%02x.%d not present in PCI namespace\n",
@@ -259,9 +245,10 @@ int acpi_pci_bind(struct acpi_device *device)
end:
kfree(buffer.pointer);
- if (result)
+ if (result) {
+ pci_dev_put(data->dev);
kfree(data);
-
+ }
return result;
}
@@ -303,6 +290,7 @@ static int acpi_pci_unbind(struct acpi_device *device)
if (data->dev->subordinate) {
acpi_pci_irq_del_prt(data->id.segment, data->bus->number);
}
+ pci_dev_put(data->dev);
kfree(data);
end:
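The pci_bind.c change swaps an open-coded bus scan for pci_get_slot(), which returns a counted reference, so both the error path and unbind now need a matching pci_dev_put(). A toy refcount sketch of that discipline (struct dev, dev_get/dev_put and bind/unbind are stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct dev { int refcount; };                   /* stands in for pci_dev */

static struct dev *dev_get(struct dev *d) { if (d) d->refcount++; return d; }
static void dev_put(struct dev *d)        { if (d) d->refcount--; }

struct binding { struct dev *dev; };

static int bind(struct dev *found, struct binding **out)
{
        struct binding *data = calloc(1, sizeof(*data));

        if (!data)
                return -1;
        data->dev = dev_get(found);     /* lookup hands back a reference */
        if (!data->dev) {
                free(data);
                return -1;
        }
        *out = data;
        return 0;           /* on any later failure: dev_put() then free() */
}

static void unbind(struct binding *data)
{
        dev_put(data->dev);             /* mirror of the get taken in bind() */
        free(data);
}

int main(void)
{
        struct dev d = { .refcount = 1 };
        struct binding *b;

        if (bind(&d, &b) == 0)
                unbind(b);
        printf("refcount back to %d\n", d.refcount);
        return 0;
}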
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 51b9f8280f8..2faa9e2ac89 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -401,7 +401,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
/* Interrupt Line values above 0xF are forbidden */
if (dev->irq > 0 && (dev->irq <= 0xF)) {
printk(" - using IRQ %d\n", dev->irq);
- acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE,
+ acpi_register_gsi(&dev->dev, dev->irq,
+ ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
return 0;
} else {
@@ -410,7 +411,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
}
}
- rc = acpi_register_gsi(gsi, triggering, polarity);
+ rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);
if (rc < 0) {
dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n",
pin_name(pin));
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 45ad3288c5f..23f0fb84f1c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -844,7 +844,7 @@ static int acpi_processor_add(struct acpi_device *device)
if (!pr)
return -ENOMEM;
- if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
kfree(pr);
return -ENOMEM;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f7ca8c55956..10a2d913635 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -148,6 +148,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
return;
+ if (boot_cpu_has(X86_FEATURE_AMDC1E))
+ type = ACPI_STATE_C1;
+
/*
* Check, if one of the previous states already marked the lapic
* unstable
@@ -202,21 +205,44 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
* Suspend / resume control
*/
static int acpi_idle_suspend;
+static u32 saved_bm_rld;
+
+static void acpi_idle_bm_rld_save(void)
+{
+ acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
+}
+static void acpi_idle_bm_rld_restore(void)
+{
+ u32 resumed_bm_rld;
+
+ acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
+
+ if (resumed_bm_rld != saved_bm_rld)
+ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
+}
int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
+ if (acpi_idle_suspend == 1)
+ return 0;
+
+ acpi_idle_bm_rld_save();
acpi_idle_suspend = 1;
return 0;
}
int acpi_processor_resume(struct acpi_device * device)
{
+ if (acpi_idle_suspend == 0)
+ return 0;
+
+ acpi_idle_bm_rld_restore();
acpi_idle_suspend = 0;
return 0;
}
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
-static int tsc_halts_in_c(int state)
+static void tsc_check_state(int state)
{
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
@@ -226,13 +252,17 @@ static int tsc_halts_in_c(int state)
* C/P/S0/S1 states when this bit is set.
*/
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
- return 0;
+ return;
/*FALL THROUGH*/
default:
- return state > ACPI_STATE_C1;
+ /* TSC could halt in idle, so notify users */
+ if (state > ACPI_STATE_C1)
+ mark_tsc_unstable("TSC halts in idle");
}
}
+#else
+static void tsc_check_state(int state) { return; }
#endif
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
@@ -578,17 +608,13 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
pr->power.timer_broadcast_on_state = INT_MAX;
- for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
+ for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
struct acpi_processor_cx *cx = &pr->power.states[i];
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
- /* TSC could halt in idle, so notify users */
- if (tsc_halts_in_c(cx->type))
- mark_tsc_unstable("TSC halts in idle");;
-#endif
switch (cx->type) {
case ACPI_STATE_C1:
cx->valid = 1;
+ acpi_timer_check_state(i, pr, cx);
break;
case ACPI_STATE_C2:
@@ -603,6 +629,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
acpi_timer_check_state(i, pr, cx);
break;
}
+ if (cx->valid)
+ tsc_check_state(cx->type);
if (cx->valid)
working++;
@@ -806,11 +834,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
/* Do not access any ACPI IO ports in suspend path */
if (acpi_idle_suspend) {
- acpi_safe_halt();
local_irq_enable();
+ cpu_relax();
return 0;
}
+ acpi_state_timer_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
@@ -818,6 +847,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
local_irq_enable();
cx->usage++;
+ acpi_state_timer_broadcast(pr, cx, 0);
return idle_time;
}
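The BM_RLD handling added above is a plain save/compare/restore around suspend: record the bit before entering S3, write it back on resume only if firmware changed it, and guard against unbalanced suspend/resume calls. A self-contained sketch of that shape against a fake register (read_bm_rld/write_bm_rld stand in for the ACPI bit-register accessors):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_bm_rld = 1;        /* pretend hardware register */

static uint32_t read_bm_rld(void)        { return fake_bm_rld; }
static void     write_bm_rld(uint32_t v) { fake_bm_rld = v; }

static uint32_t saved_bm_rld;
static int      suspended;

static void suspend(void)
{
        if (suspended)                   /* guard against double suspend */
                return;
        saved_bm_rld = read_bm_rld();    /* save before the firmware runs */
        suspended = 1;
}

static void resume(void)
{
        if (!suspended)
                return;
        /* Only write the register back if the firmware actually changed it. */
        if (read_bm_rld() != saved_bm_rld)
                write_bm_rld(saved_bm_rld);
        suspended = 0;
}

int main(void)
{
        suspend();
        write_bm_rld(0);                 /* firmware clobbers the bit in S3 */
        resume();
        printf("BM_RLD restored to %u\n", read_bm_rld());
        return 0;
}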
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index cafb41000f6..60e543d3234 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -309,9 +309,15 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
(u32) px->bus_master_latency,
(u32) px->control, (u32) px->status));
- if (!px->core_frequency) {
- printk(KERN_ERR PREFIX
- "Invalid _PSS data: freq is zero\n");
+ /*
+ * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
+ */
+ if (!px->core_frequency ||
+ ((u32)(px->core_frequency * 1000) !=
+ (px->core_frequency * 1000))) {
+ printk(KERN_ERR FW_BUG PREFIX
+ "Invalid BIOS _PSS frequency: 0x%llx MHz\n",
+ px->core_frequency);
result = -EFAULT;
kfree(pr->performance->states);
goto end;
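The new _PSS check above rejects frequencies that are zero or too large to survive the MHz-to-kHz conversion into cpufreq's 32-bit field. The same round-trip test in standalone form (pss_mhz_is_sane() is an invented name):

#include <stdint.h>
#include <stdio.h>

/*
 * _PSS reports the core frequency in MHz as a 64-bit integer; cpufreq
 * stores kHz in a u32.  Reject values that are zero or do not round-trip.
 */
static int pss_mhz_is_sane(uint64_t mhz)
{
        uint64_t khz = mhz * 1000;

        return mhz != 0 && (uint64_t)(uint32_t)khz == khz;
}

int main(void)
{
        printf("%d\n", pss_mhz_is_sane(2400));          /* 1: 2.4 GHz is ok */
        printf("%d\n", pss_mhz_is_sane(0));             /* 0: zero freq     */
        printf("%d\n", pss_mhz_is_sane(5000000ULL));    /* 0: overflows u32 */
        return 0;
}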
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index d0d1f4d5043..227543789ba 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -45,6 +45,14 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");
+/* ignore_tpc:
+ * 0 -> acpi processor driver doesn't ignore _TPC values
+ * 1 -> acpi processor driver ignores _TPC values
+ */
+static int ignore_tpc;
+module_param(ignore_tpc, int, 0644);
+MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");
+
struct throttling_tstate {
unsigned int cpu; /* cpu nr */
int target_state; /* target T-state */
@@ -283,6 +291,10 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
if (!pr)
return -EINVAL;
+
+ if (ignore_tpc)
+ goto end;
+
status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND) {
@@ -290,6 +302,8 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
}
return -ENODEV;
}
+
+end:
pr->throttling_platform_limit = (int)tpc;
return 0;
}
@@ -302,6 +316,9 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
struct acpi_processor_limit *limit;
int target_state;
+ if (ignore_tpc)
+ return 0;
+
result = acpi_processor_get_platform_limit(pr);
if (result) {
/* Throttling Limit is unsupported */
@@ -821,6 +838,14 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
ret = acpi_read_throttling_status(pr, &value);
if (ret >= 0) {
state = acpi_get_throttling_state(pr, value);
+ if (state == -1) {
+ ACPI_WARNING((AE_INFO,
+ "Invalid throttling state, reset"));
+ state = 0;
+ ret = acpi_processor_set_throttling(pr, state);
+ if (ret)
+ return ret;
+ }
pr->throttling.state = state;
}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index d7ff61c0d57..1bdfb37377e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -538,6 +538,57 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
return -EINVAL;
}
+/*
+ * For some buggy _BQC methods, we need to add a constant value to
+ * the _BQC return value to get the actual current brightness level
+ */
+
+static int bqc_offset_aml_bug_workaround;
+static int __init video_set_bqc_offset(const struct dmi_system_id *d)
+{
+ bqc_offset_aml_bug_workaround = 9;
+ return 0;
+}
+
+static struct dmi_system_id video_dmi_table[] __initdata = {
+ /*
+ * Broken _BQC workaround, see http://bugzilla.kernel.org/show_bug.cgi?id=13121
+ */
+ {
+ .callback = video_set_bqc_offset,
+ .ident = "Acer Aspire 5720",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
+ },
+ },
+ {
+ .callback = video_set_bqc_offset,
+ .ident = "Acer Aspire 5710Z",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710Z"),
+ },
+ },
+ {
+ .callback = video_set_bqc_offset,
+ .ident = "eMachines E510",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "EMACHINES"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eMachines E510"),
+ },
+ },
+ {
+ .callback = video_set_bqc_offset,
+ .ident = "Acer Aspire 5315",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
+ },
+ },
+ {}
+};
+
static int
acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
unsigned long long *level)
@@ -557,6 +608,7 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
*level = device->brightness->levels[*level + 2];
}
+ *level += bqc_offset_aml_bug_workaround;
device->brightness->curr = *level;
return 0;
} else {
@@ -2290,13 +2342,15 @@ EXPORT_SYMBOL(acpi_video_register);
static int __init acpi_video_init(void)
{
+ dmi_check_system(video_dmi_table);
+
if (intel_opregion_present())
return 0;
return acpi_video_register();
}
-void __exit acpi_video_exit(void)
+void acpi_video_exit(void)
{
acpi_bus_unregister_driver(&acpi_video_bus);
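The video.c hunk adds a DMI-keyed constant that is added to whatever _BQC reports. A trimmed-down sketch of the lookup-and-offset idea, with a hand-rolled quirk table instead of the dmi_system_id machinery (the offset value comes from the table above, everything else is invented):

#include <string.h>
#include <stdio.h>

struct quirk { const char *vendor; const char *product; int bqc_offset; };

static const struct quirk quirks[] = {
        { "Acer", "Aspire 5720", 9 },
        { "Acer", "Aspire 5710Z", 9 },
        { NULL, NULL, 0 }
};

static int bqc_offset_for(const char *vendor, const char *product)
{
        const struct quirk *q;

        for (q = quirks; q->vendor; q++)
                if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
                        return q->bqc_offset;
        return 0;                        /* no workaround needed */
}

int main(void)
{
        int raw_bqc = 40;                /* level as reported by _BQC */
        int level = raw_bqc + bqc_offset_for("Acer", "Aspire 5720");

        printf("corrected brightness level: %d\n", level);
        return 0;
}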
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 9120717c070..2aa1908e5ce 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -535,6 +535,15 @@ config PATA_OPTIDMA
If unsure, say N.
+config PATA_PALMLD
+ tristate "Palm LifeDrive PATA support"
+ depends on MACH_PALMLD
+ help
+ This option enables support for Palm LifeDrive's internal ATA
+ port via the new ATA layer.
+
+ If unsure, say N.
+
config PATA_PCMCIA
tristate "PCMCIA PATA support"
depends on PCMCIA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 7f1ecf99528..1558059874f 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
+obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o
obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 08186ecbaf8..15a23031833 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -77,8 +77,6 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
size_t size);
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
ssize_t size);
-#define MAX_SLOTS 8
-#define MAX_RETRY 15
enum {
AHCI_PCI_BAR = 5,
@@ -220,6 +218,7 @@ enum {
AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
+ AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
/* ap->flags bits */
@@ -230,6 +229,10 @@ enum {
ICH_MAP = 0x90, /* ICH MAP register */
+ /* em constants */
+ EM_MAX_SLOTS = 8,
+ EM_MAX_RETRY = 5,
+
/* em_ctl bits */
EM_CTL_RST = (1 << 9), /* Reset */
EM_CTL_TM = (1 << 8), /* Transmit Message */
@@ -281,8 +284,8 @@ struct ahci_port_priv {
unsigned int ncq_saw_dmas:1;
unsigned int ncq_saw_sdb:1;
u32 intr_mask; /* interrupts to enable */
- struct ahci_em_priv em_priv[MAX_SLOTS];/* enclosure management info
- * per PM slot */
+ /* enclosure management info per PM slot */
+ struct ahci_em_priv em_priv[EM_MAX_SLOTS];
};
static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
@@ -312,7 +315,6 @@ static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_resume(struct ata_port *ap);
static void ahci_dev_config(struct ata_device *dev);
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
u32 opts);
#ifdef CONFIG_PM
@@ -403,14 +405,14 @@ static struct ata_port_operations ahci_sb600_ops = {
#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
static const struct ata_port_info ahci_port_info[] = {
- /* board_ahci */
+ [board_ahci] =
{
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- /* board_ahci_vt8251 */
+ [board_ahci_vt8251] =
{
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
.flags = AHCI_FLAG_COMMON,
@@ -418,7 +420,7 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_vt8251_ops,
},
- /* board_ahci_ign_iferr */
+ [board_ahci_ign_iferr] =
{
AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
@@ -426,17 +428,16 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- /* board_ahci_sb600 */
+ [board_ahci_sb600] =
{
AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
- AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
- AHCI_HFLAG_SECT255),
+ AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_sb600_ops,
},
- /* board_ahci_mv */
+ [board_ahci_mv] =
{
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
@@ -446,7 +447,7 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- /* board_ahci_sb700, for SB700 and SB800 */
+ [board_ahci_sb700] = /* for SB700 and SB800 */
{
AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
.flags = AHCI_FLAG_COMMON,
@@ -454,7 +455,7 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_sb600_ops,
},
- /* board_ahci_mcp65 */
+ [board_ahci_mcp65] =
{
AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON,
@@ -462,7 +463,7 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- /* board_ahci_nopmp */
+ [board_ahci_nopmp] =
{
AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
.flags = AHCI_FLAG_COMMON,
@@ -1140,12 +1141,12 @@ static void ahci_start_port(struct ata_port *ap)
emp = &pp->em_priv[link->pmp];
/* EM Transmit bit may be busy during init */
- for (i = 0; i < MAX_RETRY; i++) {
+ for (i = 0; i < EM_MAX_RETRY; i++) {
rc = ahci_transmit_led_message(ap,
emp->led_state,
4);
if (rc == -EBUSY)
- udelay(100);
+ msleep(1);
else
break;
}
@@ -1339,7 +1340,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < MAX_SLOTS)
+ if (pmp < EM_MAX_SLOTS)
emp = &pp->em_priv[pmp];
else
return -EINVAL;
@@ -1407,7 +1408,7 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < MAX_SLOTS)
+ if (pmp < EM_MAX_SLOTS)
emp = &pp->em_priv[pmp];
else
return -EINVAL;
@@ -2316,9 +2317,17 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
u32 ctl;
+ if (mesg.event & PM_EVENT_SUSPEND &&
+ hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "BIOS update required for suspend/resume\n");
+ return -EIO;
+ }
+
if (mesg.event & PM_EVENT_SLEEP) {
/* AHCI spec rev1.1 section 8.3.3:
* Software must disable interrupts prior to requesting a
@@ -2575,6 +2584,51 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
}
}
+/*
+ * SB600 ahci controller on ASUS M2A-VM can't do 64bit DMA with older
+ * BIOS. The oldest version known to be broken is 0901; the oldest known
+ * working version is 1501, released on 2007-10-26. Force 32bit DMA on
+ * anything older than 1501. Please read bko#9412 for more info.
+ */
+static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev)
+{
+ static const struct dmi_system_id sysids[] = {
+ {
+ .ident = "ASUS M2A-VM",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
+ },
+ },
+ { }
+ };
+ const char *cutoff_mmdd = "10/26";
+ const char *date;
+ int year;
+
+ if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
+ !dmi_check_system(sysids))
+ return false;
+
+ /*
+ * Argh.... both version and date are free form strings.
+ * Let's hope they're using the same date format across
+ * different versions.
+ */
+ date = dmi_get_system_info(DMI_BIOS_DATE);
+ year = dmi_get_year(DMI_BIOS_DATE);
+ if (date && strlen(date) >= 10 && date[2] == '/' && date[5] == '/' &&
+ (year > 2007 ||
+ (year == 2007 && strncmp(date, cutoff_mmdd, 5) >= 0)))
+ return false;
+
+ dev_printk(KERN_WARNING, &pdev->dev, "ASUS M2A-VM: BIOS too old, "
+ "forcing 32bit DMA, update BIOS\n");
+
+ return true;
+}
+
static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
{
static const struct dmi_system_id broken_systems[] = {
@@ -2610,6 +2664,63 @@ static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
return false;
}
+static bool ahci_broken_suspend(struct pci_dev *pdev)
+{
+ static const struct dmi_system_id sysids[] = {
+ /*
+ * On HP dv[4-6] and HDX18 with earlier BIOSen, the link
+ * to the hard disk doesn't come back online after
+ * resuming from STR. Warn and fail suspend.
+ */
+ {
+ .ident = "dv4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME,
+ "HP Pavilion dv4 Notebook PC"),
+ },
+ .driver_data = "F.30", /* cutoff BIOS version */
+ },
+ {
+ .ident = "dv5",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME,
+ "HP Pavilion dv5 Notebook PC"),
+ },
+ .driver_data = "F.16", /* cutoff BIOS version */
+ },
+ {
+ .ident = "dv6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME,
+ "HP Pavilion dv6 Notebook PC"),
+ },
+ .driver_data = "F.21", /* cutoff BIOS version */
+ },
+ {
+ .ident = "HDX18",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME,
+ "HP HDX18 Notebook PC"),
+ },
+ .driver_data = "F.23", /* cutoff BIOS version */
+ },
+ { } /* terminate list */
+ };
+ const struct dmi_system_id *dmi = dmi_first_match(sysids);
+ const char *ver;
+
+ if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
+ return false;
+
+ ver = dmi_get_system_info(DMI_BIOS_VERSION);
+
+ return !ver || strcmp(ver, dmi->driver_data) < 0;
+}
+
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
@@ -2678,6 +2789,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
+ /* apply ASUS M2A_VM quirk */
+ if (ahci_asus_m2a_vm_32bit_only(pdev))
+ hpriv->flags |= AHCI_HFLAG_32BIT_ONLY;
+
if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
pci_enable_msi(pdev);
@@ -2715,6 +2830,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
"quirky BIOS, skipping spindown on poweroff\n");
}
+ if (ahci_broken_suspend(pdev)) {
+ hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "BIOS update required for suspend/resume\n");
+ }
+
/* CAP.NP sometimes indicate the index of the last enabled
* port, at other times, that of the last possible port, so
* determining the maximum port number requires looking at
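Two of the new ahci quirks above hinge on string comparisons against a cutoff: the ASUS M2A-VM check parses the free-form DMI BIOS date, and the HP suspend check relies on versions like "F.16" comparing sanely with strcmp(). A standalone sketch of both comparisons (function names are invented; the kernel uses dmi_get_system_info()/dmi_get_year() rather than parsing by hand):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* DMI BIOS dates look like "MM/DD/YYYY"; "older" means strictly before the
 * cutoff, and anything unparseable is treated as old (i.e. broken). */
static int bios_older_than(const char *date, int cutoff_year,
                           const char *cutoff_mmdd)
{
        int year;

        if (!date || strlen(date) < 10 || date[2] != '/' || date[5] != '/')
                return 1;                       /* unparseable: assume old */
        year = atoi(date + 6);
        if (year != cutoff_year)
                return year < cutoff_year;
        return strncmp(date, cutoff_mmdd, 5) < 0;   /* same year: MM/DD */
}

/* BIOS versions such as "F.16" happen to compare correctly with strcmp(). */
static int bios_version_older(const char *ver, const char *cutoff)
{
        return !ver || strcmp(ver, cutoff) < 0;
}

int main(void)
{
        printf("%d\n", bios_older_than("06/15/2007", 2007, "10/26"));  /* 1 */
        printf("%d\n", bios_older_than("11/02/2007", 2007, "10/26"));  /* 0 */
        printf("%d\n", bios_version_older("F.12", "F.30"));            /* 1 */
        return 0;
}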
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d51a17c0f59..d0a14cf2bd7 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -223,10 +223,8 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* ICH8 Mobile PATA Controller */
{ 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
- /* NOTE: The following PCI ids must be kept in sync with the
- * list in drivers/pci/quirks.c.
- */
-
+ /* SATA ports */
+
/* 82801EB (ICH5) */
{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
/* 82801EB (ICH5) */
@@ -1455,6 +1453,15 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev)
/* PCI slot number of the controller */
.driver_data = (void *)0x1FUL,
},
+ {
+ .ident = "HP Compaq nc6000",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"),
+ },
+ /* PCI slot number of the controller */
+ .driver_data = (void *)0x1FUL,
+ },
{ } /* terminate list */
};
@@ -1500,8 +1507,8 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- /* no hotplugging support (FIXME) */
- if (!in_module_init)
+ /* no hotplugging support for later devices (FIXME) */
+ if (!in_module_init && ent->driver_data >= ich5_sata)
return -ENODEV;
if (piix_broken_system_poweroff(pdev)) {
@@ -1582,6 +1589,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
host->ports[1]->mwdma_mask = 0;
host->ports[1]->udma_mask = 0;
}
+ host->flags |= ATA_HOST_PARALLEL_SCAN;
pci_set_master(pdev);
return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht);
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 6273d98d00e..ac176da1f94 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -748,9 +748,9 @@ static int ata_acpi_run_tf(struct ata_device *dev,
/**
* ata_acpi_exec_tfs - get then write drive taskfile settings
* @dev: target ATA device
- * @nr_executed: out paramter for the number of executed commands
+ * @nr_executed: out parameter for the number of executed commands
*
- * Evaluate _GTF and excute returned taskfiles.
+ * Evaluate _GTF and execute returned taskfiles.
*
* LOCKING:
* EH context.
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c9242301cfa..ca4d208ddf3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5031,7 +5031,6 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
{
int nr_done = 0;
u32 done_mask;
- int i;
done_mask = ap->qc_active ^ qc_active;
@@ -5041,16 +5040,16 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
return -EINVAL;
}
- for (i = 0; i < ATA_MAX_QUEUE; i++) {
+ while (done_mask) {
struct ata_queued_cmd *qc;
+ unsigned int tag = __ffs(done_mask);
- if (!(done_mask & (1 << i)))
- continue;
-
- if ((qc = ata_qc_from_tag(ap, i))) {
+ qc = ata_qc_from_tag(ap, tag);
+ if (qc) {
ata_qc_complete(qc);
nr_done++;
}
+ done_mask &= ~(1 << tag);
}
return nr_done;
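The ata_qc_complete_multiple() rewrite above walks only the set bits of done_mask instead of iterating over all ATA_MAX_QUEUE tags. The same loop shape in standalone form, using the GCC builtin __builtin_ctz() where the kernel uses __ffs():

#include <stdio.h>
#include <stdint.h>

/* Complete only the tags whose bits changed, lowest set bit first. */
static int complete_multiple(uint32_t qc_active, uint32_t new_active)
{
        uint32_t done_mask = qc_active ^ new_active;
        int nr_done = 0;

        while (done_mask) {
                unsigned int tag = __builtin_ctz(done_mask);  /* like __ffs() */

                printf("completing tag %u\n", tag);
                nr_done++;
                done_mask &= ~(1u << tag);
        }
        return nr_done;
}

int main(void)
{
        /* Tags 0, 3 and 7 were active; tag 3 is still outstanding. */
        return complete_multiple(0x89, 0x08) == 2 ? 0 : 1;
}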
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 94919ad03df..fa22f94ca41 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2864,7 +2864,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
/**
* ata_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
- * @r_failed_dev: out paramter for failed device
+ * @r_failed_dev: out parameter for failed device
*
* Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
* ata_set_mode() fails, pointer to the failing device is
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 342316064e9..d0dfeef55db 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1084,7 +1084,7 @@ static int atapi_drain_needed(struct request *rq)
if (likely(!blk_pc_request(rq)))
return 0;
- if (!rq->data_len || (rq->cmd_flags & REQ_RW))
+ if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
return 0;
return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index bb18415d3d6..bbbb1fab175 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -727,17 +727,23 @@ unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
else
iowrite16_rep(data_addr, buf, words);
- /* Transfer trailing 1 byte, if any. */
+ /* Transfer trailing byte, if any. */
if (unlikely(buflen & 0x01)) {
- __le16 align_buf[1] = { 0 };
- unsigned char *trailing_buf = buf + buflen - 1;
+ unsigned char pad[2];
+ /* Point buf to the tail of buffer */
+ buf += buflen - 1;
+
+ /*
+ * Use io*16_rep() accessors here as well to avoid pointlessly
+ * swapping bytes to and fro on big-endian machines...
+ */
if (rw == READ) {
- align_buf[0] = cpu_to_le16(ioread16(data_addr));
- memcpy(trailing_buf, align_buf, 1);
+ ioread16_rep(data_addr, pad, 1);
+ *buf = pad[0];
} else {
- memcpy(align_buf, trailing_buf, 1);
- iowrite16(le16_to_cpu(align_buf[0]), data_addr);
+ pad[0] = *buf;
+ iowrite16_rep(data_addr, pad, 1);
}
words++;
}
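The libata-sff change handles an odd trailing byte by staging it in a two-byte pad buffer and pushing a whole 16-bit word through the same rep accessor as the rest of the transfer, instead of special-casing it with a swapped single-word access. A toy model of the write side against a fake 16-bit FIFO (pio_write() and the FIFO are invented; real hardware access obviously differs):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t fifo;                      /* pretend data register */
static void write16(uint16_t v) { fifo = v; }

/* Write an odd-length buffer: full 16-bit words, then one padded word. */
static void pio_write(const uint8_t *buf, size_t len)
{
        size_t words = len / 2;
        size_t i;

        for (i = 0; i < words; i++)
                write16((uint16_t)buf[2 * i] | ((uint16_t)buf[2 * i + 1] << 8));

        if (len & 1) {
                uint8_t pad[2] = { buf[len - 1], 0 };  /* trailing byte + pad */
                uint16_t w;

                memcpy(&w, pad, 2);    /* raw bytes, no endian conversion */
                write16(w);
        }
}

int main(void)
{
        const uint8_t data[] = { 0x11, 0x22, 0x33 };

        pio_write(data, sizeof(data));
        printf("last word written: 0x%04x\n", fifo);
        return 0;
}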
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 751b7ea4816..fc9c5d6d7d8 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -497,14 +497,16 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
/* Revision 0x20 added DMA */
static const struct ata_port_info info_20 = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+ ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &ali_20_port_ops
};
/* Revision 0x20 with support logic added UDMA */
static const struct ata_port_info info_20_udma = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+ ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
@@ -512,7 +514,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
/* Revision 0xC2 adds UDMA66 */
static const struct ata_port_info info_c2 = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+ ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
@@ -520,7 +523,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
/* Revision 0xC3 is UDMA66 for now */
static const struct ata_port_info info_c3 = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+ ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
@@ -528,7 +532,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
/* Revision 0xC4 is UDMA100 */
static const struct ata_port_info info_c4 = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+ ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
@@ -536,7 +541,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
/* Revision 0xC5 is UDMA133 with LBA48 DMA */
static const struct ata_port_info info_c5 = {
- .flags = ATA_FLAG_SLAVE_POSS,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 2085e0a3a05..2a6412f5d11 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -22,7 +22,7 @@
#include <linux/ata.h>
#define DRV_NAME "pata_efar"
-#define DRV_VERSION "0.4.4"
+#define DRV_VERSION "0.4.5"
/**
* efar_pre_reset - Enable bits
@@ -98,18 +98,17 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
{ 2, 1 },
{ 2, 3 }, };
- if (pio > 2)
- control |= 1; /* TIME1 enable */
+ if (pio > 1)
+ control |= 1; /* TIME */
if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */
- control |= 2; /* IE enable */
- /* Intel specifies that the PPE functionality is for disk only */
+ control |= 2; /* IE */
+ /* Intel specifies that the prefetch/posting is for disk only */
if (adev->class == ATA_DEV_ATA)
- control |= 4; /* PPE enable */
+ control |= 4; /* PPE */
pci_read_config_word(dev, idetm_port, &idetm_data);
- /* Enable PPE, IE and TIME as appropriate */
-
+ /* Set PPE, IE, and TIME as appropriate */
if (adev->devno == 0) {
idetm_data &= 0xCCF0;
idetm_data |= control;
@@ -129,7 +128,7 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
pci_write_config_byte(dev, 0x44, slave_data);
}
- idetm_data |= 0x4000; /* Ensure SITRE is enabled */
+ idetm_data |= 0x4000; /* Ensure SITRE is set */
pci_write_config_word(dev, idetm_port, idetm_data);
}
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index f72c6c5b820..6932e56d179 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -48,6 +48,7 @@
*
*/
+#include <linux/async.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -1028,6 +1029,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
&legacy_sht);
if (ret)
goto fail;
+ async_synchronize_full();
ld->platform_dev = pdev;
/* Nothing found means we drop the port as its probably not there */
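The async_synchronize_full() call added above makes the legacy probe wait for any asynchronous device-scan work to finish before it decides whether the port is present. As a rough illustration of the primitive itself (not of the libata internals), here is a minimal sketch of scheduling and then draining async work; the function and string names are made up:

#include <linux/async.h>
#include <linux/module.h>

static void slow_init(void *data, async_cookie_t cookie)
{
	/* runs in a worker thread, possibly in parallel with other calls */
	pr_info("async init for %s done\n", (char *)data);
}

static int __init example_init(void)
{
	async_schedule(slow_init, "port0");
	async_schedule(slow_init, "port1");
	/* block until every async_schedule()d function has completed */
	async_synchronize_full();
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");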
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index bdb236957cb..f0d52f72f5b 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -20,13 +20,24 @@
/* No PIO or DMA methods needed for this device */
+static unsigned int netcell_read_id(struct ata_device *adev,
+ struct ata_taskfile *tf, u16 *id)
+{
+ unsigned int err_mask = ata_do_dev_read_id(adev, tf, id);
+ /* Firmware forgets to mark words 85-87 valid */
+ if (err_mask == 0)
+ id[ATA_ID_CSF_DEFAULT] |= 0x4000;
+ return err_mask;
+}
+
static struct scsi_host_template netcell_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations netcell_ops = {
.inherits = &ata_bmdma_port_ops,
- .cable_detect = ata_cable_80wire,
+ .cable_detect = ata_cable_80wire,
+ .read_id = netcell_read_id,
};
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
new file mode 100644
index 00000000000..11fb4ccc74b
--- /dev/null
+++ b/drivers/ata/pata_palmld.c
@@ -0,0 +1,150 @@
+/*
+ * drivers/ata/pata_palmld.c
+ *
+ * Driver for IDE channel in Palm LifeDrive
+ *
+ * Based on research of:
+ * Alex Osborne <ato@meshy.org>
+ *
+ * Rewrite for mainline:
+ * Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Rewritten version based on pata_ixp4xx_cf.c:
+ * ixp4xx PATA/Compact Flash driver
+ * Copyright (C) 2006-07 Tower Technologies
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <scsi/scsi_host.h>
+#include <mach/palmld.h>
+
+#define DRV_NAME "pata_palmld"
+
+static struct scsi_host_template palmld_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations palmld_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .cable_detect = ata_cable_40wire,
+};
+
+static __devinit int palmld_pata_probe(struct platform_device *pdev)
+{
+ struct ata_host *host;
+ struct ata_port *ap;
+ void __iomem *mem;
+ int ret;
+
+ /* allocate host */
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host)
+ return -ENOMEM;
+
+ /* remap drive's physical memory address */
+ mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000);
+ if (!mem)
+ return -ENOMEM;
+
+ /* request and activate power GPIO, IRQ GPIO */
+ ret = gpio_request(GPIO_NR_PALMLD_IDE_PWEN, "HDD PWR");
+ if (ret)
+ goto err1;
+ ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_PWEN, 1);
+ if (ret)
+ goto err2;
+
+ ret = gpio_request(GPIO_NR_PALMLD_IDE_RESET, "HDD RST");
+ if (ret)
+ goto err2;
+ ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_RESET, 0);
+ if (ret)
+ goto err3;
+
+ /* reset the drive */
+ gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0);
+ msleep(30);
+ gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 1);
+ msleep(30);
+
+ /* setup the ata port */
+ ap = host->ports[0];
+ ap->ops = &palmld_port_ops;
+ ap->pio_mask = ATA_PIO4;
+ ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_PIO_POLLING;
+
+ /* memory mapping voodoo */
+ ap->ioaddr.cmd_addr = mem + 0x10;
+ ap->ioaddr.altstatus_addr = mem + 0xe;
+ ap->ioaddr.ctl_addr = mem + 0xe;
+
+ /* start the port */
+ ata_sff_std_ports(&ap->ioaddr);
+
+ /* activate host */
+ return ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING,
+ &palmld_sht);
+
+err3:
+ gpio_free(GPIO_NR_PALMLD_IDE_RESET);
+err2:
+ gpio_free(GPIO_NR_PALMLD_IDE_PWEN);
+err1:
+ return ret;
+}
+
+static __devexit int palmld_pata_remove(struct platform_device *dev)
+{
+ struct ata_host *host = platform_get_drvdata(dev);
+
+ ata_host_detach(host);
+
+ /* power down the HDD */
+ gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0);
+
+ gpio_free(GPIO_NR_PALMLD_IDE_RESET);
+ gpio_free(GPIO_NR_PALMLD_IDE_PWEN);
+
+ return 0;
+}
+
+static struct platform_driver palmld_pata_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = palmld_pata_probe,
+ .remove = __devexit_p(palmld_pata_remove),
+};
+
+static int __init palmld_pata_init(void)
+{
+ return platform_driver_register(&palmld_pata_platform_driver);
+}
+
+static void __exit palmld_pata_exit(void)
+{
+ platform_driver_unregister(&palmld_pata_platform_driver);
+}
+
+MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("PalmLD PATA driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
+
+module_init(palmld_pata_init);
+module_exit(palmld_pata_exit);
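The probe above binds by driver name to a platform device that the board support code is expected to register; the driver ioremaps PALMLD_IDE_PHYS itself instead of reading platform resources. A hedged sketch of such a board-side registration (the real LifeDrive board file may look different):

#include <linux/platform_device.h>

/* hypothetical board code: only the matching name is required here */
static struct platform_device palmld_ide_device = {
	.name	= "pata_palmld",
	.id	= -1,
};

static int __init palmld_board_init(void)
{
	return platform_device_register(&palmld_ide_device);
}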
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 6cda12ba812..b2d11f300c3 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -305,8 +305,8 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
-static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
+static int nv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
@@ -406,49 +406,82 @@ static struct scsi_host_template nv_swncq_sht = {
.slave_configure = nv_swncq_slave_config,
};
-static struct ata_port_operations nv_common_ops = {
+/*
+ * NV SATA controllers have various different problems with hardreset
+ * protocol depending on the specific controller and device.
+ *
+ * GENERIC:
+ *
+ * bko11195 reports that link doesn't come online after hardreset on
+ * generic nv's and there have been several other similar reports on
+ * linux-ide.
+ *
+ * bko12351#c23 reports that warmplug on MCP61 doesn't work with
+ * softreset.
+ *
+ * NF2/3:
+ *
+ * bko3352 reports nf2/3 controllers can't determine device signature
+ * reliably after hardreset. The following thread reports detection
+ * failure on cold boot with the standard debouncing timing.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/34098
+ *
+ * bko12176 reports that hardreset fails to bring up the link during
+ * boot on nf2.
+ *
+ * CK804:
+ *
+ * For initial probing after boot and hot plugging, hardreset mostly
+ * works fine on CK804 but curiously, reprobing on the initial port
+ * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
+ * FIS in a somewhat nondeterministic way.
+ *
+ * SWNCQ:
+ *
+ * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
+ * hardreset should be used and hardreset can't report proper
+ * signature, which suggests that mcp5x is closer to nf2 as far as
+ * reset quirkiness is concerned.
+ *
+ * bko12703 reports that boot probing fails for intel SSD with
+ * hardreset. Link fails to come online. Softreset works fine.
+ *
+ * The failures are varied but the following patterns seem true for
+ * all flavors.
+ *
+ * - Softreset during boot always works.
+ *
+ * - Hardreset during boot sometimes fails to bring up the link on
+ * certain combinations and device signature acquisition is
+ * unreliable.
+ *
+ * - Hardreset is often necessary after hotplug.
+ *
+ * So, preferring softreset for boot probing and error handling (as
+ * hardreset might bring down the link) but using hardreset for
+ * post-boot probing should work around the above issues in most
+ * cases. Define nv_hardreset() which only kicks in for post-boot
+ * probing and use it for all variants.
+ */
+static struct ata_port_operations nv_generic_ops = {
.inherits = &ata_bmdma_port_ops,
.lost_interrupt = ATA_OP_NULL,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
+ .hardreset = nv_hardreset,
};
-/* OSDL bz11195 reports that link doesn't come online after hardreset
- * on generic nv's and there have been several other similar reports
- * on linux-ide. Disable hardreset for generic nv's.
- */
-static struct ata_port_operations nv_generic_ops = {
- .inherits = &nv_common_ops,
- .hardreset = ATA_OP_NULL,
-};
-
-/* nf2 is ripe with hardreset related problems.
- *
- * kernel bz#3352 reports nf2/3 controllers can't determine device
- * signature reliably. The following thread reports detection failure
- * on cold boot with the standard debouncing timing.
- *
- * http://thread.gmane.org/gmane.linux.ide/34098
- *
- * And bz#12176 reports that hardreset simply doesn't work on nf2.
- * Give up on it and just don't do hardreset.
- */
static struct ata_port_operations nv_nf2_ops = {
.inherits = &nv_generic_ops,
.freeze = nv_nf2_freeze,
.thaw = nv_nf2_thaw,
};
-/* For initial probing after boot and hot plugging, hardreset mostly
- * works fine on CK804 but curiously, reprobing on the initial port by
- * rescanning or rmmod/insmod fails to acquire the initial D2H Reg FIS
- * in somewhat undeterministic way. Use noclassify hardreset.
- */
static struct ata_port_operations nv_ck804_ops = {
- .inherits = &nv_common_ops,
+ .inherits = &nv_generic_ops,
.freeze = nv_ck804_freeze,
.thaw = nv_ck804_thaw,
- .hardreset = nv_noclassify_hardreset,
.host_stop = nv_ck804_host_stop,
};
@@ -476,19 +509,8 @@ static struct ata_port_operations nv_adma_ops = {
.host_stop = nv_adma_host_stop,
};
-/* Kernel bz#12351 reports that when SWNCQ is enabled, for hotplug to
- * work, hardreset should be used and hardreset can't report proper
- * signature, which suggests that mcp5x is closer to nf2 as long as
- * reset quirkiness is concerned. Define separate ops for mcp5x with
- * nv_noclassify_hardreset().
- */
-static struct ata_port_operations nv_mcp5x_ops = {
- .inherits = &nv_common_ops,
- .hardreset = nv_noclassify_hardreset,
-};
-
static struct ata_port_operations nv_swncq_ops = {
- .inherits = &nv_mcp5x_ops,
+ .inherits = &nv_generic_ops,
.qc_defer = ata_std_qc_defer,
.qc_prep = nv_swncq_qc_prep,
@@ -557,7 +579,7 @@ static const struct ata_port_info nv_port_info[] = {
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
- .port_ops = &nv_mcp5x_ops,
+ .port_ops = &nv_generic_ops,
.private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
},
/* SWNCQ */
@@ -1559,15 +1581,24 @@ static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
return 0;
}
-static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
+static int nv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
{
- bool online;
- int rc;
+ struct ata_eh_context *ehc = &link->eh_context;
- rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
- &online, NULL);
- return online ? -EAGAIN : rc;
+ /* Do hardreset iff it's post-boot probing, please read the
+ * comment above port ops for details.
+ */
+ if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
+ !ata_dev_enabled(link->device))
+ sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
+ NULL, NULL);
+ else if (!(ehc->i.flags & ATA_EHI_QUIET))
+ ata_link_printk(link, KERN_INFO,
+ "nv: skipping hardreset on occupied port\n");
+
+ /* device signature acquisition is unreliable */
+ return -EAGAIN;
}
static void nv_nf2_freeze(struct ata_port *ap)
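The policy laid out in the long comment reduces to a single test inside nv_hardreset(): boot probing (the module is still loading) and error handling on an occupied port skip the hardreset, everything else (typically hotplug) gets one, and the unconditional -EAGAIN return makes EH follow up with softreset for classification. A condensed restatement as a helper, purely illustrative and not the driver's actual code:

/* illustrative only */
static bool nv_want_hardreset(struct ata_link *link)
{
	/* boot probing: the module is still loading */
	if (link->ap->pflags & ATA_PFLAG_LOADING)
		return false;
	/* error handling on an occupied port: device already classified */
	if (ata_dev_enabled(link->device))
		return false;
	/* post-boot probing (e.g. hotplug): hardreset is required */
	return true;
}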
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index e67ce8e5caa..030ec079b18 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -183,7 +183,7 @@ static struct scsi_host_template sil_sht = {
};
static struct ata_port_operations sil_ops = {
- .inherits = &ata_bmdma_port_ops,
+ .inherits = &ata_bmdma32_port_ops,
.dev_config = sil_dev_config,
.set_mode = sil_set_mode,
.bmdma_setup = sil_bmdma_setup,
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index eb05a3c82a9..bbcf970068a 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -193,6 +193,7 @@ enum {
PDC_TIMER_MASK_INT,
};
+#define ECC_ERASE_BUF_SZ (128 * 1024)
struct pdc_port_priv {
u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
@@ -1280,7 +1281,6 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
{
int speed, size, length;
u32 addr, spd0, pci_status;
- u32 tmp = 0;
u32 time_period = 0;
u32 tcount = 0;
u32 ticks = 0;
@@ -1395,14 +1395,17 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0);
if (spd0 == 0x02) {
+ void *buf;
VPRINTK("Start ECC initialization\n");
addr = 0;
length = size * 1024 * 1024;
+ buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
while (addr < length) {
- pdc20621_put_to_dimm(host, (void *) &tmp, addr,
- sizeof(u32));
- addr += sizeof(u32);
+ pdc20621_put_to_dimm(host, buf, addr,
+ ECC_ERASE_BUF_SZ);
+ addr += ECC_ERASE_BUF_SZ;
}
+ kfree(buf);
VPRINTK("Finish ECC initialization\n");
}
return 0;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index dc030f1f00f..4b04a15146d 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -279,7 +279,7 @@ static struct device *next_device(struct klist_iter *i)
*
* NOTE: The device that returns a non-zero value is not retained
* in any way, nor is its refcount incremented. If the caller needs
- * to retain this data, it should do, and increment the reference
+ * to retain this data, it should do so, and increment the reference
* count in the supplied callback.
*/
int bus_for_each_dev(struct bus_type *bus, struct device *start,
@@ -700,8 +700,10 @@ int bus_add_driver(struct device_driver *drv)
}
kobject_uevent(&priv->kobj, KOBJ_ADD);
- return error;
+ return 0;
out_unregister:
+ kfree(drv->p);
+ drv->p = NULL;
kobject_put(&priv->kobj);
out_put_bus:
bus_put(bus);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4aa527b8a91..7ecb1938e59 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -22,6 +22,7 @@
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
#include <linux/mutex.h>
+#include <linux/async.h>
#include "base.h"
#include "power/power.h"
@@ -161,10 +162,18 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
struct device *dev = to_dev(kobj);
int retval = 0;
- /* add the major/minor if present */
+ /* add device node properties if present */
if (MAJOR(dev->devt)) {
+ const char *tmp;
+ const char *name;
+
add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
+ name = device_get_nodename(dev, &tmp);
+ if (name) {
+ add_uevent_var(env, "DEVNAME=%s", name);
+ kfree(tmp);
+ }
}
if (dev->type && dev->type->name)
@@ -874,12 +883,12 @@ int device_add(struct device *dev)
* the name, and force the use of dev_name()
*/
if (dev->init_name) {
- dev_set_name(dev, dev->init_name);
+ dev_set_name(dev, "%s", dev->init_name);
dev->init_name = NULL;
}
if (!dev_name(dev))
- goto done;
+ goto name_error;
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
@@ -978,6 +987,9 @@ done:
cleanup_device_parent(dev);
if (parent)
put_device(parent);
+name_error:
+ kfree(dev->p);
+ dev->p = NULL;
goto done;
}
@@ -1125,6 +1137,47 @@ static struct device *next_device(struct klist_iter *i)
}
/**
+ * device_get_nodename - path of device node file
+ * @dev: device
+ * @tmp: possibly allocated string
+ *
+ * Return the relative path of a possible device node.
+ * Non-default names may need to allocate memory to compose
+ * a name. This memory is returned in tmp and needs to be
+ * freed by the caller.
+ */
+const char *device_get_nodename(struct device *dev, const char **tmp)
+{
+ char *s;
+
+ *tmp = NULL;
+
+ /* the device type may provide a specific name */
+ if (dev->type && dev->type->nodename)
+ *tmp = dev->type->nodename(dev);
+ if (*tmp)
+ return *tmp;
+
+ /* the class may provide a specific name */
+ if (dev->class && dev->class->nodename)
+ *tmp = dev->class->nodename(dev);
+ if (*tmp)
+ return *tmp;
+
+ /* return name without allocation, tmp == NULL */
+ if (strchr(dev_name(dev), '!') == NULL)
+ return dev_name(dev);
+
+ /* replace '!' in the name with '/' */
+ *tmp = kstrdup(dev_name(dev), GFP_KERNEL);
+ if (!*tmp)
+ return NULL;
+ while ((s = strchr(*tmp, '!')))
+ s[0] = '/';
+ return *tmp;
+}
+
+/**
* device_for_each_child - device child iterator.
* @parent: parent struct device.
* @data: data for the callback.
@@ -1268,7 +1321,7 @@ struct device *__root_device_register(const char *name, struct module *owner)
if (!root)
return ERR_PTR(err);
- err = dev_set_name(&root->dev, name);
+ err = dev_set_name(&root->dev, "%s", name);
if (err) {
kfree(root);
return ERR_PTR(err);
@@ -1662,4 +1715,5 @@ void device_shutdown(void)
kobject_put(sysfs_dev_char_kobj);
kobject_put(sysfs_dev_block_kobj);
kobject_put(dev_kobj);
+ async_synchronize_full();
}
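With the DEVNAME support above, a subsystem can steer where udev creates its device node by supplying a nodename callback on its class or device type. A hypothetical example (class and directory names are made up) for devices that should live under a subdirectory of /dev:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>

/* hypothetical: place nodes under /dev/foo/<devname> */
static char *foo_nodename(struct device *dev)
{
	return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));
}

static struct class foo_class = {
	.name		= "foo",
	.owner		= THIS_MODULE,
	.nodename	= foo_nodename,
};
/* dev_uevent() then emits DEVNAME=foo/<devname> for these devices */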
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 742cbe6b042..f0106875f01 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -226,7 +226,7 @@ static int __device_attach(struct device_driver *drv, void *data)
* pair is found, break out and return.
*
* Returns 1 if the device was bound to a driver;
- * 0 if no matching device was found;
+ * 0 if no matching driver was found;
* -ENODEV if the device is not registered.
*
* When called for a USB interface, @dev->parent->sem must be held.
@@ -320,6 +320,10 @@ static void __device_release_driver(struct device *dev)
devres_release_all(dev);
dev->driver = NULL;
klist_remove(&dev->p->knode_driver);
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_UNBOUND_DRIVER,
+ dev);
}
}
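The new BUS_NOTIFY_UNBOUND_DRIVER event lets interested code react after a driver has been detached from a device. A small sketch of a listener; the bus chosen and the work done in the callback are placeholders:

#include <linux/device.h>
#include <linux/notifier.h>

static int my_bus_notify(struct notifier_block *nb,
			 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_UNBOUND_DRIVER)
		dev_info(dev, "driver was unbound\n");
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_bus_notify,
};

/* registration, e.g. from module init:
 *	bus_register_notifier(&platform_bus_type, &my_nb);
 */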
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index c51f11bb29a..8ae0f63602e 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -257,6 +257,10 @@ EXPORT_SYMBOL_GPL(driver_register);
*/
void driver_unregister(struct device_driver *drv)
{
+ if (!drv || !drv->p) {
+ WARN(1, "Unexpected driver unregister!\n");
+ return;
+ }
driver_remove_groups(drv, drv->groups);
bus_remove_driver(drv);
}
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d3a59c688fe..ddeb819c8f8 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -17,7 +17,7 @@
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
-
+#include <linux/highmem.h>
#include <linux/firmware.h>
#include "base.h"
@@ -40,12 +40,15 @@ static int loading_timeout = 60; /* In seconds */
static DEFINE_MUTEX(fw_lock);
struct firmware_priv {
- char fw_id[FIRMWARE_NAME_MAX];
+ char *fw_id;
struct completion completion;
struct bin_attribute attr_data;
struct firmware *fw;
unsigned long status;
- int alloc_size;
+ struct page **pages;
+ int nr_pages;
+ int page_array_size;
+ const char *vdata;
struct timer_list timeout;
};
@@ -122,6 +125,10 @@ static ssize_t firmware_loading_show(struct device *dev,
return sprintf(buf, "%d\n", loading);
}
+/* Some architectures don't have PAGE_KERNEL_RO */
+#ifndef PAGE_KERNEL_RO
+#define PAGE_KERNEL_RO PAGE_KERNEL
+#endif
/**
* firmware_loading_store - set value in the 'loading' control file
* @dev: device pointer
@@ -141,6 +148,7 @@ static ssize_t firmware_loading_store(struct device *dev,
{
struct firmware_priv *fw_priv = dev_get_drvdata(dev);
int loading = simple_strtol(buf, NULL, 10);
+ int i;
switch (loading) {
case 1:
@@ -151,13 +159,30 @@ static ssize_t firmware_loading_store(struct device *dev,
}
vfree(fw_priv->fw->data);
fw_priv->fw->data = NULL;
+ for (i = 0; i < fw_priv->nr_pages; i++)
+ __free_page(fw_priv->pages[i]);
+ kfree(fw_priv->pages);
+ fw_priv->pages = NULL;
+ fw_priv->page_array_size = 0;
+ fw_priv->nr_pages = 0;
fw_priv->fw->size = 0;
- fw_priv->alloc_size = 0;
set_bit(FW_STATUS_LOADING, &fw_priv->status);
mutex_unlock(&fw_lock);
break;
case 0:
if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
+ vfree(fw_priv->fw->data);
+ fw_priv->fw->data = vmap(fw_priv->pages,
+ fw_priv->nr_pages,
+ 0, PAGE_KERNEL_RO);
+ if (!fw_priv->fw->data) {
+ dev_err(dev, "%s: vmap() failed\n", __func__);
+ goto err;
+ }
+ /* Pages will be freed by vfree() */
+ fw_priv->pages = NULL;
+ fw_priv->page_array_size = 0;
+ fw_priv->nr_pages = 0;
complete(&fw_priv->completion);
clear_bit(FW_STATUS_LOADING, &fw_priv->status);
break;
@@ -167,6 +192,7 @@ static ssize_t firmware_loading_store(struct device *dev,
dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
/* fallthrough */
case -1:
+ err:
fw_load_abort(fw_priv);
break;
}
@@ -191,8 +217,28 @@ firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
ret_count = -ENODEV;
goto out;
}
- ret_count = memory_read_from_buffer(buffer, count, &offset,
- fw->data, fw->size);
+ if (offset > fw->size)
+ return 0;
+ if (count > fw->size - offset)
+ count = fw->size - offset;
+
+ ret_count = count;
+
+ while (count) {
+ void *page_data;
+ int page_nr = offset >> PAGE_SHIFT;
+ int page_ofs = offset & (PAGE_SIZE-1);
+ int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
+
+ page_data = kmap(fw_priv->pages[page_nr]);
+
+ memcpy(buffer, page_data + page_ofs, page_cnt);
+
+ kunmap(fw_priv->pages[page_nr]);
+ buffer += page_cnt;
+ offset += page_cnt;
+ count -= page_cnt;
+ }
out:
mutex_unlock(&fw_lock);
return ret_count;
@@ -201,27 +247,39 @@ out:
static int
fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
- u8 *new_data;
- int new_size = fw_priv->alloc_size;
+ int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
+
+ /* If the array of pages is too small, grow it... */
+ if (fw_priv->page_array_size < pages_needed) {
+ int new_array_size = max(pages_needed,
+ fw_priv->page_array_size * 2);
+ struct page **new_pages;
+
+ new_pages = kmalloc(new_array_size * sizeof(void *),
+ GFP_KERNEL);
+ if (!new_pages) {
+ fw_load_abort(fw_priv);
+ return -ENOMEM;
+ }
+ memcpy(new_pages, fw_priv->pages,
+ fw_priv->page_array_size * sizeof(void *));
+ memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
+ (new_array_size - fw_priv->page_array_size));
+ kfree(fw_priv->pages);
+ fw_priv->pages = new_pages;
+ fw_priv->page_array_size = new_array_size;
+ }
- if (min_size <= fw_priv->alloc_size)
- return 0;
+ while (fw_priv->nr_pages < pages_needed) {
+ fw_priv->pages[fw_priv->nr_pages] =
+ alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
- new_size = ALIGN(min_size, PAGE_SIZE);
- new_data = vmalloc(new_size);
- if (!new_data) {
- printk(KERN_ERR "%s: unable to alloc buffer\n", __func__);
- /* Make sure that we don't keep incomplete data */
- fw_load_abort(fw_priv);
- return -ENOMEM;
- }
- fw_priv->alloc_size = new_size;
- if (fw_priv->fw->data) {
- memcpy(new_data, fw_priv->fw->data, fw_priv->fw->size);
- vfree(fw_priv->fw->data);
+ if (!fw_priv->pages[fw_priv->nr_pages]) {
+ fw_load_abort(fw_priv);
+ return -ENOMEM;
+ }
+ fw_priv->nr_pages++;
}
- fw_priv->fw->data = new_data;
- BUG_ON(min_size > fw_priv->alloc_size);
return 0;
}
@@ -258,10 +316,25 @@ firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr,
if (retval)
goto out;
- memcpy((u8 *)fw->data + offset, buffer, count);
-
- fw->size = max_t(size_t, offset + count, fw->size);
retval = count;
+
+ while (count) {
+ void *page_data;
+ int page_nr = offset >> PAGE_SHIFT;
+ int page_ofs = offset & (PAGE_SIZE - 1);
+ int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
+
+ page_data = kmap(fw_priv->pages[page_nr]);
+
+ memcpy(page_data + page_ofs, buffer, page_cnt);
+
+ kunmap(fw_priv->pages[page_nr]);
+ buffer += page_cnt;
+ offset += page_cnt;
+ count -= page_cnt;
+ }
+
+ fw->size = max_t(size_t, offset, fw->size);
out:
mutex_unlock(&fw_lock);
return retval;
@@ -277,9 +350,14 @@ static struct bin_attribute firmware_attr_data_tmpl = {
static void fw_dev_release(struct device *dev)
{
struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ int i;
+ for (i = 0; i < fw_priv->nr_pages; i++)
+ __free_page(fw_priv->pages[i]);
+ kfree(fw_priv->pages);
+ kfree(fw_priv->fw_id);
kfree(fw_priv);
- kfree(dev);
+ put_device(dev);
module_put(THIS_MODULE);
}
@@ -309,13 +387,19 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
init_completion(&fw_priv->completion);
fw_priv->attr_data = firmware_attr_data_tmpl;
- strlcpy(fw_priv->fw_id, fw_name, FIRMWARE_NAME_MAX);
+ fw_priv->fw_id = kstrdup(fw_name, GFP_KERNEL);
+ if (!fw_priv->fw_id) {
+ dev_err(device, "%s: Firmware name allocation failed\n",
+ __func__);
+ retval = -ENOMEM;
+ goto error_kfree;
+ }
fw_priv->timeout.function = firmware_class_timeout;
fw_priv->timeout.data = (u_long) fw_priv;
init_timer(&fw_priv->timeout);
- dev_set_name(f_dev, dev_name(device));
+ dev_set_name(f_dev, "%s", dev_name(device));
f_dev->parent = device;
f_dev->class = &firmware_class;
dev_set_drvdata(f_dev, fw_priv);
@@ -323,14 +407,17 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
retval = device_register(f_dev);
if (retval) {
dev_err(device, "%s: device_register failed\n", __func__);
- goto error_kfree;
+ put_device(f_dev);
+ goto error_kfree_fw_id;
}
*dev_p = f_dev;
return 0;
+error_kfree_fw_id:
+ kfree(fw_priv->fw_id);
error_kfree:
- kfree(fw_priv);
kfree(f_dev);
+ kfree(fw_priv);
return retval;
}
@@ -538,8 +625,9 @@ request_firmware_work_func(void *arg)
* @cont: function will be called asynchronously when the firmware
* request is over.
*
- * Asynchronous variant of request_firmware() for contexts where
- * it is not possible to sleep.
+ * Asynchronous variant of request_firmware() for user contexts where
+ * it is not possible to sleep for a long time. It can't be called
+ * in atomic contexts.
**/
int
request_firmware_nowait(
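The rework above replaces one large vmalloc buffer with an array of individually allocated (possibly highmem) pages that is only stitched into a contiguous, read-only mapping with vmap() once loading completes. A stripped-down sketch of that pattern outside the firmware class, with the growth of the page array and most error handling omitted; teardown for this sketch would be vunmap() followed by __free_page() on each page:

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

static void *map_pages_ro(struct page **pages, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[i])
			goto err;
	}
	/* stitch the scattered pages into one contiguous read-only view */
	return vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL_RO);
err:
	while (--i >= 0)
		__free_page(pages[i]);
	return NULL;
}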
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 40b809742a1..91d4087b403 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -72,10 +72,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
"Node %d Inactive(anon): %8lu kB\n"
"Node %d Active(file): %8lu kB\n"
"Node %d Inactive(file): %8lu kB\n"
-#ifdef CONFIG_UNEVICTABLE_LRU
"Node %d Unevictable: %8lu kB\n"
"Node %d Mlocked: %8lu kB\n"
-#endif
#ifdef CONFIG_HIGHMEM
"Node %d HighTotal: %8lu kB\n"
"Node %d HighFree: %8lu kB\n"
@@ -105,10 +103,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
nid, K(node_page_state(nid, NR_UNEVICTABLE)),
nid, K(node_page_state(nid, NR_MLOCK)),
-#endif
#ifdef CONFIG_HIGHMEM
nid, K(i.totalhigh),
nid, K(i.freehigh),
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 8b4708e0624..81cb01bfc35 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -69,7 +69,8 @@ EXPORT_SYMBOL_GPL(platform_get_irq);
* @name: resource name
*/
struct resource *platform_get_resource_byname(struct platform_device *dev,
- unsigned int type, char *name)
+ unsigned int type,
+ const char *name)
{
int i;
@@ -88,7 +89,7 @@ EXPORT_SYMBOL_GPL(platform_get_resource_byname);
* @dev: platform device
* @name: IRQ name
*/
-int platform_get_irq_byname(struct platform_device *dev, char *name)
+int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ,
name);
@@ -244,7 +245,7 @@ int platform_device_add(struct platform_device *pdev)
if (pdev->id != -1)
dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
else
- dev_set_name(&pdev->dev, pdev->name);
+ dev_set_name(&pdev->dev, "%s", pdev->name);
for (i = 0; i < pdev->num_resources; i++) {
struct resource *p, *r = &pdev->resource[i];
@@ -469,22 +470,6 @@ static void platform_drv_shutdown(struct device *_dev)
drv->shutdown(dev);
}
-static int platform_drv_suspend(struct device *_dev, pm_message_t state)
-{
- struct platform_driver *drv = to_platform_driver(_dev->driver);
- struct platform_device *dev = to_platform_device(_dev);
-
- return drv->suspend(dev, state);
-}
-
-static int platform_drv_resume(struct device *_dev)
-{
- struct platform_driver *drv = to_platform_driver(_dev->driver);
- struct platform_device *dev = to_platform_device(_dev);
-
- return drv->resume(dev);
-}
-
/**
* platform_driver_register
* @drv: platform driver structure
@@ -498,10 +483,10 @@ int platform_driver_register(struct platform_driver *drv)
drv->driver.remove = platform_drv_remove;
if (drv->shutdown)
drv->driver.shutdown = platform_drv_shutdown;
- if (drv->suspend)
- drv->driver.suspend = platform_drv_suspend;
- if (drv->resume)
- drv->driver.resume = platform_drv_resume;
+ if (drv->suspend || drv->resume)
+ pr_warning("Platform driver '%s' needs updating - please use "
+ "dev_pm_ops\n", drv->driver.name);
+
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_register);
@@ -633,10 +618,12 @@ static int platform_match(struct device *dev, struct device_driver *drv)
static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
+ struct platform_driver *pdrv = to_platform_driver(dev->driver);
+ struct platform_device *pdev = to_platform_device(dev);
int ret = 0;
- if (dev->driver && dev->driver->suspend)
- ret = dev->driver->suspend(dev, mesg);
+ if (dev->driver && pdrv->suspend)
+ ret = pdrv->suspend(pdev, mesg);
return ret;
}
@@ -667,10 +654,12 @@ static int platform_legacy_resume_early(struct device *dev)
static int platform_legacy_resume(struct device *dev)
{
+ struct platform_driver *pdrv = to_platform_driver(dev->driver);
+ struct platform_device *pdev = to_platform_device(dev);
int ret = 0;
- if (dev->driver && dev->driver->resume)
- ret = dev->driver->resume(dev);
+ if (dev->driver && pdrv->resume)
+ ret = pdrv->resume(pdev);
return ret;
}
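The warning added above nudges platform drivers away from the legacy suspend/resume members; the replacement is a dev_pm_ops table hung off the embedded device_driver. A hedged sketch of the conversion with the callback bodies left empty and all names invented:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware; dev_get_drvdata(dev) holds driver state */
	return 0;
}

static int foo_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
};

static struct platform_driver foo_driver = {
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,
	},
	/* .probe / .remove as before; no .suspend / .resume members */
};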
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 69b4ddb7de3..fae72545898 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -315,13 +315,13 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
/*------------------------- Resume routines -------------------------*/
/**
- * resume_device_noirq - Power on one device (early resume).
+ * device_resume_noirq - Power on one device (early resume).
* @dev: Device.
* @state: PM transition of the system being carried out.
*
* Must be called with interrupts disabled.
*/
-static int resume_device_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state)
{
int error = 0;
@@ -334,9 +334,6 @@ static int resume_device_noirq(struct device *dev, pm_message_t state)
if (dev->bus->pm) {
pm_dev_dbg(dev, state, "EARLY ");
error = pm_noirq_op(dev, dev->bus->pm, state);
- } else if (dev->bus->resume_early) {
- pm_dev_dbg(dev, state, "legacy EARLY ");
- error = dev->bus->resume_early(dev);
}
End:
TRACE_RESUME(error);
@@ -344,50 +341,40 @@ static int resume_device_noirq(struct device *dev, pm_message_t state)
}
/**
- * dpm_power_up - Power on all regular (non-sysdev) devices.
+ * dpm_resume_noirq - Power on all regular (non-sysdev) devices.
* @state: PM transition of the system being carried out.
*
- * Execute the appropriate "noirq resume" callback for all devices marked
- * as DPM_OFF_IRQ.
+ * Call the "noirq" resume handlers for all devices marked as
+ * DPM_OFF_IRQ and enable device drivers to receive interrupts.
*
* Must be called under dpm_list_mtx. Device drivers should not receive
* interrupts while it's being executed.
*/
-static void dpm_power_up(pm_message_t state)
+void dpm_resume_noirq(pm_message_t state)
{
struct device *dev;
+ mutex_lock(&dpm_list_mtx);
list_for_each_entry(dev, &dpm_list, power.entry)
if (dev->power.status > DPM_OFF) {
int error;
dev->power.status = DPM_OFF;
- error = resume_device_noirq(dev, state);
+ error = device_resume_noirq(dev, state);
if (error)
pm_dev_err(dev, state, " early", error);
}
-}
-
-/**
- * device_power_up - Turn on all devices that need special attention.
- * @state: PM transition of the system being carried out.
- *
- * Call the "early" resume handlers and enable device drivers to receive
- * interrupts.
- */
-void device_power_up(pm_message_t state)
-{
- dpm_power_up(state);
+ mutex_unlock(&dpm_list_mtx);
resume_device_irqs();
}
-EXPORT_SYMBOL_GPL(device_power_up);
+EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
- * resume_device - Restore state for one device.
+ * device_resume - Restore state for one device.
* @dev: Device.
* @state: PM transition of the system being carried out.
*/
-static int resume_device(struct device *dev, pm_message_t state)
+static int device_resume(struct device *dev, pm_message_t state)
{
int error = 0;
@@ -412,9 +399,6 @@ static int resume_device(struct device *dev, pm_message_t state)
if (dev->type->pm) {
pm_dev_dbg(dev, state, "type ");
error = pm_op(dev, dev->type->pm, state);
- } else if (dev->type->resume) {
- pm_dev_dbg(dev, state, "legacy type ");
- error = dev->type->resume(dev);
}
if (error)
goto End;
@@ -460,7 +444,7 @@ static void dpm_resume(pm_message_t state)
dev->power.status = DPM_RESUMING;
mutex_unlock(&dpm_list_mtx);
- error = resume_device(dev, state);
+ error = device_resume(dev, state);
mutex_lock(&dpm_list_mtx);
if (error)
@@ -478,11 +462,11 @@ static void dpm_resume(pm_message_t state)
}
/**
- * complete_device - Complete a PM transition for given device
+ * device_complete - Complete a PM transition for given device
* @dev: Device.
* @state: PM transition of the system being carried out.
*/
-static void complete_device(struct device *dev, pm_message_t state)
+static void device_complete(struct device *dev, pm_message_t state)
{
down(&dev->sem);
@@ -525,7 +509,7 @@ static void dpm_complete(pm_message_t state)
dev->power.status = DPM_ON;
mutex_unlock(&dpm_list_mtx);
- complete_device(dev, state);
+ device_complete(dev, state);
mutex_lock(&dpm_list_mtx);
}
@@ -538,19 +522,19 @@ static void dpm_complete(pm_message_t state)
}
/**
- * device_resume - Restore state of each device in system.
+ * dpm_resume_end - Restore state of each device in system.
* @state: PM transition of the system being carried out.
*
* Resume all the devices, unlock them all, and allow new
* devices to be registered once again.
*/
-void device_resume(pm_message_t state)
+void dpm_resume_end(pm_message_t state)
{
might_sleep();
dpm_resume(state);
dpm_complete(state);
}
-EXPORT_SYMBOL_GPL(device_resume);
+EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
@@ -575,13 +559,13 @@ static pm_message_t resume_event(pm_message_t sleep_state)
}
/**
- * suspend_device_noirq - Shut down one device (late suspend).
+ * device_suspend_noirq - Shut down one device (late suspend).
* @dev: Device.
* @state: PM transition of the system being carried out.
*
* This is called with interrupts off and only a single CPU running.
*/
-static int suspend_device_noirq(struct device *dev, pm_message_t state)
+static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
int error = 0;
@@ -591,49 +575,47 @@ static int suspend_device_noirq(struct device *dev, pm_message_t state)
if (dev->bus->pm) {
pm_dev_dbg(dev, state, "LATE ");
error = pm_noirq_op(dev, dev->bus->pm, state);
- } else if (dev->bus->suspend_late) {
- pm_dev_dbg(dev, state, "legacy LATE ");
- error = dev->bus->suspend_late(dev, state);
- suspend_report_result(dev->bus->suspend_late, error);
}
return error;
}
/**
- * device_power_down - Shut down special devices.
+ * dpm_suspend_noirq - Power down all regular (non-sysdev) devices.
* @state: PM transition of the system being carried out.
*
- * Prevent device drivers from receiving interrupts and call the "late"
+ * Prevent device drivers from receiving interrupts and call the "noirq"
* suspend handlers.
*
* Must be called under dpm_list_mtx.
*/
-int device_power_down(pm_message_t state)
+int dpm_suspend_noirq(pm_message_t state)
{
struct device *dev;
int error = 0;
suspend_device_irqs();
+ mutex_lock(&dpm_list_mtx);
list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
- error = suspend_device_noirq(dev, state);
+ error = device_suspend_noirq(dev, state);
if (error) {
pm_dev_err(dev, state, " late", error);
break;
}
dev->power.status = DPM_OFF_IRQ;
}
+ mutex_unlock(&dpm_list_mtx);
if (error)
- device_power_up(resume_event(state));
+ dpm_resume_noirq(resume_event(state));
return error;
}
-EXPORT_SYMBOL_GPL(device_power_down);
+EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
/**
- * suspend_device - Save state of one device.
+ * device_suspend - Save state of one device.
* @dev: Device.
* @state: PM transition of the system being carried out.
*/
-static int suspend_device(struct device *dev, pm_message_t state)
+static int device_suspend(struct device *dev, pm_message_t state)
{
int error = 0;
@@ -656,10 +638,6 @@ static int suspend_device(struct device *dev, pm_message_t state)
if (dev->type->pm) {
pm_dev_dbg(dev, state, "type ");
error = pm_op(dev, dev->type->pm, state);
- } else if (dev->type->suspend) {
- pm_dev_dbg(dev, state, "legacy type ");
- error = dev->type->suspend(dev, state);
- suspend_report_result(dev->type->suspend, error);
}
if (error)
goto End;
@@ -700,7 +678,7 @@ static int dpm_suspend(pm_message_t state)
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = suspend_device(dev, state);
+ error = device_suspend(dev, state);
mutex_lock(&dpm_list_mtx);
if (error) {
@@ -719,11 +697,11 @@ static int dpm_suspend(pm_message_t state)
}
/**
- * prepare_device - Execute the ->prepare() callback(s) for given device.
+ * device_prepare - Execute the ->prepare() callback(s) for given device.
* @dev: Device.
* @state: PM transition of the system being carried out.
*/
-static int prepare_device(struct device *dev, pm_message_t state)
+static int device_prepare(struct device *dev, pm_message_t state)
{
int error = 0;
@@ -777,7 +755,7 @@ static int dpm_prepare(pm_message_t state)
dev->power.status = DPM_PREPARING;
mutex_unlock(&dpm_list_mtx);
- error = prepare_device(dev, state);
+ error = device_prepare(dev, state);
mutex_lock(&dpm_list_mtx);
if (error) {
@@ -803,12 +781,12 @@ static int dpm_prepare(pm_message_t state)
}
/**
- * device_suspend - Save state and stop all devices in system.
+ * dpm_suspend_start - Save state and stop all devices in system.
* @state: PM transition of the system being carried out.
*
* Prepare and suspend all devices.
*/
-int device_suspend(pm_message_t state)
+int dpm_suspend_start(pm_message_t state)
{
int error;
@@ -818,7 +796,7 @@ int device_suspend(pm_message_t state)
error = dpm_suspend(state);
return error;
}
-EXPORT_SYMBOL_GPL(device_suspend);
+EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, void *fn, int ret)
{
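For code that drives a system sleep transition, the renames above amount to a one-for-one substitution. A hedged sketch of the suspend-side sequence using the new names, with interrupt and sysdev handling trimmed for brevity:

#include <linux/pm.h>
#include <linux/suspend.h>

static int example_enter_sleep(void)
{
	int error;

	/* was device_suspend(): prepare and suspend all devices */
	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error)
		return error;

	/* was device_power_down(): "noirq" phase */
	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error)
		goto resume_devices;

	/* ... platform enters the sleep state here ... */

	/* was device_power_up() */
	dpm_resume_noirq(PMSG_RESUME);
resume_devices:
	/* was device_resume() */
	dpm_resume_end(PMSG_RESUME);
	return error;
}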
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 3236b434b96..79a9ae5238a 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -131,6 +131,8 @@ static struct kset *system_kset;
int sysdev_class_register(struct sysdev_class *cls)
{
+ int retval;
+
pr_debug("Registering sysdev class '%s'\n", cls->name);
INIT_LIST_HEAD(&cls->drivers);
@@ -138,7 +140,11 @@ int sysdev_class_register(struct sysdev_class *cls)
cls->kset.kobj.parent = &system_kset->kobj;
cls->kset.kobj.ktype = &ktype_sysdev_class;
cls->kset.kobj.kset = system_kset;
- kobject_set_name(&cls->kset.kobj, cls->name);
+
+ retval = kobject_set_name(&cls->kset.kobj, "%s", cls->name);
+ if (retval)
+ return retval;
+
return kset_register(&cls->kset);
}
@@ -343,11 +349,15 @@ static void __sysdev_resume(struct sys_device *dev)
/* First, call the class-specific one */
if (cls->resume)
cls->resume(dev);
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled after %pF\n", cls->resume);
/* Call auxillary drivers next. */
list_for_each_entry(drv, &cls->drivers, entry) {
if (drv->resume)
drv->resume(dev);
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled after %pF\n", drv->resume);
}
}
@@ -377,6 +387,9 @@ int sysdev_suspend(pm_message_t state)
if (ret)
return ret;
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled while suspending system devices\n");
+
pr_debug("Suspending System Devices\n");
list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
@@ -393,6 +406,9 @@ int sysdev_suspend(pm_message_t state)
if (ret)
goto aux_driver;
}
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled after %pF\n",
+ drv->suspend);
}
/* Now call the generic one */
@@ -400,6 +416,9 @@ int sysdev_suspend(pm_message_t state)
ret = cls->suspend(sysdev, state);
if (ret)
goto cls_driver;
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled after %pF\n",
+ cls->suspend);
}
}
}
@@ -452,6 +471,9 @@ int sysdev_resume(void)
{
struct sysdev_class *cls;
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled while resuming system devices\n");
+
pr_debug("Resuming System Devices\n");
list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) {
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index f22ed6cc69f..668dc234b8e 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3321,7 +3321,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
DAC960_Command_T *Command;
while(1) {
- Request = elv_next_request(req_q);
+ Request = blk_peek_request(req_q);
if (!Request)
return 1;
@@ -3338,10 +3338,10 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
}
Command->Completion = Request->end_io_data;
Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
- Command->BlockNumber = Request->sector;
- Command->BlockCount = Request->nr_sectors;
+ Command->BlockNumber = blk_rq_pos(Request);
+ Command->BlockCount = blk_rq_sectors(Request);
Command->Request = Request;
- blkdev_dequeue_request(Request);
+ blk_start_request(Request);
Command->SegmentCount = blk_rq_map_sg(req_q,
Command->Request, Command->cmd_sglist);
/* pci_map_sg MAY change the value of SegCount */
@@ -3431,7 +3431,7 @@ static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
* successfully as possible.
*/
Command->SegmentCount = 1;
- Command->BlockNumber = Request->sector;
+ Command->BlockNumber = blk_rq_pos(Request);
Command->BlockCount = 1;
DAC960_QueueReadWriteCommand(Command);
return;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ddea8e485cc..bb72ada9f07 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -112,7 +112,7 @@ config GDROM
with up to 1 GB of data. This drive will also read standard CD ROM
disks. Select this option to access any disks in your GD ROM drive.
Most users will want to say "Y" here.
- You can also build this as a module which will be called gdrom.ko
+ You can also build this as a module which will be called gdrom.
source "drivers/block/paride/Kconfig"
@@ -412,7 +412,7 @@ config ATA_OVER_ETH
config MG_DISK
tristate "mGine mflash, gflash support"
- depends on ARM && ATA && GPIOLIB
+ depends on ARM && GPIOLIB
help
mGine mFlash(gFlash) block device driver
@@ -438,7 +438,7 @@ source "drivers/s390/block/Kconfig"
config XILINX_SYSACE
tristate "Xilinx SystemACE support"
- depends on 4xx
+ depends on 4xx || MICROBLAZE
help
Include support for the Xilinx SystemACE CompactFlash interface
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8df436ff706..9c6e5b0fe89 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -112,8 +112,6 @@ module_param(fd_def_df0, ulong, 0);
MODULE_LICENSE("GPL");
static struct request_queue *floppy_queue;
-#define QUEUE (floppy_queue)
-#define CURRENT elv_next_request(floppy_queue)
/*
* Macros
@@ -1335,64 +1333,60 @@ static int get_track(int drive, int track)
static void redo_fd_request(void)
{
+ struct request *rq;
unsigned int cnt, block, track, sector;
int drive;
struct amiga_floppy_struct *floppy;
char *data;
unsigned long flags;
+ int err;
- repeat:
- if (!CURRENT) {
+next_req:
+ rq = blk_fetch_request(floppy_queue);
+ if (!rq) {
/* Nothing left to do */
return;
}
- floppy = CURRENT->rq_disk->private_data;
+ floppy = rq->rq_disk->private_data;
drive = floppy - unit;
+next_segment:
/* Here someone could investigate to be more efficient */
- for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) {
+ for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
#ifdef DEBUG
printk("fd: sector %ld + %d requested for %s\n",
- CURRENT->sector,cnt,
- (rq_data_dir(CURRENT) == READ) ? "read" : "write");
+ blk_rq_pos(rq), cnt,
+ (rq_data_dir(rq) == READ) ? "read" : "write");
#endif
- block = CURRENT->sector + cnt;
+ block = blk_rq_pos(rq) + cnt;
if ((int)block > floppy->blocks) {
- end_request(CURRENT, 0);
- goto repeat;
+ err = -EIO;
+ break;
}
track = block / (floppy->dtype->sects * floppy->type->sect_mult);
sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
- data = CURRENT->buffer + 512 * cnt;
+ data = rq->buffer + 512 * cnt;
#ifdef DEBUG
printk("access to track %d, sector %d, with buffer at "
"0x%08lx\n", track, sector, data);
#endif
- if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
- printk(KERN_WARNING "do_fd_request: unknown command\n");
- end_request(CURRENT, 0);
- goto repeat;
- }
if (get_track(drive, track) == -1) {
- end_request(CURRENT, 0);
- goto repeat;
+ err = -EIO;
+ break;
}
- switch (rq_data_dir(CURRENT)) {
- case READ:
+ if (rq_data_dir(rq) == READ) {
memcpy(data, floppy->trackbuf + sector * 512, 512);
- break;
-
- case WRITE:
+ } else {
memcpy(floppy->trackbuf + sector * 512, data, 512);
/* keep the drive spinning while writes are scheduled */
if (!fd_motor_on(drive)) {
- end_request(CURRENT, 0);
- goto repeat;
+ err = -EIO;
+ break;
}
/*
* setup a callback to write the track buffer
@@ -1404,14 +1398,12 @@ static void redo_fd_request(void)
/* reset the timer */
mod_timer (flush_track_timer + drive, jiffies + 1);
local_irq_restore(flags);
- break;
}
}
- CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
- CURRENT->sector += CURRENT->current_nr_sectors;
- end_request(CURRENT, 1);
- goto repeat;
+ if (__blk_end_request_cur(rq, err))
+ goto next_segment;
+ goto next_req;
}
static void do_fd_request(struct request_queue * q)
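The floppy conversions above follow the new block-layer request pattern: fetch a request off the queue, complete it chunk by chunk with __blk_end_request_cur(), and only fetch the next request once the current one is finished. A minimal sketch of a request function in that style; the actual data transfer is a placeholder:

#include <linux/blkdev.h>

static void example_do_request(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		int err = 0;

		do {
			/* handle blk_rq_cur_sectors(rq) sectors starting at
			 * blk_rq_pos(rq), reading into or writing from
			 * rq->buffer; set err = -EIO on failure */
		} while (__blk_end_request_cur(rq, err));
		/* __blk_end_request_cur() returns false once rq is done */
	}
}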
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 200efc4d2c1..19888354188 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -266,6 +266,11 @@ static const struct file_operations aoe_fops = {
.owner = THIS_MODULE,
};
+static char *aoe_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
+}
+
int __init
aoechr_init(void)
{
@@ -283,6 +288,8 @@ aoechr_init(void)
unregister_chrdev(AOE_MAJOR, "aoechr");
return PTR_ERR(aoe_class);
}
+ aoe_class->nodename = aoe_nodename;
+
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
device_create(aoe_class, NULL,
MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 31693bc2444..965ece2c7e4 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -34,13 +34,6 @@ new_skb(ulong len)
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb->protocol = __constant_htons(ETH_P_AOE);
- skb->priority = 0;
- skb->next = skb->prev = NULL;
-
- /* tell the network layer not to perform IP checksums
- * or to get the NIC to do it
- */
- skb->ip_summed = CHECKSUM_NONE;
}
return skb;
}
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4234c11c1e4..f5e7180d7f4 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -79,9 +79,7 @@
#undef DEBUG
static struct request_queue *floppy_queue;
-
-#define QUEUE (floppy_queue)
-#define CURRENT elv_next_request(floppy_queue)
+static struct request *fd_request;
/* Disk types: DD, HD, ED */
static struct atari_disk_type {
@@ -376,6 +374,12 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
static DEFINE_TIMER(fd_timer, check_change, 0, 0);
+static void fd_end_request_cur(int err)
+{
+ if (!__blk_end_request_cur(fd_request, err))
+ fd_request = NULL;
+}
+
static inline void start_motor_off_timer(void)
{
mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
@@ -606,15 +610,15 @@ static void fd_error( void )
return;
}
- if (!CURRENT)
+ if (!fd_request)
return;
- CURRENT->errors++;
- if (CURRENT->errors >= MAX_ERRORS) {
+ fd_request->errors++;
+ if (fd_request->errors >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
- end_request(CURRENT, 0);
+ fd_end_request_cur(-EIO);
}
- else if (CURRENT->errors == RECALIBRATE_ERRORS) {
+ else if (fd_request->errors == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
if (SelectedDrive != -1)
SUD.track = -1;
@@ -725,16 +729,14 @@ static void do_fd_action( int drive )
if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
if (ReqCmd == READ) {
copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
- if (++ReqCnt < CURRENT->current_nr_sectors) {
+ if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
/* read next sector */
setup_req_params( drive );
goto repeat;
}
else {
/* all sectors finished */
- CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
- CURRENT->sector += CURRENT->current_nr_sectors;
- end_request(CURRENT, 1);
+ fd_end_request_cur(0);
redo_fd_request();
return;
}
@@ -1132,16 +1134,14 @@ static void fd_rwsec_done1(int status)
}
}
- if (++ReqCnt < CURRENT->current_nr_sectors) {
+ if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
/* read next sector */
setup_req_params( SelectedDrive );
do_fd_action( SelectedDrive );
}
else {
/* all sectors finished */
- CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
- CURRENT->sector += CURRENT->current_nr_sectors;
- end_request(CURRENT, 1);
+ fd_end_request_cur(0);
redo_fd_request();
}
return;
@@ -1382,7 +1382,7 @@ static void setup_req_params( int drive )
ReqData = ReqBuffer + 512 * ReqCnt;
if (UseTrackbuffer)
- read_track = (ReqCmd == READ && CURRENT->errors == 0);
+ read_track = (ReqCmd == READ && fd_request->errors == 0);
else
read_track = 0;
@@ -1396,25 +1396,27 @@ static void redo_fd_request(void)
int drive, type;
struct atari_floppy_struct *floppy;
- DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
- CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
- CURRENT ? CURRENT->sector : 0 ));
+ DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n",
+ fd_request, fd_request ? fd_request->rq_disk->disk_name : "",
+ fd_request ? blk_rq_pos(fd_request) : 0 ));
IsFormatting = 0;
repeat:
+ if (!fd_request) {
+ fd_request = blk_fetch_request(floppy_queue);
+ if (!fd_request)
+ goto the_end;
+ }
- if (!CURRENT)
- goto the_end;
-
- floppy = CURRENT->rq_disk->private_data;
+ floppy = fd_request->rq_disk->private_data;
drive = floppy - unit;
type = floppy->type;
if (!UD.connected) {
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
- end_request(CURRENT, 0);
+ fd_end_request_cur(-EIO);
goto repeat;
}
@@ -1430,12 +1432,12 @@ repeat:
/* user supplied disk type */
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
- end_request(CURRENT, 0);
+ fd_end_request_cur(-EIO);
goto repeat;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
- end_request(CURRENT, 0);
+ fd_end_request_cur(-EIO);
goto repeat;
}
type = minor2disktype[type].index;
@@ -1444,8 +1446,8 @@ repeat:
UD.autoprobe = 0;
}
- if (CURRENT->sector + 1 > UDT->blocks) {
- end_request(CURRENT, 0);
+ if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
+ fd_end_request_cur(-EIO);
goto repeat;
}
@@ -1453,9 +1455,9 @@ repeat:
del_timer( &motor_off_timer );
ReqCnt = 0;
- ReqCmd = rq_data_dir(CURRENT);
- ReqBlock = CURRENT->sector;
- ReqBuffer = CURRENT->buffer;
+ ReqCmd = rq_data_dir(fd_request);
+ ReqBlock = blk_rq_pos(fd_request);
+ ReqBuffer = fd_request->buffer;
setup_req_params( drive );
do_fd_action( drive );
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 5f7e64ba87e..4bf8705b3ac 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -407,12 +407,7 @@ static int __init ramdisk_size(char *str)
rd_size = simple_strtol(str, NULL, 0);
return 1;
}
-static int __init ramdisk_size2(char *str)
-{
- return ramdisk_size(str);
-}
-__setup("ramdisk=", ramdisk_size);
-__setup("ramdisk_size=", ramdisk_size2);
+__setup("ramdisk_size=", ramdisk_size);
#endif
/*
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 4d4d5e0d3fa..c7a527c08a0 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -38,7 +38,6 @@
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
-#include <linux/blktrace_api.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -180,11 +179,13 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
__u32);
static void start_io(ctlr_info_t *h);
static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
- unsigned int use_unit_num, unsigned int log_unit,
__u8 page_code, unsigned char *scsi3addr, int cmd_type);
static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
- unsigned int use_unit_num, unsigned int log_unit,
- __u8 page_code, int cmd_type);
+ __u8 page_code, unsigned char scsi3addr[],
+ int cmd_type);
+static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
+ int attempt_retry);
+static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
static void fail_all_cmds(unsigned long ctlr);
static int scan_thread(void *data);
@@ -437,6 +438,194 @@ static void __devinit cciss_procinit(int i)
}
#endif /* CONFIG_PROC_FS */
+#define MAX_PRODUCT_NAME_LEN 19
+
+#define to_hba(n) container_of(n, struct ctlr_info, dev)
+#define to_drv(n) container_of(n, drive_info_struct, dev)
+
+static struct device_type cciss_host_type = {
+ .name = "cciss_host",
+};
+
+static ssize_t dev_show_unique_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ drive_info_struct *drv = to_drv(dev);
+ struct ctlr_info *h = to_hba(drv->dev.parent);
+ __u8 sn[16];
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->busy_configuring)
+ ret = -EBUSY;
+ else
+ memcpy(sn, drv->serial_no, sizeof(sn));
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+ if (ret)
+ return ret;
+ else
+ return snprintf(buf, 16 * 2 + 2,
+ "%02X%02X%02X%02X%02X%02X%02X%02X"
+ "%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ sn[0], sn[1], sn[2], sn[3],
+ sn[4], sn[5], sn[6], sn[7],
+ sn[8], sn[9], sn[10], sn[11],
+ sn[12], sn[13], sn[14], sn[15]);
+}
+DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
+
+static ssize_t dev_show_vendor(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ drive_info_struct *drv = to_drv(dev);
+ struct ctlr_info *h = to_hba(drv->dev.parent);
+ char vendor[VENDOR_LEN + 1];
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->busy_configuring)
+ ret = -EBUSY;
+ else
+ memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+ if (ret)
+ return ret;
+ else
+ return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor);
+}
+DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
+
+static ssize_t dev_show_model(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ drive_info_struct *drv = to_drv(dev);
+ struct ctlr_info *h = to_hba(drv->dev.parent);
+ char model[MODEL_LEN + 1];
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->busy_configuring)
+ ret = -EBUSY;
+ else
+ memcpy(model, drv->model, MODEL_LEN + 1);
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+ if (ret)
+ return ret;
+ else
+ return snprintf(buf, sizeof(model) + 1, "%s\n", model);
+}
+DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
+
+static ssize_t dev_show_rev(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ drive_info_struct *drv = to_drv(dev);
+ struct ctlr_info *h = to_hba(drv->dev.parent);
+ char rev[REV_LEN + 1];
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ if (h->busy_configuring)
+ ret = -EBUSY;
+ else
+ memcpy(rev, drv->rev, REV_LEN + 1);
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+ if (ret)
+ return ret;
+ else
+ return snprintf(buf, sizeof(rev) + 1, "%s\n", rev);
+}
+DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
+
+static struct attribute *cciss_dev_attrs[] = {
+ &dev_attr_unique_id.attr,
+ &dev_attr_model.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_rev.attr,
+ NULL
+};
+
+static struct attribute_group cciss_dev_attr_group = {
+ .attrs = cciss_dev_attrs,
+};
+
+static struct attribute_group *cciss_dev_attr_groups[] = {
+ &cciss_dev_attr_group,
+ NULL
+};
+
+static struct device_type cciss_dev_type = {
+ .name = "cciss_device",
+ .groups = cciss_dev_attr_groups,
+};
+
+static struct bus_type cciss_bus_type = {
+ .name = "cciss",
+};
+
+
+/*
+ * Initialize sysfs entry for each controller. This sets up and registers
+ * the 'cciss#' directory for each individual controller under
+ * /sys/bus/pci/devices/<dev>/.
+ */
+static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
+{
+ device_initialize(&h->dev);
+ h->dev.type = &cciss_host_type;
+ h->dev.bus = &cciss_bus_type;
+ dev_set_name(&h->dev, "%s", h->devname);
+ h->dev.parent = &h->pdev->dev;
+
+ return device_add(&h->dev);
+}
+
+/*
+ * Remove sysfs entries for an hba.
+ */
+static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
+{
+ device_del(&h->dev);
+}
+
+/*
+ * Initialize sysfs for each logical drive. This sets up and registers
+ * the 'c#d#' directory for each individual logical drive under
+ * /sys/bus/pci/devices/<dev>/cciss#/. We also create a link from
+ * /sys/block/cciss!c#d# to this entry.
+ */
+static int cciss_create_ld_sysfs_entry(struct ctlr_info *h,
+ drive_info_struct *drv,
+ int drv_index)
+{
+ device_initialize(&drv->dev);
+ drv->dev.type = &cciss_dev_type;
+ drv->dev.bus = &cciss_bus_type;
+ dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index);
+ drv->dev.parent = &h->dev;
+ return device_add(&drv->dev);
+}
+
+/*
+ * Remove sysfs entries for a logical drive.
+ */
+static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv)
+{
+ device_del(&drv->dev);
+}
+
/*
* For operations that cannot sleep, a command block is allocated at init,
* and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
@@ -1299,7 +1488,6 @@ static void cciss_softirq_done(struct request *rq)
{
CommandList_struct *cmd = rq->completion_data;
ctlr_info_t *h = hba[cmd->ctlr];
- unsigned int nr_bytes;
unsigned long flags;
u64bit temp64;
int i, ddir;
@@ -1321,15 +1509,11 @@ static void cciss_softirq_done(struct request *rq)
printk("Done with %p\n", rq);
#endif /* CCISS_DEBUG */
- /*
- * Store the full size and set the residual count for pc requests
- */
- nr_bytes = blk_rq_bytes(rq);
+ /* set the residual count for pc requests */
if (blk_pc_request(rq))
- rq->data_len = cmd->err_info->ResidualCnt;
+ rq->resid_len = cmd->err_info->ResidualCnt;
- if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, nr_bytes))
- BUG();
+ blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
spin_lock_irqsave(&h->lock, flags);
cmd_free(h, cmd, 1);
@@ -1337,6 +1521,56 @@ static void cciss_softirq_done(struct request *rq)
spin_unlock_irqrestore(&h->lock, flags);
}
+static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[],
+ uint32_t log_unit)
+{
+ log_unit = h->drv[log_unit].LunID & 0x03fff;
+ memset(&scsi3addr[4], 0, 4);
+ memcpy(&scsi3addr[0], &log_unit, 4);
+ scsi3addr[3] |= 0x40;
+}
+
+/* This function gets the SCSI vendor, model, and revision of a logical drive
+ * via the inquiry page 0. Model, vendor, and rev are set to empty strings if
+ * they cannot be read.
+ */
+static void cciss_get_device_descr(int ctlr, int logvol, int withirq,
+ char *vendor, char *model, char *rev)
+{
+ int rc;
+ InquiryData_struct *inq_buf;
+ unsigned char scsi3addr[8];
+
+ *vendor = '\0';
+ *model = '\0';
+ *rev = '\0';
+
+ inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+ if (!inq_buf)
+ return;
+
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+ if (withirq)
+ rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf,
+ sizeof(InquiryData_struct), 0,
+ scsi3addr, TYPE_CMD);
+ else
+ rc = sendcmd(CISS_INQUIRY, ctlr, inq_buf,
+ sizeof(InquiryData_struct), 0,
+ scsi3addr, TYPE_CMD);
+ if (rc == IO_OK) {
+ memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
+ vendor[VENDOR_LEN] = '\0';
+ memcpy(model, &inq_buf->data_byte[16], MODEL_LEN);
+ model[MODEL_LEN] = '\0';
+ memcpy(rev, &inq_buf->data_byte[32], REV_LEN);
+ rev[REV_LEN] = '\0';
+ }
+
+ kfree(inq_buf);
+ return;
+}
+
/* This function gets the serial number of a logical drive via
* inquiry page 0x83. Serial no. is 16 bytes. If the serial
* number cannot be had, for whatever reason, 16 bytes of 0xff
@@ -1348,6 +1582,7 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
#define PAGE_83_INQ_BYTES 64
int rc;
unsigned char *buf;
+ unsigned char scsi3addr[8];
if (buflen > 16)
buflen = 16;
@@ -1356,12 +1591,13 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
if (!buf)
return;
memset(serial_no, 0, buflen);
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
if (withirq)
rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
- PAGE_83_INQ_BYTES, 1, logvol, 0x83, TYPE_CMD);
+ PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
else
rc = sendcmd(CISS_INQUIRY, ctlr, buf,
- PAGE_83_INQ_BYTES, 1, logvol, 0x83, NULL, TYPE_CMD);
+ PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
if (rc == IO_OK)
memcpy(serial_no, &buf[8], buflen);
kfree(buf);
@@ -1377,7 +1613,7 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
disk->first_minor = drv_index << NWD_SHIFT;
disk->fops = &cciss_fops;
disk->private_data = &h->drv[drv_index];
- disk->driverfs_dev = &h->pdev->dev;
+ disk->driverfs_dev = &h->drv[drv_index].dev;
/* Set up queue information */
blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -1394,8 +1630,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
disk->queue->queuedata = h;
- blk_queue_hardsect_size(disk->queue,
- h->drv[drv_index].block_size);
+ blk_queue_logical_block_size(disk->queue,
+ h->drv[drv_index].block_size);
/* Make sure all queue data is written out before */
/* setting h->drv[drv_index].queue, as setting this */
@@ -1468,6 +1704,8 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
drvinfo->block_size = block_size;
drvinfo->nr_blocks = total_size + 1;
+ cciss_get_device_descr(ctlr, drv_index, 1, drvinfo->vendor,
+ drvinfo->model, drvinfo->rev);
cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
sizeof(drvinfo->serial_no));
@@ -1517,6 +1755,9 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
h->drv[drv_index].cylinders = drvinfo->cylinders;
h->drv[drv_index].raid_level = drvinfo->raid_level;
memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16);
+ memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1);
+ memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1);
+ memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1);
++h->num_luns;
disk = h->gendisk[drv_index];
@@ -1591,6 +1832,8 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
}
}
h->drv[drv_index].LunID = lunid;
+ if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index))
+ goto err_free_disk;
/* Don't need to mark this busy because nobody */
/* else knows about this disk yet to contend */
@@ -1598,6 +1841,11 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
h->drv[drv_index].busy_configuring = 0;
wmb();
return drv_index;
+
+err_free_disk:
+ put_disk(h->gendisk[drv_index]);
+ h->gendisk[drv_index] = NULL;
+ return -1;
}
/* This is for the special case of a controller which
@@ -1668,8 +1916,8 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
goto mem_msg;
return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
- sizeof(ReportLunData_struct), 0,
- 0, 0, TYPE_CMD);
+ sizeof(ReportLunData_struct),
+ 0, CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK)
listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
@@ -1718,6 +1966,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
h->drv[i].busy_configuring = 1;
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
return_code = deregister_disk(h, i, 1);
+ cciss_destroy_ld_sysfs_entry(&h->drv[i]);
h->drv[i].busy_configuring = 0;
}
}
@@ -1877,11 +2126,9 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
return 0;
}
-static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
- 1: address logical volume log_unit,
- 2: periph device address is scsi3addr */
- unsigned int log_unit, __u8 page_code,
- unsigned char *scsi3addr, int cmd_type)
+static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ size_t size, __u8 page_code, unsigned char *scsi3addr,
+ int cmd_type)
{
ctlr_info_t *h = hba[ctlr];
u64bit buff_dma_handle;
@@ -1897,27 +2144,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
c->Header.SGTotal = 0;
}
c->Header.Tag.lower = c->busaddr;
+ memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
c->Request.Type.Type = cmd_type;
if (cmd_type == TYPE_CMD) {
switch (cmd) {
case CISS_INQUIRY:
- /* If the logical unit number is 0 then, this is going
- to controller so It's a physical command
- mode = 0 target = 0. So we have nothing to write.
- otherwise, if use_unit_num == 1,
- mode = 1(volume set addressing) target = LUNID
- otherwise, if use_unit_num == 2,
- mode = 0(periph dev addr) target = scsi3addr */
- if (use_unit_num == 1) {
- c->Header.LUN.LogDev.VolId =
- h->drv[log_unit].LunID;
- c->Header.LUN.LogDev.Mode = 1;
- } else if (use_unit_num == 2) {
- memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
- 8);
- c->Header.LUN.LogDev.Mode = 0;
- }
/* are we trying to read a vital product page */
if (page_code != 0) {
c->Request.CDB[1] = 0x01;
@@ -1947,8 +2179,6 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
break;
case CCISS_READ_CAPACITY:
- c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
- c->Header.LUN.LogDev.Mode = 1;
c->Request.CDBLen = 10;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_READ;
@@ -1956,8 +2186,6 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
c->Request.CDB[0] = cmd;
break;
case CCISS_READ_CAPACITY_16:
- c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
- c->Header.LUN.LogDev.Mode = 1;
c->Request.CDBLen = 16;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_READ;
@@ -1979,6 +2207,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
break;
+ case TEST_UNIT_READY:
+ c->Request.CDBLen = 6;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_NONE;
+ c->Request.Timeout = 0;
+ break;
default:
printk(KERN_WARNING
"cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
@@ -1997,13 +2231,13 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
memcpy(&c->Request.CDB[4], buff, 8);
break;
case 1: /* RESET message */
- c->Request.CDBLen = 12;
+ c->Request.CDBLen = 16;
c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_WRITE;
+ c->Request.Type.Direction = XFER_NONE;
c->Request.Timeout = 0;
memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
c->Request.CDB[0] = cmd; /* reset */
- c->Request.CDB[1] = 0x04; /* reset a LUN */
+ c->Request.CDB[1] = 0x03; /* reset a target */
break;
case 3: /* No-Op message */
c->Request.CDBLen = 1;
@@ -2035,114 +2269,152 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
return status;
}
-static int sendcmd_withirq(__u8 cmd,
- int ctlr,
- void *buff,
- size_t size,
- unsigned int use_unit_num,
- unsigned int log_unit, __u8 page_code, int cmd_type)
+static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
{
- ctlr_info_t *h = hba[ctlr];
- CommandList_struct *c;
+ switch (c->err_info->ScsiStatus) {
+ case SAM_STAT_GOOD:
+ return IO_OK;
+ case SAM_STAT_CHECK_CONDITION:
+ switch (0xf & c->err_info->SenseInfo[2]) {
+ case 0: return IO_OK; /* no sense */
+ case 1: return IO_OK; /* recovered error */
+ default:
+ printk(KERN_WARNING "cciss%d: cmd 0x%02x "
+ "check condition, sense key = 0x%02x\n",
+ h->ctlr, c->Request.CDB[0],
+ c->err_info->SenseInfo[2]);
+ }
+ break;
+ default:
+ printk(KERN_WARNING "cciss%d: cmd 0x%02x"
+ "scsi status = 0x%02x\n", h->ctlr,
+ c->Request.CDB[0], c->err_info->ScsiStatus);
+ break;
+ }
+ return IO_ERROR;
+}
+
+static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
+{
+ int return_status = IO_OK;
+
+ if (c->err_info->CommandStatus == CMD_SUCCESS)
+ return IO_OK;
+
+ switch (c->err_info->CommandStatus) {
+ case CMD_TARGET_STATUS:
+ return_status = check_target_status(h, c);
+ break;
+ case CMD_DATA_UNDERRUN:
+ case CMD_DATA_OVERRUN:
+ /* expected for inquiry and report lun commands */
+ break;
+ case CMD_INVALID:
+ printk(KERN_WARNING "cciss: cmd 0x%02x is "
+ "reported invalid\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_PROTOCOL_ERR:
+ printk(KERN_WARNING "cciss: cmd 0x%02x has "
+ "protocol error \n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_HARDWARE_ERR:
+ printk(KERN_WARNING "cciss: cmd 0x%02x had "
+ " hardware error\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_CONNECTION_LOST:
+ printk(KERN_WARNING "cciss: cmd 0x%02x had "
+ "connection lost\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_ABORTED:
+ printk(KERN_WARNING "cciss: cmd 0x%02x was "
+ "aborted\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_ABORT_FAILED:
+ printk(KERN_WARNING "cciss: cmd 0x%02x reports "
+ "abort failed\n", c->Request.CDB[0]);
+ return_status = IO_ERROR;
+ break;
+ case CMD_UNSOLICITED_ABORT:
+ printk(KERN_WARNING
+ "cciss%d: unsolicited abort 0x%02x\n", h->ctlr,
+ c->Request.CDB[0]);
+ return_status = IO_NEEDS_RETRY;
+ break;
+ default:
+ printk(KERN_WARNING "cciss: cmd 0x%02x returned "
+ "unknown status %x\n", c->Request.CDB[0],
+ c->err_info->CommandStatus);
+ return_status = IO_ERROR;
+ }
+ return return_status;
+}
+
+static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
+ int attempt_retry)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
u64bit buff_dma_handle;
unsigned long flags;
- int return_status;
- DECLARE_COMPLETION_ONSTACK(wait);
+ int return_status = IO_OK;
- if ((c = cmd_alloc(h, 0)) == NULL)
- return -ENOMEM;
- return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
- log_unit, page_code, NULL, cmd_type);
- if (return_status != IO_OK) {
- cmd_free(h, c, 0);
- return return_status;
- }
- resend_cmd2:
+resend_cmd2:
c->waiting = &wait;
-
/* Put the request on the tail of the queue and send it */
- spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
addQ(&h->reqQ, c);
h->Qdepth++;
start_io(h);
- spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
wait_for_completion(&wait);
- if (c->err_info->CommandStatus != 0) { /* an error has occurred */
- switch (c->err_info->CommandStatus) {
- case CMD_TARGET_STATUS:
- printk(KERN_WARNING "cciss: cmd %p has "
- " completed with errors\n", c);
- if (c->err_info->ScsiStatus) {
- printk(KERN_WARNING "cciss: cmd %p "
- "has SCSI Status = %x\n",
- c, c->err_info->ScsiStatus);
- }
+ if (c->err_info->CommandStatus == 0 || !attempt_retry)
+ goto command_done;
- break;
- case CMD_DATA_UNDERRUN:
- case CMD_DATA_OVERRUN:
- /* expected for inquire and report lun commands */
- break;
- case CMD_INVALID:
- printk(KERN_WARNING "cciss: Cmd %p is "
- "reported invalid\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_PROTOCOL_ERR:
- printk(KERN_WARNING "cciss: cmd %p has "
- "protocol error \n", c);
- return_status = IO_ERROR;
- break;
- case CMD_HARDWARE_ERR:
- printk(KERN_WARNING "cciss: cmd %p had "
- " hardware error\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_CONNECTION_LOST:
- printk(KERN_WARNING "cciss: cmd %p had "
- "connection lost\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_ABORTED:
- printk(KERN_WARNING "cciss: cmd %p was "
- "aborted\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_ABORT_FAILED:
- printk(KERN_WARNING "cciss: cmd %p reports "
- "abort failed\n", c);
- return_status = IO_ERROR;
- break;
- case CMD_UNSOLICITED_ABORT:
- printk(KERN_WARNING
- "cciss%d: unsolicited abort %p\n", ctlr, c);
- if (c->retry_count < MAX_CMD_RETRIES) {
- printk(KERN_WARNING
- "cciss%d: retrying %p\n", ctlr, c);
- c->retry_count++;
- /* erase the old error information */
- memset(c->err_info, 0,
- sizeof(ErrorInfo_struct));
- return_status = IO_OK;
- INIT_COMPLETION(wait);
- goto resend_cmd2;
- }
- return_status = IO_ERROR;
- break;
- default:
- printk(KERN_WARNING "cciss: cmd %p returned "
- "unknown status %x\n", c,
- c->err_info->CommandStatus);
- return_status = IO_ERROR;
- }
+ return_status = process_sendcmd_error(h, c);
+
+ if (return_status == IO_NEEDS_RETRY &&
+ c->retry_count < MAX_CMD_RETRIES) {
+ printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr,
+ c->Request.CDB[0]);
+ c->retry_count++;
+ /* erase the old error information */
+ memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+ return_status = IO_OK;
+ INIT_COMPLETION(wait);
+ goto resend_cmd2;
}
+
+command_done:
/* unlock the buffers from DMA */
buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
+ return return_status;
+}
+
+static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
+ __u8 page_code, unsigned char scsi3addr[],
+ int cmd_type)
+{
+ ctlr_info_t *h = hba[ctlr];
+ CommandList_struct *c;
+ int return_status;
+
+ c = cmd_alloc(h, 0);
+ if (!c)
+ return -ENOMEM;
+ return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
+ scsi3addr, cmd_type);
+ if (return_status == IO_OK)
+ return_status = sendcmd_withirq_core(h, c, 1);
+
cmd_free(h, c, 0);
return return_status;
}
@@ -2155,15 +2427,17 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
{
int return_code;
unsigned long t;
+ unsigned char scsi3addr[8];
memset(inq_buff, 0, sizeof(InquiryData_struct));
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
if (withirq)
return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
- inq_buff, sizeof(*inq_buff), 1,
- logvol, 0xC1, TYPE_CMD);
+ inq_buff, sizeof(*inq_buff),
+ 0xC1, scsi3addr, TYPE_CMD);
else
return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
- sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
+ sizeof(*inq_buff), 0xC1, scsi3addr,
TYPE_CMD);
if (return_code == IO_OK) {
if (inq_buff->data_byte[8] == 0xFF) {
@@ -2204,6 +2478,7 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
{
ReadCapdata_struct *buf;
int return_code;
+ unsigned char scsi3addr[8];
buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
if (!buf) {
@@ -2211,14 +2486,15 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
return;
}
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
if (withirq)
return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
ctlr, buf, sizeof(ReadCapdata_struct),
- 1, logvol, 0, TYPE_CMD);
+ 0, scsi3addr, TYPE_CMD);
else
return_code = sendcmd(CCISS_READ_CAPACITY,
ctlr, buf, sizeof(ReadCapdata_struct),
- 1, logvol, 0, NULL, TYPE_CMD);
+ 0, scsi3addr, TYPE_CMD);
if (return_code == IO_OK) {
*total_size = be32_to_cpu(*(__be32 *) buf->total_size);
*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
@@ -2238,6 +2514,7 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
{
ReadCapdata_struct_16 *buf;
int return_code;
+ unsigned char scsi3addr[8];
buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
if (!buf) {
@@ -2245,15 +2522,16 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
return;
}
+ log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
if (withirq) {
return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
ctlr, buf, sizeof(ReadCapdata_struct_16),
- 1, logvol, 0, TYPE_CMD);
+ 0, scsi3addr, TYPE_CMD);
}
else {
return_code = sendcmd(CCISS_READ_CAPACITY_16,
ctlr, buf, sizeof(ReadCapdata_struct_16),
- 1, logvol, 0, NULL, TYPE_CMD);
+ 0, scsi3addr, TYPE_CMD);
}
if (return_code == IO_OK) {
*total_size = be64_to_cpu(*(__be64 *) buf->total_size);
@@ -2303,7 +2581,7 @@ static int cciss_revalidate(struct gendisk *disk)
cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
inq_buff, drv);
- blk_queue_hardsect_size(drv->queue, drv->block_size);
+ blk_queue_logical_block_size(drv->queue, drv->block_size);
set_capacity(disk, drv->nr_blocks);
kfree(inq_buff);
@@ -2333,86 +2611,21 @@ static unsigned long pollcomplete(int ctlr)
return 1;
}
-static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
-{
- /* We get in here if sendcmd() is polling for completions
- and gets some command back that it wasn't expecting --
- something other than that which it just sent down.
- Ordinarily, that shouldn't happen, but it can happen when
- the scsi tape stuff gets into error handling mode, and
- starts using sendcmd() to try to abort commands and
- reset tape drives. In that case, sendcmd may pick up
- completions of commands that were sent to logical drives
- through the block i/o system, or cciss ioctls completing, etc.
- In that case, we need to save those completions for later
- processing by the interrupt handler.
- */
-
-#ifdef CONFIG_CISS_SCSI_TAPE
- struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
-
- /* If it's not the scsi tape stuff doing error handling, (abort */
- /* or reset) then we don't expect anything weird. */
- if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
-#endif
- printk(KERN_WARNING "cciss cciss%d: SendCmd "
- "Invalid command list address returned! (%lx)\n",
- ctlr, complete);
- /* not much we can do. */
-#ifdef CONFIG_CISS_SCSI_TAPE
- return 1;
- }
-
- /* We've sent down an abort or reset, but something else
- has completed */
- if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
- /* Uh oh. No room to save it for later... */
- printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
- "reject list overflow, command lost!\n", ctlr);
- return 1;
- }
- /* Save it for later */
- srl->complete[srl->ncompletions] = complete;
- srl->ncompletions++;
-#endif
- return 0;
-}
-
-/*
- * Send a command to the controller, and wait for it to complete.
- * Only used at init time.
+/* Send command c to controller h and poll for it to complete.
+ * Turns interrupts off on the board. Used at driver init time
+ * and during SCSI error recovery.
*/
-static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
- 1: address logical volume log_unit,
- 2: periph device address is scsi3addr */
- unsigned int log_unit,
- __u8 page_code, unsigned char *scsi3addr, int cmd_type)
+static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
{
- CommandList_struct *c;
int i;
unsigned long complete;
- ctlr_info_t *info_p = hba[ctlr];
+ int status = IO_ERROR;
u64bit buff_dma_handle;
- int status, done = 0;
- if ((c = cmd_alloc(info_p, 1)) == NULL) {
- printk(KERN_WARNING "cciss: unable to get memory");
- return IO_ERROR;
- }
- status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
- log_unit, page_code, scsi3addr, cmd_type);
- if (status != IO_OK) {
- cmd_free(info_p, c, 1);
- return status;
- }
- resend_cmd1:
- /*
- * Disable interrupt
- */
-#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss: turning intr off\n");
-#endif /* CCISS_DEBUG */
- info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
+resend_cmd1:
+
+ /* Disable interrupt on the board. */
+ h->access.set_intr_mask(h, CCISS_INTR_OFF);
/* Make sure there is room in the command FIFO */
/* Actually it should be completely empty at this time */
@@ -2420,21 +2633,15 @@ static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use
/* tape side of the driver. */
for (i = 200000; i > 0; i--) {
/* if fifo isn't full go */
- if (!(info_p->access.fifo_full(info_p))) {
-
+ if (!(h->access.fifo_full(h)))
break;
- }
udelay(10);
printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
- " waiting!\n", ctlr);
+ " waiting!\n", h->ctlr);
}
- /*
- * Send the cmd
- */
- info_p->access.submit_command(info_p, c);
- done = 0;
+ h->access.submit_command(h, c); /* Send the cmd */
do {
- complete = pollcomplete(ctlr);
+ complete = pollcomplete(h->ctlr);
#ifdef CCISS_DEBUG
printk(KERN_DEBUG "cciss: command completed\n");
@@ -2443,97 +2650,102 @@ static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use
if (complete == 1) {
printk(KERN_WARNING
"cciss cciss%d: SendCmd Timeout out, "
- "No command list address returned!\n", ctlr);
+ "No command list address returned!\n", h->ctlr);
status = IO_ERROR;
- done = 1;
break;
}
- /* This will need to change for direct lookup completions */
- if ((complete & CISS_ERROR_BIT)
- && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
- /* if data overrun or underun on Report command
- ignore it
- */
- if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
- (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
- (c->Request.CDB[0] == CISS_INQUIRY)) &&
- ((c->err_info->CommandStatus ==
- CMD_DATA_OVERRUN) ||
- (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
- )) {
- complete = c->busaddr;
- } else {
- if (c->err_info->CommandStatus ==
- CMD_UNSOLICITED_ABORT) {
- printk(KERN_WARNING "cciss%d: "
- "unsolicited abort %p\n",
- ctlr, c);
- if (c->retry_count < MAX_CMD_RETRIES) {
- printk(KERN_WARNING
- "cciss%d: retrying %p\n",
- ctlr, c);
- c->retry_count++;
- /* erase the old error */
- /* information */
- memset(c->err_info, 0,
- sizeof
- (ErrorInfo_struct));
- goto resend_cmd1;
- } else {
- printk(KERN_WARNING
- "cciss%d: retried %p too "
- "many times\n", ctlr, c);
- status = IO_ERROR;
- goto cleanup1;
- }
- } else if (c->err_info->CommandStatus ==
- CMD_UNABORTABLE) {
- printk(KERN_WARNING
- "cciss%d: command could not be aborted.\n",
- ctlr);
- status = IO_ERROR;
- goto cleanup1;
- }
- printk(KERN_WARNING "ciss ciss%d: sendcmd"
- " Error %x \n", ctlr,
- c->err_info->CommandStatus);
- printk(KERN_WARNING "ciss ciss%d: sendcmd"
- " offensive info\n"
- " size %x\n num %x value %x\n",
- ctlr,
- c->err_info->MoreErrInfo.Invalid_Cmd.
- offense_size,
- c->err_info->MoreErrInfo.Invalid_Cmd.
- offense_num,
- c->err_info->MoreErrInfo.Invalid_Cmd.
- offense_value);
- status = IO_ERROR;
- goto cleanup1;
- }
+ /* Make sure it's the command we're expecting. */
+ if ((complete & ~CISS_ERROR_BIT) != c->busaddr) {
+ printk(KERN_WARNING "cciss%d: Unexpected command "
+ "completion.\n", h->ctlr);
+ continue;
+ }
+
+ /* It is our command. If no error, we're done. */
+ if (!(complete & CISS_ERROR_BIT)) {
+ status = IO_OK;
+ break;
}
- /* This will need changing for direct lookup completions */
- if (complete != c->busaddr) {
- if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
- BUG(); /* we are pretty much hosed if we get here. */
+
+ /* There is an error... */
+
+ /* if data overrun or underun on Report command ignore it */
+ if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
+ (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
+ (c->Request.CDB[0] == CISS_INQUIRY)) &&
+ ((c->err_info->CommandStatus == CMD_DATA_OVERRUN) ||
+ (c->err_info->CommandStatus == CMD_DATA_UNDERRUN))) {
+ complete = c->busaddr;
+ status = IO_OK;
+ break;
+ }
+
+ if (c->err_info->CommandStatus == CMD_UNSOLICITED_ABORT) {
+ printk(KERN_WARNING "cciss%d: unsolicited abort %p\n",
+ h->ctlr, c);
+ if (c->retry_count < MAX_CMD_RETRIES) {
+ printk(KERN_WARNING "cciss%d: retrying %p\n",
+ h->ctlr, c);
+ c->retry_count++;
+ /* erase the old error information */
+ memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+ goto resend_cmd1;
}
- continue;
- } else
- done = 1;
- } while (!done);
+ printk(KERN_WARNING "cciss%d: retried %p too many "
+ "times\n", h->ctlr, c);
+ status = IO_ERROR;
+ break;
+ }
+
+ if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
+ printk(KERN_WARNING "cciss%d: command could not be "
+ "aborted.\n", h->ctlr);
+ status = IO_ERROR;
+ break;
+ }
+
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS) {
+ status = check_target_status(h, c);
+ break;
+ }
+
+ printk(KERN_WARNING "cciss%d: sendcmd error\n", h->ctlr);
+ printk(KERN_WARNING "cmd = 0x%02x, CommandStatus = 0x%02x\n",
+ c->Request.CDB[0], c->err_info->CommandStatus);
+ status = IO_ERROR;
+ break;
+
+ } while (1);
- cleanup1:
/* unlock the data buffer from DMA */
buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
- pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
+ pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
-#ifdef CONFIG_CISS_SCSI_TAPE
- /* if we saved some commands for later, process them now. */
- if (info_p->scsi_rejects.ncompletions > 0)
- do_cciss_intr(0, info_p);
-#endif
- cmd_free(info_p, c, 1);
+ return status;
+}
+
+/*
+ * Send a command to the controller, and wait for it to complete.
+ * Used at init time, and during SCSI error recovery.
+ */
+static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
+ __u8 page_code, unsigned char *scsi3addr, int cmd_type)
+{
+ CommandList_struct *c;
+ int status;
+
+ c = cmd_alloc(hba[ctlr], 1);
+ if (!c) {
+ printk(KERN_WARNING "cciss: unable to get memory");
+ return IO_ERROR;
+ }
+ status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
+ scsi3addr, cmd_type);
+ if (status == IO_OK)
+ status = sendcmd_core(hba[ctlr], c);
+ cmd_free(hba[ctlr], c, 1);
return status;
}
@@ -2691,7 +2903,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
printk(KERN_WARNING "cciss: cmd %p has"
" completed with data underrun "
"reported\n", cmd);
- cmd->rq->data_len = cmd->err_info->ResidualCnt;
+ cmd->rq->resid_len = cmd->err_info->ResidualCnt;
}
break;
case CMD_DATA_OVERRUN:
@@ -2806,7 +3018,7 @@ static void do_cciss_request(struct request_queue *q)
goto startio;
queue:
- creq = elv_next_request(q);
+ creq = blk_peek_request(q);
if (!creq)
goto startio;
@@ -2815,7 +3027,7 @@ static void do_cciss_request(struct request_queue *q)
if ((c = cmd_alloc(h, 1)) == NULL)
goto full;
- blkdev_dequeue_request(creq);
+ blk_start_request(creq);
spin_unlock_irq(q->queue_lock);
@@ -2840,10 +3052,10 @@ static void do_cciss_request(struct request_queue *q)
c->Request.Timeout = 0; // Don't time out
c->Request.CDB[0] =
(rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
- start_blk = creq->sector;
+ start_blk = blk_rq_pos(creq);
#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
- (int)creq->nr_sectors);
+ printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
+ (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
#endif /* CCISS_DEBUG */
sg_init_table(tmp_sg, MAXSGENTRIES);
@@ -2869,8 +3081,8 @@ static void do_cciss_request(struct request_queue *q)
h->maxSG = seg;
#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
- creq->nr_sectors, seg);
+ printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
+ blk_rq_sectors(creq), seg);
#endif /* CCISS_DEBUG */
c->Header.SGList = c->Header.SGTotal = seg;
@@ -2882,8 +3094,8 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[4] = (start_blk >> 8) & 0xff;
c->Request.CDB[5] = start_blk & 0xff;
c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
- c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
- c->Request.CDB[8] = creq->nr_sectors & 0xff;
+ c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
+ c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
} else {
u32 upper32 = upper_32_bits(start_blk);
@@ -2898,10 +3110,10 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[7]= (start_blk >> 16) & 0xff;
c->Request.CDB[8]= (start_blk >> 8) & 0xff;
c->Request.CDB[9]= start_blk & 0xff;
- c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
- c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
- c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
- c->Request.CDB[13]= creq->nr_sectors & 0xff;
+ c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
+ c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
+ c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
+ c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
c->Request.CDB[14] = c->Request.CDB[15] = 0;
}
} else if (blk_pc_request(creq)) {
@@ -2931,44 +3143,18 @@ startio:
static inline unsigned long get_next_completion(ctlr_info_t *h)
{
-#ifdef CONFIG_CISS_SCSI_TAPE
- /* Any rejects from sendcmd() lying around? Process them first */
- if (h->scsi_rejects.ncompletions == 0)
- return h->access.command_completed(h);
- else {
- struct sendcmd_reject_list *srl;
- int n;
- srl = &h->scsi_rejects;
- n = --srl->ncompletions;
- /* printk("cciss%d: processing saved reject\n", h->ctlr); */
- printk("p");
- return srl->complete[n];
- }
-#else
return h->access.command_completed(h);
-#endif
}
static inline int interrupt_pending(ctlr_info_t *h)
{
-#ifdef CONFIG_CISS_SCSI_TAPE
- return (h->access.intr_pending(h)
- || (h->scsi_rejects.ncompletions > 0));
-#else
return h->access.intr_pending(h);
-#endif
}
static inline long interrupt_not_for_us(ctlr_info_t *h)
{
-#ifdef CONFIG_CISS_SCSI_TAPE
- return (((h->access.intr_pending(h) == 0) ||
- (h->interrupts_enabled == 0))
- && (h->scsi_rejects.ncompletions == 0));
-#else
return (((h->access.intr_pending(h) == 0) ||
(h->interrupts_enabled == 0)));
-#endif
}
static irqreturn_t do_cciss_intr(int irq, void *dev_id)
@@ -3723,12 +3909,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
INIT_HLIST_HEAD(&hba[i]->reqQ);
if (cciss_pci_init(hba[i], pdev) != 0)
- goto clean1;
+ goto clean0;
sprintf(hba[i]->devname, "cciss%d", i);
hba[i]->ctlr = i;
hba[i]->pdev = pdev;
+ if (cciss_create_hba_sysfs_entry(hba[i]))
+ goto clean0;
+
/* configure PCI DMA stuff */
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
dac = 1;
@@ -3787,15 +3976,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
printk(KERN_ERR "cciss: out of memory");
goto clean4;
}
-#ifdef CONFIG_CISS_SCSI_TAPE
- hba[i]->scsi_rejects.complete =
- kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
- (hba[i]->nr_cmds + 5), GFP_KERNEL);
- if (hba[i]->scsi_rejects.complete == NULL) {
- printk(KERN_ERR "cciss: out of memory");
- goto clean4;
- }
-#endif
spin_lock_init(&hba[i]->lock);
/* Initialize the pdev driver private data.
@@ -3828,7 +4008,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
}
return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
- sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD);
+ sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK) {
hba[i]->firm_ver[0] = inq_buff->data_byte[32];
hba[i]->firm_ver[1] = inq_buff->data_byte[33];
@@ -3855,9 +4035,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
clean4:
kfree(inq_buff);
-#ifdef CONFIG_CISS_SCSI_TAPE
- kfree(hba[i]->scsi_rejects.complete);
-#endif
kfree(hba[i]->cmd_pool_bits);
if (hba[i]->cmd_pool)
pci_free_consistent(hba[i]->pdev,
@@ -3872,6 +4049,8 @@ clean4:
clean2:
unregister_blkdev(hba[i]->major, hba[i]->devname);
clean1:
+ cciss_destroy_hba_sysfs_entry(hba[i]);
+clean0:
hba[i]->busy_initializing = 0;
/* cleanup any queues that may have been initialized */
for (j=0; j <= hba[i]->highest_lun; j++){
@@ -3907,8 +4086,8 @@ static void cciss_shutdown(struct pci_dev *pdev)
/* sendcmd will turn off interrupt, and send the flush...
* To write all data in the battery backed cache to disks */
memset(flush_buf, 0, 4);
- return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
- TYPE_CMD);
+ return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0,
+ CTLR_LUNID, TYPE_CMD);
if (return_code == IO_OK) {
printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
} else {
@@ -3973,15 +4152,13 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
kfree(hba[i]->cmd_pool_bits);
-#ifdef CONFIG_CISS_SCSI_TAPE
- kfree(hba[i]->scsi_rejects.complete);
-#endif
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
+ cciss_destroy_hba_sysfs_entry(hba[i]);
free_hba(i);
}
@@ -3999,6 +4176,8 @@ static struct pci_driver cciss_pci_driver = {
*/
static int __init cciss_init(void)
{
+ int err;
+
/*
* The hardware requires that commands are aligned on a 64-bit
* boundary. Given that we use pci_alloc_consistent() to allocate an
@@ -4008,8 +4187,20 @@ static int __init cciss_init(void)
printk(KERN_INFO DRIVER_NAME "\n");
+ err = bus_register(&cciss_bus_type);
+ if (err)
+ return err;
+
/* Register for our PCI devices */
- return pci_register_driver(&cciss_pci_driver);
+ err = pci_register_driver(&cciss_pci_driver);
+ if (err)
+ goto err_bus_register;
+
+ return 0;
+
+err_bus_register:
+ bus_unregister(&cciss_bus_type);
+ return err;
}
static void __exit cciss_cleanup(void)
@@ -4026,6 +4217,7 @@ static void __exit cciss_cleanup(void)
}
}
remove_proc_entry("driver/cciss", NULL);
+ bus_unregister(&cciss_bus_type);
}
static void fail_all_cmds(unsigned long ctlr)
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 703e08038fb..06a5db25b29 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -11,6 +11,11 @@
#define IO_OK 0
#define IO_ERROR 1
+#define IO_NEEDS_RETRY 3
+
+#define VENDOR_LEN 8
+#define MODEL_LEN 16
+#define REV_LEN 4
struct ctlr_info;
typedef struct ctlr_info ctlr_info_t;
@@ -34,23 +39,20 @@ typedef struct _drive_info_struct
int cylinders;
int raid_level; /* set to -1 to indicate that
* the drive is not in use/configured
- */
- int busy_configuring; /*This is set when the drive is being removed
- *to prevent it from being opened or it's queue
- *from being started.
- */
- __u8 serial_no[16]; /* from inquiry page 0x83, */
- /* not necc. null terminated. */
+ */
+ int busy_configuring; /* This is set when a drive is being removed
+ * to prevent it from being opened or it's
+ * queue from being started.
+ */
+ struct device dev;
+ __u8 serial_no[16]; /* from inquiry page 0x83,
+ * not necc. null terminated.
+ */
+ char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
+ char model[MODEL_LEN + 1]; /* SCSI model string */
+ char rev[REV_LEN + 1]; /* SCSI revision string */
} drive_info_struct;
-#ifdef CONFIG_CISS_SCSI_TAPE
-
-struct sendcmd_reject_list {
- int ncompletions;
- unsigned long *complete; /* array of NR_CMDS tags */
-};
-
-#endif
struct ctlr_info
{
int ctlr;
@@ -118,11 +120,11 @@ struct ctlr_info
void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
/* list of block side commands the scsi error handling sucked up */
/* and saved for later processing */
- struct sendcmd_reject_list scsi_rejects;
#endif
unsigned char alive;
struct completion *rescan_wait;
struct task_struct *cciss_scan_thread;
+ struct device dev;
};
/* Defining the diffent access_menthods */
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 40b1b92dae7..cd665b00c7c 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -217,6 +217,8 @@ typedef union _LUNAddr_struct {
LogDevAddr_struct LogDev;
} LUNAddr_struct;
+#define CTLR_LUNID "\0\0\0\0\0\0\0\0"
+
typedef struct _CommandListHeader_struct {
BYTE ReplyQueue;
BYTE SGList;
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index a3fd87b4144..3315268b4ec 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -44,20 +44,13 @@
#define CCISS_ABORT_MSG 0x00
#define CCISS_RESET_MSG 0x01
-/* some prototypes... */
-static int sendcmd(
- __u8 cmd,
- int ctlr,
- void *buff,
- size_t size,
- unsigned int use_unit_num, /* 0: address the controller,
- 1: address logical volume log_unit,
- 2: address is in scsi3addr */
- unsigned int log_unit,
- __u8 page_code,
- unsigned char *scsi3addr,
+static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ size_t size,
+ __u8 page_code, unsigned char *scsi3addr,
int cmd_type);
+static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool);
+static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool);
static int cciss_scsi_proc_info(
struct Scsi_Host *sh,
@@ -1575,6 +1568,75 @@ cciss_seq_tape_report(struct seq_file *seq, int ctlr)
CPQ_TAPE_UNLOCK(ctlr, flags);
}
+static int wait_for_device_to_become_ready(ctlr_info_t *h,
+ unsigned char lunaddr[])
+{
+ int rc;
+ int count = 0;
+ int waittime = HZ;
+ CommandList_struct *c;
+
+ c = cmd_alloc(h, 1);
+ if (!c) {
+ printk(KERN_WARNING "cciss%d: out of memory in "
+ "wait_for_device_to_become_ready.\n", h->ctlr);
+ return IO_ERROR;
+ }
+
+ /* Send test unit ready until device ready, or give up. */
+ while (count < 20) {
+
+ /* Wait for a bit. do this first, because if we send
+ * the TUR right away, the reset will just abort it.
+ */
+ schedule_timeout_uninterruptible(waittime);
+ count++;
+
+ /* Increase wait time with each try, up to a point. */
+ if (waittime < (HZ * 30))
+ waittime = waittime * 2;
+
+ /* Send the Test Unit Ready */
+ rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0,
+ lunaddr, TYPE_CMD);
+ if (rc == 0)
+ rc = sendcmd_withirq_core(h, c, 0);
+
+ (void) process_sendcmd_error(h, c);
+
+ if (rc != 0)
+ goto retry_tur;
+
+ if (c->err_info->CommandStatus == CMD_SUCCESS)
+ break;
+
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+ c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
+ if (c->err_info->SenseInfo[2] == NO_SENSE)
+ break;
+ if (c->err_info->SenseInfo[2] == UNIT_ATTENTION) {
+ unsigned char asc;
+ asc = c->err_info->SenseInfo[12];
+ check_for_unit_attention(h, c);
+ if (asc == POWER_OR_RESET)
+ break;
+ }
+ }
+retry_tur:
+ printk(KERN_WARNING "cciss%d: Waiting %d secs "
+ "for device to become ready.\n",
+ h->ctlr, waittime / HZ);
+ rc = 1; /* device not ready. */
+ }
+
+ if (rc)
+ printk("cciss%d: giving up on device.\n", h->ctlr);
+ else
+ printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr);
+
+ cmd_free(h, c, 1);
+ return rc;
+}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
* complaining. Doing a host- or bus-reset can't do anything good here.
@@ -1591,6 +1653,7 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
int rc;
CommandList_struct *cmd_in_trouble;
+ unsigned char lunaddr[8];
ctlr_info_t **c;
int ctlr;
@@ -1600,19 +1663,15 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
return FAILED;
ctlr = (*c)->ctlr;
printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr);
-
/* find the command that's giving us trouble */
cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
- if (cmd_in_trouble == NULL) { /* paranoia */
+ if (cmd_in_trouble == NULL) /* paranoia */
return FAILED;
- }
+ memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
/* send a reset to the SCSI LUN which the command was sent to */
- rc = sendcmd(CCISS_RESET_MSG, ctlr, NULL, 0, 2, 0, 0,
- (unsigned char *) &cmd_in_trouble->Header.LUN.LunAddrBytes[0],
+ rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr,
TYPE_MSG);
- /* sendcmd turned off interrupts on the board, turn 'em back on. */
- (*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
- if (rc == 0)
+ if (rc == 0 && wait_for_device_to_become_ready(*c, lunaddr) == 0)
return SUCCESS;
printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr);
return FAILED;
@@ -1622,6 +1681,7 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
{
int rc;
CommandList_struct *cmd_to_abort;
+ unsigned char lunaddr[8];
ctlr_info_t **c;
int ctlr;
@@ -1636,12 +1696,9 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
if (cmd_to_abort == NULL) /* paranoia */
return FAILED;
- rc = sendcmd(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
- 0, 2, 0, 0,
- (unsigned char *) &cmd_to_abort->Header.LUN.LunAddrBytes[0],
- TYPE_MSG);
- /* sendcmd turned off interrupts on the board, turn 'em back on. */
- (*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
+ memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
+ rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
+ 0, 0, lunaddr, TYPE_MSG);
if (rc == 0)
return SUCCESS;
return FAILED;
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index ca268ca1115..44fa2018f6b 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -474,7 +474,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
disk->fops = &ida_fops;
if (j && !drv->nr_blks)
continue;
- blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
+ blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
set_capacity(disk, drv->nr_blks);
disk->queue = hba[i]->queue;
disk->private_data = drv;
@@ -903,7 +903,7 @@ static void do_ida_request(struct request_queue *q)
goto startio;
queue_next:
- creq = elv_next_request(q);
+ creq = blk_peek_request(q);
if (!creq)
goto startio;
@@ -912,17 +912,18 @@ queue_next:
if ((c = cmd_alloc(h,1)) == NULL)
goto startio;
- blkdev_dequeue_request(creq);
+ blk_start_request(creq);
c->ctlr = h->ctlr;
c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
c->hdr.size = sizeof(rblk_t) >> 2;
c->size += sizeof(rblk_t);
- c->req.hdr.blk = creq->sector;
+ c->req.hdr.blk = blk_rq_pos(creq);
c->rq = creq;
DBGPX(
- printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
+ printk("sector=%d, nr_sectors=%u\n",
+ blk_rq_pos(creq), blk_rq_sectors(creq));
);
sg_init_table(tmp_sg, SG_MAX);
seg = blk_rq_map_sg(q, creq, tmp_sg);
@@ -940,9 +941,9 @@ DBGPX(
tmp_sg[i].offset,
tmp_sg[i].length, dir);
}
-DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
+DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
c->req.hdr.sg_cnt = seg;
- c->req.hdr.blk_cnt = creq->nr_sectors;
+ c->req.hdr.blk_cnt = blk_rq_sectors(creq);
c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
c->type = CMD_RWREQ;
@@ -1024,8 +1025,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
cmd->req.sg[i].size, ddir);
DBGPX(printk("Done with %p\n", rq););
- if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
- BUG();
+ __blk_end_request_all(rq, error);
}
/*
@@ -1546,7 +1546,7 @@ static int revalidate_allvol(ctlr_info_t *host)
drv_info_t *drv = &host->drv[i];
if (i && !drv->nr_blks)
continue;
- blk_queue_hardsect_size(host->queue, drv->blk_size);
+ blk_queue_logical_block_size(host->queue, drv->blk_size);
set_capacity(disk, drv->nr_blks);
disk->queue = host->queue;
disk->private_data = drv;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1300df6f164..862b40c9018 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -931,7 +931,7 @@ static inline void unlock_fdc(void)
del_timer(&fd_timeout);
cont = NULL;
clear_bit(0, &fdc_busy);
- if (elv_next_request(floppy_queue))
+ if (current_req || blk_peek_request(floppy_queue))
do_fd_request(floppy_queue);
spin_unlock_irqrestore(&floppy_lock, flags);
wake_up(&fdc_wait);
@@ -2303,7 +2303,7 @@ static void floppy_end_request(struct request *req, int error)
/* current_count_sectors can be zero if transfer failed */
if (error)
- nr_sectors = req->current_nr_sectors;
+ nr_sectors = blk_rq_cur_sectors(req);
if (__blk_end_request(req, error, nr_sectors << 9))
return;
@@ -2332,7 +2332,7 @@ static void request_done(int uptodate)
if (uptodate) {
/* maintain values for invalidation on geometry
* change */
- block = current_count_sectors + req->sector;
+ block = current_count_sectors + blk_rq_pos(req);
INFBOUND(DRS->maxblock, block);
if (block > _floppy->sect)
DRS->maxtrack = 1;
@@ -2346,10 +2346,10 @@ static void request_done(int uptodate)
/* record write error information */
DRWE->write_errors++;
if (DRWE->write_errors == 1) {
- DRWE->first_error_sector = req->sector;
+ DRWE->first_error_sector = blk_rq_pos(req);
DRWE->first_error_generation = DRS->generation;
}
- DRWE->last_error_sector = req->sector;
+ DRWE->last_error_sector = blk_rq_pos(req);
DRWE->last_error_generation = DRS->generation;
}
spin_lock_irqsave(q->queue_lock, flags);
@@ -2503,24 +2503,23 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
max_sector = transfer_size(ssize,
min(max_sector, max_sector_2),
- current_req->nr_sectors);
+ blk_rq_sectors(current_req));
if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
- buffer_max > fsector_t + current_req->nr_sectors)
+ buffer_max > fsector_t + blk_rq_sectors(current_req))
current_count_sectors = min_t(int, buffer_max - fsector_t,
- current_req->nr_sectors);
+ blk_rq_sectors(current_req));
remaining = current_count_sectors << 9;
#ifdef FLOPPY_SANITY_CHECK
- if ((remaining >> 9) > current_req->nr_sectors &&
- CT(COMMAND) == FD_WRITE) {
+ if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
DPRINT("in copy buffer\n");
printk("current_count_sectors=%ld\n", current_count_sectors);
printk("remaining=%d\n", remaining >> 9);
- printk("current_req->nr_sectors=%ld\n",
- current_req->nr_sectors);
+ printk("current_req->nr_sectors=%u\n",
+ blk_rq_sectors(current_req));
printk("current_req->current_nr_sectors=%u\n",
- current_req->current_nr_sectors);
+ blk_rq_cur_sectors(current_req));
printk("max_sector=%d\n", max_sector);
printk("ssize=%d\n", ssize);
}
@@ -2530,7 +2529,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
- size = current_req->current_nr_sectors << 9;
+ size = blk_rq_cur_bytes(current_req);
rq_for_each_segment(bv, current_req, iter) {
if (!remaining)
@@ -2648,10 +2647,10 @@ static int make_raw_rw_request(void)
max_sector = _floppy->sect * _floppy->head;
- TRACK = (int)current_req->sector / max_sector;
- fsector_t = (int)current_req->sector % max_sector;
+ TRACK = (int)blk_rq_pos(current_req) / max_sector;
+ fsector_t = (int)blk_rq_pos(current_req) % max_sector;
if (_floppy->track && TRACK >= _floppy->track) {
- if (current_req->current_nr_sectors & 1) {
+ if (blk_rq_cur_sectors(current_req) & 1) {
current_count_sectors = 1;
return 1;
} else
@@ -2669,7 +2668,7 @@ static int make_raw_rw_request(void)
if (fsector_t >= max_sector) {
current_count_sectors =
min_t(int, _floppy->sect - fsector_t,
- current_req->nr_sectors);
+ blk_rq_sectors(current_req));
return 1;
}
SIZECODE = 2;
@@ -2720,7 +2719,7 @@ static int make_raw_rw_request(void)
in_sector_offset = (fsector_t % _floppy->sect) % ssize;
aligned_sector_t = fsector_t - in_sector_offset;
- max_size = current_req->nr_sectors;
+ max_size = blk_rq_sectors(current_req);
if ((raw_cmd->track == buffer_track) &&
(current_drive == buffer_drive) &&
(fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
@@ -2729,10 +2728,10 @@ static int make_raw_rw_request(void)
copy_buffer(1, max_sector, buffer_max);
return 1;
}
- } else if (in_sector_offset || current_req->nr_sectors < ssize) {
+ } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
if (CT(COMMAND) == FD_WRITE) {
- if (fsector_t + current_req->nr_sectors > ssize &&
- fsector_t + current_req->nr_sectors < ssize + ssize)
+ if (fsector_t + blk_rq_sectors(current_req) > ssize &&
+ fsector_t + blk_rq_sectors(current_req) < ssize + ssize)
max_size = ssize + ssize;
else
max_size = ssize;
@@ -2776,7 +2775,7 @@ static int make_raw_rw_request(void)
(indirect * 2 > direct * 3 &&
*errors < DP->max_errors.read_track && ((!probing
|| (DP->read_track & (1 << DRS->probed_format)))))) {
- max_size = current_req->nr_sectors;
+ max_size = blk_rq_sectors(current_req);
} else {
raw_cmd->kernel_data = current_req->buffer;
raw_cmd->length = current_count_sectors << 9;
@@ -2801,7 +2800,7 @@ static int make_raw_rw_request(void)
fsector_t > buffer_max ||
fsector_t < buffer_min ||
((CT(COMMAND) == FD_READ ||
- (!in_sector_offset && current_req->nr_sectors >= ssize)) &&
+ (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
max_sector > 2 * max_buffer_sectors + buffer_min &&
max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
/* not enough space */
@@ -2879,8 +2878,8 @@ static int make_raw_rw_request(void)
printk("write\n");
return 0;
}
- } else if (raw_cmd->length > current_req->nr_sectors << 9 ||
- current_count_sectors > current_req->nr_sectors) {
+ } else if (raw_cmd->length > blk_rq_bytes(current_req) ||
+ current_count_sectors > blk_rq_sectors(current_req)) {
DPRINT("buffer overrun in direct transfer\n");
return 0;
} else if (raw_cmd->length < current_count_sectors << 9) {
@@ -2913,7 +2912,7 @@ static void redo_fd_request(void)
struct request *req;
spin_lock_irq(floppy_queue->queue_lock);
- req = elv_next_request(floppy_queue);
+ req = blk_fetch_request(floppy_queue);
spin_unlock_irq(floppy_queue->queue_lock);
if (!req) {
do_floppy = NULL;
@@ -2990,8 +2989,9 @@ static void do_fd_request(struct request_queue * q)
if (usage_count == 0) {
printk("warning: usage count=0, current_req=%p exiting\n",
current_req);
- printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
- current_req->cmd_type, current_req->cmd_flags);
+ printk("sect=%ld type=%x flags=%x\n",
+ (long)blk_rq_pos(current_req), current_req->cmd_type,
+ current_req->cmd_flags);
return;
}
if (test_bit(0, &fdc_busy)) {
@@ -4148,6 +4148,24 @@ static void floppy_device_release(struct device *dev)
{
}
+static int floppy_resume(struct platform_device *dev)
+{
+ int fdc;
+
+ for (fdc = 0; fdc < N_FDC; fdc++)
+ if (FDCS->address != -1)
+ user_reset_fdc(-1, FD_RESET_ALWAYS, 0);
+
+ return 0;
+}
+
+static struct platform_driver floppy_driver = {
+ .resume = floppy_resume,
+ .driver = {
+ .name = "floppy",
+ },
+};
+
static struct platform_device floppy_device[N_DRIVE];
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
@@ -4196,10 +4214,14 @@ static int __init floppy_init(void)
if (err)
goto out_put_disk;
+ err = platform_driver_register(&floppy_driver);
+ if (err)
+ goto out_unreg_blkdev;
+
floppy_queue = blk_init_queue(do_fd_request, &floppy_lock);
if (!floppy_queue) {
err = -ENOMEM;
- goto out_unreg_blkdev;
+ goto out_unreg_driver;
}
blk_queue_max_sectors(floppy_queue, 64);
@@ -4346,6 +4368,8 @@ out_flush_work:
out_unreg_region:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
blk_cleanup_queue(floppy_queue);
+out_unreg_driver:
+ platform_driver_unregister(&floppy_driver);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
@@ -4566,6 +4590,7 @@ static void __exit floppy_module_exit(void)
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
unregister_blkdev(FLOPPY_MAJOR, "fd");
+ platform_driver_unregister(&floppy_driver);
for (drive = 0; drive < N_DRIVE; drive++) {
del_timer_sync(&motor_off_timer[drive]);
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index baaa9e486e5..f65b3f369eb 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -98,10 +98,9 @@
static DEFINE_SPINLOCK(hd_lock);
static struct request_queue *hd_queue;
+static struct request *hd_req;
#define MAJOR_NR HD_MAJOR
-#define QUEUE (hd_queue)
-#define CURRENT elv_next_request(hd_queue)
#define TIMEOUT_VALUE (6*HZ)
#define HD_DELAY 0
@@ -195,11 +194,24 @@ static void __init hd_setup(char *str, int *ints)
NR_HD = hdind+1;
}
+static bool hd_end_request(int err, unsigned int bytes)
+{
+ if (__blk_end_request(hd_req, err, bytes))
+ return true;
+ hd_req = NULL;
+ return false;
+}
+
+static bool hd_end_request_cur(int err)
+{
+ return hd_end_request(err, blk_rq_cur_bytes(hd_req));
+}
+
static void dump_status(const char *msg, unsigned int stat)
{
char *name = "hd?";
- if (CURRENT)
- name = CURRENT->rq_disk->disk_name;
+ if (hd_req)
+ name = hd_req->rq_disk->disk_name;
#ifdef VERBOSE_ERRORS
printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
@@ -227,8 +239,8 @@ static void dump_status(const char *msg, unsigned int stat)
if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
- if (CURRENT)
- printk(", sector=%ld", CURRENT->sector);
+ if (hd_req)
+ printk(", sector=%ld", blk_rq_pos(hd_req));
}
printk("\n");
}
@@ -406,11 +418,12 @@ static void unexpected_hd_interrupt(void)
*/
static void bad_rw_intr(void)
{
- struct request *req = CURRENT;
+ struct request *req = hd_req;
+
if (req != NULL) {
struct hd_i_struct *disk = req->rq_disk->private_data;
if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
- end_request(req, 0);
+ hd_end_request_cur(-EIO);
disk->special_op = disk->recalibrate = 1;
} else if (req->errors % RESET_FREQ == 0)
reset = 1;
@@ -452,37 +465,30 @@ static void read_intr(void)
bad_rw_intr();
hd_request();
return;
+
ok_to_read:
- req = CURRENT;
+ req = hd_req;
insw(HD_DATA, req->buffer, 256);
- req->sector++;
- req->buffer += 512;
- req->errors = 0;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
#ifdef DEBUG
- printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
- req->rq_disk->disk_name, req->sector, req->nr_sectors,
- req->buffer+512);
+ printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
+ req->rq_disk->disk_name, blk_rq_pos(req) + 1,
+ blk_rq_sectors(req) - 1, req->buffer+512);
#endif
- if (req->current_nr_sectors <= 0)
- end_request(req, 1);
- if (i > 0) {
+ if (hd_end_request(0, 512)) {
SET_HANDLER(&read_intr);
return;
}
+
(void) inb_p(HD_STATUS);
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
- if (elv_next_request(QUEUE))
- hd_request();
- return;
+ hd_request();
}
static void write_intr(void)
{
- struct request *req = CURRENT;
+ struct request *req = hd_req;
int i;
int retries = 100000;
@@ -492,30 +498,25 @@ static void write_intr(void)
continue;
if (!OK_STATUS(i))
break;
- if ((req->nr_sectors <= 1) || (i & DRQ_STAT))
+ if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
goto ok_to_write;
} while (--retries > 0);
dump_status("write_intr", i);
bad_rw_intr();
hd_request();
return;
+
ok_to_write:
- req->sector++;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
- req->buffer += 512;
- if (!i || (req->bio && req->current_nr_sectors <= 0))
- end_request(req, 1);
- if (i > 0) {
+ if (hd_end_request(0, 512)) {
SET_HANDLER(&write_intr);
outsw(HD_DATA, req->buffer, 256);
- } else {
+ return;
+ }
+
#if (HD_DELAY > 0)
- last_req = read_timer();
+ last_req = read_timer();
#endif
- hd_request();
- }
- return;
+ hd_request();
}
static void recal_intr(void)
@@ -537,18 +538,18 @@ static void hd_times_out(unsigned long dummy)
do_hd = NULL;
- if (!CURRENT)
+ if (!hd_req)
return;
spin_lock_irq(hd_queue->queue_lock);
reset = 1;
- name = CURRENT->rq_disk->disk_name;
+ name = hd_req->rq_disk->disk_name;
printk("%s: timeout\n", name);
- if (++CURRENT->errors >= MAX_ERRORS) {
+ if (++hd_req->errors >= MAX_ERRORS) {
#ifdef DEBUG
printk("%s: too many errors\n", name);
#endif
- end_request(CURRENT, 0);
+ hd_end_request_cur(-EIO);
}
hd_request();
spin_unlock_irq(hd_queue->queue_lock);
@@ -563,7 +564,7 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req)
}
if (disk->head > 16) {
printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
- end_request(req, 0);
+ hd_end_request_cur(-EIO);
}
disk->special_op = 0;
return 1;
@@ -590,24 +591,27 @@ static void hd_request(void)
repeat:
del_timer(&device_timer);
- req = CURRENT;
- if (!req) {
- do_hd = NULL;
- return;
+ if (!hd_req) {
+ hd_req = blk_fetch_request(hd_queue);
+ if (!hd_req) {
+ do_hd = NULL;
+ return;
+ }
}
+ req = hd_req;
if (reset) {
reset_hd();
return;
}
disk = req->rq_disk->private_data;
- block = req->sector;
- nsect = req->nr_sectors;
+ block = blk_rq_pos(req);
+ nsect = blk_rq_sectors(req);
if (block >= get_capacity(req->rq_disk) ||
((block+nsect) > get_capacity(req->rq_disk))) {
printk("%s: bad access: block=%d, count=%d\n",
req->rq_disk->disk_name, block, nsect);
- end_request(req, 0);
+ hd_end_request_cur(-EIO);
goto repeat;
}
@@ -647,7 +651,7 @@ repeat:
break;
default:
printk("unknown hd-command\n");
- end_request(req, 0);
+ hd_end_request_cur(-EIO);
break;
}
}
@@ -720,7 +724,7 @@ static int __init hd_init(void)
blk_queue_max_sectors(hd_queue, 255);
init_timer(&device_timer);
device_timer.function = hd_times_out;
- blk_queue_hardsect_size(hd_queue, 512);
+ blk_queue_logical_block_size(hd_queue, 512);
if (!NR_HD) {
/*
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ddae8082589..801f4ab8330 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -511,11 +511,7 @@ out:
*/
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
- if (lo->lo_biotail) {
- lo->lo_biotail->bi_next = bio;
- lo->lo_biotail = bio;
- } else
- lo->lo_bio = lo->lo_biotail = bio;
+ bio_list_add(&lo->lo_bio_list, bio);
}
/*
@@ -523,16 +519,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
*/
static struct bio *loop_get_bio(struct loop_device *lo)
{
- struct bio *bio;
-
- if ((bio = lo->lo_bio)) {
- if (bio == lo->lo_biotail)
- lo->lo_biotail = NULL;
- lo->lo_bio = bio->bi_next;
- bio->bi_next = NULL;
- }
-
- return bio;
+ return bio_list_pop(&lo->lo_bio_list);
}
static int loop_make_request(struct request_queue *q, struct bio *old_bio)
@@ -609,12 +596,13 @@ static int loop_thread(void *data)
set_user_nice(current, -20);
- while (!kthread_should_stop() || lo->lo_bio) {
+ while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
wait_event_interruptible(lo->lo_event,
- lo->lo_bio || kthread_should_stop());
+ !bio_list_empty(&lo->lo_bio_list) ||
+ kthread_should_stop());
- if (!lo->lo_bio)
+ if (bio_list_empty(&lo->lo_bio_list))
continue;
spin_lock_irq(&lo->lo_lock);
bio = loop_get_bio(lo);
@@ -721,10 +709,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
goto out_putf;
- /* new backing store needs to support loop (eg splice_read) */
- if (!inode->i_fop->splice_read)
- goto out_putf;
-
/* size of the new backing store needs to be the same */
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
goto out_putf;
@@ -800,12 +784,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
error = -EINVAL;
if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
const struct address_space_operations *aops = mapping->a_ops;
- /*
- * If we can't read - sorry. If we only can't write - well,
- * it's going to be read-only.
- */
- if (!file->f_op->splice_read)
- goto out_putf;
+
if (aops->write_begin)
lo_flags |= LO_FLAGS_USE_AOPS;
if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
@@ -841,7 +820,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- lo->lo_bio = lo->lo_biotail = NULL;
+ bio_list_init(&lo->lo_bio_list);
/*
* set queue make_request_fn, and add limits based on lower level
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index f3898353d0a..f703f547824 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -17,7 +17,7 @@
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
-#include <linux/libata.h>
+#include <linux/ata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -26,62 +26,185 @@
#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
+/* name for block device */
+#define MG_DISK_NAME "mgd"
+
+#define MG_DISK_MAJ 0
+#define MG_DISK_MAX_PART 16
+#define MG_SECTOR_SIZE 512
+#define MG_MAX_SECTS 256
+
+/* Register offsets */
+#define MG_BUFF_OFFSET 0x8000
+#define MG_STORAGE_BUFFER_SIZE 0x200
+#define MG_REG_OFFSET 0xC000
+#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
+#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
+#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
+#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
+#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
+#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
+#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
+#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
+#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
+#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
+#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
+
+/* handy status */
+#define MG_STAT_READY (ATA_DRDY | ATA_DSC)
+#define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
+ ATA_ERR))) == MG_STAT_READY)
+
+/* error code for others */
+#define MG_ERR_NONE 0
+#define MG_ERR_TIMEOUT 0x100
+#define MG_ERR_INIT_STAT 0x101
+#define MG_ERR_TRANSLATION 0x102
+#define MG_ERR_CTRL_RST 0x103
+#define MG_ERR_INV_STAT 0x104
+#define MG_ERR_RSTOUT 0x105
+
+#define MG_MAX_ERRORS 6 /* Max read/write errors */
+
+/* command */
+#define MG_CMD_RD 0x20
+#define MG_CMD_WR 0x30
+#define MG_CMD_SLEEP 0x99
+#define MG_CMD_WAKEUP 0xC3
+#define MG_CMD_ID 0xEC
+#define MG_CMD_WR_CONF 0x3C
+#define MG_CMD_RD_CONF 0x40
+
+/* operation mode */
+#define MG_OP_CASCADE (1 << 0)
+#define MG_OP_CASCADE_SYNC_RD (1 << 1)
+#define MG_OP_CASCADE_SYNC_WR (1 << 2)
+#define MG_OP_INTERLEAVE (1 << 3)
+
+/* synchronous */
+#define MG_BURST_LAT_4 (3 << 4)
+#define MG_BURST_LAT_5 (4 << 4)
+#define MG_BURST_LAT_6 (5 << 4)
+#define MG_BURST_LAT_7 (6 << 4)
+#define MG_BURST_LAT_8 (7 << 4)
+#define MG_BURST_LEN_4 (1 << 1)
+#define MG_BURST_LEN_8 (2 << 1)
+#define MG_BURST_LEN_16 (3 << 1)
+#define MG_BURST_LEN_32 (4 << 1)
+#define MG_BURST_LEN_CONT (0 << 1)
+
+/* timeout value (unit: ms) */
+#define MG_TMAX_CONF_TO_CMD 1
+#define MG_TMAX_WAIT_RD_DRQ 10
+#define MG_TMAX_WAIT_WR_DRQ 500
+#define MG_TMAX_RST_TO_BUSY 10
+#define MG_TMAX_HDRST_TO_RDY 500
+#define MG_TMAX_SWRST_TO_RDY 500
+#define MG_TMAX_RSTOUT 3000
+
+#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
+
+/* main structure for mflash driver */
+struct mg_host {
+ struct device *dev;
+
+ struct request_queue *breq;
+ struct request *req;
+ spinlock_t lock;
+ struct gendisk *gd;
+
+ struct timer_list timer;
+ void (*mg_do_intr) (struct mg_host *);
+
+ u16 id[ATA_ID_WORDS];
+
+ u16 cyls;
+ u16 heads;
+ u16 sectors;
+ u32 n_sectors;
+ u32 nres_sectors;
+
+ void __iomem *dev_base;
+ unsigned int irq;
+ unsigned int rst;
+ unsigned int rstout;
+
+ u32 major;
+ u32 error;
+};
+
+/*
+ * Debugging macro and defines
+ */
+#undef DO_MG_DEBUG
+#ifdef DO_MG_DEBUG
+# define MG_DBG(fmt, args...) \
+ printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
+#else /* CONFIG_MG_DEBUG */
+# define MG_DBG(fmt, args...) do { } while (0)
+#endif /* CONFIG_MG_DEBUG */
+
static void mg_request(struct request_queue *);
+static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
+{
+ if (__blk_end_request(host->req, err, nr_bytes))
+ return true;
+
+ host->req = NULL;
+ return false;
+}
+
+static bool mg_end_request_cur(struct mg_host *host, int err)
+{
+ return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
+}
+
static void mg_dump_status(const char *msg, unsigned int stat,
struct mg_host *host)
{
char *name = MG_DISK_NAME;
- struct request *req;
- if (host->breq) {
- req = elv_next_request(host->breq);
- if (req)
- name = req->rq_disk->disk_name;
- }
+ if (host->req)
+ name = host->req->rq_disk->disk_name;
printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
- if (stat & MG_REG_STATUS_BIT_BUSY)
+ if (stat & ATA_BUSY)
printk("Busy ");
- if (stat & MG_REG_STATUS_BIT_READY)
+ if (stat & ATA_DRDY)
printk("DriveReady ");
- if (stat & MG_REG_STATUS_BIT_WRITE_FAULT)
+ if (stat & ATA_DF)
printk("WriteFault ");
- if (stat & MG_REG_STATUS_BIT_SEEK_DONE)
+ if (stat & ATA_DSC)
printk("SeekComplete ");
- if (stat & MG_REG_STATUS_BIT_DATA_REQ)
+ if (stat & ATA_DRQ)
printk("DataRequest ");
- if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR)
+ if (stat & ATA_CORR)
printk("CorrectedError ");
- if (stat & MG_REG_STATUS_BIT_ERROR)
+ if (stat & ATA_ERR)
printk("Error ");
printk("}\n");
- if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) {
+ if ((stat & ATA_ERR) == 0) {
host->error = 0;
} else {
host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
host->error & 0xff);
- if (host->error & MG_REG_ERR_BBK)
+ if (host->error & ATA_BBK)
printk("BadSector ");
- if (host->error & MG_REG_ERR_UNC)
+ if (host->error & ATA_UNC)
printk("UncorrectableError ");
- if (host->error & MG_REG_ERR_IDNF)
+ if (host->error & ATA_IDNF)
printk("SectorIdNotFound ");
- if (host->error & MG_REG_ERR_ABRT)
+ if (host->error & ATA_ABORTED)
printk("DriveStatusError ");
- if (host->error & MG_REG_ERR_AMNF)
+ if (host->error & ATA_AMNF)
printk("AddrMarkNotFound ");
printk("}");
- if (host->error &
- (MG_REG_ERR_BBK | MG_REG_ERR_UNC |
- MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) {
- if (host->breq) {
- req = elv_next_request(host->breq);
- if (req)
- printk(", sector=%u", (u32)req->sector);
- }
-
+ if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
+ if (host->req)
+ printk(", sector=%u",
+ (unsigned int)blk_rq_pos(host->req));
}
printk("\n");
}
@@ -100,12 +223,12 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
do {
cur_jiffies = jiffies;
- if (status & MG_REG_STATUS_BIT_BUSY) {
- if (expect == MG_REG_STATUS_BIT_BUSY)
+ if (status & ATA_BUSY) {
+ if (expect == ATA_BUSY)
break;
} else {
/* Check the error condition! */
- if (status & MG_REG_STATUS_BIT_ERROR) {
+ if (status & ATA_ERR) {
mg_dump_status("mg_wait", status, host);
break;
}
@@ -114,8 +237,8 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
if (MG_READY_OK(status))
break;
- if (expect == MG_REG_STATUS_BIT_DATA_REQ)
- if (status & MG_REG_STATUS_BIT_DATA_REQ)
+ if (expect == ATA_DRQ)
+ if (status & ATA_DRQ)
break;
}
if (!msec) {
@@ -173,6 +296,42 @@ static irqreturn_t mg_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/* local copy of ata_id_string() */
+static void mg_id_string(const u16 *id, unsigned char *s,
+ unsigned int ofs, unsigned int len)
+{
+ unsigned int c;
+
+ BUG_ON(len & 1);
+
+ while (len > 0) {
+ c = id[ofs] >> 8;
+ *s = c;
+ s++;
+
+ c = id[ofs] & 0xff;
+ *s = c;
+ s++;
+
+ ofs++;
+ len -= 2;
+ }
+}
+
+/* local copy of ata_id_c_string() */
+static void mg_id_c_string(const u16 *id, unsigned char *s,
+ unsigned int ofs, unsigned int len)
+{
+ unsigned char *p;
+
+ mg_id_string(id, s, ofs, len - 1);
+
+ p = s + strnlen(s, len - 1);
+ while (p > s && p[-1] == ' ')
+ p--;
+ *p = '\0';
+}
+
static int mg_get_disk_id(struct mg_host *host)
{
u32 i;
@@ -184,12 +343,10 @@ static int mg_get_disk_id(struct mg_host *host)
char serial[ATA_ID_SERNO_LEN + 1];
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_DISABLE,
- (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
- err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ);
+ err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
if (err)
return err;
@@ -219,9 +376,9 @@ static int mg_get_disk_id(struct mg_host *host)
host->n_sectors -= host->nres_sectors;
}
- ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
- ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
- ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
+ mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
+ mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
+ mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
printk(KERN_INFO "mg_disk: model: %s\n", model);
printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
printk(KERN_INFO "mg_disk: serial: %s\n", serial);
@@ -229,8 +386,7 @@ static int mg_get_disk_id(struct mg_host *host)
host->n_sectors, host->nres_sectors);
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
return err;
}
@@ -244,7 +400,7 @@ static int mg_disk_init(struct mg_host *host)
/* hdd rst low */
gpio_set_value(host->rst, 0);
- err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
+ err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
if (err)
return err;
@@ -255,17 +411,14 @@ static int mg_disk_init(struct mg_host *host)
return err;
/* soft reset on */
- outb(MG_REG_CTRL_RESET |
- (prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
- MG_REG_CTRL_INTR_ENABLE),
+ outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0),
(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
- err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
+ err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
if (err)
return err;
/* soft reset off */
- outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
- MG_REG_CTRL_INTR_ENABLE,
+ outb(prv_data->use_polling ? ATA_NIEN : 0,
(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
if (err)
@@ -281,11 +434,10 @@ static int mg_disk_init(struct mg_host *host)
static void mg_bad_rw_intr(struct mg_host *host)
{
- struct request *req = elv_next_request(host->breq);
- if (req != NULL)
- if (++req->errors >= MG_MAX_ERRORS ||
- host->error == MG_ERR_TIMEOUT)
- end_request(req, 0);
+ if (host->req)
+ if (++host->req->errors >= MG_MAX_ERRORS ||
+ host->error == MG_ERR_TIMEOUT)
+ mg_end_request_cur(host, -EIO);
}
static unsigned int mg_out(struct mg_host *host,
@@ -311,7 +463,7 @@ static unsigned int mg_out(struct mg_host *host,
MG_REG_CYL_LOW);
outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
MG_REG_CYL_HIGH);
- outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE),
+ outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS),
(unsigned long)host->dev_base + MG_REG_DRV_HEAD);
outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
return MG_ERR_NONE;
@@ -319,105 +471,77 @@ static unsigned int mg_out(struct mg_host *host,
static void mg_read(struct request *req)
{
- u32 remains, j;
+ u32 j;
struct mg_host *host = req->rq_disk->private_data;
- remains = req->nr_sectors;
-
- if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
- MG_ERR_NONE)
+ if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+ MG_CMD_RD, NULL) != MG_ERR_NONE)
mg_bad_rw_intr(host);
MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
- remains, req->sector, req->buffer);
+ blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
+
+ do {
+ u16 *buff = (u16 *)req->buffer;
- while (remains) {
- if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
- MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
+ if (mg_wait(host, ATA_DRQ,
+ MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
mg_bad_rw_intr(host);
return;
}
- for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
- *(u16 *)req->buffer =
- inw((unsigned long)host->dev_base +
- MG_BUFF_OFFSET + (j << 1));
- req->buffer += 2;
- }
-
- req->sector++;
- req->errors = 0;
- remains = --req->nr_sectors;
- --req->current_nr_sectors;
-
- if (req->current_nr_sectors <= 0) {
- MG_DBG("remain : %d sects\n", remains);
- end_request(req, 1);
- if (remains > 0)
- req = elv_next_request(host->breq);
- }
+ for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
+ *buff++ = inw((unsigned long)host->dev_base +
+ MG_BUFF_OFFSET + (j << 1));
outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
- }
+ } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}
static void mg_write(struct request *req)
{
- u32 remains, j;
+ u32 j;
struct mg_host *host = req->rq_disk->private_data;
- remains = req->nr_sectors;
-
- if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
- MG_ERR_NONE) {
+ if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+ MG_CMD_WR, NULL) != MG_ERR_NONE) {
mg_bad_rw_intr(host);
return;
}
-
MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
- remains, req->sector, req->buffer);
- while (remains) {
- if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
- MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+ blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
+
+ do {
+ u16 *buff = (u16 *)req->buffer;
+
+ if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
mg_bad_rw_intr(host);
return;
}
- for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
- outw(*(u16 *)req->buffer,
- (unsigned long)host->dev_base +
- MG_BUFF_OFFSET + (j << 1));
- req->buffer += 2;
- }
- req->sector++;
- remains = --req->nr_sectors;
- --req->current_nr_sectors;
-
- if (req->current_nr_sectors <= 0) {
- MG_DBG("remain : %d sects\n", remains);
- end_request(req, 1);
- if (remains > 0)
- req = elv_next_request(host->breq);
- }
+ for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
+ outw(*buff++, (unsigned long)host->dev_base +
+ MG_BUFF_OFFSET + (j << 1));
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
- }
+ } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}
static void mg_read_intr(struct mg_host *host)
{
+ struct request *req = host->req;
u32 i;
- struct request *req;
+ u16 *buff;
/* check status */
do {
i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
- if (i & MG_REG_STATUS_BIT_BUSY)
+ if (i & ATA_BUSY)
break;
if (!MG_READY_OK(i))
break;
- if (i & MG_REG_STATUS_BIT_DATA_REQ)
+ if (i & ATA_DRQ)
goto ok_to_read;
} while (0);
mg_dump_status("mg_read_intr", i, host);
@@ -427,60 +551,42 @@ static void mg_read_intr(struct mg_host *host)
ok_to_read:
/* get current segment of request */
- req = elv_next_request(host->breq);
+ buff = (u16 *)req->buffer;
/* read 1 sector */
- for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
- *(u16 *)req->buffer =
- inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
- (i << 1));
- req->buffer += 2;
- }
+ for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+ *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
+ (i << 1));
- /* manipulate request */
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
- req->sector, req->nr_sectors - 1, req->buffer);
-
- req->sector++;
- req->errors = 0;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
-
- /* let know if current segment done */
- if (req->current_nr_sectors <= 0)
- end_request(req, 1);
-
- /* set handler if read remains */
- if (i > 0) {
- host->mg_do_intr = mg_read_intr;
- mod_timer(&host->timer, jiffies + 3 * HZ);
- }
+ blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
/* send read confirm */
outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
- /* goto next request */
- if (!i)
+ if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
+ /* set handler if read remains */
+ host->mg_do_intr = mg_read_intr;
+ mod_timer(&host->timer, jiffies + 3 * HZ);
+ } else /* goto next request */
mg_request(host->breq);
}
static void mg_write_intr(struct mg_host *host)
{
+ struct request *req = host->req;
u32 i, j;
u16 *buff;
- struct request *req;
-
- /* get current segment of request */
- req = elv_next_request(host->breq);
+ bool rem;
/* check status */
do {
i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
- if (i & MG_REG_STATUS_BIT_BUSY)
+ if (i & ATA_BUSY)
break;
if (!MG_READY_OK(i))
break;
- if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ))
+ if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
goto ok_to_write;
} while (0);
mg_dump_status("mg_write_intr", i, host);
@@ -489,18 +595,8 @@ static void mg_write_intr(struct mg_host *host)
return;
ok_to_write:
- /* manipulate request */
- req->sector++;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
- req->buffer += MG_SECTOR_SIZE;
-
- /* let know if current segment or all done */
- if (!i || (req->bio && req->current_nr_sectors <= 0))
- end_request(req, 1);
-
- /* write 1 sector and set handler if remains */
- if (i > 0) {
+ if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
+ /* write 1 sector and set handler if remains */
buff = (u16 *)req->buffer;
for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
outw(*buff, (unsigned long)host->dev_base +
@@ -508,7 +604,7 @@ ok_to_write:
buff++;
}
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
- req->sector, req->nr_sectors, req->buffer);
+ blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
host->mg_do_intr = mg_write_intr;
mod_timer(&host->timer, jiffies + 3 * HZ);
}
@@ -516,7 +612,7 @@ ok_to_write:
/* send write confirm */
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
- if (!i)
+ if (!rem)
mg_request(host->breq);
}
@@ -524,49 +620,45 @@ void mg_times_out(unsigned long data)
{
struct mg_host *host = (struct mg_host *)data;
char *name;
- struct request *req;
spin_lock_irq(&host->lock);
- req = elv_next_request(host->breq);
- if (!req)
+ if (!host->req)
goto out_unlock;
host->mg_do_intr = NULL;
- name = req->rq_disk->disk_name;
+ name = host->req->rq_disk->disk_name;
printk(KERN_DEBUG "%s: timeout\n", name);
host->error = MG_ERR_TIMEOUT;
mg_bad_rw_intr(host);
- mg_request(host->breq);
out_unlock:
+ mg_request(host->breq);
spin_unlock_irq(&host->lock);
}
static void mg_request_poll(struct request_queue *q)
{
- struct request *req;
- struct mg_host *host;
+ struct mg_host *host = q->queuedata;
- while ((req = elv_next_request(q)) != NULL) {
- host = req->rq_disk->private_data;
- if (blk_fs_request(req)) {
- switch (rq_data_dir(req)) {
- case READ:
- mg_read(req);
- break;
- case WRITE:
- mg_write(req);
- break;
- default:
- printk(KERN_WARNING "%s:%d unknown command\n",
- __func__, __LINE__);
- end_request(req, 0);
+ while (1) {
+ if (!host->req) {
+ host->req = blk_fetch_request(q);
+ if (!host->req)
break;
- }
}
+
+ if (unlikely(!blk_fs_request(host->req))) {
+ mg_end_request_cur(host, -EIO);
+ continue;
+ }
+
+ if (rq_data_dir(host->req) == READ)
+ mg_read(host->req);
+ else
+ mg_write(host->req);
}
}
@@ -588,18 +680,15 @@ static unsigned int mg_issue_req(struct request *req,
break;
case WRITE:
/* TODO : handler */
- outb(MG_REG_CTRL_INTR_DISABLE,
- (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
!= MG_ERR_NONE) {
mg_bad_rw_intr(host);
return host->error;
}
del_timer(&host->timer);
- mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ);
- outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
+ outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (host->error) {
mg_bad_rw_intr(host);
return host->error;
@@ -614,11 +703,6 @@ static unsigned int mg_issue_req(struct request *req,
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
break;
- default:
- printk(KERN_WARNING "%s:%d unknown command\n",
- __func__, __LINE__);
- end_request(req, 0);
- break;
}
return MG_ERR_NONE;
}
@@ -626,16 +710,17 @@ static unsigned int mg_issue_req(struct request *req,
/* This function also called from IRQ context */
static void mg_request(struct request_queue *q)
{
+ struct mg_host *host = q->queuedata;
struct request *req;
- struct mg_host *host;
u32 sect_num, sect_cnt;
while (1) {
- req = elv_next_request(q);
- if (!req)
- return;
-
- host = req->rq_disk->private_data;
+ if (!host->req) {
+ host->req = blk_fetch_request(q);
+ if (!host->req)
+ break;
+ }
+ req = host->req;
/* check unwanted request call */
if (host->mg_do_intr)
@@ -643,9 +728,9 @@ static void mg_request(struct request_queue *q)
del_timer(&host->timer);
- sect_num = req->sector;
+ sect_num = blk_rq_pos(req);
/* deal whole segments */
- sect_cnt = req->nr_sectors;
+ sect_cnt = blk_rq_sectors(req);
/* sanity check */
if (sect_num >= get_capacity(req->rq_disk) ||
@@ -655,12 +740,14 @@ static void mg_request(struct request_queue *q)
"%s: bad access: sector=%d, count=%d\n",
req->rq_disk->disk_name,
sect_num, sect_cnt);
- end_request(req, 0);
+ mg_end_request_cur(host, -EIO);
continue;
}
- if (!blk_fs_request(req))
- return;
+ if (unlikely(!blk_fs_request(req))) {
+ mg_end_request_cur(host, -EIO);
+ continue;
+ }
if (!mg_issue_req(req, host, sect_num, sect_cnt))
return;
@@ -690,9 +777,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
return -EIO;
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_DISABLE,
- (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
/* wait until mflash deep sleep */
@@ -700,9 +785,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_ENABLE,
- (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
return -EIO;
}
@@ -725,8 +808,7 @@ static int mg_resume(struct platform_device *plat_dev)
return -EIO;
if (!prv_data->use_polling)
- outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
- MG_REG_DRV_CTRL);
+ outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
return 0;
}
@@ -877,6 +959,7 @@ static int mg_probe(struct platform_device *plat_dev)
__func__, __LINE__);
goto probe_err_5;
}
+ host->breq->queuedata = host;
/* mflash is random device, thanx for the noop */
elevator_exit(host->breq->elevator);
@@ -887,7 +970,7 @@ static int mg_probe(struct platform_device *plat_dev)
goto probe_err_6;
}
blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
- blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);
+ blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
init_timer(&host->timer);
host->timer.function = mg_times_out;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4d6de4f15cc..5d23ffad7c7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -110,7 +110,7 @@ static void nbd_end_request(struct request *req)
req, error ? "failed" : "done");
spin_lock_irqsave(q->queue_lock, flags);
- __blk_end_request(req, error, req->nr_sectors << 9);
+ __blk_end_request_all(req, error);
spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -231,19 +231,19 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
int result, flags;
struct nbd_request request;
- unsigned long size = req->nr_sectors << 9;
+ unsigned long size = blk_rq_bytes(req);
request.magic = htonl(NBD_REQUEST_MAGIC);
request.type = htonl(nbd_cmd(req));
- request.from = cpu_to_be64((u64) req->sector << 9);
+ request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
memcpy(request.handle, &req, sizeof(req));
- dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
+ dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
lo->disk->disk_name, req,
nbdcmd_to_ascii(nbd_cmd(req)),
- (unsigned long long)req->sector << 9,
- req->nr_sectors << 9);
+ (unsigned long long)blk_rq_pos(req) << 9,
+ blk_rq_bytes(req));
result = sock_xmit(lo, 1, &request, sizeof(request),
(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
if (result <= 0) {
@@ -533,11 +533,9 @@ static void do_nbd_request(struct request_queue *q)
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
+ while ((req = blk_fetch_request(q)) != NULL) {
struct nbd_device *lo;
- blkdev_dequeue_request(req);
-
spin_unlock_irq(q->queue_lock);
dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
@@ -580,13 +578,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
blk_rq_init(NULL, &sreq);
sreq.cmd_type = REQ_TYPE_SPECIAL;
nbd_cmd(&sreq) = NBD_CMD_DISC;
- /*
- * Set these to sane values in case server implementation
- * fails to check the request type first and also to keep
- * debugging output cleaner.
- */
- sreq.sector = 0;
- sreq.nr_sectors = 0;
if (!lo->sock)
return -EINVAL;
nbd_send_req(lo, &sreq);
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index e91d4b4b014..911dfd98d81 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -719,32 +719,37 @@ static void do_pcd_request(struct request_queue * q)
if (pcd_busy)
return;
while (1) {
- pcd_req = elv_next_request(q);
- if (!pcd_req)
- return;
+ if (!pcd_req) {
+ pcd_req = blk_fetch_request(q);
+ if (!pcd_req)
+ return;
+ }
if (rq_data_dir(pcd_req) == READ) {
struct pcd_unit *cd = pcd_req->rq_disk->private_data;
if (cd != pcd_current)
pcd_bufblk = -1;
pcd_current = cd;
- pcd_sector = pcd_req->sector;
- pcd_count = pcd_req->current_nr_sectors;
+ pcd_sector = blk_rq_pos(pcd_req);
+ pcd_count = blk_rq_cur_sectors(pcd_req);
pcd_buf = pcd_req->buffer;
pcd_busy = 1;
ps_set_intr(do_pcd_read, NULL, 0, nice);
return;
- } else
- end_request(pcd_req, 0);
+ } else {
+ __blk_end_request_all(pcd_req, -EIO);
+ pcd_req = NULL;
+ }
}
}
-static inline void next_request(int success)
+static inline void next_request(int err)
{
unsigned long saved_flags;
spin_lock_irqsave(&pcd_lock, saved_flags);
- end_request(pcd_req, success);
+ if (!__blk_end_request_cur(pcd_req, err))
+ pcd_req = NULL;
pcd_busy = 0;
do_pcd_request(pcd_queue);
spin_unlock_irqrestore(&pcd_lock, saved_flags);
@@ -781,7 +786,7 @@ static void pcd_start(void)
if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
pcd_bufblk = -1;
- next_request(0);
+ next_request(-EIO);
return;
}
@@ -796,7 +801,7 @@ static void do_pcd_read(void)
pcd_retries = 0;
pcd_transfer();
if (!pcd_count) {
- next_request(1);
+ next_request(0);
return;
}
@@ -815,7 +820,7 @@ static void do_pcd_read_drq(void)
return;
}
pcd_bufblk = -1;
- next_request(0);
+ next_request(-EIO);
return;
}
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 9299455b0af..bf5955b3d87 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -410,10 +410,12 @@ static void run_fsm(void)
pd_claimed = 0;
phase = NULL;
spin_lock_irqsave(&pd_lock, saved_flags);
- end_request(pd_req, res);
- pd_req = elv_next_request(pd_queue);
- if (!pd_req)
- stop = 1;
+ if (!__blk_end_request_cur(pd_req,
+ res == Ok ? 0 : -EIO)) {
+ pd_req = blk_fetch_request(pd_queue);
+ if (!pd_req)
+ stop = 1;
+ }
spin_unlock_irqrestore(&pd_lock, saved_flags);
if (stop)
return;
@@ -443,11 +445,11 @@ static enum action do_pd_io_start(void)
pd_cmd = rq_data_dir(pd_req);
if (pd_cmd == READ || pd_cmd == WRITE) {
- pd_block = pd_req->sector;
- pd_count = pd_req->current_nr_sectors;
+ pd_block = blk_rq_pos(pd_req);
+ pd_count = blk_rq_cur_sectors(pd_req);
if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
return Fail;
- pd_run = pd_req->nr_sectors;
+ pd_run = blk_rq_sectors(pd_req);
pd_buf = pd_req->buffer;
pd_retries = 0;
if (pd_cmd == READ)
@@ -477,8 +479,8 @@ static int pd_next_buf(void)
if (pd_count)
return 0;
spin_lock_irqsave(&pd_lock, saved_flags);
- end_request(pd_req, 1);
- pd_count = pd_req->current_nr_sectors;
+ __blk_end_request_cur(pd_req, 0);
+ pd_count = blk_rq_cur_sectors(pd_req);
pd_buf = pd_req->buffer;
spin_unlock_irqrestore(&pd_lock, saved_flags);
return 0;
@@ -702,7 +704,7 @@ static void do_pd_request(struct request_queue * q)
{
if (pd_req)
return;
- pd_req = elv_next_request(q);
+ pd_req = blk_fetch_request(q);
if (!pd_req)
return;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index bef3b997ba3..68a90834e99 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -750,12 +750,10 @@ static int pf_ready(void)
static struct request_queue *pf_queue;
-static void pf_end_request(int uptodate)
+static void pf_end_request(int err)
{
- if (pf_req) {
- end_request(pf_req, uptodate);
+ if (pf_req && !__blk_end_request_cur(pf_req, err))
pf_req = NULL;
- }
}
static void do_pf_request(struct request_queue * q)
@@ -763,17 +761,19 @@ static void do_pf_request(struct request_queue * q)
if (pf_busy)
return;
repeat:
- pf_req = elv_next_request(q);
- if (!pf_req)
- return;
+ if (!pf_req) {
+ pf_req = blk_fetch_request(q);
+ if (!pf_req)
+ return;
+ }
pf_current = pf_req->rq_disk->private_data;
- pf_block = pf_req->sector;
- pf_run = pf_req->nr_sectors;
- pf_count = pf_req->current_nr_sectors;
+ pf_block = blk_rq_pos(pf_req);
+ pf_run = blk_rq_sectors(pf_req);
+ pf_count = blk_rq_cur_sectors(pf_req);
if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
- pf_end_request(0);
+ pf_end_request(-EIO);
goto repeat;
}
@@ -788,7 +788,7 @@ repeat:
pi_do_claimed(pf_current->pi, do_pf_write);
else {
pf_busy = 0;
- pf_end_request(0);
+ pf_end_request(-EIO);
goto repeat;
}
}
@@ -805,23 +805,22 @@ static int pf_next_buf(void)
return 1;
if (!pf_count) {
spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(1);
- pf_req = elv_next_request(pf_queue);
+ pf_end_request(0);
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
if (!pf_req)
return 1;
- pf_count = pf_req->current_nr_sectors;
+ pf_count = blk_rq_cur_sectors(pf_req);
pf_buf = pf_req->buffer;
}
return 0;
}
-static inline void next_request(int success)
+static inline void next_request(int err)
{
unsigned long saved_flags;
spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(success);
+ pf_end_request(err);
pf_busy = 0;
do_pf_request(pf_queue);
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
@@ -844,7 +843,7 @@ static void do_pf_read_start(void)
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pf_mask = STAT_DRQ;
@@ -863,7 +862,7 @@ static void do_pf_read_drq(void)
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_read_block(pf_current->pi, pf_buf, 512);
@@ -871,7 +870,7 @@ static void do_pf_read_drq(void)
break;
}
pi_disconnect(pf_current->pi);
- next_request(1);
+ next_request(0);
}
static void do_pf_write(void)
@@ -890,7 +889,7 @@ static void do_pf_write_start(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
@@ -903,7 +902,7 @@ static void do_pf_write_start(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_write_block(pf_current->pi, pf_buf, 512);
@@ -923,11 +922,11 @@ static void do_pf_write_done(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(0);
+ next_request(-EIO);
return;
}
pi_disconnect(pf_current->pi);
- next_request(1);
+ next_request(0);
}
static int __init pf_init(void)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index dc7a8c352da..83650e00632 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -430,7 +430,7 @@ static void pkt_sysfs_cleanup(void)
/********************************************************************
entries in debugfs
- /debugfs/pktcdvd[0-7]/
+ /sys/kernel/debug/pktcdvd[0-7]/
info
*******************************************************************/
@@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
*/
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
- if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
+ if ((pd->settings.size << 9) / CD_FRAMESIZE
+ <= queue_max_phys_segments(q)) {
/*
* The cdrom device can handle one segment/frame
*/
clear_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
- } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
+ } else if ((pd->settings.size << 9) / PAGE_SIZE
+ <= queue_max_phys_segments(q)) {
/*
* We can handle this case at the expense of some extra memory
* copies during write operations
@@ -2657,7 +2659,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
struct request_queue *q = pd->disk->queue;
blk_queue_make_request(q, pkt_make_request);
- blk_queue_hardsect_size(q, CD_FRAMESIZE);
+ blk_queue_logical_block_size(q, CD_FRAMESIZE);
blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
blk_queue_merge_bvec(q, pkt_merge_bvec);
q->queuedata = pd;
@@ -2853,6 +2855,11 @@ static struct block_device_operations pktcdvd_ops = {
.media_changed = pkt_media_changed,
};
+static char *pktcdvd_nodename(struct gendisk *gd)
+{
+ return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
+}
+
/*
* Set up mapping from pktcdvd device to CD-ROM device.
*/
@@ -2905,6 +2912,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
disk->fops = &pktcdvd_ops;
disk->flags = GENHD_FL_REMOVABLE;
strcpy(disk->disk_name, pd->name);
+ disk->nodename = pktcdvd_nodename;
disk->private_data = pd;
disk->queue = blk_alloc_queue(GFP_KERNEL);
if (!disk->queue)
@@ -3060,6 +3068,7 @@ static const struct file_operations pkt_ctl_fops = {
static struct miscdevice pkt_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = DRIVER_NAME,
+ .name = "pktcdvd/control",
.fops = &pkt_ctl_fops
};
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index bccc42bb921..34cbb7f3efa 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -120,7 +120,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
struct request *req)
{
- struct ps3disk_private *priv = dev->sbd.core.driver_data;
+ struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
int write = rq_data_dir(req), res;
const char *op = write ? "write" : "read";
u64 start_sector, sectors;
@@ -134,13 +134,12 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
rq_for_each_segment(bv, req, iter)
n++;
dev_dbg(&dev->sbd.core,
- "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
- __func__, __LINE__, op, n, req->nr_sectors,
- req->hard_nr_sectors);
+ "%s:%u: %s req has %u bvecs for %u sectors\n",
+ __func__, __LINE__, op, n, blk_rq_sectors(req));
#endif
- start_sector = req->sector * priv->blocking_factor;
- sectors = req->nr_sectors * priv->blocking_factor;
+ start_sector = blk_rq_pos(req) * priv->blocking_factor;
+ sectors = blk_rq_sectors(req) * priv->blocking_factor;
dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
__func__, __LINE__, op, sectors, start_sector);
@@ -158,7 +157,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
__LINE__, op, res);
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
return 0;
}
@@ -169,7 +168,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
struct request *req)
{
- struct ps3disk_private *priv = dev->sbd.core.driver_data;
+ struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
u64 res;
dev_dbg(&dev->sbd.core, "%s:%u: flush request\n", __func__, __LINE__);
@@ -180,7 +179,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
return 0;
}
@@ -195,7 +194,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
- while ((req = elv_next_request(q))) {
+ while ((req = blk_fetch_request(q))) {
if (blk_fs_request(req)) {
if (ps3disk_submit_request_sg(dev, req))
break;
@@ -205,7 +204,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
break;
} else {
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
continue;
}
}
@@ -214,7 +213,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
static void ps3disk_request(struct request_queue *q)
{
struct ps3_storage_device *dev = q->queuedata;
- struct ps3disk_private *priv = dev->sbd.core.driver_data;
+ struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
if (priv->req) {
dev_dbg(&dev->sbd.core, "%s:%u busy\n", __func__, __LINE__);
@@ -231,7 +230,6 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
struct request *req;
int res, read, error;
u64 tag, status;
- unsigned long num_sectors;
const char *op;
res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
@@ -247,7 +245,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
- priv = dev->sbd.core.driver_data;
+ priv = ps3_system_bus_get_drvdata(&dev->sbd);
req = priv->req;
if (!req) {
dev_dbg(&dev->sbd.core,
@@ -261,11 +259,9 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
req->cmd[0] == REQ_LB_OP_FLUSH) {
read = 0;
- num_sectors = req->hard_cur_sectors;
op = "flush";
} else {
read = !rq_data_dir(req);
- num_sectors = req->nr_sectors;
op = read ? "read" : "write";
}
if (status) {
@@ -281,7 +277,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
}
spin_lock(&priv->lock);
- __blk_end_request(req, error, num_sectors << 9);
+ __blk_end_request_all(req, error);
priv->req = NULL;
ps3disk_do_request(dev, priv->queue);
spin_unlock(&priv->lock);
@@ -368,7 +364,7 @@ static void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs,
static int ps3disk_identify(struct ps3_storage_device *dev)
{
- struct ps3disk_private *priv = dev->sbd.core.driver_data;
+ struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
struct lv1_ata_cmnd_block ata_cmnd;
u16 *id = dev->bounce_buf;
u64 res;
@@ -449,7 +445,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
goto fail;
}
- dev->sbd.core.driver_data = priv;
+ ps3_system_bus_set_drvdata(_dev, priv);
spin_lock_init(&priv->lock);
dev->bounce_size = BOUNCE_SIZE;
@@ -481,7 +477,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
blk_queue_max_sectors(queue, dev->bounce_size >> 9);
blk_queue_segment_boundary(queue, -1UL);
blk_queue_dma_alignment(queue, dev->blk_size-1);
- blk_queue_hardsect_size(queue, dev->blk_size);
+ blk_queue_logical_block_size(queue, dev->blk_size);
blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
ps3disk_prepare_flush);
@@ -527,7 +523,7 @@ fail_free_bounce:
kfree(dev->bounce_buf);
fail_free_priv:
kfree(priv);
- dev->sbd.core.driver_data = NULL;
+ ps3_system_bus_set_drvdata(_dev, NULL);
fail:
mutex_lock(&ps3disk_mask_mutex);
__clear_bit(devidx, &ps3disk_mask);
@@ -538,7 +534,7 @@ fail:
static int ps3disk_remove(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
- struct ps3disk_private *priv = dev->sbd.core.driver_data;
+ struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
mutex_lock(&ps3disk_mask_mutex);
__clear_bit(MINOR(disk_devt(priv->gendisk)) / PS3DISK_MINORS,
@@ -552,7 +548,7 @@ static int ps3disk_remove(struct ps3_system_bus_device *_dev)
ps3stor_teardown(dev);
kfree(dev->bounce_buf);
kfree(priv);
- dev->sbd.core.driver_data = NULL;
+ ps3_system_bus_set_drvdata(_dev, NULL);
return 0;
}
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 8eddef373a9..095f97e6066 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -14,8 +14,10 @@
#include <linux/seq_file.h>
#include <asm/firmware.h>
+#include <asm/iommu.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
+#include <asm/ps3gpu.h>
#define DEVICE_NAME "ps3vram"
@@ -45,8 +47,6 @@
#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
-#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601
-
#define CACHE_PAGE_PRESENT 1
#define CACHE_PAGE_DIRTY 2
@@ -72,8 +72,7 @@ struct ps3vram_priv {
u64 memory_handle;
u64 context_handle;
u32 *ctrl;
- u32 *reports;
- u8 __iomem *ddr_base;
+ void *reports;
u8 *xdr_buf;
u32 *fifo_base;
@@ -81,8 +80,8 @@ struct ps3vram_priv {
struct ps3vram_cache cache;
- /* Used to serialize cache/DMA operations */
- struct mutex lock;
+ spinlock_t lock; /* protecting list of bios */
+ struct bio_list list;
};
@@ -103,15 +102,15 @@ static char *size = "256M";
module_param(size, charp, 0);
MODULE_PARM_DESC(size, "memory size");
-static u32 *ps3vram_get_notifier(u32 *reports, int notifier)
+static u32 *ps3vram_get_notifier(void *reports, int notifier)
{
- return (void *)reports + DMA_NOTIFIER_OFFSET_BASE +
+ return reports + DMA_NOTIFIER_OFFSET_BASE +
DMA_NOTIFIER_SIZE * notifier;
}
static void ps3vram_notifier_reset(struct ps3_system_bus_device *dev)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
int i;
@@ -122,7 +121,7 @@ static void ps3vram_notifier_reset(struct ps3_system_bus_device *dev)
static int ps3vram_notifier_wait(struct ps3_system_bus_device *dev,
unsigned int timeout_ms)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
@@ -137,7 +136,7 @@ static int ps3vram_notifier_wait(struct ps3_system_bus_device *dev,
static void ps3vram_init_ring(struct ps3_system_bus_device *dev)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET;
@@ -146,7 +145,7 @@ static void ps3vram_init_ring(struct ps3_system_bus_device *dev)
static int ps3vram_wait_ring(struct ps3_system_bus_device *dev,
unsigned int timeout_ms)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
@@ -175,7 +174,7 @@ static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan, u32 tag,
static void ps3vram_rewind_ring(struct ps3_system_bus_device *dev)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int status;
ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET));
@@ -183,20 +182,17 @@ static void ps3vram_rewind_ring(struct ps3_system_bus_device *dev)
priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
/* asking the HV for a blit will kick the FIFO */
- status = lv1_gpu_context_attribute(priv->context_handle,
- L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, 0,
- 0, 0, 0);
+ status = lv1_gpu_fb_blit(priv->context_handle, 0, 0, 0, 0);
if (status)
- dev_err(&dev->core,
- "%s: lv1_gpu_context_attribute failed %d\n", __func__,
- status);
+ dev_err(&dev->core, "%s: lv1_gpu_fb_blit failed %d\n",
+ __func__, status);
priv->fifo_ptr = priv->fifo_base;
}
static void ps3vram_fire_ring(struct ps3_system_bus_device *dev)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int status;
mutex_lock(&ps3_gpu_mutex);
@@ -205,13 +201,10 @@ static void ps3vram_fire_ring(struct ps3_system_bus_device *dev)
(priv->fifo_ptr - priv->fifo_base) * sizeof(u32);
/* asking the HV for a blit will kick the FIFO */
- status = lv1_gpu_context_attribute(priv->context_handle,
- L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, 0,
- 0, 0, 0);
+ status = lv1_gpu_fb_blit(priv->context_handle, 0, 0, 0, 0);
if (status)
- dev_err(&dev->core,
- "%s: lv1_gpu_context_attribute failed %d\n", __func__,
- status);
+ dev_err(&dev->core, "%s: lv1_gpu_fb_blit failed %d\n",
+ __func__, status);
if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) >
FIFO_SIZE - 1024) {
@@ -225,7 +218,7 @@ static void ps3vram_fire_ring(struct ps3_system_bus_device *dev)
static void ps3vram_bind(struct ps3_system_bus_device *dev)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1);
ps3vram_out_ring(priv, 0x31337303);
@@ -248,7 +241,7 @@ static int ps3vram_upload(struct ps3_system_bus_device *dev,
unsigned int src_offset, unsigned int dst_offset,
int len, int count)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
ps3vram_begin_ring(priv, UPLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
@@ -280,7 +273,7 @@ static int ps3vram_download(struct ps3_system_bus_device *dev,
unsigned int src_offset, unsigned int dst_offset,
int len, int count)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
@@ -310,7 +303,7 @@ static int ps3vram_download(struct ps3_system_bus_device *dev,
static void ps3vram_cache_evict(struct ps3_system_bus_device *dev, int entry)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
struct ps3vram_cache *cache = &priv->cache;
if (!(cache->tags[entry].flags & CACHE_PAGE_DIRTY))
@@ -332,7 +325,7 @@ static void ps3vram_cache_evict(struct ps3_system_bus_device *dev, int entry)
static void ps3vram_cache_load(struct ps3_system_bus_device *dev, int entry,
unsigned int address)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
struct ps3vram_cache *cache = &priv->cache;
dev_dbg(&dev->core, "Fetching %d: 0x%08x\n", entry, address);
@@ -352,7 +345,7 @@ static void ps3vram_cache_load(struct ps3_system_bus_device *dev, int entry,
static void ps3vram_cache_flush(struct ps3_system_bus_device *dev)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
struct ps3vram_cache *cache = &priv->cache;
int i;
@@ -366,7 +359,7 @@ static void ps3vram_cache_flush(struct ps3_system_bus_device *dev)
static unsigned int ps3vram_cache_match(struct ps3_system_bus_device *dev,
loff_t address)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
struct ps3vram_cache *cache = &priv->cache;
unsigned int base;
unsigned int offset;
@@ -400,7 +393,7 @@ static unsigned int ps3vram_cache_match(struct ps3_system_bus_device *dev,
static int ps3vram_cache_init(struct ps3_system_bus_device *dev)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
priv->cache.page_count = CACHE_PAGE_COUNT;
priv->cache.page_size = CACHE_PAGE_SIZE;
@@ -419,7 +412,7 @@ static int ps3vram_cache_init(struct ps3_system_bus_device *dev)
static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
ps3vram_cache_flush(dev);
kfree(priv->cache.tags);
@@ -428,7 +421,7 @@ static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev)
static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
unsigned int cached, count;
dev_dbg(&dev->core, "%s: from=0x%08x len=0x%zx\n", __func__,
@@ -449,8 +442,6 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
offset = (unsigned int) (from & (priv->cache.page_size - 1));
avail = priv->cache.page_size - offset;
- mutex_lock(&priv->lock);
-
entry = ps3vram_cache_match(dev, from);
cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
@@ -462,8 +453,6 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
avail = count;
memcpy(buf, priv->xdr_buf + cached, avail);
- mutex_unlock(&priv->lock);
-
buf += avail;
count -= avail;
from += avail;
@@ -476,7 +465,7 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
size_t len, size_t *retlen, const u_char *buf)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
unsigned int cached, count;
if (to >= priv->size)
@@ -494,8 +483,6 @@ static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
offset = (unsigned int) (to & (priv->cache.page_size - 1));
avail = priv->cache.page_size - offset;
- mutex_lock(&priv->lock);
-
entry = ps3vram_cache_match(dev, to);
cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
@@ -509,8 +496,6 @@ static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY;
- mutex_unlock(&priv->lock);
-
buf += avail;
count -= avail;
to += avail;
@@ -543,28 +528,26 @@ static const struct file_operations ps3vram_proc_fops = {
static void __devinit ps3vram_proc_init(struct ps3_system_bus_device *dev)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
struct proc_dir_entry *pde;
- pde = proc_create(DEVICE_NAME, 0444, NULL, &ps3vram_proc_fops);
- if (!pde) {
+ pde = proc_create_data(DEVICE_NAME, 0444, NULL, &ps3vram_proc_fops,
+ priv);
+ if (!pde)
dev_warn(&dev->core, "failed to create /proc entry\n");
- return;
- }
- pde->data = priv;
}
-static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
+static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
+ struct bio *bio)
{
- struct ps3_system_bus_device *dev = q->queuedata;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int write = bio_data_dir(bio) == WRITE;
const char *op = write ? "write" : "read";
loff_t offset = bio->bi_sector << 9;
int error = 0;
struct bio_vec *bvec;
unsigned int i;
-
- dev_dbg(&dev->core, "%s\n", __func__);
+ struct bio *next;
bio_for_each_segment(bvec, bio, i) {
/* PS3 is ppc64, so we don't handle highmem */
@@ -585,6 +568,7 @@ static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
if (retlen != len) {
dev_err(&dev->core, "Short %s\n", op);
+ error = -EIO;
goto out;
}
@@ -594,7 +578,35 @@ static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
dev_dbg(&dev->core, "%s completed\n", op);
out:
+ spin_lock_irq(&priv->lock);
+ bio_list_pop(&priv->list);
+ next = bio_list_peek(&priv->list);
+ spin_unlock_irq(&priv->lock);
+
bio_endio(bio, error);
+ return next;
+}
+
+static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct ps3_system_bus_device *dev = q->queuedata;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
+ int busy;
+
+ dev_dbg(&dev->core, "%s\n", __func__);
+
+ spin_lock_irq(&priv->lock);
+ busy = !bio_list_empty(&priv->list);
+ bio_list_add(&priv->list, bio);
+ spin_unlock_irq(&priv->lock);
+
+ if (busy)
+ return 0;
+
+ do {
+ bio = ps3vram_do_bio(dev, bio);
+ } while (bio);
+
return 0;
}
@@ -604,8 +616,8 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
int error, status;
struct request_queue *queue;
struct gendisk *gendisk;
- u64 ddr_lpar, ctrl_lpar, info_lpar, reports_lpar, ddr_size,
- reports_size;
+ u64 ddr_size, ddr_lpar, ctrl_lpar, info_lpar, reports_lpar,
+ reports_size, xdr_lpar;
char *rest;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -614,10 +626,9 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
goto fail;
}
- mutex_init(&priv->lock);
- dev->core.driver_data = priv;
-
- priv = dev->core.driver_data;
+ spin_lock_init(&priv->lock);
+ bio_list_init(&priv->list);
+ ps3_system_bus_set_drvdata(dev, priv);
/* Allocate XDR buffer (1MiB aligned) */
priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL,
@@ -636,7 +647,7 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
if (ps3_open_hv_device(dev)) {
dev_err(&dev->core, "ps3_open_hv_device failed\n");
error = -EAGAIN;
- goto out_close_gpu;
+ goto out_free_xdr_buf;
}
/* Request memory */
@@ -660,7 +671,7 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
dev_err(&dev->core, "lv1_gpu_memory_allocate failed %d\n",
status);
error = -ENOMEM;
- goto out_free_xdr_buf;
+ goto out_close_gpu;
}
/* Request context */
@@ -676,9 +687,11 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
}
/* Map XDR buffer to RSX */
+ xdr_lpar = ps3_mm_phys_to_lpar(__pa(priv->xdr_buf));
status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF,
- ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)),
- XDR_BUF_SIZE, 0);
+ xdr_lpar, XDR_BUF_SIZE,
+ CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
+ CBE_IOPTE_M);
if (status) {
dev_err(&dev->core, "lv1_gpu_context_iomap failed %d\n",
status);
@@ -686,19 +699,11 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
goto out_free_context;
}
- priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE);
-
- if (!priv->ddr_base) {
- dev_err(&dev->core, "ioremap DDR failed\n");
- error = -ENOMEM;
- goto out_free_context;
- }
-
priv->ctrl = ioremap(ctrl_lpar, 64 * 1024);
if (!priv->ctrl) {
dev_err(&dev->core, "ioremap CTRL failed\n");
error = -ENOMEM;
- goto out_unmap_vram;
+ goto out_unmap_context;
}
priv->reports = ioremap(reports_lpar, reports_size);
@@ -775,8 +780,9 @@ out_unmap_reports:
iounmap(priv->reports);
out_unmap_ctrl:
iounmap(priv->ctrl);
-out_unmap_vram:
- iounmap(priv->ddr_base);
+out_unmap_context:
+ lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF, xdr_lpar,
+ XDR_BUF_SIZE, CBE_IOPTE_M);
out_free_context:
lv1_gpu_context_free(priv->context_handle);
out_free_memory:
@@ -787,14 +793,14 @@ out_free_xdr_buf:
free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
fail_free_priv:
kfree(priv);
- dev->core.driver_data = NULL;
+ ps3_system_bus_set_drvdata(dev, NULL);
fail:
return error;
}
static int ps3vram_remove(struct ps3_system_bus_device *dev)
{
- struct ps3vram_priv *priv = dev->core.driver_data;
+ struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
del_gendisk(priv->gendisk);
put_disk(priv->gendisk);
@@ -803,13 +809,15 @@ static int ps3vram_remove(struct ps3_system_bus_device *dev)
ps3vram_cache_cleanup(dev);
iounmap(priv->reports);
iounmap(priv->ctrl);
- iounmap(priv->ddr_base);
+ lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF,
+ ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)),
+ XDR_BUF_SIZE, CBE_IOPTE_M);
lv1_gpu_context_free(priv->context_handle);
lv1_gpu_memory_free(priv->memory_handle);
ps3_close_hv_device(dev);
free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
kfree(priv);
- dev->core.driver_data = NULL;
+ ps3_system_bus_set_drvdata(dev, NULL);
return 0;
}
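
The ps3vram hunks above trade the per-access mutex for a spinlock-protected bio_list: every caller of the make_request function queues its bio, but only the caller that found the list empty stays behind to drain it, so bios are still handled strictly in order without a sleeping lock. Below is a minimal illustrative sketch of that pattern, not the driver's code; the my_* names, and the assumption that the private data hangs off q->queuedata, are mine, and the bio_list helpers come from <linux/bio.h>.

/*
 * Illustrative sketch only (not part of the patch above): serialize bio
 * handling through a spinlock-protected bio_list instead of a mutex,
 * in the style ps3vram now uses.  All my_* names are hypothetical.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>

struct my_priv {
        spinlock_t      lock;
        struct bio_list list;
};

static struct bio *my_do_bio(struct my_priv *priv, struct bio *bio)
{
        struct bio *next;

        /* ... perform the transfer described by @bio here ... */

        spin_lock_irq(&priv->lock);
        bio_list_pop(&priv->list);              /* remove the bio just served */
        next = bio_list_peek(&priv->list);      /* next queued bio, if any */
        spin_unlock_irq(&priv->lock);

        bio_endio(bio, 0);
        return next;
}

static int my_make_request(struct request_queue *q, struct bio *bio)
{
        struct my_priv *priv = q->queuedata;
        int busy;

        spin_lock_irq(&priv->lock);
        busy = !bio_list_empty(&priv->list);
        bio_list_add(&priv->list, bio);
        spin_unlock_irq(&priv->lock);

        if (busy)               /* another caller is already draining the list */
                return 0;

        do {
                bio = my_do_bio(priv, bio);
        } while (bio);

        return 0;
}

The payoff is that the make_request function can be entered concurrently without blocking: the first thread does all the work while later callers return immediately after queueing.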
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5861e33efe6..cbfd9c0aef0 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -212,11 +212,6 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}
-static void vdc_end_request(struct request *req, int error, int num_sectors)
-{
- __blk_end_request(req, error, num_sectors << 9);
-}
-
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
unsigned int index)
{
@@ -239,7 +234,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
rqe->req = NULL;
- vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9);
+ __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
if (blk_queue_stopped(port->disk->queue))
blk_start_queue(port->disk->queue);
@@ -421,7 +416,7 @@ static int __send_request(struct request *req)
desc->slice = 0;
}
desc->status = ~0;
- desc->offset = (req->sector << 9) / port->vdisk_block_size;
+ desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
desc->size = len;
desc->ncookies = err;
@@ -446,14 +441,13 @@ out:
static void do_vdc_request(struct request_queue *q)
{
while (1) {
- struct request *req = elv_next_request(q);
+ struct request *req = blk_fetch_request(q);
if (!req)
break;
- blkdev_dequeue_request(req);
if (__send_request(req) < 0)
- vdc_end_request(req, -EIO, req->hard_nr_sectors);
+ __blk_end_request_all(req, -EIO);
}
}
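
The sunvdc conversion above is the canonical shape of the block-layer API change applied throughout the rest of this diff: elv_next_request() plus blkdev_dequeue_request() collapse into blk_fetch_request(), request fields such as ->sector and ->hard_nr_sectors are read through blk_rq_pos()/blk_rq_sectors(), and whole requests are completed with __blk_end_request_all(). A hedged sketch of the resulting request function is below; my_submit() is a hypothetical hardware submission hook, not anything from the patch.

/* Sketch of the post-conversion request_fn idiom; my_submit() is a
 * hypothetical per-request hardware submission routine. */
#include <linux/blkdev.h>
#include <linux/errno.h>

static int my_submit(struct request *req)
{
        /* program the hardware using blk_rq_pos(req)/blk_rq_sectors(req) */
        return 0;
}

static void my_request_fn(struct request_queue *q)
{
        struct request *req;

        /* blk_fetch_request() = blk_peek_request() + blk_start_request(),
         * replacing the old elv_next_request()/blkdev_dequeue_request() pair */
        while ((req = blk_fetch_request(q)) != NULL) {
                if (!blk_fs_request(req)) {
                        __blk_end_request_all(req, -EIO);
                        continue;
                }
                if (my_submit(req) < 0)
                        __blk_end_request_all(req, -EIO);
        }
}

The __blk_* completion variants assume the queue lock is already held, which is the case inside a request_fn; the unprefixed blk_end_request_all() takes the lock itself.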
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index d22cc385693..cf7877fb8a7 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -514,7 +514,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
ret = swim_read_sector(fs, side, track, sector,
buffer);
if (try-- == 0)
- return -1;
+ return -EIO;
} while (ret != 512);
buffer += ret;
@@ -528,45 +528,31 @@ static void redo_fd_request(struct request_queue *q)
struct request *req;
struct floppy_state *fs;
- while ((req = elv_next_request(q))) {
+ req = blk_fetch_request(q);
+ while (req) {
+ int err = -EIO;
fs = req->rq_disk->private_data;
- if (req->sector < 0 || req->sector >= fs->total_secs) {
- end_request(req, 0);
- continue;
- }
- if (req->current_nr_sectors == 0) {
- end_request(req, 1);
- continue;
- }
- if (!fs->disk_in) {
- end_request(req, 0);
- continue;
- }
- if (rq_data_dir(req) == WRITE) {
- if (fs->write_protected) {
- end_request(req, 0);
- continue;
- }
- }
+ if (blk_rq_pos(req) >= fs->total_secs)
+ goto done;
+ if (!fs->disk_in)
+ goto done;
+ if (rq_data_dir(req) == WRITE && fs->write_protected)
+ goto done;
+
switch (rq_data_dir(req)) {
case WRITE:
/* NOT IMPLEMENTED */
- end_request(req, 0);
break;
case READ:
- if (floppy_read_sectors(fs, req->sector,
- req->current_nr_sectors,
- req->buffer)) {
- end_request(req, 0);
- continue;
- }
- req->nr_sectors -= req->current_nr_sectors;
- req->sector += req->current_nr_sectors;
- req->buffer += req->current_nr_sectors * 512;
- end_request(req, 1);
+ err = floppy_read_sectors(fs, blk_rq_pos(req),
+ blk_rq_cur_sectors(req),
+ req->buffer);
break;
}
+ done:
+ if (!__blk_end_request_cur(req, err))
+ req = blk_fetch_request(q);
}
}
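
swim above, and the other chunk-at-a-time drivers later in this diff (swim3, xd, z2ram), converge on the loop sketched below: fetch a request, service blk_rq_cur_sectors() worth of data, and let __blk_end_request_cur() decide whether the same request still has chunks left (it returns true) or the next request should be fetched. Error reporting also moves from end_request()'s 0/1 uptodate convention to 0/-errno. The my_xfer() helper here is hypothetical.

/* Sketch of the chunk-at-a-time completion loop; my_xfer() is hypothetical. */
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/errno.h>

static int my_xfer(void *buf, sector_t pos, unsigned int nsect)
{
        /* copy nsect sectors starting at pos into or out of buf */
        return 0;
}

static void my_chunked_request_fn(struct request_queue *q)
{
        struct request *req = blk_fetch_request(q);

        while (req) {
                int err = -EIO;

                if (blk_rq_pos(req) + blk_rq_cur_sectors(req) <=
                    get_capacity(req->rq_disk))
                        err = my_xfer(req->buffer, blk_rq_pos(req),
                                      blk_rq_cur_sectors(req));

                /* Completes only the current chunk; returns false once the
                 * whole request is done, and only then do we fetch the next. */
                if (!__blk_end_request_cur(req, err))
                        req = blk_fetch_request(q);
        }
}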
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 612965307ba..80df93e3cdd 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -251,6 +251,20 @@ static int floppy_release(struct gendisk *disk, fmode_t mode);
static int floppy_check_change(struct gendisk *disk);
static int floppy_revalidate(struct gendisk *disk);
+static bool swim3_end_request(int err, unsigned int nr_bytes)
+{
+ if (__blk_end_request(fd_req, err, nr_bytes))
+ return true;
+
+ fd_req = NULL;
+ return false;
+}
+
+static bool swim3_end_request_cur(int err)
+{
+ return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+}
+
static void swim3_select(struct floppy_state *fs, int sel)
{
struct swim3 __iomem *sw = fs->swim3;
@@ -310,25 +324,27 @@ static void start_request(struct floppy_state *fs)
wake_up(&fs->wait);
return;
}
- while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
+ while (fs->state == idle) {
+ if (!fd_req) {
+ fd_req = blk_fetch_request(swim3_queue);
+ if (!fd_req)
+ break;
+ }
+ req = fd_req;
#if 0
- printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
+ printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
req->rq_disk->disk_name, req->cmd,
- (long)req->sector, req->nr_sectors, req->buffer);
- printk(" errors=%d current_nr_sectors=%ld\n",
- req->errors, req->current_nr_sectors);
+ (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
+ printk(" errors=%d current_nr_sectors=%u\n",
+ req->errors, blk_rq_cur_sectors(req));
#endif
- if (req->sector < 0 || req->sector >= fs->total_secs) {
- end_request(req, 0);
- continue;
- }
- if (req->current_nr_sectors == 0) {
- end_request(req, 1);
+ if (blk_rq_pos(req) >= fs->total_secs) {
+ swim3_end_request_cur(-EIO);
continue;
}
if (fs->ejected) {
- end_request(req, 0);
+ swim3_end_request_cur(-EIO);
continue;
}
@@ -336,18 +352,19 @@ static void start_request(struct floppy_state *fs)
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
- end_request(req, 0);
+ swim3_end_request_cur(-EIO);
continue;
}
}
- /* Do not remove the cast. req->sector is now a sector_t and
- * can be 64 bits, but it will never go past 32 bits for this
- * driver anyway, so we can safely cast it down and not have
- * to do a 64/32 division
+ /* Do not remove the cast. blk_rq_pos(req) is now a
+ * sector_t and can be 64 bits, but it will never go
+ * past 32 bits for this driver anyway, so we can
+ * safely cast it down and not have to do a 64/32
+ * division
*/
- fs->req_cyl = ((long)req->sector) / fs->secpercyl;
- x = ((long)req->sector) % fs->secpercyl;
+ fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
+ x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
fd_req = req;
@@ -424,7 +441,7 @@ static inline void setup_transfer(struct floppy_state *fs)
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
- if (fd_req->current_nr_sectors <= 0) {
+ if (blk_rq_cur_sectors(fd_req) <= 0) {
printk(KERN_ERR "swim3: transfer 0 sectors?\n");
return;
}
@@ -432,8 +449,8 @@ static inline void setup_transfer(struct floppy_state *fs)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
- if (n > fd_req->current_nr_sectors)
- n = fd_req->current_nr_sectors;
+ if (n > blk_rq_cur_sectors(fd_req))
+ n = blk_rq_cur_sectors(fd_req);
}
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
@@ -508,7 +525,7 @@ static void act(struct floppy_state *fs)
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
return;
}
@@ -540,7 +557,7 @@ static void scan_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
} else {
@@ -559,7 +576,7 @@ static void seek_timeout(unsigned long data)
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
printk(KERN_ERR "swim3: seek timeout\n");
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
@@ -583,7 +600,7 @@ static void settle_timeout(unsigned long data)
return;
}
printk(KERN_ERR "swim3: seek settle timeout\n");
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
@@ -593,8 +610,6 @@ static void xfer_timeout(unsigned long data)
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
- struct dbdma_cmd *cp = fs->dma_cmd;
- unsigned long s;
int n;
fs->timeout_pending = 0;
@@ -605,17 +620,10 @@ static void xfer_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
- if (rq_data_dir(fd_req) == WRITE)
- ++cp;
- if (ld_le16(&cp->xfer_status) != 0)
- s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
- else
- s = 0;
- fd_req->sector += s;
- fd_req->current_nr_sectors -= s;
printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
- (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
- end_request(fd_req, 0);
+ (rq_data_dir(fd_req)==WRITE? "writ": "read"),
+ (long)blk_rq_pos(fd_req));
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
@@ -646,7 +654,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
} else {
@@ -719,9 +727,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
- fd_req->sector += n;
- fd_req->current_nr_sectors -= n;
- fd_req->buffer += n * 512;
+ blk_update_request(fd_req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
@@ -730,8 +736,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
} else {
printk("swim3: error %sing block %ld (err=%x)\n",
rq_data_dir(fd_req) == WRITE? "writ": "read",
- (long)fd_req->sector, err);
- end_request(fd_req, 0);
+ (long)blk_rq_pos(fd_req), err);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
}
} else {
@@ -740,18 +746,12 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(fd_req), intr, err);
- end_request(fd_req, 0);
+ swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
break;
}
- fd_req->sector += fs->scount;
- fd_req->current_nr_sectors -= fs->scount;
- fd_req->buffer += fs->scount * 512;
- if (fd_req->current_nr_sectors <= 0) {
- end_request(fd_req, 1);
- fs->state = idle;
- } else {
+ if (swim3_end_request(0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
@@ -761,7 +761,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
}
}
act(fs);
- }
+ } else
+ fs->state = idle;
}
if (fs->state == idle)
start_request(fs);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index ff0448e4bf0..da403b6a7f4 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
struct request *req = crq->rq;
int rc;
- rc = __blk_end_request(req, error, blk_rq_bytes(req));
- assert(rc == 0);
+ __blk_end_request_all(req, error);
rc = carm_put_request(host, crq);
assert(rc == 0);
@@ -811,12 +810,10 @@ static void carm_oob_rq_fn(struct request_queue *q)
while (1) {
DPRINTK("get req\n");
- rq = elv_next_request(q);
+ rq = blk_fetch_request(q);
if (!rq)
break;
- blkdev_dequeue_request(rq);
-
crq = rq->special;
assert(crq != NULL);
assert(crq->rq == rq);
@@ -847,7 +844,7 @@ static void carm_rq_fn(struct request_queue *q)
queue_one_request:
VPRINTK("get req\n");
- rq = elv_next_request(q);
+ rq = blk_peek_request(q);
if (!rq)
return;
@@ -858,7 +855,7 @@ queue_one_request:
}
crq->rq = rq;
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
if (rq_data_dir(rq) == WRITE) {
writing = 1;
@@ -904,10 +901,10 @@ queue_one_request:
msg->sg_count = n_elem;
msg->sg_type = SGT_32BIT;
msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag));
- msg->lba = cpu_to_le32(rq->sector & 0xffffffff);
- tmp = (rq->sector >> 16) >> 16;
+ msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
+ tmp = (blk_rq_pos(rq) >> 16) >> 16;
msg->lba_high = cpu_to_le16( (u16) tmp );
- msg->lba_count = cpu_to_le16(rq->nr_sectors);
+ msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
for (i = 0; i < n_elem; i++) {
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 689cd27ac89..cc54473b8e7 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -360,8 +360,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
-static void ub_end_rq(struct request *rq, unsigned int status,
- unsigned int cmd_len);
+static void ub_end_rq(struct request *rq, unsigned int status);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -627,7 +626,7 @@ static void ub_request_fn(struct request_queue *q)
struct ub_lun *lun = q->queuedata;
struct request *rq;
- while ((rq = elv_next_request(q)) != NULL) {
+ while ((rq = blk_peek_request(q)) != NULL) {
if (ub_request_fn_1(lun, rq) != 0) {
blk_stop_queue(q);
break;
@@ -643,14 +642,14 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
int n_elem;
if (atomic_read(&sc->poison)) {
- blkdev_dequeue_request(rq);
- ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
+ blk_start_request(rq);
+ ub_end_rq(rq, DID_NO_CONNECT << 16);
return 0;
}
if (lun->changed && !blk_pc_request(rq)) {
- blkdev_dequeue_request(rq);
- ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq));
+ blk_start_request(rq);
+ ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
return 0;
}
@@ -660,7 +659,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
return -1;
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
urq = &lun->urq;
memset(urq, 0, sizeof(struct ub_request));
@@ -702,7 +701,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
drop:
ub_put_cmd(lun, cmd);
- ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq));
+ ub_end_rq(rq, DID_ERROR << 16);
return 0;
}
@@ -723,11 +722,11 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
/*
* build the command
*
- * The call to blk_queue_hardsect_size() guarantees that request
+ * The call to blk_queue_logical_block_size() guarantees that request
* is aligned, but it is given in terms of 512 byte units, always.
*/
- block = rq->sector >> lun->capacity.bshift;
- nblks = rq->nr_sectors >> lun->capacity.bshift;
+ block = blk_rq_pos(rq) >> lun->capacity.bshift;
+ nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
@@ -739,7 +738,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
cmd->cdb[8] = nblks;
cmd->cdb_len = 10;
- cmd->len = rq->nr_sectors * 512;
+ cmd->len = blk_rq_bytes(rq);
}
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
@@ -747,7 +746,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
{
struct request *rq = urq->rq;
- if (rq->data_len == 0) {
+ if (blk_rq_bytes(rq) == 0) {
cmd->dir = UB_DIR_NONE;
} else {
if (rq_data_dir(rq) == WRITE)
@@ -762,7 +761,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
cmd->cdb_len = rq->cmd_len;
- cmd->len = rq->data_len;
+ cmd->len = blk_rq_bytes(rq);
/*
* To reapply this to every URB is not as incorrect as it looks.
@@ -777,16 +776,15 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
struct ub_request *urq = cmd->back;
struct request *rq;
unsigned int scsi_status;
- unsigned int cmd_len;
rq = urq->rq;
if (cmd->error == 0) {
if (blk_pc_request(rq)) {
- if (cmd->act_len >= rq->data_len)
- rq->data_len = 0;
+ if (cmd->act_len >= rq->resid_len)
+ rq->resid_len = 0;
else
- rq->data_len -= cmd->act_len;
+ rq->resid_len -= cmd->act_len;
scsi_status = 0;
} else {
if (cmd->act_len != cmd->len) {
@@ -818,17 +816,14 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
urq->rq = NULL;
- cmd_len = cmd->len;
ub_put_cmd(lun, cmd);
- ub_end_rq(rq, scsi_status, cmd_len);
+ ub_end_rq(rq, scsi_status);
blk_start_queue(lun->disk->queue);
}
-static void ub_end_rq(struct request *rq, unsigned int scsi_status,
- unsigned int cmd_len)
+static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
int error;
- long rqlen;
if (scsi_status == 0) {
error = 0;
@@ -836,12 +831,7 @@ static void ub_end_rq(struct request *rq, unsigned int scsi_status,
error = -EIO;
rq->errors = scsi_status;
}
- rqlen = blk_rq_bytes(rq); /* Oddly enough, this is the residue. */
- if (__blk_end_request(rq, error, cmd_len)) {
- printk(KERN_WARNING DRV_NAME
- ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
- blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
- }
+ __blk_end_request_all(rq, error);
}
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
@@ -1759,7 +1749,7 @@ static int ub_bd_revalidate(struct gendisk *disk)
ub_revalidate(lun->udev, lun);
/* XXX Support sector size switching like in sr.c */
- blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
+ blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
set_capacity(disk, lun->capacity.nsec);
// set_disk_ro(sdkp->disk, lun->readonly);
@@ -2334,7 +2324,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
blk_queue_max_sectors(q, UB_MAX_SECTORS);
- blk_queue_hardsect_size(q, lun->capacity.bsize);
+ blk_queue_logical_block_size(q, lun->capacity.bsize);
lun->disk = disk;
q->queuedata = lun;
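
Drivers that may decline a request for the moment, such as sx8 and ub above, use the two-step form instead: blk_peek_request() looks at the head of the queue without dequeueing, and blk_start_request() dequeues it only once the driver has committed to it, for example after a command slot has been allocated. A minimal sketch, with my_alloc_cmd() and my_issue() as hypothetical driver hooks:

/* Sketch of the peek-then-start idiom used by drivers that can defer
 * a request; my_alloc_cmd() and my_issue() are hypothetical. */
#include <linux/blkdev.h>

struct my_cmd { struct request *rq; };

static struct my_cmd *my_alloc_cmd(void)
{
        return NULL;    /* NULL simulates running out of command slots */
}

static void my_issue(struct my_cmd *cmd)
{
        /* hand cmd to the hardware */
}

static void my_deferring_request_fn(struct request_queue *q)
{
        struct request *rq;
        struct my_cmd *cmd;

        while ((rq = blk_peek_request(q)) != NULL) {
                cmd = my_alloc_cmd();
                if (!cmd) {
                        /* leave rq on the queue; retry when resources free up */
                        blk_stop_queue(q);
                        break;
                }
                /* now commit: dequeue the request and mark it in flight */
                blk_start_request(rq);
                cmd->rq = rq;
                my_issue(cmd);
        }
}

Completion later ends the request with __blk_end_request_all() (or a byte-counted variant for partial completion) and restarts the queue with blk_start_queue().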
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index ecccf65dce2..390d69bb7c4 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -252,7 +252,7 @@ static int send_request(struct request *req)
struct viodasd_device *d;
unsigned long flags;
- start = (u64)req->sector << 9;
+ start = (u64)blk_rq_pos(req) << 9;
if (rq_data_dir(req) == READ) {
direction = DMA_FROM_DEVICE;
@@ -361,19 +361,17 @@ static void do_viodasd_request(struct request_queue *q)
* back later.
*/
while (num_req_outstanding < VIOMAXREQ) {
- req = elv_next_request(q);
+ req = blk_fetch_request(q);
if (req == NULL)
return;
- /* dequeue the current request from the queue */
- blkdev_dequeue_request(req);
/* check that request contains a valid command */
if (!blk_fs_request(req)) {
- viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+ viodasd_end_request(req, -EIO, blk_rq_sectors(req));
continue;
}
/* Try sending the request */
if (send_request(req) != 0)
- viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+ viodasd_end_request(req, -EIO, blk_rq_sectors(req));
}
}
@@ -590,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
event->xRc, bevent->sub_result, err->msg);
- num_sect = req->hard_nr_sectors;
+ num_sect = blk_rq_sectors(req);
}
qlock = req->q->queue_lock;
spin_lock_irqsave(qlock, irq_flags);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5d34764c8a8..43db3ea15b5 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -37,6 +37,7 @@ struct virtblk_req
struct list_head list;
struct request *req;
struct virtio_blk_outhdr out_hdr;
+ struct virtio_scsi_inhdr in_hdr;
u8 status;
};
@@ -50,6 +51,7 @@ static void blk_done(struct virtqueue *vq)
spin_lock_irqsave(&vblk->lock, flags);
while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
int error;
+
switch (vbr->status) {
case VIRTIO_BLK_S_OK:
error = 0;
@@ -62,7 +64,13 @@ static void blk_done(struct virtqueue *vq)
break;
}
- __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
+ if (blk_pc_request(vbr->req)) {
+ vbr->req->resid_len = vbr->in_hdr.residual;
+ vbr->req->sense_len = vbr->in_hdr.sense_len;
+ vbr->req->errors = vbr->in_hdr.errors;
+ }
+
+ __blk_end_request_all(vbr->req, error);
list_del(&vbr->list);
mempool_free(vbr, vblk->pool);
}
@@ -74,7 +82,7 @@ static void blk_done(struct virtqueue *vq)
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
struct request *req)
{
- unsigned long num, out, in;
+ unsigned long num, out = 0, in = 0;
struct virtblk_req *vbr;
vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
@@ -85,7 +93,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
vbr->req = req;
if (blk_fs_request(vbr->req)) {
vbr->out_hdr.type = 0;
- vbr->out_hdr.sector = vbr->req->sector;
+ vbr->out_hdr.sector = blk_rq_pos(vbr->req);
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
} else if (blk_pc_request(vbr->req)) {
vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
@@ -99,18 +107,36 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
if (blk_barrier_rq(vbr->req))
vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
- sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
- num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
- sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
+ sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
- if (rq_data_dir(vbr->req) == WRITE) {
- vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
- out = 1 + num;
- in = 1;
- } else {
- vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
- out = 1;
- in = 1 + num;
+ /*
+ * If this is a packet command we need a couple of additional headers.
+ * Behind the normal outhdr we put a segment with the scsi command
+ * block, and before the normal inhdr we put the sense data followed
+ * by the inhdr carrying the additional status information.
+ */
+ if (blk_pc_request(vbr->req))
+ sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
+
+ num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
+
+ if (blk_pc_request(vbr->req)) {
+ sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
+ sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
+ sizeof(vbr->in_hdr));
+ }
+
+ sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
+ sizeof(vbr->status));
+
+ if (num) {
+ if (rq_data_dir(vbr->req) == WRITE) {
+ vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+ out += num;
+ } else {
+ vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+ in += num;
+ }
}
if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
@@ -124,12 +150,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
static void do_virtblk_request(struct request_queue *q)
{
- struct virtio_blk *vblk = NULL;
+ struct virtio_blk *vblk = q->queuedata;
struct request *req;
unsigned int issued = 0;
- while ((req = elv_next_request(q)) != NULL) {
- vblk = req->rq_disk->private_data;
+ while ((req = blk_peek_request(q)) != NULL) {
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
/* If this request fails, stop queue and wait for something to
@@ -138,7 +163,7 @@ static void do_virtblk_request(struct request_queue *q)
blk_stop_queue(q);
break;
}
- blkdev_dequeue_request(req);
+ blk_start_request(req);
issued++;
}
@@ -146,12 +171,51 @@ static void do_virtblk_request(struct request_queue *q)
vblk->vq->vq_ops->kick(vblk->vq);
}
+/* return ATA identify data
+ */
+static int virtblk_identify(struct gendisk *disk, void *argp)
+{
+ struct virtio_blk *vblk = disk->private_data;
+ void *opaque;
+ int err = -ENOMEM;
+
+ opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+ if (!opaque)
+ goto out;
+
+ err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY,
+ offsetof(struct virtio_blk_config, identify), opaque,
+ VIRTIO_BLK_ID_BYTES);
+
+ if (err)
+ goto out_kfree;
+
+ if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES))
+ err = -EFAULT;
+
+out_kfree:
+ kfree(opaque);
+out:
+ return err;
+}
+
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
- return scsi_cmd_ioctl(bdev->bd_disk->queue,
- bdev->bd_disk, mode, cmd,
- (void __user *)data);
+ struct gendisk *disk = bdev->bd_disk;
+ struct virtio_blk *vblk = disk->private_data;
+ void __user *argp = (void __user *)data;
+
+ if (cmd == HDIO_GET_IDENTITY)
+ return virtblk_identify(disk, argp);
+
+ /*
+ * Only allow the generic SCSI ioctls if the host can support it.
+ */
+ if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
+ return -ENOIOCTLCMD;
+
+ return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
@@ -190,7 +254,7 @@ static int index_to_minor(int index)
return index << PART_BITS;
}
-static int virtblk_probe(struct virtio_device *vdev)
+static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
int err;
@@ -224,7 +288,7 @@ static int virtblk_probe(struct virtio_device *vdev)
sg_init_table(vblk->sg, vblk->sg_elems);
/* We expect one virtqueue, for output. */
- vblk->vq = vdev->config->find_vq(vdev, 0, blk_done);
+ vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
if (IS_ERR(vblk->vq)) {
err = PTR_ERR(vblk->vq);
goto out_free_vblk;
@@ -249,6 +313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_put_disk;
}
+ vblk->disk->queue->queuedata = vblk;
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
if (index < 26) {
@@ -313,7 +378,7 @@ static int virtblk_probe(struct virtio_device *vdev)
offsetof(struct virtio_blk_config, blk_size),
&blk_size);
if (!err)
- blk_queue_hardsect_size(vblk->disk->queue, blk_size);
+ blk_queue_logical_block_size(vblk->disk->queue, blk_size);
add_disk(vblk->disk);
return 0;
@@ -323,14 +388,14 @@ out_put_disk:
out_mempool:
mempool_destroy(vblk->pool);
out_free_vq:
- vdev->config->del_vq(vblk->vq);
+ vdev->config->del_vqs(vdev);
out_free_vblk:
kfree(vblk);
out:
return err;
}
-static void virtblk_remove(struct virtio_device *vdev)
+static void __devexit virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
@@ -344,7 +409,7 @@ static void virtblk_remove(struct virtio_device *vdev)
blk_cleanup_queue(vblk->disk->queue);
put_disk(vblk->disk);
mempool_destroy(vblk->pool);
- vdev->config->del_vq(vblk->vq);
+ vdev->config->del_vqs(vdev);
kfree(vblk);
}
@@ -356,6 +421,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
+ VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY
};
static struct virtio_driver virtio_blk = {
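
The virtio_blk changes above add SCSI passthrough: for a blk_pc request the command block is placed right after the out-header, and the sense buffer plus a struct virtio_scsi_inhdr go ahead of the status byte, while the data segments mapped by blk_rq_map_sg() are counted as device-readable for writes and device-writable for reads. A sketch of just that layout logic follows; struct my_vbr is a stand-in for the driver's private virtblk_req, and the caller is assumed to have sized sg[] generously.

/* Sketch of the descriptor layout virtio-blk now builds per request.
 * struct my_vbr mirrors the driver's virtblk_req; bounds checking of
 * the sg[] array is assumed to happen elsewhere. */
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/virtio_blk.h>

struct my_vbr {
        struct request *req;
        struct virtio_blk_outhdr out_hdr;
        struct virtio_scsi_inhdr in_hdr;
        u8 status;
};

static void my_fill_sg(struct request_queue *q, struct scatterlist *sg,
                       struct my_vbr *vbr, unsigned long *outp,
                       unsigned long *inp)
{
        unsigned long num, out = 0, in = 0;

        /* driver-to-device header always comes first */
        sg_set_buf(&sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));

        /* SCSI passthrough: the CDB follows the out header */
        if (blk_pc_request(vbr->req))
                sg_set_buf(&sg[out++], vbr->req->cmd, vbr->req->cmd_len);

        /* data pages; their direction is decided below */
        num = blk_rq_map_sg(q, vbr->req, sg + out);

        /* SCSI passthrough: sense buffer and extended in-header precede
         * the status byte in the device-to-driver part */
        if (blk_pc_request(vbr->req)) {
                sg_set_buf(&sg[num + out + in++], vbr->req->sense, 96 /* SCSI_SENSE_BUFFERSIZE */);
                sg_set_buf(&sg[num + out + in++], &vbr->in_hdr,
                           sizeof(vbr->in_hdr));
        }

        /* status byte is always last */
        sg_set_buf(&sg[num + out + in++], &vbr->status, sizeof(vbr->status));

        /* data segments count as "out" for writes, "in" for reads */
        if (num) {
                if (rq_data_dir(vbr->req) == WRITE)
                        out += num;
                else
                        in += num;
        }

        *outp = out;    /* descriptors the device may only read */
        *inp = in;      /* descriptors the device may write */
}

The resulting out and in counts are what gets passed to vq_ops->add_buf(vq, sg, out, in, vbr), which is how the device learns which descriptors it is allowed to write.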
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 64b496fce98..ce242921992 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -305,30 +305,25 @@ static void do_xd_request (struct request_queue * q)
if (xdc_busy)
return;
- while ((req = elv_next_request(q)) != NULL) {
- unsigned block = req->sector;
- unsigned count = req->nr_sectors;
- int rw = rq_data_dir(req);
+ req = blk_fetch_request(q);
+ while (req) {
+ unsigned block = blk_rq_pos(req);
+ unsigned count = blk_rq_cur_sectors(req);
XD_INFO *disk = req->rq_disk->private_data;
- int res = 0;
+ int res = -EIO;
int retry;
- if (!blk_fs_request(req)) {
- end_request(req, 0);
- continue;
- }
- if (block + count > get_capacity(req->rq_disk)) {
- end_request(req, 0);
- continue;
- }
- if (rw != READ && rw != WRITE) {
- printk("do_xd_request: unknown request\n");
- end_request(req, 0);
- continue;
- }
+ if (!blk_fs_request(req))
+ goto done;
+ if (block + count > get_capacity(req->rq_disk))
+ goto done;
for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
- res = xd_readwrite(rw, disk, req->buffer, block, count);
- end_request(req, res); /* wrap up, 0 = fail, 1 = success */
+ res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
+ block, count);
+ done:
+ /* wrap up, 0 = success, -errno = fail */
+ if (!__blk_end_request_cur(req, res))
+ req = blk_fetch_request(q);
}
}
@@ -418,7 +413,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
xd_recalibrate(drive);
spin_lock_irq(&xd_lock);
- return (0);
+ return -EIO;
case 2:
if (sense[0] & 0x30) {
printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
@@ -439,7 +434,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
else
printk(" - no valid disk address\n");
spin_lock_irq(&xd_lock);
- return (0);
+ return -EIO;
}
if (xd_dma_buffer)
for (i=0; i < (temp * 0x200); i++)
@@ -448,7 +443,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
count -= temp, buffer += temp * 0x200, block += temp;
}
spin_lock_irq(&xd_lock);
- return (1);
+ return 0;
}
/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8f905089b72..e53284767f7 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -122,7 +122,7 @@ static DEFINE_SPINLOCK(blkif_io_lock);
static int get_id_from_freelist(struct blkfront_info *info)
{
unsigned long free = info->shadow_free;
- BUG_ON(free > BLK_RING_SIZE);
+ BUG_ON(free >= BLK_RING_SIZE);
info->shadow_free = info->shadow[free].req.id;
info->shadow[free].req.id = 0x0fffffee; /* debug */
return free;
@@ -231,7 +231,7 @@ static int blkif_queue_request(struct request *req)
info->shadow[id].request = (unsigned long)req;
ring_req->id = id;
- ring_req->sector_number = (blkif_sector_t)req->sector;
+ ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
@@ -299,25 +299,25 @@ static void do_blkif_request(struct request_queue *rq)
queued = 0;
- while ((req = elv_next_request(rq)) != NULL) {
+ while ((req = blk_peek_request(rq)) != NULL) {
info = req->rq_disk->private_data;
- if (!blk_fs_request(req)) {
- end_request(req, 0);
- continue;
- }
if (RING_FULL(&info->ring))
goto wait;
- pr_debug("do_blk_req %p: cmd %p, sec %lx, "
- "(%u/%li) buffer:%p [%s]\n",
- req, req->cmd, (unsigned long)req->sector,
- req->current_nr_sectors,
- req->nr_sectors, req->buffer,
- rq_data_dir(req) ? "write" : "read");
+ blk_start_request(req);
+ if (!blk_fs_request(req)) {
+ __blk_end_request_all(req, -EIO);
+ continue;
+ }
+
+ pr_debug("do_blk_req %p: cmd %p, sec %lx, "
+ "(%u/%u) buffer:%p [%s]\n",
+ req, req->cmd, (unsigned long)blk_rq_pos(req),
+ blk_rq_cur_sectors(req), blk_rq_sectors(req),
+ req->buffer, rq_data_dir(req) ? "write" : "read");
- blkdev_dequeue_request(req);
if (blkif_queue_request(req)) {
blk_requeue_request(rq, req);
wait:
@@ -344,7 +344,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
/* Hard sector size and max sectors impersonate the equiv. hardware. */
- blk_queue_hardsect_size(rq, sector_size);
+ blk_queue_logical_block_size(rq, sector_size);
blk_queue_max_sectors(rq, 512);
/* Each segment in a request is up to an aligned page in size. */
@@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
for (i = info->ring.rsp_cons; i != rp; i++) {
unsigned long id;
- int ret;
bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
@@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
"request: %x\n", bret->status);
- ret = __blk_end_request(req, error, blk_rq_bytes(req));
- BUG_ON(ret);
+ __blk_end_request_all(req, error);
break;
default:
BUG();
@@ -755,12 +753,12 @@ static int blkfront_probe(struct xenbus_device *dev,
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
- dev->dev.driver_data = info;
+ dev_set_drvdata(&dev->dev, info);
err = talk_to_backend(dev, info);
if (err) {
kfree(info);
- dev->dev.driver_data = NULL;
+ dev_set_drvdata(&dev->dev, NULL);
return err;
}
@@ -845,7 +843,7 @@ static int blkif_recover(struct blkfront_info *info)
*/
static int blkfront_resume(struct xenbus_device *dev)
{
- struct blkfront_info *info = dev->dev.driver_data;
+ struct blkfront_info *info = dev_get_drvdata(&dev->dev);
int err;
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
@@ -924,7 +922,7 @@ static void blkfront_connect(struct blkfront_info *info)
*/
static void blkfront_closing(struct xenbus_device *dev)
{
- struct blkfront_info *info = dev->dev.driver_data;
+ struct blkfront_info *info = dev_get_drvdata(&dev->dev);
unsigned long flags;
dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);
@@ -934,8 +932,6 @@ static void blkfront_closing(struct xenbus_device *dev)
spin_lock_irqsave(&blkif_io_lock, flags);
- del_gendisk(info->gd);
-
/* No more blkif_request(). */
blk_stop_queue(info->rq);
@@ -949,6 +945,8 @@ static void blkfront_closing(struct xenbus_device *dev)
blk_cleanup_queue(info->rq);
info->rq = NULL;
+ del_gendisk(info->gd);
+
out:
xenbus_frontend_closed(dev);
}
@@ -959,7 +957,7 @@ static void blkfront_closing(struct xenbus_device *dev)
static void backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
- struct blkfront_info *info = dev->dev.driver_data;
+ struct blkfront_info *info = dev_get_drvdata(&dev->dev);
struct block_device *bd;
dev_dbg(&dev->dev, "blkfront:backend_changed.\n");
@@ -977,8 +975,10 @@ static void backend_changed(struct xenbus_device *dev,
break;
case XenbusStateClosing:
- if (info->gd == NULL)
- xenbus_dev_fatal(dev, -ENODEV, "gd is NULL");
+ if (info->gd == NULL) {
+ xenbus_frontend_closed(dev);
+ break;
+ }
bd = bdget_disk(info->gd, 0);
if (bd == NULL)
xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
@@ -997,7 +997,7 @@ static void backend_changed(struct xenbus_device *dev,
static int blkfront_remove(struct xenbus_device *dev)
{
- struct blkfront_info *info = dev->dev.driver_data;
+ struct blkfront_info *info = dev_get_drvdata(&dev->dev);
dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);
@@ -1010,7 +1010,7 @@ static int blkfront_remove(struct xenbus_device *dev)
static int blkfront_is_ready(struct xenbus_device *dev)
{
- struct blkfront_info *info = dev->dev.driver_data;
+ struct blkfront_info *info = dev_get_drvdata(&dev->dev);
return info->is_ready;
}
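
xen-blkfront above, like ps3vram earlier in this diff, stops reaching into dev->dev.driver_data and instead goes through dev_get_drvdata()/dev_set_drvdata() (or a bus-specific wrapper such as ps3_system_bus_get_drvdata()), so drivers keep working if the driver-core field moves or changes type. The pattern, in a minimal hypothetical probe/callback pair:

/* Minimal sketch of the driver-data accessor pattern; struct my_info
 * and the callbacks are illustrative only. */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_info { int handle; };

static int my_probe(struct device *dev)
{
        struct my_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

        if (!info)
                return -ENOMEM;
        dev_set_drvdata(dev, info);     /* instead of dev->driver_data = info */
        return 0;
}

static void my_other_callback(struct device *dev)
{
        struct my_info *info = dev_get_drvdata(dev);

        /* ... use info ... */
        (void)info;
}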
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 4aecf5dc6a9..f08491a3a81 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -463,10 +463,11 @@ struct request *ace_get_next_request(struct request_queue * q)
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
+ while ((req = blk_peek_request(q)) != NULL) {
if (blk_fs_request(req))
break;
- end_request(req, 0);
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
}
return req;
}
@@ -492,9 +493,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
set_capacity(ace->gd, 0);
dev_info(ace->dev, "No CF in slot\n");
- /* Drop all pending requests */
- while ((req = elv_next_request(ace->queue)) != NULL)
- end_request(req, 0);
+ /* Drop all in-flight and pending requests */
+ if (ace->req) {
+ __blk_end_request_all(ace->req, -EIO);
+ ace->req = NULL;
+ }
+ while ((req = blk_fetch_request(ace->queue)) != NULL)
+ __blk_end_request_all(req, -EIO);
/* Drop back to IDLE state and notify waiters */
ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -642,19 +647,21 @@ static void ace_fsm_dostate(struct ace_device *ace)
ace->fsm_state = ACE_FSM_STATE_IDLE;
break;
}
+ blk_start_request(req);
/* Okay, it's a data request, set it up for transfer */
dev_dbg(ace->dev,
- "request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n",
- (unsigned long long) req->sector, req->hard_nr_sectors,
- req->current_nr_sectors, rq_data_dir(req));
+ "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
+ (unsigned long long)blk_rq_pos(req),
+ blk_rq_sectors(req), blk_rq_cur_sectors(req),
+ rq_data_dir(req));
ace->req = req;
ace->data_ptr = req->buffer;
- ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
- ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
+ ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
+ ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
- count = req->hard_nr_sectors;
+ count = blk_rq_sectors(req);
if (rq_data_dir(req)) {
/* Kick off write request */
dev_dbg(ace->dev, "write data\n");
@@ -688,7 +695,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
dev_dbg(ace->dev,
"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
ace->fsm_task, ace->fsm_iter_num,
- ace->req->current_nr_sectors * 16,
+ blk_rq_cur_sectors(ace->req) * 16,
ace->data_count, ace->in_irq);
ace_fsm_yield(ace); /* need to poll CFBSY bit */
break;
@@ -697,7 +704,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
dev_dbg(ace->dev,
"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
ace->fsm_task, ace->fsm_iter_num,
- ace->req->current_nr_sectors * 16,
+ blk_rq_cur_sectors(ace->req) * 16,
ace->data_count, ace->in_irq);
ace_fsm_yieldirq(ace);
break;
@@ -717,14 +724,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
}
/* bio finished; is there another one? */
- if (__blk_end_request(ace->req, 0,
- blk_rq_cur_bytes(ace->req))) {
- /* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
- * ace->req->hard_nr_sectors,
- * ace->req->current_nr_sectors);
+ if (__blk_end_request_cur(ace->req, 0)) {
+ /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
+ * blk_rq_sectors(ace->req),
+ * blk_rq_cur_sectors(ace->req));
*/
ace->data_ptr = ace->req->buffer;
- ace->data_count = ace->req->current_nr_sectors * 16;
+ ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
ace_fsm_yieldirq(ace);
break;
}
@@ -978,7 +984,7 @@ static int __devinit ace_setup(struct ace_device *ace)
ace->queue = blk_init_queue(ace_request, &ace->lock);
if (ace->queue == NULL)
goto err_blk_initq;
- blk_queue_hardsect_size(ace->queue, 512);
+ blk_queue_logical_block_size(ace->queue, 512);
/*
* Allocate and initialize GD structure
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 80754cdd311..4575171e5be 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -70,15 +70,18 @@ static struct gendisk *z2ram_gendisk;
static void do_z2_request(struct request_queue *q)
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
- unsigned long start = req->sector << 9;
- unsigned long len = req->current_nr_sectors << 9;
+
+ req = blk_fetch_request(q);
+ while (req) {
+ unsigned long start = blk_rq_pos(req) << 9;
+ unsigned long len = blk_rq_cur_bytes(req);
+ int err = 0;
if (start + len > z2ram_size) {
printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
- req->sector, req->current_nr_sectors);
- end_request(req, 0);
- continue;
+ blk_rq_pos(req), blk_rq_cur_sectors(req));
+ err = -EIO;
+ goto done;
}
while (len) {
unsigned long addr = start & Z2RAM_CHUNKMASK;
@@ -93,7 +96,9 @@ static void do_z2_request(struct request_queue *q)
start += size;
len -= size;
}
- end_request(req, 1);
+ done:
+ if (!__blk_end_request_cur(req, err))
+ req = blk_fetch_request(q);
}
}
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 901bdd95655..2cc7b3266ea 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -415,6 +415,8 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
hdev->stat.sco_tx++;
nsh.type = 0x83;
break;
+ default:
+ return -EILSEQ;
};
nsh.zero = 0;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index af761dc434f..4895f0e0532 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -277,8 +277,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
/* FIXME: why is this needed. Note don't use ldisc_ref here as the
open path is before the ldisc is referencable */
- if (tty->ldisc.ops->flush_buffer)
- tty->ldisc.ops->flush_buffer(tty);
+ if (tty->ldisc->ops->flush_buffer)
+ tty->ldisc->ops->flush_buffer(tty);
tty_driver_flush_buffer(tty);
return 0;
@@ -463,7 +463,6 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
clear_bit(HCI_UART_PROTO_SET, &hu->flags);
return err;
}
- tty->low_latency = 1;
} else
return -EBUSY;
break;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 0bbefba6469..1df9dda2e37 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -40,7 +40,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#define VERSION "1.2"
+#define VERSION "1.3"
static int minor = MISC_DYNAMIC_MINOR;
@@ -51,14 +51,8 @@ struct vhci_data {
wait_queue_head_t read_wait;
struct sk_buff_head readq;
-
- struct fasync_struct *fasync;
};
-#define VHCI_FASYNC 0x0010
-
-static struct miscdevice vhci_miscdev;
-
static int vhci_open_dev(struct hci_dev *hdev)
{
set_bit(HCI_RUNNING, &hdev->flags);
@@ -105,9 +99,6 @@ static int vhci_send_frame(struct sk_buff *skb)
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
skb_queue_tail(&data->readq, skb);
- if (data->flags & VHCI_FASYNC)
- kill_fasync(&data->fasync, SIGIO, POLL_IN);
-
wake_up_interruptible(&data->read_wait);
return 0;
@@ -179,41 +170,31 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
static ssize_t vhci_read(struct file *file,
char __user *buf, size_t count, loff_t *pos)
{
- DECLARE_WAITQUEUE(wait, current);
struct vhci_data *data = file->private_data;
struct sk_buff *skb;
ssize_t ret = 0;
- add_wait_queue(&data->read_wait, &wait);
while (count) {
- set_current_state(TASK_INTERRUPTIBLE);
-
skb = skb_dequeue(&data->readq);
- if (!skb) {
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
-
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- schedule();
- continue;
+ if (skb) {
+ ret = vhci_put_user(data, skb, buf, count);
+ if (ret < 0)
+ skb_queue_head(&data->readq, skb);
+ else
+ kfree_skb(skb);
+ break;
}
- if (access_ok(VERIFY_WRITE, buf, count))
- ret = vhci_put_user(data, skb, buf, count);
- else
- ret = -EFAULT;
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
- kfree_skb(skb);
- break;
+ ret = wait_event_interruptible(data->read_wait,
+ !skb_queue_empty(&data->readq));
+ if (ret < 0)
+ break;
}
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&data->read_wait, &wait);
return ret;
}
@@ -223,9 +204,6 @@ static ssize_t vhci_write(struct file *file,
{
struct vhci_data *data = file->private_data;
- if (!access_ok(VERIFY_READ, buf, count))
- return -EFAULT;
-
return vhci_get_user(data, buf, count);
}
@@ -259,11 +237,9 @@ static int vhci_open(struct inode *inode, struct file *file)
skb_queue_head_init(&data->readq);
init_waitqueue_head(&data->read_wait);
- lock_kernel();
hdev = hci_alloc_dev();
if (!hdev) {
kfree(data);
- unlock_kernel();
return -ENOMEM;
}
@@ -284,12 +260,10 @@ static int vhci_open(struct inode *inode, struct file *file)
BT_ERR("Can't register HCI device");
kfree(data);
hci_free_dev(hdev);
- unlock_kernel();
return -EBUSY;
}
file->private_data = data;
- unlock_kernel();
return nonseekable_open(inode, file);
}
@@ -310,48 +284,25 @@ static int vhci_release(struct inode *inode, struct file *file)
return 0;
}
-static int vhci_fasync(int fd, struct file *file, int on)
-{
- struct vhci_data *data = file->private_data;
- int err = 0;
-
- lock_kernel();
- err = fasync_helper(fd, file, on, &data->fasync);
- if (err < 0)
- goto out;
-
- if (on)
- data->flags |= VHCI_FASYNC;
- else
- data->flags &= ~VHCI_FASYNC;
-
-out:
- unlock_kernel();
- return err;
-}
-
static const struct file_operations vhci_fops = {
- .owner = THIS_MODULE,
.read = vhci_read,
.write = vhci_write,
.poll = vhci_poll,
.ioctl = vhci_ioctl,
.open = vhci_open,
.release = vhci_release,
- .fasync = vhci_fasync,
};
static struct miscdevice vhci_miscdev= {
- .name = "vhci",
- .fops = &vhci_fops,
+ .name = "vhci",
+ .fops = &vhci_fops,
+ .minor = MISC_DYNAMIC_MINOR,
};
static int __init vhci_init(void)
{
BT_INFO("Virtual HCI driver ver %s", VERSION);
- vhci_miscdev.minor = minor;
-
if (misc_register(&vhci_miscdev) < 0) {
BT_ERR("Can't register misc device with minor %d", minor);
return -EIO;
@@ -369,9 +320,6 @@ static void __exit vhci_exit(void)
module_init(vhci_init);
module_exit(vhci_exit);
-module_param(minor, int, 0444);
-MODULE_PARM_DESC(minor, "Miscellaneous minor device number");
-
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
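
The hci_vhci read path above drops its hand-rolled add_wait_queue()/set_current_state()/schedule() loop in favour of wait_event_interruptible(), which performs the same prepare-to-wait, re-check-the-condition, sleep sequence and returns -ERESTARTSYS when a signal arrives. A condensed sketch of a read loop built that way (struct my_dev and the copy step are illustrative only):

/* Sketch of a blocking read built on wait_event_interruptible();
 * struct my_dev stands in for the driver's private structure and the
 * copy_to_user() step is elided. */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

struct my_dev {
        wait_queue_head_t read_wait;
        struct sk_buff_head readq;
};

static ssize_t my_read(struct file *file, char __user *buf,
                       size_t count, loff_t *pos)
{
        struct my_dev *dev = file->private_data;
        struct sk_buff *skb;
        ssize_t ret = 0;

        while (count) {
                skb = skb_dequeue(&dev->readq);
                if (skb) {
                        /* copy_to_user() of min(count, skb->len) would go
                         * here; on failure the skb should be re-queued */
                        ret = min_t(size_t, count, skb->len);
                        kfree_skb(skb);
                        break;
                }

                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        break;
                }

                /* sleeps until the queue is non-empty or a signal arrives;
                 * returns -ERESTARTSYS in the latter case */
                ret = wait_event_interruptible(dev->read_wait,
                                        !skb_queue_empty(&dev->readq));
                if (ret < 0)
                        break;
        }

        return ret;
}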
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index cceace61ef2..71d1b9bab70 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
nr = 1;
- if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
- nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
+ if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
+ nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
len = nr * CD_FRAMESIZE_RAW;
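
The cdrom change above belongs to the same cleanup: queue limits are no longer read straight from struct fields such as q->max_sectors but through accessors like queue_max_sectors(), and blk_queue_hardsect_size() becomes blk_queue_logical_block_size() in the remaining hunks. For instance, clamping an n-frame CD-DA read to the queue limit now reads:

/* Sketch: clamp an n-frame CD-DA read to the queue's transfer limit,
 * using the accessor instead of q->max_sectors. */
#include <linux/blkdev.h>
#include <linux/cdrom.h>

static int my_clamp_frames(struct request_queue *q, int nframes)
{
        int nr = nframes;

        if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
                nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
        return nr;
}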
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 2eecb779437..b5621f27c4b 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -584,8 +584,8 @@ static void gdrom_readdisk_dma(struct work_struct *work)
list_for_each_safe(elem, next, &gdrom_deferred) {
req = list_entry(elem, struct request, queuelist);
spin_unlock(&gdrom_lock);
- block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET;
- block_cnt = req->nr_sectors/GD_TO_BLK;
+ block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
+ block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
@@ -632,39 +632,35 @@ static void gdrom_readdisk_dma(struct work_struct *work)
* before handling ending the request */
spin_lock(&gdrom_lock);
list_del_init(&req->queuelist);
- __blk_end_request(req, err, blk_rq_bytes(req));
+ __blk_end_request_all(req, err);
}
spin_unlock(&gdrom_lock);
kfree(read_command);
}
-static void gdrom_request_handler_dma(struct request *req)
-{
- /* dequeue, add to list of deferred work
- * and then schedule workqueue */
- blkdev_dequeue_request(req);
- list_add_tail(&req->queuelist, &gdrom_deferred);
- schedule_work(&work);
-}
-
static void gdrom_request(struct request_queue *rq)
{
struct request *req;
- while ((req = elv_next_request(rq)) != NULL) {
+ while ((req = blk_fetch_request(rq)) != NULL) {
if (!blk_fs_request(req)) {
printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
+ continue;
}
if (rq_data_dir(req) != READ) {
printk(KERN_NOTICE "GDROM: Read only device -");
printk(" write request ignored\n");
- end_request(req, 0);
+ __blk_end_request_all(req, -EIO);
+ continue;
}
- if (req->nr_sectors)
- gdrom_request_handler_dma(req);
- else
- end_request(req, 0);
+
+ /*
+ * Add to list of deferred work and then schedule
+ * workqueue.
+ */
+ list_add_tail(&req->queuelist, &gdrom_deferred);
+ schedule_work(&work);
}
}
@@ -743,7 +739,7 @@ static void __devinit probe_gdrom_setupdisk(void)
static int __devinit probe_gdrom_setupqueue(void)
{
- blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
+ blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
/* using DMA so memory will need to be contiguous */
blk_queue_max_hw_segments(gd.gdrom_rq, 1);
/* set a large max size to get most from DMA */
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 13929356135..0fff646cc2f 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -282,7 +282,7 @@ static int send_request(struct request *req)
viopath_targetinst(viopath_hostLp),
(u64)req, VIOVERSION << 16,
((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
- (u64)req->sector * 512, len, 0);
+ (u64)blk_rq_pos(req) * 512, len, 0);
if (hvrc != HvLpEvent_Rc_Good) {
printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
return -1;
@@ -291,36 +291,19 @@ static int send_request(struct request *req)
return 0;
}
-static void viocd_end_request(struct request *req, int error)
-{
- int nsectors = req->hard_nr_sectors;
-
- /*
- * Make sure it's fully ended, and ensure that we process
- * at least one sector.
- */
- if (blk_pc_request(req))
- nsectors = (req->data_len + 511) >> 9;
- if (!nsectors)
- nsectors = 1;
-
- if (__blk_end_request(req, error, nsectors << 9))
- BUG();
-}
-
static int rwreq;
static void do_viocd_request(struct request_queue *q)
{
struct request *req;
- while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
+ while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
if (!blk_fs_request(req))
- viocd_end_request(req, -EIO);
+ __blk_end_request_all(req, -EIO);
else if (send_request(req) < 0) {
printk(VIOCD_KERN_WARNING
"unable to send message to OS/400!");
- viocd_end_request(req, -EIO);
+ __blk_end_request_all(req, -EIO);
} else
rwreq++;
}
@@ -486,8 +469,8 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
case viocdopen:
if (event->xRc == 0) {
di = &viocd_diskinfo[bevent->disk];
- blk_queue_hardsect_size(di->viocd_disk->queue,
- bevent->block_size);
+ blk_queue_logical_block_size(di->viocd_disk->queue,
+ bevent->block_size);
set_capacity(di->viocd_disk,
bevent->media_size *
bevent->block_size / 512);
@@ -531,9 +514,9 @@ return_complete:
"with rc %d:0x%04X: %s\n",
req, event->xRc,
bevent->sub_result, err->msg);
- viocd_end_request(req, -EIO);
+ __blk_end_request_all(req, -EIO);
} else
- viocd_end_request(req, 0);
+ __blk_end_request_all(req, 0);
/* restart handling of incoming requests */
spin_unlock_irqrestore(&viocd_reqlock, flags);
@@ -587,7 +570,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct device_node *node = vdev->dev.archdata.of_node;
deviceno = vdev->unit_address;
- if (deviceno > VIOCD_MAX_CD)
+ if (deviceno >= VIOCD_MAX_CD)
return -ENODEV;
if (!node)
return -ENODEV;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 735bbe2be51..30bae6de6a0 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -97,6 +97,19 @@ config DEVKMEM
kind of kernel debugging operations.
When in doubt, say "N".
+config BFIN_JTAG_COMM
+ tristate "Blackfin JTAG Communication"
+ depends on BLACKFIN
+ help
+ Add support for emulating a TTY device over the Blackfin JTAG.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin_jtag_comm.
+
+config BFIN_JTAG_COMM_CONSOLE
+ bool "Console on Blackfin JTAG"
+ depends on BFIN_JTAG_COMM=y
+
config SERIAL_NONSTANDARD
bool "Non-standard serial port support"
depends on HAS_IOMEM
@@ -679,7 +692,7 @@ config HVCS
this driver.
To compile this driver as a module, choose M here: the
- module will be called hvcs.ko. Additionally, this module
+ module will be called hvcs. Additionally, this module
will depend on arch specific APIs exported from hvcserver.ko
which will also be compiled when this driver is built as a
module.
@@ -893,7 +906,7 @@ config DTLK
config XILINX_HWICAP
tristate "Xilinx HWICAP Support"
- depends on XILINX_VIRTEX
+ depends on XILINX_VIRTEX || MICROBLAZE
help
This option enables support for Xilinx Internal Configuration
Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 9caf5b5ad1c..189efcff08c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
obj-y += misc.o
obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o
+obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o
obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
obj-$(CONFIG_AUDIT) += tty_audit.o
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 3686912427b..7a748fa0dfc 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -46,6 +46,10 @@
#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
+#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040
+#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042
+#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044
+#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046
/* cover 915 and 945 variants */
#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -75,7 +79,9 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB)
extern int agp_memory_reserved;
@@ -1211,6 +1217,8 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
case PCI_DEVICE_ID_INTEL_Q45_HB:
case PCI_DEVICE_ID_INTEL_G45_HB:
case PCI_DEVICE_ID_INTEL_G41_HB:
+ case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
+ case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
*gtt_offset = *gtt_size = MB(2);
break;
default:
@@ -2186,6 +2194,10 @@ static const struct intel_driver_description {
"G45/G43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
"G41", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
+ "IGDNG/D", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
+ "IGDNG/M", NULL, &intel_i965_driver },
{ 0, 0, 0, NULL, NULL, NULL }
};
@@ -2387,6 +2399,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_Q45_HB),
ID(PCI_DEVICE_ID_INTEL_G45_HB),
ID(PCI_DEVICE_ID_INTEL_G41_HB),
+ ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
+ ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
{ }
};
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index fd3ebd1be57..72429b6b2fa 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -779,7 +779,7 @@ static void change_speed(struct async_struct *info,
info->IER |= UART_IER_MSI;
}
/* TBD:
- * Does clearing IER_MSI imply that we should disbale the VBL interrupt ?
+ * Does clearing IER_MSI imply that we should disable the VBL interrupt ?
*/
/*
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
new file mode 100644
index 00000000000..44c113d5604
--- /dev/null
+++ b/drivers/char/bfin_jtag_comm.c
@@ -0,0 +1,365 @@
+/*
+ * TTY over Blackfin JTAG Communication
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <asm/atomic.h>
+
+/* See the Debug/Emulation chapter in the HRM */
+#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */
+#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */
+#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */
+#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */
+
+#define DRV_NAME "bfin-jtag-comm"
+#define DEV_NAME "ttyBFJC"
+
+#define pr_init(fmt, args...) ({ static const __initdata char __fmt[] = fmt; printk(__fmt, ## args); })
+#define debug(fmt, args...) pr_debug(DRV_NAME ": " fmt, ## args)
+
+static inline uint32_t bfin_write_emudat(uint32_t emudat)
+{
+ __asm__ __volatile__("emudat = %0;" : : "d"(emudat));
+ return emudat;
+}
+
+static inline uint32_t bfin_read_emudat(void)
+{
+ uint32_t emudat;
+ __asm__ __volatile__("%0 = emudat;" : "=d"(emudat));
+ return emudat;
+}
+
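+/* Pack four characters little-endian into a single 32-bit EMUDAT word */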
+static inline uint32_t bfin_write_emudat_chars(char a, char b, char c, char d)
+{
+ return bfin_write_emudat((a << 0) | (b << 8) | (c << 16) | (d << 24));
+}
+
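+/*
+ * Write-side ring buffer shared by the tty layer (producer) and the
+ * emudat kthread (consumer); CIRC_SIZE must stay a power of two for
+ * the index masking in circ_byte() to work.
+ */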
+#define CIRC_SIZE 2048 /* see comment in tty_io.c:do_tty_write() */
+#define CIRC_MASK (CIRC_SIZE - 1)
+#define circ_empty(circ) ((circ)->head == (circ)->tail)
+#define circ_free(circ) CIRC_SPACE((circ)->head, (circ)->tail, CIRC_SIZE)
+#define circ_cnt(circ) CIRC_CNT((circ)->head, (circ)->tail, CIRC_SIZE)
+#define circ_byte(circ, idx) ((circ)->buf[(idx) & CIRC_MASK])
+
+static struct tty_driver *bfin_jc_driver;
+static struct task_struct *bfin_jc_kthread;
+static struct tty_struct * volatile bfin_jc_tty;
+static unsigned long bfin_jc_count;
+static DEFINE_MUTEX(bfin_jc_tty_mutex);
+static volatile struct circ_buf bfin_jc_write_buf;
+
+static int
+bfin_jc_emudat_manager(void *arg)
+{
+ uint32_t inbound_len = 0, outbound_len = 0;
+
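+ /*
+ * Framing: each direction first sends a 32-bit length word, then the
+ * payload packed four bytes per EMUDAT transfer.
+ */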
+ while (!kthread_should_stop()) {
+ /* no one left to give data to, so sleep */
+ if (bfin_jc_tty == NULL && circ_empty(&bfin_jc_write_buf)) {
+ debug("waiting for readers\n");
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ }
+
+ /* no data available, so just chill */
+ if (!(bfin_read_DBGSTAT() & EMUDIF) && circ_empty(&bfin_jc_write_buf)) {
+ debug("waiting for data (in_len = %i) (circ: %i %i)\n",
+ inbound_len, bfin_jc_write_buf.tail, bfin_jc_write_buf.head);
+ if (inbound_len)
+ schedule();
+ else
+ schedule_timeout_interruptible(HZ);
+ continue;
+ }
+
+ /* if incoming data is ready, eat it */
+ if (bfin_read_DBGSTAT() & EMUDIF) {
+ struct tty_struct *tty;
+ mutex_lock(&bfin_jc_tty_mutex);
+ tty = (struct tty_struct *)bfin_jc_tty;
+ if (tty != NULL) {
+ uint32_t emudat = bfin_read_emudat();
+ if (inbound_len == 0) {
+ debug("incoming length: 0x%08x\n", emudat);
+ inbound_len = emudat;
+ } else {
+ size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
+ debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
+ inbound_len -= num_chars;
+ tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars);
+ tty_flip_buffer_push(tty);
+ }
+ }
+ mutex_unlock(&bfin_jc_tty_mutex);
+ }
+
+ /* if outgoing data is ready, post it */
+ if (!(bfin_read_DBGSTAT() & EMUDOF) && !circ_empty(&bfin_jc_write_buf)) {
+ if (outbound_len == 0) {
+ outbound_len = circ_cnt(&bfin_jc_write_buf);
+ bfin_write_emudat(outbound_len);
+ debug("outgoing length: 0x%08x\n", outbound_len);
+ } else {
+ struct tty_struct *tty;
+ int tail = bfin_jc_write_buf.tail;
+ size_t ate = (4 <= outbound_len ? 4 : outbound_len);
+ uint32_t emudat =
+ bfin_write_emudat_chars(
+ circ_byte(&bfin_jc_write_buf, tail + 0),
+ circ_byte(&bfin_jc_write_buf, tail + 1),
+ circ_byte(&bfin_jc_write_buf, tail + 2),
+ circ_byte(&bfin_jc_write_buf, tail + 3)
+ );
+ bfin_jc_write_buf.tail += ate;
+ outbound_len -= ate;
+ mutex_lock(&bfin_jc_tty_mutex);
+ tty = (struct tty_struct *)bfin_jc_tty;
+ if (tty)
+ tty_wakeup(tty);
+ mutex_unlock(&bfin_jc_tty_mutex);
+ debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
+ }
+ }
+ }
+
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+static int
+bfin_jc_open(struct tty_struct *tty, struct file *filp)
+{
+ mutex_lock(&bfin_jc_tty_mutex);
+ debug("open %lu\n", bfin_jc_count);
+ ++bfin_jc_count;
+ bfin_jc_tty = tty;
+ wake_up_process(bfin_jc_kthread);
+ mutex_unlock(&bfin_jc_tty_mutex);
+ return 0;
+}
+
+static void
+bfin_jc_close(struct tty_struct *tty, struct file *filp)
+{
+ mutex_lock(&bfin_jc_tty_mutex);
+ debug("close %lu\n", bfin_jc_count);
+ if (--bfin_jc_count == 0)
+ bfin_jc_tty = NULL;
+ wake_up_process(bfin_jc_kthread);
+ mutex_unlock(&bfin_jc_tty_mutex);
+}
+
+/* XXX: we don't handle the put_char() case where we must handle count = 1 */
+static int
+bfin_jc_circ_write(const unsigned char *buf, int count)
+{
+ int i;
+ count = min(count, circ_free(&bfin_jc_write_buf));
+ debug("going to write chunk of %i bytes\n", count);
+ for (i = 0; i < count; ++i)
+ circ_byte(&bfin_jc_write_buf, bfin_jc_write_buf.head + i) = buf[i];
+ bfin_jc_write_buf.head += i;
+ return i;
+}
+
+#ifndef CONFIG_BFIN_JTAG_COMM_CONSOLE
+# define acquire_console_sem()
+# define release_console_sem()
+#endif
+static int
+bfin_jc_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ int i;
+ acquire_console_sem();
+ i = bfin_jc_circ_write(buf, count);
+ release_console_sem();
+ wake_up_process(bfin_jc_kthread);
+ return i;
+}
+
+static void
+bfin_jc_flush_chars(struct tty_struct *tty)
+{
+ wake_up_process(bfin_jc_kthread);
+}
+
+static int
+bfin_jc_write_room(struct tty_struct *tty)
+{
+ return circ_free(&bfin_jc_write_buf);
+}
+
+static int
+bfin_jc_chars_in_buffer(struct tty_struct *tty)
+{
+ return circ_cnt(&bfin_jc_write_buf);
+}
+
+static void
+bfin_jc_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ unsigned long expire = jiffies + timeout;
+ while (!circ_empty(&bfin_jc_write_buf)) {
+ if (signal_pending(current))
+ break;
+ if (time_after(jiffies, expire))
+ break;
+ }
+}
+
+static struct tty_operations bfin_jc_ops = {
+ .open = bfin_jc_open,
+ .close = bfin_jc_close,
+ .write = bfin_jc_write,
+ /*.put_char = bfin_jc_put_char,*/
+ .flush_chars = bfin_jc_flush_chars,
+ .write_room = bfin_jc_write_room,
+ .chars_in_buffer = bfin_jc_chars_in_buffer,
+ .wait_until_sent = bfin_jc_wait_until_sent,
+};
+
+static int __init bfin_jc_init(void)
+{
+ int ret;
+
+ bfin_jc_kthread = kthread_create(bfin_jc_emudat_manager, NULL, DRV_NAME);
+ if (IS_ERR(bfin_jc_kthread))
+ return PTR_ERR(bfin_jc_kthread);
+
+ ret = -ENOMEM;
+
+ bfin_jc_write_buf.head = bfin_jc_write_buf.tail = 0;
+ bfin_jc_write_buf.buf = kmalloc(CIRC_SIZE, GFP_KERNEL);
+ if (!bfin_jc_write_buf.buf)
+ goto err;
+
+ bfin_jc_driver = alloc_tty_driver(1);
+ if (!bfin_jc_driver)
+ goto err;
+
+ bfin_jc_driver->owner = THIS_MODULE;
+ bfin_jc_driver->driver_name = DRV_NAME;
+ bfin_jc_driver->name = DEV_NAME;
+ bfin_jc_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ bfin_jc_driver->subtype = SERIAL_TYPE_NORMAL;
+ bfin_jc_driver->init_termios = tty_std_termios;
+ tty_set_operations(bfin_jc_driver, &bfin_jc_ops);
+
+ ret = tty_register_driver(bfin_jc_driver);
+ if (ret)
+ goto err;
+
+ pr_init(KERN_INFO DRV_NAME ": initialized\n");
+
+ return 0;
+
+ err:
+ put_tty_driver(bfin_jc_driver);
+ kfree(bfin_jc_write_buf.buf);
+ kthread_stop(bfin_jc_kthread);
+ return ret;
+}
+module_init(bfin_jc_init);
+
+static void __exit bfin_jc_exit(void)
+{
+ kthread_stop(bfin_jc_kthread);
+ kfree(bfin_jc_write_buf.buf);
+ tty_unregister_driver(bfin_jc_driver);
+ put_tty_driver(bfin_jc_driver);
+}
+module_exit(bfin_jc_exit);
+
+#if defined(CONFIG_BFIN_JTAG_COMM_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
+static void
+bfin_jc_straight_buffer_write(const char *buf, unsigned count)
+{
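+ /* Polling write path for early printk and for console output before the kthread exists */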
+ unsigned ate = 0;
+ while (bfin_read_DBGSTAT() & EMUDOF)
+ continue;
+ bfin_write_emudat(count);
+ while (ate < count) {
+ while (bfin_read_DBGSTAT() & EMUDOF)
+ continue;
+ bfin_write_emudat_chars(buf[ate], buf[ate+1], buf[ate+2], buf[ate+3]);
+ ate += 4;
+ }
+}
+#endif
+
+#ifdef CONFIG_BFIN_JTAG_COMM_CONSOLE
+static void
+bfin_jc_console_write(struct console *co, const char *buf, unsigned count)
+{
+ if (bfin_jc_kthread == NULL)
+ bfin_jc_straight_buffer_write(buf, count);
+ else
+ bfin_jc_circ_write(buf, count);
+}
+
+static struct tty_driver *
+bfin_jc_console_device(struct console *co, int *index)
+{
+ *index = co->index;
+ return bfin_jc_driver;
+}
+
+static struct console bfin_jc_console = {
+ .name = DEV_NAME,
+ .write = bfin_jc_console_write,
+ .device = bfin_jc_console_device,
+ .flags = CON_ANYTIME | CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static int __init bfin_jc_console_init(void)
+{
+ register_console(&bfin_jc_console);
+ return 0;
+}
+console_initcall(bfin_jc_console_init);
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK
+static void __init
+bfin_jc_early_write(struct console *co, const char *buf, unsigned int count)
+{
+ bfin_jc_straight_buffer_write(buf, count);
+}
+
+static struct __initdata console bfin_jc_early_console = {
+ .name = "early_BFJC",
+ .write = bfin_jc_early_write,
+ .flags = CON_ANYTIME | CON_PRINTBUFFER,
+ .index = -1,
+};
+
+struct console * __init
+bfin_jc_early_init(unsigned int port, unsigned int cflag)
+{
+ return &bfin_jc_early_console;
+}
+#endif
+
+MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
+MODULE_DESCRIPTION("TTY over Blackfin JTAG Communication");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 1fdb9f657d8..f3366d3f06c 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -604,7 +604,6 @@
#define NR_PORTS 256
-#define ZE_V1_NPORTS 64
#define ZO_V1 0
#define ZO_V2 1
#define ZE_V1 2
@@ -663,18 +662,6 @@
static void cy_throttle(struct tty_struct *tty);
static void cy_send_xchar(struct tty_struct *tty, char ch);
-#define IS_CYC_Z(card) ((card).num_chips == (unsigned int)-1)
-
-#define Z_FPGA_CHECK(card) \
- ((readl(&((struct RUNTIME_9060 __iomem *) \
- ((card).ctl_addr))->init_ctrl) & (1<<17)) != 0)
-
-#define ISZLOADED(card) (((ZO_V1 == readl(&((struct RUNTIME_9060 __iomem *) \
- ((card).ctl_addr))->mail_box_0)) || \
- Z_FPGA_CHECK(card)) && \
- (ZFIRM_ID == readl(&((struct FIRM_ID __iomem *) \
- ((card).base_addr+ID_ADDRESS))->signature)))
-
#ifndef SERIAL_XMIT_SIZE
#define SERIAL_XMIT_SIZE (min(PAGE_SIZE, 4096))
#endif
@@ -687,8 +674,6 @@ static void cy_send_xchar(struct tty_struct *tty, char ch);
#define DRIVER_VERSION 0x02010203
#define RAM_SIZE 0x80000
-#define Z_FPGA_LOADED(X) ((readl(&(X)->init_ctrl) & (1<<17)) != 0)
-
enum zblock_type {
ZBLOCK_PRG = 0,
ZBLOCK_FPGA = 1
@@ -883,6 +868,29 @@ static void cyz_rx_restart(unsigned long);
static struct timer_list cyz_rx_full_timer[NR_PORTS];
#endif /* CONFIG_CYZ_INTR */
+static inline bool cy_is_Z(struct cyclades_card *card)
+{
+ return card->num_chips == (unsigned int)-1;
+}
+
+static inline bool __cyz_fpga_loaded(struct RUNTIME_9060 __iomem *ctl_addr)
+{
+ return readl(&ctl_addr->init_ctrl) & (1 << 17);
+}
+
+static inline bool cyz_fpga_loaded(struct cyclades_card *card)
+{
+ return __cyz_fpga_loaded(card->ctl_addr.p9060);
+}
+
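+/*
+ * Z firmware is usable once the FPGA is loaded (or the board is a ZO_V1)
+ * and the ZFIRM_ID signature is present.
+ */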
+static inline bool cyz_is_loaded(struct cyclades_card *card)
+{
+ struct FIRM_ID __iomem *fw_id = card->base_addr + ID_ADDRESS;
+
+ return (card->hw_ver == ZO_V1 || cyz_fpga_loaded(card)) &&
+ readl(&fw_id->signature) == ZFIRM_ID;
+}
+
static inline int serial_paranoia_check(struct cyclades_port *info,
char *name, const char *routine)
{
@@ -1395,19 +1403,15 @@ cyz_fetch_msg(struct cyclades_card *cinfo,
unsigned long loc_doorbell;
firm_id = cinfo->base_addr + ID_ADDRESS;
- if (!ISZLOADED(*cinfo))
- return -1;
zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
board_ctrl = &zfw_ctrl->board_ctrl;
- loc_doorbell = readl(&((struct RUNTIME_9060 __iomem *)
- (cinfo->ctl_addr))->loc_doorbell);
+ loc_doorbell = readl(&cinfo->ctl_addr.p9060->loc_doorbell);
if (loc_doorbell) {
*cmd = (char)(0xff & loc_doorbell);
*channel = readl(&board_ctrl->fwcmd_channel);
*param = (__u32) readl(&board_ctrl->fwcmd_param);
- cy_writel(&((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->
- loc_doorbell, 0xffffffff);
+ cy_writel(&cinfo->ctl_addr.p9060->loc_doorbell, 0xffffffff);
return 1;
}
return 0;
@@ -1424,15 +1428,14 @@ cyz_issue_cmd(struct cyclades_card *cinfo,
unsigned int index;
firm_id = cinfo->base_addr + ID_ADDRESS;
- if (!ISZLOADED(*cinfo))
+ if (!cyz_is_loaded(cinfo))
return -1;
zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
board_ctrl = &zfw_ctrl->board_ctrl;
index = 0;
- pci_doorbell =
- &((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->pci_doorbell;
+ pci_doorbell = &cinfo->ctl_addr.p9060->pci_doorbell;
while ((readl(pci_doorbell) & 0xff) != 0) {
if (index++ == 1000)
return (int)(readl(pci_doorbell) & 0xff);
@@ -1624,10 +1627,8 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
static struct BOARD_CTRL __iomem *board_ctrl;
static struct CH_CTRL __iomem *ch_ctrl;
static struct BUF_CTRL __iomem *buf_ctrl;
- __u32 channel;
+ __u32 channel, param, fw_ver;
__u8 cmd;
- __u32 param;
- __u32 hw_ver, fw_ver;
int special_count;
int delta_count;
@@ -1635,8 +1636,6 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
board_ctrl = &zfw_ctrl->board_ctrl;
fw_ver = readl(&board_ctrl->fw_version);
- hw_ver = readl(&((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->
- mail_box_0);
while (cyz_fetch_msg(cinfo, &channel, &cmd, &param) == 1) {
special_count = 0;
@@ -1737,15 +1736,7 @@ static irqreturn_t cyz_interrupt(int irq, void *dev_id)
{
struct cyclades_card *cinfo = dev_id;
- if (unlikely(cinfo == NULL)) {
-#ifdef CY_DEBUG_INTERRUPTS
- printk(KERN_DEBUG "cyz_interrupt: spurious interrupt %d\n",
- irq);
-#endif
- return IRQ_NONE; /* spurious interrupt */
- }
-
- if (unlikely(!ISZLOADED(*cinfo))) {
+ if (unlikely(!cyz_is_loaded(cinfo))) {
#ifdef CY_DEBUG_INTERRUPTS
printk(KERN_DEBUG "cyz_interrupt: board not yet loaded "
"(IRQ%d).\n", irq);
@@ -1785,7 +1776,6 @@ static void cyz_poll(unsigned long arg)
struct tty_struct *tty;
struct FIRM_ID __iomem *firm_id;
struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
struct BUF_CTRL __iomem *buf_ctrl;
unsigned long expires = jiffies + HZ;
unsigned int port, card;
@@ -1793,19 +1783,17 @@ static void cyz_poll(unsigned long arg)
for (card = 0; card < NR_CARDS; card++) {
cinfo = &cy_card[card];
- if (!IS_CYC_Z(*cinfo))
+ if (!cy_is_Z(cinfo))
continue;
- if (!ISZLOADED(*cinfo))
+ if (!cyz_is_loaded(cinfo))
continue;
firm_id = cinfo->base_addr + ID_ADDRESS;
zfw_ctrl = cinfo->base_addr +
(readl(&firm_id->zfwctrl_addr) & 0xfffff);
- board_ctrl = &(zfw_ctrl->board_ctrl);
/* Skip first polling cycle to avoid racing conditions with the FW */
if (!cinfo->intr_enabled) {
- cinfo->nports = (int)readl(&board_ctrl->n_channel);
cinfo->intr_enabled = 1;
continue;
}
@@ -1874,7 +1862,7 @@ static int startup(struct cyclades_port *info)
set_line_char(info);
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -1931,7 +1919,7 @@ static int startup(struct cyclades_port *info)
base_addr = card->base_addr;
firm_id = base_addr + ID_ADDRESS;
- if (!ISZLOADED(*card))
+ if (!cyz_is_loaded(card))
return -ENODEV;
zfw_ctrl = card->base_addr +
@@ -2026,7 +2014,7 @@ static void start_xmit(struct cyclades_port *info)
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -2070,7 +2058,7 @@ static void shutdown(struct cyclades_port *info)
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -2126,7 +2114,7 @@ static void shutdown(struct cyclades_port *info)
#endif
firm_id = base_addr + ID_ADDRESS;
- if (!ISZLOADED(*card))
+ if (!cyz_is_loaded(card))
return;
zfw_ctrl = card->base_addr +
@@ -2233,7 +2221,7 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
#endif
info->port.blocked_open++;
- if (!IS_CYC_Z(*cinfo)) {
+ if (!cy_is_Z(cinfo)) {
chip = channel >> 2;
channel &= 0x03;
index = cinfo->bus_index;
@@ -2296,7 +2284,7 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
base_addr = cinfo->base_addr;
firm_id = base_addr + ID_ADDRESS;
- if (!ISZLOADED(*cinfo)) {
+ if (!cyz_is_loaded(cinfo)) {
__set_current_state(TASK_RUNNING);
remove_wait_queue(&info->port.open_wait, &wait);
return -EINVAL;
@@ -2397,16 +2385,14 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
treat it as absent from the system. This
will make the user pay attention.
*/
- if (IS_CYC_Z(*info->card)) {
+ if (cy_is_Z(info->card)) {
struct cyclades_card *cinfo = info->card;
struct FIRM_ID __iomem *firm_id = cinfo->base_addr + ID_ADDRESS;
- if (!ISZLOADED(*cinfo)) {
- if (((ZE_V1 == readl(&((struct RUNTIME_9060 __iomem *)
- (cinfo->ctl_addr))->mail_box_0)) &&
- Z_FPGA_CHECK(*cinfo)) &&
- (ZFIRM_HLT == readl(
- &firm_id->signature))) {
+ if (!cyz_is_loaded(cinfo)) {
+ if (cinfo->hw_ver == ZE_V1 && cyz_fpga_loaded(cinfo) &&
+ readl(&firm_id->signature) ==
+ ZFIRM_HLT) {
printk(KERN_ERR "cyc:Cyclades-Z Error: you "
"need an external power supply for "
"this number of ports.\nFirmware "
@@ -2423,18 +2409,13 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
interrupts should be enabled as soon as the first open
happens to one of its ports. */
if (!cinfo->intr_enabled) {
- struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
-
- zfw_ctrl = cinfo->base_addr +
- (readl(&firm_id->zfwctrl_addr) &
- 0xfffff);
-
- board_ctrl = &zfw_ctrl->board_ctrl;
+ u16 intr;
/* Enable interrupts on the PLX chip */
- cy_writew(cinfo->ctl_addr + 0x68,
- readw(cinfo->ctl_addr + 0x68) | 0x0900);
+ intr = readw(&cinfo->ctl_addr.p9060->
+ intr_ctrl_stat) | 0x0900;
+ cy_writew(&cinfo->ctl_addr.p9060->
+ intr_ctrl_stat, intr);
/* Enable interrupts on the FW */
retval = cyz_issue_cmd(cinfo, 0,
C_CM_IRQ_ENBL, 0L);
@@ -2442,8 +2423,6 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
printk(KERN_ERR "cyc:IRQ enable retval "
"was %x\n", retval);
}
- cinfo->nports =
- (int)readl(&board_ctrl->n_channel);
cinfo->intr_enabled = 1;
}
}
@@ -2556,7 +2535,7 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
#endif
card = info->card;
channel = (info->line) - (card->first_line);
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -2601,7 +2580,7 @@ static void cy_flush_buffer(struct tty_struct *tty)
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
spin_unlock_irqrestore(&card->card_lock, flags);
- if (IS_CYC_Z(*card)) { /* If it is a Z card, flush the on-board
+ if (cy_is_Z(card)) { /* If it is a Z card, flush the on-board
buffers as well */
spin_lock_irqsave(&card->card_lock, flags);
retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L);
@@ -2682,7 +2661,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
spin_lock_irqsave(&card->card_lock, flags);
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
int channel = info->line - card->first_line;
int index = card->bus_index;
void __iomem *base_addr = card->base_addr +
@@ -2902,7 +2881,7 @@ static int cy_chars_in_buffer(struct tty_struct *tty)
channel = (info->line) - (card->first_line);
#ifdef Z_EXT_CHARS_IN_BUFFER
- if (!IS_CYC_Z(cy_card[card])) {
+ if (!cy_is_Z(card)) {
#endif /* Z_EXT_CHARS_IN_BUFFER */
#ifdef CY_DEBUG_IO
printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
@@ -2984,7 +2963,6 @@ static void set_line_char(struct cyclades_port *info)
void __iomem *base_addr;
int chip, channel, index;
unsigned cflag, iflag;
- unsigned short chip_number;
int baud, baud_rate = 0;
int i;
@@ -3013,9 +2991,8 @@ static void set_line_char(struct cyclades_port *info)
card = info->card;
channel = info->line - card->first_line;
- chip_number = channel / 4;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
index = card->bus_index;
@@ -3233,21 +3210,17 @@ static void set_line_char(struct cyclades_port *info)
} else {
struct FIRM_ID __iomem *firm_id;
struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
struct CH_CTRL __iomem *ch_ctrl;
- struct BUF_CTRL __iomem *buf_ctrl;
__u32 sw_flow;
int retval;
firm_id = card->base_addr + ID_ADDRESS;
- if (!ISZLOADED(*card))
+ if (!cyz_is_loaded(card))
return;
zfw_ctrl = card->base_addr +
(readl(&firm_id->zfwctrl_addr) & 0xfffff);
- board_ctrl = &zfw_ctrl->board_ctrl;
ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]);
- buf_ctrl = &zfw_ctrl->buf_ctrl[channel];
/* baud rate */
baud = tty_get_baud_rate(info->port.tty);
@@ -3457,7 +3430,7 @@ static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value)
card = info->card;
channel = (info->line) - (card->first_line);
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -3497,7 +3470,7 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -3523,7 +3496,7 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
} else {
base_addr = card->base_addr;
firm_id = card->base_addr + ID_ADDRESS;
- if (ISZLOADED(*card)) {
+ if (cyz_is_loaded(card)) {
zfw_ctrl = card->base_addr +
(readl(&firm_id->zfwctrl_addr) & 0xfffff);
board_ctrl = &zfw_ctrl->board_ctrl;
@@ -3566,7 +3539,7 @@ cy_tiocmset(struct tty_struct *tty, struct file *file,
card = info->card;
channel = (info->line) - (card->first_line);
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -3641,7 +3614,7 @@ cy_tiocmset(struct tty_struct *tty, struct file *file,
base_addr = card->base_addr;
firm_id = card->base_addr + ID_ADDRESS;
- if (ISZLOADED(*card)) {
+ if (cyz_is_loaded(card)) {
zfw_ctrl = card->base_addr +
(readl(&firm_id->zfwctrl_addr) & 0xfffff);
board_ctrl = &zfw_ctrl->board_ctrl;
@@ -3713,7 +3686,7 @@ static int cy_break(struct tty_struct *tty, int break_state)
card = info->card;
spin_lock_irqsave(&card->card_lock, flags);
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
/* Let the transmit ISR take care of this (since it
requires stuffing characters into the output stream).
*/
@@ -3782,7 +3755,7 @@ static int set_threshold(struct cyclades_port *info, unsigned long value)
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -3810,7 +3783,7 @@ static int get_threshold(struct cyclades_port *info,
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -3844,7 +3817,7 @@ static int set_timeout(struct cyclades_port *info, unsigned long value)
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -3867,7 +3840,7 @@ static int get_timeout(struct cyclades_port *info,
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -4121,7 +4094,7 @@ static void cy_send_xchar(struct tty_struct *tty, char ch)
card = info->card;
channel = info->line - card->first_line;
- if (IS_CYC_Z(*card)) {
+ if (cy_is_Z(card)) {
if (ch == STOP_CHAR(tty))
cyz_issue_cmd(card, channel, C_CM_SENDXOFF, 0L);
else if (ch == START_CHAR(tty))
@@ -4154,7 +4127,7 @@ static void cy_throttle(struct tty_struct *tty)
card = info->card;
if (I_IXOFF(tty)) {
- if (!IS_CYC_Z(*card))
+ if (!cy_is_Z(card))
cy_send_xchar(tty, STOP_CHAR(tty));
else
info->throttle = 1;
@@ -4162,7 +4135,7 @@ static void cy_throttle(struct tty_struct *tty)
if (tty->termios->c_cflag & CRTSCTS) {
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -4219,7 +4192,7 @@ static void cy_unthrottle(struct tty_struct *tty)
if (tty->termios->c_cflag & CRTSCTS) {
card = info->card;
channel = info->line - card->first_line;
- if (!IS_CYC_Z(*card)) {
+ if (!cy_is_Z(card)) {
chip = channel >> 2;
channel &= 0x03;
index = card->bus_index;
@@ -4263,7 +4236,7 @@ static void cy_stop(struct tty_struct *tty)
cinfo = info->card;
channel = info->line - cinfo->first_line;
- if (!IS_CYC_Z(*cinfo)) {
+ if (!cy_is_Z(cinfo)) {
index = cinfo->bus_index;
chip = channel >> 2;
channel &= 0x03;
@@ -4296,7 +4269,7 @@ static void cy_start(struct tty_struct *tty)
cinfo = info->card;
channel = info->line - cinfo->first_line;
index = cinfo->bus_index;
- if (!IS_CYC_Z(*cinfo)) {
+ if (!cy_is_Z(cinfo)) {
chip = channel >> 2;
channel &= 0x03;
base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index);
@@ -4347,33 +4320,20 @@ static void cy_hangup(struct tty_struct *tty)
static int __devinit cy_init_card(struct cyclades_card *cinfo)
{
struct cyclades_port *info;
- u32 uninitialized_var(mailbox);
- unsigned int nports, port;
+ unsigned int port;
unsigned short chip_number;
- int uninitialized_var(index);
spin_lock_init(&cinfo->card_lock);
+ cinfo->intr_enabled = 0;
- if (IS_CYC_Z(*cinfo)) { /* Cyclades-Z */
- mailbox = readl(&((struct RUNTIME_9060 __iomem *)
- cinfo->ctl_addr)->mail_box_0);
- nports = (mailbox == ZE_V1) ? ZE_V1_NPORTS : 8;
- cinfo->intr_enabled = 0;
- cinfo->nports = 0; /* Will be correctly set later, after
- Z FW is loaded */
- } else {
- index = cinfo->bus_index;
- nports = cinfo->nports = CyPORTS_PER_CHIP * cinfo->num_chips;
- }
-
- cinfo->ports = kzalloc(sizeof(*cinfo->ports) * nports, GFP_KERNEL);
+ cinfo->ports = kcalloc(cinfo->nports, sizeof(*cinfo->ports),
+ GFP_KERNEL);
if (cinfo->ports == NULL) {
printk(KERN_ERR "Cyclades: cannot allocate ports\n");
- cinfo->nports = 0;
return -ENOMEM;
}
- for (port = cinfo->first_line; port < cinfo->first_line + nports;
+ for (port = cinfo->first_line; port < cinfo->first_line + cinfo->nports;
port++) {
info = &cinfo->ports[port - cinfo->first_line];
tty_port_init(&info->port);
@@ -4387,9 +4347,9 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
init_completion(&info->shutdown_wait);
init_waitqueue_head(&info->delta_msr_wait);
- if (IS_CYC_Z(*cinfo)) {
+ if (cy_is_Z(cinfo)) {
info->type = PORT_STARTECH;
- if (mailbox == ZO_V1)
+ if (cinfo->hw_ver == ZO_V1)
info->xmit_fifo_size = CYZ_FIFO_SIZE;
else
info->xmit_fifo_size = 4 * CYZ_FIFO_SIZE;
@@ -4398,6 +4358,7 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
cyz_rx_restart, (unsigned long)info);
#endif
} else {
+ int index = cinfo->bus_index;
info->type = PORT_CIRRUS;
info->xmit_fifo_size = CyMAX_CHAR_FIFO;
info->cor1 = CyPARITY_NONE | Cy_1_STOP | Cy_8_BITS;
@@ -4430,7 +4391,7 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
}
#ifndef CONFIG_CYZ_INTR
- if (IS_CYC_Z(*cinfo) && !timer_pending(&cyz_timerlist)) {
+ if (cy_is_Z(cinfo) && !timer_pending(&cyz_timerlist)) {
mod_timer(&cyz_timerlist, jiffies + 1);
#ifdef CY_PCI_DEBUG
printk(KERN_DEBUG "Cyclades-Z polling initialized\n");
@@ -4621,11 +4582,12 @@ static int __init cy_detect_isa(void)
/* set cy_card */
cy_card[j].base_addr = cy_isa_address;
- cy_card[j].ctl_addr = NULL;
+ cy_card[j].ctl_addr.p9050 = NULL;
cy_card[j].irq = (int)cy_isa_irq;
cy_card[j].bus_index = 0;
cy_card[j].first_line = cy_next_channel;
- cy_card[j].num_chips = cy_isa_nchan / 4;
+ cy_card[j].num_chips = cy_isa_nchan / CyPORTS_PER_CHIP;
+ cy_card[j].nports = cy_isa_nchan;
if (cy_init_card(&cy_card[j])) {
cy_card[j].base_addr = NULL;
free_irq(cy_isa_irq, &cy_card[j]);
@@ -4781,7 +4743,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
struct CUSTOM_REG __iomem *cust = base_addr;
struct ZFW_CTRL __iomem *pt_zfwctrl;
void __iomem *tmp;
- u32 mailbox, status;
+ u32 mailbox, status, nchan;
unsigned int i;
int retval;
@@ -4793,7 +4755,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
/* Check whether the firmware is already loaded and running. If
positive, skip this board */
- if (Z_FPGA_LOADED(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
+ if (__cyz_fpga_loaded(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
u32 cntval = readl(base_addr + 0x190);
udelay(100);
@@ -4812,7 +4774,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
mailbox = readl(&ctl_addr->mail_box_0);
- if (mailbox == 0 || Z_FPGA_LOADED(ctl_addr)) {
+ if (mailbox == 0 || __cyz_fpga_loaded(ctl_addr)) {
/* stops CPU and set window to beginning of RAM */
cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
cy_writel(&cust->cpu_stop, 0);
@@ -4828,7 +4790,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
base_addr);
if (retval)
goto err_rel;
- if (!Z_FPGA_LOADED(ctl_addr)) {
+ if (!__cyz_fpga_loaded(ctl_addr)) {
dev_err(&pdev->dev, "fw upload successful, but fw is "
"not loaded\n");
goto err_rel;
@@ -4887,7 +4849,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
"system before loading the new FW to the "
"Cyclades-Z.\n");
- if (Z_FPGA_LOADED(ctl_addr))
+ if (__cyz_fpga_loaded(ctl_addr))
plx_init(pdev, irq, ctl_addr);
retval = -EIO;
@@ -4902,16 +4864,16 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr),
base_addr + readl(&fid->zfwctrl_addr));
+ nchan = readl(&pt_zfwctrl->board_ctrl.n_channel);
dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n",
- readl(&pt_zfwctrl->board_ctrl.fw_version),
- readl(&pt_zfwctrl->board_ctrl.n_channel));
+ readl(&pt_zfwctrl->board_ctrl.fw_version), nchan);
- if (readl(&pt_zfwctrl->board_ctrl.n_channel) == 0) {
+ if (nchan == 0) {
dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please "
"check the connection between the Z host card and the "
"serial expanders.\n");
- if (Z_FPGA_LOADED(ctl_addr))
+ if (__cyz_fpga_loaded(ctl_addr))
plx_init(pdev, irq, ctl_addr);
dev_info(&pdev->dev, "Null number of ports detected. Board "
@@ -4932,9 +4894,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
0x00030800UL);
- plx_init(pdev, irq, ctl_addr);
-
- return 0;
+ return nchan;
err_rel:
release_firmware(fw);
err:
@@ -4946,7 +4906,7 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
{
void __iomem *addr0 = NULL, *addr2 = NULL;
char *card_name = NULL;
- u32 mailbox;
+ u32 uninitialized_var(mailbox);
unsigned int device_id, nchan = 0, card_no, i;
unsigned char plx_ver;
int retval, irq;
@@ -5023,11 +4983,12 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
}
/* Disable interrupts on the PLX before resetting it */
- cy_writew(addr0 + 0x68, readw(addr0 + 0x68) & ~0x0900);
+ cy_writew(&ctl_addr->intr_ctrl_stat,
+ readw(&ctl_addr->intr_ctrl_stat) & ~0x0900);
plx_init(pdev, irq, addr0);
- mailbox = (u32)readl(&ctl_addr->mail_box_0);
+ mailbox = readl(&ctl_addr->mail_box_0);
addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin);
@@ -5038,12 +4999,8 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
if (mailbox == ZE_V1) {
card_name = "Cyclades-Ze";
-
- readl(&ctl_addr->mail_box_0);
- nchan = ZE_V1_NPORTS;
} else {
card_name = "Cyclades-8Zo";
-
#ifdef CY_PCI_DEBUG
if (mailbox == ZO_V1) {
cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
@@ -5065,15 +5022,12 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
*/
if ((mailbox == ZO_V1) || (mailbox == ZO_V2))
cy_writel(addr2 + ID_ADDRESS, 0L);
-
- retval = cyz_load_fw(pdev, addr2, addr0, irq);
- if (retval)
- goto err_unmap;
- /* This must be a Cyclades-8Zo/PCI. The extendable
- version will have a different device_id and will
- be allocated its maximum number of ports. */
- nchan = 8;
}
+
+ retval = cyz_load_fw(pdev, addr2, addr0, irq);
+ if (retval <= 0)
+ goto err_unmap;
+ nchan = retval;
}
if ((cy_next_channel + nchan) > NR_PORTS) {
@@ -5103,8 +5057,10 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "could not allocate IRQ\n");
goto err_unmap;
}
- cy_card[card_no].num_chips = nchan / 4;
+ cy_card[card_no].num_chips = nchan / CyPORTS_PER_CHIP;
} else {
+ cy_card[card_no].hw_ver = mailbox;
+ cy_card[card_no].num_chips = (unsigned int)-1;
#ifdef CONFIG_CYZ_INTR
/* allocate IRQ only if board has an IRQ */
if (irq != 0 && irq != 255) {
@@ -5117,15 +5073,15 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
}
}
#endif /* CONFIG_CYZ_INTR */
- cy_card[card_no].num_chips = (unsigned int)-1;
}
/* set cy_card */
cy_card[card_no].base_addr = addr2;
- cy_card[card_no].ctl_addr = addr0;
+ cy_card[card_no].ctl_addr.p9050 = addr0;
cy_card[card_no].irq = irq;
cy_card[card_no].bus_index = 1;
cy_card[card_no].first_line = cy_next_channel;
+ cy_card[card_no].nports = nchan;
retval = cy_init_card(&cy_card[card_no]);
if (retval)
goto err_null;
@@ -5138,17 +5094,20 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
plx_ver = readb(addr2 + CyPLX_VER) & 0x0f;
switch (plx_ver) {
case PLX_9050:
-
cy_writeb(addr0 + 0x4c, 0x43);
break;
case PLX_9060:
case PLX_9080:
default: /* Old boards, use PLX_9060 */
- plx_init(pdev, irq, addr0);
- cy_writew(addr0 + 0x68, readw(addr0 + 0x68) | 0x0900);
+ {
+ struct RUNTIME_9060 __iomem *ctl_addr = addr0;
+ plx_init(pdev, irq, ctl_addr);
+ cy_writew(&ctl_addr->intr_ctrl_stat,
+ readw(&ctl_addr->intr_ctrl_stat) | 0x0900);
break;
}
+ }
}
dev_info(&pdev->dev, "%s/PCI #%d found: %d channels starting from "
@@ -5179,22 +5138,23 @@ static void __devexit cy_pci_remove(struct pci_dev *pdev)
unsigned int i;
/* non-Z with old PLX */
- if (!IS_CYC_Z(*cinfo) && (readb(cinfo->base_addr + CyPLX_VER) & 0x0f) ==
+ if (!cy_is_Z(cinfo) && (readb(cinfo->base_addr + CyPLX_VER) & 0x0f) ==
PLX_9050)
- cy_writeb(cinfo->ctl_addr + 0x4c, 0);
+ cy_writeb(cinfo->ctl_addr.p9050 + 0x4c, 0);
else
#ifndef CONFIG_CYZ_INTR
- if (!IS_CYC_Z(*cinfo))
+ if (!cy_is_Z(cinfo))
#endif
- cy_writew(cinfo->ctl_addr + 0x68,
- readw(cinfo->ctl_addr + 0x68) & ~0x0900);
+ cy_writew(&cinfo->ctl_addr.p9060->intr_ctrl_stat,
+ readw(&cinfo->ctl_addr.p9060->intr_ctrl_stat) &
+ ~0x0900);
iounmap(cinfo->base_addr);
- if (cinfo->ctl_addr)
- iounmap(cinfo->ctl_addr);
+ if (cinfo->ctl_addr.p9050)
+ iounmap(cinfo->ctl_addr.p9050);
if (cinfo->irq
#ifndef CONFIG_CYZ_INTR
- && !IS_CYC_Z(*cinfo)
+ && !cy_is_Z(cinfo)
#endif /* CONFIG_CYZ_INTR */
)
free_irq(cinfo->irq, cinfo);
@@ -5240,7 +5200,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
(cur_jifs - info->idle_stats.recv_idle)/
HZ, info->idle_stats.overruns,
/* FIXME: double check locking */
- (long)info->port.tty->ldisc.ops->num);
+ (long)info->port.tty->ldisc->ops->num);
else
seq_printf(m, "%3d %8lu %10lu %8lu "
"%10lu %8lu %9lu %6ld\n",
@@ -5386,11 +5346,11 @@ static void __exit cy_cleanup_module(void)
/* clear interrupt */
cy_writeb(card->base_addr + Cy_ClrIntr, 0);
iounmap(card->base_addr);
- if (card->ctl_addr)
- iounmap(card->ctl_addr);
+ if (card->ctl_addr.p9050)
+ iounmap(card->ctl_addr.p9050);
if (card->irq
#ifndef CONFIG_CYZ_INTR
- && !IS_CYC_Z(*card)
+ && !cy_is_Z(card)
#endif /* CONFIG_CYZ_INTR */
)
free_irq(card->irq, card);
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index af7c13ca949..abef1f7d84f 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -745,7 +745,7 @@ static int epca_carrier_raised(struct tty_port *port)
return 0;
}
-static void epca_raise_dtr_rts(struct tty_port *port)
+static void epca_dtr_rts(struct tty_port *port, int onoff)
{
}
@@ -925,7 +925,7 @@ static const struct tty_operations pc_ops = {
static const struct tty_port_operations epca_port_ops = {
.carrier_raised = epca_carrier_raised,
- .raise_dtr_rts = epca_raise_dtr_rts,
+ .dtr_rts = epca_dtr_rts,
};
static int info_open(struct tty_struct *tty, struct file *filp)
@@ -1518,7 +1518,7 @@ static void doevent(int crd)
if (event & MODEMCHG_IND) {
/* A modem signal change has been indicated */
ch->imodem = mstat;
- if (test_bit(ASYNC_CHECK_CD, &ch->port.flags)) {
+ if (test_bit(ASYNCB_CHECK_CD, &ch->port.flags)) {
/* We are now receiving dcd */
if (mstat & ch->dcd)
wake_up_interruptible(&ch->port.open_wait);
@@ -1765,9 +1765,9 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
* that the driver will wait on carrier detect.
*/
if (ts->c_cflag & CLOCAL)
- clear_bit(ASYNC_CHECK_CD, &ch->port.flags);
+ clear_bit(ASYNCB_CHECK_CD, &ch->port.flags);
else
- set_bit(ASYNC_CHECK_CD, &ch->port.flags);
+ set_bit(ASYNCB_CHECK_CD, &ch->port.flags);
mval = ch->m_dtr | ch->m_rts;
} /* End CBAUD not detected */
iflag = termios2digi_i(ch, ts->c_iflag);
@@ -2114,8 +2114,8 @@ static int pc_ioctl(struct tty_struct *tty, struct file *file,
tty_wait_until_sent(tty, 0);
} else {
/* ldisc lock already held in ioctl */
- if (tty->ldisc.ops->flush_buffer)
- tty->ldisc.ops->flush_buffer(tty);
+ if (tty->ldisc->ops->flush_buffer)
+ tty->ldisc->ops->flush_buffer(tty);
}
unlock_kernel();
/* Fall Thru */
@@ -2244,7 +2244,8 @@ static void do_softint(struct work_struct *work)
if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) {
tty_hangup(tty);
wake_up_interruptible(&ch->port.open_wait);
- clear_bit(ASYNC_NORMAL_ACTIVE, &ch->port.flags);
+ clear_bit(ASYNCB_NORMAL_ACTIVE,
+ &ch->port.flags);
}
}
tty_kref_put(tty);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 340ba4f9dc5..4a9f3492b92 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -224,7 +224,7 @@ static void hpet_timer_set_irq(struct hpet_dev *devp)
break;
}
- gsi = acpi_register_gsi(irq, ACPI_LEVEL_SENSITIVE,
+ gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
if (gsi > 0)
break;
@@ -939,7 +939,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
irqp = &res->data.extended_irq;
for (i = 0; i < irqp->interrupt_count; i++) {
- irq = acpi_register_gsi(irqp->interrupts[i],
+ irq = acpi_register_gsi(NULL, irqp->interrupts[i],
irqp->triggering, irqp->polarity);
if (irq < 0)
return AE_ERROR;
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 54481a88776..86105efb4eb 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -4,7 +4,7 @@
* This HVC device driver provides terminal access using
* z/VM IUCV communication paths.
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2009
*
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
@@ -15,6 +15,7 @@
#include <asm/ebcdic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>
@@ -74,6 +75,7 @@ struct hvc_iucv_private {
wait_queue_head_t sndbuf_waitq; /* wait for send completion */
struct list_head tty_outqueue; /* outgoing IUCV messages */
struct list_head tty_inqueue; /* incoming IUCV messages */
+ struct device *dev; /* device structure */
};
struct iucv_tty_buffer {
@@ -542,7 +544,68 @@ static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
if (sync_wait)
wait_event_timeout(priv->sndbuf_waitq,
- tty_outqueue_empty(priv), HZ);
+ tty_outqueue_empty(priv), HZ/10);
+}
+
+/**
+ * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
+ * @priv: Pointer to hvc_iucv_private structure
+ *
+ * This routine severs an existing IUCV communication path and hangs
+ * up the underlying HVC terminal device.
+ * The hang-up occurs only if an IUCV communication path is established;
+ * otherwise there is no need to hang up the terminal device.
+ *
+ * The IUCV HVC hang-up is separated into two steps:
+ * 1. After the IUCV path has been severed, the iucv_state is set to
+ * IUCV_SEVERED.
+ * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
+ * IUCV_SEVERED state causes the tty hang-up in the HVC layer.
+ *
+ * If the tty has not yet been opened, clean up the hvc_iucv_private
+ * structure to allow re-connects.
+ * If the tty has been opened, let get_chars() return -EPIPE to signal
+ * the HVC layer to hang up the tty, and wake up the HVC thread so that
+ * it calls get_chars()...
+ *
+ * Special notes on hanging up a HVC terminal instantiated as console:
+ * Hang-up: 1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
+ * 2. do_tty_hangup() calls tty->ops->close() for console_filp
+ * => no hangup notifier is called by HVC (default)
+ * 3. hvc_close() returns because of tty_hung_up_p(filp)
+ * => no delete notifier is called!
+ * Finally, the back-end is not being notified, thus, the tty session is
+ * kept active (TTY_OPENED) to be ready for re-connects.
+ *
+ * Locking: spin_lock(&priv->lock) w/o disabling bh
+ */
+static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
+{
+ struct iucv_path *path;
+
+ path = NULL;
+ spin_lock(&priv->lock);
+ if (priv->iucv_state == IUCV_CONNECTED) {
+ path = priv->path;
+ priv->path = NULL;
+ priv->iucv_state = IUCV_SEVERED;
+ if (priv->tty_state == TTY_CLOSED)
+ hvc_iucv_cleanup(priv);
+ else
+ /* console is special (see above) */
+ if (priv->is_console) {
+ hvc_iucv_cleanup(priv);
+ priv->tty_state = TTY_OPENED;
+ } else
+ hvc_kick();
+ }
+ spin_unlock(&priv->lock);
+
+ /* finally sever path (outside of priv->lock due to lock ordering) */
+ if (path) {
+ iucv_path_sever(path, NULL);
+ iucv_path_free(path);
+ }
}
/**
@@ -735,11 +798,8 @@ out_path_handled:
* @ipuser: User specified data for this path
* (AF_IUCV: port/service name and originator port)
*
- * The function also severs the path (as required by the IUCV protocol) and
- * sets the iucv state to IUCV_SEVERED for the associated struct
- * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty
- * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
- * If tty portion of the HVC is closed, clean up the outqueue.
+ * This function calls the hvc_iucv_hangup() function for the
+ * respective IUCV HVC terminal.
*
* Locking: struct hvc_iucv_private->lock
*/
@@ -747,33 +807,7 @@ static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
struct hvc_iucv_private *priv = path->private;
- spin_lock(&priv->lock);
- priv->iucv_state = IUCV_SEVERED;
-
- /* If the tty has not yet been opened, clean up the hvc_iucv_private
- * structure to allow re-connects.
- * This is also done for our console device because console hangups
- * are handled specially and no notifier is called by HVC.
- * The tty session is active (TTY_OPEN) and ready for re-connects...
- *
- * If it has been opened, let get_chars() return -EPIPE to signal the
- * HVC layer to hang up the tty.
- * If so, we need to wake up the HVC thread to call get_chars()...
- */
- priv->path = NULL;
- if (priv->tty_state == TTY_CLOSED)
- hvc_iucv_cleanup(priv);
- else
- if (priv->is_console) {
- hvc_iucv_cleanup(priv);
- priv->tty_state = TTY_OPENED;
- } else
- hvc_kick();
- spin_unlock(&priv->lock);
-
- /* finally sever path (outside of priv->lock due to lock ordering) */
- iucv_path_sever(path, ipuser);
- iucv_path_free(path);
+ hvc_iucv_hangup(priv);
}
/**
@@ -853,6 +887,37 @@ static void hvc_iucv_msg_complete(struct iucv_path *path,
destroy_tty_buffer_list(&list_remove);
}
+/**
+ * hvc_iucv_pm_freeze() - Freeze PM callback
+ * @dev: IUCV HVC terminal device
+ *
+ * Sever an established IUCV communication path and
+ * trigger a hang-up of the underlying HVC terminal.
+ */
+static int hvc_iucv_pm_freeze(struct device *dev)
+{
+ struct hvc_iucv_private *priv = dev_get_drvdata(dev);
+
+ local_bh_disable();
+ hvc_iucv_hangup(priv);
+ local_bh_enable();
+
+ return 0;
+}
+
+/**
+ * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
+ * @dev: IUCV HVC terminal device
+ *
+ * Wake up the HVC thread to trigger hang-up and respective
+ * HVC back-end notifier invocations.
+ */
+static int hvc_iucv_pm_restore_thaw(struct device *dev)
+{
+ hvc_kick();
+ return 0;
+}
+
/* HVC operations */
static struct hv_ops hvc_iucv_ops = {
@@ -863,6 +928,20 @@ static struct hv_ops hvc_iucv_ops = {
.notifier_hangup = hvc_iucv_notifier_hangup,
};
+/* Suspend / resume device operations */
+static struct dev_pm_ops hvc_iucv_pm_ops = {
+ .freeze = hvc_iucv_pm_freeze,
+ .thaw = hvc_iucv_pm_restore_thaw,
+ .restore = hvc_iucv_pm_restore_thaw,
+};
+
+/* IUCV HVC device driver */
+static struct device_driver hvc_iucv_driver = {
+ .name = KMSG_COMPONENT,
+ .bus = &iucv_bus,
+ .pm = &hvc_iucv_pm_ops,
+};
+
/**
* hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
* @id: hvc_iucv_table index
@@ -897,14 +976,12 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
/* set console flag */
priv->is_console = is_console;
- /* finally allocate hvc */
+ /* allocate hvc device */
priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */
HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
if (IS_ERR(priv->hvc)) {
rc = PTR_ERR(priv->hvc);
- free_page((unsigned long) priv->sndbuf);
- kfree(priv);
- return rc;
+ goto out_error_hvc;
}
/* notify HVC thread instead of using polling */
@@ -915,8 +992,45 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
memcpy(priv->srv_name, name, 8);
ASCEBC(priv->srv_name, 8);
+ /* create and setup device */
+ priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
+ if (!priv->dev) {
+ rc = -ENOMEM;
+ goto out_error_dev;
+ }
+ dev_set_name(priv->dev, "hvc_iucv%d", id);
+ dev_set_drvdata(priv->dev, priv);
+ priv->dev->bus = &iucv_bus;
+ priv->dev->parent = iucv_root;
+ priv->dev->driver = &hvc_iucv_driver;
+ priv->dev->release = (void (*)(struct device *)) kfree;
+ rc = device_register(priv->dev);
+ if (rc) {
+ kfree(priv->dev);
+ goto out_error_dev;
+ }
+
hvc_iucv_table[id] = priv;
return 0;
+
+out_error_dev:
+ hvc_remove(priv->hvc);
+out_error_hvc:
+ free_page((unsigned long) priv->sndbuf);
+ kfree(priv);
+
+ return rc;
+}
+
+/**
+ * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
+ */
+static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
+{
+ hvc_remove(priv->hvc);
+ device_unregister(priv->dev);
+ free_page((unsigned long) priv->sndbuf);
+ kfree(priv);
}
/**
@@ -1109,6 +1223,11 @@ static int __init hvc_iucv_init(void)
goto out_error;
}
+ /* register IUCV HVC device driver */
+ rc = driver_register(&hvc_iucv_driver);
+ if (rc)
+ goto out_error;
+
/* parse hvc_iucv_allow string and create z/VM user ID filter list */
if (hvc_iucv_filter_string) {
rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
@@ -1183,15 +1302,14 @@ out_error_iucv:
iucv_unregister(&hvc_iucv_handler, 0);
out_error_hvc:
for (i = 0; i < hvc_iucv_devices; i++)
- if (hvc_iucv_table[i]) {
- if (hvc_iucv_table[i]->hvc)
- hvc_remove(hvc_iucv_table[i]->hvc);
- kfree(hvc_iucv_table[i]);
- }
+ if (hvc_iucv_table[i])
+ hvc_iucv_destroy(hvc_iucv_table[i]);
out_error_memory:
mempool_destroy(hvc_iucv_mempool);
kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
+ if (hvc_iucv_filter)
+ kfree(hvc_iucv_filter);
hvc_iucv_devices = 0; /* ensure that we do not provide any device */
return rc;
}
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index c76bccf5354..7d64e4230e6 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -347,7 +347,7 @@ static void __exit hvcs_module_exit(void);
static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
{
- return viod->dev.driver_data;
+ return dev_get_drvdata(&viod->dev);
}
/* The sysfs interface for the driver and devices */
@@ -785,7 +785,7 @@ static int __devinit hvcs_probe(
kref_init(&hvcsd->kref);
hvcsd->vdev = dev;
- dev->dev.driver_data = hvcsd;
+ dev_set_drvdata(&dev->dev, hvcsd);
hvcsd->index = index;
@@ -831,7 +831,7 @@ static int __devinit hvcs_probe(
static int __devexit hvcs_remove(struct vio_dev *dev)
{
- struct hvcs_struct *hvcsd = dev->dev.driver_data;
+ struct hvcs_struct *hvcsd = dev_get_drvdata(&dev->dev);
unsigned long flags;
struct tty_struct *tty;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 5fab6470f4b..f4b3f7293fe 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -88,7 +88,7 @@ config HW_RANDOM_N2RNG
config HW_RANDOM_VIA
tristate "VIA HW Random Number Generator support"
- depends on HW_RANDOM && X86_32
+ depends on HW_RANDOM && X86
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
@@ -148,3 +148,15 @@ config HW_RANDOM_VIRTIO
To compile this driver as a module, choose M here: the
module will be called virtio-rng. If unsure, say N.
+
+config HW_RANDOM_MXC_RNGA
+ tristate "Freescale i.MX RNGA Random Number Generator"
+ depends on HW_RANDOM && ARCH_HAS_RNGA
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Freescale i.MX processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mxc-rnga.
+
+ If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index e81d21a5f28..fd1ecd2f673 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
+obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index e5d583c84e4..fc93e2fc7c7 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -153,6 +153,7 @@ static const struct file_operations rng_chrdev_ops = {
static struct miscdevice rng_miscdev = {
.minor = RNG_MISCDEV_MINOR,
.name = RNG_MODULE_NAME,
+ .devnode = "hwrng",
.fops = &rng_chrdev_ops,
};
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
new file mode 100644
index 00000000000..187c6be80f4
--- /dev/null
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -0,0 +1,247 @@
+/*
+ * RNG driver for Freescale RNGA
+ *
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Author: Alan Carvalho de Assis <acassis@gmail.com>
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * This driver is based on other RNG drivers.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+
+/* RNGA Registers */
+#define RNGA_CONTROL 0x00
+#define RNGA_STATUS 0x04
+#define RNGA_ENTROPY 0x08
+#define RNGA_OUTPUT_FIFO 0x0c
+#define RNGA_MODE 0x10
+#define RNGA_VERIFICATION_CONTROL 0x14
+#define RNGA_OSC_CONTROL_COUNTER 0x18
+#define RNGA_OSC1_COUNTER 0x1c
+#define RNGA_OSC2_COUNTER 0x20
+#define RNGA_OSC_COUNTER_STATUS 0x24
+
+/* RNGA Registers Range */
+#define RNG_ADDR_RANGE 0x28
+
+/* RNGA Control Register */
+#define RNGA_CONTROL_SLEEP 0x00000010
+#define RNGA_CONTROL_CLEAR_INT 0x00000008
+#define RNGA_CONTROL_MASK_INTS 0x00000004
+#define RNGA_CONTROL_HIGH_ASSURANCE 0x00000002
+#define RNGA_CONTROL_GO 0x00000001
+
+#define RNGA_STATUS_LEVEL_MASK 0x0000ff00
+
+/* RNGA Status Register */
+#define RNGA_STATUS_OSC_DEAD 0x80000000
+#define RNGA_STATUS_SLEEP 0x00000010
+#define RNGA_STATUS_ERROR_INT 0x00000008
+#define RNGA_STATUS_FIFO_UNDERFLOW 0x00000004
+#define RNGA_STATUS_LAST_READ_STATUS 0x00000002
+#define RNGA_STATUS_SECURITY_VIOLATION 0x00000001
+
+static struct platform_device *rng_dev;
+
+static int mxc_rnga_data_present(struct hwrng *rng)
+{
+ int level;
+ void __iomem *rng_base = (void __iomem *)rng->priv;
+
+ /* how many random numbers are in the FIFO? [0-16] */
+ level = ((__raw_readl(rng_base + RNGA_STATUS) &
+ RNGA_STATUS_LEVEL_MASK) >> 8);
+
+ return level > 0 ? 1 : 0;
+}
+
+static int mxc_rnga_data_read(struct hwrng *rng, u32 * data)
+{
+ int err;
+ u32 ctrl;
+ void __iomem *rng_base = (void __iomem *)rng->priv;
+
+ /* retrieve a random number from FIFO */
+ *data = __raw_readl(rng_base + RNGA_OUTPUT_FIFO);
+
+ /* some error while reading this random number? */
+ err = __raw_readl(rng_base + RNGA_STATUS) & RNGA_STATUS_ERROR_INT;
+
+ /* on error: clear the error interrupt and do not return a random number */
+ if (err) {
+ dev_dbg(&rng_dev->dev, "Error while reading random number!\n");
+ ctrl = __raw_readl(rng_base + RNGA_CONTROL);
+ __raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT,
+ rng_base + RNGA_CONTROL);
+ return 0;
+ } else
+ return 4;
+}
+
+static int mxc_rnga_init(struct hwrng *rng)
+{
+ u32 ctrl, osc;
+ void __iomem *rng_base = (void __iomem *)rng->priv;
+
+ /* wake up */
+ ctrl = __raw_readl(rng_base + RNGA_CONTROL);
+ __raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, rng_base + RNGA_CONTROL);
+
+ /* verify if oscillator is working */
+ osc = __raw_readl(rng_base + RNGA_STATUS);
+ if (osc & RNGA_STATUS_OSC_DEAD) {
+ dev_err(&rng_dev->dev, "RNGA Oscillator is dead!\n");
+ return -ENODEV;
+ }
+
+ /* go running */
+ ctrl = __raw_readl(rng_base + RNGA_CONTROL);
+ __raw_writel(ctrl | RNGA_CONTROL_GO, rng_base + RNGA_CONTROL);
+
+ return 0;
+}
+
+static void mxc_rnga_cleanup(struct hwrng *rng)
+{
+ u32 ctrl;
+ void __iomem *rng_base = (void __iomem *)rng->priv;
+
+ ctrl = __raw_readl(rng_base + RNGA_CONTROL);
+
+ /* stop rnga */
+ __raw_writel(ctrl & ~RNGA_CONTROL_GO, rng_base + RNGA_CONTROL);
+}
+
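+/*
+ * hwrng backend: data_present() reports whether the FIFO holds data,
+ * data_read() hands back one 32-bit word per call.
+ */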
+static struct hwrng mxc_rnga = {
+ .name = "mxc-rnga",
+ .init = mxc_rnga_init,
+ .cleanup = mxc_rnga_cleanup,
+ .data_present = mxc_rnga_data_present,
+ .data_read = mxc_rnga_data_read
+};
+
+static int __init mxc_rnga_probe(struct platform_device *pdev)
+{
+ int err = -ENODEV;
+ struct clk *clk;
+ struct resource *res, *mem;
+ void __iomem *rng_base = NULL;
+
+ if (rng_dev)
+ return -EBUSY;
+
+ clk = clk_get(&pdev->dev, "rng");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Could not get rng_clk!\n");
+ err = PTR_ERR(clk);
+ goto out;
+ }
+
+ clk_enable(clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENOENT;
+ goto err_region;
+ }
+
+ mem = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (mem == NULL) {
+ err = -EBUSY;
+ goto err_region;
+ }
+
+ rng_base = ioremap(res->start, resource_size(res));
+ if (!rng_base) {
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ mxc_rnga.priv = (unsigned long)rng_base;
+
+ err = hwrng_register(&mxc_rnga);
+ if (err) {
+ dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err);
+ goto err_register;
+ }
+
+ rng_dev = pdev;
+
+ dev_info(&pdev->dev, "MXC RNGA Registered.\n");
+
+ return 0;
+
+err_register:
+ iounmap(rng_base);
+ rng_base = NULL;
+
+err_ioremap:
+ release_mem_region(res->start, resource_size(res));
+
+err_region:
+ clk_disable(clk);
+ clk_put(clk);
+
+out:
+ return err;
+}
+
+static int __exit mxc_rnga_remove(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *rng_base = (void __iomem *)mxc_rnga.priv;
+ struct clk *clk = clk_get(&pdev->dev, "rng");
+
+ hwrng_unregister(&mxc_rnga);
+
+ iounmap(rng_base);
+
+ release_mem_region(res->start, resource_size(res));
+
+ clk_disable(clk);
+ clk_put(clk);
+
+ return 0;
+}
+
+static struct platform_driver mxc_rnga_driver = {
+ .driver = {
+ .name = "mxc_rnga",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(mxc_rnga_remove),
+};
+
+static int __init mod_init(void)
+{
+ return platform_driver_probe(&mxc_rnga_driver, mxc_rnga_probe);
+}
+
+static void __exit mod_exit(void)
+{
+ platform_driver_unregister(&mxc_rnga_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("H/W RNGA driver for i.MX");
+MODULE_LICENSE("GPL");
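
For context, a provider registered through hwrng_register() like the mxc-rnga driver added above is exposed to userspace by the hw_random core as a misc character device (commonly /dev/hwrng; older setups may name it /dev/hw_random). A minimal userspace sketch that pulls a few bytes from whichever backend is currently selected; everything beyond the device path is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hwrng");
		return EXIT_FAILURE;
	}

	/* The hw_random core satisfies this read by calling the current
	 * driver's data_present()/data_read() hooks, e.g. the
	 * mxc_rnga_data_read() above. */
	n = read(fd, buf, sizeof(buf));
	if (n < 0) {
		perror("read");
		close(fd);
		return EXIT_FAILURE;
	}

	for (ssize_t i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");

	close(fd);
	return EXIT_SUCCESS;
}
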
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 538313f9e7a..00dd3de1be5 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -89,7 +89,7 @@ static struct hwrng omap_rng_ops = {
.data_read = omap_rng_data_read,
};
-static int __init omap_rng_probe(struct platform_device *pdev)
+static int __devinit omap_rng_probe(struct platform_device *pdev)
{
struct resource *res, *mem;
int ret;
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index dcd352ad0e7..a94e930575f 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -88,9 +88,9 @@ static struct hwrng timeriomem_rng_ops = {
.priv = 0,
};
-static int __init timeriomem_rng_probe(struct platform_device *pdev)
+static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
{
- struct resource *res, *mem;
+ struct resource *res;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -98,21 +98,12 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
if (!res)
return -ENOENT;
- mem = request_mem_region(res->start, res->end - res->start + 1,
- pdev->name);
- if (mem == NULL)
- return -EBUSY;
-
- dev_set_drvdata(&pdev->dev, mem);
-
timeriomem_rng_data = pdev->dev.platform_data;
timeriomem_rng_data->address = ioremap(res->start,
res->end - res->start + 1);
- if (!timeriomem_rng_data->address) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
+ if (!timeriomem_rng_data->address)
+ return -EIO;
if (timeriomem_rng_data->period != 0
&& usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
@@ -125,7 +116,7 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
ret = hwrng_register(&timeriomem_rng_ops);
if (ret)
- goto err_register;
+ goto failed;
dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
timeriomem_rng_data->address,
@@ -133,24 +124,19 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
return 0;
-err_register:
+failed:
dev_err(&pdev->dev, "problem registering\n");
iounmap(timeriomem_rng_data->address);
-err_ioremap:
- release_resource(mem);
return ret;
}
static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
{
- struct resource *mem = dev_get_drvdata(&pdev->dev);
-
del_timer_sync(&timeriomem_rng_timer);
hwrng_unregister(&timeriomem_rng_ops);
iounmap(timeriomem_rng_data->address);
- release_resource(mem);
return 0;
}
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 4e9573c1d39..794aacb715c 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -132,6 +132,19 @@ static int via_rng_init(struct hwrng *rng)
struct cpuinfo_x86 *c = &cpu_data(0);
u32 lo, hi, old_lo;
+ /* VIA Nano CPUs don't have the MSR_VIA_RNG anymore. The RNG
+ * is always enabled if CPUID rng_en is set. There is no
+ * RNG configuration in this register as there was on
+ * earlier CPUs. */
+ if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
+ if (!cpu_has_xstore_enabled) {
+ printk(KERN_ERR PFX "can't enable hardware RNG "
+ "if XSTORE is not enabled\n");
+ return -ENODEV;
+ }
+ return 0;
+ }
+
/* Control the RNG via MSR. Tread lightly and pay very close
* attention to values written, as the reserved fields
* are documented to be "undefined and unpredictable"; but it
@@ -205,5 +218,5 @@ static void __exit mod_exit(void)
module_init(mod_init);
module_exit(mod_exit);
-MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets");
+MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
MODULE_LICENSE("GPL");
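
The check added above keys off the VIA/Centaur extended CPUID leaf rather than the old MSR. A rough userspace sketch of that probe (GCC/Clang on x86; the 0xC0000001 leaf and the EDX bit positions are an assumption drawn from the kernel's xstore feature flags, not taken from this patch):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0xC0000000 reports the highest Centaur/VIA extended leaf. */
	__cpuid(0xC0000000, eax, ebx, ecx, edx);
	if ((eax & 0xffff0000) != 0xc0000000 || eax < 0xc0000001) {
		puts("no Centaur extended CPUID leaves on this CPU");
		return 0;
	}

	__cpuid(0xC0000001, eax, ebx, ecx, edx);

	/* Assumed bit layout: EDX bit 2 = xstore (RNG) present, bit 3 =
	 * xstore enabled -- matching the cpu_has_xstore* flags used above. */
	printf("PadLock RNG present: %s, enabled: %s\n",
	       (edx & (1u << 2)) ? "yes" : "no",
	       (edx & (1u << 3)) ? "yes" : "no");
	return 0;
}
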
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 86e83f88313..32216b62324 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -35,13 +35,13 @@ static DECLARE_COMPLETION(have_data);
static void random_recv_done(struct virtqueue *vq)
{
- int len;
+ unsigned int len;
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
if (!vq->vq_ops->get_buf(vq, &len))
return;
- data_left = len / sizeof(random_data[0]);
+ data_left += len;
complete(&have_data);
}
@@ -49,7 +49,7 @@ static void register_buffer(void)
{
struct scatterlist sg;
- sg_init_one(&sg, random_data, RANDOM_DATA_SIZE);
+ sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left);
/* There should always be room for one buffer. */
if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0)
BUG();
@@ -59,24 +59,32 @@ static void register_buffer(void)
/* At least we don't udelay() in a loop like some other drivers. */
static int virtio_data_present(struct hwrng *rng, int wait)
{
- if (data_left)
+ if (data_left >= sizeof(u32))
return 1;
+again:
if (!wait)
return 0;
wait_for_completion(&have_data);
+
+ /* Not enough? Re-register. */
+ if (unlikely(data_left < sizeof(u32))) {
+ register_buffer();
+ goto again;
+ }
+
return 1;
}
/* virtio_data_present() must have succeeded before this is called. */
static int virtio_data_read(struct hwrng *rng, u32 *data)
{
- BUG_ON(!data_left);
-
- *data = random_data[--data_left];
+ BUG_ON(data_left < sizeof(u32));
+ data_left -= sizeof(u32);
+ *data = random_data[data_left / 4];
- if (!data_left) {
+ if (data_left < sizeof(u32)) {
init_completion(&have_data);
register_buffer();
}
@@ -94,13 +102,13 @@ static int virtrng_probe(struct virtio_device *vdev)
int err;
/* We expect a single virtqueue. */
- vq = vdev->config->find_vq(vdev, 0, random_recv_done);
+ vq = virtio_find_single_vq(vdev, random_recv_done, "input");
if (IS_ERR(vq))
return PTR_ERR(vq);
err = hwrng_register(&virtio_hwrng);
if (err) {
- vdev->config->del_vq(vq);
+ vdev->config->del_vqs(vdev);
return err;
}
@@ -112,7 +120,7 @@ static void virtrng_remove(struct virtio_device *vdev)
{
vdev->config->reset(vdev);
hwrng_unregister(&virtio_hwrng);
- vdev->config->del_vq(vq);
+ vdev->config->del_vqs(vdev);
}
static struct virtio_device_id id_table[] = {
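
The virtio-rng change above switches data_left from counting 32-bit words to counting bytes, consumes one u32 per data_read() call, and re-registers a buffer with the host whenever fewer than sizeof(u32) bytes remain. A rough standalone sketch of that accounting (the fill() callback stands in for handing a buffer to the virtqueue and waiting for the completion; all names here are invented):

#include <stdint.h>
#include <string.h>

#define POOL_SIZE 64				/* illustrative buffer size */

static uint8_t pool[POOL_SIZE];
static size_t data_left;			/* valid bytes, not 32-bit words */

/* fill() stands in for re-registering the buffer with the host; it is
 * assumed to eventually return at least one byte, as the driver blocks
 * on the virtqueue completion at this point. */
static uint32_t get_u32(size_t (*fill)(uint8_t *dst, size_t len))
{
	uint32_t v;

	while (data_left < sizeof(v))		/* "Not enough? Re-register." */
		data_left += fill(pool + data_left, POOL_SIZE - data_left);

	data_left -= sizeof(v);			/* consume from the tail ... */
	memcpy(&v, pool + data_left, sizeof(v));	/* ... of the valid region */
	return v;
}
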
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index 0061e18aff6..0d10b89218e 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -868,11 +868,11 @@ i2Input(i2ChanStrPtr pCh)
amountToMove = count;
}
// Move the first block
- pCh->pTTY->ldisc.ops->receive_buf( pCh->pTTY,
+ pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY,
&(pCh->Ibuf[stripIndex]), NULL, amountToMove );
// If we needed to wrap, do the second data move
if (count > amountToMove) {
- pCh->pTTY->ldisc.ops->receive_buf( pCh->pTTY,
+ pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY,
pCh->Ibuf, NULL, count - amountToMove );
}
// Bump and wrap the stripIndex all at once by the amount of data read. This
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index afd9247cf08..517271c762e 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -1315,8 +1315,8 @@ static inline void isig(int sig, struct tty_struct *tty, int flush)
if (tty->pgrp)
kill_pgrp(tty->pgrp, sig, 1);
if (flush || !L_NOFLSH(tty)) {
- if ( tty->ldisc.ops->flush_buffer )
- tty->ldisc.ops->flush_buffer(tty);
+ if ( tty->ldisc->ops->flush_buffer )
+ tty->ldisc->ops->flush_buffer(tty);
i2InputFlush( tty->driver_data );
}
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index aa83a0865ec..09050797c76 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2856,6 +2856,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
/* Assume a single IPMB channel at zero. */
intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
+ intf->curr_channel = IPMI_MAX_CHANNELS;
}
if (rv == 0)
@@ -3648,13 +3649,13 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
}
/*
- ** We need to make sure the channels have been initialized.
- ** The channel_handler routine will set the "curr_channel"
- ** equal to or greater than IPMI_MAX_CHANNELS when all the
- ** channels for this interface have been initialized.
- */
+ * We need to make sure the channels have been initialized.
+ * The channel_handler routine will set the "curr_channel"
+ * equal to or greater than IPMI_MAX_CHANNELS when all the
+ * channels for this interface have been initialized.
+ */
if (intf->curr_channel < IPMI_MAX_CHANNELS) {
- requeue = 1; /* Just put the message back for now */
+ requeue = 0; /* Throw the message away */
goto out;
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 259644646b8..d2e698096ac 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2375,14 +2375,14 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
info->io.addr_data, info->io.regsize, info->io.regspacing,
info->irq);
- dev->dev.driver_data = (void *) info;
+ dev_set_drvdata(&dev->dev, info);
return try_smi_init(info);
}
static int __devexit ipmi_of_remove(struct of_device *dev)
{
- cleanup_one_si(dev->dev.driver_data);
+ cleanup_one_si(dev_get_drvdata(&dev->dev));
return 0;
}
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index a59eac584d1..4d745a89504 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -329,7 +329,7 @@ static inline void drop_rts(struct isi_port *port)
/* card->lock MUST NOT be held */
-static void isicom_raise_dtr_rts(struct tty_port *port)
+static void isicom_dtr_rts(struct tty_port *port, int on)
{
struct isi_port *ip = container_of(port, struct isi_port, port);
struct isi_board *card = ip->card;
@@ -339,10 +339,17 @@ static void isicom_raise_dtr_rts(struct tty_port *port)
if (!lock_card(card))
return;
- outw(0x8000 | (channel << card->shift_count) | 0x02, base);
- outw(0x0f04, base);
- InterruptTheCard(base);
- ip->status |= (ISI_DTR | ISI_RTS);
+ if (on) {
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
+ outw(0x0f04, base);
+ InterruptTheCard(base);
+ ip->status |= (ISI_DTR | ISI_RTS);
+ } else {
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
+ outw(0x0C04, base);
+ InterruptTheCard(base);
+ ip->status &= ~(ISI_DTR | ISI_RTS);
+ }
unlock_card(card);
}
@@ -1339,7 +1346,7 @@ static const struct tty_operations isicom_ops = {
static const struct tty_port_operations isicom_port_ops = {
.carrier_raised = isicom_carrier_raised,
- .raise_dtr_rts = isicom_raise_dtr_rts,
+ .dtr_rts = isicom_dtr_rts,
};
static int __devinit reset_card(struct pci_dev *pdev,
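
Several drivers in this series (isicom above, and istallion, mxser, rocket, stallion and the synclink family below) are converted from the one-way raise_dtr_rts() hook to the two-way dtr_rts(port, on) tty_port operation, so the core can both assert and drop the modem-control lines. A sketch of the shape such a callback takes, using an invented example driver (the struct, register bits and helpers are hypothetical, not from this patch):

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/tty.h>

struct example_port {
	struct tty_port port;
	spinlock_t lock;
	u8 mcr;				/* cached modem-control bits (invented) */
};

#define EXAMPLE_MCR_DTR 0x01		/* invented bit layout */
#define EXAMPLE_MCR_RTS 0x02

static void example_write_mcr(struct example_port *ep)
{
	/* a real driver would push ep->mcr to its hardware here */
}

static int example_carrier_raised(struct tty_port *port)
{
	return 1;			/* pretend DCD is always up */
}

static void example_dtr_rts(struct tty_port *port, int on)
{
	struct example_port *ep = container_of(port, struct example_port, port);
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);
	if (on)
		ep->mcr |= EXAMPLE_MCR_DTR | EXAMPLE_MCR_RTS;
	else
		ep->mcr &= ~(EXAMPLE_MCR_DTR | EXAMPLE_MCR_RTS);
	example_write_mcr(ep);
	spin_unlock_irqrestore(&ep->lock, flags);
}

static const struct tty_port_operations example_port_ops = {
	.carrier_raised	= example_carrier_raised,
	.dtr_rts	= example_dtr_rts,
};
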
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index fff19f7e29d..e18800c400b 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -1140,14 +1140,14 @@ static int stli_carrier_raised(struct tty_port *port)
return (portp->sigs & TIOCM_CD) ? 1 : 0;
}
-static void stli_raise_dtr_rts(struct tty_port *port)
+static void stli_dtr_rts(struct tty_port *port, int on)
{
struct stliport *portp = container_of(port, struct stliport, port);
struct stlibrd *brdp = stli_brds[portp->brdnr];
- stli_mkasysigs(&portp->asig, 1, 1);
+ stli_mkasysigs(&portp->asig, on, on);
if (stli_cmdwait(brdp, portp, A_SETSIGNALS, &portp->asig,
sizeof(asysigs_t), 0) < 0)
- printk(KERN_WARNING "istallion: dtr raise failed.\n");
+ printk(KERN_WARNING "istallion: dtr set failed.\n");
}
@@ -4417,7 +4417,7 @@ static const struct tty_operations stli_ops = {
static const struct tty_port_operations stli_port_ops = {
.carrier_raised = stli_carrier_raised,
- .raise_dtr_rts = stli_raise_dtr_rts,
+ .dtr_rts = stli_dtr_rts,
};
/*****************************************************************************/
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 8f05c38c2f0..f96d0bef855 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -694,6 +694,8 @@ static ssize_t read_zero(struct file * file, char __user * buf,
written += chunk - unwritten;
if (unwritten)
break;
+ if (signal_pending(current))
+ return written ? written : -ERESTARTSYS;
buf += chunk;
count -= chunk;
cond_resched();
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index a5e0db9d766..62c99fa59e2 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -168,7 +168,6 @@ static const struct file_operations misc_fops = {
.open = misc_open,
};
-
/**
* misc_register - register a miscellaneous device
* @misc: device structure
@@ -217,8 +216,8 @@ int misc_register(struct miscdevice * misc)
misc_minors[misc->minor >> 3] |= 1 << (misc->minor & 7);
dev = MKDEV(MISC_MAJOR, misc->minor);
- misc->this_device = device_create(misc_class, misc->parent, dev, NULL,
- "%s", misc->name);
+ misc->this_device = device_create(misc_class, misc->parent, dev,
+ misc, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
err = PTR_ERR(misc->this_device);
goto out;
@@ -264,6 +263,15 @@ int misc_deregister(struct miscdevice *misc)
EXPORT_SYMBOL(misc_register);
EXPORT_SYMBOL(misc_deregister);
+static char *misc_nodename(struct device *dev)
+{
+ struct miscdevice *c = dev_get_drvdata(dev);
+
+ if (c->devnode)
+ return kstrdup(c->devnode, GFP_KERNEL);
+ return NULL;
+}
+
static int __init misc_init(void)
{
int err;
@@ -279,6 +287,7 @@ static int __init misc_init(void)
err = -EIO;
if (register_chrdev(MISC_MAJOR,"misc",&misc_fops))
goto fail_printk;
+ misc_class->nodename = misc_nodename;
return 0;
fail_printk:
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 4a4cab73d0b..65b6ff2442c 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1184,6 +1184,11 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
return -ENODEV;
}
+ if (port % MAX_PORTS_PER_BOARD >= brd->numPorts) {
+ mutex_unlock(&moxa_openlock);
+ return -ENODEV;
+ }
+
ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
ch->port.count++;
tty->driver_data = ch;
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index a420e8d437d..9533f43a30b 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -547,14 +547,18 @@ static int mxser_carrier_raised(struct tty_port *port)
return (inb(mp->ioaddr + UART_MSR) & UART_MSR_DCD)?1:0;
}
-static void mxser_raise_dtr_rts(struct tty_port *port)
+static void mxser_dtr_rts(struct tty_port *port, int on)
{
struct mxser_port *mp = container_of(port, struct mxser_port, port);
unsigned long flags;
spin_lock_irqsave(&mp->slock, flags);
- outb(inb(mp->ioaddr + UART_MCR) |
- UART_MCR_DTR | UART_MCR_RTS, mp->ioaddr + UART_MCR);
+ if (on)
+ outb(inb(mp->ioaddr + UART_MCR) |
+ UART_MCR_DTR | UART_MCR_RTS, mp->ioaddr + UART_MCR);
+ else
+ outb(inb(mp->ioaddr + UART_MCR)&~(UART_MCR_DTR | UART_MCR_RTS),
+ mp->ioaddr + UART_MCR);
spin_unlock_irqrestore(&mp->slock, flags);
}
@@ -2356,7 +2360,7 @@ static const struct tty_operations mxser_ops = {
struct tty_port_operations mxser_port_ops = {
.carrier_raised = mxser_carrier_raised,
- .raise_dtr_rts = mxser_raise_dtr_rts,
+ .dtr_rts = mxser_dtr_rts,
};
/*
@@ -2711,7 +2715,7 @@ static int __init mxser_module_init(void)
continue;
brd = &mxser_boards[m];
- retval = mxser_get_ISA_conf(!ioaddr[b], brd);
+ retval = mxser_get_ISA_conf(ioaddr[b], brd);
if (retval <= 0) {
brd->info = NULL;
continue;
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index bacb3e2872a..461ece591a5 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -342,8 +342,8 @@ static int n_hdlc_tty_open (struct tty_struct *tty)
#endif
/* Flush any pending characters in the driver and discipline. */
- if (tty->ldisc.ops->flush_buffer)
- tty->ldisc.ops->flush_buffer(tty);
+ if (tty->ldisc->ops->flush_buffer)
+ tty->ldisc->ops->flush_buffer(tty);
tty_driver_flush_buffer(tty);
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index f6f0e4ec2b5..94a5d5020ab 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -73,24 +73,6 @@
#define ECHO_OP_SET_CANON_COL 0x81
#define ECHO_OP_ERASE_TAB 0x82
-static inline unsigned char *alloc_buf(void)
-{
- gfp_t prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
-
- if (PAGE_SIZE != N_TTY_BUF_SIZE)
- return kmalloc(N_TTY_BUF_SIZE, prio);
- else
- return (unsigned char *)__get_free_page(prio);
-}
-
-static inline void free_buf(unsigned char *buf)
-{
- if (PAGE_SIZE != N_TTY_BUF_SIZE)
- kfree(buf);
- else
- free_page((unsigned long) buf);
-}
-
static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
unsigned char __user *ptr)
{
@@ -1558,11 +1540,11 @@ static void n_tty_close(struct tty_struct *tty)
{
n_tty_flush_buffer(tty);
if (tty->read_buf) {
- free_buf(tty->read_buf);
+ kfree(tty->read_buf);
tty->read_buf = NULL;
}
if (tty->echo_buf) {
- free_buf(tty->echo_buf);
+ kfree(tty->echo_buf);
tty->echo_buf = NULL;
}
}
@@ -1584,17 +1566,16 @@ static int n_tty_open(struct tty_struct *tty)
/* These are ugly. Currently a malloc failure here can panic */
if (!tty->read_buf) {
- tty->read_buf = alloc_buf();
+ tty->read_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
if (!tty->read_buf)
return -ENOMEM;
}
if (!tty->echo_buf) {
- tty->echo_buf = alloc_buf();
+ tty->echo_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
+
if (!tty->echo_buf)
return -ENOMEM;
}
- memset(tty->read_buf, 0, N_TTY_BUF_SIZE);
- memset(tty->echo_buf, 0, N_TTY_BUF_SIZE);
reset_buffer_flags(tty);
tty->column = 0;
n_tty_set_termios(tty, NULL);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 19d79fc5446..77b36488922 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -383,7 +383,7 @@ static void async_mode(MGSLPC_INFO *info);
static void tx_timeout(unsigned long context);
static int carrier_raised(struct tty_port *port);
-static void raise_dtr_rts(struct tty_port *port);
+static void dtr_rts(struct tty_port *port, int onoff);
#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
@@ -513,7 +513,7 @@ static void ldisc_receive_buf(struct tty_struct *tty,
static const struct tty_port_operations mgslpc_port_ops = {
.carrier_raised = carrier_raised,
- .raise_dtr_rts = raise_dtr_rts
+ .dtr_rts = dtr_rts
};
static int mgslpc_probe(struct pcmcia_device *link)
@@ -2528,13 +2528,16 @@ static int carrier_raised(struct tty_port *port)
return 0;
}
-static void raise_dtr_rts(struct tty_port *port)
+static void dtr_rts(struct tty_port *port, int onoff)
{
MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ if (onoff)
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index afbe45676d7..f424d394a28 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -33,48 +33,64 @@
struct ps3flash_private {
struct mutex mutex; /* Bounce buffer mutex */
+ u64 chunk_sectors;
+ int tag; /* Start sector of buffer, -1 if invalid */
+ bool dirty;
};
static struct ps3_storage_device *ps3flash_dev;
-static ssize_t ps3flash_read_write_sectors(struct ps3_storage_device *dev,
- u64 lpar, u64 start_sector,
- u64 sectors, int write)
+static int ps3flash_read_write_sectors(struct ps3_storage_device *dev,
+ u64 start_sector, int write)
{
- u64 res = ps3stor_read_write_sectors(dev, lpar, start_sector, sectors,
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ u64 res = ps3stor_read_write_sectors(dev, dev->bounce_lpar,
+ start_sector, priv->chunk_sectors,
write);
if (res) {
dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
__LINE__, write ? "write" : "read", res);
return -EIO;
}
- return sectors;
+ return 0;
}
-static ssize_t ps3flash_read_sectors(struct ps3_storage_device *dev,
- u64 start_sector, u64 sectors,
- unsigned int sector_offset)
+static int ps3flash_writeback(struct ps3_storage_device *dev)
{
- u64 max_sectors, lpar;
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ int res;
- max_sectors = dev->bounce_size / dev->blk_size;
- if (sectors > max_sectors) {
- dev_dbg(&dev->sbd.core, "%s:%u Limiting sectors to %llu\n",
- __func__, __LINE__, max_sectors);
- sectors = max_sectors;
- }
+ if (!priv->dirty || priv->tag < 0)
+ return 0;
- lpar = dev->bounce_lpar + sector_offset * dev->blk_size;
- return ps3flash_read_write_sectors(dev, lpar, start_sector, sectors,
- 0);
+ res = ps3flash_read_write_sectors(dev, priv->tag, 1);
+ if (res)
+ return res;
+
+ priv->dirty = false;
+ return 0;
}
-static ssize_t ps3flash_write_chunk(struct ps3_storage_device *dev,
- u64 start_sector)
+static int ps3flash_fetch(struct ps3_storage_device *dev, u64 start_sector)
{
- u64 sectors = dev->bounce_size / dev->blk_size;
- return ps3flash_read_write_sectors(dev, dev->bounce_lpar, start_sector,
- sectors, 1);
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ int res;
+
+ if (start_sector == priv->tag)
+ return 0;
+
+ res = ps3flash_writeback(dev);
+ if (res)
+ return res;
+
+ priv->tag = -1;
+
+ res = ps3flash_read_write_sectors(dev, start_sector, 0);
+ if (res)
+ return res;
+
+ priv->tag = start_sector;
+ return 0;
}
static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin)
@@ -104,18 +120,19 @@ out:
return res;
}
-static ssize_t ps3flash_read(struct file *file, char __user *buf, size_t count,
- loff_t *pos)
+static ssize_t ps3flash_read(char __user *userbuf, void *kernelbuf,
+ size_t count, loff_t *pos)
{
struct ps3_storage_device *dev = ps3flash_dev;
- struct ps3flash_private *priv = dev->sbd.core.driver_data;
- u64 size, start_sector, end_sector, offset;
- ssize_t sectors_read;
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ u64 size, sector, offset;
+ int res;
size_t remaining, n;
+ const void *src;
dev_dbg(&dev->sbd.core,
- "%s:%u: Reading %zu bytes at position %lld to user 0x%p\n",
- __func__, __LINE__, count, *pos, buf);
+ "%s:%u: Reading %zu bytes at position %lld to U0x%p/K0x%p\n",
+ __func__, __LINE__, count, *pos, userbuf, kernelbuf);
size = dev->regions[dev->region_idx].size*dev->blk_size;
if (*pos >= size || !count)
@@ -128,61 +145,63 @@ static ssize_t ps3flash_read(struct file *file, char __user *buf, size_t count,
count = size - *pos;
}
- start_sector = *pos / dev->blk_size;
- offset = *pos % dev->blk_size;
- end_sector = DIV_ROUND_UP(*pos + count, dev->blk_size);
+ sector = *pos / dev->bounce_size * priv->chunk_sectors;
+ offset = *pos % dev->bounce_size;
remaining = count;
do {
+ n = min_t(u64, remaining, dev->bounce_size - offset);
+ src = dev->bounce_buf + offset;
+
mutex_lock(&priv->mutex);
- sectors_read = ps3flash_read_sectors(dev, start_sector,
- end_sector-start_sector,
- 0);
- if (sectors_read < 0) {
- mutex_unlock(&priv->mutex);
+ res = ps3flash_fetch(dev, sector);
+ if (res)
goto fail;
- }
- n = min_t(u64, remaining, sectors_read*dev->blk_size-offset);
dev_dbg(&dev->sbd.core,
- "%s:%u: copy %lu bytes from 0x%p to user 0x%p\n",
- __func__, __LINE__, n, dev->bounce_buf+offset, buf);
- if (copy_to_user(buf, dev->bounce_buf+offset, n)) {
- mutex_unlock(&priv->mutex);
- sectors_read = -EFAULT;
- goto fail;
+ "%s:%u: copy %lu bytes from 0x%p to U0x%p/K0x%p\n",
+ __func__, __LINE__, n, src, userbuf, kernelbuf);
+ if (userbuf) {
+ if (copy_to_user(userbuf, src, n)) {
+ res = -EFAULT;
+ goto fail;
+ }
+ userbuf += n;
+ }
+ if (kernelbuf) {
+ memcpy(kernelbuf, src, n);
+ kernelbuf += n;
}
mutex_unlock(&priv->mutex);
*pos += n;
- buf += n;
remaining -= n;
- start_sector += sectors_read;
+ sector += priv->chunk_sectors;
offset = 0;
} while (remaining > 0);
return count;
fail:
- return sectors_read;
+ mutex_unlock(&priv->mutex);
+ return res;
}
-static ssize_t ps3flash_write(struct file *file, const char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t ps3flash_write(const char __user *userbuf,
+ const void *kernelbuf, size_t count, loff_t *pos)
{
struct ps3_storage_device *dev = ps3flash_dev;
- struct ps3flash_private *priv = dev->sbd.core.driver_data;
- u64 size, chunk_sectors, start_write_sector, end_write_sector,
- end_read_sector, start_read_sector, head, tail, offset;
- ssize_t res;
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ u64 size, sector, offset;
+ int res = 0;
size_t remaining, n;
- unsigned int sec_off;
+ void *dst;
dev_dbg(&dev->sbd.core,
- "%s:%u: Writing %zu bytes at position %lld from user 0x%p\n",
- __func__, __LINE__, count, *pos, buf);
+ "%s:%u: Writing %zu bytes at position %lld from U0x%p/K0x%p\n",
+ __func__, __LINE__, count, *pos, userbuf, kernelbuf);
size = dev->regions[dev->region_idx].size*dev->blk_size;
if (*pos >= size || !count)
@@ -195,89 +214,46 @@ static ssize_t ps3flash_write(struct file *file, const char __user *buf,
count = size - *pos;
}
- chunk_sectors = dev->bounce_size / dev->blk_size;
-
- start_write_sector = *pos / dev->bounce_size * chunk_sectors;
+ sector = *pos / dev->bounce_size * priv->chunk_sectors;
offset = *pos % dev->bounce_size;
- end_write_sector = DIV_ROUND_UP(*pos + count, dev->bounce_size) *
- chunk_sectors;
-
- end_read_sector = DIV_ROUND_UP(*pos, dev->blk_size);
- start_read_sector = (*pos + count) / dev->blk_size;
-
- /*
- * As we have to write in 256 KiB chunks, while we can read in blk_size
- * (usually 512 bytes) chunks, we perform the following steps:
- * 1. Read from start_write_sector to end_read_sector ("head")
- * 2. Read from start_read_sector to end_write_sector ("tail")
- * 3. Copy data to buffer
- * 4. Write from start_write_sector to end_write_sector
- * All of this is complicated by using only one 256 KiB bounce buffer.
- */
-
- head = end_read_sector - start_write_sector;
- tail = end_write_sector - start_read_sector;
remaining = count;
do {
+ n = min_t(u64, remaining, dev->bounce_size - offset);
+ dst = dev->bounce_buf + offset;
+
mutex_lock(&priv->mutex);
- if (end_read_sector >= start_read_sector) {
- /* Merge head and tail */
- dev_dbg(&dev->sbd.core,
- "Merged head and tail: %llu sectors at %llu\n",
- chunk_sectors, start_write_sector);
- res = ps3flash_read_sectors(dev, start_write_sector,
- chunk_sectors, 0);
- if (res < 0)
+ if (n != dev->bounce_size)
+ res = ps3flash_fetch(dev, sector);
+ else if (sector != priv->tag)
+ res = ps3flash_writeback(dev);
+ if (res)
+ goto fail;
+
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: copy %lu bytes from U0x%p/K0x%p to 0x%p\n",
+ __func__, __LINE__, n, userbuf, kernelbuf, dst);
+ if (userbuf) {
+ if (copy_from_user(dst, userbuf, n)) {
+ res = -EFAULT;
goto fail;
- } else {
- if (head) {
- /* Read head */
- dev_dbg(&dev->sbd.core,
- "head: %llu sectors at %llu\n", head,
- start_write_sector);
- res = ps3flash_read_sectors(dev,
- start_write_sector,
- head, 0);
- if (res < 0)
- goto fail;
- }
- if (start_read_sector <
- start_write_sector+chunk_sectors) {
- /* Read tail */
- dev_dbg(&dev->sbd.core,
- "tail: %llu sectors at %llu\n", tail,
- start_read_sector);
- sec_off = start_read_sector-start_write_sector;
- res = ps3flash_read_sectors(dev,
- start_read_sector,
- tail, sec_off);
- if (res < 0)
- goto fail;
}
+ userbuf += n;
}
-
- n = min_t(u64, remaining, dev->bounce_size-offset);
- dev_dbg(&dev->sbd.core,
- "%s:%u: copy %lu bytes from user 0x%p to 0x%p\n",
- __func__, __LINE__, n, buf, dev->bounce_buf+offset);
- if (copy_from_user(dev->bounce_buf+offset, buf, n)) {
- res = -EFAULT;
- goto fail;
+ if (kernelbuf) {
+ memcpy(dst, kernelbuf, n);
+ kernelbuf += n;
}
- res = ps3flash_write_chunk(dev, start_write_sector);
- if (res < 0)
- goto fail;
+ priv->tag = sector;
+ priv->dirty = true;
mutex_unlock(&priv->mutex);
*pos += n;
- buf += n;
remaining -= n;
- start_write_sector += chunk_sectors;
- head = 0;
+ sector += priv->chunk_sectors;
offset = 0;
} while (remaining > 0);
@@ -288,6 +264,51 @@ fail:
return res;
}
+static ssize_t ps3flash_user_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ return ps3flash_read(buf, NULL, count, pos);
+}
+
+static ssize_t ps3flash_user_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ return ps3flash_write(buf, NULL, count, pos);
+}
+
+static ssize_t ps3flash_kernel_read(void *buf, size_t count, loff_t pos)
+{
+ return ps3flash_read(NULL, buf, count, &pos);
+}
+
+static ssize_t ps3flash_kernel_write(const void *buf, size_t count,
+ loff_t pos)
+{
+ ssize_t res;
+ int wb;
+
+ res = ps3flash_write(NULL, buf, count, &pos);
+ if (res < 0)
+ return res;
+
+ /* Make kernel writes synchronous */
+ wb = ps3flash_writeback(ps3flash_dev);
+ if (wb)
+ return wb;
+
+ return res;
+}
+
+static int ps3flash_flush(struct file *file, fl_owner_t id)
+{
+ return ps3flash_writeback(ps3flash_dev);
+}
+
+static int ps3flash_fsync(struct file *file, struct dentry *dentry,
+ int datasync)
+{
+ return ps3flash_writeback(ps3flash_dev);
+}
static irqreturn_t ps3flash_interrupt(int irq, void *data)
{
@@ -312,12 +333,18 @@ static irqreturn_t ps3flash_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-
static const struct file_operations ps3flash_fops = {
.owner = THIS_MODULE,
.llseek = ps3flash_llseek,
- .read = ps3flash_read,
- .write = ps3flash_write,
+ .read = ps3flash_user_read,
+ .write = ps3flash_user_write,
+ .flush = ps3flash_flush,
+ .fsync = ps3flash_fsync,
+};
+
+static const struct ps3_os_area_flash_ops ps3flash_kernel_ops = {
+ .read = ps3flash_kernel_read,
+ .write = ps3flash_kernel_write,
};
static struct miscdevice ps3flash_misc = {
@@ -366,11 +393,13 @@ static int __devinit ps3flash_probe(struct ps3_system_bus_device *_dev)
goto fail;
}
- dev->sbd.core.driver_data = priv;
+ ps3_system_bus_set_drvdata(&dev->sbd, priv);
mutex_init(&priv->mutex);
+ priv->tag = -1;
dev->bounce_size = ps3flash_bounce_buffer.size;
dev->bounce_buf = ps3flash_bounce_buffer.address;
+ priv->chunk_sectors = dev->bounce_size / dev->blk_size;
error = ps3stor_setup(dev, ps3flash_interrupt);
if (error)
@@ -386,13 +415,15 @@ static int __devinit ps3flash_probe(struct ps3_system_bus_device *_dev)
dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n",
__func__, __LINE__, ps3flash_misc.minor);
+
+ ps3_os_area_flash_register(&ps3flash_kernel_ops);
return 0;
fail_teardown:
ps3stor_teardown(dev);
fail_free_priv:
kfree(priv);
- dev->sbd.core.driver_data = NULL;
+ ps3_system_bus_set_drvdata(&dev->sbd, NULL);
fail:
ps3flash_dev = NULL;
return error;
@@ -402,10 +433,11 @@ static int ps3flash_remove(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+ ps3_os_area_flash_register(NULL);
misc_deregister(&ps3flash_misc);
ps3stor_teardown(dev);
- kfree(dev->sbd.core.driver_data);
- dev->sbd.core.driver_data = NULL;
+ kfree(ps3_system_bus_get_drvdata(&dev->sbd));
+ ps3_system_bus_set_drvdata(&dev->sbd, NULL);
ps3flash_dev = NULL;
return 0;
}
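
The ps3flash rework above replaces the head/tail sector juggling with a single cached bounce buffer identified by a start-sector tag plus a dirty flag, written back lazily on flush/fsync or when a different chunk is needed. The core of that scheme, reduced to a self-contained sketch with the flash region faked by an in-memory array (all names invented; bounds checks omitted):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define CHUNK 256			/* stands in for dev->bounce_size */
#define NCHUNKS 16

static uint8_t backing[NCHUNKS][CHUNK];	/* stands in for the flash region */
static uint8_t bounce[CHUNK];		/* the single bounce buffer */
static int tag = -1;			/* chunk held in the buffer, -1 if none */
static bool dirty;

static void writeback(void)		/* cf. ps3flash_writeback() */
{
	if (dirty && tag >= 0)
		memcpy(backing[tag], bounce, CHUNK);
	dirty = false;
}

static void fetch(int chunk)		/* cf. ps3flash_fetch() */
{
	if (chunk == tag)
		return;			/* already cached */
	writeback();			/* flush the old chunk first */
	memcpy(bounce, backing[chunk], CHUNK);
	tag = chunk;
}

/* Writes land in the cached chunk and only mark it dirty; a later fetch()
 * of another chunk, or an explicit writeback() (flush/fsync), pushes the
 * data out, as in the patched driver. */
static void write_bytes(size_t pos, const uint8_t *src, size_t n)
{
	while (n) {
		int chunk = pos / CHUNK;
		size_t off = pos % CHUNK;
		size_t len = n < CHUNK - off ? n : CHUNK - off;

		if (len != CHUNK)
			fetch(chunk);	/* partial chunk: read-modify-write */
		else if (chunk != tag)
			writeback();	/* full chunk: old contents not needed */
		memcpy(bounce + off, src, len);
		tag = chunk;
		dirty = true;
		pos += len;
		src += len;
		n -= len;
	}
}
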
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 31038a0052a..daebe1ba43d 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -30,7 +30,6 @@
#include <asm/system.h>
-/* These are global because they are accessed in tty_io.c */
#ifdef CONFIG_UNIX98_PTYS
static struct tty_driver *ptm_driver;
static struct tty_driver *pts_driver;
@@ -96,23 +95,34 @@ static void pty_unthrottle(struct tty_struct *tty)
* a count.
*
* FIXME: Our pty_write method is called with our ldisc lock held but
- * not our partners. We can't just take the other one blindly without
- * risking deadlocks.
+ * not our partner's. We can't just wait on the other one blindly without
+ * risking deadlocks. At some point when everything has settled down we need
+ * to look into making pty_write at least able to sleep over an ldisc change.
+ *
+ * The return on no ldisc is a bit counterintuitive, but the logic works
+ * like this. During an ldisc change the other end will flush its buffers. We
+ * thus return the full length which is identical to the case where we had
+ * proper locking and happened to queue the bytes just before the flush during
+ * the ldisc change.
*/
static int pty_write(struct tty_struct *tty, const unsigned char *buf,
int count)
{
struct tty_struct *to = tty->link;
- int c;
+ struct tty_ldisc *ld;
+ int c = count;
if (!to || tty->stopped)
return 0;
-
- c = to->receive_room;
- if (c > count)
- c = count;
- to->ldisc.ops->receive_buf(to, buf, NULL, c);
-
+ ld = tty_ldisc_ref(to);
+
+ if (ld) {
+ c = to->receive_room;
+ if (c > count)
+ c = count;
+ ld->ops->receive_buf(to, buf, NULL, c);
+ tty_ldisc_deref(ld);
+ }
return c;
}
@@ -146,14 +156,23 @@ static int pty_write_room(struct tty_struct *tty)
static int pty_chars_in_buffer(struct tty_struct *tty)
{
struct tty_struct *to = tty->link;
- int count;
+ struct tty_ldisc *ld;
+ int count = 0;
/* We should get the line discipline lock for "tty->link" */
- if (!to || !to->ldisc.ops->chars_in_buffer)
+ if (!to)
+ return 0;
+ /* We cannot take a sleeping reference here without deadlocking with
+ an ldisc change - but it doesn't really matter */
+ ld = tty_ldisc_ref(to);
+ if (ld == NULL)
return 0;
/* The ldisc must report 0 if no characters available to be read */
- count = to->ldisc.ops->chars_in_buffer(to);
+ if (ld->ops->chars_in_buffer)
+ count = ld->ops->chars_in_buffer(to);
+
+ tty_ldisc_deref(ld);
if (tty->driver->subtype == PTY_TYPE_SLAVE)
return count;
@@ -183,12 +202,19 @@ static void pty_flush_buffer(struct tty_struct *tty)
{
struct tty_struct *to = tty->link;
unsigned long flags;
+ struct tty_ldisc *ld;
if (!to)
return;
+ ld = tty_ldisc_ref(to);
+
+ /* The other end is changing discipline */
+ if (!ld)
+ return;
- if (to->ldisc.ops->flush_buffer)
- to->ldisc.ops->flush_buffer(to);
+ if (ld->ops->flush_buffer)
+ ld->ops->flush_buffer(to);
+ tty_ldisc_deref(ld);
if (to->packet) {
spin_lock_irqsave(&tty->ctrl_lock, flags);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b2ced39d76b..8c7444857a4 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1673,7 +1673,7 @@ unsigned int get_random_int(void)
int ret;
keyptr = get_keyptr();
- hash[0] += current->pid + jiffies + get_cycles() + (int)(long)&ret;
+ hash[0] += current->pid + jiffies + get_cycles();
ret = half_md4_transform(hash, keyptr->secret);
put_cpu_var(get_random_int_hash);
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 20d90e6a6e5..05f9d18b936 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -71,7 +71,7 @@ static int raw_open(struct inode *inode, struct file *filp)
err = bd_claim(bdev, raw_open);
if (err)
goto out1;
- err = set_blocksize(bdev, bdev_hardsect_size(bdev));
+ err = set_blocksize(bdev, bdev_logical_block_size(bdev));
if (err)
goto out2;
filp->f_flags |= O_DIRECT;
@@ -261,6 +261,11 @@ static const struct file_operations raw_ctl_fops = {
static struct cdev raw_cdev;
+static char *raw_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
+}
+
static int __init raw_init(void)
{
dev_t dev = MKDEV(RAW_MAJOR, 0);
@@ -284,6 +289,7 @@ static int __init raw_init(void)
ret = PTR_ERR(raw_class);
goto error_region;
}
+ raw_class->nodename = raw_nodename;
device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");
return 0;
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index f59fc5cea06..63d5b628477 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -872,11 +872,16 @@ static int carrier_raised(struct tty_port *port)
return (sGetChanStatusLo(&info->channel) & CD_ACT) ? 1 : 0;
}
-static void raise_dtr_rts(struct tty_port *port)
+static void dtr_rts(struct tty_port *port, int on)
{
struct r_port *info = container_of(port, struct r_port, port);
- sSetDTR(&info->channel);
- sSetRTS(&info->channel);
+ if (on) {
+ sSetDTR(&info->channel);
+ sSetRTS(&info->channel);
+ } else {
+ sClrDTR(&info->channel);
+ sClrRTS(&info->channel);
+ }
}
/*
@@ -934,7 +939,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
/*
* Info->count is now 1; so it's safe to sleep now.
*/
- if (!test_bit(ASYNC_INITIALIZED, &port->flags)) {
+ if (!test_bit(ASYNCB_INITIALIZED, &port->flags)) {
cp = &info->channel;
sSetRxTrigger(cp, TRIG_1);
if (sGetChanStatus(cp) & CD_ACT)
@@ -958,7 +963,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
sEnRxFIFO(cp);
sEnTransmit(cp);
- set_bit(ASYNC_INITIALIZED, &info->port.flags);
+ set_bit(ASYNCB_INITIALIZED, &info->port.flags);
/*
* Set up the tty->alt_speed kludge
@@ -1641,7 +1646,7 @@ static int rp_write(struct tty_struct *tty,
/* Write remaining data into the port's xmit_buf */
while (1) {
/* Hung up ? */
- if (!test_bit(ASYNC_NORMAL_ACTIVE, &info->port.flags))
+ if (!test_bit(ASYNCB_NORMAL_ACTIVE, &info->port.flags))
goto end;
c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1);
c = min(c, XMIT_BUF_SIZE - info->xmit_head);
@@ -2250,7 +2255,7 @@ static const struct tty_operations rocket_ops = {
static const struct tty_port_operations rocket_port_ops = {
.carrier_raised = carrier_raised,
- .raise_dtr_rts = raise_dtr_rts,
+ .dtr_rts = dtr_rts,
};
/*
diff --git a/drivers/char/selection.c b/drivers/char/selection.c
index cb8ca569896..f97b9e84806 100644
--- a/drivers/char/selection.c
+++ b/drivers/char/selection.c
@@ -327,7 +327,7 @@ int paste_selection(struct tty_struct *tty)
}
count = sel_buffer_lth - pasted;
count = min(count, tty->receive_room);
- tty->ldisc.ops->receive_buf(tty, sel_buffer + pasted,
+ tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
NULL, count);
pasted += count;
}
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 2ad813a801d..53e504f41b2 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -772,11 +772,11 @@ static int stl_carrier_raised(struct tty_port *port)
return (portp->sigs & TIOCM_CD) ? 1 : 0;
}
-static void stl_raise_dtr_rts(struct tty_port *port)
+static void stl_dtr_rts(struct tty_port *port, int on)
{
struct stlport *portp = container_of(port, struct stlport, port);
/* Takes brd_lock internally */
- stl_setsignals(portp, 1, 1);
+ stl_setsignals(portp, on, on);
}
/*****************************************************************************/
@@ -2547,7 +2547,7 @@ static const struct tty_operations stl_ops = {
static const struct tty_port_operations stl_port_ops = {
.carrier_raised = stl_carrier_raised,
- .raise_dtr_rts = stl_raise_dtr_rts,
+ .dtr_rts = stl_dtr_rts,
};
/*****************************************************************************/
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index afd0b26ca05..afded3a2379 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -3247,13 +3247,16 @@ static int carrier_raised(struct tty_port *port)
return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
}
-static void raise_dtr_rts(struct tty_port *port)
+static void dtr_rts(struct tty_port *port, int on)
{
struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
unsigned long flags;
spin_lock_irqsave(&info->irq_spinlock,flags);
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ if (on)
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
@@ -4258,7 +4261,7 @@ static void mgsl_add_device( struct mgsl_struct *info )
static const struct tty_port_operations mgsl_port_ops = {
.carrier_raised = carrier_raised,
- .raise_dtr_rts = raise_dtr_rts,
+ .dtr_rts = dtr_rts,
};
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 5e256494686..1386625fc4c 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -214,6 +214,7 @@ struct slgt_desc
#define set_desc_next(a,b) (a).next = cpu_to_le32((unsigned int)(b))
#define set_desc_count(a,b)(a).count = cpu_to_le16((unsigned short)(b))
#define set_desc_eof(a,b) (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0))
+#define set_desc_status(a, b) (a).status = cpu_to_le16((unsigned short)(b))
#define desc_count(a) (le16_to_cpu((a).count))
#define desc_status(a) (le16_to_cpu((a).status))
#define desc_complete(a) (le16_to_cpu((a).status) & BIT15)
@@ -297,6 +298,7 @@ struct slgt_info {
u32 max_frame_size; /* as set by device config */
unsigned int rbuf_fill_level;
+ unsigned int rx_pio;
unsigned int if_mode;
unsigned int base_clock;
@@ -331,6 +333,8 @@ struct slgt_info {
struct slgt_desc *rbufs;
unsigned int rbuf_current;
unsigned int rbuf_index;
+ unsigned int rbuf_fill_index;
+ unsigned short rbuf_fill_count;
unsigned int tbuf_count;
struct slgt_desc *tbufs;
@@ -2110,6 +2114,40 @@ static void ri_change(struct slgt_info *info, unsigned short status)
info->pending_bh |= BH_STATUS;
}
+static void isr_rxdata(struct slgt_info *info)
+{
+ unsigned int count = info->rbuf_fill_count;
+ unsigned int i = info->rbuf_fill_index;
+ unsigned short reg;
+
+ while (rd_reg16(info, SSR) & IRQ_RXDATA) {
+ reg = rd_reg16(info, RDR);
+ DBGISR(("isr_rxdata %s RDR=%04X\n", info->device_name, reg));
+ if (desc_complete(info->rbufs[i])) {
+ /* all buffers full */
+ rx_stop(info);
+ info->rx_restart = 1;
+ continue;
+ }
+ info->rbufs[i].buf[count++] = (unsigned char)reg;
+ /* async mode saves status byte to buffer for each data byte */
+ if (info->params.mode == MGSL_MODE_ASYNC)
+ info->rbufs[i].buf[count++] = (unsigned char)(reg >> 8);
+ if (count == info->rbuf_fill_level || (reg & BIT10)) {
+ /* buffer full or end of frame */
+ set_desc_count(info->rbufs[i], count);
+ set_desc_status(info->rbufs[i], BIT15 | (reg >> 8));
+ info->rbuf_fill_count = count = 0;
+ if (++i == info->rbuf_count)
+ i = 0;
+ info->pending_bh |= BH_RECEIVE;
+ }
+ }
+
+ info->rbuf_fill_index = i;
+ info->rbuf_fill_count = count;
+}
+
static void isr_serial(struct slgt_info *info)
{
unsigned short status = rd_reg16(info, SSR);
@@ -2125,6 +2163,8 @@ static void isr_serial(struct slgt_info *info)
if (info->tx_count)
isr_txeom(info, status);
}
+ if (info->rx_pio && (status & IRQ_RXDATA))
+ isr_rxdata(info);
if ((status & IRQ_RXBREAK) && (status & RXBREAK)) {
info->icount.brk++;
/* process break detection if tty control allows */
@@ -2141,7 +2181,8 @@ static void isr_serial(struct slgt_info *info)
} else {
if (status & (IRQ_TXIDLE + IRQ_TXUNDER))
isr_txeom(info, status);
-
+ if (info->rx_pio && (status & IRQ_RXDATA))
+ isr_rxdata(info);
if (status & IRQ_RXIDLE) {
if (status & RXIDLE)
info->icount.rxidle++;
@@ -2642,6 +2683,10 @@ static int rx_enable(struct slgt_info *info, int enable)
return -EINVAL;
}
info->rbuf_fill_level = rbuf_fill_level;
+ if (rbuf_fill_level < 128)
+ info->rx_pio = 1; /* PIO mode */
+ else
+ info->rx_pio = 0; /* DMA mode */
rx_stop(info); /* restart receiver to use new fill level */
}
@@ -3099,13 +3144,16 @@ static int carrier_raised(struct tty_port *port)
return (info->signals & SerialSignal_DCD) ? 1 : 0;
}
-static void raise_dtr_rts(struct tty_port *port)
+static void dtr_rts(struct tty_port *port, int on)
{
unsigned long flags;
struct slgt_info *info = container_of(port, struct slgt_info, port);
spin_lock_irqsave(&info->lock,flags);
- info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ if (on)
+ info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3419,7 +3467,7 @@ static void add_device(struct slgt_info *info)
static const struct tty_port_operations slgt_port_ops = {
.carrier_raised = carrier_raised,
- .raise_dtr_rts = raise_dtr_rts,
+ .dtr_rts = dtr_rts,
};
/*
@@ -3841,15 +3889,27 @@ static void rx_start(struct slgt_info *info)
rdma_reset(info);
reset_rbufs(info);
- /* set 1st descriptor address */
- wr_reg32(info, RDDAR, info->rbufs[0].pdesc);
-
- if (info->params.mode != MGSL_MODE_ASYNC) {
- /* enable rx DMA and DMA interrupt */
- wr_reg32(info, RDCSR, (BIT2 + BIT0));
+ if (info->rx_pio) {
+ /* rx request when rx FIFO not empty */
+ wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) & ~BIT14));
+ slgt_irq_on(info, IRQ_RXDATA);
+ if (info->params.mode == MGSL_MODE_ASYNC) {
+ /* enable saving of rx status */
+ wr_reg32(info, RDCSR, BIT6);
+ }
} else {
- /* enable saving of rx status, rx DMA and DMA interrupt */
- wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
+ /* rx request when rx FIFO half full */
+ wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT14));
+ /* set 1st descriptor address */
+ wr_reg32(info, RDDAR, info->rbufs[0].pdesc);
+
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ /* enable rx DMA and DMA interrupt */
+ wr_reg32(info, RDCSR, (BIT2 + BIT0));
+ } else {
+ /* enable saving of rx status, rx DMA and DMA interrupt */
+ wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
+ }
}
slgt_irq_on(info, IRQ_RXOVER);
@@ -4467,6 +4527,8 @@ static void free_rbufs(struct slgt_info *info, unsigned int i, unsigned int last
static void reset_rbufs(struct slgt_info *info)
{
free_rbufs(info, 0, info->rbuf_count - 1);
+ info->rbuf_fill_index = 0;
+ info->rbuf_fill_count = 0;
}
/*
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 26de60efe4b..6f727e3c53a 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -3277,13 +3277,16 @@ static int carrier_raised(struct tty_port *port)
return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
}
-static void raise_dtr_rts(struct tty_port *port)
+static void dtr_rts(struct tty_port *port, int on)
{
SLMP_INFO *info = container_of(port, SLMP_INFO, port);
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ if (on)
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3746,7 +3749,7 @@ static void add_device(SLMP_INFO *info)
static const struct tty_port_operations port_ops = {
.carrier_raised = carrier_raised,
- .raise_dtr_rts = raise_dtr_rts,
+ .dtr_rts = dtr_rts,
};
/* Allocate and initialize a device instance structure
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index d6a807f4077..39a05b5fa9c 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -25,6 +25,7 @@
#include <linux/kbd_kern.h>
#include <linux/proc_fs.h>
#include <linux/quotaops.h>
+#include <linux/perf_counter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
@@ -243,6 +244,7 @@ static void sysrq_handle_showregs(int key, struct tty_struct *tty)
struct pt_regs *regs = get_irq_regs();
if (regs)
show_regs(regs);
+ perf_counter_print_debug();
}
static struct sysrq_key_op sysrq_showregs_op = {
.handler = sysrq_handle_showregs,
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index ed306eb1057..0c2f55a38b9 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -212,7 +212,8 @@ static int get_event_name(char *dest, struct tcpa_event *event,
unsigned char * event_entry)
{
const char *name = "";
- char data[40] = "";
+ /* 41 so there is room for 40 bytes of data and 1 NUL terminator */
+ char data[41] = "";
int i, n_len = 0, d_len = 0;
struct tcpa_pc_event *pc_event;
diff --git a/drivers/char/tty_audit.c b/drivers/char/tty_audit.c
index 55ba6f14288..ac16fbec72d 100644
--- a/drivers/char/tty_audit.c
+++ b/drivers/char/tty_audit.c
@@ -29,10 +29,7 @@ static struct tty_audit_buf *tty_audit_buf_alloc(int major, int minor,
buf = kmalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
goto err;
- if (PAGE_SIZE != N_TTY_BUF_SIZE)
- buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
- else
- buf->data = (unsigned char *)__get_free_page(GFP_KERNEL);
+ buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
if (!buf->data)
goto err_buf;
atomic_set(&buf->count, 1);
@@ -52,10 +49,7 @@ err:
static void tty_audit_buf_free(struct tty_audit_buf *buf)
{
WARN_ON(buf->valid != 0);
- if (PAGE_SIZE != N_TTY_BUF_SIZE)
- kfree(buf->data);
- else
- free_page((unsigned long)buf->data);
+ kfree(buf->data);
kfree(buf);
}
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 66b99a2049e..a3afa0c387c 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -295,7 +295,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
struct tty_driver *p, *res = NULL;
int tty_line = 0;
int len;
- char *str;
+ char *str, *stp;
for (str = name; *str; str++)
if ((*str >= '0' && *str <= '9') || *str == ',')
@@ -311,13 +311,14 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
list_for_each_entry(p, &tty_drivers, tty_drivers) {
if (strncmp(name, p->name, len) != 0)
continue;
- if (*str == ',')
- str++;
- if (*str == '\0')
- str = NULL;
+ stp = str;
+ if (*stp == ',')
+ stp++;
+ if (*stp == '\0')
+ stp = NULL;
if (tty_line >= 0 && tty_line <= p->num && p->ops &&
- p->ops->poll_init && !p->ops->poll_init(p, tty_line, str)) {
+ p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
res = tty_driver_kref_get(p);
*line = tty_line;
break;
@@ -470,43 +471,6 @@ void tty_wakeup(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_wakeup);
/**
- * tty_ldisc_flush - flush line discipline queue
- * @tty: tty
- *
- * Flush the line discipline queue (if any) for this tty. If there
- * is no line discipline active this is a no-op.
- */
-
-void tty_ldisc_flush(struct tty_struct *tty)
-{
- struct tty_ldisc *ld = tty_ldisc_ref(tty);
- if (ld) {
- if (ld->ops->flush_buffer)
- ld->ops->flush_buffer(tty);
- tty_ldisc_deref(ld);
- }
- tty_buffer_flush(tty);
-}
-
-EXPORT_SYMBOL_GPL(tty_ldisc_flush);
-
-/**
- * tty_reset_termios - reset terminal state
- * @tty: tty to reset
- *
- * Restore a terminal to the driver default state
- */
-
-static void tty_reset_termios(struct tty_struct *tty)
-{
- mutex_lock(&tty->termios_mutex);
- *tty->termios = tty->driver->init_termios;
- tty->termios->c_ispeed = tty_termios_input_baud_rate(tty->termios);
- tty->termios->c_ospeed = tty_termios_baud_rate(tty->termios);
- mutex_unlock(&tty->termios_mutex);
-}
-
-/**
* do_tty_hangup - actual handler for hangup events
* @work: tty device
*
@@ -535,7 +499,6 @@ static void do_tty_hangup(struct work_struct *work)
struct file *cons_filp = NULL;
struct file *filp, *f = NULL;
struct task_struct *p;
- struct tty_ldisc *ld;
int closecount = 0, n;
unsigned long flags;
int refs = 0;
@@ -566,40 +529,8 @@ static void do_tty_hangup(struct work_struct *work)
filp->f_op = &hung_up_tty_fops;
}
file_list_unlock();
- /*
- * FIXME! What are the locking issues here? This may be overdoing
- * things... This question is especially important now that we've
- * removed the irqlock.
- */
- ld = tty_ldisc_ref(tty);
- if (ld != NULL) {
- /* We may have no line discipline at this point */
- if (ld->ops->flush_buffer)
- ld->ops->flush_buffer(tty);
- tty_driver_flush_buffer(tty);
- if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
- ld->ops->write_wakeup)
- ld->ops->write_wakeup(tty);
- if (ld->ops->hangup)
- ld->ops->hangup(tty);
- }
- /*
- * FIXME: Once we trust the LDISC code better we can wait here for
- * ldisc completion and fix the driver call race
- */
- wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
- wake_up_interruptible_poll(&tty->read_wait, POLLIN);
- /*
- * Shutdown the current line discipline, and reset it to
- * N_TTY.
- */
- if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
- tty_reset_termios(tty);
- /* Defer ldisc switch */
- /* tty_deferred_ldisc_switch(N_TTY);
- This should get done automatically when the port closes and
- tty_release is called */
+ tty_ldisc_hangup(tty);
read_lock(&tasklist_lock);
if (tty->session) {
@@ -628,12 +559,15 @@ static void do_tty_hangup(struct work_struct *work)
read_unlock(&tasklist_lock);
spin_lock_irqsave(&tty->ctrl_lock, flags);
- tty->flags = 0;
+ clear_bit(TTY_THROTTLED, &tty->flags);
+ clear_bit(TTY_PUSH, &tty->flags);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
put_pid(tty->session);
put_pid(tty->pgrp);
tty->session = NULL;
tty->pgrp = NULL;
tty->ctrl_status = 0;
+ set_bit(TTY_HUPPED, &tty->flags);
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
/* Account for the p->signal references we killed */
@@ -659,10 +593,7 @@ static void do_tty_hangup(struct work_struct *work)
* can't yet guarantee all that.
*/
set_bit(TTY_HUPPED, &tty->flags);
- if (ld) {
- tty_ldisc_enable(tty);
- tty_ldisc_deref(ld);
- }
+ tty_ldisc_enable(tty);
unlock_kernel();
if (f)
fput(f);
@@ -1332,7 +1263,9 @@ static int tty_reopen(struct tty_struct *tty)
tty->count++;
tty->driver = driver; /* N.B. why do this every time?? */
+ mutex_lock(&tty->ldisc_mutex);
WARN_ON(!test_bit(TTY_LDISC, &tty->flags));
+ mutex_unlock(&tty->ldisc_mutex);
return 0;
}
@@ -2480,6 +2413,24 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int
return tty->ops->tiocmset(tty, file, set, clear);
}
+struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
+{
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ tty = tty->link;
+ return tty;
+}
+EXPORT_SYMBOL(tty_pair_get_tty);
+
+struct tty_struct *tty_pair_get_pty(struct tty_struct *tty)
+{
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ return tty;
+ return tty->link;
+}
+EXPORT_SYMBOL(tty_pair_get_pty);
+
/*
* Split this up, as gcc can choke on it otherwise..
*/
@@ -2495,11 +2446,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (tty_paranoia_check(tty, inode, "tty_ioctl"))
return -EINVAL;
- real_tty = tty;
- if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->driver->subtype == PTY_TYPE_MASTER)
- real_tty = tty->link;
-
+ real_tty = tty_pair_get_tty(tty);
/*
* Factor out some common prep work
@@ -2555,7 +2502,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case TIOCGSID:
return tiocgsid(tty, real_tty, p);
case TIOCGETD:
- return put_user(tty->ldisc.ops->num, (int __user *)p);
+ return put_user(tty->ldisc->ops->num, (int __user *)p);
case TIOCSETD:
return tiocsetd(tty, p);
/*
@@ -2770,6 +2717,7 @@ void initialize_tty_struct(struct tty_struct *tty,
tty->buf.head = tty->buf.tail = NULL;
tty_buffer_init(tty);
mutex_init(&tty->termios_mutex);
+ mutex_init(&tty->ldisc_mutex);
init_waitqueue_head(&tty->write_wait);
init_waitqueue_head(&tty->read_wait);
INIT_WORK(&tty->hangup_work, do_tty_hangup);
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index 6f4c7d0a53b..b24f6c6a1ea 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -97,14 +97,19 @@ EXPORT_SYMBOL(tty_driver_flush_buffer);
* @tty: terminal
*
* Indicate that a tty should stop transmitting data down the stack.
+ * Takes the termios mutex to protect against parallel throttle/unthrottle
+ * and also to ensure the driver can consistently reference its own
+ * termios data at this point when implementing software flow control.
*/
void tty_throttle(struct tty_struct *tty)
{
+ mutex_lock(&tty->termios_mutex);
/* check TTY_THROTTLED first so it indicates our state */
if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
tty->ops->throttle)
tty->ops->throttle(tty);
+ mutex_unlock(&tty->termios_mutex);
}
EXPORT_SYMBOL(tty_throttle);
@@ -113,13 +118,21 @@ EXPORT_SYMBOL(tty_throttle);
* @tty: terminal
*
* Indicate that a tty may continue transmitting data down the stack.
+ * Takes the termios mutex to protect against parallel throttle/unthrottle
+ * and also to ensure the driver can consistently reference its own
+ * termios data at this point when implementing software flow control.
+ *
+ * Drivers should however remember that the stack can issue a throttle,
+ * then change flow control method, then unthrottle.
*/
void tty_unthrottle(struct tty_struct *tty)
{
+ mutex_lock(&tty->termios_mutex);
if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) &&
tty->ops->unthrottle)
tty->ops->unthrottle(tty);
+ mutex_unlock(&tty->termios_mutex);
}
EXPORT_SYMBOL(tty_unthrottle);
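
The locking added above follows a simple rule: the atomic TTY_THROTTLED bit makes throttle/unthrottle idempotent, while the termios mutex serialises the two paths against each other and against termios changes. As a minimal, self-contained sketch of that pattern outside the kernel (hypothetical names, pthreads standing in for the kernel mutex and bitops):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flow-controlled channel: the mutex serialises state
 * changes, the flag makes throttle/unthrottle idempotent. */
struct channel {
        pthread_mutex_t lock;
        bool throttled;
};

static void channel_throttle(struct channel *ch)
{
        pthread_mutex_lock(&ch->lock);
        if (!ch->throttled) {           /* act only on a real transition */
                ch->throttled = true;
                printf("driver: stop sender\n");
        }
        pthread_mutex_unlock(&ch->lock);
}

static void channel_unthrottle(struct channel *ch)
{
        pthread_mutex_lock(&ch->lock);
        if (ch->throttled) {
                ch->throttled = false;
                printf("driver: resume sender\n");
        }
        pthread_mutex_unlock(&ch->lock);
}

int main(void)
{
        struct channel ch = { PTHREAD_MUTEX_INITIALIZER, false };

        channel_throttle(&ch);
        channel_throttle(&ch);          /* second call is a no-op */
        channel_unthrottle(&ch);
        return 0;
}
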
@@ -613,9 +626,25 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
return 0;
}
+static void copy_termios(struct tty_struct *tty, struct ktermios *kterm)
+{
+ mutex_lock(&tty->termios_mutex);
+ memcpy(kterm, tty->termios, sizeof(struct ktermios));
+ mutex_unlock(&tty->termios_mutex);
+}
+
+static void copy_termios_locked(struct tty_struct *tty, struct ktermios *kterm)
+{
+ mutex_lock(&tty->termios_mutex);
+ memcpy(kterm, tty->termios_locked, sizeof(struct ktermios));
+ mutex_unlock(&tty->termios_mutex);
+}
+
static int get_termio(struct tty_struct *tty, struct termio __user *termio)
{
- if (kernel_termios_to_user_termio(termio, tty->termios))
+ struct ktermios kterm;
+ copy_termios(tty, &kterm);
+ if (kernel_termios_to_user_termio(termio, &kterm))
return -EFAULT;
return 0;
}
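
copy_termios() and copy_termios_locked() above capture a pattern the rest of this patch leans on: hold the mutex only long enough to snapshot the shared structure into a stack copy, then perform the slow or faulting copy to userspace outside the lock. A small userspace sketch of the same idea (hypothetical settings structure, pthreads in place of termios_mutex):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct settings {
        int speed;
        int flags;
};

static pthread_mutex_t settings_lock = PTHREAD_MUTEX_INITIALIZER;
static struct settings current_settings = { 9600, 0 };

/* Snapshot the shared state under the lock... */
static void settings_snapshot(struct settings *out)
{
        pthread_mutex_lock(&settings_lock);
        memcpy(out, &current_settings, sizeof(*out));
        pthread_mutex_unlock(&settings_lock);
}

int main(void)
{
        struct settings snap;

        settings_snapshot(&snap);
        /* ...then do the potentially blocking work on the private copy. */
        printf("speed=%d flags=%d\n", snap.speed, snap.flags);
        return 0;
}
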
@@ -917,6 +946,7 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
struct tty_struct *real_tty;
void __user *p = (void __user *)arg;
int ret = 0;
+ struct ktermios kterm;
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
@@ -952,23 +982,20 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
return set_termios(real_tty, p, TERMIOS_OLD);
#ifndef TCGETS2
case TCGETS:
- mutex_lock(&real_tty->termios_mutex);
- if (kernel_termios_to_user_termios((struct termios __user *)arg, real_tty->termios))
+ copy_termios(real_tty, &kterm);
+ if (kernel_termios_to_user_termios((struct termios __user *)arg, &kterm))
ret = -EFAULT;
- mutex_unlock(&real_tty->termios_mutex);
return ret;
#else
case TCGETS:
- mutex_lock(&real_tty->termios_mutex);
- if (kernel_termios_to_user_termios_1((struct termios __user *)arg, real_tty->termios))
+ copy_termios(real_tty, &kterm);
+ if (kernel_termios_to_user_termios_1((struct termios __user *)arg, &kterm))
ret = -EFAULT;
- mutex_unlock(&real_tty->termios_mutex);
return ret;
case TCGETS2:
- mutex_lock(&real_tty->termios_mutex);
- if (kernel_termios_to_user_termios((struct termios2 __user *)arg, real_tty->termios))
+ copy_termios(real_tty, &kterm);
+ if (kernel_termios_to_user_termios((struct termios2 __user *)arg, &kterm))
ret = -EFAULT;
- mutex_unlock(&real_tty->termios_mutex);
return ret;
case TCSETSF2:
return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT);
@@ -987,46 +1014,51 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
return set_termios(real_tty, p, TERMIOS_TERMIO);
#ifndef TCGETS2
case TIOCGLCKTRMIOS:
- mutex_lock(&real_tty->termios_mutex);
- if (kernel_termios_to_user_termios((struct termios __user *)arg, real_tty->termios_locked))
+ copy_termios_locked(real_tty, &kterm);
+ if (kernel_termios_to_user_termios((struct termios __user *)arg, &kterm))
ret = -EFAULT;
- mutex_unlock(&real_tty->termios_mutex);
return ret;
case TIOCSLCKTRMIOS:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- mutex_lock(&real_tty->termios_mutex);
- if (user_termios_to_kernel_termios(real_tty->termios_locked,
+ copy_termios_locked(real_tty, &kterm);
+ if (user_termios_to_kernel_termios(&kterm,
(struct termios __user *) arg))
- ret = -EFAULT;
+ return -EFAULT;
+ mutex_lock(&real_tty->termios_mutex);
+ memcpy(real_tty->termios_locked, &kterm, sizeof(struct ktermios));
mutex_unlock(&real_tty->termios_mutex);
- return ret;
+ return 0;
#else
case TIOCGLCKTRMIOS:
- mutex_lock(&real_tty->termios_mutex);
- if (kernel_termios_to_user_termios_1((struct termios __user *)arg, real_tty->termios_locked))
+ copy_termios_locked(real_tty, &kterm);
+ if (kernel_termios_to_user_termios_1((struct termios __user *)arg, &kterm))
ret = -EFAULT;
- mutex_unlock(&real_tty->termios_mutex);
return ret;
case TIOCSLCKTRMIOS:
if (!capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- mutex_lock(&real_tty->termios_mutex);
- if (user_termios_to_kernel_termios_1(real_tty->termios_locked,
+ return -EPERM;
+ copy_termios_locked(real_tty, &kterm);
+ if (user_termios_to_kernel_termios_1(&kterm,
(struct termios __user *) arg))
- ret = -EFAULT;
+ return -EFAULT;
+ mutex_lock(&real_tty->termios_mutex);
+ memcpy(real_tty->termios_locked, &kterm, sizeof(struct ktermios));
mutex_unlock(&real_tty->termios_mutex);
return ret;
#endif
#ifdef TCGETX
- case TCGETX:
+ case TCGETX: {
+ struct termiox ktermx;
if (real_tty->termiox == NULL)
return -EINVAL;
mutex_lock(&real_tty->termios_mutex);
- if (copy_to_user(p, real_tty->termiox, sizeof(struct termiox)))
- ret = -EFAULT;
+ memcpy(&ktermx, real_tty->termiox, sizeof(struct termiox));
mutex_unlock(&real_tty->termios_mutex);
+ if (copy_to_user(p, &ktermx, sizeof(struct termiox)))
+ ret = -EFAULT;
return ret;
+ }
case TCSETX:
return set_termiox(real_tty, p, 0);
case TCSETXW:
@@ -1035,10 +1067,9 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
return set_termiox(real_tty, p, TERMIOS_FLUSH);
#endif
case TIOCGSOFTCAR:
- mutex_lock(&real_tty->termios_mutex);
- ret = put_user(C_CLOCAL(real_tty) ? 1 : 0,
+ copy_termios(real_tty, &kterm);
+ ret = put_user((kterm.c_cflag & CLOCAL) ? 1 : 0,
(int __user *)arg);
- mutex_unlock(&real_tty->termios_mutex);
return ret;
case TIOCSSOFTCAR:
if (get_user(arg, (unsigned int __user *) arg))
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index f78f5b0127a..a19e935847b 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -115,19 +115,22 @@ EXPORT_SYMBOL(tty_unregister_ldisc);
/**
* tty_ldisc_try_get - try and reference an ldisc
* @disc: ldisc number
- * @ld: tty ldisc structure to complete
*
* Attempt to open and lock a line discipline into place. Return
- * the line discipline refcounted and assigned in ld. On an error
- * report the error code back
+ * the line discipline refcounted or an error.
*/
-static int tty_ldisc_try_get(int disc, struct tty_ldisc *ld)
+static struct tty_ldisc *tty_ldisc_try_get(int disc)
{
unsigned long flags;
+ struct tty_ldisc *ld;
struct tty_ldisc_ops *ldops;
int err = -EINVAL;
-
+
+ ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
+ if (ld == NULL)
+ return ERR_PTR(-ENOMEM);
+
spin_lock_irqsave(&tty_ldisc_lock, flags);
ld->ops = NULL;
ldops = tty_ldiscs[disc];
@@ -140,17 +143,21 @@ static int tty_ldisc_try_get(int disc, struct tty_ldisc *ld)
/* lock it */
ldops->refcount++;
ld->ops = ldops;
+ ld->refcount = 0;
err = 0;
}
}
spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- return err;
+ if (err) {
+ kfree(ld);
+ return ERR_PTR(err);
+ }
+ return ld;
}
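
tty_ldisc_try_get() now reports failure through the pointer itself: callers get either a valid struct tty_ldisc * or an errno encoded with ERR_PTR(), unpacked via IS_ERR()/PTR_ERR(). A simplified, self-contained illustration of that error-pointer idiom (not the kernel's exact macros, which also bound the encodable errno range):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified userspace stand-ins for ERR_PTR/IS_ERR/PTR_ERR. */
static inline void *err_ptr(long error)     { return (void *)error; }
static inline long ptr_err(const void *ptr) { return (long)ptr; }
static inline int is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-4095;
}

static void *grab_object(int id)
{
        void *obj;

        if (id < 0)
                return err_ptr(-EINVAL);   /* error travels in the pointer */
        obj = malloc(64);
        if (!obj)
                return err_ptr(-ENOMEM);
        return obj;                        /* success: a real pointer */
}

int main(void)
{
        void *obj = grab_object(-1);

        if (is_err(obj)) {
                printf("grab_object failed: %ld\n", ptr_err(obj));
                return 1;
        }
        free(obj);
        return 0;
}
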
/**
* tty_ldisc_get - take a reference to an ldisc
* @disc: ldisc number
- * @ld: tty line discipline structure to use
*
* Takes a reference to a line discipline. Deals with refcounts and
* module locking counts. Returns NULL if the discipline is not available.
@@ -161,52 +168,55 @@ static int tty_ldisc_try_get(int disc, struct tty_ldisc *ld)
* takes tty_ldisc_lock to guard against ldisc races
*/
-static int tty_ldisc_get(int disc, struct tty_ldisc *ld)
+static struct tty_ldisc *tty_ldisc_get(int disc)
{
- int err;
+ struct tty_ldisc *ld;
if (disc < N_TTY || disc >= NR_LDISCS)
- return -EINVAL;
- err = tty_ldisc_try_get(disc, ld);
- if (err < 0) {
+ return ERR_PTR(-EINVAL);
+ ld = tty_ldisc_try_get(disc);
+ if (IS_ERR(ld)) {
request_module("tty-ldisc-%d", disc);
- err = tty_ldisc_try_get(disc, ld);
+ ld = tty_ldisc_try_get(disc);
}
- return err;
+ return ld;
}
/**
* tty_ldisc_put - drop ldisc reference
- * @disc: ldisc number
+ * @ld: ldisc
*
* Drop a reference to a line discipline. Manage refcounts and
- * module usage counts
+ * module usage counts. Free the ldisc once the refcount hits zero.
*
* Locking:
* takes tty_ldisc_lock to guard against ldisc races
*/
-static void tty_ldisc_put(struct tty_ldisc_ops *ld)
+static void tty_ldisc_put(struct tty_ldisc *ld)
{
unsigned long flags;
- int disc = ld->num;
+ int disc = ld->ops->num;
+ struct tty_ldisc_ops *ldo;
BUG_ON(disc < N_TTY || disc >= NR_LDISCS);
spin_lock_irqsave(&tty_ldisc_lock, flags);
- ld = tty_ldiscs[disc];
- BUG_ON(ld->refcount == 0);
- ld->refcount--;
- module_put(ld->owner);
+ ldo = tty_ldiscs[disc];
+ BUG_ON(ldo->refcount == 0);
+ ldo->refcount--;
+ module_put(ldo->owner);
spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ WARN_ON(ld->refcount);
+ kfree(ld);
}
-static void * tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos)
+static void *tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos)
{
return (*pos < NR_LDISCS) ? pos : NULL;
}
-static void * tty_ldiscs_seq_next(struct seq_file *m, void *v, loff_t *pos)
+static void *tty_ldiscs_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return (*pos < NR_LDISCS) ? pos : NULL;
@@ -219,12 +229,13 @@ static void tty_ldiscs_seq_stop(struct seq_file *m, void *v)
static int tty_ldiscs_seq_show(struct seq_file *m, void *v)
{
int i = *(loff_t *)v;
- struct tty_ldisc ld;
-
- if (tty_ldisc_get(i, &ld) < 0)
+ struct tty_ldisc *ld;
+
+ ld = tty_ldisc_try_get(i);
+ if (IS_ERR(ld))
return 0;
- seq_printf(m, "%-10s %2d\n", ld.ops->name ? ld.ops->name : "???", i);
- tty_ldisc_put(ld.ops);
+ seq_printf(m, "%-10s %2d\n", ld->ops->name ? ld->ops->name : "???", i);
+ tty_ldisc_put(ld);
return 0;
}
@@ -254,7 +265,7 @@ const struct file_operations tty_ldiscs_proc_fops = {
* @ld: line discipline
*
* Install an instance of a line discipline into a tty structure. The
- * ldisc must have a reference count above zero to ensure it remains/
+ * ldisc must have a reference count above zero to ensure it remains.
* The tty instance refcount starts at zero.
*
* Locking:
@@ -263,8 +274,7 @@ const struct file_operations tty_ldiscs_proc_fops = {
static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
{
- ld->refcount = 0;
- tty->ldisc = *ld;
+ tty->ldisc = ld;
}
/**
@@ -286,7 +296,7 @@ static int tty_ldisc_try(struct tty_struct *tty)
int ret = 0;
spin_lock_irqsave(&tty_ldisc_lock, flags);
- ld = &tty->ldisc;
+ ld = tty->ldisc;
if (test_bit(TTY_LDISC, &tty->flags)) {
ld->refcount++;
ret = 1;
@@ -315,10 +325,9 @@ struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
{
/* wait_event is a macro */
wait_event(tty_ldisc_wait, tty_ldisc_try(tty));
- WARN_ON(tty->ldisc.refcount == 0);
- return &tty->ldisc;
+ WARN_ON(tty->ldisc->refcount == 0);
+ return tty->ldisc;
}
-
EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
/**
@@ -335,10 +344,9 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
{
if (tty_ldisc_try(tty))
- return &tty->ldisc;
+ return tty->ldisc;
return NULL;
}
-
EXPORT_SYMBOL_GPL(tty_ldisc_ref);
/**
@@ -366,7 +374,6 @@ void tty_ldisc_deref(struct tty_ldisc *ld)
wake_up(&tty_ldisc_wait);
spin_unlock_irqrestore(&tty_ldisc_lock, flags);
}
-
EXPORT_SYMBOL_GPL(tty_ldisc_deref);
/**
@@ -389,6 +396,26 @@ void tty_ldisc_enable(struct tty_struct *tty)
}
/**
+ * tty_ldisc_flush - flush line discipline queue
+ * @tty: tty
+ *
+ * Flush the line discipline queue (if any) for this tty. If there
+ * is no line discipline active this is a no-op.
+ */
+
+void tty_ldisc_flush(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld = tty_ldisc_ref(tty);
+ if (ld) {
+ if (ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ tty_ldisc_deref(ld);
+ }
+ tty_buffer_flush(tty);
+}
+EXPORT_SYMBOL_GPL(tty_ldisc_flush);
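
tty_ldisc_flush() also doubles as a template for how other paths should use the refcounted ldisc: take a reference, call the optional method if present, drop the reference. A hedged sketch of a driver input path following the same rule (mydrv_push_chars is hypothetical; receive_buf is the ldisc input hook of this era):

/* Hypothetical driver input path: pin the ldisc before feeding it data. */
static void mydrv_push_chars(struct tty_struct *tty,
                             const unsigned char *buf, char *flags, int count)
{
        struct tty_ldisc *ld = tty_ldisc_ref(tty);

        if (ld) {
                if (ld->ops->receive_buf)
                        ld->ops->receive_buf(tty, buf, flags, count);
                tty_ldisc_deref(ld);    /* never hold the reference longer */
        }
}
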
+
+/**
* tty_set_termios_ldisc - set ldisc field
* @tty: tty structure
* @num: line discipline number
@@ -407,6 +434,39 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
mutex_unlock(&tty->termios_mutex);
}
+/**
+ * tty_ldisc_open - open a line discipline
+ * @tty: tty we are opening the ldisc on
+ * @ld: discipline to open
+ *
+ * A helper opening method. Also a convenient debugging and check
+ * point.
+ */
+
+static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
+{
+ WARN_ON(test_and_set_bit(TTY_LDISC_OPEN, &tty->flags));
+ if (ld->ops->open)
+ return ld->ops->open(tty);
+ return 0;
+}
+
+/**
+ * tty_ldisc_close - close a line discipline
+ * @tty: tty we are closing the ldisc on
+ * @ld: discipline to close
+ *
+ * A helper close method. Also a convenient debugging and check
+ * point.
+ */
+
+static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
+{
+ WARN_ON(!test_bit(TTY_LDISC_OPEN, &tty->flags));
+ clear_bit(TTY_LDISC_OPEN, &tty->flags);
+ if (ld->ops->close)
+ ld->ops->close(tty);
+}
/**
* tty_ldisc_restore - helper for tty ldisc change
@@ -420,66 +480,136 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
{
char buf[64];
- struct tty_ldisc new_ldisc;
+ struct tty_ldisc *new_ldisc;
+ int r;
/* There is an outstanding reference here so this is safe */
- tty_ldisc_get(old->ops->num, old);
+ old = tty_ldisc_get(old->ops->num);
+ WARN_ON(IS_ERR(old));
tty_ldisc_assign(tty, old);
tty_set_termios_ldisc(tty, old->ops->num);
- if (old->ops->open && (old->ops->open(tty) < 0)) {
- tty_ldisc_put(old->ops);
+ if (tty_ldisc_open(tty, old) < 0) {
+ tty_ldisc_put(old);
/* This driver is always present */
- if (tty_ldisc_get(N_TTY, &new_ldisc) < 0)
+ new_ldisc = tty_ldisc_get(N_TTY);
+ if (IS_ERR(new_ldisc))
panic("n_tty: get");
- tty_ldisc_assign(tty, &new_ldisc);
+ tty_ldisc_assign(tty, new_ldisc);
tty_set_termios_ldisc(tty, N_TTY);
- if (new_ldisc.ops->open) {
- int r = new_ldisc.ops->open(tty);
- if (r < 0)
- panic("Couldn't open N_TTY ldisc for "
- "%s --- error %d.",
- tty_name(tty, buf), r);
- }
+ r = tty_ldisc_open(tty, new_ldisc);
+ if (r < 0)
+ panic("Couldn't open N_TTY ldisc for "
+ "%s --- error %d.",
+ tty_name(tty, buf), r);
}
}
/**
+ * tty_ldisc_halt - shut down the line discipline
+ * @tty: tty device
+ *
+ * Shut down the line discipline and work queue for this tty device.
+ * The TTY_LDISC flag being cleared ensures no further references can
+ * be obtained while the delayed work queue halt ensures that no more
+ * data is fed to the ldisc.
+ *
+ * In order to wait for any existing references to complete see
+ * tty_ldisc_wait_idle.
+ */
+
+static int tty_ldisc_halt(struct tty_struct *tty)
+{
+ clear_bit(TTY_LDISC, &tty->flags);
+ return cancel_delayed_work(&tty->buf.work);
+}
+
+/**
+ * tty_ldisc_wait_idle - wait for the ldisc to become idle
+ * @tty: tty to wait for
+ *
+ * Wait for the line discipline to become idle. The discipline must
+ * have been halted for this to guarantee it remains idle.
+ *
+ * tty_ldisc_lock protects the ref counts currently.
+ */
+
+static int tty_ldisc_wait_idle(struct tty_struct *tty)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ while (tty->ldisc->refcount) {
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ if (wait_event_timeout(tty_ldisc_wait,
+ tty->ldisc->refcount == 0, 5 * HZ) == 0)
+ return -EBUSY;
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ }
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ return 0;
+}
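
tty_ldisc_halt() and tty_ldisc_wait_idle() split shutdown into two steps: stop handing out new references, then wait (with a timeout) for the existing ones to drain. A self-contained sketch of the drain-with-timeout half using a condition variable (hypothetical names; the kernel uses the tty_ldisc_wait queue and the ldisc spinlock instead):

#include <errno.h>
#include <pthread.h>
#include <time.h>

struct drainable {
        pthread_mutex_t lock;
        pthread_cond_t idle;
        int refcount;
        int halted;             /* once set, no new references are granted */
};

void drain_put(struct drainable *d)
{
        pthread_mutex_lock(&d->lock);
        if (--d->refcount == 0)
                pthread_cond_broadcast(&d->idle);
        pthread_mutex_unlock(&d->lock);
}

/* Wait up to 'secs' seconds for the refcount to hit zero; -EBUSY on timeout. */
int drain_wait_idle(struct drainable *d, int secs)
{
        struct timespec deadline;
        int busy, err = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += secs;

        pthread_mutex_lock(&d->lock);
        while (d->refcount && err != ETIMEDOUT)
                err = pthread_cond_timedwait(&d->idle, &d->lock, &deadline);
        busy = d->refcount != 0;
        pthread_mutex_unlock(&d->lock);

        return busy ? -EBUSY : 0;
}
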
+
+/**
* tty_set_ldisc - set line discipline
* @tty: the terminal to set
* @ldisc: the line discipline
*
* Set the discipline of a tty line. Must be called from a process
- * context.
+ * context. The ldisc change logic has to protect itself against any
+ * overlapping ldisc change (including on the other end of pty pairs),
+ * the close of one side of a tty/pty pair, and eventually hangup.
*
- * Locking: takes tty_ldisc_lock.
- * called functions take termios_mutex
+ * Locking: takes tty_ldisc_lock, termios_mutex
*/
int tty_set_ldisc(struct tty_struct *tty, int ldisc)
{
int retval;
- struct tty_ldisc o_ldisc, new_ldisc;
- int work;
- unsigned long flags;
+ struct tty_ldisc *o_ldisc, *new_ldisc;
+ int work, o_work = 0;
struct tty_struct *o_tty;
-restart:
- /* This is a bit ugly for now but means we can break the 'ldisc
- is part of the tty struct' assumption later */
- retval = tty_ldisc_get(ldisc, &new_ldisc);
- if (retval)
- return retval;
+ new_ldisc = tty_ldisc_get(ldisc);
+ if (IS_ERR(new_ldisc))
+ return PTR_ERR(new_ldisc);
+
+ /*
+ * We need to look at the tty locking here for pty/tty pairs
+ * when both sides try to change in parallel.
+ */
+
+ o_tty = tty->link; /* o_tty is the pty side or NULL */
+
+
+ /*
+ * Check the no-op case
+ */
+
+ if (tty->ldisc->ops->num == ldisc) {
+ tty_ldisc_put(new_ldisc);
+ return 0;
+ }
/*
* Problem: What do we do if this blocks ?
+ * We could deadlock here
*/
tty_wait_until_sent(tty, 0);
- if (tty->ldisc.ops->num == ldisc) {
- tty_ldisc_put(new_ldisc.ops);
- return 0;
+ mutex_lock(&tty->ldisc_mutex);
+
+ /*
+ * We could be midstream of another ldisc change which has
+ * dropped the lock during processing. If so we need to wait.
+ */
+
+ while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
+ mutex_unlock(&tty->ldisc_mutex);
+ wait_event(tty_ldisc_wait,
+ test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
+ mutex_lock(&tty->ldisc_mutex);
}
+ set_bit(TTY_LDISC_CHANGING, &tty->flags);
/*
* No more input please, we are switching. The new ldisc
@@ -489,8 +619,6 @@ restart:
tty->receive_room = 0;
o_ldisc = tty->ldisc;
- o_tty = tty->link;
-
/*
* Make sure we don't change while someone holds a
* reference to the line discipline. The TTY_LDISC bit
@@ -501,108 +629,183 @@ restart:
* with a userspace app continually trying to use the tty in
* parallel to the change and re-referencing the tty.
*/
- clear_bit(TTY_LDISC, &tty->flags);
- if (o_tty)
- clear_bit(TTY_LDISC, &o_tty->flags);
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) {
- if (tty->ldisc.refcount) {
- /* Free the new ldisc we grabbed. Must drop the lock
- first. */
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- tty_ldisc_put(o_ldisc.ops);
- /*
- * There are several reasons we may be busy, including
- * random momentary I/O traffic. We must therefore
- * retry. We could distinguish between blocking ops
- * and retries if we made tty_ldisc_wait() smarter.
- * That is up for discussion.
- */
- if (wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
- return -ERESTARTSYS;
- goto restart;
- }
- if (o_tty && o_tty->ldisc.refcount) {
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- tty_ldisc_put(o_tty->ldisc.ops);
- if (wait_event_interruptible(tty_ldisc_wait, o_tty->ldisc.refcount == 0) < 0)
- return -ERESTARTSYS;
- goto restart;
- }
- }
- /*
- * If the TTY_LDISC bit is set, then we are racing against
- * another ldisc change
- */
- if (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
- struct tty_ldisc *ld;
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- tty_ldisc_put(new_ldisc.ops);
- ld = tty_ldisc_ref_wait(tty);
- tty_ldisc_deref(ld);
- goto restart;
- }
- /*
- * This flag is used to avoid two parallel ldisc changes. Once
- * open and close are fine grained locked this may work better
- * as a mutex shared with the open/close/hup paths
- */
- set_bit(TTY_LDISC_CHANGING, &tty->flags);
+ work = tty_ldisc_halt(tty);
if (o_tty)
- set_bit(TTY_LDISC_CHANGING, &o_tty->flags);
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-
- /*
- * From this point on we know nobody has an ldisc
- * usage reference, nor can they obtain one until
- * we say so later on.
- */
+ o_work = tty_ldisc_halt(o_tty);
- work = cancel_delayed_work(&tty->buf.work);
/*
- * Wait for ->hangup_work and ->buf.work handlers to terminate
- * MUST NOT hold locks here.
+ * Wait for ->hangup_work and ->buf.work handlers to terminate.
+ * We must drop the mutex here in case a hangup is also in process.
*/
+
+ mutex_unlock(&tty->ldisc_mutex);
+
flush_scheduled_work();
+
+ /* Let any existing reference holders finish */
+ retval = tty_ldisc_wait_idle(tty);
+ if (retval < 0) {
+ clear_bit(TTY_LDISC_CHANGING, &tty->flags);
+ tty_ldisc_put(new_ldisc);
+ return retval;
+ }
+
+ mutex_lock(&tty->ldisc_mutex);
+ if (test_bit(TTY_HUPPED, &tty->flags)) {
+ /* We were raced by the hangup method. It will have stomped
+ the ldisc data and closed the ldisc down */
+ clear_bit(TTY_LDISC_CHANGING, &tty->flags);
+ mutex_unlock(&tty->ldisc_mutex);
+ tty_ldisc_put(new_ldisc);
+ return -EIO;
+ }
+
/* Shutdown the current discipline. */
- if (o_ldisc.ops->close)
- (o_ldisc.ops->close)(tty);
+ tty_ldisc_close(tty, o_ldisc);
/* Now set up the new line discipline. */
- tty_ldisc_assign(tty, &new_ldisc);
+ tty_ldisc_assign(tty, new_ldisc);
tty_set_termios_ldisc(tty, ldisc);
- if (new_ldisc.ops->open)
- retval = (new_ldisc.ops->open)(tty);
+
+ retval = tty_ldisc_open(tty, new_ldisc);
if (retval < 0) {
- tty_ldisc_put(new_ldisc.ops);
- tty_ldisc_restore(tty, &o_ldisc);
+ /* Back to the old one or N_TTY if we can't */
+ tty_ldisc_put(new_ldisc);
+ tty_ldisc_restore(tty, o_ldisc);
}
+
/* At this point we hold a reference to the new ldisc and a
a reference to the old ldisc. If we ended up flipping back
to the existing ldisc we have two references to it */
- if (tty->ldisc.ops->num != o_ldisc.ops->num && tty->ops->set_ldisc)
+ if (tty->ldisc->ops->num != o_ldisc->ops->num && tty->ops->set_ldisc)
tty->ops->set_ldisc(tty);
- tty_ldisc_put(o_ldisc.ops);
+ tty_ldisc_put(o_ldisc);
/*
- * Allow ldisc referencing to occur as soon as the driver
- * ldisc callback completes.
+ * Allow ldisc referencing to occur again
*/
tty_ldisc_enable(tty);
if (o_tty)
tty_ldisc_enable(o_tty);
- /* Restart it in case no characters kick it off. Safe if
+ /* Restart the work queue in case no characters kick it off. Safe if
already running */
if (work)
schedule_delayed_work(&tty->buf.work, 1);
+ if (o_work)
+ schedule_delayed_work(&o_tty->buf.work, 1);
+ mutex_unlock(&tty->ldisc_mutex);
return retval;
}
+/**
+ * tty_reset_termios - reset terminal state
+ * @tty: tty to reset
+ *
+ * Restore a terminal to the driver default state.
+ */
+
+static void tty_reset_termios(struct tty_struct *tty)
+{
+ mutex_lock(&tty->termios_mutex);
+ *tty->termios = tty->driver->init_termios;
+ tty->termios->c_ispeed = tty_termios_input_baud_rate(tty->termios);
+ tty->termios->c_ospeed = tty_termios_baud_rate(tty->termios);
+ mutex_unlock(&tty->termios_mutex);
+}
+
+
+/**
+ * tty_ldisc_reinit - reinitialise the tty ldisc
+ * @tty: tty to reinit
+ *
+ * Switch the tty back to N_TTY line discipline and leave the
+ * ldisc state closed
+ */
+
+static void tty_ldisc_reinit(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld;
+
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ tty->ldisc = NULL;
+ /*
+ * Switch the line discipline back
+ */
+ ld = tty_ldisc_get(N_TTY);
+ BUG_ON(IS_ERR(ld));
+ tty_ldisc_assign(tty, ld);
+ tty_set_termios_ldisc(tty, N_TTY);
+}
+
+/**
+ * tty_ldisc_hangup - hangup ldisc reset
+ * @tty: tty being hung up
+ *
+ * Some tty devices reset their termios when they receive a hangup
+ * event. In that situation we must also switch back to N_TTY properly
+ * before we reset the termios data.
+ *
+ * Locking: We can take the ldisc mutex as the rest of the code is
+ * careful to allow for this.
+ *
+ * In the pty pair case this occurs in the close() path of the
+ * tty itself so we must be careful about locking rules.
+ */
+
+void tty_ldisc_hangup(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld;
+
+ /*
+ * FIXME! What are the locking issues here? This may be overdoing
+ * things... This question is especially important now that we've
+ * removed the irqlock.
+ */
+ ld = tty_ldisc_ref(tty);
+ if (ld != NULL) {
+ /* We may have no line discipline at this point */
+ if (ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ tty_driver_flush_buffer(tty);
+ if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
+ ld->ops->write_wakeup)
+ ld->ops->write_wakeup(tty);
+ if (ld->ops->hangup)
+ ld->ops->hangup(tty);
+ tty_ldisc_deref(ld);
+ }
+ /*
+ * FIXME: Once we trust the LDISC code better we can wait here for
+ * ldisc completion and fix the driver call race
+ */
+ wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+ wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+ /*
+ * Shutdown the current line discipline, and reset it to
+ * N_TTY.
+ */
+ if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
+ /* Avoid racing set_ldisc */
+ mutex_lock(&tty->ldisc_mutex);
+ /* Switch back to N_TTY */
+ tty_ldisc_halt(tty);
+ tty_ldisc_wait_idle(tty);
+ tty_ldisc_reinit(tty);
+ /* At this point we have a closed ldisc and we want to
+ reopen it. We could defer this to the next open but
+ it means auditing a lot of other paths so this is a FIXME */
+ WARN_ON(tty_ldisc_open(tty, tty->ldisc));
+ tty_ldisc_enable(tty);
+ mutex_unlock(&tty->ldisc_mutex);
+ tty_reset_termios(tty);
+ }
+}
/**
* tty_ldisc_setup - open line discipline
@@ -610,24 +813,23 @@ restart:
* @o_tty: pair tty for pty/tty pairs
*
* Called during the initial open of a tty/pty pair in order to set up the
- * line discplines and bind them to the tty.
+ * line disciplines and bind them to the tty. This has no locking issues
+ * as the device isn't yet active.
*/
int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
{
- struct tty_ldisc *ld = &tty->ldisc;
+ struct tty_ldisc *ld = tty->ldisc;
int retval;
- if (ld->ops->open) {
- retval = (ld->ops->open)(tty);
- if (retval)
- return retval;
- }
- if (o_tty && o_tty->ldisc.ops->open) {
- retval = (o_tty->ldisc.ops->open)(o_tty);
+ retval = tty_ldisc_open(tty, ld);
+ if (retval)
+ return retval;
+
+ if (o_tty) {
+ retval = tty_ldisc_open(o_tty, o_tty->ldisc);
if (retval) {
- if (ld->ops->close)
- (ld->ops->close)(tty);
+ tty_ldisc_close(tty, ld);
return retval;
}
tty_ldisc_enable(o_tty);
@@ -635,32 +837,25 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
tty_ldisc_enable(tty);
return 0;
}
-
/**
* tty_ldisc_release - release line discipline
* @tty: tty being shut down
* @o_tty: pair tty for pty/tty pairs
*
- * Called during the final close of a tty/pty pair in order to shut down the
- * line discpline layer.
+ * Called during the final close of a tty/pty pair in order to shut down
+ * the line discipline layer. On exit the ldisc assigned is N_TTY and the
+ * ldisc has not been opened.
*/
void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
{
- unsigned long flags;
- struct tty_ldisc ld;
/*
* Prevent flush_to_ldisc() from rescheduling the work for later. Then
* kill any delayed work. As this is the final close it does not
* race with the set_ldisc code path.
*/
- clear_bit(TTY_LDISC, &tty->flags);
- cancel_delayed_work(&tty->buf.work);
-
- /*
- * Wait for ->hangup_work and ->buf.work handlers to terminate
- */
+ tty_ldisc_halt(tty);
flush_scheduled_work();
/*
@@ -668,38 +863,19 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
* side waiters as the file is closing so user count on the file
* side is zero.
*/
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- while (tty->ldisc.refcount) {
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- wait_event(tty_ldisc_wait, tty->ldisc.refcount == 0);
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- }
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ tty_ldisc_wait_idle(tty);
+
/*
* Shutdown the current line discipline, and reset it to N_TTY.
*
* FIXME: this MUST get fixed for the new reflocking
*/
- if (tty->ldisc.ops->close)
- (tty->ldisc.ops->close)(tty);
- tty_ldisc_put(tty->ldisc.ops);
- /*
- * Switch the line discipline back
- */
- WARN_ON(tty_ldisc_get(N_TTY, &ld));
- tty_ldisc_assign(tty, &ld);
- tty_set_termios_ldisc(tty, N_TTY);
- if (o_tty) {
- /* FIXME: could o_tty be in setldisc here ? */
- clear_bit(TTY_LDISC, &o_tty->flags);
- if (o_tty->ldisc.ops->close)
- (o_tty->ldisc.ops->close)(o_tty);
- tty_ldisc_put(o_tty->ldisc.ops);
- WARN_ON(tty_ldisc_get(N_TTY, &ld));
- tty_ldisc_assign(o_tty, &ld);
- tty_set_termios_ldisc(o_tty, N_TTY);
- }
+ tty_ldisc_reinit(tty);
+ /* This will need doing differently if we need to lock */
+ if (o_tty)
+ tty_ldisc_release(o_tty, NULL);
}
/**
@@ -712,10 +888,10 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
void tty_ldisc_init(struct tty_struct *tty)
{
- struct tty_ldisc ld;
- if (tty_ldisc_get(N_TTY, &ld) < 0)
+ struct tty_ldisc *ld = tty_ldisc_get(N_TTY);
+ if (IS_ERR(ld))
panic("n_tty: init_tty");
- tty_ldisc_assign(tty, &ld);
+ tty_ldisc_assign(tty, ld);
}
void tty_ldisc_begin(void)
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index 9b8004c7268..62dadfc95e3 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -137,7 +137,7 @@ int tty_port_carrier_raised(struct tty_port *port)
EXPORT_SYMBOL(tty_port_carrier_raised);
/**
- * tty_port_raise_dtr_rts - Riase DTR/RTS
+ * tty_port_raise_dtr_rts - Raise DTR/RTS
* @port: tty port
*
* Wrapper for the DTR/RTS raise logic. For the moment this is used
@@ -147,12 +147,28 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
void tty_port_raise_dtr_rts(struct tty_port *port)
{
- if (port->ops->raise_dtr_rts)
- port->ops->raise_dtr_rts(port);
+ if (port->ops->dtr_rts)
+ port->ops->dtr_rts(port, 1);
}
EXPORT_SYMBOL(tty_port_raise_dtr_rts);
/**
+ * tty_port_lower_dtr_rts - Lower DTR/RTS
+ * @port: tty port
+ *
+ * Wrapper for the DTR/RTS lowering logic. For the moment this is used
+ * to hide some internal details. This will eventually become entirely
+ * internal to the tty port.
+ */
+
+void tty_port_lower_dtr_rts(struct tty_port *port)
+{
+ if (port->ops->dtr_rts)
+ port->ops->dtr_rts(port, 0);
+}
+EXPORT_SYMBOL(tty_port_lower_dtr_rts);
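
With raise_dtr_rts() replaced by a single dtr_rts(port, onoff) operation, one driver callback now covers both directions. A hedged sketch of what a driver-side implementation might look like (mydrv_* types and the mctrl helpers are hypothetical):

/* Hypothetical port hook for the consolidated dtr_rts() operation;
 * onoff == 1 raises DTR/RTS, onoff == 0 drops them (e.g. on HUPCL close). */
static void mydrv_dtr_rts(struct tty_port *port, int onoff)
{
        struct mydrv_port *mp = container_of(port, struct mydrv_port, port);

        if (onoff)
                mydrv_set_mctrl(mp, TIOCM_DTR | TIOCM_RTS);
        else
                mydrv_clear_mctrl(mp, TIOCM_DTR | TIOCM_RTS);
}

static const struct tty_port_operations mydrv_port_ops = {
        .dtr_rts        = mydrv_dtr_rts,
};
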
+
+/**
* tty_port_block_til_ready - Waiting logic for tty open
* @port: the tty port being opened
* @tty: the tty device being bound
@@ -167,7 +183,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
* - port flags and counts
*
* The passed tty_port must implement the carrier_raised method if it can
- * do carrier detect and the raise_dtr_rts method if it supports software
+ * do carrier detect and the dtr_rts method if it supports software
* management of these lines. Note that the dtr/rts raise is done each
* iteration as a hangup may have previously dropped them while we wait.
*/
@@ -182,7 +198,8 @@ int tty_port_block_til_ready(struct tty_port *port,
/* block if port is in the process of being closed */
if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
- interruptible_sleep_on(&port->close_wait);
+ wait_event_interruptible(port->close_wait,
+ !(port->flags & ASYNC_CLOSING));
if (port->flags & ASYNC_HUP_NOTIFY)
return -EAGAIN;
else
@@ -205,7 +222,6 @@ int tty_port_block_til_ready(struct tty_port *port,
before the next open may complete */
retval = 0;
- add_wait_queue(&port->open_wait, &wait);
/* The port lock protects the port counts */
spin_lock_irqsave(&port->lock, flags);
@@ -219,7 +235,7 @@ int tty_port_block_til_ready(struct tty_port *port,
if (tty->termios->c_cflag & CBAUD)
tty_port_raise_dtr_rts(port);
- set_current_state(TASK_INTERRUPTIBLE);
+ prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
/* Check for a hangup or uninitialised port. Return accordingly */
if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
if (port->flags & ASYNC_HUP_NOTIFY)
@@ -240,8 +256,7 @@ int tty_port_block_til_ready(struct tty_port *port,
}
schedule();
}
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&port->open_wait, &wait);
+ finish_wait(&port->open_wait, &wait);
/* Update counts. A parallel hangup will have set count to zero and
we must not mess that up further */
@@ -292,6 +307,17 @@ int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct f
if (port->flags & ASYNC_INITIALIZED &&
port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
tty_wait_until_sent(tty, port->closing_wait);
+ if (port->drain_delay) {
+ unsigned int bps = tty_get_baud_rate(tty);
+ long timeout;
+
+ if (bps > 1200)
+ timeout = max_t(long, (HZ * 10 * port->drain_delay) / bps,
+ HZ / 10);
+ else
+ timeout = 2 * HZ;
+ schedule_timeout_interruptible(timeout);
+ }
return 1;
}
EXPORT_SYMBOL(tty_port_close_start);
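
The drain_delay logic scales the post-close wait with line speed: above 1200 bps the port sleeps for max(HZ * 10 * drain_delay / bps, HZ / 10) jiffies, otherwise a flat 2 * HZ. As an illustrative calculation (example values, not taken from any particular driver): with drain_delay = 16 character times at 9600 bps, HZ * 10 * 16 / 9600 = HZ / 60, so the HZ / 10 floor applies and the close path sleeps roughly 100 ms; at 300 bps the flat two-second delay is used instead.
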
@@ -302,6 +328,9 @@ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
tty_ldisc_flush(tty);
+ if (tty->termios->c_cflag & HUPCL)
+ tty_port_lower_dtr_rts(port);
+
spin_lock_irqsave(&port->lock, flags);
tty->closing = 0;
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index ffc9254f7e0..042c8149a6d 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -867,7 +867,7 @@ static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
int j;
struct device_node *node = vdev->dev.archdata.of_node;
- if (i > VIOTAPE_MAX_TAPE)
+ if (i >= VIOTAPE_MAX_TAPE)
return -ENODEV;
if (!node)
return -ENODEV;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ff6f5a4b58f..c74dacfa679 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -188,6 +188,9 @@ static void hvc_handle_input(struct virtqueue *vq)
* Finally we put our input buffer in the input queue, ready to receive. */
static int __devinit virtcons_probe(struct virtio_device *dev)
{
+ vq_callback_t *callbacks[] = { hvc_handle_input, NULL};
+ const char *names[] = { "input", "output" };
+ struct virtqueue *vqs[2];
int err;
vdev = dev;
@@ -199,20 +202,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
goto fail;
}
- /* Find the input queue. */
+ /* Find the queues. */
/* FIXME: This is why we want to wean off hvc: we do nothing
* when input comes in. */
- in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input);
- if (IS_ERR(in_vq)) {
- err = PTR_ERR(in_vq);
+ err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
+ if (err)
goto free;
- }
- out_vq = vdev->config->find_vq(vdev, 1, NULL);
- if (IS_ERR(out_vq)) {
- err = PTR_ERR(out_vq);
- goto free_in_vq;
- }
+ in_vq = vqs[0];
+ out_vq = vqs[1];
/* Start using the new console output. */
virtio_cons.get_chars = get_chars;
@@ -233,17 +231,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE);
if (IS_ERR(hvc)) {
err = PTR_ERR(hvc);
- goto free_out_vq;
+ goto free_vqs;
}
/* Register the input buffer the first time. */
add_inbuf();
return 0;
-free_out_vq:
- vdev->config->del_vq(out_vq);
-free_in_vq:
- vdev->config->del_vq(in_vq);
+free_vqs:
+ vdev->config->del_vqs(vdev);
free:
kfree(inbuf);
fail:
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 08151d4de48..d9113b4c76e 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -95,7 +95,6 @@
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
-#include <linux/bootmem.h>
#include <linux/pm.h>
#include <linux/font.h>
#include <linux/bitops.h>
@@ -172,8 +171,9 @@ int do_poke_blanked_console;
int console_blanked;
static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
-static int blankinterval = 10*60*HZ;
static int vesa_off_interval;
+static int blankinterval = 10*60;
+core_param(consoleblank, blankinterval, int, 0444);
static DECLARE_WORK(console_work, console_callback);
@@ -1486,7 +1486,7 @@ static void setterm_command(struct vc_data *vc)
update_attr(vc);
break;
case 9: /* set blanking interval */
- blankinterval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60 * HZ;
+ blankinterval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60;
poke_blanked_console();
break;
case 10: /* set bell frequency in Hz */
@@ -2872,17 +2872,14 @@ static int __init con_init(void)
if (blankinterval) {
blank_state = blank_normal_wait;
- mod_timer(&console_timer, jiffies + blankinterval);
+ mod_timer(&console_timer, jiffies + (blankinterval * HZ));
}
- /*
- * kmalloc is not running yet - we use the bootmem allocator.
- */
for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
- vc_cons[currcons].d = vc = alloc_bootmem(sizeof(struct vc_data));
+ vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT);
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
visual_init(vc, currcons, 1);
- vc->vc_screenbuf = (unsigned short *)alloc_bootmem(vc->vc_screenbuf_size);
+ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
vc->vc_kmalloced = 0;
vc_init(vc, vc->vc_rows, vc->vc_cols,
currcons || !vc->vc_sw->con_save_screen);
@@ -3681,7 +3678,7 @@ void do_unblank_screen(int leaving_gfx)
return; /* but leave console_blanked != 0 */
if (blankinterval) {
- mod_timer(&console_timer, jiffies + blankinterval);
+ mod_timer(&console_timer, jiffies + (blankinterval * HZ));
blank_state = blank_normal_wait;
}
@@ -3715,7 +3712,7 @@ void unblank_screen(void)
static void blank_screen_t(unsigned long dummy)
{
if (unlikely(!keventd_up())) {
- mod_timer(&console_timer, jiffies + blankinterval);
+ mod_timer(&console_timer, jiffies + (blankinterval * HZ));
return;
}
blank_timer_expired = 1;
@@ -3745,7 +3742,7 @@ void poke_blanked_console(void)
if (console_blanked)
unblank_screen();
else if (blankinterval) {
- mod_timer(&console_timer, jiffies + blankinterval);
+ mod_timer(&console_timer, jiffies + (blankinterval * HZ));
blank_state = blank_normal_wait;
}
}
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 1efb2879a94..eef216f7f61 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
+obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
+obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 40bd8c61c7d..72a633a6ec9 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -18,6 +18,7 @@
#include <linux/acpi_pmtmr.h>
#include <linux/clocksource.h>
+#include <linux/timex.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 1c92c39a53a..cf56a2af5fe 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -18,7 +18,6 @@
*/
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -29,7 +28,7 @@
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
-#include <linux/sh_cmt.h>
+#include <linux/sh_timer.h>
struct sh_cmt_priv {
void __iomem *mapbase;
@@ -47,6 +46,7 @@ struct sh_cmt_priv {
unsigned long rate;
spinlock_t lock;
struct clock_event_device ced;
+ struct clocksource cs;
unsigned long total_cycles;
};
@@ -59,7 +59,7 @@ static DEFINE_SPINLOCK(sh_cmt_lock);
static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
{
- struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
void __iomem *base = p->mapbase;
unsigned long offs;
@@ -83,7 +83,7 @@ static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
unsigned long value)
{
- struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
void __iomem *base = p->mapbase;
unsigned long offs;
@@ -110,23 +110,28 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
int *has_wrapped)
{
unsigned long v1, v2, v3;
+ int o1, o2;
+
+ o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
/* Make sure the timer value is stable. Stolen from acpi_pm.c */
do {
+ o2 = o1;
v1 = sh_cmt_read(p, CMCNT);
v2 = sh_cmt_read(p, CMCNT);
v3 = sh_cmt_read(p, CMCNT);
- } while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
- || (v3 > v1 && v3 < v2)));
+ o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+ } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
+ || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
- *has_wrapped = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+ *has_wrapped = o1;
return v2;
}
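
The reworked sh_cmt_get_counter() brackets the triple counter read with two reads of the overflow flag, so a wrap that lands mid-sequence forces a retry rather than pairing a stale flag with a fresh count. A self-contained sketch of the same read-until-consistent technique against a free-running counter (simulated hardware registers, hypothetical names):

#include <stdint.h>
#include <stdio.h>

/* Simulated 16-bit hardware counter plus its overflow flag. */
static volatile uint16_t hw_count;
static volatile int hw_overflow;

static uint16_t read_count(void)  { return hw_count; }
static int read_overflow(void)    { return hw_overflow; }

/* Retry until three counter reads are monotonic and the overflow flag
 * did not change across the whole sequence. */
static uint16_t stable_read(int *wrapped)
{
        uint16_t v1, v2, v3;
        int o1, o2;

        o1 = read_overflow();
        do {
                o2 = o1;
                v1 = read_count();
                v2 = read_count();
                v3 = read_count();
                o1 = read_overflow();
        } while (o1 != o2 ||
                 (v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) ||
                 (v3 > v1 && v3 < v2));

        *wrapped = o1;
        return v2;
}

int main(void)
{
        int wrapped;

        hw_count = 0x1234;
        printf("count=%#x wrapped=%d\n", stable_read(&wrapped), wrapped);
        return 0;
}
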
static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
{
- struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
@@ -144,7 +149,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
- struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
@@ -153,16 +158,18 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
return ret;
}
- *rate = clk_get_rate(p->clk) / 8;
/* make sure channel is disabled */
sh_cmt_start_stop_ch(p, 0);
/* configure channel, periodic mode and maximum timeout */
- if (p->width == 16)
- sh_cmt_write(p, CMCSR, 0);
- else
+ if (p->width == 16) {
+ *rate = clk_get_rate(p->clk) / 512;
+ sh_cmt_write(p, CMCSR, 0x43);
+ } else {
+ *rate = clk_get_rate(p->clk) / 8;
sh_cmt_write(p, CMCSR, 0x01a4);
+ }
sh_cmt_write(p, CMCOR, 0xffffffff);
sh_cmt_write(p, CMCNT, 0);
@@ -376,6 +383,68 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
spin_unlock_irqrestore(&p->lock, flags);
}
+static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
+{
+ return container_of(cs, struct sh_cmt_priv, cs);
+}
+
+static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
+{
+ struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+ unsigned long flags, raw;
+ unsigned long value;
+ int has_wrapped;
+
+ spin_lock_irqsave(&p->lock, flags);
+ value = p->total_cycles;
+ raw = sh_cmt_get_counter(p, &has_wrapped);
+
+ if (unlikely(has_wrapped))
+ raw += p->match_value;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ return value + raw;
+}
+
+static int sh_cmt_clocksource_enable(struct clocksource *cs)
+{
+ struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+ int ret;
+
+ p->total_cycles = 0;
+
+ ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
+ if (ret)
+ return ret;
+
+ /* TODO: calculate good shift from rate and counter bit width */
+ cs->shift = 0;
+ cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+ return 0;
+}
+
+static void sh_cmt_clocksource_disable(struct clocksource *cs)
+{
+ sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+}
+
+static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
+ char *name, unsigned long rating)
+{
+ struct clocksource *cs = &p->cs;
+
+ cs->name = name;
+ cs->rating = rating;
+ cs->read = sh_cmt_clocksource_read;
+ cs->enable = sh_cmt_clocksource_enable;
+ cs->disable = sh_cmt_clocksource_disable;
+ cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
+ cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ pr_info("sh_cmt: %s used as clock source\n", cs->name);
+ clocksource_register(cs);
+ return 0;
+}
+
static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
{
return container_of(ced, struct sh_cmt_priv, ced);
@@ -468,9 +537,9 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
clockevents_register_device(ced);
}
-int sh_cmt_register(struct sh_cmt_priv *p, char *name,
- unsigned long clockevent_rating,
- unsigned long clocksource_rating)
+static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
+ unsigned long clockevent_rating,
+ unsigned long clocksource_rating)
{
if (p->width == (sizeof(p->max_match_value) * 8))
p->max_match_value = ~0;
@@ -483,12 +552,15 @@ int sh_cmt_register(struct sh_cmt_priv *p, char *name,
if (clockevent_rating)
sh_cmt_register_clockevent(p, name, clockevent_rating);
+ if (clocksource_rating)
+ sh_cmt_register_clocksource(p, name, clocksource_rating);
+
return 0;
}
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
- struct sh_cmt_config *cfg = pdev->dev.platform_data;
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
struct resource *res;
int irq, ret;
ret = -ENXIO;
@@ -545,7 +617,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
if (resource_size(res) == 6) {
p->width = 16;
p->overflow_bit = 0x80;
- p->clear_bits = ~0xc0;
+ p->clear_bits = ~0x80;
} else {
p->width = 32;
p->overflow_bit = 0x8000;
@@ -566,8 +638,14 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
static int __devinit sh_cmt_probe(struct platform_device *pdev)
{
struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
+ if (p) {
+ pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
+ return 0;
+ }
+
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
@@ -577,7 +655,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
ret = sh_cmt_setup(p, pdev);
if (ret) {
kfree(p);
-
platform_set_drvdata(pdev, NULL);
}
return ret;
@@ -606,6 +683,7 @@ static void __exit sh_cmt_exit(void)
platform_driver_unregister(&sh_cmt_device_driver);
}
+early_platform_init("earlytimer", &sh_cmt_device_driver);
module_init(sh_cmt_init);
module_exit(sh_cmt_exit);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
new file mode 100644
index 00000000000..d1ae75454d1
--- /dev/null
+++ b/drivers/clocksource/sh_mtu2.c
@@ -0,0 +1,357 @@
+/*
+ * SuperH Timer Support - MTU2
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/clockchips.h>
+#include <linux/sh_timer.h>
+
+struct sh_mtu2_priv {
+ void __iomem *mapbase;
+ struct clk *clk;
+ struct irqaction irqaction;
+ struct platform_device *pdev;
+ unsigned long rate;
+ unsigned long periodic;
+ struct clock_event_device ced;
+};
+
+static DEFINE_SPINLOCK(sh_mtu2_lock);
+
+#define TSTR -1 /* shared register */
+#define TCR 0 /* channel register */
+#define TMDR 1 /* channel register */
+#define TIOR 2 /* channel register */
+#define TIER 3 /* channel register */
+#define TSR 4 /* channel register */
+#define TCNT 5 /* channel register */
+#define TGR 6 /* channel register */
+
+static unsigned long mtu2_reg_offs[] = {
+ [TCR] = 0,
+ [TMDR] = 1,
+ [TIOR] = 2,
+ [TIER] = 4,
+ [TSR] = 5,
+ [TCNT] = 6,
+ [TGR] = 8,
+};
+
+static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ void __iomem *base = p->mapbase;
+ unsigned long offs;
+
+ if (reg_nr == TSTR)
+ return ioread8(base + cfg->channel_offset);
+
+ offs = mtu2_reg_offs[reg_nr];
+
+ if ((reg_nr == TCNT) || (reg_nr == TGR))
+ return ioread16(base + offs);
+ else
+ return ioread8(base + offs);
+}
+
+static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr,
+ unsigned long value)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ void __iomem *base = p->mapbase;
+ unsigned long offs;
+
+ if (reg_nr == TSTR) {
+ iowrite8(value, base + cfg->channel_offset);
+ return;
+ }
+
+ offs = mtu2_reg_offs[reg_nr];
+
+ if ((reg_nr == TCNT) || (reg_nr == TGR))
+ iowrite16(value, base + offs);
+ else
+ iowrite8(value, base + offs);
+}
+
+static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ unsigned long flags, value;
+
+ /* start stop register shared by multiple timer channels */
+ spin_lock_irqsave(&sh_mtu2_lock, flags);
+ value = sh_mtu2_read(p, TSTR);
+
+ if (start)
+ value |= 1 << cfg->timer_bit;
+ else
+ value &= ~(1 << cfg->timer_bit);
+
+ sh_mtu2_write(p, TSTR, value);
+ spin_unlock_irqrestore(&sh_mtu2_lock, flags);
+}
+
+static int sh_mtu2_enable(struct sh_mtu2_priv *p)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ int ret;
+
+ /* enable clock */
+ ret = clk_enable(p->clk);
+ if (ret) {
+ pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
+ return ret;
+ }
+
+ /* make sure channel is disabled */
+ sh_mtu2_start_stop_ch(p, 0);
+
+ p->rate = clk_get_rate(p->clk) / 64;
+ p->periodic = (p->rate + HZ/2) / HZ;
+
+ /* "Periodic Counter Operation" */
+ sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */
+ sh_mtu2_write(p, TIOR, 0);
+ sh_mtu2_write(p, TGR, p->periodic);
+ sh_mtu2_write(p, TCNT, 0);
+ sh_mtu2_write(p, TMDR, 0);
+ sh_mtu2_write(p, TIER, 0x01);
+
+ /* enable channel */
+ sh_mtu2_start_stop_ch(p, 1);
+
+ return 0;
+}
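
sh_mtu2_enable() derives the compare value from the bus clock: the counter runs at clk / 64 and the period is rounded to the nearest whole tick, periodic = (rate + HZ/2) / HZ. With an illustrative 33 MHz peripheral clock and HZ = 100 (example numbers only), rate = 515625 and periodic = 5156, so TGRA clears the counter roughly every 10 ms.
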
+
+static void sh_mtu2_disable(struct sh_mtu2_priv *p)
+{
+ /* disable channel */
+ sh_mtu2_start_stop_ch(p, 0);
+
+ /* stop clock */
+ clk_disable(p->clk);
+}
+
+static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
+{
+ struct sh_mtu2_priv *p = dev_id;
+
+ /* acknowledge interrupt */
+ sh_mtu2_read(p, TSR);
+ sh_mtu2_write(p, TSR, 0xfe);
+
+ /* notify clockevent layer */
+ p->ced.event_handler(&p->ced);
+ return IRQ_HANDLED;
+}
+
+static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced)
+{
+ return container_of(ced, struct sh_mtu2_priv, ced);
+}
+
+static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
+ struct clock_event_device *ced)
+{
+ struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced);
+ int disabled = 0;
+
+ /* deal with old setting first */
+ switch (ced->mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ sh_mtu2_disable(p);
+ disabled = 1;
+ break;
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ pr_info("sh_mtu2: %s used for periodic clock events\n",
+ ced->name);
+ sh_mtu2_enable(p);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ if (!disabled)
+ sh_mtu2_disable(p);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ break;
+ }
+}
+
+static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
+ char *name, unsigned long rating)
+{
+ struct clock_event_device *ced = &p->ced;
+ int ret;
+
+ memset(ced, 0, sizeof(*ced));
+
+ ced->name = name;
+ ced->features = CLOCK_EVT_FEAT_PERIODIC;
+ ced->rating = rating;
+ ced->cpumask = cpumask_of(0);
+ ced->set_mode = sh_mtu2_clock_event_mode;
+
+ ret = setup_irq(p->irqaction.irq, &p->irqaction);
+ if (ret) {
+ pr_err("sh_mtu2: failed to request irq %d\n",
+ p->irqaction.irq);
+ return;
+ }
+
+ pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+ clockevents_register_device(ced);
+}
+
+static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
+ unsigned long clockevent_rating)
+{
+ if (clockevent_rating)
+ sh_mtu2_register_clockevent(p, name, clockevent_rating);
+
+ return 0;
+}
+
+static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
+{
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
+ struct resource *res;
+ int irq, ret;
+ ret = -ENXIO;
+
+ memset(p, 0, sizeof(*p));
+ p->pdev = pdev;
+
+ if (!cfg) {
+ dev_err(&p->pdev->dev, "missing platform data\n");
+ goto err0;
+ }
+
+ platform_set_drvdata(pdev, p);
+
+ res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&p->pdev->dev, "failed to get I/O memory\n");
+ goto err0;
+ }
+
+ irq = platform_get_irq(p->pdev, 0);
+ if (irq < 0) {
+ dev_err(&p->pdev->dev, "failed to get irq\n");
+ goto err0;
+ }
+
+ /* map memory, let mapbase point to our channel */
+ p->mapbase = ioremap_nocache(res->start, resource_size(res));
+ if (p->mapbase == NULL) {
+ pr_err("sh_mtu2: failed to remap I/O memory\n");
+ goto err0;
+ }
+
+ /* setup data for setup_irq() (too early for request_irq()) */
+ p->irqaction.name = cfg->name;
+ p->irqaction.handler = sh_mtu2_interrupt;
+ p->irqaction.dev_id = p;
+ p->irqaction.irq = irq;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.mask = CPU_MASK_NONE;
+
+ /* get hold of clock */
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
+
+ return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
+ err1:
+ iounmap(p->mapbase);
+ err0:
+ return ret;
+}
+
+static int __devinit sh_mtu2_probe(struct platform_device *pdev)
+{
+ struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
+ int ret;
+
+ if (p) {
+ pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
+ return 0;
+ }
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ ret = sh_mtu2_setup(p, pdev);
+ if (ret) {
+ kfree(p);
+ platform_set_drvdata(pdev, NULL);
+ }
+ return ret;
+}
+
+static int __devexit sh_mtu2_remove(struct platform_device *pdev)
+{
+ return -EBUSY; /* cannot unregister clockevent */
+}
+
+static struct platform_driver sh_mtu2_device_driver = {
+ .probe = sh_mtu2_probe,
+ .remove = __devexit_p(sh_mtu2_remove),
+ .driver = {
+ .name = "sh_mtu2",
+ }
+};
+
+static int __init sh_mtu2_init(void)
+{
+ return platform_driver_register(&sh_mtu2_device_driver);
+}
+
+static void __exit sh_mtu2_exit(void)
+{
+ platform_driver_unregister(&sh_mtu2_device_driver);
+}
+
+early_platform_init("earlytimer", &sh_mtu2_device_driver);
+module_init(sh_mtu2_init);
+module_exit(sh_mtu2_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
new file mode 100644
index 00000000000..d6ea4398bf6
--- /dev/null
+++ b/drivers/clocksource/sh_tmu.c
@@ -0,0 +1,461 @@
+/*
+ * SuperH Timer Support - TMU
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/sh_timer.h>
+
+struct sh_tmu_priv {
+ void __iomem *mapbase;
+ struct clk *clk;
+ struct irqaction irqaction;
+ struct platform_device *pdev;
+ unsigned long rate;
+ unsigned long periodic;
+ struct clock_event_device ced;
+ struct clocksource cs;
+};
+
+static DEFINE_SPINLOCK(sh_tmu_lock);
+
+#define TSTR -1 /* shared register */
+#define TCOR 0 /* channel register */
+#define TCNT 1 /* channel register */
+#define TCR 2 /* channel register */
+
+static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ void __iomem *base = p->mapbase;
+ unsigned long offs;
+
+ if (reg_nr == TSTR)
+ return ioread8(base - cfg->channel_offset);
+
+ offs = reg_nr << 2;
+
+ if (reg_nr == TCR)
+ return ioread16(base + offs);
+ else
+ return ioread32(base + offs);
+}
+
+static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr,
+ unsigned long value)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ void __iomem *base = p->mapbase;
+ unsigned long offs;
+
+ if (reg_nr == TSTR) {
+ iowrite8(value, base - cfg->channel_offset);
+ return;
+ }
+
+ offs = reg_nr << 2;
+
+ if (reg_nr == TCR)
+ iowrite16(value, base + offs);
+ else
+ iowrite32(value, base + offs);
+}
+
+static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ unsigned long flags, value;
+
+ /* start stop register shared by multiple timer channels */
+ spin_lock_irqsave(&sh_tmu_lock, flags);
+ value = sh_tmu_read(p, TSTR);
+
+ if (start)
+ value |= 1 << cfg->timer_bit;
+ else
+ value &= ~(1 << cfg->timer_bit);
+
+ sh_tmu_write(p, TSTR, value);
+ spin_unlock_irqrestore(&sh_tmu_lock, flags);
+}
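The TSTR start/stop register above is shared by every channel of the timer block, so each channel only flips its own bit under the global sh_tmu_lock with a read-modify-write. A minimal userspace sketch of the same pattern, with a pthread mutex standing in for the spinlock and a shadow byte standing in for the register (names invented, not part of the patch):

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t tstr_lock = PTHREAD_MUTEX_INITIALIZER;
static uint8_t tstr_shadow;                     /* stands in for the shared TSTR register */

static void channel_start_stop(int timer_bit, int start)
{
        pthread_mutex_lock(&tstr_lock);         /* one lock guards the shared register */
        if (start)
                tstr_shadow |= 1u << timer_bit;
        else
                tstr_shadow &= ~(1u << timer_bit);
        pthread_mutex_unlock(&tstr_lock);
}

int main(void)
{
        channel_start_stop(5, 1);               /* start the channel with timer_bit 5 */
        channel_start_stop(5, 0);               /* stop it again */
        return tstr_shadow;                     /* 0: the bit was cleared again */
}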
+
+static int sh_tmu_enable(struct sh_tmu_priv *p)
+{
+ struct sh_timer_config *cfg = p->pdev->dev.platform_data;
+ int ret;
+
+ /* enable clock */
+ ret = clk_enable(p->clk);
+ if (ret) {
+ pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk);
+ return ret;
+ }
+
+ /* make sure channel is disabled */
+ sh_tmu_start_stop_ch(p, 0);
+
+ /* maximum timeout */
+ sh_tmu_write(p, TCOR, 0xffffffff);
+ sh_tmu_write(p, TCNT, 0xffffffff);
+
+ /* configure channel to parent clock / 4, irq off */
+ p->rate = clk_get_rate(p->clk) / 4;
+ sh_tmu_write(p, TCR, 0x0000);
+
+ /* enable channel */
+ sh_tmu_start_stop_ch(p, 1);
+
+ return 0;
+}
+
+static void sh_tmu_disable(struct sh_tmu_priv *p)
+{
+ /* disable channel */
+ sh_tmu_start_stop_ch(p, 0);
+
+ /* stop clock */
+ clk_disable(p->clk);
+}
+
+static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
+ int periodic)
+{
+ /* stop timer */
+ sh_tmu_start_stop_ch(p, 0);
+
+ /* acknowledge interrupt */
+ sh_tmu_read(p, TCR);
+
+ /* enable interrupt */
+ sh_tmu_write(p, TCR, 0x0020);
+
+ /* reload delta value in case of periodic timer */
+ if (periodic)
+ sh_tmu_write(p, TCOR, delta);
+ else
+ sh_tmu_write(p, TCOR, 0);
+
+ sh_tmu_write(p, TCNT, delta);
+
+ /* start timer */
+ sh_tmu_start_stop_ch(p, 1);
+}
+
+static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
+{
+ struct sh_tmu_priv *p = dev_id;
+
+ /* disable or acknowledge interrupt */
+ if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
+ sh_tmu_write(p, TCR, 0x0000);
+ else
+ sh_tmu_write(p, TCR, 0x0020);
+
+ /* notify clockevent layer */
+ p->ced.event_handler(&p->ced);
+ return IRQ_HANDLED;
+}
+
+static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs)
+{
+ return container_of(cs, struct sh_tmu_priv, cs);
+}
+
+static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
+{
+ struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+ return sh_tmu_read(p, TCNT) ^ 0xffffffff;
+}
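TCNT counts down from 0xffffffff, while the clocksource core wants a value that increases with time; XOR-ing with 0xffffffff (the 32-bit one's complement) turns the remaining count into elapsed ticks. A small stand-alone check of that identity (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t tcnt = 0xfffffff0u;            /* counter has decremented 15 times */
        uint32_t elapsed = tcnt ^ 0xffffffffu;  /* same as 0xffffffff - tcnt for 32-bit values */

        assert(elapsed == 0xffffffffu - tcnt);
        assert(elapsed == 15);
        return 0;
}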
+
+static int sh_tmu_clocksource_enable(struct clocksource *cs)
+{
+ struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+ int ret;
+
+ ret = sh_tmu_enable(p);
+ if (ret)
+ return ret;
+
+ /* TODO: calculate good shift from rate and counter bit width */
+ cs->shift = 10;
+ cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+ return 0;
+}
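clocksource_hz2mult() picks mult so that nanoseconds are roughly (cycles * mult) >> shift; with the fixed shift of 10 used here, mult is (10^9 << 10) / rate. A hedged sketch of the conversion, assuming an 8.25 MHz input (the real rate is whatever clk_get_rate()/4 returns on the board):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t rate = 8250000;                /* assumed timer input clock, Hz */
        uint32_t shift = 10;
        uint32_t mult = (uint32_t)(((uint64_t)1000000000 << shift) / rate);
        uint64_t cycles = rate;                 /* one second worth of ticks */
        uint64_t ns = (cycles * mult) >> shift; /* should print close to 10^9 */

        printf("mult=%u, 1s of cycles -> %llu ns\n", mult, (unsigned long long)ns);
        return 0;
}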
+
+static void sh_tmu_clocksource_disable(struct clocksource *cs)
+{
+ sh_tmu_disable(cs_to_sh_tmu(cs));
+}
+
+static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
+ char *name, unsigned long rating)
+{
+ struct clocksource *cs = &p->cs;
+
+ cs->name = name;
+ cs->rating = rating;
+ cs->read = sh_tmu_clocksource_read;
+ cs->enable = sh_tmu_clocksource_enable;
+ cs->disable = sh_tmu_clocksource_disable;
+ cs->mask = CLOCKSOURCE_MASK(32);
+ cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ pr_info("sh_tmu: %s used as clock source\n", cs->name);
+ clocksource_register(cs);
+ return 0;
+}
+
+static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced)
+{
+ return container_of(ced, struct sh_tmu_priv, ced);
+}
+
+static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
+{
+ struct clock_event_device *ced = &p->ced;
+
+ sh_tmu_enable(p);
+
+ /* TODO: calculate good shift from rate and counter bit width */
+
+ ced->shift = 32;
+ ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
+ ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
+ ced->min_delta_ns = 5000;
+
+ if (periodic) {
+ p->periodic = (p->rate + HZ/2) / HZ;
+ sh_tmu_set_next(p, p->periodic, 1);
+ }
+}
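The clockevent side converts the other way: div_sc(rate, NSEC_PER_SEC, 32) yields mult = (rate << 32) / 10^9, clockevent_delta2ns() maps a tick count back to nanoseconds, and the periodic reload is just the rounded ticks-per-jiffy value computed above. A sketch with assumed rate and HZ (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t rate = 8250000;                           /* assumed timer input clock, Hz */
        unsigned int hz = 250;                             /* assumed CONFIG_HZ */
        unsigned int shift = 32;

        uint64_t mult = (rate << shift) / 1000000000ull;   /* div_sc(rate, NSEC_PER_SEC, 32) */
        uint64_t periodic = (rate + hz / 2) / hz;          /* ticks per jiffy, rounded */
        uint64_t delta_ns = (periodic << shift) / mult;    /* roughly clockevent_delta2ns() */

        printf("mult=%llu periodic=%llu ticks (~%llu ns per jiffy)\n",
               (unsigned long long)mult, (unsigned long long)periodic,
               (unsigned long long)delta_ns);
        return 0;
}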
+
+static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
+ struct clock_event_device *ced)
+{
+ struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
+ int disabled = 0;
+
+ /* deal with old setting first */
+ switch (ced->mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ case CLOCK_EVT_MODE_ONESHOT:
+ sh_tmu_disable(p);
+ disabled = 1;
+ break;
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ pr_info("sh_tmu: %s used for periodic clock events\n",
+ ced->name);
+ sh_tmu_clock_event_start(p, 1);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ pr_info("sh_tmu: %s used for oneshot clock events\n",
+ ced->name);
+ sh_tmu_clock_event_start(p, 0);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ if (!disabled)
+ sh_tmu_disable(p);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ break;
+ }
+}
+
+static int sh_tmu_clock_event_next(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
+
+ BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
+
+ /* program new delta value */
+ sh_tmu_set_next(p, delta, 0);
+ return 0;
+}
+
+static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
+ char *name, unsigned long rating)
+{
+ struct clock_event_device *ced = &p->ced;
+ int ret;
+
+ memset(ced, 0, sizeof(*ced));
+
+ ced->name = name;
+ ced->features = CLOCK_EVT_FEAT_PERIODIC;
+ ced->features |= CLOCK_EVT_FEAT_ONESHOT;
+ ced->rating = rating;
+ ced->cpumask = cpumask_of(0);
+ ced->set_next_event = sh_tmu_clock_event_next;
+ ced->set_mode = sh_tmu_clock_event_mode;
+
+ ret = setup_irq(p->irqaction.irq, &p->irqaction);
+ if (ret) {
+ pr_err("sh_tmu: failed to request irq %d\n",
+ p->irqaction.irq);
+ return;
+ }
+
+ pr_info("sh_tmu: %s used for clock events\n", ced->name);
+ clockevents_register_device(ced);
+}
+
+static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
+ unsigned long clockevent_rating,
+ unsigned long clocksource_rating)
+{
+ if (clockevent_rating)
+ sh_tmu_register_clockevent(p, name, clockevent_rating);
+ else if (clocksource_rating)
+ sh_tmu_register_clocksource(p, name, clocksource_rating);
+
+ return 0;
+}
+
+static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
+{
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
+ struct resource *res;
+ int irq, ret;
+ ret = -ENXIO;
+
+ memset(p, 0, sizeof(*p));
+ p->pdev = pdev;
+
+ if (!cfg) {
+ dev_err(&p->pdev->dev, "missing platform data\n");
+ goto err0;
+ }
+
+ platform_set_drvdata(pdev, p);
+
+ res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&p->pdev->dev, "failed to get I/O memory\n");
+ goto err0;
+ }
+
+ irq = platform_get_irq(p->pdev, 0);
+ if (irq < 0) {
+ dev_err(&p->pdev->dev, "failed to get irq\n");
+ goto err0;
+ }
+
+ /* map memory, let mapbase point to our channel */
+ p->mapbase = ioremap_nocache(res->start, resource_size(res));
+ if (p->mapbase == NULL) {
+ pr_err("sh_tmu: failed to remap I/O memory\n");
+ goto err0;
+ }
+
+ /* setup data for setup_irq() (too early for request_irq()) */
+ p->irqaction.name = cfg->name;
+ p->irqaction.handler = sh_tmu_interrupt;
+ p->irqaction.dev_id = p;
+ p->irqaction.irq = irq;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.mask = CPU_MASK_NONE;
+
+ /* get hold of clock */
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk);
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
+
+ return sh_tmu_register(p, cfg->name,
+ cfg->clockevent_rating,
+ cfg->clocksource_rating);
+ err1:
+ iounmap(p->mapbase);
+ err0:
+ return ret;
+}
+
+static int __devinit sh_tmu_probe(struct platform_device *pdev)
+{
+ struct sh_tmu_priv *p = platform_get_drvdata(pdev);
+ struct sh_timer_config *cfg = pdev->dev.platform_data;
+ int ret;
+
+ if (p) {
+ pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name);
+ return 0;
+ }
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ ret = sh_tmu_setup(p, pdev);
+ if (ret) {
+ kfree(p);
+ platform_set_drvdata(pdev, NULL);
+ }
+ return ret;
+}
+
+static int __devexit sh_tmu_remove(struct platform_device *pdev)
+{
+ return -EBUSY; /* cannot unregister clockevent and clocksource */
+}
+
+static struct platform_driver sh_tmu_device_driver = {
+ .probe = sh_tmu_probe,
+ .remove = __devexit_p(sh_tmu_remove),
+ .driver = {
+ .name = "sh_tmu",
+ }
+};
+
+static int __init sh_tmu_init(void)
+{
+ return platform_driver_register(&sh_tmu_device_driver);
+}
+
+static void __exit sh_tmu_exit(void)
+{
+ platform_driver_unregister(&sh_tmu_device_driver);
+}
+
+early_platform_init("earlytimer", &sh_tmu_device_driver);
+module_init(sh_tmu_init);
+module_exit(sh_tmu_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("SuperH TMU Timer Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/connector/Kconfig b/drivers/connector/Kconfig
index 100bfd42206..6e6730f9dfd 100644
--- a/drivers/connector/Kconfig
+++ b/drivers/connector/Kconfig
@@ -7,7 +7,7 @@ menuconfig CONNECTOR
of the netlink socket protocol.
Connector support can also be built as a module. If so, the module
- will be called cn.ko.
+ will be called cn.
if CONNECTOR
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d270e8eb3e6..6e2ec0b1894 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -808,7 +808,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
ret = -ENOMEM;
goto nomem_out;
}
- if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
free_cpumask_var(policy->cpus);
kfree(policy);
ret = -ENOMEM;
@@ -1070,11 +1070,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
+ unlock_policy_rwsem_write(cpu);
+
if (cpufreq_driver->target)
__cpufreq_governor(data, CPUFREQ_GOV_STOP);
- unlock_policy_rwsem_write(cpu);
-
kobject_put(&data->kobj);
/* we need to make sure that the underlying kobj is actually
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 2ecd95e4ab1..7a74d175287 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -91,6 +91,9 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
* (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
* cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
* is recursive for the same process. -Venki
+ * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
+ * would deadlock with cancel_delayed_work_sync(), which is needed for proper
+ * raceless workqueue teardown.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -542,7 +545,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
dbs_info->enable = 0;
- cancel_delayed_work(&dbs_info->work);
+ cancel_delayed_work_sync(&dbs_info->work);
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 338f428a15b..e741c339df7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -98,6 +98,9 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
* (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
* cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
* is recursive for the same process. -Venki
+ * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
+ * would deadlock with cancel_delayed_work_sync(), which is needed for proper
+ * raceless workqueue teardown.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -562,7 +565,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
dbs_info->enable = 0;
- cancel_delayed_work(&dbs_info->work);
+ cancel_delayed_work_sync(&dbs_info->work);
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
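The DEADLOCK ALERT comments added to both governors describe a self-deadlock: the work callback takes dbs_mutex, while code already holding dbs_mutex calls cancel_delayed_work_sync(), which must wait for that same callback to finish. A userspace pthread analogue of the cycle, written so that it releases the lock before waiting and therefore completes (illustrative only, not part of the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dbs_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *work_fn(void *arg)
{
        /* analogue of do_dbs_timer(): the work wants dbs_mutex */
        pthread_mutex_lock(&dbs_mutex);
        puts("work ran");
        pthread_mutex_unlock(&dbs_mutex);
        return NULL;
}

int main(void)
{
        pthread_t work;

        pthread_mutex_lock(&dbs_mutex);
        pthread_create(&work, NULL, work_fn, NULL);

        /*
         * pthread_join() stands in for cancel_delayed_work_sync().
         * Joining while still holding dbs_mutex would deadlock, because
         * work_fn() blocks on the same mutex, which is exactly the cycle
         * the comment warns about. Dropping the lock first breaks it.
         */
        pthread_mutex_unlock(&dbs_mutex);
        pthread_join(work, NULL);
        return 0;
}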
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 01afd758072..5b27692372b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -12,7 +12,7 @@ if CRYPTO_HW
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
- depends on X86_32 && !UML
+ depends on X86 && !UML
select CRYPTO_ALGAPI
help
Some VIA processors come with an integrated crypto engine
@@ -34,7 +34,7 @@ config CRYPTO_DEV_PADLOCK_AES
Available in VIA C3 and newer CPUs.
If unsure say M. The compiled module will be
- called padlock-aes.ko
+ called padlock-aes.
config CRYPTO_DEV_PADLOCK_SHA
tristate "PadLock driver for SHA1 and SHA256 algorithms"
@@ -47,7 +47,7 @@ config CRYPTO_DEV_PADLOCK_SHA
Available in VIA C7 and newer processors.
If unsure say M. The compiled module will be
- called padlock-sha.ko
+ called padlock-sha.
config CRYPTO_DEV_GEODE
tristate "Support for the Geode LX AES engine"
@@ -79,7 +79,7 @@ config ZCRYPT_MONOLITHIC
bool "Monolithic zcrypt module"
depends on ZCRYPT="m"
help
- Select this option if you want to have a single module z90crypt.ko
+ Select this option if you want to have a single module z90crypt,
that contains all parts of the crypto device driver (ap bus,
request router and all the card drivers).
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 2bef086fb34..5f753fc0873 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2564,7 +2564,7 @@ static void hifn_tasklet_callback(unsigned long data)
hifn_process_queue(dev);
}
-static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int err, i;
struct hifn_device *dev;
@@ -2696,7 +2696,7 @@ err_out_disable_pci_device:
return err;
}
-static void hifn_remove(struct pci_dev *pdev)
+static void __devexit hifn_remove(struct pci_dev *pdev)
{
int i;
struct hifn_device *dev;
@@ -2744,7 +2744,7 @@ static struct pci_driver hifn_pci_driver = {
.remove = __devexit_p(hifn_remove),
};
-static int __devinit hifn_init(void)
+static int __init hifn_init(void)
{
unsigned int freq;
int err;
@@ -2789,7 +2789,7 @@ static int __devinit hifn_init(void)
return 0;
}
-static void __devexit hifn_fini(void)
+static void __exit hifn_fini(void)
{
pci_unregister_driver(&hifn_pci_driver);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index f9f05d7a707..6c6656d3b1e 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -415,6 +415,7 @@ static void crypto_done_action(unsigned long arg)
static int init_ixp_crypto(void)
{
int ret = -ENODEV;
+ u32 msg[2] = { 0, 0 };
if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
@@ -426,9 +427,35 @@ static int init_ixp_crypto(void)
return ret;
if (!npe_running(npe_c)) {
- npe_load_firmware(npe_c, npe_name(npe_c), dev);
+ ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
+ if (ret) {
+ return ret;
+ }
+ if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+ goto npe_error;
+ } else {
+ if (npe_send_message(npe_c, msg, "STATUS_MSG"))
+ goto npe_error;
+
+ if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+ goto npe_error;
}
+ switch ((msg[1]>>16) & 0xff) {
+ case 3:
+ printk(KERN_WARNING "Firmware of %s lacks AES support\n",
+ npe_name(npe_c));
+ support_aes = 0;
+ break;
+ case 4:
+ case 5:
+ support_aes = 1;
+ break;
+ default:
+ printk(KERN_ERR "Firmware of %s lacks crypto support\n",
+ npe_name(npe_c));
+ return -ENODEV;
+ }
/* buffer_pool will also be used to sometimes store the hmac,
* so assure it is large enough
*/
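The status message read above encodes the firmware capabilities in byte 2 of msg[1]; the switch maps value 3 to "crypto but no AES", 4 and 5 to "AES capable", and anything else to "no crypto support". The decode on its own (case values taken from the hunk, otherwise illustrative and not part of the patch):

#include <stdint.h>
#include <stdio.h>

static int firmware_has_aes(uint32_t msg1)
{
        switch ((msg1 >> 16) & 0xff) {
        case 3:                 /* crypto firmware without AES */
                return 0;
        case 4:
        case 5:                 /* crypto firmware with AES */
                return 1;
        default:                /* no crypto support at all */
                return -1;
        }
}

int main(void)
{
        printf("%d %d %d\n", firmware_has_aes(0x00030000),
               firmware_has_aes(0x00040000), firmware_has_aes(0x00990000));
        return 0;
}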
@@ -459,6 +486,10 @@ static int init_ixp_crypto(void)
qmgr_enable_irq(RECV_QID);
return 0;
+
+npe_error:
+ printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
+ ret = -EIO;
err:
if (ctx_pool)
dma_pool_destroy(ctx_pool);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 3f0fdd18255..87f92c39b5f 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -154,7 +154,11 @@ static inline void padlock_reset_key(struct cword *cword)
int cpu = raw_smp_processor_id();
if (cword != per_cpu(last_cword, cpu))
+#ifndef CONFIG_X86_64
asm volatile ("pushfl; popfl");
+#else
+ asm volatile ("pushfq; popfq");
+#endif
}
static inline void padlock_store_cword(struct cword *cword)
@@ -208,10 +212,19 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
asm volatile ("test $1, %%cl;"
"je 1f;"
+#ifndef CONFIG_X86_64
"lea -1(%%ecx), %%eax;"
"mov $1, %%ecx;"
+#else
+ "lea -1(%%rcx), %%rax;"
+ "mov $1, %%rcx;"
+#endif
".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */
+#ifndef CONFIG_X86_64
"mov %%eax, %%ecx;"
+#else
+ "mov %%rax, %%rcx;"
+#endif
"1:"
".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
@@ -489,4 +502,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
-MODULE_ALIAS("aes-all");
+MODULE_ALIAS("aes");
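The padlock hunks guard the 32-bit-only instructions (pushfl/popfl, %eax/%ecx) with #ifndef CONFIG_X86_64 and add the 64-bit forms next to them. The same selection can be sketched in userspace with the compiler's architecture macros (x86 only, purely illustrative and not part of the patch):

#include <stdio.h>

static inline void reload_flags(void)
{
#if defined(__x86_64__)
        __asm__ volatile("pushfq; popfq");      /* 64-bit flag reload */
#elif defined(__i386__)
        __asm__ volatile("pushfl; popfl");      /* 32-bit flag reload */
#endif
}

int main(void)
{
        reload_flags();
        puts("flags reloaded (no-op on non-x86 builds)");
        return 0;
}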
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a3918c16b3d..c70775fd3ce 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -44,6 +44,8 @@
#include <crypto/sha.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
+#include <crypto/skcipher.h>
+#include <crypto/scatterwalk.h>
#include "talitos.h"
@@ -339,7 +341,8 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
status = error;
dma_unmap_single(dev, request->dma_desc,
- sizeof(struct talitos_desc), DMA_BIDIRECTIONAL);
+ sizeof(struct talitos_desc),
+ DMA_BIDIRECTIONAL);
/* copy entries so we can call callback outside lock */
saved_req.desc = request->desc;
@@ -413,7 +416,8 @@ static struct talitos_desc *current_desc(struct device *dev, int ch)
/*
* user diagnostics; report root cause of error based on execution unit status
*/
-static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
+static void report_eu_error(struct device *dev, int ch,
+ struct talitos_desc *desc)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int i;
@@ -684,8 +688,8 @@ struct talitos_ctx {
unsigned int authsize;
};
-static int aead_authenc_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int aead_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
@@ -694,8 +698,8 @@ static int aead_authenc_setauthsize(struct crypto_aead *authenc,
return 0;
}
-static int aead_authenc_setkey(struct crypto_aead *authenc,
- const u8 *key, unsigned int keylen)
+static int aead_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct rtattr *rta = (void *)key;
@@ -740,7 +744,7 @@ badkey:
}
/*
- * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
+ * talitos_edesc - s/w-extended descriptor
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @dma_len: length of dma mapped link_tbl space
@@ -752,17 +756,67 @@ badkey:
* is greater than 1, an integrity check value is concatenated to the end
* of link_tbl data
*/
-struct ipsec_esp_edesc {
+struct talitos_edesc {
int src_nents;
int dst_nents;
+ int src_is_chained;
+ int dst_is_chained;
int dma_len;
dma_addr_t dma_link_tbl;
struct talitos_desc desc;
struct talitos_ptr link_tbl[0];
};
+static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
+ unsigned int nents, enum dma_data_direction dir,
+ int chained)
+{
+ if (unlikely(chained))
+ while (sg) {
+ dma_map_sg(dev, sg, 1, dir);
+ sg = scatterwalk_sg_next(sg);
+ }
+ else
+ dma_map_sg(dev, sg, nents, dir);
+ return nents;
+}
+
+static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
+ enum dma_data_direction dir)
+{
+ while (sg) {
+ dma_unmap_sg(dev, sg, 1, dir);
+ sg = scatterwalk_sg_next(sg);
+ }
+}
+
+static void talitos_sg_unmap(struct device *dev,
+ struct talitos_edesc *edesc,
+ struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ unsigned int src_nents = edesc->src_nents ? : 1;
+ unsigned int dst_nents = edesc->dst_nents ? : 1;
+
+ if (src != dst) {
+ if (edesc->src_is_chained)
+ talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
+ else
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+
+ if (edesc->dst_is_chained)
+ talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else
+ if (edesc->src_is_chained)
+ talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
+ else
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+}
+
static void ipsec_esp_unmap(struct device *dev,
- struct ipsec_esp_edesc *edesc,
+ struct talitos_edesc *edesc,
struct aead_request *areq)
{
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
@@ -772,15 +826,7 @@ static void ipsec_esp_unmap(struct device *dev,
dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
- if (areq->src != areq->dst) {
- dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE);
- } else {
- dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
- DMA_BIDIRECTIONAL);
- }
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
@@ -795,13 +841,14 @@ static void ipsec_esp_encrypt_done(struct device *dev,
int err)
{
struct aead_request *areq = context;
- struct ipsec_esp_edesc *edesc =
- container_of(desc, struct ipsec_esp_edesc, desc);
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ struct talitos_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
+ edesc = container_of(desc, struct talitos_edesc, desc);
+
ipsec_esp_unmap(dev, edesc, areq);
/* copy the generated ICV to dst */
@@ -819,17 +866,18 @@ static void ipsec_esp_encrypt_done(struct device *dev,
}
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
- struct talitos_desc *desc, void *context,
- int err)
+ struct talitos_desc *desc,
+ void *context, int err)
{
struct aead_request *req = context;
- struct ipsec_esp_edesc *edesc =
- container_of(desc, struct ipsec_esp_edesc, desc);
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ struct talitos_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
+ edesc = container_of(desc, struct talitos_edesc, desc);
+
ipsec_esp_unmap(dev, edesc, req);
if (!err) {
@@ -851,20 +899,20 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
}
static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
- struct talitos_desc *desc, void *context,
- int err)
+ struct talitos_desc *desc,
+ void *context, int err)
{
struct aead_request *req = context;
- struct ipsec_esp_edesc *edesc =
- container_of(desc, struct ipsec_esp_edesc, desc);
+ struct talitos_edesc *edesc;
+
+ edesc = container_of(desc, struct talitos_edesc, desc);
ipsec_esp_unmap(dev, edesc, req);
/* check ICV auth status */
- if (!err)
- if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
- DESC_HDR_LO_ICCR1_PASS)
- err = -EBADMSG;
+ if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
+ DESC_HDR_LO_ICCR1_PASS))
+ err = -EBADMSG;
kfree(edesc);
@@ -886,7 +934,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
link_tbl_ptr->j_extent = 0;
link_tbl_ptr++;
cryptlen -= sg_dma_len(sg);
- sg = sg_next(sg);
+ sg = scatterwalk_sg_next(sg);
}
/* adjust (decrease) last one (or two) entry's len to cryptlen */
@@ -910,7 +958,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
/*
* fill in and submit ipsec_esp descriptor
*/
-static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
+static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
u8 *giv, u64 seq,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
@@ -952,32 +1000,31 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
desc->ptr[4].len = cpu_to_be16(cryptlen);
desc->ptr[4].j_extent = authsize;
- if (areq->src == areq->dst)
- sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
- DMA_BIDIRECTIONAL);
- else
- sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
- DMA_TO_DEVICE);
+ sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+ : DMA_TO_DEVICE,
+ edesc->src_is_chained);
if (sg_count == 1) {
desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
} else {
sg_link_tbl_len = cryptlen;
- if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) &&
- (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
+ if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len = cryptlen + authsize;
- }
+
sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
&edesc->link_tbl[0]);
if (sg_count > 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
- dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
- edesc->dma_len, DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
} else {
/* Only one segment now, so no link tbl needed */
- desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
+ desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->
+ src));
}
}
@@ -985,10 +1032,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
desc->ptr[5].len = cpu_to_be16(cryptlen);
desc->ptr[5].j_extent = authsize;
- if (areq->src != areq->dst) {
- sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE);
- }
+ if (areq->src != areq->dst)
+ sg_count = talitos_map_sg(dev, areq->dst,
+ edesc->dst_nents ? : 1,
+ DMA_FROM_DEVICE,
+ edesc->dst_is_chained);
if (sg_count == 1) {
desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
@@ -1033,49 +1081,55 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
return ret;
}
-
/*
* derive number of elements in scatterlist
*/
-static int sg_count(struct scatterlist *sg_list, int nbytes)
+static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
{
struct scatterlist *sg = sg_list;
int sg_nents = 0;
- while (nbytes) {
+ *chained = 0;
+ while (nbytes > 0) {
sg_nents++;
nbytes -= sg->length;
- sg = sg_next(sg);
+ if (!sg_is_last(sg) && (sg + 1)->length == 0)
+ *chained = 1;
+ sg = scatterwalk_sg_next(sg);
}
return sg_nents;
}
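sg_count() now loops while nbytes > 0 instead of while nbytes is non-zero: when the last scatterlist entry is longer than the bytes still required, the running total goes negative, and the old test would keep walking past the end of the list. The counting logic reduced to a plain array (illustrative only, not part of the patch):

#include <stdio.h>

/* count how many segments are needed to cover nbytes */
static int seg_count(const long *seg_len, int nsegs, long nbytes)
{
        int count = 0;

        for (int i = 0; i < nsegs && nbytes > 0; i++) {   /* "> 0", not "!= 0" */
                count++;
                nbytes -= seg_len[i];   /* may overshoot below zero on the last segment */
        }
        return count;
}

int main(void)
{
        long segs[] = { 64, 64, 4096 };

        printf("%d\n", seg_count(segs, 3, 100));   /* 2: 64 + 64 already covers 100 bytes */
        return 0;
}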
/*
- * allocate and map the ipsec_esp extended descriptor
+ * allocate and map the extended descriptor
*/
-static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
- int icv_stashing)
+static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int cryptlen,
+ unsigned int authsize,
+ int icv_stashing,
+ u32 cryptoflags)
{
- struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- struct ipsec_esp_edesc *edesc;
+ struct talitos_edesc *edesc;
int src_nents, dst_nents, alloc_len, dma_len;
- gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ int src_chained, dst_chained = 0;
+ gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
- dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
+ if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
+ dev_err(dev, "length exceeds h/w max limit\n");
return ERR_PTR(-EINVAL);
}
- src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize);
+ src_nents = sg_count(src, cryptlen + authsize, &src_chained);
src_nents = (src_nents == 1) ? 0 : src_nents;
- if (areq->dst == areq->src) {
+ if (dst == src) {
dst_nents = src_nents;
} else {
- dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
+ dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
@@ -1084,39 +1138,52 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
* allowing for two separate entries for ICV and generated ICV (+ 2),
* and the ICV data itself
*/
- alloc_len = sizeof(struct ipsec_esp_edesc);
+ alloc_len = sizeof(struct talitos_edesc);
if (src_nents || dst_nents) {
dma_len = (src_nents + dst_nents + 2) *
- sizeof(struct talitos_ptr) + ctx->authsize;
+ sizeof(struct talitos_ptr) + authsize;
alloc_len += dma_len;
} else {
dma_len = 0;
- alloc_len += icv_stashing ? ctx->authsize : 0;
+ alloc_len += icv_stashing ? authsize : 0;
}
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
- dev_err(ctx->dev, "could not allocate edescriptor\n");
+ dev_err(dev, "could not allocate edescriptor\n");
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+ edesc->src_is_chained = src_chained;
+ edesc->dst_is_chained = dst_chained;
edesc->dma_len = dma_len;
- edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0],
+ edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
edesc->dma_len, DMA_BIDIRECTIONAL);
return edesc;
}
-static int aead_authenc_encrypt(struct aead_request *req)
+static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
+ int icv_stashing)
+{
+ struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+ areq->cryptlen, ctx->authsize, icv_stashing,
+ areq->base.flags);
+}
+
+static int aead_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- struct ipsec_esp_edesc *edesc;
+ struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(req, 0);
+ edesc = aead_edesc_alloc(req, 0);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1126,70 +1193,67 @@ static int aead_authenc_encrypt(struct aead_request *req)
return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
}
-
-
-static int aead_authenc_decrypt(struct aead_request *req)
+static int aead_decrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
unsigned int authsize = ctx->authsize;
struct talitos_private *priv = dev_get_drvdata(ctx->dev);
- struct ipsec_esp_edesc *edesc;
+ struct talitos_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
req->cryptlen -= authsize;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(req, 1);
+ edesc = aead_edesc_alloc(req, 1);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
- (((!edesc->src_nents && !edesc->dst_nents) ||
- priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) {
+ ((!edesc->src_nents && !edesc->dst_nents) ||
+ priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
/* decrypt and check the ICV */
- edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND |
+ edesc->desc.hdr = ctx->desc_hdr_template |
+ DESC_HDR_DIR_INBOUND |
DESC_HDR_MODE1_MDEU_CICV;
/* reset integrity check result bits */
edesc->desc.hdr_lo = 0;
- return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done);
+ return ipsec_esp(edesc, req, NULL, 0,
+ ipsec_esp_decrypt_hwauth_done);
- } else {
-
- /* Have to check the ICV with software */
+ }
- edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+ /* Have to check the ICV with software */
+ edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
- /* stash incoming ICV for later cmp with ICV generated by the h/w */
- if (edesc->dma_len)
- icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
- else
- icvdata = &edesc->link_tbl[0];
+ /* stash incoming ICV for later cmp with ICV generated by the h/w */
+ if (edesc->dma_len)
+ icvdata = &edesc->link_tbl[edesc->src_nents +
+ edesc->dst_nents + 2];
+ else
+ icvdata = &edesc->link_tbl[0];
- sg = sg_last(req->src, edesc->src_nents ? : 1);
+ sg = sg_last(req->src, edesc->src_nents ? : 1);
- memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
- ctx->authsize);
+ memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
+ ctx->authsize);
- return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
- }
+ return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
}
-static int aead_authenc_givencrypt(
- struct aead_givcrypt_request *req)
+static int aead_givencrypt(struct aead_givcrypt_request *req)
{
struct aead_request *areq = &req->areq;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- struct ipsec_esp_edesc *edesc;
+ struct talitos_edesc *edesc;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(areq, 0);
+ edesc = aead_edesc_alloc(areq, 0);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1204,31 +1268,228 @@ static int aead_authenc_givencrypt(
ipsec_esp_encrypt_done);
}
+static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
+
+ if (keylen > TALITOS_MAX_KEY_SIZE)
+ goto badkey;
+
+ if (keylen < alg->min_keysize || keylen > alg->max_keysize)
+ goto badkey;
+
+ memcpy(&ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+
+badkey:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static void common_nonsnoop_unmap(struct device *dev,
+ struct talitos_edesc *edesc,
+ struct ablkcipher_request *areq)
+{
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
+
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
+
+ if (edesc->dma_len)
+ dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+}
+
+static void ablkcipher_done(struct device *dev,
+ struct talitos_desc *desc, void *context,
+ int err)
+{
+ struct ablkcipher_request *areq = context;
+ struct talitos_edesc *edesc;
+
+ edesc = container_of(desc, struct talitos_edesc, desc);
+
+ common_nonsnoop_unmap(dev, edesc, areq);
+
+ kfree(edesc);
+
+ areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop(struct talitos_edesc *edesc,
+ struct ablkcipher_request *areq,
+ u8 *giv,
+ void (*callback) (struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error))
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct device *dev = ctx->dev;
+ struct talitos_desc *desc = &edesc->desc;
+ unsigned int cryptlen = areq->nbytes;
+ unsigned int ivsize;
+ int sg_count, ret;
+
+ /* first DWORD empty */
+ desc->ptr[0].len = 0;
+ desc->ptr[0].ptr = 0;
+ desc->ptr[0].j_extent = 0;
+
+ /* cipher iv */
+ ivsize = crypto_ablkcipher_ivsize(cipher);
+ map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
+ DMA_TO_DEVICE);
+
+ /* cipher key */
+ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+ (char *)&ctx->key, 0, DMA_TO_DEVICE);
+
+ /*
+ * cipher in
+ */
+ desc->ptr[3].len = cpu_to_be16(cryptlen);
+ desc->ptr[3].j_extent = 0;
+
+ sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+ : DMA_TO_DEVICE,
+ edesc->src_is_chained);
+
+ if (sg_count == 1) {
+ desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
+ } else {
+ sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
+ &edesc->link_tbl[0]);
+ if (sg_count > 1) {
+ desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+ } else {
+ /* Only one segment now, so no link tbl needed */
+ desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->
+ src));
+ }
+ }
+
+ /* cipher out */
+ desc->ptr[4].len = cpu_to_be16(cryptlen);
+ desc->ptr[4].j_extent = 0;
+
+ if (areq->src != areq->dst)
+ sg_count = talitos_map_sg(dev, areq->dst,
+ edesc->dst_nents ? : 1,
+ DMA_FROM_DEVICE,
+ edesc->dst_is_chained);
+
+ if (sg_count == 1) {
+ desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+ } else {
+ struct talitos_ptr *link_tbl_ptr =
+ &edesc->link_tbl[edesc->src_nents + 1];
+
+ desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
+ edesc->dma_link_tbl +
+ edesc->src_nents + 1);
+ sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
+ link_tbl_ptr);
+ dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
+ }
+
+ /* iv out */
+ map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
+ DMA_FROM_DEVICE);
+
+ /* last DWORD empty */
+ desc->ptr[6].len = 0;
+ desc->ptr[6].ptr = 0;
+ desc->ptr[6].j_extent = 0;
+
+ ret = talitos_submit(dev, desc, callback, areq);
+ if (ret != -EINPROGRESS) {
+ common_nonsnoop_unmap(dev, edesc, areq);
+ kfree(edesc);
+ }
+ return ret;
+}
+
+static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
+ areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
+ 0, 0, areq->base.flags);
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct talitos_edesc *edesc;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(areq);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* set encrypt */
+ edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
+
+ return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct talitos_edesc *edesc;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(areq);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+
+ return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+}
+
struct talitos_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- struct aead_alg aead;
- struct device *dev;
+ struct crypto_alg alg;
__be32 desc_hdr_template;
};
static struct talitos_alg_template driver_algs[] = {
- /* single-pass ipsec_esp descriptor */
+ /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
{
- .name = "authenc(hmac(sha1),cbc(aes))",
- .driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
- .blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
- .geniv = "<built-in>",
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- },
+ .alg = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ }
+ },
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC |
@@ -1238,19 +1499,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{
- .name = "authenc(hmac(sha1),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
- .geniv = "<built-in>",
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- },
+ .alg = {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ }
+ },
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
@@ -1261,19 +1526,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{
- .name = "authenc(hmac(sha256),cbc(aes))",
- .driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
- .blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
- .geniv = "<built-in>",
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- },
+ .alg = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ }
+ },
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC |
@@ -1283,19 +1552,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{
- .name = "authenc(hmac(sha256),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
- .geniv = "<built-in>",
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- },
+ .alg = {
+ .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ }
+ },
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
@@ -1306,19 +1579,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{
- .name = "authenc(hmac(md5),cbc(aes))",
- .driver_name = "authenc-hmac-md5-cbc-aes-talitos",
- .blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
- .geniv = "<built-in>",
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- },
+ .alg = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ }
+ },
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC |
@@ -1328,19 +1605,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
{
- .name = "authenc(hmac(md5),cbc(des3_ede))",
- .driver_name = "authenc-hmac-md5-cbc-3des-talitos",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
- .geniv = "<built-in>",
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- },
+ .alg = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ }
+ },
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
@@ -1349,6 +1630,52 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_INIT |
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
+ },
+ /* ABLKCIPHER algorithms. */
+ {
+ .alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC,
+ },
+ {
+ .alg = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES,
}
};
@@ -1362,12 +1689,14 @@ struct talitos_crypto_alg {
static int talitos_cra_init(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
- struct talitos_crypto_alg *talitos_alg =
- container_of(alg, struct talitos_crypto_alg, crypto_alg);
+ struct talitos_crypto_alg *talitos_alg;
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg);
+
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
+
/* copy descriptor header template value */
ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
@@ -1453,19 +1782,13 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
return ERR_PTR(-ENOMEM);
alg = &t_alg->crypto_alg;
+ *alg = template->alg;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
alg->cra_module = THIS_MODULE;
alg->cra_init = talitos_cra_init;
alg->cra_priority = TALITOS_CRA_PRIORITY;
- alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
- alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_type = &crypto_aead_type;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
- alg->cra_u.aead = template->aead;
t_alg->desc_hdr_template = template->desc_hdr_template;
t_alg->dev = dev;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index da8a8ed9e41..f18d1bde043 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -179,9 +179,14 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan)
static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
struct fsl_desc_sw *desc)
{
+ u64 snoop_bits;
+
+ snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+ ? FSL_DMA_SNEN : 0;
+
desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
- DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
- 64);
+ DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+ | snoop_bits, 64);
}
static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
@@ -313,8 +318,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
- struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+ struct fsl_desc_sw *desc;
unsigned long flags;
dma_cookie_t cookie;
@@ -322,14 +327,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&fsl_chan->desc_lock, flags);
cookie = fsl_chan->common.cookie;
- cookie++;
- if (cookie < 0)
- cookie = 1;
- desc->async_tx.cookie = cookie;
- fsl_chan->common.cookie = desc->async_tx.cookie;
+ list_for_each_entry(desc, &tx->tx_list, node) {
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
- append_ld_queue(fsl_chan, desc);
- list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);
+ desc->async_tx.cookie = cookie;
+ }
+
+ fsl_chan->common.cookie = cookie;
+ append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
+ list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
@@ -454,8 +462,8 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
{
struct fsl_dma_chan *fsl_chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
+ struct list_head *list;
size_t copy;
- LIST_HEAD(link_chain);
if (!chan)
return NULL;
@@ -472,7 +480,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
if (!new) {
dev_err(fsl_chan->dev,
"No free memory for link descriptor\n");
- return NULL;
+ goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
@@ -507,7 +515,19 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
/* Set End-of-link to the last link descriptor of new list*/
set_ld_eol(fsl_chan, new);
- return first ? &first->async_tx : NULL;
+ return &first->async_tx;
+
+fail:
+ if (!first)
+ return NULL;
+
+ list = &first->async_tx.tx_list;
+ list_for_each_entry_safe_reverse(new, prev, list, node) {
+ list_del(&new->node);
+ dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+ }
+
+ return NULL;
}
/**
@@ -598,15 +618,16 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
dma_addr_t next_dest_addr;
unsigned long flags;
+ spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
if (!dma_is_idle(fsl_chan))
- return;
+ goto out_unlock;
dma_halt(fsl_chan);
/* If there are some link descriptors
* not transfered in queue. We need to start it.
*/
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
/* Find the first un-transfer desciptor */
for (ld_node = fsl_chan->ld_queue.next;
@@ -617,19 +638,20 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
fsl_chan->common.cookie) == DMA_SUCCESS);
ld_node = ld_node->next);
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-
if (ld_node != &fsl_chan->ld_queue) {
/* Get the ld start address from ld_queue */
next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
- dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n",
- (void *)next_dest_addr);
+ dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n",
+ (unsigned long long)next_dest_addr);
set_cdar(fsl_chan, next_dest_addr);
dma_start(fsl_chan);
} else {
set_cdar(fsl_chan, 0);
set_ndar(fsl_chan, 0);
}
+
+out_unlock:
+ spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}
/**
@@ -734,8 +756,9 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
*/
if (stat & FSL_DMA_SR_EOSI) {
dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
- dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
- (void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
+ dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
+ (unsigned long long)get_cdar(fsl_chan),
+ (unsigned long long)get_ndar(fsl_chan));
stat &= ~FSL_DMA_SR_EOSI;
update_cookie = 1;
}
@@ -830,7 +853,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
- if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
+ if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
dev_err(fdev->dev, "There is no %d channel!\n",
new_fsl_chan->id);
err = -EINVAL;
@@ -925,8 +948,8 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
}
dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
- "controller at %p...\n",
- match->compatible, (void *)fdev->reg.start);
+ "controller at 0x%llx...\n",
+ match->compatible, (unsigned long long)fdev->reg.start);
fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
- fdev->reg.start + 1);
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 1955ee8d6d2..a600fc0f796 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -173,7 +173,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
- if (i7300_idle_platform_probe(NULL, NULL) == 0) {
+ if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
device->common.chancnt--;
}
#endif
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index e5f5c5a8ba6..ab4f3592a11 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -49,7 +49,6 @@ config EDAC_DEBUG_VERBOSE
config EDAC_MM_EDAC
tristate "Main Memory EDAC (Error Detection And Correction) reporting"
- default y
help
Some systems are able to detect and correct errors in main
memory. EDAC can report statistics on memory error
@@ -58,6 +57,31 @@ config EDAC_MM_EDAC
occurred so that a particular failing memory module can be
replaced. If unsure, select 'Y'.
+config EDAC_AMD64
+ tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
+ depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI
+ help
+ Support for error detection and correction on the AMD 64
+ Families of Memory Controllers (K8, F10h and F11h)
+
+config EDAC_AMD64_ERROR_INJECTION
+ bool "Sysfs Error Injection facilities"
+ depends on EDAC_AMD64
+ help
+ Recent Opterons (Family 10h and later) provide for Memory Error
+ Injection into the ECC detection circuits. The amd64_edac module
+ allows the operator/user to inject Uncorrectable and Correctable
+ errors into DRAM.
+
+ When enabled, in each of the respective memory controller directories
+ (/sys/devices/system/edac/mc/mcX), there are 3 input files:
+
+ - inject_section (0..3, 16-byte section of 64-byte cacheline),
+ - inject_word (0..8, 16-bit word of 16-byte section),
+ - inject_ecc_vector (hex ecc vector: select bits of inject word)
+
+ In addition, there are two control files, inject_read and inject_write,
+ which trigger the DRAM ECC Read and Write respectively.
config EDAC_AMD76X
tristate "AMD 76x (760, 762, 768)"
@@ -192,16 +216,20 @@ config EDAC_PPC4XX
config EDAC_AMD8131
tristate "AMD8131 HyperTransport PCI-X Tunnel"
- depends on EDAC_MM_EDAC && PCI
+ depends on EDAC_MM_EDAC && PCI && PPC_MAPLE
help
Support for error detection and correction on the
AMD8131 HyperTransport PCI-X Tunnel chip.
+ Note: add more Kconfig dependencies if it's adopted
+ on a machine other than Maple.
config EDAC_AMD8111
tristate "AMD8111 HyperTransport I/O Hub"
- depends on EDAC_MM_EDAC && PCI
+ depends on EDAC_MM_EDAC && PCI && PPC_MAPLE
help
Support for error detection and correction on the
AMD8111 HyperTransport I/O Hub chip.
+ Note: add more Kconfig dependencies if it's adopted
+ on a machine other than Maple.
endif # EDAC
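The EDAC_AMD64_ERROR_INJECTION help text above describes per-controller sysfs attributes under /sys/devices/system/edac/mc/mcX. A hedged userspace sketch of driving them; the paths come from the help text, and the value written to the trigger file is an assumption:

#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return -1;
        }
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        const char *mc = "/sys/devices/system/edac/mc/mc0";
        char path[256];

        snprintf(path, sizeof(path), "%s/inject_section", mc);
        write_attr(path, "2");                  /* 16-byte section of the cacheline */
        snprintf(path, sizeof(path), "%s/inject_word", mc);
        write_attr(path, "3");                  /* 16-bit word within that section */
        snprintf(path, sizeof(path), "%s/inject_ecc_vector", mc);
        write_attr(path, "0x1");                /* bits of the word to corrupt */
        snprintf(path, sizeof(path), "%s/inject_write", mc);
        write_attr(path, "1");                  /* assumed trigger value for the DRAM ECC write */
        return 0;
}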
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index a5fdcf02f59..633dc5604ee 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -30,8 +30,17 @@ obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
obj-$(CONFIG_EDAC_X38) += x38_edac.o
obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
+
+amd64_edac_mod-y := amd64_edac_err_types.o amd64_edac.o
+amd64_edac_mod-$(CONFIG_EDAC_DEBUG) += amd64_edac_dbg.o
+amd64_edac_mod-$(CONFIG_EDAC_AMD64_ERROR_INJECTION) += amd64_edac_inj.o
+
+obj-$(CONFIG_EDAC_AMD64) += amd64_edac_mod.o
+
obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o
obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o
obj-$(CONFIG_EDAC_CELL) += cell_edac.o
obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
+obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
+obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
new file mode 100644
index 00000000000..c36bf40568c
--- /dev/null
+++ b/drivers/edac/amd64_edac.c
@@ -0,0 +1,3354 @@
+#include "amd64_edac.h"
+#include <asm/k8.h>
+
+static struct edac_pci_ctl_info *amd64_ctl_pci;
+
+static int report_gart_errors;
+module_param(report_gart_errors, int, 0644);
+
+/*
+ * Set by command line parameter. If BIOS has enabled the ECC, this override is
+ * cleared to prevent re-enabling the hardware by this driver.
+ */
+static int ecc_enable_override;
+module_param(ecc_enable_override, int, 0644);
+
+/* Lookup table for all possible MC control instances */
+struct amd64_pvt;
+static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
+static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];
+
+/*
+ * Memory scrubber control interface. For K8, memory scrubbing is handled by
+ * hardware and can involve L2 cache, dcache as well as the main memory. With
+ * F10, this is extended to L3 cache scrubbing on CPU models sporting that
+ * functionality.
+ *
+ * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
+ * (dram) over to cache lines. This is nasty, so we will use bandwidth in
+ * bytes/sec for the setting.
+ *
+ * Currently, we only do dram scrubbing. If the scrubbing is done in software on
+ * other archs, we might not have access to the caches directly.
+ */
+
+/*
+ * Scan the scrub rate mapping table for a close or matching bandwidth value
+ * to issue. If the requested value is too big, then use the last maximum
+ * value found.
+ */
+static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
+ u32 min_scrubrate)
+{
+ u32 scrubval;
+ int i;
+
+ /*
+ * map the configured rate (new_bw) to a value specific to the AMD64
+ * memory controller and apply to register. Search for the first
+ * bandwidth entry that is greater than or equal to the setting requested
+ * and program that. If at last entry, turn off DRAM scrubbing.
+ */
+ for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
+ /*
+ * skip scrub rates which aren't recommended
+ * (see F10 BKDG, F3x58)
+ */
+ if (scrubrates[i].scrubval < min_scrubrate)
+ continue;
+
+ if (scrubrates[i].bandwidth <= new_bw)
+ break;
+
+ /*
+ * if no suitable bandwidth found, turn off DRAM scrubbing
+ * entirely by falling back to the last element in the
+ * scrubrates array.
+ */
+ }
+
+ scrubval = scrubrates[i].scrubval;
+ if (scrubval)
+ edac_printk(KERN_DEBUG, EDAC_MC,
+ "Setting scrub rate bandwidth: %u\n",
+ scrubrates[i].bandwidth);
+ else
+ edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");
+
+ pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
+
+ return 0;
+}
+
+static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 min_scrubrate = 0x0;
+
+ switch (boot_cpu_data.x86) {
+ case 0xf:
+ min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
+ break;
+ case 0x10:
+ min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
+ break;
+ case 0x11:
+ min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
+ break;
+
+ default:
+ amd64_printk(KERN_ERR, "Unsupported family!\n");
+ break;
+ }
+ return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
+ min_scrubrate);
+}
+
+static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 scrubval = 0;
+ int status = -1, i, ret = 0;
+
+ ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
+ if (ret)
+ debugf0("Reading K8_SCRCTRL failed\n");
+
+ scrubval = scrubval & 0x001F;
+
+ edac_printk(KERN_DEBUG, EDAC_MC,
+ "pci-read, sdram scrub control value: %d \n", scrubval);
+
+ for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
+ if (scrubrates[i].scrubval == scrubval) {
+ *bw = scrubrates[i].bandwidth;
+ status = 0;
+ break;
+ }
+ }
+
+ return status;
+}
+
+/* Map from a CSROW entry to the mask entry that operates on it */
+static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
+{
+ return csrow >> (pvt->num_dcsm >> 3);
+}
+
+/* return the 'base' address for the i'th CS entry of the 'dct' DRAM controller */
+static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
+{
+ if (dct == 0)
+ return pvt->dcsb0[csrow];
+ else
+ return pvt->dcsb1[csrow];
+}
+
+/*
+ * Return the 'mask' address for the i'th CS entry. This function is needed
+ * because the number of DCSM registers on Rev E and prior differs from that
+ * on Rev F and later.
+ */
+static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
+{
+ if (dct == 0)
+ return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
+ else
+ return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
+}
+
+
+/*
+ * In *base and *limit, pass back the full 40-bit base and limit physical
+ * addresses for the node given by node_id. This information is obtained from
+ * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
+ * base and limit addresses are of type SysAddr, as defined at the start of
+ * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
+ * in the address range they represent.
+ */
+static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
+ u64 *base, u64 *limit)
+{
+ *base = pvt->dram_base[node_id];
+ *limit = pvt->dram_limit[node_id];
+}
+
+/*
+ * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
+ * with node_id
+ */
+static int amd64_base_limit_match(struct amd64_pvt *pvt,
+ u64 sys_addr, int node_id)
+{
+ u64 base, limit, addr;
+
+ amd64_get_base_and_limit(pvt, node_id, &base, &limit);
+
+ /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
+ * all ones if the most significant implemented address bit is 1.
+ * Here we discard bits 63-40. See section 3.4.2 of AMD publication
+ * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
+ * Application Programming.
+ */
+ addr = sys_addr & 0x000000ffffffffffull;
+
+ return (addr >= base) && (addr <= limit);
+}
+
+/*
+ * Attempt to map a SysAddr to a node. On success, return a pointer to the
+ * mem_ctl_info structure for the node that the SysAddr maps to.
+ *
+ * On failure, return NULL.
+ */
+static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
+ u64 sys_addr)
+{
+ struct amd64_pvt *pvt;
+ int node_id;
+ u32 intlv_en, bits;
+
+ /*
+ * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
+ * 3.4.4.2) registers to map the SysAddr to a node ID.
+ */
+ pvt = mci->pvt_info;
+
+ /*
+ * The value of this field should be the same for all DRAM Base
+ * registers. Therefore we arbitrarily choose to read it from the
+ * register for node 0.
+ */
+ intlv_en = pvt->dram_IntlvEn[0];
+
+ if (intlv_en == 0) {
+ for (node_id = 0; ; ) {
+ if (amd64_base_limit_match(pvt, sys_addr, node_id))
+ break;
+
+ if (++node_id >= DRAM_REG_COUNT)
+ goto err_no_match;
+ }
+ goto found;
+ }
+
+ if (unlikely((intlv_en != (0x01 << 8)) &&
+ (intlv_en != (0x03 << 8)) &&
+ (intlv_en != (0x07 << 8)))) {
+ amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
+ "IntlvEn field of DRAM Base Register for node 0: "
+ "This probably indicates a BIOS bug.\n", intlv_en);
+ return NULL;
+ }
+
+ bits = (((u32) sys_addr) >> 12) & intlv_en;
+
+ for (node_id = 0; ; ) {
+ if ((pvt->dram_limit[node_id] & intlv_en) == bits)
+ break; /* intlv_sel field matches */
+
+ if (++node_id >= DRAM_REG_COUNT)
+ goto err_no_match;
+ }
+
+ /* sanity test for sys_addr */
+ if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
+ amd64_printk(KERN_WARNING,
+ "%s(): sys_addr 0x%lx falls outside base/limit "
+ "address range for node %d with node interleaving "
+ "enabled.\n", __func__, (unsigned long)sys_addr,
+ node_id);
+ return NULL;
+ }
+
+found:
+ return edac_mc_find(node_id);
+
+err_no_match:
+ debugf2("sys_addr 0x%lx doesn't match any node\n",
+ (unsigned long)sys_addr);
+
+ return NULL;
+}
+
+/*
+ * Extract the DRAM CS base address from selected csrow register.
+ */
+static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
+{
+ return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
+ pvt->dcs_shift;
+}
+
+/*
+ * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
+ */
+static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
+{
+ u64 dcsm_bits, other_bits;
+ u64 mask;
+
+ /* Extract bits from DRAM CS Mask. */
+ dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
+
+ other_bits = pvt->dcsm_mask;
+ other_bits = ~(other_bits << pvt->dcs_shift);
+
+ /*
+ * The extracted bits from DCSM belong in the spaces represented by
+ * the cleared bits in other_bits.
+ */
+ mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
+
+ return mask;
+}
+
+/*
+ * @input_addr is an InputAddr associated with the node given by mci. Return the
+ * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
+ */
+static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
+{
+ struct amd64_pvt *pvt;
+ int csrow;
+ u64 base, mask;
+
+ pvt = mci->pvt_info;
+
+ /*
+ * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
+ * base/mask register pair, test the condition shown near the start of
+ * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
+ */
+ for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
+
+ /* This DRAM chip select is disabled on this node */
+ if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
+ continue;
+
+ base = base_from_dct_base(pvt, csrow);
+ mask = ~mask_from_dct_mask(pvt, csrow);
+
+ if ((input_addr & mask) == (base & mask)) {
+ debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
+ (unsigned long)input_addr, csrow,
+ pvt->mc_node_id);
+
+ return csrow;
+ }
+ }
+
+ debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
+ (unsigned long)input_addr, pvt->mc_node_id);
+
+ return -1;
+}
+
+/*
+ * Return the base value defined by the DRAM Base register for the node
+ * represented by mci. This function returns the full 40-bit value despite the
+ * fact that the register only stores bits 39-24 of the value. See section
+ * 3.4.4.1 (BKDG #26094, K8, revA-E)
+ */
+static inline u64 get_dram_base(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return pvt->dram_base[pvt->mc_node_id];
+}
+
+/*
+ * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
+ * for the node represented by mci. Info is passed back in *hole_base,
+ * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
+ * info is invalid. Info may be invalid for either of the following reasons:
+ *
+ * - The revision of the node is not E or greater. In this case, the DRAM Hole
+ * Address Register does not exist.
+ *
+ * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
+ * indicating that its contents are not valid.
+ *
+ * The values passed back in *hole_base, *hole_offset, and *hole_size are
+ * complete 32-bit values despite the fact that the bitfields in the DHAR
+ * only represent bits 31-24 of the base and offset values.
+ */
+int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
+ u64 *hole_offset, u64 *hole_size)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u64 base;
+
+ /* only revE and later have the DRAM Hole Address Register */
+ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
+ debugf1(" revision %d for node %d does not support DHAR\n",
+ pvt->ext_model, pvt->mc_node_id);
+ return 1;
+ }
+
+ /* only valid for Fam10h */
+ if (boot_cpu_data.x86 == 0x10 &&
+ (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
+ debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
+ return 1;
+ }
+
+ if ((pvt->dhar & DHAR_VALID) == 0) {
+ debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
+ pvt->mc_node_id);
+ return 1;
+ }
+
+ /* This node has Memory Hoisting */
+
+ /* +------------------+--------------------+--------------------+-----
+ * | memory | DRAM hole | relocated |
+ * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
+ * | | | DRAM hole |
+ * | | | [0x100000000, |
+ * | | | (0x100000000+ |
+ * | | | (0xffffffff-x))] |
+ * +------------------+--------------------+--------------------+-----
+ *
+ * Above is a diagram of physical memory showing the DRAM hole and the
+ * relocated addresses from the DRAM hole. As shown, the DRAM hole
+ * starts at address x (the base address) and extends through address
+ * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
+ * addresses in the hole so that they start at 0x100000000.
+ */
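+ /*
+ * Worked example (hypothetical values): a DHAR base of 0xc0000000 gives
+ * hole_base = 0xc0000000 and hole_size = 0x100000000 - 0xc0000000 =
+ * 0x40000000 (1 GB); that last 1 GB below 4 GB is what gets relocated
+ * to start at 0x100000000.
+ */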
+
+ base = dhar_base(pvt->dhar);
+
+ *hole_base = base;
+ *hole_size = (0x1ull << 32) - base;
+
+ if (boot_cpu_data.x86 > 0xf)
+ *hole_offset = f10_dhar_offset(pvt->dhar);
+ else
+ *hole_offset = k8_dhar_offset(pvt->dhar);
+
+ debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
+ pvt->mc_node_id, (unsigned long)*hole_base,
+ (unsigned long)*hole_offset, (unsigned long)*hole_size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
+
+/*
+ * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
+ * assumed that sys_addr maps to the node given by mci.
+ *
+ * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
+ * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
+ * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
+ * then it is also involved in translating a SysAddr to a DramAddr. Sections
+ * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
+ * These parts of the documentation are unclear. I interpret them as follows:
+ *
+ * When node n receives a SysAddr, it processes the SysAddr as follows:
+ *
+ * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
+ * Limit registers for node n. If the SysAddr is not within the range
+ * specified by the base and limit values, then node n ignores the Sysaddr
+ * (since it does not map to node n). Otherwise continue to step 2 below.
+ *
+ * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
+ * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
+ * the range of relocated addresses (starting at 0x100000000) from the DRAM
+ * hole. If not, skip to step 3 below. Else get the value of the
+ * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
+ * offset defined by this value from the SysAddr.
+ *
+ * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
+ * Base register for node n. To obtain the DramAddr, subtract the base
+ * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
+ */
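+
+/*
+ * Worked example (hypothetical values): assume node n has DRAMBase =
+ * 0x100000000 and no DRAM hole. A SysAddr of 0x180001000 lies within the
+ * node's base/limit range, so step 3 applies and the resulting DramAddr is
+ * 0x180001000 - 0x100000000 = 0x80001000.
+ */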
+static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
+{
+ u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
+ int ret = 0;
+
+ dram_base = get_dram_base(mci);
+
+ ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
+ &hole_size);
+ if (!ret) {
+ if ((sys_addr >= (1ull << 32)) &&
+ (sys_addr < ((1ull << 32) + hole_size))) {
+ /* use DHAR to translate SysAddr to DramAddr */
+ dram_addr = sys_addr - hole_offset;
+
+ debugf2("using DHAR to translate SysAddr 0x%lx to "
+ "DramAddr 0x%lx\n",
+ (unsigned long)sys_addr,
+ (unsigned long)dram_addr);
+
+ return dram_addr;
+ }
+ }
+
+ /*
+ * Translate the SysAddr to a DramAddr as shown near the start of
+ * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
+ * only deals with 40-bit values. Therefore we discard bits 63-40 of
+ * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
+ * discard are all 1s. Otherwise the bits we discard are all 0s. See
+ * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
+ * Programmer's Manual Volume 1 Application Programming.
+ */
+ dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
+
+ debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
+ "DramAddr 0x%lx\n", (unsigned long)sys_addr,
+ (unsigned long)dram_addr);
+ return dram_addr;
+}
+
+/*
+ * @intlv_en is the value of the IntlvEn field from a DRAM Base register
+ * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
+ * for node interleaving.
+ */
+static int num_node_interleave_bits(unsigned intlv_en)
+{
+ static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
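+ /* i.e. IntlvEn 0x1 -> 1 bit, 0x3 -> 2 bits, 0x7 -> 3 bits; anything else -> 0 */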
+ int n;
+
+ BUG_ON(intlv_en > 7);
+ n = intlv_shift_table[intlv_en];
+ return n;
+}
+
+/* Translate the DramAddr given by @dram_addr to an InputAddr. */
+static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
+{
+ struct amd64_pvt *pvt;
+ int intlv_shift;
+ u64 input_addr;
+
+ pvt = mci->pvt_info;
+
+ /*
+ * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
+ * concerning translating a DramAddr to an InputAddr.
+ */
+ intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+ input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
+ (dram_addr & 0xfff);
+
+ debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
+ intlv_shift, (unsigned long)dram_addr,
+ (unsigned long)input_addr);
+
+ return input_addr;
+}
+
+/*
+ * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
+ * assumed that @sys_addr maps to the node given by mci.
+ */
+static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
+{
+ u64 input_addr;
+
+ input_addr =
+ dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
+
+ debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
+ (unsigned long)sys_addr, (unsigned long)input_addr);
+
+ return input_addr;
+}
+
+
+/*
+ * @input_addr is an InputAddr associated with the node represented by mci.
+ * Translate @input_addr to a DramAddr and return the result.
+ */
+static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
+{
+ struct amd64_pvt *pvt;
+ int node_id, intlv_shift;
+ u64 bits, dram_addr;
+ u32 intlv_sel;
+
+ /*
+ * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
+ * shows how to translate a DramAddr to an InputAddr. Here we reverse
+ * this procedure. When translating from a DramAddr to an InputAddr, the
+ * bits used for node interleaving are discarded. Here we recover these
+ * bits from the IntlvSel field of the DRAM Limit register (section
+ * 3.4.4.2) for the node that input_addr is associated with.
+ */
+ pvt = mci->pvt_info;
+ node_id = pvt->mc_node_id;
+ BUG_ON((node_id < 0) || (node_id > 7));
+
+ intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+
+ if (intlv_shift == 0) {
+ debugf1(" InputAddr 0x%lx translates to DramAddr of "
+ "same value\n", (unsigned long)input_addr);
+
+ return input_addr;
+ }
+
+ bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
+ (input_addr & 0xfff);
+
+ intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
+ dram_addr = bits + (intlv_sel << 12);
+
+ debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
+ "(%d node interleave bits)\n", (unsigned long)input_addr,
+ (unsigned long)dram_addr, intlv_shift);
+
+ return dram_addr;
+}
+
+/*
+ * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
+ * @dram_addr to a SysAddr.
+ */
+static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
+ int ret = 0;
+
+ ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
+ &hole_size);
+ if (!ret) {
+ if ((dram_addr >= hole_base) &&
+ (dram_addr < (hole_base + hole_size))) {
+ sys_addr = dram_addr + hole_offset;
+
+ debugf1("using DHAR to translate DramAddr 0x%lx to "
+ "SysAddr 0x%lx\n", (unsigned long)dram_addr,
+ (unsigned long)sys_addr);
+
+ return sys_addr;
+ }
+ }
+
+ amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
+ sys_addr = dram_addr + base;
+
+ /*
+ * The sys_addr we have computed up to this point is a 40-bit value
+ * because the k8 deals with 40-bit values. However, the value we are
+ * supposed to return is a full 64-bit physical address. The AMD
+ * x86-64 architecture specifies that the most significant implemented
+ * address bit through bit 63 of a physical address must be either all
+ * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
+ * 64-bit value below. See section 3.4.2 of AMD publication 24592:
+ * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
+ * Programming.
+ */
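+ /*
+ * Illustrative example: a 40-bit value with bit 39 set, e.g.
+ * 0x8000000000, becomes 0xffffff8000000000 after the sign
+ * extension below; a value with bit 39 clear is left unchanged.
+ */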
+ sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
+
+ debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
+ pvt->mc_node_id, (unsigned long)dram_addr,
+ (unsigned long)sys_addr);
+
+ return sys_addr;
+}
+
+/*
+ * @input_addr is an InputAddr associated with the node given by mci. Translate
+ * @input_addr to a SysAddr.
+ */
+static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
+ u64 input_addr)
+{
+ return dram_addr_to_sys_addr(mci,
+ input_addr_to_dram_addr(mci, input_addr));
+}
+
+/*
+ * Find the minimum and maximum InputAddr values that map to the given @csrow.
+ * Pass back these values in *input_addr_min and *input_addr_max.
+ */
+static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
+ u64 *input_addr_min, u64 *input_addr_max)
+{
+ struct amd64_pvt *pvt;
+ u64 base, mask;
+
+ pvt = mci->pvt_info;
+ BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT));
+
+ base = base_from_dct_base(pvt, csrow);
+ mask = mask_from_dct_mask(pvt, csrow);
+
+ *input_addr_min = base & ~mask;
+ *input_addr_max = base | mask | pvt->dcs_mask_notused;
+}
+
+/*
+ * Extract error address from MCA NB Address Low (section 3.6.4.5) and MCA NB
+ * Address High (section 3.6.4.6) register values and return the result. Address
+ * is located in the info structure (nbeah and nbeal); the encoding is
+ * device-specific.
+ */
+static u64 extract_error_address(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return pvt->ops->get_error_address(mci, info);
+}
+
+
+/* Map the Error address to a PAGE and PAGE OFFSET. */
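+/* e.g., assuming 4 KiB pages (PAGE_SHIFT == 12), an error address of */
+/* 0x12345678 maps to page 0x12345 and offset 0x678. */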
+static inline void error_address_to_page_and_offset(u64 error_address,
+ u32 *page, u32 *offset)
+{
+ *page = (u32) (error_address >> PAGE_SHIFT);
+ *offset = ((u32) error_address) & ~PAGE_MASK;
+}
+
+/*
+ * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
+ * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
+ * of a node that detected an ECC memory error. mci represents the node that
+ * the error address maps to (possibly different from the node that detected
+ * the error). Return the number of the csrow that sys_addr maps to, or -1 on
+ * error.
+ */
+static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
+{
+ int csrow;
+
+ csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
+
+ if (csrow == -1)
+ amd64_mc_printk(mci, KERN_ERR,
+ "Failed to translate InputAddr to csrow for "
+ "address 0x%lx\n", (unsigned long)sys_addr);
+ return csrow;
+}
+
+static int get_channel_from_ecc_syndrome(unsigned short syndrome);
+
+static void amd64_cpu_display_info(struct amd64_pvt *pvt)
+{
+ if (boot_cpu_data.x86 == 0x11)
+ edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
+ else if (boot_cpu_data.x86 == 0x10)
+ edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
+ else if (boot_cpu_data.x86 == 0xf)
+ edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
+ (pvt->ext_model >= OPTERON_CPU_REV_F) ?
+ "Rev F or later" : "Rev E or earlier");
+ else
+ /* we'll hardly ever get here */
+ edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
+}
+
+/*
+ * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
+ * are ECC capable.
+ */
+static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
+{
+ int bit;
+ enum edac_type edac_cap = EDAC_NONE;
+
+ bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
+ ? 19
+ : 17;
+
+ if (pvt->dclr0 & BIT(bit))
+ edac_cap = EDAC_FLAG_SECDED;
+
+ return edac_cap;
+}
+
+
+static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
+ int ganged);
+
+/* Display and decode various NB registers for debug purposes. */
+static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
+{
+ int ganged;
+
+ debugf1(" nbcap:0x%8.08x DctDualCap=%s DualNode=%s 8-Node=%s\n",
+ pvt->nbcap,
+ (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "True" : "False",
+ (pvt->nbcap & K8_NBCAP_DUAL_NODE) ? "True" : "False",
+ (pvt->nbcap & K8_NBCAP_8_NODE) ? "True" : "False");
+ debugf1(" ECC Capable=%s ChipKill Capable=%s\n",
+ (pvt->nbcap & K8_NBCAP_SECDED) ? "True" : "False",
+ (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "True" : "False");
+ debugf1(" DramCfg0-low=0x%08x DIMM-ECC=%s Parity=%s Width=%s\n",
+ pvt->dclr0,
+ (pvt->dclr0 & BIT(19)) ? "Enabled" : "Disabled",
+ (pvt->dclr0 & BIT(8)) ? "Enabled" : "Disabled",
+ (pvt->dclr0 & BIT(11)) ? "128b" : "64b");
+ debugf1(" DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s DIMM Type=%s\n",
+ (pvt->dclr0 & BIT(12)) ? "Y" : "N",
+ (pvt->dclr0 & BIT(13)) ? "Y" : "N",
+ (pvt->dclr0 & BIT(14)) ? "Y" : "N",
+ (pvt->dclr0 & BIT(15)) ? "Y" : "N",
+ (pvt->dclr0 & BIT(16)) ? "UN-Buffered" : "Buffered");
+
+
+ debugf1(" online-spare: 0x%8.08x\n", pvt->online_spare);
+
+ if (boot_cpu_data.x86 == 0xf) {
+ debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
+ pvt->dhar, dhar_base(pvt->dhar),
+ k8_dhar_offset(pvt->dhar));
+ debugf1(" DramHoleValid=%s\n",
+ (pvt->dhar & DHAR_VALID) ? "True" : "False");
+
+ debugf1(" dbam-dkt: 0x%8.08x\n", pvt->dbam0);
+
+ /* everything below this point is Fam10h and above */
+ return;
+
+ } else {
+ debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
+ pvt->dhar, dhar_base(pvt->dhar),
+ f10_dhar_offset(pvt->dhar));
+ debugf1(" DramMemHoistValid=%s DramHoleValid=%s\n",
+ (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) ?
+ "True" : "False",
+ (pvt->dhar & DHAR_VALID) ?
+ "True" : "False");
+ }
+
+ /* Only if NOT ganged does dcl1 have valid info */
+ if (!dct_ganging_enabled(pvt)) {
+ debugf1(" DramCfg1-low=0x%08x DIMM-ECC=%s Parity=%s "
+ "Width=%s\n", pvt->dclr1,
+ (pvt->dclr1 & BIT(19)) ? "Enabled" : "Disabled",
+ (pvt->dclr1 & BIT(8)) ? "Enabled" : "Disabled",
+ (pvt->dclr1 & BIT(11)) ? "128b" : "64b");
+ debugf1(" DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s "
+ "DIMM Type=%s\n",
+ (pvt->dclr1 & BIT(12)) ? "Y" : "N",
+ (pvt->dclr1 & BIT(13)) ? "Y" : "N",
+ (pvt->dclr1 & BIT(14)) ? "Y" : "N",
+ (pvt->dclr1 & BIT(15)) ? "Y" : "N",
+ (pvt->dclr1 & BIT(16)) ? "UN-Buffered" : "Buffered");
+ }
+
+ /*
+ * Determine if ganged and then dump memory sizes for first controller,
+ * and if NOT ganged dump info for 2nd controller.
+ */
+ ganged = dct_ganging_enabled(pvt);
+
+ f10_debug_display_dimm_sizes(0, pvt, ganged);
+
+ if (!ganged)
+ f10_debug_display_dimm_sizes(1, pvt, ganged);
+}
+
+/* Read in both of DBAM registers */
+static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
+{
+ int err = 0;
+ unsigned int reg;
+
+ reg = DBAM0;
+ err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam0);
+ if (err)
+ goto err_reg;
+
+ if (boot_cpu_data.x86 >= 0x10) {
+ reg = DBAM1;
+ err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam1);
+
+ if (err)
+ goto err_reg;
+ }
+
+ return;
+
+err_reg:
+ debugf0("Error reading F2x%03x.\n", reg);
+}
+
+/*
+ * NOTE: CPU Revision Dependent code: Rev E and Rev F
+ *
+ * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
+ * set the shift factor for the DCSB and DCSM values.
+ *
+ * ->dcs_mask_notused, RevE:
+ *
+ * To find the max InputAddr for the csrow, start with the base address and set
+ * all bits that are "don't care" bits in the test at the start of section
+ * 3.5.4 (p. 84).
+ *
+ * The "don't care" bits are all set bits in the mask and all bits in the gaps
+ * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
+ * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
+ * gaps.
+ *
+ * ->dcs_mask_notused, RevF and later:
+ *
+ * To find the max InputAddr for the csrow, start with the base address and set
+ * all bits that are "don't care" bits in the test at the start of NPT section
+ * 4.5.4 (p. 87).
+ *
+ * The "don't care" bits are all set bits in the mask and all bits in the gaps
+ * between bit ranges [36:27] and [21:13].
+ *
+ * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
+ * which are all bits in the above-mentioned gaps.
+ */
+static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
+{
+ if (pvt->ext_model >= OPTERON_CPU_REV_F) {
+ pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
+ pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
+ pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
+ pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
+
+ switch (boot_cpu_data.x86) {
+ case 0xf:
+ pvt->num_dcsm = REV_F_DCSM_COUNT;
+ break;
+
+ case 0x10:
+ pvt->num_dcsm = F10_DCSM_COUNT;
+ break;
+
+ case 0x11:
+ pvt->num_dcsm = F11_DCSM_COUNT;
+ break;
+
+ default:
+ amd64_printk(KERN_ERR, "Unsupported family!\n");
+ break;
+ }
+ } else {
+ pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
+ pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
+ pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
+ pvt->dcs_shift = REV_E_DCS_SHIFT;
+ pvt->num_dcsm = REV_E_DCSM_COUNT;
+ }
+}
+
+/*
+ * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
+ */
+static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
+{
+ int cs, reg, err = 0;
+
+ amd64_set_dct_base_and_mask(pvt);
+
+ for (cs = 0; cs < CHIPSELECT_COUNT; cs++) {
+ reg = K8_DCSB0 + (cs * 4);
+ err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
+ &pvt->dcsb0[cs]);
+ if (unlikely(err))
+ debugf0("Reading K8_DCSB0[%d] failed\n", cs);
+ else
+ debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
+ cs, pvt->dcsb0[cs], reg);
+
+ /* If DCT are NOT ganged, then read in DCT1's base */
+ if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
+ reg = F10_DCSB1 + (cs * 4);
+ err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
+ &pvt->dcsb1[cs]);
+ if (unlikely(err))
+ debugf0("Reading F10_DCSB1[%d] failed\n", cs);
+ else
+ debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, pvt->dcsb1[cs], reg);
+ } else {
+ pvt->dcsb1[cs] = 0;
+ }
+ }
+
+ for (cs = 0; cs < pvt->num_dcsm; cs++) {
+ reg = K8_DCSM0 + (cs * 4);
+ err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
+ &pvt->dcsm0[cs]);
+ if (unlikely(err))
+ debugf0("Reading K8_DCSM0 failed\n");
+ else
+ debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
+ cs, pvt->dcsm0[cs], reg);
+
+ /* If DCT are NOT ganged, then read in DCT1's mask */
+ if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
+ reg = F10_DCSM1 + (cs * 4);
+ err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
+ &pvt->dcsm1[cs]);
+ if (unlikely(err))
+ debugf0("Reading F10_DCSM1[%d] failed\n", cs);
+ else
+ debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, pvt->dcsm1[cs], reg);
+ } else
+ pvt->dcsm1[cs] = 0;
+ }
+}
+
+static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
+{
+ enum mem_type type;
+
+ if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
+ /* Rev F and later */
+ type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
+ } else {
+ /* Rev E and earlier */
+ type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
+ }
+
+ debugf1(" Memory type is: %s\n",
+ (type == MEM_DDR2) ? "MEM_DDR2" :
+ (type == MEM_RDDR2) ? "MEM_RDDR2" :
+ (type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
+
+ return type;
+}
+
+/*
+ * Read the DRAM Configuration Low register. It differs between CG, D & E revs
+ * and the later RevF memory controllers (DDR vs DDR2)
+ *
+ * Return:
+ * number of memory channels in operation
+ * Pass back:
+ * contents of the DCL0_LOW register
+ */
+static int k8_early_channel_count(struct amd64_pvt *pvt)
+{
+ int flag, err = 0;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+ if (err)
+ return err;
+
+ if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) {
+ /* RevF (NPT) and later */
+ flag = pvt->dclr0 & F10_WIDTH_128;
+ } else {
+ /* RevE and earlier */
+ flag = pvt->dclr0 & REVE_WIDTH_128;
+ }
+
+ /* not used */
+ pvt->dclr1 = 0;
+
+ return (flag) ? 2 : 1;
+}
+
+/* extract the ERROR ADDRESS for the K8 CPUs */
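+/* (bits [39:32] come from NB Address High, bits [31:2] from NB Address Low) */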
+static u64 k8_get_error_address(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ return (((u64) (info->nbeah & 0xff)) << 32) +
+ (info->nbeal & ~0x03);
+}
+
+/*
+ * Read the Base and Limit registers for K8 based Memory controllers; extract
+ * fields from the 'raw' reg into separate data fields
+ *
+ * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
+ */
+static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+{
+ u32 low;
+ u32 off = dram << 3; /* 8 bytes between DRAM entries */
+ int err;
+
+ err = pci_read_config_dword(pvt->addr_f1_ctl,
+ K8_DRAM_BASE_LOW + off, &low);
+ if (err)
+ debugf0("Reading K8_DRAM_BASE_LOW failed\n");
+
+ /* Extract parts into separate data entries */
+ pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
+ pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
+ pvt->dram_rw_en[dram] = (low & 0x3);
+
+ err = pci_read_config_dword(pvt->addr_f1_ctl,
+ K8_DRAM_LIMIT_LOW + off, &low);
+ if (err)
+ debugf0("Reading K8_DRAM_LIMIT_LOW failed\n");
+
+ /*
+ * Extract parts into separate data entries. Limit is the HIGHEST memory
+ * location of the region, so lower 24 bits need to be all ones
+ */
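+ /*
+ * Illustrative example: a raw low value of 0x00ff0000 decodes to
+ * ((0x00ff0000 << 8) | 0x00ffffff) == 0xffffffff, i.e. a limit just
+ * below 4 GB.
+ */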
+ pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
+ pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
+ pvt->dram_DstNode[dram] = (low & 0x7);
+}
+
+static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info,
+ u64 SystemAddress)
+{
+ struct mem_ctl_info *src_mci;
+ unsigned short syndrome;
+ int channel, csrow;
+ u32 page, offset;
+
+ /* Extract the syndrome parts and form a 16-bit syndrome */
+ syndrome = EXTRACT_HIGH_SYNDROME(info->nbsl) << 8;
+ syndrome |= EXTRACT_LOW_SYNDROME(info->nbsh);
+
+ /* CHIPKILL enabled */
+ if (info->nbcfg & K8_NBCFG_CHIPKILL) {
+ channel = get_channel_from_ecc_syndrome(syndrome);
+ if (channel < 0) {
+ /*
+ * Syndrome didn't map, so we don't know which of the
+ * 2 DIMMs is in error. So we need to ID 'both' of them
+ * as suspect.
+ */
+ amd64_mc_printk(mci, KERN_WARNING,
+ "unknown syndrome 0x%x - possible error "
+ "reporting race\n", syndrome);
+ edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ return;
+ }
+ } else {
+ /*
+ * non-chipkill ecc mode
+ *
+ * The k8 documentation is unclear about how to determine the
+ * channel number when using non-chipkill memory. This method
+ * was obtained from email communication with someone at AMD.
+ * (Wish the email was placed in this comment - norsk)
+ */
+ channel = ((SystemAddress & BIT(3)) != 0);
+ }
+
+ /*
+ * Find out which node the error address belongs to. This may be
+ * different from the node that detected the error.
+ */
+ src_mci = find_mc_by_sys_addr(mci, SystemAddress);
+ if (!src_mci) {
+ amd64_mc_printk(mci, KERN_ERR,
+ "failed to map error address 0x%lx to a node\n",
+ (unsigned long)SystemAddress);
+ edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ return;
+ }
+
+ /* Now map the SystemAddress to a CSROW */
+ csrow = sys_addr_to_csrow(src_mci, SystemAddress);
+ if (csrow < 0) {
+ edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
+ } else {
+ error_address_to_page_and_offset(SystemAddress, &page, &offset);
+
+ edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
+ channel, EDAC_MOD_STR);
+ }
+}
+
+/*
+ * Determine the number of PAGES for this DIMM's size based on its DRAM
+ * Address Mapping.
+ *
+ * The first step is to calculate the number of bits to shift a value of 1
+ * left to indicate how many pages. Start with the DBAM value as the starting
+ * bits, then proceed to adjust those shift bits, based on CPU rev and the
+ * table. See the BKDG on the DBAM.
+ */
+static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+{
+ int nr_pages;
+
+ if (pvt->ext_model >= OPTERON_CPU_REV_F) {
+ nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
+ } else {
+ /*
+ * RevE and less section; this line is tricky. It collapses the
+ * table used by RevD and later to one that matches revisions CG
+ * and earlier.
+ */
+ dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
+ (dram_map > 8 ? 4 : (dram_map > 5 ?
+ 3 : (dram_map > 2 ? 1 : 0))) : 0;
+
+ /* 25 shift is 32MiB minimum DIMM size in RevE and prior */
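+ /* (e.g., dram_map == 0 with 4 KiB pages: 1 << (0 + 25 - 12) = 8192 pages, i.e. 32 MiB) */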
+ nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
+ }
+
+ return nr_pages;
+}
+
+/*
+ * Get the number of DCT channels in use.
+ *
+ * Return:
+ * number of Memory Channels in operation
+ * Pass back:
+ * contents of the DCL0_LOW register
+ */
+static int f10_early_channel_count(struct amd64_pvt *pvt)
+{
+ int err = 0, channels = 0;
+ u32 dbam;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
+ if (err)
+ goto err_reg;
+
+ /* If we are in 128 bit mode, then we are using 2 channels */
+ if (pvt->dclr0 & F10_WIDTH_128) {
+ debugf0("Data WIDTH is 128 bits - 2 channels\n");
+ channels = 2;
+ return channels;
+ }
+
+ /*
+ * Need to check if we are in UN-ganged mode: in that case there are 2
+ * channels, but they are NOT in 128 bit mode and thus the above 'dclr0'
+ * status bit will be OFF.
+ *
+ * Need to check DCT0[0] and DCT1[0] to see if only one of them has its
+ * CSEnable bit on. If so, it is the SINGLE DIMM case.
+ */
+ debugf0("Data WIDTH is NOT 128 bits - need more decoding\n");
+
+ /*
+ * Check DRAM Bank Address Mapping values for each DIMM to see if there
+ * is more than just one DIMM present in unganged mode. Need to check
+ * both controllers since DIMMs can be placed in either one.
+ */
+ channels = 0;
+ err = pci_read_config_dword(pvt->dram_f2_ctl, DBAM0, &dbam);
+ if (err)
+ goto err_reg;
+
+ if (DBAM_DIMM(0, dbam) > 0)
+ channels++;
+ if (DBAM_DIMM(1, dbam) > 0)
+ channels++;
+ if (DBAM_DIMM(2, dbam) > 0)
+ channels++;
+ if (DBAM_DIMM(3, dbam) > 0)
+ channels++;
+
+ /* If more than 2 DIMMs are present, then we have 2 channels */
+ if (channels > 2)
+ channels = 2;
+ else if (channels == 0) {
+ /* No DIMMs on DCT0, so look at DCT1 */
+ err = pci_read_config_dword(pvt->dram_f2_ctl, DBAM1, &dbam);
+ if (err)
+ goto err_reg;
+
+ if (DBAM_DIMM(0, dbam) > 0)
+ channels++;
+ if (DBAM_DIMM(1, dbam) > 0)
+ channels++;
+ if (DBAM_DIMM(2, dbam) > 0)
+ channels++;
+ if (DBAM_DIMM(3, dbam) > 0)
+ channels++;
+
+ if (channels > 2)
+ channels = 2;
+ }
+
+ /* If we found ALL 0 values, then assume just ONE DIMM-ONE Channel */
+ if (channels == 0)
+ channels = 1;
+
+ debugf0("DIMM count= %d\n", channels);
+
+ return channels;
+
+err_reg:
+ return -1;
+
+}
+
+static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+{
+ return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
+}
+
+/* Enable extended configuration access via 0xCF8 feature */
+static void amd64_setup(struct amd64_pvt *pvt)
+{
+ u32 reg;
+
+ pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+
+ pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
+ reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
+ pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
+}
+
+/* Restore the extended configuration access via 0xCF8 feature */
+static void amd64_teardown(struct amd64_pvt *pvt)
+{
+ u32 reg;
+
+ pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+
+ reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
+ if (pvt->flags.cf8_extcfg)
+ reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
+ pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
+}
+
+static u64 f10_get_error_address(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ return (((u64) (info->nbeah & 0xffff)) << 32) +
+ (info->nbeal & ~0x01);
+}
+
+/*
+ * Read the Base and Limit registers for F10 based Memory controllers. Extract
+ * fields from the 'raw' reg into separate data fields.
+ *
+ * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
+ */
+static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+{
+ u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
+
+ low_offset = K8_DRAM_BASE_LOW + (dram << 3);
+ high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
+
+ /* read the 'raw' DRAM BASE Address register */
+ pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_base);
+
+ /* Read from the ECS data register */
+ pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_base);
+
+ /* Extract parts into separate data entries */
+ pvt->dram_rw_en[dram] = (low_base & 0x3);
+
+ if (pvt->dram_rw_en[dram] == 0)
+ return;
+
+ pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
+
+ pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) |
+ ((u64) low_base & 0xFFFF0000))) << 8;
+
+ low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
+ high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
+
+ /* read the 'raw' LIMIT registers */
+ pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_limit);
+
+ /* Read from the ECS data register for the HIGH portion */
+ pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_limit);
+
+ debugf0(" HW Regs: BASE=0x%08x-%08x LIMIT= 0x%08x-%08x\n",
+ high_base, low_base, high_limit, low_limit);
+
+ pvt->dram_DstNode[dram] = (low_limit & 0x7);
+ pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
+
+ /*
+ * Extract address values and form a LIMIT address. Limit is the HIGHEST
+ * memory location of the region, so low 24 bits need to be all ones.
+ */
+ low_limit |= 0x0000FFFF;
+ pvt->dram_limit[dram] =
+ ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF);
+}
+
+static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
+{
+ int err = 0;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
+ &pvt->dram_ctl_select_low);
+ if (err) {
+ debugf0("Reading F10_DCTL_SEL_LOW failed\n");
+ } else {
+ debugf0("DRAM_DCTL_SEL_LOW=0x%x DctSelBaseAddr=0x%x\n",
+ pvt->dram_ctl_select_low, dct_sel_baseaddr(pvt));
+
+ debugf0(" DRAM DCTs are=%s DRAM Is=%s DRAM-Ctl-"
+ "sel-hi-range=%s\n",
+ (dct_ganging_enabled(pvt) ? "GANGED" : "NOT GANGED"),
+ (dct_dram_enabled(pvt) ? "Enabled" : "Disabled"),
+ (dct_high_range_enabled(pvt) ? "Enabled" : "Disabled"));
+
+ debugf0(" DctDatIntLv=%s MemCleared=%s DctSelIntLvAddr=0x%x\n",
+ (dct_data_intlv_enabled(pvt) ? "Enabled" : "Disabled"),
+ (dct_memory_cleared(pvt) ? "True " : "False "),
+ dct_sel_interleave_addr(pvt));
+ }
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
+ &pvt->dram_ctl_select_high);
+ if (err)
+ debugf0("Reading F10_DCTL_SEL_HIGH failed\n");
+}
+
+/*
+ * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
+ * Interleaving Modes.
+ */
+static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+ int hi_range_sel, u32 intlv_en)
+{
+ u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
+
+ if (dct_ganging_enabled(pvt))
+ cs = 0;
+ else if (hi_range_sel)
+ cs = dct_sel_high;
+ else if (dct_interleave_enabled(pvt)) {
+ /*
+ * see F2x110[DctSelIntLvAddr] - channel interleave mode
+ */
+ if (dct_sel_interleave_addr(pvt) == 0)
+ cs = sys_addr >> 6 & 1;
+ else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
+ temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+
+ if (dct_sel_interleave_addr(pvt) & 1)
+ cs = (sys_addr >> 9 & 1) ^ temp;
+ else
+ cs = (sys_addr >> 6 & 1) ^ temp;
+ } else if (intlv_en & 4)
+ cs = sys_addr >> 15 & 1;
+ else if (intlv_en & 2)
+ cs = sys_addr >> 14 & 1;
+ else if (intlv_en & 1)
+ cs = sys_addr >> 13 & 1;
+ else
+ cs = sys_addr >> 12 & 1;
+ } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
+ cs = ~dct_sel_high & 1;
+ else
+ cs = 0;
+
+ return cs;
+}
+
+static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
+{
+ if (intlv_en == 1)
+ return 1;
+ else if (intlv_en == 3)
+ return 2;
+ else if (intlv_en == 7)
+ return 3;
+
+ return 0;
+}
+
+/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
+static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
+ u32 dct_sel_base_addr,
+ u64 dct_sel_base_off,
+ u32 hole_valid, u32 hole_off,
+ u64 dram_base)
+{
+ u64 chan_off;
+
+ if (hi_range_sel) {
+ if (!(dct_sel_base_addr & 0xFFFFF800) &&
+ hole_valid && (sys_addr >= 0x100000000ULL))
+ chan_off = hole_off << 16;
+ else
+ chan_off = dct_sel_base_off;
+ } else {
+ if (hole_valid && (sys_addr >= 0x100000000ULL))
+ chan_off = hole_off << 16;
+ else
+ chan_off = dram_base & 0xFFFFF8000000ULL;
+ }
+
+ return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
+ (chan_off & 0x0000FFFFFF800000ULL);
+}
+
+/* Hack for the time being - Can we get this from BIOS?? */
+#define CH0SPARE_RANK 0
+#define CH1SPARE_RANK 1
+
+/*
+ * Check if the csrow passed in is marked as SPARED; if so, return the new
+ * spare row.
+ */
+static inline int f10_process_possible_spare(int csrow,
+ u32 cs, struct amd64_pvt *pvt)
+{
+ u32 swap_done;
+ u32 bad_dram_cs;
+
+ /* Depending on channel, isolate respective SPARING info */
+ if (cs) {
+ swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
+ bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
+ if (swap_done && (csrow == bad_dram_cs))
+ csrow = CH1SPARE_RANK;
+ } else {
+ swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
+ bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
+ if (swap_done && (csrow == bad_dram_cs))
+ csrow = CH0SPARE_RANK;
+ }
+ return csrow;
+}
+
+/*
+ * Iterate over the DRAM DCT "base" and "mask" registers looking for a
+ * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
+ *
+ * Return:
+ * -EINVAL: NOT FOUND
+ * 0..csrow = Chip-Select Row
+ */
+static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
+{
+ struct mem_ctl_info *mci;
+ struct amd64_pvt *pvt;
+ u32 cs_base, cs_mask;
+ int cs_found = -EINVAL;
+ int csrow;
+
+ mci = mci_lookup[nid];
+ if (!mci)
+ return cs_found;
+
+ pvt = mci->pvt_info;
+
+ debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
+
+ for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
+
+ cs_base = amd64_get_dct_base(pvt, cs, csrow);
+ if (!(cs_base & K8_DCSB_CS_ENABLE))
+ continue;
+
+ /*
+ * We have an ENABLED CSROW; isolate just the MASK bits of the
+ * target: [28:19] and [13:5], which map to [36:27] and [21:13]
+ * of the actual address.
+ */
+ cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
+
+ /*
+ * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
+ * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
+ */
+ cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
+
+ debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
+ csrow, cs_base, cs_mask);
+
+ cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
+
+ debugf1(" Final CSMask=0x%x\n", cs_mask);
+ debugf1(" (InputAddr & ~CSMask)=0x%x "
+ "(CSBase & ~CSMask)=0x%x\n",
+ (in_addr & ~cs_mask), (cs_base & ~cs_mask));
+
+ if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
+ cs_found = f10_process_possible_spare(csrow, cs, pvt);
+
+ debugf1(" MATCH csrow=%d\n", cs_found);
+ break;
+ }
+ }
+ return cs_found;
+}
+
+/* For a given @dram_range, check if @sys_addr falls within it. */
+static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
+ u64 sys_addr, int *nid, int *chan_sel)
+{
+ int node_id, cs_found = -EINVAL, high_range = 0;
+ u32 intlv_en, intlv_sel, intlv_shift, hole_off;
+ u32 hole_valid, tmp, dct_sel_base, channel;
+ u64 dram_base, chan_addr, dct_sel_base_off;
+
+ dram_base = pvt->dram_base[dram_range];
+ intlv_en = pvt->dram_IntlvEn[dram_range];
+
+ node_id = pvt->dram_DstNode[dram_range];
+ intlv_sel = pvt->dram_IntlvSel[dram_range];
+
+ debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
+ dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
+
+ /*
+ * This assumes that one node's DHAR is the same as all the other
+ * nodes' DHAR.
+ */
+ hole_off = (pvt->dhar & 0x0000FF80);
+ hole_valid = (pvt->dhar & 0x1);
+ dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
+
+ debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
+ hole_off, hole_valid, intlv_sel);
+
+ if (intlv_en &&
+ (intlv_sel != ((sys_addr >> 12) & intlv_en)))
+ return -EINVAL;
+
+ dct_sel_base = dct_sel_baseaddr(pvt);
+
+ /*
+ * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
+ * select between DCT0 and DCT1.
+ */
+ if (dct_high_range_enabled(pvt) &&
+ !dct_ganging_enabled(pvt) &&
+ ((sys_addr >> 27) >= (dct_sel_base >> 11)))
+ high_range = 1;
+
+ channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
+
+ chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
+ dct_sel_base_off, hole_valid,
+ hole_off, dram_base);
+
+ intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
+
+ /* remove Node ID (in case of memory interleaving) */
+ tmp = chan_addr & 0xFC0;
+
+ chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
+
+ /* remove channel interleave and hash */
+ if (dct_interleave_enabled(pvt) &&
+ !dct_high_range_enabled(pvt) &&
+ !dct_ganging_enabled(pvt)) {
+ if (dct_sel_interleave_addr(pvt) != 1)
+ chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
+ else {
+ tmp = chan_addr & 0xFC0;
+ chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
+ | tmp;
+ }
+ }
+
+ debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
+ chan_addr, (u32)(chan_addr >> 8));
+
+ cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
+
+ if (cs_found >= 0) {
+ *nid = node_id;
+ *chan_sel = channel;
+ }
+ return cs_found;
+}
+
+static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
+ int *node, int *chan_sel)
+{
+ int dram_range, cs_found = -EINVAL;
+ u64 dram_base, dram_limit;
+
+ for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
+
+ if (!pvt->dram_rw_en[dram_range])
+ continue;
+
+ dram_base = pvt->dram_base[dram_range];
+ dram_limit = pvt->dram_limit[dram_range];
+
+ if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
+
+ cs_found = f10_match_to_this_node(pvt, dram_range,
+ sys_addr, node,
+ chan_sel);
+ if (cs_found >= 0)
+ break;
+ }
+ }
+ return cs_found;
+}
+
+/*
+ * This is the F10h reference code from AMD to map a @sys_addr to NodeID,
+ * CSROW, Channel.
+ *
+ * The @sys_addr is usually an error address received from the hardware.
+ */
+static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info,
+ u64 sys_addr)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 page, offset;
+ unsigned short syndrome;
+ int nid, csrow, chan = 0;
+
+ csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
+
+ if (csrow >= 0) {
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
+
+ syndrome = EXTRACT_HIGH_SYNDROME(info->nbsl) << 8;
+ syndrome |= EXTRACT_LOW_SYNDROME(info->nbsh);
+
+ /*
+ * Is CHIPKILL on? If so, then we can attempt to use the
+ * syndrome to isolate which channel the error was on.
+ */
+ if (pvt->nbcfg & K8_NBCFG_CHIPKILL)
+ chan = get_channel_from_ecc_syndrome(syndrome);
+
+ if (chan >= 0) {
+ edac_mc_handle_ce(mci, page, offset, syndrome,
+ csrow, chan, EDAC_MOD_STR);
+ } else {
+ /*
+ * Channel unknown, report all channels on this
+ * CSROW as failed.
+ */
+ for (chan = 0; chan < mci->csrows[csrow].nr_channels;
+ chan++) {
+ edac_mc_handle_ce(mci, page, offset,
+ syndrome,
+ csrow, chan,
+ EDAC_MOD_STR);
+ }
+ }
+
+ } else {
+ edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ }
+}
+
+/*
+ * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a shift
+ * table (revf_quad_ddr2_shift) which starts at 128MB DIMM size. Index of 0
+ * indicates an empty DIMM slot, as reported by Hardware on empty slots.
+ *
+ * Normalize to 128MB by subtracting the 27-bit shift.
+ */
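+/*
+ * For instance, a (hypothetical) shift-table value of 28 for a given index
+ * would yield 128 << (28 - 27) = 256 MB.
+ */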
+static int map_dbam_to_csrow_size(int index)
+{
+ int mega_bytes = 0;
+
+ if (index > 0 && index <= DBAM_MAX_VALUE)
+ mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27)));
+
+ return mega_bytes;
+}
+
+/*
+ * Debug routine to display the memory sizes of a DIMM (ganged or not) and its
+ * CSROWs as well.
+ */
+static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
+ int ganged)
+{
+ int dimm, size0, size1;
+ u32 dbam;
+ u32 *dcsb;
+
+ debugf1(" dbam%d: 0x%8.08x CSROW is %s\n", ctrl,
+ ctrl ? pvt->dbam1 : pvt->dbam0,
+ ganged ? "GANGED - dbam1 not used" : "NON-GANGED");
+
+ dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
+ dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
+
+ /* Dump memory sizes for DIMM and its CSROWs */
+ for (dimm = 0; dimm < 4; dimm++) {
+
+ size0 = 0;
+ if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
+ size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
+
+ size1 = 0;
+ if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
+ size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
+
+ debugf1(" CTRL-%d DIMM-%d=%5dMB CSROW-%d=%5dMB "
+ "CSROW-%d=%5dMB\n",
+ ctrl,
+ dimm,
+ size0 + size1,
+ dimm * 2,
+ size0,
+ dimm * 2 + 1,
+ size1);
+ }
+}
+
+/*
+ * Very early hardware probe on pci_probe thread to determine if this module
+ * supports the hardware.
+ *
+ * Return:
+ * 0 for OK
+ * 1 for error
+ */
+static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
+{
+ int ret = 0;
+
+ /*
+ * If we are on a DDR3 machine, we don't support it
+ * properly at this time.
+ */
+ if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
+ (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
+
+ amd64_printk(KERN_WARNING,
+ "%s() This machine is running with DDR3 memory. "
+ "This is not currently supported. "
+ "DCHR0=0x%x DCHR1=0x%x\n",
+ __func__, pvt->dchr0, pvt->dchr1);
+
+ amd64_printk(KERN_WARNING,
+ " Contact '%s' module MAINTAINER to help add"
+ " support.\n",
+ EDAC_MOD_STR);
+
+ ret = 1;
+
+ }
+ return ret;
+}
+
+/*
+ * There currently are 3 types of MC devices for AMD Athlon/Opterons
+ * (as per PCI DEVICE_IDs):
+ *
+ * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI
+ * DEVICE ID, even though there are differences between the different Revisions
+ * (CG, D, E, F).
+ *
+ * Family F10h and F11h.
+ *
+ */
+static struct amd64_family_type amd64_family_types[] = {
+ [K8_CPUS] = {
+ .ctl_name = "RevF",
+ .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
+ .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
+ .ops = {
+ .early_channel_count = k8_early_channel_count,
+ .get_error_address = k8_get_error_address,
+ .read_dram_base_limit = k8_read_dram_base_limit,
+ .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
+ .dbam_map_to_pages = k8_dbam_map_to_pages,
+ }
+ },
+ [F10_CPUS] = {
+ .ctl_name = "Family 10h",
+ .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
+ .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
+ .ops = {
+ .probe_valid_hardware = f10_probe_valid_hardware,
+ .early_channel_count = f10_early_channel_count,
+ .get_error_address = f10_get_error_address,
+ .read_dram_base_limit = f10_read_dram_base_limit,
+ .read_dram_ctl_register = f10_read_dram_ctl_register,
+ .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+ .dbam_map_to_pages = f10_dbam_map_to_pages,
+ }
+ },
+ [F11_CPUS] = {
+ .ctl_name = "Family 11h",
+ .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
+ .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
+ .ops = {
+ .probe_valid_hardware = f10_probe_valid_hardware,
+ .early_channel_count = f10_early_channel_count,
+ .get_error_address = f10_get_error_address,
+ .read_dram_base_limit = f10_read_dram_base_limit,
+ .read_dram_ctl_register = f10_read_dram_ctl_register,
+ .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+ .dbam_map_to_pages = f10_dbam_map_to_pages,
+ }
+ },
+};
+
+static struct pci_dev *pci_get_related_function(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *related)
+{
+ struct pci_dev *dev = NULL;
+
+ dev = pci_get_device(vendor, device, dev);
+ while (dev) {
+ if ((dev->bus->number == related->bus->number) &&
+ (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
+ break;
+ dev = pci_get_device(vendor, device, dev);
+ }
+
+ return dev;
+}
+
+/*
+ * syndrome mapping table for ECC ChipKill devices
+ *
+ * The comment in each row is the token (nibble) number that is in error.
+ * The least significant nibble of the syndrome is the mask for the bits
+ * that are in error (need to be toggled) for the particular nibble.
+ *
+ * Each row contains 16 entries.
+ * The first entry (0th) is the channel number for that row of syndromes.
+ * The remaining 15 entries are the syndromes for the respective Error
+ * bit mask index.
+ *
+ * The 1st index entry is the 0x0001 mask, indicating that the rightmost bit
+ * is the bit in error.
+ * The 2nd index entry is 0x0010, indicating that the second bit is damaged.
+ * The 3rd index entry is 0x0011, indicating that the rightmost 2 bits
+ * are damaged.
+ * And so on until index 15, 0x1111, whose entry has the syndrome
+ * indicating that all 4 bits are damaged.
+ *
+ * A search is performed on this table looking for a given syndrome.
+ *
+ * See the AMD documentation for ECC syndromes. This ECC table is valid
+ * across all the versions of the AMD64 processors.
+ *
+ * A fast lookup uses the least significant four bits of the 16-bit syndrome
+ * as a COLUMN index, then searches all ROWS of that column, looking for a
+ * match with the input syndrome. The ROW value will be the token number.
+ *
+ * The 0th entry in that row can be returned as the CHANNEL (0 or 1) of this
+ * error.
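+ *
+ * Worked example (illustrative, using values from the table below): for
+ * syndrome 0x5d31 the low nibble is 0x1, so column 1 is scanned; the match
+ * is found in row 1, i.e. token 1 is in error, and entry 0 of that row
+ * identifies channel 0.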
+ */
+#define NUMBER_ECC_ROWS 36
+static const unsigned short ecc_chipkill_syndromes[NUMBER_ECC_ROWS][16] = {
+ /* Channel 0 syndromes */
+ {/*0*/ 0, 0xe821, 0x7c32, 0x9413, 0xbb44, 0x5365, 0xc776, 0x2f57,
+ 0xdd88, 0x35a9, 0xa1ba, 0x499b, 0x66cc, 0x8eed, 0x1afe, 0xf2df },
+ {/*1*/ 0, 0x5d31, 0xa612, 0xfb23, 0x9584, 0xc8b5, 0x3396, 0x6ea7,
+ 0xeac8, 0xb7f9, 0x4cda, 0x11eb, 0x7f4c, 0x227d, 0xd95e, 0x846f },
+ {/*2*/ 0, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f },
+ {/*3*/ 0, 0x2021, 0x3032, 0x1013, 0x4044, 0x6065, 0x7076, 0x5057,
+ 0x8088, 0xa0a9, 0xb0ba, 0x909b, 0xc0cc, 0xe0ed, 0xf0fe, 0xd0df },
+ {/*4*/ 0, 0x5041, 0xa082, 0xf0c3, 0x9054, 0xc015, 0x30d6, 0x6097,
+ 0xe0a8, 0xb0e9, 0x402a, 0x106b, 0x70fc, 0x20bd, 0xd07e, 0x803f },
+ {/*5*/ 0, 0xbe21, 0xd732, 0x6913, 0x2144, 0x9f65, 0xf676, 0x4857,
+ 0x3288, 0x8ca9, 0xe5ba, 0x5b9b, 0x13cc, 0xaded, 0xc4fe, 0x7adf },
+ {/*6*/ 0, 0x4951, 0x8ea2, 0xc7f3, 0x5394, 0x1ac5, 0xdd36, 0x9467,
+ 0xa1e8, 0xe8b9, 0x2f4a, 0x661b, 0xf27c, 0xbb2d, 0x7cde, 0x358f },
+ {/*7*/ 0, 0x74e1, 0x9872, 0xec93, 0xd6b4, 0xa255, 0x4ec6, 0x3a27,
+ 0x6bd8, 0x1f39, 0xf3aa, 0x874b, 0xbd6c, 0xc98d, 0x251e, 0x51ff },
+ {/*8*/ 0, 0x15c1, 0x2a42, 0x3f83, 0xcef4, 0xdb35, 0xe4b6, 0xf177,
+ 0x4758, 0x5299, 0x6d1a, 0x78db, 0x89ac, 0x9c6d, 0xa3ee, 0xb62f },
+ {/*9*/ 0, 0x3d01, 0x1602, 0x2b03, 0x8504, 0xb805, 0x9306, 0xae07,
+ 0xca08, 0xf709, 0xdc0a, 0xe10b, 0x4f0c, 0x720d, 0x590e, 0x640f },
+ {/*a*/ 0, 0x9801, 0xec02, 0x7403, 0x6b04, 0xf305, 0x8706, 0x1f07,
+ 0xbd08, 0x2509, 0x510a, 0xc90b, 0xd60c, 0x4e0d, 0x3a0e, 0xa20f },
+ {/*b*/ 0, 0xd131, 0x6212, 0xb323, 0x3884, 0xe9b5, 0x5a96, 0x8ba7,
+ 0x1cc8, 0xcdf9, 0x7eda, 0xafeb, 0x244c, 0xf57d, 0x465e, 0x976f },
+ {/*c*/ 0, 0xe1d1, 0x7262, 0x93b3, 0xb834, 0x59e5, 0xca56, 0x2b87,
+ 0xdc18, 0x3dc9, 0xae7a, 0x4fab, 0x542c, 0x85fd, 0x164e, 0xf79f },
+ {/*d*/ 0, 0x6051, 0xb0a2, 0xd0f3, 0x1094, 0x70c5, 0xa036, 0xc067,
+ 0x20e8, 0x40b9, 0x904a, 0x601b, 0x307c, 0x502d, 0x80de, 0xe08f },
+ {/*e*/ 0, 0xa4c1, 0xf842, 0x5c83, 0xe6f4, 0x4235, 0x1eb6, 0xba77,
+ 0x7b58, 0xdf99, 0x831a, 0x27db, 0x9dac, 0x396d, 0x65ee, 0xc12f },
+ {/*f*/ 0, 0x11c1, 0x2242, 0x3383, 0xc8f4, 0xd935, 0xeab6, 0xfb77,
+ 0x4c58, 0x5d99, 0x6e1a, 0x7fdb, 0x84ac, 0x956d, 0xa6ee, 0xb72f },
+
+ /* Channel 1 syndromes */
+ {/*10*/ 1, 0x45d1, 0x8a62, 0xcfb3, 0x5e34, 0x1be5, 0xd456, 0x9187,
+ 0xa718, 0xe2c9, 0x2d7a, 0x68ab, 0xf92c, 0xbcfd, 0x734e, 0x369f },
+ {/*11*/ 1, 0x63e1, 0xb172, 0xd293, 0x14b4, 0x7755, 0xa5c6, 0xc627,
+ 0x28d8, 0x4b39, 0x99aa, 0xfa4b, 0x3c6c, 0x5f8d, 0x8d1e, 0xeeff },
+ {/*12*/ 1, 0xb741, 0xd982, 0x6ec3, 0x2254, 0x9515, 0xfbd6, 0x4c97,
+ 0x33a8, 0x84e9, 0xea2a, 0x5d6b, 0x11fc, 0xa6bd, 0xc87e, 0x7f3f },
+ {/*13*/ 1, 0xdd41, 0x6682, 0xbbc3, 0x3554, 0xe815, 0x53d6, 0xce97,
+ 0x1aa8, 0xc7e9, 0x7c2a, 0xa1fb, 0x2ffc, 0xf2bd, 0x497e, 0x943f },
+ {/*14*/ 1, 0x2bd1, 0x3d62, 0x16b3, 0x4f34, 0x64e5, 0x7256, 0x5987,
+ 0x8518, 0xaec9, 0xb87a, 0x93ab, 0xca2c, 0xe1fd, 0xf74e, 0xdc9f },
+ {/*15*/ 1, 0x83c1, 0xc142, 0x4283, 0xa4f4, 0x2735, 0x65b6, 0xe677,
+ 0xf858, 0x7b99, 0x391a, 0xbadb, 0x5cac, 0xdf6d, 0x9dee, 0x1e2f },
+ {/*16*/ 1, 0x8fd1, 0xc562, 0x4ab3, 0xa934, 0x26e5, 0x6c56, 0xe387,
+ 0xfe18, 0x71c9, 0x3b7a, 0xb4ab, 0x572c, 0xd8fd, 0x924e, 0x1d9f },
+ {/*17*/ 1, 0x4791, 0x89e2, 0xce73, 0x5264, 0x15f5, 0xdb86, 0x9c17,
+ 0xa3b8, 0xe429, 0x2a5a, 0x6dcb, 0xf1dc, 0xb64d, 0x783e, 0x3faf },
+ {/*18*/ 1, 0x5781, 0xa9c2, 0xfe43, 0x92a4, 0xc525, 0x3b66, 0x6ce7,
+ 0xe3f8, 0xb479, 0x4a3a, 0x1dbb, 0x715c, 0x26dd, 0xd89e, 0x8f1f },
+ {/*19*/ 1, 0xbf41, 0xd582, 0x6ac3, 0x2954, 0x9615, 0xfcd6, 0x4397,
+ 0x3ea8, 0x81e9, 0xeb2a, 0x546b, 0x17fc, 0xa8bd, 0xc27e, 0x7d3f },
+ {/*1a*/ 1, 0x9891, 0xe1e2, 0x7273, 0x6464, 0xf7f5, 0x8586, 0x1617,
+ 0xb8b8, 0x2b29, 0x595a, 0xcacb, 0xdcdc, 0x4f4d, 0x3d3e, 0xaeaf },
+ {/*1b*/ 1, 0xcce1, 0x4472, 0x8893, 0xfdb4, 0x3f55, 0xb9c6, 0x7527,
+ 0x56d8, 0x9a39, 0x12aa, 0xde4b, 0xab6c, 0x678d, 0xef1e, 0x23ff },
+ {/*1c*/ 1, 0xa761, 0xf9b2, 0x5ed3, 0xe214, 0x4575, 0x1ba6, 0xbcc7,
+ 0x7328, 0xd449, 0x8a9a, 0x2dfb, 0x913c, 0x365d, 0x688e, 0xcfef },
+ {/*1d*/ 1, 0xff61, 0x55b2, 0xaad3, 0x7914, 0x8675, 0x2ca6, 0xd3c7,
+ 0x9e28, 0x6149, 0xcb9a, 0x34fb, 0xe73c, 0x185d, 0xb28e, 0x4def },
+ {/*1e*/ 1, 0x5451, 0xa8a2, 0xfcf3, 0x9694, 0xc2c5, 0x3e36, 0x6a67,
+ 0xebe8, 0xbfb9, 0x434a, 0x171b, 0x7d7c, 0x292d, 0xd5de, 0x818f },
+ {/*1f*/ 1, 0x6fc1, 0xb542, 0xda83, 0x19f4, 0x7635, 0xacb6, 0xc377,
+ 0x2e58, 0x4199, 0x9b1a, 0xf4db, 0x37ac, 0x586d, 0x82ee, 0xed2f },
+
+ /* ECC bits are also in the set of tokens and they too can go bad.
+ * The first 2 rows cover channel 0, while the second 2 cover channel 1.
+ */
+ {/*20*/ 0, 0xbe01, 0xd702, 0x6903, 0x2104, 0x9f05, 0xf606, 0x4807,
+ 0x3208, 0x8c09, 0xe50a, 0x5b0b, 0x130c, 0xad0d, 0xc40e, 0x7a0f },
+ {/*21*/ 0, 0x4101, 0x8202, 0xc303, 0x5804, 0x1905, 0xda06, 0x9b07,
+ 0xac08, 0xed09, 0x2e0a, 0x6f0b, 0x640c, 0xb50d, 0x760e, 0x370f },
+ {/*22*/ 1, 0xc441, 0x4882, 0x8cc3, 0xf654, 0x3215, 0xbed6, 0x7a97,
+ 0x5ba8, 0x9fe9, 0x132a, 0xd76b, 0xadfc, 0x69bd, 0xe57e, 0x213f },
+ {/*23*/ 1, 0x7621, 0x9b32, 0xed13, 0xda44, 0xac65, 0x4176, 0x3757,
+ 0x6f88, 0x19a9, 0xf4ba, 0x829b, 0xb5cc, 0xc3ed, 0x2efe, 0x58df }
+};
+
+/*
+ * Given the syndrome argument, scan each of the channel tables for a syndrome
+ * match. Depending on which table it is found, return the channel number.
+ */
+static int get_channel_from_ecc_syndrome(unsigned short syndrome)
+{
+ int row;
+ int column;
+
+ /* Determine column to scan */
+ column = syndrome & 0xF;
+
+ /* Scan all rows, looking for syndrome, or end of table */
+ for (row = 0; row < NUMBER_ECC_ROWS; row++) {
+ if (ecc_chipkill_syndromes[row][column] == syndrome)
+ return ecc_chipkill_syndromes[row][0];
+ }
+
+ debugf0("syndrome(%x) not found\n", syndrome);
+ return -1;
+}
+
+/*
+ * Check for valid error in the NB Status High register. If so, proceed to read
+ * NB Status Low, NB Address Low and NB Address High registers and store data
+ * into error structure.
+ *
+ * Returns:
+ * - 1: if hardware regs contains valid error info
+ * - 0: if no valid error is indicated
+ */
+static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *regs)
+{
+ struct amd64_pvt *pvt;
+ struct pci_dev *misc_f3_ctl;
+ int err = 0;
+
+ pvt = mci->pvt_info;
+ misc_f3_ctl = pvt->misc_f3_ctl;
+
+ err = pci_read_config_dword(misc_f3_ctl, K8_NBSH, &regs->nbsh);
+ if (err)
+ goto err_reg;
+
+ if (!(regs->nbsh & K8_NBSH_VALID_BIT))
+ return 0;
+
+ /* valid error, read remaining error information registers */
+ err = pci_read_config_dword(misc_f3_ctl, K8_NBSL, &regs->nbsl);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(misc_f3_ctl, K8_NBEAL, &regs->nbeal);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(misc_f3_ctl, K8_NBEAH, &regs->nbeah);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(misc_f3_ctl, K8_NBCFG, &regs->nbcfg);
+ if (err)
+ goto err_reg;
+
+ return 1;
+
+err_reg:
+ debugf0("Reading error info register failed\n");
+ return 0;
+}
+
+/*
+ * This function is called to retrieve the error data from hardware and store it
+ * in the info structure.
+ *
+ * Returns:
+ * - 1: if a valid error is found
+ * - 0: if no error is found
+ */
+static int amd64_get_error_info(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ struct amd64_pvt *pvt;
+ struct amd64_error_info_regs regs;
+
+ pvt = mci->pvt_info;
+
+ if (!amd64_get_error_info_regs(mci, info))
+ return 0;
+
+ /*
+ * Here's the problem with the K8's EDAC reporting: There are four
+ * registers which report pieces of error information. They are shared
+ * between CEs and UEs. Furthermore, contrary to what is stated in the
+ * BKDG, the overflow bit is never used! Every error always updates the
+ * reporting registers.
+ *
+ * Can you see the race condition? All four error reporting registers
+ * must be read before a new error updates them! There is no way to read
+ * all four registers atomically. The best that can be done is to detect
+ * that a race has occurred and then report the error without any kind of
+ * precision.
+ *
+ * What is still positive is that errors are still reported and thus
+ * problems can still be detected - just not localized because the
+ * syndrome and address are spread out across registers.
+ *
+ * Grrrrr!!!!! Here's hoping that AMD fixes this in some future K8 rev.
+ * UEs and CEs should have separate register sets with proper overflow
+ * bits that are used! At the very least, the problem could be fixed by
+ * honoring the ErrValid bit in 'nbsh' and not updating the registers - just
+ * setting the overflow bit - unless the current error is a CE and the new
+ * error is a UE, which would be the only situation in which overwriting the
+ * current values makes sense.
+ */
+
+ regs = *info;
+
+ /* Use info from the second read - most current */
+ if (unlikely(!amd64_get_error_info_regs(mci, info)))
+ return 0;
+
+ /* clear the error bits in hardware */
+ pci_write_bits32(pvt->misc_f3_ctl, K8_NBSH, 0, K8_NBSH_VALID_BIT);
+
+ /* Check for the possible race condition */
+ if ((regs.nbsh != info->nbsh) ||
+ (regs.nbsl != info->nbsl) ||
+ (regs.nbeah != info->nbeah) ||
+ (regs.nbeal != info->nbeal)) {
+ amd64_mc_printk(mci, KERN_WARNING,
+ "hardware STATUS read access race condition "
+ "detected!\n");
+ return 0;
+ }
+ return 1;
+}
+
+static inline void amd64_decode_gart_tlb_error(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ u32 err_code;
+ u32 ec_tt; /* error code transaction type (2b) */
+ u32 ec_ll; /* error code cache level (2b) */
+
+ err_code = EXTRACT_ERROR_CODE(info->nbsl);
+ ec_ll = EXTRACT_LL_CODE(err_code);
+ ec_tt = EXTRACT_TT_CODE(err_code);
+
+ amd64_mc_printk(mci, KERN_ERR,
+ "GART TLB event: transaction type(%s), "
+ "cache level(%s)\n", tt_msgs[ec_tt], ll_msgs[ec_ll]);
+}
+
+static inline void amd64_decode_mem_cache_error(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ u32 err_code;
+ u32 ec_rrrr; /* error code memory transaction (4b) */
+ u32 ec_tt; /* error code transaction type (2b) */
+ u32 ec_ll; /* error code cache level (2b) */
+
+ err_code = EXTRACT_ERROR_CODE(info->nbsl);
+ ec_ll = EXTRACT_LL_CODE(err_code);
+ ec_tt = EXTRACT_TT_CODE(err_code);
+ ec_rrrr = EXTRACT_RRRR_CODE(err_code);
+
+ amd64_mc_printk(mci, KERN_ERR,
+ "cache hierarchy error: memory transaction type(%s), "
+ "transaction type(%s), cache level(%s)\n",
+ rrrr_msgs[ec_rrrr], tt_msgs[ec_tt], ll_msgs[ec_ll]);
+}
+
+
+/*
+ * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
+ * ADDRESS and process.
+ */
+static void amd64_handle_ce(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u64 SystemAddress;
+
+ /* Ensure that the Error Address is VALID */
+ if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
+ amd64_mc_printk(mci, KERN_ERR,
+ "HW has no ERROR_ADDRESS available\n");
+ edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ return;
+ }
+
+ SystemAddress = extract_error_address(mci, info);
+
+ amd64_mc_printk(mci, KERN_ERR,
+ "CE ERROR_ADDRESS= 0x%llx\n", SystemAddress);
+
+ pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress);
+}
+
+/* Handle any Un-correctable Errors (UEs) */
+static void amd64_handle_ue(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ int csrow;
+ u64 SystemAddress;
+ u32 page, offset;
+ struct mem_ctl_info *log_mci, *src_mci = NULL;
+
+ log_mci = mci;
+
+ if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
+ amd64_mc_printk(mci, KERN_CRIT,
+ "HW has no ERROR_ADDRESS available\n");
+ edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ return;
+ }
+
+ SystemAddress = extract_error_address(mci, info);
+
+ /*
+ * Find out which node the error address belongs to. This may be
+ * different from the node that detected the error.
+ */
+ src_mci = find_mc_by_sys_addr(mci, SystemAddress);
+ if (!src_mci) {
+ amd64_mc_printk(mci, KERN_CRIT,
+ "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
+ (unsigned long)SystemAddress);
+ edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ return;
+ }
+
+ log_mci = src_mci;
+
+ csrow = sys_addr_to_csrow(log_mci, SystemAddress);
+ if (csrow < 0) {
+ amd64_mc_printk(mci, KERN_CRIT,
+ "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
+ (unsigned long)SystemAddress);
+ edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ } else {
+ error_address_to_page_and_offset(SystemAddress, &page, &offset);
+ edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
+ }
+}
+
+static void amd64_decode_bus_error(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info)
+{
+ u32 err_code, ext_ec;
+ u32 ec_pp; /* error code participating processor (2p) */
+ u32 ec_to; /* error code timed out (1b) */
+ u32 ec_rrrr; /* error code memory transaction (4b) */
+ u32 ec_ii; /* error code memory or I/O (2b) */
+ u32 ec_ll; /* error code cache level (2b) */
+
+ ext_ec = EXTRACT_EXT_ERROR_CODE(info->nbsl);
+ err_code = EXTRACT_ERROR_CODE(info->nbsl);
+
+ ec_ll = EXTRACT_LL_CODE(err_code);
+ ec_ii = EXTRACT_II_CODE(err_code);
+ ec_rrrr = EXTRACT_RRRR_CODE(err_code);
+ ec_to = EXTRACT_TO_CODE(err_code);
+ ec_pp = EXTRACT_PP_CODE(err_code);
+
+ amd64_mc_printk(mci, KERN_ERR,
+ "BUS ERROR:\n"
+ " time-out(%s) mem or i/o(%s)\n"
+ " participating processor(%s)\n"
+ " memory transaction type(%s)\n"
+ " cache level(%s) Error Found by: %s\n",
+ to_msgs[ec_to],
+ ii_msgs[ec_ii],
+ pp_msgs[ec_pp],
+ rrrr_msgs[ec_rrrr],
+ ll_msgs[ec_ll],
+ (info->nbsh & K8_NBSH_ERR_SCRUBER) ?
+ "Scrubber" : "Normal Operation");
+
+ /* If this was an 'observed' error, early out */
+ if (ec_pp == K8_NBSL_PP_OBS)
+ return; /* We aren't the node involved */
+
+ /* Parse out the extended error code for ECC events */
+ switch (ext_ec) {
+ /* F10 changed to one Extended ECC error code */
+ case F10_NBSL_EXT_ERR_RES: /* Reserved field */
+ case F10_NBSL_EXT_ERR_ECC: /* F10 ECC ext err code */
+ break;
+
+ default:
+ amd64_mc_printk(mci, KERN_ERR, "NOT ECC: no special error "
+ "handling for this error\n");
+ return;
+ }
+
+ if (info->nbsh & K8_NBSH_CECC)
+ amd64_handle_ce(mci, info);
+ else if (info->nbsh & K8_NBSH_UECC)
+ amd64_handle_ue(mci, info);
+
+ /*
+ * If the main error is a CE then the overflow must be a CE. If the main
+ * error is a UE then the overflow is unknown. We'll call the overflow a
+ * CE - if panic_on_ue is set then we have already panicked and won't
+ * arrive here. Otherwise, apparently someone doesn't think that UEs are
+ * catastrophic.
+ */
+ if (info->nbsh & K8_NBSH_OVERFLOW)
+ edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR
+ "Error Overflow set");
+}
+
+int amd64_process_error_info(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info,
+ int handle_errors)
+{
+ struct amd64_pvt *pvt;
+ struct amd64_error_info_regs *regs;
+ u32 err_code, ext_ec;
+ int gart_tlb_error = 0;
+
+ pvt = mci->pvt_info;
+
+ /* If caller doesn't want us to process the error, return */
+ if (!handle_errors)
+ return 1;
+
+ regs = info;
+
+ debugf1("NorthBridge ERROR: mci(0x%p)\n", mci);
+ debugf1(" MC node(%d) Error-Address(0x%.8x-%.8x)\n",
+ pvt->mc_node_id, regs->nbeah, regs->nbeal);
+ debugf1(" nbsh(0x%.8x) nbsl(0x%.8x)\n",
+ regs->nbsh, regs->nbsl);
+ debugf1(" Valid Error=%s Overflow=%s\n",
+ (regs->nbsh & K8_NBSH_VALID_BIT) ? "True" : "False",
+ (regs->nbsh & K8_NBSH_OVERFLOW) ? "True" : "False");
+ debugf1(" Err Uncorrected=%s MCA Error Reporting=%s\n",
+ (regs->nbsh & K8_NBSH_UNCORRECTED_ERR) ?
+ "True" : "False",
+ (regs->nbsh & K8_NBSH_ERR_ENABLE) ?
+ "True" : "False");
+ debugf1(" MiscErr Valid=%s ErrAddr Valid=%s PCC=%s\n",
+ (regs->nbsh & K8_NBSH_MISC_ERR_VALID) ?
+ "True" : "False",
+ (regs->nbsh & K8_NBSH_VALID_ERROR_ADDR) ?
+ "True" : "False",
+ (regs->nbsh & K8_NBSH_PCC) ?
+ "True" : "False");
+ debugf1(" CECC=%s UECC=%s Found by Scruber=%s\n",
+ (regs->nbsh & K8_NBSH_CECC) ?
+ "True" : "False",
+ (regs->nbsh & K8_NBSH_UECC) ?
+ "True" : "False",
+ (regs->nbsh & K8_NBSH_ERR_SCRUBER) ?
+ "True" : "False");
+ debugf1(" CORE0=%s CORE1=%s CORE2=%s CORE3=%s\n",
+ (regs->nbsh & K8_NBSH_CORE0) ? "True" : "False",
+ (regs->nbsh & K8_NBSH_CORE1) ? "True" : "False",
+ (regs->nbsh & K8_NBSH_CORE2) ? "True" : "False",
+ (regs->nbsh & K8_NBSH_CORE3) ? "True" : "False");
+
+
+ err_code = EXTRACT_ERROR_CODE(regs->nbsl);
+
+ /* Determine which error type:
+ * 1) GART errors - non-fatal, developmental events
+ * 2) MEMORY errors
+ * 3) BUS errors
+ * 4) Unknown error
+ */
+ if (TEST_TLB_ERROR(err_code)) {
+ /*
+ * GART errors are intended to help graphics driver developers
+ * to detect bad GART PTEs. It is recommended by AMD to disable
+ * GART table walk error reporting by default[1] (currently
+ * being disabled in mce_cpu_quirks()) and according to the
+ * comment in mce_cpu_quirks(), such GART errors can be
+ * incorrectly triggered. We may see these errors anyway and
+ * unless requested by the user, they won't be reported.
+ *
+ * [1] section 13.10.1 of the BIOS and Kernel Developer's Guide for
+ * AMD NPT family 0Fh processors
+ */
+ if (report_gart_errors == 0)
+ return 1;
+
+ /*
+ * Only if GART error reporting is requested should we generate
+ * any logs.
+ */
+ gart_tlb_error = 1;
+
+ debugf1("GART TLB error\n");
+ amd64_decode_gart_tlb_error(mci, info);
+ } else if (TEST_MEM_ERROR(err_code)) {
+ debugf1("Memory/Cache error\n");
+ amd64_decode_mem_cache_error(mci, info);
+ } else if (TEST_BUS_ERROR(err_code)) {
+ debugf1("Bus (Link/DRAM) error\n");
+ amd64_decode_bus_error(mci, info);
+ } else {
+ /* shouldn't reach here! */
+ amd64_mc_printk(mci, KERN_WARNING,
+ "%s(): unknown MCE error 0x%x\n", __func__,
+ err_code);
+ }
+
+ ext_ec = EXTRACT_EXT_ERROR_CODE(regs->nbsl);
+ amd64_mc_printk(mci, KERN_ERR,
+ "ExtErr=(0x%x) %s\n", ext_ec, ext_msgs[ext_ec]);
+
+ if (((ext_ec >= F10_NBSL_EXT_ERR_CRC &&
+ ext_ec <= F10_NBSL_EXT_ERR_TGT) ||
+ (ext_ec == F10_NBSL_EXT_ERR_RMW)) &&
+ EXTRACT_LDT_LINK(info->nbsh)) {
+
+ amd64_mc_printk(mci, KERN_ERR,
+ "Error on hypertransport link: %s\n",
+ htlink_msgs[
+ EXTRACT_LDT_LINK(info->nbsh)]);
+ }
+
+ /*
+ * Check the UE bit of the NB status high register; if it is set, generate
+ * some logs. If it is NOT a GART error, then process the event as a
+ * NO-INFO event. If it was a GART error, skip that processing.
+ */
+ if (regs->nbsh & K8_NBSH_UNCORRECTED_ERR) {
+ amd64_mc_printk(mci, KERN_CRIT, "uncorrected error\n");
+ if (!gart_tlb_error)
+ edac_mc_handle_ue_no_info(mci, "UE bit is set\n");
+ }
+
+ if (regs->nbsh & K8_NBSH_PCC)
+ amd64_mc_printk(mci, KERN_CRIT,
+ "PCC (processor context corrupt) set\n");
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(amd64_process_error_info);
+
+/*
+ * The main polling 'check' function, called FROM the EDAC core to perform
+ * error checking and, if an error is encountered, error processing.
+ */
+static void amd64_check(struct mem_ctl_info *mci)
+{
+ struct amd64_error_info_regs info;
+
+ if (amd64_get_error_info(mci, &info))
+ amd64_process_error_info(mci, &info, 1);
+}
+
+/*
+ * Input:
+ * 1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
+ * 2) AMD Family index value
+ *
+ * Output:
+ * Upon return of 0, the following are filled in:
+ *
+ * struct pvt->addr_f1_ctl
+ * struct pvt->misc_f3_ctl
+ *
+ * Filled in with the related device functions of 'dram_f2_ctl'.
+ * These devices are "reserved" via pci_get_device().
+ *
+ * Upon return of 1 (error status):
+ *
+ * Nothing reserved
+ */
+static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
+{
+ const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];
+
+ /* Reserve the ADDRESS MAP Device */
+ pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
+ amd64_dev->addr_f1_ctl,
+ pvt->dram_f2_ctl);
+
+ if (!pvt->addr_f1_ctl) {
+ amd64_printk(KERN_ERR, "error address map device not found: "
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
+ return 1;
+ }
+
+ /* Reserve the MISC Device */
+ pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
+ amd64_dev->misc_f3_ctl,
+ pvt->dram_f2_ctl);
+
+ if (!pvt->misc_f3_ctl) {
+ pci_dev_put(pvt->addr_f1_ctl);
+ pvt->addr_f1_ctl = NULL;
+
+ amd64_printk(KERN_ERR, "error miscellaneous device not found: "
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
+ return 1;
+ }
+
+ debugf1(" Addr Map device PCI Bus ID:\t%s\n",
+ pci_name(pvt->addr_f1_ctl));
+ debugf1(" DRAM MEM-CTL PCI Bus ID:\t%s\n",
+ pci_name(pvt->dram_f2_ctl));
+ debugf1(" Misc device PCI Bus ID:\t%s\n",
+ pci_name(pvt->misc_f3_ctl));
+
+ return 0;
+}
+
+static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
+{
+ pci_dev_put(pvt->addr_f1_ctl);
+ pci_dev_put(pvt->misc_f3_ctl);
+}
+
+/*
+ * Retrieve the hardware registers of the memory controller (this includes the
+ * 'Address Map' and 'Misc' device regs)
+ */
+static void amd64_read_mc_registers(struct amd64_pvt *pvt)
+{
+ u64 msr_val;
+ int dram, err = 0;
+
+ /*
+ * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
+ * those are Read-As-Zero
+ */
+ rdmsrl(MSR_K8_TOP_MEM1, msr_val);
+ pvt->top_mem = msr_val >> 23;
+ debugf0(" TOP_MEM=0x%08llx\n", pvt->top_mem);
+
+ /* check first whether TOP_MEM2 is enabled */
+ rdmsrl(MSR_K8_SYSCFG, msr_val);
+ if (msr_val & (1U << 21)) {
+ rdmsrl(MSR_K8_TOP_MEM2, msr_val);
+ pvt->top_mem2 = msr_val >> 23;
+ debugf0(" TOP_MEM2=0x%08llx\n", pvt->top_mem2);
+ } else
+ debugf0(" TOP_MEM2 disabled.\n");
+
+ amd64_cpu_display_info(pvt);
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
+ if (err)
+ goto err_reg;
+
+ if (pvt->ops->read_dram_ctl_register)
+ pvt->ops->read_dram_ctl_register(pvt);
+
+ for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
+ /*
+ * Call CPU specific READ function to get the DRAM Base and
+ * Limit values from the DCT.
+ */
+ pvt->ops->read_dram_base_limit(pvt, dram);
+
+ /*
+ * Only print out debug info on rows with both R and W Enabled.
+ * In normal processing, the compiler should optimize this whole 'if'
+ * debug output block away.
+ */
+ if (pvt->dram_rw_en[dram] != 0) {
+ debugf1(" DRAM_BASE[%d]: 0x%8.08x-%8.08x "
+ "DRAM_LIMIT: 0x%8.08x-%8.08x\n",
+ dram,
+ (u32)(pvt->dram_base[dram] >> 32),
+ (u32)(pvt->dram_base[dram] & 0xFFFFFFFF),
+ (u32)(pvt->dram_limit[dram] >> 32),
+ (u32)(pvt->dram_limit[dram] & 0xFFFFFFFF));
+ debugf1(" IntlvEn=%s %s %s "
+ "IntlvSel=%d DstNode=%d\n",
+ pvt->dram_IntlvEn[dram] ?
+ "Enabled" : "Disabled",
+ (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
+ (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
+ pvt->dram_IntlvSel[dram],
+ pvt->dram_DstNode[dram]);
+ }
+ }
+
+ amd64_read_dct_base_mask(pvt);
+
+ err = pci_read_config_dword(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
+ if (err)
+ goto err_reg;
+
+ amd64_read_dbam_reg(pvt);
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl,
+ F10_ONLINE_SPARE, &pvt->online_spare);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
+ if (err)
+ goto err_reg;
+
+ if (!dct_ganging_enabled(pvt)) {
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1,
+ &pvt->dclr1);
+ if (err)
+ goto err_reg;
+
+ err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_1,
+ &pvt->dchr1);
+ if (err)
+ goto err_reg;
+ }
+
+ amd64_dump_misc_regs(pvt);
+
+ return;
+
+err_reg:
+ debugf0("Reading an MC register failed\n");
+
+}
+
+/*
+ * NOTE: CPU Revision Dependent code
+ *
+ * Input:
+ * @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1)
+ * k8 private pointer to -->
+ * DRAM Bank Address mapping register
+ * node_id
+ * DCL register where dual_channel_active is
+ *
+ * The DBAM register consists of 4 fields, 4 bits each:
+ *
+ * Bits: CSROWs
+ * 0-3 CSROWs 0 and 1
+ * 4-7 CSROWs 2 and 3
+ * 8-11 CSROWs 4 and 5
+ * 12-15 CSROWs 6 and 7
+ *
+ * Values range from: 0 to 15
+ * The meaning of the values depends on CPU revision and dual-channel state;
+ * see the relevant BKDG for more info.
+ *
+ * The memory controller provides for a total of only 8 CSROWs in its current
+ * architecture. Each "pair" of CSROWs normally represents just one DIMM in
+ * single channel or two (2) DIMMs in dual channel mode.
+ *
+ * The following code logic collapses the various tables for CSROW based on CPU
+ * revision.
+ *
+ * Returns:
+ * The number of PAGE_SIZE pages that the specified CSROW encompasses
+ *
+ */
+static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
+{
+ u32 dram_map, nr_pages;
+
+ /*
+ * The math on this doesn't look right on the surface because x/2*4 can
+ * be simplified to x*2 but this expression makes use of the fact that
+ * it is integral math where 1/2=0. This intermediate value becomes the
+ * number of bits to shift the DBAM register to extract the proper CSROW
+ * field.
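+ *
+ * Example (illustrative): csrow_nr = 5 gives (5 / 2) * 4 = 8, so bits
+ * 8-11 of the DBAM register - the field covering CSROWs 4 and 5 - are
+ * extracted.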
+ */
+ dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
+
+ nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map);
+
+ /*
+ * If dual channel then double the memory size of single channel.
+ * Channel count is 1 or 2
+ */
+ nr_pages <<= (pvt->channel_count - 1);
+
+ debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map);
+ debugf0(" nr_pages= %u channel-count = %d\n",
+ nr_pages, pvt->channel_count);
+
+ return nr_pages;
+}
+
+/*
+ * Initialize the array of csrow attribute instances, based on the values
+ * from pci config hardware registers.
+ */
+static int amd64_init_csrows(struct mem_ctl_info *mci)
+{
+ struct csrow_info *csrow;
+ struct amd64_pvt *pvt;
+ u64 input_addr_min, input_addr_max, sys_addr;
+ int i, err = 0, empty = 1;
+
+ pvt = mci->pvt_info;
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
+ if (err)
+ debugf0("Reading K8_NBCFG failed\n");
+
+ debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
+ (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
+ (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
+ );
+
+ for (i = 0; i < CHIPSELECT_COUNT; i++) {
+ csrow = &mci->csrows[i];
+
+ if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
+ debugf1("----CSROW %d EMPTY for node %d\n", i,
+ pvt->mc_node_id);
+ continue;
+ }
+
+ debugf1("----CSROW %d VALID for MC node %d\n",
+ i, pvt->mc_node_id);
+
+ empty = 0;
+ csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
+ find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
+ sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
+ csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
+ sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
+ csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
+ csrow->page_mask = ~mask_from_dct_mask(pvt, i);
+ /* 8 bytes of resolution */
+
+ csrow->mtype = amd64_determine_memory_type(pvt);
+
+ debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
+ debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
+ (unsigned long)input_addr_min,
+ (unsigned long)input_addr_max);
+ debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
+ (unsigned long)sys_addr, csrow->page_mask);
+ debugf1(" nr_pages: %u first_page: 0x%lx "
+ "last_page: 0x%lx\n",
+ (unsigned)csrow->nr_pages,
+ csrow->first_page, csrow->last_page);
+
+ /*
+ * determine whether CHIPKILL or JUST ECC or NO ECC is operating
+ */
+ if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
+ csrow->edac_mode =
+ (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
+ EDAC_S4ECD4ED : EDAC_SECDED;
+ else
+ csrow->edac_mode = EDAC_NONE;
+ }
+
+ return empty;
+}
+
+/*
+ * Enable ECC error reporting here only if 'ecc_enable_override' is set AND
+ * the BIOS had ECC disabled.
+ */
+static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
+ int cpu, idx = 0, err = 0;
+ struct msr msrs[cpumask_weight(cpumask)];
+ u32 value;
+ u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+
+ if (!ecc_enable_override)
+ return;
+
+ memset(msrs, 0, sizeof(msrs));
+
+ amd64_printk(KERN_WARNING,
+ "'ecc_enable_override' parameter is active, "
+ "Enabling AMD ECC hardware now: CAUTION\n");
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
+ if (err)
+ debugf0("Reading K8_NBCTL failed\n");
+
+ /* turn on UECCEn and CECCEn bits */
+ pvt->old_nbctl = value & mask;
+ pvt->nbctl_mcgctl_saved = 1;
+
+ value |= mask;
+ pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+
+ rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+
+ for_each_cpu(cpu, cpumask) {
+ if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+ set_bit(idx, &pvt->old_mcgctl);
+
+ msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+ idx++;
+ }
+ wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
+ if (err)
+ debugf0("Reading K8_NBCFG failed\n");
+
+ debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
+ (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
+ (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
+
+ if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ amd64_printk(KERN_WARNING,
+ "This node reports that DRAM ECC is "
+ "currently Disabled; ENABLING now\n");
+
+ /* Attempt to turn on DRAM ECC Enable */
+ value |= K8_NBCFG_ECC_ENABLE;
+ pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
+ if (err)
+ debugf0("Reading K8_NBCFG failed\n");
+
+ if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ amd64_printk(KERN_WARNING,
+ "Hardware rejects Enabling DRAM ECC checking\n"
+ "Check memory DIMM configuration\n");
+ } else {
+ amd64_printk(KERN_DEBUG,
+ "Hardware accepted DRAM ECC Enable\n");
+ }
+ }
+ debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
+ (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
+ (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
+
+ pvt->ctl_error_info.nbcfg = value;
+}
+
+static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
+{
+ const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
+ int cpu, idx = 0, err = 0;
+ struct msr msrs[cpumask_weight(cpumask)];
+ u32 value;
+ u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+
+ if (!pvt->nbctl_mcgctl_saved)
+ return;
+
+ memset(msrs, 0, sizeof(msrs));
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
+ if (err)
+ debugf0("Reading K8_NBCTL failed\n");
+ value &= ~mask;
+ value |= pvt->old_nbctl;
+
+ /* restore the NB Enable MCGCTL bit */
+ pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+
+ rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+
+ for_each_cpu(cpu, cpumask) {
+ msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+ msrs[idx].l |=
+ test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
+ idx++;
+ }
+
+ wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+}
+
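+/*
+ * Runs on each CPU via smp_call_function_many(): clear *ret if this core's
+ * MCG_CTL MSR does not have the NB machine check enable (NBE) bit set.
+ */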
+static void check_mcg_ctl(void *ret)
+{
+ u64 msr_val = 0;
+ u8 nbe;
+
+ rdmsrl(MSR_IA32_MCG_CTL, msr_val);
+ nbe = msr_val & K8_MSR_MCGCTL_NBE;
+
+ debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+ raw_smp_processor_id(), msr_val,
+ (nbe ? "enabled" : "disabled"));
+
+ if (!nbe)
+ *(int *)ret = 0;
+}
+
+/* check MCG_CTL on all the cpus on this node */
+static int amd64_mcg_ctl_enabled_on_cpus(const cpumask_t *mask)
+{
+ int ret = 1;
+ preempt_disable();
+ smp_call_function_many(mask, check_mcg_ctl, &ret, 1);
+ preempt_enable();
+
+ return ret;
+}
+
+/*
+ * EDAC requires that the BIOS have ECC enabled before taking over the
+ * processing of ECC errors. This is because the BIOS can properly initialize
+ * the memory system completely. A command line option allows force-enabling
+ * hardware ECC later, in amd64_enable_ecc_error_reporting().
+ */
+static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
+{
+ u32 value;
+ int err = 0, ret = 0;
+ u8 ecc_enabled = 0;
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
+ if (err)
+ debugf0("Reading K8_NBCTL failed\n");
+
+ ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
+
+ ret = amd64_mcg_ctl_enabled_on_cpus(cpumask_of_node(pvt->mc_node_id));
+
+ debugf0("K8_NBCFG=0x%x, DRAM ECC is %s\n", value,
+ (value & K8_NBCFG_ECC_ENABLE ? "enabled" : "disabled"));
+
+ if (!ecc_enabled || !ret) {
+ if (!ecc_enabled) {
+ amd64_printk(KERN_WARNING, "This node reports that "
+ "Memory ECC is currently "
+ "disabled.\n");
+
+ amd64_printk(KERN_WARNING, "bit 0x%lx in register "
+ "F3x%x of the MISC_CONTROL device (%s) "
+ "should be enabled\n", K8_NBCFG_ECC_ENABLE,
+ K8_NBCFG, pci_name(pvt->misc_f3_ctl));
+ }
+ if (!ret) {
+ amd64_printk(KERN_WARNING, "bit 0x%016lx in MSR 0x%08x "
+ "of node %d should be enabled\n",
+ K8_MSR_MCGCTL_NBE, MSR_IA32_MCG_CTL,
+ pvt->mc_node_id);
+ }
+ if (!ecc_enable_override) {
+ amd64_printk(KERN_WARNING, "WARNING: ECC is NOT "
+ "currently enabled by the BIOS. Module "
+ "will NOT be loaded.\n"
+ " Either Enable ECC in the BIOS, "
+ "or use the 'ecc_enable_override' "
+ "parameter.\n"
+ " Might be a BIOS bug, if BIOS says "
+ "ECC is enabled\n"
+ " Use of the override can cause "
+ "unknown side effects.\n");
+ ret = -ENODEV;
+ }
+ } else {
+ amd64_printk(KERN_INFO,
+ "ECC is enabled by BIOS, Proceeding "
+ "with EDAC module initialization\n");
+
+ /* CLEAR the override, since BIOS controlled it */
+ ecc_enable_override = 0;
+ }
+
+ return ret;
+}
+
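+/*
+ * Combined sysfs attribute table: the debug attributes, the error injection
+ * attributes, plus one slot for the NULL-name terminator below.
+ */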
+struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
+ ARRAY_SIZE(amd64_inj_attrs) +
+ 1];
+
+struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
+
+static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+{
+ unsigned int i = 0, j = 0;
+
+ for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
+ sysfs_attrs[i] = amd64_dbg_attrs[i];
+
+ for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
+ sysfs_attrs[i] = amd64_inj_attrs[j];
+
+ sysfs_attrs[i] = terminator;
+
+ mci->mc_driver_sysfs_attributes = sysfs_attrs;
+}
+
+static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+
+ if (pvt->nbcap & K8_NBCAP_SECDED)
+ mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+
+ if (pvt->nbcap & K8_NBCAP_CHIPKILL)
+ mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+
+ mci->edac_cap = amd64_determine_edac_cap(pvt);
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = EDAC_AMD64_VERSION;
+ mci->ctl_name = get_amd_family_name(pvt->mc_type_index);
+ mci->dev_name = pci_name(pvt->dram_f2_ctl);
+ mci->ctl_page_to_phys = NULL;
+
+ /* IMPORTANT: Set the polling 'check' function in this module */
+ mci->edac_check = amd64_check;
+
+ /* memory scrubber interface */
+ mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
+ mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
+}
+
+/*
+ * Init stuff for this DRAM Controller device.
+ *
+ * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
+ * Space feature MUST be enabled on ALL processors prior to actually reading
+ * from the ECS registers, since the loading of the module can occur on any
+ * 'core' and a core does not 'see' the other processors' ECS data when ECS
+ * access is not enabled on them. Our solution is to first enable ECS access
+ * in this routine on all processors, gather some data in an amd64_pvt
+ * structure and later come back in a finish-setup function to perform the
+ * final initialization. See also amd64_init_2nd_stage() for that.
+ */
+static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
+ int mc_type_index)
+{
+ struct amd64_pvt *pvt = NULL;
+ int err = 0, ret;
+
+ ret = -ENOMEM;
+ pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
+ if (!pvt)
+ goto err_exit;
+
+ pvt->mc_node_id = get_mc_node_id_from_pdev(dram_f2_ctl);
+
+ pvt->dram_f2_ctl = dram_f2_ctl;
+ pvt->ext_model = boot_cpu_data.x86_model >> 4;
+ pvt->mc_type_index = mc_type_index;
+ pvt->ops = family_ops(mc_type_index);
+ pvt->old_mcgctl = 0;
+
+ /*
+ * We have the dram_f2_ctl device as an argument, now go reserve its
+ * sibling devices from the PCI system.
+ */
+ ret = -ENODEV;
+ err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
+ if (err)
+ goto err_free;
+
+ ret = -EINVAL;
+ err = amd64_check_ecc_enabled(pvt);
+ if (err)
+ goto err_put;
+
+ /*
+ * Key operation here: setup of HW prior to performing ops on it. Some
+ * setup is required to access ECS data. After this is performed, the
+ * 'teardown' function must be called upon error and normal exit paths.
+ */
+ if (boot_cpu_data.x86 >= 0x10)
+ amd64_setup(pvt);
+
+ /*
+ * Save the pointer to the private data for use in 2nd initialization
+ * stage
+ */
+ pvt_lookup[pvt->mc_node_id] = pvt;
+
+ return 0;
+
+err_put:
+ amd64_free_mc_sibling_devices(pvt);
+
+err_free:
+ kfree(pvt);
+
+err_exit:
+ return ret;
+}
+
+/*
+ * This is the finishing stage of the init code. It needs to be performed
+ * after all MCs' hardware has been prepped for accessing extended config
+ * space.
+ */
+static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
+{
+ int node_id = pvt->mc_node_id;
+ struct mem_ctl_info *mci;
+ int ret, err = 0;
+
+ amd64_read_mc_registers(pvt);
+
+ ret = -ENODEV;
+ if (pvt->ops->probe_valid_hardware) {
+ err = pvt->ops->probe_valid_hardware(pvt);
+ if (err)
+ goto err_exit;
+ }
+
+ /*
+ * We need to determine how many memory channels there are. Then use
+ * that information for calculating the size of the dynamic instance
+ * tables in the 'mci' structure
+ */
+ pvt->channel_count = pvt->ops->early_channel_count(pvt);
+ if (pvt->channel_count < 0)
+ goto err_exit;
+
+ ret = -ENOMEM;
+ mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id);
+ if (!mci)
+ goto err_exit;
+
+ mci->pvt_info = pvt;
+
+ mci->dev = &pvt->dram_f2_ctl->dev;
+ amd64_setup_mci_misc_attributes(mci);
+
+ if (amd64_init_csrows(mci))
+ mci->edac_cap = EDAC_FLAG_NONE;
+
+ amd64_enable_ecc_error_reporting(mci);
+ amd64_set_mc_sysfs_attributes(mci);
+
+ ret = -ENODEV;
+ if (edac_mc_add_mc(mci)) {
+ debugf1("failed edac_mc_add_mc()\n");
+ goto err_add_mc;
+ }
+
+ mci_lookup[node_id] = mci;
+ pvt_lookup[node_id] = NULL;
+ return 0;
+
+err_add_mc:
+ edac_mc_free(mci);
+
+err_exit:
+ debugf0("failure to init 2nd stage: ret=%d\n", ret);
+
+ amd64_restore_ecc_error_reporting(pvt);
+
+ if (boot_cpu_data.x86 > 0xf)
+ amd64_teardown(pvt);
+
+ amd64_free_mc_sibling_devices(pvt);
+
+ kfree(pvt_lookup[pvt->mc_node_id]);
+ pvt_lookup[node_id] = NULL;
+
+ return ret;
+}
+
+
+static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
+ const struct pci_device_id *mc_type)
+{
+ int ret = 0;
+
+ debugf0("(MC node=%d,mc_type='%s')\n",
+ get_mc_node_id_from_pdev(pdev),
+ get_amd_family_name(mc_type->driver_data));
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ ret = -EIO;
+ else
+ ret = amd64_probe_one_instance(pdev, mc_type->driver_data);
+
+ if (ret < 0)
+ debugf0("ret=%d\n", ret);
+
+ return ret;
+}
+
+static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct amd64_pvt *pvt;
+
+ /* Remove from EDAC CORE tracking list */
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (!mci)
+ return;
+
+ pvt = mci->pvt_info;
+
+ amd64_restore_ecc_error_reporting(pvt);
+
+ if (boot_cpu_data.x86 > 0xf)
+ amd64_teardown(pvt);
+
+ amd64_free_mc_sibling_devices(pvt);
+
+ mci_lookup[pvt->mc_node_id] = NULL;
+
+ /* use pvt->mc_node_id above before freeing pvt */
+ kfree(pvt);
+ mci->pvt_info = NULL;
+
+ /* Free the EDAC CORE resources */
+ edac_mc_free(mci);
+}
+
+/*
+ * This table is part of the interface for loading drivers for PCI devices. The
+ * PCI core identifies what devices are on a system during boot, and then
+ * consults this table to see whether this driver handles a given device it
+ * has found.
+ */
+static const struct pci_device_id amd64_pci_table[] __devinitdata = {
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = K8_CPUS
+ },
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = F10_CPUS
+ },
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_11H_NB_DRAM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = F11_CPUS
+ },
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, amd64_pci_table);
+
+static struct pci_driver amd64_pci_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = amd64_init_one_instance,
+ .remove = __devexit_p(amd64_remove_one_instance),
+ .id_table = amd64_pci_table,
+};
+
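+/*
+ * Set up a single EDAC PCI control, anchored to the first MC instance, so
+ * that PCI errors can be reported through EDAC (done only once).
+ */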
+static void amd64_setup_pci_device(void)
+{
+ struct mem_ctl_info *mci;
+ struct amd64_pvt *pvt;
+
+ if (amd64_ctl_pci)
+ return;
+
+ mci = mci_lookup[0];
+ if (mci) {
+
+ pvt = mci->pvt_info;
+ amd64_ctl_pci =
+ edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
+ EDAC_MOD_STR);
+
+ if (!amd64_ctl_pci) {
+ pr_warning("%s(): Unable to create PCI control\n",
+ __func__);
+
+ pr_warning("%s(): PCI error report via EDAC not set\n",
+ __func__);
+ }
+ }
+}
+
+static int __init amd64_edac_init(void)
+{
+ int nb, err = -ENODEV;
+
+ edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+
+ opstate_init();
+
+ if (cache_k8_northbridges() < 0)
+ goto err_exit;
+
+ err = pci_register_driver(&amd64_pci_driver);
+ if (err)
+ return err;
+
+ /*
+ * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
+ * amd64_pvt structs. These will be used in the 2nd stage init function
+ * to finish initialization of the MC instances.
+ */
+ for (nb = 0; nb < num_k8_northbridges; nb++) {
+ if (!pvt_lookup[nb])
+ continue;
+
+ err = amd64_init_2nd_stage(pvt_lookup[nb]);
+ if (err)
+ goto err_exit;
+ }
+
+ amd64_setup_pci_device();
+
+ return 0;
+
+err_exit:
+ debugf0("'finish_setup' stage failed\n");
+ pci_unregister_driver(&amd64_pci_driver);
+
+ return err;
+}
+
+static void __exit amd64_edac_exit(void)
+{
+ if (amd64_ctl_pci)
+ edac_pci_release_generic_ctl(amd64_ctl_pci);
+
+ pci_unregister_driver(&amd64_pci_driver);
+}
+
+module_init(amd64_edac_init);
+module_exit(amd64_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
+ "Dave Peterson, Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
+ EDAC_AMD64_VERSION);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
new file mode 100644
index 00000000000..a159957e167
--- /dev/null
+++ b/drivers/edac/amd64_edac.h
@@ -0,0 +1,644 @@
+/*
+ * AMD64 class Memory Controller kernel module
+ *
+ * Copyright (c) 2009 SoftwareBitMaker.
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Originally Written by Thayne Harbaugh
+ *
+ * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
+ * - K8 CPU Revision D and greater support
+ *
+ * Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>:
+ * - Module largely rewritten, with new (and hopefully correct)
+ * code for dealing with node and chip select interleaving,
+ * various code cleanup, and bug fixes
+ * - Added support for memory hoisting using DRAM hole address
+ * register
+ *
+ * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
+ * -K8 Rev (1207) revision support added, required Revision
+ * specific mini-driver code to support Rev F as well as
+ * prior revisions
+ *
+ * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
+ * -Family 10h revision support added. New PCI Device IDs,
+ * indicating new changes. The actual register changes were
+ * slight, smaller than for the Rev E to Rev F transition, but
+ * changing the PCI Device ID was the proper thing to do, as it
+ * provides for almost automatic family detection. The mods for
+ * Rev F required more family information detection.
+ *
+ * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
+ * - misc fixes and code cleanups
+ *
+ * This module is based on the following documents
+ * (available from http://www.amd.com/):
+ *
+ * Title: BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
+ * Opteron Processors
+ * AMD publication #: 26094
+ * Revision: 3.26
+ *
+ * Title: BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
+ * Processors
+ * AMD publication #: 32559
+ * Revision: 3.00
+ * Issue Date: May 2006
+ *
+ * Title: BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
+ * Processors
+ * AMD publication #: 31116
+ * Revision: 3.00
+ * Issue Date: September 07, 2007
+ *
+ * Sections in the first 2 documents are no longer in sync with each other.
+ * The Family 10h BKDG was totally re-written from scratch with a new
+ * presentation model.
+ * Therefore, comments that refer to a Document section might be off.
+ */
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/mmzone.h>
+#include <linux/edac.h>
+#include <asm/msr.h>
+#include "edac_core.h"
+
+#define amd64_printk(level, fmt, arg...) \
+ edac_printk(level, "amd64", fmt, ##arg)
+
+#define amd64_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
+
+/*
+ * Throughout the comments in this code, the following terms are used:
+ *
+ * SysAddr, DramAddr, and InputAddr
+ *
+ * These terms come directly from the amd64 documentation
+ * (AMD publication #26094). They are defined as follows:
+ *
+ * SysAddr:
+ * This is a physical address generated by a CPU core or a device
+ * doing DMA. If generated by a CPU core, a SysAddr is the result of
+ * a virtual to physical address translation by the CPU core's address
+ * translation mechanism (MMU).
+ *
+ * DramAddr:
+ * A DramAddr is derived from a SysAddr by subtracting an offset that
+ * depends on which node the SysAddr maps to and whether the SysAddr
+ * is within a range affected by memory hoisting. The DRAM Base
+ * (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers
+ * determine which node a SysAddr maps to.
+ *
+ * If the DRAM Hole Address Register (DHAR) is enabled and the SysAddr
+ * is within the range of addresses specified by this register, then
+ * a value x from the DHAR is subtracted from the SysAddr to produce a
+ * DramAddr. Here, x represents the base address for the node that
+ * the SysAddr maps to plus an offset due to memory hoisting. See
+ * section 3.4.8 and the comments in amd64_get_dram_hole_info() and
+ * sys_addr_to_dram_addr() below for more information.
+ *
+ * If the SysAddr is not affected by the DHAR then a value y is
+ * subtracted from the SysAddr to produce a DramAddr. Here, y is the
+ * base address for the node that the SysAddr maps to. See section
+ * 3.4.4 and the comments in sys_addr_to_dram_addr() below for more
+ * information.
+ *
+ * InputAddr:
+ * A DramAddr is translated to an InputAddr before being passed to the
+ * memory controller for the node that the DramAddr is associated
+ * with. The memory controller then maps the InputAddr to a csrow.
+ * If node interleaving is not in use, then the InputAddr has the same
+ * value as the DramAddr. Otherwise, the InputAddr is produced by
+ * discarding the bits used for node interleaving from the DramAddr.
+ * See section 3.4.4 for more information.
+ *
+ * The memory controller for a given node uses its DRAM CS Base and
+ * DRAM CS Mask registers to map an InputAddr to a csrow. See
+ * sections 3.5.4 and 3.5.5 for more information.
+ */
+
+#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__
+#define EDAC_MOD_STR "amd64_edac"
+
+/* Extended Model from CPUID, for CPU Revision numbers */
+#define OPTERON_CPU_LE_REV_C 0
+#define OPTERON_CPU_REV_D 1
+#define OPTERON_CPU_REV_E 2
+
+/* NPT processors have the following Extended Models */
+#define OPTERON_CPU_REV_F 4
+#define OPTERON_CPU_REV_FA 5
+
+/* Hardware limit on ChipSelect rows per MC and processors per system */
+#define CHIPSELECT_COUNT 8
+#define DRAM_REG_COUNT 8
+
+
+/*
+ * PCI-defined configuration space registers
+ */
+
+
+/*
+ * Function 1 - Address Map
+ */
+#define K8_DRAM_BASE_LOW 0x40
+#define K8_DRAM_LIMIT_LOW 0x44
+#define K8_DHAR 0xf0
+
+#define DHAR_VALID BIT(0)
+#define F10_DRAM_MEM_HOIST_VALID BIT(1)
+
+#define DHAR_BASE_MASK 0xff000000
+#define dhar_base(dhar) (dhar & DHAR_BASE_MASK)
+
+#define K8_DHAR_OFFSET_MASK 0x0000ff00
+#define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16)
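+/* i.e. for K8, DHAR bits [15:8] are relocated to bits [31:24] of the offset */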
+
+#define F10_DHAR_OFFSET_MASK 0x0000ff80
+ /* NOTE: Extra mask bit vs K8 */
+#define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16)
+
+
+/* F10 High BASE/LIMIT registers */
+#define F10_DRAM_BASE_HIGH 0x140
+#define F10_DRAM_LIMIT_HIGH 0x144
+
+
+/*
+ * Function 2 - DRAM controller
+ */
+#define K8_DCSB0 0x40
+#define F10_DCSB1 0x140
+
+#define K8_DCSB_CS_ENABLE BIT(0)
+#define K8_DCSB_NPT_SPARE BIT(1)
+#define K8_DCSB_NPT_TESTFAIL BIT(2)
+
+/*
+ * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
+ * the address
+ */
+#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
+#define REV_E_DCS_SHIFT 4
+#define REV_E_DCSM_COUNT 8
+
+#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
+#define REV_F_F1Xh_DCS_SHIFT 8
+
+/*
+ * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
+ * to form the address
+ */
+#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
+#define REV_F_DCS_SHIFT 8
+#define REV_F_DCSM_COUNT 4
+#define F10_DCSM_COUNT 4
+#define F11_DCSM_COUNT 2
+
+/* DRAM CS Mask Registers */
+#define K8_DCSM0 0x60
+#define F10_DCSM1 0x160
+
+/* REV E: select [29:21] and [15:9] from DCSM */
+#define REV_E_DCSM_MASK_BITS 0x3FE0FE00
+
+/* unused bits [24:20] and [12:0] */
+#define REV_E_DCS_NOTUSED_BITS 0x01F01FFF
+
+/* REV F and later: select [28:19] and [13:5] from DCSM */
+#define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0
+
+/* unused bits [26:22] and [12:0] */
+#define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF
+
+#define DBAM0 0x80
+#define DBAM1 0x180
+
+/* Extract the DIMM 'type' of the i'th DIMM from the DBAM reg value passed */
+#define DBAM_DIMM(i, reg) (((reg) >> (4 * (i))) & 0xF)
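+/* e.g. DBAM_DIMM(2, reg) extracts bits [11:8] of the DBAM value */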
+
+#define DBAM_MAX_VALUE 11
+
+
+#define F10_DCLR_0 0x90
+#define F10_DCLR_1 0x190
+#define REVE_WIDTH_128 BIT(16)
+#define F10_WIDTH_128 BIT(11)
+
+
+#define F10_DCHR_0 0x94
+#define F10_DCHR_1 0x194
+
+#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
+#define F10_DCHR_Ddr3Mode BIT(8)
+#define F10_DCHR_MblMode BIT(6)
+
+
+#define F10_DCTL_SEL_LOW 0x110
+
+#define dct_sel_baseaddr(pvt) \
+ ((pvt->dram_ctl_select_low) & 0xFFFFF800)
+
+#define dct_sel_interleave_addr(pvt) \
+ (((pvt->dram_ctl_select_low) >> 6) & 0x3)
+
+enum {
+ F10_DCTL_SEL_LOW_DctSelHiRngEn = BIT(0),
+ F10_DCTL_SEL_LOW_DctSelIntLvEn = BIT(2),
+ F10_DCTL_SEL_LOW_DctGangEn = BIT(4),
+ F10_DCTL_SEL_LOW_DctDatIntLv = BIT(5),
+ F10_DCTL_SEL_LOW_DramEnable = BIT(8),
+ F10_DCTL_SEL_LOW_MemCleared = BIT(10),
+};
+
+#define dct_high_range_enabled(pvt) \
+ (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelHiRngEn)
+
+#define dct_interleave_enabled(pvt) \
+ (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelIntLvEn)
+
+#define dct_ganging_enabled(pvt) \
+ (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctGangEn)
+
+#define dct_data_intlv_enabled(pvt) \
+ (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctDatIntLv)
+
+#define dct_dram_enabled(pvt) \
+ (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DramEnable)
+
+#define dct_memory_cleared(pvt) \
+ (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_MemCleared)
+
+
+#define F10_DCTL_SEL_HIGH 0x114
+
+
+/*
+ * Function 3 - Misc Control
+ */
+#define K8_NBCTL 0x40
+
+/* Correctable ECC error reporting enable */
+#define K8_NBCTL_CECCEn BIT(0)
+
+/* UnCorrectable ECC error reporting enable */
+#define K8_NBCTL_UECCEn BIT(1)
+
+#define K8_NBCFG 0x44
+#define K8_NBCFG_CHIPKILL BIT(23)
+#define K8_NBCFG_ECC_ENABLE BIT(22)
+
+#define K8_NBSL 0x48
+
+
+#define EXTRACT_HIGH_SYNDROME(x) (((x) >> 24) & 0xff)
+#define EXTRACT_EXT_ERROR_CODE(x) (((x) >> 16) & 0x1f)
+
+/* Family F10h: Normalized Extended Error Codes */
+#define F10_NBSL_EXT_ERR_RES 0x0
+#define F10_NBSL_EXT_ERR_CRC 0x1
+#define F10_NBSL_EXT_ERR_SYNC 0x2
+#define F10_NBSL_EXT_ERR_MST 0x3
+#define F10_NBSL_EXT_ERR_TGT 0x4
+#define F10_NBSL_EXT_ERR_GART 0x5
+#define F10_NBSL_EXT_ERR_RMW 0x6
+#define F10_NBSL_EXT_ERR_WDT 0x7
+#define F10_NBSL_EXT_ERR_ECC 0x8
+#define F10_NBSL_EXT_ERR_DEV 0x9
+#define F10_NBSL_EXT_ERR_LINK_DATA 0xA
+
+/* Next two are overloaded values */
+#define F10_NBSL_EXT_ERR_LINK_PROTO 0xB
+#define F10_NBSL_EXT_ERR_L3_PROTO 0xB
+
+#define F10_NBSL_EXT_ERR_NB_ARRAY 0xC
+#define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD
+#define F10_NBSL_EXT_ERR_LINK_RETRY 0xE
+
+/* Next two are overloaded values */
+#define F10_NBSL_EXT_ERR_GART_WALK 0xF
+#define F10_NBSL_EXT_ERR_DEV_WALK 0xF
+
+/* 0x10 to 0x1B: Reserved */
+#define F10_NBSL_EXT_ERR_L3_DATA 0x1C
+#define F10_NBSL_EXT_ERR_L3_TAG 0x1D
+#define F10_NBSL_EXT_ERR_L3_LRU 0x1E
+
+/* K8: Normalized Extended Error Codes */
+#define K8_NBSL_EXT_ERR_ECC 0x0
+#define K8_NBSL_EXT_ERR_CRC 0x1
+#define K8_NBSL_EXT_ERR_SYNC 0x2
+#define K8_NBSL_EXT_ERR_MST 0x3
+#define K8_NBSL_EXT_ERR_TGT 0x4
+#define K8_NBSL_EXT_ERR_GART 0x5
+#define K8_NBSL_EXT_ERR_RMW 0x6
+#define K8_NBSL_EXT_ERR_WDT 0x7
+#define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8
+#define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD
+
+#define EXTRACT_ERROR_CODE(x) ((x) & 0xffff)
+#define TEST_TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
+#define TEST_MEM_ERROR(x) (((x) & 0xFF00) == 0x0100)
+#define TEST_BUS_ERROR(x) (((x) & 0xF800) == 0x0800)
+#define EXTRACT_TT_CODE(x) (((x) >> 2) & 0x3)
+#define EXTRACT_II_CODE(x) (((x) >> 2) & 0x3)
+#define EXTRACT_LL_CODE(x) (((x) >> 0) & 0x3)
+#define EXTRACT_RRRR_CODE(x) (((x) >> 4) & 0xf)
+#define EXTRACT_TO_CODE(x) (((x) >> 8) & 0x1)
+#define EXTRACT_PP_CODE(x) (((x) >> 9) & 0x3)
+
+/*
+ * The following are for BUS type errors AFTER values have been normalized by
+ * shifting right
+ */
+#define K8_NBSL_PP_SRC 0x0
+#define K8_NBSL_PP_RES 0x1
+#define K8_NBSL_PP_OBS 0x2
+#define K8_NBSL_PP_GENERIC 0x3
+
+
+#define K8_NBSH 0x4C
+
+#define K8_NBSH_VALID_BIT BIT(31)
+#define K8_NBSH_OVERFLOW BIT(30)
+#define K8_NBSH_UNCORRECTED_ERR BIT(29)
+#define K8_NBSH_ERR_ENABLE BIT(28)
+#define K8_NBSH_MISC_ERR_VALID BIT(27)
+#define K8_NBSH_VALID_ERROR_ADDR BIT(26)
+#define K8_NBSH_PCC BIT(25)
+#define K8_NBSH_CECC BIT(14)
+#define K8_NBSH_UECC BIT(13)
+#define K8_NBSH_ERR_SCRUBER BIT(8)
+#define K8_NBSH_CORE3 BIT(3)
+#define K8_NBSH_CORE2 BIT(2)
+#define K8_NBSH_CORE1 BIT(1)
+#define K8_NBSH_CORE0 BIT(0)
+
+#define EXTRACT_LDT_LINK(x) (((x) >> 4) & 0x7)
+#define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF)
+#define EXTRACT_LOW_SYNDROME(x) (((x) >> 15) & 0xff)
+
+
+#define K8_NBEAL 0x50
+#define K8_NBEAH 0x54
+#define K8_SCRCTRL 0x58
+
+#define F10_NB_CFG_LOW 0x88
+#define F10_NB_CFG_LOW_ENABLE_EXT_CFG BIT(14)
+
+#define F10_NB_CFG_HIGH 0x8C
+
+#define F10_ONLINE_SPARE 0xB0
+#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1))
+#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3))
+#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
+#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
+
+#define F10_NB_ARRAY_ADDR 0xB8
+
+#define F10_NB_ARRAY_DRAM_ECC 0x80000000
+
+/* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
+#define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
+
+#define F10_NB_ARRAY_DATA 0xBC
+
+#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
+ (BIT(((word) & 0xF) + 20) | \
+ BIT(17) | \
+ ((bits) & 0xF))
+
+#define SET_NB_DRAM_INJECTION_READ(word, bits) \
+ (BIT(((word) & 0xF) + 20) | \
+ BIT(16) | \
+ ((bits) & 0xF))
+
+#define K8_NBCAP 0xE8
+#define K8_NBCAP_CORES (BIT(12)|BIT(13))
+#define K8_NBCAP_CHIPKILL BIT(4)
+#define K8_NBCAP_SECDED BIT(3)
+#define K8_NBCAP_8_NODE BIT(2)
+#define K8_NBCAP_DUAL_NODE BIT(1)
+#define K8_NBCAP_DCT_DUAL BIT(0)
+
+/*
+ * MSR Regs
+ */
+#define K8_MSR_MCGCTL 0x017b
+#define K8_MSR_MCGCTL_NBE BIT(4)
+
+#define K8_MSR_MC4CTL 0x0410
+#define K8_MSR_MC4STAT 0x0411
+#define K8_MSR_MC4ADDR 0x0412
+
+/* AMD sets the first MC device at device ID 0x18. */
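+/* e.g. the NB functions of node 0 sit in PCI slot 0x18, yielding node id 0. */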
+static inline int get_mc_node_id_from_pdev(struct pci_dev *pdev)
+{
+ return PCI_SLOT(pdev->devfn) - 0x18;
+}
+
+enum amd64_chipset_families {
+ K8_CPUS = 0,
+ F10_CPUS,
+ F11_CPUS,
+};
+
+/*
+ * Structure to hold:
+ *
+ * 1) dynamically read status and error address HW registers
+ * 2) sysfs entered values
+ * 3) MCE values
+ *
+ * Which values it holds depends on the path of entry into the module.
+ */
+struct amd64_error_info_regs {
+ u32 nbcfg;
+ u32 nbsh;
+ u32 nbsl;
+ u32 nbeah;
+ u32 nbeal;
+};
+
+/* Error injection control structure */
+struct error_injection {
+ u32 section;
+ u32 word;
+ u32 bit_map;
+};
+
+struct amd64_pvt {
+ /* pci_device handles which we utilize */
+ struct pci_dev *addr_f1_ctl;
+ struct pci_dev *dram_f2_ctl;
+ struct pci_dev *misc_f3_ctl;
+
+ int mc_node_id; /* MC index of this MC node */
+ int ext_model; /* extended model value of this node */
+
+ struct low_ops *ops; /* pointer to per PCI Device ID func table */
+
+ int channel_count;
+
+ /* Raw registers */
+ u32 dclr0; /* DRAM Configuration Low DCT0 reg */
+ u32 dclr1; /* DRAM Configuration Low DCT1 reg */
+ u32 dchr0; /* DRAM Configuration High DCT0 reg */
+ u32 dchr1; /* DRAM Configuration High DCT1 reg */
+ u32 nbcap; /* North Bridge Capabilities */
+ u32 nbcfg; /* F10 North Bridge Configuration */
+ u32 ext_nbcfg; /* Extended F10 North Bridge Configuration */
+ u32 dhar; /* DRAM Hoist reg */
+ u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
+ u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
+
+ /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
+ u32 dcsb0[CHIPSELECT_COUNT];
+ u32 dcsb1[CHIPSELECT_COUNT];
+
+ /* DRAM CS Mask Registers F2x[1,0][6C:60] */
+ u32 dcsm0[CHIPSELECT_COUNT];
+ u32 dcsm1[CHIPSELECT_COUNT];
+
+ /*
+ * Decoded parts of DRAM BASE and LIMIT Registers
+ * F1x[78,70,68,60,58,50,48,40]
+ */
+ u64 dram_base[DRAM_REG_COUNT];
+ u64 dram_limit[DRAM_REG_COUNT];
+ u8 dram_IntlvSel[DRAM_REG_COUNT];
+ u8 dram_IntlvEn[DRAM_REG_COUNT];
+ u8 dram_DstNode[DRAM_REG_COUNT];
+ u8 dram_rw_en[DRAM_REG_COUNT];
+
+ /*
+ * The following fields are set at (load) run time, after CPU revision
+ * has been determined, since the dct_base and dct_mask registers vary
+ * based on revision
+ */
+ u32 dcsb_base; /* DCSB base bits */
+ u32 dcsm_mask; /* DCSM mask bits */
+ u32 num_dcsm; /* Number of DCSM registers */
+ u32 dcs_mask_notused; /* DCSM notused mask bits */
+ u32 dcs_shift; /* DCSB and DCSM shift value */
+
+ u64 top_mem; /* top of memory below 4GB */
+ u64 top_mem2; /* top of memory above 4GB */
+
+ u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */
+ u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */
+ u32 online_spare; /* On-Line spare Reg */
+
+ /* temp storage for when input is received from sysfs */
+ struct amd64_error_info_regs ctl_error_info;
+
+ /* place to store error injection parameters prior to issue */
+ struct error_injection injection;
+
+ /* Save old hw registers' values before we modified them */
+ u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
+ u32 old_nbctl;
+ unsigned long old_mcgctl; /* per core on this node */
+
+ /* MC Type Index value: socket F vs Family 10h */
+ u32 mc_type_index;
+
+ /* misc settings */
+ struct flags {
+ unsigned long cf8_extcfg:1;
+ } flags;
+};
+
+struct scrubrate {
+ u32 scrubval; /* bit pattern for scrub rate */
+ u32 bandwidth; /* bandwidth consumed (bytes/sec) */
+};
+
+extern struct scrubrate scrubrates[23];
+extern u32 revf_quad_ddr2_shift[16];
+extern const char *tt_msgs[4];
+extern const char *ll_msgs[4];
+extern const char *rrrr_msgs[16];
+extern const char *to_msgs[2];
+extern const char *pp_msgs[4];
+extern const char *ii_msgs[4];
+extern const char *ext_msgs[32];
+extern const char *htlink_msgs[8];
+
+#ifdef CONFIG_EDAC_DEBUG
+#define NUM_DBG_ATTRS 9
+#else
+#define NUM_DBG_ATTRS 0
+#endif
+
+#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
+#define NUM_INJ_ATTRS 5
+#else
+#define NUM_INJ_ATTRS 0
+#endif
+
+extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
+ amd64_inj_attrs[NUM_INJ_ATTRS];
+
+/*
+ * Each of the PCI Device ID types has its own set of hardware accessor
+ * functions and per-device encoding/decoding logic.
+ */
+struct low_ops {
+ int (*probe_valid_hardware)(struct amd64_pvt *pvt);
+ int (*early_channel_count)(struct amd64_pvt *pvt);
+
+ u64 (*get_error_address)(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info);
+ void (*read_dram_base_limit)(struct amd64_pvt *pvt, int dram);
+ void (*read_dram_ctl_register)(struct amd64_pvt *pvt);
+ void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info,
+ u64 SystemAddr);
+ int (*dbam_map_to_pages)(struct amd64_pvt *pvt, int dram_map);
+};
+
+struct amd64_family_type {
+ const char *ctl_name;
+ u16 addr_f1_ctl;
+ u16 misc_f3_ctl;
+ struct low_ops ops;
+};
+
+static struct amd64_family_type amd64_family_types[];
+
+static inline const char *get_amd_family_name(int index)
+{
+ return amd64_family_types[index].ctl_name;
+}
+
+static inline struct low_ops *family_ops(int index)
+{
+ return &amd64_family_types[index].ops;
+}
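+
+/*
+ * A minimal dispatch sketch, assuming pvt->mc_type_index holds one of the
+ * amd64_chipset_families values and pvt has been fully initialized; the
+ * wrapper name is hypothetical.
+ */
+static inline void example_read_dram_range(struct amd64_pvt *pvt, int dram)
+{
+	/* Call the K8/F10/F11 specific implementation. */
+	family_ops(pvt->mc_type_index)->read_dram_base_limit(pvt, dram);
+}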
+
+/*
+ * For future CPU versions, verify the following as new 'slow' rates appear and
+ * modify the necessary skip values for the supported CPU.
+ */
+#define K8_MIN_SCRUB_RATE_BITS 0x0
+#define F10_MIN_SCRUB_RATE_BITS 0x5
+#define F11_MIN_SCRUB_RATE_BITS 0x6
+
+int amd64_process_error_info(struct mem_ctl_info *mci,
+ struct amd64_error_info_regs *info,
+ int handle_errors);
+int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
+ u64 *hole_offset, u64 *hole_size);
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c
new file mode 100644
index 00000000000..0a41b248a4a
--- /dev/null
+++ b/drivers/edac/amd64_edac_dbg.c
@@ -0,0 +1,255 @@
+#include "amd64_edac.h"
+
+/*
+ * accept a hex value and store it into the virtual error register file
+ * fields nbeal and nbeah. Assume virtual error values have already been set
+ * for NBSL, NBSH and NBCFG. Then proceed to map the error values to an MC,
+ * CSROW and CHANNEL.
+ */
+static ssize_t amd64_nbea_store(struct mem_ctl_info *mci, const char *data,
+ size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long long value;
+ int ret = 0;
+
+ ret = strict_strtoull(data, 16, &value);
+ if (ret != -EINVAL) {
+ debugf0("received NBEA= 0x%llx\n", value);
+
+ /* place the value into the virtual error packet */
+ pvt->ctl_error_info.nbeal = (u32) value;
+ value >>= 32;
+ pvt->ctl_error_info.nbeah = (u32) value;
+
+ /* Process the Mapping request */
+ /* TODO: Add race prevention */
+ amd64_process_error_info(mci, &pvt->ctl_error_info, 1);
+
+ return count;
+ }
+ return ret;
+}
+
+/* display the last NBEA (MCA NB Address (MC4_ADDR)) value that was written */
+static ssize_t amd64_nbea_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u64 value;
+
+ value = pvt->ctl_error_info.nbeah;
+ value <<= 32;
+ value |= pvt->ctl_error_info.nbeal;
+
+ return sprintf(data, "%llx\n", value);
+}
+
+/* store the NBSL (MCA NB Status Low (MC4_STATUS)) value the user desires */
+static ssize_t amd64_nbsl_store(struct mem_ctl_info *mci, const char *data,
+ size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 16, &value);
+ if (ret != -EINVAL) {
+ debugf0("received NBSL= 0x%lx\n", value);
+
+ pvt->ctl_error_info.nbsl = (u32) value;
+
+ return count;
+ }
+ return ret;
+}
+
+/* display the last NBSL value that was written */
+static ssize_t amd64_nbsl_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 value;
+
+ value = pvt->ctl_error_info.nbsl;
+
+ return sprintf(data, "%x\n", value);
+}
+
+/* store the NBSH (MCA NB Status High) value the user desires */
+static ssize_t amd64_nbsh_store(struct mem_ctl_info *mci, const char *data,
+ size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 16, &value);
+ if (ret != -EINVAL) {
+ debugf0("received NBSH= 0x%lx\n", value);
+
+ pvt->ctl_error_info.nbsh = (u32) value;
+
+ return count;
+ }
+ return ret;
+}
+
+/* display the last NBSH value that was written */
+static ssize_t amd64_nbsh_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 value;
+
+ value = pvt->ctl_error_info.nbsh;
+
+ return sprintf(data, "%x\n", value);
+}
+
+/* accept and store the NBCFG (MCA NB Configuration) value the user desires */
+static ssize_t amd64_nbcfg_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 16, &value);
+ if (ret != -EINVAL) {
+ debugf0("received NBCFG= 0x%lx\n", value);
+
+ pvt->ctl_error_info.nbcfg = (u32) value;
+
+ return count;
+ }
+ return ret;
+}
+
+/* various show routines for the controls of an MCI */
+static ssize_t amd64_nbcfg_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return sprintf(data, "%x\n", pvt->ctl_error_info.nbcfg);
+}
+
+
+static ssize_t amd64_dhar_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return sprintf(data, "%x\n", pvt->dhar);
+}
+
+
+static ssize_t amd64_dbam_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return sprintf(data, "%x\n", pvt->dbam0);
+}
+
+
+static ssize_t amd64_topmem_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return sprintf(data, "%llx\n", pvt->top_mem);
+}
+
+
+static ssize_t amd64_topmem2_show(struct mem_ctl_info *mci, char *data)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ return sprintf(data, "%llx\n", pvt->top_mem2);
+}
+
+static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
+{
+ u64 hole_base = 0;
+ u64 hole_offset = 0;
+ u64 hole_size = 0;
+
+ amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
+
+ return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
+ hole_size);
+}
+
+/*
+ * update NUM_DBG_ATTRS in case you add new members
+ */
+struct mcidev_sysfs_attribute amd64_dbg_attrs[] = {
+
+ {
+ .attr = {
+ .name = "nbea_ctl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = amd64_nbea_show,
+ .store = amd64_nbea_store,
+ },
+ {
+ .attr = {
+ .name = "nbsl_ctl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = amd64_nbsl_show,
+ .store = amd64_nbsl_store,
+ },
+ {
+ .attr = {
+ .name = "nbsh_ctl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = amd64_nbsh_show,
+ .store = amd64_nbsh_store,
+ },
+ {
+ .attr = {
+ .name = "nbcfg_ctl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = amd64_nbcfg_show,
+ .store = amd64_nbcfg_store,
+ },
+ {
+ .attr = {
+ .name = "dhar",
+ .mode = (S_IRUGO)
+ },
+ .show = amd64_dhar_show,
+ .store = NULL,
+ },
+ {
+ .attr = {
+ .name = "dbam",
+ .mode = (S_IRUGO)
+ },
+ .show = amd64_dbam_show,
+ .store = NULL,
+ },
+ {
+ .attr = {
+ .name = "topmem",
+ .mode = (S_IRUGO)
+ },
+ .show = amd64_topmem_show,
+ .store = NULL,
+ },
+ {
+ .attr = {
+ .name = "topmem2",
+ .mode = (S_IRUGO)
+ },
+ .show = amd64_topmem2_show,
+ .store = NULL,
+ },
+ {
+ .attr = {
+ .name = "dram_hole",
+ .mode = (S_IRUGO)
+ },
+ .show = amd64_hole_show,
+ .store = NULL,
+ },
+};
diff --git a/drivers/edac/amd64_edac_err_types.c b/drivers/edac/amd64_edac_err_types.c
new file mode 100644
index 00000000000..f212ff12a9d
--- /dev/null
+++ b/drivers/edac/amd64_edac_err_types.c
@@ -0,0 +1,161 @@
+#include "amd64_edac.h"
+
+/*
+ * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
+ * for DDR2 DRAM mapping.
+ */
+u32 revf_quad_ddr2_shift[] = {
+ 0, /* 0000b NULL DIMM (128mb) */
+ 28, /* 0001b 256mb */
+ 29, /* 0010b 512mb */
+ 29, /* 0011b 512mb */
+ 29, /* 0100b 512mb */
+ 30, /* 0101b 1gb */
+ 30, /* 0110b 1gb */
+ 31, /* 0111b 2gb */
+ 31, /* 1000b 2gb */
+ 32, /* 1001b 4gb */
+ 32, /* 1010b 4gb */
+ 33, /* 1011b 8gb */
+ 0, /* 1100b future */
+ 0, /* 1101b future */
+ 0, /* 1110b future */
+ 0 /* 1111b future */
+};
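+
+/*
+ * A minimal sketch: the shift table above turns a 4-bit DBAM chip-select
+ * code into a power of two, so a rough chip-select size in bytes can be
+ * derived as below; the helper name is hypothetical.
+ */
+static inline u64 example_cs_size_bytes(unsigned int cs_code)
+{
+	u32 shift = revf_quad_ddr2_shift[cs_code & 0xF];
+
+	/* A shift of 0 marks a NULL DIMM or a not-yet-defined encoding. */
+	return shift ? 1ULL << shift : 0;
+}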
+
+/*
+ * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
+ * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
+ * or higher value'.
+ *
+ * FIXME: Produce a better mapping/linearisation.
+ */
+
+struct scrubrate scrubrates[] = {
+ { 0x01, 1600000000UL},
+ { 0x02, 800000000UL},
+ { 0x03, 400000000UL},
+ { 0x04, 200000000UL},
+ { 0x05, 100000000UL},
+ { 0x06, 50000000UL},
+ { 0x07, 25000000UL},
+ { 0x08, 12284069UL},
+ { 0x09, 6274509UL},
+ { 0x0A, 3121951UL},
+ { 0x0B, 1560975UL},
+ { 0x0C, 781440UL},
+ { 0x0D, 390720UL},
+ { 0x0E, 195300UL},
+ { 0x0F, 97650UL},
+ { 0x10, 48854UL},
+ { 0x11, 24427UL},
+ { 0x12, 12213UL},
+ { 0x13, 6101UL},
+ { 0x14, 3051UL},
+ { 0x15, 1523UL},
+ { 0x16, 761UL},
+ { 0x00, 0UL}, /* scrubbing off */
+};
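+
+/*
+ * A minimal sketch of the 'set' operation described above: scan from the
+ * fastest rate down and pick the first entry whose bandwidth does not exceed
+ * the requested one.  The helper name is hypothetical; a full implementation
+ * would also honour the per-family K8/F10/F11_MIN_SCRUB_RATE_BITS values
+ * declared in amd64_edac.h.
+ */
+static inline u32 example_scrubval_for(u32 requested_bandwidth)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(scrubrates); i++)
+		if (scrubrates[i].bandwidth <= requested_bandwidth)
+			return scrubrates[i].scrubval;
+
+	/* Not reached in practice: the final 'scrubbing off' entry matches. */
+	return 0;
+}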
+
+/*
+ * String representations for the different MCA-reported error types; see F3x48
+ * or MSR0000_0411.
+ */
+const char *tt_msgs[] = { /* transaction type */
+ "instruction",
+ "data",
+ "generic",
+ "reserved"
+};
+
+const char *ll_msgs[] = { /* cache level */
+ "L0",
+ "L1",
+ "L2",
+ "L3/generic"
+};
+
+const char *rrrr_msgs[] = {
+ "generic",
+ "generic read",
+ "generic write",
+ "data read",
+ "data write",
+ "inst fetch",
+ "prefetch",
+ "evict",
+ "snoop",
+ "reserved RRRR= 9",
+ "reserved RRRR= 10",
+ "reserved RRRR= 11",
+ "reserved RRRR= 12",
+ "reserved RRRR= 13",
+ "reserved RRRR= 14",
+ "reserved RRRR= 15"
+};
+
+const char *pp_msgs[] = { /* participating processor */
+ "local node originated (SRC)",
+ "local node responded to request (RES)",
+ "local node observed as 3rd party (OBS)",
+ "generic"
+};
+
+const char *to_msgs[] = {
+ "no timeout",
+ "timed out"
+};
+
+const char *ii_msgs[] = { /* memory or i/o */
+ "mem access",
+ "reserved",
+ "i/o access",
+ "generic"
+};
+
+/* Map the 5 bits of Extended Error code to the string table. */
+const char *ext_msgs[] = { /* extended error */
+ "K8 ECC error/F10 reserved", /* 0_0000b */
+ "CRC error", /* 0_0001b */
+ "sync error", /* 0_0010b */
+ "mst abort", /* 0_0011b */
+ "tgt abort", /* 0_0100b */
+ "GART error", /* 0_0101b */
+ "RMW error", /* 0_0110b */
+ "Wdog timer error", /* 0_0111b */
+ "F10-ECC/K8-Chipkill error", /* 0_1000b */
+ "DEV Error", /* 0_1001b */
+ "Link Data error", /* 0_1010b */
+ "Link or L3 Protocol error", /* 0_1011b */
+ "NB Array error", /* 0_1100b */
+ "DRAM Parity error", /* 0_1101b */
+ "Link Retry/GART Table Walk/DEV Table Walk error", /* 0_1110b */
+ "Res 0x0ff error", /* 0_1111b */
+ "Res 0x100 error", /* 1_0000b */
+ "Res 0x101 error", /* 1_0001b */
+ "Res 0x102 error", /* 1_0010b */
+ "Res 0x103 error", /* 1_0011b */
+ "Res 0x104 error", /* 1_0100b */
+ "Res 0x105 error", /* 1_0101b */
+ "Res 0x106 error", /* 1_0110b */
+ "Res 0x107 error", /* 1_0111b */
+ "Res 0x108 error", /* 1_1000b */
+ "Res 0x109 error", /* 1_1001b */
+ "Res 0x10A error", /* 1_1010b */
+ "Res 0x10B error", /* 1_1011b */
+ "L3 Cache Data error", /* 1_1100b */
+ "L3 CacheTag error", /* 1_1101b */
+ "L3 Cache LRU error", /* 1_1110b */
+ "Res 0x1FF error" /* 1_1111b */
+};
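+
+/*
+ * A minimal lookup sketch; 'ext_ec' is assumed to be the already-extracted
+ * 5-bit extended error code, and masking keeps the index inside the 32-entry
+ * table above.  The helper name is hypothetical.
+ */
+static inline const char *example_ext_err_msg(unsigned int ext_ec)
+{
+	return ext_msgs[ext_ec & 0x1f];
+}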
+
+const char *htlink_msgs[] = {
+ "none",
+ "1",
+ "2",
+ "1 2",
+ "3",
+ "1 3",
+ "2 3",
+ "1 2 3"
+};
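+
+/*
+ * A minimal lookup sketch, assuming EXTRACT_LDT_LINK() is applied to the NB
+ * Status High value (as its placement among the NBSH definitions suggests);
+ * the resulting 3-bit field indexes directly into the 8-entry table above.
+ * The helper name is hypothetical.
+ */
+static inline const char *example_htlink_msg(u32 nbsh)
+{
+	return htlink_msgs[EXTRACT_LDT_LINK(nbsh)];
+}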
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
new file mode 100644
index 00000000000..d3675b76b3a
--- /dev/null
+++ b/drivers/edac/amd64_edac_inj.c
@@ -0,0 +1,185 @@
+#include "amd64_edac.h"
+
+/*
+ * store the error injection section value, which refers to one of the 4
+ * 16-byte sections within a 64-byte cacheline
+ *
+ * range: 0..3
+ */
+static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 10, &value);
+ if (ret != -EINVAL) {
+ pvt->injection.section = (u32) value;
+ return count;
+ }
+ return ret;
+}
+
+/*
+ * store the error injection word value, which refers to one of the 9 16-bit
+ * words of the 16-byte (128-bit + ECC bits) section
+ *
+ * range: 0..8
+ */
+static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 10, &value);
+ if (ret != -EINVAL) {
+
+ value = (value <= 8) ? value : 0;
+ pvt->injection.word = (u32) value;
+
+ return count;
+ }
+ return ret;
+}
+
+/*
+ * store the 16-bit error injection vector, which enables injecting errors
+ * into the corresponding bits of the error injection word above. When used
+ * during a DRAM ECC read, it holds the contents of the DRAM ECC bits.
+ */
+static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 16, &value);
+ if (ret != -EINVAL) {
+
+ pvt->injection.bit_map = (u32) value & 0xFFFF;
+
+ return count;
+ }
+ return ret;
+}
+
+/*
+ * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
+ * fields needed by the injection registers and read the NB Array Data Port.
+ */
+static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ u32 section, word_bits;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 10, &value);
+ if (ret != -EINVAL) {
+
+ /* Form value to choose 16-byte section of cacheline */
+ section = F10_NB_ARRAY_DRAM_ECC |
+ SET_NB_ARRAY_ADDRESS(pvt->injection.section);
+ pci_write_config_dword(pvt->misc_f3_ctl,
+ F10_NB_ARRAY_ADDR, section);
+
+ word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
+ pvt->injection.bit_map);
+
+ /* Issue 'word' and 'bit' along with the READ request */
+ pci_write_config_dword(pvt->misc_f3_ctl,
+ F10_NB_ARRAY_DATA, word_bits);
+
+ debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
+
+ return count;
+ }
+ return ret;
+}
+
+/*
+ * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
+ * fields needed by the injection registers.
+ */
+static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ u32 section, word_bits;
+ int ret = 0;
+
+ ret = strict_strtoul(data, 10, &value);
+ if (ret != -EINVAL) {
+
+ /* Form value to choose 16-byte section of cacheline */
+ section = F10_NB_ARRAY_DRAM_ECC |
+ SET_NB_ARRAY_ADDRESS(pvt->injection.section);
+ pci_write_config_dword(pvt->misc_f3_ctl,
+ F10_NB_ARRAY_ADDR, section);
+
+ word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
+ pvt->injection.bit_map);
+
+ /* Issue 'word' and 'bit' along with the WRITE request */
+ pci_write_config_dword(pvt->misc_f3_ctl,
+ F10_NB_ARRAY_DATA, word_bits);
+
+ debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
+
+ return count;
+ }
+ return ret;
+}
+
+/*
+ * update NUM_INJ_ATTRS in case you add new members
+ */
+struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
+
+ {
+ .attr = {
+ .name = "inject_section",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = NULL,
+ .store = amd64_inject_section_store,
+ },
+ {
+ .attr = {
+ .name = "inject_word",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = NULL,
+ .store = amd64_inject_word_store,
+ },
+ {
+ .attr = {
+ .name = "inject_ecc_vector",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = NULL,
+ .store = amd64_inject_ecc_vector_store,
+ },
+ {
+ .attr = {
+ .name = "inject_write",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = NULL,
+ .store = amd64_inject_write_store,
+ },
+ {
+ .attr = {
+ .name = "inject_read",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = NULL,
+ .store = amd64_inject_read_store,
+ },
+};
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index 61469218112..2cb58ef743e 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -389,7 +389,7 @@ static int amd8111_dev_probe(struct pci_dev *dev,
dev_info->edac_dev->dev = &dev_info->dev->dev;
dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
dev_info->edac_dev->ctl_name = dev_info->ctl_name;
- dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id;
+ dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
if (edac_op_state == EDAC_OPSTATE_POLL)
dev_info->edac_dev->edac_check = dev_info->check;
@@ -473,7 +473,7 @@ static int amd8111_pci_probe(struct pci_dev *dev,
pci_info->edac_dev->dev = &pci_info->dev->dev;
pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
pci_info->edac_dev->ctl_name = pci_info->ctl_name;
- pci_info->edac_dev->dev_name = pci_info->dev->dev.bus_id;
+ pci_info->edac_dev->dev_name = dev_name(&pci_info->dev->dev);
if (edac_op_state == EDAC_OPSTATE_POLL)
pci_info->edac_dev->edac_check = pci_info->check;
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
index c083b31cac5..b432d60c622 100644
--- a/drivers/edac/amd8131_edac.c
+++ b/drivers/edac/amd8131_edac.c
@@ -287,7 +287,7 @@ static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_info->edac_dev->dev = &dev_info->dev->dev;
dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
dev_info->edac_dev->ctl_name = dev_info->ctl_name;
- dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id;
+ dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
if (edac_op_state == EDAC_OPSTATE_POLL)
dev_info->edac_dev->edac_check = amd8131_chipset.check;
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index facfdb1fa71..d205d493a68 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -1084,7 +1084,7 @@ static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
struct pci_dev *dev = pvt->dev_d0f1;
int enable = 1;
- /* Allow module paramter override, else see if CPU supports parity */
+ /* Allow module parameter override, else see if CPU supports parity */
if (sysbus_parity != -1) {
enable = sysbus_parity;
} else if (cpu_id[0] &&
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 6ad95c8d636..48d3b140983 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -76,10 +76,11 @@
extern int edac_debug_level;
#ifndef CONFIG_EDAC_DEBUG_VERBOSE
-#define edac_debug_printk(level, fmt, arg...) \
- do { \
- if (level <= edac_debug_level) \
- edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \
+#define edac_debug_printk(level, fmt, arg...) \
+ do { \
+ if (level <= edac_debug_level) \
+ edac_printk(KERN_DEBUG, EDAC_DEBUG, \
+ "%s: " fmt, __func__, ##arg); \
} while (0)
#else /* CONFIG_EDAC_DEBUG_VERBOSE */
#define edac_debug_printk(level, fmt, arg...) \
diff --git a/drivers/eisa/eisa.ids b/drivers/eisa/eisa.ids
index ed69837d8b7..6cbb7a51443 100644
--- a/drivers/eisa/eisa.ids
+++ b/drivers/eisa/eisa.ids
@@ -1140,6 +1140,11 @@ NON0301 "c't Universale Graphic Adapter"
NON0401 "c't Universal Ethernet Adapter"
NON0501 "c't Universal 16-Bit Sound Adapter"
NON0601 "c't Universal 8-Bit Adapter"
+NPI0120 "Network Peripherals NP-EISA-1 FDDI Interface"
+NPI0221 "Network Peripherals NP-EISA-2 FDDI Interface"
+NPI0223 "Network Peripherals NP-EISA-2E Enhanced FDDI Interface"
+NPI0301 "Network Peripherals NP-EISA-3 FDDI Interface"
+NPI0303 "Network Peripherals NP-EISA-3E Enhanced FDDI Interface"
NSS0011 "Newport Systems Solutions WNIC Adapter"
NVL0701 "Novell NE3200 Bus Master Ethernet"
NVL0702 "Novell NE3200T Bus Master Ethernet"
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
index 74edb1d0110..0dd0f633b18 100644
--- a/drivers/eisa/pci_eisa.c
+++ b/drivers/eisa/pci_eisa.c
@@ -31,11 +31,11 @@ static int __init pci_eisa_init(struct pci_dev *pdev,
}
pci_eisa_root.dev = &pdev->dev;
- pci_eisa_root.dev->driver_data = &pci_eisa_root;
pci_eisa_root.res = pdev->bus->resource[0];
pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start;
pci_eisa_root.slots = EISA_MAX_SLOTS;
pci_eisa_root.dma_mask = pdev->dma_mask;
+ dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root);
if (eisa_root_register (&pci_eisa_root)) {
printk (KERN_ERR "pci_eisa : Could not register EISA root\n");
diff --git a/drivers/eisa/virtual_root.c b/drivers/eisa/virtual_root.c
index 3074879f231..535e4f9c83f 100644
--- a/drivers/eisa/virtual_root.c
+++ b/drivers/eisa/virtual_root.c
@@ -57,7 +57,7 @@ static int __init virtual_eisa_root_init (void)
eisa_bus_root.force_probe = force_probe;
- eisa_root_dev.dev.driver_data = &eisa_bus_root;
+ dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root);
if (eisa_root_register (&eisa_bus_root)) {
/* A real bridge may have been registered before
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index a7c31e9039c..bc3b9bf822b 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -2,10 +2,10 @@
# Makefile for the Linux IEEE 1394 implementation
#
-firewire-core-y += fw-card.o fw-topology.o fw-transaction.o fw-iso.o \
- fw-device.o fw-cdev.o
-firewire-ohci-y += fw-ohci.o
-firewire-sbp2-y += fw-sbp2.o
+firewire-core-y += core-card.o core-cdev.o core-device.o \
+ core-iso.o core-topology.o core-transaction.o
+firewire-ohci-y += ohci.o
+firewire-sbp2-y += sbp2.o
obj-$(CONFIG_FIREWIRE) += firewire-core.o
obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/core-card.c
index 8b8c8c22f0f..4c1be64fddd 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/core-card.c
@@ -16,18 +16,27 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/crc-itu-t.h>
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/kref.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
-#include "fw-device.h"
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
int fw_compute_block_crc(u32 *block)
{
@@ -181,12 +190,6 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
mutex_unlock(&card_mutex);
}
-static int set_broadcast_channel(struct device *dev, void *data)
-{
- fw_device_set_broadcast_channel(fw_device(dev), (long)data);
- return 0;
-}
-
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
int channel, bandwidth = 0;
@@ -196,7 +199,7 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
if (channel == 31) {
card->broadcast_channel_allocated = true;
device_for_each_child(card->device, (void *)(long)generation,
- set_broadcast_channel);
+ fw_device_set_broadcast_channel);
}
}
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/core-cdev.c
index 7eb6594cc3e..d1d30c615b0 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
@@ -34,16 +35,14 @@
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/time.h>
+#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
-#include "fw-device.h"
-#include "fw-topology.h"
-#include "fw-transaction.h"
+#include "core.h"
struct client {
u32 version;
@@ -739,15 +738,11 @@ static void release_descriptor(struct client *client,
static int ioctl_add_descriptor(struct client *client, void *buffer)
{
struct fw_cdev_add_descriptor *request = buffer;
- struct fw_card *card = client->device->card;
struct descriptor_resource *r;
int ret;
/* Access policy: Allow this ioctl only on local nodes' device files. */
- spin_lock_irq(&card->lock);
- ret = client->device->node_id != card->local_node->node_id;
- spin_unlock_irq(&card->lock);
- if (ret)
+ if (!client->device->is_local)
return -ENOSYS;
if (request->length > 256)
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/core-device.c
index a47e2129d83..97e656af2d2 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/core-device.c
@@ -22,10 +22,14 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kobject.h>
#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
@@ -33,11 +37,11 @@
#include <linux/string.h>
#include <linux/workqueue.h>
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
#include <asm/system.h>
-#include "fw-device.h"
-#include "fw-topology.h"
-#include "fw-transaction.h"
+#include "core.h"
void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
{
@@ -55,9 +59,10 @@ int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
}
EXPORT_SYMBOL(fw_csr_iterator_next);
-static int is_fw_unit(struct device *dev);
+static bool is_fw_unit(struct device *dev);
-static int match_unit_directory(u32 * directory, const struct fw_device_id *id)
+static int match_unit_directory(u32 *directory, u32 match_flags,
+ const struct ieee1394_device_id *id)
{
struct fw_csr_iterator ci;
int key, value, match;
@@ -65,31 +70,42 @@ static int match_unit_directory(u32 * directory, const struct fw_device_id *id)
match = 0;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
- if (key == CSR_VENDOR && value == id->vendor)
- match |= FW_MATCH_VENDOR;
- if (key == CSR_MODEL && value == id->model)
- match |= FW_MATCH_MODEL;
+ if (key == CSR_VENDOR && value == id->vendor_id)
+ match |= IEEE1394_MATCH_VENDOR_ID;
+ if (key == CSR_MODEL && value == id->model_id)
+ match |= IEEE1394_MATCH_MODEL_ID;
if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
- match |= FW_MATCH_SPECIFIER_ID;
+ match |= IEEE1394_MATCH_SPECIFIER_ID;
if (key == CSR_VERSION && value == id->version)
- match |= FW_MATCH_VERSION;
+ match |= IEEE1394_MATCH_VERSION;
}
- return (match & id->match_flags) == id->match_flags;
+ return (match & match_flags) == match_flags;
}
static int fw_unit_match(struct device *dev, struct device_driver *drv)
{
struct fw_unit *unit = fw_unit(dev);
- struct fw_driver *driver = fw_driver(drv);
- int i;
+ struct fw_device *device;
+ const struct ieee1394_device_id *id;
/* We only allow binding to fw_units. */
if (!is_fw_unit(dev))
return 0;
- for (i = 0; driver->id_table[i].match_flags != 0; i++) {
- if (match_unit_directory(unit->directory, &driver->id_table[i]))
+ device = fw_parent_device(unit);
+ id = container_of(drv, struct fw_driver, driver)->id_table;
+
+ for (; id->match_flags != 0; id++) {
+ if (match_unit_directory(unit->directory, id->match_flags, id))
+ return 1;
+
+ /* Also check vendor ID in the root directory. */
+ if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
+ match_unit_directory(&device->config_rom[5],
+ IEEE1394_MATCH_VENDOR_ID, id) &&
+ match_unit_directory(unit->directory, id->match_flags
+ & ~IEEE1394_MATCH_VENDOR_ID, id))
return 1;
}
@@ -98,7 +114,7 @@ static int fw_unit_match(struct device *dev, struct device_driver *drv)
static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
{
- struct fw_device *device = fw_device(unit->device.parent);
+ struct fw_device *device = fw_parent_device(unit);
struct fw_csr_iterator ci;
int key, value;
@@ -292,8 +308,7 @@ static void init_fw_attribute_group(struct device *dev,
group->attrs[j++] = &attr->attr;
}
- BUG_ON(j >= ARRAY_SIZE(group->attrs));
- group->attrs[j++] = NULL;
+ group->attrs[j] = NULL;
group->groups[0] = &group->group;
group->groups[1] = NULL;
group->group.attrs = group->attrs;
@@ -356,9 +371,56 @@ static ssize_t guid_show(struct device *dev,
return ret;
}
+static int units_sprintf(char *buf, u32 *directory)
+{
+ struct fw_csr_iterator ci;
+ int key, value;
+ int specifier_id = 0;
+ int version = 0;
+
+ fw_csr_iterator_init(&ci, directory);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ switch (key) {
+ case CSR_SPECIFIER_ID:
+ specifier_id = value;
+ break;
+ case CSR_VERSION:
+ version = value;
+ break;
+ }
+ }
+
+ return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version);
+}
+
+static ssize_t units_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_device *device = fw_device(dev);
+ struct fw_csr_iterator ci;
+ int key, value, i = 0;
+
+ down_read(&fw_device_rwsem);
+ fw_csr_iterator_init(&ci, &device->config_rom[5]);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ if (key != (CSR_UNIT | CSR_DIRECTORY))
+ continue;
+ i += units_sprintf(&buf[i], ci.p + value - 1);
+ if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
+ break;
+ }
+ up_read(&fw_device_rwsem);
+
+ if (i)
+ buf[i - 1] = '\n';
+
+ return i;
+}
+
static struct device_attribute fw_device_attributes[] = {
__ATTR_RO(config_rom),
__ATTR_RO(guid),
+ __ATTR_RO(units),
__ATTR_NULL,
};
@@ -518,7 +580,9 @@ static int read_bus_info_block(struct fw_device *device, int generation)
kfree(old_rom);
ret = 0;
- device->cmc = rom[2] >> 30 & 1;
+ device->max_rec = rom[2] >> 12 & 0xf;
+ device->cmc = rom[2] >> 30 & 1;
+ device->irmc = rom[2] >> 31 & 1;
out:
kfree(rom);
@@ -537,7 +601,7 @@ static struct device_type fw_unit_type = {
.release = fw_unit_release,
};
-static int is_fw_unit(struct device *dev)
+static bool is_fw_unit(struct device *dev)
{
return dev->type == &fw_unit_type;
}
@@ -570,9 +634,13 @@ static void create_units(struct fw_device *device)
unit->device.parent = &device->device;
dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);
+ BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) <
+ ARRAY_SIZE(fw_unit_attributes) +
+ ARRAY_SIZE(config_rom_attributes));
init_fw_attribute_group(&unit->device,
fw_unit_attributes,
&unit->attribute_group);
+
if (device_register(&unit->device) < 0)
goto skip_unit;
@@ -683,6 +751,11 @@ static struct device_type fw_device_type = {
.release = fw_device_release,
};
+static bool is_fw_device(struct device *dev)
+{
+ return dev->type == &fw_device_type;
+}
+
static int update_unit(struct device *dev, void *data)
{
struct fw_unit *unit = fw_unit(dev);
@@ -719,6 +792,9 @@ static int lookup_existing_device(struct device *dev, void *data)
struct fw_card *card = new->card;
int match = 0;
+ if (!is_fw_device(dev))
+ return 0;
+
down_read(&fw_device_rwsem); /* serialize config_rom access */
spin_lock_irq(&card->lock); /* serialize node access */
@@ -758,7 +834,7 @@ static int lookup_existing_device(struct device *dev, void *data)
enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
-void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
+static void set_broadcast_channel(struct fw_device *device, int generation)
{
struct fw_card *card = device->card;
__be32 data;
@@ -767,6 +843,20 @@ void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
if (!card->broadcast_channel_allocated)
return;
+ /*
+ * The Broadcast_Channel Valid bit is required by nodes which want to
+ * transmit on this channel. Such transmissions are practically
+ * exclusive to IP over 1394 (RFC 2734). IP capable nodes are required
+ * to be IRM capable and have a max_rec of 8 or more. We use this fact
+ * to narrow down to which nodes we send Broadcast_Channel updates.
+ */
+ if (!device->irmc || device->max_rec < 8)
+ return;
+
+ /*
+ * Some 1394-1995 nodes crash if this 1394a-2000 register is written.
+ * Perform a read test first.
+ */
if (device->bc_implemented == BC_UNKNOWN) {
rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
device->node_id, generation, device->max_speed,
@@ -794,6 +884,14 @@ void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
}
}
+int fw_device_set_broadcast_channel(struct device *dev, void *gen)
+{
+ if (is_fw_device(dev))
+ set_broadcast_channel(fw_device(dev), (long)gen);
+
+ return 0;
+}
+
static void fw_device_init(struct work_struct *work)
{
struct fw_device *device =
@@ -849,9 +947,13 @@ static void fw_device_init(struct work_struct *work)
device->device.devt = MKDEV(fw_cdev_major, minor);
dev_set_name(&device->device, "fw%d", minor);
+ BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) <
+ ARRAY_SIZE(fw_device_attributes) +
+ ARRAY_SIZE(config_rom_attributes));
init_fw_attribute_group(&device->device,
fw_device_attributes,
&device->attribute_group);
+
if (device_add(&device->device)) {
fw_error("Failed to add device.\n");
goto error_with_cdev;
@@ -888,7 +990,7 @@ static void fw_device_init(struct work_struct *work)
1 << device->max_speed);
device->config_rom_retries = 0;
- fw_device_set_broadcast_channel(device, device->generation);
+ set_broadcast_channel(device, device->generation);
}
/*
@@ -993,6 +1095,9 @@ static void fw_device_refresh(struct work_struct *work)
create_units(device);
+ /* Userspace may want to re-read attributes. */
+ kobject_uevent(&device->device.kobj, KOBJ_CHANGE);
+
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
@@ -1042,6 +1147,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
device->node = fw_node_get(node);
device->node_id = node->node_id;
device->generation = card->generation;
+ device->is_local = node == card->local_node;
mutex_init(&device->client_list_mutex);
INIT_LIST_HEAD(&device->client_list);
@@ -1075,7 +1181,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
schedule_delayed_work(&device->work,
- node == card->local_node ? 0 : INITIAL_DELAY);
+ device->is_local ? 0 : INITIAL_DELAY);
}
break;
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/core-iso.c
index 2baf1007253..28076c892d7 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -22,14 +22,16 @@
#include <linux/dma-mapping.h>
#include <linux/errno.h>
+#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
-#include "fw-topology.h"
-#include "fw-transaction.h"
+#include <asm/byteorder.h>
+
+#include "core.h"
/*
* Isochronous DMA context management
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/core-topology.c
index d0deecc4de9..fddf2b35893 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -18,13 +18,22 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/module.h>
-#include <linux/wait.h>
+#include <linux/bug.h>
#include <linux/errno.h>
-#include <asm/bug.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <asm/atomic.h>
#include <asm/system.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
+
+#include "core.h"
#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
@@ -37,6 +46,11 @@
#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
+#define SELFID_PORT_CHILD 0x3
+#define SELFID_PORT_PARENT 0x2
+#define SELFID_PORT_NCONN 0x1
+#define SELFID_PORT_NONE 0x0
+
static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
u32 q;
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/core-transaction.c
index 283dac6d327..479b22f5a1e 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -18,24 +18,28 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/bug.h>
#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/fs.h>
+#include <linux/init.h>
#include <linux/idr.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/poll.h>
#include <linux/list.h>
-#include <linux/kthread.h>
-#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/types.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
-#include "fw-device.h"
+#include <asm/byteorder.h>
+
+#include "core.h"
#define HEADER_PRI(pri) ((pri) << 0)
#define HEADER_TCODE(tcode) ((tcode) << 4)
@@ -60,6 +64,10 @@
#define HEADER_DESTINATION_IS_BROADCAST(q) \
(((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))
+#define PHY_PACKET_CONFIG 0x0
+#define PHY_PACKET_LINK_ON 0x1
+#define PHY_PACKET_SELF_ID 0x2
+
#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id) ((id) << 30)
@@ -74,7 +82,7 @@ static int close_transaction(struct fw_transaction *transaction,
list_for_each_entry(t, &card->transaction_list, link) {
if (t == transaction) {
list_del(&t->link);
- card->tlabel_mask &= ~(1 << t->tlabel);
+ card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
}
}
@@ -280,14 +288,14 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
spin_lock_irqsave(&card->lock, flags);
tlabel = card->current_tlabel;
- if (card->tlabel_mask & (1 << tlabel)) {
+ if (card->tlabel_mask & (1ULL << tlabel)) {
spin_unlock_irqrestore(&card->lock, flags);
callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
return;
}
- card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
- card->tlabel_mask |= (1 << tlabel);
+ card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
+ card->tlabel_mask |= (1ULL << tlabel);
t->node_id = destination_id;
t->tlabel = tlabel;
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
new file mode 100644
index 00000000000..0a25a7b38a8
--- /dev/null
+++ b/drivers/firewire/core.h
@@ -0,0 +1,293 @@
+#ifndef _FIREWIRE_CORE_H
+#define _FIREWIRE_CORE_H
+
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/mm_types.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+
+struct device;
+struct fw_card;
+struct fw_device;
+struct fw_iso_buffer;
+struct fw_iso_context;
+struct fw_iso_packet;
+struct fw_node;
+struct fw_packet;
+
+
+/* -card */
+
+/* bitfields within the PHY registers */
+#define PHY_LINK_ACTIVE 0x80
+#define PHY_CONTENDER 0x40
+#define PHY_BUS_RESET 0x40
+#define PHY_BUS_SHORT_RESET 0x40
+
+#define BANDWIDTH_AVAILABLE_INITIAL 4915
+#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
+#define BROADCAST_CHANNEL_VALID (1 << 30)
+
+struct fw_card_driver {
+ /*
+ * Enable the given card with the given initial config rom.
+ * This function is expected to activate the card, and either
+ * enable the PHY or set the link_on bit and initiate a bus
+ * reset.
+ */
+ int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
+
+ int (*update_phy_reg)(struct fw_card *card, int address,
+ int clear_bits, int set_bits);
+
+ /*
+ * Update the config rom for an enabled card. This function
+ * should change the config rom that is presented on the bus
+ * and initiate a bus reset.
+ */
+ int (*set_config_rom)(struct fw_card *card,
+ u32 *config_rom, size_t length);
+
+ void (*send_request)(struct fw_card *card, struct fw_packet *packet);
+ void (*send_response)(struct fw_card *card, struct fw_packet *packet);
+ /* Calling cancel is valid once a packet has been submitted. */
+ int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
+
+ /*
+ * Allow the specified node ID to do direct DMA out and in of
+ * host memory. The card will disable this for all nodes when
+ * a bus reset happens, so the driver needs to re-enable this after
+ * a bus reset. Returns 0 on success, -ENODEV if the card
+ * doesn't support this, -ESTALE if the generation doesn't
+ * match.
+ */
+ int (*enable_phys_dma)(struct fw_card *card,
+ int node_id, int generation);
+
+ u64 (*get_bus_time)(struct fw_card *card);
+
+ struct fw_iso_context *
+ (*allocate_iso_context)(struct fw_card *card,
+ int type, int channel, size_t header_size);
+ void (*free_iso_context)(struct fw_iso_context *ctx);
+
+ int (*start_iso)(struct fw_iso_context *ctx,
+ s32 cycle, u32 sync, u32 tags);
+
+ int (*queue_iso)(struct fw_iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload);
+
+ int (*stop_iso)(struct fw_iso_context *ctx);
+};
+
+void fw_card_initialize(struct fw_card *card,
+ const struct fw_card_driver *driver, struct device *device);
+int fw_card_add(struct fw_card *card,
+ u32 max_receive, u32 link_speed, u64 guid);
+void fw_core_remove_card(struct fw_card *card);
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
+int fw_compute_block_crc(u32 *block);
+void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
+
+struct fw_descriptor {
+ struct list_head link;
+ size_t length;
+ u32 immediate;
+ u32 key;
+ const u32 *data;
+};
+
+int fw_core_add_descriptor(struct fw_descriptor *desc);
+void fw_core_remove_descriptor(struct fw_descriptor *desc);
+
+
+/* -cdev */
+
+extern const struct file_operations fw_device_ops;
+
+void fw_device_cdev_update(struct fw_device *device);
+void fw_device_cdev_remove(struct fw_device *device);
+
+
+/* -device */
+
+extern struct rw_semaphore fw_device_rwsem;
+extern struct idr fw_device_idr;
+extern int fw_cdev_major;
+
+struct fw_device *fw_device_get_by_devt(dev_t devt);
+int fw_device_set_broadcast_channel(struct device *dev, void *gen);
+void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
+
+
+/* -iso */
+
+/*
+ * The iso packet format allows for an immediate header/payload part
+ * stored in 'header' immediately after the packet info plus an
+ * indirect payload part that is pointed to by the 'payload' field.
+ * Applications can use one or the other or both to implement simple
+ * low-bandwidth streaming (e.g. audio) or more advanced
+ * scatter-gather streaming (e.g. assembling video frames automatically).
+ */
+struct fw_iso_packet {
+ u16 payload_length; /* Length of indirect payload. */
+ u32 interrupt:1; /* Generate interrupt on this packet */
+ u32 skip:1; /* Set to not send packet at all. */
+ u32 tag:2;
+ u32 sy:4;
+ u32 header_length:8; /* Length of immediate header. */
+ u32 header[0];
+};
+
+#define FW_ISO_CONTEXT_TRANSMIT 0
+#define FW_ISO_CONTEXT_RECEIVE 1
+
+#define FW_ISO_CONTEXT_MATCH_TAG0 1
+#define FW_ISO_CONTEXT_MATCH_TAG1 2
+#define FW_ISO_CONTEXT_MATCH_TAG2 4
+#define FW_ISO_CONTEXT_MATCH_TAG3 8
+#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
+
+/*
+ * An iso buffer is just a set of pages mapped for DMA in the
+ * specified direction. Since the pages are to be used for DMA, they
+ * are not mapped into the kernel virtual address space. We store the
+ * DMA address in the page private. The helper function
+ * fw_iso_buffer_map() will map the pages into a given vma.
+ */
+struct fw_iso_buffer {
+ enum dma_data_direction direction;
+ struct page **pages;
+ int page_count;
+};
+
+typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
+ u32 cycle, size_t header_length,
+ void *header, void *data);
+
+struct fw_iso_context {
+ struct fw_card *card;
+ int type;
+ int channel;
+ int speed;
+ size_t header_size;
+ fw_iso_callback_t callback;
+ void *callback_data;
+};
+
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+ int page_count, enum dma_data_direction direction);
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+ int type, int channel, int speed, size_t header_size,
+ fw_iso_callback_t callback, void *callback_data);
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload);
+int fw_iso_context_start(struct fw_iso_context *ctx,
+ int cycle, int sync, int tags);
+int fw_iso_context_stop(struct fw_iso_context *ctx);
+void fw_iso_context_destroy(struct fw_iso_context *ctx);
+
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+ u64 channels_mask, int *channel, int *bandwidth, bool allocate);
+
+
+/* -topology */
+
+enum {
+ FW_NODE_CREATED,
+ FW_NODE_UPDATED,
+ FW_NODE_DESTROYED,
+ FW_NODE_LINK_ON,
+ FW_NODE_LINK_OFF,
+ FW_NODE_INITIATED_RESET,
+};
+
+struct fw_node {
+ u16 node_id;
+ u8 color;
+ u8 port_count;
+ u8 link_on:1;
+ u8 initiated_reset:1;
+ u8 b_path:1;
+ u8 phy_speed:2; /* As in the self ID packet. */
+ u8 max_speed:2; /* Minimum of all phy-speeds on the path from the
+ * local node to this node. */
+ u8 max_depth:4; /* Maximum depth to any leaf node */
+ u8 max_hops:4; /* Max hops in this sub tree */
+ atomic_t ref_count;
+
+ /* For serializing node topology into a list. */
+ struct list_head link;
+
+ /* Upper layer specific data. */
+ void *data;
+
+ struct fw_node *ports[0];
+};
+
+static inline struct fw_node *fw_node_get(struct fw_node *node)
+{
+ atomic_inc(&node->ref_count);
+
+ return node;
+}
+
+static inline void fw_node_put(struct fw_node *node)
+{
+ if (atomic_dec_and_test(&node->ref_count))
+ kfree(node);
+}
+
+void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
+ int generation, int self_id_count, u32 *self_ids);
+void fw_destroy_nodes(struct fw_card *card);
+
+/*
+ * Check whether new_generation is the immediate successor of old_generation.
+ * Take counter roll-over at 255 (as per OHCI) into account.
+ */
+static inline bool is_next_generation(int new_generation, int old_generation)
+{
+ return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
+}
+
+
+/* -transaction */
+
+#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
+#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
+#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
+#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
+#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
+#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
+
+#define LOCAL_BUS 0xffc0
+
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
+void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
+void fw_fill_response(struct fw_packet *response, u32 *request_header,
+ int rcode, void *payload, size_t length);
+void fw_flush_transactions(struct fw_card *card);
+void fw_send_phy_config(struct fw_card *card,
+ int node_id, int generation, int gap_count);
+
+static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
+{
+ return tag << 14 | channel << 8 | sy;
+}
+
+#endif /* _FIREWIRE_CORE_H */
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
deleted file mode 100644
index 97588937c01..00000000000
--- a/drivers/firewire/fw-device.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __fw_device_h
-#define __fw_device_h
-
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/idr.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/rwsem.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
-
-#include <asm/atomic.h>
-
-enum fw_device_state {
- FW_DEVICE_INITIALIZING,
- FW_DEVICE_RUNNING,
- FW_DEVICE_GONE,
- FW_DEVICE_SHUTDOWN,
-};
-
-struct fw_attribute_group {
- struct attribute_group *groups[2];
- struct attribute_group group;
- struct attribute *attrs[11];
-};
-
-struct fw_node;
-struct fw_card;
-
-/*
- * Note, fw_device.generation always has to be read before fw_device.node_id.
- * Use SMP memory barriers to ensure this. Otherwise requests will be sent
- * to an outdated node_id if the generation was updated in the meantime due
- * to a bus reset.
- *
- * Likewise, fw-core will take care to update .node_id before .generation so
- * that whenever fw_device.generation is current WRT the actual bus generation,
- * fw_device.node_id is guaranteed to be current too.
- *
- * The same applies to fw_device.card->node_id vs. fw_device.generation.
- *
- * fw_device.config_rom and fw_device.config_rom_length may be accessed during
- * the lifetime of any fw_unit belonging to the fw_device, before device_del()
- * was called on the last fw_unit. Alternatively, they may be accessed while
- * holding fw_device_rwsem.
- */
-struct fw_device {
- atomic_t state;
- struct fw_node *node;
- int node_id;
- int generation;
- unsigned max_speed;
- struct fw_card *card;
- struct device device;
-
- struct mutex client_list_mutex;
- struct list_head client_list;
-
- u32 *config_rom;
- size_t config_rom_length;
- int config_rom_retries;
- unsigned cmc:1;
- unsigned bc_implemented:2;
-
- struct delayed_work work;
- struct fw_attribute_group attribute_group;
-};
-
-static inline struct fw_device *fw_device(struct device *dev)
-{
- return container_of(dev, struct fw_device, device);
-}
-
-static inline int fw_device_is_shutdown(struct fw_device *device)
-{
- return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
-}
-
-static inline struct fw_device *fw_device_get(struct fw_device *device)
-{
- get_device(&device->device);
-
- return device;
-}
-
-static inline void fw_device_put(struct fw_device *device)
-{
- put_device(&device->device);
-}
-
-struct fw_device *fw_device_get_by_devt(dev_t devt);
-int fw_device_enable_phys_dma(struct fw_device *device);
-void fw_device_set_broadcast_channel(struct fw_device *device, int generation);
-
-void fw_device_cdev_update(struct fw_device *device);
-void fw_device_cdev_remove(struct fw_device *device);
-
-extern struct rw_semaphore fw_device_rwsem;
-extern struct idr fw_device_idr;
-extern int fw_cdev_major;
-
-/*
- * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
- */
-struct fw_unit {
- struct device device;
- u32 *directory;
- struct fw_attribute_group attribute_group;
-};
-
-static inline struct fw_unit *fw_unit(struct device *dev)
-{
- return container_of(dev, struct fw_unit, device);
-}
-
-static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
-{
- get_device(&unit->device);
-
- return unit;
-}
-
-static inline void fw_unit_put(struct fw_unit *unit)
-{
- put_device(&unit->device);
-}
-
-#define CSR_OFFSET 0x40
-#define CSR_LEAF 0x80
-#define CSR_DIRECTORY 0xc0
-
-#define CSR_DESCRIPTOR 0x01
-#define CSR_VENDOR 0x03
-#define CSR_HARDWARE_VERSION 0x04
-#define CSR_NODE_CAPABILITIES 0x0c
-#define CSR_UNIT 0x11
-#define CSR_SPECIFIER_ID 0x12
-#define CSR_VERSION 0x13
-#define CSR_DEPENDENT_INFO 0x14
-#define CSR_MODEL 0x17
-#define CSR_INSTANCE 0x18
-#define CSR_DIRECTORY_ID 0x20
-
-struct fw_csr_iterator {
- u32 *p;
- u32 *end;
-};
-
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
-int fw_csr_iterator_next(struct fw_csr_iterator *ci,
- int *key, int *value);
-
-#define FW_MATCH_VENDOR 0x0001
-#define FW_MATCH_MODEL 0x0002
-#define FW_MATCH_SPECIFIER_ID 0x0004
-#define FW_MATCH_VERSION 0x0008
-
-struct fw_device_id {
- u32 match_flags;
- u32 vendor;
- u32 model;
- u32 specifier_id;
- u32 version;
- void *driver_data;
-};
-
-struct fw_driver {
- struct device_driver driver;
- /* Called when the parent device sits through a bus reset. */
- void (*update) (struct fw_unit *unit);
- const struct fw_device_id *id_table;
-};
-
-static inline struct fw_driver *fw_driver(struct device_driver *drv)
-{
- return container_of(drv, struct fw_driver, driver);
-}
-
-extern const struct file_operations fw_device_ops;
-
-#endif /* __fw_device_h */
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
deleted file mode 100644
index 3c497bb4fae..00000000000
--- a/drivers/firewire/fw-topology.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __fw_topology_h
-#define __fw_topology_h
-
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <asm/atomic.h>
-
-enum {
- FW_NODE_CREATED,
- FW_NODE_UPDATED,
- FW_NODE_DESTROYED,
- FW_NODE_LINK_ON,
- FW_NODE_LINK_OFF,
- FW_NODE_INITIATED_RESET,
-};
-
-struct fw_node {
- u16 node_id;
- u8 color;
- u8 port_count;
- u8 link_on : 1;
- u8 initiated_reset : 1;
- u8 b_path : 1;
- u8 phy_speed : 2; /* As in the self ID packet. */
- u8 max_speed : 2; /* Minimum of all phy-speeds on the path from the
- * local node to this node. */
- u8 max_depth : 4; /* Maximum depth to any leaf node */
- u8 max_hops : 4; /* Max hops in this sub tree */
- atomic_t ref_count;
-
- /* For serializing node topology into a list. */
- struct list_head link;
-
- /* Upper layer specific data. */
- void *data;
-
- struct fw_node *ports[0];
-};
-
-static inline struct fw_node *fw_node_get(struct fw_node *node)
-{
- atomic_inc(&node->ref_count);
-
- return node;
-}
-
-static inline void fw_node_put(struct fw_node *node)
-{
- if (atomic_dec_and_test(&node->ref_count))
- kfree(node);
-}
-
-struct fw_card;
-void fw_destroy_nodes(struct fw_card *card);
-
-int fw_compute_block_crc(u32 *block);
-
-#endif /* __fw_topology_h */
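The get/put pair above is plain atomic reference counting; a holder that wants to keep a topology node alive while using it does roughly the following (sketch, with fw_notify() used only for illustration):

    /* Pin the root node while inspecting it; fw_node_put() frees the
     * node once the last reference has been dropped. */
    static void report_root_node(struct fw_card *card)
    {
            struct fw_node *node = fw_node_get(card->root_node);

            fw_notify("root node 0x%04x, %d ports, max hops %d\n",
                      node->node_id, node->port_count, node->max_hops);
            fw_node_put(node);
    }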
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
deleted file mode 100644
index dfa799068f8..00000000000
--- a/drivers/firewire/fw-transaction.h
+++ /dev/null
@@ -1,446 +0,0 @@
-/*
- * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __fw_transaction_h
-#define __fw_transaction_h
-
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/firewire-constants.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-#include <linux/spinlock_types.h>
-#include <linux/timer.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
-
-#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
-#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
-#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
-#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
-#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
-#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
-
-#define LOCAL_BUS 0xffc0
-
-#define SELFID_PORT_CHILD 0x3
-#define SELFID_PORT_PARENT 0x2
-#define SELFID_PORT_NCONN 0x1
-#define SELFID_PORT_NONE 0x0
-
-#define PHY_PACKET_CONFIG 0x0
-#define PHY_PACKET_LINK_ON 0x1
-#define PHY_PACKET_SELF_ID 0x2
-
-/* Bit fields _within_ the PHY registers. */
-#define PHY_LINK_ACTIVE 0x80
-#define PHY_CONTENDER 0x40
-#define PHY_BUS_RESET 0x40
-#define PHY_BUS_SHORT_RESET 0x40
-
-#define CSR_REGISTER_BASE 0xfffff0000000ULL
-
-/* register offsets relative to CSR_REGISTER_BASE */
-#define CSR_STATE_CLEAR 0x0
-#define CSR_STATE_SET 0x4
-#define CSR_NODE_IDS 0x8
-#define CSR_RESET_START 0xc
-#define CSR_SPLIT_TIMEOUT_HI 0x18
-#define CSR_SPLIT_TIMEOUT_LO 0x1c
-#define CSR_CYCLE_TIME 0x200
-#define CSR_BUS_TIME 0x204
-#define CSR_BUSY_TIMEOUT 0x210
-#define CSR_BUS_MANAGER_ID 0x21c
-#define CSR_BANDWIDTH_AVAILABLE 0x220
-#define CSR_CHANNELS_AVAILABLE 0x224
-#define CSR_CHANNELS_AVAILABLE_HI 0x224
-#define CSR_CHANNELS_AVAILABLE_LO 0x228
-#define CSR_BROADCAST_CHANNEL 0x234
-#define CSR_CONFIG_ROM 0x400
-#define CSR_CONFIG_ROM_END 0x800
-#define CSR_FCP_COMMAND 0xB00
-#define CSR_FCP_RESPONSE 0xD00
-#define CSR_FCP_END 0xF00
-#define CSR_TOPOLOGY_MAP 0x1000
-#define CSR_TOPOLOGY_MAP_END 0x1400
-#define CSR_SPEED_MAP 0x2000
-#define CSR_SPEED_MAP_END 0x3000
-
-#define BANDWIDTH_AVAILABLE_INITIAL 4915
-#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
-#define BROADCAST_CHANNEL_VALID (1 << 30)
-
-#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
-#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
-
-static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
-{
- u32 *dst = _dst;
- __be32 *src = _src;
- int i;
-
- for (i = 0; i < size / 4; i++)
- dst[i] = be32_to_cpu(src[i]);
-}
-
-static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
-{
- fw_memcpy_from_be32(_dst, _src, size);
-}
-
-struct fw_card;
-struct fw_packet;
-struct fw_node;
-struct fw_request;
-
-struct fw_descriptor {
- struct list_head link;
- size_t length;
- u32 immediate;
- u32 key;
- const u32 *data;
-};
-
-int fw_core_add_descriptor(struct fw_descriptor *desc);
-void fw_core_remove_descriptor(struct fw_descriptor *desc);
-
-typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
- struct fw_card *card, int status);
-
-typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
- void *data, size_t length,
- void *callback_data);
-
-/*
- * Important note: The callback must guarantee that either fw_send_response()
- * or kfree() is called on the @request.
- */
-typedef void (*fw_address_callback_t)(struct fw_card *card,
- struct fw_request *request,
- int tcode, int destination, int source,
- int generation, int speed,
- unsigned long long offset,
- void *data, size_t length,
- void *callback_data);
-
-struct fw_packet {
- int speed;
- int generation;
- u32 header[4];
- size_t header_length;
- void *payload;
- size_t payload_length;
- dma_addr_t payload_bus;
- u32 timestamp;
-
- /*
- * This callback is called when the packet transmission has
- * completed; for successful transmission, the status code is
- * the ack received from the destination, otherwise it's a
- * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
- * The callback can be called from tasklet context and thus
- * must never block.
- */
- fw_packet_callback_t callback;
- int ack;
- struct list_head link;
- void *driver_data;
-};
-
-struct fw_transaction {
- int node_id; /* The generation is implied; it is always the current. */
- int tlabel;
- int timestamp;
- struct list_head link;
-
- struct fw_packet packet;
-
- /*
- * The data passed to the callback is valid only during the
- * callback.
- */
- fw_transaction_callback_t callback;
- void *callback_data;
-};
-
-struct fw_address_handler {
- u64 offset;
- size_t length;
- fw_address_callback_t address_callback;
- void *callback_data;
- struct list_head link;
-};
-
-struct fw_address_region {
- u64 start;
- u64 end;
-};
-
-extern const struct fw_address_region fw_high_memory_region;
-
-int fw_core_add_address_handler(struct fw_address_handler *handler,
- const struct fw_address_region *region);
-void fw_core_remove_address_handler(struct fw_address_handler *handler);
-void fw_fill_response(struct fw_packet *response, u32 *request_header,
- int rcode, void *payload, size_t length);
-void fw_send_response(struct fw_card *card,
- struct fw_request *request, int rcode);
-
-extern struct bus_type fw_bus_type;
-
-struct fw_card {
- const struct fw_card_driver *driver;
- struct device *device;
- struct kref kref;
- struct completion done;
-
- int node_id;
- int generation;
- int current_tlabel, tlabel_mask;
- struct list_head transaction_list;
- struct timer_list flush_timer;
- unsigned long reset_jiffies;
-
- unsigned long long guid;
- unsigned max_receive;
- int link_speed;
- int config_rom_generation;
-
- spinlock_t lock; /* Take this lock when handling the lists in
- * this struct. */
- struct fw_node *local_node;
- struct fw_node *root_node;
- struct fw_node *irm_node;
- u8 color; /* must be u8 to match the definition in struct fw_node */
- int gap_count;
- bool beta_repeaters_present;
-
- int index;
-
- struct list_head link;
-
- /* Work struct for BM duties. */
- struct delayed_work work;
- int bm_retries;
- int bm_generation;
-
- bool broadcast_channel_allocated;
- u32 broadcast_channel;
- u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
-};
-
-static inline struct fw_card *fw_card_get(struct fw_card *card)
-{
- kref_get(&card->kref);
-
- return card;
-}
-
-void fw_card_release(struct kref *kref);
-
-static inline void fw_card_put(struct fw_card *card)
-{
- kref_put(&card->kref, fw_card_release);
-}
-
-extern void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
-
-/*
- * Check whether new_generation is the immediate successor of old_generation.
- * Take counter roll-over at 255 (as per OHCI) into account.
- */
-static inline bool is_next_generation(int new_generation, int old_generation)
-{
- return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
-}
-
-/*
- * The iso packet format allows for an immediate header/payload part
- * stored in 'header' immediately after the packet info plus an
- * indirect payload part that is pointed to by the 'payload' field.
- * Applications can use one or the other or both to implement simple
- * low-bandwidth streaming (e.g. audio) or more advanced
- * scatter-gather streaming (e.g. assembling video frame automatically).
- */
-
-struct fw_iso_packet {
- u16 payload_length; /* Length of indirect payload. */
- u32 interrupt : 1; /* Generate interrupt on this packet */
- u32 skip : 1; /* Set to not send packet at all. */
- u32 tag : 2;
- u32 sy : 4;
- u32 header_length : 8; /* Length of immediate header. */
- u32 header[0];
-};
-
-#define FW_ISO_CONTEXT_TRANSMIT 0
-#define FW_ISO_CONTEXT_RECEIVE 1
-
-#define FW_ISO_CONTEXT_MATCH_TAG0 1
-#define FW_ISO_CONTEXT_MATCH_TAG1 2
-#define FW_ISO_CONTEXT_MATCH_TAG2 4
-#define FW_ISO_CONTEXT_MATCH_TAG3 8
-#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
-
-struct fw_iso_context;
-
-typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
- u32 cycle, size_t header_length,
- void *header, void *data);
-
-/*
- * An iso buffer is just a set of pages mapped for DMA in the
- * specified direction. Since the pages are to be used for DMA, they
- * are not mapped into the kernel virtual address space. We store the
- * DMA address in the page private. The helper function
- * fw_iso_buffer_map() will map the pages into a given vma.
- */
-
-struct fw_iso_buffer {
- enum dma_data_direction direction;
- struct page **pages;
- int page_count;
-};
-
-struct fw_iso_context {
- struct fw_card *card;
- int type;
- int channel;
- int speed;
- size_t header_size;
- fw_iso_callback_t callback;
- void *callback_data;
-};
-
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
- int page_count, enum dma_data_direction direction);
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
-void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
-
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
- int type, int channel, int speed, size_t header_size,
- fw_iso_callback_t callback, void *callback_data);
-int fw_iso_context_queue(struct fw_iso_context *ctx,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload);
-int fw_iso_context_start(struct fw_iso_context *ctx,
- int cycle, int sync, int tags);
-int fw_iso_context_stop(struct fw_iso_context *ctx);
-void fw_iso_context_destroy(struct fw_iso_context *ctx);
-
-void fw_iso_resource_manage(struct fw_card *card, int generation,
- u64 channels_mask, int *channel, int *bandwidth, bool allocate);
-
-struct fw_card_driver {
- /*
- * Enable the given card with the given initial config rom.
- * This function is expected to activate the card, and either
- * enable the PHY or set the link_on bit and initiate a bus
- * reset.
- */
- int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
-
- int (*update_phy_reg)(struct fw_card *card, int address,
- int clear_bits, int set_bits);
-
- /*
- * Update the config rom for an enabled card. This function
- * should change the config rom that is presented on the bus
- * and initiate a bus reset.
- */
- int (*set_config_rom)(struct fw_card *card,
- u32 *config_rom, size_t length);
-
- void (*send_request)(struct fw_card *card, struct fw_packet *packet);
- void (*send_response)(struct fw_card *card, struct fw_packet *packet);
- /* Calling cancel is valid once a packet has been submitted. */
- int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
-
- /*
- * Allow the specified node ID to do direct DMA out and in of
- * host memory. The card will disable this for all nodes when
- * a bus reset happens, so drivers need to reenable this after
- * a bus reset. Returns 0 on success, -ENODEV if the card
- * doesn't support this, -ESTALE if the generation doesn't
- * match.
- */
- int (*enable_phys_dma)(struct fw_card *card,
- int node_id, int generation);
-
- u64 (*get_bus_time)(struct fw_card *card);
-
- struct fw_iso_context *
- (*allocate_iso_context)(struct fw_card *card,
- int type, int channel, size_t header_size);
- void (*free_iso_context)(struct fw_iso_context *ctx);
-
- int (*start_iso)(struct fw_iso_context *ctx,
- s32 cycle, u32 sync, u32 tags);
-
- int (*queue_iso)(struct fw_iso_context *ctx,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload);
-
- int (*stop_iso)(struct fw_iso_context *ctx);
-};
-
-int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
-
-void fw_send_request(struct fw_card *card, struct fw_transaction *t,
- int tcode, int destination_id, int generation, int speed,
- unsigned long long offset, void *payload, size_t length,
- fw_transaction_callback_t callback, void *callback_data);
-int fw_cancel_transaction(struct fw_card *card,
- struct fw_transaction *transaction);
-void fw_flush_transactions(struct fw_card *card);
-int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
- int generation, int speed, unsigned long long offset,
- void *payload, size_t length);
-void fw_send_phy_config(struct fw_card *card,
- int node_id, int generation, int gap_count);
-
-static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
-{
- return tag << 14 | channel << 8 | sy;
-}
-
-/*
- * Called by the topology code to inform the device code of node
- * activity; found, lost, or updated nodes.
- */
-void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
-
-/* API used by card level drivers */
-
-void fw_card_initialize(struct fw_card *card,
- const struct fw_card_driver *driver, struct device *device);
-int fw_card_add(struct fw_card *card,
- u32 max_receive, u32 link_speed, u64 guid);
-void fw_core_remove_card(struct fw_card *card);
-void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
- int generation, int self_id_count, u32 *self_ids);
-void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
-void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
-
-extern int fw_irm_set_broadcast_channel_register(struct device *dev,
- void *data);
-
-#endif /* __fw_transaction_h */
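Of the transaction entry points above, fw_run_transaction() is the simple blocking variant; a quadlet write to a core CSR register looks roughly like the sketch below (the register and value are only an example, and generation handling is assumed to come from the caller):

    /* Sketch: blocking quadlet write to the node's BUSY_TIMEOUT register. */
    static int set_busy_timeout(struct fw_device *device, int generation)
    {
            __be32 d = cpu_to_be32(0xf);        /* example retry limit */

            return fw_run_transaction(device->card,
                                      TCODE_WRITE_QUADLET_REQUEST,
                                      device->node_id, generation,
                                      device->max_speed,
                                      CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT,
                                      &d, sizeof(d));
    }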
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/ohci.c
index 1180d0be0bb..ecddd11b797 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/ohci.c
@@ -20,17 +20,25 @@
#include <linux/compiler.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/system.h>
@@ -38,8 +46,8 @@
#include <asm/pmac_feature.h>
#endif
-#include "fw-ohci.h"
-#include "fw-transaction.h"
+#include "core.h"
+#include "ohci.h"
#define DESCRIPTOR_OUTPUT_MORE 0
#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
@@ -178,7 +186,7 @@ struct fw_ohci {
int node_id;
int generation;
int request_generation; /* for timestamping incoming requests */
- u32 bus_seconds;
+ atomic_t bus_seconds;
bool use_dualbuffer;
bool old_uninorth;
@@ -231,7 +239,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
-#define FW_OHCI_MAJOR 240
#define OHCI1394_REGISTER_SIZE 0x800
#define OHCI_LOOP_COUNT 500
#define OHCI1394_PCI_HCI_Control 0x40
@@ -1434,7 +1441,7 @@ static irqreturn_t irq_handler(int irq, void *data)
if (event & OHCI1394_cycle64Seconds) {
cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
if ((cycle_time & 0x80000000) == 0)
- ohci->bus_seconds++;
+ atomic_inc(&ohci->bus_seconds);
}
return IRQ_HANDLED;
@@ -1770,7 +1777,7 @@ static u64 ohci_get_bus_time(struct fw_card *card)
u64 bus_time;
cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;
+ bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time;
return bus_time;
}
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/ohci.h
index a2fbb6240ca..ba492d85c51 100644
--- a/drivers/firewire/fw-ohci.h
+++ b/drivers/firewire/ohci.h
@@ -1,5 +1,5 @@
-#ifndef __fw_ohci_h
-#define __fw_ohci_h
+#ifndef _FIREWIRE_OHCI_H
+#define _FIREWIRE_OHCI_H
/* OHCI register map */
@@ -154,4 +154,4 @@
#define OHCI1394_phy_tcode 0xe
-#endif /* __fw_ohci_h */
+#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/sbp2.c
index 2bcf51557c7..24c45635376 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -30,18 +30,28 @@
#include <linux/blkdev.h>
#include <linux/bug.h>
+#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/stringify.h>
-#include <linux/timer.h>
#include <linux/workqueue.h>
+
+#include <asm/byteorder.h>
#include <asm/system.h>
#include <scsi/scsi.h>
@@ -49,10 +59,6 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
-#include "fw-device.h"
-#include "fw-topology.h"
-#include "fw-transaction.h"
-
/*
* So far only bridges from Oxford Semiconductor are known to support
* concurrent logins. Depending on firmware, four or two concurrent logins
@@ -174,6 +180,11 @@ struct sbp2_target {
int blocked; /* ditto */
};
+static struct fw_device *target_device(struct sbp2_target *tgt)
+{
+ return fw_parent_device(tgt->unit);
+}
+
/* Impossible login_id, to detect logout attempt before successful login */
#define INVALID_LOGIN_ID 0x10000
@@ -482,7 +493,7 @@ static void complete_transaction(struct fw_card *card, int rcode,
static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
int node_id, int generation, u64 offset)
{
- struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+ struct fw_device *device = target_device(lu->tgt);
unsigned long flags;
orb->pointer.high = 0;
@@ -504,7 +515,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
{
- struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+ struct fw_device *device = target_device(lu->tgt);
struct sbp2_orb *orb, *next;
struct list_head list;
unsigned long flags;
@@ -542,7 +553,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
int generation, int function,
int lun_or_login_id, void *response)
{
- struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+ struct fw_device *device = target_device(lu->tgt);
struct sbp2_management_orb *orb;
unsigned int timeout;
int retval = -ENOMEM;
@@ -638,7 +649,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
{
- struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+ struct fw_device *device = target_device(lu->tgt);
__be32 d = 0;
fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
@@ -655,7 +666,7 @@ static void complete_agent_reset_write_no_wait(struct fw_card *card,
static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
{
- struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+ struct fw_device *device = target_device(lu->tgt);
struct fw_transaction *t;
static __be32 d;
@@ -694,7 +705,7 @@ static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
{
struct sbp2_target *tgt = lu->tgt;
- struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+ struct fw_card *card = target_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
unsigned long flags;
@@ -718,7 +729,7 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
{
struct sbp2_target *tgt = lu->tgt;
- struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+ struct fw_card *card = target_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
unsigned long flags;
@@ -743,7 +754,7 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
*/
static void sbp2_unblock(struct sbp2_target *tgt)
{
- struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
+ struct fw_card *card = target_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
unsigned long flags;
@@ -773,7 +784,7 @@ static void sbp2_release_target(struct kref *kref)
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
struct scsi_device *sdev;
- struct fw_device *device = fw_device(tgt->unit->device.parent);
+ struct fw_device *device = target_device(tgt);
/* prevent deadlocks */
sbp2_unblock(tgt);
@@ -846,7 +857,7 @@ static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
*/
static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
{
- struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+ struct fw_device *device = target_device(lu->tgt);
__be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
@@ -862,7 +873,7 @@ static void sbp2_login(struct work_struct *work)
struct sbp2_logical_unit *lu =
container_of(work, struct sbp2_logical_unit, work.work);
struct sbp2_target *tgt = lu->tgt;
- struct fw_device *device = fw_device(tgt->unit->device.parent);
+ struct fw_device *device = target_device(tgt);
struct Scsi_Host *shost;
struct scsi_device *sdev;
struct sbp2_login_response response;
@@ -1110,7 +1121,7 @@ static struct scsi_host_template scsi_driver_template;
static int sbp2_probe(struct device *dev)
{
struct fw_unit *unit = fw_unit(dev);
- struct fw_device *device = fw_device(unit->device.parent);
+ struct fw_device *device = fw_parent_device(unit);
struct sbp2_target *tgt;
struct sbp2_logical_unit *lu;
struct Scsi_Host *shost;
@@ -1125,7 +1136,7 @@ static int sbp2_probe(struct device *dev)
return -ENOMEM;
tgt = (struct sbp2_target *)shost->hostdata;
- unit->device.driver_data = tgt;
+ dev_set_drvdata(&unit->device, tgt);
tgt->unit = unit;
kref_init(&tgt->kref);
INIT_LIST_HEAD(&tgt->lu_list);
@@ -1180,7 +1191,7 @@ static int sbp2_probe(struct device *dev)
static int sbp2_remove(struct device *dev)
{
struct fw_unit *unit = fw_unit(dev);
- struct sbp2_target *tgt = unit->device.driver_data;
+ struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
sbp2_target_put(tgt);
return 0;
@@ -1191,7 +1202,7 @@ static void sbp2_reconnect(struct work_struct *work)
struct sbp2_logical_unit *lu =
container_of(work, struct sbp2_logical_unit, work.work);
struct sbp2_target *tgt = lu->tgt;
- struct fw_device *device = fw_device(tgt->unit->device.parent);
+ struct fw_device *device = target_device(tgt);
int generation, node_id, local_node_id;
if (fw_device_is_shutdown(device))
@@ -1240,10 +1251,10 @@ static void sbp2_reconnect(struct work_struct *work)
static void sbp2_update(struct fw_unit *unit)
{
- struct sbp2_target *tgt = unit->device.driver_data;
+ struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
struct sbp2_logical_unit *lu;
- fw_device_enable_phys_dma(fw_device(unit->device.parent));
+ fw_device_enable_phys_dma(fw_parent_device(unit));
/*
* Fw-core serializes sbp2_update() against sbp2_remove().
@@ -1259,9 +1270,10 @@ static void sbp2_update(struct fw_unit *unit)
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
#define SBP2_SW_VERSION_ENTRY 0x00010483
-static const struct fw_device_id sbp2_id_table[] = {
+static const struct ieee1394_device_id sbp2_id_table[] = {
{
- .match_flags = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION,
+ .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
.version = SBP2_SW_VERSION_ENTRY,
},
@@ -1335,7 +1347,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
{
struct sbp2_command_orb *orb =
container_of(base_orb, struct sbp2_command_orb, base);
- struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent);
+ struct fw_device *device = target_device(orb->lu->tgt);
int result;
if (status != NULL) {
@@ -1442,7 +1454,7 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
{
struct sbp2_logical_unit *lu = cmd->device->hostdata;
- struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+ struct fw_device *device = target_device(lu->tgt);
struct sbp2_command_orb *orb;
int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 5f1b5400d96..24c84ae8152 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -596,6 +596,7 @@ int dmi_get_year(int field)
return year;
}
+EXPORT_SYMBOL(dmi_get_year);
/**
* dmi_walk - Walk the DMI table and get called back for every record
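Exporting dmi_get_year() lets modular drivers key BIOS-date-based quirks off the DMI data; a hedged example of the kind of check this enables (the helper name and threshold are hypothetical):

    #include <linux/dmi.h>

    /* Hypothetical module-side check: only apply a quirk to old firmware.
     * dmi_get_year() returns 0 when the BIOS date is unknown, so guard it. */
    static bool bios_older_than(int year)
    {
            int y = dmi_get_year(DMI_BIOS_DATE);

            return y && y < year;
    }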
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 05aa2d406ac..d5ea8a68d33 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -31,8 +31,12 @@
* information is necessary as for the resource tree.
*/
struct firmware_map_entry {
- resource_size_t start; /* start of the memory range */
- resource_size_t end; /* end of the memory range (incl.) */
+ /*
+ * start and end must be u64 rather than resource_size_t, because e820
+ * resources can lie at addresses above 4G.
+ */
+ u64 start; /* start of the memory range */
+ u64 end; /* end of the memory range (incl.) */
const char *type; /* type of the memory range */
struct list_head list; /* entry for the linked list */
struct kobject kobj; /* kobject for each entry */
@@ -101,7 +105,7 @@ static LIST_HEAD(map_entries);
* Common implementation of firmware_map_add() and firmware_map_add_early()
* which expects a pre-allocated struct firmware_map_entry.
**/
-static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
+static int firmware_map_add_entry(u64 start, u64 end,
const char *type,
struct firmware_map_entry *entry)
{
@@ -132,8 +136,7 @@ static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
*
* Returns 0 on success, or -ENOMEM if no memory could be allocated.
**/
-int firmware_map_add(resource_size_t start, resource_size_t end,
- const char *type)
+int firmware_map_add(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
@@ -157,8 +160,7 @@ int firmware_map_add(resource_size_t start, resource_size_t end,
*
* Returns 0 on success, or -ENOMEM if no memory could be allocated.
**/
-int __init firmware_map_add_early(resource_size_t start, resource_size_t end,
- const char *type)
+int __init firmware_map_add_early(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
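With the interface widened to u64, a caller can register a range above 4 GiB even on configurations where resource_size_t is only 32 bits; for example (hypothetical addresses, end inclusive as documented above):

    /* Hypothetical: record a 4 GiB..8 GiB "System RAM" range at early boot. */
    firmware_map_add_early(0x100000000ULL, 0x1ffffffffULL, "System RAM");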
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index edb02530e46..11f373971fa 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -69,7 +69,7 @@ comment "Memory mapped GPIO expanders:"
config GPIO_XILINX
bool "Xilinx GPIO support"
- depends on PPC_OF
+ depends on PPC_OF || MICROBLAZE
help
Say yes here to support the Xilinx FPGA GPIO device
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 17b24c580c0..c961fe415ae 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -18,6 +18,14 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support.
+config DRM_TTM
+ tristate
+ depends on DRM
+ help
+ GPU memory management subsystem for devices with multiple
+ GPU memory types. Will be enabled automatically if a device driver
+ uses it.
+
config DRM_TDFX
tristate "3dfx Banshee/Voodoo3+"
depends on DRM && PCI
@@ -36,6 +44,11 @@ config DRM_R128
config DRM_RADEON
tristate "ATI Radeon"
depends on DRM && PCI
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select FB
+ select FRAMEBUFFER_CONSOLE if !EMBEDDED
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -67,12 +80,18 @@ config DRM_I830
will load the correct one.
config DRM_I915
+ tristate "i915 driver"
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB
select FRAMEBUFFER_CONSOLE if !EMBEDDED
- tristate "i915 driver"
+ # i915 depends on ACPI_VIDEO when ACPI is enabled
+ # but for select to work, need to select ACPI_VIDEO's dependencies, ick
+ select VIDEO_OUTPUT_CONTROL if ACPI
+ select BACKLIGHT_CLASS_DEVICE if ACPI
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
help
Choose this option if you have a system that has Intel 830M, 845G,
852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4ec5061fa58..4e89ab08b7b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -26,4 +26,4 @@ obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VIA) +=via/
-
+obj-$(CONFIG_DRM_TTM) += ttm/
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 6d80d17f1e9..80a257554b3 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -170,6 +170,14 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
}
DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
(unsigned long long)map->offset, map->size, map->type);
+
+ /* page-align _DRM_SHM maps. They are allocated here, so page-aligning
+ * them creates no security hole, and it works around various broken
+ * drivers that use a non-aligned quantity to map the SAREA. --BenH
+ */
+ if (map->type == _DRM_SHM)
+ map->size = PAGE_ALIGN(map->size);
+
if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EINVAL;
@@ -363,7 +371,8 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
list->user_token = list->hash.key << PAGE_SHIFT;
mutex_unlock(&dev->struct_mutex);
- list->master = dev->primary->master;
+ if (!(map->flags & _DRM_DRIVER))
+ list->master = dev->primary->master;
*maplist = list;
return 0;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 94a76887173..8fab7890a36 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2294,7 +2294,12 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
}
}
- if (connector->funcs->set_property)
+ /* Do DPMS ourselves */
+ if (property == connector->dev->mode_config.dpms_property) {
+ if (connector->funcs->dpms)
+ (*connector->funcs->dpms)(connector, (int) out_resp->value);
+ ret = 0;
+ } else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, out_resp->value);
 /* store the property value if successful */
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 45890447fee..a6f73f1e99d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -199,6 +199,29 @@ static void drm_helper_add_std_modes(struct drm_device *dev,
}
/**
+ * drm_helper_encoder_in_use - check if a given encoder is in use
+ * @encoder: encoder to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @encoder's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @encoder is part of the mode_config, false otherwise.
+ */
+bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(drm_helper_encoder_in_use);
+
+/**
* drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
* @crtc: CRTC to check
*
@@ -216,7 +239,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
/* FIXME: Locking around list access? */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc == crtc)
+ if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
return true;
return false;
}
@@ -240,7 +263,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder_funcs = encoder->helper_private;
- if (!encoder->crtc)
+ if (!drm_helper_encoder_in_use(encoder))
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
}
@@ -935,6 +958,88 @@ bool drm_helper_initial_config(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_initial_config);
+static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
+{
+ int dpms = DRM_MODE_DPMS_OFF;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ if (connector->dpms < dpms)
+ dpms = connector->dpms;
+ return dpms;
+}
+
+static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
+{
+ int dpms = DRM_MODE_DPMS_OFF;
+ struct drm_connector *connector;
+ struct drm_device *dev = crtc->dev;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder && connector->encoder->crtc == crtc)
+ if (connector->dpms < dpms)
+ dpms = connector->dpms;
+ return dpms;
+}
+
+/**
+ * drm_helper_connector_dpms
+ * @connector: affected connector
+ * @mode: DPMS mode
+ *
+ * Calls the low-level connector DPMS function, then
+ * calls appropriate encoder and crtc DPMS functions as well
+ */
+void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+ int old_dpms;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* from off to on, do crtc then encoder */
+ if (mode < old_dpms) {
+ if (crtc) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ if (encoder) {
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+ }
+
+ /* from on to off, do encoder then crtc */
+ if (mode > old_dpms) {
+ if (encoder) {
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+ if (crtc) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
+
+ return;
+}
+EXPORT_SYMBOL(drm_helper_connector_dpms);
+
/**
* drm_hotplug_stage_two
* @dev DRM device
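The new connector-level DPMS helper is meant to be wired in as a driver's connector .dpms callback so that the property path in drm_crtc.c (see the earlier hunk) reaches it; a sketch, with the non-helper callbacks assumed to be driver-provided:

    static const struct drm_connector_funcs example_connector_funcs = {
            .dpms = drm_helper_connector_dpms,
            .detect = example_detect,               /* assumed driver callback */
            .fill_modes = drm_helper_probe_single_connector_modes,
            .destroy = example_connector_destroy,   /* assumed driver callback */
    };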
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index c77c6c6d9d2..6ce0e2667a8 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -105,7 +105,7 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
root, tmp, &drm_debugfs_fops);
if (!ent) {
- DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n",
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
name, files[i].name);
drm_free(tmp, sizeof(struct drm_info_node),
_DRM_DRIVER);
@@ -133,9 +133,9 @@ EXPORT_SYMBOL(drm_debugfs_create_files);
* \param minor device minor number
* \param root DRI debugfs dir entry.
*
- * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry
- * "/debugfs/dri/%minor%/", and each entry in debugfs_list as
- * "/debugfs/dri/%minor%/%name%".
+ * Create the DRI debugfs root entry "/sys/kernel/debug/dri", the device debugfs root entry
+ * "/sys/kernel/debug/dri/%minor%/", and each entry in debugfs_list as
+ * "/sys/kernel/debug/dri/%minor%/%name%".
*/
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
@@ -148,7 +148,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, root);
if (!minor->debugfs_root) {
- DRM_ERROR("Cannot create /debugfs/dri/%s\n", name);
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name);
return -1;
}
@@ -165,7 +165,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
ret = dev->driver->debugfs_init(minor);
if (ret) {
DRM_ERROR("DRM: Driver failed to initialize "
- "/debugfs/dri.\n");
+ "/sys/kernel/debug/dri.\n");
return ret;
}
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index f01def16a66..1bf7efd8d33 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -339,7 +339,7 @@ static int __init drm_core_init(void)
drm_debugfs_root = debugfs_create_dir("dri", NULL);
if (!drm_debugfs_root) {
- DRM_ERROR("Cannot create /debugfs/dri\n");
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
ret = -1;
goto err_p3;
}
@@ -481,7 +481,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
}
retcode = func(dev, kdata, file_priv);
- if ((retcode == 0) && (cmd & IOC_OUT)) {
+ if (cmd & IOC_OUT) {
if (copy_to_user((void __user *)arg, kdata,
_IOC_SIZE(cmd)) != 0)
retcode = -EFAULT;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ca9c6165671..801a0d0e081 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -289,6 +289,11 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
struct drm_display_mode *mode;
struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+ /* ignore tiny modes */
+ if (((pt->hactive_hi << 8) | pt->hactive_lo) < 64 ||
+ ((pt->vactive_hi << 8) | pt->hactive_lo) < 64)
+ return NULL;
+
if (pt->stereo) {
printk(KERN_WARNING "stereo mode not supported\n");
return NULL;
@@ -584,85 +589,13 @@ int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
}
EXPORT_SYMBOL(drm_do_probe_ddc_edid);
-/**
- * Get EDID information.
- *
- * \param adapter : i2c device adaptor.
- * \param buf : EDID data buffer to be filled
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Initialize DDC, then fetch EDID information
- * by calling drm_do_probe_ddc_edid function.
- */
-static int drm_ddc_read(struct i2c_adapter *adapter,
- unsigned char *buf, int len)
-{
- struct i2c_algo_bit_data *algo_data = adapter->algo_data;
- int i, j;
- int ret = -1;
-
- algo_data->setscl(algo_data->data, 1);
-
- for (i = 0; i < 1; i++) {
- /* For some old monitors we need the
- * following process to initialize/stop DDC
- */
- algo_data->setsda(algo_data->data, 1);
- msleep(13);
-
- algo_data->setscl(algo_data->data, 1);
- for (j = 0; j < 5; j++) {
- msleep(10);
- if (algo_data->getscl(algo_data->data))
- break;
- }
- if (j == 5)
- continue;
-
- algo_data->setsda(algo_data->data, 0);
- msleep(15);
- algo_data->setscl(algo_data->data, 0);
- msleep(15);
- algo_data->setsda(algo_data->data, 1);
- msleep(15);
-
- /* Do the real work */
- ret = drm_do_probe_ddc_edid(adapter, buf, len);
- algo_data->setsda(algo_data->data, 0);
- algo_data->setscl(algo_data->data, 0);
- msleep(15);
-
- algo_data->setscl(algo_data->data, 1);
- for (j = 0; j < 10; j++) {
- msleep(10);
- if (algo_data->getscl(algo_data->data))
- break;
- }
-
- algo_data->setsda(algo_data->data, 1);
- msleep(15);
- algo_data->setscl(algo_data->data, 0);
- algo_data->setsda(algo_data->data, 0);
- if (ret == 0)
- break;
- }
- /* Release the DDC lines when done or the Apple Cinema HD display
- * will switch off
- */
- algo_data->setsda(algo_data->data, 1);
- algo_data->setscl(algo_data->data, 1);
-
- return ret;
-}
-
static int drm_ddc_read_edid(struct drm_connector *connector,
struct i2c_adapter *adapter,
char *buf, int len)
{
int ret;
- ret = drm_ddc_read(adapter, buf, len);
+ ret = drm_do_probe_ddc_edid(adapter, buf, len);
if (ret != 0) {
dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n",
drm_get_connector_name(connector));
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4984aa89cf3..ec43005100d 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -133,7 +133,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
- obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
obj->dev = dev;
obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index af539f7d87d..ac35145c3e2 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -62,6 +62,7 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
}
return 0;
}
+EXPORT_SYMBOL(drm_ht_create);
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
@@ -156,6 +157,7 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it
}
return 0;
}
+EXPORT_SYMBOL(drm_ht_just_insert_please);
int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
struct drm_hash_item **item)
@@ -169,6 +171,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
*item = hlist_entry(list, struct drm_hash_item, head);
return 0;
}
+EXPORT_SYMBOL(drm_ht_find_item);
int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
{
@@ -202,3 +205,4 @@ void drm_ht_remove(struct drm_open_hash *ht)
ht->table = NULL;
}
}
+EXPORT_SYMBOL(drm_ht_remove);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 93e677a481f..fc8e5acd9d9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -196,6 +196,7 @@ int drm_irq_install(struct drm_device *dev)
{
int ret = 0;
unsigned long sh_flags = 0;
+ char *irqname;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
@@ -227,8 +228,13 @@ int drm_irq_install(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = IRQF_SHARED;
+ if (dev->devname)
+ irqname = dev->devname;
+ else
+ irqname = dev->driver->name;
+
ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
- sh_flags, dev->devname, dev);
+ sh_flags, irqname, dev);
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 367c590ffbb..a912a0ff11c 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -42,8 +42,11 @@
*/
#include "drmP.h"
+#include "drm_mm.h"
#include <linux/slab.h>
+#define MM_UNUSED_TARGET 4
+
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
struct list_head *tail_node;
@@ -74,16 +77,62 @@ int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
return 0;
}
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+{
+ struct drm_mm_node *child;
+
+ if (atomic)
+ child = kmalloc(sizeof(*child), GFP_ATOMIC);
+ else
+ child = kmalloc(sizeof(*child), GFP_KERNEL);
+
+ if (unlikely(child == NULL)) {
+ spin_lock(&mm->unused_lock);
+ if (list_empty(&mm->unused_nodes))
+ child = NULL;
+ else {
+ child =
+ list_entry(mm->unused_nodes.next,
+ struct drm_mm_node, fl_entry);
+ list_del(&child->fl_entry);
+ --mm->num_unused;
+ }
+ spin_unlock(&mm->unused_lock);
+ }
+ return child;
+}
+
+int drm_mm_pre_get(struct drm_mm *mm)
+{
+ struct drm_mm_node *node;
+
+ spin_lock(&mm->unused_lock);
+ while (mm->num_unused < MM_UNUSED_TARGET) {
+ spin_unlock(&mm->unused_lock);
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ spin_lock(&mm->unused_lock);
+
+ if (unlikely(node == NULL)) {
+ int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
+ spin_unlock(&mm->unused_lock);
+ return ret;
+ }
+ ++mm->num_unused;
+ list_add_tail(&node->fl_entry, &mm->unused_nodes);
+ }
+ spin_unlock(&mm->unused_lock);
+ return 0;
+}
+EXPORT_SYMBOL(drm_mm_pre_get);
static int drm_mm_create_tail_node(struct drm_mm *mm,
- unsigned long start,
- unsigned long size)
+ unsigned long start,
+ unsigned long size, int atomic)
{
struct drm_mm_node *child;
- child = (struct drm_mm_node *)
- drm_alloc(sizeof(*child), DRM_MEM_MM);
- if (!child)
+ child = drm_mm_kmalloc(mm, atomic);
+ if (unlikely(child == NULL))
return -ENOMEM;
child->free = 1;
@@ -97,8 +146,7 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
return 0;
}
-
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
struct list_head *tail_node;
struct drm_mm_node *entry;
@@ -106,20 +154,21 @@ int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free) {
- return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
+ return drm_mm_create_tail_node(mm, entry->start + entry->size,
+ size, atomic);
}
entry->size += size;
return 0;
}
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
- unsigned long size)
+ unsigned long size,
+ int atomic)
{
struct drm_mm_node *child;
- child = (struct drm_mm_node *)
- drm_alloc(sizeof(*child), DRM_MEM_MM);
- if (!child)
+ child = drm_mm_kmalloc(parent->mm, atomic);
+ if (unlikely(child == NULL))
return NULL;
INIT_LIST_HEAD(&child->fl_entry);
@@ -139,8 +188,41 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
-struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
- unsigned long size, unsigned alignment)
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+
+ struct drm_mm_node *align_splitoff = NULL;
+ unsigned tmp = 0;
+
+ if (alignment)
+ tmp = node->start % alignment;
+
+ if (tmp) {
+ align_splitoff =
+ drm_mm_split_at_start(node, alignment - tmp, 0);
+ if (unlikely(align_splitoff == NULL))
+ return NULL;
+ }
+
+ if (node->size == size) {
+ list_del_init(&node->fl_entry);
+ node->free = 0;
+ } else {
+ node = drm_mm_split_at_start(node, size, 0);
+ }
+
+ if (align_splitoff)
+ drm_mm_put_block(align_splitoff);
+
+ return node;
+}
+
+EXPORT_SYMBOL(drm_mm_get_block);
+
+struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment)
{
struct drm_mm_node *align_splitoff = NULL;
@@ -151,8 +233,9 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
tmp = parent->start % alignment;
if (tmp) {
- align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
- if (!align_splitoff)
+ align_splitoff =
+ drm_mm_split_at_start(parent, alignment - tmp, 1);
+ if (unlikely(align_splitoff == NULL))
return NULL;
}
@@ -161,7 +244,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
parent->free = 0;
return parent;
} else {
- child = drm_mm_split_at_start(parent, size);
+ child = drm_mm_split_at_start(parent, size, 1);
}
if (align_splitoff)
@@ -169,14 +252,14 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
return child;
}
-EXPORT_SYMBOL(drm_mm_get_block);
+EXPORT_SYMBOL(drm_mm_get_block_atomic);
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
*/
-void drm_mm_put_block(struct drm_mm_node * cur)
+void drm_mm_put_block(struct drm_mm_node *cur)
{
struct drm_mm *mm = cur->mm;
@@ -188,21 +271,27 @@ void drm_mm_put_block(struct drm_mm_node * cur)
int merged = 0;
if (cur_head->prev != root_head) {
- prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+ prev_node =
+ list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
}
}
if (cur_head->next != root_head) {
- next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+ next_node =
+ list_entry(cur_head->next, struct drm_mm_node, ml_entry);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
- drm_free(next_node, sizeof(*next_node),
- DRM_MEM_MM);
+ if (mm->num_unused < MM_UNUSED_TARGET) {
+ list_add(&next_node->fl_entry,
+ &mm->unused_nodes);
+ ++mm->num_unused;
+ } else
+ kfree(next_node);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
@@ -215,14 +304,19 @@ void drm_mm_put_block(struct drm_mm_node * cur)
list_add(&cur->fl_entry, &mm->fl_entry);
} else {
list_del(&cur->ml_entry);
- drm_free(cur, sizeof(*cur), DRM_MEM_MM);
+ if (mm->num_unused < MM_UNUSED_TARGET) {
+ list_add(&cur->fl_entry, &mm->unused_nodes);
+ ++mm->num_unused;
+ } else
+ kfree(cur);
}
}
+
EXPORT_SYMBOL(drm_mm_put_block);
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
- unsigned long size,
- unsigned alignment, int best_match)
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment, int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->fl_entry;
@@ -247,7 +341,6 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
wasted += alignment - tmp;
}
-
if (entry->size >= size + wasted) {
if (!best_match)
return entry;
@@ -260,6 +353,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
return best;
}
+EXPORT_SYMBOL(drm_mm_search_free);
int drm_mm_clean(struct drm_mm * mm)
{
@@ -267,14 +361,17 @@ int drm_mm_clean(struct drm_mm * mm)
return (head->next->next == head);
}
-EXPORT_SYMBOL(drm_mm_search_free);
+EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->ml_entry);
INIT_LIST_HEAD(&mm->fl_entry);
+ INIT_LIST_HEAD(&mm->unused_nodes);
+ mm->num_unused = 0;
+ spin_lock_init(&mm->unused_lock);
- return drm_mm_create_tail_node(mm, start, size);
+ return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
@@ -282,6 +379,7 @@ void drm_mm_takedown(struct drm_mm * mm)
{
struct list_head *bnode = mm->fl_entry.next;
struct drm_mm_node *entry;
+ struct drm_mm_node *next;
entry = list_entry(bnode, struct drm_mm_node, fl_entry);
@@ -293,7 +391,16 @@ void drm_mm_takedown(struct drm_mm * mm)
list_del(&entry->fl_entry);
list_del(&entry->ml_entry);
+ kfree(entry);
+
+ spin_lock(&mm->unused_lock);
+ list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
+ list_del(&entry->fl_entry);
+ kfree(entry);
+ --mm->num_unused;
+ }
+ spin_unlock(&mm->unused_lock);
- drm_free(entry, sizeof(*entry), DRM_MEM_MM);
+ BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
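The pre-get/atomic pair added above targets callers that must carve blocks while holding their own spinlock: top up the manager's node cache first, then allocate without sleeping. A sketch where the manager and its lock are assumed to be caller state:

    static struct drm_mm_node *alloc_block(struct drm_mm *mgr, spinlock_t *lock,
                                           unsigned long size, unsigned alignment)
    {
            struct drm_mm_node *node;

            /* May sleep: refill the unused-node cache up to MM_UNUSED_TARGET. */
            if (drm_mm_pre_get(mgr))
                    return NULL;

            spin_lock(lock);
            node = drm_mm_search_free(mgr, size, alignment, 0);
            if (node)
                    /* Never sleeps; falls back to the pre-filled cache. */
                    node = drm_mm_get_block_atomic(node, size, alignment);
            spin_unlock(lock);

            return node;
    }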
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c9b80fdd463..54f492a488a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -38,6 +38,7 @@
#include "drm.h"
#include "drm_crtc.h"
+#define DRM_MODESET_DEBUG "drm_mode"
/**
* drm_mode_debug_printmodeline - debug print a mode
* @dev: DRM device
@@ -50,12 +51,13 @@
*/
void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
{
- DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+ DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
+ "Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
}
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
@@ -401,7 +403,9 @@ void drm_mode_prune_invalid(struct drm_device *dev,
list_del(&mode->head);
if (verbose) {
drm_mode_debug_printmodeline(mode);
- DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status);
+ DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
+ "Not using %s mode %d\n",
+ mode->name, mode->status);
}
drm_mode_destroy(dev, mode);
}
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index b9631e3a1ea..387a8de1bc7 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -51,7 +51,22 @@ struct idr drm_minors_idr;
struct class *drm_class;
struct proc_dir_entry *drm_proc_root;
struct dentry *drm_debugfs_root;
-
+void drm_ut_debug_printk(unsigned int request_level,
+ const char *prefix,
+ const char *function_name,
+ const char *format, ...)
+{
+ va_list args;
+
+ if (drm_debug & request_level) {
+ if (function_name)
+ printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
+ va_start(args, format);
+ vprintk(format, args);
+ va_end(args);
+ }
+}
+EXPORT_SYMBOL(drm_ut_debug_printk);
static int drm_minor_get_id(struct drm_device *dev, int type)
{
int new_id;
@@ -328,7 +343,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
#if defined(CONFIG_DEBUG_FS)
ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
if (ret) {
- DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n");
+ DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
goto err_g2;
}
#endif
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 8f9372921f8..85ec31b3ff0 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -70,6 +70,11 @@ static ssize_t version_show(struct class *dev, char *buf)
CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
}
+static char *drm_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
+}
+
static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
/**
@@ -101,6 +106,8 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
if (err)
goto err_out_class;
+ class->nodename = drm_nodename;
+
return class;
err_out_class:
@@ -147,7 +154,7 @@ static ssize_t status_show(struct device *device,
enum drm_connector_status status;
status = connector->funcs->detect(connector);
- return snprintf(buf, PAGE_SIZE, "%s",
+ return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_connector_status_name(status));
}
@@ -166,7 +173,7 @@ static ssize_t dpms_show(struct device *device,
if (ret)
return 0;
- return snprintf(buf, PAGE_SIZE, "%s",
+ return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_dpms_name((int)dpms_status));
}
@@ -176,7 +183,7 @@ static ssize_t enabled_show(struct device *device,
{
struct drm_connector *connector = to_drm_connector(device);
- return snprintf(buf, PAGE_SIZE, connector->encoder ? "enabled" :
+ return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
"disabled");
}
@@ -317,6 +324,7 @@ static struct device_attribute connector_attrs_opt1[] = {
static struct bin_attribute edid_attr = {
.attr.name = "edid",
+ .attr.mode = 0444,
.size = 128,
.read = edid_show,
};
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 53d54455262..1a60626f680 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -33,6 +33,8 @@
#include "i915_drm.h"
#include "i915_drv.h"
+#define I915_DRV "i915_drv"
+
/* Really want an OS-independent resettable timer. Would like to have
* this loop run for (eg) 3 sec, but have the timer reset every time
* the head pointer changes, so that EBUSY only happens if the ring
@@ -99,7 +101,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
- DRM_DEBUG("Enabled hardware status page\n");
+ DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
return 0;
}
@@ -185,7 +187,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
master_priv->sarea_priv = (drm_i915_sarea_t *)
((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
} else {
- DRM_DEBUG("sarea not found assuming DRI2 userspace\n");
+ DRM_DEBUG_DRIVER(I915_DRV,
+ "sarea not found assuming DRI2 userspace\n");
}
if (init->ring_size != 0) {
@@ -235,7 +238,7 @@ static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- DRM_DEBUG("%s\n", __func__);
+ DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
if (dev_priv->ring.map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
@@ -248,13 +251,14 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_ERROR("Can not find hardware status page\n");
return -EINVAL;
}
- DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+ DRM_DEBUG_DRIVER(I915_DRV, "hw status page @ %p\n",
+ dev_priv->hw_status_page);
if (dev_priv->status_gfx_addr != 0)
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
- DRM_DEBUG("Enabled hardware status page\n");
+ DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
return 0;
}
@@ -548,10 +552,10 @@ static int i915_dispatch_flip(struct drm_device * dev)
if (!master_priv->sarea_priv)
return -EINVAL;
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __func__,
- dev_priv->current_page,
- master_priv->sarea_priv->pf_current_page);
+ DRM_DEBUG_DRIVER(I915_DRV, "%s: page=%d pfCurrentPage=%d\n",
+ __func__,
+ dev_priv->current_page,
+ master_priv->sarea_priv->pf_current_page);
i915_kernel_lost_context(dev);
@@ -629,8 +633,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
- DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
- batch->start, batch->used, batch->num_cliprects);
+ DRM_DEBUG_DRIVER(I915_DRV,
+ "i915 batchbuffer, start %x used %d cliprects %d\n",
+ batch->start, batch->used, batch->num_cliprects);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -678,8 +683,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
void *batch_data;
int ret;
- DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
- cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+ DRM_DEBUG_DRIVER(I915_DRV,
+ "i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+ cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -734,7 +740,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
{
int ret;
- DRM_DEBUG("%s\n", __func__);
+ DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -777,7 +783,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
break;
default:
- DRM_DEBUG("Unknown parameter %d\n", param->param);
+ DRM_DEBUG_DRIVER(I915_DRV, "Unknown parameter %d\n",
+ param->param);
return -EINVAL;
}
@@ -817,7 +824,8 @@ static int i915_setparam(struct drm_device *dev, void *data,
dev_priv->fence_reg_start = param->value;
break;
default:
- DRM_DEBUG("unknown parameter %d\n", param->param);
+ DRM_DEBUG_DRIVER(I915_DRV, "unknown parameter %d\n",
+ param->param);
return -EINVAL;
}
@@ -865,9 +873,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
- dev_priv->status_gfx_addr);
- DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
+ DRM_DEBUG_DRIVER(I915_DRV, "load hws HWS_PGA with gfx mem 0x%x\n",
+ dev_priv->status_gfx_addr);
+ DRM_DEBUG_DRIVER(I915_DRV, "load hws at %p\n",
+ dev_priv->hw_status_page);
return 0;
}
@@ -922,7 +931,7 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
* Some of the preallocated space is taken by the GTT
* and popup. GTT is 1K per MB of aperture size, and popup is 4K.
*/
- if (IS_G4X(dev) || IS_IGD(dev))
+ if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
overhead = 4096;
else
overhead = (*aperture_size / 1024) + 4096;
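Worked through for a 256 MiB aperture on a pre-G4X part: GTT overhead is 1 KiB per MiB of aperture, i.e. 268435456 / 1024 = 262144 bytes (256 KiB), plus the 4 KiB popup, so the code charges overhead = 262144 + 4096 = 266240 bytes against the preallocated space. On G4X, IGD and now IGDNG the branch above charges a flat 4096 bytes instead.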
@@ -987,12 +996,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
int fb_bar = IS_I9XX(dev) ? 2 : 0;
int ret = 0;
- dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
- if (!dev->devname) {
- ret = -ENOMEM;
- goto out;
- }
-
dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
0xff000000;
@@ -1006,7 +1009,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
if (ret)
- goto kfree_devname;
+ goto out;
/* Basic memrange allocator for stolen space (aka vram) */
drm_mm_init(&dev_priv->vram, 0, prealloc_size);
@@ -1024,7 +1027,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_gem_init_ringbuffer(dev);
if (ret)
- goto kfree_devname;
+ goto out;
/* Allow hardware batchbuffers unless told otherwise.
*/
@@ -1056,8 +1059,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
destroy_ringbuffer:
i915_gem_cleanup_ringbuffer(dev);
-kfree_devname:
- kfree(dev->devname);
out:
return ret;
}
@@ -1161,8 +1162,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
#endif
dev->driver->get_vblank_counter = i915_get_vblank_counter;
- if (IS_GM45(dev))
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+ if (IS_G4X(dev) || IS_IGDNG(dev)) {
+ dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ }
i915_gem_load(dev);
@@ -1206,7 +1210,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
/* Must be done after probing outputs */
- intel_opregion_init(dev, 0);
+ /* FIXME: verify on IGDNG */
+ if (!IS_IGDNG(dev))
+ intel_opregion_init(dev, 0);
return 0;
@@ -1240,7 +1246,8 @@ int i915_driver_unload(struct drm_device *dev)
if (dev_priv->regs != NULL)
iounmap(dev_priv->regs);
- intel_opregion_free(dev, 0);
+ if (!IS_IGDNG(dev))
+ intel_opregion_free(dev, 0);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_modeset_cleanup(dev);
@@ -1264,7 +1271,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_i915_file_private *i915_file_priv;
- DRM_DEBUG("\n");
+ DRM_DEBUG_DRIVER(I915_DRV, "\n");
i915_file_priv = (struct drm_i915_file_private *)
drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
@@ -1273,8 +1280,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
file_priv->driver_priv = i915_file_priv;
- i915_file_priv->mm.last_gem_seqno = 0;
- i915_file_priv->mm.last_gem_throttle_seqno = 0;
+ INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
return 0;
}
@@ -1311,6 +1317,7 @@ void i915_driver_lastclose(struct drm_device * dev)
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ i915_gem_release(dev, file_priv);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9b149fe824c..8ef6bcec211 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -126,6 +126,13 @@ struct drm_i915_fence_reg {
struct drm_gem_object *obj;
};
+struct sdvo_device_mapping {
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 initialized;
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -143,6 +150,8 @@ typedef struct drm_i915_private {
drm_local_map_t hws_map;
struct drm_gem_object *hws_obj;
+ struct resource mch_res;
+
unsigned int cpp;
int back_offset;
int front_offset;
@@ -158,6 +167,11 @@ typedef struct drm_i915_private {
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask_reg;
u32 pipestat[2];
+ /** split irq regs for graphics and display engine on IGDNG,
+ irq_mask_reg is still used for display irq. */

+ u32 gt_irq_mask_reg;
+ u32 gt_irq_enable_reg;
+ u32 de_irq_enable_reg;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
@@ -180,7 +194,8 @@ typedef struct drm_i915_private {
int backlight_duty_cycle; /* restore backlight to this value */
bool panel_wants_dither;
struct drm_display_mode *panel_fixed_mode;
- struct drm_display_mode *vbt_mode; /* if any */
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
@@ -284,6 +299,13 @@ typedef struct drm_i915_private {
u8 saveDACMASK;
u8 saveCR[37];
uint64_t saveFENCE[16];
+ u32 saveCURACNTR;
+ u32 saveCURAPOS;
+ u32 saveCURABASE;
+ u32 saveCURBCNTR;
+ u32 saveCURBPOS;
+ u32 saveCURBBASE;
+ u32 saveCURSIZE;
struct {
struct drm_mm gtt_space;
@@ -381,6 +403,7 @@ typedef struct drm_i915_private {
/* storage for physical objects */
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
} mm;
+ struct sdvo_device_mapping sdvo_mappings[2];
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
@@ -490,13 +513,16 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
+ /** global list entry for this request */
struct list_head list;
+
+ /** file_priv list entry for this request */
+ struct list_head client_list;
};
struct drm_i915_file_private {
struct {
- uint32_t last_gem_seqno;
- uint32_t last_gem_throttle_seqno;
+ struct list_head request_list;
} mm;
};
@@ -641,6 +667,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_object_get_pages(struct drm_gem_object *obj);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
+void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -784,7 +811,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E02 || \
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32)
+ (dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x0042 || \
+ (dev)->pci_device == 0x0046)
#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
(dev)->pci_device == 0x2A12)
@@ -806,20 +835,26 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x29D2 || \
(IS_IGD(dev)))
+#define IS_IGDNG_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IGDNG_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
+
#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
- IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
+ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
+ IS_IGDNG(dev))
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
- IS_IGD(dev))
+ IS_IGD(dev) || IS_IGDNG_M(dev))
-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
+#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
+ IS_IGDNG(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
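In other words, a 945-style Y tile is 128 bytes wide by 32 rows, and 128 x 32 = 4096 bytes, so each tile still covers exactly one 4 KiB page; what the narrower rows change is the stride alignment and the fence pitch encoding handled in the i915_gem_tiling.c hunks later in this diff.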
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b189b49c760..c0ae6bbbd9b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -349,7 +349,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
- user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+ user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
if (user_pages == NULL)
return -ENOMEM;
@@ -429,7 +429,7 @@ fail_put_user_pages:
SetPageDirty(user_pages[i]);
page_cache_release(user_pages[i]);
}
- kfree(user_pages);
+ drm_free_large(user_pages);
return ret;
}
@@ -649,7 +649,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
- user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+ user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
if (user_pages == NULL)
return -ENOMEM;
@@ -719,7 +719,7 @@ out_unlock:
out_unpin_pages:
for (i = 0; i < pinned_pages; i++)
page_cache_release(user_pages[i]);
- kfree(user_pages);
+ drm_free_large(user_pages);
return ret;
}
@@ -824,7 +824,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
- user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+ user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
if (user_pages == NULL)
return -ENOMEM;
@@ -902,7 +902,7 @@ fail_unlock:
fail_put_user_pages:
for (i = 0; i < pinned_pages; i++)
page_cache_release(user_pages[i]);
- kfree(user_pages);
+ drm_free_large(user_pages);
return ret;
}
@@ -989,10 +989,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
/* Only handle setting domains to types used by the CPU. */
- if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ if (write_domain & I915_GEM_GPU_DOMAINS)
return -EINVAL;
- if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ if (read_domains & I915_GEM_GPU_DOMAINS)
return -EINVAL;
/* Having something in the write domain implies it's in the read
@@ -1145,7 +1145,14 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_unlock(&dev->struct_mutex);
return VM_FAULT_SIGBUS;
}
- list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return VM_FAULT_SIGBUS;
+ }
+
+ list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
}
/* Need a new fence register? */
@@ -1375,7 +1382,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
return ret;
}
- list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
}
drm_gem_object_unreference(obj);
@@ -1408,9 +1415,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
}
obj_priv->dirty = 0;
- drm_free(obj_priv->pages,
- page_count * sizeof(struct page *),
- DRM_MEM_DRIVER);
+ drm_free_large(obj_priv->pages);
obj_priv->pages = NULL;
}
@@ -1476,14 +1481,19 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
* Returned sequence numbers are nonzero on success.
*/
static uint32_t
-i915_add_request(struct drm_device *dev, uint32_t flush_domains)
+i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+ uint32_t flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *i915_file_priv = NULL;
struct drm_i915_gem_request *request;
uint32_t seqno;
int was_empty;
RING_LOCALS;
+ if (file_priv != NULL)
+ i915_file_priv = file_priv->driver_priv;
+
request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
if (request == NULL)
return 0;
@@ -1510,6 +1520,12 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
request->emitted_jiffies = jiffies;
was_empty = list_empty(&dev_priv->mm.request_list);
list_add_tail(&request->list, &dev_priv->mm.request_list);
+ if (i915_file_priv) {
+ list_add_tail(&request->client_list,
+ &i915_file_priv->mm.request_list);
+ } else {
+ INIT_LIST_HEAD(&request->client_list);
+ }
/* Associate any objects on the flushing list matching the write
* domain we're flushing with our flush.
@@ -1659,6 +1675,7 @@ i915_gem_retire_requests(struct drm_device *dev)
i915_gem_retire_request(dev, request);
list_del(&request->list);
+ list_del(&request->client_list);
drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
} else
break;
@@ -1697,7 +1714,10 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
BUG_ON(seqno == 0);
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- ier = I915_READ(IER);
+ if (IS_IGDNG(dev))
+ ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else
+ ier = I915_READ(IER);
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
@@ -1749,8 +1769,7 @@ i915_gem_flush(struct drm_device *dev,
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
- if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)) {
+ if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
/*
* read/write caches:
*
@@ -1972,7 +1991,7 @@ i915_gem_evict_something(struct drm_device *dev)
i915_gem_flush(dev,
obj->write_domain,
obj->write_domain);
- i915_add_request(dev, obj->write_domain);
+ i915_add_request(dev, NULL, obj->write_domain);
obj = NULL;
continue;
@@ -1986,7 +2005,7 @@ i915_gem_evict_something(struct drm_device *dev)
/* If we didn't do any of the above, there's nothing to be done
* and we just can't fit it in.
*/
- return -ENOMEM;
+ return -ENOSPC;
}
return ret;
}
@@ -2001,7 +2020,7 @@ i915_gem_evict_everything(struct drm_device *dev)
if (ret != 0)
break;
}
- if (ret == -ENOMEM)
+ if (ret == -ENOSPC)
return 0;
return ret;
}
@@ -2024,8 +2043,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
*/
page_count = obj->size / PAGE_SIZE;
BUG_ON(obj_priv->pages != NULL);
- obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
- DRM_MEM_DRIVER);
+ obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
if (obj_priv->pages == NULL) {
DRM_ERROR("Faled to allocate page list\n");
obj_priv->pages_refcount--;
@@ -2131,8 +2149,10 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
return;
}
- pitch_val = (obj_priv->stride / 128) - 1;
- WARN_ON(pitch_val & ~0x0000000f);
+ pitch_val = obj_priv->stride / 128;
+ pitch_val = ffs(pitch_val) - 1;
+ WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
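The new encoding above stores the pitch as a power-of-two count of 128-byte tiles rather than the raw tile count: for a power-of-two stride, ffs(stride / 128) - 1 is the log2 of the number of tiles per row. A small standalone illustration of the arithmetic:

#include <strings.h>   /* ffs() */
#include <stdio.h>

int main(void)
{
        unsigned int stride = 2048;               /* bytes per row */
        unsigned int tiles = stride / 128;        /* 16 tiles of 128 bytes */
        unsigned int pitch_val = ffs(tiles) - 1;  /* ffs(16) - 1 = 4 */

        printf("pitch_val = %u\n", pitch_val);    /* prints 4 */
        return 0;
}

By this encoding, the new I830_FENCE_MAX_PITCH_VAL of 6 (see the i915_reg.h hunk further down) corresponds to 2^6 = 64 tiles, i.e. an 8 KiB stride.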
@@ -2209,7 +2229,7 @@ try_again:
loff_t offset;
if (avail == 0)
- return -ENOMEM;
+ return -ENOSPC;
for (i = dev_priv->fence_reg_start;
i < dev_priv->num_fence_regs; i++) {
@@ -2242,7 +2262,7 @@ try_again:
i915_gem_flush(dev,
I915_GEM_GPU_DOMAINS,
I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev,
+ seqno = i915_add_request(dev, NULL,
I915_GEM_GPU_DOMAINS);
if (seqno == 0)
return -ENOMEM;
@@ -2254,9 +2274,6 @@ try_again:
goto try_again;
}
- BUG_ON(old_obj_priv->active ||
- (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
-
/*
* Zap this virtual mapping so we can set up a fence again
* for this object next time we need it.
@@ -2361,7 +2378,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
spin_unlock(&dev_priv->mm.active_list_lock);
if (lists_empty) {
DRM_ERROR("GTT full, but LRU list empty\n");
- return -ENOMEM;
+ return -ENOSPC;
}
ret = i915_gem_evict_something(dev);
@@ -2406,8 +2423,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
- BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+ BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
return 0;
}
@@ -2424,6 +2441,16 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
if (obj_priv->pages == NULL)
return;
+ /* XXX: The 865 in particular appears to be weird in how it handles
+ * cache flushing. We haven't figured it out, but the
+ * clflush+agp_chipset_flush doesn't appear to successfully get the
+ * data visible to the GPU, while wbinvd + agp_chipset_flush does.
+ */
+ if (IS_I865G(obj->dev)) {
+ wbinvd();
+ return;
+ }
+
drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
@@ -2439,7 +2466,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
/* Queue the GPU write cache flushing we need. */
i915_gem_flush(dev, 0, obj->write_domain);
- seqno = i915_add_request(dev, obj->write_domain);
+ seqno = i915_add_request(dev, NULL, obj->write_domain);
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
}
@@ -3022,20 +3049,12 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
int nbox = exec->num_cliprects;
int i = 0, count;
- uint32_t exec_start, exec_len;
+ uint32_t exec_start, exec_len;
RING_LOCALS;
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
- if ((exec_start | exec_len) & 0x7) {
- DRM_ERROR("alignment\n");
- return -EINVAL;
- }
-
- if (!exec_start)
- return -EINVAL;
-
count = nbox ? nbox : 1;
for (i = 0; i < count; i++) {
@@ -3076,6 +3095,10 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
* This should get us reasonable parallelism between CPU and GPU but also
* relatively low latency when blocking on a particular request to finish.
*/
@@ -3084,15 +3107,25 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
int ret = 0;
- uint32_t seqno;
+ unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
mutex_lock(&dev->struct_mutex);
- seqno = i915_file_priv->mm.last_gem_throttle_seqno;
- i915_file_priv->mm.last_gem_throttle_seqno =
- i915_file_priv->mm.last_gem_seqno;
- if (seqno)
- ret = i915_wait_request(dev, seqno);
+ while (!list_empty(&i915_file_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&i915_file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
+
+ if (time_after_eq(request->emitted_jiffies, recent_enough))
+ break;
+
+ ret = i915_wait_request(dev, request->seqno);
+ if (ret != 0)
+ break;
+ }
mutex_unlock(&dev->struct_mutex);
+
return ret;
}
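The note added above is why recent_enough is computed once, before the loop: the cutoff is a snapshot of "now minus 20 ms" taken at entry, so a client whose frames take longer than 20 ms to render only drains the requests that were already old when throttling began, rather than chasing a moving deadline forever. A minimal sketch of the same pattern (illustrative only, plain C; the wraparound-safe comparison the kernel gets from time_after_eq() on jiffies is omitted):

struct request {
        unsigned long emitted;        /* timestamp when the request was issued */
        struct request *next;
};

static void throttle(struct request *oldest_first, unsigned long now,
                     unsigned long window,
                     void (*wait_for)(struct request *))
{
        unsigned long recent_enough = now - window;   /* fixed cutoff, taken once */
        struct request *r;

        for (r = oldest_first; r != NULL; r = r->next) {
                if (r->emitted >= recent_enough)      /* already young enough */
                        break;
                wait_for(r);                          /* block on the old request */
        }
}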
@@ -3111,7 +3144,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
reloc_count += exec_list[i].relocation_count;
}
- *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
+ *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
if (*relocs == NULL)
return -ENOMEM;
@@ -3125,8 +3158,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
exec_list[i].relocation_count *
sizeof(**relocs));
if (ret != 0) {
- drm_free(*relocs, reloc_count * sizeof(**relocs),
- DRM_MEM_DRIVER);
+ drm_free_large(*relocs);
*relocs = NULL;
return -EFAULT;
}
@@ -3165,17 +3197,34 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
}
err:
- drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
+ drm_free_large(relocs);
return ret;
}
+static int
+i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+ uint64_t exec_offset)
+{
+ uint32_t exec_start, exec_len;
+
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ exec_len = (uint32_t) exec->batch_len;
+
+ if ((exec_start | exec_len) & 0x7)
+ return -EINVAL;
+
+ if (!exec_start)
+ return -EINVAL;
+
+ return 0;
+}
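The (exec_start | exec_len) & 0x7 test above checks both values for 8-byte alignment in a single operation: OR-ing the two values preserves every low-order bit set in either of them, so the result has nonzero low 3 bits exactly when at least one operand does. For example, exec_start = 0x1040 and exec_len = 0x204 give 0x1040 | 0x204 = 0x1244, and 0x1244 & 0x7 = 0x4, so the buffer is rejected even though exec_start itself is aligned.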
+
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_gem_object **object_list = NULL;
@@ -3198,10 +3247,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
/* Copy in the exec list from userland */
- exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
- DRM_MEM_DRIVER);
- object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
- DRM_MEM_DRIVER);
+ exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
+ object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
if (exec_list == NULL || object_list == NULL) {
DRM_ERROR("Failed to allocate exec or object list "
"for %d buffers\n",
@@ -3302,7 +3349,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
break;
/* error other than GTT full, or we've already tried again */
- if (ret != -ENOMEM || pin_tries >= 1) {
+ if (ret != -ENOSPC || pin_tries >= 1) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to pin buffers %d\n", ret);
goto err;
@@ -3321,8 +3368,20 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* Set the pending read domains for the batch buffer to COMMAND */
batch_obj = object_list[args->buffer_count-1];
- batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
- batch_obj->pending_write_domain = 0;
+ if (batch_obj->pending_write_domain) {
+ DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+ /* Sanity check the batch buffer, prior to moving objects */
+ exec_offset = exec_list[args->buffer_count - 1].offset;
+ ret = i915_gem_check_execbuffer (args, exec_offset);
+ if (ret != 0) {
+ DRM_ERROR("execbuf with invalid offset/length\n");
+ goto err;
+ }
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -3353,7 +3412,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
dev->invalidate_domains,
dev->flush_domains);
if (dev->flush_domains)
- (void)i915_add_request(dev, dev->flush_domains);
+ (void)i915_add_request(dev, file_priv,
+ dev->flush_domains);
}
for (i = 0; i < args->buffer_count; i++) {
@@ -3371,8 +3431,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
}
#endif
- exec_offset = exec_list[args->buffer_count - 1].offset;
-
#if WATCH_EXEC
i915_gem_dump_object(batch_obj,
args->batch_len,
@@ -3402,9 +3460,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
* *some* interrupts representing completion of buffers that we can
* wait on when trying to clear up gtt space).
*/
- seqno = i915_add_request(dev, flush_domains);
+ seqno = i915_add_request(dev, file_priv, flush_domains);
BUG_ON(seqno == 0);
- i915_file_priv->mm.last_gem_seqno = seqno;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
@@ -3462,10 +3519,8 @@ err:
}
pre_mutex_err:
- drm_free(object_list, sizeof(*object_list) * args->buffer_count,
- DRM_MEM_DRIVER);
- drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
- DRM_MEM_DRIVER);
+ drm_free_large(object_list);
+ drm_free_large(exec_list);
drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
DRM_MEM_DRIVER);
@@ -3512,8 +3567,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
atomic_inc(&dev->pin_count);
atomic_add(obj->size, &dev->pin_memory);
if (!obj_priv->active &&
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)) == 0 &&
+ (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
}
@@ -3540,8 +3594,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
*/
if (obj_priv->pin_count == 0) {
if (!obj_priv->active &&
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)) == 0)
+ (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
list_move_tail(&obj_priv->list,
&dev_priv->mm.inactive_list);
atomic_dec(&dev->pin_count);
@@ -3645,15 +3698,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
args->handle);
- mutex_unlock(&dev->struct_mutex);
return -EBADF;
}
+ mutex_lock(&dev->struct_mutex);
/* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs are
* actually unmasked, and our working set ends up being larger than
@@ -3792,9 +3844,8 @@ i915_gem_idle(struct drm_device *dev)
/* Flush the GPU along with all non-CPU write domains
*/
- i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
- ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
- seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
+ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
if (seqno == 0) {
mutex_unlock(&dev->struct_mutex);
@@ -4344,3 +4395,17 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
drm_agp_chipset_flush(dev);
return 0;
}
+
+void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+
+ /* Clean up our request list when the client is going away, so that
+ * later retire_requests won't dereference our soon-to-be-gone
+ * file_priv.
+ */
+ mutex_lock(&dev->struct_mutex);
+ while (!list_empty(&i915_file_priv->mm.request_list))
+ list_del_init(i915_file_priv->mm.request_list.next);
+ mutex_unlock(&dev->struct_mutex);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 52a059354e8..9a05cadaa4a 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,6 +25,8 @@
*
*/
+#include <linux/acpi.h>
+#include <linux/pnp.h>
#include "linux/string.h"
#include "linux/bitops.h"
#include "drmP.h"
@@ -81,6 +83,143 @@
* to match what the GPU expects.
*/
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define DEVEN_MCHBAR_EN (1 << 28)
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+ struct pci_dev *bridge_dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp_lo, temp_hi = 0;
+ u64 mchbar_addr;
+ int ret = 0;
+
+ bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+ if (!bridge_dev) {
+ DRM_DEBUG("no bridge dev?!\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (IS_I965G(dev))
+ pci_read_config_dword(bridge_dev, reg + 4, &temp_hi);
+ pci_read_config_dword(bridge_dev, reg, &temp_lo);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+ /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+ if (mchbar_addr &&
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
+ ret = 0;
+ goto out_put;
+ }
+
+ /* Get some space for it */
+ ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
+ MCHBAR_SIZE, MCHBAR_SIZE,
+ PCIBIOS_MIN_MEM,
+ 0, pcibios_align_resource,
+ bridge_dev);
+ if (ret) {
+ DRM_DEBUG("failed bus alloc: %d\n", ret);
+ dev_priv->mch_res.start = 0;
+ goto out_put;
+ }
+
+ if (IS_I965G(dev))
+ pci_write_config_dword(bridge_dev, reg + 4,
+ upper_32_bits(dev_priv->mch_res.start));
+
+ pci_write_config_dword(bridge_dev, reg,
+ lower_32_bits(dev_priv->mch_res.start));
+out_put:
+ pci_dev_put(bridge_dev);
+out:
+ return ret;
+}
+
+/* Setup MCHBAR if possible, return true if we should disable it again */
+static bool
+intel_setup_mchbar(struct drm_device *dev)
+{
+ struct pci_dev *bridge_dev;
+ int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+ bool need_disable = false, enabled;
+
+ bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+ if (!bridge_dev) {
+ DRM_DEBUG("no bridge dev?!\n");
+ goto out;
+ }
+
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
+ enabled = !!(temp & DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
+ enabled = temp & 1;
+ }
+
+ /* If it's already enabled, don't have to do anything */
+ if (enabled)
+ goto out_put;
+
+ if (intel_alloc_mchbar_resource(dev))
+ goto out_put;
+
+ need_disable = true;
+
+ /* Space is allocated or reserved, so enable it. */
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_write_config_dword(bridge_dev, DEVEN_REG,
+ temp | DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
+ pci_write_config_dword(bridge_dev, mchbar_reg, temp | 1);
+ }
+out_put:
+ pci_dev_put(bridge_dev);
+out:
+ return need_disable;
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev, bool disable)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct pci_dev *bridge_dev;
+ int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+
+ bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+ if (!bridge_dev) {
+ DRM_DEBUG("no bridge dev?!\n");
+ return;
+ }
+
+ if (disable) {
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
+ temp &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(bridge_dev, DEVEN_REG, temp);
+ } else {
+ pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
+ temp &= ~1;
+ pci_write_config_dword(bridge_dev, mchbar_reg, temp);
+ }
+ }
+
+ if (dev_priv->mch_res.start)
+ release_resource(&dev_priv->mch_res);
+}
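The contract spelled out in the comments above ("return true if we should disable it again") is the usual enable-only-if-needed pattern: setup reports whether it was the one to turn MCHBAR on, and teardown undoes the enable only in that case, leaving a BIOS-enabled MCHBAR untouched; the caller in i915_gem_detect_bit_6_swizzle() below keeps that flag in need_disable. A condensed sketch of the shape of that contract (illustrative, not driver code):

#include <stdbool.h>

static bool hw_enabled;                /* stands in for the MCHBAR enable bit */

static bool setup(void)
{
        if (hw_enabled)                /* firmware already enabled it */
                return false;          /* nothing for us to undo later */
        hw_enabled = true;             /* we enabled it ourselves */
        return true;                   /* caller must disable it again */
}

static void teardown(bool disable)
{
        if (disable)
                hw_enabled = false;    /* only undo what we did */
}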
+
/**
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
@@ -91,6 +230,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ bool need_disable;
if (!IS_I9XX(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
@@ -101,6 +241,9 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
} else if (IS_MOBILE(dev)) {
uint32_t dcc;
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ need_disable = intel_setup_mchbar(dev);
+
/* On mobile 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
@@ -140,6 +283,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
+
+ intel_teardown_mchbar(dev, need_disable);
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
@@ -170,6 +315,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
}
+ /* FIXME: check with memory config on IGDNG */
+ if (IS_IGDNG(dev)) {
+ DRM_ERROR("disable tiling on IGDNG...\n");
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+
dev_priv->mm.bit_6_swizzle_x = swizzle_x;
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
@@ -213,7 +365,8 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode == I915_TILING_NONE)
return true;
- if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ if (!IS_I9XX(dev) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_width = 128;
else
tile_width = 512;
@@ -225,11 +378,18 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else if (IS_I9XX(dev)) {
- if (stride / tile_width > I830_FENCE_MAX_PITCH_VAL ||
+ uint32_t pitch_val = ffs(stride / tile_width) - 1;
+
+ /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
+ * instead of 4 (2KB) on 945s.
+ */
+ if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
size > (I830_FENCE_MAX_SIZE_VAL << 20))
return false;
} else {
- if (stride / 128 > I830_FENCE_MAX_PITCH_VAL ||
+ uint32_t pitch_val = ffs(stride / tile_width) - 1;
+
+ if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
size > (I830_FENCE_MAX_SIZE_VAL << 19))
return false;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 98bb4c878c4..b86b7b7130c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -58,6 +58,47 @@
DRM_I915_VBLANK_PIPE_B)
void
+igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
+ dev_priv->gt_irq_mask_reg &= ~mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+ (void) I915_READ(GTIMR);
+ }
+}
+
+static inline void
+igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
+ dev_priv->gt_irq_mask_reg |= mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+ (void) I915_READ(GTIMR);
+ }
+}
+
+/* For display hotplug interrupt */
+void
+igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ if ((dev_priv->irq_mask_reg & mask) != 0) {
+ dev_priv->irq_mask_reg &= ~mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(DEIMR);
+ }
+}
+
+static inline void
+igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ if ((dev_priv->irq_mask_reg & mask) != mask) {
+ dev_priv->irq_mask_reg |= mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(DEIMR);
+ }
+}
+
+void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != 0) {
@@ -196,6 +237,47 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_sysfs_hotplug_event(dev);
}
+irqreturn_t igdng_irq_handler(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int ret = IRQ_NONE;
+ u32 de_iir, gt_iir;
+ u32 new_de_iir, new_gt_iir;
+ struct drm_i915_master_private *master_priv;
+
+ de_iir = I915_READ(DEIIR);
+ gt_iir = I915_READ(GTIIR);
+
+ for (;;) {
+ if (de_iir == 0 && gt_iir == 0)
+ break;
+
+ ret = IRQ_HANDLED;
+
+ I915_WRITE(DEIIR, de_iir);
+ new_de_iir = I915_READ(DEIIR);
+ I915_WRITE(GTIIR, gt_iir);
+ new_gt_iir = I915_READ(GTIIR);
+
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+
+ if (gt_iir & GT_USER_INTERRUPT) {
+ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ }
+
+ de_iir = new_de_iir;
+ gt_iir = new_gt_iir;
+ }
+
+ return ret;
+}
+
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -212,6 +294,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
atomic_inc(&dev_priv->irq_received);
+ if (IS_IGDNG(dev))
+ return igdng_irq_handler(dev);
+
iir = I915_READ(IIR);
if (IS_I965G(dev)) {
@@ -349,8 +434,12 @@ void i915_user_irq_get(struct drm_device *dev)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
- i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
+ if (IS_IGDNG(dev))
+ igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ else
+ i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ }
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
@@ -361,8 +450,12 @@ void i915_user_irq_put(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
- if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
- i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
+ if (IS_IGDNG(dev))
+ igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ else
+ i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ }
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
@@ -455,6 +548,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!(pipeconf & PIPEACONF_ENABLE))
return -EINVAL;
+ if (IS_IGDNG(dev))
+ return 0;
+
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (IS_I965G(dev))
i915_enable_pipestat(dev_priv, pipe,
@@ -474,6 +570,9 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
+ if (IS_IGDNG(dev))
+ return;
+
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
@@ -484,7 +583,9 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
void i915_enable_interrupt (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- opregion_enable_asle(dev);
+
+ if (!IS_IGDNG(dev))
+ opregion_enable_asle(dev);
dev_priv->irq_enabled = 1;
}
@@ -545,12 +646,65 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
/* drm_dma.h hooks
*/
+static void igdng_irq_preinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ I915_WRITE(HWSTAM, 0xeffe);
+
+ /* XXX hotplug from PCH */
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ (void) I915_READ(DEIER);
+
+ /* and GT */
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ (void) I915_READ(GTIER);
+}
+
+static int igdng_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ /* enable the kinds of interrupts that are always enabled */
+ u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
+ u32 render_mask = GT_USER_INTERRUPT;
+
+ dev_priv->irq_mask_reg = ~display_mask;
+ dev_priv->de_irq_enable_reg = display_mask;
+
+ /* should always be able to generate irqs */
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+ I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
+ I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
+ (void) I915_READ(DEIER);
+
+ /* user interrupt should be enabled, but masked initially */
+ dev_priv->gt_irq_mask_reg = 0xffffffff;
+ dev_priv->gt_irq_enable_reg = render_mask;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+ I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
+ (void) I915_READ(GTIER);
+
+ return 0;
+}
+
void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
atomic_set(&dev_priv->irq_received, 0);
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+
+ if (IS_IGDNG(dev)) {
+ igdng_irq_preinstall(dev);
+ return;
+ }
+
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -562,7 +716,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
(void) I915_READ(IER);
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
}
int i915_driver_irq_postinstall(struct drm_device *dev)
@@ -570,9 +723,12 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
+ DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
- dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+ if (IS_IGDNG(dev))
+ return igdng_irq_postinstall(dev);
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
@@ -613,11 +769,24 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
(void) I915_READ(IER);
opregion_enable_asle(dev);
- DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
return 0;
}
+static void igdng_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+}
+
void i915_driver_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -627,6 +796,11 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
dev_priv->vblank_pipe = 0;
+ if (IS_IGDNG(dev)) {
+ igdng_irq_uninstall(dev);
+ return;
+ }
+
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 15da44cf21b..f6237a0b113 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -190,7 +190,8 @@
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
#define I830_FENCE_REG_VALID (1<<0)
-#define I830_FENCE_MAX_PITCH_VAL 0x10
+#define I915_FENCE_MAX_PITCH_VAL 0x10
+#define I830_FENCE_MAX_PITCH_VAL 6
#define I830_FENCE_MAX_SIZE_VAL (1<<8)
#define I915_FENCE_START_MASK 0x0ff00000
@@ -449,6 +450,13 @@
#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
#define PLL_REF_INPUT_MASK (3 << 13)
#define PLL_LOAD_PULSE_PHASE_SHIFT 9
+/* IGDNG */
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
+# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
+# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
+
/*
* Parallel to Serial Load Pulse phase selection.
* Selects the phase for the 10X DPLL clock for the PCIe
@@ -630,8 +638,11 @@
/* Hotplug control (945+ only) */
#define PORT_HOTPLUG_EN 0x61110
#define HDMIB_HOTPLUG_INT_EN (1 << 29)
+#define DPB_HOTPLUG_INT_EN (1 << 29)
#define HDMIC_HOTPLUG_INT_EN (1 << 28)
+#define DPC_HOTPLUG_INT_EN (1 << 28)
#define HDMID_HOTPLUG_INT_EN (1 << 27)
+#define DPD_HOTPLUG_INT_EN (1 << 27)
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
@@ -664,8 +675,11 @@
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
+#define DPB_HOTPLUG_INT_STATUS (1 << 29)
#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
+#define DPC_HOTPLUG_INT_STATUS (1 << 28)
#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
+#define DPD_HOTPLUG_INT_STATUS (1 << 27)
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
@@ -950,15 +964,15 @@
# define DAC_A_1_3_V (0 << 4)
# define DAC_A_1_1_V (1 << 4)
# define DAC_A_0_7_V (2 << 4)
-# define DAC_A_OFF (3 << 4)
+# define DAC_A_MASK (3 << 4)
# define DAC_B_1_3_V (0 << 2)
# define DAC_B_1_1_V (1 << 2)
# define DAC_B_0_7_V (2 << 2)
-# define DAC_B_OFF (3 << 2)
+# define DAC_B_MASK (3 << 2)
# define DAC_C_1_3_V (0 << 0)
# define DAC_C_1_1_V (1 << 0)
# define DAC_C_0_7_V (2 << 0)
-# define DAC_C_OFF (3 << 0)
+# define DAC_C_MASK (3 << 0)
/**
* CSC coefficients are stored in a floating point format with 9 bits of
@@ -1327,6 +1341,163 @@
#define TV_V_CHROMA_0 0x68400
#define TV_V_CHROMA_42 0x684a8
+/* Display Port */
+#define DP_B 0x64100
+#define DP_C 0x64200
+#define DP_D 0x64300
+
+#define DP_PORT_EN (1 << 31)
+#define DP_PIPEB_SELECT (1 << 30)
+
+/* Link training mode - select a suitable mode for each stage */
+#define DP_LINK_TRAIN_PAT_1 (0 << 28)
+#define DP_LINK_TRAIN_PAT_2 (1 << 28)
+#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
+#define DP_LINK_TRAIN_OFF (3 << 28)
+#define DP_LINK_TRAIN_MASK (3 << 28)
+#define DP_LINK_TRAIN_SHIFT 28
+
+/* Signal voltages. These are mostly controlled by the other end */
+#define DP_VOLTAGE_0_4 (0 << 25)
+#define DP_VOLTAGE_0_6 (1 << 25)
+#define DP_VOLTAGE_0_8 (2 << 25)
+#define DP_VOLTAGE_1_2 (3 << 25)
+#define DP_VOLTAGE_MASK (7 << 25)
+#define DP_VOLTAGE_SHIFT 25
+
+/* Signal pre-emphasis levels, like voltages, the other end tells us what
+ * they want
+ */
+#define DP_PRE_EMPHASIS_0 (0 << 22)
+#define DP_PRE_EMPHASIS_3_5 (1 << 22)
+#define DP_PRE_EMPHASIS_6 (2 << 22)
+#define DP_PRE_EMPHASIS_9_5 (3 << 22)
+#define DP_PRE_EMPHASIS_MASK (7 << 22)
+#define DP_PRE_EMPHASIS_SHIFT 22
+
+/* How many wires to use. I guess 3 was too hard */
+#define DP_PORT_WIDTH_1 (0 << 19)
+#define DP_PORT_WIDTH_2 (1 << 19)
+#define DP_PORT_WIDTH_4 (3 << 19)
+#define DP_PORT_WIDTH_MASK (7 << 19)
+
+/* Mystic DPCD version 1.1 special mode */
+#define DP_ENHANCED_FRAMING (1 << 18)
+
+/** locked once port is enabled */
+#define DP_PORT_REVERSAL (1 << 15)
+
+/** sends the clock on lane 15 of the PEG for debug */
+#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
+
+#define DP_SCRAMBLING_DISABLE (1 << 12)
+
+/** limit RGB values to avoid confusing TVs */
+#define DP_COLOR_RANGE_16_235 (1 << 8)
+
+/** Turn on the audio link */
+#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
+
+/** vs and hs sync polarity */
+#define DP_SYNC_VS_HIGH (1 << 4)
+#define DP_SYNC_HS_HIGH (1 << 3)
+
+/** A fantasy */
+#define DP_DETECTED (1 << 2)
+
+/** The aux channel provides a way to talk to the
+ * signal sink for DDC etc. Max packet size supported
+ * is 20 bytes in each direction, hence the 5 fixed
+ * data registers
+ */
+#define DPB_AUX_CH_CTL 0x64110
+#define DPB_AUX_CH_DATA1 0x64114
+#define DPB_AUX_CH_DATA2 0x64118
+#define DPB_AUX_CH_DATA3 0x6411c
+#define DPB_AUX_CH_DATA4 0x64120
+#define DPB_AUX_CH_DATA5 0x64124
+
+#define DPC_AUX_CH_CTL 0x64210
+#define DPC_AUX_CH_DATA1 0x64214
+#define DPC_AUX_CH_DATA2 0x64218
+#define DPC_AUX_CH_DATA3 0x6421c
+#define DPC_AUX_CH_DATA4 0x64220
+#define DPC_AUX_CH_DATA5 0x64224
+
+#define DPD_AUX_CH_CTL 0x64310
+#define DPD_AUX_CH_DATA1 0x64314
+#define DPD_AUX_CH_DATA2 0x64318
+#define DPD_AUX_CH_DATA3 0x6431c
+#define DPD_AUX_CH_DATA4 0x64320
+#define DPD_AUX_CH_DATA5 0x64324
+
+#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
+#define DP_AUX_CH_CTL_DONE (1 << 30)
+#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
+#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28)
+#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
+#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
+#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
+#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
+#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
+#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14)
+#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13)
+#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12)
+#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
+ *
+ * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+#define PIPEA_GMCH_DATA_M 0x70050
+#define PIPEB_GMCH_DATA_M 0x71050
+
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
+#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25
+
+#define PIPE_GMCH_DATA_M_MASK (0xffffff)
+
+#define PIPEA_GMCH_DATA_N 0x70054
+#define PIPEB_GMCH_DATA_N 0x71054
+#define PIPE_GMCH_DATA_N_MASK (0xffffff)
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
+
+#define PIPEA_DP_LINK_M 0x70060
+#define PIPEB_DP_LINK_M 0x71060
+#define PIPEA_DP_LINK_M_MASK (0xffffff)
+
+#define PIPEA_DP_LINK_N 0x70064
+#define PIPEB_DP_LINK_N 0x71064
+#define PIPEA_DP_LINK_N_MASK (0xffffff)
+
/* Display & cursor control */
/* Pipe A */
@@ -1410,9 +1581,25 @@
/* Cursor A & B regs */
#define CURACNTR 0x70080
+/* Old style CUR*CNTR flags (desktop 8xx) */
+#define CURSOR_ENABLE 0x80000000
+#define CURSOR_GAMMA_ENABLE 0x40000000
+#define CURSOR_STRIDE_MASK 0x30000000
+#define CURSOR_FORMAT_SHIFT 24
+#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT)
+/* New style CUR*CNTR flags */
+#define CURSOR_MODE 0x27
#define CURSOR_MODE_DISABLE 0x00
#define CURSOR_MODE_64_32B_AX 0x07
#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_PIPE_SELECT (1 << 28)
+#define MCURSOR_PIPE_A 0x00
+#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define CURABASE 0x70084
#define CURAPOS 0x70088
@@ -1420,6 +1607,7 @@
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
+#define CURSIZE 0x700a0
#define CURBCNTR 0x700c0
#define CURBBASE 0x700c4
#define CURBPOS 0x700c8
@@ -1499,4 +1687,444 @@
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)
+/* IGDNG */
+
+#define CPU_VGACNTRL 0x41000
+
+#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
+#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
+#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2)
+#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2)
+#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2)
+#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2)
+#define DIGITAL_PORTA_NO_DETECT (0 << 0)
+#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1)
+#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0)
+
+/* refresh rate hardware control */
+#define RR_HW_CTL 0x45300
+#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
+#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
+
+#define FDI_PLL_BIOS_0 0x46000
+#define FDI_PLL_BIOS_1 0x46004
+#define FDI_PLL_BIOS_2 0x46008
+#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
+#define DISPLAY_PORT_PLL_BIOS_1 0x46010
+#define DISPLAY_PORT_PLL_BIOS_2 0x46014
+
+#define FDI_PLL_FREQ_CTL 0x46030
+#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
+#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
+#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
+
+
+#define PIPEA_DATA_M1 0x60030
+#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
+#define TU_SIZE_MASK 0x7e000000
+#define PIPEA_DATA_M1_OFFSET 0
+#define PIPEA_DATA_N1 0x60034
+#define PIPEA_DATA_N1_OFFSET 0
+
+#define PIPEA_DATA_M2 0x60038
+#define PIPEA_DATA_M2_OFFSET 0
+#define PIPEA_DATA_N2 0x6003c
+#define PIPEA_DATA_N2_OFFSET 0
+
+#define PIPEA_LINK_M1 0x60040
+#define PIPEA_LINK_M1_OFFSET 0
+#define PIPEA_LINK_N1 0x60044
+#define PIPEA_LINK_N1_OFFSET 0
+
+#define PIPEA_LINK_M2 0x60048
+#define PIPEA_LINK_M2_OFFSET 0
+#define PIPEA_LINK_N2 0x6004c
+#define PIPEA_LINK_N2_OFFSET 0
+
+/* PIPEB timing regs have the same layout and start at 0x61000 */
+
+#define PIPEB_DATA_M1 0x61030
+#define PIPEB_DATA_M1_OFFSET 0
+#define PIPEB_DATA_N1 0x61034
+#define PIPEB_DATA_N1_OFFSET 0
+
+#define PIPEB_DATA_M2 0x61038
+#define PIPEB_DATA_M2_OFFSET 0
+#define PIPEB_DATA_N2 0x6103c
+#define PIPEB_DATA_N2_OFFSET 0
+
+#define PIPEB_LINK_M1 0x61040
+#define PIPEB_LINK_M1_OFFSET 0
+#define PIPEB_LINK_N1 0x61044
+#define PIPEB_LINK_N1_OFFSET 0
+
+#define PIPEB_LINK_M2 0x61048
+#define PIPEB_LINK_M2_OFFSET 0
+#define PIPEB_LINK_N2 0x6104c
+#define PIPEB_LINK_N2_OFFSET 0
+
+/* CPU panel fitter */
+#define PFA_CTL_1 0x68080
+#define PFB_CTL_1 0x68880
+#define PF_ENABLE (1<<31)
+
+/* legacy palette */
+#define LGC_PALETTE_A 0x4a000
+#define LGC_PALETTE_B 0x4a800
+
+/* interrupts */
+#define DE_MASTER_IRQ_CONTROL (1 << 31)
+#define DE_SPRITEB_FLIP_DONE (1 << 29)
+#define DE_SPRITEA_FLIP_DONE (1 << 28)
+#define DE_PLANEB_FLIP_DONE (1 << 27)
+#define DE_PLANEA_FLIP_DONE (1 << 26)
+#define DE_PCU_EVENT (1 << 25)
+#define DE_GTT_FAULT (1 << 24)
+#define DE_POISON (1 << 23)
+#define DE_PERFORM_COUNTER (1 << 22)
+#define DE_PCH_EVENT (1 << 21)
+#define DE_AUX_CHANNEL_A (1 << 20)
+#define DE_DP_A_HOTPLUG (1 << 19)
+#define DE_GSE (1 << 18)
+#define DE_PIPEB_VBLANK (1 << 15)
+#define DE_PIPEB_EVEN_FIELD (1 << 14)
+#define DE_PIPEB_ODD_FIELD (1 << 13)
+#define DE_PIPEB_LINE_COMPARE (1 << 12)
+#define DE_PIPEB_VSYNC (1 << 11)
+#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
+#define DE_PIPEA_VBLANK (1 << 7)
+#define DE_PIPEA_EVEN_FIELD (1 << 6)
+#define DE_PIPEA_ODD_FIELD (1 << 5)
+#define DE_PIPEA_LINE_COMPARE (1 << 4)
+#define DE_PIPEA_VSYNC (1 << 3)
+#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
+
+#define DEISR 0x44000
+#define DEIMR 0x44004
+#define DEIIR 0x44008
+#define DEIER 0x4400c
+
+/* GT interrupt */
+#define GT_SYNC_STATUS (1 << 2)
+#define GT_USER_INTERRUPT (1 << 0)
+
+#define GTISR 0x44010
+#define GTIMR 0x44014
+#define GTIIR 0x44018
+#define GTIER 0x4401c
+
+/* PCH */
+
+/* south display engine interrupt */
+#define SDE_CRT_HOTPLUG (1 << 11)
+#define SDE_PORTD_HOTPLUG (1 << 10)
+#define SDE_PORTC_HOTPLUG (1 << 9)
+#define SDE_PORTB_HOTPLUG (1 << 8)
+#define SDE_SDVOB_HOTPLUG (1 << 6)
+
+#define SDEISR 0xc4000
+#define SDEIMR 0xc4004
+#define SDEIIR 0xc4008
+#define SDEIER 0xc400c
+
+/* digital port hotplug */
+#define PCH_PORT_HOTPLUG 0xc4030
+#define PORTD_HOTPLUG_ENABLE (1 << 20)
+#define PORTD_PULSE_DURATION_2ms (0)
+#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
+#define PORTD_PULSE_DURATION_6ms (2 << 18)
+#define PORTD_PULSE_DURATION_100ms (3 << 18)
+#define PORTD_HOTPLUG_NO_DETECT (0)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
+#define PORTC_HOTPLUG_ENABLE (1 << 12)
+#define PORTC_PULSE_DURATION_2ms (0)
+#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
+#define PORTC_PULSE_DURATION_6ms (2 << 10)
+#define PORTC_PULSE_DURATION_100ms (3 << 10)
+#define PORTC_HOTPLUG_NO_DETECT (0)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
+#define PORTB_HOTPLUG_ENABLE (1 << 4)
+#define PORTB_PULSE_DURATION_2ms (0)
+#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
+#define PORTB_PULSE_DURATION_6ms (2 << 2)
+#define PORTB_PULSE_DURATION_100ms (3 << 2)
+#define PORTB_HOTPLUG_NO_DETECT (0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
+
+#define PCH_GPIOA 0xc5010
+#define PCH_GPIOB 0xc5014
+#define PCH_GPIOC 0xc5018
+#define PCH_GPIOD 0xc501c
+#define PCH_GPIOE 0xc5020
+#define PCH_GPIOF 0xc5024
+
+#define PCH_DPLL_A 0xc6014
+#define PCH_DPLL_B 0xc6018
+
+#define PCH_FPA0 0xc6040
+#define PCH_FPA1 0xc6044
+#define PCH_FPB0 0xc6048
+#define PCH_FPB1 0xc604c
+
+#define PCH_DPLL_TEST 0xc606c
+
+#define PCH_DREF_CONTROL 0xC6200
+#define DREF_CONTROL_MASK 0x7fc3
+#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
+#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
+#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13)
+#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
+#define DREF_SSC_SOURCE_DISABLE (0<<11)
+#define DREF_SSC_SOURCE_ENABLE (2<<11)
+#define DREF_SSC_SOURCE_MASK (2<<11)
+#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
+#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
+#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
+#define DREF_NONSPREAD_SOURCE_MASK (2<<9)
+#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
+#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
+#define DREF_SSC4_DOWNSPREAD (0<<6)
+#define DREF_SSC4_CENTERSPREAD (1<<6)
+#define DREF_SSC1_DISABLE (0<<1)
+#define DREF_SSC1_ENABLE (1<<1)
+#define DREF_SSC4_DISABLE (0)
+#define DREF_SSC4_ENABLE (1)
+
+#define PCH_RAWCLK_FREQ 0xc6204
+#define FDL_TP1_TIMER_SHIFT 12
+#define FDL_TP1_TIMER_MASK (3<<12)
+#define FDL_TP2_TIMER_SHIFT 10
+#define FDL_TP2_TIMER_MASK (3<<10)
+#define RAWCLK_FREQ_MASK 0x3ff
+
+#define PCH_DPLL_TMR_CFG 0xc6208
+
+#define PCH_SSC4_PARMS 0xc6210
+#define PCH_SSC4_AUX_PARMS 0xc6214
+
+/* transcoder */
+
+#define TRANS_HTOTAL_A 0xe0000
+#define TRANS_HTOTAL_SHIFT 16
+#define TRANS_HACTIVE_SHIFT 0
+#define TRANS_HBLANK_A 0xe0004
+#define TRANS_HBLANK_END_SHIFT 16
+#define TRANS_HBLANK_START_SHIFT 0
+#define TRANS_HSYNC_A 0xe0008
+#define TRANS_HSYNC_END_SHIFT 16
+#define TRANS_HSYNC_START_SHIFT 0
+#define TRANS_VTOTAL_A 0xe000c
+#define TRANS_VTOTAL_SHIFT 16
+#define TRANS_VACTIVE_SHIFT 0
+#define TRANS_VBLANK_A 0xe0010
+#define TRANS_VBLANK_END_SHIFT 16
+#define TRANS_VBLANK_START_SHIFT 0
+#define TRANS_VSYNC_A 0xe0014
+#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_START_SHIFT 0
+
+#define TRANSA_DATA_M1 0xe0030
+#define TRANSA_DATA_N1 0xe0034
+#define TRANSA_DATA_M2 0xe0038
+#define TRANSA_DATA_N2 0xe003c
+#define TRANSA_DP_LINK_M1 0xe0040
+#define TRANSA_DP_LINK_N1 0xe0044
+#define TRANSA_DP_LINK_M2 0xe0048
+#define TRANSA_DP_LINK_N2 0xe004c
+
+#define TRANS_HTOTAL_B 0xe1000
+#define TRANS_HBLANK_B 0xe1004
+#define TRANS_HSYNC_B 0xe1008
+#define TRANS_VTOTAL_B 0xe100c
+#define TRANS_VBLANK_B 0xe1010
+#define TRANS_VSYNC_B 0xe1014
+
+#define TRANSB_DATA_M1 0xe1030
+#define TRANSB_DATA_N1 0xe1034
+#define TRANSB_DATA_M2 0xe1038
+#define TRANSB_DATA_N2 0xe103c
+#define TRANSB_DP_LINK_M1 0xe1040
+#define TRANSB_DP_LINK_N1 0xe1044
+#define TRANSB_DP_LINK_M2 0xe1048
+#define TRANSB_DP_LINK_N2 0xe104c
+
+#define TRANSACONF 0xf0008
+#define TRANSBCONF 0xf1008
+#define TRANS_DISABLE (0<<31)
+#define TRANS_ENABLE (1<<31)
+#define TRANS_STATE_MASK (1<<30)
+#define TRANS_STATE_DISABLE (0<<30)
+#define TRANS_STATE_ENABLE (1<<30)
+#define TRANS_FSYNC_DELAY_HB1 (0<<27)
+#define TRANS_FSYNC_DELAY_HB2 (1<<27)
+#define TRANS_FSYNC_DELAY_HB3 (2<<27)
+#define TRANS_FSYNC_DELAY_HB4 (3<<27)
+#define TRANS_DP_AUDIO_ONLY (1<<26)
+#define TRANS_DP_VIDEO_AUDIO (0<<26)
+#define TRANS_PROGRESSIVE (0<<21)
+#define TRANS_8BPC (0<<5)
+#define TRANS_10BPC (1<<5)
+#define TRANS_6BPC (2<<5)
+#define TRANS_12BPC (3<<5)
+
+#define FDI_RXA_CHICKEN 0xc200c
+#define FDI_RXB_CHICKEN 0xc2010
+#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
+
+/* CPU: FDI_TX */
+#define FDI_TXA_CTL 0x60100
+#define FDI_TXB_CTL 0x61100
+#define FDI_TX_DISABLE (0<<31)
+#define FDI_TX_ENABLE (1<<31)
+#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
+#define FDI_LINK_TRAIN_PATTERN_2 (1<<28)
+#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28)
+#define FDI_LINK_TRAIN_NONE (3<<28)
+#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25)
+#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+#define FDI_DP_PORT_WIDTH_X1 (0<<19)
+#define FDI_DP_PORT_WIDTH_X2 (1<<19)
+#define FDI_DP_PORT_WIDTH_X3 (2<<19)
+#define FDI_DP_PORT_WIDTH_X4 (3<<19)
+#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
+/* IGDNG: hardwired to 1 */
+#define FDI_TX_PLL_ENABLE (1<<14)
+/* both Tx and Rx */
+#define FDI_SCRAMBLING_ENABLE (0<<7)
+#define FDI_SCRAMBLING_DISABLE (1<<7)
+
+/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
+#define FDI_RXA_CTL 0xf000c
+#define FDI_RXB_CTL 0xf100c
+#define FDI_RX_ENABLE (1<<31)
+#define FDI_RX_DISABLE (0<<31)
+/* train, dp width same as FDI_TX */
+#define FDI_DP_PORT_WIDTH_X8 (7<<19)
+#define FDI_8BPC (0<<16)
+#define FDI_10BPC (1<<16)
+#define FDI_6BPC (2<<16)
+#define FDI_12BPC (3<<16)
+#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
+#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
+#define FDI_RX_PLL_ENABLE (1<<13)
+#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
+#define FDI_FE_ERR_CORRECT_ENABLE (1<<10)
+#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
+#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
+#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
+#define FDI_SEL_RAWCLK (0<<4)
+#define FDI_SEL_PCDCLK (1<<4)
+
+#define FDI_RXA_MISC 0xf0010
+#define FDI_RXB_MISC 0xf1010
+#define FDI_RXA_TUSIZE1 0xf0030
+#define FDI_RXA_TUSIZE2 0xf0038
+#define FDI_RXB_TUSIZE1 0xf1030
+#define FDI_RXB_TUSIZE2 0xf1038
+
+/* FDI_RX interrupt register format */
+#define FDI_RX_INTER_LANE_ALIGN (1<<10)
+#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */
+#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7)
+#define FDI_RX_FS_CODE_ERR (1<<6)
+#define FDI_RX_FE_CODE_ERR (1<<5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4)
+#define FDI_RX_HDCP_LINK_FAIL (1<<3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
+
+#define FDI_RXA_IIR 0xf0014
+#define FDI_RXA_IMR 0xf0018
+#define FDI_RXB_IIR 0xf1014
+#define FDI_RXB_IMR 0xf1018
+
+#define FDI_PLL_CTL_1 0xfe000
+#define FDI_PLL_CTL_2 0xfe004
+
+/* CRT */
+#define PCH_ADPA 0xe1100
+#define ADPA_TRANS_SELECT_MASK (1<<30)
+#define ADPA_TRANS_A_SELECT 0
+#define ADPA_TRANS_B_SELECT (1<<30)
+#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
+
+/* or SDVOB */
+#define HDMIB 0xe1140
+#define PORT_ENABLE (1 << 31)
+#define TRANSCODER_A (0)
+#define TRANSCODER_B (1 << 30)
+#define COLOR_FORMAT_8bpc (0)
+#define COLOR_FORMAT_12bpc (3 << 26)
+#define SDVOB_HOTPLUG_ENABLE (1 << 23)
+#define SDVO_ENCODING (0)
+#define TMDS_ENCODING (2 << 10)
+#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
+#define SDVOB_BORDER_ENABLE (1 << 7)
+#define AUDIO_ENABLE (1 << 6)
+#define VSYNC_ACTIVE_HIGH (1 << 4)
+#define HSYNC_ACTIVE_HIGH (1 << 3)
+#define PORT_DETECTED (1 << 2)
+
+#define HDMIC 0xe1150
+#define HDMID 0xe1160
+
+#define PCH_LVDS 0xe1180
+#define LVDS_DETECTED (1 << 1)
+
+#define BLC_PWM_CPU_CTL2 0x48250
+#define PWM_ENABLE (1 << 31)
+#define PWM_PIPE_A (0 << 29)
+#define PWM_PIPE_B (1 << 29)
+#define BLC_PWM_CPU_CTL 0x48254
+
+#define BLC_PWM_PCH_CTL1 0xc8250
+#define PWM_PCH_ENABLE (1 << 31)
+#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
+#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
+#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
+#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
+
+#define BLC_PWM_PCH_CTL2 0xc8254
+
+#define PCH_PP_STATUS 0xc7200
+#define PCH_PP_CONTROL 0xc7204
+#define EDP_FORCE_VDD (1 << 3)
+#define EDP_BLC_ENABLE (1 << 2)
+#define PANEL_POWER_RESET (1 << 1)
+#define PANEL_POWER_OFF (0 << 0)
+#define PANEL_POWER_ON (1 << 0)
+#define PCH_PP_ON_DELAYS 0xc7208
+#define EDP_PANEL (1 << 30)
+#define PCH_PP_OFF_DELAYS 0xc720c
+#define PCH_PP_DIVISOR 0xc7210
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ce8a21344a7..a98e2831ed3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -295,6 +295,16 @@ int i915_save_state(struct drm_device *dev)
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+ /* Cursor state */
+ dev_priv->saveCURACNTR = I915_READ(CURACNTR);
+ dev_priv->saveCURAPOS = I915_READ(CURAPOS);
+ dev_priv->saveCURABASE = I915_READ(CURABASE);
+ dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+ dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+ dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+ if (!IS_I9XX(dev))
+ dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
/* CRT state */
dev_priv->saveADPA = I915_READ(ADPA);
@@ -480,6 +490,16 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+ /* Cursor state */
+ I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
+ I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
+ I915_WRITE(CURABASE, dev_priv->saveCURABASE);
+ I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+ I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+ I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+ if (!IS_I9XX(dev))
+ I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
/* CRT state */
I915_WRITE(ADPA, dev_priv->saveADPA);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index fc28e2bbd54..754dd22fdd7 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -30,6 +30,8 @@
#include "i915_drv.h"
#include "intel_bios.h"
+#define SLAVE_ADDR1 0x70
+#define SLAVE_ADDR2 0x72
static void *
find_section(struct bdb_header *bdb, int section_id)
@@ -57,9 +59,43 @@ find_section(struct bdb_header *bdb, int section_id)
return NULL;
}
-/* Try to find panel data */
static void
-parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+ struct lvds_dvo_timing *dvo_timing)
+{
+ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+ dvo_timing->hactive_lo;
+ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+ dvo_timing->hsync_pulse_width;
+ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+ dvo_timing->vactive_lo;
+ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+ dvo_timing->vsync_off;
+ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+ dvo_timing->vsync_pulse_width;
+ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+ panel_fixed_mode->clock = dvo_timing->clock * 10;
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+ /* Some VBTs have bogus h/vtotal values */
+ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+ drm_mode_set_name(panel_fixed_mode);
+}
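
A minimal standalone sketch of how the split hi/lo VBT timing bytes combine, assuming a hypothetical DTD describing a 1280-pixel-wide panel (the byte values are made up for illustration; the composition mirrors fill_detail_timing_data() above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical hactive bytes as they might appear in a VBT DTD:
	 * 0x05/0x00 encode 0x500 = 1280 active pixels. */
	uint8_t hactive_hi = 0x05, hactive_lo = 0x00;

	/* Same composition as the driver: high byte shifted up by 8,
	 * OR'd with the low byte. */
	uint16_t hdisplay = (hactive_hi << 8) | hactive_lo;

	printf("hdisplay = %u\n", hdisplay);	/* prints 1280 */
	return 0;
}
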
+
+/* Try to find integrated panel data */
+static void
+parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
{
struct bdb_lvds_options *lvds_options;
struct bdb_lvds_lfp_data *lvds_lfp_data;
@@ -91,38 +127,45 @@ parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
DRM_MEM_DRIVER);
- panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
- dvo_timing->hactive_lo;
- panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
- ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
- panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
- dvo_timing->hsync_pulse_width;
- panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
- ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing);
- panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
- dvo_timing->vactive_lo;
- panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
- dvo_timing->vsync_off;
- panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
- dvo_timing->vsync_pulse_width;
- panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
- ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
- panel_fixed_mode->clock = dvo_timing->clock * 10;
- panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
- /* Some VBTs have bogus h/vtotal values */
- if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
- panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
- if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
- panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+ DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
- drm_mode_set_name(panel_fixed_mode);
+ return;
+}
- dev_priv->vbt_mode = panel_fixed_mode;
+/* Try to find sdvo panel data */
+static void
+parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+ struct lvds_dvo_timing *dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
- DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
- drm_mode_debug_printmodeline(panel_fixed_mode);
+ dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+
+ dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+ if (!dvo_timing)
+ return;
+
+ panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
+ DRM_MEM_DRIVER);
+
+ if (!panel_fixed_mode)
+ return;
+
+ fill_detail_timing_data(panel_fixed_mode,
+ dvo_timing + sdvo_lvds_options->panel_type);
+
+ dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
return;
}
@@ -152,6 +195,88 @@ parse_general_features(struct drm_i915_private *dev_priv,
}
}
+static void
+parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct sdvo_device_mapping *p_mapping;
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child;
+ int i, child_device_num, count;
+ u16 block_size, *block_ptr;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG("No general definition block is found\n");
+ return;
+ }
+ /* Check whether the size of the child device meets the requirements.
+ * If the child device size obtained from the general definitions block
+ * differs from sizeof(struct child_device_config), skip the
+ * parsing of SDVO device info.
+ */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+ /* Different child device size; ignore it. */
+ DRM_DEBUG("different child device size found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_ptr = (u16 *)((char *)p_defs - 2);
+ block_size = *block_ptr;
+ /* get the number of child devices */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ if (p_child->slave_addr != SLAVE_ADDR1 &&
+ p_child->slave_addr != SLAVE_ADDR2) {
+ /*
+ * If the slave address is neither 0x70 nor 0x72,
+ * it is not an SDVO device. Skip it.
+ */
+ continue;
+ }
+ if (p_child->dvo_port != DEVICE_PORT_DVOB &&
+ p_child->dvo_port != DEVICE_PORT_DVOC) {
+ /* skip the incorrect SDVO port */
+ DRM_DEBUG("Incorrect SDVO port. Skip it\n");
+ continue;
+ }
+ DRM_DEBUG("SDVO device with slave addr %2x found on "
+ "%s port\n",
+ p_child->slave_addr,
+ (p_child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
+ p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+ if (!p_mapping->initialized) {
+ p_mapping->dvo_port = p_child->dvo_port;
+ p_mapping->slave_addr = p_child->slave_addr;
+ p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->initialized = 1;
+ } else {
+ DRM_DEBUG("Maybe one SDVO port is shared by "
+ "two SDVO devices.\n");
+ }
+ if (p_child->slave2_addr) {
+ /* Maybe this is an SDVO device with multiple inputs, */
+ /* and the mapping info is not added. */
+ DRM_DEBUG("slave2_addr is set. Maybe this "
+ "is an SDVO device with multiple inputs.\n");
+ }
+ count++;
+ }
+
+ if (!count) {
+ /* No SDVO device info is found */
+ DRM_DEBUG("No SDVO device info is found in VBT\n");
+ }
+ return;
+}
/**
* intel_init_bios - initialize VBIOS settings & find VBT
* @dev: DRM device
@@ -199,8 +324,9 @@ intel_init_bios(struct drm_device *dev)
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
- parse_panel_data(dev_priv, bdb);
-
+ parse_lfp_panel_data(dev_priv, bdb);
+ parse_sdvo_panel_data(dev_priv, bdb);
+ parse_sdvo_device_mapping(dev_priv, bdb);
pci_unmap_rom(pdev, bios);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index de621aad85b..fe72e1c225d 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -135,6 +135,86 @@ struct bdb_general_features {
u8 rsvd11:6; /* finish byte */
} __attribute__((packed));
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE 0x00
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_TV 0x09
+#define DEVICE_TYPE_EFP 0x12
+#define DEVICE_TYPE_LFP 0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS 0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
+#define DEVICE_TYPE_TV_COMPOSITE 0x0209
+#define DEVICE_TYPE_TV_MACROVISION 0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
+#define DEVICE_TYPE_TV_SCART 0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
+#define DEVICE_TYPE_EFP_DVI_I 0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
+#define DEVICE_TYPE_LFP_PANELLINK 0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+
+#define DEVICE_CFG_NONE 0x00
+#define DEVICE_CFG_12BIT_DVOB 0x01
+#define DEVICE_CFG_12BIT_DVOC 0x02
+#define DEVICE_CFG_24BIT_DVOBC 0x09
+#define DEVICE_CFG_24BIT_DVOCB 0x0a
+#define DEVICE_CFG_DUAL_DVOB 0x11
+#define DEVICE_CFG_DUAL_DVOC 0x12
+#define DEVICE_CFG_DUAL_DVOBC 0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
+
+#define DEVICE_WIRE_NONE 0x00
+#define DEVICE_WIRE_DVOB 0x01
+#define DEVICE_WIRE_DVOC 0x02
+#define DEVICE_WIRE_DVOBC 0x03
+#define DEVICE_WIRE_DVOBB 0x05
+#define DEVICE_WIRE_DVOCC 0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB 0x01
+#define DEVICE_PORT_DVOC 0x02
+
+struct child_device_config {
+ u16 handle;
+ u16 device_type;
+ u8 device_id[10]; /* See DEVICE_TYPE_* above */
+ u16 addin_offset;
+ u8 dvo_port; /* See DEVICE_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ u8 capabilities;
+ u8 dvo_wiring;/* See DEVICE_WIRE_* above */
+ u8 dvo2_wiring;
+ u16 extended_type;
+ u8 dvo_function;
+} __attribute__((packed));
+
struct bdb_general_definitions {
/* DDC GPIO */
u8 crt_ddc_gmbus_pin;
@@ -149,14 +229,19 @@ struct bdb_general_definitions {
u8 boot_display[2];
u8 child_dev_size;
- /* device info */
- u8 tv_or_lvds_info[33];
- u8 dev1[33];
- u8 dev2[33];
- u8 dev3[33];
- u8 dev4[33];
- /* may be another device block here on some platforms */
-};
+ /*
+ * Device info:
+ * If TV is present, it'll be at devices[0].
+ * LVDS will be next, either devices[0] or [1], if present.
+ * On some platforms the number of devices is 6, but it could be as few
+ * as 4 if both TV and LVDS are missing.
+ * The device count depends on the size of the general definitions
+ * block and is obtained with the following formula:
+ * number = (block_size - sizeof(bdb_general_definitions))/
+ * sizeof(child_device_config);
+ */
+ struct child_device_config devices[0];
+} __attribute__((packed));
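
As a rough illustration of the formula in the comment above, a small sketch with made-up sizes (the real sizes come from the VBT; only the arithmetic is the point here):

#include <stdio.h>

int main(void)
{
	/* All numbers are illustrative, not taken from a real VBT. */
	unsigned int block_size = 203;		/* size word stored just before the block */
	unsigned int defs_header_size = 5;	/* sizeof(bdb_general_definitions) w/o devices[] */
	unsigned int child_size = 33;		/* sizeof(child_device_config) */

	unsigned int child_device_num = (block_size - defs_header_size) / child_size;

	printf("child devices: %u\n", child_device_num);	/* prints 6 */
	return 0;
}
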
struct bdb_lvds_options {
u8 panel_type;
@@ -279,6 +364,23 @@ struct vch_bdb_22 {
struct vch_panel_data panels[16];
} __attribute__((packed));
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __attribute__((packed));
+
+
bool intel_init_bios(struct drm_device *dev);
/*
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 19148c3df63..6de97fc6602 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -37,9 +37,14 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 temp;
+ u32 temp, reg;
- temp = I915_READ(ADPA);
+ if (IS_IGDNG(dev))
+ reg = PCH_ADPA;
+ else
+ reg = ADPA;
+
+ temp = I915_READ(reg);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp |= ADPA_DAC_ENABLE;
@@ -58,7 +63,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
break;
}
- I915_WRITE(ADPA, temp);
+ I915_WRITE(reg, temp);
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -101,17 +106,23 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_md_reg;
u32 adpa, dpll_md;
+ u32 adpa_reg;
if (intel_crtc->pipe == 0)
dpll_md_reg = DPLL_A_MD;
else
dpll_md_reg = DPLL_B_MD;
+ if (IS_IGDNG(dev))
+ adpa_reg = PCH_ADPA;
+ else
+ adpa_reg = ADPA;
+
/*
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
*/
- if (IS_I965G(dev)) {
+ if (IS_I965G(dev) && !IS_IGDNG(dev)) {
dpll_md = I915_READ(dpll_md_reg);
I915_WRITE(dpll_md_reg,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -125,13 +136,53 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (intel_crtc->pipe == 0) {
adpa |= ADPA_PIPE_A_SELECT;
- I915_WRITE(BCLRPAT_A, 0);
+ if (!IS_IGDNG(dev))
+ I915_WRITE(BCLRPAT_A, 0);
} else {
adpa |= ADPA_PIPE_B_SELECT;
- I915_WRITE(BCLRPAT_B, 0);
+ if (!IS_IGDNG(dev))
+ I915_WRITE(BCLRPAT_B, 0);
}
- I915_WRITE(ADPA, adpa);
+ I915_WRITE(adpa_reg, adpa);
+}
+
+static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 adpa, temp;
+ bool ret;
+
+ temp = adpa = I915_READ(PCH_ADPA);
+
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+
+ adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
+ ADPA_CRT_HOTPLUG_WARMUP_10MS |
+ ADPA_CRT_HOTPLUG_SAMPLE_4S |
+ ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */
+ ADPA_CRT_HOTPLUG_VOLREF_325MV |
+ ADPA_CRT_HOTPLUG_ENABLE |
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
+
+ DRM_DEBUG("pch crt adpa 0x%x", adpa);
+ I915_WRITE(PCH_ADPA, adpa);
+
+ /* This might not be needed, as it is not specified in the spec... */
+ udelay(1000);
+
+ /* Check the status to see if both blue and green are on now */
+ adpa = I915_READ(PCH_ADPA);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) ==
+ ADPA_CRT_HOTPLUG_MONITOR_COLOR)
+ ret = true;
+ else
+ ret = false;
+
+ /* restore the original register value */
+ I915_WRITE(PCH_ADPA, temp);
+ return ret;
}
/**
@@ -148,6 +199,10 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_en;
int i, tries = 0;
+
+ if (IS_IGDNG(dev))
+ return intel_igdng_crt_detect_hotplug(connector);
+
/*
* On 4 series desktop, CRT detect sequence need to be done twice
* to get a reliable result.
@@ -198,9 +253,142 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
return intel_ddc_probe(intel_output);
}
+static enum drm_connector_status
+intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
+{
+ struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pipe = intel_crtc->pipe;
+ uint32_t save_bclrpat;
+ uint32_t save_vtotal;
+ uint32_t vtotal, vactive;
+ uint32_t vsample;
+ uint32_t vblank, vblank_start, vblank_end;
+ uint32_t dsl;
+ uint32_t bclrpat_reg;
+ uint32_t vtotal_reg;
+ uint32_t vblank_reg;
+ uint32_t vsync_reg;
+ uint32_t pipeconf_reg;
+ uint32_t pipe_dsl_reg;
+ uint8_t st00;
+ enum drm_connector_status status;
+
+ if (pipe == 0) {
+ bclrpat_reg = BCLRPAT_A;
+ vtotal_reg = VTOTAL_A;
+ vblank_reg = VBLANK_A;
+ vsync_reg = VSYNC_A;
+ pipeconf_reg = PIPEACONF;
+ pipe_dsl_reg = PIPEADSL;
+ } else {
+ bclrpat_reg = BCLRPAT_B;
+ vtotal_reg = VTOTAL_B;
+ vblank_reg = VBLANK_B;
+ vsync_reg = VSYNC_B;
+ pipeconf_reg = PIPEBCONF;
+ pipe_dsl_reg = PIPEBDSL;
+ }
+
+ save_bclrpat = I915_READ(bclrpat_reg);
+ save_vtotal = I915_READ(vtotal_reg);
+ vblank = I915_READ(vblank_reg);
+
+ vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
+ vactive = (save_vtotal & 0x7ff) + 1;
+
+ vblank_start = (vblank & 0xfff) + 1;
+ vblank_end = ((vblank >> 16) & 0xfff) + 1;
+
+ /* Set the border color to purple. */
+ I915_WRITE(bclrpat_reg, 0x500050);
+
+ if (IS_I9XX(dev)) {
+ uint32_t pipeconf = I915_READ(pipeconf_reg);
+ I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
+ /* Wait for the next vblank to substitute
+ * the border color for color info */
+ intel_wait_for_vblank(dev);
+ st00 = I915_READ8(VGA_MSR_WRITE);
+ status = ((st00 & (1 << 4)) != 0) ?
+ connector_status_connected :
+ connector_status_disconnected;
+
+ I915_WRITE(pipeconf_reg, pipeconf);
+ } else {
+ bool restore_vblank = false;
+ int count, detect;
+
+ /*
+ * If there isn't any border, add some.
+ * Yes, this will flicker
+ */
+ if (vblank_start <= vactive && vblank_end >= vtotal) {
+ uint32_t vsync = I915_READ(vsync_reg);
+ uint32_t vsync_start = (vsync & 0xffff) + 1;
+
+ vblank_start = vsync_start;
+ I915_WRITE(vblank_reg,
+ (vblank_start - 1) |
+ ((vblank_end - 1) << 16));
+ restore_vblank = true;
+ }
+ /* sample in the vertical border, selecting the larger one */
+ if (vblank_start - vactive >= vtotal - vblank_end)
+ vsample = (vblank_start + vactive) >> 1;
+ else
+ vsample = (vtotal + vblank_end) >> 1;
+
+ /*
+ * Wait for the border to be displayed
+ */
+ while (I915_READ(pipe_dsl_reg) >= vactive)
+ ;
+ while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample)
+ ;
+ /*
+ * Watch ST00 for an entire scanline
+ */
+ detect = 0;
+ count = 0;
+ do {
+ count++;
+ /* Read the ST00 VGA status register */
+ st00 = I915_READ8(VGA_MSR_WRITE);
+ if (st00 & (1 << 4))
+ detect++;
+ } while ((I915_READ(pipe_dsl_reg) == dsl));
+
+ /* restore vblank if necessary */
+ if (restore_vblank)
+ I915_WRITE(vblank_reg, vblank);
+ /*
+ * If more than 3/4 of the scanline detected a monitor,
+ * then it is assumed to be present. This works even on i830,
+ * where there isn't any way to force the border color across
+ * the screen
+ */
+ status = detect * 4 > count * 3 ?
+ connector_status_connected :
+ connector_status_disconnected;
+ }
+
+ /* Restore previous settings */
+ I915_WRITE(bclrpat_reg, save_bclrpat);
+
+ return status;
+}
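
For intuition, the connect/disconnect decision above reduces to a simple fraction test; a toy sketch with made-up sample counts:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	/* Hypothetical counts from one scanline's worth of ST00 reads. */
	int count = 100;	/* total samples taken */
	int detect = 80;	/* samples with the monitor sense bit set */

	/* Same test as intel_crt_load_detect(): connected if more than
	 * three quarters of the samples saw the sense bit. */
	bool connected = detect * 4 > count * 3;

	printf("%s\n", connected ? "connected" : "disconnected");
	return 0;
}
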
+
static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_crtc *crtc;
+ int dpms_mode;
+ enum drm_connector_status status;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
if (intel_crt_detect_hotplug(connector))
@@ -212,8 +400,20 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
if (intel_crt_detect_ddc(connector))
return connector_status_connected;
- /* TODO use load detect */
- return connector_status_unknown;
+ /* for pre-945g platforms use load detect */
+ if (encoder->crtc && encoder->crtc->enabled) {
+ status = intel_crt_load_detect(encoder->crtc, intel_output);
+ } else {
+ crtc = intel_get_load_detect_pipe(intel_output,
+ NULL, &dpms_mode);
+ if (crtc) {
+ status = intel_crt_load_detect(crtc, intel_output);
+ intel_release_load_detect_pipe(intel_output, dpms_mode);
+ } else
+ status = connector_status_unknown;
+ }
+
+ return status;
}
static void intel_crt_destroy(struct drm_connector *connector)
@@ -236,11 +436,6 @@ static int intel_crt_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
- struct drm_device *dev = connector->dev;
-
- if (property == dev->mode_config.dpms_property && connector->encoder)
- intel_crt_dpms(connector->encoder, (uint32_t)(value & 0xf));
-
return 0;
}
@@ -257,6 +452,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
@@ -282,6 +478,7 @@ void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
struct intel_output *intel_output;
+ u32 i2c_reg;
intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
if (!intel_output)
@@ -298,7 +495,11 @@ void intel_crt_init(struct drm_device *dev)
&intel_output->enc);
/* Set up the DDC bus. */
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+ if (IS_IGDNG(dev))
+ i2c_reg = PCH_GPIOA;
+ else
+ i2c_reg = GPIOA;
+ intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
if (!intel_output->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
"failed.\n");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3387cf32f38..028f5b66e3d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -137,6 +137,8 @@ struct intel_limit {
#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
#define INTEL_LIMIT_IGD_SDVO_DAC 8
#define INTEL_LIMIT_IGD_LVDS 9
+#define INTEL_LIMIT_IGDNG_SDVO_DAC 10
+#define INTEL_LIMIT_IGDNG_LVDS 11
/*The parameter is for SDVO on G4x platform*/
#define G4X_DOT_SDVO_MIN 25000
@@ -216,12 +218,43 @@ struct intel_limit {
#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
+/* IGDNG */
+/* The clock is calculated using (register_value + 2) for
+ N/M1/M2, so the range values here are (actual_value - 2).
+ */
+#define IGDNG_DOT_MIN 25000
+#define IGDNG_DOT_MAX 350000
+#define IGDNG_VCO_MIN 1760000
+#define IGDNG_VCO_MAX 3510000
+#define IGDNG_N_MIN 1
+#define IGDNG_N_MAX 5
+#define IGDNG_M_MIN 79
+#define IGDNG_M_MAX 118
+#define IGDNG_M1_MIN 12
+#define IGDNG_M1_MAX 23
+#define IGDNG_M2_MIN 5
+#define IGDNG_M2_MAX 9
+#define IGDNG_P_SDVO_DAC_MIN 5
+#define IGDNG_P_SDVO_DAC_MAX 80
+#define IGDNG_P_LVDS_MIN 28
+#define IGDNG_P_LVDS_MAX 112
+#define IGDNG_P1_MIN 1
+#define IGDNG_P1_MAX 8
+#define IGDNG_P2_SDVO_DAC_SLOW 10
+#define IGDNG_P2_SDVO_DAC_FAST 5
+#define IGDNG_P2_LVDS_SLOW 14 /* single channel */
+#define IGDNG_P2_LVDS_FAST 7 /* double channel */
+#define IGDNG_P2_DOT_LIMIT 225000 /* 225 MHz */
+
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
+static bool
+intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
static const intel_limit_t intel_limits[] = {
{ /* INTEL_LIMIT_I8XX_DVO_DAC */
@@ -383,9 +416,47 @@ static const intel_limit_t intel_limits[] = {
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
.find_pll = intel_find_best_PLL,
},
-
+ { /* INTEL_LIMIT_IGDNG_SDVO_DAC */
+ .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
+ .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
+ .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
+ .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
+ .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
+ .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
+ .p = { .min = IGDNG_P_SDVO_DAC_MIN, .max = IGDNG_P_SDVO_DAC_MAX },
+ .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
+ .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
+ .p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
+ .p2_fast = IGDNG_P2_SDVO_DAC_FAST },
+ .find_pll = intel_igdng_find_best_PLL,
+ },
+ { /* INTEL_LIMIT_IGDNG_LVDS */
+ .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
+ .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
+ .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
+ .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
+ .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
+ .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
+ .p = { .min = IGDNG_P_LVDS_MIN, .max = IGDNG_P_LVDS_MAX },
+ .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
+ .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
+ .p2_slow = IGDNG_P2_LVDS_SLOW,
+ .p2_fast = IGDNG_P2_LVDS_FAST },
+ .find_pll = intel_igdng_find_best_PLL,
+ },
};
+static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
+{
+ const intel_limit_t *limit;
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS];
+ else
+ limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC];
+
+ return limit;
+}
+
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -418,7 +489,9 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
- if (IS_G4X(dev)) {
+ if (IS_IGDNG(dev))
+ limit = intel_igdng_limit(crtc);
+ else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
} else if (IS_I9XX(dev) && !IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
@@ -630,7 +703,64 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
}
}
}
+ return found;
+}
+
+static bool
+intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_clock_t clock;
+ int max_n;
+ bool found;
+ int err_most = 47;
+ found = false;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ max_n = limit->n.max;
+ /* based on hardware requirement, prefer smaller n for precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ /* based on hardware requirement, prefer larger m1, m2, p1 */
+ for (clock.m1 = limit->m1.max;
+ clock.m1 >= limit->m1.min; clock.m1--) {
+ for (clock.m2 = limit->m2.max;
+ clock.m2 >= limit->m2.min; clock.m2--) {
+ for (clock.p1 = limit->p1.max;
+ clock.p1 >= limit->p1.min; clock.p1--) {
+ int this_err;
+ intel_clock(dev, refclk, &clock);
+ if (!intel_PLL_is_valid(crtc, &clock))
+ continue;
+ this_err = abs((10000 - (target*10000/clock.dot)));
+ if (this_err < err_most) {
+ *best_clock = clock;
+ err_most = this_err;
+ max_n = clock.n;
+ found = true;
+ /* stop at the first match */
+ goto out;
+ }
+ }
+ }
+ }
+ }
+out:
return found;
}
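
The error metric in the search above is the relative deviation between the target and candidate dot clocks in units of 0.01%, so err_most = 47 accepts anything within roughly 0.47%. A quick sketch with hypothetical clock values:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Hypothetical target and candidate dot clocks, in kHz. */
	int target = 108000;
	int dot = 107600;

	/* Same metric as intel_igdng_find_best_PLL(). */
	int this_err = abs(10000 - (target * 10000 / dot));

	printf("error = %d (accepted while below 47, i.e. ~0.47%%)\n", this_err);
	return 0;
}
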
@@ -785,18 +915,292 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
+static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->pipe;
+ int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+ int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
+ int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
+ int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
+ int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
+ int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
+ int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
+ int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
+ int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
+ u32 temp;
+ int tries = 5, j;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ DRM_DEBUG("crtc %d dpms on\n", pipe);
+ /* enable PCH DPLL */
+ temp = I915_READ(pch_dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
+ I915_READ(pch_dpll_reg);
+ }
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
+ FDI_SEL_PCDCLK |
+ FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
+ I915_READ(fdi_rx_reg);
+ udelay(200);
+
+ /* Enable CPU FDI TX PLL, always on for IGDNG */
+ temp = I915_READ(fdi_tx_reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+ I915_READ(fdi_tx_reg);
+ udelay(100);
+ }
-/**
- * Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
- */
-static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+ /* Enable CPU pipe */
+ temp = I915_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ I915_READ(pipeconf_reg);
+ udelay(100);
+ }
+
+ /* configure and enable CPU plane */
+ temp = I915_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ }
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ temp = I915_READ(fdi_tx_reg);
+ temp |= FDI_TX_ENABLE;
+ temp |= FDI_DP_PORT_WIDTH_X4; /* default */
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_tx_reg, temp);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+ I915_READ(fdi_rx_reg);
+
+ udelay(150);
+
+ /* Train FDI. */
+ /* unmask the FDI RX interrupt symbol_lock and bit_lock bits
+ to read the training result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK) == 0) {
+ for (j = 0; j < tries; j++) {
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK)
+ break;
+ udelay(200);
+ }
+ if (j != tries)
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_BIT_LOCK);
+ else
+ DRM_DEBUG("train 1 fail\n");
+ } else {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG("train 1 ok 2!\n");
+ }
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(fdi_tx_reg, temp);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(fdi_rx_reg, temp);
+
+ udelay(150);
+
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
+ for (j = 0; j < tries; j++) {
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK)
+ break;
+ udelay(200);
+ }
+ if (j != tries) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG("train 2 ok 1!\n");
+ } else
+ DRM_DEBUG("train 2 fail\n");
+ } else {
+ I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG("train 2 ok 2!\n");
+ }
+ DRM_DEBUG("train done\n");
+
+ /* set transcoder timing */
+ I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
+ I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
+ I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
+
+ I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
+ I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
+ I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
+
+ /* enable PCH transcoder */
+ temp = I915_READ(transconf_reg);
+ I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
+ I915_READ(transconf_reg);
+
+ while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
+ ;
+
+ /* switch to normal link mode (training done) */
+
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
+ FDI_TX_ENHANCE_FRAME_ENABLE);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
+ FDI_RX_ENHANCE_FRAME_ENABLE);
+ I915_READ(fdi_rx_reg);
+
+ /* wait one idle pattern time */
+ udelay(100);
+
+ intel_crtc_load_lut(crtc);
+
+ break;
+ case DRM_MODE_DPMS_OFF:
+ DRM_DEBUG("crtc %d dpms off\n", pipe);
+
+ /* Disable the VGA plane that we never use */
+ I915_WRITE(CPU_VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable display plane */
+ temp = I915_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ I915_READ(dspbase_reg);
+ }
+
+ /* disable cpu pipe, disable after all planes disabled */
+ temp = I915_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+ I915_READ(pipeconf_reg);
+ /* wait for cpu pipe off, pipe state */
+ while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0)
+ ;
+ } else
+ DRM_DEBUG("crtc %d is disabled\n", pipe);
+
+ /* IGDNG-A: disable CPU panel fitter? */
+ temp = I915_READ(pf_ctl_reg);
+ if ((temp & PF_ENABLE) != 0) {
+ I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
+ I915_READ(pf_ctl_reg);
+ }
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ temp = I915_READ(fdi_tx_reg);
+ I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
+ I915_READ(fdi_rx_reg);
+
+ /* still set train pattern 1 */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_tx_reg, temp);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_rx_reg, temp);
+
+ /* disable PCH transcoder */
+ temp = I915_READ(transconf_reg);
+ if ((temp & TRANS_ENABLE) != 0) {
+ I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
+ I915_READ(transconf_reg);
+ /* wait for PCH transcoder off, transcoder state */
+ while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0)
+ ;
+ }
+
+ /* disable PCH DPLL */
+ temp = I915_READ(pch_dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ I915_READ(pch_dpll_reg);
+ }
+
+ temp = I915_READ(fdi_rx_reg);
+ if ((temp & FDI_RX_PLL_ENABLE) != 0) {
+ temp &= ~FDI_SEL_PCDCLK;
+ temp &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(fdi_rx_reg, temp);
+ I915_READ(fdi_rx_reg);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ break;
+ }
+}
+
+static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_master_private *master_priv;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
@@ -805,7 +1209,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
u32 temp;
- bool enabled;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -890,6 +1293,26 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(150);
break;
}
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ bool enabled;
+
+ if (IS_IGDNG(dev))
+ igdng_crtc_dpms(crtc, mode);
+ else
+ i9xx_crtc_dpms(crtc, mode);
if (!dev->primary->master)
return;
@@ -947,6 +1370,12 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = crtc->dev;
+ if (IS_IGDNG(dev)) {
+ /* FDI link clock is fixed at 2.7 GHz */
+ if (mode->clock * 3 > 27000 * 4)
+ return MODE_CLOCK_HIGH;
+ }
return true;
}
@@ -1030,6 +1459,48 @@ static int intel_panel_fitter_pipe (struct drm_device *dev)
return 1;
}
+struct fdi_m_n {
+ u32 tu;
+ u32 gmch_m;
+ u32 gmch_n;
+ u32 link_m;
+ u32 link_n;
+};
+
+static void
+fdi_reduce_ratio(u32 *num, u32 *den)
+{
+ while (*num > 0xffffff || *den > 0xffffff) {
+ *num >>= 1;
+ *den >>= 1;
+ }
+}
+
+#define DATA_N 0x800000
+#define LINK_N 0x80000
+
+static void
+igdng_compute_m_n(int bytes_per_pixel, int nlanes,
+ int pixel_clock, int link_clock,
+ struct fdi_m_n *m_n)
+{
+ u64 temp;
+
+ m_n->tu = 64; /* default size */
+
+ temp = (u64) DATA_N * pixel_clock;
+ temp = div_u64(temp, link_clock);
+ m_n->gmch_m = (temp * bytes_per_pixel) / nlanes;
+ m_n->gmch_n = DATA_N;
+ fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+
+ temp = (u64) LINK_N * pixel_clock;
+ m_n->link_m = div_u64(temp, link_clock);
+ m_n->link_n = LINK_N;
+ fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
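
To make the intent concrete, a standalone sketch of the same M/N arithmetic for a hypothetical 154000 kHz pixel clock, 3 bytes per pixel, 4 FDI lanes and a 270000 kHz link clock (the mode numbers are illustrative; for these values fdi_reduce_ratio() would be a no-op, so it is omitted):

#include <stdio.h>
#include <stdint.h>

#define EX_DATA_N 0x800000
#define EX_LINK_N 0x80000

int main(void)
{
	uint64_t pixel_clock = 154000, link_clock = 270000;
	int bpp = 3, nlanes = 4;

	/* Data M/N: ratio of pixel data bandwidth to total link bandwidth. */
	uint64_t gmch_m = (EX_DATA_N * pixel_clock / link_clock) * bpp / nlanes;
	uint64_t gmch_n = EX_DATA_N;

	/* Link M/N: ratio of pixel clock to link clock. */
	uint64_t link_m = EX_LINK_N * pixel_clock / link_clock;
	uint64_t link_n = EX_LINK_N;

	printf("data M/N = %llu/%llu (~%.3f)\n",
	       (unsigned long long)gmch_m, (unsigned long long)gmch_n,
	       (double)gmch_m / gmch_n);	/* ~0.428 = 154000*3 / (270000*4) */
	printf("link M/N = %llu/%llu (~%.3f)\n",
	       (unsigned long long)link_m, (unsigned long long)link_n,
	       (double)link_m / link_n);	/* ~0.570 = 154000 / 270000 */
	return 0;
}
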
+
+
static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -1063,6 +1534,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_connector *connector;
const intel_limit_t *limit;
int ret;
+ struct fdi_m_n m_n = {0};
+ int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
+ int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
+ int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
+ int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
+ int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
+ int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int lvds_reg = LVDS;
+ u32 temp;
+ int sdvo_pixel_multiply;
drm_vblank_pre_modeset(dev, pipe);
@@ -1101,6 +1583,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
} else if (IS_I9XX(dev)) {
refclk = 96000;
+ if (IS_IGDNG(dev))
+ refclk = 120000; /* 120 MHz refclk */
} else {
refclk = 48000;
}
@@ -1114,6 +1598,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_vblank_post_modeset(dev, pipe);
return -EINVAL;
}
@@ -1137,12 +1622,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
+ /* FDI link */
+ if (IS_IGDNG(dev))
+ igdng_compute_m_n(3, 4, /* lane num 4 */
+ adjusted_mode->clock,
+ 270000, /* lane clock */
+ &m_n);
+
if (IS_IGD(dev))
fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
else
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- dpll = DPLL_VGA_MODE_DIS;
+ if (!IS_IGDNG(dev))
+ dpll = DPLL_VGA_MODE_DIS;
+
if (IS_I9XX(dev)) {
if (is_lvds)
dpll |= DPLLB_MODE_LVDS;
@@ -1150,17 +1644,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
dpll |= DPLL_DVO_HIGH_SPEED;
- if (IS_I945G(dev) || IS_I945GM(dev)) {
- int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ if (IS_I945G(dev) || IS_I945GM(dev))
dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- }
+ else if (IS_IGDNG(dev))
+ dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
/* compute bitmask from p1 value */
if (IS_IGD(dev))
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
- else
+ else {
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ /* also FPA1 */
+ if (IS_IGDNG(dev))
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
switch (clock.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -1175,7 +1674,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (IS_I965G(dev))
+ if (IS_I965G(dev) && !IS_IGDNG(dev))
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
} else {
if (is_lvds) {
@@ -1207,10 +1706,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
- if (pipe == 0)
- dspcntr |= DISPPLANE_SEL_PIPE_A;
- else
- dspcntr |= DISPPLANE_SEL_PIPE_B;
+ /* On IGDNG the plane is tied to its pipe; bit 24
+ enables color space conversion */
+ if (!IS_IGDNG(dev)) {
+ if (pipe == 0)
+ dspcntr |= DISPPLANE_SEL_PIPE_A;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+ }
if (pipe == 0 && !IS_I965G(dev)) {
/* Enable pixel doubling when the dot clock is > 90% of the (display)
@@ -1231,12 +1734,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Disable the panel fitter if it was on our pipe */
- if (intel_panel_fitter_pipe(dev) == pipe)
+ if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
+ /* assign to IGDNG registers */
+ if (IS_IGDNG(dev)) {
+ fp_reg = pch_fp_reg;
+ dpll_reg = pch_dpll_reg;
+ }
if (dpll & DPLL_VCO_ENABLE) {
I915_WRITE(fp_reg, fp);
@@ -1245,13 +1753,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(150);
}
+ if (IS_IGDNG(dev)) {
+ /* enable PCH clock reference source */
+ /* XXX need to change the setting for other outputs */
+ u32 temp;
+ temp = I915_READ(PCH_DREF_CONTROL);
+ temp &= ~DREF_NONSPREAD_SOURCE_MASK;
+ temp |= DREF_NONSPREAD_CK505_ENABLE;
+ temp &= ~DREF_SSC_SOURCE_MASK;
+ temp |= DREF_SSC_SOURCE_ENABLE;
+ temp &= ~DREF_SSC1_ENABLE;
+ /* if no eDP, disable source output to CPU */
+ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ }
+
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
if (is_lvds) {
- u32 lvds = I915_READ(LVDS);
+ u32 lvds;
+
+ if (IS_IGDNG(dev))
+ lvds_reg = PCH_LVDS;
+ lvds = I915_READ(lvds_reg);
lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
@@ -1266,8 +1794,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* panels behave in the two modes.
*/
- I915_WRITE(LVDS, lvds);
- I915_READ(LVDS);
+ I915_WRITE(lvds_reg, lvds);
+ I915_READ(lvds_reg);
}
I915_WRITE(fp_reg, fp);
@@ -1276,8 +1804,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Wait for the clocks to stabilize. */
udelay(150);
- if (IS_I965G(dev)) {
- int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ if (IS_I965G(dev) && !IS_IGDNG(dev)) {
+ sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
} else {
@@ -1303,9 +1831,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* pipesrc and dspsize control the size that is scaled from, which should
* always be the user's requested size.
*/
- I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
- I915_WRITE(dsppos_reg, 0);
+ if (!IS_IGDNG(dev)) {
+ I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
+ (mode->hdisplay - 1));
+ I915_WRITE(dsppos_reg, 0);
+ }
I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+
+ if (IS_IGDNG(dev)) {
+ I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
+ I915_WRITE(link_m1_reg, m_n.link_m);
+ I915_WRITE(link_n1_reg, m_n.link_n);
+
+ /* enable FDI RX PLL too */
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+ udelay(200);
+ }
+
I915_WRITE(pipeconf_reg, pipeconf);
I915_READ(pipeconf_reg);
@@ -1315,12 +1859,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
- if (ret != 0)
- return ret;
-
drm_vblank_post_modeset(dev, pipe);
- return 0;
+ return ret;
}
/** Loads the palette/gamma unit for the CRTC with the prepared values */
@@ -1336,6 +1877,11 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
if (!crtc->enabled)
return;
+ /* use legacy palette for IGDNG */
+ if (IS_IGDNG(dev))
+ palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
+ LGC_PALETTE_B;
+
for (i = 0; i < 256; i++) {
I915_WRITE(palreg + 4 * i,
(intel_crtc->lut_r[i] << 16) |
@@ -1357,7 +1903,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
- uint32_t temp;
+ uint32_t temp = I915_READ(control);
size_t addr;
int ret;
@@ -1366,7 +1912,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
DRM_DEBUG("cursor off\n");
- temp = CURSOR_MODE_DISABLE;
+ if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+ temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ temp |= CURSOR_MODE_DISABLE;
+ } else {
+ temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+ }
addr = 0;
bo = NULL;
mutex_lock(&dev->struct_mutex);
@@ -1409,10 +1960,19 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = obj_priv->phys_obj->handle->busaddr;
}
- temp = 0;
- /* set the pipe for the cursor */
- temp |= (pipe << 28);
- temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ if (!IS_I9XX(dev))
+ I915_WRITE(CURSIZE, (height << 12) | width);
+
+ /* Hooray for CUR*CNTR differences */
+ if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+ temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ temp |= (pipe << 28); /* Connect to correct pipe */
+ } else {
+ temp &= ~(CURSOR_FORMAT_MASK);
+ temp |= CURSOR_ENABLE;
+ temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
+ }
finish:
I915_WRITE(control, temp);
@@ -1450,16 +2010,16 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
uint32_t adder;
if (x < 0) {
- temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+ temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
x = -x;
}
if (y < 0) {
- temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+ temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
y = -y;
}
- temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
- temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+ temp |= x << CURSOR_X_SHIFT;
+ temp |= y << CURSOR_Y_SHIFT;
adder = intel_crtc->cursor_addr;
I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
@@ -1576,6 +2136,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
}
encoder->crtc = crtc;
+ intel_output->base.encoder = encoder;
intel_output->load_detect_temp = true;
intel_crtc = to_intel_crtc(crtc);
@@ -1611,6 +2172,7 @@ void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_
if (intel_output->load_detect_temp) {
encoder->crtc = NULL;
+ intel_output->base.encoder = NULL;
intel_output->load_detect_temp = false;
crtc->enabled = drm_helper_crtc_in_use(crtc);
drm_helper_disable_unused_functions(dev);
@@ -1748,6 +2310,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ if (intel_crtc->mode_set.mode)
+ drm_mode_destroy(crtc->dev, intel_crtc->mode_set.mode);
drm_crtc_cleanup(crtc);
kfree(intel_crtc);
}
@@ -1874,7 +2438,24 @@ static void intel_setup_outputs(struct drm_device *dev)
if (IS_MOBILE(dev) && !IS_I830(dev))
intel_lvds_init(dev);
- if (IS_I9XX(dev)) {
+ if (IS_IGDNG(dev)) {
+ int found;
+
+ if (I915_READ(HDMIB) & PORT_DETECTED) {
+ /* check SDVOB */
+ /* found = intel_sdvo_init(dev, HDMIB); */
+ found = 0;
+ if (!found)
+ intel_hdmi_init(dev, HDMIB);
+ }
+
+ if (I915_READ(HDMIC) & PORT_DETECTED)
+ intel_hdmi_init(dev, HDMIC);
+
+ if (I915_READ(HDMID) & PORT_DETECTED)
+ intel_hdmi_init(dev, HDMID);
+
+ } else if (IS_I9XX(dev)) {
int found;
u32 reg;
@@ -1898,7 +2479,7 @@ static void intel_setup_outputs(struct drm_device *dev)
} else
intel_dvo_init(dev);
- if (IS_I9XX(dev) && IS_MOBILE(dev))
+ if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev))
intel_tv_init(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 8b8d6e65cd3..1ee3007d6ec 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -316,6 +316,7 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
.save = intel_dvo_save,
.restore = intel_dvo_restore,
.detect = intel_dvo_detect,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index e4652dcdd9b..8e28e5993df 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -207,7 +207,7 @@ static int intelfb_set_par(struct fb_info *info)
if (var->pixclock != -1) {
- DRM_ERROR("PIXEL CLCOK SET\n");
+ DRM_ERROR("PIXEL CLOCK SET\n");
return -EINVAL;
} else {
struct drm_crtc *crtc;
@@ -504,6 +504,14 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
info->fbops = &intelfb_ops;
info->fix.line_length = fb->pitch;
+
+ /* setup aperture base/size for vesafb takeover */
+ info->aperture_base = dev->mode_config.fb_base;
+ if (IS_I9XX(dev))
+ info->aperture_size = pci_resource_len(dev->pdev, 2);
+ else
+ info->aperture_size = pci_resource_len(dev->pdev, 0);
+
info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
info->fix.smem_len = size;
@@ -674,8 +682,12 @@ static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *
par->crtc_ids[0] = crtc->base.id;
modeset->num_connectors = conn_count;
- if (modeset->mode != modeset->crtc->desired_mode)
- modeset->mode = modeset->crtc->desired_mode;
+ if (modeset->crtc->desired_mode) {
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ modeset->crtc->desired_mode);
+ }
par->crtc_count = 1;
@@ -824,8 +836,12 @@ static int intelfb_single_fb_probe(struct drm_device *dev)
par->crtc_ids[crtc_count++] = crtc->base.id;
modeset->num_connectors = conn_count;
- if (modeset->mode != modeset->crtc->desired_mode)
- modeset->mode = modeset->crtc->desired_mode;
+ if (modeset->crtc->desired_mode) {
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ modeset->crtc->desired_mode);
+ }
}
par->crtc_count = crtc_count;
@@ -857,9 +873,15 @@ void intelfb_restore(void)
drm_crtc_helper_set_config(&kernelfb_mode);
}
+static void intelfb_restore_work_fn(struct work_struct *ignored)
+{
+ intelfb_restore();
+}
+static DECLARE_WORK(intelfb_restore_work, intelfb_restore_work_fn);
+
static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
{
- intelfb_restore();
+ schedule_work(&intelfb_restore_work);
}
static struct sysrq_key_op sysrq_intelfb_restore_op = {
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d0983bb93a1..4ea2a651b92 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -56,7 +56,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
sdvox = SDVO_ENCODING_HDMI |
SDVO_BORDER_ENABLE |
SDVO_VSYNC_ACTIVE_HIGH |
- SDVO_HSYNC_ACTIVE_HIGH;
+ SDVO_HSYNC_ACTIVE_HIGH |
+ SDVO_NULL_PACKETS_DURING_VSYNC;
if (hdmi_priv->has_hdmi_sink)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -145,6 +146,22 @@ intel_hdmi_sink_detect(struct drm_connector *connector)
}
static enum drm_connector_status
+igdng_hdmi_detect(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+
+ /* FIXME hotplug detect */
+
+ hdmi_priv->has_hdmi_sink = false;
+ intel_hdmi_sink_detect(connector);
+ if (hdmi_priv->has_hdmi_sink)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -153,6 +170,9 @@ intel_hdmi_detect(struct drm_connector *connector)
struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
u32 temp, bit;
+ if (IS_IGDNG(dev))
+ return igdng_hdmi_detect(connector);
+
temp = I915_READ(PORT_HOTPLUG_EN);
switch (hdmi_priv->sdvox_reg) {
@@ -219,6 +239,7 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
};
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
.save = intel_hdmi_save,
.restore = intel_hdmi_restore,
.detect = intel_hdmi_detect,
@@ -268,8 +289,17 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
/* Set up the DDC bus. */
if (sdvox_reg == SDVOB)
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
- else
+ else if (sdvox_reg == SDVOC)
intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+ else if (sdvox_reg == HDMIB)
+ intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
+ "HDMIB");
+ else if (sdvox_reg == HDMIC)
+ intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
+ "HDMIC");
+ else if (sdvox_reg == HDMID)
+ intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
+ "HDMID");
if (!intel_output->ddc_bus)
goto err_connector;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 439a8651499..f073ed8432e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -37,6 +37,8 @@
#include "i915_drm.h"
#include "i915_drv.h"
+#define I915_LVDS "i915_lvds"
+
/**
* Sets the backlight level.
*
@@ -45,10 +47,15 @@
static void intel_lvds_set_backlight(struct drm_device *dev, int level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blc_pwm_ctl;
+ u32 blc_pwm_ctl, reg;
+
+ if (IS_IGDNG(dev))
+ reg = BLC_PWM_CPU_CTL;
+ else
+ reg = BLC_PWM_CTL;
- blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
+ blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(reg, (blc_pwm_ctl |
(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
}
@@ -58,8 +65,14 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+
+ if (IS_IGDNG(dev))
+ reg = BLC_PWM_PCH_CTL2;
+ else
+ reg = BLC_PWM_CTL;
- return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
}
@@ -69,23 +82,31 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
static void intel_lvds_set_power(struct drm_device *dev, bool on)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_status;
+ u32 pp_status, ctl_reg, status_reg;
+
+ if (IS_IGDNG(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ status_reg = PCH_PP_STATUS;
+ } else {
+ ctl_reg = PP_CONTROL;
+ status_reg = PP_STATUS;
+ }
if (on) {
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
POWER_TARGET_ON);
do {
- pp_status = I915_READ(PP_STATUS);
+ pp_status = I915_READ(status_reg);
} while ((pp_status & PP_ON) == 0);
intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
} else {
intel_lvds_set_backlight(dev, 0);
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) &
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
~POWER_TARGET_ON);
do {
- pp_status = I915_READ(PP_STATUS);
+ pp_status = I915_READ(status_reg);
} while (pp_status & PP_ON);
}
}
@@ -106,12 +127,28 @@ static void intel_lvds_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
+ u32 pwm_ctl_reg;
+
+ if (IS_IGDNG(dev)) {
+ pp_on_reg = PCH_PP_ON_DELAYS;
+ pp_off_reg = PCH_PP_OFF_DELAYS;
+ pp_ctl_reg = PCH_PP_CONTROL;
+ pp_div_reg = PCH_PP_DIVISOR;
+ pwm_ctl_reg = BLC_PWM_CPU_CTL;
+ } else {
+ pp_on_reg = PP_ON_DELAYS;
+ pp_off_reg = PP_OFF_DELAYS;
+ pp_ctl_reg = PP_CONTROL;
+ pp_div_reg = PP_DIVISOR;
+ pwm_ctl_reg = BLC_PWM_CTL;
+ }
- dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS);
- dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS);
- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->savePP_ON = I915_READ(pp_on_reg);
+ dev_priv->savePP_OFF = I915_READ(pp_off_reg);
+ dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
+ dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
+ dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
BACKLIGHT_DUTY_CYCLE_MASK);
@@ -127,12 +164,28 @@ static void intel_lvds_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
+ u32 pwm_ctl_reg;
+
+ if (IS_IGDNG(dev)) {
+ pp_on_reg = PCH_PP_ON_DELAYS;
+ pp_off_reg = PCH_PP_OFF_DELAYS;
+ pp_ctl_reg = PCH_PP_CONTROL;
+ pp_div_reg = PCH_PP_DIVISOR;
+ pwm_ctl_reg = BLC_PWM_CPU_CTL;
+ } else {
+ pp_on_reg = PP_ON_DELAYS;
+ pp_off_reg = PP_OFF_DELAYS;
+ pp_ctl_reg = PP_CONTROL;
+ pp_div_reg = PP_DIVISOR;
+ pwm_ctl_reg = BLC_PWM_CTL;
+ }
- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON);
- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF);
- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+ I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
+ I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
+ I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
+ I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
intel_lvds_set_power(dev, true);
else
@@ -216,8 +269,14 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+
+ if (IS_IGDNG(dev))
+ reg = BLC_PWM_CPU_CTL;
+ else
+ reg = BLC_PWM_CTL;
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
BACKLIGHT_DUTY_CYCLE_MASK);
@@ -251,6 +310,10 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* settings.
*/
+ /* No panel fitting yet, fixme */
+ if (IS_IGDNG(dev))
+ return;
+
/*
* Enable automatic panel scaling so that non-native modes fill the
* screen. Should be enabled before the pipe is enabled, according to
@@ -343,11 +406,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
- struct drm_device *dev = connector->dev;
-
- if (property == dev->mode_config.dpms_property && connector->encoder)
- intel_lvds_dpms(connector->encoder, (uint32_t)(value & 0xf));
-
return 0;
}
@@ -366,6 +424,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
.save = intel_lvds_save,
.restore = intel_lvds_restore,
.detect = intel_lvds_detect,
@@ -386,12 +445,13 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG("Skipping LVDS initialization for %s\n", id->ident);
+ DRM_DEBUG_KMS(I915_LVDS,
+ "Skipping LVDS initialization for %s\n", id->ident);
return 1;
}
/* These systems claim to have LVDS, but really don't */
-static const struct dmi_system_id __initdata intel_no_lvds[] = {
+static const struct dmi_system_id intel_no_lvds[] = {
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Apple Mac Mini (Core series)",
@@ -424,8 +484,21 @@ static const struct dmi_system_id __initdata intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"),
},
},
-
- /* FIXME: add a check for the Aopen Mini PC */
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen Mini PC",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Aopen i945GTt-VFA",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
+ },
+ },
{ } /* terminating entry */
};
@@ -446,12 +519,18 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
u32 lvds;
- int pipe;
+ int pipe, gpio = GPIOC;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return;
+ if (IS_IGDNG(dev)) {
+ if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
+ return;
+ gpio = PCH_GPIOC;
+ }
+
intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
if (!intel_output) {
return;
@@ -486,7 +565,7 @@ void intel_lvds_init(struct drm_device *dev)
*/
/* Set up the DDC bus. */
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
+ intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
if (!intel_output->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
"failed.\n");
@@ -511,10 +590,10 @@ void intel_lvds_init(struct drm_device *dev)
}
/* Failed to get EDID, what about VBT? */
- if (dev_priv->vbt_mode) {
+ if (dev_priv->lfp_lvds_vbt_mode) {
mutex_lock(&dev->mode_config.mutex);
dev_priv->panel_fixed_mode =
- drm_mode_duplicate(dev, dev_priv->vbt_mode);
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
mutex_unlock(&dev->mode_config.mutex);
if (dev_priv->panel_fixed_mode) {
dev_priv->panel_fixed_mode->type |=
@@ -528,6 +607,11 @@ void intel_lvds_init(struct drm_device *dev)
* on. If so, assume that whatever is currently programmed is the
* correct mode.
*/
+
+ /* IGDNG: FIXME: if this still fails, don't try pipe mode for now */
+ if (IS_IGDNG(dev))
+ goto failed;
+
lvds = I915_READ(LVDS);
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
crtc = intel_get_crtc_from_pipe(dev, pipe);
@@ -546,11 +630,22 @@ void intel_lvds_init(struct drm_device *dev)
goto failed;
out:
+ if (IS_IGDNG(dev)) {
+ u32 pwm;
+ /* make sure PWM is enabled */
+ pwm = I915_READ(BLC_PWM_CPU_CTL2);
+ pwm |= (PWM_ENABLE | PWM_PIPE_B);
+ I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
+
+ pwm = I915_READ(BLC_PWM_PCH_CTL1);
+ pwm |= PWM_PCH_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
+ }
drm_sysfs_connector_add(connector);
return;
failed:
- DRM_DEBUG("No LVDS modes found, disabling.\n");
+ DRM_DEBUG_KMS(I915_LVDS, "No LVDS modes found, disabling.\n");
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9913651c1e1..9a00adb3a50 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,7 +36,7 @@
#include "intel_sdvo_regs.h"
#undef SDVO_DEBUG
-
+#define I915_SDVO "i915_sdvo"
struct intel_sdvo_priv {
struct intel_i2c_chan *i2c_bus;
int slaveaddr;
@@ -69,6 +69,10 @@ struct intel_sdvo_priv {
* This is set if we treat the device as HDMI, instead of DVI.
*/
bool is_hdmi;
+ /**
+ * This is set if we detect output of sdvo device as LVDS.
+ */
+ bool is_lvds;
/**
* Returned SDTV resolutions allowed for the current format, if the
@@ -273,20 +277,21 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
int i;
- printk(KERN_DEBUG "%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
+ DRM_DEBUG_KMS(I915_SDVO, "%s: W: %02X ",
+ SDVO_NAME(sdvo_priv), cmd);
for (i = 0; i < args_len; i++)
- printk(KERN_DEBUG "%02X ", ((u8 *)args)[i]);
+ DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
for (; i < 8; i++)
- printk(KERN_DEBUG " ");
+ DRM_LOG_KMS(" ");
for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
- printk(KERN_DEBUG "(%s)", sdvo_cmd_names[i].name);
+ DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
break;
}
}
if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0]))
- printk(KERN_DEBUG "(%02X)", cmd);
- printk(KERN_DEBUG "\n");
+ DRM_LOG_KMS("(%02X)", cmd);
+ DRM_LOG_KMS("\n");
}
#else
#define intel_sdvo_debug_write(o, c, a, l)
@@ -325,16 +330,16 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
int i;
- printk(KERN_DEBUG "%s: R: ", SDVO_NAME(sdvo_priv));
+ DRM_DEBUG_KMS(I915_SDVO, "%s: R: ", SDVO_NAME(sdvo_priv));
for (i = 0; i < response_len; i++)
- printk(KERN_DEBUG "%02X ", ((u8 *)response)[i]);
+ DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
for (; i < 8; i++)
- printk(KERN_DEBUG " ");
+ DRM_LOG_KMS(" ");
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- printk(KERN_DEBUG "(%s)", cmd_status_names[status]);
+ DRM_LOG_KMS("(%s)", cmd_status_names[status]);
else
- printk(KERN_DEBUG "(??? %d)", status);
- printk(KERN_DEBUG "\n");
+ DRM_LOG_KMS("(??? %d)", status);
+ DRM_LOG_KMS("\n");
}
#else
#define intel_sdvo_debug_response(o, r, l, s)
@@ -1398,10 +1403,8 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
/* set the bus switch and get the modes */
- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
intel_ddc_get_modes(intel_output);
#if 0
@@ -1543,6 +1546,37 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
}
}
+static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+ /*
+ * Attempt to get the mode list from DDC.
+ * Assume that the preferred modes are
+ * arranged in priority order.
+ */
+ /* set the bus switch and get the modes */
+ intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
+ intel_ddc_get_modes(intel_output);
+ if (list_empty(&connector->probed_modes) == false)
+ return;
+
+ /* Fetch modes from VBT */
+ if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(connector->dev,
+ dev_priv->sdvo_lvds_vbt_mode);
+ if (newmode != NULL) {
+ /* Guarantee the mode is preferred */
+ newmode->type = (DRM_MODE_TYPE_PREFERRED |
+ DRM_MODE_TYPE_DRIVER);
+ drm_mode_probed_add(connector, newmode);
+ }
+ }
+}
+
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
struct intel_output *output = to_intel_output(connector);
@@ -1550,6 +1584,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
if (sdvo_priv->is_tv)
intel_sdvo_get_tv_modes(connector);
+ else if (sdvo_priv->is_lvds == true)
+ intel_sdvo_get_lvds_modes(connector);
else
intel_sdvo_get_ddc_modes(connector);
@@ -1564,6 +1600,9 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
if (intel_output->i2c_bus)
intel_i2c_destroy(intel_output->i2c_bus);
+ if (intel_output->ddc_bus)
+ intel_i2c_destroy(intel_output->ddc_bus);
+
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(intel_output);
@@ -1578,6 +1617,7 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
};
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
.save = intel_sdvo_save,
.restore = intel_sdvo_restore,
.detect = intel_sdvo_detect,
@@ -1660,33 +1700,107 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
return true;
}
+static struct intel_output *
+intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
+{
+ struct drm_device *dev = chan->drm_dev;
+ struct drm_connector *connector;
+ struct intel_output *intel_output = NULL;
+
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+ if (to_intel_output(connector)->ddc_bus == chan) {
+ intel_output = to_intel_output(connector);
+ break;
+ }
+ }
+ return intel_output;
+}
+
+static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct intel_output *intel_output;
+ struct intel_sdvo_priv *sdvo_priv;
+ struct i2c_algo_bit_data *algo_data;
+ struct i2c_algorithm *algo;
+
+ algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
+ intel_output =
+ intel_sdvo_chan_to_intel_output(
+ (struct intel_i2c_chan *)(algo_data->data));
+ if (intel_output == NULL)
+ return -EINVAL;
+
+ sdvo_priv = intel_output->dev_priv;
+ algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo;
+
+ intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
+ return algo->master_xfer(i2c_adap, msgs, num);
+}
+
+static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
+ .master_xfer = intel_sdvo_master_xfer,
+};
+
+static u8
+intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+ if (output_device == SDVOB) {
+ my_mapping = &dev_priv->sdvo_mappings[0];
+ other_mapping = &dev_priv->sdvo_mappings[1];
+ } else {
+ my_mapping = &dev_priv->sdvo_mappings[1];
+ other_mapping = &dev_priv->sdvo_mappings[0];
+ }
+
+ /* If the BIOS described our SDVO device, take advantage of it. */
+ if (my_mapping->slave_addr)
+ return my_mapping->slave_addr;
+
+ /* If the BIOS only described a different SDVO device, use the
+ * address that it isn't using.
+ */
+ if (other_mapping->slave_addr) {
+ if (other_mapping->slave_addr == 0x70)
+ return 0x72;
+ else
+ return 0x70;
+ }
+
+ /* No SDVO device info was found for the other DVO port,
+ * so use the mapping assumption we had before BIOS parsing.
+ */
+ if (output_device == SDVOB)
+ return 0x70;
+ else
+ return 0x72;
+}
+
bool intel_sdvo_init(struct drm_device *dev, int output_device)
{
struct drm_connector *connector;
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
struct intel_i2c_chan *i2cbus = NULL;
+ struct intel_i2c_chan *ddcbus = NULL;
int connector_type;
u8 ch[0x40];
int i;
int encoder_type, output_id;
+ u8 slave_addr;
intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
if (!intel_output) {
return false;
}
- connector = &intel_output->base;
-
- drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
- DRM_MODE_CONNECTOR_Unknown);
- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
intel_output->type = INTEL_OUTPUT_SDVO;
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
/* setup the DDC bus. */
if (output_device == SDVOB)
i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
@@ -1694,32 +1808,47 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
if (!i2cbus)
- goto err_connector;
+ goto err_inteloutput;
+ slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
sdvo_priv->i2c_bus = i2cbus;
if (output_device == SDVOB) {
output_id = 1;
- sdvo_priv->i2c_bus->slave_addr = 0x38;
} else {
output_id = 2;
- sdvo_priv->i2c_bus->slave_addr = 0x39;
}
-
+ sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1;
sdvo_priv->output_device = output_device;
intel_output->i2c_bus = i2cbus;
intel_output->dev_priv = sdvo_priv;
-
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
- DRM_DEBUG("No SDVO device found on SDVO%c\n",
- output_device == SDVOB ? 'B' : 'C');
+ DRM_DEBUG_KMS(I915_SDVO,
+ "No SDVO device found on SDVO%c\n",
+ output_device == SDVOB ? 'B' : 'C');
goto err_i2c;
}
}
+ /* setup the DDC bus. */
+ if (output_device == SDVOB)
+ ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
+ else
+ ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
+
+ if (ddcbus == NULL)
+ goto err_i2c;
+
+ intel_sdvo_i2c_bit_algo.functionality =
+ intel_output->i2c_bus->adapter.algo->functionality;
+ ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo;
+ intel_output->ddc_bus = ddcbus;
+
+ /* In the default case SDVO LVDS is false */
+ sdvo_priv->is_lvds = false;
intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
if (sdvo_priv->caps.output_flags &
@@ -1729,7 +1858,6 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
else
sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
encoder_type = DRM_MODE_ENCODER_TMDS;
connector_type = DRM_MODE_CONNECTOR_DVID;
@@ -1747,7 +1875,6 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0)
{
sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
encoder_type = DRM_MODE_ENCODER_TVDAC;
connector_type = DRM_MODE_CONNECTOR_SVIDEO;
sdvo_priv->is_tv = true;
@@ -1756,30 +1883,28 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0)
{
sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
encoder_type = DRM_MODE_ENCODER_DAC;
connector_type = DRM_MODE_CONNECTOR_VGA;
}
else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1)
{
sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
encoder_type = DRM_MODE_ENCODER_DAC;
connector_type = DRM_MODE_CONNECTOR_VGA;
}
else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0)
{
sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
encoder_type = DRM_MODE_ENCODER_LVDS;
connector_type = DRM_MODE_CONNECTOR_LVDS;
+ sdvo_priv->is_lvds = true;
}
else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1)
{
sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
encoder_type = DRM_MODE_ENCODER_LVDS;
connector_type = DRM_MODE_CONNECTOR_LVDS;
+ sdvo_priv->is_lvds = true;
}
else
{
@@ -1787,17 +1912,25 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
sdvo_priv->controlled_output = 0;
memcpy (bytes, &sdvo_priv->caps.output_flags, 2);
- DRM_DEBUG("%s: Unknown SDVO output type (0x%02x%02x)\n",
- SDVO_NAME(sdvo_priv),
- bytes[0], bytes[1]);
+ DRM_DEBUG_KMS(I915_SDVO,
+ "%s: Unknown SDVO output type (0x%02x%02x)\n",
+ SDVO_NAME(sdvo_priv),
+ bytes[0], bytes[1]);
encoder_type = DRM_MODE_ENCODER_NONE;
connector_type = DRM_MODE_CONNECTOR_Unknown;
goto err_i2c;
}
+ connector = &intel_output->base;
+ drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
+ connector_type);
+ drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+
drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type);
drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
- connector->connector_type = connector_type;
drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
drm_sysfs_connector_add(connector);
@@ -1812,31 +1945,30 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
&sdvo_priv->pixel_clock_max);
- DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
- "clock range %dMHz - %dMHz, "
- "input 1: %c, input 2: %c, "
- "output 1: %c, output 2: %c\n",
- SDVO_NAME(sdvo_priv),
- sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
- sdvo_priv->caps.device_rev_id,
- sdvo_priv->pixel_clock_min / 1000,
- sdvo_priv->pixel_clock_max / 1000,
- (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
- (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
- /* check currently supported outputs */
- sdvo_priv->caps.output_flags &
+ DRM_DEBUG_KMS(I915_SDVO, "%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+ "input 1: %c, input 2: %c, "
+ "output 1: %c, output 2: %c\n",
+ SDVO_NAME(sdvo_priv),
+ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
+ sdvo_priv->caps.device_rev_id,
+ sdvo_priv->pixel_clock_min / 1000,
+ sdvo_priv->pixel_clock_max / 1000,
+ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ /* check currently supported outputs */
+ sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
- sdvo_priv->caps.output_flags &
+ sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
- intel_output->ddc_bus = i2cbus;
-
return true;
err_i2c:
+ if (ddcbus != NULL)
+ intel_i2c_destroy(intel_output->ddc_bus);
intel_i2c_destroy(intel_output->i2c_bus);
-err_connector:
- drm_connector_cleanup(connector);
+err_inteloutput:
kfree(intel_output);
return false;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d2c32983242..50d7ed70b33 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1392,6 +1392,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
tv_ctl &= ~TV_TEST_MODE_MASK;
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
tv_dac &= ~TVDAC_SENSE_MASK;
+ tv_dac &= ~DAC_A_MASK;
+ tv_dac &= ~DAC_B_MASK;
+ tv_dac &= ~DAC_C_MASK;
tv_dac |= (TVDAC_STATE_CHG_EN |
TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL |
@@ -1626,6 +1629,7 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
};
static const struct drm_connector_funcs intel_tv_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
.save = intel_tv_save,
.restore = intel_tv_restore,
.detect = intel_tv_detect,
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
new file mode 100644
index 00000000000..2168d67f09a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -0,0 +1,34 @@
+config DRM_RADEON_KMS
+ bool "Enable modesetting on radeon by default"
+ depends on DRM_RADEON
+ select DRM_TTM
+ help
+ Choose this option if you want kernel modesetting enabled by default,
+ and you have a new enough userspace to support this. Running old
+ userspaces with this enabled will cause pain.
+
+ When kernel modesetting is enabled, the IOCTLs of the radeon/drm
+ driver are considered invalid: an error message is printed
+ in the log and they return failure.
+
+ KMS-enabled userspace will use a new API to talk to the radeon/drm
+ driver. The new API provides functions to create/destroy/share/mmap
+ buffer objects, which are then managed by the kernel memory manager
+ (here TTM). To submit commands to the GPU, userspace provides a
+ buffer holding the command stream, and along with this buffer it
+ must provide a list of the buffer objects used by the command
+ stream. The kernel radeon driver then places those buffers in
+ GPU-accessible memory and updates the command stream to reflect
+ the position of the different buffers.
+
+ The kernel also performs security checks on the command stream
+ provided by the user; we want to catch and forbid any illegal use
+ of the GPU, such as DMA into random system memory or into memory
+ not owned by the process supplying the command stream. This part
+ of the code is still incomplete, which is why this patch is proposed
+ as a staging driver addition; future security checks might forbid
+ the current experimental userspace from running.
+
+ This code supports the following hardware: R1XX, R2XX, R3XX, R4XX, R5XX
+ (radeon up to X1950). Work is underway to provide support for R6XX,
+ R7XX and newer hardware (radeon from HD2XXX to HD4XXX).
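
To make the buffer-object and command-stream model described in the help text above concrete, the following is a minimal, purely illustrative C sketch of what a KMS client hands to the kernel: a command-stream buffer plus the list of buffer objects that stream references, which the driver then places in GPU-accessible memory, relocates, and validates. The type and function names here (cs_buffer_ref, cs_submission, build_submission) are hypothetical and are not the radeon ioctl interface added by this series.

/*
 * Hypothetical illustration only -- not the radeon/drm ioctl layout.
 * It mirrors the model in the Kconfig text: one command-stream buffer
 * plus a list of the buffer objects that the stream references.
 */
#include <stdint.h>
#include <stddef.h>

struct cs_buffer_ref {          /* one buffer object used by the stream */
	uint32_t handle;        /* handle returned when the object was created */
	uint32_t read_domains;  /* memory domains the GPU may read it from */
	uint32_t write_domain;  /* memory domain the GPU may write it to */
};

struct cs_submission {          /* what userspace hands to the kernel */
	const uint32_t *cmds;   /* command stream, in dwords */
	size_t ndwords;
	const struct cs_buffer_ref *bos;  /* buffers the stream references */
	size_t nbos;
};

/* Package a command stream and its buffer list for submission.  The real
 * driver would then pin each buffer, patch the stream with the final GPU
 * addresses, and run its security checks before scheduling the work. */
static struct cs_submission build_submission(const uint32_t *cmds, size_t ndwords,
					     const struct cs_buffer_ref *bos, size_t nbos)
{
	struct cs_submission s = {
		.cmds = cmds, .ndwords = ndwords,
		.bos = bos,   .nbos = nbos,
	};
	return s;
}

The point of the separate buffer list is that the kernel, not userspace, decides the final GPU addresses: userspace only names the objects it uses, and the driver patches the command stream to match where it actually placed them.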
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 52ce439a0f2..5fae1e074b4 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -3,7 +3,17 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
-radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o r600_cp.o
+radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
+ radeon_irq.o r300_cmdbuf.o r600_cp.o
+
+radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \
+ radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
+ atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
+ radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
+ radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
+ radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
+ radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
+ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
new file mode 100644
index 00000000000..6d0183c61d3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -0,0 +1,578 @@
+/*
+* Copyright 2006-2007 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*/
+/* based on stg/asic_reg/drivers/inc/asic_reg/ObjectID.h ver 23 */
+
+#ifndef _OBJECTID_H
+#define _OBJECTID_H
+
+#if defined(_X86_)
+#pragma pack(1)
+#endif
+
+/****************************************************/
+/* Graphics Object Type Definition */
+/****************************************************/
+#define GRAPH_OBJECT_TYPE_NONE 0x0
+#define GRAPH_OBJECT_TYPE_GPU 0x1
+#define GRAPH_OBJECT_TYPE_ENCODER 0x2
+#define GRAPH_OBJECT_TYPE_CONNECTOR 0x3
+#define GRAPH_OBJECT_TYPE_ROUTER 0x4
+/* deleted */
+
+/****************************************************/
+/* Encoder Object ID Definition */
+/****************************************************/
+#define ENCODER_OBJECT_ID_NONE 0x00
+
+/* Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03
+#define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04
+#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07
+
+/* External Third Party Encoders */
+#define ENCODER_OBJECT_ID_SI170B 0x08
+#define ENCODER_OBJECT_ID_CH7303 0x09
+#define ENCODER_OBJECT_ID_CH7301 0x0A
+#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D
+#define ENCODER_OBJECT_ID_TITFP513 0x0E
+#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */
+#define ENCODER_OBJECT_ID_VT1623 0x10
+#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
+#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
+/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */
+#define ENCODER_OBJECT_ID_SI178 0x17 /* External TMDS (dual link, no HDCP) */
+#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */
+#define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19
+#define ENCODER_OBJECT_ID_VT1625 0x1A
+#define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B
+#define ENCODER_OBJECT_ID_DP_AN9801 0x1C
+#define ENCODER_OBJECT_ID_DP_DP501 0x1D
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY 0x1E
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA 0x1F
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21
+
+#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
+
+/****************************************************/
+/* Connector Object ID Definition */
+/****************************************************/
+#define CONNECTOR_OBJECT_ID_NONE 0x00
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D 0x04
+#define CONNECTOR_OBJECT_ID_VGA 0x05
+#define CONNECTOR_OBJECT_ID_COMPOSITE 0x06
+#define CONNECTOR_OBJECT_ID_SVIDEO 0x07
+#define CONNECTOR_OBJECT_ID_YPbPr 0x08
+#define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09
+#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */
+#define CONNECTOR_OBJECT_ID_SCART 0x0B
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D
+#define CONNECTOR_OBJECT_ID_LVDS 0x0E
+#define CONNECTOR_OBJECT_ID_7PIN_DIN 0x0F
+#define CONNECTOR_OBJECT_ID_PCIE_CONNECTOR 0x10
+#define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11
+#define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12
+#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
+
+/* deleted */
+
+/****************************************************/
+/* Router Object ID Definition */
+/****************************************************/
+#define ROUTER_OBJECT_ID_NONE 0x00
+#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01
+
+/****************************************************/
+/* Graphics Object ENUM ID Definition */
+/****************************************************/
+#define GRAPH_OBJECT_ENUM_ID1 0x01
+#define GRAPH_OBJECT_ENUM_ID2 0x02
+#define GRAPH_OBJECT_ENUM_ID3 0x03
+#define GRAPH_OBJECT_ENUM_ID4 0x04
+#define GRAPH_OBJECT_ENUM_ID5 0x05
+#define GRAPH_OBJECT_ENUM_ID6 0x06
+
+/****************************************************/
+/* Graphics Object ID Bit definition */
+/****************************************************/
+#define OBJECT_ID_MASK 0x00FF
+#define ENUM_ID_MASK 0x0700
+#define RESERVED1_ID_MASK 0x0800
+#define OBJECT_TYPE_MASK 0x7000
+#define RESERVED2_ID_MASK 0x8000
+
+#define OBJECT_ID_SHIFT 0x00
+#define ENUM_ID_SHIFT 0x08
+#define OBJECT_TYPE_SHIFT 0x0C
+
+/****************************************************/
+/* Graphics Object family definition */
+/****************************************************/
+#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \
+ (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+ GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
+/****************************************************/
+/* GPU Object ID definition - Shared with BIOS */
+/****************************************************/
+#define GPU_ENUM_ID1 (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
+
+/****************************************************/
+/* Encoder Object ID definition - Shared with BIOS */
+/****************************************************/
+/*
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107
+#define ENCODER_SIL170B_ENUM_ID1 0x2108
+#define ENCODER_CH7303_ENUM_ID1 0x2109
+#define ENCODER_CH7301_ENUM_ID1 0x210A
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 0x210C
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 0x210D
+#define ENCODER_TITFP513_ENUM_ID1 0x210E
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 0x210F
+#define ENCODER_VT1623_ENUM_ID1 0x2110
+#define ENCODER_HDMI_SI1930_ENUM_ID1 0x2111
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1 0x2112
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116
+#define ENCODER_SI178_ENUM_ID1 0x2117
+#define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118
+#define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119
+#define ENCODER_VT1625_ENUM_ID1 0x211A
+#define ENCODER_HDMI_SI1932_ENUM_ID1 0x211B
+#define ENCODER_ENCODER_DP_AN9801_ENUM_ID1 0x211C
+#define ENCODER_DP_DP501_ENUM_ID1 0x211D
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E
+*/
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_SIL170B_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7303_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7301_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_TITFP513_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1623_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1930_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */
+
+#define ENCODER_SI178_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
+
+#define ENCODER_MVPU_FPGA_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DDI_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1625_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1932_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_DP501_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_AN9801_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Connector Object ID definition - Shared with BIOS */
+/****************************************************/
+/*
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 0x3101
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 0x3102
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 0x3103
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 0x3104
+#define CONNECTOR_VGA_ENUM_ID1 0x3105
+#define CONNECTOR_COMPOSITE_ENUM_ID1 0x3106
+#define CONNECTOR_SVIDEO_ENUM_ID1 0x3107
+#define CONNECTOR_YPbPr_ENUM_ID1 0x3108
+#define CONNECTOR_D_CONNECTORE_ENUM_ID1 0x3109
+#define CONNECTOR_9PIN_DIN_ENUM_ID1 0x310A
+#define CONNECTOR_SCART_ENUM_ID1 0x310B
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 0x310C
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 0x310D
+#define CONNECTOR_LVDS_ENUM_ID1 0x310E
+#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110
+*/
+#define CONNECTOR_LVDS_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \
+ (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Router Object ID definition - Shared with BIOS */
+/****************************************************/
+#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
+
+/* deleted */
+
+/****************************************************/
+/* Object Cap definition - Shared with BIOS */
+/****************************************************/
+#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L
+#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L
+
+#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01
+#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02
+#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03
+
+#if defined(_X86_)
+#pragma pack()
+#endif
+
+#endif /*GRAPHICTYPE */
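Each composed object ID above packs three bit-fields into a single 16-bit value — graphics object type, enumeration instance, and the object ID proper — positioned by the *_SHIFT constants defined earlier in ObjectID.h (not shown in this hunk). As a rough standalone illustration, not part of this patch, the sketch below decodes such a value; the field positions are assumptions inferred from the legacy 0x31xx connector values quoted in the comment block above, not the header's authoritative masks.

/* Standalone sketch: decode a composed graphics object ID.
 * The field layout is an assumption inferred from the legacy 0x31xx
 * connector values; the real OBJECT_TYPE_SHIFT / ENUM_ID_SHIFT /
 * OBJECT_ID_SHIFT constants live in ObjectID.h.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_OBJECT_ID_SHIFT    0	/* assumed: bits 0-7   */
#define SKETCH_ENUM_ID_SHIFT      8	/* assumed: bits 8-10  */
#define SKETCH_OBJECT_TYPE_SHIFT 12	/* assumed: bits 12-14 */

static void decode_object_id(uint16_t obj)
{
	unsigned type    = (obj >> SKETCH_OBJECT_TYPE_SHIFT) & 0x7;
	unsigned enum_id = (obj >> SKETCH_ENUM_ID_SHIFT) & 0x7;
	unsigned id      = (obj >> SKETCH_OBJECT_ID_SHIFT) & 0xff;

	printf("0x%04X -> type %u, enum %u, object id 0x%02X\n",
	       (unsigned)obj, type, enum_id, id);
}

int main(void)
{
	/* 0x3102 is the legacy CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 value */
	decode_object_id(0x3102);
	return 0;
}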
diff --git a/drivers/gpu/drm/radeon/atom-bits.h b/drivers/gpu/drm/radeon/atom-bits.h
new file mode 100644
index 00000000000..e8fae5c7751
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-bits.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_BITS_H
+#define ATOM_BITS_H
+
+static inline uint8_t get_u8(void *bios, int ptr)
+{
+ return ((unsigned char *)bios)[ptr];
+}
+#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
+#define CU8(ptr) get_u8(ctx->bios, (ptr))
+static inline uint16_t get_u16(void *bios, int ptr)
+{
+	return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr + 1)) << 8);
+}
+#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
+#define CU16(ptr) get_u16(ctx->bios, (ptr))
+static inline uint32_t get_u32(void *bios, int ptr)
+{
+	return get_u16(bios, ptr) | (((uint32_t)get_u16(bios, ptr + 2)) << 16);
+}
+#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
+#define CU32(ptr) get_u32(ctx->bios, (ptr))
+#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
+
+#endif
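The accessors in atom-bits.h deliberately build 16- and 32-bit reads out of single-byte reads, so values are always assembled little-endian from the BIOS image regardless of host byte order; the U*/CU* macro pairs only differ in whether they dereference ctx->ctx->bios (from the interpreter's execution context) or ctx->bios (from the atom_context directly). A minimal standalone sketch of the same layering, using local copies of the helpers rather than the driver's macros:

/* Standalone sketch of the little-endian accessors in atom-bits.h.
 * Local re-implementations for illustration; the driver versions read
 * from ctx->bios via the U*/CU* macros.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t get_u8(const void *bios, int ptr)
{
	return ((const uint8_t *)bios)[ptr];
}

static uint16_t get_u16(const void *bios, int ptr)
{
	return get_u8(bios, ptr) | ((uint16_t)get_u8(bios, ptr + 1) << 8);
}

static uint32_t get_u32(const void *bios, int ptr)
{
	return get_u16(bios, ptr) | ((uint32_t)get_u16(bios, ptr + 2) << 16);
}

int main(void)
{
	/* bytes as they would sit in a BIOS image (little-endian) */
	const uint8_t image[] = { 0x55, 0xAA, 0x78, 0x56, 0x34, 0x12 };

	printf("u16 @0 = 0x%04X\n", get_u16(image, 0));	/* 0xAA55 */
	printf("u32 @2 = 0x%08X\n", get_u32(image, 2));	/* 0x12345678 */
	return 0;
}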
diff --git a/drivers/gpu/drm/radeon/atom-names.h b/drivers/gpu/drm/radeon/atom-names.h
new file mode 100644
index 00000000000..6f907a5ffa5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-names.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_NAMES_H
+#define ATOM_NAMES_H
+
+#include "atom.h"
+
+#ifdef ATOM_DEBUG
+
+#define ATOM_OP_NAMES_CNT 123
+static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
+"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
+"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
+"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
+"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
+"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
+"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
+"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
+"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
+"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
+"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
+"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
+"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
+"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
+"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
+"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
+"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
+"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
+"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
+"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
+"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
+"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
+"DEBUG", "CTB_DS",
+};
+
+#define ATOM_TABLE_NAMES_CNT 74
+static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
+"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
+"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
+"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
+"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
+"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
+"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
+"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
+"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
+"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
+"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
+"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
+"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
+"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
+"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
+"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
+"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
+"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
+"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
+"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
+"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
+"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
+"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
+"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
+"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
+"MemoryDeviceInit", "EnableYUV",
+};
+
+#define ATOM_IO_NAMES_CNT 5
+static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
+"MM", "PLL", "MC", "PCIE", "PCIE PORT",
+};
+
+#else
+
+#define ATOM_OP_NAMES_CNT 0
+#define ATOM_TABLE_NAMES_CNT 0
+#define ATOM_IO_NAMES_CNT 0
+
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/radeon/atom-types.h b/drivers/gpu/drm/radeon/atom-types.h
new file mode 100644
index 00000000000..1125b866cdb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-types.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Dave Airlie
+ */
+
+#ifndef ATOM_TYPES_H
+#define ATOM_TYPES_H
+
+/* sync atom types to kernel types */
+
+typedef uint16_t USHORT;
+typedef uint32_t ULONG;
+typedef uint8_t UCHAR;
+
+
+#ifndef ATOM_BIG_ENDIAN
+#if defined(__BIG_ENDIAN)
+#define ATOM_BIG_ENDIAN 1
+#else
+#define ATOM_BIG_ENDIAN 0
+#endif
+#endif
+#endif
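atom-types.h pins the VBIOS-style USHORT/ULONG/UCHAR names to fixed-width kernel types and derives ATOM_BIG_ENDIAN from the kernel's __BIG_ENDIAN, so atombios.h (which refuses to compile when the endianness macro is missing) always sees a definition. The standalone sketch below only illustrates the size guarantees this mapping relies on; the _Static_assert form is an assumption for illustration — kernel code would typically use BUILD_BUG_ON instead.

/* Standalone sketch: the typedefs above work because the fixed-width
 * types have exactly the sizes the VBIOS structures expect.
 * _Static_assert is used here for illustration only.
 */
#include <stdint.h>

typedef uint16_t USHORT;
typedef uint32_t ULONG;
typedef uint8_t  UCHAR;

_Static_assert(sizeof(UCHAR)  == 1, "UCHAR must be 1 byte");
_Static_assert(sizeof(USHORT) == 2, "USHORT must be 2 bytes");
_Static_assert(sizeof(ULONG)  == 4, "ULONG must be 4 bytes");

int main(void)
{
	return 0;
}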
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
new file mode 100644
index 00000000000..901befe03da
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -0,0 +1,1215 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#define ATOM_DEBUG
+
+#include "atom.h"
+#include "atom-names.h"
+#include "atom-bits.h"
+
+#define ATOM_COND_ABOVE 0
+#define ATOM_COND_ABOVEOREQUAL 1
+#define ATOM_COND_ALWAYS 2
+#define ATOM_COND_BELOW 3
+#define ATOM_COND_BELOWOREQUAL 4
+#define ATOM_COND_EQUAL 5
+#define ATOM_COND_NOTEQUAL 6
+
+#define ATOM_PORT_ATI 0
+#define ATOM_PORT_PCI 1
+#define ATOM_PORT_SYSIO 2
+
+#define ATOM_UNIT_MICROSEC 0
+#define ATOM_UNIT_MILLISEC 1
+
+#define PLL_INDEX 2
+#define PLL_DATA 3
+
+typedef struct {
+ struct atom_context *ctx;
+
+ uint32_t *ps, *ws;
+ int ps_shift;
+ uint16_t start;
+} atom_exec_context;
+
+int atom_debug = 0;
+void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+
+static uint32_t atom_arg_mask[8] =
+ { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
+0xFF000000 };
+static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
+
+static int atom_dst_to_src[8][4] = {
+ /* translate destination alignment field to the source alignment encoding */
+ {0, 0, 0, 0},
+ {1, 2, 3, 0},
+ {1, 2, 3, 0},
+ {1, 2, 3, 0},
+ {4, 5, 6, 7},
+ {4, 5, 6, 7},
+ {4, 5, 6, 7},
+ {4, 5, 6, 7},
+};
+static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
+
+static int debug_depth = 0;
+#ifdef ATOM_DEBUG
+static void debug_print_spaces(int n)
+{
+ while (n--)
+ printk(" ");
+}
+
+#define DEBUG(...) do { if (atom_debug) printk(KERN_DEBUG __VA_ARGS__); } while (0)
+#define SDEBUG(...) do { if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } } while (0)
+#else
+#define DEBUG(...) do { } while (0)
+#define SDEBUG(...) do { } while (0)
+#endif
+
+static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
+ uint32_t index, uint32_t data)
+{
+ uint32_t temp = 0xCDCDCDCD;
+ while (1)
+ switch (CU8(base)) {
+ case ATOM_IIO_NOP:
+ base++;
+ break;
+ case ATOM_IIO_READ:
+ temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
+ base += 3;
+ break;
+ case ATOM_IIO_WRITE:
+ ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
+ base += 3;
+ break;
+		case ATOM_IIO_CLEAR:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2));
+			base += 3;
+			break;
+		case ATOM_IIO_SET:
+			temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2);
+			base += 3;
+			break;
+		case ATOM_IIO_MOVE_INDEX:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2));
+			temp |= ((index >> CU8(base + 2)) &
+				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_MOVE_DATA:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2));
+			temp |= ((data >> CU8(base + 2)) &
+				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_MOVE_ATTR:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2));
+			temp |= ((ctx->io_attr >> CU8(base + 2)) &
+				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
+			base += 4;
+			break;
+ case ATOM_IIO_END:
+ return temp;
+ default:
+ printk(KERN_INFO "Unknown IIO opcode.\n");
+ return 0;
+ }
+}
+
+static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+ int *ptr, uint32_t *saved, int print)
+{
+ uint32_t idx, val = 0xCDCDCDCD, align, arg;
+ struct atom_context *gctx = ctx->ctx;
+ arg = attr & 7;
+ align = (attr >> 3) & 7;
+ switch (arg) {
+ case ATOM_ARG_REG:
+ idx = U16(*ptr);
+ (*ptr) += 2;
+ if (print)
+ DEBUG("REG[0x%04X]", idx);
+ idx += gctx->reg_block;
+ switch (gctx->io_mode) {
+ case ATOM_IO_MM:
+ val = gctx->card->reg_read(gctx->card, idx);
+ break;
+ case ATOM_IO_PCI:
+ printk(KERN_INFO
+ "PCI registers are not implemented.\n");
+ return 0;
+ case ATOM_IO_SYSIO:
+ printk(KERN_INFO
+ "SYSIO registers are not implemented.\n");
+ return 0;
+ default:
+ if (!(gctx->io_mode & 0x80)) {
+ printk(KERN_INFO "Bad IO mode.\n");
+ return 0;
+ }
+ if (!gctx->iio[gctx->io_mode & 0x7F]) {
+ printk(KERN_INFO
+ "Undefined indirect IO read method %d.\n",
+ gctx->io_mode & 0x7F);
+ return 0;
+ }
+ val =
+ atom_iio_execute(gctx,
+ gctx->iio[gctx->io_mode & 0x7F],
+ idx, 0);
+ }
+ break;
+ case ATOM_ARG_PS:
+ idx = U8(*ptr);
+ (*ptr)++;
+ val = le32_to_cpu(ctx->ps[idx]);
+ if (print)
+ DEBUG("PS[0x%02X,0x%04X]", idx, val);
+ break;
+ case ATOM_ARG_WS:
+ idx = U8(*ptr);
+ (*ptr)++;
+ if (print)
+ DEBUG("WS[0x%02X]", idx);
+ switch (idx) {
+ case ATOM_WS_QUOTIENT:
+ val = gctx->divmul[0];
+ break;
+ case ATOM_WS_REMAINDER:
+ val = gctx->divmul[1];
+ break;
+ case ATOM_WS_DATAPTR:
+ val = gctx->data_block;
+ break;
+ case ATOM_WS_SHIFT:
+ val = gctx->shift;
+ break;
+ case ATOM_WS_OR_MASK:
+ val = 1 << gctx->shift;
+ break;
+ case ATOM_WS_AND_MASK:
+ val = ~(1 << gctx->shift);
+ break;
+ case ATOM_WS_FB_WINDOW:
+ val = gctx->fb_base;
+ break;
+ case ATOM_WS_ATTRIBUTES:
+ val = gctx->io_attr;
+ break;
+ default:
+ val = ctx->ws[idx];
+ }
+ break;
+ case ATOM_ARG_ID:
+ idx = U16(*ptr);
+ (*ptr) += 2;
+ if (print) {
+ if (gctx->data_block)
+ DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
+ else
+ DEBUG("ID[0x%04X]", idx);
+ }
+ val = U32(idx + gctx->data_block);
+ break;
+ case ATOM_ARG_FB:
+ idx = U8(*ptr);
+ (*ptr)++;
+ if (print)
+ DEBUG("FB[0x%02X]", idx);
+ printk(KERN_INFO "FB access is not implemented.\n");
+ return 0;
+ case ATOM_ARG_IMM:
+ switch (align) {
+ case ATOM_SRC_DWORD:
+ val = U32(*ptr);
+ (*ptr) += 4;
+ if (print)
+ DEBUG("IMM 0x%08X\n", val);
+ return val;
+ case ATOM_SRC_WORD0:
+ case ATOM_SRC_WORD8:
+ case ATOM_SRC_WORD16:
+ val = U16(*ptr);
+ (*ptr) += 2;
+ if (print)
+ DEBUG("IMM 0x%04X\n", val);
+ return val;
+ case ATOM_SRC_BYTE0:
+ case ATOM_SRC_BYTE8:
+ case ATOM_SRC_BYTE16:
+ case ATOM_SRC_BYTE24:
+ val = U8(*ptr);
+ (*ptr)++;
+ if (print)
+ DEBUG("IMM 0x%02X\n", val);
+ return val;
+ }
+ return 0;
+ case ATOM_ARG_PLL:
+ idx = U8(*ptr);
+ (*ptr)++;
+ if (print)
+ DEBUG("PLL[0x%02X]", idx);
+ val = gctx->card->pll_read(gctx->card, idx);
+ break;
+ case ATOM_ARG_MC:
+ idx = U8(*ptr);
+ (*ptr)++;
+ if (print)
+ DEBUG("MC[0x%02X]", idx);
+ val = gctx->card->mc_read(gctx->card, idx);
+ break;
+ }
+ if (saved)
+ *saved = val;
+ val &= atom_arg_mask[align];
+ val >>= atom_arg_shift[align];
+ if (print)
+ switch (align) {
+ case ATOM_SRC_DWORD:
+ DEBUG(".[31:0] -> 0x%08X\n", val);
+ break;
+ case ATOM_SRC_WORD0:
+ DEBUG(".[15:0] -> 0x%04X\n", val);
+ break;
+ case ATOM_SRC_WORD8:
+ DEBUG(".[23:8] -> 0x%04X\n", val);
+ break;
+ case ATOM_SRC_WORD16:
+ DEBUG(".[31:16] -> 0x%04X\n", val);
+ break;
+ case ATOM_SRC_BYTE0:
+ DEBUG(".[7:0] -> 0x%02X\n", val);
+ break;
+ case ATOM_SRC_BYTE8:
+ DEBUG(".[15:8] -> 0x%02X\n", val);
+ break;
+ case ATOM_SRC_BYTE16:
+ DEBUG(".[23:16] -> 0x%02X\n", val);
+ break;
+ case ATOM_SRC_BYTE24:
+ DEBUG(".[31:24] -> 0x%02X\n", val);
+ break;
+ }
+ return val;
+}
+
+static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+ uint32_t align = (attr >> 3) & 7, arg = attr & 7;
+ switch (arg) {
+ case ATOM_ARG_REG:
+ case ATOM_ARG_ID:
+ (*ptr) += 2;
+ break;
+ case ATOM_ARG_PLL:
+ case ATOM_ARG_MC:
+ case ATOM_ARG_PS:
+ case ATOM_ARG_WS:
+ case ATOM_ARG_FB:
+ (*ptr)++;
+ break;
+ case ATOM_ARG_IMM:
+ switch (align) {
+ case ATOM_SRC_DWORD:
+ (*ptr) += 4;
+ return;
+ case ATOM_SRC_WORD0:
+ case ATOM_SRC_WORD8:
+ case ATOM_SRC_WORD16:
+ (*ptr) += 2;
+ return;
+ case ATOM_SRC_BYTE0:
+ case ATOM_SRC_BYTE8:
+ case ATOM_SRC_BYTE16:
+ case ATOM_SRC_BYTE24:
+ (*ptr)++;
+ return;
+ }
+ return;
+ }
+}
+
+static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+ return atom_get_src_int(ctx, attr, ptr, NULL, 1);
+}
+
+static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+ int *ptr, uint32_t *saved, int print)
+{
+ return atom_get_src_int(ctx,
+ arg | atom_dst_to_src[(attr >> 3) &
+ 7][(attr >> 6) & 3] << 3,
+ ptr, saved, print);
+}
+
+static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
+{
+ atom_skip_src_int(ctx,
+ arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
+ 3] << 3, ptr);
+}
+
+static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+ int *ptr, uint32_t val, uint32_t saved)
+{
+ uint32_t align =
+ atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
+ val, idx;
+ struct atom_context *gctx = ctx->ctx;
+ old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
+ val <<= atom_arg_shift[align];
+ val &= atom_arg_mask[align];
+ saved &= ~atom_arg_mask[align];
+ val |= saved;
+ switch (arg) {
+ case ATOM_ARG_REG:
+ idx = U16(*ptr);
+ (*ptr) += 2;
+ DEBUG("REG[0x%04X]", idx);
+ idx += gctx->reg_block;
+ switch (gctx->io_mode) {
+ case ATOM_IO_MM:
+ if (idx == 0)
+ gctx->card->reg_write(gctx->card, idx,
+ val << 2);
+ else
+ gctx->card->reg_write(gctx->card, idx, val);
+ break;
+ case ATOM_IO_PCI:
+ printk(KERN_INFO
+ "PCI registers are not implemented.\n");
+ return;
+ case ATOM_IO_SYSIO:
+ printk(KERN_INFO
+ "SYSIO registers are not implemented.\n");
+ return;
+ default:
+ if (!(gctx->io_mode & 0x80)) {
+ printk(KERN_INFO "Bad IO mode.\n");
+ return;
+ }
+ if (!gctx->iio[gctx->io_mode & 0xFF]) {
+ printk(KERN_INFO
+ "Undefined indirect IO write method %d.\n",
+ gctx->io_mode & 0x7F);
+ return;
+ }
+ atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
+ idx, val);
+ }
+ break;
+ case ATOM_ARG_PS:
+ idx = U8(*ptr);
+ (*ptr)++;
+ DEBUG("PS[0x%02X]", idx);
+ ctx->ps[idx] = cpu_to_le32(val);
+ break;
+ case ATOM_ARG_WS:
+ idx = U8(*ptr);
+ (*ptr)++;
+ DEBUG("WS[0x%02X]", idx);
+ switch (idx) {
+ case ATOM_WS_QUOTIENT:
+ gctx->divmul[0] = val;
+ break;
+ case ATOM_WS_REMAINDER:
+ gctx->divmul[1] = val;
+ break;
+ case ATOM_WS_DATAPTR:
+ gctx->data_block = val;
+ break;
+ case ATOM_WS_SHIFT:
+ gctx->shift = val;
+ break;
+ case ATOM_WS_OR_MASK:
+ case ATOM_WS_AND_MASK:
+ break;
+ case ATOM_WS_FB_WINDOW:
+ gctx->fb_base = val;
+ break;
+ case ATOM_WS_ATTRIBUTES:
+ gctx->io_attr = val;
+ break;
+ default:
+ ctx->ws[idx] = val;
+ }
+ break;
+ case ATOM_ARG_FB:
+ idx = U8(*ptr);
+ (*ptr)++;
+ DEBUG("FB[0x%02X]", idx);
+ printk(KERN_INFO "FB access is not implemented.\n");
+ return;
+ case ATOM_ARG_PLL:
+ idx = U8(*ptr);
+ (*ptr)++;
+ DEBUG("PLL[0x%02X]", idx);
+ gctx->card->pll_write(gctx->card, idx, val);
+ break;
+ case ATOM_ARG_MC:
+ idx = U8(*ptr);
+ (*ptr)++;
+ DEBUG("MC[0x%02X]", idx);
+ gctx->card->mc_write(gctx->card, idx, val);
+ return;
+ }
+ switch (align) {
+ case ATOM_SRC_DWORD:
+ DEBUG(".[31:0] <- 0x%08X\n", old_val);
+ break;
+ case ATOM_SRC_WORD0:
+ DEBUG(".[15:0] <- 0x%04X\n", old_val);
+ break;
+ case ATOM_SRC_WORD8:
+ DEBUG(".[23:8] <- 0x%04X\n", old_val);
+ break;
+ case ATOM_SRC_WORD16:
+ DEBUG(".[31:16] <- 0x%04X\n", old_val);
+ break;
+ case ATOM_SRC_BYTE0:
+ DEBUG(".[7:0] <- 0x%02X\n", old_val);
+ break;
+ case ATOM_SRC_BYTE8:
+ DEBUG(".[15:8] <- 0x%02X\n", old_val);
+ break;
+ case ATOM_SRC_BYTE16:
+ DEBUG(".[23:16] <- 0x%02X\n", old_val);
+ break;
+ case ATOM_SRC_BYTE24:
+ DEBUG(".[31:24] <- 0x%02X\n", old_val);
+ break;
+ }
+}
+
+static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src, saved;
+ int dptr = *ptr;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst += src;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src, saved;
+ int dptr = *ptr;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst &= src;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
+{
+ printk("ATOM BIOS beeped!\n");
+}
+
+static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
+{
+ int idx = U8((*ptr)++);
+ if (idx < ATOM_TABLE_NAMES_CNT)
+ SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
+ else
+ SDEBUG(" table: %d\n", idx);
+ if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
+ atom_execute_table(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+}
+
+static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t saved;
+ int dptr = *ptr;
+ attr &= 0x38;
+ attr |= atom_def_dst[attr >> 3] << 6;
+ atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
+}
+
+static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src;
+ SDEBUG(" src1: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+ SDEBUG(" src2: ");
+ src = atom_get_src(ctx, attr, ptr);
+ ctx->ctx->cs_equal = (dst == src);
+ ctx->ctx->cs_above = (dst > src);
+ SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
+ ctx->ctx->cs_above ? "GT" : "LE");
+}
+
+static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t count = U8((*ptr)++);
+ SDEBUG(" count: %d\n", count);
+ if (arg == ATOM_UNIT_MICROSEC)
+ schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+ else
+ schedule_timeout_uninterruptible(msecs_to_jiffies(count));
+}
+
+static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src;
+ SDEBUG(" src1: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+ SDEBUG(" src2: ");
+ src = atom_get_src(ctx, attr, ptr);
+ if (src != 0) {
+ ctx->ctx->divmul[0] = dst / src;
+ ctx->ctx->divmul[1] = dst % src;
+ } else {
+ ctx->ctx->divmul[0] = 0;
+ ctx->ctx->divmul[1] = 0;
+ }
+}
+
+static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
+{
+ /* functionally, a nop */
+}
+
+static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
+{
+ int execute = 0, target = U16(*ptr);
+ (*ptr) += 2;
+ switch (arg) {
+ case ATOM_COND_ABOVE:
+ execute = ctx->ctx->cs_above;
+ break;
+ case ATOM_COND_ABOVEOREQUAL:
+ execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
+ break;
+ case ATOM_COND_ALWAYS:
+ execute = 1;
+ break;
+ case ATOM_COND_BELOW:
+ execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
+ break;
+ case ATOM_COND_BELOWOREQUAL:
+ execute = !ctx->ctx->cs_above;
+ break;
+ case ATOM_COND_EQUAL:
+ execute = ctx->ctx->cs_equal;
+ break;
+ case ATOM_COND_NOTEQUAL:
+ execute = !ctx->ctx->cs_equal;
+ break;
+ }
+ if (arg != ATOM_COND_ALWAYS)
+ SDEBUG(" taken: %s\n", execute ? "yes" : "no");
+ SDEBUG(" target: 0x%04X\n", target);
+ if (execute)
+ *ptr = ctx->start + target;
+}
+
+static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src1, src2, saved;
+ int dptr = *ptr;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ SDEBUG(" src1: ");
+ src1 = atom_get_src(ctx, attr, ptr);
+ SDEBUG(" src2: ");
+ src2 = atom_get_src(ctx, attr, ptr);
+ dst &= src1;
+ dst |= src2;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t src, saved;
+ int dptr = *ptr;
+ if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
+ atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+ else {
+ atom_skip_dst(ctx, arg, attr, ptr);
+ saved = 0xCDCDCDCD;
+ }
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, src, saved);
+}
+
+static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src;
+ SDEBUG(" src1: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+ SDEBUG(" src2: ");
+ src = atom_get_src(ctx, attr, ptr);
+ ctx->ctx->divmul[0] = dst * src;
+}
+
+static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
+{
+ /* nothing */
+}
+
+static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src, saved;
+ int dptr = *ptr;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst |= src;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t val = U8((*ptr)++);
+ SDEBUG("POST card output: 0x%02X\n", val);
+}
+
+static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
+{
+ printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+ printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+ printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
+{
+ int idx = U8(*ptr);
+ (*ptr)++;
+ SDEBUG(" block: %d\n", idx);
+ if (!idx)
+ ctx->ctx->data_block = 0;
+ else if (idx == 255)
+ ctx->ctx->data_block = ctx->start;
+ else
+ ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
+ SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
+}
+
+static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ SDEBUG(" fb_base: ");
+ ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
+}
+
+static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
+{
+ int port;
+ switch (arg) {
+ case ATOM_PORT_ATI:
+ port = U16(*ptr);
+ if (port < ATOM_IO_NAMES_CNT)
+ SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
+ else
+ SDEBUG(" port: %d\n", port);
+ if (!port)
+ ctx->ctx->io_mode = ATOM_IO_MM;
+ else
+ ctx->ctx->io_mode = ATOM_IO_IIO | port;
+ (*ptr) += 2;
+ break;
+ case ATOM_PORT_PCI:
+ ctx->ctx->io_mode = ATOM_IO_PCI;
+ (*ptr)++;
+ break;
+ case ATOM_PORT_SYSIO:
+ ctx->ctx->io_mode = ATOM_IO_SYSIO;
+ (*ptr)++;
+ break;
+ }
+}
+
+static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
+{
+ ctx->ctx->reg_block = U16(*ptr);
+ (*ptr) += 2;
+ SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
+}
+
+static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+ attr &= 0x38;
+ attr |= atom_def_dst[attr >> 3] << 6;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ shift = U8((*ptr)++);
+ SDEBUG(" shift: %d\n", shift);
+ dst <<= shift;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+ attr &= 0x38;
+ attr |= atom_def_dst[attr >> 3] << 6;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ shift = U8((*ptr)++);
+ SDEBUG(" shift: %d\n", shift);
+ dst >>= shift;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src, saved;
+ int dptr = *ptr;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst -= src;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t src, val, target;
+ SDEBUG(" switch: ");
+ src = atom_get_src(ctx, attr, ptr);
+ while (U16(*ptr) != ATOM_CASE_END)
+ if (U8(*ptr) == ATOM_CASE_MAGIC) {
+ (*ptr)++;
+ SDEBUG(" case: ");
+ val =
+ atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
+ ptr);
+ target = U16(*ptr);
+ if (val == src) {
+ SDEBUG(" target: %04X\n", target);
+ *ptr = ctx->start + target;
+ return;
+ }
+ (*ptr) += 2;
+ } else {
+ printk(KERN_INFO "Bad case.\n");
+ return;
+ }
+ (*ptr) += 2;
+}
+
+static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src;
+ SDEBUG(" src1: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+ SDEBUG(" src2: ");
+ src = atom_get_src(ctx, attr, ptr);
+ ctx->ctx->cs_equal = ((dst & src) == 0);
+ SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
+}
+
+static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src, saved;
+ int dptr = *ptr;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst ^= src;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
+{
+ printk(KERN_INFO "unimplemented!\n");
+}
+
+static struct {
+	void (*func)(atom_exec_context *, int *, int);
+	int arg;
+} opcode_table[ATOM_OP_CNT] = {
+	{NULL, 0},
+	{atom_op_move, ATOM_ARG_REG},
+	{atom_op_move, ATOM_ARG_PS},
+	{atom_op_move, ATOM_ARG_WS},
+	{atom_op_move, ATOM_ARG_FB},
+	{atom_op_move, ATOM_ARG_PLL},
+	{atom_op_move, ATOM_ARG_MC},
+	{atom_op_and, ATOM_ARG_REG},
+	{atom_op_and, ATOM_ARG_PS},
+	{atom_op_and, ATOM_ARG_WS},
+	{atom_op_and, ATOM_ARG_FB},
+	{atom_op_and, ATOM_ARG_PLL},
+	{atom_op_and, ATOM_ARG_MC},
+	{atom_op_or, ATOM_ARG_REG},
+	{atom_op_or, ATOM_ARG_PS},
+	{atom_op_or, ATOM_ARG_WS},
+	{atom_op_or, ATOM_ARG_FB},
+	{atom_op_or, ATOM_ARG_PLL},
+	{atom_op_or, ATOM_ARG_MC},
+	{atom_op_shl, ATOM_ARG_REG},
+	{atom_op_shl, ATOM_ARG_PS},
+	{atom_op_shl, ATOM_ARG_WS},
+	{atom_op_shl, ATOM_ARG_FB},
+	{atom_op_shl, ATOM_ARG_PLL},
+	{atom_op_shl, ATOM_ARG_MC},
+	{atom_op_shr, ATOM_ARG_REG},
+	{atom_op_shr, ATOM_ARG_PS},
+	{atom_op_shr, ATOM_ARG_WS},
+	{atom_op_shr, ATOM_ARG_FB},
+	{atom_op_shr, ATOM_ARG_PLL},
+	{atom_op_shr, ATOM_ARG_MC},
+	{atom_op_mul, ATOM_ARG_REG},
+	{atom_op_mul, ATOM_ARG_PS},
+	{atom_op_mul, ATOM_ARG_WS},
+	{atom_op_mul, ATOM_ARG_FB},
+	{atom_op_mul, ATOM_ARG_PLL},
+	{atom_op_mul, ATOM_ARG_MC},
+	{atom_op_div, ATOM_ARG_REG},
+	{atom_op_div, ATOM_ARG_PS},
+	{atom_op_div, ATOM_ARG_WS},
+	{atom_op_div, ATOM_ARG_FB},
+	{atom_op_div, ATOM_ARG_PLL},
+	{atom_op_div, ATOM_ARG_MC},
+	{atom_op_add, ATOM_ARG_REG},
+	{atom_op_add, ATOM_ARG_PS},
+	{atom_op_add, ATOM_ARG_WS},
+	{atom_op_add, ATOM_ARG_FB},
+	{atom_op_add, ATOM_ARG_PLL},
+	{atom_op_add, ATOM_ARG_MC},
+	{atom_op_sub, ATOM_ARG_REG},
+	{atom_op_sub, ATOM_ARG_PS},
+	{atom_op_sub, ATOM_ARG_WS},
+	{atom_op_sub, ATOM_ARG_FB},
+	{atom_op_sub, ATOM_ARG_PLL},
+	{atom_op_sub, ATOM_ARG_MC},
+	{atom_op_setport, ATOM_PORT_ATI},
+	{atom_op_setport, ATOM_PORT_PCI},
+	{atom_op_setport, ATOM_PORT_SYSIO},
+	{atom_op_setregblock, 0},
+	{atom_op_setfbbase, 0},
+	{atom_op_compare, ATOM_ARG_REG},
+	{atom_op_compare, ATOM_ARG_PS},
+	{atom_op_compare, ATOM_ARG_WS},
+	{atom_op_compare, ATOM_ARG_FB},
+	{atom_op_compare, ATOM_ARG_PLL},
+	{atom_op_compare, ATOM_ARG_MC},
+	{atom_op_switch, 0},
+	{atom_op_jump, ATOM_COND_ALWAYS},
+	{atom_op_jump, ATOM_COND_EQUAL},
+	{atom_op_jump, ATOM_COND_BELOW},
+	{atom_op_jump, ATOM_COND_ABOVE},
+	{atom_op_jump, ATOM_COND_BELOWOREQUAL},
+	{atom_op_jump, ATOM_COND_ABOVEOREQUAL},
+	{atom_op_jump, ATOM_COND_NOTEQUAL},
+	{atom_op_test, ATOM_ARG_REG},
+	{atom_op_test, ATOM_ARG_PS},
+	{atom_op_test, ATOM_ARG_WS},
+	{atom_op_test, ATOM_ARG_FB},
+	{atom_op_test, ATOM_ARG_PLL},
+	{atom_op_test, ATOM_ARG_MC},
+	{atom_op_delay, ATOM_UNIT_MILLISEC},
+	{atom_op_delay, ATOM_UNIT_MICROSEC},
+	{atom_op_calltable, 0},
+	{atom_op_repeat, 0},
+	{atom_op_clear, ATOM_ARG_REG},
+	{atom_op_clear, ATOM_ARG_PS},
+	{atom_op_clear, ATOM_ARG_WS},
+	{atom_op_clear, ATOM_ARG_FB},
+	{atom_op_clear, ATOM_ARG_PLL},
+	{atom_op_clear, ATOM_ARG_MC},
+	{atom_op_nop, 0},
+	{atom_op_eot, 0},
+	{atom_op_mask, ATOM_ARG_REG},
+	{atom_op_mask, ATOM_ARG_PS},
+	{atom_op_mask, ATOM_ARG_WS},
+	{atom_op_mask, ATOM_ARG_FB},
+	{atom_op_mask, ATOM_ARG_PLL},
+	{atom_op_mask, ATOM_ARG_MC},
+	{atom_op_postcard, 0},
+	{atom_op_beep, 0},
+	{atom_op_savereg, 0},
+	{atom_op_restorereg, 0},
+	{atom_op_setdatablock, 0},
+	{atom_op_xor, ATOM_ARG_REG},
+	{atom_op_xor, ATOM_ARG_PS},
+	{atom_op_xor, ATOM_ARG_WS},
+	{atom_op_xor, ATOM_ARG_FB},
+	{atom_op_xor, ATOM_ARG_PLL},
+	{atom_op_xor, ATOM_ARG_MC},
+	{atom_op_shl, ATOM_ARG_REG},
+	{atom_op_shl, ATOM_ARG_PS},
+	{atom_op_shl, ATOM_ARG_WS},
+	{atom_op_shl, ATOM_ARG_FB},
+	{atom_op_shl, ATOM_ARG_PLL},
+	{atom_op_shl, ATOM_ARG_MC},
+	{atom_op_shr, ATOM_ARG_REG},
+	{atom_op_shr, ATOM_ARG_PS},
+	{atom_op_shr, ATOM_ARG_WS},
+	{atom_op_shr, ATOM_ARG_FB},
+	{atom_op_shr, ATOM_ARG_PLL},
+	{atom_op_shr, ATOM_ARG_MC},
+	{atom_op_debug, 0},
+};
+
+void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+{
+ int base = CU16(ctx->cmd_table + 4 + 2 * index);
+ int len, ws, ps, ptr;
+ unsigned char op;
+ atom_exec_context ectx;
+
+ if (!base)
+ return;
+
+ len = CU16(base + ATOM_CT_SIZE_PTR);
+ ws = CU8(base + ATOM_CT_WS_PTR);
+ ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
+ ptr = base + ATOM_CT_CODE_PTR;
+
+ SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
+
+ /* reset reg block */
+ ctx->reg_block = 0;
+ ectx.ctx = ctx;
+ ectx.ps_shift = ps / 4;
+ ectx.start = base;
+ ectx.ps = params;
+ if (ws)
+ ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
+ else
+ ectx.ws = NULL;
+
+ debug_depth++;
+ while (1) {
+ op = CU8(ptr++);
+ if (op < ATOM_OP_NAMES_CNT)
+ SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
+ else
+ SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
+
+ if (op < ATOM_OP_CNT && op > 0)
+ opcode_table[op].func(&ectx, &ptr,
+ opcode_table[op].arg);
+ else
+ break;
+
+ if (op == ATOM_OP_EOT)
+ break;
+ }
+ debug_depth--;
+ SDEBUG("<<\n");
+
+ if (ws)
+ kfree(ectx.ws);
+}
+
+static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
+
+static void atom_index_iio(struct atom_context *ctx, int base)
+{
+ ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
+ while (CU8(base) == ATOM_IIO_START) {
+ ctx->iio[CU8(base + 1)] = base + 2;
+ base += 2;
+ while (CU8(base) != ATOM_IIO_END)
+ base += atom_iio_len[CU8(base)];
+ base += 3;
+ }
+}
+
+struct atom_context *atom_parse(struct card_info *card, void *bios)
+{
+ int base;
+ struct atom_context *ctx =
+ kzalloc(sizeof(struct atom_context), GFP_KERNEL);
+ char *str;
+ char name[512];
+ int i;
+
+ ctx->card = card;
+ ctx->bios = bios;
+
+ if (CU16(0) != ATOM_BIOS_MAGIC) {
+ printk(KERN_INFO "Invalid BIOS magic.\n");
+ kfree(ctx);
+ return NULL;
+ }
+ if (strncmp
+ (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
+ strlen(ATOM_ATI_MAGIC))) {
+ printk(KERN_INFO "Invalid ATI magic.\n");
+ kfree(ctx);
+ return NULL;
+ }
+
+ base = CU16(ATOM_ROM_TABLE_PTR);
+ if (strncmp
+ (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
+ strlen(ATOM_ROM_MAGIC))) {
+ printk(KERN_INFO "Invalid ATOM magic.\n");
+ kfree(ctx);
+ return NULL;
+ }
+
+ ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
+ ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
+ atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+
+ str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
+ while (*str && ((*str == '\n') || (*str == '\r')))
+ str++;
+ /* name string isn't always 0 terminated */
+ for (i = 0; i < 511; i++) {
+ name[i] = str[i];
+ if (name[i] < '.' || name[i] > 'z') {
+ name[i] = 0;
+ break;
+ }
+ }
+ printk(KERN_INFO "ATOM BIOS: %s\n", name);
+
+ return ctx;
+}
+
+int atom_asic_init(struct atom_context *ctx)
+{
+ int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
+ uint32_t ps[16];
+	memset(ps, 0, sizeof(ps));
+
+ ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
+ ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
+ if (!ps[0] || !ps[1])
+ return 1;
+
+ if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
+ return 1;
+ atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+
+ return 0;
+}
+
+void atom_destroy(struct atom_context *ctx)
+{
+	kfree(ctx->iio);
+ kfree(ctx);
+}
+
+void atom_parse_data_header(struct atom_context *ctx, int index,
+ uint16_t * size, uint8_t * frev, uint8_t * crev,
+ uint16_t * data_start)
+{
+ int offset = index * 2 + 4;
+ int idx = CU16(ctx->data_table + offset);
+
+ if (size)
+ *size = CU16(idx);
+ if (frev)
+ *frev = CU8(idx + 2);
+ if (crev)
+ *crev = CU8(idx + 3);
+ *data_start = idx;
+ return;
+}
+
+void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
+ uint8_t * crev)
+{
+ int offset = index * 2 + 4;
+ int idx = CU16(ctx->cmd_table + offset);
+
+ if (frev)
+ *frev = CU8(idx + 2);
+ if (crev)
+ *crev = CU8(idx + 3);
+ return;
+}
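atom.c is a small bytecode interpreter: atom_execute_table() looks up a command table by index, reads its header for the code offset and the workspace/parameter-space sizes, then fetches one opcode at a time and dispatches it through opcode_table[] until it reaches the EOT opcode, with each handler decoding its operands via atom_get_src()/atom_put_dst(). The standalone sketch below mirrors only that table-driven dispatch pattern; the opcodes and encoding are invented for illustration and are not the AtomBIOS instruction set.

/* Standalone sketch of the dispatch pattern used by atom_execute_table():
 * fetch an opcode byte, look up its handler and static argument in a
 * table, and let the handler advance the instruction pointer by however
 * many operand bytes it consumes. Invented mini-ISA, not AtomBIOS.
 */
#include <stdint.h>
#include <stdio.h>

struct exec_ctx {
	const uint8_t *code;
	int ptr;		/* instruction pointer, like atom.c's ptr  */
	uint32_t ws[4];		/* tiny workspace, like ectx.ws            */
	int running;
};

enum { OP_EOT = 0, OP_MOVE_IMM = 1, OP_ADD_WS = 2, OP_PRINT_WS = 3, OP_CNT = 4 };

static void op_eot(struct exec_ctx *c, int arg)
{
	c->running = 0;		/* end of table */
}

static void op_move_imm(struct exec_ctx *c, int arg)
{
	uint8_t dst = c->code[c->ptr++];
	uint8_t imm = c->code[c->ptr++];
	c->ws[dst] = imm;
}

static void op_add_ws(struct exec_ctx *c, int arg)
{
	uint8_t dst = c->code[c->ptr++];
	uint8_t src = c->code[c->ptr++];
	c->ws[dst] += c->ws[src];
}

static void op_print_ws(struct exec_ctx *c, int arg)
{
	uint8_t idx = c->code[c->ptr++];
	printf("WS[%u] = %u\n", (unsigned)idx, (unsigned)c->ws[idx]);
}

/* The static arg is unused here; in atom.c it selects the operand kind
 * (REG/PS/WS/...) or jump condition for the shared handler. */
static const struct {
	void (*func)(struct exec_ctx *, int);
	int arg;
} optable[OP_CNT] = {
	[OP_EOT]      = { op_eot, 0 },
	[OP_MOVE_IMM] = { op_move_imm, 0 },
	[OP_ADD_WS]   = { op_add_ws, 0 },
	[OP_PRINT_WS] = { op_print_ws, 0 },
};

int main(void)
{
	/* WS[0] = 40; WS[1] = 2; WS[0] += WS[1]; print WS[0]; end */
	const uint8_t program[] = {
		OP_MOVE_IMM, 0, 40,
		OP_MOVE_IMM, 1, 2,
		OP_ADD_WS, 0, 1,
		OP_PRINT_WS, 0,
		OP_EOT,
	};
	struct exec_ctx c = { .code = program, .ptr = 0, .running = 1 };

	while (c.running) {
		uint8_t op = c.code[c.ptr++];
		if (op >= OP_CNT)
			break;			/* unknown opcode */
		optable[op].func(&c, optable[op].arg);
	}
	return 0;				/* prints "WS[0] = 42" */
}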
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
new file mode 100644
index 00000000000..e6eb38f2bca
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_H
+#define ATOM_H
+
+#include <linux/types.h>
+#include "drmP.h"
+
+#define ATOM_BIOS_MAGIC 0xAA55
+#define ATOM_ATI_MAGIC_PTR 0x30
+#define ATOM_ATI_MAGIC " 761295520"
+#define ATOM_ROM_TABLE_PTR 0x48
+
+#define ATOM_ROM_MAGIC "ATOM"
+#define ATOM_ROM_MAGIC_PTR 4
+
+#define ATOM_ROM_MSG_PTR 0x10
+#define ATOM_ROM_CMD_PTR 0x1E
+#define ATOM_ROM_DATA_PTR 0x20
+
+#define ATOM_CMD_INIT 0
+#define ATOM_CMD_SETSCLK 0x0A
+#define ATOM_CMD_SETMCLK 0x0B
+#define ATOM_CMD_SETPCLK 0x0C
+
+#define ATOM_DATA_FWI_PTR 0xC
+#define ATOM_DATA_IIO_PTR 0x32
+
+#define ATOM_FWI_DEFSCLK_PTR 8
+#define ATOM_FWI_DEFMCLK_PTR 0xC
+#define ATOM_FWI_MAXSCLK_PTR 0x24
+#define ATOM_FWI_MAXMCLK_PTR 0x28
+
+#define ATOM_CT_SIZE_PTR 0
+#define ATOM_CT_WS_PTR 4
+#define ATOM_CT_PS_PTR 5
+#define ATOM_CT_PS_MASK 0x7F
+#define ATOM_CT_CODE_PTR 6
+
+#define ATOM_OP_CNT 123
+#define ATOM_OP_EOT 91
+
+#define ATOM_CASE_MAGIC 0x63
+#define ATOM_CASE_END 0x5A5A
+
+#define ATOM_ARG_REG 0
+#define ATOM_ARG_PS 1
+#define ATOM_ARG_WS 2
+#define ATOM_ARG_FB 3
+#define ATOM_ARG_ID 4
+#define ATOM_ARG_IMM 5
+#define ATOM_ARG_PLL 6
+#define ATOM_ARG_MC 7
+
+#define ATOM_SRC_DWORD 0
+#define ATOM_SRC_WORD0 1
+#define ATOM_SRC_WORD8 2
+#define ATOM_SRC_WORD16 3
+#define ATOM_SRC_BYTE0 4
+#define ATOM_SRC_BYTE8 5
+#define ATOM_SRC_BYTE16 6
+#define ATOM_SRC_BYTE24 7
+
+#define ATOM_WS_QUOTIENT 0x40
+#define ATOM_WS_REMAINDER 0x41
+#define ATOM_WS_DATAPTR 0x42
+#define ATOM_WS_SHIFT 0x43
+#define ATOM_WS_OR_MASK 0x44
+#define ATOM_WS_AND_MASK 0x45
+#define ATOM_WS_FB_WINDOW 0x46
+#define ATOM_WS_ATTRIBUTES 0x47
+
+#define ATOM_IIO_NOP 0
+#define ATOM_IIO_START 1
+#define ATOM_IIO_READ 2
+#define ATOM_IIO_WRITE 3
+#define ATOM_IIO_CLEAR 4
+#define ATOM_IIO_SET 5
+#define ATOM_IIO_MOVE_INDEX 6
+#define ATOM_IIO_MOVE_ATTR 7
+#define ATOM_IIO_MOVE_DATA 8
+#define ATOM_IIO_END 9
+
+#define ATOM_IO_MM 0
+#define ATOM_IO_PCI 1
+#define ATOM_IO_SYSIO 2
+#define ATOM_IO_IIO 0x80
+
+struct card_info {
+ struct drm_device *dev;
+ void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
+ uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
+ void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
+ uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
+ void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
+ uint32_t (* pll_read)(struct card_info *, uint32_t); /* filled by driver */
+};
+
+struct atom_context {
+ struct card_info *card;
+ void *bios;
+ uint32_t cmd_table, data_table;
+ uint16_t *iio;
+
+ uint16_t data_block;
+ uint32_t fb_base;
+ uint32_t divmul[2];
+ uint16_t io_attr;
+ uint16_t reg_block;
+ uint8_t shift;
+ int cs_equal, cs_above;
+ int io_mode;
+};
+
+extern int atom_debug;
+
+struct atom_context *atom_parse(struct card_info *, void *);
+void atom_execute_table(struct atom_context *, int, uint32_t *);
+int atom_asic_init(struct atom_context *);
+void atom_destroy(struct atom_context *);
+void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
+void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
+#include "atom-types.h"
+#include "atombios.h"
+#include "ObjectID.h"
+
+#endif
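atom.h is the whole contract between this interpreter and the rest of the radeon driver: the driver fills a struct card_info with its register, PLL and MC accessors, hands the BIOS image to atom_parse(), runs command tables such as ATOM_CMD_INIT, and finally calls atom_destroy(). The sketch below is a hedged usage example of that flow with all accessors stubbed out; the demo_-prefixed names are invented for illustration and do not exist in the radeon driver.

/* Driver-side usage sketch for the atom.h API above. The register, PLL
 * and MC accessors are stubs; a real driver routes them to its MMIO
 * helpers, and keeps the card_info alive as long as the atom_context.
 * Assumes this is built inside the radeon driver, so printk and the
 * errno constants arrive via the headers atom.h already includes.
 */
#include "atom.h"

static uint32_t demo_reg_read(struct card_info *info, uint32_t reg)
{
	return 0;	/* stub: real code reads the MMIO register */
}

static void demo_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	/* stub: real code writes the MMIO register */
}

/* PLL and MC accesses go through indirect index/data registers on real
 * hardware; stubbed here as well. */
static uint32_t demo_pll_read(struct card_info *info, uint32_t reg) { return 0; }
static void demo_pll_write(struct card_info *info, uint32_t reg, uint32_t val) { }
static uint32_t demo_mc_read(struct card_info *info, uint32_t reg) { return 0; }
static void demo_mc_write(struct card_info *info, uint32_t reg, uint32_t val) { }

int demo_atom_bringup(struct drm_device *dev, void *bios_image)
{
	struct card_info info = {
		.dev       = dev,
		.reg_read  = demo_reg_read,
		.reg_write = demo_reg_write,
		.pll_read  = demo_pll_read,
		.pll_write = demo_pll_write,
		.mc_read   = demo_mc_read,
		.mc_write  = demo_mc_write,
	};
	struct atom_context *ctx;
	uint8_t frev, crev;

	ctx = atom_parse(&info, bios_image);
	if (!ctx)
		return -EINVAL;		/* bad BIOS magic or tables */

	/* Query the ASIC init table revision, then run it. */
	atom_parse_cmd_header(ctx, ATOM_CMD_INIT, &frev, &crev);
	printk(KERN_INFO "ASIC_Init table revision %u.%u\n", frev, crev);

	if (atom_asic_init(ctx)) {
		atom_destroy(ctx);
		return -EIO;		/* init table missing or clocks unreadable */
	}

	atom_destroy(ctx);
	return 0;
}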
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
new file mode 100644
index 00000000000..cf67928abbc
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -0,0 +1,4785 @@
+/*
+ * Copyright 2006-2007 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/****************************************************************************/
+/*Portion I: Definitions shared between VBIOS and Driver */
+/****************************************************************************/
+
+#ifndef _ATOMBIOS_H
+#define _ATOMBIOS_H
+
+#define ATOM_VERSION_MAJOR 0x00020000
+#define ATOM_VERSION_MINOR 0x00000002
+
+#define ATOM_HEADER_VERSION (ATOM_VERSION_MAJOR | ATOM_VERSION_MINOR)
+
+/* Endianness must be specified before this header is included;
+ * atom-types.h derives ATOM_BIG_ENDIAN from the kernel's __BIG_ENDIAN.
+ */
+#ifndef ATOM_BIG_ENDIAN
+#error Endian not specified
+#endif
+
+#ifdef _H2INC
+#ifndef ULONG
+typedef unsigned long ULONG;
+#endif
+
+#ifndef UCHAR
+typedef unsigned char UCHAR;
+#endif
+
+#ifndef USHORT
+typedef unsigned short USHORT;
+#endif
+#endif
+
+#define ATOM_DAC_A 0
+#define ATOM_DAC_B 1
+#define ATOM_EXT_DAC 2
+
+#define ATOM_CRTC1 0
+#define ATOM_CRTC2 1
+
+#define ATOM_DIGA 0
+#define ATOM_DIGB 1
+
+#define ATOM_PPLL1 0
+#define ATOM_PPLL2 1
+
+#define ATOM_SCALER1 0
+#define ATOM_SCALER2 1
+
+#define ATOM_SCALER_DISABLE 0
+#define ATOM_SCALER_CENTER 1
+#define ATOM_SCALER_EXPANSION 2
+#define ATOM_SCALER_MULTI_EX 3
+
+#define ATOM_DISABLE 0
+#define ATOM_ENABLE 1
+#define ATOM_LCD_BLOFF (ATOM_DISABLE+2)
+#define ATOM_LCD_BLON (ATOM_ENABLE+2)
+#define ATOM_LCD_BL_BRIGHTNESS_CONTROL (ATOM_ENABLE+3)
+#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
+#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
+#define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
+
+#define ATOM_BLANKING 1
+#define ATOM_BLANKING_OFF 0
+
+#define ATOM_CURSOR1 0
+#define ATOM_CURSOR2 1
+
+#define ATOM_ICON1 0
+#define ATOM_ICON2 1
+
+#define ATOM_CRT1 0
+#define ATOM_CRT2 1
+
+#define ATOM_TV_NTSC 1
+#define ATOM_TV_NTSCJ 2
+#define ATOM_TV_PAL 3
+#define ATOM_TV_PALM 4
+#define ATOM_TV_PALCN 5
+#define ATOM_TV_PALN 6
+#define ATOM_TV_PAL60 7
+#define ATOM_TV_SECAM 8
+#define ATOM_TV_CV 16
+
+#define ATOM_DAC1_PS2 1
+#define ATOM_DAC1_CV 2
+#define ATOM_DAC1_NTSC 3
+#define ATOM_DAC1_PAL 4
+
+#define ATOM_DAC2_PS2 ATOM_DAC1_PS2
+#define ATOM_DAC2_CV ATOM_DAC1_CV
+#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC
+#define ATOM_DAC2_PAL ATOM_DAC1_PAL
+
+#define ATOM_PM_ON 0
+#define ATOM_PM_STANDBY 1
+#define ATOM_PM_SUSPEND 2
+#define ATOM_PM_OFF 3
+
+/* Bit0:{=0:single, =1:dual},
+ Bit1 {=0:666RGB, =1:888RGB},
+ Bit2:3:{Grey level}
+ Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}*/
+
+#define ATOM_PANEL_MISC_DUAL 0x00000001
+#define ATOM_PANEL_MISC_888RGB 0x00000002
+#define ATOM_PANEL_MISC_GREY_LEVEL 0x0000000C
+#define ATOM_PANEL_MISC_FPDI 0x00000010
+#define ATOM_PANEL_MISC_GREY_LEVEL_SHIFT 2
+#define ATOM_PANEL_MISC_SPATIAL 0x00000020
+#define ATOM_PANEL_MISC_TEMPORAL 0x00000040
+#define ATOM_PANEL_MISC_API_ENABLED 0x00000080
+
+#define MEMTYPE_DDR1 "DDR1"
+#define MEMTYPE_DDR2 "DDR2"
+#define MEMTYPE_DDR3 "DDR3"
+#define MEMTYPE_DDR4 "DDR4"
+
+#define ASIC_BUS_TYPE_PCI "PCI"
+#define ASIC_BUS_TYPE_AGP "AGP"
+#define ASIC_BUS_TYPE_PCIE "PCI_EXPRESS"
+
+/* Maximum size of the FireGL flag string */
+
+#define ATOM_FIREGL_FLAG_STRING "FGL"	/* Flag used to enable FireGL Support */
+#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3	/* strlen(ATOM_FIREGL_FLAG_STRING) */
+
+#define ATOM_FAKE_DESKTOP_STRING "DSK"	/* Flag used to enable mobile ASIC on Desktop */
+#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
+
+#define ATOM_M54T_FLAG_STRING "M54T"	/* Flag used to enable M54T Support */
+#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4	/* strlen(ATOM_M54T_FLAG_STRING) */
+
+#define HW_ASSISTED_I2C_STATUS_FAILURE 2
+#define HW_ASSISTED_I2C_STATUS_SUCCESS 1
+
+#pragma pack(1)	/* BIOS data must use byte alignment */
+
+/* Define offset to location of ROM header. */
+
+#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER 0x00000048L
+#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L
+
+#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94
+#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
+#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f
+#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e
+
+/* Common header for all ROM Data tables.
+ Every table pointed _ATOM_MASTER_DATA_TABLE has this common header.
+ And the pointer actually points to this header. */
+
+typedef struct _ATOM_COMMON_TABLE_HEADER {
+ USHORT usStructureSize;
+	UCHAR ucTableFormatRevision;	/* Change when the parser is no longer backward compatible */
+	UCHAR ucTableContentRevision;	/* Change only when the table content changes but the firmware
+					   image can't be updated, so the driver must carry the new table */
+} ATOM_COMMON_TABLE_HEADER;
+
+typedef struct _ATOM_ROM_HEADER {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios,
+ atombios should init it as "ATOM", don't change the position */
+ USHORT usBiosRuntimeSegmentAddress;
+ USHORT usProtectedModeInfoOffset;
+ USHORT usConfigFilenameOffset;
+ USHORT usCRC_BlockOffset;
+ USHORT usBIOS_BootupMessageOffset;
+ USHORT usInt10Offset;
+ USHORT usPciBusDevInitCode;
+ USHORT usIoBaseAddress;
+ USHORT usSubsystemVendorID;
+ USHORT usSubsystemID;
+ USHORT usPCI_InfoOffset;
+ USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
+ USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
+ UCHAR ucExtendedFunctionCode;
+ UCHAR ucReserved;
+} ATOM_ROM_HEADER;
+
+/*==============================Command Table Portion==================================== */
+
+#ifdef UEFI_BUILD
+#define UTEMP USHORT
+#define USHORT void*
+#endif
+
+typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES {
+ USHORT ASIC_Init; /* Function Table, used by various SW components,latest version 1.1 */
+ USHORT GetDisplaySurfaceSize; /* Atomic Table, Used by Bios when enabling HW ICON */
+ USHORT ASIC_RegistersInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
+ USHORT VRAM_BlockVenderDetection; /* Atomic Table, used only by Bios */
+ USHORT DIGxEncoderControl; /* Only used by Bios */
+ USHORT MemoryControllerInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
+ USHORT EnableCRTCMemReq; /* Function Table,directly used by various SW components,latest version 2.1 */
+ USHORT MemoryParamAdjust; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed */
+ USHORT DVOEncoderControl; /* Function Table,directly used by various SW components,latest version 1.2 */
+ USHORT GPIOPinControl; /* Atomic Table, only used by Bios */
+ USHORT SetEngineClock; /*Function Table,directly used by various SW components,latest version 1.1 */
+ USHORT SetMemoryClock; /* Function Table,directly used by various SW components,latest version 1.1 */
+ USHORT SetPixelClock; /*Function Table,directly used by various SW components,latest version 1.2 */
+ USHORT DynamicClockGating; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
+ USHORT ResetMemoryDLL; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
+ USHORT ResetMemoryDevice; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
+ USHORT MemoryPLLInit;
+ USHORT AdjustDisplayPll; /* only used by Bios */
+ USHORT AdjustMemoryController; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
+ USHORT EnableASIC_StaticPwrMgt; /* Atomic Table, only used by Bios */
+ USHORT ASIC_StaticPwrMgtStatusChange; /* Obsolete, only used by Bios */
+ USHORT DAC_LoadDetection; /* Atomic Table, directly used by various SW components,latest version 1.2 */
+ USHORT LVTMAEncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.3 */
+ USHORT LCD1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT DAC1EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT DAC2EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT DVOOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT CV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT GetConditionalGoldenSetting; /* only used by Bios */
+ USHORT TVEncoderControl; /* Function Table,directly used by various SW components,latest version 1.1 */
+ USHORT TMDSAEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */
+ USHORT LVDSEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */
+ USHORT TV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT EnableScaler; /* Atomic Table, used only by Bios */
+ USHORT BlankCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT EnableCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT GetPixelClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT EnableVGA_Render; /* Function Table,directly used by various SW components,latest version 1.1 */
+ USHORT EnableVGA_Access; /* Obsolete , only used by Bios */
+ USHORT SetCRTC_Timing; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT SetCRTC_OverScan; /* Atomic Table, used by various SW components,latest version 1.1 */
+ USHORT SetCRTC_Replication; /* Atomic Table, used only by Bios */
+ USHORT SelectCRTC_Source; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT EnableGraphSurfaces; /* Atomic Table, used only by Bios */
+ USHORT UpdateCRTC_DoubleBufferRegisters;
+ USHORT LUT_AutoFill; /* Atomic Table, only used by Bios */
+ USHORT EnableHW_IconCursor; /* Atomic Table, only used by Bios */
+ USHORT GetMemoryClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT GetEngineClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT SetCRTC_UsingDTDTiming; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT ExternalEncoderControl; /* Atomic Table, directly used by various SW components,latest version 2.1 */
+ USHORT LVTMAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT VRAM_BlockDetectionByStrap; /* Atomic Table, used only by Bios */
+ USHORT MemoryCleanUp; /* Atomic Table, only used by Bios */
+ USHORT ProcessI2cChannelTransaction; /* Function Table,only used by Bios */
+ USHORT WriteOneByteToHWAssistedI2C; /* Function Table,indirectly used by various SW components */
+ USHORT ReadHWAssistedI2CStatus; /* Atomic Table, indirectly used by various SW components */
+ USHORT SpeedFanControl; /* Function Table,indirectly used by various SW components,called from ASIC_Init */
+ USHORT PowerConnectorDetection; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT MC_Synchronization; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
+ USHORT ComputeMemoryEnginePLL; /* Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock */
+ USHORT MemoryRefreshConversion; /* Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock */
+ USHORT VRAM_GetCurrentInfoBlock; /* Atomic Table, used only by Bios */
+ USHORT DynamicMemorySettings; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
+ USHORT MemoryTraining; /* Atomic Table, used only by Bios */
+ USHORT EnableSpreadSpectrumOnPPLL; /* Atomic Table, directly used by various SW components,latest version 1.2 */
+ USHORT TMDSAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT SetVoltage; /* Function Table,directly and/or indirectly used by various SW components,latest version 1.1 */
+ USHORT DAC1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT DAC2OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
+ USHORT SetupHWAssistedI2CStatus; /* Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" */
+ USHORT ClockSource; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
+ USHORT MemoryDeviceInit; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
+ USHORT EnableYUV; /* Atomic Table, indirectly used by various SW components,called from EnableVGARender */
+ USHORT DIG1EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
+ USHORT DIG2EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
+ USHORT DIG1TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
+ USHORT DIG2TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
+ USHORT ProcessAuxChannelTransaction; /* Function Table,only used by Bios */
+ USHORT DPEncoderService; /* Function Table,only used by Bios */
+} ATOM_MASTER_LIST_OF_COMMAND_TABLES;
+
+/* For backward compatibility */
+#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
+#define UNIPHYTransmitterControl DIG1TransmitterControl
+#define LVTMATransmitterControl DIG2TransmitterControl
+#define SetCRTC_DPM_State GetConditionalGoldenSetting
+#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
+
+typedef struct _ATOM_MASTER_COMMAND_TABLE {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
+} ATOM_MASTER_COMMAND_TABLE;
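+
+/* Illustrative sketch only -- not part of the original AtomBIOS header.
+ * Each member of ATOM_MASTER_LIST_OF_COMMAND_TABLES is a 16-bit offset
+ * (0 when the table is absent), assumed to be relative to the start of the
+ * BIOS image, so the list can be treated as an array of USHORTs and indexed
+ * by command.  atom_get_cmd_table() is a hypothetical helper. */
+static inline ATOM_COMMON_TABLE_HEADER *
+atom_get_cmd_table(UCHAR *bios, ATOM_MASTER_COMMAND_TABLE *master, int index)
+{
+	USHORT *offsets = (USHORT *)&master->ListOfCommandTables;
+	USHORT off = offsets[index];
+
+	/* A zero offset means the command table is not present in this image. */
+	return off ? (ATOM_COMMON_TABLE_HEADER *)(bios + off) : (ATOM_COMMON_TABLE_HEADER *)0;
+}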
+
+/****************************************************************************/
+/* Structures used in every command table */
+/****************************************************************************/
+typedef struct _ATOM_TABLE_ATTRIBUTE {
+#if ATOM_BIG_ENDIAN
+ USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */
+ USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
+ USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
+#else
+ USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
+ USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
+ USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */
+#endif
+} ATOM_TABLE_ATTRIBUTE;
+
+typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS {
+ ATOM_TABLE_ATTRIBUTE sbfAccess;
+ USHORT susAccess;
+} ATOM_TABLE_ATTRIBUTE_ACCESS;
+
+/****************************************************************************/
+/* Common header for all command tables. */
+/* Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. */
+/* And the pointer actually points to this header. */
+/****************************************************************************/
+typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER {
+ ATOM_COMMON_TABLE_HEADER CommonHeader;
+ ATOM_TABLE_ATTRIBUTE TableAttribute;
+} ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
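+
+/* Illustrative sketch only -- not part of the original AtomBIOS header.
+ * ATOM_TABLE_ATTRIBUTE_ACCESS lets a caller load the raw 16-bit attribute
+ * word of a command table and then pick the bitfields apart; the helper
+ * below is hypothetical and only demonstrates the access pattern. */
+static inline USHORT atom_table_ws_ps(USHORT raw_attribute, USHORT *ws, USHORT *ps)
+{
+	ATOM_TABLE_ATTRIBUTE_ACCESS a;
+
+	a.susAccess = raw_attribute;		/* raw word read from the table */
+	*ws = a.sbfAccess.WS_SizeInBytes;	/* workspace size field */
+	*ps = a.sbfAccess.PS_SizeInBytes;	/* parameter space size field */
+	return a.sbfAccess.UpdatedByUtility;	/* utility-updated flag */
+}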
+
+/****************************************************************************/
+/* Structures used by ComputeMemoryEnginePLLTable */
+/****************************************************************************/
+#define COMPUTE_MEMORY_PLL_PARAM 1
+#define COMPUTE_ENGINE_PLL_PARAM 2
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS {
+ ULONG ulClock; /* When returned, it's the re-calculated clock based on the given Fb_div, Post_Div and ref_div */
+ UCHAR ucAction; /* 0:reserved //1:Memory //2:Engine */
+ UCHAR ucReserved; /* may expand to return larger Fbdiv later */
+ UCHAR ucFbDiv; /* return value */
+ UCHAR ucPostDiv; /* return value */
+} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 {
+ ULONG ulClock; /* When return, [23:0] return real clock */
+ UCHAR ucAction; /* 0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. it returns ref_div to be written to register */
+ USHORT usFbDiv; /* return Feedback value to be written to register */
+ UCHAR ucPostDiv; /* return post div to be written to register */
+} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
+#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+
+#define SET_CLOCK_FREQ_MASK 0x00FFFFFF /* Clock change tables only take bit [23:0] as the requested clock value */
+#define USE_NON_BUS_CLOCK_MASK 0x01000000 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
+#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
+#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
+#define FIRST_TIME_CHANGE_CLOCK 0x08000000 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
+#define SKIP_SW_PROGRAM_PLL 0x10000000 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
+#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK
+
+#define b3USE_NON_BUS_CLOCK_MASK 0x01 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
+#define b3USE_MEMORY_SELF_REFRESH 0x02 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
+#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
+#define b3FIRST_TIME_CHANGE_CLOCK 0x08 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
+#define b3SKIP_SW_PROGRAM_PLL 0x10 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
+
+typedef struct _ATOM_COMPUTE_CLOCK_FREQ {
+#if ATOM_BIG_ENDIAN
+ ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
+ ULONG ulClockFreq:24; /* in unit of 10kHz */
+#else
+ ULONG ulClockFreq:24; /* in unit of 10kHz */
+ ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
+#endif
+} ATOM_COMPUTE_CLOCK_FREQ;
+
+typedef struct _ATOM_S_MPLL_FB_DIVIDER {
+ USHORT usFbDivFrac;
+ USHORT usFbDiv;
+} ATOM_S_MPLL_FB_DIVIDER;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 {
+ union {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock; /* Input Parameter */
+ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; /* Output Parameter */
+ };
+ UCHAR ucRefDiv; /* Output Parameter */
+ UCHAR ucPostDiv; /* Output Parameter */
+ UCHAR ucCntlFlag; /* Output Parameter */
+ UCHAR ucReserved;
+} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+
+/* ucCntlFlag */
+#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1
+#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2
+#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4
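+
+/* Illustrative sketch only -- not part of the original AtomBIOS header.
+ * Filling the V3 input union for an engine clock request: the frequency
+ * (in 10 kHz units) occupies bits [23:0], matching SET_CLOCK_FREQ_MASK,
+ * while the top byte selects memory vs. engine PLL.  The helper name and
+ * calling convention are hypothetical. */
+static inline void atom_fill_engine_pll_v3(COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 *args,
+					   ULONG clock_10khz)
+{
+	args->ulClock.ulClockFreq = clock_10khz & SET_CLOCK_FREQ_MASK;	/* e.g. 60000 = 600 MHz */
+	args->ulClock.ulComputeClockFlag = COMPUTE_ENGINE_PLL_PARAM;
+}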
+
+typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+ ULONG ulReserved[2];
+} DYNAMICE_MEMORY_SETTINGS_PARAMETER;
+
+typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+ ULONG ulMemoryClock;
+ ULONG ulReserved;
+} DYNAMICE_ENGINE_SETTINGS_PARAMETER;
+
+/****************************************************************************/
+/* Structures used by SetEngineClockTable */
+/****************************************************************************/
+typedef struct _SET_ENGINE_CLOCK_PARAMETERS {
+ ULONG ulTargetEngineClock; /* In 10Khz unit */
+} SET_ENGINE_CLOCK_PARAMETERS;
+
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION {
+ ULONG ulTargetEngineClock; /* In 10Khz unit */
+ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+} SET_ENGINE_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/
+/* Structures used by SetMemoryClockTable */
+/****************************************************************************/
+typedef struct _SET_MEMORY_CLOCK_PARAMETERS {
+ ULONG ulTargetMemoryClock; /* In 10Khz unit */
+} SET_MEMORY_CLOCK_PARAMETERS;
+
+typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION {
+ ULONG ulTargetMemoryClock; /* In 10Khz unit */
+ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+} SET_MEMORY_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/
+/* Structures used by ASIC_Init.ctb */
+/****************************************************************************/
+typedef struct _ASIC_INIT_PARAMETERS {
+ ULONG ulDefaultEngineClock; /* In 10Khz unit */
+ ULONG ulDefaultMemoryClock; /* In 10Khz unit */
+} ASIC_INIT_PARAMETERS;
+
+typedef struct _ASIC_INIT_PS_ALLOCATION {
+ ASIC_INIT_PARAMETERS sASICInitClocks;
+ SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; /* Caller doesn't need to init this structure */
+} ASIC_INIT_PS_ALLOCATION;
+
+/****************************************************************************/
+/* Structure used by DynamicClockGatingTable.ctb */
+/****************************************************************************/
+typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS {
+ UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
+ UCHAR ucPadding[3];
+} DYNAMIC_CLOCK_GATING_PARAMETERS;
+#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
+
+/****************************************************************************/
+/* Structure used by EnableASIC_StaticPwrMgtTable.ctb */
+/****************************************************************************/
+typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS {
+ UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
+ UCHAR ucPadding[3];
+} ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
+#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+
+/****************************************************************************/
+/* Structures used by DAC_LoadDetectionTable.ctb */
+/****************************************************************************/
+typedef struct _DAC_LOAD_DETECTION_PARAMETERS {
+ USHORT usDeviceID; /* {ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} */
+ UCHAR ucDacType; /* {ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} */
+ UCHAR ucMisc; /* Valid only when table revision =1.3 and above */
+} DAC_LOAD_DETECTION_PARAMETERS;
+
+/* DAC_LOAD_DETECTION_PARAMETERS.ucMisc */
+#define DAC_LOAD_MISC_YPrPb 0x01
+
+typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION {
+ DAC_LOAD_DETECTION_PARAMETERS sDacload;
+ ULONG Reserved[2]; /* Don't set this one, allocation for EXT DAC */
+} DAC_LOAD_DETECTION_PS_ALLOCATION;
+
+/****************************************************************************/
+/* Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb */
+/****************************************************************************/
+typedef struct _DAC_ENCODER_CONTROL_PARAMETERS {
+ USHORT usPixelClock; /* in 10KHz; for bios convenience */
+ UCHAR ucDacStandard; /* See definition of ATOM_DACx_xxx, For DCE3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) */
+ UCHAR ucAction; /* 0: turn off encoder */
+ /* 1: setup and turn on encoder */
+ /* 7: ATOM_ENCODER_INIT Initialize DAC */
+} DAC_ENCODER_CONTROL_PARAMETERS;
+
+#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS
+
+/****************************************************************************/
+/* Structures used by DIG1EncoderControlTable */
+/* DIG2EncoderControlTable */
+/* ExternalEncoderControlTable */
+/****************************************************************************/
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
+ USHORT usPixelClock; /* in 10KHz; for bios convenience */
+ UCHAR ucConfig;
+ /* [2] Link Select: */
+ /* =0: PHY linkA if bfLane<3 */
+ /* =1: PHY linkB if bfLanes<3 */
+ /* =0: PHY linkA+B if bfLanes=3 */
+ /* [3] Transmitter Sel */
+ /* =0: UNIPHY or PCIEPHY */
+ /* =1: LVTMA */
+ UCHAR ucAction; /* =0: turn off encoder */
+ /* =1: turn on encoder */
+ UCHAR ucEncoderMode;
+ /* =0: DP encoder */
+ /* =1: LVDS encoder */
+ /* =2: DVI encoder */
+ /* =3: HDMI encoder */
+ /* =4: SDVO encoder */
+ UCHAR ucLaneNum; /* how many lanes to enable */
+ UCHAR ucReserved[2];
+} DIG_ENCODER_CONTROL_PARAMETERS;
+#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS
+#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS
+
+/* ucConfig */
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
+#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04
+#define ATOM_ENCODER_CONFIG_LINKA 0x00
+#define ATOM_ENCODER_CONFIG_LINKB 0x04
+#define ATOM_ENCODER_CONFIG_LINKA_B ATOM_TRANSMITTER_CONFIG_LINKA
+#define ATOM_ENCODER_CONFIG_LINKB_A ATOM_ENCODER_CONFIG_LINKB
+#define ATOM_ENCODER_CONFIG_TRANSMITTER_SEL_MASK 0x08
+#define ATOM_ENCODER_CONFIG_UNIPHY 0x00
+#define ATOM_ENCODER_CONFIG_LVTMA 0x08
+#define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00
+#define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08
+#define ATOM_ENCODER_CONFIG_DIGB 0x80 /* VBIOS Internal use, outside SW should set this bit=0 */
+/* ucAction */
+/* ATOM_ENABLE: Enable Encoder */
+/* ATOM_DISABLE: Disable Encoder */
+
+/* ucEncoderMode */
+#define ATOM_ENCODER_MODE_DP 0
+#define ATOM_ENCODER_MODE_LVDS 1
+#define ATOM_ENCODER_MODE_DVI 2
+#define ATOM_ENCODER_MODE_HDMI 3
+#define ATOM_ENCODER_MODE_SDVO 4
+#define ATOM_ENCODER_MODE_TV 13
+#define ATOM_ENCODER_MODE_CV 14
+#define ATOM_ENCODER_MODE_CRT 15
+
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 {
+#if ATOM_BIG_ENDIAN
+ UCHAR ucReserved1:2;
+ UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */
+ UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */
+ UCHAR ucReserved:1;
+ UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */
+#else
+ UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */
+ UCHAR ucReserved:1;
+ UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */
+ UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */
+ UCHAR ucReserved1:2;
+#endif
+} ATOM_DIG_ENCODER_CONFIG_V2;
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
+ USHORT usPixelClock; /* in 10KHz; for bios convenience */
+ ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
+ UCHAR ucAction;
+ UCHAR ucEncoderMode;
+ /* =0: DP encoder */
+ /* =1: LVDS encoder */
+ /* =2: DVI encoder */
+ /* =3: HDMI encoder */
+ /* =4: SDVO encoder */
+ UCHAR ucLaneNum; /* how many lanes to enable */
+ UCHAR ucReserved[2];
+} DIG_ENCODER_CONTROL_PARAMETERS_V2;
+
+/* ucConfig */
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01
+#define ATOM_ENCODER_CONFIG_V2_LINK_SEL_MASK 0x04
+#define ATOM_ENCODER_CONFIG_V2_LINKA 0x00
+#define ATOM_ENCODER_CONFIG_V2_LINKB 0x04
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER_SEL_MASK 0x18
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER1 0x00
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10
+
+/****************************************************************************/
+/* Structures used by UNIPHYTransmitterControlTable */
+/* LVTMATransmitterControlTable */
+/* DVOOutputControlTable */
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE {
+ UCHAR ucLaneSel;
+ UCHAR ucLaneSet;
+} ATOM_DP_VS_MODE;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
+ union {
+ USHORT usPixelClock; /* in 10KHz; for bios convenience */
+ USHORT usInitInfo; /* when initializing uniphy, the lower 8 bits are used for the connector type defined in objectid.h */
+ ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */
+ };
+ UCHAR ucConfig;
+ /* [0]=0: 4 lane Link, */
+ /* =1: 8 lane Link ( Dual Links TMDS ) */
+ /* [1]=0: InCoherent mode */
+ /* =1: Coherent Mode */
+ /* [2] Link Select: */
+ /* =0: PHY linkA if bfLane<3 */
+ /* =1: PHY linkB if bfLanes<3 */
+ /* =0: PHY linkA+B if bfLanes=3 */
+ /* [5:4]PCIE lane Sel */
+ /* =0: lane 0~3 or 0~7 */
+ /* =1: lane 4~7 */
+ /* =2: lane 8~11 or 8~15 */
+ /* =3: lane 12~15 */
+ UCHAR ucAction; /* =0: turn off encoder */
+ /* =1: turn on encoder */
+ UCHAR ucReserved[4];
+} DIG_TRANSMITTER_CONTROL_PARAMETERS;
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+/* ucInitInfo */
+#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff
+
+/* ucConfig */
+#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01
+#define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02
+#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB 0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04
+
+#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
+#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
+#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
+
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PCIE 0x20
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_XTALIN 0x30
+#define ATOM_TRANSMITTER_CONFIG_LANE_SEL_MASK 0xc0
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_3 0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_7 0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_4_7 0x40
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_11 0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0
+
+/* ucAction */
+#define ATOM_TRANSMITTER_ACTION_DISABLE 0
+#define ATOM_TRANSMITTER_ACTION_ENABLE 1
+#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2
+#define ATOM_TRANSMITTER_ACTION_LCD_BLON 3
+#define ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL 4
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_START 5
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_STOP 6
+#define ATOM_TRANSMITTER_ACTION_INIT 7
+#define ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT 8
+#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9
+#define ATOM_TRANSMITTER_ACTION_SETUP 10
+#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11
+
+/* Following are used for DigTransmitterControlTable ver1.2 */
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 {
+#if ATOM_BIG_ENDIAN
+ UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
+ /* =1 Dig Transmitter 2 ( Uniphy CD ) */
+ /* =2 Dig Transmitter 3 ( Uniphy EF ) */
+ UCHAR ucReserved:1;
+ UCHAR fDPConnector:1; /* bit4=0: DP connector =1: non-DP connector */
+ UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
+ UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
+ /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
+
+ UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
+ UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */
+#else
+ UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */
+ UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
+ UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
+ /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
+ UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
+ UCHAR fDPConnector:1; /* bit4=0: DP connector =1: non-DP connector */
+ UCHAR ucReserved:1;
+ UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
+ /* =1 Dig Transmitter 2 ( Uniphy CD ) */
+ /* =2 Dig Transmitter 3 ( Uniphy EF ) */
+#endif
+} ATOM_DIG_TRANSMITTER_CONFIG_V2;
+
+/* ucConfig */
+/* Bit0 */
+#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01
+
+/* Bit1 */
+#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02
+
+/* Bit2 */
+#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04
+
+/* Bit3 */
+#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
+
+/* Bit4 */
+#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10
+
+/* Bit7:6 */
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 /* AB */
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 /* CD */
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 /* EF */
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 {
+ union {
+ USHORT usPixelClock; /* in 10KHz; for bios convenience */
+ USHORT usInitInfo; /* when initializing uniphy, the lower 8 bits are used for the connector type defined in objectid.h */
+ ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */
+ };
+ ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
+ UCHAR ucAction; /* define as ATOM_TRANSMITER_ACTION_XXX */
+ UCHAR ucReserved[4];
+} DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
+
+/****************************************************************************/
+/* Structures used by DAC1OutputControlTable */
+/* DAC2OutputControlTable */
+/* LVTMAOutputControlTable (Before DCE30) */
+/* TMDSAOutputControlTable (Before DCE30) */
+/****************************************************************************/
+typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
+ UCHAR ucAction; /* Possible input:ATOM_ENABLE||ATOM_DISABLE */
+ /* When the display is LCD, in addition to above: */
+ /* ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| */
+ /* ATOM_LCD_SELFTEST_STOP */
+
+ UCHAR aucPadding[3]; /* padding to DWORD aligned */
+} DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
+
+#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+
+#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CV1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define TV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define TV1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define LCD1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define LCD1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DVO_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
+#define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+/****************************************************************************/
+/* Structures used by BlankCRTCTable */
+/****************************************************************************/
+typedef struct _BLANK_CRTC_PARAMETERS {
+ UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
+ UCHAR ucBlanking; /* ATOM_BLANKING or ATOM_BLANKINGOFF */
+ USHORT usBlackColorRCr;
+ USHORT usBlackColorGY;
+ USHORT usBlackColorBCb;
+} BLANK_CRTC_PARAMETERS;
+#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS
+
+/****************************************************************************/
+/* Structures used by EnableCRTCTable */
+/* EnableCRTCMemReqTable */
+/* UpdateCRTC_DoubleBufferRegistersTable */
+/****************************************************************************/
+typedef struct _ENABLE_CRTC_PARAMETERS {
+ UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
+ UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
+ UCHAR ucPadding[2];
+} ENABLE_CRTC_PARAMETERS;
+#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS
+
+/****************************************************************************/
+/* Structures used by SetCRTC_OverScanTable */
+/****************************************************************************/
+typedef struct _SET_CRTC_OVERSCAN_PARAMETERS {
+ USHORT usOverscanRight; /* right */
+ USHORT usOverscanLeft; /* left */
+ USHORT usOverscanBottom; /* bottom */
+ USHORT usOverscanTop; /* top */
+ UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
+ UCHAR ucPadding[3];
+} SET_CRTC_OVERSCAN_PARAMETERS;
+#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS
+
+/****************************************************************************/
+/* Structures used by SetCRTC_ReplicationTable */
+/****************************************************************************/
+typedef struct _SET_CRTC_REPLICATION_PARAMETERS {
+ UCHAR ucH_Replication; /* horizontal replication */
+ UCHAR ucV_Replication; /* vertical replication */
+ UCHAR usCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
+ UCHAR ucPadding;
+} SET_CRTC_REPLICATION_PARAMETERS;
+#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS
+
+/****************************************************************************/
+/* Structures used by SelectCRTC_SourceTable */
+/****************************************************************************/
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS {
+ UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
+ UCHAR ucDevice; /* ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... */
+ UCHAR ucPadding[2];
+} SELECT_CRTC_SOURCE_PARAMETERS;
+#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS
+
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 {
+ UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
+ UCHAR ucEncoderID; /* DAC1/DAC2/TVOUT/DIG1/DIG2/DVO */
+ UCHAR ucEncodeMode; /* Encoding mode, only valid when using DIG1/DIG2/DVO */
+ UCHAR ucPadding;
+} SELECT_CRTC_SOURCE_PARAMETERS_V2;
+
+/* ucEncoderID */
+/* #define ASIC_INT_DAC1_ENCODER_ID 0x00 */
+/* #define ASIC_INT_TV_ENCODER_ID 0x02 */
+/* #define ASIC_INT_DIG1_ENCODER_ID 0x03 */
+/* #define ASIC_INT_DAC2_ENCODER_ID 0x04 */
+/* #define ASIC_EXT_TV_ENCODER_ID 0x06 */
+/* #define ASIC_INT_DVO_ENCODER_ID 0x07 */
+/* #define ASIC_INT_DIG2_ENCODER_ID 0x09 */
+/* #define ASIC_EXT_DIG_ENCODER_ID 0x05 */
+
+/* ucEncodeMode */
+/* #define ATOM_ENCODER_MODE_DP 0 */
+/* #define ATOM_ENCODER_MODE_LVDS 1 */
+/* #define ATOM_ENCODER_MODE_DVI 2 */
+/* #define ATOM_ENCODER_MODE_HDMI 3 */
+/* #define ATOM_ENCODER_MODE_SDVO 4 */
+/* #define ATOM_ENCODER_MODE_TV 13 */
+/* #define ATOM_ENCODER_MODE_CV 14 */
+/* #define ATOM_ENCODER_MODE_CRT 15 */
+
+/****************************************************************************/
+/* Structures used by SetPixelClockTable */
+/* GetPixelClockTable */
+/****************************************************************************/
+/* Major revision=1., Minor revision=1 */
+typedef struct _PIXEL_CLOCK_PARAMETERS {
+ USHORT usPixelClock; /* in 10kHz unit; for bios convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
+ /* 0 means disable PPLL */
+ USHORT usRefDiv; /* Reference divider */
+ USHORT usFbDiv; /* feedback divider */
+ UCHAR ucPostDiv; /* post divider */
+ UCHAR ucFracFbDiv; /* fractional feedback divider */
+ UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPLL2 */
+ UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATOM_NONPJITTER */
+ UCHAR ucCRTC; /* Which CRTC uses this Ppll */
+ UCHAR ucPadding;
+} PIXEL_CLOCK_PARAMETERS;
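+
+/* Illustrative sketch only -- not part of the original AtomBIOS header.
+ * It just re-applies the formula quoted in the comment above,
+ * PixelClock = RefClk * FB_Div / (Ref_Div * Post_Div), with everything in
+ * 10 kHz units and the fractional feedback divider ignored.  ref_clk_10khz
+ * would come from the firmware info table; the helper is hypothetical. */
+static inline ULONG atom_pclk_from_dividers(const PIXEL_CLOCK_PARAMETERS *p,
+					    ULONG ref_clk_10khz)
+{
+	if (!p->usRefDiv || !p->ucPostDiv)
+		return 0;	/* usPixelClock == 0 means "disable the PPLL" */
+	return (ref_clk_10khz * p->usFbDiv) / (p->usRefDiv * p->ucPostDiv);
+}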
+
+/* Major revision=1., Minor revision=2, add ucMiscInfo */
+/* ucMiscInfo: */
+#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
+#define MISC_DEVICE_INDEX_MASK 0xF0
+#define MISC_DEVICE_INDEX_SHIFT 4
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V2 {
+ USHORT usPixelClock; /* in 10kHz unit; for bios convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
+ /* 0 means disable PPLL */
+ USHORT usRefDiv; /* Reference divider */
+ USHORT usFbDiv; /* feedback divider */
+ UCHAR ucPostDiv; /* post divider */
+ UCHAR ucFracFbDiv; /* fractional feedback divider */
+ UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPLL2 */
+ UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATOM_NONPJITTER */
+ UCHAR ucCRTC; /* Which CRTC uses this Ppll */
+ UCHAR ucMiscInfo; /* Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog */
+} PIXEL_CLOCK_PARAMETERS_V2;
+
+/* Major revision=1., Minor revision=3, structure/definition change */
+/* ucEncoderMode: */
+/* ATOM_ENCODER_MODE_DP */
+/* ATOM_ENCODER_MODE_LVDS */
+/* ATOM_ENCODER_MODE_DVI */
+/* ATOM_ENCODER_MODE_HDMI */
+/* ATOM_ENCODER_MODE_SDVO */
+/* ATOM_ENCODER_MODE_TV 13 */
+/* ATOM_ENCODER_MODE_CV 14 */
+/* ATOM_ENCODER_MODE_CRT 15 */
+
+/* ucDVOConfig */
+/* #define DVO_ENCODER_CONFIG_RATE_SEL 0x01 */
+/* #define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 */
+/* #define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 */
+/* #define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c */
+/* #define DVO_ENCODER_CONFIG_LOW12BIT 0x00 */
+/* #define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 */
+/* #define DVO_ENCODER_CONFIG_24BIT 0x08 */
+
+/* ucMiscInfo: also changed, see below */
+#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04
+#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V3 {
+ USHORT usPixelClock; /* in 10kHz unit; for bios convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
+ /* 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. */
+ USHORT usRefDiv; /* Reference divider */
+ USHORT usFbDiv; /* feedback divider */
+ UCHAR ucPostDiv; /* post divider */
+ UCHAR ucFracFbDiv; /* fractional feedback divider */
+ UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPLL2 */
+ UCHAR ucTransmitterId; /* graphic encoder id defined in objectId.h */
+ union {
+ UCHAR ucEncoderMode; /* encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ */
+ UCHAR ucDVOConfig; /* when use DVO, need to know SDR/DDR, 12bit or 24bit */
+ };
+ UCHAR ucMiscInfo; /* bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel */
+ /* bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source */
+} PIXEL_CLOCK_PARAMETERS_V3;
+
+#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2
+#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST
+
+/****************************************************************************/
+/* Structures used by AdjustDisplayPllTable */
+/****************************************************************************/
+typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS {
+ USHORT usPixelClock;
+ UCHAR ucTransmitterID;
+ UCHAR ucEncodeMode;
+ union {
+ UCHAR ucDVOConfig; /* if DVO, need to pass the link rate and whether the output is the low 12 bits or 24 bits */
+ UCHAR ucConfig; /* if non-DVO, not defined yet */
+ };
+ UCHAR ucReserved[3];
+} ADJUST_DISPLAY_PLL_PARAMETERS;
+
+#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10
+
+#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS
+
+/****************************************************************************/
+/* Structures used by EnableYUVTable */
+/****************************************************************************/
+typedef struct _ENABLE_YUV_PARAMETERS {
+ UCHAR ucEnable; /* ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) */
+ UCHAR ucCRTC; /* Which CRTC needs this YUV or RGB format */
+ UCHAR ucPadding[2];
+} ENABLE_YUV_PARAMETERS;
+#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
+
+/****************************************************************************/
+/* Structures used by GetMemoryClockTable */
+/****************************************************************************/
+typedef struct _GET_MEMORY_CLOCK_PARAMETERS {
+ ULONG ulReturnMemoryClock; /* current memory speed in 10KHz unit */
+} GET_MEMORY_CLOCK_PARAMETERS;
+#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS
+
+/****************************************************************************/
+/* Structures used by GetEngineClockTable */
+/****************************************************************************/
+typedef struct _GET_ENGINE_CLOCK_PARAMETERS {
+ ULONG ulReturnEngineClock; /* current engine speed in 10KHz unit */
+} GET_ENGINE_CLOCK_PARAMETERS;
+#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS
+
+/****************************************************************************/
+/* The following structures and constants may be obsolete */
+/****************************************************************************/
+/* Maximum 8 bytes; the data read in will be placed in the parameter space. */
+/* The read operation is successful when the parameter space is non-zero, otherwise the read operation failed */
+typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS {
+ USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
+ USHORT usVRAMAddress; /* Address in Frame Buffer where to place the raw EDID */
+ USHORT usStatus; /* When used as output: lower byte EDID checksum, high byte hardware status */
+ /* When used as input: lower byte as 'byte to read':currently limited to 128byte or 1byte */
+ UCHAR ucSlaveAddr; /* Read from which slave */
+ UCHAR ucLineNumber; /* Read from which HW assisted line */
+} READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
+#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+
+#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0
+#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1
+#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2
+#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3
+#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4
+
+typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS {
+ USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
+ USHORT usByteOffset; /* Write to which byte */
+ /* Upper portion of usByteOffset is Format of data */
+ /* 1bytePS+offsetPS */
+ /* 2bytesPS+offsetPS */
+ /* blockID+offsetPS */
+ /* blockID+offsetID */
+ /* blockID+counterID+offsetID */
+ UCHAR ucData; /* PS data1 */
+ UCHAR ucStatus; /* Status byte 1=success, 2=failure, Also is used as PS data2 */
+ UCHAR ucSlaveAddr; /* Write to which slave */
+ UCHAR ucLineNumber; /* Write from which HW assisted line */
+} WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
+
+#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS {
+ USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
+ UCHAR ucSlaveAddr; /* Write to which slave */
+ UCHAR ucLineNumber; /* Write from which HW assisted line */
+} SET_UP_HW_I2C_DATA_PARAMETERS;
+
+/**************************************************************************/
+#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+/****************************************************************************/
+/* Structures used by PowerConnectorDetectionTable */
+/****************************************************************************/
+typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS {
+ UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */
+ UCHAR ucPwrBehaviorId;
+ USHORT usPwrBudget; /* how much power the board currently boots to, in units of watts */
+} POWER_CONNECTOR_DETECTION_PARAMETERS;
+
+typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION {
+ UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */
+ UCHAR ucReserved;
+ USHORT usPwrBudget; /* how much power the board currently boots to, in units of watts */
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+} POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
+
+/****************************LVDS SS Command Table Definitions**********************/
+
+/****************************************************************************/
+/* Structures used by EnableSpreadSpectrumOnPPLLTable */
+/****************************************************************************/
+typedef struct _ENABLE_LVDS_SS_PARAMETERS {
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
+ UCHAR ucSpreadSpectrumStepSize_Delay; /* bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY */
+ UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
+ UCHAR ucPadding[3];
+} ENABLE_LVDS_SS_PARAMETERS;
+
+/* ucTableFormatRevision=1,ucTableContentRevision=2 */
+typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 {
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
+ UCHAR ucSpreadSpectrumStep; /* */
+ UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
+ UCHAR ucSpreadSpectrumDelay;
+ UCHAR ucSpreadSpectrumRange;
+ UCHAR ucPadding;
+} ENABLE_LVDS_SS_PARAMETERS_V2;
+
+/* This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. */
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL {
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
+ UCHAR ucSpreadSpectrumStep; /* */
+ UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
+ UCHAR ucSpreadSpectrumDelay;
+ UCHAR ucSpreadSpectrumRange;
+ UCHAR ucPpll; /* ATOM_PPLL1/ATOM_PPLL2 */
+} ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+
+#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+/**************************************************************************/
+
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION {
+ PIXEL_CLOCK_PARAMETERS sPCLKInput;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved; /* Caller doesn't need to init this portion */
+} SET_PIXEL_CLOCK_PS_ALLOCATION;
+
+#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION
+
+/****************************************************************************/
+/* Structures used by ### */
+/****************************************************************************/
+typedef struct _MEMORY_TRAINING_PARAMETERS {
+ ULONG ulTargetMemoryClock; /* In 10Khz unit */
+} MEMORY_TRAINING_PARAMETERS;
+#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
+
+/****************************LVDS and other encoder command table definitions **********************/
+
+/****************************************************************************/
+/* Structures used by LVDSEncoderControlTable (Before DCE30) */
+/* LVTMAEncoderControlTable (Before DCE30) */
+/* TMDSAEncoderControlTable (Before DCE30) */
+/****************************************************************************/
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS {
+ USHORT usPixelClock; /* in 10KHz; for bios convenience */
+ UCHAR ucMisc; /* bit0=0: Enable single link */
+ /* =1: Enable dual link */
+ /* Bit1=0: 666RGB */
+ /* =1: 888RGB */
+ UCHAR ucAction; /* 0: turn off encoder */
+ /* 1: setup and turn on encoder */
+} LVDS_ENCODER_CONTROL_PARAMETERS;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
+
+/* ucTableFormatRevision=1,ucTableContentRevision=2 */
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
+ USHORT usPixelClock; /* in 10KHz; for bios convenience */
+ UCHAR ucMisc; /* see PANEL_ENCODER_MISC_xx definitions below */
+ UCHAR ucAction; /* 0: turn off encoder */
+ /* 1: setup and turn on encoder */
+ UCHAR ucTruncate; /* bit0=0: Disable truncate */
+ /* =1: Enable truncate */
+ /* bit4=0: 666RGB */
+ /* =1: 888RGB */
+ UCHAR ucSpatial; /* bit0=0: Disable spatial dithering */
+ /* =1: Enable spatial dithering */
+ /* bit4=0: 666RGB */
+ /* =1: 888RGB */
+ UCHAR ucTemporal; /* bit0=0: Disable temporal dithering */
+ /* =1: Enable temporal dithering */
+ /* bit4=0: 666RGB */
+ /* =1: 888RGB */
+ /* bit5=0: Gray level 2 */
+ /* =1: Gray level 4 */
+ UCHAR ucFRC; /* bit4=0: 25FRC_SEL pattern E */
+ /* =1: 25FRC_SEL pattern F */
+ /* bit6:5=0: 50FRC_SEL pattern A */
+ /* =1: 50FRC_SEL pattern B */
+ /* =2: 50FRC_SEL pattern C */
+ /* =3: 50FRC_SEL pattern D */
+ /* bit7=0: 75FRC_SEL pattern E */
+ /* =1: 75FRC_SEL pattern F */
+} LVDS_ENCODER_CONTROL_PARAMETERS_V2;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
+
+#define LVDS_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS1_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
+
+/****************************************************************************/
+/* Structures used by ### */
+/****************************************************************************/
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS {
+ UCHAR ucEnable; /* Enable or Disable External TMDS encoder */
+ UCHAR ucMisc; /* Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} */
+ UCHAR ucPadding[2];
+} ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION {
+ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
+} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
+
+#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 {
+ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
+} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
+ DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+} EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
+
+/****************************************************************************/
+/* Structures used by DVOEncoderControlTable */
+/****************************************************************************/
+/* ucTableFormatRevision=1,ucTableContentRevision=3 */
+
+/* ucDVOConfig: */
+#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
+#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
+#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
+#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c
+#define DVO_ENCODER_CONFIG_LOW12BIT 0x00
+#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
+#define DVO_ENCODER_CONFIG_24BIT 0x08
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
+ USHORT usPixelClock;
+ UCHAR ucDVOConfig;
+ UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
+ UCHAR ucReseved[4];
+} DVO_ENCODER_CONTROL_PARAMETERS_V3;
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
+
+/* ucTableFormatRevision=1 */
+/* ucTableContentRevision=3 structure is not changed but usMisc adds bit 1 as another input for */
+/* bit1=0: non-coherent mode */
+/* =1: coherent mode */
+
+/* ========================================================================================== */
+/* Only change this block the next time the encoder parameter definitions change! */
+#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS1_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS2_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION
+
+/* ========================================================================================== */
+#define PANEL_ENCODER_MISC_DUAL 0x01
+#define PANEL_ENCODER_MISC_COHERENT 0x02
+#define PANEL_ENCODER_MISC_TMDS_LINKB 0x04
+#define PANEL_ENCODER_MISC_HDMI_TYPE 0x08
+
+#define PANEL_ENCODER_ACTION_DISABLE ATOM_DISABLE
+#define PANEL_ENCODER_ACTION_ENABLE ATOM_ENABLE
+#define PANEL_ENCODER_ACTION_COHERENTSEQ (ATOM_ENABLE+1)
+
+#define PANEL_ENCODER_TRUNCATE_EN 0x01
+#define PANEL_ENCODER_TRUNCATE_DEPTH 0x10
+#define PANEL_ENCODER_SPATIAL_DITHER_EN 0x01
+#define PANEL_ENCODER_SPATIAL_DITHER_DEPTH 0x10
+#define PANEL_ENCODER_TEMPORAL_DITHER_EN 0x01
+#define PANEL_ENCODER_TEMPORAL_DITHER_DEPTH 0x10
+#define PANEL_ENCODER_TEMPORAL_LEVEL_4 0x20
+#define PANEL_ENCODER_25FRC_MASK 0x10
+#define PANEL_ENCODER_25FRC_E 0x00
+#define PANEL_ENCODER_25FRC_F 0x10
+#define PANEL_ENCODER_50FRC_MASK 0x60
+#define PANEL_ENCODER_50FRC_A 0x00
+#define PANEL_ENCODER_50FRC_B 0x20
+#define PANEL_ENCODER_50FRC_C 0x40
+#define PANEL_ENCODER_50FRC_D 0x60
+#define PANEL_ENCODER_75FRC_MASK 0x80
+#define PANEL_ENCODER_75FRC_E 0x00
+#define PANEL_ENCODER_75FRC_F 0x80
+
+/****************************************************************************/
+/* Structures used by SetVoltageTable */
+/****************************************************************************/
+#define SET_VOLTAGE_TYPE_ASIC_VDDC 1
+#define SET_VOLTAGE_TYPE_ASIC_MVDDC 2
+#define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3
+#define SET_VOLTAGE_TYPE_ASIC_VDDCI 4
+#define SET_VOLTAGE_INIT_MODE 5
+#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 /* Gets the Max. voltage for the soldered Asic */
+
+#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4
+
+#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
+
+typedef struct _SET_VOLTAGE_PARAMETERS {
+ UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
+ UCHAR ucVoltageMode; /* To set all, to set source A or source B or ... */
+ UCHAR ucVoltageIndex; /* An index to tell which voltage level */
+ UCHAR ucReserved;
+} SET_VOLTAGE_PARAMETERS;
+
+typedef struct _SET_VOLTAGE_PARAMETERS_V2 {
+ UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
+ UCHAR ucVoltageMode; /* Not used, maybe use for state machine for different power modes */
+ USHORT usVoltageLevel; /* real voltage level */
+} SET_VOLTAGE_PARAMETERS_V2;
+
+typedef struct _SET_VOLTAGE_PS_ALLOCATION {
+ SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+} SET_VOLTAGE_PS_ALLOCATION;
+
+/****************************************************************************/
+/* Structures used by TVEncoderControlTable */
+/****************************************************************************/
+typedef struct _TV_ENCODER_CONTROL_PARAMETERS {
+ USHORT usPixelClock; /* in 10KHz; for bios convenience */
+ UCHAR ucTvStandard; /* See definition "ATOM_TV_NTSC ..." */
+ UCHAR ucAction; /* 0: turn off encoder */
+ /* 1: setup and turn on encoder */
+} TV_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION {
+ TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Don't set this one */
+} TV_ENCODER_CONTROL_PS_ALLOCATION;
+
+/* ==============================Data Table Portion==================================== */
+
+#ifdef UEFI_BUILD
+#define UTEMP USHORT
+#define USHORT void*
+#endif
+
+/****************************************************************************/
+/* Structure used in Data.mtb */
+/****************************************************************************/
+typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES {
+ USHORT UtilityPipeLine; /* Offset for the utility to get parser info,Don't change this position! */
+ USHORT MultimediaCapabilityInfo; /* Only used by MM Lib,latest version 1.1, not configurable from Bios, need to include the table to build Bios */
+ USHORT MultimediaConfigInfo; /* Only used by MM Lib,latest version 2.1, not configurable from Bios, need to include the table to build Bios */
+ USHORT StandardVESA_Timing; /* Only used by Bios */
+ USHORT FirmwareInfo; /* Shared by various SW components,latest version 1.4 */
+ USHORT DAC_Info; /* Will be obsolete from R600 */
+ USHORT LVDS_Info; /* Shared by various SW components,latest version 1.1 */
+ USHORT TMDS_Info; /* Will be obsolete from R600 */
+ USHORT AnalogTV_Info; /* Shared by various SW components,latest version 1.1 */
+ USHORT SupportedDevicesInfo; /* Will be obsolete from R600 */
+ USHORT GPIO_I2C_Info; /* Shared by various SW components,latest version 1.2 will be used from R600 */
+ USHORT VRAM_UsageByFirmware; /* Shared by various SW components,latest version 1.3 will be used from R600 */
+ USHORT GPIO_Pin_LUT; /* Shared by various SW components,latest version 1.1 */
+ USHORT VESA_ToInternalModeLUT; /* Only used by Bios */
+ USHORT ComponentVideoInfo; /* Shared by various SW components,latest version 2.1 will be used from R600 */
+ USHORT PowerPlayInfo; /* Shared by various SW components,latest version 2.1,new design from R600 */
+ USHORT CompassionateData; /* Will be obsolete from R600 */
+ USHORT SaveRestoreInfo; /* Only used by Bios */
+ USHORT PPLL_SS_Info; /* Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info */
+ USHORT OemInfo; /* Defined and used by external SW, should be obsolete soon */
+ USHORT XTMDS_Info; /* Will be obsolete from R600 */
+ USHORT MclkSS_Info; /* Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used */
+ USHORT Object_Header; /* Shared by various SW components,latest version 1.1 */
+ USHORT IndirectIOAccess; /* Only used by Bios,this table position can't change at all!! */
+ USHORT MC_InitParameter; /* Only used by command table */
+ USHORT ASIC_VDDC_Info; /* Will be obsolete from R600 */
+ USHORT ASIC_InternalSS_Info; /* New table name from R600, used to be called "ASIC_MVDDC_Info" */
+ USHORT TV_VideoMode; /* Only used by command table */
+ USHORT VRAM_Info; /* Only used by command table, latest version 1.3 */
+ USHORT MemoryTrainingInfo; /* Used for VBIOS and Diag utility for memory training purpose since R600. the new table rev start from 2.1 */
+ USHORT IntegratedSystemInfo; /* Shared by various SW components */
+ USHORT ASIC_ProfilingInfo; /* New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 */
+ USHORT VoltageObjectInfo; /* Shared by various SW components, latest version 1.1 */
+ USHORT PowerSourceInfo; /* Shared by various SW components, latest version 1.1 */
+} ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+#ifdef UEFI_BUILD
+#define USHORT UTEMP
+#endif
+
+typedef struct _ATOM_MASTER_DATA_TABLE {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
+} ATOM_MASTER_DATA_TABLE;
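+
+/*
+ * Illustrative sketch (not part of the ATOM definitions): locating one data
+ * table through ATOM_MASTER_LIST_OF_DATA_TABLES.  This assumes each USHORT
+ * member holds that table's byte offset from the start of the VBIOS image,
+ * with 0 meaning "table not present"; the "bios" base pointer and the master
+ * data table offset are inputs the caller already obtained from the ROM header.
+ *
+ *   static ATOM_COMMON_TABLE_HEADER *
+ *   example_get_firmware_info(UCHAR *bios, USHORT master_data_table_offset)
+ *   {
+ *           ATOM_MASTER_DATA_TABLE *master =
+ *                   (ATOM_MASTER_DATA_TABLE *)(bios + master_data_table_offset);
+ *           USHORT off = master->ListOfDataTables.FirmwareInfo;
+ *
+ *           return off ? (ATOM_COMMON_TABLE_HEADER *)(bios + off) : NULL;
+ *   }
+ */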
+
+/****************************************************************************/
+/* Structure used in MultimediaCapabilityInfoTable */
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulSignature; /* HW info table signature string "$ATI" */
+ UCHAR ucI2C_Type; /* I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) */
+ UCHAR ucTV_OutInfo; /* Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) */
+ UCHAR ucVideoPortInfo; /* Provides the video port capabilities */
+ UCHAR ucHostPortInfo; /* Provides host port configuration information */
+} ATOM_MULTIMEDIA_CAPABILITY_INFO;
+
+/****************************************************************************/
+/* Structure used in MultimediaConfigInfoTable */
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulSignature; /* MM info table signature string "$MMT" */
+ UCHAR ucTunerInfo; /* Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) */
+ UCHAR ucAudioChipInfo; /* List the audio chip type (3:0) product type (4) and OEM revision (7:5) */
+ UCHAR ucProductID; /* Defines as OEM ID or ATI board ID dependent on product type setting */
+ UCHAR ucMiscInfo1; /* Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) */
+ UCHAR ucMiscInfo2; /* I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) */
+ UCHAR ucMiscInfo3; /* Video Decoder Type (3:0) Video In Standard/Crystal (7:4) */
+ UCHAR ucMiscInfo4; /* Video Decoder Host Config (2:0) reserved (7:3) */
+ UCHAR ucVideoInput0Info; /* Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
+ UCHAR ucVideoInput1Info; /* Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
+ UCHAR ucVideoInput2Info; /* Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
+ UCHAR ucVideoInput3Info; /* Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
+ UCHAR ucVideoInput4Info; /* Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
+} ATOM_MULTIMEDIA_CONFIG_INFO;
+
+/****************************************************************************/
+/* Structures used in FirmwareInfoTable */
+/****************************************************************************/
+
+/* usBIOSCapability Definition: */
+/* Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */
+/* Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */
+/* Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */
+/* Others: Reserved */
+#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001
+#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002
+#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010
+#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020
+#define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040
+#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080
+#define ATOM_BIOS_INFO_HYPERMEMORY_SUPPORT 0x0100
+#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00
+#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
+#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000
+
+#ifndef _H2INC
+
+/* Please don't add or expand this bitfield structure below; this one will retire soon! */
+typedef struct _ATOM_FIRMWARE_CAPABILITY {
+#if ATOM_BIG_ENDIAN
+ USHORT Reserved:3;
+ USHORT HyperMemory_Size:4;
+ USHORT HyperMemory_Support:1;
+ USHORT PPMode_Assigned:1;
+ USHORT WMI_SUPPORT:1;
+ USHORT GPUControlsBL:1;
+ USHORT EngineClockSS_Support:1;
+ USHORT MemoryClockSS_Support:1;
+ USHORT ExtendedDesktopSupport:1;
+ USHORT DualCRTC_Support:1;
+ USHORT FirmwarePosted:1;
+#else
+ USHORT FirmwarePosted:1;
+ USHORT DualCRTC_Support:1;
+ USHORT ExtendedDesktopSupport:1;
+ USHORT MemoryClockSS_Support:1;
+ USHORT EngineClockSS_Support:1;
+ USHORT GPUControlsBL:1;
+ USHORT WMI_SUPPORT:1;
+ USHORT PPMode_Assigned:1;
+ USHORT HyperMemory_Support:1;
+ USHORT HyperMemory_Size:4;
+ USHORT Reserved:3;
+#endif
+} ATOM_FIRMWARE_CAPABILITY;
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
+ ATOM_FIRMWARE_CAPABILITY sbfAccess;
+ USHORT susAccess;
+} ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#else
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
+ USHORT susAccess;
+} ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#endif
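+
+/*
+ * Illustrative sketch (not part of the ATOM definitions): the union above lets
+ * the same USHORT be read either through the bitfield or as a raw value tested
+ * against the ATOM_BIOS_INFO_xxx masks; the endian-dependent field ordering
+ * keeps the two views consistent.  Using the raw view also works under _H2INC,
+ * where the bitfield is not available.
+ *
+ *   static int example_bl_controlled_by_gpu(const ATOM_FIRMWARE_CAPABILITY_ACCESS *cap)
+ *   {
+ *           return (cap->susAccess & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU) != 0;
+ *   }
+ */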
+
+typedef struct _ATOM_FIRMWARE_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; /* In 10Khz unit */
+ ULONG ulDefaultMemoryClock; /* In 10Khz unit */
+ ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
+ ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
+ ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
+ ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
+ ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
+ ULONG ulASICMaxEngineClock; /* In 10Khz unit */
+ ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucPadding[3]; /* Don't use them */
+ ULONG aulReservedForBIOS[3]; /* Don't use them */
+ USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
+ USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
+ USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
+ USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
+ USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
+ USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
+ USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
+ USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
+ USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
+ USHORT usMinPixelClockPLL_Output; /* In 10Khz unit, the definitions above can't change!!! */
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; /* In 10Khz unit */
+ USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
+ UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
+ UCHAR ucDesign_ID; /* Indicate what is the board design */
+ UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
+} ATOM_FIRMWARE_INFO;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_2 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; /* In 10Khz unit */
+ ULONG ulDefaultMemoryClock; /* In 10Khz unit */
+ ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
+ ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
+ ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
+ ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
+ ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
+ ULONG ulASICMaxEngineClock; /* In 10Khz unit */
+ ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ UCHAR ucPadding[2]; /* Don't use them */
+ ULONG aulReservedForBIOS[2]; /* Don't use them */
+ ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
+ USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
+ USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
+ USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
+ USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
+ USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
+ USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
+ USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
+ USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
+ USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
+ USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; /* In 10Khz unit */
+ USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
+ UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
+ UCHAR ucDesign_ID; /* Indicate what is the board design */
+ UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
+} ATOM_FIRMWARE_INFO_V1_2;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_3 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; /* In 10Khz unit */
+ ULONG ulDefaultMemoryClock; /* In 10Khz unit */
+ ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
+ ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
+ ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
+ ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
+ ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
+ ULONG ulASICMaxEngineClock; /* In 10Khz unit */
+ ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ UCHAR ucPadding[2]; /* Don't use them */
+ ULONG aulReservedForBIOS; /* Don't use them */
+ ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */
+ ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
+ USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
+ USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
+ USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
+ USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
+ USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
+ USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
+ USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
+ USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
+ USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
+ USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; /* In 10Khz unit */
+ USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
+ UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
+ UCHAR ucDesign_ID; /* Indicate what is the board design */
+ UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
+} ATOM_FIRMWARE_INFO_V1_3;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_4 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; /* In 10Khz unit */
+ ULONG ulDefaultMemoryClock; /* In 10Khz unit */
+ ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
+ ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
+ ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
+ ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
+ ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
+ ULONG ulASICMaxEngineClock; /* In 10Khz unit */
+ ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; /* In MV unit */
+ USHORT usLcdMinPixelClockPLL_Output; /* In MHz unit */
+ USHORT usLcdMaxPixelClockPLL_Output; /* In MHz unit */
+ ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */
+ ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
+ USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
+ USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
+ USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
+ USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
+ USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
+ USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
+ USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
+ USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
+ USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
+ USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; /* In 10Khz unit */
+ USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
+ UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
+ UCHAR ucDesign_ID; /* Indicate what is the board design */
+ UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
+} ATOM_FIRMWARE_INFO_V1_4;
+
+#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V1_4
+
+/****************************************************************************/
+/* Structures used in IntegratedSystemInfoTable */
+/****************************************************************************/
+#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2
+#define IGP_CAP_FLAG_AC_CARD 0x4
+#define IGP_CAP_FLAG_SDVO_CARD 0x8
+#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; /* in 10kHz unit */
+ ULONG ulBootUpMemoryClock; /* in 10kHz unit */
+ ULONG ulMaxSystemMemoryClock; /* in 10kHz unit */
+ ULONG ulMinSystemMemoryClock; /* in 10kHz unit */
+ UCHAR ucNumberOfCyclesInPeriodHi;
+ UCHAR ucLCDTimingSel; /* =0: not valid; !=0: select this timing descriptor from the LCD EDID. */
+ USHORT usReserved1;
+ USHORT usInterNBVoltageLow; /* An intermediate PWM value to set the voltage */
+ USHORT usInterNBVoltageHigh; /* Another intermediate PWM value to set the voltage */
+ ULONG ulReserved[2];
+
+ USHORT usFSBClock; /* In MHz unit */
+ USHORT usCapabilityFlag; /* Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable */
+ /* Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card */
+ /* Bit[4]==1: P/2 mode, ==0: P/1 mode */
+ USHORT usPCIENBCfgReg7; /* bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal */
+ USHORT usK8MemoryClock; /* in MHz unit */
+ USHORT usK8SyncStartDelay; /* in 0.01 us unit */
+ USHORT usK8DataReturnTime; /* in 0.01 us unit */
+ UCHAR ucMaxNBVoltage;
+ UCHAR ucMinNBVoltage;
+ UCHAR ucMemoryType; /* [7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved */
+ UCHAR ucNumberOfCyclesInPeriod; /* CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod */
+ UCHAR ucStartingPWM_HighTime; /* CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime */
+ UCHAR ucHTLinkWidth; /* 16 bit vs. 8 bit */
+ UCHAR ucMaxNBVoltageHigh;
+ UCHAR ucMinNBVoltageHigh;
+} ATOM_INTEGRATED_SYSTEM_INFO;
+
+/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
+ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock
+ For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
+ulMaxSystemMemoryClock: For Intel IGP,it's the max freq from the memory SPD if memory runs in ASYNC mode; otherwise (SYNC mode) it's 0
+ For AMD IGP,for now this can be 0
+ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode; otherwise (SYNC mode) it's 0
+ For AMD IGP,for now this can be 0
+
+usFSBClock: For Intel IGP,it's FSB Freq
+ For AMD IGP,it's HT Link Speed
+
+usK8MemoryClock: For AMD IGP only. For RevF CPU, set it to 200
+usK8SyncStartDelay: For AMD IGP only. Memory access latency in K8, required for watermark calculation
+usK8DataReturnTime: For AMD IGP only. Memory access latency in K8, required for watermark calculation
+
+VC:Voltage Control
+ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value.
+ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0
+
+ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
+usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
+*/
+
+/*
+The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST;
+Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
+Enough space is reserved that table revisions should never need to change. Whenever needed, a GPU SW component can use the reserved portion for new data entries.
+
+SW components can access the IGP system info structure in the same way as before
+*/
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; /* in 10kHz unit */
+ ULONG ulReserved1[2]; /* must be 0x0 for the reserved */
+ ULONG ulBootUpUMAClock; /* in 10kHz unit */
+ ULONG ulBootUpSidePortClock; /* in 10kHz unit */
+ ULONG ulMinSidePortClock; /* in 10kHz unit */
+ ULONG ulReserved2[6]; /* must be 0x0 for the reserved */
+ ULONG ulSystemConfig; /* see explanation below */
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulDDISlot1Config;
+ ULONG ulDDISlot2Config;
+ UCHAR ucMemoryType; /* [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved */
+ UCHAR ucUMAChannelNumber;
+ UCHAR ucDockingPinBit;
+ UCHAR ucDockingPinPolarity;
+ ULONG ulDockingPinCFGInfo;
+ ULONG ulCPUCapInfo;
+ USHORT usNumberOfCyclesInPeriod;
+ USHORT usMaxNBVoltage;
+ USHORT usMinNBVoltage;
+ USHORT usBootUpNBVoltage;
+ ULONG ulHTLinkFreq; /* in 10Khz */
+ USHORT usMinHTLinkWidth;
+ USHORT usMaxHTLinkWidth;
+ USHORT usUMASyncStartDelay;
+ USHORT usUMADataReturnTime;
+ USHORT usLinkStatusZeroTime;
+ USHORT usReserved;
+ ULONG ulHighVoltageHTLinkFreq; /* in 10Khz */
+ ULONG ulLowVoltageHTLinkFreq; /* in 10Khz */
+ USHORT usMaxUpStreamHTLinkWidth;
+ USHORT usMaxDownStreamHTLinkWidth;
+ USHORT usMinUpStreamHTLinkWidth;
+ USHORT usMinDownStreamHTLinkWidth;
+ ULONG ulReserved3[97]; /* must be 0x0 */
+} ATOM_INTEGRATED_SYSTEM_INFO_V2;
+
+/*
+ulBootUpEngineClock: Boot-up Engine Clock in 10Khz;
+ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
+ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
+
+ulSystemConfig:
+Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
+Bit[1]=1: system boots up in AMD overdrive state or a user customized mode. In this case, the driver will just stick to this boot-up mode; no other PowerPlay state is used.
+ =0: system boots up at driver control state. Power state depends on PowerPlay table.
+Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
+Bit[3]=1: Only one power state(Performance) will be supported.
+ =0: Multiple power states supported from PowerPlay table.
+Bit[4]=1: CLMC is supported and enabled on current system.
+ =0: CLMC is not supported or enabled on current system. SBIOS needs to support HT link/freq change through the ATIF interface.
+Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
+ =0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied.
+Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
+ =0: Voltage settings are determined by the PowerPlay table.
+Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is a workaround for a CPU (Griffin) performance issue.
+ =0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
+
+ulBootUpReqDisplayVector: This dword is a bit vector that indicates which display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
+
+ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
+ [7:0] - Bootup TV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition;
+
+ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
+ [3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
+ [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
+ [15:8] - Lane configuration attribute;
+ [23:16]- Connector type, possible value:
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A
+ CONNECTOR_OBJECT_ID_DISPLAYPORT
+ [31:24]- Reserved
+
+ulDDISlot2Config: Same as Slot1.
+ucMemoryType: SidePort memory type, set it to 0x0 when Sideport memory is not installed. Driver needs this info to change sideport memory clock. Not for display in CCC.
+For IGP, Hypermemory is the only memory type shown in CCC.
+
+ucUMAChannelNumber: how many channels for the UMA;
+
+ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
+ucDockingPinBit: which bit in this register to read the pin status;
+ucDockingPinPolarity:Polarity of the pin when docked;
+
+ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
+
+usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
+usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
+usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
+ GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
+ PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
+ GPU SW doesn't control the mode: usMaxNBVoltage & usMinNBVoltage=0 and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE is ignored
+usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
+
+ulHTLinkFreq: Bootup HT link Frequency in 10Khz.
+usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
+ If CDLW enabled, both upstream and downstream width should be the same during bootup.
+usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
+ If CDLW enabled, both upstream and downstream width should be the same during bootup.
+
+usUMASyncStartDelay: Memory access latency, required for watermark calculation
+usUMADataReturnTime: Memory access latency, required for watermark calculation
+usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for a K8 CPU, set a proper value in units of 0.01 us
+for Griffin or Greyhound. SBIOS needs to convert to actual time by the table below (a conversion sketch follows this comment block):
+ if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
+ if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
+ if T0Ttime [5:4]=10b, then usLinkStatusZeroTime=T0Ttime [3:0]*2.0us (0.0 to 30us)
+ if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
+
+ulHighVoltageHTLinkFreq: HT link frequency for power state with high voltage. If boot up runs in HT1, this must be 0.
+ This must be less than or equal to ulHTLinkFreq(bootup frequency).
+ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
+ This must be less than or equal to ulHighVoltageHTLinkFreq.
+
+usMaxUpStreamHTLinkWidth: Asymmetric link width support in the future, to replace usMaxHTLinkWidth. Not used for now.
+usMaxDownStreamHTLinkWidth: same as above.
+usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to replace usMinHTLinkWidth. Not used for now.
+usMinDownStreamHTLinkWidth: same as above.
+*/
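+
+/*
+ * Illustrative sketch (not part of the ATOM definitions): the usLinkStatusZeroTime
+ * conversion described above, turning a 6-bit T0Ttime encoding into the field's
+ * unit of 0.01 us.  Bits [5:4] select the step size (0.1/0.5/2.0/20 us) and
+ * bits [3:0] the multiplier; the 20 us step is only specified for values 0x0-0xa.
+ *
+ *   static USHORT example_t0ttime_to_link_status_zero_time(UCHAR t0ttime)
+ *   {
+ *           // step sizes expressed in units of 0.01 us
+ *           static const USHORT step[4] = { 10, 50, 200, 2000 };
+ *
+ *           return (USHORT)((t0ttime & 0x0F) * step[(t0ttime >> 4) & 0x03]);
+ *   }
+ */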
+
+#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
+#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
+#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
+#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008
+#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010
+#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020
+#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040
+#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080
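+
+/*
+ * Illustrative sketch (not part of the ATOM definitions): testing ulSystemConfig
+ * from ATOM_INTEGRATED_SYSTEM_INFO_V2 against the SYSTEM_CONFIG_xxx masks above,
+ * matching the bit meanings spelled out in the preceding comment block.
+ *
+ *   static int example_nb_voltage_uses_pwm(const ATOM_INTEGRATED_SYSTEM_INFO_V2 *info)
+ *   {
+ *           return (info->ulSystemConfig & SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE) != 0;
+ *   }
+ *
+ *   static int example_powerexpress_enabled(const ATOM_INTEGRATED_SYSTEM_INFO_V2 *info)
+ *   {
+ *           return (info->ulSystemConfig & SYSTEM_CONFIG_POWEREXPRESS_ENABLE) != 0;
+ *   }
+ */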
+
+#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF
+
+#define b0IGP_DDI_SLOT_LANE_MAP_MASK 0x0F
+#define b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK 0xF0
+#define b0IGP_DDI_SLOT_CONFIG_LANE_0_3 0x01
+#define b0IGP_DDI_SLOT_CONFIG_LANE_4_7 0x02
+#define b0IGP_DDI_SLOT_CONFIG_LANE_8_11 0x04
+#define b0IGP_DDI_SLOT_CONFIG_LANE_12_15 0x08
+
+#define IGP_DDI_SLOT_ATTRIBUTE_MASK 0x0000FF00
+#define IGP_DDI_SLOT_CONFIG_REVERSED 0x00000100
+#define b1IGP_DDI_SLOT_CONFIG_REVERSED 0x01
+
+#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000
+
+#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000
+#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001
+#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002
+#define ATOM_DFP_INT_ENCODER1_INDEX 0x00000003
+#define ATOM_CRT_INT_ENCODER2_INDEX 0x00000004
+#define ATOM_LCD_EXT_ENCODER1_INDEX 0x00000005
+#define ATOM_TV_EXT_ENCODER1_INDEX 0x00000006
+#define ATOM_DFP_EXT_ENCODER1_INDEX 0x00000007
+#define ATOM_CV_INT_ENCODER1_INDEX 0x00000008
+#define ATOM_DFP_INT_ENCODER2_INDEX 0x00000009
+#define ATOM_CRT_EXT_ENCODER1_INDEX 0x0000000A
+#define ATOM_CV_EXT_ENCODER1_INDEX 0x0000000B
+#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C
+#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D
+
+/* define ASIC internal encoder id ( bit vector ) */
+#define ASIC_INT_DAC1_ENCODER_ID 0x00
+#define ASIC_INT_TV_ENCODER_ID 0x02
+#define ASIC_INT_DIG1_ENCODER_ID 0x03
+#define ASIC_INT_DAC2_ENCODER_ID 0x04
+#define ASIC_EXT_TV_ENCODER_ID 0x06
+#define ASIC_INT_DVO_ENCODER_ID 0x07
+#define ASIC_INT_DIG2_ENCODER_ID 0x09
+#define ASIC_EXT_DIG_ENCODER_ID 0x05
+
+/* define Encoder attribute */
+#define ATOM_ANALOG_ENCODER 0
+#define ATOM_DIGITAL_ENCODER 1
+
+#define ATOM_DEVICE_CRT1_INDEX 0x00000000
+#define ATOM_DEVICE_LCD1_INDEX 0x00000001
+#define ATOM_DEVICE_TV1_INDEX 0x00000002
+#define ATOM_DEVICE_DFP1_INDEX 0x00000003
+#define ATOM_DEVICE_CRT2_INDEX 0x00000004
+#define ATOM_DEVICE_LCD2_INDEX 0x00000005
+#define ATOM_DEVICE_TV2_INDEX 0x00000006
+#define ATOM_DEVICE_DFP2_INDEX 0x00000007
+#define ATOM_DEVICE_CV_INDEX 0x00000008
+#define ATOM_DEVICE_DFP3_INDEX 0x00000009
+#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
+#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
+#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C
+#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D
+#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E
+#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1)
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1)
+
+#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1)
+
+#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX)
+#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX)
+#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX)
+#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX)
+#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX)
+#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX)
+#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
+#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX)
+#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX)
+#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX)
+#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
+#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX)
+
+#define ATOM_DEVICE_CRT_SUPPORT \
+ (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
+#define ATOM_DEVICE_DFP_SUPPORT \
+ (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | \
+ ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | \
+ ATOM_DEVICE_DFP5_SUPPORT)
+#define ATOM_DEVICE_TV_SUPPORT \
+ (ATOM_DEVICE_TV1_SUPPORT | ATOM_DEVICE_TV2_SUPPORT)
+#define ATOM_DEVICE_LCD_SUPPORT \
+ (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
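+
+/*
+ * Illustrative sketch (not part of the ATOM definitions): the ATOM_DEVICE_xxx_SUPPORT
+ * values form the bit vector used by fields such as ulBootUpReqDisplayVector above
+ * and usDeviceSupport in ATOM_OBJECT_HEADER below.  Checking whether a device
+ * index is present is a single mask test.
+ *
+ *   static int example_device_requested(ULONG device_vector, int device_index)
+ *   {
+ *           return (device_vector & (0x1L << device_index)) != 0;
+ *   }
+ *
+ *   // e.g. example_device_requested(vec, ATOM_DEVICE_LCD1_INDEX)
+ *   //      is equivalent to (vec & ATOM_DEVICE_LCD1_SUPPORT) != 0.
+ */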
+
+#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0
+#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004
+#define ATOM_DEVICE_CONNECTOR_VGA 0x00000001
+#define ATOM_DEVICE_CONNECTOR_DVI_I 0x00000002
+#define ATOM_DEVICE_CONNECTOR_DVI_D 0x00000003
+#define ATOM_DEVICE_CONNECTOR_DVI_A 0x00000004
+#define ATOM_DEVICE_CONNECTOR_SVIDEO 0x00000005
+#define ATOM_DEVICE_CONNECTOR_COMPOSITE 0x00000006
+#define ATOM_DEVICE_CONNECTOR_LVDS 0x00000007
+#define ATOM_DEVICE_CONNECTOR_DIGI_LINK 0x00000008
+#define ATOM_DEVICE_CONNECTOR_SCART 0x00000009
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_A 0x0000000A
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_B 0x0000000B
+#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E
+#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F
+
+#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F
+#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000
+#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000
+#define ATOM_DEVICE_DAC_INFO_DACA 0x00000001
+#define ATOM_DEVICE_DAC_INFO_DACB 0x00000002
+#define ATOM_DEVICE_DAC_INFO_EXDAC 0x00000003
+
+#define ATOM_DEVICE_I2C_ID_NOI2C 0x00000000
+
+#define ATOM_DEVICE_I2C_LINEMUX_MASK 0x0000000F
+#define ATOM_DEVICE_I2C_LINEMUX_SHIFT 0x00000000
+
+#define ATOM_DEVICE_I2C_ID_MASK 0x00000070
+#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004
+#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001
+#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002
+#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 /* For IGP RS600 */
+#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 /* For IGP RS690 */
+
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007
+#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000
+#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001
+
+/* usDeviceSupport: */
+/* Bit 0 = 0 - no CRT1 support, =1 - CRT1 is supported */
+/* Bit 1 = 0 - no LCD1 support, =1 - LCD1 is supported */
+/* Bit 2 = 0 - no TV1 support, =1 - TV1 is supported */
+/* Bit 3 = 0 - no DFP1 support, =1 - DFP1 is supported */
+/* Bit 4 = 0 - no CRT2 support, =1 - CRT2 is supported */
+/* Bit 5 = 0 - no LCD2 support, =1 - LCD2 is supported */
+/* Bit 6 = 0 - no TV2 support, =1 - TV2 is supported */
+/* Bit 7 = 0 - no DFP2 support, =1 - DFP2 is supported */
+/* Bit 8 = 0 - no CV support, =1 - CV is supported */
+/* Bit 9 = 0 - no DFP3 support, =1 - DFP3 is supported */
+/* Byte1 (Supported Device Info) */
+/* Bit 0 = 0 - no CV support, =1 - CV is supported */
+/* */
+/* */
+
+/* ucI2C_ConfigID */
+/* [7:0] - I2C LINE Associate ID */
+/* = 0 - no I2C */
+/* [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) */
+/* = 0, [6:0]=SW assisted I2C ID */
+/* [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use */
+/* = 2, HW engine for Multimedia use */
+/* = 3-7 Reserved for future I2C engines */
+/* [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C */
+
+typedef struct _ATOM_I2C_ID_CONFIG {
+#if ATOM_BIG_ENDIAN
+ UCHAR bfHW_Capable:1;
+ UCHAR bfHW_EngineID:3;
+ UCHAR bfI2C_LineMux:4;
+#else
+ UCHAR bfI2C_LineMux:4;
+ UCHAR bfHW_EngineID:3;
+ UCHAR bfHW_Capable:1;
+#endif
+} ATOM_I2C_ID_CONFIG;
+
+typedef union _ATOM_I2C_ID_CONFIG_ACCESS {
+ ATOM_I2C_ID_CONFIG sbfAccess;
+ UCHAR ucAccess;
+} ATOM_I2C_ID_CONFIG_ACCESS;
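+
+/*
+ * Illustrative sketch (not part of the ATOM definitions): unpacking a
+ * ucI2C_ConfigID byte through ATOM_I2C_ID_CONFIG_ACCESS, following the
+ * [7]/[6:4]/[3:0] layout described above (HW-capable flag, HW engine ID,
+ * line mux / GPIO ID).
+ *
+ *   static void example_decode_i2c_id(UCHAR config_id,
+ *                                     int *hw_capable, int *engine_id, int *line_mux)
+ *   {
+ *           ATOM_I2C_ID_CONFIG_ACCESS id;
+ *
+ *           id.ucAccess = config_id;
+ *           *hw_capable = id.sbfAccess.bfHW_Capable;
+ *           *engine_id  = id.sbfAccess.bfHW_EngineID;
+ *           *line_mux   = id.sbfAccess.bfI2C_LineMux;
+ *   }
+ */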
+
+/****************************************************************************/
+/* Structure used in GPIO_I2C_InfoTable */
+/****************************************************************************/
+typedef struct _ATOM_GPIO_I2C_ASSIGMENT {
+ USHORT usClkMaskRegisterIndex;
+ USHORT usClkEnRegisterIndex;
+ USHORT usClkY_RegisterIndex;
+ USHORT usClkA_RegisterIndex;
+ USHORT usDataMaskRegisterIndex;
+ USHORT usDataEnRegisterIndex;
+ USHORT usDataY_RegisterIndex;
+ USHORT usDataA_RegisterIndex;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+ UCHAR ucClkMaskShift;
+ UCHAR ucClkEnShift;
+ UCHAR ucClkY_Shift;
+ UCHAR ucClkA_Shift;
+ UCHAR ucDataMaskShift;
+ UCHAR ucDataEnShift;
+ UCHAR ucDataY_Shift;
+ UCHAR ucDataA_Shift;
+ UCHAR ucReserved1;
+ UCHAR ucReserved2;
+} ATOM_GPIO_I2C_ASSIGMENT;
+
+typedef struct _ATOM_GPIO_I2C_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
+} ATOM_GPIO_I2C_INFO;
+
+/****************************************************************************/
+/* Common Structure used in other structures */
+/****************************************************************************/
+
+#ifndef _H2INC
+
+/* Please don't add or expand this bitfield structure below; this one will retire soon! */
+typedef struct _ATOM_MODE_MISC_INFO {
+#if ATOM_BIG_ENDIAN
+ USHORT Reserved:6;
+ USHORT RGB888:1;
+ USHORT DoubleClock:1;
+ USHORT Interlace:1;
+ USHORT CompositeSync:1;
+ USHORT V_ReplicationBy2:1;
+ USHORT H_ReplicationBy2:1;
+ USHORT VerticalCutOff:1;
+ USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */
+ USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */
+ USHORT HorizontalCutOff:1;
+#else
+ USHORT HorizontalCutOff:1;
+ USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */
+ USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */
+ USHORT VerticalCutOff:1;
+ USHORT H_ReplicationBy2:1;
+ USHORT V_ReplicationBy2:1;
+ USHORT CompositeSync:1;
+ USHORT Interlace:1;
+ USHORT DoubleClock:1;
+ USHORT RGB888:1;
+ USHORT Reserved:6;
+#endif
+} ATOM_MODE_MISC_INFO;
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS {
+ ATOM_MODE_MISC_INFO sbfAccess;
+ USHORT usAccess;
+} ATOM_MODE_MISC_INFO_ACCESS;
+
+#else
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS {
+ USHORT usAccess;
+} ATOM_MODE_MISC_INFO_ACCESS;
+
+#endif
+
+/* usModeMiscInfo- */
+#define ATOM_H_CUTOFF 0x01
+#define ATOM_HSYNC_POLARITY 0x02 /* 0=Active High, 1=Active Low */
+#define ATOM_VSYNC_POLARITY 0x04 /* 0=Active High, 1=Active Low */
+#define ATOM_V_CUTOFF 0x08
+#define ATOM_H_REPLICATIONBY2 0x10
+#define ATOM_V_REPLICATIONBY2 0x20
+#define ATOM_COMPOSITESYNC 0x40
+#define ATOM_INTERLACE 0x80
+#define ATOM_DOUBLE_CLOCK_MODE 0x100
+#define ATOM_RGB888_MODE 0x200
+
+/* usRefreshRate- */
+#define ATOM_REFRESH_43 43
+#define ATOM_REFRESH_47 47
+#define ATOM_REFRESH_56 56
+#define ATOM_REFRESH_60 60
+#define ATOM_REFRESH_65 65
+#define ATOM_REFRESH_70 70
+#define ATOM_REFRESH_72 72
+#define ATOM_REFRESH_75 75
+#define ATOM_REFRESH_85 85
+
+/* ATOM_MODE_TIMING data are exactly the same as VESA timing data. */
+/* To translate from EDID to ATOM_MODE_TIMING, use the following formulas. */
+/* */
+/* VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK */
+/* = EDID_HA + EDID_HBL */
+/* VESA_HDISP = VESA_ACTIVE = EDID_HA */
+/* VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH */
+/* = EDID_HA + EDID_HSO */
+/* VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW */
+/* VESA_BORDER = EDID_BORDER */
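+
+/*
+ * Illustrative sketch (not part of the ATOM definitions): applying the formulas
+ * above to fill the horizontal fields of ATOM_MODE_TIMING (defined later in this
+ * file).  The edid_* parameters are hypothetical names for the detailed timing
+ * values pulled out of an EDID block.
+ *
+ *   static void example_fill_h_timing(ATOM_MODE_TIMING *t,
+ *                                     USHORT edid_ha, USHORT edid_hbl,
+ *                                     USHORT edid_hso, USHORT edid_hspw)
+ *   {
+ *           t->usCRTC_H_Total     = edid_ha + edid_hbl;   // VESA_HTOTAL
+ *           t->usCRTC_H_Disp      = edid_ha;              // VESA_HDISP
+ *           t->usCRTC_H_SyncStart = edid_ha + edid_hso;   // VESA_HSYNC_START
+ *           t->usCRTC_H_SyncWidth = edid_hspw;            // VESA_HSYNC_WIDTH
+ *   }
+ */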
+
+/****************************************************************************/
+/* Structure used in SetCRTC_UsingDTDTimingTable */
+/****************************************************************************/
+typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS {
+ USHORT usH_Size;
+ USHORT usH_Blanking_Time;
+ USHORT usV_Size;
+ USHORT usV_Blanking_Time;
+ USHORT usH_SyncOffset;
+ USHORT usH_SyncWidth;
+ USHORT usV_SyncOffset;
+ USHORT usV_SyncWidth;
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucH_Border; /* From DFP EDID */
+ UCHAR ucV_Border;
+ UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
+ UCHAR ucPadding[3];
+} SET_CRTC_USING_DTD_TIMING_PARAMETERS;
+
+/****************************************************************************/
+/* Structure used in SetCRTC_TimingTable */
+/****************************************************************************/
+typedef struct _SET_CRTC_TIMING_PARAMETERS {
+ USHORT usH_Total; /* horizontal total */
+ USHORT usH_Disp; /* horizontal display */
+ USHORT usH_SyncStart; /* horizontal Sync start */
+ USHORT usH_SyncWidth; /* horizontal Sync width */
+ USHORT usV_Total; /* vertical total */
+ USHORT usV_Disp; /* vertical display */
+ USHORT usV_SyncStart; /* vertical Sync start */
+ USHORT usV_SyncWidth; /* vertical Sync width */
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
+ UCHAR ucOverscanRight; /* right */
+ UCHAR ucOverscanLeft; /* left */
+ UCHAR ucOverscanBottom; /* bottom */
+ UCHAR ucOverscanTop; /* top */
+ UCHAR ucReserved;
+} SET_CRTC_TIMING_PARAMETERS;
+#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
+
+/****************************************************************************/
+/* Structure used in StandardVESA_TimingTable */
+/* AnalogTV_InfoTable */
+/* ComponentVideoInfoTable */
+/****************************************************************************/
+typedef struct _ATOM_MODE_TIMING {
+ USHORT usCRTC_H_Total;
+ USHORT usCRTC_H_Disp;
+ USHORT usCRTC_H_SyncStart;
+ USHORT usCRTC_H_SyncWidth;
+ USHORT usCRTC_V_Total;
+ USHORT usCRTC_V_Disp;
+ USHORT usCRTC_V_SyncStart;
+ USHORT usCRTC_V_SyncWidth;
+ USHORT usPixelClock; /* in 10Khz unit */
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ USHORT usCRTC_OverscanRight;
+ USHORT usCRTC_OverscanLeft;
+ USHORT usCRTC_OverscanBottom;
+ USHORT usCRTC_OverscanTop;
+ USHORT usReserve;
+ UCHAR ucInternalModeNumber;
+ UCHAR ucRefreshRate;
+} ATOM_MODE_TIMING;
+
+typedef struct _ATOM_DTD_FORMAT {
+ USHORT usPixClk;
+ USHORT usHActive;
+ USHORT usHBlanking_Time;
+ USHORT usVActive;
+ USHORT usVBlanking_Time;
+ USHORT usHSyncOffset;
+ USHORT usHSyncWidth;
+ USHORT usVSyncOffset;
+ USHORT usVSyncWidth;
+ USHORT usImageHSize;
+ USHORT usImageVSize;
+ UCHAR ucHBorder;
+ UCHAR ucVBorder;
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucInternalModeNumber;
+ UCHAR ucRefreshRate;
+} ATOM_DTD_FORMAT;
+
+/****************************************************************************/
+/* Structure used in LVDS_InfoTable */
+/* * Need a document to describe this table */
+/****************************************************************************/
+#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
+#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
+#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
+#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
+
+/* Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. */
+/* Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL */
+#define LCDPANEL_CAP_READ_EDID 0x1
+
+/* ucTableFormatRevision=1 */
+/* ucTableContentRevision=1 */
+typedef struct _ATOM_LVDS_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usModePatchTableOffset;
+ USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */
+ USHORT usOffDelayInMs;
+ UCHAR ucPowerSequenceDigOntoDEin10Ms;
+ UCHAR ucPowerSequenceDEtoBLOnin10Ms;
+ UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
+ /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
+ /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
+ /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+} ATOM_LVDS_INFO;
+
+/* ucTableFormatRevision=1 */
+/* ucTableContentRevision=2 */
+typedef struct _ATOM_LVDS_INFO_V12 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usExtInfoTableOffset;
+ USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */
+ USHORT usOffDelayInMs;
+ UCHAR ucPowerSequenceDigOntoDEin10Ms;
+ UCHAR ucPowerSequenceDEtoBLOnin10Ms;
+ UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
+ /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
+ /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
+ /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+ USHORT usLCDVenderID;
+ USHORT usLCDProductID;
+ UCHAR ucLCDPanel_SpecialHandlingCap;
+ UCHAR ucPanelInfoSize; /* start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable */
+ UCHAR ucReserved[2];
+} ATOM_LVDS_INFO_V12;
+
+#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
+
+typedef struct _ATOM_PATCH_RECORD_MODE {
+ UCHAR ucRecordType;
+ USHORT usHDisp;
+ USHORT usVDisp;
+} ATOM_PATCH_RECORD_MODE;
+
+typedef struct _ATOM_LCD_RTS_RECORD {
+ UCHAR ucRecordType;
+ UCHAR ucRTSValue;
+} ATOM_LCD_RTS_RECORD;
+
+/* !! If the record below exists, it should always be the first record for easy use in the command table!!! */
+typedef struct _ATOM_LCD_MODE_CONTROL_CAP {
+ UCHAR ucRecordType;
+ USHORT usLCDCap;
+} ATOM_LCD_MODE_CONTROL_CAP;
+
+#define LCD_MODE_CAP_BL_OFF 1
+#define LCD_MODE_CAP_CRTC_OFF 2
+#define LCD_MODE_CAP_PANEL_OFF 4
+
+typedef struct _ATOM_FAKE_EDID_PATCH_RECORD {
+ UCHAR ucRecordType;
+ UCHAR ucFakeEDIDLength;
+ UCHAR ucFakeEDIDString[1]; /* This actually has ucFakeEDIDLength elements. */
+} ATOM_FAKE_EDID_PATCH_RECORD;
+
+typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
+ UCHAR ucRecordType;
+ USHORT usHSize;
+ USHORT usVSize;
+} ATOM_PANEL_RESOLUTION_PATCH_RECORD;
+
+#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1
+#define LCD_RTS_RECORD_TYPE 2
+#define LCD_CAP_RECORD_TYPE 3
+#define LCD_FAKE_EDID_PATCH_RECORD_TYPE 4
+#define LCD_PANEL_RESOLUTION_RECORD_TYPE 5
+#define ATOM_RECORD_END_TYPE 0xFF
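+
+/*
+ * Illustrative sketch (not part of the ATOM definitions): walking the
+ * variable-length patch record list that usModePatchTableOffset /
+ * usExtInfoTableOffset points at.  Every record starts with ucRecordType and
+ * the list ends with ATOM_RECORD_END_TYPE.  This assumes the 1-byte structure
+ * packing the ATOM headers are normally built with, and that "rec" already
+ * points at the first record.
+ *
+ *   static void example_walk_lcd_records(UCHAR *rec)
+ *   {
+ *           while (*rec != ATOM_RECORD_END_TYPE) {
+ *                   switch (*rec) {
+ *                   case LCD_MODE_PATCH_RECORD_MODE_TYPE:
+ *                           rec += sizeof(ATOM_PATCH_RECORD_MODE);
+ *                           break;
+ *                   case LCD_RTS_RECORD_TYPE:
+ *                           rec += sizeof(ATOM_LCD_RTS_RECORD);
+ *                           break;
+ *                   case LCD_CAP_RECORD_TYPE:
+ *                           rec += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
+ *                           break;
+ *                   case LCD_FAKE_EDID_PATCH_RECORD_TYPE: {
+ *                           ATOM_FAKE_EDID_PATCH_RECORD *f =
+ *                                   (ATOM_FAKE_EDID_PATCH_RECORD *)rec;
+ *                           // 2-byte header plus the fake EDID payload
+ *                           rec += sizeof(*f) - 1 + f->ucFakeEDIDLength;
+ *                           break;
+ *                   }
+ *                   case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+ *                           rec += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
+ *                           break;
+ *                   default:
+ *                           return; // unknown record: cannot compute its size
+ *                   }
+ *           }
+ *   }
+ */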
+
+/****************************Spread Spectrum Info Table Definitions **********************/
+
+/* ucTableFormatRevision=1 */
+/* ucTableContentRevision=2 */
+typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; /* Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others:TBD */
+ UCHAR ucSS_Step;
+ UCHAR ucSS_Delay;
+ UCHAR ucSS_Id;
+ UCHAR ucRecommandedRef_Div;
+ UCHAR ucSS_Range; /* it was reserved for V11 */
+} ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
+
+#define ATOM_MAX_SS_ENTRY 16
+#define ATOM_DP_SS_ID1 0x0f1 /* SS modulation freq=30k */
+#define ATOM_DP_SS_ID2 0x0f2 /* SS modulation freq=33k */
+
+#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
+#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
+#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
+#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001
+#define ATOM_INTERNAL_SS_MASK 0x00000000
+#define ATOM_EXTERNAL_SS_MASK 0x00000002
+#define EXEC_SS_STEP_SIZE_SHIFT 2
+#define EXEC_SS_DELAY_SHIFT 4
+#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4
+
+typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
+} ATOM_SPREAD_SPECTRUM_INFO;
+
+/****************************************************************************/
+/* Structure used in AnalogTV_InfoTable (Top level) */
+/****************************************************************************/
+/* ucTVBootUpDefaultStd definition: */
+
+/* ATOM_TV_NTSC 1 */
+/* ATOM_TV_NTSCJ 2 */
+/* ATOM_TV_PAL 3 */
+/* ATOM_TV_PALM 4 */
+/* ATOM_TV_PALCN 5 */
+/* ATOM_TV_PALN 6 */
+/* ATOM_TV_PAL60 7 */
+/* ATOM_TV_SECAM 8 */
+
+/* ucTVSuppportedStd definition: */
+#define NTSC_SUPPORT 0x1
+#define NTSCJ_SUPPORT 0x2
+
+#define PAL_SUPPORT 0x4
+#define PALM_SUPPORT 0x8
+#define PALCN_SUPPORT 0x10
+#define PALN_SUPPORT 0x20
+#define PAL60_SUPPORT 0x40
+#define SECAM_SUPPORT 0x80
+
+#define MAX_SUPPORTED_TV_TIMING 2
+
+typedef struct _ATOM_ANALOG_TV_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTV_SupportedStandard;
+ UCHAR ucTV_BootUpDefaultStandard;
+ UCHAR ucExt_TV_ASIC_ID;
+ UCHAR ucExt_TV_ASIC_SlaveAddr;
+ /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; */
+ ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
+} ATOM_ANALOG_TV_INFO;
+
+/**************************************************************************/
+/* VRAM usage and related definitions */
+
+/* One chunk of VRAM used by the Bios is for HWICON surfaces, EDID data, */
+/* current mode timing and detail timing and/or STD timing data for EACH device. They can be broken down as below. */
+/* All the addresses below are the offsets from the frame buffer start. They all MUST be Dword aligned! */
+/* To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR */
+/* To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX */
+
+#ifndef VESA_MEMORY_IN_64K_BLOCK
+#define VESA_MEMORY_IN_64K_BLOCK 0x100 /* 256*64K=16MB (Max. VESA memory is 16MB!) */
+#endif
+
+#define ATOM_EDID_RAW_DATASIZE 256 /* In Bytes */
+#define ATOM_HWICON_SURFACE_SIZE 4096 /* In Bytes */
+#define ATOM_HWICON_INFOTABLE_SIZE 32
+#define MAX_DTD_MODE_IN_VRAM 6
+#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) /* 28= (SIZEOF ATOM_DTD_FORMAT) */
+#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) /* 32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) */
+#define DFP_ENCODER_TYPE_OFFSET 0x80
+#define DP_ENCODER_LANE_NUM_OFFSET 0x84
+#define DP_ENCODER_LINK_RATE_OFFSET 0x88
+
+#define ATOM_HWICON1_SURFACE_ADDR 0
+#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_HWICON_INFOTABLE_ADDR (ATOM_HWICON2_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_CRT1_EDID_ADDR (ATOM_HWICON_INFOTABLE_ADDR + ATOM_HWICON_INFOTABLE_SIZE)
+#define ATOM_CRT1_DTD_MODE_TBL_ADDR (ATOM_CRT1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT1_STD_MODE_TBL_ADDR (ATOM_CRT1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP1_EDID_ADDR (ATOM_TV1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP1_DTD_MODE_TBL_ADDR (ATOM_DFP1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP1_STD_MODE_TBL_ADDR (ATOM_DFP1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CRT2_EDID_ADDR (ATOM_DFP1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CRT2_DTD_MODE_TBL_ADDR (ATOM_CRT2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT2_STD_MODE_TBL_ADDR (ATOM_CRT2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_TV2_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_TV2_DTD_MODE_TBL_ADDR (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_TV2_STD_MODE_TBL_ADDR (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP2_EDID_ADDR (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CV_EDID_ADDR (ATOM_DFP2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CV_DTD_MODE_TBL_ADDR (ATOM_CV_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CV_STD_MODE_TBL_ADDR (ATOM_CV_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP3_EDID_ADDR (ATOM_CV_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP3_DTD_MODE_TBL_ADDR (ATOM_DFP3_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP3_STD_MODE_TBL_ADDR (ATOM_DFP3_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP4_EDID_ADDR (ATOM_DFP3_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP4_DTD_MODE_TBL_ADDR (ATOM_DFP4_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP4_STD_MODE_TBL_ADDR (ATOM_DFP4_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP5_EDID_ADDR (ATOM_DFP4_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 256)
+#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START + 512)
+
+/* The size below is in KB! */
+#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
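+
+/*
+ * Worked example (illustrative, derived from the sizes defined above): the
+ * layout runs from ATOM_HWICON1_SURFACE_ADDR (0) to ATOM_STACK_STORAGE_END
+ * (16640 bytes = 16.25KB).  16640 >> 10 = 16, + 4 = 20, and & 0xFFFC keeps 20,
+ * so ATOM_VRAM_RESERVE_SIZE evaluates to 20 (KB): the >>10 converts bytes to
+ * KB, the +4 presumably covers the truncated fraction plus headroom, and the
+ * mask rounds the result down to a multiple of 4KB.
+ */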
+
+#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
+#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
+#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
+#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
+
+/***********************************************************************************/
+/* Structure used in VRAM_UsageByFirmwareTable */
+/* Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm */
+/* at run time. */
+/* Note2: From RV770, the memory is more than 32bit addressable, so we will change */
+/* ucTableFormatRevision=1,ucTableContentRevision=4; the structure remains */
+/* exactly the same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware */
+/* (in offset to start of memory address) is KB aligned instead of byte aligned. */
+/***********************************************************************************/
+#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
+
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO {
+ ULONG ulStartAddrUsedByFirmware;
+ USHORT usFirmwareUseInKb;
+ USHORT usReserved;
+} ATOM_FIRMWARE_VRAM_RESERVE_INFO;
+
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_FIRMWARE_VRAM_RESERVE_INFO
+ asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+} ATOM_VRAM_USAGE_BY_FIRMWARE;
+
+/****************************************************************************/
+/* Structure used in GPIO_Pin_LUTTable */
+/****************************************************************************/
+typedef struct _ATOM_GPIO_PIN_ASSIGNMENT {
+ USHORT usGpioPin_AIndex;
+ UCHAR ucGpioPinBitShift;
+ UCHAR ucGPIO_ID;
+} ATOM_GPIO_PIN_ASSIGNMENT;
+
+typedef struct _ATOM_GPIO_PIN_LUT {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
+} ATOM_GPIO_PIN_LUT;
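+
+/*
+ * Illustrative sketch (not part of the ATOM definitions): asGPIO_Pin[] above is
+ * really a variable-length array.  This sketch assumes, as in the full ATOM
+ * header, that ATOM_COMMON_TABLE_HEADER carries the total structure size in
+ * usStructureSize, so the entry count can be derived from it, and that the
+ * usual 1-byte packing is in effect.
+ *
+ *   static ATOM_GPIO_PIN_ASSIGNMENT *
+ *   example_lookup_gpio_pin(ATOM_GPIO_PIN_LUT *lut, UCHAR gpio_id)
+ *   {
+ *           int i, count = (lut->sHeader.usStructureSize -
+ *                           sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ *                          sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+ *
+ *           for (i = 0; i < count; i++)
+ *                   if (lut->asGPIO_Pin[i].ucGPIO_ID == gpio_id)
+ *                           return &lut->asGPIO_Pin[i];
+ *           return NULL;
+ *   }
+ */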
+
+/****************************************************************************/
+/* Structure used in ComponentVideoInfoTable */
+/****************************************************************************/
+#define GPIO_PIN_ACTIVE_HIGH 0x1
+
+#define MAX_SUPPORTED_CV_STANDARDS 5
+
+/* definitions for ATOM_D_INFO.ucSettings */
+#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F /* [4:0] */
+#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 /* [6:5] = must be zeroed out */
+#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 /* [7] */
+
+typedef struct _ATOM_GPIO_INFO {
+ USHORT usAOffset;
+ UCHAR ucSettings;
+ UCHAR ucReserved;
+} ATOM_GPIO_INFO;
+
+/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) */
+#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2
+
+/* definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i */
+#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 /* [7]; */
+#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F /* [6:0] */
+
+/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode */
+/* Line 3 outputs 5V. */
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 /* represent gpio 3 state for 16:9 */
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 /* represent gpio 4 state for 16:9 */
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
+
+/* Line 3 outputs 2.2V */
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 /* represent gpio 3 state for 4:3 Letter box */
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 /* represent gpio 4 state for 4:3 Letter box */
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
+
+/* Line 3 outputs 0V */
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 /* represent gpio 3 state for 4:3 */
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 /* represent gpio 4 state for 4:3 */
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
+
+#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F /* bit [5:0] */
+
+#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 /* bit 7 */
+
+/* GPIO bit index in the gpio-setting-per-mode value; also represents the block number in the gpio blocks. */
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 /* bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode. */
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 /* bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode. */
+
+typedef struct _ATOM_COMPONENT_VIDEO_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMask_PinRegisterIndex;
+ USHORT usEN_PinRegisterIndex;
+ USHORT usY_PinRegisterIndex;
+ USHORT usA_PinRegisterIndex;
+ UCHAR ucBitShift;
+ UCHAR ucPinActiveState; /* ucPinActiveState: Bit0=1 active high, =0 active low */
+ ATOM_DTD_FORMAT sReserved; /* must be zeroed out */
+ UCHAR ucMiscInfo;
+ UCHAR uc480i;
+ UCHAR uc480p;
+ UCHAR uc720p;
+ UCHAR uc1080i;
+ UCHAR ucLetterBoxMode;
+ UCHAR ucReserved[3];
+ UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zero, NTSC type connector */
+ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+} ATOM_COMPONENT_VIDEO_INFO;
+
+/* ucTableFormatRevision=2 */
+/* ucTableContentRevision=1 */
+typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucMiscInfo;
+ UCHAR uc480i;
+ UCHAR uc480p;
+ UCHAR uc720p;
+ UCHAR uc1080i;
+ UCHAR ucReserved;
+ UCHAR ucLetterBoxMode;
+ UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zero, NTSC type connector */
+ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+} ATOM_COMPONENT_VIDEO_INFO_V21;
+
+#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21
+
+/****************************************************************************/
+/* Structure used in object_InfoTable */
+/****************************************************************************/
+typedef struct _ATOM_OBJECT_HEADER {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ USHORT usConnectorObjectTableOffset;
+ USHORT usRouterObjectTableOffset;
+ USHORT usEncoderObjectTableOffset;
+ USHORT usProtectionObjectTableOffset; /* only available when Protection block is independent. */
+ USHORT usDisplayPathTableOffset;
+} ATOM_OBJECT_HEADER;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH {
+ USHORT usDeviceTag; /* supported device */
+ USHORT usSize; /* the size of ATOM_DISPLAY_OBJECT_PATH */
+ USHORT usConnObjectId; /* Connector Object ID */
+ USHORT usGPUObjectId; /* GPU ID */
+ USHORT usGraphicObjIds[1]; /* 1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. */
+} ATOM_DISPLAY_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE {
+ UCHAR ucNumOfDispPath;
+ UCHAR ucVersion;
+ UCHAR ucPadding[2];
+ ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
+} ATOM_DISPLAY_OBJECT_PATH_TABLE;
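+
+/*
+ * Usage sketch (not part of the vendor header): each ATOM_DISPLAY_OBJECT_PATH
+ * is variable length (usGraphicObjIds[] grows with the path), so a reader has
+ * to advance by usSize rather than by sizeof().  Pointer names below are
+ * hypothetical, and multi-byte fields may need le16_to_cpu() on big-endian
+ * hosts.
+ *
+ *	ATOM_DISPLAY_OBJECT_PATH_TABLE *tbl = path_table;	// located via usDisplayPathTableOffset
+ *	ATOM_DISPLAY_OBJECT_PATH *path = tbl->asDispPath;
+ *	UCHAR i;
+ *
+ *	for (i = 0; i < tbl->ucNumOfDispPath; i++) {
+ *		// path->usDeviceTag, usConnObjectId and usGraphicObjIds[] are used here
+ *		path = (ATOM_DISPLAY_OBJECT_PATH *)((UCHAR *)path + path->usSize);
+ *	}
+ */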
+
+typedef struct _ATOM_OBJECT /* each object has this structure */
+{
+ USHORT usObjectID;
+ USHORT usSrcDstTableOffset;
+ USHORT usRecordOffset; /* this pointing to a bunch of records defined below */
+ USHORT usReserved;
+} ATOM_OBJECT;
+
+typedef struct _ATOM_OBJECT_TABLE /* Above 4 object table offset pointing to a bunch of objects all have this structure */
+{
+ UCHAR ucNumberOfObjects;
+ UCHAR ucPadding[3];
+ ATOM_OBJECT asObjects[1];
+} ATOM_OBJECT_TABLE;
+
+typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT /* usSrcDstTableOffset pointing to this structure */
+{
+ UCHAR ucNumberOfSrc;
+ USHORT usSrcObjectID[1];
+ UCHAR ucNumberOfDst;
+ USHORT usDstObjectID[1];
+} ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
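+
+/*
+ * Parsing sketch (not part of the vendor header), under the assumption that
+ * the [1] arrays above are placeholders and the destination count follows the
+ * source IDs directly, so ucNumberOfDst/usDstObjectID do not sit at their
+ * declared offsets.  The IDs may be unaligned, so real code should read them
+ * with an unaligned little-endian 16-bit accessor.
+ *
+ *	UCHAR *p = (UCHAR *)src_dst_table;	// located via usSrcDstTableOffset (hypothetical pointer)
+ *	UCHAR num_src = *p++;
+ *	UCHAR *src_ids = p;			// num_src little-endian USHORT IDs
+ *	p += num_src * sizeof(USHORT);
+ *	UCHAR num_dst = *p++;
+ *	UCHAR *dst_ids = p;			// num_dst little-endian USHORT IDs
+ */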
+
+/* Related definitions, all records are different but they have a common header */
+typedef struct _ATOM_COMMON_RECORD_HEADER {
+ UCHAR ucRecordType; /* An enum to indicate the record type */
+ UCHAR ucRecordSize; /* The size of the whole record in bytes */
+} ATOM_COMMON_RECORD_HEADER;
+
+#define ATOM_I2C_RECORD_TYPE 1
+#define ATOM_HPD_INT_RECORD_TYPE 2
+#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3
+#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4
+#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7
+#define ATOM_JTAG_RECORD_TYPE 8 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9
+#define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10
+#define ATOM_CONNECTOR_CF_RECORD_TYPE 11
+#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12
+#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13
+#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14
+#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
+
+/* Must be updated when a new record type is added; keep it equal to the last record type definition! */
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_CF_RECORD_TYPE
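+
+/*
+ * Usage sketch (not part of the vendor header): the records attached to an
+ * ATOM_OBJECT form a packed list starting at usRecordOffset (assumed here to
+ * be relative to the start of the object info table); each entry begins with
+ * an ATOM_COMMON_RECORD_HEADER and is ucRecordSize bytes long.  A hypothetical
+ * walker that stops on an unknown or zero record type:
+ *
+ *	ATOM_COMMON_RECORD_HEADER *rec =
+ *		(ATOM_COMMON_RECORD_HEADER *)(table_base + obj->usRecordOffset);
+ *	while (rec->ucRecordType > 0 &&
+ *	       rec->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+ *		if (rec->ucRecordType == ATOM_I2C_RECORD_TYPE)
+ *			handle_i2c((ATOM_I2C_RECORD *)rec);	// hypothetical handler
+ *		rec = (ATOM_COMMON_RECORD_HEADER *)((UCHAR *)rec + rec->ucRecordSize);
+ *	}
+ */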
+
+typedef struct _ATOM_I2C_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_I2C_ID_CONFIG sucI2cId;
+ UCHAR ucI2CAddr; /* The slave address, it's 0 when the record is attached to connector for DDC */
+} ATOM_I2C_RECORD;
+
+typedef struct _ATOM_HPD_INT_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
+ UCHAR ucPluggged_PinState;
+} ATOM_HPD_INT_RECORD;
+
+typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucProtectionFlag;
+ UCHAR ucReserved;
+} ATOM_OUTPUT_PROTECTION_RECORD;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG {
+ ULONG ulACPIDeviceEnum; /* Reserved for now */
+ USHORT usDeviceID; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT" */
+ USHORT usPadding;
+} ATOM_CONNECTOR_DEVICE_TAG;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucNumberOfDevice;
+ UCHAR ucReserved;
+ ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation */
+} ATOM_CONNECTOR_DEVICE_TAG_RECORD;
+
+typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucConfigGPIOID;
+ UCHAR ucConfigGPIOState; /* Set to 1 when it's active high to enable external flow in */
+ UCHAR ucFlowinGPIPID;
+ UCHAR ucExtInGPIPID;
+} ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
+
+typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucCTL1GPIO_ID;
+ UCHAR ucCTL1GPIOState; /* Set to 1 when it's active high */
+ UCHAR ucCTL2GPIO_ID;
+ UCHAR ucCTL2GPIOState; /* Set to 1 when it's active high */
+ UCHAR ucCTL3GPIO_ID;
+ UCHAR ucCTL3GPIOState; /* Set to 1 when it's active high */
+ UCHAR ucCTLFPGA_IN_ID;
+ UCHAR ucPadding[3];
+} ATOM_ENCODER_FPGA_CONTROL_RECORD;
+
+typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
+ UCHAR ucTVActiveState; /* Indicating when the pin==0 or 1 when TV is connected */
+} ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
+
+typedef struct _ATOM_JTAG_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucTMSGPIO_ID;
+ UCHAR ucTMSGPIOState; /* Set to 1 when it's active high */
+ UCHAR ucTCKGPIO_ID;
+ UCHAR ucTCKGPIOState; /* Set to 1 when it's active high */
+ UCHAR ucTDOGPIO_ID;
+ UCHAR ucTDOGPIOState; /* Set to 1 when it's active high */
+ UCHAR ucTDIGPIO_ID;
+ UCHAR ucTDIGPIOState; /* Set to 1 when it's active high */
+ UCHAR ucPadding[2];
+} ATOM_JTAG_RECORD;
+
+/* The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually */
+typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR {
+ UCHAR ucGPIOID; /* GPIO_ID, find the corresponding ID in GPIO_LUT table */
+ UCHAR ucGPIO_PinState; /* Pin state showing how to set-up the pin */
+} ATOM_GPIO_PIN_CONTROL_PAIR;
+
+typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucFlags; /* Future expandability */
+ UCHAR ucNumberOfPins; /* Number of GPIO pins used to control the object */
+ ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; /* the real gpio pin pair determined by number of pins ucNumberOfPins */
+} ATOM_OBJECT_GPIO_CNTL_RECORD;
+
+/* Definitions for GPIO pin state */
+#define GPIO_PIN_TYPE_INPUT 0x00
+#define GPIO_PIN_TYPE_OUTPUT 0x10
+#define GPIO_PIN_TYPE_HW_CONTROL 0x20
+
+/* For GPIO_PIN_TYPE_OUTPUT the following is defined */
+#define GPIO_PIN_OUTPUT_STATE_MASK 0x01
+#define GPIO_PIN_OUTPUT_STATE_SHIFT 0
+#define GPIO_PIN_STATE_ACTIVE_LOW 0x0
+#define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
+
+typedef struct _ATOM_ENCODER_DVO_CF_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ULONG ulStrengthControl; /* DVOA strength control for CF */
+ UCHAR ucPadding[2];
+} ATOM_ENCODER_DVO_CF_RECORD;
+
+/* value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle */
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
+
+typedef struct _ATOM_CONNECTOR_CF_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usMaxPixClk;
+ UCHAR ucFlowCntlGpioId;
+ UCHAR ucSwapCntlGpioId;
+ UCHAR ucConnectedDvoBundle;
+ UCHAR ucPadding;
+} ATOM_CONNECTOR_CF_RECORD;
+
+typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_DTD_FORMAT asTiming;
+} ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
+
+typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader; /* ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE */
+ UCHAR ucSubConnectorType; /* CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A */
+ UCHAR ucReserved;
+} ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
+
+typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucMuxType; /* decides the number of ucMuxState entries: =0 no pin state, =1 single state with complement, >1 multiple states */
+ UCHAR ucMuxControlPin;
+ UCHAR ucMuxState[2]; /* for alignment purposes */
+} ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
+
+typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD {
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucMuxType;
+ UCHAR ucMuxControlPin;
+ UCHAR ucMuxState[2]; /* for alignment purposes */
+} ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
+
+/* define ucMuxType */
+#define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f
+#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01
+
+/****************************************************************************/
+/* ASIC voltage data table */
+/****************************************************************************/
+typedef struct _ATOM_VOLTAGE_INFO_HEADER {
+ USHORT usVDDCBaseLevel; /* In units of 50mv */
+ USHORT usReserved; /* For possible extension table offset */
+ UCHAR ucNumOfVoltageEntries;
+ UCHAR ucBytesPerVoltageEntry;
+ UCHAR ucVoltageStep; /* Indicates the increment of one step, in 0.5mv units */
+ UCHAR ucDefaultVoltageEntry;
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+} ATOM_VOLTAGE_INFO_HEADER;
+
+typedef struct _ATOM_VOLTAGE_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_INFO_HEADER viHeader;
+ UCHAR ucVoltageEntries[64]; /* 64 is for allocation, the actual number of entries is given by ucNumOfVoltageEntries*ucBytesPerVoltageEntry */
+} ATOM_VOLTAGE_INFO;
+
+typedef struct _ATOM_VOLTAGE_FORMULA {
+ USHORT usVoltageBaseLevel; /* In units of 1mv */
+ USHORT usVoltageStep; /* Indicates the increment of one step, in 1mv units */
+ UCHAR ucNumOfVoltageEntries; /* Number of voltage entries, which indicates the max voltage */
+ UCHAR ucFlag; /* bit0=0: step is 1mv; =1: 0.5mv */
+ UCHAR ucBaseVID; /* if there is no lookup table, VID = BaseVID + ( Vol - BaseLevel ) / VoltageStep */
+ UCHAR ucReserved;
+ UCHAR ucVIDAdjustEntries[32]; /* 32 is for allocation, the actual number of entries is given by ucNumOfVoltageEntries */
+} ATOM_VOLTAGE_FORMULA;
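+
+/*
+ * Worked example (not part of the vendor header), using the formula in the
+ * ucBaseVID comment with assumed values usVoltageBaseLevel = 800 (mV),
+ * usVoltageStep = 25 (mV) and ucBaseVID = 0x10: requesting 1100 mV gives
+ * VID = 0x10 + (1100 - 800) / 25 = 0x10 + 12 = 0x1C.  When a lookup table is
+ * present, ucVIDAdjustEntries[] is presumably used instead of the linear
+ * formula.
+ */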
+
+typedef struct _ATOM_VOLTAGE_CONTROL {
+ UCHAR ucVoltageControlId; /* Indicate it is controlled by I2C or GPIO or HW state machine */
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+ USHORT usGpioPin_AIndex; /* GPIO_PAD register index */
+ UCHAR ucGpioPinBitShift[9]; /* at most 8 pins supporting 255 VIDs, terminated with 0xff */
+ UCHAR ucReserved;
+} ATOM_VOLTAGE_CONTROL;
+
+/* Define ucVoltageControlId */
+#define VOLTAGE_CONTROLLED_BY_HW 0x00
+#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F
+#define VOLTAGE_CONTROLLED_BY_GPIO 0x80
+#define VOLTAGE_CONTROL_ID_LM64 0x01 /* I2C control, used for R5xx Core Voltage */
+#define VOLTAGE_CONTROL_ID_DAC 0x02 /* I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI */
+#define VOLTAGE_CONTROL_ID_VT116xM 0x03 /* I2C control, used for R6xx Core Voltage */
+#define VOLTAGE_CONTROL_ID_DS4402 0x04
+
+typedef struct _ATOM_VOLTAGE_OBJECT {
+ UCHAR ucVoltageType; /* Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI */
+ UCHAR ucSize; /* Size of Object */
+ ATOM_VOLTAGE_CONTROL asControl; /* describes how to control the voltage */
+ ATOM_VOLTAGE_FORMULA asFormula; /* Indicates how to convert a real voltage to a VID */
+} ATOM_VOLTAGE_OBJECT;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_OBJECT asVoltageObj[3]; /* Info for Voltage control */
+} ATOM_VOLTAGE_OBJECT_INFO;
+
+typedef struct _ATOM_LEAKID_VOLTAGE {
+ UCHAR ucLeakageId;
+ UCHAR ucReserved;
+ USHORT usVoltage;
+} ATOM_LEAKID_VOLTAGE;
+
+typedef struct _ATOM_ASIC_PROFILE_VOLTAGE {
+ UCHAR ucProfileId;
+ UCHAR ucReserved;
+ USHORT usSize;
+ USHORT usEfuseSpareStartAddr;
+ USHORT usFuseIndex[8]; /* from LSB to MSB, max 8 bits; terminated with 0xffff if there are fewer than 8 efuse IDs */
+ ATOM_LEAKID_VOLTAGE asLeakVol[2]; /* Leakage ID and related voltage */
+} ATOM_ASIC_PROFILE_VOLTAGE;
+
+/* ucProfileId */
+#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
+#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1
+#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2
+
+typedef struct _ATOM_ASIC_PROFILING_INFO {
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
+} ATOM_ASIC_PROFILING_INFO;
+
+typedef struct _ATOM_POWER_SOURCE_OBJECT {
+ UCHAR ucPwrSrcId; /* Power source */
+ UCHAR ucPwrSensorType; /* GPIO, I2C or none */
+ UCHAR ucPwrSensId; /* if GPIO detect, it is GPIO id, if I2C detect, it is I2C id */
+ UCHAR ucPwrSensSlaveAddr; /* Slave address if I2C detect */
+ UCHAR ucPwrSensRegIndex; /* I2C register Index if I2C detect */
+ UCHAR ucPwrSensRegBitMask; /* detect which bit is used if I2C detect */
+ UCHAR ucPwrSensActiveState; /* high active or low active */
+ UCHAR ucReserve[3]; /* reserve */
+ USHORT usSensPwr; /* in unit of watt */
+} ATOM_POWER_SOURCE_OBJECT;
+
+typedef struct _ATOM_POWER_SOURCE_INFO {
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ UCHAR asPwrbehave[16];
+ ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
+} ATOM_POWER_SOURCE_INFO;
+
+/* Define ucPwrSrcId */
+#define POWERSOURCE_PCIE_ID1 0x00
+#define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01
+#define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02
+#define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04
+#define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08
+
+/* define ucPwrSensorId */
+#define POWER_SENSOR_ALWAYS 0x00
+#define POWER_SENSOR_GPIO 0x01
+#define POWER_SENSOR_I2C 0x02
+
+/**************************************************************************/
+/* This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design */
+/* Memory SS Info Table */
+/* Define Memory Clock SS chip ID */
+#define ICS91719 1
+#define ICS91720 2
+
+/* Define one structure to inform SW of a "block of data" to write to the external SS chip via the I2C protocol */
+typedef struct _ATOM_I2C_DATA_RECORD {
+ UCHAR ucNunberOfBytes; /* Indicates how many bytes SW needs to write to the external ASIC for one block, besides "Start" and "Stop" */
+ UCHAR ucI2CData[1]; /* I2C data in bytes, should be less than 16 bytes usually */
+} ATOM_I2C_DATA_RECORD;
+
+/* Define one structure to inform SW how many blocks of data to write to the external SS chip via the I2C protocol, in addition to other information */
+typedef struct _ATOM_I2C_DEVICE_SETUP_INFO {
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* I2C line and HW/SW assisted cap. */
+ UCHAR ucSSChipID; /* SS chip being used */
+ UCHAR ucSSChipSlaveAddr; /* Slave Address to set up this SS chip */
+ UCHAR ucNumOfI2CDataRecords; /* number of data blocks */
+ ATOM_I2C_DATA_RECORD asI2CData[1];
+} ATOM_I2C_DEVICE_SETUP_INFO;
+
+/* ========================================================================================== */
+typedef struct _ATOM_ASIC_MVDD_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
+} ATOM_ASIC_MVDD_INFO;
+
+/* ========================================================================================== */
+#define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO
+
+/* ========================================================================================== */
+/**************************************************************************/
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT {
+ ULONG ulTargetClockRange; /* Clock out frequency (VCO), in units of 10kHz */
+ USHORT usSpreadSpectrumPercentage; /* in units of 0.01% */
+ USHORT usSpreadRateInKhz; /* in units of kHz, modulation frequency */
+ UCHAR ucClockIndication; /* Indicate which clock source needs SS */
+ UCHAR ucSpreadSpectrumMode; /* Bit1=0 Down Spread,=1 Center Spread. */
+ UCHAR ucReserved[2];
+} ATOM_ASIC_SS_ASSIGNMENT;
+
+/* Define ucSpreadSpectrumType */
+#define ASIC_INTERNAL_MEMORY_SS 1
+#define ASIC_INTERNAL_ENGINE_SS 2
+#define ASIC_INTERNAL_UVD_SS 3
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
+} ATOM_ASIC_INTERNAL_SS_INFO;
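+
+/*
+ * Usage sketch (not part of the vendor header): asSpreadSpectrum[4] is sized
+ * for allocation only, so a reader typically derives the real entry count
+ * from sHeader.usStructureSize and then matches ucClockIndication against
+ * the ASIC_INTERNAL_*_SS value of the clock being programmed.  Variable
+ * names are hypothetical.
+ *
+ *	int count = (ss_info->sHeader.usStructureSize -
+ *		     sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT);
+ *	int i;
+ *
+ *	for (i = 0; i < count; i++)
+ *		if (ss_info->asSpreadSpectrum[i].ucClockIndication == ASIC_INTERNAL_ENGINE_SS)
+ *			break;	// then apply usSpreadSpectrumPercentage and usSpreadRateInKhz
+ */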
+
+/* ==============================Scratch Pad Definition Portion=============================== */
+#define ATOM_DEVICE_CONNECT_INFO_DEF 0
+#define ATOM_ROM_LOCATION_DEF 1
+#define ATOM_TV_STANDARD_DEF 2
+#define ATOM_ACTIVE_INFO_DEF 3
+#define ATOM_LCD_INFO_DEF 4
+#define ATOM_DOS_REQ_INFO_DEF 5
+#define ATOM_ACC_CHANGE_INFO_DEF 6
+#define ATOM_DOS_MODE_INFO_DEF 7
+#define ATOM_I2C_CHANNEL_STATUS_DEF 8
+#define ATOM_I2C_CHANNEL_STATUS1_DEF 9
+
+/* BIOS_0_SCRATCH Definition */
+#define ATOM_S0_CRT1_MONO 0x00000001L
+#define ATOM_S0_CRT1_COLOR 0x00000002L
+#define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE_A 0x00000004L
+#define ATOM_S0_TV1_SVIDEO_A 0x00000008L
+#define ATOM_S0_TV1_MASK_A (ATOM_S0_TV1_COMPOSITE_A+ATOM_S0_TV1_SVIDEO_A)
+
+#define ATOM_S0_CV_A 0x00000010L
+#define ATOM_S0_CV_DIN_A 0x00000020L
+#define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
+
+#define ATOM_S0_CRT2_MONO 0x00000100L
+#define ATOM_S0_CRT2_COLOR 0x00000200L
+#define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE 0x00000400L
+#define ATOM_S0_TV1_SVIDEO 0x00000800L
+#define ATOM_S0_TV1_SCART 0x00004000L
+#define ATOM_S0_TV1_MASK (ATOM_S0_TV1_COMPOSITE+ATOM_S0_TV1_SVIDEO+ATOM_S0_TV1_SCART)
+
+#define ATOM_S0_CV 0x00001000L
+#define ATOM_S0_CV_DIN 0x00002000L
+#define ATOM_S0_CV_MASK (ATOM_S0_CV+ATOM_S0_CV_DIN)
+
+#define ATOM_S0_DFP1 0x00010000L
+#define ATOM_S0_DFP2 0x00020000L
+#define ATOM_S0_LCD1 0x00040000L
+#define ATOM_S0_LCD2 0x00080000L
+#define ATOM_S0_TV2 0x00100000L
+#define ATOM_S0_DFP3 0x00200000L
+#define ATOM_S0_DFP4 0x00400000L
+#define ATOM_S0_DFP5 0x00800000L
+
+#define ATOM_S0_DFP_MASK \
+ (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5)
+
+#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L /* If set, indicates we are running a PCIE asic with */
+ /* the FAD/HDP reg access bug. Bit is read by DAL */
+
+#define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L
+#define ATOM_S0_THERMAL_STATE_SHIFT 26
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
+#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
+
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
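+
+/*
+ * Decoding sketch (not part of the vendor header): BIOS_0_SCRATCH is read as
+ * a 32-bit value and tested against the masks above; the register-read helper
+ * below is hypothetical.
+ *
+ *	ULONG s0 = read_bios_scratch(0);
+ *	int dfp1_connected = (s0 & ATOM_S0_DFP1) != 0;
+ *	int power_state = (s0 & ATOM_S0_SYSTEM_POWER_STATE_MASK) >>
+ *			  ATOM_S0_SYSTEM_POWER_STATE_SHIFT;	// e.g. ..._VALUE_AC
+ *	int thermal_state = (s0 & ATOM_S0_THERMAL_STATE_MASK) >>
+ *			    ATOM_S0_THERMAL_STATE_SHIFT;
+ */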
+
+/* Byte aligned definition for BIOS usage */
+#define ATOM_S0_CRT1_MONOb0 0x01
+#define ATOM_S0_CRT1_COLORb0 0x02
+#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
+
+#define ATOM_S0_TV1_COMPOSITEb0 0x04
+#define ATOM_S0_TV1_SVIDEOb0 0x08
+#define ATOM_S0_TV1_MASKb0 (ATOM_S0_TV1_COMPOSITEb0+ATOM_S0_TV1_SVIDEOb0)
+
+#define ATOM_S0_CVb0 0x10
+#define ATOM_S0_CV_DINb0 0x20
+#define ATOM_S0_CV_MASKb0 (ATOM_S0_CVb0+ATOM_S0_CV_DINb0)
+
+#define ATOM_S0_CRT2_MONOb1 0x01
+#define ATOM_S0_CRT2_COLORb1 0x02
+#define ATOM_S0_CRT2_MASKb1 (ATOM_S0_CRT2_MONOb1+ATOM_S0_CRT2_COLORb1)
+
+#define ATOM_S0_TV1_COMPOSITEb1 0x04
+#define ATOM_S0_TV1_SVIDEOb1 0x08
+#define ATOM_S0_TV1_SCARTb1 0x40
+#define ATOM_S0_TV1_MASKb1 (ATOM_S0_TV1_COMPOSITEb1+ATOM_S0_TV1_SVIDEOb1+ATOM_S0_TV1_SCARTb1)
+
+#define ATOM_S0_CVb1 0x10
+#define ATOM_S0_CV_DINb1 0x20
+#define ATOM_S0_CV_MASKb1 (ATOM_S0_CVb1+ATOM_S0_CV_DINb1)
+
+#define ATOM_S0_DFP1b2 0x01
+#define ATOM_S0_DFP2b2 0x02
+#define ATOM_S0_LCD1b2 0x04
+#define ATOM_S0_LCD2b2 0x08
+#define ATOM_S0_TV2b2 0x10
+#define ATOM_S0_DFP3b2 0x20
+
+#define ATOM_S0_THERMAL_STATE_MASKb3 0x1C
+#define ATOM_S0_THERMAL_STATE_SHIFTb3 2
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
+#define ATOM_S0_LCD1_SHIFT 18
+
+/* BIOS_1_SCRATCH Definition */
+#define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL
+#define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L
+
+/* BIOS_2_SCRATCH Definition */
+#define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL
+#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L
+#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8
+
+#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
+#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
+#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
+#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
+#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
+#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
+#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
+#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
+#define ATOM_S2_CV_DPMS_STATE 0x01000000L
+#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
+#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
+#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
+
+#define ATOM_S2_DFP_DPM_STATE \
+ (ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | \
+ ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | \
+ ATOM_S2_DFP5_DPMS_STATE)
+
+#define ATOM_S2_DEVICE_DPMS_STATE \
+ (ATOM_S2_CRT1_DPMS_STATE + ATOM_S2_LCD1_DPMS_STATE + \
+	 ATOM_S2_TV1_DPMS_STATE + ATOM_S2_DFP_DPM_STATE + \
+ ATOM_S2_CRT2_DPMS_STATE + ATOM_S2_LCD2_DPMS_STATE + \
+ ATOM_S2_TV2_DPMS_STATE + ATOM_S2_CV_DPMS_STATE)
+
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L
+
+#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L
+
+#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0
+#define ATOM_S2_DISPLAY_ROTATION_90_DEGREE 0x1
+#define ATOM_S2_DISPLAY_ROTATION_180_DEGREE 0x2
+#define ATOM_S2_DISPLAY_ROTATION_270_DEGREE 0x3
+#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
+#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L
+
+/* Byte aligned definition for BIOS usage */
+#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
+#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
+#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
+#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
+#define ATOM_S2_TV1_DPMS_STATEb2 0x04
+#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
+#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
+#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
+#define ATOM_S2_TV2_DPMS_STATEb2 0x40
+#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
+#define ATOM_S2_CV_DPMS_STATEb3 0x01
+#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
+#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
+#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
+
+#define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3 0x10
+#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
+#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
+
+/* BIOS_3_SCRATCH Definition */
+#define ATOM_S3_CRT1_ACTIVE 0x00000001L
+#define ATOM_S3_LCD1_ACTIVE 0x00000002L
+#define ATOM_S3_TV1_ACTIVE 0x00000004L
+#define ATOM_S3_DFP1_ACTIVE 0x00000008L
+#define ATOM_S3_CRT2_ACTIVE 0x00000010L
+#define ATOM_S3_LCD2_ACTIVE 0x00000020L
+#define ATOM_S3_TV2_ACTIVE 0x00000040L
+#define ATOM_S3_DFP2_ACTIVE 0x00000080L
+#define ATOM_S3_CV_ACTIVE 0x00000100L
+#define ATOM_S3_DFP3_ACTIVE 0x00000200L
+#define ATOM_S3_DFP4_ACTIVE 0x00000400L
+#define ATOM_S3_DFP5_ACTIVE 0x00000800L
+
+#define ATOM_S3_DEVICE_ACTIVE_MASK 0x000003FFL
+
+#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L
+#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
+
+#define ATOM_S3_CRT1_CRTC_ACTIVE 0x00010000L
+#define ATOM_S3_LCD1_CRTC_ACTIVE 0x00020000L
+#define ATOM_S3_TV1_CRTC_ACTIVE 0x00040000L
+#define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L
+#define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L
+#define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L
+#define ATOM_S3_TV2_CRTC_ACTIVE 0x00400000L
+#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L
+#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L
+#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L
+#define ATOM_S3_DFP4_CRTC_ACTIVE 0x04000000L
+#define ATOM_S3_DFP5_CRTC_ACTIVE 0x08000000L
+
+#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
+#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L
+
+/* Byte aligned definition for BIOS usage */
+#define ATOM_S3_CRT1_ACTIVEb0 0x01
+#define ATOM_S3_LCD1_ACTIVEb0 0x02
+#define ATOM_S3_TV1_ACTIVEb0 0x04
+#define ATOM_S3_DFP1_ACTIVEb0 0x08
+#define ATOM_S3_CRT2_ACTIVEb0 0x10
+#define ATOM_S3_LCD2_ACTIVEb0 0x20
+#define ATOM_S3_TV2_ACTIVEb0 0x40
+#define ATOM_S3_DFP2_ACTIVEb0 0x80
+#define ATOM_S3_CV_ACTIVEb1 0x01
+#define ATOM_S3_DFP3_ACTIVEb1 0x02
+#define ATOM_S3_DFP4_ACTIVEb1 0x04
+#define ATOM_S3_DFP5_ACTIVEb1 0x08
+
+#define ATOM_S3_ACTIVE_CRTC1w0 0xFFF
+
+#define ATOM_S3_CRT1_CRTC_ACTIVEb2 0x01
+#define ATOM_S3_LCD1_CRTC_ACTIVEb2 0x02
+#define ATOM_S3_TV1_CRTC_ACTIVEb2 0x04
+#define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08
+#define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10
+#define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20
+#define ATOM_S3_TV2_CRTC_ACTIVEb2 0x40
+#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80
+#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01
+#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02
+#define ATOM_S3_DFP4_CRTC_ACTIVEb3 0x04
+#define ATOM_S3_DFP5_CRTC_ACTIVEb3 0x08
+
+#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF
+
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
+#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
+
+/* BIOS_4_SCRATCH Definition */
+#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL
+#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
+#define ATOM_S4_LCD1_REFRESH_SHIFT 8
+
+/* Byte aligned definition for BIOS usage */
+#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
+#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
+#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
+
+/* BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! */
+#define ATOM_S5_DOS_REQ_CRT1b0 0x01
+#define ATOM_S5_DOS_REQ_LCD1b0 0x02
+#define ATOM_S5_DOS_REQ_TV1b0 0x04
+#define ATOM_S5_DOS_REQ_DFP1b0 0x08
+#define ATOM_S5_DOS_REQ_CRT2b0 0x10
+#define ATOM_S5_DOS_REQ_LCD2b0 0x20
+#define ATOM_S5_DOS_REQ_TV2b0 0x40
+#define ATOM_S5_DOS_REQ_DFP2b0 0x80
+#define ATOM_S5_DOS_REQ_CVb1 0x01
+#define ATOM_S5_DOS_REQ_DFP3b1 0x02
+#define ATOM_S5_DOS_REQ_DFP4b1 0x04
+#define ATOM_S5_DOS_REQ_DFP5b1 0x08
+
+#define ATOM_S5_DOS_REQ_DEVICEw0 0x03FF
+
+#define ATOM_S5_DOS_REQ_CRT1 0x0001
+#define ATOM_S5_DOS_REQ_LCD1 0x0002
+#define ATOM_S5_DOS_REQ_TV1 0x0004
+#define ATOM_S5_DOS_REQ_DFP1 0x0008
+#define ATOM_S5_DOS_REQ_CRT2 0x0010
+#define ATOM_S5_DOS_REQ_LCD2 0x0020
+#define ATOM_S5_DOS_REQ_TV2 0x0040
+#define ATOM_S5_DOS_REQ_DFP2 0x0080
+#define ATOM_S5_DOS_REQ_CV 0x0100
+#define ATOM_S5_DOS_REQ_DFP3 0x0200
+#define ATOM_S5_DOS_REQ_DFP4 0x0400
+#define ATOM_S5_DOS_REQ_DFP5 0x0800
+
+#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0
+#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0
+#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0
+#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1
+#define ATOM_S5_DOS_FORCE_DEVICEw1 \
+ (ATOM_S5_DOS_FORCE_CRT1b2 + ATOM_S5_DOS_FORCE_TV1b2 + \
+ ATOM_S5_DOS_FORCE_CRT2b2 + (ATOM_S5_DOS_FORCE_CVb3 << 8))
+
+/* BIOS_6_SCRATCH Definition */
+#define ATOM_S6_DEVICE_CHANGE 0x00000001L
+#define ATOM_S6_SCALER_CHANGE 0x00000002L
+#define ATOM_S6_LID_CHANGE 0x00000004L
+#define ATOM_S6_DOCKING_CHANGE 0x00000008L
+#define ATOM_S6_ACC_MODE 0x00000010L
+#define ATOM_S6_EXT_DESKTOP_MODE 0x00000020L
+#define ATOM_S6_LID_STATE 0x00000040L
+#define ATOM_S6_DOCK_STATE 0x00000080L
+#define ATOM_S6_CRITICAL_STATE 0x00000100L
+#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L
+#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L
+#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L /* Normal expansion Request bit for LCD */
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L /* Aspect ratio expansion Request bit for LCD */
+
+#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_H_expansion */
+#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_V_expansion */
+
+#define ATOM_S6_ACC_REQ_CRT1 0x00010000L
+#define ATOM_S6_ACC_REQ_LCD1 0x00020000L
+#define ATOM_S6_ACC_REQ_TV1 0x00040000L
+#define ATOM_S6_ACC_REQ_DFP1 0x00080000L
+#define ATOM_S6_ACC_REQ_CRT2 0x00100000L
+#define ATOM_S6_ACC_REQ_LCD2 0x00200000L
+#define ATOM_S6_ACC_REQ_TV2 0x00400000L
+#define ATOM_S6_ACC_REQ_DFP2 0x00800000L
+#define ATOM_S6_ACC_REQ_CV 0x01000000L
+#define ATOM_S6_ACC_REQ_DFP3 0x02000000L
+#define ATOM_S6_ACC_REQ_DFP4 0x04000000L
+#define ATOM_S6_ACC_REQ_DFP5 0x08000000L
+
+#define ATOM_S6_ACC_REQ_MASK 0x0FFF0000L
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE 0x10000000L
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH 0x20000000L
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
+
+/* Byte aligned definition for BIOS usage */
+#define ATOM_S6_DEVICE_CHANGEb0 0x01
+#define ATOM_S6_SCALER_CHANGEb0 0x02
+#define ATOM_S6_LID_CHANGEb0 0x04
+#define ATOM_S6_DOCKING_CHANGEb0 0x08
+#define ATOM_S6_ACC_MODEb0 0x10
+#define ATOM_S6_EXT_DESKTOP_MODEb0 0x20
+#define ATOM_S6_LID_STATEb0 0x40
+#define ATOM_S6_DOCK_STATEb0 0x80
+#define ATOM_S6_CRITICAL_STATEb1 0x01
+#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
+#define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04
+#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
+#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
+
+#define ATOM_S6_ACC_REQ_CRT1b2 0x01
+#define ATOM_S6_ACC_REQ_LCD1b2 0x02
+#define ATOM_S6_ACC_REQ_TV1b2 0x04
+#define ATOM_S6_ACC_REQ_DFP1b2 0x08
+#define ATOM_S6_ACC_REQ_CRT2b2 0x10
+#define ATOM_S6_ACC_REQ_LCD2b2 0x20
+#define ATOM_S6_ACC_REQ_TV2b2 0x40
+#define ATOM_S6_ACC_REQ_DFP2b2 0x80
+#define ATOM_S6_ACC_REQ_CVb3 0x01
+#define ATOM_S6_ACC_REQ_DFP3b3 0x02
+#define ATOM_S6_ACC_REQ_DFP4b3 0x04
+#define ATOM_S6_ACC_REQ_DFP5b3 0x08
+
+#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCHb3 0x20
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGEb3 0x40
+#define ATOM_S6_CONFIG_DISPLAY_CHANGEb3 0x80
+
+#define ATOM_S6_DEVICE_CHANGE_SHIFT 0
+#define ATOM_S6_SCALER_CHANGE_SHIFT 1
+#define ATOM_S6_LID_CHANGE_SHIFT 2
+#define ATOM_S6_DOCKING_CHANGE_SHIFT 3
+#define ATOM_S6_ACC_MODE_SHIFT 4
+#define ATOM_S6_EXT_DESKTOP_MODE_SHIFT 5
+#define ATOM_S6_LID_STATE_SHIFT 6
+#define ATOM_S6_DOCK_STATE_SHIFT 7
+#define ATOM_S6_CRITICAL_STATE_SHIFT 8
+#define ATOM_S6_HW_I2C_BUSY_STATE_SHIFT 9
+#define ATOM_S6_THERMAL_STATE_CHANGE_SHIFT 10
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT 11
+#define ATOM_S6_REQ_SCALER_SHIFT 12
+#define ATOM_S6_REQ_SCALER_ARATIO_SHIFT 13
+#define ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT 14
+#define ATOM_S6_I2C_STATE_CHANGE_SHIFT 15
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT 28
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH_SHIFT 29
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31
+
+/* BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! */
+#define ATOM_S7_DOS_MODE_TYPEb0 0x03
+#define ATOM_S7_DOS_MODE_VGAb0 0x00
+#define ATOM_S7_DOS_MODE_VESAb0 0x01
+#define ATOM_S7_DOS_MODE_EXTb0 0x02
+#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C
+#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0
+#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01
+#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF
+
+#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
+
+/* BIOS_8_SCRATCH Definition */
+#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF
+#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
+
+#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0
+#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16
+
+/* BIOS_9_SCRATCH Definition */
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
+#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
+#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16
+#endif
+
+#define ATOM_FLAG_SET 0x20
+#define ATOM_FLAG_CLEAR 0
+#define CLEAR_ATOM_S6_ACC_MODE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
+#define SET_ATOM_S6_DEVICE_CHANGE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SCALER_CHANGE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_LID_CHANGE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_LID_STATE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) |\
+ ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_LID_STATE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_DOCK_CHANGE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8)| \
+ ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_DOCK_STATE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_DOCK_STATE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_THERMAL_STATE_CHANGE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_CRITICAL_STATE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_CRITICAL_STATE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_REQ_SCALER \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_REQ_SCALER \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_REQ_SCALER_ARATIO \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_I2C_STATE_CHANGE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DISPLAY_STATE_CHANGE \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DEVICE_RECONFIG \
+ ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
+ ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S0_LCD1 \
+ ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 ) | \
+ ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
+#define SET_ATOM_S7_DOS_8BIT_DAC_EN \
+ ((ATOM_DOS_MODE_INFO_DEF << 8) | \
+ ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN \
+ ((ATOM_DOS_MODE_INFO_DEF << 8) | \
+ ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
+
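+/*
+ * Encoding note (not part of the vendor header): the SET_/CLEAR_ values above
+ * pack three fields into one value -- bits [15:8] select the scratch register
+ * definition (e.g. ATOM_ACC_CHANGE_INFO_DEF), bits [4:0] give the bit position
+ * within that register, and bit 5 (ATOM_FLAG_SET) says whether the bit is to
+ * be set or cleared.  A hypothetical decoder:
+ *
+ *	UCHAR which_scratch = value >> 8;
+ *	UCHAR bit_position = value & 0x1F;
+ *	UCHAR do_set = (value & ATOM_FLAG_SET) != 0;
+ */
+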
+/****************************************************************************/
+/* Portion II: Definitions only used in Driver */
+/****************************************************************************/
+
+/* Macros used by driver */
+
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char *)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES *)0)->FieldName)-(char *)0)/sizeof(USHORT))
+
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
+
+#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
+#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
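+
+/*
+ * Usage sketch (not part of the vendor header): GetIndexIntoMasterTable()
+ * turns a field name of the master command/data table list into the USHORT
+ * slot index, and the revision macros mask off the reserved top bits of the
+ * table header revision bytes.  This assumes the master data table list
+ * declares a VRAM_Info entry (as earlier in this header); the
+ * get_data_table() lookup helper is hypothetical.
+ *
+ *	int idx = GetIndexIntoMasterTable(DATA, VRAM_Info);
+ *	ATOM_COMMON_TABLE_HEADER *hdr = get_data_table(ctx, idx);
+ *	UCHAR major = GET_DATA_TABLE_MAJOR_REVISION(hdr);
+ *	UCHAR minor = GET_DATA_TABLE_MINOR_REVISION(hdr);
+ */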
+
+/****************************************************************************/
+/* Portion III: Definitions only used in VBIOS */
+/****************************************************************************/
+#define ATOM_DAC_SRC 0x80
+#define ATOM_SRC_DAC1 0
+#define ATOM_SRC_DAC2 0x80
+
+#ifdef UEFI_BUILD
+#define USHORT UTEMP
+#endif
+
+typedef struct _MEMORY_PLLINIT_PARAMETERS {
+ ULONG ulTargetMemoryClock; /* In 10Khz unit */
+ UCHAR ucAction; /* not defined yet */
+ UCHAR ucFbDiv_Hi; /* Fbdiv Hi byte */
+ UCHAR ucFbDiv; /* FB value */
+ UCHAR ucPostDiv; /* Post div */
+} MEMORY_PLLINIT_PARAMETERS;
+
+#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS
+
+#define GPIO_PIN_WRITE 0x01
+#define GPIO_PIN_READ 0x00
+
+typedef struct _GPIO_PIN_CONTROL_PARAMETERS {
+ UCHAR ucGPIO_ID; /* return value, read from GPIO pins */
+ UCHAR ucGPIOBitShift; /* defines which bit in ucGPIOBitVal needs to be updated */
+ UCHAR ucGPIOBitVal; /* Set/Reset corresponding bit defined in ucGPIOBitMask */
+ UCHAR ucAction; /* =GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write */
+} GPIO_PIN_CONTROL_PARAMETERS;
+
+typedef struct _ENABLE_SCALER_PARAMETERS {
+ UCHAR ucScaler; /* ATOM_SCALER1, ATOM_SCALER2 */
+ UCHAR ucEnable; /* ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION */
+ UCHAR ucTVStandard; /* */
+ UCHAR ucPadding[1];
+} ENABLE_SCALER_PARAMETERS;
+#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
+
+/* ucEnable: */
+#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0
+#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1
+#define SCALER_ENABLE_2TAP_ALPHA_MODE 2
+#define SCALER_ENABLE_MULTITAP_MODE 3
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS {
+ ULONG usHWIconHorzVertPosn; /* Hardware Icon Horizontal/Vertical position */
+ UCHAR ucHWIconVertOffset; /* Hardware Icon Vertical offset */
+ UCHAR ucHWIconHorzOffset; /* Hardware Icon Horizontal offset */
+ UCHAR ucSelection; /* ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 */
+ UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
+} ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION {
+ ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
+ ENABLE_CRTC_PARAMETERS sReserved;
+} ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS {
+ USHORT usHight; /* Image Height */
+ USHORT usWidth; /* Image Width */
+ UCHAR ucSurface; /* Surface 1 or 2 */
+ UCHAR ucPadding[3];
+} ENABLE_GRAPH_SURFACE_PARAMETERS;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 {
+ USHORT usHight; /* Image Height */
+ USHORT usWidth; /* Image Width */
+ UCHAR ucSurface; /* Surface 1 or 2 */
+ UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
+ UCHAR ucPadding[2];
+} ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION {
+ ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
+ ENABLE_YUV_PS_ALLOCATION sReserved; /* Don't set this one */
+} ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
+
+typedef struct _MEMORY_CLEAN_UP_PARAMETERS {
+ USHORT usMemoryStart; /* on an 8Kb boundary, offset from the memory base address */
+ USHORT usMemorySize; /* aligned to 8Kb blocks */
+} MEMORY_CLEAN_UP_PARAMETERS;
+#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
+
+typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS {
+ USHORT usX_Size; /* When used as an input parameter, usX_Size indicates which CRTC */
+ USHORT usY_Size;
+} GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
+
+typedef struct _INDIRECT_IO_ACCESS {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR IOAccessSequence[256];
+} INDIRECT_IO_ACCESS;
+
+#define INDIRECT_READ 0x00
+#define INDIRECT_WRITE 0x80
+
+#define INDIRECT_IO_MM 0
+#define INDIRECT_IO_PLL 1
+#define INDIRECT_IO_MC 2
+#define INDIRECT_IO_PCIE 3
+#define INDIRECT_IO_PCIEP 4
+#define INDIRECT_IO_NBMISC 5
+
+#define INDIRECT_IO_PLL_READ INDIRECT_IO_PLL | INDIRECT_READ
+#define INDIRECT_IO_PLL_WRITE INDIRECT_IO_PLL | INDIRECT_WRITE
+#define INDIRECT_IO_MC_READ INDIRECT_IO_MC | INDIRECT_READ
+#define INDIRECT_IO_MC_WRITE INDIRECT_IO_MC | INDIRECT_WRITE
+#define INDIRECT_IO_PCIE_READ INDIRECT_IO_PCIE | INDIRECT_READ
+#define INDIRECT_IO_PCIE_WRITE INDIRECT_IO_PCIE | INDIRECT_WRITE
+#define INDIRECT_IO_PCIEP_READ INDIRECT_IO_PCIEP | INDIRECT_READ
+#define INDIRECT_IO_PCIEP_WRITE INDIRECT_IO_PCIEP | INDIRECT_WRITE
+#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ
+#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE
+
+typedef struct _ATOM_OEM_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+} ATOM_OEM_INFO;
+
+typedef struct _ATOM_TV_MODE {
+ UCHAR ucVMode_Num; /* Video mode number */
+ UCHAR ucTV_Mode_Num; /* Internal TV mode number */
+} ATOM_TV_MODE;
+
+typedef struct _ATOM_BIOS_INT_TVSTD_MODE {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usTV_Mode_LUT_Offset; /* Pointer to standard to internal number conversion table */
+ USHORT usTV_FIFO_Offset; /* Pointer to FIFO entry table */
+ USHORT usNTSC_Tbl_Offset; /* Pointer to SDTV_Mode_NTSC table */
+ USHORT usPAL_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */
+ USHORT usCV_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */
+} ATOM_BIOS_INT_TVSTD_MODE;
+
+typedef struct _ATOM_TV_MODE_SCALER_PTR {
+ USHORT ucFilter0_Offset; /* Pointer to filter format 0 coefficients */
+ USHORT usFilter1_Offset; /* Pointer to filter format 0 coefficients */
+ UCHAR ucTV_Mode_Num;
+} ATOM_TV_MODE_SCALER_PTR;
+
+typedef struct _ATOM_STANDARD_VESA_TIMING {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT aModeTimings[16]; /* 16 is not the real array number, just for initial allocation */
+} ATOM_STANDARD_VESA_TIMING;
+
+typedef struct _ATOM_STD_FORMAT {
+ USHORT usSTD_HDisp;
+ USHORT usSTD_VDisp;
+ USHORT usSTD_RefreshRate;
+ USHORT usReserved;
+} ATOM_STD_FORMAT;
+
+typedef struct _ATOM_VESA_TO_EXTENDED_MODE {
+ USHORT usVESA_ModeNumber;
+ USHORT usExtendedModeNumber;
+} ATOM_VESA_TO_EXTENDED_MODE;
+
+typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
+} ATOM_VESA_TO_INTENAL_MODE_LUT;
+
+/*************** ATOM Memory Related Data Structure ***********************/
+typedef struct _ATOM_MEMORY_VENDOR_BLOCK {
+ UCHAR ucMemoryType;
+ UCHAR ucMemoryVendor;
+ UCHAR ucAdjMCId;
+ UCHAR ucDynClkId;
+ ULONG ulDllResetClkRange;
+} ATOM_MEMORY_VENDOR_BLOCK;
+
+typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG {
+#if ATOM_BIG_ENDIAN
+ ULONG ucMemBlkId:8;
+ ULONG ulMemClockRange:24;
+#else
+ ULONG ulMemClockRange:24;
+ ULONG ucMemBlkId:8;
+#endif
+} ATOM_MEMORY_SETTING_ID_CONFIG;
+
+typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS {
+ ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
+ ULONG ulAccess;
+} ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
+
+typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK {
+ ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
+ ULONG aulMemData[1];
+} ATOM_MEMORY_SETTING_DATA_BLOCK;
+
+typedef struct _ATOM_INIT_REG_INDEX_FORMAT {
+ USHORT usRegIndex; /* MC register index */
+ UCHAR ucPreRegDataLength; /* offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf */
+} ATOM_INIT_REG_INDEX_FORMAT;
+
+typedef struct _ATOM_INIT_REG_BLOCK {
+ USHORT usRegIndexTblSize; /* size of asRegIndexBuf */
+ USHORT usRegDataBlkSize; /* size of ATOM_MEMORY_SETTING_DATA_BLOCK */
+ ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
+ ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
+} ATOM_INIT_REG_BLOCK;
+
+#define END_OF_REG_INDEX_BLOCK 0x0ffff
+#define END_OF_REG_DATA_BLOCK 0x00000000
+#define ATOM_INIT_REG_MASK_FLAG 0x80
+#define CLOCK_RANGE_HIGHEST 0x00ffffff
+
+#define VALUE_DWORD SIZEOF ULONG
+#define VALUE_SAME_AS_ABOVE 0
+#define VALUE_MASK_DWORD 0x84
+
+#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1)
+#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
+#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
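+
+/*
+ * Parsing sketch (not part of the vendor header), assuming an
+ * ATOM_INIT_REG_BLOCK is laid out as two packed arrays: register index
+ * entries terminated by END_OF_REG_INDEX_BLOCK, then data blocks of
+ * usRegDataBlkSize bytes each, terminated by END_OF_REG_DATA_BLOCK in the
+ * leading ID dword.  Pointer names are hypothetical.
+ *
+ *	ATOM_INIT_REG_INDEX_FORMAT *idx = blk->asRegIndexBuf;
+ *	int num_regs = 0;
+ *	while (idx[num_regs].usRegIndex != END_OF_REG_INDEX_BLOCK)
+ *		num_regs++;				// MC registers per data block
+ *
+ *	UCHAR *data = (UCHAR *)blk->asRegIndexBuf + blk->usRegIndexTblSize;
+ *	ATOM_MEMORY_SETTING_DATA_BLOCK *set = (ATOM_MEMORY_SETTING_DATA_BLOCK *)data;
+ *	while (set->ulMemoryID.ulAccess != END_OF_REG_DATA_BLOCK) {
+ *		// slAccess.ulMemClockRange / ucMemBlkId select the matching block
+ *		set = (ATOM_MEMORY_SETTING_DATA_BLOCK *)((UCHAR *)set + blk->usRegDataBlkSize);
+ *	}
+ */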
+
+typedef struct _ATOM_MC_INIT_PARAM_TABLE {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usAdjustARB_SEQDataOffset;
+ USHORT usMCInitMemTypeTblOffset;
+ USHORT usMCInitCommonTblOffset;
+ USHORT usMCInitPowerDownTblOffset;
+ ULONG ulARB_SEQDataBuf[32];
+ ATOM_INIT_REG_BLOCK asMCInitMemType;
+ ATOM_INIT_REG_BLOCK asMCInitCommon;
+} ATOM_MC_INIT_PARAM_TABLE;
+
+#define _4Mx16 0x2
+#define _4Mx32 0x3
+#define _8Mx16 0x12
+#define _8Mx32 0x13
+#define _16Mx16 0x22
+#define _16Mx32 0x23
+#define _32Mx16 0x32
+#define _32Mx32 0x33
+#define _64Mx8 0x41
+#define _64Mx16 0x42
+
+#define SAMSUNG 0x1
+#define INFINEON 0x2
+#define ELPIDA 0x3
+#define ETRON 0x4
+#define NANYA 0x5
+#define HYNIX 0x6
+#define MOSEL 0x7
+#define WINBOND 0x8
+#define ESMT 0x9
+#define MICRON 0xF
+
+#define QIMONDA INFINEON
+#define PROMOS MOSEL
+
+/* ///////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// */
+
+#define UCODE_ROM_START_ADDRESS 0x1c000
+#define UCODE_SIGNATURE 0x4375434d /* 'MCuC' - MC uCode */
+
+/* uCode block header for reference */
+
+typedef struct _MCuCodeHeader {
+ ULONG ulSignature;
+ UCHAR ucRevision;
+ UCHAR ucChecksum;
+ UCHAR ucReserved1;
+ UCHAR ucReserved2;
+ USHORT usParametersLength;
+ USHORT usUCodeLength;
+ USHORT usReserved1;
+ USHORT usReserved2;
+} MCuCodeHeader;
+
+/* //////////////////////////////////////////////////////////////////////////////// */
+
+#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16
+
+#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF
+typedef struct _ATOM_VRAM_MODULE_V1 {
+ ULONG ulReserved;
+ USHORT usEMRSValue;
+ USHORT usMRSValue;
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell which memory module is currently in use */
+ UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; */
+ UCHAR ucMemoryVenderID; /* Predefined, never changes across designs or memory type/vendor */
+ UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
+ UCHAR ucRow; /* Number of rows, in power of 2 */
+ UCHAR ucColumn; /* Number of columns, in power of 2 */
+ UCHAR ucBank; /* Number of banks */
+ UCHAR ucRank; /* Number of Rank, in power of 2 */
+ UCHAR ucChannelNum; /* Number of channel; */
+ UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */
+ UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
+ UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
+ UCHAR ucReserved[2];
+} ATOM_VRAM_MODULE_V1;
+
+typedef struct _ATOM_VRAM_MODULE_V2 {
+ ULONG ulReserved;
+ ULONG ulFlags; /* To enable/disable functionalities based on memory type */
+ ULONG ulEngineClock; /* Override of default engine clock for particular memory type */
+ ULONG ulMemoryClock; /* Override of default memory clock for particular memory type */
+ USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
+ USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
+ USHORT usEMRSValue;
+ USHORT usMRSValue;
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell which memory module is currently in use */
+ UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
+ UCHAR ucMemoryVenderID; /* Predefined, never changes across designs or memory type/vendor. If not predefined, vendor detection table gets executed */
+ UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
+ UCHAR ucRow; /* Number of rows, in power of 2 */
+ UCHAR ucColumn; /* Number of columns, in power of 2 */
+ UCHAR ucBank; /* Number of banks */
+ UCHAR ucRank; /* Number of Rank, in power of 2 */
+ UCHAR ucChannelNum; /* Number of channel; */
+ UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */
+ UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
+ UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
+ UCHAR ucRefreshRateFactor;
+ UCHAR ucReserved[3];
+} ATOM_VRAM_MODULE_V2;
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT {
+ ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
+ union {
+ USHORT usMRS; /* mode register */
+ USHORT usDDR3_MR0;
+ };
+ union {
+ USHORT usEMRS; /* extended mode register */
+ USHORT usDDR3_MR1;
+ };
+ UCHAR ucCL; /* CAS latency */
+ UCHAR ucWL; /* WRITE Latency */
+ UCHAR uctRAS; /* tRAS */
+ UCHAR uctRC; /* tRC */
+ UCHAR uctRFC; /* tRFC */
+ UCHAR uctRCDR; /* tRCDR */
+ UCHAR uctRCDW; /* tRCDW */
+ UCHAR uctRP; /* tRP */
+ UCHAR uctRRD; /* tRRD */
+ UCHAR uctWR; /* tWR */
+ UCHAR uctWTR; /* tWTR */
+ UCHAR uctPDIX; /* tPDIX */
+ UCHAR uctFAW; /* tFAW */
+ UCHAR uctAOND; /* tAOND */
+ union {
+ struct {
+ UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */
+ UCHAR ucReserved;
+ };
+ USHORT usDDR3_MR2;
+ };
+} ATOM_MEMORY_TIMING_FORMAT;
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 {
+ ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
+ USHORT usMRS; /* mode register */
+ USHORT usEMRS; /* extended mode register */
+ UCHAR ucCL; /* CAS latency */
+ UCHAR ucWL; /* WRITE Latency */
+ UCHAR uctRAS; /* tRAS */
+ UCHAR uctRC; /* tRC */
+ UCHAR uctRFC; /* tRFC */
+ UCHAR uctRCDR; /* tRCDR */
+ UCHAR uctRCDW; /* tRCDW */
+ UCHAR uctRP; /* tRP */
+ UCHAR uctRRD; /* tRRD */
+ UCHAR uctWR; /* tWR */
+ UCHAR uctWTR; /* tWTR */
+ UCHAR uctPDIX; /* tPDIX */
+ UCHAR uctFAW; /* tFAW */
+ UCHAR uctAOND; /* tAOND */
+ UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */
+/* ///////////////////////GDDR parameters/////////////////////////////////// */
+ UCHAR uctCCDL; /* */
+ UCHAR uctCRCRL; /* */
+ UCHAR uctCRCWL; /* */
+ UCHAR uctCKE; /* */
+ UCHAR uctCKRSE; /* */
+ UCHAR uctCKRSX; /* */
+ UCHAR uctFAW32; /* */
+ UCHAR ucReserved1; /* */
+ UCHAR ucReserved2; /* */
+ UCHAR ucTerminator;
+} ATOM_MEMORY_TIMING_FORMAT_V1;
+
+typedef struct _ATOM_MEMORY_FORMAT {
+ ULONG ulDllDisClock; /* memory DLL will be disabled when the target memory clock is below this clock */
+ union {
+ USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
+ USHORT usDDR3_Reserved; /* Not used for DDR3 memory */
+ };
+ union {
+ USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
+ USHORT usDDR3_MR3; /* Used for DDR3 memory */
+ };
+ UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
+ UCHAR ucMemoryVenderID; /* Predefined, never changes across designs or memory type/vendor. If not predefined, vendor detection table gets executed */
+ UCHAR ucRow; /* Number of rows, in power of 2 */
+ UCHAR ucColumn; /* Number of columns, in power of 2 */
+ UCHAR ucBank; /* Number of banks */
+ UCHAR ucRank; /* Number of Rank, in power of 2 */
+ UCHAR ucBurstSize; /* burst size, 0= burst size=4 1= burst size=8 */
+ UCHAR ucDllDisBit; /* position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) */
+ UCHAR ucRefreshRateFactor; /* memory refresh rate in unit of ms */
+ UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
+ UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
+ UCHAR ucMemAttrib; /* Memory device attribute, like RDBI/WDBI etc */
+ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
+} ATOM_MEMORY_FORMAT;
+
+typedef struct _ATOM_VRAM_MODULE_V3 {
+ ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
+ USHORT usSize; /* size of ATOM_VRAM_MODULE_V3 */
+ USHORT usDefaultMVDDQ; /* board dependent parameter: Default Memory Core Voltage */
+ USHORT usDefaultMVDDC; /* board dependent parameter: Default Memory IO Voltage */
+ UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell which memory module is currently in use */
+ UCHAR ucChannelNum; /* board dependent parameter: Number of channels */
+ UCHAR ucChannelSize; /* board dependent parameter: 32bit or 64bit */
+ UCHAR ucVREFI; /* board dependent parameter: EXT or INT +160mv to -140mv */
+ UCHAR ucNPL_RT; /* board dependent parameter: NPL round trip delay, used to calculate memory timing parameters */
+ UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
+ ATOM_MEMORY_FORMAT asMemory; /* describes all video memory parameters from the memory spec */
+} ATOM_VRAM_MODULE_V3;
+
+/* ATOM_VRAM_MODULE_V3.ucNPL_RT */
+#define NPL_RT_MASK 0x0f
+#define BATTERY_ODT_MASK 0xc0
+
+#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3
+
+typedef struct _ATOM_VRAM_MODULE_V4 {
+ ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
+ USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */
+ USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
+ /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell which memory module is currently in use */
+ UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
+ UCHAR ucChannelNum; /* Number of channels present in this module config */
+ UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */
+ UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
+ UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
+ UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */
+ UCHAR ucVREFI; /* board dependent parameter */
+ UCHAR ucNPL_RT; /* board dependent parameter: NPL round trip delay, used to calculate memory timing parameters */
+ UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
+ UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
+ /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
+ UCHAR ucReserved[3];
+
+/* compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level */
+ union {
+ USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
+ USHORT usDDR3_Reserved;
+ };
+ union {
+ USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
+ USHORT usDDR3_MR3; /* Used for DDR3 memory */
+ };
+ UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */
+ UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
+ UCHAR ucReserved2[2];
+ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
+} ATOM_VRAM_MODULE_V4;
+
+#define VRAM_MODULE_V4_MISC_RANK_MASK 0x3
+#define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1
+#define VRAM_MODULE_V4_MISC_BL_MASK 0x4
+#define VRAM_MODULE_V4_MISC_BL8 0x4
+#define VRAM_MODULE_V4_MISC_DUAL_CS 0x10
+
+typedef struct _ATOM_VRAM_MODULE_V5 {
+ ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
+ USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */
+ USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
+ /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell which memory module is currently in use */
+ UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
+ UCHAR ucChannelNum; /* Number of channels present in this module config */
+ UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */
+ UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
+ UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
+ UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */
+ UCHAR ucVREFI; /* board dependent parameter */
+ UCHAR ucNPL_RT; /* board dependent parameter: NPL round trip delay, used to calculate memory timing parameters */
+ UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
+ UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
+ /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
+ UCHAR ucReserved[3];
+
+/* compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level */
+ USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
+ USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
+ UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */
+ UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
+ UCHAR ucFIFODepth; /* FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth */
+ UCHAR ucCDR_Bandwidth; /* [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth */
+ ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5]; /* Memory Timing block, sorted from lower clock to higher clock */
+} ATOM_VRAM_MODULE_V5;
+
+typedef struct _ATOM_VRAM_INFO_V2 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
+} ATOM_VRAM_INFO_V2;
+
+typedef struct _ATOM_VRAM_INFO_V3 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
+ USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
+ USHORT usRerseved;
+ UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
+ ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */
+ /* ATOM_INIT_REG_BLOCK aMemAdjust; */
+} ATOM_VRAM_INFO_V3;
+
+#define ATOM_VRAM_INFO_LAST ATOM_VRAM_INFO_V3
+
+typedef struct _ATOM_VRAM_INFO_V4 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
+ USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
+ USHORT usRerseved;
+ UCHAR ucMemDQ7_0ByteRemap; /* DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 */
+ ULONG ulMemDQ7_0BitRemap; /* each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21] */
+ UCHAR ucReservde[4];
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
+ ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */
+ /* ATOM_INIT_REG_BLOCK aMemAdjust; */
+} ATOM_VRAM_INFO_V4;
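+
+/*
+ * Illustrative walk of aVramInfo (a sketch, assuming a hypothetical pointer
+ * "info" to an ATOM_VRAM_INFO_V4): entries are variable sized, so step by
+ * each entry's usModuleSize rather than by sizeof():
+ *   ATOM_VRAM_MODULE_V4 *vm = info->aVramInfo;
+ *   for (i = 0; i < info->ucNumOfVRAMModule; i++)
+ *           vm = (ATOM_VRAM_MODULE_V4 *)((UCHAR *)vm + vm->usModuleSize);
+ */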
+
+typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */
+} ATOM_VRAM_GPIO_DETECTION_INFO;
+
+typedef struct _ATOM_MEMORY_TRAINING_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTrainingLoop;
+ UCHAR ucReserved[3];
+ ATOM_INIT_REG_BLOCK asMemTrainingSetting;
+} ATOM_MEMORY_TRAINING_INFO;
+
+typedef struct SW_I2C_CNTL_DATA_PARAMETERS {
+ UCHAR ucControl;
+ UCHAR ucData;
+ UCHAR ucSatus;
+ UCHAR ucTemp;
+} SW_I2C_CNTL_DATA_PARAMETERS;
+
+#define SW_I2C_CNTL_DATA_PS_ALLOCATION SW_I2C_CNTL_DATA_PARAMETERS
+
+typedef struct _SW_I2C_IO_DATA_PARAMETERS {
+ USHORT GPIO_Info;
+ UCHAR ucAct;
+ UCHAR ucData;
+} SW_I2C_IO_DATA_PARAMETERS;
+
+#define SW_I2C_IO_DATA_PS_ALLOCATION SW_I2C_IO_DATA_PARAMETERS
+
+/****************************SW I2C CNTL DEFINITIONS**********************/
+#define SW_I2C_IO_RESET 0
+#define SW_I2C_IO_GET 1
+#define SW_I2C_IO_DRIVE 2
+#define SW_I2C_IO_SET 3
+#define SW_I2C_IO_START 4
+
+#define SW_I2C_IO_CLOCK 0
+#define SW_I2C_IO_DATA 0x80
+
+#define SW_I2C_IO_ZERO 0
+#define SW_I2C_IO_ONE 0x100
+
+#define SW_I2C_CNTL_READ 0
+#define SW_I2C_CNTL_WRITE 1
+#define SW_I2C_CNTL_START 2
+#define SW_I2C_CNTL_STOP 3
+#define SW_I2C_CNTL_OPEN 4
+#define SW_I2C_CNTL_CLOSE 5
+#define SW_I2C_CNTL_WRITE1BIT 6
+
+/* ==============================VESA definition Portion=============================== */
+#define VESA_OEM_PRODUCT_REV '01.00'
+#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB /* refer to VBE spec p.32, no TTY support */
+#define VESA_MODE_WIN_ATTRIBUTE 7
+#define VESA_WIN_SIZE 64
+
+typedef struct _PTR_32_BIT_STRUCTURE {
+ USHORT Offset16;
+ USHORT Segment16;
+} PTR_32_BIT_STRUCTURE;
+
+typedef union _PTR_32_BIT_UNION {
+ PTR_32_BIT_STRUCTURE SegmentOffset;
+ ULONG Ptr32_Bit;
+} PTR_32_BIT_UNION;
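+
+/*
+ * Illustrative note (sketch): SegmentOffset describes a real-mode pointer,
+ * whose linear address is (Segment16 << 4) + Offset16; Ptr32_Bit is simply
+ * the raw 32-bit view of the same pair (offset in the low word, segment in
+ * the high word on a little-endian layout).
+ */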
+
+typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE {
+ UCHAR VbeSignature[4];
+ USHORT VbeVersion;
+ PTR_32_BIT_UNION OemStringPtr;
+ UCHAR Capabilities[4];
+ PTR_32_BIT_UNION VideoModePtr;
+ USHORT TotalMemory;
+} VBE_1_2_INFO_BLOCK_UPDATABLE;
+
+typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE {
+ VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
+ USHORT OemSoftRev;
+ PTR_32_BIT_UNION OemVendorNamePtr;
+ PTR_32_BIT_UNION OemProductNamePtr;
+ PTR_32_BIT_UNION OemProductRevPtr;
+} VBE_2_0_INFO_BLOCK_UPDATABLE;
+
+typedef union _VBE_VERSION_UNION {
+ VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
+ VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
+} VBE_VERSION_UNION;
+
+typedef struct _VBE_INFO_BLOCK {
+ VBE_VERSION_UNION UpdatableVBE_Info;
+ UCHAR Reserved[222];
+ UCHAR OemData[256];
+} VBE_INFO_BLOCK;
+
+typedef struct _VBE_FP_INFO {
+ USHORT HSize;
+ USHORT VSize;
+ USHORT FPType;
+ UCHAR RedBPP;
+ UCHAR GreenBPP;
+ UCHAR BlueBPP;
+ UCHAR ReservedBPP;
+ ULONG RsvdOffScrnMemSize;
+ ULONG RsvdOffScrnMEmPtr;
+ UCHAR Reserved[14];
+} VBE_FP_INFO;
+
+typedef struct _VESA_MODE_INFO_BLOCK {
+/* Mandatory information for all VBE revisions */
+ USHORT ModeAttributes; /* dw ? ; mode attributes */
+ UCHAR WinAAttributes; /* db ? ; window A attributes */
+ UCHAR WinBAttributes; /* db ? ; window B attributes */
+ USHORT WinGranularity; /* dw ? ; window granularity */
+ USHORT WinSize; /* dw ? ; window size */
+ USHORT WinASegment; /* dw ? ; window A start segment */
+ USHORT WinBSegment; /* dw ? ; window B start segment */
+ ULONG WinFuncPtr; /* dd ? ; real mode pointer to window function */
+ USHORT BytesPerScanLine; /* dw ? ; bytes per scan line */
+
+/* ; Mandatory information for VBE 1.2 and above */
+ USHORT XResolution; /* dw ? ; horizontal resolution in pixels or characters */
+ USHORT YResolution; /* dw ? ; vertical resolution in pixels or characters */
+ UCHAR XCharSize; /* db ? ; character cell width in pixels */
+ UCHAR YCharSize; /* db ? ; character cell height in pixels */
+ UCHAR NumberOfPlanes; /* db ? ; number of memory planes */
+ UCHAR BitsPerPixel; /* db ? ; bits per pixel */
+ UCHAR NumberOfBanks; /* db ? ; number of banks */
+ UCHAR MemoryModel; /* db ? ; memory model type */
+ UCHAR BankSize; /* db ? ; bank size in KB */
+ UCHAR NumberOfImagePages; /* db ? ; number of images */
+ UCHAR ReservedForPageFunction; /* db 1 ; reserved for page function */
+
+/* ; Direct Color fields(required for direct/6 and YUV/7 memory models) */
+ UCHAR RedMaskSize; /* db ? ; size of direct color red mask in bits */
+ UCHAR RedFieldPosition; /* db ? ; bit position of lsb of red mask */
+ UCHAR GreenMaskSize; /* db ? ; size of direct color green mask in bits */
+ UCHAR GreenFieldPosition; /* db ? ; bit position of lsb of green mask */
+ UCHAR BlueMaskSize; /* db ? ; size of direct color blue mask in bits */
+ UCHAR BlueFieldPosition; /* db ? ; bit position of lsb of blue mask */
+ UCHAR RsvdMaskSize; /* db ? ; size of direct color reserved mask in bits */
+ UCHAR RsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask */
+ UCHAR DirectColorModeInfo; /* db ? ; direct color mode attributes */
+
+/* ; Mandatory information for VBE 2.0 and above */
+ ULONG PhysBasePtr; /* dd ? ; physical address for flat memory frame buffer */
+ ULONG Reserved_1; /* dd 0 ; reserved - always set to 0 */
+ USHORT Reserved_2; /* dw 0 ; reserved - always set to 0 */
+
+/* ; Mandatory information for VBE 3.0 and above */
+ USHORT LinBytesPerScanLine; /* dw ? ; bytes per scan line for linear modes */
+ UCHAR BnkNumberOfImagePages; /* db ? ; number of images for banked modes */
+ UCHAR LinNumberOfImagPages; /* db ? ; number of images for linear modes */
+ UCHAR LinRedMaskSize; /* db ? ; size of direct color red mask(linear modes) */
+ UCHAR LinRedFieldPosition; /* db ? ; bit position of lsb of red mask(linear modes) */
+ UCHAR LinGreenMaskSize; /* db ? ; size of direct color green mask(linear modes) */
+ UCHAR LinGreenFieldPosition; /* db ? ; bit position of lsb of green mask(linear modes) */
+ UCHAR LinBlueMaskSize; /* db ? ; size of direct color blue mask(linear modes) */
+ UCHAR LinBlueFieldPosition; /* db ? ; bit position of lsb of blue mask(linear modes) */
+ UCHAR LinRsvdMaskSize; /* db ? ; size of direct color reserved mask(linear modes) */
+ UCHAR LinRsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask(linear modes) */
+ ULONG MaxPixelClock; /* dd ? ; maximum pixel clock(in Hz) for graphics mode */
+ UCHAR Reserved; /* db 190 dup (0) */
+} VESA_MODE_INFO_BLOCK;
+
+/* BIOS function CALLS */
+#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 /* ATI Extended Function code */
+#define ATOM_BIOS_FUNCTION_COP_MODE 0x00
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY1 0x04
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY2 0x05
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY3 0x06
+#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B
+#define ATOM_BIOS_FUNCTION_ASIC_DSTATE 0x0E
+#define ATOM_BIOS_FUNCTION_DEBUG_PLAY 0x0F
+#define ATOM_BIOS_FUNCTION_STV_STD 0x16
+#define ATOM_BIOS_FUNCTION_DEVICE_DET 0x17
+#define ATOM_BIOS_FUNCTION_DEVICE_SWITCH 0x18
+
+#define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84
+#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A
+#define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B
+#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 /* Sub function 80 */
+#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 /* Sub function 81 */
+
+#define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D
+#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E
+#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F
+#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 /* Sub function 03 */
+#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 /* Sub function 7 */
+#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 /* Notify caller the current thermal state */
+#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 /* Notify caller the current critical state */
+#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 /* Sub function 85 */
+#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900 /* Sub function 89 */
+#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 /* Notify caller that ADC is supported */
+
+#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 /* Set DPMS */
+#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 /* BL: Sub function 01 */
+#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 /* BL: Sub function 02 */
+#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 /* BH Parameter for DPMS ON. */
+#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 /* BH Parameter for DPMS STANDBY */
+#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 /* BH Parameter for DPMS SUSPEND */
+#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 /* BH Parameter for DPMS OFF */
+#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 /* BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) */
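+
+/* Illustrative composition (sketch): a DPMS standby request would use
+ * AX = ATOM_BIOS_FUNCTION_VESA_DPMS and
+ * BX = ATOM_SUB_FUNCTION_SET_DPMS | ATOM_PARAMETER_VESA_DPMS_STANDBY,
+ * i.e. BL = 0x01 (set DPMS) and BH = 0x01 (standby). */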
+
+#define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L
+#define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L
+#define ATOM_BIOS_REG_LOW_MASK 0x000000FFL
+
+/* structure used for VBIOS only */
+
+/* DispOutInfoTable */
+typedef struct _ASIC_TRANSMITTER_INFO {
+ USHORT usTransmitterObjId;
+ USHORT usSupportDevice;
+ UCHAR ucTransmitterCmdTblId;
+ UCHAR ucConfig;
+ UCHAR ucEncoderID; /* available 1st encoder ( default ) */
+ UCHAR ucOptionEncoderID; /* available 2nd encoder ( optional ) */
+ UCHAR uc2ndEncoderID;
+ UCHAR ucReserved;
+} ASIC_TRANSMITTER_INFO;
+
+typedef struct _ASIC_ENCODER_INFO {
+ UCHAR ucEncoderID;
+ UCHAR ucEncoderConfig;
+ USHORT usEncoderCmdTblId;
+} ASIC_ENCODER_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT ptrTransmitterInfo;
+ USHORT ptrEncoderInfo;
+ ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
+ ASIC_ENCODER_INFO asEncoderInfo[1];
+} ATOM_DISP_OUT_INFO;
+
+/* DispDevicePriorityInfo */
+typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT asDevicePriority[16];
+} ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
+
+/* ProcessAuxChannelTransactionTable */
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS {
+ USHORT lpAuxRequest;
+ USHORT lpDataOut;
+ UCHAR ucChannelID;
+ union {
+ UCHAR ucReplyStatus;
+ UCHAR ucDelay;
+ };
+ UCHAR ucDataOutLen;
+ UCHAR ucReserved;
+} PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+
+#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+
+/* GetSinkType */
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
+ USHORT ucLinkClock;
+ union {
+ UCHAR ucConfig; /* for DP training command */
+ UCHAR ucI2cId; /* use for GET_SINK_TYPE command */
+ };
+ UCHAR ucAction;
+ UCHAR ucStatus;
+ UCHAR ucLaneNum;
+ UCHAR ucReserved[2];
+} DP_ENCODER_SERVICE_PARAMETERS;
+
+/* ucAction */
+#define ATOM_DP_ACTION_GET_SINK_TYPE 0x01
+#define ATOM_DP_ACTION_TRAINING_START 0x02
+#define ATOM_DP_ACTION_TRAINING_COMPLETE 0x03
+#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL 0x04
+#define ATOM_DP_ACTION_SET_VSWING_PREEMP 0x05
+#define ATOM_DP_ACTION_GET_VSWING_PREEMP 0x06
+#define ATOM_DP_ACTION_BLANKING 0x07
+
+/* ucConfig */
+#define ATOM_DP_CONFIG_ENCODER_SEL_MASK 0x03
+#define ATOM_DP_CONFIG_DIG1_ENCODER 0x00
+#define ATOM_DP_CONFIG_DIG2_ENCODER 0x01
+#define ATOM_DP_CONFIG_EXTERNAL_ENCODER 0x02
+#define ATOM_DP_CONFIG_LINK_SEL_MASK 0x04
+#define ATOM_DP_CONFIG_LINK_A 0x00
+#define ATOM_DP_CONFIG_LINK_B 0x04
+
+#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+/* DP_TRAINING_TABLE */
+#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
+#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
+#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16)
+#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24)
+#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32)
+#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40)
+#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48)
+#define DP_I2C_AUX_DDC_WRITE_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 60)
+#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64)
+#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72)
+#define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76)
+#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80)
+
+typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS {
+ UCHAR ucI2CSpeed;
+ union {
+ UCHAR ucRegIndex;
+ UCHAR ucStatus;
+ };
+ USHORT lpI2CDataOut;
+ UCHAR ucFlag;
+ UCHAR ucTransBytes;
+ UCHAR ucSlaveAddr;
+ UCHAR ucLineNumber;
+} PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
+
+#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+
+/* ucFlag */
+#define HW_I2C_WRITE 1
+#define HW_I2C_READ 0
+
+/****************************************************************************/
+/* Portion VI: Definitions being obsolete */
+/****************************************************************************/
+
+/* ========================================================================================== */
+/* Remove the definitions below when driver is ready! */
+typedef struct _ATOM_DAC_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMaxFrequency; /* in 10kHz unit */
+ USHORT usReserved;
+} ATOM_DAC_INFO;
+
+typedef struct _COMPASSIONATE_DATA {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ /* ============================== DAC1 portion */
+ UCHAR ucDAC1_BG_Adjustment;
+ UCHAR ucDAC1_DAC_Adjustment;
+ USHORT usDAC1_FORCE_Data;
+ /* ============================== DAC2 portion */
+ UCHAR ucDAC2_CRT2_BG_Adjustment;
+ UCHAR ucDAC2_CRT2_DAC_Adjustment;
+ USHORT usDAC2_CRT2_FORCE_Data;
+ USHORT usDAC2_CRT2_MUX_RegisterIndex;
+ UCHAR ucDAC2_CRT2_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
+ UCHAR ucDAC2_NTSC_BG_Adjustment;
+ UCHAR ucDAC2_NTSC_DAC_Adjustment;
+ USHORT usDAC2_TV1_FORCE_Data;
+ USHORT usDAC2_TV1_MUX_RegisterIndex;
+ UCHAR ucDAC2_TV1_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
+ UCHAR ucDAC2_CV_BG_Adjustment;
+ UCHAR ucDAC2_CV_DAC_Adjustment;
+ USHORT usDAC2_CV_FORCE_Data;
+ USHORT usDAC2_CV_MUX_RegisterIndex;
+ UCHAR ucDAC2_CV_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
+ UCHAR ucDAC2_PAL_BG_Adjustment;
+ UCHAR ucDAC2_PAL_DAC_Adjustment;
+ USHORT usDAC2_TV2_FORCE_Data;
+} COMPASSIONATE_DATA;
+
+/****************************Supported Device Info Table Definitions**********************/
+/* ucConnectInfo: */
+/* [7:4] - connector type */
+/* = 1 - VGA connector */
+/* = 2 - DVI-I */
+/* = 3 - DVI-D */
+/* = 4 - DVI-A */
+/* = 5 - SVIDEO */
+/* = 6 - COMPOSITE */
+/* = 7 - LVDS */
+/* = 8 - DIGITAL LINK */
+/* = 9 - SCART */
+/* = 0xA - HDMI_type A */
+/* = 0xB - HDMI_type B */
+/* = 0xE - Special case1 (DVI+DIN) */
+/* Others=TBD */
+/* [3:0] - DAC Associated */
+/* = 0 - no DAC */
+/* = 1 - DACA */
+/* = 2 - DACB */
+/* = 3 - External DAC */
+/* Others=TBD */
+/* */
+
+typedef struct _ATOM_CONNECTOR_INFO {
+#if ATOM_BIG_ENDIAN
+ UCHAR bfConnectorType:4;
+ UCHAR bfAssociatedDAC:4;
+#else
+ UCHAR bfAssociatedDAC:4;
+ UCHAR bfConnectorType:4;
+#endif
+} ATOM_CONNECTOR_INFO;
+
+typedef union _ATOM_CONNECTOR_INFO_ACCESS {
+ ATOM_CONNECTOR_INFO sbfAccess;
+ UCHAR ucAccess;
+} ATOM_CONNECTOR_INFO_ACCESS;
+
+typedef struct _ATOM_CONNECTOR_INFO_I2C {
+ ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+} ATOM_CONNECTOR_INFO_I2C;
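+
+/*
+ * Illustrative decode (a sketch, for a hypothetical ATOM_CONNECTOR_INFO_I2C
+ * entry "conn"), following the ucConnectInfo encoding described above:
+ *   connector type = conn.sucConnectorInfo.sbfAccess.bfConnectorType (e.g. 2 = DVI-I)
+ *   associated DAC = conn.sucConnectorInfo.sbfAccess.bfAssociatedDAC (e.g. 1 = DACA)
+ * or read both nibbles at once through conn.sucConnectorInfo.ucAccess.
+ */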
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
+} ATOM_SUPPORTED_DEVICES_INFO;
+
+#define NO_INT_SRC_MAPPED 0xFF
+
+typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP {
+ UCHAR ucIntSrcBitmap;
+} ATOM_CONNECTOR_INC_SRC_BITMAP;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+ ATOM_CONNECTOR_INC_SRC_BITMAP
+ asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+} ATOM_SUPPORTED_DEVICES_INFO_2;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
+ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
+} ATOM_SUPPORTED_DEVICES_INFO_2d1;
+
+#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
+
+typedef struct _ATOM_MISC_CONTROL_INFO {
+ USHORT usFrequency;
+ UCHAR ucPLL_ChargePump; /* PLL charge-pump gain control */
+ UCHAR ucPLL_DutyCycle; /* PLL duty cycle control */
+ UCHAR ucPLL_VCO_Gain; /* PLL VCO gain control */
+ UCHAR ucPLL_VoltageSwing; /* PLL driver voltage swing control */
+} ATOM_MISC_CONTROL_INFO;
+
+#define ATOM_MAX_MISC_INFO 4
+
+typedef struct _ATOM_TMDS_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMaxFrequency; /* in 10Khz */
+ ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
+} ATOM_TMDS_INFO;
+
+typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE {
+ UCHAR ucTVStandard; /* Same as TV standards defined above, */
+ UCHAR ucPadding[1];
+} ATOM_ENCODER_ANALOG_ATTRIBUTE;
+
+typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE {
+ UCHAR ucAttribute; /* Same as other digital encoder attributes defined above */
+ UCHAR ucPadding[1];
+} ATOM_ENCODER_DIGITAL_ATTRIBUTE;
+
+typedef union _ATOM_ENCODER_ATTRIBUTE {
+ ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
+ ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
+} ATOM_ENCODER_ATTRIBUTE;
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS {
+ USHORT usPixelClock;
+ USHORT usEncoderID;
+ UCHAR ucDeviceType; /* Use ATOM_DEVICE_xxx1_Index to indicate device type only. */
+ UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
+ ATOM_ENCODER_ATTRIBUTE usDevAttr;
+} DVO_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
+ DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
+} DVO_ENCODER_CONTROL_PS_ALLOCATION;
+
+#define ATOM_XTMDS_ASIC_SI164_ID 1
+#define ATOM_XTMDS_ASIC_SI178_ID 2
+#define ATOM_XTMDS_ASIC_TFP513_ID 3
+#define ATOM_XTMDS_SUPPORTED_SINGLELINK 0x00000001
+#define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002
+#define ATOM_XTMDS_MVPU_FPGA 0x00000004
+
+typedef struct _ATOM_XTMDS_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usSingleLinkMaxFrequency;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* ID of the I2C line used to control the external chip */
+ UCHAR ucXtransimitterID;
+ UCHAR ucSupportedLink; /* Bit field, bit0=1, single link supported;bit1=1,dual link supported */
+ UCHAR ucSequnceAlterID; /* Even with the same external TMDS asic, it's possible that the programming sequence alters */
+ /* due to design. This ID is used to alert the driver that the sequence is not "standard"! */
+ UCHAR ucMasterAddress; /* Address to control Master xTMDS Chip */
+ UCHAR ucSlaveAddress; /* Address to control Slave xTMDS Chip */
+} ATOM_XTMDS_INFO;
+
+typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
+ UCHAR ucEnable; /* ATOM_ENABLE=On or ATOM_DISABLE=Off */
+ UCHAR ucDevice; /* ATOM_DEVICE_DFP1_INDEX.... */
+ UCHAR ucPadding[2];
+} DFP_DPMS_STATUS_CHANGE_PARAMETERS;
+
+/****************************Legacy Power Play Table Definitions **********************/
+
+/* Definitions for ulPowerPlayMiscInfo */
+#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L
+#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L
+#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L
+
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT 0x00000004L
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH 0x00000008L
+
+#define ATOM_PM_MISCINFO_LOAD_PERFORMANCE_EN 0x00000010L
+
+#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L
+#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L
+#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L /* When this bit is set, ucVoltageDropIndex is not an index for a GPIO pin, but a voltage ID that SW needs to program */
+
+#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L
+#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L
+#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L
+#define ATOM_PM_MISCINFO_LOAD_BALANCE_EN 0x00000800L
+#define ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE 0x00001000L
+#define ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE 0x00002000L
+#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L
+
+#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L
+#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L
+#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L
+#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L
+#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L
+
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L /* 0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved */
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20
+
+#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L
+#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L /* When set, Dynamic */
+#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L /* When set, Dynamic */
+#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L /* When set, this mode is for accelerated 3D */
+
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L /* 1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) */
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28
+#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L
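+
+/* Illustrative extraction (sketch), for a hypothetical ulPowerPlayMiscInfo value "misc":
+ * group = (misc & ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK) >> ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT;
+ * the frame modulation level is extracted the same way with the FRAME_MODULATION mask/shift pair. */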
+
+#define ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE 0x00000001L
+#define ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT 0x00000002L
+#define ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN 0x00000004L
+#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L
+#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L
+#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L
+#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L /* If this bit is set in multi-pp mode, then the driver will pick the one with the lowest power consumption. */
+ /* If it's not set in any pp mode, the driver will use its default logic to pick a pp mode for video playback */
+#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L
+#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L
+#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L
+
+/* ucTableFormatRevision=1 */
+/* ucTableContentRevision=1 */
+typedef struct _ATOM_POWERMODE_INFO {
+ ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
+ ULONG ulReserved1; /* must set to 0 */
+ ULONG ulReserved2; /* must set to 0 */
+ USHORT usEngineClock;
+ USHORT usMemoryClock;
+ UCHAR ucVoltageDropIndex; /* index to GPIO table */
+ UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; /* number of PCIE lanes */
+} ATOM_POWERMODE_INFO;
+
+/* ucTableFormatRevision=2 */
+/* ucTableContentRevision=1 */
+typedef struct _ATOM_POWERMODE_INFO_V2 {
+ ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
+ ULONG ulMiscInfo2;
+ ULONG ulEngineClock;
+ ULONG ulMemoryClock;
+ UCHAR ucVoltageDropIndex; /* index to GPIO table */
+ UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; /* number of PCIE lanes */
+} ATOM_POWERMODE_INFO_V2;
+
+/* ucTableFormatRevision=2 */
+/* ucTableContentRevision=2 */
+typedef struct _ATOM_POWERMODE_INFO_V3 {
+ ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
+ ULONG ulMiscInfo2;
+ ULONG ulEngineClock;
+ ULONG ulMemoryClock;
+ UCHAR ucVoltageDropIndex; /* index to Core (VDDC) voltage table */
+ UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; /* number of PCIE lanes */
+ UCHAR ucVDDCI_VoltageDropIndex; /* index to VDDCI voltage table */
+} ATOM_POWERMODE_INFO_V3;
+
+#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8
+
+#define ATOM_PP_OVERDRIVE_INTBITMAP_AUXWIN 0x01
+#define ATOM_PP_OVERDRIVE_INTBITMAP_OVERDRIVE 0x02
+
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM63 0x01
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1032 0x02
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1030 0x03
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 /* Andigilog */
+
+typedef struct _ATOM_POWERPLAY_INFO {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+} ATOM_POWERPLAY_INFO;
+
+typedef struct _ATOM_POWERPLAY_INFO_V2 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+} ATOM_POWERPLAY_INFO_V2;
+
+typedef struct _ATOM_POWERPLAY_INFO_V3 {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+} ATOM_POWERPLAY_INFO_V3;
+
+/**************************************************************************/
+
+/* The following definitions are for compatibility issues in different SW components. */
+#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
+#define Object_Info Object_Header
+#define AdjustARB_SEQ MC_InitParameter
+#define VRAM_GPIO_DetectionInfo VoltageObjectInfo
+#define ASIC_VDDCI_Info ASIC_ProfilingInfo
+#define ASIC_MVDDQ_Info MemoryTrainingInfo
+#define SS_Info PPLL_SS_Info
+#define ASIC_MVDDC_Info ASIC_InternalSS_Info
+#define DispDevicePriorityInfo SaveRestoreInfo
+#define DispOutInfo TV_VideoMode
+
+#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE
+#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE
+
+/* New device naming, remove them when both DAL and VBIOS are ready */
+#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1X_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1X_OUTPUT_CONTROL_PS_ALLOCATION DFP1X_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1I_OUTPUT_CONTROL_PARAMETERS DFP1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1I_OUTPUT_CONTROL_PS_ALLOCATION DFP1_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define ATOM_DEVICE_DFP1I_SUPPORT ATOM_DEVICE_DFP1_SUPPORT
+#define ATOM_DEVICE_DFP1X_SUPPORT ATOM_DEVICE_DFP2_SUPPORT
+
+#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX
+#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX
+
+#define ATOM_DEVICE_DFP2I_INDEX 0x00000009
+#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX)
+
+#define ATOM_S0_DFP1I ATOM_S0_DFP1
+#define ATOM_S0_DFP1X ATOM_S0_DFP2
+
+#define ATOM_S0_DFP2I 0x00200000L
+#define ATOM_S0_DFP2Ib2 0x20
+
+#define ATOM_S2_DFP1I_DPMS_STATE ATOM_S2_DFP1_DPMS_STATE
+#define ATOM_S2_DFP1X_DPMS_STATE ATOM_S2_DFP2_DPMS_STATE
+
+#define ATOM_S2_DFP2I_DPMS_STATE 0x02000000L
+#define ATOM_S2_DFP2I_DPMS_STATEb3 0x02
+
+#define ATOM_S3_DFP2I_ACTIVEb1 0x02
+
+#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE
+#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE
+
+#define ATOM_S3_DFP2I_ACTIVE 0x00000200L
+
+#define ATOM_S3_DFP1I_CRTC_ACTIVE ATOM_S3_DFP1_CRTC_ACTIVE
+#define ATOM_S3_DFP1X_CRTC_ACTIVE ATOM_S3_DFP2_CRTC_ACTIVE
+#define ATOM_S3_DFP2I_CRTC_ACTIVE 0x02000000L
+
+#define ATOM_S3_DFP2I_CRTC_ACTIVEb3 0x02
+#define ATOM_S5_DOS_REQ_DFP2Ib1 0x02
+
+#define ATOM_S5_DOS_REQ_DFP2I 0x0200
+#define ATOM_S6_ACC_REQ_DFP1I ATOM_S6_ACC_REQ_DFP1
+#define ATOM_S6_ACC_REQ_DFP1X ATOM_S6_ACC_REQ_DFP2
+
+#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02
+#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L
+
+#define TMDS1XEncoderControl DVOEncoderControl
+#define DFP1XOutputControl DVOOutputControl
+
+#define ExternalDFPOutputControl DFP1XOutputControl
+#define EnableExternalTMDS_Encoder TMDS1XEncoderControl
+
+#define DFP1IOutputControl TMDSAOutputControl
+#define DFP2IOutputControl LVTMAOutputControl
+
+#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define DAC2_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define ucDac1Standard ucDacStandard
+#define ucDac2Standard ucDacStandard
+
+#define TMDS1EncoderControl TMDSAEncoderControl
+#define TMDS2EncoderControl LVTMAEncoderControl
+
+#define DFP1OutputControl TMDSAOutputControl
+#define DFP2OutputControl LVTMAOutputControl
+#define CRT1OutputControl DAC1OutputControl
+#define CRT2OutputControl DAC2OutputControl
+
+/* These two lines will be removed for sure in a few days, will follow up with Michael V. */
+#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL
+#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+/*********************************************************************************/
+
+#pragma pack() /* BIOS data must use byte alignment */
+
+#endif /* _ATOMBIOS_H */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
new file mode 100644
index 00000000000..c0080cc9bf8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon_fixed.h"
+#include "radeon.h"
+#include "atom.h"
+#include "atom-bits.h"
+
+static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int index =
+ GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
+ ENABLE_CRTC_PS_ALLOCATION args;
+
+ memset(&args, 0, sizeof(args));
+
+ args.ucCRTC = radeon_crtc->crtc_id;
+ args.ucEnable = lock;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
+ ENABLE_CRTC_PS_ALLOCATION args;
+
+ memset(&args, 0, sizeof(args));
+
+ args.ucCRTC = radeon_crtc->crtc_id;
+ args.ucEnable = state;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
+ ENABLE_CRTC_PS_ALLOCATION args;
+
+ memset(&args, 0, sizeof(args));
+
+ args.ucCRTC = radeon_crtc->crtc_id;
+ args.ucEnable = state;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
+ BLANK_CRTC_PS_ALLOCATION args;
+
+ memset(&args, 0, sizeof(args));
+
+ args.ucCRTC = radeon_crtc->crtc_id;
+ args.ucBlanking = state;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (ASIC_IS_DCE3(rdev))
+ atombios_enable_crtc_memreq(crtc, 1);
+ atombios_enable_crtc(crtc, 1);
+ atombios_blank_crtc(crtc, 0);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_blank_crtc(crtc, 1);
+ atombios_enable_crtc(crtc, 0);
+ if (ASIC_IS_DCE3(rdev))
+ atombios_enable_crtc_memreq(crtc, 0);
+ break;
+ }
+
+ if (mode != DRM_MODE_DPMS_OFF) {
+ radeon_crtc_load_lut(crtc);
+ }
+}
+
+static void
+atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+ SET_CRTC_USING_DTD_TIMING_PARAMETERS * crtc_param)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ SET_CRTC_USING_DTD_TIMING_PARAMETERS conv_param;
+ int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
+
+ conv_param.usH_Size = cpu_to_le16(crtc_param->usH_Size);
+ conv_param.usH_Blanking_Time =
+ cpu_to_le16(crtc_param->usH_Blanking_Time);
+ conv_param.usV_Size = cpu_to_le16(crtc_param->usV_Size);
+ conv_param.usV_Blanking_Time =
+ cpu_to_le16(crtc_param->usV_Blanking_Time);
+ conv_param.usH_SyncOffset = cpu_to_le16(crtc_param->usH_SyncOffset);
+ conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
+ conv_param.usV_SyncOffset = cpu_to_le16(crtc_param->usV_SyncOffset);
+ conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
+ conv_param.susModeMiscInfo.usAccess =
+ cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
+ conv_param.ucCRTC = crtc_param->ucCRTC;
+
+ printk("executing set crtc dtd timing\n");
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
+}
+
+void atombios_crtc_set_timing(struct drm_crtc *crtc,
+ SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *
+ crtc_param)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION conv_param;
+ int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
+
+ conv_param.usH_Total = cpu_to_le16(crtc_param->usH_Total);
+ conv_param.usH_Disp = cpu_to_le16(crtc_param->usH_Disp);
+ conv_param.usH_SyncStart = cpu_to_le16(crtc_param->usH_SyncStart);
+ conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
+ conv_param.usV_Total = cpu_to_le16(crtc_param->usV_Total);
+ conv_param.usV_Disp = cpu_to_le16(crtc_param->usV_Disp);
+ conv_param.usV_SyncStart = cpu_to_le16(crtc_param->usV_SyncStart);
+ conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
+ conv_param.susModeMiscInfo.usAccess =
+ cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
+ conv_param.ucCRTC = crtc_param->ucCRTC;
+ conv_param.ucOverscanRight = crtc_param->ucOverscanRight;
+ conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft;
+ conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom;
+ conv_param.ucOverscanTop = crtc_param->ucOverscanTop;
+ conv_param.ucReserved = crtc_param->ucReserved;
+
+ printk("executing set crtc timing\n");
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
+}
+
+void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder = NULL;
+ struct radeon_encoder *radeon_encoder = NULL;
+ uint8_t frev, crev;
+ int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+ SET_PIXEL_CLOCK_PS_ALLOCATION args;
+ PIXEL_CLOCK_PARAMETERS *spc1_ptr;
+ PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
+ PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
+ uint32_t sclock = mode->clock;
+ uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+ struct radeon_pll *pll;
+ int pll_flags = 0;
+
+ memset(&args, 0, sizeof(args));
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ uint32_t ss_cntl;
+
+ if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
+ pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ else
+ pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+ /* disable spread spectrum clocking for now -- thanks Hedy Lamarr */
+ if (radeon_crtc->crtc_id == 0) {
+ ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
+ WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1);
+ } else {
+ ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
+ WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1);
+ }
+ } else {
+ pll_flags |= RADEON_PLL_LEGACY;
+
+ if (mode->clock > 200000) /* range limits??? */
+ pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ else
+ pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ if (!ASIC_IS_AVIVO(rdev)) {
+ if (encoder->encoder_type !=
+ DRM_MODE_ENCODER_DAC)
+ pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+ if (!ASIC_IS_AVIVO(rdev)
+ && (encoder->encoder_type ==
+ DRM_MODE_ENCODER_LVDS))
+ pll_flags |= RADEON_PLL_USE_REF_DIV;
+ }
+ radeon_encoder = to_radeon_encoder(encoder);
+ }
+ }
+
+ if (radeon_crtc->crtc_id == 0)
+ pll = &rdev->clock.p1pll;
+ else
+ pll = &rdev->clock.p2pll;
+
+ radeon_compute_pll(pll, mode->clock, &sclock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div, pll_flags);
+
+ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev);
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
+ spc1_ptr->usPixelClock = cpu_to_le16(sclock);
+ spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
+ spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
+ spc1_ptr->ucFracFbDiv = frac_fb_div;
+ spc1_ptr->ucPostDiv = post_div;
+ spc1_ptr->ucPpll =
+ radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
+ spc1_ptr->ucRefDivSrc = 1;
+ break;
+ case 2:
+ spc2_ptr =
+ (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
+ spc2_ptr->usPixelClock = cpu_to_le16(sclock);
+ spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
+ spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
+ spc2_ptr->ucFracFbDiv = frac_fb_div;
+ spc2_ptr->ucPostDiv = post_div;
+ spc2_ptr->ucPpll =
+ radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
+ spc2_ptr->ucRefDivSrc = 1;
+ break;
+ case 3:
+ if (!encoder)
+ return;
+ spc3_ptr =
+ (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
+ spc3_ptr->usPixelClock = cpu_to_le16(sclock);
+ spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
+ spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
+ spc3_ptr->ucFracFbDiv = frac_fb_div;
+ spc3_ptr->ucPostDiv = post_div;
+ spc3_ptr->ucPpll =
+ radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
+ spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
+ spc3_ptr->ucEncoderMode =
+ atombios_get_encoder_mode(encoder);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+
+ printk("executing set pll\n");
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_framebuffer *radeon_fb;
+ struct drm_gem_object *obj;
+ struct drm_radeon_gem_object *obj_priv;
+ uint64_t fb_location;
+ uint32_t fb_format, fb_pitch_pixels;
+
+ if (!crtc->fb)
+ return -EINVAL;
+
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+
+ obj = radeon_fb->obj;
+ obj_priv = obj->driver_private;
+
+ if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
+ return -EINVAL;
+ }
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 15:
+ fb_format =
+ AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+ AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
+ break;
+ case 16:
+ fb_format =
+ AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+ AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
+ break;
+ case 24:
+ case 32:
+ fb_format =
+ AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
+ AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
+ break;
+ default:
+ DRM_ERROR("Unsupported screen depth %d\n",
+ crtc->fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ /* TODO tiling */
+ if (radeon_crtc->crtc_id == 0)
+ WREG32(AVIVO_D1VGA_CONTROL, 0);
+ else
+ WREG32(AVIVO_D2VGA_CONTROL, 0);
+ WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32) fb_location);
+ WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
+ radeon_crtc->crtc_offset, (u32) fb_location);
+ WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+
+ WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+ WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+ WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
+ WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+ WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
+ WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+
+ fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+ WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+ WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+ WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+ crtc->mode.vdisplay);
+ x &= ~3;
+ y &= ~1;
+ WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
+ (x << 16) | y);
+ WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+ (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+ AVIVO_D1MODE_INTERLEAVE_EN);
+ else
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+
+ if (old_fb && old_fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(old_fb);
+ radeon_gem_object_unpin(radeon_fb->obj);
+ }
+ return 0;
+}
+
+int atombios_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing;
+
+ /* TODO color tiling */
+ memset(&crtc_timing, 0, sizeof(crtc_timing));
+
+ /* TODO tv */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ }
+
+ crtc_timing.ucCRTC = radeon_crtc->crtc_id;
+ crtc_timing.usH_Total = adjusted_mode->crtc_htotal;
+ crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay;
+ crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start;
+ crtc_timing.usH_SyncWidth =
+ adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
+
+ crtc_timing.usV_Total = adjusted_mode->crtc_vtotal;
+ crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay;
+ crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start;
+ crtc_timing.usV_SyncWidth =
+ adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
+ crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
+
+ atombios_crtc_set_pll(crtc, adjusted_mode);
+ atombios_crtc_set_timing(crtc, &crtc_timing);
+
+ if (ASIC_IS_AVIVO(rdev))
+ atombios_crtc_set_base(crtc, x, y, old_fb);
+ else {
+ if (radeon_crtc->crtc_id == 0) {
+ SET_CRTC_USING_DTD_TIMING_PARAMETERS crtc_dtd_timing;
+ memset(&crtc_dtd_timing, 0, sizeof(crtc_dtd_timing));
+
+ /* setup FP shadow regs on R4xx */
+ crtc_dtd_timing.ucCRTC = radeon_crtc->crtc_id;
+ crtc_dtd_timing.usH_Size = adjusted_mode->crtc_hdisplay;
+ crtc_dtd_timing.usV_Size = adjusted_mode->crtc_vdisplay;
+ crtc_dtd_timing.usH_Blanking_Time =
+ adjusted_mode->crtc_hblank_end -
+ adjusted_mode->crtc_hdisplay;
+ crtc_dtd_timing.usV_Blanking_Time =
+ adjusted_mode->crtc_vblank_end -
+ adjusted_mode->crtc_vdisplay;
+ crtc_dtd_timing.usH_SyncOffset =
+ adjusted_mode->crtc_hsync_start -
+ adjusted_mode->crtc_hdisplay;
+ crtc_dtd_timing.usV_SyncOffset =
+ adjusted_mode->crtc_vsync_start -
+ adjusted_mode->crtc_vdisplay;
+ crtc_dtd_timing.usH_SyncWidth =
+ adjusted_mode->crtc_hsync_end -
+ adjusted_mode->crtc_hsync_start;
+ crtc_dtd_timing.usV_SyncWidth =
+ adjusted_mode->crtc_vsync_end -
+ adjusted_mode->crtc_vsync_start;
+ /* crtc_dtd_timing.ucH_Border = adjusted_mode->crtc_hborder; */
+ /* crtc_dtd_timing.ucV_Border = adjusted_mode->crtc_vborder; */
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ crtc_dtd_timing.susModeMiscInfo.usAccess |=
+ ATOM_VSYNC_POLARITY;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ crtc_dtd_timing.susModeMiscInfo.usAccess |=
+ ATOM_HSYNC_POLARITY;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
+ crtc_dtd_timing.susModeMiscInfo.usAccess |=
+ ATOM_COMPOSITESYNC;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ crtc_dtd_timing.susModeMiscInfo.usAccess |=
+ ATOM_INTERLACE;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ crtc_dtd_timing.susModeMiscInfo.usAccess |=
+ ATOM_DOUBLE_CLOCK_MODE;
+
+ atombios_set_crtc_dtd_timing(crtc, &crtc_dtd_timing);
+ }
+ radeon_crtc_set_base(crtc, x, y, old_fb);
+ radeon_legacy_atom_set_surface(crtc);
+ }
+ return 0;
+}
+
+static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void atombios_crtc_prepare(struct drm_crtc *crtc)
+{
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ atombios_lock_crtc(crtc, 1);
+}
+
+static void atombios_crtc_commit(struct drm_crtc *crtc)
+{
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ atombios_lock_crtc(crtc, 0);
+}
+
+static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
+ .dpms = atombios_crtc_dpms,
+ .mode_fixup = atombios_crtc_mode_fixup,
+ .mode_set = atombios_crtc_mode_set,
+ .mode_set_base = atombios_crtc_set_base,
+ .prepare = atombios_crtc_prepare,
+ .commit = atombios_crtc_commit,
+};
+
+void radeon_atombios_init_crtc(struct drm_device *dev,
+ struct radeon_crtc *radeon_crtc)
+{
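+ /* The D2 CRTC register block sits at a fixed offset from the D1 block;
+ * storing that delta lets the rest of this file address either CRTC with
+ * the D1 register names plus crtc_offset (as done in
+ * atombios_crtc_set_base() above). */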
+ if (radeon_crtc->crtc_id == 1)
+ radeon_crtc->crtc_offset =
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+ drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
+}
+
+void radeon_init_disp_bw_avivo(struct drm_device *dev,
+ struct drm_display_mode *mode1,
+ uint32_t pixel_bytes1,
+ struct drm_display_mode *mode2,
+ uint32_t pixel_bytes2)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ fixed20_12 min_mem_eff;
+ fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
+ fixed20_12 sclk_ff, mclk_ff;
+ uint32_t dc_lb_memory_split, temp;
+
+ min_mem_eff.full = rfixed_const_8(0);
+ if (rdev->disp_priority == 2) {
+ uint32_t mc_init_misc_lat_timer = 0;
+ if (rdev->family == CHIP_RV515)
+ mc_init_misc_lat_timer =
+ RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER);
+ else if (rdev->family == CHIP_RS690)
+ mc_init_misc_lat_timer =
+ RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER);
+
+ mc_init_misc_lat_timer &=
+ ~(R300_MC_DISP1R_INIT_LAT_MASK <<
+ R300_MC_DISP1R_INIT_LAT_SHIFT);
+ mc_init_misc_lat_timer &=
+ ~(R300_MC_DISP0R_INIT_LAT_MASK <<
+ R300_MC_DISP0R_INIT_LAT_SHIFT);
+
+ if (mode2)
+ mc_init_misc_lat_timer |=
+ (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
+ if (mode1)
+ mc_init_misc_lat_timer |=
+ (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
+
+ if (rdev->family == CHIP_RV515)
+ WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER,
+ mc_init_misc_lat_timer);
+ else if (rdev->family == CHIP_RS690)
+ WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER,
+ mc_init_misc_lat_timer);
+ }
+
+ /*
+ * determine if there is enough bandwidth for the current mode
+ */
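+ /*
+ * Roughly (a sketch of the fixed-point math below): effective memory
+ * bandwidth ~ (default_mclk / 100) * (vram_width / 8) * (2 if DDR) * min_mem_eff,
+ * and peak display bandwidth ~ sum over active modes of
+ * (mode clock / 1000) * bytes per pixel; both come out in comparable
+ * units, so they can be compared directly.
+ */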
+ temp_ff.full = rfixed_const(100);
+ mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
+ mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
+ sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
+ sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
+
+ temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
+ temp_ff.full = rfixed_const(temp);
+ mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
+ mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+
+ pix_clk.full = 0;
+ pix_clk2.full = 0;
+ peak_disp_bw.full = 0;
+ if (mode1) {
+ temp_ff.full = rfixed_const(1000);
+ pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
+ pix_clk.full = rfixed_div(pix_clk, temp_ff);
+ temp_ff.full = rfixed_const(pixel_bytes1);
+ peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+ }
+ if (mode2) {
+ temp_ff.full = rfixed_const(1000);
+ pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
+ pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
+ temp_ff.full = rfixed_const(pixel_bytes2);
+ peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+ }
+
+ if (peak_disp_bw.full >= mem_bw.full) {
+ DRM_ERROR
+ ("You may not have enough display bandwidth for current mode\n"
+ "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
+ printk("peak disp bw %d, mem_bw %d\n",
+ rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw));
+ }
+
+ /*
+ * Line Buffer Setup
+ * There is a single line buffer shared by both display controllers.
+ * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display
+ * controllers. The partitioning can either be done manually or via one of four
+ * preset allocations specified in bits 1:0:
+ * 0 - line buffer is divided in half and shared between each display controller
+ * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
+ * 2 - D1 gets the whole buffer
+ * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
+ * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual allocation mode.
+ * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits
+ * 14:4; D2 allocation follows D1.
+ */
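+ /* For example (illustrative): with mode1->hdisplay == 1280, the manual
+ * allocation path under the #else branch below would program
+ * ((1280 / 2) + 64) = 704 into bits 14:4 as the D1 end address. */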
+
+ /* is auto or manual better ? */
+ dc_lb_memory_split =
+ RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK;
+ dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
+#if 1
+ /* auto */
+ if (mode1 && mode2) {
+ if (mode1->hdisplay > mode2->hdisplay) {
+ if (mode1->hdisplay > 2560)
+ dc_lb_memory_split |=
+ AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
+ else
+ dc_lb_memory_split |=
+ AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+ } else if (mode2->hdisplay > mode1->hdisplay) {
+ if (mode2->hdisplay > 2560)
+ dc_lb_memory_split |=
+ AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+ else
+ dc_lb_memory_split |=
+ AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+ } else
+ dc_lb_memory_split |=
+ AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+ } else if (mode1) {
+ dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY;
+ } else if (mode2) {
+ dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+ }
+#else
+ /* manual */
+ dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
+ dc_lb_memory_split &=
+ ~(AVIVO_DC_LB_DISP1_END_ADR_MASK <<
+ AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
+ if (mode1) {
+ dc_lb_memory_split |=
+ ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK)
+ << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
+ } else if (mode2) {
+ dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
+ }
+#endif
+ WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split);
+}
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
new file mode 100644
index 00000000000..5225f5be7ea
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -0,0 +1,1524 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_microcode.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* This file gathers functions specific to:
+ * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void r100_hdp_reset(struct radeon_device *rdev);
+void r100_gpu_init(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+int r100_mc_wait_for_idle(struct radeon_device *rdev);
+void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
+void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
+int r100_debugfs_mc_info_init(struct radeon_device *rdev);
+
+
+/*
+ * PCI GART
+ */
+void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
+{
+ /* TODO: can we do something here? */
+ /* The hw seems to cache only one entry, so we should discard this
+ * entry; otherwise, if the first GPU GART read hits it, it could
+ * end up at the wrong address. */
+}
+
+int r100_pci_gart_enable(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int r;
+
+ /* Initialize common gart structure */
+ r = radeon_gart_init(rdev);
+ if (r) {
+ return r;
+ }
+ if (rdev->gart.table.ram.ptr == NULL) {
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+ r = radeon_gart_table_ram_alloc(rdev);
+ if (r) {
+ return r;
+ }
+ }
+ /* discard memory requests outside of the configured range */
+ tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
+ WREG32(RADEON_AIC_CNTL, tmp);
+ /* set address range for PCI address translate */
+ WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ WREG32(RADEON_AIC_HI_ADDR, tmp);
+ /* Enable bus mastering */
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+ /* set PCI GART page-table base address */
+ WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
+ tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
+ WREG32(RADEON_AIC_CNTL, tmp);
+ r100_pci_gart_tlb_flush(rdev);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void r100_pci_gart_disable(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ /* discard memory requests outside of the configured range */
+ tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
+ WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
+ WREG32(RADEON_AIC_LO_ADDR, 0);
+ WREG32(RADEON_AIC_HI_ADDR, 0);
+}
+
+int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+ if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+ return -EINVAL;
+ }
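+ /* a PCI GART entry is simply the low 32 bits of the page address,
+ * stored little-endian in the system-RAM table */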
+ rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr);
+ return 0;
+}
+
+int r100_gart_enable(struct radeon_device *rdev)
+{
+ if (rdev->flags & RADEON_IS_AGP) {
+ r100_pci_gart_disable(rdev);
+ return 0;
+ }
+ return r100_pci_gart_enable(rdev);
+}
+
+
+/*
+ * MC
+ */
+void r100_mc_disable_clients(struct radeon_device *rdev)
+{
+ uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
+
+ /* FIXME: is this function correct for rs100,rs200,rs300 ? */
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+
+ /* stop display and memory access */
+ ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
+ WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
+ crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
+
+ r100_gpu_wait_for_vsync(rdev);
+
+ WREG32(RADEON_CRTC_GEN_CNTL,
+ (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
+ RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);
+
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+
+ r100_gpu_wait_for_vsync2(rdev);
+ WREG32(RADEON_CRTC2_GEN_CNTL,
+ (crtc2_gen_cntl &
+ ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
+ RADEON_CRTC2_DISP_REQ_EN_B);
+ }
+
+ udelay(500);
+}
+
+void r100_mc_setup(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int r;
+
+ r = r100_debugfs_mc_info_init(rdev);
+ if (r) {
+ DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
+ }
+ /* Write VRAM size in case we are limiting it */
+ WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+ tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+ tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
+ tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
+ WREG32(RADEON_MC_FB_LOCATION, tmp);
+
+ /* Enable bus mastering */
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+
+ if (rdev->flags & RADEON_IS_AGP) {
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
+ tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
+ WREG32(RADEON_MC_AGP_LOCATION, tmp);
+ WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
+ } else {
+ WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
+ WREG32(RADEON_AGP_BASE, 0);
+ }
+
+ tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
+ tmp |= (7 << 28);
+ WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
+ (void)RREG32(RADEON_HOST_PATH_CNTL);
+ WREG32(RADEON_HOST_PATH_CNTL, tmp);
+ (void)RREG32(RADEON_HOST_PATH_CNTL);
+}
+
+int r100_mc_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (r100_debugfs_rbbm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+ }
+
+ r100_gpu_init(rdev);
+ /* Disable gart, which also disables out-of-gart access */
+ r100_pci_gart_disable(rdev);
+
+ /* Setup GPU memory space */
+ rdev->mc.vram_location = 0xFFFFFFFFUL;
+ rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ printk(KERN_WARNING "[drm] Disabling AGP\n");
+ rdev->flags &= ~RADEON_IS_AGP;
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ } else {
+ rdev->mc.gtt_location = rdev->mc.agp_base;
+ }
+ }
+ r = radeon_mc_setup(rdev);
+ if (r) {
+ return r;
+ }
+
+ r100_mc_disable_clients(rdev);
+ if (r100_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+
+ r100_mc_setup(rdev);
+ return 0;
+}
+
+void r100_mc_fini(struct radeon_device *rdev)
+{
+ r100_pci_gart_disable(rdev);
+ radeon_gart_table_ram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Fence emission
+ */
+void r100_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ /* Whoever calls radeon_fence_emit should call ring_lock and ask
+ * for enough space (today the callers are ib schedule and buffer move) */
+ /* Wait until IDLE & CLEAN */
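+ /* 0x1720 is the WAIT_UNTIL register; bits 16 and 17 request 2D and
+ * 3D idle & clean (the same bits used by name elsewhere in this file) */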
+ radeon_ring_write(rdev, PACKET0(0x1720, 0));
+ radeon_ring_write(rdev, (1 << 16) | (1 << 17));
+ /* Emit fence sequence & fire IRQ */
+ radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
+ radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
+ radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+}
+
+
+/*
+ * Writeback
+ */
+int r100_wb_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->wb.wb_obj == NULL) {
+ r = radeon_object_create(rdev, NULL, 4096,
+ true,
+ RADEON_GEM_DOMAIN_GTT,
+ false, &rdev->wb.wb_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_object_pin(rdev->wb.wb_obj,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.gpu_addr);
+ if (r) {
+ DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ if (r) {
+ DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+ return r;
+ }
+ }
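+ /* Point writeback at the buffer: 0x774 and 0x70C are likely the scratch
+ * and ring rptr writeback addresses (SCRATCH_ADDR, CP_RB_RPTR_ADDR) and
+ * 0x770 the scratch writeback mask (SCRATCH_UMSK) */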
+ WREG32(0x774, rdev->wb.gpu_addr);
+ WREG32(0x70C, rdev->wb.gpu_addr + 1024);
+ WREG32(0x770, 0xff);
+ return 0;
+}
+
+void r100_wb_fini(struct radeon_device *rdev)
+{
+ if (rdev->wb.wb_obj) {
+ radeon_object_kunmap(rdev->wb.wb_obj);
+ radeon_object_unpin(rdev->wb.wb_obj);
+ radeon_object_unref(&rdev->wb.wb_obj);
+ rdev->wb.wb = NULL;
+ rdev->wb.wb_obj = NULL;
+ }
+}
+
+int r100_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence)
+{
+ uint32_t cur_pages;
+ uint32_t stride_bytes = PAGE_SIZE;
+ uint32_t pitch;
+ uint32_t stride_pixels;
+ unsigned ndw;
+ int num_loops;
+ int r = 0;
+
+ /* radeon limited to 16k stride */
+ stride_bytes &= 0x3fff;
+ /* radeon pitch is /64 */
+ pitch = stride_bytes / 64;
+ stride_pixels = stride_bytes / 4;
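+ /* a single BITBLT_MULTI packet below copies at most 8191 pages, so
+ * split the request into that many loops */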
+ num_loops = DIV_ROUND_UP(num_pages, 8191);
+
+ /* Ask for enough room for blit + flush + fence */
+ ndw = 64 + (10 * num_loops);
+ r = radeon_ring_lock(rdev, ndw);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
+ return -EINVAL;
+ }
+ while (num_pages > 0) {
+ cur_pages = num_pages;
+ if (cur_pages > 8191) {
+ cur_pages = 8191;
+ }
+ num_pages -= cur_pages;
+
+ /* pages are stacked in the Y direction (height);
+ * page width runs in the X direction (width) */
+ radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
+ radeon_ring_write(rdev,
+ RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+ RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_SRC_CLIPPING |
+ RADEON_GMC_DST_CLIPPING |
+ RADEON_GMC_BRUSH_NONE |
+ (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_MEMORY |
+ RADEON_GMC_CLR_CMP_CNTL_DIS |
+ RADEON_GMC_WR_MSK_DIS);
+ radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
+ radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
+ radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
+ radeon_ring_write(rdev, num_pages);
+ radeon_ring_write(rdev, num_pages);
+ radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
+ }
+ radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev,
+ RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_HOST_IDLECLEAN |
+ RADEON_WAIT_DMA_GUI_IDLE);
+ if (fence) {
+ r = radeon_fence_emit(rdev, fence);
+ }
+ radeon_ring_unlock_commit(rdev);
+ return r;
+}
+
+
+/*
+ * CP
+ */
+void r100_ring_start(struct radeon_device *rdev)
+{
+ int r;
+
+ r = radeon_ring_lock(rdev, 2);
+ if (r) {
+ return;
+ }
+ radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(rdev,
+ RADEON_ISYNC_ANY2D_IDLE3D |
+ RADEON_ISYNC_ANY3D_IDLE2D |
+ RADEON_ISYNC_WAIT_IDLEGUI |
+ RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+ radeon_ring_unlock_commit(rdev);
+}
+
+static void r100_cp_load_microcode(struct radeon_device *rdev)
+{
+ int i;
+
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+
+ WREG32(RADEON_CP_ME_RAM_ADDR, 0);
+ if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
+ (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
+ (rdev->family == CHIP_RS200)) {
+ DRM_INFO("Loading R100 Microcode\n");
+ for (i = 0; i < 256; i++) {
+ WREG32(RADEON_CP_ME_RAM_DATAH, R100_cp_microcode[i][1]);
+ WREG32(RADEON_CP_ME_RAM_DATAL, R100_cp_microcode[i][0]);
+ }
+ } else if ((rdev->family == CHIP_R200) ||
+ (rdev->family == CHIP_RV250) ||
+ (rdev->family == CHIP_RV280) ||
+ (rdev->family == CHIP_RS300)) {
+ DRM_INFO("Loading R200 Microcode\n");
+ for (i = 0; i < 256; i++) {
+ WREG32(RADEON_CP_ME_RAM_DATAH, R200_cp_microcode[i][1]);
+ WREG32(RADEON_CP_ME_RAM_DATAL, R200_cp_microcode[i][0]);
+ }
+ } else if ((rdev->family == CHIP_R300) ||
+ (rdev->family == CHIP_R350) ||
+ (rdev->family == CHIP_RV350) ||
+ (rdev->family == CHIP_RV380) ||
+ (rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480)) {
+ DRM_INFO("Loading R300 Microcode\n");
+ for (i = 0; i < 256; i++) {
+ WREG32(RADEON_CP_ME_RAM_DATAH, R300_cp_microcode[i][1]);
+ WREG32(RADEON_CP_ME_RAM_DATAL, R300_cp_microcode[i][0]);
+ }
+ } else if ((rdev->family == CHIP_R420) ||
+ (rdev->family == CHIP_R423) ||
+ (rdev->family == CHIP_RV410)) {
+ DRM_INFO("Loading R400 Microcode\n");
+ for (i = 0; i < 256; i++) {
+ WREG32(RADEON_CP_ME_RAM_DATAH, R420_cp_microcode[i][1]);
+ WREG32(RADEON_CP_ME_RAM_DATAL, R420_cp_microcode[i][0]);
+ }
+ } else if ((rdev->family == CHIP_RS690) ||
+ (rdev->family == CHIP_RS740)) {
+ DRM_INFO("Loading RS690/RS740 Microcode\n");
+ for (i = 0; i < 256; i++) {
+ WREG32(RADEON_CP_ME_RAM_DATAH, RS690_cp_microcode[i][1]);
+ WREG32(RADEON_CP_ME_RAM_DATAL, RS690_cp_microcode[i][0]);
+ }
+ } else if (rdev->family == CHIP_RS600) {
+ DRM_INFO("Loading RS600 Microcode\n");
+ for (i = 0; i < 256; i++) {
+ WREG32(RADEON_CP_ME_RAM_DATAH, RS600_cp_microcode[i][1]);
+ WREG32(RADEON_CP_ME_RAM_DATAL, RS600_cp_microcode[i][0]);
+ }
+ } else if ((rdev->family == CHIP_RV515) ||
+ (rdev->family == CHIP_R520) ||
+ (rdev->family == CHIP_RV530) ||
+ (rdev->family == CHIP_R580) ||
+ (rdev->family == CHIP_RV560) ||
+ (rdev->family == CHIP_RV570)) {
+ DRM_INFO("Loading R500 Microcode\n");
+ for (i = 0; i < 256; i++) {
+ WREG32(RADEON_CP_ME_RAM_DATAH, R520_cp_microcode[i][1]);
+ WREG32(RADEON_CP_ME_RAM_DATAL, R520_cp_microcode[i][0]);
+ }
+ }
+}
+
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
+{
+ unsigned rb_bufsz;
+ unsigned rb_blksz;
+ unsigned max_fetch;
+ unsigned pre_write_timer;
+ unsigned pre_write_limit;
+ unsigned indirect2_start;
+ unsigned indirect1_start;
+ uint32_t tmp;
+ int r;
+
+ if (r100_debugfs_cp_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for CP !\n");
+ }
+ /* Reset CP */
+ tmp = RREG32(RADEON_CP_CSQ_STAT);
+ if ((tmp & (1 << 31))) {
+ DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
+ WREG32(RADEON_CP_CSQ_MODE, 0);
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
+ tmp = RREG32(RADEON_RBBM_SOFT_RESET);
+ mdelay(2);
+ WREG32(RADEON_RBBM_SOFT_RESET, 0);
+ tmp = RREG32(RADEON_RBBM_SOFT_RESET);
+ mdelay(2);
+ tmp = RREG32(RADEON_CP_CSQ_STAT);
+ if ((tmp & (1 << 31))) {
+ DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
+ }
+ } else {
+ DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
+ }
+ /* Align ring size */
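+ /* (RB_BUFSZ is a log2-encoded field, so the ring size is rounded to a
+ * power of two) */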
+ rb_bufsz = drm_order(ring_size / 8);
+ ring_size = (1 << (rb_bufsz + 1)) * 4;
+ r100_cp_load_microcode(rdev);
+ r = radeon_ring_init(rdev, ring_size);
+ if (r) {
+ return r;
+ }
+ /* Each time the CP reads 1024 bytes (16 dword/quadword), update
+ * the rptr copy in system ram */
+ rb_blksz = 9;
+ /* cp will read 128 bytes at a time (4 dwords) */
+ max_fetch = 1;
+ rdev->cp.align_mask = 16 - 1;
+ /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
+ pre_write_timer = 64;
+ /* Force a CP_RB_WPTR write if it is written more than once before the
+ * delay expires
+ */
+ pre_write_limit = 0;
+ /* Setup the cp cache like this (cache size is 96 dwords) :
+ * RING 0 to 15
+ * INDIRECT1 16 to 79
+ * INDIRECT2 80 to 95
+ * So the ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)),
+ * the indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords)),
+ * the indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)).
+ * The idea is that most GPU commands go through the indirect1 buffer,
+ * so it gets the bigger cache.
+ */
+ indirect2_start = 80;
+ indirect1_start = 16;
+ /* cp setup */
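+ /* 0x718 is likely CP_RB_WPTR_DELAY: pre-write timer in the low bits,
+ * pre-write limit in the upper field as programmed here */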
+ WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
+ WREG32(RADEON_CP_RB_CNTL,
+ REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
+ REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
+ REG_SET(RADEON_MAX_FETCH, max_fetch) |
+ RADEON_RB_NO_UPDATE);
+ /* Set ring address */
+ DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
+ WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
+ /* Force read & write ptr to 0 */
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ udelay(10);
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
+ /* Set cp mode to bus mastering & enable cp*/
+ WREG32(RADEON_CP_CSQ_MODE,
+ REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
+ REG_SET(RADEON_INDIRECT1_START, indirect1_start));
+ WREG32(0x718, 0);
+ WREG32(0x744, 0x00004D4D);
+ WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
+ radeon_ring_start(rdev);
+ r = radeon_ring_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: cp isn't working (%d).\n", r);
+ return r;
+ }
+ rdev->cp.ready = true;
+ return 0;
+}
+
+void r100_cp_fini(struct radeon_device *rdev)
+{
+ /* Disable ring */
+ rdev->cp.ready = false;
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ radeon_ring_fini(rdev);
+ DRM_INFO("radeon: cp finalized\n");
+}
+
+void r100_cp_disable(struct radeon_device *rdev)
+{
+ /* Disable ring */
+ rdev->cp.ready = false;
+ WREG32(RADEON_CP_CSQ_MODE, 0);
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+}
+
+int r100_cp_reset(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ bool reinit_cp;
+ int i;
+
+ reinit_cp = rdev->cp.ready;
+ rdev->cp.ready = false;
+ WREG32(RADEON_CP_CSQ_MODE, 0);
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
+ (void)RREG32(RADEON_RBBM_SOFT_RESET);
+ udelay(200);
+ WREG32(RADEON_RBBM_SOFT_RESET, 0);
+ /* Wait to prevent race in RBBM_STATUS */
+ mdelay(1);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ if (!(tmp & (1 << 16))) {
+ DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
+ tmp);
+ if (reinit_cp) {
+ return r100_cp_init(rdev, rdev->cp.ring_size);
+ }
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
+ return -1;
+}
+
+
+/*
+ * CS functions
+ */
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned *auth, unsigned n,
+ radeon_packet0_check_t check)
+{
+ unsigned reg;
+ unsigned i, j, m;
+ unsigned idx;
+ int r;
+
+ idx = pkt->idx + 1;
+ reg = pkt->reg;
+ if (pkt->one_reg_wr) {
+ if ((reg >> 7) > n) {
+ return -EINVAL;
+ }
+ } else {
+ if (((reg + (pkt->count << 2)) >> 7) > n) {
+ return -EINVAL;
+ }
+ }
+ for (i = 0; i <= pkt->count; i++, idx++) {
+ j = (reg >> 7);
+ m = 1 << ((reg >> 2) & 31);
+ if (auth[j] & m) {
+ r = check(p, pkt, idx, reg);
+ if (r) {
+ return r;
+ }
+ }
+ if (pkt->one_reg_wr) {
+ if (!(auth[j] & m)) {
+ break;
+ }
+ } else {
+ reg += 4;
+ }
+ }
+ return 0;
+}
+
+int r100_cs_parse_packet3(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned *auth, unsigned n,
+ radeon_packet3_check_t check)
+{
+ unsigned i, m;
+
+ if ((pkt->opcode >> 5) > n) {
+ return -EINVAL;
+ }
+ i = pkt->opcode >> 5;
+ m = 1 << (pkt->opcode & 31);
+ if (auth[i] & m) {
+ return check(p, pkt);
+ }
+ return 0;
+}
+
+void r100_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ volatile uint32_t *ib;
+ unsigned i;
+ unsigned idx;
+
+ ib = p->ib->ptr;
+ ib_chunk = &p->chunks[p->chunk_ib_idx];
+ idx = pkt->idx;
+ for (i = 0; i <= (pkt->count + 1); i++, idx++) {
+ DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+ }
+}
+
+/**
+ * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @parser: parser structure holding parsing context.
+ * @pkt: where to store packet information
+ *
+ * Assumes that chunk_ib_index is properly set. Returns -EINVAL
+ * if the packet is bigger than the remaining ib size or if the packet type is unknown.
+ **/
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ uint32_t header = ib_chunk->kdata[idx];
+
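+ /* CP packet headers carry the packet type in bits 31:30 and the dword
+ * count in bits 29:16; type-0 headers additionally encode the base
+ * register and the one-reg-write flag, decoded by the macros below */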
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ pkt->idx = idx;
+ pkt->type = CP_PACKET_GET_TYPE(header);
+ pkt->count = CP_PACKET_GET_COUNT(header);
+ switch (pkt->type) {
+ case PACKET_TYPE0:
+ pkt->reg = CP_PACKET0_GET_REG(header);
+ pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
+ break;
+ case PACKET_TYPE3:
+ pkt->opcode = CP_PACKET3_GET_OPCODE(header);
+ break;
+ case PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * @parser: parser structure holding parsing context.
+ * @data: pointer to relocation data
+ * @offset_start: starting offset
+ * @offset_mask: offset mask (to align start offset on)
+ * @reloc: reloc information
+ *
+ * Check that the next packet is a relocation packet3, do bo validation
+ * and compute the GPU offset using the provided start.
+ **/
+int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ ib_chunk = &p->chunks[p->chunk_ib_idx];
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = r100_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return r;
+ }
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ r100_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ idx = ib_chunk->kdata[p3reloc.idx + 1];
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ r100_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
+
+static int r100_packet0_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ struct radeon_cs_reloc *reloc;
+ volatile uint32_t *ib;
+ uint32_t tmp;
+ unsigned reg;
+ unsigned i;
+ unsigned idx;
+ bool onereg;
+ int r;
+
+ ib = p->ib->ptr;
+ ib_chunk = &p->chunks[p->chunk_ib_idx];
+ idx = pkt->idx + 1;
+ reg = pkt->reg;
+ onereg = false;
+ if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) {
+ onereg = true;
+ }
+ for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+ switch (reg) {
+ /* FIXME: only allow PACKET3 blit? easier to check for out of
+ * range access */
+ case RADEON_DST_PITCH_OFFSET:
+ case RADEON_SRC_PITCH_OFFSET:
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
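+ /* the low 22 bits hold the surface offset in 1KB units; add the bo's
+ * GPU offset there and keep the remaining high bits (the pitch) */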
+ tmp = ib_chunk->kdata[idx] & 0x003fffff;
+ tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+ ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
+ break;
+ case RADEON_RB3D_DEPTHOFFSET:
+ case RADEON_RB3D_COLOROFFSET:
+ case R300_RB3D_COLOROFFSET0:
+ case R300_ZB_DEPTHOFFSET:
+ case R200_PP_TXOFFSET_0:
+ case R200_PP_TXOFFSET_1:
+ case R200_PP_TXOFFSET_2:
+ case R200_PP_TXOFFSET_3:
+ case R200_PP_TXOFFSET_4:
+ case R200_PP_TXOFFSET_5:
+ case RADEON_PP_TXOFFSET_0:
+ case RADEON_PP_TXOFFSET_1:
+ case RADEON_PP_TXOFFSET_2:
+ case R300_TX_OFFSET_0:
+ case R300_TX_OFFSET_0+4:
+ case R300_TX_OFFSET_0+8:
+ case R300_TX_OFFSET_0+12:
+ case R300_TX_OFFSET_0+16:
+ case R300_TX_OFFSET_0+20:
+ case R300_TX_OFFSET_0+24:
+ case R300_TX_OFFSET_0+28:
+ case R300_TX_OFFSET_0+32:
+ case R300_TX_OFFSET_0+36:
+ case R300_TX_OFFSET_0+40:
+ case R300_TX_OFFSET_0+44:
+ case R300_TX_OFFSET_0+48:
+ case R300_TX_OFFSET_0+52:
+ case R300_TX_OFFSET_0+56:
+ case R300_TX_OFFSET_0+60:
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+ break;
+ default:
+ /* FIXME: we don't want to allow any other packets */
+ break;
+ }
+ if (onereg) {
+ /* FIXME: forbid onereg write to register on relocate */
+ break;
+ }
+ }
+ return 0;
+}
+
+static int r100_packet3_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ struct radeon_cs_reloc *reloc;
+ unsigned idx;
+ unsigned i, c;
+ volatile uint32_t *ib;
+ int r;
+
+ ib = p->ib->ptr;
+ ib_chunk = &p->chunks[p->chunk_ib_idx];
+ idx = pkt->idx + 1;
+ switch (pkt->opcode) {
+ case PACKET3_3D_LOAD_VBPNTR:
+ c = ib_chunk->kdata[idx++];
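+ /* first dword is the number of vertex arrays; arrays are described in
+ * pairs (one packed dword plus two buffer offsets), hence idx += 3 per
+ * iteration, with a trailing odd array handled below */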
+ for (i = 0; i < (c - 1); i += 2, idx += 3) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
+ }
+ if (c & 1) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+ }
+ break;
+ case PACKET3_INDX_BUFFER:
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case 0x23:
+ /* FIXME: cleanup */
+ /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case PACKET3_3D_DRAW_IMMD:
+ /* triggers drawing using in-packet vertex data */
+ case PACKET3_3D_DRAW_IMMD_2:
+ /* triggers drawing using in-packet vertex data */
+ case PACKET3_3D_DRAW_VBUF_2:
+ /* triggers drawing of vertex buffers setup elsewhere */
+ case PACKET3_3D_DRAW_INDX_2:
+ /* triggers drawing using indices to vertex buffer */
+ case PACKET3_3D_DRAW_VBUF:
+ /* triggers drawing of vertex buffers setup elsewhere */
+ case PACKET3_3D_DRAW_INDX:
+ /* triggers drawing using indices to vertex buffer */
+ case PACKET3_NOP:
+ break;
+ default:
+ DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int r100_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet pkt;
+ int r;
+
+ do {
+ r = r100_cs_packet_parse(p, &pkt, p->idx);
+ if (r) {
+ return r;
+ }
+ p->idx += pkt.count + 2;
+ switch (pkt.type) {
+ case PACKET_TYPE0:
+ r = r100_packet0_check(p, &pkt);
+ break;
+ case PACKET_TYPE2:
+ break;
+ case PACKET_TYPE3:
+ r = r100_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n",
+ pkt.type);
+ return -EINVAL;
+ }
+ if (r) {
+ return r;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ return 0;
+}
+
+
+/*
+ * Global GPU functions
+ */
+void r100_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+
+ if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
+ rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
+ }
+
+ if (rdev->family == CHIP_RV100 ||
+ rdev->family == CHIP_RS100 ||
+ rdev->family == CHIP_RS200) {
+ rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
+ }
+}
+
+/* Wait for vertical sync on primary CRTC */
+void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
+{
+ uint32_t crtc_gen_cntl, tmp;
+ int i;
+
+ crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
+ if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
+ !(crtc_gen_cntl & RADEON_CRTC_EN)) {
+ return;
+ }
+ /* Clear the CRTC_VBLANK_SAVE bit */
+ WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(RADEON_CRTC_STATUS);
+ if (tmp & RADEON_CRTC_VBLANK_SAVE) {
+ return;
+ }
+ DRM_UDELAY(1);
+ }
+}
+
+/* Wait for vertical sync on secondary CRTC */
+void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
+{
+ uint32_t crtc2_gen_cntl, tmp;
+ int i;
+
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
+ !(crtc2_gen_cntl & RADEON_CRTC2_EN))
+ return;
+
+ /* Clear the CRTC_VBLANK_SAVE bit */
+ WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(RADEON_CRTC2_STATUS);
+ if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
+ return;
+ }
+ DRM_UDELAY(1);
+ }
+}
+
+int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
+ if (tmp >= n) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+int r100_gui_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
+ printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
+ " Bad things might happen.\n");
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ if (!(tmp & (1 << 31))) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+int r100_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(0x0150);
+ if (tmp & (1 << 2)) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+void r100_gpu_init(struct radeon_device *rdev)
+{
+ /* TODO: anything to do here? pipes? */
+ r100_hdp_reset(rdev);
+}
+
+void r100_hdp_reset(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
+ tmp |= (7 << 28);
+ WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
+ (void)RREG32(RADEON_HOST_PATH_CNTL);
+ udelay(200);
+ WREG32(RADEON_RBBM_SOFT_RESET, 0);
+ WREG32(RADEON_HOST_PATH_CNTL, tmp);
+ (void)RREG32(RADEON_HOST_PATH_CNTL);
+}
+
+int r100_rb2d_reset(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int i;
+
+ WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
+ (void)RREG32(RADEON_RBBM_SOFT_RESET);
+ udelay(200);
+ WREG32(RADEON_RBBM_SOFT_RESET, 0);
+ /* Wait to prevent race in RBBM_STATUS */
+ mdelay(1);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ if (!(tmp & (1 << 26))) {
+ DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
+ tmp);
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
+ return -1;
+}
+
+int r100_gpu_reset(struct radeon_device *rdev)
+{
+ uint32_t status;
+
+ /* reset order likely matters */
+ status = RREG32(RADEON_RBBM_STATUS);
+ /* reset HDP */
+ r100_hdp_reset(rdev);
+ /* reset rb2d */
+ if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
+ r100_rb2d_reset(rdev);
+ }
+ /* TODO: reset 3D engine */
+ /* reset CP */
+ status = RREG32(RADEON_RBBM_STATUS);
+ if (status & (1 << 16)) {
+ r100_cp_reset(rdev);
+ }
+ /* Check if GPU is idle */
+ status = RREG32(RADEON_RBBM_STATUS);
+ if (status & (1 << 31)) {
+ DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ return -1;
+ }
+ DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+ return 0;
+}
+
+
+/*
+ * VRAM info
+ */
+static void r100_vram_get_type(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ rdev->mc.vram_is_ddr = false;
+ if (rdev->flags & RADEON_IS_IGP)
+ rdev->mc.vram_is_ddr = true;
+ else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
+ rdev->mc.vram_is_ddr = true;
+ if ((rdev->family == CHIP_RV100) ||
+ (rdev->family == CHIP_RS100) ||
+ (rdev->family == CHIP_RS200)) {
+ tmp = RREG32(RADEON_MEM_CNTL);
+ if (tmp & RV100_HALF_MODE) {
+ rdev->mc.vram_width = 32;
+ } else {
+ rdev->mc.vram_width = 64;
+ }
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ rdev->mc.vram_width /= 4;
+ rdev->mc.vram_is_ddr = true;
+ }
+ } else if (rdev->family <= CHIP_RV280) {
+ tmp = RREG32(RADEON_MEM_CNTL);
+ if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
+ rdev->mc.vram_width = 128;
+ } else {
+ rdev->mc.vram_width = 64;
+ }
+ } else {
+ /* newer IGPs */
+ rdev->mc.vram_width = 128;
+ }
+}
+
+void r100_vram_info(struct radeon_device *rdev)
+{
+ r100_vram_get_type(rdev);
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ uint32_t tom;
+ /* read NB_TOM to get the amount of ram stolen for the GPU */
+ tom = RREG32(RADEON_NB_TOM);
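+ /* NB_TOM packs the top and bottom of the stolen range in 64KB units
+ * (bits 31:16 and 15:0), hence the arithmetic below */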
+ rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
+ WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+ } else {
+ rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+ /* Some production boards of m6 will report 0
+ * if it's 8 MB
+ */
+ if (rdev->mc.vram_size == 0) {
+ rdev->mc.vram_size = 8192 * 1024;
+ WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+ }
+ }
+
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ if (rdev->mc.aper_size > rdev->mc.vram_size) {
+ /* Why do some boards not have CONFIG_MEMSIZE properly
+ * set up? */
+ rdev->mc.vram_size = rdev->mc.aper_size;
+ WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+ }
+}
+
+
+/*
+ * Indirect registers accessor
+ */
+void r100_pll_errata_after_index(struct radeon_device *rdev)
+{
+ if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
+ return;
+ }
+ (void)RREG32(RADEON_CLOCK_CNTL_DATA);
+ (void)RREG32(RADEON_CRTC_GEN_CNTL);
+}
+
+static void r100_pll_errata_after_data(struct radeon_device *rdev)
+{
+ /* This workaround is necessary on RV100, RS100 and RS200 chips,
+ * or the chip could hang on a subsequent access
+ */
+ if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
+ udelay(5000);
+ }
+
+ /* This function is required to work around a hardware bug in some (all?)
+ * revisions of the R300. This workaround should be called after every
+ * CLOCK_CNTL_INDEX register access. If not, register reads afterward
+ * may not be correct.
+ */
+ if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
+ uint32_t save, tmp;
+
+ save = RREG32(RADEON_CLOCK_CNTL_INDEX);
+ tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
+ WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
+ tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
+ WREG32(RADEON_CLOCK_CNTL_INDEX, save);
+ }
+}
+
+uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t data;
+
+ WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
+ r100_pll_errata_after_index(rdev);
+ data = RREG32(RADEON_CLOCK_CNTL_DATA);
+ r100_pll_errata_after_data(rdev);
+ return data;
+}
+
+void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
+ r100_pll_errata_after_index(rdev);
+ WREG32(RADEON_CLOCK_CNTL_DATA, v);
+ r100_pll_errata_after_data(rdev);
+}
+
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+{
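+ /* registers below the 64KB directly mapped MMIO window are read in
+ * place; higher ones go through the MM_INDEX/MM_DATA pair */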
+ if (reg < 0x10000)
+ return readl(((void __iomem *)rdev->rmmio) + reg);
+ else {
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ }
+}
+
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ if (reg < 0x10000)
+ writel(v, ((void __iomem *)rdev->rmmio) + reg);
+ else {
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ }
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t reg, value;
+ unsigned i;
+
+ seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
+ seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
+ seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+ for (i = 0; i < 64; i++) {
+ WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
+ reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
+ WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
+ value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
+ seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
+ }
+ return 0;
+}
+
+static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t rdp, wdp;
+ unsigned count, i, j;
+
+ radeon_ring_free_size(rdev);
+ rdp = RREG32(RADEON_CP_RB_RPTR);
+ wdp = RREG32(RADEON_CP_RB_WPTR);
+ count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+ seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+ seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
+ seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+ seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
+ seq_printf(m, "%u dwords in ring\n", count);
+ for (j = 0; j <= count; j++) {
+ i = (rdp + j) & rdev->cp.ptr_mask;
+ seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+ }
+ return 0;
+}
+
+
+static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t csq_stat, csq2_stat, tmp;
+ unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
+ unsigned i;
+
+ seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+ seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
+ csq_stat = RREG32(RADEON_CP_CSQ_STAT);
+ csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
+ r_rptr = (csq_stat >> 0) & 0x3ff;
+ r_wptr = (csq_stat >> 10) & 0x3ff;
+ ib1_rptr = (csq_stat >> 20) & 0x3ff;
+ ib1_wptr = (csq2_stat >> 0) & 0x3ff;
+ ib2_rptr = (csq2_stat >> 10) & 0x3ff;
+ ib2_wptr = (csq2_stat >> 20) & 0x3ff;
+ seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
+ seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
+ seq_printf(m, "Ring rptr %u\n", r_rptr);
+ seq_printf(m, "Ring wptr %u\n", r_wptr);
+ seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
+ seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
+ seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
+ seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
+ /* FIXME: 0, 128, 640 depend on the fifo setup; see cp_init_kms
+ * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
+ seq_printf(m, "Ring fifo:\n");
+ for (i = 0; i < 256; i++) {
+ WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+ tmp = RREG32(RADEON_CP_CSQ_DATA);
+ seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
+ }
+ seq_printf(m, "Indirect1 fifo:\n");
+ for (i = 256; i <= 512; i++) {
+ WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+ tmp = RREG32(RADEON_CP_CSQ_DATA);
+ seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
+ }
+ seq_printf(m, "Indirect2 fifo:\n");
+ for (i = 640; i < ib1_wptr; i++) {
+ WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+ tmp = RREG32(RADEON_CP_CSQ_DATA);
+ seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
+ }
+ return 0;
+}
+
+static int r100_debugfs_mc_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = RREG32(RADEON_CONFIG_MEMSIZE);
+ seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_MC_FB_LOCATION);
+ seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_BUS_CNTL);
+ seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_MC_AGP_LOCATION);
+ seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_AGP_BASE);
+ seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_HOST_PATH_CNTL);
+ seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
+ tmp = RREG32(0x01D0);
+ seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_AIC_LO_ADDR);
+ seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_AIC_HI_ADDR);
+ seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
+ tmp = RREG32(0x01E4);
+ seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
+ return 0;
+}
+
+static struct drm_info_list r100_debugfs_rbbm_list[] = {
+ {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
+};
+
+static struct drm_info_list r100_debugfs_cp_list[] = {
+ {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
+ {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
+};
+
+static struct drm_info_list r100_debugfs_mc_info_list[] = {
+ {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
+};
+#endif
+
+int r100_debugfs_rbbm_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
+#else
+ return 0;
+#endif
+}
+
+int r100_debugfs_cp_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
+#else
+ return 0;
+#endif
+}
+
+int r100_debugfs_mc_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
new file mode 100644
index 00000000000..f5870a099d4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -0,0 +1,1116 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* r300,r350,rv350,rv370,rv380 depends on : */
+void r100_hdp_reset(struct radeon_device *rdev);
+int r100_cp_reset(struct radeon_device *rdev);
+int r100_rb2d_reset(struct radeon_device *rdev);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+int r100_pci_gart_enable(struct radeon_device *rdev);
+void r100_pci_gart_disable(struct radeon_device *rdev);
+void r100_mc_setup(struct radeon_device *rdev);
+void r100_mc_disable_clients(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx);
+int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned *auth, unsigned n,
+ radeon_packet0_check_t check);
+int r100_cs_parse_packet3(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned *auth, unsigned n,
+ radeon_packet3_check_t check);
+void r100_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt);
+
+/* This file gathers functions specific to:
+ * r300,r350,rv350,rv370,rv380
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void r300_gpu_init(struct radeon_device *rdev);
+int r300_mc_wait_for_idle(struct radeon_device *rdev);
+int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
+
+/*
+ * rv370,rv380 PCIE GART
+ */
+void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int i;
+
+ /* Work around a HW bug: flush twice */
+ for (i = 0; i < 2; i++) {
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
+ (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+ mb();
+ }
+}
+
+int rv370_pcie_gart_enable(struct radeon_device *rdev)
+{
+ uint32_t table_addr;
+ uint32_t tmp;
+ int r;
+
+ /* Initialize common gart structure */
+ r = radeon_gart_init(rdev);
+ if (r) {
+ return r;
+ }
+ r = rv370_debugfs_pcie_gart_info_init(rdev);
+ if (r) {
+ DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
+ }
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+ r = radeon_gart_table_vram_alloc(rdev);
+ if (r) {
+ return r;
+ }
+ /* discard memory requests outside of the configured range */
+ tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
+ table_addr = rdev->gart.table_addr;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
+ /* FIXME: setup default page */
+ WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
+ WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
+ /* Clear error */
+ WREG32_PCIE(0x18, 0);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+ tmp |= RADEON_PCIE_TX_GART_EN;
+ tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+ rv370_pcie_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
+ rdev->mc.gtt_size >> 20, table_addr);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void rv370_pcie_gart_disable(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+ tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
+ if (rdev->gart.table.vram.robj) {
+ radeon_object_kunmap(rdev->gart.table.vram.robj);
+ radeon_object_unpin(rdev->gart.table.vram.robj);
+ }
+}
+
+int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+ void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+
+ if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+ return -EINVAL;
+ }
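+ /* pack the 40-bit page address into the PCIE GART entry format; the
+ * low 0xC bits appear to be enable/valid flags for the entry */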
+ addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC;
+ writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
+ return 0;
+}
+
+int r300_gart_enable(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (rdev->family > CHIP_RV350) {
+ rv370_pcie_gart_disable(rdev);
+ } else {
+ r100_pci_gart_disable(rdev);
+ }
+ return 0;
+ }
+#endif
+ if (rdev->flags & RADEON_IS_PCIE) {
+ rdev->asic->gart_disable = &rv370_pcie_gart_disable;
+ rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+ return rv370_pcie_gart_enable(rdev);
+ }
+ return r100_pci_gart_enable(rdev);
+}
+
+
+/*
+ * MC
+ */
+int r300_mc_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (r100_debugfs_rbbm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+ }
+
+ r300_gpu_init(rdev);
+ r100_pci_gart_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCIE) {
+ rv370_pcie_gart_disable(rdev);
+ }
+
+ /* Setup GPU memory space */
+ rdev->mc.vram_location = 0xFFFFFFFFUL;
+ rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ printk(KERN_WARNING "[drm] Disabling AGP\n");
+ rdev->flags &= ~RADEON_IS_AGP;
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ } else {
+ rdev->mc.gtt_location = rdev->mc.agp_base;
+ }
+ }
+ r = radeon_mc_setup(rdev);
+ if (r) {
+ return r;
+ }
+
+ /* Program GPU memory space */
+ r100_mc_disable_clients(rdev);
+ if (r300_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+ r100_mc_setup(rdev);
+ return 0;
+}
+
+void r300_mc_fini(struct radeon_device *rdev)
+{
+ if (rdev->flags & RADEON_IS_PCIE) {
+ rv370_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ } else {
+ r100_pci_gart_disable(rdev);
+ radeon_gart_table_ram_free(rdev);
+ }
+ radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Fence emission
+ */
+void r300_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ /* Whoever calls radeon_fence_emit should call ring_lock and ask
+ * for enough space (today the callers are ib schedule and buffer move) */
+ /* Write SC register so SC & US assert idle */
+ radeon_ring_write(rdev, PACKET0(0x43E0, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(0x43E4, 0));
+ radeon_ring_write(rdev, 0);
+ /* Flush 3D cache */
+ radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
+ radeon_ring_write(rdev, (2 << 0));
+ radeon_ring_write(rdev, PACKET0(0x4F18, 0));
+ radeon_ring_write(rdev, (1 << 0));
+ /* Wait until IDLE & CLEAN */
+ radeon_ring_write(rdev, PACKET0(0x1720, 0));
+ radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
+ /* Emit fence sequence & fire IRQ */
+ radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
+ radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
+ radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+}
+
+
+/*
+ * Global GPU functions
+ */
+int r300_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence)
+{
+ uint32_t size;
+ uint32_t cur_size;
+ int i, num_loops;
+ int r = 0;
+
+ /* radeon pitch is /64 */
+ size = num_pages << PAGE_SHIFT;
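+ /* a single DMA packet below is capped at 0x1FFFFF, so the copy is
+ * split into num_loops chunks */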
+ num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
+ r = radeon_ring_lock(rdev, num_loops * 4 + 64);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+ /* Must wait for 2D idle & clean before DMA or hangs might happen */
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, (1 << 16));
+ for (i = 0; i < num_loops; i++) {
+ cur_size = size;
+ if (cur_size > 0x1FFFFF) {
+ cur_size = 0x1FFFFF;
+ }
+ size -= cur_size;
+ radeon_ring_write(rdev, PACKET0(0x720, 2));
+ radeon_ring_write(rdev, src_offset);
+ radeon_ring_write(rdev, dst_offset);
+ radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
+ src_offset += cur_size;
+ dst_offset += cur_size;
+ }
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
+ if (fence) {
+ r = radeon_fence_emit(rdev, fence);
+ }
+ radeon_ring_unlock_commit(rdev);
+ return r;
+}
+
+void r300_ring_start(struct radeon_device *rdev)
+{
+ unsigned gb_tile_config;
+ int r;
+
+ /* Sub-pixel 1/12 so we can have 4K rendering according to the docs */
+ gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
+ switch (rdev->num_gb_pipes) {
+ case 2:
+ gb_tile_config |= R300_PIPE_COUNT_R300;
+ break;
+ case 3:
+ gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+ break;
+ case 4:
+ gb_tile_config |= R300_PIPE_COUNT_R420;
+ break;
+ case 1:
+ default:
+ gb_tile_config |= R300_PIPE_COUNT_RV350;
+ break;
+ }
+
+ r = radeon_ring_lock(rdev, 64);
+ if (r) {
+ return;
+ }
+ radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(rdev,
+ RADEON_ISYNC_ANY2D_IDLE3D |
+ RADEON_ISYNC_ANY3D_IDLE2D |
+ RADEON_ISYNC_WAIT_IDLEGUI |
+ RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+ radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
+ radeon_ring_write(rdev, gb_tile_config);
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev,
+ RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_3D_IDLECLEAN);
+ radeon_ring_write(rdev, PACKET0(0x170C, 0));
+ radeon_ring_write(rdev, 1 << 31);
+ radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev,
+ RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_3D_IDLECLEAN);
+ radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
+ radeon_ring_write(rdev,
+ ((6 << R300_MS_X0_SHIFT) |
+ (6 << R300_MS_Y0_SHIFT) |
+ (6 << R300_MS_X1_SHIFT) |
+ (6 << R300_MS_Y1_SHIFT) |
+ (6 << R300_MS_X2_SHIFT) |
+ (6 << R300_MS_Y2_SHIFT) |
+ (6 << R300_MSBD0_Y_SHIFT) |
+ (6 << R300_MSBD0_X_SHIFT)));
+ radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
+ radeon_ring_write(rdev,
+ ((6 << R300_MS_X3_SHIFT) |
+ (6 << R300_MS_Y3_SHIFT) |
+ (6 << R300_MS_X4_SHIFT) |
+ (6 << R300_MS_Y4_SHIFT) |
+ (6 << R300_MS_X5_SHIFT) |
+ (6 << R300_MS_Y5_SHIFT) |
+ (6 << R300_MSBD1_SHIFT)));
+ radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
+ radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+ radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
+ radeon_ring_write(rdev,
+ R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
+ radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
+ radeon_ring_write(rdev,
+ R300_GEOMETRY_ROUND_NEAREST |
+ R300_COLOR_ROUND_NEAREST);
+ radeon_ring_unlock_commit(rdev);
+}
+
+void r300_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+
+ if (rdev->family == CHIP_R300 &&
+ (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
+ rdev->pll_errata |= CHIP_ERRATA_R300_CG;
+ }
+}
+
+int r300_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(0x0150);
+ if (tmp & (1 << 4)) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+void r300_gpu_init(struct radeon_device *rdev)
+{
+ uint32_t gb_tile_config, tmp;
+
+ r100_hdp_reset(rdev);
+ /* FIXME: rv380: one pipe? */
+ if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
+ /* r300,r350 */
+ rdev->num_gb_pipes = 2;
+ } else {
+ /* rv350,rv370,rv380 */
+ rdev->num_gb_pipes = 1;
+ }
+ gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
+ switch (rdev->num_gb_pipes) {
+ case 2:
+ gb_tile_config |= R300_PIPE_COUNT_R300;
+ break;
+ case 3:
+ gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+ break;
+ case 4:
+ gb_tile_config |= R300_PIPE_COUNT_R420;
+ break;
+ case 1:
+ default:
+ gb_tile_config |= R300_PIPE_COUNT_RV350;
+ break;
+ }
+ WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
+
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+
+ tmp = RREG32(0x170C);
+ WREG32(0x170C, tmp | (1 << 31));
+
+ WREG32(R300_RB2D_DSTCACHE_MODE,
+ R300_DC_AUTOFLUSH_ENABLE |
+ R300_DC_DC_DISABLE_IGNORE_PE);
+
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+ if (r300_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+ DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
+}
+
+int r300_ga_reset(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ bool reinit_cp;
+ int i;
+
+ reinit_cp = rdev->cp.ready;
+ rdev->cp.ready = false;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ WREG32(RADEON_CP_CSQ_MODE, 0);
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
+ (void)RREG32(RADEON_RBBM_SOFT_RESET);
+ udelay(200);
+ WREG32(RADEON_RBBM_SOFT_RESET, 0);
+ /* Wait to prevent race in RBBM_STATUS */
+ mdelay(1);
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ if (tmp & ((1 << 20) | (1 << 26))) {
+ DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
+ /* GA still busy, soft reset it */
+ WREG32(0x429C, 0x200);
+ WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
+ WREG32(0x43E0, 0);
+ WREG32(0x43E4, 0);
+ WREG32(0x24AC, 0);
+ }
+ /* Wait to prevent race in RBBM_STATUS */
+ mdelay(1);
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ if (!(tmp & ((1 << 20) | (1 << 26)))) {
+ break;
+ }
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ if (!(tmp & ((1 << 20) | (1 << 26)))) {
+ DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
+ tmp);
+ if (reinit_cp) {
+ return r100_cp_init(rdev, rdev->cp.ring_size);
+ }
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
+ return -1;
+}
+
+int r300_gpu_reset(struct radeon_device *rdev)
+{
+ uint32_t status;
+
+ /* reset order likely matters */
+ status = RREG32(RADEON_RBBM_STATUS);
+ /* reset HDP */
+ r100_hdp_reset(rdev);
+ /* reset rb2d */
+ if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
+ r100_rb2d_reset(rdev);
+ }
+ /* reset GA */
+ if (status & ((1 << 20) | (1 << 26))) {
+ r300_ga_reset(rdev);
+ }
+ /* reset CP */
+ status = RREG32(RADEON_RBBM_STATUS);
+ if (status & (1 << 16)) {
+ r100_cp_reset(rdev);
+ }
+ /* Check if GPU is idle */
+ status = RREG32(RADEON_RBBM_STATUS);
+ if (status & (1 << 31)) {
+ DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ return -1;
+ }
+ DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+ return 0;
+}
+
+
+/*
+ * r300,r350,rv350,rv380 VRAM info
+ */
+void r300_vram_info(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ /* DDR for all cards after R300 & IGP */
+ rdev->mc.vram_is_ddr = true;
+ tmp = RREG32(RADEON_MEM_CNTL);
+ if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
+ rdev->mc.vram_width = 128;
+ } else {
+ rdev->mc.vram_width = 64;
+ }
+ rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
+
+
+/*
+ * Indirect registers accessor
+ */
+uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
+ (void)RREG32(RADEON_PCIE_INDEX);
+ r = RREG32(RADEON_PCIE_DATA);
+ return r;
+}
+
+void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
+ (void)RREG32(RADEON_PCIE_INDEX);
+ WREG32(RADEON_PCIE_DATA, (v));
+ (void)RREG32(RADEON_PCIE_DATA);
+}
+
+/*
+ * PCIE Lanes
+ */
+
+void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+ uint32_t link_width_cntl, mask;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* FIXME wait for idle */
+
+ switch (lanes) {
+ case 0:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+ break;
+ case 1:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+ break;
+ case 2:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+ break;
+ case 4:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+ break;
+ case 8:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+ break;
+ case 12:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+ break;
+ case 16:
+ default:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+ break;
+ }
+
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+ (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+ return;
+
+ link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+ RADEON_PCIE_LC_RECONFIG_NOW |
+ RADEON_PCIE_LC_RECONFIG_LATER |
+ RADEON_PCIE_LC_SHORT_RECONFIG_EN);
+ link_width_cntl |= mask;
+ WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+ RADEON_PCIE_LC_RECONFIG_NOW));
+
+ /* wait for lane set to complete */
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ while (link_width_cntl == 0xffffffff)
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+}
+
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+ seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
+ seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
+ seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
+ seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
+ seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
+ seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
+ seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
+ return 0;
+}
+
+static struct drm_info_list rv370_pcie_gart_info_list[] = {
+ {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
+};
+#endif
+
+int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
+#else
+ return 0;
+#endif
+}
+
+
+/*
+ * CS functions
+ */
+struct r300_cs_track_cb {
+ struct radeon_object *robj;
+ unsigned pitch;
+ unsigned cpp;
+ unsigned offset;
+};
+
+struct r300_cs_track {
+ unsigned num_cb;
+ unsigned maxy;
+ struct r300_cs_track_cb cb[4];
+ struct r300_cs_track_cb zb;
+ bool z_enabled;
+};
+
+int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
+{
+ unsigned i;
+ unsigned long size;
+
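+ /* Conservative bound: pitch * bytes-per-pixel * max scissor height,
+  * plus the buffer offset, must fit within the backing object.
+  */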
+ for (i = 0; i < track->num_cb; i++) {
+ if (track->cb[i].robj == NULL) {
+ DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
+ return -EINVAL;
+ }
+ size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
+ size += track->cb[i].offset;
+ if (size > radeon_object_size(track->cb[i].robj)) {
+ DRM_ERROR("[drm] Buffer too small for color buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_object_size(track->cb[i].robj));
+ DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
+ i, track->cb[i].pitch, track->cb[i].cpp,
+ track->cb[i].offset, track->maxy);
+ return -EINVAL;
+ }
+ }
+ if (track->z_enabled) {
+ if (track->zb.robj == NULL) {
+ DRM_ERROR("[drm] No buffer for z buffer !\n");
+ return -EINVAL;
+ }
+ size = track->zb.pitch * track->zb.cpp * track->maxy;
+ size += track->zb.offset;
+ if (size > radeon_object_size(track->zb.robj)) {
+ DRM_ERROR("[drm] Buffer too small for z buffer "
+ "(need %lu have %lu) !\n", size,
+ radeon_object_size(track->zb.robj));
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static inline void r300_cs_track_clear(struct r300_cs_track *track)
+{
+ unsigned i;
+
+ track->num_cb = 4;
+ track->maxy = 4096;
+ for (i = 0; i < track->num_cb; i++) {
+ track->cb[i].robj = NULL;
+ track->cb[i].pitch = 8192;
+ track->cb[i].cpp = 16;
+ track->cb[i].offset = 0;
+ }
+ track->z_enabled = true;
+ track->zb.robj = NULL;
+ track->zb.pitch = 8192;
+ track->zb.cpp = 4;
+ track->zb.offset = 0;
+}
+
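+/* Per-register bitmask handed to r100_cs_parse_packet0() together with
+ * r300_packet0_check(); it controls which packet0 register writes the
+ * parser accepts directly and which go through the check callback
+ * (the bit polarity is defined by r100_cs_parse_packet0()).
+ */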
+static unsigned r300_auth_reg[] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+ 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFCFCC, 0xF00E9FFF, 0x007C0000,
+ 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFC, 0xFFFFFFFF,
+ 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
+ 0x00000000, 0x00000000, 0xFFFF0000, 0x00000000,
+ 0x00000000, 0x0000C100, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
+};
+
+static int r300_packet0_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx, unsigned reg)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ struct radeon_cs_reloc *reloc;
+ struct r300_cs_track *track;
+ volatile uint32_t *ib;
+ uint32_t tmp;
+ unsigned i;
+ int r;
+
+ ib = p->ib->ptr;
+ ib_chunk = &p->chunks[p->chunk_ib_idx];
+ track = (struct r300_cs_track *)p->track;
+ switch (reg) {
+ case RADEON_DST_PITCH_OFFSET:
+ case RADEON_SRC_PITCH_OFFSET:
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ tmp = ib_chunk->kdata[idx] & 0x003fffff;
+ tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+ ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
+ break;
+ case R300_RB3D_COLOROFFSET0:
+ case R300_RB3D_COLOROFFSET1:
+ case R300_RB3D_COLOROFFSET2:
+ case R300_RB3D_COLOROFFSET3:
+ i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->cb[i].robj = reloc->robj;
+ track->cb[i].offset = ib_chunk->kdata[idx];
+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case R300_ZB_DEPTHOFFSET:
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->zb.robj = reloc->robj;
+ track->zb.offset = ib_chunk->kdata[idx];
+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case R300_TX_OFFSET_0:
+ case R300_TX_OFFSET_0+4:
+ case R300_TX_OFFSET_0+8:
+ case R300_TX_OFFSET_0+12:
+ case R300_TX_OFFSET_0+16:
+ case R300_TX_OFFSET_0+20:
+ case R300_TX_OFFSET_0+24:
+ case R300_TX_OFFSET_0+28:
+ case R300_TX_OFFSET_0+32:
+ case R300_TX_OFFSET_0+36:
+ case R300_TX_OFFSET_0+40:
+ case R300_TX_OFFSET_0+44:
+ case R300_TX_OFFSET_0+48:
+ case R300_TX_OFFSET_0+52:
+ case R300_TX_OFFSET_0+56:
+ case R300_TX_OFFSET_0+60:
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+ break;
+ /* Tracked registers */
+ case 0x43E4:
+ /* SC_SCISSOR1 */
+
+ track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
+ if (p->rdev->family < CHIP_RV515) {
+ track->maxy -= 1440;
+ }
+ break;
+ case 0x4E00:
+ /* RB3D_CCTL */
+ track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
+ break;
+ case 0x4E38:
+ case 0x4E3C:
+ case 0x4E40:
+ case 0x4E44:
+ /* RB3D_COLORPITCH0 */
+ /* RB3D_COLORPITCH1 */
+ /* RB3D_COLORPITCH2 */
+ /* RB3D_COLORPITCH3 */
+ i = (reg - 0x4E38) >> 2;
+ track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
+ switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
+ case 9:
+ case 11:
+ case 12:
+ track->cb[i].cpp = 1;
+ break;
+ case 3:
+ case 4:
+ case 13:
+ case 15:
+ track->cb[i].cpp = 2;
+ break;
+ case 6:
+ track->cb[i].cpp = 4;
+ break;
+ case 10:
+ track->cb[i].cpp = 8;
+ break;
+ case 7:
+ track->cb[i].cpp = 16;
+ break;
+ default:
+ DRM_ERROR("Invalid color buffer format (%d) !\n",
+ ((ib_chunk->kdata[idx] >> 21) & 0xF));
+ return -EINVAL;
+ }
+ break;
+ case 0x4F00:
+ /* ZB_CNTL */
+ if (ib_chunk->kdata[idx] & 2) {
+ track->z_enabled = true;
+ } else {
+ track->z_enabled = false;
+ }
+ break;
+ case 0x4F10:
+ /* ZB_FORMAT */
+ switch ((ib_chunk->kdata[idx] & 0xF)) {
+ case 0:
+ case 1:
+ track->zb.cpp = 2;
+ break;
+ case 2:
+ track->zb.cpp = 4;
+ break;
+ default:
+ DRM_ERROR("Invalid z buffer format (%d) !\n",
+ (ib_chunk->kdata[idx] & 0xF));
+ return -EINVAL;
+ }
+ break;
+ case 0x4F24:
+ /* ZB_DEPTHPITCH */
+ track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
+ break;
+ default:
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int r300_packet3_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ struct radeon_cs_reloc *reloc;
+ struct r300_cs_track *track;
+ volatile uint32_t *ib;
+ unsigned idx;
+ unsigned i, c;
+ int r;
+
+ ib = p->ib->ptr;
+ ib_chunk = &p->chunks[p->chunk_ib_idx];
+ idx = pkt->idx + 1;
+ track = (struct r300_cs_track *)p->track;
+ switch (pkt->opcode) {
+ case PACKET3_3D_LOAD_VBPNTR:
+ c = ib_chunk->kdata[idx++];
+ for (i = 0; i < (c - 1); i += 2, idx += 3) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
+ }
+ if (c & 1) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+ }
+ break;
+ case PACKET3_INDX_BUFFER:
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+ break;
+ /* Draw packet */
+ case PACKET3_3D_DRAW_VBUF:
+ case PACKET3_3D_DRAW_IMMD:
+ case PACKET3_3D_DRAW_INDX:
+ case PACKET3_3D_DRAW_VBUF_2:
+ case PACKET3_3D_DRAW_IMMD_2:
+ case PACKET3_3D_DRAW_INDX_2:
+ r = r300_cs_track_check(p->rdev, track);
+ if (r) {
+ return r;
+ }
+ break;
+ case PACKET3_NOP:
+ break;
+ default:
+ DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int r300_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet pkt;
+ struct r300_cs_track track;
+ int r;
+
+ r300_cs_track_clear(&track);
+ p->track = &track;
+ do {
+ r = r100_cs_packet_parse(p, &pkt, p->idx);
+ if (r) {
+ return r;
+ }
+ p->idx += pkt.count + 2;
+ switch (pkt.type) {
+ case PACKET_TYPE0:
+ r = r100_cs_parse_packet0(p, &pkt,
+ r300_auth_reg,
+ ARRAY_SIZE(r300_auth_reg),
+ &r300_packet0_check);
+ break;
+ case PACKET_TYPE2:
+ break;
+ case PACKET_TYPE3:
+ r = r300_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ return -EINVAL;
+ }
+ if (r) {
+ return r;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index bdbc95fa672..70f48609515 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -1,30 +1,34 @@
-/**************************************************************************
-
-Copyright (C) 2004-2005 Nicolai Haehnle et al.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the "Software"),
-to deal in the Software without restriction, including without limitation
-on the rights to use, copy, modify, merge, publish, distribute, sub
-license, and/or sell copies of the Software, and to permit persons to whom
-the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice (including the next
-paragraph) shall be included in all copies or substantial portions of the
-Software.
+/*
+ * Copyright 2005 Nicolai Haehnle et al.
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Nicolai Haehnle
+ * Jerome Glisse
+ */
+#ifndef _R300_REG_H_
+#define _R300_REG_H_
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
-DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-USE OR OTHER DEALINGS IN THE SOFTWARE.
-**************************************************************************/
-#ifndef _R300_REG_H
-#define _R300_REG_H
#define R300_MC_INIT_MISC_LAT_TIMER 0x180
# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
new file mode 100644
index 00000000000..dea497a979f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* r420,r423,rv410 depend on: */
+void r100_pci_gart_disable(struct radeon_device *rdev);
+void r100_hdp_reset(struct radeon_device *rdev);
+void r100_mc_setup(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+void r100_mc_disable_clients(struct radeon_device *rdev);
+void r300_vram_info(struct radeon_device *rdev);
+int r300_mc_wait_for_idle(struct radeon_device *rdev);
+int rv370_pcie_gart_enable(struct radeon_device *rdev);
+void rv370_pcie_gart_disable(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * r420,r423,rv410
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void r420_gpu_init(struct radeon_device *rdev);
+int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+
+
+/*
+ * MC
+ */
+int r420_mc_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (r100_debugfs_rbbm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+ }
+ if (r420_debugfs_pipes_info_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for pipes !\n");
+ }
+
+ r420_gpu_init(rdev);
+ r100_pci_gart_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCIE) {
+ rv370_pcie_gart_disable(rdev);
+ }
+
+ /* Setup GPU memory space */
+ rdev->mc.vram_location = 0xFFFFFFFFUL;
+ rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ printk(KERN_WARNING "[drm] Disabling AGP\n");
+ rdev->flags &= ~RADEON_IS_AGP;
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ } else {
+ rdev->mc.gtt_location = rdev->mc.agp_base;
+ }
+ }
+ r = radeon_mc_setup(rdev);
+ if (r) {
+ return r;
+ }
+
+ /* Program GPU memory space */
+ r100_mc_disable_clients(rdev);
+ if (r300_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+ r100_mc_setup(rdev);
+ return 0;
+}
+
+void r420_mc_fini(struct radeon_device *rdev)
+{
+ rv370_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Global GPU functions
+ */
+void r420_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+}
+
+void r420_pipes_init(struct radeon_device *rdev)
+{
+ unsigned tmp;
+ unsigned gb_pipe_select;
+ unsigned num_pipes;
+
+ /* GA_ENHANCE workaround for the TCL deadlock issue */
+ WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
+ /* get max number of pipes */
+ gb_pipe_select = RREG32(0x402C);
+ num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
+ rdev->num_gb_pipes = num_pipes;
+ tmp = 0;
+ switch (num_pipes) {
+ default:
+ /* force to 1 pipe */
+ num_pipes = 1;
+ case 1:
+ tmp = (0 << 1);
+ break;
+ case 2:
+ tmp = (3 << 1);
+ break;
+ case 3:
+ tmp = (6 << 1);
+ break;
+ case 4:
+ tmp = (7 << 1);
+ break;
+ }
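+ /* 0x42C8 is R500_SU_REG_DEST (r500_reg.h); set one bit per pipe */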
+ WREG32(0x42C8, (1 << num_pipes) - 1);
+ /* Sub-pixel 1/12 so we can have 4K rendering according to the docs */
+ tmp |= (1 << 4) | (1 << 0);
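+ /* 0x4018 is R300_GB_TILE_CONFIG; bits 0 and 4 match R300_ENABLE_TILING
+  * and R300_TILE_SIZE_16 in r500_reg.h, on top of the pipe-count bits
+  * selected above.
+  */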
+ WREG32(0x4018, tmp);
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+
+ tmp = RREG32(0x170C);
+ WREG32(0x170C, tmp | (1 << 31));
+
+ WREG32(R300_RB2D_DSTCACHE_MODE,
+ RREG32(R300_RB2D_DSTCACHE_MODE) |
+ R300_DC_AUTOFLUSH_ENABLE |
+ R300_DC_DC_DISABLE_IGNORE_PE);
+
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+ DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
+}
+
+void r420_gpu_init(struct radeon_device *rdev)
+{
+ r100_hdp_reset(rdev);
+ r420_pipes_init(rdev);
+ if (r300_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+}
+
+
+/*
+ * r420,r423,rv410 VRAM info
+ */
+void r420_vram_info(struct radeon_device *rdev)
+{
+ r300_vram_info(rdev);
+}
+
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = RREG32(R400_GB_PIPE_SELECT);
+ seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
+ tmp = RREG32(R300_GB_TILE_CONFIG);
+ seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
+ return 0;
+}
+
+static struct drm_info_list r420_pipes_info_list[] = {
+ {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
+};
+#endif
+
+int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
new file mode 100644
index 00000000000..9070a1c2ce2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -0,0 +1,749 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __R500_REG_H__
+#define __R500_REG_H__
+
+/* pipe config regs */
+#define R300_GA_POLY_MODE 0x4288
+# define R300_FRONT_PTYPE_POINT (0 << 4)
+# define R300_FRONT_PTYPE_LINE (1 << 4)
+# define R300_FRONT_PTYPE_TRIANGE (2 << 4)
+# define R300_BACK_PTYPE_POINT (0 << 7)
+# define R300_BACK_PTYPE_LINE (1 << 7)
+# define R300_BACK_PTYPE_TRIANGE (2 << 7)
+#define R300_GA_ROUND_MODE 0x428c
+# define R300_GEOMETRY_ROUND_TRUNC (0 << 0)
+# define R300_GEOMETRY_ROUND_NEAREST (1 << 0)
+# define R300_COLOR_ROUND_TRUNC (0 << 2)
+# define R300_COLOR_ROUND_NEAREST (1 << 2)
+#define R300_GB_MSPOS0 0x4010
+# define R300_MS_X0_SHIFT 0
+# define R300_MS_Y0_SHIFT 4
+# define R300_MS_X1_SHIFT 8
+# define R300_MS_Y1_SHIFT 12
+# define R300_MS_X2_SHIFT 16
+# define R300_MS_Y2_SHIFT 20
+# define R300_MSBD0_Y_SHIFT 24
+# define R300_MSBD0_X_SHIFT 28
+#define R300_GB_MSPOS1 0x4014
+# define R300_MS_X3_SHIFT 0
+# define R300_MS_Y3_SHIFT 4
+# define R300_MS_X4_SHIFT 8
+# define R300_MS_Y4_SHIFT 12
+# define R300_MS_X5_SHIFT 16
+# define R300_MS_Y5_SHIFT 20
+# define R300_MSBD1_SHIFT 24
+
+#define R300_GA_ENHANCE 0x4274
+# define R300_GA_DEADLOCK_CNTL (1 << 0)
+# define R300_GA_FASTSYNC_CNTL (1 << 1)
+#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
+# define R300_RB3D_DC_FLUSH (2 << 0)
+# define R300_RB3D_DC_FREE (2 << 2)
+# define R300_RB3D_DC_FINISH (1 << 4)
+#define R300_RB3D_ZCACHE_CTLSTAT 0x4f18
+# define R300_ZC_FLUSH (1 << 0)
+# define R300_ZC_FREE (1 << 1)
+# define R300_ZC_FLUSH_ALL 0x3
+#define R400_GB_PIPE_SELECT 0x402c
+#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */
+#define R500_SU_REG_DEST 0x42c8
+#define R300_GB_TILE_CONFIG 0x4018
+# define R300_ENABLE_TILING (1 << 0)
+# define R300_PIPE_COUNT_RV350 (0 << 1)
+# define R300_PIPE_COUNT_R300 (3 << 1)
+# define R300_PIPE_COUNT_R420_3P (6 << 1)
+# define R300_PIPE_COUNT_R420 (7 << 1)
+# define R300_TILE_SIZE_8 (0 << 4)
+# define R300_TILE_SIZE_16 (1 << 4)
+# define R300_TILE_SIZE_32 (2 << 4)
+# define R300_SUBPIXEL_1_12 (0 << 16)
+# define R300_SUBPIXEL_1_16 (1 << 16)
+#define R300_DST_PIPE_CONFIG 0x170c
+# define R300_PIPE_AUTO_CONFIG (1 << 31)
+#define R300_RB2D_DSTCACHE_MODE 0x3428
+# define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
+# define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
+
+#define RADEON_CP_STAT 0x7C0
+#define RADEON_RBBM_CMDFIFO_ADDR 0xE70
+#define RADEON_RBBM_CMDFIFO_DATA 0xE74
+#define RADEON_ISYNC_CNTL 0x1724
+# define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0)
+# define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1)
+# define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2)
+# define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3)
+# define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4)
+# define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
+
+#define RS480_NB_MC_INDEX 0x168
+# define RS480_NB_MC_IND_WR_EN (1 << 8)
+#define RS480_NB_MC_DATA 0x16c
+
+/*
+ * RS690
+ */
+#define RS690_MCCFG_FB_LOCATION 0x100
+#define RS690_MC_FB_START_MASK 0x0000FFFF
+#define RS690_MC_FB_START_SHIFT 0
+#define RS690_MC_FB_TOP_MASK 0xFFFF0000
+#define RS690_MC_FB_TOP_SHIFT 16
+#define RS690_MCCFG_AGP_LOCATION 0x101
+#define RS690_MC_AGP_START_MASK 0x0000FFFF
+#define RS690_MC_AGP_START_SHIFT 0
+#define RS690_MC_AGP_TOP_MASK 0xFFFF0000
+#define RS690_MC_AGP_TOP_SHIFT 16
+#define RS690_MCCFG_AGP_BASE 0x102
+#define RS690_MCCFG_AGP_BASE_2 0x103
+#define RS690_MC_INIT_MISC_LAT_TIMER 0x104
+#define RS690_HDP_FB_LOCATION 0x0134
+#define RS690_MC_INDEX 0x78
+# define RS690_MC_INDEX_MASK 0x1ff
+# define RS690_MC_INDEX_WR_EN (1 << 9)
+# define RS690_MC_INDEX_WR_ACK 0x7f
+#define RS690_MC_DATA 0x7c
+#define RS690_MC_STATUS 0x90
+#define RS690_MC_STATUS_IDLE (1 << 0)
+#define RS480_AGP_BASE_2 0x0164
+#define RS480_MC_MISC_CNTL 0x18
+# define RS480_DISABLE_GTW (1 << 1)
+# define RS480_GART_INDEX_REG_EN (1 << 12)
+# define RS690_BLOCK_GFX_D3_EN (1 << 14)
+#define RS480_GART_FEATURE_ID 0x2b
+# define RS480_HANG_EN (1 << 11)
+# define RS480_TLB_ENABLE (1 << 18)
+# define RS480_P2P_ENABLE (1 << 19)
+# define RS480_GTW_LAC_EN (1 << 25)
+# define RS480_2LEVEL_GART (0 << 30)
+# define RS480_1LEVEL_GART (1 << 30)
+# define RS480_PDC_EN (1 << 31)
+#define RS480_GART_BASE 0x2c
+#define RS480_GART_CACHE_CNTRL 0x2e
+# define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
+#define RS480_AGP_ADDRESS_SPACE_SIZE 0x38
+# define RS480_GART_EN (1 << 0)
+# define RS480_VA_SIZE_32MB (0 << 1)
+# define RS480_VA_SIZE_64MB (1 << 1)
+# define RS480_VA_SIZE_128MB (2 << 1)
+# define RS480_VA_SIZE_256MB (3 << 1)
+# define RS480_VA_SIZE_512MB (4 << 1)
+# define RS480_VA_SIZE_1GB (5 << 1)
+# define RS480_VA_SIZE_2GB (6 << 1)
+#define RS480_AGP_MODE_CNTL 0x39
+# define RS480_POST_GART_Q_SIZE (1 << 18)
+# define RS480_NONGART_SNOOP (1 << 19)
+# define RS480_AGP_RD_BUF_SIZE (1 << 20)
+# define RS480_REQ_TYPE_SNOOP_SHIFT 22
+# define RS480_REQ_TYPE_SNOOP_MASK 0x3
+# define RS480_REQ_TYPE_SNOOP_DIS (1 << 24)
+
+#define RS690_AIC_CTRL_SCRATCH 0x3A
+# define RS690_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
+
+/*
+ * RS600
+ */
+#define RS600_MC_STATUS 0x0
+#define RS600_MC_STATUS_IDLE (1 << 0)
+#define RS600_MC_INDEX 0x70
+# define RS600_MC_ADDR_MASK 0xffff
+# define RS600_MC_IND_SEQ_RBS_0 (1 << 16)
+# define RS600_MC_IND_SEQ_RBS_1 (1 << 17)
+# define RS600_MC_IND_SEQ_RBS_2 (1 << 18)
+# define RS600_MC_IND_SEQ_RBS_3 (1 << 19)
+# define RS600_MC_IND_AIC_RBS (1 << 20)
+# define RS600_MC_IND_CITF_ARB0 (1 << 21)
+# define RS600_MC_IND_CITF_ARB1 (1 << 22)
+# define RS600_MC_IND_WR_EN (1 << 23)
+#define RS600_MC_DATA 0x74
+#define RS600_MC_STATUS 0x0
+# define RS600_MC_IDLE (1 << 1)
+#define RS600_MC_FB_LOCATION 0x4
+#define RS600_MC_FB_START_MASK 0x0000FFFF
+#define RS600_MC_FB_START_SHIFT 0
+#define RS600_MC_FB_TOP_MASK 0xFFFF0000
+#define RS600_MC_FB_TOP_SHIFT 16
+#define RS600_MC_AGP_LOCATION 0x5
+#define RS600_MC_AGP_START_MASK 0x0000FFFF
+#define RS600_MC_AGP_START_SHIFT 0
+#define RS600_MC_AGP_TOP_MASK 0xFFFF0000
+#define RS600_MC_AGP_TOP_SHIFT 16
+#define RS600_MC_AGP_BASE 0x6
+#define RS600_MC_AGP_BASE_2 0x7
+#define RS600_MC_CNTL1 0x9
+# define RS600_ENABLE_PAGE_TABLES (1 << 26)
+#define RS600_MC_PT0_CNTL 0x100
+# define RS600_ENABLE_PT (1 << 0)
+# define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
+# define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
+# define RS600_INVALIDATE_ALL_L1_TLBS (1 << 28)
+# define RS600_INVALIDATE_L2_CACHE (1 << 29)
+#define RS600_MC_PT0_CONTEXT0_CNTL 0x102
+# define RS600_ENABLE_PAGE_TABLE (1 << 0)
+# define RS600_PAGE_TABLE_TYPE_FLAT (0 << 1)
+#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x112
+#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x114
+#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
+#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x12c
+#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x13c
+#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x14c
+#define RS600_MC_PT0_CLIENT0_CNTL 0x16c
+# define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE (1 << 0)
+# define RS600_TRANSLATION_MODE_OVERRIDE (1 << 1)
+# define RS600_SYSTEM_ACCESS_MODE_MASK (3 << 8)
+# define RS600_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 8)
+# define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 8)
+# define RS600_SYSTEM_ACCESS_MODE_IN_SYS (2 << 8)
+# define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 8)
+# define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH (0 << 10)
+# define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 10)
+# define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11)
+# define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14)
+# define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
+# define RS600_INVALIDATE_L1_TLB (1 << 20)
+/* rs600/rs690/rs740 */
+# define RS600_BUS_MASTER_DIS (1 << 14)
+# define RS600_MSI_REARM (1 << 20)
+/* see RS400_MSI_REARM in AIC_CNTL for rs480 */
+
+
+
+#define RV515_MC_FB_LOCATION 0x01
+#define RV515_MC_FB_START_MASK 0x0000FFFF
+#define RV515_MC_FB_START_SHIFT 0
+#define RV515_MC_FB_TOP_MASK 0xFFFF0000
+#define RV515_MC_FB_TOP_SHIFT 16
+#define RV515_MC_AGP_LOCATION 0x02
+#define RV515_MC_AGP_START_MASK 0x0000FFFF
+#define RV515_MC_AGP_START_SHIFT 0
+#define RV515_MC_AGP_TOP_MASK 0xFFFF0000
+#define RV515_MC_AGP_TOP_SHIFT 16
+#define RV515_MC_AGP_BASE 0x03
+#define RV515_MC_AGP_BASE_2 0x04
+
+#define R520_MC_FB_LOCATION 0x04
+#define R520_MC_FB_START_MASK 0x0000FFFF
+#define R520_MC_FB_START_SHIFT 0
+#define R520_MC_FB_TOP_MASK 0xFFFF0000
+#define R520_MC_FB_TOP_SHIFT 16
+#define R520_MC_AGP_LOCATION 0x05
+#define R520_MC_AGP_START_MASK 0x0000FFFF
+#define R520_MC_AGP_START_SHIFT 0
+#define R520_MC_AGP_TOP_MASK 0xFFFF0000
+#define R520_MC_AGP_TOP_SHIFT 16
+#define R520_MC_AGP_BASE 0x06
+#define R520_MC_AGP_BASE_2 0x07
+
+
+#define AVIVO_MC_INDEX 0x0070
+#define R520_MC_STATUS 0x00
+#define R520_MC_STATUS_IDLE (1<<1)
+#define RV515_MC_STATUS 0x08
+#define RV515_MC_STATUS_IDLE (1<<4)
+#define RV515_MC_INIT_MISC_LAT_TIMER 0x09
+#define AVIVO_MC_DATA 0x0074
+
+#define R520_MC_IND_INDEX 0x70
+#define R520_MC_IND_WR_EN (1 << 24)
+#define R520_MC_IND_DATA 0x74
+
+#define RV515_MC_CNTL 0x5
+# define RV515_MEM_NUM_CHANNELS_MASK 0x3
+#define R520_MC_CNTL0 0x8
+# define R520_MEM_NUM_CHANNELS_MASK (0x3 << 24)
+# define R520_MEM_NUM_CHANNELS_SHIFT 24
+# define R520_MC_CHANNEL_SIZE (1 << 23)
+
+#define AVIVO_CP_DYN_CNTL 0x000f /* PLL */
+# define AVIVO_CP_FORCEON (1 << 0)
+#define AVIVO_E2_DYN_CNTL 0x0011 /* PLL */
+# define AVIVO_E2_FORCEON (1 << 0)
+#define AVIVO_IDCT_DYN_CNTL 0x0013 /* PLL */
+# define AVIVO_IDCT_FORCEON (1 << 0)
+
+#define AVIVO_HDP_FB_LOCATION 0x134
+
+#define AVIVO_VGA_RENDER_CONTROL 0x0300
+# define AVIVO_VGA_VSTATUS_CNTL_MASK (3 << 16)
+#define AVIVO_D1VGA_CONTROL 0x0330
+# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1<<0)
+# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1<<8)
+# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1<<9)
+# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1<<10)
+# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1<<16)
+# define AVIVO_DVGA_CONTROL_ROTATE (1<<24)
+#define AVIVO_D2VGA_CONTROL 0x0338
+
+#define AVIVO_EXT1_PPLL_REF_DIV_SRC 0x400
+#define AVIVO_EXT1_PPLL_REF_DIV 0x404
+#define AVIVO_EXT1_PPLL_UPDATE_LOCK 0x408
+#define AVIVO_EXT1_PPLL_UPDATE_CNTL 0x40c
+
+#define AVIVO_EXT2_PPLL_REF_DIV_SRC 0x410
+#define AVIVO_EXT2_PPLL_REF_DIV 0x414
+#define AVIVO_EXT2_PPLL_UPDATE_LOCK 0x418
+#define AVIVO_EXT2_PPLL_UPDATE_CNTL 0x41c
+
+#define AVIVO_EXT1_PPLL_FB_DIV 0x430
+#define AVIVO_EXT2_PPLL_FB_DIV 0x434
+
+#define AVIVO_EXT1_PPLL_POST_DIV_SRC 0x438
+#define AVIVO_EXT1_PPLL_POST_DIV 0x43c
+
+#define AVIVO_EXT2_PPLL_POST_DIV_SRC 0x440
+#define AVIVO_EXT2_PPLL_POST_DIV 0x444
+
+#define AVIVO_EXT1_PPLL_CNTL 0x448
+#define AVIVO_EXT2_PPLL_CNTL 0x44c
+
+#define AVIVO_P1PLL_CNTL 0x450
+#define AVIVO_P2PLL_CNTL 0x454
+#define AVIVO_P1PLL_INT_SS_CNTL 0x458
+#define AVIVO_P2PLL_INT_SS_CNTL 0x45c
+#define AVIVO_P1PLL_TMDSA_CNTL 0x460
+#define AVIVO_P2PLL_LVTMA_CNTL 0x464
+
+#define AVIVO_PCLK_CRTC1_CNTL 0x480
+#define AVIVO_PCLK_CRTC2_CNTL 0x484
+
+#define AVIVO_D1CRTC_H_TOTAL 0x6000
+#define AVIVO_D1CRTC_H_BLANK_START_END 0x6004
+#define AVIVO_D1CRTC_H_SYNC_A 0x6008
+#define AVIVO_D1CRTC_H_SYNC_A_CNTL 0x600c
+#define AVIVO_D1CRTC_H_SYNC_B 0x6010
+#define AVIVO_D1CRTC_H_SYNC_B_CNTL 0x6014
+
+#define AVIVO_D1CRTC_V_TOTAL 0x6020
+#define AVIVO_D1CRTC_V_BLANK_START_END 0x6024
+#define AVIVO_D1CRTC_V_SYNC_A 0x6028
+#define AVIVO_D1CRTC_V_SYNC_A_CNTL 0x602c
+#define AVIVO_D1CRTC_V_SYNC_B 0x6030
+#define AVIVO_D1CRTC_V_SYNC_B_CNTL 0x6034
+
+#define AVIVO_D1CRTC_CONTROL 0x6080
+# define AVIVO_CRTC_EN (1 << 0)
+#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
+#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
+#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
+#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
+
+/* master controls */
+#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
+#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc
+
+#define AVIVO_D1GRPH_ENABLE 0x6100
+#define AVIVO_D1GRPH_CONTROL 0x6104
+# define AVIVO_D1GRPH_CONTROL_DEPTH_8BPP (0 << 0)
+# define AVIVO_D1GRPH_CONTROL_DEPTH_16BPP (1 << 0)
+# define AVIVO_D1GRPH_CONTROL_DEPTH_32BPP (2 << 0)
+# define AVIVO_D1GRPH_CONTROL_DEPTH_64BPP (3 << 0)
+
+# define AVIVO_D1GRPH_CONTROL_8BPP_INDEXED (0 << 8)
+
+# define AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555 (0 << 8)
+# define AVIVO_D1GRPH_CONTROL_16BPP_RGB565 (1 << 8)
+# define AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444 (2 << 8)
+# define AVIVO_D1GRPH_CONTROL_16BPP_AI88 (3 << 8)
+# define AVIVO_D1GRPH_CONTROL_16BPP_MONO16 (4 << 8)
+
+# define AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888 (0 << 8)
+# define AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010 (1 << 8)
+# define AVIVO_D1GRPH_CONTROL_32BPP_DIGITAL (2 << 8)
+# define AVIVO_D1GRPH_CONTROL_32BPP_8B_ARGB2101010 (3 << 8)
+
+
+# define AVIVO_D1GRPH_CONTROL_64BPP_ARGB16161616 (0 << 8)
+
+# define AVIVO_D1GRPH_SWAP_RB (1 << 16)
+# define AVIVO_D1GRPH_TILED (1 << 20)
+# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21)
+
+#define AVIVO_D1GRPH_LUT_SEL 0x6108
+#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
+#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
+#define AVIVO_D1GRPH_PITCH 0x6120
+#define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124
+#define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128
+#define AVIVO_D1GRPH_X_START 0x612c
+#define AVIVO_D1GRPH_Y_START 0x6130
+#define AVIVO_D1GRPH_X_END 0x6134
+#define AVIVO_D1GRPH_Y_END 0x6138
+#define AVIVO_D1GRPH_UPDATE 0x6144
+# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
+#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148
+
+#define AVIVO_D1CUR_CONTROL 0x6400
+# define AVIVO_D1CURSOR_EN (1 << 0)
+# define AVIVO_D1CURSOR_MODE_SHIFT 8
+# define AVIVO_D1CURSOR_MODE_MASK (3 << 8)
+# define AVIVO_D1CURSOR_MODE_24BPP 2
+#define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408
+#define AVIVO_D1CUR_SIZE 0x6410
+#define AVIVO_D1CUR_POSITION 0x6414
+#define AVIVO_D1CUR_HOT_SPOT 0x6418
+#define AVIVO_D1CUR_UPDATE 0x6424
+# define AVIVO_D1CURSOR_UPDATE_LOCK (1 << 16)
+
+#define AVIVO_DC_LUT_RW_SELECT 0x6480
+#define AVIVO_DC_LUT_RW_MODE 0x6484
+#define AVIVO_DC_LUT_RW_INDEX 0x6488
+#define AVIVO_DC_LUT_SEQ_COLOR 0x648c
+#define AVIVO_DC_LUT_PWL_DATA 0x6490
+#define AVIVO_DC_LUT_30_COLOR 0x6494
+#define AVIVO_DC_LUT_READ_PIPE_SELECT 0x6498
+#define AVIVO_DC_LUT_WRITE_EN_MASK 0x649c
+#define AVIVO_DC_LUT_AUTOFILL 0x64a0
+
+#define AVIVO_DC_LUTA_CONTROL 0x64c0
+#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE 0x64c4
+#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN 0x64c8
+#define AVIVO_DC_LUTA_BLACK_OFFSET_RED 0x64cc
+#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE 0x64d0
+#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN 0x64d4
+#define AVIVO_DC_LUTA_WHITE_OFFSET_RED 0x64d8
+
+#define AVIVO_DC_LB_MEMORY_SPLIT 0x6520
+# define AVIVO_DC_LB_MEMORY_SPLIT_MASK 0x3
+# define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT 0
+# define AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
+# define AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
+# define AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY 2
+# define AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
+# define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
+# define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4
+# define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff
+
+#define R500_DxMODE_INT_MASK 0x6540
+#define R500_D1MODE_INT_MASK (1<<0)
+#define R500_D2MODE_INT_MASK (1<<8)
+
+#define AVIVO_D1MODE_DATA_FORMAT 0x6528
+# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0)
+#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C
+#define AVIVO_D1MODE_VIEWPORT_START 0x6580
+#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584
+#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588
+#define AVIVO_D1MODE_EXT_OVERSCAN_TOP_BOTTOM 0x658c
+
+#define AVIVO_D1SCL_SCALER_ENABLE 0x6590
+#define AVIVO_D1SCL_SCALER_TAP_CONTROL 0x6594
+#define AVIVO_D1SCL_UPDATE 0x65cc
+# define AVIVO_D1SCL_UPDATE_LOCK (1 << 16)
+
+/* second crtc */
+#define AVIVO_D2CRTC_H_TOTAL 0x6800
+#define AVIVO_D2CRTC_H_BLANK_START_END 0x6804
+#define AVIVO_D2CRTC_H_SYNC_A 0x6808
+#define AVIVO_D2CRTC_H_SYNC_A_CNTL 0x680c
+#define AVIVO_D2CRTC_H_SYNC_B 0x6810
+#define AVIVO_D2CRTC_H_SYNC_B_CNTL 0x6814
+
+#define AVIVO_D2CRTC_V_TOTAL 0x6820
+#define AVIVO_D2CRTC_V_BLANK_START_END 0x6824
+#define AVIVO_D2CRTC_V_SYNC_A 0x6828
+#define AVIVO_D2CRTC_V_SYNC_A_CNTL 0x682c
+#define AVIVO_D2CRTC_V_SYNC_B 0x6830
+#define AVIVO_D2CRTC_V_SYNC_B_CNTL 0x6834
+
+#define AVIVO_D2CRTC_CONTROL 0x6880
+#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
+#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
+#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
+#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
+
+#define AVIVO_D2GRPH_ENABLE 0x6900
+#define AVIVO_D2GRPH_CONTROL 0x6904
+#define AVIVO_D2GRPH_LUT_SEL 0x6908
+#define AVIVO_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
+#define AVIVO_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
+#define AVIVO_D2GRPH_PITCH 0x6920
+#define AVIVO_D2GRPH_SURFACE_OFFSET_X 0x6924
+#define AVIVO_D2GRPH_SURFACE_OFFSET_Y 0x6928
+#define AVIVO_D2GRPH_X_START 0x692c
+#define AVIVO_D2GRPH_Y_START 0x6930
+#define AVIVO_D2GRPH_X_END 0x6934
+#define AVIVO_D2GRPH_Y_END 0x6938
+#define AVIVO_D2GRPH_UPDATE 0x6944
+#define AVIVO_D2GRPH_FLIP_CONTROL 0x6948
+
+#define AVIVO_D2CUR_CONTROL 0x6c00
+#define AVIVO_D2CUR_SURFACE_ADDRESS 0x6c08
+#define AVIVO_D2CUR_SIZE 0x6c10
+#define AVIVO_D2CUR_POSITION 0x6c14
+
+#define AVIVO_D2MODE_VIEWPORT_START 0x6d80
+#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84
+#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88
+#define AVIVO_D2MODE_EXT_OVERSCAN_TOP_BOTTOM 0x6d8c
+
+#define AVIVO_D2SCL_SCALER_ENABLE 0x6d90
+#define AVIVO_D2SCL_SCALER_TAP_CONTROL 0x6d94
+
+#define AVIVO_DDIA_BIT_DEPTH_CONTROL 0x7214
+
+#define AVIVO_DACA_ENABLE 0x7800
+# define AVIVO_DAC_ENABLE (1 << 0)
+#define AVIVO_DACA_SOURCE_SELECT 0x7804
+# define AVIVO_DAC_SOURCE_CRTC1 (0 << 0)
+# define AVIVO_DAC_SOURCE_CRTC2 (1 << 0)
+# define AVIVO_DAC_SOURCE_TV (2 << 0)
+
+#define AVIVO_DACA_FORCE_OUTPUT_CNTL 0x783c
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24)
+#define AVIVO_DACA_POWERDOWN 0x7850
+# define AVIVO_DACA_POWERDOWN_POWERDOWN (1 << 0)
+# define AVIVO_DACA_POWERDOWN_BLUE (1 << 8)
+# define AVIVO_DACA_POWERDOWN_GREEN (1 << 16)
+# define AVIVO_DACA_POWERDOWN_RED (1 << 24)
+
+#define AVIVO_DACB_ENABLE 0x7a00
+#define AVIVO_DACB_SOURCE_SELECT 0x7a04
+#define AVIVO_DACB_FORCE_OUTPUT_CNTL 0x7a3c
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24)
+#define AVIVO_DACB_POWERDOWN 0x7a50
+# define AVIVO_DACB_POWERDOWN_POWERDOWN (1 << 0)
+# define AVIVO_DACB_POWERDOWN_BLUE (1 << 8)
+# define AVIVO_DACB_POWERDOWN_GREEN (1 << 16)
+# define AVIVO_DACB_POWERDOWN_RED (1 << 24)
+
+#define AVIVO_TMDSA_CNTL 0x7880
+# define AVIVO_TMDSA_CNTL_ENABLE (1 << 0)
+# define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4)
+# define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8)
+# define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12)
+# define AVIVO_TMDSA_CNTL_PIXEL_ENCODING (1 << 16)
+# define AVIVO_TMDSA_CNTL_DUAL_LINK_ENABLE (1 << 24)
+# define AVIVO_TMDSA_CNTL_SWAP (1 << 28)
+#define AVIVO_TMDSA_SOURCE_SELECT 0x7884
+/* 78a8 appears to be some kind of (reasonably tolerant) clock?
+ * 78d0 definitely hits the transmitter, definitely clock. */
+/* MYSTERY1 This appears to control dithering? */
+#define AVIVO_TMDSA_BIT_DEPTH_CONTROL 0x7894
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
+#define AVIVO_TMDSA_DCBALANCER_CONTROL 0x78d0
+# define AVIVO_TMDSA_DCBALANCER_CONTROL_EN (1 << 0)
+# define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_EN (1 << 8)
+# define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16)
+# define AVIVO_TMDSA_DCBALANCER_CONTROL_FORCE (1 << 24)
+#define AVIVO_TMDSA_DATA_SYNCHRONIZATION 0x78d8
+# define AVIVO_TMDSA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0)
+# define AVIVO_TMDSA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8)
+#define AVIVO_TMDSA_CLOCK_ENABLE 0x7900
+#define AVIVO_TMDSA_TRANSMITTER_ENABLE 0x7904
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX0_ENABLE (1 << 0)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX1_ENABLE (1 << 8)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX_ENABLE_HPD_MASK (1 << 16)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18)
+
+#define AVIVO_TMDSA_TRANSMITTER_CONTROL 0x7910
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK (1 << 8)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK (1 << 14)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)
+
+#define AVIVO_LVTMA_CNTL 0x7a80
+# define AVIVO_LVTMA_CNTL_ENABLE (1 << 0)
+# define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4)
+# define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8)
+# define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12)
+# define AVIVO_LVTMA_CNTL_PIXEL_ENCODING (1 << 16)
+# define AVIVO_LVTMA_CNTL_DUAL_LINK_ENABLE (1 << 24)
+# define AVIVO_LVTMA_CNTL_SWAP (1 << 28)
+#define AVIVO_LVTMA_SOURCE_SELECT 0x7a84
+#define AVIVO_LVTMA_COLOR_FORMAT 0x7a88
+#define AVIVO_LVTMA_BIT_DEPTH_CONTROL 0x7a94
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
+
+
+
+#define AVIVO_LVTMA_DCBALANCER_CONTROL 0x7ad0
+# define AVIVO_LVTMA_DCBALANCER_CONTROL_EN (1 << 0)
+# define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_EN (1 << 8)
+# define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16)
+# define AVIVO_LVTMA_DCBALANCER_CONTROL_FORCE (1 << 24)
+
+#define AVIVO_LVTMA_DATA_SYNCHRONIZATION 0x78d8
+# define AVIVO_LVTMA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0)
+# define AVIVO_LVTMA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8)
+#define R500_LVTMA_CLOCK_ENABLE 0x7b00
+#define R600_LVTMA_CLOCK_ENABLE 0x7b04
+
+#define R500_LVTMA_TRANSMITTER_ENABLE 0x7b04
+#define R600_LVTMA_TRANSMITTER_ENABLE 0x7b08
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD03EN (1 << 5)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC1EN (1 << 9)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18)
+
+#define R500_LVTMA_TRANSMITTER_CONTROL 0x7b10
+#define R600_LVTMA_TRANSMITTER_CONTROL 0x7b14
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK (1 << 8)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK (1 << 14)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)
+
+#define R500_LVTMA_PWRSEQ_CNTL 0x7af0
+#define R600_LVTMA_PWRSEQ_CNTL 0x7af4
+# define AVIVO_LVTMA_PWRSEQ_EN (1 << 0)
+# define AVIVO_LVTMA_PWRSEQ_PLL_ENABLE_MASK (1 << 2)
+# define AVIVO_LVTMA_PWRSEQ_PLL_RESET_MASK (1 << 3)
+# define AVIVO_LVTMA_PWRSEQ_TARGET_STATE (1 << 4)
+# define AVIVO_LVTMA_SYNCEN (1 << 8)
+# define AVIVO_LVTMA_SYNCEN_OVRD (1 << 9)
+# define AVIVO_LVTMA_SYNCEN_POL (1 << 10)
+# define AVIVO_LVTMA_DIGON (1 << 16)
+# define AVIVO_LVTMA_DIGON_OVRD (1 << 17)
+# define AVIVO_LVTMA_DIGON_POL (1 << 18)
+# define AVIVO_LVTMA_BLON (1 << 24)
+# define AVIVO_LVTMA_BLON_OVRD (1 << 25)
+# define AVIVO_LVTMA_BLON_POL (1 << 26)
+
+#define R500_LVTMA_PWRSEQ_STATE 0x7af4
+#define R600_LVTMA_PWRSEQ_STATE 0x7af8
+# define AVIVO_LVTMA_PWRSEQ_STATE_TARGET_STATE_R (1 << 0)
+# define AVIVO_LVTMA_PWRSEQ_STATE_DIGON (1 << 1)
+# define AVIVO_LVTMA_PWRSEQ_STATE_SYNCEN (1 << 2)
+# define AVIVO_LVTMA_PWRSEQ_STATE_BLON (1 << 3)
+# define AVIVO_LVTMA_PWRSEQ_STATE_DONE (1 << 4)
+# define AVIVO_LVTMA_PWRSEQ_STATE_STATUS_SHIFT (8)
+
+#define AVIVO_LVDS_BACKLIGHT_CNTL 0x7af8
+# define AVIVO_LVDS_BACKLIGHT_CNTL_EN (1 << 0)
+# define AVIVO_LVDS_BACKLIGHT_LEVEL_MASK 0x0000ff00
+# define AVIVO_LVDS_BACKLIGHT_LEVEL_SHIFT 8
+
+#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
+
+#define AVIVO_GPIO_0 0x7e30
+#define AVIVO_GPIO_1 0x7e40
+#define AVIVO_GPIO_2 0x7e50
+#define AVIVO_GPIO_3 0x7e60
+
+#define AVIVO_DC_GPIO_HPD_Y 0x7e9c
+
+#define AVIVO_I2C_STATUS 0x7d30
+# define AVIVO_I2C_STATUS_DONE (1 << 0)
+# define AVIVO_I2C_STATUS_NACK (1 << 1)
+# define AVIVO_I2C_STATUS_HALT (1 << 2)
+# define AVIVO_I2C_STATUS_GO (1 << 3)
+# define AVIVO_I2C_STATUS_MASK 0x7
+/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe
+ * DONE? */
+# define AVIVO_I2C_STATUS_CMD_RESET 0x7
+# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3)
+#define AVIVO_I2C_STOP 0x7d34
+#define AVIVO_I2C_START_CNTL 0x7d38
+# define AVIVO_I2C_START (1 << 8)
+# define AVIVO_I2C_CONNECTOR0 (0 << 16)
+# define AVIVO_I2C_CONNECTOR1 (1 << 16)
+#define R520_I2C_START (1<<0)
+#define R520_I2C_STOP (1<<1)
+#define R520_I2C_RX (1<<2)
+#define R520_I2C_EN (1<<8)
+#define R520_I2C_DDC1 (0<<16)
+#define R520_I2C_DDC2 (1<<16)
+#define R520_I2C_DDC3 (2<<16)
+#define R520_I2C_DDC_MASK (3<<16)
+#define AVIVO_I2C_CONTROL2 0x7d3c
+# define AVIVO_I2C_7D3C_SIZE_SHIFT 8
+# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8)
+#define AVIVO_I2C_CONTROL3 0x7d40
+/* Reading is done 4 bytes at a time: read the bottom 8 bits from
+ * 7d44, four times in a row.
+ * Writing is a little more complex. First write DATA with
+ * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic
+ * magic number, zz is, I think, the slave address, and yy is the byte
+ * you want to write. */
+#define AVIVO_I2C_DATA 0x7d44
+#define R520_I2C_ADDR_COUNT_MASK (0x7)
+#define R520_I2C_DATA_COUNT_SHIFT (8)
+#define R520_I2C_DATA_COUNT_MASK (0xF00)
+#define AVIVO_I2C_CNTL 0x7d50
+# define AVIVO_I2C_EN (1 << 0)
+# define AVIVO_I2C_RESET (1 << 8)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
new file mode 100644
index 00000000000..570a244bd88
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* r520,rv530,rv560,rv570,r580 depend on: */
+void r100_hdp_reset(struct radeon_device *rdev);
+int rv370_pcie_gart_enable(struct radeon_device *rdev);
+void rv370_pcie_gart_disable(struct radeon_device *rdev);
+void r420_pipes_init(struct radeon_device *rdev);
+void rs600_mc_disable_clients(struct radeon_device *rdev);
+void rs600_disable_vga(struct radeon_device *rdev);
+int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
+int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * r520,rv530,rv560,rv570,r580
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void r520_gpu_init(struct radeon_device *rdev);
+int r520_mc_wait_for_idle(struct radeon_device *rdev);
+
+
+/*
+ * MC
+ */
+int r520_mc_init(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int r;
+
+ if (r100_debugfs_rbbm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+ }
+ if (rv515_debugfs_pipes_info_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for pipes !\n");
+ }
+ if (rv515_debugfs_ga_info_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for pipes !\n");
+ }
+
+ r520_gpu_init(rdev);
+ rv370_pcie_gart_disable(rdev);
+
+ /* Setup GPU memory space */
+ rdev->mc.vram_location = 0xFFFFFFFFUL;
+ rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ printk(KERN_WARNING "[drm] Disabling AGP\n");
+ rdev->flags &= ~RADEON_IS_AGP;
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ } else {
+ rdev->mc.gtt_location = rdev->mc.agp_base;
+ }
+ }
+ r = radeon_mc_setup(rdev);
+ if (r) {
+ return r;
+ }
+
+ /* Program GPU memory space */
+ rs600_mc_disable_clients(rdev);
+ if (r520_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+ /* Write VRAM size in case we are limiting it */
+ WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+ tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+ tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
+ tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
+ WREG32_MC(R520_MC_FB_LOCATION, tmp);
+ WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
+ WREG32(0x310, rdev->mc.vram_location);
+ if (rdev->flags & RADEON_IS_AGP) {
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16);
+ tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16);
+ WREG32_MC(R520_MC_AGP_LOCATION, tmp);
+ WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base);
+ WREG32_MC(R520_MC_AGP_BASE_2, 0);
+ } else {
+ WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF);
+ WREG32_MC(R520_MC_AGP_BASE, 0);
+ WREG32_MC(R520_MC_AGP_BASE_2, 0);
+ }
+ return 0;
+}
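The FB_LOCATION programming above relies on REG_SET() packing a 16-bit start and top into one register word. A minimal worked sketch, assuming the R520_MC_FB_* fields follow the same bits 0-15 / bits 16-31 split as the R600_MC_FB_BASE/TOP masks defined later in this series (the helper name and the 256 MB example are hypothetical, not from the patch):

	/* Editorial sketch: pack a framebuffer range into an
	 * MC_FB_LOCATION-style word, START in bits 0-15, TOP in bits 16-31,
	 * both in 64 KB units. */
	static uint32_t fb_location_pack(uint32_t vram_location, uint32_t vram_size)
	{
		uint32_t start = vram_location >> 16;
		uint32_t top = (vram_location + vram_size - 1) >> 16;

		return ((top & 0xffff) << 16) | (start & 0xffff);
	}
	/* Example: fb_location_pack(0, 256 << 20) == 0x0fff0000 */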
+
+void r520_mc_fini(struct radeon_device *rdev)
+{
+ rv370_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Global GPU functions
+ */
+void r520_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+}
+
+int r520_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32_MC(R520_MC_STATUS);
+ if (tmp & R520_MC_STATUS_IDLE) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+void r520_gpu_init(struct radeon_device *rdev)
+{
+ unsigned pipe_select_current, gb_pipe_select, tmp;
+
+ r100_hdp_reset(rdev);
+ rs600_disable_vga(rdev);
+ /*
+ * DST_PIPE_CONFIG 0x170C
+ * GB_TILE_CONFIG 0x4018
+ * GB_FIFO_SIZE 0x4024
+ * GB_PIPE_SELECT 0x402C
+ * GB_PIPE_SELECT2 0x4124
+ * Z_PIPE_SHIFT 0
+ * Z_PIPE_MASK 0x000000003
+ * GB_FIFO_SIZE2 0x4128
+ * SC_SFIFO_SIZE_SHIFT 0
+ * SC_SFIFO_SIZE_MASK 0x000000003
+ * SC_MFIFO_SIZE_SHIFT 2
+ * SC_MFIFO_SIZE_MASK 0x00000000C
+ * FG_SFIFO_SIZE_SHIFT 4
+ * FG_SFIFO_SIZE_MASK 0x000000030
+ * ZB_MFIFO_SIZE_SHIFT 6
+ * ZB_MFIFO_SIZE_MASK 0x0000000C0
+ * GA_ENHANCE 0x4274
+ * SU_REG_DEST 0x42C8
+ */
+ /* workaround for RV530 */
+ if (rdev->family == CHIP_RV530) {
+ WREG32(0x4124, 1);
+ WREG32(0x4128, 0xFF);
+ }
+ r420_pipes_init(rdev);
+ gb_pipe_select = RREG32(0x402C);
+ tmp = RREG32(0x170C);
+ pipe_select_current = (tmp >> 2) & 3;
+ tmp = (1 << pipe_select_current) |
+ (((gb_pipe_select >> 8) & 0xF) << 4);
+ WREG32_PLL(0x000D, tmp);
+ if (r520_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+}
+
+
+/*
+ * VRAM info
+ */
+static void r520_vram_get_type(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ rdev->mc.vram_width = 128;
+ rdev->mc.vram_is_ddr = true;
+ tmp = RREG32_MC(R520_MC_CNTL0);
+ switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
+ case 0:
+ rdev->mc.vram_width = 32;
+ break;
+ case 1:
+ rdev->mc.vram_width = 64;
+ break;
+ case 2:
+ rdev->mc.vram_width = 128;
+ break;
+ case 3:
+ rdev->mc.vram_width = 256;
+ break;
+ default:
+ rdev->mc.vram_width = 128;
+ break;
+ }
+ if (tmp & R520_MC_CHANNEL_SIZE)
+ rdev->mc.vram_width *= 2;
+}
+
+void r520_vram_info(struct radeon_device *rdev)
+{
+ r520_vram_get_type(rdev);
+ rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
new file mode 100644
index 00000000000..c45559fc97f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* r600,rv610,rv630,rv620,rv635,rv670 depend on: */
+void rs600_mc_disable_clients(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * r600,rv610,rv630,rv620,rv635,rv670
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+void r600_gpu_init(struct radeon_device *rdev);
+
+
+/*
+ * MC
+ */
+int r600_mc_init(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ r600_gpu_init(rdev);
+
+ /* setup the gart before changing location so we can ask to
+ * discard unmapped mc requests
+ */
+ /* FIXME: disable out of gart access */
+ tmp = rdev->mc.gtt_location / 4096;
+ tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp);
+ WREG32(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
+ tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
+ tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp);
+ WREG32(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);
+
+ rs600_mc_disable_clients(rdev);
+ if (r600_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+
+ tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+ tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24);
+ tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24);
+ WREG32(R600_MC_VM_FB_LOCATION, tmp);
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ tmp = REG_SET(R600_MC_AGP_TOP, tmp >> 22);
+ WREG32(R600_MC_VM_AGP_TOP, tmp);
+ tmp = REG_SET(R600_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
+ WREG32(R600_MC_VM_AGP_BOT, tmp);
+ return 0;
+}
+
+void r600_mc_fini(struct radeon_device *rdev)
+{
+ /* FIXME: implement */
+}
+
+
+/*
+ * Global GPU functions
+ */
+void r600_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+}
+
+int r600_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ /* FIXME: implement */
+ return 0;
+}
+
+void r600_gpu_init(struct radeon_device *rdev)
+{
+ /* FIXME: implement */
+}
+
+
+/*
+ * VRAM info
+ */
+void r600_vram_get_type(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int chansize;
+
+ rdev->mc.vram_width = 128;
+ rdev->mc.vram_is_ddr = true;
+
+ tmp = RREG32(R600_RAMCFG);
+ if (tmp & R600_CHANSIZE_OVERRIDE) {
+ chansize = 16;
+ } else if (tmp & R600_CHANSIZE) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ if (rdev->family == CHIP_R600) {
+ rdev->mc.vram_width = 8 * chansize;
+ } else if (rdev->family == CHIP_RV670) {
+ rdev->mc.vram_width = 4 * chansize;
+ } else if ((rdev->family == CHIP_RV610) ||
+ (rdev->family == CHIP_RV620)) {
+ rdev->mc.vram_width = chansize;
+ } else if ((rdev->family == CHIP_RV630) ||
+ (rdev->family == CHIP_RV635)) {
+ rdev->mc.vram_width = 2 * chansize;
+ }
+}
+
+void r600_vram_info(struct radeon_device *rdev)
+{
+ r600_vram_get_type(rdev);
+ rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE);
+
+ /* Could the aperture size report 0? */
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
+
+/*
+ * Indirect register accessors
+ */
+uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff));
+ (void)RREG32(R600_PCIE_PORT_INDEX);
+ r = RREG32(R600_PCIE_PORT_DATA);
+ return r;
+}
+
+void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff));
+ (void)RREG32(R600_PCIE_PORT_INDEX);
+ WREG32(R600_PCIE_PORT_DATA, (v));
+ (void)RREG32(R600_PCIE_PORT_DATA);
+}
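The two accessors above implement the usual index/data pattern: write the register offset to R600_PCIE_PORT_INDEX, then read or write R600_PCIE_PORT_DATA (with dummy reads to flush the posted write). A hedged usage sketch; the 0x10 offset and bit 0 are placeholders, not real hardware fields:

	/* Editorial sketch: read-modify-write through the index/data pair
	 * wrapped by r600_pciep_rreg()/r600_pciep_wreg(). */
	static void r600_pciep_set_bit_example(struct radeon_device *rdev)
	{
		uint32_t v = r600_pciep_rreg(rdev, 0x10);

		r600_pciep_wreg(rdev, 0x10, v | (1 << 0));
	}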
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index bc9d09dfa8e..146f3570af8 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -478,26 +478,27 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)) {
RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
- DRM_INFO("Loading RV770 PFP Microcode\n");
+ DRM_INFO("Loading RV770/RV790 PFP Microcode\n");
for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV770_pfp_microcode[i]);
RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
- DRM_INFO("Loading RV770 CP Microcode\n");
+ DRM_INFO("Loading RV770/RV790 CP Microcode\n");
for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
RADEON_WRITE(R600_CP_ME_RAM_DATA, RV770_cp_microcode[i]);
RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
- } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV730)) {
+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV730) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)) {
RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
- DRM_INFO("Loading RV730 PFP Microcode\n");
+ DRM_INFO("Loading RV730/RV740 PFP Microcode\n");
for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV730_pfp_microcode[i]);
RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
- DRM_INFO("Loading RV730 CP Microcode\n");
+ DRM_INFO("Loading RV730/RV740 CP Microcode\n");
for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
RADEON_WRITE(R600_CP_ME_RAM_DATA, RV730_cp_microcode[i]);
RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
@@ -1324,6 +1325,10 @@ static void r700_gfx_init(struct drm_device *dev,
dev_priv->r700_sc_prim_fifo_size = 0xf9;
dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+ if (dev_priv->r600_sx_max_export_pos_size > 16) {
+ dev_priv->r600_sx_max_export_pos_size -= 16;
+ dev_priv->r600_sx_max_export_smx_size += 16;
+ }
break;
case CHIP_RV710:
dev_priv->r600_max_pipes = 2;
@@ -1345,6 +1350,31 @@ static void r700_gfx_init(struct drm_device *dev,
dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
break;
+ case CHIP_RV740:
+ dev_priv->r600_max_pipes = 4;
+ dev_priv->r600_max_tile_pipes = 4;
+ dev_priv->r600_max_simds = 8;
+ dev_priv->r600_max_backends = 4;
+ dev_priv->r600_max_gprs = 256;
+ dev_priv->r600_max_threads = 248;
+ dev_priv->r600_max_stack_entries = 512;
+ dev_priv->r600_max_hw_contexts = 8;
+ dev_priv->r600_max_gs_threads = 16 * 2;
+ dev_priv->r600_sx_max_export_size = 256;
+ dev_priv->r600_sx_max_export_pos_size = 32;
+ dev_priv->r600_sx_max_export_smx_size = 224;
+ dev_priv->r600_sq_num_cf_insts = 2;
+
+ dev_priv->r700_sx_num_of_sets = 7;
+ dev_priv->r700_sc_prim_fifo_size = 0x100;
+ dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+ dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+
+ if (dev_priv->r600_sx_max_export_pos_size > 16) {
+ dev_priv->r600_sx_max_export_pos_size -= 16;
+ dev_priv->r600_sx_max_export_smx_size += 16;
+ }
+ break;
default:
break;
}
@@ -1493,6 +1523,7 @@ static void r700_gfx_init(struct drm_device *dev,
break;
case CHIP_RV730:
case CHIP_RV710:
+ case CHIP_RV740:
default:
sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
break;
@@ -1569,6 +1600,7 @@ static void r700_gfx_init(struct drm_device *dev,
switch (dev_priv->flags & RADEON_FAMILY_MASK) {
case CHIP_RV770:
case CHIP_RV730:
+ case CHIP_RV740:
gs_prim_buffer_depth = 384;
break;
case CHIP_RV710:
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
new file mode 100644
index 00000000000..e2d1f5f33f7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __R600_REG_H__
+#define __R600_REG_H__
+
+#define R600_PCIE_PORT_INDEX 0x0038
+#define R600_PCIE_PORT_DATA 0x003c
+
+#define R600_MC_VM_FB_LOCATION 0x2180
+#define R600_MC_FB_BASE_MASK 0x0000FFFF
+#define R600_MC_FB_BASE_SHIFT 0
+#define R600_MC_FB_TOP_MASK 0xFFFF0000
+#define R600_MC_FB_TOP_SHIFT 16
+#define R600_MC_VM_AGP_TOP 0x2184
+#define R600_MC_AGP_TOP_MASK 0x0003FFFF
+#define R600_MC_AGP_TOP_SHIFT 0
+#define R600_MC_VM_AGP_BOT 0x2188
+#define R600_MC_AGP_BOT_MASK 0x0003FFFF
+#define R600_MC_AGP_BOT_SHIFT 0
+#define R600_MC_VM_AGP_BASE 0x218c
+#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190
+#define R600_LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
+#define R600_LOGICAL_PAGE_NUMBER_SHIFT 0
+#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
+#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
+
+#define R700_MC_VM_FB_LOCATION 0x2024
+#define R700_MC_FB_BASE_MASK 0x0000FFFF
+#define R700_MC_FB_BASE_SHIFT 0
+#define R700_MC_FB_TOP_MASK 0xFFFF0000
+#define R700_MC_FB_TOP_SHIFT 16
+#define R700_MC_VM_AGP_TOP 0x2028
+#define R700_MC_AGP_TOP_MASK 0x0003FFFF
+#define R700_MC_AGP_TOP_SHIFT 0
+#define R700_MC_VM_AGP_BOT 0x202c
+#define R700_MC_AGP_BOT_MASK 0x0003FFFF
+#define R700_MC_AGP_BOT_SHIFT 0
+#define R700_MC_VM_AGP_BASE 0x2030
+#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+#define R700_LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
+#define R700_LOGICAL_PAGE_NUMBER_SHIFT 0
+#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203c
+
+#define R600_RAMCFG 0x2408
+# define R600_CHANSIZE (1 << 7)
+# define R600_CHANSIZE_OVERRIDE (1 << 10)
+
+
+#define R600_GENERAL_PWRMGT 0x618
+# define R600_OPEN_DRAIN_PADS (1 << 11)
+
+#define R600_LOWER_GPIO_ENABLE 0x710
+#define R600_CTXSW_VID_LOWER_GPIO_CNTL 0x718
+#define R600_HIGH_VID_LOWER_GPIO_CNTL 0x71c
+#define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720
+#define R600_LOW_VID_LOWER_GPIO_CNTL 0x724
+
+
+
+#define R600_HDP_NONSURFACE_BASE 0x2c04
+
+#define R600_BUS_CNTL 0x5420
+#define R600_CONFIG_CNTL 0x5424
+#define R600_CONFIG_MEMSIZE 0x5428
+#define R600_CONFIG_F0_BASE 0x542C
+#define R600_CONFIG_APER_SIZE 0x5430
+
+#define R600_ROM_CNTL 0x1600
+# define R600_SCK_OVERWRITE (1 << 1)
+# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
+# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)
+
+#define R600_CG_SPLL_FUNC_CNTL 0x600
+# define R600_SPLL_BYPASS_EN (1 << 3)
+#define R600_CG_SPLL_STATUS 0x60c
+# define R600_SPLL_CHG_STATUS (1 << 1)
+
+#define R600_BIOS_0_SCRATCH 0x1724
+#define R600_BIOS_1_SCRATCH 0x1728
+#define R600_BIOS_2_SCRATCH 0x172c
+#define R600_BIOS_3_SCRATCH 0x1730
+#define R600_BIOS_4_SCRATCH 0x1734
+#define R600_BIOS_5_SCRATCH 0x1738
+#define R600_BIOS_6_SCRATCH 0x173c
+#define R600_BIOS_7_SCRATCH 0x1740
+
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
new file mode 100644
index 00000000000..c3f24cc5600
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -0,0 +1,793 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RADEON_H__
+#define __RADEON_H__
+
+#include "radeon_object.h"
+
+/* TODO: Here are things that need to be done:
+ * - surface allocator & initializer: (a bit like scratch reg) should
+ * initialize HDP_ stuff on RS600, R600, R700 hw, well anything
+ * related to surfaces
+ * - WB: write back stuff (do it a bit like the scratch reg things)
+ * - Vblank: look at Jesse's rework and what we should do
+ * - r600/r700: gart & cp
+ * - cs: clean up the cs ioctl, use a bitmap & things like that.
+ * - power management stuff
+ * - Barrier in gart code
+ * - Unmappable vram?
+ * - TESTING, TESTING, TESTING
+ */
+
+#include <asm/atomic.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+
+#include "radeon_mode.h"
+#include "radeon_reg.h"
+
+
+/*
+ * Modules parameters.
+ */
+extern int radeon_no_wb;
+extern int radeon_modeset;
+extern int radeon_dynclks;
+extern int radeon_r4xx_atom;
+extern int radeon_agpmode;
+extern int radeon_vram_limit;
+extern int radeon_gart_size;
+extern int radeon_benchmarking;
+extern int radeon_connector_table;
+
+/*
+ * Copy from radeon_drv.h so we don't have to include both and have conflicting
+ * symbols.
+ */
+#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define RADEON_IB_POOL_SIZE 16
+#define RADEON_DEBUGFS_MAX_NUM_FILES 32
+#define RADEONFB_CONN_LIMIT 4
+
+enum radeon_family {
+ CHIP_R100,
+ CHIP_RV100,
+ CHIP_RS100,
+ CHIP_RV200,
+ CHIP_RS200,
+ CHIP_R200,
+ CHIP_RV250,
+ CHIP_RS300,
+ CHIP_RV280,
+ CHIP_R300,
+ CHIP_R350,
+ CHIP_RV350,
+ CHIP_RV380,
+ CHIP_R420,
+ CHIP_R423,
+ CHIP_RV410,
+ CHIP_RS400,
+ CHIP_RS480,
+ CHIP_RS600,
+ CHIP_RS690,
+ CHIP_RS740,
+ CHIP_RV515,
+ CHIP_R520,
+ CHIP_RV530,
+ CHIP_RV560,
+ CHIP_RV570,
+ CHIP_R580,
+ CHIP_R600,
+ CHIP_RV610,
+ CHIP_RV630,
+ CHIP_RV620,
+ CHIP_RV635,
+ CHIP_RV670,
+ CHIP_RS780,
+ CHIP_RV770,
+ CHIP_RV730,
+ CHIP_RV710,
+ CHIP_LAST,
+};
+
+enum radeon_chip_flags {
+ RADEON_FAMILY_MASK = 0x0000ffffUL,
+ RADEON_FLAGS_MASK = 0xffff0000UL,
+ RADEON_IS_MOBILITY = 0x00010000UL,
+ RADEON_IS_IGP = 0x00020000UL,
+ RADEON_SINGLE_CRTC = 0x00040000UL,
+ RADEON_IS_AGP = 0x00080000UL,
+ RADEON_HAS_HIERZ = 0x00100000UL,
+ RADEON_IS_PCIE = 0x00200000UL,
+ RADEON_NEW_MEMMAP = 0x00400000UL,
+ RADEON_IS_PCI = 0x00800000UL,
+ RADEON_IS_IGPGART = 0x01000000UL,
+};
+
+
+/*
+ * Errata workarounds.
+ */
+enum radeon_pll_errata {
+ CHIP_ERRATA_R300_CG = 0x00000001,
+ CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
+ CHIP_ERRATA_PLL_DELAY = 0x00000004
+};
+
+
+struct radeon_device;
+
+
+/*
+ * BIOS.
+ */
+bool radeon_get_bios(struct radeon_device *rdev);
+
+/*
+ * Clocks
+ */
+
+struct radeon_clock {
+ struct radeon_pll p1pll;
+ struct radeon_pll p2pll;
+ struct radeon_pll spll;
+ struct radeon_pll mpll;
+ /* 10 kHz units */
+ uint32_t default_mclk;
+ uint32_t default_sclk;
+};
+
+/*
+ * Fences.
+ */
+struct radeon_fence_driver {
+ uint32_t scratch_reg;
+ atomic_t seq;
+ uint32_t last_seq;
+ unsigned long count_timeout;
+ wait_queue_head_t queue;
+ rwlock_t lock;
+ struct list_head created;
+ struct list_head emited;
+ struct list_head signaled;
+};
+
+struct radeon_fence {
+ struct radeon_device *rdev;
+ struct kref kref;
+ struct list_head list;
+ /* protected by radeon_fence.lock */
+ uint32_t seq;
+ unsigned long timeout;
+ bool emited;
+ bool signaled;
+};
+
+int radeon_fence_driver_init(struct radeon_device *rdev);
+void radeon_fence_driver_fini(struct radeon_device *rdev);
+int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
+int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
+void radeon_fence_process(struct radeon_device *rdev);
+bool radeon_fence_signaled(struct radeon_fence *fence);
+int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+int radeon_fence_wait_next(struct radeon_device *rdev);
+int radeon_fence_wait_last(struct radeon_device *rdev);
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
+void radeon_fence_unref(struct radeon_fence **fence);
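A minimal sketch of how these fence entry points fit together, assuming the usual create/emit/wait/unref ordering; it is not taken from the patch and trims all real work between create and emit:

	/* Editorial sketch: fence API flow implied by the prototypes above. */
	static int radeon_fence_usage_example(struct radeon_device *rdev)
	{
		struct radeon_fence *fence = NULL;
		int r;

		r = radeon_fence_create(rdev, &fence);
		if (r)
			return r;
		/* ... submit the GPU work the fence will follow ... */
		r = radeon_fence_emit(rdev, fence);
		if (r)
			goto out;
		r = radeon_fence_wait(fence, true);	/* interruptible wait */
	out:
		radeon_fence_unref(&fence);
		return r;
	}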
+
+
+/*
+ * Radeon buffer.
+ */
+struct radeon_object;
+
+struct radeon_object_list {
+ struct list_head list;
+ struct radeon_object *robj;
+ uint64_t gpu_offset;
+ unsigned rdomain;
+ unsigned wdomain;
+};
+
+int radeon_object_init(struct radeon_device *rdev);
+void radeon_object_fini(struct radeon_device *rdev);
+int radeon_object_create(struct radeon_device *rdev,
+ struct drm_gem_object *gobj,
+ unsigned long size,
+ bool kernel,
+ uint32_t domain,
+ bool interruptible,
+ struct radeon_object **robj_ptr);
+int radeon_object_kmap(struct radeon_object *robj, void **ptr);
+void radeon_object_kunmap(struct radeon_object *robj);
+void radeon_object_unref(struct radeon_object **robj);
+int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
+ uint64_t *gpu_addr);
+void radeon_object_unpin(struct radeon_object *robj);
+int radeon_object_wait(struct radeon_object *robj);
+int radeon_object_evict_vram(struct radeon_device *rdev);
+int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
+void radeon_object_force_delete(struct radeon_device *rdev);
+void radeon_object_list_add_object(struct radeon_object_list *lobj,
+ struct list_head *head);
+int radeon_object_list_validate(struct list_head *head, void *fence);
+void radeon_object_list_unvalidate(struct list_head *head);
+void radeon_object_list_clean(struct list_head *head);
+int radeon_object_fbdev_mmap(struct radeon_object *robj,
+ struct vm_area_struct *vma);
+unsigned long radeon_object_size(struct radeon_object *robj);
+
+
+/*
+ * GEM objects.
+ */
+struct radeon_gem {
+ struct list_head objects;
+};
+
+int radeon_gem_init(struct radeon_device *rdev);
+void radeon_gem_fini(struct radeon_device *rdev);
+int radeon_gem_object_create(struct radeon_device *rdev, int size,
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ bool interruptible,
+ struct drm_gem_object **obj);
+int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+ uint64_t *gpu_addr);
+void radeon_gem_object_unpin(struct drm_gem_object *obj);
+
+
+/*
+ * GART structures, functions & helpers
+ */
+struct radeon_mc;
+
+struct radeon_gart_table_ram {
+ volatile uint32_t *ptr;
+};
+
+struct radeon_gart_table_vram {
+ struct radeon_object *robj;
+ volatile uint32_t *ptr;
+};
+
+union radeon_gart_table {
+ struct radeon_gart_table_ram ram;
+ struct radeon_gart_table_vram vram;
+};
+
+struct radeon_gart {
+ dma_addr_t table_addr;
+ unsigned num_gpu_pages;
+ unsigned num_cpu_pages;
+ unsigned table_size;
+ union radeon_gart_table table;
+ struct page **pages;
+ dma_addr_t *pages_addr;
+ bool ready;
+};
+
+int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
+void radeon_gart_table_ram_free(struct radeon_device *rdev);
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
+void radeon_gart_table_vram_free(struct radeon_device *rdev);
+int radeon_gart_init(struct radeon_device *rdev);
+void radeon_gart_fini(struct radeon_device *rdev);
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+ int pages);
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+ int pages, struct page **pagelist);
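A hedged sketch of binding pages through this interface; the explicit TLB flush after the bind is an assumption based on the gart_tlb_flush ASIC hook declared further down in this header, not an ordering the patch prescribes:

	/* Editorial sketch: bind a page list at a GART offset, then flush
	 * the GART TLB via the ASIC dispatch macro. */
	static int radeon_gart_bind_example(struct radeon_device *rdev,
					    unsigned offset, int pages,
					    struct page **pagelist)
	{
		int r;

		r = radeon_gart_bind(rdev, offset, pages, pagelist);
		if (r)
			return r;
		radeon_gart_tlb_flush(rdev);
		return 0;
	}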
+
+
+/*
+ * GPU MC structures, functions & helpers
+ */
+struct radeon_mc {
+ resource_size_t aper_size;
+ resource_size_t aper_base;
+ resource_size_t agp_base;
+ unsigned gtt_location;
+ unsigned gtt_size;
+ unsigned vram_location;
+ unsigned vram_size;
+ unsigned vram_width;
+ int vram_mtrr;
+ bool vram_is_ddr;
+};
+
+int radeon_mc_setup(struct radeon_device *rdev);
+
+
+/*
+ * GPU scratch registers structures, functions & helpers
+ */
+struct radeon_scratch {
+ unsigned num_reg;
+ bool free[32];
+ uint32_t reg[32];
+};
+
+int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
+void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
+
+
+/*
+ * IRQS.
+ */
+struct radeon_irq {
+ bool installed;
+ bool sw_int;
+ /* FIXME: use a define for the max number of crtcs rather than hardcoding it */
+ bool crtc_vblank_int[2];
+};
+
+int radeon_irq_kms_init(struct radeon_device *rdev);
+void radeon_irq_kms_fini(struct radeon_device *rdev);
+
+
+/*
+ * CP & ring.
+ */
+struct radeon_ib {
+ struct list_head list;
+ unsigned long idx;
+ uint64_t gpu_addr;
+ struct radeon_fence *fence;
+ volatile uint32_t *ptr;
+ uint32_t length_dw;
+};
+
+struct radeon_ib_pool {
+ struct mutex mutex;
+ struct radeon_object *robj;
+ struct list_head scheduled_ibs;
+ struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
+ bool ready;
+ DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+};
+
+struct radeon_cp {
+ struct radeon_object *ring_obj;
+ volatile uint32_t *ring;
+ unsigned rptr;
+ unsigned wptr;
+ unsigned wptr_old;
+ unsigned ring_size;
+ unsigned ring_free_dw;
+ int count_dw;
+ uint64_t gpu_addr;
+ uint32_t align_mask;
+ uint32_t ptr_mask;
+ struct mutex mutex;
+ bool ready;
+};
+
+int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
+int radeon_ib_pool_init(struct radeon_device *rdev);
+void radeon_ib_pool_fini(struct radeon_device *rdev);
+int radeon_ib_test(struct radeon_device *rdev);
+/* Ring access between begin & end cannot sleep */
+void radeon_ring_free_size(struct radeon_device *rdev);
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
+void radeon_ring_unlock_commit(struct radeon_device *rdev);
+void radeon_ring_unlock_undo(struct radeon_device *rdev);
+int radeon_ring_test(struct radeon_device *rdev);
+int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
+void radeon_ring_fini(struct radeon_device *rdev);
+
+
+/*
+ * CS.
+ */
+struct radeon_cs_reloc {
+ struct drm_gem_object *gobj;
+ struct radeon_object *robj;
+ struct radeon_object_list lobj;
+ uint32_t handle;
+ uint32_t flags;
+};
+
+struct radeon_cs_chunk {
+ uint32_t chunk_id;
+ uint32_t length_dw;
+ uint32_t *kdata;
+};
+
+struct radeon_cs_parser {
+ struct radeon_device *rdev;
+ struct drm_file *filp;
+ /* chunks */
+ unsigned nchunks;
+ struct radeon_cs_chunk *chunks;
+ uint64_t *chunks_array;
+ /* IB */
+ unsigned idx;
+ /* relocations */
+ unsigned nrelocs;
+ struct radeon_cs_reloc *relocs;
+ struct radeon_cs_reloc **relocs_ptr;
+ struct list_head validated;
+ /* indices of various chunks */
+ int chunk_ib_idx;
+ int chunk_relocs_idx;
+ struct radeon_ib *ib;
+ void *track;
+};
+
+struct radeon_cs_packet {
+ unsigned idx;
+ unsigned type;
+ unsigned reg;
+ unsigned opcode;
+ int count;
+ unsigned one_reg_wr;
+};
+
+typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx, unsigned reg);
+typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt);
+
+
+/*
+ * AGP
+ */
+int radeon_agp_init(struct radeon_device *rdev);
+void radeon_agp_fini(struct radeon_device *rdev);
+
+
+/*
+ * Writeback
+ */
+struct radeon_wb {
+ struct radeon_object *wb_obj;
+ volatile uint32_t *wb;
+ uint64_t gpu_addr;
+};
+
+
+/*
+ * Benchmarking
+ */
+void radeon_benchmark(struct radeon_device *rdev);
+
+
+/*
+ * Debugfs
+ */
+int radeon_debugfs_add_files(struct radeon_device *rdev,
+ struct drm_info_list *files,
+ unsigned nfiles);
+int radeon_debugfs_fence_init(struct radeon_device *rdev);
+int r100_debugfs_rbbm_init(struct radeon_device *rdev);
+int r100_debugfs_cp_init(struct radeon_device *rdev);
+
+
+/*
+ * ASIC specific functions.
+ */
+struct radeon_asic {
+ void (*errata)(struct radeon_device *rdev);
+ void (*vram_info)(struct radeon_device *rdev);
+ int (*gpu_reset)(struct radeon_device *rdev);
+ int (*mc_init)(struct radeon_device *rdev);
+ void (*mc_fini)(struct radeon_device *rdev);
+ int (*wb_init)(struct radeon_device *rdev);
+ void (*wb_fini)(struct radeon_device *rdev);
+ int (*gart_enable)(struct radeon_device *rdev);
+ void (*gart_disable)(struct radeon_device *rdev);
+ void (*gart_tlb_flush)(struct radeon_device *rdev);
+ int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
+ int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
+ void (*cp_fini)(struct radeon_device *rdev);
+ void (*cp_disable)(struct radeon_device *rdev);
+ void (*ring_start)(struct radeon_device *rdev);
+ int (*irq_set)(struct radeon_device *rdev);
+ int (*irq_process)(struct radeon_device *rdev);
+ void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
+ int (*cs_parse)(struct radeon_cs_parser *p);
+ int (*copy_blit)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence);
+ int (*copy_dma)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence);
+ int (*copy)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence);
+ void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+ void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
+ void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
+ void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+};
+
+
+/*
+ * IOCTL.
+ */
+int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+
+/*
+ * Core structure, functions and helpers.
+ */
+typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
+typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
+
+struct radeon_device {
+ struct drm_device *ddev;
+ struct pci_dev *pdev;
+ /* ASIC */
+ enum radeon_family family;
+ unsigned long flags;
+ int usec_timeout;
+ enum radeon_pll_errata pll_errata;
+ int num_gb_pipes;
+ int disp_priority;
+ /* BIOS */
+ uint8_t *bios;
+ bool is_atom_bios;
+ uint16_t bios_header_start;
+ struct radeon_object *stollen_vga_memory;
+ struct fb_info *fbdev_info;
+ struct radeon_object *fbdev_robj;
+ struct radeon_framebuffer *fbdev_rfb;
+ /* Register mmio */
+ unsigned long rmmio_base;
+ unsigned long rmmio_size;
+ void *rmmio;
+ radeon_rreg_t mm_rreg;
+ radeon_wreg_t mm_wreg;
+ radeon_rreg_t mc_rreg;
+ radeon_wreg_t mc_wreg;
+ radeon_rreg_t pll_rreg;
+ radeon_wreg_t pll_wreg;
+ radeon_rreg_t pcie_rreg;
+ radeon_wreg_t pcie_wreg;
+ radeon_rreg_t pciep_rreg;
+ radeon_wreg_t pciep_wreg;
+ struct radeon_clock clock;
+ struct radeon_mc mc;
+ struct radeon_gart gart;
+ struct radeon_mode_info mode_info;
+ struct radeon_scratch scratch;
+ struct radeon_mman mman;
+ struct radeon_fence_driver fence_drv;
+ struct radeon_cp cp;
+ struct radeon_ib_pool ib_pool;
+ struct radeon_irq irq;
+ struct radeon_asic *asic;
+ struct radeon_gem gem;
+ struct mutex cs_mutex;
+ struct radeon_wb wb;
+ bool gpu_lockup;
+ bool shutdown;
+ bool suspend;
+};
+
+int radeon_device_init(struct radeon_device *rdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev,
+ uint32_t flags);
+void radeon_device_fini(struct radeon_device *rdev);
+int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
+
+
+/*
+ * Registers read & write functions.
+ */
+#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
+#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
+#define RREG32(reg) rdev->mm_rreg(rdev, (reg))
+#define WREG32(reg, v) rdev->mm_wreg(rdev, (reg), (v))
+#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
+#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
+#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
+#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
+#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
+#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
+#define RREG32_PCIE(reg) rdev->pcie_rreg(rdev, (reg))
+#define WREG32_PCIE(reg, v) rdev->pcie_wreg(rdev, (reg), (v))
+#define WREG32_P(reg, val, mask) \
+ do { \
+ uint32_t tmp_ = RREG32(reg); \
+ tmp_ &= (mask); \
+ tmp_ |= ((val) & ~(mask)); \
+ WREG32(reg, tmp_); \
+ } while (0)
+#define WREG32_PLL_P(reg, val, mask) \
+ do { \
+ uint32_t tmp_ = RREG32_PLL(reg); \
+ tmp_ &= (mask); \
+ tmp_ |= ((val) & ~(mask)); \
+ WREG32_PLL(reg, tmp_); \
+ } while (0)
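Note that in WREG32_P() the mask selects the bits to preserve, not the bits to change: the old value is ANDed with mask, and val supplies only the bits outside it. A small usage sketch (the register offset 0x1234 is a placeholder):

	/* Editorial sketch: clear bit 0 of a register while leaving every
	 * other bit untouched. */
	static void wreg32_p_example(struct radeon_device *rdev)
	{
		WREG32_P(0x1234, 0, ~(1 << 0));
	}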
+
+void r100_pll_errata_after_index(struct radeon_device *rdev);
+
+
+/*
+ * ASICs helpers.
+ */
+#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
+ (rdev->family == CHIP_RV200) || \
+ (rdev->family == CHIP_RS100) || \
+ (rdev->family == CHIP_RS200) || \
+ (rdev->family == CHIP_RV250) || \
+ (rdev->family == CHIP_RV280) || \
+ (rdev->family == CHIP_RS300))
+#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \
+ (rdev->family == CHIP_RV350) || \
+ (rdev->family == CHIP_R350) || \
+ (rdev->family == CHIP_RV380) || \
+ (rdev->family == CHIP_R420) || \
+ (rdev->family == CHIP_R423) || \
+ (rdev->family == CHIP_RV410) || \
+ (rdev->family == CHIP_RS400) || \
+ (rdev->family == CHIP_RS480))
+#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
+#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
+#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
+
+
+/*
+ * BIOS helpers.
+ */
+#define RBIOS8(i) (rdev->bios[i])
+#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
+#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
+
+int radeon_combios_init(struct radeon_device *rdev);
+void radeon_combios_fini(struct radeon_device *rdev);
+int radeon_atombios_init(struct radeon_device *rdev);
+void radeon_atombios_fini(struct radeon_device *rdev);
+
+
+/*
+ * RING helpers.
+ */
+#define CP_PACKET0 0x00000000
+#define PACKET0_BASE_INDEX_SHIFT 0
+#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
+#define PACKET0_COUNT_SHIFT 16
+#define PACKET0_COUNT_MASK (0x3fff << 16)
+#define CP_PACKET1 0x40000000
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+#define CP_PACKET3 0xC0000000
+#define PACKET3_IT_OPCODE_SHIFT 8
+#define PACKET3_IT_OPCODE_MASK (0xff << 8)
+#define PACKET3_COUNT_SHIFT 16
+#define PACKET3_COUNT_MASK (0x3fff << 16)
+/* PACKET3 op code */
+#define PACKET3_NOP 0x10
+#define PACKET3_3D_DRAW_VBUF 0x28
+#define PACKET3_3D_DRAW_IMMD 0x29
+#define PACKET3_3D_DRAW_INDX 0x2A
+#define PACKET3_3D_LOAD_VBPNTR 0x2F
+#define PACKET3_INDX_BUFFER 0x33
+#define PACKET3_3D_DRAW_VBUF_2 0x34
+#define PACKET3_3D_DRAW_IMMD_2 0x35
+#define PACKET3_3D_DRAW_INDX_2 0x36
+#define PACKET3_BITBLT_MULTI 0x9B
+
+#define PACKET0(reg, n) (CP_PACKET0 | \
+ REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
+ REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n) (CP_PACKET3 | \
+ REG_SET(PACKET3_IT_OPCODE, (op)) | \
+ REG_SET(PACKET3_COUNT, (n)))
+
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+
+static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+ if (rdev->cp.count_dw <= 0) {
+ DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+ }
+#endif
+ rdev->cp.ring[rdev->cp.wptr++] = v;
+ rdev->cp.wptr &= rdev->cp.ptr_mask;
+ rdev->cp.count_dw--;
+ rdev->cp.ring_free_dw--;
+}
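A hedged sketch of emitting a single PACKET0 register write through the ring helpers above; the register offset and value are placeholders, and the assumption that a PACKET0 count of 0 means one data dword follows the usual CP convention rather than anything stated in this patch:

	/* Editorial sketch: reserve two dwords (header + one data dword),
	 * write them, and commit the ring. */
	static int ring_packet0_example(struct radeon_device *rdev)
	{
		int r;

		r = radeon_ring_lock(rdev, 2);
		if (r)
			return r;
		radeon_ring_write(rdev, PACKET0(0x1234, 0));
		radeon_ring_write(rdev, 0xcafebabe);
		radeon_ring_unlock_commit(rdev);
		return 0;
	}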
+
+
+/*
+ * ASICs macro.
+ */
+#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
+#define radeon_errata(rdev) (rdev)->asic->errata((rdev))
+#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
+#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
+#define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev))
+#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev))
+#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev))
+#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev))
+#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev))
+#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev))
+#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
+#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
+#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize))
+#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev))
+#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev))
+#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
+#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
+#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
+#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
+#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
+#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
+#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
+#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
+#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
+#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
new file mode 100644
index 00000000000..23ea9955ac5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie
+ * Jerome Glisse <glisse@freedesktop.org>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon.h"
+#include "radeon_drm.h"
+
+#if __OS_HAS_AGP
+
+struct radeon_agpmode_quirk {
+ u32 hostbridge_vendor;
+ u32 hostbridge_device;
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 default_mode;
+};
+
+static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
+ /* Intel E7505 Memory Controller Hub / RV350 AR [Radeon 9600XT] Needs AGPMode 4 (deb #515326) */
+ { PCI_VENDOR_ID_INTEL, 0x2550, PCI_VENDOR_ID_ATI, 0x4152, 0x1458, 0x4038, 4},
+ /* Intel 82865G/PE/P DRAM Controller/Host-Hub / Mobility 9800 Needs AGPMode 4 (deb #462590) */
+ { PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x4a4e, PCI_VENDOR_ID_DELL, 0x5106, 4},
+ /* Intel 82865G/PE/P DRAM Controller/Host-Hub / RV280 [Radeon 9200 SE] Needs AGPMode 4 (lp #300304) */
+ { PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x5964,
+ 0x148c, 0x2073, 4},
+ /* Intel 82855PM Processor to I/O Controller / Mobility M6 LY Needs AGPMode 1 (deb #467235) */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c59,
+ PCI_VENDOR_ID_IBM, 0x052f, 1},
+ /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
+ PCI_VENDOR_ID_IBM, 0x0550, 1},
+ /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
+ PCI_VENDOR_ID_IBM, 0x0530, 1},
+ /* Intel 82855PM host bridge / FireGL Mobility T2 RV350 Needs AGPMode 2 (fdo #20647) */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e54,
+ PCI_VENDOR_ID_IBM, 0x054f, 2},
+ /* Intel 82855PM host bridge / Mobility M9+ / VaioPCG-V505DX Needs AGPMode 2 (fdo #17928) */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
+ PCI_VENDOR_ID_SONY, 0x816b, 2},
+ /* Intel 82855PM Processor to I/O Controller / Mobility M9+ Needs AGPMode 8 (phoronix forum) */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
+ PCI_VENDOR_ID_SONY, 0x8195, 8},
+ /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
+ { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
+ PCI_VENDOR_ID_DELL, 0x00e3, 2},
+ /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
+ PCI_VENDOR_ID_DELL, 0x0149, 1},
+ /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+ 0x1025, 0x0061, 1},
+ /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #203007) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+ 0x1025, 0x0064, 1},
+ /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #141551) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+ PCI_VENDOR_ID_ASUSTEK, 0x1942, 1},
+ /* Intel 82852/82855 host bridge / Mobility 9600/9700 Needs AGPMode 1 (deb #510208) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+ 0x10cf, 0x127f, 1},
+ /* ASRock K7VT4A+ AGP 8x / ATI Radeon 9250 AGP Needs AGPMode 4 (lp #133192) */
+ { 0x1849, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
+ 0x1787, 0x5960, 4},
+ /* VIA K8M800 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (fdo #12544) */
+ { PCI_VENDOR_ID_VIA, 0x0204, PCI_VENDOR_ID_ATI, 0x5960,
+ 0x17af, 0x2020, 4},
+ /* VIA KT880 Host Bridge / RV350 [Radeon 9550] Needs AGPMode 4 (fdo #19981) */
+ { PCI_VENDOR_ID_VIA, 0x0269, PCI_VENDOR_ID_ATI, 0x4153,
+ PCI_VENDOR_ID_ASUSTEK, 0x003c, 4},
+ /* VIA VT8363 Host Bridge / R200 QL [Radeon 8500] Needs AGPMode 2 (lp #141551) */
+ { PCI_VENDOR_ID_VIA, 0x0305, PCI_VENDOR_ID_ATI, 0x514c,
+ PCI_VENDOR_ID_ATI, 0x013a, 2},
+ /* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 (deb #515512) */
+ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
+ PCI_VENDOR_ID_ASUSTEK, 0x004c, 2},
+ /* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 */
+ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
+ PCI_VENDOR_ID_ASUSTEK, 0x0054, 2},
+ /* VIA VT8377 Host Bridge / R200 QM [Radeon 9100] Needs AGPMode 4 (deb #461144) */
+ { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x514d,
+ 0x174b, 0x7149, 4},
+ /* VIA VT8377 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (lp #312693) */
+ { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
+ 0x1462, 0x0380, 4},
+ /* VIA VT8377 Host Bridge / RV280 Needs AGPMode 4 (ati ML) */
+ { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5964,
+ 0x148c, 0x2073, 4},
+ /* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
+ { PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
+ PCI_VENDOR_ID_SONY, 0x8175, 1},
+ /* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
+ { PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
+ PCI_VENDOR_ID_ATI, 0x0152, 2},
+ { 0, 0, 0, 0, 0, 0, 0 },
+};
+#endif
+
+int radeon_agp_init(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+ struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
+ struct drm_agp_mode mode;
+ struct drm_agp_info info;
+ uint32_t agp_status;
+ int default_mode;
+ bool is_v3;
+ int ret;
+
+ /* Acquire AGP. */
+ if (!rdev->ddev->agp->acquired) {
+ ret = drm_agp_acquire(rdev->ddev);
+ if (ret) {
+ DRM_ERROR("Unable to acquire AGP: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = drm_agp_info(rdev->ddev, &info);
+ if (ret) {
+ DRM_ERROR("Unable to get AGP info: %d\n", ret);
+ return ret;
+ }
+ mode.mode = info.mode;
+ agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+ is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
+
+ if (is_v3) {
+ default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4;
+ } else {
+ if (agp_status & RADEON_AGP_4X_MODE) {
+ default_mode = 4;
+ } else if (agp_status & RADEON_AGP_2X_MODE) {
+ default_mode = 2;
+ } else {
+ default_mode = 1;
+ }
+ }
+
+ /* Apply AGPMode Quirks */
+ while (p && p->chip_device != 0) {
+ if (info.id_vendor == p->hostbridge_vendor &&
+ info.id_device == p->hostbridge_device &&
+ rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ default_mode = p->default_mode;
+ }
+ ++p;
+ }
+
+ if (radeon_agpmode > 0) {
+ if ((radeon_agpmode < (is_v3 ? 4 : 1)) ||
+ (radeon_agpmode > (is_v3 ? 8 : 4)) ||
+ (radeon_agpmode & (radeon_agpmode - 1))) {
+ DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n",
+ radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4",
+ default_mode);
+ radeon_agpmode = default_mode;
+ } else {
+ DRM_INFO("AGP mode requested: %d\n", radeon_agpmode);
+ }
+ } else {
+ radeon_agpmode = default_mode;
+ }
+
+ mode.mode &= ~RADEON_AGP_MODE_MASK;
+ if (is_v3) {
+ switch (radeon_agpmode) {
+ case 8:
+ mode.mode |= RADEON_AGPv3_8X_MODE;
+ break;
+ case 4:
+ default:
+ mode.mode |= RADEON_AGPv3_4X_MODE;
+ break;
+ }
+ } else {
+ switch (radeon_agpmode) {
+ case 4:
+ mode.mode |= RADEON_AGP_4X_MODE;
+ break;
+ case 2:
+ mode.mode |= RADEON_AGP_2X_MODE;
+ break;
+ case 1:
+ default:
+ mode.mode |= RADEON_AGP_1X_MODE;
+ break;
+ }
+ }
+
+ mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */
+ ret = drm_agp_enable(rdev->ddev, mode);
+ if (ret) {
+ DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
+ return ret;
+ }
+
+ rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
+ rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
+
+ /* workaround some hw issues */
+ if (rdev->family < CHIP_R200) {
+ WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
+ }
+ return 0;
+#else
+ return 0;
+#endif
+}
+
+void radeon_agp_fini(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
+ drm_agp_release(rdev->ddev);
+ }
+ }
+#endif
+}
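For reference, the radeon_agpmode validation in radeon_agp_init() boils down to a range check plus a power-of-two test; (mode & (mode - 1)) is non-zero exactly when mode has more than one bit set. An equivalent sketch (the helper name is hypothetical, not part of the patch):

	/* Editorial sketch: accept 1/2/4 for AGP 1.0/2.0 bridges and 4/8 for
	 * AGP 3.0 bridges, rejecting non-power-of-two values. */
	static bool agpmode_is_valid(int mode, bool is_v3)
	{
		if (mode & (mode - 1))
			return false;
		if (is_v3)
			return mode >= 4 && mode <= 8;
		return mode >= 1 && mode <= 4;
	}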
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
new file mode 100644
index 00000000000..e57d8a784e9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RADEON_ASIC_H__
+#define __RADEON_ASIC_H__
+
+/*
+ * common functions
+ */
+void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+
+void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+
+/*
+ * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+ */
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void r100_errata(struct radeon_device *rdev);
+void r100_vram_info(struct radeon_device *rdev);
+int r100_gpu_reset(struct radeon_device *rdev);
+int r100_mc_init(struct radeon_device *rdev);
+void r100_mc_fini(struct radeon_device *rdev);
+int r100_wb_init(struct radeon_device *rdev);
+void r100_wb_fini(struct radeon_device *rdev);
+int r100_gart_enable(struct radeon_device *rdev);
+void r100_pci_gart_disable(struct radeon_device *rdev);
+void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+void r100_cp_fini(struct radeon_device *rdev);
+void r100_cp_disable(struct radeon_device *rdev);
+void r100_ring_start(struct radeon_device *rdev);
+int r100_irq_set(struct radeon_device *rdev);
+int r100_irq_process(struct radeon_device *rdev);
+void r100_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+int r100_cs_parse(struct radeon_cs_parser *p);
+void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
+int r100_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence);
+
+static struct radeon_asic r100_asic = {
+ .errata = &r100_errata,
+ .vram_info = &r100_vram_info,
+ .gpu_reset = &r100_gpu_reset,
+ .mc_init = &r100_mc_init,
+ .mc_fini = &r100_mc_fini,
+ .wb_init = &r100_wb_init,
+ .wb_fini = &r100_wb_fini,
+ .gart_enable = &r100_gart_enable,
+ .gart_disable = &r100_pci_gart_disable,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_init = &r100_cp_init,
+ .cp_fini = &r100_cp_fini,
+ .cp_disable = &r100_cp_disable,
+ .ring_start = &r100_ring_start,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .fence_ring_emit = &r100_fence_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = NULL,
+ .copy = &r100_copy_blit,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .set_memory_clock = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+};
+
+
+/*
+ * r300,r350,rv350,rv380
+ */
+void r300_errata(struct radeon_device *rdev);
+void r300_vram_info(struct radeon_device *rdev);
+int r300_gpu_reset(struct radeon_device *rdev);
+int r300_mc_init(struct radeon_device *rdev);
+void r300_mc_fini(struct radeon_device *rdev);
+void r300_ring_start(struct radeon_device *rdev);
+void r300_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+int r300_cs_parse(struct radeon_cs_parser *p);
+int r300_gart_enable(struct radeon_device *rdev);
+void rv370_pcie_gart_disable(struct radeon_device *rdev);
+void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
+void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+int r300_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence);
+static struct radeon_asic r300_asic = {
+ .errata = &r300_errata,
+ .vram_info = &r300_vram_info,
+ .gpu_reset = &r300_gpu_reset,
+ .mc_init = &r300_mc_init,
+ .mc_fini = &r300_mc_fini,
+ .wb_init = &r100_wb_init,
+ .wb_fini = &r100_wb_fini,
+ .gart_enable = &r300_gart_enable,
+ .gart_disable = &r100_pci_gart_disable,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_init = &r100_cp_init,
+ .cp_fini = &r100_cp_fini,
+ .cp_disable = &r100_cp_disable,
+ .ring_start = &r300_ring_start,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r300_copy_dma,
+ .copy = &r100_copy_blit,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .set_memory_clock = NULL,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+};
+
+/*
+ * r420,r423,rv410
+ */
+void r420_errata(struct radeon_device *rdev);
+void r420_vram_info(struct radeon_device *rdev);
+int r420_mc_init(struct radeon_device *rdev);
+void r420_mc_fini(struct radeon_device *rdev);
+static struct radeon_asic r420_asic = {
+ .errata = &r420_errata,
+ .vram_info = &r420_vram_info,
+ .gpu_reset = &r300_gpu_reset,
+ .mc_init = &r420_mc_init,
+ .mc_fini = &r420_mc_fini,
+ .wb_init = &r100_wb_init,
+ .wb_fini = &r100_wb_fini,
+ .gart_enable = &r300_gart_enable,
+ .gart_disable = &rv370_pcie_gart_disable,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_init = &r100_cp_init,
+ .cp_fini = &r100_cp_fini,
+ .cp_disable = &r100_cp_disable,
+ .ring_start = &r300_ring_start,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r300_copy_dma,
+ .copy = &r100_copy_blit,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+};
+
+
+/*
+ * rs400,rs480
+ */
+void rs400_errata(struct radeon_device *rdev);
+void rs400_vram_info(struct radeon_device *rdev);
+int rs400_mc_init(struct radeon_device *rdev);
+void rs400_mc_fini(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+void rs400_gart_tlb_flush(struct radeon_device *rdev);
+int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+static struct radeon_asic rs400_asic = {
+ .errata = &rs400_errata,
+ .vram_info = &rs400_vram_info,
+ .gpu_reset = &r300_gpu_reset,
+ .mc_init = &rs400_mc_init,
+ .mc_fini = &rs400_mc_fini,
+ .wb_init = &r100_wb_init,
+ .wb_fini = &r100_wb_fini,
+ .gart_enable = &rs400_gart_enable,
+ .gart_disable = &rs400_gart_disable,
+ .gart_tlb_flush = &rs400_gart_tlb_flush,
+ .gart_set_page = &rs400_gart_set_page,
+ .cp_init = &r100_cp_init,
+ .cp_fini = &r100_cp_fini,
+ .cp_disable = &r100_cp_disable,
+ .ring_start = &r300_ring_start,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r300_copy_dma,
+ .copy = &r100_copy_blit,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .set_memory_clock = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+};
+
+
+/*
+ * rs600
+ */
+void rs600_errata(struct radeon_device *rdev);
+void rs600_vram_info(struct radeon_device *rdev);
+int rs600_mc_init(struct radeon_device *rdev);
+void rs600_mc_fini(struct radeon_device *rdev);
+int rs600_irq_set(struct radeon_device *rdev);
+int rs600_gart_enable(struct radeon_device *rdev);
+void rs600_gart_disable(struct radeon_device *rdev);
+void rs600_gart_tlb_flush(struct radeon_device *rdev);
+int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+static struct radeon_asic rs600_asic = {
+ .errata = &rs600_errata,
+ .vram_info = &rs600_vram_info,
+ .gpu_reset = &r300_gpu_reset,
+ .mc_init = &rs600_mc_init,
+ .mc_fini = &rs600_mc_fini,
+ .wb_init = &r100_wb_init,
+ .wb_fini = &r100_wb_fini,
+ .gart_enable = &rs600_gart_enable,
+ .gart_disable = &rs600_gart_disable,
+ .gart_tlb_flush = &rs600_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .cp_init = &r100_cp_init,
+ .cp_fini = &r100_cp_fini,
+ .cp_disable = &r100_cp_disable,
+ .ring_start = &r300_ring_start,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &r100_irq_process,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r300_copy_dma,
+ .copy = &r100_copy_blit,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+};
+
+
+/*
+ * rs690,rs740
+ */
+void rs690_errata(struct radeon_device *rdev);
+void rs690_vram_info(struct radeon_device *rdev);
+int rs690_mc_init(struct radeon_device *rdev);
+void rs690_mc_fini(struct radeon_device *rdev);
+uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+static struct radeon_asic rs690_asic = {
+ .errata = &rs690_errata,
+ .vram_info = &rs690_vram_info,
+ .gpu_reset = &r300_gpu_reset,
+ .mc_init = &rs690_mc_init,
+ .mc_fini = &rs690_mc_fini,
+ .wb_init = &r100_wb_init,
+ .wb_fini = &r100_wb_fini,
+ .gart_enable = &rs400_gart_enable,
+ .gart_disable = &rs400_gart_disable,
+ .gart_tlb_flush = &rs400_gart_tlb_flush,
+ .gart_set_page = &rs400_gart_set_page,
+ .cp_init = &r100_cp_init,
+ .cp_fini = &r100_cp_fini,
+ .cp_disable = &r100_cp_disable,
+ .ring_start = &r300_ring_start,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &r100_irq_process,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r300_copy_dma,
+ .copy = &r300_copy_dma,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+};
+
+
+/*
+ * rv515
+ */
+void rv515_errata(struct radeon_device *rdev);
+void rv515_vram_info(struct radeon_device *rdev);
+int rv515_gpu_reset(struct radeon_device *rdev);
+int rv515_mc_init(struct radeon_device *rdev);
+void rv515_mc_fini(struct radeon_device *rdev);
+uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rv515_ring_start(struct radeon_device *rdev);
+uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
+void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+static struct radeon_asic rv515_asic = {
+ .errata = &rv515_errata,
+ .vram_info = &rv515_vram_info,
+ .gpu_reset = &rv515_gpu_reset,
+ .mc_init = &rv515_mc_init,
+ .mc_fini = &rv515_mc_fini,
+ .wb_init = &r100_wb_init,
+ .wb_fini = &r100_wb_fini,
+ .gart_enable = &r300_gart_enable,
+ .gart_disable = &rv370_pcie_gart_disable,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_init = &r100_cp_init,
+ .cp_fini = &r100_cp_fini,
+ .cp_disable = &r100_cp_disable,
+ .ring_start = &rv515_ring_start,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r300_copy_dma,
+ .copy = &r100_copy_blit,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+};
+
+
+/*
+ * r520,rv530,rv560,rv570,r580
+ */
+void r520_errata(struct radeon_device *rdev);
+void r520_vram_info(struct radeon_device *rdev);
+int r520_mc_init(struct radeon_device *rdev);
+void r520_mc_fini(struct radeon_device *rdev);
+static struct radeon_asic r520_asic = {
+ .errata = &r520_errata,
+ .vram_info = &r520_vram_info,
+ .gpu_reset = &rv515_gpu_reset,
+ .mc_init = &r520_mc_init,
+ .mc_fini = &r520_mc_fini,
+ .wb_init = &r100_wb_init,
+ .wb_fini = &r100_wb_fini,
+ .gart_enable = &r300_gart_enable,
+ .gart_disable = &rv370_pcie_gart_disable,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_init = &r100_cp_init,
+ .cp_fini = &r100_cp_fini,
+ .cp_disable = &r100_cp_disable,
+ .ring_start = &rv515_ring_start,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r300_copy_dma,
+ .copy = &r100_copy_blit,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+};
+
+/*
+ * r600,rv610,rv630,rv620,rv635,rv670,rs780,rv770,rv730,rv710
+ */
+uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
+void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
new file mode 100644
index 00000000000..786632d3e37
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -0,0 +1,1298 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+
+/* from radeon_encoder.c */
+extern uint32_t
+radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+extern void
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
+ uint32_t supported_device);
+
+/* from radeon_connector.c */
+extern void
+radeon_add_atom_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ bool linkb, uint32_t igp_lane_info);
+
+/* from radeon_legacy_encoder.c */
+extern void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+ uint32_t supported_device);
+
+union atom_supported_devices {
+ struct _ATOM_SUPPORTED_DEVICES_INFO info;
+ struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
+ struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
+};
+
+static inline struct radeon_i2c_bus_rec
+radeon_lookup_gpio(struct drm_device *dev, uint8_t id)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct atom_context *ctx = rdev->mode_info.atom_context;
+ ATOM_GPIO_I2C_ASSIGMENT gpio;
+ struct radeon_i2c_bus_rec i2c;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+ struct _ATOM_GPIO_I2C_INFO *i2c_info;
+ uint16_t data_offset;
+
+ memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+ i2c.valid = false;
+
+ atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+
+ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+ gpio = i2c_info->asGPIO_Info[id];
+
+ i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
+ i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
+ i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
+ i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
+ i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
+ i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
+ i2c.put_data_mask = (1 << gpio.ucDataEnShift);
+ i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
+ i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
+ i2c.valid = true;
+
+ return i2c;
+}
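+
+/*
+ * Illustrative standalone sketch, not driver code: the GPIO_I2C_Info table
+ * stores register indices in 32-bit-word units and pin positions as bit
+ * shifts, which is why radeon_lookup_gpio() above multiplies each index by 4
+ * and builds (1 << shift) masks.  The numbers below are made up for the
+ * example.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	uint16_t clk_mask_reg_index = 0x1d;	/* hypothetical word index from the table */
+	uint8_t clk_mask_shift = 3;		/* hypothetical bit position */
+	uint32_t byte_offset = (uint32_t)clk_mask_reg_index * 4;	/* words to bytes */
+	uint32_t pin_mask = 1u << clk_mask_shift;
+
+	printf("register offset 0x%x, pin mask 0x%x\n", byte_offset, pin_mask);
+	return 0;
+}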
+
+static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ uint32_t supported_device,
+ int *connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus)
+{
+
+ /* Asus M2A-VM HDMI board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x791e) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0x826d)) {
+ if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+
+ /* Abit F-I90HD (reported by ciaranm on #radeonhd) - this board has no DVI */
+ if ((dev->pdev->device == 0x7941) &&
+ (dev->pdev->subsystem_vendor == 0x147b) &&
+ (dev->pdev->subsystem_device == 0x2412)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_DVII)
+ return false;
+ }
+
+ /* Falcon NW laptop lists the VGA DDC line for LVDS */
+ if ((dev->pdev->device == 0x5653) &&
+ (dev->pdev->subsystem_vendor == 0x1462) &&
+ (dev->pdev->subsystem_device == 0x0291)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_LVDS)
+ i2c_bus->valid = false;
+ }
+
+ /* Funky MacBooks: ignore the CRT1/DFP2 entries these boards report */
+ if ((dev->pdev->device == 0x71C5) &&
+ (dev->pdev->subsystem_vendor == 0x106b) &&
+ (dev->pdev->subsystem_device == 0x0080)) {
+ if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return false;
+ }
+
+ /* some BIOSes incorrectly report a DAC (CRT) device on HDMI connectors; ignore those entries */
+ if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
+ (*connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
+ if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) {
+ return false;
+ }
+ }
+
+ /* ASUS HD 3600 XT board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x9598) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0x01da)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+ }
+
+ return true;
+}
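+
+/*
+ * Illustrative standalone sketch, not driver code: each quirk above keys on
+ * the PCI device ID plus the board's subsystem vendor/device IDs.  A
+ * table-driven variant of the same idea could look like the sketch below;
+ * the struct, function and DEMO_CONNECTOR_* names (and their values) are
+ * hypothetical, only the PCI IDs are taken from the quirk above.
+ */
+#include <stdint.h>
+
+enum { DEMO_CONNECTOR_DVID = 3, DEMO_CONNECTOR_HDMIA = 11 };	/* illustrative values */
+
+struct demo_quirk {
+	uint16_t device, sub_vendor, sub_device;
+	int from_connector, to_connector;
+};
+
+static const struct demo_quirk demo_quirks[] = {
+	/* board whose BIOS reports HDMI for a physical DVI-D port */
+	{ 0x791e, 0x1043, 0x826d, DEMO_CONNECTOR_HDMIA, DEMO_CONNECTOR_DVID },
+};
+
+static int demo_apply_quirks(uint16_t device, uint16_t sub_vendor,
+			     uint16_t sub_device, int connector_type)
+{
+	unsigned int i;
+
+	for (i = 0; i < sizeof(demo_quirks) / sizeof(demo_quirks[0]); i++) {
+		const struct demo_quirk *q = &demo_quirks[i];
+
+		if (q->device == device && q->sub_vendor == sub_vendor &&
+		    q->sub_device == sub_device &&
+		    q->from_connector == connector_type)
+			return q->to_connector;
+	}
+	return connector_type;	/* no quirk matched: keep the reported type */
+}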
+
+const int supported_devices_connector_convert[] = {
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_VGA,
+ DRM_MODE_CONNECTOR_DVII,
+ DRM_MODE_CONNECTOR_DVID,
+ DRM_MODE_CONNECTOR_DVIA,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ DRM_MODE_CONNECTOR_Composite,
+ DRM_MODE_CONNECTOR_LVDS,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_HDMIA,
+ DRM_MODE_CONNECTOR_HDMIB,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_9PinDIN,
+ DRM_MODE_CONNECTOR_DisplayPort
+};
+
+const int object_connector_convert[] = {
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_DVII,
+ DRM_MODE_CONNECTOR_DVII,
+ DRM_MODE_CONNECTOR_DVID,
+ DRM_MODE_CONNECTOR_DVID,
+ DRM_MODE_CONNECTOR_VGA,
+ DRM_MODE_CONNECTOR_Composite,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_9PinDIN,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_HDMIA,
+ DRM_MODE_CONNECTOR_HDMIB,
+ DRM_MODE_CONNECTOR_HDMIB,
+ DRM_MODE_CONNECTOR_LVDS,
+ DRM_MODE_CONNECTOR_9PinDIN,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_DisplayPort
+};
+
+bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ uint16_t size, data_offset;
+ uint8_t frev, crev, line_mux = 0;
+ ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+ ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+ ATOM_OBJECT_HEADER *obj_header;
+ int i, j, path_size, device_support;
+ int connector_type;
+ uint16_t igp_lane_info;
+ bool linkb;
+ struct radeon_i2c_bus_rec ddc_bus;
+
+ atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
+
+ if (data_offset == 0)
+ return false;
+
+ if (crev < 2)
+ return false;
+
+ obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+ path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usDisplayPathTableOffset));
+ con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+ device_support = le16_to_cpu(obj_header->usDeviceSupport);
+
+ path_size = 0;
+ for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
+ uint8_t *addr = (uint8_t *) path_obj->asDispPath;
+ ATOM_DISPLAY_OBJECT_PATH *path;
+ addr += path_size;
+ path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
+ path_size += le16_to_cpu(path->usSize);
+ linkb = false;
+
+ if (device_support & le16_to_cpu(path->usDeviceTag)) {
+ uint8_t con_obj_id, con_obj_num, con_obj_type;
+
+			con_obj_id = (le16_to_cpu(path->usConnObjectId) &
+				      OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+			con_obj_num = (le16_to_cpu(path->usConnObjectId) &
+				       ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+			con_obj_type = (le16_to_cpu(path->usConnObjectId) &
+					OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+			if ((le16_to_cpu(path->usDeviceTag) == ATOM_DEVICE_TV1_SUPPORT) ||
+			    (le16_to_cpu(path->usDeviceTag) == ATOM_DEVICE_TV2_SUPPORT) ||
+			    (le16_to_cpu(path->usDeviceTag) == ATOM_DEVICE_CV_SUPPORT))
+				continue;
+
+			if ((rdev->family == CHIP_RS780) &&
+			    (con_obj_id == CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) {
+				uint16_t igp_offset = 0;
+				ATOM_INTEGRATED_SYSTEM_INFO_V2 *igp_obj;
+
+				index = GetIndexIntoMasterTable(DATA,
+								IntegratedSystemInfo);
+
+				atom_parse_data_header(ctx, index, &size, &frev,
+						       &crev, &igp_offset);
+
+				if (crev >= 2) {
+					igp_obj = (ATOM_INTEGRATED_SYSTEM_INFO_V2 *)
+						(ctx->bios + igp_offset);
+
+					if (igp_obj) {
+						uint32_t slot_config, ct;
+
+						if (con_obj_num == 1)
+							slot_config = igp_obj->ulDDISlot1Config;
+						else
+							slot_config = igp_obj->ulDDISlot2Config;
+
+						ct = (slot_config >> 16) & 0xff;
+						connector_type = object_connector_convert[ct];
+						igp_lane_info = slot_config & 0xffff;
+					} else
+						continue;
+				} else
+					continue;
+			} else {
+				igp_lane_info = 0;
+				connector_type = object_connector_convert[con_obj_id];
+			}
+
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+ continue;
+
+			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
+				uint8_t enc_obj_id, enc_obj_num, enc_obj_type;
+
+				enc_obj_id = (le16_to_cpu(path->usGraphicObjIds[j]) &
+					      OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+				enc_obj_num = (le16_to_cpu(path->usGraphicObjIds[j]) &
+					       ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+				enc_obj_type = (le16_to_cpu(path->usGraphicObjIds[j]) &
+						OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+				/* FIXME: add support for router objects */
+				if (enc_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
+					if (enc_obj_num == 2)
+						linkb = true;
+					else
+						linkb = false;
+
+					radeon_add_atom_encoder(dev, enc_obj_id,
+								le16_to_cpu(path->usDeviceTag));
+				}
+			}
+
+ /* look up gpio for ddc */
+			if ((le16_to_cpu(path->usDeviceTag) &
+			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
+				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
+					ATOM_COMMON_RECORD_HEADER *record;
+					ATOM_I2C_RECORD *i2c_record;
+
+					if (le16_to_cpu(path->usConnObjectId) !=
+					    le16_to_cpu(con_obj->asObjects[j].usObjectID))
+						continue;
+
+					record = (ATOM_COMMON_RECORD_HEADER *)
+						(ctx->bios + data_offset +
+						 le16_to_cpu(con_obj->asObjects[j].usRecordOffset));
+
+					while (record->ucRecordType > 0 &&
+					       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+						DRM_ERROR("record type %d\n",
+							  record->ucRecordType);
+						switch (record->ucRecordType) {
+						case ATOM_I2C_RECORD_TYPE:
+							i2c_record = (ATOM_I2C_RECORD *)record;
+							line_mux = i2c_record->sucI2cId.bfI2C_LineMux;
+							break;
+						}
+						record = (ATOM_COMMON_RECORD_HEADER *)
+							((char *)record + record->ucRecordSize);
+					}
+					break;
+				}
+			} else
+				line_mux = 0;
+
+			if ((le16_to_cpu(path->usDeviceTag) == ATOM_DEVICE_TV1_SUPPORT) ||
+			    (le16_to_cpu(path->usDeviceTag) == ATOM_DEVICE_TV2_SUPPORT) ||
+			    (le16_to_cpu(path->usDeviceTag) == ATOM_DEVICE_CV_SUPPORT))
+				ddc_bus.valid = false;
+			else
+				ddc_bus = radeon_lookup_gpio(dev, line_mux);
+
+			radeon_add_atom_connector(dev,
+						  le16_to_cpu(path->usConnObjectId),
+						  le16_to_cpu(path->usDeviceTag),
+						  connector_type, &ddc_bus,
+						  linkb, igp_lane_info);
+
+ }
+ }
+
+ radeon_link_encoder_connector(dev);
+
+ return true;
+}
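+
+/*
+ * Illustrative standalone sketch, not driver code: the object-table parser
+ * above repeatedly splits a 16-bit object id into ID, enum and type fields
+ * with mask/shift pairs.  The decode is restated below; the DEMO_* masks and
+ * shifts are hypothetical, not the real ATOM definitions.
+ */
+#include <stdint.h>
+
+#define DEMO_OBJECT_ID_MASK    0x00ff
+#define DEMO_OBJECT_ID_SHIFT   0
+#define DEMO_ENUM_ID_MASK      0x0700
+#define DEMO_ENUM_ID_SHIFT     8
+#define DEMO_OBJECT_TYPE_MASK  0x7000
+#define DEMO_OBJECT_TYPE_SHIFT 12
+
+static void demo_decode_object_id(uint16_t obj, uint8_t *id, uint8_t *num,
+				  uint8_t *type)
+{
+	*id   = (obj & DEMO_OBJECT_ID_MASK) >> DEMO_OBJECT_ID_SHIFT;
+	*num  = (obj & DEMO_ENUM_ID_MASK) >> DEMO_ENUM_ID_SHIFT;
+	*type = (obj & DEMO_OBJECT_TYPE_MASK) >> DEMO_OBJECT_TYPE_SHIFT;
+}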
+
+struct bios_connector {
+ bool valid;
+ uint8_t line_mux;
+ uint16_t devices;
+ int connector_type;
+ struct radeon_i2c_bus_rec ddc_bus;
+};
+
+bool
+radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, SupportedDevicesInfo);
+ uint16_t size, data_offset;
+ uint8_t frev, crev;
+ uint16_t device_support;
+ uint8_t dac;
+ union atom_supported_devices *supported_devices;
+ int i, j;
+ struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+
+ atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
+
+ supported_devices =
+ (union atom_supported_devices *)(ctx->bios + data_offset);
+
+ device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
+
+ for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ ATOM_CONNECTOR_INFO_I2C ci =
+ supported_devices->info.asConnInfo[i];
+
+ bios_connectors[i].valid = false;
+
+ if (!(device_support & (1 << i))) {
+ continue;
+ }
+
+ if (i == ATOM_DEVICE_CV_INDEX) {
+ DRM_DEBUG("Skipping Component Video\n");
+ continue;
+ }
+
+ if (i == ATOM_DEVICE_TV1_INDEX) {
+ DRM_DEBUG("Skipping TV Out\n");
+ continue;
+ }
+
+ bios_connectors[i].connector_type =
+ supported_devices_connector_convert[ci.sucConnectorInfo.
+ sbfAccess.
+ bfConnectorType];
+
+ if (bios_connectors[i].connector_type ==
+ DRM_MODE_CONNECTOR_Unknown)
+ continue;
+
+ dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
+
+ if ((rdev->family == CHIP_RS690) ||
+ (rdev->family == CHIP_RS740)) {
+ if ((i == ATOM_DEVICE_DFP2_INDEX)
+ && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
+ bios_connectors[i].line_mux =
+ ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
+ else if ((i == ATOM_DEVICE_DFP3_INDEX)
+ && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
+ bios_connectors[i].line_mux =
+ ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
+ else
+ bios_connectors[i].line_mux =
+ ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+ } else
+ bios_connectors[i].line_mux =
+ ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+
+ /* give TV outputs unique connector IDs */
+ if (i == ATOM_DEVICE_TV1_INDEX) {
+ bios_connectors[i].ddc_bus.valid = false;
+ bios_connectors[i].line_mux = 50;
+ } else if (i == ATOM_DEVICE_TV2_INDEX) {
+ bios_connectors[i].ddc_bus.valid = false;
+ bios_connectors[i].line_mux = 51;
+ } else if (i == ATOM_DEVICE_CV_INDEX) {
+ bios_connectors[i].ddc_bus.valid = false;
+ bios_connectors[i].line_mux = 52;
+ } else
+ bios_connectors[i].ddc_bus =
+ radeon_lookup_gpio(dev,
+ bios_connectors[i].line_mux);
+
+ /* Always set the connector type to VGA for CRT1/CRT2.  If they are
+ * shared with a DVI port, we'll pick up the DVI connector when we
+ * merge the outputs.  Some BIOSes incorrectly list VGA ports as DVI.
+ */
+ if (i == ATOM_DEVICE_CRT1_INDEX || i == ATOM_DEVICE_CRT2_INDEX)
+ bios_connectors[i].connector_type =
+ DRM_MODE_CONNECTOR_VGA;
+
+ if (!radeon_atom_apply_quirks
+ (dev, (1 << i), &bios_connectors[i].connector_type,
+ &bios_connectors[i].ddc_bus))
+ continue;
+
+ bios_connectors[i].valid = true;
+ bios_connectors[i].devices = (1 << i);
+
+		if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
+			radeon_add_atom_encoder(dev,
+						radeon_get_encoder_id(dev, (1 << i), dac),
+						(1 << i));
+		else
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_id(dev, (1 << i), dac),
+						  (1 << i));
+ }
+
+ /* combine shared connectors */
+	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+		if (!bios_connectors[i].valid)
+			continue;
+		for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
+			if (!bios_connectors[j].valid || (i == j))
+				continue;
+			if (bios_connectors[i].line_mux !=
+			    bios_connectors[j].line_mux)
+				continue;
+			if (((bios_connectors[i].devices & ATOM_DEVICE_DFP_SUPPORT) &&
+			     (bios_connectors[j].devices & ATOM_DEVICE_CRT_SUPPORT)) ||
+			    ((bios_connectors[j].devices & ATOM_DEVICE_DFP_SUPPORT) &&
+			     (bios_connectors[i].devices & ATOM_DEVICE_CRT_SUPPORT))) {
+				bios_connectors[i].devices |=
+					bios_connectors[j].devices;
+				bios_connectors[i].connector_type =
+					DRM_MODE_CONNECTOR_DVII;
+				bios_connectors[j].valid = false;
+			}
+		}
+	}
+
+ /* add the connectors */
+ for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ if (bios_connectors[i].valid)
+ radeon_add_atom_connector(dev,
+ bios_connectors[i].line_mux,
+ bios_connectors[i].devices,
+ bios_connectors[i].connector_type,
+ &bios_connectors[i].ddc_bus,
+ false, 0);
+ }
+
+ radeon_link_encoder_connector(dev);
+
+ return true;
+}
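+
+/*
+ * Illustrative standalone sketch, not driver code: the merge loop above
+ * folds a digital (DFP) and an analog (CRT) device that share the same DDC
+ * line into a single DVI-I connector.  The helper below restates that rule;
+ * the DEMO_* bitmask names and values are hypothetical.
+ */
+#include <stdbool.h>
+#include <stdint.h>
+
+#define DEMO_DFP_SUPPORT 0x1
+#define DEMO_CRT_SUPPORT 0x2
+
+static bool demo_should_merge_to_dvi_i(uint32_t devices_a, uint32_t devices_b,
+					uint8_t ddc_a, uint8_t ddc_b)
+{
+	if (ddc_a != ddc_b)
+		return false;	/* different DDC lines: physically separate ports */
+	return ((devices_a & DEMO_DFP_SUPPORT) && (devices_b & DEMO_CRT_SUPPORT)) ||
+	       ((devices_b & DEMO_DFP_SUPPORT) && (devices_a & DEMO_CRT_SUPPORT));
+}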
+
+union firmware_info {
+ ATOM_FIRMWARE_INFO info;
+ ATOM_FIRMWARE_INFO_V1_2 info_12;
+ ATOM_FIRMWARE_INFO_V1_3 info_13;
+ ATOM_FIRMWARE_INFO_V1_4 info_14;
+};
+
+bool radeon_atom_get_clock_info(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ union firmware_info *firmware_info;
+ uint8_t frev, crev;
+ struct radeon_pll *p1pll = &rdev->clock.p1pll;
+ struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *spll = &rdev->clock.spll;
+ struct radeon_pll *mpll = &rdev->clock.mpll;
+ uint16_t data_offset;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
+ &crev, &data_offset);
+
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ if (firmware_info) {
+ /* pixel clocks */
+ p1pll->reference_freq =
+ le16_to_cpu(firmware_info->info.usReferenceClock);
+ p1pll->reference_div = 0;
+
+ p1pll->pll_out_min =
+ le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+ p1pll->pll_out_max =
+ le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+
+ if (p1pll->pll_out_min == 0) {
+ if (ASIC_IS_AVIVO(rdev))
+ p1pll->pll_out_min = 64800;
+ else
+ p1pll->pll_out_min = 20000;
+ }
+
+ p1pll->pll_in_min =
+ le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
+ p1pll->pll_in_max =
+ le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);
+
+ *p2pll = *p1pll;
+
+ /* system clock */
+ spll->reference_freq =
+ le16_to_cpu(firmware_info->info.usReferenceClock);
+ spll->reference_div = 0;
+
+ spll->pll_out_min =
+ le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
+ spll->pll_out_max =
+ le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);
+
+ /* fall back to a sane minimum when the table reports 0 */
+ if (spll->pll_out_min == 0) {
+ if (ASIC_IS_AVIVO(rdev))
+ spll->pll_out_min = 64800;
+ else
+ spll->pll_out_min = 20000;
+ }
+
+ spll->pll_in_min =
+ le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
+ spll->pll_in_max =
+ le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
+
+ /* memory clock */
+ mpll->reference_freq =
+ le16_to_cpu(firmware_info->info.usReferenceClock);
+ mpll->reference_div = 0;
+
+ mpll->pll_out_min =
+ le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
+ mpll->pll_out_max =
+ le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);
+
+ /* fall back to a sane minimum when the table reports 0 */
+ if (mpll->pll_out_min == 0) {
+ if (ASIC_IS_AVIVO(rdev))
+ mpll->pll_out_min = 64800;
+ else
+ mpll->pll_out_min = 20000;
+ }
+
+ mpll->pll_in_min =
+ le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
+ mpll->pll_in_max =
+ le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);
+
+ rdev->clock.default_sclk =
+ le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
+ rdev->clock.default_mclk =
+ le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
+
+ return true;
+ }
+ return false;
+}
+
+struct radeon_encoder_int_tmds *
+radeon_atombios_get_tmds_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, TMDS_Info);
+ uint16_t data_offset;
+ struct _ATOM_TMDS_INFO *tmds_info;
+ uint8_t frev, crev;
+ uint16_t maxfreq;
+ int i;
+ struct radeon_encoder_int_tmds *tmds = NULL;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
+ &crev, &data_offset);
+
+ tmds_info =
+ (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
+ data_offset);
+
+ if (tmds_info) {
+ tmds =
+ kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+
+ if (!tmds)
+ return NULL;
+
+ maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
+ for (i = 0; i < 4; i++) {
+ tmds->tmds_pll[i].freq =
+ le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency);
+ tmds->tmds_pll[i].value =
+ tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f;
+ tmds->tmds_pll[i].value |=
+ (tmds_info->asMiscInfo[i].
+ ucPLL_VCO_Gain & 0x3f) << 6;
+ tmds->tmds_pll[i].value |=
+ (tmds_info->asMiscInfo[i].
+ ucPLL_DutyCycle & 0xf) << 12;
+ tmds->tmds_pll[i].value |=
+ (tmds_info->asMiscInfo[i].
+ ucPLL_VoltageSwing & 0xf) << 16;
+
+ DRM_DEBUG("TMDS PLL From ATOMBIOS %u %x\n",
+ tmds->tmds_pll[i].freq,
+ tmds->tmds_pll[i].value);
+
+ if (maxfreq == tmds->tmds_pll[i].freq) {
+ tmds->tmds_pll[i].freq = 0xffffffff;
+ break;
+ }
+ }
+ }
+ return tmds;
+}
+
+union lvds_info {
+ struct _ATOM_LVDS_INFO info;
+ struct _ATOM_LVDS_INFO_V12 info_12;
+};
+
+struct radeon_encoder_atom_dig *
+radeon_atombios_get_lvds_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
+ uint16_t data_offset;
+ union lvds_info *lvds_info;
+ uint8_t frev, crev;
+ struct radeon_encoder_atom_dig *lvds = NULL;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
+ &crev, &data_offset);
+
+ lvds_info =
+ (union lvds_info *)(mode_info->atom_context->bios + data_offset);
+
+ if (lvds_info) {
+ lvds =
+ kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+ if (!lvds)
+ return NULL;
+
+ lvds->native_mode.dotclock =
+ le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
+ lvds->native_mode.panel_xres =
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
+ lvds->native_mode.panel_yres =
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
+ lvds->native_mode.hblank =
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
+ lvds->native_mode.hoverplus =
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
+ lvds->native_mode.hsync_width =
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
+ lvds->native_mode.vblank =
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
+ lvds->native_mode.voverplus =
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
+ lvds->native_mode.vsync_width =
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+ lvds->panel_pwr_delay =
+ le16_to_cpu(lvds_info->info.usOffDelayInMs);
+ lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+
+ encoder->native_mode = lvds->native_mode;
+ }
+ return lvds;
+}
+
+struct radeon_encoder_primary_dac *
+radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, CompassionateData);
+ uint16_t data_offset;
+ struct _COMPASSIONATE_DATA *dac_info;
+ uint8_t frev, crev;
+ uint8_t bg, dac;
+ struct radeon_encoder_primary_dac *p_dac = NULL;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+
+ dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
+
+ if (dac_info) {
+ p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
+
+ if (!p_dac)
+ return NULL;
+
+ bg = dac_info->ucDAC1_BG_Adjustment;
+ dac = dac_info->ucDAC1_DAC_Adjustment;
+ p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+
+ }
+ return p_dac;
+}
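+
+/*
+ * Illustrative standalone sketch, not driver code: the COMPASSIONATE_DATA
+ * table supplies separate BG and DAC adjustment bytes that the driver packs
+ * into one word ((bg << 8) | dac above, (bg << 16) | (dac << 20) for the TV
+ * DAC below).  A pack/unpack pair for the first layout:
+ */
+#include <stdint.h>
+
+static uint32_t demo_pack_ps2_adj(uint8_t bg, uint8_t dac)
+{
+	return ((uint32_t)bg << 8) | dac;
+}
+
+static void demo_unpack_ps2_adj(uint32_t adj, uint8_t *bg, uint8_t *dac)
+{
+	*bg = (adj >> 8) & 0xff;
+	*dac = adj & 0xff;
+}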
+
+struct radeon_encoder_tv_dac *
+radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, CompassionateData);
+ uint16_t data_offset;
+ struct _COMPASSIONATE_DATA *dac_info;
+ uint8_t frev, crev;
+ uint8_t bg, dac;
+ struct radeon_encoder_tv_dac *tv_dac = NULL;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+
+ dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
+
+ if (dac_info) {
+ tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+
+ if (!tv_dac)
+ return NULL;
+
+ bg = dac_info->ucDAC2_CRT2_BG_Adjustment;
+ dac = dac_info->ucDAC2_CRT2_DAC_Adjustment;
+ tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+ bg = dac_info->ucDAC2_PAL_BG_Adjustment;
+ dac = dac_info->ucDAC2_PAL_DAC_Adjustment;
+ tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+ bg = dac_info->ucDAC2_NTSC_BG_Adjustment;
+ dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
+ tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+
+ }
+ return tv_dac;
+}
+
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
+{
+ DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
+
+ args.ucEnable = enable;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable)
+{
+ ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt);
+
+ args.ucEnable = enable;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_set_engine_clock(struct radeon_device *rdev,
+ uint32_t eng_clock)
+{
+ SET_ENGINE_CLOCK_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
+
+ args.ulTargetEngineClock = eng_clock; /* 10 khz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_set_memory_clock(struct radeon_device *rdev,
+ uint32_t mem_clock)
+{
+ SET_MEMORY_CLOCK_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ args.ulTargetMemoryClock = mem_clock; /* 10 khz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
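+
+/*
+ * Illustrative standalone sketch, not driver code: the SetEngineClock and
+ * SetMemoryClock tables take their target in 10 kHz units (see the "10 khz"
+ * comments above), so clocks expressed in kHz or MHz must be scaled first.
+ * The helper names below are hypothetical.
+ */
+#include <stdint.h>
+
+static uint32_t demo_khz_to_atom_units(uint32_t khz)
+{
+	return khz / 10;	/* 1 unit == 10 kHz */
+}
+
+static uint32_t demo_mhz_to_atom_units(uint32_t mhz)
+{
+	return mhz * 100;	/* 1 MHz == 100 * 10 kHz */
+}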
+
+void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t bios_2_scratch, bios_6_scratch;
+
+ if (rdev->family >= CHIP_R600) {
+ bios_2_scratch = RREG32(R600_BIOS_0_SCRATCH);
+ bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+ } else {
+ bios_2_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+ bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+ }
+
+ /* let the bios control the backlight */
+ bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
+
+ /* tell the bios not to handle mode switching */
+ bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
+
+ if (rdev->family >= CHIP_R600) {
+ WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+ WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+ } else {
+ WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+ WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+ }
+
+}
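+
+/*
+ * Illustrative standalone sketch, not driver code: the scratch-register
+ * helpers in this file all follow the same read/modify/write pattern: read
+ * the register, clear or set the relevant flag bits, write it back.  The
+ * sketch below models the register as a plain variable instead of an MMIO
+ * access; the DEMO_* flag names and values are hypothetical.
+ */
+#include <stdint.h>
+
+#define DEMO_FLAG_BACKLIGHT_BY_BIOS  (1u << 0)
+#define DEMO_FLAG_DRIVER_OWNS_MODES  (1u << 1)
+
+static uint32_t demo_scratch;	/* stands in for RREG32()/WREG32() */
+
+static void demo_take_over_from_bios(void)
+{
+	uint32_t v = demo_scratch;		/* read   */
+
+	v &= ~DEMO_FLAG_BACKLIGHT_BY_BIOS;	/* clear  */
+	v |= DEMO_FLAG_DRIVER_OWNS_MODES;	/* set    */
+	demo_scratch = v;			/* write  */
+}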
+
+void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t bios_6_scratch;
+
+ if (rdev->family >= CHIP_R600)
+ bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+ else
+ bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+ if (lock)
+ bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
+ else
+ bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
+
+ if (rdev->family >= CHIP_R600)
+ WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+ else
+ WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
+
+/* at some point we may want to break this out into individual functions */
+void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;
+
+ if (rdev->family >= CHIP_R600) {
+ bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+ bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+ bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+ } else {
+ bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+ bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
+ bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+ }
+
+ if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("TV1 connected\n");
+ bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
+ } else {
+ DRM_DEBUG("TV1 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_TV1_MASK;
+ bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("CV connected\n");
+ bios_3_scratch |= ATOM_S3_CV_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
+ } else {
+ DRM_DEBUG("CV disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_CV_MASK;
+ bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("LCD1 connected\n");
+ bios_0_scratch |= ATOM_S0_LCD1;
+ bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
+ } else {
+ DRM_DEBUG("LCD1 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_LCD1;
+ bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("CRT1 connected\n");
+ bios_0_scratch |= ATOM_S0_CRT1_COLOR;
+ bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
+ } else {
+ DRM_DEBUG("CRT1 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
+ bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("CRT2 connected\n");
+ bios_0_scratch |= ATOM_S0_CRT2_COLOR;
+ bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
+ } else {
+ DRM_DEBUG("CRT2 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
+ bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("DFP1 connected\n");
+ bios_0_scratch |= ATOM_S0_DFP1;
+ bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
+ } else {
+ DRM_DEBUG("DFP1 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_DFP1;
+ bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("DFP2 connected\n");
+ bios_0_scratch |= ATOM_S0_DFP2;
+ bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
+ } else {
+ DRM_DEBUG("DFP2 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_DFP2;
+ bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("DFP3 connected\n");
+ bios_0_scratch |= ATOM_S0_DFP3;
+ bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
+ } else {
+ DRM_DEBUG("DFP3 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_DFP3;
+ bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("DFP4 connected\n");
+ bios_0_scratch |= ATOM_S0_DFP4;
+ bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
+ } else {
+ DRM_DEBUG("DFP4 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_DFP4;
+ bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("DFP5 connected\n");
+ bios_0_scratch |= ATOM_S0_DFP5;
+ bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
+ } else {
+ DRM_DEBUG("DFP5 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_DFP5;
+ bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
+ }
+ }
+
+ if (rdev->family >= CHIP_R600) {
+ WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
+ WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
+ WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+ } else {
+ WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
+ WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
+ WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+ }
+}
+
+void
+radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_3_scratch;
+
+ if (rdev->family >= CHIP_R600)
+ bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+ else
+ bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
+
+ if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_TV1_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 18);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_CV_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 24);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_CRT1_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 16);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_CRT2_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 20);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_LCD1_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 17);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_DFP1_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 19);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_DFP2_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 23);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_DFP3_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 25);
+ }
+
+ if (rdev->family >= CHIP_R600)
+ WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
+ else
+ WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
+}
+
+void
+radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_2_scratch;
+
+ if (rdev->family >= CHIP_R600)
+ bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+ else
+ bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+ if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_TV1_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_TV1_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_CV_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_CV_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_CRT1_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_CRT1_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_CRT2_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_CRT2_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_LCD1_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_LCD1_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_DFP1_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_DFP1_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_DFP2_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_DFP2_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_DFP3_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_DFP3_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_DFP4_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_DFP4_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_DFP5_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_DFP5_DPMS_STATE;
+ }
+
+ if (rdev->family >= CHIP_R600)
+ WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+ else
+ WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
new file mode 100644
index 00000000000..c44403a2ca7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
+ unsigned sdomain, unsigned ddomain)
+{
+ struct radeon_object *dobj = NULL;
+ struct radeon_object *sobj = NULL;
+ struct radeon_fence *fence = NULL;
+ uint64_t saddr, daddr;
+ unsigned long start_jiffies;
+ unsigned long end_jiffies;
+ unsigned long time;
+ unsigned i, n, size;
+ int r;
+
+ size = bsize;
+ n = 1024;
+ r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_object_pin(sobj, sdomain, &saddr);
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_object_pin(dobj, ddomain, &daddr);
+ if (r) {
+ goto out_cleanup;
+ }
+ start_jiffies = jiffies;
+ for (i = 0; i < n; i++) {
+ r = radeon_fence_create(rdev, &fence);
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence);
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ goto out_cleanup;
+ }
+ radeon_fence_unref(&fence);
+ }
+ end_jiffies = jiffies;
+ time = end_jiffies - start_jiffies;
+ time = jiffies_to_msecs(time);
+ if (time > 0) {
+ i = ((n * size) >> 10) / time;
+ printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
+ " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
+ sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
+ }
+ start_jiffies = jiffies;
+ for (i = 0; i < n; i++) {
+ r = radeon_fence_create(rdev, &fence);
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, fence);
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ goto out_cleanup;
+ }
+ radeon_fence_unref(&fence);
+ }
+ end_jiffies = jiffies;
+ time = end_jiffies - start_jiffies;
+ time = jiffies_to_msecs(time);
+ if (time > 0) {
+ i = ((n * size) >> 10) / time;
+ printk(KERN_INFO "radeon: blit %u bo moves of %ukb from %d to %d"
+ " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
+ sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
+ }
+out_cleanup:
+ if (sobj) {
+ radeon_object_unpin(sobj);
+ radeon_object_unref(&sobj);
+ }
+ if (dobj) {
+ radeon_object_unpin(dobj);
+ radeon_object_unref(&dobj);
+ }
+ if (fence) {
+ radeon_fence_unref(&fence);
+ }
+ if (r) {
+ printk(KERN_WARNING "Error while benchmarking BO move.\n");
+ }
+}
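+
+/*
+ * Illustrative standalone sketch, not driver code: the throughput figures
+ * printed above come from total bytes moved and elapsed milliseconds, giving
+ * KiB/ms, then KiB/s, then MiB/s.  Same arithmetic with made-up numbers:
+ */
+#include <stdio.h>
+
+int main(void)
+{
+	unsigned n = 1024;			/* number of copies */
+	unsigned size = 1024 * 1024;		/* bytes per copy */
+	unsigned long ms = 350;			/* hypothetical elapsed time */
+	unsigned kib_per_ms = ((n * size) >> 10) / ms;
+
+	printf("%u KiB/ms, %u KiB/s, %u MiB/s\n",
+	       kib_per_ms, kib_per_ms * 1000, (kib_per_ms * 1000) / 1024);
+	return 0;
+}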
+
+void radeon_benchmark(struct radeon_device *rdev)
+{
+ radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_DOMAIN_VRAM);
+ radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_GTT);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
new file mode 100644
index 00000000000..96e37a6e7ce
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+/*
+ * BIOS.
+ */
+static bool radeon_read_bios(struct radeon_device *rdev)
+{
+ uint8_t __iomem *bios;
+ size_t size;
+
+ rdev->bios = NULL;
+ bios = pci_map_rom(rdev->pdev, &size);
+ if (!bios) {
+ return false;
+ }
+
+ if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ pci_unmap_rom(rdev->pdev, bios);
+ return false;
+ }
+ rdev->bios = kmalloc(size, GFP_KERNEL);
+ if (rdev->bios == NULL) {
+ pci_unmap_rom(rdev->pdev, bios);
+ return false;
+ }
+ memcpy(rdev->bios, bios, size);
+ pci_unmap_rom(rdev->pdev, bios);
+ return true;
+}
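+
+/*
+ * Illustrative standalone sketch, not driver code: the check above rejects a
+ * mapped ROM unless it begins with the standard PCI option-ROM signature
+ * bytes 0x55 0xaa.  The same test, on a plain buffer:
+ */
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+static bool demo_rom_signature_ok(const uint8_t *rom, size_t size)
+{
+	return size >= 2 && rom[0] == 0x55 && rom[1] == 0xaa;
+}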
+
+static bool r700_read_disabled_bios(struct radeon_device *rdev)
+{
+ uint32_t viph_control;
+ uint32_t bus_cntl;
+ uint32_t d1vga_control;
+ uint32_t d2vga_control;
+ uint32_t vga_render_control;
+ uint32_t rom_cntl;
+ uint32_t cg_spll_func_cntl = 0;
+ uint32_t cg_spll_status;
+ bool r;
+
+ viph_control = RREG32(RADEON_VIPH_CONTROL);
+ bus_cntl = RREG32(RADEON_BUS_CNTL);
+ d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+ d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+ vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+ rom_cntl = RREG32(R600_ROM_CNTL);
+
+ /* disable VIP */
+ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+ /* enable the rom */
+ WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_VGA_RENDER_CONTROL,
+ (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+ if (rdev->family == CHIP_RV730) {
+ cg_spll_func_cntl = RREG32(R600_CG_SPLL_FUNC_CNTL);
+
+ /* enable bypass mode */
+ WREG32(R600_CG_SPLL_FUNC_CNTL, (cg_spll_func_cntl |
+ R600_SPLL_BYPASS_EN));
+
+ /* wait for SPLL_CHG_STATUS to change to 1 */
+ cg_spll_status = 0;
+ while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
+ cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
+
+ WREG32(R600_ROM_CNTL, (rom_cntl & ~R600_SCK_OVERWRITE));
+ } else
+ WREG32(R600_ROM_CNTL, (rom_cntl | R600_SCK_OVERWRITE));
+
+ r = radeon_read_bios(rdev);
+
+ /* restore regs */
+ if (rdev->family == CHIP_RV730) {
+ WREG32(R600_CG_SPLL_FUNC_CNTL, cg_spll_func_cntl);
+
+ /* wait for SPLL_CHG_STATUS to change to 1 */
+ cg_spll_status = 0;
+ while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
+ cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
+ }
+ WREG32(RADEON_VIPH_CONTROL, viph_control);
+ WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(R600_ROM_CNTL, rom_cntl);
+ return r;
+}
+
+static bool r600_read_disabled_bios(struct radeon_device *rdev)
+{
+ uint32_t viph_control;
+ uint32_t bus_cntl;
+ uint32_t d1vga_control;
+ uint32_t d2vga_control;
+ uint32_t vga_render_control;
+ uint32_t rom_cntl;
+ uint32_t general_pwrmgt;
+ uint32_t low_vid_lower_gpio_cntl;
+ uint32_t medium_vid_lower_gpio_cntl;
+ uint32_t high_vid_lower_gpio_cntl;
+ uint32_t ctxsw_vid_lower_gpio_cntl;
+ uint32_t lower_gpio_enable;
+ bool r;
+
+ viph_control = RREG32(RADEON_VIPH_CONTROL);
+ bus_cntl = RREG32(RADEON_BUS_CNTL);
+ d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+ d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+ vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+ rom_cntl = RREG32(R600_ROM_CNTL);
+ general_pwrmgt = RREG32(R600_GENERAL_PWRMGT);
+ low_vid_lower_gpio_cntl = RREG32(R600_LOW_VID_LOWER_GPIO_CNTL);
+ medium_vid_lower_gpio_cntl = RREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL);
+ high_vid_lower_gpio_cntl = RREG32(R600_HIGH_VID_LOWER_GPIO_CNTL);
+ ctxsw_vid_lower_gpio_cntl = RREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL);
+ lower_gpio_enable = RREG32(R600_LOWER_GPIO_ENABLE);
+
+ /* disable VIP */
+ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+ /* enable the rom */
+ WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_VGA_RENDER_CONTROL,
+ (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+ WREG32(R600_ROM_CNTL,
+ ((rom_cntl & ~R600_SCK_PRESCALE_CRYSTAL_CLK_MASK) |
+ (1 << R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT) |
+ R600_SCK_OVERWRITE));
+
+ WREG32(R600_GENERAL_PWRMGT, (general_pwrmgt & ~R600_OPEN_DRAIN_PADS));
+ WREG32(R600_LOW_VID_LOWER_GPIO_CNTL,
+ (low_vid_lower_gpio_cntl & ~0x400));
+ WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL,
+ (medium_vid_lower_gpio_cntl & ~0x400));
+ WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL,
+ (high_vid_lower_gpio_cntl & ~0x400));
+ WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL,
+ (ctxsw_vid_lower_gpio_cntl & ~0x400));
+ WREG32(R600_LOWER_GPIO_ENABLE, (lower_gpio_enable | 0x400));
+
+ r = radeon_read_bios(rdev);
+
+ /* restore regs */
+ WREG32(RADEON_VIPH_CONTROL, viph_control);
+ WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(R600_ROM_CNTL, rom_cntl);
+ WREG32(R600_GENERAL_PWRMGT, general_pwrmgt);
+ WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, low_vid_lower_gpio_cntl);
+ WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, medium_vid_lower_gpio_cntl);
+ WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, high_vid_lower_gpio_cntl);
+ WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, ctxsw_vid_lower_gpio_cntl);
+ WREG32(R600_LOWER_GPIO_ENABLE, lower_gpio_enable);
+ return r;
+}
+
+static bool avivo_read_disabled_bios(struct radeon_device *rdev)
+{
+ uint32_t seprom_cntl1;
+ uint32_t viph_control;
+ uint32_t bus_cntl;
+ uint32_t d1vga_control;
+ uint32_t d2vga_control;
+ uint32_t vga_render_control;
+ uint32_t gpiopad_a;
+ uint32_t gpiopad_en;
+ uint32_t gpiopad_mask;
+ bool r;
+
+ seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
+ viph_control = RREG32(RADEON_VIPH_CONTROL);
+ bus_cntl = RREG32(RADEON_BUS_CNTL);
+ d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+ d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+ vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+ gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+ gpiopad_en = RREG32(RADEON_GPIOPAD_EN);
+ gpiopad_mask = RREG32(RADEON_GPIOPAD_MASK);
+
+ WREG32(RADEON_SEPROM_CNTL1,
+ ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
+ (0xc << RADEON_SCK_PRESCALE_SHIFT)));
+ WREG32(RADEON_GPIOPAD_A, 0);
+ WREG32(RADEON_GPIOPAD_EN, 0);
+ WREG32(RADEON_GPIOPAD_MASK, 0);
+
+ /* disable VIP */
+ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+
+ /* enable the rom */
+ WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_VGA_RENDER_CONTROL,
+ (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+ r = radeon_read_bios(rdev);
+
+ /* restore regs */
+ WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
+ WREG32(RADEON_VIPH_CONTROL, viph_control);
+ WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(RADEON_GPIOPAD_A, gpiopad_a);
+ WREG32(RADEON_GPIOPAD_EN, gpiopad_en);
+ WREG32(RADEON_GPIOPAD_MASK, gpiopad_mask);
+ return r;
+}
+
+static bool legacy_read_disabled_bios(struct radeon_device *rdev)
+{
+ uint32_t seprom_cntl1;
+ uint32_t viph_control;
+ uint32_t bus_cntl;
+ uint32_t crtc_gen_cntl;
+ uint32_t crtc2_gen_cntl;
+ uint32_t crtc_ext_cntl;
+ uint32_t fp2_gen_cntl;
+ bool r;
+
+ seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
+ viph_control = RREG32(RADEON_VIPH_CONTROL);
+ bus_cntl = RREG32(RADEON_BUS_CNTL);
+ crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
+ crtc2_gen_cntl = 0;
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+ fp2_gen_cntl = 0;
+
+ if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ }
+
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ }
+
+ WREG32(RADEON_SEPROM_CNTL1,
+ ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
+ (0xc << RADEON_SCK_PRESCALE_SHIFT)));
+
+ /* disable VIP */
+ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+
+ /* enable the rom */
+ WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+
+ /* Turn off mem requests and CRTC for both controllers */
+ WREG32(RADEON_CRTC_GEN_CNTL,
+ ((crtc_gen_cntl & ~RADEON_CRTC_EN) |
+ (RADEON_CRTC_DISP_REQ_EN_B |
+ RADEON_CRTC_EXT_DISP_EN)));
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ WREG32(RADEON_CRTC2_GEN_CNTL,
+ ((crtc2_gen_cntl & ~RADEON_CRTC2_EN) |
+ RADEON_CRTC2_DISP_REQ_EN_B));
+ }
+ /* Turn off CRTC */
+ WREG32(RADEON_CRTC_EXT_CNTL,
+ ((crtc_ext_cntl & ~RADEON_CRTC_CRT_ON) |
+ (RADEON_CRTC_SYNC_TRISTAT |
+ RADEON_CRTC_DISPLAY_DIS)));
+
+ if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
+ }
+
+ r = radeon_read_bios(rdev);
+
+ /* restore regs */
+ WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
+ WREG32(RADEON_VIPH_CONTROL, viph_control);
+ WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ }
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+ if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+ }
+ return r;
+}
+
+static bool radeon_read_disabled_bios(struct radeon_device *rdev)
+{
+ if (rdev->family >= CHIP_RV770)
+ return r700_read_disabled_bios(rdev);
+ else if (rdev->family >= CHIP_R600)
+ return r600_read_disabled_bios(rdev);
+ else if (rdev->family >= CHIP_RS600)
+ return avivo_read_disabled_bios(rdev);
+ else
+ return legacy_read_disabled_bios(rdev);
+}
+
+bool radeon_get_bios(struct radeon_device *rdev)
+{
+ bool r;
+ uint16_t tmp;
+
+ r = radeon_read_bios(rdev);
+ if (!r) {
+ r = radeon_read_disabled_bios(rdev);
+ }
+ if (!r || rdev->bios == NULL) {
+ DRM_ERROR("Unable to locate a BIOS ROM\n");
+ rdev->bios = NULL;
+ return false;
+ }
+ if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+ goto free_bios;
+ }
+
+ rdev->bios_header_start = RBIOS16(0x48);
+ if (!rdev->bios_header_start) {
+ goto free_bios;
+ }
+ tmp = rdev->bios_header_start + 4;
+ if (!memcmp(rdev->bios + tmp, "ATOM", 4) ||
+ !memcmp(rdev->bios + tmp, "MOTA", 4)) {
+ rdev->is_atom_bios = true;
+ } else {
+ rdev->is_atom_bios = false;
+ }
+
+ DRM_DEBUG("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM");
+ return true;
+free_bios:
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+ return false;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
new file mode 100644
index 00000000000..a37cbce5318
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -0,0 +1,833 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+/* returns the current engine clock in 10 kHz units */
+static uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
+{
+ struct radeon_pll *spll = &rdev->clock.spll;
+ uint32_t fb_div, ref_div, post_div, sclk;
+
+ fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+ fb_div = (fb_div >> RADEON_SPLL_FB_DIV_SHIFT) & RADEON_SPLL_FB_DIV_MASK;
+ fb_div <<= 1;
+ fb_div *= spll->reference_freq;
+
+ ref_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+ sclk = fb_div / ref_div;
+
+ post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
+ if (post_div == 2)
+ sclk >>= 1;
+ else if (post_div == 3)
+ sclk >>= 2;
+ else if (post_div == 4)
+ sclk >>= 4;
+
+ return sclk;
+}
+
+/* returns the current memory clock in 10 kHz units */
+static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
+{
+ struct radeon_pll *mpll = &rdev->clock.mpll;
+ uint32_t fb_div, ref_div, post_div, mclk;
+
+ fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+ fb_div = (fb_div >> RADEON_MPLL_FB_DIV_SHIFT) & RADEON_MPLL_FB_DIV_MASK;
+ fb_div <<= 1;
+ fb_div *= mpll->reference_freq;
+
+ ref_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+ mclk = fb_div / ref_div;
+
+ post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
+ if (post_div == 2)
+ mclk >>= 1;
+ else if (post_div == 3)
+ mclk >>= 2;
+ else if (post_div == 4)
+ mclk >>= 4;
+
+ return mclk;
+}
+
+void radeon_get_clock_info(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_pll *p1pll = &rdev->clock.p1pll;
+ struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *spll = &rdev->clock.spll;
+ struct radeon_pll *mpll = &rdev->clock.mpll;
+ int ret;
+
+ if (rdev->is_atom_bios)
+ ret = radeon_atom_get_clock_info(dev);
+ else
+ ret = radeon_combios_get_clock_info(dev);
+
+ if (ret) {
+ if (p1pll->reference_div < 2)
+ p1pll->reference_div = 12;
+ if (p2pll->reference_div < 2)
+ p2pll->reference_div = 12;
+ if (spll->reference_div < 2)
+ spll->reference_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+ RADEON_M_SPLL_REF_DIV_MASK;
+ if (mpll->reference_div < 2)
+ mpll->reference_div = spll->reference_div;
+ } else {
+ if (ASIC_IS_AVIVO(rdev)) {
+ /* TODO FALLBACK */
+ } else {
+ DRM_INFO("Using generic clock info\n");
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ p1pll->reference_freq = 1432;
+ p2pll->reference_freq = 1432;
+ spll->reference_freq = 1432;
+ mpll->reference_freq = 1432;
+ } else {
+ p1pll->reference_freq = 2700;
+ p2pll->reference_freq = 2700;
+ spll->reference_freq = 2700;
+ mpll->reference_freq = 2700;
+ }
+ p1pll->reference_div =
+ RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+ if (p1pll->reference_div < 2)
+ p1pll->reference_div = 12;
+ p2pll->reference_div = p1pll->reference_div;
+
+ if (rdev->family >= CHIP_R420) {
+ p1pll->pll_in_min = 100;
+ p1pll->pll_in_max = 1350;
+ p1pll->pll_out_min = 20000;
+ p1pll->pll_out_max = 50000;
+ p2pll->pll_in_min = 100;
+ p2pll->pll_in_max = 1350;
+ p2pll->pll_out_min = 20000;
+ p2pll->pll_out_max = 50000;
+ } else {
+ p1pll->pll_in_min = 40;
+ p1pll->pll_in_max = 500;
+ p1pll->pll_out_min = 12500;
+ p1pll->pll_out_max = 35000;
+ p2pll->pll_in_min = 40;
+ p2pll->pll_in_max = 500;
+ p2pll->pll_out_min = 12500;
+ p2pll->pll_out_max = 35000;
+ }
+
+ spll->reference_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+ RADEON_M_SPLL_REF_DIV_MASK;
+ mpll->reference_div = spll->reference_div;
+ rdev->clock.default_sclk =
+ radeon_legacy_get_engine_clock(rdev);
+ rdev->clock.default_mclk =
+ radeon_legacy_get_memory_clock(rdev);
+ }
+ }
+
+ /* pixel clocks */
+ if (ASIC_IS_AVIVO(rdev)) {
+ p1pll->min_post_div = 2;
+ p1pll->max_post_div = 0x7f;
+ p1pll->min_frac_feedback_div = 0;
+ p1pll->max_frac_feedback_div = 9;
+ p2pll->min_post_div = 2;
+ p2pll->max_post_div = 0x7f;
+ p2pll->min_frac_feedback_div = 0;
+ p2pll->max_frac_feedback_div = 9;
+ } else {
+ p1pll->min_post_div = 1;
+ p1pll->max_post_div = 16;
+ p1pll->min_frac_feedback_div = 0;
+ p1pll->max_frac_feedback_div = 0;
+ p2pll->min_post_div = 1;
+ p2pll->max_post_div = 12;
+ p2pll->min_frac_feedback_div = 0;
+ p2pll->max_frac_feedback_div = 0;
+ }
+
+ p1pll->min_ref_div = 2;
+ p1pll->max_ref_div = 0x3ff;
+ p1pll->min_feedback_div = 4;
+ p1pll->max_feedback_div = 0x7ff;
+ p1pll->best_vco = 0;
+
+ p2pll->min_ref_div = 2;
+ p2pll->max_ref_div = 0x3ff;
+ p2pll->min_feedback_div = 4;
+ p2pll->max_feedback_div = 0x7ff;
+ p2pll->best_vco = 0;
+
+ /* system clock */
+ spll->min_post_div = 1;
+ spll->max_post_div = 1;
+ spll->min_ref_div = 2;
+ spll->max_ref_div = 0xff;
+ spll->min_feedback_div = 4;
+ spll->max_feedback_div = 0xff;
+ spll->best_vco = 0;
+
+ /* memory clock */
+ mpll->min_post_div = 1;
+ mpll->max_post_div = 1;
+ mpll->min_ref_div = 2;
+ mpll->max_ref_div = 0xff;
+ mpll->min_feedback_div = 4;
+ mpll->max_feedback_div = 0xff;
+ mpll->best_vco = 0;
+
+}
+
+/* engine/memory clock divider calculation, clocks in 10 kHz units */
+static uint32_t calc_eng_mem_clock(struct radeon_device *rdev,
+ uint32_t req_clock,
+ int *fb_div, int *post_div)
+{
+ struct radeon_pll *spll = &rdev->clock.spll;
+ int ref_div = spll->reference_div;
+
+ if (!ref_div)
+ ref_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+ RADEON_M_SPLL_REF_DIV_MASK;
+
+ if (req_clock < 15000) {
+ *post_div = 8;
+ req_clock *= 8;
+ } else if (req_clock < 30000) {
+ *post_div = 4;
+ req_clock *= 4;
+ } else if (req_clock < 60000) {
+ *post_div = 2;
+ req_clock *= 2;
+ } else
+ *post_div = 1;
+
+ req_clock *= ref_div;
+ req_clock += spll->reference_freq;
+ req_clock /= (2 * spll->reference_freq);
+
+ *fb_div = req_clock & 0xff;
+
+ req_clock = (req_clock & 0xffff) << 1;
+ req_clock *= spll->reference_freq;
+ req_clock /= ref_div;
+ req_clock /= *post_div;
+
+ return req_clock;
+}
+
+/* sets the engine clock, in 10 kHz units */
+void radeon_legacy_set_engine_clock(struct radeon_device *rdev,
+ uint32_t eng_clock)
+{
+ uint32_t tmp;
+ int fb_div, post_div;
+
+ /* XXX: wait for idle */
+
+ eng_clock = calc_eng_mem_clock(rdev, eng_clock, &fb_div, &post_div);
+
+ tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+ tmp &= ~RADEON_DONT_USE_XTALIN;
+ WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ udelay(10);
+
+ tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+ tmp |= RADEON_SPLL_SLEEP;
+ WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+ udelay(2);
+
+ tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+ tmp |= RADEON_SPLL_RESET;
+ WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+ udelay(200);
+
+ tmp = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+ tmp &= ~(RADEON_SPLL_FB_DIV_MASK << RADEON_SPLL_FB_DIV_SHIFT);
+ tmp |= (fb_div & RADEON_SPLL_FB_DIV_MASK) << RADEON_SPLL_FB_DIV_SHIFT;
+ WREG32_PLL(RADEON_M_SPLL_REF_FB_DIV, tmp);
+
+ /* XXX: verify on different asics */
+ tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+ tmp &= ~RADEON_SPLL_PVG_MASK;
+ if ((eng_clock * post_div) >= 90000)
+ tmp |= (0x7 << RADEON_SPLL_PVG_SHIFT);
+ else
+ tmp |= (0x4 << RADEON_SPLL_PVG_SHIFT);
+ WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+ tmp &= ~RADEON_SPLL_SLEEP;
+ WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+ udelay(2);
+
+ tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+ tmp &= ~RADEON_SPLL_RESET;
+ WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+ udelay(200);
+
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
+ switch (post_div) {
+ case 1:
+ default:
+ tmp |= 1;
+ break;
+ case 2:
+ tmp |= 2;
+ break;
+ case 4:
+ tmp |= 3;
+ break;
+ case 8:
+ tmp |= 4;
+ break;
+ }
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ udelay(20);
+
+ tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+ tmp |= RADEON_DONT_USE_XTALIN;
+ WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+
+ udelay(10);
+}
+
+void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+{
+ uint32_t tmp;
+
+ if (enable) {
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ if ((RREG32(RADEON_CONFIG_CNTL) &
+ RADEON_CFG_ATI_REV_ID_MASK) >
+ RADEON_CFG_ATI_REV_A13) {
+ tmp &=
+ ~(RADEON_SCLK_FORCE_CP |
+ RADEON_SCLK_FORCE_RB);
+ }
+ tmp &=
+ ~(RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 |
+ RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_SE |
+ RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_RE |
+ RADEON_SCLK_FORCE_PB | RADEON_SCLK_FORCE_TAM |
+ RADEON_SCLK_FORCE_TDM);
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+ } else if (ASIC_IS_R300(rdev)) {
+ if ((rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480)) {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp &=
+ ~(RADEON_SCLK_FORCE_DISP2 |
+ RADEON_SCLK_FORCE_CP |
+ RADEON_SCLK_FORCE_HDP |
+ RADEON_SCLK_FORCE_DISP1 |
+ RADEON_SCLK_FORCE_TOP |
+ RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
+ | RADEON_SCLK_FORCE_IDCT |
+ RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
+ | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
+ | R300_SCLK_FORCE_US |
+ RADEON_SCLK_FORCE_TV_SCLK |
+ R300_SCLK_FORCE_SU |
+ RADEON_SCLK_FORCE_OV0);
+ tmp |= RADEON_DYN_STOP_LAT_MASK;
+ tmp |=
+ RADEON_SCLK_FORCE_TOP |
+ RADEON_SCLK_FORCE_VIP;
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ tmp &= ~RADEON_SCLK_MORE_FORCEON;
+ tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
+ WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+ RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+ RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+ R300_DVOCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+ RADEON_PIXCLK_GV_ALWAYS_ONb |
+ R300_PIXCLK_DVO_ALWAYS_ONb |
+ RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+ RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+ R300_PIXCLK_TRANS_ALWAYS_ONb |
+ R300_PIXCLK_TVO_ALWAYS_ONb |
+ R300_P2G2CLK_ALWAYS_ONb |
+ R300_P2G2CLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+ } else if (rdev->family >= CHIP_RV350) {
+ tmp = RREG32_PLL(R300_SCLK_CNTL2);
+ tmp &= ~(R300_SCLK_FORCE_TCL |
+ R300_SCLK_FORCE_GA |
+ R300_SCLK_FORCE_CBA);
+ tmp |= (R300_SCLK_TCL_MAX_DYN_STOP_LAT |
+ R300_SCLK_GA_MAX_DYN_STOP_LAT |
+ R300_SCLK_CBA_MAX_DYN_STOP_LAT);
+ WREG32_PLL(R300_SCLK_CNTL2, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp &=
+ ~(RADEON_SCLK_FORCE_DISP2 |
+ RADEON_SCLK_FORCE_CP |
+ RADEON_SCLK_FORCE_HDP |
+ RADEON_SCLK_FORCE_DISP1 |
+ RADEON_SCLK_FORCE_TOP |
+ RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
+ | RADEON_SCLK_FORCE_IDCT |
+ RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
+ | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
+ | R300_SCLK_FORCE_US |
+ RADEON_SCLK_FORCE_TV_SCLK |
+ R300_SCLK_FORCE_SU |
+ RADEON_SCLK_FORCE_OV0);
+ tmp |= RADEON_DYN_STOP_LAT_MASK;
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ tmp &= ~RADEON_SCLK_MORE_FORCEON;
+ tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
+ WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+ RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+ RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+ R300_DVOCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+ RADEON_PIXCLK_GV_ALWAYS_ONb |
+ R300_PIXCLK_DVO_ALWAYS_ONb |
+ RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+ RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+ R300_PIXCLK_TRANS_ALWAYS_ONb |
+ R300_PIXCLK_TVO_ALWAYS_ONb |
+ R300_P2G2CLK_ALWAYS_ONb |
+ R300_P2G2CLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_MCLK_MISC);
+ tmp |= (RADEON_MC_MCLK_DYN_ENABLE |
+ RADEON_IO_MCLK_DYN_ENABLE);
+ WREG32_PLL(RADEON_MCLK_MISC, tmp);
+
+ tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+ tmp |= (RADEON_FORCEON_MCLKA |
+ RADEON_FORCEON_MCLKB);
+
+ tmp &= ~(RADEON_FORCEON_YCLKA |
+ RADEON_FORCEON_YCLKB |
+ RADEON_FORCEON_MC);
+
+ /* Some releases of the vbios set the DISABLE_MC_MCLKA and
+ * DISABLE_MC_MCLKB bits in the vbios table. Leaving these bits
+ * set causes a hardware hang when reading video memory with
+ * dynamic clocking enabled. */
+ if ((tmp & R300_DISABLE_MC_MCLKA) &&
+ (tmp & R300_DISABLE_MC_MCLKB)) {
+ /* If both bits are set, then check the active channels */
+ tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+ if (rdev->mc.vram_width == 64) {
+ if (RREG32(RADEON_MEM_CNTL) &
+ R300_MEM_USE_CD_CH_ONLY)
+ tmp &=
+ ~R300_DISABLE_MC_MCLKB;
+ else
+ tmp &=
+ ~R300_DISABLE_MC_MCLKA;
+ } else {
+ tmp &= ~(R300_DISABLE_MC_MCLKA |
+ R300_DISABLE_MC_MCLKB);
+ }
+ }
+
+ WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+ } else {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp &= ~(R300_SCLK_FORCE_VAP);
+ tmp |= RADEON_SCLK_FORCE_CP;
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+ udelay(15000);
+
+ tmp = RREG32_PLL(R300_SCLK_CNTL2);
+ tmp &= ~(R300_SCLK_FORCE_TCL |
+ R300_SCLK_FORCE_GA |
+ R300_SCLK_FORCE_CBA);
+ WREG32_PLL(R300_SCLK_CNTL2, tmp);
+ }
+ } else {
+ tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
+
+ tmp &= ~(RADEON_ACTIVE_HILO_LAT_MASK |
+ RADEON_DISP_DYN_STOP_LAT_MASK |
+ RADEON_DYN_STOP_MODE_MASK);
+
+ tmp |= (RADEON_ENGIN_DYNCLK_MODE |
+ (0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
+ WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
+ udelay(15000);
+
+ tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+ tmp |= RADEON_SCLK_DYN_START_CNTL;
+ WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+ udelay(15000);
+
+ /* When DRI is enabled, setting DYN_STOP_LAT to zero can cause
+ * some R200 chips to lock up randomly; leave it as set by the
+ * BIOS. */
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ /*tmp &= RADEON_SCLK_SRC_SEL_MASK; */
+ tmp &= ~RADEON_SCLK_FORCEON_MASK;
+
+ /*RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300 */
+ if (((rdev->family == CHIP_RV250) &&
+ ((RREG32(RADEON_CONFIG_CNTL) &
+ RADEON_CFG_ATI_REV_ID_MASK) <
+ RADEON_CFG_ATI_REV_A13))
+ || ((rdev->family == CHIP_RV100)
+ &&
+ ((RREG32(RADEON_CONFIG_CNTL) &
+ RADEON_CFG_ATI_REV_ID_MASK) <=
+ RADEON_CFG_ATI_REV_A13))) {
+ tmp |= RADEON_SCLK_FORCE_CP;
+ tmp |= RADEON_SCLK_FORCE_VIP;
+ }
+
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ if ((rdev->family == CHIP_RV200) ||
+ (rdev->family == CHIP_RV250) ||
+ (rdev->family == CHIP_RV280)) {
+ tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ tmp &= ~RADEON_SCLK_MORE_FORCEON;
+
+ /* RV200::A11 A12 RV250::A11 A12 */
+ if (((rdev->family == CHIP_RV200) ||
+ (rdev->family == CHIP_RV250)) &&
+ ((RREG32(RADEON_CONFIG_CNTL) &
+ RADEON_CFG_ATI_REV_ID_MASK) <
+ RADEON_CFG_ATI_REV_A13)) {
+ tmp |= RADEON_SCLK_MORE_FORCEON;
+ }
+ WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+ udelay(15000);
+ }
+
+ /* RV200::A11 A12, RV250::A11 A12 */
+ if (((rdev->family == CHIP_RV200) ||
+ (rdev->family == CHIP_RV250)) &&
+ ((RREG32(RADEON_CONFIG_CNTL) &
+ RADEON_CFG_ATI_REV_ID_MASK) <
+ RADEON_CFG_ATI_REV_A13)) {
+ tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+ tmp |= RADEON_TCL_BYPASS_DISABLE;
+ WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+ }
+ udelay(15000);
+
+ /*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
+ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+ RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+ RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+ RADEON_PIXCLK_GV_ALWAYS_ONb |
+ RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
+ RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+ RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+ udelay(15000);
+
+ tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_DAC_ALWAYS_ONb);
+
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+ udelay(15000);
+ }
+ } else {
+ /* Turn everything OFF (ForceON to everything) */
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_HDP |
+ RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_TOP
+ | RADEON_SCLK_FORCE_E2 | RADEON_SCLK_FORCE_SE |
+ RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_VIP |
+ RADEON_SCLK_FORCE_RE | RADEON_SCLK_FORCE_PB |
+ RADEON_SCLK_FORCE_TAM | RADEON_SCLK_FORCE_TDM |
+ RADEON_SCLK_FORCE_RB);
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+ } else if ((rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480)) {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
+ RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
+ | RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
+ R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
+ RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
+ R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
+ R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
+ R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ tmp |= RADEON_SCLK_MORE_FORCEON;
+ WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_DAC_ALWAYS_ONb |
+ R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+ RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+ RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+ R300_DVOCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+ RADEON_PIXCLK_GV_ALWAYS_ONb |
+ R300_PIXCLK_DVO_ALWAYS_ONb |
+ RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+ RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+ R300_PIXCLK_TRANS_ALWAYS_ONb |
+ R300_PIXCLK_TVO_ALWAYS_ONb |
+ R300_P2G2CLK_ALWAYS_ONb |
+ R300_P2G2CLK_DAC_ALWAYS_ONb |
+ R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+ } else if (rdev->family >= CHIP_RV350) {
+ /* for RV350/M10, no delays are required. */
+ tmp = RREG32_PLL(R300_SCLK_CNTL2);
+ tmp |= (R300_SCLK_FORCE_TCL |
+ R300_SCLK_FORCE_GA | R300_SCLK_FORCE_CBA);
+ WREG32_PLL(R300_SCLK_CNTL2, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
+ RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
+ | RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
+ R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
+ RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
+ R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
+ R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
+ R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ tmp |= RADEON_SCLK_MORE_FORCEON;
+ WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+ tmp |= (RADEON_FORCEON_MCLKA |
+ RADEON_FORCEON_MCLKB |
+ RADEON_FORCEON_YCLKA |
+ RADEON_FORCEON_YCLKB | RADEON_FORCEON_MC);
+ WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_DAC_ALWAYS_ONb |
+ R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+ RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+ RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+ R300_DVOCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+ RADEON_PIXCLK_GV_ALWAYS_ONb |
+ R300_PIXCLK_DVO_ALWAYS_ONb |
+ RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+ RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+ R300_PIXCLK_TRANS_ALWAYS_ONb |
+ R300_PIXCLK_TVO_ALWAYS_ONb |
+ R300_P2G2CLK_ALWAYS_ONb |
+ R300_P2G2CLK_DAC_ALWAYS_ONb |
+ R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+ } else {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_E2);
+ tmp |= RADEON_SCLK_FORCE_SE;
+
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ tmp |= (RADEON_SCLK_FORCE_RB |
+ RADEON_SCLK_FORCE_TDM |
+ RADEON_SCLK_FORCE_TAM |
+ RADEON_SCLK_FORCE_PB |
+ RADEON_SCLK_FORCE_RE |
+ RADEON_SCLK_FORCE_VIP |
+ RADEON_SCLK_FORCE_IDCT |
+ RADEON_SCLK_FORCE_TOP |
+ RADEON_SCLK_FORCE_DISP1 |
+ RADEON_SCLK_FORCE_DISP2 |
+ RADEON_SCLK_FORCE_HDP);
+ } else if ((rdev->family == CHIP_R300) ||
+ (rdev->family == CHIP_R350)) {
+ tmp |= (RADEON_SCLK_FORCE_HDP |
+ RADEON_SCLK_FORCE_DISP1 |
+ RADEON_SCLK_FORCE_DISP2 |
+ RADEON_SCLK_FORCE_TOP |
+ RADEON_SCLK_FORCE_IDCT |
+ RADEON_SCLK_FORCE_VIP);
+ }
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ udelay(16000);
+
+ if ((rdev->family == CHIP_R300) ||
+ (rdev->family == CHIP_R350)) {
+ tmp = RREG32_PLL(R300_SCLK_CNTL2);
+ tmp |= (R300_SCLK_FORCE_TCL |
+ R300_SCLK_FORCE_GA |
+ R300_SCLK_FORCE_CBA);
+ WREG32_PLL(R300_SCLK_CNTL2, tmp);
+ udelay(16000);
+ }
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+ tmp &= ~(RADEON_FORCEON_MCLKA |
+ RADEON_FORCEON_YCLKA);
+ WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+ udelay(16000);
+ }
+
+ if ((rdev->family == CHIP_RV200) ||
+ (rdev->family == CHIP_RV250) ||
+ (rdev->family == CHIP_RV280)) {
+ tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ tmp |= RADEON_SCLK_MORE_FORCEON;
+ WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+ udelay(16000);
+ }
+
+ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+ RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+ RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+ RADEON_PIXCLK_GV_ALWAYS_ONb |
+ RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
+ RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+ RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+ udelay(16000);
+
+ tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+ }
+ }
+}
+
+static void radeon_apply_clock_quirks(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ /* XXX make sure engine is idle */
+
+ if (rdev->family < CHIP_RS600) {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
+ tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
+ if ((rdev->family == CHIP_RV250)
+ || (rdev->family == CHIP_RV280))
+ tmp |=
+ RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
+ if ((rdev->family == CHIP_RV350)
+ || (rdev->family == CHIP_RV380))
+ tmp |= R300_SCLK_FORCE_VAP;
+ if (rdev->family == CHIP_R420)
+ tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+ } else if (rdev->family < CHIP_R600) {
+ tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
+ tmp |= AVIVO_CP_FORCEON;
+ WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
+
+ tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
+ tmp |= AVIVO_E2_FORCEON;
+ WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
+
+ tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
+ tmp |= AVIVO_IDCT_FORCEON;
+ WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
+ }
+}
+
+int radeon_static_clocks_init(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* XXX make sure engine is idle */
+
+ if (radeon_dynclks != -1) {
+ if (radeon_dynclks)
+ radeon_set_clock_gating(rdev, 1);
+ }
+ radeon_apply_clock_quirks(rdev);
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
new file mode 100644
index 00000000000..06e8038bc4a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -0,0 +1,2481 @@
+/*
+ * Copyright 2004 ATI Technologies Inc., Markham, Ontario
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "atom.h"
+
+#ifdef CONFIG_PPC_PMAC
+/* not sure which of these are needed */
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#endif /* CONFIG_PPC_PMAC */
+
+/* from radeon_encoder.c */
+extern uint32_t
+radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+
+/* from radeon_connector.c */
+extern void
+radeon_add_legacy_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus);
+
+/* from radeon_legacy_encoder.c */
+extern void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+ uint32_t supported_device);
+
+/* old legacy ATI BIOS routines */
+
+/* COMBIOS table offsets */
+enum radeon_combios_table_offset {
+ /* absolute offset tables */
+ COMBIOS_ASIC_INIT_1_TABLE,
+ COMBIOS_BIOS_SUPPORT_TABLE,
+ COMBIOS_DAC_PROGRAMMING_TABLE,
+ COMBIOS_MAX_COLOR_DEPTH_TABLE,
+ COMBIOS_CRTC_INFO_TABLE,
+ COMBIOS_PLL_INFO_TABLE,
+ COMBIOS_TV_INFO_TABLE,
+ COMBIOS_DFP_INFO_TABLE,
+ COMBIOS_HW_CONFIG_INFO_TABLE,
+ COMBIOS_MULTIMEDIA_INFO_TABLE,
+ COMBIOS_TV_STD_PATCH_TABLE,
+ COMBIOS_LCD_INFO_TABLE,
+ COMBIOS_MOBILE_INFO_TABLE,
+ COMBIOS_PLL_INIT_TABLE,
+ COMBIOS_MEM_CONFIG_TABLE,
+ COMBIOS_SAVE_MASK_TABLE,
+ COMBIOS_HARDCODED_EDID_TABLE,
+ COMBIOS_ASIC_INIT_2_TABLE,
+ COMBIOS_CONNECTOR_INFO_TABLE,
+ COMBIOS_DYN_CLK_1_TABLE,
+ COMBIOS_RESERVED_MEM_TABLE,
+ COMBIOS_EXT_TMDS_INFO_TABLE,
+ COMBIOS_MEM_CLK_INFO_TABLE,
+ COMBIOS_EXT_DAC_INFO_TABLE,
+ COMBIOS_MISC_INFO_TABLE,
+ COMBIOS_CRT_INFO_TABLE,
+ COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE,
+ COMBIOS_COMPONENT_VIDEO_INFO_TABLE,
+ COMBIOS_FAN_SPEED_INFO_TABLE,
+ COMBIOS_OVERDRIVE_INFO_TABLE,
+ COMBIOS_OEM_INFO_TABLE,
+ COMBIOS_DYN_CLK_2_TABLE,
+ COMBIOS_POWER_CONNECTOR_INFO_TABLE,
+ COMBIOS_I2C_INFO_TABLE,
+ /* relative offset tables */
+ COMBIOS_ASIC_INIT_3_TABLE, /* offset from misc info */
+ COMBIOS_ASIC_INIT_4_TABLE, /* offset from misc info */
+ COMBIOS_DETECTED_MEM_TABLE, /* offset from misc info */
+ COMBIOS_ASIC_INIT_5_TABLE, /* offset from misc info */
+ COMBIOS_RAM_RESET_TABLE, /* offset from mem config */
+ COMBIOS_POWERPLAY_INFO_TABLE, /* offset from mobile info */
+ COMBIOS_GPIO_INFO_TABLE, /* offset from mobile info */
+ COMBIOS_LCD_DDC_INFO_TABLE, /* offset from mobile info */
+ COMBIOS_TMDS_POWER_TABLE, /* offset from mobile info */
+ COMBIOS_TMDS_POWER_ON_TABLE, /* offset from tmds power */
+ COMBIOS_TMDS_POWER_OFF_TABLE, /* offset from tmds power */
+};
+
+enum radeon_combios_ddc {
+ DDC_NONE_DETECTED,
+ DDC_MONID,
+ DDC_DVI,
+ DDC_VGA,
+ DDC_CRT2,
+ DDC_LCD,
+ DDC_GPIO,
+};
+
+enum radeon_combios_connector {
+ CONNECTOR_NONE_LEGACY,
+ CONNECTOR_PROPRIETARY_LEGACY,
+ CONNECTOR_CRT_LEGACY,
+ CONNECTOR_DVI_I_LEGACY,
+ CONNECTOR_DVI_D_LEGACY,
+ CONNECTOR_CTV_LEGACY,
+ CONNECTOR_STV_LEGACY,
+ CONNECTOR_UNSUPPORTED_LEGACY
+};
+
+const int legacy_connector_convert[] = {
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_DVID,
+ DRM_MODE_CONNECTOR_VGA,
+ DRM_MODE_CONNECTOR_DVII,
+ DRM_MODE_CONNECTOR_DVID,
+ DRM_MODE_CONNECTOR_Composite,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ DRM_MODE_CONNECTOR_Unknown,
+};
+
+static uint16_t combios_get_table_offset(struct drm_device *dev,
+ enum radeon_combios_table_offset table)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ int rev;
+ uint16_t offset = 0, check_offset;
+
+ switch (table) {
+ /* absolute offset tables */
+ case COMBIOS_ASIC_INIT_1_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0xc);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_BIOS_SUPPORT_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x14);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_DAC_PROGRAMMING_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_MAX_COLOR_DEPTH_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_CRTC_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_PLL_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x30);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_TV_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x32);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_DFP_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x34);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_HW_CONFIG_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x36);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_MULTIMEDIA_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x38);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_TV_STD_PATCH_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_LCD_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x40);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_MOBILE_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x42);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_PLL_INIT_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x46);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_MEM_CONFIG_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x48);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_SAVE_MASK_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_HARDCODED_EDID_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_ASIC_INIT_2_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_CONNECTOR_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x50);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_DYN_CLK_1_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x52);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_RESERVED_MEM_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x54);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_EXT_TMDS_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x58);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_MEM_CLK_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_EXT_DAC_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_MISC_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_CRT_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x60);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x62);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x64);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_FAN_SPEED_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x66);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_OVERDRIVE_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x68);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_OEM_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_DYN_CLK_2_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ case COMBIOS_I2C_INFO_TABLE:
+ check_offset = RBIOS16(rdev->bios_header_start + 0x70);
+ if (check_offset)
+ offset = check_offset;
+ break;
+ /* relative offset tables */
+ case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+ if (check_offset) {
+ rev = RBIOS8(check_offset);
+ if (rev > 0) {
+ check_offset = RBIOS16(check_offset + 0x3);
+ if (check_offset)
+ offset = check_offset;
+ }
+ }
+ break;
+ case COMBIOS_ASIC_INIT_4_TABLE: /* offset from misc info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+ if (check_offset) {
+ rev = RBIOS8(check_offset);
+ if (rev > 0) {
+ check_offset = RBIOS16(check_offset + 0x5);
+ if (check_offset)
+ offset = check_offset;
+ }
+ }
+ break;
+ case COMBIOS_DETECTED_MEM_TABLE: /* offset from misc info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+ if (check_offset) {
+ rev = RBIOS8(check_offset);
+ if (rev > 0) {
+ check_offset = RBIOS16(check_offset + 0x7);
+ if (check_offset)
+ offset = check_offset;
+ }
+ }
+ break;
+ case COMBIOS_ASIC_INIT_5_TABLE: /* offset from misc info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+ if (check_offset) {
+ rev = RBIOS8(check_offset);
+ if (rev == 2) {
+ check_offset = RBIOS16(check_offset + 0x9);
+ if (check_offset)
+ offset = check_offset;
+ }
+ }
+ break;
+ case COMBIOS_RAM_RESET_TABLE: /* offset from mem config */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
+ if (check_offset) {
+ while (RBIOS8(check_offset++));
+ check_offset += 2;
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ case COMBIOS_POWERPLAY_INFO_TABLE: /* offset from mobile info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+ if (check_offset) {
+ check_offset = RBIOS16(check_offset + 0x11);
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ case COMBIOS_GPIO_INFO_TABLE: /* offset from mobile info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+ if (check_offset) {
+ check_offset = RBIOS16(check_offset + 0x13);
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ case COMBIOS_LCD_DDC_INFO_TABLE: /* offset from mobile info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+ if (check_offset) {
+ check_offset = RBIOS16(check_offset + 0x15);
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ case COMBIOS_TMDS_POWER_TABLE: /* offset from mobile info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+ if (check_offset) {
+ check_offset = RBIOS16(check_offset + 0x17);
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ case COMBIOS_TMDS_POWER_ON_TABLE: /* offset from tmds power */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
+ if (check_offset) {
+ check_offset = RBIOS16(check_offset + 0x2);
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ case COMBIOS_TMDS_POWER_OFF_TABLE: /* offset from tmds power */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
+ if (check_offset) {
+ check_offset = RBIOS16(check_offset + 0x4);
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return offset;
+
+}
+
+struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line)
+{
+ struct radeon_i2c_bus_rec i2c;
+
+ i2c.mask_clk_mask = RADEON_GPIO_EN_1;
+ i2c.mask_data_mask = RADEON_GPIO_EN_0;
+ i2c.a_clk_mask = RADEON_GPIO_A_1;
+ i2c.a_data_mask = RADEON_GPIO_A_0;
+ i2c.put_clk_mask = RADEON_GPIO_EN_1;
+ i2c.put_data_mask = RADEON_GPIO_EN_0;
+ i2c.get_clk_mask = RADEON_GPIO_Y_1;
+ i2c.get_data_mask = RADEON_GPIO_Y_0;
+ if ((ddc_line == RADEON_LCD_GPIO_MASK) ||
+ (ddc_line == RADEON_MDGPIO_EN_REG)) {
+ i2c.mask_clk_reg = ddc_line;
+ i2c.mask_data_reg = ddc_line;
+ i2c.a_clk_reg = ddc_line;
+ i2c.a_data_reg = ddc_line;
+ i2c.put_clk_reg = ddc_line;
+ i2c.put_data_reg = ddc_line;
+ i2c.get_clk_reg = ddc_line + 4;
+ i2c.get_data_reg = ddc_line + 4;
+ } else {
+ i2c.mask_clk_reg = ddc_line;
+ i2c.mask_data_reg = ddc_line;
+ i2c.a_clk_reg = ddc_line;
+ i2c.a_data_reg = ddc_line;
+ i2c.put_clk_reg = ddc_line;
+ i2c.put_data_reg = ddc_line;
+ i2c.get_clk_reg = ddc_line;
+ i2c.get_data_reg = ddc_line;
+ }
+
+ if (ddc_line)
+ i2c.valid = true;
+ else
+ i2c.valid = false;
+
+ return i2c;
+}
+
+bool radeon_combios_get_clock_info(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t pll_info;
+ struct radeon_pll *p1pll = &rdev->clock.p1pll;
+ struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *spll = &rdev->clock.spll;
+ struct radeon_pll *mpll = &rdev->clock.mpll;
+ int8_t rev;
+ uint16_t sclk, mclk;
+
+ if (rdev->bios == NULL)
+ return false;
+
+ pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
+ if (pll_info) {
+ rev = RBIOS8(pll_info);
+
+ /* pixel clocks */
+ p1pll->reference_freq = RBIOS16(pll_info + 0xe);
+ p1pll->reference_div = RBIOS16(pll_info + 0x10);
+ p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
+ p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
+
+ if (rev > 9) {
+ p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
+ p1pll->pll_in_max = RBIOS32(pll_info + 0x3a);
+ } else {
+ p1pll->pll_in_min = 40;
+ p1pll->pll_in_max = 500;
+ }
+ *p2pll = *p1pll;
+
+ /* system clock */
+ spll->reference_freq = RBIOS16(pll_info + 0x1a);
+ spll->reference_div = RBIOS16(pll_info + 0x1c);
+ spll->pll_out_min = RBIOS32(pll_info + 0x1e);
+ spll->pll_out_max = RBIOS32(pll_info + 0x22);
+
+ if (rev > 10) {
+ spll->pll_in_min = RBIOS32(pll_info + 0x48);
+ spll->pll_in_max = RBIOS32(pll_info + 0x4c);
+ } else {
+ /* ??? */
+ spll->pll_in_min = 40;
+ spll->pll_in_max = 500;
+ }
+
+ /* memory clock */
+ mpll->reference_freq = RBIOS16(pll_info + 0x26);
+ mpll->reference_div = RBIOS16(pll_info + 0x28);
+ mpll->pll_out_min = RBIOS32(pll_info + 0x2a);
+ mpll->pll_out_max = RBIOS32(pll_info + 0x2e);
+
+ if (rev > 10) {
+ mpll->pll_in_min = RBIOS32(pll_info + 0x5a);
+ mpll->pll_in_max = RBIOS32(pll_info + 0x5e);
+ } else {
+ /* ??? */
+ mpll->pll_in_min = 40;
+ mpll->pll_in_max = 500;
+ }
+
+ /* default sclk/mclk */
+ sclk = RBIOS16(pll_info + 0xa);
+ mclk = RBIOS16(pll_info + 0x8);
+ if (sclk == 0)
+ sclk = 200 * 100;
+ if (mclk == 0)
+ mclk = 200 * 100;
+
+ rdev->clock.default_sclk = sclk;
+ rdev->clock.default_mclk = mclk;
+
+ return true;
+ }
+ return false;
+}
+
+struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
+ radeon_encoder
+ *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t dac_info;
+ uint8_t rev, bg, dac;
+ struct radeon_encoder_primary_dac *p_dac = NULL;
+
+ if (rdev->bios == NULL)
+ return NULL;
+
+ /* check CRT table */
+ dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+ if (dac_info) {
+ p_dac =
+ kzalloc(sizeof(struct radeon_encoder_primary_dac),
+ GFP_KERNEL);
+
+ if (!p_dac)
+ return NULL;
+
+ rev = RBIOS8(dac_info) & 0x3;
+ if (rev < 2) {
+ bg = RBIOS8(dac_info + 0x2) & 0xf;
+ dac = (RBIOS8(dac_info + 0x2) >> 4) & 0xf;
+ p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+ } else {
+ bg = RBIOS8(dac_info + 0x2) & 0xf;
+ dac = RBIOS8(dac_info + 0x3) & 0xf;
+ p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+ }
+
+ }
+
+ return p_dac;
+}
+
+static enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t tv_info;
+ enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+ tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+ if (tv_info) {
+ if (RBIOS8(tv_info + 6) == 'T') {
+ switch (RBIOS8(tv_info + 7) & 0xf) {
+ case 1:
+ tv_std = TV_STD_NTSC;
+ DRM_INFO("Default TV standard: NTSC\n");
+ break;
+ case 2:
+ tv_std = TV_STD_PAL;
+ DRM_INFO("Default TV standard: PAL\n");
+ break;
+ case 3:
+ tv_std = TV_STD_PAL_M;
+ DRM_INFO("Default TV standard: PAL-M\n");
+ break;
+ case 4:
+ tv_std = TV_STD_PAL_60;
+ DRM_INFO("Default TV standard: PAL-60\n");
+ break;
+ case 5:
+ tv_std = TV_STD_NTSC_J;
+ DRM_INFO("Default TV standard: NTSC-J\n");
+ break;
+ case 6:
+ tv_std = TV_STD_SCART_PAL;
+ DRM_INFO("Default TV standard: SCART-PAL\n");
+ break;
+ default:
+ tv_std = TV_STD_NTSC;
+ DRM_INFO
+ ("Unknown TV standard; defaulting to NTSC\n");
+ break;
+ }
+
+ switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
+ case 0:
+ DRM_INFO("29.498928713 MHz TV ref clk\n");
+ break;
+ case 1:
+ DRM_INFO("28.636360000 MHz TV ref clk\n");
+ break;
+ case 2:
+ DRM_INFO("14.318180000 MHz TV ref clk\n");
+ break;
+ case 3:
+ DRM_INFO("27.000000000 MHz TV ref clk\n");
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return tv_std;
+}
+
+static const uint32_t default_tvdac_adj[CHIP_LAST] = {
+ 0x00000000, /* r100 */
+ 0x00280000, /* rv100 */
+ 0x00000000, /* rs100 */
+ 0x00880000, /* rv200 */
+ 0x00000000, /* rs200 */
+ 0x00000000, /* r200 */
+ 0x00770000, /* rv250 */
+ 0x00290000, /* rs300 */
+ 0x00560000, /* rv280 */
+ 0x00780000, /* r300 */
+ 0x00770000, /* r350 */
+ 0x00780000, /* rv350 */
+ 0x00780000, /* rv380 */
+ 0x01080000, /* r420 */
+ 0x01080000, /* r423 */
+ 0x01080000, /* rv410 */
+ 0x00780000, /* rs400 */
+ 0x00780000, /* rs480 */
+};
+
+static struct radeon_encoder_tv_dac
+ *radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev)
+{
+ struct radeon_encoder_tv_dac *tv_dac = NULL;
+
+ tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+
+ if (!tv_dac)
+ return NULL;
+
+ tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family];
+ if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250))
+ tv_dac->ps2_tvdac_adj = 0x00880000;
+ tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+
+ return tv_dac;
+}
+
+struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
+ radeon_encoder
+ *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t dac_info;
+ uint8_t rev, bg, dac;
+ struct radeon_encoder_tv_dac *tv_dac = NULL;
+
+ if (rdev->bios == NULL)
+ return radeon_legacy_get_tv_dac_info_from_table(rdev);
+
+ /* first check TV table */
+ dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+ if (dac_info) {
+ tv_dac =
+ kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+
+ if (!tv_dac)
+ return NULL;
+
+ rev = RBIOS8(dac_info + 0x3);
+ if (rev > 4) {
+ bg = RBIOS8(dac_info + 0xc) & 0xf;
+ dac = RBIOS8(dac_info + 0xd) & 0xf;
+ tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+ bg = RBIOS8(dac_info + 0xe) & 0xf;
+ dac = RBIOS8(dac_info + 0xf) & 0xf;
+ tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+ bg = RBIOS8(dac_info + 0x10) & 0xf;
+ dac = RBIOS8(dac_info + 0x11) & 0xf;
+ tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+ } else if (rev > 1) {
+ bg = RBIOS8(dac_info + 0xc) & 0xf;
+ dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
+ tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+ bg = RBIOS8(dac_info + 0xd) & 0xf;
+ dac = (RBIOS8(dac_info + 0xd) >> 4) & 0xf;
+ tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+ bg = RBIOS8(dac_info + 0xe) & 0xf;
+ dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
+ tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+ }
+
+ tv_dac->tv_std = radeon_combios_get_tv_info(encoder);
+
+ } else {
+ /* then check CRT table */
+ dac_info =
+ combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+ if (dac_info) {
+ tv_dac =
+ kzalloc(sizeof(struct radeon_encoder_tv_dac),
+ GFP_KERNEL);
+
+ if (!tv_dac)
+ return NULL;
+
+ rev = RBIOS8(dac_info) & 0x3;
+ if (rev < 2) {
+ bg = RBIOS8(dac_info + 0x3) & 0xf;
+ dac = (RBIOS8(dac_info + 0x3) >> 4) & 0xf;
+ tv_dac->ps2_tvdac_adj =
+ (bg << 16) | (dac << 20);
+ tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ } else {
+ bg = RBIOS8(dac_info + 0x4) & 0xf;
+ dac = RBIOS8(dac_info + 0x5) & 0xf;
+ tv_dac->ps2_tvdac_adj =
+ (bg << 16) | (dac << 20);
+ tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ }
+ } else {
+ DRM_INFO("No TV DAC info found in BIOS\n");
+ return radeon_legacy_get_tv_dac_info_from_table(rdev);
+ }
+ }
+
+ return tv_dac;
+}
+
+static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
+ radeon_device
+ *rdev)
+{
+ struct radeon_encoder_lvds *lvds = NULL;
+ uint32_t fp_vert_stretch, fp_horz_stretch;
+ uint32_t ppll_div_sel, ppll_val;
+
+ lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
+
+ if (!lvds)
+ return NULL;
+
+ fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH);
+ fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH);
+
+ if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
+ lvds->native_mode.panel_yres =
+ ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
+ RADEON_VERT_PANEL_SHIFT) + 1;
+ else
+ lvds->native_mode.panel_yres =
+ (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1;
+
+ if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE)
+ lvds->native_mode.panel_xres =
+ (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >>
+ RADEON_HORZ_PANEL_SHIFT) + 1) * 8;
+ else
+ lvds->native_mode.panel_xres =
+ ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8;
+
+ if ((lvds->native_mode.panel_xres < 640) ||
+ (lvds->native_mode.panel_yres < 480)) {
+ lvds->native_mode.panel_xres = 640;
+ lvds->native_mode.panel_yres = 480;
+ }
+
+ ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3;
+ ppll_val = RREG32_PLL(RADEON_PPLL_DIV_0 + ppll_div_sel);
+ if ((ppll_val & 0x000707ff) == 0x1bb)
+ lvds->use_bios_dividers = false;
+ else {
+ lvds->panel_ref_divider =
+ RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+ lvds->panel_post_divider = (ppll_val >> 16) & 0x7;
+ lvds->panel_fb_divider = ppll_val & 0x7ff;
+
+ if ((lvds->panel_ref_divider != 0) &&
+ (lvds->panel_fb_divider > 3))
+ lvds->use_bios_dividers = true;
+ }
+ lvds->panel_vcc_delay = 200;
+
+ DRM_INFO("Panel info derived from registers\n");
+ DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres,
+ lvds->native_mode.panel_yres);
+
+ return lvds;
+}
+
+struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
+ *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t lcd_info;
+ uint32_t panel_setup;
+ char stmp[30];
+ int tmp, i;
+ struct radeon_encoder_lvds *lvds = NULL;
+
+ if (rdev->bios == NULL)
+ return radeon_legacy_get_lvds_info_from_regs(rdev);
+
+ lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
+
+ if (lcd_info) {
+ lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
+
+ if (!lvds)
+ return NULL;
+
+ for (i = 0; i < 24; i++)
+ stmp[i] = RBIOS8(lcd_info + i + 1);
+ stmp[24] = 0;
+
+ DRM_INFO("Panel ID String: %s\n", stmp);
+
+ lvds->native_mode.panel_xres = RBIOS16(lcd_info + 0x19);
+ lvds->native_mode.panel_yres = RBIOS16(lcd_info + 0x1b);
+
+ DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres,
+ lvds->native_mode.panel_yres);
+
+ lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
+ if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
+ lvds->panel_vcc_delay = 2000;
+
+ lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
+ lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
+ lvds->panel_blon_delay = (RBIOS16(lcd_info + 0x38) >> 4) & 0xf;
+
+ lvds->panel_ref_divider = RBIOS16(lcd_info + 0x2e);
+ lvds->panel_post_divider = RBIOS8(lcd_info + 0x30);
+ lvds->panel_fb_divider = RBIOS16(lcd_info + 0x31);
+ if ((lvds->panel_ref_divider != 0) &&
+ (lvds->panel_fb_divider > 3))
+ lvds->use_bios_dividers = true;
+
+ panel_setup = RBIOS32(lcd_info + 0x39);
+ lvds->lvds_gen_cntl = 0xff00;
+ if (panel_setup & 0x1)
+ lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_FORMAT;
+
+ if ((panel_setup >> 4) & 0x1)
+ lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_TYPE;
+
+ switch ((panel_setup >> 8) & 0x7) {
+ case 0:
+ lvds->lvds_gen_cntl |= RADEON_LVDS_NO_FM;
+ break;
+ case 1:
+ lvds->lvds_gen_cntl |= RADEON_LVDS_2_GREY;
+ break;
+ case 2:
+ lvds->lvds_gen_cntl |= RADEON_LVDS_4_GREY;
+ break;
+ default:
+ break;
+ }
+
+ if ((panel_setup >> 16) & 0x1)
+ lvds->lvds_gen_cntl |= RADEON_LVDS_FP_POL_LOW;
+
+ if ((panel_setup >> 17) & 0x1)
+ lvds->lvds_gen_cntl |= RADEON_LVDS_LP_POL_LOW;
+
+ if ((panel_setup >> 18) & 0x1)
+ lvds->lvds_gen_cntl |= RADEON_LVDS_DTM_POL_LOW;
+
+ if ((panel_setup >> 23) & 0x1)
+ lvds->lvds_gen_cntl |= RADEON_LVDS_BL_CLK_SEL;
+
+ lvds->lvds_gen_cntl |= (panel_setup & 0xf0000000);
+
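+		/* scan the BIOS mode table (up to 32 entries) for the entry
+		 * matching the native panel resolution and derive blanking,
+		 * sync and dotclock values from it.
+		 */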
+ for (i = 0; i < 32; i++) {
+ tmp = RBIOS16(lcd_info + 64 + i * 2);
+ if (tmp == 0)
+ break;
+
+ if ((RBIOS16(tmp) == lvds->native_mode.panel_xres) &&
+ (RBIOS16(tmp + 2) ==
+ lvds->native_mode.panel_yres)) {
+ lvds->native_mode.hblank =
+ (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+ lvds->native_mode.hoverplus =
+ (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) -
+ 1) * 8;
+ lvds->native_mode.hsync_width =
+ RBIOS8(tmp + 23) * 8;
+
+ lvds->native_mode.vblank = (RBIOS16(tmp + 24) -
+ RBIOS16(tmp + 26));
+ lvds->native_mode.voverplus =
+ ((RBIOS16(tmp + 28) & 0x7ff) -
+ RBIOS16(tmp + 26));
+ lvds->native_mode.vsync_width =
+ ((RBIOS16(tmp + 28) & 0xf800) >> 11);
+ lvds->native_mode.dotclock =
+ RBIOS16(tmp + 9) * 10;
+ lvds->native_mode.flags = 0;
+ }
+ }
+ encoder->native_mode = lvds->native_mode;
+ } else {
+ DRM_INFO("No panel info found in BIOS\n");
+ return radeon_legacy_get_lvds_info_from_regs(rdev);
+ }
+ return lvds;
+}
+
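+/* fallback TMDS PLL settings per ASIC family, used when no BIOS is
+ * available: each entry pairs a pixel-clock threshold with the PLL value
+ * used below it, with 0xffffffff as the catch-all.
+ */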
+static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
+ {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_R100 */
+ {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_RV100 */
+ {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RS100 */
+ {{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_RV200 */
+ {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_RS200 */
+ {{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_R200 */
+ {{15500, 0x81b}, {0xffffffff, 0x83f}, {0, 0}, {0, 0}}, /* CHIP_RV250 */
+ {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RS300 */
+ {{13000, 0x400f4}, {15000, 0x400f7}, {0xffffffff, 0x40111}, {0, 0}}, /* CHIP_RV280 */
+ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R300 */
+ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R350 */
+ {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RV350 */
+ {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RV380 */
+ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */
+ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */
+ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */
+ {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS400 */
+ {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS480 */
+};
+
+static struct radeon_encoder_int_tmds
+ *radeon_legacy_get_tmds_info_from_table(struct radeon_device *rdev)
+{
+ int i;
+ struct radeon_encoder_int_tmds *tmds = NULL;
+
+ tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+
+ if (!tmds)
+ return NULL;
+
+ for (i = 0; i < 4; i++) {
+ tmds->tmds_pll[i].value =
+ default_tmds_pll[rdev->family][i].value;
+ tmds->tmds_pll[i].freq = default_tmds_pll[rdev->family][i].freq;
+ }
+
+ return tmds;
+}
+
+struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct
+ radeon_encoder
+ *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t tmds_info;
+ int i, n;
+ uint8_t ver;
+ struct radeon_encoder_int_tmds *tmds = NULL;
+
+ if (rdev->bios == NULL)
+ return radeon_legacy_get_tmds_info_from_table(rdev);
+
+ tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+
+ if (tmds_info) {
+ tmds =
+ kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+
+ if (!tmds)
+ return NULL;
+
+ ver = RBIOS8(tmds_info);
+ DRM_INFO("DFP table revision: %d\n", ver);
+ if (ver == 3) {
+ n = RBIOS8(tmds_info + 5) + 1;
+ if (n > 4)
+ n = 4;
+ for (i = 0; i < n; i++) {
+ tmds->tmds_pll[i].value =
+ RBIOS32(tmds_info + i * 10 + 0x08);
+ tmds->tmds_pll[i].freq =
+ RBIOS16(tmds_info + i * 10 + 0x10);
+ DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
+ tmds->tmds_pll[i].freq,
+ tmds->tmds_pll[i].value);
+ }
+ } else if (ver == 4) {
+ int stride = 0;
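+			/* rev 4 tables pack the first PLL entry in 10 bytes
+			 * and subsequent entries in 6, hence the variable
+			 * stride.
+			 */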
+ n = RBIOS8(tmds_info + 5) + 1;
+ if (n > 4)
+ n = 4;
+ for (i = 0; i < n; i++) {
+ tmds->tmds_pll[i].value =
+ RBIOS32(tmds_info + stride + 0x08);
+ tmds->tmds_pll[i].freq =
+ RBIOS16(tmds_info + stride + 0x10);
+ if (i == 0)
+ stride += 10;
+ else
+ stride += 6;
+ DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
+ tmds->tmds_pll[i].freq,
+ tmds->tmds_pll[i].value);
+ }
+ }
+ } else
+ DRM_INFO("No TMDS info found in BIOS\n");
+ return tmds;
+}
+
+void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t ext_tmds_info;
+ uint8_t ver;
+
+ if (rdev->bios == NULL)
+ return;
+
+ ext_tmds_info =
+ combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (ext_tmds_info) {
+ ver = RBIOS8(ext_tmds_info);
+ DRM_INFO("External TMDS Table revision: %d\n", ver);
+		/* TODO */
+ }
+}
+
+bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_i2c_bus_rec ddc_i2c;
+
+ rdev->mode_info.connector_table = radeon_connector_table;
+ if (rdev->mode_info.connector_table == CT_NONE) {
+#ifdef CONFIG_PPC_PMAC
+ if (machine_is_compatible("PowerBook3,3")) {
+ /* powerbook with VGA */
+ rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
+ } else if (machine_is_compatible("PowerBook3,4") ||
+ machine_is_compatible("PowerBook3,5")) {
+ /* powerbook with internal tmds */
+ rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
+ } else if (machine_is_compatible("PowerBook5,1") ||
+ machine_is_compatible("PowerBook5,2") ||
+ machine_is_compatible("PowerBook5,3") ||
+ machine_is_compatible("PowerBook5,4") ||
+ machine_is_compatible("PowerBook5,5")) {
+ /* powerbook with external single link tmds (sil164) */
+ rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+ } else if (machine_is_compatible("PowerBook5,6")) {
+ /* powerbook with external dual or single link tmds */
+ rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+ } else if (machine_is_compatible("PowerBook5,7") ||
+ machine_is_compatible("PowerBook5,8") ||
+ machine_is_compatible("PowerBook5,9")) {
+ /* PowerBook6,2 ? */
+ /* powerbook with external dual link tmds (sil1178?) */
+ rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+ } else if (machine_is_compatible("PowerBook4,1") ||
+ machine_is_compatible("PowerBook4,2") ||
+ machine_is_compatible("PowerBook4,3") ||
+ machine_is_compatible("PowerBook6,3") ||
+ machine_is_compatible("PowerBook6,5") ||
+ machine_is_compatible("PowerBook6,7")) {
+ /* ibook */
+ rdev->mode_info.connector_table = CT_IBOOK;
+ } else if (machine_is_compatible("PowerMac4,4")) {
+ /* emac */
+ rdev->mode_info.connector_table = CT_EMAC;
+ } else if (machine_is_compatible("PowerMac10,1")) {
+ /* mini with internal tmds */
+ rdev->mode_info.connector_table = CT_MINI_INTERNAL;
+ } else if (machine_is_compatible("PowerMac10,2")) {
+ /* mini with external tmds */
+ rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
+ } else if (machine_is_compatible("PowerMac12,1")) {
+ /* PowerMac8,1 ? */
+ /* imac g5 isight */
+ rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+ } else
+#endif /* CONFIG_PPC_PMAC */
+ rdev->mode_info.connector_table = CT_GENERIC;
+ }
+
+ switch (rdev->mode_info.connector_table) {
+ case CT_GENERIC:
+ DRM_INFO("Connector Table: %d (generic)\n",
+ rdev->mode_info.connector_table);
+ /* these are the most common settings */
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA,
+ &ddc_i2c);
+ } else if (rdev->flags & RADEON_IS_MOBILITY) {
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS,
+ &ddc_i2c);
+
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA,
+ &ddc_i2c);
+ } else {
+ /* DVI-I - tv dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII,
+ &ddc_i2c);
+
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA,
+ &ddc_i2c);
+ }
+
+ if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+ /* TV - tv dac */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2,
+ ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c);
+ }
+ break;
+ case CT_IBOOK:
+ DRM_INFO("Connector Table: %d (ibook)\n",
+ rdev->mode_info.connector_table);
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+ /* VGA - TV DAC */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ /* TV - TV DAC */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c);
+ break;
+ case CT_POWERBOOK_EXTERNAL:
+ DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
+ rdev->mode_info.connector_table);
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+ /* DVI-I - primary dac, ext tmds */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_DFP2_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP2_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP2_SUPPORT |
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+ /* TV - TV DAC */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c);
+ break;
+ case CT_POWERBOOK_INTERNAL:
+ DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
+ rdev->mode_info.connector_table);
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+ /* DVI-I - primary dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+ /* TV - TV DAC */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c);
+ break;
+ case CT_POWERBOOK_VGA:
+ DRM_INFO("Connector Table: %d (powerbook vga)\n",
+ rdev->mode_info.connector_table);
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ /* TV - TV DAC */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c);
+ break;
+ case CT_MINI_EXTERNAL:
+ DRM_INFO("Connector Table: %d (mini external tmds)\n",
+ rdev->mode_info.connector_table);
+ /* DVI-I - tv dac, ext tmds */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_DFP2_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP2_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP2_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+ /* TV - TV DAC */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c);
+ break;
+ case CT_MINI_INTERNAL:
+ DRM_INFO("Connector Table: %d (mini internal tmds)\n",
+ rdev->mode_info.connector_table);
+ /* DVI-I - tv dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+ /* TV - TV DAC */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c);
+ break;
+ case CT_IMAC_G5_ISIGHT:
+ DRM_INFO("Connector Table: %d (imac g5 isight)\n",
+ rdev->mode_info.connector_table);
+ /* DVI-D - int tmds */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVID, &ddc_i2c);
+ /* VGA - tv dac */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ /* TV - TV DAC */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c);
+ break;
+ case CT_EMAC:
+ DRM_INFO("Connector Table: %d (emac)\n",
+ rdev->mode_info.connector_table);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ /* VGA - tv dac */
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ /* TV - TV DAC */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c);
+ break;
+ default:
+ DRM_INFO("Connector table: %d (invalid)\n",
+ rdev->mode_info.connector_table);
+ return false;
+ }
+
+ radeon_link_encoder_connector(dev);
+
+ return true;
+}
+
+static bool radeon_apply_legacy_quirks(struct drm_device *dev,
+ int bios_index,
+ enum radeon_combios_connector
+ *legacy_connector,
+ struct radeon_i2c_bus_rec *ddc_i2c)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* XPRESS DDC quirks */
+ if ((rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480) &&
+ ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+ *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ else if ((rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480) &&
+ ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
+ ddc_i2c->valid = true;
+ ddc_i2c->mask_clk_mask = (0x20 << 8);
+ ddc_i2c->mask_data_mask = 0x80;
+ ddc_i2c->a_clk_mask = (0x20 << 8);
+ ddc_i2c->a_data_mask = 0x80;
+ ddc_i2c->put_clk_mask = (0x20 << 8);
+ ddc_i2c->put_data_mask = 0x80;
+ ddc_i2c->get_clk_mask = (0x20 << 8);
+ ddc_i2c->get_data_mask = 0x80;
+ ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK;
+ ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK;
+ ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A;
+ ddc_i2c->a_data_reg = RADEON_GPIOPAD_A;
+ ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN;
+ ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN;
+ ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG;
+ ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG;
+ }
+
+	/* Certain IBM RN50 chipsets have a BIOS that reports two VGA
+	 * connectors, one with VGA DDC and one with CRT2 DDC; drop the
+	 * CRT2 DDC one.
+	 */
+ if (dev->pdev->device == 0x515e &&
+ dev->pdev->subsystem_vendor == 0x1014) {
+ if (*legacy_connector == CONNECTOR_CRT_LEGACY &&
+ ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+ return false;
+ }
+
+ /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */
+ if (dev->pdev->device == 0x5159 &&
+ dev->pdev->subsystem_vendor == 0x1002 &&
+ dev->pdev->subsystem_device == 0x013a) {
+ if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
+ *legacy_connector = CONNECTOR_CRT_LEGACY;
+
+ }
+
+ /* X300 card with extra non-existent DVI port */
+ if (dev->pdev->device == 0x5B60 &&
+ dev->pdev->subsystem_vendor == 0x17af &&
+ dev->pdev->subsystem_device == 0x201e && bios_index == 2) {
+ if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
+ return false;
+ }
+
+ return true;
+}
+
+bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t conn_info, entry, devices;
+ uint16_t tmp;
+ enum radeon_combios_ddc ddc_type;
+ enum radeon_combios_connector connector;
+ int i = 0;
+ struct radeon_i2c_bus_rec ddc_i2c;
+
+ if (rdev->bios == NULL)
+ return false;
+
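+	/* the connector info table holds up to four 16-bit entries: bits
+	 * 12-15 give the legacy connector type, bits 8-11 the DDC line,
+	 * bit 0 picks the TV DAC over the primary DAC and bit 4 picks
+	 * external over internal TMDS.
+	 */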
+ conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
+ if (conn_info) {
+ for (i = 0; i < 4; i++) {
+ entry = conn_info + 2 + i * 2;
+
+ if (!RBIOS16(entry))
+ break;
+
+ tmp = RBIOS16(entry);
+
+ connector = (tmp >> 12) & 0xf;
+
+ ddc_type = (tmp >> 8) & 0xf;
+ switch (ddc_type) {
+ case DDC_MONID:
+ ddc_i2c =
+ combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ break;
+ case DDC_DVI:
+ ddc_i2c =
+ combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ break;
+ case DDC_VGA:
+ ddc_i2c =
+ combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ break;
+ case DDC_CRT2:
+ ddc_i2c =
+ combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ break;
+ default:
+ break;
+ }
+
+ radeon_apply_legacy_quirks(dev, i, &connector,
+ &ddc_i2c);
+
+ switch (connector) {
+ case CONNECTOR_PROPRIETARY_LEGACY:
+ if ((tmp >> 4) & 0x1)
+ devices = ATOM_DEVICE_DFP2_SUPPORT;
+ else
+ devices = ATOM_DEVICE_DFP1_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id
+ (dev, devices, 0),
+ devices);
+ radeon_add_legacy_connector(dev, i, devices,
+ legacy_connector_convert
+ [connector],
+ &ddc_i2c);
+ break;
+ case CONNECTOR_CRT_LEGACY:
+ if (tmp & 0x1) {
+ devices = ATOM_DEVICE_CRT2_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id
+ (dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ } else {
+ devices = ATOM_DEVICE_CRT1_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id
+ (dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ }
+ radeon_add_legacy_connector(dev,
+ i,
+ devices,
+ legacy_connector_convert
+ [connector],
+ &ddc_i2c);
+ break;
+ case CONNECTOR_DVI_I_LEGACY:
+ devices = 0;
+ if (tmp & 0x1) {
+ devices |= ATOM_DEVICE_CRT2_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id
+ (dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ } else {
+ devices |= ATOM_DEVICE_CRT1_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id
+ (dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ }
+ if ((tmp >> 4) & 0x1) {
+ devices |= ATOM_DEVICE_DFP2_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id
+ (dev,
+ ATOM_DEVICE_DFP2_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP2_SUPPORT);
+ } else {
+ devices |= ATOM_DEVICE_DFP1_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id
+ (dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ }
+ radeon_add_legacy_connector(dev,
+ i,
+ devices,
+ legacy_connector_convert
+ [connector],
+ &ddc_i2c);
+ break;
+ case CONNECTOR_DVI_D_LEGACY:
+ if ((tmp >> 4) & 0x1)
+ devices = ATOM_DEVICE_DFP2_SUPPORT;
+ else
+ devices = ATOM_DEVICE_DFP1_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id
+ (dev, devices, 0),
+ devices);
+ radeon_add_legacy_connector(dev, i, devices,
+ legacy_connector_convert
+ [connector],
+ &ddc_i2c);
+ break;
+ case CONNECTOR_CTV_LEGACY:
+ case CONNECTOR_STV_LEGACY:
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id
+ (dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, i,
+ ATOM_DEVICE_TV1_SUPPORT,
+ legacy_connector_convert
+ [connector],
+ &ddc_i2c);
+ break;
+ default:
+ DRM_ERROR("Unknown connector type: %d\n",
+ connector);
+ continue;
+ }
+
+ }
+ } else {
+ uint16_t tmds_info =
+ combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+ if (tmds_info) {
+ DRM_DEBUG("Found DFP table, assuming DVI connector\n");
+
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ radeon_add_legacy_connector(dev,
+ 0,
+ ATOM_DEVICE_CRT1_SUPPORT |
+ ATOM_DEVICE_DFP1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII,
+ &ddc_i2c);
+ } else {
+ DRM_DEBUG("No connector info found\n");
+ return false;
+ }
+ }
+
+ if (rdev->flags & RADEON_IS_MOBILITY || rdev->flags & RADEON_IS_IGP) {
+ uint16_t lcd_info =
+ combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
+ if (lcd_info) {
+ uint16_t lcd_ddc_info =
+ combios_get_table_offset(dev,
+ COMBIOS_LCD_DDC_INFO_TABLE);
+
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+
+ if (lcd_ddc_info) {
+ ddc_type = RBIOS8(lcd_ddc_info + 2);
+ switch (ddc_type) {
+ case DDC_MONID:
+ ddc_i2c =
+ combios_setup_i2c_bus
+ (RADEON_GPIO_MONID);
+ break;
+ case DDC_DVI:
+ ddc_i2c =
+ combios_setup_i2c_bus
+ (RADEON_GPIO_DVI_DDC);
+ break;
+ case DDC_VGA:
+ ddc_i2c =
+ combios_setup_i2c_bus
+ (RADEON_GPIO_VGA_DDC);
+ break;
+ case DDC_CRT2:
+ ddc_i2c =
+ combios_setup_i2c_bus
+ (RADEON_GPIO_CRT2_DDC);
+ break;
+ case DDC_LCD:
+ ddc_i2c =
+ combios_setup_i2c_bus
+ (RADEON_LCD_GPIO_MASK);
+ ddc_i2c.mask_clk_mask =
+ RBIOS32(lcd_ddc_info + 3);
+ ddc_i2c.mask_data_mask =
+ RBIOS32(lcd_ddc_info + 7);
+ ddc_i2c.a_clk_mask =
+ RBIOS32(lcd_ddc_info + 3);
+ ddc_i2c.a_data_mask =
+ RBIOS32(lcd_ddc_info + 7);
+ ddc_i2c.put_clk_mask =
+ RBIOS32(lcd_ddc_info + 3);
+ ddc_i2c.put_data_mask =
+ RBIOS32(lcd_ddc_info + 7);
+ ddc_i2c.get_clk_mask =
+ RBIOS32(lcd_ddc_info + 3);
+ ddc_i2c.get_data_mask =
+ RBIOS32(lcd_ddc_info + 7);
+ break;
+ case DDC_GPIO:
+ ddc_i2c =
+ combios_setup_i2c_bus
+ (RADEON_MDGPIO_EN_REG);
+ ddc_i2c.mask_clk_mask =
+ RBIOS32(lcd_ddc_info + 3);
+ ddc_i2c.mask_data_mask =
+ RBIOS32(lcd_ddc_info + 7);
+ ddc_i2c.a_clk_mask =
+ RBIOS32(lcd_ddc_info + 3);
+ ddc_i2c.a_data_mask =
+ RBIOS32(lcd_ddc_info + 7);
+ ddc_i2c.put_clk_mask =
+ RBIOS32(lcd_ddc_info + 3);
+ ddc_i2c.put_data_mask =
+ RBIOS32(lcd_ddc_info + 7);
+ ddc_i2c.get_clk_mask =
+ RBIOS32(lcd_ddc_info + 3);
+ ddc_i2c.get_data_mask =
+ RBIOS32(lcd_ddc_info + 7);
+ break;
+ default:
+ ddc_i2c.valid = false;
+ break;
+ }
+ DRM_DEBUG("LCD DDC Info Table found!\n");
+ } else
+ ddc_i2c.valid = false;
+
+ radeon_add_legacy_connector(dev,
+ 5,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS,
+ &ddc_i2c);
+ }
+ }
+
+ /* check TV table */
+ if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+ uint32_t tv_info =
+ combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+ if (tv_info) {
+ if (RBIOS8(tv_info + 6) == 'T') {
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id
+ (dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 6,
+ ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c);
+ }
+ }
+ }
+
+ radeon_link_encoder_connector(dev);
+
+ return true;
+}
+
+static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
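+	/* each table entry starts with a 16-bit header: a 3-bit command in
+	 * the top bits and a 13-bit register offset; the operands that
+	 * follow (write value, and/or masks, delay count) depend on the
+	 * command.
+	 */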
+ if (offset) {
+ while (RBIOS16(offset)) {
+ uint16_t cmd = ((RBIOS16(offset) & 0xe000) >> 13);
+ uint32_t addr = (RBIOS16(offset) & 0x1fff);
+ uint32_t val, and_mask, or_mask;
+ uint32_t tmp;
+
+ offset += 2;
+ switch (cmd) {
+ case 0:
+ val = RBIOS32(offset);
+ offset += 4;
+ WREG32(addr, val);
+ break;
+ case 1:
+ val = RBIOS32(offset);
+ offset += 4;
+ WREG32(addr, val);
+ break;
+ case 2:
+ and_mask = RBIOS32(offset);
+ offset += 4;
+ or_mask = RBIOS32(offset);
+ offset += 4;
+ tmp = RREG32(addr);
+ tmp &= and_mask;
+ tmp |= or_mask;
+ WREG32(addr, tmp);
+ break;
+ case 3:
+ and_mask = RBIOS32(offset);
+ offset += 4;
+ or_mask = RBIOS32(offset);
+ offset += 4;
+ tmp = RREG32(addr);
+ tmp &= and_mask;
+ tmp |= or_mask;
+ WREG32(addr, tmp);
+ break;
+ case 4:
+ val = RBIOS16(offset);
+ offset += 2;
+ udelay(val);
+ break;
+ case 5:
+ val = RBIOS16(offset);
+ offset += 2;
+ switch (addr) {
+ case 8:
+ while (val--) {
+ if (!
+ (RREG32_PLL
+ (RADEON_CLK_PWRMGT_CNTL) &
+ RADEON_MC_BUSY))
+ break;
+ }
+ break;
+ case 9:
+ while (val--) {
+ if ((RREG32(RADEON_MC_STATUS) &
+ RADEON_MC_IDLE))
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
+static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
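+	/* PLL table entries start with an 8-bit header: a 2-bit command in
+	 * the top bits and a 6-bit PLL register index; commands write a
+	 * value, read-modify-write a byte, or wait on a clock condition.
+	 */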
+ if (offset) {
+ while (RBIOS8(offset)) {
+ uint8_t cmd = ((RBIOS8(offset) & 0xc0) >> 6);
+ uint8_t addr = (RBIOS8(offset) & 0x3f);
+ uint32_t val, shift, tmp;
+ uint32_t and_mask, or_mask;
+
+ offset++;
+ switch (cmd) {
+ case 0:
+ val = RBIOS32(offset);
+ offset += 4;
+ WREG32_PLL(addr, val);
+ break;
+ case 1:
+ shift = RBIOS8(offset) * 8;
+ offset++;
+ and_mask = RBIOS8(offset) << shift;
+ and_mask |= ~(0xff << shift);
+ offset++;
+ or_mask = RBIOS8(offset) << shift;
+ offset++;
+ tmp = RREG32_PLL(addr);
+ tmp &= and_mask;
+ tmp |= or_mask;
+ WREG32_PLL(addr, tmp);
+ break;
+ case 2:
+ case 3:
+ tmp = 1000;
+ switch (addr) {
+ case 1:
+ udelay(150);
+ break;
+ case 2:
+ udelay(1000);
+ break;
+ case 3:
+ while (tmp--) {
+ if (!
+ (RREG32_PLL
+ (RADEON_CLK_PWRMGT_CNTL) &
+ RADEON_MC_BUSY))
+ break;
+ }
+ break;
+ case 4:
+ while (tmp--) {
+ if (RREG32_PLL
+ (RADEON_CLK_PWRMGT_CNTL) &
+ RADEON_DLL_READY)
+ break;
+ }
+ break;
+ case 5:
+ tmp =
+ RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
+ if (tmp & RADEON_CG_NO1_DEBUG_0) {
+#if 0
+ uint32_t mclk_cntl =
+ RREG32_PLL
+ (RADEON_MCLK_CNTL);
+ mclk_cntl &= 0xffff0000;
+ /*mclk_cntl |= 0x00001111;*//* ??? */
+ WREG32_PLL(RADEON_MCLK_CNTL,
+ mclk_cntl);
+ udelay(10000);
+#endif
+ WREG32_PLL
+ (RADEON_CLK_PWRMGT_CNTL,
+ tmp &
+ ~RADEON_CG_NO1_DEBUG_0);
+ udelay(10000);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
+static void combios_parse_ram_reset_table(struct drm_device *dev,
+ uint16_t offset)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
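+	/* the table is a byte stream terminated by 0xff: opcode 0x0f waits
+	 * for the memory channels to report power-up complete, any other
+	 * value programs the SDRAM mode register.
+	 */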
+ if (offset) {
+ uint8_t val = RBIOS8(offset);
+ while (val != 0xff) {
+ offset++;
+
+ if (val == 0x0f) {
+ uint32_t channel_complete_mask;
+
+ if (ASIC_IS_R300(rdev))
+ channel_complete_mask =
+ R300_MEM_PWRUP_COMPLETE;
+ else
+ channel_complete_mask =
+ RADEON_MEM_PWRUP_COMPLETE;
+ tmp = 20000;
+ while (tmp--) {
+ if ((RREG32(RADEON_MEM_STR_CNTL) &
+ channel_complete_mask) ==
+ channel_complete_mask)
+ break;
+ }
+ } else {
+ uint32_t or_mask = RBIOS16(offset);
+ offset += 2;
+
+ tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+ tmp &= RADEON_SDRAM_MODE_MASK;
+ tmp |= or_mask;
+ WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
+
+ or_mask = val << 24;
+ tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+ tmp &= RADEON_B3MEM_RESET_MASK;
+ tmp |= or_mask;
+ WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
+ }
+ val = RBIOS8(offset);
+ }
+ }
+}
+
+static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
+ int mem_addr_mapping)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t mem_cntl;
+ uint32_t mem_size;
+ uint32_t addr = 0;
+
+ mem_cntl = RREG32(RADEON_MEM_CNTL);
+ if (mem_cntl & RV100_HALF_MODE)
+ ram /= 2;
+ mem_size = ram;
+ mem_cntl &= ~(0xff << 8);
+ mem_cntl |= (mem_addr_mapping & 0xff) << 8;
+ WREG32(RADEON_MEM_CNTL, mem_cntl);
+ RREG32(RADEON_MEM_CNTL);
+
+ /* sdram reset ? */
+
+ /* something like this???? */
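+	/* the test pattern goes through the MM_INDEX/MM_DATA aperture
+	 * rather than the framebuffer mapping; a readback mismatch aborts
+	 * the probe for this address mapping.
+	 */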
+ while (ram--) {
+ addr = ram * 1024 * 1024;
+ /* write to each page */
+ WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
+ WREG32(RADEON_MM_DATA, 0xdeadbeef);
+ /* read back and verify */
+ WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
+ if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
+ return 0;
+ }
+
+ return mem_size;
+}
+
+static void combios_write_ram_size(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint8_t rev;
+ uint16_t offset;
+ uint32_t mem_size = 0;
+ uint32_t mem_cntl = 0;
+
+ /* should do something smarter here I guess... */
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ /* first check detected mem table */
+ offset = combios_get_table_offset(dev, COMBIOS_DETECTED_MEM_TABLE);
+ if (offset) {
+ rev = RBIOS8(offset);
+ if (rev < 3) {
+ mem_cntl = RBIOS32(offset + 1);
+ mem_size = RBIOS16(offset + 5);
+ if (((rdev->flags & RADEON_FAMILY_MASK) < CHIP_R200) &&
+ ((dev->pdev->device != 0x515e)
+ && (dev->pdev->device != 0x5969)))
+ WREG32(RADEON_MEM_CNTL, mem_cntl);
+ }
+ }
+
+ if (!mem_size) {
+ offset =
+ combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
+ if (offset) {
+ rev = RBIOS8(offset - 1);
+ if (rev < 1) {
+ if (((rdev->flags & RADEON_FAMILY_MASK) <
+ CHIP_R200)
+ && ((dev->pdev->device != 0x515e)
+ && (dev->pdev->device != 0x5969))) {
+ int ram = 0;
+ int mem_addr_mapping = 0;
+
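+					/* walk the mem config table and
+					 * probe each ram size / address
+					 * mapping pair until one verifies.
+					 */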
+ while (RBIOS8(offset)) {
+ ram = RBIOS8(offset);
+ mem_addr_mapping =
+ RBIOS8(offset + 1);
+ if (mem_addr_mapping != 0x25)
+ ram *= 2;
+ mem_size =
+ combios_detect_ram(dev, ram,
+ mem_addr_mapping);
+ if (mem_size)
+ break;
+ offset += 2;
+ }
+ } else
+ mem_size = RBIOS8(offset);
+ } else {
+ mem_size = RBIOS8(offset);
+ mem_size *= 2; /* convert to MB */
+ }
+ }
+ }
+
+ mem_size *= (1024 * 1024); /* convert to bytes */
+ WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
+}
+
+void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable)
+{
+ uint16_t dyn_clk_info =
+ combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+
+ if (dyn_clk_info)
+ combios_parse_pll_table(dev, dyn_clk_info);
+}
+
+void radeon_combios_asic_init(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t table;
+
+ /* port hardcoded mac stuff from radeonfb */
+ if (rdev->bios == NULL)
+ return;
+
+ /* ASIC INIT 1 */
+ table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE);
+ if (table)
+ combios_parse_mmio_table(dev, table);
+
+ /* PLL INIT */
+ table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE);
+ if (table)
+ combios_parse_pll_table(dev, table);
+
+ /* ASIC INIT 2 */
+ table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE);
+ if (table)
+ combios_parse_mmio_table(dev, table);
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ /* ASIC INIT 4 */
+ table =
+ combios_get_table_offset(dev, COMBIOS_ASIC_INIT_4_TABLE);
+ if (table)
+ combios_parse_mmio_table(dev, table);
+
+ /* RAM RESET */
+ table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE);
+ if (table)
+ combios_parse_ram_reset_table(dev, table);
+
+ /* ASIC INIT 3 */
+ table =
+ combios_get_table_offset(dev, COMBIOS_ASIC_INIT_3_TABLE);
+ if (table)
+ combios_parse_mmio_table(dev, table);
+
+ /* write CONFIG_MEMSIZE */
+ combios_write_ram_size(dev);
+ }
+
+ /* DYN CLK 1 */
+ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+ if (table)
+ combios_parse_pll_table(dev, table);
+
+}
+
+void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t bios_0_scratch, bios_6_scratch, bios_7_scratch;
+
+ bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+ bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+ bios_7_scratch = RREG32(RADEON_BIOS_7_SCRATCH);
+
+ /* let the bios control the backlight */
+ bios_0_scratch &= ~RADEON_DRIVER_BRIGHTNESS_EN;
+
+ /* tell the bios not to handle mode switching */
+ bios_6_scratch |= (RADEON_DISPLAY_SWITCHING_DIS |
+ RADEON_ACC_MODE_CHANGE);
+
+ /* tell the bios a driver is loaded */
+ bios_7_scratch |= RADEON_DRV_LOADED;
+
+ WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
+ WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+ WREG32(RADEON_BIOS_7_SCRATCH, bios_7_scratch);
+}
+
+void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t bios_6_scratch;
+
+ bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+ if (lock)
+ bios_6_scratch |= RADEON_DRIVER_CRITICAL;
+ else
+ bios_6_scratch &= ~RADEON_DRIVER_CRITICAL;
+
+ WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
+
+void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_4_scratch = RREG32(RADEON_BIOS_4_SCRATCH);
+ uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
+
+ if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("TV1 connected\n");
+ /* fix me */
+ bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO;
+ /*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */
+ bios_5_scratch |= RADEON_TV1_ON;
+ bios_5_scratch |= RADEON_ACC_REQ_TV1;
+ } else {
+ DRM_DEBUG("TV1 disconnected\n");
+ bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK;
+ bios_5_scratch &= ~RADEON_TV1_ON;
+ bios_5_scratch &= ~RADEON_ACC_REQ_TV1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("LCD1 connected\n");
+ bios_4_scratch |= RADEON_LCD1_ATTACHED;
+ bios_5_scratch |= RADEON_LCD1_ON;
+ bios_5_scratch |= RADEON_ACC_REQ_LCD1;
+ } else {
+ DRM_DEBUG("LCD1 disconnected\n");
+ bios_4_scratch &= ~RADEON_LCD1_ATTACHED;
+ bios_5_scratch &= ~RADEON_LCD1_ON;
+ bios_5_scratch &= ~RADEON_ACC_REQ_LCD1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("CRT1 connected\n");
+ bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR;
+ bios_5_scratch |= RADEON_CRT1_ON;
+ bios_5_scratch |= RADEON_ACC_REQ_CRT1;
+ } else {
+ DRM_DEBUG("CRT1 disconnected\n");
+ bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK;
+ bios_5_scratch &= ~RADEON_CRT1_ON;
+ bios_5_scratch &= ~RADEON_ACC_REQ_CRT1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("CRT2 connected\n");
+ bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR;
+ bios_5_scratch |= RADEON_CRT2_ON;
+ bios_5_scratch |= RADEON_ACC_REQ_CRT2;
+ } else {
+ DRM_DEBUG("CRT2 disconnected\n");
+ bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK;
+ bios_5_scratch &= ~RADEON_CRT2_ON;
+ bios_5_scratch &= ~RADEON_ACC_REQ_CRT2;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("DFP1 connected\n");
+ bios_4_scratch |= RADEON_DFP1_ATTACHED;
+ bios_5_scratch |= RADEON_DFP1_ON;
+ bios_5_scratch |= RADEON_ACC_REQ_DFP1;
+ } else {
+ DRM_DEBUG("DFP1 disconnected\n");
+ bios_4_scratch &= ~RADEON_DFP1_ATTACHED;
+ bios_5_scratch &= ~RADEON_DFP1_ON;
+ bios_5_scratch &= ~RADEON_ACC_REQ_DFP1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG("DFP2 connected\n");
+ bios_4_scratch |= RADEON_DFP2_ATTACHED;
+ bios_5_scratch |= RADEON_DFP2_ON;
+ bios_5_scratch |= RADEON_ACC_REQ_DFP2;
+ } else {
+ DRM_DEBUG("DFP2 disconnected\n");
+ bios_4_scratch &= ~RADEON_DFP2_ATTACHED;
+ bios_5_scratch &= ~RADEON_DFP2_ON;
+ bios_5_scratch &= ~RADEON_ACC_REQ_DFP2;
+ }
+ }
+ WREG32(RADEON_BIOS_4_SCRATCH, bios_4_scratch);
+ WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
+}
+
+void
+radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
+
+ if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ bios_5_scratch &= ~RADEON_TV1_CRTC_MASK;
+ bios_5_scratch |= (crtc << RADEON_TV1_CRTC_SHIFT);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ bios_5_scratch &= ~RADEON_CRT1_CRTC_MASK;
+ bios_5_scratch |= (crtc << RADEON_CRT1_CRTC_SHIFT);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ bios_5_scratch &= ~RADEON_CRT2_CRTC_MASK;
+ bios_5_scratch |= (crtc << RADEON_CRT2_CRTC_SHIFT);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+ bios_5_scratch &= ~RADEON_LCD1_CRTC_MASK;
+ bios_5_scratch |= (crtc << RADEON_LCD1_CRTC_SHIFT);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+ bios_5_scratch &= ~RADEON_DFP1_CRTC_MASK;
+ bios_5_scratch |= (crtc << RADEON_DFP1_CRTC_SHIFT);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+ bios_5_scratch &= ~RADEON_DFP2_CRTC_MASK;
+ bios_5_scratch |= (crtc << RADEON_DFP2_CRTC_SHIFT);
+ }
+ WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
+}
+
+void
+radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+ if (on)
+ bios_6_scratch |= RADEON_TV_DPMS_ON;
+ else
+ bios_6_scratch &= ~RADEON_TV_DPMS_ON;
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+ if (on)
+ bios_6_scratch |= RADEON_CRT_DPMS_ON;
+ else
+ bios_6_scratch &= ~RADEON_CRT_DPMS_ON;
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (on)
+ bios_6_scratch |= RADEON_LCD_DPMS_ON;
+ else
+ bios_6_scratch &= ~RADEON_LCD_DPMS_ON;
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (on)
+ bios_6_scratch |= RADEON_DFP_DPMS_ON;
+ else
+ bios_6_scratch &= ~RADEON_DFP_DPMS_ON;
+ }
+ WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
new file mode 100644
index 00000000000..70ede6a52d4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -0,0 +1,603 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc_helper.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+extern void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected);
+extern void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected);
+
+static void
+radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *best_encoder = NULL;
+ struct drm_encoder *encoder = NULL;
+ struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ struct drm_mode_object *obj;
+ bool connected;
+ int i;
+
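+	/* an encoder attached to this connector is reported as connected
+	 * only when it is the best encoder and the connector itself is
+	 * connected; the scratch registers are updated accordingly.
+	 */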
+ best_encoder = connector_funcs->best_encoder(connector);
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+
+ if ((encoder == best_encoder) && (status == connector_status_connected))
+ connected = true;
+ else
+ connected = false;
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_connected_scratch_regs(connector, encoder, connected);
+ else
+ radeon_combios_connected_scratch_regs(connector, encoder, connected);
+
+ }
+}
+
+struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+	/* pick the first encoder id */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
+
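+	/* build a preferred mode from the panel timings recovered from the
+	 * BIOS or registers; this only works once resolution and dotclock
+	 * are known.
+	 */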
+ if (native_mode->panel_xres != 0 &&
+ native_mode->panel_yres != 0 &&
+ native_mode->dotclock != 0) {
+ mode = drm_mode_create(dev);
+
+ mode->hdisplay = native_mode->panel_xres;
+ mode->vdisplay = native_mode->panel_yres;
+
+ mode->htotal = mode->hdisplay + native_mode->hblank;
+ mode->hsync_start = mode->hdisplay + native_mode->hoverplus;
+ mode->hsync_end = mode->hsync_start + native_mode->hsync_width;
+ mode->vtotal = mode->vdisplay + native_mode->vblank;
+ mode->vsync_start = mode->vdisplay + native_mode->voverplus;
+ mode->vsync_end = mode->vsync_start + native_mode->vsync_width;
+ mode->clock = native_mode->dotclock;
+ mode->flags = 0;
+
+ mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
+
+ DRM_DEBUG("Adding native panel mode %s\n", mode->name);
+ }
+ return mode;
+}
+
+int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+
+static int radeon_lvds_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *encoder;
+ int ret = 0;
+ struct drm_display_mode *mode;
+
+ if (radeon_connector->ddc_bus) {
+ ret = radeon_ddc_get_modes(radeon_connector);
+ if (ret > 0) {
+ return ret;
+ }
+ }
+
+ encoder = radeon_best_single_encoder(connector);
+ if (!encoder)
+ return 0;
+
+ /* we have no EDID modes */
+ mode = radeon_fp_native_mode(encoder);
+ if (mode) {
+ ret = 1;
+ drm_mode_probed_add(connector, mode);
+ }
+ return ret;
+}
+
+static int radeon_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
+{
+ enum drm_connector_status ret = connector_status_connected;
+ /* check acpi lid status ??? */
+ radeon_connector_update_scratch_regs(connector, ret);
+ return ret;
+}
+
+static void radeon_connector_destroy(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->ddc_bus)
+ radeon_i2c_destroy(radeon_connector->ddc_bus);
+ kfree(radeon_connector->con_priv);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
+ .get_modes = radeon_lvds_get_modes,
+ .mode_valid = radeon_lvds_mode_valid,
+ .best_encoder = radeon_best_single_encoder,
+};
+
+struct drm_connector_funcs radeon_lvds_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = radeon_connector_destroy,
+ .set_property = radeon_connector_set_property,
+};
+
+static int radeon_vga_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ ret = radeon_ddc_get_modes(radeon_connector);
+
+ return ret;
+}
+
+static int radeon_vga_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+
+ return MODE_OK;
+}
+
+static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *encoder;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ bool dret;
+ enum drm_connector_status ret = connector_status_disconnected;
+
+ radeon_i2c_do_lock(radeon_connector, 1);
+ dret = radeon_ddc_probe(radeon_connector);
+ radeon_i2c_do_lock(radeon_connector, 0);
+ if (dret)
+ ret = connector_status_connected;
+ else {
+		/* if the EDID probe fails, fall back to load detection */
+ encoder = radeon_best_single_encoder(connector);
+ if (!encoder)
+ ret = connector_status_disconnected;
+ else {
+ encoder_funcs = encoder->helper_private;
+ ret = encoder_funcs->detect(encoder, connector);
+ }
+ }
+
+ radeon_connector_update_scratch_regs(connector, ret);
+ return ret;
+}
+
+struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
+ .get_modes = radeon_vga_get_modes,
+ .mode_valid = radeon_vga_mode_valid,
+ .best_encoder = radeon_best_single_encoder,
+};
+
+struct drm_connector_funcs radeon_vga_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_vga_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = radeon_connector_destroy,
+ .set_property = radeon_connector_set_property,
+};
+
+static int radeon_dvi_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ ret = radeon_ddc_get_modes(radeon_connector);
+ /* reset scratch regs here since radeon_dvi_detect doesn't check digital bit */
+ radeon_connector_update_scratch_regs(connector, connector_status_connected);
+ return ret;
+}
+
+static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *encoder;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_mode_object *obj;
+ int i;
+ enum drm_connector_status ret = connector_status_disconnected;
+ bool dret;
+
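+	/* probe DDC first; if nothing responds, fall back to each
+	 * encoder's load-detect hook.
+	 */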
+ radeon_i2c_do_lock(radeon_connector, 1);
+ dret = radeon_ddc_probe(radeon_connector);
+ radeon_i2c_do_lock(radeon_connector, 0);
+ if (dret)
+ ret = connector_status_connected;
+ else {
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->detect) {
+ ret = encoder_funcs->detect(encoder, connector);
+ if (ret == connector_status_connected) {
+ radeon_connector->use_digital = 0;
+ break;
+ }
+ }
+ }
+ }
+
+ /* updated in get modes as well since we need to know if it's analog or digital */
+ radeon_connector_update_scratch_regs(connector, ret);
+ return ret;
+}
+
+/* okay need to be smart in here about which encoder to pick */
+struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int i;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+
+ if (radeon_connector->use_digital) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
+ return encoder;
+ } else {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
+ encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
+ return encoder;
+ }
+ }
+
+	/* TODO: see if we have a default encoder */
+
+	/* then check use_digital */
+	/* otherwise just pick the first encoder */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
+ .get_modes = radeon_dvi_get_modes,
+ .mode_valid = radeon_vga_mode_valid,
+ .best_encoder = radeon_dvi_encoder,
+};
+
+struct drm_connector_funcs radeon_dvi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dvi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_connector_set_property,
+ .destroy = radeon_connector_destroy,
+};
+
+void
+radeon_add_atom_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ bool linkb,
+ uint32_t igp_lane_info)
+{
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *radeon_dig_connector;
+ uint32_t subpixel_order = SubPixelNone;
+
+ /* fixme - tv/cv/din */
+ if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
+ (connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+ (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+ (connector_type == DRM_MODE_CONNECTOR_9PinDIN))
+ return;
+
+ /* see if we already added it */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ if (radeon_connector->connector_id == connector_id) {
+ radeon_connector->devices |= supported_device;
+ return;
+ }
+ }
+
+ radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
+ if (!radeon_connector)
+ return;
+
+ connector = &radeon_connector->base;
+
+ radeon_connector->connector_id = connector_id;
+ radeon_connector->devices = supported_device;
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
+ if (!radeon_connector->ddc_bus)
+ goto failed;
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DVIA:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+ if (!radeon_connector->ddc_bus)
+ goto failed;
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+ if (!radeon_dig_connector)
+ goto failed;
+ radeon_dig_connector->linkb = linkb;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+ drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+ if (!radeon_connector->ddc_bus)
+ goto failed;
+ }
+ subpixel_order = SubPixelHorizontalRGB;
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+ if (!radeon_dig_connector)
+ goto failed;
+ radeon_dig_connector->linkb = linkb;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+ drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
+ if (!radeon_connector->ddc_bus)
+ goto failed;
+ }
+ subpixel_order = SubPixelHorizontalRGB;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+ if (!radeon_dig_connector)
+ goto failed;
+ radeon_dig_connector->linkb = linkb;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+ drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
+ if (!radeon_connector->ddc_bus)
+ goto failed;
+ }
+ subpixel_order = SubPixelHorizontalRGB;
+ break;
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+ if (!radeon_dig_connector)
+ goto failed;
+ radeon_dig_connector->linkb = linkb;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+ drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
+ if (!radeon_connector->ddc_bus)
+ goto failed;
+ }
+ subpixel_order = SubPixelHorizontalRGB;
+ break;
+ }
+
+ connector->display_info.subpixel_order = subpixel_order;
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed:
+ if (radeon_connector->ddc_bus)
+ radeon_i2c_destroy(radeon_connector->ddc_bus);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
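+
+/*
+ * Note on the sharing logic above: a single physical connector (e.g. DVI-I)
+ * is typically enumerated once for its analog (CRT) device and once for its
+ * digital (DFP) device, so the early-return path only ORs the new device bit
+ * into the existing connector instead of creating a duplicate.  A hypothetical
+ * two-pass sequence for connector_id 1:
+ *
+ *	first pass:  radeon_connector->devices  = ATOM_DEVICE_DFP1_SUPPORT;
+ *	second pass: radeon_connector->devices |= ATOM_DEVICE_CRT1_SUPPORT;
+ */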
+
+void
+radeon_add_legacy_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus)
+{
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ uint32_t subpixel_order = SubPixelNone;
+
+ /* fixme - tv/cv/din */
+ if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
+ (connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+ (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+ (connector_type == DRM_MODE_CONNECTOR_9PinDIN))
+ return;
+
+ /* see if we already added it */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ if (radeon_connector->connector_id == connector_id) {
+ radeon_connector->devices |= supported_device;
+ return;
+ }
+ }
+
+ radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
+ if (!radeon_connector)
+ return;
+
+ connector = &radeon_connector->base;
+
+ radeon_connector->connector_id = connector_id;
+ radeon_connector->devices = supported_device;
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
+ if (!radeon_connector->ddc_bus)
+ goto failed;
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DVIA:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+ if (!radeon_connector->ddc_bus)
+ goto failed;
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+ if (!radeon_connector->ddc_bus)
+ goto failed;
+ }
+ subpixel_order = SubPixelHorizontalRGB;
+ break;
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
+ if (!radeon_connector->ddc_bus)
+ goto failed;
+ }
+ subpixel_order = SubPixelHorizontalRGB;
+ break;
+ }
+
+ connector->display_info.subpixel_order = subpixel_order;
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed:
+ if (radeon_connector->ddc_bus)
+ radeon_i2c_destroy(radeon_connector->ddc_bus);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 77a7a4d8465..89c4c44169f 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2109,7 +2109,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
/* prebuild the SAREA */
sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
- ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER,
+ ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
&master_priv->sarea);
if (ret) {
DRM_ERROR("SAREA setup failed\n");
@@ -2185,9 +2185,9 @@ void radeon_commit_ring(drm_radeon_private_t *dev_priv)
/* check if the ring is padded out to 16-dword alignment */
- tail_aligned = dev_priv->ring.tail & 0xf;
+ tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1);
if (tail_aligned) {
- int num_p2 = 16 - tail_aligned;
+ int num_p2 = RADEON_RING_ALIGN - tail_aligned;
ring = dev_priv->ring.start;
/* pad with some CP_PACKET2 */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
new file mode 100644
index 00000000000..b843f9bdfb1
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2008 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+void r100_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt);
+
+int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+{
+ struct drm_device *ddev = p->rdev->ddev;
+ struct radeon_cs_chunk *chunk;
+ unsigned i, j;
+ bool duplicate;
+
+ if (p->chunk_relocs_idx == -1) {
+ return 0;
+ }
+ chunk = &p->chunks[p->chunk_relocs_idx];
+	/* FIXME: we assume that each reloc uses 4 dwords */
+ p->nrelocs = chunk->length_dw / 4;
+ p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
+ if (p->relocs_ptr == NULL) {
+ return -ENOMEM;
+ }
+ p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ if (p->relocs == NULL) {
+ return -ENOMEM;
+ }
+ for (i = 0; i < p->nrelocs; i++) {
+ struct drm_radeon_cs_reloc *r;
+
+ duplicate = false;
+ r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
+ for (j = 0; j < p->nrelocs; j++) {
+ if (r->handle == p->relocs[j].handle) {
+ p->relocs_ptr[i] = &p->relocs[j];
+ duplicate = true;
+ break;
+ }
+ }
+ if (!duplicate) {
+ p->relocs[i].gobj = drm_gem_object_lookup(ddev,
+ p->filp,
+ r->handle);
+ if (p->relocs[i].gobj == NULL) {
+ DRM_ERROR("gem object lookup failed 0x%x\n",
+ r->handle);
+ return -EINVAL;
+ }
+ p->relocs_ptr[i] = &p->relocs[i];
+ p->relocs[i].robj = p->relocs[i].gobj->driver_private;
+ p->relocs[i].lobj.robj = p->relocs[i].robj;
+ p->relocs[i].lobj.rdomain = r->read_domains;
+ p->relocs[i].lobj.wdomain = r->write_domain;
+ p->relocs[i].handle = r->handle;
+ p->relocs[i].flags = r->flags;
+ INIT_LIST_HEAD(&p->relocs[i].lobj.list);
+ radeon_object_list_add_object(&p->relocs[i].lobj,
+ &p->validated);
+ }
+ }
+ return radeon_object_list_validate(&p->validated, p->ib->fence);
+}
+
+int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+{
+ struct drm_radeon_cs *cs = data;
+ uint64_t *chunk_array_ptr;
+ unsigned size, i;
+
+ if (!cs->num_chunks) {
+ return 0;
+ }
+ /* get chunks */
+ INIT_LIST_HEAD(&p->validated);
+ p->idx = 0;
+ p->chunk_ib_idx = -1;
+ p->chunk_relocs_idx = -1;
+ p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
+ if (p->chunks_array == NULL) {
+ return -ENOMEM;
+ }
+ chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
+ if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
+ sizeof(uint64_t)*cs->num_chunks)) {
+ return -EFAULT;
+ }
+ p->nchunks = cs->num_chunks;
+ p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
+ if (p->chunks == NULL) {
+ return -ENOMEM;
+ }
+ for (i = 0; i < p->nchunks; i++) {
+ struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
+ struct drm_radeon_cs_chunk user_chunk;
+ uint32_t __user *cdata;
+
+ chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
+ if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
+ sizeof(struct drm_radeon_cs_chunk))) {
+ return -EFAULT;
+ }
+ p->chunks[i].chunk_id = user_chunk.chunk_id;
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
+ p->chunk_relocs_idx = i;
+ }
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+ p->chunk_ib_idx = i;
+ }
+ p->chunks[i].length_dw = user_chunk.length_dw;
+ cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
+
+ p->chunks[i].kdata = NULL;
+ size = p->chunks[i].length_dw * sizeof(uint32_t);
+ p->chunks[i].kdata = kzalloc(size, GFP_KERNEL);
+ if (p->chunks[i].kdata == NULL) {
+ return -ENOMEM;
+ }
+ if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
+ return -EFAULT;
+ }
+ }
+ if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+ DRM_ERROR("cs IB too big: %d\n",
+ p->chunks[p->chunk_ib_idx].length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
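+
+/*
+ * Layout the parser above expects from userspace, reconstructed from the
+ * copy_from_user calls (a sketch, not the authoritative uapi definition):
+ *
+ *	struct drm_radeon_cs cs;           - num_chunks, chunks
+ *	uint64_t chunk_ptrs[num_chunks];   - user pointers, one per chunk
+ *	struct drm_radeon_cs_chunk chunk;  - chunk_id, length_dw, chunk_data
+ *	uint32_t data[length_dw];          - pointed to by chunk_data
+ *
+ * Every chunk body is copied wholesale into kernel memory (kdata); only the
+ * RADEON_CHUNK_ID_IB and RADEON_CHUNK_ID_RELOCS ids are interpreted here.
+ */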
+
+/**
+ * radeon_cs_parser_fini() - clean parser state
+ * @parser: parser structure holding parsing context.
+ * @error: error number
+ *
+ * If error is set, unvalidate the buffers; otherwise just free the memory
+ * used by the parsing context.
+ **/
+static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+{
+ unsigned i;
+
+ if (error) {
+ radeon_object_list_unvalidate(&parser->validated);
+ } else {
+ radeon_object_list_clean(&parser->validated);
+ }
+ for (i = 0; i < parser->nrelocs; i++) {
+ if (parser->relocs[i].gobj) {
+ mutex_lock(&parser->rdev->ddev->struct_mutex);
+ drm_gem_object_unreference(parser->relocs[i].gobj);
+ mutex_unlock(&parser->rdev->ddev->struct_mutex);
+ }
+ }
+ kfree(parser->relocs);
+ kfree(parser->relocs_ptr);
+ for (i = 0; i < parser->nchunks; i++) {
+ kfree(parser->chunks[i].kdata);
+ }
+ kfree(parser->chunks);
+ kfree(parser->chunks_array);
+ radeon_ib_free(parser->rdev, &parser->ib);
+}
+
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_cs_parser parser;
+ struct radeon_cs_chunk *ib_chunk;
+ int r;
+
+ mutex_lock(&rdev->cs_mutex);
+ if (rdev->gpu_lockup) {
+ mutex_unlock(&rdev->cs_mutex);
+ return -EINVAL;
+ }
+ /* initialize parser */
+ memset(&parser, 0, sizeof(struct radeon_cs_parser));
+ parser.filp = filp;
+ parser.rdev = rdev;
+ r = radeon_cs_parser_init(&parser, data);
+ if (r) {
+ DRM_ERROR("Failed to initialize parser !\n");
+ radeon_cs_parser_fini(&parser, r);
+ mutex_unlock(&rdev->cs_mutex);
+ return r;
+ }
+ r = radeon_ib_get(rdev, &parser.ib);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ radeon_cs_parser_fini(&parser, r);
+ mutex_unlock(&rdev->cs_mutex);
+ return r;
+ }
+ r = radeon_cs_parser_relocs(&parser);
+ if (r) {
+ DRM_ERROR("Failed to parse relocation !\n");
+ radeon_cs_parser_fini(&parser, r);
+ mutex_unlock(&rdev->cs_mutex);
+ return r;
+ }
+	/* Copy the packet into the IB; the parser will read from the
+	 * input memory (cached) and write to the IB (which can be
+	 * uncached). */
+ ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+ parser.ib->length_dw = ib_chunk->length_dw;
+ memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
+ r = radeon_cs_parse(&parser);
+ if (r) {
+ DRM_ERROR("Invalid command stream !\n");
+ radeon_cs_parser_fini(&parser, r);
+ mutex_unlock(&rdev->cs_mutex);
+ return r;
+ }
+ r = radeon_ib_schedule(rdev, parser.ib);
+ if (r) {
+		DRM_ERROR("Failed to schedule IB !\n");
+ }
+ radeon_cs_parser_fini(&parser, r);
+ mutex_unlock(&rdev->cs_mutex);
+ return r;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
new file mode 100644
index 00000000000..5232441f119
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+ struct radeon_device *rdev = crtc->dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ uint32_t cur_lock;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
+ if (lock)
+ cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
+ else
+ cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
+ WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+ } else {
+ cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
+ if (lock)
+ cur_lock |= RADEON_CUR_LOCK;
+ else
+ cur_lock &= ~RADEON_CUR_LOCK;
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
+ }
+}
+
+static void radeon_hide_cursor(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
+ WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+ } else {
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+ break;
+ case 1:
+ WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+ break;
+ default:
+ return;
+ }
+ WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
+ }
+}
+
+static void radeon_show_cursor(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
+ WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
+ (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+ } else {
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+ break;
+ case 1:
+ WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+ break;
+ default:
+ return;
+ }
+
+ WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
+ (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
+ ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
+ }
+}
+
+static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
+ uint32_t gpu_addr)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+
+ if (ASIC_IS_AVIVO(rdev))
+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
+ else
+ /* offset is from DISP(2)_BASE_ADDRESS */
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr);
+}
+
+int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_gem_object *obj;
+ uint64_t gpu_addr;
+ int ret;
+
+ if (!handle) {
+ /* turn off cursor */
+ radeon_hide_cursor(crtc);
+ obj = NULL;
+ goto unpin;
+ }
+
+ if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+ DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+ return -EINVAL;
+ }
+
+ radeon_crtc->cursor_width = width;
+ radeon_crtc->cursor_height = height;
+
+ obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
+ return -EINVAL;
+ }
+
+ ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+ if (ret)
+ goto fail;
+
+ radeon_lock_cursor(crtc, true);
+ /* XXX only 27 bit offset for legacy cursor */
+ radeon_set_cursor(crtc, obj, gpu_addr);
+ radeon_show_cursor(crtc);
+ radeon_lock_cursor(crtc, false);
+
+unpin:
+ if (radeon_crtc->cursor_bo) {
+ radeon_gem_object_unpin(radeon_crtc->cursor_bo);
+ mutex_lock(&crtc->dev->struct_mutex);
+ drm_gem_object_unreference(radeon_crtc->cursor_bo);
+ mutex_unlock(&crtc->dev->struct_mutex);
+ }
+
+ radeon_crtc->cursor_bo = obj;
+ return 0;
+fail:
+ mutex_lock(&crtc->dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&crtc->dev->struct_mutex);
+
+	/* pinning failed; propagate the error instead of reporting success */
+	return ret;
+}
+
+int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+ int xorigin = 0, yorigin = 0;
+
+ if (x < 0)
+ xorigin = -x + 1;
+ if (y < 0)
+ yorigin = -y + 1;
+ if (xorigin >= CURSOR_WIDTH)
+ xorigin = CURSOR_WIDTH - 1;
+ if (yorigin >= CURSOR_HEIGHT)
+ yorigin = CURSOR_HEIGHT - 1;
+
+ radeon_lock_cursor(crtc, true);
+ if (ASIC_IS_AVIVO(rdev)) {
+ int w = radeon_crtc->cursor_width;
+ int i = 0;
+ struct drm_crtc *crtc_p;
+
+		/* avivo cursors are offset into the total surface */
+ x += crtc->x;
+ y += crtc->y;
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+		/* avivo cursor images can't end on a 128 pixel boundary or
+		 * go past the end of the frame if both crtcs are enabled
+		 */
+ list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
+ if (crtc_p->enabled)
+ i++;
+ }
+ if (i > 1) {
+ int cursor_end, frame_end;
+
+ cursor_end = x - xorigin + w;
+ frame_end = crtc->x + crtc->mode.crtc_hdisplay;
+ if (cursor_end >= frame_end) {
+ w = w - (cursor_end - frame_end);
+ if (!(frame_end & 0x7f))
+ w--;
+ } else {
+ if (!(cursor_end & 0x7f))
+ w--;
+ }
+ if (w <= 0)
+ w = 1;
+ }
+
+ WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
+ ((xorigin ? 0 : x) << 16) |
+ (yorigin ? 0 : y));
+ WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
+ ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+ } else {
+ if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+ y *= 2;
+
+ WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
+ (RADEON_CUR_LOCK
+ | (xorigin << 16)
+ | yorigin));
+ WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
+ (RADEON_CUR_LOCK
+ | ((xorigin ? 0 : x) << 16)
+ | (yorigin ? 0 : y)));
+ }
+ radeon_lock_cursor(crtc, false);
+
+ return 0;
+}
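+
+/*
+ * Worked example of the dual-crtc clamp above (numbers are hypothetical):
+ * crtc->x = 0, crtc_hdisplay = 1024, cursor x = 1000, xorigin = 0, w = 64.
+ *
+ *	cursor_end = 1000 - 0 + 64 = 1064
+ *	frame_end  = 0 + 1024     = 1024
+ *	w = 64 - (1064 - 1024)    = 24
+ *	frame_end & 0x7f == 0     => w = 23
+ *
+ * i.e. the programmed cursor width is trimmed so the image neither ends on a
+ * 128 pixel boundary nor extends past the frame.
+ */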
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
new file mode 100644
index 00000000000..5fd2b639bf6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -0,0 +1,813 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/console.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/*
+ * GPU scratch registers helpers function.
+ */
+static void radeon_scratch_init(struct radeon_device *rdev)
+{
+ int i;
+
+ /* FIXME: check this out */
+ if (rdev->family < CHIP_R300) {
+ rdev->scratch.num_reg = 5;
+ } else {
+ rdev->scratch.num_reg = 7;
+ }
+ for (i = 0; i < rdev->scratch.num_reg; i++) {
+ rdev->scratch.free[i] = true;
+ rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
+ }
+}
+
+int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
+{
+ int i;
+
+ for (i = 0; i < rdev->scratch.num_reg; i++) {
+ if (rdev->scratch.free[i]) {
+ rdev->scratch.free[i] = false;
+ *reg = rdev->scratch.reg[i];
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
+{
+ int i;
+
+ for (i = 0; i < rdev->scratch.num_reg; i++) {
+ if (rdev->scratch.reg[i] == reg) {
+ rdev->scratch.free[i] = true;
+ return;
+ }
+ }
+}
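+
+/*
+ * A minimal usage sketch for the scratch helpers above (the value written
+ * is arbitrary):
+ *
+ *	uint32_t scratch;
+ *
+ *	if (radeon_scratch_get(rdev, &scratch) == 0) {
+ *		WREG32(scratch, 0xCAFEDEAD);
+ *		... let the CP or an IB touch the register ...
+ *		radeon_scratch_free(rdev, scratch);
+ *	}
+ */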
+
+/*
+ * MC common functions
+ */
+int radeon_mc_setup(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+	/* Some chips have an "issue" with the memory controller: the
+	 * location must be aligned to the size.  We just align it down;
+	 * too bad if we walk over the top of system memory, we don't
+	 * use DMA without a remapper anyway.
+	 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP.
+	 */
+	/* FGLRX seems to set it up like this: VRAM at 0, then GART.
+	 */
+	/*
+	 * Note: from R6xx the address space is 40 bits, but here we only
+	 * use 32 bits (we have yet to see a card that would exhaust a 4G
+	 * address space).
+	 */
+ if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
+		/* vram location was already set up; try to put the gtt after
+		 * it if it fits */
+ tmp = rdev->mc.vram_location + rdev->mc.vram_size;
+ tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
+ if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
+ rdev->mc.gtt_location = tmp;
+ } else {
+ if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
+ printk(KERN_ERR "[drm] GTT too big to fit "
+ "before or after vram location.\n");
+ return -EINVAL;
+ }
+ rdev->mc.gtt_location = 0;
+ }
+ } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
+		/* gtt location was already set up; try to put vram before
+		 * it if it fits */
+ if (rdev->mc.vram_size < rdev->mc.gtt_location) {
+ rdev->mc.vram_location = 0;
+ } else {
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
+ tmp += (rdev->mc.vram_size - 1);
+ tmp &= ~(rdev->mc.vram_size - 1);
+ if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) {
+ rdev->mc.vram_location = tmp;
+ } else {
+ printk(KERN_ERR "[drm] vram too big to fit "
+ "before or after GTT location.\n");
+ return -EINVAL;
+ }
+ }
+ } else {
+ rdev->mc.vram_location = 0;
+ rdev->mc.gtt_location = rdev->mc.vram_size;
+ }
+ DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20);
+ DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
+ rdev->mc.vram_location,
+ rdev->mc.vram_location + rdev->mc.vram_size - 1);
+ DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
+ DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
+ rdev->mc.gtt_location,
+ rdev->mc.gtt_location + rdev->mc.gtt_size - 1);
+ return 0;
+}
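+
+/*
+ * Worked example of the placement math above (sizes are hypothetical):
+ * vram_location = 0, vram_size = 256MB (0x10000000), gtt_size = 512MB
+ * (0x20000000):
+ *
+ *	tmp = 0x00000000 + 0x10000000                    = 0x10000000
+ *	tmp = (tmp + 0x20000000 - 1) & ~(0x20000000 - 1) = 0x20000000
+ *
+ * so the GTT aperture lands at 512MB, aligned to its own size, just past
+ * the end of VRAM.
+ */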
+
+
+/*
+ * GPU helpers function.
+ */
+static bool radeon_card_posted(struct radeon_device *rdev)
+{
+ uint32_t reg;
+
+ /* first check CRTCs */
+ if (ASIC_IS_AVIVO(rdev)) {
+ reg = RREG32(AVIVO_D1CRTC_CONTROL) |
+ RREG32(AVIVO_D2CRTC_CONTROL);
+ if (reg & AVIVO_CRTC_EN) {
+ return true;
+ }
+ } else {
+ reg = RREG32(RADEON_CRTC_GEN_CNTL) |
+ RREG32(RADEON_CRTC2_GEN_CNTL);
+ if (reg & RADEON_CRTC_EN) {
+ return true;
+ }
+ }
+
+ /* then check MEM_SIZE, in case the crtcs are off */
+ if (rdev->family >= CHIP_R600)
+ reg = RREG32(R600_CONFIG_MEMSIZE);
+ else
+ reg = RREG32(RADEON_CONFIG_MEMSIZE);
+
+ if (reg)
+ return true;
+
+ return false;
+
+}
+
+
+/*
+ * Registers accessors functions.
+ */
+uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+ BUG_ON(1);
+ return 0;
+}
+
+void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
+ reg, v);
+ BUG_ON(1);
+}
+
+void radeon_register_accessor_init(struct radeon_device *rdev)
+{
+ rdev->mm_rreg = &r100_mm_rreg;
+ rdev->mm_wreg = &r100_mm_wreg;
+ rdev->mc_rreg = &radeon_invalid_rreg;
+ rdev->mc_wreg = &radeon_invalid_wreg;
+ rdev->pll_rreg = &radeon_invalid_rreg;
+ rdev->pll_wreg = &radeon_invalid_wreg;
+ rdev->pcie_rreg = &radeon_invalid_rreg;
+ rdev->pcie_wreg = &radeon_invalid_wreg;
+ rdev->pciep_rreg = &radeon_invalid_rreg;
+ rdev->pciep_wreg = &radeon_invalid_wreg;
+
+	/* Don't change the order as we are overriding accessors. */
+ if (rdev->family < CHIP_RV515) {
+ rdev->pcie_rreg = &rv370_pcie_rreg;
+ rdev->pcie_wreg = &rv370_pcie_wreg;
+ }
+ if (rdev->family >= CHIP_RV515) {
+ rdev->pcie_rreg = &rv515_pcie_rreg;
+ rdev->pcie_wreg = &rv515_pcie_wreg;
+ }
+ /* FIXME: not sure here */
+ if (rdev->family <= CHIP_R580) {
+ rdev->pll_rreg = &r100_pll_rreg;
+ rdev->pll_wreg = &r100_pll_wreg;
+ }
+ if (rdev->family >= CHIP_RV515) {
+ rdev->mc_rreg = &rv515_mc_rreg;
+ rdev->mc_wreg = &rv515_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
+ rdev->mc_rreg = &rs400_mc_rreg;
+ rdev->mc_wreg = &rs400_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+ rdev->mc_rreg = &rs690_mc_rreg;
+ rdev->mc_wreg = &rs690_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS600) {
+ rdev->mc_rreg = &rs600_mc_rreg;
+ rdev->mc_wreg = &rs600_mc_wreg;
+ }
+ if (rdev->family >= CHIP_R600) {
+ rdev->pciep_rreg = &r600_pciep_rreg;
+ rdev->pciep_wreg = &r600_pciep_wreg;
+ }
+}
+
+
+/*
+ * ASIC
+ */
+int radeon_asic_init(struct radeon_device *rdev)
+{
+ radeon_register_accessor_init(rdev);
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+ rdev->asic = &r100_asic;
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ case CHIP_RV380:
+ rdev->asic = &r300_asic;
+ break;
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ rdev->asic = &r420_asic;
+ break;
+ case CHIP_RS400:
+ case CHIP_RS480:
+ rdev->asic = &rs400_asic;
+ break;
+ case CHIP_RS600:
+ rdev->asic = &rs600_asic;
+ break;
+ case CHIP_RS690:
+ case CHIP_RS740:
+ rdev->asic = &rs690_asic;
+ break;
+ case CHIP_RV515:
+ rdev->asic = &rv515_asic;
+ break;
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_RV560:
+ case CHIP_RV570:
+ case CHIP_R580:
+ rdev->asic = &r520_asic;
+ break;
+ case CHIP_R600:
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV620:
+ case CHIP_RV635:
+ case CHIP_RV670:
+ case CHIP_RS780:
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+/*
+ * Wrapper around modesetting bits.
+ */
+int radeon_clocks_init(struct radeon_device *rdev)
+{
+ int r;
+
+ radeon_get_clock_info(rdev->ddev);
+ r = radeon_static_clocks_init(rdev->ddev);
+ if (r) {
+ return r;
+ }
+ DRM_INFO("Clocks initialized !\n");
+ return 0;
+}
+
+void radeon_clocks_fini(struct radeon_device *rdev)
+{
+}
+
+/* ATOM accessor methods */
+static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+ uint32_t r;
+
+ r = rdev->pll_rreg(rdev, reg);
+ return r;
+}
+
+static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+
+ rdev->pll_wreg(rdev, reg, val);
+}
+
+static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+ uint32_t r;
+
+ r = rdev->mc_rreg(rdev, reg);
+ return r;
+}
+
+static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+
+ rdev->mc_wreg(rdev, reg, val);
+}
+
+static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+
+ WREG32(reg*4, val);
+}
+
+static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32(reg*4);
+ return r;
+}
+
+static struct card_info atom_card_info = {
+ .dev = NULL,
+ .reg_read = cail_reg_read,
+ .reg_write = cail_reg_write,
+ .mc_read = cail_mc_read,
+ .mc_write = cail_mc_write,
+ .pll_read = cail_pll_read,
+ .pll_write = cail_pll_write,
+};
+
+int radeon_atombios_init(struct radeon_device *rdev)
+{
+ atom_card_info.dev = rdev->ddev;
+ rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
+ radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+ return 0;
+}
+
+void radeon_atombios_fini(struct radeon_device *rdev)
+{
+ kfree(rdev->mode_info.atom_context);
+}
+
+int radeon_combios_init(struct radeon_device *rdev)
+{
+ radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
+ return 0;
+}
+
+void radeon_combios_fini(struct radeon_device *rdev)
+{
+}
+
+int radeon_modeset_init(struct radeon_device *rdev);
+void radeon_modeset_fini(struct radeon_device *rdev);
+
+
+/*
+ * Radeon device.
+ */
+int radeon_device_init(struct radeon_device *rdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev,
+ uint32_t flags)
+{
+ int r, ret;
+
+ DRM_INFO("radeon: Initializing kernel modesetting.\n");
+ rdev->shutdown = false;
+ rdev->ddev = ddev;
+ rdev->pdev = pdev;
+ rdev->flags = flags;
+ rdev->family = flags & RADEON_FAMILY_MASK;
+ rdev->is_atom_bios = false;
+ rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ rdev->gpu_lockup = false;
+	/* mutex initializations are all done here so these
+	 * functions can be called again without locking issues */
+ mutex_init(&rdev->cs_mutex);
+ mutex_init(&rdev->ib_pool.mutex);
+ mutex_init(&rdev->cp.mutex);
+ rwlock_init(&rdev->fence_drv.lock);
+
+ if (radeon_agpmode == -1) {
+ rdev->flags &= ~RADEON_IS_AGP;
+ if (rdev->family > CHIP_RV515 ||
+ rdev->family == CHIP_RV380 ||
+ rdev->family == CHIP_RV410 ||
+ rdev->family == CHIP_R423) {
+ DRM_INFO("Forcing AGP to PCIE mode\n");
+ rdev->flags |= RADEON_IS_PCIE;
+ } else {
+ DRM_INFO("Forcing AGP to PCI mode\n");
+ rdev->flags |= RADEON_IS_PCI;
+ }
+ }
+
+ /* Set asic functions */
+ r = radeon_asic_init(rdev);
+ if (r) {
+ return r;
+ }
+
+ /* Report DMA addressing limitation */
+ r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+ if (r) {
+ printk(KERN_WARNING "radeon: No suitable DMA available.\n");
+ }
+
+ /* Registers mapping */
+ /* TODO: block userspace mapping of io register */
+ rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
+ rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
+ rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
+ if (rdev->rmmio == NULL) {
+ return -ENOMEM;
+ }
+ DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
+ DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
+
+ /* Setup errata flags */
+ radeon_errata(rdev);
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+
+ /* TODO: disable VGA need to use VGA request */
+ /* BIOS*/
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ r = radeon_atombios_init(rdev);
+ if (r) {
+ return r;
+ }
+ } else {
+ r = radeon_combios_init(rdev);
+ if (r) {
+ return r;
+ }
+ }
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+ if (radeon_gpu_reset(rdev)) {
+ /* FIXME: what do we want to do here ? */
+ }
+ /* check if cards are posted or not */
+ if (!radeon_card_posted(rdev) && rdev->bios) {
+ DRM_INFO("GPU not posted. posting now...\n");
+ if (rdev->is_atom_bios) {
+ atom_asic_init(rdev->mode_info.atom_context);
+ } else {
+ radeon_combios_asic_init(rdev->ddev);
+ }
+ }
+	/* Get vram information */
+ radeon_vram_info(rdev);
+	/* Device is severely broken if aper size > vram size.
+	 * For RN50/M6/M7 - Novell bug 204882?
+	 */
+ if (rdev->mc.vram_size < rdev->mc.aper_size) {
+ rdev->mc.aper_size = rdev->mc.vram_size;
+ }
+ /* Add an MTRR for the VRAM */
+ rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
+ MTRR_TYPE_WRCOMB, 1);
+ DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
+ rdev->mc.vram_size >> 20,
+ (unsigned)rdev->mc.aper_size >> 20);
+ DRM_INFO("RAM width %dbits %cDR\n",
+ rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
+ /* Initialize clocks */
+ r = radeon_clocks_init(rdev);
+ if (r) {
+ return r;
+ }
+ /* Initialize memory controller (also test AGP) */
+ r = radeon_mc_init(rdev);
+ if (r) {
+ return r;
+ }
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r) {
+ return r;
+ }
+ r = radeon_irq_kms_init(rdev);
+ if (r) {
+ return r;
+ }
+ /* Memory manager */
+ r = radeon_object_init(rdev);
+ if (r) {
+ return r;
+ }
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ r = radeon_gart_enable(rdev);
+ if (!r) {
+ r = radeon_gem_init(rdev);
+ }
+
+ /* 1M ring buffer */
+ if (!r) {
+ r = radeon_cp_init(rdev, 1024 * 1024);
+ }
+ if (!r) {
+ r = radeon_wb_init(rdev);
+ if (r) {
+			DRM_ERROR("radeon: failed initializing WB (%d).\n", r);
+ return r;
+ }
+ }
+ if (!r) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+ return r;
+ }
+ }
+ if (!r) {
+ r = radeon_ib_test(rdev);
+ if (r) {
+			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ return r;
+ }
+ }
+ ret = r;
+ r = radeon_modeset_init(rdev);
+ if (r) {
+ return r;
+ }
+ if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) {
+ rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private;
+ }
+ if (!ret) {
+ DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
+ }
+ if (radeon_benchmarking) {
+ radeon_benchmark(rdev);
+ }
+ return ret;
+}
+
+void radeon_device_fini(struct radeon_device *rdev)
+{
+ if (rdev == NULL || rdev->rmmio == NULL) {
+ return;
+ }
+ DRM_INFO("radeon: finishing device.\n");
+ rdev->shutdown = true;
+	/* Order matters, so be careful if you rearrange anything */
+ radeon_modeset_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_gem_fini(rdev);
+ radeon_object_fini(rdev);
+ /* mc_fini must be after object_fini */
+ radeon_mc_fini(rdev);
+#if __OS_HAS_AGP
+ radeon_agp_fini(rdev);
+#endif
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_clocks_fini(rdev);
+ if (rdev->is_atom_bios) {
+ radeon_atombios_fini(rdev);
+ } else {
+ radeon_combios_fini(rdev);
+ }
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+ iounmap(rdev->rmmio);
+ rdev->rmmio = NULL;
+}
+
+
+/*
+ * Suspend & resume.
+ */
+int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_crtc *crtc;
+
+ if (dev == NULL || rdev == NULL) {
+ return -ENODEV;
+ }
+ if (state.event == PM_EVENT_PRETHAW) {
+ return 0;
+ }
+ /* unpin the front buffers */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
+ struct radeon_object *robj;
+
+ if (rfb == NULL || rfb->obj == NULL) {
+ continue;
+ }
+ robj = rfb->obj->driver_private;
+ if (robj != rdev->fbdev_robj) {
+ radeon_object_unpin(robj);
+ }
+ }
+ /* evict vram memory */
+ radeon_object_evict_vram(rdev);
+ /* wait for gpu to finish processing current batch */
+ radeon_fence_wait_last(rdev);
+
+ radeon_cp_disable(rdev);
+ radeon_gart_disable(rdev);
+
+ /* evict remaining vram memory */
+ radeon_object_evict_vram(rdev);
+
+ rdev->irq.sw_int = false;
+ radeon_irq_set(rdev);
+
+ pci_save_state(dev->pdev);
+ if (state.event == PM_EVENT_SUSPEND) {
+ /* Shut down the device */
+ pci_disable_device(dev->pdev);
+ pci_set_power_state(dev->pdev, PCI_D3hot);
+ }
+ acquire_console_sem();
+ fb_set_suspend(rdev->fbdev_info, 1);
+ release_console_sem();
+ return 0;
+}
+
+int radeon_resume_kms(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ int r;
+
+ acquire_console_sem();
+ pci_set_power_state(dev->pdev, PCI_D0);
+ pci_restore_state(dev->pdev);
+ if (pci_enable_device(dev->pdev)) {
+ release_console_sem();
+ return -1;
+ }
+ pci_set_master(dev->pdev);
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+ if (radeon_gpu_reset(rdev)) {
+ /* FIXME: what do we want to do here ? */
+ }
+ /* post card */
+ if (rdev->is_atom_bios) {
+ atom_asic_init(rdev->mode_info.atom_context);
+ } else {
+ radeon_combios_asic_init(rdev->ddev);
+ }
+ /* Initialize clocks */
+ r = radeon_clocks_init(rdev);
+ if (r) {
+ release_console_sem();
+ return r;
+ }
+ /* Enable IRQ */
+ rdev->irq.sw_int = true;
+ radeon_irq_set(rdev);
+ /* Initialize GPU Memory Controller */
+ r = radeon_mc_init(rdev);
+ if (r) {
+ goto out;
+ }
+ r = radeon_gart_enable(rdev);
+ if (r) {
+ goto out;
+ }
+ r = radeon_cp_init(rdev, rdev->cp.ring_size);
+ if (r) {
+ goto out;
+ }
+out:
+ fb_set_suspend(rdev->fbdev_info, 0);
+ release_console_sem();
+
+ /* blat the mode back in */
+ drm_helper_resume_force_mode(dev);
+ return 0;
+}
+
+
+/*
+ * Debugfs
+ */
+struct radeon_debugfs {
+ struct drm_info_list *files;
+ unsigned num_files;
+};
+static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
+static unsigned _radeon_debugfs_count = 0;
+
+int radeon_debugfs_add_files(struct radeon_device *rdev,
+ struct drm_info_list *files,
+ unsigned nfiles)
+{
+ unsigned i;
+
+ for (i = 0; i < _radeon_debugfs_count; i++) {
+ if (_radeon_debugfs[i].files == files) {
+ /* Already registered */
+ return 0;
+ }
+ }
+ if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
+ DRM_ERROR("Reached maximum number of debugfs files.\n");
+ DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
+ return -EINVAL;
+ }
+ _radeon_debugfs[_radeon_debugfs_count].files = files;
+ _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
+ _radeon_debugfs_count++;
+#if defined(CONFIG_DEBUG_FS)
+ drm_debugfs_create_files(files, nfiles,
+ rdev->ddev->control->debugfs_root,
+ rdev->ddev->control);
+ drm_debugfs_create_files(files, nfiles,
+ rdev->ddev->primary->debugfs_root,
+ rdev->ddev->primary);
+#endif
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+int radeon_debugfs_init(struct drm_minor *minor)
+{
+ return 0;
+}
+
+void radeon_debugfs_cleanup(struct drm_minor *minor)
+{
+ unsigned i;
+
+ for (i = 0; i < _radeon_debugfs_count; i++) {
+ drm_debugfs_remove_files(_radeon_debugfs[i].files,
+ _radeon_debugfs[i].num_files, minor);
+ }
+}
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
new file mode 100644
index 00000000000..5452bb9d925
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -0,0 +1,692 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "atom.h"
+#include <asm/div64.h>
+
+#include "drm_crtc_helper.h"
+#include "drm_edid.h"
+
+static int radeon_ddc_dump(struct drm_connector *connector);
+
+static void avivo_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
+ WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
+
+ WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+ WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+ WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+ WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+ WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
+ WREG32(AVIVO_DC_LUT_RW_MODE, 0);
+ WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);
+
+ WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
+ for (i = 0; i < 256; i++) {
+ WREG32(AVIVO_DC_LUT_30_COLOR,
+ (radeon_crtc->lut_r[i] << 20) |
+ (radeon_crtc->lut_g[i] << 10) |
+ (radeon_crtc->lut_b[i] << 0));
+ }
+
+ WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
+}
+
+static void legacy_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+ uint32_t dac2_cntl;
+
+ dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+ if (radeon_crtc->crtc_id == 0)
+ dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
+ else
+ dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+
+ WREG8(RADEON_PALETTE_INDEX, 0);
+ for (i = 0; i < 256; i++) {
+ WREG32(RADEON_PALETTE_30_DATA,
+ (radeon_crtc->lut_r[i] << 20) |
+ (radeon_crtc->lut_g[i] << 10) |
+ (radeon_crtc->lut_b[i] << 0));
+ }
+}
+
+void radeon_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (!crtc->enabled)
+ return;
+
+ if (ASIC_IS_AVIVO(rdev))
+ avivo_crtc_load_lut(crtc);
+ else
+ legacy_crtc_load_lut(crtc);
+}
+
+/** Sets the color ramps on behalf of RandR */
+void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ if (regno == 0)
+ DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id);
+ radeon_crtc->lut_r[regno] = red >> 6;
+ radeon_crtc->lut_g[regno] = green >> 6;
+ radeon_crtc->lut_b[regno] = blue >> 6;
+}
+
+static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ int i, j;
+
+ if (size != 256) {
+ return;
+ }
+ if (crtc->fb == NULL) {
+ return;
+ }
+
+ if (crtc->fb->depth == 16) {
+ for (i = 0; i < 64; i++) {
+ if (i <= 31) {
+ for (j = 0; j < 8; j++) {
+ radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6;
+ radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6;
+ }
+ }
+ for (j = 0; j < 4; j++)
+ radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6;
+ }
+ } else {
+ for (i = 0; i < 256; i++) {
+ radeon_crtc->lut_r[i] = red[i] >> 6;
+ radeon_crtc->lut_g[i] = green[i] >> 6;
+ radeon_crtc->lut_b[i] = blue[i] >> 6;
+ }
+ }
+
+ radeon_crtc_load_lut(crtc);
+}
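+
+/*
+ * The depth == 16 branch above expands a 5:6:5 gamma ramp into the 256-entry
+ * hardware LUT: each of the 32 red/blue values is replicated across 8
+ * consecutive slots (index i * 8 + j) and each of the 64 green values across
+ * 4 slots (i * 4 + j); e.g. red[1] fills lut_r[8..15] while green[1] fills
+ * lut_g[4..7].
+ */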
+
+static void radeon_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ if (radeon_crtc->mode_set.mode) {
+ drm_mode_destroy(crtc->dev, radeon_crtc->mode_set.mode);
+ }
+ drm_crtc_cleanup(crtc);
+ kfree(radeon_crtc);
+}
+
+static const struct drm_crtc_funcs radeon_crtc_funcs = {
+ .cursor_set = radeon_crtc_cursor_set,
+ .cursor_move = radeon_crtc_cursor_move,
+ .gamma_set = radeon_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = radeon_crtc_destroy,
+};
+
+static void radeon_crtc_init(struct drm_device *dev, int index)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc;
+ int i;
+
+ radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ if (radeon_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
+ radeon_crtc->crtc_id = index;
+
+ radeon_crtc->mode_set.crtc = &radeon_crtc->base;
+ radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
+ radeon_crtc->mode_set.num_connectors = 0;
+
+ for (i = 0; i < 256; i++) {
+ radeon_crtc->lut_r[i] = i << 2;
+ radeon_crtc->lut_g[i] = i << 2;
+ radeon_crtc->lut_b[i] = i << 2;
+ }
+
+ if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
+ radeon_atombios_init_crtc(dev, radeon_crtc);
+ else
+ radeon_legacy_init_crtc(dev, radeon_crtc);
+}
+
+static const char *encoder_names[34] = {
+ "NONE",
+ "INTERNAL_LVDS",
+ "INTERNAL_TMDS1",
+ "INTERNAL_TMDS2",
+ "INTERNAL_DAC1",
+ "INTERNAL_DAC2",
+ "INTERNAL_SDVOA",
+ "INTERNAL_SDVOB",
+ "SI170B",
+ "CH7303",
+ "CH7301",
+ "INTERNAL_DVO1",
+ "EXTERNAL_SDVOA",
+ "EXTERNAL_SDVOB",
+ "TITFP513",
+ "INTERNAL_LVTM1",
+ "VT1623",
+ "HDMI_SI1930",
+ "HDMI_INTERNAL",
+ "INTERNAL_KLDSCP_TMDS1",
+ "INTERNAL_KLDSCP_DVO1",
+ "INTERNAL_KLDSCP_DAC1",
+ "INTERNAL_KLDSCP_DAC2",
+ "SI178",
+ "MVPU_FPGA",
+ "INTERNAL_DDI",
+ "VT1625",
+ "HDMI_SI1932",
+ "DP_AN9801",
+ "DP_DP501",
+ "INTERNAL_UNIPHY",
+ "INTERNAL_KLDSCP_LVTMA",
+ "INTERNAL_UNIPHY1",
+ "INTERNAL_UNIPHY2",
+};
+
+static const char *connector_names[13] = {
+ "Unknown",
+ "VGA",
+ "DVI-I",
+ "DVI-D",
+ "DVI-A",
+ "Composite",
+ "S-video",
+ "LVDS",
+ "Component",
+ "DIN",
+ "DisplayPort",
+ "HDMI-A",
+ "HDMI-B",
+};
+
+static void radeon_print_display_setup(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ uint32_t devices;
+ int i = 0;
+
+ DRM_INFO("Radeon Display Connectors\n");
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ DRM_INFO("Connector %d:\n", i);
+ DRM_INFO(" %s\n", connector_names[connector->connector_type]);
+ if (radeon_connector->ddc_bus)
+ DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ radeon_connector->ddc_bus->rec.mask_clk_reg,
+ radeon_connector->ddc_bus->rec.mask_data_reg,
+ radeon_connector->ddc_bus->rec.a_clk_reg,
+ radeon_connector->ddc_bus->rec.a_data_reg,
+ radeon_connector->ddc_bus->rec.put_clk_reg,
+ radeon_connector->ddc_bus->rec.put_data_reg,
+ radeon_connector->ddc_bus->rec.get_clk_reg,
+ radeon_connector->ddc_bus->rec.get_data_reg);
+ DRM_INFO(" Encoders:\n");
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ devices = radeon_encoder->devices & radeon_connector->devices;
+ if (devices) {
+ if (devices & ATOM_DEVICE_CRT1_SUPPORT)
+ DRM_INFO(" CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_CRT2_SUPPORT)
+ DRM_INFO(" CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_LCD1_SUPPORT)
+ DRM_INFO(" LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP1_SUPPORT)
+ DRM_INFO(" DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP2_SUPPORT)
+ DRM_INFO(" DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP3_SUPPORT)
+ DRM_INFO(" DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP4_SUPPORT)
+ DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP5_SUPPORT)
+ DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_TV1_SUPPORT)
+ DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_CV_SUPPORT)
+ DRM_INFO(" CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ }
+ }
+ i++;
+ }
+}
+
+bool radeon_setup_enc_conn(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *drm_connector;
+ bool ret = false;
+
+ if (rdev->bios) {
+ if (rdev->is_atom_bios) {
+ if (rdev->family >= CHIP_R600)
+ ret = radeon_get_atom_connector_info_from_object_table(dev);
+ else
+ ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
+ } else
+ ret = radeon_get_legacy_connector_info_from_bios(dev);
+ } else {
+ if (!ASIC_IS_AVIVO(rdev))
+ ret = radeon_get_legacy_connector_info_from_table(dev);
+ }
+ if (ret) {
+ radeon_print_display_setup(dev);
+ list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
+ radeon_ddc_dump(drm_connector);
+ }
+
+ return ret;
+}
+
+int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+{
+ struct edid *edid;
+ int ret = 0;
+
+ if (!radeon_connector->ddc_bus)
+ return -1;
+ radeon_i2c_do_lock(radeon_connector, 1);
+ edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_i2c_do_lock(radeon_connector, 0);
+ if (edid) {
+ /* update digital bits here */
+ if (edid->digital)
+ radeon_connector->use_digital = 1;
+ else
+ radeon_connector->use_digital = 0;
+ drm_mode_connector_update_edid_property(&radeon_connector->base, edid);
+ ret = drm_add_edid_modes(&radeon_connector->base, edid);
+ kfree(edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+ return -1;
+}
+
+static int radeon_ddc_dump(struct drm_connector *connector)
+{
+ struct edid *edid;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret = 0;
+
+ if (!radeon_connector->ddc_bus)
+ return -1;
+ radeon_i2c_do_lock(radeon_connector, 1);
+ edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
+ radeon_i2c_do_lock(radeon_connector, 0);
+ if (edid) {
+ kfree(edid);
+ }
+ return ret;
+}
+
+static inline uint32_t radeon_div(uint64_t n, uint32_t d)
+{
+ uint64_t mod;
+
+ n += d / 2;
+
+ mod = do_div(n, d);
+ return n;
+}
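+
+/*
+ * radeon_div() rounds to nearest instead of truncating, e.g.
+ *
+ *	radeon_div(7, 2)  == 4	- (7 + 1) / 2
+ *	radeon_div(10, 4) == 3	- (10 + 2) / 4
+ */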
+
+void radeon_compute_pll(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p,
+ int flags)
+{
+ uint32_t min_ref_div = pll->min_ref_div;
+ uint32_t max_ref_div = pll->max_ref_div;
+ uint32_t min_fractional_feed_div = 0;
+ uint32_t max_fractional_feed_div = 0;
+ uint32_t best_vco = pll->best_vco;
+ uint32_t best_post_div = 1;
+ uint32_t best_ref_div = 1;
+ uint32_t best_feedback_div = 1;
+ uint32_t best_frac_feedback_div = 0;
+ uint32_t best_freq = -1;
+ uint32_t best_error = 0xffffffff;
+ uint32_t best_vco_diff = 1;
+ uint32_t post_div;
+
+ DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
+ freq = freq * 1000;
+
+ if (flags & RADEON_PLL_USE_REF_DIV)
+ min_ref_div = max_ref_div = pll->reference_div;
+ else {
+ while (min_ref_div < max_ref_div-1) {
+ uint32_t mid = (min_ref_div + max_ref_div) / 2;
+ uint32_t pll_in = pll->reference_freq / mid;
+ if (pll_in < pll->pll_in_min)
+ max_ref_div = mid;
+ else if (pll_in > pll->pll_in_max)
+ min_ref_div = mid;
+ else
+ break;
+ }
+ }
+
+ if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ min_fractional_feed_div = pll->min_frac_feedback_div;
+ max_fractional_feed_div = pll->max_frac_feedback_div;
+ }
+
+ for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
+ uint32_t ref_div;
+
+ if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+ continue;
+
+ /* legacy radeons only have a few post_divs */
+ if (flags & RADEON_PLL_LEGACY) {
+ if ((post_div == 5) ||
+ (post_div == 7) ||
+ (post_div == 9) ||
+ (post_div == 10) ||
+ (post_div == 11) ||
+ (post_div == 13) ||
+ (post_div == 14) ||
+ (post_div == 15))
+ continue;
+ }
+
+ for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
+ uint32_t feedback_div, current_freq = 0, error, vco_diff;
+ uint32_t pll_in = pll->reference_freq / ref_div;
+ uint32_t min_feed_div = pll->min_feedback_div;
+ uint32_t max_feed_div = pll->max_feedback_div + 1;
+
+ if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
+ continue;
+
+ while (min_feed_div < max_feed_div) {
+ uint32_t vco;
+ uint32_t min_frac_feed_div = min_fractional_feed_div;
+ uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
+ uint32_t frac_feedback_div;
+ uint64_t tmp;
+
+ feedback_div = (min_feed_div + max_feed_div) / 2;
+
+ tmp = (uint64_t)pll->reference_freq * feedback_div;
+ vco = radeon_div(tmp, ref_div);
+
+ if (vco < pll->pll_out_min) {
+ min_feed_div = feedback_div + 1;
+ continue;
+ } else if (vco > pll->pll_out_max) {
+ max_feed_div = feedback_div;
+ continue;
+ }
+
+ while (min_frac_feed_div < max_frac_feed_div) {
+ frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
+ tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
+ tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
+ current_freq = radeon_div(tmp, ref_div * post_div);
+
+ error = abs(current_freq - freq);
+ vco_diff = abs(vco - best_vco);
+
+ if ((best_vco == 0 && error < best_error) ||
+ (best_vco != 0 &&
+ (error < best_error - 100 ||
+ (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
+ best_post_div = post_div;
+ best_ref_div = ref_div;
+ best_feedback_div = feedback_div;
+ best_frac_feedback_div = frac_feedback_div;
+ best_freq = current_freq;
+ best_error = error;
+ best_vco_diff = vco_diff;
+ } else if (current_freq == freq) {
+ if (best_freq == -1) {
+ best_post_div = post_div;
+ best_ref_div = ref_div;
+ best_feedback_div = feedback_div;
+ best_frac_feedback_div = frac_feedback_div;
+ best_freq = current_freq;
+ best_error = error;
+ best_vco_diff = vco_diff;
+ } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
+ ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
+ ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
+ ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
+ ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
+ ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
+ best_post_div = post_div;
+ best_ref_div = ref_div;
+ best_feedback_div = feedback_div;
+ best_frac_feedback_div = frac_feedback_div;
+ best_freq = current_freq;
+ best_error = error;
+ best_vco_diff = vco_diff;
+ }
+ }
+ if (current_freq < freq)
+ min_frac_feed_div = frac_feedback_div + 1;
+ else
+ max_frac_feed_div = frac_feedback_div;
+ }
+ if (current_freq < freq)
+ min_feed_div = feedback_div + 1;
+ else
+ max_feed_div = feedback_div;
+ }
+ }
+ }
+
+ *dot_clock_p = best_freq / 10000;
+ *fb_div_p = best_feedback_div;
+ *frac_fb_div_p = best_frac_feedback_div;
+ *ref_div_p = best_ref_div;
+ *post_div_p = best_post_div;
+}
+
+static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
+ struct drm_device *dev = fb->dev;
+
+ if (fb->fbdev)
+ radeonfb_remove(dev, fb);
+
+ if (radeon_fb->obj) {
+ radeon_gem_object_unpin(radeon_fb->obj);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(radeon_fb->obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
+ drm_framebuffer_cleanup(fb);
+ kfree(radeon_fb);
+}
+
+static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
+
+ return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs radeon_fb_funcs = {
+ .destroy = radeon_user_framebuffer_destroy,
+ .create_handle = radeon_user_framebuffer_create_handle,
+};
+
+struct drm_framebuffer *
+radeon_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ struct radeon_framebuffer *radeon_fb;
+
+ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+ if (radeon_fb == NULL) {
+ return NULL;
+ }
+ drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
+ drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
+ radeon_fb->obj = obj;
+ return &radeon_fb->base;
+}
+
+static struct drm_framebuffer *
+radeon_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+
+ return radeon_framebuffer_create(dev, mode_cmd, obj);
+}
+
+static const struct drm_mode_config_funcs radeon_mode_funcs = {
+ .fb_create = radeon_user_framebuffer_create,
+ .fb_changed = radeonfb_probe,
+};
+
+int radeon_modeset_init(struct radeon_device *rdev)
+{
+ int num_crtc = 2, i;
+ int ret;
+
+ drm_mode_config_init(rdev->ddev);
+ rdev->mode_info.mode_config_initialized = true;
+
+ rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ rdev->ddev->mode_config.max_width = 8192;
+ rdev->ddev->mode_config.max_height = 8192;
+ } else {
+ rdev->ddev->mode_config.max_width = 4096;
+ rdev->ddev->mode_config.max_height = 4096;
+ }
+
+ rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
+
+ /* allocate crtcs - TODO single crtc */
+ for (i = 0; i < num_crtc; i++) {
+ radeon_crtc_init(rdev->ddev, i);
+ }
+
+	/* at this point all BIOS-reported connectors should be set up */
+ ret = radeon_setup_enc_conn(rdev->ddev);
+ if (!ret) {
+ return ret;
+ }
+ drm_helper_initial_config(rdev->ddev);
+ return 0;
+}
+
+void radeon_modeset_fini(struct radeon_device *rdev)
+{
+ if (rdev->mode_info.mode_config_initialized) {
+ drm_mode_config_cleanup(rdev->ddev);
+ rdev->mode_info.mode_config_initialized = false;
+ }
+}
+
+void radeon_init_disp_bandwidth(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_display_mode *modes[2];
+ int pixel_bytes[2];
+ struct drm_crtc *crtc;
+
+ pixel_bytes[0] = pixel_bytes[1] = 0;
+ modes[0] = modes[1] = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ if (crtc->enabled && crtc->fb) {
+ modes[radeon_crtc->crtc_id] = &crtc->mode;
+ pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8;
+ }
+ }
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ radeon_init_disp_bw_avivo(dev,
+ modes[0],
+ pixel_bytes[0],
+ modes[1],
+ pixel_bytes[1]);
+ } else {
+ radeon_init_disp_bw_legacy(dev,
+ modes[0],
+ pixel_bytes[0],
+ modes[1],
+ pixel_bytes[1]);
+ }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 13a60f4d422..c815a2cbf7b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -35,12 +35,92 @@
#include "radeon_drv.h"
#include "drm_pciids.h"
+#include <linux/console.h>
+
+
+#if defined(CONFIG_DRM_RADEON_KMS)
+/*
+ * KMS wrapper.
+ */
+#define KMS_DRIVER_MAJOR 2
+#define KMS_DRIVER_MINOR 0
+#define KMS_DRIVER_PATCHLEVEL 0
+int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
+int radeon_driver_unload_kms(struct drm_device *dev);
+int radeon_driver_firstopen_kms(struct drm_device *dev);
+void radeon_driver_lastclose_kms(struct drm_device *dev);
+int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
+void radeon_driver_postclose_kms(struct drm_device *dev,
+ struct drm_file *file_priv);
+void radeon_driver_preclose_kms(struct drm_device *dev,
+ struct drm_file *file_priv);
+int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+int radeon_resume_kms(struct drm_device *dev);
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
+int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
+void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
+void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
+int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
+void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
+irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
+int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master);
+void radeon_master_destroy_kms(struct drm_device *dev,
+ struct drm_master *master);
+int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int radeon_gem_object_init(struct drm_gem_object *obj);
+void radeon_gem_object_free(struct drm_gem_object *obj);
+extern struct drm_ioctl_desc radeon_ioctls_kms[];
+extern int radeon_max_kms_ioctl;
+int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
+#if defined(CONFIG_DEBUG_FS)
+int radeon_debugfs_init(struct drm_minor *minor);
+void radeon_debugfs_cleanup(struct drm_minor *minor);
+#endif
+#endif
+
int radeon_no_wb;
+#if defined(CONFIG_DRM_RADEON_KMS)
+int radeon_modeset = -1;
+int radeon_dynclks = -1;
+int radeon_r4xx_atom = 0;
+int radeon_agpmode = 0;
+int radeon_vram_limit = 0;
+int radeon_gart_size = 512; /* default gart size */
+int radeon_benchmarking = 0;
+int radeon_connector_table = 0;
+#endif
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
+#if defined(CONFIG_DRM_RADEON_KMS)
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, radeon_modeset, int, 0400);
+
+MODULE_PARM_DESC(dynclks, "Disable/Enable dynamic clocks");
+module_param_named(dynclks, radeon_dynclks, int, 0444);
+
+MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx");
+module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444);
+
+MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing");
+module_param_named(vramlimit, radeon_vram_limit, int, 0600);
+
+MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
+module_param_named(agpmode, radeon_agpmode, int, 0444);
+
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP GART to set up in megabytes (32, 64, etc.)");
+module_param_named(gartsize, radeon_gart_size, int, 0600);
+
+MODULE_PARM_DESC(benchmark, "Run benchmark");
+module_param_named(benchmark, radeon_benchmarking, int, 0444);
+
+MODULE_PARM_DESC(connector_table, "Force connector table");
+module_param_named(connector_table, radeon_connector_table, int, 0444);
+#endif
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -73,7 +153,11 @@ static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
-static struct drm_driver driver = {
+#if defined(CONFIG_DRM_RADEON_KMS)
+MODULE_DEVICE_TABLE(pci, pciidlist);
+#endif
+
+static struct drm_driver driver_old = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
@@ -127,18 +211,141 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+#if defined(CONFIG_DRM_RADEON_KMS)
+static struct drm_driver kms_driver;
+
+static int __devinit
+radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_dev(pdev, ent, &kms_driver);
+}
+
+static void
+radeon_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static int
+radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ return radeon_suspend_kms(dev, state);
+}
+
+static int
+radeon_pci_resume(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ return radeon_resume_kms(dev);
+}
+
+static struct drm_driver kms_driver = {
+ .driver_features =
+ DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+ DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ .dev_priv_size = 0,
+ .load = radeon_driver_load_kms,
+ .firstopen = radeon_driver_firstopen_kms,
+ .open = radeon_driver_open_kms,
+ .preclose = radeon_driver_preclose_kms,
+ .postclose = radeon_driver_postclose_kms,
+ .lastclose = radeon_driver_lastclose_kms,
+ .unload = radeon_driver_unload_kms,
+ .suspend = radeon_suspend_kms,
+ .resume = radeon_resume_kms,
+ .get_vblank_counter = radeon_get_vblank_counter_kms,
+ .enable_vblank = radeon_enable_vblank_kms,
+ .disable_vblank = radeon_disable_vblank_kms,
+ .master_create = radeon_master_create_kms,
+ .master_destroy = radeon_master_destroy_kms,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = radeon_debugfs_init,
+ .debugfs_cleanup = radeon_debugfs_cleanup,
+#endif
+ .irq_preinstall = radeon_driver_irq_preinstall_kms,
+ .irq_postinstall = radeon_driver_irq_postinstall_kms,
+ .irq_uninstall = radeon_driver_irq_uninstall_kms,
+ .irq_handler = radeon_driver_irq_handler_kms,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+ .get_map_ofs = drm_core_get_map_ofs,
+ .get_reg_ofs = drm_core_get_reg_ofs,
+ .ioctls = radeon_ioctls_kms,
+ .gem_init_object = radeon_gem_object_init,
+ .gem_free_object = radeon_gem_object_free,
+ .dma_ioctl = radeon_dma_ioctl_kms,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .ioctl = drm_ioctl,
+ .mmap = radeon_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = NULL,
+#endif
+ },
+
+ .pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = radeon_pci_probe,
+ .remove = radeon_pci_remove,
+ .suspend = radeon_pci_suspend,
+ .resume = radeon_pci_resume,
+ },
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = KMS_DRIVER_MAJOR,
+ .minor = KMS_DRIVER_MINOR,
+ .patchlevel = KMS_DRIVER_PATCHLEVEL,
+};
+#endif
+
+static struct drm_driver *driver;
+
static int __init radeon_init(void)
{
- driver.num_ioctls = radeon_max_ioctl;
- return drm_init(&driver);
+ driver = &driver_old;
+ driver->num_ioctls = radeon_max_ioctl;
+#if defined(CONFIG_DRM_RADEON_KMS) && defined(CONFIG_X86)
+	/* enable KMS by default when no modeset= option was given */
+ if (radeon_modeset == -1) {
+		DRM_INFO("radeon defaulting to kernel modesetting.\n");
+ radeon_modeset = 1;
+ }
+ if (radeon_modeset == 1) {
+ DRM_INFO("radeon kernel modesetting enabled.\n");
+ driver = &kms_driver;
+ driver->driver_features |= DRIVER_MODESET;
+ driver->num_ioctls = radeon_max_kms_ioctl;
+ }
+
+	/* if the VGA text console is forced, disable KMS unless it was
+	 * explicitly requested via the modeset module parameter */
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && radeon_modeset == -1) {
+		DRM_INFO("VGACON disabled radeon kernel modesetting.\n");
+ driver = &driver_old;
+ driver->driver_features &= ~DRIVER_MODESET;
+ radeon_modeset = 0;
+ }
+#endif
+#endif
+ return drm_init(driver);
}
static void __exit radeon_exit(void)
{
- drm_exit(&driver);
+ drm_exit(driver);
}
-module_init(radeon_init);
+late_initcall(radeon_init);
module_exit(radeon_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 8071d965f14..127d0456f62 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -146,6 +146,7 @@ enum radeon_family {
CHIP_RV770,
CHIP_RV730,
CHIP_RV710,
+ CHIP_RV740,
CHIP_LAST,
};
@@ -1964,11 +1965,14 @@ do { \
#define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring;
+#define RADEON_RING_ALIGN 16
+
#define BEGIN_RING( n ) do { \
if ( RADEON_VERBOSE ) { \
DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
} \
- _align_nr = (n + 0xf) & ~0xf; \
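+	/* pad the request so the ring tail ends up RADEON_RING_ALIGN-aligned after these n dwords */ \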
+ _align_nr = RADEON_RING_ALIGN - ((dev_priv->ring.tail + n) & (RADEON_RING_ALIGN-1)); \
+ _align_nr += n; \
if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \
COMMIT_RING(); \
radeon_wait_ring( dev_priv, _align_nr * sizeof(u32)); \
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
new file mode 100644
index 00000000000..c8ef0d14ffa
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -0,0 +1,1708 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "atom.h"
+
+extern int atom_debug;
+
+uint32_t
+radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t ret = 0;
+
+ switch (supported_device) {
+ case ATOM_DEVICE_CRT1_SUPPORT:
+ case ATOM_DEVICE_TV1_SUPPORT:
+ case ATOM_DEVICE_TV2_SUPPORT:
+ case ATOM_DEVICE_CRT2_SUPPORT:
+ case ATOM_DEVICE_CV_SUPPORT:
+ switch (dac) {
+ case 1: /* dac a */
+ if ((rdev->family == CHIP_RS300) ||
+ (rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480))
+ ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ else if (ASIC_IS_AVIVO(rdev))
+ ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+ else
+ ret = ENCODER_OBJECT_ID_INTERNAL_DAC1;
+ break;
+ case 2: /* dac b */
+ if (ASIC_IS_AVIVO(rdev))
+ ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+ else {
+ /*if (rdev->family == CHIP_R200)
+ ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ else*/
+ ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ }
+ break;
+ case 3: /* external dac */
+ if (ASIC_IS_AVIVO(rdev))
+ ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+ else
+ ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ break;
+ }
+ break;
+ case ATOM_DEVICE_LCD1_SUPPORT:
+ if (ASIC_IS_AVIVO(rdev))
+ ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ else
+ ret = ENCODER_OBJECT_ID_INTERNAL_LVDS;
+ break;
+ case ATOM_DEVICE_DFP1_SUPPORT:
+ if ((rdev->family == CHIP_RS300) ||
+ (rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480))
+ ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ else if (ASIC_IS_AVIVO(rdev))
+ ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+ else
+ ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+ break;
+ case ATOM_DEVICE_LCD2_SUPPORT:
+ case ATOM_DEVICE_DFP2_SUPPORT:
+ if ((rdev->family == CHIP_RS600) ||
+ (rdev->family == CHIP_RS690) ||
+ (rdev->family == CHIP_RS740))
+ ret = ENCODER_OBJECT_ID_INTERNAL_DDI;
+ else if (ASIC_IS_AVIVO(rdev))
+ ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+ else
+ ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ break;
+ case ATOM_DEVICE_DFP3_SUPPORT:
+ ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ break;
+ }
+
+ return ret;
+}
+
+void
+radeon_link_encoder_connector(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+
+ /* walk the list and link encoders to connectors */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->devices & radeon_connector->devices)
+ drm_mode_connector_attach_encoder(connector, encoder);
+ }
+ }
+}
+
+static struct drm_connector *
+radeon_get_connector_for_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ if (radeon_encoder->devices & radeon_connector->devices)
+ return connector;
+ }
+ return NULL;
+}
+
+/* used for both atom and legacy */
+void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
+
+ if (mode->hdisplay < native_mode->panel_xres ||
+ mode->vdisplay < native_mode->panel_yres) {
+ radeon_encoder->flags |= RADEON_USE_RMX;
+ if (ASIC_IS_AVIVO(rdev)) {
+ adjusted_mode->hdisplay = native_mode->panel_xres;
+ adjusted_mode->vdisplay = native_mode->panel_yres;
+ adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
+ adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
+ adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
+ adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
+ adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
+ adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
+ /* update crtc values */
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ /* adjust crtc values */
+ adjusted_mode->crtc_hdisplay = native_mode->panel_xres;
+ adjusted_mode->crtc_vdisplay = native_mode->panel_yres;
+ adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
+ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
+ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
+ adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
+ adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
+ } else {
+ adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
+ adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
+ adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
+ adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
+ adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
+ adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
+ /* update crtc values */
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ /* adjust crtc values */
+ adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
+ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
+ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
+ adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
+ adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
+ }
+ adjusted_mode->flags = native_mode->flags;
+ adjusted_mode->clock = native_mode->dotclock;
+ }
+}
+
+static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ radeon_encoder->flags &= ~RADEON_USE_RMX;
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ if (radeon_encoder->rmx_type != RMX_OFF)
+ radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
+
+	/* hw bug: interlaced modes need vsync_start at least 2 lines past vdisplay */
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+ && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+
+ return true;
+}
+
+static void
+atombios_dac_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DAC_ENCODER_CONTROL_PS_ALLOCATION args;
+ int index = 0, num = 0;
+ /* fixme - fill in enc_priv for atom dac */
+ enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
+ num = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
+ num = 2;
+ break;
+ }
+
+ args.ucAction = action;
+
+ if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+ args.ucDacStandard = ATOM_DAC1_PS2;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+ args.ucDacStandard = ATOM_DAC1_CV;
+ else {
+ switch (tv_std) {
+ case TV_STD_PAL:
+ case TV_STD_PAL_M:
+ case TV_STD_SCART_PAL:
+ case TV_STD_SECAM:
+ case TV_STD_PAL_CN:
+ args.ucDacStandard = ATOM_DAC1_PAL;
+ break;
+ case TV_STD_NTSC:
+ case TV_STD_NTSC_J:
+ case TV_STD_PAL_60:
+ default:
+ args.ucDacStandard = ATOM_DAC1_NTSC;
+ break;
+ }
+ }
+ args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static void
+atombios_tv_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ TV_ENCODER_CONTROL_PS_ALLOCATION args;
+ int index = 0;
+ /* fixme - fill in enc_priv for atom dac */
+ enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+ memset(&args, 0, sizeof(args));
+
+ index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
+
+ args.sTVEncoder.ucAction = action;
+
+ if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+ args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
+ else {
+ switch (tv_std) {
+ case TV_STD_NTSC:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+ break;
+ case TV_STD_PAL:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
+ break;
+ case TV_STD_PAL_M:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
+ break;
+ case TV_STD_PAL_60:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
+ break;
+ case TV_STD_NTSC_J:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
+ break;
+ case TV_STD_SCART_PAL:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
+ break;
+ case TV_STD_SECAM:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
+ break;
+ case TV_STD_PAL_CN:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
+ break;
+ default:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+ break;
+ }
+ }
+
+ args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+void
+atombios_external_tmds_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args;
+ int index = 0;
+
+ memset(&args, 0, sizeof(args));
+
+ index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+
+ args.sXTmdsEncoder.ucEnable = action;
+
+ if (radeon_encoder->pixel_clock > 165000)
+ args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL;
+
+ /*if (pScrn->rgbBits == 8)*/
+ args.sXTmdsEncoder.ucMisc |= (1 << 1);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static void
+atombios_ddia_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DVO_ENCODER_CONTROL_PS_ALLOCATION args;
+ int index = 0;
+
+ memset(&args, 0, sizeof(args));
+
+ index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+
+ args.sDVOEncoder.ucAction = action;
+ args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+ if (radeon_encoder->pixel_clock > 165000)
+ args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+union lvds_encoder_control {
+ LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
+ LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
+};
+
+static void
+atombios_digital_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ union lvds_encoder_control args;
+ int index = 0;
+ uint8_t frev, crev;
+ struct radeon_encoder_atom_dig *dig;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ if (!connector)
+ return;
+
+ radeon_connector = to_radeon_connector(connector);
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+ dig = radeon_encoder->enc_priv;
+
+ if (!radeon_connector->con_priv)
+ return;
+
+ dig_connector = radeon_connector->con_priv;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
+ break;
+ }
+
+ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+
+ switch (frev) {
+ case 1:
+ case 2:
+ switch (crev) {
+ case 1:
+ args.v1.ucMisc = 0;
+ args.v1.ucAction = action;
+ if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (dig->lvds_misc & (1 << 0))
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ if (dig->lvds_misc & (1 << 1))
+ args.v1.ucMisc |= (1 << 1);
+ } else {
+ if (dig_connector->linkb)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ /*if (pScrn->rgbBits == 8) */
+ args.v1.ucMisc |= (1 << 1);
+ }
+ break;
+ case 2:
+ case 3:
+ args.v2.ucMisc = 0;
+ args.v2.ucAction = action;
+ if (crev == 3) {
+ if (dig->coherent_mode)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
+ }
+ if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+ args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v2.ucTruncate = 0;
+ args.v2.ucSpatial = 0;
+ args.v2.ucTemporal = 0;
+ args.v2.ucFRC = 0;
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (dig->lvds_misc & (1 << 0))
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ if (dig->lvds_misc & (1 << 5)) {
+ args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
+ if (dig->lvds_misc & (1 << 1))
+ args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
+ }
+ if (dig->lvds_misc & (1 << 6)) {
+ args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
+ if (dig->lvds_misc & (1 << 1))
+ args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
+ if (((dig->lvds_misc >> 2) & 0x3) == 2)
+ args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
+ }
+ } else {
+ if (dig_connector->linkb)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+int
+atombios_get_encoder_mode(struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ if (!connector)
+ return 0;
+
+ radeon_connector = to_radeon_connector(connector);
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ return ATOM_ENCODER_MODE_HDMI;
+ else if (radeon_connector->use_digital)
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_CRT;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ default:
+ if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ return ATOM_ENCODER_MODE_HDMI;
+ else
+ return ATOM_ENCODER_MODE_DVI;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ return ATOM_ENCODER_MODE_LVDS;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ /*if (radeon_output->MonType == MT_DP)
+ return ATOM_ENCODER_MODE_DP;
+ else*/
+ if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ return ATOM_ENCODER_MODE_HDMI;
+ else
+ return ATOM_ENCODER_MODE_DVI;
+ break;
+ case CONNECTOR_DVI_A:
+ case CONNECTOR_VGA:
+ return ATOM_ENCODER_MODE_CRT;
+ break;
+ case CONNECTOR_STV:
+ case CONNECTOR_CTV:
+ case CONNECTOR_DIN:
+ /* fix me */
+ return ATOM_ENCODER_MODE_TV;
+ /*return ATOM_ENCODER_MODE_CV;*/
+ break;
+ }
+}
+
+static void
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DIG_ENCODER_CONTROL_PS_ALLOCATION args;
+ int index = 0, num = 0;
+ uint8_t frev, crev;
+ struct radeon_encoder_atom_dig *dig;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ if (!connector)
+ return;
+
+ radeon_connector = to_radeon_connector(connector);
+
+ if (!radeon_connector->con_priv)
+ return;
+
+ dig_connector = radeon_connector->con_priv;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+ dig = radeon_encoder->enc_priv;
+
+ memset(&args, 0, sizeof(args));
+
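+	/* DCE 3.2 parts have two DIG encoder blocks; pick the control
+	 * table for the block assigned to this encoder */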
+ if (ASIC_IS_DCE32(rdev)) {
+ if (dig->dig_block)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ num = dig->dig_block + 1;
+ } else {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ num = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ num = 2;
+ break;
+ }
+ }
+
+ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+
+ args.ucAction = action;
+ args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+ if (ASIC_IS_DCE32(rdev)) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
+ break;
+ }
+ } else {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2;
+ break;
+ }
+ }
+
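+	/* pixel clocks above 165 MHz need a dual-link, 8-lane configuration */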
+ if (radeon_encoder->pixel_clock > 165000) {
+ args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B;
+ args.ucLaneNum = 8;
+ } else {
+ if (dig_connector->linkb)
+ args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ else
+ args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+ args.ucLaneNum = 4;
+ }
+
+ args.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+union dig_transmitter_control {
+ DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+};
+
+static void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ union dig_transmitter_control args;
+ int index = 0, num = 0;
+ uint8_t frev, crev;
+ struct radeon_encoder_atom_dig *dig;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ if (!connector)
+ return;
+
+ radeon_connector = to_radeon_connector(connector);
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+ dig = radeon_encoder->enc_priv;
+
+ if (!radeon_connector->con_priv)
+ return;
+
+ dig_connector = radeon_connector->con_priv;
+
+ memset(&args, 0, sizeof(args));
+
+ if (ASIC_IS_DCE32(rdev))
+ index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+ else {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
+ break;
+ }
+ }
+
+ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+
+ args.v1.ucAction = action;
+
+ if (ASIC_IS_DCE32(rdev)) {
+ if (radeon_encoder->pixel_clock > 165000) {
+ args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 2) / 100);
+ args.v2.acConfig.fDualLinkConnector = 1;
+ } else {
+ args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 4) / 100);
+ }
+ if (dig->dig_block)
+ args.v2.acConfig.ucEncoderSel = 1;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v2.acConfig.ucTransmitterSel = 0;
+ num = 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.v2.acConfig.ucTransmitterSel = 1;
+ num = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v2.acConfig.ucTransmitterSel = 2;
+ num = 2;
+ break;
+ }
+
+ if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v2.acConfig.fCoherentMode = 1;
+ }
+ } else {
+ args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+ args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock) / 10);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (radeon_encoder->pixel_clock > 165000) {
+ args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
+ ATOM_TRANSMITTER_CONFIG_LINKA_B);
+ if (dig_connector->igp_lane_info & 0x3)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+ else if (dig_connector->igp_lane_info & 0xc)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
+ } else {
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+ if (dig_connector->igp_lane_info & 0x1)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ else if (dig_connector->igp_lane_info & 0x2)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+ else if (dig_connector->igp_lane_info & 0x4)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+ else if (dig_connector->igp_lane_info & 0x8)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+ }
+ } else {
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
+ ATOM_TRANSMITTER_CONFIG_LINKA_B |
+ ATOM_TRANSMITTER_CONFIG_LANE_0_7);
+ else {
+ if (dig_connector->linkb)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ }
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
+ ATOM_TRANSMITTER_CONFIG_LINKA_B |
+ ATOM_TRANSMITTER_CONFIG_LANE_0_7);
+ else {
+ if (dig_connector->linkb)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ }
+ break;
+ }
+
+ if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ }
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
+{
+
+ WREG32(0x659C, 0x0);
+ WREG32(0x6594, 0x705);
+ WREG32(0x65A4, 0x10001);
+ WREG32(0x65D8, 0x0);
+ WREG32(0x65B0, 0x0);
+ WREG32(0x65C0, 0x0);
+ WREG32(0x65D4, 0x0);
+ WREG32(0x6578, 0x0);
+ WREG32(0x657C, 0x841880A8);
+ WREG32(0x6578, 0x1);
+ WREG32(0x657C, 0x84208680);
+ WREG32(0x6578, 0x2);
+ WREG32(0x657C, 0xBFF880B0);
+ WREG32(0x6578, 0x100);
+ WREG32(0x657C, 0x83D88088);
+ WREG32(0x6578, 0x101);
+ WREG32(0x657C, 0x84608680);
+ WREG32(0x6578, 0x102);
+ WREG32(0x657C, 0xBFF080D0);
+ WREG32(0x6578, 0x200);
+ WREG32(0x657C, 0x83988068);
+ WREG32(0x6578, 0x201);
+ WREG32(0x657C, 0x84A08680);
+ WREG32(0x6578, 0x202);
+ WREG32(0x657C, 0xBFF080F8);
+ WREG32(0x6578, 0x300);
+ WREG32(0x657C, 0x83588058);
+ WREG32(0x6578, 0x301);
+ WREG32(0x657C, 0x84E08660);
+ WREG32(0x6578, 0x302);
+ WREG32(0x657C, 0xBFF88120);
+ WREG32(0x6578, 0x400);
+ WREG32(0x657C, 0x83188040);
+ WREG32(0x6578, 0x401);
+ WREG32(0x657C, 0x85008660);
+ WREG32(0x6578, 0x402);
+ WREG32(0x657C, 0xBFF88150);
+ WREG32(0x6578, 0x500);
+ WREG32(0x657C, 0x82D88030);
+ WREG32(0x6578, 0x501);
+ WREG32(0x657C, 0x85408640);
+ WREG32(0x6578, 0x502);
+ WREG32(0x657C, 0xBFF88180);
+ WREG32(0x6578, 0x600);
+ WREG32(0x657C, 0x82A08018);
+ WREG32(0x6578, 0x601);
+ WREG32(0x657C, 0x85808620);
+ WREG32(0x6578, 0x602);
+ WREG32(0x657C, 0xBFF081B8);
+ WREG32(0x6578, 0x700);
+ WREG32(0x657C, 0x82608010);
+ WREG32(0x6578, 0x701);
+ WREG32(0x657C, 0x85A08600);
+ WREG32(0x6578, 0x702);
+ WREG32(0x657C, 0x800081F0);
+ WREG32(0x6578, 0x800);
+ WREG32(0x657C, 0x8228BFF8);
+ WREG32(0x6578, 0x801);
+ WREG32(0x657C, 0x85E085E0);
+ WREG32(0x6578, 0x802);
+ WREG32(0x657C, 0xBFF88228);
+ WREG32(0x6578, 0x10000);
+ WREG32(0x657C, 0x82A8BF00);
+ WREG32(0x6578, 0x10001);
+ WREG32(0x657C, 0x82A08CC0);
+ WREG32(0x6578, 0x10002);
+ WREG32(0x657C, 0x8008BEF8);
+ WREG32(0x6578, 0x10100);
+ WREG32(0x657C, 0x81F0BF28);
+ WREG32(0x6578, 0x10101);
+ WREG32(0x657C, 0x83608CA0);
+ WREG32(0x6578, 0x10102);
+ WREG32(0x657C, 0x8018BED0);
+ WREG32(0x6578, 0x10200);
+ WREG32(0x657C, 0x8148BF38);
+ WREG32(0x6578, 0x10201);
+ WREG32(0x657C, 0x84408C80);
+ WREG32(0x6578, 0x10202);
+ WREG32(0x657C, 0x8008BEB8);
+ WREG32(0x6578, 0x10300);
+ WREG32(0x657C, 0x80B0BF78);
+ WREG32(0x6578, 0x10301);
+ WREG32(0x657C, 0x85008C20);
+ WREG32(0x6578, 0x10302);
+ WREG32(0x657C, 0x8020BEA0);
+ WREG32(0x6578, 0x10400);
+ WREG32(0x657C, 0x8028BF90);
+ WREG32(0x6578, 0x10401);
+ WREG32(0x657C, 0x85E08BC0);
+ WREG32(0x6578, 0x10402);
+ WREG32(0x657C, 0x8018BE90);
+ WREG32(0x6578, 0x10500);
+ WREG32(0x657C, 0xBFB8BFB0);
+ WREG32(0x6578, 0x10501);
+ WREG32(0x657C, 0x86C08B40);
+ WREG32(0x6578, 0x10502);
+ WREG32(0x657C, 0x8010BE90);
+ WREG32(0x6578, 0x10600);
+ WREG32(0x657C, 0xBF58BFC8);
+ WREG32(0x6578, 0x10601);
+ WREG32(0x657C, 0x87A08AA0);
+ WREG32(0x6578, 0x10602);
+ WREG32(0x657C, 0x8010BE98);
+ WREG32(0x6578, 0x10700);
+ WREG32(0x657C, 0xBF10BFF0);
+ WREG32(0x6578, 0x10701);
+ WREG32(0x657C, 0x886089E0);
+ WREG32(0x6578, 0x10702);
+ WREG32(0x657C, 0x8018BEB0);
+ WREG32(0x6578, 0x10800);
+ WREG32(0x657C, 0xBED8BFE8);
+ WREG32(0x6578, 0x10801);
+ WREG32(0x657C, 0x89408940);
+ WREG32(0x6578, 0x10802);
+ WREG32(0x657C, 0xBFE8BED8);
+ WREG32(0x6578, 0x20000);
+ WREG32(0x657C, 0x80008000);
+ WREG32(0x6578, 0x20001);
+ WREG32(0x657C, 0x90008000);
+ WREG32(0x6578, 0x20002);
+ WREG32(0x657C, 0x80008000);
+ WREG32(0x6578, 0x20003);
+ WREG32(0x657C, 0x80008000);
+ WREG32(0x6578, 0x20100);
+ WREG32(0x657C, 0x80108000);
+ WREG32(0x6578, 0x20101);
+ WREG32(0x657C, 0x8FE0BF70);
+ WREG32(0x6578, 0x20102);
+ WREG32(0x657C, 0xBFE880C0);
+ WREG32(0x6578, 0x20103);
+ WREG32(0x657C, 0x80008000);
+ WREG32(0x6578, 0x20200);
+ WREG32(0x657C, 0x8018BFF8);
+ WREG32(0x6578, 0x20201);
+ WREG32(0x657C, 0x8F80BF08);
+ WREG32(0x6578, 0x20202);
+ WREG32(0x657C, 0xBFD081A0);
+ WREG32(0x6578, 0x20203);
+ WREG32(0x657C, 0xBFF88000);
+ WREG32(0x6578, 0x20300);
+ WREG32(0x657C, 0x80188000);
+ WREG32(0x6578, 0x20301);
+ WREG32(0x657C, 0x8EE0BEC0);
+ WREG32(0x6578, 0x20302);
+ WREG32(0x657C, 0xBFB082A0);
+ WREG32(0x6578, 0x20303);
+ WREG32(0x657C, 0x80008000);
+ WREG32(0x6578, 0x20400);
+ WREG32(0x657C, 0x80188000);
+ WREG32(0x6578, 0x20401);
+ WREG32(0x657C, 0x8E00BEA0);
+ WREG32(0x6578, 0x20402);
+ WREG32(0x657C, 0xBF8883C0);
+ WREG32(0x6578, 0x20403);
+ WREG32(0x657C, 0x80008000);
+ WREG32(0x6578, 0x20500);
+ WREG32(0x657C, 0x80188000);
+ WREG32(0x6578, 0x20501);
+ WREG32(0x657C, 0x8D00BE90);
+ WREG32(0x6578, 0x20502);
+ WREG32(0x657C, 0xBF588500);
+ WREG32(0x6578, 0x20503);
+ WREG32(0x657C, 0x80008008);
+ WREG32(0x6578, 0x20600);
+ WREG32(0x657C, 0x80188000);
+ WREG32(0x6578, 0x20601);
+ WREG32(0x657C, 0x8BC0BE98);
+ WREG32(0x6578, 0x20602);
+ WREG32(0x657C, 0xBF308660);
+ WREG32(0x6578, 0x20603);
+ WREG32(0x657C, 0x80008008);
+ WREG32(0x6578, 0x20700);
+ WREG32(0x657C, 0x80108000);
+ WREG32(0x6578, 0x20701);
+ WREG32(0x657C, 0x8A80BEB0);
+ WREG32(0x6578, 0x20702);
+ WREG32(0x657C, 0xBF0087C0);
+ WREG32(0x6578, 0x20703);
+ WREG32(0x657C, 0x80008008);
+ WREG32(0x6578, 0x20800);
+ WREG32(0x657C, 0x80108000);
+ WREG32(0x6578, 0x20801);
+ WREG32(0x657C, 0x8920BED0);
+ WREG32(0x6578, 0x20802);
+ WREG32(0x657C, 0xBED08920);
+ WREG32(0x6578, 0x20803);
+ WREG32(0x657C, 0x80008010);
+ WREG32(0x6578, 0x30000);
+ WREG32(0x657C, 0x90008000);
+ WREG32(0x6578, 0x30001);
+ WREG32(0x657C, 0x80008000);
+ WREG32(0x6578, 0x30100);
+ WREG32(0x657C, 0x8FE0BF90);
+ WREG32(0x6578, 0x30101);
+ WREG32(0x657C, 0xBFF880A0);
+ WREG32(0x6578, 0x30200);
+ WREG32(0x657C, 0x8F60BF40);
+ WREG32(0x6578, 0x30201);
+ WREG32(0x657C, 0xBFE88180);
+ WREG32(0x6578, 0x30300);
+ WREG32(0x657C, 0x8EC0BF00);
+ WREG32(0x6578, 0x30301);
+ WREG32(0x657C, 0xBFC88280);
+ WREG32(0x6578, 0x30400);
+ WREG32(0x657C, 0x8DE0BEE0);
+ WREG32(0x6578, 0x30401);
+ WREG32(0x657C, 0xBFA083A0);
+ WREG32(0x6578, 0x30500);
+ WREG32(0x657C, 0x8CE0BED0);
+ WREG32(0x6578, 0x30501);
+ WREG32(0x657C, 0xBF7884E0);
+ WREG32(0x6578, 0x30600);
+ WREG32(0x657C, 0x8BA0BED8);
+ WREG32(0x6578, 0x30601);
+ WREG32(0x657C, 0xBF508640);
+ WREG32(0x6578, 0x30700);
+ WREG32(0x657C, 0x8A60BEE8);
+ WREG32(0x6578, 0x30701);
+ WREG32(0x657C, 0xBF2087A0);
+ WREG32(0x6578, 0x30800);
+ WREG32(0x657C, 0x8900BF00);
+ WREG32(0x6578, 0x30801);
+ WREG32(0x657C, 0xBF008900);
+}
+
+static void
+atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ ENABLE_YUV_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
+ uint32_t temp, reg;
+
+ memset(&args, 0, sizeof(args));
+
+ if (rdev->family >= CHIP_R600)
+ reg = R600_BIOS_3_SCRATCH;
+ else
+ reg = RADEON_BIOS_3_SCRATCH;
+
+ /* XXX: fix up scratch reg handling */
+ temp = RREG32(reg);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+ WREG32(reg, (ATOM_S3_TV1_ACTIVE |
+ (radeon_crtc->crtc_id << 18)));
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+ WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
+ else
+ WREG32(reg, 0);
+
+ if (enable)
+ args.ucEnable = ATOM_ENABLE;
+ args.ucCRTC = radeon_crtc->crtc_id;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ WREG32(reg, temp);
+}
+
+static void
+atombios_overscan_setup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ SET_CRTC_OVERSCAN_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
+
+ memset(&args, 0, sizeof(args));
+
+ args.usOverscanRight = 0;
+ args.usOverscanLeft = 0;
+ args.usOverscanBottom = 0;
+ args.usOverscanTop = 0;
+ args.ucCRTC = radeon_crtc->crtc_id;
+
+ if (radeon_encoder->flags & RADEON_USE_RMX) {
+ if (radeon_encoder->rmx_type == RMX_FULL) {
+ args.usOverscanRight = 0;
+ args.usOverscanLeft = 0;
+ args.usOverscanBottom = 0;
+ args.usOverscanTop = 0;
+ } else if (radeon_encoder->rmx_type == RMX_CENTER) {
+ args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
+ args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
+ args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+ args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+ } else if (radeon_encoder->rmx_type == RMX_ASPECT) {
+ int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
+ int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
+
+ if (a1 > a2) {
+ args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+ args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+ } else if (a2 > a1) {
+				args.usOverscanTop = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+				args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+ }
+ }
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static void
+atombios_scaler_setup(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ ENABLE_SCALER_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
+ /* fixme - fill in enc_priv for atom dac */
+ enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+ if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
+ return;
+
+ memset(&args, 0, sizeof(args));
+
+ args.ucScaler = radeon_crtc->crtc_id;
+
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+ switch (tv_std) {
+ case TV_STD_NTSC:
+ default:
+ args.ucTVStandard = ATOM_TV_NTSC;
+ break;
+ case TV_STD_PAL:
+ args.ucTVStandard = ATOM_TV_PAL;
+ break;
+ case TV_STD_PAL_M:
+ args.ucTVStandard = ATOM_TV_PALM;
+ break;
+ case TV_STD_PAL_60:
+ args.ucTVStandard = ATOM_TV_PAL60;
+ break;
+ case TV_STD_NTSC_J:
+ args.ucTVStandard = ATOM_TV_NTSCJ;
+ break;
+ case TV_STD_SCART_PAL:
+ args.ucTVStandard = ATOM_TV_PAL; /* ??? */
+ break;
+ case TV_STD_SECAM:
+ args.ucTVStandard = ATOM_TV_SECAM;
+ break;
+ case TV_STD_PAL_CN:
+ args.ucTVStandard = ATOM_TV_PALCN;
+ break;
+ }
+ args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) {
+ args.ucTVStandard = ATOM_TV_CV;
+ args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+ } else if (radeon_encoder->flags & RADEON_USE_RMX) {
+ if (radeon_encoder->rmx_type == RMX_FULL)
+ args.ucEnable = ATOM_SCALER_EXPANSION;
+ else if (radeon_encoder->rmx_type == RMX_CENTER)
+ args.ucEnable = ATOM_SCALER_CENTER;
+ else if (radeon_encoder->rmx_type == RMX_ASPECT)
+ args.ucEnable = ATOM_SCALER_EXPANSION;
+ } else {
+ if (ASIC_IS_AVIVO(rdev))
+ args.ucEnable = ATOM_SCALER_DISABLE;
+ else
+ args.ucEnable = ATOM_SCALER_CENTER;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)
+ && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) {
+ atom_rv515_force_tv_scaler(rdev);
+ }
+
+}
+
+static void
+radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+ int index = 0;
+ bool is_dig = false;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ is_dig = true;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
+ break;
+ }
+
+ if (is_dig) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+ break;
+ }
+ } else {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ args.ucAction = ATOM_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ args.ucAction = ATOM_DISABLE;
+ break;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+union crtc_source_param {
+ SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
+ SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
+};
+
+static void
+atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	union crtc_source_param args;
+ int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+ uint8_t frev, crev;
+
+ memset(&args, 0, sizeof(args));
+
+ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ default:
+ if (ASIC_IS_AVIVO(rdev))
+ args.v1.ucCRTC = radeon_crtc->crtc_id;
+ else {
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+ args.v1.ucCRTC = radeon_crtc->crtc_id;
+ } else {
+ args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
+ }
+ }
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
+ args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
+ else
+ args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+ else
+ args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+ else
+ args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+ break;
+ }
+ break;
+ case 2:
+ args.v2.ucCRTC = radeon_crtc->crtc_id;
+ args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (ASIC_IS_DCE32(rdev)) {
+ if (radeon_crtc->crtc_id)
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ } else
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
+ break;
+ }
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ break;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static void
+atombios_apply_encoder_quirks(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+ /* Funky macbooks */
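+ /* 0x106b is Apple's PCI subsystem vendor ID; these panels want LVDS
+ * bit-depth truncation and spatial dithering left disabled. */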
+ if ((dev->pdev->device == 0x71C5) &&
+ (dev->pdev->subsystem_vendor == 0x106b) &&
+ (dev->pdev->subsystem_device == 0x0080)) {
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+ uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
+
+ lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+ lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+
+ WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
+ }
+ }
+
+ /* set scaler clears this on some chips */
+ if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, AVIVO_D1MODE_INTERLEAVE_EN);
+}
+
+static void
+radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+ if (radeon_encoder->enc_priv) {
+ struct radeon_encoder_atom_dig *dig;
+
+ dig = radeon_encoder->enc_priv;
+ dig->dig_block = radeon_crtc->crtc_id;
+ }
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ atombios_overscan_setup(encoder, mode, adjusted_mode);
+ atombios_scaler_setup(encoder);
+ atombios_set_encoder_crtc_source(encoder);
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
+ atombios_yuv_setup(encoder, true);
+ else
+ atombios_yuv_setup(encoder, false);
+ }
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+
+ /* setup and enable the encoder and transmitter */
+ atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ atombios_ddia_setup(encoder, ATOM_ENABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ atombios_external_tmds_setup(encoder, ATOM_ENABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_ENABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_ENABLE);
+ break;
+ }
+ atombios_apply_encoder_quirks(encoder, adjusted_mode);
+}
+
+static bool
+atombios_dac_load_detect(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
+ ATOM_DEVICE_CV_SUPPORT |
+ ATOM_DEVICE_CRT_SUPPORT)) {
+ DAC_LOAD_DETECTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
+ uint8_t frev, crev;
+
+ memset(&args, 0, sizeof(args));
+
+ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+
+ args.sDacload.ucMisc = 0;
+
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
+ args.sDacload.ucDacType = ATOM_DAC_A;
+ else
+ args.sDacload.ucDacType = ATOM_DAC_B;
+
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT)
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
+ else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT)
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
+ else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
+ if (crev >= 3)
+ args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+ } else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
+ if (crev >= 3)
+ args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ return true;
+ } else
+ return false;
+}
+
+static enum drm_connector_status
+radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_0_scratch;
+
+ if (!atombios_dac_load_detect(encoder)) {
+ DRM_DEBUG("detect returned false \n");
+ return connector_status_unknown;
+ }
+
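+ /* the DAC_LoadDetection table latches its result into BIOS scratch register 0 */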
+ if (rdev->family >= CHIP_R600)
+ bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+ else
+ bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+
+ DRM_DEBUG("Bios 0 scratch %x\n", bios_0_scratch);
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+ return connector_status_connected;
+ } else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+ return connector_status_connected;
+ } else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+ return connector_status_connected;
+ } else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+ return connector_status_connected; /* composite TV */
+ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+ return connector_status_connected; /* S-video TV */
+ }
+ return connector_status_disconnected;
+}
+
+static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+{
+ radeon_atom_output_lock(encoder, true);
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
+{
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+ radeon_atom_output_lock(encoder, false);
+}
+
+static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
+ .dpms = radeon_atom_encoder_dpms,
+ .mode_fixup = radeon_atom_mode_fixup,
+ .prepare = radeon_atom_encoder_prepare,
+ .mode_set = radeon_atom_encoder_mode_set,
+ .commit = radeon_atom_encoder_commit,
+ /* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
+ .dpms = radeon_atom_encoder_dpms,
+ .mode_fixup = radeon_atom_mode_fixup,
+ .prepare = radeon_atom_encoder_prepare,
+ .mode_set = radeon_atom_encoder_mode_set,
+ .commit = radeon_atom_encoder_commit,
+ .detect = radeon_atom_dac_detect,
+};
+
+void radeon_enc_destroy(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
+ .destroy = radeon_enc_destroy,
+};
+
+struct radeon_encoder_atom_dig *
+radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
+{
+ struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+ if (!dig)
+ return NULL;
+
+ /* coherent mode by default */
+ dig->coherent_mode = true;
+
+ return dig;
+}
+
+void
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+{
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+
+ /* see if we already added it */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->encoder_id == encoder_id) {
+ radeon_encoder->devices |= supported_device;
+ return;
+ }
+
+ }
+
+ /* add a new one */
+ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+ if (!radeon_encoder)
+ return;
+
+ encoder = &radeon_encoder->base;
+ encoder->possible_crtcs = 0x3;
+ encoder->possible_clones = 0;
+
+ radeon_encoder->enc_priv = NULL;
+
+ radeon_encoder->encoder_id = encoder_id;
+ radeon_encoder->devices = supported_device;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ radeon_encoder->rmx_type = RMX_FULL;
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+ } else {
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ }
+ drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
new file mode 100644
index 00000000000..fa86d398945
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -0,0 +1,825 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+ /*
+ * Modularization
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+struct radeon_fb_device {
+ struct radeon_device *rdev;
+ struct drm_display_mode *mode;
+ struct radeon_framebuffer *rfb;
+ int crtc_count;
+ /* crtc currently bound to this */
+ uint32_t crtc_ids[2];
+};
+
+static int radeonfb_setcolreg(unsigned regno,
+ unsigned red,
+ unsigned green,
+ unsigned blue,
+ unsigned transp,
+ struct fb_info *info)
+{
+ struct radeon_fb_device *rfbdev = info->par;
+ struct drm_device *dev = rfbdev->rdev->ddev;
+ struct drm_crtc *crtc;
+ int i;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_mode_set *modeset = &radeon_crtc->mode_set;
+ struct drm_framebuffer *fb = modeset->fb;
+
+ for (i = 0; i < rfbdev->crtc_count; i++) {
+ if (crtc->base.id == rfbdev->crtc_ids[i]) {
+ break;
+ }
+ }
+ if (i == rfbdev->crtc_count) {
+ continue;
+ }
+ if (regno > 255) {
+ return 1;
+ }
+ if (fb->depth == 8) {
+ radeon_crtc_fb_gamma_set(crtc, red, green, blue, regno);
+ return 0;
+ }
+
+ if (regno < 16) {
+ switch (fb->depth) {
+ case 15:
+ fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
+ ((green & 0xf800) >> 6) |
+ ((blue & 0xf800) >> 11);
+ break;
+ case 16:
+ fb->pseudo_palette[regno] = (red & 0xf800) |
+ ((green & 0xfc00) >> 5) |
+ ((blue & 0xf800) >> 11);
+ break;
+ case 24:
+ case 32:
+ fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
+ (green & 0xff00) |
+ ((blue & 0xff00) >> 8);
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static int radeonfb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct radeon_fb_device *rfbdev = info->par;
+ struct radeon_framebuffer *rfb = rfbdev->rfb;
+ struct drm_framebuffer *fb = &rfb->base;
+ int depth;
+
+ if (var->pixclock == -1 || !var->pixclock) {
+ return -EINVAL;
+ }
+ /* Need to resize the fb object !!! */
+ if (var->xres > fb->width || var->yres > fb->height) {
+ DRM_ERROR("Requested width/height is greater than current fb "
+ "object %dx%d > %dx%d\n", var->xres, var->yres,
+ fb->width, fb->height);
+ DRM_ERROR("Need resizing code.\n");
+ return -EINVAL;
+ }
+
+ switch (var->bits_per_pixel) {
+ case 16:
+ depth = (var->green.length == 6) ? 16 : 15;
+ break;
+ case 32:
+ depth = (var->transp.length > 0) ? 32 : 24;
+ break;
+ default:
+ depth = var->bits_per_pixel;
+ break;
+ }
+
+ switch (depth) {
+ case 8:
+ var->red.offset = 0;
+ var->green.offset = 0;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 15:
+ var->red.offset = 10;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 5;
+ var->blue.length = 5;
+ var->transp.length = 1;
+ var->transp.offset = 15;
+ break;
+ case 16:
+ var->red.offset = 11;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 6;
+ var->blue.length = 5;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 24:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 32:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 8;
+ var->transp.offset = 24;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* this will let fbcon do the mode init */
+static int radeonfb_set_par(struct fb_info *info)
+{
+ struct radeon_fb_device *rfbdev = info->par;
+ struct drm_device *dev = rfbdev->rdev->ddev;
+ struct fb_var_screeninfo *var = &info->var;
+ struct drm_crtc *crtc;
+ int ret;
+ int i;
+
+ if (var->pixclock != -1) {
+ DRM_ERROR("PIXEL CLCOK SET\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ for (i = 0; i < rfbdev->crtc_count; i++) {
+ if (crtc->base.id == rfbdev->crtc_ids[i]) {
+ break;
+ }
+ }
+ if (i == rfbdev->crtc_count) {
+ continue;
+ }
+ if (crtc->fb == radeon_crtc->mode_set.fb) {
+ mutex_lock(&dev->mode_config.mutex);
+ ret = crtc->funcs->set_config(&radeon_crtc->mode_set);
+ mutex_unlock(&dev->mode_config.mutex);
+ if (ret) {
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static int radeonfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct radeon_fb_device *rfbdev = info->par;
+ struct drm_device *dev = rfbdev->rdev->ddev;
+ struct drm_mode_set *modeset;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ int ret = 0;
+ int i;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for (i = 0; i < rfbdev->crtc_count; i++) {
+ if (crtc->base.id == rfbdev->crtc_ids[i]) {
+ break;
+ }
+ }
+
+ if (i == rfbdev->crtc_count) {
+ continue;
+ }
+
+ radeon_crtc = to_radeon_crtc(crtc);
+ modeset = &radeon_crtc->mode_set;
+
+ modeset->x = var->xoffset;
+ modeset->y = var->yoffset;
+
+ if (modeset->num_connectors) {
+ mutex_lock(&dev->mode_config.mutex);
+ ret = crtc->funcs->set_config(modeset);
+ mutex_unlock(&dev->mode_config.mutex);
+ if (!ret) {
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ }
+ }
+ }
+ return ret;
+}
+
+static void radeonfb_on(struct fb_info *info)
+{
+ struct radeon_fb_device *rfbdev = info->par;
+ struct drm_device *dev = rfbdev->rdev->ddev;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int i;
+
+ /*
+ * For each CRTC in this fb, turn the CRTC back on, then find all
+ * associated encoders and turn them on as well.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ for (i = 0; i < rfbdev->crtc_count; i++) {
+ if (crtc->base.id == rfbdev->crtc_ids[i]) {
+ break;
+ }
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
+
+ encoder_funcs = encoder->helper_private;
+ mutex_lock(&dev->mode_config.mutex);
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+ }
+ }
+}
+
+static void radeonfb_off(struct fb_info *info, int dpms_mode)
+{
+ struct radeon_fb_device *rfbdev = info->par;
+ struct drm_device *dev = rfbdev->rdev->ddev;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int i;
+
+ /*
+ * For each CRTC in this fb, put all associated encoders into the
+ * requested DPMS state, then turn the CRTC off when powering down.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ for (i = 0; i < rfbdev->crtc_count; i++) {
+ if (crtc->base.id == rfbdev->crtc_ids[i]) {
+ break;
+ }
+ }
+
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
+
+ encoder_funcs = encoder->helper_private;
+ mutex_lock(&dev->mode_config.mutex);
+ encoder_funcs->dpms(encoder, dpms_mode);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+ }
+ if (dpms_mode == DRM_MODE_DPMS_OFF) {
+ mutex_lock(&dev->mode_config.mutex);
+ crtc_funcs->dpms(crtc, dpms_mode);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+ }
+}
+
+int radeonfb_blank(int blank, struct fb_info *info)
+{
+ switch (blank) {
+ case FB_BLANK_UNBLANK:
+ radeonfb_on(info);
+ break;
+ case FB_BLANK_NORMAL:
+ radeonfb_off(info, DRM_MODE_DPMS_STANDBY);
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ radeonfb_off(info, DRM_MODE_DPMS_STANDBY);
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ radeonfb_off(info, DRM_MODE_DPMS_SUSPEND);
+ break;
+ case FB_BLANK_POWERDOWN:
+ radeonfb_off(info, DRM_MODE_DPMS_OFF);
+ break;
+ }
+ return 0;
+}
+
+static struct fb_ops radeonfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = radeonfb_check_var,
+ .fb_set_par = radeonfb_set_par,
+ .fb_setcolreg = radeonfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_pan_display = radeonfb_pan_display,
+ .fb_blank = radeonfb_blank,
+};
+
+/**
+ * Currently it is assumed that the old framebuffer is reused.
+ *
+ * LOCKING
+ * caller should hold the mode config lock.
+ *
+ */
+int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_display_mode *mode = crtc->desired_mode;
+
+ fb = crtc->fb;
+ if (fb == NULL) {
+ return 1;
+ }
+ info = fb->fbdev;
+ if (info == NULL) {
+ return 1;
+ }
+ if (mode == NULL) {
+ return 1;
+ }
+ info->var.xres = mode->hdisplay;
+ info->var.right_margin = mode->hsync_start - mode->hdisplay;
+ info->var.hsync_len = mode->hsync_end - mode->hsync_start;
+ info->var.left_margin = mode->htotal - mode->hsync_end;
+ info->var.yres = mode->vdisplay;
+ info->var.lower_margin = mode->vsync_start - mode->vdisplay;
+ info->var.vsync_len = mode->vsync_end - mode->vsync_start;
+ info->var.upper_margin = mode->vtotal - mode->vsync_end;
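+ /* fbdev pixclock is in picoseconds per pixel; evaluate it stepwise so
+ * the 32-bit intermediate values do not overflow */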
+ info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
+ /* avoid overflow */
+ info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
+
+ return 0;
+}
+EXPORT_SYMBOL(radeonfb_resize);
+
+static struct drm_mode_set panic_mode;
+
+int radeonfb_panic(struct notifier_block *n, unsigned long unused,
+ void *panic_str)
+{
+ DRM_ERROR("panic occurred, switching back to text console\n");
+ drm_crtc_helper_set_config(&panic_mode);
+ return 0;
+}
+EXPORT_SYMBOL(radeonfb_panic);
+
+static struct notifier_block paniced = {
+ .notifier_call = radeonfb_panic,
+};
+
+static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp)
+{
+ int aligned = width;
+ int align_large = (ASIC_IS_AVIVO(rdev));
+ int pitch_mask = 0;
+
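+ /* round the width up so the pitch in bytes is a multiple of 256 on
+ * AVIVO parts (64 or 128 bytes on older ASICs) */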
+ switch (bpp / 8) {
+ case 1:
+ pitch_mask = align_large ? 255 : 127;
+ break;
+ case 2:
+ pitch_mask = align_large ? 127 : 31;
+ break;
+ case 3:
+ case 4:
+ pitch_mask = align_large ? 63 : 15;
+ break;
+ }
+
+ aligned += pitch_mask;
+ aligned &= ~pitch_mask;
+ return aligned;
+}
+
+int radeonfb_create(struct radeon_device *rdev,
+ uint32_t fb_width, uint32_t fb_height,
+ uint32_t surface_width, uint32_t surface_height,
+ struct radeon_framebuffer **rfb_p)
+{
+ struct fb_info *info;
+ struct radeon_fb_device *rfbdev;
+ struct drm_framebuffer *fb;
+ struct radeon_framebuffer *rfb;
+ struct drm_mode_fb_cmd mode_cmd;
+ struct drm_gem_object *gobj = NULL;
+ struct radeon_object *robj = NULL;
+ struct device *device = &rdev->pdev->dev;
+ int size, aligned_size, ret;
+ void *fbptr = NULL;
+
+ mode_cmd.width = surface_width;
+ mode_cmd.height = surface_height;
+ mode_cmd.bpp = 32;
+ /* need to align pitch with crtc limits */
+ mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8);
+ mode_cmd.depth = 24;
+
+ size = mode_cmd.pitch * mode_cmd.height;
+ aligned_size = ALIGN(size, PAGE_SIZE);
+
+ ret = radeon_gem_object_create(rdev, aligned_size, 0,
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_kernel,
+ false, &gobj);
+ if (ret) {
+ printk(KERN_ERR "failed to allocate framebuffer\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ robj = gobj->driver_private;
+
+ mutex_lock(&rdev->ddev->struct_mutex);
+ fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
+ if (fb == NULL) {
+ DRM_ERROR("failed to allocate fb.\n");
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
+ list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
+
+ rfb = to_radeon_framebuffer(fb);
+ *rfb_p = rfb;
+ rdev->fbdev_rfb = rfb;
+
+ info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
+ if (info == NULL) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+ rfbdev = info->par;
+
+ ret = radeon_object_kmap(robj, &fbptr);
+ if (ret) {
+ goto out_unref;
+ }
+
+ strcpy(info->fix.id, "radeondrmfb");
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 1; /* doing it in hw */
+ info->fix.ypanstep = 1; /* doing it in hw */
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_I830;
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &radeonfb_ops;
+ info->fix.line_length = fb->pitch;
+ info->screen_base = fbptr;
+ info->fix.smem_start = (unsigned long)fbptr;
+ info->fix.smem_len = size;
+ info->screen_size = size;
+ info->pseudo_palette = fb->pseudo_palette;
+ info->var.xres_virtual = fb->width;
+ info->var.yres_virtual = fb->height;
+ info->var.bits_per_pixel = fb->bits_per_pixel;
+ info->var.xoffset = 0;
+ info->var.yoffset = 0;
+ info->var.activate = FB_ACTIVATE_NOW;
+ info->var.height = -1;
+ info->var.width = -1;
+ info->var.xres = fb_width;
+ info->var.yres = fb_height;
+ info->fix.mmio_start = pci_resource_start(rdev->pdev, 2);
+ info->fix.mmio_len = pci_resource_len(rdev->pdev, 2);
+ info->pixmap.size = 64*1024;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+ if (info->screen_base == NULL) {
+ ret = -ENOSPC;
+ goto out_unref;
+ }
+ DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
+ DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
+ DRM_INFO("size %lu\n", (unsigned long)size);
+ DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO(" pitch is %d\n", fb->pitch);
+
+ switch (fb->depth) {
+ case 8:
+ info->var.red.offset = 0;
+ info->var.green.offset = 0;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8; /* 8bit DAC */
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+ break;
+ case 15:
+ info->var.red.offset = 10;
+ info->var.green.offset = 5;
+ info->var.blue.offset = 0;
+ info->var.red.length = 5;
+ info->var.green.length = 5;
+ info->var.blue.length = 5;
+ info->var.transp.offset = 15;
+ info->var.transp.length = 1;
+ break;
+ case 16:
+ info->var.red.offset = 11;
+ info->var.green.offset = 5;
+ info->var.blue.offset = 0;
+ info->var.red.length = 5;
+ info->var.green.length = 6;
+ info->var.blue.length = 5;
+ info->var.transp.offset = 0;
+ break;
+ case 24:
+ info->var.red.offset = 16;
+ info->var.green.offset = 8;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+ break;
+ case 32:
+ info->var.red.offset = 16;
+ info->var.green.offset = 8;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 24;
+ info->var.transp.length = 8;
+ break;
+ default:
+ break;
+ }
+
+ fb->fbdev = info;
+ rfbdev->rfb = rfb;
+ rfbdev->rdev = rdev;
+
+ mutex_unlock(&rdev->ddev->struct_mutex);
+ return 0;
+
+out_unref:
+ if (robj) {
+ radeon_object_kunmap(robj);
+ }
+ if (ret) {
+ list_del(&fb->filp_head);
+ drm_gem_object_unreference(gobj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+ }
+ drm_gem_object_unreference(gobj);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+out:
+ return ret;
+}
+
+static int radeonfb_single_fb_probe(struct radeon_device *rdev)
+{
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
+ unsigned int surface_width = 0, surface_height = 0;
+ int new_fb = 0;
+ int crtc_count = 0;
+ int ret, i, conn_count = 0;
+ struct radeon_framebuffer *rfb;
+ struct fb_info *info;
+ struct radeon_fb_device *rfbdev;
+ struct drm_mode_set *modeset = NULL;
+
+ /* first up get a count of crtcs now in use and new min/maxes width/heights */
+ list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
+ if (drm_helper_crtc_in_use(crtc)) {
+ if (crtc->desired_mode) {
+ if (crtc->desired_mode->hdisplay < fb_width)
+ fb_width = crtc->desired_mode->hdisplay;
+
+ if (crtc->desired_mode->vdisplay < fb_height)
+ fb_height = crtc->desired_mode->vdisplay;
+
+ if (crtc->desired_mode->hdisplay > surface_width)
+ surface_width = crtc->desired_mode->hdisplay;
+
+ if (crtc->desired_mode->vdisplay > surface_height)
+ surface_height = crtc->desired_mode->vdisplay;
+ }
+ crtc_count++;
+ }
+ }
+
+ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
+ /* hmm everyone went away - assume VGA cable just fell out
+ and will come back later. */
+ return 0;
+ }
+
+ /* do we have an fb already? */
+ if (list_empty(&rdev->ddev->mode_config.fb_kernel_list)) {
+ /* create an fb if we don't have one */
+ ret = radeonfb_create(rdev, fb_width, fb_height, surface_width, surface_height, &rfb);
+ if (ret) {
+ return -EINVAL;
+ }
+ new_fb = 1;
+ } else {
+ struct drm_framebuffer *fb;
+ fb = list_first_entry(&rdev->ddev->mode_config.fb_kernel_list, struct drm_framebuffer, filp_head);
+ rfb = to_radeon_framebuffer(fb);
+
+ /* if someone hotplugs something bigger than we have already allocated, we are pwned.
+ As really we can't resize an fbdev that is in the wild currently due to fbdev
+ not really being designed for the lower layers moving stuff around under it.
+ - so in the grand style of things - punt. */
+ if ((fb->width < surface_width) || (fb->height < surface_height)) {
+ DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
+ return -EINVAL;
+ }
+ }
+
+ info = rfb->base.fbdev;
+ rdev->fbdev_info = info;
+ rfbdev = info->par;
+
+ crtc_count = 0;
+ /* okay we need to setup new connector sets in the crtcs */
+ list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ modeset = &radeon_crtc->mode_set;
+ modeset->fb = &rfb->base;
+ conn_count = 0;
+ list_for_each_entry(connector, &rdev->ddev->mode_config.connector_list, head) {
+ if (connector->encoder)
+ if (connector->encoder->crtc == modeset->crtc) {
+ modeset->connectors[conn_count] = connector;
+ conn_count++;
+ if (conn_count > RADEONFB_CONN_LIMIT)
+ BUG();
+ }
+ }
+
+ for (i = conn_count; i < RADEONFB_CONN_LIMIT; i++)
+ modeset->connectors[i] = NULL;
+
+
+ rfbdev->crtc_ids[crtc_count++] = crtc->base.id;
+
+ modeset->num_connectors = conn_count;
+ if (modeset->crtc->desired_mode) {
+ if (modeset->mode) {
+ drm_mode_destroy(rdev->ddev, modeset->mode);
+ }
+ modeset->mode = drm_mode_duplicate(rdev->ddev,
+ modeset->crtc->desired_mode);
+ }
+ }
+ rfbdev->crtc_count = crtc_count;
+
+ if (new_fb) {
+ info->var.pixclock = -1;
+ if (register_framebuffer(info) < 0)
+ return -EINVAL;
+ } else {
+ radeonfb_set_par(info);
+ }
+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+ info->fix.id);
+
+ /* Switch back to kernel console on panic */
+ panic_mode = *modeset;
+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
+ printk(KERN_INFO "registered panic notifier\n");
+
+ return 0;
+}
+
+int radeonfb_probe(struct drm_device *dev)
+{
+ int ret;
+
+ /* something has changed in the lower levels of hell - deal with it
+ here */
+
+ /* two modes : a) 1 fb to rule all crtcs.
+ b) one fb per crtc.
+ two actions 1) new connected device
+ 2) device removed.
+ case a/1 : if the fb surface isn't big enough - resize the surface fb.
+ if the fb size isn't big enough - resize fb into surface.
+ if everything big enough configure the new crtc/etc.
+ case a/2 : undo the configuration
+ possibly resize down the fb to fit the new configuration.
+ case b/1 : see if it is on a new crtc - setup a new fb and add it.
+ case b/2 : teardown the new fb.
+ */
+ ret = radeonfb_single_fb_probe(dev->dev_private);
+ return ret;
+}
+EXPORT_SYMBOL(radeonfb_probe);
+
+int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+{
+ struct fb_info *info;
+ struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
+ struct radeon_object *robj;
+
+ if (!fb) {
+ return -EINVAL;
+ }
+ info = fb->fbdev;
+ if (info) {
+ robj = rfb->obj->driver_private;
+ unregister_framebuffer(info);
+ radeon_object_kunmap(robj);
+ framebuffer_release(info);
+ }
+
+ printk(KERN_INFO "unregistered panic notifier\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
+ memset(&panic_mode, 0, sizeof(struct drm_mode_set));
+ return 0;
+}
+EXPORT_SYMBOL(radeonfb_remove);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
new file mode 100644
index 00000000000..96afbf5ae2a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ * Dave Airlie
+ */
+#include <linux/seq_file.h>
+#include <asm/atomic.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
+{
+ unsigned long irq_flags;
+
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ if (fence->emited) {
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ return 0;
+ }
+ fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
+ if (!rdev->cp.ready) {
+ /* FIXME: the CP is not running, assume everything is done
+ * right away
+ */
+ WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+ } else {
+ radeon_fence_ring_emit(rdev, fence);
+ }
+ fence->emited = true;
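+ /* allow the fence roughly 2 seconds to signal before it is treated as timed out */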
+ fence->timeout = jiffies + ((2000 * HZ) / 1000);
+ list_del(&fence->list);
+ list_add_tail(&fence->list, &rdev->fence_drv.emited);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ return 0;
+}
+
+static bool radeon_fence_poll_locked(struct radeon_device *rdev)
+{
+ struct radeon_fence *fence;
+ struct list_head *i, *n;
+ uint32_t seq;
+ bool wake = false;
+
+ if (rdev == NULL) {
+ return true;
+ }
+ if (rdev->shutdown) {
+ return true;
+ }
+ seq = RREG32(rdev->fence_drv.scratch_reg);
+ rdev->fence_drv.last_seq = seq;
+ n = NULL;
+ list_for_each(i, &rdev->fence_drv.emited) {
+ fence = list_entry(i, struct radeon_fence, list);
+ if (fence->seq == seq) {
+ n = i;
+ break;
+ }
+ }
+ /* all fences previous to this one are considered signaled */
+ if (n) {
+ i = n;
+ do {
+ n = i->prev;
+ list_del(i);
+ list_add_tail(i, &rdev->fence_drv.signaled);
+ fence = list_entry(i, struct radeon_fence, list);
+ fence->signaled = true;
+ i = n;
+ } while (i != &rdev->fence_drv.emited);
+ wake = true;
+ }
+ return wake;
+}
+
+static void radeon_fence_destroy(struct kref *kref)
+{
+ unsigned long irq_flags;
+ struct radeon_fence *fence;
+
+ fence = container_of(kref, struct radeon_fence, kref);
+ write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+ list_del(&fence->list);
+ fence->emited = false;
+ write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+ kfree(fence);
+}
+
+int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
+{
+ unsigned long irq_flags;
+
+ *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
+ if ((*fence) == NULL) {
+ return -ENOMEM;
+ }
+ kref_init(&((*fence)->kref));
+ (*fence)->rdev = rdev;
+ (*fence)->emited = false;
+ (*fence)->signaled = false;
+ (*fence)->seq = 0;
+ INIT_LIST_HEAD(&(*fence)->list);
+
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ return 0;
+}
+
+
+bool radeon_fence_signaled(struct radeon_fence *fence)
+{
+ unsigned long irq_flags;
+ bool signaled = false;
+
+ /* check the fence pointer before dereferencing it */
+ if (fence == NULL) {
+ return true;
+ }
+ if (fence->rdev->gpu_lockup) {
+ return true;
+ }
+ write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+ signaled = fence->signaled;
+ /* if we are shutting down, report all fences as signaled */
+ if (fence->rdev->shutdown) {
+ signaled = true;
+ }
+ if (!fence->emited) {
+ WARN(1, "Querying an unemited fence : %p !\n", fence);
+ signaled = true;
+ }
+ if (!signaled) {
+ radeon_fence_poll_locked(fence->rdev);
+ signaled = fence->signaled;
+ }
+ write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+ return signaled;
+}
+
+int radeon_fence_wait(struct radeon_fence *fence, bool interruptible)
+{
+ struct radeon_device *rdev;
+ unsigned long cur_jiffies;
+ unsigned long timeout;
+ bool expired = false;
+ int r;
+
+
+ if (fence == NULL) {
+ WARN(1, "Querying an invalid fence : %p !\n", fence);
+ return 0;
+ }
+ rdev = fence->rdev;
+ if (radeon_fence_signaled(fence)) {
+ return 0;
+ }
+retry:
+ cur_jiffies = jiffies;
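+ /* wait until the fence deadline; once it has passed, keep polling in 10ms slices */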
+ timeout = HZ / 100;
+ if (time_after(fence->timeout, cur_jiffies)) {
+ timeout = fence->timeout - cur_jiffies;
+ }
+ if (interruptible) {
+ r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
+ radeon_fence_signaled(fence), timeout);
+ if (unlikely(r == -ERESTARTSYS)) {
+ return -ERESTART;
+ }
+ } else {
+ r = wait_event_timeout(rdev->fence_drv.queue,
+ radeon_fence_signaled(fence), timeout);
+ }
+ if (unlikely(!radeon_fence_signaled(fence))) {
+ if (unlikely(r == 0)) {
+ expired = true;
+ }
+ if (unlikely(expired)) {
+ timeout = 1;
+ if (time_after(cur_jiffies, fence->timeout)) {
+ timeout = cur_jiffies - fence->timeout;
+ }
+ timeout = jiffies_to_msecs(timeout);
+ if (timeout > 500) {
+ DRM_ERROR("fence(%p:0x%08X) %lums timeout "
+ "going to reset GPU\n",
+ fence, fence->seq, timeout);
+ radeon_gpu_reset(rdev);
+ WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+ }
+ }
+ goto retry;
+ }
+ if (unlikely(expired)) {
+ rdev->fence_drv.count_timeout++;
+ cur_jiffies = jiffies;
+ timeout = 1;
+ if (time_after(cur_jiffies, fence->timeout)) {
+ timeout = cur_jiffies - fence->timeout;
+ }
+ timeout = jiffies_to_msecs(timeout);
+ DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
+ fence, fence->seq, timeout);
+ DRM_ERROR("last signaled fence(0x%08X)\n",
+ rdev->fence_drv.last_seq);
+ }
+ return 0;
+}
+
+int radeon_fence_wait_next(struct radeon_device *rdev)
+{
+ unsigned long irq_flags;
+ struct radeon_fence *fence;
+ int r;
+
+ if (rdev->gpu_lockup) {
+ return 0;
+ }
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ if (list_empty(&rdev->fence_drv.emited)) {
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ return 0;
+ }
+ fence = list_entry(rdev->fence_drv.emited.next,
+ struct radeon_fence, list);
+ radeon_fence_ref(fence);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ r = radeon_fence_wait(fence, false);
+ radeon_fence_unref(&fence);
+ return r;
+}
+
+int radeon_fence_wait_last(struct radeon_device *rdev)
+{
+ unsigned long irq_flags;
+ struct radeon_fence *fence;
+ int r;
+
+ if (rdev->gpu_lockup) {
+ return 0;
+ }
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ if (list_empty(&rdev->fence_drv.emited)) {
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ return 0;
+ }
+ fence = list_entry(rdev->fence_drv.emited.prev,
+ struct radeon_fence, list);
+ radeon_fence_ref(fence);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ r = radeon_fence_wait(fence, false);
+ radeon_fence_unref(&fence);
+ return r;
+}
+
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
+{
+ kref_get(&fence->kref);
+ return fence;
+}
+
+void radeon_fence_unref(struct radeon_fence **fence)
+{
+ struct radeon_fence *tmp = *fence;
+
+ *fence = NULL;
+ if (tmp) {
+ kref_put(&tmp->kref, &radeon_fence_destroy);
+ }
+}
+
+void radeon_fence_process(struct radeon_device *rdev)
+{
+ unsigned long irq_flags;
+ bool wake;
+
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ wake = radeon_fence_poll_locked(rdev);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ if (wake) {
+ wake_up_all(&rdev->fence_drv.queue);
+ }
+}
+
+int radeon_fence_driver_init(struct radeon_device *rdev)
+{
+ unsigned long irq_flags;
+ int r;
+
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
+ if (r) {
+ DRM_ERROR("Fence failed to get a scratch register.");
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ return r;
+ }
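+ /* the emitted fence sequence number ends up in this scratch register; start it at 0 */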
+ WREG32(rdev->fence_drv.scratch_reg, 0);
+ atomic_set(&rdev->fence_drv.seq, 0);
+ INIT_LIST_HEAD(&rdev->fence_drv.created);
+ INIT_LIST_HEAD(&rdev->fence_drv.emited);
+ INIT_LIST_HEAD(&rdev->fence_drv.signaled);
+ rdev->fence_drv.count_timeout = 0;
+ init_waitqueue_head(&rdev->fence_drv.queue);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ if (radeon_debugfs_fence_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for fence !\n");
+ }
+ return 0;
+}
+
+void radeon_fence_driver_fini(struct radeon_device *rdev)
+{
+ unsigned long irq_flags;
+
+ wake_up_all(&rdev->fence_drv.queue);
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ DRM_INFO("radeon: fence finalized\n");
+}
+
+
+/*
+ * Fence debugfs
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_fence *fence;
+
+ seq_printf(m, "Last signaled fence 0x%08X\n",
+ RREG32(rdev->fence_drv.scratch_reg));
+ if (!list_empty(&rdev->fence_drv.emited)) {
+ fence = list_entry(rdev->fence_drv.emited.prev,
+ struct radeon_fence, list);
+ seq_printf(m, "Last emited fence %p with 0x%08X\n",
+ fence, fence->seq);
+ }
+ return 0;
+}
+
+static struct drm_info_list radeon_debugfs_fence_list[] = {
+ {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
+};
+#endif
+
+int radeon_debugfs_fence_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
new file mode 100644
index 00000000000..90187d17384
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fixed.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
+#ifndef RADEON_FIXED_H
+#define RADEON_FIXED_H
+
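+/* 20.12 fixed point: 20 integer bits, 12 fractional bits, stored in a u32 */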
+typedef union rfixed {
+ u32 full;
+} fixed20_12;
+
+
+#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */
+#define rfixed_const_half(A) (u32)(((A) << 12) + 2048)
+#define rfixed_const_666(A) (u32)(((A) << 12) + 2731)
+#define rfixed_const_8(A) (u32)(((A) << 12) + 3277)
+#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
+#define fixed_init(A) { .full = rfixed_const((A)) }
+#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
+#define rfixed_trunc(A) ((A).full >> 12)
+
+static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
+{
+ u64 tmp = ((u64)A.full << 13);
+
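+ /* shift by one extra bit so the +1 and divide-by-2 below round the 20.12 quotient to nearest */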
+ do_div(tmp, B.full);
+ tmp += 1;
+ tmp /= 2;
+ return lower_32_bits(tmp);
+}
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
new file mode 100644
index 00000000000..d343a15316e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "radeon_reg.h"
+
+/*
+ * Common GART table functions.
+ */
+int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
+{
+ void *ptr;
+
+ ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
+ &rdev->gart.table_addr);
+ if (ptr == NULL) {
+ return -ENOMEM;
+ }
+#ifdef CONFIG_X86
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
+ rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+ set_memory_uc((unsigned long)ptr,
+ rdev->gart.table_size >> PAGE_SHIFT);
+ }
+#endif
+ rdev->gart.table.ram.ptr = ptr;
+ memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size);
+ return 0;
+}
+
+void radeon_gart_table_ram_free(struct radeon_device *rdev)
+{
+ if (rdev->gart.table.ram.ptr == NULL) {
+ return;
+ }
+#ifdef CONFIG_X86
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
+ rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+ set_memory_wb((unsigned long)rdev->gart.table.ram.ptr,
+ rdev->gart.table_size >> PAGE_SHIFT);
+ }
+#endif
+ pci_free_consistent(rdev->pdev, rdev->gart.table_size,
+ (void *)rdev->gart.table.ram.ptr,
+ rdev->gart.table_addr);
+ rdev->gart.table.ram.ptr = NULL;
+ rdev->gart.table_addr = 0;
+}
+
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
+{
+ uint64_t gpu_addr;
+ int r;
+
+ if (rdev->gart.table.vram.robj == NULL) {
+ r = radeon_object_create(rdev, NULL,
+ rdev->gart.table_size,
+ true,
+ RADEON_GEM_DOMAIN_VRAM,
+ false, &rdev->gart.table.vram.robj);
+ if (r) {
+ return r;
+ }
+ }
+ r = radeon_object_pin(rdev->gart.table.vram.robj,
+ RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+ if (r) {
+ radeon_object_unref(&rdev->gart.table.vram.robj);
+ return r;
+ }
+ r = radeon_object_kmap(rdev->gart.table.vram.robj,
+ (void **)&rdev->gart.table.vram.ptr);
+ if (r) {
+ radeon_object_unpin(rdev->gart.table.vram.robj);
+ radeon_object_unref(&rdev->gart.table.vram.robj);
+ DRM_ERROR("radeon: failed to map gart vram table.\n");
+ return r;
+ }
+ rdev->gart.table_addr = gpu_addr;
+ return 0;
+}
+
+void radeon_gart_table_vram_free(struct radeon_device *rdev)
+{
+ if (rdev->gart.table.vram.robj == NULL) {
+ return;
+ }
+ radeon_object_kunmap(rdev->gart.table.vram.robj);
+ radeon_object_unpin(rdev->gart.table.vram.robj);
+ radeon_object_unref(&rdev->gart.table.vram.robj);
+}
+
+
+
+
+/*
+ * Common gart functions.
+ */
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+ int pages)
+{
+ unsigned t;
+ unsigned p;
+ int i, j;
+
+ if (!rdev->gart.ready) {
+ WARN(1, "trying to unbind memory to unitialized GART !\n");
+ return;
+ }
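+ /* GART entries are 4KB: t indexes GPU pages, p indexes CPU pages (PAGE_SIZE may exceed 4KB) */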
+ t = offset / 4096;
+ p = t / (PAGE_SIZE / 4096);
+ for (i = 0; i < pages; i++, p++) {
+ if (rdev->gart.pages[p]) {
+ pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ rdev->gart.pages[p] = NULL;
+ rdev->gart.pages_addr[p] = 0;
+ for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
+ radeon_gart_set_page(rdev, t, 0);
+ }
+ }
+ }
+ mb();
+ radeon_gart_tlb_flush(rdev);
+}
+
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+ int pages, struct page **pagelist)
+{
+ unsigned t;
+ unsigned p;
+ uint64_t page_base;
+ int i, j;
+
+ if (!rdev->gart.ready) {
+ DRM_ERROR("trying to bind memory to unitialized GART !\n");
+ return -EINVAL;
+ }
+ t = offset / 4096;
+ p = t / (PAGE_SIZE / 4096);
+
+ for (i = 0; i < pages; i++, p++) {
+ /* we need to support large memory configurations */
+ /* assume that unbind has already been called on the range */
+ rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+ /* FIXME: failed to map page (return -ENOMEM?) */
+ radeon_gart_unbind(rdev, offset, pages);
+ return -ENOMEM;
+ }
+ rdev->gart.pages[p] = pagelist[i];
+ page_base = (uint32_t)rdev->gart.pages_addr[p];
+ for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += 4096;
+ }
+ }
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ return 0;
+}
+
+int radeon_gart_init(struct radeon_device *rdev)
+{
+ if (rdev->gart.pages) {
+ return 0;
+ }
+ /* We need PAGE_SIZE >= 4096 */
+ if (PAGE_SIZE < 4096) {
+ DRM_ERROR("Page size is smaller than GPU page size!\n");
+ return -EINVAL;
+ }
+ /* Compute table size */
+ rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
+ rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
+ DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
+ rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
+ /* Allocate pages table */
+ rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
+ GFP_KERNEL);
+ if (rdev->gart.pages == NULL) {
+ radeon_gart_fini(rdev);
+ return -ENOMEM;
+ }
+ rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
+ rdev->gart.num_cpu_pages, GFP_KERNEL);
+ if (rdev->gart.pages_addr == NULL) {
+ radeon_gart_fini(rdev);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void radeon_gart_fini(struct radeon_device *rdev)
+{
+ if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+ /* unbind pages */
+ radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
+ }
+ rdev->gart.ready = false;
+ kfree(rdev->gart.pages);
+ kfree(rdev->gart.pages_addr);
+ rdev->gart.pages = NULL;
+ rdev->gart.pages_addr = NULL;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
new file mode 100644
index 00000000000..eb516034235
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+int radeon_gem_object_init(struct drm_gem_object *obj)
+{
+ /* nothing to do here */
+ return 0;
+}
+
+void radeon_gem_object_free(struct drm_gem_object *gobj)
+{
+ struct radeon_object *robj = gobj->driver_private;
+
+ gobj->driver_private = NULL;
+ if (robj) {
+ radeon_object_unref(&robj);
+ }
+}
+
+int radeon_gem_object_create(struct radeon_device *rdev, int size,
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ bool interruptible,
+ struct drm_gem_object **obj)
+{
+ struct drm_gem_object *gobj;
+ struct radeon_object *robj;
+ int r;
+
+ *obj = NULL;
+ gobj = drm_gem_object_alloc(rdev->ddev, size);
+ if (!gobj) {
+ return -ENOMEM;
+ }
+ /* At least align on page size */
+ if (alignment < PAGE_SIZE) {
+ alignment = PAGE_SIZE;
+ }
+ r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
+ interruptible, &robj);
+ if (r) {
+ DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
+ size, initial_domain, alignment);
+ mutex_lock(&rdev->ddev->struct_mutex);
+ drm_gem_object_unreference(gobj);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+ return r;
+ }
+ gobj->driver_private = robj;
+ *obj = gobj;
+ return 0;
+}
+
+int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+ uint64_t *gpu_addr)
+{
+ struct radeon_object *robj = obj->driver_private;
+ uint32_t flags;
+
+ switch (pin_domain) {
+ case RADEON_GEM_DOMAIN_VRAM:
+ flags = TTM_PL_FLAG_VRAM;
+ break;
+ case RADEON_GEM_DOMAIN_GTT:
+ flags = TTM_PL_FLAG_TT;
+ break;
+ default:
+ flags = TTM_PL_FLAG_SYSTEM;
+ break;
+ }
+ return radeon_object_pin(robj, flags, gpu_addr);
+}
+
+void radeon_gem_object_unpin(struct drm_gem_object *obj)
+{
+ struct radeon_object *robj = obj->driver_private;
+ radeon_object_unpin(robj);
+}
+
+int radeon_gem_set_domain(struct drm_gem_object *gobj,
+ uint32_t rdomain, uint32_t wdomain)
+{
+ struct radeon_object *robj;
+ uint32_t domain;
+ int r;
+
+	/* FIXME: reimplement */
+ robj = gobj->driver_private;
+ /* work out where to validate the buffer to */
+ domain = wdomain;
+ if (!domain) {
+ domain = rdomain;
+ }
+ if (!domain) {
+		/* Do nothing */
+		printk(KERN_WARNING "Set domain without a domain!\n");
+ return 0;
+ }
+ if (domain == RADEON_GEM_DOMAIN_CPU) {
+		/* Asking for cpu access, wait for the object to be idle */
+ r = radeon_object_wait(robj);
+ if (r) {
+			printk(KERN_ERR "Failed to wait for object!\n");
+ return r;
+ }
+ }
+ return 0;
+}
+
+int radeon_gem_init(struct radeon_device *rdev)
+{
+ INIT_LIST_HEAD(&rdev->gem.objects);
+ return 0;
+}
+
+void radeon_gem_fini(struct radeon_device *rdev)
+{
+ radeon_object_force_delete(rdev);
+}
+
+
+/*
+ * GEM ioctls.
+ */
+int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_radeon_gem_info *args = data;
+
+ args->vram_size = rdev->mc.vram_size;
+	/* FIXME: report something that makes sense */
+ args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024);
+ args->gart_size = rdev->mc.gtt_size;
+ return 0;
+}
+
+int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ /* TODO: implement */
+ DRM_ERROR("unimplemented %s\n", __func__);
+ return -ENOSYS;
+}
+
+int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ /* TODO: implement */
+ DRM_ERROR("unimplemented %s\n", __func__);
+ return -ENOSYS;
+}
+
+int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_radeon_gem_create *args = data;
+ struct drm_gem_object *gobj;
+ uint32_t handle;
+ int r;
+
+ /* create a gem object to contain this object in */
+ args->size = roundup(args->size, PAGE_SIZE);
+ r = radeon_gem_object_create(rdev, args->size, args->alignment,
+ args->initial_domain, false,
+ false, true, &gobj);
+ if (r) {
+ return r;
+ }
+ r = drm_gem_handle_create(filp, gobj, &handle);
+ if (r) {
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gobj);
+ mutex_unlock(&dev->struct_mutex);
+ return r;
+ }
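+	/* the handle now keeps the object alive; drop the floating reference
+	 * taken when the object was created
+	 */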
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(gobj);
+ mutex_unlock(&dev->struct_mutex);
+ args->handle = handle;
+ return 0;
+}
+
+int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ /* transition the BO to a domain -
+ * just validate the BO into a certain domain */
+ struct drm_radeon_gem_set_domain *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_object *robj;
+ int r;
+
+ /* for now if someone requests domain CPU -
+ * just make sure the buffer is finished with */
+
+ /* just do a BO wait for now */
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ return -EINVAL;
+ }
+ robj = gobj->driver_private;
+
+ r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
+
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gobj);
+ mutex_unlock(&dev->struct_mutex);
+ return r;
+}
+
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_mmap *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_object *robj;
+ int r;
+
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ return -EINVAL;
+ }
+ robj = gobj->driver_private;
+ r = radeon_object_mmap(robj, &args->addr_ptr);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gobj);
+ mutex_unlock(&dev->struct_mutex);
+ return r;
+}
+
+int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ /* FIXME: implement */
+ return 0;
+}
+
+int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_wait_idle *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_object *robj;
+ int r;
+
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ return -EINVAL;
+ }
+ robj = gobj->driver_private;
+ r = radeon_object_wait(robj);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gobj);
+ mutex_unlock(&dev->struct_mutex);
+ return r;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
new file mode 100644
index 00000000000..71465ed2688
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+/**
+ * radeon_ddc_probe - check whether a monitor responds on the connector's DDC bus
+ *
+ */
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+{
+ u8 out_buf[] = { 0x0, 0x0};
+ u8 buf[2];
+ int ret;
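+	/* one-byte write then one-byte read at 0x50, the standard DDC/EDID
+	 * address; if both messages complete, assume a monitor is attached
+	 */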
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0x50,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = buf,
+ }
+ };
+
+ ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+ if (ret == 2)
+ return true;
+
+ return false;
+}
+
+
+void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
+{
+ struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
+ uint32_t temp;
+ struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
+
+ /* RV410 appears to have a bug where the hw i2c in reset
+ * holds the i2c port in a bad state - switch hw i2c away before
+ * doing DDC - do this for all r200s/r300s/r400s for safety sake
+ */
+ if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
+ if (rec->a_clk_reg == RADEON_GPIO_MONID) {
+ WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+ R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
+ } else {
+ WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+ R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+ }
+ }
+ if (lock_state) {
+ temp = RREG32(rec->a_clk_reg);
+ temp &= ~(rec->a_clk_mask);
+ WREG32(rec->a_clk_reg, temp);
+
+ temp = RREG32(rec->a_data_reg);
+ temp &= ~(rec->a_data_mask);
+ WREG32(rec->a_data_reg, temp);
+ }
+
+ temp = RREG32(rec->mask_clk_reg);
+ if (lock_state)
+ temp |= rec->mask_clk_mask;
+ else
+ temp &= ~rec->mask_clk_mask;
+ WREG32(rec->mask_clk_reg, temp);
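+	/* read back so the write is posted before the data pad is touched
+	 * (assumed to act as a flush)
+	 */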
+ temp = RREG32(rec->mask_clk_reg);
+
+ temp = RREG32(rec->mask_data_reg);
+ if (lock_state)
+ temp |= rec->mask_data_mask;
+ else
+ temp &= ~rec->mask_data_mask;
+ WREG32(rec->mask_data_reg, temp);
+ temp = RREG32(rec->mask_data_reg);
+}
+
+static int get_clock(void *i2c_priv)
+{
+ struct radeon_i2c_chan *i2c = i2c_priv;
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ uint32_t val;
+
+ val = RREG32(rec->get_clk_reg);
+ val &= rec->get_clk_mask;
+
+ return (val != 0);
+}
+
+
+static int get_data(void *i2c_priv)
+{
+ struct radeon_i2c_chan *i2c = i2c_priv;
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ uint32_t val;
+
+ val = RREG32(rec->get_data_reg);
+ val &= rec->get_data_mask;
+ return (val != 0);
+}
+
+static void set_clock(void *i2c_priv, int clock)
+{
+ struct radeon_i2c_chan *i2c = i2c_priv;
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ uint32_t val;
+
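+	/* setting the mask bit drives the line low; clearing it releases the
+	 * line to the external pull-up (open-drain style, assumption)
+	 */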
+ val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
+ val |= clock ? 0 : rec->put_clk_mask;
+ WREG32(rec->put_clk_reg, val);
+}
+
+static void set_data(void *i2c_priv, int data)
+{
+ struct radeon_i2c_chan *i2c = i2c_priv;
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ uint32_t val;
+
+ val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
+ val |= data ? 0 : rec->put_data_mask;
+ WREG32(rec->put_data_reg, val);
+}
+
+struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
+{
+ struct radeon_i2c_chan *i2c;
+ int ret;
+
+ i2c = drm_calloc(1, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
+ if (i2c == NULL)
+ return NULL;
+
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.algo_data = &i2c->algo;
+ i2c->dev = dev;
+ i2c->algo.setsda = set_data;
+ i2c->algo.setscl = set_clock;
+ i2c->algo.getsda = get_data;
+ i2c->algo.getscl = get_clock;
+ i2c->algo.udelay = 20;
+	/* VESA says 2.2 ms is enough; 1 jiffy doesn't always seem to
+	 * cover that, 2 jiffies is a lot more reliable */
+ i2c->algo.timeout = 2;
+ i2c->algo.data = i2c;
+ i2c->rec = *rec;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_INFO("Failed to register i2c %s\n", name);
+ goto out_free;
+ }
+
+ return i2c;
+out_free:
+ drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
+ return NULL;
+
+}
+
+void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
+
+ i2c_del_adapter(&i2c->adapter);
+ drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
+}
+
+struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
+{
+ return NULL;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
new file mode 100644
index 00000000000..491d569deb0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon_reg.h"
+#include "radeon_microcode.h"
+#include "radeon.h"
+#include "atom.h"
+
+static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
+{
+ uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
+ uint32_t irq_mask = RADEON_SW_INT_TEST;
+
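+	/* writing the pending bits back to GEN_INT_STATUS acknowledges them */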
+ if (irqs) {
+ WREG32(RADEON_GEN_INT_STATUS, irqs);
+ }
+ return irqs & irq_mask;
+}
+
+int r100_irq_set(struct radeon_device *rdev)
+{
+ uint32_t tmp = 0;
+
+ if (rdev->irq.sw_int) {
+ tmp |= RADEON_SW_INT_ENABLE;
+ }
+	/* TODO: go through the CRTCs and enable vblank interrupts where needed */
+ WREG32(RADEON_GEN_INT_CNTL, tmp);
+ return 0;
+}
+
+int r100_irq_process(struct radeon_device *rdev)
+{
+ uint32_t status;
+
+ status = r100_irq_ack(rdev);
+ if (!status) {
+ return IRQ_NONE;
+ }
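+	/* keep acknowledging until no bits are pending so interrupts raised
+	 * while processing are not lost
+	 */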
+ while (status) {
+ /* SW interrupt */
+ if (status & RADEON_SW_INT_TEST) {
+ radeon_fence_process(rdev);
+ }
+ status = r100_irq_ack(rdev);
+ }
+ return IRQ_HANDLED;
+}
+
+int rs600_irq_set(struct radeon_device *rdev)
+{
+ uint32_t tmp = 0;
+
+ if (rdev->irq.sw_int) {
+ tmp |= RADEON_SW_INT_ENABLE;
+ }
+ WREG32(RADEON_GEN_INT_CNTL, tmp);
+	/* TODO: go through the CRTCs and enable vblank interrupts where needed */
+ WREG32(R500_DxMODE_INT_MASK, 0);
+ return 0;
+}
+
+irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ struct radeon_device *rdev = dev->dev_private;
+
+ return radeon_irq_process(rdev);
+}
+
+void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ unsigned i;
+
+ /* Disable *all* interrupts */
+ rdev->irq.sw_int = false;
+ for (i = 0; i < 2; i++) {
+ rdev->irq.crtc_vblank_int[i] = false;
+ }
+ radeon_irq_set(rdev);
+ /* Clear bits */
+ radeon_irq_process(rdev);
+}
+
+int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
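+	/* 21-bit wrap value for the DRM vblank counter; presumably matches
+	 * the width of the hardware frame counter (assumption)
+	 */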
+ dev->max_vblank_count = 0x001fffff;
+ rdev->irq.sw_int = true;
+ radeon_irq_set(rdev);
+ return 0;
+}
+
+void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ unsigned i;
+
+ if (rdev == NULL) {
+ return;
+ }
+ /* Disable *all* interrupts */
+ rdev->irq.sw_int = false;
+ for (i = 0; i < 2; i++) {
+ rdev->irq.crtc_vblank_int[i] = false;
+ }
+ radeon_irq_set(rdev);
+}
+
+int radeon_irq_kms_init(struct radeon_device *rdev)
+{
+ int r = 0;
+
+ r = drm_vblank_init(rdev->ddev, 2);
+ if (r) {
+ return r;
+ }
+ drm_irq_install(rdev->ddev);
+ rdev->irq.installed = true;
+ DRM_INFO("radeon: irq initialized.\n");
+ return 0;
+}
+
+void radeon_irq_kms_fini(struct radeon_device *rdev)
+{
+ if (rdev->irq.installed) {
+ rdev->irq.installed = false;
+ drm_irq_uninstall(rdev->ddev);
+ }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
new file mode 100644
index 00000000000..b0ce44b9f5a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "drm_sarea.h"
+#include "radeon.h"
+#include "radeon_drm.h"
+
+
+/*
+ * Driver load/unload
+ */
+int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
+{
+ struct radeon_device *rdev;
+ int r;
+
+ rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
+ if (rdev == NULL) {
+ return -ENOMEM;
+ }
+ dev->dev_private = (void *)rdev;
+
+ /* update BUS flag */
+ if (drm_device_is_agp(dev)) {
+ flags |= RADEON_IS_AGP;
+ } else if (drm_device_is_pcie(dev)) {
+ flags |= RADEON_IS_PCIE;
+ } else {
+ flags |= RADEON_IS_PCI;
+ }
+
+ r = radeon_device_init(rdev, dev, dev->pdev, flags);
+ if (r) {
+ DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n");
+ radeon_device_fini(rdev);
+ return r;
+ }
+ return 0;
+}
+
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ radeon_device_fini(rdev);
+ kfree(rdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+
+/*
+ * Userspace information query ioctl
+ */
+int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_radeon_info *info;
+ uint32_t *value_ptr;
+ uint32_t value;
+
+ info = data;
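+	/* info->value carries a userspace pointer (as a 64-bit integer) where
+	 * the requested value is written back
+	 */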
+ value_ptr = (uint32_t *)((unsigned long)info->value);
+ switch (info->request) {
+ case RADEON_INFO_DEVICE_ID:
+ value = dev->pci_device;
+ break;
+ case RADEON_INFO_NUM_GB_PIPES:
+ value = rdev->num_gb_pipes;
+ break;
+ default:
+ DRM_DEBUG("Invalid request %d\n", info->request);
+ return -EINVAL;
+ }
+ if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
+ DRM_ERROR("copy_to_user\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+
+/*
+ * Outdated mess kept for the old DRM interface where Xorg was in charge (these are no-op functions now).
+ */
+int radeon_driver_firstopen_kms(struct drm_device *dev)
+{
+ return 0;
+}
+
+
+void radeon_driver_lastclose_kms(struct drm_device *dev)
+{
+}
+
+int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+{
+ return 0;
+}
+
+void radeon_driver_postclose_kms(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+}
+
+void radeon_driver_preclose_kms(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+}
+
+
+/*
+ * VBlank related functions.
+ */
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
+{
+ /* FIXME: implement */
+ return 0;
+}
+
+int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+{
+ /* FIXME: implement */
+ return 0;
+}
+
+void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+{
+ /* FIXME: implement */
+}
+
+
+/*
+ * For multiple master (like multiple X).
+ */
+struct drm_radeon_master_private {
+ drm_local_map_t *sarea;
+ drm_radeon_sarea_t *sarea_priv;
+};
+
+int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_radeon_master_private *master_priv;
+ unsigned long sareapage;
+ int ret;
+
+ master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
+ if (master_priv == NULL) {
+ return -ENOMEM;
+ }
+ /* prebuild the SAREA */
+ sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
+ ret = drm_addmap(dev, 0, sareapage, _DRM_SHM,
+ _DRM_CONTAINS_LOCK|_DRM_DRIVER,
+ &master_priv->sarea);
+ if (ret) {
+ DRM_ERROR("SAREA setup failed\n");
+ return ret;
+ }
+ master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
+ master_priv->sarea_priv->pfCurrentPage = 0;
+ master->driver_priv = master_priv;
+ return 0;
+}
+
+void radeon_master_destroy_kms(struct drm_device *dev,
+ struct drm_master *master)
+{
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+
+ if (master_priv == NULL) {
+ return;
+ }
+ if (master_priv->sarea) {
+ drm_rmmap_locked(dev, master_priv->sarea);
+ }
+ drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+ master->driver_priv = NULL;
+}
+
+
+/*
+ * IOCTL.
+ */
+int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ /* Not valid in KMS. */
+ return -EINVAL;
+}
+
+#define KMS_INVALID_IOCTL(name) \
+int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
+{ \
+ DRM_ERROR("invalid ioctl with kms %s\n", __func__); \
+ return -EINVAL; \
+}
+
+/*
+ * All these ioctls are invalid in kms world.
+ */
+KMS_INVALID_IOCTL(radeon_cp_init_kms)
+KMS_INVALID_IOCTL(radeon_cp_start_kms)
+KMS_INVALID_IOCTL(radeon_cp_stop_kms)
+KMS_INVALID_IOCTL(radeon_cp_reset_kms)
+KMS_INVALID_IOCTL(radeon_cp_idle_kms)
+KMS_INVALID_IOCTL(radeon_cp_resume_kms)
+KMS_INVALID_IOCTL(radeon_engine_reset_kms)
+KMS_INVALID_IOCTL(radeon_fullscreen_kms)
+KMS_INVALID_IOCTL(radeon_cp_swap_kms)
+KMS_INVALID_IOCTL(radeon_cp_clear_kms)
+KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
+KMS_INVALID_IOCTL(radeon_cp_indices_kms)
+KMS_INVALID_IOCTL(radeon_cp_texture_kms)
+KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
+KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
+KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
+KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
+KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
+KMS_INVALID_IOCTL(radeon_cp_flip_kms)
+KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
+KMS_INVALID_IOCTL(radeon_mem_free_kms)
+KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
+KMS_INVALID_IOCTL(radeon_irq_emit_kms)
+KMS_INVALID_IOCTL(radeon_irq_wait_kms)
+KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
+KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
+KMS_INVALID_IOCTL(radeon_surface_free_kms)
+
+
+struct drm_ioctl_desc radeon_ioctls_kms[] = {
+ DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+ /* KMS */
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH),
+};
+int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
new file mode 100644
index 00000000000..8086ecf7f03
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -0,0 +1,1276 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon_fixed.h"
+#include "radeon.h"
+
+void radeon_restore_common_regs(struct drm_device *dev)
+{
+ /* don't need this yet */
+}
+
+static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ int i = 0;
+
+ /* FIXME: Certain revisions of R300 can't recover here. Not sure of
+ the cause yet, but this workaround will mask the problem for now.
+ Other chips usually will pass at the very first test, so the
+ workaround shouldn't have any effect on them. */
+ for (i = 0;
+ (i < 10000 &&
+ RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
+ i++);
+}
+
+static void radeon_pll_write_update(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ while (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
+
+ WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+ RADEON_PPLL_ATOMIC_UPDATE_W,
+ ~(RADEON_PPLL_ATOMIC_UPDATE_W));
+}
+
+static void radeon_pll2_wait_for_read_update_complete(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ int i = 0;
+
+
+ /* FIXME: Certain revisions of R300 can't recover here. Not sure of
+ the cause yet, but this workaround will mask the problem for now.
+ Other chips usually will pass at the very first test, so the
+ workaround shouldn't have any effect on them. */
+ for (i = 0;
+ (i < 10000 &&
+ RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
+ i++);
+}
+
+static void radeon_pll2_write_update(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ while (RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
+
+ WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
+ RADEON_P2PLL_ATOMIC_UPDATE_W,
+ ~(RADEON_P2PLL_ATOMIC_UPDATE_W));
+}
+
+static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
+ uint16_t fb_div)
+{
+ unsigned int vcoFreq;
+
+ if (!ref_div)
+ return 1;
+
+	vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
+
+ /*
+ * This is horribly crude: the VCO frequency range is divided into
+ * 3 parts, each part having a fixed PLL gain value.
+ */
+ if (vcoFreq >= 30000)
+ /*
+ * [300..max] MHz : 7
+ */
+ return 7;
+ else if (vcoFreq >= 18000)
+ /*
+ * [180..300) MHz : 4
+ */
+ return 4;
+ else
+ /*
+ * [0..180) MHz : 1
+ */
+ return 1;
+}
+
+void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t mask;
+
+ if (radeon_crtc->crtc_id)
+ mask = (RADEON_CRTC2_EN |
+ RADEON_CRTC2_DISP_DIS |
+ RADEON_CRTC2_VSYNC_DIS |
+ RADEON_CRTC2_HSYNC_DIS |
+ RADEON_CRTC2_DISP_REQ_EN_B);
+ else
+ mask = (RADEON_CRTC_DISPLAY_DIS |
+ RADEON_CRTC_VSYNC_DIS |
+ RADEON_CRTC_HSYNC_DIS);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (radeon_crtc->crtc_id)
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask);
+ else {
+ WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
+ RADEON_CRTC_DISP_REQ_EN_B));
+ WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
+ }
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (radeon_crtc->crtc_id)
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
+ else {
+ WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
+ RADEON_CRTC_DISP_REQ_EN_B));
+ WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
+ }
+ break;
+ }
+
+ if (mode != DRM_MODE_DPMS_OFF) {
+ radeon_crtc_load_lut(crtc);
+ }
+}
+
+/* properly set crtc bpp when using atombios */
+void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ int format;
+ uint32_t crtc_gen_cntl;
+ uint32_t disp_merge_cntl;
+ uint32_t crtc_pitch;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 15: /* 555 */
+ format = 3;
+ break;
+ case 16: /* 565 */
+ format = 4;
+ break;
+ case 24: /* RGB */
+ format = 5;
+ break;
+ case 32: /* xRGB */
+ format = 6;
+ break;
+ default:
+ return;
+ }
+
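+	/* CRTC_PITCH is programmed in units of 8 pixels: round the pitch up
+	 * and mirror it into the upper half of the register (assumption)
+	 */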
+ crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
+ ((crtc->fb->bits_per_pixel * 8) - 1)) /
+ (crtc->fb->bits_per_pixel * 8));
+ crtc_pitch |= crtc_pitch << 16;
+
+ WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
+
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+ disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+ WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+
+ crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
+ crtc_gen_cntl |= (format << 8);
+ crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
+ WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+ break;
+ case 1:
+ disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+ disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+ WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
+
+ crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
+ crtc_gen_cntl |= (format << 8);
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
+ WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
+ WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
+ break;
+ }
+}
+
+int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *radeon_fb;
+ struct drm_gem_object *obj;
+ uint64_t base;
+ uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
+ uint32_t crtc_pitch, pitch_pixels;
+
+ DRM_DEBUG("\n");
+
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+
+ obj = radeon_fb->obj;
+ if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
+ return -EINVAL;
+ }
+ crtc_offset = (u32)base;
+ crtc_offset_cntl = 0;
+
+ pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+ crtc_pitch = (((pitch_pixels * crtc->fb->bits_per_pixel) +
+ ((crtc->fb->bits_per_pixel * 8) - 1)) /
+ (crtc->fb->bits_per_pixel * 8));
+ crtc_pitch |= crtc_pitch << 16;
+
+ /* TODO tiling */
+ if (0) {
+ if (ASIC_IS_R300(rdev))
+ crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
+ R300_CRTC_MICRO_TILE_BUFFER_DIS |
+ R300_CRTC_MACRO_TILE_EN);
+ else
+ crtc_offset_cntl |= RADEON_CRTC_TILE_EN;
+ } else {
+ if (ASIC_IS_R300(rdev))
+ crtc_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN |
+ R300_CRTC_MICRO_TILE_BUFFER_DIS |
+ R300_CRTC_MACRO_TILE_EN);
+ else
+ crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN;
+ }
+
+
+ /* TODO more tiling */
+ if (0) {
+ if (ASIC_IS_R300(rdev)) {
+ crtc_tile_x0_y0 = x | (y << 16);
+ base &= ~0x7ff;
+ } else {
+ int byteshift = crtc->fb->bits_per_pixel >> 4;
+ int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11;
+ base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
+ crtc_offset_cntl |= (y % 16);
+ }
+ } else {
+ int offset = y * pitch_pixels + x;
+ switch (crtc->fb->bits_per_pixel) {
+ case 15:
+ case 16:
+ offset *= 2;
+ break;
+ case 24:
+ offset *= 3;
+ break;
+ case 32:
+ offset *= 4;
+ break;
+ default:
+ return false;
+ }
+ base += offset;
+ }
+
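+	/* keep the scanout start 8-byte aligned, as the CRTC offset register
+	 * presumably requires (assumption)
+	 */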
+ base &= ~7;
+
+ /* update sarea TODO */
+
+ crtc_offset = (u32)base;
+
+ WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, rdev->mc.vram_location);
+
+ if (ASIC_IS_R300(rdev)) {
+ if (radeon_crtc->crtc_id)
+ WREG32(R300_CRTC2_TILE_X0_Y0, crtc_tile_x0_y0);
+ else
+ WREG32(R300_CRTC_TILE_X0_Y0, crtc_tile_x0_y0);
+ }
+ WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, crtc_offset_cntl);
+ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
+ WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
+
+ if (old_fb && old_fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(old_fb);
+ radeon_gem_object_unpin(radeon_fb->obj);
+ }
+ return 0;
+}
+
+static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ int format;
+ int hsync_start;
+ int hsync_wid;
+ int vsync_wid;
+ uint32_t crtc_h_total_disp;
+ uint32_t crtc_h_sync_strt_wid;
+ uint32_t crtc_v_total_disp;
+ uint32_t crtc_v_sync_strt_wid;
+
+ DRM_DEBUG("\n");
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 15: /* 555 */
+ format = 3;
+ break;
+ case 16: /* 565 */
+ format = 4;
+ break;
+ case 24: /* RGB */
+ format = 5;
+ break;
+ case 32: /* xRGB */
+ format = 6;
+ break;
+ default:
+ return false;
+ }
+
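+	/* CRTC_H_TOTAL_DISP packs the horizontal total (in 8-pixel units)
+	 * into the low bits and the displayed width into bits 16 and up
+	 */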
+ crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
+ | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+ hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+ if (!hsync_wid)
+ hsync_wid = 1;
+ hsync_start = mode->crtc_hsync_start - 8;
+
+ crtc_h_sync_strt_wid = ((hsync_start & 0x1fff)
+ | ((hsync_wid & 0x3f) << 16)
+ | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ? RADEON_CRTC_H_SYNC_POL
+ : 0));
+
+ /* This works for double scan mode. */
+ crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
+ | ((mode->crtc_vdisplay - 1) << 16));
+
+ vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ if (!vsync_wid)
+ vsync_wid = 1;
+
+ crtc_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
+ | ((vsync_wid & 0x1f) << 16)
+ | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ? RADEON_CRTC_V_SYNC_POL
+ : 0));
+
+ /* TODO -> Dell Server */
+ if (0) {
+ uint32_t disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+ uint32_t tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ uint32_t dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+ uint32_t crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+
+ dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
+ dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
+
+		/* For CRT on DAC2, don't turn it on if the BIOS didn't
+		   enable it, even if it's detected.
+		*/
+ disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+ tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16));
+ tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16));
+
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+ WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ }
+
+ if (radeon_crtc->crtc_id) {
+ uint32_t crtc2_gen_cntl;
+ uint32_t disp2_merge_cntl;
+
+ /* check to see if TV DAC is enabled for another crtc and keep it enabled */
+ if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON)
+ crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
+ else
+ crtc2_gen_cntl = 0;
+
+ crtc2_gen_cntl |= ((format << 8)
+ | RADEON_CRTC2_VSYNC_DIS
+ | RADEON_CRTC2_HSYNC_DIS
+ | RADEON_CRTC2_DISP_DIS
+ | RADEON_CRTC2_DISP_REQ_EN_B
+ | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ ? RADEON_CRTC2_DBL_SCAN_EN
+ : 0)
+ | ((mode->flags & DRM_MODE_FLAG_CSYNC)
+ ? RADEON_CRTC2_CSYNC_EN
+ : 0)
+ | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+ ? RADEON_CRTC2_INTERLACE_EN
+ : 0));
+
+ disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+ disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+
+ WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ } else {
+ uint32_t crtc_gen_cntl;
+ uint32_t crtc_ext_cntl;
+ uint32_t disp_merge_cntl;
+
+ crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN
+ | (format << 8)
+ | RADEON_CRTC_DISP_REQ_EN_B
+ | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ ? RADEON_CRTC_DBL_SCAN_EN
+ : 0)
+ | ((mode->flags & DRM_MODE_FLAG_CSYNC)
+ ? RADEON_CRTC_CSYNC_EN
+ : 0)
+ | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+ ? RADEON_CRTC_INTERLACE_EN
+ : 0));
+
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+ crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
+ RADEON_CRTC_VSYNC_DIS |
+ RADEON_CRTC_HSYNC_DIS |
+ RADEON_CRTC_DISPLAY_DIS);
+
+ disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+ disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+
+ WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+ WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+ }
+
+ WREG32(RADEON_CRTC_H_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_h_total_disp);
+ WREG32(RADEON_CRTC_H_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_h_sync_strt_wid);
+ WREG32(RADEON_CRTC_V_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_v_total_disp);
+ WREG32(RADEON_CRTC_V_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_v_sync_strt_wid);
+
+ return true;
+}
+
+static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_encoder *encoder;
+ uint32_t feedback_div = 0;
+ uint32_t frac_fb_div = 0;
+ uint32_t reference_div = 0;
+ uint32_t post_divider = 0;
+ uint32_t freq = 0;
+ uint8_t pll_gain;
+ int pll_flags = RADEON_PLL_LEGACY;
+ bool use_bios_divs = false;
+ /* PLL registers */
+ uint32_t pll_ref_div = 0;
+ uint32_t pll_fb_post_div = 0;
+ uint32_t htotal_cntl = 0;
+
+ struct radeon_pll *pll;
+
+ struct {
+ int divider;
+ int bitvalue;
+ } *post_div, post_divs[] = {
+ /* From RAGE 128 VR/RAGE 128 GL Register
+ * Reference Manual (Technical Reference
+ * Manual P/N RRG-G04100-C Rev. 0.04), page
+ * 3-17 (PLL_DIV_[3:0]).
+ */
+ { 1, 0 }, /* VCLK_SRC */
+ { 2, 1 }, /* VCLK_SRC/2 */
+ { 4, 2 }, /* VCLK_SRC/4 */
+ { 8, 3 }, /* VCLK_SRC/8 */
+ { 3, 4 }, /* VCLK_SRC/3 */
+ { 16, 5 }, /* VCLK_SRC/16 */
+ { 6, 6 }, /* VCLK_SRC/6 */
+ { 12, 7 }, /* VCLK_SRC/12 */
+ { 0, 0 }
+ };
+
+ if (radeon_crtc->crtc_id)
+ pll = &rdev->clock.p2pll;
+ else
+ pll = &rdev->clock.p1pll;
+
+ if (mode->clock > 200000) /* range limits??? */
+ pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ else
+ pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+ pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+ if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+ if (lvds) {
+ if (lvds->use_bios_dividers) {
+ pll_ref_div = lvds->panel_ref_divider;
+ pll_fb_post_div = (lvds->panel_fb_divider |
+ (lvds->panel_post_divider << 16));
+ htotal_cntl = 0;
+ use_bios_divs = true;
+ }
+ }
+ pll_flags |= RADEON_PLL_USE_REF_DIV;
+ }
+ }
+ }
+
+ DRM_DEBUG("\n");
+
+ if (!use_bios_divs) {
+ radeon_compute_pll(pll, mode->clock,
+ &freq, &feedback_div, &frac_fb_div,
+ &reference_div, &post_divider,
+ pll_flags);
+
+ for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
+ if (post_div->divider == post_divider)
+ break;
+ }
+
+ if (!post_div->divider)
+ post_div = &post_divs[0];
+
+ DRM_DEBUG("dc=%u, fd=%d, rd=%d, pd=%d\n",
+ (unsigned)freq,
+ feedback_div,
+ reference_div,
+ post_divider);
+
+ pll_ref_div = reference_div;
+#if defined(__powerpc__) && (0) /* TODO */
+ /* apparently programming this otherwise causes a hang??? */
+ if (info->MacModel == RADEON_MAC_IBOOK)
+ pll_fb_post_div = 0x000600ad;
+ else
+#endif
+ pll_fb_post_div = (feedback_div | (post_div->bitvalue << 16));
+
+ htotal_cntl = mode->htotal & 0x7;
+
+ }
+
+ pll_gain = radeon_compute_pll_gain(pll->reference_freq,
+ pll_ref_div & 0x3ff,
+ pll_fb_post_div & 0x7ff);
+
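+	/* programming sequence: park the pixel clock on the CPU clock, hold
+	 * the PLL in reset while the dividers are written, flush the atomic
+	 * update, release reset, wait for lock, then switch the pixel clock
+	 * back to the PLL
+	 */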
+ if (radeon_crtc->crtc_id) {
+ uint32_t pixclks_cntl = ((RREG32_PLL(RADEON_PIXCLKS_CNTL) &
+ ~(RADEON_PIX2CLK_SRC_SEL_MASK)) |
+ RADEON_PIX2CLK_SRC_SEL_P2PLLCLK);
+
+ WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
+ RADEON_PIX2CLK_SRC_SEL_CPUCLK,
+ ~(RADEON_PIX2CLK_SRC_SEL_MASK));
+
+ WREG32_PLL_P(RADEON_P2PLL_CNTL,
+ RADEON_P2PLL_RESET
+ | RADEON_P2PLL_ATOMIC_UPDATE_EN
+ | ((uint32_t)pll_gain << RADEON_P2PLL_PVG_SHIFT),
+ ~(RADEON_P2PLL_RESET
+ | RADEON_P2PLL_ATOMIC_UPDATE_EN
+ | RADEON_P2PLL_PVG_MASK));
+
+ WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
+ pll_ref_div,
+ ~RADEON_P2PLL_REF_DIV_MASK);
+
+ WREG32_PLL_P(RADEON_P2PLL_DIV_0,
+ pll_fb_post_div,
+ ~RADEON_P2PLL_FB0_DIV_MASK);
+
+ WREG32_PLL_P(RADEON_P2PLL_DIV_0,
+ pll_fb_post_div,
+ ~RADEON_P2PLL_POST0_DIV_MASK);
+
+ radeon_pll2_write_update(dev);
+ radeon_pll2_wait_for_read_update_complete(dev);
+
+ WREG32_PLL(RADEON_HTOTAL2_CNTL, htotal_cntl);
+
+ WREG32_PLL_P(RADEON_P2PLL_CNTL,
+ 0,
+ ~(RADEON_P2PLL_RESET
+ | RADEON_P2PLL_SLEEP
+ | RADEON_P2PLL_ATOMIC_UPDATE_EN));
+
+ DRM_DEBUG("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+ (unsigned)pll_ref_div,
+ (unsigned)pll_fb_post_div,
+ (unsigned)htotal_cntl,
+ RREG32_PLL(RADEON_P2PLL_CNTL));
+ DRM_DEBUG("Wrote2: rd=%u, fd=%u, pd=%u\n",
+ (unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
+ (unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
+ (unsigned)((pll_fb_post_div &
+ RADEON_P2PLL_POST0_DIV_MASK) >> 16));
+
+		mdelay(50); /* Let the clock lock */
+
+ WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
+ RADEON_PIX2CLK_SRC_SEL_P2PLLCLK,
+ ~(RADEON_PIX2CLK_SRC_SEL_MASK));
+
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+ } else {
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+			/* A temporary workaround for the occasional blanking on certain laptop
+			   panels. This appears to be related to the PLL divider registers
+			   (failing to lock?). It occurs even when all dividers are the same
+			   as their old settings. In that case we really don't need to fiddle
+			   with the PLL registers, and by skipping them we can avoid the
+			   blanking problem on some panels.
+			*/
+ if ((pll_ref_div == (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_REF_DIV_MASK)) &&
+ (pll_fb_post_div == (RREG32_PLL(RADEON_PPLL_DIV_3) &
+ (RADEON_PPLL_POST3_DIV_MASK | RADEON_PPLL_FB3_DIV_MASK)))) {
+ WREG32_P(RADEON_CLOCK_CNTL_INDEX,
+ RADEON_PLL_DIV_SEL,
+ ~(RADEON_PLL_DIV_SEL));
+ r100_pll_errata_after_index(rdev);
+ return;
+ }
+ }
+
+ WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
+ RADEON_VCLK_SRC_SEL_CPUCLK,
+ ~(RADEON_VCLK_SRC_SEL_MASK));
+ WREG32_PLL_P(RADEON_PPLL_CNTL,
+ RADEON_PPLL_RESET
+ | RADEON_PPLL_ATOMIC_UPDATE_EN
+ | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
+ | ((uint32_t)pll_gain << RADEON_PPLL_PVG_SHIFT),
+ ~(RADEON_PPLL_RESET
+ | RADEON_PPLL_ATOMIC_UPDATE_EN
+ | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
+ | RADEON_PPLL_PVG_MASK));
+
+ WREG32_P(RADEON_CLOCK_CNTL_INDEX,
+ RADEON_PLL_DIV_SEL,
+ ~(RADEON_PLL_DIV_SEL));
+ r100_pll_errata_after_index(rdev);
+
+ if (ASIC_IS_R300(rdev) ||
+ (rdev->family == CHIP_RS300) ||
+ (rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480)) {
+ if (pll_ref_div & R300_PPLL_REF_DIV_ACC_MASK) {
+ /* When restoring console mode, use saved PPLL_REF_DIV
+ * setting.
+ */
+ WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+ pll_ref_div,
+ 0);
+ } else {
+ /* R300 uses ref_div_acc field as real ref divider */
+ WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+ (pll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
+ ~R300_PPLL_REF_DIV_ACC_MASK);
+ }
+ } else
+ WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+ pll_ref_div,
+ ~RADEON_PPLL_REF_DIV_MASK);
+
+ WREG32_PLL_P(RADEON_PPLL_DIV_3,
+ pll_fb_post_div,
+ ~RADEON_PPLL_FB3_DIV_MASK);
+
+ WREG32_PLL_P(RADEON_PPLL_DIV_3,
+ pll_fb_post_div,
+ ~RADEON_PPLL_POST3_DIV_MASK);
+
+ radeon_pll_write_update(dev);
+ radeon_pll_wait_for_read_update_complete(dev);
+
+ WREG32_PLL(RADEON_HTOTAL_CNTL, htotal_cntl);
+
+ WREG32_PLL_P(RADEON_PPLL_CNTL,
+ 0,
+ ~(RADEON_PPLL_RESET
+ | RADEON_PPLL_SLEEP
+ | RADEON_PPLL_ATOMIC_UPDATE_EN
+ | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));
+
+ DRM_DEBUG("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+ pll_ref_div,
+ pll_fb_post_div,
+ (unsigned)htotal_cntl,
+ RREG32_PLL(RADEON_PPLL_CNTL));
+ DRM_DEBUG("Wrote: rd=%d, fd=%d, pd=%d\n",
+ pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
+ pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
+ (pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);
+
+		mdelay(50); /* Let the clock lock */
+
+ WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
+ RADEON_VCLK_SRC_SEL_PPLLCLK,
+ ~(RADEON_VCLK_SRC_SEL_MASK));
+
+ }
+}
+
+static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int radeon_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+
+ DRM_DEBUG("\n");
+
+ /* TODO TV */
+
+ radeon_crtc_set_base(crtc, x, y, old_fb);
+ radeon_set_crtc_timing(crtc, adjusted_mode);
+ radeon_set_pll(crtc, adjusted_mode);
+ radeon_init_disp_bandwidth(crtc->dev);
+
+ return 0;
+}
+
+static void radeon_crtc_prepare(struct drm_crtc *crtc)
+{
+ radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_crtc_commit(struct drm_crtc *crtc)
+{
+ radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
+ .dpms = radeon_crtc_dpms,
+ .mode_fixup = radeon_crtc_mode_fixup,
+ .mode_set = radeon_crtc_mode_set,
+ .mode_set_base = radeon_crtc_set_base,
+ .prepare = radeon_crtc_prepare,
+ .commit = radeon_crtc_commit,
+};
+
+
+void radeon_legacy_init_crtc(struct drm_device *dev,
+ struct radeon_crtc *radeon_crtc)
+{
+ if (radeon_crtc->crtc_id == 1)
+ radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP;
+ drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs);
+}
+
+void radeon_init_disp_bw_legacy(struct drm_device *dev,
+ struct drm_display_mode *mode1,
+ uint32_t pixel_bytes1,
+ struct drm_display_mode *mode2,
+ uint32_t pixel_bytes2)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
+ fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
+ fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
+ uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
+ fixed20_12 memtcas_ff[8] = {
+ fixed_init(1),
+ fixed_init(2),
+ fixed_init(3),
+ fixed_init(0),
+ fixed_init_half(1),
+ fixed_init_half(2),
+ fixed_init(0),
+ };
+ fixed20_12 memtcas_rs480_ff[8] = {
+ fixed_init(0),
+ fixed_init(1),
+ fixed_init(2),
+ fixed_init(3),
+ fixed_init(0),
+ fixed_init_half(1),
+ fixed_init_half(2),
+ fixed_init_half(3),
+ };
+ fixed20_12 memtcas2_ff[8] = {
+ fixed_init(0),
+ fixed_init(1),
+ fixed_init(2),
+ fixed_init(3),
+ fixed_init(4),
+ fixed_init(5),
+ fixed_init(6),
+ fixed_init(7),
+ };
+ fixed20_12 memtrbs[8] = {
+ fixed_init(1),
+ fixed_init_half(1),
+ fixed_init(2),
+ fixed_init_half(2),
+ fixed_init(3),
+ fixed_init_half(3),
+ fixed_init(4),
+ fixed_init_half(4)
+ };
+ fixed20_12 memtrbs_r4xx[8] = {
+ fixed_init(4),
+ fixed_init(5),
+ fixed_init(6),
+ fixed_init(7),
+ fixed_init(8),
+ fixed_init(9),
+ fixed_init(10),
+ fixed_init(11)
+ };
+ fixed20_12 min_mem_eff;
+ fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
+ fixed20_12 cur_latency_mclk, cur_latency_sclk;
+ fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
+ disp_drain_rate2, read_return_rate;
+ fixed20_12 time_disp1_drop_priority;
+ int c;
+ int cur_size = 16; /* in octawords */
+ int critical_point = 0, critical_point2;
+/* uint32_t read_return_rate, time_disp1_drop_priority; */
+ int stop_req, max_stop_req;
+
+ min_mem_eff.full = rfixed_const_8(0);
+ /* get modes */
+ if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
+ uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
+ mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
+ mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
+ /* check crtc enables */
+ if (mode2)
+ mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
+ if (mode1)
+ mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
+ WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
+ }
+
+ /*
+	 * determine if there is enough bandwidth for the current mode
+ */
+ mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
+ temp_ff.full = rfixed_const(100);
+ mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
+ sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
+ sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
+
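+	/* peak memory bandwidth = memory clock * bus width in bytes, doubled
+	 * for DDR
+	 */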
+ temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
+ temp_ff.full = rfixed_const(temp);
+ mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
+
+ pix_clk.full = 0;
+ pix_clk2.full = 0;
+ peak_disp_bw.full = 0;
+ if (mode1) {
+ temp_ff.full = rfixed_const(1000);
+ pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
+ pix_clk.full = rfixed_div(pix_clk, temp_ff);
+ temp_ff.full = rfixed_const(pixel_bytes1);
+ peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+ }
+ if (mode2) {
+ temp_ff.full = rfixed_const(1000);
+ pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
+ pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
+ temp_ff.full = rfixed_const(pixel_bytes2);
+ peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+ }
+
+ mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+ if (peak_disp_bw.full >= mem_bw.full) {
+		DRM_ERROR("You may not have enough display bandwidth for the current mode\n"
+			  "If you see flickering, try lowering the resolution, refresh rate, or color depth\n");
+ }
+
+	/* Get the memory timing values from the MEM_TIMING_CNTL register, converting its contents. */
+ temp = RREG32(RADEON_MEM_TIMING_CNTL);
+ if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
+ mem_trcd = ((temp >> 2) & 0x3) + 1;
+ mem_trp = ((temp & 0x3)) + 1;
+ mem_tras = ((temp & 0x70) >> 4) + 1;
+ } else if (rdev->family == CHIP_R300 ||
+ rdev->family == CHIP_R350) { /* r300, r350 */
+ mem_trcd = (temp & 0x7) + 1;
+ mem_trp = ((temp >> 8) & 0x7) + 1;
+ mem_tras = ((temp >> 11) & 0xf) + 4;
+ } else if (rdev->family == CHIP_RV350 ||
+ rdev->family <= CHIP_RV380) {
+ /* rv3x0 */
+ mem_trcd = (temp & 0x7) + 3;
+ mem_trp = ((temp >> 8) & 0x7) + 3;
+ mem_tras = ((temp >> 11) & 0xf) + 6;
+ } else if (rdev->family == CHIP_R420 ||
+ rdev->family == CHIP_R423 ||
+ rdev->family == CHIP_RV410) {
+ /* r4xx */
+ mem_trcd = (temp & 0xf) + 3;
+ if (mem_trcd > 15)
+ mem_trcd = 15;
+ mem_trp = ((temp >> 8) & 0xf) + 3;
+ if (mem_trp > 15)
+ mem_trp = 15;
+ mem_tras = ((temp >> 12) & 0x1f) + 6;
+ if (mem_tras > 31)
+ mem_tras = 31;
+ } else { /* RV200, R200 */
+ mem_trcd = (temp & 0x7) + 1;
+ mem_trp = ((temp >> 8) & 0x7) + 1;
+ mem_tras = ((temp >> 12) & 0xf) + 4;
+ }
+ /* convert to FF */
+ trcd_ff.full = rfixed_const(mem_trcd);
+ trp_ff.full = rfixed_const(mem_trp);
+ tras_ff.full = rfixed_const(mem_tras);
+
+	/* Get values from the MEM_SDRAM_MODE_REG register, converting its contents. */
+ temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+ data = (temp & (7 << 20)) >> 20;
+ if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
+ if (rdev->family == CHIP_RS480) /* don't think rs400 */
+ tcas_ff = memtcas_rs480_ff[data];
+ else
+ tcas_ff = memtcas_ff[data];
+ } else
+ tcas_ff = memtcas2_ff[data];
+
+ if (rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480) {
+ /* extra cas latency stored in bits 23-25 0-4 clocks */
+ data = (temp >> 23) & 0x7;
+ if (data < 5)
+ tcas_ff.full += rfixed_const(data);
+ }
+
+ if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+ /* on the R300, Tcas is included in Trbs.
+ */
+ temp = RREG32(RADEON_MEM_CNTL);
+ data = (R300_MEM_NUM_CHANNELS_MASK & temp);
+ if (data == 1) {
+ if (R300_MEM_USE_CD_CH_ONLY & temp) {
+ temp = RREG32(R300_MC_IND_INDEX);
+ temp &= ~R300_MC_IND_ADDR_MASK;
+ temp |= R300_MC_READ_CNTL_CD_mcind;
+ WREG32(R300_MC_IND_INDEX, temp);
+ temp = RREG32(R300_MC_IND_DATA);
+ data = (R300_MEM_RBS_POSITION_C_MASK & temp);
+ } else {
+ temp = RREG32(R300_MC_READ_CNTL_AB);
+ data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+ }
+ } else {
+ temp = RREG32(R300_MC_READ_CNTL_AB);
+ data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+ }
+ if (rdev->family == CHIP_RV410 ||
+ rdev->family == CHIP_R420 ||
+ rdev->family == CHIP_R423)
+ trbs_ff = memtrbs_r4xx[data];
+ else
+ trbs_ff = memtrbs[data];
+ tcas_ff.full += trbs_ff.full;
+ }
+
+ sclk_eff_ff.full = sclk_ff.full;
+
+ if (rdev->flags & RADEON_IS_AGP) {
+ fixed20_12 agpmode_ff;
+ agpmode_ff.full = rfixed_const(radeon_agpmode);
+ temp_ff.full = rfixed_const_666(16);
+ sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
+ }
+ /* TODO PCIE lanes may affect this - agpmode == 16?? */
+
+ if (ASIC_IS_R300(rdev)) {
+ sclk_delay_ff.full = rfixed_const(250);
+ } else {
+ if ((rdev->family == CHIP_RV100) ||
+ rdev->flags & RADEON_IS_IGP) {
+ if (rdev->mc.vram_is_ddr)
+ sclk_delay_ff.full = rfixed_const(41);
+ else
+ sclk_delay_ff.full = rfixed_const(33);
+ } else {
+ if (rdev->mc.vram_width == 128)
+ sclk_delay_ff.full = rfixed_const(57);
+ else
+ sclk_delay_ff.full = rfixed_const(41);
+ }
+ }
+
+ mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
+
+ if (rdev->mc.vram_is_ddr) {
+ if (rdev->mc.vram_width == 32) {
+ k1.full = rfixed_const(40);
+ c = 3;
+ } else {
+ k1.full = rfixed_const(20);
+ c = 1;
+ }
+ } else {
+ k1.full = rfixed_const(40);
+ c = 3;
+ }
+
+ temp_ff.full = rfixed_const(2);
+ mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
+ temp_ff.full = rfixed_const(c);
+ mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
+ temp_ff.full = rfixed_const(4);
+ mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
+ mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
+ mc_latency_mclk.full += k1.full;
+
+ mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
+ mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
+
+ /*
+ HW cursor time assuming worst case of full size colour cursor.
+ */
+ temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+ temp_ff.full += trcd_ff.full;
+ if (temp_ff.full < tras_ff.full)
+ temp_ff.full = tras_ff.full;
+ cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
+
+ temp_ff.full = rfixed_const(cur_size);
+ cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
+ /*
+ Find the total latency for the display data.
+ */
+ disp_latency_overhead.full = rfixed_const(80);
+ disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
+ mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
+ mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
+
+ if (mc_latency_mclk.full > mc_latency_sclk.full)
+ disp_latency.full = mc_latency_mclk.full;
+ else
+ disp_latency.full = mc_latency_sclk.full;
+
+ /* setup Max GRPH_STOP_REQ default value */
+ if (ASIC_IS_RV100(rdev))
+ max_stop_req = 0x5c;
+ else
+ max_stop_req = 0x7c;
+
+ if (mode1) {
+ /* CRTC1
+ Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
+ GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
+ */
+ stop_req = mode1->hdisplay * pixel_bytes1 / 16;
+
+ if (stop_req > max_stop_req)
+ stop_req = max_stop_req;
+
+ /*
+ Find the drain rate of the display buffer.
+ */
+ temp_ff.full = rfixed_const((16/pixel_bytes1));
+ disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
+
+ /*
+ Find the critical point of the display buffer.
+ */
+ crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
+ crit_point_ff.full += rfixed_const_half(0);
+
+ critical_point = rfixed_trunc(crit_point_ff);
+
+ if (rdev->disp_priority == 2) {
+ critical_point = 0;
+ }
+
+ /*
+ The critical point should never be above max_stop_req-4. Setting
+ GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
+ */
+ if (max_stop_req - critical_point < 4)
+ critical_point = 0;
+
+ if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
+			/* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */
+ critical_point = 0x10;
+ }
+
+ temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
+ temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
+ temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+ temp &= ~(RADEON_GRPH_START_REQ_MASK);
+ if ((rdev->family == CHIP_R350) &&
+ (stop_req > 0x15)) {
+ stop_req -= 0x10;
+ }
+ temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+ temp |= RADEON_GRPH_BUFFER_SIZE;
+ temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
+ RADEON_GRPH_CRITICAL_AT_SOF |
+ RADEON_GRPH_STOP_CNTL);
+ /*
+ Write the result into the register.
+ */
+ WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+ (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+#if 0
+ if ((rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480)) {
+ /* attempt to program RS400 disp regs correctly ??? */
+ temp = RREG32(RS400_DISP1_REG_CNTL);
+ temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
+ RS400_DISP1_STOP_REQ_LEVEL_MASK);
+ WREG32(RS400_DISP1_REQ_CNTL1, (temp |
+ (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+ (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+ temp = RREG32(RS400_DMIF_MEM_CNTL1);
+ temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
+ RS400_DISP1_CRITICAL_POINT_STOP_MASK);
+ WREG32(RS400_DMIF_MEM_CNTL1, (temp |
+ (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
+ (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
+ }
+#endif
+
+		DRM_DEBUG("GRPH_BUFFER_CNTL now %x\n",
+			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
+ }
+
+ if (mode2) {
+ u32 grph2_cntl;
+ stop_req = mode2->hdisplay * pixel_bytes2 / 16;
+
+ if (stop_req > max_stop_req)
+ stop_req = max_stop_req;
+
+ /*
+ Find the drain rate of the display buffer.
+ */
+ temp_ff.full = rfixed_const((16/pixel_bytes2));
+ disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
+
+ grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
+ grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
+ grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+ grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
+ if ((rdev->family == CHIP_R350) &&
+ (stop_req > 0x15)) {
+ stop_req -= 0x10;
+ }
+ grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+ grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
+ grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
+ RADEON_GRPH_CRITICAL_AT_SOF |
+ RADEON_GRPH_STOP_CNTL);
+
+ if ((rdev->family == CHIP_RS100) ||
+ (rdev->family == CHIP_RS200))
+ critical_point2 = 0;
+ else {
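+			/*
+			  Read return rate is bounded by the memory interface:
+			  the smaller of MCLK scaled by the bus width (doubled
+			  for DDR) and the engine clock.
+			*/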
+			temp = (rdev->mc.vram_width * (rdev->mc.vram_is_ddr + 1)) / 128;
+ temp_ff.full = rfixed_const(temp);
+ temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
+ if (sclk_ff.full < temp_ff.full)
+ temp_ff.full = sclk_ff.full;
+
+ read_return_rate.full = temp_ff.full;
+
+ if (mode1) {
+ temp_ff.full = read_return_rate.full - disp_drain_rate.full;
+ time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
+ } else {
+ time_disp1_drop_priority.full = 0;
+ }
+ crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
+ crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
+ crit_point_ff.full += rfixed_const_half(0);
+
+ critical_point2 = rfixed_trunc(crit_point_ff);
+
+ if (rdev->disp_priority == 2) {
+ critical_point2 = 0;
+ }
+
+ if (max_stop_req - critical_point2 < 4)
+ critical_point2 = 0;
+
+ }
+
+ if (critical_point2 == 0 && rdev->family == CHIP_R300) {
+ /* some R300 cards have problem with this set to 0 */
+ critical_point2 = 0x10;
+ }
+
+ WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+ (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+ if ((rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480)) {
+#if 0
+ /* attempt to program RS400 disp2 regs correctly ??? */
+ temp = RREG32(RS400_DISP2_REQ_CNTL1);
+ temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
+ RS400_DISP2_STOP_REQ_LEVEL_MASK);
+ WREG32(RS400_DISP2_REQ_CNTL1, (temp |
+ (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+ (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+ temp = RREG32(RS400_DISP2_REQ_CNTL2);
+ temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
+ RS400_DISP2_CRITICAL_POINT_STOP_MASK);
+ WREG32(RS400_DISP2_REQ_CNTL2, (temp |
+ (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
+ (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
+#endif
+ WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
+ WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
+ WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
+ WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
+ }
+
+		DRM_DEBUG("GRPH2_BUFFER_CNTL now %x\n",
+			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
+ }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
new file mode 100644
index 00000000000..2c2f42de1d4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -0,0 +1,1288 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "atom.h"
+
+
+static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ int xres = mode->hdisplay;
+ int yres = mode->vdisplay;
+ bool hscale = true, vscale = true;
+ int hsync_wid;
+ int vsync_wid;
+ int hsync_start;
+ uint32_t scale, inc;
+ uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active;
+ uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp;
+ struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
+
+ DRM_DEBUG("\n");
+
+ fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
+ (RADEON_VERT_STRETCH_RESERVED |
+ RADEON_VERT_AUTO_RATIO_INC);
+ fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
+ (RADEON_HORZ_FP_LOOP_STRETCH |
+ RADEON_HORZ_AUTO_RATIO_INC);
+
+ crtc_more_cntl = 0;
+ if ((rdev->family == CHIP_RS100) ||
+ (rdev->family == CHIP_RS200)) {
+		/* This is to work around the ASIC bug for RMX, some versions
+		   of the BIOS don't have this register initialized correctly. */
+ crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
+ }
+
+
+ fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
+ | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+ hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+ if (!hsync_wid)
+ hsync_wid = 1;
+ hsync_start = mode->crtc_hsync_start - 8;
+
+ fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
+ | ((hsync_wid & 0x3f) << 16)
+ | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ? RADEON_CRTC_H_SYNC_POL
+ : 0));
+
+ fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
+ | ((mode->crtc_vdisplay - 1) << 16));
+
+ vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ if (!vsync_wid)
+ vsync_wid = 1;
+
+ fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
+ | ((vsync_wid & 0x1f) << 16)
+ | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ? RADEON_CRTC_V_SYNC_POL
+ : 0));
+
+ fp_horz_vert_active = 0;
+
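+	/* decide whether the mode has to be scaled to fit the panel's
+	 * native resolution */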
+ if (native_mode->panel_xres == 0 ||
+ native_mode->panel_yres == 0) {
+ hscale = false;
+ vscale = false;
+ } else {
+ if (xres > native_mode->panel_xres)
+ xres = native_mode->panel_xres;
+ if (yres > native_mode->panel_yres)
+ yres = native_mode->panel_yres;
+
+ if (xres == native_mode->panel_xres)
+ hscale = false;
+ if (yres == native_mode->panel_yres)
+ vscale = false;
+ }
+
+ if (radeon_encoder->flags & RADEON_USE_RMX) {
+ if (radeon_encoder->rmx_type != RMX_CENTER) {
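+			/* full screen stretch: program the stretch ratio as a
+			 * fraction of the panel's native resolution */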
+ if (!hscale)
+ fp_horz_stretch |= ((xres/8-1) << 16);
+ else {
+ inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
+ scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
+ / native_mode->panel_xres + 1;
+ fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
+ RADEON_HORZ_STRETCH_BLEND |
+ RADEON_HORZ_STRETCH_ENABLE |
+ ((native_mode->panel_xres/8-1) << 16));
+ }
+
+ if (!vscale)
+ fp_vert_stretch |= ((yres-1) << 12);
+ else {
+ inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
+ scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
+ / native_mode->panel_yres + 1;
+ fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
+ RADEON_VERT_STRETCH_ENABLE |
+ RADEON_VERT_STRETCH_BLEND |
+ ((native_mode->panel_yres-1) << 12));
+ }
+ } else if (radeon_encoder->rmx_type == RMX_CENTER) {
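+			/* centered mode: keep a 1:1 stretch and let the CRTC
+			 * auto-center the active area on the panel */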
+ int blank_width;
+
+ fp_horz_stretch |= ((xres/8-1) << 16);
+ fp_vert_stretch |= ((yres-1) << 12);
+
+ crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
+ RADEON_CRTC_AUTO_VERT_CENTER_EN);
+
+ blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
+ if (blank_width > 110)
+ blank_width = 110;
+
+ fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
+ | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+ hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+ if (!hsync_wid)
+ hsync_wid = 1;
+
+ fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
+ | ((hsync_wid & 0x3f) << 16)
+ | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ? RADEON_CRTC_H_SYNC_POL
+ : 0));
+
+ fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
+ | ((mode->crtc_vdisplay - 1) << 16));
+
+ vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ if (!vsync_wid)
+ vsync_wid = 1;
+
+ fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
+ | ((vsync_wid & 0x1f) << 16)
+ | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ? RADEON_CRTC_V_SYNC_POL
+ : 0)));
+
+ fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) |
+ (((native_mode->panel_xres / 8) & 0x1ff) << 16));
+ }
+ } else {
+ fp_horz_stretch |= ((xres/8-1) << 16);
+ fp_vert_stretch |= ((yres-1) << 12);
+ }
+
+ WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch);
+ WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch);
+ WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl);
+ WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active);
+ WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid);
+ WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid);
+ WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
+ WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
+
+}
+
+static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
+ int panel_pwr_delay = 2000;
+ DRM_DEBUG("\n");
+
+ if (radeon_encoder->enc_priv) {
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ panel_pwr_delay = lvds->panel_pwr_delay;
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ panel_pwr_delay = lvds->panel_pwr_delay;
+ }
+ }
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
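+		/* power-up sequence: enable the LVDS PLL, release its reset,
+		 * then enable the panel after the panel power delay */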
+ disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
+ disp_pwr_man |= RADEON_AUTO_PWRUP_EN;
+ WREG32(RADEON_DISP_PWR_MAN, disp_pwr_man);
+ lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+ lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
+ WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+ udelay(1000);
+
+ lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+ lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
+ WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
+ lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
+ udelay(panel_pwr_delay * 1000);
+ WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+ lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
+ udelay(panel_pwr_delay * 1000);
+ WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+ break;
+ }
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ else
+ radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, true);
+ else
+ radeon_combios_output_lock(encoder, true);
+ radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON);
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, false);
+ else
+ radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl;
+
+ DRM_DEBUG("\n");
+
+ if (radeon_crtc->crtc_id == 0)
+ radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
+
+ lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+ lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
+
+ lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
+	if (!rdev->is_atom_bios) {
+ struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+ if (lvds) {
+ DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
+ lvds_gen_cntl = lvds->lvds_gen_cntl;
+ lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
+ (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+ lvds_ss_gen_cntl |= ((lvds->panel_digon_delay << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
+ (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+ } else
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ } else
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+ lvds_gen_cntl &= ~(RADEON_LVDS_ON |
+ RADEON_LVDS_BLON |
+ RADEON_LVDS_EN |
+ RADEON_LVDS_RST_FM);
+
+ if (ASIC_IS_R300(rdev))
+ lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK);
+
+ if (radeon_crtc->crtc_id == 0) {
+ if (ASIC_IS_R300(rdev)) {
+ if (radeon_encoder->flags & RADEON_USE_RMX)
+ lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX;
+ } else
+ lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2;
+ } else {
+ if (ASIC_IS_R300(rdev))
+ lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2;
+ else
+ lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2;
+ }
+
+ WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+ WREG32(RADEON_LVDS_SS_GEN_CNTL, lvds_ss_gen_cntl);
+
+ if (rdev->family == CHIP_RV410)
+ WREG32(RADEON_CLOCK_CNTL_INDEX, 0);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ else
+ radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ radeon_encoder->flags &= ~RADEON_USE_RMX;
+
+ if (radeon_encoder->rmx_type != RMX_OFF)
+ radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
+
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
+ .dpms = radeon_legacy_lvds_dpms,
+ .mode_fixup = radeon_legacy_lvds_mode_fixup,
+ .prepare = radeon_legacy_lvds_prepare,
+ .mode_set = radeon_legacy_lvds_mode_set,
+ .commit = radeon_legacy_lvds_commit,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
+ .destroy = radeon_enc_destroy,
+};
+
+static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ return true;
+}
+
+static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+ uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL);
+ uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+
+ DRM_DEBUG("\n");
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ crtc_ext_cntl |= RADEON_CRTC_CRT_ON;
+ dac_cntl &= ~RADEON_DAC_PDWN;
+ dac_macro_cntl &= ~(RADEON_DAC_PDWN_R |
+ RADEON_DAC_PDWN_G |
+ RADEON_DAC_PDWN_B);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON;
+ dac_cntl |= RADEON_DAC_PDWN;
+ dac_macro_cntl |= (RADEON_DAC_PDWN_R |
+ RADEON_DAC_PDWN_G |
+ RADEON_DAC_PDWN_B);
+ break;
+ }
+
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+ WREG32(RADEON_DAC_CNTL, dac_cntl);
+ WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ else
+ radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, true);
+ else
+ radeon_combios_output_lock(encoder, true);
+ radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, false);
+ else
+ radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl;
+
+ DRM_DEBUG("\n");
+
+ if (radeon_crtc->crtc_id == 0)
+ radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
+
+ if (radeon_crtc->crtc_id == 0) {
+ if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
+ ~(RADEON_DISP_DAC_SOURCE_MASK);
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ } else {
+ dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~(RADEON_DAC2_DAC_CLK_SEL);
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+ }
+ } else {
+ if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
+ ~(RADEON_DISP_DAC_SOURCE_MASK);
+ disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2;
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ } else {
+ dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL;
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+ }
+ }
+
+ dac_cntl = (RADEON_DAC_MASK_ALL |
+ RADEON_DAC_VGA_ADR_EN |
+ /* TODO 6-bits */
+ RADEON_DAC_8BIT_EN);
+
+ WREG32_P(RADEON_DAC_CNTL,
+ dac_cntl,
+ RADEON_DAC_RANGE_CNTL |
+ RADEON_DAC_BLANKING);
+
+ if (radeon_encoder->enc_priv) {
+ struct radeon_encoder_primary_dac *p_dac = (struct radeon_encoder_primary_dac *)radeon_encoder->enc_priv;
+ dac_macro_cntl = p_dac->ps2_pdac_adj;
+ } else
+ dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+ dac_macro_cntl |= RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B;
+ WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ else
+ radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t vclk_ecp_cntl, crtc_ext_cntl;
+ uint32_t dac_ext_cntl, dac_cntl, dac_macro_cntl, tmp;
+ enum drm_connector_status found = connector_status_disconnected;
+ bool color = true;
+
+ /* save the regs we need */
+ vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+ dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+ dac_cntl = RREG32(RADEON_DAC_CNTL);
+ dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+
+ tmp = vclk_ecp_cntl &
+ ~(RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+ tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
+ WREG32(RADEON_CRTC_EXT_CNTL, tmp);
+
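+	/* force a known level onto the DAC outputs so the comparator can
+	 * sense whether a CRT load is present */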
+ tmp = RADEON_DAC_FORCE_BLANK_OFF_EN |
+ RADEON_DAC_FORCE_DATA_EN;
+
+ if (color)
+ tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
+ else
+ tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
+
+ if (ASIC_IS_R300(rdev))
+ tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+ else
+ tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
+
+ WREG32(RADEON_DAC_EXT_CNTL, tmp);
+
+ tmp = dac_cntl & ~(RADEON_DAC_RANGE_CNTL_MASK | RADEON_DAC_PDWN);
+ tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
+ WREG32(RADEON_DAC_CNTL, tmp);
+
+ tmp &= ~(RADEON_DAC_PDWN_R |
+ RADEON_DAC_PDWN_G |
+ RADEON_DAC_PDWN_B);
+
+ WREG32(RADEON_DAC_MACRO_CNTL, tmp);
+
+ udelay(2000);
+
+ if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
+ found = connector_status_connected;
+
+ /* restore the regs we used */
+ WREG32(RADEON_DAC_CNTL, dac_cntl);
+ WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+ WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl);
+
+ return found;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
+ .dpms = radeon_legacy_primary_dac_dpms,
+ .mode_fixup = radeon_legacy_primary_dac_mode_fixup,
+ .prepare = radeon_legacy_primary_dac_prepare,
+ .mode_set = radeon_legacy_primary_dac_mode_set,
+ .commit = radeon_legacy_primary_dac_commit,
+ .detect = radeon_legacy_primary_dac_detect,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
+ .destroy = radeon_enc_destroy,
+};
+
+static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ return true;
+}
+
+static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL);
+ DRM_DEBUG("\n");
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+ break;
+ }
+
+ WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ else
+ radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, true);
+ else
+ radeon_combios_output_lock(encoder, true);
+ radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl;
+ int i;
+
+ DRM_DEBUG("\n");
+
+ if (radeon_crtc->crtc_id == 0)
+ radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
+
+ tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
+ tmp &= 0xfffff;
+ if (rdev->family == CHIP_RV280) {
+ /* bit 22 of TMDS_PLL_CNTL is read-back inverted */
+ tmp ^= (1 << 22);
+ tmds_pll_cntl ^= (1 << 22);
+ }
+
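+	/* if the BIOS provided a TMDS PLL table, use the entry for the
+	 * lowest frequency range that still covers the pixel clock */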
+ if (radeon_encoder->enc_priv) {
+ struct radeon_encoder_int_tmds *tmds = (struct radeon_encoder_int_tmds *)radeon_encoder->enc_priv;
+
+ for (i = 0; i < 4; i++) {
+ if (tmds->tmds_pll[i].freq == 0)
+ break;
+ if ((uint32_t)(mode->clock / 10) < tmds->tmds_pll[i].freq) {
+				tmp = tmds->tmds_pll[i].value;
+ break;
+ }
+ }
+ }
+
+ if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV280)) {
+ if (tmp & 0xfff00000)
+ tmds_pll_cntl = tmp;
+ else {
+ tmds_pll_cntl &= 0xfff00000;
+ tmds_pll_cntl |= tmp;
+ }
+ } else
+ tmds_pll_cntl = tmp;
+
+ tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) &
+ ~(RADEON_TMDS_TRANSMITTER_PLLRST);
+
+ if (rdev->family == CHIP_R200 ||
+ rdev->family == CHIP_R100 ||
+ ASIC_IS_R300(rdev))
+ tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
+ else /* RV chips got this bit reversed */
+ tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
+
+ fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
+ (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
+ RADEON_FP_CRTC_DONT_SHADOW_HEND));
+
+ fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+
+ if (1) /* FIXME rgbBits == 8 */
+ fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */
+ else
+ fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */
+
+ if (radeon_crtc->crtc_id == 0) {
+ if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+ fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+ if (radeon_encoder->flags & RADEON_USE_RMX)
+ fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
+ else
+ fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
+ } else
+ fp_gen_cntl |= RADEON_FP_SEL_CRTC1;
+ } else {
+ if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+ fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+ fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
+ } else
+ fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
+ }
+
+ WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
+ WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
+ WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ else
+ radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
+ .dpms = radeon_legacy_tmds_int_dpms,
+ .mode_fixup = radeon_legacy_tmds_int_mode_fixup,
+ .prepare = radeon_legacy_tmds_int_prepare,
+ .mode_set = radeon_legacy_tmds_int_mode_set,
+ .commit = radeon_legacy_tmds_int_commit,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
+ .destroy = radeon_enc_destroy,
+};
+
+static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ return true;
+}
+
+static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ DRM_DEBUG("\n");
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN;
+ fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ fp2_gen_cntl |= RADEON_FP2_BLANK_EN;
+ fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+ break;
+ }
+
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ else
+ radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, true);
+ else
+ radeon_combios_output_lock(encoder, true);
+ radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, false);
+ else
+ radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t fp2_gen_cntl;
+
+ DRM_DEBUG("\n");
+
+ if (radeon_crtc->crtc_id == 0)
+ radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
+
+ if (rdev->is_atom_bios) {
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+ atombios_external_tmds_setup(encoder, ATOM_ENABLE);
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ } else {
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+
+ if (1) /* FIXME rgbBits == 8 */
+ fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format, */
+ else
+ fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT;/* 18 bit format, */
+
+ fp2_gen_cntl &= ~(RADEON_FP2_ON |
+ RADEON_FP2_DVO_EN |
+ RADEON_FP2_DVO_RATE_SEL_SDR);
+
+ /* XXX: these are oem specific */
+ if (ASIC_IS_R300(rdev)) {
+ if ((dev->pdev->device == 0x4850) &&
+ (dev->pdev->subsystem_vendor == 0x1028) &&
+ (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
+ fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE;
+ else
+ fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE;
+
+ /*if (mode->clock > 165000)
+ fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
+ }
+ }
+
+ if (radeon_crtc->crtc_id == 0) {
+ if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
+ fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
+ if (radeon_encoder->flags & RADEON_USE_RMX)
+ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX;
+ else
+ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1;
+ } else
+ fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2;
+ } else {
+ if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
+ fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
+ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
+ } else
+ fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2;
+ }
+
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ else
+ radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
+ .dpms = radeon_legacy_tmds_ext_dpms,
+ .mode_fixup = radeon_legacy_tmds_ext_mode_fixup,
+ .prepare = radeon_legacy_tmds_ext_prepare,
+ .mode_set = radeon_legacy_tmds_ext_mode_set,
+ .commit = radeon_legacy_tmds_ext_commit,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
+ .destroy = radeon_enc_destroy,
+};
+
+static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ return true;
+}
+
+static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0;
+ /* uint32_t tv_master_cntl = 0; */
+
+ DRM_DEBUG("\n");
+
+ if (rdev->family == CHIP_R200)
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ else {
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ /* FIXME TV */
+ /* tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); */
+ tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ }
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (rdev->family == CHIP_R200) {
+ fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+ } else {
+ crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON;
+ /* tv_master_cntl |= RADEON_TV_ON; */
+ if (rdev->family == CHIP_R420 ||
+ rdev->family == CHIP_R423 ||
+ rdev->family == CHIP_RV410)
+ tv_dac_cntl &= ~(R420_TV_DAC_RDACPD |
+ R420_TV_DAC_GDACPD |
+ R420_TV_DAC_BDACPD |
+ RADEON_TV_DAC_BGSLEEP);
+ else
+ tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD |
+ RADEON_TV_DAC_GDACPD |
+ RADEON_TV_DAC_BDACPD |
+ RADEON_TV_DAC_BGSLEEP);
+ }
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (rdev->family == CHIP_R200)
+ fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+ else {
+ crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
+ /* tv_master_cntl &= ~RADEON_TV_ON; */
+ if (rdev->family == CHIP_R420 ||
+ rdev->family == CHIP_R423 ||
+ rdev->family == CHIP_RV410)
+ tv_dac_cntl |= (R420_TV_DAC_RDACPD |
+ R420_TV_DAC_GDACPD |
+ R420_TV_DAC_BDACPD |
+ RADEON_TV_DAC_BGSLEEP);
+ else
+ tv_dac_cntl |= (RADEON_TV_DAC_RDACPD |
+ RADEON_TV_DAC_GDACPD |
+ RADEON_TV_DAC_BDACPD |
+ RADEON_TV_DAC_BGSLEEP);
+ }
+ break;
+ }
+
+ if (rdev->family == CHIP_R200) {
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+ } else {
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ /* WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); */
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+ }
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ else
+ radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, true);
+ else
+ radeon_combios_output_lock(encoder, true);
+ radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0;
+ uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0;
+
+ DRM_DEBUG("\n");
+
+ if (radeon_crtc->crtc_id == 0)
+ radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
+
+ if (rdev->family != CHIP_R200) {
+ tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ if (rdev->family == CHIP_R420 ||
+ rdev->family == CHIP_R423 ||
+ rdev->family == CHIP_RV410) {
+ tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
+ RADEON_TV_DAC_BGADJ_MASK |
+ R420_TV_DAC_DACADJ_MASK |
+ R420_TV_DAC_RDACPD |
+ R420_TV_DAC_GDACPD |
+					 R420_TV_DAC_BDACPD |
+ R420_TV_DAC_TVENABLE);
+ } else {
+ tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
+ RADEON_TV_DAC_BGADJ_MASK |
+ RADEON_TV_DAC_DACADJ_MASK |
+ RADEON_TV_DAC_RDACPD |
+ RADEON_TV_DAC_GDACPD |
+					 RADEON_TV_DAC_BDACPD);
+ }
+
+ /* FIXME TV */
+ if (radeon_encoder->enc_priv) {
+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+ tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
+ RADEON_TV_DAC_NHOLD |
+ RADEON_TV_DAC_STD_PS2 |
+ tv_dac->ps2_tvdac_adj);
+ } else
+ tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
+ RADEON_TV_DAC_NHOLD |
+ RADEON_TV_DAC_STD_PS2);
+
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+ }
+
+ if (ASIC_IS_R300(rdev)) {
+ gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+ } else if (rdev->family == CHIP_R200)
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ else
+ disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+
+ dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL;
+
+ if (radeon_crtc->crtc_id == 0) {
+ if (ASIC_IS_R300(rdev)) {
+ disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+ disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC;
+ } else if (rdev->family == CHIP_R200) {
+ fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
+ RADEON_FP2_DVO_RATE_SEL_SDR);
+ } else
+ disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+ } else {
+ if (ASIC_IS_R300(rdev)) {
+ disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+ disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+ } else if (rdev->family == CHIP_R200) {
+ fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
+ RADEON_FP2_DVO_RATE_SEL_SDR);
+ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
+ } else
+ disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
+ }
+
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+
+ if (ASIC_IS_R300(rdev)) {
+ WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+		WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ } else if (rdev->family == CHIP_R200)
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+ else
+ WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ else
+ radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+
+}
+
+static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
+ uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp;
+ enum drm_connector_status found = connector_status_disconnected;
+ bool color = true;
+
+ /* FIXME tv */
+
+ /* save the regs we need */
+ pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ gpiopad_a = ASIC_IS_R300(rdev) ? RREG32(RADEON_GPIOPAD_A) : 0;
+ disp_output_cntl = ASIC_IS_R300(rdev) ? RREG32(RADEON_DISP_OUTPUT_CNTL) : 0;
+ disp_hw_debug = ASIC_IS_R300(rdev) ? 0 : RREG32(RADEON_DISP_HW_DEBUG);
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+ dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+
+ tmp = pixclks_cntl & ~(RADEON_PIX2CLK_ALWAYS_ONb
+ | RADEON_PIX2CLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+
+ if (ASIC_IS_R300(rdev))
+ WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
+
+ tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
+ tmp |= RADEON_CRTC2_CRT2_ON |
+ (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
+
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+
+ if (ASIC_IS_R300(rdev)) {
+ tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
+ tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+ WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+ } else {
+ tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
+ WREG32(RADEON_DISP_HW_DEBUG, tmp);
+ }
+
+ tmp = RADEON_TV_DAC_NBLANK |
+ RADEON_TV_DAC_NHOLD |
+ RADEON_TV_MONITOR_DETECT_EN |
+ RADEON_TV_DAC_STD_PS2;
+
+ WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+ tmp = RADEON_DAC2_FORCE_BLANK_OFF_EN |
+ RADEON_DAC2_FORCE_DATA_EN;
+
+ if (color)
+ tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
+ else
+ tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
+
+ if (ASIC_IS_R300(rdev))
+ tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+ else
+ tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
+
+ WREG32(RADEON_DAC_EXT_CNTL, tmp);
+
+ tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
+ WREG32(RADEON_DAC_CNTL2, tmp);
+
+ udelay(10000);
+
+ if (ASIC_IS_R300(rdev)) {
+ if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
+ found = connector_status_connected;
+ } else {
+ if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUTPUT)
+ found = connector_status_connected;
+ }
+
+ /* restore regs we used */
+ WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+ WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+
+ if (ASIC_IS_R300(rdev)) {
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+ } else {
+ WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+ }
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+
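+	/* the load-detect result is not trusted yet (see the FIXME above),
+	 * so always report disconnected for now */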
+ /* return found; */
+ return connector_status_disconnected;
+
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
+ .dpms = radeon_legacy_tv_dac_dpms,
+ .mode_fixup = radeon_legacy_tv_dac_mode_fixup,
+ .prepare = radeon_legacy_tv_dac_prepare,
+ .mode_set = radeon_legacy_tv_dac_mode_set,
+ .commit = radeon_legacy_tv_dac_commit,
+ .detect = radeon_legacy_tv_dac_detect,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = {
+ .destroy = radeon_enc_destroy,
+};
+
+void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+
+ /* see if we already added it */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->encoder_id == encoder_id) {
+ radeon_encoder->devices |= supported_device;
+ return;
+ }
+
+ }
+
+ /* add a new one */
+ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+ if (!radeon_encoder)
+ return;
+
+ encoder = &radeon_encoder->base;
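+	/* legacy encoders can be driven by either CRTC */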
+ encoder->possible_crtcs = 0x3;
+ encoder->possible_clones = 0;
+
+ radeon_encoder->enc_priv = NULL;
+
+ radeon_encoder->encoder_id = encoder_id;
+ radeon_encoder->devices = supported_device;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
+ if (rdev->is_atom_bios)
+ radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+ else
+ radeon_encoder->enc_priv = radeon_combios_get_lvds_info(radeon_encoder);
+ radeon_encoder->rmx_type = RMX_FULL;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs);
+ if (rdev->is_atom_bios)
+ radeon_encoder->enc_priv = radeon_atombios_get_tmds_info(radeon_encoder);
+ else
+ radeon_encoder->enc_priv = radeon_combios_get_tmds_info(radeon_encoder);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs);
+ if (rdev->is_atom_bios)
+ radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder);
+ else
+ radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs);
+ if (rdev->is_atom_bios)
+ radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder);
+ else
+ radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
+ if (!rdev->is_atom_bios)
+ radeon_combios_get_ext_tmds_info(radeon_encoder);
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
new file mode 100644
index 00000000000..9173b687462
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ * VA Linux Systems Inc., Fremont, California.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Original Authors:
+ * Kevin E. Martin, Rickard E. Faith, Alan Hourihane
+ *
+ * Kernel port Author: Dave Airlie
+ */
+
+#ifndef RADEON_MODE_H
+#define RADEON_MODE_H
+
+#include <drm_crtc.h>
+#include <drm_mode.h>
+#include <drm_edid.h>
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+
+#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
+#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
+#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
+#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
+
+enum radeon_connector_type {
+ CONNECTOR_NONE,
+ CONNECTOR_VGA,
+ CONNECTOR_DVI_I,
+ CONNECTOR_DVI_D,
+ CONNECTOR_DVI_A,
+ CONNECTOR_STV,
+ CONNECTOR_CTV,
+ CONNECTOR_LVDS,
+ CONNECTOR_DIGITAL,
+ CONNECTOR_SCART,
+ CONNECTOR_HDMI_TYPE_A,
+ CONNECTOR_HDMI_TYPE_B,
+ CONNECTOR_0XC,
+ CONNECTOR_0XD,
+ CONNECTOR_DIN,
+ CONNECTOR_DISPLAY_PORT,
+ CONNECTOR_UNSUPPORTED
+};
+
+enum radeon_dvi_type {
+ DVI_AUTO,
+ DVI_DIGITAL,
+ DVI_ANALOG
+};
+
+enum radeon_rmx_type {
+ RMX_OFF,
+ RMX_FULL,
+ RMX_CENTER,
+ RMX_ASPECT
+};
+
+enum radeon_tv_std {
+ TV_STD_NTSC,
+ TV_STD_PAL,
+ TV_STD_PAL_M,
+ TV_STD_PAL_60,
+ TV_STD_NTSC_J,
+ TV_STD_SCART_PAL,
+ TV_STD_SECAM,
+ TV_STD_PAL_CN,
+};
+
+struct radeon_i2c_bus_rec {
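+	/* GPIO registers and bit masks used to bit-bang this i2c bus */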
+ bool valid;
+ uint32_t mask_clk_reg;
+ uint32_t mask_data_reg;
+ uint32_t a_clk_reg;
+ uint32_t a_data_reg;
+ uint32_t put_clk_reg;
+ uint32_t put_data_reg;
+ uint32_t get_clk_reg;
+ uint32_t get_data_reg;
+ uint32_t mask_clk_mask;
+ uint32_t mask_data_mask;
+ uint32_t put_clk_mask;
+ uint32_t put_data_mask;
+ uint32_t get_clk_mask;
+ uint32_t get_data_mask;
+ uint32_t a_clk_mask;
+ uint32_t a_data_mask;
+};
+
+struct radeon_tmds_pll {
+ uint32_t freq;
+ uint32_t value;
+};
+
+#define RADEON_MAX_BIOS_CONNECTOR 16
+
+#define RADEON_PLL_USE_BIOS_DIVS (1 << 0)
+#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1)
+#define RADEON_PLL_USE_REF_DIV (1 << 2)
+#define RADEON_PLL_LEGACY (1 << 3)
+#define RADEON_PLL_PREFER_LOW_REF_DIV (1 << 4)
+#define RADEON_PLL_PREFER_HIGH_REF_DIV (1 << 5)
+#define RADEON_PLL_PREFER_LOW_FB_DIV (1 << 6)
+#define RADEON_PLL_PREFER_HIGH_FB_DIV (1 << 7)
+#define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8)
+#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
+#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
+
+struct radeon_pll {
+ uint16_t reference_freq;
+ uint16_t reference_div;
+ uint32_t pll_in_min;
+ uint32_t pll_in_max;
+ uint32_t pll_out_min;
+ uint32_t pll_out_max;
+ uint16_t xclk;
+
+ uint32_t min_ref_div;
+ uint32_t max_ref_div;
+ uint32_t min_post_div;
+ uint32_t max_post_div;
+ uint32_t min_feedback_div;
+ uint32_t max_feedback_div;
+ uint32_t min_frac_feedback_div;
+ uint32_t max_frac_feedback_div;
+ uint32_t best_vco;
+};
+
+struct radeon_i2c_chan {
+ struct drm_device *dev;
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ struct radeon_i2c_bus_rec rec;
+};
+
+/* mostly for macs, but really any system without connector tables */
+enum radeon_connector_table {
+ CT_NONE,
+ CT_GENERIC,
+ CT_IBOOK,
+ CT_POWERBOOK_EXTERNAL,
+ CT_POWERBOOK_INTERNAL,
+ CT_POWERBOOK_VGA,
+ CT_MINI_EXTERNAL,
+ CT_MINI_INTERNAL,
+ CT_IMAC_G5_ISIGHT,
+ CT_EMAC,
+};
+
+struct radeon_mode_info {
+ struct atom_context *atom_context;
+ enum radeon_connector_table connector_table;
+ bool mode_config_initialized;
+};
+
+struct radeon_crtc {
+ struct drm_crtc base;
+ int crtc_id;
+ u16 lut_r[256], lut_g[256], lut_b[256];
+ bool enabled;
+ bool can_tile;
+ uint32_t crtc_offset;
+ struct radeon_framebuffer *fbdev_fb;
+ struct drm_mode_set mode_set;
+ struct drm_gem_object *cursor_bo;
+ uint64_t cursor_addr;
+ int cursor_width;
+ int cursor_height;
+};
+
+#define RADEON_USE_RMX 1
+
+struct radeon_native_mode {
+ /* preferred mode */
+ uint32_t panel_xres, panel_yres;
+ uint32_t hoverplus, hsync_width;
+ uint32_t hblank;
+ uint32_t voverplus, vsync_width;
+ uint32_t vblank;
+ uint32_t dotclock;
+ uint32_t flags;
+};
+
+struct radeon_encoder_primary_dac {
+ /* legacy primary dac */
+ uint32_t ps2_pdac_adj;
+};
+
+struct radeon_encoder_lvds {
+ /* legacy lvds */
+ uint16_t panel_vcc_delay;
+ uint8_t panel_pwr_delay;
+ uint8_t panel_digon_delay;
+ uint8_t panel_blon_delay;
+ uint16_t panel_ref_divider;
+ uint8_t panel_post_divider;
+ uint16_t panel_fb_divider;
+ bool use_bios_dividers;
+ uint32_t lvds_gen_cntl;
+ /* panel mode */
+ struct radeon_native_mode native_mode;
+};
+
+struct radeon_encoder_tv_dac {
+ /* legacy tv dac */
+ uint32_t ps2_tvdac_adj;
+ uint32_t ntsc_tvdac_adj;
+ uint32_t pal_tvdac_adj;
+
+ enum radeon_tv_std tv_std;
+};
+
+struct radeon_encoder_int_tmds {
+ /* legacy int tmds */
+ struct radeon_tmds_pll tmds_pll[4];
+};
+
+struct radeon_encoder_atom_dig {
+ /* atom dig */
+ bool coherent_mode;
+ int dig_block;
+ /* atom lvds */
+ uint32_t lvds_misc;
+ uint16_t panel_pwr_delay;
+ /* panel mode */
+ struct radeon_native_mode native_mode;
+};
+
+struct radeon_encoder {
+ struct drm_encoder base;
+ uint32_t encoder_id;
+ uint32_t devices;
+ uint32_t flags;
+ uint32_t pixel_clock;
+ enum radeon_rmx_type rmx_type;
+ struct radeon_native_mode native_mode;
+ void *enc_priv;
+};
+
+struct radeon_connector_atom_dig {
+ uint32_t igp_lane_info;
+ bool linkb;
+};
+
+struct radeon_connector {
+ struct drm_connector base;
+ uint32_t connector_id;
+ uint32_t devices;
+ struct radeon_i2c_chan *ddc_bus;
+ int use_digital;
+ void *con_priv;
+};
+
+struct radeon_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name);
+extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
+extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
+
+extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
+
+extern void radeon_compute_pll(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p,
+ int flags);
+
+struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
+extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
+extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
+
+extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
+extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
+
+extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
+
+extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height);
+extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y);
+
+extern bool radeon_atom_get_clock_info(struct drm_device *dev);
+extern bool radeon_combios_get_clock_info(struct drm_device *dev);
+extern struct radeon_encoder_atom_dig *
+radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_int_tmds *
+radeon_atombios_get_tmds_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_primary_dac *
+radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_tv_dac *
+radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_lvds *
+radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_int_tmds *
+radeon_combios_get_tmds_info(struct radeon_encoder *encoder);
+extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_tv_dac *
+radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_primary_dac *
+radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
+extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
+extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
+extern void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev);
+extern void
+radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
+extern void
+radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
+extern void
+radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
+extern void
+radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
+extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj);
+
+int radeonfb_probe(struct drm_device *dev);
+
+int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
+bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev);
+void radeon_atombios_init_crtc(struct drm_device *dev,
+ struct radeon_crtc *radeon_crtc);
+void radeon_legacy_init_crtc(struct drm_device *dev,
+ struct radeon_crtc *radeon_crtc);
+void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
+
+void radeon_get_clock_info(struct drm_device *dev);
+
+extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
+extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
+
+void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void radeon_enc_destroy(struct drm_encoder *encoder);
+void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
+void radeon_combios_asic_init(struct drm_device *dev);
+extern int radeon_static_clocks_init(struct drm_device *dev);
+void radeon_init_disp_bw_legacy(struct drm_device *dev,
+ struct drm_display_mode *mode1,
+ uint32_t pixel_bytes1,
+ struct drm_display_mode *mode2,
+ uint32_t pixel_bytes2);
+void radeon_init_disp_bw_avivo(struct drm_device *dev,
+ struct drm_display_mode *mode1,
+ uint32_t pixel_bytes1,
+ struct drm_display_mode *mode2,
+ uint32_t pixel_bytes2);
+void radeon_init_disp_bandwidth(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
new file mode 100644
index 00000000000..983e8df5e00
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ * Dave Airlie
+ */
+#include <linux/list.h>
+#include <drm/drmP.h>
+#include "radeon_drm.h"
+#include "radeon.h"
+
+struct radeon_object {
+ struct ttm_buffer_object tobj;
+ struct list_head list;
+ struct radeon_device *rdev;
+ struct drm_gem_object *gobj;
+ struct ttm_bo_kmap_obj kmap;
+ unsigned pin_count;
+ uint64_t gpu_addr;
+ void *kptr;
+ bool is_iomem;
+};
+
+int radeon_ttm_init(struct radeon_device *rdev);
+void radeon_ttm_fini(struct radeon_device *rdev);
+
+/*
+ * To exclude concurrent BO access we rely on bo_reserve exclusion;
+ * every function below calls it before touching the object.
+ */
+
+static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
+{
+ return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
+}
+
+static void radeon_object_unreserve(struct radeon_object *robj)
+{
+ ttm_bo_unreserve(&robj->tobj);
+}
+
+static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
+{
+ struct radeon_object *robj;
+
+ robj = container_of(tobj, struct radeon_object, tobj);
+ list_del_init(&robj->list);
+ kfree(robj);
+}
+
+static inline void radeon_object_gpu_addr(struct radeon_object *robj)
+{
+ /* Default gpu address */
+ robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
+ if (robj->tobj.mem.mm_node == NULL) {
+ return;
+ }
+ robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
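+ /* mm_node->start is an offset within the aperture; add the matching aperture base below. */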
+ switch (robj->tobj.mem.mem_type) {
+ case TTM_PL_VRAM:
+ robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
+ break;
+ case TTM_PL_TT:
+ robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
+ break;
+ default:
+ DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
+ robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
+ return;
+ }
+}
+
+static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
+{
+ uint32_t flags = 0;
+ if (domain & RADEON_GEM_DOMAIN_VRAM) {
+ flags |= TTM_PL_FLAG_VRAM;
+ }
+ if (domain & RADEON_GEM_DOMAIN_GTT) {
+ flags |= TTM_PL_FLAG_TT;
+ }
+ if (domain & RADEON_GEM_DOMAIN_CPU) {
+ flags |= TTM_PL_FLAG_SYSTEM;
+ }
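+ /* No recognized domain bit set: fall back to system memory. */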
+ if (!flags) {
+ flags |= TTM_PL_FLAG_SYSTEM;
+ }
+ return flags;
+}
+
+int radeon_object_create(struct radeon_device *rdev,
+ struct drm_gem_object *gobj,
+ unsigned long size,
+ bool kernel,
+ uint32_t domain,
+ bool interruptible,
+ struct radeon_object **robj_ptr)
+{
+ struct radeon_object *robj;
+ enum ttm_bo_type type;
+ uint32_t flags;
+ int r;
+
+ if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
+ rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+ }
+ if (kernel) {
+ type = ttm_bo_type_kernel;
+ } else {
+ type = ttm_bo_type_device;
+ }
+ *robj_ptr = NULL;
+ robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
+ if (robj == NULL) {
+ return -ENOMEM;
+ }
+ robj->rdev = rdev;
+ robj->gobj = gobj;
+ INIT_LIST_HEAD(&robj->list);
+
+ flags = radeon_object_flags_from_domain(domain);
+ r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
+ 0, 0, false, NULL, size,
+ &radeon_ttm_object_object_destroy);
+ if (unlikely(r != 0)) {
+ /* TTM calls radeon_ttm_object_object_destroy() if an error occurs */
+ DRM_ERROR("Failed to allocate TTM object (%lu, 0x%08X, %u)\n",
+ size, flags, 0);
+ return r;
+ }
+ *robj_ptr = robj;
+ if (gobj) {
+ list_add_tail(&robj->list, &rdev->gem.objects);
+ }
+ return 0;
+}
+
+int radeon_object_kmap(struct radeon_object *robj, void **ptr)
+{
+ int r;
+
+ spin_lock(&robj->tobj.lock);
+ if (robj->kptr) {
+ if (ptr) {
+ *ptr = robj->kptr;
+ }
+ spin_unlock(&robj->tobj.lock);
+ return 0;
+ }
+ spin_unlock(&robj->tobj.lock);
+ r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
+ if (r) {
+ return r;
+ }
+ spin_lock(&robj->tobj.lock);
+ robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
+ spin_unlock(&robj->tobj.lock);
+ if (ptr) {
+ *ptr = robj->kptr;
+ }
+ return 0;
+}
+
+void radeon_object_kunmap(struct radeon_object *robj)
+{
+ spin_lock(&robj->tobj.lock);
+ if (robj->kptr == NULL) {
+ spin_unlock(&robj->tobj.lock);
+ return;
+ }
+ robj->kptr = NULL;
+ spin_unlock(&robj->tobj.lock);
+ ttm_bo_kunmap(&robj->kmap);
+}
+
+void radeon_object_unref(struct radeon_object **robj)
+{
+ struct ttm_buffer_object *tobj;
+
+ if ((*robj) == NULL) {
+ return;
+ }
+ tobj = &((*robj)->tobj);
+ ttm_bo_unref(&tobj);
+ if (tobj == NULL) {
+ *robj = NULL;
+ }
+}
+
+int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
+{
+ *offset = robj->tobj.addr_space_offset;
+ return 0;
+}
+
+int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
+ uint64_t *gpu_addr)
+{
+ uint32_t flags;
+ uint32_t tmp;
+ void *fbptr;
+ int r;
+
+ flags = radeon_object_flags_from_domain(domain);
+ spin_lock(&robj->tobj.lock);
+ if (robj->pin_count) {
+ robj->pin_count++;
+ if (gpu_addr != NULL) {
+ *gpu_addr = robj->gpu_addr;
+ }
+ spin_unlock(&robj->tobj.lock);
+ return 0;
+ }
+ spin_unlock(&robj->tobj.lock);
+ r = radeon_object_reserve(robj, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to reserve object for pinning.\n");
+ return r;
+ }
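+ /* Validation below may relocate the fbdev BO; drop its kernel mapping first and re-map it afterwards. */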
+ if (robj->rdev->fbdev_robj == robj) {
+ mutex_lock(&robj->rdev->fbdev_info->lock);
+ radeon_object_kunmap(robj);
+ }
+ tmp = robj->tobj.mem.placement;
+ ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
+ robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
+ r = ttm_buffer_object_validate(&robj->tobj,
+ robj->tobj.proposed_placement,
+ false, false);
+ radeon_object_gpu_addr(robj);
+ if (gpu_addr != NULL) {
+ *gpu_addr = robj->gpu_addr;
+ }
+ robj->pin_count = 1;
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to pin object.\n");
+ }
+ radeon_object_unreserve(robj);
+ if (robj->rdev->fbdev_robj == robj) {
+ if (!r) {
+ r = radeon_object_kmap(robj, &fbptr);
+ }
+ if (!r) {
+ robj->rdev->fbdev_info->screen_base = fbptr;
+ robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
+ }
+ mutex_unlock(&robj->rdev->fbdev_info->lock);
+ }
+ return r;
+}
+
+void radeon_object_unpin(struct radeon_object *robj)
+{
+ uint32_t flags;
+ void *fbptr;
+ int r;
+
+ spin_lock(&robj->tobj.lock);
+ if (!robj->pin_count) {
+ spin_unlock(&robj->tobj.lock);
+ printk(KERN_WARNING "Unpin not necessary for %p!\n", robj);
+ return;
+ }
+ robj->pin_count--;
+ if (robj->pin_count) {
+ spin_unlock(&robj->tobj.lock);
+ return;
+ }
+ spin_unlock(&robj->tobj.lock);
+ r = radeon_object_reserve(robj, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to reserve object for unpinning.\n");
+ return;
+ }
+ if (robj->rdev->fbdev_robj == robj) {
+ mutex_lock(&robj->rdev->fbdev_info->lock);
+ radeon_object_kunmap(robj);
+ }
+ flags = robj->tobj.mem.placement;
+ robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
+ r = ttm_buffer_object_validate(&robj->tobj,
+ robj->tobj.proposed_placement,
+ false, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to unpin buffer.\n");
+ }
+ radeon_object_unreserve(robj);
+ if (robj->rdev->fbdev_robj == robj) {
+ if (!r) {
+ r = radeon_object_kmap(robj, &fbptr);
+ }
+ if (!r) {
+ robj->rdev->fbdev_info->screen_base = fbptr;
+ robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
+ }
+ mutex_unlock(&robj->rdev->fbdev_info->lock);
+ }
+}
+
+int radeon_object_wait(struct radeon_object *robj)
+{
+ int r = 0;
+
+ /* FIXME: should use a blocking reservation instead */
+ r = radeon_object_reserve(robj, true);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to reserve object for waiting.\n");
+ return r;
+ }
+ spin_lock(&robj->tobj.lock);
+ if (robj->tobj.sync_obj) {
+ r = ttm_bo_wait(&robj->tobj, true, false, false);
+ }
+ spin_unlock(&robj->tobj.lock);
+ radeon_object_unreserve(robj);
+ return r;
+}
+
+int radeon_object_evict_vram(struct radeon_device *rdev)
+{
+ if (rdev->flags & RADEON_IS_IGP) {
+ /* Useless to evict on IGP chips */
+ return 0;
+ }
+ return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+}
+
+void radeon_object_force_delete(struct radeon_device *rdev)
+{
+ struct radeon_object *robj, *n;
+ struct drm_gem_object *gobj;
+
+ if (list_empty(&rdev->gem.objects)) {
+ return;
+ }
+ DRM_ERROR("Userspace still has active objects!\n");
+ list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
+ mutex_lock(&rdev->ddev->struct_mutex);
+ gobj = robj->gobj;
+ DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
+ gobj, robj, (unsigned long)gobj->size,
+ *((unsigned long *)&gobj->refcount));
+ list_del_init(&robj->list);
+ radeon_object_unref(&robj);
+ gobj->driver_private = NULL;
+ drm_gem_object_unreference(gobj);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+ }
+}
+
+int radeon_object_init(struct radeon_device *rdev)
+{
+ return radeon_ttm_init(rdev);
+}
+
+void radeon_object_fini(struct radeon_device *rdev)
+{
+ radeon_ttm_fini(rdev);
+}
+
+void radeon_object_list_add_object(struct radeon_object_list *lobj,
+ struct list_head *head)
+{
+ if (lobj->wdomain) {
+ list_add(&lobj->list, head);
+ } else {
+ list_add_tail(&lobj->list, head);
+ }
+}
+
+int radeon_object_list_reserve(struct list_head *head)
+{
+ struct radeon_object_list *lobj;
+ struct list_head *i;
+ int r;
+
+ list_for_each(i, head) {
+ lobj = list_entry(i, struct radeon_object_list, list);
+ if (!lobj->robj->pin_count) {
+ r = radeon_object_reserve(lobj->robj, true);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to reserve object.\n");
+ return r;
+ }
+ }
+ }
+ return 0;
+}
+
+void radeon_object_list_unreserve(struct list_head *head)
+{
+ struct radeon_object_list *lobj;
+ struct list_head *i;
+
+ list_for_each(i, head) {
+ lobj = list_entry(i, struct radeon_object_list, list);
+ if (!lobj->robj->pin_count) {
+ radeon_object_unreserve(lobj->robj);
+ }
+ }
+}
+
+int radeon_object_list_validate(struct list_head *head, void *fence)
+{
+ struct radeon_object_list *lobj;
+ struct radeon_object *robj;
+ struct radeon_fence *old_fence = NULL;
+ struct list_head *i;
+ uint32_t flags;
+ int r;
+
+ r = radeon_object_list_reserve(head);
+ if (unlikely(r != 0)) {
+ radeon_object_list_unreserve(head);
+ return r;
+ }
+ list_for_each(i, head) {
+ lobj = list_entry(i, struct radeon_object_list, list);
+ robj = lobj->robj;
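+ /* Placement: write-domain BOs use that domain (GTT always allowed); read-only BOs may go to GTT or VRAM. */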
+ if (lobj->wdomain) {
+ flags = radeon_object_flags_from_domain(lobj->wdomain);
+ flags |= TTM_PL_FLAG_TT;
+ } else {
+ flags = radeon_object_flags_from_domain(lobj->rdomain);
+ flags |= TTM_PL_FLAG_TT;
+ flags |= TTM_PL_FLAG_VRAM;
+ }
+ if (!robj->pin_count) {
+ robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
+ r = ttm_buffer_object_validate(&robj->tobj,
+ robj->tobj.proposed_placement,
+ true, false);
+ if (unlikely(r)) {
+ radeon_object_list_unreserve(head);
+ DRM_ERROR("radeon: failed to validate.\n");
+ return r;
+ }
+ radeon_object_gpu_addr(robj);
+ }
+ lobj->gpu_offset = robj->gpu_addr;
+ if (fence) {
+ old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
+ robj->tobj.sync_obj = radeon_fence_ref(fence);
+ robj->tobj.sync_obj_arg = NULL;
+ }
+ if (old_fence) {
+ radeon_fence_unref(&old_fence);
+ }
+ }
+ return 0;
+}
+
+void radeon_object_list_unvalidate(struct list_head *head)
+{
+ struct radeon_object_list *lobj;
+ struct radeon_fence *old_fence = NULL;
+ struct list_head *i;
+
+ list_for_each(i, head) {
+ lobj = list_entry(i, struct radeon_object_list, list);
+ old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
+ lobj->robj->tobj.sync_obj = NULL;
+ if (old_fence) {
+ radeon_fence_unref(&old_fence);
+ }
+ }
+ radeon_object_list_unreserve(head);
+}
+
+void radeon_object_list_clean(struct list_head *head)
+{
+ radeon_object_list_unreserve(head);
+}
+
+int radeon_object_fbdev_mmap(struct radeon_object *robj,
+ struct vm_area_struct *vma)
+{
+ return ttm_fbdev_mmap(vma, &robj->tobj);
+}
+
+unsigned long radeon_object_size(struct radeon_object *robj)
+{
+ return robj->tobj.num_pages << PAGE_SHIFT;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
new file mode 100644
index 00000000000..473e4775dc5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RADEON_OBJECT_H__
+#define __RADEON_OBJECT_H__
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
+/*
+ * TTM.
+ */
+struct radeon_mman {
+ struct ttm_global_reference mem_global_ref;
+ bool mem_global_referenced;
+ struct ttm_bo_device bdev;
+};
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
new file mode 100644
index 00000000000..6d3d90406a2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -0,0 +1,3570 @@
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ * VA Linux Systems Inc., Fremont, California.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR
+ * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Authors:
+ * Kevin E. Martin <martin@xfree86.org>
+ * Rickard E. Faith <faith@valinux.com>
+ * Alan Hourihane <alanh@fairlite.demon.co.uk>
+ *
+ * References:
+ *
+ * !!!! FIXME !!!!
+ * RAGE 128 VR/ RAGE 128 GL Register Reference Manual (Technical
+ * Reference Manual P/N RRG-G04100-C Rev. 0.04), ATI Technologies: April
+ * 1999.
+ *
+ * !!!! FIXME !!!!
+ * RAGE 128 Software Development Manual (Technical Reference Manual P/N
+ * SDK-G04000 Rev. 0.01), ATI Technologies: June 1999.
+ *
+ */
+
+/* !!!! FIXME !!!! NOTE: THIS FILE HAS BEEN CONVERTED FROM r128_reg.h
+ * AND CONTAINS REGISTERS AND REGISTER DEFINITIONS THAT ARE NOT CORRECT
+ * ON THE RADEON. A FULL AUDIT OF THIS CODE IS NEEDED! */
+#ifndef _RADEON_REG_H_
+#define _RADEON_REG_H_
+
+#include "r300_reg.h"
+#include "r500_reg.h"
+#include "r600_reg.h"
+
+
+#define RADEON_MC_AGP_LOCATION 0x014c
+#define RADEON_MC_AGP_START_MASK 0x0000FFFF
+#define RADEON_MC_AGP_START_SHIFT 0
+#define RADEON_MC_AGP_TOP_MASK 0xFFFF0000
+#define RADEON_MC_AGP_TOP_SHIFT 16
+#define RADEON_MC_FB_LOCATION 0x0148
+#define RADEON_MC_FB_START_MASK 0x0000FFFF
+#define RADEON_MC_FB_START_SHIFT 0
+#define RADEON_MC_FB_TOP_MASK 0xFFFF0000
+#define RADEON_MC_FB_TOP_SHIFT 16
+#define RADEON_AGP_BASE_2 0x015c /* r200+ only */
+#define RADEON_AGP_BASE 0x0170
+
+#define ATI_DATATYPE_VQ 0
+#define ATI_DATATYPE_CI4 1
+#define ATI_DATATYPE_CI8 2
+#define ATI_DATATYPE_ARGB1555 3
+#define ATI_DATATYPE_RGB565 4
+#define ATI_DATATYPE_RGB888 5
+#define ATI_DATATYPE_ARGB8888 6
+#define ATI_DATATYPE_RGB332 7
+#define ATI_DATATYPE_Y8 8
+#define ATI_DATATYPE_RGB8 9
+#define ATI_DATATYPE_CI16 10
+#define ATI_DATATYPE_VYUY_422 11
+#define ATI_DATATYPE_YVYU_422 12
+#define ATI_DATATYPE_AYUV_444 14
+#define ATI_DATATYPE_ARGB4444 15
+
+ /* Registers for 2D/Video/Overlay */
+#define RADEON_ADAPTER_ID 0x0f2c /* PCI */
+#define RADEON_AGP_BASE 0x0170
+#define RADEON_AGP_CNTL 0x0174
+# define RADEON_AGP_APER_SIZE_256MB (0x00 << 0)
+# define RADEON_AGP_APER_SIZE_128MB (0x20 << 0)
+# define RADEON_AGP_APER_SIZE_64MB (0x30 << 0)
+# define RADEON_AGP_APER_SIZE_32MB (0x38 << 0)
+# define RADEON_AGP_APER_SIZE_16MB (0x3c << 0)
+# define RADEON_AGP_APER_SIZE_8MB (0x3e << 0)
+# define RADEON_AGP_APER_SIZE_4MB (0x3f << 0)
+# define RADEON_AGP_APER_SIZE_MASK (0x3f << 0)
+#define RADEON_STATUS_PCI_CONFIG 0x06
+# define RADEON_CAP_LIST 0x100000
+#define RADEON_CAPABILITIES_PTR_PCI_CONFIG 0x34 /* offset in PCI config*/
+# define RADEON_CAP_PTR_MASK 0xfc /* mask off reserved bits of CAP_PTR */
+# define RADEON_CAP_ID_NULL 0x00 /* End of capability list */
+# define RADEON_CAP_ID_AGP 0x02 /* AGP capability ID */
+# define RADEON_CAP_ID_EXP 0x10 /* PCI Express */
+#define RADEON_AGP_COMMAND 0x0f60 /* PCI */
+#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config*/
+# define RADEON_AGP_ENABLE (1<<8)
+#define RADEON_AGP_PLL_CNTL 0x000b /* PLL */
+#define RADEON_AGP_STATUS 0x0f5c /* PCI */
+# define RADEON_AGP_1X_MODE 0x01
+# define RADEON_AGP_2X_MODE 0x02
+# define RADEON_AGP_4X_MODE 0x04
+# define RADEON_AGP_FW_MODE 0x10
+# define RADEON_AGP_MODE_MASK 0x17
+# define RADEON_AGPv3_MODE 0x08
+# define RADEON_AGPv3_4X_MODE 0x01
+# define RADEON_AGPv3_8X_MODE 0x02
+#define RADEON_ATTRDR 0x03c1 /* VGA */
+#define RADEON_ATTRDW 0x03c0 /* VGA */
+#define RADEON_ATTRX 0x03c0 /* VGA */
+#define RADEON_AUX_SC_CNTL 0x1660
+# define RADEON_AUX1_SC_EN (1 << 0)
+# define RADEON_AUX1_SC_MODE_OR (0 << 1)
+# define RADEON_AUX1_SC_MODE_NAND (1 << 1)
+# define RADEON_AUX2_SC_EN (1 << 2)
+# define RADEON_AUX2_SC_MODE_OR (0 << 3)
+# define RADEON_AUX2_SC_MODE_NAND (1 << 3)
+# define RADEON_AUX3_SC_EN (1 << 4)
+# define RADEON_AUX3_SC_MODE_OR (0 << 5)
+# define RADEON_AUX3_SC_MODE_NAND (1 << 5)
+#define RADEON_AUX1_SC_BOTTOM 0x1670
+#define RADEON_AUX1_SC_LEFT 0x1664
+#define RADEON_AUX1_SC_RIGHT 0x1668
+#define RADEON_AUX1_SC_TOP 0x166c
+#define RADEON_AUX2_SC_BOTTOM 0x1680
+#define RADEON_AUX2_SC_LEFT 0x1674
+#define RADEON_AUX2_SC_RIGHT 0x1678
+#define RADEON_AUX2_SC_TOP 0x167c
+#define RADEON_AUX3_SC_BOTTOM 0x1690
+#define RADEON_AUX3_SC_LEFT 0x1684
+#define RADEON_AUX3_SC_RIGHT 0x1688
+#define RADEON_AUX3_SC_TOP 0x168c
+#define RADEON_AUX_WINDOW_HORZ_CNTL 0x02d8
+#define RADEON_AUX_WINDOW_VERT_CNTL 0x02dc
+
+#define RADEON_BASE_CODE 0x0f0b
+#define RADEON_BIOS_0_SCRATCH 0x0010
+# define RADEON_FP_PANEL_SCALABLE (1 << 16)
+# define RADEON_FP_PANEL_SCALE_EN (1 << 17)
+# define RADEON_FP_CHIP_SCALE_EN (1 << 18)
+# define RADEON_DRIVER_BRIGHTNESS_EN (1 << 26)
+# define RADEON_DISPLAY_ROT_MASK (3 << 28)
+# define RADEON_DISPLAY_ROT_00 (0 << 28)
+# define RADEON_DISPLAY_ROT_90 (1 << 28)
+# define RADEON_DISPLAY_ROT_180 (2 << 28)
+# define RADEON_DISPLAY_ROT_270 (3 << 28)
+#define RADEON_BIOS_1_SCRATCH 0x0014
+#define RADEON_BIOS_2_SCRATCH 0x0018
+#define RADEON_BIOS_3_SCRATCH 0x001c
+#define RADEON_BIOS_4_SCRATCH 0x0020
+# define RADEON_CRT1_ATTACHED_MASK (3 << 0)
+# define RADEON_CRT1_ATTACHED_MONO (1 << 0)
+# define RADEON_CRT1_ATTACHED_COLOR (2 << 0)
+# define RADEON_LCD1_ATTACHED (1 << 2)
+# define RADEON_DFP1_ATTACHED (1 << 3)
+# define RADEON_TV1_ATTACHED_MASK (3 << 4)
+# define RADEON_TV1_ATTACHED_COMP (1 << 4)
+# define RADEON_TV1_ATTACHED_SVIDEO (2 << 4)
+# define RADEON_CRT2_ATTACHED_MASK (3 << 8)
+# define RADEON_CRT2_ATTACHED_MONO (1 << 8)
+# define RADEON_CRT2_ATTACHED_COLOR (2 << 8)
+# define RADEON_DFP2_ATTACHED (1 << 11)
+#define RADEON_BIOS_5_SCRATCH 0x0024
+# define RADEON_LCD1_ON (1 << 0)
+# define RADEON_CRT1_ON (1 << 1)
+# define RADEON_TV1_ON (1 << 2)
+# define RADEON_DFP1_ON (1 << 3)
+# define RADEON_CRT2_ON (1 << 5)
+# define RADEON_CV1_ON (1 << 6)
+# define RADEON_DFP2_ON (1 << 7)
+# define RADEON_LCD1_CRTC_MASK (1 << 8)
+# define RADEON_LCD1_CRTC_SHIFT 8
+# define RADEON_CRT1_CRTC_MASK (1 << 9)
+# define RADEON_CRT1_CRTC_SHIFT 9
+# define RADEON_TV1_CRTC_MASK (1 << 10)
+# define RADEON_TV1_CRTC_SHIFT 10
+# define RADEON_DFP1_CRTC_MASK (1 << 11)
+# define RADEON_DFP1_CRTC_SHIFT 11
+# define RADEON_CRT2_CRTC_MASK (1 << 12)
+# define RADEON_CRT2_CRTC_SHIFT 12
+# define RADEON_CV1_CRTC_MASK (1 << 13)
+# define RADEON_CV1_CRTC_SHIFT 13
+# define RADEON_DFP2_CRTC_MASK (1 << 14)
+# define RADEON_DFP2_CRTC_SHIFT 14
+# define RADEON_ACC_REQ_LCD1 (1 << 16)
+# define RADEON_ACC_REQ_CRT1 (1 << 17)
+# define RADEON_ACC_REQ_TV1 (1 << 18)
+# define RADEON_ACC_REQ_DFP1 (1 << 19)
+# define RADEON_ACC_REQ_CRT2 (1 << 21)
+# define RADEON_ACC_REQ_TV2 (1 << 22)
+# define RADEON_ACC_REQ_DFP2 (1 << 23)
+#define RADEON_BIOS_6_SCRATCH 0x0028
+# define RADEON_ACC_MODE_CHANGE (1 << 2)
+# define RADEON_EXT_DESKTOP_MODE (1 << 3)
+# define RADEON_LCD_DPMS_ON (1 << 20)
+# define RADEON_CRT_DPMS_ON (1 << 21)
+# define RADEON_TV_DPMS_ON (1 << 22)
+# define RADEON_DFP_DPMS_ON (1 << 23)
+# define RADEON_DPMS_MASK (3 << 24)
+# define RADEON_DPMS_ON (0 << 24)
+# define RADEON_DPMS_STANDBY (1 << 24)
+# define RADEON_DPMS_SUSPEND (2 << 24)
+# define RADEON_DPMS_OFF (3 << 24)
+# define RADEON_SCREEN_BLANKING (1 << 26)
+# define RADEON_DRIVER_CRITICAL (1 << 27)
+# define RADEON_DISPLAY_SWITCHING_DIS (1 << 30)
+#define RADEON_BIOS_7_SCRATCH 0x002c
+# define RADEON_SYS_HOTKEY (1 << 10)
+# define RADEON_DRV_LOADED (1 << 12)
+#define RADEON_BIOS_ROM 0x0f30 /* PCI */
+#define RADEON_BIST 0x0f0f /* PCI */
+#define RADEON_BRUSH_DATA0 0x1480
+#define RADEON_BRUSH_DATA1 0x1484
+#define RADEON_BRUSH_DATA10 0x14a8
+#define RADEON_BRUSH_DATA11 0x14ac
+#define RADEON_BRUSH_DATA12 0x14b0
+#define RADEON_BRUSH_DATA13 0x14b4
+#define RADEON_BRUSH_DATA14 0x14b8
+#define RADEON_BRUSH_DATA15 0x14bc
+#define RADEON_BRUSH_DATA16 0x14c0
+#define RADEON_BRUSH_DATA17 0x14c4
+#define RADEON_BRUSH_DATA18 0x14c8
+#define RADEON_BRUSH_DATA19 0x14cc
+#define RADEON_BRUSH_DATA2 0x1488
+#define RADEON_BRUSH_DATA20 0x14d0
+#define RADEON_BRUSH_DATA21 0x14d4
+#define RADEON_BRUSH_DATA22 0x14d8
+#define RADEON_BRUSH_DATA23 0x14dc
+#define RADEON_BRUSH_DATA24 0x14e0
+#define RADEON_BRUSH_DATA25 0x14e4
+#define RADEON_BRUSH_DATA26 0x14e8
+#define RADEON_BRUSH_DATA27 0x14ec
+#define RADEON_BRUSH_DATA28 0x14f0
+#define RADEON_BRUSH_DATA29 0x14f4
+#define RADEON_BRUSH_DATA3 0x148c
+#define RADEON_BRUSH_DATA30 0x14f8
+#define RADEON_BRUSH_DATA31 0x14fc
+#define RADEON_BRUSH_DATA32 0x1500
+#define RADEON_BRUSH_DATA33 0x1504
+#define RADEON_BRUSH_DATA34 0x1508
+#define RADEON_BRUSH_DATA35 0x150c
+#define RADEON_BRUSH_DATA36 0x1510
+#define RADEON_BRUSH_DATA37 0x1514
+#define RADEON_BRUSH_DATA38 0x1518
+#define RADEON_BRUSH_DATA39 0x151c
+#define RADEON_BRUSH_DATA4 0x1490
+#define RADEON_BRUSH_DATA40 0x1520
+#define RADEON_BRUSH_DATA41 0x1524
+#define RADEON_BRUSH_DATA42 0x1528
+#define RADEON_BRUSH_DATA43 0x152c
+#define RADEON_BRUSH_DATA44 0x1530
+#define RADEON_BRUSH_DATA45 0x1534
+#define RADEON_BRUSH_DATA46 0x1538
+#define RADEON_BRUSH_DATA47 0x153c
+#define RADEON_BRUSH_DATA48 0x1540
+#define RADEON_BRUSH_DATA49 0x1544
+#define RADEON_BRUSH_DATA5 0x1494
+#define RADEON_BRUSH_DATA50 0x1548
+#define RADEON_BRUSH_DATA51 0x154c
+#define RADEON_BRUSH_DATA52 0x1550
+#define RADEON_BRUSH_DATA53 0x1554
+#define RADEON_BRUSH_DATA54 0x1558
+#define RADEON_BRUSH_DATA55 0x155c
+#define RADEON_BRUSH_DATA56 0x1560
+#define RADEON_BRUSH_DATA57 0x1564
+#define RADEON_BRUSH_DATA58 0x1568
+#define RADEON_BRUSH_DATA59 0x156c
+#define RADEON_BRUSH_DATA6 0x1498
+#define RADEON_BRUSH_DATA60 0x1570
+#define RADEON_BRUSH_DATA61 0x1574
+#define RADEON_BRUSH_DATA62 0x1578
+#define RADEON_BRUSH_DATA63 0x157c
+#define RADEON_BRUSH_DATA7 0x149c
+#define RADEON_BRUSH_DATA8 0x14a0
+#define RADEON_BRUSH_DATA9 0x14a4
+#define RADEON_BRUSH_SCALE 0x1470
+#define RADEON_BRUSH_Y_X 0x1474
+#define RADEON_BUS_CNTL 0x0030
+# define RADEON_BUS_MASTER_DIS (1 << 6)
+# define RADEON_BUS_BIOS_DIS_ROM (1 << 12)
+# define RADEON_BUS_RD_DISCARD_EN (1 << 24)
+# define RADEON_BUS_RD_ABORT_EN (1 << 25)
+# define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
+# define RADEON_BUS_WRT_BURST (1 << 29)
+# define RADEON_BUS_READ_BURST (1 << 30)
+#define RADEON_BUS_CNTL1 0x0034
+# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
+
+/* #define RADEON_PCIE_INDEX 0x0030 */
+/* #define RADEON_PCIE_DATA 0x0034 */
+#define RADEON_PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE */
+# define RADEON_PCIE_LC_LINK_WIDTH_SHIFT 0
+# define RADEON_PCIE_LC_LINK_WIDTH_MASK 0x7
+# define RADEON_PCIE_LC_LINK_WIDTH_X0 0
+# define RADEON_PCIE_LC_LINK_WIDTH_X1 1
+# define RADEON_PCIE_LC_LINK_WIDTH_X2 2
+# define RADEON_PCIE_LC_LINK_WIDTH_X4 3
+# define RADEON_PCIE_LC_LINK_WIDTH_X8 4
+# define RADEON_PCIE_LC_LINK_WIDTH_X12 5
+# define RADEON_PCIE_LC_LINK_WIDTH_X16 6
+# define RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT 4
+# define RADEON_PCIE_LC_LINK_WIDTH_RD_MASK 0x70
+# define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8)
+# define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9)
+# define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10)
+
+#define RADEON_CACHE_CNTL 0x1724
+#define RADEON_CACHE_LINE 0x0f0c /* PCI */
+#define RADEON_CAPABILITIES_ID 0x0f50 /* PCI */
+#define RADEON_CAPABILITIES_PTR 0x0f34 /* PCI */
+#define RADEON_CLK_PIN_CNTL 0x0001 /* PLL */
+# define RADEON_DONT_USE_XTALIN (1 << 4)
+# define RADEON_SCLK_DYN_START_CNTL (1 << 15)
+#define RADEON_CLOCK_CNTL_DATA 0x000c
+#define RADEON_CLOCK_CNTL_INDEX 0x0008
+# define RADEON_PLL_WR_EN (1 << 7)
+# define RADEON_PLL_DIV_SEL (3 << 8)
+# define RADEON_PLL2_DIV_SEL_MASK (~(3 << 8))
+#define RADEON_CLK_PWRMGT_CNTL 0x0014
+# define RADEON_ENGIN_DYNCLK_MODE (1 << 12)
+# define RADEON_ACTIVE_HILO_LAT_MASK (3 << 13)
+# define RADEON_ACTIVE_HILO_LAT_SHIFT 13
+# define RADEON_DISP_DYN_STOP_LAT_MASK (1 << 12)
+# define RADEON_MC_BUSY (1 << 16)
+# define RADEON_DLL_READY (1 << 19)
+# define RADEON_CG_NO1_DEBUG_0 (1 << 24)
+# define RADEON_CG_NO1_DEBUG_MASK (0x1f << 24)
+# define RADEON_DYN_STOP_MODE_MASK (7 << 21)
+# define RADEON_TVPLL_PWRMGT_OFF (1 << 30)
+# define RADEON_TVCLK_TURNOFF (1 << 31)
+#define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */
+# define RADEON_TCL_BYPASS_DISABLE (1 << 20)
+#define RADEON_CLR_CMP_CLR_3D 0x1a24
+#define RADEON_CLR_CMP_CLR_DST 0x15c8
+#define RADEON_CLR_CMP_CLR_SRC 0x15c4
+#define RADEON_CLR_CMP_CNTL 0x15c0
+# define RADEON_SRC_CMP_EQ_COLOR (4 << 0)
+# define RADEON_SRC_CMP_NEQ_COLOR (5 << 0)
+# define RADEON_CLR_CMP_SRC_SOURCE (1 << 24)
+#define RADEON_CLR_CMP_MASK 0x15cc
+# define RADEON_CLR_CMP_MSK 0xffffffff
+#define RADEON_CLR_CMP_MASK_3D 0x1A28
+#define RADEON_COMMAND 0x0f04 /* PCI */
+#define RADEON_COMPOSITE_SHADOW_ID 0x1a0c
+#define RADEON_CONFIG_APER_0_BASE 0x0100
+#define RADEON_CONFIG_APER_1_BASE 0x0104
+#define RADEON_CONFIG_APER_SIZE 0x0108
+#define RADEON_CONFIG_BONDS 0x00e8
+#define RADEON_CONFIG_CNTL 0x00e0
+# define RADEON_CFG_ATI_REV_A11 (0 << 16)
+# define RADEON_CFG_ATI_REV_A12 (1 << 16)
+# define RADEON_CFG_ATI_REV_A13 (2 << 16)
+# define RADEON_CFG_ATI_REV_ID_MASK (0xf << 16)
+#define RADEON_CONFIG_MEMSIZE 0x00f8
+#define RADEON_CONFIG_MEMSIZE_EMBEDDED 0x0114
+#define RADEON_CONFIG_REG_1_BASE 0x010c
+#define RADEON_CONFIG_REG_APER_SIZE 0x0110
+#define RADEON_CONFIG_XSTRAP 0x00e4
+#define RADEON_CONSTANT_COLOR_C 0x1d34
+# define RADEON_CONSTANT_COLOR_MASK 0x00ffffff
+# define RADEON_CONSTANT_COLOR_ONE 0x00ffffff
+# define RADEON_CONSTANT_COLOR_ZERO 0x00000000
+#define RADEON_CRC_CMDFIFO_ADDR 0x0740
+#define RADEON_CRC_CMDFIFO_DOUT 0x0744
+#define RADEON_GRPH_BUFFER_CNTL 0x02f0
+# define RADEON_GRPH_START_REQ_MASK (0x7f)
+# define RADEON_GRPH_START_REQ_SHIFT 0
+# define RADEON_GRPH_STOP_REQ_MASK (0x7f<<8)
+# define RADEON_GRPH_STOP_REQ_SHIFT 8
+# define RADEON_GRPH_CRITICAL_POINT_MASK (0x7f<<16)
+# define RADEON_GRPH_CRITICAL_POINT_SHIFT 16
+# define RADEON_GRPH_CRITICAL_CNTL (1<<28)
+# define RADEON_GRPH_BUFFER_SIZE (1<<29)
+# define RADEON_GRPH_CRITICAL_AT_SOF (1<<30)
+# define RADEON_GRPH_STOP_CNTL (1<<31)
+#define RADEON_GRPH2_BUFFER_CNTL 0x03f0
+# define RADEON_GRPH2_START_REQ_MASK (0x7f)
+# define RADEON_GRPH2_START_REQ_SHIFT 0
+# define RADEON_GRPH2_STOP_REQ_MASK (0x7f<<8)
+# define RADEON_GRPH2_STOP_REQ_SHIFT 8
+# define RADEON_GRPH2_CRITICAL_POINT_MASK (0x7f<<16)
+# define RADEON_GRPH2_CRITICAL_POINT_SHIFT 16
+# define RADEON_GRPH2_CRITICAL_CNTL (1<<28)
+# define RADEON_GRPH2_BUFFER_SIZE (1<<29)
+# define RADEON_GRPH2_CRITICAL_AT_SOF (1<<30)
+# define RADEON_GRPH2_STOP_CNTL (1<<31)
+#define RADEON_CRTC_CRNT_FRAME 0x0214
+#define RADEON_CRTC_EXT_CNTL 0x0054
+# define RADEON_CRTC_VGA_XOVERSCAN (1 << 0)
+# define RADEON_VGA_ATI_LINEAR (1 << 3)
+# define RADEON_XCRT_CNT_EN (1 << 6)
+# define RADEON_CRTC_HSYNC_DIS (1 << 8)
+# define RADEON_CRTC_VSYNC_DIS (1 << 9)
+# define RADEON_CRTC_DISPLAY_DIS (1 << 10)
+# define RADEON_CRTC_SYNC_TRISTAT (1 << 11)
+# define RADEON_CRTC_CRT_ON (1 << 15)
+#define RADEON_CRTC_EXT_CNTL_DPMS_BYTE 0x0055
+# define RADEON_CRTC_HSYNC_DIS_BYTE (1 << 0)
+# define RADEON_CRTC_VSYNC_DIS_BYTE (1 << 1)
+# define RADEON_CRTC_DISPLAY_DIS_BYTE (1 << 2)
+#define RADEON_CRTC_GEN_CNTL 0x0050
+# define RADEON_CRTC_DBL_SCAN_EN (1 << 0)
+# define RADEON_CRTC_INTERLACE_EN (1 << 1)
+# define RADEON_CRTC_CSYNC_EN (1 << 4)
+# define RADEON_CRTC_ICON_EN (1 << 15)
+# define RADEON_CRTC_CUR_EN (1 << 16)
+# define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
+# define RADEON_CRTC_CUR_MODE_SHIFT 20
+# define RADEON_CRTC_CUR_MODE_MONO 0
+# define RADEON_CRTC_CUR_MODE_24BPP 2
+# define RADEON_CRTC_EXT_DISP_EN (1 << 24)
+# define RADEON_CRTC_EN (1 << 25)
+# define RADEON_CRTC_DISP_REQ_EN_B (1 << 26)
+#define RADEON_CRTC2_GEN_CNTL 0x03f8
+# define RADEON_CRTC2_DBL_SCAN_EN (1 << 0)
+# define RADEON_CRTC2_INTERLACE_EN (1 << 1)
+# define RADEON_CRTC2_SYNC_TRISTAT (1 << 4)
+# define RADEON_CRTC2_HSYNC_TRISTAT (1 << 5)
+# define RADEON_CRTC2_VSYNC_TRISTAT (1 << 6)
+# define RADEON_CRTC2_CRT2_ON (1 << 7)
+# define RADEON_CRTC2_PIX_WIDTH_SHIFT 8
+# define RADEON_CRTC2_PIX_WIDTH_MASK (0xf << 8)
+# define RADEON_CRTC2_ICON_EN (1 << 15)
+# define RADEON_CRTC2_CUR_EN (1 << 16)
+# define RADEON_CRTC2_CUR_MODE_MASK (7 << 20)
+# define RADEON_CRTC2_DISP_DIS (1 << 23)
+# define RADEON_CRTC2_EN (1 << 25)
+# define RADEON_CRTC2_DISP_REQ_EN_B (1 << 26)
+# define RADEON_CRTC2_CSYNC_EN (1 << 27)
+# define RADEON_CRTC2_HSYNC_DIS (1 << 28)
+# define RADEON_CRTC2_VSYNC_DIS (1 << 29)
+#define RADEON_CRTC_MORE_CNTL 0x27c
+# define RADEON_CRTC_AUTO_HORZ_CENTER_EN (1<<2)
+# define RADEON_CRTC_AUTO_VERT_CENTER_EN (1<<3)
+# define RADEON_CRTC_H_CUTOFF_ACTIVE_EN (1<<4)
+# define RADEON_CRTC_V_CUTOFF_ACTIVE_EN (1<<5)
+#define RADEON_CRTC_GUI_TRIG_VLINE 0x0218
+#define RADEON_CRTC_H_SYNC_STRT_WID 0x0204
+# define RADEON_CRTC_H_SYNC_STRT_PIX (0x07 << 0)
+# define RADEON_CRTC_H_SYNC_STRT_CHAR (0x3ff << 3)
+# define RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT 3
+# define RADEON_CRTC_H_SYNC_WID (0x3f << 16)
+# define RADEON_CRTC_H_SYNC_WID_SHIFT 16
+# define RADEON_CRTC_H_SYNC_POL (1 << 23)
+#define RADEON_CRTC2_H_SYNC_STRT_WID 0x0304
+# define RADEON_CRTC2_H_SYNC_STRT_PIX (0x07 << 0)
+# define RADEON_CRTC2_H_SYNC_STRT_CHAR (0x3ff << 3)
+# define RADEON_CRTC2_H_SYNC_STRT_CHAR_SHIFT 3
+# define RADEON_CRTC2_H_SYNC_WID (0x3f << 16)
+# define RADEON_CRTC2_H_SYNC_WID_SHIFT 16
+# define RADEON_CRTC2_H_SYNC_POL (1 << 23)
+#define RADEON_CRTC_H_TOTAL_DISP 0x0200
+# define RADEON_CRTC_H_TOTAL (0x03ff << 0)
+# define RADEON_CRTC_H_TOTAL_SHIFT 0
+# define RADEON_CRTC_H_DISP (0x01ff << 16)
+# define RADEON_CRTC_H_DISP_SHIFT 16
+#define RADEON_CRTC2_H_TOTAL_DISP 0x0300
+# define RADEON_CRTC2_H_TOTAL (0x03ff << 0)
+# define RADEON_CRTC2_H_TOTAL_SHIFT 0
+# define RADEON_CRTC2_H_DISP (0x01ff << 16)
+# define RADEON_CRTC2_H_DISP_SHIFT 16
+
+#define RADEON_CRTC_OFFSET_RIGHT 0x0220
+#define RADEON_CRTC_OFFSET 0x0224
+# define RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET (1<<30)
+# define RADEON_CRTC_OFFSET__OFFSET_LOCK (1<<31)
+
+#define RADEON_CRTC2_OFFSET 0x0324
+# define RADEON_CRTC2_OFFSET__GUI_TRIG_OFFSET (1<<30)
+# define RADEON_CRTC2_OFFSET__OFFSET_LOCK (1<<31)
+#define RADEON_CRTC_OFFSET_CNTL 0x0228
+# define RADEON_CRTC_TILE_LINE_SHIFT 0
+# define RADEON_CRTC_TILE_LINE_RIGHT_SHIFT 4
+# define R300_CRTC_X_Y_MODE_EN_RIGHT (1 << 6)
+# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_MASK (3 << 7)
+# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_AUTO (0 << 7)
+# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_SINGLE (1 << 7)
+# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DOUBLE (2 << 7)
+# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DIS (3 << 7)
+# define R300_CRTC_X_Y_MODE_EN (1 << 9)
+# define R300_CRTC_MICRO_TILE_BUFFER_MASK (3 << 10)
+# define R300_CRTC_MICRO_TILE_BUFFER_AUTO (0 << 10)
+# define R300_CRTC_MICRO_TILE_BUFFER_SINGLE (1 << 10)
+# define R300_CRTC_MICRO_TILE_BUFFER_DOUBLE (2 << 10)
+# define R300_CRTC_MICRO_TILE_BUFFER_DIS (3 << 10)
+# define R300_CRTC_MICRO_TILE_EN_RIGHT (1 << 12)
+# define R300_CRTC_MICRO_TILE_EN (1 << 13)
+# define R300_CRTC_MACRO_TILE_EN_RIGHT (1 << 14)
+# define R300_CRTC_MACRO_TILE_EN (1 << 15)
+# define RADEON_CRTC_TILE_EN_RIGHT (1 << 14)
+# define RADEON_CRTC_TILE_EN (1 << 15)
+# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
+# define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17)
+
+#define R300_CRTC_TILE_X0_Y0 0x0350
+#define R300_CRTC2_TILE_X0_Y0 0x0358
+
+#define RADEON_CRTC2_OFFSET_CNTL 0x0328
+# define RADEON_CRTC2_OFFSET_FLIP_CNTL (1 << 16)
+# define RADEON_CRTC2_TILE_EN (1 << 15)
+#define RADEON_CRTC_PITCH 0x022c
+# define RADEON_CRTC_PITCH__SHIFT 0
+# define RADEON_CRTC_PITCH__RIGHT_SHIFT 16
+
+#define RADEON_CRTC2_PITCH 0x032c
+#define RADEON_CRTC_STATUS 0x005c
+# define RADEON_CRTC_VBLANK_SAVE (1 << 1)
+# define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1)
+#define RADEON_CRTC2_STATUS 0x03fc
+# define RADEON_CRTC2_VBLANK_SAVE (1 << 1)
+# define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1)
+#define RADEON_CRTC_V_SYNC_STRT_WID 0x020c
+# define RADEON_CRTC_V_SYNC_STRT (0x7ff << 0)
+# define RADEON_CRTC_V_SYNC_STRT_SHIFT 0
+# define RADEON_CRTC_V_SYNC_WID (0x1f << 16)
+# define RADEON_CRTC_V_SYNC_WID_SHIFT 16
+# define RADEON_CRTC_V_SYNC_POL (1 << 23)
+#define RADEON_CRTC2_V_SYNC_STRT_WID 0x030c
+# define RADEON_CRTC2_V_SYNC_STRT (0x7ff << 0)
+# define RADEON_CRTC2_V_SYNC_STRT_SHIFT 0
+# define RADEON_CRTC2_V_SYNC_WID (0x1f << 16)
+# define RADEON_CRTC2_V_SYNC_WID_SHIFT 16
+# define RADEON_CRTC2_V_SYNC_POL (1 << 23)
+#define RADEON_CRTC_V_TOTAL_DISP 0x0208
+# define RADEON_CRTC_V_TOTAL (0x07ff << 0)
+# define RADEON_CRTC_V_TOTAL_SHIFT 0
+# define RADEON_CRTC_V_DISP (0x07ff << 16)
+# define RADEON_CRTC_V_DISP_SHIFT 16
+#define RADEON_CRTC2_V_TOTAL_DISP 0x0308
+# define RADEON_CRTC2_V_TOTAL (0x07ff << 0)
+# define RADEON_CRTC2_V_TOTAL_SHIFT 0
+# define RADEON_CRTC2_V_DISP (0x07ff << 16)
+# define RADEON_CRTC2_V_DISP_SHIFT 16
+#define RADEON_CRTC_VLINE_CRNT_VLINE 0x0210
+# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16)
+#define RADEON_CRTC2_CRNT_FRAME 0x0314
+#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318
+#define RADEON_CRTC2_STATUS 0x03fc
+#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310
+#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */
+#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */
+#define RADEON_CUR_CLR0 0x026c
+#define RADEON_CUR_CLR1 0x0270
+#define RADEON_CUR_HORZ_VERT_OFF 0x0268
+#define RADEON_CUR_HORZ_VERT_POSN 0x0264
+#define RADEON_CUR_OFFSET 0x0260
+# define RADEON_CUR_LOCK (1 << 31)
+#define RADEON_CUR2_CLR0 0x036c
+#define RADEON_CUR2_CLR1 0x0370
+#define RADEON_CUR2_HORZ_VERT_OFF 0x0368
+#define RADEON_CUR2_HORZ_VERT_POSN 0x0364
+#define RADEON_CUR2_OFFSET 0x0360
+# define RADEON_CUR2_LOCK (1 << 31)
+
+#define RADEON_DAC_CNTL 0x0058
+# define RADEON_DAC_RANGE_CNTL (3 << 0)
+# define RADEON_DAC_RANGE_CNTL_PS2 (2 << 0)
+# define RADEON_DAC_RANGE_CNTL_MASK 0x03
+# define RADEON_DAC_BLANKING (1 << 2)
+# define RADEON_DAC_CMP_EN (1 << 3)
+# define RADEON_DAC_CMP_OUTPUT (1 << 7)
+# define RADEON_DAC_8BIT_EN (1 << 8)
+# define RADEON_DAC_TVO_EN (1 << 10)
+# define RADEON_DAC_VGA_ADR_EN (1 << 13)
+# define RADEON_DAC_PDWN (1 << 15)
+# define RADEON_DAC_MASK_ALL (0xff << 24)
+#define RADEON_DAC_CNTL2 0x007c
+# define RADEON_DAC2_TV_CLK_SEL (0 << 1)
+# define RADEON_DAC2_DAC_CLK_SEL (1 << 0)
+# define RADEON_DAC2_DAC2_CLK_SEL (1 << 1)
+# define RADEON_DAC2_PALETTE_ACC_CTL (1 << 5)
+# define RADEON_DAC2_CMP_EN (1 << 7)
+# define RADEON_DAC2_CMP_OUT_R (1 << 8)
+# define RADEON_DAC2_CMP_OUT_G (1 << 9)
+# define RADEON_DAC2_CMP_OUT_B (1 << 10)
+# define RADEON_DAC2_CMP_OUTPUT (1 << 11)
+#define RADEON_DAC_EXT_CNTL 0x0280
+# define RADEON_DAC2_FORCE_BLANK_OFF_EN (1 << 0)
+# define RADEON_DAC2_FORCE_DATA_EN (1 << 1)
+# define RADEON_DAC_FORCE_BLANK_OFF_EN (1 << 4)
+# define RADEON_DAC_FORCE_DATA_EN (1 << 5)
+# define RADEON_DAC_FORCE_DATA_SEL_MASK (3 << 6)
+# define RADEON_DAC_FORCE_DATA_SEL_R (0 << 6)
+# define RADEON_DAC_FORCE_DATA_SEL_G (1 << 6)
+# define RADEON_DAC_FORCE_DATA_SEL_B (2 << 6)
+# define RADEON_DAC_FORCE_DATA_SEL_RGB (3 << 6)
+# define RADEON_DAC_FORCE_DATA_MASK 0x0003ff00
+# define RADEON_DAC_FORCE_DATA_SHIFT 8
+#define RADEON_DAC_MACRO_CNTL 0x0d04
+# define RADEON_DAC_PDWN_R (1 << 16)
+# define RADEON_DAC_PDWN_G (1 << 17)
+# define RADEON_DAC_PDWN_B (1 << 18)
+#define RADEON_DISP_PWR_MAN 0x0d08
+# define RADEON_DISP_PWR_MAN_D3_CRTC_EN (1 << 0)
+# define RADEON_DISP_PWR_MAN_D3_CRTC2_EN (1 << 4)
+# define RADEON_DISP_PWR_MAN_DPMS_ON (0 << 8)
+# define RADEON_DISP_PWR_MAN_DPMS_STANDBY (1 << 8)
+# define RADEON_DISP_PWR_MAN_DPMS_SUSPEND (2 << 8)
+# define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8)
+# define RADEON_DISP_D3_RST (1 << 16)
+# define RADEON_DISP_D3_REG_RST (1 << 17)
+# define RADEON_DISP_D3_GRPH_RST (1 << 18)
+# define RADEON_DISP_D3_SUBPIC_RST (1 << 19)
+# define RADEON_DISP_D3_OV0_RST (1 << 20)
+# define RADEON_DISP_D1D2_GRPH_RST (1 << 21)
+# define RADEON_DISP_D1D2_SUBPIC_RST (1 << 22)
+# define RADEON_DISP_D1D2_OV0_RST (1 << 23)
+# define RADEON_DIG_TMDS_ENABLE_RST (1 << 24)
+# define RADEON_TV_ENABLE_RST (1 << 25)
+# define RADEON_AUTO_PWRUP_EN (1 << 26)
+#define RADEON_TV_DAC_CNTL 0x088c
+# define RADEON_TV_DAC_NBLANK (1 << 0)
+# define RADEON_TV_DAC_NHOLD (1 << 1)
+# define RADEON_TV_DAC_PEDESTAL (1 << 2)
+# define RADEON_TV_MONITOR_DETECT_EN (1 << 4)
+# define RADEON_TV_DAC_CMPOUT (1 << 5)
+# define RADEON_TV_DAC_STD_MASK (3 << 8)
+# define RADEON_TV_DAC_STD_PAL (0 << 8)
+# define RADEON_TV_DAC_STD_NTSC (1 << 8)
+# define RADEON_TV_DAC_STD_PS2 (2 << 8)
+# define RADEON_TV_DAC_STD_RS343 (3 << 8)
+# define RADEON_TV_DAC_BGSLEEP (1 << 6)
+# define RADEON_TV_DAC_BGADJ_MASK (0xf << 16)
+# define RADEON_TV_DAC_BGADJ_SHIFT 16
+# define RADEON_TV_DAC_DACADJ_MASK (0xf << 20)
+# define RADEON_TV_DAC_DACADJ_SHIFT 20
+# define RADEON_TV_DAC_RDACPD (1 << 24)
+# define RADEON_TV_DAC_GDACPD (1 << 25)
+# define RADEON_TV_DAC_BDACPD (1 << 26)
+# define RADEON_TV_DAC_RDACDET (1 << 29)
+# define RADEON_TV_DAC_GDACDET (1 << 30)
+# define RADEON_TV_DAC_BDACDET (1 << 31)
+# define R420_TV_DAC_DACADJ_MASK (0x1f << 20)
+# define R420_TV_DAC_RDACPD (1 << 25)
+# define R420_TV_DAC_GDACPD (1 << 26)
+# define R420_TV_DAC_BDACPD (1 << 27)
+# define R420_TV_DAC_TVENABLE (1 << 28)
+#define RADEON_DISP_HW_DEBUG 0x0d14
+# define RADEON_CRT2_DISP1_SEL (1 << 5)
+#define RADEON_DISP_OUTPUT_CNTL 0x0d64
+# define RADEON_DISP_DAC_SOURCE_MASK 0x03
+# define RADEON_DISP_DAC2_SOURCE_MASK 0x0c
+# define RADEON_DISP_DAC_SOURCE_CRTC2 0x01
+# define RADEON_DISP_DAC_SOURCE_RMX 0x02
+# define RADEON_DISP_DAC_SOURCE_LTU 0x03
+# define RADEON_DISP_DAC2_SOURCE_CRTC2 0x04
+# define RADEON_DISP_TVDAC_SOURCE_MASK (0x03 << 2)
+# define RADEON_DISP_TVDAC_SOURCE_CRTC 0x0
+# define RADEON_DISP_TVDAC_SOURCE_CRTC2 (0x01 << 2)
+# define RADEON_DISP_TVDAC_SOURCE_RMX (0x02 << 2)
+# define RADEON_DISP_TVDAC_SOURCE_LTU (0x03 << 2)
+# define RADEON_DISP_TRANS_MATRIX_MASK (0x03 << 4)
+# define RADEON_DISP_TRANS_MATRIX_ALPHA_MSB (0x00 << 4)
+# define RADEON_DISP_TRANS_MATRIX_GRAPHICS (0x01 << 4)
+# define RADEON_DISP_TRANS_MATRIX_VIDEO (0x02 << 4)
+# define RADEON_DISP_TV_SOURCE_CRTC (1 << 16) /* crtc1 or crtc2 */
+# define RADEON_DISP_TV_SOURCE_LTU (0 << 16) /* linear transform unit */
+#define RADEON_DISP_TV_OUT_CNTL 0x0d6c
+# define RADEON_DISP_TV_PATH_SRC_CRTC2 (1 << 16)
+# define RADEON_DISP_TV_PATH_SRC_CRTC1 (0 << 16)
+#define RADEON_DAC_CRC_SIG 0x02cc
+#define RADEON_DAC_DATA 0x03c9 /* VGA */
+#define RADEON_DAC_MASK 0x03c6 /* VGA */
+#define RADEON_DAC_R_INDEX 0x03c7 /* VGA */
+#define RADEON_DAC_W_INDEX 0x03c8 /* VGA */
+#define RADEON_DDA_CONFIG 0x02e0
+#define RADEON_DDA_ON_OFF 0x02e4
+#define RADEON_DEFAULT_OFFSET 0x16e0
+#define RADEON_DEFAULT_PITCH 0x16e4
+#define RADEON_DEFAULT_SC_BOTTOM_RIGHT 0x16e8
+# define RADEON_DEFAULT_SC_RIGHT_MAX (0x1fff << 0)
+# define RADEON_DEFAULT_SC_BOTTOM_MAX (0x1fff << 16)
+#define RADEON_DESTINATION_3D_CLR_CMP_VAL 0x1820
+#define RADEON_DESTINATION_3D_CLR_CMP_MSK 0x1824
+#define RADEON_DEVICE_ID 0x0f02 /* PCI */
+#define RADEON_DISP_MISC_CNTL 0x0d00
+# define RADEON_SOFT_RESET_GRPH_PP (1 << 0)
+#define RADEON_DISP_MERGE_CNTL 0x0d60
+# define RADEON_DISP_ALPHA_MODE_MASK 0x03
+# define RADEON_DISP_ALPHA_MODE_KEY 0
+# define RADEON_DISP_ALPHA_MODE_PER_PIXEL 1
+# define RADEON_DISP_ALPHA_MODE_GLOBAL 2
+# define RADEON_DISP_RGB_OFFSET_EN (1 << 8)
+# define RADEON_DISP_GRPH_ALPHA_MASK (0xff << 16)
+# define RADEON_DISP_OV0_ALPHA_MASK (0xff << 24)
+# define RADEON_DISP_LIN_TRANS_BYPASS (0x01 << 9)
+#define RADEON_DISP2_MERGE_CNTL 0x0d68
+# define RADEON_DISP2_RGB_OFFSET_EN (1 << 8)
+#define RADEON_DISP_LIN_TRANS_GRPH_A 0x0d80
+#define RADEON_DISP_LIN_TRANS_GRPH_B 0x0d84
+#define RADEON_DISP_LIN_TRANS_GRPH_C 0x0d88
+#define RADEON_DISP_LIN_TRANS_GRPH_D 0x0d8c
+#define RADEON_DISP_LIN_TRANS_GRPH_E 0x0d90
+#define RADEON_DISP_LIN_TRANS_GRPH_F 0x0d98
+#define RADEON_DP_BRUSH_BKGD_CLR 0x1478
+#define RADEON_DP_BRUSH_FRGD_CLR 0x147c
+#define RADEON_DP_CNTL 0x16c0
+# define RADEON_DST_X_LEFT_TO_RIGHT (1 << 0)
+# define RADEON_DST_Y_TOP_TO_BOTTOM (1 << 1)
+# define RADEON_DP_DST_TILE_LINEAR (0 << 3)
+# define RADEON_DP_DST_TILE_MACRO (1 << 3)
+# define RADEON_DP_DST_TILE_MICRO (2 << 3)
+# define RADEON_DP_DST_TILE_BOTH (3 << 3)
+#define RADEON_DP_CNTL_XDIR_YDIR_YMAJOR 0x16d0
+# define RADEON_DST_Y_MAJOR (1 << 2)
+# define RADEON_DST_Y_DIR_TOP_TO_BOTTOM (1 << 15)
+# define RADEON_DST_X_DIR_LEFT_TO_RIGHT (1 << 31)
+#define RADEON_DP_DATATYPE 0x16c4
+# define RADEON_HOST_BIG_ENDIAN_EN (1 << 29)
+#define RADEON_DP_GUI_MASTER_CNTL 0x146c
+# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
+# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
+# define RADEON_GMC_SRC_CLIPPING (1 << 2)
+# define RADEON_GMC_DST_CLIPPING (1 << 3)
+# define RADEON_GMC_BRUSH_DATATYPE_MASK (0x0f << 4)
+# define RADEON_GMC_BRUSH_8X8_MONO_FG_BG (0 << 4)
+# define RADEON_GMC_BRUSH_8X8_MONO_FG_LA (1 << 4)
+# define RADEON_GMC_BRUSH_1X8_MONO_FG_BG (4 << 4)
+# define RADEON_GMC_BRUSH_1X8_MONO_FG_LA (5 << 4)
+# define RADEON_GMC_BRUSH_32x1_MONO_FG_BG (6 << 4)
+# define RADEON_GMC_BRUSH_32x1_MONO_FG_LA (7 << 4)
+# define RADEON_GMC_BRUSH_32x32_MONO_FG_BG (8 << 4)
+# define RADEON_GMC_BRUSH_32x32_MONO_FG_LA (9 << 4)
+# define RADEON_GMC_BRUSH_8x8_COLOR (10 << 4)
+# define RADEON_GMC_BRUSH_1X8_COLOR (12 << 4)
+# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4)
+# define RADEON_GMC_BRUSH_NONE (15 << 4)
+# define RADEON_GMC_DST_8BPP_CI (2 << 8)
+# define RADEON_GMC_DST_15BPP (3 << 8)
+# define RADEON_GMC_DST_16BPP (4 << 8)
+# define RADEON_GMC_DST_24BPP (5 << 8)
+# define RADEON_GMC_DST_32BPP (6 << 8)
+# define RADEON_GMC_DST_8BPP_RGB (7 << 8)
+# define RADEON_GMC_DST_Y8 (8 << 8)
+# define RADEON_GMC_DST_RGB8 (9 << 8)
+# define RADEON_GMC_DST_VYUY (11 << 8)
+# define RADEON_GMC_DST_YVYU (12 << 8)
+# define RADEON_GMC_DST_AYUV444 (14 << 8)
+# define RADEON_GMC_DST_ARGB4444 (15 << 8)
+# define RADEON_GMC_DST_DATATYPE_MASK (0x0f << 8)
+# define RADEON_GMC_DST_DATATYPE_SHIFT 8
+# define RADEON_GMC_SRC_DATATYPE_MASK (3 << 12)
+# define RADEON_GMC_SRC_DATATYPE_MONO_FG_BG (0 << 12)
+# define RADEON_GMC_SRC_DATATYPE_MONO_FG_LA (1 << 12)
+# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12)
+# define RADEON_GMC_BYTE_PIX_ORDER (1 << 14)
+# define RADEON_GMC_BYTE_MSB_TO_LSB (0 << 14)
+# define RADEON_GMC_BYTE_LSB_TO_MSB (1 << 14)
+# define RADEON_GMC_CONVERSION_TEMP (1 << 15)
+# define RADEON_GMC_CONVERSION_TEMP_6500 (0 << 15)
+# define RADEON_GMC_CONVERSION_TEMP_9300 (1 << 15)
+# define RADEON_GMC_ROP3_MASK (0xff << 16)
+# define RADEON_DP_SRC_SOURCE_MASK (7 << 24)
+# define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24)
+# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24)
+# define RADEON_GMC_3D_FCN_EN (1 << 27)
+# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28)
+# define RADEON_GMC_AUX_CLIP_DIS (1 << 29)
+# define RADEON_GMC_WR_MSK_DIS (1 << 30)
+# define RADEON_GMC_LD_BRUSH_Y_X (1 << 31)
+# define RADEON_ROP3_ZERO 0x00000000
+# define RADEON_ROP3_DSa 0x00880000
+# define RADEON_ROP3_SDna 0x00440000
+# define RADEON_ROP3_S 0x00cc0000
+# define RADEON_ROP3_DSna 0x00220000
+# define RADEON_ROP3_D 0x00aa0000
+# define RADEON_ROP3_DSx 0x00660000
+# define RADEON_ROP3_DSo 0x00ee0000
+# define RADEON_ROP3_DSon 0x00110000
+# define RADEON_ROP3_DSxn 0x00990000
+# define RADEON_ROP3_Dn 0x00550000
+# define RADEON_ROP3_SDno 0x00dd0000
+# define RADEON_ROP3_Sn 0x00330000
+# define RADEON_ROP3_DSno 0x00bb0000
+# define RADEON_ROP3_DSan 0x00770000
+# define RADEON_ROP3_ONE 0x00ff0000
+# define RADEON_ROP3_DPa 0x00a00000
+# define RADEON_ROP3_PDna 0x00500000
+# define RADEON_ROP3_P 0x00f00000
+# define RADEON_ROP3_DPna 0x000a0000
+# define RADEON_ROP3_D 0x00aa0000
+# define RADEON_ROP3_DPx 0x005a0000
+# define RADEON_ROP3_DPo 0x00fa0000
+# define RADEON_ROP3_DPon 0x00050000
+# define RADEON_ROP3_PDxn 0x00a50000
+# define RADEON_ROP3_PDno 0x00f50000
+# define RADEON_ROP3_Pn 0x000f0000
+# define RADEON_ROP3_DPno 0x00af0000
+# define RADEON_ROP3_DPan 0x005f0000
+#define RADEON_DP_GUI_MASTER_CNTL_C 0x1c84
+#define RADEON_DP_MIX 0x16c8
+#define RADEON_DP_SRC_BKGD_CLR 0x15dc
+#define RADEON_DP_SRC_FRGD_CLR 0x15d8
+#define RADEON_DP_WRITE_MASK 0x16cc
+#define RADEON_DST_BRES_DEC 0x1630
+#define RADEON_DST_BRES_ERR 0x1628
+#define RADEON_DST_BRES_INC 0x162c
+#define RADEON_DST_BRES_LNTH 0x1634
+#define RADEON_DST_BRES_LNTH_SUB 0x1638
+#define RADEON_DST_HEIGHT 0x1410
+#define RADEON_DST_HEIGHT_WIDTH 0x143c
+#define RADEON_DST_HEIGHT_WIDTH_8 0x158c
+#define RADEON_DST_HEIGHT_WIDTH_BW 0x15b4
+#define RADEON_DST_HEIGHT_Y 0x15a0
+#define RADEON_DST_LINE_START 0x1600
+#define RADEON_DST_LINE_END 0x1604
+#define RADEON_DST_LINE_PATCOUNT 0x1608
+# define RADEON_BRES_CNTL_SHIFT 8
+#define RADEON_DST_OFFSET 0x1404
+#define RADEON_DST_PITCH 0x1408
+#define RADEON_DST_PITCH_OFFSET 0x142c
+#define RADEON_DST_PITCH_OFFSET_C 0x1c80
+# define RADEON_PITCH_SHIFT 21
+# define RADEON_DST_TILE_LINEAR (0 << 30)
+# define RADEON_DST_TILE_MACRO (1 << 30)
+# define RADEON_DST_TILE_MICRO (2 << 30)
+# define RADEON_DST_TILE_BOTH (3 << 30)
+#define RADEON_DST_WIDTH 0x140c
+#define RADEON_DST_WIDTH_HEIGHT 0x1598
+#define RADEON_DST_WIDTH_X 0x1588
+#define RADEON_DST_WIDTH_X_INCY 0x159c
+#define RADEON_DST_X 0x141c
+#define RADEON_DST_X_SUB 0x15a4
+#define RADEON_DST_X_Y 0x1594
+#define RADEON_DST_Y 0x1420
+#define RADEON_DST_Y_SUB 0x15a8
+#define RADEON_DST_Y_X 0x1438
+
+#define RADEON_FCP_CNTL 0x0910
+# define RADEON_FCP0_SRC_PCICLK 0
+# define RADEON_FCP0_SRC_PCLK 1
+# define RADEON_FCP0_SRC_PCLKb 2
+# define RADEON_FCP0_SRC_HREF 3
+# define RADEON_FCP0_SRC_GND 4
+# define RADEON_FCP0_SRC_HREFb 5
+#define RADEON_FLUSH_1 0x1704
+#define RADEON_FLUSH_2 0x1708
+#define RADEON_FLUSH_3 0x170c
+#define RADEON_FLUSH_4 0x1710
+#define RADEON_FLUSH_5 0x1714
+#define RADEON_FLUSH_6 0x1718
+#define RADEON_FLUSH_7 0x171c
+#define RADEON_FOG_3D_TABLE_START 0x1810
+#define RADEON_FOG_3D_TABLE_END 0x1814
+#define RADEON_FOG_3D_TABLE_DENSITY 0x181c
+#define RADEON_FOG_TABLE_INDEX 0x1a14
+#define RADEON_FOG_TABLE_DATA 0x1a18
+#define RADEON_FP_CRTC_H_TOTAL_DISP 0x0250
+#define RADEON_FP_CRTC_V_TOTAL_DISP 0x0254
+# define RADEON_FP_CRTC_H_TOTAL_MASK 0x000003ff
+# define RADEON_FP_CRTC_H_DISP_MASK 0x01ff0000
+# define RADEON_FP_CRTC_V_TOTAL_MASK 0x00000fff
+# define RADEON_FP_CRTC_V_DISP_MASK 0x0fff0000
+# define RADEON_FP_H_SYNC_STRT_CHAR_MASK 0x00001ff8
+# define RADEON_FP_H_SYNC_WID_MASK 0x003f0000
+# define RADEON_FP_V_SYNC_STRT_MASK 0x00000fff
+# define RADEON_FP_V_SYNC_WID_MASK 0x001f0000
+# define RADEON_FP_CRTC_H_TOTAL_SHIFT 0x00000000
+# define RADEON_FP_CRTC_H_DISP_SHIFT 0x00000010
+# define RADEON_FP_CRTC_V_TOTAL_SHIFT 0x00000000
+# define RADEON_FP_CRTC_V_DISP_SHIFT 0x00000010
+# define RADEON_FP_H_SYNC_STRT_CHAR_SHIFT 0x00000003
+# define RADEON_FP_H_SYNC_WID_SHIFT 0x00000010
+# define RADEON_FP_V_SYNC_STRT_SHIFT 0x00000000
+# define RADEON_FP_V_SYNC_WID_SHIFT 0x00000010
+#define RADEON_FP_GEN_CNTL 0x0284
+# define RADEON_FP_FPON (1 << 0)
+# define RADEON_FP_BLANK_EN (1 << 1)
+# define RADEON_FP_TMDS_EN (1 << 2)
+# define RADEON_FP_PANEL_FORMAT (1 << 3)
+# define RADEON_FP_EN_TMDS (1 << 7)
+# define RADEON_FP_DETECT_SENSE (1 << 8)
+# define R200_FP_SOURCE_SEL_MASK (3 << 10)
+# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10)
+# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10)
+# define R200_FP_SOURCE_SEL_RMX (2 << 10)
+# define R200_FP_SOURCE_SEL_TRANS (3 << 10)
+# define RADEON_FP_SEL_CRTC1 (0 << 13)
+# define RADEON_FP_SEL_CRTC2 (1 << 13)
+# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
+# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
+# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
+# define RADEON_FP_CRTC_USE_SHADOW_VEND (1 << 18)
+# define RADEON_FP_RMX_HVSYNC_CONTROL_EN (1 << 20)
+# define RADEON_FP_DFP_SYNC_SEL (1 << 21)
+# define RADEON_FP_CRTC_LOCK_8DOT (1 << 22)
+# define RADEON_FP_CRT_SYNC_SEL (1 << 23)
+# define RADEON_FP_USE_SHADOW_EN (1 << 24)
+# define RADEON_FP_CRT_SYNC_ALT (1 << 26)
+#define RADEON_FP2_GEN_CNTL 0x0288
+# define RADEON_FP2_BLANK_EN (1 << 1)
+# define RADEON_FP2_ON (1 << 2)
+# define RADEON_FP2_PANEL_FORMAT (1 << 3)
+# define RADEON_FP2_DETECT_SENSE (1 << 8)
+# define R200_FP2_SOURCE_SEL_MASK (3 << 10)
+# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10)
+# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10)
+# define R200_FP2_SOURCE_SEL_RMX (2 << 10)
+# define R200_FP2_SOURCE_SEL_TRANS_UNIT (3 << 10)
+# define RADEON_FP2_SRC_SEL_MASK (3 << 13)
+# define RADEON_FP2_SRC_SEL_CRTC2 (1 << 13)
+# define RADEON_FP2_FP_POL (1 << 16)
+# define RADEON_FP2_LP_POL (1 << 17)
+# define RADEON_FP2_SCK_POL (1 << 18)
+# define RADEON_FP2_LCD_CNTL_MASK (7 << 19)
+# define RADEON_FP2_PAD_FLOP_EN (1 << 22)
+# define RADEON_FP2_CRC_EN (1 << 23)
+# define RADEON_FP2_CRC_READ_EN (1 << 24)
+# define RADEON_FP2_DVO_EN (1 << 25)
+# define RADEON_FP2_DVO_RATE_SEL_SDR (1 << 26)
+# define R200_FP2_DVO_RATE_SEL_SDR (1 << 27)
+# define R300_FP2_DVO_CLOCK_MODE_SINGLE (1 << 28)
+# define R300_FP2_DVO_DUAL_CHANNEL_EN (1 << 29)
+#define RADEON_FP_H_SYNC_STRT_WID 0x02c4
+#define RADEON_FP_H2_SYNC_STRT_WID 0x03c4
+#define RADEON_FP_HORZ_STRETCH 0x028c
+#define RADEON_FP_HORZ2_STRETCH 0x038c
+# define RADEON_HORZ_STRETCH_RATIO_MASK 0xffff
+# define RADEON_HORZ_STRETCH_RATIO_MAX 4096
+# define RADEON_HORZ_PANEL_SIZE (0x1ff << 16)
+# define RADEON_HORZ_PANEL_SHIFT 16
+# define RADEON_HORZ_STRETCH_PIXREP (0 << 25)
+# define RADEON_HORZ_STRETCH_BLEND (1 << 26)
+# define RADEON_HORZ_STRETCH_ENABLE (1 << 25)
+# define RADEON_HORZ_AUTO_RATIO (1 << 27)
+# define RADEON_HORZ_FP_LOOP_STRETCH (0x7 << 28)
+# define RADEON_HORZ_AUTO_RATIO_INC (1 << 31)
+#define RADEON_FP_HORZ_VERT_ACTIVE 0x0278
+#define RADEON_FP_V_SYNC_STRT_WID 0x02c8
+#define RADEON_FP_VERT_STRETCH 0x0290
+#define RADEON_FP_V2_SYNC_STRT_WID 0x03c8
+#define RADEON_FP_VERT2_STRETCH 0x0390
+# define RADEON_VERT_PANEL_SIZE (0xfff << 12)
+# define RADEON_VERT_PANEL_SHIFT 12
+# define RADEON_VERT_STRETCH_RATIO_MASK 0xfff
+# define RADEON_VERT_STRETCH_RATIO_SHIFT 0
+# define RADEON_VERT_STRETCH_RATIO_MAX 4096
+# define RADEON_VERT_STRETCH_ENABLE (1 << 25)
+# define RADEON_VERT_STRETCH_LINEREP (0 << 26)
+# define RADEON_VERT_STRETCH_BLEND (1 << 26)
+# define RADEON_VERT_AUTO_RATIO_EN (1 << 27)
+# define RADEON_VERT_AUTO_RATIO_INC (1 << 31)
+# define RADEON_VERT_STRETCH_RESERVED 0x71000000
+#define RS400_FP_2ND_GEN_CNTL 0x0384
+# define RS400_FP_2ND_ON (1 << 0)
+# define RS400_FP_2ND_BLANK_EN (1 << 1)
+# define RS400_TMDS_2ND_EN (1 << 2)
+# define RS400_PANEL_FORMAT_2ND (1 << 3)
+# define RS400_FP_2ND_EN_TMDS (1 << 7)
+# define RS400_FP_2ND_DETECT_SENSE (1 << 8)
+# define RS400_FP_2ND_SOURCE_SEL_MASK (3 << 10)
+# define RS400_FP_2ND_SOURCE_SEL_CRTC1 (0 << 10)
+# define RS400_FP_2ND_SOURCE_SEL_CRTC2 (1 << 10)
+# define RS400_FP_2ND_SOURCE_SEL_RMX (2 << 10)
+# define RS400_FP_2ND_DETECT_EN (1 << 12)
+# define RS400_HPD_2ND_SEL (1 << 13)
+#define RS400_FP2_2_GEN_CNTL 0x0388
+# define RS400_FP2_2_BLANK_EN (1 << 1)
+# define RS400_FP2_2_ON (1 << 2)
+# define RS400_FP2_2_PANEL_FORMAT (1 << 3)
+# define RS400_FP2_2_DETECT_SENSE (1 << 8)
+# define RS400_FP2_2_SOURCE_SEL_MASK (3 << 10)
+# define RS400_FP2_2_SOURCE_SEL_CRTC1 (0 << 10)
+# define RS400_FP2_2_SOURCE_SEL_CRTC2 (1 << 10)
+# define RS400_FP2_2_SOURCE_SEL_RMX (2 << 10)
+# define RS400_FP2_2_DVO2_EN (1 << 25)
+#define RS400_TMDS2_CNTL 0x0394
+#define RS400_TMDS2_TRANSMITTER_CNTL 0x03a4
+# define RS400_TMDS2_PLLEN (1 << 0)
+# define RS400_TMDS2_PLLRST (1 << 1)
+
+#define RADEON_GEN_INT_CNTL 0x0040
+# define RADEON_SW_INT_ENABLE (1 << 25)
+#define RADEON_GEN_INT_STATUS 0x0044
+# define RADEON_VSYNC_INT_AK (1 << 2)
+# define RADEON_VSYNC_INT (1 << 2)
+# define RADEON_VSYNC2_INT_AK (1 << 6)
+# define RADEON_VSYNC2_INT (1 << 6)
+# define RADEON_SW_INT_FIRE (1 << 26)
+# define RADEON_SW_INT_TEST (1 << 25)
+# define RADEON_SW_INT_TEST_ACK (1 << 25)
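[Editorial note, not part of the patch: the GEN_INT_CNTL/GEN_INT_STATUS bits above follow the usual "read status, write the *_AK bit back to acknowledge" pattern. The sketch below is illustrative only; rreg32(), wreg32(), ack_pending_irqs() and the mmio pointer are hypothetical stand-ins for the driver's own register accessors.]

#include <stdint.h>

/* Hypothetical MMIO accessors over a mapped register aperture; the real
 * driver uses its own RREG32()/WREG32() helpers. */
static inline uint32_t rreg32(volatile uint32_t *mmio, uint32_t reg)
{
	return mmio[reg / 4];
}

static inline void wreg32(volatile uint32_t *mmio, uint32_t reg, uint32_t val)
{
	mmio[reg / 4] = val;
}

#define RADEON_GEN_INT_STATUS  0x0044
#define RADEON_VSYNC_INT       (1 << 2)
#define RADEON_VSYNC_INT_AK    (1 << 2)
#define RADEON_SW_INT_TEST     (1 << 25)
#define RADEON_SW_INT_TEST_ACK (1 << 25)

/* Read which interrupt sources fired and write the matching *_AK bits
 * back to GEN_INT_STATUS to acknowledge them, as a handler would. */
static uint32_t ack_pending_irqs(volatile uint32_t *mmio)
{
	uint32_t status = rreg32(mmio, RADEON_GEN_INT_STATUS);
	uint32_t ack = 0;

	if (status & RADEON_VSYNC_INT)
		ack |= RADEON_VSYNC_INT_AK;
	if (status & RADEON_SW_INT_TEST)
		ack |= RADEON_SW_INT_TEST_ACK;

	if (ack)
		wreg32(mmio, RADEON_GEN_INT_STATUS, ack);

	return status;
}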
+#define RADEON_GENENB 0x03c3 /* VGA */
+#define RADEON_GENFC_RD 0x03ca /* VGA */
+#define RADEON_GENFC_WT 0x03da /* VGA, 0x03ba */
+#define RADEON_GENMO_RD 0x03cc /* VGA */
+#define RADEON_GENMO_WT 0x03c2 /* VGA */
+#define RADEON_GENS0 0x03c2 /* VGA */
+#define RADEON_GENS1 0x03da /* VGA, 0x03ba */
+#define RADEON_GPIO_MONID 0x0068 /* DDC interface via I2C */ /* DDC3 */
+#define RADEON_GPIO_MONIDB 0x006c
+#define RADEON_GPIO_CRT2_DDC 0x006c
+#define RADEON_GPIO_DVI_DDC 0x0064 /* DDC2 */
+#define RADEON_GPIO_VGA_DDC 0x0060 /* DDC1 */
+# define RADEON_GPIO_A_0 (1 << 0)
+# define RADEON_GPIO_A_1 (1 << 1)
+# define RADEON_GPIO_Y_0 (1 << 8)
+# define RADEON_GPIO_Y_1 (1 << 9)
+# define RADEON_GPIO_Y_SHIFT_0 8
+# define RADEON_GPIO_Y_SHIFT_1 9
+# define RADEON_GPIO_EN_0 (1 << 16)
+# define RADEON_GPIO_EN_1 (1 << 17)
+# define RADEON_GPIO_MASK_0 (1 << 24) /*??*/
+# define RADEON_GPIO_MASK_1 (1 << 25) /*??*/
+#define RADEON_GRPH8_DATA 0x03cf /* VGA */
+#define RADEON_GRPH8_IDX 0x03ce /* VGA */
+#define RADEON_GUI_SCRATCH_REG0 0x15e0
+#define RADEON_GUI_SCRATCH_REG1 0x15e4
+#define RADEON_GUI_SCRATCH_REG2 0x15e8
+#define RADEON_GUI_SCRATCH_REG3 0x15ec
+#define RADEON_GUI_SCRATCH_REG4 0x15f0
+#define RADEON_GUI_SCRATCH_REG5 0x15f4
+
+#define RADEON_HEADER 0x0f0e /* PCI */
+#define RADEON_HOST_DATA0 0x17c0
+#define RADEON_HOST_DATA1 0x17c4
+#define RADEON_HOST_DATA2 0x17c8
+#define RADEON_HOST_DATA3 0x17cc
+#define RADEON_HOST_DATA4 0x17d0
+#define RADEON_HOST_DATA5 0x17d4
+#define RADEON_HOST_DATA6 0x17d8
+#define RADEON_HOST_DATA7 0x17dc
+#define RADEON_HOST_DATA_LAST 0x17e0
+#define RADEON_HOST_PATH_CNTL 0x0130
+# define RADEON_HP_LIN_RD_CACHE_DIS (1 << 24)
+# define RADEON_HDP_READ_BUFFER_INVALIDATE (1 << 27)
+# define RADEON_HDP_SOFT_RESET (1 << 26)
+# define RADEON_HDP_APER_CNTL (1 << 23)
+#define RADEON_HTOTAL_CNTL 0x0009 /* PLL */
+# define RADEON_HTOT_CNTL_VGA_EN (1 << 28)
+#define RADEON_HTOTAL2_CNTL 0x002e /* PLL */
+
+ /* Multimedia I2C bus */
+#define RADEON_I2C_CNTL_0 0x0090
+#define RADEON_I2C_DONE (1<<0)
+#define RADEON_I2C_NACK (1<<1)
+#define RADEON_I2C_HALT (1<<2)
+#define RADEON_I2C_SOFT_RST (1<<5)
+#define RADEON_I2C_DRIVE_EN (1<<6)
+#define RADEON_I2C_DRIVE_SEL (1<<7)
+#define RADEON_I2C_START (1<<8)
+#define RADEON_I2C_STOP (1<<9)
+#define RADEON_I2C_RECEIVE (1<<10)
+#define RADEON_I2C_ABORT (1<<11)
+#define RADEON_I2C_GO (1<<12)
+#define RADEON_I2C_CNTL_1 0x0094
+#define RADEON_I2C_SEL (1<<16)
+#define RADEON_I2C_EN (1<<17)
+#define RADEON_I2C_DATA 0x0098
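[Editorial note, not part of the patch: the multimedia I2C engine above signals the outcome of a transfer through the DONE, NACK and HALT bits of I2C_CNTL_0 after software sets GO. A minimal completion-poll sketch follows; rreg32() and mm_i2c_wait_done() are hypothetical names, and real code would also handle timeouts via the driver's delay helpers.]

#include <stdint.h>

#define RADEON_I2C_CNTL_0 0x0090
#define RADEON_I2C_DONE   (1 << 0)
#define RADEON_I2C_NACK   (1 << 1)
#define RADEON_I2C_HALT   (1 << 2)

/* Hypothetical accessor; stands in for the driver's register helpers. */
static inline uint32_t rreg32(volatile uint32_t *mmio, uint32_t reg)
{
	return mmio[reg / 4];
}

/* Busy-wait until the multimedia I2C engine reports completion.
 * Returns 0 on DONE, -1 on NACK/HALT or if it never completes. */
static int mm_i2c_wait_done(volatile uint32_t *mmio)
{
	for (int i = 0; i < 100000; i++) {
		uint32_t cntl = rreg32(mmio, RADEON_I2C_CNTL_0);

		if (cntl & (RADEON_I2C_NACK | RADEON_I2C_HALT))
			return -1;   /* transfer aborted by the engine */
		if (cntl & RADEON_I2C_DONE)
			return 0;    /* transfer finished */
	}
	return -1;                   /* gave up waiting */
}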
+
+#define RADEON_DVI_I2C_CNTL_0 0x02e0
+# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3)
+# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
+# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
+# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
+#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */
+#define RADEON_DVI_I2C_DATA 0x02e8
+
+#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */
+#define RADEON_INTERRUPT_PIN 0x0f3d /* PCI */
+#define RADEON_IO_BASE 0x0f14 /* PCI */
+
+#define RADEON_LATENCY 0x0f0d /* PCI */
+#define RADEON_LEAD_BRES_DEC 0x1608
+#define RADEON_LEAD_BRES_LNTH 0x161c
+#define RADEON_LEAD_BRES_LNTH_SUB 0x1624
+#define RADEON_LVDS_GEN_CNTL 0x02d0
+# define RADEON_LVDS_ON (1 << 0)
+# define RADEON_LVDS_DISPLAY_DIS (1 << 1)
+# define RADEON_LVDS_PANEL_TYPE (1 << 2)
+# define RADEON_LVDS_PANEL_FORMAT (1 << 3)
+# define RADEON_LVDS_NO_FM (0 << 4)
+# define RADEON_LVDS_2_GREY (1 << 4)
+# define RADEON_LVDS_4_GREY (2 << 4)
+# define RADEON_LVDS_RST_FM (1 << 6)
+# define RADEON_LVDS_EN (1 << 7)
+# define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8
+# define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8)
+# define RADEON_LVDS_BL_MOD_EN (1 << 16)
+# define RADEON_LVDS_BL_CLK_SEL (1 << 17)
+# define RADEON_LVDS_DIGON (1 << 18)
+# define RADEON_LVDS_BLON (1 << 19)
+# define RADEON_LVDS_FP_POL_LOW (1 << 20)
+# define RADEON_LVDS_LP_POL_LOW (1 << 21)
+# define RADEON_LVDS_DTM_POL_LOW (1 << 22)
+# define RADEON_LVDS_SEL_CRTC2 (1 << 23)
+# define RADEON_LVDS_FPDI_EN (1 << 27)
+# define RADEON_LVDS_HSYNC_DELAY_SHIFT 28
+#define RADEON_LVDS_PLL_CNTL 0x02d4
+# define RADEON_HSYNC_DELAY_SHIFT 28
+# define RADEON_HSYNC_DELAY_MASK (0xf << 28)
+# define RADEON_LVDS_PLL_EN (1 << 16)
+# define RADEON_LVDS_PLL_RESET (1 << 17)
+# define R300_LVDS_SRC_SEL_MASK (3 << 18)
+# define R300_LVDS_SRC_SEL_CRTC1 (0 << 18)
+# define R300_LVDS_SRC_SEL_CRTC2 (1 << 18)
+# define R300_LVDS_SRC_SEL_RMX (2 << 18)
+#define RADEON_LVDS_SS_GEN_CNTL 0x02ec
+# define RADEON_LVDS_PWRSEQ_DELAY1_SHIFT 16
+# define RADEON_LVDS_PWRSEQ_DELAY2_SHIFT 20
+
+#define RADEON_MAX_LATENCY 0x0f3f /* PCI */
+#define RADEON_DISPLAY_BASE_ADDR 0x23c
+#define RADEON_DISPLAY2_BASE_ADDR 0x33c
+#define RADEON_OV0_BASE_ADDR 0x43c
+#define RADEON_NB_TOM 0x15c
+#define R300_MC_INIT_MISC_LAT_TIMER 0x180
+# define R300_MC_DISP0R_INIT_LAT_SHIFT 8
+# define R300_MC_DISP0R_INIT_LAT_MASK 0xf
+# define R300_MC_DISP1R_INIT_LAT_SHIFT 12
+# define R300_MC_DISP1R_INIT_LAT_MASK 0xf
+#define RADEON_MCLK_CNTL 0x0012 /* PLL */
+# define RADEON_MCLKA_SRC_SEL_MASK 0x7
+# define RADEON_FORCEON_MCLKA (1 << 16)
+# define RADEON_FORCEON_MCLKB (1 << 17)
+# define RADEON_FORCEON_YCLKA (1 << 18)
+# define RADEON_FORCEON_YCLKB (1 << 19)
+# define RADEON_FORCEON_MC (1 << 20)
+# define RADEON_FORCEON_AIC (1 << 21)
+# define R300_DISABLE_MC_MCLKA (1 << 21)
+# define R300_DISABLE_MC_MCLKB (1 << 21)
+#define RADEON_MCLK_MISC 0x001f /* PLL */
+# define RADEON_MC_MCLK_MAX_DYN_STOP_LAT (1 << 12)
+# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
+# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14)
+# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15)
+#define RADEON_LCD_GPIO_MASK 0x01a0
+#define RADEON_GPIOPAD_EN 0x01a0
+#define RADEON_LCD_GPIO_Y_REG 0x01a4
+#define RADEON_MDGPIO_A_REG 0x01ac
+#define RADEON_MDGPIO_EN_REG 0x01b0
+#define RADEON_MDGPIO_MASK 0x0198
+#define RADEON_GPIOPAD_MASK 0x0198
+#define RADEON_GPIOPAD_A 0x019c
+#define RADEON_MDGPIO_Y_REG 0x01b4
+#define RADEON_MEM_ADDR_CONFIG 0x0148
+#define RADEON_MEM_BASE 0x0f10 /* PCI */
+#define RADEON_MEM_CNTL 0x0140
+# define RADEON_MEM_NUM_CHANNELS_MASK 0x01
+# define RADEON_MEM_USE_B_CH_ONLY (1 << 1)
+# define RV100_HALF_MODE (1 << 3)
+# define R300_MEM_NUM_CHANNELS_MASK 0x03
+# define R300_MEM_USE_CD_CH_ONLY (1 << 2)
+#define RADEON_MEM_TIMING_CNTL 0x0144 /* EXT_MEM_CNTL */
+#define RADEON_MEM_INIT_LAT_TIMER 0x0154
+#define RADEON_MEM_INTF_CNTL 0x014c
+#define RADEON_MEM_SDRAM_MODE_REG 0x0158
+# define RADEON_SDRAM_MODE_MASK 0xffff0000
+# define RADEON_B3MEM_RESET_MASK 0x6fffffff
+# define RADEON_MEM_CFG_TYPE_DDR (1 << 30)
+#define RADEON_MEM_STR_CNTL 0x0150
+# define RADEON_MEM_PWRUP_COMPL_A (1 << 0)
+# define RADEON_MEM_PWRUP_COMPL_B (1 << 1)
+# define R300_MEM_PWRUP_COMPL_C (1 << 2)
+# define R300_MEM_PWRUP_COMPL_D (1 << 3)
+# define RADEON_MEM_PWRUP_COMPLETE 0x03
+# define R300_MEM_PWRUP_COMPLETE 0x0f
+#define RADEON_MC_STATUS 0x0150
+# define RADEON_MC_IDLE (1 << 2)
+# define R300_MC_IDLE (1 << 4)
+#define RADEON_MEM_VGA_RP_SEL 0x003c
+#define RADEON_MEM_VGA_WP_SEL 0x0038
+#define RADEON_MIN_GRANT 0x0f3e /* PCI */
+#define RADEON_MM_DATA 0x0004
+#define RADEON_MM_INDEX 0x0000
+# define RADEON_MM_APER (1 << 31)
+#define RADEON_MPLL_CNTL 0x000e /* PLL */
+#define RADEON_MPP_TB_CONFIG 0x01c0 /* ? */
+#define RADEON_MPP_GP_CONFIG 0x01c8 /* ? */
+#define RADEON_SEPROM_CNTL1 0x01c0
+# define RADEON_SCK_PRESCALE_SHIFT 24
+# define RADEON_SCK_PRESCALE_MASK (0xff << 24)
+#define R300_MC_IND_INDEX 0x01f8
+# define R300_MC_IND_ADDR_MASK 0x3f
+# define R300_MC_IND_WR_EN (1 << 8)
+#define R300_MC_IND_DATA 0x01fc
+#define R300_MC_READ_CNTL_AB 0x017c
+# define R300_MEM_RBS_POSITION_A_MASK 0x03
+#define R300_MC_READ_CNTL_CD_mcind 0x24
+# define R300_MEM_RBS_POSITION_C_MASK 0x03
+
+#define RADEON_N_VIF_COUNT 0x0248
+
+#define RADEON_OV0_AUTO_FLIP_CNTL 0x0470
+# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_NUM 0x00000007
+# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_REPEAT_FIELD 0x00000008
+# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_ODD 0x00000010
+# define RADEON_OV0_AUTO_FLIP_CNTL_IGNORE_REPEAT_FIELD 0x00000020
+# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_EOF_TOGGLE 0x00000040
+# define RADEON_OV0_AUTO_FLIP_CNTL_VID_PORT_SELECT 0x00000300
+# define RADEON_OV0_AUTO_FLIP_CNTL_P1_FIRST_LINE_EVEN 0x00010000
+# define RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_EVEN_DOWN 0x00040000
+# define RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_ODD_DOWN 0x00080000
+# define RADEON_OV0_AUTO_FLIP_CNTL_FIELD_POL_SOURCE 0x00800000
+
+#define RADEON_OV0_COLOUR_CNTL 0x04E0
+#define RADEON_OV0_DEINTERLACE_PATTERN 0x0474
+#define RADEON_OV0_EXCLUSIVE_HORZ 0x0408
+# define RADEON_EXCL_HORZ_START_MASK 0x000000ff
+# define RADEON_EXCL_HORZ_END_MASK 0x0000ff00
+# define RADEON_EXCL_HORZ_BACK_PORCH_MASK 0x00ff0000
+# define RADEON_EXCL_HORZ_EXCLUSIVE_EN 0x80000000
+#define RADEON_OV0_EXCLUSIVE_VERT 0x040C
+# define RADEON_EXCL_VERT_START_MASK 0x000003ff
+# define RADEON_EXCL_VERT_END_MASK 0x03ff0000
+#define RADEON_OV0_FILTER_CNTL 0x04A0
+# define RADEON_FILTER_PROGRAMMABLE_COEF 0x0
+# define RADEON_FILTER_HC_COEF_HORZ_Y 0x1
+# define RADEON_FILTER_HC_COEF_HORZ_UV 0x2
+# define RADEON_FILTER_HC_COEF_VERT_Y 0x4
+# define RADEON_FILTER_HC_COEF_VERT_UV 0x8
+# define RADEON_FILTER_HARDCODED_COEF 0xf
+# define RADEON_FILTER_COEF_MASK 0xf
+
+#define RADEON_OV0_FOUR_TAP_COEF_0 0x04B0
+#define RADEON_OV0_FOUR_TAP_COEF_1 0x04B4
+#define RADEON_OV0_FOUR_TAP_COEF_2 0x04B8
+#define RADEON_OV0_FOUR_TAP_COEF_3 0x04BC
+#define RADEON_OV0_FOUR_TAP_COEF_4 0x04C0
+#define RADEON_OV0_FLAG_CNTL 0x04DC
+#define RADEON_OV0_GAMMA_000_00F 0x0d40
+#define RADEON_OV0_GAMMA_010_01F 0x0d44
+#define RADEON_OV0_GAMMA_020_03F 0x0d48
+#define RADEON_OV0_GAMMA_040_07F 0x0d4c
+#define RADEON_OV0_GAMMA_080_0BF 0x0e00
+#define RADEON_OV0_GAMMA_0C0_0FF 0x0e04
+#define RADEON_OV0_GAMMA_100_13F 0x0e08
+#define RADEON_OV0_GAMMA_140_17F 0x0e0c
+#define RADEON_OV0_GAMMA_180_1BF 0x0e10
+#define RADEON_OV0_GAMMA_1C0_1FF 0x0e14
+#define RADEON_OV0_GAMMA_200_23F 0x0e18
+#define RADEON_OV0_GAMMA_240_27F 0x0e1c
+#define RADEON_OV0_GAMMA_280_2BF 0x0e20
+#define RADEON_OV0_GAMMA_2C0_2FF 0x0e24
+#define RADEON_OV0_GAMMA_300_33F 0x0e28
+#define RADEON_OV0_GAMMA_340_37F 0x0e2c
+#define RADEON_OV0_GAMMA_380_3BF 0x0d50
+#define RADEON_OV0_GAMMA_3C0_3FF 0x0d54
+#define RADEON_OV0_GRAPHICS_KEY_CLR_LOW 0x04EC
+#define RADEON_OV0_GRAPHICS_KEY_CLR_HIGH 0x04F0
+#define RADEON_OV0_H_INC 0x0480
+#define RADEON_OV0_KEY_CNTL 0x04F4
+# define RADEON_VIDEO_KEY_FN_MASK 0x00000003L
+# define RADEON_VIDEO_KEY_FN_FALSE 0x00000000L
+# define RADEON_VIDEO_KEY_FN_TRUE 0x00000001L
+# define RADEON_VIDEO_KEY_FN_EQ 0x00000002L
+# define RADEON_VIDEO_KEY_FN_NE 0x00000003L
+# define RADEON_GRAPHIC_KEY_FN_MASK 0x00000030L
+# define RADEON_GRAPHIC_KEY_FN_FALSE 0x00000000L
+# define RADEON_GRAPHIC_KEY_FN_TRUE 0x00000010L
+# define RADEON_GRAPHIC_KEY_FN_EQ 0x00000020L
+# define RADEON_GRAPHIC_KEY_FN_NE 0x00000030L
+# define RADEON_CMP_MIX_MASK 0x00000100L
+# define RADEON_CMP_MIX_OR 0x00000000L
+# define RADEON_CMP_MIX_AND 0x00000100L
+#define RADEON_OV0_LIN_TRANS_A 0x0d20
+#define RADEON_OV0_LIN_TRANS_B 0x0d24
+#define RADEON_OV0_LIN_TRANS_C 0x0d28
+#define RADEON_OV0_LIN_TRANS_D 0x0d2c
+#define RADEON_OV0_LIN_TRANS_E 0x0d30
+#define RADEON_OV0_LIN_TRANS_F 0x0d34
+#define RADEON_OV0_P1_BLANK_LINES_AT_TOP 0x0430
+# define RADEON_P1_BLNK_LN_AT_TOP_M1_MASK 0x00000fffL
+# define RADEON_P1_ACTIVE_LINES_M1 0x0fff0000L
+#define RADEON_OV0_P1_H_ACCUM_INIT 0x0488
+#define RADEON_OV0_P1_V_ACCUM_INIT 0x0428
+# define RADEON_OV0_P1_MAX_LN_IN_PER_LN_OUT 0x00000003L
+# define RADEON_OV0_P1_V_ACCUM_INIT_MASK 0x01ff8000L
+#define RADEON_OV0_P1_X_START_END 0x0494
+#define RADEON_OV0_P2_X_START_END 0x0498
+#define RADEON_OV0_P23_BLANK_LINES_AT_TOP 0x0434
+# define RADEON_P23_BLNK_LN_AT_TOP_M1_MASK 0x000007ffL
+# define RADEON_P23_ACTIVE_LINES_M1 0x07ff0000L
+#define RADEON_OV0_P23_H_ACCUM_INIT 0x048C
+#define RADEON_OV0_P23_V_ACCUM_INIT 0x042C
+#define RADEON_OV0_P3_X_START_END 0x049C
+#define RADEON_OV0_REG_LOAD_CNTL 0x0410
+# define RADEON_REG_LD_CTL_LOCK 0x00000001L
+# define RADEON_REG_LD_CTL_VBLANK_DURING_LOCK 0x00000002L
+# define RADEON_REG_LD_CTL_STALL_GUI_UNTIL_FLIP 0x00000004L
+# define RADEON_REG_LD_CTL_LOCK_READBACK 0x00000008L
+# define RADEON_REG_LD_CTL_FLIP_READBACK 0x00000010L
+#define RADEON_OV0_SCALE_CNTL 0x0420
+# define RADEON_SCALER_HORZ_PICK_NEAREST 0x00000004L
+# define RADEON_SCALER_VERT_PICK_NEAREST 0x00000008L
+# define RADEON_SCALER_SIGNED_UV 0x00000010L
+# define RADEON_SCALER_GAMMA_SEL_MASK 0x00000060L
+# define RADEON_SCALER_GAMMA_SEL_BRIGHT 0x00000000L
+# define RADEON_SCALER_GAMMA_SEL_G22 0x00000020L
+# define RADEON_SCALER_GAMMA_SEL_G18 0x00000040L
+# define RADEON_SCALER_GAMMA_SEL_G14 0x00000060L
+# define RADEON_SCALER_COMCORE_SHIFT_UP_ONE 0x00000080L
+# define RADEON_SCALER_SURFAC_FORMAT 0x00000f00L
+# define RADEON_SCALER_SOURCE_15BPP 0x00000300L
+# define RADEON_SCALER_SOURCE_16BPP 0x00000400L
+# define RADEON_SCALER_SOURCE_32BPP 0x00000600L
+# define RADEON_SCALER_SOURCE_YUV9 0x00000900L
+# define RADEON_SCALER_SOURCE_YUV12 0x00000A00L
+# define RADEON_SCALER_SOURCE_VYUY422 0x00000B00L
+# define RADEON_SCALER_SOURCE_YVYU422 0x00000C00L
+# define RADEON_SCALER_ADAPTIVE_DEINT 0x00001000L
+# define RADEON_SCALER_TEMPORAL_DEINT 0x00002000L
+# define RADEON_SCALER_CRTC_SEL 0x00004000L
+# define RADEON_SCALER_SMART_SWITCH 0x00008000L
+# define RADEON_SCALER_BURST_PER_PLANE 0x007F0000L
+# define RADEON_SCALER_DOUBLE_BUFFER 0x01000000L
+# define RADEON_SCALER_DIS_LIMIT 0x08000000L
+# define RADEON_SCALER_LIN_TRANS_BYPASS 0x10000000L
+# define RADEON_SCALER_INT_EMU 0x20000000L
+# define RADEON_SCALER_ENABLE 0x40000000L
+# define RADEON_SCALER_SOFT_RESET 0x80000000L
+#define RADEON_OV0_STEP_BY 0x0484
+#define RADEON_OV0_TEST 0x04F8
+#define RADEON_OV0_V_INC 0x0424
+#define RADEON_OV0_VID_BUF_PITCH0_VALUE 0x0460
+#define RADEON_OV0_VID_BUF_PITCH1_VALUE 0x0464
+#define RADEON_OV0_VID_BUF0_BASE_ADRS 0x0440
+# define RADEON_VIF_BUF0_PITCH_SEL 0x00000001L
+# define RADEON_VIF_BUF0_TILE_ADRS 0x00000002L
+# define RADEON_VIF_BUF0_BASE_ADRS_MASK 0x03fffff0L
+# define RADEON_VIF_BUF0_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF1_BASE_ADRS 0x0444
+# define RADEON_VIF_BUF1_PITCH_SEL 0x00000001L
+# define RADEON_VIF_BUF1_TILE_ADRS 0x00000002L
+# define RADEON_VIF_BUF1_BASE_ADRS_MASK 0x03fffff0L
+# define RADEON_VIF_BUF1_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF2_BASE_ADRS 0x0448
+# define RADEON_VIF_BUF2_PITCH_SEL 0x00000001L
+# define RADEON_VIF_BUF2_TILE_ADRS 0x00000002L
+# define RADEON_VIF_BUF2_BASE_ADRS_MASK 0x03fffff0L
+# define RADEON_VIF_BUF2_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF3_BASE_ADRS 0x044C
+#define RADEON_OV0_VID_BUF4_BASE_ADRS 0x0450
+#define RADEON_OV0_VID_BUF5_BASE_ADRS 0x0454
+#define RADEON_OV0_VIDEO_KEY_CLR_HIGH 0x04E8
+#define RADEON_OV0_VIDEO_KEY_CLR_LOW 0x04E4
+#define RADEON_OV0_Y_X_START 0x0400
+#define RADEON_OV0_Y_X_END 0x0404
+#define RADEON_OV1_Y_X_START 0x0600
+#define RADEON_OV1_Y_X_END 0x0604
+#define RADEON_OVR_CLR 0x0230
+#define RADEON_OVR_WID_LEFT_RIGHT 0x0234
+#define RADEON_OVR_WID_TOP_BOTTOM 0x0238
+
+/* first capture unit */
+
+#define RADEON_CAP0_BUF0_OFFSET 0x0920
+#define RADEON_CAP0_BUF1_OFFSET 0x0924
+#define RADEON_CAP0_BUF0_EVEN_OFFSET 0x0928
+#define RADEON_CAP0_BUF1_EVEN_OFFSET 0x092C
+
+#define RADEON_CAP0_BUF_PITCH 0x0930
+#define RADEON_CAP0_V_WINDOW 0x0934
+#define RADEON_CAP0_H_WINDOW 0x0938
+#define RADEON_CAP0_VBI0_OFFSET 0x093C
+#define RADEON_CAP0_VBI1_OFFSET 0x0940
+#define RADEON_CAP0_VBI_V_WINDOW 0x0944
+#define RADEON_CAP0_VBI_H_WINDOW 0x0948
+#define RADEON_CAP0_PORT_MODE_CNTL 0x094C
+#define RADEON_CAP0_TRIG_CNTL 0x0950
+#define RADEON_CAP0_DEBUG 0x0954
+#define RADEON_CAP0_CONFIG 0x0958
+# define RADEON_CAP0_CONFIG_CONTINUOS 0x00000001
+# define RADEON_CAP0_CONFIG_START_FIELD_EVEN 0x00000002
+# define RADEON_CAP0_CONFIG_START_BUF_GET 0x00000004
+# define RADEON_CAP0_CONFIG_START_BUF_SET 0x00000008
+# define RADEON_CAP0_CONFIG_BUF_TYPE_ALT 0x00000010
+# define RADEON_CAP0_CONFIG_BUF_TYPE_FRAME 0x00000020
+# define RADEON_CAP0_CONFIG_ONESHOT_MODE_FRAME 0x00000040
+# define RADEON_CAP0_CONFIG_BUF_MODE_DOUBLE 0x00000080
+# define RADEON_CAP0_CONFIG_BUF_MODE_TRIPLE 0x00000100
+# define RADEON_CAP0_CONFIG_MIRROR_EN 0x00000200
+# define RADEON_CAP0_CONFIG_ONESHOT_MIRROR_EN 0x00000400
+# define RADEON_CAP0_CONFIG_VIDEO_SIGNED_UV 0x00000800
+# define RADEON_CAP0_CONFIG_ANC_DECODE_EN 0x00001000
+# define RADEON_CAP0_CONFIG_VBI_EN 0x00002000
+# define RADEON_CAP0_CONFIG_SOFT_PULL_DOWN_EN 0x00004000
+# define RADEON_CAP0_CONFIG_VIP_EXTEND_FLAG_EN 0x00008000
+# define RADEON_CAP0_CONFIG_FAKE_FIELD_EN 0x00010000
+# define RADEON_CAP0_CONFIG_ODD_ONE_MORE_LINE 0x00020000
+# define RADEON_CAP0_CONFIG_EVEN_ONE_MORE_LINE 0x00040000
+# define RADEON_CAP0_CONFIG_HORZ_DIVIDE_2 0x00080000
+# define RADEON_CAP0_CONFIG_HORZ_DIVIDE_4 0x00100000
+# define RADEON_CAP0_CONFIG_VERT_DIVIDE_2 0x00200000
+# define RADEON_CAP0_CONFIG_VERT_DIVIDE_4 0x00400000
+# define RADEON_CAP0_CONFIG_FORMAT_BROOKTREE 0x00000000
+# define RADEON_CAP0_CONFIG_FORMAT_CCIR656 0x00800000
+# define RADEON_CAP0_CONFIG_FORMAT_ZV 0x01000000
+# define RADEON_CAP0_CONFIG_FORMAT_VIP 0x01800000
+# define RADEON_CAP0_CONFIG_FORMAT_TRANSPORT 0x02000000
+# define RADEON_CAP0_CONFIG_HORZ_DECIMATOR 0x04000000
+# define RADEON_CAP0_CONFIG_VIDEO_IN_YVYU422 0x00000000
+# define RADEON_CAP0_CONFIG_VIDEO_IN_VYUY422 0x20000000
+# define RADEON_CAP0_CONFIG_VBI_DIVIDE_2 0x40000000
+# define RADEON_CAP0_CONFIG_VBI_DIVIDE_4 0x80000000
+#define RADEON_CAP0_ANC_ODD_OFFSET 0x095C
+#define RADEON_CAP0_ANC_EVEN_OFFSET 0x0960
+#define RADEON_CAP0_ANC_H_WINDOW 0x0964
+#define RADEON_CAP0_VIDEO_SYNC_TEST 0x0968
+#define RADEON_CAP0_ONESHOT_BUF_OFFSET 0x096C
+#define RADEON_CAP0_BUF_STATUS 0x0970
+/* #define RADEON_CAP0_DWNSC_XRATIO 0x0978 */
+/* #define RADEON_CAP0_XSHARPNESS 0x097C */
+#define RADEON_CAP0_VBI2_OFFSET 0x0980
+#define RADEON_CAP0_VBI3_OFFSET 0x0984
+#define RADEON_CAP0_ANC2_OFFSET 0x0988
+#define RADEON_CAP0_ANC3_OFFSET 0x098C
+#define RADEON_VID_BUFFER_CONTROL 0x0900
+
+/* second capture unit */
+
+#define RADEON_CAP1_BUF0_OFFSET 0x0990
+#define RADEON_CAP1_BUF1_OFFSET 0x0994
+#define RADEON_CAP1_BUF0_EVEN_OFFSET 0x0998
+#define RADEON_CAP1_BUF1_EVEN_OFFSET 0x099C
+
+#define RADEON_CAP1_BUF_PITCH 0x09A0
+#define RADEON_CAP1_V_WINDOW 0x09A4
+#define RADEON_CAP1_H_WINDOW 0x09A8
+#define RADEON_CAP1_VBI_ODD_OFFSET 0x09AC
+#define RADEON_CAP1_VBI_EVEN_OFFSET 0x09B0
+#define RADEON_CAP1_VBI_V_WINDOW 0x09B4
+#define RADEON_CAP1_VBI_H_WINDOW 0x09B8
+#define RADEON_CAP1_PORT_MODE_CNTL 0x09BC
+#define RADEON_CAP1_TRIG_CNTL 0x09C0
+#define RADEON_CAP1_DEBUG 0x09C4
+#define RADEON_CAP1_CONFIG 0x09C8
+#define RADEON_CAP1_ANC_ODD_OFFSET 0x09CC
+#define RADEON_CAP1_ANC_EVEN_OFFSET 0x09D0
+#define RADEON_CAP1_ANC_H_WINDOW 0x09D4
+#define RADEON_CAP1_VIDEO_SYNC_TEST 0x09D8
+#define RADEON_CAP1_ONESHOT_BUF_OFFSET 0x09DC
+#define RADEON_CAP1_BUF_STATUS 0x09E0
+#define RADEON_CAP1_DWNSC_XRATIO 0x09E8
+#define RADEON_CAP1_XSHARPNESS 0x09EC
+
+/* misc multimedia registers */
+
+#define RADEON_IDCT_RUNS 0x1F80
+#define RADEON_IDCT_LEVELS 0x1F84
+#define RADEON_IDCT_CONTROL 0x1FBC
+#define RADEON_IDCT_AUTH_CONTROL 0x1F88
+#define RADEON_IDCT_AUTH 0x1F8C
+
+#define RADEON_P2PLL_CNTL 0x002a /* P2PLL */
+# define RADEON_P2PLL_RESET (1 << 0)
+# define RADEON_P2PLL_SLEEP (1 << 1)
+# define RADEON_P2PLL_PVG_MASK (7 << 11)
+# define RADEON_P2PLL_PVG_SHIFT 11
+# define RADEON_P2PLL_ATOMIC_UPDATE_EN (1 << 16)
+# define RADEON_P2PLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+# define RADEON_P2PLL_ATOMIC_UPDATE_VSYNC (1 << 18)
+#define RADEON_P2PLL_DIV_0 0x002c
+# define RADEON_P2PLL_FB0_DIV_MASK 0x07ff
+# define RADEON_P2PLL_POST0_DIV_MASK 0x00070000
+#define RADEON_P2PLL_REF_DIV 0x002B /* PLL */
+# define RADEON_P2PLL_REF_DIV_MASK 0x03ff
+# define RADEON_P2PLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
+# define RADEON_P2PLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
+# define R300_PPLL_REF_DIV_ACC_MASK (0x3ff << 18)
+# define R300_PPLL_REF_DIV_ACC_SHIFT 18
+#define RADEON_PALETTE_DATA 0x00b4
+#define RADEON_PALETTE_30_DATA 0x00b8
+#define RADEON_PALETTE_INDEX 0x00b0
+#define RADEON_PCI_GART_PAGE 0x017c
+#define RADEON_PIXCLKS_CNTL 0x002d
+# define RADEON_PIX2CLK_SRC_SEL_MASK 0x03
+# define RADEON_PIX2CLK_SRC_SEL_CPUCLK 0x00
+# define RADEON_PIX2CLK_SRC_SEL_PSCANCLK 0x01
+# define RADEON_PIX2CLK_SRC_SEL_BYTECLK 0x02
+# define RADEON_PIX2CLK_SRC_SEL_P2PLLCLK 0x03
+# define RADEON_PIX2CLK_ALWAYS_ONb (1<<6)
+# define RADEON_PIX2CLK_DAC_ALWAYS_ONb (1<<7)
+# define RADEON_PIXCLK_TV_SRC_SEL (1 << 8)
+# define RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb (1 << 9)
+# define R300_DVOCLK_ALWAYS_ONb (1 << 10)
+# define RADEON_PIXCLK_BLEND_ALWAYS_ONb (1 << 11)
+# define RADEON_PIXCLK_GV_ALWAYS_ONb (1 << 12)
+# define RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb (1 << 13)
+# define R300_PIXCLK_DVO_ALWAYS_ONb (1 << 13)
+# define RADEON_PIXCLK_LVDS_ALWAYS_ONb (1 << 14)
+# define RADEON_PIXCLK_TMDS_ALWAYS_ONb (1 << 15)
+# define R300_PIXCLK_TRANS_ALWAYS_ONb (1 << 16)
+# define R300_PIXCLK_TVO_ALWAYS_ONb (1 << 17)
+# define R300_P2G2CLK_ALWAYS_ONb (1 << 18)
+# define R300_P2G2CLK_DAC_ALWAYS_ONb (1 << 19)
+# define R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF (1 << 23)
+#define RADEON_PLANE_3D_MASK_C 0x1d44
+#define RADEON_PLL_TEST_CNTL 0x0013 /* PLL */
+# define RADEON_PLL_MASK_READ_B (1 << 9)
+#define RADEON_PMI_CAP_ID 0x0f5c /* PCI */
+#define RADEON_PMI_DATA 0x0f63 /* PCI */
+#define RADEON_PMI_NXT_CAP_PTR 0x0f5d /* PCI */
+#define RADEON_PMI_PMC_REG 0x0f5e /* PCI */
+#define RADEON_PMI_PMCSR_REG 0x0f60 /* PCI */
+#define RADEON_PMI_REGISTER 0x0f5c /* PCI */
+#define RADEON_PPLL_CNTL 0x0002 /* PLL */
+# define RADEON_PPLL_RESET (1 << 0)
+# define RADEON_PPLL_SLEEP (1 << 1)
+# define RADEON_PPLL_PVG_MASK (7 << 11)
+# define RADEON_PPLL_PVG_SHIFT 11
+# define RADEON_PPLL_ATOMIC_UPDATE_EN (1 << 16)
+# define RADEON_PPLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+# define RADEON_PPLL_ATOMIC_UPDATE_VSYNC (1 << 18)
+#define RADEON_PPLL_DIV_0 0x0004 /* PLL */
+#define RADEON_PPLL_DIV_1 0x0005 /* PLL */
+#define RADEON_PPLL_DIV_2 0x0006 /* PLL */
+#define RADEON_PPLL_DIV_3 0x0007 /* PLL */
+# define RADEON_PPLL_FB3_DIV_MASK 0x07ff
+# define RADEON_PPLL_POST3_DIV_MASK 0x00070000
+#define RADEON_PPLL_REF_DIV 0x0003 /* PLL */
+# define RADEON_PPLL_REF_DIV_MASK 0x03ff
+# define RADEON_PPLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
+# define RADEON_PPLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
+#define RADEON_PWR_MNGMT_CNTL_STATUS 0x0f60 /* PCI */
+
+#define RADEON_RBBM_GUICNTL 0x172c
+# define RADEON_HOST_DATA_SWAP_NONE (0 << 0)
+# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0)
+# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0)
+# define RADEON_HOST_DATA_SWAP_HDW (3 << 0)
+#define RADEON_RBBM_SOFT_RESET 0x00f0
+# define RADEON_SOFT_RESET_CP (1 << 0)
+# define RADEON_SOFT_RESET_HI (1 << 1)
+# define RADEON_SOFT_RESET_SE (1 << 2)
+# define RADEON_SOFT_RESET_RE (1 << 3)
+# define RADEON_SOFT_RESET_PP (1 << 4)
+# define RADEON_SOFT_RESET_E2 (1 << 5)
+# define RADEON_SOFT_RESET_RB (1 << 6)
+# define RADEON_SOFT_RESET_HDP (1 << 7)
+#define RADEON_RBBM_STATUS 0x0e40
+# define RADEON_RBBM_FIFOCNT_MASK 0x007f
+# define RADEON_RBBM_ACTIVE (1 << 31)
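[Editorial note, not part of the patch: RBBM_STATUS is the register drivers poll to decide the graphics engine is idle, waiting for the command FIFO count to drain and RBBM_ACTIVE to clear. A self-contained sketch of that poll loop follows; rreg32(), wait_for_engine_idle() and the fixed retry count are illustrative assumptions.]

#include <stdint.h>

#define RADEON_RBBM_STATUS       0x0e40
#define RADEON_RBBM_FIFOCNT_MASK 0x007f
#define RADEON_RBBM_ACTIVE       (1u << 31)  /* written as (1 << 31) in the header */

static inline uint32_t rreg32(volatile uint32_t *mmio, uint32_t reg)
{
	return mmio[reg / 4];
}

/* Poll RBBM_STATUS until the engine is idle: FIFO drained and the
 * RBBM_ACTIVE bit clear. Returns 0 on success, -1 on timeout. */
static int wait_for_engine_idle(volatile uint32_t *mmio)
{
	for (int i = 0; i < 1000000; i++) {
		uint32_t status = rreg32(mmio, RADEON_RBBM_STATUS);

		if ((status & RADEON_RBBM_FIFOCNT_MASK) == 0 &&
		    !(status & RADEON_RBBM_ACTIVE))
			return 0;
	}
	return -1;
}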
+#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c
+# define RADEON_RB2D_DC_FLUSH (3 << 0)
+# define RADEON_RB2D_DC_FREE (3 << 2)
+# define RADEON_RB2D_DC_FLUSH_ALL 0xf
+# define RADEON_RB2D_DC_BUSY (1 << 31)
+#define RADEON_RB2D_DSTCACHE_MODE 0x3428
+#define RADEON_DSTCACHE_CTLSTAT 0x1714
+
+#define RADEON_RB3D_ZCACHE_MODE 0x3250
+#define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254
+# define RADEON_RB3D_ZC_FLUSH_ALL 0x5
+#define RADEON_RB3D_DSTCACHE_MODE 0x3258
+# define RADEON_RB3D_DC_CACHE_ENABLE (0)
+# define RADEON_RB3D_DC_2D_CACHE_DISABLE (1)
+# define RADEON_RB3D_DC_3D_CACHE_DISABLE (2)
+# define RADEON_RB3D_DC_CACHE_DISABLE (3)
+# define RADEON_RB3D_DC_2D_CACHE_LINESIZE_128 (1 << 2)
+# define RADEON_RB3D_DC_3D_CACHE_LINESIZE_128 (2 << 2)
+# define RADEON_RB3D_DC_2D_CACHE_AUTOFLUSH (1 << 8)
+# define RADEON_RB3D_DC_3D_CACHE_AUTOFLUSH (2 << 8)
+# define R200_RB3D_DC_2D_CACHE_AUTOFREE (1 << 10)
+# define R200_RB3D_DC_3D_CACHE_AUTOFREE (2 << 10)
+# define RADEON_RB3D_DC_FORCE_RMW (1 << 16)
+# define RADEON_RB3D_DC_DISABLE_RI_FILL (1 << 24)
+# define RADEON_RB3D_DC_DISABLE_RI_READ (1 << 25)
+
+#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325C
+# define RADEON_RB3D_DC_FLUSH (3 << 0)
+# define RADEON_RB3D_DC_FREE (3 << 2)
+# define RADEON_RB3D_DC_FLUSH_ALL 0xf
+# define RADEON_RB3D_DC_BUSY (1 << 31)
+
+#define RADEON_REG_BASE 0x0f18 /* PCI */
+#define RADEON_REGPROG_INF 0x0f09 /* PCI */
+#define RADEON_REVISION_ID 0x0f08 /* PCI */
+
+#define RADEON_SC_BOTTOM 0x164c
+#define RADEON_SC_BOTTOM_RIGHT 0x16f0
+#define RADEON_SC_BOTTOM_RIGHT_C 0x1c8c
+#define RADEON_SC_LEFT 0x1640
+#define RADEON_SC_RIGHT 0x1644
+#define RADEON_SC_TOP 0x1648
+#define RADEON_SC_TOP_LEFT 0x16ec
+#define RADEON_SC_TOP_LEFT_C 0x1c88
+# define RADEON_SC_SIGN_MASK_LO 0x8000
+# define RADEON_SC_SIGN_MASK_HI 0x80000000
+#define RADEON_M_SPLL_REF_FB_DIV 0x000a /* PLL */
+# define RADEON_M_SPLL_REF_DIV_SHIFT 0
+# define RADEON_M_SPLL_REF_DIV_MASK 0xff
+# define RADEON_MPLL_FB_DIV_SHIFT 8
+# define RADEON_MPLL_FB_DIV_MASK 0xff
+# define RADEON_SPLL_FB_DIV_SHIFT 16
+# define RADEON_SPLL_FB_DIV_MASK 0xff
+#define RADEON_SPLL_CNTL 0x000c /* PLL */
+# define RADEON_SPLL_SLEEP (1 << 0)
+# define RADEON_SPLL_RESET (1 << 1)
+# define RADEON_SPLL_PCP_MASK 0x7
+# define RADEON_SPLL_PCP_SHIFT 8
+# define RADEON_SPLL_PVG_MASK 0x7
+# define RADEON_SPLL_PVG_SHIFT 11
+# define RADEON_SPLL_PDC_MASK 0x3
+# define RADEON_SPLL_PDC_SHIFT 14
+#define RADEON_SCLK_CNTL 0x000d /* PLL */
+# define RADEON_SCLK_SRC_SEL_MASK 0x0007
+# define RADEON_DYN_STOP_LAT_MASK 0x00007ff8
+# define RADEON_CP_MAX_DYN_STOP_LAT 0x0008
+# define RADEON_SCLK_FORCEON_MASK 0xffff8000
+# define RADEON_SCLK_FORCE_DISP2 (1<<15)
+# define RADEON_SCLK_FORCE_CP (1<<16)
+# define RADEON_SCLK_FORCE_HDP (1<<17)
+# define RADEON_SCLK_FORCE_DISP1 (1<<18)
+# define RADEON_SCLK_FORCE_TOP (1<<19)
+# define RADEON_SCLK_FORCE_E2 (1<<20)
+# define RADEON_SCLK_FORCE_SE (1<<21)
+# define RADEON_SCLK_FORCE_IDCT (1<<22)
+# define RADEON_SCLK_FORCE_VIP (1<<23)
+# define RADEON_SCLK_FORCE_RE (1<<24)
+# define RADEON_SCLK_FORCE_PB (1<<25)
+# define RADEON_SCLK_FORCE_TAM (1<<26)
+# define RADEON_SCLK_FORCE_TDM (1<<27)
+# define RADEON_SCLK_FORCE_RB (1<<28)
+# define RADEON_SCLK_FORCE_TV_SCLK (1<<29)
+# define RADEON_SCLK_FORCE_SUBPIC (1<<30)
+# define RADEON_SCLK_FORCE_OV0 (1<<31)
+# define R300_SCLK_FORCE_VAP (1<<21)
+# define R300_SCLK_FORCE_SR (1<<25)
+# define R300_SCLK_FORCE_PX (1<<26)
+# define R300_SCLK_FORCE_TX (1<<27)
+# define R300_SCLK_FORCE_US (1<<28)
+# define R300_SCLK_FORCE_SU (1<<30)
+#define R300_SCLK_CNTL2 0x1e /* PLL */
+# define R300_SCLK_TCL_MAX_DYN_STOP_LAT (1<<10)
+# define R300_SCLK_GA_MAX_DYN_STOP_LAT (1<<11)
+# define R300_SCLK_CBA_MAX_DYN_STOP_LAT (1<<12)
+# define R300_SCLK_FORCE_TCL (1<<13)
+# define R300_SCLK_FORCE_CBA (1<<14)
+# define R300_SCLK_FORCE_GA (1<<15)
+#define RADEON_SCLK_MORE_CNTL 0x0035 /* PLL */
+# define RADEON_SCLK_MORE_MAX_DYN_STOP_LAT 0x0007
+# define RADEON_SCLK_MORE_FORCEON 0x0700
+#define RADEON_SDRAM_MODE_REG 0x0158
+#define RADEON_SEQ8_DATA 0x03c5 /* VGA */
+#define RADEON_SEQ8_IDX 0x03c4 /* VGA */
+#define RADEON_SNAPSHOT_F_COUNT 0x0244
+#define RADEON_SNAPSHOT_VH_COUNTS 0x0240
+#define RADEON_SNAPSHOT_VIF_COUNT 0x024c
+#define RADEON_SRC_OFFSET 0x15ac
+#define RADEON_SRC_PITCH 0x15b0
+#define RADEON_SRC_PITCH_OFFSET 0x1428
+#define RADEON_SRC_SC_BOTTOM 0x165c
+#define RADEON_SRC_SC_BOTTOM_RIGHT 0x16f4
+#define RADEON_SRC_SC_RIGHT 0x1654
+#define RADEON_SRC_X 0x1414
+#define RADEON_SRC_X_Y 0x1590
+#define RADEON_SRC_Y 0x1418
+#define RADEON_SRC_Y_X 0x1434
+#define RADEON_STATUS 0x0f06 /* PCI */
+#define RADEON_SUBPIC_CNTL 0x0540 /* ? */
+#define RADEON_SUB_CLASS 0x0f0a /* PCI */
+#define RADEON_SURFACE_CNTL 0x0b00
+# define RADEON_SURF_TRANSLATION_DIS (1 << 8)
+# define RADEON_NONSURF_AP0_SWP_16BPP (1 << 20)
+# define RADEON_NONSURF_AP0_SWP_32BPP (1 << 21)
+# define RADEON_NONSURF_AP1_SWP_16BPP (1 << 22)
+# define RADEON_NONSURF_AP1_SWP_32BPP (1 << 23)
+#define RADEON_SURFACE0_INFO 0x0b0c
+# define RADEON_SURF_TILE_COLOR_MACRO (0 << 16)
+# define RADEON_SURF_TILE_COLOR_BOTH (1 << 16)
+# define RADEON_SURF_TILE_DEPTH_32BPP (2 << 16)
+# define RADEON_SURF_TILE_DEPTH_16BPP (3 << 16)
+# define R200_SURF_TILE_NONE (0 << 16)
+# define R200_SURF_TILE_COLOR_MACRO (1 << 16)
+# define R200_SURF_TILE_COLOR_MICRO (2 << 16)
+# define R200_SURF_TILE_COLOR_BOTH (3 << 16)
+# define R200_SURF_TILE_DEPTH_32BPP (4 << 16)
+# define R200_SURF_TILE_DEPTH_16BPP (5 << 16)
+# define R300_SURF_TILE_NONE (0 << 16)
+# define R300_SURF_TILE_COLOR_MACRO (1 << 16)
+# define R300_SURF_TILE_DEPTH_32BPP (2 << 16)
+# define RADEON_SURF_AP0_SWP_16BPP (1 << 20)
+# define RADEON_SURF_AP0_SWP_32BPP (1 << 21)
+# define RADEON_SURF_AP1_SWP_16BPP (1 << 22)
+# define RADEON_SURF_AP1_SWP_32BPP (1 << 23)
+#define RADEON_SURFACE0_LOWER_BOUND 0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND 0x0b08
+#define RADEON_SURFACE1_INFO 0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND 0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND 0x0b18
+#define RADEON_SURFACE2_INFO 0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND 0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND 0x0b28
+#define RADEON_SURFACE3_INFO 0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND 0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND 0x0b38
+#define RADEON_SURFACE4_INFO 0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND 0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND 0x0b48
+#define RADEON_SURFACE5_INFO 0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND 0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND 0x0b58
+#define RADEON_SURFACE6_INFO 0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND 0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND 0x0b68
+#define RADEON_SURFACE7_INFO 0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND 0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND 0x0b78
+#define RADEON_SW_SEMAPHORE 0x013c
+
+#define RADEON_TEST_DEBUG_CNTL 0x0120
+#define RADEON_TEST_DEBUG_CNTL__TEST_DEBUG_OUT_EN 0x00000001
+
+#define RADEON_TEST_DEBUG_MUX 0x0124
+#define RADEON_TEST_DEBUG_OUT 0x012c
+#define RADEON_TMDS_PLL_CNTL 0x02a8
+#define RADEON_TMDS_TRANSMITTER_CNTL 0x02a4
+# define RADEON_TMDS_TRANSMITTER_PLLEN 1
+# define RADEON_TMDS_TRANSMITTER_PLLRST 2
+#define RADEON_TRAIL_BRES_DEC 0x1614
+#define RADEON_TRAIL_BRES_ERR 0x160c
+#define RADEON_TRAIL_BRES_INC 0x1610
+#define RADEON_TRAIL_X 0x1618
+#define RADEON_TRAIL_X_SUB 0x1620
+
+#define RADEON_VCLK_ECP_CNTL 0x0008 /* PLL */
+# define RADEON_VCLK_SRC_SEL_MASK 0x03
+# define RADEON_VCLK_SRC_SEL_CPUCLK 0x00
+# define RADEON_VCLK_SRC_SEL_PSCANCLK 0x01
+# define RADEON_VCLK_SRC_SEL_BYTECLK 0x02
+# define RADEON_VCLK_SRC_SEL_PPLLCLK 0x03
+# define RADEON_PIXCLK_ALWAYS_ONb (1<<6)
+# define RADEON_PIXCLK_DAC_ALWAYS_ONb (1<<7)
+# define R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF (1<<23)
+
+#define RADEON_VENDOR_ID 0x0f00 /* PCI */
+#define RADEON_VGA_DDA_CONFIG 0x02e8
+#define RADEON_VGA_DDA_ON_OFF 0x02ec
+#define RADEON_VID_BUFFER_CONTROL 0x0900
+#define RADEON_VIDEOMUX_CNTL 0x0190
+
+/* VIP bus */
+#define RADEON_VIPH_CH0_DATA 0x0c00
+#define RADEON_VIPH_CH1_DATA 0x0c04
+#define RADEON_VIPH_CH2_DATA 0x0c08
+#define RADEON_VIPH_CH3_DATA 0x0c0c
+#define RADEON_VIPH_CH0_ADDR 0x0c10
+#define RADEON_VIPH_CH1_ADDR 0x0c14
+#define RADEON_VIPH_CH2_ADDR 0x0c18
+#define RADEON_VIPH_CH3_ADDR 0x0c1c
+#define RADEON_VIPH_CH0_SBCNT 0x0c20
+#define RADEON_VIPH_CH1_SBCNT 0x0c24
+#define RADEON_VIPH_CH2_SBCNT 0x0c28
+#define RADEON_VIPH_CH3_SBCNT 0x0c2c
+#define RADEON_VIPH_CH0_ABCNT 0x0c30
+#define RADEON_VIPH_CH1_ABCNT 0x0c34
+#define RADEON_VIPH_CH2_ABCNT 0x0c38
+#define RADEON_VIPH_CH3_ABCNT 0x0c3c
+#define RADEON_VIPH_CONTROL 0x0c40
+# define RADEON_VIP_BUSY 0
+# define RADEON_VIP_IDLE 1
+# define RADEON_VIP_RESET 2
+# define RADEON_VIPH_EN (1 << 21)
+#define RADEON_VIPH_DV_LAT 0x0c44
+#define RADEON_VIPH_BM_CHUNK 0x0c48
+#define RADEON_VIPH_DV_INT 0x0c4c
+#define RADEON_VIPH_TIMEOUT_STAT 0x0c50
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_STAT 0x00000010
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_AK 0x00000010
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REGR_DIS 0x01000000
+
+#define RADEON_VIPH_REG_DATA 0x0084
+#define RADEON_VIPH_REG_ADDR 0x0080
+
+
+#define RADEON_WAIT_UNTIL 0x1720
+# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
+# define RADEON_WAIT_RE_CRTC_VLINE (1 << 1)
+# define RADEON_WAIT_FE_CRTC_VLINE (1 << 2)
+# define RADEON_WAIT_CRTC_VLINE (1 << 3)
+# define RADEON_WAIT_DMA_VID_IDLE (1 << 8)
+# define RADEON_WAIT_DMA_GUI_IDLE (1 << 9)
+# define RADEON_WAIT_CMDFIFO (1 << 10) /* wait for CMDFIFO_ENTRIES */
+# define RADEON_WAIT_OV0_FLIP (1 << 11)
+# define RADEON_WAIT_AGP_FLUSH (1 << 13)
+# define RADEON_WAIT_2D_IDLE (1 << 14)
+# define RADEON_WAIT_3D_IDLE (1 << 15)
+# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
+# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
+# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
+# define RADEON_CMDFIFO_ENTRIES_SHIFT 10
+# define RADEON_CMDFIFO_ENTRIES_MASK 0x7f
+# define RADEON_WAIT_VAP_IDLE (1 << 28)
+# define RADEON_WAIT_BOTH_CRTC_PFLIP (1 << 30)
+# define RADEON_ENG_DISPLAY_SELECT_CRTC0 (0 << 31)
+# define RADEON_ENG_DISPLAY_SELECT_CRTC1 (1 << 31)
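[Editorial note, not part of the patch: WAIT_UNTIL lets software ask the hardware to stall further command processing until selected conditions hold, such as the 2D and 3D engines being idle with clean caches. The sketch below only shows how the bit mask is composed; drivers typically emit this value through the command processor ring rather than a direct register write, and wreg32()/stall_until_idle() here are hypothetical helpers.]

#include <stdint.h>

#define RADEON_WAIT_UNTIL          0x1720
#define RADEON_WAIT_2D_IDLECLEAN   (1 << 16)
#define RADEON_WAIT_3D_IDLECLEAN   (1 << 17)
#define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)

static inline void wreg32(volatile uint32_t *mmio, uint32_t reg, uint32_t val)
{
	mmio[reg / 4] = val;
}

/* Request a stall until the 2D and 3D engines are idle and their
 * caches have been flushed clean. */
static void stall_until_idle(volatile uint32_t *mmio)
{
	wreg32(mmio, RADEON_WAIT_UNTIL,
	       RADEON_WAIT_2D_IDLECLEAN |
	       RADEON_WAIT_3D_IDLECLEAN |
	       RADEON_WAIT_HOST_IDLECLEAN);
}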
+
+#define RADEON_X_MPLL_REF_FB_DIV 0x000a /* PLL */
+#define RADEON_XCLK_CNTL 0x000d /* PLL */
+#define RADEON_XDLL_CNTL 0x000c /* PLL */
+#define RADEON_XPLL_CNTL 0x000b /* PLL */
+
+
+
+ /* Registers for 3D/TCL */
+#define RADEON_PP_BORDER_COLOR_0 0x1d40
+#define RADEON_PP_BORDER_COLOR_1 0x1d44
+#define RADEON_PP_BORDER_COLOR_2 0x1d48
+#define RADEON_PP_CNTL 0x1c38
+# define RADEON_STIPPLE_ENABLE (1 << 0)
+# define RADEON_SCISSOR_ENABLE (1 << 1)
+# define RADEON_PATTERN_ENABLE (1 << 2)
+# define RADEON_SHADOW_ENABLE (1 << 3)
+# define RADEON_TEX_ENABLE_MASK (0xf << 4)
+# define RADEON_TEX_0_ENABLE (1 << 4)
+# define RADEON_TEX_1_ENABLE (1 << 5)
+# define RADEON_TEX_2_ENABLE (1 << 6)
+# define RADEON_TEX_3_ENABLE (1 << 7)
+# define RADEON_TEX_BLEND_ENABLE_MASK (0xf << 12)
+# define RADEON_TEX_BLEND_0_ENABLE (1 << 12)
+# define RADEON_TEX_BLEND_1_ENABLE (1 << 13)
+# define RADEON_TEX_BLEND_2_ENABLE (1 << 14)
+# define RADEON_TEX_BLEND_3_ENABLE (1 << 15)
+# define RADEON_PLANAR_YUV_ENABLE (1 << 20)
+# define RADEON_SPECULAR_ENABLE (1 << 21)
+# define RADEON_FOG_ENABLE (1 << 22)
+# define RADEON_ALPHA_TEST_ENABLE (1 << 23)
+# define RADEON_ANTI_ALIAS_NONE (0 << 24)
+# define RADEON_ANTI_ALIAS_LINE (1 << 24)
+# define RADEON_ANTI_ALIAS_POLY (2 << 24)
+# define RADEON_ANTI_ALIAS_LINE_POLY (3 << 24)
+# define RADEON_BUMP_MAP_ENABLE (1 << 26)
+# define RADEON_BUMPED_MAP_T0 (0 << 27)
+# define RADEON_BUMPED_MAP_T1 (1 << 27)
+# define RADEON_BUMPED_MAP_T2 (2 << 27)
+# define RADEON_TEX_3D_ENABLE_0 (1 << 29)
+# define RADEON_TEX_3D_ENABLE_1 (1 << 30)
+# define RADEON_MC_ENABLE (1 << 31)
+#define RADEON_PP_FOG_COLOR 0x1c18
+# define RADEON_FOG_COLOR_MASK 0x00ffffff
+# define RADEON_FOG_VERTEX (0 << 24)
+# define RADEON_FOG_TABLE (1 << 24)
+# define RADEON_FOG_USE_DEPTH (0 << 25)
+# define RADEON_FOG_USE_DIFFUSE_ALPHA (2 << 25)
+# define RADEON_FOG_USE_SPEC_ALPHA (3 << 25)
+#define RADEON_PP_LUM_MATRIX 0x1d00
+#define RADEON_PP_MISC 0x1c14
+# define RADEON_REF_ALPHA_MASK 0x000000ff
+# define RADEON_ALPHA_TEST_FAIL (0 << 8)
+# define RADEON_ALPHA_TEST_LESS (1 << 8)
+# define RADEON_ALPHA_TEST_LEQUAL (2 << 8)
+# define RADEON_ALPHA_TEST_EQUAL (3 << 8)
+# define RADEON_ALPHA_TEST_GEQUAL (4 << 8)
+# define RADEON_ALPHA_TEST_GREATER (5 << 8)
+# define RADEON_ALPHA_TEST_NEQUAL (6 << 8)
+# define RADEON_ALPHA_TEST_PASS (7 << 8)
+# define RADEON_ALPHA_TEST_OP_MASK (7 << 8)
+# define RADEON_CHROMA_FUNC_FAIL (0 << 16)
+# define RADEON_CHROMA_FUNC_PASS (1 << 16)
+# define RADEON_CHROMA_FUNC_NEQUAL (2 << 16)
+# define RADEON_CHROMA_FUNC_EQUAL (3 << 16)
+# define RADEON_CHROMA_KEY_NEAREST (0 << 18)
+# define RADEON_CHROMA_KEY_ZERO (1 << 18)
+# define RADEON_SHADOW_ID_AUTO_INC (1 << 20)
+# define RADEON_SHADOW_FUNC_EQUAL (0 << 21)
+# define RADEON_SHADOW_FUNC_NEQUAL (1 << 21)
+# define RADEON_SHADOW_PASS_1 (0 << 22)
+# define RADEON_SHADOW_PASS_2 (1 << 22)
+# define RADEON_RIGHT_HAND_CUBE_D3D (0 << 24)
+# define RADEON_RIGHT_HAND_CUBE_OGL (1 << 24)
+#define RADEON_PP_ROT_MATRIX_0 0x1d58
+#define RADEON_PP_ROT_MATRIX_1 0x1d5c
+#define RADEON_PP_TXFILTER_0 0x1c54
+#define RADEON_PP_TXFILTER_1 0x1c6c
+#define RADEON_PP_TXFILTER_2 0x1c84
+# define RADEON_MAG_FILTER_NEAREST (0 << 0)
+# define RADEON_MAG_FILTER_LINEAR (1 << 0)
+# define RADEON_MAG_FILTER_MASK (1 << 0)
+# define RADEON_MIN_FILTER_NEAREST (0 << 1)
+# define RADEON_MIN_FILTER_LINEAR (1 << 1)
+# define RADEON_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1)
+# define RADEON_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1)
+# define RADEON_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1)
+# define RADEON_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1)
+# define RADEON_MIN_FILTER_ANISO_NEAREST (8 << 1)
+# define RADEON_MIN_FILTER_ANISO_LINEAR (9 << 1)
+# define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1)
+# define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1)
+# define RADEON_MIN_FILTER_MASK (15 << 1)
+# define RADEON_MAX_ANISO_1_TO_1 (0 << 5)
+# define RADEON_MAX_ANISO_2_TO_1 (1 << 5)
+# define RADEON_MAX_ANISO_4_TO_1 (2 << 5)
+# define RADEON_MAX_ANISO_8_TO_1 (3 << 5)
+# define RADEON_MAX_ANISO_16_TO_1 (4 << 5)
+# define RADEON_MAX_ANISO_MASK (7 << 5)
+# define RADEON_LOD_BIAS_MASK (0xff << 8)
+# define RADEON_LOD_BIAS_SHIFT 8
+# define RADEON_MAX_MIP_LEVEL_MASK (0x0f << 16)
+# define RADEON_MAX_MIP_LEVEL_SHIFT 16
+# define RADEON_YUV_TO_RGB (1 << 20)
+# define RADEON_YUV_TEMPERATURE_COOL (0 << 21)
+# define RADEON_YUV_TEMPERATURE_HOT (1 << 21)
+# define RADEON_YUV_TEMPERATURE_MASK (1 << 21)
+# define RADEON_WRAPEN_S (1 << 22)
+# define RADEON_CLAMP_S_WRAP (0 << 23)
+# define RADEON_CLAMP_S_MIRROR (1 << 23)
+# define RADEON_CLAMP_S_CLAMP_LAST (2 << 23)
+# define RADEON_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23)
+# define RADEON_CLAMP_S_CLAMP_BORDER (4 << 23)
+# define RADEON_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23)
+# define RADEON_CLAMP_S_CLAMP_GL (6 << 23)
+# define RADEON_CLAMP_S_MIRROR_CLAMP_GL (7 << 23)
+# define RADEON_CLAMP_S_MASK (7 << 23)
+# define RADEON_WRAPEN_T (1 << 26)
+# define RADEON_CLAMP_T_WRAP (0 << 27)
+# define RADEON_CLAMP_T_MIRROR (1 << 27)
+# define RADEON_CLAMP_T_CLAMP_LAST (2 << 27)
+# define RADEON_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27)
+# define RADEON_CLAMP_T_CLAMP_BORDER (4 << 27)
+# define RADEON_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27)
+# define RADEON_CLAMP_T_CLAMP_GL (6 << 27)
+# define RADEON_CLAMP_T_MIRROR_CLAMP_GL (7 << 27)
+# define RADEON_CLAMP_T_MASK (7 << 27)
+# define RADEON_BORDER_MODE_OGL (0 << 31)
+# define RADEON_BORDER_MODE_D3D (1 << 31)
+#define RADEON_PP_TXFORMAT_0 0x1c58
+#define RADEON_PP_TXFORMAT_1 0x1c70
+#define RADEON_PP_TXFORMAT_2 0x1c88
+# define RADEON_TXFORMAT_I8 (0 << 0)
+# define RADEON_TXFORMAT_AI88 (1 << 0)
+# define RADEON_TXFORMAT_RGB332 (2 << 0)
+# define RADEON_TXFORMAT_ARGB1555 (3 << 0)
+# define RADEON_TXFORMAT_RGB565 (4 << 0)
+# define RADEON_TXFORMAT_ARGB4444 (5 << 0)
+# define RADEON_TXFORMAT_ARGB8888 (6 << 0)
+# define RADEON_TXFORMAT_RGBA8888 (7 << 0)
+# define RADEON_TXFORMAT_Y8 (8 << 0)
+# define RADEON_TXFORMAT_VYUY422 (10 << 0)
+# define RADEON_TXFORMAT_YVYU422 (11 << 0)
+# define RADEON_TXFORMAT_DXT1 (12 << 0)
+# define RADEON_TXFORMAT_DXT23 (14 << 0)
+# define RADEON_TXFORMAT_DXT45 (15 << 0)
+# define RADEON_TXFORMAT_FORMAT_MASK (31 << 0)
+# define RADEON_TXFORMAT_FORMAT_SHIFT 0
+# define RADEON_TXFORMAT_APPLE_YUV_MODE (1 << 5)
+# define RADEON_TXFORMAT_ALPHA_IN_MAP (1 << 6)
+# define RADEON_TXFORMAT_NON_POWER2 (1 << 7)
+# define RADEON_TXFORMAT_WIDTH_MASK (15 << 8)
+# define RADEON_TXFORMAT_WIDTH_SHIFT 8
+# define RADEON_TXFORMAT_HEIGHT_MASK (15 << 12)
+# define RADEON_TXFORMAT_HEIGHT_SHIFT 12
+# define RADEON_TXFORMAT_F5_WIDTH_MASK (15 << 16)
+# define RADEON_TXFORMAT_F5_WIDTH_SHIFT 16
+# define RADEON_TXFORMAT_F5_HEIGHT_MASK (15 << 20)
+# define RADEON_TXFORMAT_F5_HEIGHT_SHIFT 20
+# define RADEON_TXFORMAT_ST_ROUTE_STQ0 (0 << 24)
+# define RADEON_TXFORMAT_ST_ROUTE_MASK (3 << 24)
+# define RADEON_TXFORMAT_ST_ROUTE_STQ1 (1 << 24)
+# define RADEON_TXFORMAT_ST_ROUTE_STQ2 (2 << 24)
+# define RADEON_TXFORMAT_ENDIAN_NO_SWAP (0 << 26)
+# define RADEON_TXFORMAT_ENDIAN_16BPP_SWAP (1 << 26)
+# define RADEON_TXFORMAT_ENDIAN_32BPP_SWAP (2 << 26)
+# define RADEON_TXFORMAT_ENDIAN_HALFDW_SWAP (3 << 26)
+# define RADEON_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
+# define RADEON_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
+# define RADEON_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
+# define RADEON_TXFORMAT_PERSPECTIVE_ENABLE (1 << 31)
+#define RADEON_PP_CUBIC_FACES_0 0x1d24
+#define RADEON_PP_CUBIC_FACES_1 0x1d28
+#define RADEON_PP_CUBIC_FACES_2 0x1d2c
+# define RADEON_FACE_WIDTH_1_SHIFT 0
+# define RADEON_FACE_HEIGHT_1_SHIFT 4
+# define RADEON_FACE_WIDTH_1_MASK (0xf << 0)
+# define RADEON_FACE_HEIGHT_1_MASK (0xf << 4)
+# define RADEON_FACE_WIDTH_2_SHIFT 8
+# define RADEON_FACE_HEIGHT_2_SHIFT 12
+# define RADEON_FACE_WIDTH_2_MASK (0xf << 8)
+# define RADEON_FACE_HEIGHT_2_MASK (0xf << 12)
+# define RADEON_FACE_WIDTH_3_SHIFT 16
+# define RADEON_FACE_HEIGHT_3_SHIFT 20
+# define RADEON_FACE_WIDTH_3_MASK (0xf << 16)
+# define RADEON_FACE_HEIGHT_3_MASK (0xf << 20)
+# define RADEON_FACE_WIDTH_4_SHIFT 24
+# define RADEON_FACE_HEIGHT_4_SHIFT 28
+# define RADEON_FACE_WIDTH_4_MASK (0xf << 24)
+# define RADEON_FACE_HEIGHT_4_MASK (0xf << 28)
+
+#define RADEON_PP_TXOFFSET_0 0x1c5c
+#define RADEON_PP_TXOFFSET_1 0x1c74
+#define RADEON_PP_TXOFFSET_2 0x1c8c
+# define RADEON_TXO_ENDIAN_NO_SWAP (0 << 0)
+# define RADEON_TXO_ENDIAN_BYTE_SWAP (1 << 0)
+# define RADEON_TXO_ENDIAN_WORD_SWAP (2 << 0)
+# define RADEON_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+# define RADEON_TXO_MACRO_LINEAR (0 << 2)
+# define RADEON_TXO_MACRO_TILE (1 << 2)
+# define RADEON_TXO_MICRO_LINEAR (0 << 3)
+# define RADEON_TXO_MICRO_TILE_X2 (1 << 3)
+# define RADEON_TXO_MICRO_TILE_OPT (2 << 3)
+# define RADEON_TXO_OFFSET_MASK 0xffffffe0
+# define RADEON_TXO_OFFSET_SHIFT 5
+
+#define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */
+#define RADEON_PP_CUBIC_OFFSET_T0_1 0x1dd4
+#define RADEON_PP_CUBIC_OFFSET_T0_2 0x1dd8
+#define RADEON_PP_CUBIC_OFFSET_T0_3 0x1ddc
+#define RADEON_PP_CUBIC_OFFSET_T0_4 0x1de0
+#define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00
+#define RADEON_PP_CUBIC_OFFSET_T1_1 0x1e04
+#define RADEON_PP_CUBIC_OFFSET_T1_2 0x1e08
+#define RADEON_PP_CUBIC_OFFSET_T1_3 0x1e0c
+#define RADEON_PP_CUBIC_OFFSET_T1_4 0x1e10
+#define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14
+#define RADEON_PP_CUBIC_OFFSET_T2_1 0x1e18
+#define RADEON_PP_CUBIC_OFFSET_T2_2 0x1e1c
+#define RADEON_PP_CUBIC_OFFSET_T2_3 0x1e20
+#define RADEON_PP_CUBIC_OFFSET_T2_4 0x1e24
+
+#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */
+#define RADEON_PP_TEX_SIZE_1 0x1d0c
+#define RADEON_PP_TEX_SIZE_2 0x1d14
+# define RADEON_TEX_USIZE_MASK (0x7ff << 0)
+# define RADEON_TEX_USIZE_SHIFT 0
+# define RADEON_TEX_VSIZE_MASK (0x7ff << 16)
+# define RADEON_TEX_VSIZE_SHIFT 16
+# define RADEON_SIGNED_RGB_MASK (1 << 30)
+# define RADEON_SIGNED_RGB_SHIFT 30
+# define RADEON_SIGNED_ALPHA_MASK (1 << 31)
+# define RADEON_SIGNED_ALPHA_SHIFT 31
+#define RADEON_PP_TEX_PITCH_0 0x1d08 /* NPOT */
+#define RADEON_PP_TEX_PITCH_1 0x1d10 /* NPOT */
+#define RADEON_PP_TEX_PITCH_2 0x1d18 /* NPOT */
+/* note: bits 13-5: 32 byte aligned stride of texture map */
+
+#define RADEON_PP_TXCBLEND_0 0x1c60
+#define RADEON_PP_TXCBLEND_1 0x1c78
+#define RADEON_PP_TXCBLEND_2 0x1c90
+# define RADEON_COLOR_ARG_A_SHIFT 0
+# define RADEON_COLOR_ARG_A_MASK (0x1f << 0)
+# define RADEON_COLOR_ARG_A_ZERO (0 << 0)
+# define RADEON_COLOR_ARG_A_CURRENT_COLOR (2 << 0)
+# define RADEON_COLOR_ARG_A_CURRENT_ALPHA (3 << 0)
+# define RADEON_COLOR_ARG_A_DIFFUSE_COLOR (4 << 0)
+# define RADEON_COLOR_ARG_A_DIFFUSE_ALPHA (5 << 0)
+# define RADEON_COLOR_ARG_A_SPECULAR_COLOR (6 << 0)
+# define RADEON_COLOR_ARG_A_SPECULAR_ALPHA (7 << 0)
+# define RADEON_COLOR_ARG_A_TFACTOR_COLOR (8 << 0)
+# define RADEON_COLOR_ARG_A_TFACTOR_ALPHA (9 << 0)
+# define RADEON_COLOR_ARG_A_T0_COLOR (10 << 0)
+# define RADEON_COLOR_ARG_A_T0_ALPHA (11 << 0)
+# define RADEON_COLOR_ARG_A_T1_COLOR (12 << 0)
+# define RADEON_COLOR_ARG_A_T1_ALPHA (13 << 0)
+# define RADEON_COLOR_ARG_A_T2_COLOR (14 << 0)
+# define RADEON_COLOR_ARG_A_T2_ALPHA (15 << 0)
+# define RADEON_COLOR_ARG_A_T3_COLOR (16 << 0)
+# define RADEON_COLOR_ARG_A_T3_ALPHA (17 << 0)
+# define RADEON_COLOR_ARG_B_SHIFT 5
+# define RADEON_COLOR_ARG_B_MASK (0x1f << 5)
+# define RADEON_COLOR_ARG_B_ZERO (0 << 5)
+# define RADEON_COLOR_ARG_B_CURRENT_COLOR (2 << 5)
+# define RADEON_COLOR_ARG_B_CURRENT_ALPHA (3 << 5)
+# define RADEON_COLOR_ARG_B_DIFFUSE_COLOR (4 << 5)
+# define RADEON_COLOR_ARG_B_DIFFUSE_ALPHA (5 << 5)
+# define RADEON_COLOR_ARG_B_SPECULAR_COLOR (6 << 5)
+# define RADEON_COLOR_ARG_B_SPECULAR_ALPHA (7 << 5)
+# define RADEON_COLOR_ARG_B_TFACTOR_COLOR (8 << 5)
+# define RADEON_COLOR_ARG_B_TFACTOR_ALPHA (9 << 5)
+# define RADEON_COLOR_ARG_B_T0_COLOR (10 << 5)
+# define RADEON_COLOR_ARG_B_T0_ALPHA (11 << 5)
+# define RADEON_COLOR_ARG_B_T1_COLOR (12 << 5)
+# define RADEON_COLOR_ARG_B_T1_ALPHA (13 << 5)
+# define RADEON_COLOR_ARG_B_T2_COLOR (14 << 5)
+# define RADEON_COLOR_ARG_B_T2_ALPHA (15 << 5)
+# define RADEON_COLOR_ARG_B_T3_COLOR (16 << 5)
+# define RADEON_COLOR_ARG_B_T3_ALPHA (17 << 5)
+# define RADEON_COLOR_ARG_C_SHIFT 10
+# define RADEON_COLOR_ARG_C_MASK (0x1f << 10)
+# define RADEON_COLOR_ARG_C_ZERO (0 << 10)
+# define RADEON_COLOR_ARG_C_CURRENT_COLOR (2 << 10)
+# define RADEON_COLOR_ARG_C_CURRENT_ALPHA (3 << 10)
+# define RADEON_COLOR_ARG_C_DIFFUSE_COLOR (4 << 10)
+# define RADEON_COLOR_ARG_C_DIFFUSE_ALPHA (5 << 10)
+# define RADEON_COLOR_ARG_C_SPECULAR_COLOR (6 << 10)
+# define RADEON_COLOR_ARG_C_SPECULAR_ALPHA (7 << 10)
+# define RADEON_COLOR_ARG_C_TFACTOR_COLOR (8 << 10)
+# define RADEON_COLOR_ARG_C_TFACTOR_ALPHA (9 << 10)
+# define RADEON_COLOR_ARG_C_T0_COLOR (10 << 10)
+# define RADEON_COLOR_ARG_C_T0_ALPHA (11 << 10)
+# define RADEON_COLOR_ARG_C_T1_COLOR (12 << 10)
+# define RADEON_COLOR_ARG_C_T1_ALPHA (13 << 10)
+# define RADEON_COLOR_ARG_C_T2_COLOR (14 << 10)
+# define RADEON_COLOR_ARG_C_T2_ALPHA (15 << 10)
+# define RADEON_COLOR_ARG_C_T3_COLOR (16 << 10)
+# define RADEON_COLOR_ARG_C_T3_ALPHA (17 << 10)
+# define RADEON_COMP_ARG_A (1 << 15)
+# define RADEON_COMP_ARG_A_SHIFT 15
+# define RADEON_COMP_ARG_B (1 << 16)
+# define RADEON_COMP_ARG_B_SHIFT 16
+# define RADEON_COMP_ARG_C (1 << 17)
+# define RADEON_COMP_ARG_C_SHIFT 17
+# define RADEON_BLEND_CTL_MASK (7 << 18)
+# define RADEON_BLEND_CTL_ADD (0 << 18)
+# define RADEON_BLEND_CTL_SUBTRACT (1 << 18)
+# define RADEON_BLEND_CTL_ADDSIGNED (2 << 18)
+# define RADEON_BLEND_CTL_BLEND (3 << 18)
+# define RADEON_BLEND_CTL_DOT3 (4 << 18)
+# define RADEON_SCALE_SHIFT 21
+# define RADEON_SCALE_MASK (3 << 21)
+# define RADEON_SCALE_1X (0 << 21)
+# define RADEON_SCALE_2X (1 << 21)
+# define RADEON_SCALE_4X (2 << 21)
+# define RADEON_CLAMP_TX (1 << 23)
+# define RADEON_T0_EQ_TCUR (1 << 24)
+# define RADEON_T1_EQ_TCUR (1 << 25)
+# define RADEON_T2_EQ_TCUR (1 << 26)
+# define RADEON_T3_EQ_TCUR (1 << 27)
+# define RADEON_COLOR_ARG_MASK 0x1f
+# define RADEON_COMP_ARG_SHIFT 15
+#define RADEON_PP_TXABLEND_0 0x1c64
+#define RADEON_PP_TXABLEND_1 0x1c7c
+#define RADEON_PP_TXABLEND_2 0x1c94
+# define RADEON_ALPHA_ARG_A_SHIFT 0
+# define RADEON_ALPHA_ARG_A_MASK (0xf << 0)
+# define RADEON_ALPHA_ARG_A_ZERO (0 << 0)
+# define RADEON_ALPHA_ARG_A_CURRENT_ALPHA (1 << 0)
+# define RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA (2 << 0)
+# define RADEON_ALPHA_ARG_A_SPECULAR_ALPHA (3 << 0)
+# define RADEON_ALPHA_ARG_A_TFACTOR_ALPHA (4 << 0)
+# define RADEON_ALPHA_ARG_A_T0_ALPHA (5 << 0)
+# define RADEON_ALPHA_ARG_A_T1_ALPHA (6 << 0)
+# define RADEON_ALPHA_ARG_A_T2_ALPHA (7 << 0)
+# define RADEON_ALPHA_ARG_A_T3_ALPHA (8 << 0)
+# define RADEON_ALPHA_ARG_B_SHIFT 4
+# define RADEON_ALPHA_ARG_B_MASK (0xf << 4)
+# define RADEON_ALPHA_ARG_B_ZERO (0 << 4)
+# define RADEON_ALPHA_ARG_B_CURRENT_ALPHA (1 << 4)
+# define RADEON_ALPHA_ARG_B_DIFFUSE_ALPHA (2 << 4)
+# define RADEON_ALPHA_ARG_B_SPECULAR_ALPHA (3 << 4)
+# define RADEON_ALPHA_ARG_B_TFACTOR_ALPHA (4 << 4)
+# define RADEON_ALPHA_ARG_B_T0_ALPHA (5 << 4)
+# define RADEON_ALPHA_ARG_B_T1_ALPHA (6 << 4)
+# define RADEON_ALPHA_ARG_B_T2_ALPHA (7 << 4)
+# define RADEON_ALPHA_ARG_B_T3_ALPHA (8 << 4)
+# define RADEON_ALPHA_ARG_C_SHIFT 8
+# define RADEON_ALPHA_ARG_C_MASK (0xf << 8)
+# define RADEON_ALPHA_ARG_C_ZERO (0 << 8)
+# define RADEON_ALPHA_ARG_C_CURRENT_ALPHA (1 << 8)
+# define RADEON_ALPHA_ARG_C_DIFFUSE_ALPHA (2 << 8)
+# define RADEON_ALPHA_ARG_C_SPECULAR_ALPHA (3 << 8)
+# define RADEON_ALPHA_ARG_C_TFACTOR_ALPHA (4 << 8)
+# define RADEON_ALPHA_ARG_C_T0_ALPHA (5 << 8)
+# define RADEON_ALPHA_ARG_C_T1_ALPHA (6 << 8)
+# define RADEON_ALPHA_ARG_C_T2_ALPHA (7 << 8)
+# define RADEON_ALPHA_ARG_C_T3_ALPHA (8 << 8)
+# define RADEON_DOT_ALPHA_DONT_REPLICATE (1 << 9)
+# define RADEON_ALPHA_ARG_MASK 0xf
+
+#define RADEON_PP_TFACTOR_0 0x1c68
+#define RADEON_PP_TFACTOR_1 0x1c80
+#define RADEON_PP_TFACTOR_2 0x1c98
+
+#define RADEON_RB3D_BLENDCNTL 0x1c20
+# define RADEON_COMB_FCN_MASK (3 << 12)
+# define RADEON_COMB_FCN_ADD_CLAMP (0 << 12)
+# define RADEON_COMB_FCN_ADD_NOCLAMP (1 << 12)
+# define RADEON_COMB_FCN_SUB_CLAMP (2 << 12)
+# define RADEON_COMB_FCN_SUB_NOCLAMP (3 << 12)
+# define RADEON_SRC_BLEND_GL_ZERO (32 << 16)
+# define RADEON_SRC_BLEND_GL_ONE (33 << 16)
+# define RADEON_SRC_BLEND_GL_SRC_COLOR (34 << 16)
+# define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
+# define RADEON_SRC_BLEND_GL_DST_COLOR (36 << 16)
+# define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
+# define RADEON_SRC_BLEND_GL_SRC_ALPHA (38 << 16)
+# define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
+# define RADEON_SRC_BLEND_GL_DST_ALPHA (40 << 16)
+# define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
+# define RADEON_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16)
+# define RADEON_SRC_BLEND_MASK (63 << 16)
+# define RADEON_DST_BLEND_GL_ZERO (32 << 24)
+# define RADEON_DST_BLEND_GL_ONE (33 << 24)
+# define RADEON_DST_BLEND_GL_SRC_COLOR (34 << 24)
+# define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
+# define RADEON_DST_BLEND_GL_DST_COLOR (36 << 24)
+# define RADEON_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
+# define RADEON_DST_BLEND_GL_SRC_ALPHA (38 << 24)
+# define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
+# define RADEON_DST_BLEND_GL_DST_ALPHA (40 << 24)
+# define RADEON_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
+# define RADEON_DST_BLEND_MASK (63 << 24)
+#define RADEON_RB3D_CNTL 0x1c3c
+# define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
+# define RADEON_PLANE_MASK_ENABLE (1 << 1)
+# define RADEON_DITHER_ENABLE (1 << 2)
+# define RADEON_ROUND_ENABLE (1 << 3)
+# define RADEON_SCALE_DITHER_ENABLE (1 << 4)
+# define RADEON_DITHER_INIT (1 << 5)
+# define RADEON_ROP_ENABLE (1 << 6)
+# define RADEON_STENCIL_ENABLE (1 << 7)
+# define RADEON_Z_ENABLE (1 << 8)
+# define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9)
+# define RADEON_RB3D_COLOR_FORMAT_SHIFT 10
+
+# define RADEON_COLOR_FORMAT_ARGB1555 3
+# define RADEON_COLOR_FORMAT_RGB565 4
+# define RADEON_COLOR_FORMAT_ARGB8888 6
+# define RADEON_COLOR_FORMAT_RGB332 7
+# define RADEON_COLOR_FORMAT_Y8 8
+# define RADEON_COLOR_FORMAT_RGB8 9
+# define RADEON_COLOR_FORMAT_YUV422_VYUY 11
+# define RADEON_COLOR_FORMAT_YUV422_YVYU 12
+# define RADEON_COLOR_FORMAT_aYUV444 14
+# define RADEON_COLOR_FORMAT_ARGB4444 15
+
+# define RADEON_CLRCMP_FLIP_ENABLE (1 << 14)
+#define RADEON_RB3D_COLOROFFSET 0x1c40
+# define RADEON_COLOROFFSET_MASK 0xfffffff0
+#define RADEON_RB3D_COLORPITCH 0x1c48
+# define RADEON_COLORPITCH_MASK 0x000001ff8
+# define RADEON_COLOR_TILE_ENABLE (1 << 16)
+# define RADEON_COLOR_MICROTILE_ENABLE (1 << 17)
+# define RADEON_COLOR_ENDIAN_NO_SWAP (0 << 18)
+# define RADEON_COLOR_ENDIAN_WORD_SWAP (1 << 18)
+# define RADEON_COLOR_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_DEPTHOFFSET 0x1c24
+#define RADEON_RB3D_DEPTHPITCH 0x1c28
+# define RADEON_DEPTHPITCH_MASK 0x00001ff8
+# define RADEON_DEPTH_ENDIAN_NO_SWAP (0 << 18)
+# define RADEON_DEPTH_ENDIAN_WORD_SWAP (1 << 18)
+# define RADEON_DEPTH_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_PLANEMASK 0x1d84
+#define RADEON_RB3D_ROPCNTL 0x1d80
+# define RADEON_ROP_MASK (15 << 8)
+# define RADEON_ROP_CLEAR (0 << 8)
+# define RADEON_ROP_NOR (1 << 8)
+# define RADEON_ROP_AND_INVERTED (2 << 8)
+# define RADEON_ROP_COPY_INVERTED (3 << 8)
+# define RADEON_ROP_AND_REVERSE (4 << 8)
+# define RADEON_ROP_INVERT (5 << 8)
+# define RADEON_ROP_XOR (6 << 8)
+# define RADEON_ROP_NAND (7 << 8)
+# define RADEON_ROP_AND (8 << 8)
+# define RADEON_ROP_EQUIV (9 << 8)
+# define RADEON_ROP_NOOP (10 << 8)
+# define RADEON_ROP_OR_INVERTED (11 << 8)
+# define RADEON_ROP_COPY (12 << 8)
+# define RADEON_ROP_OR_REVERSE (13 << 8)
+# define RADEON_ROP_OR (14 << 8)
+# define RADEON_ROP_SET (15 << 8)
+#define RADEON_RB3D_STENCILREFMASK 0x1d7c
+# define RADEON_STENCIL_REF_SHIFT 0
+# define RADEON_STENCIL_REF_MASK (0xff << 0)
+# define RADEON_STENCIL_MASK_SHIFT 16
+# define RADEON_STENCIL_VALUE_MASK (0xff << 16)
+# define RADEON_STENCIL_WRITEMASK_SHIFT 24
+# define RADEON_STENCIL_WRITE_MASK (0xff << 24)
+#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
+# define RADEON_DEPTH_FORMAT_MASK (0xf << 0)
+# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
+# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
+# define RADEON_DEPTH_FORMAT_24BIT_FLOAT_Z (3 << 0)
+# define RADEON_DEPTH_FORMAT_32BIT_INT_Z (4 << 0)
+# define RADEON_DEPTH_FORMAT_32BIT_FLOAT_Z (5 << 0)
+# define RADEON_DEPTH_FORMAT_16BIT_FLOAT_W (7 << 0)
+# define RADEON_DEPTH_FORMAT_24BIT_FLOAT_W (9 << 0)
+# define RADEON_DEPTH_FORMAT_32BIT_FLOAT_W (11 << 0)
+# define RADEON_Z_TEST_NEVER (0 << 4)
+# define RADEON_Z_TEST_LESS (1 << 4)
+# define RADEON_Z_TEST_LEQUAL (2 << 4)
+# define RADEON_Z_TEST_EQUAL (3 << 4)
+# define RADEON_Z_TEST_GEQUAL (4 << 4)
+# define RADEON_Z_TEST_GREATER (5 << 4)
+# define RADEON_Z_TEST_NEQUAL (6 << 4)
+# define RADEON_Z_TEST_ALWAYS (7 << 4)
+# define RADEON_Z_TEST_MASK (7 << 4)
+# define RADEON_STENCIL_TEST_NEVER (0 << 12)
+# define RADEON_STENCIL_TEST_LESS (1 << 12)
+# define RADEON_STENCIL_TEST_LEQUAL (2 << 12)
+# define RADEON_STENCIL_TEST_EQUAL (3 << 12)
+# define RADEON_STENCIL_TEST_GEQUAL (4 << 12)
+# define RADEON_STENCIL_TEST_GREATER (5 << 12)
+# define RADEON_STENCIL_TEST_NEQUAL (6 << 12)
+# define RADEON_STENCIL_TEST_ALWAYS (7 << 12)
+# define RADEON_STENCIL_TEST_MASK (0x7 << 12)
+# define RADEON_STENCIL_FAIL_KEEP (0 << 16)
+# define RADEON_STENCIL_FAIL_ZERO (1 << 16)
+# define RADEON_STENCIL_FAIL_REPLACE (2 << 16)
+# define RADEON_STENCIL_FAIL_INC (3 << 16)
+# define RADEON_STENCIL_FAIL_DEC (4 << 16)
+# define RADEON_STENCIL_FAIL_INVERT (5 << 16)
+# define RADEON_STENCIL_FAIL_MASK (0x7 << 16)
+# define RADEON_STENCIL_ZPASS_KEEP (0 << 20)
+# define RADEON_STENCIL_ZPASS_ZERO (1 << 20)
+# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20)
+# define RADEON_STENCIL_ZPASS_INC (3 << 20)
+# define RADEON_STENCIL_ZPASS_DEC (4 << 20)
+# define RADEON_STENCIL_ZPASS_INVERT (5 << 20)
+# define RADEON_STENCIL_ZPASS_MASK (0x7 << 20)
+# define RADEON_STENCIL_ZFAIL_KEEP (0 << 24)
+# define RADEON_STENCIL_ZFAIL_ZERO (1 << 24)
+# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24)
+# define RADEON_STENCIL_ZFAIL_INC (3 << 24)
+# define RADEON_STENCIL_ZFAIL_DEC (4 << 24)
+# define RADEON_STENCIL_ZFAIL_INVERT (5 << 24)
+# define RADEON_STENCIL_ZFAIL_MASK (0x7 << 24)
+# define RADEON_Z_COMPRESSION_ENABLE (1 << 28)
+# define RADEON_FORCE_Z_DIRTY (1 << 29)
+# define RADEON_Z_WRITE_ENABLE (1 << 30)
+#define RADEON_RE_LINE_PATTERN 0x1cd0
+# define RADEON_LINE_PATTERN_MASK 0x0000ffff
+# define RADEON_LINE_REPEAT_COUNT_SHIFT 16
+# define RADEON_LINE_PATTERN_START_SHIFT 24
+# define RADEON_LINE_PATTERN_LITTLE_BIT_ORDER (0 << 28)
+# define RADEON_LINE_PATTERN_BIG_BIT_ORDER (1 << 28)
+# define RADEON_LINE_PATTERN_AUTO_RESET (1 << 29)
+#define RADEON_RE_LINE_STATE 0x1cd4
+# define RADEON_LINE_CURRENT_PTR_SHIFT 0
+# define RADEON_LINE_CURRENT_COUNT_SHIFT 8
+#define RADEON_RE_MISC 0x26c4
+# define RADEON_STIPPLE_COORD_MASK 0x1f
+# define RADEON_STIPPLE_X_OFFSET_SHIFT 0
+# define RADEON_STIPPLE_X_OFFSET_MASK (0x1f << 0)
+# define RADEON_STIPPLE_Y_OFFSET_SHIFT 8
+# define RADEON_STIPPLE_Y_OFFSET_MASK (0x1f << 8)
+# define RADEON_STIPPLE_LITTLE_BIT_ORDER (0 << 16)
+# define RADEON_STIPPLE_BIG_BIT_ORDER (1 << 16)
+#define RADEON_RE_SOLID_COLOR 0x1c1c
+#define RADEON_RE_TOP_LEFT 0x26c0
+# define RADEON_RE_LEFT_SHIFT 0
+# define RADEON_RE_TOP_SHIFT 16
+#define RADEON_RE_WIDTH_HEIGHT 0x1c44
+# define RADEON_RE_WIDTH_SHIFT 0
+# define RADEON_RE_HEIGHT_SHIFT 16
+
+#define RADEON_SE_CNTL 0x1c4c
+# define RADEON_FFACE_CULL_CW (0 << 0)
+# define RADEON_FFACE_CULL_CCW (1 << 0)
+# define RADEON_FFACE_CULL_DIR_MASK (1 << 0)
+# define RADEON_BFACE_CULL (0 << 1)
+# define RADEON_BFACE_SOLID (3 << 1)
+# define RADEON_FFACE_CULL (0 << 3)
+# define RADEON_FFACE_SOLID (3 << 3)
+# define RADEON_FFACE_CULL_MASK (3 << 3)
+# define RADEON_BADVTX_CULL_DISABLE (1 << 5)
+# define RADEON_FLAT_SHADE_VTX_0 (0 << 6)
+# define RADEON_FLAT_SHADE_VTX_1 (1 << 6)
+# define RADEON_FLAT_SHADE_VTX_2 (2 << 6)
+# define RADEON_FLAT_SHADE_VTX_LAST (3 << 6)
+# define RADEON_DIFFUSE_SHADE_SOLID (0 << 8)
+# define RADEON_DIFFUSE_SHADE_FLAT (1 << 8)
+# define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8)
+# define RADEON_DIFFUSE_SHADE_MASK (3 << 8)
+# define RADEON_ALPHA_SHADE_SOLID (0 << 10)
+# define RADEON_ALPHA_SHADE_FLAT (1 << 10)
+# define RADEON_ALPHA_SHADE_GOURAUD (2 << 10)
+# define RADEON_ALPHA_SHADE_MASK (3 << 10)
+# define RADEON_SPECULAR_SHADE_SOLID (0 << 12)
+# define RADEON_SPECULAR_SHADE_FLAT (1 << 12)
+# define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
+# define RADEON_SPECULAR_SHADE_MASK (3 << 12)
+# define RADEON_FOG_SHADE_SOLID (0 << 14)
+# define RADEON_FOG_SHADE_FLAT (1 << 14)
+# define RADEON_FOG_SHADE_GOURAUD (2 << 14)
+# define RADEON_FOG_SHADE_MASK (3 << 14)
+# define RADEON_ZBIAS_ENABLE_POINT (1 << 16)
+# define RADEON_ZBIAS_ENABLE_LINE (1 << 17)
+# define RADEON_ZBIAS_ENABLE_TRI (1 << 18)
+# define RADEON_WIDELINE_ENABLE (1 << 20)
+# define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24)
+# define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25)
+# define RADEON_VTX_PIX_CENTER_D3D (0 << 27)
+# define RADEON_VTX_PIX_CENTER_OGL (1 << 27)
+# define RADEON_ROUND_MODE_TRUNC (0 << 28)
+# define RADEON_ROUND_MODE_ROUND (1 << 28)
+# define RADEON_ROUND_MODE_ROUND_EVEN (2 << 28)
+# define RADEON_ROUND_MODE_ROUND_ODD (3 << 28)
+# define RADEON_ROUND_PREC_16TH_PIX (0 << 30)
+# define RADEON_ROUND_PREC_8TH_PIX (1 << 30)
+# define RADEON_ROUND_PREC_4TH_PIX (2 << 30)
+# define RADEON_ROUND_PREC_HALF_PIX (3 << 30)
+#define R200_RE_CNTL 0x1c50
+# define R200_STIPPLE_ENABLE 0x1
+# define R200_SCISSOR_ENABLE 0x2
+# define R200_PATTERN_ENABLE 0x4
+# define R200_PERSPECTIVE_ENABLE 0x8
+# define R200_POINT_SMOOTH 0x20
+# define R200_VTX_STQ0_D3D 0x00010000
+# define R200_VTX_STQ1_D3D 0x00040000
+# define R200_VTX_STQ2_D3D 0x00100000
+# define R200_VTX_STQ3_D3D 0x00400000
+# define R200_VTX_STQ4_D3D 0x01000000
+# define R200_VTX_STQ5_D3D 0x04000000
+#define RADEON_SE_CNTL_STATUS 0x2140
+# define RADEON_VC_NO_SWAP (0 << 0)
+# define RADEON_VC_16BIT_SWAP (1 << 0)
+# define RADEON_VC_32BIT_SWAP (2 << 0)
+# define RADEON_VC_HALF_DWORD_SWAP (3 << 0)
+# define RADEON_TCL_BYPASS (1 << 8)
+#define RADEON_SE_COORD_FMT 0x1c50
+# define RADEON_VTX_XY_PRE_MULT_1_OVER_W0 (1 << 0)
+# define RADEON_VTX_Z_PRE_MULT_1_OVER_W0 (1 << 1)
+# define RADEON_VTX_ST0_NONPARAMETRIC (1 << 8)
+# define RADEON_VTX_ST1_NONPARAMETRIC (1 << 9)
+# define RADEON_VTX_ST2_NONPARAMETRIC (1 << 10)
+# define RADEON_VTX_ST3_NONPARAMETRIC (1 << 11)
+# define RADEON_VTX_W0_NORMALIZE (1 << 12)
+# define RADEON_VTX_W0_IS_NOT_1_OVER_W0 (1 << 16)
+# define RADEON_VTX_ST0_PRE_MULT_1_OVER_W0 (1 << 17)
+# define RADEON_VTX_ST1_PRE_MULT_1_OVER_W0 (1 << 19)
+# define RADEON_VTX_ST2_PRE_MULT_1_OVER_W0 (1 << 21)
+# define RADEON_VTX_ST3_PRE_MULT_1_OVER_W0 (1 << 23)
+# define RADEON_TEX1_W_ROUTING_USE_W0 (0 << 26)
+# define RADEON_TEX1_W_ROUTING_USE_Q1 (1 << 26)
+#define RADEON_SE_LINE_WIDTH 0x1db8
+#define RADEON_SE_TCL_LIGHT_MODEL_CTL 0x226c
+# define RADEON_LIGHTING_ENABLE (1 << 0)
+# define RADEON_LIGHT_IN_MODELSPACE (1 << 1)
+# define RADEON_LOCAL_VIEWER (1 << 2)
+# define RADEON_NORMALIZE_NORMALS (1 << 3)
+# define RADEON_RESCALE_NORMALS (1 << 4)
+# define RADEON_SPECULAR_LIGHTS (1 << 5)
+# define RADEON_DIFFUSE_SPECULAR_COMBINE (1 << 6)
+# define RADEON_LIGHT_ALPHA (1 << 7)
+# define RADEON_LOCAL_LIGHT_VEC_GL (1 << 8)
+# define RADEON_LIGHT_NO_NORMAL_AMBIENT_ONLY (1 << 9)
+# define RADEON_LM_SOURCE_STATE_PREMULT 0
+# define RADEON_LM_SOURCE_STATE_MULT 1
+# define RADEON_LM_SOURCE_VERTEX_DIFFUSE 2
+# define RADEON_LM_SOURCE_VERTEX_SPECULAR 3
+# define RADEON_EMISSIVE_SOURCE_SHIFT 16
+# define RADEON_AMBIENT_SOURCE_SHIFT 18
+# define RADEON_DIFFUSE_SOURCE_SHIFT 20
+# define RADEON_SPECULAR_SOURCE_SHIFT 22
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_RED 0x2220
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_GREEN 0x2224
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_BLUE 0x2228
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_ALPHA 0x222c
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_RED 0x2230
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_GREEN 0x2234
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_BLUE 0x2238
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_ALPHA 0x223c
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_GREEN 0x2214
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_BLUE 0x2218
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_ALPHA 0x221c
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_RED 0x2240
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_GREEN 0x2244
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_BLUE 0x2248
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_ALPHA 0x224c
+#define RADEON_SE_TCL_MATRIX_SELECT_0 0x225c
+# define RADEON_MODELVIEW_0_SHIFT 0
+# define RADEON_MODELVIEW_1_SHIFT 4
+# define RADEON_MODELVIEW_2_SHIFT 8
+# define RADEON_MODELVIEW_3_SHIFT 12
+# define RADEON_IT_MODELVIEW_0_SHIFT 16
+# define RADEON_IT_MODELVIEW_1_SHIFT 20
+# define RADEON_IT_MODELVIEW_2_SHIFT 24
+# define RADEON_IT_MODELVIEW_3_SHIFT 28
+#define RADEON_SE_TCL_MATRIX_SELECT_1 0x2260
+# define RADEON_MODELPROJECT_0_SHIFT 0
+# define RADEON_MODELPROJECT_1_SHIFT 4
+# define RADEON_MODELPROJECT_2_SHIFT 8
+# define RADEON_MODELPROJECT_3_SHIFT 12
+# define RADEON_TEXMAT_0_SHIFT 16
+# define RADEON_TEXMAT_1_SHIFT 20
+# define RADEON_TEXMAT_2_SHIFT 24
+# define RADEON_TEXMAT_3_SHIFT 28
+
+
+#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254
+# define RADEON_TCL_VTX_W0 (1 << 0)
+# define RADEON_TCL_VTX_FP_DIFFUSE (1 << 1)
+# define RADEON_TCL_VTX_FP_ALPHA (1 << 2)
+# define RADEON_TCL_VTX_PK_DIFFUSE (1 << 3)
+# define RADEON_TCL_VTX_FP_SPEC (1 << 4)
+# define RADEON_TCL_VTX_FP_FOG (1 << 5)
+# define RADEON_TCL_VTX_PK_SPEC (1 << 6)
+# define RADEON_TCL_VTX_ST0 (1 << 7)
+# define RADEON_TCL_VTX_ST1 (1 << 8)
+# define RADEON_TCL_VTX_Q1 (1 << 9)
+# define RADEON_TCL_VTX_ST2 (1 << 10)
+# define RADEON_TCL_VTX_Q2 (1 << 11)
+# define RADEON_TCL_VTX_ST3 (1 << 12)
+# define RADEON_TCL_VTX_Q3 (1 << 13)
+# define RADEON_TCL_VTX_Q0 (1 << 14)
+# define RADEON_TCL_VTX_WEIGHT_COUNT_SHIFT 15
+# define RADEON_TCL_VTX_NORM0 (1 << 18)
+# define RADEON_TCL_VTX_XY1 (1 << 27)
+# define RADEON_TCL_VTX_Z1 (1 << 28)
+# define RADEON_TCL_VTX_W1 (1 << 29)
+# define RADEON_TCL_VTX_NORM1 (1 << 30)
+# define RADEON_TCL_VTX_Z0 (1 << 31)
+
+#define RADEON_SE_TCL_OUTPUT_VTX_SEL 0x2258
+# define RADEON_TCL_COMPUTE_XYZW (1 << 0)
+# define RADEON_TCL_COMPUTE_DIFFUSE (1 << 1)
+# define RADEON_TCL_COMPUTE_SPECULAR (1 << 2)
+# define RADEON_TCL_FORCE_NAN_IF_COLOR_NAN (1 << 3)
+# define RADEON_TCL_FORCE_INORDER_PROC (1 << 4)
+# define RADEON_TCL_TEX_INPUT_TEX_0 0
+# define RADEON_TCL_TEX_INPUT_TEX_1 1
+# define RADEON_TCL_TEX_INPUT_TEX_2 2
+# define RADEON_TCL_TEX_INPUT_TEX_3 3
+# define RADEON_TCL_TEX_COMPUTED_TEX_0 8
+# define RADEON_TCL_TEX_COMPUTED_TEX_1 9
+# define RADEON_TCL_TEX_COMPUTED_TEX_2 10
+# define RADEON_TCL_TEX_COMPUTED_TEX_3 11
+# define RADEON_TCL_TEX_0_OUTPUT_SHIFT 16
+# define RADEON_TCL_TEX_1_OUTPUT_SHIFT 20
+# define RADEON_TCL_TEX_2_OUTPUT_SHIFT 24
+# define RADEON_TCL_TEX_3_OUTPUT_SHIFT 28
+
+#define RADEON_SE_TCL_PER_LIGHT_CTL_0 0x2270
+# define RADEON_LIGHT_0_ENABLE (1 << 0)
+# define RADEON_LIGHT_0_ENABLE_AMBIENT (1 << 1)
+# define RADEON_LIGHT_0_ENABLE_SPECULAR (1 << 2)
+# define RADEON_LIGHT_0_IS_LOCAL (1 << 3)
+# define RADEON_LIGHT_0_IS_SPOT (1 << 4)
+# define RADEON_LIGHT_0_DUAL_CONE (1 << 5)
+# define RADEON_LIGHT_0_ENABLE_RANGE_ATTEN (1 << 6)
+# define RADEON_LIGHT_0_CONSTANT_RANGE_ATTEN (1 << 7)
+# define RADEON_LIGHT_0_SHIFT 0
+# define RADEON_LIGHT_1_ENABLE (1 << 16)
+# define RADEON_LIGHT_1_ENABLE_AMBIENT (1 << 17)
+# define RADEON_LIGHT_1_ENABLE_SPECULAR (1 << 18)
+# define RADEON_LIGHT_1_IS_LOCAL (1 << 19)
+# define RADEON_LIGHT_1_IS_SPOT (1 << 20)
+# define RADEON_LIGHT_1_DUAL_CONE (1 << 21)
+# define RADEON_LIGHT_1_ENABLE_RANGE_ATTEN (1 << 22)
+# define RADEON_LIGHT_1_CONSTANT_RANGE_ATTEN (1 << 23)
+# define RADEON_LIGHT_1_SHIFT 16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_1 0x2274
+# define RADEON_LIGHT_2_SHIFT 0
+# define RADEON_LIGHT_3_SHIFT 16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_2 0x2278
+# define RADEON_LIGHT_4_SHIFT 0
+# define RADEON_LIGHT_5_SHIFT 16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_3 0x227c
+# define RADEON_LIGHT_6_SHIFT 0
+# define RADEON_LIGHT_7_SHIFT 16
+
+#define RADEON_SE_TCL_SHININESS 0x2250
+
+#define RADEON_SE_TCL_TEXTURE_PROC_CTL 0x2268
+# define RADEON_TEXGEN_TEXMAT_0_ENABLE (1 << 0)
+# define RADEON_TEXGEN_TEXMAT_1_ENABLE (1 << 1)
+# define RADEON_TEXGEN_TEXMAT_2_ENABLE (1 << 2)
+# define RADEON_TEXGEN_TEXMAT_3_ENABLE (1 << 3)
+# define RADEON_TEXMAT_0_ENABLE (1 << 4)
+# define RADEON_TEXMAT_1_ENABLE (1 << 5)
+# define RADEON_TEXMAT_2_ENABLE (1 << 6)
+# define RADEON_TEXMAT_3_ENABLE (1 << 7)
+# define RADEON_TEXGEN_INPUT_MASK 0xf
+# define RADEON_TEXGEN_INPUT_TEXCOORD_0 0
+# define RADEON_TEXGEN_INPUT_TEXCOORD_1 1
+# define RADEON_TEXGEN_INPUT_TEXCOORD_2 2
+# define RADEON_TEXGEN_INPUT_TEXCOORD_3 3
+# define RADEON_TEXGEN_INPUT_OBJ 4
+# define RADEON_TEXGEN_INPUT_EYE 5
+# define RADEON_TEXGEN_INPUT_EYE_NORMAL 6
+# define RADEON_TEXGEN_INPUT_EYE_REFLECT 7
+# define RADEON_TEXGEN_INPUT_EYE_NORMALIZED 8
+# define RADEON_TEXGEN_0_INPUT_SHIFT 16
+# define RADEON_TEXGEN_1_INPUT_SHIFT 20
+# define RADEON_TEXGEN_2_INPUT_SHIFT 24
+# define RADEON_TEXGEN_3_INPUT_SHIFT 28
+
+#define RADEON_SE_TCL_UCP_VERT_BLEND_CTL 0x2264
+# define RADEON_UCP_IN_CLIP_SPACE (1 << 0)
+# define RADEON_UCP_IN_MODEL_SPACE (1 << 1)
+# define RADEON_UCP_ENABLE_0 (1 << 2)
+# define RADEON_UCP_ENABLE_1 (1 << 3)
+# define RADEON_UCP_ENABLE_2 (1 << 4)
+# define RADEON_UCP_ENABLE_3 (1 << 5)
+# define RADEON_UCP_ENABLE_4 (1 << 6)
+# define RADEON_UCP_ENABLE_5 (1 << 7)
+# define RADEON_TCL_FOG_MASK (3 << 8)
+# define RADEON_TCL_FOG_DISABLE (0 << 8)
+# define RADEON_TCL_FOG_EXP (1 << 8)
+# define RADEON_TCL_FOG_EXP2 (2 << 8)
+# define RADEON_TCL_FOG_LINEAR (3 << 8)
+# define RADEON_RNG_BASED_FOG (1 << 10)
+# define RADEON_LIGHT_TWOSIDE (1 << 11)
+# define RADEON_BLEND_OP_COUNT_MASK (7 << 12)
+# define RADEON_BLEND_OP_COUNT_SHIFT 12
+# define RADEON_POSITION_BLEND_OP_ENABLE (1 << 16)
+# define RADEON_NORMAL_BLEND_OP_ENABLE (1 << 17)
+# define RADEON_VERTEX_BLEND_SRC_0_PRIMARY (1 << 18)
+# define RADEON_VERTEX_BLEND_SRC_0_SECONDARY (1 << 18)
+# define RADEON_VERTEX_BLEND_SRC_1_PRIMARY (1 << 19)
+# define RADEON_VERTEX_BLEND_SRC_1_SECONDARY (1 << 19)
+# define RADEON_VERTEX_BLEND_SRC_2_PRIMARY (1 << 20)
+# define RADEON_VERTEX_BLEND_SRC_2_SECONDARY (1 << 20)
+# define RADEON_VERTEX_BLEND_SRC_3_PRIMARY (1 << 21)
+# define RADEON_VERTEX_BLEND_SRC_3_SECONDARY (1 << 21)
+# define RADEON_VERTEX_BLEND_WGT_MINUS_ONE (1 << 22)
+# define RADEON_CULL_FRONT_IS_CW (0 << 28)
+# define RADEON_CULL_FRONT_IS_CCW (1 << 28)
+# define RADEON_CULL_FRONT (1 << 29)
+# define RADEON_CULL_BACK (1 << 30)
+# define RADEON_FORCE_W_TO_ONE (1 << 31)
+
+#define RADEON_SE_VPORT_XSCALE 0x1d98
+#define RADEON_SE_VPORT_XOFFSET 0x1d9c
+#define RADEON_SE_VPORT_YSCALE 0x1da0
+#define RADEON_SE_VPORT_YOFFSET 0x1da4
+#define RADEON_SE_VPORT_ZSCALE 0x1da8
+#define RADEON_SE_VPORT_ZOFFSET 0x1dac
+#define RADEON_SE_ZBIAS_FACTOR 0x1db0
+#define RADEON_SE_ZBIAS_CONSTANT 0x1db4
+
+#define RADEON_SE_VTX_FMT 0x2080
+# define RADEON_SE_VTX_FMT_XY 0x00000000
+# define RADEON_SE_VTX_FMT_W0 0x00000001
+# define RADEON_SE_VTX_FMT_FPCOLOR 0x00000002
+# define RADEON_SE_VTX_FMT_FPALPHA 0x00000004
+# define RADEON_SE_VTX_FMT_PKCOLOR 0x00000008
+# define RADEON_SE_VTX_FMT_FPSPEC 0x00000010
+# define RADEON_SE_VTX_FMT_FPFOG 0x00000020
+# define RADEON_SE_VTX_FMT_PKSPEC 0x00000040
+# define RADEON_SE_VTX_FMT_ST0 0x00000080
+# define RADEON_SE_VTX_FMT_ST1 0x00000100
+# define RADEON_SE_VTX_FMT_Q1 0x00000200
+# define RADEON_SE_VTX_FMT_ST2 0x00000400
+# define RADEON_SE_VTX_FMT_Q2 0x00000800
+# define RADEON_SE_VTX_FMT_ST3 0x00001000
+# define RADEON_SE_VTX_FMT_Q3 0x00002000
+# define RADEON_SE_VTX_FMT_Q0 0x00004000
+# define RADEON_SE_VTX_FMT_BLND_WEIGHT_CNT_MASK 0x00038000
+# define RADEON_SE_VTX_FMT_N0 0x00040000
+# define RADEON_SE_VTX_FMT_XY1 0x08000000
+# define RADEON_SE_VTX_FMT_Z1 0x10000000
+# define RADEON_SE_VTX_FMT_W1 0x20000000
+# define RADEON_SE_VTX_FMT_N1 0x40000000
+# define RADEON_SE_VTX_FMT_Z 0x80000000
+
+#define RADEON_SE_VF_CNTL 0x2084
+# define RADEON_VF_PRIM_TYPE_POINT_LIST 1
+# define RADEON_VF_PRIM_TYPE_LINE_LIST 2
+# define RADEON_VF_PRIM_TYPE_LINE_STRIP 3
+# define RADEON_VF_PRIM_TYPE_TRIANGLE_LIST 4
+# define RADEON_VF_PRIM_TYPE_TRIANGLE_FAN 5
+# define RADEON_VF_PRIM_TYPE_TRIANGLE_STRIP 6
+# define RADEON_VF_PRIM_TYPE_TRIANGLE_FLAG 7
+# define RADEON_VF_PRIM_TYPE_RECTANGLE_LIST 8
+# define RADEON_VF_PRIM_TYPE_POINT_LIST_3 9
+# define RADEON_VF_PRIM_TYPE_LINE_LIST_3 10
+# define RADEON_VF_PRIM_TYPE_SPIRIT_LIST 11
+# define RADEON_VF_PRIM_TYPE_LINE_LOOP 12
+# define RADEON_VF_PRIM_TYPE_QUAD_LIST 13
+# define RADEON_VF_PRIM_TYPE_QUAD_STRIP 14
+# define RADEON_VF_PRIM_TYPE_POLYGON 15
+# define RADEON_VF_PRIM_WALK_STATE (0<<4)
+# define RADEON_VF_PRIM_WALK_INDEX (1<<4)
+# define RADEON_VF_PRIM_WALK_LIST (2<<4)
+# define RADEON_VF_PRIM_WALK_DATA (3<<4)
+# define RADEON_VF_COLOR_ORDER_RGBA (1<<6)
+# define RADEON_VF_RADEON_MODE (1<<8)
+# define RADEON_VF_TCL_OUTPUT_CTL_ENA (1<<9)
+# define RADEON_VF_PROG_STREAM_ENA (1<<10)
+# define RADEON_VF_INDEX_SIZE_SHIFT 11
+# define RADEON_VF_NUM_VERTICES_SHIFT 16
+
+#define RADEON_SE_PORT_DATA0 0x2000
+
+#define R200_SE_VAP_CNTL 0x2080
+# define R200_VAP_TCL_ENABLE 0x00000001
+# define R200_VAP_SINGLE_BUF_STATE_ENABLE 0x00000010
+# define R200_VAP_FORCE_W_TO_ONE 0x00010000
+# define R200_VAP_D3D_TEX_DEFAULT 0x00020000
+# define R200_VAP_VF_MAX_VTX_NUM__SHIFT 18
+# define R200_VAP_VF_MAX_VTX_NUM (9 << 18)
+# define R200_VAP_DX_CLIP_SPACE_DEF 0x00400000
+#define R200_VF_MAX_VTX_INDX 0x210c
+#define R200_VF_MIN_VTX_INDX 0x2110
+#define R200_SE_VTE_CNTL 0x20b0
+# define R200_VPORT_X_SCALE_ENA 0x00000001
+# define R200_VPORT_X_OFFSET_ENA 0x00000002
+# define R200_VPORT_Y_SCALE_ENA 0x00000004
+# define R200_VPORT_Y_OFFSET_ENA 0x00000008
+# define R200_VPORT_Z_SCALE_ENA 0x00000010
+# define R200_VPORT_Z_OFFSET_ENA 0x00000020
+# define R200_VTX_XY_FMT 0x00000100
+# define R200_VTX_Z_FMT 0x00000200
+# define R200_VTX_W0_FMT 0x00000400
+# define R200_VTX_W0_NORMALIZE 0x00000800
+# define R200_VTX_ST_DENORMALIZED 0x00001000
+#define R200_SE_VAP_CNTL_STATUS 0x2140
+# define R200_VC_NO_SWAP (0 << 0)
+# define R200_VC_16BIT_SWAP (1 << 0)
+# define R200_VC_32BIT_SWAP (2 << 0)
+#define R200_PP_TXFILTER_0 0x2c00
+#define R200_PP_TXFILTER_1 0x2c20
+#define R200_PP_TXFILTER_2 0x2c40
+#define R200_PP_TXFILTER_3 0x2c60
+#define R200_PP_TXFILTER_4 0x2c80
+#define R200_PP_TXFILTER_5 0x2ca0
+# define R200_MAG_FILTER_NEAREST (0 << 0)
+# define R200_MAG_FILTER_LINEAR (1 << 0)
+# define R200_MAG_FILTER_MASK (1 << 0)
+# define R200_MIN_FILTER_NEAREST (0 << 1)
+# define R200_MIN_FILTER_LINEAR (1 << 1)
+# define R200_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1)
+# define R200_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1)
+# define R200_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1)
+# define R200_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1)
+# define R200_MIN_FILTER_ANISO_NEAREST (8 << 1)
+# define R200_MIN_FILTER_ANISO_LINEAR (9 << 1)
+# define R200_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1)
+# define R200_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1)
+# define R200_MIN_FILTER_MASK (15 << 1)
+# define R200_MAX_ANISO_1_TO_1 (0 << 5)
+# define R200_MAX_ANISO_2_TO_1 (1 << 5)
+# define R200_MAX_ANISO_4_TO_1 (2 << 5)
+# define R200_MAX_ANISO_8_TO_1 (3 << 5)
+# define R200_MAX_ANISO_16_TO_1 (4 << 5)
+# define R200_MAX_ANISO_MASK (7 << 5)
+# define R200_MAX_MIP_LEVEL_MASK (0x0f << 16)
+# define R200_MAX_MIP_LEVEL_SHIFT 16
+# define R200_YUV_TO_RGB (1 << 20)
+# define R200_YUV_TEMPERATURE_COOL (0 << 21)
+# define R200_YUV_TEMPERATURE_HOT (1 << 21)
+# define R200_YUV_TEMPERATURE_MASK (1 << 21)
+# define R200_WRAPEN_S (1 << 22)
+# define R200_CLAMP_S_WRAP (0 << 23)
+# define R200_CLAMP_S_MIRROR (1 << 23)
+# define R200_CLAMP_S_CLAMP_LAST (2 << 23)
+# define R200_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23)
+# define R200_CLAMP_S_CLAMP_BORDER (4 << 23)
+# define R200_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23)
+# define R200_CLAMP_S_CLAMP_GL (6 << 23)
+# define R200_CLAMP_S_MIRROR_CLAMP_GL (7 << 23)
+# define R200_CLAMP_S_MASK (7 << 23)
+# define R200_WRAPEN_T (1 << 26)
+# define R200_CLAMP_T_WRAP (0 << 27)
+# define R200_CLAMP_T_MIRROR (1 << 27)
+# define R200_CLAMP_T_CLAMP_LAST (2 << 27)
+# define R200_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27)
+# define R200_CLAMP_T_CLAMP_BORDER (4 << 27)
+# define R200_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27)
+# define R200_CLAMP_T_CLAMP_GL (6 << 27)
+# define R200_CLAMP_T_MIRROR_CLAMP_GL (7 << 27)
+# define R200_CLAMP_T_MASK (7 << 27)
+# define R200_KILL_LT_ZERO (1 << 30)
+# define R200_BORDER_MODE_OGL (0 << 31)
+# define R200_BORDER_MODE_D3D (1 << 31)
+#define R200_PP_TXFORMAT_0 0x2c04
+#define R200_PP_TXFORMAT_1 0x2c24
+#define R200_PP_TXFORMAT_2 0x2c44
+#define R200_PP_TXFORMAT_3 0x2c64
+#define R200_PP_TXFORMAT_4 0x2c84
+#define R200_PP_TXFORMAT_5 0x2ca4
+# define R200_TXFORMAT_I8 (0 << 0)
+# define R200_TXFORMAT_AI88 (1 << 0)
+# define R200_TXFORMAT_RGB332 (2 << 0)
+# define R200_TXFORMAT_ARGB1555 (3 << 0)
+# define R200_TXFORMAT_RGB565 (4 << 0)
+# define R200_TXFORMAT_ARGB4444 (5 << 0)
+# define R200_TXFORMAT_ARGB8888 (6 << 0)
+# define R200_TXFORMAT_RGBA8888 (7 << 0)
+# define R200_TXFORMAT_Y8 (8 << 0)
+# define R200_TXFORMAT_AVYU4444 (9 << 0)
+# define R200_TXFORMAT_VYUY422 (10 << 0)
+# define R200_TXFORMAT_YVYU422 (11 << 0)
+# define R200_TXFORMAT_DXT1 (12 << 0)
+# define R200_TXFORMAT_DXT23 (14 << 0)
+# define R200_TXFORMAT_DXT45 (15 << 0)
+# define R200_TXFORMAT_ABGR8888 (22 << 0)
+# define R200_TXFORMAT_FORMAT_MASK (31 << 0)
+# define R200_TXFORMAT_FORMAT_SHIFT 0
+# define R200_TXFORMAT_ALPHA_IN_MAP (1 << 6)
+# define R200_TXFORMAT_NON_POWER2 (1 << 7)
+# define R200_TXFORMAT_WIDTH_MASK (15 << 8)
+# define R200_TXFORMAT_WIDTH_SHIFT 8
+# define R200_TXFORMAT_HEIGHT_MASK (15 << 12)
+# define R200_TXFORMAT_HEIGHT_SHIFT 12
+# define R200_TXFORMAT_F5_WIDTH_MASK (15 << 16) /* cube face 5 */
+# define R200_TXFORMAT_F5_WIDTH_SHIFT 16
+# define R200_TXFORMAT_F5_HEIGHT_MASK (15 << 20)
+# define R200_TXFORMAT_F5_HEIGHT_SHIFT 20
+# define R200_TXFORMAT_ST_ROUTE_STQ0 (0 << 24)
+# define R200_TXFORMAT_ST_ROUTE_STQ1 (1 << 24)
+# define R200_TXFORMAT_ST_ROUTE_STQ2 (2 << 24)
+# define R200_TXFORMAT_ST_ROUTE_STQ3 (3 << 24)
+# define R200_TXFORMAT_ST_ROUTE_STQ4 (4 << 24)
+# define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24)
+# define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24)
+# define R200_TXFORMAT_ST_ROUTE_SHIFT 24
+# define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
+# define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
+# define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
+#define R200_PP_TXFORMAT_X_0 0x2c08
+#define R200_PP_TXFORMAT_X_1 0x2c28
+#define R200_PP_TXFORMAT_X_2 0x2c48
+#define R200_PP_TXFORMAT_X_3 0x2c68
+#define R200_PP_TXFORMAT_X_4 0x2c88
+#define R200_PP_TXFORMAT_X_5 0x2ca8
+
+#define R200_PP_TXSIZE_0 0x2c0c /* NPOT only */
+#define R200_PP_TXSIZE_1 0x2c2c /* NPOT only */
+#define R200_PP_TXSIZE_2 0x2c4c /* NPOT only */
+#define R200_PP_TXSIZE_3 0x2c6c /* NPOT only */
+#define R200_PP_TXSIZE_4 0x2c8c /* NPOT only */
+#define R200_PP_TXSIZE_5 0x2cac /* NPOT only */
+
+#define R200_PP_TXPITCH_0 0x2c10 /* NPOT only */
+#define R200_PP_TXPITCH_1 0x2c30 /* NPOT only */
+#define R200_PP_TXPITCH_2 0x2c50 /* NPOT only */
+#define R200_PP_TXPITCH_3 0x2c70 /* NPOT only */
+#define R200_PP_TXPITCH_4 0x2c90 /* NPOT only */
+#define R200_PP_TXPITCH_5 0x2cb0 /* NPOT only */
+
+#define R200_PP_TXOFFSET_0 0x2d00
+# define R200_TXO_ENDIAN_NO_SWAP (0 << 0)
+# define R200_TXO_ENDIAN_BYTE_SWAP (1 << 0)
+# define R200_TXO_ENDIAN_WORD_SWAP (2 << 0)
+# define R200_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+# define R200_TXO_MACRO_LINEAR (0 << 2)
+# define R200_TXO_MACRO_TILE (1 << 2)
+# define R200_TXO_MICRO_LINEAR (0 << 3)
+# define R200_TXO_MICRO_TILE (1 << 3)
+# define R200_TXO_OFFSET_MASK 0xffffffe0
+# define R200_TXO_OFFSET_SHIFT 5
+#define R200_PP_TXOFFSET_1 0x2d18
+#define R200_PP_TXOFFSET_2 0x2d30
+#define R200_PP_TXOFFSET_3 0x2d48
+#define R200_PP_TXOFFSET_4 0x2d60
+#define R200_PP_TXOFFSET_5 0x2d78
+
+#define R200_PP_TFACTOR_0 0x2ee0
+#define R200_PP_TFACTOR_1 0x2ee4
+#define R200_PP_TFACTOR_2 0x2ee8
+#define R200_PP_TFACTOR_3 0x2eec
+#define R200_PP_TFACTOR_4 0x2ef0
+#define R200_PP_TFACTOR_5 0x2ef4
+
+#define R200_PP_TXCBLEND_0 0x2f00
+# define R200_TXC_ARG_A_ZERO (0)
+# define R200_TXC_ARG_A_CURRENT_COLOR (2)
+# define R200_TXC_ARG_A_CURRENT_ALPHA (3)
+# define R200_TXC_ARG_A_DIFFUSE_COLOR (4)
+# define R200_TXC_ARG_A_DIFFUSE_ALPHA (5)
+# define R200_TXC_ARG_A_SPECULAR_COLOR (6)
+# define R200_TXC_ARG_A_SPECULAR_ALPHA (7)
+# define R200_TXC_ARG_A_TFACTOR_COLOR (8)
+# define R200_TXC_ARG_A_TFACTOR_ALPHA (9)
+# define R200_TXC_ARG_A_R0_COLOR (10)
+# define R200_TXC_ARG_A_R0_ALPHA (11)
+# define R200_TXC_ARG_A_R1_COLOR (12)
+# define R200_TXC_ARG_A_R1_ALPHA (13)
+# define R200_TXC_ARG_A_R2_COLOR (14)
+# define R200_TXC_ARG_A_R2_ALPHA (15)
+# define R200_TXC_ARG_A_R3_COLOR (16)
+# define R200_TXC_ARG_A_R3_ALPHA (17)
+# define R200_TXC_ARG_A_R4_COLOR (18)
+# define R200_TXC_ARG_A_R4_ALPHA (19)
+# define R200_TXC_ARG_A_R5_COLOR (20)
+# define R200_TXC_ARG_A_R5_ALPHA (21)
+# define R200_TXC_ARG_A_TFACTOR1_COLOR (26)
+# define R200_TXC_ARG_A_TFACTOR1_ALPHA (27)
+# define R200_TXC_ARG_A_MASK (31 << 0)
+# define R200_TXC_ARG_A_SHIFT 0
+# define R200_TXC_ARG_B_ZERO (0 << 5)
+# define R200_TXC_ARG_B_CURRENT_COLOR (2 << 5)
+# define R200_TXC_ARG_B_CURRENT_ALPHA (3 << 5)
+# define R200_TXC_ARG_B_DIFFUSE_COLOR (4 << 5)
+# define R200_TXC_ARG_B_DIFFUSE_ALPHA (5 << 5)
+# define R200_TXC_ARG_B_SPECULAR_COLOR (6 << 5)
+# define R200_TXC_ARG_B_SPECULAR_ALPHA (7 << 5)
+# define R200_TXC_ARG_B_TFACTOR_COLOR (8 << 5)
+# define R200_TXC_ARG_B_TFACTOR_ALPHA (9 << 5)
+# define R200_TXC_ARG_B_R0_COLOR (10 << 5)
+# define R200_TXC_ARG_B_R0_ALPHA (11 << 5)
+# define R200_TXC_ARG_B_R1_COLOR (12 << 5)
+# define R200_TXC_ARG_B_R1_ALPHA (13 << 5)
+# define R200_TXC_ARG_B_R2_COLOR (14 << 5)
+# define R200_TXC_ARG_B_R2_ALPHA (15 << 5)
+# define R200_TXC_ARG_B_R3_COLOR (16 << 5)
+# define R200_TXC_ARG_B_R3_ALPHA (17 << 5)
+# define R200_TXC_ARG_B_R4_COLOR (18 << 5)
+# define R200_TXC_ARG_B_R4_ALPHA (19 << 5)
+# define R200_TXC_ARG_B_R5_COLOR (20 << 5)
+# define R200_TXC_ARG_B_R5_ALPHA (21 << 5)
+# define R200_TXC_ARG_B_TFACTOR1_COLOR (26 << 5)
+# define R200_TXC_ARG_B_TFACTOR1_ALPHA (27 << 5)
+# define R200_TXC_ARG_B_MASK (31 << 5)
+# define R200_TXC_ARG_B_SHIFT 5
+# define R200_TXC_ARG_C_ZERO (0 << 10)
+# define R200_TXC_ARG_C_CURRENT_COLOR (2 << 10)
+# define R200_TXC_ARG_C_CURRENT_ALPHA (3 << 10)
+# define R200_TXC_ARG_C_DIFFUSE_COLOR (4 << 10)
+# define R200_TXC_ARG_C_DIFFUSE_ALPHA (5 << 10)
+# define R200_TXC_ARG_C_SPECULAR_COLOR (6 << 10)
+# define R200_TXC_ARG_C_SPECULAR_ALPHA (7 << 10)
+# define R200_TXC_ARG_C_TFACTOR_COLOR (8 << 10)
+# define R200_TXC_ARG_C_TFACTOR_ALPHA (9 << 10)
+# define R200_TXC_ARG_C_R0_COLOR (10 << 10)
+# define R200_TXC_ARG_C_R0_ALPHA (11 << 10)
+# define R200_TXC_ARG_C_R1_COLOR (12 << 10)
+# define R200_TXC_ARG_C_R1_ALPHA (13 << 10)
+# define R200_TXC_ARG_C_R2_COLOR (14 << 10)
+# define R200_TXC_ARG_C_R2_ALPHA (15 << 10)
+# define R200_TXC_ARG_C_R3_COLOR (16 << 10)
+# define R200_TXC_ARG_C_R3_ALPHA (17 << 10)
+# define R200_TXC_ARG_C_R4_COLOR (18 << 10)
+# define R200_TXC_ARG_C_R4_ALPHA (19 << 10)
+# define R200_TXC_ARG_C_R5_COLOR (20 << 10)
+# define R200_TXC_ARG_C_R5_ALPHA (21 << 10)
+# define R200_TXC_ARG_C_TFACTOR1_COLOR (26 << 10)
+# define R200_TXC_ARG_C_TFACTOR1_ALPHA (27 << 10)
+# define R200_TXC_ARG_C_MASK (31 << 10)
+# define R200_TXC_ARG_C_SHIFT 10
+# define R200_TXC_COMP_ARG_A (1 << 16)
+# define R200_TXC_COMP_ARG_A_SHIFT (16)
+# define R200_TXC_BIAS_ARG_A (1 << 17)
+# define R200_TXC_SCALE_ARG_A (1 << 18)
+# define R200_TXC_NEG_ARG_A (1 << 19)
+# define R200_TXC_COMP_ARG_B (1 << 20)
+# define R200_TXC_COMP_ARG_B_SHIFT (20)
+# define R200_TXC_BIAS_ARG_B (1 << 21)
+# define R200_TXC_SCALE_ARG_B (1 << 22)
+# define R200_TXC_NEG_ARG_B (1 << 23)
+# define R200_TXC_COMP_ARG_C (1 << 24)
+# define R200_TXC_COMP_ARG_C_SHIFT (24)
+# define R200_TXC_BIAS_ARG_C (1 << 25)
+# define R200_TXC_SCALE_ARG_C (1 << 26)
+# define R200_TXC_NEG_ARG_C (1 << 27)
+# define R200_TXC_OP_MADD (0 << 28)
+# define R200_TXC_OP_CND0 (2 << 28)
+# define R200_TXC_OP_LERP (3 << 28)
+# define R200_TXC_OP_DOT3 (4 << 28)
+# define R200_TXC_OP_DOT4 (5 << 28)
+# define R200_TXC_OP_CONDITIONAL (6 << 28)
+# define R200_TXC_OP_DOT2_ADD (7 << 28)
+# define R200_TXC_OP_MASK (7 << 28)
+#define R200_PP_TXCBLEND2_0 0x2f04
+# define R200_TXC_TFACTOR_SEL_SHIFT 0
+# define R200_TXC_TFACTOR_SEL_MASK 0x7
+# define R200_TXC_TFACTOR1_SEL_SHIFT 4
+# define R200_TXC_TFACTOR1_SEL_MASK (0x7 << 4)
+# define R200_TXC_SCALE_SHIFT 8
+# define R200_TXC_SCALE_MASK (7 << 8)
+# define R200_TXC_SCALE_1X (0 << 8)
+# define R200_TXC_SCALE_2X (1 << 8)
+# define R200_TXC_SCALE_4X (2 << 8)
+# define R200_TXC_SCALE_8X (3 << 8)
+# define R200_TXC_SCALE_INV2 (5 << 8)
+# define R200_TXC_SCALE_INV4 (6 << 8)
+# define R200_TXC_SCALE_INV8 (7 << 8)
+# define R200_TXC_CLAMP_SHIFT 12
+# define R200_TXC_CLAMP_MASK (3 << 12)
+# define R200_TXC_CLAMP_WRAP (0 << 12)
+# define R200_TXC_CLAMP_0_1 (1 << 12)
+# define R200_TXC_CLAMP_8_8 (2 << 12)
+# define R200_TXC_OUTPUT_REG_MASK (7 << 16)
+# define R200_TXC_OUTPUT_REG_NONE (0 << 16)
+# define R200_TXC_OUTPUT_REG_R0 (1 << 16)
+# define R200_TXC_OUTPUT_REG_R1 (2 << 16)
+# define R200_TXC_OUTPUT_REG_R2 (3 << 16)
+# define R200_TXC_OUTPUT_REG_R3 (4 << 16)
+# define R200_TXC_OUTPUT_REG_R4 (5 << 16)
+# define R200_TXC_OUTPUT_REG_R5 (6 << 16)
+# define R200_TXC_OUTPUT_MASK_MASK (7 << 20)
+# define R200_TXC_OUTPUT_MASK_RGB (0 << 20)
+# define R200_TXC_OUTPUT_MASK_RG (1 << 20)
+# define R200_TXC_OUTPUT_MASK_RB (2 << 20)
+# define R200_TXC_OUTPUT_MASK_R (3 << 20)
+# define R200_TXC_OUTPUT_MASK_GB (4 << 20)
+# define R200_TXC_OUTPUT_MASK_G (5 << 20)
+# define R200_TXC_OUTPUT_MASK_B (6 << 20)
+# define R200_TXC_OUTPUT_MASK_NONE (7 << 20)
+# define R200_TXC_REPL_NORMAL 0
+# define R200_TXC_REPL_RED 1
+# define R200_TXC_REPL_GREEN 2
+# define R200_TXC_REPL_BLUE 3
+# define R200_TXC_REPL_ARG_A_SHIFT 26
+# define R200_TXC_REPL_ARG_A_MASK (3 << 26)
+# define R200_TXC_REPL_ARG_B_SHIFT 28
+# define R200_TXC_REPL_ARG_B_MASK (3 << 28)
+# define R200_TXC_REPL_ARG_C_SHIFT 30
+# define R200_TXC_REPL_ARG_C_MASK (3 << 30)
+#define R200_PP_TXABLEND_0 0x2f08
+# define R200_TXA_ARG_A_ZERO (0)
+# define R200_TXA_ARG_A_CURRENT_ALPHA (2) /* guess */
+# define R200_TXA_ARG_A_CURRENT_BLUE (3) /* guess */
+# define R200_TXA_ARG_A_DIFFUSE_ALPHA (4)
+# define R200_TXA_ARG_A_DIFFUSE_BLUE (5)
+# define R200_TXA_ARG_A_SPECULAR_ALPHA (6)
+# define R200_TXA_ARG_A_SPECULAR_BLUE (7)
+# define R200_TXA_ARG_A_TFACTOR_ALPHA (8)
+# define R200_TXA_ARG_A_TFACTOR_BLUE (9)
+# define R200_TXA_ARG_A_R0_ALPHA (10)
+# define R200_TXA_ARG_A_R0_BLUE (11)
+# define R200_TXA_ARG_A_R1_ALPHA (12)
+# define R200_TXA_ARG_A_R1_BLUE (13)
+# define R200_TXA_ARG_A_R2_ALPHA (14)
+# define R200_TXA_ARG_A_R2_BLUE (15)
+# define R200_TXA_ARG_A_R3_ALPHA (16)
+# define R200_TXA_ARG_A_R3_BLUE (17)
+# define R200_TXA_ARG_A_R4_ALPHA (18)
+# define R200_TXA_ARG_A_R4_BLUE (19)
+# define R200_TXA_ARG_A_R5_ALPHA (20)
+# define R200_TXA_ARG_A_R5_BLUE (21)
+# define R200_TXA_ARG_A_TFACTOR1_ALPHA (26)
+# define R200_TXA_ARG_A_TFACTOR1_BLUE (27)
+# define R200_TXA_ARG_A_MASK (31 << 0)
+# define R200_TXA_ARG_A_SHIFT 0
+# define R200_TXA_ARG_B_ZERO (0 << 5)
+# define R200_TXA_ARG_B_CURRENT_ALPHA (2 << 5) /* guess */
+# define R200_TXA_ARG_B_CURRENT_BLUE (3 << 5) /* guess */
+# define R200_TXA_ARG_B_DIFFUSE_ALPHA (4 << 5)
+# define R200_TXA_ARG_B_DIFFUSE_BLUE (5 << 5)
+# define R200_TXA_ARG_B_SPECULAR_ALPHA (6 << 5)
+# define R200_TXA_ARG_B_SPECULAR_BLUE (7 << 5)
+# define R200_TXA_ARG_B_TFACTOR_ALPHA (8 << 5)
+# define R200_TXA_ARG_B_TFACTOR_BLUE (9 << 5)
+# define R200_TXA_ARG_B_R0_ALPHA (10 << 5)
+# define R200_TXA_ARG_B_R0_BLUE (11 << 5)
+# define R200_TXA_ARG_B_R1_ALPHA (12 << 5)
+# define R200_TXA_ARG_B_R1_BLUE (13 << 5)
+# define R200_TXA_ARG_B_R2_ALPHA (14 << 5)
+# define R200_TXA_ARG_B_R2_BLUE (15 << 5)
+# define R200_TXA_ARG_B_R3_ALPHA (16 << 5)
+# define R200_TXA_ARG_B_R3_BLUE (17 << 5)
+# define R200_TXA_ARG_B_R4_ALPHA (18 << 5)
+# define R200_TXA_ARG_B_R4_BLUE (19 << 5)
+# define R200_TXA_ARG_B_R5_ALPHA (20 << 5)
+# define R200_TXA_ARG_B_R5_BLUE (21 << 5)
+# define R200_TXA_ARG_B_TFACTOR1_ALPHA (26 << 5)
+# define R200_TXA_ARG_B_TFACTOR1_BLUE (27 << 5)
+# define R200_TXA_ARG_B_MASK (31 << 5)
+# define R200_TXA_ARG_B_SHIFT 5
+# define R200_TXA_ARG_C_ZERO (0 << 10)
+# define R200_TXA_ARG_C_CURRENT_ALPHA (2 << 10) /* guess */
+# define R200_TXA_ARG_C_CURRENT_BLUE (3 << 10) /* guess */
+# define R200_TXA_ARG_C_DIFFUSE_ALPHA (4 << 10)
+# define R200_TXA_ARG_C_DIFFUSE_BLUE (5 << 10)
+# define R200_TXA_ARG_C_SPECULAR_ALPHA (6 << 10)
+# define R200_TXA_ARG_C_SPECULAR_BLUE (7 << 10)
+# define R200_TXA_ARG_C_TFACTOR_ALPHA (8 << 10)
+# define R200_TXA_ARG_C_TFACTOR_BLUE (9 << 10)
+# define R200_TXA_ARG_C_R0_ALPHA (10 << 10)
+# define R200_TXA_ARG_C_R0_BLUE (11 << 10)
+# define R200_TXA_ARG_C_R1_ALPHA (12 << 10)
+# define R200_TXA_ARG_C_R1_BLUE (13 << 10)
+# define R200_TXA_ARG_C_R2_ALPHA (14 << 10)
+# define R200_TXA_ARG_C_R2_BLUE (15 << 10)
+# define R200_TXA_ARG_C_R3_ALPHA (16 << 10)
+# define R200_TXA_ARG_C_R3_BLUE (17 << 10)
+# define R200_TXA_ARG_C_R4_ALPHA (18 << 10)
+# define R200_TXA_ARG_C_R4_BLUE (19 << 10)
+# define R200_TXA_ARG_C_R5_ALPHA (20 << 10)
+# define R200_TXA_ARG_C_R5_BLUE (21 << 10)
+# define R200_TXA_ARG_C_TFACTOR1_ALPHA (26 << 10)
+# define R200_TXA_ARG_C_TFACTOR1_BLUE (27 << 10)
+# define R200_TXA_ARG_C_MASK (31 << 10)
+# define R200_TXA_ARG_C_SHIFT 10
+# define R200_TXA_COMP_ARG_A (1 << 16)
+# define R200_TXA_COMP_ARG_A_SHIFT (16)
+# define R200_TXA_BIAS_ARG_A (1 << 17)
+# define R200_TXA_SCALE_ARG_A (1 << 18)
+# define R200_TXA_NEG_ARG_A (1 << 19)
+# define R200_TXA_COMP_ARG_B (1 << 20)
+# define R200_TXA_COMP_ARG_B_SHIFT (20)
+# define R200_TXA_BIAS_ARG_B (1 << 21)
+# define R200_TXA_SCALE_ARG_B (1 << 22)
+# define R200_TXA_NEG_ARG_B (1 << 23)
+# define R200_TXA_COMP_ARG_C (1 << 24)
+# define R200_TXA_COMP_ARG_C_SHIFT (24)
+# define R200_TXA_BIAS_ARG_C (1 << 25)
+# define R200_TXA_SCALE_ARG_C (1 << 26)
+# define R200_TXA_NEG_ARG_C (1 << 27)
+# define R200_TXA_OP_MADD (0 << 28)
+# define R200_TXA_OP_CND0 (2 << 28)
+# define R200_TXA_OP_LERP (3 << 28)
+# define R200_TXA_OP_CONDITIONAL (6 << 28)
+# define R200_TXA_OP_MASK (7 << 28)
+#define R200_PP_TXABLEND2_0 0x2f0c
+# define R200_TXA_TFACTOR_SEL_SHIFT 0
+# define R200_TXA_TFACTOR_SEL_MASK 0x7
+# define R200_TXA_TFACTOR1_SEL_SHIFT 4
+# define R200_TXA_TFACTOR1_SEL_MASK (0x7 << 4)
+# define R200_TXA_SCALE_SHIFT 8
+# define R200_TXA_SCALE_MASK (7 << 8)
+# define R200_TXA_SCALE_1X (0 << 8)
+# define R200_TXA_SCALE_2X (1 << 8)
+# define R200_TXA_SCALE_4X (2 << 8)
+# define R200_TXA_SCALE_8X (3 << 8)
+# define R200_TXA_SCALE_INV2 (5 << 8)
+# define R200_TXA_SCALE_INV4 (6 << 8)
+# define R200_TXA_SCALE_INV8 (7 << 8)
+# define R200_TXA_CLAMP_SHIFT 12
+# define R200_TXA_CLAMP_MASK (3 << 12)
+# define R200_TXA_CLAMP_WRAP (0 << 12)
+# define R200_TXA_CLAMP_0_1 (1 << 12)
+# define R200_TXA_CLAMP_8_8 (2 << 12)
+# define R200_TXA_OUTPUT_REG_MASK (7 << 16)
+# define R200_TXA_OUTPUT_REG_NONE (0 << 16)
+# define R200_TXA_OUTPUT_REG_R0 (1 << 16)
+# define R200_TXA_OUTPUT_REG_R1 (2 << 16)
+# define R200_TXA_OUTPUT_REG_R2 (3 << 16)
+# define R200_TXA_OUTPUT_REG_R3 (4 << 16)
+# define R200_TXA_OUTPUT_REG_R4 (5 << 16)
+# define R200_TXA_OUTPUT_REG_R5 (6 << 16)
+# define R200_TXA_DOT_ALPHA (1 << 20)
+# define R200_TXA_REPL_NORMAL 0
+# define R200_TXA_REPL_RED 1
+# define R200_TXA_REPL_GREEN 2
+# define R200_TXA_REPL_ARG_A_SHIFT 26
+# define R200_TXA_REPL_ARG_A_MASK (3 << 26)
+# define R200_TXA_REPL_ARG_B_SHIFT 28
+# define R200_TXA_REPL_ARG_B_MASK (3 << 28)
+# define R200_TXA_REPL_ARG_C_SHIFT 30
+# define R200_TXA_REPL_ARG_C_MASK (3 << 30)
+
+#define R200_SE_VTX_FMT_0 0x2088
+# define R200_VTX_XY 0 /* always have xy */
+# define R200_VTX_Z0 (1<<0)
+# define R200_VTX_W0 (1<<1)
+# define R200_VTX_WEIGHT_COUNT_SHIFT (2)
+# define R200_VTX_PV_MATRIX_SEL (1<<5)
+# define R200_VTX_N0 (1<<6)
+# define R200_VTX_POINT_SIZE (1<<7)
+# define R200_VTX_DISCRETE_FOG (1<<8)
+# define R200_VTX_SHININESS_0 (1<<9)
+# define R200_VTX_SHININESS_1 (1<<10)
+# define R200_VTX_COLOR_NOT_PRESENT 0
+# define R200_VTX_PK_RGBA 1
+# define R200_VTX_FP_RGB 2
+# define R200_VTX_FP_RGBA 3
+# define R200_VTX_COLOR_MASK 3
+# define R200_VTX_COLOR_0_SHIFT 11
+# define R200_VTX_COLOR_1_SHIFT 13
+# define R200_VTX_COLOR_2_SHIFT 15
+# define R200_VTX_COLOR_3_SHIFT 17
+# define R200_VTX_COLOR_4_SHIFT 19
+# define R200_VTX_COLOR_5_SHIFT 21
+# define R200_VTX_COLOR_6_SHIFT 23
+# define R200_VTX_COLOR_7_SHIFT 25
+# define R200_VTX_XY1 (1<<28)
+# define R200_VTX_Z1 (1<<29)
+# define R200_VTX_W1 (1<<30)
+# define R200_VTX_N1 (1<<31)
+#define R200_SE_VTX_FMT_1 0x208c
+# define R200_VTX_TEX0_COMP_CNT_SHIFT 0
+# define R200_VTX_TEX1_COMP_CNT_SHIFT 3
+# define R200_VTX_TEX2_COMP_CNT_SHIFT 6
+# define R200_VTX_TEX3_COMP_CNT_SHIFT 9
+# define R200_VTX_TEX4_COMP_CNT_SHIFT 12
+# define R200_VTX_TEX5_COMP_CNT_SHIFT 15
+
+#define R200_SE_TCL_OUTPUT_VTX_FMT_0 0x2090
+#define R200_SE_TCL_OUTPUT_VTX_FMT_1 0x2094
+#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250
+# define R200_OUTPUT_XYZW (1<<0)
+# define R200_OUTPUT_COLOR_0 (1<<8)
+# define R200_OUTPUT_COLOR_1 (1<<9)
+# define R200_OUTPUT_TEX_0 (1<<16)
+# define R200_OUTPUT_TEX_1 (1<<17)
+# define R200_OUTPUT_TEX_2 (1<<18)
+# define R200_OUTPUT_TEX_3 (1<<19)
+# define R200_OUTPUT_TEX_4 (1<<20)
+# define R200_OUTPUT_TEX_5 (1<<21)
+# define R200_OUTPUT_TEX_MASK (0x3f<<16)
+# define R200_OUTPUT_DISCRETE_FOG (1<<24)
+# define R200_OUTPUT_PT_SIZE (1<<25)
+# define R200_FORCE_INORDER_PROC (1<<31)
+#define R200_PP_CNTL_X 0x2cc4
+#define R200_PP_TXMULTI_CTL_0 0x2c1c
+#define R200_SE_VTX_STATE_CNTL 0x2180
+# define R200_UPDATE_USER_COLOR_0_ENA_MASK (1<<16)
+
+ /* Registers for CP and Microcode Engine */
+#define RADEON_CP_ME_RAM_ADDR 0x07d4
+#define RADEON_CP_ME_RAM_RADDR 0x07d8
+#define RADEON_CP_ME_RAM_DATAH 0x07dc
+#define RADEON_CP_ME_RAM_DATAL 0x07e0
+
+#define RADEON_CP_RB_BASE 0x0700
+#define RADEON_CP_RB_CNTL 0x0704
+# define RADEON_RB_BUFSZ_SHIFT 0
+# define RADEON_RB_BUFSZ_MASK (0x3f << 0)
+# define RADEON_RB_BLKSZ_SHIFT 8
+# define RADEON_RB_BLKSZ_MASK (0x3f << 8)
+# define RADEON_MAX_FETCH_SHIFT 18
+# define RADEON_MAX_FETCH_MASK (0x3 << 18)
+# define RADEON_RB_NO_UPDATE (1 << 27)
+# define RADEON_RB_RPTR_WR_ENA (1 << 31)
+#define RADEON_CP_RB_RPTR_ADDR 0x070c
+#define RADEON_CP_RB_RPTR 0x0710
+#define RADEON_CP_RB_WPTR 0x0714
+#define RADEON_CP_RB_RPTR_WR 0x071c
+
+#define RADEON_CP_IB_BASE 0x0738
+#define RADEON_CP_IB_BUFSZ 0x073c
+
+#define RADEON_CP_CSQ_CNTL 0x0740
+# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0)
+# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28)
+# define RADEON_CSQ_PRIPIO_INDDIS (1 << 28)
+# define RADEON_CSQ_PRIBM_INDDIS (2 << 28)
+# define RADEON_CSQ_PRIPIO_INDBM (3 << 28)
+# define RADEON_CSQ_PRIBM_INDBM (4 << 28)
+# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28)
+
+#define R300_CP_RESYNC_ADDR 0x778
+#define R300_CP_RESYNC_DATA 0x77c
+
+#define RADEON_CP_CSQ_STAT 0x07f8
+# define RADEON_CSQ_RPTR_PRIMARY_MASK (0xff << 0)
+# define RADEON_CSQ_WPTR_PRIMARY_MASK (0xff << 8)
+# define RADEON_CSQ_RPTR_INDIRECT_MASK (0xff << 16)
+# define RADEON_CSQ_WPTR_INDIRECT_MASK (0xff << 24)
+#define RADEON_CP_CSQ2_STAT 0x07fc
+#define RADEON_CP_CSQ_ADDR 0x07f0
+#define RADEON_CP_CSQ_DATA 0x07f4
+#define RADEON_CP_CSQ_APER_PRIMARY 0x1000
+#define RADEON_CP_CSQ_APER_INDIRECT 0x1300
+
+#define RADEON_CP_RB_WPTR_DELAY 0x0718
+# define RADEON_PRE_WRITE_TIMER_SHIFT 0
+# define RADEON_PRE_WRITE_LIMIT_SHIFT 23
+#define RADEON_CP_CSQ_MODE 0x0744
+# define RADEON_INDIRECT2_START_SHIFT 0
+# define RADEON_INDIRECT2_START_MASK (0x7f << 0)
+# define RADEON_INDIRECT1_START_SHIFT 8
+# define RADEON_INDIRECT1_START_MASK (0x7f << 8)
+
+#define RADEON_AIC_CNTL 0x01d0
+# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
+# define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
+#define RADEON_AIC_LO_ADDR 0x01dc
+#define RADEON_AIC_PT_BASE 0x01d8
+#define RADEON_AIC_HI_ADDR 0x01e0
+
+
+
+ /* Constants */
+/* #define RADEON_LAST_FRAME_REG RADEON_GUI_SCRATCH_REG0 */
+/* #define RADEON_LAST_CLEAR_REG RADEON_GUI_SCRATCH_REG2 */
+
+
+
+ /* CP packet types */
+#define RADEON_CP_PACKET0 0x00000000
+#define RADEON_CP_PACKET1 0x40000000
+#define RADEON_CP_PACKET2 0x80000000
+#define RADEON_CP_PACKET3 0xC0000000
+# define RADEON_CP_PACKET_MASK 0xC0000000
+# define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
+# define RADEON_CP_PACKET_MAX_DWORDS (1 << 12)
+# define RADEON_CP_PACKET0_REG_MASK 0x000007ff
+# define R300_CP_PACKET0_REG_MASK 0x00001fff
+# define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
+# define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
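+/* A CP packet header encodes the packet type in bits 31:30 and the payload
+ * size (number of dwords that follow, minus one) in bits 29:16; for type-0
+ * packets the low bits carry the dword index (register byte offset / 4) of
+ * the first register to write. */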
+
+#define RADEON_CP_PACKET0_ONE_REG_WR 0x00008000
+
+#define RADEON_CP_PACKET3_NOP 0xC0001000
+#define RADEON_CP_PACKET3_NEXT_CHAR 0xC0001900
+#define RADEON_CP_PACKET3_PLY_NEXTSCAN 0xC0001D00
+#define RADEON_CP_PACKET3_SET_SCISSORS 0xC0001E00
+#define RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM 0xC0002300
+#define RADEON_CP_PACKET3_LOAD_MICROCODE 0xC0002400
+#define RADEON_CP_PACKET3_WAIT_FOR_IDLE 0xC0002600
+#define RADEON_CP_PACKET3_3D_DRAW_VBUF 0xC0002800
+#define RADEON_CP_PACKET3_3D_DRAW_IMMD 0xC0002900
+#define RADEON_CP_PACKET3_3D_DRAW_INDX 0xC0002A00
+#define RADEON_CP_PACKET3_LOAD_PALETTE 0xC0002C00
+#define R200_CP_PACKET3_3D_DRAW_IMMD_2 0xc0003500
+#define RADEON_CP_PACKET3_3D_LOAD_VBPNTR 0xC0002F00
+#define RADEON_CP_PACKET3_CNTL_PAINT 0xC0009100
+#define RADEON_CP_PACKET3_CNTL_BITBLT 0xC0009200
+#define RADEON_CP_PACKET3_CNTL_SMALLTEXT 0xC0009300
+#define RADEON_CP_PACKET3_CNTL_HOSTDATA_BLT 0xC0009400
+#define RADEON_CP_PACKET3_CNTL_POLYLINE 0xC0009500
+#define RADEON_CP_PACKET3_CNTL_POLYSCANLINES 0xC0009800
+#define RADEON_CP_PACKET3_CNTL_PAINT_MULTI 0xC0009A00
+#define RADEON_CP_PACKET3_CNTL_BITBLT_MULTI 0xC0009B00
+#define RADEON_CP_PACKET3_CNTL_TRANS_BITBLT 0xC0009C00
+
+
+#define RADEON_CP_VC_FRMT_XY 0x00000000
+#define RADEON_CP_VC_FRMT_W0 0x00000001
+#define RADEON_CP_VC_FRMT_FPCOLOR 0x00000002
+#define RADEON_CP_VC_FRMT_FPALPHA 0x00000004
+#define RADEON_CP_VC_FRMT_PKCOLOR 0x00000008
+#define RADEON_CP_VC_FRMT_FPSPEC 0x00000010
+#define RADEON_CP_VC_FRMT_FPFOG 0x00000020
+#define RADEON_CP_VC_FRMT_PKSPEC 0x00000040
+#define RADEON_CP_VC_FRMT_ST0 0x00000080
+#define RADEON_CP_VC_FRMT_ST1 0x00000100
+#define RADEON_CP_VC_FRMT_Q1 0x00000200
+#define RADEON_CP_VC_FRMT_ST2 0x00000400
+#define RADEON_CP_VC_FRMT_Q2 0x00000800
+#define RADEON_CP_VC_FRMT_ST3 0x00001000
+#define RADEON_CP_VC_FRMT_Q3 0x00002000
+#define RADEON_CP_VC_FRMT_Q0 0x00004000
+#define RADEON_CP_VC_FRMT_BLND_WEIGHT_CNT_MASK 0x00038000
+#define RADEON_CP_VC_FRMT_N0 0x00040000
+#define RADEON_CP_VC_FRMT_XY1 0x08000000
+#define RADEON_CP_VC_FRMT_Z1 0x10000000
+#define RADEON_CP_VC_FRMT_W1 0x20000000
+#define RADEON_CP_VC_FRMT_N1 0x40000000
+#define RADEON_CP_VC_FRMT_Z 0x80000000
+
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_NONE 0x00000000
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_POINT 0x00000001
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE 0x00000002
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP 0x00000003
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_TYPE_2 0x00000007
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST 0x00000008
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_POINT_LIST 0x00000009
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_LINE_LIST 0x0000000a
+#define RADEON_CP_VC_CNTL_PRIM_WALK_IND 0x00000010
+#define RADEON_CP_VC_CNTL_PRIM_WALK_LIST 0x00000020
+#define RADEON_CP_VC_CNTL_PRIM_WALK_RING 0x00000030
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_BGRA 0x00000000
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA 0x00000040
+#define RADEON_CP_VC_CNTL_MAOS_ENABLE 0x00000080
+#define RADEON_CP_VC_CNTL_VTX_FMT_NON_RADEON_MODE 0x00000000
+#define RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE 0x00000100
+#define RADEON_CP_VC_CNTL_TCL_DISABLE 0x00000000
+#define RADEON_CP_VC_CNTL_TCL_ENABLE 0x00000200
+#define RADEON_CP_VC_CNTL_NUM_SHIFT 16
+
+#define RADEON_VS_MATRIX_0_ADDR 0
+#define RADEON_VS_MATRIX_1_ADDR 4
+#define RADEON_VS_MATRIX_2_ADDR 8
+#define RADEON_VS_MATRIX_3_ADDR 12
+#define RADEON_VS_MATRIX_4_ADDR 16
+#define RADEON_VS_MATRIX_5_ADDR 20
+#define RADEON_VS_MATRIX_6_ADDR 24
+#define RADEON_VS_MATRIX_7_ADDR 28
+#define RADEON_VS_MATRIX_8_ADDR 32
+#define RADEON_VS_MATRIX_9_ADDR 36
+#define RADEON_VS_MATRIX_10_ADDR 40
+#define RADEON_VS_MATRIX_11_ADDR 44
+#define RADEON_VS_MATRIX_12_ADDR 48
+#define RADEON_VS_MATRIX_13_ADDR 52
+#define RADEON_VS_MATRIX_14_ADDR 56
+#define RADEON_VS_MATRIX_15_ADDR 60
+#define RADEON_VS_LIGHT_AMBIENT_ADDR 64
+#define RADEON_VS_LIGHT_DIFFUSE_ADDR 72
+#define RADEON_VS_LIGHT_SPECULAR_ADDR 80
+#define RADEON_VS_LIGHT_DIRPOS_ADDR 88
+#define RADEON_VS_LIGHT_HWVSPOT_ADDR 96
+#define RADEON_VS_LIGHT_ATTENUATION_ADDR 104
+#define RADEON_VS_MATRIX_EYE2CLIP_ADDR 112
+#define RADEON_VS_UCP_ADDR 116
+#define RADEON_VS_GLOBAL_AMBIENT_ADDR 122
+#define RADEON_VS_FOG_PARAM_ADDR 123
+#define RADEON_VS_EYE_VECTOR_ADDR 124
+
+#define RADEON_SS_LIGHT_DCD_ADDR 0
+#define RADEON_SS_LIGHT_SPOT_EXPONENT_ADDR 8
+#define RADEON_SS_LIGHT_SPOT_CUTOFF_ADDR 16
+#define RADEON_SS_LIGHT_SPECULAR_THRESH_ADDR 24
+#define RADEON_SS_LIGHT_RANGE_CUTOFF_ADDR 32
+#define RADEON_SS_VERT_GUARD_CLIP_ADJ_ADDR 48
+#define RADEON_SS_VERT_GUARD_DISCARD_ADJ_ADDR 49
+#define RADEON_SS_HORZ_GUARD_CLIP_ADJ_ADDR 50
+#define RADEON_SS_HORZ_GUARD_DISCARD_ADJ_ADDR 51
+#define RADEON_SS_SHININESS 60
+
+#define RADEON_TV_MASTER_CNTL 0x0800
+# define RADEON_TV_ASYNC_RST (1 << 0)
+# define RADEON_CRT_ASYNC_RST (1 << 1)
+# define RADEON_RESTART_PHASE_FIX (1 << 3)
+# define RADEON_TV_FIFO_ASYNC_RST (1 << 4)
+# define RADEON_VIN_ASYNC_RST (1 << 5)
+# define RADEON_AUD_ASYNC_RST (1 << 6)
+# define RADEON_DVS_ASYNC_RST (1 << 7)
+# define RADEON_CRT_FIFO_CE_EN (1 << 9)
+# define RADEON_TV_FIFO_CE_EN (1 << 10)
+# define RADEON_RE_SYNC_NOW_SEL_MASK (3 << 14)
+# define RADEON_TVCLK_ALWAYS_ONb (1 << 30)
+# define RADEON_TV_ON (1 << 31)
+#define RADEON_TV_PRE_DAC_MUX_CNTL 0x0888
+# define RADEON_Y_RED_EN (1 << 0)
+# define RADEON_C_GRN_EN (1 << 1)
+# define RADEON_CMP_BLU_EN (1 << 2)
+# define RADEON_DAC_DITHER_EN (1 << 3)
+# define RADEON_RED_MX_FORCE_DAC_DATA (6 << 4)
+# define RADEON_GRN_MX_FORCE_DAC_DATA (6 << 8)
+# define RADEON_BLU_MX_FORCE_DAC_DATA (6 << 12)
+# define RADEON_TV_FORCE_DAC_DATA_SHIFT 16
+#define RADEON_TV_RGB_CNTL 0x0804
+# define RADEON_SWITCH_TO_BLUE (1 << 4)
+# define RADEON_RGB_DITHER_EN (1 << 5)
+# define RADEON_RGB_SRC_SEL_MASK (3 << 8)
+# define RADEON_RGB_SRC_SEL_CRTC1 (0 << 8)
+# define RADEON_RGB_SRC_SEL_RMX (1 << 8)
+# define RADEON_RGB_SRC_SEL_CRTC2 (2 << 8)
+# define RADEON_RGB_CONVERT_BY_PASS (1 << 10)
+# define RADEON_UVRAM_READ_MARGIN_SHIFT 16
+# define RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT 20
+# define RADEON_TVOUT_SCALE_EN (1 << 26)
+#define RADEON_TV_SYNC_CNTL 0x0808
+# define RADEON_SYNC_OE (1 << 0)
+# define RADEON_SYNC_OUT (1 << 1)
+# define RADEON_SYNC_IN (1 << 2)
+# define RADEON_SYNC_PUB (1 << 3)
+# define RADEON_SYNC_PD (1 << 4)
+# define RADEON_TV_SYNC_IO_DRIVE (1 << 5)
+#define RADEON_TV_HTOTAL 0x080c
+#define RADEON_TV_HDISP 0x0810
+#define RADEON_TV_HSTART 0x0818
+#define RADEON_TV_HCOUNT 0x081C
+#define RADEON_TV_VTOTAL 0x0820
+#define RADEON_TV_VDISP 0x0824
+#define RADEON_TV_VCOUNT 0x0828
+#define RADEON_TV_FTOTAL 0x082c
+#define RADEON_TV_FCOUNT 0x0830
+#define RADEON_TV_FRESTART 0x0834
+#define RADEON_TV_HRESTART 0x0838
+#define RADEON_TV_VRESTART 0x083c
+#define RADEON_TV_HOST_READ_DATA 0x0840
+#define RADEON_TV_HOST_WRITE_DATA 0x0844
+#define RADEON_TV_HOST_RD_WT_CNTL 0x0848
+# define RADEON_HOST_FIFO_RD (1 << 12)
+# define RADEON_HOST_FIFO_RD_ACK (1 << 13)
+# define RADEON_HOST_FIFO_WT (1 << 14)
+# define RADEON_HOST_FIFO_WT_ACK (1 << 15)
+#define RADEON_TV_VSCALER_CNTL1 0x084c
+# define RADEON_UV_INC_MASK 0xffff
+# define RADEON_UV_INC_SHIFT 0
+# define RADEON_Y_W_EN (1 << 24)
+# define RADEON_RESTART_FIELD (1 << 29) /* restart on field 0 */
+# define RADEON_Y_DEL_W_SIG_SHIFT 26
+#define RADEON_TV_TIMING_CNTL 0x0850
+# define RADEON_H_INC_MASK 0xfff
+# define RADEON_H_INC_SHIFT 0
+# define RADEON_REQ_Y_FIRST (1 << 19)
+# define RADEON_FORCE_BURST_ALWAYS (1 << 21)
+# define RADEON_UV_POST_SCALE_BYPASS (1 << 23)
+# define RADEON_UV_OUTPUT_POST_SCALE_SHIFT 24
+#define RADEON_TV_VSCALER_CNTL2 0x0854
+# define RADEON_DITHER_MODE (1 << 0)
+# define RADEON_Y_OUTPUT_DITHER_EN (1 << 1)
+# define RADEON_UV_OUTPUT_DITHER_EN (1 << 2)
+# define RADEON_UV_TO_BUF_DITHER_EN (1 << 3)
+#define RADEON_TV_Y_FALL_CNTL 0x0858
+# define RADEON_Y_FALL_PING_PONG (1 << 16)
+# define RADEON_Y_COEF_EN (1 << 17)
+#define RADEON_TV_Y_RISE_CNTL 0x085c
+# define RADEON_Y_RISE_PING_PONG (1 << 16)
+#define RADEON_TV_Y_SAW_TOOTH_CNTL 0x0860
+#define RADEON_TV_UPSAMP_AND_GAIN_CNTL 0x0864
+# define RADEON_YUPSAMP_EN (1 << 0)
+# define RADEON_UVUPSAMP_EN (1 << 2)
+#define RADEON_TV_GAIN_LIMIT_SETTINGS 0x0868
+# define RADEON_Y_GAIN_LIMIT_SHIFT 0
+# define RADEON_UV_GAIN_LIMIT_SHIFT 16
+#define RADEON_TV_LINEAR_GAIN_SETTINGS 0x086c
+# define RADEON_Y_GAIN_SHIFT 0
+# define RADEON_UV_GAIN_SHIFT 16
+#define RADEON_TV_MODULATOR_CNTL1 0x0870
+# define RADEON_YFLT_EN (1 << 2)
+# define RADEON_UVFLT_EN (1 << 3)
+# define RADEON_ALT_PHASE_EN (1 << 6)
+# define RADEON_SYNC_TIP_LEVEL (1 << 7)
+# define RADEON_BLANK_LEVEL_SHIFT 8
+# define RADEON_SET_UP_LEVEL_SHIFT 16
+# define RADEON_SLEW_RATE_LIMIT (1 << 23)
+# define RADEON_CY_FILT_BLEND_SHIFT 28
+#define RADEON_TV_MODULATOR_CNTL2 0x0874
+# define RADEON_TV_U_BURST_LEVEL_MASK 0x1ff
+# define RADEON_TV_V_BURST_LEVEL_MASK 0x1ff
+# define RADEON_TV_V_BURST_LEVEL_SHIFT 16
+#define RADEON_TV_CRC_CNTL 0x0890
+#define RADEON_TV_UV_ADR 0x08ac
+# define RADEON_MAX_UV_ADR_MASK 0x000000ff
+# define RADEON_MAX_UV_ADR_SHIFT 0
+# define RADEON_TABLE1_BOT_ADR_MASK 0x0000ff00
+# define RADEON_TABLE1_BOT_ADR_SHIFT 8
+# define RADEON_TABLE3_TOP_ADR_MASK 0x00ff0000
+# define RADEON_TABLE3_TOP_ADR_SHIFT 16
+# define RADEON_HCODE_TABLE_SEL_MASK 0x06000000
+# define RADEON_HCODE_TABLE_SEL_SHIFT 25
+# define RADEON_VCODE_TABLE_SEL_MASK 0x18000000
+# define RADEON_VCODE_TABLE_SEL_SHIFT 27
+# define RADEON_TV_MAX_FIFO_ADDR 0x1a7
+# define RADEON_TV_MAX_FIFO_ADDR_INTERNAL 0x1ff
+#define RADEON_TV_PLL_FINE_CNTL 0x0020 /* PLL */
+#define RADEON_TV_PLL_CNTL 0x0021 /* PLL */
+# define RADEON_TV_M0LO_MASK 0xff
+# define RADEON_TV_M0HI_MASK 0x7
+# define RADEON_TV_M0HI_SHIFT 18
+# define RADEON_TV_N0LO_MASK 0x1ff
+# define RADEON_TV_N0LO_SHIFT 8
+# define RADEON_TV_N0HI_MASK 0x3
+# define RADEON_TV_N0HI_SHIFT 21
+# define RADEON_TV_P_MASK 0xf
+# define RADEON_TV_P_SHIFT 24
+# define RADEON_TV_SLIP_EN (1 << 23)
+# define RADEON_TV_DTO_EN (1 << 28)
+#define RADEON_TV_PLL_CNTL1 0x0022 /* PLL */
+# define RADEON_TVPLL_RESET (1 << 1)
+# define RADEON_TVPLL_SLEEP (1 << 3)
+# define RADEON_TVPLL_REFCLK_SEL (1 << 4)
+# define RADEON_TVPCP_SHIFT 8
+# define RADEON_TVPCP_MASK (7 << 8)
+# define RADEON_TVPVG_SHIFT 11
+# define RADEON_TVPVG_MASK (7 << 11)
+# define RADEON_TVPDC_SHIFT 14
+# define RADEON_TVPDC_MASK (3 << 14)
+# define RADEON_TVPLL_TEST_DIS (1 << 31)
+# define RADEON_TVCLK_SRC_SEL_TVPLL (1 << 30)
+
+#define RS400_DISP2_REQ_CNTL1 0xe30
+# define RS400_DISP2_START_REQ_LEVEL_SHIFT 0
+# define RS400_DISP2_START_REQ_LEVEL_MASK 0x3ff
+# define RS400_DISP2_STOP_REQ_LEVEL_SHIFT 12
+# define RS400_DISP2_STOP_REQ_LEVEL_MASK 0x3ff
+# define RS400_DISP2_ALLOW_FID_LEVEL_SHIFT 22
+# define RS400_DISP2_ALLOW_FID_LEVEL_MASK 0x3ff
+#define RS400_DISP2_REQ_CNTL2 0xe34
+# define RS400_DISP2_CRITICAL_POINT_START_SHIFT 12
+# define RS400_DISP2_CRITICAL_POINT_START_MASK 0x3ff
+# define RS400_DISP2_CRITICAL_POINT_STOP_SHIFT 22
+# define RS400_DISP2_CRITICAL_POINT_STOP_MASK 0x3ff
+#define RS400_DMIF_MEM_CNTL1 0xe38
+# define RS400_DISP2_START_ADR_SHIFT 0
+# define RS400_DISP2_START_ADR_MASK 0x3ff
+# define RS400_DISP1_CRITICAL_POINT_START_SHIFT 12
+# define RS400_DISP1_CRITICAL_POINT_START_MASK 0x3ff
+# define RS400_DISP1_CRITICAL_POINT_STOP_SHIFT 22
+# define RS400_DISP1_CRITICAL_POINT_STOP_MASK 0x3ff
+#define RS400_DISP1_REQ_CNTL1 0xe3c
+# define RS400_DISP1_START_REQ_LEVEL_SHIFT 0
+# define RS400_DISP1_START_REQ_LEVEL_MASK 0x3ff
+# define RS400_DISP1_STOP_REQ_LEVEL_SHIFT 12
+# define RS400_DISP1_STOP_REQ_LEVEL_MASK 0x3ff
+# define RS400_DISP1_ALLOW_FID_LEVEL_SHIFT 22
+# define RS400_DISP1_ALLOW_FID_LEVEL_MASK 0x3ff
+
+#define RADEON_PCIE_INDEX 0x0030
+#define RADEON_PCIE_DATA 0x0034
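+/* The PCIE registers below are indirect: the small index (0x10-0x18) is
+ * written to RADEON_PCIE_INDEX and the value is then accessed through
+ * RADEON_PCIE_DATA. */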
+#define RADEON_PCIE_TX_GART_CNTL 0x10
+# define RADEON_PCIE_TX_GART_EN (1 << 0)
+# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
+# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1)
+# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1)
+# define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3)
+# define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3)
+# define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5)
+# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8)
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
+#define RADEON_PCIE_TX_GART_BASE 0x13
+#define RADEON_PCIE_TX_GART_START_LO 0x14
+#define RADEON_PCIE_TX_GART_START_HI 0x15
+#define RADEON_PCIE_TX_GART_END_LO 0x16
+#define RADEON_PCIE_TX_GART_END_HI 0x17
+#define RADEON_PCIE_TX_GART_ERROR 0x18
+
+#define RADEON_SCRATCH_REG0 0x15e0
+#define RADEON_SCRATCH_REG1 0x15e4
+#define RADEON_SCRATCH_REG2 0x15e8
+#define RADEON_SCRATCH_REG3 0x15ec
+#define RADEON_SCRATCH_REG4 0x15f0
+#define RADEON_SCRATCH_REG5 0x15f4
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
new file mode 100644
index 00000000000..a853261d188
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+int radeon_debugfs_ib_init(struct radeon_device *rdev);
+
+/*
+ * IB.
+ */
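+/* Hand out an IB from the pool: take the first free slot in the allocation
+ * bitmap or, when every slot is in use, reclaim the oldest scheduled IB by
+ * waiting on its fence before reusing it. */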
+int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
+{
+ struct radeon_fence *fence;
+ struct radeon_ib *nib;
+ unsigned long i;
+ int r = 0;
+
+ *ib = NULL;
+ r = radeon_fence_create(rdev, &fence);
+ if (r) {
+ DRM_ERROR("failed to create fence for new IB\n");
+ return r;
+ }
+ mutex_lock(&rdev->ib_pool.mutex);
+ i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+ if (i < RADEON_IB_POOL_SIZE) {
+ set_bit(i, rdev->ib_pool.alloc_bm);
+ rdev->ib_pool.ibs[i].length_dw = 0;
+ *ib = &rdev->ib_pool.ibs[i];
+ goto out;
+ }
+ if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
+ /* nothing we can do here */
+ DRM_ERROR("all IBs allocated, none scheduled.\n");
+ r = -EINVAL;
+ goto out;
+ }
+ /* get the first ib on the scheduled list */
+ nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
+ struct radeon_ib, list);
+ if (nib->fence == NULL) {
+ /* nothing we can do here */
+ DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
+ r = -EINVAL;
+ goto out;
+ }
+ r = radeon_fence_wait(nib->fence, false);
+ if (r) {
+ DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
+ (unsigned long)nib->gpu_addr, nib->length_dw);
+ DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
+ goto out;
+ }
+ radeon_fence_unref(&nib->fence);
+ nib->length_dw = 0;
+ list_del(&nib->list);
+ INIT_LIST_HEAD(&nib->list);
+ *ib = nib;
+out:
+ mutex_unlock(&rdev->ib_pool.mutex);
+ if (r) {
+ radeon_fence_unref(&fence);
+ } else {
+ (*ib)->fence = fence;
+ }
+ return r;
+}
+
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+{
+ struct radeon_ib *tmp = *ib;
+
+ *ib = NULL;
+ if (tmp == NULL) {
+ return;
+ }
+ mutex_lock(&rdev->ib_pool.mutex);
+ if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
+ /* IB is scheduled and its fence has not signaled yet; leave it alone */
+ mutex_unlock(&rdev->ib_pool.mutex);
+ return;
+ }
+ list_del(&tmp->list);
+ INIT_LIST_HEAD(&tmp->list);
+ if (tmp->fence) {
+ radeon_fence_unref(&tmp->fence);
+ }
+ tmp->length_dw = 0;
+ clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+ mutex_unlock(&rdev->ib_pool.mutex);
+}
+
+static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ while ((ib->length_dw & rdev->cp.align_mask)) {
+ ib->ptr[ib->length_dw++] = PACKET2(0);
+ }
+}
+
+static void radeon_ib_cpu_flush(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ unsigned long tmp;
+ unsigned i;
+
+ /* Force a CPU cache flush by reading the IB back; ugly, but seems reliable */
+ for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) {
+ tmp = readl(&ib->ptr[i]);
+ }
+}
+
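+/* Submit an IB to the CP: pad it to the ring alignment, flush it out of the
+ * CPU caches, then emit an indirect-buffer request (CP_IB_BASE/CP_IB_BUFSZ)
+ * followed by the IB's fence on the main ring. */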
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ int r = 0;
+
+ mutex_lock(&rdev->ib_pool.mutex);
+ radeon_ib_align(rdev, ib);
+ radeon_ib_cpu_flush(rdev, ib);
+ if (!ib->length_dw || !rdev->cp.ready) {
+ /* TODO: nothing to schedule (empty IB or CP not ready); report this properly. */
+ mutex_unlock(&rdev->ib_pool.mutex);
+ DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+ return -EINVAL;
+ }
+ /* 64 dwords should be enough for the fence too */
+ r = radeon_ring_lock(rdev, 64);
+ if (r) {
+ DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
+ mutex_unlock(&rdev->ib_pool.mutex);
+ return r;
+ }
+ radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
+ radeon_ring_write(rdev, ib->gpu_addr);
+ radeon_ring_write(rdev, ib->length_dw);
+ radeon_fence_emit(rdev, ib->fence);
+ radeon_ring_unlock_commit(rdev);
+ list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+ mutex_unlock(&rdev->ib_pool.mutex);
+ return 0;
+}
+
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+ void *ptr;
+ uint64_t gpu_addr;
+ int i;
+ int r = 0;
+
+ /* Allocate the IB pool buffer: RADEON_IB_POOL_SIZE slots of 64KB each */
+ INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
+ r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+ true, RADEON_GEM_DOMAIN_GTT,
+ false, &rdev->ib_pool.robj);
+ if (r) {
+ DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
+ return r;
+ }
+ r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+ if (r) {
+ DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
+ return r;
+ }
+ r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+ if (r) {
+ DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ unsigned offset;
+
+ offset = i * 64 * 1024;
+ rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
+ rdev->ib_pool.ibs[i].ptr = ptr + offset;
+ rdev->ib_pool.ibs[i].idx = i;
+ rdev->ib_pool.ibs[i].length_dw = 0;
+ INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+ }
+ bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+ rdev->ib_pool.ready = true;
+ DRM_INFO("radeon: ib pool ready.\n");
+ if (radeon_debugfs_ib_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for IB!\n");
+ }
+ return r;
+}
+
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+ if (!rdev->ib_pool.ready) {
+ return;
+ }
+ mutex_lock(&rdev->ib_pool.mutex);
+ bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+ if (rdev->ib_pool.robj) {
+ radeon_object_kunmap(rdev->ib_pool.robj);
+ radeon_object_unref(&rdev->ib_pool.robj);
+ rdev->ib_pool.robj = NULL;
+ }
+ mutex_unlock(&rdev->ib_pool.mutex);
+}
+
+int radeon_ib_test(struct radeon_device *rdev)
+{
+ struct radeon_ib *ib;
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = radeon_scratch_get(rdev, &scratch);
+ if (r) {
+ DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ r = radeon_ib_get(rdev, &ib);
+ if (r) {
+ radeon_scratch_free(rdev, scratch);
+ return r;
+ }
+ ib->ptr[0] = PACKET0(scratch, 0);
+ ib->ptr[1] = 0xDEADBEEF;
+ ib->ptr[2] = PACKET2(0);
+ ib->ptr[3] = PACKET2(0);
+ ib->ptr[4] = PACKET2(0);
+ ib->ptr[5] = PACKET2(0);
+ ib->ptr[6] = PACKET2(0);
+ ib->ptr[7] = PACKET2(0);
+ ib->length_dw = 8;
+ r = radeon_ib_schedule(rdev, ib);
+ if (r) {
+ radeon_scratch_free(rdev, scratch);
+ radeon_ib_free(rdev, &ib);
+ return r;
+ }
+ r = radeon_fence_wait(ib->fence, false);
+ if (r) {
+ radeon_scratch_free(rdev, scratch);
+ radeon_ib_free(rdev, &ib);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ break;
+ }
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test succeeded in %u usecs\n", i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
+ scratch, tmp);
+ r = -EINVAL;
+ }
+ radeon_scratch_free(rdev, scratch);
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+
+/*
+ * Ring.
+ */
+void radeon_ring_free_size(struct radeon_device *rdev)
+{
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ /* This works because ring_size is a power of 2 */
+ rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
+ rdev->cp.ring_free_dw -= rdev->cp.wptr;
+ rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
+ if (!rdev->cp.ring_free_dw) {
+ rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+ }
+}
+
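Because the ring size in dwords is a power of two, the free-space computation above reduces to an addition, a subtraction and a mask. A standalone sketch of the same arithmetic, with example values only:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as radeon_ring_free_size(): ring_dw must be a power of
 * two so the "& (ring_dw - 1)" acts as a cheap modulo. */
static uint32_t ring_free_dw(uint32_t rptr, uint32_t wptr, uint32_t ring_dw)
{
	uint32_t free_dw = (rptr + ring_dw - wptr) & (ring_dw - 1);

	/* rptr == wptr means the ring is empty, i.e. fully free */
	return free_dw ? free_dw : ring_dw;
}

int main(void)
{
	/* 1024-dword ring, writer 100 dwords ahead of the reader */
	printf("%u\n", ring_free_dw(16, 116, 1024));	/* prints 924 */
	return 0;
}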
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+ int r;
+
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
+ mutex_lock(&rdev->cp.mutex);
+ while (ndw > (rdev->cp.ring_free_dw - 1)) {
+ radeon_ring_free_size(rdev);
+ if (ndw < rdev->cp.ring_free_dw) {
+ break;
+ }
+ r = radeon_fence_wait_next(rdev);
+ if (r) {
+ mutex_unlock(&rdev->cp.mutex);
+ return r;
+ }
+ }
+ rdev->cp.count_dw = ndw;
+ rdev->cp.wptr_old = rdev->cp.wptr;
+ return 0;
+}
+
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
+{
+ unsigned count_dw_pad;
+ unsigned i;
+
+ /* We pad to match fetch size */
+ count_dw_pad = (rdev->cp.align_mask + 1) -
+ (rdev->cp.wptr & rdev->cp.align_mask);
+ for (i = 0; i < count_dw_pad; i++) {
+ radeon_ring_write(rdev, PACKET2(0));
+ }
+ DRM_MEMORYBARRIER();
+ WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+ (void)RREG32(RADEON_CP_RB_WPTR);
+ mutex_unlock(&rdev->cp.mutex);
+}
+
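radeon_ring_unlock_commit() rounds the write pointer up to the next fetch-size boundary with NOP dwords before handing it to the hardware; note that an already-aligned wptr still receives a full fetch-size worth of padding, matching the code above. A small standalone check with made-up numbers:

#include <stdio.h>
#include <stdint.h>

/* Pad computation used by radeon_ring_unlock_commit(): bring wptr up to
 * the next multiple of the CP fetch size (align_mask + 1). */
static uint32_t pad_dwords(uint32_t wptr, uint32_t align_mask)
{
	return (align_mask + 1) - (wptr & align_mask);
}

int main(void)
{
	/* 16-dword fetch size (align_mask = 15): wptr 37 needs 11 NOPs */
	printf("%u\n", pad_dwords(37, 15));	/* prints 11 */
	return 0;
}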
+void radeon_ring_unlock_undo(struct radeon_device *rdev)
+{
+ rdev->cp.wptr = rdev->cp.wptr_old;
+ mutex_unlock(&rdev->cp.mutex);
+}
+
+int radeon_ring_test(struct radeon_device *rdev)
+{
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = radeon_scratch_get(rdev, &scratch);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ r = radeon_ring_lock(rdev, 2);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ radeon_scratch_free(rdev, scratch);
+ return r;
+ }
+ radeon_ring_write(rdev, PACKET0(scratch, 0));
+ radeon_ring_write(rdev, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ break;
+ }
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test succeeded in %d usecs\n", i);
+ } else {
+ DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
+ scratch, tmp);
+ r = -EINVAL;
+ }
+ radeon_scratch_free(rdev, scratch);
+ return r;
+}
+
+int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
+{
+ int r;
+
+ rdev->cp.ring_size = ring_size;
+ /* Allocate ring buffer */
+ if (rdev->cp.ring_obj == NULL) {
+ r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
+ true,
+ RADEON_GEM_DOMAIN_GTT,
+ false,
+ &rdev->cp.ring_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
+ mutex_unlock(&rdev->cp.mutex);
+ return r;
+ }
+ r = radeon_object_pin(rdev->cp.ring_obj,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->cp.gpu_addr);
+ if (r) {
+ DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
+ mutex_unlock(&rdev->cp.mutex);
+ return r;
+ }
+ r = radeon_object_kmap(rdev->cp.ring_obj,
+ (void **)&rdev->cp.ring);
+ if (r) {
+ DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
+ mutex_unlock(&rdev->cp.mutex);
+ return r;
+ }
+ }
+ rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
+ rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+ return 0;
+}
+
+void radeon_ring_fini(struct radeon_device *rdev)
+{
+ mutex_lock(&rdev->cp.mutex);
+ if (rdev->cp.ring_obj) {
+ radeon_object_kunmap(rdev->cp.ring_obj);
+ radeon_object_unpin(rdev->cp.ring_obj);
+ radeon_object_unref(&rdev->cp.ring_obj);
+ rdev->cp.ring = NULL;
+ rdev->cp.ring_obj = NULL;
+ }
+ mutex_unlock(&rdev->cp.mutex);
+}
+
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct radeon_ib *ib = node->info_ent->data;
+ unsigned i;
+
+ if (ib == NULL) {
+ return 0;
+ }
+ seq_printf(m, "IB %04lu\n", ib->idx);
+ seq_printf(m, "IB fence %p\n", ib->fence);
+ seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
+ for (i = 0; i < ib->length_dw; i++) {
+ seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+ }
+ return 0;
+}
+
+static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
+static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+#endif
+
+int radeon_debugfs_ib_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned i;
+
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
+ radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
+ radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
+ radeon_debugfs_ib_list[i].driver_features = 0;
+ radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
+ }
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
+ RADEON_IB_POOL_SIZE);
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
new file mode 100644
index 00000000000..4c087c1510d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -0,0 +1,653 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ * Dave Airlie
+ */
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
+{
+ struct radeon_mman *mman;
+ struct radeon_device *rdev;
+
+ mman = container_of(bdev, struct radeon_mman, bdev);
+ rdev = container_of(mman, struct radeon_device, mman);
+ return rdev;
+}
+
+
+/*
+ * Global memory.
+ */
+static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int radeon_ttm_global_init(struct radeon_device *rdev)
+{
+ struct ttm_global_reference *global_ref;
+ int r;
+
+ rdev->mman.mem_global_referenced = false;
+ global_ref = &rdev->mman.mem_global_ref;
+ global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &radeon_ttm_mem_global_init;
+ global_ref->release = &radeon_ttm_mem_global_release;
+ r = ttm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed referencing a global TTM memory object.\n");
+ return r;
+ }
+ rdev->mman.mem_global_referenced = true;
+ return 0;
+}
+
+static void radeon_ttm_global_fini(struct radeon_device *rdev)
+{
+ if (rdev->mman.mem_global_referenced) {
+ ttm_global_item_unref(&rdev->mman.mem_global_ref);
+ rdev->mman.mem_global_referenced = false;
+ }
+}
+
+struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
+
+static struct ttm_backend*
+radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+{
+ struct radeon_device *rdev;
+
+ rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
+ } else
+#endif
+ {
+ return radeon_ttm_backend_create(rdev);
+ }
+}
+
+static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+ return 0;
+}
+
+static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ struct radeon_device *rdev;
+
+ rdev = radeon_get_rdev(bdev);
+
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_TT:
+ man->gpu_offset = 0;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
+ DRM_ERROR("AGP is not enabled for memory type %u\n",
+ (unsigned)type);
+ return -EINVAL;
+ }
+ man->io_offset = rdev->mc.agp_base;
+ man->io_size = rdev->mc.gtt_size;
+ man->io_addr = NULL;
+ man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ } else
+#endif
+ {
+ man->io_offset = 0;
+ man->io_size = 0;
+ man->io_addr = NULL;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+ TTM_MEMTYPE_FLAG_CMA;
+ }
+ break;
+ case TTM_PL_VRAM:
+ /* "On-card" video ram */
+ man->gpu_offset = 0;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ man->io_addr = NULL;
+ man->io_offset = rdev->mc.aper_base;
+ man->io_size = rdev->mc.aper_size;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
+{
+ uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
+
+ switch (bo->mem.mem_type) {
+ default:
+ return (cur_placement & ~TTM_PL_MASK_CACHING) |
+ TTM_PL_FLAG_SYSTEM |
+ TTM_PL_FLAG_CACHED;
+ }
+}
+
+static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static void radeon_move_null(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_mem_reg *old_mem = &bo->mem;
+
+ BUG_ON(old_mem->mm_node != NULL);
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+}
+
+static int radeon_move_blit(struct ttm_buffer_object *bo,
+ bool evict, int no_wait,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
+{
+ struct radeon_device *rdev;
+ uint64_t old_start, new_start;
+ struct radeon_fence *fence;
+ int r;
+
+ rdev = radeon_get_rdev(bo->bdev);
+ r = radeon_fence_create(rdev, &fence);
+ if (unlikely(r)) {
+ return r;
+ }
+ old_start = old_mem->mm_node->start << PAGE_SHIFT;
+ new_start = new_mem->mm_node->start << PAGE_SHIFT;
+
+ switch (old_mem->mem_type) {
+ case TTM_PL_VRAM:
+ old_start += rdev->mc.vram_location;
+ break;
+ case TTM_PL_TT:
+ old_start += rdev->mc.gtt_location;
+ break;
+ default:
+ DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ return -EINVAL;
+ }
+ switch (new_mem->mem_type) {
+ case TTM_PL_VRAM:
+ new_start += rdev->mc.vram_location;
+ break;
+ case TTM_PL_TT:
+ new_start += rdev->mc.gtt_location;
+ break;
+ default:
+ DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ return -EINVAL;
+ }
+ if (!rdev->cp.ready) {
+ DRM_ERROR("Trying to move memory with CP turned off.\n");
+ return -EINVAL;
+ }
+ r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+ /* FIXME: handle copy error */
+ r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
+ evict, no_wait, new_mem);
+ radeon_fence_unref(&fence);
+ return r;
+}
+
+static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible, bool no_wait,
+ struct ttm_mem_reg *new_mem)
+{
+ struct radeon_device *rdev;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_mem_reg tmp_mem;
+ uint32_t proposed_placement;
+ int r;
+
+ rdev = radeon_get_rdev(bo->bdev);
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+ r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
+ interruptible, no_wait);
+ if (unlikely(r)) {
+ return r;
+ }
+ r = ttm_tt_bind(bo->ttm, &tmp_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+ r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+ r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+out_cleanup:
+ if (tmp_mem.mm_node) {
+ spin_lock(&rdev->mman.bdev.lru_lock);
+ drm_mm_put_block(tmp_mem.mm_node);
+ spin_unlock(&rdev->mman.bdev.lru_lock);
+ return r;
+ }
+ return r;
+}
+
+static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible, bool no_wait,
+ struct ttm_mem_reg *new_mem)
+{
+ struct radeon_device *rdev;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_mem_reg tmp_mem;
+ uint32_t proposed_flags;
+ int r;
+
+ rdev = radeon_get_rdev(bo->bdev);
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+ r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
+ interruptible, no_wait);
+ if (unlikely(r)) {
+ return r;
+ }
+ r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+ r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+out_cleanup:
+ if (tmp_mem.mm_node) {
+ spin_lock(&rdev->mman.bdev.lru_lock);
+ drm_mm_put_block(tmp_mem.mm_node);
+ spin_unlock(&rdev->mman.bdev.lru_lock);
+ return r;
+ }
+ return r;
+}
+
+static int radeon_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible, bool no_wait,
+ struct ttm_mem_reg *new_mem)
+{
+ struct radeon_device *rdev;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ int r;
+
+ rdev = radeon_get_rdev(bo->bdev);
+ if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ radeon_move_null(bo, new_mem);
+ return 0;
+ }
+ if ((old_mem->mem_type == TTM_PL_TT &&
+ new_mem->mem_type == TTM_PL_SYSTEM) ||
+ (old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_TT)) {
+ /* binding is enough */
+ radeon_move_null(bo, new_mem);
+ return 0;
+ }
+ if (!rdev->cp.ready) {
+ /* use memcpy */
+ DRM_ERROR("CP is not ready, falling back to memcpy.\n");
+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+
+ if (old_mem->mem_type == TTM_PL_VRAM &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ return radeon_move_vram_ram(bo, evict, interruptible,
+ no_wait, new_mem);
+ } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_VRAM) {
+ return radeon_move_ram_vram(bo, evict, interruptible,
+ no_wait, new_mem);
+ } else {
+ r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+ if (unlikely(r)) {
+ return r;
+ }
+ }
+ return r;
+}
+
+const uint32_t radeon_mem_prios[] = {
+ TTM_PL_VRAM,
+ TTM_PL_TT,
+ TTM_PL_SYSTEM,
+};
+
+const uint32_t radeon_busy_prios[] = {
+ TTM_PL_TT,
+ TTM_PL_VRAM,
+ TTM_PL_SYSTEM,
+};
+
+static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
+ bool lazy, bool interruptible)
+{
+ return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
+}
+
+static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
+{
+ return 0;
+}
+
+static void radeon_sync_obj_unref(void **sync_obj)
+{
+ radeon_fence_unref((struct radeon_fence **)sync_obj);
+}
+
+static void *radeon_sync_obj_ref(void *sync_obj)
+{
+ return radeon_fence_ref((struct radeon_fence *)sync_obj);
+}
+
+static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
+{
+ return radeon_fence_signaled((struct radeon_fence *)sync_obj);
+}
+
+static struct ttm_bo_driver radeon_bo_driver = {
+ .mem_type_prio = radeon_mem_prios,
+ .mem_busy_prio = radeon_busy_prios,
+ .num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
+ .num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
+ .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
+ .invalidate_caches = &radeon_invalidate_caches,
+ .init_mem_type = &radeon_init_mem_type,
+ .evict_flags = &radeon_evict_flags,
+ .move = &radeon_bo_move,
+ .verify_access = &radeon_verify_access,
+ .sync_obj_signaled = &radeon_sync_obj_signaled,
+ .sync_obj_wait = &radeon_sync_obj_wait,
+ .sync_obj_flush = &radeon_sync_obj_flush,
+ .sync_obj_unref = &radeon_sync_obj_unref,
+ .sync_obj_ref = &radeon_sync_obj_ref,
+};
+
+int radeon_ttm_init(struct radeon_device *rdev)
+{
+ int r;
+
+ r = radeon_ttm_global_init(rdev);
+ if (r) {
+ return r;
+ }
+ /* No other user of the address space, so set it to 0 */
+ r = ttm_bo_device_init(&rdev->mman.bdev,
+ rdev->mman.mem_global_ref.object,
+ &radeon_bo_driver, DRM_FILE_PAGE_OFFSET);
+ if (r) {
+ DRM_ERROR("failed initializing buffer object driver (%d).\n", r);
+ return r;
+ }
+ r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
+ ((rdev->mc.aper_size) >> PAGE_SHIFT));
+ if (r) {
+ DRM_ERROR("Failed initializing VRAM heap.\n");
+ return r;
+ }
+ r = radeon_object_create(rdev, NULL, 256 * 1024, true,
+ RADEON_GEM_DOMAIN_VRAM, false,
+ &rdev->stollen_vga_memory);
+ if (r) {
+ return r;
+ }
+ r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+ if (r) {
+ radeon_object_unref(&rdev->stollen_vga_memory);
+ return r;
+ }
+ DRM_INFO("radeon: %uM of VRAM memory ready\n",
+ rdev->mc.vram_size / (1024 * 1024));
+ r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
+ ((rdev->mc.gtt_size) >> PAGE_SHIFT));
+ if (r) {
+ DRM_ERROR("Failed initializing GTT heap.\n");
+ return r;
+ }
+ DRM_INFO("radeon: %uM of GTT memory ready.\n",
+ rdev->mc.gtt_size / (1024 * 1024));
+ if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
+ rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+ }
+ return 0;
+}
+
+void radeon_ttm_fini(struct radeon_device *rdev)
+{
+ if (rdev->stollen_vga_memory) {
+ radeon_object_unpin(rdev->stollen_vga_memory);
+ radeon_object_unref(&rdev->stollen_vga_memory);
+ }
+ ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+ ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
+ ttm_bo_device_release(&rdev->mman.bdev);
+ radeon_gart_fini(rdev);
+ radeon_ttm_global_fini(rdev);
+ DRM_INFO("radeon: ttm finalized\n");
+}
+
+static struct vm_operations_struct radeon_ttm_vm_ops;
+static struct vm_operations_struct *ttm_vm_ops = NULL;
+
+static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo;
+ int r;
+
+ bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ if (bo == NULL) {
+ return VM_FAULT_NOPAGE;
+ }
+ r = ttm_vm_ops->fault(vma, vmf);
+ return r;
+}
+
+int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct radeon_device *rdev;
+ int r;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+ return drm_mmap(filp, vma);
+ }
+
+ file_priv = (struct drm_file *)filp->private_data;
+ rdev = file_priv->minor->dev->dev_private;
+ if (rdev == NULL) {
+ return -EINVAL;
+ }
+ r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
+ if (unlikely(r != 0)) {
+ return r;
+ }
+ if (unlikely(ttm_vm_ops == NULL)) {
+ ttm_vm_ops = vma->vm_ops;
+ radeon_ttm_vm_ops = *ttm_vm_ops;
+ radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
+ }
+ vma->vm_ops = &radeon_ttm_vm_ops;
+ return 0;
+}
+
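radeon_mmap() above keeps TTM's vm_ops but substitutes its own fault handler: the original table is copied once, a single callback is overridden, and the saved pointer is used to chain back. A generic, self-contained illustration of that pattern, with all names made up:

#include <stdio.h>

/* "Copy the ops table, override one hook, chain to the original" --
 * the same shape as radeon_ttm_vm_ops / radeon_ttm_fault() above. */
struct ops {
	int (*fault)(int arg);
};

static int base_fault(int arg)
{
	return arg + 1;
}

static struct ops base_ops = { .fault = base_fault };

static struct ops wrapped_ops;		/* private copy with one hook swapped */
static const struct ops *saved_ops;	/* original table, kept for chaining */

static int wrapped_fault(int arg)
{
	/* driver-specific work would go here, then fall through */
	return saved_ops->fault(arg);
}

int main(void)
{
	saved_ops = &base_ops;
	wrapped_ops = base_ops;			/* struct copy */
	wrapped_ops.fault = wrapped_fault;	/* override the fault hook */
	printf("%d\n", wrapped_ops.fault(41));	/* prints 42 */
	return 0;
}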
+
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_backend {
+ struct ttm_backend backend;
+ struct radeon_device *rdev;
+ unsigned long num_pages;
+ struct page **pages;
+ struct page *dummy_read_page;
+ bool populated;
+ bool bound;
+ unsigned offset;
+};
+
+static int radeon_ttm_backend_populate(struct ttm_backend *backend,
+ unsigned long num_pages,
+ struct page **pages,
+ struct page *dummy_read_page)
+{
+ struct radeon_ttm_backend *gtt;
+
+ gtt = container_of(backend, struct radeon_ttm_backend, backend);
+ gtt->pages = pages;
+ gtt->num_pages = num_pages;
+ gtt->dummy_read_page = dummy_read_page;
+ gtt->populated = true;
+ return 0;
+}
+
+static void radeon_ttm_backend_clear(struct ttm_backend *backend)
+{
+ struct radeon_ttm_backend *gtt;
+
+ gtt = container_of(backend, struct radeon_ttm_backend, backend);
+ gtt->pages = NULL;
+ gtt->num_pages = 0;
+ gtt->dummy_read_page = NULL;
+ gtt->populated = false;
+ gtt->bound = false;
+}
+
+
+static int radeon_ttm_backend_bind(struct ttm_backend *backend,
+ struct ttm_mem_reg *bo_mem)
+{
+ struct radeon_ttm_backend *gtt;
+ int r;
+
+ gtt = container_of(backend, struct radeon_ttm_backend, backend);
+ gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
+ if (!gtt->num_pages) {
+ WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
+ }
+ r = radeon_gart_bind(gtt->rdev, gtt->offset,
+ gtt->num_pages, gtt->pages);
+ if (r) {
+ DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+ gtt->num_pages, gtt->offset);
+ return r;
+ }
+ gtt->bound = true;
+ return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
+{
+ struct radeon_ttm_backend *gtt;
+
+ gtt = container_of(backend, struct radeon_ttm_backend, backend);
+ radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
+ gtt->bound = false;
+ return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
+{
+ struct radeon_ttm_backend *gtt;
+
+ gtt = container_of(backend, struct radeon_ttm_backend, backend);
+ if (gtt->bound) {
+ radeon_ttm_backend_unbind(backend);
+ }
+ kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+ .populate = &radeon_ttm_backend_populate,
+ .clear = &radeon_ttm_backend_clear,
+ .bind = &radeon_ttm_backend_bind,
+ .unbind = &radeon_ttm_backend_unbind,
+ .destroy = &radeon_ttm_backend_destroy,
+};
+
+struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
+{
+ struct radeon_ttm_backend *gtt;
+
+ gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
+ if (gtt == NULL) {
+ return NULL;
+ }
+ gtt->backend.bdev = &rdev->mman.bdev;
+ gtt->backend.flags = 0;
+ gtt->backend.func = &radeon_backend_func;
+ gtt->rdev = rdev;
+ gtt->pages = NULL;
+ gtt->num_pages = 0;
+ gtt->dummy_read_page = NULL;
+ gtt->populated = false;
+ gtt->bound = false;
+ return &gtt->backend;
+}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
new file mode 100644
index 00000000000..cc074b5a8f7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* rs400,rs480 depends on : */
+void r100_hdp_reset(struct radeon_device *rdev);
+void r100_mc_disable_clients(struct radeon_device *rdev);
+int r300_mc_wait_for_idle(struct radeon_device *rdev);
+void r420_pipes_init(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * rs400, rs480
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void rs400_gpu_init(struct radeon_device *rdev);
+int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
+
+/*
+ * GART functions.
+ */
+void rs400_gart_adjust_size(struct radeon_device *rdev)
+{
+ /* Check gart size */
+ switch (rdev->mc.gtt_size/(1024*1024)) {
+ case 32:
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
+ default:
+ DRM_ERROR("Unable to use IGP GART size %uM\n",
+ rdev->mc.gtt_size >> 20);
+ DRM_ERROR("Valid GART sizes for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
+ DRM_ERROR("Forcing to 32M GART size\n");
+ rdev->mc.gtt_size = 32 * 1024 * 1024;
+ return;
+ }
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
+ /* FIXME: RS400 & RS480 seem to have issues with the GART size
+ * when 4G of system memory is present (needs more testing) */
+ rdev->mc.gtt_size = 32 * 1024 * 1024;
+ DRM_ERROR("Forcing to 32M GART size (possible ASIC bug?)\n");
+ }
+}
+
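The switch in rs400_gart_adjust_size() above accepts exactly the power-of-two sizes between 32MB and 2GB. An equivalent standalone check, written as a predicate instead of a switch (illustration only):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Equivalent to the size switch in rs400_gart_adjust_size(): valid IGP
 * GART sizes are the powers of two from 32MB to 2GB inclusive. */
static bool igp_gart_size_ok(uint64_t bytes)
{
	uint64_t mb = bytes >> 20;

	if (mb < 32 || mb > 2048)
		return false;
	return (mb & (mb - 1)) == 0;	/* power-of-two test */
}

int main(void)
{
	printf("%d %d %d\n",
	       igp_gart_size_ok(256ULL << 20),	/* 1: 256MB is valid   */
	       igp_gart_size_ok(96ULL << 20),	/* 0: not a power of 2 */
	       igp_gart_size_ok(4096ULL << 20));	/* 0: larger than 2GB */
	return 0;
}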
+void rs400_gart_tlb_flush(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ unsigned int timeout = rdev->usec_timeout;
+
+ WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
+ do {
+ tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
+ if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
+ break;
+ DRM_UDELAY(1);
+ timeout--;
+ } while (timeout > 0);
+ WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
+}
+
+int rs400_gart_enable(struct radeon_device *rdev)
+{
+ uint32_t size_reg;
+ uint32_t tmp;
+ int r;
+
+ /* Initialize common gart structure */
+ r = radeon_gart_init(rdev);
+ if (r) {
+ return r;
+ }
+ if (rs400_debugfs_pcie_gart_info_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for RS400 GART!\n");
+ }
+
+ tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+ tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
+ WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
+ /* Check gart size */
+ switch(rdev->mc.gtt_size / (1024 * 1024)) {
+ case 32:
+ size_reg = RS480_VA_SIZE_32MB;
+ break;
+ case 64:
+ size_reg = RS480_VA_SIZE_64MB;
+ break;
+ case 128:
+ size_reg = RS480_VA_SIZE_128MB;
+ break;
+ case 256:
+ size_reg = RS480_VA_SIZE_256MB;
+ break;
+ case 512:
+ size_reg = RS480_VA_SIZE_512MB;
+ break;
+ case 1024:
+ size_reg = RS480_VA_SIZE_1GB;
+ break;
+ case 2048:
+ size_reg = RS480_VA_SIZE_2GB;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (rdev->gart.table.ram.ptr == NULL) {
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+ r = radeon_gart_table_ram_alloc(rdev);
+ if (r) {
+ return r;
+ }
+ }
+ /* It should be fine to program it to max value */
+ if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
+ WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
+ WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
+ } else {
+ WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
+ WREG32(RS480_AGP_BASE_2, 0);
+ }
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16);
+ tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
+ if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+ WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+ } else {
+ WREG32(RADEON_MC_AGP_LOCATION, tmp);
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+ }
+ /* The table must live in the 32-bit address space, so ignore the bits above. */
+ tmp = rdev->gart.table_addr & 0xfffff000;
+ WREG32_MC(RS480_GART_BASE, tmp);
+ /* TODO: more tweaking here */
+ WREG32_MC(RS480_GART_FEATURE_ID,
+ (RS480_TLB_ENABLE |
+ RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
+ /* Disable snooping */
+ WREG32_MC(RS480_AGP_MODE_CNTL,
+ (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
+ /* Disable AGP mode */
+ /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
+ * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
+ if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+ WREG32_MC(RS480_MC_MISC_CNTL,
+ (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
+ } else {
+ WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+ }
+ /* Enable gart */
+ WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
+ rs400_gart_tlb_flush(rdev);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void rs400_gart_disable(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+ tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
+ WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
+ WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
+}
+
+int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+ if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+ return -EINVAL;
+ }
+ rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC);
+ return 0;
+}
+
+
+/*
+ * MC functions.
+ */
+int rs400_mc_init(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int r;
+
+ if (r100_debugfs_rbbm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for RBBM!\n");
+ }
+
+ rs400_gpu_init(rdev);
+ rs400_gart_disable(rdev);
+ rdev->mc.gtt_location = rdev->mc.vram_size;
+ rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
+ rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
+ rdev->mc.vram_location = 0xFFFFFFFFUL;
+ r = radeon_mc_setup(rdev);
+ if (r) {
+ return r;
+ }
+
+ r100_mc_disable_clients(rdev);
+ if (r300_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait for MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+
+ tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+ tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
+ tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
+ WREG32(RADEON_MC_FB_LOCATION, tmp);
+ tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS;
+ WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
+ (void)RREG32(RADEON_HOST_PATH_CNTL);
+ WREG32(RADEON_HOST_PATH_CNTL, tmp);
+ (void)RREG32(RADEON_HOST_PATH_CNTL);
+ return 0;
+}
+
+void rs400_mc_fini(struct radeon_device *rdev)
+{
+ rs400_gart_disable(rdev);
+ radeon_gart_table_ram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Global GPU functions
+ */
+void rs400_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+}
+
+void rs400_gpu_init(struct radeon_device *rdev)
+{
+ /* FIXME: is HDP at the same place on rs400? */
+ r100_hdp_reset(rdev);
+ /* FIXME: is this correct ? */
+ r420_pipes_init(rdev);
+ if (r300_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait for MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+}
+
+
+/*
+ * VRAM info.
+ */
+void rs400_vram_info(struct radeon_device *rdev)
+{
+ uint32_t tom;
+
+ rs400_gart_adjust_size(rdev);
+ /* DDR for all cards after R300 & IGP */
+ rdev->mc.vram_is_ddr = true;
+ rdev->mc.vram_width = 128;
+
+ /* read NB_TOM to get the amount of ram stolen for the GPU */
+ tom = RREG32(RADEON_NB_TOM);
+ rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
+ WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+
+ /* Could the aperture size report 0? */
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
+
+
+/*
+ * Indirect registers accessor
+ */
+uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(RS480_NB_MC_INDEX, reg & 0xff);
+ r = RREG32(RS480_NB_MC_DATA);
+ WREG32(RS480_NB_MC_INDEX, 0xff);
+ return r;
+}
+
+void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
+ WREG32(RS480_NB_MC_DATA, (v));
+ WREG32(RS480_NB_MC_INDEX, 0xff);
+}
+
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = RREG32(RADEON_HOST_PATH_CNTL);
+ seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_BUS_CNTL);
+ seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+ seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
+ if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
+ tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
+ seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
+ seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
+ seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x100);
+ seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
+ tmp = RREG32(0x134);
+ seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
+ } else {
+ tmp = RREG32(RADEON_AGP_BASE);
+ seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
+ tmp = RREG32(RS480_AGP_BASE_2);
+ seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_MC_AGP_LOCATION);
+ seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
+ }
+ tmp = RREG32_MC(RS480_GART_BASE);
+ seq_printf(m, "GART_BASE 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS480_GART_FEATURE_ID);
+ seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
+ seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+ seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x5F);
+ seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
+ seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
+ seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x3B);
+ seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x3C);
+ seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x30);
+ seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x31);
+ seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x32);
+ seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x33);
+ seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x34);
+ seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x35);
+ seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x36);
+ seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x37);
+ seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
+ return 0;
+}
+
+static struct drm_info_list rs400_gart_info_list[] = {
+ {"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
+};
+#endif
+
+int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
new file mode 100644
index 00000000000..ab0c967553e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* rs600 depends on : */
+void r100_hdp_reset(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+int r300_mc_wait_for_idle(struct radeon_device *rdev);
+void r420_pipes_init(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * rs600
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void rs600_gpu_init(struct radeon_device *rdev);
+int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+void rs600_disable_vga(struct radeon_device *rdev);
+
+
+/*
+ * GART.
+ */
+void rs600_gart_tlb_flush(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+ tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
+ WREG32_MC(RS600_MC_PT0_CNTL, tmp);
+
+ tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+ tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
+ WREG32_MC(RS600_MC_PT0_CNTL, tmp);
+
+ tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+ tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
+ WREG32_MC(RS600_MC_PT0_CNTL, tmp);
+ tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+}
+
+int rs600_gart_enable(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int i;
+ int r;
+
+ /* Initialize common gart structure */
+ r = radeon_gart_init(rdev);
+ if (r) {
+ return r;
+ }
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
+ r = radeon_gart_table_vram_alloc(rdev);
+ if (r) {
+ return r;
+ }
+ /* FIXME: setup default page */
+ WREG32_MC(RS600_MC_PT0_CNTL,
+ (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
+ RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
+ for (i = 0; i < 19; i++) {
+ WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i,
+ (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
+ RS600_SYSTEM_ACCESS_MODE_IN_SYS |
+ RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE |
+ RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
+ RS600_ENABLE_FRAGMENT_PROCESSING |
+ RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
+ }
+
+ /* System context map to GART space */
+ WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location);
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);
+
+ /* enable first context */
+ WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location);
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp);
+ WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL,
+ (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT));
+ /* disable all other contexts */
+ for (i = 1; i < 8; i++) {
+ WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
+ }
+
+ /* setup the page table */
+ WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+ rdev->gart.table_addr);
+ WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+
+ /* enable page tables */
+ tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+ WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT));
+ tmp = RREG32_MC(RS600_MC_CNTL1);
+ WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES));
+ rs600_gart_tlb_flush(rdev);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void rs600_gart_disable(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ /* FIXME: disable out of gart access */
+ WREG32_MC(RS600_MC_PT0_CNTL, 0);
+ tmp = RREG32_MC(RS600_MC_CNTL1);
+ tmp &= ~RS600_ENABLE_PAGE_TABLES;
+ WREG32_MC(RS600_MC_CNTL1, tmp);
+ radeon_object_kunmap(rdev->gart.table.vram.robj);
+ radeon_object_unpin(rdev->gart.table.vram.robj);
+}
+
+#define R600_PTE_VALID (1 << 0)
+#define R600_PTE_SYSTEM (1 << 1)
+#define R600_PTE_SNOOPED (1 << 2)
+#define R600_PTE_READABLE (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+ void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+
+ if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+ return -EINVAL;
+ }
+ addr = addr & 0xFFFFFFFFFFFFF000ULL;
+ addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+ addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+ writeq(addr, ((void __iomem *)ptr) + (i * 8));
+ return 0;
+}
+
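rs600_gart_set_page() above builds a 64-bit page table entry from the 4KB-aligned system address plus the valid/system/snooped/readable/writeable flag bits. A standalone sketch of the same encoding, with an arbitrary sample address:

#include <stdio.h>
#include <stdint.h>

/* Same PTE encoding as rs600_gart_set_page() above. */
#define PTE_VALID	(1ULL << 0)
#define PTE_SYSTEM	(1ULL << 1)
#define PTE_SNOOPED	(1ULL << 2)
#define PTE_READABLE	(1ULL << 5)
#define PTE_WRITEABLE	(1ULL << 6)

static uint64_t rs600_pte(uint64_t addr)
{
	addr &= 0xFFFFFFFFFFFFF000ULL;	/* keep only the page-aligned part */
	return addr | PTE_VALID | PTE_SYSTEM | PTE_SNOOPED |
	       PTE_READABLE | PTE_WRITEABLE;
}

int main(void)
{
	/* 0x12345678 masks down to page 0x12345000, flags add up to 0x67 */
	printf("0x%llx\n", (unsigned long long)rs600_pte(0x12345678ULL));
	return 0;
}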
+
+/*
+ * MC.
+ */
+void rs600_mc_disable_clients(struct radeon_device *rdev)
+{
+ unsigned tmp;
+
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait for GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+
+ tmp = RREG32(AVIVO_D1VGA_CONTROL);
+ WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
+ tmp = RREG32(AVIVO_D2VGA_CONTROL);
+ WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
+
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL);
+ WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
+ tmp = RREG32(AVIVO_D2CRTC_CONTROL);
+ WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
+
+ /* make sure all previous writes got through */
+ tmp = RREG32(AVIVO_D2CRTC_CONTROL);
+
+ mdelay(1);
+}
+
+int rs600_mc_init(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int r;
+
+ if (r100_debugfs_rbbm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for RBBM!\n");
+ }
+
+ rs600_gpu_init(rdev);
+ rs600_gart_disable(rdev);
+
+ /* Setup GPU memory space */
+ rdev->mc.vram_location = 0xFFFFFFFFUL;
+ rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ r = radeon_mc_setup(rdev);
+ if (r) {
+ return r;
+ }
+
+ /* Program GPU memory space */
+ /* Enable bus master */
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+ /* FIXME: what does AGP mean for such a chipset? */
+ WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
+ /* FIXME: are these AGP regs in the indirect MC range? */
+ WREG32_MC(RS600_MC_AGP_BASE, 0);
+ WREG32_MC(RS600_MC_AGP_BASE_2, 0);
+ rs600_mc_disable_clients(rdev);
+ if (rs600_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait for MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+ tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+ tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
+ tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
+ WREG32_MC(RS600_MC_FB_LOCATION, tmp);
+ WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
+ return 0;
+}
+
+void rs600_mc_fini(struct radeon_device *rdev)
+{
+ rs600_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Global GPU functions
+ */
+void rs600_disable_vga(struct radeon_device *rdev)
+{
+ unsigned tmp;
+
+ WREG32(0x330, 0);
+ WREG32(0x338, 0);
+ tmp = RREG32(0x300);
+ tmp &= ~(3 << 16);
+ WREG32(0x300, tmp);
+ WREG32(0x308, (1 << 8));
+ WREG32(0x310, rdev->mc.vram_location);
+ WREG32(0x594, 0);
+}
+
+int rs600_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32_MC(RS600_MC_STATUS);
+ if (tmp & RS600_MC_STATUS_IDLE) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+void rs600_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+}
+
+void rs600_gpu_init(struct radeon_device *rdev)
+{
+ /* FIXME: is HDP at the same place on rs600? */
+ r100_hdp_reset(rdev);
+ rs600_disable_vga(rdev);
+ /* FIXME: is this correct ? */
+ r420_pipes_init(rdev);
+ if (rs600_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait for MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+}
+
+
+/*
+ * VRAM info.
+ */
+void rs600_vram_info(struct radeon_device *rdev)
+{
+ /* FIXME: to do, or are these values sane? */
+ rdev->mc.vram_is_ddr = true;
+ rdev->mc.vram_width = 128;
+}
+
+
+/*
+ * Indirect registers accessor
+ */
+uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(RS600_MC_INDEX,
+ ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0));
+ r = RREG32(RS600_MC_DATA);
+ return r;
+}
+
+void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(RS600_MC_INDEX,
+ RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 |
+ ((reg) & RS600_MC_ADDR_MASK));
+ WREG32(RS600_MC_DATA, v);
+}
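The accessors above use the usual index/data pair: one register selects an internal MC register, a second one carries the value. A self-contained model of that access pattern follows; the register file and addresses are invented for the example:

#include <stdio.h>
#include <stdint.h>

/* Toy model of the index/data accessor pattern used by rs600_mc_rreg()
 * and rs600_mc_wreg() above. */
static uint32_t mc_index;		/* stands in for RS600_MC_INDEX   */
static uint32_t mc_regs[0x100];		/* pretend internal register file */

static void mc_wreg(uint32_t reg, uint32_t v)
{
	mc_index = reg & 0xFF;		/* select the target register */
	mc_regs[mc_index] = v;		/* write through the data port */
}

static uint32_t mc_rreg(uint32_t reg)
{
	mc_index = reg & 0xFF;
	return mc_regs[mc_index];
}

int main(void)
{
	mc_wreg(0x2C, 0xCAFED00D);
	printf("0x%08X\n", mc_rreg(0x2C));	/* prints 0xCAFED00D */
	return 0;
}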
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
new file mode 100644
index 00000000000..79ba85042b5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* rs690,rs740 depends on : */
+void r100_hdp_reset(struct radeon_device *rdev);
+int r300_mc_wait_for_idle(struct radeon_device *rdev);
+void r420_pipes_init(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_adjust_size(struct radeon_device *rdev);
+void rs600_mc_disable_clients(struct radeon_device *rdev);
+void rs600_disable_vga(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * rs690, rs740
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+void rs690_gpu_init(struct radeon_device *rdev);
+int rs690_mc_wait_for_idle(struct radeon_device *rdev);
+
+
+/*
+ * MC functions.
+ */
+int rs690_mc_init(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int r;
+
+ if (r100_debugfs_rbbm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for RBBM!\n");
+ }
+
+ rs690_gpu_init(rdev);
+ rs400_gart_disable(rdev);
+
+ /* Setup GPU memory space */
+ rdev->mc.gtt_location = rdev->mc.vram_size;
+ rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
+ rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
+ rdev->mc.vram_location = 0xFFFFFFFFUL;
+ r = radeon_mc_setup(rdev);
+ if (r) {
+ return r;
+ }
+
+ /* Program GPU memory space */
+ rs600_mc_disable_clients(rdev);
+ if (rs690_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait for MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+ tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+ tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
+ tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
+ WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
+ /* FIXME: Does this reg exist on RS480,RS740 ? */
+ WREG32(0x310, rdev->mc.vram_location);
+ WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
+ return 0;
+}
+
+void rs690_mc_fini(struct radeon_device *rdev)
+{
+ rs400_gart_disable(rdev);
+ radeon_gart_table_ram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Global GPU functions
+ */
+int rs690_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32_MC(RS690_MC_STATUS);
+ if (tmp & RS690_MC_STATUS_IDLE) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+void rs690_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+}
+
+void rs690_gpu_init(struct radeon_device *rdev)
+{
+ /* FIXME: is HDP at the same place on rs690? */
+ r100_hdp_reset(rdev);
+ rs600_disable_vga(rdev);
+ /* FIXME: is this correct ? */
+ r420_pipes_init(rdev);
+ if (rs690_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait for MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+}
+
+
+/*
+ * VRAM info.
+ */
+void rs690_vram_info(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ rs400_gart_adjust_size(rdev);
+ /* DDR for all cards after R300 & IGP */
+ rdev->mc.vram_is_ddr = true;
+ /* FIXME: is this correct for RS690/RS740 ? */
+ tmp = RREG32(RADEON_MEM_CNTL);
+ if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
+ rdev->mc.vram_width = 128;
+ } else {
+ rdev->mc.vram_width = 64;
+ }
+ rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
+
+
+/*
+ * Indirect registers accessor
+ */
+uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK));
+ r = RREG32(RS690_MC_DATA);
+ WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
+ return r;
+}
+
+void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(RS690_MC_INDEX,
+ RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK));
+ WREG32(RS690_MC_DATA, v);
+ WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
+}
diff --git a/drivers/gpu/drm/radeon/rs780.c b/drivers/gpu/drm/radeon/rs780.c
new file mode 100644
index 00000000000..0affcff8182
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs780.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* rs780 depends on : */
+void rs600_mc_disable_clients(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * rs780
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+int rs780_mc_wait_for_idle(struct radeon_device *rdev);
+void rs780_gpu_init(struct radeon_device *rdev);
+
+
+/*
+ * MC
+ */
+int rs780_mc_init(struct radeon_device *rdev)
+{
+ rs780_gpu_init(rdev);
+ /* FIXME: implement */
+
+ rs600_mc_disable_clients(rdev);
+ if (rs780_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait for MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+ return 0;
+}
+
+void rs780_mc_fini(struct radeon_device *rdev)
+{
+ /* FIXME: implement */
+}
+
+
+/*
+ * Global GPU functions
+ */
+void rs780_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+}
+
+int rs780_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ /* FIXME: implement */
+ return 0;
+}
+
+void rs780_gpu_init(struct radeon_device *rdev)
+{
+ /* FIXME: implement */
+}
+
+
+/*
+ * VRAM info
+ */
+void rs780_vram_get_type(struct radeon_device *rdev)
+{
+ /* FIXME: implement */
+}
+
+void rs780_vram_info(struct radeon_device *rdev)
+{
+ rs780_vram_get_type(rdev);
+
+ /* FIXME: implement */
+ /* Could the aperture size report 0? */
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
new file mode 100644
index 00000000000..7eab95db58a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* rv515 depends on : */
+void r100_hdp_reset(struct radeon_device *rdev);
+int r100_cp_reset(struct radeon_device *rdev);
+int r100_rb2d_reset(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+int rv370_pcie_gart_enable(struct radeon_device *rdev);
+void rv370_pcie_gart_disable(struct radeon_device *rdev);
+void r420_pipes_init(struct radeon_device *rdev);
+void rs600_mc_disable_clients(struct radeon_device *rdev);
+void rs600_disable_vga(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * rv515
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
+int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
+void rv515_gpu_init(struct radeon_device *rdev);
+int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+
+
+/*
+ * MC
+ */
+int rv515_mc_init(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int r;
+
+ if (r100_debugfs_rbbm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+ }
+ if (rv515_debugfs_pipes_info_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for pipes !\n");
+ }
+ if (rv515_debugfs_ga_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for GA !\n");
+ }
+
+ rv515_gpu_init(rdev);
+ rv370_pcie_gart_disable(rdev);
+
+ /* Setup GPU memory space */
+ rdev->mc.vram_location = 0xFFFFFFFFUL;
+ rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ printk(KERN_WARNING "[drm] Disabling AGP\n");
+ rdev->flags &= ~RADEON_IS_AGP;
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ } else {
+ rdev->mc.gtt_location = rdev->mc.agp_base;
+ }
+ }
+ r = radeon_mc_setup(rdev);
+ if (r) {
+ return r;
+ }
+
+ /* Program GPU memory space */
+ rs600_mc_disable_clients(rdev);
+ if (rv515_mc_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait for MC to go idle while "
+		       "programming pipes. Bad things might happen.\n");
+ }
+ /* Write VRAM size in case we are limiting it */
+ WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+ tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16);
+ WREG32(0x134, tmp);
+ tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+ tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16);
+ tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16);
+ WREG32_MC(RV515_MC_FB_LOCATION, tmp);
+ WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
+ WREG32(0x310, rdev->mc.vram_location);
+ if (rdev->flags & RADEON_IS_AGP) {
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16);
+ tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16);
+ WREG32_MC(RV515_MC_AGP_LOCATION, tmp);
+ WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base);
+ WREG32_MC(RV515_MC_AGP_BASE_2, 0);
+ } else {
+ WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF);
+ WREG32_MC(RV515_MC_AGP_BASE, 0);
+ WREG32_MC(RV515_MC_AGP_BASE_2, 0);
+ }
+ return 0;
+}
+
+void rv515_mc_fini(struct radeon_device *rdev)
+{
+ rv370_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+
+/*
+ * Global GPU functions
+ */
+void rv515_ring_start(struct radeon_device *rdev)
+{
+ unsigned gb_tile_config;
+ int r;
+
+ /* Sub pixel 1/12 so we can have 4K rendering according to doc */
+ gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16;
+ switch (rdev->num_gb_pipes) {
+ case 2:
+ gb_tile_config |= R300_PIPE_COUNT_R300;
+ break;
+ case 3:
+ gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+ break;
+ case 4:
+ gb_tile_config |= R300_PIPE_COUNT_R420;
+ break;
+ case 1:
+ default:
+ gb_tile_config |= R300_PIPE_COUNT_RV350;
+ break;
+ }
+
+ r = radeon_ring_lock(rdev, 64);
+ if (r) {
+ return;
+ }
+ radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(rdev,
+ RADEON_ISYNC_ANY2D_IDLE3D |
+ RADEON_ISYNC_ANY3D_IDLE2D |
+ RADEON_ISYNC_WAIT_IDLEGUI |
+ RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+ radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
+ radeon_ring_write(rdev, gb_tile_config);
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev,
+ RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_3D_IDLECLEAN);
+ radeon_ring_write(rdev, PACKET0(0x170C, 0));
+ radeon_ring_write(rdev, 1 << 31);
+ radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(0x42C8, 0));
+ radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
+ radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev,
+ RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_3D_IDLECLEAN);
+ radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
+ radeon_ring_write(rdev,
+ ((6 << R300_MS_X0_SHIFT) |
+ (6 << R300_MS_Y0_SHIFT) |
+ (6 << R300_MS_X1_SHIFT) |
+ (6 << R300_MS_Y1_SHIFT) |
+ (6 << R300_MS_X2_SHIFT) |
+ (6 << R300_MS_Y2_SHIFT) |
+ (6 << R300_MSBD0_Y_SHIFT) |
+ (6 << R300_MSBD0_X_SHIFT)));
+ radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
+ radeon_ring_write(rdev,
+ ((6 << R300_MS_X3_SHIFT) |
+ (6 << R300_MS_Y3_SHIFT) |
+ (6 << R300_MS_X4_SHIFT) |
+ (6 << R300_MS_Y4_SHIFT) |
+ (6 << R300_MS_X5_SHIFT) |
+ (6 << R300_MS_Y5_SHIFT) |
+ (6 << R300_MSBD1_SHIFT)));
+ radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
+ radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+ radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
+ radeon_ring_write(rdev,
+ R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
+ radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
+ radeon_ring_write(rdev,
+ R300_GEOMETRY_ROUND_NEAREST |
+ R300_COLOR_ROUND_NEAREST);
+ radeon_ring_unlock_commit(rdev);
+}
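+
+/*
+ * Note on the command stream assembled above: each PACKET0(reg, n) header
+ * is followed by n + 1 data dwords that the CP writes to consecutive
+ * registers starting at reg.  All packets here use n = 0, i.e. a single
+ * register write per packet.
+ */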
+
+void rv515_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+}
+
+int rv515_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32_MC(RV515_MC_STATUS);
+ if (tmp & RV515_MC_STATUS_IDLE) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+void rv515_gpu_init(struct radeon_device *rdev)
+{
+ unsigned pipe_select_current, gb_pipe_select, tmp;
+
+ r100_hdp_reset(rdev);
+ r100_rb2d_reset(rdev);
+
+ if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait for GUI to go idle while "
+		       "resetting the GPU. Bad things might happen.\n");
+ }
+
+ rs600_disable_vga(rdev);
+
+ r420_pipes_init(rdev);
+ gb_pipe_select = RREG32(0x402C);
+ tmp = RREG32(0x170C);
+ pipe_select_current = (tmp >> 2) & 3;
+ tmp = (1 << pipe_select_current) |
+ (((gb_pipe_select >> 8) & 0xF) << 4);
+ WREG32_PLL(0x000D, tmp);
+ if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait for GUI to go idle while "
+		       "resetting the GPU. Bad things might happen.\n");
+ }
+ if (rv515_mc_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait for MC to go idle while "
+		       "programming pipes. Bad things might happen.\n");
+ }
+}
+
+int rv515_ga_reset(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ bool reinit_cp;
+ int i;
+
+ reinit_cp = rdev->cp.ready;
+ rdev->cp.ready = false;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ WREG32(RADEON_CP_CSQ_MODE, 0);
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
+ (void)RREG32(RADEON_RBBM_SOFT_RESET);
+ udelay(200);
+ WREG32(RADEON_RBBM_SOFT_RESET, 0);
+ /* Wait to prevent race in RBBM_STATUS */
+ mdelay(1);
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ if (tmp & ((1 << 20) | (1 << 26))) {
+ DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
+ /* GA still busy soft reset it */
+ WREG32(0x429C, 0x200);
+ WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
+ WREG32(0x43E0, 0);
+ WREG32(0x43E4, 0);
+ WREG32(0x24AC, 0);
+ }
+ /* Wait to prevent race in RBBM_STATUS */
+ mdelay(1);
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ if (!(tmp & ((1 << 20) | (1 << 26)))) {
+ break;
+ }
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ if (!(tmp & ((1 << 20) | (1 << 26)))) {
+			DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
+ tmp);
+ DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
+ DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
+ DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
+ if (reinit_cp) {
+ return r100_cp_init(rdev, rdev->cp.ring_size);
+ }
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
+ return -1;
+}
+
+int rv515_gpu_reset(struct radeon_device *rdev)
+{
+ uint32_t status;
+
+	/* reset order likely matters */
+ status = RREG32(RADEON_RBBM_STATUS);
+ /* reset HDP */
+ r100_hdp_reset(rdev);
+ /* reset rb2d */
+ if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
+ r100_rb2d_reset(rdev);
+ }
+ /* reset GA */
+ if (status & ((1 << 20) | (1 << 26))) {
+ rv515_ga_reset(rdev);
+ }
+ /* reset CP */
+ status = RREG32(RADEON_RBBM_STATUS);
+ if (status & (1 << 16)) {
+ r100_cp_reset(rdev);
+ }
+ /* Check if GPU is idle */
+ status = RREG32(RADEON_RBBM_STATUS);
+ if (status & (1 << 31)) {
+ DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ return -1;
+ }
+	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
+ return 0;
+}
+
+
+/*
+ * VRAM info
+ */
+static void rv515_vram_get_type(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ rdev->mc.vram_width = 128;
+ rdev->mc.vram_is_ddr = true;
+ tmp = RREG32_MC(RV515_MC_CNTL);
+ tmp &= RV515_MEM_NUM_CHANNELS_MASK;
+ switch (tmp) {
+ case 0:
+ rdev->mc.vram_width = 64;
+ break;
+ case 1:
+ rdev->mc.vram_width = 128;
+ break;
+ default:
+ rdev->mc.vram_width = 128;
+ break;
+ }
+}
+
+void rv515_vram_info(struct radeon_device *rdev)
+{
+ rv515_vram_get_type(rdev);
+ rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
+
+
+/*
+ * Indirect registers accessor
+ */
+uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
+ r = RREG32(R520_MC_IND_DATA);
+ WREG32(R520_MC_IND_INDEX, 0);
+ return r;
+}
+
+void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
+ WREG32(R520_MC_IND_DATA, (v));
+ WREG32(R520_MC_IND_INDEX, 0);
+}
+
+uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff));
+ (void)RREG32(RADEON_PCIE_INDEX);
+ r = RREG32(RADEON_PCIE_DATA);
+ return r;
+}
+
+void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff));
+ (void)RREG32(RADEON_PCIE_INDEX);
+ WREG32(RADEON_PCIE_DATA, (v));
+ (void)RREG32(RADEON_PCIE_DATA);
+}
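+
+/*
+ * The accessors above follow the usual index/data pattern: the register
+ * offset is first written to an index register (R520_MC_IND_INDEX or
+ * RADEON_PCIE_INDEX) and the value is then transferred through the matching
+ * data register.  Illustrative use (not taken from this patch):
+ *
+ *	tmp = rv515_mc_rreg(rdev, RV515_MC_FB_LOCATION);
+ *	rv515_mc_wreg(rdev, RV515_MC_FB_LOCATION, tmp);
+ *
+ * The RREG32_MC()/WREG32_MC() accesses earlier in this file are expected to
+ * resolve to these helpers for this ASIC.
+ */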
+
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = RREG32(R400_GB_PIPE_SELECT);
+ seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
+ tmp = RREG32(R500_SU_REG_DEST);
+ seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
+ tmp = RREG32(R300_GB_TILE_CONFIG);
+ seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
+ return 0;
+}
+
+static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = RREG32(0x2140);
+ seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
+ radeon_gpu_reset(rdev);
+ tmp = RREG32(0x425C);
+ seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
+ return 0;
+}
+
+static struct drm_info_list rv515_pipes_info_list[] = {
+ {"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
+};
+
+static struct drm_info_list rv515_ga_info_list[] = {
+ {"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
+};
+#endif
+
+int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
+#else
+ return 0;
+#endif
+}
+
+int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
new file mode 100644
index 00000000000..da50cc51ede
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon_reg.h"
+#include "radeon.h"
+
+/* rv770,rv730,rv710 depends on : */
+void rs600_mc_disable_clients(struct radeon_device *rdev);
+
+/* This file gathers functions specific to:
+ * rv770,rv730,rv710
+ *
+ * Some of these functions might be used by newer ASICs.
+ */
+int rv770_mc_wait_for_idle(struct radeon_device *rdev);
+void rv770_gpu_init(struct radeon_device *rdev);
+
+
+/*
+ * MC
+ */
+int rv770_mc_init(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ rv770_gpu_init(rdev);
+
+	/* Set up the GART before changing its location so we can ask it to
+	 * discard unmapped MC requests.
+	 */
+ /* FIXME: disable out of gart access */
+ tmp = rdev->mc.gtt_location / 4096;
+ tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
+ WREG32(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
+ tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
+ tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
+ WREG32(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);
+
+ rs600_mc_disable_clients(rdev);
+ if (rv770_mc_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait for MC to go idle while "
+		       "programming pipes. Bad things might happen.\n");
+ }
+
+ tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+ tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24);
+ tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24);
+ WREG32(R700_MC_VM_FB_LOCATION, tmp);
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ tmp = REG_SET(R700_MC_AGP_TOP, tmp >> 22);
+ WREG32(R700_MC_VM_AGP_TOP, tmp);
+ tmp = REG_SET(R700_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
+ WREG32(R700_MC_VM_AGP_BOT, tmp);
+ return 0;
+}
+
+void rv770_mc_fini(struct radeon_device *rdev)
+{
+ /* FIXME: implement */
+}
+
+
+/*
+ * Global GPU functions
+ */
+void rv770_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+}
+
+int rv770_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ /* FIXME: implement */
+ return 0;
+}
+
+void rv770_gpu_init(struct radeon_device *rdev)
+{
+ /* FIXME: implement */
+}
+
+
+/*
+ * VRAM info
+ */
+void rv770_vram_get_type(struct radeon_device *rdev)
+{
+ /* FIXME: implement */
+}
+
+void rv770_vram_info(struct radeon_device *rdev)
+{
+ rv770_vram_get_type(rdev);
+
+ /* FIXME: implement */
+	/* Could the aperture size report 0? */
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+}
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
new file mode 100644
index 00000000000..b0a9de7a57c
--- /dev/null
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
+ ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
+
+obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
new file mode 100644
index 00000000000..e8f6d2229d8
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -0,0 +1,150 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ * Keith Packard.
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#ifdef TTM_HAS_AGP
+#include "ttm/ttm_placement.h"
+#include <linux/agp_backend.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <asm/agp.h>
+
+struct ttm_agp_backend {
+ struct ttm_backend backend;
+ struct agp_memory *mem;
+ struct agp_bridge_data *bridge;
+};
+
+static int ttm_agp_populate(struct ttm_backend *backend,
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page)
+{
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+ struct page **cur_page, **last_page = pages + num_pages;
+ struct agp_memory *mem;
+
+ mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
+ if (unlikely(mem == NULL))
+ return -ENOMEM;
+
+ mem->page_count = 0;
+ for (cur_page = pages; cur_page < last_page; ++cur_page) {
+ struct page *page = *cur_page;
+ if (!page)
+ page = dummy_read_page;
+
+ mem->memory[mem->page_count++] =
+ phys_to_gart(page_to_phys(page));
+ }
+ agp_be->mem = mem;
+ return 0;
+}
+
+static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
+{
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+ struct agp_memory *mem = agp_be->mem;
+ int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+ int ret;
+
+ mem->is_flushed = 1;
+ mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
+
+ ret = agp_bind_memory(mem, bo_mem->mm_node->start);
+ if (ret)
+ printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
+
+ return ret;
+}
+
+static int ttm_agp_unbind(struct ttm_backend *backend)
+{
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+
+ if (agp_be->mem->is_bound)
+ return agp_unbind_memory(agp_be->mem);
+ else
+ return 0;
+}
+
+static void ttm_agp_clear(struct ttm_backend *backend)
+{
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+ struct agp_memory *mem = agp_be->mem;
+
+ if (mem) {
+ ttm_agp_unbind(backend);
+ agp_free_memory(mem);
+ }
+ agp_be->mem = NULL;
+}
+
+static void ttm_agp_destroy(struct ttm_backend *backend)
+{
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+
+ if (agp_be->mem)
+ ttm_agp_clear(backend);
+ kfree(agp_be);
+}
+
+static struct ttm_backend_func ttm_agp_func = {
+ .populate = ttm_agp_populate,
+ .clear = ttm_agp_clear,
+ .bind = ttm_agp_bind,
+ .unbind = ttm_agp_unbind,
+ .destroy = ttm_agp_destroy,
+};
+
+struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
+ struct agp_bridge_data *bridge)
+{
+ struct ttm_agp_backend *agp_be;
+
+ agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
+ if (!agp_be)
+ return NULL;
+
+ agp_be->mem = NULL;
+ agp_be->bridge = bridge;
+ agp_be->backend.func = &ttm_agp_func;
+ agp_be->backend.bdev = bdev;
+ return &agp_be->backend;
+}
+EXPORT_SYMBOL(ttm_agp_backend_init);
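+
+/*
+ * Illustrative caller sketch (the hook name and AGP bridge pointer come
+ * from the ttm/drm headers, not from this file): a driver typically returns
+ * the backend from its create_ttm_backend_entry hook, e.g.
+ *
+ *	return ttm_agp_backend_init(bdev, dev->agp->bridge);
+ *
+ * TTM then drives the object through the populate/bind/unbind/clear/destroy
+ * callbacks wired up in ttm_agp_func above.
+ */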
+
+#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
new file mode 100644
index 00000000000..1587aeca7be
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -0,0 +1,1698 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/module.h>
+
+#define TTM_ASSERT_LOCKED(param)
+#define TTM_DEBUG(fmt, arg...)
+#define TTM_BO_HASH_ORDER 13
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
+static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
+
+static inline uint32_t ttm_bo_type_flags(unsigned type)
+{
+ return 1 << (type);
+}
+
+static void ttm_bo_release_list(struct kref *list_kref)
+{
+ struct ttm_buffer_object *bo =
+ container_of(list_kref, struct ttm_buffer_object, list_kref);
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ BUG_ON(atomic_read(&bo->list_kref.refcount));
+ BUG_ON(atomic_read(&bo->kref.refcount));
+ BUG_ON(atomic_read(&bo->cpu_writers));
+ BUG_ON(bo->sync_obj != NULL);
+ BUG_ON(bo->mem.mm_node != NULL);
+ BUG_ON(!list_empty(&bo->lru));
+ BUG_ON(!list_empty(&bo->ddestroy));
+
+ if (bo->ttm)
+ ttm_tt_destroy(bo->ttm);
+ if (bo->destroy)
+ bo->destroy(bo);
+ else {
+ ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
+ kfree(bo);
+ }
+}
+
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+{
+
+ if (interruptible) {
+ int ret = 0;
+
+ ret = wait_event_interruptible(bo->event_queue,
+ atomic_read(&bo->reserved) == 0);
+ if (unlikely(ret != 0))
+ return -ERESTART;
+ } else {
+ wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+ }
+ return 0;
+}
+
+static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man;
+
+ BUG_ON(!atomic_read(&bo->reserved));
+
+ if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+
+ BUG_ON(!list_empty(&bo->lru));
+
+ man = &bdev->man[bo->mem.mem_type];
+ list_add_tail(&bo->lru, &man->lru);
+ kref_get(&bo->list_kref);
+
+ if (bo->ttm != NULL) {
+ list_add_tail(&bo->swap, &bdev->swap_lru);
+ kref_get(&bo->list_kref);
+ }
+ }
+}
+
+/**
+ * Call with the lru_lock held.
+ */
+
+static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+{
+ int put_count = 0;
+
+ if (!list_empty(&bo->swap)) {
+ list_del_init(&bo->swap);
+ ++put_count;
+ }
+ if (!list_empty(&bo->lru)) {
+ list_del_init(&bo->lru);
+ ++put_count;
+ }
+
+ /*
+ * TODO: Add a driver hook to delete from
+ * driver-specific LRU's here.
+ */
+
+ return put_count;
+}
+
+int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int ret;
+
+ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+ if (use_sequence && bo->seq_valid &&
+ (sequence - bo->val_seq < (1 << 31))) {
+ return -EAGAIN;
+ }
+
+ if (no_wait)
+ return -EBUSY;
+
+ spin_unlock(&bdev->lru_lock);
+ ret = ttm_bo_wait_unreserved(bo, interruptible);
+ spin_lock(&bdev->lru_lock);
+
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (use_sequence) {
+ bo->val_seq = sequence;
+ bo->seq_valid = true;
+ } else {
+ bo->seq_valid = false;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_reserve);
+
+static void ttm_bo_ref_bug(struct kref *list_kref)
+{
+ BUG();
+}
+
+int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int put_count = 0;
+ int ret;
+
+ spin_lock(&bdev->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
+ sequence);
+ if (likely(ret == 0))
+ put_count = ttm_bo_del_from_lru(bo);
+ spin_unlock(&bdev->lru_lock);
+
+ while (put_count--)
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
+ return ret;
+}
+
+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ spin_lock(&bdev->lru_lock);
+ ttm_bo_add_to_lru(bo);
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_unreserve);
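+
+/*
+ * Typical reservation pattern for the helpers above (illustrative sketch,
+ * error handling trimmed):
+ *
+ *	ret = ttm_bo_reserve(bo, true, false, false, 0);
+ *	if (unlikely(ret != 0))
+ *		return ret;
+ *	... operate on the buffer while it is off the LRU lists ...
+ *	ttm_bo_unreserve(bo);
+ *
+ * ttm_bo_reserve() removes the buffer from the LRU lists so it cannot be
+ * picked for eviction while the caller works on it; ttm_bo_unreserve() puts
+ * it back on the lists and wakes any waiters on the event queue.
+ */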
+
+/*
+ * Call with bo->mutex held.
+ */
+
+static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int ret = 0;
+ uint32_t page_flags = 0;
+
+ TTM_ASSERT_LOCKED(&bo->mutex);
+ bo->ttm = NULL;
+
+ switch (bo->type) {
+ case ttm_bo_type_device:
+ if (zero_alloc)
+ page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ case ttm_bo_type_kernel:
+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags, bdev->dummy_read_page);
+ if (unlikely(bo->ttm == NULL))
+ ret = -ENOMEM;
+ break;
+ case ttm_bo_type_user:
+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags | TTM_PAGE_FLAG_USER,
+ bdev->dummy_read_page);
+		if (unlikely(bo->ttm == NULL)) {
+			ret = -ENOMEM;
+			break;
+		}
+
+ ret = ttm_tt_set_user(bo->ttm, current,
+ bo->buffer_start, bo->num_pages);
+ if (unlikely(ret != 0))
+ ttm_tt_destroy(bo->ttm);
+ break;
+ default:
+ printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem,
+ bool evict, bool interruptible, bool no_wait)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
+ bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
+ struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
+ struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
+ int ret = 0;
+
+ if (old_is_pci || new_is_pci ||
+ ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
+ ttm_bo_unmap_virtual(bo);
+
+ /*
+ * Create and bind a ttm if required.
+ */
+
+ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
+ ret = ttm_bo_add_ttm(bo, false);
+ if (ret)
+ goto out_err;
+
+ ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
+ if (ret)
+ return ret;
+
+ if (mem->mem_type != TTM_PL_SYSTEM) {
+ ret = ttm_tt_bind(bo->ttm, mem);
+ if (ret)
+ goto out_err;
+ }
+
+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ uint32_t save_flags = old_mem->placement;
+
+ *old_mem = *mem;
+ mem->mm_node = NULL;
+ ttm_flag_masked(&save_flags, mem->placement,
+ TTM_PL_MASK_MEMTYPE);
+ goto moved;
+ }
+
+ }
+
+ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
+ ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+ else if (bdev->driver->move)
+ ret = bdev->driver->move(bo, evict, interruptible,
+ no_wait, mem);
+ else
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+
+ if (ret)
+ goto out_err;
+
+moved:
+ if (bo->evicted) {
+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+ if (ret)
+			printk(KERN_ERR TTM_PFX "Cannot flush read caches\n");
+ bo->evicted = false;
+ }
+
+ if (bo->mem.mm_node) {
+ spin_lock(&bo->lock);
+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+ bdev->man[bo->mem.mem_type].gpu_offset;
+ bo->cur_placement = bo->mem.placement;
+ spin_unlock(&bo->lock);
+ }
+
+ return 0;
+
+out_err:
+ new_man = &bdev->man[bo->mem.mem_type];
+ if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
+ ttm_tt_unbind(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * If the bo is idle, remove it from the delayed and lru lists and unref it.
+ * If it is not idle and already on the delayed list, do nothing.
+ * If it is not idle and not yet on the delayed list, put it on the delayed
+ * list, take a list_kref reference and schedule a delayed list check.
+ */
+
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+ int ret;
+
+ spin_lock(&bo->lock);
+ (void) ttm_bo_wait(bo, false, false, !remove_all);
+
+ if (!bo->sync_obj) {
+ int put_count;
+
+ spin_unlock(&bo->lock);
+
+ spin_lock(&bdev->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
+ BUG_ON(ret);
+ if (bo->ttm)
+ ttm_tt_unbind(bo->ttm);
+
+ if (!list_empty(&bo->ddestroy)) {
+ list_del_init(&bo->ddestroy);
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ }
+ if (bo->mem.mm_node) {
+ drm_mm_put_block(bo->mem.mm_node);
+ bo->mem.mm_node = NULL;
+ }
+ put_count = ttm_bo_del_from_lru(bo);
+ spin_unlock(&bdev->lru_lock);
+
+ atomic_set(&bo->reserved, 0);
+
+ while (put_count--)
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+
+ return 0;
+ }
+
+ spin_lock(&bdev->lru_lock);
+ if (list_empty(&bo->ddestroy)) {
+ void *sync_obj = bo->sync_obj;
+ void *sync_obj_arg = bo->sync_obj_arg;
+
+ kref_get(&bo->list_kref);
+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ spin_unlock(&bdev->lru_lock);
+ spin_unlock(&bo->lock);
+
+ if (sync_obj)
+ driver->sync_obj_flush(sync_obj, sync_obj_arg);
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+ ret = 0;
+
+ } else {
+ spin_unlock(&bdev->lru_lock);
+ spin_unlock(&bo->lock);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+/**
+ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
+ * encountered buffers.
+ */
+
+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+{
+ struct ttm_buffer_object *entry, *nentry;
+ struct list_head *list, *next;
+ int ret;
+
+ spin_lock(&bdev->lru_lock);
+ list_for_each_safe(list, next, &bdev->ddestroy) {
+ entry = list_entry(list, struct ttm_buffer_object, ddestroy);
+ nentry = NULL;
+
+ /*
+ * Protect the next list entry from destruction while we
+ * unlock the lru_lock.
+ */
+
+ if (next != &bdev->ddestroy) {
+ nentry = list_entry(next, struct ttm_buffer_object,
+ ddestroy);
+ kref_get(&nentry->list_kref);
+ }
+ kref_get(&entry->list_kref);
+
+ spin_unlock(&bdev->lru_lock);
+ ret = ttm_bo_cleanup_refs(entry, remove_all);
+ kref_put(&entry->list_kref, ttm_bo_release_list);
+
+ spin_lock(&bdev->lru_lock);
+ if (nentry) {
+ bool next_onlist = !list_empty(next);
+ spin_unlock(&bdev->lru_lock);
+ kref_put(&nentry->list_kref, ttm_bo_release_list);
+ spin_lock(&bdev->lru_lock);
+ /*
+ * Someone might have raced us and removed the
+ * next entry from the list. We don't bother restarting
+ * list traversal.
+ */
+
+ if (!next_onlist)
+ break;
+ }
+ if (ret)
+ break;
+ }
+ ret = !list_empty(&bdev->ddestroy);
+ spin_unlock(&bdev->lru_lock);
+
+ return ret;
+}
+
+static void ttm_bo_delayed_workqueue(struct work_struct *work)
+{
+ struct ttm_bo_device *bdev =
+ container_of(work, struct ttm_bo_device, wq.work);
+
+ if (ttm_bo_delayed_delete(bdev, false)) {
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+ }
+}
+
+static void ttm_bo_release(struct kref *kref)
+{
+ struct ttm_buffer_object *bo =
+ container_of(kref, struct ttm_buffer_object, kref);
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ if (likely(bo->vm_node != NULL)) {
+ rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
+ drm_mm_put_block(bo->vm_node);
+ bo->vm_node = NULL;
+ }
+ write_unlock(&bdev->vm_lock);
+ ttm_bo_cleanup_refs(bo, false);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ write_lock(&bdev->vm_lock);
+}
+
+void ttm_bo_unref(struct ttm_buffer_object **p_bo)
+{
+ struct ttm_buffer_object *bo = *p_bo;
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ *p_bo = NULL;
+ write_lock(&bdev->vm_lock);
+ kref_put(&bo->kref, ttm_bo_release);
+ write_unlock(&bdev->vm_lock);
+}
+EXPORT_SYMBOL(ttm_bo_unref);
+
+static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
+ bool interruptible, bool no_wait)
+{
+ int ret = 0;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_reg evict_mem;
+ uint32_t proposed_placement;
+
+ if (bo->mem.mem_type != mem_type)
+ goto out;
+
+ spin_lock(&bo->lock);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ spin_unlock(&bo->lock);
+
+ if (ret && ret != -ERESTART) {
+ printk(KERN_ERR TTM_PFX "Failed to expire sync object before "
+ "buffer eviction.\n");
+ goto out;
+ }
+
+ BUG_ON(!atomic_read(&bo->reserved));
+
+ evict_mem = bo->mem;
+ evict_mem.mm_node = NULL;
+
+ proposed_placement = bdev->driver->evict_flags(bo);
+
+ ret = ttm_bo_mem_space(bo, proposed_placement,
+ &evict_mem, interruptible, no_wait);
+ if (unlikely(ret != 0 && ret != -ERESTART))
+ ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
+ &evict_mem, interruptible, no_wait);
+
+ if (ret) {
+ if (ret != -ERESTART)
+ printk(KERN_ERR TTM_PFX
+ "Failed to find memory space for "
+ "buffer 0x%p eviction.\n", bo);
+ goto out;
+ }
+
+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
+ no_wait);
+ if (ret) {
+ if (ret != -ERESTART)
+ printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+ goto out;
+ }
+
+ spin_lock(&bdev->lru_lock);
+ if (evict_mem.mm_node) {
+ drm_mm_put_block(evict_mem.mm_node);
+ evict_mem.mm_node = NULL;
+ }
+ spin_unlock(&bdev->lru_lock);
+ bo->evicted = true;
+out:
+ return ret;
+}
+
+/**
+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
+ */
+static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem,
+ uint32_t mem_type,
+ bool interruptible, bool no_wait)
+{
+ struct drm_mm_node *node;
+ struct ttm_buffer_object *entry;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct list_head *lru;
+ unsigned long num_pages = mem->num_pages;
+ int put_count = 0;
+ int ret;
+
+retry_pre_get:
+ ret = drm_mm_pre_get(&man->manager);
+ if (unlikely(ret != 0))
+ return ret;
+
+ spin_lock(&bdev->lru_lock);
+ do {
+ node = drm_mm_search_free(&man->manager, num_pages,
+ mem->page_alignment, 1);
+ if (node)
+ break;
+
+ lru = &man->lru;
+ if (list_empty(lru))
+ break;
+
+ entry = list_first_entry(lru, struct ttm_buffer_object, lru);
+ kref_get(&entry->list_kref);
+
+ ret =
+ ttm_bo_reserve_locked(entry, interruptible, no_wait,
+ false, 0);
+
+ if (likely(ret == 0))
+ put_count = ttm_bo_del_from_lru(entry);
+
+ spin_unlock(&bdev->lru_lock);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ while (put_count--)
+ kref_put(&entry->list_kref, ttm_bo_ref_bug);
+
+ ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
+
+ ttm_bo_unreserve(entry);
+
+ kref_put(&entry->list_kref, ttm_bo_release_list);
+ if (ret)
+ return ret;
+
+ spin_lock(&bdev->lru_lock);
+ } while (1);
+
+ if (!node) {
+ spin_unlock(&bdev->lru_lock);
+ return -ENOMEM;
+ }
+
+ node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
+ if (unlikely(!node)) {
+ spin_unlock(&bdev->lru_lock);
+ goto retry_pre_get;
+ }
+
+ spin_unlock(&bdev->lru_lock);
+ mem->mm_node = node;
+ mem->mem_type = mem_type;
+ return 0;
+}
+
+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+ bool disallow_fixed,
+ uint32_t mem_type,
+ uint32_t mask, uint32_t *res_mask)
+{
+ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
+
+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
+ return false;
+
+ if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
+ return false;
+
+ if ((mask & man->available_caching) == 0)
+ return false;
+ if (mask & man->default_caching)
+ cur_flags |= man->default_caching;
+ else if (mask & TTM_PL_FLAG_CACHED)
+ cur_flags |= TTM_PL_FLAG_CACHED;
+ else if (mask & TTM_PL_FLAG_WC)
+ cur_flags |= TTM_PL_FLAG_WC;
+ else
+ cur_flags |= TTM_PL_FLAG_UNCACHED;
+
+ *res_mask = cur_flags;
+ return true;
+}
+
+/**
+ * Creates space for memory region @mem according to its type.
+ *
+ * This function first searches for free space in compatible memory types in
+ * the priority order defined by the driver. If free space isn't found, then
+ * ttm_bo_mem_force_space is attempted in priority order to evict and find
+ * space.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ uint32_t proposed_placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man;
+
+ uint32_t num_prios = bdev->driver->num_mem_type_prio;
+ const uint32_t *prios = bdev->driver->mem_type_prio;
+ uint32_t i;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ uint32_t cur_flags = 0;
+ bool type_found = false;
+ bool type_ok = false;
+ bool has_eagain = false;
+ struct drm_mm_node *node = NULL;
+ int ret;
+
+ mem->mm_node = NULL;
+ for (i = 0; i < num_prios; ++i) {
+ mem_type = prios[i];
+ man = &bdev->man[mem_type];
+
+ type_ok = ttm_bo_mt_compatible(man,
+ bo->type == ttm_bo_type_user,
+ mem_type, proposed_placement,
+ &cur_flags);
+
+ if (!type_ok)
+ continue;
+
+ if (mem_type == TTM_PL_SYSTEM)
+ break;
+
+ if (man->has_type && man->use_type) {
+ type_found = true;
+ do {
+ ret = drm_mm_pre_get(&man->manager);
+ if (unlikely(ret))
+ return ret;
+
+ spin_lock(&bdev->lru_lock);
+ node = drm_mm_search_free(&man->manager,
+ mem->num_pages,
+ mem->page_alignment,
+ 1);
+ if (unlikely(!node)) {
+ spin_unlock(&bdev->lru_lock);
+ break;
+ }
+ node = drm_mm_get_block_atomic(node,
+ mem->num_pages,
+ mem->
+ page_alignment);
+ spin_unlock(&bdev->lru_lock);
+ } while (!node);
+ }
+ if (node)
+ break;
+ }
+
+ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
+ mem->mm_node = node;
+ mem->mem_type = mem_type;
+ mem->placement = cur_flags;
+ return 0;
+ }
+
+ if (!type_found)
+ return -EINVAL;
+
+ num_prios = bdev->driver->num_mem_busy_prio;
+ prios = bdev->driver->mem_busy_prio;
+
+ for (i = 0; i < num_prios; ++i) {
+ mem_type = prios[i];
+ man = &bdev->man[mem_type];
+
+ if (!man->has_type)
+ continue;
+
+ if (!ttm_bo_mt_compatible(man,
+ bo->type == ttm_bo_type_user,
+ mem_type,
+ proposed_placement, &cur_flags))
+ continue;
+
+ ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
+ interruptible, no_wait);
+
+ if (ret == 0 && mem->mm_node) {
+ mem->placement = cur_flags;
+ return 0;
+ }
+
+ if (ret == -ERESTART)
+ has_eagain = true;
+ }
+
+ ret = (has_eagain) ? -ERESTART : -ENOMEM;
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_mem_space);
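+
+/*
+ * The two placement passes above are driven by priority arrays supplied by
+ * the driver.  A hypothetical setup could look like:
+ *
+ *	static uint32_t prios[] = { TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM };
+ *
+ *	driver.num_mem_type_prio = ARRAY_SIZE(prios);
+ *	driver.mem_type_prio = prios;
+ *	driver.num_mem_busy_prio = ARRAY_SIZE(prios);
+ *	driver.mem_busy_prio = prios;
+ *
+ * The first loop only looks for free space in each listed type; forced
+ * eviction through ttm_bo_mem_force_space() happens in the second loop and
+ * only for the types listed as busy-eviction candidates.
+ */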
+
+int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
+{
+ int ret = 0;
+
+ if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
+ return -EBUSY;
+
+ ret = wait_event_interruptible(bo->event_queue,
+ atomic_read(&bo->cpu_writers) == 0);
+
+ if (ret == -ERESTARTSYS)
+ ret = -ERESTART;
+
+ return ret;
+}
+
+int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+ uint32_t proposed_placement,
+ bool interruptible, bool no_wait)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int ret = 0;
+ struct ttm_mem_reg mem;
+
+ BUG_ON(!atomic_read(&bo->reserved));
+
+ /*
+ * FIXME: It's possible to pipeline buffer moves.
+ * Have the driver move function wait for idle when necessary,
+ * instead of doing it here.
+ */
+
+ spin_lock(&bo->lock);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ spin_unlock(&bo->lock);
+
+ if (ret)
+ return ret;
+
+ mem.num_pages = bo->num_pages;
+ mem.size = mem.num_pages << PAGE_SHIFT;
+ mem.page_alignment = bo->mem.page_alignment;
+
+ /*
+ * Determine where to move the buffer.
+ */
+
+ ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
+ interruptible, no_wait);
+ if (ret)
+ goto out_unlock;
+
+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+
+out_unlock:
+ if (ret && mem.mm_node) {
+ spin_lock(&bdev->lru_lock);
+ drm_mm_put_block(mem.mm_node);
+ spin_unlock(&bdev->lru_lock);
+ }
+ return ret;
+}
+
+static int ttm_bo_mem_compat(uint32_t proposed_placement,
+ struct ttm_mem_reg *mem)
+{
+ if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
+ return 0;
+ if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
+ return 0;
+
+ return 1;
+}
+
+int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
+ uint32_t proposed_placement,
+ bool interruptible, bool no_wait)
+{
+ int ret;
+
+ BUG_ON(!atomic_read(&bo->reserved));
+ bo->proposed_placement = proposed_placement;
+
+ TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
+ (unsigned long)proposed_placement,
+ (unsigned long)bo->mem.placement);
+
+ /*
+ * Check whether we need to move buffer.
+ */
+
+ if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
+ ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
+ interruptible, no_wait);
+ if (ret) {
+ if (ret != -ERESTART)
+ printk(KERN_ERR TTM_PFX
+ "Failed moving buffer. "
+ "Proposed placement 0x%08x\n",
+ bo->proposed_placement);
+ if (ret == -ENOMEM)
+ printk(KERN_ERR TTM_PFX
+ "Out of aperture space or "
+ "DRM memory quota.\n");
+ return ret;
+ }
+ }
+
+ /*
+ * We might need to add a TTM.
+ */
+
+ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ ret = ttm_bo_add_ttm(bo, true);
+ if (ret)
+ return ret;
+ }
+ /*
+	 * Validation has succeeded; move the access and other
+	 * non-mapping-related flag bits from the proposed flags to
+	 * the active flags.
+ */
+
+ ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
+ ~TTM_PL_MASK_MEMTYPE);
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_buffer_object_validate);
+
+int
+ttm_bo_check_placement(struct ttm_buffer_object *bo,
+ uint32_t set_flags, uint32_t clr_flags)
+{
+ uint32_t new_mask = set_flags | clr_flags;
+
+ if ((bo->type == ttm_bo_type_user) &&
+ (clr_flags & TTM_PL_FLAG_CACHED)) {
+ printk(KERN_ERR TTM_PFX
+ "User buffers require cache-coherent memory.\n");
+ return -EINVAL;
+ }
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ if (new_mask & TTM_PL_FLAG_NO_EVICT) {
+ printk(KERN_ERR TTM_PFX "Need to be root to modify"
+ " NO_EVICT status.\n");
+ return -EINVAL;
+ }
+
+ if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
+ (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+ printk(KERN_ERR TTM_PFX
+ "Incompatible memory specification"
+ " for NO_EVICT buffer.\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+int ttm_buffer_object_init(struct ttm_bo_device *bdev,
+ struct ttm_buffer_object *bo,
+ unsigned long size,
+ enum ttm_bo_type type,
+ uint32_t flags,
+ uint32_t page_alignment,
+ unsigned long buffer_start,
+ bool interruptible,
+ struct file *persistant_swap_storage,
+ size_t acc_size,
+ void (*destroy) (struct ttm_buffer_object *))
+{
+ int ret = 0;
+ unsigned long num_pages;
+
+ size += buffer_start & ~PAGE_MASK;
+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (num_pages == 0) {
+ printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
+ return -EINVAL;
+ }
+ bo->destroy = destroy;
+
+ spin_lock_init(&bo->lock);
+ kref_init(&bo->kref);
+ kref_init(&bo->list_kref);
+ atomic_set(&bo->cpu_writers, 0);
+ atomic_set(&bo->reserved, 1);
+ init_waitqueue_head(&bo->event_queue);
+ INIT_LIST_HEAD(&bo->lru);
+ INIT_LIST_HEAD(&bo->ddestroy);
+ INIT_LIST_HEAD(&bo->swap);
+ bo->bdev = bdev;
+ bo->type = type;
+ bo->num_pages = num_pages;
+ bo->mem.mem_type = TTM_PL_SYSTEM;
+ bo->mem.num_pages = bo->num_pages;
+ bo->mem.mm_node = NULL;
+ bo->mem.page_alignment = page_alignment;
+ bo->buffer_start = buffer_start & PAGE_MASK;
+ bo->priv_flags = 0;
+ bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
+ bo->seq_valid = false;
+ bo->persistant_swap_storage = persistant_swap_storage;
+ bo->acc_size = acc_size;
+
+ ret = ttm_bo_check_placement(bo, flags, 0ULL);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ /*
+ * If no caching attributes are set, accept any form of caching.
+ */
+
+ if ((flags & TTM_PL_MASK_CACHING) == 0)
+ flags |= TTM_PL_MASK_CACHING;
+
+ /*
+ * For ttm_bo_type_device buffers, allocate
+ * address space from the device.
+ */
+
+ if (bo->type == ttm_bo_type_device) {
+ ret = ttm_bo_setup_vm(bo);
+ if (ret)
+ goto out_err;
+ }
+
+ ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+ if (ret)
+ goto out_err;
+
+ ttm_bo_unreserve(bo);
+ return 0;
+
+out_err:
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_buffer_object_init);
+
+static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
+ unsigned long num_pages)
+{
+ size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
+ PAGE_MASK;
+
+ return bdev->ttm_bo_size + 2 * page_array_size;
+}
+
+int ttm_buffer_object_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ enum ttm_bo_type type,
+ uint32_t flags,
+ uint32_t page_alignment,
+ unsigned long buffer_start,
+ bool interruptible,
+ struct file *persistant_swap_storage,
+ struct ttm_buffer_object **p_bo)
+{
+ struct ttm_buffer_object *bo;
+ int ret;
+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
+
+ size_t acc_size =
+ ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+
+ if (unlikely(bo == NULL)) {
+ ttm_mem_global_free(mem_glob, acc_size, false);
+ return -ENOMEM;
+ }
+
+ ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
+ page_alignment, buffer_start,
+ interruptible,
+ persistant_swap_storage, acc_size, NULL);
+ if (likely(ret == 0))
+ *p_bo = bo;
+
+ return ret;
+}
+
+static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
+ uint32_t mem_type, bool allow_errors)
+{
+ int ret;
+
+ spin_lock(&bo->lock);
+ ret = ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bo->lock);
+
+ if (ret && allow_errors)
+ goto out;
+
+ if (bo->mem.mem_type == mem_type)
+ ret = ttm_bo_evict(bo, mem_type, false, false);
+
+ if (ret) {
+ if (allow_errors) {
+ goto out;
+ } else {
+ ret = 0;
+ printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
+ struct list_head *head,
+ unsigned mem_type, bool allow_errors)
+{
+ struct ttm_buffer_object *entry;
+ int ret;
+ int put_count;
+
+ /*
+ * Can't use standard list traversal since we're unlocking.
+ */
+
+ spin_lock(&bdev->lru_lock);
+
+ while (!list_empty(head)) {
+ entry = list_first_entry(head, struct ttm_buffer_object, lru);
+ kref_get(&entry->list_kref);
+ ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
+ put_count = ttm_bo_del_from_lru(entry);
+ spin_unlock(&bdev->lru_lock);
+ while (put_count--)
+ kref_put(&entry->list_kref, ttm_bo_ref_bug);
+ BUG_ON(ret);
+ ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
+ ttm_bo_unreserve(entry);
+ kref_put(&entry->list_kref, ttm_bo_release_list);
+ spin_lock(&bdev->lru_lock);
+ }
+
+ spin_unlock(&bdev->lru_lock);
+
+ return 0;
+}
+
+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ int ret = -EINVAL;
+
+ if (mem_type >= TTM_NUM_MEM_TYPES) {
+ printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
+ return ret;
+ }
+
+ if (!man->has_type) {
+ printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
+ "memory manager type %u\n", mem_type);
+ return ret;
+ }
+
+ man->use_type = false;
+ man->has_type = false;
+
+ ret = 0;
+ if (mem_type > 0) {
+ ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+
+ spin_lock(&bdev->lru_lock);
+ if (drm_mm_clean(&man->manager))
+ drm_mm_takedown(&man->manager);
+ else
+ ret = -EBUSY;
+
+ spin_unlock(&bdev->lru_lock);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_clean_mm);
+
+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+ if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
+ printk(KERN_ERR TTM_PFX
+ "Illegal memory manager memory type %u.\n",
+ mem_type);
+ return -EINVAL;
+ }
+
+ if (!man->has_type) {
+ printk(KERN_ERR TTM_PFX
+ "Memory type %u has not been initialized.\n",
+ mem_type);
+ return 0;
+ }
+
+ return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+}
+EXPORT_SYMBOL(ttm_bo_evict_mm);
+
+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+ unsigned long p_offset, unsigned long p_size)
+{
+ int ret = -EINVAL;
+ struct ttm_mem_type_manager *man;
+
+ if (type >= TTM_NUM_MEM_TYPES) {
+ printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
+ return ret;
+ }
+
+ man = &bdev->man[type];
+ if (man->has_type) {
+ printk(KERN_ERR TTM_PFX
+ "Memory manager already initialized for type %d\n",
+ type);
+ return ret;
+ }
+
+ ret = bdev->driver->init_mem_type(bdev, type, man);
+ if (ret)
+ return ret;
+
+ ret = 0;
+ if (type != TTM_PL_SYSTEM) {
+ if (!p_size) {
+ printk(KERN_ERR TTM_PFX
+ "Zero size memory manager type %d\n",
+ type);
+ return ret;
+ }
+ ret = drm_mm_init(&man->manager, p_offset, p_size);
+ if (ret)
+ return ret;
+ }
+ man->has_type = true;
+ man->use_type = true;
+ man->size = p_size;
+
+ INIT_LIST_HEAD(&man->lru);
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_init_mm);
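+
+/*
+ * Example call from a driver's load path (illustrative; p_offset and p_size
+ * are in pages, since the range manager tracks page-sized nodes):
+ *
+ *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0, vram_bytes >> PAGE_SHIFT);
+ */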
+
+int ttm_bo_device_release(struct ttm_bo_device *bdev)
+{
+ int ret = 0;
+ unsigned i = TTM_NUM_MEM_TYPES;
+ struct ttm_mem_type_manager *man;
+
+ while (i--) {
+ man = &bdev->man[i];
+ if (man->has_type) {
+ man->use_type = false;
+ if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
+ ret = -EBUSY;
+ printk(KERN_ERR TTM_PFX
+ "DRM memory manager type %d "
+ "is not clean.\n", i);
+ }
+ man->has_type = false;
+ }
+ }
+
+ if (!cancel_delayed_work(&bdev->wq))
+ flush_scheduled_work();
+
+ while (ttm_bo_delayed_delete(bdev, true))
+ ;
+
+ spin_lock(&bdev->lru_lock);
+ if (list_empty(&bdev->ddestroy))
+ TTM_DEBUG("Delayed destroy list was clean\n");
+
+ if (list_empty(&bdev->man[0].lru))
+ TTM_DEBUG("Swap list was clean\n");
+ spin_unlock(&bdev->lru_lock);
+
+ ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
+ BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
+ write_lock(&bdev->vm_lock);
+ drm_mm_takedown(&bdev->addr_space_mm);
+ write_unlock(&bdev->vm_lock);
+
+ __free_page(bdev->dummy_read_page);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_device_release);
+
+/*
+ * This function is intended to be called on drm driver load.
+ * If you decide to call it from firstopen, you must protect the call
+ * from a potentially racing ttm_bo_driver_finish in lastclose.
+ * (This may happen on X server restart).
+ */
+
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
+ struct ttm_mem_global *mem_glob,
+ struct ttm_bo_driver *driver, uint64_t file_page_offset)
+{
+ int ret = -EINVAL;
+
+ bdev->dummy_read_page = NULL;
+ rwlock_init(&bdev->vm_lock);
+ spin_lock_init(&bdev->lru_lock);
+
+ bdev->driver = driver;
+ bdev->mem_glob = mem_glob;
+
+ memset(bdev->man, 0, sizeof(bdev->man));
+
+ bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+ if (unlikely(bdev->dummy_read_page == NULL)) {
+ ret = -ENOMEM;
+ goto out_err0;
+ }
+
+ /*
+ * Initialize the system memory buffer type.
+ * Other types need to be driver / IOCTL initialized.
+ */
+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+ if (unlikely(ret != 0))
+ goto out_err1;
+
+ bdev->addr_space_rb = RB_ROOT;
+ ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
+ if (unlikely(ret != 0))
+ goto out_err2;
+
+ INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
+ bdev->nice_mode = true;
+ INIT_LIST_HEAD(&bdev->ddestroy);
+ INIT_LIST_HEAD(&bdev->swap_lru);
+ bdev->dev_mapping = NULL;
+ ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
+ ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
+ if (unlikely(ret != 0)) {
+ printk(KERN_ERR TTM_PFX
+ "Could not register buffer object swapout.\n");
+ goto out_err2;
+ }
+
+ bdev->ttm_bo_extra_size =
+ ttm_round_pot(sizeof(struct ttm_tt)) +
+ ttm_round_pot(sizeof(struct ttm_backend));
+
+ bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
+ ttm_round_pot(sizeof(struct ttm_buffer_object));
+
+ return 0;
+out_err2:
+ ttm_bo_clean_mm(bdev, 0);
+out_err1:
+ __free_page(bdev->dummy_read_page);
+out_err0:
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_device_init);
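+
+/*
+ * Illustrative init sequence for a driver (my_bo_driver and the mmap offset
+ * constant are hypothetical names, not part of this file):
+ *
+ *	ret = ttm_bo_device_init(&bdev, mem_glob, &my_bo_driver,
+ *				 DRM_FILE_PAGE_OFFSET);
+ *
+ * file_page_offset marks where buffer-object mmap offsets start so that
+ * they do not collide with other offsets handed out by the driver;
+ * ttm_bo_setup_vm() below allocates from this space.
+ */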
+
+/*
+ * buffer object vm functions.
+ */
+
+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+ if (mem->mem_type == TTM_PL_SYSTEM)
+ return false;
+
+ if (man->flags & TTM_MEMTYPE_FLAG_CMA)
+ return false;
+
+ if (mem->placement & TTM_PL_FLAG_CACHED)
+ return false;
+ }
+ return true;
+}
+
+int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem,
+ unsigned long *bus_base,
+ unsigned long *bus_offset, unsigned long *bus_size)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+ *bus_size = 0;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+
+ if (ttm_mem_reg_is_pci(bdev, mem)) {
+ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
+ *bus_size = mem->num_pages << PAGE_SHIFT;
+ *bus_base = man->io_offset;
+ }
+
+ return 0;
+}
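+
+/*
+ * A caller that wants to map such a region typically combines the outputs
+ * as bus_base + bus_offset and ioremaps that range; bus_size == 0 on return
+ * means the memory is not directly addressable over the bus (system pages
+ * or cached placements, per ttm_mem_reg_is_pci() above).
+ */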
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ loff_t offset = (loff_t) bo->addr_space_offset;
+ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+
+ if (!bdev->dev_mapping)
+ return;
+
+ unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+}
+
+static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct rb_node **cur = &bdev->addr_space_rb.rb_node;
+ struct rb_node *parent = NULL;
+ struct ttm_buffer_object *cur_bo;
+ unsigned long offset = bo->vm_node->start;
+ unsigned long cur_offset;
+
+ while (*cur) {
+ parent = *cur;
+ cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
+ cur_offset = cur_bo->vm_node->start;
+ if (offset < cur_offset)
+ cur = &parent->rb_left;
+ else if (offset > cur_offset)
+ cur = &parent->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&bo->vm_rb, parent, cur);
+ rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
+}
+
+/**
+ * ttm_bo_setup_vm:
+ *
+ * @bo: the buffer to allocate address space for
+ *
+ * Allocate address space in the drm device so that applications
+ * can mmap the buffer and access the contents. This only
+ * applies to ttm_bo_type_device objects as others are not
+ * placed in the drm device address space.
+ */
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int ret;
+
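+	/*
+	 * drm_mm_pre_get() preallocates free-list nodes outside the vm_lock so
+	 * that the atomic block allocation below can run under the write lock.
+	 * If the preallocated nodes are exhausted, drop the lock and retry.
+	 */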
+retry_pre_get:
+ ret = drm_mm_pre_get(&bdev->addr_space_mm);
+ if (unlikely(ret != 0))
+ return ret;
+
+ write_lock(&bdev->vm_lock);
+ bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
+ bo->mem.num_pages, 0, 0);
+
+ if (unlikely(bo->vm_node == NULL)) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
+ bo->mem.num_pages, 0);
+
+ if (unlikely(bo->vm_node == NULL)) {
+ write_unlock(&bdev->vm_lock);
+ goto retry_pre_get;
+ }
+
+ ttm_bo_vm_insert_rb(bo);
+ write_unlock(&bdev->vm_lock);
+ bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
+
+ return 0;
+out_unlock:
+ write_unlock(&bdev->vm_lock);
+ return ret;
+}
+
+int ttm_bo_wait(struct ttm_buffer_object *bo,
+ bool lazy, bool interruptible, bool no_wait)
+{
+ struct ttm_bo_driver *driver = bo->bdev->driver;
+ void *sync_obj;
+ void *sync_obj_arg;
+ int ret = 0;
+
+ if (likely(bo->sync_obj == NULL))
+ return 0;
+
+ while (bo->sync_obj) {
+
+ if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
+ void *tmp_obj = bo->sync_obj;
+ bo->sync_obj = NULL;
+ clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ spin_unlock(&bo->lock);
+ driver->sync_obj_unref(&tmp_obj);
+ spin_lock(&bo->lock);
+ continue;
+ }
+
+ if (no_wait)
+ return -EBUSY;
+
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ sync_obj_arg = bo->sync_obj_arg;
+ spin_unlock(&bo->lock);
+ ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
+ lazy, interruptible);
+ if (unlikely(ret != 0)) {
+ driver->sync_obj_unref(&sync_obj);
+ spin_lock(&bo->lock);
+ return ret;
+ }
+ spin_lock(&bo->lock);
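+		/*
+		 * The fence may have been signaled and replaced while we
+		 * slept; only clear it if it is still the one we waited on.
+		 */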
+ if (likely(bo->sync_obj == sync_obj &&
+ bo->sync_obj_arg == sync_obj_arg)) {
+ void *tmp_obj = bo->sync_obj;
+ bo->sync_obj = NULL;
+ clear_bit(TTM_BO_PRIV_FLAG_MOVING,
+ &bo->priv_flags);
+ spin_unlock(&bo->lock);
+ driver->sync_obj_unref(&sync_obj);
+ driver->sync_obj_unref(&tmp_obj);
+ spin_lock(&bo->lock);
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_wait);
+
+void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
+{
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+}
+
+int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
+ bool no_wait)
+{
+ int ret;
+
+ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+ if (no_wait)
+ return -EBUSY;
+ else if (interruptible) {
+ ret = wait_event_interruptible
+ (bo->event_queue, atomic_read(&bo->reserved) == 0);
+ if (unlikely(ret != 0))
+ return -ERESTART;
+ } else {
+ wait_event(bo->event_queue,
+ atomic_read(&bo->reserved) == 0);
+ }
+ }
+ return 0;
+}
+
+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
+{
+ int ret = 0;
+
+ /*
+ * Using ttm_bo_reserve instead of ttm_bo_block_reservation
+ * makes sure the lru lists are updated.
+ */
+
+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+ if (unlikely(ret != 0))
+ return ret;
+ spin_lock(&bo->lock);
+ ret = ttm_bo_wait(bo, false, true, no_wait);
+ spin_unlock(&bo->lock);
+ if (likely(ret == 0))
+ atomic_inc(&bo->cpu_writers);
+ ttm_bo_unreserve(bo);
+ return ret;
+}
+
+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
+{
+ if (atomic_dec_and_test(&bo->cpu_writers))
+ wake_up_all(&bo->event_queue);
+}
+
+/**
+ * A buffer object shrink method that tries to swap out the first
+ * buffer object on the bo_global::swap_lru list.
+ */
+
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+{
+ struct ttm_bo_device *bdev =
+ container_of(shrink, struct ttm_bo_device, shrink);
+ struct ttm_buffer_object *bo;
+ int ret = -EBUSY;
+ int put_count;
+ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
+
+ spin_lock(&bdev->lru_lock);
+ while (ret == -EBUSY) {
+ if (unlikely(list_empty(&bdev->swap_lru))) {
+ spin_unlock(&bdev->lru_lock);
+ return -EBUSY;
+ }
+
+ bo = list_first_entry(&bdev->swap_lru,
+ struct ttm_buffer_object, swap);
+ kref_get(&bo->list_kref);
+
+ /**
+ * Reserve buffer. Since we unlock while sleeping, we need
+ * to re-check that nobody removed us from the swap-list while
+ * we slept.
+ */
+
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (unlikely(ret == -EBUSY)) {
+ spin_unlock(&bdev->lru_lock);
+ ttm_bo_wait_unreserved(bo, false);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ spin_lock(&bdev->lru_lock);
+ }
+ }
+
+ BUG_ON(ret != 0);
+ put_count = ttm_bo_del_from_lru(bo);
+ spin_unlock(&bdev->lru_lock);
+
+ while (put_count--)
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
+ /**
+ * Wait for GPU, then move to system cached.
+ */
+
+ spin_lock(&bo->lock);
+ ret = ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bo->lock);
+
+ if (unlikely(ret != 0))
+ goto out;
+
+ if ((bo->mem.placement & swap_placement) != swap_placement) {
+ struct ttm_mem_reg evict_mem;
+
+ evict_mem = bo->mem;
+ evict_mem.mm_node = NULL;
+ evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+ evict_mem.mem_type = TTM_PL_SYSTEM;
+
+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
+ false, false);
+ if (unlikely(ret != 0))
+ goto out;
+ }
+
+ ttm_bo_unmap_virtual(bo);
+
+ /**
+ * Swap out. Buffer will be swapped in again as soon as
+ * anyone tries to access a ttm page.
+ */
+
+ ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
+out:
+
+ /**
+ * Unreserve without putting on LRU to avoid swapping out an
+ * already swapped buffer.
+ */
+
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ return ret;
+}
+
+void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
+{
+ while (ttm_bo_swapout(&bdev->shrink) == 0)
+ ;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
new file mode 100644
index 00000000000..517c8455963
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -0,0 +1,561 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/io.h>
+#include <linux/highmem.h>
+#include <linux/wait.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+#include <linux/module.h>
+
+void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
+{
+ struct ttm_mem_reg *old_mem = &bo->mem;
+
+ if (old_mem->mm_node) {
+ spin_lock(&bo->bdev->lru_lock);
+ drm_mm_put_block(old_mem->mm_node);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+ old_mem->mm_node = NULL;
+}
+
+int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+{
+ struct ttm_tt *ttm = bo->ttm;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ uint32_t save_flags = old_mem->placement;
+ int ret;
+
+ if (old_mem->mem_type != TTM_PL_SYSTEM) {
+ ttm_tt_unbind(ttm);
+ ttm_bo_free_old_node(bo);
+ ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
+ TTM_PL_MASK_MEM);
+ old_mem->mem_type = TTM_PL_SYSTEM;
+ save_flags = old_mem->placement;
+ }
+
+ ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (new_mem->mem_type != TTM_PL_SYSTEM) {
+ ret = ttm_tt_bind(ttm, new_mem);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+ ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_move_ttm);
+
+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+ void **virtual)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ unsigned long bus_offset;
+ unsigned long bus_size;
+ unsigned long bus_base;
+ int ret;
+ void *addr;
+
+ *virtual = NULL;
+ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
+ if (ret || bus_size == 0)
+ return ret;
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
+ else {
+ if (mem->placement & TTM_PL_FLAG_WC)
+ addr = ioremap_wc(bus_base + bus_offset, bus_size);
+ else
+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
+ if (!addr)
+ return -ENOMEM;
+ }
+ *virtual = addr;
+ return 0;
+}
+
+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+ void *virtual)
+{
+ struct ttm_mem_type_manager *man;
+
+ man = &bdev->man[mem->mem_type];
+
+ if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+ iounmap(virtual);
+}
+
+static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+ uint32_t *dstP =
+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
+ uint32_t *srcP =
+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
+
+ int i;
+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
+ iowrite32(ioread32(srcP++), dstP++);
+ return 0;
+}
+
+static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
+ unsigned long page)
+{
+ struct page *d = ttm_tt_get_page(ttm, page);
+ void *dst;
+
+ if (!d)
+ return -ENOMEM;
+
+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+ dst = kmap(d);
+ if (!dst)
+ return -ENOMEM;
+
+ memcpy_fromio(dst, src, PAGE_SIZE);
+ kunmap(d);
+ return 0;
+}
+
+static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
+ unsigned long page)
+{
+ struct page *s = ttm_tt_get_page(ttm, page);
+ void *src;
+
+ if (!s)
+ return -ENOMEM;
+
+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+ src = kmap(s);
+ if (!src)
+ return -ENOMEM;
+
+ memcpy_toio(dst, src, PAGE_SIZE);
+ kunmap(s);
+ return 0;
+}
+
+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+ struct ttm_tt *ttm = bo->ttm;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_mem_reg old_copy = *old_mem;
+ void *old_iomap;
+ void *new_iomap;
+ int ret;
+ uint32_t save_flags = old_mem->placement;
+ unsigned long i;
+ unsigned long page;
+ unsigned long add = 0;
+ int dir;
+
+ ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
+ if (ret)
+ return ret;
+ ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
+ if (ret)
+ goto out;
+
+ if (old_iomap == NULL && new_iomap == NULL)
+ goto out2;
+ if (old_iomap == NULL && ttm == NULL)
+ goto out2;
+
+ add = 0;
+ dir = 1;
+
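+	/*
+	 * If the source and destination ranges may overlap (same memory type
+	 * and the new range starts below the end of the old one), copy the
+	 * pages back to front so source pages are not overwritten before
+	 * they have been read.
+	 */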
+ if ((old_mem->mem_type == new_mem->mem_type) &&
+ (new_mem->mm_node->start <
+ old_mem->mm_node->start + old_mem->mm_node->size)) {
+ dir = -1;
+ add = new_mem->num_pages - 1;
+ }
+
+ for (i = 0; i < new_mem->num_pages; ++i) {
+ page = i * dir + add;
+ if (old_iomap == NULL)
+ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
+ else if (new_iomap == NULL)
+ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
+ else
+ ret = ttm_copy_io_page(new_iomap, old_iomap, page);
+ if (ret)
+ goto out1;
+ }
+ mb();
+out2:
+ ttm_bo_free_old_node(bo);
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+ ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
+ ttm_tt_unbind(ttm);
+ ttm_tt_destroy(ttm);
+ bo->ttm = NULL;
+ }
+
+out1:
+ ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+out:
+ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_move_memcpy);
+
+static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
+{
+ kfree(bo);
+}
+
+/**
+ * ttm_buffer_object_transfer
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
+ * holding the data of @bo with the old placement.
+ *
+ * This is a utility function that may be called after an accelerated move
+ * has been scheduled. A new buffer object is created as a placeholder for
+ * the old data while it's being copied. When that buffer object is idle,
+ * it can be destroyed, releasing the space of the old placement.
+ * Returns:
+ * !0: Failure.
+ */
+
+static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+ struct ttm_buffer_object **new_obj)
+{
+ struct ttm_buffer_object *fbo;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+
+ fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+ if (!fbo)
+ return -ENOMEM;
+
+ *fbo = *bo;
+
+ /**
+ * Fix up members that we shouldn't copy directly:
+ * TODO: Explicit member copy would probably be better here.
+ */
+
+ spin_lock_init(&fbo->lock);
+ init_waitqueue_head(&fbo->event_queue);
+ INIT_LIST_HEAD(&fbo->ddestroy);
+ INIT_LIST_HEAD(&fbo->lru);
+ INIT_LIST_HEAD(&fbo->swap);
+ fbo->vm_node = NULL;
+
+ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ if (fbo->mem.mm_node)
+ fbo->mem.mm_node->private = (void *)fbo;
+ kref_init(&fbo->list_kref);
+ kref_init(&fbo->kref);
+ fbo->destroy = &ttm_transfered_destroy;
+
+ *new_obj = fbo;
+ return 0;
+}
+
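+/*
+ * Translate TTM caching flags into kernel page protection bits for the
+ * architecture at hand (write-combined or uncached mappings where the
+ * placement asks for them).
+ */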
+pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+{
+#if defined(__i386__) || defined(__x86_64__)
+ if (caching_flags & TTM_PL_FLAG_WC)
+ tmp = pgprot_writecombine(tmp);
+ else if (boot_cpu_data.x86 > 3)
+ tmp = pgprot_noncached(tmp);
+
+#elif defined(__powerpc__)
+ if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
+ if (caching_flags & TTM_PL_FLAG_UNCACHED)
+ pgprot_val(tmp) |= _PAGE_GUARDED;
+ }
+#endif
+#if defined(__ia64__)
+ if (caching_flags & TTM_PL_FLAG_WC)
+ tmp = pgprot_writecombine(tmp);
+ else
+ tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__sparc__)
+ if (!(caching_flags & TTM_PL_FLAG_CACHED))
+ tmp = pgprot_noncached(tmp);
+#endif
+ return tmp;
+}
+
+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
+ unsigned long bus_base,
+ unsigned long bus_offset,
+ unsigned long bus_size,
+ struct ttm_bo_kmap_obj *map)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_reg *mem = &bo->mem;
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+ map->bo_kmap_type = ttm_bo_map_premapped;
+ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+ } else {
+ map->bo_kmap_type = ttm_bo_map_iomap;
+ if (mem->placement & TTM_PL_FLAG_WC)
+ map->virtual = ioremap_wc(bus_base + bus_offset,
+ bus_size);
+ else
+ map->virtual = ioremap_nocache(bus_base + bus_offset,
+ bus_size);
+ }
+ return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+ unsigned long start_page,
+ unsigned long num_pages,
+ struct ttm_bo_kmap_obj *map)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+	pgprot_t prot;
+ struct ttm_tt *ttm = bo->ttm;
+ struct page *d;
+ int i;
+
+ BUG_ON(!ttm);
+ if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+ /*
+ * We're mapping a single page, and the desired
+ * page protection is consistent with the bo.
+ */
+
+ map->bo_kmap_type = ttm_bo_map_kmap;
+ map->page = ttm_tt_get_page(ttm, start_page);
+ map->virtual = kmap(map->page);
+ } else {
+ /*
+		 * Populate the part we're mapping.
+ */
+ for (i = start_page; i < start_page + num_pages; ++i) {
+ d = ttm_tt_get_page(ttm, i);
+ if (!d)
+ return -ENOMEM;
+ }
+
+ /*
+ * We need to use vmap to get the desired page protection
+		 * or to make the buffer object look contiguous.
+ */
+ prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
+ PAGE_KERNEL :
+ ttm_io_prot(mem->placement, PAGE_KERNEL);
+ map->bo_kmap_type = ttm_bo_map_vmap;
+ map->virtual = vmap(ttm->pages + start_page, num_pages,
+ 0, prot);
+ }
+ return (!map->virtual) ? -ENOMEM : 0;
+}
+
+int ttm_bo_kmap(struct ttm_buffer_object *bo,
+ unsigned long start_page, unsigned long num_pages,
+ struct ttm_bo_kmap_obj *map)
+{
+ int ret;
+ unsigned long bus_base;
+ unsigned long bus_offset;
+ unsigned long bus_size;
+
+ BUG_ON(!list_empty(&bo->swap));
+ map->virtual = NULL;
+ if (num_pages > bo->num_pages)
+ return -EINVAL;
+ if (start_page > bo->num_pages)
+ return -EINVAL;
+#if 0
+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+ return -EPERM;
+#endif
+ ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
+ &bus_offset, &bus_size);
+ if (ret)
+ return ret;
+ if (bus_size == 0) {
+ return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
+ } else {
+ bus_offset += start_page << PAGE_SHIFT;
+ bus_size = num_pages << PAGE_SHIFT;
+ return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+ }
+}
+EXPORT_SYMBOL(ttm_bo_kmap);
+
+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
+{
+ if (!map->virtual)
+ return;
+ switch (map->bo_kmap_type) {
+ case ttm_bo_map_iomap:
+ iounmap(map->virtual);
+ break;
+ case ttm_bo_map_vmap:
+ vunmap(map->virtual);
+ break;
+ case ttm_bo_map_kmap:
+ kunmap(map->page);
+ break;
+ case ttm_bo_map_premapped:
+ break;
+ default:
+ BUG();
+ }
+ map->virtual = NULL;
+ map->page = NULL;
+}
+EXPORT_SYMBOL(ttm_bo_kunmap);
+
+int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
+ unsigned long dst_offset,
+ unsigned long *pfn, pgprot_t *prot)
+{
+ struct ttm_mem_reg *mem = &bo->mem;
+ struct ttm_bo_device *bdev = bo->bdev;
+ unsigned long bus_offset;
+ unsigned long bus_size;
+ unsigned long bus_base;
+ int ret;
+ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
+ &bus_size);
+ if (ret)
+ return -EINVAL;
+	if (bus_size != 0)
+		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
+	else if (!bo->ttm)
+		return -EINVAL;
+	else
+		*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
+						   dst_offset >> PAGE_SHIFT));
+ *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
+ PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
+
+ return 0;
+}
+
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ void *sync_obj,
+ void *sync_obj_arg,
+ bool evict, bool no_wait,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ int ret;
+ uint32_t save_flags = old_mem->placement;
+ struct ttm_buffer_object *ghost_obj;
+ void *tmp_obj = NULL;
+
+ spin_lock(&bo->lock);
+ if (bo->sync_obj) {
+ tmp_obj = bo->sync_obj;
+ bo->sync_obj = NULL;
+ }
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ bo->sync_obj_arg = sync_obj_arg;
+ if (evict) {
+ ret = ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bo->lock);
+ driver->sync_obj_unref(&bo->sync_obj);
+
+ if (ret)
+ return ret;
+
+ ttm_bo_free_old_node(bo);
+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+ (bo->ttm != NULL)) {
+ ttm_tt_unbind(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+ } else {
+ /**
+ * This should help pipeline ordinary buffer moves.
+ *
+ * Hang old buffer memory on a new buffer object,
+ * and leave it to be released when the GPU
+ * operation has completed.
+ */
+
+ set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ spin_unlock(&bo->lock);
+
+ ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+ if (ret)
+ return ret;
+
+ /**
+ * If we're not moving to fixed memory, the TTM object
+		 * needs to stay alive. Otherwise hang it on the ghost
+ * bo to be unbound and destroyed.
+ */
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
+ ghost_obj->ttm = NULL;
+ else
+ bo->ttm = NULL;
+
+ ttm_bo_unreserve(ghost_obj);
+ ttm_bo_unref(&ghost_obj);
+ }
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+ ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
new file mode 100644
index 00000000000..27b146c54fb
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -0,0 +1,454 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <linux/mm.h>
+#include <linux/version.h>
+#include <linux/rbtree.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#define TTM_BO_VM_NUM_PREFAULT 16
+
+static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
+ unsigned long page_start,
+ unsigned long num_pages)
+{
+ struct rb_node *cur = bdev->addr_space_rb.rb_node;
+ unsigned long cur_offset;
+ struct ttm_buffer_object *bo;
+ struct ttm_buffer_object *best_bo = NULL;
+
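+	/*
+	 * Walk the address-space rb-tree, keyed on vm_node->start, keeping
+	 * the closest node at or below page_start as the best candidate.
+	 */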
+ while (likely(cur != NULL)) {
+ bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
+ cur_offset = bo->vm_node->start;
+ if (page_start >= cur_offset) {
+ cur = cur->rb_right;
+ best_bo = bo;
+ if (page_start == cur_offset)
+ break;
+ } else
+ cur = cur->rb_left;
+ }
+
+ if (unlikely(best_bo == NULL))
+ return NULL;
+
+ if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
+ (page_start + num_pages)))
+ return NULL;
+
+ return best_bo;
+}
+
+static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+ struct ttm_bo_device *bdev = bo->bdev;
+ unsigned long bus_base;
+ unsigned long bus_offset;
+ unsigned long bus_size;
+ unsigned long page_offset;
+ unsigned long page_last;
+ unsigned long pfn;
+ struct ttm_tt *ttm = NULL;
+ struct page *page;
+ int ret;
+ int i;
+ bool is_iomem;
+ unsigned long address = (unsigned long)vmf->virtual_address;
+ int retval = VM_FAULT_NOPAGE;
+
+ /*
+ * Work around locking order reversal in fault / nopfn
+ * between mmap_sem and bo_reserve: Perform a trylock operation
+ * for reserve, and if it fails, retry the fault after scheduling.
+ */
+
+ ret = ttm_bo_reserve(bo, true, true, false, 0);
+ if (unlikely(ret != 0)) {
+ if (ret == -EBUSY)
+ set_need_resched();
+ return VM_FAULT_NOPAGE;
+ }
+
+ /*
+ * Wait for buffer data in transit, due to a pipelined
+ * move.
+ */
+
+ spin_lock(&bo->lock);
+ if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
+ ret = ttm_bo_wait(bo, false, true, false);
+ spin_unlock(&bo->lock);
+ if (unlikely(ret != 0)) {
+ retval = (ret != -ERESTART) ?
+ VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
+ goto out_unlock;
+ }
+ } else
+ spin_unlock(&bo->lock);
+
+
+ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
+ &bus_size);
+ if (unlikely(ret != 0)) {
+ retval = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ is_iomem = (bus_size != 0);
+
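+	/*
+	 * Translate the faulting address into a page index within the
+	 * buffer object, using the bo's offset in the device address space.
+	 */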
+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
+ bo->vm_node->start - vma->vm_pgoff;
+ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
+ bo->vm_node->start - vma->vm_pgoff;
+
+ if (unlikely(page_offset >= bo->num_pages)) {
+ retval = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ /*
+ * Strictly, we're not allowed to modify vma->vm_page_prot here,
+ * since the mmap_sem is only held in read mode. However, we
+ * modify only the caching bits of vma->vm_page_prot and
+ * consider those bits protected by
+ * the bo->mutex, as we should be the only writers.
+ * There shouldn't really be any readers of these bits except
+ * within vm_insert_mixed()? fork?
+ *
+ * TODO: Add a list of vmas to the bo, and change the
+ * vma->vm_page_prot when the object changes caching policy, with
+ * the correct locks held.
+ */
+
+ if (is_iomem) {
+ vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
+ vma->vm_page_prot);
+ } else {
+ ttm = bo->ttm;
+ vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
+ vm_get_page_prot(vma->vm_flags) :
+ ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+ }
+
+ /*
+ * Speculatively prefault a number of pages. Only error on
+ * first page.
+ */
+
+ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+
+ if (is_iomem)
+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
+ page_offset;
+ else {
+ page = ttm_tt_get_page(ttm, page_offset);
+ if (unlikely(!page && i == 0)) {
+ retval = VM_FAULT_OOM;
+ goto out_unlock;
+ } else if (unlikely(!page)) {
+ break;
+ }
+ pfn = page_to_pfn(page);
+ }
+
+ ret = vm_insert_mixed(vma, address, pfn);
+ /*
+ * Somebody beat us to this PTE or prefaulting to
+ * an already populated PTE, or prefaulting error.
+ */
+
+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+ break;
+ else if (unlikely(ret != 0)) {
+ retval =
+ (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ goto out_unlock;
+
+ }
+
+ address += PAGE_SIZE;
+ if (unlikely(++page_offset >= page_last))
+ break;
+ }
+
+out_unlock:
+ ttm_bo_unreserve(bo);
+ return retval;
+}
+
+static void ttm_bo_vm_open(struct vm_area_struct *vma)
+{
+ struct ttm_buffer_object *bo =
+ (struct ttm_buffer_object *)vma->vm_private_data;
+
+ (void)ttm_bo_reference(bo);
+}
+
+static void ttm_bo_vm_close(struct vm_area_struct *vma)
+{
+ struct ttm_buffer_object *bo =
+ (struct ttm_buffer_object *)vma->vm_private_data;
+
+ ttm_bo_unref(&bo);
+ vma->vm_private_data = NULL;
+}
+
+static struct vm_operations_struct ttm_bo_vm_ops = {
+ .fault = ttm_bo_vm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close
+};
+
+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+ struct ttm_bo_device *bdev)
+{
+ struct ttm_bo_driver *driver;
+ struct ttm_buffer_object *bo;
+ int ret;
+
+ read_lock(&bdev->vm_lock);
+ bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
+ (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+ if (likely(bo != NULL))
+ ttm_bo_reference(bo);
+ read_unlock(&bdev->vm_lock);
+
+ if (unlikely(bo == NULL)) {
+ printk(KERN_ERR TTM_PFX
+ "Could not find buffer object to map.\n");
+ return -EINVAL;
+ }
+
+ driver = bo->bdev->driver;
+ if (unlikely(!driver->verify_access)) {
+ ret = -EPERM;
+ goto out_unref;
+ }
+ ret = driver->verify_access(bo, filp);
+ if (unlikely(ret != 0))
+ goto out_unref;
+
+ vma->vm_ops = &ttm_bo_vm_ops;
+
+ /*
+ * Note: We're transferring the bo reference to
+ * vma->vm_private_data here.
+ */
+
+ vma->vm_private_data = bo;
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+ return 0;
+out_unref:
+ ttm_bo_unref(&bo);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_mmap);
+
+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+{
+ if (vma->vm_pgoff != 0)
+ return -EACCES;
+
+ vma->vm_ops = &ttm_bo_vm_ops;
+ vma->vm_private_data = ttm_bo_reference(bo);
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_fbdev_mmap);
+
+
+ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ const char __user *wbuf, char __user *rbuf, size_t count,
+ loff_t *f_pos, bool write)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_driver *driver;
+ struct ttm_bo_kmap_obj map;
+ unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
+ unsigned long kmap_offset;
+ unsigned long kmap_end;
+ unsigned long kmap_num;
+ size_t io_size;
+ unsigned int page_offset;
+ char *virtual;
+ int ret;
+ bool no_wait = false;
+ bool dummy;
+
+ read_lock(&bdev->vm_lock);
+ bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
+ if (likely(bo != NULL))
+ ttm_bo_reference(bo);
+ read_unlock(&bdev->vm_lock);
+
+ if (unlikely(bo == NULL))
+ return -EFAULT;
+
+ driver = bo->bdev->driver;
+	if (unlikely(!driver->verify_access)) {
+ ret = -EPERM;
+ goto out_unref;
+ }
+
+ ret = driver->verify_access(bo, filp);
+ if (unlikely(ret != 0))
+ goto out_unref;
+
+ kmap_offset = dev_offset - bo->vm_node->start;
+	if (unlikely(kmap_offset >= bo->num_pages)) {
+ ret = -EFBIG;
+ goto out_unref;
+ }
+
+ page_offset = *f_pos & ~PAGE_MASK;
+ io_size = bo->num_pages - kmap_offset;
+ io_size = (io_size << PAGE_SHIFT) - page_offset;
+ if (count < io_size)
+ io_size = count;
+
+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+ kmap_num = kmap_end - kmap_offset + 1;
+
+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ERESTART:
+ ret = -EINTR;
+ goto out_unref;
+ case -EBUSY:
+ ret = -EAGAIN;
+ goto out_unref;
+ default:
+ goto out_unref;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unreserve(bo);
+ goto out_unref;
+ }
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ virtual += page_offset;
+
+ if (write)
+ ret = copy_from_user(virtual, wbuf, io_size);
+ else
+ ret = copy_to_user(rbuf, virtual, io_size);
+
+ ttm_bo_kunmap(&map);
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
+
+ if (unlikely(ret != 0))
+		return -EFAULT;
+
+ *f_pos += io_size;
+
+ return io_size;
+out_unref:
+ ttm_bo_unref(&bo);
+ return ret;
+}
+
+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
+ char __user *rbuf, size_t count, loff_t *f_pos,
+ bool write)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_end;
+ unsigned long kmap_num;
+ size_t io_size;
+ unsigned int page_offset;
+ char *virtual;
+ int ret;
+ bool no_wait = false;
+ bool dummy;
+
+ kmap_offset = (*f_pos >> PAGE_SHIFT);
+ if (unlikely(kmap_offset) >= bo->num_pages)
+ return -EFBIG;
+
+ page_offset = *f_pos & ~PAGE_MASK;
+ io_size = bo->num_pages - kmap_offset;
+ io_size = (io_size << PAGE_SHIFT) - page_offset;
+ if (count < io_size)
+ io_size = count;
+
+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+ kmap_num = kmap_end - kmap_offset + 1;
+
+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ERESTART:
+ return -EINTR;
+ case -EBUSY:
+ return -EAGAIN;
+ default:
+ return ret;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unreserve(bo);
+ return ret;
+ }
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ virtual += page_offset;
+
+ if (write)
+ ret = copy_from_user(virtual, wbuf, io_size);
+ else
+ ret = copy_to_user(rbuf, virtual, io_size);
+
+ ttm_bo_kunmap(&map);
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
+
+ if (unlikely(ret != 0))
+		return -EFAULT;
+
+ *f_pos += io_size;
+
+ return io_size;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
new file mode 100644
index 00000000000..0b14eb1972b
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_global.c
@@ -0,0 +1,114 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+struct ttm_global_item {
+ struct mutex mutex;
+ void *object;
+ int refcount;
+};
+
+static struct ttm_global_item glob[TTM_GLOBAL_NUM];
+
+void ttm_global_init(void)
+{
+ int i;
+
+ for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
+ struct ttm_global_item *item = &glob[i];
+ mutex_init(&item->mutex);
+ item->object = NULL;
+ item->refcount = 0;
+ }
+}
+
+void ttm_global_release(void)
+{
+ int i;
+ for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
+ struct ttm_global_item *item = &glob[i];
+ BUG_ON(item->object != NULL);
+ BUG_ON(item->refcount != 0);
+ }
+}
+
+int ttm_global_item_ref(struct ttm_global_reference *ref)
+{
+ int ret;
+ struct ttm_global_item *item = &glob[ref->global_type];
+ void *object;
+
+ mutex_lock(&item->mutex);
+ if (item->refcount == 0) {
+ item->object = kmalloc(ref->size, GFP_KERNEL);
+ if (unlikely(item->object == NULL)) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ ref->object = item->object;
+ ret = ref->init(ref);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+	}
+	++item->refcount;
+	ref->object = item->object;
+	object = item->object;
+	mutex_unlock(&item->mutex);
+	return 0;
+out_err:
+	kfree(item->object);
+	item->object = NULL;
+	mutex_unlock(&item->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_global_item_ref);
+
+void ttm_global_item_unref(struct ttm_global_reference *ref)
+{
+ struct ttm_global_item *item = &glob[ref->global_type];
+
+ mutex_lock(&item->mutex);
+ BUG_ON(item->refcount == 0);
+ BUG_ON(ref->object != item->object);
+ if (--item->refcount == 0) {
+ ref->release(ref);
+ kfree(item->object);
+ item->object = NULL;
+ }
+ mutex_unlock(&item->mutex);
+}
+EXPORT_SYMBOL(ttm_global_item_unref);
+
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
new file mode 100644
index 00000000000..87323d4ff68
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -0,0 +1,234 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_memory.h"
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#define TTM_PFX "[TTM] "
+#define TTM_MEMORY_ALLOC_RETRIES 4
+
+/**
+ * At this point we only support a single shrink callback.
+ * Extend this if needed, perhaps using a linked list of callbacks.
+ * Note that this function is reentrant:
+ * many threads may try to swap out at any given time.
+ */
+
+static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
+ uint64_t extra)
+{
+ int ret;
+ struct ttm_mem_shrink *shrink;
+ uint64_t target;
+ uint64_t total_target;
+
+ spin_lock(&glob->lock);
+ if (glob->shrink == NULL)
+ goto out;
+
+ if (from_workqueue) {
+ target = glob->swap_limit;
+ total_target = glob->total_memory_swap_limit;
+ } else if (capable(CAP_SYS_ADMIN)) {
+ total_target = glob->emer_total_memory;
+ target = glob->emer_memory;
+ } else {
+ total_target = glob->max_total_memory;
+ target = glob->max_memory;
+ }
+
+ total_target = (extra >= total_target) ? 0 : total_target - extra;
+ target = (extra >= target) ? 0 : target - extra;
+
+ while (glob->used_memory > target ||
+ glob->used_total_memory > total_target) {
+ shrink = glob->shrink;
+ spin_unlock(&glob->lock);
+ ret = shrink->do_shrink(shrink);
+ spin_lock(&glob->lock);
+ if (unlikely(ret != 0))
+ goto out;
+ }
+out:
+ spin_unlock(&glob->lock);
+}
+
+static void ttm_shrink_work(struct work_struct *work)
+{
+ struct ttm_mem_global *glob =
+ container_of(work, struct ttm_mem_global, work);
+
+ ttm_shrink(glob, true, 0ULL);
+}
+
+int ttm_mem_global_init(struct ttm_mem_global *glob)
+{
+ struct sysinfo si;
+ uint64_t mem;
+
+ spin_lock_init(&glob->lock);
+ glob->swap_queue = create_singlethread_workqueue("ttm_swap");
+ INIT_WORK(&glob->work, ttm_shrink_work);
+ init_waitqueue_head(&glob->queue);
+
+ si_meminfo(&si);
+
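+	/*
+	 * Default policy: cap TTM usage at half of (low) memory, allow
+	 * privileged callers up to 75%, and start background swapping
+	 * before the cap is reached.
+	 */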
+ mem = si.totalram - si.totalhigh;
+ mem *= si.mem_unit;
+
+ glob->max_memory = mem >> 1;
+ glob->emer_memory = (mem >> 1) + (mem >> 2);
+ glob->swap_limit = glob->max_memory - (mem >> 3);
+ glob->used_memory = 0;
+ glob->used_total_memory = 0;
+ glob->shrink = NULL;
+
+ mem = si.totalram;
+ mem *= si.mem_unit;
+
+ glob->max_total_memory = mem >> 1;
+ glob->emer_total_memory = (mem >> 1) + (mem >> 2);
+
+ glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 3);
+
+ printk(KERN_INFO TTM_PFX "TTM available graphics memory: %llu MiB\n",
+ glob->max_total_memory >> 20);
+ printk(KERN_INFO TTM_PFX "TTM available object memory: %llu MiB\n",
+ glob->max_memory >> 20);
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_mem_global_init);
+
+void ttm_mem_global_release(struct ttm_mem_global *glob)
+{
+ printk(KERN_INFO TTM_PFX "Used total memory is %llu bytes.\n",
+ (unsigned long long)glob->used_total_memory);
+ flush_workqueue(glob->swap_queue);
+ destroy_workqueue(glob->swap_queue);
+ glob->swap_queue = NULL;
+}
+EXPORT_SYMBOL(ttm_mem_global_release);
+
+static inline void ttm_check_swapping(struct ttm_mem_global *glob)
+{
+ bool needs_swapping;
+
+ spin_lock(&glob->lock);
+ needs_swapping = (glob->used_memory > glob->swap_limit ||
+ glob->used_total_memory >
+ glob->total_memory_swap_limit);
+ spin_unlock(&glob->lock);
+
+ if (unlikely(needs_swapping))
+ (void)queue_work(glob->swap_queue, &glob->work);
+
+}
+
+void ttm_mem_global_free(struct ttm_mem_global *glob,
+ uint64_t amount, bool himem)
+{
+ spin_lock(&glob->lock);
+ glob->used_total_memory -= amount;
+ if (!himem)
+ glob->used_memory -= amount;
+ wake_up_all(&glob->queue);
+ spin_unlock(&glob->lock);
+}
+
+static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
+ uint64_t amount, bool himem, bool reserve)
+{
+ uint64_t limit;
+ uint64_t lomem_limit;
+ int ret = -ENOMEM;
+
+ spin_lock(&glob->lock);
+
+ if (capable(CAP_SYS_ADMIN)) {
+ limit = glob->emer_total_memory;
+ lomem_limit = glob->emer_memory;
+ } else {
+ limit = glob->max_total_memory;
+ lomem_limit = glob->max_memory;
+ }
+
+ if (unlikely(glob->used_total_memory + amount > limit))
+ goto out_unlock;
+ if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
+ goto out_unlock;
+
+ if (reserve) {
+ glob->used_total_memory += amount;
+ if (!himem)
+ glob->used_memory += amount;
+ }
+ ret = 0;
+out_unlock:
+ spin_unlock(&glob->lock);
+ ttm_check_swapping(glob);
+
+ return ret;
+}
+
+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+ bool no_wait, bool interruptible, bool himem)
+{
+ int count = TTM_MEMORY_ALLOC_RETRIES;
+
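+	/*
+	 * Try to reserve the memory; on failure, shrink with some headroom
+	 * (the requested amount plus 25% and a small constant) and retry a
+	 * bounded number of times.
+	 */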
+ while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true)
+ != 0)) {
+ if (no_wait)
+ return -ENOMEM;
+ if (unlikely(count-- == 0))
+ return -ENOMEM;
+ ttm_shrink(glob, false, memory + (memory >> 2) + 16);
+ }
+
+ return 0;
+}
+
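+/*
+ * Round an allocation size up to the next power of two, or to a whole
+ * number of pages once it exceeds PAGE_SIZE, so accounting sizes stay
+ * predictable.
+ */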
+size_t ttm_round_pot(size_t size)
+{
+ if ((size & (size - 1)) == 0)
+ return size;
+ else if (size > PAGE_SIZE)
+ return PAGE_ALIGN(size);
+ else {
+ size_t tmp_size = 4;
+
+ while (tmp_size < size)
+ tmp_size <<= 1;
+
+ return tmp_size;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
new file mode 100644
index 00000000000..59ce8191d58
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -0,0 +1,50 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ * Jerome Glisse
+ */
+#include <linux/module.h>
+#include <ttm/ttm_module.h>
+
+static int __init ttm_init(void)
+{
+ ttm_global_init();
+ return 0;
+}
+
+static void __exit ttm_exit(void)
+{
+ ttm_global_release();
+}
+
+module_init(ttm_init);
+module_exit(ttm_exit);
+
+MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
+MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
new file mode 100644
index 00000000000..c27ab3a877a
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -0,0 +1,635 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+
+static int ttm_tt_swapin(struct ttm_tt *ttm);
+
+#if defined(CONFIG_X86)
+static void ttm_tt_clflush_page(struct page *page)
+{
+ uint8_t *page_virtual;
+ unsigned int i;
+
+ if (unlikely(page == NULL))
+ return;
+
+ page_virtual = kmap_atomic(page, KM_USER0);
+
+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ clflush(page_virtual + i);
+
+ kunmap_atomic(page_virtual, KM_USER0);
+}
+
+static void ttm_tt_cache_flush_clflush(struct page *pages[],
+ unsigned long num_pages)
+{
+ unsigned long i;
+
+ mb();
+ for (i = 0; i < num_pages; ++i)
+ ttm_tt_clflush_page(*pages++);
+ mb();
+}
+#else
+static void ttm_tt_ipi_handler(void *null)
+{
+ ;
+}
+#endif
+
+void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
+{
+
+#if defined(CONFIG_X86)
+ if (cpu_has_clflush) {
+ ttm_tt_cache_flush_clflush(pages, num_pages);
+ return;
+ }
+#else
+ if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
+ printk(KERN_ERR TTM_PFX
+ "Timed out waiting for drm cache flush.\n");
+#endif
+}
+
+/**
+ * Allocates storage for pointers to the pages that back the ttm.
+ *
+ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
+ */
+static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
+{
+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
+ ttm->pages = NULL;
+
+ if (size <= PAGE_SIZE)
+ ttm->pages = kzalloc(size, GFP_KERNEL);
+
+ if (!ttm->pages) {
+ ttm->pages = vmalloc_user(size);
+ if (ttm->pages)
+ ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
+ }
+}
+
+static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
+{
+ if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
+ vfree(ttm->pages);
+ ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
+ } else {
+ kfree(ttm->pages);
+ }
+ ttm->pages = NULL;
+}
+
+static struct page *ttm_tt_alloc_page(unsigned page_flags)
+{
+ if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+
+ return alloc_page(GFP_HIGHUSER);
+}
+
+static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
+{
+ int write;
+ int dirty;
+ struct page *page;
+ int i;
+ struct ttm_backend *be = ttm->be;
+
+ BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
+ write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
+ dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
+
+ if (be)
+ be->func->clear(be);
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ page = ttm->pages[i];
+ if (page == NULL)
+ continue;
+
+ if (page == ttm->dummy_read_page) {
+ BUG_ON(write);
+ continue;
+ }
+
+ if (write && dirty && !PageReserved(page))
+ set_page_dirty_lock(page);
+
+ ttm->pages[i] = NULL;
+ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
+ put_page(page);
+ }
+ ttm->state = tt_unpopulated;
+ ttm->first_himem_page = ttm->num_pages;
+ ttm->last_lomem_page = -1;
+}
+
+static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
+{
+ struct page *p;
+ struct ttm_bo_device *bdev = ttm->bdev;
+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
+ int ret;
+
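+	/*
+	 * Highmem pages are filled in from the top of the page array and
+	 * lowmem pages from the bottom; loop until the requested slot is
+	 * populated.
+	 */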
+ while (NULL == (p = ttm->pages[index])) {
+ p = ttm_tt_alloc_page(ttm->page_flags);
+
+ if (!p)
+ return NULL;
+
+ if (PageHighMem(p)) {
+ ret =
+ ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
+ false, false, true);
+ if (unlikely(ret != 0))
+ goto out_err;
+ ttm->pages[--ttm->first_himem_page] = p;
+ } else {
+ ret =
+ ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
+ false, false, false);
+ if (unlikely(ret != 0))
+ goto out_err;
+ ttm->pages[++ttm->last_lomem_page] = p;
+ }
+ }
+ return p;
+out_err:
+ put_page(p);
+ return NULL;
+}
+
+struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
+{
+ int ret;
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0))
+ return NULL;
+ }
+ return __ttm_tt_get_page(ttm, index);
+}
+
+int ttm_tt_populate(struct ttm_tt *ttm)
+{
+ struct page *page;
+ unsigned long i;
+ struct ttm_backend *be;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ be = ttm->be;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ page = __ttm_tt_get_page(ttm, i);
+ if (!page)
+ return -ENOMEM;
+ }
+
+ be->func->populate(be, ttm->num_pages, ttm->pages,
+ ttm->dummy_read_page);
+ ttm->state = tt_unbound;
+ return 0;
+}
+
+#ifdef CONFIG_X86
+static inline int ttm_tt_set_page_caching(struct page *p,
+ enum ttm_caching_state c_state)
+{
+ if (PageHighMem(p))
+ return 0;
+
+ switch (c_state) {
+ case tt_cached:
+ return set_pages_wb(p, 1);
+ case tt_wc:
+ return set_memory_wc((unsigned long) page_address(p), 1);
+ default:
+ return set_pages_uc(p, 1);
+ }
+}
+#else /* CONFIG_X86 */
+static inline int ttm_tt_set_page_caching(struct page *p,
+ enum ttm_caching_state c_state)
+{
+ return 0;
+}
+#endif /* CONFIG_X86 */
+
+/*
+ * Change caching policy for the linear kernel map
+ * for range of pages in a ttm.
+ */
+
+static int ttm_tt_set_caching(struct ttm_tt *ttm,
+ enum ttm_caching_state c_state)
+{
+ int i, j;
+ struct page *cur_page;
+ int ret;
+
+ if (ttm->caching_state == c_state)
+ return 0;
+
+ if (c_state != tt_cached) {
+ ret = ttm_tt_populate(ttm);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ if (ttm->caching_state == tt_cached)
+ ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ cur_page = ttm->pages[i];
+ if (likely(cur_page != NULL)) {
+ ret = ttm_tt_set_page_caching(cur_page, c_state);
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+ }
+
+ ttm->caching_state = c_state;
+
+ return 0;
+
+out_err:
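+	/* Roll back the caching change on the pages already converted. */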
+ for (j = 0; j < i; ++j) {
+ cur_page = ttm->pages[j];
+ if (likely(cur_page != NULL)) {
+ (void)ttm_tt_set_page_caching(cur_page,
+ ttm->caching_state);
+ }
+ }
+
+ return ret;
+}
+
+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
+{
+ enum ttm_caching_state state;
+
+ if (placement & TTM_PL_FLAG_WC)
+ state = tt_wc;
+ else if (placement & TTM_PL_FLAG_UNCACHED)
+ state = tt_uncached;
+ else
+ state = tt_cached;
+
+ return ttm_tt_set_caching(ttm, state);
+}
+
+static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
+{
+ int i;
+ struct page *cur_page;
+ struct ttm_backend *be = ttm->be;
+
+ if (be)
+ be->func->clear(be);
+ (void)ttm_tt_set_caching(ttm, tt_cached);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ cur_page = ttm->pages[i];
+ ttm->pages[i] = NULL;
+ if (cur_page) {
+ if (page_count(cur_page) != 1)
+ printk(KERN_ERR TTM_PFX
+ "Erroneous page count. "
+ "Leaking pages.\n");
+ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
+ PageHighMem(cur_page));
+ __free_page(cur_page);
+ }
+ }
+ ttm->state = tt_unpopulated;
+ ttm->first_himem_page = ttm->num_pages;
+ ttm->last_lomem_page = -1;
+}
+
+void ttm_tt_destroy(struct ttm_tt *ttm)
+{
+ struct ttm_backend *be;
+
+ if (unlikely(ttm == NULL))
+ return;
+
+ be = ttm->be;
+ if (likely(be != NULL)) {
+ be->func->destroy(be);
+ ttm->be = NULL;
+ }
+
+ if (likely(ttm->pages != NULL)) {
+ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
+ ttm_tt_free_user_pages(ttm);
+ else
+ ttm_tt_free_alloced_pages(ttm);
+
+ ttm_tt_free_page_directory(ttm);
+ }
+
+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
+ ttm->swap_storage)
+ fput(ttm->swap_storage);
+
+ kfree(ttm);
+}
+
+int ttm_tt_set_user(struct ttm_tt *ttm,
+ struct task_struct *tsk,
+ unsigned long start, unsigned long num_pages)
+{
+ struct mm_struct *mm = tsk->mm;
+ int ret;
+ int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
+ struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
+
+ BUG_ON(num_pages != ttm->num_pages);
+ BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
+
+ /**
+ * Account user pages as lowmem pages for now.
+ */
+
+ ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
+ false, false, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(tsk, mm, start, num_pages,
+ write, 0, ttm->pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ if (ret != num_pages && write) {
+ ttm_tt_free_user_pages(ttm);
+ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
+ return -ENOMEM;
+ }
+
+ ttm->tsk = tsk;
+ ttm->start = start;
+ ttm->state = tt_unbound;
+
+ return 0;
+}
+
+struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
+ uint32_t page_flags, struct page *dummy_read_page)
+{
+ struct ttm_bo_driver *bo_driver = bdev->driver;
+ struct ttm_tt *ttm;
+
+ if (!bo_driver)
+ return NULL;
+
+ ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
+ if (!ttm)
+ return NULL;
+
+ ttm->bdev = bdev;
+
+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->first_himem_page = ttm->num_pages;
+ ttm->last_lomem_page = -1;
+ ttm->caching_state = tt_cached;
+ ttm->page_flags = page_flags;
+
+ ttm->dummy_read_page = dummy_read_page;
+
+ ttm_tt_alloc_page_directory(ttm);
+ if (!ttm->pages) {
+ ttm_tt_destroy(ttm);
+ printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
+ return NULL;
+ }
+ ttm->be = bo_driver->create_ttm_backend_entry(bdev);
+ if (!ttm->be) {
+ ttm_tt_destroy(ttm);
+ printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
+ return NULL;
+ }
+ ttm->state = tt_unpopulated;
+ return ttm;
+}
+
+void ttm_tt_unbind(struct ttm_tt *ttm)
+{
+ int ret;
+ struct ttm_backend *be = ttm->be;
+
+ if (ttm->state == tt_bound) {
+ ret = be->func->unbind(be);
+ BUG_ON(ret);
+ ttm->state = tt_unbound;
+ }
+}
+
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+ int ret = 0;
+ struct ttm_backend *be;
+
+ if (!ttm)
+ return -EINVAL;
+
+ if (ttm->state == tt_bound)
+ return 0;
+
+ be = ttm->be;
+
+ ret = ttm_tt_populate(ttm);
+ if (ret)
+ return ret;
+
+ ret = be->func->bind(be, bo_mem);
+ if (ret) {
+ printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
+ return ret;
+ }
+
+ ttm->state = tt_bound;
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
+ ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_bind);
+
+static int ttm_tt_swapin(struct ttm_tt *ttm)
+{
+ struct address_space *swap_space;
+ struct file *swap_storage;
+ struct page *from_page;
+ struct page *to_page;
+ void *from_virtual;
+ void *to_virtual;
+ int i;
+ int ret;
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
+ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
+ ttm->num_pages);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+ return 0;
+ }
+
+ swap_storage = ttm->swap_storage;
+ BUG_ON(swap_storage == NULL);
+
+ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ from_page = read_mapping_page(swap_space, i, NULL);
+ if (IS_ERR(from_page))
+ goto out_err;
+ to_page = __ttm_tt_get_page(ttm, i);
+ if (unlikely(to_page == NULL))
+ goto out_err;
+
+ preempt_disable();
+ from_virtual = kmap_atomic(from_page, KM_USER0);
+ to_virtual = kmap_atomic(to_page, KM_USER1);
+ memcpy(to_virtual, from_virtual, PAGE_SIZE);
+ kunmap_atomic(to_virtual, KM_USER1);
+ kunmap_atomic(from_virtual, KM_USER0);
+ preempt_enable();
+ page_cache_release(from_page);
+ }
+
+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
+ fput(swap_storage);
+ ttm->swap_storage = NULL;
+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+
+ return 0;
+out_err:
+ ttm_tt_free_alloced_pages(ttm);
+ return -ENOMEM;
+}
+
+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
+{
+ struct address_space *swap_space;
+ struct file *swap_storage;
+ struct page *from_page;
+ struct page *to_page;
+ void *from_virtual;
+ void *to_virtual;
+ int i;
+
+ BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
+ BUG_ON(ttm->caching_state != tt_cached);
+
+ /*
+ * For user buffers, just unpin the pages, as there should be
+ * vma references.
+ */
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
+ ttm_tt_free_user_pages(ttm);
+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+ ttm->swap_storage = NULL;
+ return 0;
+ }
+
+ if (!persistant_swap_storage) {
+ swap_storage = shmem_file_setup("ttm swap",
+ ttm->num_pages << PAGE_SHIFT,
+ 0);
+ if (unlikely(IS_ERR(swap_storage))) {
+ printk(KERN_ERR "Failed allocating swap storage.\n");
+ return -ENOMEM;
+ }
+ } else
+ swap_storage = persistant_swap_storage;
+
+ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ from_page = ttm->pages[i];
+ if (unlikely(from_page == NULL))
+ continue;
+ to_page = read_mapping_page(swap_space, i, NULL);
+ if (unlikely(IS_ERR(to_page)))
+ goto out_err;
+
+ preempt_disable();
+ from_virtual = kmap_atomic(from_page, KM_USER0);
+ to_virtual = kmap_atomic(to_page, KM_USER1);
+ memcpy(to_virtual, from_virtual, PAGE_SIZE);
+ kunmap_atomic(to_virtual, KM_USER1);
+ kunmap_atomic(from_virtual, KM_USER0);
+ preempt_enable();
+ set_page_dirty(to_page);
+ mark_page_accessed(to_page);
+ page_cache_release(to_page);
+ }
+
+ ttm_tt_free_alloced_pages(ttm);
+ ttm->swap_storage = swap_storage;
+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+ if (persistant_swap_storage)
+ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
+
+ return 0;
+out_err:
+ if (!persistant_swap_storage)
+ fput(swap_storage);
+
+ return -ENOMEM;
+}
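
For orientation, a minimal usage sketch of how the ttm_tt entry points above fit together (illustrative only, not part of the patch; the example_tt_cycle name is made up, the bdev, bo_mem and dummy_read_page arguments are assumed to come from the surrounding ttm_bo layer, and error handling is trimmed):

static int example_tt_cycle(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *bo_mem,
			    struct page *dummy_read_page)
{
	struct ttm_tt *ttm;
	int ret;

	ttm = ttm_tt_create(bdev, 4 * PAGE_SIZE, 0, dummy_read_page);
	if (!ttm)
		return -ENOMEM;

	ret = ttm_tt_bind(ttm, bo_mem);		/* populates, then binds */
	if (ret)
		goto out;

	ttm_tt_unbind(ttm);			/* swapout requires an unbound tt */
	ret = ttm_tt_swapout(ttm, NULL);	/* NULL: use an anonymous shmem file */
out:
	ttm_tt_destroy(ttm);			/* frees pages, backend and swap file */
	return ret;
}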
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 409e00afdd0..327380888b4 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -195,10 +195,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
default:
vsg->state = dr_via_sg_init;
}
- if (vsg->bounce_buffer) {
- vfree(vsg->bounce_buffer);
- vsg->bounce_buffer = NULL;
- }
+ vfree(vsg->bounce_buffer);
+ vsg->bounce_buffer = NULL;
vsg->free_on_sequence = 0;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 7e67dcb3d4f..7831a0318d3 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -116,9 +116,16 @@ config HID_CYPRESS
---help---
Support for cypress mouse and barcode readers.
-config DRAGONRISE_FF
- tristate "DragonRise Inc. force feedback support"
+config HID_DRAGONRISE
+ tristate "DragonRise Inc. support" if EMBEDDED
depends on USB_HID
+ default !EMBEDDED
+ ---help---
+ Say Y here if you have DragonRise Inc. game controllers.
+
+config DRAGONRISE_FF
+ bool "DragonRise Inc. force feedback support"
+ depends on HID_DRAGONRISE
select INPUT_FF_MEMLESS
---help---
Say Y here if you want to enable force feedback support for DragonRise Inc.
@@ -160,7 +167,7 @@ config HID_LOGITECH
Support for Logitech devices that are not fully compliant with HID standard.
config LOGITECH_FF
- bool "Logitech force feedback"
+ bool "Logitech force feedback support"
depends on HID_LOGITECH
select INPUT_FF_MEMLESS
help
@@ -176,7 +183,7 @@ config LOGITECH_FF
force feedback.
config LOGIRUMBLEPAD2_FF
- bool "Logitech Rumblepad 2 force feedback"
+ bool "Logitech Rumblepad 2 force feedback support"
depends on HID_LOGITECH
select INPUT_FF_MEMLESS
help
@@ -211,11 +218,19 @@ config HID_PANTHERLORD
---help---
Support for PantherLord/GreenAsia based device support.
+config HID_PANTHERLORD
+ tristate "Pantherlord support" if EMBEDDED
+ depends on USB_HID
+ default !EMBEDDED
+ ---help---
+ Say Y here if you have a PantherLord/GreenAsia based game controller
+ or adapter.
+
config PANTHERLORD_FF
bool "Pantherlord force feedback support"
depends on HID_PANTHERLORD
select INPUT_FF_MEMLESS
- help
+ ---help---
Say Y here if you have a PantherLord/GreenAsia based game controller
or adapter and want to enable force feedback support for it.
@@ -247,15 +262,38 @@ config HID_SUNPLUS
---help---
Support for Sunplus wireless desktop.
-config GREENASIA_FF
- tristate "GreenAsia (Product ID 0x12) force feedback support"
+config HID_GREENASIA
+ tristate "GreenAsia (Product ID 0x12) support" if EMBEDDED
depends on USB_HID
+ default !EMBEDDED
+ ---help---
+ Say Y here if you have a GreenAsia (Product ID 0x12) based game
+ controller or adapter.
+
+config GREENASIA_FF
+ bool "GreenAsia (Product ID 0x12) force feedback support"
+ depends on HID_GREENASIA
select INPUT_FF_MEMLESS
---help---
Say Y here if you have a GreenAsia (Product ID 0x12) based game controller
(like MANTA Warrior MM816 and SpeedLink Strike2 SL-6635) or adapter
and want to enable force feedback support for it.
+config HID_SMARTJOYPLUS
+ tristate "SmartJoy PLUS PS2/USB adapter support" if EMBEDDED
+ depends on USB_HID
+ default !EMBEDDED
+ ---help---
+ Support for the SmartJoy PLUS PS2/USB adapter.
+
+config SMARTJOYPLUS_FF
+ bool "SmartJoy PLUS PS2/USB adapter force feedback support"
+ depends on HID_SMARTJOYPLUS
+ select INPUT_FF_MEMLESS
+ ---help---
+ Say Y here if you have a SmartJoy PLUS PS2/USB adapter and want to
+ enable force feedback support for it.
+
config HID_TOPSEED
tristate "TopSeed Cyberlink remote control support" if EMBEDDED
depends on USB_HID
@@ -263,21 +301,45 @@ config HID_TOPSEED
---help---
Say Y if you have a TopSeed Cyberlink remote control.
-config THRUSTMASTER_FF
- tristate "ThrustMaster devices support"
+config HID_THRUSTMASTER
+ tristate "ThrustMaster devices support" if EMBEDDED
depends on USB_HID
+ default !EMBEDDED
+ ---help---
+ Say Y here if you have a THRUSTMASTER FireStorm Dual Power 2 or
+ a THRUSTMASTER Ferrari GT Rumble Wheel.
+
+config THRUSTMASTER_FF
+ bool "ThrustMaster devices force feedback support"
+ depends on HID_THRUSTMASTER
select INPUT_FF_MEMLESS
- help
+ ---help---
Say Y here if you have a THRUSTMASTER FireStorm Dual Power 2 or
- a THRUSTMASTER Ferrari GT Rumble Force or Force Feedback Wheel.
+ a THRUSTMASTER Ferrari GT Rumble Force or Force Feedback Wheel and
+ want to enable force feedback support for it.
-config ZEROPLUS_FF
- tristate "Zeroplus based game controller support"
+config HID_WACOM
+ tristate "Wacom Bluetooth devices support" if EMBEDDED
+ depends on BT_HIDP
+ default !EMBEDDED
+ ---help---
+ Support for the Wacom Graphire Bluetooth tablet.
+
+config HID_ZEROPLUS
+ tristate "Zeroplus based game controller support" if EMBEDDED
depends on USB_HID
- select INPUT_FF_MEMLESS
- help
+ default !EMBEDDED
+ ---help---
Say Y here if you have a Zeroplus based game controller.
+config ZEROPLUS_FF
+ bool "Zeroplus based game controller force feedback support"
+ depends on HID_ZEROPLUS
+ select INPUT_FF_MEMLESS
+ ---help---
+ Say Y here if you have a Zeroplus based game controller and want
+ to have force feedback support for it.
+
endmenu
endif # HID_SUPPORT
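
The Kconfig rework above follows one pattern throughout: each vendor gains a tristate HID_<vendor> option that builds the driver itself, while the old force-feedback option becomes a bool that only compiles the FF code into that driver. The driver-side half of the pattern looks roughly like the sketch below (CONFIG_EXAMPLE_FF and examplff_init are made-up names; the real instances are the hid-drff.c, hid-gaff.c, hid-tmff.c and hid-zpff.c hunks further down):

#ifdef CONFIG_EXAMPLE_FF
#include "usbhid/usbhid.h"

static int examplff_init(struct hid_device *hid)
{
	/* register an INPUT_FF_MEMLESS based effect handler here */
	return 0;
}
#else
static inline int examplff_init(struct hid_device *hid)
{
	return 0;	/* FF support compiled out: probe still succeeds */
}
#endif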
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 1f7cb0fd450..db35151673b 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
-obj-$(CONFIG_DRAGONRISE_FF) += hid-drff.o
+obj-$(CONFIG_HID_DRAGONRISE) += hid-drff.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
@@ -34,12 +34,14 @@ obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
+obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
-obj-$(CONFIG_GREENASIA_FF) += hid-gaff.o
-obj-$(CONFIG_THRUSTMASTER_FF) += hid-tmff.o
+obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
+obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
-obj-$(CONFIG_ZEROPLUS_FF) += hid-zpff.o
+obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
+obj-$(CONFIG_HID_WACOM) += hid-wacom.o
obj-$(CONFIG_USB_HID) += usbhid/
obj-$(CONFIG_USB_MOUSE) += usbhid/
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index acbce5745b0..303ccce05bb 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -436,10 +436,6 @@ static const struct hid_device_id apple_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
- /* Apple wireless Mighty Mouse */
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 0x030c),
- .driver_data = APPLE_MIGHTYMOUSE | APPLE_INVERT_HWHEEL },
-
{ }
};
MODULE_DEVICE_TABLE(hid, apple_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8551693d645..f2c21d5d24e 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1312,6 +1312,8 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 47ac1a7d66e..04359ed64b8 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -137,6 +137,14 @@ static const struct hid_usage_entry hid_usage_table[] = {
{0, 0x44, "BarrelSwitch"},
{0, 0x45, "Eraser"},
{0, 0x46, "TabletPick"},
+ {0, 0x47, "Confidence"},
+ {0, 0x48, "Width"},
+ {0, 0x49, "Height"},
+ {0, 0x51, "ContactID"},
+ {0, 0x52, "InputMode"},
+ {0, 0x53, "DeviceIndex"},
+ {0, 0x54, "ContactCount"},
+ {0, 0x55, "ContactMaximumNumber"},
{ 15, 0, "PhysicalInterfaceDevice" },
{0, 0x00, "Undefined"},
{0, 0x01, "Physical_Interface_Device"},
@@ -514,9 +522,11 @@ static const char *events[EV_MAX + 1] = {
[EV_FF_STATUS] = "ForceFeedbackStatus",
};
-static const char *syncs[2] = {
+static const char *syncs[3] = {
[SYN_REPORT] = "Report", [SYN_CONFIG] = "Config",
+ [SYN_MT_REPORT] = "MT Report",
};
+
static const char *keys[KEY_MAX + 1] = {
[KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc",
[KEY_1] = "1", [KEY_2] = "2",
@@ -734,8 +744,17 @@ static const char *absolutes[ABS_MAX + 1] = {
[ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X",
[ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure",
[ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt",
- [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "Tool Width",
+ [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "ToolWidth",
[ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc",
+ [ABS_MT_TOUCH_MAJOR] = "MTMajor",
+ [ABS_MT_TOUCH_MINOR] = "MTMinor",
+ [ABS_MT_WIDTH_MAJOR] = "MTMajorW",
+ [ABS_MT_WIDTH_MINOR] = "MTMinorW",
+ [ABS_MT_ORIENTATION] = "MTOrientation",
+ [ABS_MT_POSITION_X] = "MTPositionX",
+ [ABS_MT_POSITION_Y] = "MTPositionY",
+ [ABS_MT_TOOL_TYPE] = "MTToolType",
+ [ABS_MT_BLOB_ID] = "MTBlobID",
};
static const char *misc[MSC_MAX + 1] = {
diff --git a/drivers/hid/hid-drff.c b/drivers/hid/hid-drff.c
index 34f3eb65100..a239d20ad7a 100644
--- a/drivers/hid/hid-drff.c
+++ b/drivers/hid/hid-drff.c
@@ -32,6 +32,8 @@
#include <linux/hid.h>
#include "hid-ids.h"
+
+#ifdef CONFIG_DRAGONRISE_FF
#include "usbhid/usbhid.h"
struct drff_device {
@@ -135,6 +137,12 @@ static int drff_init(struct hid_device *hid)
return 0;
}
+#else
+static inline int drff_init(struct hid_device *hid)
+{
+ return 0;
+}
+#endif
static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c
index 510ad3ab8d3..8a11ccddaf2 100644
--- a/drivers/hid/hid-gaff.c
+++ b/drivers/hid/hid-gaff.c
@@ -31,6 +31,8 @@
#include <linux/usb.h>
#include <linux/hid.h>
#include "hid-ids.h"
+
+#ifdef CONFIG_GREENASIA_FF
#include "usbhid/usbhid.h"
struct gaff_device {
@@ -130,6 +132,12 @@ static int gaff_init(struct hid_device *hid)
return 0;
}
+#else
+static inline int gaff_init(struct hid_device *hdev)
+{
+ return 0;
+}
+#endif
static int ga_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4d5ee2bbc62..63010103792 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -414,8 +414,10 @@
#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006
#define USB_VENDOR_ID_WACOM 0x056a
+#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
#define USB_VENDOR_ID_WISEGROUP 0x0925
+#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
#define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101
#define USB_DEVICE_ID_4_PHIDGETSERVO_20 0x8104
#define USB_DEVICE_ID_8_8_4_IF_KIT 0x8201
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index 51aff08e10c..56099709581 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -50,6 +50,12 @@ static const signed short ff_joystick[] = {
-1
};
+static const signed short ff_joystick_ac[] = {
+ FF_CONSTANT,
+ FF_AUTOCENTER,
+ -1
+};
+
static const signed short ff_wheel[] = {
FF_CONSTANT,
FF_AUTOCENTER,
@@ -60,8 +66,8 @@ static const struct dev_type devices[] = {
{ 0x046d, 0xc211, ff_rumble },
{ 0x046d, 0xc219, ff_rumble },
{ 0x046d, 0xc283, ff_joystick },
- { 0x046d, 0xc286, ff_joystick },
- { 0x046d, 0xc294, ff_joystick },
+ { 0x046d, 0xc286, ff_joystick_ac },
+ { 0x046d, 0xc294, ff_wheel },
{ 0x046d, 0xc295, ff_joystick },
{ 0x046d, 0xca03, ff_wheel },
};
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index c5b252be9c2..75ed9d2c1a3 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -1,13 +1,8 @@
/*
- * HID driver for some ntrig "special" devices
+ * HID driver for N-Trig touchscreens
*
- * Copyright (c) 1999 Andreas Gal
- * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
- * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
- * Copyright (c) 2006-2007 Jiri Kosina
- * Copyright (c) 2007 Paul Walmsley
- * Copyright (c) 2008 Jiri Slaby
* Copyright (c) 2008 Rafi Rubin
+ * Copyright (c) 2009 Stephane Chatty
*
*/
@@ -29,15 +24,79 @@
#define nt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
EV_KEY, (c))
+struct ntrig_data {
+ __s32 x, y, id, w, h;
+ char reading_a_point, found_contact_id;
+};
+
+/*
+ * this driver is aimed at two firmware versions in circulation:
+ * - dual pen/finger single touch
+ * - finger multitouch, pen not working
+ */
+
static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- if ((usage->hid & HID_USAGE_PAGE) == HID_UP_DIGITIZER &&
- (usage->hid & 0xff) == 0x47) {
- nt_map_key_clear(BTN_TOOL_DOUBLETAP);
- return 1;
+ switch (usage->hid & HID_USAGE_PAGE) {
+
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_X);
+ input_set_abs_params(hi->input, ABS_X,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ case HID_GD_Y:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_Y);
+ input_set_abs_params(hi->input, ABS_Y,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ }
+ return 0;
+
+ case HID_UP_DIGITIZER:
+ switch (usage->hid) {
+ /* we do not want to map these for now */
+ case HID_DG_INVERT: /* value is always 0 */
+ case HID_DG_ERASER: /* value is always 0 */
+ case HID_DG_CONTACTID: /* value is useless */
+ case HID_DG_BARRELSWITCH: /* doubtful */
+ case HID_DG_INPUTMODE:
+ case HID_DG_DEVICEINDEX:
+ case HID_DG_CONTACTCOUNT:
+ case HID_DG_CONTACTMAX:
+ return -1;
+
+ /* original mapping by Rafi Rubin */
+ case HID_DG_CONFIDENCE:
+ nt_map_key_clear(BTN_TOOL_DOUBLETAP);
+ return 1;
+
+ /* width/height mapped on TouchMajor/TouchMinor/Orientation */
+ case HID_DG_WIDTH:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MAJOR);
+ return 1;
+ case HID_DG_HEIGHT:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MINOR);
+ input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
+ 0, 1, 0, 0);
+ return 1;
+ }
+ return 0;
+
+ case 0xff000000:
+ /* we do not want to map these: no input-oriented meaning */
+ return -1;
}
+
return 0;
}
@@ -51,6 +110,138 @@ static int ntrig_input_mapped(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
+
+/*
+ * this function is called upon all reports
+ * so that we can filter contact point information,
+ * decide whether we are in multi or single touch mode
+ * and call input_mt_sync after each point if necessary
+ */
+static int ntrig_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct input_dev *input = field->hidinput->input;
+ struct ntrig_data *nd = hid_get_drvdata(hid);
+
+ if (hid->claimed & HID_CLAIMED_INPUT) {
+ switch (usage->hid) {
+ case HID_GD_X:
+ nd->x = value;
+ nd->reading_a_point = 1;
+ break;
+ case HID_GD_Y:
+ nd->y = value;
+ break;
+ case HID_DG_CONTACTID:
+ nd->id = value;
+ /* we receive this only when in multitouch mode */
+ nd->found_contact_id = 1;
+ break;
+ case HID_DG_WIDTH:
+ nd->w = value;
+ break;
+ case HID_DG_HEIGHT:
+ nd->h = value;
+ /*
+ * when in single touch mode, this is the last
+ * report received in a finger event. We want
+ * to emit a normal (X, Y) position
+ */
+ if (!nd->found_contact_id) {
+ input_event(input, EV_ABS, ABS_X, nd->x);
+ input_event(input, EV_ABS, ABS_Y, nd->y);
+ }
+ break;
+ case HID_DG_TIPPRESSURE:
+ /*
+ * when in single touch mode, this is the last
+ * report received in a pen event. We want
+ * to emit a normal (X, Y) position
+ */
+ if (!nd->found_contact_id) {
+ input_event(input, EV_ABS, ABS_X, nd->x);
+ input_event(input, EV_ABS, ABS_Y, nd->y);
+ input_event(input, EV_ABS, ABS_PRESSURE, value);
+ }
+ break;
+ case 0xff000002:
+ /*
+ * we receive this when the device is in multitouch
+ * mode. The first of the three values tagged with
+ * this usage tells if the contact point is real
+ * or a placeholder
+ */
+ if (!nd->reading_a_point || value != 1)
+ break;
+ /* emit a normal (X, Y) for the first point only */
+ if (nd->id == 0) {
+ input_event(input, EV_ABS, ABS_X, nd->x);
+ input_event(input, EV_ABS, ABS_Y, nd->y);
+ }
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y);
+ if (nd->w > nd->h) {
+ input_event(input, EV_ABS,
+ ABS_MT_ORIENTATION, 1);
+ input_event(input, EV_ABS,
+ ABS_MT_TOUCH_MAJOR, nd->w);
+ input_event(input, EV_ABS,
+ ABS_MT_TOUCH_MINOR, nd->h);
+ } else {
+ input_event(input, EV_ABS,
+ ABS_MT_ORIENTATION, 0);
+ input_event(input, EV_ABS,
+ ABS_MT_TOUCH_MAJOR, nd->h);
+ input_event(input, EV_ABS,
+ ABS_MT_TOUCH_MINOR, nd->w);
+ }
+ input_mt_sync(field->hidinput->input);
+ nd->reading_a_point = 0;
+ nd->found_contact_id = 0;
+ break;
+
+ default:
+ /* fallback to the generic hidinput handling */
+ return 0;
+ }
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct ntrig_data *nd;
+
+ nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL);
+ if (!nd) {
+ dev_err(&hdev->dev, "cannot allocate N-Trig data\n");
+ return -ENOMEM;
+ }
+ nd->reading_a_point = 0;
+ nd->found_contact_id = 0;
+ hid_set_drvdata(hdev, nd);
+
+ ret = hid_parse(hdev);
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ if (ret)
+ kfree(nd);
+ return ret;
+}
+
+static void ntrig_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ kfree(hid_get_drvdata(hdev));
+}
+
static const struct hid_device_id ntrig_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN),
.driver_data = NTRIG_DUPLICATE_USAGES },
@@ -58,11 +249,20 @@ static const struct hid_device_id ntrig_devices[] = {
};
MODULE_DEVICE_TABLE(hid, ntrig_devices);
+static const struct hid_usage_id ntrig_grabbed_usages[] = {
+ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
static struct hid_driver ntrig_driver = {
.name = "ntrig",
.id_table = ntrig_devices,
+ .probe = ntrig_probe,
+ .remove = ntrig_remove,
.input_mapping = ntrig_input_mapping,
.input_mapped = ntrig_input_mapped,
+ .usage_table = ntrig_grabbed_usages,
+ .event = ntrig_event,
};
static int ntrig_init(void)
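
As a quick reference (illustrative only, not part of the patch; the example_emit_contact helper is made up), the per-contact sequence that ntrig_event() emits in multitouch mode boils down to the following: orientation is 1 when the touch is wider than it is tall, and input_mt_sync() closes each contact's packet:

static void example_emit_contact(struct input_dev *input,
				 int x, int y, int w, int h)
{
	input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
	input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
	input_event(input, EV_ABS, ABS_MT_ORIENTATION, w > h);
	input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, w > h ? w : h);
	input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, w > h ? h : w);
	input_mt_sync(input);	/* terminates this contact's packet */
}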
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
new file mode 100644
index 00000000000..eab169e5c37
--- /dev/null
+++ b/drivers/hid/hid-sjoy.c
@@ -0,0 +1,180 @@
+/*
+ * Force feedback support for SmartJoy PLUS PS2->USB adapter
+ *
+ * Copyright (c) 2009 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * Based on hid-pl.c and hid-gaff.c
+ * Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com>
+ * Copyright (c) 2008 Lukasz Lubojanski <lukasz@lubojanski.info>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define DEBUG */
+
+#include <linux/input.h>
+#include <linux/usb.h>
+#include <linux/hid.h>
+#include "hid-ids.h"
+
+#ifdef CONFIG_SMARTJOYPLUS_FF
+#include "usbhid/usbhid.h"
+
+struct sjoyff_device {
+ struct hid_report *report;
+};
+
+static int hid_sjoyff_play(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct sjoyff_device *sjoyff = data;
+ u32 left, right;
+
+ left = effect->u.rumble.strong_magnitude;
+ right = effect->u.rumble.weak_magnitude;
+ dev_dbg(&dev->dev, "called with 0x%08x 0x%08x\n", left, right);
+
+ left = left * 0xff / 0xffff;
+ right = (right != 0); /* on/off only */
+
+ sjoyff->report->field[0]->value[1] = right;
+ sjoyff->report->field[0]->value[2] = left;
+ dev_dbg(&dev->dev, "running with 0x%02x 0x%02x\n", left, right);
+ usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT);
+
+ return 0;
+}
+
+static int sjoyff_init(struct hid_device *hid)
+{
+ struct sjoyff_device *sjoyff;
+ struct hid_report *report;
+ struct hid_input *hidinput = list_entry(hid->inputs.next,
+ struct hid_input, list);
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct list_head *report_ptr = report_list;
+ struct input_dev *dev;
+ int error;
+
+ if (list_empty(report_list)) {
+ dev_err(&hid->dev, "no output reports found\n");
+ return -ENODEV;
+ }
+
+ report_ptr = report_ptr->next;
+
+ if (report_ptr == report_list) {
+ dev_err(&hid->dev, "required output report is "
+ "missing\n");
+ return -ENODEV;
+ }
+
+ report = list_entry(report_ptr, struct hid_report, list);
+ if (report->maxfield < 1) {
+ dev_err(&hid->dev, "no fields in the report\n");
+ return -ENODEV;
+ }
+
+ if (report->field[0]->report_count < 3) {
+ dev_err(&hid->dev, "not enough values in the field\n");
+ return -ENODEV;
+ }
+
+ sjoyff = kzalloc(sizeof(struct sjoyff_device), GFP_KERNEL);
+ if (!sjoyff)
+ return -ENOMEM;
+
+ dev = hidinput->input;
+
+ set_bit(FF_RUMBLE, dev->ffbit);
+
+ error = input_ff_create_memless(dev, sjoyff, hid_sjoyff_play);
+ if (error) {
+ kfree(sjoyff);
+ return error;
+ }
+
+ sjoyff->report = report;
+ sjoyff->report->field[0]->value[0] = 0x01;
+ sjoyff->report->field[0]->value[1] = 0x00;
+ sjoyff->report->field[0]->value[2] = 0x00;
+ usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT);
+
+ dev_info(&hid->dev,
+ "Force feedback for SmartJoy PLUS PS2/USB adapter\n");
+
+ return 0;
+}
+#else
+static inline int sjoyff_init(struct hid_device *hid)
+{
+ return 0;
+}
+#endif
+
+static int sjoy_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ dev_err(&hdev->dev, "parse failed\n");
+ goto err;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (ret) {
+ dev_err(&hdev->dev, "hw start failed\n");
+ goto err;
+ }
+
+ sjoyff_init(hdev);
+
+ return 0;
+err:
+ return ret;
+}
+
+static const struct hid_device_id sjoy_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, sjoy_devices);
+
+static struct hid_driver sjoy_driver = {
+ .name = "smartjoyplus",
+ .id_table = sjoy_devices,
+ .probe = sjoy_probe,
+};
+
+static int sjoy_init(void)
+{
+ return hid_register_driver(&sjoy_driver);
+}
+
+static void sjoy_exit(void)
+{
+ hid_unregister_driver(&sjoy_driver);
+}
+
+module_init(sjoy_init);
+module_exit(sjoy_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jussi Kivilinna");
+
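A note on the rumble mapping in hid_sjoyff_play() above: the strong motor takes an 8-bit level while the weak motor is a plain on/off switch. A stand-alone sketch of that mapping (illustrative only; example_map_rumble is a made-up helper name):

static void example_map_rumble(u32 strong, u32 weak, u8 *left, u8 *right)
{
	*left = strong * 0xff / 0xffff;	/* e.g. 0x8000 -> 0x7f */
	*right = (weak != 0);		/* any non-zero magnitude turns it on */
}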
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 7c1f7b50330..fcd6ccd02fe 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -33,11 +33,6 @@
#include "hid-ids.h"
-#include "usbhid/usbhid.h"
-
-/* Usages for thrustmaster devices I know about */
-#define THRUSTMASTER_USAGE_FF (HID_UP_GENDESK | 0xbb)
-
static const signed short ff_rumble[] = {
FF_RUMBLE,
-1
@@ -48,6 +43,12 @@ static const signed short ff_joystick[] = {
-1
};
+#ifdef CONFIG_THRUSTMASTER_FF
+#include "usbhid/usbhid.h"
+
+/* Usages for thrustmaster devices I know about */
+#define THRUSTMASTER_USAGE_FF (HID_UP_GENDESK | 0xbb)
+
struct tmff_device {
struct hid_report *report;
struct hid_field *ff_field;
@@ -209,6 +210,12 @@ fail:
kfree(tmff);
return error;
}
+#else
+static inline int tmff_init(struct hid_device *hid, const signed short *ff_bits)
+{
+ return 0;
+}
+#endif
static int tm_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
new file mode 100644
index 00000000000..1f9237f511e
--- /dev/null
+++ b/drivers/hid/hid-wacom.c
@@ -0,0 +1,259 @@
+/*
+ * Bluetooth Wacom Tablet support
+ *
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+ * Copyright (c) 2006-2007 Jiri Kosina
+ * Copyright (c) 2007 Paul Walmsley
+ * Copyright (c) 2008 Jiri Slaby <jirislaby@gmail.com>
+ * Copyright (c) 2006 Andrew Zabolotny <zap@homelink.ru>
+ * Copyright (c) 2009 Bastien Nocera <hadess@hadess.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+struct wacom_data {
+ __u16 tool;
+ unsigned char butstate;
+};
+
+static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+ struct wacom_data *wdata = hid_get_drvdata(hdev);
+ struct hid_input *hidinput;
+ struct input_dev *input;
+ unsigned char *data = (unsigned char *) raw_data;
+ int tool, x, y, rw;
+
+ if (!(hdev->claimed & HID_CLAIMED_INPUT))
+ return 0;
+
+ tool = 0;
+ hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
+ input = hidinput->input;
+
+ /* Check if this is a tablet report */
+ if (data[0] != 0x03)
+ return 0;
+
+ /* Get X & Y positions */
+ x = le16_to_cpu(*(__le16 *) &data[2]);
+ y = le16_to_cpu(*(__le16 *) &data[4]);
+
+ /* Get current tool identifier */
+ if (data[1] & 0x90) { /* If pen is in the in/active area */
+ switch ((data[1] >> 5) & 3) {
+ case 0: /* Pen */
+ tool = BTN_TOOL_PEN;
+ break;
+
+ case 1: /* Rubber */
+ tool = BTN_TOOL_RUBBER;
+ break;
+
+ case 2: /* Mouse with wheel */
+ case 3: /* Mouse without wheel */
+ tool = BTN_TOOL_MOUSE;
+ break;
+ }
+
+ /* Reset tool if out of active tablet area */
+ if (!(data[1] & 0x10))
+ tool = 0;
+ }
+
+ /* If tool changed, notify input subsystem */
+ if (wdata->tool != tool) {
+ if (wdata->tool) {
+ /* Completely reset old tool state */
+ if (wdata->tool == BTN_TOOL_MOUSE) {
+ input_report_key(input, BTN_LEFT, 0);
+ input_report_key(input, BTN_RIGHT, 0);
+ input_report_key(input, BTN_MIDDLE, 0);
+ input_report_abs(input, ABS_DISTANCE,
+ input->absmax[ABS_DISTANCE]);
+ } else {
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_key(input, BTN_STYLUS, 0);
+ input_report_key(input, BTN_STYLUS2, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ }
+ input_report_key(input, wdata->tool, 0);
+ input_sync(input);
+ }
+ wdata->tool = tool;
+ if (tool)
+ input_report_key(input, tool, 1);
+ }
+
+ if (tool) {
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+
+ switch ((data[1] >> 5) & 3) {
+ case 2: /* Mouse with wheel */
+ input_report_key(input, BTN_MIDDLE, data[1] & 0x04);
+ rw = (data[6] & 0x01) ? -1 :
+ (data[6] & 0x02) ? 1 : 0;
+ input_report_rel(input, REL_WHEEL, rw);
+ /* fall through */
+
+ case 3: /* Mouse without wheel */
+ input_report_key(input, BTN_LEFT, data[1] & 0x01);
+ input_report_key(input, BTN_RIGHT, data[1] & 0x02);
+ /* Compute distance between mouse and tablet */
+ rw = 44 - (data[6] >> 2);
+ if (rw < 0)
+ rw = 0;
+ else if (rw > 31)
+ rw = 31;
+ input_report_abs(input, ABS_DISTANCE, rw);
+ break;
+
+ default:
+ input_report_abs(input, ABS_PRESSURE,
+ data[6] | (((__u16) (data[1] & 0x08)) << 5));
+ input_report_key(input, BTN_TOUCH, data[1] & 0x01);
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, (tool == BTN_TOOL_PEN) && data[1] & 0x04);
+ break;
+ }
+
+ input_sync(input);
+ }
+
+ /* Report the state of the two buttons at the top of the tablet
+ * as two extra fingerpad keys (buttons 4 & 5). */
+ rw = data[7] & 0x03;
+ if (rw != wdata->butstate) {
+ wdata->butstate = rw;
+ input_report_key(input, BTN_0, rw & 0x02);
+ input_report_key(input, BTN_1, rw & 0x01);
+ input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
+ input_sync(input);
+ }
+
+ return 1;
+}
+
+static int wacom_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct hid_input *hidinput;
+ struct input_dev *input;
+ struct wacom_data *wdata;
+ int ret;
+
+ wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
+ if (wdata == NULL) {
+ dev_err(&hdev->dev, "can't alloc wacom descriptor\n");
+ return -ENOMEM;
+ }
+
+ hid_set_drvdata(hdev, wdata);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ dev_err(&hdev->dev, "parse failed\n");
+ goto err_free;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ dev_err(&hdev->dev, "hw start failed\n");
+ goto err_free;
+ }
+
+ hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
+ input = hidinput->input;
+
+ /* Basics */
+ input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
+ input->absbit[0] |= BIT(ABS_X) | BIT(ABS_Y) |
+ BIT(ABS_PRESSURE) | BIT(ABS_DISTANCE);
+ input->relbit[0] |= BIT(REL_WHEEL);
+ set_bit(BTN_TOOL_PEN, input->keybit);
+ set_bit(BTN_TOUCH, input->keybit);
+ set_bit(BTN_STYLUS, input->keybit);
+ set_bit(BTN_STYLUS2, input->keybit);
+ set_bit(BTN_LEFT, input->keybit);
+ set_bit(BTN_RIGHT, input->keybit);
+ set_bit(BTN_MIDDLE, input->keybit);
+
+ /* Pad */
+ input->evbit[0] |= BIT(EV_MSC);
+ input->mscbit[0] |= BIT(MSC_SERIAL);
+
+ /* Distance, rubber and mouse */
+ input->absbit[0] |= BIT(ABS_DISTANCE);
+ set_bit(BTN_TOOL_RUBBER, input->keybit);
+ set_bit(BTN_TOOL_MOUSE, input->keybit);
+
+ input->absmax[ABS_PRESSURE] = 511;
+ input->absmax[ABS_DISTANCE] = 32;
+
+ input->absmax[ABS_X] = 16704;
+ input->absmax[ABS_Y] = 12064;
+ input->absfuzz[ABS_X] = 4;
+ input->absfuzz[ABS_Y] = 4;
+
+ return 0;
+err_free:
+ kfree(wdata);
+ return ret;
+}
+
+static void wacom_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ kfree(hid_get_drvdata(hdev));
+}
+
+static const struct hid_device_id wacom_devices[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+
+ { }
+};
+MODULE_DEVICE_TABLE(hid, wacom_devices);
+
+static struct hid_driver wacom_driver = {
+ .name = "wacom",
+ .id_table = wacom_devices,
+ .probe = wacom_probe,
+ .remove = wacom_remove,
+ .raw_event = wacom_raw_event,
+};
+
+static int wacom_init(void)
+{
+ int ret;
+
+ ret = hid_register_driver(&wacom_driver);
+ if (ret)
+ printk(KERN_ERR "can't register wacom driver\n");
+ else
+ printk(KERN_INFO "wacom driver registered\n");
+ return ret;
+}
+
+static void wacom_exit(void)
+{
+ hid_unregister_driver(&wacom_driver);
+}
+
+module_init(wacom_init);
+module_exit(wacom_exit);
+MODULE_LICENSE("GPL");
+
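Two details of wacom_raw_event() above are easy to miss, so here they are isolated (illustrative only, not part of the patch; example_decode is a made-up helper): the pen pressure is a 9-bit value built from data[6] plus bit 3 of the flag byte, matching the absmax of 511 set in wacom_probe(), and the mouse distance is derived from data[6] and clamped to the 0..31 range behind the absmax of 32:

static void example_decode(const unsigned char *data, int *pressure, int *distance)
{
	*pressure = data[6] | (((__u16)(data[1] & 0x08)) << 5);	/* 0..511 */

	*distance = 44 - (data[6] >> 2);
	if (*distance < 0)
		*distance = 0;
	else if (*distance > 31)
		*distance = 31;					/* 0..31 */
}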
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index 85a198a1853..57f710757bf 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -27,6 +27,7 @@
#include "hid-ids.h"
+#ifdef CONFIG_ZEROPLUS_FF
#include "usbhid/usbhid.h"
struct zpff_device {
@@ -108,6 +109,12 @@ static int zpff_init(struct hid_device *hid)
return 0;
}
+#else
+static inline int zpff_init(struct hid_device *hid)
+{
+ return 0;
+}
+#endif
static int zp_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 00ccf4b1985..0c6639ea03d 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -349,10 +349,7 @@ int hidraw_connect(struct hid_device *hid)
int minor, result;
struct hidraw *dev;
- /* TODO currently we accept any HID device. This should later
- * probably be fixed to accept only those devices which provide
- * non-input applications
- */
+ /* we accept any HID device, no matter the applications */
dev = kzalloc(sizeof(struct hidraw), GFP_KERNEL);
if (!dev)
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ac8049b5f1e..76c4bbe9dcc 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1234,12 +1234,11 @@ static int hid_post_reset(struct usb_interface *intf)
struct hid_device *hid = usb_get_intfdata(intf);
struct usbhid_device *usbhid = hid->driver_data;
int status;
-
+
spin_lock_irq(&usbhid->lock);
clear_bit(HID_RESET_PENDING, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
- /* FIXME: Any more reinitialization needed? */
status = hid_start_in(hid);
if (status < 0)
hid_io_error(hid);
@@ -1251,14 +1250,14 @@ static int hid_post_reset(struct usb_interface *intf)
int usbhid_get_power(struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
-
+
return usb_autopm_get_interface(usbhid->intf);
}
void usbhid_put_power(struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
-
+
usb_autopm_put_interface(usbhid->intf);
}
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index e9b436d2d94..9e9421525fb 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -850,8 +850,14 @@ static const struct file_operations hiddev_fops = {
#endif
};
+static char *hiddev_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
static struct usb_class_driver hiddev_class = {
.name = "hiddev%d",
+ .nodename = hiddev_nodename,
.fops = &hiddev_fops,
.minor_base = HIDDEV_MINOR_BASE,
};
@@ -955,7 +961,6 @@ static int hiddev_usbd_probe(struct usb_interface *intf,
return -ENODEV;
}
-
static /* const */ struct usb_driver hiddev_driver = {
.name = "hiddev",
.probe = hiddev_usbd_probe,
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index d73f5f473e3..2d5016691d4 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -306,11 +306,11 @@ config SENSORS_F71805F
will be called f71805f.
config SENSORS_F71882FG
- tristate "Fintek F71862FG, F71882FG and F8000"
+ tristate "Fintek F71858FG, F71862FG, F71882FG and F8000"
depends on EXPERIMENTAL
help
If you say yes here you get support for hardware monitoring
- features of the Fintek F71882FG/F71883FG, F71862FG/71863FG
+ features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG
and F8000 Super-I/O chips.
This driver can also be built as a module. If so, the module
@@ -418,7 +418,7 @@ config SENSORS_IBMAEM
power sensors and capping hardware in various IBM System X
servers that support Active Energy Manager. This includes
the x3350, x3550, x3650, x3655, x3755, x3850 M2, x3950 M2,
- and certain HS2x/LS2x/QS2x blades.
+ and certain HC10/HS2x/LS2x/QS2x blades.
This driver can also be built as a module. If so, the module
will be called ibmaem.
@@ -787,6 +787,16 @@ config SENSORS_THMC50
This driver can also be built as a module. If so, the module
will be called thmc50.
+config SENSORS_TMP401
+ tristate "Texas Instruments TMP401 and compatibles"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for Texas Instruments TMP401 and
+ TMP411 temperature sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp401.
+
config SENSORS_VIA686A
tristate "VIA686A"
depends on PCI
@@ -940,6 +950,7 @@ config SENSORS_HDAPS
config SENSORS_LIS3LV02D
tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer"
depends on ACPI && INPUT
+ select INPUT_POLLDEV
select NEW_LEDS
select LEDS_CLASS
default n
@@ -967,6 +978,7 @@ config SENSORS_LIS3LV02D
config SENSORS_LIS3_SPI
tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
depends on !ACPI && SPI_MASTER && INPUT
+ select INPUT_POLLDEV
default n
help
This driver provides support for the LIS3LV02Dx accelerometer connected
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 0ae26984ba4..b793dce6bed 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
+obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
obj-$(CONFIG_SENSORS_VT8231) += vt8231.o
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 5f81ddf7150..4146105f1a5 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1,6 +1,6 @@
/***************************************************************************
* Copyright (C) 2006 by Hans Edgington <hans@edgington.nl> *
- * Copyright (C) 2007,2008 by Hans de Goede <hdegoede@redhat.com> *
+ * Copyright (C) 2007-2009 Hans de Goede <hdegoede@redhat.com> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
@@ -32,6 +32,7 @@
#define DRVNAME "f71882fg"
+#define SIO_F71858FG_LD_HWM 0x02 /* Hardware monitor logical device */
#define SIO_F71882FG_LD_HWM 0x04 /* Hardware monitor logical device */
#define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */
#define SIO_LOCK_KEY 0xAA /* Key to diasble Super-I/O */
@@ -44,6 +45,7 @@
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */
+#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F8000_ID 0x0581 /* Chipset ID */
@@ -70,6 +72,7 @@
#define F71882FG_REG_TEMP_HIGH(nr) (0x81 + 2 * (nr))
#define F71882FG_REG_TEMP_STATUS 0x62
#define F71882FG_REG_TEMP_BEEP 0x63
+#define F71882FG_REG_TEMP_CONFIG 0x69
#define F71882FG_REG_TEMP_HYST(nr) (0x6C + (nr))
#define F71882FG_REG_TEMP_TYPE 0x6B
#define F71882FG_REG_TEMP_DIODE_OPEN 0x6F
@@ -92,9 +95,10 @@ static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-enum chips { f71862fg, f71882fg, f8000 };
+enum chips { f71858fg, f71862fg, f71882fg, f8000 };
static const char *f71882fg_names[] = {
+ "f71858fg",
"f71862fg",
"f71882fg",
"f8000",
@@ -119,6 +123,7 @@ struct f71882fg_data {
struct device *hwmon_dev;
struct mutex update_lock;
+ int temp_start; /* temp numbering start (0 or 1) */
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
unsigned long last_limits; /* In jiffies */
@@ -136,7 +141,7 @@ struct f71882fg_data {
/* Note: all models have only 3 temperature channels, but on some
they are addressed as 0-2 and on others as 1-3, so for coding
convenience we reserve space for 4 channels */
- u8 temp[4];
+ u16 temp[4];
u8 temp_ovt[4];
u8 temp_high[4];
u8 temp_hyst[2]; /* 2 hysts stored per reg */
@@ -144,6 +149,7 @@ struct f71882fg_data {
u8 temp_status;
u8 temp_beep;
u8 temp_diode_open;
+ u8 temp_config;
u8 pwm[4];
u8 pwm_enable;
u8 pwm_auto_point_hyst[2];
@@ -247,11 +253,55 @@ static struct platform_driver f71882fg_driver = {
.name = DRVNAME,
},
.probe = f71882fg_probe,
- .remove = __devexit_p(f71882fg_remove),
+ .remove = f71882fg_remove,
};
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+/* Temp and in attr for the f71858fg */
+static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
+ SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
+ SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
+ SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+ SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
+ SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
+ store_temp_max, 0, 0),
+ SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 0, 0),
+ SENSOR_ATTR_2(temp1_max_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 0),
+ SENSOR_ATTR_2(temp1_crit, S_IRUGO|S_IWUSR, show_temp_crit,
+ store_temp_crit, 0, 0),
+ SENSOR_ATTR_2(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
+ 0, 0),
+ SENSOR_ATTR_2(temp1_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 4),
+ SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 0),
+ SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 1),
+ SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_max,
+ store_temp_max, 0, 1),
+ SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 0, 1),
+ SENSOR_ATTR_2(temp2_max_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 1),
+ SENSOR_ATTR_2(temp2_crit, S_IRUGO|S_IWUSR, show_temp_crit,
+ store_temp_crit, 0, 1),
+ SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
+ 0, 1),
+ SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
+ SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1),
+ SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
+ SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
+ SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
+ store_temp_max, 0, 2),
+ SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 0, 2),
+ SENSOR_ATTR_2(temp3_max_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 2),
+ SENSOR_ATTR_2(temp3_crit, S_IRUGO|S_IWUSR, show_temp_crit,
+ store_temp_crit, 0, 2),
+ SENSOR_ATTR_2(temp3_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
+ 0, 2),
+ SENSOR_ATTR_2(temp3_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6),
+ SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
+};
+
/* Temp and in attr common to both the f71862fg and f71882fg */
static struct sensor_device_attribute_2 f718x2fg_in_temp_attr[] = {
SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
@@ -344,6 +394,7 @@ static struct sensor_device_attribute_2 f8000_in_temp_attr[] = {
SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 0),
SENSOR_ATTR_2(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 4),
+ SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 0),
SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 1),
SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 1),
@@ -351,12 +402,14 @@ static struct sensor_device_attribute_2 f8000_in_temp_attr[] = {
store_temp_max, 0, 1),
SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1),
+ SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 2),
SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 2),
SENSOR_ATTR_2(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6),
+ SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
/* Fan / PWM attr common to all models */
@@ -395,6 +448,9 @@ static struct sensor_device_attribute_2 fxxxx_fan_attr[] = {
show_pwm_auto_point_channel,
store_pwm_auto_point_channel, 0, 1),
+ SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 2),
+ SENSOR_ATTR_2(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
+ store_pwm_enable, 0, 2),
SENSOR_ATTR_2(pwm3_interpolate, S_IRUGO|S_IWUSR,
show_pwm_interpolate, store_pwm_interpolate, 0, 2),
SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR,
@@ -450,9 +506,6 @@ static struct sensor_device_attribute_2 f71862fg_fan_attr[] = {
SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO,
show_pwm_auto_point_temp_hyst, NULL, 3, 1),
- SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 2),
- SENSOR_ATTR_2(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
- store_pwm_enable, 0, 2),
SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR,
show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
1, 2),
@@ -473,22 +526,8 @@ static struct sensor_device_attribute_2 f71862fg_fan_attr[] = {
show_pwm_auto_point_temp_hyst, NULL, 3, 2),
};
-/* Fan / PWM attr for the f71882fg */
-static struct sensor_device_attribute_2 f71882fg_fan_attr[] = {
- SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep,
- store_fan_beep, 0, 0),
- SENSOR_ATTR_2(fan2_beep, S_IRUGO|S_IWUSR, show_fan_beep,
- store_fan_beep, 0, 1),
- SENSOR_ATTR_2(fan3_beep, S_IRUGO|S_IWUSR, show_fan_beep,
- store_fan_beep, 0, 2),
- SENSOR_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 0, 3),
- SENSOR_ATTR_2(fan4_full_speed, S_IRUGO|S_IWUSR,
- show_fan_full_speed,
- store_fan_full_speed, 0, 3),
- SENSOR_ATTR_2(fan4_beep, S_IRUGO|S_IWUSR, show_fan_beep,
- store_fan_beep, 0, 3),
- SENSOR_ATTR_2(fan4_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 3),
-
+/* Fan / PWM attr common to both the f71882fg and f71858fg */
+static struct sensor_device_attribute_2 f71882fg_f71858fg_fan_attr[] = {
SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR,
show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
0, 0),
@@ -565,9 +604,6 @@ static struct sensor_device_attribute_2 f71882fg_fan_attr[] = {
SENSOR_ATTR_2(pwm2_auto_point4_temp_hyst, S_IRUGO,
show_pwm_auto_point_temp_hyst, NULL, 3, 1),
- SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 2),
- SENSOR_ATTR_2(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
- store_pwm_enable, 0, 2),
SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR,
show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
0, 2),
@@ -605,6 +641,24 @@ static struct sensor_device_attribute_2 f71882fg_fan_attr[] = {
show_pwm_auto_point_temp_hyst, NULL, 2, 2),
SENSOR_ATTR_2(pwm3_auto_point4_temp_hyst, S_IRUGO,
show_pwm_auto_point_temp_hyst, NULL, 3, 2),
+};
+
+/* Fan / PWM attr found on the f71882fg but not on the f71858fg */
+static struct sensor_device_attribute_2 f71882fg_fan_attr[] = {
+ SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+ store_fan_beep, 0, 0),
+ SENSOR_ATTR_2(fan2_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+ store_fan_beep, 0, 1),
+ SENSOR_ATTR_2(fan3_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+ store_fan_beep, 0, 2),
+
+ SENSOR_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 0, 3),
+ SENSOR_ATTR_2(fan4_full_speed, S_IRUGO|S_IWUSR,
+ show_fan_full_speed,
+ store_fan_full_speed, 0, 3),
+ SENSOR_ATTR_2(fan4_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+ store_fan_beep, 0, 3),
+ SENSOR_ATTR_2(fan4_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 3),
SENSOR_ATTR_2(pwm4, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 3),
SENSOR_ATTR_2(pwm4_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
@@ -659,8 +713,6 @@ static struct sensor_device_attribute_2 f71882fg_fan_attr[] = {
static struct sensor_device_attribute_2 f8000_fan_attr[] = {
SENSOR_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 0, 3),
- SENSOR_ATTR_2(pwm3, S_IRUGO, show_pwm, NULL, 0, 2),
-
SENSOR_ATTR_2(temp1_auto_point1_pwm, S_IRUGO|S_IWUSR,
show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
0, 2),
@@ -857,13 +909,20 @@ static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val)
outb(val & 255, data->addr + DATA_REG_OFFSET);
}
+static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
+{
+ if (data->type == f71858fg)
+ return f71882fg_read16(data, F71882FG_REG_TEMP(nr));
+ else
+ return f71882fg_read8(data, F71882FG_REG_TEMP(nr));
+}
+
static struct f71882fg_data *f71882fg_update_device(struct device *dev)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
int nr, reg = 0, reg2;
int nr_fans = (data->type == f71882fg) ? 4 : 3;
- int nr_ins = (data->type == f8000) ? 3 : 9;
- int temp_start = (data->type == f8000) ? 0 : 1;
+ int nr_ins = (data->type == f71858fg || data->type == f8000) ? 3 : 9;
mutex_lock(&data->update_lock);
@@ -878,7 +937,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
}
/* Get High & boundary temps*/
- for (nr = temp_start; nr < 3 + temp_start; nr++) {
+ for (nr = data->temp_start; nr < 3 + data->temp_start; nr++) {
data->temp_ovt[nr] = f71882fg_read8(data,
F71882FG_REG_TEMP_OVT(nr));
data->temp_high[nr] = f71882fg_read8(data,
@@ -886,14 +945,17 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
}
if (data->type != f8000) {
- data->fan_beep = f71882fg_read8(data,
- F71882FG_REG_FAN_BEEP);
- data->temp_beep = f71882fg_read8(data,
- F71882FG_REG_TEMP_BEEP);
data->temp_hyst[0] = f71882fg_read8(data,
F71882FG_REG_TEMP_HYST(0));
data->temp_hyst[1] = f71882fg_read8(data,
F71882FG_REG_TEMP_HYST(1));
+ }
+
+ if (data->type == f71862fg || data->type == f71882fg) {
+ data->fan_beep = f71882fg_read8(data,
+ F71882FG_REG_FAN_BEEP);
+ data->temp_beep = f71882fg_read8(data,
+ F71882FG_REG_TEMP_BEEP);
/* Have to hardcode type, because temp1 is special */
reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
data->temp_type[2] = (reg & 0x04) ? 2 : 4;
@@ -904,10 +966,10 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
data->temp_type[1] = 6 /* PECI */;
else if ((reg2 & 0x03) == 0x02)
data->temp_type[1] = 5 /* AMDSI */;
- else if (data->type != f8000)
+ else if (data->type == f71862fg || data->type == f71882fg)
data->temp_type[1] = (reg & 0x02) ? 2 : 4;
else
- data->temp_type[1] = 2; /* F8000 only supports BJT */
+ data->temp_type[1] = 2; /* Only supports BJT */
data->pwm_enable = f71882fg_read8(data,
F71882FG_REG_PWM_ENABLE);
@@ -963,9 +1025,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
F71882FG_REG_TEMP_STATUS);
data->temp_diode_open = f71882fg_read8(data,
F71882FG_REG_TEMP_DIODE_OPEN);
- for (nr = temp_start; nr < 3 + temp_start; nr++)
- data->temp[nr] = f71882fg_read8(data,
- F71882FG_REG_TEMP(nr));
+ for (nr = data->temp_start; nr < 3 + data->temp_start; nr++)
+ data->temp[nr] = f71882fg_read_temp(data, nr);
data->fan_status = f71882fg_read8(data,
F71882FG_REG_FAN_STATUS);
@@ -1168,8 +1229,24 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
{
struct f71882fg_data *data = f71882fg_update_device(dev);
int nr = to_sensor_dev_attr_2(devattr)->index;
+ int sign, temp;
+
+ if (data->type == f71858fg) {
+ /* TEMP_TABLE_SEL 1 or 3 ? */
+ if (data->temp_config & 1) {
+ sign = data->temp[nr] & 0x0001;
+ temp = (data->temp[nr] >> 5) & 0x7ff;
+ } else {
+ sign = data->temp[nr] & 0x8000;
+ temp = (data->temp[nr] >> 5) & 0x3ff;
+ }
+ temp *= 125;
+ if (sign)
+ temp -= 128000;
+ } else
+ temp = data->temp[nr] * 1000;
- return sprintf(buf, "%d\n", data->temp[nr] * 1000);
+ return sprintf(buf, "%d\n", temp);
}
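
To make the f71858fg decode above concrete (illustrative only, assuming the temp_config mode where the sign sits in bit 15): the register holds a 10-bit reading in bits 14..5 with 0.125 °C per step, so 0x3200 decodes to 50000 m°C and 0xff00 to -1000 m°C:

static int example_f71858fg_to_millicelsius(u16 raw)
{
	int temp = (raw >> 5) & 0x3ff;	/* 10-bit reading, 0.125 degC per LSB */

	temp *= 125;
	if (raw & 0x8000)		/* sign bit */
		temp -= 128000;
	return temp;			/* millidegrees Celsius */
}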
static ssize_t show_temp_max(struct device *dev, struct device_attribute
@@ -1440,6 +1517,10 @@ static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
int nr = to_sensor_dev_attr_2(devattr)->index;
long val = simple_strtol(buf, NULL, 10);
+ /* Special case for F8000 pwm channel 3 which only does auto mode */
+ if (data->type == f8000 && nr == 2 && val != 2)
+ return -EINVAL;
+
mutex_lock(&data->update_lock);
data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
/* Special case for F8000 auto PWM mode / Thermostat mode */
@@ -1458,6 +1539,12 @@ static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
} else {
switch (val) {
case 1:
+ /* The f71858fg does not support manual RPM mode */
+ if (data->type == f71858fg &&
+ ((data->pwm_enable >> (2 * nr)) & 1)) {
+ count = -EINVAL;
+ goto leave;
+ }
data->pwm_enable |= 2 << (2 * nr);
break; /* Manual */
case 2:
@@ -1616,9 +1703,9 @@ static ssize_t show_pwm_auto_point_channel(struct device *dev,
int result;
struct f71882fg_data *data = f71882fg_update_device(dev);
int nr = to_sensor_dev_attr_2(devattr)->index;
- int temp_start = (data->type == f8000) ? 0 : 1;
- result = 1 << ((data->pwm_auto_point_mapping[nr] & 3) - temp_start);
+ result = 1 << ((data->pwm_auto_point_mapping[nr] & 3) -
+ data->temp_start);
return sprintf(buf, "%d\n", result);
}
@@ -1629,7 +1716,6 @@ static ssize_t store_pwm_auto_point_channel(struct device *dev,
{
struct f71882fg_data *data = dev_get_drvdata(dev);
int nr = to_sensor_dev_attr_2(devattr)->index;
- int temp_start = (data->type == f8000) ? 0 : 1;
long val = simple_strtol(buf, NULL, 10);
switch (val) {
@@ -1645,7 +1731,7 @@ static ssize_t store_pwm_auto_point_channel(struct device *dev,
default:
return -EINVAL;
}
- val += temp_start;
+ val += data->temp_start;
mutex_lock(&data->update_lock);
data->pwm_auto_point_mapping[nr] =
f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr));
@@ -1721,6 +1807,8 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
data->type = sio_data->type;
+ data->temp_start =
+ (data->type == f71858fg || data->type == f8000) ? 0 : 1;
mutex_init(&data->update_lock);
platform_set_drvdata(pdev, data);
@@ -1736,19 +1824,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
goto exit_free;
}
- data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
- /* If it is a 71862 and the fan / pwm part is enabled sanity check
- the pwm settings */
- if (data->type == f71862fg && (start_reg & 0x02)) {
- if ((data->pwm_enable & 0x15) != 0x15) {
- dev_err(&pdev->dev,
- "Invalid (reserved) pwm settings: 0x%02x\n",
- (unsigned int)data->pwm_enable);
- err = -ENODEV;
- goto exit_free;
- }
- }
-
/* Register sysfs interface files */
err = device_create_file(&pdev->dev, &dev_attr_name);
if (err)
@@ -1756,6 +1831,20 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
if (start_reg & 0x01) {
switch (data->type) {
+ case f71858fg:
+ data->temp_config =
+ f71882fg_read8(data, F71882FG_REG_TEMP_CONFIG);
+ if (data->temp_config & 0x10)
+ /* The f71858fg temperature alarms behave as
+ the f8000 alarms in this mode */
+ err = f71882fg_create_sysfs_files(pdev,
+ f8000_in_temp_attr,
+ ARRAY_SIZE(f8000_in_temp_attr));
+ else
+ err = f71882fg_create_sysfs_files(pdev,
+ f71858fg_in_temp_attr,
+ ARRAY_SIZE(f71858fg_in_temp_attr));
+ break;
case f71882fg:
err = f71882fg_create_sysfs_files(pdev,
f71882fg_in_temp_attr,
@@ -1779,6 +1868,35 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
}
if (start_reg & 0x02) {
+ data->pwm_enable =
+ f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
+
+ /* Sanity check the pwm settings */
+ switch (data->type) {
+ case f71858fg:
+ err = 0;
+ for (i = 0; i < nr_fans; i++)
+ if (((data->pwm_enable >> (i * 2)) & 3) == 3)
+ err = 1;
+ break;
+ case f71862fg:
+ err = (data->pwm_enable & 0x15) != 0x15;
+ break;
+ case f71882fg:
+ err = 0;
+ break;
+ case f8000:
+ err = data->pwm_enable & 0x20;
+ break;
+ }
+ if (err) {
+ dev_err(&pdev->dev,
+ "Invalid (reserved) pwm settings: 0x%02x\n",
+ (unsigned int)data->pwm_enable);
+ err = -ENODEV;
+ goto exit_unregister_sysfs;
+ }
+
err = f71882fg_create_sysfs_files(pdev, fxxxx_fan_attr,
ARRAY_SIZE(fxxxx_fan_attr));
if (err)
@@ -1794,6 +1912,13 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
err = f71882fg_create_sysfs_files(pdev,
f71882fg_fan_attr,
ARRAY_SIZE(f71882fg_fan_attr));
+ if (err)
+ goto exit_unregister_sysfs;
+ /* fall through! */
+ case f71858fg:
+ err = f71882fg_create_sysfs_files(pdev,
+ f71882fg_f71858fg_fan_attr,
+ ARRAY_SIZE(f71882fg_f71858fg_fan_attr));
break;
case f8000:
err = f71882fg_create_sysfs_files(pdev,
@@ -1878,6 +2003,9 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
switch (devid) {
+ case SIO_F71858_ID:
+ sio_data->type = f71858fg;
+ break;
case SIO_F71862_ID:
sio_data->type = f71862fg;
break;
@@ -1892,7 +2020,11 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
goto exit;
}
- superio_select(sioaddr, SIO_F71882FG_LD_HWM);
+ if (sio_data->type == f71858fg)
+ superio_select(sioaddr, SIO_F71858FG_LD_HWM);
+ else
+ superio_select(sioaddr, SIO_F71882FG_LD_HWM);
+
if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
printk(KERN_WARNING DRVNAME ": Device not activated\n");
goto exit;
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index abca7e9f953..6679854c85b 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -27,9 +27,6 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/kthread.h>
-#include <linux/semaphore.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/poll.h>
@@ -161,6 +158,7 @@ static struct axis_conversion lis3lv02d_axis_normal = {1, 2, 3};
static struct axis_conversion lis3lv02d_axis_y_inverted = {1, -2, 3};
static struct axis_conversion lis3lv02d_axis_x_inverted = {-1, 2, 3};
static struct axis_conversion lis3lv02d_axis_z_inverted = {1, 2, -3};
+static struct axis_conversion lis3lv02d_axis_xy_swap = {2, 1, 3};
static struct axis_conversion lis3lv02d_axis_xy_rotated_left = {-2, 1, 3};
static struct axis_conversion lis3lv02d_axis_xy_rotated_left_usd = {-2, 1, -3};
static struct axis_conversion lis3lv02d_axis_xy_swap_inverted = {-2, -1, 3};
@@ -194,13 +192,16 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted),
AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted),
AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted),
+ AXIS_DMI_MATCH("NC2710", "HP Compaq 2710", xy_swap),
AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted),
AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted),
AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd),
AXIS_DMI_MATCH("NC673x", "HP Compaq 673", xy_rotated_left_usd),
AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right),
- AXIS_DMI_MATCH("NC671xx", "HP Compaq 671", xy_swap_yz_inverted),
+ AXIS_DMI_MATCH("NC6710x", "HP Compaq 6710", xy_swap_yz_inverted),
+ AXIS_DMI_MATCH("NC6715x", "HP Compaq 6715", y_inverted),
+ AXIS_DMI_MATCH("NC693xx", "HP EliteBook 693", xy_rotated_right),
/* Intel-based HP Pavilion dv5 */
AXIS_DMI_MATCH2("HPDV5_I",
PRODUCT_NAME, "HP Pavilion dv5",
@@ -216,7 +217,6 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
{ NULL, }
/* Laptop models without axis info (yet):
* "NC6910" "HP Compaq 6910"
- * HP Compaq 8710x Notebook PC / Mobile Workstation
* "NC2400" "HP Compaq nc2400"
* "NX74x0" "HP Compaq nx74"
* "NX6325" "HP Compaq nx6325"
@@ -324,7 +324,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
flush_work(&hpled_led.work);
led_classdev_unregister(&hpled_led.led_classdev);
- return lis3lv02d_remove_fs();
+ return lis3lv02d_remove_fs(&lis3_dev);
}
@@ -338,13 +338,7 @@ static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
static int lis3lv02d_resume(struct acpi_device *device)
{
- /* put back the device in the right state (ACPI might turn it on) */
- mutex_lock(&lis3_dev.lock);
- if (lis3_dev.usage > 0)
- lis3lv02d_poweron(&lis3_dev);
- else
- lis3lv02d_poweroff(&lis3_dev);
- mutex_unlock(&lis3_dev.lock);
+ lis3lv02d_poweron(&lis3_dev);
return 0;
}
#else
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index e15c3e7b07e..29ea6753f3b 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -18,6 +18,7 @@
#include <linux/hwmon.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
+#include <linux/pci.h>
#define HWMON_ID_PREFIX "hwmon"
#define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
@@ -86,8 +87,36 @@ void hwmon_device_unregister(struct device *dev)
"hwmon_device_unregister() failed: bad class ID!\n");
}
+static void __init hwmon_pci_quirks(void)
+{
+#if defined CONFIG_X86 && defined CONFIG_PCI
+ struct pci_dev *sb;
+ u16 base;
+ u8 enable;
+
+ /* Open access to 0x295-0x296 on MSI MS-7031 */
+ sb = pci_get_device(PCI_VENDOR_ID_ATI, 0x436c, NULL);
+ if (sb &&
+ (sb->subsystem_vendor == 0x1462 && /* MSI */
+ sb->subsystem_device == 0x0031)) { /* MS-7031 */
+
+ pci_read_config_byte(sb, 0x48, &enable);
+ pci_read_config_word(sb, 0x64, &base);
+
+ if (base == 0 && !(enable & BIT(2))) {
+ dev_info(&sb->dev,
+ "Opening wide generic port at 0x295\n");
+ pci_write_config_word(sb, 0x64, 0x295);
+ pci_write_config_byte(sb, 0x48, enable | BIT(2));
+ }
+ }
+#endif
+}
+
static int __init hwmon_init(void)
{
+ hwmon_pci_quirks();
+
hwmon_class = class_create(THIS_MODULE, "hwmon");
if (IS_ERR(hwmon_class)) {
printk(KERN_ERR "hwmon.c: couldn't create sysfs class\n");
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index fe74609a7fe..405d3fb5d76 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -1127,3 +1127,4 @@ MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3650-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3655-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3755-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBM3850M2/x3950M2-*");
+MODULE_ALIAS("dmi:bvnIBM:*:pnIBMBladeHC10-*");
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 778eb779598..271338bdb6b 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -27,9 +27,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/kthread.h>
-#include <linux/semaphore.h>
+#include <linux/input-polldev.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/poll.h>
@@ -105,56 +103,39 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
{
int position[3];
- position[0] = lis3_dev.read_data(lis3, OUTX);
- position[1] = lis3_dev.read_data(lis3, OUTY);
- position[2] = lis3_dev.read_data(lis3, OUTZ);
+ position[0] = lis3->read_data(lis3, OUTX);
+ position[1] = lis3->read_data(lis3, OUTY);
+ position[2] = lis3->read_data(lis3, OUTZ);
- *x = lis3lv02d_get_axis(lis3_dev.ac.x, position);
- *y = lis3lv02d_get_axis(lis3_dev.ac.y, position);
- *z = lis3lv02d_get_axis(lis3_dev.ac.z, position);
+ *x = lis3lv02d_get_axis(lis3->ac.x, position);
+ *y = lis3lv02d_get_axis(lis3->ac.y, position);
+ *z = lis3lv02d_get_axis(lis3->ac.z, position);
}
void lis3lv02d_poweroff(struct lis3lv02d *lis3)
{
- lis3_dev.is_on = 0;
+ /* disable X,Y,Z axis and power down */
+ lis3->write(lis3, CTRL_REG1, 0x00);
}
EXPORT_SYMBOL_GPL(lis3lv02d_poweroff);
void lis3lv02d_poweron(struct lis3lv02d *lis3)
{
- lis3_dev.is_on = 1;
- lis3_dev.init(lis3);
-}
-EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
+ u8 reg;
-/*
- * To be called before starting to use the device. It makes sure that the
- * device will always be on until a call to lis3lv02d_decrease_use(). Not to be
- * used from interrupt context.
- */
-static void lis3lv02d_increase_use(struct lis3lv02d *dev)
-{
- mutex_lock(&dev->lock);
- dev->usage++;
- if (dev->usage == 1) {
- if (!dev->is_on)
- lis3lv02d_poweron(dev);
- }
- mutex_unlock(&dev->lock);
-}
+ lis3->init(lis3);
-/*
- * To be called whenever a usage of the device is stopped.
- * It will make sure to turn off the device when there is not usage.
- */
-static void lis3lv02d_decrease_use(struct lis3lv02d *dev)
-{
- mutex_lock(&dev->lock);
- dev->usage--;
- if (dev->usage == 0)
- lis3lv02d_poweroff(dev);
- mutex_unlock(&dev->lock);
+ /*
+ * Common configuration
+ * BDU: LSB and MSB values are not updated until both have been read.
+ * So the value read will always be correct.
+ */
+ lis3->read(lis3, CTRL_REG2, &reg);
+ reg |= CTRL2_BDU;
+ lis3->write(lis3, CTRL_REG2, reg);
}
+EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
+
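/*
 * Editor's note (hedged sketch, not part of the original patch): with
 * CTRL2_BDU set as above, a split 16-bit read cannot mix the low byte of
 * one sample with the high byte of the next, because the register pair is
 * frozen until both halves have been read. A hypothetical helper built on
 * the driver's read() callback would look like:
 *
 *	static s16 lis3_read_16(struct lis3lv02d *lis3, int lo, int hi)
 *	{
 *		u8 l, h;
 *
 *		lis3->read(lis3, lo, &l);
 *		lis3->read(lis3, hi, &h);
 *		return (s16)((h << 8) | l);
 *	}
 */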
static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
{
@@ -198,15 +179,12 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
printk(KERN_ERR DRIVER_NAME ": IRQ%d allocation failed\n", lis3_dev.irq);
return -EBUSY;
}
- lis3lv02d_increase_use(&lis3_dev);
- printk("lis3: registered interrupt %d\n", lis3_dev.irq);
return 0;
}
static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
{
fasync_helper(-1, file, 0, &lis3_dev.async_queue);
- lis3lv02d_decrease_use(&lis3_dev);
free_irq(lis3_dev.irq, &lis3_dev);
clear_bit(0, &lis3_dev.misc_opened); /* release the device */
return 0;
@@ -290,46 +268,16 @@ static struct miscdevice lis3lv02d_misc_device = {
.fops = &lis3lv02d_misc_fops,
};
-/**
- * lis3lv02d_joystick_kthread - Kthread polling function
- * @data: unused - here to conform to threadfn prototype
- */
-static int lis3lv02d_joystick_kthread(void *data)
+static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
{
int x, y, z;
- while (!kthread_should_stop()) {
- lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
- input_report_abs(lis3_dev.idev, ABS_X, x - lis3_dev.xcalib);
- input_report_abs(lis3_dev.idev, ABS_Y, y - lis3_dev.ycalib);
- input_report_abs(lis3_dev.idev, ABS_Z, z - lis3_dev.zcalib);
-
- input_sync(lis3_dev.idev);
-
- try_to_freeze();
- msleep_interruptible(MDPS_POLL_INTERVAL);
- }
-
- return 0;
-}
-
-static int lis3lv02d_joystick_open(struct input_dev *input)
-{
- lis3lv02d_increase_use(&lis3_dev);
- lis3_dev.kthread = kthread_run(lis3lv02d_joystick_kthread, NULL, "klis3lv02d");
- if (IS_ERR(lis3_dev.kthread)) {
- lis3lv02d_decrease_use(&lis3_dev);
- return PTR_ERR(lis3_dev.kthread);
- }
-
- return 0;
+ lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+ input_report_abs(pidev->input, ABS_X, x - lis3_dev.xcalib);
+ input_report_abs(pidev->input, ABS_Y, y - lis3_dev.ycalib);
+ input_report_abs(pidev->input, ABS_Z, z - lis3_dev.zcalib);
}
-static void lis3lv02d_joystick_close(struct input_dev *input)
-{
- kthread_stop(lis3_dev.kthread);
- lis3lv02d_decrease_use(&lis3_dev);
-}
static inline void lis3lv02d_calibrate_joystick(void)
{
@@ -339,33 +287,36 @@ static inline void lis3lv02d_calibrate_joystick(void)
int lis3lv02d_joystick_enable(void)
{
+ struct input_dev *input_dev;
int err;
if (lis3_dev.idev)
return -EINVAL;
- lis3_dev.idev = input_allocate_device();
+ lis3_dev.idev = input_allocate_polled_device();
if (!lis3_dev.idev)
return -ENOMEM;
+ lis3_dev.idev->poll = lis3lv02d_joystick_poll;
+ lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
+ input_dev = lis3_dev.idev->input;
+
lis3lv02d_calibrate_joystick();
- lis3_dev.idev->name = "ST LIS3LV02DL Accelerometer";
- lis3_dev.idev->phys = DRIVER_NAME "/input0";
- lis3_dev.idev->id.bustype = BUS_HOST;
- lis3_dev.idev->id.vendor = 0;
- lis3_dev.idev->dev.parent = &lis3_dev.pdev->dev;
- lis3_dev.idev->open = lis3lv02d_joystick_open;
- lis3_dev.idev->close = lis3lv02d_joystick_close;
+ input_dev->name = "ST LIS3LV02DL Accelerometer";
+ input_dev->phys = DRIVER_NAME "/input0";
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->id.vendor = 0;
+ input_dev->dev.parent = &lis3_dev.pdev->dev;
- set_bit(EV_ABS, lis3_dev.idev->evbit);
- input_set_abs_params(lis3_dev.idev, ABS_X, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
- input_set_abs_params(lis3_dev.idev, ABS_Y, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
- input_set_abs_params(lis3_dev.idev, ABS_Z, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
+ set_bit(EV_ABS, input_dev->evbit);
+ input_set_abs_params(input_dev, ABS_X, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
+ input_set_abs_params(input_dev, ABS_Y, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
+ input_set_abs_params(input_dev, ABS_Z, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
- err = input_register_device(lis3_dev.idev);
+ err = input_register_polled_device(lis3_dev.idev);
if (err) {
- input_free_device(lis3_dev.idev);
+ input_free_polled_device(lis3_dev.idev);
lis3_dev.idev = NULL;
}
@@ -378,8 +329,9 @@ void lis3lv02d_joystick_disable(void)
if (!lis3_dev.idev)
return;
- misc_deregister(&lis3lv02d_misc_device);
- input_unregister_device(lis3_dev.idev);
+ if (lis3_dev.irq)
+ misc_deregister(&lis3lv02d_misc_device);
+ input_unregister_polled_device(lis3_dev.idev);
lis3_dev.idev = NULL;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
@@ -390,9 +342,7 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
{
int x, y, z;
- lis3lv02d_increase_use(&lis3_dev);
lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
- lis3lv02d_decrease_use(&lis3_dev);
return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
}
@@ -406,9 +356,7 @@ static ssize_t lis3lv02d_calibrate_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- lis3lv02d_increase_use(&lis3_dev);
lis3lv02d_calibrate_joystick();
- lis3lv02d_decrease_use(&lis3_dev);
return count;
}
@@ -420,9 +368,7 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
u8 ctrl;
int val;
- lis3lv02d_increase_use(&lis3_dev);
lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
- lis3lv02d_decrease_use(&lis3_dev);
val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4;
return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]);
}
@@ -446,17 +392,17 @@ static struct attribute_group lis3lv02d_attribute_group = {
static int lis3lv02d_add_fs(struct lis3lv02d *lis3)
{
- lis3_dev.pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
- if (IS_ERR(lis3_dev.pdev))
- return PTR_ERR(lis3_dev.pdev);
+ lis3->pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+ if (IS_ERR(lis3->pdev))
+ return PTR_ERR(lis3->pdev);
- return sysfs_create_group(&lis3_dev.pdev->dev.kobj, &lis3lv02d_attribute_group);
+ return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
}
-int lis3lv02d_remove_fs(void)
+int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
{
- sysfs_remove_group(&lis3_dev.pdev->dev.kobj, &lis3lv02d_attribute_group);
- platform_device_unregister(lis3_dev.pdev);
+ sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
+ platform_device_unregister(lis3->pdev);
return 0;
}
EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
@@ -482,18 +428,35 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
break;
default:
printk(KERN_ERR DRIVER_NAME
- ": unknown sensor type 0x%X\n", lis3_dev.whoami);
+ ": unknown sensor type 0x%X\n", dev->whoami);
return -EINVAL;
}
- mutex_init(&dev->lock);
lis3lv02d_add_fs(dev);
- lis3lv02d_increase_use(dev);
+ lis3lv02d_poweron(dev);
if (lis3lv02d_joystick_enable())
printk(KERN_ERR DRIVER_NAME ": joystick initialization failed\n");
- printk("lis3_init_device: irq %d\n", dev->irq);
+ /* passing in platform specific data is purely optional and only
+ * used by the SPI transport layer at the moment */
+ if (dev->pdata) {
+ struct lis3lv02d_platform_data *p = dev->pdata;
+
+ if (p->click_flags && (dev->whoami == LIS_SINGLE_ID)) {
+ dev->write(dev, CLICK_CFG, p->click_flags);
+ dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
+ dev->write(dev, CLICK_LATENCY, p->click_latency);
+ dev->write(dev, CLICK_WINDOW, p->click_window);
+ dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
+ dev->write(dev, CLICK_THSY_X,
+ (p->click_thresh_x & 0xf) |
+ (p->click_thresh_y << 4));
+ }
+
+ if (p->irq_cfg)
+ dev->write(dev, CTRL_REG3, p->irq_cfg);
+ }
/* bail if we did not get an IRQ from the bus layer */
if (!dev->irq) {
@@ -502,11 +465,9 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
goto out;
}
- printk("lis3: registering device\n");
if (misc_register(&lis3lv02d_misc_device))
printk(KERN_ERR DRIVER_NAME ": misc_register failed\n");
out:
- lis3lv02d_decrease_use(dev);
return 0;
}
EXPORT_SYMBOL_GPL(lis3lv02d_init_device);
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index 745ec96806d..e320e2f511f 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -18,6 +18,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/platform_device.h>
+#include <linux/input-polldev.h>
/*
* The actual chip is STMicroelectronics LIS3LV02DL or LIS3LV02DQ that seems to
@@ -27,12 +29,14 @@
* They can also be connected via I²C.
*/
+#include <linux/lis3lv02d.h>
+
/* 2-byte registers */
#define LIS_DOUBLE_ID 0x3A /* LIS3LV02D[LQ] */
/* 1-byte registers */
#define LIS_SINGLE_ID 0x3B /* LIS[32]02DL and others */
-enum lis3lv02d_reg {
+enum lis3_reg {
WHO_AM_I = 0x0F,
OFFSET_X = 0x16,
OFFSET_Y = 0x17,
@@ -60,6 +64,19 @@ enum lis3lv02d_reg {
FF_WU_THS_L = 0x34,
FF_WU_THS_H = 0x35,
FF_WU_DURATION = 0x36,
+};
+
+enum lis302d_reg {
+ CLICK_CFG = 0x38,
+ CLICK_SRC = 0x39,
+ CLICK_THSY_X = 0x3B,
+ CLICK_THSZ = 0x3C,
+ CLICK_TIMELIMIT = 0x3D,
+ CLICK_LATENCY = 0x3E,
+ CLICK_WINDOW = 0x3F,
+};
+
+enum lis3lv02d_reg {
DD_CFG = 0x38,
DD_SRC = 0x39,
DD_ACK = 0x3A,
@@ -169,22 +186,20 @@ struct lis3lv02d {
s16 (*read_data) (struct lis3lv02d *lis3, int reg);
int mdps_max_val;
- struct input_dev *idev; /* input device */
- struct task_struct *kthread; /* kthread for input */
- struct mutex lock;
+ struct input_polled_dev *idev; /* input device */
struct platform_device *pdev; /* platform device */
atomic_t count; /* interrupt count after last read */
int xcalib; /* calibrated null value for x */
int ycalib; /* calibrated null value for y */
int zcalib; /* calibrated null value for z */
- unsigned char is_on; /* whether the device is on or off */
- unsigned char usage; /* usage counter */
struct axis_conversion ac; /* hw -> logical axis */
u32 irq; /* IRQ number */
struct fasync_struct *async_queue; /* queue for the misc device */
wait_queue_head_t misc_wait; /* Wait queue for the misc device */
unsigned long misc_opened; /* bit0: whether the device is open */
+
+ struct lis3lv02d_platform_data *pdata; /* for passing board config */
};
int lis3lv02d_init_device(struct lis3lv02d *lis3);
@@ -192,6 +207,6 @@ int lis3lv02d_joystick_enable(void);
void lis3lv02d_joystick_disable(void);
void lis3lv02d_poweroff(struct lis3lv02d *lis3);
void lis3lv02d_poweron(struct lis3lv02d *lis3);
-int lis3lv02d_remove_fs(void);
+int lis3lv02d_remove_fs(struct lis3lv02d *lis3);
extern struct lis3lv02d lis3_dev;
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 07ae74b0e19..3827ff04485 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -72,6 +72,7 @@ static int __devinit lis302dl_spi_probe(struct spi_device *spi)
lis3_dev.write = lis3_spi_write;
lis3_dev.irq = spi->irq;
lis3_dev.ac = lis3lv02d_axis_normal;
+ lis3_dev.pdata = spi->dev.platform_data;
spi_set_drvdata(spi, &lis3_dev);
ret = lis3lv02d_init_device(&lis3_dev);
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index b5e3b285169..a1787fdf5b9 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -182,7 +182,7 @@ static struct platform_driver lm78_isa_driver = {
.name = "lm78",
},
.probe = lm78_isa_probe,
- .remove = lm78_isa_remove,
+ .remove = __devexit_p(lm78_isa_remove),
};
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index f27af6a9da4..86142a85823 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -12,7 +12,7 @@
* also work with the MAX6651. It does not distinguish max6650 and max6651
* chips.
*
- * Tha datasheet was last seen at:
+ * The datasheet was last seen at:
*
* http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
*
@@ -98,6 +98,16 @@ I2C_CLIENT_INSMOD_1(max6650);
#define MAX6650_CFG_MODE_OPEN_LOOP 0x30
#define MAX6650_COUNT_MASK 0x03
+/*
+ * Alarm status register bits
+ */
+
+#define MAX6650_ALRM_MAX 0x01
+#define MAX6650_ALRM_MIN 0x02
+#define MAX6650_ALRM_TACH 0x04
+#define MAX6650_ALRM_GPIO1 0x08
+#define MAX6650_ALRM_GPIO2 0x10
+
/* Minimum and maximum values of the FAN-RPM */
#define FAN_RPM_MIN 240
#define FAN_RPM_MAX 30000
@@ -151,6 +161,7 @@ struct max6650_data
u8 tach[4];
u8 count;
u8 dac;
+ u8 alarm;
};
static ssize_t get_fan(struct device *dev, struct device_attribute *devattr,
@@ -418,6 +429,33 @@ static ssize_t set_div(struct device *dev, struct device_attribute *devattr,
return count;
}
+/*
+ * Get alarm status:
+ * Possible values:
+ * 0 = no alarm
+ * 1 = alarm
+ */
+
+static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct max6650_data *data = max6650_update_device(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+ int alarm = 0;
+
+ if (data->alarm & attr->index) {
+ mutex_lock(&data->update_lock);
+ alarm = 1;
+ data->alarm &= ~attr->index;
+ data->alarm |= i2c_smbus_read_byte_data(client,
+ MAX6650_REG_ALARM);
+ mutex_unlock(&data->update_lock);
+ }
+
+ return sprintf(buf, "%d\n", alarm);
+}
+
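/*
 * Editor's note (hedged worked example, not part of the original patch):
 * the alarm bits are latched in data->alarm by max6650_update_device()
 * below, so if the tach alarm (MAX6650_ALRM_TACH) fired once and the
 * condition has since cleared, the first read of fan1_fault returns 1 and
 * rearms the latch from a fresh MAX6650_REG_ALARM read, while subsequent
 * reads return 0 until the alarm triggers again.
 */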
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2);
@@ -426,7 +464,41 @@ static DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, set_target);
static DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div);
static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable);
static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
+static SENSOR_DEVICE_ATTR(fan1_max_alarm, S_IRUGO, get_alarm, NULL,
+ MAX6650_ALRM_MAX);
+static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, get_alarm, NULL,
+ MAX6650_ALRM_MIN);
+static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_alarm, NULL,
+ MAX6650_ALRM_TACH);
+static SENSOR_DEVICE_ATTR(gpio1_alarm, S_IRUGO, get_alarm, NULL,
+ MAX6650_ALRM_GPIO1);
+static SENSOR_DEVICE_ATTR(gpio2_alarm, S_IRUGO, get_alarm, NULL,
+ MAX6650_ALRM_GPIO2);
+
+static mode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 alarm_en = i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN);
+ struct device_attribute *devattr;
+ /*
+ * Hide the alarms that have not been enabled by the firmware
+ */
+
+ devattr = container_of(a, struct device_attribute, attr);
+ if (devattr == &sensor_dev_attr_fan1_max_alarm.dev_attr
+ || devattr == &sensor_dev_attr_fan1_min_alarm.dev_attr
+ || devattr == &sensor_dev_attr_fan1_fault.dev_attr
+ || devattr == &sensor_dev_attr_gpio1_alarm.dev_attr
+ || devattr == &sensor_dev_attr_gpio2_alarm.dev_attr) {
+ if (!(alarm_en & to_sensor_dev_attr(devattr)->index))
+ return 0;
+ }
+
+ return a->mode;
+}
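/*
 * Editor's note (hedged worked example, not part of the original patch):
 * with the alarm masks defined above, an alarm-enable register value of
 * 0x07 keeps fan1_max_alarm (0x01), fan1_min_alarm (0x02) and fan1_fault
 * (0x04) visible while gpio1_alarm (0x08) and gpio2_alarm (0x10) are
 * hidden, since is_visible returns 0 for attributes whose enable bit is
 * clear.
 */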
static struct attribute *max6650_attrs[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -437,11 +509,17 @@ static struct attribute *max6650_attrs[] = {
&dev_attr_fan1_div.attr,
&dev_attr_pwm1_enable.attr,
&dev_attr_pwm1.attr,
+ &sensor_dev_attr_fan1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan1_fault.dev_attr.attr,
+ &sensor_dev_attr_gpio1_alarm.dev_attr.attr,
+ &sensor_dev_attr_gpio2_alarm.dev_attr.attr,
NULL
};
static struct attribute_group max6650_attr_grp = {
.attrs = max6650_attrs,
+ .is_visible = max6650_attrs_visible,
};
/*
@@ -659,6 +737,12 @@ static struct max6650_data *max6650_update_device(struct device *dev)
MAX6650_REG_COUNT);
data->dac = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC);
+ /* Alarms are cleared on read in case the condition that
+ * caused the alarm is removed. Keep the value latched here
+ * for providing the register through different alarm files. */
+ data->alarm |= i2c_smbus_read_byte_data(client,
+ MAX6650_REG_ALARM);
+
data->last_updated = jiffies;
data->valid = 1;
}
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 6cbdc2fea73..56cd6004da3 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -627,35 +627,35 @@ static struct platform_driver sht_drivers[] = {
.owner = THIS_MODULE,
},
.probe = sht15_probe,
- .remove = sht15_remove,
+ .remove = __devexit_p(sht15_remove),
}, {
.driver = {
.name = "sht11",
.owner = THIS_MODULE,
},
.probe = sht15_probe,
- .remove = sht15_remove,
+ .remove = __devexit_p(sht15_remove),
}, {
.driver = {
.name = "sht15",
.owner = THIS_MODULE,
},
.probe = sht15_probe,
- .remove = sht15_remove,
+ .remove = __devexit_p(sht15_remove),
}, {
.driver = {
.name = "sht71",
.owner = THIS_MODULE,
},
.probe = sht15_probe,
- .remove = sht15_remove,
+ .remove = __devexit_p(sht15_remove),
}, {
.driver = {
.name = "sht75",
.owner = THIS_MODULE,
},
.probe = sht15_probe,
- .remove = sht15_remove,
+ .remove = __devexit_p(sht15_remove),
},
};
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
new file mode 100644
index 00000000000..7b34f2cd08b
--- /dev/null
+++ b/drivers/hwmon/tmp401.c
@@ -0,0 +1,690 @@
+/* tmp401.c
+ *
+ * Copyright (C) 2007,2008 Hans de Goede <hdegoede@redhat.com>
+ * Preliminary tmp411 support by:
+ * Gabriel Konat, Sander Leget, Wouter Willems
+ * Copyright (C) 2009 Andre Prendel <andre.prendel@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Driver for the Texas Instruments TMP401 SMBUS temperature sensor IC.
+ *
+ * Note this IC is in some aspect similar to the LM90, but it has quite a
+ * few differences too, for example the local temp has a higher resolution
+ * and thus has 16 bits registers for its value and limit instead of 8 bits.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+
+/* Addresses to scan */
+static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
+
+/* Insmod parameters */
+I2C_CLIENT_INSMOD_2(tmp401, tmp411);
+
+/*
+ * The TMP401 registers, note some registers have different addresses for
+ * reading and writing
+ */
+#define TMP401_STATUS 0x02
+#define TMP401_CONFIG_READ 0x03
+#define TMP401_CONFIG_WRITE 0x09
+#define TMP401_CONVERSION_RATE_READ 0x04
+#define TMP401_CONVERSION_RATE_WRITE 0x0A
+#define TMP401_TEMP_CRIT_HYST 0x21
+#define TMP401_CONSECUTIVE_ALERT 0x22
+#define TMP401_MANUFACTURER_ID_REG 0xFE
+#define TMP401_DEVICE_ID_REG 0xFF
+#define TMP411_N_FACTOR_REG 0x18
+
+static const u8 TMP401_TEMP_MSB[2] = { 0x00, 0x01 };
+static const u8 TMP401_TEMP_LSB[2] = { 0x15, 0x10 };
+static const u8 TMP401_TEMP_LOW_LIMIT_MSB_READ[2] = { 0x06, 0x08 };
+static const u8 TMP401_TEMP_LOW_LIMIT_MSB_WRITE[2] = { 0x0C, 0x0E };
+static const u8 TMP401_TEMP_LOW_LIMIT_LSB[2] = { 0x17, 0x14 };
+static const u8 TMP401_TEMP_HIGH_LIMIT_MSB_READ[2] = { 0x05, 0x07 };
+static const u8 TMP401_TEMP_HIGH_LIMIT_MSB_WRITE[2] = { 0x0B, 0x0D };
+static const u8 TMP401_TEMP_HIGH_LIMIT_LSB[2] = { 0x16, 0x13 };
+/* These are called the THERM limit / hysteresis / mask in the datasheet */
+static const u8 TMP401_TEMP_CRIT_LIMIT[2] = { 0x20, 0x19 };
+
+static const u8 TMP411_TEMP_LOWEST_MSB[2] = { 0x30, 0x34 };
+static const u8 TMP411_TEMP_LOWEST_LSB[2] = { 0x31, 0x35 };
+static const u8 TMP411_TEMP_HIGHEST_MSB[2] = { 0x32, 0x36 };
+static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 };
+
+/* Flags */
+#define TMP401_CONFIG_RANGE 0x04
+#define TMP401_CONFIG_SHUTDOWN 0x40
+#define TMP401_STATUS_LOCAL_CRIT 0x01
+#define TMP401_STATUS_REMOTE_CRIT 0x02
+#define TMP401_STATUS_REMOTE_OPEN 0x04
+#define TMP401_STATUS_REMOTE_LOW 0x08
+#define TMP401_STATUS_REMOTE_HIGH 0x10
+#define TMP401_STATUS_LOCAL_LOW 0x20
+#define TMP401_STATUS_LOCAL_HIGH 0x40
+
+/* Manufacturer / Device ID's */
+#define TMP401_MANUFACTURER_ID 0x55
+#define TMP401_DEVICE_ID 0x11
+#define TMP411_DEVICE_ID 0x12
+
+/*
+ * Functions declarations
+ */
+
+static int tmp401_probe(struct i2c_client *client,
+ const struct i2c_device_id *id);
+static int tmp401_detect(struct i2c_client *client, int kind,
+ struct i2c_board_info *info);
+static int tmp401_remove(struct i2c_client *client);
+static struct tmp401_data *tmp401_update_device(struct device *dev);
+
+/*
+ * Driver data (common to all clients)
+ */
+
+static const struct i2c_device_id tmp401_id[] = {
+ { "tmp401", tmp401 },
+ { "tmp411", tmp411 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp401_id);
+
+static struct i2c_driver tmp401_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "tmp401",
+ },
+ .probe = tmp401_probe,
+ .remove = tmp401_remove,
+ .id_table = tmp401_id,
+ .detect = tmp401_detect,
+ .address_data = &addr_data,
+};
+
+/*
+ * Client data (each client gets its own)
+ */
+
+struct tmp401_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ char valid; /* zero until following fields are valid */
+ unsigned long last_updated; /* in jiffies */
+ int kind;
+
+ /* register values */
+ u8 status;
+ u8 config;
+ u16 temp[2];
+ u16 temp_low[2];
+ u16 temp_high[2];
+ u8 temp_crit[2];
+ u8 temp_crit_hyst;
+ u16 temp_lowest[2];
+ u16 temp_highest[2];
+};
+
+/*
+ * Sysfs attr show / store functions
+ */
+
+static int tmp401_register_to_temp(u16 reg, u8 config)
+{
+ int temp = reg;
+
+ if (config & TMP401_CONFIG_RANGE)
+ temp -= 64 * 256;
+
+ return (temp * 625 + 80) / 160;
+}
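/*
 * Editor's note (hedged worked example, not part of the original patch):
 * (temp * 625 + 80) / 160 is reg * 1000 / 256 with rounding, so in the
 * standard range a register value of 0x1900 (6400) converts to
 * (6400 * 625 + 80) / 160 = 25000, i.e. 25.000 degC in millidegrees.
 */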
+
+static u16 tmp401_temp_to_register(long temp, u8 config)
+{
+ if (config & TMP401_CONFIG_RANGE) {
+ temp = SENSORS_LIMIT(temp, -64000, 191000);
+ temp += 64000;
+ } else
+ temp = SENSORS_LIMIT(temp, 0, 127000);
+
+ return (temp * 160 + 312) / 625;
+}
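/*
 * Editor's note (hedged worked example, not part of the original patch):
 * this is the inverse of the conversion above: 25000 millidegrees in the
 * standard range becomes (25000 * 160 + 312) / 625 = 6400 = 0x1900, the
 * 312 playing the same rounding role as the 80 above.
 */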
+
+static int tmp401_crit_register_to_temp(u8 reg, u8 config)
+{
+ int temp = reg;
+
+ if (config & TMP401_CONFIG_RANGE)
+ temp -= 64;
+
+ return temp * 1000;
+}
+
+static u8 tmp401_crit_temp_to_register(long temp, u8 config)
+{
+ if (config & TMP401_CONFIG_RANGE) {
+ temp = SENSORS_LIMIT(temp, -64000, 191000);
+ temp += 64000;
+ } else
+ temp = SENSORS_LIMIT(temp, 0, 127000);
+
+ return (temp + 500) / 1000;
+}
+
+static ssize_t show_temp_value(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct tmp401_data *data = tmp401_update_device(dev);
+
+ return sprintf(buf, "%d\n",
+ tmp401_register_to_temp(data->temp[index], data->config));
+}
+
+static ssize_t show_temp_min(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct tmp401_data *data = tmp401_update_device(dev);
+
+ return sprintf(buf, "%d\n",
+ tmp401_register_to_temp(data->temp_low[index], data->config));
+}
+
+static ssize_t show_temp_max(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct tmp401_data *data = tmp401_update_device(dev);
+
+ return sprintf(buf, "%d\n",
+ tmp401_register_to_temp(data->temp_high[index], data->config));
+}
+
+static ssize_t show_temp_crit(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct tmp401_data *data = tmp401_update_device(dev);
+
+ return sprintf(buf, "%d\n",
+ tmp401_crit_register_to_temp(data->temp_crit[index],
+ data->config));
+}
+
+static ssize_t show_temp_crit_hyst(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int temp, index = to_sensor_dev_attr(devattr)->index;
+ struct tmp401_data *data = tmp401_update_device(dev);
+
+ mutex_lock(&data->update_lock);
+ temp = tmp401_crit_register_to_temp(data->temp_crit[index],
+ data->config);
+ temp -= data->temp_crit_hyst * 1000;
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%d\n", temp);
+}
+
+static ssize_t show_temp_lowest(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct tmp401_data *data = tmp401_update_device(dev);
+
+ return sprintf(buf, "%d\n",
+ tmp401_register_to_temp(data->temp_lowest[index],
+ data->config));
+}
+
+static ssize_t show_temp_highest(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct tmp401_data *data = tmp401_update_device(dev);
+
+ return sprintf(buf, "%d\n",
+ tmp401_register_to_temp(data->temp_highest[index],
+ data->config));
+}
+
+static ssize_t show_status(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int mask = to_sensor_dev_attr(devattr)->index;
+ struct tmp401_data *data = tmp401_update_device(dev);
+
+ if (data->status & mask)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t store_temp_min(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct tmp401_data *data = tmp401_update_device(dev);
+ long val;
+ u16 reg;
+
+ if (strict_strtol(buf, 10, &val))
+ return -EINVAL;
+
+ reg = tmp401_temp_to_register(val, data->config);
+
+ mutex_lock(&data->update_lock);
+
+ i2c_smbus_write_byte_data(to_i2c_client(dev),
+ TMP401_TEMP_LOW_LIMIT_MSB_WRITE[index], reg >> 8);
+ i2c_smbus_write_byte_data(to_i2c_client(dev),
+ TMP401_TEMP_LOW_LIMIT_LSB[index], reg & 0xFF);
+
+ data->temp_low[index] = reg;
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t store_temp_max(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct tmp401_data *data = tmp401_update_device(dev);
+ long val;
+ u16 reg;
+
+ if (strict_strtol(buf, 10, &val))
+ return -EINVAL;
+
+ reg = tmp401_temp_to_register(val, data->config);
+
+ mutex_lock(&data->update_lock);
+
+ i2c_smbus_write_byte_data(to_i2c_client(dev),
+ TMP401_TEMP_HIGH_LIMIT_MSB_WRITE[index], reg >> 8);
+ i2c_smbus_write_byte_data(to_i2c_client(dev),
+ TMP401_TEMP_HIGH_LIMIT_LSB[index], reg & 0xFF);
+
+ data->temp_high[index] = reg;
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t store_temp_crit(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct tmp401_data *data = tmp401_update_device(dev);
+ long val;
+ u8 reg;
+
+ if (strict_strtol(buf, 10, &val))
+ return -EINVAL;
+
+ reg = tmp401_crit_temp_to_register(val, data->config);
+
+ mutex_lock(&data->update_lock);
+
+ i2c_smbus_write_byte_data(to_i2c_client(dev),
+ TMP401_TEMP_CRIT_LIMIT[index], reg);
+
+ data->temp_crit[index] = reg;
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ int temp, index = to_sensor_dev_attr(devattr)->index;
+ struct tmp401_data *data = tmp401_update_device(dev);
+ long val;
+ u8 reg;
+
+ if (strict_strtol(buf, 10, &val))
+ return -EINVAL;
+
+ if (data->config & TMP401_CONFIG_RANGE)
+ val = SENSORS_LIMIT(val, -64000, 191000);
+ else
+ val = SENSORS_LIMIT(val, 0, 127000);
+
+ mutex_lock(&data->update_lock);
+ temp = tmp401_crit_register_to_temp(data->temp_crit[index],
+ data->config);
+ val = SENSORS_LIMIT(val, temp - 255000, temp);
+ reg = ((temp - val) + 500) / 1000;
+
+ i2c_smbus_write_byte_data(to_i2c_client(dev),
+ TMP401_TEMP_CRIT_HYST, reg);
+
+ data->temp_crit_hyst = reg;
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+/*
+ * Resets the historical measurements of minimum and maximum temperatures.
+ * This is done by writing any value to any of the minimum/maximum registers
+ * (0x30-0x37).
+ */
+static ssize_t reset_temp_history(struct device *dev,
+ struct device_attribute *devattr, const char *buf, size_t count)
+{
+ long val;
+
+ if (strict_strtol(buf, 10, &val))
+ return -EINVAL;
+
+ if (val != 1) {
+ dev_err(dev, "temp_reset_history value %ld not"
+ " supported. Use 1 to reset the history!\n", val);
+ return -EINVAL;
+ }
+ i2c_smbus_write_byte_data(to_i2c_client(dev),
+ TMP411_TEMP_LOWEST_MSB[0], val);
+
+ return count;
+}
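/*
 * Editor's note (hedged usage example, not part of the original patch):
 * from userspace the reset would be triggered with something like
 *
 *	echo 1 > /sys/bus/i2c/devices/0-004c/temp_reset_history
 *
 * where the "0-004c" device name is an assumption based on the default
 * 0x4c address in normal_i2c above; any value other than 1 is rejected
 * with -EINVAL by the check above.
 */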
+
+static struct sensor_device_attribute tmp401_attr[] = {
+ SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0),
+ SENSOR_ATTR(temp1_min, 0644, show_temp_min, store_temp_min, 0),
+ SENSOR_ATTR(temp1_max, 0644, show_temp_max, store_temp_max, 0),
+ SENSOR_ATTR(temp1_crit, 0644, show_temp_crit, store_temp_crit, 0),
+ SENSOR_ATTR(temp1_crit_hyst, 0644, show_temp_crit_hyst,
+ store_temp_crit_hyst, 0),
+ SENSOR_ATTR(temp1_min_alarm, 0444, show_status, NULL,
+ TMP401_STATUS_LOCAL_LOW),
+ SENSOR_ATTR(temp1_max_alarm, 0444, show_status, NULL,
+ TMP401_STATUS_LOCAL_HIGH),
+ SENSOR_ATTR(temp1_crit_alarm, 0444, show_status, NULL,
+ TMP401_STATUS_LOCAL_CRIT),
+ SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1),
+ SENSOR_ATTR(temp2_min, 0644, show_temp_min, store_temp_min, 1),
+ SENSOR_ATTR(temp2_max, 0644, show_temp_max, store_temp_max, 1),
+ SENSOR_ATTR(temp2_crit, 0644, show_temp_crit, store_temp_crit, 1),
+ SENSOR_ATTR(temp2_crit_hyst, 0444, show_temp_crit_hyst, NULL, 1),
+ SENSOR_ATTR(temp2_fault, 0444, show_status, NULL,
+ TMP401_STATUS_REMOTE_OPEN),
+ SENSOR_ATTR(temp2_min_alarm, 0444, show_status, NULL,
+ TMP401_STATUS_REMOTE_LOW),
+ SENSOR_ATTR(temp2_max_alarm, 0444, show_status, NULL,
+ TMP401_STATUS_REMOTE_HIGH),
+ SENSOR_ATTR(temp2_crit_alarm, 0444, show_status, NULL,
+ TMP401_STATUS_REMOTE_CRIT),
+};
+
+/*
+ * Additional features of the TMP411 chip.
+ * The TMP411 stores the minimum and maximum
+ * temperature measured since power-on, chip-reset, or
+ * minimum and maximum register reset for both the local
+ * and remote channels.
+ */
+static struct sensor_device_attribute tmp411_attr[] = {
+ SENSOR_ATTR(temp1_highest, 0444, show_temp_highest, NULL, 0),
+ SENSOR_ATTR(temp1_lowest, 0444, show_temp_lowest, NULL, 0),
+ SENSOR_ATTR(temp2_highest, 0444, show_temp_highest, NULL, 1),
+ SENSOR_ATTR(temp2_lowest, 0444, show_temp_lowest, NULL, 1),
+ SENSOR_ATTR(temp_reset_history, 0200, NULL, reset_temp_history, 0),
+};
+
+/*
+ * Begin non sysfs callback code (aka Real code)
+ */
+
+static void tmp401_init_client(struct i2c_client *client)
+{
+ int config, config_orig;
+
+ /* Set the conversion rate to 2 Hz */
+ i2c_smbus_write_byte_data(client, TMP401_CONVERSION_RATE_WRITE, 5);
+
+ /* Start conversions (disable shutdown if necessary) */
+ config = i2c_smbus_read_byte_data(client, TMP401_CONFIG_READ);
+ if (config < 0) {
+ dev_warn(&client->dev, "Initialization failed!\n");
+ return;
+ }
+
+ config_orig = config;
+ config &= ~TMP401_CONFIG_SHUTDOWN;
+
+ if (config != config_orig)
+ i2c_smbus_write_byte_data(client, TMP401_CONFIG_WRITE, config);
+}
+
+static int tmp401_detect(struct i2c_client *client, int kind,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ /* Detect and identify the chip */
+ if (kind <= 0) {
+ u8 reg;
+
+ reg = i2c_smbus_read_byte_data(client,
+ TMP401_MANUFACTURER_ID_REG);
+ if (reg != TMP401_MANUFACTURER_ID)
+ return -ENODEV;
+
+ reg = i2c_smbus_read_byte_data(client, TMP401_DEVICE_ID_REG);
+
+ switch (reg) {
+ case TMP401_DEVICE_ID:
+ kind = tmp401;
+ break;
+ case TMP411_DEVICE_ID:
+ kind = tmp411;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ reg = i2c_smbus_read_byte_data(client, TMP401_CONFIG_READ);
+ if (reg & 0x1b)
+ return -ENODEV;
+
+ reg = i2c_smbus_read_byte_data(client,
+ TMP401_CONVERSION_RATE_READ);
+ /* Datasheet says: 0x1-0x6 */
+ if (reg > 15)
+ return -ENODEV;
+ }
+ strlcpy(info->type, tmp401_id[kind - 1].name, I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int tmp401_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int i, err = 0;
+ struct tmp401_data *data;
+ const char *names[] = { "TMP401", "TMP411" };
+
+ data = kzalloc(sizeof(struct tmp401_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+ data->kind = id->driver_data;
+
+ /* Initialize the TMP401 chip */
+ tmp401_init_client(client);
+
+ /* Register sysfs hooks */
+ for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++) {
+ err = device_create_file(&client->dev,
+ &tmp401_attr[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+
+	/* Register additional tmp411 sysfs hooks */
+ if (data->kind == tmp411) {
+ for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++) {
+ err = device_create_file(&client->dev,
+ &tmp411_attr[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ }
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ data->hwmon_dev = NULL;
+ goto exit_remove;
+ }
+
+ dev_info(&client->dev, "Detected TI %s chip\n",
+ names[data->kind - 1]);
+
+ return 0;
+
+exit_remove:
+ tmp401_remove(client); /* will also free data for us */
+ return err;
+}
+
+static int tmp401_remove(struct i2c_client *client)
+{
+ struct tmp401_data *data = i2c_get_clientdata(client);
+ int i;
+
+ if (data->hwmon_dev)
+ hwmon_device_unregister(data->hwmon_dev);
+
+ for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++)
+ device_remove_file(&client->dev, &tmp401_attr[i].dev_attr);
+
+ if (data->kind == tmp411) {
+ for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
+ device_remove_file(&client->dev,
+ &tmp411_attr[i].dev_attr);
+ }
+
+ kfree(data);
+ return 0;
+}
+
+static struct tmp401_data *tmp401_update_device_reg16(
+ struct i2c_client *client, struct tmp401_data *data)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ /*
+ * High byte must be read first immediately followed
+ * by the low byte
+ */
+ data->temp[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_MSB[i]) << 8;
+ data->temp[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LSB[i]);
+ data->temp_low[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
+ data->temp_low[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LOW_LIMIT_LSB[i]);
+ data->temp_high[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
+ data->temp_high[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_HIGH_LIMIT_LSB[i]);
+ data->temp_crit[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_CRIT_LIMIT[i]);
+
+ if (data->kind == tmp411) {
+ data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
+ TMP411_TEMP_LOWEST_MSB[i]) << 8;
+ data->temp_lowest[i] |= i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_LOWEST_LSB[i]);
+
+ data->temp_highest[i] = i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
+ data->temp_highest[i] |= i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_HIGHEST_LSB[i]);
+ }
+ }
+ return data;
+}
+
+static struct tmp401_data *tmp401_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tmp401_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
+ data->config = i2c_smbus_read_byte_data(client,
+ TMP401_CONFIG_READ);
+ tmp401_update_device_reg16(client, data);
+
+ data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_CRIT_HYST);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
+static int __init tmp401_init(void)
+{
+ return i2c_add_driver(&tmp401_driver);
+}
+
+static void __exit tmp401_exit(void)
+{
+ i2c_del_driver(&tmp401_driver);
+}
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Texas Instruments TMP401 temperature sensor driver");
+MODULE_LICENSE("GPL");
+
+module_init(tmp401_init);
+module_exit(tmp401_exit);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index e64b42058b2..0e9746913d2 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -36,6 +36,7 @@
w83627ehf 10 5 4 3 0x8850 0x88 0x5ca3
0x8860 0xa1
w83627dhg 9 5 4 3 0xa020 0xc1 0x5ca3
+ w83627dhg-p 9 5 4 3 0xb070 0xc1 0x5ca3
w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3
*/
@@ -53,12 +54,13 @@
#include <asm/io.h>
#include "lm75.h"
-enum kinds { w83627ehf, w83627dhg, w83667hg };
+enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg };
/* used to set data->name = w83627ehf_device_names[data->sio_kind] */
static const char * w83627ehf_device_names[] = {
"w83627ehf",
"w83627dhg",
+ "w83627dhg",
"w83667hg",
};
@@ -86,6 +88,7 @@ MODULE_PARM_DESC(force_id, "Override the detected device ID");
#define SIO_W83627EHF_ID 0x8850
#define SIO_W83627EHG_ID 0x8860
#define SIO_W83627DHG_ID 0xa020
+#define SIO_W83627DHG_P_ID 0xb070
#define SIO_W83667HG_ID 0xa510
#define SIO_ID_MASK 0xFFF0
@@ -1517,6 +1520,7 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
static const char __initdata sio_name_W83627EHF[] = "W83627EHF";
static const char __initdata sio_name_W83627EHG[] = "W83627EHG";
static const char __initdata sio_name_W83627DHG[] = "W83627DHG";
+ static const char __initdata sio_name_W83627DHG_P[] = "W83627DHG-P";
static const char __initdata sio_name_W83667HG[] = "W83667HG";
u16 val;
@@ -1542,6 +1546,10 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
sio_data->kind = w83627dhg;
sio_name = sio_name_W83627DHG;
break;
+ case SIO_W83627DHG_P_ID:
+ sio_data->kind = w83627dhg_p;
+ sio_name = sio_name_W83627DHG_P;
+ break;
case SIO_W83667HG_ID:
sio_data->kind = w83667hg;
sio_name = sio_name_W83667HG;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f1c6ca7e285..3c259ee7ddd 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -211,7 +211,7 @@ config I2C_VIA
will be called i2c-via.
config I2C_VIAPRO
- tristate "VIA VT82C596/82C686/82xx and CX700/VX800/VX820"
+ tristate "VIA VT82C596/82C686/82xx and CX700/VX8xx"
depends on PCI
help
If you say yes to this option, support will be included for the VIA
@@ -225,8 +225,8 @@ config I2C_VIAPRO
VT8237R/A/S
VT8251
CX700
- VX800
- VX820
+ VX800/VX820
+ VX855/VX875
This driver can also be built as a module. If so, the module
will be called i2c-viapro.
@@ -298,7 +298,7 @@ config I2C_BLACKFIN_TWI
config I2C_BLACKFIN_TWI_CLK_KHZ
int "Blackfin TWI I2C clock (kHz)"
depends on I2C_BLACKFIN_TWI
- range 10 400
+ range 21 400
default 50
help
The unit of the TWI clock is kHz.
@@ -513,6 +513,19 @@ config I2C_SIMTEC
This driver can also be built as a module. If so, the module
will be called i2c-simtec.
+config I2C_STU300
+ tristate "ST Microelectronics DDC I2C interface"
+ depends on MACH_U300
+ default y if MACH_U300
+ help
+ If you say yes to this option, support will be included for the
+ I2C interface from ST Microelectronics simply called "DDC I2C"
+ supporting both I2C and DDC, used in e.g. the U300 series
+ mobile platforms.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-stu300.
+
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
depends on ARCH_VERSATILE || ARCH_REALVIEW
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 776acb6403a..edeabf00310 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_I2C_S6000) += i2c-s6000.o
obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
+obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
# External I2C/SMBus adapter drivers
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 67d9dc5b351..06e1ecb4919 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -200,10 +200,10 @@ static int __devinit at91_i2c_probe(struct platform_device *pdev)
if (!res)
return -ENXIO;
- if (!request_mem_region(res->start, res->end - res->start + 1, "at91_i2c"))
+ if (!request_mem_region(res->start, resource_size(res), "at91_i2c"))
return -EBUSY;
- twi_base = ioremap(res->start, res->end - res->start + 1);
+ twi_base = ioremap(res->start, resource_size(res));
if (!twi_base) {
rc = -ENOMEM;
goto fail0;
@@ -252,7 +252,7 @@ fail2:
fail1:
iounmap(twi_base);
fail0:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
return rc;
}
@@ -268,7 +268,7 @@ static int __devexit at91_i2c_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap(twi_base);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
clk_disable(twi_clk); /* disable peripheral clock */
clk_put(twi_clk);
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index f78ce523e3d..532828bc50e 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -389,7 +389,7 @@ i2c_au1550_probe(struct platform_device *pdev)
goto out;
}
- priv->ioarea = request_mem_region(r->start, r->end - r->start + 1,
+ priv->ioarea = request_mem_region(r->start, resource_size(r),
pdev->name);
if (!priv->ioarea) {
ret = -EBUSY;
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index fc548b3d002..b309ac2c3d5 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
@@ -104,9 +105,14 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) | STOP);
else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
- iface->cur_msg+1 < iface->msg_num)
- write_MASTER_CTL(iface,
- read_MASTER_CTL(iface) | RSTART);
+ iface->cur_msg + 1 < iface->msg_num) {
+ if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
+ write_MASTER_CTL(iface,
+ read_MASTER_CTL(iface) | RSTART | MDIR);
+ else
+ write_MASTER_CTL(iface,
+ (read_MASTER_CTL(iface) | RSTART) & ~MDIR);
+ }
SSYNC();
/* Clear status */
write_INT_STAT(iface, XMTSERV);
@@ -134,9 +140,13 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
read_MASTER_CTL(iface) | STOP);
SSYNC();
} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
- iface->cur_msg+1 < iface->msg_num) {
- write_MASTER_CTL(iface,
- read_MASTER_CTL(iface) | RSTART);
+ iface->cur_msg + 1 < iface->msg_num) {
+ if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
+ write_MASTER_CTL(iface,
+ read_MASTER_CTL(iface) | RSTART | MDIR);
+ else
+ write_MASTER_CTL(iface,
+ (read_MASTER_CTL(iface) | RSTART) & ~MDIR);
SSYNC();
}
/* Clear interrupt source */
@@ -196,8 +206,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
/* remove restart bit and enable master receive */
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) & ~RSTART);
- write_MASTER_CTL(iface,
- read_MASTER_CTL(iface) | MEN | MDIR);
SSYNC();
} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
iface->cur_msg+1 < iface->msg_num) {
@@ -222,18 +230,19 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
}
if (iface->pmsg[iface->cur_msg].len <= 255)
- write_MASTER_CTL(iface,
- iface->pmsg[iface->cur_msg].len << 6);
+ write_MASTER_CTL(iface,
+ (read_MASTER_CTL(iface) &
+ (~(0xff << 6))) |
+ (iface->pmsg[iface->cur_msg].len << 6));
else {
- write_MASTER_CTL(iface, 0xff << 6);
+ write_MASTER_CTL(iface,
+ (read_MASTER_CTL(iface) |
+ (0xff << 6)));
iface->manual_stop = 1;
}
/* remove restart bit and enable master receive */
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) & ~RSTART);
- write_MASTER_CTL(iface, read_MASTER_CTL(iface) |
- MEN | ((iface->read_write == I2C_SMBUS_READ) ?
- MDIR : 0));
SSYNC();
} else {
iface->result = 1;
@@ -441,6 +450,16 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
}
iface->transPtr = data->block;
break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ iface->readNum = data->block[0];
+ iface->cur_mode = TWI_I2C_MODE_COMBINED;
+ } else {
+ iface->writeNum = data->block[0];
+ iface->cur_mode = TWI_I2C_MODE_STANDARDSUB;
+ }
+ iface->transPtr = (u8 *)&data->block[1];
+ break;
default:
return -1;
}
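/*
 * Editor's note (hedged usage example, not part of the original patch):
 * with the I2C_SMBUS_I2C_BLOCK_DATA case added above (and the matching
 * I2C_FUNC_SMBUS_I2C_BLOCK bit in the functionality hunk below), a client
 * driver can now issue e.g. i2c_smbus_read_i2c_block_data(client, cmd,
 * len, buf), which reaches this function with data->block[0] holding the
 * requested length.
 */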
@@ -564,7 +583,7 @@ static u32 bfin_twi_functionality(struct i2c_adapter *adap)
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_PROC_CALL |
- I2C_FUNC_I2C;
+ I2C_FUNC_I2C | I2C_FUNC_SMBUS_I2C_BLOCK;
}
static struct i2c_algorithm bfin_twi_algorithm = {
@@ -614,6 +633,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
struct i2c_adapter *p_adap;
struct resource *res;
int rc;
+ unsigned int clkhilow;
iface = kzalloc(sizeof(struct bfin_twi_iface), GFP_KERNEL);
if (!iface) {
@@ -632,7 +652,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
goto out_error_get_res;
}
- iface->regs_base = ioremap(res->start, res->end - res->start + 1);
+ iface->regs_base = ioremap(res->start, resource_size(res));
if (iface->regs_base == NULL) {
dev_err(&pdev->dev, "Cannot map IO\n");
rc = -ENXIO;
@@ -675,10 +695,14 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
/* Set TWI internal clock as 10MHz */
write_CONTROL(iface, ((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F);
+ /*
+ * We will not end up with a CLKDIV=0 because no one will specify
+ * 20kHz SCL or less in Kconfig now. (5 * 1024 / 20 = 0x100)
+ */
+ clkhilow = 5 * 1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ;
+
/* Set Twi interface clock as specified */
- write_CLKDIV(iface, ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ)
- << 8) | ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ)
- & 0xFF));
+ write_CLKDIV(iface, (clkhilow << 8) | clkhilow);
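/*
 * Editor's note (hedged worked example, not part of the original patch):
 * with the Kconfig default of 50 kHz, clkhilow = 5 * 1024 / 50 = 102
 * (0x66) and CLKDIV is programmed with 0x6666; at the new 21 kHz minimum
 * it is 5 * 1024 / 21 = 243 (0xf3), still within the 8-bit field, which
 * is exactly what the comment above relies on.
 */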
/* Enable TWI */
write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA);
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index e5a8dae4a28..87ecace415d 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -373,7 +373,7 @@ static int __devinit highlander_i2c_probe(struct platform_device *pdev)
if (unlikely(!dev))
return -ENOMEM;
- dev->base = ioremap_nocache(res->start, res->end - res->start + 1);
+ dev->base = ioremap_nocache(res->start, resource_size(res));
if (unlikely(!dev->base)) {
ret = -ENXIO;
goto err;
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 8b92a4666e0..e4476743f20 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -756,12 +756,12 @@ static int __devinit iic_probe(struct of_device *ofdev,
goto error_cleanup;
}
- /* Now register all the child nodes */
- of_register_i2c_devices(adap, np);
-
dev_info(&ofdev->dev, "using %s mode\n",
dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
+ /* Now register all the child nodes */
+ of_register_i2c_devices(adap, np);
+
return 0;
error_cleanup:
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 5a4945d1dba..c3869d94ad4 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -469,7 +469,7 @@ mv64xxx_i2c_map_regs(struct platform_device *pd,
if (!r)
return -ENODEV;
- size = r->end - r->start + 1;
+ size = resource_size(r);
if (!request_mem_region(r->start, size, drv_data->adapter.name))
return -EBUSY;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index e5193bf7548..0dabe643ec5 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -216,6 +216,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
struct ocores_i2c_platform_data *pdata;
struct resource *res, *res2;
int ret;
+ int i;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -233,14 +234,14 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
if (!i2c)
return -ENOMEM;
- if (!request_mem_region(res->start, res->end - res->start + 1,
+ if (!request_mem_region(res->start, resource_size(res),
pdev->name)) {
dev_err(&pdev->dev, "Memory region busy\n");
ret = -EBUSY;
goto request_mem_failed;
}
- i2c->base = ioremap(res->start, res->end - res->start + 1);
+ i2c->base = ioremap(res->start, resource_size(res));
if (!i2c->base) {
dev_err(&pdev->dev, "Unable to map registers\n");
ret = -EIO;
@@ -271,6 +272,10 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
goto add_adapter_failed;
}
+ /* add in known devices to the bus */
+ for (i = 0; i < pdata->num_devices; i++)
+ i2c_new_device(&i2c->adap, pdata->devices + i);
+
return 0;
add_adapter_failed:
@@ -278,7 +283,7 @@ add_adapter_failed:
request_irq_failed:
iounmap(i2c->base);
map_failed:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
request_mem_failed:
kfree(i2c);
@@ -306,7 +311,7 @@ static int __devexit ocores_i2c_remove(struct platform_device* pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
kfree(i2c);
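The ocores probe now walks pdata->devices and calls i2c_new_device() on each entry, so board code can declare known slaves on this bus up front. A minimal sketch of such platform data, assuming a hypothetical board file; the chip name "tmp421" and address 0x4c are examples only, and the unrelated ocores fields are left out:

        /* Hypothetical board file fragment */
        static struct i2c_board_info ocores_i2c_devs[] = {
                { I2C_BOARD_INFO("tmp421", 0x4c) },
        };

        static struct ocores_i2c_platform_data myboard_i2c_data = {
                /* clock/register-stride fields omitted for brevity */
                .devices        = ocores_i2c_devs,
                .num_devices    = ARRAY_SIZE(ocores_i2c_devs),
        };

Each entry is instantiated against the freshly registered adapter, so the matching client drivers can bind as soon as they are available.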
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index ece0125a1ee..b606db85525 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -333,8 +333,18 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
if (cpu_is_omap2430() || cpu_is_omap34xx()) {
- /* HSI2C controller internal clk rate should be 19.2 Mhz */
- internal_clk = 19200;
+ /*
+ * HSI2C controller internal clk rate should be 19.2 MHz for
+ * HS and for all modes on 2430. On 34xx we can use a lower rate
+ * to get a longer filter period for better noise suppression.
+ * The filter period is one iclk (fclk for HS) period.
+ */
+ if (dev->speed > 400 || cpu_is_omap2430())
+ internal_clk = 19200;
+ else if (dev->speed > 100)
+ internal_clk = 9600;
+ else
+ internal_clk = 4000;
fclk_rate = clk_get_rate(dev->fclk) / 1000;
/* Compute prescaler divisor */
@@ -343,17 +353,28 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
/* If configured for High Speed */
if (dev->speed > 400) {
+ unsigned long scl;
+
/* For first phase of HS mode */
- fsscll = internal_clk / (400 * 2) - 6;
- fssclh = internal_clk / (400 * 2) - 6;
+ scl = internal_clk / 400;
+ fsscll = scl - (scl / 3) - 7;
+ fssclh = (scl / 3) - 5;
/* For second phase of HS mode */
- hsscll = fclk_rate / (dev->speed * 2) - 6;
- hssclh = fclk_rate / (dev->speed * 2) - 6;
+ scl = fclk_rate / dev->speed;
+ hsscll = scl - (scl / 3) - 7;
+ hssclh = (scl / 3) - 5;
+ } else if (dev->speed > 100) {
+ unsigned long scl;
+
+ /* Fast mode */
+ scl = internal_clk / dev->speed;
+ fsscll = scl - (scl / 3) - 7;
+ fssclh = (scl / 3) - 5;
} else {
- /* To handle F/S modes */
- fsscll = internal_clk / (dev->speed * 2) - 6;
- fssclh = internal_clk / (dev->speed * 2) - 6;
+ /* Standard mode */
+ fsscll = internal_clk / (dev->speed * 2) - 7;
+ fssclh = internal_clk / (dev->speed * 2) - 5;
}
scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll;
sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh;
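The reworked code derives both halves of the SCL period from a single scl = clock / speed value and biases the duty cycle to roughly one third high and two thirds low, instead of the old symmetric internal_clk / (speed * 2) - 6 formula. As a rough worked example using the values chosen above: fast mode at 400 kHz runs with internal_clk = 9600 kHz, so scl = 9600 / 400 = 24, giving fsscll = 24 - 24/3 - 7 = 9 and fssclh = 24/3 - 5 = 3.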
@@ -807,7 +828,7 @@ omap_i2c_probe(struct platform_device *pdev)
dev->idle = 1;
dev->dev = &pdev->dev;
dev->irq = irq->start;
- dev->base = ioremap(mem->start, mem->end - mem->start + 1);
+ dev->base = ioremap(mem->start, resource_size(mem));
if (!dev->base) {
r = -ENOMEM;
goto err_free_mem;
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 7b23891b7d5..c4df9d411cd 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -27,8 +27,6 @@
#include <asm/irq.h>
#include <asm/io.h>
-#define res_len(r) ((r)->end - (r)->start + 1)
-
struct i2c_pca_pf_data {
void __iomem *reg_base;
int irq; /* if 0, use polling */
@@ -148,7 +146,7 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
goto e_print;
}
- if (!request_mem_region(res->start, res_len(res), res->name)) {
+ if (!request_mem_region(res->start, resource_size(res), res->name)) {
ret = -ENOMEM;
goto e_print;
}
@@ -161,13 +159,13 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
init_waitqueue_head(&i2c->wait);
- i2c->reg_base = ioremap(res->start, res_len(res));
+ i2c->reg_base = ioremap(res->start, resource_size(res));
if (!i2c->reg_base) {
ret = -ENOMEM;
goto e_remap;
}
i2c->io_base = res->start;
- i2c->io_size = res_len(res);
+ i2c->io_size = resource_size(res);
i2c->irq = irq;
i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
@@ -250,7 +248,7 @@ e_reqirq:
e_remap:
kfree(i2c);
e_alloc:
- release_mem_region(res->start, res_len(res));
+ release_mem_region(res->start, resource_size(res));
e_print:
printk(KERN_ERR "Registering PCA9564/PCA9665 FAILED! (%d)\n", ret);
return ret;
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 0bdb2d7f057..7b57d5f267e 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -283,7 +283,7 @@ static int __devinit pmcmsptwi_probe(struct platform_device *pldev)
}
/* reserve the memory region */
- if (!request_mem_region(res->start, res->end - res->start + 1,
+ if (!request_mem_region(res->start, resource_size(res),
pldev->name)) {
dev_err(&pldev->dev,
"Unable to get memory/io address region 0x%08x\n",
@@ -294,7 +294,7 @@ static int __devinit pmcmsptwi_probe(struct platform_device *pldev)
/* remap the memory */
pmcmsptwi_data.iobase = ioremap_nocache(res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!pmcmsptwi_data.iobase) {
dev_err(&pldev->dev,
"Unable to ioremap address 0x%08x\n", res->start);
@@ -360,7 +360,7 @@ ret_unmap:
iounmap(pmcmsptwi_data.iobase);
ret_unreserve:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
ret_err:
return rc;
@@ -385,7 +385,7 @@ static int __devexit pmcmsptwi_remove(struct platform_device *pldev)
iounmap(pmcmsptwi_data.iobase);
res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
return 0;
}
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index acc7143d965..762e1e53088 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -34,10 +34,24 @@
#include <linux/err.h>
#include <linux/clk.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <mach/i2c.h>
+#include <plat/i2c.h>
+
+/*
+ * I2C register offsets are shifted left by 0 or 1 bit, depending on
+ * the SoC
+ */
+#define REG_SHIFT_0 (0 << 0)
+#define REG_SHIFT_1 (1 << 0)
+#define REG_SHIFT(d) ((d) & 0x1)
+
+static const struct platform_device_id i2c_pxa_id_table[] = {
+ { "pxa2xx-i2c", REG_SHIFT_1 },
+ { "pxa3xx-pwri2c", REG_SHIFT_0 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
/*
* I2C registers and bit definitions
@@ -979,12 +993,12 @@ static const struct i2c_algorithm i2c_pxa_pio_algorithm = {
.functionality = i2c_pxa_functionality,
};
-#define res_len(r) ((r)->end - (r)->start + 1)
static int i2c_pxa_probe(struct platform_device *dev)
{
struct pxa_i2c *i2c;
struct resource *res;
struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
+ struct platform_device_id *id = platform_get_device_id(dev);
int ret;
int irq;
@@ -993,7 +1007,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
if (res == NULL || irq < 0)
return -ENODEV;
- if (!request_mem_region(res->start, res_len(res), res->name))
+ if (!request_mem_region(res->start, resource_size(res), res->name))
return -ENOMEM;
i2c = kzalloc(sizeof(struct pxa_i2c), GFP_KERNEL);
@@ -1023,15 +1037,15 @@ static int i2c_pxa_probe(struct platform_device *dev)
goto eclk;
}
- i2c->reg_base = ioremap(res->start, res_len(res));
+ i2c->reg_base = ioremap(res->start, resource_size(res));
if (!i2c->reg_base) {
ret = -EIO;
goto eremap;
}
- i2c->reg_shift = (cpu_is_pxa3xx() && (dev->id == 1)) ? 0 : 1;
+ i2c->reg_shift = REG_SHIFT(id->driver_data);
i2c->iobase = res->start;
- i2c->iosize = res_len(res);
+ i2c->iosize = resource_size(res);
i2c->irq = irq;
@@ -1095,7 +1109,7 @@ eremap:
eclk:
kfree(i2c);
emalloc:
- release_mem_region(res->start, res_len(res));
+ release_mem_region(res->start, resource_size(res));
return ret;
}
@@ -1150,6 +1164,7 @@ static struct platform_driver i2c_pxa_driver = {
.name = "pxa2xx-i2c",
.owner = THIS_MODULE,
},
+ .id_table = i2c_pxa_id_table,
};
static int __init i2c_adap_pxa_init(void)
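With the id table above, the register stride no longer depends on cpu_is_pxa3xx() and the device id; it comes straight from the matched entry's driver_data, giving "pxa2xx-i2c" devices a shift of 1 and "pxa3xx-pwri2c" devices a shift of 0. A hedged sketch of how such a per-SoC shift is typically applied when forming a register address; the offset and helper names below are illustrative, not the driver's actual accessors:

        /* Illustrative only: scale a nominal register offset by the per-SoC shift */
        #define EXAMPLE_REG_OFFSET      0x08    /* hypothetical register offset */

        static inline u32 example_i2c_read(struct pxa_i2c *i2c)
        {
                return readl(i2c->reg_base + (EXAMPLE_REG_OFFSET << i2c->reg_shift));
        }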
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 1691ef0f1ee..8f42a4536cd 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -51,6 +51,11 @@ enum s3c24xx_i2c_state {
STATE_STOP
};
+enum s3c24xx_i2c_type {
+ TYPE_S3C2410,
+ TYPE_S3C2440,
+};
+
struct s3c24xx_i2c {
spinlock_t lock;
wait_queue_head_t wait;
@@ -88,8 +93,10 @@ struct s3c24xx_i2c {
static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
{
struct platform_device *pdev = to_platform_device(i2c->dev);
+ enum s3c24xx_i2c_type type;
- return !strcmp(pdev->name, "s3c2440-i2c");
+ type = platform_get_device_id(pdev)->driver_data;
+ return type == TYPE_S3C2440;
}
/* s3c24xx_i2c_master_complete
@@ -821,7 +828,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
goto err_clk;
}
- i2c->ioarea = request_mem_region(res->start, (res->end-res->start)+1,
+ i2c->ioarea = request_mem_region(res->start, resource_size(res),
pdev->name);
if (i2c->ioarea == NULL) {
@@ -830,7 +837,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
goto err_clk;
}
- i2c->regs = ioremap(res->start, (res->end-res->start)+1);
+ i2c->regs = ioremap(res->start, resource_size(res));
if (i2c->regs == NULL) {
dev_err(&pdev->dev, "cannot map IO\n");
@@ -969,52 +976,41 @@ static int s3c24xx_i2c_resume(struct platform_device *dev)
/* device driver for platform bus bits */
-static struct platform_driver s3c2410_i2c_driver = {
- .probe = s3c24xx_i2c_probe,
- .remove = s3c24xx_i2c_remove,
- .suspend_late = s3c24xx_i2c_suspend_late,
- .resume = s3c24xx_i2c_resume,
- .driver = {
- .owner = THIS_MODULE,
- .name = "s3c2410-i2c",
- },
+static struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-i2c",
+ .driver_data = TYPE_S3C2410,
+ }, {
+ .name = "s3c2440-i2c",
+ .driver_data = TYPE_S3C2440,
+ }, { },
};
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-static struct platform_driver s3c2440_i2c_driver = {
+static struct platform_driver s3c24xx_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
.suspend_late = s3c24xx_i2c_suspend_late,
.resume = s3c24xx_i2c_resume,
+ .id_table = s3c24xx_driver_ids,
.driver = {
.owner = THIS_MODULE,
- .name = "s3c2440-i2c",
+ .name = "s3c-i2c",
},
};
static int __init i2c_adap_s3c_init(void)
{
- int ret;
-
- ret = platform_driver_register(&s3c2410_i2c_driver);
- if (ret == 0) {
- ret = platform_driver_register(&s3c2440_i2c_driver);
- if (ret)
- platform_driver_unregister(&s3c2410_i2c_driver);
- }
-
- return ret;
+ return platform_driver_register(&s3c24xx_i2c_driver);
}
subsys_initcall(i2c_adap_s3c_init);
static void __exit i2c_adap_s3c_exit(void)
{
- platform_driver_unregister(&s3c2410_i2c_driver);
- platform_driver_unregister(&s3c2440_i2c_driver);
+ platform_driver_unregister(&s3c24xx_i2c_driver);
}
module_exit(i2c_adap_s3c_exit);
MODULE_DESCRIPTION("S3C24XX I2C Bus driver");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c2410-i2c");
-MODULE_ALIAS("platform:s3c2440-i2c");
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index baa28b73ae4..b9680f50f54 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -396,7 +396,7 @@ static int __devinit calc_CCR(unsigned long scl_hz)
signed char cdf, cdfm;
int scgd, scgdm, scgds;
- mclk = clk_get(NULL, "module_clk");
+ mclk = clk_get(NULL, "peripheral_clk");
if (IS_ERR(mclk)) {
return PTR_ERR(mclk);
} else {
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
new file mode 100644
index 00000000000..182e711318b
--- /dev/null
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -0,0 +1,1029 @@
+/*
+ * Copyright (C) 2007-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * ST DDC I2C master mode driver, used in e.g. U300 series platforms.
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+/* the name of this kernel module */
+#define NAME "stu300"
+
+/* CR (Control Register) 8bit (R/W) */
+#define I2C_CR (0x00000000)
+#define I2C_CR_RESET_VALUE (0x00)
+#define I2C_CR_RESET_UMASK (0x00)
+#define I2C_CR_DDC1_ENABLE (0x80)
+#define I2C_CR_TRANS_ENABLE (0x40)
+#define I2C_CR_PERIPHERAL_ENABLE (0x20)
+#define I2C_CR_DDC2B_ENABLE (0x10)
+#define I2C_CR_START_ENABLE (0x08)
+#define I2C_CR_ACK_ENABLE (0x04)
+#define I2C_CR_STOP_ENABLE (0x02)
+#define I2C_CR_INTERRUPT_ENABLE (0x01)
+/* SR1 (Status Register 1) 8bit (R/-) */
+#define I2C_SR1 (0x00000004)
+#define I2C_SR1_RESET_VALUE (0x00)
+#define I2C_SR1_RESET_UMASK (0x00)
+#define I2C_SR1_EVF_IND (0x80)
+#define I2C_SR1_ADD10_IND (0x40)
+#define I2C_SR1_TRA_IND (0x20)
+#define I2C_SR1_BUSY_IND (0x10)
+#define I2C_SR1_BTF_IND (0x08)
+#define I2C_SR1_ADSL_IND (0x04)
+#define I2C_SR1_MSL_IND (0x02)
+#define I2C_SR1_SB_IND (0x01)
+/* SR2 (Status Register 2) 8bit (R/-) */
+#define I2C_SR2 (0x00000008)
+#define I2C_SR2_RESET_VALUE (0x00)
+#define I2C_SR2_RESET_UMASK (0x40)
+#define I2C_SR2_MASK (0xBF)
+#define I2C_SR2_SCLFAL_IND (0x80)
+#define I2C_SR2_ENDAD_IND (0x20)
+#define I2C_SR2_AF_IND (0x10)
+#define I2C_SR2_STOPF_IND (0x08)
+#define I2C_SR2_ARLO_IND (0x04)
+#define I2C_SR2_BERR_IND (0x02)
+#define I2C_SR2_DDC2BF_IND (0x01)
+/* CCR (Clock Control Register) 8bit (R/W) */
+#define I2C_CCR (0x0000000C)
+#define I2C_CCR_RESET_VALUE (0x00)
+#define I2C_CCR_RESET_UMASK (0x00)
+#define I2C_CCR_MASK (0xFF)
+#define I2C_CCR_FMSM (0x80)
+#define I2C_CCR_CC_MASK (0x7F)
+/* OAR1 (Own Address Register 1) 8bit (R/W) */
+#define I2C_OAR1 (0x00000010)
+#define I2C_OAR1_RESET_VALUE (0x00)
+#define I2C_OAR1_RESET_UMASK (0x00)
+#define I2C_OAR1_ADD_MASK (0xFF)
+/* OAR2 (Own Address Register 2) 8bit (R/W) */
+#define I2C_OAR2 (0x00000014)
+#define I2C_OAR2_RESET_VALUE (0x40)
+#define I2C_OAR2_RESET_UMASK (0x19)
+#define I2C_OAR2_MASK (0xE6)
+#define I2C_OAR2_FR_25_10MHZ (0x00)
+#define I2C_OAR2_FR_10_1667MHZ (0x20)
+#define I2C_OAR2_FR_1667_2667MHZ (0x40)
+#define I2C_OAR2_FR_2667_40MHZ (0x60)
+#define I2C_OAR2_FR_40_5333MHZ (0x80)
+#define I2C_OAR2_FR_5333_66MHZ (0xA0)
+#define I2C_OAR2_FR_66_80MHZ (0xC0)
+#define I2C_OAR2_FR_80_100MHZ (0xE0)
+#define I2C_OAR2_FR_MASK (0xE0)
+#define I2C_OAR2_ADD_MASK (0x06)
+/* DR (Data Register) 8bit (R/W) */
+#define I2C_DR (0x00000018)
+#define I2C_DR_RESET_VALUE (0x00)
+#define I2C_DR_RESET_UMASK (0xFF)
+#define I2C_DR_D_MASK (0xFF)
+/* ECCR (Extended Clock Control Register) 8bit (R/W) */
+#define I2C_ECCR (0x0000001C)
+#define I2C_ECCR_RESET_VALUE (0x00)
+#define I2C_ECCR_RESET_UMASK (0xE0)
+#define I2C_ECCR_MASK (0x1F)
+#define I2C_ECCR_CC_MASK (0x1F)
+
+/*
+ * These events are more or less responses to commands
+ * sent into the hardware, presumably reflecting the state
+ * of an internal state machine.
+ */
+enum stu300_event {
+ STU300_EVENT_NONE = 0,
+ STU300_EVENT_1,
+ STU300_EVENT_2,
+ STU300_EVENT_3,
+ STU300_EVENT_4,
+ STU300_EVENT_5,
+ STU300_EVENT_6,
+ STU300_EVENT_7,
+ STU300_EVENT_8,
+ STU300_EVENT_9
+};
+
+enum stu300_error {
+ STU300_ERROR_NONE = 0,
+ STU300_ERROR_ACKNOWLEDGE_FAILURE,
+ STU300_ERROR_BUS_ERROR,
+ STU300_ERROR_ARBITRATION_LOST
+};
+
+/* timeout waiting for the controller to respond */
+#define STU300_TIMEOUT (msecs_to_jiffies(1000))
+
+/*
+ * The number of address send attempts tried before giving up.
+ * If the first one fails, it seems like 5 to 8 attempts are required.
+ */
+#define NUM_ADDR_RESEND_ATTEMPTS 10
+
+/* I2C clock speed, in Hz (0-400 kHz) */
+static unsigned int scl_frequency = 100000;
+module_param(scl_frequency, uint, 0644);
+
+/**
+ * struct stu300_dev - the stu300 driver state holder
+ * @pdev: parent platform device
+ * @adapter: corresponding I2C adapter
+ * @phybase: location of I/O area in memory
+ * @physize: size of I/O area in memory
+ * @virtbase: virtual address of the remapped I/O area
+ * @clk: hardware block clock
+ * @irq: assigned interrupt line
+ * @cmd_issue_lock: this locks the following cmd_ variables
+ * @cmd_complete: acknowledge completion for an I2C command
+ * @cmd_event: expected event coming in as a response to a command
+ * @cmd_err: error code as response to a command
+ * @speed: current bus speed in Hz
+ * @msg_index: index of current message
+ * @msg_len: length of current message
+ */
+struct stu300_dev {
+ struct platform_device *pdev;
+ struct i2c_adapter adapter;
+ resource_size_t phybase;
+ resource_size_t physize;
+ void __iomem *virtbase;
+ struct clk *clk;
+ int irq;
+ spinlock_t cmd_issue_lock;
+ struct completion cmd_complete;
+ enum stu300_event cmd_event;
+ enum stu300_error cmd_err;
+ unsigned int speed;
+ int msg_index;
+ int msg_len;
+};
+
+/* Local forward function declarations */
+static int stu300_init_hw(struct stu300_dev *dev);
+
+/*
+ * The block needs writes in both MSW and LSW in order
+ * for all data lines to reach their destination.
+ */
+static inline void stu300_wr8(u32 value, void __iomem *address)
+{
+ writel((value << 16) | value, address);
+}
+
+/*
+ * This merely masks off the duplicates which appear
+ * in bytes 1-3. You _MUST_ use 32-bit bus access on this
+ * device, else it will not work.
+ */
+static inline u32 stu300_r8(void __iomem *address)
+{
+ return readl(address) & 0x000000FFU;
+}
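These two helpers encode the access quirk described in their comments: the 8-bit value has to be mirrored into the upper half-word on writes, and reads come back with the byte duplicated, so only bits 7:0 are meaningful. Purely as an illustration of what ends up on the 32-bit bus:

        stu300_wr8(0x21, dev->virtbase + I2C_CR);       /* writel(0x00210021, ...) */
        val = stu300_r8(dev->virtbase + I2C_CR);        /* e.g. raw 0x21212121, masked to 0x21 */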
+
+/*
+ * Tells whether a certain event or events occurred in
+ * response to a command. The events represent states in
+ * the internal state machine of the hardware. The events
+ * are not very well described in the hardware
+ * documentation and can only be treated as abstract state
+ * machine states.
+ *
+ * Returns 0 if the event has not occurred, any other value if it has.
+ */
+static int stu300_event_occurred(struct stu300_dev *dev,
+ enum stu300_event mr_event) {
+ u32 status1;
+ u32 status2;
+
+ /* What event happened? */
+ status1 = stu300_r8(dev->virtbase + I2C_SR1);
+ if (!(status1 & I2C_SR1_EVF_IND))
+ /* No event at all */
+ return 0;
+ status2 = stu300_r8(dev->virtbase + I2C_SR2);
+
+ switch (mr_event) {
+ case STU300_EVENT_1:
+ if (status1 & I2C_SR1_ADSL_IND)
+ return 1;
+ break;
+ case STU300_EVENT_2:
+ case STU300_EVENT_3:
+ case STU300_EVENT_7:
+ case STU300_EVENT_8:
+ if (status1 & I2C_SR1_BTF_IND) {
+ if (status2 & I2C_SR2_AF_IND)
+ dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE;
+ else if (status2 & I2C_SR2_BERR_IND)
+ dev->cmd_err = STU300_ERROR_BUS_ERROR;
+ return 1;
+ }
+ break;
+ case STU300_EVENT_4:
+ if (status2 & I2C_SR2_STOPF_IND)
+ return 1;
+ break;
+ case STU300_EVENT_5:
+ if (status1 & I2C_SR1_SB_IND)
+ /* Clear start bit */
+ return 1;
+ break;
+ case STU300_EVENT_6:
+ if (status2 & I2C_SR2_ENDAD_IND) {
+ /* First check for any errors */
+ if (status2 & I2C_SR2_AF_IND)
+ dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE;
+ return 1;
+ }
+ break;
+ case STU300_EVENT_9:
+ if (status1 & I2C_SR1_ADD10_IND)
+ return 1;
+ break;
+ default:
+ break;
+ }
+ if (status2 & I2C_SR2_ARLO_IND)
+ dev->cmd_err = STU300_ERROR_ARBITRATION_LOST;
+ return 0;
+}
+
+static irqreturn_t stu300_irh(int irq, void *data)
+{
+ struct stu300_dev *dev = data;
+ int res;
+
+ /* See if this was what we were waiting for */
+ spin_lock(&dev->cmd_issue_lock);
+ if (dev->cmd_event != STU300_EVENT_NONE) {
+ res = stu300_event_occurred(dev, dev->cmd_event);
+ if (res || dev->cmd_err != STU300_ERROR_NONE) {
+ u32 val;
+
+ complete(&dev->cmd_complete);
+ /* Block any multiple interrupts */
+ val = stu300_r8(dev->virtbase + I2C_CR);
+ val &= ~I2C_CR_INTERRUPT_ENABLE;
+ stu300_wr8(val, dev->virtbase + I2C_CR);
+ }
+ }
+ spin_unlock(&dev->cmd_issue_lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Sends a command and then waits, via the IRQ handler and a completion,
+ * for the corresponding event to occur.
+ */
+static int stu300_start_and_await_event(struct stu300_dev *dev,
+ u8 cr_value,
+ enum stu300_event mr_event)
+{
+ int ret;
+
+ if (unlikely(irqs_disabled())) {
+ /* TODO: implement polling for this case if need be. */
+ WARN(1, "irqs are disabled, cannot poll for event\n");
+ return -EIO;
+ }
+
+ /* Lock command issue, fill in an event we wait for */
+ spin_lock_irq(&dev->cmd_issue_lock);
+ init_completion(&dev->cmd_complete);
+ dev->cmd_err = STU300_ERROR_NONE;
+ dev->cmd_event = mr_event;
+ spin_unlock_irq(&dev->cmd_issue_lock);
+
+ /* Turn on interrupt, send command and wait. */
+ cr_value |= I2C_CR_INTERRUPT_ENABLE;
+ stu300_wr8(cr_value, dev->virtbase + I2C_CR);
+ ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
+ STU300_TIMEOUT);
+
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev,
+ "wait_for_completion_interruptible_timeout() "
+ "returned %d waiting for event %04x\n", ret, mr_event);
+ return ret;
+ }
+
+ if (ret == 0) {
+ dev_err(&dev->pdev->dev, "controller timed out "
+ "waiting for event %d, reinit hardware\n", mr_event);
+ (void) stu300_init_hw(dev);
+ return -ETIMEDOUT;
+ }
+
+ if (dev->cmd_err != STU300_ERROR_NONE) {
+ dev_err(&dev->pdev->dev, "controller (start) "
+ "error %d waiting for event %d, reinit hardware\n",
+ dev->cmd_err, mr_event);
+ (void) stu300_init_hw(dev);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * This waits for an event flag to be set; if it is not set on entry, an
+ * interrupt is configured and we wait for the flag using a completion.
+ */
+static int stu300_await_event(struct stu300_dev *dev,
+ enum stu300_event mr_event)
+{
+ int ret;
+ u32 val;
+
+ if (unlikely(irqs_disabled())) {
+ /* TODO: implement polling for this case if need be. */
+ dev_err(&dev->pdev->dev, "irqs are disabled on this "
+ "system!\n");
+ return -EIO;
+ }
+
+ /* Is it already here? */
+ spin_lock_irq(&dev->cmd_issue_lock);
+ dev->cmd_err = STU300_ERROR_NONE;
+ if (stu300_event_occurred(dev, mr_event)) {
+ spin_unlock_irq(&dev->cmd_issue_lock);
+ goto exit_await_check_err;
+ }
+ init_completion(&dev->cmd_complete);
+ dev->cmd_err = STU300_ERROR_NONE;
+ dev->cmd_event = mr_event;
+
+ /* Turn on the I2C interrupt for current operation */
+ val = stu300_r8(dev->virtbase + I2C_CR);
+ val |= I2C_CR_INTERRUPT_ENABLE;
+ stu300_wr8(val, dev->virtbase + I2C_CR);
+
+ /* Write it twice, out of paranoia (possible HW glitch) */
+ stu300_wr8(val, dev->virtbase + I2C_CR);
+
+ /* Check again: is it already here? */
+ if (unlikely(stu300_event_occurred(dev, mr_event))) {
+ /* Disable IRQ again. */
+ val &= ~I2C_CR_INTERRUPT_ENABLE;
+ stu300_wr8(val, dev->virtbase + I2C_CR);
+ spin_unlock_irq(&dev->cmd_issue_lock);
+ goto exit_await_check_err;
+ }
+
+ /* Unlock the command block and wait for the event to occur */
+ spin_unlock_irq(&dev->cmd_issue_lock);
+ ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
+ STU300_TIMEOUT);
+
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev,
+ "wait_for_completion_interruptible_timeout()"
+ "returned %d waiting for event %04x\n", ret, mr_event);
+ return ret;
+ }
+
+ if (ret == 0) {
+ if (mr_event != STU300_EVENT_6) {
+ dev_err(&dev->pdev->dev, "controller "
+ "timed out waiting for event %d, reinit "
+ "hardware\n", mr_event);
+ (void) stu300_init_hw(dev);
+ }
+ return -ETIMEDOUT;
+ }
+
+ exit_await_check_err:
+ if (dev->cmd_err != STU300_ERROR_NONE) {
+ if (mr_event != STU300_EVENT_6) {
+ dev_err(&dev->pdev->dev, "controller "
+ "error (await_event) %d waiting for event %d, "
+ "reinit hardware\n", dev->cmd_err, mr_event);
+ (void) stu300_init_hw(dev);
+ }
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Waits for the busy bit to go low by repeated polling.
+ */
+#define BUSY_RELEASE_ATTEMPTS 10
+static int stu300_wait_while_busy(struct stu300_dev *dev)
+{
+ unsigned long timeout;
+ int i;
+
+ for (i = 0; i < BUSY_RELEASE_ATTEMPTS; i++) {
+ timeout = jiffies + STU300_TIMEOUT;
+
+ while (!time_after(jiffies, timeout)) {
+ /* Is not busy? */
+ if ((stu300_r8(dev->virtbase + I2C_SR1) &
+ I2C_SR1_BUSY_IND) == 0)
+ return 0;
+ msleep(1);
+ }
+
+ dev_err(&dev->pdev->dev, "transaction timed out "
+ "waiting for device to be free (not busy). "
+ "Attempt: %d\n", i+1);
+
+ dev_err(&dev->pdev->dev, "base address = "
+ "0x%08x, reinit hardware\n", (u32) dev->virtbase);
+
+ (void) stu300_init_hw(dev);
+ }
+
+ dev_err(&dev->pdev->dev, "giving up after %d attempts "
+ "to reset the bus.\n", BUSY_RELEASE_ATTEMPTS);
+
+ return -ETIMEDOUT;
+}
+
+struct stu300_clkset {
+ unsigned long rate;
+ u32 setting;
+};
+
+static const struct stu300_clkset stu300_clktable[] = {
+ { 0, 0xFFU },
+ { 2500000, I2C_OAR2_FR_25_10MHZ },
+ { 10000000, I2C_OAR2_FR_10_1667MHZ },
+ { 16670000, I2C_OAR2_FR_1667_2667MHZ },
+ { 26670000, I2C_OAR2_FR_2667_40MHZ },
+ { 40000000, I2C_OAR2_FR_40_5333MHZ },
+ { 53330000, I2C_OAR2_FR_5333_66MHZ },
+ { 66000000, I2C_OAR2_FR_66_80MHZ },
+ { 80000000, I2C_OAR2_FR_80_100MHZ },
+ { 100000000, 0xFFU },
+};
+
+static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
+{
+
+ u32 val;
+ int i = 0;
+
+ /* Locate the appropriate clock setting */
+ while (i < ARRAY_SIZE(stu300_clktable) &&
+ stu300_clktable[i].rate < clkrate)
+ i++;
+
+ if (stu300_clktable[i].setting == 0xFFU) {
+ dev_err(&dev->pdev->dev, "too %s clock rate requested "
+ "(%lu Hz).\n", i ? "high" : "low", clkrate);
+ return -EINVAL;
+ }
+
+ stu300_wr8(stu300_clktable[i].setting,
+ dev->virtbase + I2C_OAR2);
+
+ dev_dbg(&dev->pdev->dev, "Clock rate %lu Hz, I2C bus speed %d Hz "
+ "virtbase %p\n", clkrate, dev->speed, dev->virtbase);
+
+ if (dev->speed > 100000)
+ /* Fast Mode I2C */
+ val = ((clkrate/dev->speed)-9)/3;
+ else
+ /* Standard Mode I2C */
+ val = ((clkrate/dev->speed)-7)/2;
+
+ /* According to spec the divider must be > 2 */
+ if (val < 0x002) {
+ dev_err(&dev->pdev->dev, "too low clock rate (%lu Hz).\n",
+ clkrate);
+ return -EINVAL;
+ }
+
+ /* We only have a 12-bit clock divider! */
+ if (val & 0xFFFFF000U) {
+ dev_err(&dev->pdev->dev, "too high clock rate (%lu Hz).\n",
+ clkrate);
+ return -EINVAL;
+ }
+
+ if (dev->speed > 100000) {
+ /* CC6..CC0 */
+ stu300_wr8((val & I2C_CCR_CC_MASK) | I2C_CCR_FMSM,
+ dev->virtbase + I2C_CCR);
+ dev_dbg(&dev->pdev->dev, "set clock divider to 0x%08x, "
+ "Fast Mode I2C\n", val);
+ } else {
+ /* CC6..CC0 */
+ stu300_wr8((val & I2C_CCR_CC_MASK),
+ dev->virtbase + I2C_CCR);
+ dev_dbg(&dev->pdev->dev, "set clock divider to "
+ "0x%08x, Standard Mode I2C\n", val);
+ }
+
+ /* CC11..CC7 */
+ stu300_wr8(((val >> 7) & 0x1F),
+ dev->virtbase + I2C_ECCR);
+
+ return 0;
+}
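A worked example of the divider math, assuming the 26 MHz block clock mentioned further down and the driver's default 100 kHz bus speed: standard mode gives val = (26000000 / 100000 - 7) / 2 = (260 - 7) / 2 = 126 = 0x7E, which fits in CC6..CC0, so I2C_CCR is written with 0x7E and I2C_ECCR with 0. At 400 kHz the fast-mode formula gives val = (65 - 9) / 3 = 18 = 0x12, written as 0x92 once I2C_CCR_FMSM is OR-ed in.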
+
+
+static int stu300_init_hw(struct stu300_dev *dev)
+{
+ u32 dummy;
+ unsigned long clkrate;
+ int ret;
+
+ /* Disable controller */
+ stu300_wr8(0x00, dev->virtbase + I2C_CR);
+ /*
+ * Set own address to some default value (0x00).
+ * We do not support slave mode anyway.
+ */
+ stu300_wr8(0x00, dev->virtbase + I2C_OAR1);
+ /*
+ * The I2C controller only operates properly at 26 MHz, but we
+ * program this driver as if we didn't know. This also sets the two
+ * high bits of the own address to zero.
+ * There is no known hardware issue with running at 13 MHz;
+ * however, speeds over 200 kHz are not used.
+ */
+ clkrate = clk_get_rate(dev->clk);
+ ret = stu300_set_clk(dev, clkrate);
+ if (ret)
+ return ret;
+ /*
+ * Enable block, do it TWICE (hardware glitch)
+ * Setting bit 7 can enable DDC mode. (Not used currently.)
+ */
+ stu300_wr8(I2C_CR_PERIPHERAL_ENABLE,
+ dev->virtbase + I2C_CR);
+ stu300_wr8(I2C_CR_PERIPHERAL_ENABLE,
+ dev->virtbase + I2C_CR);
+ /* Make a dummy read of the status register SR1 & SR2 */
+ dummy = stu300_r8(dev->virtbase + I2C_SR2);
+ dummy = stu300_r8(dev->virtbase + I2C_SR1);
+
+ return 0;
+}
+
+
+
+/* Send slave address. */
+static int stu300_send_address(struct stu300_dev *dev,
+ struct i2c_msg *msg, int resend)
+{
+ u32 val;
+ int ret;
+
+ if (msg->flags & I2C_M_TEN)
+ /* This is probably how 10 bit addresses look */
+ val = (0xf0 | (((u32) msg->addr & 0x300) >> 7)) &
+ I2C_DR_D_MASK;
+ else
+ val = ((msg->addr << 1) & I2C_DR_D_MASK);
+
+ if (msg->flags & I2C_M_RD) {
+ /* This is the direction bit */
+ val |= 0x01;
+ if (resend)
+ dev_dbg(&dev->pdev->dev, "read resend\n");
+ } else if (resend)
+ dev_dbg(&dev->pdev->dev, "write resend\n");
+ stu300_wr8(val, dev->virtbase + I2C_DR);
+
+ /* For 10bit addressing, await 10bit request (EVENT 9) */
+ if (msg->flags & I2C_M_TEN) {
+ ret = stu300_await_event(dev, STU300_EVENT_9);
+ /*
+ * The slave device wants a 10bit address, send the rest
+ * of the bits (the LSBits)
+ */
+ val = msg->addr & I2C_DR_D_MASK;
+ /* This clears "event 9" */
+ stu300_wr8(val, dev->virtbase + I2C_DR);
+ if (ret != 0)
+ return ret;
+ }
+ /* FIXME: Why no else here? two events for 10bit?
+ * Await event 6 (normal) or event 9 (10bit)
+ */
+
+ if (resend)
+ dev_dbg(&dev->pdev->dev, "await event 6\n");
+ ret = stu300_await_event(dev, STU300_EVENT_6);
+
+ /*
+ * Clear any pending EVENT 6 no matter what happened during
+ * await_event.
+ */
+ val = stu300_r8(dev->virtbase + I2C_CR);
+ val |= I2C_CR_PERIPHERAL_ENABLE;
+ stu300_wr8(val, dev->virtbase + I2C_CR);
+
+ return ret;
+}
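To make the 10-bit path concrete (assuming the encoding above is right, as the "probably" in the comment hints): for a read from 10-bit address 0x2A5, the first byte written to I2C_DR is 0xf0 | ((0x2A5 & 0x300) >> 7) | 0x01 = 0xF5, the controller then signals EVENT 9, and the second byte 0x2A5 & 0xFF = 0xA5 completes the address before the usual EVENT 6 wait.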
+
+static int stu300_xfer_msg(struct i2c_adapter *adap,
+ struct i2c_msg *msg, int stop)
+{
+ u32 cr;
+ u32 val;
+ u32 i;
+ int ret;
+ int attempts = 0;
+ struct stu300_dev *dev = i2c_get_adapdata(adap);
+
+
+ clk_enable(dev->clk);
+
+ /* Remove this if (0) to trace each and every message. */
+ if (0) {
+ dev_dbg(&dev->pdev->dev, "I2C message to: 0x%04x, len: %d, "
+ "flags: 0x%04x, stop: %d\n",
+ msg->addr, msg->len, msg->flags, stop);
+ }
+
+ /* Zero-length messages are not supported by this hardware */
+ if (msg->len == 0) {
+ ret = -EINVAL;
+ goto exit_disable;
+ }
+
+ /*
+ * For some reason, sending the address sometimes fails when running
+ * on the 13 MHz clock. No interrupt arrives. This is a workaround,
+ * which tries to restart and send the address up to 10 times before
+ * really giving up. Usually 5 to 8 attempts are enough.
+ */
+ do {
+ if (attempts)
+ dev_dbg(&dev->pdev->dev, "wait while busy\n");
+ /* Check that the bus is free, or wait until some timeout */
+ ret = stu300_wait_while_busy(dev);
+ if (ret != 0)
+ goto exit_disable;
+
+ if (attempts)
+ dev_dbg(&dev->pdev->dev, "re-int hw\n");
+ /*
+ * According to ST, there is no problem if the clock is
+ * changed between 13 and 26 MHz during a transfer.
+ */
+ ret = stu300_init_hw(dev);
+ if (ret)
+ goto exit_disable;
+
+ /* Send a start condition */
+ cr = I2C_CR_PERIPHERAL_ENABLE;
+ /* Setting the START bit puts the block in master mode */
+ if (!(msg->flags & I2C_M_NOSTART))
+ cr |= I2C_CR_START_ENABLE;
+ if ((msg->flags & I2C_M_RD) && (msg->len > 1))
+ /* On read more than 1 byte, we need ack. */
+ cr |= I2C_CR_ACK_ENABLE;
+ /* Check that it gets through */
+ if (!(msg->flags & I2C_M_NOSTART)) {
+ if (attempts)
+ dev_dbg(&dev->pdev->dev, "send start event\n");
+ ret = stu300_start_and_await_event(dev, cr,
+ STU300_EVENT_5);
+ }
+
+ if (attempts)
+ dev_dbg(&dev->pdev->dev, "send address\n");
+
+ if (ret == 0)
+ /* Send address */
+ ret = stu300_send_address(dev, msg, attempts != 0);
+
+ if (ret != 0) {
+ attempts++;
+ dev_dbg(&dev->pdev->dev, "failed sending address, "
+ "retrying. Attempt: %d msg_index: %d/%d\n",
+ attempts, dev->msg_index, dev->msg_len);
+ }
+
+ } while (ret != 0 && attempts < NUM_ADDR_RESEND_ATTEMPTS);
+
+ if (attempts < NUM_ADDR_RESEND_ATTEMPTS && attempts > 0) {
+ dev_dbg(&dev->pdev->dev, "managed to get address "
+ "through after %d attempts\n", attempts);
+ } else if (attempts == NUM_ADDR_RESEND_ATTEMPTS) {
+ dev_dbg(&dev->pdev->dev, "I give up, tried %d times "
+ "to resend address.\n",
+ NUM_ADDR_RESEND_ATTEMPTS);
+ goto exit_disable;
+ }
+
+ if (msg->flags & I2C_M_RD) {
+ /* READ: we read the actual bytes one at a time */
+ for (i = 0; i < msg->len; i++) {
+ if (i == msg->len-1) {
+ /*
+ * Disable ACK and set STOP condition before
+ * reading last byte
+ */
+ val = I2C_CR_PERIPHERAL_ENABLE;
+
+ if (stop)
+ val |= I2C_CR_STOP_ENABLE;
+
+ stu300_wr8(val,
+ dev->virtbase + I2C_CR);
+ }
+ /* Wait for this byte... */
+ ret = stu300_await_event(dev, STU300_EVENT_7);
+ if (ret != 0)
+ goto exit_disable;
+ /* This clears event 7 */
+ msg->buf[i] = (u8) stu300_r8(dev->virtbase + I2C_DR);
+ }
+ } else {
+ /* WRITE: we send the actual bytes one at a time */
+ for (i = 0; i < msg->len; i++) {
+ /* Write the byte */
+ stu300_wr8(msg->buf[i],
+ dev->virtbase + I2C_DR);
+ /* Check status */
+ ret = stu300_await_event(dev, STU300_EVENT_8);
+ /* Next write to DR will clear event 8 */
+ if (ret != 0) {
+ dev_err(&dev->pdev->dev, "error awaiting "
+ "event 8 (%d)\n", ret);
+ goto exit_disable;
+ }
+ }
+ /* Check NAK */
+ if (!(msg->flags & I2C_M_IGNORE_NAK)) {
+ if (stu300_r8(dev->virtbase + I2C_SR2) &
+ I2C_SR2_AF_IND) {
+ dev_err(&dev->pdev->dev, "I2C payload "
+ "send returned NAK!\n");
+ ret = -EIO;
+ goto exit_disable;
+ }
+ }
+ if (stop) {
+ /* Send stop condition */
+ val = I2C_CR_PERIPHERAL_ENABLE;
+ val |= I2C_CR_STOP_ENABLE;
+ stu300_wr8(val, dev->virtbase + I2C_CR);
+ }
+ }
+
+ /* Check that the bus is free, or wait until some timeout occurs */
+ ret = stu300_wait_while_busy(dev);
+ if (ret != 0) {
+ dev_err(&dev->pdev->dev, "timout waiting for transfer "
+ "to commence.\n");
+ goto exit_disable;
+ }
+
+ /* Dummy read status registers */
+ val = stu300_r8(dev->virtbase + I2C_SR2);
+ val = stu300_r8(dev->virtbase + I2C_SR1);
+ ret = 0;
+
+ exit_disable:
+ /* Disable controller */
+ stu300_wr8(0x00, dev->virtbase + I2C_CR);
+ clk_disable(dev->clk);
+ return ret;
+}
+
+static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ int ret = -1;
+ int i;
+ struct stu300_dev *dev = i2c_get_adapdata(adap);
+ dev->msg_len = num;
+ for (i = 0; i < num; i++) {
+ /*
+ * Another driver appears to send a stop for each message;
+ * here we only do that for the last message. Some peripherals
+ * may require that behaviour; if so, their drivers have to
+ * send single messages in order to get a "stop" after each
+ * message.
+ */
+ dev->msg_index = i;
+
+ ret = stu300_xfer_msg(adap, &msgs[i], (i == (num - 1)));
+ if (ret != 0) {
+ num = ret;
+ break;
+ }
+ }
+
+ return num;
+}
+
+static u32 stu300_func(struct i2c_adapter *adap)
+{
+ /* This is the simplest thing you can think of... */
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm stu300_algo = {
+ .master_xfer = stu300_xfer,
+ .functionality = stu300_func,
+};
+
+static int __init
+stu300_probe(struct platform_device *pdev)
+{
+ struct stu300_dev *dev;
+ struct i2c_adapter *adap;
+ struct resource *res;
+ int bus_nr;
+ int ret = 0;
+
+ dev = kzalloc(sizeof(struct stu300_dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&pdev->dev, "could not allocate device struct\n");
+ ret = -ENOMEM;
+ goto err_no_devmem;
+ }
+
+ bus_nr = pdev->id;
+ dev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ ret = PTR_ERR(dev->clk);
+ dev_err(&pdev->dev, "could not retrieve i2c bus clock\n");
+ goto err_no_clk;
+ }
+
+ dev->pdev = pdev;
+ platform_set_drvdata(pdev, dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENOENT;
+ goto err_no_resource;
+ }
+
+ dev->phybase = res->start;
+ dev->physize = resource_size(res);
+
+ if (request_mem_region(dev->phybase, dev->physize,
+ NAME " I/O Area") == NULL) {
+ ret = -EBUSY;
+ goto err_no_ioregion;
+ }
+
+ dev->virtbase = ioremap(dev->phybase, dev->physize);
+ dev_dbg(&pdev->dev, "initialize bus device I2C%d on virtual "
+ "base %p\n", bus_nr, dev->virtbase);
+ if (!dev->virtbase) {
+ ret = -ENOMEM;
+ goto err_no_ioremap;
+ }
+
+ dev->irq = platform_get_irq(pdev, 0);
+ if (request_irq(dev->irq, stu300_irh, IRQF_DISABLED,
+ NAME, dev)) {
+ ret = -EIO;
+ goto err_no_irq;
+ }
+
+ dev->speed = scl_frequency;
+
+ clk_enable(dev->clk);
+ ret = stu300_init_hw(dev);
+ clk_disable(dev->clk);
+
+ if (ret != 0) {
+ dev_err(&dev->pdev->dev, "error initializing hardware.\n");
+ goto err_init_hw;
+ }
+
+ /* IRQ event handling initialization */
+ spin_lock_init(&dev->cmd_issue_lock);
+ dev->cmd_event = STU300_EVENT_NONE;
+ dev->cmd_err = STU300_ERROR_NONE;
+
+ adap = &dev->adapter;
+ adap->owner = THIS_MODULE;
+ /* DDC class but actually often used for more generic I2C */
+ adap->class = I2C_CLASS_DDC;
+ strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
+ sizeof(adap->name));
+ adap->nr = bus_nr;
+ adap->algo = &stu300_algo;
+ adap->dev.parent = &pdev->dev;
+ i2c_set_adapdata(adap, dev);
+
+ /* i2c device drivers may be active on return from add_adapter() */
+ ret = i2c_add_numbered_adapter(adap);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "failure adding ST Micro DDC "
+ "I2C adapter\n");
+ goto err_add_adapter;
+ }
+ return 0;
+
+ err_add_adapter:
+ err_init_hw:
+ free_irq(dev->irq, dev);
+ err_no_irq:
+ iounmap(dev->virtbase);
+ err_no_ioremap:
+ release_mem_region(dev->phybase, dev->physize);
+ err_no_ioregion:
+ platform_set_drvdata(pdev, NULL);
+ err_no_resource:
+ clk_put(dev->clk);
+ err_no_clk:
+ kfree(dev);
+ err_no_devmem:
+ dev_err(&pdev->dev, "failed to add " NAME " adapter: %d\n",
+ pdev->id);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int stu300_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct stu300_dev *dev = platform_get_drvdata(pdev);
+
+ /* Turn off everything */
+ stu300_wr8(0x00, dev->virtbase + I2C_CR);
+ return 0;
+}
+
+static int stu300_resume(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct stu300_dev *dev = platform_get_drvdata(pdev);
+
+ clk_enable(dev->clk);
+ ret = stu300_init_hw(dev);
+ clk_disable(dev->clk);
+
+ if (ret != 0)
+ dev_err(&pdev->dev, "error re-initializing hardware.\n");
+ return ret;
+}
+#else
+#define stu300_suspend NULL
+#define stu300_resume NULL
+#endif
+
+static int __exit
+stu300_remove(struct platform_device *pdev)
+{
+ struct stu300_dev *dev = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&dev->adapter);
+ /* Turn off everything */
+ stu300_wr8(0x00, dev->virtbase + I2C_CR);
+ free_irq(dev->irq, dev);
+ iounmap(dev->virtbase);
+ release_mem_region(dev->phybase, dev->physize);
+ clk_put(dev->clk);
+ platform_set_drvdata(pdev, NULL);
+ kfree(dev);
+ return 0;
+}
+
+static struct platform_driver stu300_i2c_driver = {
+ .driver = {
+ .name = NAME,
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(stu300_remove),
+ .suspend = stu300_suspend,
+ .resume = stu300_resume,
+
+};
+
+static int __init stu300_init(void)
+{
+ return platform_driver_probe(&stu300_i2c_driver, stu300_probe);
+}
+
+static void __exit stu300_exit(void)
+{
+ platform_driver_unregister(&stu300_i2c_driver);
+}
+
+/*
+ * The systems using this bus often have very basic devices such
+ * as regulators on the I2C bus, so this needs to be loaded early.
+ * Therefore it is registered via subsys_initcall().
+ */
+subsys_initcall(stu300_init);
+module_exit(stu300_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
+MODULE_DESCRIPTION("ST Micro DDC I2C adapter (" NAME ")");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" NAME);
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index fede619ba22..70de8216346 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -76,7 +76,7 @@ static int i2c_versatile_probe(struct platform_device *dev)
goto err_out;
}
- if (!request_mem_region(r->start, r->end - r->start + 1, "versatile-i2c")) {
+ if (!request_mem_region(r->start, resource_size(r), "versatile-i2c")) {
ret = -EBUSY;
goto err_out;
}
@@ -87,7 +87,7 @@ static int i2c_versatile_probe(struct platform_device *dev)
goto err_release;
}
- i2c->base = ioremap(r->start, r->end - r->start + 1);
+ i2c->base = ioremap(r->start, resource_size(r));
if (!i2c->base) {
ret = -ENOMEM;
goto err_free;
@@ -118,7 +118,7 @@ static int i2c_versatile_probe(struct platform_device *dev)
err_free:
kfree(i2c);
err_release:
- release_mem_region(r->start, r->end - r->start + 1);
+ release_mem_region(r->start, resource_size(r));
err_out:
return ret;
}
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 02e6f724b05..54d810a4d00 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -37,6 +37,7 @@
VT8251 0x3287 yes
CX700 0x8324 yes
VX800/VX820 0x8353 yes
+ VX855/VX875 0x8409 yes
Note: we assume there can only be one device, with one SMBus interface.
*/
@@ -404,6 +405,7 @@ found:
switch (pdev->device) {
case PCI_DEVICE_ID_VIA_CX700:
case PCI_DEVICE_ID_VIA_VX800:
+ case PCI_DEVICE_ID_VIA_VX855:
case PCI_DEVICE_ID_VIA_8251:
case PCI_DEVICE_ID_VIA_8237:
case PCI_DEVICE_ID_VIA_8237A:
@@ -469,6 +471,8 @@ static struct pci_device_id vt596_ids[] = {
.driver_data = SMBBA3 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800),
.driver_data = SMBBA3 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855),
+ .driver_data = SMBBA3 },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-voodoo3.c b/drivers/i2c/busses/i2c-voodoo3.c
index 1a474acc0dd..7663d57833a 100644
--- a/drivers/i2c/busses/i2c-voodoo3.c
+++ b/drivers/i2c/busses/i2c-voodoo3.c
@@ -163,7 +163,6 @@ static struct i2c_algo_bit_data voo_i2c_bit_data = {
static struct i2c_adapter voodoo3_i2c_adapter = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_TV_ANALOG,
.name = "I2C Voodoo3/Banshee adapter",
.algo_data = &voo_i2c_bit_data,
};
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 8f8c81eb0ae..02d746c9c47 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -64,21 +64,6 @@ config SENSORS_PCA9539
This driver is deprecated and will be dropped soon. Use
drivers/gpio/pca953x.c instead.
-config SENSORS_MAX6875
- tristate "Maxim MAX6875 Power supply supervisor"
- depends on EXPERIMENTAL
- help
- If you say yes here you get support for the Maxim MAX6875
- EEPROM-programmable, quad power-supply sequencer/supervisor.
-
- This provides an interface to program the EEPROM and reset the chip.
-
- This driver also supports the Maxim MAX6874 hex power-supply
- sequencer/supervisor if found at a compatible address.
-
- This driver can also be built as a module. If so, the module
- will be called max6875.
-
config SENSORS_TSL2550
tristate "Taos TSL2550 ambient light sensor"
depends on EXPERIMENTAL
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index 55a37603718..f4680d16ee3 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -11,7 +11,6 @@
#
obj-$(CONFIG_DS1682) += ds1682.o
-obj-$(CONFIG_SENSORS_MAX6875) += max6875.o
obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o
obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o
obj-$(CONFIG_PCF8575) += pcf8575.o
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 85e2e919d1c..5ed622ee65c 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -29,7 +29,6 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/idr.h>
-#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/hardirq.h>
@@ -451,16 +450,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
mutex_lock(&core_lock);
- /* Add the adapter to the driver core.
- * If the parent pointer is not set up,
- * we add this adapter to the host bus.
- */
- if (adap->dev.parent == NULL) {
- adap->dev.parent = &platform_bus;
- pr_debug("I2C adapter driver [%s] forgot to specify "
- "physical device\n", adap->name);
- }
-
/* Set default timeout to 1 second if not already set */
if (adap->timeout == 0)
adap->timeout = HZ;
@@ -1022,7 +1011,8 @@ module_exit(i2c_exit);
*/
int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- int ret;
+ unsigned long orig_jiffies;
+ int ret, try;
/* REVISIT the fault reporting model here is weak:
*
@@ -1060,7 +1050,15 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
mutex_lock_nested(&adap->bus_lock, adap->level);
}
- ret = adap->algo->master_xfer(adap,msgs,num);
+ /* Retry automatically on arbitration loss */
+ orig_jiffies = jiffies;
+ for (ret = 0, try = 0; try <= adap->retries; try++) {
+ ret = adap->algo->master_xfer(adap, msgs, num);
+ if (ret != -EAGAIN)
+ break;
+ if (time_after(jiffies, orig_jiffies + adap->timeout))
+ break;
+ }
mutex_unlock(&adap->bus_lock);
return ret;
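The loop above only re-issues the transfer when the algorithm driver reports -EAGAIN, and gives up after adap->retries attempts or once adap->timeout jiffies have elapsed. A hedged sketch of how a bus driver is expected to cooperate; the helper, status value and struct name are invented, only the -EAGAIN convention matters:

        /* Illustrative only: report arbitration loss as -EAGAIN so the core retries */
        static int example_master_xfer(struct i2c_adapter *adap,
                                       struct i2c_msg *msgs, int num)
        {
                struct example_i2c *i2c = i2c_get_adapdata(adap);

                if (example_do_transfer(i2c, msgs, num) == EXAMPLE_ARB_LOST)
                        return -EAGAIN; /* lost arbitration, let the core retry */

                return num;             /* success: all messages transferred */
        }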
@@ -1509,7 +1507,7 @@ struct i2c_adapter* i2c_get_adapter(int id)
struct i2c_adapter *adapter;
mutex_lock(&core_lock);
- adapter = (struct i2c_adapter *)idr_find(&i2c_adapter_idr, id);
+ adapter = idr_find(&i2c_adapter_idr, id);
if (adapter && !try_module_get(adapter->owner))
adapter = NULL;
@@ -1995,14 +1993,27 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
char read_write, u8 command, int protocol,
union i2c_smbus_data *data)
{
+ unsigned long orig_jiffies;
+ int try;
s32 res;
flags &= I2C_M_TEN | I2C_CLIENT_PEC;
if (adapter->algo->smbus_xfer) {
mutex_lock(&adapter->bus_lock);
- res = adapter->algo->smbus_xfer(adapter,addr,flags,read_write,
- command, protocol, data);
+
+ /* Retry automatically on arbitration loss */
+ orig_jiffies = jiffies;
+ for (res = 0, try = 0; try <= adapter->retries; try++) {
+ res = adapter->algo->smbus_xfer(adapter, addr, flags,
+ read_write, command,
+ protocol, data);
+ if (res != -EAGAIN)
+ break;
+ if (time_after(jiffies,
+ orig_jiffies + adapter->timeout))
+ break;
+ }
mutex_unlock(&adapter->bus_lock);
} else
res = i2c_smbus_xfer_emulated(adapter,addr,flags,read_write,
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index cf06494bb74..9a5d0aaac9d 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -46,7 +46,7 @@ menuconfig IDE
SMART parameters from disk drives.
To compile this driver as a module, choose M here: the
- module will be called ide-core.ko.
+ module will be called ide-core.
For further information, please read <file:Documentation/ide/ide.txt>.
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index 537da1cde16..e59b6dee9ae 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -402,27 +402,23 @@ static u8 ali_cable_detect(ide_hwif_t *hwif)
return cbl;
}
-#if !defined(CONFIG_SPARC64) && !defined(CONFIG_PPC)
+#ifndef CONFIG_SPARC64
/**
* init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff
* @hwif: interface to configure
*
* Obtain the IRQ tables for an ALi based IDE solution on the PC
* class platforms. This part of the code isn't applicable to the
- * Sparc and PowerPC systems.
+ * Sparc systems.
*/
static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 ideic, inmir;
s8 irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6,
1, 11, 0, 12, 0, 14, 0, 15 };
int irq = -1;
- if (dev->device == PCI_DEVICE_ID_AL_M5229)
- hwif->irq = hwif->channel ? 15 : 14;
-
if (isa_dev) {
/*
* read IDE interface control
@@ -455,7 +451,7 @@ static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
}
#else
#define init_hwif_ali15x3 NULL
-#endif /* !defined(CONFIG_SPARC64) && !defined(CONFIG_PPC) */
+#endif /* CONFIG_SPARC64 */
/**
* init_dma_ali15x3 - set up DMA on ALi15x3
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
index 403d0e4265d..fc0949a8cfd 100644
--- a/drivers/ide/at91_ide.c
+++ b/drivers/ide/at91_ide.c
@@ -216,6 +216,7 @@ static const struct ide_port_info at91_ide_port_info __initdata = {
.host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE |
IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS,
.pio_mask = ATA_PIO6,
+ .chipset = ide_generic,
};
/*
@@ -246,8 +247,7 @@ irqreturn_t at91_irq_handler(int irq, void *dev_id)
static int __init at91_ide_probe(struct platform_device *pdev)
{
int ret;
- hw_regs_t hw;
- hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
struct ide_host *host;
struct resource *res;
unsigned long tf_base = 0, ctl_base = 0;
@@ -304,10 +304,9 @@ static int __init at91_ide_probe(struct platform_device *pdev)
ide_std_init_ports(&hw, tf_base, ctl_base + 6);
hw.irq = board->irq_pin;
- hw.chipset = ide_generic;
hw.dev = &pdev->dev;
- host = ide_host_alloc(&at91_ide_port_info, hws);
+ host = ide_host_alloc(&at91_ide_port_info, hws, 1);
if (!host) {
perr("failed to allocate ide host\n");
return -ENOMEM;
diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 46013644c96..58121bd6c11 100644
--- a/drivers/ide/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
@@ -449,7 +449,7 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
}
#endif
-static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
+static void auide_setup_ports(struct ide_hw *hw, _auide_hwif *ahwif)
{
int i;
unsigned long *ata_regs = hw->io_ports_array;
@@ -499,6 +499,7 @@ static const struct ide_port_info au1xxx_port_info = {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
.mwdma_mask = ATA_MWDMA2,
#endif
+ .chipset = ide_au1xxx,
};
static int au_ide_probe(struct platform_device *dev)
@@ -507,7 +508,7 @@ static int au_ide_probe(struct platform_device *dev)
struct resource *res;
struct ide_host *host;
int ret = 0;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
char *mode = "MWDMA2";
@@ -548,9 +549,8 @@ static int au_ide_probe(struct platform_device *dev)
auide_setup_ports(&hw, ahwif);
hw.irq = ahwif->irq;
hw.dev = &dev->dev;
- hw.chipset = ide_au1xxx;
- ret = ide_host_add(&au1xxx_port_info, hws, &host);
+ ret = ide_host_add(&au1xxx_port_info, hws, 1, &host);
if (ret)
goto out;
diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c
index d028f8864bc..e3c6a591330 100644
--- a/drivers/ide/buddha.c
+++ b/drivers/ide/buddha.c
@@ -121,7 +121,7 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
return 1;
}
-static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
+static void __init buddha_setup_ports(struct ide_hw *hw, unsigned long base,
unsigned long ctl, unsigned long irq_port,
ide_ack_intr_t *ack_intr)
{
@@ -139,13 +139,12 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
hw->irq = IRQ_AMIGA_PORTS;
hw->ack_intr = ack_intr;
-
- hw->chipset = ide_generic;
}
static const struct ide_port_info buddha_port_info = {
.host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
.irq_flags = IRQF_SHARED,
+ .chipset = ide_generic,
};
/*
@@ -161,7 +160,7 @@ static int __init buddha_init(void)
while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
unsigned long board;
- hw_regs_t hw[MAX_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
+ struct ide_hw hw[MAX_NUM_HWIFS], *hws[MAX_NUM_HWIFS];
if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
buddha_num_hwifs = BUDDHA_NUM_HWIFS;
@@ -225,7 +224,7 @@ fail_base2:
hws[i] = &hw[i];
}
- ide_host_add(&buddha_port_info, hws, NULL);
+ ide_host_add(&buddha_port_info, hws, i, NULL);
}
return 0;
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index 8890276fef7..1683ed5c732 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -708,7 +708,7 @@ static int __init cmd640x_init(void)
int second_port_cmd640 = 0, rc;
const char *bus_type, *port2;
u8 b, cfr;
- hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
+ struct ide_hw hw[2], *hws[2];
if (cmd640_vlb && probe_for_cmd640_vlb()) {
bus_type = "VLB";
@@ -762,11 +762,9 @@ static int __init cmd640x_init(void)
ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
hw[0].irq = 14;
- hw[0].chipset = ide_cmd640;
ide_std_init_ports(&hw[1], 0x170, 0x376);
hw[1].irq = 15;
- hw[1].chipset = ide_cmd640;
printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
"\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
@@ -824,7 +822,8 @@ static int __init cmd640x_init(void)
cmd640_dump_regs();
#endif
- return ide_host_add(&cmd640_port_info, hws, NULL);
+ return ide_host_add(&cmd640_port_info, hws, second_port_cmd640 ? 2 : 1,
+ NULL);
}
module_param_named(probe_vlb, cmd640_vlb, bool, 0);
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index 87987a7d36c..bd066bb9d61 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -110,7 +110,7 @@ static const struct ide_port_info cyrix_chipset __devinitdata = {
static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct ide_port_info *d = &cyrix_chipset;
- hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
+ struct ide_hw hw[2], *hws[] = { NULL, NULL };
ide_setup_pci_noise(dev, d);
@@ -136,7 +136,7 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
hw[0].irq = 14;
- return ide_host_add(d, hws, NULL);
+ return ide_host_add(d, hws, 2, NULL);
}
static const struct pci_device_id cs5520_pci_tbl[] = {
diff --git a/drivers/ide/delkin_cb.c b/drivers/ide/delkin_cb.c
index f153b95619b..1e10eba62ce 100644
--- a/drivers/ide/delkin_cb.c
+++ b/drivers/ide/delkin_cb.c
@@ -68,6 +68,7 @@ static const struct ide_port_info delkin_cb_port_info = {
IDE_HFLAG_NO_DMA,
.irq_flags = IRQF_SHARED,
.init_chipset = delkin_cb_init_chipset,
+ .chipset = ide_pci,
};
static int __devinit
@@ -76,7 +77,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
struct ide_host *host;
unsigned long base;
int rc;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
rc = pci_enable_device(dev);
if (rc) {
@@ -97,9 +98,8 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
ide_std_init_ports(&hw, base + 0x10, base + 0x1e);
hw.irq = dev->irq;
hw.dev = &dev->dev;
- hw.chipset = ide_pci; /* this enables IRQ sharing */
- rc = ide_host_add(&delkin_cb_port_info, hws, &host);
+ rc = ide_host_add(&delkin_cb_port_info, hws, 1, &host);
if (rc)
goto out_disable;
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
index 0e2df6755ec..22fa27389c3 100644
--- a/drivers/ide/falconide.c
+++ b/drivers/ide/falconide.c
@@ -111,9 +111,10 @@ static const struct ide_port_info falconide_port_info = {
.host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE |
IDE_HFLAG_NO_DMA,
.irq_flags = IRQF_SHARED,
+ .chipset = ide_generic,
};
-static void __init falconide_setup_ports(hw_regs_t *hw)
+static void __init falconide_setup_ports(struct ide_hw *hw)
{
int i;
@@ -128,8 +129,6 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
hw->irq = IRQ_MFP_IDE;
hw->ack_intr = NULL;
-
- hw->chipset = ide_generic;
}
/*
@@ -139,7 +138,7 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
static int __init falconide_init(void)
{
struct ide_host *host;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
int rc;
if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
@@ -154,7 +153,7 @@ static int __init falconide_init(void)
falconide_setup_ports(&hw);
- host = ide_host_alloc(&falconide_port_info, hws);
+ host = ide_host_alloc(&falconide_port_info, hws, 1);
if (host == NULL) {
rc = -ENOMEM;
goto err;
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index c7119516c5a..4451a6a5dfe 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -88,7 +88,7 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
return 1;
}
-static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
+static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base,
unsigned long ctl, unsigned long irq_port,
ide_ack_intr_t *ack_intr)
{
@@ -106,14 +106,13 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
hw->irq = IRQ_AMIGA_PORTS;
hw->ack_intr = ack_intr;
-
- hw->chipset = ide_generic;
}
static const struct ide_port_info gayle_port_info = {
.host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE |
IDE_HFLAG_NO_DMA,
.irq_flags = IRQF_SHARED,
+ .chipset = ide_generic,
};
/*
@@ -126,7 +125,7 @@ static int __init gayle_init(void)
unsigned long base, ctrlport, irqport;
ide_ack_intr_t *ack_intr;
int a4000, i, rc;
- hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
+ struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
if (!MACH_IS_AMIGA)
return -ENODEV;
@@ -171,7 +170,7 @@ found:
hws[i] = &hw[i];
}
- rc = ide_host_add(&gayle_port_info, hws, NULL);
+ rc = ide_host_add(&gayle_port_info, hws, i, NULL);
if (rc)
release_mem_region(res_start, res_n);
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 0feb66c720e..7ce68ef6b90 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -138,14 +138,6 @@
#undef HPT_RESET_STATE_ENGINE
#undef HPT_DELAY_INTERRUPT
-static const char *quirk_drives[] = {
- "QUANTUM FIREBALLlct08 08",
- "QUANTUM FIREBALLP KA6.4",
- "QUANTUM FIREBALLP LM20.4",
- "QUANTUM FIREBALLP LM20.5",
- NULL
-};
-
static const char *bad_ata100_5[] = {
"IBM-DTLA-307075",
"IBM-DTLA-307060",
@@ -729,27 +721,13 @@ static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
hpt3xx_set_mode(drive, XFER_PIO_0 + pio);
}
-static void hpt3xx_quirkproc(ide_drive_t *drive)
-{
- char *m = (char *)&drive->id[ATA_ID_PROD];
- const char **list = quirk_drives;
-
- while (*list)
- if (strstr(m, *list++)) {
- drive->quirk_list = 1;
- return;
- }
-
- drive->quirk_list = 0;
-}
-
static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
{
ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct hpt_info *info = hpt3xx_get_info(hwif->dev);
- if (drive->quirk_list == 0)
+ if ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
return;
if (info->chip_type >= HPT370) {
@@ -1404,7 +1382,6 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
static const struct ide_port_ops hpt3xx_port_ops = {
.set_pio_mode = hpt3xx_set_pio_mode,
.set_dma_mode = hpt3xx_set_mode,
- .quirkproc = hpt3xx_quirkproc,
.maskproc = hpt3xx_maskproc,
.mdma_filter = hpt3xx_mdma_filter,
.udma_filter = hpt3xx_udma_filter,
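
The quirk_drives[] table and hpt3xx_quirkproc() can go because the core now owns this list: the ide-iops.c hunk later in this diff adds ide_check_nien_quirk_list(), which sets IDE_DFLAG_NIEN_QUIRK in drive->dev_flags at probe time, so host drivers only test the flag. A sketch of the resulting driver-side pattern; the function name is illustrative:

	#include <linux/ide.h>

	static void example_maskproc(ide_drive_t *drive, int mask)
	{
		/* core code has already run ide_check_nien_quirk_list() */
		if ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
			return;		/* not a quirky drive: nothing special to do */

		/* chipset-specific nIEN masking for quirky drives goes here */
	}
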
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 4e16ce68b06..5af3d0ffaf0 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -65,8 +65,6 @@ static struct cardinfo icside_cardinfo_v6_2 = {
};
struct icside_state {
- unsigned int channel;
- unsigned int enabled;
void __iomem *irq_port;
void __iomem *ioc_base;
unsigned int sel;
@@ -116,18 +114,11 @@ static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
struct icside_state *state = ec->irq_data;
void __iomem *base = state->irq_port;
- state->enabled = 1;
+ writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
+ readb(base + ICS_ARCIN_V6_INTROFFSET_2);
- switch (state->channel) {
- case 0:
- writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
- readb(base + ICS_ARCIN_V6_INTROFFSET_2);
- break;
- case 1:
- writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
- readb(base + ICS_ARCIN_V6_INTROFFSET_1);
- break;
- }
+ writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
+ readb(base + ICS_ARCIN_V6_INTROFFSET_1);
}
/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
@@ -137,8 +128,6 @@ static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
struct icside_state *state = ec->irq_data;
- state->enabled = 0;
-
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}
@@ -160,44 +149,6 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = {
.irqpending = icside_irqpending_arcin_v6,
};
-/*
- * Handle routing of interrupts. This is called before
- * we write the command to the drive.
- */
-static void icside_maskproc(ide_drive_t *drive, int mask)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct expansion_card *ec = ECARD_DEV(hwif->dev);
- struct icside_state *state = ecard_get_drvdata(ec);
- unsigned long flags;
-
- local_irq_save(flags);
-
- state->channel = hwif->channel;
-
- if (state->enabled && !mask) {
- switch (hwif->channel) {
- case 0:
- writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
- break;
- case 1:
- writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
- break;
- }
- } else {
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
- }
-
- local_irq_restore(flags);
-}
-
-static const struct ide_port_ops icside_v6_no_dma_port_ops = {
- .maskproc = icside_maskproc,
-};
-
#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
/*
* SG-DMA support.
@@ -275,7 +226,6 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
static const struct ide_port_ops icside_v6_port_ops = {
.set_dma_mode = icside_set_dma_mode,
- .maskproc = icside_maskproc,
};
static void icside_dma_host_set(ide_drive_t *drive, int on)
@@ -320,11 +270,6 @@ static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
BUG_ON(dma_channel_active(ec->dma));
/*
- * Ensure that we have the right interrupt routed.
- */
- icside_maskproc(drive, 0);
-
- /*
* Route the DMA signals to the correct interface.
*/
writeb(state->sel | hwif->channel, state->ioc_base);
@@ -381,7 +326,7 @@ static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
return -EOPNOTSUPP;
}
-static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
+static void icside_setup_ports(struct ide_hw *hw, void __iomem *base,
struct cardinfo *info, struct expansion_card *ec)
{
unsigned long port = (unsigned long)base + info->dataoffset;
@@ -398,11 +343,11 @@ static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
hw->irq = ec->irq;
hw->dev = &ec->dev;
- hw->chipset = ide_acorn;
}
static const struct ide_port_info icside_v5_port_info = {
.host_flags = IDE_HFLAG_NO_DMA,
+ .chipset = ide_acorn,
};
static int __devinit
@@ -410,7 +355,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
{
void __iomem *base;
struct ide_host *host;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
int ret;
base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
@@ -431,7 +376,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
- host = ide_host_alloc(&icside_v5_port_info, hws);
+ host = ide_host_alloc(&icside_v5_port_info, hws, 1);
if (host == NULL)
return -ENODEV;
@@ -452,11 +397,11 @@ err_free:
static const struct ide_port_info icside_v6_port_info __initdata = {
.init_dma = icside_dma_off_init,
- .port_ops = &icside_v6_no_dma_port_ops,
.dma_ops = &icside_v6_dma_ops,
.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
.mwdma_mask = ATA_MWDMA2,
.swdma_mask = ATA_SWDMA2,
+ .chipset = ide_acorn,
};
static int __devinit
@@ -466,7 +411,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
struct ide_host *host;
unsigned int sel = 0;
int ret;
- hw_regs_t hw[2], *hws[] = { &hw[0], NULL, NULL, NULL };
+ struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] };
struct ide_port_info d = icside_v6_port_info;
ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
@@ -506,7 +451,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
- host = ide_host_alloc(&d, hws);
+ host = ide_host_alloc(&d, hws, 2);
if (host == NULL)
return -ENODEV;
diff --git a/drivers/ide/ide-4drives.c b/drivers/ide/ide-4drives.c
index 78aca75a2c4..979d342c338 100644
--- a/drivers/ide/ide-4drives.c
+++ b/drivers/ide/ide-4drives.c
@@ -25,12 +25,13 @@ static const struct ide_port_info ide_4drives_port_info = {
.port_ops = &ide_4drives_port_ops,
.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA |
IDE_HFLAG_4DRIVES,
+ .chipset = ide_4drives,
};
static int __init ide_4drives_init(void)
{
unsigned long base = 0x1f0, ctl = 0x3f6;
- hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw, &hw };
if (probe_4drives == 0)
return -ENODEV;
@@ -52,9 +53,8 @@ static int __init ide_4drives_init(void)
ide_std_init_ports(&hw, base, ctl);
hw.irq = 14;
- hw.chipset = ide_4drives;
- return ide_host_add(&ide_4drives_port_info, hws, NULL);
+ return ide_host_add(&ide_4drives_port_info, hws, 2, NULL);
}
module_init(ide_4drives_init);
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 7201b176d75..702ef64a0f1 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -80,34 +80,6 @@ void ide_init_pc(struct ide_atapi_pc *pc)
EXPORT_SYMBOL_GPL(ide_init_pc);
/*
- * Generate a new packet command request in front of the request queue, before
- * the current request, so that it will be processed immediately, on the next
- * pass through the driver.
- */
-static void ide_queue_pc_head(ide_drive_t *drive, struct gendisk *disk,
- struct ide_atapi_pc *pc, struct request *rq)
-{
- blk_rq_init(NULL, rq);
- rq->cmd_type = REQ_TYPE_SPECIAL;
- rq->cmd_flags |= REQ_PREEMPT;
- rq->buffer = (char *)pc;
- rq->rq_disk = disk;
-
- if (pc->req_xfer) {
- rq->data = pc->buf;
- rq->data_len = pc->req_xfer;
- }
-
- memcpy(rq->cmd, pc->c, 12);
- if (drive->media == ide_tape)
- rq->cmd[13] = REQ_IDETAPE_PC1;
-
- drive->hwif->rq = NULL;
-
- elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
-}
-
-/*
* Add a special packet command request to the tail of the request queue,
* and wait for it to be serviced.
*/
@@ -119,19 +91,21 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_SPECIAL;
- rq->buffer = (char *)pc;
+ rq->special = (char *)pc;
if (pc->req_xfer) {
- rq->data = pc->buf;
- rq->data_len = pc->req_xfer;
+ error = blk_rq_map_kern(drive->queue, rq, pc->buf, pc->req_xfer,
+ GFP_NOIO);
+ if (error)
+ goto put_req;
}
memcpy(rq->cmd, pc->c, 12);
if (drive->media == ide_tape)
rq->cmd[13] = REQ_IDETAPE_PC1;
error = blk_execute_rq(drive->queue, disk, rq, 0);
+put_req:
blk_put_request(rq);
-
return error;
}
EXPORT_SYMBOL_GPL(ide_queue_pc_tail);
@@ -191,20 +165,113 @@ void ide_create_request_sense_cmd(ide_drive_t *drive, struct ide_atapi_pc *pc)
}
EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
+void ide_prep_sense(ide_drive_t *drive, struct request *rq)
+{
+ struct request_sense *sense = &drive->sense_data;
+ struct request *sense_rq = &drive->sense_rq;
+ unsigned int cmd_len, sense_len;
+ int err;
+
+ debug_log("%s: enter\n", __func__);
+
+ switch (drive->media) {
+ case ide_floppy:
+ cmd_len = 255;
+ sense_len = 18;
+ break;
+ case ide_tape:
+ cmd_len = 20;
+ sense_len = 20;
+ break;
+ default:
+ cmd_len = 18;
+ sense_len = 18;
+ }
+
+ BUG_ON(sense_len > sizeof(*sense));
+
+ if (blk_sense_request(rq) || drive->sense_rq_armed)
+ return;
+
+ memset(sense, 0, sizeof(*sense));
+
+ blk_rq_init(rq->q, sense_rq);
+
+ err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
+ GFP_NOIO);
+ if (unlikely(err)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "%s: failed to map sense buffer\n",
+ drive->name);
+ return;
+ }
+
+ sense_rq->rq_disk = rq->rq_disk;
+ sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
+ sense_rq->cmd[4] = cmd_len;
+ sense_rq->cmd_type = REQ_TYPE_SENSE;
+ sense_rq->cmd_flags |= REQ_PREEMPT;
+
+ if (drive->media == ide_tape)
+ sense_rq->cmd[13] = REQ_IDETAPE_PC1;
+
+ drive->sense_rq_armed = true;
+}
+EXPORT_SYMBOL_GPL(ide_prep_sense);
+
+int ide_queue_sense_rq(ide_drive_t *drive, void *special)
+{
+ /* deferred failure from ide_prep_sense() */
+ if (!drive->sense_rq_armed) {
+ printk(KERN_WARNING "%s: failed queue sense request\n",
+ drive->name);
+ return -ENOMEM;
+ }
+
+ drive->sense_rq.special = special;
+ drive->sense_rq_armed = false;
+
+ drive->hwif->rq = NULL;
+
+ elv_add_request(drive->queue, &drive->sense_rq,
+ ELEVATOR_INSERT_FRONT, 0);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
+
/*
* Called when an error was detected during the last packet command.
- * We queue a request sense packet command in the head of the request list.
+ * We queue a request sense packet command at the head of the request
+ * queue.
*/
-void ide_retry_pc(ide_drive_t *drive, struct gendisk *disk)
+void ide_retry_pc(ide_drive_t *drive)
{
- struct request *rq = &drive->request_sense_rq;
+ struct request *failed_rq = drive->hwif->rq;
+ struct request *sense_rq = &drive->sense_rq;
struct ide_atapi_pc *pc = &drive->request_sense_pc;
(void)ide_read_error(drive);
- ide_create_request_sense_cmd(drive, pc);
+
+ /* init pc from sense_rq */
+ ide_init_pc(pc);
+ memcpy(pc->c, sense_rq->cmd, 12);
+ pc->buf = bio_data(sense_rq->bio); /* pointer to mapped address */
+ pc->req_xfer = blk_rq_bytes(sense_rq);
+
if (drive->media == ide_tape)
- set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
- ide_queue_pc_head(drive, disk, pc, rq);
+ drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
+
+ /*
+ * Push back the failed request and put request sense on top
+ * of it. The failed command will be retried after sense data
+ * is acquired.
+ */
+ blk_requeue_request(failed_rq->q, failed_rq);
+ drive->hwif->rq = NULL;
+ if (ide_queue_sense_rq(drive, pc)) {
+ blk_start_request(failed_rq);
+ ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
+ }
}
EXPORT_SYMBOL_GPL(ide_retry_pc);
@@ -246,7 +313,7 @@ int ide_cd_get_xferlen(struct request *rq)
return 32768;
else if (blk_sense_request(rq) || blk_pc_request(rq) ||
rq->cmd_type == REQ_TYPE_ATA_PC)
- return rq->data_len;
+ return blk_rq_bytes(rq);
else
return 0;
}
@@ -276,7 +343,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
struct ide_cmd *cmd = &hwif->cmd;
struct request *rq = hwif->rq;
const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- xfer_func_t *xferfunc;
unsigned int timeout, done;
u16 bcount;
u8 stat, ireason, dsc = 0;
@@ -303,18 +369,14 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
drive->name, rq_data_dir(pc->rq)
? "write" : "read");
pc->flags |= PC_FLAG_DMA_ERROR;
- } else {
+ } else
pc->xferred = pc->req_xfer;
- if (drive->pc_update_buffers)
- drive->pc_update_buffers(drive, pc);
- }
debug_log("%s: DMA finished\n", drive->name);
}
/* No more interrupts */
if ((stat & ATA_DRQ) == 0) {
int uptodate, error;
- unsigned int done;
debug_log("Packet command completed, %d bytes transferred\n",
pc->xferred);
@@ -343,7 +405,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
/* Retry operation */
- ide_retry_pc(drive, rq->rq_disk);
+ ide_retry_pc(drive);
/* queued, but not started */
return ide_stopped;
@@ -353,6 +415,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) && (stat & ATA_DSC) == 0)
dsc = 1;
+ /*
+ * ->pc_callback() might change rq->data_len for
+ * residual count, cache total length.
+ */
+ done = blk_rq_bytes(rq);
+
/* Command finished - Call the callback function */
uptodate = drive->pc_callback(drive, dsc);
@@ -361,7 +429,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if (blk_special_request(rq)) {
rq->errors = 0;
- done = blk_rq_bytes(rq);
error = 0;
} else {
@@ -370,15 +437,10 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
rq->errors = -EIO;
}
- if (drive->media == ide_tape)
- done = ide_rq_bytes(rq); /* FIXME */
- else
- done = blk_rq_bytes(rq);
-
error = uptodate ? 0 : -EIO;
}
- ide_complete_rq(drive, error, done);
+ ide_complete_rq(drive, error, blk_rq_bytes(rq));
return ide_stopped;
}
@@ -407,21 +469,11 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
return ide_do_reset(drive);
}
- xferfunc = write ? tp_ops->output_data : tp_ops->input_data;
-
- if (drive->media == ide_floppy && pc->buf == NULL) {
- done = min_t(unsigned int, bcount, cmd->nleft);
- ide_pio_bytes(drive, cmd, write, done);
- } else if (drive->media == ide_tape && pc->bh) {
- done = drive->pc_io_buffers(drive, pc, bcount, write);
- } else {
- done = min_t(unsigned int, bcount, pc->req_xfer - pc->xferred);
- xferfunc(drive, NULL, pc->cur_pos, done);
- }
+ done = min_t(unsigned int, bcount, cmd->nleft);
+ ide_pio_bytes(drive, cmd, write, done);
- /* Update the current position */
+ /* Update transferred byte count */
pc->xferred += done;
- pc->cur_pos += done;
bcount -= done;
@@ -525,7 +577,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
/*
* If necessary schedule the packet transfer to occur 'timeout'
- * miliseconds later in ide_delayed_transfer_pc() after the
+ * milliseconds later in ide_delayed_transfer_pc() after the
* device says it's ready for a packet.
*/
if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) {
@@ -599,7 +651,6 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
/* We haven't transferred any data yet */
pc->xferred = 0;
- pc->cur_pos = pc->buf;
valid_tf = IDE_VALID_DEVICE;
bcount = ((drive->media == ide_tape) ?
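
The ide-atapi.c changes above replace the old scheme of building a request-sense packet command at error time and stuffing it in front of the queue (ide_queue_pc_head()) with a pre-armed per-drive sense request: ide_prep_sense() maps drive->sense_data into drive->sense_rq with blk_rq_map_kern() before a packet command is issued, and ide_queue_sense_rq()/ide_retry_pc() later push that request to the head of the queue when the device reports CHECK CONDITION. A sketch of the calling order a packet-command driver ends up with, based on the ide-cd.c and ide-floppy.c hunks below; error handling is elided and the function name is illustrative:

	#include <linux/string.h>
	#include <linux/ide.h>

	static ide_startstop_t example_do_request(ide_drive_t *drive,
						  struct request *rq, sector_t block)
	{
		struct ide_cmd cmd;

		/* arm drive->sense_rq before the packet command is started */
		ide_prep_sense(drive, rq);

		memset(&cmd, 0, sizeof(cmd));
		if (rq_data_dir(rq))
			cmd.tf_flags |= IDE_TFLAG_WRITE;
		cmd.rq = rq;

		return ide_issue_pc(drive, &cmd);
		/* on CHECK CONDITION the IRQ handler calls ide_retry_pc(drive),
		 * which queues the pre-mapped sense request via ide_queue_sense_rq() */
	}
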
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 925eb9e245d..424140c6c40 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -182,7 +182,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
(sense->information[2] << 8) |
(sense->information[3]);
- if (drive->queue->hardsect_size == 2048)
+ if (queue_logical_block_size(drive->queue) == 2048)
/* device sector size is 2K */
sector <<= 2;
@@ -206,54 +206,25 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
ide_cd_log_error(drive->name, failed_command, sense);
}
-static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
- struct request *failed_command)
-{
- struct cdrom_info *info = drive->driver_data;
- struct request *rq = &drive->request_sense_rq;
-
- ide_debug_log(IDE_DBG_SENSE, "enter");
-
- if (sense == NULL)
- sense = &info->sense_data;
-
- /* stuff the sense request in front of our current request */
- blk_rq_init(NULL, rq);
- rq->cmd_type = REQ_TYPE_ATA_PC;
- rq->rq_disk = info->disk;
-
- rq->data = sense;
- rq->cmd[0] = GPCMD_REQUEST_SENSE;
- rq->cmd[4] = 18;
- rq->data_len = 18;
-
- rq->cmd_type = REQ_TYPE_SENSE;
- rq->cmd_flags |= REQ_PREEMPT;
-
- /* NOTE! Save the failed command in "rq->buffer" */
- rq->buffer = (void *) failed_command;
-
- if (failed_command)
- ide_debug_log(IDE_DBG_SENSE, "failed_cmd: 0x%x",
- failed_command->cmd[0]);
-
- drive->hwif->rq = NULL;
-
- elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
-}
-
static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
{
/*
- * For REQ_TYPE_SENSE, "rq->buffer" points to the original
- * failed request
+ * For REQ_TYPE_SENSE, "rq->special" points to the original
+ * failed request. Also, the sense data should be read
+ * directly from rq which might be different from the original
+ * sense buffer if it got copied during mapping.
*/
- struct request *failed = (struct request *)rq->buffer;
- struct cdrom_info *info = drive->driver_data;
- void *sense = &info->sense_data;
+ struct request *failed = (struct request *)rq->special;
+ void *sense = bio_data(rq->bio);
if (failed) {
if (failed->sense) {
+ /*
+ * Sense is always read into drive->sense_data.
+ * Copy back if the failed request has its
+ * sense pointer set.
+ */
+ memcpy(failed->sense, sense, 18);
sense = failed->sense;
failed->sense_len = rq->sense_len;
}
@@ -428,22 +399,13 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
/* if we got a CHECK_CONDITION status, queue a request sense command */
if (stat & ATA_ERR)
- cdrom_queue_request_sense(drive, NULL, NULL);
+ return ide_queue_sense_rq(drive, NULL) ? 2 : 1;
return 1;
end_request:
if (stat & ATA_ERR) {
- struct request_queue *q = drive->queue;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blkdev_dequeue_request(rq);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
hwif->rq = NULL;
-
- cdrom_queue_request_sense(drive, rq->sense, rq);
- return 1;
+ return ide_queue_sense_rq(drive, rq) ? 2 : 1;
} else
return 2;
}
@@ -503,14 +465,8 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
* and some drives don't send them. Sigh.
*/
if (rq->cmd[0] == GPCMD_REQUEST_SENSE &&
- cmd->nleft > 0 && cmd->nleft <= 5) {
- unsigned int ofs = cmd->nbytes - cmd->nleft;
-
- while (cmd->nleft > 0) {
- *((u8 *)rq->data + ofs++) = 0;
- cmd->nleft--;
- }
- }
+ cmd->nleft > 0 && cmd->nleft <= 5)
+ cmd->nleft = 0;
}
int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
@@ -543,14 +499,18 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
rq->cmd_flags |= cmd_flags;
rq->timeout = timeout;
if (buffer) {
- rq->data = buffer;
- rq->data_len = *bufflen;
+ error = blk_rq_map_kern(drive->queue, rq, buffer,
+ *bufflen, GFP_NOIO);
+ if (error) {
+ blk_put_request(rq);
+ return error;
+ }
}
error = blk_execute_rq(drive->queue, info->disk, rq, 0);
if (buffer)
- *bufflen = rq->data_len;
+ *bufflen = rq->resid_len;
flags = rq->cmd_flags;
blk_put_request(rq);
@@ -608,7 +568,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
struct request *rq = hwif->rq;
ide_expiry_t *expiry = NULL;
int dma_error = 0, dma, thislen, uptodate = 0;
- int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0, nsectors;
+ int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
int sense = blk_sense_request(rq);
unsigned int timeout;
u16 len;
@@ -738,13 +698,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
out_end:
if (blk_pc_request(rq) && rc == 0) {
- unsigned int dlen = rq->data_len;
-
- rq->data_len = 0;
-
- if (blk_end_request(rq, 0, dlen))
- BUG();
-
+ rq->resid_len = 0;
+ blk_end_request_all(rq, 0);
hwif->rq = NULL;
} else {
if (sense && uptodate)
@@ -762,21 +717,13 @@ out_end:
ide_cd_error_cmd(drive, cmd);
/* make sure it's fully ended */
- if (blk_pc_request(rq))
- nsectors = (rq->data_len + 511) >> 9;
- else
- nsectors = rq->hard_nr_sectors;
-
- if (nsectors == 0)
- nsectors = 1;
-
if (blk_fs_request(rq) == 0) {
- rq->data_len -= (cmd->nbytes - cmd->nleft);
+ rq->resid_len -= cmd->nbytes - cmd->nleft;
if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
- rq->data_len += cmd->last_xfer_len;
+ rq->resid_len += cmd->last_xfer_len;
}
- ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9);
+ ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
if (sense && rc == 2)
ide_error(drive, "request sense failure", stat);
@@ -790,7 +737,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
struct request_queue *q = drive->queue;
int write = rq_data_dir(rq) == WRITE;
unsigned short sectors_per_frame =
- queue_hardsect_size(q) >> SECTOR_BITS;
+ queue_logical_block_size(q) >> SECTOR_BITS;
ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
"secs_per_frame: %u",
@@ -809,8 +756,8 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
}
/* fs requests *must* be hardware frame aligned */
- if ((rq->nr_sectors & (sectors_per_frame - 1)) ||
- (rq->sector & (sectors_per_frame - 1)))
+ if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
+ (blk_rq_pos(rq) & (sectors_per_frame - 1)))
return ide_stopped;
/* use DMA, if possible */
@@ -838,15 +785,10 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
drive->dma = 0;
/* sg request */
- if (rq->bio || ((rq->cmd_type == REQ_TYPE_ATA_PC) && rq->data_len)) {
+ if (rq->bio) {
struct request_queue *q = drive->queue;
+ char *buf = bio_data(rq->bio);
unsigned int alignment;
- char *buf;
-
- if (rq->bio)
- buf = bio_data(rq->bio);
- else
- buf = rq->data;
drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
@@ -858,7 +800,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
*/
alignment = queue_dma_alignment(q) | q->dma_pad_mask;
if ((unsigned long)buf & alignment
- || rq->data_len & q->dma_pad_mask
+ || blk_rq_bytes(rq) & q->dma_pad_mask
|| object_is_on_stack(buf))
drive->dma = 0;
}
@@ -896,6 +838,9 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
goto out_end;
}
+ /* prepare sense request for this command */
+ ide_prep_sense(drive, rq);
+
memset(&cmd, 0, sizeof(cmd));
if (rq_data_dir(rq))
@@ -903,15 +848,14 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
cmd.rq = rq;
- if (blk_fs_request(rq) || rq->data_len) {
- ide_init_sg_cmd(&cmd, blk_fs_request(rq) ? (rq->nr_sectors << 9)
- : rq->data_len);
+ if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+ ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd);
}
return ide_issue_pc(drive, &cmd);
out_end:
- nsectors = rq->hard_nr_sectors;
+ nsectors = blk_rq_sectors(rq);
if (nsectors == 0)
nsectors = 1;
@@ -1077,8 +1021,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
/* save a private copy of the TOC capacity for error handling */
drive->probed_capacity = toc->capacity * sectors_per_frame;
- blk_queue_hardsect_size(drive->queue,
- sectors_per_frame << SECTOR_BITS);
+ blk_queue_logical_block_size(drive->queue,
+ sectors_per_frame << SECTOR_BITS);
/* first read just the header, so we know how long the TOC is */
stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
@@ -1394,9 +1338,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
/* standard prep_rq_fn that builds 10 byte cmds */
static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
{
- int hard_sect = queue_hardsect_size(q);
- long block = (long)rq->hard_sector / (hard_sect >> 9);
- unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
+ int hard_sect = queue_logical_block_size(q);
+ long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
+ unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
memset(rq->cmd, 0, BLK_MAX_CDB);
@@ -1599,7 +1543,7 @@ static int ide_cdrom_setup(ide_drive_t *drive)
nslots = ide_cdrom_probe_capabilities(drive);
- blk_queue_hardsect_size(q, CD_FRAMESIZE);
+ blk_queue_logical_block_size(q, CD_FRAMESIZE);
if (ide_cdrom_register(drive, nslots)) {
printk(KERN_ERR PFX "%s: %s failed to register device with the"
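
Most of the mechanical churn in ide-cd.c is the switch from poking at struct request fields to the block-layer accessors: rq->sector/rq->hard_sector become blk_rq_pos(), rq->nr_sectors/rq->hard_nr_sectors become blk_rq_sectors(), rq->data_len becomes blk_rq_bytes() (with rq->resid_len for residual counts), and queue_hardsect_size() becomes queue_logical_block_size(). A small correspondence sketch; the function is illustrative, the helpers are the ones used in the hunks above:

	#include <linux/kernel.h>
	#include <linux/blkdev.h>

	static void example_show_accessors(struct request_queue *q, struct request *rq)
	{
		sector_t pos      = blk_rq_pos(rq);	/* was rq->sector / rq->hard_sector */
		unsigned int secs = blk_rq_sectors(rq);	/* was rq->(hard_)nr_sectors */
		unsigned int len  = blk_rq_bytes(rq);	/* was rq->data_len */
		unsigned int lbs  = queue_logical_block_size(q); /* was queue_hardsect_size(q) */

		pr_debug("pos=%llu secs=%u bytes=%u lbs=%u\n",
			 (unsigned long long)pos, secs, len, lbs);
	}
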
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 1d97101099c..93a3cf1b0f3 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -87,10 +87,6 @@ struct cdrom_info {
struct atapi_toc *toc;
- /* The result of the last successful request sense command
- on this device. */
- struct request_sense sense_data;
-
u8 max_speed; /* Max speed of the drive. */
u8 current_speed; /* Current speed of the drive. */
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 9e47f3529d5..527908ff298 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -155,6 +155,7 @@ static const struct ide_port_info idecs_port_info = {
.port_ops = &idecs_port_ops,
.host_flags = IDE_HFLAG_NO_DMA,
.irq_flags = IRQF_SHARED,
+ .chipset = ide_pci,
};
static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
@@ -163,7 +164,7 @@ static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
struct ide_host *host;
ide_hwif_t *hwif;
int i, rc;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
if (!request_region(io, 8, DRV_NAME)) {
printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -181,10 +182,9 @@ static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw, io, ctl);
hw.irq = irq;
- hw.chipset = ide_pci;
hw.dev = &handle->dev;
- rc = ide_host_add(&idecs_port_info, hws, &host);
+ rc = ide_host_add(&idecs_port_info, hws, 1, &host);
if (rc)
goto out_release;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index a9fbe2c3121..6a1de216970 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -82,7 +82,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
sector_t block)
{
ide_hwif_t *hwif = drive->hwif;
- u16 nsectors = (u16)rq->nr_sectors;
+ u16 nsectors = (u16)blk_rq_sectors(rq);
u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
struct ide_cmd cmd;
@@ -90,7 +90,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ide_startstop_t rc;
if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
- if (block + rq->nr_sectors > 1ULL << 28)
+ if (block + blk_rq_sectors(rq) > 1ULL << 28)
dma = 0;
else
lba48 = 0;
@@ -195,9 +195,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ledtrig_ide_activity();
- pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
+ pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
- (unsigned long long)block, rq->nr_sectors,
+ (unsigned long long)block, blk_rq_sectors(rq),
(unsigned long)rq->buffer);
if (hwif->rw_disk)
@@ -302,14 +302,12 @@ static const struct drive_list_entry hpa_list[] = {
{ NULL, NULL }
};
-static void idedisk_check_hpa(ide_drive_t *drive)
+static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48)
{
- unsigned long long capacity, set_max;
- int lba48 = ata_id_lba48_enabled(drive->id);
+ u64 capacity, set_max;
capacity = drive->capacity64;
-
- set_max = idedisk_read_native_max_address(drive, lba48);
+ set_max = idedisk_read_native_max_address(drive, lba48);
if (ide_in_drive_list(drive->id, hpa_list)) {
/*
@@ -320,9 +318,31 @@ static void idedisk_check_hpa(ide_drive_t *drive)
set_max--;
}
+ return set_max;
+}
+
+static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48)
+{
+ set_max = idedisk_set_max_address(drive, set_max, lba48);
+ if (set_max)
+ drive->capacity64 = set_max;
+
+ return set_max;
+}
+
+static void idedisk_check_hpa(ide_drive_t *drive)
+{
+ u64 capacity, set_max;
+ int lba48 = ata_id_lba48_enabled(drive->id);
+
+ capacity = drive->capacity64;
+ set_max = ide_disk_hpa_get_native_capacity(drive, lba48);
+
if (set_max <= capacity)
return;
+ drive->probed_capacity = set_max;
+
printk(KERN_INFO "%s: Host Protected Area detected.\n"
"\tcurrent capacity is %llu sectors (%llu MB)\n"
"\tnative capacity is %llu sectors (%llu MB)\n",
@@ -330,13 +350,13 @@ static void idedisk_check_hpa(ide_drive_t *drive)
capacity, sectors_to_MB(capacity),
set_max, sectors_to_MB(set_max));
- set_max = idedisk_set_max_address(drive, set_max, lba48);
+ if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0)
+ return;
- if (set_max) {
- drive->capacity64 = set_max;
+ set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48);
+ if (set_max)
printk(KERN_INFO "%s: Host Protected Area disabled.\n",
drive->name);
- }
}
static int ide_disk_get_capacity(ide_drive_t *drive)
@@ -358,6 +378,8 @@ static int ide_disk_get_capacity(ide_drive_t *drive)
drive->capacity64 = drive->cyl * drive->head * drive->sect;
}
+ drive->probed_capacity = drive->capacity64;
+
if (lba) {
drive->dev_flags |= IDE_DFLAG_LBA;
@@ -376,7 +398,7 @@ static int ide_disk_get_capacity(ide_drive_t *drive)
"%llu sectors (%llu MB)\n",
drive->name, (unsigned long long)drive->capacity64,
sectors_to_MB(drive->capacity64));
- drive->capacity64 = 1ULL << 28;
+ drive->probed_capacity = drive->capacity64 = 1ULL << 28;
}
if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
@@ -392,6 +414,34 @@ static int ide_disk_get_capacity(ide_drive_t *drive)
return 0;
}
+static u64 ide_disk_set_capacity(ide_drive_t *drive, u64 capacity)
+{
+ u64 set = min(capacity, drive->probed_capacity);
+ u16 *id = drive->id;
+ int lba48 = ata_id_lba48_enabled(id);
+
+ if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
+ ata_id_hpa_enabled(id) == 0)
+ goto out;
+
+ /*
+ * according to the spec the SET MAX ADDRESS command shall be
+ * immediately preceded by a READ NATIVE MAX ADDRESS command
+ */
+ capacity = ide_disk_hpa_get_native_capacity(drive, lba48);
+ if (capacity == 0)
+ goto out;
+
+ set = ide_disk_hpa_set_capacity(drive, set, lba48);
+ if (set) {
+ /* needed for ->resume to disable HPA */
+ drive->dev_flags |= IDE_DFLAG_NOHPA;
+ return set;
+ }
+out:
+ return drive->capacity64;
+}
+
static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
{
ide_drive_t *drive = q->queuedata;
@@ -411,7 +461,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
cmd->protocol = ATA_PROT_NODATA;
rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
- rq->cmd_flags |= REQ_SOFTBARRIER;
rq->special = cmd;
}
@@ -429,14 +478,14 @@ static int set_multcount(ide_drive_t *drive, int arg)
if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
return -EINVAL;
- if (drive->special.b.set_multmode)
+ if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
return -EBUSY;
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
drive->mult_req = arg;
- drive->special.b.set_multmode = 1;
+ drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
error = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);
@@ -640,7 +689,7 @@ static void ide_disk_setup(ide_drive_t *drive)
}
printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
- q->max_sectors / 2);
+ queue_max_sectors(q) / 2);
if (ata_id_is_ssd(id))
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
@@ -741,6 +790,7 @@ static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
const struct ide_disk_ops ide_ata_disk_ops = {
.check = ide_disk_check,
+ .set_capacity = ide_disk_set_capacity,
.get_capacity = ide_disk_get_capacity,
.setup = ide_disk_setup,
.flush = ide_disk_flush,
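
ide-disk.c gains a ->set_capacity disk op here, and the ide-gd.c hunk further down wires it into the block_device_operations ->set_capacity method, so the block layer can ask an IDE disk to expose its full native, HPA-hidden capacity. A sketch of how the dispatch chain looks from the gendisk side, assuming only the hooks added in this diff; the caller name is hypothetical:

	#include <linux/fs.h>
	#include <linux/genhd.h>

	static unsigned long long example_unlock_hpa(struct gendisk *disk,
						     unsigned long long wanted_sectors)
	{
		const struct block_device_operations *ops = disk->fops;

		/* ide_gd_set_capacity() -> drive->disk_ops->set_capacity()
		 * -> ide_disk_set_capacity(), which re-reads the native max
		 * address and disables the Host Protected Area if possible */
		if (ops->set_capacity)
			return ops->set_capacity(disk, wanted_sectors);

		return get_capacity(disk);	/* no hook: capacity is unchanged */
	}
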
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index a0b8cab1d9a..219e6fb78dc 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -103,7 +103,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive)
ide_finish_cmd(drive, cmd, stat);
else
ide_complete_rq(drive, 0,
- cmd->rq->nr_sectors << 9);
+ blk_rq_sectors(cmd->rq) << 9);
return ide_stopped;
}
printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
@@ -347,7 +347,6 @@ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
return mode;
}
-EXPORT_SYMBOL_GPL(ide_find_dma_mode);
static int ide_tune_dma(ide_drive_t *drive)
{
@@ -510,23 +509,11 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
/*
* un-busy drive etc and make sure request is sane
*/
-
rq = hwif->rq;
- if (!rq)
- goto out;
-
- hwif->rq = NULL;
-
- rq->errors = 0;
-
- if (!rq->bio)
- goto out;
-
- rq->sector = rq->bio->bi_sector;
- rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
- rq->hard_cur_sectors = rq->current_nr_sectors;
- rq->buffer = bio_data(rq->bio);
-out:
+ if (rq) {
+ hwif->rq = NULL;
+ rq->errors = 0;
+ }
return ret;
}
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index 5d5fb961b5c..2b914197961 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -52,7 +52,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,
}
if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
- drive->special.b.recalibrate = 1;
+ drive->special_flags |= IDE_SFLAG_RECALIBRATE;
++rq->errors;
@@ -268,9 +268,8 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
{
int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;
- drive->special.all = 0;
- drive->special.b.set_geometry = legacy;
- drive->special.b.recalibrate = legacy;
+ drive->special_flags =
+ legacy ? (IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE) : 0;
drive->mult_count = 0;
drive->dev_flags &= ~IDE_DFLAG_PARKED;
@@ -280,7 +279,7 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
drive->mult_req = 0;
if (drive->mult_req != drive->mult_count)
- drive->special.b.set_multmode = 1;
+ drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
}
static void pre_reset(ide_drive_t *drive)
@@ -408,8 +407,9 @@ static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
/* more than enough time */
udelay(10);
/* clear SRST, leave nIEN (unless device is on the quirk list) */
- tp_ops->write_devctl(hwif, (drive->quirk_list == 2 ? 0 : ATA_NIEN) |
- ATA_DEVCTL_OBS);
+ tp_ops->write_devctl(hwif,
+ ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) ? 0 : ATA_NIEN) |
+ ATA_DEVCTL_OBS);
/* more than enough time */
udelay(10);
hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 2b4868d95f8..650981758f1 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -134,13 +134,17 @@ static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive,
drive->pc = pc;
if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) {
+ unsigned int done = blk_rq_bytes(drive->hwif->rq);
+
if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR))
ide_floppy_report_error(floppy, pc);
+
/* Giving up */
pc->error = IDE_DRV_ERROR_GENERAL;
drive->failed_pc = NULL;
drive->pc_callback(drive, 0);
+ ide_complete_rq(drive, -EIO, done);
return ide_stopped;
}
@@ -190,7 +194,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
{
struct ide_disk_obj *floppy = drive->driver_data;
int block = sector / floppy->bs_factor;
- int blocks = rq->nr_sectors / floppy->bs_factor;
+ int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
int cmd = rq_data_dir(rq);
ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);
@@ -216,16 +220,14 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
ide_init_pc(pc);
memcpy(pc->c, rq->cmd, sizeof(pc->c));
pc->rq = rq;
- if (rq->data_len && rq_data_dir(rq) == WRITE)
- pc->flags |= PC_FLAG_WRITING;
- pc->buf = rq->data;
- if (rq->bio)
+ if (blk_rq_bytes(rq)) {
pc->flags |= PC_FLAG_DMA_OK;
- /*
- * possibly problematic, doesn't look like ide-floppy correctly
- * handled scattered requests if dma fails...
- */
- pc->req_xfer = pc->buf_size = rq->data_len;
+ if (rq_data_dir(rq) == WRITE)
+ pc->flags |= PC_FLAG_WRITING;
+ }
+ /* pio will be performed by ide_pio_bytes() which handles sg fine */
+ pc->buf = NULL;
+ pc->req_xfer = pc->buf_size = blk_rq_bytes(rq);
}
static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
@@ -257,16 +259,16 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
goto out_end;
}
if (blk_fs_request(rq)) {
- if (((long)rq->sector % floppy->bs_factor) ||
- (rq->nr_sectors % floppy->bs_factor)) {
+ if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
+ (blk_rq_sectors(rq) % floppy->bs_factor)) {
printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
drive->name);
goto out_end;
}
pc = &floppy->queued_pc;
idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
- } else if (blk_special_request(rq)) {
- pc = (struct ide_atapi_pc *) rq->buffer;
+ } else if (blk_special_request(rq) || blk_sense_request(rq)) {
+ pc = (struct ide_atapi_pc *)rq->special;
} else if (blk_pc_request(rq)) {
pc = &floppy->queued_pc;
idefloppy_blockpc_cmd(floppy, pc, rq);
@@ -275,6 +277,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
goto out_end;
}
+ ide_prep_sense(drive, rq);
+
memset(&cmd, 0, sizeof(cmd));
if (rq_data_dir(rq))
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 4b6b71e2cdf..214119026b3 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -287,6 +287,19 @@ static int ide_gd_media_changed(struct gendisk *disk)
return ret;
}
+static unsigned long long ide_gd_set_capacity(struct gendisk *disk,
+ unsigned long long capacity)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+ const struct ide_disk_ops *disk_ops = drive->disk_ops;
+
+ if (disk_ops->set_capacity)
+ return disk_ops->set_capacity(drive, capacity);
+
+ return drive->capacity64;
+}
+
static int ide_gd_revalidate_disk(struct gendisk *disk)
{
struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
@@ -315,6 +328,7 @@ static struct block_device_operations ide_gd_ops = {
.locked_ioctl = ide_gd_ioctl,
.getgeo = ide_gd_getgeo,
.media_changed = ide_gd_media_changed,
+ .set_capacity = ide_gd_set_capacity,
.revalidate_disk = ide_gd_revalidate_disk
};
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 7812ca0be13..54d7c4685d2 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -29,6 +29,7 @@ MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
static const struct ide_port_info ide_generic_port_info = {
.host_flags = IDE_HFLAG_NO_DMA,
+ .chipset = ide_generic,
};
#ifdef CONFIG_ARM
@@ -85,7 +86,7 @@ static void ide_generic_check_pci_legacy_iobases(int *primary, int *secondary)
static int __init ide_generic_init(void)
{
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
unsigned long io_addr;
int i, rc = 0, primary = 0, secondary = 0;
@@ -132,9 +133,7 @@ static int __init ide_generic_init(void)
#else
hw.irq = legacy_irqs[i];
#endif
- hw.chipset = ide_generic;
-
- rc = ide_host_add(&ide_generic_port_info, hws, NULL);
+ rc = ide_host_add(&ide_generic_port_info, hws, 1, NULL);
if (rc) {
release_region(io_addr + 0x206, 1);
release_region(io_addr, 8);
diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c
index c06ebdc4a13..520f42c5445 100644
--- a/drivers/ide/ide-h8300.c
+++ b/drivers/ide/ide-h8300.c
@@ -64,26 +64,26 @@ static const struct ide_tp_ops h8300_tp_ops = {
#define H8300_IDE_GAP (2)
-static inline void hw_setup(hw_regs_t *hw)
+static inline void hw_setup(struct ide_hw *hw)
{
int i;
- memset(hw, 0, sizeof(hw_regs_t));
+ memset(hw, 0, sizeof(*hw));
for (i = 0; i <= 7; i++)
hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT;
hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
- hw->chipset = ide_generic;
}
static const struct ide_port_info h8300_port_info = {
.tp_ops = &h8300_tp_ops,
.host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
+ .chipset = ide_generic,
};
static int __init h8300_ide_init(void)
{
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
@@ -96,7 +96,7 @@ static int __init h8300_ide_init(void)
hw_setup(&hw);
- return ide_host_add(&h8300_port_info, hws, NULL);
+ return ide_host_add(&h8300_port_info, hws, 1, NULL);
out_busy:
printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 35dc38d3b2c..272cc38f6db 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -116,9 +116,9 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
unsigned int ide_rq_bytes(struct request *rq)
{
if (blk_pc_request(rq))
- return rq->data_len;
+ return blk_rq_bytes(rq);
else
- return rq->hard_cur_sectors << 9;
+ return blk_rq_cur_sectors(rq) << 9;
}
EXPORT_SYMBOL_GPL(ide_rq_bytes);
@@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
* and complete the whole request right now
*/
if (blk_noretry_request(rq) && error <= 0)
- nr_bytes = rq->hard_nr_sectors << 9;
+ nr_bytes = blk_rq_sectors(rq) << 9;
rc = ide_end_rq(drive, rq, error, nr_bytes);
if (rc == 0)
@@ -184,29 +184,42 @@ static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
tf->command = ATA_CMD_SET_MULTI;
}
-static ide_startstop_t ide_disk_special(ide_drive_t *drive)
+/**
+ * do_special - issue some special commands
+ * @drive: drive the command is for
+ *
+ * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
+ * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
+ */
+
+static ide_startstop_t do_special(ide_drive_t *drive)
{
- special_t *s = &drive->special;
struct ide_cmd cmd;
+#ifdef DEBUG
+ printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
+ drive->special_flags);
+#endif
+ if (drive->media != ide_disk) {
+ drive->special_flags = 0;
+ drive->mult_req = 0;
+ return ide_stopped;
+ }
+
memset(&cmd, 0, sizeof(cmd));
cmd.protocol = ATA_PROT_NODATA;
- if (s->b.set_geometry) {
- s->b.set_geometry = 0;
+ if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
+ drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
ide_tf_set_specify_cmd(drive, &cmd.tf);
- } else if (s->b.recalibrate) {
- s->b.recalibrate = 0;
+ } else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
+ drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
ide_tf_set_restore_cmd(drive, &cmd.tf);
- } else if (s->b.set_multmode) {
- s->b.set_multmode = 0;
+ } else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
+ drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
ide_tf_set_setmult_cmd(drive, &cmd.tf);
- } else if (s->all) {
- int special = s->all;
- s->all = 0;
- printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
- return ide_stopped;
- }
+ } else
+ BUG();
cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
@@ -217,45 +230,13 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
return ide_started;
}
-/**
- * do_special - issue some special commands
- * @drive: drive the command is for
- *
- * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
- * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
- *
- * It used to do much more, but has been scaled back.
- */
-
-static ide_startstop_t do_special (ide_drive_t *drive)
-{
- special_t *s = &drive->special;
-
-#ifdef DEBUG
- printk("%s: do_special: 0x%02x\n", drive->name, s->all);
-#endif
- if (drive->media == ide_disk)
- return ide_disk_special(drive);
-
- s->all = 0;
- drive->mult_req = 0;
- return ide_stopped;
-}
-
void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
struct scatterlist *sg = hwif->sg_table;
struct request *rq = cmd->rq;
- if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
- sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
- cmd->sg_nents = 1;
- } else if (!rq->bio) {
- sg_init_one(sg, rq->data, rq->data_len);
- cmd->sg_nents = 1;
- } else
- cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
+ cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
}
EXPORT_SYMBOL_GPL(ide_map_sg);
@@ -286,7 +267,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
if (cmd) {
if (cmd->protocol == ATA_PROT_PIO) {
- ide_init_sg_cmd(cmd, rq->nr_sectors << 9);
+ ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
ide_map_sg(drive, cmd);
}
@@ -358,7 +339,8 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
return startstop;
}
- if (!drive->special.all) {
+
+ if (drive->special_flags == 0) {
struct ide_driver *drv;
/*
@@ -371,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
return execute_drive_cmd(drive, rq);
else if (blk_pm_request(rq)) {
- struct request_pm_state *pm = rq->data;
+ struct request_pm_state *pm = rq->special;
#ifdef DEBUG_PM
printk("%s: start_power_step(step: %d)\n",
drive->name, pm->pm_step);
@@ -394,7 +376,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
drv = *(struct ide_driver **)rq->rq_disk->private_data;
- return drv->do_request(drive, rq, rq->sector);
+ return drv->do_request(drive, rq, blk_rq_pos(rq));
}
return do_special(drive);
kill_rq:
@@ -484,6 +466,9 @@ void do_ide_request(struct request_queue *q)
spin_unlock_irq(q->queue_lock);
+ /* HLD do_request() callback might sleep, make sure it's okay */
+ might_sleep();
+
if (ide_lock_host(host, hwif))
goto plug_device_2;
@@ -491,10 +476,10 @@ void do_ide_request(struct request_queue *q)
if (!ide_lock_port(hwif)) {
ide_hwif_t *prev_port;
+
+ WARN_ON_ONCE(hwif->rq);
repeat:
prev_port = hwif->host->cur_port;
- hwif->rq = NULL;
-
if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
time_after(drive->sleep, jiffies)) {
ide_unlock_port(hwif);
@@ -503,11 +488,15 @@ repeat:
if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
hwif != prev_port) {
+ ide_drive_t *cur_dev =
+ prev_port ? prev_port->cur_dev : NULL;
+
/*
* set nIEN for previous port, drives in the
- * quirk_list may not like intr setups/cleanups
+ * quirk list may not like intr setups/cleanups
*/
- if (prev_port && prev_port->cur_dev->quirk_list == 0)
+ if (cur_dev &&
+ (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
prev_port->tp_ops->write_devctl(prev_port,
ATA_NIEN |
ATA_DEVCTL_OBS);
@@ -523,7 +512,9 @@ repeat:
* we know that the queue isn't empty, but this can happen
* if the q->prep_rq_fn() decides to kill a request
*/
- rq = elv_next_request(drive->queue);
+ if (!rq)
+ rq = blk_fetch_request(drive->queue);
+
spin_unlock_irq(q->queue_lock);
spin_lock_irq(&hwif->lock);
@@ -535,7 +526,7 @@ repeat:
/*
* Sanity: don't accept a request that isn't a PM request
* if we are currently power managed. This is very important as
- * blk_stop_queue() doesn't prevent the elv_next_request()
+ * blk_stop_queue() doesn't prevent the blk_fetch_request()
* above to return us whatever is in the queue. Since we call
* ide_do_request() ourselves, we end up taking requests while
* the queue is blocked...
@@ -559,8 +550,11 @@ repeat:
startstop = start_request(drive, rq);
spin_lock_irq(&hwif->lock);
- if (startstop == ide_stopped)
+ if (startstop == ide_stopped) {
+ rq = hwif->rq;
+ hwif->rq = NULL;
goto repeat;
+ }
} else
goto plug_device;
out:
@@ -576,18 +570,24 @@ plug_device:
plug_device_2:
spin_lock_irq(q->queue_lock);
+ if (rq)
+ blk_requeue_request(q, rq);
if (!elv_queue_empty(q))
blk_plug_device(q);
}
-static void ide_plug_device(ide_drive_t *drive)
+static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
struct request_queue *q = drive->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
+
+ if (rq)
+ blk_requeue_request(q, rq);
if (!elv_queue_empty(q))
blk_plug_device(q);
+
spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -636,6 +636,7 @@ void ide_timer_expiry (unsigned long data)
unsigned long flags;
int wait = -1;
int plug_device = 0;
+ struct request *uninitialized_var(rq_in_flight);
spin_lock_irqsave(&hwif->lock, flags);
@@ -696,7 +697,9 @@ void ide_timer_expiry (unsigned long data)
}
spin_lock_irq(&hwif->lock);
enable_irq(hwif->irq);
- if (startstop == ide_stopped) {
+ if (startstop == ide_stopped && hwif->polling == 0) {
+ rq_in_flight = hwif->rq;
+ hwif->rq = NULL;
ide_unlock_port(hwif);
plug_device = 1;
}
@@ -705,7 +708,7 @@ void ide_timer_expiry (unsigned long data)
if (plug_device) {
ide_unlock_host(hwif->host);
- ide_plug_device(drive);
+ ide_requeue_and_plug(drive, rq_in_flight);
}
}
@@ -791,6 +794,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
ide_startstop_t startstop;
irqreturn_t irq_ret = IRQ_NONE;
int plug_device = 0;
+ struct request *uninitialized_var(rq_in_flight);
if (host->host_flags & IDE_HFLAG_SERIALIZE) {
if (hwif != host->cur_port)
@@ -868,8 +872,10 @@ irqreturn_t ide_intr (int irq, void *dev_id)
* same irq as is currently being serviced here, and Linux
* won't allow another of the same (on any CPU) until we return.
*/
- if (startstop == ide_stopped) {
+ if (startstop == ide_stopped && hwif->polling == 0) {
BUG_ON(hwif->handler);
+ rq_in_flight = hwif->rq;
+ hwif->rq = NULL;
ide_unlock_port(hwif);
plug_device = 1;
}
@@ -879,7 +885,7 @@ out:
out_early:
if (plug_device) {
ide_unlock_host(hwif->host);
- ide_plug_device(drive);
+ ide_requeue_and_plug(drive, rq_in_flight);
}
return irq_ret;
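
The other big theme in ide-io.c is replacing the special_t bitfield (drive->special.b.set_geometry and friends) with a plain drive->special_flags byte tested against IDE_SFLAG_* masks, which lets EH code assign several pending special commands in one store and turns an unknown flag in do_special() into a BUG() instead of a printk. A sketch of the resulting test-and-consume idiom; the helper name is illustrative, the flag names come from the hunks above:

	#include <linux/ide.h>

	static int example_take_special(ide_drive_t *drive, u8 flag)
	{
		if ((drive->special_flags & flag) == 0)
			return 0;

		drive->special_flags &= ~flag;	/* consume it, as do_special() does */
		return 1;
	}

	/* usage, mirroring do_special():
	 *	if (example_take_special(drive, IDE_SFLAG_SET_GEOMETRY))
	 *		ide_tf_set_specify_cmd(drive, &cmd.tf);
	 */
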
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index c1c25ebbaa1..5991b23793f 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -231,7 +231,6 @@ static int generic_drive_reset(ide_drive_t *drive)
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd_len = 1;
rq->cmd[0] = REQ_DRIVE_RESET;
- rq->cmd_flags |= REQ_SOFTBARRIER;
if (blk_execute_rq(drive->queue, NULL, rq, 1))
ret = rq->errors;
blk_put_request(rq);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index c19a221b1e1..fa047150a1c 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -206,8 +206,6 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list);
/*
* Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
- * We list them here and depend on the device side cable detection for them.
- *
* Some optical devices with the buggy firmwares have the same problem.
*/
static const struct drive_list_entry ivb_list[] = {
@@ -251,10 +249,25 @@ u8 eighty_ninty_three(ide_drive_t *drive)
* - force bit13 (80c cable present) check also for !ivb devices
* (unless the slave device is pre-ATA3)
*/
- if ((id[ATA_ID_HW_CONFIG] & 0x4000) ||
- (ivb && (id[ATA_ID_HW_CONFIG] & 0x2000)))
+ if (id[ATA_ID_HW_CONFIG] & 0x4000)
return 1;
+ if (ivb) {
+ const char *model = (char *)&id[ATA_ID_PROD];
+
+ if (strstr(model, "TSSTcorp CDDVDW SH-S202")) {
+ /*
+ * These ATAPI devices always report 80c cable
+ * so we have to depend on the host in this case.
+ */
+ if (hwif->cbl == ATA_CBL_PATA80)
+ return 1;
+ } else {
+ /* Depend on the device side cable detection. */
+ if (id[ATA_ID_HW_CONFIG] & 0x2000)
+ return 1;
+ }
+ }
no_80w:
if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
return 0;
@@ -269,6 +282,29 @@ no_80w:
return 0;
}
+static const char *nien_quirk_list[] = {
+ "QUANTUM FIREBALLlct08 08",
+ "QUANTUM FIREBALLP KA6.4",
+ "QUANTUM FIREBALLP KA9.1",
+ "QUANTUM FIREBALLP KX13.6",
+ "QUANTUM FIREBALLP KX20.5",
+ "QUANTUM FIREBALLP KX27.3",
+ "QUANTUM FIREBALLP LM20.4",
+ "QUANTUM FIREBALLP LM20.5",
+ NULL
+};
+
+void ide_check_nien_quirk_list(ide_drive_t *drive)
+{
+ const char **list, *m = (char *)&drive->id[ATA_ID_PROD];
+
+ for (list = nien_quirk_list; *list != NULL; list++)
+ if (strstr(m, *list) != NULL) {
+ drive->dev_flags |= IDE_DFLAG_NIEN_QUIRK;
+ return;
+ }
+}
+
int ide_driveid_update(ide_drive_t *drive)
{
u16 *id;
@@ -298,7 +334,6 @@ int ide_driveid_update(ide_drive_t *drive)
return 1;
out_err:
- SELECT_MASK(drive, 0);
if (rc == 2)
printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__);
kfree(id);
@@ -352,7 +387,7 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);
- if (drive->quirk_list == 2)
+ if (drive->dev_flags & IDE_DFLAG_NIEN_QUIRK)
tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
error = __ide_wait_stat(drive, drive->ready_stat,
diff --git a/drivers/ide/ide-legacy.c b/drivers/ide/ide-legacy.c
index 8c5dcbf2254..b9654a7bb7b 100644
--- a/drivers/ide/ide-legacy.c
+++ b/drivers/ide/ide-legacy.c
@@ -1,7 +1,7 @@
#include <linux/kernel.h>
#include <linux/ide.h>
-static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
+static void ide_legacy_init_one(struct ide_hw **hws, struct ide_hw *hw,
u8 port_no, const struct ide_port_info *d,
unsigned long config)
{
@@ -33,7 +33,6 @@ static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
ide_std_init_ports(hw, base, ctl);
hw->irq = irq;
- hw->chipset = d->chipset;
hw->config = config;
hws[port_no] = hw;
@@ -41,7 +40,7 @@ static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
{
- hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
+ struct ide_hw hw[2], *hws[] = { NULL, NULL };
memset(&hw, 0, sizeof(hw));
@@ -53,6 +52,6 @@ int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
(d->host_flags & IDE_HFLAG_SINGLE))
return -ENOENT;
- return ide_host_add(d, hws, NULL);
+ return ide_host_add(d, hws, 2, NULL);
}
EXPORT_SYMBOL_GPL(ide_legacy_device_add);
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 56ff8c46c7d..e386a32dc9b 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -31,24 +31,6 @@ void ide_toggle_bounce(ide_drive_t *drive, int on)
blk_queue_bounce_limit(drive->queue, addr);
}
-static void ide_dump_opcode(ide_drive_t *drive)
-{
- struct request *rq = drive->hwif->rq;
- struct ide_cmd *cmd = NULL;
-
- if (!rq)
- return;
-
- if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
- cmd = rq->special;
-
- printk(KERN_ERR "ide: failed opcode was: ");
- if (cmd == NULL)
- printk(KERN_CONT "unknown\n");
- else
- printk(KERN_CONT "0x%02x\n", cmd->tf.command);
-}
-
u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48)
{
struct ide_taskfile *tf = &cmd->tf;
@@ -91,7 +73,7 @@ static void ide_dump_sector(ide_drive_t *drive)
static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
{
- printk(KERN_ERR "{ ");
+ printk(KERN_CONT "{ ");
if (err & ATA_ABORTED)
printk(KERN_CONT "DriveStatusError ");
if (err & ATA_ICRC)
@@ -114,14 +96,14 @@ static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
if (rq)
printk(KERN_CONT ", sector=%llu",
- (unsigned long long)rq->sector);
+ (unsigned long long)blk_rq_pos(rq));
}
printk(KERN_CONT "\n");
}
static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
{
- printk(KERN_ERR "{ ");
+ printk(KERN_CONT "{ ");
if (err & ATAPI_ILI)
printk(KERN_CONT "IllegalLengthIndication ");
if (err & ATAPI_EOM)
@@ -179,7 +161,10 @@ u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
else
ide_dump_atapi_error(drive, err);
}
- ide_dump_opcode(drive);
+
+ printk(KERN_ERR "%s: possibly failed opcode: 0x%02x\n",
+ drive->name, drive->hwif->cmd.tf.command);
+
return err;
}
EXPORT_SYMBOL(ide_dump_status);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 310d03f2b5b..a914023d6d0 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -24,11 +24,8 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
start_queue = 1;
spin_unlock_irq(&hwif->lock);
- if (start_queue) {
- spin_lock_irq(q->queue_lock);
- blk_start_queueing(q);
- spin_unlock_irq(q->queue_lock);
- }
+ if (start_queue)
+ blk_run_queue(q);
return;
}
spin_unlock_irq(&hwif->lock);
diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
index 61111fd2713..39d4e01f5c9 100644
--- a/drivers/ide/ide-pci-generic.c
+++ b/drivers/ide/ide-pci-generic.c
@@ -33,6 +33,16 @@ static int ide_generic_all; /* Set to claim all devices */
module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
+static void netcell_quirkproc(ide_drive_t *drive)
+{
+ /* mark words 85-87 as valid */
+ drive->id[ATA_ID_CSF_DEFAULT] |= 0x4000;
+}
+
+static const struct ide_port_ops netcell_port_ops = {
+ .quirkproc = netcell_quirkproc,
+};
+
#define DECLARE_GENERIC_PCI_DEV(extra_flags) \
{ \
.name = DRV_NAME, \
@@ -74,6 +84,7 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
{ /* 6: Revolution */
.name = DRV_NAME,
+ .port_ops = &netcell_port_ops,
.host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
IDE_HFLAG_TRUST_BIOS_FOR_DMA |
IDE_HFLAG_OFF_BOARD,
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 0d8a151c0a0..c14ca144cff 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -3,11 +3,11 @@
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
- ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
+ ide_drive_t *drive = dev_get_drvdata(dev);
+ ide_drive_t *pair = ide_get_pair_dev(drive);
ide_hwif_t *hwif = drive->hwif;
struct request *rq;
struct request_pm_state rqpm;
- struct ide_cmd cmd;
int ret;
/* call ACPI _GTM only once */
@@ -15,11 +15,9 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
ide_acpi_get_timing(hwif);
memset(&rqpm, 0, sizeof(rqpm));
- memset(&cmd, 0, sizeof(cmd));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_PM_SUSPEND;
- rq->special = &cmd;
- rq->data = &rqpm;
+ rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_SUSPEND;
if (mesg.event == PM_EVENT_PRETHAW)
mesg.event = PM_EVENT_FREEZE;
@@ -37,11 +35,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
int generic_ide_resume(struct device *dev)
{
- ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
+ ide_drive_t *drive = dev_get_drvdata(dev);
+ ide_drive_t *pair = ide_get_pair_dev(drive);
ide_hwif_t *hwif = drive->hwif;
struct request *rq;
struct request_pm_state rqpm;
- struct ide_cmd cmd;
int err;
/* call ACPI _PS0 / _STM only once */
@@ -53,12 +51,10 @@ int generic_ide_resume(struct device *dev)
ide_acpi_exec_tfs(drive);
memset(&rqpm, 0, sizeof(rqpm));
- memset(&cmd, 0, sizeof(cmd));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_PM_RESUME;
rq->cmd_flags |= REQ_PREEMPT;
- rq->special = &cmd;
- rq->data = &rqpm;
+ rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
rqpm.pm_state = PM_EVENT_ON;
@@ -77,7 +73,7 @@ int generic_ide_resume(struct device *dev)
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
- struct request_pm_state *pm = rq->data;
+ struct request_pm_state *pm = rq->special;
#ifdef DEBUG_PM
printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
@@ -107,10 +103,8 @@ void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
- struct request_pm_state *pm = rq->data;
- struct ide_cmd *cmd = rq->special;
-
- memset(cmd, 0, sizeof(*cmd));
+ struct request_pm_state *pm = rq->special;
+ struct ide_cmd cmd = { };
switch (pm->pm_step) {
case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
@@ -123,12 +117,12 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
return ide_stopped;
}
if (ata_id_flush_ext_enabled(drive->id))
- cmd->tf.command = ATA_CMD_FLUSH_EXT;
+ cmd.tf.command = ATA_CMD_FLUSH_EXT;
else
- cmd->tf.command = ATA_CMD_FLUSH;
+ cmd.tf.command = ATA_CMD_FLUSH;
goto out_do_tf;
case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
- cmd->tf.command = ATA_CMD_STANDBYNOW1;
+ cmd.tf.command = ATA_CMD_STANDBYNOW1;
goto out_do_tf;
case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
ide_set_max_pio(drive);
@@ -141,7 +135,7 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
ide_complete_power_step(drive, rq);
return ide_stopped;
case IDE_PM_IDLE: /* Resume step 2 (idle) */
- cmd->tf.command = ATA_CMD_IDLEIMMEDIATE;
+ cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
goto out_do_tf;
case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
/*
@@ -163,11 +157,11 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
return ide_stopped;
out_do_tf:
- cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd->valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- cmd->protocol = ATA_PROT_NODATA;
+ cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
+ cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
+ cmd.protocol = ATA_PROT_NODATA;
- return do_rw_taskfile(drive, cmd);
+ return do_rw_taskfile(drive, &cmd);
}
/**
@@ -181,7 +175,7 @@ out_do_tf:
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
struct request_queue *q = drive->queue;
- struct request_pm_state *pm = rq->data;
+ struct request_pm_state *pm = rq->special;
unsigned long flags;
ide_complete_power_step(drive, rq);
@@ -207,7 +201,7 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
- struct request_pm_state *pm = rq->data;
+ struct request_pm_state *pm = rq->special;
if (blk_pm_suspend_request(rq) &&
pm->pm_step == IDE_PM_START_SUSPEND)
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 6e80b774e88..017b1df3b80 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -29,6 +29,7 @@ static struct pnp_device_id idepnp_devices[] = {
static const struct ide_port_info ide_pnp_port_info = {
.host_flags = IDE_HFLAG_NO_DMA,
+ .chipset = ide_generic,
};
static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
@@ -36,7 +37,7 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
struct ide_host *host;
unsigned long base, ctl;
int rc;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
@@ -62,9 +63,8 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw, base, ctl);
hw.irq = pnp_irq(dev, 0);
- hw.chipset = ide_generic;
- rc = ide_host_add(&ide_pnp_port_info, hws, &host);
+ rc = ide_host_add(&ide_pnp_port_info, hws, 1, &host);
if (rc)
goto out;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 7f264ed1141..79e0af3fd15 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -97,7 +97,7 @@ static void ide_disk_init_mult_count(ide_drive_t *drive)
drive->mult_req = id[ATA_ID_MULTSECT] & 0xff;
if (drive->mult_req)
- drive->special.b.set_multmode = 1;
+ drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
}
}
@@ -295,7 +295,7 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id)
timeout = ((cmd == ATA_CMD_ID_ATA) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
- if (ide_busy_sleep(hwif, timeout, use_altstatus))
+ if (ide_busy_sleep(drive, timeout, use_altstatus))
return 1;
/* wait for IRQ and ATA_DRQ */
@@ -316,8 +316,9 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id)
return rc;
}
-int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus)
+int ide_busy_sleep(ide_drive_t *drive, unsigned long timeout, int altstatus)
{
+ ide_hwif_t *hwif = drive->hwif;
u8 stat;
timeout += jiffies;
@@ -330,6 +331,8 @@ int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus)
return 0;
} while (time_before(jiffies, timeout));
+ printk(KERN_ERR "%s: timeout in %s\n", drive->name, __func__);
+
return 1; /* drive timed-out */
}
@@ -420,7 +423,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
tp_ops->dev_select(drive);
msleep(50);
tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
- (void)ide_busy_sleep(hwif, WAIT_WORSTCASE, 0);
+ (void)ide_busy_sleep(drive, WAIT_WORSTCASE, 0);
rc = ide_dev_read_id(drive, cmd, id);
}
@@ -462,23 +465,8 @@ static u8 probe_for_drive(ide_drive_t *drive)
int rc;
u8 cmd;
- /*
- * In order to keep things simple we have an id
- * block for all drives at all times. If the device
- * is pre ATA or refuses ATA/ATAPI identify we
- * will add faked data to this.
- *
- * Also note that 0 everywhere means "can't do X"
- */
-
drive->dev_flags &= ~IDE_DFLAG_ID_READ;
- drive->id = kzalloc(SECTOR_SIZE, GFP_KERNEL);
- if (drive->id == NULL) {
- printk(KERN_ERR "ide: out of memory for id data.\n");
- return 0;
- }
-
m = (char *)&drive->id[ATA_ID_PROD];
strcpy(m, "UNKNOWN");
@@ -494,7 +482,7 @@ static u8 probe_for_drive(ide_drive_t *drive)
}
if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- goto out_free;
+ return 0;
/* identification failed? */
if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
@@ -518,7 +506,7 @@ static u8 probe_for_drive(ide_drive_t *drive)
}
if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- goto out_free;
+ return 0;
/* The drive wasn't being helpful. Add generic info only */
if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
@@ -532,9 +520,6 @@ static u8 probe_for_drive(ide_drive_t *drive)
}
return 1;
-out_free:
- kfree(drive->id);
- return 0;
}
static void hwif_release_dev(struct device *dev)
@@ -550,7 +535,7 @@ static int ide_register_port(ide_hwif_t *hwif)
/* register with global device tree */
dev_set_name(&hwif->gendev, hwif->name);
- hwif->gendev.driver_data = hwif;
+ dev_set_drvdata(&hwif->gendev, hwif);
if (hwif->gendev.parent == NULL)
hwif->gendev.parent = hwif->dev;
hwif->gendev.release = hwif_release_dev;
@@ -699,8 +684,14 @@ static int ide_probe_port(ide_hwif_t *hwif)
if (irqd)
disable_irq(hwif->irq);
- if (ide_port_wait_ready(hwif) == -EBUSY)
- printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name);
+ rc = ide_port_wait_ready(hwif);
+ if (rc == -ENODEV) {
+ printk(KERN_INFO "%s: no devices on the port\n", hwif->name);
+ goto out;
+ } else if (rc == -EBUSY)
+ printk(KERN_ERR "%s: not ready before the probe\n", hwif->name);
+ else
+ rc = -ENODEV;
/*
* Second drive should only exist if first drive was found,
@@ -711,7 +702,7 @@ static int ide_probe_port(ide_hwif_t *hwif)
if (drive->dev_flags & IDE_DFLAG_PRESENT)
rc = 0;
}
-
+out:
/*
* Use cached IRQ number. It might be (and is...) changed by probe
* code above
@@ -729,6 +720,8 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
int i;
ide_port_for_each_present_dev(i, drive, hwif) {
+ ide_check_nien_quirk_list(drive);
+
if (port_ops && port_ops->quirkproc)
port_ops->quirkproc(drive);
}
@@ -814,8 +807,6 @@ static int ide_port_setup_devices(ide_hwif_t *hwif)
if (ide_init_queue(drive)) {
printk(KERN_ERR "ide: failed to init %s\n",
drive->name);
- kfree(drive->id);
- drive->id = NULL;
drive->dev_flags &= ~IDE_DFLAG_PRESENT;
continue;
}
@@ -944,9 +935,6 @@ static void drive_release_dev (struct device *dev)
blk_cleanup_queue(drive->queue);
drive->queue = NULL;
- kfree(drive->id);
- drive->id = NULL;
-
drive->dev_flags &= ~IDE_DFLAG_PRESENT;
complete(&drive->gendev_rel_comp);
@@ -999,9 +987,9 @@ static void hwif_register_devices(ide_hwif_t *hwif)
int ret;
dev_set_name(dev, "%u.%u", hwif->index, i);
+ dev_set_drvdata(dev, drive);
dev->parent = &hwif->gendev;
dev->bus = &ide_bus_type;
- dev->driver_data = drive;
dev->release = drive_release_dev;
ret = device_register(dev);
@@ -1032,6 +1020,15 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
if (port_ops && port_ops->init_dev)
port_ops->init_dev(drive);
}
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ /*
+ * default to PIO Mode 0 before we figure out
+ * the most suited mode for the attached device
+ */
+ if (port_ops && port_ops->set_pio_mode)
+ port_ops->set_pio_mode(drive, 0);
+ }
}
static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
@@ -1039,8 +1036,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
{
hwif->channel = port;
- if (d->chipset)
- hwif->chipset = d->chipset;
+ hwif->chipset = d->chipset ? d->chipset : ide_pci;
if (d->init_iops)
d->init_iops(hwif);
@@ -1121,16 +1117,19 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
ide_port_for_each_dev(i, drive, hwif) {
u8 j = (hwif->index * MAX_DRIVES) + i;
+ u16 *saved_id = drive->id;
memset(drive, 0, sizeof(*drive));
+ memset(saved_id, 0, SECTOR_SIZE);
+ drive->id = saved_id;
drive->media = ide_disk;
drive->select = (i << 4) | ATA_DEVICE_OBS;
drive->hwif = hwif;
drive->ready_stat = ATA_DRDY;
drive->bad_wstat = BAD_W_STAT;
- drive->special.b.recalibrate = 1;
- drive->special.b.set_geometry = 1;
+ drive->special_flags = IDE_SFLAG_RECALIBRATE |
+ IDE_SFLAG_SET_GEOMETRY;
drive->name[0] = 'h';
drive->name[1] = 'd';
drive->name[2] = 'a' + j;
@@ -1165,11 +1164,10 @@ static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
ide_port_init_devices_data(hwif);
}
-static void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
+static void ide_init_port_hw(ide_hwif_t *hwif, struct ide_hw *hw)
{
memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
hwif->irq = hw->irq;
- hwif->chipset = hw->chipset;
hwif->dev = hw->dev;
hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
hwif->ack_intr = hw->ack_intr;
@@ -1230,8 +1228,10 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
ide_drive_t *drive;
int i;
- ide_port_for_each_dev(i, drive, hwif)
+ ide_port_for_each_dev(i, drive, hwif) {
+ kfree(drive->id);
kfree(drive);
+ }
}
static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
@@ -1245,6 +1245,18 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
if (drive == NULL)
goto out_nomem;
+ /*
+ * In order to keep things simple we have an id
+ * block for all drives at all times. If the device
+ * is pre ATA or refuses ATA/ATAPI identify we
+ * will add faked data to this.
+ *
+ * Also note that 0 everywhere means "can't do X"
+ */
+ drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node);
+ if (drive->id == NULL)
+ goto out_nomem;
+
hwif->devices[i] = drive;
}
return 0;
@@ -1254,7 +1266,8 @@ out_nomem:
return -ENOMEM;
}
-struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
+struct ide_host *ide_host_alloc(const struct ide_port_info *d,
+ struct ide_hw **hws, unsigned int n_ports)
{
struct ide_host *host;
struct device *dev = hws[0] ? hws[0]->dev : NULL;
@@ -1265,7 +1278,7 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
if (host == NULL)
return NULL;
- for (i = 0; i < MAX_HOST_PORTS; i++) {
+ for (i = 0; i < n_ports; i++) {
ide_hwif_t *hwif;
int idx;
@@ -1285,6 +1298,7 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
if (idx < 0) {
printk(KERN_ERR "%s: no free slot for interface\n",
d ? d->name : "ide");
+ ide_port_free_devices(hwif);
kfree(hwif);
continue;
}
@@ -1341,7 +1355,7 @@ static void ide_disable_port(ide_hwif_t *hwif)
}
int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
- hw_regs_t **hws)
+ struct ide_hw **hws)
{
ide_hwif_t *hwif, *mate = NULL;
int i, j = 0;
@@ -1435,13 +1449,13 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
}
EXPORT_SYMBOL_GPL(ide_host_register);
-int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws,
- struct ide_host **hostp)
+int ide_host_add(const struct ide_port_info *d, struct ide_hw **hws,
+ unsigned int n_ports, struct ide_host **hostp)
{
struct ide_host *host;
int rc;
- host = ide_host_alloc(d, hws);
+ host = ide_host_alloc(d, hws, n_ports);
if (host == NULL)
return -ENOMEM;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index cb942a9b580..4b447a8a49d 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -131,13 +131,6 @@ enum {
IDETAPE_DIR_WRITE = (1 << 2),
};
-struct idetape_bh {
- u32 b_size;
- atomic_t b_count;
- struct idetape_bh *b_reqnext;
- char *b_data;
-};
-
/* Tape door status */
#define DOOR_UNLOCKED 0
#define DOOR_LOCKED 1
@@ -219,18 +212,12 @@ typedef struct ide_tape_obj {
/* Data buffer size chosen based on the tape's recommendation */
int buffer_size;
- /* merge buffer */
- struct idetape_bh *merge_bh;
- /* size of the merge buffer */
- int merge_bh_size;
- /* pointer to current buffer head within the merge buffer */
- struct idetape_bh *bh;
- char *b_data;
- int b_count;
-
- int pages_per_buffer;
- /* Wasted space in each stage */
- int excess_bh_size;
+ /* Staging buffer of buffer_size bytes */
+ void *buf;
+ /* The read/write cursor */
+ void *cur;
+ /* The number of valid bytes in buf */
+ size_t valid;
/* Measures average tape speed */
unsigned long avg_time;
@@ -253,18 +240,27 @@ static struct class *idetape_sysfs_class;
static void ide_tape_release(struct device *);
-static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
+static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
+
+static struct ide_tape_obj *ide_tape_get(struct gendisk *disk, bool cdev,
+ unsigned int i)
{
struct ide_tape_obj *tape = NULL;
mutex_lock(&idetape_ref_mutex);
- tape = ide_drv_g(disk, ide_tape_obj);
+
+ if (cdev)
+ tape = idetape_devs[i];
+ else
+ tape = ide_drv_g(disk, ide_tape_obj);
+
if (tape) {
if (ide_device_get(tape->drive))
tape = NULL;
else
get_device(&tape->dev);
}
+
mutex_unlock(&idetape_ref_mutex);
return tape;
}
@@ -280,102 +276,6 @@ static void ide_tape_put(struct ide_tape_obj *tape)
}
/*
- * The variables below are used for the character device interface. Additional
- * state variables are defined in our ide_drive_t structure.
- */
-static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
-
-static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
-{
- struct ide_tape_obj *tape = NULL;
-
- mutex_lock(&idetape_ref_mutex);
- tape = idetape_devs[i];
- if (tape)
- get_device(&tape->dev);
- mutex_unlock(&idetape_ref_mutex);
- return tape;
-}
-
-static int idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
- unsigned int bcount)
-{
- struct idetape_bh *bh = pc->bh;
- int count;
-
- while (bcount) {
- if (bh == NULL)
- break;
- count = min(
- (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
- bcount);
- drive->hwif->tp_ops->input_data(drive, NULL, bh->b_data +
- atomic_read(&bh->b_count), count);
- bcount -= count;
- atomic_add(count, &bh->b_count);
- if (atomic_read(&bh->b_count) == bh->b_size) {
- bh = bh->b_reqnext;
- if (bh)
- atomic_set(&bh->b_count, 0);
- }
- }
-
- pc->bh = bh;
-
- return bcount;
-}
-
-static int idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
- unsigned int bcount)
-{
- struct idetape_bh *bh = pc->bh;
- int count;
-
- while (bcount) {
- if (bh == NULL)
- break;
- count = min((unsigned int)pc->b_count, (unsigned int)bcount);
- drive->hwif->tp_ops->output_data(drive, NULL, pc->b_data, count);
- bcount -= count;
- pc->b_data += count;
- pc->b_count -= count;
- if (!pc->b_count) {
- bh = bh->b_reqnext;
- pc->bh = bh;
- if (bh) {
- pc->b_data = bh->b_data;
- pc->b_count = atomic_read(&bh->b_count);
- }
- }
- }
-
- return bcount;
-}
-
-static void idetape_update_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc)
-{
- struct idetape_bh *bh = pc->bh;
- int count;
- unsigned int bcount = pc->xferred;
-
- if (pc->flags & PC_FLAG_WRITING)
- return;
- while (bcount) {
- if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
- __func__);
- return;
- }
- count = min((unsigned int)bh->b_size, (unsigned int)bcount);
- atomic_set(&bh->b_count, count);
- if (atomic_read(&bh->b_count) == bh->b_size)
- bh = bh->b_reqnext;
- bcount -= count;
- }
- pc->bh = bh;
-}
-
-/*
* called on each failed packet command retry to analyze the request sense. We
* currently do not utilize this information.
*/
@@ -392,12 +292,10 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
pc->c[0], tape->sense_key, tape->asc, tape->ascq);
/* Correct pc->xferred by asking the tape. */
- if (pc->flags & PC_FLAG_DMA_ERROR) {
+ if (pc->flags & PC_FLAG_DMA_ERROR)
pc->xferred = pc->req_xfer -
tape->blk_size *
get_unaligned_be32(&sense[3]);
- idetape_update_buffers(drive, pc);
- }
/*
* If error was the result of a zero-length read or write command,
@@ -436,29 +334,6 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
}
}
-/* Free data buffers completely. */
-static void ide_tape_kfree_buffer(idetape_tape_t *tape)
-{
- struct idetape_bh *prev_bh, *bh = tape->merge_bh;
-
- while (bh) {
- u32 size = bh->b_size;
-
- while (size) {
- unsigned int order = fls(size >> PAGE_SHIFT)-1;
-
- if (bh->b_data)
- free_pages((unsigned long)bh->b_data, order);
-
- size &= (order-1);
- bh->b_data += (1 << order) * PAGE_SIZE;
- }
- prev_bh = bh;
- bh = bh->b_reqnext;
- kfree(prev_bh);
- }
-}
-
static void ide_tape_handle_dsc(ide_drive_t *);
static int ide_tape_callback(ide_drive_t *drive, int dsc)
@@ -496,7 +371,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
}
tape->first_frame += blocks;
- rq->current_nr_sectors -= blocks;
+ rq->resid_len -= blocks * tape->blk_size;
if (pc->error) {
uptodate = 0;
@@ -513,7 +388,8 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
if (readpos[0] & 0x4) {
printk(KERN_INFO "ide-tape: Block location is unknown"
"to the tape\n");
- clear_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
+ clear_bit(ilog2(IDE_AFLAG_ADDRESS_VALID),
+ &drive->atapi_flags);
uptodate = 0;
err = IDE_DRV_ERROR_GENERAL;
} else {
@@ -522,7 +398,8 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
tape->partition = readpos[1];
tape->first_frame = be32_to_cpup((__be32 *)&readpos[4]);
- set_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
+ set_bit(ilog2(IDE_AFLAG_ADDRESS_VALID),
+ &drive->atapi_flags);
}
}
@@ -558,19 +435,6 @@ static void ide_tape_handle_dsc(ide_drive_t *drive)
idetape_postpone_request(drive);
}
-static int ide_tape_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
- unsigned int bcount, int write)
-{
- unsigned int bleft;
-
- if (write)
- bleft = idetape_output_buffers(drive, pc, bcount);
- else
- bleft = idetape_input_buffers(drive, pc, bcount);
-
- return bcount - bleft;
-}
-
/*
* Packet Command Interface
*
@@ -614,12 +478,6 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
{
idetape_tape_t *tape = drive->driver_data;
- if (drive->pc->c[0] == REQUEST_SENSE &&
- pc->c[0] == REQUEST_SENSE) {
- printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
- "Two request sense in serial were issued\n");
- }
-
if (drive->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
drive->failed_pc = pc;
@@ -628,6 +486,8 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
(pc->flags & PC_FLAG_ABORT)) {
+ unsigned int done = blk_rq_bytes(drive->hwif->rq);
+
/*
* We will "abort" retrying a packet command in case legitimate
* error code was received (crossing a filemark, or end of the
@@ -647,8 +507,10 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
/* Giving up */
pc->error = IDE_DRV_ERROR_GENERAL;
}
+
drive->failed_pc = NULL;
drive->pc_callback(drive, 0);
+ ide_complete_rq(drive, -EIO, done);
return ide_stopped;
}
debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
@@ -701,7 +563,7 @@ static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
printk(KERN_ERR "ide-tape: %s: I/O error, ",
tape->name);
/* Retry operation */
- ide_retry_pc(drive, tape->disk);
+ ide_retry_pc(drive);
return ide_stopped;
}
pc->error = 0;
@@ -717,27 +579,22 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
struct ide_atapi_pc *pc, struct request *rq,
u8 opcode)
{
- struct idetape_bh *bh = (struct idetape_bh *)rq->special;
- unsigned int length = rq->current_nr_sectors;
+ unsigned int length = blk_rq_sectors(rq);
ide_init_pc(pc);
put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
pc->c[1] = 1;
- pc->bh = bh;
pc->buf = NULL;
pc->buf_size = length * tape->blk_size;
pc->req_xfer = pc->buf_size;
if (pc->req_xfer == tape->buffer_size)
pc->flags |= PC_FLAG_DMA_OK;
- if (opcode == READ_6) {
+ if (opcode == READ_6)
pc->c[0] = READ_6;
- atomic_set(&bh->b_count, 0);
- } else if (opcode == WRITE_6) {
+ else if (opcode == WRITE_6) {
pc->c[0] = WRITE_6;
pc->flags |= PC_FLAG_WRITING;
- pc->b_data = bh->b_data;
- pc->b_count = atomic_read(&bh->b_count);
}
memcpy(rq->cmd, pc->c, 12);
@@ -753,12 +610,10 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
struct ide_cmd cmd;
u8 stat;
- debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %lu,"
- " current_nr_sectors: %u\n",
- (unsigned long long)rq->sector, rq->nr_sectors,
- rq->current_nr_sectors);
+ debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %u\n"
+ (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq));
- if (!blk_special_request(rq)) {
+ if (!(blk_special_request(rq) || blk_sense_request(rq))) {
/* We do not support buffer cache originated requests. */
printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
"request queue (%d)\n", drive->name, rq->cmd_type);
@@ -794,15 +649,15 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 &&
(rq->cmd[13] & REQ_IDETAPE_PC2) == 0)
- set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
+ drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
if (drive->dev_flags & IDE_DFLAG_POST_RESET) {
- set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
+ drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
drive->dev_flags &= ~IDE_DFLAG_POST_RESET;
}
- if (!test_and_clear_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags) &&
- (stat & ATA_DSC) == 0) {
+ if (!(drive->atapi_flags & IDE_AFLAG_IGNORE_DSC) &&
+ !(stat & ATA_DSC)) {
if (postponed_rq == NULL) {
tape->dsc_polling_start = jiffies;
tape->dsc_poll_freq = tape->best_dsc_rw_freq;
@@ -822,7 +677,9 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
idetape_postpone_request(drive);
return ide_stopped;
- }
+ } else
+ drive->atapi_flags &= ~IDE_AFLAG_IGNORE_DSC;
+
if (rq->cmd[13] & REQ_IDETAPE_READ) {
pc = &tape->queued_pc;
ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
@@ -834,7 +691,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
goto out;
}
if (rq->cmd[13] & REQ_IDETAPE_PC1) {
- pc = (struct ide_atapi_pc *) rq->buffer;
+ pc = (struct ide_atapi_pc *)rq->special;
rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
rq->cmd[13] |= REQ_IDETAPE_PC2;
goto out;
@@ -846,6 +703,9 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
BUG();
out:
+ /* prepare sense request for this command */
+ ide_prep_sense(drive, rq);
+
memset(&cmd, 0, sizeof(cmd));
if (rq_data_dir(rq))
@@ -853,167 +713,10 @@ out:
cmd.rq = rq;
- return ide_tape_issue_pc(drive, &cmd, pc);
-}
-
-/*
- * The function below uses __get_free_pages to allocate a data buffer of size
- * tape->buffer_size (or a bit more). We attempt to combine sequential pages as
- * much as possible.
- *
- * It returns a pointer to the newly allocated buffer, or NULL in case of
- * failure.
- */
-static struct idetape_bh *ide_tape_kmalloc_buffer(idetape_tape_t *tape,
- int full, int clear)
-{
- struct idetape_bh *prev_bh, *bh, *merge_bh;
- int pages = tape->pages_per_buffer;
- unsigned int order, b_allocd;
- char *b_data = NULL;
-
- merge_bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
- bh = merge_bh;
- if (bh == NULL)
- goto abort;
-
- order = fls(pages) - 1;
- bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order);
- if (!bh->b_data)
- goto abort;
- b_allocd = (1 << order) * PAGE_SIZE;
- pages &= (order-1);
-
- if (clear)
- memset(bh->b_data, 0, b_allocd);
- bh->b_reqnext = NULL;
- bh->b_size = b_allocd;
- atomic_set(&bh->b_count, full ? bh->b_size : 0);
-
- while (pages) {
- order = fls(pages) - 1;
- b_data = (char *) __get_free_pages(GFP_KERNEL, order);
- if (!b_data)
- goto abort;
- b_allocd = (1 << order) * PAGE_SIZE;
-
- if (clear)
- memset(b_data, 0, b_allocd);
-
- /* newly allocated page frames below buffer header or ...*/
- if (bh->b_data == b_data + b_allocd) {
- bh->b_size += b_allocd;
- bh->b_data -= b_allocd;
- if (full)
- atomic_add(b_allocd, &bh->b_count);
- continue;
- }
- /* they are above the header */
- if (b_data == bh->b_data + bh->b_size) {
- bh->b_size += b_allocd;
- if (full)
- atomic_add(b_allocd, &bh->b_count);
- continue;
- }
- prev_bh = bh;
- bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
- if (!bh) {
- free_pages((unsigned long) b_data, order);
- goto abort;
- }
- bh->b_reqnext = NULL;
- bh->b_data = b_data;
- bh->b_size = b_allocd;
- atomic_set(&bh->b_count, full ? bh->b_size : 0);
- prev_bh->b_reqnext = bh;
-
- pages &= (order-1);
- }
+ ide_init_sg_cmd(&cmd, pc->req_xfer);
+ ide_map_sg(drive, &cmd);
- bh->b_size -= tape->excess_bh_size;
- if (full)
- atomic_sub(tape->excess_bh_size, &bh->b_count);
- return merge_bh;
-abort:
- ide_tape_kfree_buffer(tape);
- return NULL;
-}
-
-static int idetape_copy_stage_from_user(idetape_tape_t *tape,
- const char __user *buf, int n)
-{
- struct idetape_bh *bh = tape->bh;
- int count;
- int ret = 0;
-
- while (n) {
- if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
- __func__);
- return 1;
- }
- count = min((unsigned int)
- (bh->b_size - atomic_read(&bh->b_count)),
- (unsigned int)n);
- if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
- count))
- ret = 1;
- n -= count;
- atomic_add(count, &bh->b_count);
- buf += count;
- if (atomic_read(&bh->b_count) == bh->b_size) {
- bh = bh->b_reqnext;
- if (bh)
- atomic_set(&bh->b_count, 0);
- }
- }
- tape->bh = bh;
- return ret;
-}
-
-static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
- int n)
-{
- struct idetape_bh *bh = tape->bh;
- int count;
- int ret = 0;
-
- while (n) {
- if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
- __func__);
- return 1;
- }
- count = min(tape->b_count, n);
- if (copy_to_user(buf, tape->b_data, count))
- ret = 1;
- n -= count;
- tape->b_data += count;
- tape->b_count -= count;
- buf += count;
- if (!tape->b_count) {
- bh = bh->b_reqnext;
- tape->bh = bh;
- if (bh) {
- tape->b_data = bh->b_data;
- tape->b_count = atomic_read(&bh->b_count);
- }
- }
- }
- return ret;
-}
-
-static void idetape_init_merge_buffer(idetape_tape_t *tape)
-{
- struct idetape_bh *bh = tape->merge_bh;
- tape->bh = tape->merge_bh;
-
- if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
- atomic_set(&bh->b_count, 0);
- else {
- tape->b_data = bh->b_data;
- tape->b_count = atomic_read(&bh->b_count);
- }
+ return ide_tape_issue_pc(drive, &cmd, pc);
}
/*
@@ -1036,7 +739,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
int load_attempted = 0;
/* Wait for the tape to become ready */
- set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
+ set_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), &drive->atapi_flags);
timeout += jiffies;
while (time_before(jiffies, timeout)) {
if (ide_do_test_unit_ready(drive, disk) == 0)
@@ -1112,11 +815,11 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
if (tape->chrdev_dir != IDETAPE_DIR_READ)
return;
- clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags);
- tape->merge_bh_size = 0;
- if (tape->merge_bh != NULL) {
- ide_tape_kfree_buffer(tape);
- tape->merge_bh = NULL;
+ clear_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags);
+ tape->valid = 0;
+ if (tape->buf != NULL) {
+ kfree(tape->buf);
+ tape->buf = NULL;
}
tape->chrdev_dir = IDETAPE_DIR_NONE;
@@ -1170,36 +873,44 @@ static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
* Generate a read/write request for the block device interface and wait for it
* to be serviced.
*/
-static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
- struct idetape_bh *bh)
+static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
{
idetape_tape_t *tape = drive->driver_data;
struct request *rq;
- int ret, errors;
+ int ret;
debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
+ BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
+ BUG_ON(size < 0 || size % tape->blk_size);
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd[13] = cmd;
rq->rq_disk = tape->disk;
- rq->special = (void *)bh;
- rq->sector = tape->first_frame;
- rq->nr_sectors = blocks;
- rq->current_nr_sectors = blocks;
- blk_execute_rq(drive->queue, tape->disk, rq, 0);
+ rq->__sector = tape->first_frame;
- errors = rq->errors;
- ret = tape->blk_size * (blocks - rq->current_nr_sectors);
- blk_put_request(rq);
+ if (size) {
+ ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
+ __GFP_WAIT);
+ if (ret)
+ goto out_put;
+ }
- if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
- return 0;
+ blk_execute_rq(drive->queue, tape->disk, rq, 0);
- if (tape->merge_bh)
- idetape_init_merge_buffer(tape);
- if (errors == IDE_DRV_ERROR_GENERAL)
- return -EIO;
+ /* calculate the number of transferred bytes and update buffer state */
+ size -= rq->resid_len;
+ tape->cur = tape->buf;
+ if (cmd == REQ_IDETAPE_READ)
+ tape->valid = size;
+ else
+ tape->valid = 0;
+
+ ret = size;
+ if (rq->errors == IDE_DRV_ERROR_GENERAL)
+ ret = -EIO;
+out_put:
+ blk_put_request(rq);
return ret;
}
@@ -1236,153 +947,87 @@ static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
pc->flags |= PC_FLAG_WAIT_FOR_DSC;
}
-/* Queue up a character device originated write request. */
-static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
-
- return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
- blocks, tape->merge_bh);
-}
-
static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
- int blocks, min;
- struct idetape_bh *bh;
if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
" but we are not writing.\n");
return;
}
- if (tape->merge_bh_size > tape->buffer_size) {
- printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
- tape->merge_bh_size = tape->buffer_size;
- }
- if (tape->merge_bh_size) {
- blocks = tape->merge_bh_size / tape->blk_size;
- if (tape->merge_bh_size % tape->blk_size) {
- unsigned int i;
-
- blocks++;
- i = tape->blk_size - tape->merge_bh_size %
- tape->blk_size;
- bh = tape->bh->b_reqnext;
- while (bh) {
- atomic_set(&bh->b_count, 0);
- bh = bh->b_reqnext;
- }
- bh = tape->bh;
- while (i) {
- if (bh == NULL) {
- printk(KERN_INFO "ide-tape: bug,"
- " bh NULL\n");
- break;
- }
- min = min(i, (unsigned int)(bh->b_size -
- atomic_read(&bh->b_count)));
- memset(bh->b_data + atomic_read(&bh->b_count),
- 0, min);
- atomic_add(min, &bh->b_count);
- i -= min;
- bh = bh->b_reqnext;
- }
- }
- (void) idetape_add_chrdev_write_request(drive, blocks);
- tape->merge_bh_size = 0;
- }
- if (tape->merge_bh != NULL) {
- ide_tape_kfree_buffer(tape);
- tape->merge_bh = NULL;
+ if (tape->buf) {
+ size_t aligned = roundup(tape->valid, tape->blk_size);
+
+ memset(tape->cur, 0, aligned - tape->valid);
+ idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, aligned);
+ kfree(tape->buf);
+ tape->buf = NULL;
}
tape->chrdev_dir = IDETAPE_DIR_NONE;
}
-static int idetape_init_read(ide_drive_t *drive)
+static int idetape_init_rw(ide_drive_t *drive, int dir)
{
idetape_tape_t *tape = drive->driver_data;
- int bytes_read;
-
- /* Initialize read operation */
- if (tape->chrdev_dir != IDETAPE_DIR_READ) {
- if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
- ide_tape_flush_merge_buffer(drive);
- idetape_flush_tape_buffers(drive);
- }
- if (tape->merge_bh || tape->merge_bh_size) {
- printk(KERN_ERR "ide-tape: merge_bh_size should be"
- " 0 now\n");
- tape->merge_bh_size = 0;
- }
- tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
- if (!tape->merge_bh)
- return -ENOMEM;
- tape->chrdev_dir = IDETAPE_DIR_READ;
+ int rc;
- /*
- * Issue a read 0 command to ensure that DSC handshake is
- * switched from completion mode to buffer available mode.
- * No point in issuing this if DSC overlap isn't supported, some
- * drives (Seagate STT3401A) will return an error.
- */
- if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
- bytes_read = idetape_queue_rw_tail(drive,
- REQ_IDETAPE_READ, 0,
- tape->merge_bh);
- if (bytes_read < 0) {
- ide_tape_kfree_buffer(tape);
- tape->merge_bh = NULL;
- tape->chrdev_dir = IDETAPE_DIR_NONE;
- return bytes_read;
- }
- }
- }
+ BUG_ON(dir != IDETAPE_DIR_READ && dir != IDETAPE_DIR_WRITE);
- return 0;
-}
+ if (tape->chrdev_dir == dir)
+ return 0;
-/* called from idetape_chrdev_read() to service a chrdev read request. */
-static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
-{
- idetape_tape_t *tape = drive->driver_data;
+ if (tape->chrdev_dir == IDETAPE_DIR_READ)
+ ide_tape_discard_merge_buffer(drive, 1);
+ else if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
+ ide_tape_flush_merge_buffer(drive);
+ idetape_flush_tape_buffers(drive);
+ }
- debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
+ if (tape->buf || tape->valid) {
+ printk(KERN_ERR "ide-tape: valid should be 0 now\n");
+ tape->valid = 0;
+ }
- /* If we are at a filemark, return a read length of 0 */
- if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
- return 0;
+ tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
+ if (!tape->buf)
+ return -ENOMEM;
+ tape->chrdev_dir = dir;
+ tape->cur = tape->buf;
- idetape_init_read(drive);
+ /*
+ * Issue a 0 rw command to ensure that DSC handshake is
+ * switched from completion mode to buffer available mode. No
+ * point in issuing this if DSC overlap isn't supported, some
+ * drives (Seagate STT3401A) will return an error.
+ */
+ if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
+ int cmd = dir == IDETAPE_DIR_READ ? REQ_IDETAPE_READ
+ : REQ_IDETAPE_WRITE;
+
+ rc = idetape_queue_rw_tail(drive, cmd, 0);
+ if (rc < 0) {
+ kfree(tape->buf);
+ tape->buf = NULL;
+ tape->chrdev_dir = IDETAPE_DIR_NONE;
+ return rc;
+ }
+ }
- return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
- tape->merge_bh);
+ return 0;
}
static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
{
idetape_tape_t *tape = drive->driver_data;
- struct idetape_bh *bh;
- int blocks;
+
+ memset(tape->buf, 0, tape->buffer_size);
while (bcount) {
- unsigned int count;
+ unsigned int count = min(tape->buffer_size, bcount);
- bh = tape->merge_bh;
- count = min(tape->buffer_size, bcount);
+ idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, count);
bcount -= count;
- blocks = count / tape->blk_size;
- while (count) {
- atomic_set(&bh->b_count,
- min(count, (unsigned int)bh->b_size));
- memset(bh->b_data, 0, atomic_read(&bh->b_count));
- count -= atomic_read(&bh->b_count);
- bh = bh->b_reqnext;
- }
- idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
- tape->merge_bh);
}
}
@@ -1462,8 +1107,9 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
}
if (tape->chrdev_dir == IDETAPE_DIR_READ) {
- tape->merge_bh_size = 0;
- if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
+ tape->valid = 0;
+ if (test_and_clear_bit(ilog2(IDE_AFLAG_FILEMARK),
+ &drive->atapi_flags))
++count;
ide_tape_discard_merge_buffer(drive, 0);
}
@@ -1511,61 +1157,56 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
{
struct ide_tape_obj *tape = file->private_data;
ide_drive_t *drive = tape->drive;
- ssize_t bytes_read, temp, actually_read = 0, rc;
+ size_t done = 0;
ssize_t ret = 0;
- u16 ctl = *(u16 *)&tape->caps[12];
+ int rc;
debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
if (tape->chrdev_dir != IDETAPE_DIR_READ) {
- if (test_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags))
+ if (test_bit(ilog2(IDE_AFLAG_DETECT_BS), &drive->atapi_flags))
if (count > tape->blk_size &&
(count % tape->blk_size) == 0)
tape->user_bs_factor = count / tape->blk_size;
}
- rc = idetape_init_read(drive);
+
+ rc = idetape_init_rw(drive, IDETAPE_DIR_READ);
if (rc < 0)
return rc;
- if (count == 0)
- return (0);
- if (tape->merge_bh_size) {
- actually_read = min((unsigned int)(tape->merge_bh_size),
- (unsigned int)count);
- if (idetape_copy_stage_to_user(tape, buf, actually_read))
- ret = -EFAULT;
- buf += actually_read;
- tape->merge_bh_size -= actually_read;
- count -= actually_read;
- }
- while (count >= tape->buffer_size) {
- bytes_read = idetape_add_chrdev_read_request(drive, ctl);
- if (bytes_read <= 0)
- goto finish;
- if (idetape_copy_stage_to_user(tape, buf, bytes_read))
- ret = -EFAULT;
- buf += bytes_read;
- count -= bytes_read;
- actually_read += bytes_read;
- }
- if (count) {
- bytes_read = idetape_add_chrdev_read_request(drive, ctl);
- if (bytes_read <= 0)
- goto finish;
- temp = min((unsigned long)count, (unsigned long)bytes_read);
- if (idetape_copy_stage_to_user(tape, buf, temp))
+
+ while (done < count) {
+ size_t todo;
+
+ /* refill if staging buffer is empty */
+ if (!tape->valid) {
+ /* If we are at a filemark, nothing more to read */
+ if (test_bit(ilog2(IDE_AFLAG_FILEMARK),
+ &drive->atapi_flags))
+ break;
+ /* read */
+ if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ,
+ tape->buffer_size) <= 0)
+ break;
+ }
+
+ /* copy out */
+ todo = min_t(size_t, count - done, tape->valid);
+ if (copy_to_user(buf + done, tape->cur, todo))
ret = -EFAULT;
- actually_read += temp;
- tape->merge_bh_size = bytes_read-temp;
+
+ tape->cur += todo;
+ tape->valid -= todo;
+ done += todo;
}
-finish:
- if (!actually_read && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
+
+ if (!done && test_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags)) {
debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
idetape_space_over_filemarks(drive, MTFSF, 1);
return 0;
}
- return ret ? ret : actually_read;
+ return ret ? ret : done;
}
static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
@@ -1573,9 +1214,9 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
{
struct ide_tape_obj *tape = file->private_data;
ide_drive_t *drive = tape->drive;
- ssize_t actually_written = 0;
+ size_t done = 0;
ssize_t ret = 0;
- u16 ctl = *(u16 *)&tape->caps[12];
+ int rc;
/* The drive is write protected. */
if (tape->write_prot)
@@ -1584,80 +1225,31 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
/* Initialize write operation */
- if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
- if (tape->chrdev_dir == IDETAPE_DIR_READ)
- ide_tape_discard_merge_buffer(drive, 1);
- if (tape->merge_bh || tape->merge_bh_size) {
- printk(KERN_ERR "ide-tape: merge_bh_size "
- "should be 0 now\n");
- tape->merge_bh_size = 0;
- }
- tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
- if (!tape->merge_bh)
- return -ENOMEM;
- tape->chrdev_dir = IDETAPE_DIR_WRITE;
- idetape_init_merge_buffer(tape);
+ rc = idetape_init_rw(drive, IDETAPE_DIR_WRITE);
+ if (rc < 0)
+ return rc;
- /*
- * Issue a write 0 command to ensure that DSC handshake is
- * switched from completion mode to buffer available mode. No
- * point in issuing this if DSC overlap isn't supported, some
- * drives (Seagate STT3401A) will return an error.
- */
- if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
- ssize_t retval = idetape_queue_rw_tail(drive,
- REQ_IDETAPE_WRITE, 0,
- tape->merge_bh);
- if (retval < 0) {
- ide_tape_kfree_buffer(tape);
- tape->merge_bh = NULL;
- tape->chrdev_dir = IDETAPE_DIR_NONE;
- return retval;
- }
- }
- }
- if (count == 0)
- return (0);
- if (tape->merge_bh_size) {
- if (tape->merge_bh_size >= tape->buffer_size) {
- printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
- tape->merge_bh_size = 0;
- }
- actually_written = min((unsigned int)
- (tape->buffer_size - tape->merge_bh_size),
- (unsigned int)count);
- if (idetape_copy_stage_from_user(tape, buf, actually_written))
- ret = -EFAULT;
- buf += actually_written;
- tape->merge_bh_size += actually_written;
- count -= actually_written;
-
- if (tape->merge_bh_size == tape->buffer_size) {
- ssize_t retval;
- tape->merge_bh_size = 0;
- retval = idetape_add_chrdev_write_request(drive, ctl);
- if (retval <= 0)
- return (retval);
- }
- }
- while (count >= tape->buffer_size) {
- ssize_t retval;
- if (idetape_copy_stage_from_user(tape, buf, tape->buffer_size))
- ret = -EFAULT;
- buf += tape->buffer_size;
- count -= tape->buffer_size;
- retval = idetape_add_chrdev_write_request(drive, ctl);
- actually_written += tape->buffer_size;
- if (retval <= 0)
- return (retval);
- }
- if (count) {
- actually_written += count;
- if (idetape_copy_stage_from_user(tape, buf, count))
+ while (done < count) {
+ size_t todo;
+
+ /* flush if staging buffer is full */
+ if (tape->valid == tape->buffer_size &&
+ idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
+ tape->buffer_size) <= 0)
+ return rc;
+
+ /* copy in */
+ todo = min_t(size_t, count - done,
+ tape->buffer_size - tape->valid);
+ if (copy_from_user(tape->cur, buf + done, todo))
ret = -EFAULT;
- tape->merge_bh_size += count;
+
+ tape->cur += todo;
+ tape->valid += todo;
+ done += todo;
}
- return ret ? ret : actually_written;
+
+ return ret ? ret : done;
}
static int idetape_write_filemark(ide_drive_t *drive)
@@ -1741,7 +1333,8 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
ide_tape_discard_merge_buffer(drive, 0);
retval = ide_do_start_stop(drive, disk, !IDETAPE_LU_LOAD_MASK);
if (!retval)
- clear_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
+ clear_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT),
+ &drive->atapi_flags);
return retval;
case MTNOP:
ide_tape_discard_merge_buffer(drive, 0);
@@ -1763,9 +1356,11 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
mt_count % tape->blk_size)
return -EIO;
tape->user_bs_factor = mt_count / tape->blk_size;
- clear_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
+ clear_bit(ilog2(IDE_AFLAG_DETECT_BS),
+ &drive->atapi_flags);
} else
- set_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
+ set_bit(ilog2(IDE_AFLAG_DETECT_BS),
+ &drive->atapi_flags);
return 0;
case MTSEEK:
ide_tape_discard_merge_buffer(drive, 0);
@@ -1818,7 +1413,7 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
idetape_flush_tape_buffers(drive);
}
if (cmd == MTIOCGET || cmd == MTIOCPOS) {
- block_offset = tape->merge_bh_size /
+ block_offset = tape->valid /
(tape->blk_size * tape->user_bs_factor);
position = idetape_read_position(drive);
if (position < 0)
@@ -1891,7 +1486,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
return -ENXIO;
lock_kernel();
- tape = ide_tape_chrdev_get(i);
+ tape = ide_tape_get(NULL, true, i);
if (!tape) {
unlock_kernel();
return -ENXIO;
@@ -1910,20 +1505,20 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
filp->private_data = tape;
- if (test_and_set_bit(IDE_AFLAG_BUSY, &drive->atapi_flags)) {
+ if (test_and_set_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags)) {
retval = -EBUSY;
goto out_put_tape;
}
retval = idetape_wait_ready(drive, 60 * HZ);
if (retval) {
- clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
+ clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
goto out_put_tape;
}
idetape_read_position(drive);
- if (!test_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags))
+ if (!test_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), &drive->atapi_flags))
(void)idetape_rewind_tape(drive);
/* Read block size and write protect status from drive. */
@@ -1939,7 +1534,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
if (tape->write_prot) {
if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
(filp->f_flags & O_ACCMODE) == O_RDWR) {
- clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
+ clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
retval = -EROFS;
goto out_put_tape;
}
@@ -1966,12 +1561,12 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
idetape_tape_t *tape = drive->driver_data;
ide_tape_flush_merge_buffer(drive);
- tape->merge_bh = ide_tape_kmalloc_buffer(tape, 1, 0);
- if (tape->merge_bh != NULL) {
+ tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
+ if (tape->buf != NULL) {
idetape_pad_zeros(drive, tape->blk_size *
(tape->user_bs_factor - 1));
- ide_tape_kfree_buffer(tape);
- tape->merge_bh = NULL;
+ kfree(tape->buf);
+ tape->buf = NULL;
}
idetape_write_filemark(drive);
idetape_flush_tape_buffers(drive);
@@ -1996,15 +1591,17 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
ide_tape_discard_merge_buffer(drive, 1);
}
- if (minor < 128 && test_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags))
+ if (minor < 128 && test_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT),
+ &drive->atapi_flags))
(void) idetape_rewind_tape(drive);
+
if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
if (tape->door_locked == DOOR_LOCKED) {
if (!ide_set_media_lock(drive, tape->disk, 0))
tape->door_locked = DOOR_UNLOCKED;
}
}
- clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
+ clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
ide_tape_put(tape);
unlock_kernel();
return 0;
@@ -2165,8 +1762,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
u16 *ctl = (u16 *)&tape->caps[12];
drive->pc_callback = ide_tape_callback;
- drive->pc_update_buffers = idetape_update_buffers;
- drive->pc_io_buffers = ide_tape_io_buffers;
drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
@@ -2197,11 +1792,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
tape->buffer_size = *ctl * tape->blk_size;
}
buffer_size = tape->buffer_size;
- tape->pages_per_buffer = buffer_size / PAGE_SIZE;
- if (buffer_size % PAGE_SIZE) {
- tape->pages_per_buffer++;
- tape->excess_bh_size = PAGE_SIZE - buffer_size % PAGE_SIZE;
- }
/* select the "best" DSC read/write polling freq */
speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
@@ -2244,7 +1834,7 @@ static void ide_tape_release(struct device *dev)
ide_drive_t *drive = tape->drive;
struct gendisk *g = tape->disk;
- BUG_ON(tape->merge_bh_size);
+ BUG_ON(tape->valid);
drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
drive->driver_data = NULL;
@@ -2317,7 +1907,7 @@ static const struct file_operations idetape_fops = {
static int idetape_open(struct block_device *bdev, fmode_t mode)
{
- struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk);
+ struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk, false, 0);
if (!tape)
return -ENXIO;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 4aa6223c11b..75b85a8cd2d 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -98,7 +98,6 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
ide_tf_dump(drive->name, cmd);
tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
- SELECT_MASK(drive, 0);
if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) {
u8 data[2] = { cmd->tf.data, cmd->hob.data };
@@ -166,7 +165,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
if (custom && tf->command == ATA_CMD_SET_MULTI) {
drive->mult_req = drive->mult_count = 0;
- drive->special.b.recalibrate = 1;
+ drive->special_flags |= IDE_SFLAG_RECALIBRATE;
(void)ide_dump_status(drive, __func__, stat);
return ide_stopped;
} else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
@@ -385,7 +384,7 @@ out_end:
if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
ide_finish_cmd(drive, cmd, stat);
else
- ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9);
+ ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
return ide_stopped;
out_err:
ide_error_cmd(drive, cmd);
@@ -424,7 +423,9 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
- rq->buffer = buf;
+
+ if (cmd->tf_flags & IDE_TFLAG_WRITE)
+ rq->cmd_flags |= REQ_RW;
/*
* (ks) We transfer currently only whole sectors.
@@ -432,18 +433,20 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
* if we would find a solution to transfer any size.
* To support special commands like READ LONG.
*/
- rq->hard_nr_sectors = rq->nr_sectors = nsect;
- rq->hard_cur_sectors = rq->current_nr_sectors = nsect;
-
- if (cmd->tf_flags & IDE_TFLAG_WRITE)
- rq->cmd_flags |= REQ_RW;
+ if (nsect) {
+ error = blk_rq_map_kern(drive->queue, rq, buf,
+ nsect * SECTOR_SIZE, __GFP_WAIT);
+ if (error)
+ goto put_req;
+ }
rq->special = cmd;
cmd->rq = rq;
error = blk_execute_rq(drive->queue, NULL, rq, 0);
- blk_put_request(rq);
+put_req:
+ blk_put_request(rq);
return error;
}
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 92c9b90931e..16d056939f9 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -211,6 +211,11 @@ static unsigned int ide_noflush;
module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0);
MODULE_PARM_DESC(noflush, "disable flush requests for a device");
+static unsigned int ide_nohpa;
+
+module_param_call(nohpa, ide_set_dev_param_mask, NULL, &ide_nohpa, 0);
+MODULE_PARM_DESC(nohpa, "disable Host Protected Area for a device");
+
static unsigned int ide_noprobe;
module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0);
@@ -281,6 +286,11 @@ static void ide_dev_apply_params(ide_drive_t *drive, u8 unit)
drive->name);
drive->dev_flags |= IDE_DFLAG_NOFLUSH;
}
+ if (ide_nohpa & (1 << i)) {
+ printk(KERN_INFO "ide: disabling Host Protected Area for %s\n",
+ drive->name);
+ drive->dev_flags |= IDE_DFLAG_NOHPA;
+ }
if (ide_noprobe & (1 << i)) {
printk(KERN_INFO "ide: skipping probe for %s\n", drive->name);
drive->dev_flags |= IDE_DFLAG_NOPROBE;
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 051b4ab0f35..b579fbe8837 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -21,7 +21,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
-static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
+static void __devinit plat_ide_setup_ports(struct ide_hw *hw,
void __iomem *base,
void __iomem *ctrl,
struct pata_platform_info *pdata,
@@ -40,12 +40,11 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
hw->io_ports.ctl_addr = (unsigned long)ctrl;
hw->irq = irq;
-
- hw->chipset = ide_generic;
}
static const struct ide_port_info platform_ide_port_info = {
.host_flags = IDE_HFLAG_NO_DMA,
+ .chipset = ide_generic,
};
static int __devinit plat_ide_probe(struct platform_device *pdev)
@@ -55,7 +54,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
struct pata_platform_info *pdata;
struct ide_host *host;
int ret = 0, mmio = 0;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
struct ide_port_info d = platform_ide_port_info;
pdata = pdev->dev.platform_data;
@@ -99,7 +98,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
if (mmio)
d.host_flags |= IDE_HFLAG_MMIO;
- ret = ide_host_add(&d, hws, &host);
+ ret = ide_host_add(&d, hws, 1, &host);
if (ret)
goto out;
@@ -113,7 +112,7 @@ out:
static int __devexit plat_ide_remove(struct platform_device *pdev)
{
- struct ide_host *host = pdev->dev.driver_data;
+ struct ide_host *host = dev_get_drvdata(&pdev->dev);
ide_host_remove(host);
diff --git a/drivers/ide/macide.c b/drivers/ide/macide.c
index 4b1718e8328..1447c8c9056 100644
--- a/drivers/ide/macide.c
+++ b/drivers/ide/macide.c
@@ -62,7 +62,7 @@ int macide_ack_intr(ide_hwif_t* hwif)
return 0;
}
-static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
+static void __init macide_setup_ports(struct ide_hw *hw, unsigned long base,
int irq, ide_ack_intr_t *ack_intr)
{
int i;
@@ -76,13 +76,12 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
hw->irq = irq;
hw->ack_intr = ack_intr;
-
- hw->chipset = ide_generic;
}
static const struct ide_port_info macide_port_info = {
.host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
.irq_flags = IRQF_SHARED,
+ .chipset = ide_generic,
};
static const char *mac_ide_name[] =
@@ -97,7 +96,7 @@ static int __init macide_init(void)
ide_ack_intr_t *ack_intr;
unsigned long base;
int irq;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
if (!MACH_IS_MAC)
return -ENODEV;
@@ -127,7 +126,7 @@ static int __init macide_init(void)
macide_setup_ports(&hw, base, irq, ack_intr);
- return ide_host_add(&macide_port_info, hws, NULL);
+ return ide_host_add(&macide_port_info, hws, 1, NULL);
}
module_init(macide_init);
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 09d813d313f..3c1dc015215 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -306,6 +306,7 @@ static struct ide_port_info __devinitdata palm_bk3710_port_info = {
.host_flags = IDE_HFLAG_MMIO,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
+ .chipset = ide_palm3710,
};
static int __init palm_bk3710_probe(struct platform_device *pdev)
@@ -315,7 +316,7 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
void __iomem *base;
unsigned long rate, mem_size;
int i, rc;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
clk = clk_get(&pdev->dev, "IDECLK");
if (IS_ERR(clk))
@@ -363,13 +364,12 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
(base + IDE_PALM_ATA_PRI_CTL_OFFSET);
hw.irq = irq->start;
hw.dev = &pdev->dev;
- hw.chipset = ide_palm3710;
palm_bk3710_port_info.udma_mask = rate < 100000000 ? ATA_UDMA4 :
ATA_UDMA5;
/* Register the IDE interface with Linux */
- rc = ide_host_add(&palm_bk3710_port_info, hws, NULL);
+ rc = ide_host_add(&palm_bk3710_port_info, hws, 1, NULL);
if (rc)
goto out;
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index b68906c3c17..65ba8239e7b 100644
--- a/drivers/ide/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
@@ -40,18 +40,6 @@
#define DBG(fmt, args...)
#endif
-static const char *pdc_quirk_drives[] = {
- "QUANTUM FIREBALLlct08 08",
- "QUANTUM FIREBALLP KA6.4",
- "QUANTUM FIREBALLP KA9.1",
- "QUANTUM FIREBALLP LM20.4",
- "QUANTUM FIREBALLP KX13.6",
- "QUANTUM FIREBALLP KX20.5",
- "QUANTUM FIREBALLP KX27.3",
- "QUANTUM FIREBALLP LM20.5",
- NULL
-};
-
static u8 max_dma_rate(struct pci_dev *pdev)
{
u8 mode;
@@ -200,19 +188,6 @@ static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
return ATA_CBL_PATA80;
}
-static void pdcnew_quirkproc(ide_drive_t *drive)
-{
- const char **list, *m = (char *)&drive->id[ATA_ID_PROD];
-
- for (list = pdc_quirk_drives; *list != NULL; list++)
- if (strstr(m, *list) != NULL) {
- drive->quirk_list = 2;
- return;
- }
-
- drive->quirk_list = 0;
-}
-
static void pdcnew_reset(ide_drive_t *drive)
{
/*
@@ -473,7 +448,6 @@ static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
static const struct ide_port_ops pdcnew_port_ops = {
.set_pio_mode = pdcnew_set_pio_mode,
.set_dma_mode = pdcnew_set_dma_mode,
- .quirkproc = pdcnew_quirkproc,
.resetproc = pdcnew_reset,
.cable_detect = pdcnew_cable_detect,
};
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 248a54bd238..b6abf7e52ca 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2006-2007 MontaVista Software, Inc.
+ * Copyright (C) 2006-2007, 2009 MontaVista Software, Inc.
* Copyright (C) 2007 Bartlomiej Zolnierkiewicz
*
* Portions Copyright (C) 1999 Promise Technology, Inc.
@@ -23,18 +23,6 @@
#define PDC202XX_DEBUG_DRIVE_INFO 0
-static const char *pdc_quirk_drives[] = {
- "QUANTUM FIREBALLlct08 08",
- "QUANTUM FIREBALLP KA6.4",
- "QUANTUM FIREBALLP KA9.1",
- "QUANTUM FIREBALLP LM20.4",
- "QUANTUM FIREBALLP KX13.6",
- "QUANTUM FIREBALLP KX20.5",
- "QUANTUM FIREBALLP KX27.3",
- "QUANTUM FIREBALLP LM20.5",
- NULL
-};
-
static void pdc_old_disable_66MHz_clock(ide_hwif_t *);
static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed)
@@ -151,19 +139,6 @@ static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
}
-static void pdc202xx_quirkproc(ide_drive_t *drive)
-{
- const char **list, *m = (char *)&drive->id[ATA_ID_PROD];
-
- for (list = pdc_quirk_drives; *list != NULL; list++)
- if (strstr(m, *list) != NULL) {
- drive->quirk_list = 2;
- return;
- }
-
- drive->quirk_list = 0;
-}
-
static void pdc202xx_dma_start(ide_drive_t *drive)
{
if (drive->current_speed > XFER_UDMA_2)
@@ -177,7 +152,7 @@ static void pdc202xx_dma_start(ide_drive_t *drive)
u8 clock = inb(high_16 + 0x11);
outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
- word_count = (rq->nr_sectors << 8);
+ word_count = (blk_rq_sectors(rq) << 8);
word_count = (rq_data_dir(rq) == READ) ?
word_count | 0x05000000 :
word_count | 0x06000000;
@@ -203,61 +178,6 @@ static int pdc202xx_dma_end(ide_drive_t *drive)
return ide_dma_end(drive);
}
-static int pdc202xx_dma_test_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long high_16 = hwif->extra_base - 16;
- u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
- u8 sc1d = inb(high_16 + 0x001d);
-
- if (hwif->channel) {
- /* bit7: Error, bit6: Interrupting, bit5: FIFO Full, bit4: FIFO Empty */
- if ((sc1d & 0x50) == 0x50)
- goto somebody_else;
- else if ((sc1d & 0x40) == 0x40)
- return (dma_stat & 4) == 4;
- } else {
- /* bit3: Error, bit2: Interrupting, bit1: FIFO Full, bit0: FIFO Empty */
- if ((sc1d & 0x05) == 0x05)
- goto somebody_else;
- else if ((sc1d & 0x04) == 0x04)
- return (dma_stat & 4) == 4;
- }
-somebody_else:
- return (dma_stat & 4) == 4; /* return 1 if INTR asserted */
-}
-
-static void pdc202xx_reset_host (ide_hwif_t *hwif)
-{
- unsigned long high_16 = hwif->extra_base - 16;
- u8 udma_speed_flag = inb(high_16 | 0x001f);
-
- outb(udma_speed_flag | 0x10, high_16 | 0x001f);
- mdelay(100);
- outb(udma_speed_flag & ~0x10, high_16 | 0x001f);
- mdelay(2000); /* 2 seconds ?! */
-
- printk(KERN_WARNING "PDC202XX: %s channel reset.\n",
- hwif->channel ? "Secondary" : "Primary");
-}
-
-static void pdc202xx_reset (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- ide_hwif_t *mate = hwif->mate;
-
- pdc202xx_reset_host(hwif);
- pdc202xx_reset_host(mate);
-
- ide_set_max_pio(drive);
-}
-
-static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
-{
- pdc202xx_reset(drive);
- ide_dma_lost_irq(drive);
-}
-
static int init_chipset_pdc202xx(struct pci_dev *dev)
{
unsigned long dmabase = pci_resource_start(dev, 4);
@@ -311,38 +231,22 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
static const struct ide_port_ops pdc20246_port_ops = {
.set_pio_mode = pdc202xx_set_pio_mode,
.set_dma_mode = pdc202xx_set_mode,
- .quirkproc = pdc202xx_quirkproc,
};
static const struct ide_port_ops pdc2026x_port_ops = {
.set_pio_mode = pdc202xx_set_pio_mode,
.set_dma_mode = pdc202xx_set_mode,
- .quirkproc = pdc202xx_quirkproc,
- .resetproc = pdc202xx_reset,
.cable_detect = pdc2026x_cable_detect,
};
-static const struct ide_dma_ops pdc20246_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = ide_dma_end,
- .dma_test_irq = pdc202xx_dma_test_irq,
- .dma_lost_irq = pdc202xx_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_clear = pdc202xx_reset,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
static const struct ide_dma_ops pdc2026x_dma_ops = {
.dma_host_set = ide_dma_host_set,
.dma_setup = ide_dma_setup,
.dma_start = pdc202xx_dma_start,
.dma_end = pdc202xx_dma_end,
- .dma_test_irq = pdc202xx_dma_test_irq,
- .dma_lost_irq = pdc202xx_dma_lost_irq,
+ .dma_test_irq = ide_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
.dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_clear = pdc202xx_reset,
.dma_sff_read_status = ide_dma_sff_read_status,
};
@@ -364,7 +268,7 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
.name = DRV_NAME,
.init_chipset = init_chipset_pdc202xx,
.port_ops = &pdc20246_port_ops,
- .dma_ops = &pdc20246_dma_ops,
+ .dma_ops = &sff_dma_ops,
.host_flags = IDE_HFLAGS_PDC202XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
index 2aa69993306..69860dea382 100644
--- a/drivers/ide/piix.c
+++ b/drivers/ide/piix.c
@@ -263,6 +263,7 @@ static const struct ich_laptop ich_laptop[] = {
{ 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
{ 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
{ 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
+ { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
/* end marker */
{ 0, }
};
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index f76e4e6b408..97642a7a79c 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1023,13 +1023,14 @@ static const struct ide_port_info pmac_port_info = {
* Setup, register & probe an IDE channel driven by this driver, this is
* called by one of the 2 probe functions (macio or PCI).
*/
-static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
+static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
+ struct ide_hw *hw)
{
struct device_node *np = pmif->node;
const int *bidp;
struct ide_host *host;
ide_hwif_t *hwif;
- hw_regs_t *hws[] = { hw, NULL, NULL, NULL };
+ struct ide_hw *hws[] = { hw };
struct ide_port_info d = pmac_port_info;
int rc;
@@ -1077,7 +1078,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
/* Make sure we have sane timings */
sanitize_timings(pmif);
- host = ide_host_alloc(&d, hws);
+ host = ide_host_alloc(&d, hws, 1);
if (host == NULL)
return -ENOMEM;
hwif = host->ports[0];
@@ -1124,7 +1125,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
return 0;
}
-static void __devinit pmac_ide_init_ports(hw_regs_t *hw, unsigned long base)
+static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
{
int i;
@@ -1144,7 +1145,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
unsigned long regbase;
pmac_ide_hwif_t *pmif;
int irq, rc;
- hw_regs_t hw;
+ struct ide_hw hw;
pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
if (pmif == NULL)
@@ -1268,7 +1269,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem *base;
unsigned long rbase, rlen;
int rc;
- hw_regs_t hw;
+ struct ide_hw hw;
np = pci_device_to_OF_node(pdev);
if (np == NULL) {
diff --git a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c
index c7934667924..ab49a97023d 100644
--- a/drivers/ide/q40ide.c
+++ b/drivers/ide/q40ide.c
@@ -51,11 +51,11 @@ static int q40ide_default_irq(unsigned long base)
/*
* Addresses are pretranslated for Q40 ISA access.
*/
-static void q40_ide_setup_ports(hw_regs_t *hw, unsigned long base,
+static void q40_ide_setup_ports(struct ide_hw *hw, unsigned long base,
ide_ack_intr_t *ack_intr,
int irq)
{
- memset(hw, 0, sizeof(hw_regs_t));
+ memset(hw, 0, sizeof(*hw));
/* BIG FAT WARNING:
assumption: only DATA port is ever used in 16 bit mode */
hw->io_ports.data_addr = Q40_ISA_IO_W(base);
@@ -70,8 +70,6 @@ static void q40_ide_setup_ports(hw_regs_t *hw, unsigned long base,
hw->irq = irq;
hw->ack_intr = ack_intr;
-
- hw->chipset = ide_generic;
}
static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
@@ -119,6 +117,7 @@ static const struct ide_port_info q40ide_port_info = {
.tp_ops = &q40ide_tp_ops,
.host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
.irq_flags = IRQF_SHARED,
+ .chipset = ide_generic,
};
/*
@@ -136,7 +135,7 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
static int __init q40ide_init(void)
{
int i;
- hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
+ struct ide_hw hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL };
if (!MACH_IS_Q40)
return -ENODEV;
@@ -163,7 +162,7 @@ static int __init q40ide_init(void)
hws[i] = &hw[i];
}
- return ide_host_add(&q40ide_port_info, hws, NULL);
+ return ide_host_add(&q40ide_port_info, hws, Q40IDE_NUM_HWIFS, NULL);
}
module_init(q40ide_init);
diff --git a/drivers/ide/rapide.c b/drivers/ide/rapide.c
index d5003ca6980..00f54248f41 100644
--- a/drivers/ide/rapide.c
+++ b/drivers/ide/rapide.c
@@ -13,9 +13,10 @@
static const struct ide_port_info rapide_port_info = {
.host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
+ .chipset = ide_generic,
};
-static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
+static void rapide_setup_ports(struct ide_hw *hw, void __iomem *base,
void __iomem *ctrl, unsigned int sz, int irq)
{
unsigned long port = (unsigned long)base;
@@ -35,7 +36,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
void __iomem *base;
struct ide_host *host;
int ret;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
ret = ecard_request_resources(ec);
if (ret)
@@ -49,10 +50,9 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
memset(&hw, 0, sizeof(hw));
rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq);
- hw.chipset = ide_generic;
hw.dev = &ec->dev;
- ret = ide_host_add(&rapide_port_info, hws, &host);
+ ret = ide_host_add(&rapide_port_info, hws, 1, &host);
if (ret)
goto release;
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
index 5be41f25204..1104bb301eb 100644
--- a/drivers/ide/scc_pata.c
+++ b/drivers/ide/scc_pata.c
@@ -559,7 +559,7 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
{
struct scc_ports *ports = pci_get_drvdata(dev);
struct ide_host *host;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
int i, rc;
memset(&hw, 0, sizeof(hw));
@@ -567,9 +567,8 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
hw.irq = dev->irq;
hw.dev = &dev->dev;
- hw.chipset = ide_pci;
- rc = ide_host_add(d, hws, &host);
+ rc = ide_host_add(d, hws, 1, &host);
if (rc)
return rc;
@@ -823,6 +822,7 @@ static const struct ide_port_info scc_chipset __devinitdata = {
.host_flags = IDE_HFLAG_SINGLE,
.irq_flags = IRQF_SHARED,
.pio_mask = ATA_PIO4,
+ .chipset = ide_pci,
};
/**
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 7a3a12d6e63..ab3db61d2ba 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 1995-1998 Mark Lord
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
+ * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz
*
* May be copied or modified under the terms of the GNU General Public License
*/
@@ -301,11 +301,11 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
}
/**
- * ide_hw_configure - configure a hw_regs_t instance
+ * ide_hw_configure - configure a struct ide_hw instance
* @dev: PCI device holding interface
* @d: IDE port info
* @port: port number
- * @hw: hw_regs_t instance corresponding to this port
+ * @hw: struct ide_hw instance corresponding to this port
*
* Perform the initial set up for the hardware interface structure. This
* is done per interface port rather than per PCI device. There may be
@@ -315,7 +315,7 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
*/
static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
- unsigned int port, hw_regs_t *hw)
+ unsigned int port, struct ide_hw *hw)
{
unsigned long ctl = 0, base = 0;
@@ -344,7 +344,6 @@ static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
memset(hw, 0, sizeof(*hw));
hw->dev = &dev->dev;
- hw->chipset = d->chipset ? d->chipset : ide_pci;
ide_std_init_ports(hw, base, ctl | 2);
return 0;
@@ -446,8 +445,8 @@ out:
* ide_pci_setup_ports - configure ports/devices on PCI IDE
* @dev: PCI device
* @d: IDE port info
- * @hw: hw_regs_t instances corresponding to this PCI IDE device
- * @hws: hw_regs_t pointers table to update
+ * @hw: struct ide_hw instances corresponding to this PCI IDE device
+ * @hws: struct ide_hw pointers table to update
*
* Scan the interfaces attached to this device and do any
* necessary per port setup. Attach the devices and ask the
@@ -459,7 +458,7 @@ out:
*/
void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
- hw_regs_t *hw, hw_regs_t **hws)
+ struct ide_hw *hw, struct ide_hw **hws)
{
int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
u8 tmp;
@@ -535,61 +534,15 @@ out:
return ret;
}
-int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
- void *priv)
-{
- struct ide_host *host;
- hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
- int ret;
-
- ret = ide_setup_pci_controller(dev, d, 1);
- if (ret < 0)
- goto out;
-
- ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
-
- host = ide_host_alloc(d, hws);
- if (host == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- host->dev[0] = &dev->dev;
-
- host->host_priv = priv;
-
- host->irq_flags = IRQF_SHARED;
-
- pci_set_drvdata(dev, host);
-
- ret = do_ide_setup_pci_device(dev, d, 1);
- if (ret < 0)
- goto out;
-
- /* fixup IRQ */
- if (ide_pci_is_in_compatibility_mode(dev)) {
- hw[0].irq = pci_get_legacy_ide_irq(dev, 0);
- hw[1].irq = pci_get_legacy_ide_irq(dev, 1);
- } else
- hw[1].irq = hw[0].irq = ret;
-
- ret = ide_host_register(host, d, hws);
- if (ret)
- ide_host_free(host);
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(ide_pci_init_one);
-
int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
const struct ide_port_info *d, void *priv)
{
struct pci_dev *pdev[] = { dev1, dev2 };
struct ide_host *host;
- int ret, i;
- hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
+ int ret, i, n_ports = dev2 ? 4 : 2;
+ struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < n_ports / 2; i++) {
ret = ide_setup_pci_controller(pdev[i], d, !i);
if (ret < 0)
goto out;
@@ -597,23 +550,24 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
}
- host = ide_host_alloc(d, hws);
+ host = ide_host_alloc(d, hws, n_ports);
if (host == NULL) {
ret = -ENOMEM;
goto out;
}
host->dev[0] = &dev1->dev;
- host->dev[1] = &dev2->dev;
+ if (dev2)
+ host->dev[1] = &dev2->dev;
host->host_priv = priv;
-
host->irq_flags = IRQF_SHARED;
pci_set_drvdata(pdev[0], host);
- pci_set_drvdata(pdev[1], host);
+ if (dev2)
+ pci_set_drvdata(pdev[1], host);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < n_ports / 2; i++) {
ret = do_ide_setup_pci_device(pdev[i], d, !i);
/*
@@ -639,6 +593,13 @@ out:
}
EXPORT_SYMBOL_GPL(ide_pci_init_two);
+int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
+ void *priv)
+{
+ return ide_pci_init_two(dev, NULL, d, priv);
+}
+EXPORT_SYMBOL_GPL(ide_pci_init_one);
+
void ide_pci_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index e5d2a48a84d..5f37f168f94 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -91,7 +91,7 @@ typedef struct {
static void
-sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port,
+sgiioc4_init_hwif_ports(struct ide_hw *hw, unsigned long data_port,
unsigned long ctrl_port, unsigned long irq_port)
{
unsigned long reg = data_port;
@@ -546,7 +546,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
unsigned long cmd_base, irqport;
unsigned long bar0, cmd_phys_base, ctl;
void __iomem *virt_base;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
int rc;
/* Get the CmdBlk and CtrlBlk Base Registers */
@@ -575,13 +575,12 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
memset(&hw, 0, sizeof(hw));
sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
hw.irq = dev->irq;
- hw.chipset = ide_pci;
hw.dev = &dev->dev;
/* Initializing chipset IRQ Registers */
writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
- rc = ide_host_add(&sgiioc4_port_info, hws, NULL);
+ rc = ide_host_add(&sgiioc4_port_info, hws, 1, NULL);
if (!rc)
return 0;
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index e4973cd1fba..bd82d228608 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -451,8 +451,8 @@ static int sil_sata_reset_poll(ide_drive_t *drive)
static void sil_sata_pre_reset(ide_drive_t *drive)
{
if (drive->media == ide_disk) {
- drive->special.b.set_geometry = 0;
- drive->special.b.recalibrate = 0;
+ drive->special_flags &=
+ ~(IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE);
}
}
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
index b0a46062533..0924abff52f 100644
--- a/drivers/ide/sl82c105.c
+++ b/drivers/ide/sl82c105.c
@@ -10,7 +10,7 @@
* with the timing registers setup.
* -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org
*
- * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
+ * Copyright (C) 2006-2007,2009 MontaVista Software, Inc. <source@mvista.com>
* Copyright (C) 2007 Bartlomiej Zolnierkiewicz
*/
@@ -146,14 +146,15 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive)
u32 val, mask = hwif->channel ? CTRL_IDE_IRQB : CTRL_IDE_IRQA;
u8 dma_cmd;
- printk("sl82c105: lost IRQ, resetting host\n");
+ printk(KERN_WARNING "sl82c105: lost IRQ, resetting host\n");
/*
* Check the raw interrupt from the drive.
*/
pci_read_config_dword(dev, 0x40, &val);
if (val & mask)
- printk("sl82c105: drive was requesting IRQ, but host lost it\n");
+ printk(KERN_INFO "sl82c105: drive was requesting IRQ, "
+ "but host lost it\n");
/*
* Was DMA enabled? If so, disable it - we're resetting the
@@ -162,7 +163,7 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive)
dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
if (dma_cmd & 1) {
outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
- printk("sl82c105: DMA was enabled\n");
+ printk(KERN_INFO "sl82c105: DMA was enabled\n");
}
sl82c105_reset_host(dev);
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index b4cf42dc8a6..05a93d6baec 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -112,7 +112,7 @@ static void tc86c001_dma_start(ide_drive_t *drive)
ide_hwif_t *hwif = drive->hwif;
unsigned long sc_base = hwif->config_data;
unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
- unsigned long nsectors = hwif->rq->nr_sectors;
+ unsigned long nsectors = blk_rq_sectors(hwif->rq);
/*
* We have to manually load the sector count and size into
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index e33d764e294..ea89fddeed9 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -130,8 +130,7 @@ static const struct ide_port_info tx4938ide_port_info __initdata = {
static int __init tx4938ide_probe(struct platform_device *pdev)
{
- hw_regs_t hw;
- hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
struct ide_host *host;
struct resource *res;
struct tx4938ide_platform_info *pdata = pdev->dev.platform_data;
@@ -183,7 +182,7 @@ static int __init tx4938ide_probe(struct platform_device *pdev)
tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, 0);
else
d.port_ops = NULL;
- ret = ide_host_add(&d, hws, &host);
+ ret = ide_host_add(&d, hws, 1, &host);
if (!ret)
platform_set_drvdata(pdev, host);
return ret;
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 564422d2397..64b58ecc3f0 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -307,7 +307,7 @@ static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
- tx4939ide_writew(cmd->rq->nr_sectors, base, TX4939IDE_Sec_Cnt);
+ tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
return 0;
}
@@ -537,8 +537,7 @@ static const struct ide_port_info tx4939ide_port_info __initdata = {
static int __init tx4939ide_probe(struct platform_device *pdev)
{
- hw_regs_t hw;
- hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
+ struct ide_hw hw, *hws[] = { &hw };
struct ide_host *host;
struct resource *res;
int irq, ret;
@@ -581,7 +580,7 @@ static int __init tx4939ide_probe(struct platform_device *pdev)
hw.dev = &pdev->dev;
pr_info("TX4939 IDE interface (base %#lx, irq %d)\n", mapbase, irq);
- host = ide_host_alloc(&tx4939ide_port_info, hws);
+ host = ide_host_alloc(&tx4939ide_port_info, hws, 1);
if (!host)
return -ENOMEM;
/* use extra_base for base address of the all registers */
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 3ff7231e485..028de26a25f 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -67,6 +67,7 @@ static struct via_isa_bridge {
u8 udma_mask;
u8 flags;
} via_isa_bridges[] = {
+ { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
@@ -474,6 +475,7 @@ static const struct pci_device_id via_pci_tbl[] = {
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 },
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 },
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 },
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), 0 },
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 },
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 },
{ 0, },
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index bf740394d70..949c97ff57e 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -41,6 +41,10 @@ static int debug;
module_param_named(debug, debug, uint, 0644);
MODULE_PARM_DESC(debug, "Enable debug printks in this driver");
+static int forceload;
+module_param_named(forceload, forceload, uint, 0644);
+MODULE_PARM_DESC(forceload, "Enable driver testing on unvalidated i5000");
+
#define dprintk(fmt, arg...) \
do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0)
@@ -552,7 +556,7 @@ static int __init i7300_idle_init(void)
cpus_clear(idle_cpumask);
total_us = 0;
- if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev))
+ if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
return -ENODEV;
if (i7300_idle_thrt_save())
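With the new flag, enabling the driver on an unvalidated i5000 platform presumably requires loading it with forceload set; a hypothetical invocation (not taken from this patch) would be:

    modprobe i7300_idle forceload=1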
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index a6dfeb0b337..e76cac64c53 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -35,6 +35,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
#include <linux/string.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
@@ -387,6 +388,7 @@ csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
if (!kv)
return NULL;
+ kmemcheck_annotate_variable(kv->value.leaf.data[0]);
CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 823a6297a1a..2cd00b5b45b 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1789,12 +1789,13 @@ static int dv1394_open(struct inode *inode, struct file *file)
} else {
/* look up the card by ID */
unsigned long flags;
+ int idx = ieee1394_file_to_instance(file);
spin_lock_irqsave(&dv1394_cards_lock, flags);
if (!list_empty(&dv1394_cards)) {
struct video_card *p;
list_for_each_entry(p, &dv1394_cards, list) {
- if ((p->id) == ieee1394_file_to_instance(file)) {
+ if ((p->id) == idx) {
video = p;
break;
}
@@ -1803,7 +1804,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
spin_unlock_irqrestore(&dv1394_cards_lock, flags);
if (!video) {
- debug_printk("dv1394: OHCI card %d not found", ieee1394_file_to_instance(file));
+ debug_printk("dv1394: OHCI card %d not found", idx);
return -ENODEV;
}
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 4ca103577c0..f5c586c2bba 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -361,7 +361,7 @@ static int eth1394_new_node(struct eth1394_host_info *hi,
node_info->pdg.sz = 0;
node_info->fifo = CSR1212_INVALID_ADDR_SPACE;
- ud->device.driver_data = node_info;
+ dev_set_drvdata(&ud->device, node_info);
new_node->ud = ud;
priv = netdev_priv(hi->dev);
@@ -406,7 +406,7 @@ static int eth1394_remove(struct device *dev)
list_del(&old_node->list);
kfree(old_node);
- node_info = (struct eth1394_node_info*)ud->device.driver_data;
+ node_info = dev_get_drvdata(&ud->device);
spin_lock_irqsave(&node_info->pdg.lock, flags);
/* The partial datagram list should be empty, but we'll just
@@ -416,7 +416,7 @@ static int eth1394_remove(struct device *dev)
spin_unlock_irqrestore(&node_info->pdg.lock, flags);
kfree(node_info);
- ud->device.driver_data = NULL;
+ dev_set_drvdata(&ud->device, NULL);
return 0;
}
@@ -688,7 +688,7 @@ static void ether1394_host_reset(struct hpsb_host *host)
ether1394_reset_priv(dev, 0);
list_for_each_entry(node, &priv->ip_node_list, list) {
- node_info = node->ud->device.driver_data;
+ node_info = dev_get_drvdata(&node->ud->device);
spin_lock_irqsave(&node_info->pdg.lock, flags);
@@ -872,8 +872,7 @@ static __be16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev,
if (!node)
return cpu_to_be16(0);
- node_info =
- (struct eth1394_node_info *)node->ud->device.driver_data;
+ node_info = dev_get_drvdata(&node->ud->device);
/* Update our speed/payload/fifo_offset table */
node_info->maxpayload = maxpayload;
@@ -1080,7 +1079,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
priv->ud_list[NODEID_TO_NODE(srcid)] = ud;
}
- node_info = (struct eth1394_node_info *)ud->device.driver_data;
+ node_info = dev_get_drvdata(&ud->device);
/* First, did we receive a fragmented or unfragmented datagram? */
hdr->words.word1 = ntohs(hdr->words.word1);
@@ -1617,8 +1616,7 @@ static int ether1394_tx(struct sk_buff *skb, struct net_device *dev)
if (!node)
goto fail;
- node_info =
- (struct eth1394_node_info *)node->ud->device.driver_data;
+ node_info = dev_get_drvdata(&node->ud->device);
if (node_info->fifo == CSR1212_INVALID_ADDR_SPACE)
goto fail;
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
index 21d50f73a21..28b9f58bafd 100644
--- a/drivers/ieee1394/ieee1394_core.h
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -5,6 +5,7 @@
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/types.h>
+#include <linux/cdev.h>
#include <asm/atomic.h>
#include "hosts.h"
@@ -155,7 +156,10 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
*/
static inline unsigned char ieee1394_file_to_instance(struct file *file)
{
- return file->f_path.dentry->d_inode->i_cindex;
+ int idx = cdev_index(file->f_path.dentry->d_inode);
+ if (idx < 0)
+ idx = 0;
+ return idx;
}
extern int hpsb_disable_irm;
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index a6d55bebe61..5122b5a8aa2 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -10,6 +10,7 @@
#include <linux/bitmap.h>
#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -39,7 +40,10 @@ struct nodemgr_csr_info {
struct hpsb_host *host;
nodeid_t nodeid;
unsigned int generation;
+
+ kmemcheck_bitfield_begin(flags);
unsigned int speed_unverified:1;
+ kmemcheck_bitfield_end(flags);
};
@@ -1293,6 +1297,7 @@ static void nodemgr_node_scan_one(struct hpsb_host *host,
u8 *speed;
ci = kmalloc(sizeof(*ci), GFP_KERNEL);
+ kmemcheck_annotate_bitfield(ci, flags);
if (!ci)
return;
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index a51ab233342..83b734aec92 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -718,7 +718,7 @@ static int sbp2_remove(struct device *dev)
struct scsi_device *sdev;
ud = container_of(dev, struct unit_directory, device);
- lu = ud->device.driver_data;
+ lu = dev_get_drvdata(&ud->device);
if (!lu)
return 0;
@@ -746,7 +746,7 @@ static int sbp2_remove(struct device *dev)
static int sbp2_update(struct unit_directory *ud)
{
- struct sbp2_lu *lu = ud->device.driver_data;
+ struct sbp2_lu *lu = dev_get_drvdata(&ud->device);
if (sbp2_reconnect_device(lu) != 0) {
/*
@@ -815,7 +815,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
INIT_WORK(&lu->protocol_work, NULL);
- ud->device.driver_data = lu;
+ dev_set_drvdata(&ud->device, lu);
hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
if (!hi) {
@@ -1051,7 +1051,7 @@ static void sbp2_remove_device(struct sbp2_lu *lu)
hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
lu->status_fifo_addr);
- lu->ud->device.driver_data = NULL;
+ dev_set_drvdata(&lu->ud->device, NULL);
module_put(hi->host->driver->owner);
no_hi:
diff --git a/drivers/ieee802154/Kconfig b/drivers/ieee802154/Kconfig
new file mode 100644
index 00000000000..9b9f43aa2f8
--- /dev/null
+++ b/drivers/ieee802154/Kconfig
@@ -0,0 +1,22 @@
+menuconfig IEEE802154_DRIVERS
+ tristate "IEEE 802.15.4 drivers"
+ depends on NETDEVICES && IEEE802154
+ default y
+ ---help---
+ Say Y here to get to see options for IEEE 802.15.4 Low-Rate
+ Wireless Personal Area Network device drivers. This option alone
+ does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and
+ disabled.
+
+config IEEE802154_FAKEHARD
+ tristate "Fake LR-WPAN driver with several interconnected devices"
+ depends on IEEE802154_DRIVERS
+ ---help---
+ Say Y here to enable the fake driver that serves as an example
+ of a HardMAC device driver.
+
+ This driver can also be built as a module. To do so say M here.
+ The module will be called 'fakehard'.
+
diff --git a/drivers/ieee802154/Makefile b/drivers/ieee802154/Makefile
new file mode 100644
index 00000000000..e0e8e1a184f
--- /dev/null
+++ b/drivers/ieee802154/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
+
+EXTRA_CFLAGS += -DDEBUG -DCONFIG_FFD
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
new file mode 100644
index 00000000000..0384144c0b3
--- /dev/null
+++ b/drivers/ieee802154/fakehard.c
@@ -0,0 +1,270 @@
+/*
+ * Sample driver for HardMAC IEEE 802.15.4 devices
+ *
+ * Copyright (C) 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee802154/af_ieee802154.h>
+#include <net/ieee802154/netdevice.h>
+#include <net/ieee802154/mac_def.h>
+#include <net/ieee802154/nl802154.h>
+
+static u16 fake_get_pan_id(struct net_device *dev)
+{
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ return 0xeba1;
+}
+
+static u16 fake_get_short_addr(struct net_device *dev)
+{
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ return 0x1;
+}
+
+static u8 fake_get_dsn(struct net_device *dev)
+{
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ return 0x00; /* DSN are implemented in HW, so return just 0 */
+}
+
+static u8 fake_get_bsn(struct net_device *dev)
+{
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ return 0x00; /* BSN are implemented in HW, so return just 0 */
+}
+
+static int fake_assoc_req(struct net_device *dev,
+ struct ieee802154_addr *addr, u8 channel, u8 cap)
+{
+ /* We simply emulate it here */
+ return ieee802154_nl_assoc_confirm(dev, fake_get_short_addr(dev),
+ IEEE802154_SUCCESS);
+}
+
+static int fake_assoc_resp(struct net_device *dev,
+ struct ieee802154_addr *addr, u16 short_addr, u8 status)
+{
+ return 0;
+}
+
+static int fake_disassoc_req(struct net_device *dev,
+ struct ieee802154_addr *addr, u8 reason)
+{
+ return ieee802154_nl_disassoc_confirm(dev, IEEE802154_SUCCESS);
+}
+
+static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr,
+ u8 channel,
+ u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
+ u8 coord_realign)
+{
+ return 0;
+}
+
+static int fake_scan_req(struct net_device *dev, u8 type, u32 channels,
+ u8 duration)
+{
+ u8 edl[27] = {};
+ return ieee802154_nl_scan_confirm(dev, IEEE802154_SUCCESS, type,
+ channels,
+ type == IEEE802154_MAC_SCAN_ED ? edl : NULL);
+}
+
+static struct ieee802154_mlme_ops fake_mlme = {
+ .assoc_req = fake_assoc_req,
+ .assoc_resp = fake_assoc_resp,
+ .disassoc_req = fake_disassoc_req,
+ .start_req = fake_start_req,
+ .scan_req = fake_scan_req,
+
+ .get_pan_id = fake_get_pan_id,
+ .get_short_addr = fake_get_short_addr,
+ .get_dsn = fake_get_dsn,
+ .get_bsn = fake_get_bsn,
+};
+
+static int ieee802154_fake_open(struct net_device *dev)
+{
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int ieee802154_fake_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int ieee802154_fake_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ skb->iif = dev->ifindex;
+ skb->dev = dev;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ dev->trans_start = jiffies;
+
+ /* FIXME: do hardware work here ... */
+
+ return 0;
+}
+
+
+static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct sockaddr_ieee802154 *sa =
+ (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
+ u16 pan_id, short_addr;
+
+ switch (cmd) {
+ case SIOCGIFADDR:
+ /* FIXME: fixed here, get from device IRL */
+ pan_id = fake_get_pan_id(dev);
+ short_addr = fake_get_short_addr(dev);
+ if (pan_id == IEEE802154_PANID_BROADCAST ||
+ short_addr == IEEE802154_ADDR_BROADCAST)
+ return -EADDRNOTAVAIL;
+
+ sa->family = AF_IEEE802154;
+ sa->addr.addr_type = IEEE802154_ADDR_SHORT;
+ sa->addr.pan_id = pan_id;
+ sa->addr.short_addr = short_addr;
+ return 0;
+ }
+ return -ENOIOCTLCMD;
+}
+
+static int ieee802154_fake_mac_addr(struct net_device *dev, void *p)
+{
+ return -EBUSY; /* HW address is built into the device */
+}
+
+static const struct net_device_ops fake_ops = {
+ .ndo_open = ieee802154_fake_open,
+ .ndo_stop = ieee802154_fake_close,
+ .ndo_start_xmit = ieee802154_fake_xmit,
+ .ndo_do_ioctl = ieee802154_fake_ioctl,
+ .ndo_set_mac_address = ieee802154_fake_mac_addr,
+};
+
+
+static void ieee802154_fake_setup(struct net_device *dev)
+{
+ dev->addr_len = IEEE802154_ADDR_LEN;
+ memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+ dev->features = NETIF_F_NO_CSUM;
+ dev->needed_tailroom = 2; /* FCS */
+ dev->mtu = 127;
+ dev->tx_queue_len = 10;
+ dev->type = ARPHRD_IEEE802154;
+ dev->flags = IFF_NOARP | IFF_BROADCAST;
+ dev->watchdog_timeo = 0;
+}
+
+
+static int __devinit ieee802154fake_probe(struct platform_device *pdev)
+{
+ struct net_device *dev =
+ alloc_netdev(0, "hardwpan%d", ieee802154_fake_setup);
+ int err;
+
+ if (!dev)
+ return -ENOMEM;
+
+ memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef",
+ dev->addr_len);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ dev->netdev_ops = &fake_ops;
+ dev->ml_priv = &fake_mlme;
+
+ /*
+ * If the name is a format string the caller wants us to do a
+ * name allocation.
+ */
+ if (strchr(dev->name, '%')) {
+ err = dev_alloc_name(dev, dev->name);
+ if (err < 0)
+ goto out;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ platform_set_drvdata(pdev, dev);
+
+ err = register_netdev(dev);
+ if (err < 0)
+ goto out;
+
+
+ dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
+ return 0;
+
+out:
+ free_netdev(dev); /* not registered (or registration failed), so just free */
+ return err;
+}
+
+static int __devexit ieee802154fake_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ unregister_netdev(dev);
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_device *ieee802154fake_dev;
+
+static struct platform_driver ieee802154fake_driver = {
+ .probe = ieee802154fake_probe,
+ .remove = __devexit_p(ieee802154fake_remove),
+ .driver = {
+ .name = "ieee802154hardmac",
+ .owner = THIS_MODULE,
+ },
+};
+
+static __init int fake_init(void)
+{
+ ieee802154fake_dev = platform_device_register_simple(
+ "ieee802154hardmac", -1, NULL, 0);
+ return platform_driver_register(&ieee802154fake_driver);
+}
+
+static __exit void fake_exit(void)
+{
+ platform_driver_unregister(&ieee802154fake_driver);
+ platform_device_unregister(ieee802154fake_dev);
+}
+
+module_init(fake_init);
+module_exit(fake_exit);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 5c04cfb54cb..158a214da2f 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -760,9 +760,9 @@ int ib_device_register_sysfs(struct ib_device *device)
int i;
class_dev->class = &ib_class;
- class_dev->driver_data = device;
class_dev->parent = device->dma_device;
dev_set_name(class_dev, device->name);
+ dev_set_drvdata(class_dev, device);
INIT_LIST_HEAD(&device->port_list);
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index bb17cce3cb5..f5c45b194f5 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -133,7 +133,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
struct c2_qp *qp;
int is_recv = 0;
- ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+ ce = c2_mq_consume(&cq->mq);
if (!ce) {
return -EAGAIN;
}
@@ -146,7 +146,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
while ((qp =
(struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
c2_mq_free(&cq->mq);
- ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+ ce = c2_mq_consume(&cq->mq);
if (!ce)
return -EAGAIN;
}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index ff9be1a1310..32e3b1461d8 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -176,7 +176,7 @@ struct t3_send_wr {
struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
};
-#define T3_MAX_FASTREG_DEPTH 24
+#define T3_MAX_FASTREG_DEPTH 10
#define T3_MAX_FASTREG_FRAG 10
struct t3_fastreg_wr {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 160ef482712..e2a63214008 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -40,6 +40,7 @@
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
+#include <linux/inetdevice.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -1152,12 +1153,39 @@ static int iwch_query_device(struct ib_device *ibdev,
static int iwch_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
+ struct iwch_dev *dev;
+ struct net_device *netdev;
+ struct in_device *inetdev;
+
PDBG("%s ibdev %p\n", __func__, ibdev);
+ dev = to_iwch_dev(ibdev);
+ netdev = dev->rdev.port_info.lldevs[port-1];
+
memset(props, 0, sizeof(struct ib_port_attr));
props->max_mtu = IB_MTU_4096;
- props->active_mtu = IB_MTU_2048;
- props->state = IB_PORT_ACTIVE;
+ if (netdev->mtu >= 4096)
+ props->active_mtu = IB_MTU_4096;
+ else if (netdev->mtu >= 2048)
+ props->active_mtu = IB_MTU_2048;
+ else if (netdev->mtu >= 1024)
+ props->active_mtu = IB_MTU_1024;
+ else if (netdev->mtu >= 512)
+ props->active_mtu = IB_MTU_512;
+ else
+ props->active_mtu = IB_MTU_256;
+
+ if (!netif_carrier_ok(netdev))
+ props->state = IB_PORT_DOWN;
+ else {
+ inetdev = in_dev_get(netdev);
+ if (inetdev && inetdev->ifa_list)
+ props->state = IB_PORT_ACTIVE;
+ else
+ props->state = IB_PORT_INIT;
+ if (inetdev)
+ in_dev_put(inetdev);
+ }
+
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_SNMP_TUNNEL_SUP |
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
index 1798e6466bd..689c35786dd 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
@@ -165,7 +165,6 @@ struct hcp_modify_qp_control_block {
#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
-#define MQPCB_QP_STATE EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
@@ -176,60 +175,33 @@ struct hcp_modify_qp_control_block {
#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
-#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
-#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
-#define MQPCB_DLID EHCA_BMASK_IBM(16, 31)
#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
-#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29, 31)
#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
-#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25, 31)
#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
-#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
-#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
-#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
-#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12, 31)
#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
-#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28, 31)
#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
-#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31, 31)
#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
-#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
-#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27, 31)
#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
-#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
-#define MQPCB_DLID_AL EHCA_BMASK_IBM(16, 31)
#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
-#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
-#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25, 31)
#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
-#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
-#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
-#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
-#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12, 31)
#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
-#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
-#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
-#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31, 31)
-#define MQPCB_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
-#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31, 31)
#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
-#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16, 31)
#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 99bcbd7ffb0..4b89b791be6 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -479,13 +479,13 @@ void ehca_tasklet_neq(unsigned long data)
struct ehca_eqe *eqe;
u64 ret;
- eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+ eqe = ehca_poll_eq(shca, &shca->neq);
while (eqe) {
if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
parse_ec(shca, eqe->entry);
- eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+ eqe = ehca_poll_eq(shca, &shca->neq);
}
ret = hipz_h_reset_event(shca->ipz_hca_handle,
@@ -572,8 +572,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
eqe_cnt = 0;
do {
u32 token;
- eqe_cache[eqe_cnt].eqe =
- (struct ehca_eqe *)ehca_poll_eq(shca, eq);
+ eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
if (!eqe_cache[eqe_cnt].eqe)
break;
eqe_value = eqe_cache[eqe_cnt].eqe->entry;
@@ -637,7 +636,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
goto unlock_irq_spinlock;
do {
struct ehca_eqe *eqe;
- eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+ eqe = ehca_poll_eq(shca, &shca->eq);
if (!eqe)
break;
process_eqe(shca, eqe);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 368311ce332..ce4e6eff479 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
#include "ehca_tools.h"
#include "hcp_if.h"
-#define HCAD_VERSION "0026"
+#define HCAD_VERSION "0027"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -636,7 +636,7 @@ static ssize_t ehca_show_##name(struct device *dev, \
struct hipz_query_hca *rblock; \
int data; \
\
- shca = dev->driver_data; \
+ shca = dev_get_drvdata(dev); \
\
rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \
if (!rblock) { \
@@ -680,7 +680,7 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct ehca_shca *shca = dev->driver_data;
+ struct ehca_shca *shca = dev_get_drvdata(dev);
return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
@@ -749,7 +749,7 @@ static int __devinit ehca_probe(struct of_device *dev,
shca->ofdev = dev;
shca->ipz_hca_handle.handle = *handle;
- dev->dev.driver_data = shca;
+ dev_set_drvdata(&dev->dev, shca);
ret = ehca_sense_attributes(shca);
if (ret < 0) {
@@ -878,7 +878,7 @@ probe1:
static int __devexit ehca_remove(struct of_device *dev)
{
- struct ehca_shca *shca = dev->dev.driver_data;
+ struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
unsigned long flags;
int ret;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 00c10815971..0338f1fabe8 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -461,7 +461,7 @@ static struct ehca_qp *internal_create_qp(
ib_device);
struct ib_ucontext *context = NULL;
u64 h_ret;
- int is_llqp = 0, has_srq = 0;
+ int is_llqp = 0, has_srq = 0, is_user = 0;
int qp_type, max_send_sge, max_recv_sge, ret;
/* h_call's out parameters */
@@ -609,9 +609,6 @@ static struct ehca_qp *internal_create_qp(
}
}
- if (pd->uobject && udata)
- context = pd->uobject->context;
-
my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
if (!my_qp) {
ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
@@ -619,6 +616,11 @@ static struct ehca_qp *internal_create_qp(
return ERR_PTR(-ENOMEM);
}
+ if (pd->uobject && udata) {
+ is_user = 1;
+ context = pd->uobject->context;
+ }
+
atomic_set(&my_qp->nr_events, 0);
init_waitqueue_head(&my_qp->wait_completion);
spin_lock_init(&my_qp->spinlock_s);
@@ -707,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
(parms.squeue.is_small || parms.rqueue.is_small);
}
- h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
+ h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
if (h_ret != H_SUCCESS) {
ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
h_ret);
@@ -769,18 +771,20 @@ static struct ehca_qp *internal_create_qp(
goto create_qp_exit2;
}
- my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
- my_qp->ipz_squeue.qe_size;
- my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
- sizeof(struct ehca_qmap_entry));
- if (!my_qp->sq_map.map) {
- ehca_err(pd->device, "Couldn't allocate squeue "
- "map ret=%i", ret);
- goto create_qp_exit3;
+ if (!is_user) {
+ my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
+ my_qp->ipz_squeue.qe_size;
+ my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
+ sizeof(struct ehca_qmap_entry));
+ if (!my_qp->sq_map.map) {
+ ehca_err(pd->device, "Couldn't allocate squeue "
+ "map ret=%i", ret);
+ goto create_qp_exit3;
+ }
+ INIT_LIST_HEAD(&my_qp->sq_err_node);
+ /* to avoid the generation of bogus flush CQEs */
+ reset_queue_map(&my_qp->sq_map);
}
- INIT_LIST_HEAD(&my_qp->sq_err_node);
- /* to avoid the generation of bogus flush CQEs */
- reset_queue_map(&my_qp->sq_map);
}
if (HAS_RQ(my_qp)) {
@@ -792,20 +796,21 @@ static struct ehca_qp *internal_create_qp(
"and pages ret=%i", ret);
goto create_qp_exit4;
}
-
- my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
- my_qp->ipz_rqueue.qe_size;
- my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
- sizeof(struct ehca_qmap_entry));
- if (!my_qp->rq_map.map) {
- ehca_err(pd->device, "Couldn't allocate squeue "
- "map ret=%i", ret);
- goto create_qp_exit5;
+ if (!is_user) {
+ my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
+ my_qp->ipz_rqueue.qe_size;
+ my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
+ sizeof(struct ehca_qmap_entry));
+ if (!my_qp->rq_map.map) {
+ ehca_err(pd->device, "Couldn't allocate squeue "
+ "map ret=%i", ret);
+ goto create_qp_exit5;
+ }
+ INIT_LIST_HEAD(&my_qp->rq_err_node);
+ /* to avoid the generation of bogus flush CQEs */
+ reset_queue_map(&my_qp->rq_map);
}
- INIT_LIST_HEAD(&my_qp->rq_err_node);
- /* to avoid the generation of bogus flush CQEs */
- reset_queue_map(&my_qp->rq_map);
- } else if (init_attr->srq) {
+ } else if (init_attr->srq && !is_user) {
/* this is a base QP, use the queue map of the SRQ */
my_qp->rq_map = my_srq->rq_map;
INIT_LIST_HEAD(&my_qp->rq_err_node);
@@ -918,7 +923,7 @@ create_qp_exit7:
kfree(my_qp->mod_qp_parm);
create_qp_exit6:
- if (HAS_RQ(my_qp))
+ if (HAS_RQ(my_qp) && !is_user)
vfree(my_qp->rq_map.map);
create_qp_exit5:
@@ -926,7 +931,7 @@ create_qp_exit5:
ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
create_qp_exit4:
- if (HAS_SQ(my_qp))
+ if (HAS_SQ(my_qp) && !is_user)
vfree(my_qp->sq_map.map);
create_qp_exit3:
@@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
u64 update_mask;
u64 h_ret;
int bad_wqe_cnt = 0;
+ int is_user = 0;
int squeue_locked = 0;
unsigned long flags = 0;
@@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
ret = ehca2ib_return_code(h_ret);
goto modify_qp_exit1;
}
+ if (ibqp->uobject)
+ is_user = 1;
qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
@@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
goto modify_qp_exit2;
}
}
- if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
+ if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
+ && !is_user) {
ret = check_for_left_cqes(my_qp, shca);
if (ret)
goto modify_qp_exit2;
@@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp,
ipz_qeit_reset(&my_qp->ipz_rqueue);
ipz_qeit_reset(&my_qp->ipz_squeue);
- if (qp_cur_state == IB_QPS_ERR) {
+ if (qp_cur_state == IB_QPS_ERR && !is_user) {
del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
if (HAS_RQ(my_qp))
del_from_err_list(my_qp->recv_cq,
&my_qp->rq_err_node);
}
- reset_queue_map(&my_qp->sq_map);
+ if (!is_user)
+ reset_queue_map(&my_qp->sq_map);
- if (HAS_RQ(my_qp))
+ if (HAS_RQ(my_qp) && !is_user)
reset_queue_map(&my_qp->rq_map);
}
@@ -1952,19 +1962,13 @@ int ehca_query_qp(struct ib_qp *qp,
qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
qp_attr->dest_qp_num = qpcb->dest_qp_nr;
- qp_attr->pkey_index =
- EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);
-
- qp_attr->port_num =
- EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);
-
+ qp_attr->pkey_index = qpcb->prim_p_key_idx;
+ qp_attr->port_num = qpcb->prim_phys_port;
qp_attr->timeout = qpcb->timeout;
qp_attr->retry_cnt = qpcb->retry_count;
qp_attr->rnr_retry = qpcb->rnr_retry_count;
- qp_attr->alt_pkey_index =
- EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);
-
+ qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
qp_attr->alt_port_num = qpcb->alt_phys_port;
qp_attr->alt_timeout = qpcb->timeout_al;
@@ -2051,8 +2055,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
- mqpcb->curr_srq_limit =
- EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
+ mqpcb->curr_srq_limit = attr->srq_limit;
mqpcb->qp_aff_asyn_ev_log_reg =
EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
}
@@ -2115,8 +2118,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
srq_attr->max_sge = 3;
- srq_attr->srq_limit = EHCA_BMASK_GET(
- MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
+ srq_attr->srq_limit = qpcb->curr_srq_limit;
if (ehca_debug_level >= 2)
ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
@@ -2138,10 +2140,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
int ret;
u64 h_ret;
u8 port_num;
+ int is_user = 0;
enum ib_qp_type qp_type;
unsigned long flags;
if (uobject) {
+ is_user = 1;
if (my_qp->mm_count_galpa ||
my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
ehca_err(dev, "Resources still referenced in "
@@ -2168,10 +2172,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
* SRQs will never get into an error list and do not have a recv_cq,
* so we need to skip them here.
*/
- if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
+ if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
- if (HAS_SQ(my_qp))
+ if (HAS_SQ(my_qp) && !is_user)
del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
/* now wait until all pending events have completed */
@@ -2209,13 +2213,13 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
if (HAS_RQ(my_qp)) {
ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-
- vfree(my_qp->rq_map.map);
+ if (!is_user)
+ vfree(my_qp->rq_map.map);
}
if (HAS_SQ(my_qp)) {
ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-
- vfree(my_qp->sq_map.map);
+ if (!is_user)
+ vfree(my_qp->sq_map.map);
}
kmem_cache_free(qp_cache, my_qp);
atomic_dec(&shca->num_qps);
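
The ehca hunks above thread an is_user flag, derived from the presence of a uverbs object, through QP create, modify and destroy so that the kernel-only software queue maps are neither allocated, reset nor freed for userspace QPs. A compressed sketch of that gating, with the map pointer and helper reduced to stand-ins (not the driver's real layout):

/* Sketch: skip kernel-only bookkeeping for userspace QPs, as the ehca
 * hunks above do.  The map pointer here is a simplified stand-in. */
#include <linux/vmalloc.h>
#include <rdma/ib_verbs.h>

static void qp_free_sq_map(struct ib_qp *ibqp, void *sq_map)
{
        int is_user = ibqp->uobject ? 1 : 0;

        /* the map exists only for kernel QPs; userspace tracks its own WRs */
        if (!is_user)
                vfree(sq_map);
}
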
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index d0ab0c0d5e9..4d5dc3304d4 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -284,7 +284,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
param->act_pages = (u32)outs[4];
if (ret == H_SUCCESS)
- hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
+ hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lli", ret);
@@ -293,7 +293,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
}
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
- struct ehca_alloc_qp_parms *parms)
+ struct ehca_alloc_qp_parms *parms, int is_user)
{
u64 ret;
u64 allocate_controls, max_r10_reg, r11, r12;
@@ -359,7 +359,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
if (ret == H_SUCCESS)
- hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
+ hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lli", ret);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h
index 2c3c6e0ea5c..39c1c3618ec 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.h
+++ b/drivers/infiniband/hw/ehca/hcp_if.h
@@ -78,7 +78,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
* initialize resources, create empty QPPTs (2 rings).
*/
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
- struct ehca_alloc_qp_parms *parms);
+ struct ehca_alloc_qp_parms *parms, int is_user);
u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
const u8 port_id,
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c
index 214821095cb..b3e0e72e8a7 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.c
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.c
@@ -54,12 +54,15 @@ int hcall_unmap_page(u64 mapaddr)
return 0;
}
-int hcp_galpas_ctor(struct h_galpas *galpas,
+int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
u64 paddr_kernel, u64 paddr_user)
{
- int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
- if (ret)
- return ret;
+ if (!is_user) {
+ int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
+ if (ret)
+ return ret;
+ } else
+ galpas->kernel.fw_handle = 0;
galpas->user.fw_handle = paddr_user;
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h
index 5305c2a3ed9..204227d5303 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.h
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.h
@@ -78,7 +78,7 @@ static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
*(volatile u64 __force *)addr = value;
}
-int hcp_galpas_ctor(struct h_galpas *galpas,
+int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
u64 paddr_kernel, u64 paddr_user);
int hcp_galpas_dtor(struct h_galpas *galpas);
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index c3a32846543..1227c593627 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -220,10 +220,13 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
queue->small_page = NULL;
/* allocate queue page pointers */
- queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+ queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
if (!queue->queue_pages) {
- ehca_gen_err("Couldn't allocate queue page list");
- return 0;
+ queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+ if (!queue->queue_pages) {
+ ehca_gen_err("Couldn't allocate queue page list");
+ return 0;
+ }
}
memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
@@ -240,7 +243,10 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
ipz_queue_ctor_exit0:
ehca_gen_err("Couldn't alloc pages queue=%p "
"nr_of_pages=%x", queue, nr_of_pages);
- vfree(queue->queue_pages);
+ if (is_vmalloc_addr(queue->queue_pages))
+ vfree(queue->queue_pages);
+ else
+ kfree(queue->queue_pages);
return 0;
}
@@ -262,7 +268,10 @@ int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
free_page((unsigned long)queue->queue_pages[i]);
}
- vfree(queue->queue_pages);
+ if (is_vmalloc_addr(queue->queue_pages))
+ vfree(queue->queue_pages);
+ else
+ kfree(queue->queue_pages);
return 1;
}
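
ipz_queue_ctor() now tries kmalloc() for the page-pointer array and only falls back to vmalloc() when that fails, so both the error path and ipz_queue_dtor() must free with whichever allocator actually provided the memory. A minimal standalone sketch of the same pattern (names invented, not ehca code):

/* Sketch only: kmalloc-first allocation with vmalloc fallback, as in the
 * ipz_queue_ctor()/ipz_queue_dtor() hunks above.  Names are illustrative. */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>           /* is_vmalloc_addr() */

static void **alloc_page_list(unsigned int nr_of_pages)
{
        void **pages;

        /* try the cheaper physically contiguous allocation first */
        pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
        if (!pages)
                pages = vmalloc(nr_of_pages * sizeof(void *));
        if (pages)
                memset(pages, 0, nr_of_pages * sizeof(void *));
        return pages;
}

static void free_page_list(void **pages)
{
        /* free with the allocator that actually provided the memory */
        if (is_vmalloc_addr(pages))
                vfree(pages);
        else
                kfree(pages);
}
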
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 20724aee76f..c4a02648c8a 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1585,12 +1585,16 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case IB_WR_LOCAL_INV:
+ ctrl->srcrb_flags |=
+ cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
wqe += sizeof (struct mlx4_wqe_local_inval_seg);
size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
break;
case IB_WR_FAST_REG_MR:
+ ctrl->srcrb_flags |=
+ cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
set_fmr_seg(wqe, wr);
wqe += sizeof (struct mlx4_wqe_fmr_seg);
size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 6d55f9d748f..8c2ed994d54 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1059,7 +1059,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
if (mthca_is_memfree(dev))
dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64),
- MTHCA_MTT_SEG_SIZE) / MTHCA_MTT_SEG_SIZE;
+ dev->limits.mtt_seg_size) / dev->limits.mtt_seg_size;
else
dev_lim->reserved_mtts = 1 << (field >> 4);
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 252590116df..9ef611f6dd3 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -159,6 +159,7 @@ struct mthca_limits {
int reserved_eqs;
int num_mpts;
int num_mtt_segs;
+ int mtt_seg_size;
int fmr_reserved_mtts;
int reserved_mtts;
int reserved_mrws;
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 28f0e0c40d7..90e4e450a12 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -641,9 +641,11 @@ static void mthca_free_irqs(struct mthca_dev *dev)
if (dev->eq_table.have_irq)
free_irq(dev->pdev->irq, dev);
for (i = 0; i < MTHCA_NUM_EQ; ++i)
- if (dev->eq_table.eq[i].have_irq)
+ if (dev->eq_table.eq[i].have_irq) {
free_irq(dev->eq_table.eq[i].msi_x_vector,
dev->eq_table.eq + i);
+ dev->eq_table.eq[i].have_irq = 0;
+ }
}
static int mthca_map_reg(struct mthca_dev *dev,
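
The mthca_free_irqs() change clears have_irq right after free_irq() so that a second teardown pass (for instance after a failed restart) cannot release the same vector twice. A minimal sketch of that idempotent-release idea, with invented names:

/* Sketch: make an IRQ release idempotent by clearing the ownership flag.
 * 'struct my_eq' and its fields are invented for illustration. */
#include <linux/interrupt.h>

struct my_eq {
        unsigned int vector;
        int have_irq;
};

static void my_eq_free_irq(struct my_eq *eq, void *dev_id)
{
        if (!eq->have_irq)
                return;                 /* already released, nothing to do */
        free_irq(eq->vector, dev_id);
        eq->have_irq = 0;               /* so a second call is a no-op */
}
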
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 1d83cf7caf3..13da9f1d24c 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -125,6 +125,10 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
MODULE_PARM_DESC(fmr_reserved_mtts,
"number of memory translation table segments reserved for FMR");
+static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+
static char mthca_version[] __devinitdata =
DRV_NAME ": Mellanox InfiniBand HCA driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -162,6 +166,7 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
int err;
u8 status;
+ mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
if (err) {
mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
@@ -460,11 +465,11 @@ static int mthca_init_icm(struct mthca_dev *mdev,
}
/* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
- mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * MTHCA_MTT_SEG_SIZE,
- dma_get_cache_alignment()) / MTHCA_MTT_SEG_SIZE;
+ mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
+ dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;
mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
- MTHCA_MTT_SEG_SIZE,
+ mdev->limits.mtt_seg_size,
mdev->limits.num_mtt_segs,
mdev->limits.reserved_mtts,
1, 0);
@@ -1315,6 +1320,12 @@ static void __init mthca_validate_profile(void)
printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
hca_profile.fmr_reserved_mtts);
}
+
+ if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+ printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n",
+ log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
+ log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
+ }
}
static int __init mthca_init(void)
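
mthca_main.c turns the MTT segment size into a tunable: the log_mtts_per_seg module parameter is range-checked in mthca_validate_profile() and expanded into limits.mtt_seg_size as (1 << log_mtts_per_seg) * 8. A stripped-down sketch of an integer module parameter with the same kind of range clamp (the default value and helper name are illustrative, not the driver's):

/* Sketch: integer module parameter with a range check, in the style of the
 * log_mtts_per_seg hunks above.  DEFAULT_LOG_MTTS is an invented default. */
#include <linux/kernel.h>
#include <linux/module.h>

#define DEFAULT_LOG_MTTS 3

static int log_mtts_per_seg = DEFAULT_LOG_MTTS;
module_param(log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");

static void validate_params(void)
{
        if (log_mtts_per_seg < 1 || log_mtts_per_seg > 5) {
                printk(KERN_WARNING "bad log_mtts_per_seg (%d), using %d\n",
                       log_mtts_per_seg, DEFAULT_LOG_MTTS);
                log_mtts_per_seg = DEFAULT_LOG_MTTS;
        }
        /* each MTT entry is 8 bytes: seg_size = (1 << log_mtts_per_seg) * 8 */
}
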
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 882e6b73591..d606edf1085 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -220,7 +220,7 @@ static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
mtt->buddy = buddy;
mtt->order = 0;
- for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
+ for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
++mtt->order;
mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
@@ -267,7 +267,7 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
while (list_len > 0) {
mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
- mtt->first_seg * MTHCA_MTT_SEG_SIZE +
+ mtt->first_seg * dev->limits.mtt_seg_size +
start_index * 8);
mtt_entry[1] = 0;
for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
@@ -326,7 +326,7 @@ static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
u64 __iomem *mtts;
int i;
- mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * MTHCA_MTT_SEG_SIZE +
+ mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
start_index * sizeof (u64);
for (i = 0; i < list_len; ++i)
mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
@@ -345,10 +345,10 @@ static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
/* For Arbel, all MTTs must fit in the same page. */
BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
/* Require full segments */
- BUG_ON(s % MTHCA_MTT_SEG_SIZE);
+ BUG_ON(s % dev->limits.mtt_seg_size);
mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
- s / MTHCA_MTT_SEG_SIZE, &dma_handle);
+ s / dev->limits.mtt_seg_size, &dma_handle);
BUG_ON(!mtts);
@@ -479,7 +479,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
if (mr->mtt)
mpt_entry->mtt_seg =
cpu_to_be64(dev->mr_table.mtt_base +
- mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);
+ mr->mtt->first_seg * dev->limits.mtt_seg_size);
if (0) {
mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
@@ -626,7 +626,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
goto err_out_table;
}
- mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
+ mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;
if (mthca_is_memfree(dev)) {
mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
@@ -908,7 +908,7 @@ int mthca_init_mr_table(struct mthca_dev *dev)
dev->mr_table.mtt_base);
dev->mr_table.tavor_fmr.mtt_base =
- ioremap(addr, mtts * MTHCA_MTT_SEG_SIZE);
+ ioremap(addr, mtts * dev->limits.mtt_seg_size);
if (!dev->mr_table.tavor_fmr.mtt_base) {
mthca_warn(dev, "MTT ioremap for FMR failed.\n");
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index d168c254061..8edb28a9a0e 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -94,7 +94,7 @@ s64 mthca_make_profile(struct mthca_dev *dev,
profile[MTHCA_RES_RDB].size = MTHCA_RDB_ENTRY_SIZE;
profile[MTHCA_RES_MCG].size = MTHCA_MGM_ENTRY_SIZE;
profile[MTHCA_RES_MPT].size = dev_lim->mpt_entry_sz;
- profile[MTHCA_RES_MTT].size = MTHCA_MTT_SEG_SIZE;
+ profile[MTHCA_RES_MTT].size = dev->limits.mtt_seg_size;
profile[MTHCA_RES_UAR].size = dev_lim->uar_scratch_entry_sz;
profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE;
profile[MTHCA_RES_UARC].size = request->uarc_size;
@@ -232,7 +232,7 @@ s64 mthca_make_profile(struct mthca_dev *dev,
dev->limits.num_mtt_segs = profile[i].num;
dev->mr_table.mtt_base = profile[i].start;
init_hca->mtt_base = profile[i].start;
- init_hca->mtt_seg_sz = ffs(MTHCA_MTT_SEG_SIZE) - 7;
+ init_hca->mtt_seg_sz = ffs(dev->limits.mtt_seg_size) - 7;
break;
case MTHCA_RES_UAR:
dev->limits.num_uars = profile[i].num;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index b832a7b814a..4a84d02ece0 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -667,7 +667,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
i = 0;
while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
mdelay(1);
- if (i >= 10000) {
+ if (i > 10000) {
nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
return 0;
}
@@ -675,7 +675,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
i = 0;
while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
mdelay(1);
- if (i >= 10000) {
+ if (i > 10000) {
printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
return 0;
@@ -701,7 +701,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
i = 0;
while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
mdelay(1);
- if (i >= 10000) {
+ if (i > 10000) {
nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n");
return 0;
}
@@ -711,7 +711,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
& 0x0000000f)) != 0x0000000f) && i++ < 5000)
mdelay(1);
- if (i >= 5000) {
+ if (i > 5000) {
nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp);
return 0;
}
@@ -722,7 +722,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
& 0x0000000f)) != 0x0000000f) && i++ < 5000)
mdelay(1);
- if (i >= 5000) {
+ if (i > 5000) {
nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp);
return 0;
}
@@ -792,7 +792,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
& 0x0000000f)) != 0x0000000f) && i++ < 5000)
mdelay(1);
- if (i >= 5000) {
+ if (i > 5000) {
nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp);
return 1;
}
@@ -815,7 +815,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
& 0x0000000f)) != 0x0000000f) && (i++ < 5000))
mdelay(1);
- if (i >= 5000) {
+ if (i > 5000) {
printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
/* return 1; */
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 47d588ba2a7..181b1f32325 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1394,8 +1394,8 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
struct ipoib_dev_priv *priv = netdev_priv(dev);
int e = skb_queue_empty(&priv->cm.skb_queue);
- if (skb->dst)
- skb->dst->ops->update_pmtu(skb->dst, mtu);
+ if (skb_dst(skb))
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
@@ -1455,13 +1455,15 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
struct net_device *dev = to_net_dev(d);
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ if (!rtnl_trylock())
+ return restart_syscall();
+
/* flush paths if we switch modes so that connections are restarted */
if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
ipoib_warn(priv, "enabling connected mode "
"will cause multicast packet drops\n");
- rtnl_lock();
dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
rtnl_unlock();
priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
@@ -1473,7 +1475,6 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
if (!strcmp(buf, "datagram\n")) {
clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
- rtnl_lock();
if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
if (priv->hca_caps & IB_DEVICE_UD_TSO)
@@ -1485,6 +1486,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
return count;
}
+ rtnl_unlock();
return -EINVAL;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ab2c192c76b..e319d91f60a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -561,7 +561,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
struct ipoib_neigh *neigh;
unsigned long flags;
- neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
+ neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour, skb->dev);
if (!neigh) {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
@@ -570,9 +570,9 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, skb->dst->neighbour->ha + 4);
+ path = __path_find(dev, skb_dst(skb)->neighbour->ha + 4);
if (!path) {
- path = path_rec_create(dev, skb->dst->neighbour->ha + 4);
+ path = path_rec_create(dev, skb_dst(skb)->neighbour->ha + 4);
if (!path)
goto err_path;
@@ -605,7 +605,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
goto err_drop;
}
} else
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
} else {
neigh->ah = NULL;
@@ -635,15 +635,15 @@ static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
/* Look up path record for unicasts */
- if (skb->dst->neighbour->ha[4] != 0xff) {
+ if (skb_dst(skb)->neighbour->ha[4] != 0xff) {
neigh_add_path(skb, dev);
return;
}
/* Add in the P_Key for multicasts */
- skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
- skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
- ipoib_mcast_send(dev, skb->dst->neighbour->ha + 4, skb);
+ skb_dst(skb)->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
+ skb_dst(skb)->neighbour->ha[9] = priv->pkey & 0xff;
+ ipoib_mcast_send(dev, skb_dst(skb)->neighbour->ha + 4, skb);
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -708,16 +708,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct ipoib_neigh *neigh;
unsigned long flags;
- if (likely(skb->dst && skb->dst->neighbour)) {
- if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
+ if (likely(skb_dst(skb) && skb_dst(skb)->neighbour)) {
+ if (unlikely(!*to_ipoib_neigh(skb_dst(skb)->neighbour))) {
ipoib_path_lookup(skb, dev);
return NETDEV_TX_OK;
}
- neigh = *to_ipoib_neigh(skb->dst->neighbour);
+ neigh = *to_ipoib_neigh(skb_dst(skb)->neighbour);
if (unlikely((memcmp(&neigh->dgid.raw,
- skb->dst->neighbour->ha + 4,
+ skb_dst(skb)->neighbour->ha + 4,
sizeof(union ib_gid))) ||
(neigh->dev != dev))) {
spin_lock_irqsave(&priv->lock, flags);
@@ -743,7 +743,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
} else if (neigh->ah) {
- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
+ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
return NETDEV_TX_OK;
}
@@ -772,7 +772,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
(be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
- skb->dst ? "neigh" : "dst",
+ skb_dst(skb) ? "neigh" : "dst",
be16_to_cpup((__be16 *) skb->data),
IPOIB_QPN(phdr->hwaddr),
phdr->hwaddr + 4);
@@ -817,7 +817,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
* destination address onto the front of the skb so we can
* figure out where to send the packet later.
*/
- if ((!skb->dst || !skb->dst->neighbour) && daddr) {
+ if ((!skb_dst(skb) || !skb_dst(skb)->neighbour) && daddr) {
struct ipoib_pseudoheader *phdr =
(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
@@ -1053,6 +1053,7 @@ static void ipoib_setup(struct net_device *dev)
dev->tx_queue_len = ipoib_sendq_size * 2;
dev->features = (NETIF_F_VLAN_CHALLENGED |
NETIF_F_HIGHDMA);
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
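
All direct skb->dst dereferences in ipoib are converted to the skb_dst() accessor, which hides how the dst pointer is now stored inside the skb. A minimal sketch of the access pattern used in ipoib_cm_skb_too_long() above (the wrapper name is invented):

/* Sketch: always reach the route through skb_dst() instead of skb->dst;
 * mirrors the ipoib_cm_skb_too_long() hunk with an illustrative wrapper. */
#include <linux/skbuff.h>
#include <net/dst.h>

static void shrink_path_mtu(struct sk_buff *skb, unsigned int mtu)
{
        struct dst_entry *dst = skb_dst(skb);

        /* locally generated skbs may carry no dst at all */
        if (dst && dst->ops->update_pmtu)
                dst->ops->update_pmtu(dst, mtu);
}
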
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 425e31112ed..a0e97532e71 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -261,7 +261,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
skb->dev = dev;
- if (!skb->dst || !skb->dst->neighbour) {
+ if (!skb_dst(skb) || !skb_dst(skb)->neighbour) {
/* put pseudoheader back on for next time */
skb_push(skb, sizeof (struct ipoib_pseudoheader));
}
@@ -707,10 +707,10 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
out:
if (mcast && mcast->ah) {
- if (skb->dst &&
- skb->dst->neighbour &&
- !*to_ipoib_neigh(skb->dst->neighbour)) {
- struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour,
+ if (skb_dst(skb) &&
+ skb_dst(skb)->neighbour &&
+ !*to_ipoib_neigh(skb_dst(skb)->neighbour)) {
+ struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour,
skb->dev);
if (neigh) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 4c57f329dd5..e3bf00d8cd2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -61,7 +61,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
- rtnl_lock();
+ if (!rtnl_trylock())
+ return restart_syscall();
mutex_lock(&ppriv->vlan_mutex);
/*
@@ -167,7 +168,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
- rtnl_lock();
+ if (!rtnl_trylock())
+ return restart_syscall();
mutex_lock(&ppriv->vlan_mutex);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey) {
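
set_mode(), ipoib_vlan_add() and ipoib_vlan_delete() now take the RTNL with rtnl_trylock() and return restart_syscall() on contention, so a sysfs write can no longer deadlock against an RTNL holder that is waiting to tear the attribute down. A minimal sketch of that store-handler pattern (the reconfigure() stub and attribute are hypothetical):

/* Sketch: sysfs store handler using rtnl_trylock()/restart_syscall(),
 * mirroring the hunks above.  reconfigure() is a hypothetical stub. */
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>

static int reconfigure(struct net_device *dev, const char *buf)
{
        return 0;       /* hypothetical work done under RTNL */
}

static ssize_t mode_store(struct device *d, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        int err;

        if (!rtnl_trylock())
                return restart_syscall();       /* let the write be retried */

        err = reconfigure(to_net_dev(d), buf);
        rtnl_unlock();

        return err ? err : count;
}
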
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 75223f50de5..0ba6ec87629 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -257,11 +257,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
- /*
- * mgmt tasks do not need special cleanup and we do not
- * allocate anything in the init task callout
- */
- if (!task->sc || task->state == ISCSI_TASK_PENDING)
+ /* mgmt tasks do not need special cleanup */
+ if (!task->sc)
return;
if (iser_task->status == ISER_TASK_STATUS_STARTED) {
@@ -517,7 +514,8 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
}
static struct iscsi_endpoint *
-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
+iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+ int non_blocking)
{
int err;
struct iser_conn *ib_conn;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e54e002665b..7c237e6ac71 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -42,6 +42,7 @@ static unsigned int input_abs_bypass_init_data[] __initdata = {
ABS_MT_POSITION_Y,
ABS_MT_TOOL_TYPE,
ABS_MT_BLOB_ID,
+ ABS_MT_TRACKING_ID,
0
};
static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)];
@@ -1264,8 +1265,14 @@ static struct device_type input_dev_type = {
.uevent = input_dev_uevent,
};
+static char *input_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
+}
+
struct class input_class = {
.name = "input",
+ .nodename = input_nodename,
};
EXPORT_SYMBOL_GPL(input_class);
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 356b3a25efa..1c0b529c06a 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -35,7 +35,7 @@
#include <linux/input.h>
#include <linux/gameport.h>
#include <linux/jiffies.h>
-#include <asm/timex.h>
+#include <linux/timex.h>
#define DRIVER_DESC "Analog joystick and gamepad driver"
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 5c0a631d145..06f46fcc077 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -232,7 +232,7 @@ config INPUT_GPIO_ROTARY_ENCODER
depends on GPIOLIB && GENERIC_GPIO
help
Say Y here to add support for rotary encoders connected to GPIO lines.
- Check file:Documentation/incput/rotary_encoder.txt for more
+ Check file:Documentation/input/rotary-encoder.txt for more
information.
To compile this driver as a module, choose M here: the
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index d6a30cee7bc..6d67af5387a 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/input.h>
#include <linux/platform_device.h>
+#include <linux/timex.h>
#include <asm/io.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index da3c3a5d268..c4b3fbd1a80 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -192,7 +192,7 @@ config SERIO_RAW
config SERIO_XILINX_XPS_PS2
tristate "Xilinx XPS PS/2 Controller Support"
- depends on PPC
+ depends on PPC || MICROBLAZE
help
This driver supports XPS PS/2 IP from the Xilinx EDK on
PowerPC platform.
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index e29cdc13a19..89b394183a7 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,7 @@ static void amba_kmi_close(struct serio *io)
clk_disable(kmi->clk);
}
-static int amba_kmi_probe(struct amba_device *dev, void *id)
+static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
{
struct amba_kmi_port *kmi;
struct serio *io;
@@ -135,7 +135,7 @@ static int amba_kmi_probe(struct amba_device *dev, void *id)
io->dev.parent = &dev->dev;
kmi->io = io;
- kmi->base = ioremap(dev->res.start, KMI_SIZE);
+ kmi->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!kmi->base) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 67248c31e19..be5bbbb8ae4 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -210,7 +210,7 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
timeout = wait_event_timeout(ps2dev->wait,
!(ps2dev->flags & PS2_FLAG_CMD1), timeout);
- if (ps2dev->cmdcnt && timeout > 0) {
+ if (ps2dev->cmdcnt && !(ps2dev->flags & PS2_FLAG_CMD1)) {
timeout = ps2_adjust_timeout(ps2dev, command, timeout);
wait_event_timeout(ps2dev->wait,
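
The libps2.c fix re-reads the PS2_FLAG_CMD1 condition instead of trusting a positive return from wait_event_timeout(): the first response byte may arrive in the same tick the timer runs out, so the predicate, not the remaining time, decides success. A tiny sketch of that "check the condition, not the timer" rule (the flag and names are invented):

/* Sketch: after wait_event_timeout(), judge success by the predicate itself;
 * a zero return only means the timer expired, not that the event is absent.
 * MY_FLAG_BUSY and the flags word are illustrative. */
#include <linux/errno.h>
#include <linux/wait.h>

#define MY_FLAG_BUSY    (1UL << 0)

static int wait_until_idle(wait_queue_head_t *wq, unsigned long *flags,
                           unsigned long timeout)
{
        wait_event_timeout(*wq, !(*flags & MY_FLAG_BUSY), timeout);

        /* re-check the predicate rather than the leftover timeout */
        return (*flags & MY_FLAG_BUSY) ? -ETIMEDOUT : 0;
}
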
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index f100c7f4c1d..6954f550010 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -419,7 +419,7 @@ static int ucb1400_ts_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int ucb1400_ts_resume(struct platform_device *dev)
{
- struct ucb1400_ts *ucb = platform_get_drvdata(dev);
+ struct ucb1400_ts *ucb = dev->dev.platform_data;
if (ucb->ts_task) {
/*
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 69af8385ab1..2957d48e004 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -569,7 +569,7 @@ static int wm97xx_probe(struct device *dev)
mutex_init(&wm->codec_mutex);
wm->dev = dev;
- dev->driver_data = wm;
+ dev_set_drvdata(dev, wm);
wm->ac97 = to_ac97_t(dev);
/* check that we have a supported codec */
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index 928d2ed8865..b115726dc08 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -114,7 +114,7 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
return -ENOMEM;
}
- dev->dev.driver_data = info;
+ dev_set_drvdata(&dev->dev, info);
info->xbdev = dev;
info->irq = -1;
snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
@@ -186,7 +186,7 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
static int xenkbd_resume(struct xenbus_device *dev)
{
- struct xenkbd_info *info = dev->dev.driver_data;
+ struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
xenkbd_disconnect_backend(info);
memset(info->page, 0, PAGE_SIZE);
@@ -195,7 +195,7 @@ static int xenkbd_resume(struct xenbus_device *dev)
static int xenkbd_remove(struct xenbus_device *dev)
{
- struct xenkbd_info *info = dev->dev.driver_data;
+ struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
xenkbd_disconnect_backend(info);
if (info->kbd)
@@ -266,7 +266,7 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *info)
static void xenkbd_backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
- struct xenkbd_info *info = dev->dev.driver_data;
+ struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
int ret, val;
switch (backend_state) {
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 3d113c6e4a7..02bdca6f95c 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -61,4 +61,6 @@ source "drivers/isdn/hardware/Kconfig"
endif # ISDN_CAPI
+source "drivers/isdn/gigaset/Kconfig"
+
endif # ISDN
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index 29419a8d31d..16f2e465e5f 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -490,7 +490,14 @@ static void pars_2_message(_cmsg * cmsg)
}
}
-/*-------------------------------------------------------*/
+/**
+ * capi_cmsg2message() - assemble CAPI 2.0 message from _cmsg structure
+ * @cmsg: _cmsg structure
+ * @msg: buffer for assembled message
+ *
+ * Return value: 0 for success
+ */
+
unsigned capi_cmsg2message(_cmsg * cmsg, u8 * msg)
{
cmsg->m = msg;
@@ -553,7 +560,14 @@ static void message_2_pars(_cmsg * cmsg)
}
}
-/*-------------------------------------------------------*/
+/**
+ * capi_message2cmsg() - disassemble CAPI 2.0 message into _cmsg structure
+ * @cmsg: _cmsg structure
+ * @msg: buffer containing the assembled message to be disassembled
+ *
+ * Return value: 0 for success
+ */
+
unsigned capi_message2cmsg(_cmsg * cmsg, u8 * msg)
{
memset(cmsg, 0, sizeof(_cmsg));
@@ -573,7 +587,18 @@ unsigned capi_message2cmsg(_cmsg * cmsg, u8 * msg)
return 0;
}
-/*-------------------------------------------------------*/
+/**
+ * capi_cmsg_header() - initialize header part of _cmsg structure
+ * @cmsg: _cmsg structure
+ * @_ApplId: ApplID field value
+ * @_Command: Command field value
+ * @_Subcommand: Subcommand field value
+ * @_Messagenumber: Message Number field value
+ * @_Controller: Controller/PLCI/NCCI field value
+ *
+ * Return value: 0 for success
+ */
+
unsigned capi_cmsg_header(_cmsg * cmsg, u16 _ApplId,
u8 _Command, u8 _Subcommand,
u16 _Messagenumber, u32 _Controller)
@@ -641,6 +666,14 @@ static char *mnames[] =
[0x4e] = "MANUFACTURER_RESP"
};
+/**
+ * capi_cmd2str() - convert CAPI 2.0 command/subcommand number to name
+ * @cmd: command number
+ * @subcmd: subcommand number
+ *
+ * Return value: static string, NULL if command/subcommand unknown
+ */
+
char *capi_cmd2str(u8 cmd, u8 subcmd)
{
return mnames[command_2_index(cmd, subcmd)];
@@ -879,6 +912,11 @@ init:
return cdb;
}
+/**
+ * cdebbuf_free() - free CAPI debug buffer
+ * @cdb: buffer to free
+ */
+
void cdebbuf_free(_cdebbuf *cdb)
{
if (likely(cdb == g_debbuf)) {
@@ -891,6 +929,16 @@ void cdebbuf_free(_cdebbuf *cdb)
}
+/**
+ * capi_message2str() - format CAPI 2.0 message for printing
+ * @msg: CAPI 2.0 message
+ *
+ * Allocates a CAPI debug buffer and fills it with a printable representation
+ * of the CAPI 2.0 message in @msg.
+ * Return value: allocated debug buffer, NULL on error
+ * The returned buffer should be freed by a call to cdebbuf_free() after use.
+ */
+
_cdebbuf *capi_message2str(u8 * msg)
{
_cdebbuf *cdb;
@@ -926,10 +974,23 @@ _cdebbuf *capi_message2str(u8 * msg)
return cdb;
}
+/**
+ * capi_cmsg2str() - format _cmsg structure for printing
+ * @cmsg: _cmsg structure
+ *
+ * Allocates a CAPI debug buffer and fills it with a printable representation
+ * of the CAPI 2.0 message stored in @cmsg by a previous call to
+ * capi_cmsg2message() or capi_message2cmsg().
+ * Return value: allocated debug buffer, NULL on error
+ * The returned buffer should be freed by a call to cdebbuf_free() after use.
+ */
+
_cdebbuf *capi_cmsg2str(_cmsg * cmsg)
{
_cdebbuf *cdb;
+ if (!cmsg->m)
+ return NULL; /* no message */
cdb = cdebbuf_alloc();
if (!cdb)
return NULL;
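
The new kernel-doc above spells out the lifetime rule for the debug helpers: capi_message2str() and capi_cmsg2str() hand back an allocated _cdebbuf that must be released with cdebbuf_free(). A short usage sketch of that call sequence (only the sequence comes from the comments; the wrapper name and the use of the buf field assume the _cdebbuf layout in include/linux/isdn/capiutil.h):

/* Sketch: format a raw CAPI message for debugging and free the buffer,
 * following the lifetime rule documented above.  Wrapper name invented. */
#include <linux/kernel.h>
#include <linux/isdn/capiutil.h>

static void dump_capi_message(u8 *msg)
{
        _cdebbuf *cdb = capi_message2str(msg);

        if (!cdb)
                return;                         /* formatting failed */
        printk(KERN_DEBUG "CAPI: %s\n", cdb->buf);
        cdebbuf_free(cdb);                      /* always free after use */
}
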
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index f33170368cd..57d26360f64 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -377,14 +377,14 @@ void capi_ctr_ready(struct capi_ctr * card)
EXPORT_SYMBOL(capi_ctr_ready);
/**
- * capi_ctr_reseted() - signal CAPI controller reset
+ * capi_ctr_down() - signal CAPI controller not ready
* @card: controller descriptor structure.
*
* Called by hardware driver to signal that the controller is down and
* unavailable for use.
*/
-void capi_ctr_reseted(struct capi_ctr * card)
+void capi_ctr_down(struct capi_ctr * card)
{
u16 appl;
@@ -413,7 +413,7 @@ void capi_ctr_reseted(struct capi_ctr * card)
notify_push(KCI_CONTRDOWN, card->cnr, 0, 0);
}
-EXPORT_SYMBOL(capi_ctr_reseted);
+EXPORT_SYMBOL(capi_ctr_down);
/**
* capi_ctr_suspend_output() - suspend controller
@@ -517,7 +517,7 @@ EXPORT_SYMBOL(attach_capi_ctr);
int detach_capi_ctr(struct capi_ctr *card)
{
if (card->cardstate != CARD_DETECTED)
- capi_ctr_reseted(card);
+ capi_ctr_down(card);
ncards--;
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 7d97d54588d..77e9fdda059 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -183,7 +183,7 @@ int cf_command(int drvid, int mode,
(mode != 1) ? "" : " 0 ",
(mode != 1) ? "" : fwd_nr);
- retval = divert_if.ll_cmd(&cs->ics); /* excute command */
+ retval = divert_if.ll_cmd(&cs->ics); /* execute command */
if (!retval)
{ cs->prev = NULL;
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
index 9ca889adf12..18ab8652aa5 100644
--- a/drivers/isdn/gigaset/Kconfig
+++ b/drivers/isdn/gigaset/Kconfig
@@ -1,5 +1,6 @@
menuconfig ISDN_DRV_GIGASET
tristate "Siemens Gigaset support"
+ depends on ISDN_I4L
select CRC_CCITT
select BITREVERSE
help
@@ -42,11 +43,4 @@ config GIGASET_DEBUG
This enables debugging code in the Gigaset drivers.
If in doubt, say yes.
-config GIGASET_UNDOCREQ
- bool "Support for undocumented USB requests"
- help
- This enables support for USB requests we only know from
- reverse engineering (currently M105 only). If you need
- features like configuration mode of M105, say yes.
-
endif # ISDN_DRV_GIGASET
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index 2a4ce96f04b..234cc5d5331 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -174,9 +174,8 @@ byte_stuff:
if (unlikely(fcs != PPP_GOODFCS)) {
dev_err(cs->dev,
- "Packet checksum at %lu failed, "
- "packet is corrupted (%u bytes)!\n",
- bcs->rcvbytes, skb->len);
+ "Checksum failed, %u bytes corrupted!\n",
+ skb->len);
compskb = NULL;
gigaset_rcv_error(compskb, cs, bcs);
error = 1;
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 0048ce98bfa..e4141bf8b2f 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -565,8 +565,6 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
gig_dbg(DEBUG_INIT, "setting up bcs[%d]->at_state", channel);
gigaset_at_init(&bcs->at_state, bcs, cs, -1);
- bcs->rcvbytes = 0;
-
#ifdef CONFIG_GIGASET_DEBUG
bcs->emptycount = 0;
#endif
@@ -672,14 +670,8 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
cs->tty = NULL;
cs->tty_dev = NULL;
cs->cidmode = cidmode != 0;
-
- //if(onechannel) { //FIXME
- cs->tabnocid = gigaset_tab_nocid_m10x;
- cs->tabcid = gigaset_tab_cid_m10x;
- //} else {
- // cs->tabnocid = gigaset_tab_nocid;
- // cs->tabcid = gigaset_tab_cid;
- //}
+ cs->tabnocid = gigaset_tab_nocid;
+ cs->tabcid = gigaset_tab_cid;
init_waitqueue_head(&cs->waitqueue);
cs->waiting = 0;
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index e582a4887bc..ec5169604a6 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -160,7 +160,7 @@
// 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid), 400: hup, 500: reset, 600: dial, 700: ring
-struct reply_t gigaset_tab_nocid_m10x[]= /* with dle mode */
+struct reply_t gigaset_tab_nocid[] =
{
/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
@@ -280,7 +280,7 @@ struct reply_t gigaset_tab_nocid_m10x[]= /* with dle mode */
};
// 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring, 400: hup, 750: accepted icall
-struct reply_t gigaset_tab_cid_m10x[] = /* for M10x */
+struct reply_t gigaset_tab_cid[] =
{
/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 747178f03d2..a2f6125739e 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -282,8 +282,8 @@ struct reply_t {
char *command; /* NULL==none */
};
-extern struct reply_t gigaset_tab_cid_m10x[];
-extern struct reply_t gigaset_tab_nocid_m10x[];
+extern struct reply_t gigaset_tab_cid[];
+extern struct reply_t gigaset_tab_nocid[];
struct inbuf_t {
unsigned char *rcvbuf; /* usb-gigaset receive buffer */
@@ -384,7 +384,6 @@ struct bc_state {
int trans_up; /* Counter of packages (upstream) */
struct at_state_t at_state;
- unsigned long rcvbytes;
__u16 fcs;
struct sk_buff *skb;
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 69a702f0db9..9b22f9cf2f3 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -544,11 +544,11 @@ int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid)
gig_dbg(DEBUG_ANY, "Register driver capabilities to LL");
- //iif->id[sizeof(iif->id) - 1]=0;
- //strncpy(iif->id, isdnid, sizeof(iif->id) - 1);
if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index)
- >= sizeof iif->id)
- return -ENOMEM; //FIXME EINVAL/...??
+ >= sizeof iif->id) {
+ pr_err("ID too long: %s\n", isdnid);
+ return 0;
+ }
iif->owner = THIS_MODULE;
iif->channels = cs->channels;
@@ -568,8 +568,10 @@ int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid)
iif->rcvcallb_skb = NULL; /* Will be set by LL */
iif->statcallb = NULL; /* Will be set by LL */
- if (!register_isdn(iif))
+ if (!register_isdn(iif)) {
+ pr_err("register_isdn failed\n");
return 0;
+ }
cs->myid = iif->channels; /* Set my device id */
return 1;
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 820a30923fe..1ebfcab7466 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -599,8 +599,7 @@ void gigaset_if_init(struct cardstate *cs)
if (!IS_ERR(cs->tty_dev))
dev_set_drvdata(cs->tty_dev, cs);
else {
- dev_warn(cs->dev,
- "could not register device to the tty subsystem\n");
+ pr_warning("could not register device to the tty subsystem\n");
cs->tty_dev = NULL;
}
mutex_unlock(&cs->mutex);
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index b171e75cb52..db3a1e4cd48 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -175,7 +175,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
return -EINVAL;
}
src = iwb->read;
- if (unlikely(limit > BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
+ if (unlikely(limit >= BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
(read < src && limit >= src))) {
pr_err("isoc write buffer frame reservation violated\n");
return -EFAULT;
@@ -246,6 +246,10 @@ static inline void dump_bytes(enum debuglevel level, const char *tag,
unsigned char c;
static char dbgline[3 * 32 + 1];
int i = 0;
+
+ if (!(gigaset_debuglevel & level))
+ return;
+
while (count-- > 0) {
if (i > sizeof(dbgline) - 4) {
dbgline[i] = '\0';
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
index da6f3acf9fd..9715aad9c3f 100644
--- a/drivers/isdn/gigaset/proc.c
+++ b/drivers/isdn/gigaset/proc.c
@@ -79,5 +79,5 @@ void gigaset_init_dev_sysfs(struct cardstate *cs)
gig_dbg(DEBUG_INIT, "setting up sysfs");
if (device_create_file(cs->tty_dev, &dev_attr_cidmode))
- dev_err(cs->dev, "could not create sysfs attribute\n");
+ pr_err("could not create sysfs attribute\n");
}
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index d7838516609..4deb1ab0dbf 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -153,8 +153,6 @@ static inline unsigned tiocm_to_gigaset(unsigned state)
return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0);
}
-#ifdef CONFIG_GIGASET_UNDOCREQ
-/* WARNING: EXPERIMENTAL! */
static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
unsigned new_state)
{
@@ -176,6 +174,11 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
return 0;
}
+/*
+ * Set M105 configuration value
+ * using undocumented device commands reverse engineered from USB traces
+ * of the Siemens Windows driver
+ */
static int set_value(struct cardstate *cs, u8 req, u16 val)
{
struct usb_device *udev = cs->hw.usb->udev;
@@ -205,8 +208,10 @@ static int set_value(struct cardstate *cs, u8 req, u16 val)
return r < 0 ? r : (r2 < 0 ? r2 : 0);
}
-/* WARNING: HIGHLY EXPERIMENTAL! */
-// don't use this in an interrupt/BH
+/*
+ * set the baud rate on the internal serial adapter
+ * using the undocumented parameter setting command
+ */
static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
{
u16 val;
@@ -237,8 +242,10 @@ static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
return set_value(cs, 1, val);
}
-/* WARNING: HIGHLY EXPERIMENTAL! */
-// don't use this in an interrupt/BH
+/*
+ * set the line format on the internal serial adapter
+ * using the undocumented parameter setting command
+ */
static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
{
u16 val = 0;
@@ -274,24 +281,6 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
return set_value(cs, 3, val);
}
-#else
-static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
- unsigned new_state)
-{
- return -ENOTTY;
-}
-
-static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
-{
- return -ENOTTY;
-}
-
-static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
-{
- return -ENOTTY;
-}
-#endif
-
/*================================================================================================================*/
static int gigaset_init_bchannel(struct bc_state *bcs)
@@ -362,10 +351,8 @@ static void gigaset_modem_fill(unsigned long data)
} while (again);
}
-/**
- * gigaset_read_int_callback
- *
- * It is called if the data was received from the device.
+/*
+ * Interrupt Input URB completion routine
*/
static void gigaset_read_int_callback(struct urb *urb)
{
@@ -567,18 +554,19 @@ static int gigaset_chars_in_buffer(struct cardstate *cs)
return cs->cmdbytes;
}
+/*
+ * set the break characters on the internal serial adapter
+ * using undocumented device commands reverse engineered from USB traces
+ * of the Siemens Windows driver
+ */
static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
{
-#ifdef CONFIG_GIGASET_UNDOCREQ
struct usb_device *udev = cs->hw.usb->udev;
gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
memcpy(cs->hw.usb->bchars, buf, 6);
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
0, 0, &buf, 6, 2000);
-#else
- return -ENOTTY;
-#endif
}
static int gigaset_freebcshw(struct bc_state *bcs)
@@ -625,7 +613,6 @@ static int gigaset_initcshw(struct cardstate *cs)
ucs->bchars[5] = 0x13;
ucs->bulk_out_buffer = NULL;
ucs->bulk_out_urb = NULL;
- //ucs->urb_cmd_out = NULL;
ucs->read_urb = NULL;
tasklet_init(&cs->write_tasklet,
&gigaset_modem_fill, (unsigned long) cs);
@@ -742,7 +729,7 @@ static int gigaset_probe(struct usb_interface *interface,
cs->dev = &interface->dev;
/* save address of controller structure */
- usb_set_intfdata(interface, cs); // dev_set_drvdata(&interface->dev, cs);
+ usb_set_intfdata(interface, cs);
endpoint = &hostif->endpoint[0].desc;
@@ -921,8 +908,7 @@ static const struct gigaset_ops ops = {
gigaset_m10x_input,
};
-/**
- * usb_gigaset_init
+/*
* This function is called while kernel-module is loaded
*/
static int __init usb_gigaset_init(void)
@@ -952,9 +938,7 @@ error:
return -1;
}
-
-/**
- * usb_gigaset_exit
+/*
* This function is called while unloading the kernel-module
*/
static void __exit usb_gigaset_exit(void)
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index abf05ec3176..a7c0083e78a 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -330,7 +330,7 @@ void b1_reset_ctr(struct capi_ctr *ctrl)
spin_lock_irqsave(&card->lock, flags);
capilib_release(&cinfo->ncci_head);
spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_reseted(ctrl);
+ capi_ctr_down(ctrl);
}
void b1_register_appl(struct capi_ctr *ctrl,
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c
index da34b98e3de..0e84aaae43f 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/isdn/hardware/avm/b1dma.c
@@ -759,7 +759,7 @@ void b1dma_reset_ctr(struct capi_ctr *ctrl)
memset(cinfo->version, 0, sizeof(cinfo->version));
capilib_release(&cinfo->ncci_head);
spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_reseted(ctrl);
+ capi_ctr_down(ctrl);
}
/* ------------------------------------------------------------- */
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 9df1d3f66c8..6833301a45f 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -681,7 +681,7 @@ static irqreturn_t c4_handle_interrupt(avmcard *card)
spin_lock_irqsave(&card->lock, flags);
capilib_release(&cinfo->ncci_head);
spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_reseted(&cinfo->capi_ctrl);
+ capi_ctr_down(&cinfo->capi_ctrl);
}
card->nlogcontr = 0;
return IRQ_HANDLED;
@@ -909,7 +909,7 @@ static void c4_reset_ctr(struct capi_ctr *ctrl)
for (i=0; i < card->nr_controllers; i++) {
cinfo = &card->ctrlinfo[i];
memset(cinfo->version, 0, sizeof(cinfo->version));
- capi_ctr_reseted(&cinfo->capi_ctrl);
+ capi_ctr_down(&cinfo->capi_ctrl);
}
card->nlogcontr = 0;
}
diff --git a/drivers/isdn/hardware/avm/t1isa.c b/drivers/isdn/hardware/avm/t1isa.c
index e7724493738..1c53fd49adb 100644
--- a/drivers/isdn/hardware/avm/t1isa.c
+++ b/drivers/isdn/hardware/avm/t1isa.c
@@ -339,7 +339,7 @@ static void t1isa_reset_ctr(struct capi_ctr *ctrl)
spin_lock_irqsave(&card->lock, flags);
capilib_release(&cinfo->ncci_head);
spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_reseted(ctrl);
+ capi_ctr_down(ctrl);
}
static void t1isa_remove(struct pci_dev *pdev)
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index fd112ae252c..3024566dd09 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -13,7 +13,7 @@ config MISDN_HFCPCI
config MISDN_HFCMULTI
tristate "Support for HFC multiport cards (HFC-4S/8S/E1)"
- depends on PCI
+ depends on PCI || 8xx
depends on MISDN
help
Enable support for cards with Cologne Chip AG's HFC multiport
@@ -23,6 +23,15 @@ config MISDN_HFCMULTI
* HFC-8S (8 S/T interfaces on one chip)
* HFC-E1 (E1 interface for 2Mbit ISDN)
+config MISDN_HFCMULTI_8xx
+ boolean "Support for XHFC embedded board in HFC multiport driver"
+ depends on MISDN
+ depends on MISDN_HFCMULTI
+ depends on 8xx
+ default 8xx
+ help
+ Enable support for the XHFC embedded solution from Speech Design.
+
config MISDN_HFCUSB
tristate "Support for HFC-S USB based TAs"
depends on USB
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi.h b/drivers/isdn/hardware/mISDN/hfc_multi.h
index 663b77f578b..0c773866efc 100644
--- a/drivers/isdn/hardware/mISDN/hfc_multi.h
+++ b/drivers/isdn/hardware/mISDN/hfc_multi.h
@@ -17,6 +17,16 @@
#define PCI_ENA_REGIO 0x01
#define PCI_ENA_MEMIO 0x02
+#define XHFC_IRQ 4 /* SIU_IRQ2 */
+#define XHFC_MEMBASE 0xFE000000
+#define XHFC_MEMSIZE 0x00001000
+#define XHFC_OFFSET 0x00001000
+#define PA_XHFC_A0 0x0020 /* PA10 */
+#define PB_XHFC_IRQ1 0x00000100 /* PB23 */
+#define PB_XHFC_IRQ2 0x00000200 /* PB22 */
+#define PB_XHFC_IRQ3 0x00000400 /* PB21 */
+#define PB_XHFC_IRQ4 0x00000800 /* PB20 */
+
/*
* NOTE: some registers are assigned multiple times due to different modes
* also registers are assigned differently for HFC-4s/8s and HFC-E1
@@ -44,6 +54,7 @@ struct hfc_chan {
int conf; /* conference setting of TX slot */
int txpending; /* if there is currently data in */
/* the FIFO 0=no, 1=yes, 2=splloop */
+ int Zfill; /* rx-fifo level on last hfcmulti_tx */
int rx_off; /* set to turn fifo receive off */
int coeff_count; /* current coeff block */
s32 *coeff; /* memory pointer to 8 coeff blocks */
@@ -62,6 +73,7 @@ struct hfcm_hw {
u_char r_sci_msk;
u_char r_tx0, r_tx1;
u_char a_st_ctrl0[8];
+ u_char r_bert_wd_md;
timer_t timer;
};
@@ -79,6 +91,11 @@ struct hfcm_hw {
#define HFC_CFG_CRC4 10 /* disable CRC-4 Multiframe mode, */
/* use double frame instead. */
+#define HFC_TYPE_E1 1 /* controller is HFC-E1 */
+#define HFC_TYPE_4S 4 /* controller is HFC-4S */
+#define HFC_TYPE_8S 8 /* controller is HFC-8S */
+#define HFC_TYPE_XHFC 5 /* controller is XHFC */
+
#define HFC_CHIP_EXRAM_128 0 /* external ram 128k */
#define HFC_CHIP_EXRAM_512 1 /* external ram 256k */
#define HFC_CHIP_REVISION0 2 /* old fifo handling */
@@ -86,19 +103,22 @@ struct hfcm_hw {
#define HFC_CHIP_PCM_MASTER 4 /* PCM is master */
#define HFC_CHIP_RX_SYNC 5 /* disable pll sync for pcm */
#define HFC_CHIP_DTMF 6 /* DTMF decoding is enabled */
-#define HFC_CHIP_ULAW 7 /* ULAW mode */
-#define HFC_CHIP_CLOCK2 8 /* double clock mode */
-#define HFC_CHIP_E1CLOCK_GET 9 /* always get clock from E1 interface */
-#define HFC_CHIP_E1CLOCK_PUT 10 /* always put clock from E1 interface */
-#define HFC_CHIP_WATCHDOG 11 /* whether we should send signals */
+#define HFC_CHIP_CONF 7 /* conference handling is enabled */
+#define HFC_CHIP_ULAW 8 /* ULAW mode */
+#define HFC_CHIP_CLOCK2 9 /* double clock mode */
+#define HFC_CHIP_E1CLOCK_GET 10 /* always get clock from E1 interface */
+#define HFC_CHIP_E1CLOCK_PUT 11 /* always put clock from E1 interface */
+#define HFC_CHIP_WATCHDOG 12 /* whether we should send signals */
/* to the watchdog */
-#define HFC_CHIP_B410P 12 /* whether we have a b410p with echocan in */
+#define HFC_CHIP_B410P 13 /* whether we have a b410p with echocan in */
/* hw */
-#define HFC_CHIP_PLXSD 13 /* whether we have a Speech-Design PLX */
+#define HFC_CHIP_PLXSD 14 /* whether we have a Speech-Design PLX */
+#define HFC_CHIP_EMBSD 15 /* whether we have a SD Embedded board */
#define HFC_IO_MODE_PCIMEM 0x00 /* normal memory mapped IO */
#define HFC_IO_MODE_REGIO 0x01 /* PCI io access */
#define HFC_IO_MODE_PLXSD 0x02 /* access HFC via PLX9030 */
+#define HFC_IO_MODE_EMBSD 0x03 /* direct access */
/* table entry in the PCI devices list */
struct hm_map {
@@ -111,6 +131,7 @@ struct hm_map {
int opticalsupport;
int dip_type;
int io_mode;
+ int irq;
};
struct hfc_multi {
@@ -118,7 +139,7 @@ struct hfc_multi {
struct hm_map *mtyp;
int id;
int pcm; /* id of pcm bus */
- int type;
+ int ctype; /* controller type */
int ports;
u_int irq; /* irq used by card */
@@ -158,10 +179,16 @@ struct hfc_multi {
int len);
void (*write_fifo)(struct hfc_multi *hc, u_char *data,
int len);
- u_long pci_origmembase, plx_origmembase, dsp_origmembase;
+ u_long pci_origmembase, plx_origmembase;
void __iomem *pci_membase; /* PCI memory */
void __iomem *plx_membase; /* PLX memory */
- u_char *dsp_membase; /* DSP on PLX */
+ u_long xhfc_origmembase;
+ u_char *xhfc_membase;
+ u_long *xhfc_memaddr, *xhfc_memdata;
+#ifdef CONFIG_MISDN_HFCMULTI_8xx
+ struct immap *immap;
+#endif
+ u_long pb_irqmsk; /* Portbit mask to check the IRQ line */
u_long pci_iobase; /* PCI IO */
struct hfcm_hw hw; /* remember data of write-only-registers */
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h
new file mode 100644
index 00000000000..45ddced956d
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h
@@ -0,0 +1,167 @@
+/*
+ * For License see notice in hfc_multi.c
+ *
+ * special IO and init functions for the embedded XHFC board
+ * from Speech Design
+ *
+ */
+
+#include <asm/8xx_immap.h>
+
+/* Change this to the value used by your board */
+#ifndef IMAP_ADDR
+#define IMAP_ADDR 0xFFF00000
+#endif
+
+static void
+#ifdef HFC_REGISTER_DEBUG
+HFC_outb_embsd(struct hfc_multi *hc, u_char reg, u_char val,
+ const char *function, int line)
+#else
+HFC_outb_embsd(struct hfc_multi *hc, u_char reg, u_char val)
+#endif
+{
+ hc->immap->im_ioport.iop_padat |= PA_XHFC_A0;
+ writeb(reg, hc->xhfc_memaddr);
+ hc->immap->im_ioport.iop_padat &= ~(PA_XHFC_A0);
+ writeb(val, hc->xhfc_memdata);
+}
+static u_char
+#ifdef HFC_REGISTER_DEBUG
+HFC_inb_embsd(struct hfc_multi *hc, u_char reg, const char *function, int line)
+#else
+HFC_inb_embsd(struct hfc_multi *hc, u_char reg)
+#endif
+{
+ hc->immap->im_ioport.iop_padat |= PA_XHFC_A0;
+ writeb(reg, hc->xhfc_memaddr);
+ hc->immap->im_ioport.iop_padat &= ~(PA_XHFC_A0);
+ return readb(hc->xhfc_memdata);
+}
+static u_short
+#ifdef HFC_REGISTER_DEBUG
+HFC_inw_embsd(struct hfc_multi *hc, u_char reg, const char *function, int line)
+#else
+HFC_inw_embsd(struct hfc_multi *hc, u_char reg)
+#endif
+{
+ hc->immap->im_ioport.iop_padat |= PA_XHFC_A0;
+ writeb(reg, hc->xhfc_memaddr);
+ hc->immap->im_ioport.iop_padat &= ~(PA_XHFC_A0);
+ return readb(hc->xhfc_memdata);
+}
+static void
+#ifdef HFC_REGISTER_DEBUG
+HFC_wait_embsd(struct hfc_multi *hc, const char *function, int line)
+#else
+HFC_wait_embsd(struct hfc_multi *hc)
+#endif
+{
+ hc->immap->im_ioport.iop_padat |= PA_XHFC_A0;
+ writeb(R_STATUS, hc->xhfc_memaddr);
+ hc->immap->im_ioport.iop_padat &= ~(PA_XHFC_A0);
+ while (readb(hc->xhfc_memdata) & V_BUSY)
+ cpu_relax();
+}
+
+/* write fifo data (EMBSD) */
+void
+write_fifo_embsd(struct hfc_multi *hc, u_char *data, int len)
+{
+ hc->immap->im_ioport.iop_padat |= PA_XHFC_A0;
+ *hc->xhfc_memaddr = A_FIFO_DATA0;
+ hc->immap->im_ioport.iop_padat &= ~(PA_XHFC_A0);
+ while (len) {
+ *hc->xhfc_memdata = *data;
+ data++;
+ len--;
+ }
+}
+
+/* read fifo data (EMBSD) */
+void
+read_fifo_embsd(struct hfc_multi *hc, u_char *data, int len)
+{
+ hc->immap->im_ioport.iop_padat |= PA_XHFC_A0;
+ *hc->xhfc_memaddr = A_FIFO_DATA0;
+ hc->immap->im_ioport.iop_padat &= ~(PA_XHFC_A0);
+ while (len) {
+ *data = (u_char)(*hc->xhfc_memdata);
+ data++;
+ len--;
+ }
+}
+
+static int
+setup_embedded(struct hfc_multi *hc, struct hm_map *m)
+{
+ printk(KERN_INFO
+ "HFC-multi: card manufacturer: '%s' card name: '%s' clock: %s\n",
+ m->vendor_name, m->card_name, m->clock2 ? "double" : "normal");
+
+ hc->pci_dev = NULL;
+ if (m->clock2)
+ test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
+
+ hc->leds = m->leds;
+ hc->ledstate = 0xAFFEAFFE;
+ hc->opticalsupport = m->opticalsupport;
+
+ hc->pci_iobase = 0;
+ hc->pci_membase = 0;
+ hc->xhfc_membase = NULL;
+ hc->xhfc_memaddr = NULL;
+ hc->xhfc_memdata = NULL;
+
+ /* set memory access methods */
+ if (m->io_mode) /* use mode from card config */
+ hc->io_mode = m->io_mode;
+ switch (hc->io_mode) {
+ case HFC_IO_MODE_EMBSD:
+ test_and_set_bit(HFC_CHIP_EMBSD, &hc->chip);
+ hc->slots = 128; /* required */
+ hc->HFC_outb = HFC_outb_embsd;
+ hc->HFC_inb = HFC_inb_embsd;
+ hc->HFC_inw = HFC_inw_embsd;
+ hc->HFC_wait = HFC_wait_embsd;
+ hc->read_fifo = read_fifo_embsd;
+ hc->write_fifo = write_fifo_embsd;
+ hc->xhfc_origmembase = XHFC_MEMBASE + XHFC_OFFSET * hc->id;
+ hc->xhfc_membase = (u_char *)ioremap(hc->xhfc_origmembase,
+ XHFC_MEMSIZE);
+ if (!hc->xhfc_membase) {
+ printk(KERN_WARNING
+ "HFC-multi: failed to remap xhfc address space. "
+ "(internal error)\n");
+ return -EIO;
+ }
+ hc->xhfc_memaddr = (u_long *)(hc->xhfc_membase + 4);
+ hc->xhfc_memdata = (u_long *)(hc->xhfc_membase);
+ printk(KERN_INFO
+ "HFC-multi: xhfc_membase:%#lx xhfc_origmembase:%#lx "
+ "xhfc_memaddr:%#lx xhfc_memdata:%#lx\n",
+ (u_long)hc->xhfc_membase, hc->xhfc_origmembase,
+ (u_long)hc->xhfc_memaddr, (u_long)hc->xhfc_memdata);
+ break;
+ default:
+ printk(KERN_WARNING "HFC-multi: Invalid IO mode.\n");
+ return -EIO;
+ }
+
+ /* Prepare the MPC8XX PortA 10 as output (address/data selector) */
+ hc->immap = (struct immap *)(IMAP_ADDR);
+ hc->immap->im_ioport.iop_papar &= ~(PA_XHFC_A0);
+ hc->immap->im_ioport.iop_paodr &= ~(PA_XHFC_A0);
+ hc->immap->im_ioport.iop_padir |= PA_XHFC_A0;
+
+ /* Prepare the MPC8xx PortB __X__ as input (ISDN__X__IRQ) */
+ hc->pb_irqmsk = (PB_XHFC_IRQ1 << hc->id);
+ hc->immap->im_cpm.cp_pbpar &= ~(hc->pb_irqmsk);
+ hc->immap->im_cpm.cp_pbodr &= ~(hc->pb_irqmsk);
+ hc->immap->im_cpm.cp_pbdir &= ~(hc->pb_irqmsk);
+
+ /* At this point the needed config is done */
+ /* fifos are still not enabled */
+ return 0;
+}
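
All of the accessors above share one two-phase sequence: raise the PA_XHFC_A0 port bit to select the address latch, write the register number, then drop A0 and move the data byte. A minimal sketch of that common step, assuming a helper name (xhfc_select_reg) that does not exist in the patch:

	/* Sketch only: the select-then-access sequence used by HFC_outb_embsd(),
	 * HFC_inb_embsd(), HFC_inw_embsd() and HFC_wait_embsd() above. */
	static inline void xhfc_select_reg(struct hfc_multi *hc, u_char reg)
	{
		hc->immap->im_ioport.iop_padat |= PA_XHFC_A0;	/* A0 high: address phase */
		writeb(reg, hc->xhfc_memaddr);			/* latch the register number */
		hc->immap->im_ioport.iop_padat &= ~PA_XHFC_A0;	/* A0 low: data phase */
	}
	/* a read is then: xhfc_select_reg(hc, reg); val = readb(hc->xhfc_memdata); */
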
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 0b28141e43b..e1dab30aed3 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -104,7 +104,7 @@
* If unsure, don't give this parameter.
*
* dslot:
- * NOTE: only one poll value must be given for every card.
+ * NOTE: only one dslot value must be given for every card.
* Also this value must be given for non-E1 cards. If omitted, the E1
* card has D-channel on time slot 16, which is default.
* If 1..15 or 17..31, an alternate time slot is used for D-channel.
@@ -139,6 +139,10 @@
* Selects interface with clock source for mISDN and applications.
* Set to card number starting with 1. Set to -1 to disable.
* By default, the first card is used as clock source.
+ *
+ * hwid:
+ * NOTE: only one hwid value can be given; it is not a per-card array.
+ * Enable special embedded devices with XHFC controllers.
*/
/*
@@ -206,6 +210,11 @@ static int clock;
static uint timer;
static uint clockdelay_te = CLKDEL_TE;
static uint clockdelay_nt = CLKDEL_NT;
+#define HWID_NONE 0
+#define HWID_MINIP4 1
+#define HWID_MINIP8 2
+#define HWID_MINIP16 3
+static uint hwid = HWID_NONE;
static int HFC_cnt, Port_cnt, PCM_cnt = 99;
@@ -223,6 +232,7 @@ module_param_array(pcm, int, NULL, S_IRUGO | S_IWUSR);
module_param_array(dslot, int, NULL, S_IRUGO | S_IWUSR);
module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
+module_param(hwid, uint, S_IRUGO | S_IWUSR); /* The hardware ID */
#ifdef HFC_REGISTER_DEBUG
#define HFC_outb(hc, reg, val) \
@@ -252,6 +262,10 @@ module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
#define HFC_wait_nodebug(hc) (hc->HFC_wait_nodebug(hc))
#endif
+#ifdef CONFIG_MISDN_HFCMULTI_8xx
+#include "hfc_multi_8xx.h"
+#endif
+
/* HFC_IO_MODE_PCIMEM */
static void
#ifdef HFC_REGISTER_DEBUG
@@ -261,7 +275,7 @@ HFC_outb_pcimem(struct hfc_multi *hc, u_char reg, u_char val,
HFC_outb_pcimem(struct hfc_multi *hc, u_char reg, u_char val)
#endif
{
- writeb(val, (hc->pci_membase)+reg);
+ writeb(val, hc->pci_membase + reg);
}
static u_char
#ifdef HFC_REGISTER_DEBUG
@@ -270,7 +284,7 @@ HFC_inb_pcimem(struct hfc_multi *hc, u_char reg, const char *function, int line)
HFC_inb_pcimem(struct hfc_multi *hc, u_char reg)
#endif
{
- return readb((hc->pci_membase)+reg);
+ return readb(hc->pci_membase + reg);
}
static u_short
#ifdef HFC_REGISTER_DEBUG
@@ -279,7 +293,7 @@ HFC_inw_pcimem(struct hfc_multi *hc, u_char reg, const char *function, int line)
HFC_inw_pcimem(struct hfc_multi *hc, u_char reg)
#endif
{
- return readw((hc->pci_membase)+reg);
+ return readw(hc->pci_membase + reg);
}
static void
#ifdef HFC_REGISTER_DEBUG
@@ -288,7 +302,8 @@ HFC_wait_pcimem(struct hfc_multi *hc, const char *function, int line)
HFC_wait_pcimem(struct hfc_multi *hc)
#endif
{
- while (readb((hc->pci_membase)+R_STATUS) & V_BUSY);
+ while (readb(hc->pci_membase + R_STATUS) & V_BUSY)
+ cpu_relax();
}
/* HFC_IO_MODE_REGIO */
@@ -300,7 +315,7 @@ HFC_outb_regio(struct hfc_multi *hc, u_char reg, u_char val,
HFC_outb_regio(struct hfc_multi *hc, u_char reg, u_char val)
#endif
{
- outb(reg, (hc->pci_iobase)+4);
+ outb(reg, hc->pci_iobase + 4);
outb(val, hc->pci_iobase);
}
static u_char
@@ -310,7 +325,7 @@ HFC_inb_regio(struct hfc_multi *hc, u_char reg, const char *function, int line)
HFC_inb_regio(struct hfc_multi *hc, u_char reg)
#endif
{
- outb(reg, (hc->pci_iobase)+4);
+ outb(reg, hc->pci_iobase + 4);
return inb(hc->pci_iobase);
}
static u_short
@@ -320,7 +335,7 @@ HFC_inw_regio(struct hfc_multi *hc, u_char reg, const char *function, int line)
HFC_inw_regio(struct hfc_multi *hc, u_char reg)
#endif
{
- outb(reg, (hc->pci_iobase)+4);
+ outb(reg, hc->pci_iobase + 4);
return inw(hc->pci_iobase);
}
static void
@@ -330,8 +345,9 @@ HFC_wait_regio(struct hfc_multi *hc, const char *function, int line)
HFC_wait_regio(struct hfc_multi *hc)
#endif
{
- outb(R_STATUS, (hc->pci_iobase)+4);
- while (inb(hc->pci_iobase) & V_BUSY);
+ outb(R_STATUS, hc->pci_iobase + 4);
+ while (inb(hc->pci_iobase) & V_BUSY)
+ cpu_relax();
}
#ifdef HFC_REGISTER_DEBUG
@@ -350,14 +366,14 @@ HFC_outb_debug(struct hfc_multi *hc, u_char reg, u_char val,
if (regname[0] == '\0')
strcpy(regname, "register");
- bits[7] = '0'+(!!(val&1));
- bits[6] = '0'+(!!(val&2));
- bits[5] = '0'+(!!(val&4));
- bits[4] = '0'+(!!(val&8));
- bits[3] = '0'+(!!(val&16));
- bits[2] = '0'+(!!(val&32));
- bits[1] = '0'+(!!(val&64));
- bits[0] = '0'+(!!(val&128));
+ bits[7] = '0' + (!!(val & 1));
+ bits[6] = '0' + (!!(val & 2));
+ bits[5] = '0' + (!!(val & 4));
+ bits[4] = '0' + (!!(val & 8));
+ bits[3] = '0' + (!!(val & 16));
+ bits[2] = '0' + (!!(val & 32));
+ bits[1] = '0' + (!!(val & 64));
+ bits[0] = '0' + (!!(val & 128));
printk(KERN_DEBUG
"HFC_outb(chip %d, %02x=%s, 0x%02x=%s); in %s() line %d\n",
hc->id, reg, regname, val, bits, function, line);
@@ -380,14 +396,14 @@ HFC_inb_debug(struct hfc_multi *hc, u_char reg, const char *function, int line)
if (regname[0] == '\0')
strcpy(regname, "register");
- bits[7] = '0'+(!!(val&1));
- bits[6] = '0'+(!!(val&2));
- bits[5] = '0'+(!!(val&4));
- bits[4] = '0'+(!!(val&8));
- bits[3] = '0'+(!!(val&16));
- bits[2] = '0'+(!!(val&32));
- bits[1] = '0'+(!!(val&64));
- bits[0] = '0'+(!!(val&128));
+ bits[7] = '0' + (!!(val & 1));
+ bits[6] = '0' + (!!(val & 2));
+ bits[5] = '0' + (!!(val & 4));
+ bits[4] = '0' + (!!(val & 8));
+ bits[3] = '0' + (!!(val & 16));
+ bits[2] = '0' + (!!(val & 32));
+ bits[1] = '0' + (!!(val & 64));
+ bits[0] = '0' + (!!(val & 128));
printk(KERN_DEBUG
"HFC_inb(chip %d, %02x=%s) = 0x%02x=%s; in %s() line %d\n",
hc->id, reg, regname, val, bits, function, line);
@@ -467,6 +483,7 @@ write_fifo_pcimem(struct hfc_multi *hc, u_char *data, int len)
len--;
}
}
+
/* read fifo data (REGIO) */
static void
read_fifo_regio(struct hfc_multi *hc, u_char *data, int len)
@@ -512,7 +529,6 @@ read_fifo_pcimem(struct hfc_multi *hc, u_char *data, int len)
}
}
-
static void
enable_hwirq(struct hfc_multi *hc)
{
@@ -928,7 +944,7 @@ hfcmulti_resync(struct hfc_multi *locked, struct hfc_multi *newmaster, int rm)
writel(pv, plx_acc_32);
if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip)) {
pcmmaster = hc;
- if (hc->type == 1) {
+ if (hc->ctype == HFC_TYPE_E1) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG
"Schedule SYNC_I\n");
@@ -949,7 +965,8 @@ hfcmulti_resync(struct hfc_multi *locked, struct hfc_multi *newmaster, int rm)
pv |= PLX_SYNC_O_EN;
writel(pv, plx_acc_32);
/* switch to jatt PLL, if not disabled by RX_SYNC */
- if (hc->type == 1 && !test_bit(HFC_CHIP_RX_SYNC, &hc->chip)) {
+ if (hc->ctype == HFC_TYPE_E1
+ && !test_bit(HFC_CHIP_RX_SYNC, &hc->chip)) {
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG "Schedule jatt PLL\n");
hc->e1_resync |= 2; /* switch to jatt */
@@ -961,7 +978,7 @@ hfcmulti_resync(struct hfc_multi *locked, struct hfc_multi *newmaster, int rm)
printk(KERN_DEBUG
"id=%d (0x%p) = PCM master syncronized "
"with QUARTZ\n", hc->id, hc);
- if (hc->type == 1) {
+ if (hc->ctype == HFC_TYPE_E1) {
/* Use the crystal clock for the PCM
master card */
if (debug & DEBUG_HFCMULTI_PLXSD)
@@ -972,7 +989,7 @@ hfcmulti_resync(struct hfc_multi *locked, struct hfc_multi *newmaster, int rm)
if (debug & DEBUG_HFCMULTI_PLXSD)
printk(KERN_DEBUG
"QUARTZ is automatically "
- "enabled by HFC-%dS\n", hc->type);
+ "enabled by HFC-%dS\n", hc->ctype);
}
plx_acc_32 = hc->plx_membase + PLX_GPIOC;
pv = readl(plx_acc_32);
@@ -996,7 +1013,7 @@ plxsd_checksync(struct hfc_multi *hc, int rm)
if (hc->syncronized) {
if (syncmaster == NULL) {
if (debug & DEBUG_HFCMULTI_PLXSD)
- printk(KERN_WARNING "%s: GOT sync on card %d"
+ printk(KERN_DEBUG "%s: GOT sync on card %d"
" (id=%d)\n", __func__, hc->id + 1,
hc->id);
hfcmulti_resync(hc, hc, rm);
@@ -1004,7 +1021,7 @@ plxsd_checksync(struct hfc_multi *hc, int rm)
} else {
if (syncmaster == hc) {
if (debug & DEBUG_HFCMULTI_PLXSD)
- printk(KERN_WARNING "%s: LOST sync on card %d"
+ printk(KERN_DEBUG "%s: LOST sync on card %d"
" (id=%d)\n", __func__, hc->id + 1,
hc->id);
hfcmulti_resync(hc, NULL, rm);
@@ -1053,20 +1070,23 @@ release_io_hfcmulti(struct hfc_multi *hc)
pv &= ~PLX_DSP_RES_N;
writel(pv, plx_acc_32);
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: PCM off: PLX_GPIO=%x\n",
+ printk(KERN_DEBUG "%s: PCM off: PLX_GPIO=%x\n",
__func__, pv);
spin_unlock_irqrestore(&plx_lock, plx_flags);
}
/* disable memory mapped ports / io ports */
test_and_clear_bit(HFC_CHIP_PLXSD, &hc->chip); /* prevent resync */
- pci_write_config_word(hc->pci_dev, PCI_COMMAND, 0);
+ if (hc->pci_dev)
+ pci_write_config_word(hc->pci_dev, PCI_COMMAND, 0);
if (hc->pci_membase)
iounmap(hc->pci_membase);
if (hc->plx_membase)
iounmap(hc->plx_membase);
if (hc->pci_iobase)
release_region(hc->pci_iobase, 8);
+ if (hc->xhfc_membase)
+ iounmap((void *)hc->xhfc_membase);
if (hc->pci_dev) {
pci_disable_device(hc->pci_dev);
@@ -1100,8 +1120,9 @@ init_chip(struct hfc_multi *hc)
/* revision check */
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: entered\n", __func__);
- val = HFC_inb(hc, R_CHIP_ID)>>4;
- if (val != 0x8 && val != 0xc && val != 0xe) {
+ val = HFC_inb(hc, R_CHIP_ID);
+ if ((val >> 4) != 0x8 && (val >> 4) != 0xc && (val >> 4) != 0xe &&
+ (val >> 1) != 0x31) {
printk(KERN_INFO "HFC_multi: unknown CHIP_ID:%x\n", (u_int)val);
err = -EIO;
goto out;
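
The extra (val >> 1) != 0x31 clause admits the XHFC family without listing each ID. As a hedged aside, the same test written as a helper (the function name is illustrative, not part of the patch):

	/* Sketch only: the added XHFC acceptance test in isolation. */
	static inline int is_xhfc_chip_id(u_char val)
	{
		return (val >> 1) == 0x31;	/* true for chip IDs 0x62 and 0x63 */
	}
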
@@ -1109,8 +1130,9 @@ init_chip(struct hfc_multi *hc)
rev = HFC_inb(hc, R_CHIP_RV);
printk(KERN_INFO
"HFC_multi: detected HFC with chip ID=0x%lx revision=%ld%s\n",
- val, rev, (rev == 0) ? " (old FIFO handling)" : "");
- if (rev == 0) {
+ val, rev, (rev == 0 && (hc->ctype != HFC_TYPE_XHFC)) ?
+ " (old FIFO handling)" : "");
+ if (hc->ctype != HFC_TYPE_XHFC && rev == 0) {
test_and_set_bit(HFC_CHIP_REVISION0, &hc->chip);
printk(KERN_WARNING
"HFC_multi: NOTE: Your chip is revision 0, "
@@ -1152,6 +1174,12 @@ init_chip(struct hfc_multi *hc)
hc->Zlen = 8000;
hc->DTMFbase = 0x2000;
}
+ if (hc->ctype == HFC_TYPE_XHFC) {
+ hc->Flen = 0x8;
+ hc->Zmin = 0x0;
+ hc->Zlen = 64;
+ hc->DTMFbase = 0x0;
+ }
hc->max_trans = poll << 1;
if (hc->max_trans > hc->Zlen)
hc->max_trans = hc->Zlen;
@@ -1176,7 +1204,7 @@ init_chip(struct hfc_multi *hc)
writel(pv, plx_acc_32);
spin_unlock_irqrestore(&plx_lock, plx_flags);
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: slave/term: PLX_GPIO=%x\n",
+ printk(KERN_DEBUG "%s: slave/term: PLX_GPIO=%x\n",
__func__, pv);
/*
* If we are the 3rd PLXSD card or higher, we must turn
@@ -1204,13 +1232,17 @@ init_chip(struct hfc_multi *hc)
writel(pv, plx_acc_32);
spin_unlock_irqrestore(&plx_lock, plx_flags);
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: term off: PLX_GPIO=%x\n",
- __func__, pv);
+ printk(KERN_DEBUG
+ "%s: term off: PLX_GPIO=%x\n",
+ __func__, pv);
}
spin_unlock_irqrestore(&HFClock, hfc_flags);
hc->hw.r_pcm_md0 = V_F0_LEN; /* shift clock for DSP */
}
+ if (test_bit(HFC_CHIP_EMBSD, &hc->chip))
+ hc->hw.r_pcm_md0 = V_F0_LEN; /* shift clock for DSP */
+
/* we only want the real Z2 read-pointer for revision > 0 */
if (!test_bit(HFC_CHIP_REVISION0, &hc->chip))
hc->hw.r_ram_sz |= V_FZ_MD;
@@ -1234,15 +1266,24 @@ init_chip(struct hfc_multi *hc)
/* soft reset */
HFC_outb(hc, R_CTRL, hc->hw.r_ctrl);
- HFC_outb(hc, R_RAM_SZ, hc->hw.r_ram_sz);
+ if (hc->ctype == HFC_TYPE_XHFC)
+ HFC_outb(hc, 0x0C /* R_FIFO_THRES */,
+ 0x11 /* 16 Bytes TX/RX */);
+ else
+ HFC_outb(hc, R_RAM_SZ, hc->hw.r_ram_sz);
HFC_outb(hc, R_FIFO_MD, 0);
- hc->hw.r_cirm = V_SRES | V_HFCRES | V_PCMRES | V_STRES | V_RLD_EPR;
+ if (hc->ctype == HFC_TYPE_XHFC)
+ hc->hw.r_cirm = V_SRES | V_HFCRES | V_PCMRES | V_STRES;
+ else
+ hc->hw.r_cirm = V_SRES | V_HFCRES | V_PCMRES | V_STRES
+ | V_RLD_EPR;
HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
udelay(100);
hc->hw.r_cirm = 0;
HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
udelay(100);
- HFC_outb(hc, R_RAM_SZ, hc->hw.r_ram_sz);
+ if (hc->ctype != HFC_TYPE_XHFC)
+ HFC_outb(hc, R_RAM_SZ, hc->hw.r_ram_sz);
/* Speech Design PLX bridge pcm and sync mode */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
@@ -1254,13 +1295,13 @@ init_chip(struct hfc_multi *hc)
pv |= PLX_MASTER_EN | PLX_SLAVE_EN_N;
pv |= PLX_SYNC_O_EN;
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: master: PLX_GPIO=%x\n",
+ printk(KERN_DEBUG "%s: master: PLX_GPIO=%x\n",
__func__, pv);
} else {
pv &= ~(PLX_MASTER_EN | PLX_SLAVE_EN_N);
pv &= ~PLX_SYNC_O_EN;
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: slave: PLX_GPIO=%x\n",
+ printk(KERN_DEBUG "%s: slave: PLX_GPIO=%x\n",
__func__, pv);
}
writel(pv, plx_acc_32);
@@ -1278,13 +1319,16 @@ init_chip(struct hfc_multi *hc)
HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0xa0);
if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
HFC_outb(hc, R_PCM_MD2, V_SYNC_SRC); /* sync via SYNC_I / O */
+ else if (test_bit(HFC_CHIP_EMBSD, &hc->chip))
+ HFC_outb(hc, R_PCM_MD2, 0x10); /* V_C2O_EN */
else
HFC_outb(hc, R_PCM_MD2, 0x00); /* sync from interface */
HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0x00);
for (i = 0; i < 256; i++) {
HFC_outb_nodebug(hc, R_SLOT, i);
HFC_outb_nodebug(hc, A_SL_CFG, 0);
- HFC_outb_nodebug(hc, A_CONF, 0);
+ if (hc->ctype != HFC_TYPE_XHFC)
+ HFC_outb_nodebug(hc, A_CONF, 0);
hc->slot_owner[i] = -1;
}
@@ -1296,6 +1340,9 @@ init_chip(struct hfc_multi *hc)
HFC_outb(hc, R_BRG_PCM_CFG, V_PCM_CLK);
}
+ if (test_bit(HFC_CHIP_EMBSD, &hc->chip))
+ HFC_outb(hc, 0x02 /* R_CLK_CFG */, 0x40 /* V_CLKO_OFF */);
+
/* B410P GPIO */
if (test_bit(HFC_CHIP_B410P, &hc->chip)) {
printk(KERN_NOTICE "Setting GPIOs\n");
@@ -1366,8 +1413,8 @@ controller_fail:
writel(pv, plx_acc_32);
spin_unlock_irqrestore(&plx_lock, plx_flags);
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: master: PLX_GPIO"
- "=%x\n", __func__, pv);
+ printk(KERN_DEBUG "%s: master: "
+ "PLX_GPIO=%x\n", __func__, pv);
}
hc->hw.r_pcm_md0 |= V_PCM_MD;
HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0x00);
@@ -1401,7 +1448,7 @@ controller_fail:
writel(pv, plx_acc_32);
spin_unlock_irqrestore(&plx_lock, plx_flags);
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: reset off: PLX_GPIO=%x\n",
+ printk(KERN_DEBUG "%s: reset off: PLX_GPIO=%x\n",
__func__, pv);
}
@@ -1424,7 +1471,7 @@ controller_fail:
hc->hw.r_irqmsk_misc |= V_TI_IRQMSK;
/* set E1 state machine IRQ */
- if (hc->type == 1)
+ if (hc->ctype == HFC_TYPE_E1)
hc->hw.r_irqmsk_misc |= V_STA_IRQMSK;
/* set DTMF detection */
@@ -1444,7 +1491,8 @@ controller_fail:
r_conf_en = V_CONF_EN | V_ULAW;
else
r_conf_en = V_CONF_EN;
- HFC_outb(hc, R_CONF_EN, r_conf_en);
+ if (hc->ctype != HFC_TYPE_XHFC)
+ HFC_outb(hc, R_CONF_EN, r_conf_en);
/* setting leds */
switch (hc->leds) {
@@ -1468,16 +1516,23 @@ controller_fail:
break;
}
+ if (test_bit(HFC_CHIP_EMBSD, &hc->chip)) {
+ hc->hw.r_st_sync = 0x10; /* V_AUTO_SYNCI */
+ HFC_outb(hc, R_ST_SYNC, hc->hw.r_st_sync);
+ }
+
/* set master clock */
if (hc->masterclk >= 0) {
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: setting ST master clock "
"to port %d (0..%d)\n",
__func__, hc->masterclk, hc->ports-1);
- hc->hw.r_st_sync = hc->masterclk | V_AUTO_SYNC;
+ hc->hw.r_st_sync |= (hc->masterclk | V_AUTO_SYNC);
HFC_outb(hc, R_ST_SYNC, hc->hw.r_st_sync);
}
+
+
/* setting misc irq */
HFC_outb(hc, R_IRQMSK_MISC, hc->hw.r_irqmsk_misc);
if (debug & DEBUG_HFCMULTI_INIT)
@@ -1817,8 +1872,8 @@ hfcmulti_dtmf(struct hfc_multi *hc)
coeff[(co<<1)|1] = mantissa;
}
if (debug & DEBUG_HFCMULTI_DTMF)
- printk("%s: DTMF ready %08x %08x %08x %08x "
- "%08x %08x %08x %08x\n", __func__,
+ printk(" DTMF ready %08x %08x %08x %08x "
+ "%08x %08x %08x %08x\n",
coeff[0], coeff[1], coeff[2], coeff[3],
coeff[4], coeff[5], coeff[6], coeff[7]);
hc->chan[ch].coeff_count++;
@@ -1826,7 +1881,7 @@ hfcmulti_dtmf(struct hfc_multi *hc)
hc->chan[ch].coeff_count = 0;
skb = mI_alloc_skb(512, GFP_ATOMIC);
if (!skb) {
- printk(KERN_WARNING "%s: No memory for skb\n",
+ printk(KERN_DEBUG "%s: No memory for skb\n",
__func__);
continue;
}
@@ -1929,7 +1984,7 @@ next_frame:
Fspace = 1;
}
/* one frame only for ST D-channels, to allow resending */
- if (hc->type != 1 && dch) {
+ if (hc->ctype != HFC_TYPE_E1 && dch) {
if (f1 != f2)
Fspace = 0;
}
@@ -1945,6 +2000,9 @@ next_frame:
"%d!=%d\n", __func__, hc->id + 1, temp, z2);
z2 = temp; /* repeat until Z2 is equal */
}
+ hc->chan[ch].Zfill = z1 - z2;
+ if (hc->chan[ch].Zfill < 0)
+ hc->chan[ch].Zfill += hc->Zlen;
Zspace = z2 - z1;
if (Zspace <= 0)
Zspace += hc->Zlen;
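
Zfill tracks how many bytes are queued in the transmit FIFO but not yet sent; the subtraction can go negative when the write pointer has wrapped, hence the + Zlen correction. The same arithmetic as a standalone sketch with concrete numbers (function and parameter names are illustrative only):

	/* Sketch only: the fill-level computation used above.
	 * z1 = write pointer, z2 = read pointer, zlen = FIFO depth
	 * (64 for XHFC as set in init_chip()). */
	static int fifo_fill(int z1, int z2, int zlen)
	{
		int fill = z1 - z2;
		if (fill < 0)
			fill += zlen;		/* wrapped around the ring */
		return fill;			/* e.g. z1 = 10, z2 = 50, zlen = 64 -> 24 */
	}
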
@@ -1968,12 +2026,22 @@ next_frame:
"slot_tx %d\n",
__func__, ch, slot_tx);
/* connect slot */
- HFC_outb(hc, A_CON_HDLC, 0xc0 | 0x00 |
- V_HDLC_TRP | V_IFF);
+ if (hc->ctype == HFC_TYPE_XHFC)
+ HFC_outb(hc, A_CON_HDLC, 0xc0
+ | 0x07 << 2 | V_HDLC_TRP | V_IFF);
+ /* Enable FIFO, no interrupt */
+ else
+ HFC_outb(hc, A_CON_HDLC, 0xc0 | 0x00 |
+ V_HDLC_TRP | V_IFF);
HFC_outb_nodebug(hc, R_FIFO, ch<<1 | 1);
HFC_wait_nodebug(hc);
- HFC_outb(hc, A_CON_HDLC, 0xc0 | 0x00 |
- V_HDLC_TRP | V_IFF);
+ if (hc->ctype == HFC_TYPE_XHFC)
+ HFC_outb(hc, A_CON_HDLC, 0xc0
+ | 0x07 << 2 | V_HDLC_TRP | V_IFF);
+ /* Enable FIFO, no interrupt */
+ else
+ HFC_outb(hc, A_CON_HDLC, 0xc0 | 0x00 |
+ V_HDLC_TRP | V_IFF);
HFC_outb_nodebug(hc, R_FIFO, ch<<1);
HFC_wait_nodebug(hc);
}
@@ -2001,10 +2069,22 @@ next_frame:
"FIFO data: channel %d slot_tx %d\n",
__func__, ch, slot_tx);
/* disconnect slot */
- HFC_outb(hc, A_CON_HDLC, 0x80 | 0x00 | V_HDLC_TRP | V_IFF);
+ if (hc->ctype == HFC_TYPE_XHFC)
+ HFC_outb(hc, A_CON_HDLC, 0x80
+ | 0x07 << 2 | V_HDLC_TRP | V_IFF);
+ /* Enable FIFO, no interrupt */
+ else
+ HFC_outb(hc, A_CON_HDLC, 0x80 | 0x00 |
+ V_HDLC_TRP | V_IFF);
HFC_outb_nodebug(hc, R_FIFO, ch<<1 | 1);
HFC_wait_nodebug(hc);
- HFC_outb(hc, A_CON_HDLC, 0x80 | 0x00 | V_HDLC_TRP | V_IFF);
+ if (hc->ctype == HFC_TYPE_XHFC)
+ HFC_outb(hc, A_CON_HDLC, 0x80
+ | 0x07 << 2 | V_HDLC_TRP | V_IFF);
+ /* Enable FIFO, no interrupt */
+ else
+ HFC_outb(hc, A_CON_HDLC, 0x80 | 0x00 |
+ V_HDLC_TRP | V_IFF);
HFC_outb_nodebug(hc, R_FIFO, ch<<1);
HFC_wait_nodebug(hc);
}
@@ -2027,10 +2107,11 @@ next_frame:
printk(KERN_DEBUG "%s(card %d): fifo(%d) has %d bytes space "
"left (z1=%04x, z2=%04x) sending %d of %d bytes %s\n",
__func__, hc->id + 1, ch, Zspace, z1, z2, ii-i, len-i,
- temp ? "HDLC":"TRANS");
+ temp ? "HDLC" : "TRANS");
/* Have to prep the audio data */
hc->write_fifo(hc, d, ii - i);
+ hc->chan[ch].Zfill += ii - i;
*idxp = ii;
/* if not all data has been written */
@@ -2226,7 +2307,7 @@ next_frame:
if (dch)
recv_Dchannel(dch);
else
- recv_Bchannel(bch);
+ recv_Bchannel(bch, MISDN_ID_ANY);
*sp = skb;
again++;
goto next_frame;
@@ -2258,7 +2339,7 @@ next_frame:
"(z1=%04x, z2=%04x) TRANS\n",
__func__, hc->id + 1, ch, Zsize, z1, z2);
/* only bch is transparent */
- recv_Bchannel(bch);
+ recv_Bchannel(bch, hc->chan[ch].Zfill);
*sp = skb;
}
}
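
recv_Bchannel() now takes a second argument: HDLC frames pass MISDN_ID_ANY, while the transparent path hands over the current transmit fill level (Zfill here, fcnt_tx in hfcpci.c further below). A sketch of that calling convention, assuming the value is simply carried as the id of the resulting PH_DATA_IND (tx_fill is a hypothetical local):

	/* Sketch only, not part of the patch */
	if (test_bit(FLG_TRANSPARENT, &bch->Flags))
		recv_Bchannel(bch, tx_fill);		/* report pending TX bytes upward */
	else
		recv_Bchannel(bch, MISDN_ID_ANY);	/* framed data: no fill information */
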
@@ -2323,7 +2404,7 @@ handle_timer_irq(struct hfc_multi *hc)
spin_unlock_irqrestore(&HFClock, flags);
}
- if (hc->type != 1 || hc->e1_state == 1)
+ if (hc->ctype != HFC_TYPE_E1 || hc->e1_state == 1)
for (ch = 0; ch <= 31; ch++) {
if (hc->created[hc->chan[ch].port]) {
hfcmulti_tx(hc, ch);
@@ -2346,7 +2427,7 @@ handle_timer_irq(struct hfc_multi *hc)
}
}
}
- if (hc->type == 1 && hc->created[0]) {
+ if (hc->ctype == HFC_TYPE_E1 && hc->created[0]) {
dch = hc->chan[hc->dslot].dch;
if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) {
/* LOS */
@@ -2606,7 +2687,10 @@ hfcmulti_interrupt(int intno, void *dev_id)
"card %d, this is no bug.\n", hc->id + 1, irqsem);
irqsem = hc->id + 1;
#endif
-
+#ifdef CONFIG_MISDN_HFCMULTI_8xx
+ if (hc->immap->im_cpm.cp_pbdat & hc->pb_irqmsk)
+ goto irq_notforus;
+#endif
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
spin_lock_irqsave(&plx_lock, flags);
plx_acc = hc->plx_membase + PLX_INTCSR;
@@ -2646,7 +2730,7 @@ hfcmulti_interrupt(int intno, void *dev_id)
}
hc->irqcnt++;
if (r_irq_statech) {
- if (hc->type != 1)
+ if (hc->ctype != HFC_TYPE_E1)
ph_state_irq(hc, r_irq_statech);
}
if (status & V_EXT_IRQSTA)
@@ -2660,7 +2744,7 @@ hfcmulti_interrupt(int intno, void *dev_id)
r_irq_misc = HFC_inb_nodebug(hc, R_IRQ_MISC);
r_irq_misc &= hc->hw.r_irqmsk_misc; /* ignore disabled irqs */
if (r_irq_misc & V_STA_IRQ) {
- if (hc->type == 1) {
+ if (hc->ctype == HFC_TYPE_E1) {
/* state machine */
dch = hc->chan[hc->dslot].dch;
e1_syncsta = HFC_inb_nodebug(hc, R_SYNC_STA);
@@ -2699,13 +2783,13 @@ hfcmulti_interrupt(int intno, void *dev_id)
handle_timer_irq(hc);
}
- if (r_irq_misc & V_DTMF_IRQ) {
+ if (r_irq_misc & V_DTMF_IRQ)
hfcmulti_dtmf(hc);
- }
+
if (r_irq_misc & V_IRQ_PROC) {
static int irq_proc_cnt;
if (!irq_proc_cnt++)
- printk(KERN_WARNING "%s: got V_IRQ_PROC -"
+ printk(KERN_DEBUG "%s: got V_IRQ_PROC -"
" this should not happen\n", __func__);
}
@@ -2782,7 +2866,8 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
if (hc->slot_owner[oslot_tx<<1] == ch) {
HFC_outb(hc, R_SLOT, oslot_tx << 1);
HFC_outb(hc, A_SL_CFG, 0);
- HFC_outb(hc, A_CONF, 0);
+ if (hc->ctype != HFC_TYPE_XHFC)
+ HFC_outb(hc, A_CONF, 0);
hc->slot_owner[oslot_tx<<1] = -1;
} else {
if (debug & DEBUG_HFCMULTI_MODE)
@@ -2835,7 +2920,9 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
flow_tx, routing, conf);
HFC_outb(hc, R_SLOT, slot_tx << 1);
HFC_outb(hc, A_SL_CFG, (ch<<1) | routing);
- HFC_outb(hc, A_CONF, (conf < 0) ? 0 : (conf | V_CONF_SL));
+ if (hc->ctype != HFC_TYPE_XHFC)
+ HFC_outb(hc, A_CONF,
+ (conf < 0) ? 0 : (conf | V_CONF_SL));
hc->slot_owner[slot_tx << 1] = ch;
hc->chan[ch].slot_tx = slot_tx;
hc->chan[ch].bank_tx = bank_tx;
@@ -2852,7 +2939,7 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
else
flow_rx = 0xc0; /* ST->(FIFO,PCM) */
/* put on slot */
- routing = bank_rx?0x80:0xc0; /* reversed */
+ routing = bank_rx ? 0x80 : 0xc0; /* reversed */
if (conf >= 0 || bank_rx > 1)
routing = 0x40; /* loop */
if (debug & DEBUG_HFCMULTI_MODE)
@@ -2885,9 +2972,9 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
HFC_outb(hc, A_IRQ_MSK, 0);
HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait(hc);
- if (hc->chan[ch].bch && hc->type != 1) {
+ if (hc->chan[ch].bch && hc->ctype != HFC_TYPE_E1) {
hc->hw.a_st_ctrl0[hc->chan[ch].port] &=
- ((ch & 0x3) == 0)? ~V_B1_EN: ~V_B2_EN;
+ ((ch & 0x3) == 0) ? ~V_B1_EN : ~V_B2_EN;
HFC_outb(hc, R_ST_SEL, hc->chan[ch].port);
/* undocumented: delay after R_ST_SEL */
udelay(1);
@@ -2961,8 +3048,13 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
/* enable TX fifo */
HFC_outb(hc, R_FIFO, ch << 1);
HFC_wait(hc);
- HFC_outb(hc, A_CON_HDLC, flow_tx | 0x00 |
- V_HDLC_TRP | V_IFF);
+ if (hc->ctype == HFC_TYPE_XHFC)
+ HFC_outb(hc, A_CON_HDLC, flow_tx | 0x07 << 2 |
+ V_HDLC_TRP | V_IFF);
+ /* Enable FIFO, no interrupt */
+ else
+ HFC_outb(hc, A_CON_HDLC, flow_tx | 0x00 |
+ V_HDLC_TRP | V_IFF);
HFC_outb(hc, A_SUBCH_CFG, 0);
HFC_outb(hc, A_IRQ_MSK, 0);
HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
@@ -2972,13 +3064,19 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
/* enable RX fifo */
HFC_outb(hc, R_FIFO, (ch<<1)|1);
HFC_wait(hc);
- HFC_outb(hc, A_CON_HDLC, flow_rx | 0x00 | V_HDLC_TRP);
+ if (hc->ctype == HFC_TYPE_XHFC)
+ HFC_outb(hc, A_CON_HDLC, flow_rx | 0x07 << 2 |
+ V_HDLC_TRP);
+ /* Enable FIFO, no interrupt*/
+ else
+ HFC_outb(hc, A_CON_HDLC, flow_rx | 0x00 |
+ V_HDLC_TRP);
HFC_outb(hc, A_SUBCH_CFG, 0);
HFC_outb(hc, A_IRQ_MSK, 0);
HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
HFC_wait(hc);
}
- if (hc->type != 1) {
+ if (hc->ctype != HFC_TYPE_E1) {
hc->hw.a_st_ctrl0[hc->chan[ch].port] |=
((ch & 0x3) == 0) ? V_B1_EN : V_B2_EN;
HFC_outb(hc, R_ST_SEL, hc->chan[ch].port);
@@ -2999,7 +3097,7 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
/* enable TX fifo */
HFC_outb(hc, R_FIFO, ch<<1);
HFC_wait(hc);
- if (hc->type == 1 || hc->chan[ch].bch) {
+ if (hc->ctype == HFC_TYPE_E1 || hc->chan[ch].bch) {
/* E1 or B-channel */
HFC_outb(hc, A_CON_HDLC, flow_tx | 0x04);
HFC_outb(hc, A_SUBCH_CFG, 0);
@@ -3015,7 +3113,7 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
HFC_outb(hc, R_FIFO, (ch<<1)|1);
HFC_wait(hc);
HFC_outb(hc, A_CON_HDLC, flow_rx | 0x04);
- if (hc->type == 1 || hc->chan[ch].bch)
+ if (hc->ctype == HFC_TYPE_E1 || hc->chan[ch].bch)
HFC_outb(hc, A_SUBCH_CFG, 0); /* full 8 bits */
else
HFC_outb(hc, A_SUBCH_CFG, 2); /* 2 bits dchannel */
@@ -3024,7 +3122,7 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
HFC_wait(hc);
if (hc->chan[ch].bch) {
test_and_set_bit(FLG_HDLC, &hc->chan[ch].bch->Flags);
- if (hc->type != 1) {
+ if (hc->ctype != HFC_TYPE_E1) {
hc->hw.a_st_ctrl0[hc->chan[ch].port] |=
((ch&0x3) == 0) ? V_B1_EN : V_B2_EN;
HFC_outb(hc, R_ST_SEL, hc->chan[ch].port);
@@ -3104,7 +3202,7 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd)
case HW_RESET_REQ:
/* start activation */
spin_lock_irqsave(&hc->lock, flags);
- if (hc->type == 1) {
+ if (hc->ctype == HFC_TYPE_E1) {
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
"%s: HW_RESET_REQ no BRI\n",
@@ -3125,7 +3223,7 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd)
case HW_DEACT_REQ:
/* start deactivation */
spin_lock_irqsave(&hc->lock, flags);
- if (hc->type == 1) {
+ if (hc->ctype == HFC_TYPE_E1) {
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
"%s: HW_DEACT_REQ no BRI\n",
@@ -3159,7 +3257,7 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd)
break;
case HW_POWERUP_REQ:
spin_lock_irqsave(&hc->lock, flags);
- if (hc->type == 1) {
+ if (hc->ctype == HFC_TYPE_E1) {
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
"%s: HW_POWERUP_REQ no BRI\n",
@@ -3236,7 +3334,7 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
__func__, hc->chan[dch->slot].port,
hc->ports-1);
/* start activation */
- if (hc->type == 1) {
+ if (hc->ctype == HFC_TYPE_E1) {
ph_state_change(dch);
if (debug & DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG
@@ -3269,7 +3367,7 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
__func__, hc->chan[dch->slot].port,
hc->ports-1);
/* start deactivation */
- if (hc->type == 1) {
+ if (hc->ctype == HFC_TYPE_E1) {
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
"%s: PH_DEACTIVATE no BRI\n",
@@ -3410,9 +3508,9 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
switch (hh->id) {
case HFC_SPL_LOOP_ON: /* set sample loop */
if (debug & DEBUG_HFCMULTI_MSG)
- printk(KERN_DEBUG
- "%s: HFC_SPL_LOOP_ON (len = %d)\n",
- __func__, skb->len);
+ printk(KERN_DEBUG
+ "%s: HFC_SPL_LOOP_ON (len = %d)\n",
+ __func__, skb->len);
ret = 0;
break;
case HFC_SPL_LOOP_OFF: /* set silence */
@@ -3489,6 +3587,8 @@ channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
features->hfc_id = hc->id;
if (test_bit(HFC_CHIP_DTMF, &hc->chip))
features->hfc_dtmf = 1;
+ if (test_bit(HFC_CHIP_CONF, &hc->chip))
+ features->hfc_conf = 1;
features->hfc_loops = 0;
if (test_bit(HFC_CHIP_B410P, &hc->chip)) {
features->hfc_echocanhw = 1;
@@ -3619,14 +3719,13 @@ ph_state_change(struct dchannel *dch)
int ch, i;
if (!dch) {
- printk(KERN_WARNING "%s: ERROR given dch is NULL\n",
- __func__);
+ printk(KERN_WARNING "%s: ERROR given dch is NULL\n", __func__);
return;
}
hc = dch->hw;
ch = dch->slot;
- if (hc->type == 1) {
+ if (hc->ctype == HFC_TYPE_E1) {
if (dch->dev.D.protocol == ISDN_P_TE_E1) {
if (debug & DEBUG_HFCMULTI_STATE)
printk(KERN_DEBUG
@@ -3641,14 +3740,15 @@ ph_state_change(struct dchannel *dch)
switch (dch->state) {
case (1):
if (hc->e1_state != 1) {
- for (i = 1; i <= 31; i++) {
- /* reset fifos on e1 activation */
- HFC_outb_nodebug(hc, R_FIFO, (i << 1) | 1);
- HFC_wait_nodebug(hc);
- HFC_outb_nodebug(hc,
- R_INC_RES_FIFO, V_RES_F);
- HFC_wait_nodebug(hc);
- }
+ for (i = 1; i <= 31; i++) {
+ /* reset fifos on e1 activation */
+ HFC_outb_nodebug(hc, R_FIFO,
+ (i << 1) | 1);
+ HFC_wait_nodebug(hc);
+ HFC_outb_nodebug(hc, R_INC_RES_FIFO,
+ V_RES_F);
+ HFC_wait_nodebug(hc);
+ }
}
test_and_set_bit(FLG_ACTIVE, &dch->Flags);
_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
@@ -3751,7 +3851,7 @@ hfcmulti_initmode(struct dchannel *dch)
if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG "%s: entered\n", __func__);
- if (hc->type == 1) {
+ if (hc->ctype == HFC_TYPE_E1) {
hc->chan[hc->dslot].slot_tx = -1;
hc->chan[hc->dslot].slot_rx = -1;
hc->chan[hc->dslot].conf = -1;
@@ -3900,6 +4000,11 @@ hfcmulti_initmode(struct dchannel *dch)
}
if (!test_bit(HFC_CFG_NONCAP_TX, &hc->chan[i].cfg))
hc->hw.a_st_ctrl0[pt] |= V_TX_LI;
+ if (hc->ctype == HFC_TYPE_XHFC) {
+ hc->hw.a_st_ctrl0[pt] |= 0x40 /* V_ST_PU_CTRL */;
+ HFC_outb(hc, 0x35 /* A_ST_CTRL3 */,
+ 0x7c << 1 /* V_ST_PULSE */);
+ }
/* line setup */
HFC_outb(hc, A_ST_CTRL0, hc->hw.a_st_ctrl0[pt]);
/* disable E-channel */
@@ -3943,12 +4048,12 @@ open_dchannel(struct hfc_multi *hc, struct dchannel *dch,
return -EINVAL;
if ((dch->dev.D.protocol != ISDN_P_NONE) &&
(dch->dev.D.protocol != rq->protocol)) {
- if (debug & DEBUG_HFCMULTI_MODE)
- printk(KERN_WARNING "%s: change protocol %x to %x\n",
- __func__, dch->dev.D.protocol, rq->protocol);
+ if (debug & DEBUG_HFCMULTI_MODE)
+ printk(KERN_DEBUG "%s: change protocol %x to %x\n",
+ __func__, dch->dev.D.protocol, rq->protocol);
}
- if ((dch->dev.D.protocol == ISDN_P_TE_S0)
- && (rq->protocol != ISDN_P_TE_S0))
+ if ((dch->dev.D.protocol == ISDN_P_TE_S0) &&
+ (rq->protocol != ISDN_P_TE_S0))
l1_event(dch->l1, CLOSE_CHANNEL);
if (dch->dev.D.protocol != rq->protocol) {
if (rq->protocol == ISDN_P_TE_S0) {
@@ -3986,7 +4091,7 @@ open_bchannel(struct hfc_multi *hc, struct dchannel *dch,
return -EINVAL;
if (rq->protocol == ISDN_P_NONE)
return -EINVAL;
- if (hc->type == 1)
+ if (hc->ctype == HFC_TYPE_E1)
ch = rq->adr.channel;
else
ch = (rq->adr.channel - 1) + (dch->slot - 2);
@@ -4013,11 +4118,41 @@ open_bchannel(struct hfc_multi *hc, struct dchannel *dch,
static int
channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
{
+ struct hfc_multi *hc = dch->hw;
int ret = 0;
+ int wd_mode, wd_cnt;
switch (cq->op) {
case MISDN_CTRL_GETOP:
- cq->op = 0;
+ cq->op = MISDN_CTRL_HFC_OP;
+ break;
+ case MISDN_CTRL_HFC_WD_INIT: /* init the watchdog */
+ wd_cnt = cq->p1 & 0xf;
+ wd_mode = !!(cq->p1 >> 4);
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG "%s: MISDN_CTRL_HFC_WD_INIT mode %s"
+ ", counter 0x%x\n", __func__,
+ wd_mode ? "AUTO" : "MANUAL", wd_cnt);
+ /* set the watchdog timer */
+ HFC_outb(hc, R_TI_WD, poll_timer | (wd_cnt << 4));
+ hc->hw.r_bert_wd_md = (wd_mode ? V_AUTO_WD_RES : 0);
+ if (hc->ctype == HFC_TYPE_XHFC)
+ hc->hw.r_bert_wd_md |= 0x40 /* V_WD_EN */;
+ /* init the watchdog register and reset the counter */
+ HFC_outb(hc, R_BERT_WD_MD, hc->hw.r_bert_wd_md | V_WD_RES);
+ if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
+ /* enable the watchdog output for Speech-Design */
+ HFC_outb(hc, R_GPIO_SEL, V_GPIO_SEL7);
+ HFC_outb(hc, R_GPIO_EN1, V_GPIO_EN15);
+ HFC_outb(hc, R_GPIO_OUT1, 0);
+ HFC_outb(hc, R_GPIO_OUT1, V_GPIO_OUT15);
+ }
+ break;
+ case MISDN_CTRL_HFC_WD_RESET: /* reset the watchdog counter */
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG "%s: MISDN_CTRL_HFC_WD_RESET\n",
+ __func__);
+ HFC_outb(hc, R_BERT_WD_MD, hc->hw.r_bert_wd_md | V_WD_RES);
break;
default:
printk(KERN_WARNING "%s: unknown Op %x\n",
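
For the new watchdog ops the low nibble of cq->p1 carries the counter value and bit 4 selects AUTO (hardware reset) mode, matching the decoding above. A sketch of a request a caller might build; only MISDN_CTRL_HFC_WD_INIT and the p1 layout come from the patch, the surrounding code is illustrative:

	/* Sketch only: arming the watchdog in AUTO mode with counter 0x8 */
	struct mISDN_ctrl_req cq = {
		.op = MISDN_CTRL_HFC_WD_INIT,
		.p1 = (1 << 4) | 0x8,	/* bit 4 = AUTO, low nibble = counter */
	};
	/* ...pass cq through the D-channel ctrl path so it reaches channel_dctrl() */
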
@@ -4047,7 +4182,7 @@ hfcm_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
switch (rq->protocol) {
case ISDN_P_TE_S0:
case ISDN_P_NT_S0:
- if (hc->type == 1) {
+ if (hc->ctype == HFC_TYPE_E1) {
err = -EINVAL;
break;
}
@@ -4055,7 +4190,7 @@ hfcm_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
break;
case ISDN_P_TE_E1:
case ISDN_P_NT_E1:
- if (hc->type != 1) {
+ if (hc->ctype != HFC_TYPE_E1) {
err = -EINVAL;
break;
}
@@ -4122,13 +4257,13 @@ init_card(struct hfc_multi *hc)
disable_hwirq(hc);
spin_unlock_irqrestore(&hc->lock, flags);
- if (request_irq(hc->pci_dev->irq, hfcmulti_interrupt, IRQF_SHARED,
+ if (request_irq(hc->irq, hfcmulti_interrupt, IRQF_SHARED,
"HFC-multi", hc)) {
printk(KERN_WARNING "mISDN: Could not get interrupt %d.\n",
- hc->pci_dev->irq);
+ hc->irq);
+ hc->irq = 0;
return -EIO;
}
- hc->irq = hc->pci_dev->irq;
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
spin_lock_irqsave(&plx_lock, plx_flags);
@@ -4187,7 +4322,7 @@ error:
}
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: free irq %d\n", __func__, hc->irq);
+ printk(KERN_DEBUG "%s: free irq %d\n", __func__, hc->irq);
if (hc->irq) {
free_irq(hc->irq, hc);
hc->irq = 0;
@@ -4235,6 +4370,10 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
hc->ledstate = 0xAFFEAFFE;
hc->opticalsupport = m->opticalsupport;
+ hc->pci_iobase = 0;
+ hc->pci_membase = NULL;
+ hc->plx_membase = NULL;
+
/* set memory access methods */
if (m->io_mode) /* use mode from card config */
hc->io_mode = m->io_mode;
@@ -4242,44 +4381,12 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
case HFC_IO_MODE_PLXSD:
test_and_set_bit(HFC_CHIP_PLXSD, &hc->chip);
hc->slots = 128; /* required */
- /* fall through */
- case HFC_IO_MODE_PCIMEM:
hc->HFC_outb = HFC_outb_pcimem;
hc->HFC_inb = HFC_inb_pcimem;
hc->HFC_inw = HFC_inw_pcimem;
hc->HFC_wait = HFC_wait_pcimem;
hc->read_fifo = read_fifo_pcimem;
hc->write_fifo = write_fifo_pcimem;
- break;
- case HFC_IO_MODE_REGIO:
- hc->HFC_outb = HFC_outb_regio;
- hc->HFC_inb = HFC_inb_regio;
- hc->HFC_inw = HFC_inw_regio;
- hc->HFC_wait = HFC_wait_regio;
- hc->read_fifo = read_fifo_regio;
- hc->write_fifo = write_fifo_regio;
- break;
- default:
- printk(KERN_WARNING "HFC-multi: Invalid IO mode.\n");
- pci_disable_device(hc->pci_dev);
- return -EIO;
- }
- hc->HFC_outb_nodebug = hc->HFC_outb;
- hc->HFC_inb_nodebug = hc->HFC_inb;
- hc->HFC_inw_nodebug = hc->HFC_inw;
- hc->HFC_wait_nodebug = hc->HFC_wait;
-#ifdef HFC_REGISTER_DEBUG
- hc->HFC_outb = HFC_outb_debug;
- hc->HFC_inb = HFC_inb_debug;
- hc->HFC_inw = HFC_inw_debug;
- hc->HFC_wait = HFC_wait_debug;
-#endif
- hc->pci_iobase = 0;
- hc->pci_membase = NULL;
- hc->plx_membase = NULL;
-
- switch (hc->io_mode) {
- case HFC_IO_MODE_PLXSD:
hc->plx_origmembase = hc->pci_dev->resource[0].start;
/* MEMBASE 1 is PLX PCI Bridge */
@@ -4327,6 +4434,12 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
pci_write_config_word(hc->pci_dev, PCI_COMMAND, PCI_ENA_MEMIO);
break;
case HFC_IO_MODE_PCIMEM:
+ hc->HFC_outb = HFC_outb_pcimem;
+ hc->HFC_inb = HFC_inb_pcimem;
+ hc->HFC_inw = HFC_inw_pcimem;
+ hc->HFC_wait = HFC_wait_pcimem;
+ hc->read_fifo = read_fifo_pcimem;
+ hc->write_fifo = write_fifo_pcimem;
hc->pci_origmembase = hc->pci_dev->resource[1].start;
if (!hc->pci_origmembase) {
printk(KERN_WARNING
@@ -4343,12 +4456,18 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
pci_disable_device(hc->pci_dev);
return -EIO;
}
- printk(KERN_INFO "card %d: defined at MEMBASE %#lx (%#lx) IRQ %d "
- "HZ %d leds-type %d\n", hc->id, (u_long)hc->pci_membase,
+ printk(KERN_INFO "card %d: defined at MEMBASE %#lx (%#lx) IRQ "
+ "%d HZ %d leds-type %d\n", hc->id, (u_long)hc->pci_membase,
hc->pci_origmembase, hc->pci_dev->irq, HZ, hc->leds);
pci_write_config_word(hc->pci_dev, PCI_COMMAND, PCI_ENA_MEMIO);
break;
case HFC_IO_MODE_REGIO:
+ hc->HFC_outb = HFC_outb_regio;
+ hc->HFC_inb = HFC_inb_regio;
+ hc->HFC_inw = HFC_inw_regio;
+ hc->HFC_wait = HFC_wait_regio;
+ hc->read_fifo = read_fifo_regio;
+ hc->write_fifo = write_fifo_regio;
hc->pci_iobase = (u_int) hc->pci_dev->resource[0].start;
if (!hc->pci_iobase) {
printk(KERN_WARNING
@@ -4430,7 +4549,7 @@ release_port(struct hfc_multi *hc, struct dchannel *dch)
dch->timer.function = NULL;
}
- if (hc->type == 1) { /* E1 */
+ if (hc->ctype == HFC_TYPE_E1) { /* E1 */
/* remove sync */
if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
hc->syncronized = 0;
@@ -4508,7 +4627,7 @@ release_card(struct hfc_multi *hc)
int ch;
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: release card (%d) entered\n",
+ printk(KERN_DEBUG "%s: release card (%d) entered\n",
__func__, hc->id);
/* unregister clock source */
@@ -4537,7 +4656,7 @@ release_card(struct hfc_multi *hc)
/* release hardware & irq */
if (hc->irq) {
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: free irq %d\n",
+ printk(KERN_DEBUG "%s: free irq %d\n",
__func__, hc->irq);
free_irq(hc->irq, hc);
hc->irq = 0;
@@ -4546,17 +4665,17 @@ release_card(struct hfc_multi *hc)
release_io_hfcmulti(hc);
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: remove instance from list\n",
+ printk(KERN_DEBUG "%s: remove instance from list\n",
__func__);
list_del(&hc->list);
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: delete instance\n", __func__);
+ printk(KERN_DEBUG "%s: delete instance\n", __func__);
if (hc == syncmaster)
syncmaster = NULL;
kfree(hc);
if (debug & DEBUG_HFCMULTI_INIT)
- printk(KERN_WARNING "%s: card successfully removed\n",
+ printk(KERN_DEBUG "%s: card successfully removed\n",
__func__);
}
@@ -4579,7 +4698,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
dch->dev.D.send = handle_dmsg;
dch->dev.D.ctrl = hfcm_dctrl;
- dch->dev.nrbchan = (hc->dslot)?30:31;
+ dch->dev.nrbchan = (hc->dslot) ? 30 : 31;
dch->slot = hc->dslot;
hc->chan[hc->dslot].dch = dch;
hc->chan[hc->dslot].port = 0;
@@ -4821,7 +4940,7 @@ init_multi_port(struct hfc_multi *hc, int pt)
}
/* disable E-channel */
if (port[Port_cnt] & 0x004) {
- if (debug & DEBUG_HFCMULTI_INIT)
+ if (debug & DEBUG_HFCMULTI_INIT)
printk(KERN_DEBUG
"%s: PROTOCOL disable E-channel: "
"card(%d) port(%d)\n",
@@ -4829,9 +4948,15 @@ init_multi_port(struct hfc_multi *hc, int pt)
test_and_set_bit(HFC_CFG_DIS_ECHANNEL,
&hc->chan[i + 2].cfg);
}
- snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-%ds.%d-%d",
- hc->type, HFC_cnt + 1, pt + 1);
- ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name);
+ if (hc->ctype == HFC_TYPE_XHFC) {
+ snprintf(name, MISDN_MAX_IDLEN - 1, "xhfc.%d-%d",
+ HFC_cnt + 1, pt + 1);
+ ret = mISDN_register_device(&dch->dev, NULL, name);
+ } else {
+ snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-%ds.%d-%d",
+ hc->ctype, HFC_cnt + 1, pt + 1);
+ ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name);
+ }
if (ret)
goto free_chan;
hc->created[pt] = 1;
@@ -4842,9 +4967,9 @@ free_chan:
}
static int
-hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+hfcmulti_init(struct hm_map *m, struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- struct hm_map *m = (struct hm_map *)ent->driver_data;
int ret_err = 0;
int pt;
struct hfc_multi *hc;
@@ -4879,16 +5004,18 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
}
spin_lock_init(&hc->lock);
hc->mtyp = m;
- hc->type = m->type;
+ hc->ctype = m->type;
hc->ports = m->ports;
hc->id = HFC_cnt;
hc->pcm = pcm[HFC_cnt];
hc->io_mode = iomode[HFC_cnt];
- if (dslot[HFC_cnt] < 0 && hc->type == 1) {
+ if (dslot[HFC_cnt] < 0 && hc->ctype == HFC_TYPE_E1) {
hc->dslot = 0;
printk(KERN_INFO "HFC-E1 card has disabled D-channel, but "
"31 B-channels\n");
- } if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32 && hc->type == 1) {
+ }
+ if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32
+ && hc->ctype == HFC_TYPE_E1) {
hc->dslot = dslot[HFC_cnt];
printk(KERN_INFO "HFC-E1 card has alternating D-channel on "
"time slot %d\n", dslot[HFC_cnt]);
@@ -4910,8 +5037,11 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < (poll >> 1); i++)
hc->silence_data[i] = hc->silence;
- if (!(type[HFC_cnt] & 0x200))
- test_and_set_bit(HFC_CHIP_DTMF, &hc->chip);
+ if (hc->ctype != HFC_TYPE_XHFC) {
+ if (!(type[HFC_cnt] & 0x200))
+ test_and_set_bit(HFC_CHIP_DTMF, &hc->chip);
+ test_and_set_bit(HFC_CHIP_CONF, &hc->chip);
+ }
if (type[HFC_cnt] & 0x800)
test_and_set_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
@@ -4935,8 +5065,18 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
printk(KERN_NOTICE "Watchdog enabled\n");
}
- /* setup pci, hc->slots may change due to PLXSD */
- ret_err = setup_pci(hc, pdev, ent);
+ if (pdev && ent)
+ /* setup pci, hc->slots may change due to PLXSD */
+ ret_err = setup_pci(hc, pdev, ent);
+ else
+#ifdef CONFIG_MISDN_HFCMULTI_8xx
+ ret_err = setup_embedded(hc, m);
+#else
+ {
+ printk(KERN_WARNING "Embedded IO Mode not selected\n");
+ ret_err = -EIO;
+ }
+#endif
if (ret_err) {
if (hc == syncmaster)
syncmaster = NULL;
@@ -4944,7 +5084,17 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret_err;
}
- /* crate channels */
+ hc->HFC_outb_nodebug = hc->HFC_outb;
+ hc->HFC_inb_nodebug = hc->HFC_inb;
+ hc->HFC_inw_nodebug = hc->HFC_inw;
+ hc->HFC_wait_nodebug = hc->HFC_wait;
+#ifdef HFC_REGISTER_DEBUG
+ hc->HFC_outb = HFC_outb_debug;
+ hc->HFC_inb = HFC_inb_debug;
+ hc->HFC_inw = HFC_inw_debug;
+ hc->HFC_wait = HFC_wait_debug;
+#endif
+ /* create channels */
for (pt = 0; pt < hc->ports; pt++) {
if (Port_cnt >= MAX_PORTS) {
printk(KERN_ERR "too many ports (max=%d).\n",
@@ -4952,7 +5102,7 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
ret_err = -EINVAL;
goto free_card;
}
- if (hc->type == 1)
+ if (hc->ctype == HFC_TYPE_E1)
ret_err = init_e1_port(hc, m);
else
ret_err = init_multi_port(hc, pt);
@@ -5036,6 +5186,7 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
hc->iclock = mISDN_register_clock("HFCMulti", 0, clockctl, hc);
/* initialize hardware */
+ hc->irq = (m->irq) ? : hc->pci_dev->irq;
ret_err = init_card(hc);
if (ret_err) {
printk(KERN_ERR "init card returns %d\n", ret_err);
@@ -5074,7 +5225,7 @@ static void __devexit hfc_remove_pci(struct pci_dev *pdev)
spin_unlock_irqrestore(&HFClock, flags);
} else {
if (debug)
- printk(KERN_WARNING "%s: drvdata already removed\n",
+ printk(KERN_DEBUG "%s: drvdata already removed\n",
__func__);
}
}
@@ -5086,45 +5237,48 @@ static void __devexit hfc_remove_pci(struct pci_dev *pdev)
#define VENDOR_PRIM "PrimuX"
static const struct hm_map hfcm_map[] = {
-/*0*/ {VENDOR_BN, "HFC-1S Card (mini PCI)", 4, 1, 1, 3, 0, DIP_4S, 0},
-/*1*/ {VENDOR_BN, "HFC-2S Card", 4, 2, 1, 3, 0, DIP_4S, 0},
-/*2*/ {VENDOR_BN, "HFC-2S Card (mini PCI)", 4, 2, 1, 3, 0, DIP_4S, 0},
-/*3*/ {VENDOR_BN, "HFC-4S Card", 4, 4, 1, 2, 0, DIP_4S, 0},
-/*4*/ {VENDOR_BN, "HFC-4S Card (mini PCI)", 4, 4, 1, 2, 0, 0, 0},
-/*5*/ {VENDOR_CCD, "HFC-4S Eval (old)", 4, 4, 0, 0, 0, 0, 0},
-/*6*/ {VENDOR_CCD, "HFC-4S IOB4ST", 4, 4, 1, 2, 0, DIP_4S, 0},
-/*7*/ {VENDOR_CCD, "HFC-4S", 4, 4, 1, 2, 0, 0, 0},
-/*8*/ {VENDOR_DIG, "HFC-4S Card", 4, 4, 0, 2, 0, 0, HFC_IO_MODE_REGIO},
-/*9*/ {VENDOR_CCD, "HFC-4S Swyx 4xS0 SX2 QuadBri", 4, 4, 1, 2, 0, 0, 0},
-/*10*/ {VENDOR_JH, "HFC-4S (junghanns 2.0)", 4, 4, 1, 2, 0, 0, 0},
-/*11*/ {VENDOR_PRIM, "HFC-2S Primux Card", 4, 2, 0, 0, 0, 0, 0},
-
-/*12*/ {VENDOR_BN, "HFC-8S Card", 8, 8, 1, 0, 0, 0, 0},
+/*0*/ {VENDOR_BN, "HFC-1S Card (mini PCI)", 4, 1, 1, 3, 0, DIP_4S, 0, 0},
+/*1*/ {VENDOR_BN, "HFC-2S Card", 4, 2, 1, 3, 0, DIP_4S, 0, 0},
+/*2*/ {VENDOR_BN, "HFC-2S Card (mini PCI)", 4, 2, 1, 3, 0, DIP_4S, 0, 0},
+/*3*/ {VENDOR_BN, "HFC-4S Card", 4, 4, 1, 2, 0, DIP_4S, 0, 0},
+/*4*/ {VENDOR_BN, "HFC-4S Card (mini PCI)", 4, 4, 1, 2, 0, 0, 0, 0},
+/*5*/ {VENDOR_CCD, "HFC-4S Eval (old)", 4, 4, 0, 0, 0, 0, 0, 0},
+/*6*/ {VENDOR_CCD, "HFC-4S IOB4ST", 4, 4, 1, 2, 0, DIP_4S, 0, 0},
+/*7*/ {VENDOR_CCD, "HFC-4S", 4, 4, 1, 2, 0, 0, 0, 0},
+/*8*/ {VENDOR_DIG, "HFC-4S Card", 4, 4, 0, 2, 0, 0, HFC_IO_MODE_REGIO, 0},
+/*9*/ {VENDOR_CCD, "HFC-4S Swyx 4xS0 SX2 QuadBri", 4, 4, 1, 2, 0, 0, 0, 0},
+/*10*/ {VENDOR_JH, "HFC-4S (junghanns 2.0)", 4, 4, 1, 2, 0, 0, 0, 0},
+/*11*/ {VENDOR_PRIM, "HFC-2S Primux Card", 4, 2, 0, 0, 0, 0, 0, 0},
+
+/*12*/ {VENDOR_BN, "HFC-8S Card", 8, 8, 1, 0, 0, 0, 0, 0},
/*13*/ {VENDOR_BN, "HFC-8S Card (+)", 8, 8, 1, 8, 0, DIP_8S,
- HFC_IO_MODE_REGIO},
-/*14*/ {VENDOR_CCD, "HFC-8S Eval (old)", 8, 8, 0, 0, 0, 0, 0},
-/*15*/ {VENDOR_CCD, "HFC-8S IOB4ST Recording", 8, 8, 1, 0, 0, 0, 0},
+ HFC_IO_MODE_REGIO, 0},
+/*14*/ {VENDOR_CCD, "HFC-8S Eval (old)", 8, 8, 0, 0, 0, 0, 0, 0},
+/*15*/ {VENDOR_CCD, "HFC-8S IOB4ST Recording", 8, 8, 1, 0, 0, 0, 0, 0},
-/*16*/ {VENDOR_CCD, "HFC-8S IOB8ST", 8, 8, 1, 0, 0, 0, 0},
-/*17*/ {VENDOR_CCD, "HFC-8S", 8, 8, 1, 0, 0, 0, 0},
-/*18*/ {VENDOR_CCD, "HFC-8S", 8, 8, 1, 0, 0, 0, 0},
+/*16*/ {VENDOR_CCD, "HFC-8S IOB8ST", 8, 8, 1, 0, 0, 0, 0, 0},
+/*17*/ {VENDOR_CCD, "HFC-8S", 8, 8, 1, 0, 0, 0, 0, 0},
+/*18*/ {VENDOR_CCD, "HFC-8S", 8, 8, 1, 0, 0, 0, 0, 0},
-/*19*/ {VENDOR_BN, "HFC-E1 Card", 1, 1, 0, 1, 0, DIP_E1, 0},
-/*20*/ {VENDOR_BN, "HFC-E1 Card (mini PCI)", 1, 1, 0, 1, 0, 0, 0},
-/*21*/ {VENDOR_BN, "HFC-E1+ Card (Dual)", 1, 1, 0, 1, 0, DIP_E1, 0},
-/*22*/ {VENDOR_BN, "HFC-E1 Card (Dual)", 1, 1, 0, 1, 0, DIP_E1, 0},
+/*19*/ {VENDOR_BN, "HFC-E1 Card", 1, 1, 0, 1, 0, DIP_E1, 0, 0},
+/*20*/ {VENDOR_BN, "HFC-E1 Card (mini PCI)", 1, 1, 0, 1, 0, 0, 0, 0},
+/*21*/ {VENDOR_BN, "HFC-E1+ Card (Dual)", 1, 1, 0, 1, 0, DIP_E1, 0, 0},
+/*22*/ {VENDOR_BN, "HFC-E1 Card (Dual)", 1, 1, 0, 1, 0, DIP_E1, 0, 0},
-/*23*/ {VENDOR_CCD, "HFC-E1 Eval (old)", 1, 1, 0, 0, 0, 0, 0},
-/*24*/ {VENDOR_CCD, "HFC-E1 IOB1E1", 1, 1, 0, 1, 0, 0, 0},
-/*25*/ {VENDOR_CCD, "HFC-E1", 1, 1, 0, 1, 0, 0, 0},
+/*23*/ {VENDOR_CCD, "HFC-E1 Eval (old)", 1, 1, 0, 0, 0, 0, 0, 0},
+/*24*/ {VENDOR_CCD, "HFC-E1 IOB1E1", 1, 1, 0, 1, 0, 0, 0, 0},
+/*25*/ {VENDOR_CCD, "HFC-E1", 1, 1, 0, 1, 0, 0, 0, 0},
/*26*/ {VENDOR_CCD, "HFC-4S Speech Design", 4, 4, 0, 0, 0, 0,
- HFC_IO_MODE_PLXSD},
+ HFC_IO_MODE_PLXSD, 0},
/*27*/ {VENDOR_CCD, "HFC-E1 Speech Design", 1, 1, 0, 0, 0, 0,
- HFC_IO_MODE_PLXSD},
-/*28*/ {VENDOR_CCD, "HFC-4S OpenVox", 4, 4, 1, 0, 0, 0, 0},
-/*29*/ {VENDOR_CCD, "HFC-2S OpenVox", 4, 2, 1, 0, 0, 0, 0},
-/*30*/ {VENDOR_CCD, "HFC-8S OpenVox", 8, 8, 1, 0, 0, 0, 0},
+ HFC_IO_MODE_PLXSD, 0},
+/*28*/ {VENDOR_CCD, "HFC-4S OpenVox", 4, 4, 1, 0, 0, 0, 0, 0},
+/*29*/ {VENDOR_CCD, "HFC-2S OpenVox", 4, 2, 1, 0, 0, 0, 0, 0},
+/*30*/ {VENDOR_CCD, "HFC-8S OpenVox", 8, 8, 1, 0, 0, 0, 0, 0},
+/*31*/ {VENDOR_CCD, "XHFC-4S Speech Design", 5, 4, 0, 0, 0, 0,
+ HFC_IO_MODE_EMBSD, XHFC_IRQ},
+/*32*/ {VENDOR_JH, "HFC-8S (junghanns)", 8, 8, 1, 0, 0, 0, 0, 0},
};
#undef H
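
Entry 31 is the one the embedded path selects for every hwid variant. Written with only the field names that appear in this patch it reduces to the following sketch; the fields not shown in the struct hm_map hunk are omitted, and their order in the positional initializer above is assumed:

	/* Sketch only: partial designated-initializer view of hfcm_map[31] */
	static const struct hm_map xhfc_embedded_map = {
		.vendor_name	= VENDOR_CCD,
		.card_name	= "XHFC-4S Speech Design",
		.io_mode	= HFC_IO_MODE_EMBSD,	/* use the EMBSD accessors */
		.irq		= XHFC_IRQ,		/* fixed IRQ, no PCI device */
	};
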
@@ -5178,6 +5332,8 @@ static struct pci_device_id hfmultipci_ids[] __devinitdata = {
PCI_SUBDEVICE_ID_CCD_HFC8S, 0, 0, H(18)}, /* 8S */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_OV8S, 0, 0, H(30)}, /* OpenVox 8 */
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_JH8S, 0, 0, H(32)}, /* Junghanns 8S */
/* Cards with HFC-E1 Chip */
@@ -5201,6 +5357,10 @@ static struct pci_device_id hfmultipci_ids[] __devinitdata = {
PCI_SUBDEVICE_ID_CCD_SPD4S, 0, 0, H(26)}, /* PLX PCI Bridge */
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_CCD,
PCI_SUBDEVICE_ID_CCD_SPDE1, 0, 0, H(27)}, /* PLX PCI Bridge */
+
+ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
+ PCI_SUBDEVICE_ID_CCD_JHSE1, 0, 0, H(25)}, /* Junghanns E1 */
+
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_ANY_ID, PCI_ANY_ID,
0, 0, 0},
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_ANY_ID, PCI_ANY_ID,
@@ -5231,7 +5391,7 @@ hfcmulti_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Please contact the driver maintainer for support.\n");
return -ENODEV;
}
- ret = hfcmulti_init(pdev, ent);
+ ret = hfcmulti_init(m, pdev, ent);
if (ret)
return ret;
HFC_cnt++;
@@ -5261,6 +5421,8 @@ static int __init
HFCmulti_init(void)
{
int err;
+ int i, xhfc = 0;
+ struct hm_map m;
printk(KERN_INFO "mISDN: HFC-multi driver %s\n", HFC_MULTI_VERSION);
@@ -5308,11 +5470,43 @@ HFCmulti_init(void)
if (!clock)
clock = 1;
+ /* Register the embedded devices.
+ * This should be done before the PCI cards registration */
+ switch (hwid) {
+ case HWID_MINIP4:
+ xhfc = 1;
+ m = hfcm_map[31];
+ break;
+ case HWID_MINIP8:
+ xhfc = 2;
+ m = hfcm_map[31];
+ break;
+ case HWID_MINIP16:
+ xhfc = 4;
+ m = hfcm_map[31];
+ break;
+ default:
+ xhfc = 0;
+ }
+
+ for (i = 0; i < xhfc; ++i) {
+ err = hfcmulti_init(&m, NULL, NULL);
+ if (err) {
+ printk(KERN_ERR "error registering embedded driver: "
+ "%x\n", err);
+ return err;
+ }
+ HFC_cnt++;
+ printk(KERN_INFO "%d devices registered\n", HFC_cnt);
+ }
+
+ /* Register the PCI cards */
err = pci_register_driver(&hfcmultipci_driver);
if (err < 0) {
printk(KERN_ERR "error registering pci driver: %x\n", err);
return err;
}
+
return 0;
}
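
The switch above only decides how many XHFC controllers to register; every variant uses hfcm_map[31] (4 ports per controller). The same mapping as a sketch (the array is illustrative, not part of the patch):

	/* Sketch only: embedded devices registered per hwid value */
	static const int xhfc_devs_for_hwid[] = {
		[HWID_NONE]	= 0,	/* no embedded hardware */
		[HWID_MINIP4]	= 1,	/* 1 x XHFC-4S ->  4 ports */
		[HWID_MINIP8]	= 2,	/* 2 x XHFC-4S ->  8 ports */
		[HWID_MINIP16]	= 4,	/* 4 x XHFC-4S -> 16 ports */
	};
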
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 641a9cd1a53..228ffbed128 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -257,7 +257,7 @@ reset_hfcpci(struct hfc_pci *hc)
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
/* Clear already pending ints */
- if (Read_hfc(hc, HFCPCI_INT_S1));
+ val = Read_hfc(hc, HFCPCI_INT_S1);
/* set NT/TE mode */
hfcpci_setmode(hc);
@@ -452,7 +452,7 @@ hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
}
bz->za[new_f2].z2 = cpu_to_le16(new_z2);
bz->f2 = new_f2; /* next buffer */
- recv_Bchannel(bch);
+ recv_Bchannel(bch, MISDN_ID_ANY);
}
}
@@ -499,7 +499,8 @@ receive_dmsg(struct hfc_pci *hc)
df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
(MAX_D_FRAMES + 1); /* next buffer */
df->za[df->f2 & D_FREG_MASK].z2 =
- cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) & (D_FIFO_SIZE - 1));
+ cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) &
+ (D_FIFO_SIZE - 1));
} else {
dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
if (!dch->rx_skb) {
@@ -541,35 +542,45 @@ receive_dmsg(struct hfc_pci *hc)
* check for transparent receive data and read max one 'poll' size if avail
*/
static void
-hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
+hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
+ struct bzfifo *txbz, u_char *bdata)
{
- __le16 *z1r, *z2r;
- int new_z2, fcnt, maxlen;
- u_char *ptr, *ptr1;
+ __le16 *z1r, *z2r, *z1t, *z2t;
+ int new_z2, fcnt_rx, fcnt_tx, maxlen;
+ u_char *ptr, *ptr1;
- z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
+ z1r = &rxbz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
z2r = z1r + 1;
+ z1t = &txbz->za[MAX_B_FRAMES].z1;
+ z2t = z1t + 1;
- fcnt = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
- if (!fcnt)
+ fcnt_rx = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
+ if (!fcnt_rx)
return; /* no data avail */
- if (fcnt <= 0)
- fcnt += B_FIFO_SIZE; /* bytes actually buffered */
- new_z2 = le16_to_cpu(*z2r) + fcnt; /* new position in fifo */
+ if (fcnt_rx <= 0)
+ fcnt_rx += B_FIFO_SIZE; /* bytes actually buffered */
+ new_z2 = le16_to_cpu(*z2r) + fcnt_rx; /* new position in fifo */
if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
new_z2 -= B_FIFO_SIZE; /* buffer wrap */
- if (fcnt > MAX_DATA_SIZE) { /* flush, if oversized */
+ if (fcnt_rx > MAX_DATA_SIZE) { /* flush, if oversized */
*z2r = cpu_to_le16(new_z2); /* new position */
return;
}
- bch->rx_skb = mI_alloc_skb(fcnt, GFP_ATOMIC);
+ fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
+ if (fcnt_tx <= 0)
+ fcnt_tx += B_FIFO_SIZE;
+ /* fcnt_tx contains available bytes in tx-fifo */
+ fcnt_tx = B_FIFO_SIZE - fcnt_tx;
+ /* remaining bytes to send (bytes in tx-fifo) */
+
+ bch->rx_skb = mI_alloc_skb(fcnt_rx, GFP_ATOMIC);
if (bch->rx_skb) {
- ptr = skb_put(bch->rx_skb, fcnt);
- if (le16_to_cpu(*z2r) + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
- maxlen = fcnt; /* complete transfer */
+ ptr = skb_put(bch->rx_skb, fcnt_rx);
+ if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
+ maxlen = fcnt_rx; /* complete transfer */
else
maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r);
/* maximum */
@@ -577,14 +588,14 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL);
/* start of data */
memcpy(ptr, ptr1, maxlen); /* copy data */
- fcnt -= maxlen;
+ fcnt_rx -= maxlen;
- if (fcnt) { /* rest remaining */
+ if (fcnt_rx) { /* rest remaining */
ptr += maxlen;
ptr1 = bdata; /* start of buffer */
- memcpy(ptr, ptr1, fcnt); /* rest */
+ memcpy(ptr, ptr1, fcnt_rx); /* rest */
}
- recv_Bchannel(bch);
+ recv_Bchannel(bch, fcnt_tx); /* bch, id */
} else
printk(KERN_WARNING "HFCPCI: receive out of memory\n");
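
fcnt_tx starts out as the free space between the transmit read and write pointers and is then inverted to give the bytes still waiting in the TX FIFO, which is what gets handed to recv_Bchannel(). The same derivation as a standalone sketch (the function name is illustrative):

	/* Sketch only: bytes still pending in the hfcpci TX FIFO */
	static int hfcpci_tx_pending(u16 z1t, u16 z2t)
	{
		int space = z2t - z1t;		/* free space in the ring */
		if (space <= 0)
			space += B_FIFO_SIZE;	/* pointer wrap */
		return B_FIFO_SIZE - space;	/* bytes queued but not yet sent */
	}
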
@@ -600,26 +611,28 @@ main_rec_hfcpci(struct bchannel *bch)
struct hfc_pci *hc = bch->hw;
int rcnt, real_fifo;
int receive = 0, count = 5;
- struct bzfifo *bz;
+ struct bzfifo *txbz, *rxbz;
u_char *bdata;
struct zt *zp;
if ((bch->nr & 2) && (!hc->hw.bswapped)) {
- bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
+ rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
+ txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
real_fifo = 1;
} else {
- bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
+ rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
+ txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1;
real_fifo = 0;
}
Begin:
count--;
- if (bz->f1 != bz->f2) {
+ if (rxbz->f1 != rxbz->f2) {
if (bch->debug & DEBUG_HW_BCHANNEL)
printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n",
- bch->nr, bz->f1, bz->f2);
- zp = &bz->za[bz->f2];
+ bch->nr, rxbz->f1, rxbz->f2);
+ zp = &rxbz->za[rxbz->f2];
rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
if (rcnt < 0)
@@ -630,8 +643,8 @@ Begin:
"hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n",
bch->nr, le16_to_cpu(zp->z1),
le16_to_cpu(zp->z2), rcnt);
- hfcpci_empty_bfifo(bch, bz, bdata, rcnt);
- rcnt = bz->f1 - bz->f2;
+ hfcpci_empty_bfifo(bch, rxbz, bdata, rcnt);
+ rcnt = rxbz->f1 - rxbz->f2;
if (rcnt < 0)
rcnt += MAX_B_FRAMES + 1;
if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) {
@@ -644,7 +657,7 @@ Begin:
else
receive = 0;
} else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
- hfcpci_empty_fifo_trans(bch, bz, bdata);
+ hfcpci_empty_fifo_trans(bch, rxbz, txbz, bdata);
return;
} else
receive = 0;
@@ -954,6 +967,7 @@ static void
ph_state_nt(struct dchannel *dch)
{
struct hfc_pci *hc = dch->hw;
+ u_char val;
if (dch->debug)
printk(KERN_DEBUG "%s: NT newstate %x\n",
@@ -967,7 +981,7 @@ ph_state_nt(struct dchannel *dch)
hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
/* Clear already pending ints */
- if (Read_hfc(hc, HFCPCI_INT_S1));
+ val = Read_hfc(hc, HFCPCI_INT_S1);
Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
udelay(10);
Write_hfc(hc, HFCPCI_STATES, 4);
@@ -1256,8 +1270,7 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
rx_slot = (bc>>8) & 0xff;
tx_slot = (bc>>16) & 0xff;
bc = bc & 0xff;
- } else if (test_bit(HFC_CFG_PCM, &hc->cfg) &&
- (protocol > ISDN_P_NONE))
+ } else if (test_bit(HFC_CFG_PCM, &hc->cfg) && (protocol > ISDN_P_NONE))
printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n",
__func__);
if (hc->chanlimit > 1) {
@@ -1315,8 +1328,8 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
case (ISDN_P_B_RAW):
bch->state = protocol;
bch->nr = bc;
- hfcpci_clear_fifo_rx(hc, (fifo2 & 2)?1:0);
- hfcpci_clear_fifo_tx(hc, (fifo2 & 2)?1:0);
+ hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
+ hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
if (bc & 2) {
hc->hw.sctrl |= SCTRL_B2_ENA;
hc->hw.sctrl_r |= SCTRL_B2_ENA;
@@ -1350,8 +1363,8 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
case (ISDN_P_B_HDLC):
bch->state = protocol;
bch->nr = bc;
- hfcpci_clear_fifo_rx(hc, (fifo2 & 2)?1:0);
- hfcpci_clear_fifo_tx(hc, (fifo2 & 2)?1:0);
+ hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
+ hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
if (bc & 2) {
hc->hw.sctrl |= SCTRL_B2_ENA;
hc->hw.sctrl_r |= SCTRL_B2_ENA;
@@ -1445,7 +1458,7 @@ set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
switch (protocol) {
case (ISDN_P_B_RAW):
bch->state = protocol;
- hfcpci_clear_fifo_rx(hc, (chan & 2)?1:0);
+ hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
if (chan & 2) {
hc->hw.sctrl_r |= SCTRL_B2_ENA;
hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
@@ -1470,7 +1483,7 @@ set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
break;
case (ISDN_P_B_HDLC):
bch->state = protocol;
- hfcpci_clear_fifo_rx(hc, (chan & 2)?1:0);
+ hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
if (chan & 2) {
hc->hw.sctrl_r |= SCTRL_B2_ENA;
hc->hw.last_bfifo_cnt[1] = 0;
@@ -1793,10 +1806,9 @@ init_card(struct hfc_pci *hc)
printk(KERN_WARNING
"HFC PCI: IRQ(%d) getting no interrupts "
"during init %d\n", hc->irq, 4 - cnt);
- if (cnt == 1) {
- spin_unlock_irqrestore(&hc->lock, flags);
- return -EIO;
- } else {
+ if (cnt == 1)
+ break;
+ else {
reset_hfcpci(hc);
cnt--;
}
@@ -2035,7 +2047,8 @@ setup_hw(struct hfc_pci *hc)
printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
return 1;
}
- hc->hw.pci_io = (char __iomem *)(unsigned long)hc->pdev->resource[1].start;
+ hc->hw.pci_io =
+ (char __iomem *)(unsigned long)hc->pdev->resource[1].start;
if (!hc->hw.pci_io) {
printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
@@ -2277,7 +2290,7 @@ hfc_remove_pci(struct pci_dev *pdev)
release_card(card);
else
if (debug)
- printk(KERN_WARNING "%s: drvdata already removed\n",
+ printk(KERN_DEBUG "%s: drvdata already removed\n",
__func__);
}
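The transparent-mode handler above derives both counters from the HFC-PCI ring-buffer pointers: the distance between two z pointers, folded back into the FIFO size, is the number of bytes between them. The rx distance sizes rx_skb (fcnt_rx); the tx distance is subtracted from B_FIFO_SIZE to obtain the bytes still queued for transmit (fcnt_tx), which recv_Bchannel() now forwards upward as the packet id. A minimal sketch of that wrap-around arithmetic, with an illustrative helper name (not driver code):

static inline int fifo_bytes(int z1, int z2, int fifo_size)
{
        int n = z1 - z2;        /* distance between the two ring pointers */

        if (n < 0)              /* the leading pointer wrapped past the end */
                n += fifo_size;
        return n;               /* bytes between z2 and z1 */
}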
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 9c427fb204e..6b7704c41b9 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -947,7 +947,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
if (fifo->dch)
recv_Dchannel(fifo->dch);
if (fifo->bch)
- recv_Bchannel(fifo->bch);
+ recv_Bchannel(fifo->bch, MISDN_ID_ANY);
if (fifo->ech)
recv_Echannel(fifo->ech,
&hw->dch);
@@ -969,7 +969,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
} else {
/* deliver transparent data to layer2 */
if (rx_skb->len >= poll)
- recv_Bchannel(fifo->bch);
+ recv_Bchannel(fifo->bch, MISDN_ID_ANY);
}
spin_unlock(&hw->lock);
}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index f1265667b06..3d337d924c2 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -82,8 +82,9 @@ release_io_hfcpci(struct IsdnCardState *cs)
Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, 0); /* disable memory mapped ports + busmaster */
del_timer(&cs->hw.hfcpci.timer);
- kfree(cs->hw.hfcpci.share_start);
- cs->hw.hfcpci.share_start = NULL;
+ pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
+ cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
+ cs->hw.hfcpci.fifos = NULL;
iounmap((void *)cs->hw.hfcpci.pci_io);
}
@@ -1663,8 +1664,19 @@ setup_hfcpci(struct IsdnCard *card)
dev_hfcpci);
i++;
if (tmp_hfcpci) {
+ dma_addr_t dma_mask = DMA_BIT_MASK(32) & ~0x7fffUL;
if (pci_enable_device(tmp_hfcpci))
continue;
+ if (pci_set_dma_mask(tmp_hfcpci, dma_mask)) {
+ printk(KERN_WARNING
+ "HiSax hfc_pci: No suitable DMA available.\n");
+ continue;
+ }
+ if (pci_set_consistent_dma_mask(tmp_hfcpci, dma_mask)) {
+ printk(KERN_WARNING
+ "HiSax hfc_pci: No suitable consistent DMA available.\n");
+ continue;
+ }
pci_set_master(tmp_hfcpci);
if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
continue;
@@ -1693,22 +1705,29 @@ setup_hfcpci(struct IsdnCard *card)
printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
return (0);
}
+
/* Allocate memory for FIFOS */
- /* Because the HFC-PCI needs a 32K physical alignment, we */
- /* need to allocate the double mem and align the address */
- if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
- printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
+ cs->hw.hfcpci.fifos = pci_alloc_consistent(cs->hw.hfcpci.dev,
+ 0x8000, &cs->hw.hfcpci.dma);
+ if (!cs->hw.hfcpci.fifos) {
+ printk(KERN_WARNING "HFC-PCI: Error allocating FIFO memory!\n");
+ return 0;
+ }
+ if (cs->hw.hfcpci.dma & 0x7fff) {
+ printk(KERN_WARNING
+ "HFC-PCI: Error DMA memory not on 32K boundary (%lx)\n",
+ (u_long)cs->hw.hfcpci.dma);
+ pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
+ cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
return 0;
}
- cs->hw.hfcpci.fifos = (void *)
- (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
- pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
+ pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u32)cs->hw.hfcpci.dma);
cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
printk(KERN_INFO
- "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n",
+ "HFC-PCI: defined at mem %p fifo %p(%lx) IRQ %d HZ %d\n",
cs->hw.hfcpci.pci_io,
cs->hw.hfcpci.fifos,
- (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
+ (u_long)cs->hw.hfcpci.dma,
cs->irq, HZ);
spin_lock_irqsave(&cs->lock, flags);
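The hunk above drops the old kmalloc(65536)-and-align-by-hand trick in favour of a DMA-coherent allocation plus an explicit alignment check, since the HFC-PCI only accepts a 32K-aligned FIFO base in config register 0x80. A minimal sketch of that pattern, using a hypothetical helper that is not part of the driver:

#include <linux/pci.h>

static void *hfc_alloc_fifos(struct pci_dev *pdev, dma_addr_t *dma)
{
        void *fifos = pci_alloc_consistent(pdev, 0x8000, dma);

        if (!fifos)
                return NULL;                    /* no coherent memory */
        if (*dma & 0x7fff) {                    /* not on a 32K boundary */
                pci_free_consistent(pdev, 0x8000, fifos, *dma);
                return NULL;
        }
        return fifos;                           /* bus address returned in *dma */
}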
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index f8527046f19..0685c194696 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -703,7 +703,7 @@ struct hfcPCI_hw {
int nt_timer;
struct pci_dev *dev;
unsigned char *pci_io; /* start of PCI IO memory */
- void *share_start; /* shared memory for Fifos start */
+ dma_addr_t dma; /* dma handle for Fifos */
void *fifos; /* FIFO memory */
int last_bfifo_cnt[2]; /* marker saving last b-fifo frame count */
struct timer_list timer;
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c
index 53f6ad1235d..4ffaa14b9fc 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/isdn/hysdn/hycapi.c
@@ -67,7 +67,7 @@ hycapi_reset_ctr(struct capi_ctr *ctrl)
printk(KERN_NOTICE "HYCAPI hycapi_reset_ctr\n");
#endif
capilib_release(&cinfo->ncci_head);
- capi_ctr_reseted(ctrl);
+ capi_ctr_down(ctrl);
}
/******************************
@@ -347,7 +347,7 @@ int hycapi_capi_stop(hysdn_card *card)
if(cinfo) {
ctrl = &cinfo->capi_ctrl;
/* ctrl->suspend_output(ctrl); */
- capi_ctr_reseted(ctrl);
+ capi_ctr_down(ctrl);
}
return 0;
}
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 36778b270c3..ed3510f273d 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -135,5 +135,3 @@ source "drivers/isdn/act2000/Kconfig"
source "drivers/isdn/hysdn/Kconfig"
endmenu
-
-source "drivers/isdn/gigaset/Kconfig"
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index cb8943da4f1..34d54e7281f 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1069,7 +1069,7 @@ isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
lp = isdn_net_get_locked_lp(nd);
if (!lp) {
printk(KERN_WARNING "%s: all channels busy - requeuing!\n", ndev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
/* we have our lp locked from now on */
@@ -1273,14 +1273,14 @@ isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irqrestore(&dev->lock, flags);
isdn_net_dial(); /* Initiate dialing */
netif_stop_queue(ndev);
- return 1; /* let upper layer requeue skb packet */
+ return NETDEV_TX_BUSY; /* let upper layer requeue skb packet */
}
#endif
/* Initiate dialing */
spin_unlock_irqrestore(&dev->lock, flags);
isdn_net_dial();
isdn_net_device_stop_queue(lp);
- return 1;
+ return NETDEV_TX_BUSY;
} else {
isdn_net_unreachable(ndev, skb,
"No phone number");
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 1a2222cbb80..b4d4522e507 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1592,7 +1592,7 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
int retval, line;
line = tty->index;
- if (line < 0 || line > ISDN_MAX_CHANNELS)
+ if (line < 0 || line >= ISDN_MAX_CHANNELS)
return -ENODEV;
info = &dev->mdm.info[line];
if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_open"))
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index 9426c9827e4..21d34be5af6 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -214,7 +214,7 @@ get_free_devid(void)
if (!test_and_set_bit(i, (u_long *)&device_ids))
break;
if (i > MAX_DEVICE_ID)
- return -1;
+ return -EBUSY;
return i;
}
@@ -224,10 +224,10 @@ mISDN_register_device(struct mISDNdevice *dev,
{
int err;
- dev->id = get_free_devid();
- err = -EBUSY;
- if (dev->id < 0)
+ err = get_free_devid();
+ if (err < 0)
goto error1;
+ dev->id = err;
device_initialize(&dev->dev);
if (name && name[0])
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
index 98a33c58f09..18af86879c0 100644
--- a/drivers/isdn/mISDN/dsp.h
+++ b/drivers/isdn/mISDN/dsp.h
@@ -112,9 +112,11 @@ struct dsp_conf {
#define DSP_DTMF_NPOINTS 102
-#define ECHOCAN_BUFLEN (4*128)
+#define ECHOCAN_BUFF_SIZE 0x400 /* must be 2**n */
+#define ECHOCAN_BUFF_MASK 0x3ff /* -1 */
struct dsp_dtmf {
+ int enable; /* dtmf is enabled */
int treshold; /* above this is dtmf (square of) */
int software; /* dtmf uses software decoding */
int hardware; /* dtmf uses hardware decoding */
@@ -123,7 +125,7 @@ struct dsp_dtmf {
/* buffers one full dtmf frame */
u8 lastwhat, lastdigit;
int count;
- u8 digits[16]; /* just the dtmf result */
+ u8 digits[16]; /* dtmf result */
};
@@ -150,6 +152,15 @@ struct dsp_tone {
struct timer_list tl;
};
+/***************
+ * echo stuff *
+ ***************/
+
+struct dsp_echo {
+ int software; /* echo is generated by software */
+ int hardware; /* echo is generated by hardware */
+};
+
/*****************
* general stuff *
*****************/
@@ -160,7 +171,7 @@ struct dsp {
struct mISDNchannel *up;
unsigned char name[64];
int b_active;
- int echo; /* echo is enabled */
+ struct dsp_echo echo;
int rx_disabled; /* what the user wants */
int rx_is_off; /* what the card is */
int tx_mix;
@@ -261,5 +272,5 @@ extern int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg);
extern void dsp_pipeline_process_tx(struct dsp_pipeline *pipeline, u8 *data,
int len);
extern void dsp_pipeline_process_rx(struct dsp_pipeline *pipeline, u8 *data,
- int len);
+ int len, unsigned int txlen);
diff --git a/drivers/isdn/mISDN/dsp_audio.c b/drivers/isdn/mISDN/dsp_audio.c
index de3795e3f43..9c7c6451bf3 100644
--- a/drivers/isdn/mISDN/dsp_audio.c
+++ b/drivers/isdn/mISDN/dsp_audio.c
@@ -210,9 +210,8 @@ dsp_audio_generate_seven(void)
j = 0;
for (k = 0; k < 256; k++) {
if (dsp_audio_alaw_to_s32[k]
- < dsp_audio_alaw_to_s32[i]) {
- j++;
- }
+ < dsp_audio_alaw_to_s32[i])
+ j++;
}
sorted_alaw[j] = i;
}
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 58c43e429f7..9c7c0d1ba55 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -163,8 +163,9 @@ dsp_cmx_debug(struct dsp *dsp)
printk(KERN_DEBUG "-----Current DSP\n");
list_for_each_entry(odsp, &dsp_ilist, list) {
- printk(KERN_DEBUG "* %s echo=%d txmix=%d",
- odsp->name, odsp->echo, odsp->tx_mix);
+ printk(KERN_DEBUG "* %s hardecho=%d softecho=%d txmix=%d",
+ odsp->name, odsp->echo.hardware, odsp->echo.software,
+ odsp->tx_mix);
if (odsp->conf)
printk(" (Conf %d)", odsp->conf->id);
if (dsp == odsp)
@@ -177,10 +178,12 @@ dsp_cmx_debug(struct dsp *dsp)
list_for_each_entry(member, &conf->mlist, list) {
printk(KERN_DEBUG
" - member = %s (slot_tx %d, bank_tx %d, "
- "slot_rx %d, bank_rx %d hfc_conf %d)%s\n",
+ "slot_rx %d, bank_rx %d hfc_conf %d "
+ "tx_data %d rx_is_off %d)%s\n",
member->dsp->name, member->dsp->pcm_slot_tx,
member->dsp->pcm_bank_tx, member->dsp->pcm_slot_rx,
member->dsp->pcm_bank_rx, member->dsp->hfc_conf,
+ member->dsp->tx_data, member->dsp->rx_is_off,
(member->dsp == dsp) ? " *this*" : "");
}
}
@@ -235,7 +238,7 @@ dsp_cmx_add_conf_member(struct dsp *dsp, struct dsp_conf *conf)
member = kzalloc(sizeof(struct dsp_conf_member), GFP_ATOMIC);
if (!member) {
- printk(KERN_ERR "kmalloc struct dsp_conf_member failed\n");
+ printk(KERN_ERR "kzalloc struct dsp_conf_member failed\n");
return -ENOMEM;
}
member->dsp = dsp;
@@ -314,7 +317,7 @@ static struct dsp_conf
conf = kzalloc(sizeof(struct dsp_conf), GFP_ATOMIC);
if (!conf) {
- printk(KERN_ERR "kmalloc struct dsp_conf failed\n");
+ printk(KERN_ERR "kzalloc struct dsp_conf failed\n");
return NULL;
}
INIT_LIST_HEAD(&conf->mlist);
@@ -385,7 +388,7 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
int freeunits[8];
u_char freeslots[256];
int same_hfc = -1, same_pcm = -1, current_conf = -1,
- all_conf = 1;
+ all_conf = 1, tx_data = 0;
/* dsp gets updated (no conf) */
if (!conf) {
@@ -409,7 +412,7 @@ one_member:
/* process hw echo */
if (dsp->features.pcm_banks < 1)
return;
- if (!dsp->echo) {
+ if (!dsp->echo.software && !dsp->echo.hardware) {
/* NO ECHO: remove PCM slot if assigned */
if (dsp->pcm_slot_tx >= 0 || dsp->pcm_slot_rx >= 0) {
if (dsp_debug & DEBUG_DSP_CMX)
@@ -427,10 +430,15 @@ one_member:
}
return;
}
+ /* echo is enabled, find out if we use soft or hardware */
+ dsp->echo.software = dsp->tx_data;
+ dsp->echo.hardware = 0;
/* ECHO: already echo */
if (dsp->pcm_slot_tx >= 0 && dsp->pcm_slot_rx < 0 &&
- dsp->pcm_bank_tx == 2 && dsp->pcm_bank_rx == 2)
+ dsp->pcm_bank_tx == 2 && dsp->pcm_bank_rx == 2) {
+ dsp->echo.hardware = 1;
return;
+ }
/* ECHO: if slot already assigned */
if (dsp->pcm_slot_tx >= 0) {
dsp->pcm_slot_rx = dsp->pcm_slot_tx;
@@ -443,6 +451,7 @@ one_member:
dsp->pcm_slot_tx);
dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_PCM_CONN,
dsp->pcm_slot_tx, 2, dsp->pcm_slot_rx, 2);
+ dsp->echo.hardware = 1;
return;
}
/* ECHO: find slot */
@@ -472,6 +481,7 @@ one_member:
"%s no slot available for echo\n",
__func__);
/* no more slots available */
+ dsp->echo.software = 1;
return;
}
/* assign free slot */
@@ -485,6 +495,7 @@ one_member:
__func__, dsp->name, dsp->pcm_slot_tx);
dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_PCM_CONN,
dsp->pcm_slot_tx, 2, dsp->pcm_slot_rx, 2);
+ dsp->echo.hardware = 1;
return;
}
@@ -554,7 +565,7 @@ conf_software:
return;
}
/* check if member has echo turned on */
- if (member->dsp->echo) {
+ if (member->dsp->echo.hardware || member->dsp->echo.software) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s dsp %s cannot form a conf, because "
@@ -592,10 +603,9 @@ conf_software:
if (member->dsp->tx_data) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
- "%s dsp %s cannot form a conf, because "
- "tx_data is turned on\n",
+ "%s dsp %s tx_data is turned on\n",
__func__, member->dsp->name);
- goto conf_software;
+ tx_data = 1;
}
/* check if pipeline exists */
if (member->dsp->pipeline.inuse) {
@@ -794,7 +804,7 @@ conf_software:
nextm->dsp->pcm_slot_tx, nextm->dsp->pcm_bank_tx,
nextm->dsp->pcm_slot_rx, nextm->dsp->pcm_bank_rx);
conf->hardware = 1;
- conf->software = 0;
+ conf->software = tx_data;
return;
/* if members have one bank (or on the same chip) */
} else {
@@ -904,7 +914,7 @@ conf_software:
nextm->dsp->pcm_slot_tx, nextm->dsp->pcm_bank_tx,
nextm->dsp->pcm_slot_rx, nextm->dsp->pcm_bank_rx);
conf->hardware = 1;
- conf->software = 0;
+ conf->software = tx_data;
return;
}
}
@@ -937,6 +947,10 @@ conf_software:
if (current_conf >= 0) {
join_members:
list_for_each_entry(member, &conf->mlist, list) {
+ /* if no conference engine on our chip, change to
+ * software */
+ if (!member->dsp->features.hfc_conf)
+ goto conf_software;
/* in case of hdlc, change to software */
if (member->dsp->hdlc)
goto conf_software;
@@ -1295,17 +1309,25 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members)
int r, rr, t, tt, o_r, o_rr;
int preload = 0;
struct mISDNhead *hh, *thh;
+ int tx_data_only = 0;
/* don't process if: */
if (!dsp->b_active) { /* if not active */
dsp->last_tx = 0;
return;
}
- if (dsp->pcm_slot_tx >= 0 && /* connected to pcm slot */
+ if (((dsp->conf && dsp->conf->hardware) || /* hardware conf */
+ dsp->echo.hardware) && /* OR hardware echo */
dsp->tx_R == dsp->tx_W && /* AND no tx-data */
!(dsp->tone.tone && dsp->tone.software)) { /* AND not soft tones */
- dsp->last_tx = 0;
- return;
+ if (!dsp->tx_data) { /* no tx_data for user space required */
+ dsp->last_tx = 0;
+ return;
+ }
+ if (dsp->conf && dsp->conf->software && dsp->conf->hardware)
+ tx_data_only = 1;
+ if (dsp->echo.software && dsp->echo.hardware)
+ tx_data_only = 1;
}
#ifdef CMX_DEBUG
@@ -1367,7 +1389,8 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members)
while (r != rr && t != tt) {
#ifdef CMX_TX_DEBUG
if (strlen(debugbuf) < 48)
- sprintf(debugbuf+strlen(debugbuf), " %02x", p[t]);
+ sprintf(debugbuf+strlen(debugbuf), " %02x",
+ p[t]);
#endif
*d++ = p[t]; /* write tx_buff */
t = (t+1) & CMX_BUFF_MASK;
@@ -1388,7 +1411,7 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members)
/* PROCESS DATA (one member / no conf) */
if (!conf || members <= 1) {
/* -> if echo is NOT enabled */
- if (!dsp->echo) {
+ if (!dsp->echo.software) {
/* -> send tx-data if available or use 0-volume */
while (r != rr && t != tt) {
*d++ = p[t]; /* write tx_buff */
@@ -1438,7 +1461,7 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members)
o_r = (o_rr - rr + r) & CMX_BUFF_MASK;
/* start rx-pointer at current read position*/
/* -> if echo is NOT enabled */
- if (!dsp->echo) {
+ if (!dsp->echo.software) {
/*
* -> copy other member's rx-data,
* if tx-data is available, mix
@@ -1486,7 +1509,7 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members)
#endif
/* PROCESS DATA (three or more members) */
/* -> if echo is NOT enabled */
- if (!dsp->echo) {
+ if (!dsp->echo.software) {
/*
* -> substract rx-data from conf-data,
* if tx-data is available, mix
@@ -1550,27 +1573,40 @@ send_packet:
* becuase we want what we send, not what we filtered
*/
if (dsp->tx_data) {
- /* PREPARE RESULT */
- txskb = mI_alloc_skb(len, GFP_ATOMIC);
- if (!txskb) {
- printk(KERN_ERR
- "FATAL ERROR in mISDN_dsp.o: "
- "cannot alloc %d bytes\n", len);
+ if (tx_data_only) {
+ hh->prim = DL_DATA_REQ;
+ hh->id = 0;
+ /* queue and trigger */
+ skb_queue_tail(&dsp->sendq, nskb);
+ schedule_work(&dsp->workq);
+ /* exit because only tx_data is used */
+ return;
} else {
- thh = mISDN_HEAD_P(txskb);
- thh->prim = DL_DATA_REQ;
- thh->id = 0;
- memcpy(skb_put(txskb, len), nskb->data+preload, len);
- /* queue (trigger later) */
- skb_queue_tail(&dsp->sendq, txskb);
+ txskb = mI_alloc_skb(len, GFP_ATOMIC);
+ if (!txskb) {
+ printk(KERN_ERR
+ "FATAL ERROR in mISDN_dsp.o: "
+ "cannot alloc %d bytes\n", len);
+ } else {
+ thh = mISDN_HEAD_P(txskb);
+ thh->prim = DL_DATA_REQ;
+ thh->id = 0;
+ memcpy(skb_put(txskb, len), nskb->data+preload,
+ len);
+ /* queue (trigger later) */
+ skb_queue_tail(&dsp->sendq, txskb);
+ }
}
}
+
+ /* send data only to the card if we didn't just calculate tx_data */
/* adjust volume */
if (dsp->tx_volume)
dsp_change_volume(nskb, dsp->tx_volume);
/* pipeline */
if (dsp->pipeline.inuse)
- dsp_pipeline_process_tx(&dsp->pipeline, nskb->data, nskb->len);
+ dsp_pipeline_process_tx(&dsp->pipeline, nskb->data,
+ nskb->len);
/* crypt */
if (dsp->bf_enable)
dsp_bf_encrypt(dsp, nskb->data, nskb->len);
@@ -1592,7 +1628,8 @@ dsp_cmx_send(void *arg)
struct dsp_conf_member *member;
struct dsp *dsp;
int mustmix, members;
- s32 mixbuffer[MAX_POLL+100], *c;
+ static s32 mixbuffer[MAX_POLL+100];
+ s32 *c;
u8 *p, *q;
int r, rr;
int jittercheck = 0, delay, i;
@@ -1890,10 +1927,8 @@ dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb)
/* no conf */
if (!dsp->conf) {
- /* in case of hardware (echo) */
- if (dsp->pcm_slot_tx >= 0)
- return;
- if (dsp->echo) {
+ /* in case of software echo */
+ if (dsp->echo.software) {
nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) {
hh = mISDN_HEAD_P(nskb);
@@ -1909,7 +1944,7 @@ dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb)
if (dsp->conf->hardware)
return;
list_for_each_entry(member, &dsp->conf->mlist, list) {
- if (dsp->echo || member->dsp != dsp) {
+ if (dsp->echo.software || member->dsp != dsp) {
nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) {
hh = mISDN_HEAD_P(nskb);
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 3083338716b..77ee2867c8b 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -203,13 +203,13 @@ dsp_rx_off_member(struct dsp *dsp)
else if (dsp->dtmf.software)
rx_off = 0;
/* echo in software */
- else if (dsp->echo && dsp->pcm_slot_tx < 0)
+ else if (dsp->echo.software)
rx_off = 0;
/* bridge in software */
- else if (dsp->conf) {
- if (dsp->conf->software)
- rx_off = 0;
- }
+ else if (dsp->conf && dsp->conf->software)
+ rx_off = 0;
+ /* data is not required by user space and not required
+ * for echo dtmf detection, soft-echo, soft-bridging */
if (rx_off == dsp->rx_is_off)
return;
@@ -280,7 +280,7 @@ dsp_fill_empty(struct dsp *dsp)
static int
dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
{
- struct sk_buff *nskb;
+ struct sk_buff *nskb;
int ret = 0;
int cont;
u8 *data;
@@ -306,15 +306,18 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
"to %d\n", *((int *)data));
dsp->dtmf.treshold = (*(int *)data) * 10000;
}
+ dsp->dtmf.enable = 1;
/* init goertzel */
dsp_dtmf_goertzel_init(dsp);
/* check dtmf hardware */
dsp_dtmf_hardware(dsp);
+ dsp_rx_off(dsp);
break;
case DTMF_TONE_STOP: /* turn off DTMF */
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: stop dtmf\n", __func__);
+ dsp->dtmf.enable = 0;
dsp->dtmf.hardware = 0;
dsp->dtmf.software = 0;
break;
@@ -414,7 +417,7 @@ tone_off:
dsp_rx_off(dsp);
break;
case DSP_ECHO_ON: /* enable echo */
- dsp->echo = 1; /* soft echo */
+ dsp->echo.software = 1; /* soft echo */
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: enable cmx-echo\n", __func__);
dsp_cmx_hardware(dsp->conf, dsp);
@@ -423,7 +426,8 @@ tone_off:
dsp_cmx_debug(dsp);
break;
case DSP_ECHO_OFF: /* disable echo */
- dsp->echo = 0;
+ dsp->echo.software = 0;
+ dsp->echo.hardware = 0;
if (dsp_debug & DEBUG_DSP_CORE)
printk(KERN_DEBUG "%s: disable cmx-echo\n", __func__);
dsp_cmx_hardware(dsp->conf, dsp);
@@ -502,7 +506,7 @@ tone_off:
break;
}
dsp->cmx_delay = (*((int *)data)) << 3;
- /* miliseconds to samples */
+ /* milliseconds to samples */
if (dsp->cmx_delay >= (CMX_BUFF_HALF>>1))
/* clip to half of maximum usable buffer
(half of half buffer) */
@@ -556,7 +560,7 @@ tone_off:
dsp->pipeline.inuse = 1;
dsp_cmx_hardware(dsp->conf, dsp);
ret = dsp_pipeline_build(&dsp->pipeline,
- len > 0 ? (char *)data : NULL);
+ len > 0 ? data : NULL);
dsp_cmx_hardware(dsp->conf, dsp);
dsp_rx_off(dsp);
}
@@ -657,11 +661,10 @@ get_features(struct mISDNchannel *ch)
static int
dsp_function(struct mISDNchannel *ch, struct sk_buff *skb)
{
- struct dsp *dsp = container_of(ch, struct dsp, ch);
+ struct dsp *dsp = container_of(ch, struct dsp, ch);
struct mISDNhead *hh;
int ret = 0;
- u8 *digits;
- int cont;
+ u8 *digits = NULL;
u_long flags;
hh = mISDN_HEAD_P(skb);
@@ -704,50 +707,55 @@ dsp_function(struct mISDNchannel *ch, struct sk_buff *skb)
break;
}
+ spin_lock_irqsave(&dsp_lock, flags);
+
/* decrypt if enabled */
if (dsp->bf_enable)
dsp_bf_decrypt(dsp, skb->data, skb->len);
/* pipeline */
if (dsp->pipeline.inuse)
dsp_pipeline_process_rx(&dsp->pipeline, skb->data,
- skb->len);
+ skb->len, hh->id);
/* change volume if requested */
if (dsp->rx_volume)
dsp_change_volume(skb, dsp->rx_volume);
-
/* check if dtmf soft decoding is turned on */
if (dsp->dtmf.software) {
digits = dsp_dtmf_goertzel_decode(dsp, skb->data,
- skb->len, (dsp_options&DSP_OPT_ULAW)?1:0);
+ skb->len, (dsp_options&DSP_OPT_ULAW) ? 1 : 0);
+ }
+ /* we need to process receive data if software */
+ if (dsp->conf && dsp->conf->software) {
+ /* process data from card at cmx */
+ dsp_cmx_receive(dsp, skb);
+ }
+
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* send dtmf result, if any */
+ if (digits) {
while (*digits) {
+ int k;
struct sk_buff *nskb;
if (dsp_debug & DEBUG_DSP_DTMF)
printk(KERN_DEBUG "%s: digit"
"(%c) to layer %s\n",
__func__, *digits, dsp->name);
- cont = DTMF_TONE_VAL | *digits;
+ k = *digits | DTMF_TONE_VAL;
nskb = _alloc_mISDN_skb(PH_CONTROL_IND,
- MISDN_ID_ANY, sizeof(int), &cont,
- GFP_ATOMIC);
+ MISDN_ID_ANY, sizeof(int), &k,
+ GFP_ATOMIC);
if (nskb) {
if (dsp->up) {
if (dsp->up->send(
dsp->up, nskb))
- dev_kfree_skb(nskb);
+ dev_kfree_skb(nskb);
} else
dev_kfree_skb(nskb);
}
digits++;
}
}
- /* we need to process receive data if software */
- spin_lock_irqsave(&dsp_lock, flags);
- if (dsp->pcm_slot_tx < 0 && dsp->pcm_slot_rx < 0) {
- /* process data from card at cmx */
- dsp_cmx_receive(dsp, skb);
- }
- spin_unlock_irqrestore(&dsp_lock, flags);
-
if (dsp->rx_disabled) {
/* if receive is not allowed */
break;
@@ -787,7 +795,7 @@ dsp_function(struct mISDNchannel *ch, struct sk_buff *skb)
if (dsp->up) {
if (dsp->up->send(
dsp->up, nskb))
- dev_kfree_skb(nskb);
+ dev_kfree_skb(nskb);
} else
dev_kfree_skb(nskb);
}
@@ -946,7 +954,7 @@ dsp_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
int err = 0;
if (debug & DEBUG_DSP_CTRL)
- printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
+ printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
switch (cmd) {
case OPEN_CHANNEL:
@@ -1169,9 +1177,9 @@ static int dsp_init(void)
/* init conversion tables */
dsp_audio_generate_law_tables();
- dsp_silence = (dsp_options&DSP_OPT_ULAW)?0xff:0x2a;
- dsp_audio_law_to_s32 = (dsp_options&DSP_OPT_ULAW)?dsp_audio_ulaw_to_s32:
- dsp_audio_alaw_to_s32;
+ dsp_silence = (dsp_options&DSP_OPT_ULAW) ? 0xff : 0x2a;
+ dsp_audio_law_to_s32 = (dsp_options&DSP_OPT_ULAW) ?
+ dsp_audio_ulaw_to_s32 : dsp_audio_alaw_to_s32;
dsp_audio_generate_s2law_table();
dsp_audio_generate_seven();
dsp_audio_generate_mix_table();
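The dsp_rx_off_member() rework above keeps receive data flowing from the card whenever any software consumer still needs it, and only then allows rx to be switched off. A minimal sketch of that decision, restricted to the conditions visible in the hunk and using an illustrative function name:

static int dsp_needs_rx_in_software(struct dsp *dsp)
{
        if (dsp->dtmf.software)                 /* software DTMF decoding */
                return 1;
        if (dsp->echo.software)                 /* software echo */
                return 1;
        if (dsp->conf && dsp->conf->software)   /* software bridging */
                return 1;
        return 0;                               /* rx may be turned off */
}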
diff --git a/drivers/isdn/mISDN/dsp_dtmf.c b/drivers/isdn/mISDN/dsp_dtmf.c
index efc371c1f0d..9ae2d33b06f 100644
--- a/drivers/isdn/mISDN/dsp_dtmf.c
+++ b/drivers/isdn/mISDN/dsp_dtmf.c
@@ -51,6 +51,9 @@ void dsp_dtmf_hardware(struct dsp *dsp)
{
int hardware = 1;
+ if (!dsp->dtmf.enable)
+ return;
+
if (!dsp->features.hfc_dtmf)
hardware = 0;
diff --git a/drivers/isdn/mISDN/dsp_ecdis.h b/drivers/isdn/mISDN/dsp_ecdis.h
index 8a20af43308..21dbd153ee2 100644
--- a/drivers/isdn/mISDN/dsp_ecdis.h
+++ b/drivers/isdn/mISDN/dsp_ecdis.h
@@ -91,7 +91,7 @@ int16_t amp)
&& det->tone_cycle_duration <= 475*8) {
det->good_cycles++;
if (det->good_cycles > 2)
- det->hit = TRUE;
+ det->hit = TRUE;
}
det->tone_cycle_duration = 0;
}
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index 18cf87c113e..e9941678edf 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -55,20 +55,19 @@ static ssize_t
attr_show_args(struct device *dev, struct device_attribute *attr, char *buf)
{
struct mISDN_dsp_element *elem = dev_get_drvdata(dev);
- ssize_t len = 0;
- int i = 0;
+ int i;
+ char *p = buf;
*buf = 0;
- for (; i < elem->num_args; ++i)
- len = sprintf(buf, "%sName: %s\n%s%s%sDescription: %s\n"
- "\n", buf,
+ for (i = 0; i < elem->num_args; i++)
+ p += sprintf(p, "Name: %s\n%s%s%sDescription: %s\n\n",
elem->args[i].name,
elem->args[i].def ? "Default: " : "",
elem->args[i].def ? elem->args[i].def : "",
elem->args[i].def ? "\n" : "",
elem->args[i].desc);
- return len;
+ return p - buf;
}
static struct device_attribute element_attributes[] = {
@@ -347,7 +346,8 @@ void dsp_pipeline_process_tx(struct dsp_pipeline *pipeline, u8 *data, int len)
entry->elem->process_tx(entry->p, data, len);
}
-void dsp_pipeline_process_rx(struct dsp_pipeline *pipeline, u8 *data, int len)
+void dsp_pipeline_process_rx(struct dsp_pipeline *pipeline, u8 *data, int len,
+ unsigned int txlen)
{
struct dsp_pipeline_entry *entry;
@@ -356,7 +356,7 @@ void dsp_pipeline_process_rx(struct dsp_pipeline *pipeline, u8 *data, int len)
list_for_each_entry_reverse(entry, &pipeline->list, list)
if (entry->elem->process_rx)
- entry->elem->process_rx(entry->p, data, len);
+ entry->elem->process_rx(entry->p, data, len, txlen);
}
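attr_show_args() above previously passed buf to its own sprintf() format, printing the buffer into itself, which is undefined behaviour and also returned only the length of the last iteration. The fix advances a write pointer and reports the total. A minimal sketch of that accumulation pattern with illustrative names:

#include <linux/kernel.h>

static ssize_t example_show(char *buf, const char * const *names, int n)
{
        char *p = buf;
        int i;

        for (i = 0; i < n; i++)
                p += sprintf(p, "Name: %s\n", names[i]);

        return p - buf;                 /* total bytes written to buf */
}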
diff --git a/drivers/isdn/mISDN/dsp_tones.c b/drivers/isdn/mISDN/dsp_tones.c
index 7a9af66f4b1..1debf53670d 100644
--- a/drivers/isdn/mISDN/dsp_tones.c
+++ b/drivers/isdn/mISDN/dsp_tones.c
@@ -253,18 +253,24 @@ static struct pattern {
{8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_DIALPBX,
- {DATA_GA, DATA_S, DATA_GA, DATA_S, DATA_GA, DATA_S, NULL, NULL, NULL, NULL},
- {SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL},
+ {DATA_GA, DATA_S, DATA_GA, DATA_S, DATA_GA, DATA_S, NULL, NULL, NULL,
+ NULL},
+ {SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, NULL, NULL, NULL,
+ NULL},
{2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
{TONE_GERMAN_OLDDIALPBX,
- {DATA_GO, DATA_S, DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL, NULL},
- {SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL},
+ {DATA_GO, DATA_S, DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL,
+ NULL},
+ {SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL,
+ NULL},
{2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
{TONE_AMERICAN_DIALPBX,
- {DATA_DT, DATA_S, DATA_DT, DATA_S, DATA_DT, DATA_S, NULL, NULL, NULL, NULL},
- {SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, NULL, NULL, NULL, NULL},
+ {DATA_DT, DATA_S, DATA_DT, DATA_S, DATA_DT, DATA_S, NULL, NULL, NULL,
+ NULL},
+ {SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, NULL, NULL, NULL,
+ NULL},
{2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
{TONE_GERMAN_RINGING,
@@ -434,7 +440,7 @@ dsp_tone_hw_message(struct dsp *dsp, u8 *sample, int len)
/* unlocking is not required, because we don't expect a response */
nskb = _alloc_mISDN_skb(PH_CONTROL_REQ,
- (len)?HFC_SPL_LOOP_ON:HFC_SPL_LOOP_OFF, len, sample,
+ (len) ? HFC_SPL_LOOP_ON : HFC_SPL_LOOP_OFF, len, sample,
GFP_ATOMIC);
if (nskb) {
if (dsp->ch.peer) {
@@ -498,8 +504,7 @@ dsp_tone(struct dsp *dsp, int tone)
/* we turn off the tone */
if (!tone) {
- if (dsp->features.hfc_loops)
- if (timer_pending(&tonet->tl))
+ if (dsp->features.hfc_loops && timer_pending(&tonet->tl))
del_timer(&tonet->tl);
if (dsp->features.hfc_loops)
dsp_tone_hw_message(dsp, NULL, 0);
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
index ab1168a110a..0481a0cdf6d 100644
--- a/drivers/isdn/mISDN/hwchannel.c
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -185,13 +185,13 @@ recv_Echannel(struct dchannel *ech, struct dchannel *dch)
EXPORT_SYMBOL(recv_Echannel);
void
-recv_Bchannel(struct bchannel *bch)
+recv_Bchannel(struct bchannel *bch, unsigned int id)
{
struct mISDNhead *hh;
hh = mISDN_HEAD_P(bch->rx_skb);
hh->prim = PH_DATA_IND;
- hh->id = MISDN_ID_ANY;
+ hh->id = id;
if (bch->rcount >= 64) {
printk(KERN_WARNING "B-channel %p receive queue overflow, "
"fushing!\n", bch);
diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h
index a23d575449f..bc26c890d9a 100644
--- a/drivers/isdn/mISDN/l1oip.h
+++ b/drivers/isdn/mISDN/l1oip.h
@@ -76,7 +76,7 @@ struct l1oip {
struct sockaddr_in sin_local; /* local socket name */
struct sockaddr_in sin_remote; /* remote socket name */
struct msghdr sendmsg; /* ip message to send */
- struct iovec sendiov; /* iov for message */
+ struct kvec sendiov; /* iov for message */
/* frame */
struct l1oip_chan chan[128]; /* channel instances */
diff --git a/drivers/isdn/mISDN/l1oip_codec.c b/drivers/isdn/mISDN/l1oip_codec.c
index e4ecba3d48d..bbfd1b863ed 100644
--- a/drivers/isdn/mISDN/l1oip_codec.c
+++ b/drivers/isdn/mISDN/l1oip_codec.c
@@ -48,6 +48,7 @@ NOTE: The bytes are handled as they are law-encoded.
#include <linux/vmalloc.h>
#include <linux/mISDNif.h>
+#include <linux/in.h>
#include "core.h"
#include "l1oip.h"
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index abe57498957..990e6a7e667 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -279,7 +279,6 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
int multi = 0;
u8 frame[len+32];
struct socket *socket = NULL;
- mm_segment_t oldfs;
if (debug & DEBUG_L1OIP_MSG)
printk(KERN_DEBUG "%s: sending data to socket (len = %d)\n",
@@ -308,8 +307,8 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
/* assemble frame */
*p++ = (L1OIP_VERSION<<6) /* version and coding */
- | (hc->pri?0x20:0x00) /* type */
- | (hc->id?0x10:0x00) /* id */
+ | (hc->pri ? 0x20 : 0x00) /* type */
+ | (hc->id ? 0x10 : 0x00) /* id */
| localcodec;
if (hc->id) {
*p++ = hc->id>>24; /* id */
@@ -317,7 +316,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
*p++ = hc->id>>8;
*p++ = hc->id;
}
- *p++ = (multi == 1)?0x80:0x00 + channel; /* m-flag, channel */
+ *p++ = (multi == 1) ? 0x80 : 0x00 + channel; /* m-flag, channel */
if (multi == 1)
*p++ = len; /* length */
*p++ = timebase>>8; /* time base */
@@ -352,10 +351,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
"= %d)\n", __func__, len);
hc->sendiov.iov_base = frame;
hc->sendiov.iov_len = len;
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- len = sock_sendmsg(socket, &hc->sendmsg, len);
- set_fs(oldfs);
+ len = kernel_sendmsg(socket, &hc->sendmsg, &hc->sendiov, 1, len);
/* give socket back */
hc->socket = socket; /* no locking required */
@@ -401,12 +397,12 @@ l1oip_socket_recv(struct l1oip *hc, u8 remotecodec, u8 channel, u16 timebase,
}
/* prepare message */
- nskb = mI_alloc_skb((remotecodec == 3)?(len<<1):len, GFP_ATOMIC);
+ nskb = mI_alloc_skb((remotecodec == 3) ? (len<<1) : len, GFP_ATOMIC);
if (!nskb) {
printk(KERN_ERR "%s: No mem for skb.\n", __func__);
return;
}
- p = skb_put(nskb, (remotecodec == 3)?(len<<1):len);
+ p = skb_put(nskb, (remotecodec == 3) ? (len<<1) : len);
if (remotecodec == 1 && ulaw)
l1oip_alaw_to_ulaw(buf, len, p);
@@ -458,7 +454,7 @@ l1oip_socket_recv(struct l1oip *hc, u8 remotecodec, u8 channel, u16 timebase,
hc->chan[channel].disorder_flag ^= 1;
if (nskb)
#endif
- queue_ch_frame(&bch->ch, PH_DATA_IND, rx_counter, nskb);
+ queue_ch_frame(&bch->ch, PH_DATA_IND, rx_counter, nskb);
}
}
@@ -660,21 +656,29 @@ l1oip_socket_thread(void *data)
struct l1oip *hc = (struct l1oip *)data;
int ret = 0;
struct msghdr msg;
- struct iovec iov;
- mm_segment_t oldfs;
struct sockaddr_in sin_rx;
- unsigned char recvbuf[1500];
+ unsigned char *recvbuf;
+ size_t recvbuf_size = 1500;
int recvlen;
struct socket *socket = NULL;
DECLARE_COMPLETION(wait);
+ /* allocate buffer memory */
+ recvbuf = kmalloc(recvbuf_size, GFP_KERNEL);
+ if (!recvbuf) {
+ printk(KERN_ERR "%s: Failed to alloc recvbuf.\n", __func__);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
/* make daemon */
allow_signal(SIGTERM);
/* create socket */
if (sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &socket)) {
printk(KERN_ERR "%s: Failed to create socket.\n", __func__);
- return -EIO;
+ ret = -EIO;
+ goto fail;
}
/* set incoming address */
@@ -708,16 +712,12 @@ l1oip_socket_thread(void *data)
msg.msg_namelen = sizeof(sin_rx);
msg.msg_control = NULL;
msg.msg_controllen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
/* build send message */
hc->sendmsg.msg_name = &hc->sin_remote;
hc->sendmsg.msg_namelen = sizeof(hc->sin_remote);
hc->sendmsg.msg_control = NULL;
hc->sendmsg.msg_controllen = 0;
- hc->sendmsg.msg_iov = &hc->sendiov;
- hc->sendmsg.msg_iovlen = 1;
/* give away socket */
spin_lock(&hc->socket_lock);
@@ -729,18 +729,18 @@ l1oip_socket_thread(void *data)
printk(KERN_DEBUG "%s: socket created and open\n",
__func__);
while (!signal_pending(current)) {
- iov.iov_base = recvbuf;
- iov.iov_len = sizeof(recvbuf);
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- recvlen = sock_recvmsg(socket, &msg, sizeof(recvbuf), 0);
- set_fs(oldfs);
+ struct kvec iov = {
+ .iov_base = recvbuf,
+ .iov_len = recvbuf_size,
+ };
+ recvlen = kernel_recvmsg(socket, &msg, &iov, 1,
+ recvbuf_size, 0);
if (recvlen > 0) {
l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
} else {
if (debug & DEBUG_L1OIP_SOCKET)
- printk(KERN_WARNING "%s: broken pipe on socket\n",
- __func__);
+ printk(KERN_WARNING
+ "%s: broken pipe on socket\n", __func__);
}
}
@@ -760,6 +760,9 @@ l1oip_socket_thread(void *data)
__func__);
fail:
+ /* free recvbuf */
+ kfree(recvbuf);
+
/* close socket */
if (socket)
sock_release(socket);
@@ -912,7 +915,7 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
p = skb->data;
l = skb->len;
while (l) {
- ll = (l < L1OIP_MAX_PERFRAME)?l:L1OIP_MAX_PERFRAME;
+ ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME;
l1oip_socket_send(hc, 0, dch->slot, 0,
hc->chan[dch->slot].tx_counter++, p, ll);
p += ll;
@@ -1160,7 +1163,7 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
p = skb->data;
l = skb->len;
while (l) {
- ll = (l < L1OIP_MAX_PERFRAME)?l:L1OIP_MAX_PERFRAME;
+ ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME;
l1oip_socket_send(hc, hc->codec, bch->slot, 0,
hc->chan[bch->slot].tx_counter, p, ll);
hc->chan[bch->slot].tx_counter += ll;
@@ -1318,8 +1321,8 @@ init_card(struct l1oip *hc, int pri, int bundle)
spin_lock_init(&hc->socket_lock);
hc->idx = l1oip_cnt;
hc->pri = pri;
- hc->d_idx = pri?16:3;
- hc->b_num = pri?30:2;
+ hc->d_idx = pri ? 16 : 3;
+ hc->b_num = pri ? 30 : 2;
hc->bundle = bundle;
if (hc->pri)
sprintf(hc->name, "l1oip-e1.%d", l1oip_cnt + 1);
@@ -1504,9 +1507,9 @@ l1oip_init(void)
if (debug & DEBUG_L1OIP_INIT)
printk(KERN_DEBUG "%s: interface %d is %s with %s.\n",
- __func__, l1oip_cnt, pri?"PRI":"BRI",
- bundle?"bundled IP packet for all B-channels"
- :"seperate IP packets for every B-channel");
+ __func__, l1oip_cnt, pri ? "PRI" : "BRI",
+ bundle ? "bundled IP packet for all B-channels" :
+ "seperate IP packets for every B-channel");
hc = kzalloc(sizeof(struct l1oip), GFP_ATOMIC);
if (!hc) {
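The l1oip changes above switch from sock_sendmsg()/sock_recvmsg() wrapped in get_fs()/set_fs(KERNEL_DS) to kernel_sendmsg()/kernel_recvmsg(), which take a struct kvec describing kernel memory directly. A minimal sketch of the send side as a hypothetical wrapper:

#include <linux/net.h>
#include <linux/uio.h>

static int example_udp_send(struct socket *sock, struct msghdr *msg,
                            void *buf, size_t len)
{
        struct kvec iov = {
                .iov_base = buf,
                .iov_len  = len,
        };

        return kernel_sendmsg(sock, msg, &iov, 1, len);
}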
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index d6e2863f224..9c2589e986d 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -99,7 +99,7 @@ l2m_debug(struct FsmInst *fi, char *fmt, ...)
if (!(*debug & DEBUG_L2_FSM))
return;
va_start(va, fmt);
- printk(KERN_DEBUG "l2 (tei %d): ", l2->tei);
+ printk(KERN_DEBUG "l2 (sapi %d tei %d): ", l2->sapi, l2->tei);
vprintk(fmt, va);
printk("\n");
va_end(va);
@@ -1859,20 +1859,18 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
psapi >>= 2;
ptei >>= 1;
if (psapi != l2->sapi) {
- /* not our bussiness
- * printk(KERN_DEBUG "%s: sapi %d/%d sapi mismatch\n",
- * __func__,
- * psapi, l2->sapi);
- */
+ /* not our business */
+ if (*debug & DEBUG_L2)
+ printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
+ __func__, psapi, l2->sapi);
dev_kfree_skb(skb);
return 0;
}
if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
- /* not our bussiness
- * printk(KERN_DEBUG "%s: tei %d/%d sapi %d mismatch\n",
- * __func__,
- * ptei, l2->tei, psapi);
- */
+ /* not our business */
+ if (*debug & DEBUG_L2)
+ printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
+ __func__, ptei, l2->tei);
dev_kfree_skb(skb);
return 0;
}
@@ -1927,8 +1925,8 @@ l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
int ret = -EINVAL;
if (*debug & DEBUG_L2_RECV)
- printk(KERN_DEBUG "%s: prim(%x) id(%x) tei(%d)\n",
- __func__, hh->prim, hh->id, l2->tei);
+ printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n",
+ __func__, hh->prim, hh->id, l2->sapi, l2->tei);
switch (hh->prim) {
case PH_DATA_IND:
ret = ph_data_indication(l2, hh, skb);
@@ -2068,7 +2066,8 @@ l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
}
struct layer2 *
-create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, u_long arg)
+create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
+ int sapi)
{
struct layer2 *l2;
struct channel_req rq;
@@ -2089,7 +2088,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, u_long arg)
test_and_set_bit(FLG_LAPD, &l2->flag);
test_and_set_bit(FLG_LAPD_NET, &l2->flag);
test_and_set_bit(FLG_MOD128, &l2->flag);
- l2->sapi = 0;
+ l2->sapi = sapi;
l2->maxlen = MAX_DFRAME_LEN;
if (test_bit(OPTION_L2_PMX, &options))
l2->window = 7;
@@ -2099,7 +2098,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, u_long arg)
test_and_set_bit(FLG_PTP, &l2->flag);
if (test_bit(OPTION_L2_FIXEDTEI, &options))
test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
- l2->tei = (u_int)arg;
+ l2->tei = tei;
l2->T200 = 1000;
l2->N200 = 3;
l2->T203 = 10000;
@@ -2114,7 +2113,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, u_long arg)
test_and_set_bit(FLG_LAPD, &l2->flag);
test_and_set_bit(FLG_MOD128, &l2->flag);
test_and_set_bit(FLG_ORIG, &l2->flag);
- l2->sapi = 0;
+ l2->sapi = sapi;
l2->maxlen = MAX_DFRAME_LEN;
if (test_bit(OPTION_L2_PMX, &options))
l2->window = 7;
@@ -2124,7 +2123,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, u_long arg)
test_and_set_bit(FLG_PTP, &l2->flag);
if (test_bit(OPTION_L2_FIXEDTEI, &options))
test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
- l2->tei = (u_int)arg;
+ l2->tei = tei;
l2->T200 = 1000;
l2->N200 = 3;
l2->T203 = 10000;
@@ -2180,7 +2179,7 @@ x75create(struct channel_req *crq)
if (crq->protocol != ISDN_P_B_X75SLP)
return -EPROTONOSUPPORT;
- l2 = create_l2(crq->ch, crq->protocol, 0, 0);
+ l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0);
if (!l2)
return -ENOMEM;
crq->ch = &l2->ch;
diff --git a/drivers/isdn/mISDN/layer2.h b/drivers/isdn/mISDN/layer2.h
index 6293f80dc2d..9547fb3707a 100644
--- a/drivers/isdn/mISDN/layer2.h
+++ b/drivers/isdn/mISDN/layer2.h
@@ -90,7 +90,7 @@ enum {
#define L2_STATE_COUNT (ST_L2_8+1)
extern struct layer2 *create_l2(struct mISDNchannel *, u_int,
- u_long, u_long);
+ u_long, int, int);
extern int tei_l2(struct layer2 *, u_int, u_long arg);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 508945d1b9c..c36f5213745 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -209,7 +209,7 @@ mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
err = -EFAULT;
- goto drop;
+ goto done;
}
memcpy(mISDN_HEAD_P(skb), skb->data, MISDN_HEADER_LEN);
@@ -222,7 +222,7 @@ mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
} else { /* use default for L2 messages */
if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
(sk->sk_protocol == ISDN_P_LAPD_NT))
- mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr;
+ mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr;
}
if (*debug & DEBUG_SOCKET)
@@ -230,19 +230,21 @@ mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
__func__, mISDN_HEAD_ID(skb));
err = -ENODEV;
- if (!_pms(sk)->ch.peer ||
- (err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb)))
- goto drop;
-
- err = len;
+ if (!_pms(sk)->ch.peer)
+ goto done;
+ err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb);
+ if (err)
+ goto done;
+ else {
+ skb = NULL;
+ err = len;
+ }
done:
+ if (skb)
+ kfree_skb(skb);
release_sock(sk);
return err;
-
-drop:
- kfree_skb(skb);
- goto done;
}
static int
@@ -292,7 +294,7 @@ static int
data_sock_ioctl_bound(struct sock *sk, unsigned int cmd, void __user *p)
{
struct mISDN_ctrl_req cq;
- int err = -EINVAL, val;
+ int err = -EINVAL, val[2];
struct mISDNchannel *bchan, *next;
lock_sock(sk);
@@ -328,12 +330,27 @@ data_sock_ioctl_bound(struct sock *sk, unsigned int cmd, void __user *p)
err = -EINVAL;
break;
}
- if (get_user(val, (int __user *)p)) {
+ val[0] = cmd;
+ if (get_user(val[1], (int __user *)p)) {
+ err = -EFAULT;
+ break;
+ }
+ err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr,
+ CONTROL_CHANNEL, val);
+ break;
+ case IMHOLD_L1:
+ if (sk->sk_protocol != ISDN_P_LAPD_NT
+ && sk->sk_protocol != ISDN_P_LAPD_TE) {
+ err = -EINVAL;
+ break;
+ }
+ val[0] = cmd;
+ if (get_user(val[1], (int __user *)p)) {
err = -EFAULT;
break;
}
err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr,
- CONTROL_CHANNEL, &val);
+ CONTROL_CHANNEL, val);
break;
default:
err = -EINVAL;
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index b452dead8fd..e04bad6c5ba 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -122,8 +122,11 @@ da_deactivate(struct FsmInst *fi, int event, void *arg)
}
read_unlock_irqrestore(&mgr->lock, flags);
/* All TEI are inactiv */
- mISDN_FsmAddTimer(&mgr->datimer, DATIMER_VAL, EV_DATIMER, NULL, 1);
- mISDN_FsmChangeState(fi, ST_L1_DEACT_PENDING);
+ if (!test_bit(OPTION_L1_HOLD, &mgr->options)) {
+ mISDN_FsmAddTimer(&mgr->datimer, DATIMER_VAL, EV_DATIMER,
+ NULL, 1);
+ mISDN_FsmChangeState(fi, ST_L1_DEACT_PENDING);
+ }
}
static void
@@ -132,9 +135,11 @@ da_ui(struct FsmInst *fi, int event, void *arg)
struct manager *mgr = fi->userdata;
/* restart da timer */
- mISDN_FsmDelTimer(&mgr->datimer, 2);
- mISDN_FsmAddTimer(&mgr->datimer, DATIMER_VAL, EV_DATIMER, NULL, 2);
-
+ if (!test_bit(OPTION_L1_HOLD, &mgr->options)) {
+ mISDN_FsmDelTimer(&mgr->datimer, 2);
+ mISDN_FsmAddTimer(&mgr->datimer, DATIMER_VAL, EV_DATIMER,
+ NULL, 2);
+ }
}
static void
@@ -222,7 +227,7 @@ tei_debug(struct FsmInst *fi, char *fmt, ...)
if (!(*debug & DEBUG_L2_TEIFSM))
return;
va_start(va, fmt);
- printk(KERN_DEBUG "tei(%d): ", tm->l2->tei);
+ printk(KERN_DEBUG "sapi(%d) tei(%d): ", tm->l2->sapi, tm->l2->tei);
vprintk(fmt, va);
printk("\n");
va_end(va);
@@ -421,7 +426,7 @@ done:
}
static void
-put_tei_msg(struct manager *mgr, u_char m_id, unsigned int ri, u_char tei)
+put_tei_msg(struct manager *mgr, u_char m_id, unsigned int ri, int tei)
{
struct sk_buff *skb;
u_char bp[8];
@@ -435,9 +440,8 @@ put_tei_msg(struct manager *mgr, u_char m_id, unsigned int ri, u_char tei)
bp[4] = ri >> 8;
bp[5] = ri & 0xff;
bp[6] = m_id;
- bp[7] = (tei << 1) | 1;
- skb = _alloc_mISDN_skb(PH_DATA_REQ, new_id(mgr),
- 8, bp, GFP_ATOMIC);
+ bp[7] = ((tei << 1) & 0xff) | 1;
+ skb = _alloc_mISDN_skb(PH_DATA_REQ, new_id(mgr), 8, bp, GFP_ATOMIC);
if (!skb) {
printk(KERN_WARNING "%s: no skb for tei msg\n", __func__);
return;
@@ -772,7 +776,7 @@ tei_ph_data_ind(struct teimgr *tm, u_int mt, u_char *dp, int len)
}
static struct layer2 *
-create_new_tei(struct manager *mgr, int tei)
+create_new_tei(struct manager *mgr, int tei, int sapi)
{
u_long opt = 0;
u_long flags;
@@ -781,12 +785,12 @@ create_new_tei(struct manager *mgr, int tei)
if (!mgr->up)
return NULL;
- if (tei < 64)
+ if ((tei >= 0) && (tei < 64))
test_and_set_bit(OPTION_L2_FIXEDTEI, &opt);
if (mgr->ch.st->dev->Dprotocols
& ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1)))
test_and_set_bit(OPTION_L2_PMX, &opt);
- l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, (u_int)opt, (u_long)tei);
+ l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, opt, tei, sapi);
if (!l2) {
printk(KERN_WARNING "%s:no memory for layer2\n", __func__);
return NULL;
@@ -834,12 +838,17 @@ new_tei_req(struct manager *mgr, u_char *dp)
ri += dp[1];
if (!mgr->up)
goto denied;
- tei = get_free_tei(mgr);
+ if (!(dp[3] & 1)) /* Extension bit != 1 */
+ goto denied;
+ if (dp[3] != 0xff)
+ tei = dp[3] >> 1; /* 3GPP TS 08.56 6.1.11.2 */
+ else
+ tei = get_free_tei(mgr);
if (tei < 0) {
printk(KERN_WARNING "%s:No free tei\n", __func__);
goto denied;
}
- l2 = create_new_tei(mgr, tei);
+ l2 = create_new_tei(mgr, tei, CTRL_SAPI);
if (!l2)
goto denied;
else
@@ -853,8 +862,7 @@ static int
ph_data_ind(struct manager *mgr, struct sk_buff *skb)
{
int ret = -EINVAL;
- struct layer2 *l2;
- u_long flags;
+ struct layer2 *l2, *nl2;
u_char mt;
if (skb->len < 8) {
@@ -863,7 +871,6 @@ ph_data_ind(struct manager *mgr, struct sk_buff *skb)
__func__, skb->len);
goto done;
}
- if (*debug & DEBUG_L2_TEI)
if ((skb->data[0] >> 2) != TEI_SAPI) /* not for us */
goto done;
@@ -900,11 +907,9 @@ ph_data_ind(struct manager *mgr, struct sk_buff *skb)
new_tei_req(mgr, &skb->data[4]);
goto done;
}
- read_lock_irqsave(&mgr->lock, flags);
- list_for_each_entry(l2, &mgr->layer2, list) {
+ list_for_each_entry_safe(l2, nl2, &mgr->layer2, list) {
tei_ph_data_ind(l2->tm, mt, &skb->data[4], skb->len - 4);
}
- read_unlock_irqrestore(&mgr->lock, flags);
done:
return ret;
}
@@ -971,8 +976,6 @@ create_teimgr(struct manager *mgr, struct channel_req *crq)
__func__, dev_name(&mgr->ch.st->dev->dev),
crq->protocol, crq->adr.dev, crq->adr.channel,
crq->adr.sapi, crq->adr.tei);
- if (crq->adr.sapi != 0) /* not supported yet */
- return -EINVAL;
if (crq->adr.tei > GROUP_TEI)
return -EINVAL;
if (crq->adr.tei < 64)
@@ -1019,8 +1022,8 @@ create_teimgr(struct manager *mgr, struct channel_req *crq)
}
return 0;
}
- l2 = create_l2(crq->ch, crq->protocol, (u_int)opt,
- (u_long)crq->adr.tei);
+ l2 = create_l2(crq->ch, crq->protocol, opt,
+ crq->adr.tei, crq->adr.sapi);
if (!l2)
return -ENOMEM;
l2->tm = kzalloc(sizeof(struct teimgr), GFP_KERNEL);
@@ -1103,6 +1106,7 @@ free_teimanager(struct manager *mgr)
{
struct layer2 *l2, *nl2;
+ test_and_clear_bit(OPTION_L1_HOLD, &mgr->options);
if (test_bit(MGR_OPT_NETWORK, &mgr->options)) {
/* not locked lock is taken in release tei */
mgr->up = NULL;
@@ -1133,13 +1137,26 @@ static int
ctrl_teimanager(struct manager *mgr, void *arg)
{
/* currently we only have one option */
- int clean = *((int *)arg);
-
- if (clean)
- test_and_set_bit(OPTION_L2_CLEANUP, &mgr->options);
- else
- test_and_clear_bit(OPTION_L2_CLEANUP, &mgr->options);
- return 0;
+ int *val = (int *)arg;
+ int ret = 0;
+
+ switch (val[0]) {
+ case IMCLEAR_L2:
+ if (val[1])
+ test_and_set_bit(OPTION_L2_CLEANUP, &mgr->options);
+ else
+ test_and_clear_bit(OPTION_L2_CLEANUP, &mgr->options);
+ break;
+ case IMHOLD_L1:
+ if (val[1])
+ test_and_set_bit(OPTION_L1_HOLD, &mgr->options);
+ else
+ test_and_clear_bit(OPTION_L1_HOLD, &mgr->options);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
}
/* This function does create a L2 for fixed TEI in NT Mode */
@@ -1147,7 +1164,7 @@ static int
check_data(struct manager *mgr, struct sk_buff *skb)
{
struct mISDNhead *hh = mISDN_HEAD_P(skb);
- int ret, tei;
+ int ret, tei, sapi;
struct layer2 *l2;
if (*debug & DEBUG_L2_CTRL)
@@ -1159,20 +1176,27 @@ check_data(struct manager *mgr, struct sk_buff *skb)
return -ENOTCONN;
if (skb->len != 3)
return -ENOTCONN;
- if (skb->data[0] != 0)
- /* only SAPI 0 command */
- return -ENOTCONN;
+ if (skb->data[0] & 3) /* EA0 and CR must be 0 */
+ return -EINVAL;
+ sapi = skb->data[0] >> 2;
if (!(skb->data[1] & 1)) /* invalid EA1 */
return -EINVAL;
- tei = skb->data[1] >> 0;
+ tei = skb->data[1] >> 1;
if (tei > 63) /* not a fixed tei */
return -ENOTCONN;
if ((skb->data[2] & ~0x10) != SABME)
return -ENOTCONN;
/* We got a SABME for a fixed TEI */
- l2 = create_new_tei(mgr, tei);
- if (!l2)
+ if (*debug & DEBUG_L2_CTRL)
+ printk(KERN_DEBUG "%s: SABME sapi(%d) tei(%d)\n",
+ __func__, sapi, tei);
+ l2 = create_new_tei(mgr, tei, sapi);
+ if (!l2) {
+ if (*debug & DEBUG_L2_CTRL)
+ printk(KERN_DEBUG "%s: failed to create new tei\n",
+ __func__);
return -ENOMEM;
+ }
ret = l2->ch.send(&l2->ch, skb);
return ret;
}
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index bbd99d3282c..5b7e9bf514f 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -259,7 +259,7 @@ mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
return ret;
}
-static struct file_operations mISDN_fops = {
+static const struct file_operations mISDN_fops = {
.read = mISDN_read,
.poll = mISDN_poll,
.ioctl = mISDN_ioctl,
diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c
index 1aa46a390a0..173d104d9ff 100644
--- a/drivers/leds/leds-h1940.c
+++ b/drivers/leds/leds-h1940.c
@@ -16,6 +16,8 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/leds.h>
+#include <linux/gpio.h>
+
#include <mach/regs-gpio.h>
#include <mach/hardware.h>
#include <mach/h1940-latch.h>
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index aa2e7ae0cda..aa7acf3b922 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
+#include <linux/gpio.h>
#include <mach/hardware.h>
#include <mach/regs-gpio.h>
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index a3d3cbab359..0aaa0597a62 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
config LGUEST
tristate "Linux hypervisor example code"
- depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX
+ depends on X86_32 && EXPERIMENTAL && EVENTFD
select HVC_DRIVER
---help---
This is a very simple module which allows you to run
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 4845fb3cf74..a6974e9b8eb 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -95,7 +95,7 @@ static __init int map_switcher(void)
* array of struct pages. It increments that pointer, but we don't
* care. */
pagep = switcher_page;
- err = map_vm_area(switcher_vma, PAGE_KERNEL, &pagep);
+ err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
if (err) {
printk("lguest: map_vm_area failed: %i\n", err);
goto free_vma;
@@ -188,6 +188,9 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
{
/* We stop running once the Guest is dead. */
while (!cpu->lg->dead) {
+ unsigned int irq;
+ bool more;
+
/* First we run any hypercalls the Guest wants done. */
if (cpu->hcall)
do_hypercalls(cpu);
@@ -195,23 +198,23 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
/* It's possible the Guest did a NOTIFY hypercall to the
* Launcher, in which case we return from the read() now. */
if (cpu->pending_notify) {
- if (put_user(cpu->pending_notify, user))
- return -EFAULT;
- return sizeof(cpu->pending_notify);
+ if (!send_notify_to_eventfd(cpu)) {
+ if (put_user(cpu->pending_notify, user))
+ return -EFAULT;
+ return sizeof(cpu->pending_notify);
+ }
}
/* Check for signals */
if (signal_pending(current))
return -ERESTARTSYS;
- /* If Waker set break_out, return to Launcher. */
- if (cpu->break_out)
- return -EAGAIN;
-
/* Check if there are any interrupts which can be delivered now:
* if so, this sets up the hander to be executed when we next
* run the Guest. */
- maybe_do_interrupt(cpu);
+ irq = interrupt_pending(cpu, &more);
+ if (irq < LGUEST_IRQS)
+ try_deliver_interrupt(cpu, irq, more);
/* All long-lived kernel loops need to check with this horrible
* thing called the freezer. If the Host is trying to suspend,
@@ -224,10 +227,15 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
break;
/* If the Guest asked to be stopped, we sleep. The Guest's
- * clock timer or LHREQ_BREAK from the Waker will wake us. */
+ * clock timer will wake us. */
if (cpu->halted) {
set_current_state(TASK_INTERRUPTIBLE);
- schedule();
+ /* Just before we sleep, make sure no interrupt snuck in
+ * which we should be doing. */
+ if (interrupt_pending(cpu, &more) < LGUEST_IRQS)
+ set_current_state(TASK_RUNNING);
+ else
+ schedule();
continue;
}
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 54d66f05fef..c29ffa19cb7 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -37,6 +37,10 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
/* This call does nothing, except by breaking out of the Guest
* it makes us process all the asynchronous hypercalls. */
break;
+ case LHCALL_SEND_INTERRUPTS:
+ /* This call does nothing too, but by breaking out of the Guest
+ * it makes us process any pending interrupts. */
+ break;
case LHCALL_LGUEST_INIT:
/* You can't get here unless you're already initialized. Don't
* do that. */
@@ -73,11 +77,21 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
break;
case LHCALL_SET_PTE:
+#ifdef CONFIG_X86_PAE
+ guest_set_pte(cpu, args->arg1, args->arg2,
+ __pte(args->arg3 | (u64)args->arg4 << 32));
+#else
guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
+#endif
+ break;
+ case LHCALL_SET_PGD:
+ guest_set_pgd(cpu->lg, args->arg1, args->arg2);
break;
+#ifdef CONFIG_X86_PAE
case LHCALL_SET_PMD:
guest_set_pmd(cpu->lg, args->arg1, args->arg2);
break;
+#endif
case LHCALL_SET_CLOCKEVENT:
guest_set_clockevent(cpu, args->arg1);
break;
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 6e99adbe194..0e9067b0d50 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -128,30 +128,39 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
/*H:205
* Virtual Interrupts.
*
- * maybe_do_interrupt() gets called before every entry to the Guest, to see if
- * we should divert the Guest to running an interrupt handler. */
-void maybe_do_interrupt(struct lg_cpu *cpu)
+ * interrupt_pending() returns the first pending interrupt which isn't blocked
+ * by the Guest. It is called before every entry to the Guest, and just before
+ * we go to sleep when the Guest has halted itself. */
+unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
{
unsigned int irq;
DECLARE_BITMAP(blk, LGUEST_IRQS);
- struct desc_struct *idt;
/* If the Guest hasn't even initialized yet, we can do nothing. */
if (!cpu->lg->lguest_data)
- return;
+ return LGUEST_IRQS;
/* Take our "irqs_pending" array and remove any interrupts the Guest
* wants blocked: the result ends up in "blk". */
if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
sizeof(blk)))
- return;
+ return LGUEST_IRQS;
bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);
/* Find the first interrupt. */
irq = find_first_bit(blk, LGUEST_IRQS);
- /* None? Nothing to do */
- if (irq >= LGUEST_IRQS)
- return;
+ *more = find_next_bit(blk, LGUEST_IRQS, irq+1);
+
+ return irq;
+}
+
+/* This actually diverts the Guest to running an interrupt handler, once an
+ * interrupt has been identified by interrupt_pending(). */
+void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
+{
+ struct desc_struct *idt;
+
+ BUG_ON(irq >= LGUEST_IRQS);
/* They may be in the middle of an iret, where they asked us never to
* deliver interrupts. */
@@ -170,8 +179,12 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
u32 irq_enabled;
if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
irq_enabled = 0;
- if (!irq_enabled)
+ if (!irq_enabled) {
+ /* Make sure they know an IRQ is pending. */
+ put_user(X86_EFLAGS_IF,
+ &cpu->lg->lguest_data->irq_pending);
return;
+ }
}
/* Look at the IDT entry the Guest gave us for this interrupt. The
@@ -194,6 +207,25 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
* here is a compromise which means at least it gets updated every
* timer interrupt. */
write_timestamp(cpu);
+
+ /* If there are no other interrupts we want to deliver, clear
+ * the pending flag. */
+ if (!more)
+ put_user(0, &cpu->lg->lguest_data->irq_pending);
+}
+
+/* And this is the routine when we want to set an interrupt for the Guest. */
+void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
+{
+ /* Next time the Guest runs, the core code will see if it can deliver
+ * this interrupt. */
+ set_bit(irq, cpu->irqs_pending);
+
+ /* Make sure it sees it; it might be asleep (eg. halted), or
+ * running the Guest right now, in which case kick_process()
+ * will knock it out. */
+ if (!wake_up_process(cpu->tsk))
+ kick_process(cpu->tsk);
}
/*:*/
@@ -510,10 +542,7 @@ static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt);
/* Remember the first interrupt is the timer interrupt. */
- set_bit(0, cpu->irqs_pending);
- /* If the Guest is actually stopped, we need to wake it up. */
- if (cpu->halted)
- wake_up_process(cpu->tsk);
+ set_interrupt(cpu, 0);
return HRTIMER_NORESTART;
}
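interrupt_pending() above boils down to "pending AND NOT blocked, then take the lowest set bit", with LGUEST_IRQS returned when nothing qualifies. The kernel version uses DECLARE_BITMAP, bitmap_andnot() and find_first_bit() so it scales past one word; here is the same logic on a single 32-bit word as a standalone sketch:

#include <stdio.h>

#define NR_IRQS 32	/* one word's worth, for illustration only */

/* Return the lowest deliverable IRQ, or NR_IRQS if there is none. */
static unsigned int first_pending(unsigned int pending, unsigned int blocked)
{
	unsigned int deliverable = pending & ~blocked;
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (deliverable & (1u << irq))
			return irq;
	return NR_IRQS;
}

int main(void)
{
	/* IRQs 1 and 3 pending, IRQ 1 blocked: expect 3. */
	printf("%u\n", first_pending(0x0a, 0x02));
	return 0;
}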
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index af92a176697..d4e8979735c 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -49,7 +49,7 @@ struct lg_cpu {
u32 cr2;
int ts;
u32 esp1;
- u8 ss1;
+ u16 ss1;
/* Bitmap of what has changed: see CHANGED_* above. */
int changed;
@@ -71,9 +71,7 @@ struct lg_cpu {
/* Virtual clock device */
struct hrtimer hrt;
- /* Do we need to stop what we're doing and return to userspace? */
- int break_out;
- wait_queue_head_t break_wq;
+ /* Did the Guest tell us to halt? */
int halted;
/* Pending virtual interrupts */
@@ -82,6 +80,16 @@ struct lg_cpu {
struct lg_cpu_arch arch;
};
+struct lg_eventfd {
+ unsigned long addr;
+ struct file *event;
+};
+
+struct lg_eventfd_map {
+ unsigned int num;
+ struct lg_eventfd map[];
+};
+
/* The private info the thread maintains about the guest. */
struct lguest
{
@@ -102,6 +110,8 @@ struct lguest
unsigned int stack_pages;
u32 tsc_khz;
+ struct lg_eventfd_map *eventfds;
+
/* Dead? */
const char *dead;
};
@@ -137,9 +147,13 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
* in the kernel. */
#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK)
#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT)
+#define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK)
+#define pmd_pfn(x) (pmd_val(x) >> PAGE_SHIFT)
/* interrupts_and_traps.c: */
-void maybe_do_interrupt(struct lg_cpu *cpu);
+unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more);
+void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more);
+void set_interrupt(struct lg_cpu *cpu, unsigned int irq);
bool deliver_trap(struct lg_cpu *cpu, unsigned int num);
void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
u32 low, u32 hi);
@@ -150,6 +164,7 @@ void setup_default_idt_entries(struct lguest_ro_state *state,
void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
const unsigned long *def);
void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
+bool send_notify_to_eventfd(struct lg_cpu *cpu);
void init_clockdev(struct lg_cpu *cpu);
bool check_syscall_vector(struct lguest *lg);
int init_interrupts(void);
@@ -168,7 +183,10 @@ void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt);
int init_guest_pagetable(struct lguest *lg);
void free_guest_pagetable(struct lguest *lg);
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
+void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 i);
+#ifdef CONFIG_X86_PAE
void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
+#endif
void guest_pagetable_clear_all(struct lg_cpu *cpu);
void guest_pagetable_flush_user(struct lg_cpu *cpu);
void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
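struct lg_eventfd_map ends in a C99 flexible array member, so a new registration allocates a complete replacement array of num + 1 entries instead of growing in place; that is what makes the RCU pointer swap in lguest_user.c possible. A standalone sketch of the allocate-and-append pattern, with invented element and field names:

#include <stdlib.h>
#include <string.h>

struct entry {
	unsigned long addr;
	int fd;
};

struct entry_map {
	unsigned int num;
	struct entry map[];	/* flexible array member */
};

/* Build a copy of 'old' with one extra slot appended; the caller frees
 * 'old' once no one else can still be reading it. */
static struct entry_map *map_append(const struct entry_map *old,
				    struct entry added)
{
	struct entry_map *new;

	new = malloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1));
	if (!new)
		return NULL;

	memcpy(new->map, old->map, sizeof(old->map[0]) * old->num);
	new->num = old->num;
	new->map[new->num++] = added;
	return new;
}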
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index df44d962626..e082cdac88b 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -228,7 +228,8 @@ extern void lguest_setup_irq(unsigned int irq);
* function. */
static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
unsigned index,
- void (*callback)(struct virtqueue *vq))
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
{
struct lguest_device *ldev = to_lgdev(vdev);
struct lguest_vq_info *lvq;
@@ -263,7 +264,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
/* OK, tell virtio_ring.c to set up a virtqueue now we know its size
* and we've got a pointer to its pages. */
vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN,
- vdev, lvq->pages, lg_notify, callback);
+ vdev, lvq->pages, lg_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto unmap;
@@ -312,6 +313,38 @@ static void lg_del_vq(struct virtqueue *vq)
kfree(lvq);
}
+static void lg_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+ lg_del_vq(vq);
+}
+
+static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ struct lguest_device *ldev = to_lgdev(vdev);
+ int i;
+
+ /* We must have this many virtqueues. */
+ if (nvqs > ldev->desc->num_vq)
+ return -ENOENT;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i]))
+ goto error;
+ }
+ return 0;
+
+error:
+ lg_del_vqs(vdev);
+ return PTR_ERR(vqs[i]);
+}
+
/* The ops structure which hooks everything together. */
static struct virtio_config_ops lguest_config_ops = {
.get_features = lg_get_features,
@@ -321,8 +354,8 @@ static struct virtio_config_ops lguest_config_ops = {
.get_status = lg_get_status,
.set_status = lg_set_status,
.reset = lg_reset,
- .find_vq = lg_find_vq,
- .del_vq = lg_del_vq,
+ .find_vqs = lg_find_vqs,
+ .del_vqs = lg_del_vqs,
};
/* The root device for the lguest virtio devices. This makes them appear as
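With the ops table switched from find_vq/del_vq to find_vqs/del_vqs, a virtio driver asks for all of its queues in one call and names each of them. A hedged sketch of the caller side against the ops shown above; the device, queue names and callbacks are invented for illustration:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void demo_rx_done(struct virtqueue *vq) { /* ... */ }
static void demo_tx_done(struct virtqueue *vq) { /* ... */ }

static int demo_setup_vqs(struct virtio_device *vdev,
			  struct virtqueue **rx, struct virtqueue **tx)
{
	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[] = { demo_rx_done, demo_tx_done };
	const char *names[] = { "demo-rx", "demo-tx" };
	int err;

	/* One call sets up both queues (lg_find_vqs on lguest). */
	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
	if (err)
		return err;

	*rx = vqs[0];
	*tx = vqs[1];
	return 0;
}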
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index b8ee103eed5..32e29712105 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -7,32 +7,83 @@
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/sched.h>
+#include <linux/eventfd.h>
+#include <linux/file.h>
#include "lg.h"
-/*L:055 When something happens, the Waker process needs a way to stop the
- * kernel running the Guest and return to the Launcher. So the Waker writes
- * LHREQ_BREAK and the value "1" to /dev/lguest to do this. Once the Launcher
- * has done whatever needs attention, it writes LHREQ_BREAK and "0" to release
- * the Waker. */
-static int break_guest_out(struct lg_cpu *cpu, const unsigned long __user*input)
+bool send_notify_to_eventfd(struct lg_cpu *cpu)
{
- unsigned long on;
+ unsigned int i;
+ struct lg_eventfd_map *map;
+
+ /* lg->eventfds is RCU-protected */
+ rcu_read_lock();
+ map = rcu_dereference(cpu->lg->eventfds);
+ for (i = 0; i < map->num; i++) {
+ if (map->map[i].addr == cpu->pending_notify) {
+ eventfd_signal(map->map[i].event, 1);
+ cpu->pending_notify = 0;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return cpu->pending_notify == 0;
+}
- /* Fetch whether they're turning break on or off. */
- if (get_user(on, input) != 0)
- return -EFAULT;
+static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
+{
+ struct lg_eventfd_map *new, *old = lg->eventfds;
- if (on) {
- cpu->break_out = 1;
- /* Pop it out of the Guest (may be running on different CPU) */
- wake_up_process(cpu->tsk);
- /* Wait for them to reset it */
- return wait_event_interruptible(cpu->break_wq, !cpu->break_out);
- } else {
- cpu->break_out = 0;
- wake_up(&cpu->break_wq);
- return 0;
+ if (!addr)
+ return -EINVAL;
+
+ /* Replace the old array with the new one, carefully: others can
+ * be accessing it at the same time */
+ new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
+ GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ /* First make identical copy. */
+ memcpy(new->map, old->map, sizeof(old->map[0]) * old->num);
+ new->num = old->num;
+
+ /* Now append new entry. */
+ new->map[new->num].addr = addr;
+ new->map[new->num].event = eventfd_fget(fd);
+ if (IS_ERR(new->map[new->num].event)) {
+ kfree(new);
+ return PTR_ERR(new->map[new->num].event);
}
+ new->num++;
+
+ /* Now put new one in place. */
+ rcu_assign_pointer(lg->eventfds, new);
+
+ /* We're not in a big hurry. Wait until no one's looking at the old
+ * version, then delete it. */
+ synchronize_rcu();
+ kfree(old);
+
+ return 0;
+}
+
+static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
+{
+ unsigned long addr, fd;
+ int err;
+
+ if (get_user(addr, input) != 0)
+ return -EFAULT;
+ input++;
+ if (get_user(fd, input) != 0)
+ return -EFAULT;
+
+ mutex_lock(&lguest_lock);
+ err = add_eventfd(lg, addr, fd);
+ mutex_unlock(&lguest_lock);
+
+ return err;
}
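send_notify_to_eventfd() and add_eventfd() are the two halves of a copy-update scheme: readers walk the current map under rcu_read_lock()/rcu_dereference(), the writer publishes a fresh copy with rcu_assign_pointer() and frees the old one only after synchronize_rcu(). The bare pattern, reduced to a single shared pointer (names invented; writers are assumed to be serialized by a mutex, as lguest_lock does above):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
	int value;
};

static struct cfg *current_cfg;

/* Reader: never blocks, sees either the old or the new copy. */
static int read_value(void)
{
	struct cfg *c;
	int v = 0;

	rcu_read_lock();
	c = rcu_dereference(current_cfg);
	if (c)
		v = c->value;
	rcu_read_unlock();
	return v;
}

/* Writer: publish a new copy, then free the old one once no reader
 * can still be looking at it. */
static int update_value(int value)
{
	struct cfg *new, *old = current_cfg;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->value = value;

	rcu_assign_pointer(current_cfg, new);
	synchronize_rcu();
	kfree(old);
	return 0;
}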
/*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
@@ -45,9 +96,8 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
return -EFAULT;
if (irq >= LGUEST_IRQS)
return -EINVAL;
- /* Next time the Guest runs, the core code will see if it can deliver
- * this interrupt. */
- set_bit(irq, cpu->irqs_pending);
+
+ set_interrupt(cpu, irq);
return 0;
}
@@ -126,9 +176,6 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
* address. */
lguest_arch_setup_regs(cpu, start_ip);
- /* Initialize the queue for the Waker to wait on */
- init_waitqueue_head(&cpu->break_wq);
-
/* We keep a pointer to the Launcher task (ie. current task) for when
* other Guests want to wake this one (eg. console input). */
cpu->tsk = current;
@@ -185,6 +232,13 @@ static int initialize(struct file *file, const unsigned long __user *input)
goto unlock;
}
+ lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL);
+ if (!lg->eventfds) {
+ err = -ENOMEM;
+ goto free_lg;
+ }
+ lg->eventfds->num = 0;
+
/* Populate the easy fields of our "struct lguest" */
lg->mem_base = (void __user *)args[0];
lg->pfn_limit = args[1];
@@ -192,7 +246,7 @@ static int initialize(struct file *file, const unsigned long __user *input)
/* This is the first cpu (cpu 0) and it will start booting at args[2] */
err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
if (err)
- goto release_guest;
+ goto free_eventfds;
/* Initialize the Guest's shadow page tables, using the toplevel
* address the Launcher gave us. This allocates memory, so can fail. */
@@ -211,7 +265,9 @@ static int initialize(struct file *file, const unsigned long __user *input)
free_regs:
/* FIXME: This should be in free_vcpu */
free_page(lg->cpus[0].regs_page);
-release_guest:
+free_eventfds:
+ kfree(lg->eventfds);
+free_lg:
kfree(lg);
unlock:
mutex_unlock(&lguest_lock);
@@ -252,11 +308,6 @@ static ssize_t write(struct file *file, const char __user *in,
/* Once the Guest is dead, you can only read() why it died. */
if (lg->dead)
return -ENOENT;
-
- /* If you're not the task which owns the Guest, all you can do
- * is break the Launcher out of running the Guest. */
- if (current != cpu->tsk && req != LHREQ_BREAK)
- return -EPERM;
}
switch (req) {
@@ -264,8 +315,8 @@ static ssize_t write(struct file *file, const char __user *in,
return initialize(file, input);
case LHREQ_IRQ:
return user_send_irq(cpu, input);
- case LHREQ_BREAK:
- return break_guest_out(cpu, input);
+ case LHREQ_EVENTFD:
+ return attach_eventfd(lg, input);
default:
return -EINVAL;
}
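From the Launcher's side the new request is just another write() to /dev/lguest: the first long selects LHREQ_EVENTFD, the next two carry the notify address and the eventfd descriptor that attach_eventfd() reads back out. A hypothetical userspace fragment; the variable names are invented and LHREQ_EVENTFD is assumed to come from linux/lguest_launcher.h alongside the other LHREQ_* values:

#include <unistd.h>
#include <sys/eventfd.h>
#include <linux/lguest_launcher.h>

/* Illustrative only: lguest_fd is the open /dev/lguest descriptor and
 * notify_addr is the guest address the device will NOTIFY on. */
static int hook_eventfd(int lguest_fd, unsigned long notify_addr)
{
	unsigned long args[3];
	int efd = eventfd(0, 0);

	if (efd < 0)
		return -1;

	args[0] = LHREQ_EVENTFD;
	args[1] = notify_addr;
	args[2] = efd;

	if (write(lguest_fd, args, sizeof(args)) < 0)
		return -1;
	return efd;
}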
@@ -303,6 +354,12 @@ static int close(struct inode *inode, struct file *file)
* the Launcher's memory management structure. */
mmput(lg->cpus[i].mm);
}
+
+ /* Release any eventfds they registered. */
+ for (i = 0; i < lg->eventfds->num; i++)
+ fput(lg->eventfds->map[i].event);
+ kfree(lg->eventfds);
+
/* If lg->dead doesn't contain an error code it will be NULL or a
* kmalloc()ed string, either of which is ok to hand to kfree(). */
if (!IS_ERR(lg->dead))
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index a059cf9980f..a6fe1abda24 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -53,6 +53,17 @@
* page. */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
+/* For PAE we need the PMD index as well. We use the last 2MB, so we
+ * will need the last pmd entry of the last pmd page. */
+#ifdef CONFIG_X86_PAE
+#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1)
+#define RESERVE_MEM 2U
+#define CHECK_GPGD_MASK _PAGE_PRESENT
+#else
+#define RESERVE_MEM 4U
+#define CHECK_GPGD_MASK _PAGE_TABLE
+#endif
+
/* We actually need a separate PTE page for each CPU. Remember that after the
* Switcher code itself comes two pages for each CPU, and we don't want this
* CPU's guest to see the pages of any other CPU. */
@@ -73,24 +84,59 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
unsigned int index = pgd_index(vaddr);
+#ifndef CONFIG_X86_PAE
/* We kill any Guest trying to touch the Switcher addresses. */
if (index >= SWITCHER_PGD_INDEX) {
kill_guest(cpu, "attempt to access switcher pages");
index = 0;
}
+#endif
/* Return a pointer to the index'th pgd entry for the i'th page table. */
return &cpu->lg->pgdirs[i].pgdir[index];
}
+#ifdef CONFIG_X86_PAE
+/* This routine then takes the PGD entry given above, which contains the
+ * address of the PMD page. It then returns a pointer to the PMD entry for the
+ * given address. */
+static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
+{
+ unsigned int index = pmd_index(vaddr);
+ pmd_t *page;
+
+ /* We kill any Guest trying to touch the Switcher addresses. */
+ if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
+ index >= SWITCHER_PMD_INDEX) {
+ kill_guest(cpu, "attempt to access switcher pages");
+ index = 0;
+ }
+
+ /* You should never call this if the PGD entry wasn't valid */
+ BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
+ page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
+
+ return &page[index];
+}
+#endif
+
/* This routine then takes the page directory entry returned above, which
* contains the address of the page table entry (PTE) page. It then returns a
* pointer to the PTE entry for the given address. */
-static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
+static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
+#ifdef CONFIG_X86_PAE
+ pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
+ pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
+
+ /* You should never call this if the PMD entry wasn't valid */
+ BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
+#else
pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
/* You should never call this if the PGD entry wasn't valid */
BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
- return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
+#endif
+
+ return &page[pte_index(vaddr)];
}
/* These two functions just like the above two, except they access the Guest
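The spgd/spmd/spte helpers are all doing the same thing at different levels: pull the right index bits out of the virtual address and subscript a table with them. On 32-bit PAE the split is 2 + 9 + 9 index bits above a 12-bit page offset; without PAE it is 10 + 10 above 12. A standalone sketch of the PAE split, mirroring what pgd_index()/pmd_index()/pte_index() compute (plain C, example address invented):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PTE_BITS	9	/* PAE: 512 PTEs per page table */
#define PMD_BITS	9	/* PAE: 512 PMDs per PMD page   */

int main(void)
{
	unsigned long vaddr = 0xc0123456UL;

	unsigned int pte = (vaddr >> PAGE_SHIFT) & ((1 << PTE_BITS) - 1);
	unsigned int pmd = (vaddr >> (PAGE_SHIFT + PTE_BITS)) & ((1 << PMD_BITS) - 1);
	unsigned int pgd = vaddr >> (PAGE_SHIFT + PTE_BITS + PMD_BITS);

	printf("pgd=%u pmd=%u pte=%u offset=%#lx\n",
	       pgd, pmd, pte, vaddr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}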
@@ -101,12 +147,32 @@ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}
-static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
+#ifdef CONFIG_X86_PAE
+static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
+{
+ unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
+ BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
+ return gpage + pmd_index(vaddr) * sizeof(pmd_t);
+}
+
+static unsigned long gpte_addr(struct lg_cpu *cpu,
+ pmd_t gpmd, unsigned long vaddr)
+{
+ unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
+
+ BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
+ return gpage + pte_index(vaddr) * sizeof(pte_t);
+}
+#else
+static unsigned long gpte_addr(struct lg_cpu *cpu,
+ pgd_t gpgd, unsigned long vaddr)
{
unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
+
BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
- return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
+ return gpage + pte_index(vaddr) * sizeof(pte_t);
}
+#endif
/*:*/
/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
@@ -171,7 +237,7 @@ static void release_pte(pte_t pte)
/* Remember that get_user_pages_fast() took a reference to the page, in
* get_pfn()? We have to put it back now. */
if (pte_flags(pte) & _PAGE_PRESENT)
- put_page(pfn_to_page(pte_pfn(pte)));
+ put_page(pte_page(pte));
}
/*:*/
@@ -184,11 +250,20 @@ static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
- if ((pgd_flags(gpgd) & ~_PAGE_TABLE) ||
+ if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
(pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
kill_guest(cpu, "bad page directory entry");
}
+#ifdef CONFIG_X86_PAE
+static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
+{
+ if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
+ (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
+ kill_guest(cpu, "bad page middle directory entry");
+}
+#endif
+
/*H:330
* (i) Looking up a page table entry when the Guest faults.
*
@@ -207,6 +282,11 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
pte_t gpte;
pte_t *spte;
+#ifdef CONFIG_X86_PAE
+ pmd_t *spmd;
+ pmd_t gpmd;
+#endif
+
/* First step: get the top-level Guest page table entry. */
gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
/* Toplevel not present? We can't map it in. */
@@ -228,12 +308,45 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
check_gpgd(cpu, gpgd);
/* And we copy the flags to the shadow PGD entry. The page
* number in the shadow PGD is the page we just allocated. */
- *spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
+ set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
}
+#ifdef CONFIG_X86_PAE
+ gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
+ /* middle level not present? We can't map it in. */
+ if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+ return false;
+
+ /* Now look at the matching shadow entry. */
+ spmd = spmd_addr(cpu, *spgd, vaddr);
+
+ if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
+ /* No shadow entry: allocate a new shadow PTE page. */
+ unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
+
+ /* This is not really the Guest's fault, but killing it is
+ * simple for this corner case. */
+ if (!ptepage) {
+ kill_guest(cpu, "out of memory allocating pte page");
+ return false;
+ }
+
+ /* We check that the Guest pmd is OK. */
+ check_gpmd(cpu, gpmd);
+
+ /* And we copy the flags to the shadow PMD entry. The page
+ * number in the shadow PMD is the page we just allocated. */
+ native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
+ }
+
+ /* OK, now we look at the lower level in the Guest page table: keep its
+ * address, because we might update it later. */
+ gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
+#else
/* OK, now we look at the lower level in the Guest page table: keep its
* address, because we might update it later. */
- gpte_ptr = gpte_addr(gpgd, vaddr);
+ gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
+#endif
gpte = lgread(cpu, gpte_ptr, pte_t);
/* If this page isn't in the Guest page tables, we can't page it in. */
@@ -259,7 +372,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
gpte = pte_mkdirty(gpte);
/* Get the pointer to the shadow PTE entry we're going to set. */
- spte = spte_addr(*spgd, vaddr);
+ spte = spte_addr(cpu, *spgd, vaddr);
/* If there was a valid shadow PTE entry here before, we release it.
* This can happen with a write to a previously read-only entry. */
release_pte(*spte);
@@ -273,7 +386,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
* table entry, even if the Guest says it's writable. That way
* we will come back here when a write does actually occur, so
* we can update the Guest's _PAGE_DIRTY flag. */
- *spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0);
+ native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
/* Finally, we write the Guest PTE entry back: we've set the
* _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
@@ -301,14 +414,23 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
pgd_t *spgd;
unsigned long flags;
+#ifdef CONFIG_X86_PAE
+ pmd_t *spmd;
+#endif
/* Look at the current top level entry: is it present? */
spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
return false;
+#ifdef CONFIG_X86_PAE
+ spmd = spmd_addr(cpu, *spgd, vaddr);
+ if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
+ return false;
+#endif
+
/* Check the flags on the pte entry itself: it must be present and
* writable. */
- flags = pte_flags(*(spte_addr(*spgd, vaddr)));
+ flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));
return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}
@@ -322,8 +444,43 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
kill_guest(cpu, "bad stack page %#lx", vaddr);
}
+#ifdef CONFIG_X86_PAE
+static void release_pmd(pmd_t *spmd)
+{
+ /* If the entry's not present, there's nothing to release. */
+ if (pmd_flags(*spmd) & _PAGE_PRESENT) {
+ unsigned int i;
+ pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
+ /* For each entry in the page, we might need to release it. */
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ release_pte(ptepage[i]);
+ /* Now we can free the page of PTEs */
+ free_page((long)ptepage);
+ /* And zero out the PMD entry so we never release it twice. */
+ native_set_pmd(spmd, __pmd(0));
+ }
+}
+
+static void release_pgd(pgd_t *spgd)
+{
+ /* If the entry's not present, there's nothing to release. */
+ if (pgd_flags(*spgd) & _PAGE_PRESENT) {
+ unsigned int i;
+ pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ release_pmd(&pmdpage[i]);
+
+ /* Now we can free the page of PMDs */
+ free_page((long)pmdpage);
+ /* And zero out the PGD entry so we never release it twice. */
+ set_pgd(spgd, __pgd(0));
+ }
+}
+
+#else /* !CONFIG_X86_PAE */
/*H:450 If we chase down the release_pgd() code, it looks like this: */
-static void release_pgd(struct lguest *lg, pgd_t *spgd)
+static void release_pgd(pgd_t *spgd)
{
/* If the entry's not present, there's nothing to release. */
if (pgd_flags(*spgd) & _PAGE_PRESENT) {
@@ -341,7 +498,7 @@ static void release_pgd(struct lguest *lg, pgd_t *spgd)
*spgd = __pgd(0);
}
}
-
+#endif
/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
* hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
* It simply releases every PTE page from 0 up to the Guest's kernel address. */
@@ -350,7 +507,7 @@ static void flush_user_mappings(struct lguest *lg, int idx)
unsigned int i;
/* Release every pgd entry up to the kernel's address. */
for (i = 0; i < pgd_index(lg->kernel_address); i++)
- release_pgd(lg, lg->pgdirs[idx].pgdir + i);
+ release_pgd(lg->pgdirs[idx].pgdir + i);
}
/*H:440 (v) Flushing (throwing away) page tables,
@@ -369,7 +526,9 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
pgd_t gpgd;
pte_t gpte;
-
+#ifdef CONFIG_X86_PAE
+ pmd_t gpmd;
+#endif
/* First step: get the top-level Guest page table entry. */
gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
/* Toplevel not present? We can't map it in. */
@@ -378,7 +537,14 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
return -1UL;
}
- gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t);
+#ifdef CONFIG_X86_PAE
+ gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
+ if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+ kill_guest(cpu, "Bad address %#lx", vaddr);
+ gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
+#else
+ gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
+#endif
if (!(pte_flags(gpte) & _PAGE_PRESENT))
kill_guest(cpu, "Bad address %#lx", vaddr);
@@ -405,6 +571,9 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
int *blank_pgdir)
{
unsigned int next;
+#ifdef CONFIG_X86_PAE
+ pmd_t *pmd_table;
+#endif
/* We pick one entry at random to throw out. Choosing the Least
* Recently Used might be better, but this is easy. */
@@ -416,10 +585,27 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
/* If the allocation fails, just keep using the one we have */
if (!cpu->lg->pgdirs[next].pgdir)
next = cpu->cpu_pgd;
- else
- /* This is a blank page, so there are no kernel
- * mappings: caller must map the stack! */
+ else {
+#ifdef CONFIG_X86_PAE
+ /* In PAE mode, allocate a pmd page and populate the
+ * last pgd entry. */
+ pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+ if (!pmd_table) {
+ free_page((long)cpu->lg->pgdirs[next].pgdir);
+ set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0));
+ next = cpu->cpu_pgd;
+ } else {
+ set_pgd(cpu->lg->pgdirs[next].pgdir +
+ SWITCHER_PGD_INDEX,
+ __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+ /* This is a blank page, so there are no kernel
+ * mappings: caller must map the stack! */
+ *blank_pgdir = 1;
+ }
+#else
*blank_pgdir = 1;
+#endif
+ }
}
/* Record which Guest toplevel this shadows. */
cpu->lg->pgdirs[next].gpgdir = gpgdir;
@@ -431,7 +617,7 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
/*H:430 (iv) Switching page tables
*
- * Now we've seen all the page table setting and manipulation, let's see what
+ * Now we've seen all the page table setting and manipulation, let's see
* what happens when the Guest changes page tables (ie. changes the top-level
* pgdir). This occurs on almost every context switch. */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
@@ -460,10 +646,25 @@ static void release_all_pagetables(struct lguest *lg)
/* Every shadow pagetable this Guest has */
for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
- if (lg->pgdirs[i].pgdir)
+ if (lg->pgdirs[i].pgdir) {
+#ifdef CONFIG_X86_PAE
+ pgd_t *spgd;
+ pmd_t *pmdpage;
+ unsigned int k;
+
+ /* Get the last pmd page. */
+ spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
+ pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
+
+ /* And release the pmd entries of that pmd page,
+ * except for the switcher pmd. */
+ for (k = 0; k < SWITCHER_PMD_INDEX; k++)
+ release_pmd(&pmdpage[k]);
+#endif
/* Every PGD entry except the Switcher at the top */
for (j = 0; j < SWITCHER_PGD_INDEX; j++)
- release_pgd(lg, lg->pgdirs[i].pgdir + j);
+ release_pgd(lg->pgdirs[i].pgdir + j);
+ }
}
/* We also throw away everything when a Guest tells us it's changed a kernel
@@ -504,24 +705,37 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
{
/* Look up the matching shadow page directory entry. */
pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
+#ifdef CONFIG_X86_PAE
+ pmd_t *spmd;
+#endif
/* If the top level isn't present, there's no entry to update. */
if (pgd_flags(*spgd) & _PAGE_PRESENT) {
- /* Otherwise, we start by releasing the existing entry. */
- pte_t *spte = spte_addr(*spgd, vaddr);
- release_pte(*spte);
-
- /* If they're setting this entry as dirty or accessed, we might
- * as well put that entry they've given us in now. This shaves
- * 10% off a copy-on-write micro-benchmark. */
- if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
- check_gpte(cpu, gpte);
- *spte = gpte_to_spte(cpu, gpte,
- pte_flags(gpte) & _PAGE_DIRTY);
- } else
- /* Otherwise kill it and we can demand_page() it in
- * later. */
- *spte = __pte(0);
+#ifdef CONFIG_X86_PAE
+ spmd = spmd_addr(cpu, *spgd, vaddr);
+ if (pmd_flags(*spmd) & _PAGE_PRESENT) {
+#endif
+ /* Otherwise, we start by releasing
+ * the existing entry. */
+ pte_t *spte = spte_addr(cpu, *spgd, vaddr);
+ release_pte(*spte);
+
+ /* If they're setting this entry as dirty or accessed,
+ * we might as well put that entry they've given us
+ * in now. This shaves 10% off a
+ * copy-on-write micro-benchmark. */
+ if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
+ check_gpte(cpu, gpte);
+ native_set_pte(spte,
+ gpte_to_spte(cpu, gpte,
+ pte_flags(gpte) & _PAGE_DIRTY));
+ } else
+ /* Otherwise kill it and we can demand_page()
+ * it in later. */
+ native_set_pte(spte, __pte(0));
+#ifdef CONFIG_X86_PAE
+ }
+#endif
}
}
@@ -568,12 +782,10 @@ void guest_set_pte(struct lg_cpu *cpu,
*
* So with that in mind here's our code to update a (top-level) PGD entry:
*/
-void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
+void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
int pgdir;
- /* The kernel seems to try to initialize this early on: we ignore its
- * attempts to map over the Switcher. */
if (idx >= SWITCHER_PGD_INDEX)
return;
@@ -581,8 +793,14 @@ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
pgdir = find_pgdir(lg, gpgdir);
if (pgdir < ARRAY_SIZE(lg->pgdirs))
/* ... throw it away. */
- release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
+ release_pgd(lg->pgdirs[pgdir].pgdir + idx);
}
+#ifdef CONFIG_X86_PAE
+void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
+{
+ guest_pagetable_clear_all(&lg->cpus[0]);
+}
+#endif
/* Once we know how much memory we have we can construct simple identity
* (which set virtual == physical) and linear mappings
@@ -596,8 +814,16 @@ static unsigned long setup_pagetables(struct lguest *lg,
{
pgd_t __user *pgdir;
pte_t __user *linear;
- unsigned int mapped_pages, i, linear_pages, phys_linear;
unsigned long mem_base = (unsigned long)lg->mem_base;
+ unsigned int mapped_pages, i, linear_pages;
+#ifdef CONFIG_X86_PAE
+ pmd_t __user *pmds;
+ unsigned int j;
+ pgd_t pgd;
+ pmd_t pmd;
+#else
+ unsigned int phys_linear;
+#endif
/* We have mapped_pages frames to map, so we need
* linear_pages page tables to map them. */
@@ -610,6 +836,9 @@ static unsigned long setup_pagetables(struct lguest *lg,
/* Now we use the next linear_pages pages as pte pages */
linear = (void *)pgdir - linear_pages * PAGE_SIZE;
+#ifdef CONFIG_X86_PAE
+ pmds = (void *)linear - PAGE_SIZE;
+#endif
/* Linear mapping is easy: put every page's address into the
* mapping in order. */
for (i = 0; i < mapped_pages; i++) {
@@ -621,6 +850,22 @@ static unsigned long setup_pagetables(struct lguest *lg,
/* The top level points to the linear page table pages above.
* We setup the identity and linear mappings here. */
+#ifdef CONFIG_X86_PAE
+ for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
+ i += PTRS_PER_PTE, j++) {
+ native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i)
+ - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
+
+ if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0)
+ return -EFAULT;
+ }
+
+ set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT));
+ if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
+ return -EFAULT;
+ if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0)
+ return -EFAULT;
+#else
phys_linear = (unsigned long)linear - mem_base;
for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
pgd_t pgd;
@@ -633,6 +878,7 @@ static unsigned long setup_pagetables(struct lguest *lg,
&pgd, sizeof(pgd)))
return -EFAULT;
}
+#endif
/* We return the top level (guest-physical) address: remember where
* this is. */
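setup_pagetables() places the PTE ("linear") pages just below the top-level pgdir and, under PAE, a single page of PMDs below those; it needs one PTE page for every PTRS_PER_PTE mapped frames, rounded up. A quick standalone calculation of that count for an invented guest size:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PTRS_PER_PTE	512UL	/* PAE; 1024 without PAE */

int main(void)
{
	unsigned long mem = 128UL << 20;			/* 128 MB guest */
	unsigned long mapped_pages = mem / PAGE_SIZE;		/* 32768 frames */
	unsigned long linear_pages =
		(mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;

	printf("%lu PTE pages to map %lu MB\n", linear_pages, mem >> 20);
	return 0;
}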
@@ -648,7 +894,10 @@ int init_guest_pagetable(struct lguest *lg)
u64 mem;
u32 initrd_size;
struct boot_params __user *boot = (struct boot_params *)lg->mem_base;
-
+#ifdef CONFIG_X86_PAE
+ pgd_t *pgd;
+ pmd_t *pmd_table;
+#endif
/* Get the Guest memory size and the ramdisk size from the boot header
* located at lg->mem_base (Guest address 0). */
if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
@@ -663,6 +912,15 @@ int init_guest_pagetable(struct lguest *lg)
lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
if (!lg->pgdirs[0].pgdir)
return -ENOMEM;
+#ifdef CONFIG_X86_PAE
+ pgd = lg->pgdirs[0].pgdir;
+ pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL);
+ if (!pmd_table)
+ return -ENOMEM;
+
+ set_pgd(pgd + SWITCHER_PGD_INDEX,
+ __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+#endif
lg->cpus[0].cpu_pgd = 0;
return 0;
}
@@ -672,17 +930,24 @@ void page_table_guest_data_init(struct lg_cpu *cpu)
{
/* We get the kernel address: above this is all kernel memory. */
if (get_user(cpu->lg->kernel_address,
- &cpu->lg->lguest_data->kernel_address)
- /* We tell the Guest that it can't use the top 4MB of virtual
- * addresses used by the Switcher. */
- || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem)
- || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir))
+ &cpu->lg->lguest_data->kernel_address)
+ /* We tell the Guest that it can't use the top 2 or 4 MB
+ * of virtual addresses used by the Switcher. */
+ || put_user(RESERVE_MEM * 1024 * 1024,
+ &cpu->lg->lguest_data->reserve_mem)
+ || put_user(cpu->lg->pgdirs[0].gpgdir,
+ &cpu->lg->lguest_data->pgdir))
kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
/* In flush_user_mappings() we loop from 0 to
* "pgd_index(lg->kernel_address)". This assumes it won't hit the
* Switcher mappings, so check that now. */
+#ifdef CONFIG_X86_PAE
+ if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
+ pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
+#else
if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
+#endif
kill_guest(cpu, "bad kernel address %#lx",
cpu->lg->kernel_address);
}
@@ -708,16 +973,30 @@ void free_guest_pagetable(struct lguest *lg)
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
- pgd_t switcher_pgd;
pte_t regs_pte;
unsigned long pfn;
+#ifdef CONFIG_X86_PAE
+ pmd_t switcher_pmd;
+ pmd_t *pmd_table;
+
+ native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >>
+ PAGE_SHIFT, PAGE_KERNEL_EXEC));
+
+ pmd_table = __va(pgd_pfn(cpu->lg->
+ pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
+ << PAGE_SHIFT);
+ native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
+#else
+ pgd_t switcher_pgd;
+
/* Make the last PGD entry for this Guest point to the Switcher's PTE
* page for this CPU (with appropriate flags). */
- switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL);
+ switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);
cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
+#endif
/* We also change the Switcher PTE page. When we're running the Guest,
* we want the Guest's "regs" page to appear where the first Switcher
* page for this CPU is. This is an optimization: when the Switcher
@@ -726,8 +1005,9 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
* page is already mapped there, we don't have to copy them out
* again. */
pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
- regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
- switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
+ native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
+ native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
+ regs_pte);
}
/*:*/
@@ -752,21 +1032,21 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
/* The first entries are easy: they map the Switcher code. */
for (i = 0; i < pages; i++) {
- pte[i] = mk_pte(switcher_page[i],
- __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
+ native_set_pte(&pte[i], mk_pte(switcher_page[i],
+ __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}
/* The only other thing we map is this CPU's pair of pages. */
i = pages + cpu*2;
/* First page (Guest registers) is writable from the Guest */
- pte[i] = pfn_pte(page_to_pfn(switcher_page[i]),
- __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW));
+ native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
+ __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));
/* The second page contains the "struct lguest_ro_state", and is
* read-only. */
- pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]),
- __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
+ native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
+ __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}
/* We've made it through the page table code. Perhaps our tired brains are
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 7ede64ffeef..482ed5a1875 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -150,7 +150,7 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
{
/* We assume the Guest has the same number of GDT entries as the
* Host, otherwise we'd have to dynamically allocate the Guest GDT. */
- if (num > ARRAY_SIZE(cpu->arch.gdt))
+ if (num >= ARRAY_SIZE(cpu->arch.gdt))
kill_guest(cpu, "too many gdt entries %i", num);
/* Set it up, then fix it. */
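The one-character change above matters because a GDT with ARRAY_SIZE(gdt) entries has valid indices 0 through ARRAY_SIZE(gdt) - 1; accepting num == ARRAY_SIZE(gdt) would write one slot past the end. A trivial standalone illustration of the correct bound check:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	unsigned int gdt[32];
	unsigned int num = 32;		/* first invalid index */

	if (num >= ARRAY_SIZE(gdt))	/* '>' here would let 32 through */
		printf("rejecting out-of-range entry %u\n", num);
	else
		gdt[num] = 0;
	return 0;
}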
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 1a83910f674..eaf722fe309 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -358,6 +358,16 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
if (emulate_insn(cpu))
return;
}
+ /* If KVM is active, the vmcall instruction triggers a
+ * General Protection Fault. Normally it triggers an
+ * invalid opcode fault (6): */
+ case 6:
+ /* We need to check if ring == GUEST_PL and
+ * faulting instruction == vmcall. */
+ if (is_hypercall(cpu)) {
+ rewrite_hypercall(cpu);
+ return;
+ }
break;
case 14: /* We've intercepted a Page Fault. */
/* The Guest accessed a virtual address that wasn't mapped.
@@ -403,15 +413,6 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
* up the pointer now to indicate a hypercall is pending. */
cpu->hcall = (struct hcall_args *)cpu->regs;
return;
- case 6:
- /* kvm hypercalls trigger an invalid opcode fault (6).
- * We need to check if ring == GUEST_PL and
- * faulting instruction == vmcall. */
- if (is_hypercall(cpu)) {
- rewrite_hypercall(cpu);
- return;
- }
- break;
}
/* We didn't handle the trap, so it needs to go to the Guest. */
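Moving case 6 so it sits directly after the general-protection-fault code is deliberate: case 13 no longer ends in a break, so a #GP falls through into the same is_hypercall()/rewrite_hypercall() check that an invalid-opcode fault gets, which is what catches vmcall when running under KVM. A minimal illustration of that intentional fall-through shape (the trap numbers are kept, everything else is invented):

#include <stdio.h>

static void handle_trap(int trapnum)
{
	switch (trapnum) {
	case 13:
		printf("general protection fault\n");
		/* no break: share the hypercall check below */
	case 6:
		printf("checking for a hypercall instruction\n");
		break;
	case 14:
		printf("page fault\n");
		break;
	}
}

int main(void)
{
	handle_trap(13);	/* prints both messages */
	handle_trap(6);		/* prints only the second */
	return 0;
}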
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index c0621d50c8a..fde377c60cc 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -37,6 +37,7 @@
#define CONFIG_REG 0x40
#define MANUAL_MASK 0xe0
#define AUTO_MASK 0x20
+#define INVERT_MASK 0x10
static u8 TEMP_REG[3] = {0x26, 0x25, 0x27}; /* local, sensor1, sensor2 */
static u8 LIMIT_REG[3] = {0x6b, 0x6a, 0x6c}; /* local, sensor1, sensor2 */
@@ -71,7 +72,7 @@ MODULE_PARM_DESC(verbose,"Verbose log operations "
"(default 0)");
struct thermostat {
- struct i2c_client clt;
+ struct i2c_client *clt;
u8 temps[3];
u8 cached_temp[3];
u8 initial_limits[3];
@@ -86,9 +87,6 @@ static struct of_device * of_dev;
static struct thermostat* thermostat;
static struct task_struct *thread_therm = NULL;
-static int attach_one_thermostat(struct i2c_adapter *adapter, int addr,
- int busno);
-
static void write_both_fan_speed(struct thermostat *th, int speed);
static void write_fan_speed(struct thermostat *th, int speed, int fan);
@@ -100,7 +98,7 @@ write_reg(struct thermostat* th, int reg, u8 data)
tmp[0] = reg;
tmp[1] = data;
- rc = i2c_master_send(&th->clt, (const char *)tmp, 2);
+ rc = i2c_master_send(th->clt, (const char *)tmp, 2);
if (rc < 0)
return rc;
if (rc != 2)
@@ -115,12 +113,12 @@ read_reg(struct thermostat* th, int reg)
int rc;
reg_addr = (u8)reg;
- rc = i2c_master_send(&th->clt, &reg_addr, 1);
+ rc = i2c_master_send(th->clt, &reg_addr, 1);
if (rc < 0)
return rc;
if (rc != 1)
return -ENODEV;
- rc = i2c_master_recv(&th->clt, (char *)&data, 1);
+ rc = i2c_master_recv(th->clt, (char *)&data, 1);
if (rc < 0)
return rc;
return data;
@@ -130,26 +128,36 @@ static int
attach_thermostat(struct i2c_adapter *adapter)
{
unsigned long bus_no;
+ struct i2c_board_info info;
+ struct i2c_client *client;
if (strncmp(adapter->name, "uni-n", 5))
return -ENODEV;
bus_no = simple_strtoul(adapter->name + 6, NULL, 10);
if (bus_no != therm_bus)
return -ENODEV;
- return attach_one_thermostat(adapter, therm_address, bus_no);
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "therm_adt746x", I2C_NAME_SIZE);
+ info.addr = therm_address;
+ client = i2c_new_device(adapter, &info);
+ if (!client)
+ return -ENODEV;
+
+ /*
+ * Let i2c-core delete that device on driver removal.
+ * This is safe because i2c-core holds the core_lock mutex for us.
+ */
+ list_add_tail(&client->detected, &client->driver->clients);
+ return 0;
}
static int
-detach_thermostat(struct i2c_adapter *adapter)
+remove_thermostat(struct i2c_client *client)
{
- struct thermostat* th;
+ struct thermostat *th = i2c_get_clientdata(client);
int i;
- if (thermostat == NULL)
- return 0;
-
- th = thermostat;
-
if (thread_therm != NULL) {
kthread_stop(thread_therm);
}
@@ -165,8 +173,6 @@ detach_thermostat(struct i2c_adapter *adapter)
write_both_fan_speed(th, -1);
- i2c_detach_client(&th->clt);
-
thermostat = NULL;
kfree(th);
@@ -174,14 +180,6 @@ detach_thermostat(struct i2c_adapter *adapter)
return 0;
}
-static struct i2c_driver thermostat_driver = {
- .driver = {
- .name = "therm_adt746x",
- },
- .attach_adapter = attach_thermostat,
- .detach_adapter = detach_thermostat,
-};
-
static int read_fan_speed(struct thermostat *th, u8 addr)
{
u8 tmp[2];
@@ -229,7 +227,8 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan)
if (speed >= 0) {
manual = read_reg(th, MANUAL_MODE[fan]);
- write_reg(th, MANUAL_MODE[fan], manual|MANUAL_MASK);
+ write_reg(th, MANUAL_MODE[fan],
+ (manual|MANUAL_MASK) & (~INVERT_MASK));
write_reg(th, FAN_SPD_SET[fan], speed);
} else {
/* back to automatic */
@@ -369,8 +368,8 @@ static void set_limit(struct thermostat *th, int i)
th->limits[i] = default_limits_local[i] + limit_adjust;
}
-static int attach_one_thermostat(struct i2c_adapter *adapter, int addr,
- int busno)
+static int probe_thermostat(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct thermostat* th;
int rc;
@@ -383,16 +382,12 @@ static int attach_one_thermostat(struct i2c_adapter *adapter, int addr,
if (!th)
return -ENOMEM;
- th->clt.addr = addr;
- th->clt.adapter = adapter;
- th->clt.driver = &thermostat_driver;
- strcpy(th->clt.name, "thermostat");
+ i2c_set_clientdata(client, th);
+ th->clt = client;
rc = read_reg(th, 0);
if (rc < 0) {
- printk(KERN_ERR "adt746x: Thermostat failed to read config "
- "from bus %d !\n",
- busno);
+ dev_err(&client->dev, "Thermostat failed to read config!\n");
kfree(th);
return -ENODEV;
}
@@ -421,14 +416,6 @@ static int attach_one_thermostat(struct i2c_adapter *adapter, int addr,
thermostat = th;
- if (i2c_attach_client(&th->clt)) {
- printk(KERN_INFO "adt746x: Thermostat failed to attach "
- "client !\n");
- thermostat = NULL;
- kfree(th);
- return -ENODEV;
- }
-
/* be sure to really write fan speed the first time */
th->last_speed[0] = -2;
th->last_speed[1] = -2;
@@ -454,6 +441,21 @@ static int attach_one_thermostat(struct i2c_adapter *adapter, int addr,
return 0;
}
+static const struct i2c_device_id therm_adt746x_id[] = {
+ { "therm_adt746x", 0 },
+ { }
+};
+
+static struct i2c_driver thermostat_driver = {
+ .driver = {
+ .name = "therm_adt746x",
+ },
+ .attach_adapter = attach_thermostat,
+ .probe = probe_thermostat,
+ .remove = remove_thermostat,
+ .id_table = therm_adt746x_id,
+};
+
/*
* Now, unfortunately, sysfs doesn't give us a nice void * we could
* pass around to the attribute functions, so we don't really have
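This driver (and the other macintosh thermal drivers below) is being converted from the legacy attach_adapter/detach_client model to new-style i2c binding: the driver exports an id_table plus probe()/remove() callbacks and i2c-core matches instantiated devices against the table. A hedged skeleton of that shape with invented names; the real drivers above also keep attach_adapter during the transition to instantiate their own devices:

#include <linux/module.h>
#include <linux/i2c.h>

static int demo_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	/* Allocate per-device state and stash it with i2c_set_clientdata(). */
	return 0;
}

static int demo_remove(struct i2c_client *client)
{
	/* Undo whatever probe() did. */
	return 0;
}

static const struct i2c_device_id demo_id[] = {
	{ "demo_sensor", 0 },
	{ }
};

static struct i2c_driver demo_driver = {
	.driver = {
		.name = "demo_sensor",
	},
	.probe = demo_probe,
	.remove = demo_remove,
	.id_table = demo_id,
};

static int __init demo_init(void)
{
	return i2c_add_driver(&demo_driver);
}

static void __exit demo_exit(void)
{
	i2c_del_driver(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");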
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 817607e2af6..a028598af2d 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -287,22 +287,6 @@ struct fcu_fan_table fcu_fans[] = {
};
/*
- * i2c_driver structure to attach to the host i2c controller
- */
-
-static int therm_pm72_attach(struct i2c_adapter *adapter);
-static int therm_pm72_detach(struct i2c_adapter *adapter);
-
-static struct i2c_driver therm_pm72_driver =
-{
- .driver = {
- .name = "therm_pm72",
- },
- .attach_adapter = therm_pm72_attach,
- .detach_adapter = therm_pm72_detach,
-};
-
-/*
* Utility function to create an i2c_client structure and
* attach it to one of u3 adapters
*/
@@ -310,6 +294,7 @@ static struct i2c_client *attach_i2c_chip(int id, const char *name)
{
struct i2c_client *clt;
struct i2c_adapter *adap;
+ struct i2c_board_info info;
if (id & 0x200)
adap = k2;
@@ -320,31 +305,21 @@ static struct i2c_client *attach_i2c_chip(int id, const char *name)
if (adap == NULL)
return NULL;
- clt = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
- if (clt == NULL)
- return NULL;
-
- clt->addr = (id >> 1) & 0x7f;
- clt->adapter = adap;
- clt->driver = &therm_pm72_driver;
- strncpy(clt->name, name, I2C_NAME_SIZE-1);
-
- if (i2c_attach_client(clt)) {
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ info.addr = (id >> 1) & 0x7f;
+ strlcpy(info.type, "therm_pm72", I2C_NAME_SIZE);
+ clt = i2c_new_device(adap, &info);
+ if (!clt) {
printk(KERN_ERR "therm_pm72: Failed to attach to i2c ID 0x%x\n", id);
- kfree(clt);
return NULL;
}
- return clt;
-}
-/*
- * Utility function to get rid of the i2c_client structure
- * (will also detach from the adapter hopepfully)
- */
-static void detach_i2c_chip(struct i2c_client *clt)
-{
- i2c_detach_client(clt);
- kfree(clt);
+ /*
+ * Let i2c-core delete that device on driver removal.
+ * This is safe because i2c-core holds the core_lock mutex for us.
+ */
+ list_add_tail(&clt->detected, &clt->driver->clients);
+ return clt;
}
/*
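attach_i2c_chip() now instantiates its client the new way: fill in an i2c_board_info, call i2c_new_device(), and (since there is no board declaration to match) hook the client onto the driver's detected list so i2c-core tears it down on removal. A hedged sketch of just the instantiation step with invented names; the type string must match an entry in the driver's id_table for probe() to run:

#include <linux/i2c.h>
#include <linux/string.h>

static struct i2c_client *demo_instantiate(struct i2c_adapter *adap, u16 addr)
{
	struct i2c_board_info info;

	memset(&info, 0, sizeof(info));
	info.addr = addr;
	strlcpy(info.type, "demo_sensor", I2C_NAME_SIZE);

	return i2c_new_device(adap, &info);	/* NULL on failure */
}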
@@ -1203,8 +1178,6 @@ static int init_cpu_state(struct cpu_pid_state *state, int index)
return 0;
fail:
- if (state->monitor)
- detach_i2c_chip(state->monitor);
state->monitor = NULL;
return -ENODEV;
@@ -1232,7 +1205,6 @@ static void dispose_cpu_state(struct cpu_pid_state *state)
device_remove_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm);
}
- detach_i2c_chip(state->monitor);
state->monitor = NULL;
}
@@ -1407,7 +1379,6 @@ static void dispose_backside_state(struct backside_pid_state *state)
device_remove_file(&of_dev->dev, &dev_attr_backside_temperature);
device_remove_file(&of_dev->dev, &dev_attr_backside_fan_pwm);
- detach_i2c_chip(state->monitor);
state->monitor = NULL;
}
@@ -1532,7 +1503,6 @@ static void dispose_drives_state(struct drives_pid_state *state)
device_remove_file(&of_dev->dev, &dev_attr_drives_temperature);
device_remove_file(&of_dev->dev, &dev_attr_drives_fan_rpm);
- detach_i2c_chip(state->monitor);
state->monitor = NULL;
}
@@ -1654,7 +1624,6 @@ static void dispose_dimms_state(struct dimm_pid_state *state)
device_remove_file(&of_dev->dev, &dev_attr_dimms_temperature);
- detach_i2c_chip(state->monitor);
state->monitor = NULL;
}
@@ -1779,7 +1748,6 @@ static void dispose_slots_state(struct slots_pid_state *state)
device_remove_file(&of_dev->dev, &dev_attr_slots_temperature);
device_remove_file(&of_dev->dev, &dev_attr_slots_fan_pwm);
- detach_i2c_chip(state->monitor);
state->monitor = NULL;
}
@@ -2008,8 +1976,6 @@ static int attach_fcu(void)
*/
static void detach_fcu(void)
{
- if (fcu)
- detach_i2c_chip(fcu);
fcu = NULL;
}
@@ -2060,12 +2026,21 @@ static int therm_pm72_attach(struct i2c_adapter *adapter)
return 0;
}
+static int therm_pm72_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ /* Always succeed, the real work was done in therm_pm72_attach() */
+ return 0;
+}
+
/*
- * Called on every adapter when the driver or the i2c controller
+ * Called when any of the devices that participate in thermal management
* is going away.
*/
-static int therm_pm72_detach(struct i2c_adapter *adapter)
+static int therm_pm72_remove(struct i2c_client *client)
{
+ struct i2c_adapter *adapter = client->adapter;
+
mutex_lock(&driver_lock);
if (state != state_detached)
@@ -2096,6 +2071,30 @@ static int therm_pm72_detach(struct i2c_adapter *adapter)
return 0;
}
+/*
+ * i2c_driver structure to attach to the host i2c controller
+ */
+
+static const struct i2c_device_id therm_pm72_id[] = {
+ /*
+ * Fake device name, thermal management is done by several
+ * chips but we don't need to differentiate between them at
+ * this point.
+ */
+ { "therm_pm72", 0 },
+ { }
+};
+
+static struct i2c_driver therm_pm72_driver = {
+ .driver = {
+ .name = "therm_pm72",
+ },
+ .attach_adapter = therm_pm72_attach,
+ .probe = therm_pm72_probe,
+ .remove = therm_pm72_remove,
+ .id_table = therm_pm72_id,
+};
+
static int fan_check_loc_match(const char *loc, int fan)
{
char tmp[64];
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 3da0a02efd7..40023313a76 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -48,16 +48,6 @@
#define LOG_TEMP 0 /* continuously log temperature */
-static int do_probe( struct i2c_adapter *adapter, int addr, int kind);
-
-/* scan 0x48-0x4f (DS1775) and 0x2c-2x2f (ADM1030) */
-static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
- 0x4c, 0x4d, 0x4e, 0x4f,
- 0x2c, 0x2d, 0x2e, 0x2f,
- I2C_CLIENT_END };
-
-I2C_CLIENT_INSMOD;
-
static struct {
volatile int running;
struct task_struct *poll_task;
@@ -315,53 +305,54 @@ static int control_loop(void *dummy)
static int
do_attach( struct i2c_adapter *adapter )
{
- int ret = 0;
+ /* scan 0x48-0x4f (DS1775) and 0x2c-2x2f (ADM1030) */
+ static const unsigned short scan_ds1775[] = {
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ I2C_CLIENT_END
+ };
+ static const unsigned short scan_adm1030[] = {
+ 0x2c, 0x2d, 0x2e, 0x2f,
+ I2C_CLIENT_END
+ };
if( strncmp(adapter->name, "uni-n", 5) )
return 0;
if( !x.running ) {
- ret = i2c_probe( adapter, &addr_data, &do_probe );
+ struct i2c_board_info info;
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "therm_ds1775", I2C_NAME_SIZE);
+ i2c_new_probed_device(adapter, &info, scan_ds1775);
+
+ strlcpy(info.type, "therm_adm1030", I2C_NAME_SIZE);
+ i2c_new_probed_device(adapter, &info, scan_adm1030);
+
if( x.thermostat && x.fan ) {
x.running = 1;
x.poll_task = kthread_run(control_loop, NULL, "g4fand");
}
}
- return ret;
+ return 0;
}
static int
-do_detach( struct i2c_client *client )
+do_remove(struct i2c_client *client)
{
- int err;
-
- if( (err=i2c_detach_client(client)) )
- printk(KERN_ERR "failed to detach thermostat client\n");
- else {
- if( x.running ) {
- x.running = 0;
- kthread_stop(x.poll_task);
- x.poll_task = NULL;
- }
- if( client == x.thermostat )
- x.thermostat = NULL;
- else if( client == x.fan )
- x.fan = NULL;
- else {
- printk(KERN_ERR "g4fan: bad client\n");
- }
- kfree( client );
+ if (x.running) {
+ x.running = 0;
+ kthread_stop(x.poll_task);
+ x.poll_task = NULL;
}
- return err;
-}
+ if (client == x.thermostat)
+ x.thermostat = NULL;
+ else if (client == x.fan)
+ x.fan = NULL;
+ else
+ printk(KERN_ERR "g4fan: bad client\n");
-static struct i2c_driver g4fan_driver = {
- .driver = {
- .name = "therm_windtunnel",
- },
- .attach_adapter = do_attach,
- .detach_client = do_detach,
-};
+ return 0;
+}
static int
attach_fan( struct i2c_client *cl )
@@ -374,13 +365,8 @@ attach_fan( struct i2c_client *cl )
goto out;
printk("ADM1030 fan controller [@%02x]\n", cl->addr );
- strlcpy( cl->name, "ADM1030 fan controller", sizeof(cl->name) );
-
- if( !i2c_attach_client(cl) )
- x.fan = cl;
+ x.fan = cl;
out:
- if( cl != x.fan )
- kfree( cl );
return 0;
}
@@ -412,39 +398,47 @@ attach_thermostat( struct i2c_client *cl )
x.temp = temp;
x.overheat_temp = os_temp;
x.overheat_hyst = hyst_temp;
-
- strlcpy( cl->name, "DS1775 thermostat", sizeof(cl->name) );
-
- if( !i2c_attach_client(cl) )
- x.thermostat = cl;
+ x.thermostat = cl;
out:
- if( cl != x.thermostat )
- kfree( cl );
return 0;
}
+enum chip { ds1775, adm1030 };
+
+static const struct i2c_device_id therm_windtunnel_id[] = {
+ { "therm_ds1775", ds1775 },
+ { "therm_adm1030", adm1030 },
+ { }
+};
+
static int
-do_probe( struct i2c_adapter *adapter, int addr, int kind )
+do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
- struct i2c_client *cl;
+ struct i2c_adapter *adapter = cl->adapter;
if( !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA
| I2C_FUNC_SMBUS_WRITE_BYTE) )
return 0;
- if( !(cl=kzalloc(sizeof(*cl), GFP_KERNEL)) )
- return -ENOMEM;
-
- cl->addr = addr;
- cl->adapter = adapter;
- cl->driver = &g4fan_driver;
- cl->flags = 0;
-
- if( addr < 0x48 )
+ switch (id->driver_data) {
+ case adm1030:
return attach_fan( cl );
- return attach_thermostat( cl );
+ case ds1775:
+ return attach_thermostat(cl);
+ }
+ return 0;
}
+static struct i2c_driver g4fan_driver = {
+ .driver = {
+ .name = "therm_windtunnel",
+ },
+ .attach_adapter = do_attach,
+ .probe = do_probe,
+ .remove = do_remove,
+ .id_table = therm_windtunnel_id,
+};
+
/************************************************************************/
/* initialization / cleanup */
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index b92b959fe16..529886c7a82 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -37,34 +37,22 @@
struct wf_lm75_sensor {
int ds1775 : 1;
int inited : 1;
- struct i2c_client i2c;
+ struct i2c_client *i2c;
struct wf_sensor sens;
};
#define wf_to_lm75(c) container_of(c, struct wf_lm75_sensor, sens)
-#define i2c_to_lm75(c) container_of(c, struct wf_lm75_sensor, i2c)
-
-static int wf_lm75_attach(struct i2c_adapter *adapter);
-static int wf_lm75_detach(struct i2c_client *client);
-
-static struct i2c_driver wf_lm75_driver = {
- .driver = {
- .name = "wf_lm75",
- },
- .attach_adapter = wf_lm75_attach,
- .detach_client = wf_lm75_detach,
-};
static int wf_lm75_get(struct wf_sensor *sr, s32 *value)
{
struct wf_lm75_sensor *lm = wf_to_lm75(sr);
s32 data;
- if (lm->i2c.adapter == NULL)
+ if (lm->i2c == NULL)
return -ENODEV;
/* Init chip if necessary */
if (!lm->inited) {
- u8 cfg_new, cfg = (u8)i2c_smbus_read_byte_data(&lm->i2c, 1);
+ u8 cfg_new, cfg = (u8)i2c_smbus_read_byte_data(lm->i2c, 1);
DBG("wf_lm75: Initializing %s, cfg was: %02x\n",
sr->name, cfg);
@@ -73,7 +61,7 @@ static int wf_lm75_get(struct wf_sensor *sr, s32 *value)
* the firmware for now
*/
cfg_new = cfg & ~0x01;
- i2c_smbus_write_byte_data(&lm->i2c, 1, cfg_new);
+ i2c_smbus_write_byte_data(lm->i2c, 1, cfg_new);
lm->inited = 1;
/* If we just powered it up, let's wait 200 ms */
@@ -81,7 +69,7 @@ static int wf_lm75_get(struct wf_sensor *sr, s32 *value)
}
/* Read temperature register */
- data = (s32)le16_to_cpu(i2c_smbus_read_word_data(&lm->i2c, 0));
+ data = (s32)le16_to_cpu(i2c_smbus_read_word_data(lm->i2c, 0));
data <<= 8;
*value = data;
@@ -92,12 +80,6 @@ static void wf_lm75_release(struct wf_sensor *sr)
{
struct wf_lm75_sensor *lm = wf_to_lm75(sr);
- /* check if client is registered and detach from i2c */
- if (lm->i2c.adapter) {
- i2c_detach_client(&lm->i2c);
- lm->i2c.adapter = NULL;
- }
-
kfree(lm);
}
@@ -107,59 +89,77 @@ static struct wf_sensor_ops wf_lm75_ops = {
.owner = THIS_MODULE,
};
-static struct wf_lm75_sensor *wf_lm75_create(struct i2c_adapter *adapter,
- u8 addr, int ds1775,
- const char *loc)
+static int wf_lm75_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct wf_lm75_sensor *lm;
int rc;
- DBG("wf_lm75: creating %s device at address 0x%02x\n",
- ds1775 ? "ds1775" : "lm75", addr);
-
lm = kzalloc(sizeof(struct wf_lm75_sensor), GFP_KERNEL);
if (lm == NULL)
- return NULL;
+ return -ENODEV;
+
+ lm->inited = 0;
+ lm->ds1775 = id->driver_data;
+ lm->i2c = client;
+ lm->sens.name = client->dev.platform_data;
+ lm->sens.ops = &wf_lm75_ops;
+ i2c_set_clientdata(client, lm);
+
+ rc = wf_register_sensor(&lm->sens);
+ if (rc) {
+ i2c_set_clientdata(client, NULL);
+ kfree(lm);
+ }
+
+ return rc;
+}
+
+static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter,
+ u8 addr, int ds1775,
+ const char *loc)
+{
+ struct i2c_board_info info;
+ struct i2c_client *client;
+ char *name;
+
+ DBG("wf_lm75: creating %s device at address 0x%02x\n",
+ ds1775 ? "ds1775" : "lm75", addr);
/* Usual rant about sensor names not being very consistent in
* the device-tree, oh well ...
* Add more entries below as you deal with more setups
*/
if (!strcmp(loc, "Hard drive") || !strcmp(loc, "DRIVE BAY"))
- lm->sens.name = "hd-temp";
+ name = "hd-temp";
else if (!strcmp(loc, "Incoming Air Temp"))
- lm->sens.name = "incoming-air-temp";
+ name = "incoming-air-temp";
else if (!strcmp(loc, "ODD Temp"))
- lm->sens.name = "optical-drive-temp";
+ name = "optical-drive-temp";
else if (!strcmp(loc, "HD Temp"))
- lm->sens.name = "hard-drive-temp";
+ name = "hard-drive-temp";
else
goto fail;
- lm->inited = 0;
- lm->sens.ops = &wf_lm75_ops;
- lm->ds1775 = ds1775;
- lm->i2c.addr = (addr >> 1) & 0x7f;
- lm->i2c.adapter = adapter;
- lm->i2c.driver = &wf_lm75_driver;
- strncpy(lm->i2c.name, lm->sens.name, I2C_NAME_SIZE-1);
-
- rc = i2c_attach_client(&lm->i2c);
- if (rc) {
- printk(KERN_ERR "windfarm: failed to attach %s %s to i2c,"
- " err %d\n", ds1775 ? "ds1775" : "lm75",
- lm->i2c.name, rc);
- goto fail;
- }
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ info.addr = (addr >> 1) & 0x7f;
+ info.platform_data = name;
+ strlcpy(info.type, ds1775 ? "wf_ds1775" : "wf_lm75", I2C_NAME_SIZE);
- if (wf_register_sensor(&lm->sens)) {
- i2c_detach_client(&lm->i2c);
+ client = i2c_new_device(adapter, &info);
+ if (client == NULL) {
+ printk(KERN_ERR "windfarm: failed to attach %s %s to i2c\n",
+ ds1775 ? "ds1775" : "lm75", name);
goto fail;
}
- return lm;
+ /*
+ * Let i2c-core delete that device on driver removal.
+ * This is safe because i2c-core holds the core_lock mutex for us.
+ */
+ list_add_tail(&client->detected, &client->driver->clients);
+ return client;
fail:
- kfree(lm);
return NULL;
}
@@ -202,21 +202,38 @@ static int wf_lm75_attach(struct i2c_adapter *adapter)
return 0;
}
-static int wf_lm75_detach(struct i2c_client *client)
+static int wf_lm75_remove(struct i2c_client *client)
{
- struct wf_lm75_sensor *lm = i2c_to_lm75(client);
+ struct wf_lm75_sensor *lm = i2c_get_clientdata(client);
DBG("wf_lm75: i2c detatch called for %s\n", lm->sens.name);
/* Mark client detached */
- lm->i2c.adapter = NULL;
+ lm->i2c = NULL;
/* release sensor */
wf_unregister_sensor(&lm->sens);
+ i2c_set_clientdata(client, NULL);
return 0;
}
+static const struct i2c_device_id wf_lm75_id[] = {
+ { "wf_lm75", 0 },
+ { "wf_ds1775", 1 },
+ { }
+};
+
+static struct i2c_driver wf_lm75_driver = {
+ .driver = {
+ .name = "wf_lm75",
+ },
+ .attach_adapter = wf_lm75_attach,
+ .probe = wf_lm75_probe,
+ .remove = wf_lm75_remove,
+ .id_table = wf_lm75_id,
+};
+
static int __init wf_lm75_sensor_init(void)
{
/* Don't register on old machines that use therm_pm72 for now */
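
The three windfarm conversions in this series (and the therm_windtunnel hunk further up) all apply the same pattern: the deprecated attach_adapter/detach_client callbacks are replaced by the standard probe/remove binding keyed off an i2c_device_id table, with per-client state carried through i2c_set_clientdata(). attach_adapter survives only to instantiate the devices from device-tree data via i2c_new_device(); binding then happens through probe(). The sketch below shows the bare shape of such a new-style driver; the names example_*, struct example_data and "wf_example" are hypothetical, and only the i2c core calls are taken from the patches above.

#include <linux/i2c.h>
#include <linux/slab.h>

struct example_data {
	struct i2c_client *client;	/* used directly for later SMBus calls */
};

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct example_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	data->client = client;			/* keep the client pointer */
	i2c_set_clientdata(client, data);	/* retrieved again in remove() */
	return 0;
}

static int example_remove(struct i2c_client *client)
{
	struct example_data *data = i2c_get_clientdata(client);

	i2c_set_clientdata(client, NULL);
	kfree(data);
	return 0;
}

static const struct i2c_device_id example_id[] = {
	{ "wf_example", 0 },	/* matches i2c_board_info.type set at creation */
	{ }
};

static struct i2c_driver example_driver = {
	.driver = {
		.name	= "wf_example",
	},
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_id,
};

Registration itself is unchanged: module init still calls i2c_add_driver(&example_driver), as sat_sensors_init() does below.
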
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index e207a90d6b2..e2a55ecda2b 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -26,34 +26,22 @@
#define MAX6690_EXTERNAL_TEMP 1
struct wf_6690_sensor {
- struct i2c_client i2c;
+ struct i2c_client *i2c;
struct wf_sensor sens;
};
#define wf_to_6690(x) container_of((x), struct wf_6690_sensor, sens)
-#define i2c_to_6690(x) container_of((x), struct wf_6690_sensor, i2c)
-
-static int wf_max6690_attach(struct i2c_adapter *adapter);
-static int wf_max6690_detach(struct i2c_client *client);
-
-static struct i2c_driver wf_max6690_driver = {
- .driver = {
- .name = "wf_max6690",
- },
- .attach_adapter = wf_max6690_attach,
- .detach_client = wf_max6690_detach,
-};
static int wf_max6690_get(struct wf_sensor *sr, s32 *value)
{
struct wf_6690_sensor *max = wf_to_6690(sr);
s32 data;
- if (max->i2c.adapter == NULL)
+ if (max->i2c == NULL)
return -ENODEV;
/* chip gets initialized by firmware */
- data = i2c_smbus_read_byte_data(&max->i2c, MAX6690_EXTERNAL_TEMP);
+ data = i2c_smbus_read_byte_data(max->i2c, MAX6690_EXTERNAL_TEMP);
if (data < 0)
return data;
*value = data << 16;
@@ -64,10 +52,6 @@ static void wf_max6690_release(struct wf_sensor *sr)
{
struct wf_6690_sensor *max = wf_to_6690(sr);
- if (max->i2c.adapter) {
- i2c_detach_client(&max->i2c);
- max->i2c.adapter = NULL;
- }
kfree(max);
}
@@ -77,19 +61,40 @@ static struct wf_sensor_ops wf_max6690_ops = {
.owner = THIS_MODULE,
};
-static void wf_max6690_create(struct i2c_adapter *adapter, u8 addr,
- const char *loc)
+static int wf_max6690_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct wf_6690_sensor *max;
- char *name;
+ int rc;
max = kzalloc(sizeof(struct wf_6690_sensor), GFP_KERNEL);
if (max == NULL) {
- printk(KERN_ERR "windfarm: Couldn't create MAX6690 sensor %s: "
- "no memory\n", loc);
- return;
+ printk(KERN_ERR "windfarm: Couldn't create MAX6690 sensor: "
+ "no memory\n");
+ return -ENOMEM;
+ }
+
+ max->i2c = client;
+ max->sens.name = client->dev.platform_data;
+ max->sens.ops = &wf_max6690_ops;
+ i2c_set_clientdata(client, max);
+
+ rc = wf_register_sensor(&max->sens);
+ if (rc) {
+ i2c_set_clientdata(client, NULL);
+ kfree(max);
}
+ return rc;
+}
+
+static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter,
+ u8 addr, const char *loc)
+{
+ struct i2c_board_info info;
+ struct i2c_client *client;
+ char *name;
+
if (!strcmp(loc, "BACKSIDE"))
name = "backside-temp";
else if (!strcmp(loc, "NB Ambient"))
@@ -99,27 +104,26 @@ static void wf_max6690_create(struct i2c_adapter *adapter, u8 addr,
else
goto fail;
- max->sens.ops = &wf_max6690_ops;
- max->sens.name = name;
- max->i2c.addr = addr >> 1;
- max->i2c.adapter = adapter;
- max->i2c.driver = &wf_max6690_driver;
- strncpy(max->i2c.name, name, I2C_NAME_SIZE-1);
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ info.addr = addr >> 1;
+ info.platform_data = name;
+ strlcpy(info.type, "wf_max6690", I2C_NAME_SIZE);
- if (i2c_attach_client(&max->i2c)) {
+ client = i2c_new_device(adapter, &info);
+ if (client == NULL) {
printk(KERN_ERR "windfarm: failed to attach MAX6690 sensor\n");
goto fail;
}
- if (wf_register_sensor(&max->sens)) {
- i2c_detach_client(&max->i2c);
- goto fail;
- }
-
- return;
+ /*
+ * Let i2c-core delete that device on driver removal.
+ * This is safe because i2c-core holds the core_lock mutex for us.
+ */
+ list_add_tail(&client->detected, &client->driver->clients);
+ return client;
fail:
- kfree(max);
+ return NULL;
}
static int wf_max6690_attach(struct i2c_adapter *adapter)
@@ -154,16 +158,31 @@ static int wf_max6690_attach(struct i2c_adapter *adapter)
return 0;
}
-static int wf_max6690_detach(struct i2c_client *client)
+static int wf_max6690_remove(struct i2c_client *client)
{
- struct wf_6690_sensor *max = i2c_to_6690(client);
+ struct wf_6690_sensor *max = i2c_get_clientdata(client);
- max->i2c.adapter = NULL;
+ max->i2c = NULL;
wf_unregister_sensor(&max->sens);
return 0;
}
+static const struct i2c_device_id wf_max6690_id[] = {
+ { "wf_max6690", 0 },
+ { }
+};
+
+static struct i2c_driver wf_max6690_driver = {
+ .driver = {
+ .name = "wf_max6690",
+ },
+ .attach_adapter = wf_max6690_attach,
+ .probe = wf_max6690_probe,
+ .remove = wf_max6690_remove,
+ .id_table = wf_max6690_id,
+};
+
static int __init wf_max6690_sensor_init(void)
{
/* Don't register on old machines that use therm_pm72 for now */
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 7847e981ac3..5da729e58f9 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -39,7 +39,7 @@ struct wf_sat {
struct mutex mutex;
unsigned long last_read; /* jiffies when cache last updated */
u8 cache[16];
- struct i2c_client i2c;
+ struct i2c_client *i2c;
struct device_node *node;
};
@@ -54,18 +54,6 @@ struct wf_sat_sensor {
};
#define wf_to_sat(c) container_of(c, struct wf_sat_sensor, sens)
-#define i2c_to_sat(c) container_of(c, struct wf_sat, i2c)
-
-static int wf_sat_attach(struct i2c_adapter *adapter);
-static int wf_sat_detach(struct i2c_client *client);
-
-static struct i2c_driver wf_sat_driver = {
- .driver = {
- .name = "wf_smu_sat",
- },
- .attach_adapter = wf_sat_attach,
- .detach_client = wf_sat_detach,
-};
struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
unsigned int *size)
@@ -81,13 +69,13 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
if (sat_id > 1 || (sat = sats[sat_id]) == NULL)
return NULL;
- err = i2c_smbus_write_word_data(&sat->i2c, 8, id << 8);
+ err = i2c_smbus_write_word_data(sat->i2c, 8, id << 8);
if (err) {
printk(KERN_ERR "smu_sat_get_sdb_part wr error %d\n", err);
return NULL;
}
- err = i2c_smbus_read_word_data(&sat->i2c, 9);
+ err = i2c_smbus_read_word_data(sat->i2c, 9);
if (err < 0) {
printk(KERN_ERR "smu_sat_get_sdb_part rd len error\n");
return NULL;
@@ -105,7 +93,7 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
return NULL;
for (i = 0; i < len; i += 4) {
- err = i2c_smbus_read_i2c_block_data(&sat->i2c, 0xa, 4, data);
+ err = i2c_smbus_read_i2c_block_data(sat->i2c, 0xa, 4, data);
if (err < 0) {
printk(KERN_ERR "smu_sat_get_sdb_part rd err %d\n",
err);
@@ -138,7 +126,7 @@ static int wf_sat_read_cache(struct wf_sat *sat)
{
int err;
- err = i2c_smbus_read_i2c_block_data(&sat->i2c, 0x3f, 16, sat->cache);
+ err = i2c_smbus_read_i2c_block_data(sat->i2c, 0x3f, 16, sat->cache);
if (err < 0)
return err;
sat->last_read = jiffies;
@@ -161,7 +149,7 @@ static int wf_sat_get(struct wf_sensor *sr, s32 *value)
int i, err;
s32 val;
- if (sat->i2c.adapter == NULL)
+ if (sat->i2c == NULL)
return -ENODEV;
mutex_lock(&sat->mutex);
@@ -193,10 +181,6 @@ static void wf_sat_release(struct wf_sensor *sr)
struct wf_sat *sat = sens->sat;
if (atomic_dec_and_test(&sat->refcnt)) {
- if (sat->i2c.adapter) {
- i2c_detach_client(&sat->i2c);
- sat->i2c.adapter = NULL;
- }
if (sat->nr >= 0)
sats[sat->nr] = NULL;
kfree(sat);
@@ -212,38 +196,58 @@ static struct wf_sensor_ops wf_sat_ops = {
static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
{
+ struct i2c_board_info info;
+ struct i2c_client *client;
+ const u32 *reg;
+ u8 addr;
+
+ reg = of_get_property(dev, "reg", NULL);
+ if (reg == NULL)
+ return;
+ addr = *reg;
+ DBG(KERN_DEBUG "wf_sat: creating sat at address %x\n", addr);
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ info.addr = (addr >> 1) & 0x7f;
+ info.platform_data = dev;
+ strlcpy(info.type, "wf_sat", I2C_NAME_SIZE);
+
+ client = i2c_new_device(adapter, &info);
+ if (client == NULL) {
+ printk(KERN_ERR "windfarm: failed to attach smu-sat to i2c\n");
+ return;
+ }
+
+ /*
+ * Let i2c-core delete that device on driver removal.
+ * This is safe because i2c-core holds the core_lock mutex for us.
+ */
+ list_add_tail(&client->detected, &client->driver->clients);
+}
+
+static int wf_sat_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device_node *dev = client->dev.platform_data;
struct wf_sat *sat;
struct wf_sat_sensor *sens;
const u32 *reg;
const char *loc, *type;
- u8 addr, chip, core;
+ u8 chip, core;
struct device_node *child;
int shift, cpu, index;
char *name;
int vsens[2], isens[2];
- reg = of_get_property(dev, "reg", NULL);
- if (reg == NULL)
- return;
- addr = *reg;
- DBG(KERN_DEBUG "wf_sat: creating sat at address %x\n", addr);
-
sat = kzalloc(sizeof(struct wf_sat), GFP_KERNEL);
if (sat == NULL)
- return;
+ return -ENOMEM;
sat->nr = -1;
sat->node = of_node_get(dev);
atomic_set(&sat->refcnt, 0);
mutex_init(&sat->mutex);
- sat->i2c.addr = (addr >> 1) & 0x7f;
- sat->i2c.adapter = adapter;
- sat->i2c.driver = &wf_sat_driver;
- strncpy(sat->i2c.name, "smu-sat", I2C_NAME_SIZE-1);
-
- if (i2c_attach_client(&sat->i2c)) {
- printk(KERN_ERR "windfarm: failed to attach smu-sat to i2c\n");
- goto fail;
- }
+ sat->i2c = client;
+ i2c_set_clientdata(client, sat);
vsens[0] = vsens[1] = -1;
isens[0] = isens[1] = -1;
@@ -344,10 +348,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
if (sat->nr >= 0)
sats[sat->nr] = sat;
- return;
-
- fail:
- kfree(sat);
+ return 0;
}
static int wf_sat_attach(struct i2c_adapter *adapter)
@@ -366,16 +367,32 @@ static int wf_sat_attach(struct i2c_adapter *adapter)
return 0;
}
-static int wf_sat_detach(struct i2c_client *client)
+static int wf_sat_remove(struct i2c_client *client)
{
- struct wf_sat *sat = i2c_to_sat(client);
+ struct wf_sat *sat = i2c_get_clientdata(client);
/* XXX TODO */
- sat->i2c.adapter = NULL;
+ sat->i2c = NULL;
+ i2c_set_clientdata(client, NULL);
return 0;
}
+static const struct i2c_device_id wf_sat_id[] = {
+ { "wf_sat", 0 },
+ { }
+};
+
+static struct i2c_driver wf_sat_driver = {
+ .driver = {
+ .name = "wf_smu_sat",
+ },
+ .attach_adapter = wf_sat_attach,
+ .probe = wf_sat_probe,
+ .remove = wf_sat_remove,
+ .id_table = wf_sat_id,
+};
+
static int __init sat_sensors_init(void)
{
return i2c_add_driver(&wf_sat_driver);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 47c68bc75a1..3319c2fec28 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -232,7 +232,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset,
target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
if (sync_page_io(rdev->bdev, target,
- roundup(size, bdev_hardsect_size(rdev->bdev)),
+ roundup(size, bdev_logical_block_size(rdev->bdev)),
page, READ)) {
page->index = index;
attach_page_buffers(page, NULL); /* so that free_buffer will
@@ -287,7 +287,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
int size = PAGE_SIZE;
if (page->index == bitmap->file_pages-1)
size = roundup(bitmap->last_page_size,
- bdev_hardsect_size(rdev->bdev));
+ bdev_logical_block_size(rdev->bdev));
/* Just make sure we aren't corrupting data or
* metadata
*/
@@ -1097,14 +1097,12 @@ void bitmap_daemon_work(struct bitmap *bitmap)
}
bitmap->allclean = 1;
+ spin_lock_irqsave(&bitmap->lock, flags);
for (j = 0; j < bitmap->chunks; j++) {
bitmap_counter_t *bmc;
- spin_lock_irqsave(&bitmap->lock, flags);
- if (!bitmap->filemap) {
+ if (!bitmap->filemap)
/* error or shutdown */
- spin_unlock_irqrestore(&bitmap->lock, flags);
break;
- }
page = filemap_get_page(bitmap, j);
@@ -1121,6 +1119,8 @@ void bitmap_daemon_work(struct bitmap *bitmap)
write_page(bitmap, page, 0);
bitmap->allclean = 0;
}
+ spin_lock_irqsave(&bitmap->lock, flags);
+ j |= (PAGE_BITS - 1);
continue;
}
@@ -1181,9 +1181,10 @@ void bitmap_daemon_work(struct bitmap *bitmap)
ext2_clear_bit(file_page_offset(j), paddr);
kunmap_atomic(paddr, KM_USER0);
}
- }
- spin_unlock_irqrestore(&bitmap->lock, flags);
+ } else
+ j |= PAGE_COUNTER_MASK;
}
+ spin_unlock_irqrestore(&bitmap->lock, flags);
/* now sync the final page */
if (lastpage != NULL) {
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index a2e26c24214..75d8081a904 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -178,7 +178,7 @@ static int set_chunk_size(struct dm_exception_store *store,
}
/* Validate the chunk size against the device block size */
- if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) {
+ if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
*error = "Chunk size is not a multiple of device blocksize";
return -EINVAL;
}
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 823ceba6efa..1128d3fba79 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1513,6 +1513,7 @@ static const struct file_operations _ctl_fops = {
static struct miscdevice _dm_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = DM_NAME,
+ .devnode = "mapper/control",
.fops = &_ctl_fops
};
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index be233bc4d91..6fa8ccf91c7 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -413,7 +413,8 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
* Buffer holds both header and bitset.
*/
buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
- bitset_size, ti->limits.hardsect_size);
+ bitset_size,
+ ti->limits.logical_block_size);
if (buf_size > dev->bdev->bd_inode->i_size) {
DMWARN("log device %s too small: need %llu bytes",
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index e75c6dd76a9..2662a41337e 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -282,7 +282,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
*/
if (!ps->store->chunk_size) {
ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
- bdev_hardsect_size(ps->store->cow->bdev) >> 9);
+ bdev_logical_block_size(ps->store->cow->bdev) >> 9);
ps->store->chunk_mask = ps->store->chunk_size - 1;
ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
chunk_size_supplied = 0;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 429b50b975d..e9a73bb242b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -108,7 +108,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
lhs->max_hw_segments =
min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
- lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
+ lhs->logical_block_size = max(lhs->logical_block_size,
+ rhs->logical_block_size);
lhs->max_segment_size =
min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
@@ -509,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
* combine_restrictions_low()
*/
rs->max_sectors =
- min_not_zero(rs->max_sectors, q->max_sectors);
+ min_not_zero(rs->max_sectors, queue_max_sectors(q));
/*
* Check if merge fn is supported.
@@ -524,24 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
rs->max_phys_segments =
min_not_zero(rs->max_phys_segments,
- q->max_phys_segments);
+ queue_max_phys_segments(q));
rs->max_hw_segments =
- min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+ min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
- rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
+ rs->logical_block_size = max(rs->logical_block_size,
+ queue_logical_block_size(q));
rs->max_segment_size =
- min_not_zero(rs->max_segment_size, q->max_segment_size);
+ min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
rs->max_hw_sectors =
- min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+ min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
rs->seg_boundary_mask =
min_not_zero(rs->seg_boundary_mask,
- q->seg_boundary_mask);
+ queue_segment_boundary(q));
- rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+ rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
@@ -683,8 +685,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
rs->max_phys_segments = MAX_PHYS_SEGMENTS;
if (!rs->max_hw_segments)
rs->max_hw_segments = MAX_HW_SEGMENTS;
- if (!rs->hardsect_size)
- rs->hardsect_size = 1 << SECTOR_SHIFT;
+ if (!rs->logical_block_size)
+ rs->logical_block_size = 1 << SECTOR_SHIFT;
if (!rs->max_segment_size)
rs->max_segment_size = MAX_SEGMENT_SIZE;
if (!rs->seg_boundary_mask)
@@ -912,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
* restrictions.
*/
blk_queue_max_sectors(q, t->limits.max_sectors);
- q->max_phys_segments = t->limits.max_phys_segments;
- q->max_hw_segments = t->limits.max_hw_segments;
- q->hardsect_size = t->limits.hardsect_size;
- q->max_segment_size = t->limits.max_segment_size;
- q->max_hw_sectors = t->limits.max_hw_sectors;
- q->seg_boundary_mask = t->limits.seg_boundary_mask;
- q->bounce_pfn = t->limits.bounce_pfn;
+ blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+ blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+ blk_queue_logical_block_size(q, t->limits.logical_block_size);
+ blk_queue_max_segment_size(q, t->limits.max_segment_size);
+ blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+ blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+ blk_queue_bounce_limit(q, t->limits.bounce_pfn);
if (t->limits.no_cluster)
queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
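
The dm-table.c hunks above belong to a tree-wide cleanup visible throughout this diff: request_queue limits are no longer read or written as raw struct fields. Reads go through the queue_*() accessors (queue_max_sectors(), queue_logical_block_size(), ...), writes go through the blk_queue_*() helpers, and hardsect_size is renamed logical_block_size everywhere. A hedged sketch of the resulting shape, using only calls that appear in the hunks above; copy_limits() itself and its two queue arguments are placeholders:

#include <linux/blkdev.h>

/* Propagate a source queue's limits to a destination queue through the
 * accessor/helper pairs instead of direct field access. */
static void copy_limits(struct request_queue *src, struct request_queue *dst)
{
	/* old code: dst->max_sectors = src->max_sectors; and so on */
	blk_queue_max_sectors(dst, queue_max_sectors(src));
	blk_queue_max_hw_sectors(dst, queue_max_hw_sectors(src));
	blk_queue_max_phys_segments(dst, queue_max_phys_segments(src));
	blk_queue_max_hw_segments(dst, queue_max_hw_segments(src));
	blk_queue_max_segment_size(dst, queue_max_segment_size(src));
	blk_queue_logical_block_size(dst, queue_logical_block_size(src));
	blk_queue_segment_boundary(dst, queue_segment_boundary(src));
}
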
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e2ee4a79ea2..48db308fae6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -19,8 +19,8 @@
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
-#include <linux/blktrace_api.h>
-#include <trace/block.h>
+
+#include <trace/events/block.h>
#define DM_MSG_PREFIX "core"
@@ -53,8 +53,6 @@ struct dm_target_io {
union map_info info;
};
-DEFINE_TRACE(block_bio_complete);
-
/*
* For request-based dm.
* One of these is allocated per request.
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7a36e38393a..64f1f3e046e 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->num_sectors = rdev->sectors;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fccc8343a25..20f6ac33834 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1202,7 +1202,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
- bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
+ bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
@@ -1375,6 +1375,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->raid_disks = cpu_to_le32(mddev->raid_disks);
sb->size = cpu_to_le64(mddev->dev_sectors);
+ sb->chunksize = cpu_to_le32(mddev->chunk_size >> 9);
+ sb->level = cpu_to_le32(mddev->level);
+ sb->layout = cpu_to_le32(mddev->layout);
if (mddev->bitmap && mddev->bitmap_file == NULL) {
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
@@ -3303,7 +3306,9 @@ static ssize_t
action_show(mddev_t *mddev, char *page)
{
char *type = "idle";
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+ type = "frozen";
+ else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
(!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
type = "reshape";
@@ -3326,7 +3331,12 @@ action_store(mddev_t *mddev, const char *page, size_t len)
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
- if (cmd_match(page, "idle")) {
+ if (cmd_match(page, "frozen"))
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ else
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+ if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_unregister_thread(mddev->sync_thread);
@@ -3680,7 +3690,7 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
if (strict_blocks_to_sectors(buf, &sectors) < 0)
return -EINVAL;
if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
- return -EINVAL;
+ return -E2BIG;
mddev->external_size = 1;
}
@@ -5557,7 +5567,7 @@ static struct block_device_operations md_fops =
.owner = THIS_MODULE,
.open = md_open,
.release = md_release,
- .locked_ioctl = md_ioctl,
+ .ioctl = md_ioctl,
.getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
@@ -6352,12 +6362,13 @@ void md_do_sync(mddev_t *mddev)
skipped = 0;
- if ((mddev->curr_resync > mddev->curr_resync_completed &&
- (mddev->curr_resync - mddev->curr_resync_completed)
- > (max_sectors >> 4)) ||
- (j - mddev->curr_resync_completed)*2
- >= mddev->resync_max - mddev->curr_resync_completed
- ) {
+ if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ ((mddev->curr_resync > mddev->curr_resync_completed &&
+ (mddev->curr_resync - mddev->curr_resync_completed)
+ > (max_sectors >> 4)) ||
+ (j - mddev->curr_resync_completed)*2
+ >= mddev->resync_max - mddev->curr_resync_completed
+ )) {
/* time to update curr_resync_completed */
blk_unplug(mddev->queue);
wait_event(mddev->recovery_wait,
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 41ced0cbe82..4ee31aa13c4 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* merge_bvec_fn will be involved in multipath.)
*/
if (q->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(q) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
conf->working_disks++;
@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
* violating it, not that we ever expect a device with
* a merge_bvec_fn to be involved in multipath */
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
if (!test_bit(Faulty, &rdev->flags))
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c08d7559be5..925507e7d67 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
*/
if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
if (!smallest || (rdev1->sectors < smallest->sectors))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 36df9109cde..e23758b4a34 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 499620afb44..750550c1166 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
- mddev->queue->max_sectors = (PAGE_SIZE>>9);
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+ blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
rdev->raid_disk = mirror;
@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
- mddev->queue->max_sectors = (PAGE_SIZE>>9);
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+ blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4616bc3a6e7..bef87669823 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -362,7 +362,7 @@ static void raid5_unplug_device(struct request_queue *q);
static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
- int previous, int noblock)
+ int previous, int noblock, int noquiesce)
{
struct stripe_head *sh;
@@ -372,7 +372,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
do {
wait_event_lock_irq(conf->wait_for_stripe,
- conf->quiesce == 0,
+ conf->quiesce == 0 || noquiesce,
conf->device_lock, /* nothing */);
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
@@ -2671,7 +2671,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
sector_t bn = compute_blocknr(sh, i, 1);
sector_t s = raid5_compute_sector(conf, bn, 0,
&dd_idx, NULL);
- sh2 = get_active_stripe(conf, s, 0, 1);
+ sh2 = get_active_stripe(conf, s, 0, 1, 1);
if (sh2 == NULL)
/* so far only the early blocks of this stripe
* have been requested. When later blocks
@@ -2944,7 +2944,7 @@ static bool handle_stripe5(struct stripe_head *sh)
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
struct stripe_head *sh2
- = get_active_stripe(conf, sh->sector, 1, 1);
+ = get_active_stripe(conf, sh->sector, 1, 1, 1);
if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
/* sh cannot be written until sh2 has been read.
* so arrange for sh to be delayed a little
@@ -3189,7 +3189,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
struct stripe_head *sh2
- = get_active_stripe(conf, sh->sector, 1, 1);
+ = get_active_stripe(conf, sh->sector, 1, 1, 1);
if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
/* sh cannot be written until sh2 has been read.
* so arrange for sh to be delayed a little
@@ -3288,7 +3288,7 @@ static void unplug_slaves(mddev_t *mddev)
int i;
rcu_read_lock();
- for (i=0; i<mddev->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
{
struct request_queue *q = bdev_get_queue(bi->bi_bdev);
- if ((bi->bi_size>>9) > q->max_sectors)
+ if ((bi->bi_size>>9) > queue_max_sectors(q))
return 0;
blk_recount_segments(q, bi);
- if (bi->bi_phys_segments > q->max_phys_segments)
+ if (bi->bi_phys_segments > queue_max_phys_segments(q))
return 0;
if (q->merge_bvec_fn)
@@ -3675,7 +3675,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
(unsigned long long)logical_sector);
sh = get_active_stripe(conf, new_sector, previous,
- (bi->bi_rw&RWA_MASK));
+ (bi->bi_rw&RWA_MASK), 0);
if (sh) {
if (unlikely(previous)) {
/* expansion might have moved on while waiting for a
@@ -3811,13 +3811,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
safepos = conf->reshape_safe;
sector_div(safepos, data_disks);
if (mddev->delta_disks < 0) {
- writepos -= reshape_sectors;
+ writepos -= min_t(sector_t, reshape_sectors, writepos);
readpos += reshape_sectors;
safepos += reshape_sectors;
} else {
writepos += reshape_sectors;
- readpos -= reshape_sectors;
- safepos -= reshape_sectors;
+ readpos -= min_t(sector_t, reshape_sectors, readpos);
+ safepos -= min_t(sector_t, reshape_sectors, safepos);
}
/* 'writepos' is the most advanced device address we might write.
@@ -3873,7 +3873,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
int j;
int skipped = 0;
- sh = get_active_stripe(conf, stripe_addr+i, 0, 0);
+ sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
set_bit(STRIPE_EXPANDING, &sh->state);
atomic_inc(&conf->reshape_stripes);
/* If any of this stripe is beyond the end of the old
@@ -3916,13 +3916,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
raid5_compute_sector(conf, stripe_addr*(new_data_disks),
1, &dd_idx, NULL);
last_sector =
- raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512)
+ raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
*(new_data_disks) - 1),
1, &dd_idx, NULL);
if (last_sector >= mddev->dev_sectors)
last_sector = mddev->dev_sectors - 1;
while (first_sector <= last_sector) {
- sh = get_active_stripe(conf, first_sector, 1, 0);
+ sh = get_active_stripe(conf, first_sector, 1, 0, 1);
set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
@@ -4022,9 +4022,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
- sh = get_active_stripe(conf, sector_nr, 0, 1);
+ sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
if (sh == NULL) {
- sh = get_active_stripe(conf, sector_nr, 0, 0);
+ sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
/* make sure we don't swamp the stripe cache if someone else
* is trying to get access
*/
@@ -4034,7 +4034,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
* We don't need to check the 'failed' flag as when that gets set,
* recovery aborts.
*/
- for (i=0; i<mddev->raid_disks; i++)
+ for (i = 0; i < conf->raid_disks; i++)
if (conf->disks[i].rdev == NULL)
still_degraded = 1;
@@ -4086,7 +4086,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
/* already done this stripe */
continue;
- sh = get_active_stripe(conf, sector, 0, 1);
+ sh = get_active_stripe(conf, sector, 0, 1, 0);
if (!sh) {
/* failed to get a stripe - must wait */
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 223c36ede5a..ba69beeb0e2 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -2,8 +2,14 @@
# Multimedia device configuration
#
-menu "Multimedia devices"
+menuconfig MEDIA_SUPPORT
+ tristate "Multimedia support"
depends on HAS_IOMEM
+ help
+ If you want to use Video for Linux, DVB for Linux, or DAB adapters,
+ enable this option and other options below.
+
+if MEDIA_SUPPORT
comment "Multimedia core support"
@@ -136,4 +142,4 @@ config USB_DABUSB
module will be called dabusb.
endif # DAB
-endmenu
+endif # MEDIA_SUPPORT
diff --git a/drivers/media/common/tuners/tuner-simple.c b/drivers/media/common/tuners/tuner-simple.c
index 78412c9c424..149d54cdf7b 100644
--- a/drivers/media/common/tuners/tuner-simple.c
+++ b/drivers/media/common/tuners/tuner-simple.c
@@ -416,6 +416,24 @@ static int simple_std_setup(struct dvb_frontend *fe,
return 0;
}
+static int simple_set_aux_byte(struct dvb_frontend *fe, u8 config, u8 aux)
+{
+ struct tuner_simple_priv *priv = fe->tuner_priv;
+ int rc;
+ u8 buffer[2];
+
+ buffer[0] = (config & ~0x38) | 0x18;
+ buffer[1] = aux;
+
+ tuner_dbg("setting aux byte: 0x%02x 0x%02x\n", buffer[0], buffer[1]);
+
+ rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 2);
+ if (2 != rc)
+ tuner_warn("i2c i/o error: rc == %d (should be 2)\n", rc);
+
+ return rc == 2 ? 0 : rc;
+}
+
static int simple_post_tune(struct dvb_frontend *fe, u8 *buffer,
u16 div, u8 config, u8 cb)
{
@@ -424,17 +442,10 @@ static int simple_post_tune(struct dvb_frontend *fe, u8 *buffer,
switch (priv->type) {
case TUNER_LG_TDVS_H06XF:
- /* Set the Auxiliary Byte. */
- buffer[0] = buffer[2];
- buffer[0] &= ~0x20;
- buffer[0] |= 0x18;
- buffer[1] = 0x20;
- tuner_dbg("tv 0x%02x 0x%02x\n", buffer[0], buffer[1]);
-
- rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 2);
- if (2 != rc)
- tuner_warn("i2c i/o error: rc == %d "
- "(should be 2)\n", rc);
+ simple_set_aux_byte(fe, config, 0x20);
+ break;
+ case TUNER_PHILIPS_FQ1216LME_MK3:
+ simple_set_aux_byte(fe, config, 0x60); /* External AGC */
break;
case TUNER_MICROTUNE_4042FI5:
{
@@ -506,6 +517,11 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
case TUNER_THOMSON_DTT761X:
buffer[3] = 0x39;
break;
+ case TUNER_PHILIPS_FQ1216LME_MK3:
+ tuner_err("This tuner doesn't have FM\n");
+ /* Set the low band for sanity, since it covers 88-108 MHz */
+ buffer[3] = 0x01;
+ break;
case TUNER_MICROTUNE_4049FM5:
default:
buffer[3] = 0xa4;
@@ -678,12 +694,12 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
return 0;
}
- /* Bandswitch byte */
- simple_radio_bandswitch(fe, &buffer[0]);
-
buffer[2] = (t_params->ranges[0].config & ~TUNER_RATIO_MASK) |
TUNER_RATIO_SELECT_50; /* 50 kHz step */
+ /* Bandswitch byte */
+ simple_radio_bandswitch(fe, &buffer[0]);
+
/* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps
freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) =
freq * (1/800) */
diff --git a/drivers/media/common/tuners/tuner-types.c b/drivers/media/common/tuners/tuner-types.c
index 7c0bc064c00..6a7f1a417c2 100644
--- a/drivers/media/common/tuners/tuner-types.c
+++ b/drivers/media/common/tuners/tuner-types.c
@@ -578,6 +578,31 @@ static struct tuner_params tuner_fm1216me_mk3_params[] = {
},
};
+/* ------------ TUNER_PHILIPS_FM1216MK5 - Philips PAL ------------ */
+
+static struct tuner_range tuner_fm1216mk5_pal_ranges[] = {
+ { 16 * 158.00 /*MHz*/, 0xce, 0x01, },
+ { 16 * 441.00 /*MHz*/, 0xce, 0x02, },
+ { 16 * 864.00 , 0xce, 0x04, },
+};
+
+static struct tuner_params tuner_fm1216mk5_params[] = {
+ {
+ .type = TUNER_PARAM_TYPE_PAL,
+ .ranges = tuner_fm1216mk5_pal_ranges,
+ .count = ARRAY_SIZE(tuner_fm1216mk5_pal_ranges),
+ .cb_first_if_lower_freq = 1,
+ .has_tda9887 = 1,
+ .port1_active = 1,
+ .port2_active = 1,
+ .port2_invert_for_secam_lc = 1,
+ .port1_fm_high_sensitivity = 1,
+ .default_top_mid = -2,
+ .default_top_secam_mid = -2,
+ .default_top_secam_high = -2,
+ },
+};
+
/* ------------ TUNER_LG_NTSC_NEW_TAPC - LGINNOTEK NTSC ------------ */
static struct tuner_params tuner_lg_ntsc_new_tapc_params[] = {
@@ -1254,6 +1279,28 @@ static struct tuner_params tuner_tcl_mf02gip_5n_params[] = {
},
};
+/* 80-89 */
+/* --------- TUNER_PHILIPS_FQ1216LME_MK3 -- active loopthrough, no FM ------- */
+
+static struct tuner_params tuner_fq1216lme_mk3_params[] = {
+ {
+ .type = TUNER_PARAM_TYPE_PAL,
+ .ranges = tuner_fm1216me_mk3_pal_ranges,
+ .count = ARRAY_SIZE(tuner_fm1216me_mk3_pal_ranges),
+ .cb_first_if_lower_freq = 1, /* not specified, but safe to do */
+ .has_tda9887 = 1, /* TDA9886 */
+ .port1_active = 1,
+ .port2_active = 1,
+ .port2_invert_for_secam_lc = 1,
+ .default_top_low = 4,
+ .default_top_mid = 4,
+ .default_top_high = 4,
+ .default_top_secam_low = 4,
+ .default_top_secam_mid = 4,
+ .default_top_secam_high = 4,
+ },
+};
+
/* --------------------------------------------------------------------- */
struct tunertype tuners[] = {
@@ -1694,6 +1741,18 @@ struct tunertype tuners[] = {
.initdata = tua603x_agc112,
.sleepdata = (u8[]){ 4, 0x9c, 0x60, 0x85, 0x54 },
},
+ [TUNER_PHILIPS_FM1216MK5] = { /* Philips PAL */
+ .name = "Philips PAL/SECAM multi (FM1216 MK5)",
+ .params = tuner_fm1216mk5_params,
+ .count = ARRAY_SIZE(tuner_fm1216mk5_params),
+ },
+
+ /* 80-89 */
+ [TUNER_PHILIPS_FQ1216LME_MK3] = { /* PAL/SECAM, Loop-thru, no FM */
+ .name = "Philips FQ1216LME MK3 PAL/SECAM w/active loopthrough",
+ .params = tuner_fq1216lme_mk3_params,
+ .count = ARRAY_SIZE(tuner_fq1216lme_mk3_params),
+ },
};
EXPORT_SYMBOL(tuners);
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 1adce9ff52c..b6da9c3873f 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -30,7 +30,7 @@ MODULE_PARM_DESC(debug, "enable verbose debug messages");
static int no_poweroff;
module_param(no_poweroff, int, 0644);
-MODULE_PARM_DESC(debug, "0 (default) powers device off when not used.\n"
+MODULE_PARM_DESC(no_poweroff, "0 (default) powers device off when not used.\n"
"1 keep device energized and with tuner ready all the times.\n"
" Faster, but consumes more power and keeps the device hotter\n");
@@ -48,7 +48,7 @@ MODULE_PARM_DESC(audio_std,
"NICAM/A\n"
"NICAM/B\n");
-static char firmware_name[FIRMWARE_NAME_MAX];
+static char firmware_name[30];
module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
"default firmware name\n");
@@ -272,7 +272,7 @@ static int load_all_firmwares(struct dvb_frontend *fe)
fname = firmware_name;
tuner_dbg("Reading firmware %s\n", fname);
- rc = request_firmware(&fw, fname, &priv->i2c_props.adap->dev);
+ rc = request_firmware(&fw, fname, priv->i2c_props.adap->dev.parent);
if (rc < 0) {
if (rc == -ENOENT)
tuner_err("Error: firmware %s not found.\n",
@@ -917,22 +917,29 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
* that xc2028 will be in a safe state.
* Maybe this might also be needed for DTV.
*/
- if (new_mode == T_ANALOG_TV) {
+ if (new_mode == T_ANALOG_TV)
rc = send_seq(priv, {0x00, 0x00});
- } else if (priv->cur_fw.type & ATSC) {
- offset = 1750000;
- } else {
- offset = 2750000;
+
+ /*
+ * Digital modes require an offset to adjust to the
+ * proper frequency.
+ * Analog modes require offset = 0
+ */
+ if (new_mode == T_DIGITAL_TV) {
+ /* Set the offset according to the firmware type */
+ if (priv->cur_fw.type & DTV6)
+ offset = 1750000;
+ else if (priv->cur_fw.type & DTV7)
+ offset = 2250000;
+ else /* DTV8 or DTV78 */
+ offset = 2750000;
+
/*
- * We must adjust the offset by 500kHz in two cases in order
- * to correctly center the IF output:
- * 1) When the ZARLINK456 or DIBCOM52 tables were explicitly
- * selected and a 7MHz channel is tuned;
- * 2) When tuning a VHF channel with DTV78 firmware.
+ * We must adjust the offset by 500kHz when
+ * tuning a 7MHz VHF channel with DTV78 firmware
+ * (used in Australia, Italy and Germany)
*/
- if (((priv->cur_fw.type & DTV7) &&
- (priv->cur_fw.scode_table & (ZARLINK456 | DIBCOM52))) ||
- ((priv->cur_fw.type & DTV78) && freq < 470000000))
+ if ((priv->cur_fw.type & DTV78) && freq < 470000000)
offset -= 500000;
}
@@ -991,7 +998,7 @@ static int xc2028_set_analog_freq(struct dvb_frontend *fe,
if (priv->ctrl.input1)
type |= INPUT1;
return generic_set_freq(fe, (625l * p->frequency) / 10,
- T_ANALOG_TV, type, 0, 0);
+ T_RADIO, type, 0, 0);
}
/* if std is not defined, choose one */
@@ -1022,21 +1029,20 @@ static int xc2028_set_params(struct dvb_frontend *fe,
switch(fe->ops.info.type) {
case FE_OFDM:
bw = p->u.ofdm.bandwidth;
- break;
- case FE_QAM:
- tuner_info("WARN: There are some reports that "
- "QAM 6 MHz doesn't work.\n"
- "If this works for you, please report by "
- "e-mail to: v4l-dvb-maintainer@linuxtv.org\n");
- bw = BANDWIDTH_6_MHZ;
- type |= QAM;
+ /*
+ * The only countries with 6MHz seem to be Taiwan/Uruguay.
+ * Both seem to require QAM firmware for OFDM decoding
+ * Tested in Taiwan by Terry Wu <terrywu2009@gmail.com>
+ */
+ if (bw == BANDWIDTH_6_MHZ)
+ type |= QAM;
break;
case FE_ATSC:
bw = BANDWIDTH_6_MHZ;
/* The only ATSC firmware (at least on v2.7) is D2633 */
type |= ATSC | D2633;
break;
- /* DVB-S is not supported */
+ /* DVB-S and pure QAM (FE_QAM) are not supported */
default:
return -EINVAL;
}
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index b54598550dc..f4ffcdc9b84 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -3,6 +3,7 @@
*
* Copyright (c) 2007 Xceive Corporation
* Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
+ * Copyright (c) 2009 Devin Heitmueller <dheitmueller@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -36,14 +37,20 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+static int no_poweroff;
+module_param(no_poweroff, int, 0644);
+MODULE_PARM_DESC(no_poweroff, "0 (default) powers device off when not used.\n"
+ "\t\t1 keep device energized and with tuner ready all the times.\n"
+ "\t\tFaster, but consumes more power and keeps the device hotter");
+
static DEFINE_MUTEX(xc5000_list_mutex);
static LIST_HEAD(hybrid_tuner_instance_list);
#define dprintk(level, fmt, arg...) if (debug >= level) \
printk(KERN_INFO "%s: " fmt, "xc5000", ## arg)
-#define XC5000_DEFAULT_FIRMWARE "dvb-fe-xc5000-1.1.fw"
-#define XC5000_DEFAULT_FIRMWARE_SIZE 12332
+#define XC5000_DEFAULT_FIRMWARE "dvb-fe-xc5000-1.6.114.fw"
+#define XC5000_DEFAULT_FIRMWARE_SIZE 12401
struct xc5000_priv {
struct tuner_i2c_props i2c_props;
@@ -83,11 +90,11 @@ struct xc5000_priv {
#define XREG_D_CODE 0x04
#define XREG_IF_OUT 0x05
#define XREG_SEEK_MODE 0x07
-#define XREG_POWER_DOWN 0x0A
+#define XREG_POWER_DOWN 0x0A /* Obsolete */
#define XREG_SIGNALSOURCE 0x0D /* 0=Air, 1=Cable */
#define XREG_SMOOTHEDCVBS 0x0E
#define XREG_XTALFREQ 0x0F
-#define XREG_FINERFFREQ 0x10
+#define XREG_FINERFREQ 0x10
#define XREG_DDIMODE 0x11
#define XREG_ADC_ENV 0x00
@@ -100,6 +107,7 @@ struct xc5000_priv {
#define XREG_VERSION 0x07
#define XREG_PRODUCT_ID 0x08
#define XREG_BUSY 0x09
+#define XREG_BUILD 0x0D
/*
Basic firmware description. This will remain with
@@ -191,27 +199,36 @@ static struct XC_TV_STANDARD XC5000_Standard[MAX_TV_STANDARD] = {
{"FM Radio-INPUT1", 0x0208, 0x9002}
};
-static int xc5000_is_firmware_loaded(struct dvb_frontend *fe);
-static int xc5000_writeregs(struct xc5000_priv *priv, u8 *buf, u8 len);
-static int xc5000_readregs(struct xc5000_priv *priv, u8 *buf, u8 len);
-static void xc5000_TunerReset(struct dvb_frontend *fe);
+static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe);
+static int xc5000_is_firmware_loaded(struct dvb_frontend *fe);
+static int xc5000_readreg(struct xc5000_priv *priv, u16 reg, u16 *val);
+static int xc5000_TunerReset(struct dvb_frontend *fe);
static int xc_send_i2c_data(struct xc5000_priv *priv, u8 *buf, int len)
{
- return xc5000_writeregs(priv, buf, len)
- ? XC_RESULT_I2C_WRITE_FAILURE : XC_RESULT_SUCCESS;
+ struct i2c_msg msg = { .addr = priv->i2c_props.addr,
+ .flags = 0, .buf = buf, .len = len };
+
+ if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) {
+ printk(KERN_ERR "xc5000: I2C write failed (len=%i)\n", len);
+ return XC_RESULT_I2C_WRITE_FAILURE;
+ }
+ return XC_RESULT_SUCCESS;
}
+/* This routine is never used because the only time we read data from the
+ i2c bus is when we read registers, and we want that to be an atomic i2c
+ transaction in case we are on a multi-master bus */
static int xc_read_i2c_data(struct xc5000_priv *priv, u8 *buf, int len)
{
- return xc5000_readregs(priv, buf, len)
- ? XC_RESULT_I2C_READ_FAILURE : XC_RESULT_SUCCESS;
-}
+ struct i2c_msg msg = { .addr = priv->i2c_props.addr,
+ .flags = I2C_M_RD, .buf = buf, .len = len };
-static int xc_reset(struct dvb_frontend *fe)
-{
- xc5000_TunerReset(fe);
- return XC_RESULT_SUCCESS;
+ if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) {
+ printk(KERN_ERR "xc5000 I2C read failed (len=%i)\n", len);
+ return -EREMOTEIO;
+ }
+ return 0;
}
static void xc_wait(int wait_ms)
@@ -219,7 +236,7 @@ static void xc_wait(int wait_ms)
msleep(wait_ms);
}
-static void xc5000_TunerReset(struct dvb_frontend *fe)
+static int xc5000_TunerReset(struct dvb_frontend *fe)
{
struct xc5000_priv *priv = fe->tuner_priv;
int ret;
@@ -232,16 +249,21 @@ static void xc5000_TunerReset(struct dvb_frontend *fe)
priv->i2c_props.adap->algo_data,
DVB_FRONTEND_COMPONENT_TUNER,
XC5000_TUNER_RESET, 0);
- if (ret)
+ if (ret) {
printk(KERN_ERR "xc5000: reset failed\n");
- } else
+ return XC_RESULT_RESET_FAILURE;
+ }
+ } else {
printk(KERN_ERR "xc5000: no tuner reset callback function, fatal\n");
+ return XC_RESULT_RESET_FAILURE;
+ }
+ return XC_RESULT_SUCCESS;
}
static int xc_write_reg(struct xc5000_priv *priv, u16 regAddr, u16 i2cData)
{
u8 buf[4];
- int WatchDogTimer = 5;
+ int WatchDogTimer = 100;
int result;
buf[0] = (regAddr >> 8) & 0xFF;
@@ -263,7 +285,7 @@ static int xc_write_reg(struct xc5000_priv *priv, u16 regAddr, u16 i2cData)
/* busy flag cleared */
break;
} else {
- xc_wait(100); /* wait 5 ms */
+ xc_wait(5); /* wait 5 ms */
WatchDogTimer--;
}
}
@@ -276,25 +298,6 @@ static int xc_write_reg(struct xc5000_priv *priv, u16 regAddr, u16 i2cData)
return result;
}
-static int xc_read_reg(struct xc5000_priv *priv, u16 regAddr, u16 *i2cData)
-{
- u8 buf[2];
- int result;
-
- buf[0] = (regAddr >> 8) & 0xFF;
- buf[1] = regAddr & 0xFF;
- result = xc_send_i2c_data(priv, buf, 2);
- if (result != XC_RESULT_SUCCESS)
- return result;
-
- result = xc_read_i2c_data(priv, buf, 2);
- if (result != XC_RESULT_SUCCESS)
- return result;
-
- *i2cData = buf[0] * 256 + buf[1];
- return result;
-}
-
static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence)
{
struct xc5000_priv *priv = fe->tuner_priv;
@@ -309,7 +312,7 @@ static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence)
len = i2c_sequence[index] * 256 + i2c_sequence[index+1];
if (len == 0x0000) {
/* RESET command */
- result = xc_reset(fe);
+ result = xc5000_TunerReset(fe);
index += 2;
if (result != XC_RESULT_SUCCESS)
return result;
@@ -371,15 +374,6 @@ static int xc_SetTVStandard(struct xc5000_priv *priv,
return ret;
}
-static int xc_shutdown(struct xc5000_priv *priv)
-{
- return XC_RESULT_SUCCESS;
- /* Fixme: cannot bring tuner back alive once shutdown
- * without reloading the driver modules.
- * return xc_write_reg(priv, XREG_POWER_DOWN, 0);
- */
-}
-
static int xc_SetSignalSource(struct xc5000_priv *priv, u16 rf_mode)
{
dprintk(1, "%s(%d) Source = %s\n", __func__, rf_mode,
@@ -408,7 +402,10 @@ static int xc_set_RF_frequency(struct xc5000_priv *priv, u32 freq_hz)
freq_code = (u16)(freq_hz / 15625);
- return xc_write_reg(priv, XREG_RF_FREQ, freq_code);
+ /* Starting in firmware version 1.1.44, Xceive recommends using the
+ FINERFREQ for all normal tuning (the doc indicates reg 0x03 should
+ only be used for fast scanning for channel lock) */
+ return xc_write_reg(priv, XREG_FINERFREQ, freq_code);
}
@@ -424,7 +421,7 @@ static int xc_set_IF_frequency(struct xc5000_priv *priv, u32 freq_khz)
static int xc_get_ADC_Envelope(struct xc5000_priv *priv, u16 *adc_envelope)
{
- return xc_read_reg(priv, XREG_ADC_ENV, adc_envelope);
+ return xc5000_readreg(priv, XREG_ADC_ENV, adc_envelope);
}
static int xc_get_frequency_error(struct xc5000_priv *priv, u32 *freq_error_hz)
@@ -433,8 +430,8 @@ static int xc_get_frequency_error(struct xc5000_priv *priv, u32 *freq_error_hz)
u16 regData;
u32 tmp;
- result = xc_read_reg(priv, XREG_FREQ_ERROR, &regData);
- if (result)
+ result = xc5000_readreg(priv, XREG_FREQ_ERROR, &regData);
+ if (result != XC_RESULT_SUCCESS)
return result;
tmp = (u32)regData;
@@ -444,7 +441,7 @@ static int xc_get_frequency_error(struct xc5000_priv *priv, u32 *freq_error_hz)
static int xc_get_lock_status(struct xc5000_priv *priv, u16 *lock_status)
{
- return xc_read_reg(priv, XREG_LOCK, lock_status);
+ return xc5000_readreg(priv, XREG_LOCK, lock_status);
}
static int xc_get_version(struct xc5000_priv *priv,
@@ -454,8 +451,8 @@ static int xc_get_version(struct xc5000_priv *priv,
u16 data;
int result;
- result = xc_read_reg(priv, XREG_VERSION, &data);
- if (result)
+ result = xc5000_readreg(priv, XREG_VERSION, &data);
+ if (result != XC_RESULT_SUCCESS)
return result;
(*hw_majorversion) = (data >> 12) & 0x0F;
@@ -466,13 +463,18 @@ static int xc_get_version(struct xc5000_priv *priv,
return 0;
}
+static int xc_get_buildversion(struct xc5000_priv *priv, u16 *buildrev)
+{
+ return xc5000_readreg(priv, XREG_BUILD, buildrev);
+}
+
static int xc_get_hsync_freq(struct xc5000_priv *priv, u32 *hsync_freq_hz)
{
u16 regData;
int result;
- result = xc_read_reg(priv, XREG_HSYNC_FREQ, &regData);
- if (result)
+ result = xc5000_readreg(priv, XREG_HSYNC_FREQ, &regData);
+ if (result != XC_RESULT_SUCCESS)
return result;
(*hsync_freq_hz) = ((regData & 0x0fff) * 763)/100;
@@ -481,12 +483,12 @@ static int xc_get_hsync_freq(struct xc5000_priv *priv, u32 *hsync_freq_hz)
static int xc_get_frame_lines(struct xc5000_priv *priv, u16 *frame_lines)
{
- return xc_read_reg(priv, XREG_FRAME_LINES, frame_lines);
+ return xc5000_readreg(priv, XREG_FRAME_LINES, frame_lines);
}
static int xc_get_quality(struct xc5000_priv *priv, u16 *quality)
{
- return xc_read_reg(priv, XREG_QUALITY, quality);
+ return xc5000_readreg(priv, XREG_QUALITY, quality);
}
static u16 WaitForLock(struct xc5000_priv *priv)
@@ -504,7 +506,9 @@ static u16 WaitForLock(struct xc5000_priv *priv)
return lockState;
}
-static int xc_tune_channel(struct xc5000_priv *priv, u32 freq_hz)
+#define XC_TUNE_ANALOG 0
+#define XC_TUNE_DIGITAL 1
+static int xc_tune_channel(struct xc5000_priv *priv, u32 freq_hz, int mode)
{
int found = 0;
@@ -513,8 +517,10 @@ static int xc_tune_channel(struct xc5000_priv *priv, u32 freq_hz)
if (xc_set_RF_frequency(priv, freq_hz) != XC_RESULT_SUCCESS)
return 0;
- if (WaitForLock(priv) == 1)
- found = 1;
+ if (mode == XC_TUNE_ANALOG) {
+ if (WaitForLock(priv) == 1)
+ found = 1;
+ }
return found;
}
@@ -536,32 +542,7 @@ static int xc5000_readreg(struct xc5000_priv *priv, u16 reg, u16 *val)
}
*val = (bval[0] << 8) | bval[1];
- return 0;
-}
-
-static int xc5000_writeregs(struct xc5000_priv *priv, u8 *buf, u8 len)
-{
- struct i2c_msg msg = { .addr = priv->i2c_props.addr,
- .flags = 0, .buf = buf, .len = len };
-
- if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) {
- printk(KERN_ERR "xc5000: I2C write failed (len=%i)\n",
- (int)len);
- return -EREMOTEIO;
- }
- return 0;
-}
-
-static int xc5000_readregs(struct xc5000_priv *priv, u8 *buf, u8 len)
-{
- struct i2c_msg msg = { .addr = priv->i2c_props.addr,
- .flags = I2C_M_RD, .buf = buf, .len = len };
-
- if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) {
- printk(KERN_ERR "xc5000 I2C read failed (len=%i)\n", (int)len);
- return -EREMOTEIO;
- }
- return 0;
+ return XC_RESULT_SUCCESS;
}
static int xc5000_fwupload(struct dvb_frontend *fe)
@@ -575,13 +556,13 @@ static int xc5000_fwupload(struct dvb_frontend *fe)
XC5000_DEFAULT_FIRMWARE);
ret = request_firmware(&fw, XC5000_DEFAULT_FIRMWARE,
- &priv->i2c_props.adap->dev);
+ priv->i2c_props.adap->dev.parent);
if (ret) {
printk(KERN_ERR "xc5000: Upload failed. (file not found?)\n");
ret = XC_RESULT_RESET_FAILURE;
goto out;
} else {
- printk(KERN_INFO "xc5000: firmware read %Zu bytes.\n",
+ printk(KERN_DEBUG "xc5000: firmware read %Zu bytes.\n",
fw->size);
ret = XC_RESULT_SUCCESS;
}
@@ -590,8 +571,9 @@ static int xc5000_fwupload(struct dvb_frontend *fe)
printk(KERN_ERR "xc5000: firmware incorrect size\n");
ret = XC_RESULT_RESET_FAILURE;
} else {
- printk(KERN_INFO "xc5000: firmware upload\n");
+ printk(KERN_INFO "xc5000: firmware uploading...\n");
ret = xc_load_i2c_sequence(fe, fw->data);
+ printk(KERN_INFO "xc5000: firmware upload complete...\n");
}
out:
@@ -609,6 +591,7 @@ static void xc_debug_dump(struct xc5000_priv *priv)
u16 quality;
u8 hw_majorversion = 0, hw_minorversion = 0;
u8 fw_majorversion = 0, fw_minorversion = 0;
+ u16 fw_buildversion = 0;
/* Wait for stats to stabilize.
* Frame Lines needs two frame times after initial lock
@@ -628,9 +611,10 @@ static void xc_debug_dump(struct xc5000_priv *priv)
xc_get_version(priv, &hw_majorversion, &hw_minorversion,
&fw_majorversion, &fw_minorversion);
- dprintk(1, "*** HW: V%02x.%02x, FW: V%02x.%02x\n",
+ xc_get_buildversion(priv, &fw_buildversion);
+ dprintk(1, "*** HW: V%02x.%02x, FW: V%02x.%02x.%04x\n",
hw_majorversion, hw_minorversion,
- fw_majorversion, fw_minorversion);
+ fw_majorversion, fw_minorversion, fw_buildversion);
xc_get_hsync_freq(priv, &hsync_freq_hz);
dprintk(1, "*** Horizontal sync frequency = %d Hz\n", hsync_freq_hz);
@@ -648,27 +632,57 @@ static int xc5000_set_params(struct dvb_frontend *fe,
struct xc5000_priv *priv = fe->tuner_priv;
int ret;
+ if (xc5000_is_firmware_loaded(fe) != XC_RESULT_SUCCESS)
+ xc_load_fw_and_init_tuner(fe);
+
dprintk(1, "%s() frequency=%d (Hz)\n", __func__, params->frequency);
- switch (params->u.vsb.modulation) {
- case VSB_8:
- case VSB_16:
- dprintk(1, "%s() VSB modulation\n", __func__);
+ if (fe->ops.info.type == FE_ATSC) {
+ dprintk(1, "%s() ATSC\n", __func__);
+ switch (params->u.vsb.modulation) {
+ case VSB_8:
+ case VSB_16:
+ dprintk(1, "%s() VSB modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_AIR;
+ priv->freq_hz = params->frequency - 1750000;
+ priv->bandwidth = BANDWIDTH_6_MHZ;
+ priv->video_standard = DTV6;
+ break;
+ case QAM_64:
+ case QAM_256:
+ case QAM_AUTO:
+ dprintk(1, "%s() QAM modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_CABLE;
+ priv->freq_hz = params->frequency - 1750000;
+ priv->bandwidth = BANDWIDTH_6_MHZ;
+ priv->video_standard = DTV6;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (fe->ops.info.type == FE_OFDM) {
+ dprintk(1, "%s() OFDM\n", __func__);
+ switch (params->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ priv->bandwidth = BANDWIDTH_6_MHZ;
+ priv->video_standard = DTV6;
+ priv->freq_hz = params->frequency - 1750000;
+ break;
+ case BANDWIDTH_7_MHZ:
+ printk(KERN_ERR "xc5000 bandwidth 7MHz not supported\n");
+ return -EINVAL;
+ case BANDWIDTH_8_MHZ:
+ priv->bandwidth = BANDWIDTH_8_MHZ;
+ priv->video_standard = DTV8;
+ priv->freq_hz = params->frequency - 2750000;
+ break;
+ default:
+ printk(KERN_ERR "xc5000 bandwidth not set!\n");
+ return -EINVAL;
+ }
priv->rf_mode = XC_RF_MODE_AIR;
- priv->freq_hz = params->frequency - 1750000;
- priv->bandwidth = BANDWIDTH_6_MHZ;
- priv->video_standard = DTV6;
- break;
- case QAM_64:
- case QAM_256:
- case QAM_AUTO:
- dprintk(1, "%s() QAM modulation\n", __func__);
- priv->rf_mode = XC_RF_MODE_CABLE;
- priv->freq_hz = params->frequency - 1750000;
- priv->bandwidth = BANDWIDTH_6_MHZ;
- priv->video_standard = DTV6;
- break;
- default:
+ } else {
+ printk(KERN_ERR "xc5000 modulation type not supported!\n");
return -EINVAL;
}
@@ -698,7 +712,7 @@ static int xc5000_set_params(struct dvb_frontend *fe,
return -EIO;
}
- xc_tune_channel(priv, priv->freq_hz);
+ xc_tune_channel(priv, priv->freq_hz, XC_TUNE_DIGITAL);
if (debug)
xc_debug_dump(priv);
@@ -725,8 +739,6 @@ static int xc5000_is_firmware_loaded(struct dvb_frontend *fe)
return ret;
}
-static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe);
-
static int xc5000_set_analog_params(struct dvb_frontend *fe,
struct analog_parameters *params)
{
@@ -807,7 +819,7 @@ tune_channel:
return -EREMOTEIO;
}
- xc_tune_channel(priv, priv->freq_hz);
+ xc_tune_channel(priv, priv->freq_hz, XC_TUNE_ANALOG);
if (debug)
xc_debug_dump(priv);
@@ -875,18 +887,18 @@ static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe)
static int xc5000_sleep(struct dvb_frontend *fe)
{
- struct xc5000_priv *priv = fe->tuner_priv;
int ret;
dprintk(1, "%s()\n", __func__);
- /* On Pinnacle PCTV HD 800i, the tuner cannot be reinitialized
- * once shutdown without reloading the driver. Maybe I am not
- * doing something right.
- *
- */
+ /* Avoid firmware reload on slow devices */
+ if (no_poweroff)
+ return 0;
- ret = xc_shutdown(priv);
+ /* According to Xceive technical support, the "powerdown" register
+ was removed in newer versions of the firmware. The "supported"
+ way to sleep the tuner is to pull the reset pin low for 10ms */
+ ret = xc5000_TunerReset(fe);
if (ret != XC_RESULT_SUCCESS) {
printk(KERN_ERR
"xc5000: %s() unable to shutdown tuner\n",
@@ -991,7 +1003,7 @@ struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
/* Check if firmware has been loaded. It is possible that another
instance of the driver has loaded the firmware.
*/
- if (xc5000_readreg(priv, XREG_PRODUCT_ID, &id) != 0)
+ if (xc5000_readreg(priv, XREG_PRODUCT_ID, &id) != XC_RESULT_SUCCESS)
goto fail;
switch (id) {
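The xc5000 hunks above defer the firmware upload to the first tune (xc5000_set_params() now loads and initializes only when xc5000_is_firmware_loaded() reports nothing is running) and sleep the tuner with a reset pulse instead of the removed powerdown register. Below is a minimal, stand-alone C sketch of the load-once guard; the flag and stub loader are illustrative stand-ins, not the driver's helpers.

#include <stdio.h>
#include <stdbool.h>

static bool fw_loaded;          /* stands in for the tuner's "firmware present" state */

static int load_firmware(void)
{
        puts("uploading firmware (expensive, done once)");
        fw_loaded = true;
        return 0;
}

static int set_params(unsigned int freq_hz)
{
        /* upload lazily: only when no firmware is running yet */
        if (!fw_loaded && load_firmware() != 0)
                return -1;
        printf("tuning to %u Hz\n", freq_hz);
        return 0;
}

int main(void)
{
        set_params(177500000);  /* first tune triggers the upload */
        set_params(474000000);  /* later tunes skip it */
        return 0;
}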
diff --git a/drivers/media/dvb/b2c2/flexcop-common.h b/drivers/media/dvb/b2c2/flexcop-common.h
index 3e1c472092a..9e2148a1996 100644
--- a/drivers/media/dvb/b2c2/flexcop-common.h
+++ b/drivers/media/dvb/b2c2/flexcop-common.h
@@ -1,9 +1,7 @@
/*
- * This file is part of linux driver the digital TV devices equipped with B2C2 FlexcopII(b)/III
- *
- * flexcop-common.h - common header file for device-specific source files also.
- *
- * see flexcop.c for copyright information.
+ * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
+ * flexcop-common.h - common header file for device-specific source files
+ * see flexcop.c for copyright information
*/
#ifndef __FLEXCOP_COMMON_H__
#define __FLEXCOP_COMMON_H__
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index f7afab5944c..efb4a6c2b57 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -1,34 +1,27 @@
/*
- * This file is part of linux driver the digital TV devices equipped with B2C2 FlexcopII(b)/III
- *
- * flexcop-fe-tuner.c - methods for attaching a frontend and controlling DiSEqC.
- *
- * see flexcop.c for copyright information.
+ * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
+ * flexcop-fe-tuner.c - methods for frontend attachment and DiSEqC controlling
+ * see flexcop.c for copyright information
*/
#include <media/tuner.h>
-
#include "flexcop.h"
-
-#include "stv0299.h"
-#include "mt352.h"
-#include "nxt200x.h"
-#include "bcm3510.h"
-#include "stv0297.h"
#include "mt312.h"
-#include "lgdt330x.h"
-#include "dvb-pll.h"
-#include "tuner-simple.h"
-
+#include "stv0299.h"
#include "s5h1420.h"
#include "itd1000.h"
-
-#include "cx24123.h"
#include "cx24113.h"
-
+#include "cx24123.h"
#include "isl6421.h"
+#include "mt352.h"
+#include "bcm3510.h"
+#include "nxt200x.h"
+#include "dvb-pll.h"
+#include "lgdt330x.h"
+#include "tuner-simple.h"
+#include "stv0297.h"
/* lnb control */
-
+#if defined(CONFIG_DVB_MT312_MODULE) || defined(CONFIG_DVB_STV0299_MODULE)
static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
struct flexcop_device *fc = fe->dvb->priv;
@@ -37,65 +30,62 @@ static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage
v = fc->read_ibi_reg(fc, misc_204);
switch (voltage) {
- case SEC_VOLTAGE_OFF:
- v.misc_204.ACPI1_sig = 1;
- break;
- case SEC_VOLTAGE_13:
- v.misc_204.ACPI1_sig = 0;
- v.misc_204.LNB_L_H_sig = 0;
- break;
- case SEC_VOLTAGE_18:
- v.misc_204.ACPI1_sig = 0;
- v.misc_204.LNB_L_H_sig = 1;
- break;
- default:
- err("unknown SEC_VOLTAGE value");
- return -EINVAL;
+ case SEC_VOLTAGE_OFF:
+ v.misc_204.ACPI1_sig = 1;
+ break;
+ case SEC_VOLTAGE_13:
+ v.misc_204.ACPI1_sig = 0;
+ v.misc_204.LNB_L_H_sig = 0;
+ break;
+ case SEC_VOLTAGE_18:
+ v.misc_204.ACPI1_sig = 0;
+ v.misc_204.LNB_L_H_sig = 1;
+ break;
+ default:
+ err("unknown SEC_VOLTAGE value");
+ return -EINVAL;
}
return fc->write_ibi_reg(fc, misc_204, v);
}
+#endif
+#if defined(CONFIG_DVB_S5H1420_MODULE) || defined(CONFIG_DVB_STV0299_MODULE) \
+ || defined(CONFIG_DVB_MT312_MODULE)
static int flexcop_sleep(struct dvb_frontend* fe)
{
struct flexcop_device *fc = fe->dvb->priv;
-/* flexcop_ibi_value v = fc->read_ibi_reg(fc,misc_204); */
-
if (fc->fe_sleep)
return fc->fe_sleep(fe);
-
-/* v.misc_204.ACPI3_sig = 1;
- fc->write_ibi_reg(fc,misc_204,v);*/
-
return 0;
}
+#endif
+/* SkyStar2 DVB-S rev 2.3 */
+#if defined(CONFIG_DVB_MT312_MODULE)
static int flexcop_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
- /* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */
+/* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */
struct flexcop_device *fc = fe->dvb->priv;
flexcop_ibi_value v;
u16 ax;
v.raw = 0;
-
deb_tuner("tone = %u\n",tone);
switch (tone) {
- case SEC_TONE_ON:
- ax = 0x01ff;
- break;
- case SEC_TONE_OFF:
- ax = 0;
- break;
- default:
- err("unknown SEC_TONE value");
- return -EINVAL;
+ case SEC_TONE_ON:
+ ax = 0x01ff;
+ break;
+ case SEC_TONE_OFF:
+ ax = 0;
+ break;
+ default:
+ err("unknown SEC_TONE value");
+ return -EINVAL;
}
v.lnb_switch_freq_200.LNB_CTLPrescaler_sig = 1; /* divide by 2 */
-
v.lnb_switch_freq_200.LNB_CTLHighCount_sig = ax;
v.lnb_switch_freq_200.LNB_CTLLowCount_sig = ax == 0 ? 0x1ff : ax;
-
return fc->write_ibi_reg(fc,lnb_switch_freq_200,v);
}
@@ -110,17 +100,16 @@ static void flexcop_diseqc_send_bit(struct dvb_frontend* fe, int data)
static void flexcop_diseqc_send_byte(struct dvb_frontend* fe, int data)
{
int i, par = 1, d;
-
for (i = 7; i >= 0; i--) {
d = (data >> i) & 1;
par ^= d;
flexcop_diseqc_send_bit(fe, d);
}
-
flexcop_diseqc_send_bit(fe, par);
}
-static int flexcop_send_diseqc_msg(struct dvb_frontend* fe, int len, u8 *msg, unsigned long burst)
+static int flexcop_send_diseqc_msg(struct dvb_frontend *fe,
+ int len, u8 *msg, unsigned long burst)
{
int i;
@@ -129,7 +118,6 @@ static int flexcop_send_diseqc_msg(struct dvb_frontend* fe, int len, u8 *msg, un
for (i = 0; i < len; i++)
flexcop_diseqc_send_byte(fe,msg[i]);
-
mdelay(16);
if (burst != -1) {
@@ -146,50 +134,110 @@ static int flexcop_send_diseqc_msg(struct dvb_frontend* fe, int len, u8 *msg, un
return 0;
}
-static int flexcop_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd)
+static int flexcop_diseqc_send_master_cmd(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *cmd)
{
return flexcop_send_diseqc_msg(fe, cmd->msg_len, cmd->msg, 0);
}
-static int flexcop_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t minicmd)
+static int flexcop_diseqc_send_burst(struct dvb_frontend *fe,
+ fe_sec_mini_cmd_t minicmd)
{
return flexcop_send_diseqc_msg(fe, 0, NULL, minicmd);
}
-/* dvb-s stv0299 */
-static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend* fe, u32 srate, u32 ratio)
+static struct mt312_config skystar23_samsung_tbdu18132_config = {
+ .demod_address = 0x0e,
+};
+
+static int skystar23_samsung_tbdu18132_tuner_set_params(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ u8 buf[4];
+ u32 div;
+ struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf,
+ .len = sizeof(buf) };
+ struct flexcop_device *fc = fe->dvb->priv;
+ div = (params->frequency + (125/2)) / 125;
+
+ buf[0] = (div >> 8) & 0x7f;
+ buf[1] = (div >> 0) & 0xff;
+ buf[2] = 0x84 | ((div >> 10) & 0x60);
+ buf[3] = 0x80;
+
+ if (params->frequency < 1550000)
+ buf[3] |= 0x02;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (i2c_transfer(&fc->fc_i2c_adap[0].i2c_adap, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+static int skystar2_rev23_attach(struct flexcop_device *fc,
+ struct i2c_adapter *i2c)
+{
+ fc->fe = dvb_attach(mt312_attach, &skystar23_samsung_tbdu18132_config, i2c);
+ if (fc->fe != NULL) {
+ struct dvb_frontend_ops *ops = &fc->fe->ops;
+ ops->tuner_ops.set_params =
+ skystar23_samsung_tbdu18132_tuner_set_params;
+ ops->diseqc_send_master_cmd = flexcop_diseqc_send_master_cmd;
+ ops->diseqc_send_burst = flexcop_diseqc_send_burst;
+ ops->set_tone = flexcop_set_tone;
+ ops->set_voltage = flexcop_set_voltage;
+ fc->fe_sleep = ops->sleep;
+ ops->sleep = flexcop_sleep;
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+/* SkyStar2 DVB-S rev 2.6 */
+#if defined(CONFIG_DVB_STV0299_MODULE)
+static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend *fe,
+ u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
- if (srate < 1500000) { aclk = 0xb7; bclk = 0x47; }
- else if (srate < 3000000) { aclk = 0xb7; bclk = 0x4b; }
- else if (srate < 7000000) { aclk = 0xb7; bclk = 0x4f; }
- else if (srate < 14000000) { aclk = 0xb7; bclk = 0x53; }
- else if (srate < 30000000) { aclk = 0xb6; bclk = 0x53; }
- else if (srate < 45000000) { aclk = 0xb4; bclk = 0x51; }
-
- stv0299_writereg (fe, 0x13, aclk);
- stv0299_writereg (fe, 0x14, bclk);
- stv0299_writereg (fe, 0x1f, (ratio >> 16) & 0xff);
- stv0299_writereg (fe, 0x20, (ratio >> 8) & 0xff);
- stv0299_writereg (fe, 0x21, (ratio ) & 0xf0);
+ if (srate < 1500000) {
+ aclk = 0xb7; bclk = 0x47;
+ } else if (srate < 3000000) {
+ aclk = 0xb7; bclk = 0x4b;
+ } else if (srate < 7000000) {
+ aclk = 0xb7; bclk = 0x4f;
+ } else if (srate < 14000000) {
+ aclk = 0xb7; bclk = 0x53;
+ } else if (srate < 30000000) {
+ aclk = 0xb6; bclk = 0x53;
+ } else if (srate < 45000000) {
+ aclk = 0xb4; bclk = 0x51;
+ }
+ stv0299_writereg(fe, 0x13, aclk);
+ stv0299_writereg(fe, 0x14, bclk);
+ stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
+ stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
+ stv0299_writereg(fe, 0x21, ratio & 0xf0);
return 0;
}
-static int samsung_tbmu24112_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params)
+static int samsung_tbmu24112_tuner_set_params(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
{
u8 buf[4];
u32 div;
- struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
+ struct i2c_msg msg = {
+ .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
struct flexcop_device *fc = fe->dvb->priv;
-
div = params->frequency / 125;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
- buf[2] = 0x84; /* 0xC4 */
+ buf[2] = 0x84; /* 0xC4 */
buf[3] = 0x08;
if (params->frequency < 1500000)
@@ -203,48 +251,48 @@ static int samsung_tbmu24112_tuner_set_params(struct dvb_frontend* fe, struct dv
}
static u8 samsung_tbmu24112_inittab[] = {
- 0x01, 0x15,
- 0x02, 0x30,
- 0x03, 0x00,
- 0x04, 0x7D,
- 0x05, 0x35,
- 0x06, 0x02,
- 0x07, 0x00,
- 0x08, 0xC3,
- 0x0C, 0x00,
- 0x0D, 0x81,
- 0x0E, 0x23,
- 0x0F, 0x12,
- 0x10, 0x7E,
- 0x11, 0x84,
- 0x12, 0xB9,
- 0x13, 0x88,
- 0x14, 0x89,
- 0x15, 0xC9,
- 0x16, 0x00,
- 0x17, 0x5C,
- 0x18, 0x00,
- 0x19, 0x00,
- 0x1A, 0x00,
- 0x1C, 0x00,
- 0x1D, 0x00,
- 0x1E, 0x00,
- 0x1F, 0x3A,
- 0x20, 0x2E,
- 0x21, 0x80,
- 0x22, 0xFF,
- 0x23, 0xC1,
- 0x28, 0x00,
- 0x29, 0x1E,
- 0x2A, 0x14,
- 0x2B, 0x0F,
- 0x2C, 0x09,
- 0x2D, 0x05,
- 0x31, 0x1F,
- 0x32, 0x19,
- 0x33, 0xFE,
- 0x34, 0x93,
- 0xff, 0xff,
+ 0x01, 0x15,
+ 0x02, 0x30,
+ 0x03, 0x00,
+ 0x04, 0x7D,
+ 0x05, 0x35,
+ 0x06, 0x02,
+ 0x07, 0x00,
+ 0x08, 0xC3,
+ 0x0C, 0x00,
+ 0x0D, 0x81,
+ 0x0E, 0x23,
+ 0x0F, 0x12,
+ 0x10, 0x7E,
+ 0x11, 0x84,
+ 0x12, 0xB9,
+ 0x13, 0x88,
+ 0x14, 0x89,
+ 0x15, 0xC9,
+ 0x16, 0x00,
+ 0x17, 0x5C,
+ 0x18, 0x00,
+ 0x19, 0x00,
+ 0x1A, 0x00,
+ 0x1C, 0x00,
+ 0x1D, 0x00,
+ 0x1E, 0x00,
+ 0x1F, 0x3A,
+ 0x20, 0x2E,
+ 0x21, 0x80,
+ 0x22, 0xFF,
+ 0x23, 0xC1,
+ 0x28, 0x00,
+ 0x29, 0x1E,
+ 0x2A, 0x14,
+ 0x2B, 0x0F,
+ 0x2C, 0x09,
+ 0x2D, 0x05,
+ 0x31, 0x1F,
+ 0x32, 0x19,
+ 0x33, 0xFE,
+ 0x34, 0x93,
+ 0xff, 0xff,
};
static struct stv0299_config samsung_tbmu24112_config = {
@@ -259,27 +307,155 @@ static struct stv0299_config samsung_tbmu24112_config = {
.set_symbol_rate = samsung_tbmu24112_set_symbol_rate,
};
-/* dvb-t mt352 */
-static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend* fe)
+static int skystar2_rev26_attach(struct flexcop_device *fc,
+ struct i2c_adapter *i2c)
+{
+ fc->fe = dvb_attach(stv0299_attach, &samsung_tbmu24112_config, i2c);
+ if (fc->fe != NULL) {
+ struct dvb_frontend_ops *ops = &fc->fe->ops;
+ ops->tuner_ops.set_params = samsung_tbmu24112_tuner_set_params;
+ ops->set_voltage = flexcop_set_voltage;
+ fc->fe_sleep = ops->sleep;
+ ops->sleep = flexcop_sleep;
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+/* SkyStar2 DVB-S rev 2.7 */
+#if defined(CONFIG_DVB_S5H1420_MODULE)
+static struct s5h1420_config skystar2_rev2_7_s5h1420_config = {
+ .demod_address = 0x53,
+ .invert = 1,
+ .repeated_start_workaround = 1,
+ .serial_mpeg = 1,
+};
+
+static struct itd1000_config skystar2_rev2_7_itd1000_config = {
+ .i2c_address = 0x61,
+};
+
+static int skystar2_rev27_attach(struct flexcop_device *fc,
+ struct i2c_adapter *i2c)
+{
+ flexcop_ibi_value r108;
+ struct i2c_adapter *i2c_tuner;
+
+ /* enable no_base_addr - no repeated start when reading */
+ fc->fc_i2c_adap[0].no_base_addr = 1;
+ fc->fe = dvb_attach(s5h1420_attach, &skystar2_rev2_7_s5h1420_config,
+ i2c);
+ if (!fc->fe)
+ goto fail;
+
+ i2c_tuner = s5h1420_get_tuner_i2c_adapter(fc->fe);
+ if (!i2c_tuner)
+ goto fail;
+
+ fc->fe_sleep = fc->fe->ops.sleep;
+ fc->fe->ops.sleep = flexcop_sleep;
+
+ /* enable no_base_addr - no repeated start when reading */
+ fc->fc_i2c_adap[2].no_base_addr = 1;
+ if (!dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap,
+ 0x08, 1, 1)) {
+ err("ISL6421 could NOT be attached");
+ goto fail_isl;
+ }
+ info("ISL6421 successfully attached");
+
+ /* the ITD1000 requires a lower i2c clock - is that a problem? */
+ r108.raw = 0x00000506;
+ fc->write_ibi_reg(fc, tw_sm_c_108, r108);
+ if (!dvb_attach(itd1000_attach, fc->fe, i2c_tuner,
+ &skystar2_rev2_7_itd1000_config)) {
+ err("ITD1000 could NOT be attached");
+ /* Should i2c clock be restored? */
+ goto fail_isl;
+ }
+ info("ITD1000 successfully attached");
+
+ return 1;
+
+fail_isl:
+ fc->fc_i2c_adap[2].no_base_addr = 0;
+fail:
+ /* for the next devices we need it again */
+ fc->fc_i2c_adap[0].no_base_addr = 0;
+ return 0;
+}
+#endif
+
+/* SkyStar2 rev 2.8 */
+#if defined(CONFIG_DVB_CX24123_MODULE)
+static struct cx24123_config skystar2_rev2_8_cx24123_config = {
+ .demod_address = 0x55,
+ .dont_use_pll = 1,
+ .agc_callback = cx24113_agc_callback,
+};
+
+static const struct cx24113_config skystar2_rev2_8_cx24113_config = {
+ .i2c_addr = 0x54,
+ .xtal_khz = 10111,
+};
+
+static int skystar2_rev28_attach(struct flexcop_device *fc,
+ struct i2c_adapter *i2c)
+{
+ struct i2c_adapter *i2c_tuner;
+
+ fc->fe = dvb_attach(cx24123_attach, &skystar2_rev2_8_cx24123_config,
+ i2c);
+ if (!fc->fe)
+ return 0;
+
+ i2c_tuner = cx24123_get_tuner_i2c_adapter(fc->fe);
+ if (!i2c_tuner)
+ return 0;
+
+ if (!dvb_attach(cx24113_attach, fc->fe, &skystar2_rev2_8_cx24113_config,
+ i2c_tuner)) {
+ err("CX24113 could NOT be attached");
+ return 0;
+ }
+ info("CX24113 successfully attached");
+
+ fc->fc_i2c_adap[2].no_base_addr = 1;
+ if (!dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap,
+ 0x08, 0, 0)) {
+ err("ISL6421 could NOT be attached");
+ fc->fc_i2c_adap[2].no_base_addr = 0;
+ return 0;
+ }
+ info("ISL6421 successfully attached");
+ /* TODO on i2c_adap[1] addr 0x11 (EEPROM) there seems to be an
+ * IR-receiver (PIC16F818) - but the card has no input for that ??? */
+ return 1;
+}
+#endif
+
+/* AirStar DVB-T */
+#if defined(CONFIG_DVB_MT352_MODULE)
+static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend *fe)
{
- static u8 mt352_clock_config [] = { 0x89, 0x18, 0x2d };
- static u8 mt352_reset [] = { 0x50, 0x80 };
- static u8 mt352_adc_ctl_1_cfg [] = { 0x8E, 0x40 };
- static u8 mt352_agc_cfg [] = { 0x67, 0x28, 0xa1 };
+ static u8 mt352_clock_config[] = { 0x89, 0x18, 0x2d };
+ static u8 mt352_reset[] = { 0x50, 0x80 };
+ static u8 mt352_adc_ctl_1_cfg[] = { 0x8E, 0x40 };
+ static u8 mt352_agc_cfg[] = { 0x67, 0x28, 0xa1 };
static u8 mt352_capt_range_cfg[] = { 0x75, 0x32 };
mt352_write(fe, mt352_clock_config, sizeof(mt352_clock_config));
udelay(2000);
mt352_write(fe, mt352_reset, sizeof(mt352_reset));
mt352_write(fe, mt352_adc_ctl_1_cfg, sizeof(mt352_adc_ctl_1_cfg));
-
mt352_write(fe, mt352_agc_cfg, sizeof(mt352_agc_cfg));
mt352_write(fe, mt352_capt_range_cfg, sizeof(mt352_capt_range_cfg));
-
return 0;
}
-static int samsung_tdtc9251dh0_calc_regs(struct dvb_frontend* fe, struct dvb_frontend_parameters *params, u8* pllbuf, int buf_len)
+static int samsung_tdtc9251dh0_calc_regs(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params, u8* pllbuf, int buf_len)
{
u32 div;
unsigned char bs = 0;
@@ -287,19 +463,20 @@ static int samsung_tdtc9251dh0_calc_regs(struct dvb_frontend* fe, struct dvb_fro
if (buf_len < 5)
return -EINVAL;
- #define IF_FREQUENCYx6 217 /* 6 * 36.16666666667MHz */
+#define IF_FREQUENCYx6 217 /* 6 * 36.16666666667MHz */
div = (((params->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6;
-
- if (params->frequency >= 48000000 && params->frequency <= 154000000) bs = 0x09;
- if (params->frequency >= 161000000 && params->frequency <= 439000000) bs = 0x0a;
- if (params->frequency >= 447000000 && params->frequency <= 863000000) bs = 0x08;
+ if (params->frequency >= 48000000 && params->frequency <= 154000000)
+ bs = 0x09;
+ if (params->frequency >= 161000000 && params->frequency <= 439000000)
+ bs = 0x0a;
+ if (params->frequency >= 447000000 && params->frequency <= 863000000)
+ bs = 0x08;
pllbuf[0] = 0x61;
pllbuf[1] = div >> 8;
pllbuf[2] = div & 0xff;
pllbuf[3] = 0xcc;
pllbuf[4] = bs;
-
return 5;
}
@@ -308,70 +485,95 @@ static struct mt352_config samsung_tdtc9251dh0_config = {
.demod_init = samsung_tdtc9251dh0_demod_init,
};
-static int flexcop_fe_request_firmware(struct dvb_frontend* fe, const struct firmware **fw, char* name)
+static int airstar_dvbt_attach(struct flexcop_device *fc,
+ struct i2c_adapter *i2c)
+{
+ fc->fe = dvb_attach(mt352_attach, &samsung_tdtc9251dh0_config, i2c);
+ if (fc->fe != NULL) {
+ fc->fe->ops.tuner_ops.calc_regs = samsung_tdtc9251dh0_calc_regs;
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+/* AirStar ATSC 1st generation */
+#if defined(CONFIG_DVB_BCM3510_MODULE)
+static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
+ const struct firmware **fw, char* name)
{
struct flexcop_device *fc = fe->dvb->priv;
return request_firmware(fw, name, fc->dev);
}
-static struct lgdt330x_config air2pc_atsc_hd5000_config = {
- .demod_address = 0x59,
- .demod_chip = LGDT3303,
- .serial_mpeg = 0x04,
- .clock_polarity_flip = 1,
-};
-
-static struct nxt200x_config samsung_tbmv_config = {
- .demod_address = 0x0a,
-};
-
static struct bcm3510_config air2pc_atsc_first_gen_config = {
.demod_address = 0x0f,
.request_firmware = flexcop_fe_request_firmware,
};
-static int skystar23_samsung_tbdu18132_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params)
+static int airstar_atsc1_attach(struct flexcop_device *fc,
+ struct i2c_adapter *i2c)
{
- u8 buf[4];
- u32 div;
- struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
- struct flexcop_device *fc = fe->dvb->priv;
-
- div = (params->frequency + (125/2)) / 125;
+ fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c);
+ return fc->fe != NULL;
+}
+#endif
- buf[0] = (div >> 8) & 0x7f;
- buf[1] = (div >> 0) & 0xff;
- buf[2] = 0x84 | ((div >> 10) & 0x60);
- buf[3] = 0x80;
+/* AirStar ATSC 2nd generation */
+#if defined(CONFIG_DVB_NXT200X_MODULE)
+static struct nxt200x_config samsung_tbmv_config = {
+ .demod_address = 0x0a,
+};
- if (params->frequency < 1550000)
- buf[3] |= 0x02;
+static int airstar_atsc2_attach(struct flexcop_device *fc,
+ struct i2c_adapter *i2c)
+{
+ fc->fe = dvb_attach(nxt200x_attach, &samsung_tbmv_config, i2c);
+ if (!fc->fe)
+ return 0;
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- if (i2c_transfer(&fc->fc_i2c_adap[0].i2c_adap, &msg, 1) != 1)
- return -EIO;
- return 0;
+ return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL,
+ DVB_PLL_SAMSUNG_TBMV);
}
+#endif
-static struct mt312_config skystar23_samsung_tbdu18132_config = {
-
- .demod_address = 0x0e,
+/* AirStar ATSC 3rd generation */
+#if defined(CONFIG_DVB_LGDT330X_MODULE)
+static struct lgdt330x_config air2pc_atsc_hd5000_config = {
+ .demod_address = 0x59,
+ .demod_chip = LGDT3303,
+ .serial_mpeg = 0x04,
+ .clock_polarity_flip = 1,
};
+static int airstar_atsc3_attach(struct flexcop_device *fc,
+ struct i2c_adapter *i2c)
+{
+ fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, i2c);
+ if (!fc->fe)
+ return 0;
+
+ return !!dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61,
+ TUNER_LG_TDVS_H06XF);
+}
+#endif
+
+/* CableStar2 DVB-C */
+#if defined(CONFIG_DVB_STV0297_MODULE)
static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+ struct dvb_frontend_parameters *fep)
{
struct flexcop_device *fc = fe->dvb->priv;
u8 buf[4];
u16 div;
int ret;
-/* 62.5 kHz * 10 */
+/* 62.5 kHz * 10 */
#define REF_FREQ 625
#define FREQ_OFFSET 36125
- div = ((fep->frequency/1000 + FREQ_OFFSET ) * 10) / REF_FREQ; // 4 MHz = 4000 KHz
+ div = ((fep->frequency/1000 + FREQ_OFFSET) * 10) / REF_FREQ;
+/* 4 MHz = 4000 kHz */
buf[0] = (u8)( div >> 8) & 0x7f;
buf[1] = (u8) div & 0xff;
@@ -384,11 +586,11 @@ static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe,
* AGD = 1, R3 R2 R1 R0 = 0 1 0 1 => byte 4 = 1**10101 = 0x95 */
buf[2] = 0x95;
-// Range(MHz) C1 * RE RTS BS4 BS3 BS2 BS1 Byte 5
-// 47 - 153 0 * 0 0 0 0 0 1 0x01
-// 153 - 430 0 * 0 0 0 0 1 0 0x02
-// 430 - 822 0 * 0 0 1 0 0 0 0x08
-// 822 - 862 1 * 0 0 1 0 0 0 0x88
+/* Range(MHz) C1 * RE RTS BS4 BS3 BS2 BS1 Byte 5
+ * 47 - 153 0 * 0 0 0 0 0 1 0x01
+ * 153 - 430 0 * 0 0 0 0 1 0 0x02
+ * 430 - 822 0 * 0 0 1 0 0 0 0x08
+ * 822 - 862 1 * 0 0 1 0 0 0 0x88 */
if (fep->frequency <= 153000000) buf[3] = 0x01;
else if (fep->frequency <= 430000000) buf[3] = 0x02;
@@ -397,11 +599,11 @@ static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe,
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
- deb_tuner("tuner buffer for %d Hz: %x %x %x %x\n",fep->frequency, buf[0],buf[1],buf[2],buf[3]);
+ deb_tuner("tuner buffer for %d Hz: %x %x %x %x\n", fep->frequency,
+ buf[0], buf[1], buf[2], buf[3]);
ret = fc->i2c_request(&fc->fc_i2c_adap[2],
- FC_WRITE, 0x61, buf[0], &buf[1], 3);
+ FC_WRITE, 0x61, buf[0], &buf[1], 3);
deb_tuner("tuner write returned: %d\n",ret);
-
return ret;
}
@@ -481,182 +683,73 @@ static u8 alps_tdee4_stv0297_inittab[] = {
static struct stv0297_config alps_tdee4_stv0297_config = {
.demod_address = 0x1c,
.inittab = alps_tdee4_stv0297_inittab,
-// .invert = 1,
-// .pll_set = alps_tdee4_stv0297_pll_set,
-};
-
-
-/* SkyStar2 rev2.7 (a/u) */
-static struct s5h1420_config skystar2_rev2_7_s5h1420_config = {
- .demod_address = 0x53,
- .invert = 1,
- .repeated_start_workaround = 1,
- .serial_mpeg = 1,
-};
-
-static struct itd1000_config skystar2_rev2_7_itd1000_config = {
- .i2c_address = 0x61,
};
-/* SkyStar2 rev2.8 */
-static struct cx24123_config skystar2_rev2_8_cx24123_config = {
- .demod_address = 0x55,
- .dont_use_pll = 1,
- .agc_callback = cx24113_agc_callback,
-};
-
-static const struct cx24113_config skystar2_rev2_8_cx24113_config = {
- .i2c_addr = 0x54,
- .xtal_khz = 10111,
-};
-
-/* try to figure out the frontend, each card/box can have on of the following list */
-int flexcop_frontend_init(struct flexcop_device *fc)
+static int cablestar2_attach(struct flexcop_device *fc,
+ struct i2c_adapter *i2c)
{
- struct dvb_frontend_ops *ops;
- struct i2c_adapter *i2c = &fc->fc_i2c_adap[0].i2c_adap;
- struct i2c_adapter *i2c_tuner;
-
- /* enable no_base_addr - no repeated start when reading */
- fc->fc_i2c_adap[0].no_base_addr = 1;
- fc->fe = dvb_attach(s5h1420_attach, &skystar2_rev2_7_s5h1420_config, i2c);
- if (fc->fe != NULL) {
- flexcop_ibi_value r108;
- i2c_tuner = s5h1420_get_tuner_i2c_adapter(fc->fe);
- ops = &fc->fe->ops;
-
- fc->fe_sleep = ops->sleep;
- ops->sleep = flexcop_sleep;
-
- fc->dev_type = FC_SKY_REV27;
-
- /* enable no_base_addr - no repeated start when reading */
- fc->fc_i2c_adap[2].no_base_addr = 1;
- if (dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap, 0x08, 1, 1) == NULL)
- err("ISL6421 could NOT be attached");
- else
- info("ISL6421 successfully attached");
-
- /* the ITD1000 requires a lower i2c clock - it slows down the stuff for everyone - but is it a problem ? */
- r108.raw = 0x00000506;
- fc->write_ibi_reg(fc, tw_sm_c_108, r108);
- if (i2c_tuner) {
- if (dvb_attach(itd1000_attach, fc->fe, i2c_tuner, &skystar2_rev2_7_itd1000_config) == NULL)
- err("ITD1000 could NOT be attached");
- else
- info("ITD1000 successfully attached");
- }
- goto fe_found;
- }
- fc->fc_i2c_adap[0].no_base_addr = 0; /* for the next devices we need it again */
-
- /* try the sky v2.8 (cx24123, isl6421) */
- fc->fe = dvb_attach(cx24123_attach,
- &skystar2_rev2_8_cx24123_config, i2c);
- if (fc->fe != NULL) {
- i2c_tuner = cx24123_get_tuner_i2c_adapter(fc->fe);
- if (i2c_tuner != NULL) {
- if (dvb_attach(cx24113_attach, fc->fe,
- &skystar2_rev2_8_cx24113_config,
- i2c_tuner) == NULL)
- err("CX24113 could NOT be attached");
- else
- info("CX24113 successfully attached");
- }
-
- fc->dev_type = FC_SKY_REV28;
-
- fc->fc_i2c_adap[2].no_base_addr = 1;
- if (dvb_attach(isl6421_attach, fc->fe,
- &fc->fc_i2c_adap[2].i2c_adap, 0x08, 0, 0) == NULL)
- err("ISL6421 could NOT be attached");
- else
- info("ISL6421 successfully attached");
-
- /* TODO on i2c_adap[1] addr 0x11 (EEPROM) there seems to be an
- * IR-receiver (PIC16F818) - but the card has no input for
- * that ??? */
-
- goto fe_found;
- }
-
- /* try the sky v2.6 (stv0299/Samsung tbmu24112(sl1935)) */
- fc->fe = dvb_attach(stv0299_attach, &samsung_tbmu24112_config, i2c);
- if (fc->fe != NULL) {
- ops = &fc->fe->ops;
-
- ops->tuner_ops.set_params = samsung_tbmu24112_tuner_set_params;
-
- ops->set_voltage = flexcop_set_voltage;
-
- fc->fe_sleep = ops->sleep;
- ops->sleep = flexcop_sleep;
-
- fc->dev_type = FC_SKY_REV26;
- goto fe_found;
- }
-
- /* try the air dvb-t (mt352/Samsung tdtc9251dh0(??)) */
- fc->fe = dvb_attach(mt352_attach, &samsung_tdtc9251dh0_config, i2c);
- if (fc->fe != NULL) {
- fc->dev_type = FC_AIR_DVBT;
- fc->fe->ops.tuner_ops.calc_regs = samsung_tdtc9251dh0_calc_regs;
- goto fe_found;
- }
-
- /* try the air atsc 2nd generation (nxt2002) */
- fc->fe = dvb_attach(nxt200x_attach, &samsung_tbmv_config, i2c);
- if (fc->fe != NULL) {
- fc->dev_type = FC_AIR_ATSC2;
- dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, DVB_PLL_SAMSUNG_TBMV);
- goto fe_found;
- }
-
- fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, i2c);
- if (fc->fe != NULL) {
- fc->dev_type = FC_AIR_ATSC3;
- dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61,
- TUNER_LG_TDVS_H06XF);
- goto fe_found;
- }
-
- /* try the air atsc 1nd generation (bcm3510)/panasonic ct10s */
- fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c);
- if (fc->fe != NULL) {
- fc->dev_type = FC_AIR_ATSC1;
- goto fe_found;
- }
-
- /* try the cable dvb (stv0297) */
fc->fc_i2c_adap[0].no_base_addr = 1;
fc->fe = dvb_attach(stv0297_attach, &alps_tdee4_stv0297_config, i2c);
- if (fc->fe != NULL) {
- fc->dev_type = FC_CABLE;
- fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params;
- goto fe_found;
+ if (!fc->fe) {
+ /* Reset for next frontend to try */
+ fc->fc_i2c_adap[0].no_base_addr = 0;
+ return 0;
}
- fc->fc_i2c_adap[0].no_base_addr = 0;
-
- /* try the sky v2.3 (vp310/Samsung tbdu18132(tsa5059)) */
- fc->fe = dvb_attach(mt312_attach,
- &skystar23_samsung_tbdu18132_config, i2c);
- if (fc->fe != NULL) {
- ops = &fc->fe->ops;
-
- ops->tuner_ops.set_params = skystar23_samsung_tbdu18132_tuner_set_params;
-
- ops->diseqc_send_master_cmd = flexcop_diseqc_send_master_cmd;
- ops->diseqc_send_burst = flexcop_diseqc_send_burst;
- ops->set_tone = flexcop_set_tone;
- ops->set_voltage = flexcop_set_voltage;
-
- fc->fe_sleep = ops->sleep;
- ops->sleep = flexcop_sleep;
+ fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params;
+ return 1;
+}
+#endif
+
+static struct {
+ flexcop_device_type_t type;
+ int (*attach)(struct flexcop_device *, struct i2c_adapter *);
+} flexcop_frontends[] = {
+#if defined(CONFIG_DVB_S5H1420_MODULE)
+ { FC_SKY_REV27, skystar2_rev27_attach },
+#endif
+#if defined(CONFIG_DVB_CX24123_MODULE)
+ { FC_SKY_REV28, skystar2_rev28_attach },
+#endif
+#if defined(CONFIG_DVB_STV0299_MODULE)
+ { FC_SKY_REV26, skystar2_rev26_attach },
+#endif
+#if defined(CONFIG_DVB_MT352_MODULE)
+ { FC_AIR_DVBT, airstar_dvbt_attach },
+#endif
+#if defined(CONFIG_DVB_NXT200X_MODULE)
+ { FC_AIR_ATSC2, airstar_atsc2_attach },
+#endif
+#if defined(CONFIG_DVB_LGDT330X_MODULE)
+ { FC_AIR_ATSC3, airstar_atsc3_attach },
+#endif
+#if defined(CONFIG_DVB_BCM3510_MODULE)
+ { FC_AIR_ATSC1, airstar_atsc1_attach },
+#endif
+#if defined(CONFIG_DVB_STV0297_MODULE)
+ { FC_CABLE, cablestar2_attach },
+#endif
+#if defined(CONFIG_DVB_MT312_MODULE)
+ { FC_SKY_REV23, skystar2_rev23_attach },
+#endif
+};
- fc->dev_type = FC_SKY_REV23;
- goto fe_found;
+/* try to figure out the frontend */
+int flexcop_frontend_init(struct flexcop_device *fc)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(flexcop_frontends); i++) {
+ /* type needs to be set before, because of some workarounds
+ * done based on the probed card type */
+ fc->dev_type = flexcop_frontends[i].type;
+ if (flexcop_frontends[i].attach(fc, &fc->fc_i2c_adap[0].i2c_adap))
+ goto fe_found;
+ /* Clean up partially attached frontend */
+ if (fc->fe) {
+ dvb_frontend_detach(fc->fe);
+ fc->fe = NULL;
+ }
}
-
+ fc->dev_type = FC_UNK;
err("no frontend driver found for this B2C2/FlexCop adapter");
return -ENODEV;
@@ -664,9 +757,7 @@ fe_found:
info("found '%s' .", fc->fe->ops.info.name);
if (dvb_register_frontend(&fc->dvb_adapter, fc->fe)) {
err("frontend registration failed!");
- ops = &fc->fe->ops;
- if (ops->release != NULL)
- ops->release(fc->fe);
+ dvb_frontend_detach(fc->fe);
fc->fe = NULL;
return -EINVAL;
}
@@ -680,6 +771,5 @@ void flexcop_frontend_exit(struct flexcop_device *fc)
dvb_unregister_frontend(fc->fe);
dvb_frontend_detach(fc->fe);
}
-
fc->init_state &= ~FC_STATE_FE_INIT;
}
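The flexcop-fe-tuner.c rework splits the old if/else probe chain into one attach function per card and walks the flexcop_frontends[] table in flexcop_frontend_init(). A self-contained sketch of that table-driven probing follows; the struct and attach stubs are made up for illustration and are not the kernel types.

#include <stdio.h>

struct device { int type; };

/* one entry per supported card: a type id plus an attach routine that
 * returns non-zero on success, like the flexcop_frontends[] table above */
struct probe_entry {
        int type;
        int (*attach)(struct device *dev);
};

static int attach_a(struct device *dev) { (void)dev; return 0; }  /* not present */
static int attach_b(struct device *dev) { (void)dev; return 1; }  /* found */

static const struct probe_entry probes[] = {
        { 1, attach_a },
        { 2, attach_b },
};

int main(void)
{
        struct device dev = { 0 };
        unsigned int i;

        for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++) {
                /* set the type first: attach callbacks may rely on it
                 * for type-specific workarounds */
                dev.type = probes[i].type;
                if (probes[i].attach(&dev)) {
                        printf("frontend found, type %d\n", dev.type);
                        return 0;
                }
        }
        printf("no frontend found\n");
        return 1;
}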
diff --git a/drivers/media/dvb/b2c2/flexcop-i2c.c b/drivers/media/dvb/b2c2/flexcop-i2c.c
index e2bed507648..fd1df235276 100644
--- a/drivers/media/dvb/b2c2/flexcop-i2c.c
+++ b/drivers/media/dvb/b2c2/flexcop-i2c.c
@@ -200,7 +200,7 @@ static int flexcop_master_xfer(struct i2c_adapter *i2c_adap,
msgs[i].buf[0], &msgs[i].buf[1],
msgs[i].len - 1);
if (ret < 0) {
- err("i2c master_xfer failed");
+ deb_i2c("i2c master_xfer failed");
break;
}
}
diff --git a/drivers/media/dvb/b2c2/flexcop-misc.c b/drivers/media/dvb/b2c2/flexcop-misc.c
index e56627d2f0f..f06f3a9070f 100644
--- a/drivers/media/dvb/b2c2/flexcop-misc.c
+++ b/drivers/media/dvb/b2c2/flexcop-misc.c
@@ -46,16 +46,16 @@ static const char *flexcop_revision_names[] = {
};
static const char *flexcop_device_names[] = {
- "Unknown device",
- "Air2PC/AirStar 2 DVB-T",
- "Air2PC/AirStar 2 ATSC 1st generation",
- "Air2PC/AirStar 2 ATSC 2nd generation",
- "Sky2PC/SkyStar 2 DVB-S",
- "Sky2PC/SkyStar 2 DVB-S (old version)",
- "Cable2PC/CableStar 2 DVB-C",
- "Air2PC/AirStar 2 ATSC 3rd generation (HD5000)",
- "Sky2PC/SkyStar 2 DVB-S rev 2.7a/u",
- "Sky2PC/SkyStar 2 DVB-S rev 2.8",
+ [FC_UNK] = "Unknown device",
+ [FC_CABLE] = "Cable2PC/CableStar 2 DVB-C",
+ [FC_AIR_DVBT] = "Air2PC/AirStar 2 DVB-T",
+ [FC_AIR_ATSC1] = "Air2PC/AirStar 2 ATSC 1st generation",
+ [FC_AIR_ATSC2] = "Air2PC/AirStar 2 ATSC 2nd generation",
+ [FC_AIR_ATSC3] = "Air2PC/AirStar 2 ATSC 3rd generation (HD5000)",
+ [FC_SKY_REV23] = "Sky2PC/SkyStar 2 DVB-S rev 2.3 (old version)",
+ [FC_SKY_REV26] = "Sky2PC/SkyStar 2 DVB-S rev 2.6",
+ [FC_SKY_REV27] = "Sky2PC/SkyStar 2 DVB-S rev 2.7a/u",
+ [FC_SKY_REV28] = "Sky2PC/SkyStar 2 DVB-S rev 2.8",
};
static const char *flexcop_bus_names[] = {
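flexcop_device_names[] is now filled with designated initializers keyed by the FC_* enum values, so each name stays bound to its enum entry regardless of ordering. A stand-alone C99 example of the same idiom, with an invented enum and strings:

#include <stdio.h>

enum card_type { CARD_UNKNOWN, CARD_DVBT, CARD_DVBS, CARD_COUNT };

/* designated initializers tie each string to its enum value, so the table
 * stays correct even if the enum order changes */
static const char *card_names[CARD_COUNT] = {
        [CARD_UNKNOWN] = "Unknown device",
        [CARD_DVBT]    = "DVB-T card",
        [CARD_DVBS]    = "DVB-S card",
};

int main(void)
{
        printf("%s\n", card_names[CARD_DVBS]);
        return 0;
}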
diff --git a/drivers/media/dvb/bt8xx/bt878.c b/drivers/media/dvb/bt8xx/bt878.c
index 56d8fab688b..a24c125331f 100644
--- a/drivers/media/dvb/bt8xx/bt878.c
+++ b/drivers/media/dvb/bt8xx/bt878.c
@@ -508,12 +508,6 @@ static int __devinit bt878_probe(struct pci_dev *dev,
pci_set_master(dev);
pci_set_drvdata(dev, bt);
-/* if(init_bt878(btv) < 0) {
- bt878_remove(dev);
- return -EIO;
- }
-*/
-
if ((result = bt878_mem_alloc(bt))) {
printk(KERN_ERR "bt878: failed to allocate memory!\n");
goto fail2;
@@ -579,7 +573,7 @@ static struct pci_driver bt878_pci_driver = {
.name = "bt878",
.id_table = bt878_pci_tbl,
.probe = bt878_probe,
- .remove = bt878_remove,
+ .remove = __devexit_p(bt878_remove),
};
static int bt878_pci_driver_registered;
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index 971a8b18f6d..4dbd7d4185a 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -51,6 +51,9 @@
#ifndef PCI_VENDOR_ID_TRIGEM
#define PCI_VENDOR_ID_TRIGEM 0x109f
#endif
+#ifndef PCI_VENDOR_ID_AXESS
+#define PCI_VENDOR_ID_AXESS 0x195d
+#endif
#ifndef PCI_DEVICE_ID_DM1105
#define PCI_DEVICE_ID_DM1105 0x036f
#endif
@@ -60,6 +63,9 @@
#ifndef PCI_DEVICE_ID_DW2004
#define PCI_DEVICE_ID_DW2004 0x2004
#endif
+#ifndef PCI_DEVICE_ID_DM05
+#define PCI_DEVICE_ID_DM05 0x1105
+#endif
/* ----------------------------------------------- */
/* sdmc dm1105 registers */
@@ -150,6 +156,11 @@
#define DM1105_LNB_13V 0x00010100
#define DM1105_LNB_18V 0x00000100
+/* GPIO's for LNB power control for Axess DM05 */
+#define DM05_LNB_MASK 0x00000000
+#define DM05_LNB_13V 0x00020000
+#define DM05_LNB_18V 0x00030000
+
static int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable debugging information for IR decoding");
@@ -188,6 +199,8 @@ struct dm1105dvb {
/* irq */
struct work_struct work;
+ struct workqueue_struct *wq;
+ char wqn[16];
/* dma */
dma_addr_t dma_addr;
@@ -313,15 +326,25 @@ static inline struct dm1105dvb *frontend_to_dm1105dvb(struct dvb_frontend *fe)
static int dm1105dvb_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
struct dm1105dvb *dm1105dvb = frontend_to_dm1105dvb(fe);
+ u32 lnb_mask, lnb_13v, lnb_18v;
- if (voltage == SEC_VOLTAGE_18) {
- outl(DM1105_LNB_MASK, dm_io_mem(DM1105_GPIOCTR));
- outl(DM1105_LNB_18V, dm_io_mem(DM1105_GPIOVAL));
- } else {
- /*LNB ON-13V by default!*/
- outl(DM1105_LNB_MASK, dm_io_mem(DM1105_GPIOCTR));
- outl(DM1105_LNB_13V, dm_io_mem(DM1105_GPIOVAL));
- }
+ switch (dm1105dvb->pdev->subsystem_device) {
+ case PCI_DEVICE_ID_DM05:
+ lnb_mask = DM05_LNB_MASK;
+ lnb_13v = DM05_LNB_13V;
+ lnb_18v = DM05_LNB_18V;
+ break;
+ default:
+ lnb_mask = DM1105_LNB_MASK;
+ lnb_13v = DM1105_LNB_13V;
+ lnb_18v = DM1105_LNB_18V;
+ }
+
+ outl(lnb_mask, dm_io_mem(DM1105_GPIOCTR));
+ if (voltage == SEC_VOLTAGE_18)
+ outl(lnb_18v, dm_io_mem(DM1105_GPIOVAL));
+ else
+ outl(lnb_13v, dm_io_mem(DM1105_GPIOVAL));
return 0;
}
@@ -440,7 +463,7 @@ static irqreturn_t dm1105dvb_irq(int irq, void *dev_id)
case (INTSTS_TSIRQ | INTSTS_IR):
dm1105dvb->nextwrp = inl(dm_io_mem(DM1105_WRP)) -
inl(dm_io_mem(DM1105_STADR));
- schedule_work(&dm1105dvb->work);
+ queue_work(dm1105dvb->wq, &dm1105dvb->work);
break;
case INTSTS_IR:
dm1105dvb->ir.ir_command = inl(dm_io_mem(DM1105_IRCODE));
@@ -567,46 +590,44 @@ static int __devinit frontend_init(struct dm1105dvb *dm1105dvb)
int ret;
switch (dm1105dvb->pdev->subsystem_device) {
- case PCI_DEVICE_ID_DW2002:
+ case PCI_DEVICE_ID_DW2004:
dm1105dvb->fe = dvb_attach(
- stv0299_attach, &sharp_z0194a_config,
+ cx24116_attach, &serit_sp2633_config,
&dm1105dvb->i2c_adap);
+ if (dm1105dvb->fe)
+ dm1105dvb->fe->ops.set_voltage = dm1105dvb_set_voltage;
+ break;
+ default:
+ dm1105dvb->fe = dvb_attach(
+ stv0299_attach, &sharp_z0194a_config,
+ &dm1105dvb->i2c_adap);
if (dm1105dvb->fe) {
dm1105dvb->fe->ops.set_voltage =
dm1105dvb_set_voltage;
dvb_attach(dvb_pll_attach, dm1105dvb->fe, 0x60,
&dm1105dvb->i2c_adap, DVB_PLL_OPERA1);
+ break;
}
- if (!dm1105dvb->fe) {
- dm1105dvb->fe = dvb_attach(
- stv0288_attach, &earda_config,
- &dm1105dvb->i2c_adap);
- if (dm1105dvb->fe) {
- dm1105dvb->fe->ops.set_voltage =
- dm1105dvb_set_voltage;
- dvb_attach(stb6000_attach, dm1105dvb->fe, 0x61,
- &dm1105dvb->i2c_adap);
- }
+ dm1105dvb->fe = dvb_attach(
+ stv0288_attach, &earda_config,
+ &dm1105dvb->i2c_adap);
+ if (dm1105dvb->fe) {
+ dm1105dvb->fe->ops.set_voltage =
+ dm1105dvb_set_voltage;
+ dvb_attach(stb6000_attach, dm1105dvb->fe, 0x61,
+ &dm1105dvb->i2c_adap);
+ break;
}
- if (!dm1105dvb->fe) {
- dm1105dvb->fe = dvb_attach(
- si21xx_attach, &serit_config,
- &dm1105dvb->i2c_adap);
- if (dm1105dvb->fe)
- dm1105dvb->fe->ops.set_voltage =
- dm1105dvb_set_voltage;
- }
- break;
- case PCI_DEVICE_ID_DW2004:
dm1105dvb->fe = dvb_attach(
- cx24116_attach, &serit_sp2633_config,
+ si21xx_attach, &serit_config,
&dm1105dvb->i2c_adap);
if (dm1105dvb->fe)
- dm1105dvb->fe->ops.set_voltage = dm1105dvb_set_voltage;
- break;
+ dm1105dvb->fe->ops.set_voltage =
+ dm1105dvb_set_voltage;
+
}
if (!dm1105dvb->fe) {
@@ -630,10 +651,17 @@ static void __devinit dm1105dvb_read_mac(struct dm1105dvb *dm1105dvb, u8 *mac)
static u8 command[1] = { 0x28 };
struct i2c_msg msg[] = {
- { .addr = IIC_24C01_addr >> 1, .flags = 0,
- .buf = command, .len = 1 },
- { .addr = IIC_24C01_addr >> 1, .flags = I2C_M_RD,
- .buf = mac, .len = 6 },
+ {
+ .addr = IIC_24C01_addr >> 1,
+ .flags = 0,
+ .buf = command,
+ .len = 1
+ }, {
+ .addr = IIC_24C01_addr >> 1,
+ .flags = I2C_M_RD,
+ .buf = mac,
+ .len = 6
+ },
};
dm1105_i2c_xfer(&dm1105dvb->i2c_adap, msg , 2);
@@ -752,14 +780,22 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
dm1105_ir_init(dm1105dvb);
INIT_WORK(&dm1105dvb->work, dm1105_dmx_buffer);
+ sprintf(dm1105dvb->wqn, "%s/%d", dvb_adapter->name, dvb_adapter->num);
+ dm1105dvb->wq = create_singlethread_workqueue(dm1105dvb->wqn);
+ if (!dm1105dvb->wq)
+ goto err_dvb_net;
ret = request_irq(pdev->irq, dm1105dvb_irq, IRQF_SHARED,
DRIVER_NAME, dm1105dvb);
if (ret < 0)
- goto err_free_irq;
+ goto err_workqueue;
return 0;
+err_workqueue:
+ destroy_workqueue(dm1105dvb->wq);
+err_dvb_net:
+ dvb_net_release(&dm1105dvb->dvbnet);
err_disconnect_frontend:
dmx->disconnect_frontend(dmx);
err_remove_mem_frontend:
@@ -776,8 +812,6 @@ err_i2c_del_adapter:
i2c_del_adapter(&dm1105dvb->i2c_adap);
err_dm1105dvb_hw_exit:
dm1105dvb_hw_exit(dm1105dvb);
-err_free_irq:
- free_irq(pdev->irq, dm1105dvb);
err_pci_iounmap:
pci_iounmap(pdev, dm1105dvb->io_mem);
err_pci_release_regions:
@@ -834,6 +868,11 @@ static struct pci_device_id dm1105_id_table[] __devinitdata = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_DEVICE_ID_DW2004,
}, {
+ .vendor = PCI_VENDOR_ID_AXESS,
+ .device = PCI_DEVICE_ID_DM05,
+ .subvendor = PCI_VENDOR_ID_AXESS,
+ .subdevice = PCI_DEVICE_ID_DM05,
+ }, {
/* empty */
},
};
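The dm1105 changes pick the LNB control GPIO values by PCI subsystem ID (Axess DM05 versus the default DM1105 layout) before writing them, and move demux processing onto a dedicated single-threaded workqueue. A compilable sketch of the board-dependent GPIO selection, with register writes replaced by printf():

#include <stdio.h>

#define DM1105_LNB_13V 0x00010100u
#define DM1105_LNB_18V 0x00000100u
#define DM05_LNB_13V   0x00020000u
#define DM05_LNB_18V   0x00030000u
#define ID_DM05        0x1105u

/* pick the GPIO values by board id once, then issue a single write for the
 * requested voltage; register I/O is replaced by printf() here */
static void set_voltage(unsigned int board_id, int want_18v)
{
        unsigned int v13, v18;

        if (board_id == ID_DM05) {
                v13 = DM05_LNB_13V;
                v18 = DM05_LNB_18V;
        } else {
                v13 = DM1105_LNB_13V;
                v18 = DM1105_LNB_18V;
        }
        printf("LNB GPIO value: 0x%08x\n", want_18v ? v18 : v13);
}

int main(void)
{
        set_voltage(ID_DM05, 1);   /* Axess DM05 layout, 18 V */
        set_voltage(0x036fu, 0);   /* default DM1105 layout, 13 V */
        return 0;
}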
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index c35fbb8d8f4..6d6121eb5d5 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -244,19 +244,13 @@ static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
{
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
- int ret;
- if (dmxdev->exit) {
- mutex_unlock(&dmxdev->mutex);
+ if (dmxdev->exit)
return -ENODEV;
- }
- //mutex_lock(&dmxdev->mutex);
- ret = dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
- file->f_flags & O_NONBLOCK,
- buf, count, ppos);
- //mutex_unlock(&dmxdev->mutex);
- return ret;
+ return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
+ file->f_flags & O_NONBLOCK,
+ buf, count, ppos);
}
static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index e2eca0b1fe7..cfe2768d24a 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -38,6 +38,16 @@
*/
// #define DVB_DEMUX_SECTION_LOSS_LOG
+static int dvb_demux_tscheck;
+module_param(dvb_demux_tscheck, int, 0644);
+MODULE_PARM_DESC(dvb_demux_tscheck,
+ "enable transport stream continuity and TEI check");
+
+#define dprintk_tscheck(x...) do { \
+ if (dvb_demux_tscheck && printk_ratelimit()) \
+ printk(x); \
+ } while (0)
+
/******************************************************************************
* static inlined helper functions
******************************************************************************/
@@ -376,6 +386,36 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
u16 pid = ts_pid(buf);
int dvr_done = 0;
+ if (dvb_demux_tscheck) {
+ if (!demux->cnt_storage)
+ demux->cnt_storage = vmalloc(MAX_PID + 1);
+
+ if (!demux->cnt_storage) {
+ printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
+ dvb_demux_tscheck = 0;
+ goto no_dvb_demux_tscheck;
+ }
+
+ /* check pkt counter */
+ if (pid < MAX_PID) {
+ if (buf[1] & 0x80)
+ dprintk_tscheck("TEI detected. "
+ "PID=0x%x data1=0x%x\n",
+ pid, buf[1]);
+
+ if ((buf[3] & 0xf) != demux->cnt_storage[pid])
+ dprintk_tscheck("TS packet counter mismatch. "
+ "PID=0x%x expected 0x%x "
+ "got 0x%x\n",
+ pid, demux->cnt_storage[pid],
+ buf[3] & 0xf);
+
+ demux->cnt_storage[pid] = ((buf[3] & 0xf) + 1) & 0xf;
+ }
+ /* end check */
+ }
+no_dvb_demux_tscheck:
+
list_for_each_entry(feed, &demux->feed_list, list_head) {
if ((feed->pid != pid) && (feed->pid != 0x2000))
continue;
@@ -1160,6 +1200,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
int i;
struct dmx_demux *dmx = &dvbdemux->dmx;
+ dvbdemux->cnt_storage = NULL;
dvbdemux->users = 0;
dvbdemux->filter = vmalloc(dvbdemux->filternum * sizeof(struct dvb_demux_filter));
@@ -1226,6 +1267,7 @@ EXPORT_SYMBOL(dvb_dmx_init);
void dvb_dmx_release(struct dvb_demux *dvbdemux)
{
+ vfree(dvbdemux->cnt_storage);
vfree(dvbdemux->filter);
vfree(dvbdemux->feed);
}
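The new dvb_demux_tscheck path keeps one expected continuity counter per PID in cnt_storage and reports TEI or counter mismatches, rate-limited. A minimal user-space version of the same check on raw 188-byte TS packets; the array sizing and header offsets follow the hunk, while the test packet is fabricated:

#include <stdio.h>
#include <stdint.h>

#define MAX_PID 0x1fff

static uint8_t expected_cc[MAX_PID + 1];

/* check the TEI flag (bit 7 of byte 1) and the 4-bit continuity counter
 * (low nibble of byte 3) of a 188-byte TS packet, one counter per PID */
static void ts_check(const uint8_t *pkt)
{
        unsigned int pid = ((pkt[1] & 0x1f) << 8) | pkt[2];
        unsigned int cc = pkt[3] & 0x0f;

        if (pkt[1] & 0x80)
                printf("TEI set on PID 0x%x\n", pid);
        if (cc != expected_cc[pid])
                printf("CC mismatch on PID 0x%x: expected %u, got %u\n",
                       pid, (unsigned int)expected_cc[pid], cc);
        expected_cc[pid] = (cc + 1) & 0x0f;
}

int main(void)
{
        uint8_t pkt[188] = { 0x47, 0x00, 0x11, 0x10 };  /* PID 0x11, CC 0 */

        ts_check(pkt);  /* first packet: counter now expects 1 */
        ts_check(pkt);  /* same packet again: mismatch is reported */
        return 0;
}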
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index 2c5f915329c..2fe05d03240 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -42,6 +42,8 @@
#define DVB_DEMUX_MASK_MAX 18
+#define MAX_PID 0x1fff
+
struct dvb_demux_filter {
struct dmx_section_filter filter;
u8 maskandmode[DMX_MAX_FILTER_SIZE];
@@ -127,6 +129,8 @@ struct dvb_demux {
struct mutex mutex;
spinlock_t lock;
+
+ uint8_t *cnt_storage; /* for TS continuity check */
};
int dvb_dmx_init(struct dvb_demux *dvbdemux);
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index ebc78157b9b..f50ca7292a7 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -543,6 +543,7 @@ restart:
if (kthread_should_stop() || dvb_frontend_is_exiting(fe)) {
/* got signal or quitting */
+ fepriv->exit = 1;
break;
}
@@ -656,6 +657,7 @@ restart:
}
fepriv->thread = NULL;
+ fepriv->exit = 0;
mb();
dvb_frontend_wakeup(fe);
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index a454ee8f1e4..479dd05762a 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -447,6 +447,15 @@ static int dvb_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static char *dvb_nodename(struct device *dev)
+{
+ struct dvb_device *dvbdev = dev_get_drvdata(dev);
+
+ return kasprintf(GFP_KERNEL, "dvb/adapter%d/%s%d",
+ dvbdev->adapter->num, dnames[dvbdev->type], dvbdev->id);
+}
+
+
static int __init init_dvbdev(void)
{
int retval;
@@ -469,6 +478,7 @@ static int __init init_dvbdev(void)
goto error;
}
dvb_class->dev_uevent = dvb_uevent;
+ dvb_class->nodename = dvb_nodename;
return 0;
error:
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 60955a70d88..496c1a37034 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -216,7 +216,7 @@ config DVB_USB_TTUSB2
help
Say Y here to support the Pinnacle 400e DVB-S USB2.0 receiver. The
firmware protocol used by this module is similar to the one used by the
- old ttusb-driver - that's why the module is called dvb-usb-ttusb2.ko.
+ old ttusb-driver - that's why the module is called dvb-usb-ttusb2.
config DVB_USB_DTT200U
tristate "WideView WT-200U and WT-220U (pen) DVB-T USB2.0 support (Yakumo/Hama/Typhoon/Yuan)"
@@ -261,6 +261,7 @@ config DVB_USB_DW2102
select DVB_STB6000 if !DVB_FE_CUSTOMISE
select DVB_CX24116 if !DVB_FE_CUSTOMISE
select DVB_SI21XX if !DVB_FE_CUSTOMISE
+ select DVB_TDA10021 if !DVB_FE_CUSTOMISE
help
Say Y here to support the DvbWorld DVB-S/S2 USB2.0 receivers
and the TeVii S650.
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index 53bfc8e42fb..4cb31e7c13c 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -40,7 +40,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static DEFINE_MUTEX(af9015_usb_mutex);
static struct af9015_config af9015_config;
-static struct dvb_usb_device_properties af9015_properties[2];
+static struct dvb_usb_device_properties af9015_properties[3];
static int af9015_properties_count = ARRAY_SIZE(af9015_properties);
static struct af9013_config af9015_af9013_config[] = {
@@ -538,7 +538,7 @@ exit:
/* dump eeprom */
static int af9015_eeprom_dump(struct dvb_usb_device *d)
{
- char buf[52], buf2[4];
+ char buf[4+3*16+1], buf2[4];
u8 reg, val;
for (reg = 0; ; reg++) {
@@ -1261,7 +1261,11 @@ static struct usb_device_id af9015_usb_table[] = {
{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_2)},
{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_3)},
{USB_DEVICE(USB_VID_AFATECH, USB_PID_TREKSTOR_DVBT)},
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850)},
+/* 20 */{USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850)},
+ {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A805)},
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU)},
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810)},
+ {USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03)},
{0},
};
MODULE_DEVICE_TABLE(usb, af9015_usb_table);
@@ -1321,7 +1325,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 9,
+ .num_device_descs = 9, /* max 9 */
.devices = {
{
.name = "Afatech AF9015 DVB-T USB2.0 stick",
@@ -1426,7 +1430,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 9,
+ .num_device_descs = 9, /* max 9 */
.devices = {
{
.name = "Xtensions XD-380",
@@ -1478,7 +1482,85 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.warm_ids = {NULL},
},
}
- }
+ }, {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .download_firmware = af9015_download_firmware,
+ .firmware = "dvb-usb-af9015.fw",
+ .no_reconnect = 1,
+
+ .size_of_priv = sizeof(struct af9015_state), \
+
+ .num_adapters = 2,
+ .adapter = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+
+ .pid_filter_count = 32,
+ .pid_filter = af9015_pid_filter,
+ .pid_filter_ctrl = af9015_pid_filter_ctrl,
+
+ .frontend_attach =
+ af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x84,
+ },
+ },
+ {
+ .frontend_attach =
+ af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x85,
+ .u = {
+ .bulk = {
+ .buffersize =
+ TS_USB20_MAX_PACKET_SIZE,
+ }
+ }
+ },
+ }
+ },
+
+ .identify_state = af9015_identify_state,
+
+ .rc_query = af9015_rc_query,
+ .rc_interval = 150,
+
+ .i2c_algo = &af9015_i2c_algo,
+
+ .num_device_descs = 4, /* max 9 */
+ .devices = {
+ {
+ .name = "AverMedia AVerTV Volar GPS 805 (A805)",
+ .cold_ids = {&af9015_usb_table[21], NULL},
+ .warm_ids = {NULL},
+ },
+ {
+ .name = "Conceptronic USB2.0 DVB-T CTVDIGRCU " \
+ "V3.0",
+ .cold_ids = {&af9015_usb_table[22], NULL},
+ .warm_ids = {NULL},
+ },
+ {
+ .name = "KWorld Digial MC-810",
+ .cold_ids = {&af9015_usb_table[23], NULL},
+ .warm_ids = {NULL},
+ },
+ {
+ .name = "Genius TVGo DVB-T03",
+ .cold_ids = {&af9015_usb_table[24], NULL},
+ .warm_ids = {NULL},
+ },
+ }
+ },
};
static int af9015_usb_probe(struct usb_interface *intf,
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 8ddbadf6219..818b2ab584b 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -1346,9 +1346,9 @@ static int dib0700_xc5000_tuner_callback(void *priv, int component,
if (command == XC5000_TUNER_RESET) {
/* Reset the tuner */
dib0700_set_gpio(adap->dev, GPIO1, GPIO_OUT, 0);
- msleep(330); /* from Windows USB trace */
+ msleep(10);
dib0700_set_gpio(adap->dev, GPIO1, GPIO_OUT, 1);
- msleep(330); /* from Windows USB trace */
+ msleep(10);
} else {
err("xc5000: unknown tuner callback command: %d\n", command);
return -EINVAL;
@@ -1493,6 +1493,10 @@ struct usb_device_id dib0700_usb_id_table[] = {
{ USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_TIGER_ATSC_B210) },
{ USB_DEVICE(USB_VID_YUAN, USB_PID_YUAN_MC770) },
{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DTT) },
+/* 50 */{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DTT_Dlx) },
+ { USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_H) },
+ { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_T3) },
+ { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_T5) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -1692,7 +1696,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
- .num_device_descs = 11,
+ .num_device_descs = 12,
.devices = {
{ "DiBcom STK7070P reference design",
{ &dib0700_usb_id_table[15], NULL },
@@ -1726,8 +1730,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ &dib0700_usb_id_table[30], NULL },
{ NULL },
},
- { "Terratec Cinergy T USB XXS",
- { &dib0700_usb_id_table[33], NULL },
+ { "Terratec Cinergy T USB XXS/ T3",
+ { &dib0700_usb_id_table[33],
+ &dib0700_usb_id_table[52], NULL },
{ NULL },
},
{ "Elgato EyeTV DTT",
@@ -1738,6 +1743,10 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ &dib0700_usb_id_table[45], NULL },
{ NULL },
},
+ { "Elgato EyeTV Dtt Dlx PD378S",
+ { &dib0700_usb_id_table[50], NULL },
+ { NULL },
+ },
},
.rc_interval = DEFAULT_RC_INTERVAL,
@@ -1784,8 +1793,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ &dib0700_usb_id_table[36], NULL },
{ NULL },
},
- { "Terratec Cinergy DT USB XS Diversity",
- { &dib0700_usb_id_table[43], NULL },
+ { "Terratec Cinergy DT USB XS Diversity/ T5",
+ { &dib0700_usb_id_table[43],
+ &dib0700_usb_id_table[53], NULL},
{ NULL },
},
{ "Sony PlayTV",
@@ -1812,7 +1822,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
- .num_device_descs = 7,
+ .num_device_descs = 8,
.devices = {
{ "Terratec Cinergy HT USB XE",
{ &dib0700_usb_id_table[27], NULL },
@@ -1842,6 +1852,11 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ &dib0700_usb_id_table[48], NULL },
{ NULL },
},
+ { "Leadtek WinFast DTV Dongle H",
+ { &dib0700_usb_id_table[51], NULL },
+ { NULL },
+ },
+
},
.rc_interval = DEFAULT_RC_INTERVAL,
.rc_key_map = dib0700_rc_keys,
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index 8ee6cd4da9e..8dbad1ec53c 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -133,14 +133,17 @@ static int dibusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
for (i = 0; i < num; i++) {
/* write/read request */
- if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
+ if (i+1 < num && (msg[i].flags & I2C_M_RD) == 0
+ && (msg[i+1].flags & I2C_M_RD)) {
if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,
msg[i+1].buf,msg[i+1].len) < 0)
break;
i++;
- } else
+ } else if ((msg[i].flags & I2C_M_RD) == 0) {
if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,NULL,0) < 0)
break;
+ } else
+ break;
}
mutex_unlock(&d->i2c_mutex);
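The dibusb_i2c_xfer() fix pairs msg[i] with msg[i+1] only when the first is a write and the second a read, and bails out on an unexpected leading read. A small sketch of that pairing logic over a generic message list; struct msg here is an illustration, not the kernel's struct i2c_msg:

#include <stdio.h>

struct msg {
        int is_read;    /* non-zero for a read message */
};

/* pair each write with the read that immediately follows it; a lone read
 * at the front of the list is rejected instead of being mishandled */
static void xfer(const struct msg *m, int num)
{
        int i;

        for (i = 0; i < num; i++) {
                if (i + 1 < num && !m[i].is_read && m[i + 1].is_read) {
                        printf("write+read pair at %d\n", i);
                        i++;            /* both messages consumed */
                } else if (!m[i].is_read) {
                        printf("plain write at %d\n", i);
                } else {
                        printf("unsupported lone read at %d\n", i);
                        break;
                }
        }
}

int main(void)
{
        struct msg msgs[] = { { 0 }, { 1 }, { 0 } };

        xfer(msgs, 3);
        return 0;
}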
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index f506c74119f..9593b728999 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -80,6 +80,7 @@
#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d
#define USB_PID_COMPRO_VIDEOMATE_U500 0x1e78
#define USB_PID_COMPRO_VIDEOMATE_U500_PC 0x1e80
+#define USB_PID_CONCEPTRONIC_CTVDIGRCU 0xe397
#define USB_PID_CONEXANT_D680_DMB 0x86d6
#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064
#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065
@@ -97,6 +98,7 @@
#define USB_PID_DPOSH_M9206_COLD 0x9206
#define USB_PID_DPOSH_M9206_WARM 0xa090
#define USB_PID_UNIWILL_STK7700P 0x6003
+#define USB_PID_GENIUS_TVGO_DVB_T03 0x4012
#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0
#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1
#define USB_PID_INTEL_CE9500 0x9500
@@ -104,6 +106,7 @@
#define USB_PID_KWORLD_395U 0xe396
#define USB_PID_KWORLD_395U_2 0xe39b
#define USB_PID_KWORLD_395U_3 0xe395
+#define USB_PID_KWORLD_MC810 0xc810
#define USB_PID_KWORLD_PC160_2T 0xc160
#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
@@ -171,6 +174,7 @@
#define USB_PID_AVERMEDIA_A309 0xa309
#define USB_PID_AVERMEDIA_A310 0xa310
#define USB_PID_AVERMEDIA_A850 0x850a
+#define USB_PID_AVERMEDIA_A805 0xa805
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
@@ -178,6 +182,8 @@
#define USB_PID_TERRATEC_CINERGY_HT_EXPRESS 0x0060
#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062
#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078
+#define USB_PID_TERRATEC_T3 0x10a0
+#define USB_PID_TERRATEC_T5 0x10a1
#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
#define USB_PID_PINNACLE_PCTV2000E 0x022c
#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
@@ -222,6 +228,7 @@
#define USB_PID_WINFAST_DTV_DONGLE_COLD 0x6025
#define USB_PID_WINFAST_DTV_DONGLE_WARM 0x6026
#define USB_PID_WINFAST_DTV_DONGLE_STK7700P 0x6f00
+#define USB_PID_WINFAST_DTV_DONGLE_H 0x60f6
#define USB_PID_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01
#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029
#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200
@@ -251,5 +258,6 @@
#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
#define USB_PID_SONY_PLAYTV 0x0003
#define USB_PID_ELGATO_EYETV_DTT 0x0021
+#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
#endif
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 2d5352e54dc..e441d274e6c 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -196,7 +196,7 @@ struct dvb_usb_device_properties {
#define CYPRESS_FX2 3
int usb_ctrl;
int (*download_firmware) (struct usb_device *, const struct firmware *);
- const char firmware[FIRMWARE_NAME_MAX];
+ const char *firmware;
int no_reconnect;
int size_of_priv;
@@ -223,7 +223,7 @@ struct dvb_usb_device_properties {
int generic_bulk_ctrl_endpoint;
int num_device_descs;
- struct dvb_usb_device_description devices[11];
+ struct dvb_usb_device_description devices[12];
};
/**
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index c65f273ff31..75de49c0d94 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -1,7 +1,7 @@
/* DVB USB framework compliant Linux driver for the
-* DVBWorld DVB-S 2101, 2102, DVB-S2 2104 Card
-*
-* Copyright (C) 2008 Igor M. Liplianin (liplianin@me.by)
+* DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101,
+* TeVii S600, S650 Cards
+* Copyright (C) 2008,2009 Igor M. Liplianin (liplianin@me.by)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,6 +17,7 @@
#include "stb6000.h"
#include "eds1547.h"
#include "cx24116.h"
+#include "tda1002x.h"
#ifndef USB_PID_DW2102
#define USB_PID_DW2102 0x2102
@@ -26,10 +27,18 @@
#define USB_PID_DW2104 0x2104
#endif
+#ifndef USB_PID_DW3101
+#define USB_PID_DW3101 0x3101
+#endif
+
#ifndef USB_PID_CINERGY_S
#define USB_PID_CINERGY_S 0x0064
#endif
+#ifndef USB_PID_TEVII_S650
+#define USB_PID_TEVII_S650 0xd650
+#endif
+
#define DW210X_READ_MSG 0
#define DW210X_WRITE_MSG 1
@@ -40,18 +49,21 @@
#define DW2102_VOLTAGE_CTRL (0x1800)
#define DW2102_RC_QUERY (0x1a00)
-struct dw210x_state {
- u32 last_key_pressed;
-};
-struct dw210x_rc_keys {
- u32 keycode;
- u32 event;
+struct dvb_usb_rc_keys_table {
+ struct dvb_usb_rc_key *rc_keys;
+ int rc_keys_size;
};
/* debug */
static int dvb_usb_dw2102_debug;
module_param_named(debug, dvb_usb_dw2102_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer (or-able))." DVB_USB_DEBUG_STATUS);
+MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))."
+ DVB_USB_DEBUG_STATUS);
+
+/* keymaps */
+static int ir_keymap;
+module_param_named(keymap, ir_keymap, int, 0644);
+MODULE_PARM_DESC(keymap, "set keymap 0=default 1=dvbworld 2=tevii 3=tbs ...");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -79,7 +91,7 @@ static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value,
static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
-struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
int i = 0, ret = 0;
u8 buf6[] = {0x2c, 0x05, 0xc0, 0, 0, 0, 0};
u16 value;
@@ -205,6 +217,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
mutex_unlock(&d->i2c_mutex);
return num;
}
+
static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
@@ -219,7 +232,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
case 2: {
/* read */
/* first write first register number */
- u8 ibuf [msg[1].len + 2], obuf[3];
+ u8 ibuf[msg[1].len + 2], obuf[3];
obuf[0] = 0xd0;
obuf[1] = msg[0].len;
obuf[2] = msg[0].buf[0];
@@ -293,7 +306,7 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
case 2: {
/* read */
/* first write first register number */
- u8 ibuf [msg[1].len + 2], obuf[3];
+ u8 ibuf[msg[1].len + 2], obuf[3];
obuf[0] = 0xaa;
obuf[1] = msg[0].len;
obuf[2] = msg[0].buf[0];
@@ -360,6 +373,69 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
return num;
}
+static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
+{
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int ret = 0, i;
+
+ if (!d)
+ return -ENODEV;
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ switch (num) {
+ case 2: {
+ /* read */
+ /* first write first register number */
+ u8 ibuf[msg[1].len + 2], obuf[3];
+ obuf[0] = msg[0].addr << 1;
+ obuf[1] = msg[0].len;
+ obuf[2] = msg[0].buf[0];
+ ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ obuf, msg[0].len + 2, DW210X_WRITE_MSG);
+ /* second read registers */
+ ret = dw210x_op_rw(d->udev, 0xc3, 0x19 , 0,
+ ibuf, msg[1].len + 2, DW210X_READ_MSG);
+ memcpy(msg[1].buf, ibuf + 2, msg[1].len);
+
+ break;
+ }
+ case 1:
+ switch (msg[0].addr) {
+ case 0x60:
+ case 0x0c: {
+ /* write to register */
+ u8 obuf[msg[0].len + 2];
+ obuf[0] = msg[0].addr << 1;
+ obuf[1] = msg[0].len;
+ memcpy(obuf + 2, msg[0].buf, msg[0].len);
+ ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ obuf, msg[0].len + 2, DW210X_WRITE_MSG);
+ break;
+ }
+ case(DW2102_RC_QUERY): {
+ u8 ibuf[2];
+ ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ ibuf, 2, DW210X_READ_MSG);
+ memcpy(msg[0].buf, ibuf , 2);
+ break;
+ }
+ }
+
+ break;
+ }
+
+ for (i = 0; i < num; i++) {
+ deb_xfer("%02x:%02x: %s ", i, msg[i].addr,
+ msg[i].flags == 0 ? ">>>" : "<<<");
+ debug_dump(msg[i].buf, msg[i].len, deb_xfer);
+ }
+
+ mutex_unlock(&d->i2c_mutex);
+ return num;
+}
+
static u32 dw210x_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
@@ -385,6 +461,11 @@ static struct i2c_algorithm dw2104_i2c_algo = {
.functionality = dw210x_i2c_func,
};
+static struct i2c_algorithm dw3101_i2c_algo = {
+ .master_xfer = dw3101_i2c_transfer,
+ .functionality = dw210x_i2c_func,
+};
+
static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
{
int i;
@@ -404,6 +485,7 @@ static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
debug_dump(eepromline, 16, deb_xfer);
}
}
+
memcpy(mac, eeprom + 8, 6);
return 0;
};
@@ -448,6 +530,11 @@ static struct si21xx_config serit_sp1511lhb_config = {
};
+static struct tda10023_config dw3101_tda10023_config = {
+ .demod_address = 0x0c,
+ .invert = 1,
+};
+
static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
{
if ((d->fe = dvb_attach(cx24116_attach, &dw2104_config,
@@ -460,6 +547,7 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
}
static struct dvb_usb_device_properties dw2102_properties;
+static struct dvb_usb_device_properties dw2104_properties;
static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
{
@@ -497,6 +585,17 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
return -EIO;
}
+static int dw3101_frontend_attach(struct dvb_usb_adapter *d)
+{
+ d->fe = dvb_attach(tda10023_attach, &dw3101_tda10023_config,
+ &d->dev->i2c_adap, 0x48);
+ if (d->fe != NULL) {
+ info("Attached tda10023!\n");
+ return 0;
+ }
+ return -EIO;
+}
+
static int dw2102_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(dvb_pll_attach, adap->fe, 0x60,
@@ -512,6 +611,14 @@ static int dw2102_earda_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
+static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ dvb_attach(dvb_pll_attach, adap->fe, 0x60,
+ &adap->dev->i2c_adap, DVB_PLL_TUA6034);
+
+ return 0;
+}
+
static struct dvb_usb_rc_key dw210x_rc_keys[] = {
{ 0xf8, 0x0a, KEY_Q }, /*power*/
{ 0xf8, 0x0c, KEY_M }, /*mute*/
@@ -544,44 +651,147 @@ static struct dvb_usb_rc_key dw210x_rc_keys[] = {
{ 0xf8, 0x40, KEY_F }, /*full*/
{ 0xf8, 0x1e, KEY_W }, /*tvmode*/
{ 0xf8, 0x1b, KEY_B }, /*recall*/
+};
+static struct dvb_usb_rc_key tevii_rc_keys[] = {
+ { 0xf8, 0x0a, KEY_POWER },
+ { 0xf8, 0x0c, KEY_MUTE },
+ { 0xf8, 0x11, KEY_1 },
+ { 0xf8, 0x12, KEY_2 },
+ { 0xf8, 0x13, KEY_3 },
+ { 0xf8, 0x14, KEY_4 },
+ { 0xf8, 0x15, KEY_5 },
+ { 0xf8, 0x16, KEY_6 },
+ { 0xf8, 0x17, KEY_7 },
+ { 0xf8, 0x18, KEY_8 },
+ { 0xf8, 0x19, KEY_9 },
+ { 0xf8, 0x10, KEY_0 },
+ { 0xf8, 0x1c, KEY_MENU },
+ { 0xf8, 0x0f, KEY_VOLUMEDOWN },
+ { 0xf8, 0x1a, KEY_LAST },
+ { 0xf8, 0x0e, KEY_OPEN },
+ { 0xf8, 0x04, KEY_RECORD },
+ { 0xf8, 0x09, KEY_VOLUMEUP },
+ { 0xf8, 0x08, KEY_CHANNELUP },
+ { 0xf8, 0x07, KEY_PVR },
+ { 0xf8, 0x0b, KEY_TIME },
+ { 0xf8, 0x02, KEY_RIGHT },
+ { 0xf8, 0x03, KEY_LEFT },
+ { 0xf8, 0x00, KEY_UP },
+ { 0xf8, 0x1f, KEY_OK },
+ { 0xf8, 0x01, KEY_DOWN },
+ { 0xf8, 0x05, KEY_TUNER },
+ { 0xf8, 0x06, KEY_CHANNELDOWN },
+ { 0xf8, 0x40, KEY_PLAYPAUSE },
+ { 0xf8, 0x1e, KEY_REWIND },
+ { 0xf8, 0x1b, KEY_FAVORITES },
+ { 0xf8, 0x1d, KEY_BACK },
+ { 0xf8, 0x4d, KEY_FASTFORWARD },
+ { 0xf8, 0x44, KEY_EPG },
+ { 0xf8, 0x4c, KEY_INFO },
+ { 0xf8, 0x41, KEY_AB },
+ { 0xf8, 0x43, KEY_AUDIO },
+ { 0xf8, 0x45, KEY_SUBTITLE },
+ { 0xf8, 0x4a, KEY_LIST },
+ { 0xf8, 0x46, KEY_F1 },
+ { 0xf8, 0x47, KEY_F2 },
+ { 0xf8, 0x5e, KEY_F3 },
+ { 0xf8, 0x5c, KEY_F4 },
+ { 0xf8, 0x52, KEY_F5 },
+ { 0xf8, 0x5a, KEY_F6 },
+ { 0xf8, 0x56, KEY_MODE },
+ { 0xf8, 0x58, KEY_SWITCHVIDEOMODE },
};
+static struct dvb_usb_rc_key tbs_rc_keys[] = {
+ { 0xf8, 0x84, KEY_POWER },
+ { 0xf8, 0x94, KEY_MUTE },
+ { 0xf8, 0x87, KEY_1 },
+ { 0xf8, 0x86, KEY_2 },
+ { 0xf8, 0x85, KEY_3 },
+ { 0xf8, 0x8b, KEY_4 },
+ { 0xf8, 0x8a, KEY_5 },
+ { 0xf8, 0x89, KEY_6 },
+ { 0xf8, 0x8f, KEY_7 },
+ { 0xf8, 0x8e, KEY_8 },
+ { 0xf8, 0x8d, KEY_9 },
+ { 0xf8, 0x92, KEY_0 },
+ { 0xf8, 0x96, KEY_CHANNELUP },
+ { 0xf8, 0x91, KEY_CHANNELDOWN },
+ { 0xf8, 0x93, KEY_VOLUMEUP },
+ { 0xf8, 0x8c, KEY_VOLUMEDOWN },
+ { 0xf8, 0x83, KEY_RECORD },
+ { 0xf8, 0x98, KEY_PAUSE },
+ { 0xf8, 0x99, KEY_OK },
+ { 0xf8, 0x9a, KEY_SHUFFLE },
+ { 0xf8, 0x81, KEY_UP },
+ { 0xf8, 0x90, KEY_LEFT },
+ { 0xf8, 0x82, KEY_RIGHT },
+ { 0xf8, 0x88, KEY_DOWN },
+ { 0xf8, 0x95, KEY_FAVORITES },
+ { 0xf8, 0x97, KEY_SUBTITLE },
+ { 0xf8, 0x9d, KEY_ZOOM },
+ { 0xf8, 0x9f, KEY_EXIT },
+ { 0xf8, 0x9e, KEY_MENU },
+ { 0xf8, 0x9c, KEY_EPG },
+ { 0xf8, 0x80, KEY_PREVIOUS },
+ { 0xf8, 0x9b, KEY_MODE }
+};
+static struct dvb_usb_rc_keys_table keys_tables[] = {
+ { dw210x_rc_keys, ARRAY_SIZE(dw210x_rc_keys) },
+ { tevii_rc_keys, ARRAY_SIZE(tevii_rc_keys) },
+ { tbs_rc_keys, ARRAY_SIZE(tbs_rc_keys) },
+};
static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
- struct dw210x_state *st = d->priv;
+ struct dvb_usb_rc_key *keymap = d->props.rc_key_map;
+ int keymap_size = d->props.rc_key_map_size;
u8 key[2];
- struct i2c_msg msg[] = {
- {.addr = DW2102_RC_QUERY, .flags = I2C_M_RD, .buf = key,
- .len = 2},
+ struct i2c_msg msg = {
+ .addr = DW2102_RC_QUERY,
+ .flags = I2C_M_RD,
+ .buf = key,
+ .len = 2
};
int i;
+ /* override keymap */
+ if ((ir_keymap > 0) && (ir_keymap <= ARRAY_SIZE(keys_tables))) {
+ keymap = keys_tables[ir_keymap - 1].rc_keys ;
+ keymap_size = keys_tables[ir_keymap - 1].rc_keys_size;
+ }
*state = REMOTE_NO_KEY_PRESSED;
- if (dw2102_i2c_transfer(&d->i2c_adap, msg, 1) == 1) {
- for (i = 0; i < ARRAY_SIZE(dw210x_rc_keys); i++) {
- if (dw210x_rc_keys[i].data == msg[0].buf[0]) {
+ if (dw2102_i2c_transfer(&d->i2c_adap, &msg, 1) == 1) {
+ for (i = 0; i < keymap_size ; i++) {
+ if (keymap[i].data == msg.buf[0]) {
*state = REMOTE_KEY_PRESSED;
- *event = dw210x_rc_keys[i].event;
- st->last_key_pressed =
- dw210x_rc_keys[i].event;
+ *event = keymap[i].event;
break;
}
- st->last_key_pressed = 0;
+
}
+
+ if ((*state) == REMOTE_KEY_PRESSED)
+ deb_rc("%s: found rc key: %x, %x, event: %x\n",
+ __func__, key[0], key[1], (*event));
+ else if (key[0] != 0xff)
+ deb_rc("%s: unknown rc key: %x, %x\n",
+ __func__, key[0], key[1]);
+
}
- /* info("key: %x %x\n",key[0],key[1]); */
+
return 0;
}
static struct usb_device_id dw2102_table[] = {
{USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2102)},
{USB_DEVICE(USB_VID_CYPRESS, 0x2101)},
- {USB_DEVICE(USB_VID_CYPRESS, 0x2104)},
- {USB_DEVICE(0x9022, 0xd650)},
+ {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2104)},
+ {USB_DEVICE(0x9022, USB_PID_TEVII_S650)},
{USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)},
+ {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)},
{ }
};
@@ -642,11 +852,16 @@ static int dw2102_load_firmware(struct usb_device *dev,
}
/* init registers */
switch (dev->descriptor.idProduct) {
+ case USB_PID_TEVII_S650:
+ dw2104_properties.rc_key_map = tevii_rc_keys;
+ dw2104_properties.rc_key_map_size =
+ ARRAY_SIZE(tevii_rc_keys);
case USB_PID_DW2104:
- case 0xd650:
reset = 1;
dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
DW210X_WRITE_MSG);
+ /* break omitted intentionally */
+ case USB_PID_DW3101:
reset = 0;
dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
DW210X_WRITE_MSG);
@@ -690,6 +905,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
DW210X_READ_MSG);
break;
}
+
msleep(100);
kfree(p);
}
@@ -700,7 +916,6 @@ static struct dvb_usb_device_properties dw2102_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
.firmware = "dvb-usb-dw2102.fw",
- .size_of_priv = sizeof(struct dw210x_state),
.no_reconnect = 1,
.i2c_algo = &dw2102_serit_i2c_algo,
@@ -714,7 +929,7 @@ static struct dvb_usb_device_properties dw2102_properties = {
.num_adapters = 1,
.download_firmware = dw2102_load_firmware,
.read_mac_address = dw210x_read_mac_address,
- .adapter = {
+ .adapter = {
{
.frontend_attach = dw2102_frontend_attach,
.streaming_ctrl = NULL,
@@ -752,7 +967,6 @@ static struct dvb_usb_device_properties dw2104_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
.firmware = "dvb-usb-dw2104.fw",
- .size_of_priv = sizeof(struct dw210x_state),
.no_reconnect = 1,
.i2c_algo = &dw2104_i2c_algo,
@@ -796,12 +1010,57 @@ static struct dvb_usb_device_properties dw2104_properties = {
}
};
+static struct dvb_usb_device_properties dw3101_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .firmware = "dvb-usb-dw3101.fw",
+ .no_reconnect = 1,
+
+ .i2c_algo = &dw3101_i2c_algo,
+ .rc_key_map = dw210x_rc_keys,
+ .rc_key_map_size = ARRAY_SIZE(dw210x_rc_keys),
+ .rc_interval = 150,
+ .rc_query = dw2102_rc_query,
+
+ .generic_bulk_ctrl_endpoint = 0x81,
+ /* parameter for the MPEG2-data transfer */
+ .num_adapters = 1,
+ .download_firmware = dw2102_load_firmware,
+ .read_mac_address = dw210x_read_mac_address,
+ .adapter = {
+ {
+ .frontend_attach = dw3101_frontend_attach,
+ .streaming_ctrl = NULL,
+ .tuner_attach = dw3101_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 8,
+ .endpoint = 0x82,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+ }
+ }
+ },
+ }
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "DVBWorld DVB-C 3101 USB2.0",
+ {&dw2102_table[5], NULL},
+ {NULL},
+ },
+ }
+};
+
static int dw2102_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
if (0 == dvb_usb_device_init(intf, &dw2102_properties,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &dw2104_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf, &dw3101_properties,
THIS_MODULE, NULL, adapter_nr)) {
return 0;
}
@@ -833,6 +1092,8 @@ module_init(dw2102_module_init);
module_exit(dw2102_module_exit);
MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
-MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104 USB2.0 device");
+MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
+ " DVB-C 3101 USB2.0,"
+ " TeVii S600, S650 USB2.0 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
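
The dw2102_rc_query() change above selects the key table at query time: keymap 0 keeps the per-device default from d->props.rc_key_map, while values 1..ARRAY_SIZE(keys_tables) override it with one of the built-in tables before the key code is looked up. A minimal standalone sketch of that selection and lookup logic follows; the table contents and names are illustrative only, not the driver's.

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct rc_key { unsigned char data; int event; };

static struct rc_key default_keys[] = { { 0x0a, 1 }, { 0x0c, 2 } };
static struct rc_key tevii_keys[]   = { { 0x0a, 3 }, { 0x0c, 4 } };

static struct { struct rc_key *keys; size_t n; } tables[] = {
	{ default_keys, ARRAY_SIZE(default_keys) },
	{ tevii_keys,   ARRAY_SIZE(tevii_keys) },
};

static int lookup(int keymap_param, unsigned char code)
{
	struct rc_key *keys = default_keys;
	size_t i, n = ARRAY_SIZE(default_keys);

	/* keymap_param == 0 keeps the default; 1..N picks an override */
	if (keymap_param > 0 && keymap_param <= (int)ARRAY_SIZE(tables)) {
		keys = tables[keymap_param - 1].keys;
		n = tables[keymap_param - 1].n;
	}
	for (i = 0; i < n; i++)
		if (keys[i].data == code)
			return keys[i].event;
	return -1;	/* no key pressed */
}

int main(void)
{
	printf("%d\n", lookup(2, 0x0c));	/* prints 4 (tevii table) */
	return 0;
}
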
diff --git a/drivers/media/dvb/dvb-usb/dw2102.h b/drivers/media/dvb/dvb-usb/dw2102.h
index e3370734e95..5cd0b0eb6ce 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.h
+++ b/drivers/media/dvb/dvb-usb/dw2102.h
@@ -5,4 +5,5 @@
#include "dvb-usb.h"
#define deb_xfer(args...) dprintk(dvb_usb_dw2102_debug, 0x02, args)
+#define deb_rc(args...) dprintk(dvb_usb_dw2102_debug, 0x04, args)
#endif
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index 3dd6843864e..afb444db43a 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -223,7 +223,7 @@ static struct usb_device_id gp8psk_usb_table [] = {
{ USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_1_WARM) },
{ USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_2) },
{ USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_1) },
- { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_CW3K) },
+/* { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_CW3K) }, */
{ 0 },
};
MODULE_DEVICE_TABLE(usb, gp8psk_usb_table);
@@ -254,7 +254,7 @@ static struct dvb_usb_device_properties gp8psk_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
- .num_device_descs = 4,
+ .num_device_descs = 3,
.devices = {
{ .name = "Genpix 8PSK-to-USB2 Rev.1 DVB-S receiver",
.cold_ids = { &gp8psk_usb_table[0], NULL },
@@ -268,10 +268,6 @@ static struct dvb_usb_device_properties gp8psk_properties = {
.cold_ids = { NULL },
.warm_ids = { &gp8psk_usb_table[3], NULL },
},
- { .name = "Genpix SkyWalker-CW3K DVB-S receiver",
- .cold_ids = { NULL },
- .warm_ids = { &gp8psk_usb_table[4], NULL },
- },
{ NULL },
}
};
diff --git a/drivers/media/dvb/firewire/firedtv-1394.c b/drivers/media/dvb/firewire/firedtv-1394.c
index 4e207658c5d..2b6eeeab5b2 100644
--- a/drivers/media/dvb/firewire/firedtv-1394.c
+++ b/drivers/media/dvb/firewire/firedtv-1394.c
@@ -225,7 +225,7 @@ fail_free:
static int node_remove(struct device *dev)
{
- struct firedtv *fdtv = dev->driver_data;
+ struct firedtv *fdtv = dev_get_drvdata(dev);
fdtv_dvb_unregister(fdtv);
@@ -242,7 +242,7 @@ static int node_remove(struct device *dev)
static int node_update(struct unit_directory *ud)
{
- struct firedtv *fdtv = ud->device.driver_data;
+ struct firedtv *fdtv = dev_get_drvdata(&ud->device);
if (fdtv->isochannel >= 0)
cmp_establish_pp_connection(fdtv, fdtv->subunit,
diff --git a/drivers/media/dvb/firewire/firedtv-dvb.c b/drivers/media/dvb/firewire/firedtv-dvb.c
index 9d308dd32a5..5742fde79d9 100644
--- a/drivers/media/dvb/firewire/firedtv-dvb.c
+++ b/drivers/media/dvb/firewire/firedtv-dvb.c
@@ -268,7 +268,7 @@ struct firedtv *fdtv_alloc(struct device *dev,
if (!fdtv)
return NULL;
- dev->driver_data = fdtv;
+ dev_set_drvdata(dev, fdtv);
fdtv->device = dev;
fdtv->isochannel = -1;
fdtv->voltage = 0xff;
diff --git a/drivers/media/dvb/firewire/firedtv-rc.c b/drivers/media/dvb/firewire/firedtv-rc.c
index 46a6324d7b7..27bca2e283d 100644
--- a/drivers/media/dvb/firewire/firedtv-rc.c
+++ b/drivers/media/dvb/firewire/firedtv-rc.c
@@ -18,7 +18,7 @@
#include "firedtv.h"
/* fixed table with older keycodes, geared towards MythTV */
-const static u16 oldtable[] = {
+static const u16 oldtable[] = {
/* code from device: 0x4501...0x451f */
@@ -62,7 +62,7 @@ const static u16 oldtable[] = {
};
/* user-modifiable table for a remote as sold in 2008 */
-const static u16 keytable[] = {
+static const u16 keytable[] = {
/* code from device: 0x0300...0x031f */
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 23e4cffeba3..be967ac09a3 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -35,6 +35,21 @@ config DVB_STB6100
A Silicon tuner from ST used in conjunction with the STB0899
demodulator. Say Y when you want to support this tuner.
+config DVB_STV090x
+ tristate "STV0900/STV0903(A/B) based"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ DVB-S/S2/DSS Multistandard Professional/Broadcast demodulators.
+ Say Y when you want to support these frontends.
+
+config DVB_STV6110x
+ tristate "STV6110/(A) based tuners"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A Silicon tuner that supports DVB-S and DVB-S2 modes
+
comment "DVB-S (satellite) frontends"
depends on DVB_CORE
@@ -506,6 +521,13 @@ config DVB_ISL6421
help
An SEC control chip.
+config DVB_ISL6423
+ tristate "ISL6423 SEC controller"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A SEC controller chip from Intersil
+
config DVB_LGS8GL5
tristate "Silicon Legend LGS-8GL5 demodulator (OFDM)"
depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index bc2b00abd10..832473c1e51 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -71,4 +71,6 @@ obj-$(CONFIG_DVB_STB6000) += stb6000.o
obj-$(CONFIG_DVB_S921) += s921.o
obj-$(CONFIG_DVB_STV6110) += stv6110.o
obj-$(CONFIG_DVB_STV0900) += stv0900.o
-
+obj-$(CONFIG_DVB_STV090x) += stv090x.o
+obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
+obj-$(CONFIG_DVB_ISL6423) += isl6423.o
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index b2b50fb4cfd..136c5863d81 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -1455,7 +1455,7 @@ static int af9013_download_firmware(struct af9013_state *state)
af9013_ops.info.name);
/* request the firmware, this will block and timeout */
- ret = request_firmware(&fw, fw_file, &state->i2c->dev);
+ ret = request_firmware(&fw, fw_file, state->i2c->dev.parent);
if (ret) {
err("did not find the firmware file. (%s) "
"Please see linux/Documentation/dvb/ for more details" \
diff --git a/drivers/media/dvb/frontends/au8522_dig.c b/drivers/media/dvb/frontends/au8522_dig.c
index 35731258bb0..956b80f4979 100644
--- a/drivers/media/dvb/frontends/au8522_dig.c
+++ b/drivers/media/dvb/frontends/au8522_dig.c
@@ -367,11 +367,90 @@ static struct {
{ 0x8231, 0x13 },
};
-/* QAM Modulation table */
+/* QAM64 Modulation table */
static struct {
u16 reg;
u16 data;
-} QAM_mod_tab[] = {
+} QAM64_mod_tab[] = {
+ { 0x00a3, 0x09 },
+ { 0x00a4, 0x00 },
+ { 0x0081, 0xc4 },
+ { 0x00a5, 0x40 },
+ { 0x00aa, 0x77 },
+ { 0x00ad, 0x77 },
+ { 0x00a6, 0x67 },
+ { 0x0262, 0x20 },
+ { 0x021c, 0x30 },
+ { 0x00b8, 0x3e },
+ { 0x00b9, 0xf0 },
+ { 0x00ba, 0x01 },
+ { 0x00bb, 0x18 },
+ { 0x00bc, 0x50 },
+ { 0x00bd, 0x00 },
+ { 0x00be, 0xea },
+ { 0x00bf, 0xef },
+ { 0x00c0, 0xfc },
+ { 0x00c1, 0xbd },
+ { 0x00c2, 0x1f },
+ { 0x00c3, 0xfc },
+ { 0x00c4, 0xdd },
+ { 0x00c5, 0xaf },
+ { 0x00c6, 0x00 },
+ { 0x00c7, 0x38 },
+ { 0x00c8, 0x30 },
+ { 0x00c9, 0x05 },
+ { 0x00ca, 0x4a },
+ { 0x00cb, 0xd0 },
+ { 0x00cc, 0x01 },
+ { 0x00cd, 0xd9 },
+ { 0x00ce, 0x6f },
+ { 0x00cf, 0xf9 },
+ { 0x00d0, 0x70 },
+ { 0x00d1, 0xdf },
+ { 0x00d2, 0xf7 },
+ { 0x00d3, 0xc2 },
+ { 0x00d4, 0xdf },
+ { 0x00d5, 0x02 },
+ { 0x00d6, 0x9a },
+ { 0x00d7, 0xd0 },
+ { 0x0250, 0x0d },
+ { 0x0251, 0xcd },
+ { 0x0252, 0xe0 },
+ { 0x0253, 0x05 },
+ { 0x0254, 0xa7 },
+ { 0x0255, 0xff },
+ { 0x0256, 0xed },
+ { 0x0257, 0x5b },
+ { 0x0258, 0xae },
+ { 0x0259, 0xe6 },
+ { 0x025a, 0x3d },
+ { 0x025b, 0x0f },
+ { 0x025c, 0x0d },
+ { 0x025d, 0xea },
+ { 0x025e, 0xf2 },
+ { 0x025f, 0x51 },
+ { 0x0260, 0xf5 },
+ { 0x0261, 0x06 },
+ { 0x021a, 0x00 },
+ { 0x0546, 0x40 },
+ { 0x0210, 0xc7 },
+ { 0x0211, 0xaa },
+ { 0x0212, 0xab },
+ { 0x0213, 0x02 },
+ { 0x0502, 0x00 },
+ { 0x0121, 0x04 },
+ { 0x0122, 0x04 },
+ { 0x052e, 0x10 },
+ { 0x00a4, 0xca },
+ { 0x00a7, 0x40 },
+ { 0x0526, 0x01 },
+};
+
+/* QAM256 Modulation table */
+static struct {
+ u16 reg;
+ u16 data;
+} QAM256_mod_tab[] = {
{ 0x80a3, 0x09 },
{ 0x80a4, 0x00 },
{ 0x8081, 0xc4 },
@@ -464,12 +543,19 @@ static int au8522_enable_modulation(struct dvb_frontend *fe,
au8522_set_if(fe, state->config->vsb_if);
break;
case QAM_64:
+ dprintk("%s() QAM 64\n", __func__);
+ for (i = 0; i < ARRAY_SIZE(QAM64_mod_tab); i++)
+ au8522_writereg(state,
+ QAM64_mod_tab[i].reg,
+ QAM64_mod_tab[i].data);
+ au8522_set_if(fe, state->config->qam_if);
+ break;
case QAM_256:
- dprintk("%s() QAM 64/256\n", __func__);
- for (i = 0; i < ARRAY_SIZE(QAM_mod_tab); i++)
+ dprintk("%s() QAM 256\n", __func__);
+ for (i = 0; i < ARRAY_SIZE(QAM256_mod_tab); i++)
au8522_writereg(state,
- QAM_mod_tab[i].reg,
- QAM_mod_tab[i].data);
+ QAM256_mod_tab[i].reg,
+ QAM256_mod_tab[i].data);
au8522_set_if(fe, state->config->qam_if);
break;
default:
diff --git a/drivers/media/dvb/frontends/cx24116.c b/drivers/media/dvb/frontends/cx24116.c
index 9b9f57264ce..2410d8b59b6 100644
--- a/drivers/media/dvb/frontends/cx24116.c
+++ b/drivers/media/dvb/frontends/cx24116.c
@@ -492,7 +492,7 @@ static int cx24116_firmware_ondemand(struct dvb_frontend *fe)
printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n",
__func__, CX24116_DEFAULT_FIRMWARE);
ret = request_firmware(&fw, CX24116_DEFAULT_FIRMWARE,
- &state->i2c->dev);
+ state->i2c->dev.parent);
printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n",
__func__);
if (ret) {
diff --git a/drivers/media/dvb/frontends/drx397xD.c b/drivers/media/dvb/frontends/drx397xD.c
index 172f1f928f0..01007553522 100644
--- a/drivers/media/dvb/frontends/drx397xD.c
+++ b/drivers/media/dvb/frontends/drx397xD.c
@@ -123,10 +123,10 @@ static int drx_load_fw(struct drx397xD_state *s, enum fw_ix ix)
}
memset(&fw[ix].data[0], 0, sizeof(fw[0].data));
- if (request_firmware(&fw[ix].file, fw[ix].name, &s->i2c->dev) != 0) {
+ rc = request_firmware(&fw[ix].file, fw[ix].name, s->i2c->dev.parent);
+ if (rc != 0) {
printk(KERN_ERR "%s: Firmware \"%s\" not available\n",
mod_name, fw[ix].name);
- rc = -ENOENT;
goto exit_err;
}
diff --git a/drivers/media/dvb/frontends/isl6423.c b/drivers/media/dvb/frontends/isl6423.c
new file mode 100644
index 00000000000..dca5bebfeeb
--- /dev/null
+++ b/drivers/media/dvb/frontends/isl6423.c
@@ -0,0 +1,308 @@
+/*
+ Intersil ISL6423 SEC and LNB Power supply controller
+
+ Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include "dvb_frontend.h"
+#include "isl6423.h"
+
+static unsigned int verbose;
+module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose, "Set Verbosity level");
+
+#define FE_ERROR 0
+#define FE_NOTICE 1
+#define FE_INFO 2
+#define FE_DEBUG 3
+#define FE_DEBUGREG 4
+
+#define dprintk(__y, __z, format, arg...) do { \
+ if (__z) { \
+ if ((verbose > FE_ERROR) && (verbose > __y)) \
+ printk(KERN_ERR "%s: " format "\n", __func__ , ##arg); \
+ else if ((verbose > FE_NOTICE) && (verbose > __y)) \
+ printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \
+ else if ((verbose > FE_INFO) && (verbose > __y)) \
+ printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \
+ else if ((verbose > FE_DEBUG) && (verbose > __y)) \
+ printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \
+ } else { \
+ if (verbose > __y) \
+ printk(format, ##arg); \
+ } \
+} while (0)
+
+struct isl6423_dev {
+ const struct isl6423_config *config;
+ struct i2c_adapter *i2c;
+
+ u8 reg_3;
+ u8 reg_4;
+
+ unsigned int verbose;
+};
+
+static int isl6423_write(struct isl6423_dev *isl6423, u8 reg)
+{
+ struct i2c_adapter *i2c = isl6423->i2c;
+ u8 addr = isl6423->config->addr;
+ int err = 0;
+
+ struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = &reg, .len = 1 };
+
+ dprintk(FE_DEBUG, 1, "write reg %02X", reg);
+ err = i2c_transfer(i2c, &msg, 1);
+ if (err < 0)
+ goto exit;
+ return 0;
+
+exit:
+ dprintk(FE_ERROR, 1, "I/O error <%d>", err);
+ return err;
+}
+
+static int isl6423_set_modulation(struct dvb_frontend *fe)
+{
+ struct isl6423_dev *isl6423 = (struct isl6423_dev *) fe->sec_priv;
+ const struct isl6423_config *config = isl6423->config;
+ int err = 0;
+ u8 reg_2 = 0;
+
+ reg_2 = 0x01 << 5;
+
+ if (config->mod_extern)
+ reg_2 |= (1 << 3);
+ else
+ reg_2 |= (1 << 4);
+
+ err = isl6423_write(isl6423, reg_2);
+ if (err < 0)
+ goto exit;
+ return 0;
+
+exit:
+ dprintk(FE_ERROR, 1, "I/O error <%d>", err);
+ return err;
+}
+
+static int isl6423_voltage_boost(struct dvb_frontend *fe, long arg)
+{
+ struct isl6423_dev *isl6423 = (struct isl6423_dev *) fe->sec_priv;
+ u8 reg_3 = isl6423->reg_3;
+ u8 reg_4 = isl6423->reg_4;
+ int err = 0;
+
+ if (arg) {
+ /* EN = 1, VSPEN = 1, VBOT = 1 */
+ reg_4 |= (1 << 4);
+ reg_4 |= 0x1;
+ reg_3 |= (1 << 3);
+ } else {
+ /* EN = 1, VSPEN = 1, VBOT = 0 */
+ reg_4 |= (1 << 4);
+ reg_4 &= ~0x1;
+ reg_3 |= (1 << 3);
+ }
+ err = isl6423_write(isl6423, reg_3);
+ if (err < 0)
+ goto exit;
+
+ err = isl6423_write(isl6423, reg_4);
+ if (err < 0)
+ goto exit;
+
+ isl6423->reg_3 = reg_3;
+ isl6423->reg_4 = reg_4;
+
+ return 0;
+exit:
+ dprintk(FE_ERROR, 1, "I/O error <%d>", err);
+ return err;
+}
+
+
+static int isl6423_set_voltage(struct dvb_frontend *fe,
+ enum fe_sec_voltage voltage)
+{
+ struct isl6423_dev *isl6423 = (struct isl6423_dev *) fe->sec_priv;
+ u8 reg_3 = isl6423->reg_3;
+ u8 reg_4 = isl6423->reg_4;
+ int err = 0;
+
+ switch (voltage) {
+ case SEC_VOLTAGE_OFF:
+ /* EN = 0 */
+ reg_4 &= ~(1 << 4);
+ break;
+
+ case SEC_VOLTAGE_13:
+ /* EN = 1, VSPEN = 1, VTOP = 0, VBOT = 0 */
+ reg_4 |= (1 << 4);
+ reg_4 &= ~0x3;
+ reg_3 |= (1 << 3);
+ break;
+
+ case SEC_VOLTAGE_18:
+ /* EN = 1, VSPEN = 1, VTOP = 1, VBOT = 0 */
+ reg_4 |= (1 << 4);
+ reg_4 |= 0x2;
+ reg_4 &= ~0x1;
+ reg_3 |= (1 << 3);
+ break;
+
+ default:
+ break;
+ }
+ err = isl6423_write(isl6423, reg_3);
+ if (err < 0)
+ goto exit;
+
+ err = isl6423_write(isl6423, reg_4);
+ if (err < 0)
+ goto exit;
+
+ isl6423->reg_3 = reg_3;
+ isl6423->reg_4 = reg_4;
+
+ return 0;
+exit:
+ dprintk(FE_ERROR, 1, "I/O error <%d>", err);
+ return err;
+}
+
+static int isl6423_set_current(struct dvb_frontend *fe)
+{
+ struct isl6423_dev *isl6423 = (struct isl6423_dev *) fe->sec_priv;
+ u8 reg_3 = isl6423->reg_3;
+ const struct isl6423_config *config = isl6423->config;
+ int err = 0;
+
+ switch (config->current_max) {
+ case SEC_CURRENT_275m:
+ /* 275mA */
+ /* ISELH = 0, ISELL = 0 */
+ reg_3 &= ~0x3;
+ break;
+
+ case SEC_CURRENT_515m:
+ /* 515mA */
+ /* ISELH = 0, ISELL = 1 */
+ reg_3 &= ~0x2;
+ reg_3 |= 0x1;
+ break;
+
+ case SEC_CURRENT_635m:
+ /* 635mA */
+ /* ISELH = 1, ISELL = 0 */
+ reg_3 &= ~0x1;
+ reg_3 |= 0x2;
+ break;
+
+ case SEC_CURRENT_800m:
+ /* 800mA */
+ /* ISELH = 1, ISELL = 1 */
+ reg_3 |= 0x3;
+ break;
+ }
+
+ err = isl6423_write(isl6423, reg_3);
+ if (err < 0)
+ goto exit;
+
+ switch (config->curlim) {
+ case SEC_CURRENT_LIM_ON:
+ /* DCL = 0 */
+ reg_3 &= ~0x10;
+ break;
+
+ case SEC_CURRENT_LIM_OFF:
+ /* DCL = 1 */
+ reg_3 |= 0x10;
+ break;
+ }
+
+ err = isl6423_write(isl6423, reg_3);
+ if (err < 0)
+ goto exit;
+
+ isl6423->reg_3 = reg_3;
+
+ return 0;
+exit:
+ dprintk(FE_ERROR, 1, "I/O error <%d>", err);
+ return err;
+}
+
+static void isl6423_release(struct dvb_frontend *fe)
+{
+ isl6423_set_voltage(fe, SEC_VOLTAGE_OFF);
+
+ kfree(fe->sec_priv);
+ fe->sec_priv = NULL;
+}
+
+struct dvb_frontend *isl6423_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ const struct isl6423_config *config)
+{
+ struct isl6423_dev *isl6423;
+
+ isl6423 = kzalloc(sizeof(struct isl6423_dev), GFP_KERNEL);
+ if (!isl6423)
+ return NULL;
+
+ isl6423->config = config;
+ isl6423->i2c = i2c;
+ fe->sec_priv = isl6423;
+
+ /* SR3H = 0, SR3M = 1, SR3L = 0 */
+ isl6423->reg_3 = 0x02 << 5;
+ /* SR4H = 0, SR4M = 1, SR4L = 1 */
+ isl6423->reg_4 = 0x03 << 5;
+
+ if (isl6423_set_current(fe))
+ goto exit;
+
+ if (isl6423_set_modulation(fe))
+ goto exit;
+
+ fe->ops.release_sec = isl6423_release;
+ fe->ops.set_voltage = isl6423_set_voltage;
+ fe->ops.enable_high_lnb_voltage = isl6423_voltage_boost;
+ isl6423->verbose = verbose;
+
+ return fe;
+
+exit:
+ kfree(isl6423);
+ fe->sec_priv = NULL;
+ return NULL;
+}
+EXPORT_SYMBOL(isl6423_attach);
+
+MODULE_DESCRIPTION("ISL6423 SEC");
+MODULE_AUTHOR("Manu Abraham");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/isl6423.h b/drivers/media/dvb/frontends/isl6423.h
new file mode 100644
index 00000000000..e1a37fba01c
--- /dev/null
+++ b/drivers/media/dvb/frontends/isl6423.h
@@ -0,0 +1,63 @@
+/*
+ Intersil ISL6423 SEC and LNB Power supply controller
+
+ Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __ISL_6423_H
+#define __ISL_6423_H
+
+#include <linux/dvb/frontend.h>
+
+enum isl6423_current {
+ SEC_CURRENT_275m = 0,
+ SEC_CURRENT_515m,
+ SEC_CURRENT_635m,
+ SEC_CURRENT_800m,
+};
+
+enum isl6423_curlim {
+ SEC_CURRENT_LIM_ON = 1,
+ SEC_CURRENT_LIM_OFF
+};
+
+struct isl6423_config {
+ enum isl6423_current current_max;
+ enum isl6423_curlim curlim;
+ u8 addr;
+ u8 mod_extern;
+};
+
+#if defined(CONFIG_DVB_ISL6423) || (defined(CONFIG_DVB_ISL6423_MODULE) && defined(MODULE))
+
+
+extern struct dvb_frontend *isl6423_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ const struct isl6423_config *config);
+
+#else
+static inline struct dvb_frontend *isl6423_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ const struct isl6423_config *config)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+#endif /* CONFIG_DVB_ISL6423 */
+
+#endif /* __ISL_6423_H */
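
The new isl6423.h follows the usual DVB frontend convention: the real isl6423_attach() prototype is exposed only when the driver is enabled in Kconfig, and a static inline stub that warns and returns NULL takes its place otherwise, so card drivers can call the attach function unconditionally. A sketch of the same guard for a hypothetical demod_xyz header; the CONFIG symbol and names are invented purely for illustration.

#if defined(CONFIG_DVB_DEMOD_XYZ) || \
	(defined(CONFIG_DVB_DEMOD_XYZ_MODULE) && defined(MODULE))
extern struct dvb_frontend *demod_xyz_attach(struct dvb_frontend *fe,
					     struct i2c_adapter *i2c, u8 addr);
#else
static inline struct dvb_frontend *demod_xyz_attach(struct dvb_frontend *fe,
						    struct i2c_adapter *i2c,
						    u8 addr)
{
	/* Stub keeps callers building and linking when the driver is off. */
	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
	return NULL;
}
#endif
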
diff --git a/drivers/media/dvb/frontends/lgdt3305.c b/drivers/media/dvb/frontends/lgdt3305.c
index d92d0557a80..fde8c59700f 100644
--- a/drivers/media/dvb/frontends/lgdt3305.c
+++ b/drivers/media/dvb/frontends/lgdt3305.c
@@ -19,6 +19,7 @@
*
*/
+#include <asm/div64.h>
#include <linux/dvb/frontend.h>
#include "dvb_math.h"
#include "lgdt3305.h"
@@ -496,27 +497,15 @@ static int lgdt3305_set_if(struct lgdt3305_state *state,
nco = if_freq_khz / 10;
-#define LGDT3305_64BIT_DIVISION_ENABLED 0
- /* FIXME: 64bit division disabled to avoid linking error:
- * WARNING: "__udivdi3" [lgdt3305.ko] undefined!
- */
switch (param->u.vsb.modulation) {
case VSB_8:
-#if LGDT3305_64BIT_DIVISION_ENABLED
nco <<= 24;
- nco /= 625;
-#else
- nco *= ((1 << 24) / 625);
-#endif
+ do_div(nco, 625);
break;
case QAM_64:
case QAM_256:
-#if LGDT3305_64BIT_DIVISION_ENABLED
nco <<= 28;
- nco /= 625;
-#else
- nco *= ((1 << 28) / 625);
-#endif
+ do_div(nco, 625);
break;
default:
return -EINVAL;
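
The lgdt3305 hunk above drops the 32-bit approximation and its FIXME in favour of the kernel's do_div() helper, which performs a 64-by-32 division in place (the u64 operand becomes the quotient and the 32-bit remainder is returned) and therefore avoids the __udivdi3 link error on 32-bit builds. A minimal sketch of that calling convention; the helper name is illustrative, the arithmetic mirrors the VSB_8 case above.

#include <asm/div64.h>
#include <linux/types.h>

/* Sketch only: compute the VSB NCO word as in lgdt3305_set_if().
 * do_div() stores the quotient back into nco and returns the remainder. */
static u64 vsb_nco_from_if(u32 if_freq_khz)
{
	u64 nco = if_freq_khz / 10;

	nco <<= 24;
	do_div(nco, 625);
	return nco;
}
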
diff --git a/drivers/media/dvb/frontends/lgs8gxx.c b/drivers/media/dvb/frontends/lgs8gxx.c
index f9785dfe735..fde27645bbe 100644
--- a/drivers/media/dvb/frontends/lgs8gxx.c
+++ b/drivers/media/dvb/frontends/lgs8gxx.c
@@ -37,14 +37,14 @@
} while (0)
static int debug;
-static int fake_signal_str;
+static int fake_signal_str = 1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
module_param(fake_signal_str, int, 0644);
MODULE_PARM_DESC(fake_signal_str, "fake signal strength for LGS8913."
-"Signal strength calculation is slow.(default:off).");
+"Signal strength calculation is slow.(default:on).");
/* LGS8GXX internal helper functions */
@@ -610,7 +610,7 @@ static int lgs8gxx_read_signal_agc(struct lgs8gxx_state *priv, u16 *signal)
else
cat = 0;
- *signal = cat;
+ *signal = cat * 65535 / 5;
return 0;
}
@@ -630,8 +630,8 @@ static int lgs8913_read_signal_strength(struct lgs8gxx_state *priv, u16 *signal)
if (fake_signal_str) {
if ((t & 0xC0) == 0xC0) {
- dprintk("Fake signal strength as 50\n");
- *signal = 0x32;
+ dprintk("Fake signal strength\n");
+ *signal = 0x7FFF;
} else
*signal = 0;
return 0;
diff --git a/drivers/media/dvb/frontends/lnbp21.c b/drivers/media/dvb/frontends/lnbp21.c
index 1dcc56f32bf..71f607fe8fc 100644
--- a/drivers/media/dvb/frontends/lnbp21.c
+++ b/drivers/media/dvb/frontends/lnbp21.c
@@ -133,7 +133,7 @@ static struct dvb_frontend *lnbx2x_attach(struct dvb_frontend *fe,
/* override frontend ops */
fe->ops.set_voltage = lnbp21_set_voltage;
fe->ops.enable_high_lnb_voltage = lnbp21_enable_high_lnb_voltage;
- printk(KERN_INFO "LNBx2x attached on addr=%x", lnbp21->i2c_addr);
+ printk(KERN_INFO "LNBx2x attached on addr=%x\n", lnbp21->i2c_addr);
return fe;
}
diff --git a/drivers/media/dvb/frontends/mt312.c b/drivers/media/dvb/frontends/mt312.c
index 5ac9b15920f..a621f727935 100644
--- a/drivers/media/dvb/frontends/mt312.c
+++ b/drivers/media/dvb/frontends/mt312.c
@@ -77,7 +77,7 @@ static int mt312_read(struct mt312_state *state, const enum mt312_reg_addr reg,
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2) {
- printk(KERN_ERR "%s: ret == %d\n", __func__, ret);
+ printk(KERN_DEBUG "%s: ret == %d\n", __func__, ret);
return -EREMOTEIO;
}
diff --git a/drivers/media/dvb/frontends/nxt200x.c b/drivers/media/dvb/frontends/nxt200x.c
index a8429ebfa8a..eac20650499 100644
--- a/drivers/media/dvb/frontends/nxt200x.c
+++ b/drivers/media/dvb/frontends/nxt200x.c
@@ -879,7 +879,8 @@ static int nxt2002_init(struct dvb_frontend* fe)
/* request the firmware, this will block until someone uploads it */
printk("nxt2002: Waiting for firmware upload (%s)...\n", NXT2002_DEFAULT_FIRMWARE);
- ret = request_firmware(&fw, NXT2002_DEFAULT_FIRMWARE, &state->i2c->dev);
+ ret = request_firmware(&fw, NXT2002_DEFAULT_FIRMWARE,
+ state->i2c->dev.parent);
printk("nxt2002: Waiting for firmware upload(2)...\n");
if (ret) {
printk("nxt2002: No firmware uploaded (timeout or file not found?)\n");
@@ -943,7 +944,8 @@ static int nxt2004_init(struct dvb_frontend* fe)
/* request the firmware, this will block until someone uploads it */
printk("nxt2004: Waiting for firmware upload (%s)...\n", NXT2004_DEFAULT_FIRMWARE);
- ret = request_firmware(&fw, NXT2004_DEFAULT_FIRMWARE, &state->i2c->dev);
+ ret = request_firmware(&fw, NXT2004_DEFAULT_FIRMWARE,
+ state->i2c->dev.parent);
printk("nxt2004: Waiting for firmware upload(2)...\n");
if (ret) {
printk("nxt2004: No firmware uploaded (timeout or file not found?)\n");
diff --git a/drivers/media/dvb/frontends/or51132.c b/drivers/media/dvb/frontends/or51132.c
index 5ed32544de3..8133ea3cddd 100644
--- a/drivers/media/dvb/frontends/or51132.c
+++ b/drivers/media/dvb/frontends/or51132.c
@@ -340,7 +340,7 @@ static int or51132_set_parameters(struct dvb_frontend* fe,
}
printk("or51132: Waiting for firmware upload(%s)...\n",
fwname);
- ret = request_firmware(&fw, fwname, &state->i2c->dev);
+ ret = request_firmware(&fw, fwname, state->i2c->dev.parent);
if (ret) {
printk(KERN_WARNING "or51132: No firmware up"
"loaded(timeout or file not found?)\n");
diff --git a/drivers/media/dvb/frontends/stv0900_priv.h b/drivers/media/dvb/frontends/stv0900_priv.h
index 762d5af62d7..67dc8ec634e 100644
--- a/drivers/media/dvb/frontends/stv0900_priv.h
+++ b/drivers/media/dvb/frontends/stv0900_priv.h
@@ -60,8 +60,6 @@
} \
} while (0)
-#define dmd_choose(a, b) (demod = STV0900_DEMOD_2 ? b : a))
-
static int stvdebug;
#define dprintk(args...) \
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
new file mode 100644
index 00000000000..96ef745a2e4
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -0,0 +1,4299 @@
+/*
+ STV0900/0903 Multistandard Broadcast Frontend driver
+ Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+ Copyright (C) ST Microelectronics
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+
+#include <linux/dvb/frontend.h>
+#include "dvb_frontend.h"
+
+#include "stv6110x.h" /* for demodulator internal modes */
+
+#include "stv090x_reg.h"
+#include "stv090x.h"
+#include "stv090x_priv.h"
+
+static unsigned int verbose;
+module_param(verbose, int, 0644);
+
+struct mutex demod_lock;
+
+/* DVBS1 and DSS C/N Lookup table */
+static const struct stv090x_tab stv090x_s1cn_tab[] = {
+ { 0, 8917 }, /* 0.0dB */
+ { 5, 8801 }, /* 0.5dB */
+ { 10, 8667 }, /* 1.0dB */
+ { 15, 8522 }, /* 1.5dB */
+ { 20, 8355 }, /* 2.0dB */
+ { 25, 8175 }, /* 2.5dB */
+ { 30, 7979 }, /* 3.0dB */
+ { 35, 7763 }, /* 3.5dB */
+ { 40, 7530 }, /* 4.0dB */
+ { 45, 7282 }, /* 4.5dB */
+ { 50, 7026 }, /* 5.0dB */
+ { 55, 6781 }, /* 5.5dB */
+ { 60, 6514 }, /* 6.0dB */
+ { 65, 6241 }, /* 6.5dB */
+ { 70, 5965 }, /* 7.0dB */
+ { 75, 5690 }, /* 7.5dB */
+ { 80, 5424 }, /* 8.0dB */
+ { 85, 5161 }, /* 8.5dB */
+ { 90, 4902 }, /* 9.0dB */
+ { 95, 4654 }, /* 9.5dB */
+ { 100, 4417 }, /* 10.0dB */
+ { 105, 4186 }, /* 10.5dB */
+ { 110, 3968 }, /* 11.0dB */
+ { 115, 3757 }, /* 11.5dB */
+ { 120, 3558 }, /* 12.0dB */
+ { 125, 3366 }, /* 12.5dB */
+ { 130, 3185 }, /* 13.0dB */
+ { 135, 3012 }, /* 13.5dB */
+ { 140, 2850 }, /* 14.0dB */
+ { 145, 2698 }, /* 14.5dB */
+ { 150, 2550 }, /* 15.0dB */
+ { 160, 2283 }, /* 16.0dB */
+ { 170, 2042 }, /* 17.0dB */
+ { 180, 1827 }, /* 18.0dB */
+ { 190, 1636 }, /* 19.0dB */
+ { 200, 1466 }, /* 20.0dB */
+ { 210, 1315 }, /* 21.0dB */
+ { 220, 1181 }, /* 22.0dB */
+ { 230, 1064 }, /* 23.0dB */
+ { 240, 960 }, /* 24.0dB */
+ { 250, 869 }, /* 25.0dB */
+ { 260, 792 }, /* 26.0dB */
+ { 270, 724 }, /* 27.0dB */
+ { 280, 665 }, /* 28.0dB */
+ { 290, 616 }, /* 29.0dB */
+ { 300, 573 }, /* 30.0dB */
+ { 310, 537 }, /* 31.0dB */
+ { 320, 507 }, /* 32.0dB */
+ { 330, 483 }, /* 33.0dB */
+ { 400, 398 }, /* 40.0dB */
+ { 450, 381 }, /* 45.0dB */
+ { 500, 377 } /* 50.0dB */
+};
+
+/* DVBS2 C/N Lookup table */
+static const struct stv090x_tab stv090x_s2cn_tab[] = {
+ { -30, 13348 }, /* -3.0dB */
+ { -20, 12640 }, /* -2.0dB */
+ { -10, 11883 }, /* -1.0dB */
+ { 0, 11101 }, /* -0.0dB */
+ { 5, 10718 }, /* 0.5dB */
+ { 10, 10339 }, /* 1.0dB */
+ { 15, 9947 }, /* 1.5dB */
+ { 20, 9552 }, /* 2.0dB */
+ { 25, 9183 }, /* 2.5dB */
+ { 30, 8799 }, /* 3.0dB */
+ { 35, 8422 }, /* 3.5dB */
+ { 40, 8062 }, /* 4.0dB */
+ { 45, 7707 }, /* 4.5dB */
+ { 50, 7353 }, /* 5.0dB */
+ { 55, 7025 }, /* 5.5dB */
+ { 60, 6684 }, /* 6.0dB */
+ { 65, 6331 }, /* 6.5dB */
+ { 70, 6036 }, /* 7.0dB */
+ { 75, 5727 }, /* 7.5dB */
+ { 80, 5437 }, /* 8.0dB */
+ { 85, 5164 }, /* 8.5dB */
+ { 90, 4902 }, /* 9.0dB */
+ { 95, 4653 }, /* 9.5dB */
+ { 100, 4408 }, /* 10.0dB */
+ { 105, 4187 }, /* 10.5dB */
+ { 110, 3961 }, /* 11.0dB */
+ { 115, 3751 }, /* 11.5dB */
+ { 120, 3558 }, /* 12.0dB */
+ { 125, 3368 }, /* 12.5dB */
+ { 130, 3191 }, /* 13.0dB */
+ { 135, 3017 }, /* 13.5dB */
+ { 140, 2862 }, /* 14.0dB */
+ { 145, 2710 }, /* 14.5dB */
+ { 150, 2565 }, /* 15.0dB */
+ { 160, 2300 }, /* 16.0dB */
+ { 170, 2058 }, /* 17.0dB */
+ { 180, 1849 }, /* 18.0dB */
+ { 190, 1663 }, /* 19.0dB */
+ { 200, 1495 }, /* 20.0dB */
+ { 210, 1349 }, /* 21.0dB */
+ { 220, 1222 }, /* 22.0dB */
+ { 230, 1110 }, /* 23.0dB */
+ { 240, 1011 }, /* 24.0dB */
+ { 250, 925 }, /* 25.0dB */
+ { 260, 853 }, /* 26.0dB */
+ { 270, 789 }, /* 27.0dB */
+ { 280, 734 }, /* 28.0dB */
+ { 290, 690 }, /* 29.0dB */
+ { 300, 650 }, /* 30.0dB */
+ { 310, 619 }, /* 31.0dB */
+ { 320, 593 }, /* 32.0dB */
+ { 330, 571 }, /* 33.0dB */
+ { 400, 498 }, /* 40.0dB */
+ { 450, 484 }, /* 45.0dB */
+ { 500, 481 } /* 50.0dB */
+};
+
+/* RF level C/N lookup table */
+static const struct stv090x_tab stv090x_rf_tab[] = {
+ { -5, 0xcaa1 }, /* -5dBm */
+ { -10, 0xc229 }, /* -10dBm */
+ { -15, 0xbb08 }, /* -15dBm */
+ { -20, 0xb4bc }, /* -20dBm */
+ { -25, 0xad5a }, /* -25dBm */
+ { -30, 0xa298 }, /* -30dBm */
+ { -35, 0x98a8 }, /* -35dBm */
+ { -40, 0x8389 }, /* -40dBm */
+ { -45, 0x59be }, /* -45dBm */
+ { -50, 0x3a14 }, /* -50dBm */
+ { -55, 0x2d11 }, /* -55dBm */
+ { -60, 0x210d }, /* -60dBm */
+ { -65, 0xa14f }, /* -65dBm */
+ { -70, 0x07aa } /* -70dBm */
+};
+
+
+static struct stv090x_reg stv0900_initval[] = {
+
+ { STV090x_OUTCFG, 0x00 },
+ { STV090x_MODECFG, 0xff },
+ { STV090x_AGCRF1CFG, 0x11 },
+ { STV090x_AGCRF2CFG, 0x13 },
+ { STV090x_TSGENERAL1X, 0x14 },
+ { STV090x_TSTTNR2, 0x21 },
+ { STV090x_TSTTNR4, 0x21 },
+ { STV090x_P2_DISTXCTL, 0x22 },
+ { STV090x_P2_F22TX, 0xc0 },
+ { STV090x_P2_F22RX, 0xc0 },
+ { STV090x_P2_DISRXCTL, 0x00 },
+ { STV090x_P2_DMDCFGMD, 0xF9 },
+ { STV090x_P2_DEMOD, 0x08 },
+ { STV090x_P2_DMDCFG3, 0xc4 },
+ { STV090x_P2_CARFREQ, 0xed },
+ { STV090x_P2_LDT, 0xd0 },
+ { STV090x_P2_LDT2, 0xb8 },
+ { STV090x_P2_TMGCFG, 0xd2 },
+ { STV090x_P2_TMGTHRISE, 0x20 },
+ { STV090x_P1_TMGCFG, 0xd2 },
+
+ { STV090x_P2_TMGTHFALL, 0x00 },
+ { STV090x_P2_FECSPY, 0x88 },
+ { STV090x_P2_FSPYDATA, 0x3a },
+ { STV090x_P2_FBERCPT4, 0x00 },
+ { STV090x_P2_FSPYBER, 0x10 },
+ { STV090x_P2_ERRCTRL1, 0x35 },
+ { STV090x_P2_ERRCTRL2, 0xc1 },
+ { STV090x_P2_CFRICFG, 0xf8 },
+ { STV090x_P2_NOSCFG, 0x1c },
+ { STV090x_P2_DMDTOM, 0x20 },
+ { STV090x_P2_CORRELMANT, 0x70 },
+ { STV090x_P2_CORRELABS, 0x88 },
+ { STV090x_P2_AGC2O, 0x5b },
+ { STV090x_P2_AGC2REF, 0x38 },
+ { STV090x_P2_CARCFG, 0xe4 },
+ { STV090x_P2_ACLC, 0x1A },
+ { STV090x_P2_BCLC, 0x09 },
+ { STV090x_P2_CARHDR, 0x08 },
+ { STV090x_P2_KREFTMG, 0xc1 },
+ { STV090x_P2_SFRUPRATIO, 0xf0 },
+ { STV090x_P2_SFRLOWRATIO, 0x70 },
+ { STV090x_P2_SFRSTEP, 0x58 },
+ { STV090x_P2_TMGCFG2, 0x01 },
+ { STV090x_P2_CAR2CFG, 0x26 },
+ { STV090x_P2_BCLC2S2Q, 0x86 },
+ { STV090x_P2_BCLC2S28, 0x86 },
+ { STV090x_P2_SMAPCOEF7, 0x77 },
+ { STV090x_P2_SMAPCOEF6, 0x85 },
+ { STV090x_P2_SMAPCOEF5, 0x77 },
+ { STV090x_P2_TSCFGL, 0x20 },
+ { STV090x_P2_DMDCFG2, 0x3b },
+ { STV090x_P2_MODCODLST0, 0xff },
+ { STV090x_P2_MODCODLST1, 0xff },
+ { STV090x_P2_MODCODLST2, 0xff },
+ { STV090x_P2_MODCODLST3, 0xff },
+ { STV090x_P2_MODCODLST4, 0xff },
+ { STV090x_P2_MODCODLST5, 0xff },
+ { STV090x_P2_MODCODLST6, 0xff },
+ { STV090x_P2_MODCODLST7, 0xcc },
+ { STV090x_P2_MODCODLST8, 0xcc },
+ { STV090x_P2_MODCODLST9, 0xcc },
+ { STV090x_P2_MODCODLSTA, 0xcc },
+ { STV090x_P2_MODCODLSTB, 0xcc },
+ { STV090x_P2_MODCODLSTC, 0xcc },
+ { STV090x_P2_MODCODLSTD, 0xcc },
+ { STV090x_P2_MODCODLSTE, 0xcc },
+ { STV090x_P2_MODCODLSTF, 0xcf },
+ { STV090x_P1_DISTXCTL, 0x22 },
+ { STV090x_P1_F22TX, 0xc0 },
+ { STV090x_P1_F22RX, 0xc0 },
+ { STV090x_P1_DISRXCTL, 0x00 },
+ { STV090x_P1_DMDCFGMD, 0xf9 },
+ { STV090x_P1_DEMOD, 0x08 },
+ { STV090x_P1_DMDCFG3, 0xc4 },
+ { STV090x_P1_DMDTOM, 0x20 },
+ { STV090x_P1_CARFREQ, 0xed },
+ { STV090x_P1_LDT, 0xd0 },
+ { STV090x_P1_LDT2, 0xb8 },
+ { STV090x_P1_TMGCFG, 0xd2 },
+ { STV090x_P1_TMGTHRISE, 0x20 },
+ { STV090x_P1_TMGTHFALL, 0x00 },
+ { STV090x_P1_SFRUPRATIO, 0xf0 },
+ { STV090x_P1_SFRLOWRATIO, 0x70 },
+ { STV090x_P1_TSCFGL, 0x20 },
+ { STV090x_P1_FECSPY, 0x88 },
+ { STV090x_P1_FSPYDATA, 0x3a },
+ { STV090x_P1_FBERCPT4, 0x00 },
+ { STV090x_P1_FSPYBER, 0x10 },
+ { STV090x_P1_ERRCTRL1, 0x35 },
+ { STV090x_P1_ERRCTRL2, 0xc1 },
+ { STV090x_P1_CFRICFG, 0xf8 },
+ { STV090x_P1_NOSCFG, 0x1c },
+ { STV090x_P1_CORRELMANT, 0x70 },
+ { STV090x_P1_CORRELABS, 0x88 },
+ { STV090x_P1_AGC2O, 0x5b },
+ { STV090x_P1_AGC2REF, 0x38 },
+ { STV090x_P1_CARCFG, 0xe4 },
+ { STV090x_P1_ACLC, 0x1A },
+ { STV090x_P1_BCLC, 0x09 },
+ { STV090x_P1_CARHDR, 0x08 },
+ { STV090x_P1_KREFTMG, 0xc1 },
+ { STV090x_P1_SFRSTEP, 0x58 },
+ { STV090x_P1_TMGCFG2, 0x01 },
+ { STV090x_P1_CAR2CFG, 0x26 },
+ { STV090x_P1_BCLC2S2Q, 0x86 },
+ { STV090x_P1_BCLC2S28, 0x86 },
+ { STV090x_P1_SMAPCOEF7, 0x77 },
+ { STV090x_P1_SMAPCOEF6, 0x85 },
+ { STV090x_P1_SMAPCOEF5, 0x77 },
+ { STV090x_P1_DMDCFG2, 0x3b },
+ { STV090x_P1_MODCODLST0, 0xff },
+ { STV090x_P1_MODCODLST1, 0xff },
+ { STV090x_P1_MODCODLST2, 0xff },
+ { STV090x_P1_MODCODLST3, 0xff },
+ { STV090x_P1_MODCODLST4, 0xff },
+ { STV090x_P1_MODCODLST5, 0xff },
+ { STV090x_P1_MODCODLST6, 0xff },
+ { STV090x_P1_MODCODLST7, 0xcc },
+ { STV090x_P1_MODCODLST8, 0xcc },
+ { STV090x_P1_MODCODLST9, 0xcc },
+ { STV090x_P1_MODCODLSTA, 0xcc },
+ { STV090x_P1_MODCODLSTB, 0xcc },
+ { STV090x_P1_MODCODLSTC, 0xcc },
+ { STV090x_P1_MODCODLSTD, 0xcc },
+ { STV090x_P1_MODCODLSTE, 0xcc },
+ { STV090x_P1_MODCODLSTF, 0xcf },
+ { STV090x_GENCFG, 0x1d },
+ { STV090x_NBITER_NF4, 0x37 },
+ { STV090x_NBITER_NF5, 0x29 },
+ { STV090x_NBITER_NF6, 0x37 },
+ { STV090x_NBITER_NF7, 0x33 },
+ { STV090x_NBITER_NF8, 0x31 },
+ { STV090x_NBITER_NF9, 0x2f },
+ { STV090x_NBITER_NF10, 0x39 },
+ { STV090x_NBITER_NF11, 0x3a },
+ { STV090x_NBITER_NF12, 0x29 },
+ { STV090x_NBITER_NF13, 0x37 },
+ { STV090x_NBITER_NF14, 0x33 },
+ { STV090x_NBITER_NF15, 0x2f },
+ { STV090x_NBITER_NF16, 0x39 },
+ { STV090x_NBITER_NF17, 0x3a },
+ { STV090x_NBITERNOERR, 0x04 },
+ { STV090x_GAINLLR_NF4, 0x0C },
+ { STV090x_GAINLLR_NF5, 0x0F },
+ { STV090x_GAINLLR_NF6, 0x11 },
+ { STV090x_GAINLLR_NF7, 0x14 },
+ { STV090x_GAINLLR_NF8, 0x17 },
+ { STV090x_GAINLLR_NF9, 0x19 },
+ { STV090x_GAINLLR_NF10, 0x20 },
+ { STV090x_GAINLLR_NF11, 0x21 },
+ { STV090x_GAINLLR_NF12, 0x0D },
+ { STV090x_GAINLLR_NF13, 0x0F },
+ { STV090x_GAINLLR_NF14, 0x13 },
+ { STV090x_GAINLLR_NF15, 0x1A },
+ { STV090x_GAINLLR_NF16, 0x1F },
+ { STV090x_GAINLLR_NF17, 0x21 },
+ { STV090x_RCCFGH, 0x20 },
+ { STV090x_P1_FECM, 0x01 }, /* disable DSS modes */
+ { STV090x_P2_FECM, 0x01 }, /* disable DSS modes */
+ { STV090x_P1_PRVIT, 0x2F }, /* disable PR 6/7 */
+ { STV090x_P2_PRVIT, 0x2F }, /* disable PR 6/7 */
+};
+
+static struct stv090x_reg stv0903_initval[] = {
+ { STV090x_OUTCFG, 0x00 },
+ { STV090x_AGCRF1CFG, 0x11 },
+ { STV090x_STOPCLK1, 0x48 },
+ { STV090x_STOPCLK2, 0x14 },
+ { STV090x_TSTTNR1, 0x27 },
+ { STV090x_TSTTNR2, 0x21 },
+ { STV090x_P1_DISTXCTL, 0x22 },
+ { STV090x_P1_F22TX, 0xc0 },
+ { STV090x_P1_F22RX, 0xc0 },
+ { STV090x_P1_DISRXCTL, 0x00 },
+ { STV090x_P1_DMDCFGMD, 0xF9 },
+ { STV090x_P1_DEMOD, 0x08 },
+ { STV090x_P1_DMDCFG3, 0xc4 },
+ { STV090x_P1_CARFREQ, 0xed },
+ { STV090x_P1_TNRCFG2, 0x82 },
+ { STV090x_P1_LDT, 0xd0 },
+ { STV090x_P1_LDT2, 0xb8 },
+ { STV090x_P1_TMGCFG, 0xd2 },
+ { STV090x_P1_TMGTHRISE, 0x20 },
+ { STV090x_P1_TMGTHFALL, 0x00 },
+ { STV090x_P1_SFRUPRATIO, 0xf0 },
+ { STV090x_P1_SFRLOWRATIO, 0x70 },
+ { STV090x_P1_TSCFGL, 0x20 },
+ { STV090x_P1_FECSPY, 0x88 },
+ { STV090x_P1_FSPYDATA, 0x3a },
+ { STV090x_P1_FBERCPT4, 0x00 },
+ { STV090x_P1_FSPYBER, 0x10 },
+ { STV090x_P1_ERRCTRL1, 0x35 },
+ { STV090x_P1_ERRCTRL2, 0xc1 },
+ { STV090x_P1_CFRICFG, 0xf8 },
+ { STV090x_P1_NOSCFG, 0x1c },
+ { STV090x_P1_DMDTOM, 0x20 },
+ { STV090x_P1_CORRELMANT, 0x70 },
+ { STV090x_P1_CORRELABS, 0x88 },
+ { STV090x_P1_AGC2O, 0x5b },
+ { STV090x_P1_AGC2REF, 0x38 },
+ { STV090x_P1_CARCFG, 0xe4 },
+ { STV090x_P1_ACLC, 0x1A },
+ { STV090x_P1_BCLC, 0x09 },
+ { STV090x_P1_CARHDR, 0x08 },
+ { STV090x_P1_KREFTMG, 0xc1 },
+ { STV090x_P1_SFRSTEP, 0x58 },
+ { STV090x_P1_TMGCFG2, 0x01 },
+ { STV090x_P1_CAR2CFG, 0x26 },
+ { STV090x_P1_BCLC2S2Q, 0x86 },
+ { STV090x_P1_BCLC2S28, 0x86 },
+ { STV090x_P1_SMAPCOEF7, 0x77 },
+ { STV090x_P1_SMAPCOEF6, 0x85 },
+ { STV090x_P1_SMAPCOEF5, 0x77 },
+ { STV090x_P1_DMDCFG2, 0x3b },
+ { STV090x_P1_MODCODLST0, 0xff },
+ { STV090x_P1_MODCODLST1, 0xff },
+ { STV090x_P1_MODCODLST2, 0xff },
+ { STV090x_P1_MODCODLST3, 0xff },
+ { STV090x_P1_MODCODLST4, 0xff },
+ { STV090x_P1_MODCODLST5, 0xff },
+ { STV090x_P1_MODCODLST6, 0xff },
+ { STV090x_P1_MODCODLST7, 0xcc },
+ { STV090x_P1_MODCODLST8, 0xcc },
+ { STV090x_P1_MODCODLST9, 0xcc },
+ { STV090x_P1_MODCODLSTA, 0xcc },
+ { STV090x_P1_MODCODLSTB, 0xcc },
+ { STV090x_P1_MODCODLSTC, 0xcc },
+ { STV090x_P1_MODCODLSTD, 0xcc },
+ { STV090x_P1_MODCODLSTE, 0xcc },
+ { STV090x_P1_MODCODLSTF, 0xcf },
+ { STV090x_GENCFG, 0x1c },
+ { STV090x_NBITER_NF4, 0x37 },
+ { STV090x_NBITER_NF5, 0x29 },
+ { STV090x_NBITER_NF6, 0x37 },
+ { STV090x_NBITER_NF7, 0x33 },
+ { STV090x_NBITER_NF8, 0x31 },
+ { STV090x_NBITER_NF9, 0x2f },
+ { STV090x_NBITER_NF10, 0x39 },
+ { STV090x_NBITER_NF11, 0x3a },
+ { STV090x_NBITER_NF12, 0x29 },
+ { STV090x_NBITER_NF13, 0x37 },
+ { STV090x_NBITER_NF14, 0x33 },
+ { STV090x_NBITER_NF15, 0x2f },
+ { STV090x_NBITER_NF16, 0x39 },
+ { STV090x_NBITER_NF17, 0x3a },
+ { STV090x_NBITERNOERR, 0x04 },
+ { STV090x_GAINLLR_NF4, 0x0C },
+ { STV090x_GAINLLR_NF5, 0x0F },
+ { STV090x_GAINLLR_NF6, 0x11 },
+ { STV090x_GAINLLR_NF7, 0x14 },
+ { STV090x_GAINLLR_NF8, 0x17 },
+ { STV090x_GAINLLR_NF9, 0x19 },
+ { STV090x_GAINLLR_NF10, 0x20 },
+ { STV090x_GAINLLR_NF11, 0x21 },
+ { STV090x_GAINLLR_NF12, 0x0D },
+ { STV090x_GAINLLR_NF13, 0x0F },
+ { STV090x_GAINLLR_NF14, 0x13 },
+ { STV090x_GAINLLR_NF15, 0x1A },
+ { STV090x_GAINLLR_NF16, 0x1F },
+ { STV090x_GAINLLR_NF17, 0x21 },
+ { STV090x_RCCFGH, 0x20 },
+ { STV090x_P1_FECM, 0x01 }, /*disable the DSS mode */
+ { STV090x_P1_PRVIT, 0x2f } /*disable puncture rate 6/7*/
+};
+
+static struct stv090x_reg stv0900_cut20_val[] = {
+
+ { STV090x_P2_DMDCFG3, 0xe8 },
+ { STV090x_P2_DMDCFG4, 0x10 },
+ { STV090x_P2_CARFREQ, 0x38 },
+ { STV090x_P2_CARHDR, 0x20 },
+ { STV090x_P2_KREFTMG, 0x5a },
+ { STV090x_P2_SMAPCOEF7, 0x06 },
+ { STV090x_P2_SMAPCOEF6, 0x00 },
+ { STV090x_P2_SMAPCOEF5, 0x04 },
+ { STV090x_P2_NOSCFG, 0x0c },
+ { STV090x_P1_DMDCFG3, 0xe8 },
+ { STV090x_P1_DMDCFG4, 0x10 },
+ { STV090x_P1_CARFREQ, 0x38 },
+ { STV090x_P1_CARHDR, 0x20 },
+ { STV090x_P1_KREFTMG, 0x5a },
+ { STV090x_P1_SMAPCOEF7, 0x06 },
+ { STV090x_P1_SMAPCOEF6, 0x00 },
+ { STV090x_P1_SMAPCOEF5, 0x04 },
+ { STV090x_P1_NOSCFG, 0x0c },
+ { STV090x_GAINLLR_NF4, 0x21 },
+ { STV090x_GAINLLR_NF5, 0x21 },
+ { STV090x_GAINLLR_NF6, 0x20 },
+ { STV090x_GAINLLR_NF7, 0x1F },
+ { STV090x_GAINLLR_NF8, 0x1E },
+ { STV090x_GAINLLR_NF9, 0x1E },
+ { STV090x_GAINLLR_NF10, 0x1D },
+ { STV090x_GAINLLR_NF11, 0x1B },
+ { STV090x_GAINLLR_NF12, 0x20 },
+ { STV090x_GAINLLR_NF13, 0x20 },
+ { STV090x_GAINLLR_NF14, 0x20 },
+ { STV090x_GAINLLR_NF15, 0x20 },
+ { STV090x_GAINLLR_NF16, 0x20 },
+ { STV090x_GAINLLR_NF17, 0x21 },
+};
+
+static struct stv090x_reg stv0903_cut20_val[] = {
+ { STV090x_P1_DMDCFG3, 0xe8 },
+ { STV090x_P1_DMDCFG4, 0x10 },
+ { STV090x_P1_CARFREQ, 0x38 },
+ { STV090x_P1_CARHDR, 0x20 },
+ { STV090x_P1_KREFTMG, 0x5a },
+ { STV090x_P1_SMAPCOEF7, 0x06 },
+ { STV090x_P1_SMAPCOEF6, 0x00 },
+ { STV090x_P1_SMAPCOEF5, 0x04 },
+ { STV090x_P1_NOSCFG, 0x0c },
+ { STV090x_GAINLLR_NF4, 0x21 },
+ { STV090x_GAINLLR_NF5, 0x21 },
+ { STV090x_GAINLLR_NF6, 0x20 },
+ { STV090x_GAINLLR_NF7, 0x1F },
+ { STV090x_GAINLLR_NF8, 0x1E },
+ { STV090x_GAINLLR_NF9, 0x1E },
+ { STV090x_GAINLLR_NF10, 0x1D },
+ { STV090x_GAINLLR_NF11, 0x1B },
+ { STV090x_GAINLLR_NF12, 0x20 },
+ { STV090x_GAINLLR_NF13, 0x20 },
+ { STV090x_GAINLLR_NF14, 0x20 },
+ { STV090x_GAINLLR_NF15, 0x20 },
+ { STV090x_GAINLLR_NF16, 0x20 },
+ { STV090x_GAINLLR_NF17, 0x21 }
+};
+
+/* Cut 2.0 Long Frame Tracking CR loop */
+static struct stv090x_long_frame_crloop stv090x_s2_crl_cut20[] = {
+ /* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
+ { STV090x_QPSK_12, 0x1f, 0x3f, 0x1e, 0x3f, 0x3d, 0x1f, 0x3d, 0x3e, 0x3d, 0x1e },
+ { STV090x_QPSK_35, 0x2f, 0x3f, 0x2e, 0x2f, 0x3d, 0x0f, 0x0e, 0x2e, 0x3d, 0x0e },
+ { STV090x_QPSK_23, 0x2f, 0x3f, 0x2e, 0x2f, 0x0e, 0x0f, 0x0e, 0x1e, 0x3d, 0x3d },
+ { STV090x_QPSK_34, 0x3f, 0x3f, 0x3e, 0x1f, 0x0e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
+ { STV090x_QPSK_45, 0x3f, 0x3f, 0x3e, 0x1f, 0x0e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
+ { STV090x_QPSK_56, 0x3f, 0x3f, 0x3e, 0x1f, 0x0e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
+ { STV090x_QPSK_89, 0x3f, 0x3f, 0x3e, 0x1f, 0x1e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
+ { STV090x_QPSK_910, 0x3f, 0x3f, 0x3e, 0x1f, 0x1e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
+ { STV090x_8PSK_35, 0x3c, 0x3e, 0x1c, 0x2e, 0x0c, 0x1e, 0x2b, 0x2d, 0x1b, 0x1d },
+ { STV090x_8PSK_23, 0x1d, 0x3e, 0x3c, 0x2e, 0x2c, 0x1e, 0x0c, 0x2d, 0x2b, 0x1d },
+ { STV090x_8PSK_34, 0x0e, 0x3e, 0x3d, 0x2e, 0x0d, 0x1e, 0x2c, 0x2d, 0x0c, 0x1d },
+ { STV090x_8PSK_56, 0x2e, 0x3e, 0x1e, 0x2e, 0x2d, 0x1e, 0x3c, 0x2d, 0x2c, 0x1d },
+ { STV090x_8PSK_89, 0x3e, 0x3e, 0x1e, 0x2e, 0x3d, 0x1e, 0x0d, 0x2d, 0x3c, 0x1d },
+ { STV090x_8PSK_910, 0x3e, 0x3e, 0x1e, 0x2e, 0x3d, 0x1e, 0x1d, 0x2d, 0x0d, 0x1d }
+};
+
+/* Cut 3.0 Long Frame Tracking CR loop */
+static struct stv090x_long_frame_crloop stv090x_s2_crl_cut30[] = {
+ /* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
+ { STV090x_QPSK_12, 0x3c, 0x2c, 0x0c, 0x2c, 0x1b, 0x2c, 0x1b, 0x1c, 0x0b, 0x3b },
+ { STV090x_QPSK_35, 0x0d, 0x0d, 0x0c, 0x0d, 0x1b, 0x3c, 0x1b, 0x1c, 0x0b, 0x3b },
+ { STV090x_QPSK_23, 0x1d, 0x0d, 0x0c, 0x1d, 0x2b, 0x3c, 0x1b, 0x1c, 0x0b, 0x3b },
+ { STV090x_QPSK_34, 0x1d, 0x1d, 0x0c, 0x1d, 0x2b, 0x3c, 0x1b, 0x1c, 0x0b, 0x3b },
+ { STV090x_QPSK_45, 0x2d, 0x1d, 0x1c, 0x1d, 0x2b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
+ { STV090x_QPSK_56, 0x2d, 0x1d, 0x1c, 0x1d, 0x2b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
+ { STV090x_QPSK_89, 0x3d, 0x2d, 0x1c, 0x1d, 0x3b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
+ { STV090x_QPSK_910, 0x3d, 0x2d, 0x1c, 0x1d, 0x3b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
+ { STV090x_8PSK_35, 0x39, 0x29, 0x39, 0x19, 0x19, 0x19, 0x19, 0x19, 0x09, 0x19 },
+ { STV090x_8PSK_23, 0x2a, 0x39, 0x1a, 0x0a, 0x39, 0x0a, 0x29, 0x39, 0x29, 0x0a },
+ { STV090x_8PSK_34, 0x2b, 0x3a, 0x1b, 0x1b, 0x3a, 0x1b, 0x1a, 0x0b, 0x1a, 0x3a },
+ { STV090x_8PSK_56, 0x0c, 0x1b, 0x3b, 0x3b, 0x1b, 0x3b, 0x3a, 0x3b, 0x3a, 0x1b },
+ { STV090x_8PSK_89, 0x0d, 0x3c, 0x2c, 0x2c, 0x2b, 0x0c, 0x0b, 0x3b, 0x0b, 0x1b },
+ { STV090x_8PSK_910, 0x0d, 0x0d, 0x2c, 0x3c, 0x3b, 0x1c, 0x0b, 0x3b, 0x0b, 0x1b }
+};
+
+/* Cut 2.0 Long Frame Tracking CR Loop */
+static struct stv090x_long_frame_crloop stv090x_s2_apsk_crl_cut20[] = {
+ /* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
+ { STV090x_16APSK_23, 0x0c, 0x0c, 0x0c, 0x0c, 0x1d, 0x0c, 0x3c, 0x0c, 0x2c, 0x0c },
+ { STV090x_16APSK_34, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0c, 0x2d, 0x0c, 0x1d, 0x0c },
+ { STV090x_16APSK_45, 0x0c, 0x0c, 0x0c, 0x0c, 0x1e, 0x0c, 0x3d, 0x0c, 0x2d, 0x0c },
+ { STV090x_16APSK_56, 0x0c, 0x0c, 0x0c, 0x0c, 0x1e, 0x0c, 0x3d, 0x0c, 0x2d, 0x0c },
+ { STV090x_16APSK_89, 0x0c, 0x0c, 0x0c, 0x0c, 0x2e, 0x0c, 0x0e, 0x0c, 0x3d, 0x0c },
+ { STV090x_16APSK_910, 0x0c, 0x0c, 0x0c, 0x0c, 0x2e, 0x0c, 0x0e, 0x0c, 0x3d, 0x0c },
+ { STV090x_32APSK_34, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
+ { STV090x_32APSK_45, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
+ { STV090x_32APSK_56, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
+ { STV090x_32APSK_89, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
+ { STV090x_32APSK_910, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c }
+};
+
+/* Cut 3.0 Long Frame Tracking CR Loop */
+static struct stv090x_long_frame_crloop stv090x_s2_apsk_crl_cut30[] = {
+ /* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
+ { STV090x_16APSK_23, 0x0a, 0x0a, 0x0a, 0x0a, 0x1a, 0x0a, 0x3a, 0x0a, 0x2a, 0x0a },
+ { STV090x_16APSK_34, 0x0a, 0x0a, 0x0a, 0x0a, 0x0b, 0x0a, 0x3b, 0x0a, 0x1b, 0x0a },
+ { STV090x_16APSK_45, 0x0a, 0x0a, 0x0a, 0x0a, 0x1b, 0x0a, 0x3b, 0x0a, 0x2b, 0x0a },
+ { STV090x_16APSK_56, 0x0a, 0x0a, 0x0a, 0x0a, 0x1b, 0x0a, 0x3b, 0x0a, 0x2b, 0x0a },
+ { STV090x_16APSK_89, 0x0a, 0x0a, 0x0a, 0x0a, 0x2b, 0x0a, 0x0c, 0x0a, 0x3b, 0x0a },
+ { STV090x_16APSK_910, 0x0a, 0x0a, 0x0a, 0x0a, 0x2b, 0x0a, 0x0c, 0x0a, 0x3b, 0x0a },
+ { STV090x_32APSK_34, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
+ { STV090x_32APSK_45, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
+ { STV090x_32APSK_56, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
+ { STV090x_32APSK_89, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
+ { STV090x_32APSK_910, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a }
+};
+
+static struct stv090x_long_frame_crloop stv090x_s2_lowqpsk_crl_cut20[] = {
+ /* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
+ { STV090x_QPSK_14, 0x0f, 0x3f, 0x0e, 0x3f, 0x2d, 0x2f, 0x2d, 0x1f, 0x3d, 0x3e },
+ { STV090x_QPSK_13, 0x0f, 0x3f, 0x0e, 0x3f, 0x2d, 0x2f, 0x3d, 0x0f, 0x3d, 0x2e },
+ { STV090x_QPSK_25, 0x1f, 0x3f, 0x1e, 0x3f, 0x3d, 0x1f, 0x3d, 0x3e, 0x3d, 0x2e }
+};
+
+static struct stv090x_long_frame_crloop stv090x_s2_lowqpsk_crl_cut30[] = {
+ /* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
+ { STV090x_QPSK_14, 0x0c, 0x3c, 0x0b, 0x3c, 0x2a, 0x2c, 0x2a, 0x1c, 0x3a, 0x3b },
+ { STV090x_QPSK_13, 0x0c, 0x3c, 0x0b, 0x3c, 0x2a, 0x2c, 0x3a, 0x0c, 0x3a, 0x2b },
+ { STV090x_QPSK_25, 0x1c, 0x3c, 0x1b, 0x3c, 0x3a, 0x1c, 0x3a, 0x3b, 0x3a, 0x2b }
+};
+
+/* Cut 2.0 Short Frame Tracking CR Loop */
+static struct stv090x_short_frame_crloop stv090x_s2_short_crl_cut20[] = {
+ /* MODCOD 2M 5M 10M 20M 30M */
+ { STV090x_QPSK, 0x2f, 0x2e, 0x0e, 0x0e, 0x3d },
+ { STV090x_8PSK, 0x3e, 0x0e, 0x2d, 0x0d, 0x3c },
+ { STV090x_16APSK, 0x1e, 0x1e, 0x1e, 0x3d, 0x2d },
+ { STV090x_32APSK, 0x1e, 0x1e, 0x1e, 0x3d, 0x2d }
+};
+
+/* Cut 3.0 Short Frame Tracking CR Loop */
+static struct stv090x_short_frame_crloop stv090x_s2_short_crl_cut30[] = {
+ /* MODCOD 2M 5M 10M 20M 30M */
+ { STV090x_QPSK, 0x2C, 0x2B, 0x0B, 0x0B, 0x3A },
+ { STV090x_8PSK, 0x3B, 0x0B, 0x2A, 0x0A, 0x39 },
+ { STV090x_16APSK, 0x1B, 0x1B, 0x1B, 0x3A, 0x2A },
+ { STV090x_32APSK, 0x1B, 0x1B, 0x1B, 0x3A, 0x2A }
+};
+
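+/*
+ * Sign-extend a two's complement register value of the given bit width
+ * to a full s32.
+ */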
+static inline s32 comp2(s32 __x, s32 __width)
+{
+ if (__width == 32)
+ return __x;
+ else
+ return (__x >= (1 << (__width - 1))) ? (__x - (1 << __width)) : __x;
+}
+
+static int stv090x_read_reg(struct stv090x_state *state, unsigned int reg)
+{
+ const struct stv090x_config *config = state->config;
+ int ret;
+
+ u8 b0[] = { reg >> 8, reg & 0xff };
+ u8 buf;
+
+ struct i2c_msg msg[] = {
+ { .addr = config->address, .flags = 0, .buf = b0, .len = 2 },
+ { .addr = config->address, .flags = I2C_M_RD, .buf = &buf, .len = 1 }
+ };
+
+ ret = i2c_transfer(state->i2c, msg, 2);
+ if (ret != 2) {
+ if (ret != -ERESTARTSYS)
+ dprintk(FE_ERROR, 1,
+ "Read error, Reg=[0x%02x], Status=%d",
+ reg, ret);
+
+ return ret < 0 ? ret : -EREMOTEIO;
+ }
+ if (unlikely(*state->verbose >= FE_DEBUGREG))
+ dprintk(FE_ERROR, 1, "Reg=[0x%02x], data=%02x",
+ reg, buf);
+
+ return (unsigned int) buf;
+}
+
+static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8 *data, u32 count)
+{
+ const struct stv090x_config *config = state->config;
+ int ret;
+ u8 buf[2 + count];
+ struct i2c_msg i2c_msg = { .addr = config->address, .flags = 0, .buf = buf, .len = 2 + count };
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xff;
+ memcpy(&buf[2], data, count);
+
+ if (unlikely(*state->verbose >= FE_DEBUGREG)) {
+ int i;
+
+ printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
+ for (i = 0; i < count; i++)
+ printk(" %02x", data[i]);
+ printk("\n");
+ }
+
+ ret = i2c_transfer(state->i2c, &i2c_msg, 1);
+ if (ret != 1) {
+ if (ret != -ERESTARTSYS)
+ dprintk(FE_ERROR, 1, "Reg=[0x%04x], Data=[0x%02x ...], Count=%u, Status=%d",
+ reg, data[0], count, ret);
+ return ret < 0 ? ret : -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 data)
+{
+ return stv090x_write_regs(state, reg, &data, 1);
+}
+
+static int stv090x_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ u32 reg;
+
+ reg = STV090x_READ_DEMOD(state, I2CRPT);
+ if (enable) {
+ dprintk(FE_DEBUG, 1, "Enable Gate");
+ STV090x_SETFIELD_Px(reg, I2CT_ON_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, I2CRPT, reg) < 0)
+ goto err;
+
+ } else {
+ dprintk(FE_DEBUG, 1, "Disable Gate");
+ STV090x_SETFIELD_Px(reg, I2CT_ON_FIELD, 0);
+ if ((STV090x_WRITE_DEMOD(state, I2CRPT, reg)) < 0)
+ goto err;
+ }
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
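+/*
+ * Select the demodulator and FEC lock timeouts (in ms) from the search
+ * algorithm and the current symbol rate; a warm start halves the
+ * demodulator timeout.
+ */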
+static void stv090x_get_lock_tmg(struct stv090x_state *state)
+{
+ switch (state->algo) {
+ case STV090x_BLIND_SEARCH:
+ dprintk(FE_DEBUG, 1, "Blind Search");
+ if (state->srate <= 1500000) { /* SR <= 1.5 Msps */
+ state->DemodTimeout = 1500;
+ state->FecTimeout = 400;
+ } else if (state->srate <= 5000000) { /* 1.5 Msps < SR <= 5 Msps */
+ state->DemodTimeout = 1000;
+ state->FecTimeout = 300;
+ } else { /* SR > 5 Msps */
+ state->DemodTimeout = 700;
+ state->FecTimeout = 100;
+ }
+ break;
+
+ case STV090x_COLD_SEARCH:
+ case STV090x_WARM_SEARCH:
+ default:
+ dprintk(FE_DEBUG, 1, "Normal Search");
+ if (state->srate <= 1000000) { /*SR <=1Msps*/
+ state->DemodTimeout = 4500;
+ state->FecTimeout = 1700;
+ } else if (state->srate <= 2000000) { /*1Msps < SR <= 2Msps */
+ state->DemodTimeout = 2500;
+ state->FecTimeout = 1100;
+ } else if (state->srate <= 5000000) { /*2Msps < SR <= 5Msps */
+ state->DemodTimeout = 1000;
+ state->FecTimeout = 550;
+ } else if (state->srate <= 10000000) { /*5Msps < SR <= 10Msps */
+ state->DemodTimeout = 700;
+ state->FecTimeout = 250;
+ } else if (state->srate <= 20000000) { /*10Msps < SR <= 20Msps */
+ state->DemodTimeout = 400;
+ state->FecTimeout = 130;
+ } else { /*SR >20Msps*/
+ state->DemodTimeout = 300;
+ state->FecTimeout = 100;
+ }
+ break;
+ }
+
+ if (state->algo == STV090x_WARM_SEARCH)
+ state->DemodTimeout /= 2;
+}
+
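+/*
+ * Program the nominal symbol rate: SFRINIT = srate * 2^16 / mclk.
+ * The shift pairs (4/12, 6/10, 9/7) are equivalent scalings, chosen to
+ * avoid 32 bit overflow for high, medium and low symbol rates.
+ */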
+static int stv090x_set_srate(struct stv090x_state *state, u32 srate)
+{
+ u32 sym;
+
+ if (srate > 60000000) {
+ sym = (srate << 4); /* SR * 2^16 / master_clk */
+ sym /= (state->mclk >> 12);
+ } else if (srate > 6000000) {
+ sym = (srate << 6);
+ sym /= (state->mclk >> 10);
+ } else {
+ sym = (srate << 9);
+ sym /= (state->mclk >> 7);
+ }
+
+ if (STV090x_WRITE_DEMOD(state, SFRINIT1, (sym >> 8) & 0x7f) < 0) /* MSB */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRINIT0, (sym & 0xff)) < 0) /* LSB */
+ goto err;
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
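+/*
+ * Program the upper symbol rate limit (SFRUP) to 105% of the requested
+ * rate, using the same srate * 2^16 / mclk scaling as stv090x_set_srate
+ * and saturating at the 15 bit register maximum.
+ */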
+static int stv090x_set_max_srate(struct stv090x_state *state, u32 clk, u32 srate)
+{
+ u32 sym;
+
+ srate = 105 * (srate / 100);
+ if (srate > 60000000) {
+ sym = (srate << 4); /* SR * 2^16 / master_clk */
+ sym /= (state->mclk >> 12);
+ } else if (srate > 6000000) {
+ sym = (srate << 6);
+ sym /= (state->mclk >> 10);
+ } else {
+ sym = (srate << 9);
+ sym /= (state->mclk >> 7);
+ }
+
+ if (sym < 0x7fff) {
+ if (STV090x_WRITE_DEMOD(state, SFRUP1, (sym >> 8) & 0x7f) < 0) /* MSB */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRUP0, sym & 0xff) < 0) /* LSB */
+ goto err;
+ } else {
+ if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x7f) < 0) /* MSB */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRUP0, 0xff) < 0) /* LSB */
+ goto err;
+ }
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
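+/*
+ * Program the lower symbol rate limit (SFRLOW) to 95% of the requested
+ * rate, scaled by 2^16 / mclk.
+ */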
+static int stv090x_set_min_srate(struct stv090x_state *state, u32 clk, u32 srate)
+{
+ u32 sym;
+
+ srate = 95 * (srate / 100);
+ if (srate > 60000000) {
+ sym = (srate << 4); /* SR * 2^16 / master_clk */
+ sym /= (state->mclk >> 12);
+ } else if (srate > 6000000) {
+ sym = (srate << 6);
+ sym /= (state->mclk >> 10);
+ } else {
+ sym = (srate << 9);
+ sym /= (state->mclk >> 7);
+ }
+
+ if (STV090x_WRITE_DEMOD(state, SFRLOW1, ((sym >> 8) & 0xff)) < 0) /* MSB */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRLOW0, (sym & 0xff)) < 0) /* LSB */
+ goto err;
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
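+/*
+ * Occupied carrier width in Hz: srate * (1 + roll-off), with the
+ * roll-off factor expressed in percent (20/25/35).
+ */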
+static u32 stv090x_car_width(u32 srate, enum stv090x_rolloff rolloff)
+{
+ u32 ro;
+
+ switch (rolloff) {
+ case STV090x_RO_20:
+ ro = 20;
+ break;
+ case STV090x_RO_25:
+ ro = 25;
+ break;
+ case STV090x_RO_35:
+ default:
+ ro = 35;
+ break;
+ }
+
+ return srate + (srate * ro) / 100;
+}
+
+static int stv090x_set_vit_thacq(struct stv090x_state *state)
+{
+ if (STV090x_WRITE_DEMOD(state, VTH12, 0x96) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VTH23, 0x64) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VTH34, 0x36) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VTH56, 0x23) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VTH67, 0x1e) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VTH78, 0x19) < 0)
+ goto err;
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_set_vit_thtracq(struct stv090x_state *state)
+{
+ if (STV090x_WRITE_DEMOD(state, VTH12, 0xd0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VTH23, 0x7d) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VTH34, 0x53) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VTH56, 0x2f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VTH67, 0x24) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VTH78, 0x1f) < 0)
+ goto err;
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_set_viterbi(struct stv090x_state *state)
+{
+ switch (state->search_mode) {
+ case STV090x_SEARCH_AUTO:
+ if (STV090x_WRITE_DEMOD(state, FECM, 0x10) < 0) /* DVB-S and DVB-S2 */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, PRVIT, 0x3f) < 0) /* all puncture rate */
+ goto err;
+ break;
+ case STV090x_SEARCH_DVBS1:
+ if (STV090x_WRITE_DEMOD(state, FECM, 0x00) < 0) /* disable DSS */
+ goto err;
+ switch (state->fec) {
+ case STV090x_PR12:
+ if (STV090x_WRITE_DEMOD(state, PRVIT, 0x01) < 0)
+ goto err;
+ break;
+
+ case STV090x_PR23:
+ if (STV090x_WRITE_DEMOD(state, PRVIT, 0x02) < 0)
+ goto err;
+ break;
+
+ case STV090x_PR34:
+ if (STV090x_WRITE_DEMOD(state, PRVIT, 0x04) < 0)
+ goto err;
+ break;
+
+ case STV090x_PR56:
+ if (STV090x_WRITE_DEMOD(state, PRVIT, 0x08) < 0)
+ goto err;
+ break;
+
+ case STV090x_PR78:
+ if (STV090x_WRITE_DEMOD(state, PRVIT, 0x20) < 0)
+ goto err;
+ break;
+
+ default:
+ if (STV090x_WRITE_DEMOD(state, PRVIT, 0x2f) < 0) /* all */
+ goto err;
+ break;
+ }
+ break;
+ case STV090x_SEARCH_DSS:
+ if (STV090x_WRITE_DEMOD(state, FECM, 0x80) < 0)
+ goto err;
+ switch (state->fec) {
+ case STV090x_PR12:
+ if (STV090x_WRITE_DEMOD(state, PRVIT, 0x01) < 0)
+ goto err;
+ break;
+
+ case STV090x_PR23:
+ if (STV090x_WRITE_DEMOD(state, PRVIT, 0x02) < 0)
+ goto err;
+ break;
+
+ case STV090x_PR67:
+ if (STV090x_WRITE_DEMOD(state, PRVIT, 0x10) < 0)
+ goto err;
+ break;
+
+ default:
+ if (STV090x_WRITE_DEMOD(state, PRVIT, 0x13) < 0) /* 1/2, 2/3, 6/7 */
+ goto err;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_stop_modcod(struct stv090x_state *state)
+{
+ if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0xff) < 0)
+ goto err;
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_activate_modcod(struct stv090x_state *state)
+{
+ if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xfc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0xcf) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_activate_modcod_single(struct stv090x_state *state)
+{
+
+ if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xf0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0x0f) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_vitclk_ctl(struct stv090x_state *state, int enable)
+{
+ u32 reg;
+
+ switch (state->demod) {
+ case STV090x_DEMODULATOR_0:
+ mutex_lock(&demod_lock);
+ reg = stv090x_read_reg(state, STV090x_STOPCLK2);
+ STV090x_SETFIELD(reg, STOP_CLKVIT1_FIELD, enable);
+ if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
+ goto err;
+ mutex_unlock(&demod_lock);
+ break;
+
+ case STV090x_DEMODULATOR_1:
+ mutex_lock(&demod_lock);
+ reg = stv090x_read_reg(state, STV090x_STOPCLK2);
+ STV090x_SETFIELD(reg, STOP_CLKVIT2_FIELD, enable);
+ if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
+ goto err;
+ mutex_unlock(&demod_lock);
+ break;
+
+ default:
+ dprintk(FE_ERROR, 1, "Wrong demodulator!");
+ break;
+ }
+ return 0;
+err:
+ mutex_unlock(&demod_lock);
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_dvbs_track_crl(struct stv090x_state *state)
+{
+ if (state->dev_ver >= 0x30) {
+ /* Set ACLC BCLC optimised value vs SR */
+ if (state->srate >= 15000000) {
+ if (STV090x_WRITE_DEMOD(state, ACLC, 0x2b) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, BCLC, 0x1a) < 0)
+ goto err;
+ } else if ((state->srate >= 7000000) && (state->srate < 15000000)) {
+ if (STV090x_WRITE_DEMOD(state, ACLC, 0x0c) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, BCLC, 0x1b) < 0)
+ goto err;
+ } else if (state->srate < 7000000) {
+ if (STV090x_WRITE_DEMOD(state, ACLC, 0x2c) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, BCLC, 0x1c) < 0)
+ goto err;
+ }
+
+ } else {
+ /* Cut 2.0 */
+ if (STV090x_WRITE_DEMOD(state, ACLC, 0x1a) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, BCLC, 0x09) < 0)
+ goto err;
+ }
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_delivery_search(struct stv090x_state *state)
+{
+ u32 reg;
+
+ switch (state->search_mode) {
+ case STV090x_SEARCH_DVBS1:
+ case STV090x_SEARCH_DSS:
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
+ STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+
+ /* Activate Viterbi decoder in legacy search,
+ * do not use FRESVIT1, might impact VITERBI2
+ */
+ if (stv090x_vitclk_ctl(state, 0) < 0)
+ goto err;
+
+ if (stv090x_dvbs_track_crl(state) < 0)
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x22) < 0) /* disable DVB-S2 */
+ goto err;
+
+ if (stv090x_set_vit_thacq(state) < 0)
+ goto err;
+ if (stv090x_set_viterbi(state) < 0)
+ goto err;
+ break;
+
+ case STV090x_SEARCH_DVBS2:
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 0);
+ STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+ STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
+ STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+
+ if (stv090x_vitclk_ctl(state, 1) < 0)
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, ACLC, 0x1a) < 0) /* stop DVB-S CR loop */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, BCLC, 0x09) < 0)
+ goto err;
+
+ if (state->dev_ver <= 0x20) {
+ /* enable S2 carrier loop */
+ if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x26) < 0)
+ goto err;
+ } else {
+ /* > Cut 3: Stop carrier 3 */
+ if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x66) < 0)
+ goto err;
+ }
+
+ if (state->demod_mode != STV090x_SINGLE) {
+ /* Cut 2: enable link during search */
+ if (stv090x_activate_modcod(state) < 0)
+ goto err;
+ } else {
+ /* Single demodulator
+ * Authorize SHORT and LONG frames,
+ * QPSK, 8PSK, 16APSK and 32APSK
+ */
+ if (stv090x_activate_modcod_single(state) < 0)
+ goto err;
+ }
+
+ break;
+
+ case STV090x_SEARCH_AUTO:
+ default:
+ /* enable DVB-S1 and DVB-S2 in auto mode */
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
+ STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+
+ if (stv090x_vitclk_ctl(state, 0) < 0)
+ goto err;
+
+ if (stv090x_dvbs_track_crl(state) < 0)
+ goto err;
+
+ if (state->dev_ver <= 0x20) {
+ /* enable S2 carrier loop */
+ if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x26) < 0)
+ goto err;
+ } else {
+ /* > Cut 3: Stop carrier 3 */
+ if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x66) < 0)
+ goto err;
+ }
+
+ if (state->demod_mode != STV090x_SINGLE) {
+ /* Cut 2: enable link during search */
+ if (stv090x_activate_modcod(state) < 0)
+ goto err;
+ } else {
+ /* Single demodulator
+ * Authorize SHORT and LONG frames,
+ * QPSK, 8PSK, 16APSK and 32APSK
+ */
+ if (stv090x_activate_modcod_single(state) < 0)
+ goto err;
+ }
+
+ if (state->srate >= 2000000) {
+ /* Srate >= 2MSPS, Viterbi threshold to acquire */
+ if (stv090x_set_vit_thacq(state) < 0)
+ goto err;
+ } else {
+ /* Srate < 2MSPS, Reset Viterbi threshold to track
+ * and then re-acquire
+ */
+ if (stv090x_set_vit_thtracq(state) < 0)
+ goto err;
+ }
+
+ if (stv090x_set_viterbi(state) < 0)
+ goto err;
+ break;
+ }
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_start_search(struct stv090x_state *state)
+{
+ u32 reg, freq_abs;
+ s16 freq;
+
+ /* Reset demodulator */
+ reg = STV090x_READ_DEMOD(state, DMDISTATE);
+ STV090x_SETFIELD_Px(reg, I2C_DEMOD_MODE_FIELD, 0x1f);
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, reg) < 0)
+ goto err;
+
+ if (state->dev_ver <= 0x20) {
+ if (state->srate <= 5000000) {
+ if (STV090x_WRITE_DEMOD(state, CARCFG, 0x44) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRUP1, 0x0f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRUP0, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRLOW1, 0xf0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRLOW0, 0x00) < 0)
+ goto err;
+
+ /* enlarge the timing bandwidth for low SR */
+ if (STV090x_WRITE_DEMOD(state, RTCS2, 0x68) < 0)
+ goto err;
+ } else {
+ /* If the symbol rate is > 5 Msps, set the carrier
+ * search upper and lower limits to auto mode */
+ if (STV090x_WRITE_DEMOD(state, CARCFG, 0xc4) < 0)
+ goto err;
+ /* reduce the timing bandwidth for high SR */
+ if (STV090x_WRITE_DEMOD(state, RTCS2, 0x44) < 0)
+ goto err;
+ }
+ } else {
+ /* >= Cut 3 */
+ if (state->srate <= 5000000) {
+ /* enlarge the timing bandwidth for low SR */
+ STV090x_WRITE_DEMOD(state, RTCS2, 0x68);
+ } else {
+ /* reduce the timing bandwidth for high SR */
+ STV090x_WRITE_DEMOD(state, RTCS2, 0x44);
+ }
+
+ /* Set CFR min and max to manual mode */
+ STV090x_WRITE_DEMOD(state, CARCFG, 0x46);
+
+ if (state->algo == STV090x_WARM_SEARCH) {
+ /* WARM Start
+ * CFR min = -1MHz,
+ * CFR max = +1MHz
+ */
+ freq_abs = 1000 << 16;
+ freq_abs /= (state->mclk / 1000);
+ freq = (s16) freq_abs;
+ } else {
+ /* COLD Start
+ * CFR min =- (SearchRange / 2 + 600KHz)
+ * CFR max = +(SearchRange / 2 + 600KHz)
+ * (600KHz for the tuner step size)
+ */
+ freq_abs = (state->search_range / 2000) + 600;
+ freq_abs = freq_abs << 16;
+ freq_abs /= (state->mclk / 1000);
+ freq = (s16) freq_abs;
+ }
+
+ if (STV090x_WRITE_DEMOD(state, CFRUP1, MSB(freq)) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRUP0, LSB(freq)) < 0)
+ goto err;
+
+ freq *= -1;
+
+ if (STV090x_WRITE_DEMOD(state, CFRLOW1, MSB(freq)) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRLOW0, LSB(freq)) < 0)
+ goto err;
+
+ }
+
+ if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0) < 0)
+ goto err;
+
+ if (state->dev_ver >= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, EQUALCFG, 0x41) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, FFECFG, 0x41) < 0)
+ goto err;
+
+ if ((state->search_mode == STV090x_SEARCH_DVBS1) ||
+ (state->search_mode == STV090x_SEARCH_DSS) ||
+ (state->search_mode == STV090x_SEARCH_AUTO)) {
+
+ if (STV090x_WRITE_DEMOD(state, VITSCALE, 0x82) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VAVSRVIT, 0x00) < 0)
+ goto err;
+ }
+ }
+
+ if (STV090x_WRITE_DEMOD(state, SFRSTEP, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0xe0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0xc0) < 0)
+ goto err;
+
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 0);
+ STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+ reg = STV090x_READ_DEMOD(state, DMDCFG2);
+ STV090x_SETFIELD_Px(reg, S1S2_SEQUENTIAL_FIELD, 0x0);
+ if (STV090x_WRITE_DEMOD(state, DMDCFG2, reg) < 0)
+ goto err;
+
+ if (state->dev_ver >= 0x20) {
+ /*Frequency offset detector setting*/
+ if (state->srate < 2000000) {
+ if (state->dev_ver <= 0x20) {
+ /* Cut 2 */
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x39) < 0)
+ goto err;
+ } else {
+ /* >= Cut 3 */
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x89) < 0)
+ goto err;
+ }
+ if (STV090x_WRITE_DEMOD(state, CARHDR, 0x40) < 0)
+ goto err;
+ }
+
+ if (state->srate < 10000000) {
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x4c) < 0)
+ goto err;
+ } else {
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x4b) < 0)
+ goto err;
+ }
+ } else {
+ if (state->srate < 10000000) {
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0xef) < 0)
+ goto err;
+ } else {
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0xed) < 0)
+ goto err;
+ }
+ }
+
+ switch (state->algo) {
+ case STV090x_WARM_SEARCH:
+ /* The symbol rate and the exact
+ * carrier Frequency are known
+ */
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
+ goto err;
+ break;
+
+ case STV090x_COLD_SEARCH:
+ /* The symbol rate is known */
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
+ goto err;
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
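+/*
+ * Step the carrier frequency across the search range in 1 MHz steps and
+ * return the minimum averaged AGC2 reading; the blind search uses it as
+ * a rough signal presence check.
+ */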
+static int stv090x_get_agc2_min_level(struct stv090x_state *state)
+{
+ u32 agc2_min = 0xffff, agc2 = 0, freq_init, freq_step, reg;
+ s32 i, j, steps, dir;
+
+ if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
+ goto err;
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 1);
+ STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x83) < 0) /* SR = 65 Msps Max */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRUP0, 0xc0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRLOW1, 0x82) < 0) /* SR= 400 ksps Min */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRLOW0, 0xa0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x00) < 0) /* stop acq @ coarse carrier state */
+ goto err;
+ if (stv090x_set_srate(state, 1000000) < 0)
+ goto err;
+
+ steps = -1 + state->search_range / 1000000;
+ steps /= 2;
+ steps = (2 * steps) + 1;
+ if (steps < 0)
+ steps = 1;
+
+ dir = 1;
+ freq_step = (1000000 * 256) / (state->mclk / 256);
+ freq_init = 0;
+
+ for (i = 0; i < steps; i++) {
+ if (dir > 0)
+ freq_init = freq_init + (freq_step * i);
+ else
+ freq_init = freq_init - (freq_step * i);
+
+ dir = -1;
+
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5c) < 0) /* Demod RESET */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT1, (freq_init >> 8) & 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT0, freq_init & 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x58) < 0) /* Demod RESET */
+ goto err;
+ msleep(10);
+ agc2 = 0;
+ for (j = 0; j < 10; j++) {
+ agc2 += (STV090x_READ_DEMOD(state, AGC2I1) << 8) |
+ STV090x_READ_DEMOD(state, AGC2I0);
+ }
+ agc2 /= 10;
+ if (agc2 < agc2_min)
+ agc2_min = agc2;
+ }
+
+ return agc2_min;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
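+/*
+ * Read back the current symbol rate: srate = SFR * mclk / 2^16.
+ * The multiplication is split into 16 bit halves to keep the
+ * intermediate products within 32 bit arithmetic.
+ */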
+static u32 stv090x_get_srate(struct stv090x_state *state, u32 clk)
+{
+ u8 r3, r2, r1, r0;
+ s32 srate, int_1, int_2, tmp_1, tmp_2;
+
+ r3 = STV090x_READ_DEMOD(state, SFR3);
+ r2 = STV090x_READ_DEMOD(state, SFR2);
+ r1 = STV090x_READ_DEMOD(state, SFR1);
+ r0 = STV090x_READ_DEMOD(state, SFR0);
+
+ srate = ((r3 << 24) | (r2 << 16) | (r1 << 8) | r0);
+
+ int_1 = clk >> 16;
+ int_2 = srate >> 16;
+
+ tmp_1 = clk % 0x10000;
+ tmp_2 = srate % 0x10000;
+
+ srate = (int_1 * int_2) +
+ ((int_1 * tmp_2) >> 16) +
+ ((int_2 * tmp_1) >> 16);
+
+ return srate;
+}
+
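+/*
+ * Coarse symbol rate search: enable carrier/timing auto scan and step
+ * the tuner across the search range until the timing lock quality and
+ * AGC2 level indicate a plausible carrier; returns the coarse symbol
+ * rate, or 0 if nothing was found.
+ */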
+static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
+{
+ struct dvb_frontend *fe = &state->frontend;
+
+ int tmg_lock = 0, i;
+ s32 tmg_cpt = 0, dir = 1, steps, cur_step = 0, freq;
+ u32 srate_coarse = 0, agc2 = 0, car_step = 1200, reg;
+
+ reg = STV090x_READ_DEMOD(state, DMDISTATE);
+ STV090x_SETFIELD_Px(reg, I2C_DEMOD_MODE_FIELD, 0x1f); /* Demod RESET */
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, reg) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGCFG, 0x12) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0xf0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0xe0) < 0)
+ goto err;
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 1);
+ STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x83) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRUP0, 0xc0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRLOW1, 0x82) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRLOW0, 0xa0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x60) < 0)
+ goto err;
+
+ if (state->dev_ver >= 0x30) {
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x99) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRSTEP, 0x95) < 0)
+ goto err;
+
+ } else if (state->dev_ver >= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x6a) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRSTEP, 0x95) < 0)
+ goto err;
+ }
+
+ if (state->srate <= 2000000)
+ car_step = 1000;
+ else if (state->srate <= 5000000)
+ car_step = 2000;
+ else if (state->srate <= 12000000)
+ car_step = 3000;
+ else
+ car_step = 5000;
+
+ steps = -1 + ((state->search_range / 1000) / car_step);
+ steps /= 2;
+ steps = (2 * steps) + 1;
+ if (steps < 0)
+ steps = 1;
+ else if (steps > 10) {
+ steps = 11;
+ car_step = (state->search_range / 1000) / 10;
+ }
+ cur_step = 0;
+ dir = 1;
+ freq = state->frequency;
+
+ while ((!tmg_lock) && (cur_step < steps)) {
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5f) < 0) /* Demod RESET */
+ goto err;
+ reg = STV090x_READ_DEMOD(state, DMDISTATE);
+ STV090x_SETFIELD_Px(reg, I2C_DEMOD_MODE_FIELD, 0x00); /* trigger acquisition */
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, reg) < 0)
+ goto err;
+ msleep(50);
+ agc2 = 0;
+ for (i = 0; i < 10; i++) {
+ reg = STV090x_READ_DEMOD(state, DSTATUS);
+ if (STV090x_GETFIELD_Px(reg, TMGLOCK_QUALITY_FIELD) >= 2)
+ tmg_cpt++;
+ agc2 += (STV090x_READ_DEMOD(state, AGC2I1) << 8) |
+ STV090x_READ_DEMOD(state, AGC2I0);
+ }
+ agc2 /= 10;
+ srate_coarse = stv090x_get_srate(state, state->mclk);
+ cur_step++;
+ dir *= -1;
+ if ((tmg_cpt >= 5) && (agc2 < 0x1f00) && (srate_coarse < 55000000) && (srate_coarse > 850000))
+ tmg_lock = 1;
+ else if (cur_step < steps) {
+ if (dir > 0)
+ freq += cur_step * car_step;
+ else
+ freq -= cur_step * car_step;
+
+ /* Setup tuner */
+ if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_set_frequency) {
+ if (state->config->tuner_set_frequency(fe, freq) < 0)
+ goto err;
+ }
+
+ if (state->config->tuner_set_bandwidth) {
+ if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
+ goto err;
+ }
+
+ if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ goto err;
+
+ msleep(50);
+
+ if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_get_status) {
+ if (state->config->tuner_get_status(fe, &reg) < 0)
+ goto err;
+ }
+
+ if (reg)
+ dprintk(FE_DEBUG, 1, "Tuner phase locked");
+ else
+ dprintk(FE_DEBUG, 1, "Tuner unlocked");
+
+ if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ goto err;
+
+ }
+ }
+ if (!tmg_lock)
+ srate_coarse = 0;
+ else
+ srate_coarse = stv090x_get_srate(state, state->mclk);
+
+ return srate_coarse;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static u32 stv090x_srate_srch_fine(struct stv090x_state *state)
+{
+ u32 srate_coarse, freq_coarse, sym, reg;
+
+ srate_coarse = stv090x_get_srate(state, state->mclk);
+ freq_coarse = STV090x_READ_DEMOD(state, CFR2) << 8;
+ freq_coarse |= STV090x_READ_DEMOD(state, CFR1);
+ sym = 13 * (srate_coarse / 10); /* SFRUP = SFR + 30% */
+
+ if (sym < state->srate)
+ srate_coarse = 0;
+ else {
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0) /* Demod RESET */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0x01) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0x20) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGCFG, 0xd2) < 0)
+ goto err;
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0x00);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+
+ if (state->dev_ver >= 0x30) {
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x79) < 0)
+ goto err;
+ } else if (state->dev_ver >= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x49) < 0)
+ goto err;
+ }
+
+ if (srate_coarse > 3000000) {
+ sym = 13 * (srate_coarse / 10); /* SFRUP = SFR + 30% */
+ sym = (sym / 1000) * 65536;
+ sym /= (state->mclk / 1000);
+ if (STV090x_WRITE_DEMOD(state, SFRUP1, (sym >> 8) & 0x7f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRUP0, sym & 0xff) < 0)
+ goto err;
+ sym = 10 * (srate_coarse / 13); /* SFRLOW = SFR - 30% */
+ sym = (sym / 1000) * 65536;
+ sym /= (state->mclk / 1000);
+ if (STV090x_WRITE_DEMOD(state, SFRLOW1, (sym >> 8) & 0x7f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRLOW0, sym & 0xff) < 0)
+ goto err;
+ sym = (srate_coarse / 1000) * 65536;
+ sym /= (state->mclk / 1000);
+ if (STV090x_WRITE_DEMOD(state, SFRINIT1, (sym >> 8) & 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRINIT0, sym & 0xff) < 0)
+ goto err;
+ } else {
+ sym = 13 * (srate_coarse / 10); /* SFRUP = SFR + 30% */
+ sym = (sym / 100) * 65536;
+ sym /= (state->mclk / 100);
+ if (STV090x_WRITE_DEMOD(state, SFRUP1, (sym >> 8) & 0x7f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRUP0, sym & 0xff) < 0)
+ goto err;
+ sym = 10 * (srate_coarse / 14); /* SFRLOW = SFR - 30% */
+ sym = (sym / 100) * 65536;
+ sym /= (state->mclk / 100);
+ if (STV090x_WRITE_DEMOD(state, SFRLOW1, (sym >> 8) & 0x7f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRLOW0, sym & 0xff) < 0)
+ goto err;
+ sym = (srate_coarse / 100) * 65536;
+ sym /= (state->mclk / 100);
+ if (STV090x_WRITE_DEMOD(state, SFRINIT1, (sym >> 8) & 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, SFRINIT0, sym & 0xff) < 0)
+ goto err;
+ }
+ if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x20) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT1, (freq_coarse >> 8) & 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT0, freq_coarse & 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0) /* trigger acquisition */
+ goto err;
+ }
+
+ return srate_coarse;
+
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_get_dmdlock(struct stv090x_state *state, s32 timeout)
+{
+ s32 timer = 0, lock = 0;
+ u32 reg;
+ u8 stat;
+
+ while ((timer < timeout) && (!lock)) {
+ reg = STV090x_READ_DEMOD(state, DMDSTATE);
+ stat = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);
+
+ switch (stat) {
+ case 0: /* searching */
+ case 1: /* first PLH detected */
+ default:
+ dprintk(FE_DEBUG, 1, "Demodulator searching ..");
+ lock = 0;
+ break;
+ case 2: /* DVB-S2 mode */
+ case 3: /* DVB-S1/legacy mode */
+ reg = STV090x_READ_DEMOD(state, DSTATUS);
+ lock = STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD);
+ break;
+ }
+
+ if (!lock)
+ msleep(10);
+ else
+ dprintk(FE_DEBUG, 1, "Demodulator acquired LOCK");
+
+ timer += 10;
+ }
+ return lock;
+}
+
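+/*
+ * Blind search: if the minimum AGC2 level is below the cut-dependent
+ * threshold, run the coarse and fine symbol rate searches with KREFTMG
+ * decreasing from 120 to 30 until the demodulator locks or the coarse
+ * search is declared failed.
+ */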
+static int stv090x_blind_search(struct stv090x_state *state)
+{
+ u32 agc2, reg, srate_coarse;
+ s32 timeout_dmd = 500, cpt_fail, agc2_ovflw, i;
+ u8 k_ref, k_max, k_min;
+ int coarse_fail = 0, lock;
+
+ k_max = 120;
+ k_min = 30;
+
+ agc2 = stv090x_get_agc2_min_level(state);
+
+ if (agc2 > STV090x_SEARCH_AGC2_TH(state->dev_ver)) {
+ lock = 0;
+ } else {
+
+ if (state->dev_ver <= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, CARCFG, 0xc4) < 0)
+ goto err;
+ } else {
+ /* > Cut 3 */
+ if (STV090x_WRITE_DEMOD(state, CARCFG, 0x06) < 0)
+ goto err;
+ }
+
+ if (STV090x_WRITE_DEMOD(state, RTCS2, 0x44) < 0)
+ goto err;
+
+ if (state->dev_ver >= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, EQUALCFG, 0x41) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, FFECFG, 0x41) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VITSCALE, 0x82) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VAVSRVIT, 0x00) < 0) /* set viterbi hysteresis */
+ goto err;
+ }
+
+ k_ref = k_max;
+ do {
+ if (STV090x_WRITE_DEMOD(state, KREFTMG, k_ref) < 0)
+ goto err;
+ if (stv090x_srate_srch_coarse(state) != 0) {
+ srate_coarse = stv090x_srate_srch_fine(state);
+ if (srate_coarse != 0) {
+ stv090x_get_lock_tmg(state);
+ lock = stv090x_get_dmdlock(state, timeout_dmd);
+ } else {
+ lock = 0;
+ }
+ } else {
+ cpt_fail = 0;
+ agc2_ovflw = 0;
+ for (i = 0; i < 10; i++) {
+ agc2 = STV090x_READ_DEMOD(state, AGC2I1) << 8;
+ agc2 |= STV090x_READ_DEMOD(state, AGC2I0);
+ if (agc2 >= 0xff00)
+ agc2_ovflw++;
+ reg = STV090x_READ_DEMOD(state, DSTATUS2);
+ if ((STV090x_GETFIELD_Px(reg, CFR_OVERFLOW_FIELD) == 0x01) &&
+ (STV090x_GETFIELD_Px(reg, DEMOD_DELOCK_FIELD) == 0x01))
+
+ cpt_fail++;
+ }
+ if ((cpt_fail > 7) || (agc2_ovflw > 7))
+ coarse_fail = 1;
+
+ lock = 0;
+ }
+ k_ref -= 30;
+ } while ((k_ref >= k_min) && (!lock) && (!coarse_fail));
+ }
+
+ return lock;
+
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_chk_tmg(struct stv090x_state *state)
+{
+ u32 reg;
+ s32 tmg_cpt = 0, i;
+ u8 freq, tmg_thh, tmg_thl;
+ int tmg_lock = 0;
+
+ freq = STV090x_READ_DEMOD(state, CARFREQ);
+ tmg_thh = STV090x_READ_DEMOD(state, TMGTHRISE);
+ tmg_thl = STV090x_READ_DEMOD(state, TMGTHFALL);
+ if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0x20) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0x00) < 0)
+ goto err;
+
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0x00); /* stop carrier offset search */
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, RTC, 0x80) < 0)
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, RTCS2, 0x40) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x00) < 0)
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0) /* set carrier offset to 0 */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x65) < 0)
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0) /* trigger acquisition */
+ goto err;
+ msleep(10);
+
+ for (i = 0; i < 10; i++) {
+ reg = STV090x_READ_DEMOD(state, DSTATUS);
+ if (STV090x_GETFIELD_Px(reg, TMGLOCK_QUALITY_FIELD) >= 2)
+ tmg_cpt++;
+ msleep(1);
+ }
+ if (tmg_cpt >= 3)
+ tmg_lock = 1;
+
+ if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, RTC, 0x88) < 0) /* DVB-S1 timing */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, RTCS2, 0x68) < 0) /* DVB-S2 timing */
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, freq) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGTHRISE, tmg_thh) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGTHFALL, tmg_thl) < 0)
+ goto err;
+
+ return tmg_lock;
+
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
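+/*
+ * Cold start acquisition: wait for demodulator lock and, on failure,
+ * either retry after a timing check (symbol rates >= 10 Msps) or step
+ * the tuner across the search range, re-triggering an acquisition at
+ * each step, until lock is obtained or the steps are exhausted.
+ */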
+static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
+{
+ struct dvb_frontend *fe = &state->frontend;
+
+ u32 reg;
+ s32 car_step, steps, cur_step, dir, freq, timeout_lock;
+ int lock = 0;
+
+ if (state->srate >= 10000000)
+ timeout_lock = timeout_dmd / 3;
+ else
+ timeout_lock = timeout_dmd / 2;
+
+ lock = stv090x_get_dmdlock(state, timeout_lock); /* cold start wait */
+ if (!lock) {
+ if (state->srate >= 10000000) {
+ if (stv090x_chk_tmg(state)) {
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
+ goto err;
+ lock = stv090x_get_dmdlock(state, timeout_dmd);
+ } else {
+ lock = 0;
+ }
+ } else {
+ if (state->srate <= 4000000)
+ car_step = 1000;
+ else if (state->srate <= 7000000)
+ car_step = 2000;
+ else if (state->srate <= 10000000)
+ car_step = 3000;
+ else
+ car_step = 5000;
+
+ steps = (state->search_range / 1000) / car_step;
+ steps /= 2;
+ steps = 2 * (steps + 1);
+ if (steps < 0)
+ steps = 2;
+ else if (steps > 12)
+ steps = 12;
+
+ cur_step = 1;
+ dir = 1;
+
+ if (!lock) {
+ freq = state->frequency;
+ state->tuner_bw = stv090x_car_width(state->srate, state->rolloff) + state->srate;
+ while ((cur_step <= steps) && (!lock)) {
+ if (dir > 0)
+ freq += cur_step * car_step;
+ else
+ freq -= cur_step * car_step;
+
+ /* Setup tuner */
+ if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_set_frequency) {
+ if (state->config->tuner_set_frequency(fe, freq) < 0)
+ goto err;
+ }
+
+ if (state->config->tuner_set_bandwidth) {
+ if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
+ goto err;
+ }
+
+ if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ goto err;
+
+ msleep(50);
+
+ if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_get_status) {
+ if (state->config->tuner_get_status(fe, &reg) < 0)
+ goto err;
+ }
+
+ if (reg)
+ dprintk(FE_DEBUG, 1, "Tuner phase locked");
+ else
+ dprintk(FE_DEBUG, 1, "Tuner unlocked");
+
+ if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ goto err;
+
+ STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c);
+ if (state->delsys == STV090x_DVBS2) {
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 0);
+ STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+ STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
+ STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+ }
+ if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
+ goto err;
+ lock = stv090x_get_dmdlock(state, (timeout_dmd / 3));
+
+ dir *= -1;
+ cur_step++;
+ }
+ }
+ }
+ }
+
+ return lock;
+
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
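+/*
+ * Compute the software search sweep parameters: the carrier frequency
+ * increment (3% or 4% of the symbol rate, in CFR register units), the
+ * per-step lock timeout (scaled from a 27.5 Msps reference) and the
+ * maximum number of steps, all bounded so the sweep stays within about
+ * +/- 1/4 of the master clock.
+ */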
+static int stv090x_get_loop_params(struct stv090x_state *state, s32 *freq_inc, s32 *timeout_sw, s32 *steps)
+{
+ s32 timeout, inc, steps_max, srate, car_max;
+
+ srate = state->srate;
+ car_max = state->search_range / 1000;
+ car_max += car_max / 10;
+ car_max = 65536 * (car_max / 2);
+ car_max /= (state->mclk / 1000);
+
+ if (car_max > 0x4000)
+ car_max = 0x4000; /* max carrier should be <= +/- 1/4 Mclk */
+
+ inc = srate;
+ inc /= state->mclk / 1000;
+ inc *= 256;
+ inc *= 256;
+ inc /= 1000;
+
+ switch (state->search_mode) {
+ case STV090x_SEARCH_DVBS1:
+ case STV090x_SEARCH_DSS:
+ inc *= 3; /* freq step = 3% of srate */
+ timeout = 20;
+ break;
+
+ case STV090x_SEARCH_DVBS2:
+ inc *= 4;
+ timeout = 25;
+ break;
+
+ case STV090x_SEARCH_AUTO:
+ default:
+ inc *= 3;
+ timeout = 25;
+ break;
+ }
+ inc /= 100;
+ if ((inc > car_max) || (inc < 0))
+ inc = car_max / 2; /* increment <= 1/8 Mclk */
+
+ timeout *= 27500; /* 27.5 Msps reference */
+ if (srate > 0)
+ timeout /= (srate / 1000);
+
+ if ((timeout > 100) || (timeout < 0))
+ timeout = 100;
+
+ steps_max = (car_max / inc) + 1; /* min steps = 3 */
+ if ((steps_max > 100) || (steps_max < 0)) {
+ steps_max = 100; /* max steps <= 100 */
+ inc = car_max / steps_max;
+ }
+ *freq_inc = inc;
+ *timeout_sw = timeout;
+ *steps = steps_max;
+
+ return 0;
+}
+
+static int stv090x_chk_signal(struct stv090x_state *state)
+{
+ s32 offst_car, agc2, car_max;
+ int no_signal;
+
+ offst_car = STV090x_READ_DEMOD(state, CFR2) << 8;
+ offst_car |= STV090x_READ_DEMOD(state, CFR1);
+ offst_car = comp2(offst_car, 16);
+
+ agc2 = STV090x_READ_DEMOD(state, AGC2I1) << 8;
+ agc2 |= STV090x_READ_DEMOD(state, AGC2I0);
+ car_max = state->search_range / 1000;
+
+ car_max += (car_max / 10); /* 10% margin */
+ car_max = (65536 * car_max / 2);
+ car_max /= state->mclk / 1000;
+
+ if (car_max > 0x4000)
+ car_max = 0x4000;
+
+ if ((agc2 > 0x2000) || (offst_car > 2 * car_max) || (offst_car < -2 * car_max)) {
+ no_signal = 1;
+ dprintk(FE_DEBUG, 1, "No Signal");
+ } else {
+ no_signal = 0;
+ dprintk(FE_DEBUG, 1, "Found Signal");
+ }
+
+ return no_signal;
+}
+
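+/*
+ * Sweep the carrier offset (zigzag around zero for DVB-S2, linear from
+ * -car_max otherwise) in increments of 2 * inc, restarting the
+ * demodulator at each offset, until lock is found, no signal is
+ * detected or the sweep limits are reached.
+ */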
+static int stv090x_search_car_loop(struct stv090x_state *state, s32 inc, s32 timeout, int zigzag, s32 steps_max)
+{
+ int no_signal, lock = 0;
+ s32 cpt_step = 0, offst_freq, car_max;
+ u32 reg;
+
+ car_max = state->search_range / 1000;
+ car_max += (car_max / 10);
+ car_max = (65536 * car_max / 2);
+ car_max /= (state->mclk / 1000);
+ if (car_max > 0x4000)
+ car_max = 0x4000;
+
+ if (zigzag)
+ offst_freq = 0;
+ else
+ offst_freq = -car_max + inc;
+
+ do {
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT1, ((offst_freq / 256) & 0xff)) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT0, offst_freq & 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
+ goto err;
+
+ reg = STV090x_READ_DEMOD(state, PDELCTRL1);
+ STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0x1); /* stop DVB-S2 packet delin */
+ if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
+ goto err;
+
+ if (zigzag) {
+ if (offst_freq >= 0)
+ offst_freq = -offst_freq - 2 * inc;
+ else
+ offst_freq = -offst_freq;
+ } else {
+ offst_freq += 2 * inc;
+ }
+
+ cpt_step++;
+
+ lock = stv090x_get_dmdlock(state, timeout);
+ no_signal = stv090x_chk_signal(state);
+
+ } while ((!lock) &&
+ (!no_signal) &&
+ ((offst_freq - inc) < car_max) &&
+ ((offst_freq + inc) > -car_max) &&
+ (cpt_step < steps_max));
+
+ reg = STV090x_READ_DEMOD(state, PDELCTRL1);
+ STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0);
+ if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
+ goto err;
+
+ return lock;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_sw_algo(struct stv090x_state *state)
+{
+ int no_signal, zigzag, lock = 0;
+ u32 reg;
+
+ s32 dvbs2_fly_wheel;
+ s32 inc, timeout_step, trials, steps_max;
+
+ /* get params */
+ stv090x_get_loop_params(state, &inc, &timeout_step, &steps_max);
+
+ switch (state->search_mode) {
+ case STV090x_SEARCH_DVBS1:
+ case STV090x_SEARCH_DSS:
+ /* accelerate the frequency detector */
+ if (state->dev_ver >= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x3B) < 0)
+ goto err;
+ }
+
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0x49) < 0)
+ goto err;
+ zigzag = 0;
+ break;
+
+ case STV090x_SEARCH_DVBS2:
+ if (state->dev_ver >= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x79) < 0)
+ goto err;
+ }
+
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0x89) < 0)
+ goto err;
+ zigzag = 1;
+ break;
+
+ case STV090x_SEARCH_AUTO:
+ default:
+ /* accelerate the frequency detector */
+ if (state->dev_ver >= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x3b) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x79) < 0)
+ goto err;
+ }
+
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0xc9) < 0)
+ goto err;
+ zigzag = 0;
+ break;
+ }
+
+ trials = 0;
+ do {
+ lock = stv090x_search_car_loop(state, inc, timeout_step, zigzag, steps_max);
+ no_signal = stv090x_chk_signal(state);
+ trials++;
+
+ /* run the SW search a maximum of 2 times */
+ if (lock || no_signal || (trials == 2)) {
+ /* restore the frequency detector and correlator tracking settings */
+ if (state->dev_ver >= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x49) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x9e) < 0)
+ goto err;
+ }
+
+ reg = STV090x_READ_DEMOD(state, DMDSTATE);
+ if ((lock) && (STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD) == STV090x_DVBS2)) {
+ /*Check if the demod is not losing lock in DVBS2*/
+ msleep(timeout_step);
+ reg = STV090x_READ_DEMOD(state, DMDFLYW);
+ dvbs2_fly_wheel = STV090x_GETFIELD_Px(reg, FLYWHEEL_CPT_FIELD);
+ if (dvbs2_fly_wheel < 0xd) { /* the count of correct frames is decreasing */
+ msleep(timeout_step);
+ reg = STV090x_READ_DEMOD(state, DMDFLYW);
+ dvbs2_fly_wheel = STV090x_GETFIELD_Px(reg, FLYWHEEL_CPT_FIELD);
+ }
+ if (dvbs2_fly_wheel < 0xd) {
+ /* false lock, the demod is losing lock */
+ lock = 0;
+ if (trials < 2) {
+ if (state->dev_ver >= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x79) < 0)
+ goto err;
+ }
+
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0x89) < 0)
+ goto err;
+ }
+ }
+ }
+ }
+ } while ((!lock) && (trials < 2) && (!no_signal));
+
+ return lock;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static enum stv090x_delsys stv090x_get_std(struct stv090x_state *state)
+{
+ u32 reg;
+ enum stv090x_delsys delsys;
+
+ reg = STV090x_READ_DEMOD(state, DMDSTATE);
+ if (STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD) == 2)
+ delsys = STV090x_DVBS2;
+ else if (STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD) == 3) {
+ reg = STV090x_READ_DEMOD(state, FECM);
+ if (STV090x_GETFIELD_Px(reg, DSS_DVB_FIELD) == 1)
+ delsys = STV090x_DSS;
+ else
+ delsys = STV090x_DVBS1;
+ } else {
+ delsys = STV090x_ERROR;
+ }
+
+ return delsys;
+}
+
+/* in Hz */
+static s32 stv090x_get_car_freq(struct stv090x_state *state, u32 mclk)
+{
+ s32 derot, int_1, int_2, tmp_1, tmp_2;
+
+ derot = STV090x_READ_DEMOD(state, CFR2) << 16;
+ derot |= STV090x_READ_DEMOD(state, CFR1) << 8;
+ derot |= STV090x_READ_DEMOD(state, CFR0);
+
+ derot = comp2(derot, 24);
+ int_1 = state->mclk >> 12;
+ int_2 = derot >> 12;
+
+ /* carrier_frequency = MasterClock * Reg / 2^24 */
+ tmp_1 = state->mclk % 0x1000;
+ tmp_2 = derot % 0x1000;
+
+ derot = (int_1 * int_2) +
+ ((int_1 * tmp_2) >> 12) +
+ ((int_2 * tmp_1) >> 12);
+
+ return derot;
+}
+
+static int stv090x_get_viterbi(struct stv090x_state *state)
+{
+ u32 reg, rate;
+
+ reg = STV090x_READ_DEMOD(state, VITCURPUN);
+ rate = STV090x_GETFIELD_Px(reg, VIT_CURPUN_FIELD);
+
+ switch (rate) {
+ case 13:
+ state->fec = STV090x_PR12;
+ break;
+
+ case 18:
+ state->fec = STV090x_PR23;
+ break;
+
+ case 21:
+ state->fec = STV090x_PR34;
+ break;
+
+ case 24:
+ state->fec = STV090x_PR56;
+ break;
+
+ case 25:
+ state->fec = STV090x_PR67;
+ break;
+
+ case 26:
+ state->fec = STV090x_PR78;
+ break;
+
+ default:
+ state->fec = STV090x_PRERR;
+ break;
+ }
+
+ return 0;
+}
+
+static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *state)
+{
+ struct dvb_frontend *fe = &state->frontend;
+
+ u8 tmg;
+ u32 reg;
+ s32 i = 0, offst_freq;
+
+ msleep(5);
+
+ if (state->algo == STV090x_BLIND_SEARCH) {
+ tmg = STV090x_READ_DEMOD(state, TMGREG2);
+ STV090x_WRITE_DEMOD(state, SFRSTEP, 0x5c);
+ while ((i <= 50) && (tmg != 0) && (tmg != 0xff)) {
+ tmg = STV090x_READ_DEMOD(state, TMGREG2);
+ msleep(5);
+ i += 5;
+ }
+ }
+ state->delsys = stv090x_get_std(state);
+
+ if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_get_frequency) {
+ if (state->config->tuner_get_frequency(fe, &state->frequency) < 0)
+ goto err;
+ }
+
+ if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ goto err;
+
+ offst_freq = stv090x_get_car_freq(state, state->mclk) / 1000;
+ state->frequency += offst_freq;
+
+ if (stv090x_get_viterbi(state) < 0)
+ goto err;
+
+ reg = STV090x_READ_DEMOD(state, DMDMODCOD);
+ state->modcod = STV090x_GETFIELD_Px(reg, DEMOD_MODCOD_FIELD);
+ state->pilots = STV090x_GETFIELD_Px(reg, DEMOD_TYPE_FIELD) & 0x01;
+ state->frame_len = STV090x_GETFIELD_Px(reg, DEMOD_TYPE_FIELD) >> 1;
+ reg = STV090x_READ_DEMOD(state, TMGOBS);
+ state->rolloff = STV090x_GETFIELD_Px(reg, ROLLOFF_STATUS_FIELD);
+ reg = STV090x_READ_DEMOD(state, FECM);
+ state->inversion = STV090x_GETFIELD_Px(reg, IQINV_FIELD);
+
+ if ((state->algo == STV090x_BLIND_SEARCH) || (state->srate < 10000000)) {
+
+ if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_get_frequency) {
+ if (state->config->tuner_get_frequency(fe, &state->frequency) < 0)
+ goto err;
+ }
+
+ if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ goto err;
+
+ if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
+ return STV090x_RANGEOK;
+ else if (abs(offst_freq) <= (stv090x_car_width(state->srate, state->rolloff) / 2000))
+ return STV090x_RANGEOK;
+ else
+ return STV090x_OUTOFRANGE; /* Out of Range */
+ } else {
+ if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
+ return STV090x_RANGEOK;
+ else
+ return STV090x_OUTOFRANGE;
+ }
+
+ return STV090x_OUTOFRANGE;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
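+/*
+ * Read the residual timing offset (TMGREG2..0, two's complement) and
+ * convert it to a symbol rate correction, which is added to the
+ * measured symbol rate in stv090x_optimize_track().
+ */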
+static u32 stv090x_get_tmgoffst(struct stv090x_state *state, u32 srate)
+{
+ s32 offst_tmg;
+
+ offst_tmg = STV090x_READ_DEMOD(state, TMGREG2) << 16;
+ offst_tmg |= STV090x_READ_DEMOD(state, TMGREG1) << 8;
+ offst_tmg |= STV090x_READ_DEMOD(state, TMGREG0);
+
+ offst_tmg = comp2(offst_tmg, 24); /* 2's complement */
+ if (!offst_tmg)
+ offst_tmg = 1;
+
+ offst_tmg = ((s32) srate * 10) / ((s32) 0x1000000 / offst_tmg);
+ offst_tmg /= 320;
+
+ return offst_tmg;
+}
+
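+/*
+ * Pick the DVB-S2 long frame carrier loop coefficient (ACLC2Sxx value)
+ * from the cut-dependent tables above, indexed by MODCOD, pilot state
+ * and symbol rate.
+ */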
+static u8 stv090x_optimize_carloop(struct stv090x_state *state, enum stv090x_modcod modcod, s32 pilots)
+{
+ u8 aclc = 0x29;
+ s32 i;
+ struct stv090x_long_frame_crloop *car_loop, *car_loop_qpsk_low, *car_loop_apsk_low;
+
+ if (state->dev_ver == 0x20) {
+ car_loop = stv090x_s2_crl_cut20;
+ car_loop_qpsk_low = stv090x_s2_lowqpsk_crl_cut20;
+ car_loop_apsk_low = stv090x_s2_apsk_crl_cut20;
+ } else {
+ /* >= Cut 3 */
+ car_loop = stv090x_s2_crl_cut30;
+ car_loop_qpsk_low = stv090x_s2_lowqpsk_crl_cut30;
+ car_loop_apsk_low = stv090x_s2_apsk_crl_cut30;
+ }
+
+ if (modcod < STV090x_QPSK_12) {
+ i = 0;
+ while ((i < 3) && (modcod != car_loop_qpsk_low[i].modcod))
+ i++;
+
+ if (i >= 3)
+ i = 2;
+
+ } else {
+ i = 0;
+ while ((i < 14) && (modcod != car_loop[i].modcod))
+ i++;
+
+ if (i >= 14) {
+ i = 0;
+ while ((i < 11) && (modcod != car_loop_apsk_low[i].modcod))
+ i++;
+
+ if (i >= 11)
+ i = 10;
+ }
+ }
+
+ if (modcod <= STV090x_QPSK_25) {
+ if (pilots) {
+ if (state->srate <= 3000000)
+ aclc = car_loop_qpsk_low[i].crl_pilots_on_2;
+ else if (state->srate <= 7000000)
+ aclc = car_loop_qpsk_low[i].crl_pilots_on_5;
+ else if (state->srate <= 15000000)
+ aclc = car_loop_qpsk_low[i].crl_pilots_on_10;
+ else if (state->srate <= 25000000)
+ aclc = car_loop_qpsk_low[i].crl_pilots_on_20;
+ else
+ aclc = car_loop_qpsk_low[i].crl_pilots_on_30;
+ } else {
+ if (state->srate <= 3000000)
+ aclc = car_loop_qpsk_low[i].crl_pilots_off_2;
+ else if (state->srate <= 7000000)
+ aclc = car_loop_qpsk_low[i].crl_pilots_off_5;
+ else if (state->srate <= 15000000)
+ aclc = car_loop_qpsk_low[i].crl_pilots_off_10;
+ else if (state->srate <= 25000000)
+ aclc = car_loop_qpsk_low[i].crl_pilots_off_20;
+ else
+ aclc = car_loop_qpsk_low[i].crl_pilots_off_30;
+ }
+
+ } else if (modcod <= STV090x_8PSK_910) {
+ if (pilots) {
+ if (state->srate <= 3000000)
+ aclc = car_loop[i].crl_pilots_on_2;
+ else if (state->srate <= 7000000)
+ aclc = car_loop[i].crl_pilots_on_5;
+ else if (state->srate <= 15000000)
+ aclc = car_loop[i].crl_pilots_on_10;
+ else if (state->srate <= 25000000)
+ aclc = car_loop[i].crl_pilots_on_20;
+ else
+ aclc = car_loop[i].crl_pilots_on_30;
+ } else {
+ if (state->srate <= 3000000)
+ aclc = car_loop[i].crl_pilots_off_2;
+ else if (state->srate <= 7000000)
+ aclc = car_loop[i].crl_pilots_off_5;
+ else if (state->srate <= 15000000)
+ aclc = car_loop[i].crl_pilots_off_10;
+ else if (state->srate <= 25000000)
+ aclc = car_loop[i].crl_pilots_off_20;
+ else
+ aclc = car_loop[i].crl_pilots_off_30;
+ }
+ } else { /* 16APSK and 32APSK */
+ if (state->srate <= 3000000)
+ aclc = car_loop_apsk_low[i].crl_pilots_on_2;
+ else if (state->srate <= 7000000)
+ aclc = car_loop_apsk_low[i].crl_pilots_on_5;
+ else if (state->srate <= 15000000)
+ aclc = car_loop_apsk_low[i].crl_pilots_on_10;
+ else if (state->srate <= 25000000)
+ aclc = car_loop_apsk_low[i].crl_pilots_on_20;
+ else
+ aclc = car_loop_apsk_low[i].crl_pilots_on_30;
+ }
+
+ return aclc;
+}
+
+static u8 stv090x_optimize_carloop_short(struct stv090x_state *state)
+{
+ struct stv090x_short_frame_crloop *short_crl;
+ s32 index = 0;
+ u8 aclc = 0x0b;
+
+ switch (state->modulation) {
+ case STV090x_QPSK:
+ default:
+ index = 0;
+ break;
+ case STV090x_8PSK:
+ index = 1;
+ break;
+ case STV090x_16APSK:
+ index = 2;
+ break;
+ case STV090x_32APSK:
+ index = 3;
+ break;
+ }
+
+ if (state->dev_ver >= 0x30)
+ short_crl = stv090x_s2_short_crl_cut30;
+ else
+ short_crl = stv090x_s2_short_crl_cut20; /* cut 2.x and earlier */
+
+ if (state->srate <= 3000000)
+ aclc = short_crl[index].crl_2;
+ else if (state->srate <= 7000000)
+ aclc = short_crl[index].crl_5;
+ else if (state->srate <= 15000000)
+ aclc = short_crl[index].crl_10;
+ else if (state->srate <= 25000000)
+ aclc = short_crl[index].crl_20;
+ else
+ aclc = short_crl[index].crl_30;
+
+ return aclc;
+}
+
+static int stv090x_optimize_track(struct stv090x_state *state)
+{
+ struct dvb_frontend *fe = &state->frontend;
+
+ enum stv090x_rolloff rolloff;
+ enum stv090x_modcod modcod;
+
+ s32 srate, pilots, aclc, f_1, f_0, i = 0, blind_tune = 0;
+ u32 reg;
+
+ srate = stv090x_get_srate(state, state->mclk);
+ srate += stv090x_get_tmgoffst(state, srate);
+
+ switch (state->delsys) {
+ case STV090x_DVBS1:
+ case STV090x_DSS:
+ if (state->algo == STV090x_SEARCH_AUTO) {
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
+ STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+ }
+ reg = STV090x_READ_DEMOD(state, DEMOD);
+ STV090x_SETFIELD_Px(reg, ROLLOFF_CONTROL_FIELD, state->rolloff);
+ STV090x_SETFIELD_Px(reg, MANUAL_SXROLLOFF_FIELD, 0x01);
+ if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
+ goto err;
+
+ if (state->dev_ver >= 0x30) {
+ if (stv090x_get_viterbi(state) < 0)
+ goto err;
+
+ if (state->fec == STV090x_PR12) {
+ if (STV090x_WRITE_DEMOD(state, GAUSSR0, 0x98) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CCIR0, 0x18) < 0)
+ goto err;
+ } else {
+ if (STV090x_WRITE_DEMOD(state, GAUSSR0, 0x18) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CCIR0, 0x18) < 0)
+ goto err;
+ }
+ }
+
+ if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x75) < 0)
+ goto err;
+ break;
+
+ case STV090x_DVBS2:
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 0);
+ STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, ACLC, 0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, BCLC, 0) < 0)
+ goto err;
+ if (state->frame_len == STV090x_LONG_FRAME) {
+ reg = STV090x_READ_DEMOD(state, DMDMODCOD);
+ modcod = STV090x_GETFIELD_Px(reg, DEMOD_MODCOD_FIELD);
+ pilots = STV090x_GETFIELD_Px(reg, DEMOD_TYPE_FIELD) & 0x01;
+ aclc = stv090x_optimize_carloop(state, modcod, pilots);
+ if (modcod <= STV090x_QPSK_910) {
+ if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, aclc) < 0)
+ goto err;
+ } else if (modcod <= STV090x_8PSK_910) {
+ if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, ACLC2S28, aclc) < 0)
+ goto err;
+ }
+ if ((state->demod_mode == STV090x_SINGLE) && (modcod > STV090x_8PSK_910)) {
+ if (modcod <= STV090x_16APSK_910) {
+ if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, ACLC2S216A, aclc) < 0)
+ goto err;
+ } else {
+ if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, ACLC2S232A, aclc) < 0)
+ goto err;
+ }
+ }
+ } else {
+ /* Carrier loop setting for short frame */
+ aclc = stv090x_optimize_carloop_short(state);
+ if (state->modulation == STV090x_QPSK) {
+ if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, aclc) < 0)
+ goto err;
+ } else if (state->modulation == STV090x_8PSK) {
+ if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, ACLC2S28, aclc) < 0)
+ goto err;
+ } else if (state->modulation == STV090x_16APSK) {
+ if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, ACLC2S216A, aclc) < 0)
+ goto err;
+ } else if (state->modulation == STV090x_32APSK) {
+ if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, ACLC2S232A, aclc) < 0)
+ goto err;
+ }
+ }
+
+ if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x67) < 0) /* PER */
+ goto err;
+ break;
+
+ case STV090x_UNKNOWN:
+ default:
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
+ STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+ break;
+ }
+
+ f_1 = STV090x_READ_DEMOD(state, CFR2);
+ f_0 = STV090x_READ_DEMOD(state, CFR1);
+ reg = STV090x_READ_DEMOD(state, TMGOBS);
+ rolloff = STV090x_GETFIELD_Px(reg, ROLLOFF_STATUS_FIELD);
+
+ if (state->algo == STV090x_BLIND_SEARCH) {
+ STV090x_WRITE_DEMOD(state, SFRSTEP, 0x00);
+ reg = STV090x_READ_DEMOD(state, DMDCFGMD);
+ STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 0x00);
+ STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0x00);
+ if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc1) < 0)
+ goto err;
+
+ if (stv090x_set_srate(state, srate) < 0)
+ goto err;
+ blind_tune = 1;
+ }
+
+ if (state->dev_ver >= 0x20) {
+ if ((state->search_mode == STV090x_SEARCH_DVBS1) ||
+ (state->search_mode == STV090x_SEARCH_DSS) ||
+ (state->search_mode == STV090x_SEARCH_AUTO)) {
+
+ if (STV090x_WRITE_DEMOD(state, VAVSRVIT, 0x0a) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, VITSCALE, 0x00) < 0)
+ goto err;
+ }
+ }
+
+ if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
+ goto err;
+
+ /* AUTO tracking MODE */
+ if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x80) < 0)
+ goto err;
+ /* AUTO tracking MODE */
+ if (STV090x_WRITE_DEMOD(state, SFRLOW1, 0x80) < 0)
+ goto err;
+
+ if ((state->dev_ver >= 0x20) || (blind_tune == 1) || (state->srate < 10000000)) {
+ /* update initial carrier freq with the found freq offset */
+ if (STV090x_WRITE_DEMOD(state, CFRINIT1, f_1) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT0, f_0) < 0)
+ goto err;
+ state->tuner_bw = stv090x_car_width(srate, state->rolloff) + 10000000;
+
+ if ((state->dev_ver >= 0x20) || (blind_tune == 1)) {
+
+ if (state->algo != STV090x_WARM_SEARCH) {
+
+ if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_set_bandwidth) {
+ if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
+ goto err;
+ }
+
+ if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ goto err;
+
+ }
+ }
+ if ((state->algo == STV090x_BLIND_SEARCH) || (state->srate < 10000000))
+ msleep(50); /* blind search: wait 50ms for SR stabilization */
+ else
+ msleep(5);
+
+ stv090x_get_lock_tmg(state);
+
+ if (!(stv090x_get_dmdlock(state, (state->DemodTimeout / 2)))) {
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT1, f_1) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT0, f_0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
+ goto err;
+
+ i = 0;
+
+ while ((!(stv090x_get_dmdlock(state, (state->DemodTimeout / 2)))) && (i <= 2)) {
+
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT1, f_1) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT0, f_0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
+ goto err;
+ i++;
+ }
+ }
+
+ }
+
+ if (state->dev_ver >= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x49) < 0)
+ goto err;
+ }
+
+ if ((state->delsys == STV090x_DVBS1) || (state->delsys == STV090x_DSS))
+ stv090x_set_vit_thtracq(state);
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_get_feclock(struct stv090x_state *state, s32 timeout)
+{
+ s32 timer = 0, lock = 0, stat;
+ u32 reg;
+
+ while ((timer < timeout) && (!lock)) {
+ reg = STV090x_READ_DEMOD(state, DMDSTATE);
+ stat = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);
+
+ switch (stat) {
+ case 0: /* searching */
+ case 1: /* first PLH detected */
+ default:
+ lock = 0;
+ break;
+
+ case 2: /* DVB-S2 mode */
+ reg = STV090x_READ_DEMOD(state, PDELSTATUS1);
+ lock = STV090x_GETFIELD_Px(reg, PKTDELIN_LOCK_FIELD);
+ break;
+
+ case 3: /* DVB-S1/legacy mode */
+ reg = STV090x_READ_DEMOD(state, VSTATUSVIT);
+ lock = STV090x_GETFIELD_Px(reg, LOCKEDVIT_FIELD);
+ break;
+ }
+ if (!lock) {
+ msleep(10);
+ timer += 10;
+ }
+ }
+ return lock;
+}
+
+static int stv090x_get_lock(struct stv090x_state *state, s32 timeout_dmd, s32 timeout_fec)
+{
+ u32 reg;
+ s32 timer = 0;
+ int lock;
+
+ lock = stv090x_get_dmdlock(state, timeout_dmd);
+ if (lock)
+ lock = stv090x_get_feclock(state, timeout_fec);
+
+ if (lock) {
+ lock = 0;
+
+ while ((timer < timeout_fec) && (!lock)) {
+ reg = STV090x_READ_DEMOD(state, TSSTATUS);
+ lock = STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD);
+ msleep(1);
+ timer++;
+ }
+ }
+
+ return lock;
+}
+
+static int stv090x_set_s2rolloff(struct stv090x_state *state)
+{
+ u32 reg;
+
+ if (state->dev_ver <= 0x20) {
+ /* rolloff to auto mode if DVBS2 */
+ reg = STV090x_READ_DEMOD(state, DEMOD);
+ STV090x_SETFIELD_Px(reg, MANUAL_SXROLLOFF_FIELD, 0x00);
+ if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
+ goto err;
+ } else {
+ /* DVB-S2 rolloff to auto mode if DVBS2 */
+ reg = STV090x_READ_DEMOD(state, DEMOD);
+ STV090x_SETFIELD_Px(reg, MANUAL_S2ROLLOFF_FIELD, 0x00);
+ if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
+ goto err;
+ }
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+
+static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
+{
+ struct dvb_frontend *fe = &state->frontend;
+ enum stv090x_signal_state signal_state = STV090x_NOCARRIER;
+ u32 reg;
+ s32 timeout_dmd = 500, timeout_fec = 50, agc1_power, power_iq = 0, i;
+ int lock = 0, low_sr = 0, no_signal = 0;
+
+ reg = STV090x_READ_DEMOD(state, TSCFGH);
+ STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 1); /* Stop path 1 stream merger */
+ if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5c) < 0) /* Demod stop */
+ goto err;
+
+ if (state->dev_ver >= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x9e) < 0) /* cut 2.0 */
+ goto err;
+ }
+
+ stv090x_get_lock_tmg(state);
+
+ if (state->algo == STV090x_BLIND_SEARCH) {
+ state->tuner_bw = 2 * 36000000; /* wide bw for unknown srate */
+ if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc0) < 0) /* wider srate scan */
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CORRELMANT, 0x70) < 0)
+ goto err;
+ if (stv090x_set_srate(state, 1000000) < 0) /* initial srate = 1Msps */
+ goto err;
+ } else {
+ /* known srate */
+ if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x20) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, TMGCFG, 0xd2) < 0)
+ goto err;
+
+ if (state->srate < 2000000) {
+ /* SR < 2MSPS */
+ if (STV090x_WRITE_DEMOD(state, CORRELMANT, 0x63) < 0)
+ goto err;
+ } else {
+ /* SR >= 2Msps */
+ if (STV090x_WRITE_DEMOD(state, CORRELMANT, 0x70) < 0)
+ goto err;
+ }
+
+ if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
+ goto err;
+
+ if (state->dev_ver >= 0x20) {
+ if (STV090x_WRITE_DEMOD(state, KREFTMG, 0x5a) < 0)
+ goto err;
+ if (state->algo == STV090x_COLD_SEARCH)
+ state->tuner_bw = (15 * (stv090x_car_width(state->srate, state->rolloff) + 10000000)) / 10;
+ else if (state->algo == STV090x_WARM_SEARCH)
+ state->tuner_bw = stv090x_car_width(state->srate, state->rolloff) + 10000000;
+ }
+
+ /* If the start is cold or warm (symbol rate is known),
+ * use a narrow symbol rate scan range
+ */
+ if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc1) < 0) /* narrow srate scan */
+ goto err;
+
+ if (stv090x_set_srate(state, state->srate) < 0)
+ goto err;
+
+ if (stv090x_set_max_srate(state, state->mclk, state->srate) < 0)
+ goto err;
+ if (stv090x_set_min_srate(state, state->mclk, state->srate) < 0)
+ goto err;
+
+ if (state->srate >= 10000000)
+ low_sr = 0;
+ else
+ low_sr = 1;
+ }
+
+ /* Setup tuner */
+ if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_set_bbgain) {
+ if (state->config->tuner_set_bbgain(fe, 10) < 0) /* 10dB */
+ goto err;
+ }
+
+ if (state->config->tuner_set_frequency) {
+ if (state->config->tuner_set_frequency(fe, state->frequency) < 0)
+ goto err;
+ }
+
+ if (state->config->tuner_set_bandwidth) {
+ if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
+ goto err;
+ }
+
+ if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ goto err;
+
+ msleep(50);
+
+ if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_get_status) {
+ if (state->config->tuner_get_status(fe, &reg) < 0)
+ goto err;
+ }
+
+ if (reg)
+ dprintk(FE_DEBUG, 1, "Tuner phase locked");
+ else
+ dprintk(FE_DEBUG, 1, "Tuner unlocked");
+
+ if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ goto err;
+
+ msleep(10);
+ agc1_power = MAKEWORD16(STV090x_READ_DEMOD(state, AGCIQIN1),
+ STV090x_READ_DEMOD(state, AGCIQIN0));
+
+ if (agc1_power == 0) {
+ /* If AGC1 integrator value is 0
+ * then read POWERI, POWERQ
+ */
+ for (i = 0; i < 5; i++) {
+ power_iq += (STV090x_READ_DEMOD(state, POWERI) +
+ STV090x_READ_DEMOD(state, POWERQ)) >> 1;
+ }
+ power_iq /= 5;
+ }
+
+ if ((agc1_power == 0) && (power_iq < STV090x_IQPOWER_THRESHOLD)) {
+ dprintk(FE_ERROR, 1, "No Signal: POWER_IQ=0x%02x", power_iq);
+ lock = 0;
+
+ } else {
+ reg = STV090x_READ_DEMOD(state, DEMOD);
+ STV090x_SETFIELD_Px(reg, SPECINV_CONTROL_FIELD, state->inversion);
+
+ if (state->dev_ver <= 0x20) {
+ /* rolloff to auto mode if DVBS2 */
+ STV090x_SETFIELD_Px(reg, MANUAL_SXROLLOFF_FIELD, 1);
+ } else {
+ /* DVB-S2 rolloff to auto mode if DVBS2 */
+ STV090x_SETFIELD_Px(reg, MANUAL_S2ROLLOFF_FIELD, 1);
+ }
+ if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
+ goto err;
+
+ if (stv090x_delivery_search(state) < 0)
+ goto err;
+
+ if (state->algo != STV090x_BLIND_SEARCH) {
+ if (stv090x_start_search(state) < 0)
+ goto err;
+ }
+ }
+
+ /* need to check for AGC1 state */
+
+ if (state->algo == STV090x_BLIND_SEARCH)
+ lock = stv090x_blind_search(state);
+
+ else if (state->algo == STV090x_COLD_SEARCH)
+ lock = stv090x_get_coldlock(state, timeout_dmd);
+
+ else if (state->algo == STV090x_WARM_SEARCH)
+ lock = stv090x_get_dmdlock(state, timeout_dmd);
+
+ if ((!lock) && (state->algo == STV090x_COLD_SEARCH)) {
+ if (!low_sr) {
+ if (stv090x_chk_tmg(state))
+ lock = stv090x_sw_algo(state);
+ }
+ }
+
+ if (lock)
+ signal_state = stv090x_get_sig_params(state);
+
+ if ((lock) && (signal_state == STV090x_RANGEOK)) { /* signal within Range */
+ stv090x_optimize_track(state);
+
+ if (state->dev_ver >= 0x20) {
+ /* >= Cut 2.0: release the TS reset after
+ * demod lock and optimized tracking
+ */
+ reg = STV090x_READ_DEMOD(state, TSCFGH);
+ STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0); /* release merger reset */
+ if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
+ goto err;
+
+ msleep(3);
+
+ STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 1); /* merger reset */
+ if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
+ goto err;
+
+ STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0); /* release merger reset */
+ if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
+ goto err;
+ }
+
+ if (stv090x_get_lock(state, timeout_fec, timeout_fec)) {
+ lock = 1;
+ if (state->delsys == STV090x_DVBS2) {
+ stv090x_set_s2rolloff(state);
+
+ reg = STV090x_READ_DEMOD(state, PDELCTRL2);
+ STV090x_SETFIELD_Px(reg, RESET_UPKO_COUNT, 1);
+ if (STV090x_WRITE_DEMOD(state, PDELCTRL2, reg) < 0)
+ goto err;
+ /* Reset the DVBS2 packet delineator error counter */
+ reg = STV090x_READ_DEMOD(state, PDELCTRL2);
+ STV090x_SETFIELD_Px(reg, RESET_UPKO_COUNT, 0);
+ if (STV090x_WRITE_DEMOD(state, PDELCTRL2, reg) < 0)
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x67) < 0) /* PER */
+ goto err;
+ } else {
+ if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x75) < 0)
+ goto err;
+ }
+ /* Reset the Total packet counter */
+ if (STV090x_WRITE_DEMOD(state, FBERCPT4, 0x00) < 0)
+ goto err;
+ /* Reset the packet Error counter2 */
+ if (STV090x_WRITE_DEMOD(state, ERRCTRL2, 0xc1) < 0)
+ goto err;
+ } else {
+ lock = 0;
+ signal_state = STV090x_NODATA;
+ no_signal = stv090x_chk_signal(state);
+ }
+ }
+ return signal_state;
+
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static enum dvbfe_search stv090x_search(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *props = &fe->dtv_property_cache;
+
+ state->delsys = props->delivery_system;
+ state->frequency = p->frequency;
+ state->srate = p->u.qpsk.symbol_rate;
+ state->search_mode = STV090x_SEARCH_AUTO;
+ state->algo = STV090x_COLD_SEARCH;
+ state->fec = STV090x_PRERR;
+ state->search_range = 2000000;
+
+ if (stv090x_algo(state) == STV090x_RANGEOK) {
+ dprintk(FE_DEBUG, 1, "Search success!");
+ return DVBFE_ALGO_SEARCH_SUCCESS;
+ } else {
+ dprintk(FE_DEBUG, 1, "Search failed!");
+ return DVBFE_ALGO_SEARCH_FAILED;
+ }
+
+ return DVBFE_ALGO_SEARCH_ERROR;
+}
+
+/* FIXME! */
+static int stv090x_read_status(struct dvb_frontend *fe, enum fe_status *status)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ u32 reg;
+ u8 search_state;
+
+ reg = STV090x_READ_DEMOD(state, DMDSTATE);
+ search_state = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);
+
+ switch (search_state) {
+ case 0: /* searching */
+ case 1: /* first PLH detected */
+ default:
+ dprintk(FE_DEBUG, 1, "Status: Unlocked (Searching ..)");
+ *status = 0;
+ break;
+
+ case 2: /* DVB-S2 mode */
+ dprintk(FE_DEBUG, 1, "Delivery system: DVB-S2");
+ reg = STV090x_READ_DEMOD(state, DSTATUS);
+ if (STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD)) {
+ reg = STV090x_READ_DEMOD(state, TSSTATUS);
+ if (STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD)) {
+ *status = FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ }
+ }
+ break;
+
+ case 3: /* DVB-S1/legacy mode */
+ dprintk(FE_DEBUG, 1, "Delivery system: DVB-S");
+ reg = STV090x_READ_DEMOD(state, DSTATUS);
+ if (STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD)) {
+ reg = STV090x_READ_DEMOD(state, VSTATUSVIT);
+ if (STV090x_GETFIELD_Px(reg, LOCKEDVIT_FIELD)) {
+ reg = STV090x_READ_DEMOD(state, TSSTATUS);
+ if (STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD)) {
+ *status = FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ }
+ }
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int stv090x_read_per(struct dvb_frontend *fe, u32 *per)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+
+ s32 count_4, count_3, count_2, count_1, count_0, count;
+ u32 reg, h, m, l;
+ enum fe_status status;
+
+ stv090x_read_status(fe, &status);
+ if (!(status & FE_HAS_LOCK)) {
+ *per = 1 << 23; /* Max PER */
+ } else {
+ /* Counter 2 */
+ reg = STV090x_READ_DEMOD(state, ERRCNT22);
+ h = STV090x_GETFIELD_Px(reg, ERR_CNT2_FIELD);
+
+ reg = STV090x_READ_DEMOD(state, ERRCNT21);
+ m = STV090x_GETFIELD_Px(reg, ERR_CNT21_FIELD);
+
+ reg = STV090x_READ_DEMOD(state, ERRCNT20);
+ l = STV090x_GETFIELD_Px(reg, ERR_CNT20_FIELD);
+
+ *per = ((h << 16) | (m << 8) | l);
+
+ count_4 = STV090x_READ_DEMOD(state, FBERCPT4);
+ count_3 = STV090x_READ_DEMOD(state, FBERCPT3);
+ count_2 = STV090x_READ_DEMOD(state, FBERCPT2);
+ count_1 = STV090x_READ_DEMOD(state, FBERCPT1);
+ count_0 = STV090x_READ_DEMOD(state, FBERCPT0);
+
+ if ((!count_4) && (!count_3)) {
+ count = (count_2 & 0xff) << 16;
+ count |= (count_1 & 0xff) << 8;
+ count |= count_0 & 0xff;
+ } else {
+ count = 1 << 24;
+ }
+ if (count == 0)
+ *per = 1;
+ }
+ if (STV090x_WRITE_DEMOD(state, FBERCPT4, 0) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, ERRCTRL2, 0xc1) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_table_lookup(const struct stv090x_tab *tab, int max, int val)
+{
+ int res = 0;
+ int min = 0, med;
+
+ if (val < tab[min].read)
+ res = tab[min].real;
+ else if (val >= tab[max].read)
+ res = tab[max].real;
+ else {
+ while ((max - min) > 1) {
+ med = (max + min) / 2;
+ if (val >= tab[min].read && val < tab[med].read)
+ max = med;
+ else
+ min = med;
+ }
+ res = ((val - tab[min].read) *
+ (tab[max].real - tab[min].real) /
+ (tab[max].read - tab[min].read)) +
+ tab[min].real;
+ }
+
+ return res;
+}
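+/*
+ * Worked example (illustrative only): with a two-entry table
+ * { { .real = -10, .read = 100 }, { .real = -20, .read = 200 } },
+ * max = 1 and val = 150, the binary search loop never runs
+ * (max - min == 1) and the linear interpolation yields
+ * ((150 - 100) * (-20 - (-10)) / (200 - 100)) + (-10) = -15,
+ * i.e. the midpoint between the two calibrated .real values.
+ */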
+
+static int stv090x_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ u32 reg;
+ s32 agc;
+
+ reg = STV090x_READ_DEMOD(state, AGCIQIN1);
+ agc = STV090x_GETFIELD_Px(reg, AGCIQ_VALUE_FIELD);
+
+ *strength = stv090x_table_lookup(stv090x_rf_tab, ARRAY_SIZE(stv090x_rf_tab) - 1, agc);
+ if (agc > stv090x_rf_tab[0].read)
+ *strength = 5;
+ else if (agc < stv090x_rf_tab[ARRAY_SIZE(stv090x_rf_tab) - 1].read)
+ *strength = -100;
+
+ return 0;
+}
+
+static int stv090x_read_cnr(struct dvb_frontend *fe, u16 *cnr)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ u32 reg_0, reg_1, reg, i;
+ s32 val_0, val_1, val = 0;
+ u8 lock_f;
+
+ switch (state->delsys) {
+ case STV090x_DVBS2:
+ reg = STV090x_READ_DEMOD(state, DSTATUS);
+ lock_f = STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD);
+ if (lock_f) {
+ msleep(5);
+ for (i = 0; i < 16; i++) {
+ reg_1 = STV090x_READ_DEMOD(state, NNOSPLHT1);
+ val_1 = STV090x_GETFIELD_Px(reg_1, NOSPLHT_NORMED_FIELD);
+ reg_0 = STV090x_READ_DEMOD(state, NNOSPLHT0);
+ val_0 = STV090x_GETFIELD_Px(reg_0, NOSPLHT_NORMED_FIELD);
+ val += MAKEWORD16(val_1, val_0);
+ msleep(1);
+ }
+ val /= 16;
+ *cnr = stv090x_table_lookup(stv090x_s2cn_tab, ARRAY_SIZE(stv090x_s2cn_tab) - 1, val);
+ if (val < stv090x_s2cn_tab[ARRAY_SIZE(stv090x_s2cn_tab) - 1].read)
+ *cnr = 1000;
+ }
+ break;
+
+ case STV090x_DVBS1:
+ case STV090x_DSS:
+ reg = STV090x_READ_DEMOD(state, DSTATUS);
+ lock_f = STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD);
+ if (lock_f) {
+ msleep(5);
+ for (i = 0; i < 16; i++) {
+ reg_1 = STV090x_READ_DEMOD(state, NOSDATAT1);
+ val_1 = STV090x_GETFIELD_Px(reg_1, NOSDATAT_UNNORMED_FIELD);
+ reg_0 = STV090x_READ_DEMOD(state, NOSDATAT0);
+ val_0 = STV090x_GETFIELD_Px(reg_0, NOSDATAT_UNNORMED_FIELD);
+ val += MAKEWORD16(val_1, val_0);
+ msleep(1);
+ }
+ val /= 16;
+ *cnr = stv090x_table_lookup(stv090x_s1cn_tab, ARRAY_SIZE(stv090x_s1cn_tab) - 1, val);
+ if (val < stv090x_s1cn_tab[ARRAY_SIZE(stv090x_s1cn_tab) - 1].read)
+ *cnr = 1000;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int stv090x_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ u32 reg;
+
+ reg = STV090x_READ_DEMOD(state, DISTXCTL);
+ switch (tone) {
+ case SEC_TONE_ON:
+ STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 0);
+ STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
+ goto err;
+ STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 0);
+ if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
+ goto err;
+ break;
+
+ case SEC_TONE_OFF:
+ STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 0);
+ STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
+ goto err;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+
+static enum dvbfe_algo stv090x_frontend_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_CUSTOM;
+}
+
+static int stv090x_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ u32 reg, idle = 0, fifo_full = 1;
+ int i;
+
+ reg = STV090x_READ_DEMOD(state, DISTXCTL);
+
+ STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 2);
+ STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
+ goto err;
+ STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 0);
+ if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
+ goto err;
+
+ STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
+ goto err;
+
+ for (i = 0; i < cmd->msg_len; i++) {
+
+ while (fifo_full) {
+ reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
+ fifo_full = STV090x_GETFIELD_Px(reg, FIFO_FULL_FIELD);
+ }
+
+ if (STV090x_WRITE_DEMOD(state, DISTXDATA, cmd->msg[i]) < 0)
+ goto err;
+ }
+ reg = STV090x_READ_DEMOD(state, DISTXCTL);
+ STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 0);
+ if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
+ goto err;
+
+ i = 0;
+
+ while ((!idle) && (i < 10)) {
+ reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
+ idle = STV090x_GETFIELD_Px(reg, TX_IDLE_FIELD);
+ msleep(10);
+ i++;
+ }
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
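+/*
+ * Summary (descriptive only): the transmit sequence above selects the
+ * DiSEqC modulation via DISTX_MODE, pulses DISEQC_RESET, raises
+ * DIS_PRECHARGE, waits for FIFO_FULL to clear before writing each
+ * message byte to DISTXDATA, then drops the precharge and polls
+ * TX_IDLE for up to roughly 100 ms before returning.
+ */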
+
+static int stv090x_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ u32 reg, idle = 0, fifo_full = 1;
+ u8 mode, value;
+ int i;
+
+ reg = STV090x_READ_DEMOD(state, DISTXCTL);
+
+ if (burst == SEC_MINI_A) {
+ mode = 3;
+ value = 0x00;
+ } else {
+ mode = 2;
+ value = 0xFF;
+ }
+
+ STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, mode);
+ STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
+ goto err;
+ STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 0);
+ if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
+ goto err;
+
+ STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 1);
+ if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
+ goto err;
+
+ while (fifo_full) {
+ reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
+ fifo_full = STV090x_GETFIELD_Px(reg, FIFO_FULL_FIELD);
+ }
+
+ if (STV090x_WRITE_DEMOD(state, DISTXDATA, value) < 0)
+ goto err;
+
+ reg = STV090x_READ_DEMOD(state, DISTXCTL);
+ STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 0);
+ if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
+ goto err;
+
+ i = 0;
+
+ while ((!idle) && (i < 10)) {
+ reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
+ idle = STV090x_GETFIELD_Px(reg, TX_IDLE_FIELD);
+ msleep(10);
+ i++;
+ }
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_recv_slave_reply(struct dvb_frontend *fe, struct dvb_diseqc_slave_reply *reply)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ u32 reg = 0, i = 0, rx_end = 0;
+
+ while ((rx_end != 1) && (i < 10)) {
+ msleep(10);
+ i++;
+ reg = STV090x_READ_DEMOD(state, DISRX_ST0);
+ rx_end = STV090x_GETFIELD_Px(reg, RX_END_FIELD);
+ }
+
+ if (rx_end) {
+ reply->msg_len = STV090x_GETFIELD_Px(reg, FIFO_BYTENBR_FIELD);
+ for (i = 0; i < reply->msg_len; i++)
+ reply->msg[i] = STV090x_READ_DEMOD(state, DISRXDATA);
+ }
+
+ return 0;
+}
+
+static int stv090x_sleep(struct dvb_frontend *fe)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ u32 reg;
+
+ dprintk(FE_DEBUG, 1, "Set %s to sleep",
+ state->device == STV0900 ? "STV0900" : "STV0903");
+
+ reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
+ STV090x_SETFIELD(reg, STANDBY_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_SYNTCTRL, reg) < 0)
+ goto err;
+
+ reg = stv090x_read_reg(state, STV090x_TSTTNR1);
+ STV090x_SETFIELD(reg, ADC1_PON_FIELD, 0);
+ if (stv090x_write_reg(state, STV090x_TSTTNR1, reg) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_wakeup(struct dvb_frontend *fe)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ u32 reg;
+
+ dprintk(FE_DEBUG, 1, "Wake %s from standby",
+ state->device == STV0900 ? "STV0900" : "STV0903");
+
+ reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
+ STV090x_SETFIELD(reg, STANDBY_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_SYNTCTRL, reg) < 0)
+ goto err;
+
+ reg = stv090x_read_reg(state, STV090x_TSTTNR1);
+ STV090x_SETFIELD(reg, ADC1_PON_FIELD, 1);
+ if (stv090x_write_reg(state, STV090x_TSTTNR1, reg) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static void stv090x_release(struct dvb_frontend *fe)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+
+ kfree(state);
+}
+
+static int stv090x_ldpc_mode(struct stv090x_state *state, enum stv090x_mode ldpc_mode)
+{
+ u32 reg = 0;
+
+ switch (ldpc_mode) {
+ case STV090x_DUAL:
+ default:
+ if ((state->demod_mode != STV090x_DUAL) || (STV090x_GETFIELD(reg, DDEMOD_FIELD) != 1)) {
+ /* set LDPC to dual mode */
+ if (stv090x_write_reg(state, STV090x_GENCFG, 0x1d) < 0)
+ goto err;
+
+ state->demod_mode = STV090x_DUAL;
+
+ reg = stv090x_read_reg(state, STV090x_TSTRES0);
+ STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x1);
+ if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
+ goto err;
+ STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x0);
+ if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0xff) < 0)
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0xcc) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0xcc) < 0)
+ goto err;
+
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0xff) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0xcf) < 0)
+ goto err;
+ }
+ break;
+
+ case STV090x_SINGLE:
+ if (stv090x_stop_modcod(state) < 0)
+ goto err;
+ if (stv090x_activate_modcod_single(state) < 0)
+ goto err;
+
+ if (state->demod == STV090x_DEMODULATOR_1) {
+ if (stv090x_write_reg(state, STV090x_GENCFG, 0x06) < 0) /* path 2 */
+ goto err;
+ } else {
+ if (stv090x_write_reg(state, STV090x_GENCFG, 0x04) < 0) /* path 1 */
+ goto err;
+ }
+
+ reg = stv090x_read_reg(state, STV090x_TSTRES0);
+ STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x1);
+ if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
+ goto err;
+ STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x0);
+ if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
+ goto err;
+
+ reg = STV090x_READ_DEMOD(state, PDELCTRL1);
+ STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0x01);
+ if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
+ goto err;
+ STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0x00);
+ if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
+ goto err;
+ break;
+ }
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+/* Return the master clock frequency in Hz (config->xtal is given in Hz) */
+static u32 stv090x_get_mclk(struct stv090x_state *state)
+{
+ const struct stv090x_config *config = state->config;
+ u32 div, reg;
+ u8 ratio;
+
+ div = stv090x_read_reg(state, STV090x_NCOARSE);
+ reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
+ ratio = STV090x_GETFIELD(reg, SELX1RATIO_FIELD) ? 4 : 6;
+
+ return (div + 1) * config->xtal / ratio; /* Hz */
+}
+
+static int stv090x_set_mclk(struct stv090x_state *state, u32 mclk, u32 clk)
+{
+ const struct stv090x_config *config = state->config;
+ u32 reg, div, clk_sel;
+
+ reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
+ clk_sel = ((STV090x_GETFIELD(reg, SELX1RATIO_FIELD) == 1) ? 4 : 6);
+
+ div = ((clk_sel * mclk) / config->xtal) - 1;
+
+ reg = stv090x_read_reg(state, STV090x_NCOARSE);
+ STV090x_SETFIELD(reg, M_DIV_FIELD, div);
+ if (stv090x_write_reg(state, STV090x_NCOARSE, reg) < 0)
+ goto err;
+
+ state->mclk = stv090x_get_mclk(state);
+
+ /* Set the DiSEqC frequency to 22 kHz */
+ div = state->mclk / 704000;
+ if (STV090x_WRITE_DEMOD(state, F22TX, div) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, F22RX, div) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
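+/*
+ * Worked example (illustrative, assuming the default 8 MHz xtal from
+ * stv090x.h and SELX1RATIO selecting clk_sel = 4): for the 135 MHz
+ * target programmed in stv090x_setup(),
+ * div = (4 * 135000000) / 8000000 - 1 = 66, so stv090x_get_mclk()
+ * reads back (66 + 1) * 8000000 / 4 = 134 MHz.  The F22 divider then
+ * becomes 134000000 / 704000 = 190; since 704000 = 32 * 22000, this
+ * places the DiSEqC tone close to the nominal 22 kHz.
+ */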
+
+static int stv090x_set_tspath(struct stv090x_state *state)
+{
+ u32 reg;
+
+ if (state->dev_ver >= 0x20) {
+ switch (state->config->ts1_mode) {
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ case STV090x_TSMODE_DVBCI:
+ switch (state->config->ts2_mode) {
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ default:
+ stv090x_write_reg(state, STV090x_TSGENERAL, 0x00);
+ break;
+
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ case STV090x_TSMODE_DVBCI:
+ if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x06) < 0) /* Mux'd stream mode */
+ goto err;
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
+ STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
+ goto err;
+ reg = stv090x_read_reg(state, STV090x_P2_TSCFGM);
+ STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
+ if (stv090x_write_reg(state, STV090x_P2_TSCFGM, reg) < 0)
+ goto err;
+ if (stv090x_write_reg(state, STV090x_P1_TSSPEED, 0x14) < 0)
+ goto err;
+ if (stv090x_write_reg(state, STV090x_P2_TSSPEED, 0x28) < 0)
+ goto err;
+ break;
+ }
+ break;
+
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ default:
+ switch (state->config->ts2_mode) {
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ default:
+ if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x0c) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ case STV090x_TSMODE_DVBCI:
+ if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x0a) < 0)
+ goto err;
+ break;
+ }
+ break;
+ }
+ } else {
+ switch (state->config->ts1_mode) {
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ case STV090x_TSMODE_DVBCI:
+ switch (state->config->ts2_mode) {
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ default:
+ stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x10);
+ break;
+
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ case STV090x_TSMODE_DVBCI:
+ stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x16);
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
+ STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
+ goto err;
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
+ STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 0);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
+ goto err;
+ if (stv090x_write_reg(state, STV090x_P1_TSSPEED, 0x14) < 0)
+ goto err;
+ if (stv090x_write_reg(state, STV090x_P2_TSSPEED, 0x28) < 0)
+ goto err;
+ break;
+ }
+ break;
+
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ default:
+ switch (state->config->ts2_mode) {
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ default:
+ stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x14);
+ break;
+
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ case STV090x_TSMODE_DVBCI:
+ stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x12);
+ break;
+ }
+ break;
+ }
+ }
+
+ switch (state->config->ts1_mode) {
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_DVBCI:
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ default:
+ break;
+ }
+
+ switch (state->config->ts2_mode) {
+ case STV090x_TSMODE_PARALLEL_PUNCTURED:
+ reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_DVBCI:
+ reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_SERIAL_PUNCTURED:
+ reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ case STV090x_TSMODE_SERIAL_CONTINUOUS:
+ reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
+ STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
+ STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
+ goto err;
+ break;
+
+ default:
+ break;
+ }
+ reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
+ STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
+ goto err;
+ STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
+ goto err;
+
+ reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
+ STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x01);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+ STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x00);
+ if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_init(struct dvb_frontend *fe)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ const struct stv090x_config *config = state->config;
+ u32 reg;
+
+ if (stv090x_wakeup(fe) < 0) {
+ dprintk(FE_ERROR, 1, "Error waking device");
+ goto err;
+ }
+
+ if (stv090x_ldpc_mode(state, state->demod_mode) < 0)
+ goto err;
+
+ reg = STV090x_READ_DEMOD(state, TNRCFG2);
+ STV090x_SETFIELD_Px(reg, TUN_IQSWAP_FIELD, state->inversion);
+ if (STV090x_WRITE_DEMOD(state, TNRCFG2, reg) < 0)
+ goto err;
+ reg = STV090x_READ_DEMOD(state, DEMOD);
+ STV090x_SETFIELD_Px(reg, ROLLOFF_CONTROL_FIELD, state->rolloff);
+ if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
+ goto err;
+
+ if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ goto err;
+
+ if (config->tuner_set_mode) {
+ if (config->tuner_set_mode(fe, TUNER_WAKE) < 0)
+ goto err;
+ }
+
+ if (config->tuner_init) {
+ if (config->tuner_init(fe) < 0)
+ goto err;
+ }
+
+ if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ goto err;
+
+ if (stv090x_set_tspath(state) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static int stv090x_setup(struct dvb_frontend *fe)
+{
+ struct stv090x_state *state = fe->demodulator_priv;
+ const struct stv090x_config *config = state->config;
+ const struct stv090x_reg *stv090x_initval = NULL;
+ const struct stv090x_reg *stv090x_cut20_val = NULL;
+ unsigned long t1_size = 0, t2_size = 0;
+ u32 reg = 0;
+
+ int i;
+
+ if (state->device == STV0900) {
+ dprintk(FE_DEBUG, 1, "Initializing STV0900");
+ stv090x_initval = stv0900_initval;
+ t1_size = ARRAY_SIZE(stv0900_initval);
+ stv090x_cut20_val = stv0900_cut20_val;
+ t2_size = ARRAY_SIZE(stv0900_cut20_val);
+ } else if (state->device == STV0903) {
+ dprintk(FE_DEBUG, 1, "Initializing STV0903");
+ stv090x_initval = stv0903_initval;
+ t1_size = ARRAY_SIZE(stv0903_initval);
+ stv090x_cut20_val = stv0903_cut20_val;
+ t2_size = ARRAY_SIZE(stv0903_cut20_val);
+ }
+
+ /* STV090x init */
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5c) < 0) /* Stop Demod */
+ goto err;
+
+ msleep(5);
+
+ if (STV090x_WRITE_DEMOD(state, TNRCFG, 0x6c) < 0) /* check register ! (No Tuner Mode) */
+ goto err;
+
+ STV090x_SETFIELD_Px(reg, ENARPT_LEVEL_FIELD, config->repeater_level);
+ if (STV090x_WRITE_DEMOD(state, I2CRPT, reg) < 0) /* repeater OFF */
+ goto err;
+
+ if (stv090x_write_reg(state, STV090x_NCOARSE, 0x13) < 0) /* set PLL divider */
+ goto err;
+ msleep(5);
+ if (stv090x_write_reg(state, STV090x_I2CCFG, 0x08) < 0) /* 1/41 oversampling */
+ goto err;
+ if (stv090x_write_reg(state, STV090x_SYNTCTRL, 0x20 | config->clk_mode) < 0) /* enable PLL */
+ goto err;
+ msleep(5);
+
+ /* write initval */
+ dprintk(FE_DEBUG, 1, "Setting up initial values");
+ for (i = 0; i < t1_size; i++) {
+ if (stv090x_write_reg(state, stv090x_initval[i].addr, stv090x_initval[i].data) < 0)
+ goto err;
+ }
+
+ state->dev_ver = stv090x_read_reg(state, STV090x_MID);
+ if (state->dev_ver >= 0x20) {
+ if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x0c) < 0)
+ goto err;
+
+ /* write cut20_val*/
+ dprintk(FE_DEBUG, 1, "Setting up Cut 2.0 initial values");
+ for (i = 0; i < t2_size; i++) {
+ if (stv090x_write_reg(state, stv090x_cut20_val[i].addr, stv090x_cut20_val[i].data) < 0)
+ goto err;
+ }
+
+ } else {
+ /* dev_ver < 0x20 */
+ dprintk(FE_ERROR, 1, "ERROR: Unsupported Cut: 0x%02x!",
+ state->dev_ver);
+
+ goto err;
+ }
+
+ if (state->dev_ver > 0x30) {
+ /* we shouldn't bail out from here */
+ dprintk(FE_ERROR, 1, "INFO: Cut: 0x%02x probably incomplete support!",
+ state->dev_ver);
+ }
+
+ if (stv090x_write_reg(state, STV090x_TSTRES0, 0x80) < 0)
+ goto err;
+ if (stv090x_write_reg(state, STV090x_TSTRES0, 0x00) < 0)
+ goto err;
+
+ stv090x_set_mclk(state, 135000000, config->xtal); /* 135 MHz */
+ msleep(5);
+ if (stv090x_write_reg(state, STV090x_SYNTCTRL, 0x20 | config->clk_mode) < 0)
+ goto err;
+ stv090x_get_mclk(state);
+
+ return 0;
+err:
+ dprintk(FE_ERROR, 1, "I/O error");
+ return -1;
+}
+
+static struct dvb_frontend_ops stv090x_ops = {
+
+ .info = {
+ .name = "STV090x Multistandard",
+ .type = FE_QPSK,
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_stepsize = 0,
+ .frequency_tolerance = 0,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_2G_MODULATION
+ },
+
+ .release = stv090x_release,
+ .init = stv090x_init,
+
+ .sleep = stv090x_sleep,
+ .get_frontend_algo = stv090x_frontend_algo,
+
+ .i2c_gate_ctrl = stv090x_i2c_gate_ctrl,
+
+ .diseqc_send_master_cmd = stv090x_send_diseqc_msg,
+ .diseqc_send_burst = stv090x_send_diseqc_burst,
+ .diseqc_recv_slave_reply = stv090x_recv_slave_reply,
+ .set_tone = stv090x_set_tone,
+
+ .search = stv090x_search,
+ .read_status = stv090x_read_status,
+ .read_ber = stv090x_read_per,
+ .read_signal_strength = stv090x_read_signal_strength,
+ .read_snr = stv090x_read_cnr
+};
+
+
+struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
+ struct i2c_adapter *i2c,
+ enum stv090x_demodulator demod)
+{
+ struct stv090x_state *state = NULL;
+
+ state = kzalloc(sizeof (struct stv090x_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error;
+
+ state->verbose = &verbose;
+ state->config = config;
+ state->i2c = i2c;
+ state->frontend.ops = stv090x_ops;
+ state->frontend.demodulator_priv = state;
+ state->demod = demod;
+ state->demod_mode = config->demod_mode; /* Single or Dual mode */
+ state->device = config->device;
+ state->rolloff = STV090x_RO_35; /* default */
+
+ if (state->demod == STV090x_DEMODULATOR_0)
+ mutex_init(&demod_lock);
+
+ if (stv090x_sleep(&state->frontend) < 0) {
+ dprintk(FE_ERROR, 1, "Error putting device to sleep");
+ goto error;
+ }
+
+ if (stv090x_setup(&state->frontend) < 0) {
+ dprintk(FE_ERROR, 1, "Error setting up device");
+ goto error;
+ }
+ if (stv090x_wakeup(&state->frontend) < 0) {
+ dprintk(FE_ERROR, 1, "Error waking device");
+ goto error;
+ }
+
+ dprintk(FE_ERROR, 1, "Attaching %s demodulator(%d) Cut=0x%02x",
+ state->device == STV0900 ? "STV0900" : "STV0903",
+ demod,
+ state->dev_ver);
+
+ return &state->frontend;
+
+error:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(stv090x_attach);
+MODULE_PARM_DESC(verbose, "Set Verbosity level");
+MODULE_AUTHOR("Manu Abraham");
+MODULE_DESCRIPTION("STV090x Multi-Std Broadcast frontend");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/stv090x.h b/drivers/media/dvb/frontends/stv090x.h
new file mode 100644
index 00000000000..e968c98bb70
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv090x.h
@@ -0,0 +1,106 @@
+/*
+ STV0900/0903 Multistandard Broadcast Frontend driver
+ Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+ Copyright (C) ST Microelectronics
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __STV090x_H
+#define __STV090x_H
+
+enum stv090x_demodulator {
+ STV090x_DEMODULATOR_0 = 1,
+ STV090x_DEMODULATOR_1
+};
+
+enum stv090x_device {
+ STV0903 = 0,
+ STV0900,
+};
+
+enum stv090x_mode {
+ STV090x_DUAL = 0,
+ STV090x_SINGLE
+};
+
+enum stv090x_tsmode {
+ STV090x_TSMODE_SERIAL_PUNCTURED = 1,
+ STV090x_TSMODE_SERIAL_CONTINUOUS,
+ STV090x_TSMODE_PARALLEL_PUNCTURED,
+ STV090x_TSMODE_DVBCI
+};
+
+enum stv090x_clkmode {
+ STV090x_CLK_INT = 0, /* Clk i/p = CLKI */
+ STV090x_CLK_EXT = 2 /* Clk i/p = XTALI */
+};
+
+enum stv090x_i2crpt {
+ STV090x_RPTLEVEL_256 = 0,
+ STV090x_RPTLEVEL_128 = 1,
+ STV090x_RPTLEVEL_64 = 2,
+ STV090x_RPTLEVEL_32 = 3,
+ STV090x_RPTLEVEL_16 = 4,
+ STV090x_RPTLEVEL_8 = 5,
+ STV090x_RPTLEVEL_4 = 6,
+ STV090x_RPTLEVEL_2 = 7,
+};
+
+struct stv090x_config {
+ enum stv090x_device device;
+ enum stv090x_mode demod_mode;
+ enum stv090x_clkmode clk_mode;
+
+ u32 xtal; /* default: 8000000 */
+ u8 address; /* default: 0x68 */
+
+ u32 ref_clk; /* default: 16000000 FIXME to tuner config */
+
+ u8 ts1_mode;
+ u8 ts2_mode;
+
+ enum stv090x_i2crpt repeater_level;
+
+ int (*tuner_init) (struct dvb_frontend *fe);
+ int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
+ int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
+ int (*tuner_get_frequency) (struct dvb_frontend *fe, u32 *frequency);
+ int (*tuner_set_bandwidth) (struct dvb_frontend *fe, u32 bandwidth);
+ int (*tuner_get_bandwidth) (struct dvb_frontend *fe, u32 *bandwidth);
+ int (*tuner_set_bbgain) (struct dvb_frontend *fe, u32 gain);
+ int (*tuner_get_bbgain) (struct dvb_frontend *fe, u32 *gain);
+ int (*tuner_set_refclk) (struct dvb_frontend *fe, u32 refclk);
+ int (*tuner_get_status) (struct dvb_frontend *fe, u32 *status);
+};
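+/*
+ * Hypothetical configuration sketch (illustrative only, not taken from
+ * any real board): a single STV0903 on an 8 MHz crystal with a serial,
+ * punctured TS output might be described roughly as
+ *
+ *   static struct stv090x_config mydemod_config = {
+ *           .device = STV0903,
+ *           .demod_mode = STV090x_SINGLE,
+ *           .clk_mode = STV090x_CLK_EXT,
+ *           .xtal = 8000000,
+ *           .address = 0x68,
+ *           .ref_clk = 16000000,
+ *           .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+ *           .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+ *           .repeater_level = STV090x_RPTLEVEL_16,
+ *           .tuner_set_frequency = mytuner_set_frequency,
+ *           .tuner_set_bandwidth = mytuner_set_bandwidth,
+ *   };
+ *
+ * "mydemod"/"mytuner" are placeholder names for board-specific glue;
+ * unused tuner callbacks may be left NULL, since the driver checks
+ * each pointer before calling it.
+ */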
+
+#if defined(CONFIG_DVB_STV090x) || (defined(CONFIG_DVB_STV090x_MODULE) && defined(MODULE))
+
+extern struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
+ struct i2c_adapter *i2c,
+ enum stv090x_demodulator demod);
+#else
+
+static inline struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
+ struct i2c_adapter *i2c,
+ enum stv090x_demodulator demod)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif /* CONFIG_DVB_STV090x */
+
+#endif /* __STV090x_H */
diff --git a/drivers/media/dvb/frontends/stv090x_priv.h b/drivers/media/dvb/frontends/stv090x_priv.h
new file mode 100644
index 00000000000..5a4a01740d8
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv090x_priv.h
@@ -0,0 +1,269 @@
+/*
+ STV0900/0903 Multistandard Broadcast Frontend driver
+ Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+ Copyright (C) ST Microelectronics
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __STV090x_PRIV_H
+#define __STV090x_PRIV_H
+
+#include "dvb_frontend.h"
+
+#define FE_ERROR 0
+#define FE_NOTICE 1
+#define FE_INFO 2
+#define FE_DEBUG 3
+#define FE_DEBUGREG 4
+
+#define dprintk(__y, __z, format, arg...) do { \
+ if (__z) { \
+ if ((verbose > FE_ERROR) && (verbose > __y)) \
+ printk(KERN_ERR "%s: " format "\n", __func__ , ##arg); \
+ else if ((verbose > FE_NOTICE) && (verbose > __y)) \
+ printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \
+ else if ((verbose > FE_INFO) && (verbose > __y)) \
+ printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \
+ else if ((verbose > FE_DEBUG) && (verbose > __y)) \
+ printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \
+ } else { \
+ if (verbose > __y) \
+ printk(format, ##arg); \
+ } \
+} while (0)
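+/*
+ * Usage note (illustrative): a call such as
+ * dprintk(FE_DEBUG, 1, "srate = %d", state->srate);
+ * produces output only when the module-level 'verbose' setting is
+ * greater than FE_DEBUG; with the second argument set to 1 the message
+ * is prefixed with the calling function's name, with 0 it is passed to
+ * printk() verbatim.
+ */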
+
+#define STV090x_READ_DEMOD(__state, __reg) (( \
+ (__state)->demod == STV090x_DEMODULATOR_1) ? \
+ stv090x_read_reg(__state, STV090x_P2_##__reg) : \
+ stv090x_read_reg(__state, STV090x_P1_##__reg))
+
+#define STV090x_WRITE_DEMOD(__state, __reg, __data) (( \
+ (__state)->demod == STV090x_DEMODULATOR_1) ? \
+ stv090x_write_reg(__state, STV090x_P2_##__reg, __data) :\
+ stv090x_write_reg(__state, STV090x_P1_##__reg, __data))
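+/*
+ * Example expansion (illustrative): with state->demod set to
+ * STV090x_DEMODULATOR_1, STV090x_READ_DEMOD(state, TSCFGH) becomes
+ * stv090x_read_reg(state, STV090x_P2_TSCFGH); any other demodulator
+ * value selects STV090x_P1_TSCFGH instead, so the same code path can
+ * drive either register bank of a dual STV0900.
+ */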
+
+#define STV090x_ADDR_OFFST(__state, __x) (( \
+ (__state->demod) == STV090x_DEMODULATOR_1) ? \
+ STV090x_P1_##__x : \
+ STV090x_P2_##__x)
+
+
+#define STV090x_SETFIELD(mask, bitf, val) (mask = (mask & (~(((1 << STV090x_WIDTH_##bitf) - 1) <<\
+ STV090x_OFFST_##bitf))) | \
+ (val << STV090x_OFFST_##bitf))
+
+#define STV090x_GETFIELD(val, bitf) ((val >> STV090x_OFFST_##bitf) & ((1 << STV090x_WIDTH_##bitf) - 1))
+
+
+#define STV090x_SETFIELD_Px(mask, bitf, val) (mask = (mask & (~(((1 << STV090x_WIDTH_Px_##bitf) - 1) <<\
+ STV090x_OFFST_Px_##bitf))) | \
+ (val << STV090x_OFFST_Px_##bitf))
+
+#define STV090x_GETFIELD_Px(val, bitf) ((val >> STV090x_OFFST_Px_##bitf) & ((1 << STV090x_WIDTH_Px_##bitf) - 1))
+
+#define MAKEWORD16(__a, __b) (((__a) << 8) | (__b))
+
+#define MSB(__x) ((__x >> 8) & 0xff)
+#define LSB(__x) (__x & 0xff)
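+/*
+ * Bit-field example (illustrative, for a hypothetical field F with
+ * STV090x_OFFST_F = 4 and STV090x_WIDTH_F = 2):
+ * STV090x_GETFIELD(0x37, F) evaluates to (0x37 >> 4) & 0x3 = 0x3, and
+ * STV090x_SETFIELD(reg, F, 1) with reg = 0x37 clears bits 5:4 and ORs
+ * in (1 << 4), leaving reg = 0x17.  The _Px variants work the same way
+ * but use the per-path STV090x_OFFST_Px_/STV090x_WIDTH_Px_ constants.
+ */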
+
+
+#define STV090x_IQPOWER_THRESHOLD 30
+#define STV090x_SEARCH_AGC2_TH_CUT20 700
+#define STV090x_SEARCH_AGC2_TH_CUT30 1200
+
+#define STV090x_SEARCH_AGC2_TH(__ver) \
+ ((__ver <= 0x20) ? \
+ STV090x_SEARCH_AGC2_TH_CUT20 : \
+ STV090x_SEARCH_AGC2_TH_CUT30)
+
+enum stv090x_signal_state {
+ STV090x_NOCARRIER,
+ STV090x_NODATA,
+ STV090x_DATAOK,
+ STV090x_RANGEOK,
+ STV090x_OUTOFRANGE
+};
+
+enum stv090x_fec {
+ STV090x_PR12 = 0,
+ STV090x_PR23,
+ STV090x_PR34,
+ STV090x_PR45,
+ STV090x_PR56,
+ STV090x_PR67,
+ STV090x_PR78,
+ STV090x_PR89,
+ STV090x_PR910,
+ STV090x_PRERR
+};
+
+enum stv090x_modulation {
+ STV090x_QPSK,
+ STV090x_8PSK,
+ STV090x_16APSK,
+ STV090x_32APSK,
+ STV090x_UNKNOWN
+};
+
+enum stv090x_frame {
+ STV090x_LONG_FRAME,
+ STV090x_SHORT_FRAME
+};
+
+enum stv090x_pilot {
+ STV090x_PILOTS_OFF,
+ STV090x_PILOTS_ON
+};
+
+enum stv090x_rolloff {
+ STV090x_RO_35,
+ STV090x_RO_25,
+ STV090x_RO_20
+};
+
+enum stv090x_inversion {
+ STV090x_IQ_AUTO,
+ STV090x_IQ_NORMAL,
+ STV090x_IQ_SWAP
+};
+
+enum stv090x_modcod {
+ STV090x_DUMMY_PLF = 0,
+ STV090x_QPSK_14,
+ STV090x_QPSK_13,
+ STV090x_QPSK_25,
+ STV090x_QPSK_12,
+ STV090x_QPSK_35,
+ STV090x_QPSK_23,
+ STV090x_QPSK_34,
+ STV090x_QPSK_45,
+ STV090x_QPSK_56,
+ STV090x_QPSK_89,
+ STV090x_QPSK_910,
+ STV090x_8PSK_35,
+ STV090x_8PSK_23,
+ STV090x_8PSK_34,
+ STV090x_8PSK_56,
+ STV090x_8PSK_89,
+ STV090x_8PSK_910,
+ STV090x_16APSK_23,
+ STV090x_16APSK_34,
+ STV090x_16APSK_45,
+ STV090x_16APSK_56,
+ STV090x_16APSK_89,
+ STV090x_16APSK_910,
+ STV090x_32APSK_34,
+ STV090x_32APSK_45,
+ STV090x_32APSK_56,
+ STV090x_32APSK_89,
+ STV090x_32APSK_910,
+ STV090x_MODCODE_UNKNOWN
+};
+
+enum stv090x_search {
+ STV090x_SEARCH_DSS = 0,
+ STV090x_SEARCH_DVBS1,
+ STV090x_SEARCH_DVBS2,
+ STV090x_SEARCH_AUTO
+};
+
+enum stv090x_algo {
+ STV090x_BLIND_SEARCH,
+ STV090x_COLD_SEARCH,
+ STV090x_WARM_SEARCH
+};
+
+enum stv090x_delsys {
+ STV090x_ERROR = 0,
+ STV090x_DVBS1 = 1,
+ STV090x_DVBS2,
+ STV090x_DSS
+};
+
+struct stv090x_long_frame_crloop {
+ enum stv090x_modcod modcod;
+
+ u8 crl_pilots_on_2;
+ u8 crl_pilots_off_2;
+ u8 crl_pilots_on_5;
+ u8 crl_pilots_off_5;
+ u8 crl_pilots_on_10;
+ u8 crl_pilots_off_10;
+ u8 crl_pilots_on_20;
+ u8 crl_pilots_off_20;
+ u8 crl_pilots_on_30;
+ u8 crl_pilots_off_30;
+};
+
+struct stv090x_short_frame_crloop {
+ enum stv090x_modulation modulation;
+
+ u8 crl_2; /* SR < 3M */
+ u8 crl_5; /* 3 < SR <= 7M */
+ u8 crl_10; /* 7 < SR <= 15M */
+ u8 crl_20; /* 15 < SR <= 25M */
+ u8 crl_30; /* 25 < SR <= 45M */
+};
+
+struct stv090x_reg {
+ u16 addr;
+ u8 data;
+};
+
+struct stv090x_tab {
+ s32 real;
+ s32 read;
+};
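+/*
+ * Note (illustrative): .read holds the raw register reading and .real
+ * the corresponding calibrated quantity; stv090x_table_lookup() in
+ * stv090x.c linearly interpolates .real between the two .read entries
+ * bracketing a measurement, as done for the stv090x_rf_tab and
+ * stv090x_s1cn_tab/stv090x_s2cn_tab lookups.
+ */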
+
+struct stv090x_state {
+ enum stv090x_device device;
+ enum stv090x_demodulator demod;
+ enum stv090x_mode demod_mode;
+ u32 dev_ver;
+
+ struct i2c_adapter *i2c;
+ const struct stv090x_config *config;
+ struct dvb_frontend frontend;
+
+ u32 *verbose; /* Cached module verbosity */
+
+ enum stv090x_delsys delsys;
+ enum stv090x_fec fec;
+ enum stv090x_modulation modulation;
+ enum stv090x_modcod modcod;
+ enum stv090x_search search_mode;
+ enum stv090x_frame frame_len;
+ enum stv090x_pilot pilots;
+ enum stv090x_rolloff rolloff;
+ enum stv090x_inversion inversion;
+ enum stv090x_algo algo;
+
+ u32 frequency;
+ u32 srate;
+
+ s32 mclk; /* Masterclock Divider factor */
+ s32 tuner_bw;
+
+ u32 tuner_refclk;
+
+ s32 search_range;
+
+ s32 DemodTimeout;
+ s32 FecTimeout;
+};
+
+#endif /* __STV090x_PRIV_H */
diff --git a/drivers/media/dvb/frontends/stv090x_reg.h b/drivers/media/dvb/frontends/stv090x_reg.h
new file mode 100644
index 00000000000..57b6abbbd32
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv090x_reg.h
@@ -0,0 +1,2373 @@
+/*
+ STV0900/0903 Multistandard Broadcast Frontend driver
+ Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+ Copyright (C) ST Microelectronics
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __STV090x_REG_H
+#define __STV090x_REG_H
+
+#define STV090x_MID 0xf100
+#define STV090x_OFFST_MCHIP_IDENT_FIELD 4
+#define STV090x_WIDTH_MCHIP_IDENT_FIELD 4
+#define STV090x_OFFST_MRELEASE_FIELD 0
+#define STV090x_WIDTH_MRELEASE_FIELD 4
+
+#define STV090x_DACR1 0xf113
+#define STV090x_OFFST_DACR1_MODE_FIELD 5
+#define STV090x_WIDTH_DACR1_MODE_FIELD 3
+#define STV090x_OFFST_DACR1_VALUE_FIELD 0
+#define STV090x_WIDTH_DACR1_VALUE_FIELD 4
+
+#define STV090x_DACR2 0xf114
+#define STV090x_OFFST_DACR2_VALUE_FIELD 0
+#define STV090x_WIDTH_DACR2_VALUE_FIELD 8
+
+#define STV090x_OUTCFG 0xf11c
+#define STV090x_OFFST_OUTSERRS1_HZ_FIELD 6
+#define STV090x_WIDTH_OUTSERRS1_HZ_FIELD 1
+#define STV090x_OFFST_OUTSERRS2_HZ_FIELD 5
+#define STV090x_WIDTH_OUTSERRS2_HZ_FIELD 1
+#define STV090x_OFFST_OUTSERRS3_HZ_FIELD 4
+#define STV090x_WIDTH_OUTSERRS3_HZ_FIELD 1
+#define STV090x_OFFST_OUTPARRS3_HZ_FIELD 3
+#define STV090x_WIDTH_OUTPARRS3_HZ_FIELD 1
+
+#define STV090x_MODECFG 0xf11d
+
+#define STV090x_IRQSTATUS3 0xf120
+#define STV090x_OFFST_SPLL_LOCK_FIELD 5
+#define STV090x_WIDTH_SPLL_LOCK_FIELD 1
+#define STV090x_OFFST_SSTREAM_LCK_3_FIELD 4
+#define STV090x_WIDTH_SSTREAM_LCK_3_FIELD 1
+#define STV090x_OFFST_SSTREAM_LCK_2_FIELD 3
+#define STV090x_WIDTH_SSTREAM_LCK_2_FIELD 1
+#define STV090x_OFFST_SSTREAM_LCK_1_FIELD 2
+#define STV090x_WIDTH_SSTREAM_LCK_1_FIELD 1
+#define STV090x_OFFST_SDVBS1_PRF_2_FIELD 1
+#define STV090x_WIDTH_SDVBS1_PRF_2_FIELD 1
+#define STV090x_OFFST_SDVBS1_PRF_1_FIELD 0
+#define STV090x_WIDTH_SDVBS1_PRF_1_FIELD 1
+
+#define STV090x_IRQSTATUS2 0xf121
+#define STV090x_OFFST_SSPY_ENDSIM_3_FIELD 7
+#define STV090x_WIDTH_SSPY_ENDSIM_3_FIELD 1
+#define STV090x_OFFST_SSPY_ENDSIM_2_FIELD 6
+#define STV090x_WIDTH_SSPY_ENDSIM_2_FIELD 1
+#define STV090x_OFFST_SSPY_ENDSIM_1_FIELD 5
+#define STV090x_WIDTH_SSPY_ENDSIM_1_FIELD 1
+#define STV090x_OFFST_SPKTDEL_ERROR_2_FIELD 4
+#define STV090x_WIDTH_SPKTDEL_ERROR_2_FIELD 1
+#define STV090x_OFFST_SPKTDEL_LOCKB_2_FIELD 3
+#define STV090x_WIDTH_SPKTDEL_LOCKB_2_FIELD 1
+#define STV090x_OFFST_SPKTDEL_LOCK_2_FIELD 2
+#define STV090x_WIDTH_SPKTDEL_LOCK_2_FIELD 1
+#define STV090x_OFFST_SPKTDEL_ERROR_1_FIELD 1
+#define STV090x_WIDTH_SPKTDEL_ERROR_1_FIELD 1
+#define STV090x_OFFST_SPKTDEL_LOCKB_1_FIELD 0
+#define STV090x_WIDTH_SPKTDEL_LOCKB_1_FIELD 1
+
+#define STV090x_IRQSTATUS1 0xf122
+#define STV090x_OFFST_SPKTDEL_LOCK_1_FIELD 7
+#define STV090x_WIDTH_SPKTDEL_LOCK_1_FIELD 1
+#define STV090x_OFFST_SDEMOD_LOCKB_2_FIELD 2
+#define STV090x_WIDTH_SDEMOD_LOCKB_2_FIELD 1
+#define STV090x_OFFST_SDEMOD_LOCK_2_FIELD 1
+#define STV090x_WIDTH_SDEMOD_LOCK_2_FIELD 1
+#define STV090x_OFFST_SDEMOD_IRQ_2_FIELD 0
+#define STV090x_WIDTH_SDEMOD_IRQ_2_FIELD 1
+
+#define STV090x_IRQSTATUS0 0xf123
+#define STV090x_OFFST_SDEMOD_LOCKB_1_FIELD 7
+#define STV090x_WIDTH_SDEMOD_LOCKB_1_FIELD 1
+#define STV090x_OFFST_SDEMOD_LOCK_1_FIELD 6
+#define STV090x_WIDTH_SDEMOD_LOCK_1_FIELD 1
+#define STV090x_OFFST_SDEMOD_IRQ_1_FIELD 5
+#define STV090x_WIDTH_SDEMOD_IRQ_1_FIELD 1
+#define STV090x_OFFST_SBCH_ERRFLAG_FIELD 4
+#define STV090x_WIDTH_SBCH_ERRFLAG_FIELD 1
+#define STV090x_OFFST_SDISEQC2RX_IRQ_FIELD 3
+#define STV090x_WIDTH_SDISEQC2RX_IRQ_FIELD 1
+#define STV090x_OFFST_SDISEQC2TX_IRQ_FIELD 2
+#define STV090x_WIDTH_SDISEQC2TX_IRQ_FIELD 1
+#define STV090x_OFFST_SDISEQC1RX_IRQ_FIELD 1
+#define STV090x_WIDTH_SDISEQC1RX_IRQ_FIELD 1
+#define STV090x_OFFST_SDISEQC1TX_IRQ_FIELD 0
+#define STV090x_WIDTH_SDISEQC1TX_IRQ_FIELD 1
+
+#define STV090x_IRQMASK3 0xf124
+#define STV090x_OFFST_MPLL_LOCK_FIELD 5
+#define STV090x_WIDTH_MPLL_LOCK_FIELD 1
+#define STV090x_OFFST_MSTREAM_LCK_3_FIELD 2
+#define STV090x_WIDTH_MSTREAM_LCK_3_FIELD 3
+#define STV090x_OFFST_MSTREAM_LCK_2_FIELD 2
+#define STV090x_WIDTH_MSTREAM_LCK_2_FIELD 3
+#define STV090x_OFFST_MSTREAM_LCK_1_FIELD 2
+#define STV090x_WIDTH_MSTREAM_LCK_1_FIELD 3
+#define STV090x_OFFST_MDVBS1_PRF_2_FIELD 1
+#define STV090x_WIDTH_MDVBS1_PRF_2_FIELD 1
+#define STV090x_OFFST_MDVBS1_PRF_1_FIELD 0
+#define STV090x_WIDTH_MDVBS1_PRF_1_FIELD 1
+
+#define STV090x_IRQMASK2 0xf125
+#define STV090x_OFFST_MSPY_ENDSIM_3_FIELD 5
+#define STV090x_WIDTH_MSPY_ENDSIM_3_FIELD 3
+#define STV090x_OFFST_MSPY_ENDSIM_2_FIELD 5
+#define STV090x_WIDTH_MSPY_ENDSIM_2_FIELD 3
+#define STV090x_OFFST_MSPY_ENDSIM_1_FIELD 5
+#define STV090x_WIDTH_MSPY_ENDSIM_1_FIELD 3
+#define STV090x_OFFST_MPKTDEL_ERROR_2_FIELD 4
+#define STV090x_WIDTH_MPKTDEL_ERROR_2_FIELD 1
+#define STV090x_OFFST_MPKTDEL_LOCKB_2_FIELD 3
+#define STV090x_WIDTH_MPKTDEL_LOCKB_2_FIELD 1
+#define STV090x_OFFST_MPKTDEL_LOCK_2_FIELD 2
+#define STV090x_WIDTH_MPKTDEL_LOCK_2_FIELD 1
+#define STV090x_OFFST_MPKTDEL_ERROR_1_FIELD 1
+#define STV090x_WIDTH_MPKTDEL_ERROR_1_FIELD 1
+#define STV090x_OFFST_MPKTDEL_LOCKB_1_FIELD 0
+#define STV090x_WIDTH_MPKTDEL_LOCKB_1_FIELD 1
+
+#define STV090x_IRQMASK1 0xf126
+#define STV090x_OFFST_MPKTDEL_LOCK_1_FIELD 7
+#define STV090x_WIDTH_MPKTDEL_LOCK_1_FIELD 1
+#define STV090x_OFFST_MEXTPINB2_FIELD 6
+#define STV090x_WIDTH_MEXTPINB2_FIELD 1
+#define STV090x_OFFST_MEXTPIN2_FIELD 5
+#define STV090x_WIDTH_MEXTPIN2_FIELD 1
+#define STV090x_OFFST_MEXTPINB1_FIELD 4
+#define STV090x_WIDTH_MEXTPINB1_FIELD 1
+#define STV090x_OFFST_MEXTPIN1_FIELD 3
+#define STV090x_WIDTH_MEXTPIN1_FIELD 1
+#define STV090x_OFFST_MDEMOD_LOCKB_2_FIELD 2
+#define STV090x_WIDTH_MDEMOD_LOCKB_2_FIELD 1
+#define STV090x_OFFST_MDEMOD_LOCK_2_FIELD 1
+#define STV090x_WIDTH_MDEMOD_LOCK_2_FIELD 1
+#define STV090x_OFFST_MDEMOD_IRQ_2_FIELD 0
+#define STV090x_WIDTH_MDEMOD_IRQ_2_FIELD 1
+
+#define STV090x_IRQMASK0 0xf127
+#define STV090x_OFFST_MDEMOD_LOCKB_1_FIELD 7
+#define STV090x_WIDTH_MDEMOD_LOCKB_1_FIELD 1
+#define STV090x_OFFST_MDEMOD_LOCK_1_FIELD 6
+#define STV090x_WIDTH_MDEMOD_LOCK_1_FIELD 1
+#define STV090x_OFFST_MDEMOD_IRQ_1_FIELD 5
+#define STV090x_WIDTH_MDEMOD_IRQ_1_FIELD 1
+#define STV090x_OFFST_MBCH_ERRFLAG_FIELD 4
+#define STV090x_WIDTH_MBCH_ERRFLAG_FIELD 1
+#define STV090x_OFFST_MDISEQC2RX_IRQ_FIELD 3
+#define STV090x_WIDTH_MDISEQC2RX_IRQ_FIELD 1
+#define STV090x_OFFST_MDISEQC2TX_IRQ_FIELD 2
+#define STV090x_WIDTH_MDISEQC2TX_IRQ_FIELD 1
+#define STV090x_OFFST_MDISEQC1RX_IRQ_FIELD 1
+#define STV090x_WIDTH_MDISEQC1RX_IRQ_FIELD 1
+#define STV090x_OFFST_MDISEQC1TX_IRQ_FIELD 0
+#define STV090x_WIDTH_MDISEQC1TX_IRQ_FIELD 1
+
+#define STV090x_I2CCFG 0xf129
+#define STV090x_OFFST_I2C_FASTMODE_FIELD 3
+#define STV090x_WIDTH_I2C_FASTMODE_FIELD 1
+#define STV090x_OFFST_I2CADDR_INC_FIELD 0
+#define STV090x_WIDTH_I2CADDR_INC_FIELD 2
+
+#define STV090x_Px_I2CRPT(__x) (0xf12a + (__x - 1) * 0x1)
+#define STV090x_P1_I2CRPT STV090x_Px_I2CRPT(1)
+#define STV090x_P2_I2CRPT STV090x_Px_I2CRPT(2)
+#define STV090x_OFFST_Px_I2CT_ON_FIELD 7
+#define STV090x_WIDTH_Px_I2CT_ON_FIELD 1
+#define STV090x_OFFST_Px_ENARPT_LEVEL_FIELD 4
+#define STV090x_WIDTH_Px_ENARPT_LEVEL_FIELD 3
+#define STV090x_OFFST_Px_SCLT_DELAY_FIELD 3
+#define STV090x_WIDTH_Px_SCLT_DELAY_FIELD 1
+#define STV090x_OFFST_Px_STOP_ENABLE_FIELD 2
+#define STV090x_WIDTH_Px_STOP_ENABLE_FIELD 1
+#define STV090x_OFFST_Px_STOP_SDAT2SDA_FIELD 1
+#define STV090x_WIDTH_Px_STOP_SDAT2SDA_FIELD 1
+
+#define STV090x_CLKI2CFG 0xf140
+#define STV090x_OFFST_CLKI2_OPD_FIELD 7
+#define STV090x_WIDTH_CLKI2_OPD_FIELD 1
+#define STV090x_OFFST_CLKI2_CONFIG_FIELD 1
+#define STV090x_WIDTH_CLKI2_CONFIG_FIELD 6
+#define STV090x_OFFST_CLKI2_XOR_FIELD 0
+#define STV090x_WIDTH_CLKI2_XOR_FIELD 1
+
+#define STV090x_GPIOxCFG(__x) (0xf141 + (__x - 1))
+#define STV090x_GPIO1CFG STV090x_GPIOxCFG(1)
+#define STV090x_GPIO2CFG STV090x_GPIOxCFG(2)
+#define STV090x_GPIO3CFG STV090x_GPIOxCFG(3)
+#define STV090x_GPIO4CFG STV090x_GPIOxCFG(4)
+#define STV090x_GPIO5CFG STV090x_GPIOxCFG(5)
+#define STV090x_GPIO6CFG STV090x_GPIOxCFG(6)
+#define STV090x_GPIO7CFG STV090x_GPIOxCFG(7)
+#define STV090x_GPIO8CFG STV090x_GPIOxCFG(8)
+#define STV090x_GPIO9CFG STV090x_GPIOxCFG(9)
+#define STV090x_GPIO10CFG STV090x_GPIOxCFG(10)
+#define STV090x_GPIO11CFG STV090x_GPIOxCFG(11)
+#define STV090x_GPIO12CFG STV090x_GPIOxCFG(12)
+#define STV090x_GPIO13CFG STV090x_GPIOxCFG(13)
+#define STV090x_OFFST_GPIOx_OPD_FIELD 7
+#define STV090x_WIDTH_GPIOx_OPD_FIELD 1
+#define STV090x_OFFST_GPIOx_CONFIG_FIELD 1
+#define STV090x_WIDTH_GPIOx_CONFIG_FIELD 6
+#define STV090x_OFFST_GPIOx_XOR_FIELD 0
+#define STV090x_WIDTH_GPIOx_XOR_FIELD 1
+
+#define STV090x_CSxCFG(__x) (0xf14e + __x * 0x1)
+#define STV090x_CS0CFG STV090x_CSxCFG(0)
+#define STV090x_CS1CFG STV090x_CSxCFG(1)
+#define STV090x_OFFST_CSX_OPD_FIELD 7
+#define STV090x_WIDTH_CSX_OPD_FIELD 1
+#define STV090x_OFFST_CSX_CONFIG_FIELD 1
+#define STV090x_WIDTH_CSX_CONFIG_FIELD 6
+#define STV090x_OFFST_CSX_XOR_FIELD 0
+#define STV090x_WIDTH_CSX_XOR_FIELD 1
+
+
+#define STV090x_STDBYCFG 0xf150
+#define STV090x_OFFST_STDBY_OPD_FIELD 7
+#define STV090x_WIDTH_STDBY_OPD_FIELD 1
+#define STV090x_OFFST_STDBY_CONFIG_FIELD 1
+#define STV090x_WIDTH_STDBY_CONFIG_FIELD 6
+#define STV090x_OFFST_STDBY_XOR_FIELD 0
+#define STV090x_WIDTH_STDBY_XOR_FIELD 1
+
+#define STV090x_DIRCLKCFG 0xf151
+#define STV090x_OFFST_DIRCLK_OPD_FIELD 7
+#define STV090x_WIDTH_DIRCLK_OPD_FIELD 1
+#define STV090x_OFFST_DIRCLK_CONFIG_FIELD 1
+#define STV090x_WIDTH_DIRCLK_CONFIG_FIELD 6
+#define STV090x_OFFST_DIRCLK_XOR_FIELD 0
+#define STV090x_WIDTH_DIRCLK_XOR_FIELD 1
+
+
+#define STV090x_AGCRFxCFG(__x) (0xf152 + (__x - 1) * 0x4)
+#define STV090x_AGCRF1CFG STV090x_AGCRFxCFG(1)
+#define STV090x_AGCRF2CFG STV090x_AGCRFxCFG(2)
+#define STV090x_OFFST_AGCRFx_OPD_FIELD 7
+#define STV090x_WIDTH_AGCRFx_OPD_FIELD 1
+#define STV090x_OFFST_AGCRFx_CONFIG_FIELD 1
+#define STV090x_WIDTH_AGCRFx_CONFIG_FIELD 6
+#define STV090x_OFFST_AGCRFx_XOR_FIELD 0
+#define STV090x_WIDTH_AGCRFx_XOR_FIELD 1
+
+#define STV090x_SDATxCFG(__x) (0xf153 + (__x - 1) * 0x4)
+#define STV090x_SDAT1CFG STV090x_SDATxCFG(1)
+#define STV090x_SDAT2CFG STV090x_SDATxCFG(2)
+#define STV090x_OFFST_SDATx_OPD_FIELD 7
+#define STV090x_WIDTH_SDATx_OPD_FIELD 1
+#define STV090x_OFFST_SDATx_CONFIG_FIELD 1
+#define STV090x_WIDTH_SDATx_CONFIG_FIELD 6
+#define STV090x_OFFST_SDATx_XOR_FIELD 0
+#define STV090x_WIDTH_SDATx_XOR_FIELD 1
+
+#define STV090x_SCLTxCFG(__x) (0xf154 + (__x - 1) * 0x4)
+#define STV090x_SCLT1CFG STV090x_SCLTxCFG(1)
+#define STV090x_SCLT2CFG STV090x_SCLTxCFG(2)
+#define STV090x_OFFST_SCLTx_OPD_FIELD 7
+#define STV090x_WIDTH_SCLTx_OPD_FIELD 1
+#define STV090x_OFFST_SCLTx_CONFIG_FIELD 1
+#define STV090x_WIDTH_SCLTx_CONFIG_FIELD 6
+#define STV090x_OFFST_SCLTx_XOR_FIELD 0
+#define STV090x_WIDTH_SCLTx_XOR_FIELD 1
+
+#define STV090x_DISEQCOxCFG(__x) (0xf155 + (__x - 1) * 0x4)
+#define STV090x_DISEQCO1CFG STV090x_DISEQCOxCFG(1)
+#define STV090x_DISEQCO2CFG STV090x_DISEQCOxCFG(2)
+#define STV090x_OFFST_DISEQCOx_OPD_FIELD 7
+#define STV090x_WIDTH_DISEQCOx_OPD_FIELD 1
+#define STV090x_OFFST_DISEQCOx_CONFIG_FIELD 1
+#define STV090x_WIDTH_DISEQCOx_CONFIG_FIELD 6
+#define STV090x_OFFST_DISEQCOx_XOR_FIELD 0
+#define STV090x_WIDTH_DISEQCOx_XOR_FIELD 1
+
+#define STV090x_CLKOUT27CFG 0xf15a
+#define STV090x_OFFST_CLKOUT27_OPD_FIELD 7
+#define STV090x_WIDTH_CLKOUT27_OPD_FIELD 1
+#define STV090x_OFFST_CLKOUT27_CONFIG_FIELD 1
+#define STV090x_WIDTH_CLKOUT27_CONFIG_FIELD 6
+#define STV090x_OFFST_CLKOUT27_XOR_FIELD 0
+#define STV090x_WIDTH_CLKOUT27_XOR_FIELD 1
+
+#define STV090x_ERRORxCFG(__x) (0xf15b + (__x - 1) * 0x5)
+#define STV090x_ERROR1CFG STV090x_ERRORxCFG(1)
+#define STV090x_ERROR2CFG STV090x_ERRORxCFG(2)
+#define STV090x_ERROR3CFG STV090x_ERRORxCFG(3)
+#define STV090x_OFFST_ERRORx_OPD_FIELD 7
+#define STV090x_WIDTH_ERRORx_OPD_FIELD 1
+#define STV090x_OFFST_ERRORx_CONFIG_FIELD 1
+#define STV090x_WIDTH_ERRORx_CONFIG_FIELD 6
+#define STV090x_OFFST_ERRORx_XOR_FIELD 0
+#define STV090x_WIDTH_ERRORx_XOR_FIELD 1
+
+#define STV090x_DPNxCFG(__x) (0xf15c + (__x - 1) * 0x5)
+#define STV090x_DPN1CFG STV090x_DPNxCFG(1)
+#define STV090x_DPN2CFG STV090x_DPNxCFG(2)
+#define STV090x_DPN3CFG STV090x_DPNxCFG(3)
+#define STV090x_OFFST_DPNx_OPD_FIELD 7
+#define STV090x_WIDTH_DPNx_OPD_FIELD 1
+#define STV090x_OFFST_DPNx_CONFIG_FIELD 1
+#define STV090x_WIDTH_DPNx_CONFIG_FIELD 6
+#define STV090x_OFFST_DPNx_XOR_FIELD 0
+#define STV090x_WIDTH_DPNx_XOR_FIELD 1
+
+#define STV090x_STROUTxCFG(__x) (0xf15d + (__x - 1) * 0x5)
+#define STV090x_STROUT1CFG STV090x_STROUTxCFG(1)
+#define STV090x_STROUT2CFG STV090x_STROUTxCFG(2)
+#define STV090x_STROUT3CFG STV090x_STROUTxCFG(3)
+#define STV090x_OFFST_STROUTx_OPD_FIELD 7
+#define STV090x_WIDTH_STROUTx_OPD_FIELD 1
+#define STV090x_OFFST_STROUTx_CONFIG_FIELD 1
+#define STV090x_WIDTH_STROUTx_CONFIG_FIELD 6
+#define STV090x_OFFST_STROUTx_XOR_FIELD 0
+#define STV090x_WIDTH_STROUTx_XOR_FIELD 1
+
+#define STV090x_CLKOUTxCFG(__x) (0xf15e + (__x - 1) * 0x5)
+#define STV090x_CLKOUT1CFG STV090x_CLKOUTxCFG(1)
+#define STV090x_CLKOUT2CFG STV090x_CLKOUTxCFG(2)
+#define STV090x_CLKOUT3CFG STV090x_CLKOUTxCFG(3)
+#define STV090x_OFFST_CLKOUTx_OPD_FIELD 7
+#define STV090x_WIDTH_CLKOUTx_OPD_FIELD 1
+#define STV090x_OFFST_CLKOUTx_CONFIG_FIELD 1
+#define STV090x_WIDTH_CLKOUTx_CONFIG_FIELD 6
+#define STV090x_OFFST_CLKOUTx_XOR_FIELD 0
+#define STV090x_WIDTH_CLKOUTx_XOR_FIELD 1
+
+#define STV090x_DATAxCFG(__x) (0xf15f + (__x - 71) * 0x5)
+#define STV090x_DATA71CFG STV090x_DATAxCFG(71)
+#define STV090x_DATA72CFG STV090x_DATAxCFG(72)
+#define STV090x_DATA73CFG STV090x_DATAxCFG(73)
+#define STV090x_OFFST_DATAx_OPD_FIELD 7
+#define STV090x_WIDTH_DATAx_OPD_FIELD 1
+#define STV090x_OFFST_DATAx_CONFIG_FIELD 1
+#define STV090x_WIDTH_DATAx_CONFIG_FIELD 6
+#define STV090x_OFFST_DATAx_XOR_FIELD 0
+#define STV090x_WIDTH_DATAx_XOR_FIELD 1
+
+#define STV090x_NCOARSE 0xf1b3
+#define STV090x_OFFST_M_DIV_FIELD 0
+#define STV090x_WIDTH_M_DIV_FIELD 8
+
+#define STV090x_SYNTCTRL 0xf1b6
+#define STV090x_OFFST_STANDBY_FIELD 7
+#define STV090x_WIDTH_STANDBY_FIELD 1
+#define STV090x_OFFST_BYPASSPLLCORE_FIELD 6
+#define STV090x_WIDTH_BYPASSPLLCORE_FIELD 1
+#define STV090x_OFFST_SELX1RATIO_FIELD 5
+#define STV090x_WIDTH_SELX1RATIO_FIELD 1
+#define STV090x_OFFST_STOP_PLL_FIELD 3
+#define STV090x_WIDTH_STOP_PLL_FIELD 1
+#define STV090x_OFFST_BYPASSPLLFSK_FIELD 2
+#define STV090x_WIDTH_BYPASSPLLFSK_FIELD 1
+#define STV090x_OFFST_SELOSCI_FIELD 1
+#define STV090x_WIDTH_SELOSCI_FIELD 1
+#define STV090x_OFFST_BYPASSPLLADC_FIELD 0
+#define STV090x_WIDTH_BYPASSPLLADC_FIELD 1
+
+#define STV090x_FILTCTRL 0xf1b7
+#define STV090x_OFFST_INV_CLK135_FIELD 7
+#define STV090x_WIDTH_INV_CLK135_FIELD 1
+#define STV090x_OFFST_SEL_FSKCKDIV_FIELD 2
+#define STV090x_WIDTH_SEL_FSKCKDIV_FIELD 1
+#define STV090x_OFFST_INV_CLKFSK_FIELD 1
+#define STV090x_WIDTH_INV_CLKFSK_FIELD 1
+#define STV090x_OFFST_BYPASS_APPLI_FIELD 0
+#define STV090x_WIDTH_BYPASS_APPLI_FIELD 1
+
+#define STV090x_PLLSTAT 0xf1b8
+#define STV090x_OFFST_PLLLOCK_FIELD 0
+#define STV090x_WIDTH_PLLLOCK_FIELD 1
+
+#define STV090x_STOPCLK1 0xf1c2
+#define STV090x_OFFST_STOP_CLKPKDT2_FIELD 6
+#define STV090x_WIDTH_STOP_CLKPKDT2_FIELD 1
+#define STV090x_OFFST_STOP_CLKPKDT1_FIELD 5
+#define STV090x_WIDTH_STOP_CLKPKDT1_FIELD 1
+#define STV090x_OFFST_STOP_CLKFEC_FIELD 4
+#define STV090x_WIDTH_STOP_CLKFEC_FIELD 1
+#define STV090x_OFFST_STOP_CLKADCI2_FIELD 3
+#define STV090x_WIDTH_STOP_CLKADCI2_FIELD 1
+#define STV090x_OFFST_INV_CLKADCI2_FIELD 2
+#define STV090x_WIDTH_INV_CLKADCI2_FIELD 1
+#define STV090x_OFFST_STOP_CLKADCI1_FIELD 1
+#define STV090x_WIDTH_STOP_CLKADCI1_FIELD 1
+#define STV090x_OFFST_INV_CLKADCI1_FIELD 0
+#define STV090x_WIDTH_INV_CLKADCI1_FIELD 1
+
+#define STV090x_STOPCLK2 0xf1c3
+#define STV090x_OFFST_STOP_CLKSAMP2_FIELD 4
+#define STV090x_WIDTH_STOP_CLKSAMP2_FIELD 1
+#define STV090x_OFFST_STOP_CLKSAMP1_FIELD 3
+#define STV090x_WIDTH_STOP_CLKSAMP1_FIELD 1
+#define STV090x_OFFST_STOP_CLKVIT2_FIELD 2
+#define STV090x_WIDTH_STOP_CLKVIT2_FIELD 1
+#define STV090x_OFFST_STOP_CLKVIT1_FIELD 1
+#define STV090x_WIDTH_STOP_CLKVIT1_FIELD 1
+#define STV090x_OFFST_STOP_CLKTS_FIELD 0
+#define STV090x_WIDTH_STOP_CLKTS_FIELD 1
+
+#define STV090x_TSTTNR0 0xf1df
+#define STV090x_OFFST_SEL_FSK_FIELD 7
+#define STV090x_WIDTH_SEL_FSK_FIELD 1
+#define STV090x_OFFST_FSK_PON_FIELD 2
+#define STV090x_WIDTH_FSK_PON_FIELD 1
+
+#define STV090x_TSTTNR1 0xf1e0
+#define STV090x_OFFST_ADC1_PON_FIELD 1
+#define STV090x_WIDTH_ADC1_PON_FIELD 1
+#define STV090x_OFFST_ADC1_INMODE_FIELD 0
+#define STV090x_WIDTH_ADC1_INMODE_FIELD 1
+
+#define STV090x_TSTTNR2 0xf1e1
+#define STV090x_OFFST_DISEQC1_PON_FIELD 5
+#define STV090x_WIDTH_DISEQC1_PON_FIELD 1
+
+#define STV090x_TSTTNR3 0xf1e2
+#define STV090x_OFFST_ADC2_PON_FIELD 1
+#define STV090x_WIDTH_ADC2_PON_FIELD 1
+#define STV090x_OFFST_ADC2_INMODE_FIELD 0
+#define STV090x_WIDTH_ADC2_INMODE_FIELD 1
+
+#define STV090x_TSTTNR4 0xf1e3
+#define STV090x_OFFST_DISEQC2_PON_FIELD 5
+#define STV090x_WIDTH_DISEQC2_PON_FIELD 1
+
+#define STV090x_FSKTFC2 0xf170
+#define STV090x_OFFST_FSKT_KMOD_FIELD 2
+#define STV090x_WIDTH_FSKT_KMOD_FIELD 6
+#define STV090x_OFFST_FSKT_CAR_FIELD 0
+#define STV090x_WIDTH_FSKT_CAR_FIELD 2
+
+#define STV090x_FSKTFC1 0xf171
+#define STV090x_OFFST_FSKTC1_CAR_FIELD 0
+#define STV090x_WIDTH_FSKTC1_CAR_FIELD 8
+
+#define STV090x_FSKTFC0 0xf172
+#define STV090x_OFFST_FSKTC0_CAR_FIELD 0
+#define STV090x_WIDTH_FSKTC0_CAR_FIELD 8
+
+#define STV090x_FSKTDELTAF1 0xf173
+#define STV090x_OFFST_FSKTF1_DELTAF_FIELD 0
+#define STV090x_WIDTH_FSKTF1_DELTAF_FIELD 4
+
+#define STV090x_FSKTDELTAF0 0xf174
+#define STV090x_OFFST_FSKTF0_DELTAF_FIELD 0
+#define STV090x_WIDTH_FSKTF0_DELTAF_FIELD 8
+
+#define STV090x_FSKTCTRL 0xf175
+#define STV090x_OFFST_FSKT_EN_SGN_FIELD 6
+#define STV090x_WIDTH_FSKT_EN_SGN_FIELD 1
+#define STV090x_OFFST_FSKT_MOD_SGN_FIELD 5
+#define STV090x_WIDTH_FSKT_MOD_SGN_FIELD 1
+#define STV090x_OFFST_FSKT_MOD_EN_FIELD 2
+#define STV090x_WIDTH_FSKT_MOD_EN_FIELD 3
+#define STV090x_OFFST_FSKT_DACMODE_FIELD 0
+#define STV090x_WIDTH_FSKT_DACMODE_FIELD 2
+
+#define STV090x_FSKRFC2 0xf176
+#define STV090x_OFFST_FSKRC2_DETSGN_FIELD 6
+#define STV090x_WIDTH_FSKRC2_DETSGN_FIELD 1
+#define STV090x_OFFST_FSKRC2_OUTSGN_FIELD 5
+#define STV090x_WIDTH_FSKRC2_OUTSGN_FIELD 1
+#define STV090x_OFFST_FSKRC2_KAGC_FIELD 2
+#define STV090x_WIDTH_FSKRC2_KAGC_FIELD 3
+#define STV090x_OFFST_FSKRC2_CAR_FIELD 0
+#define STV090x_WIDTH_FSKRC2_CAR_FIELD 2
+
+#define STV090x_FSKRFC1 0xf177
+#define STV090x_OFFST_FSKRC1_CAR_FIELD 0
+#define STV090x_WIDTH_FSKRC1_CAR_FIELD 8
+
+#define STV090x_FSKRFC0 0xf178
+#define STV090x_OFFST_FSKRC0_CAR_FIELD 0
+#define STV090x_WIDTH_FSKRC0_CAR_FIELD 8
+
+#define STV090x_FSKRK1 0xf179
+#define STV090x_OFFST_FSKR_K1_EXP_FIELD 5
+#define STV090x_WIDTH_FSKR_K1_EXP_FIELD 3
+#define STV090x_OFFST_FSKR_K1_MANT_FIELD 0
+#define STV090x_WIDTH_FSKR_K1_MANT_FIELD 5
+
+#define STV090x_FSKRK2 0xf17a
+#define STV090x_OFFST_FSKR_K2_EXP_FIELD 5
+#define STV090x_WIDTH_FSKR_K2_EXP_FIELD 3
+#define STV090x_OFFST_FSKR_K2_MANT_FIELD 0
+#define STV090x_WIDTH_FSKR_K2_MANT_FIELD 5
+
+#define STV090x_FSKRAGCR 0xf17b
+#define STV090x_OFFST_FSKR_OUTCTL_FIELD 6
+#define STV090x_WIDTH_FSKR_OUTCTL_FIELD 2
+#define STV090x_OFFST_FSKR_AGC_REF_FIELD 0
+#define STV090x_WIDTH_FSKR_AGC_REF_FIELD 6
+
+#define STV090x_FSKRAGC 0xf17c
+#define STV090x_OFFST_FSKR_AGC_ACCU_FIELD 0
+#define STV090x_WIDTH_FSKR_AGC_ACCU_FIELD 8
+
+#define STV090x_FSKRALPHA 0xf17d
+#define STV090x_OFFST_FSKR_ALPHA_EXP_FIELD 2
+#define STV090x_WIDTH_FSKR_ALPHA_EXP_FIELD 3
+#define STV090x_OFFST_FSKR_ALPHA_M_FIELD 0
+#define STV090x_WIDTH_FSKR_ALPHA_M_FIELD 2
+
+#define STV090x_FSKRPLTH1 0xf17e
+#define STV090x_OFFST_FSKR_BETA_FIELD 4
+#define STV090x_WIDTH_FSKR_BETA_FIELD 4
+#define STV090x_OFFST_FSKR_PLL_TRESH1_FIELD 0
+#define STV090x_WIDTH_FSKR_PLL_TRESH1_FIELD 4
+
+#define STV090x_FSKRPLTH0 0xf17f
+#define STV090x_OFFST_FSKR_PLL_TRESH0_FIELD 0
+#define STV090x_WIDTH_FSKR_PLL_TRESH0_FIELD 8
+
+#define STV090x_FSKRDF1 0xf180
+#define STV090x_OFFST_FSKR_DELTAF1_FIELD 0
+#define STV090x_WIDTH_FSKR_DELTAF1_FIELD 5
+
+#define STV090x_FSKRDF0 0xf181
+#define STV090x_OFFST_FSKR_DELTAF0_FIELD 0
+#define STV090x_WIDTH_FSKR_DELTAF0_FIELD 8
+
+#define STV090x_FSKRSTEPP 0xf182
+#define STV090x_OFFST_FSKR_STEP_PLUS_FIELD 0
+#define STV090x_WIDTH_FSKR_STEP_PLUS_FIELD 8
+
+#define STV090x_FSKRSTEPM 0xf183
+#define STV090x_OFFST_FSKR_STEP_MINUS_FIELD 0
+#define STV090x_WIDTH_FSKR_STEP_MINUS_FIELD 8
+
+#define STV090x_FSKRDET1 0xf184
+#define STV090x_OFFST_FSKR_CARDET1_ACCU_FIELD 0
+#define STV090x_WIDTH_FSKR_CARDET1_ACCU_FIELD 4
+
+#define STV090x_FSKRDET0 0xf185
+#define STV090x_OFFST_FSKR_CARDET0_ACCU_FIELD 0
+#define STV090x_WIDTH_FSKR_CARDET0_ACCU_FIELD 8
+
+#define STV090x_FSKRDTH1 0xf186
+#define STV090x_OFFST_FSKR_CARLOSS_THRESH1_FIELD 4
+#define STV090x_WIDTH_FSKR_CARLOSS_THRESH1_FIELD 4
+#define STV090x_OFFST_FSKR_CARDET_THRESH1_FIELD 0
+#define STV090x_WIDTH_FSKR_CARDET_THRESH1_FIELD 4
+
+#define STV090x_FSKRDTH0 0xf187
+#define STV090x_OFFST_FSKR_CARDET_THRESH0_FIELD 0
+#define STV090x_WIDTH_FSKR_CARDET_THRESH0_FIELD 8
+
+#define STV090x_FSKRLOSS 0xf188
+#define STV090x_OFFST_FSKR_CARLOSS_THRESH_FIELD 0
+#define STV090x_WIDTH_FSKR_CARLOSS_THRESH_FIELD 8
+
+#define STV090x_Px_DISTXCTL(__x) (0xF1A0 - (__x - 1) * 0x10)
+#define STV090x_P1_DISTXCTL STV090x_Px_DISTXCTL(1)
+#define STV090x_P2_DISTXCTL STV090x_Px_DISTXCTL(2)
+#define STV090x_OFFST_Px_TIM_OFF_FIELD 7
+#define STV090x_WIDTH_Px_TIM_OFF_FIELD 1
+#define STV090x_OFFST_Px_DISEQC_RESET_FIELD 6
+#define STV090x_WIDTH_Px_DISEQC_RESET_FIELD 1
+#define STV090x_OFFST_Px_TIM_CMD_FIELD 4
+#define STV090x_WIDTH_Px_TIM_CMD_FIELD 2
+#define STV090x_OFFST_Px_DIS_PRECHARGE_FIELD 3
+#define STV090x_WIDTH_Px_DIS_PRECHARGE_FIELD 1
+#define STV090x_OFFST_Px_DISTX_MODE_FIELD 0
+#define STV090x_WIDTH_Px_DISTX_MODE_FIELD 3
+
+#define STV090x_Px_DISRXCTL(__x) (0xf1a1 - (__x - 1) * 0x10)
+#define STV090x_P1_DISRXCTL STV090x_Px_DISRXCTL(1)
+#define STV090x_P2_DISRXCTL STV090x_Px_DISRXCTL(2)
+#define STV090x_OFFST_Px_RECEIVER_ON_FIELD 7
+#define STV090x_WIDTH_Px_RECEIVER_ON_FIELD 1
+#define STV090x_OFFST_Px_IGNO_SHORT22K_FIELD 6
+#define STV090x_WIDTH_Px_IGNO_SHORT22K_FIELD 1
+#define STV090x_OFFST_Px_ONECHIP_TRX_FIELD 5
+#define STV090x_WIDTH_Px_ONECHIP_TRX_FIELD 1
+#define STV090x_OFFST_Px_EXT_ENVELOP_FIELD 4
+#define STV090x_WIDTH_Px_EXT_ENVELOP_FIELD 1
+#define STV090x_OFFST_Px_PIN_SELECT_FIELD 2
+#define STV090x_WIDTH_Px_PIN_SELECT_FIELD 2
+#define STV090x_OFFST_Px_IRQ_RXEND_FIELD 1
+#define STV090x_WIDTH_Px_IRQ_RXEND_FIELD 1
+#define STV090x_OFFST_Px_IRQ_4NBYTES_FIELD 0
+#define STV090x_WIDTH_Px_IRQ_4NBYTES_FIELD 1
+
+#define STV090x_Px_DISRX_ST0(__x) (0xf1a4 - (__x - 1) * 0x10)
+#define STV090x_P1_DISRX_ST0 STV090x_Px_DISRX_ST0(1)
+#define STV090x_P2_DISRX_ST0 STV090x_Px_DISRX_ST0(2)
+#define STV090x_OFFST_Px_RX_END_FIELD 7
+#define STV090x_WIDTH_Px_RX_END_FIELD 1
+#define STV090x_OFFST_Px_RX_ACTIVE_FIELD 6
+#define STV090x_WIDTH_Px_RX_ACTIVE_FIELD 1
+#define STV090x_OFFST_Px_SHORT_22KHZ_FIELD 5
+#define STV090x_WIDTH_Px_SHORT_22KHZ_FIELD 1
+#define STV090x_OFFST_Px_CONT_TONE_FIELD 4
+#define STV090x_WIDTH_Px_CONT_TONE_FIELD 1
+#define STV090x_OFFST_Px_FIFO_4BREADY_FIELD 3
+#define STV090x_WIDTH_Px_FIFO_4BREADY_FIELD 2
+#define STV090x_OFFST_Px_FIFO_EMPTY_FIELD 2
+#define STV090x_WIDTH_Px_FIFO_EMPTY_FIELD 1
+#define STV090x_OFFST_Px_ABORT_DISRX_FIELD 0
+#define STV090x_WIDTH_Px_ABORT_DISRX_FIELD 1
+
+#define STV090x_Px_DISRX_ST1(__x) (0xf1a5 - (__x - 1) * 0x10)
+#define STV090x_P1_DISRX_ST1 STV090x_Px_DISRX_ST1(1)
+#define STV090x_P2_DISRX_ST1 STV090x_Px_DISRX_ST1(2)
+#define STV090x_OFFST_Px_RX_FAIL_FIELD 7
+#define STV090x_WIDTH_Px_RX_FAIL_FIELD 1
+#define STV090x_OFFST_Px_FIFO_PARITYFAIL_FIELD 6
+#define STV090x_WIDTH_Px_FIFO_PARITYFAIL_FIELD 1
+#define STV090x_OFFST_Px_RX_NONBYTE_FIELD 5
+#define STV090x_WIDTH_Px_RX_NONBYTE_FIELD 1
+#define STV090x_OFFST_Px_FIFO_OVERFLOW_FIELD 4
+#define STV090x_WIDTH_Px_FIFO_OVERFLOW_FIELD 1
+#define STV090x_OFFST_Px_FIFO_BYTENBR_FIELD 0
+#define STV090x_WIDTH_Px_FIFO_BYTENBR_FIELD 4
+
+#define STV090x_Px_DISRXDATA(__x) (0xf1a6 - (__x - 1) * 0x10)
+#define STV090x_P1_DISRXDATA STV090x_Px_DISRXDATA(1)
+#define STV090x_P2_DISRXDATA STV090x_Px_DISRXDATA(2)
+#define STV090x_OFFST_Px_DISRX_DATA_FIELD 0
+#define STV090x_WIDTH_Px_DISRX_DATA_FIELD 8
+
+#define STV090x_Px_DISTXDATA(__x) (0xf1a7 - (__x - 1) * 0x10)
+#define STV090x_P1_DISTXDATA STV090x_Px_DISTXDATA(1)
+#define STV090x_P2_DISTXDATA STV090x_Px_DISTXDATA(2)
+#define STV090x_OFFST_Px_DISEQC_FIFO_FIELD 0
+#define STV090x_WIDTH_Px_DISEQC_FIFO_FIELD 8
+
+#define STV090x_Px_DISTXSTATUS(__x) (0xf1a8 - (__x - 1) * 0x10)
+#define STV090x_P1_DISTXSTATUS STV090x_Px_DISTXSTATUS(1)
+#define STV090x_P2_DISTXSTATUS STV090x_Px_DISTXSTATUS(2)
+#define STV090x_OFFST_Px_TX_FAIL_FIELD 7
+#define STV090x_WIDTH_Px_TX_FAIL_FIELD 1
+#define STV090x_OFFST_Px_FIFO_FULL_FIELD 6
+#define STV090x_WIDTH_Px_FIFO_FULL_FIELD 1
+#define STV090x_OFFST_Px_TX_IDLE_FIELD 5
+#define STV090x_WIDTH_Px_TX_IDLE_FIELD 1
+#define STV090x_OFFST_Px_GAP_BURST_FIELD 4
+#define STV090x_WIDTH_Px_GAP_BURST_FIELD 1
+#define STV090x_OFFST_Px_TXFIFO_BYTES_FIELD 0
+#define STV090x_WIDTH_Px_TXFIFO_BYTES_FIELD 4
+
+#define STV090x_Px_F22TX(__x) (0xf1a9 - (__x - 1) * 0x10)
+#define STV090x_P1_F22TX STV090x_Px_F22TX(1)
+#define STV090x_P2_F22TX STV090x_Px_F22TX(2)
+#define STV090x_OFFST_Px_F22_REG_FIELD 0
+#define STV090x_WIDTH_Px_F22_REG_FIELD 8
+
+#define STV090x_Px_F22RX(__x) (0xf1aa - (__x - 1) * 0x10)
+#define STV090x_P1_F22RX STV090x_Px_F22RX(1)
+#define STV090x_P2_F22RX STV090x_Px_F22RX(2)
+#define STV090x_OFFST_Px_F22RX_REG_FIELD 0
+#define STV090x_WIDTH_Px_F22RX_REG_FIELD 8
+
+#define STV090x_Px_ACRPRESC(__x) (0xf1ac - (__x - 1) * 0x10)
+#define STV090x_P1_ACRPRESC STV090x_Px_ACRPRESC(1)
+#define STV090x_P2_ACRPRESC STV090x_Px_ACRPRESC(2)
+#define STV090x_OFFST_Px_ACR_PRESC_FIELD 0
+#define STV090x_WIDTH_Px_ACR_PRESC_FIELD 3
+
+#define STV090x_Px_ACRDIV(__x) (0xf1ad - (__x - 1) * 0x10)
+#define STV090x_P1_ACRDIV STV090x_Px_ACRDIV(1)
+#define STV090x_P2_ACRDIV STV090x_Px_ACRDIV(2)
+#define STV090x_OFFST_Px_ACR_DIV_FIELD 0
+#define STV090x_WIDTH_Px_ACR_DIV_FIELD 8
+
+#define STV090x_Px_IQCONST(__x) (0xF400 - (__x - 1) * 0x200)
+#define STV090x_P1_IQCONST STV090x_Px_IQCONST(1)
+#define STV090x_P2_IQCONST STV090x_Px_IQCONST(2)
+#define STV090x_OFFST_Px_CONSTEL_SELECT_FIELD 5
+#define STV090x_WIDTH_Px_CONSTEL_SELECT_FIELD 2
+
+#define STV090x_Px_NOSCFG(__x) (0xF401 - (__x - 1) * 0x200)
+#define STV090x_P1_NOSCFG STV090x_Px_NOSCFG(1)
+#define STV090x_P2_NOSCFG STV090x_Px_NOSCFG(2)
+#define STV090x_OFFST_Px_NOSPLH_BETA_FIELD 3
+#define STV090x_WIDTH_Px_NOSPLH_BETA_FIELD 2
+#define STV090x_OFFST_Px_NOSDATA_BETA_FIELD 0
+#define STV090x_WIDTH_Px_NOSDATA_BETA_FIELD 3
+
+#define STV090x_Px_ISYMB(__x) (0xF402 - (__x - 1) * 0x200)
+#define STV090x_P1_ISYMB STV090x_Px_ISYMB(1)
+#define STV090x_P2_ISYMB STV090x_Px_ISYMB(2)
+#define STV090x_OFFST_Px_I_SYMBOL_FIELD 0
+#define STV090x_WIDTH_Px_I_SYMBOL_FIELD 8
+
+#define STV090x_Px_QSYMB(__x) (0xF403 - (__x - 1) * 0x200)
+#define STV090x_P1_QSYMB STV090x_Px_QSYMB(1)
+#define STV090x_P2_QSYMB STV090x_Px_QSYMB(2)
+#define STV090x_OFFST_Px_Q_SYMBOL_FIELD 0
+#define STV090x_WIDTH_Px_Q_SYMBOL_FIELD 8
+
+#define STV090x_Px_AGC1CFG(__x) (0xF404 - (__x - 1) * 0x200)
+#define STV090x_P1_AGC1CFG STV090x_Px_AGC1CFG(1)
+#define STV090x_P2_AGC1CFG STV090x_Px_AGC1CFG(2)
+#define STV090x_OFFST_Px_DC_FROZEN_FIELD 7
+#define STV090x_WIDTH_Px_DC_FROZEN_FIELD 1
+#define STV090x_OFFST_Px_DC_CORRECT_FIELD 6
+#define STV090x_WIDTH_Px_DC_CORRECT_FIELD 1
+#define STV090x_OFFST_Px_AMM_FROZEN_FIELD 5
+#define STV090x_WIDTH_Px_AMM_FROZEN_FIELD 1
+#define STV090x_OFFST_Px_AMM_CORRECT_FIELD 4
+#define STV090x_WIDTH_Px_AMM_CORRECT_FIELD 1
+#define STV090x_OFFST_Px_QUAD_FROZEN_FIELD 3
+#define STV090x_WIDTH_Px_QUAD_FROZEN_FIELD 1
+#define STV090x_OFFST_Px_QUAD_CORRECT_FIELD 2
+#define STV090x_WIDTH_Px_QUAD_CORRECT_FIELD 1
+
+#define STV090x_Px_AGC1CN(__x) (0xF406 - (__x - 1) * 0x200)
+#define STV090x_P1_AGC1CN STV090x_Px_AGC1CN(1)
+#define STV090x_P2_AGC1CN STV090x_Px_AGC1CN(2)
+#define STV090x_OFFST_Px_AGC1_LOCKED_FIELD 7
+#define STV090x_WIDTH_Px_AGC1_LOCKED_FIELD 1
+#define STV090x_OFFST_Px_AGC1_MINPOWER_FIELD 4
+#define STV090x_WIDTH_Px_AGC1_MINPOWER_FIELD 1
+#define STV090x_OFFST_Px_AGCOUT_FAST_FIELD 3
+#define STV090x_WIDTH_Px_AGCOUT_FAST_FIELD 1
+#define STV090x_OFFST_Px_AGCIQ_BETA_FIELD 0
+#define STV090x_WIDTH_Px_AGCIQ_BETA_FIELD 3
+
+#define STV090x_Px_AGC1REF(__x) (0xF407 - (__x - 1) * 0x200)
+#define STV090x_P1_AGC1REF STV090x_Px_AGC1REF(1)
+#define STV090x_P2_AGC1REF STV090x_Px_AGC1REF(2)
+#define STV090x_OFFST_Px_AGCIQ_REF_FIELD 0
+#define STV090x_WIDTH_Px_AGCIQ_REF_FIELD 8
+
+#define STV090x_Px_IDCCOMP(__x) (0xF408 - (__x - 1) * 0x200)
+#define STV090x_P1_IDCCOMP STV090x_Px_IDCCOMP(1)
+#define STV090x_P2_IDCCOMP STV090x_Px_IDCCOMP(2)
+#define STV090x_OFFST_Px_IAVERAGE_ADJ_FIELD 0
+#define STV090x_WIDTH_Px_IAVERAGE_ADJ_FIELD 8
+
+#define STV090x_Px_QDCCOMP(__x) (0xF409 - (__x - 1) * 0x200)
+#define STV090x_P1_QDCCOMP STV090x_Px_QDCCOMP(1)
+#define STV090x_P2_QDCCOMP STV090x_Px_QDCCOMP(2)
+#define STV090x_OFFST_Px_QAVERAGE_ADJ_FIELD 0
+#define STV090x_WIDTH_Px_QAVERAGE_ADJ_FIELD 8
+
+#define STV090x_Px_POWERI(__x) (0xF40A - (__x - 1) * 0x200)
+#define STV090x_P1_POWERI STV090x_Px_POWERI(1)
+#define STV090x_P2_POWERI STV090x_Px_POWERI(2)
+#define STV090x_OFFST_Px_POWER_I_FIELD 0
+#define STV090x_WIDTH_Px_POWER_I_FIELD 8
+
+#define STV090x_Px_POWERQ(__x) (0xF40B - (__x - 1) * 0x200)
+#define STV090x_P1_POWERQ STV090x_Px_POWERQ(1)
+#define STV090x_P2_POWERQ STV090x_Px_POWERQ(2)
+#define STV090x_OFFST_Px_POWER_Q_FIELD 0
+#define STV090x_WIDTH_Px_POWER_Q_FIELD 8
+
+#define STV090x_Px_AGC1AMM(__x) (0xF40C - (__x - 1) * 0x200)
+#define STV090x_P1_AGC1AMM STV090x_Px_AGC1AMM(1)
+#define STV090x_P2_AGC1AMM STV090x_Px_AGC1AMM(2)
+#define STV090x_OFFST_Px_AMM_VALUE_FIELD 0
+#define STV090x_WIDTH_Px_AMM_VALUE_FIELD 8
+
+#define STV090x_Px_AGC1QUAD(__x) (0xF40D - (__x - 1) * 0x200)
+#define STV090x_P1_AGC1QUAD STV090x_Px_AGC1QUAD(1)
+#define STV090x_P2_AGC1QUAD STV090x_Px_AGC1QUAD(2)
+#define STV090x_OFFST_Px_QUAD_VALUE_FIELD 0
+#define STV090x_WIDTH_Px_QUAD_VALUE_FIELD 8
+
+#define STV090x_Px_AGCIQINy(__x, __y) (0xF40F - (__x-1) * 0x200 - __y * 0x1)
+#define STV090x_P1_AGCIQIN0 STV090x_Px_AGCIQINy(1, 0)
+#define STV090x_P1_AGCIQIN1 STV090x_Px_AGCIQINy(1, 1)
+#define STV090x_P2_AGCIQIN0 STV090x_Px_AGCIQINy(2, 0)
+#define STV090x_P2_AGCIQIN1 STV090x_Px_AGCIQINy(2, 1)
+#define STV090x_OFFST_Px_AGCIQ_VALUE_FIELD 0
+#define STV090x_WIDTH_Px_AGCIQ_VALUE_FIELD 8
+
+#define STV090x_Px_DEMOD(__x) (0xF410 - (__x - 1) * 0x200)
+#define STV090x_P1_DEMOD STV090x_Px_DEMOD(1)
+#define STV090x_P2_DEMOD STV090x_Px_DEMOD(2)
+#define STV090x_OFFST_Px_MANUAL_S2ROLLOFF_FIELD 7
+#define STV090x_WIDTH_Px_MANUAL_S2ROLLOFF_FIELD 1
+#define STV090x_OFFST_Px_DEMOD_STOP_FIELD 6
+#define STV090x_WIDTH_Px_DEMOD_STOP_FIELD 1
+#define STV090x_OFFST_Px_SPECINV_CONTROL_FIELD 4
+#define STV090x_WIDTH_Px_SPECINV_CONTROL_FIELD 2
+#define STV090x_OFFST_Px_FORCE_ENASAMP_FIELD 3
+#define STV090x_WIDTH_Px_FORCE_ENASAMP_FIELD 1
+#define STV090x_OFFST_Px_MANUAL_SXROLLOFF_FIELD 2
+#define STV090x_WIDTH_Px_MANUAL_SXROLLOFF_FIELD 1
+#define STV090x_OFFST_Px_ROLLOFF_CONTROL_FIELD 0
+#define STV090x_WIDTH_Px_ROLLOFF_CONTROL_FIELD 2
+
+#define STV090x_Px_DMDMODCOD(__x) (0xF411 - (__x - 1) * 0x200)
+#define STV090x_P1_DMDMODCOD STV090x_Px_DMDMODCOD(1)
+#define STV090x_P2_DMDMODCOD STV090x_Px_DMDMODCOD(2)
+#define STV090x_OFFST_Px_MANUAL_MODCOD_FIELD 7
+#define STV090x_WIDTH_Px_MANUAL_MODCOD_FIELD 1
+#define STV090x_OFFST_Px_DEMOD_MODCOD_FIELD 2
+#define STV090x_WIDTH_Px_DEMOD_MODCOD_FIELD 5
+#define STV090x_OFFST_Px_DEMOD_TYPE_FIELD 0
+#define STV090x_WIDTH_Px_DEMOD_TYPE_FIELD 2
+
+#define STV090x_Px_DSTATUS(__x) (0xF412 - (__x - 1) * 0x200)
+#define STV090x_P1_DSTATUS STV090x_Px_DSTATUS(1)
+#define STV090x_P2_DSTATUS STV090x_Px_DSTATUS(2)
+#define STV090x_OFFST_Px_CAR_LOCK_FIELD 7
+#define STV090x_WIDTH_Px_CAR_LOCK_FIELD 1
+#define STV090x_OFFST_Px_TMGLOCK_QUALITY_FIELD 5
+#define STV090x_WIDTH_Px_TMGLOCK_QUALITY_FIELD 2
+#define STV090x_OFFST_Px_LOCK_DEFINITIF_FIELD 3
+#define STV090x_WIDTH_Px_LOCK_DEFINITIF_FIELD 1
+
+#define STV090x_Px_DSTATUS2(__x) (0xF413 - (__x - 1) * 0x200)
+#define STV090x_P1_DSTATUS2 STV090x_Px_DSTATUS2(1)
+#define STV090x_P2_DSTATUS2 STV090x_Px_DSTATUS2(2)
+#define STV090x_OFFST_Px_DEMOD_DELOCK_FIELD 7
+#define STV090x_WIDTH_Px_DEMOD_DELOCK_FIELD 1
+#define STV090x_OFFST_Px_AGC1_NOSIGNALACK_FIELD 3
+#define STV090x_WIDTH_Px_AGC1_NOSIGNALACK_FIELD 1
+#define STV090x_OFFST_Px_AGC2_OVERFLOW_FIELD 2
+#define STV090x_WIDTH_Px_AGC2_OVERFLOW_FIELD 1
+#define STV090x_OFFST_Px_CFR_OVERFLOW_FIELD 1
+#define STV090x_WIDTH_Px_CFR_OVERFLOW_FIELD 1
+#define STV090x_OFFST_Px_GAMMA_OVERUNDER_FIELD 0
+#define STV090x_WIDTH_Px_GAMMA_OVERUNDER_FIELD 1
+
+#define STV090x_Px_DMDCFGMD(__x) (0xF414 - (__x - 1) * 0x200)
+#define STV090x_P1_DMDCFGMD STV090x_Px_DMDCFGMD(1)
+#define STV090x_P2_DMDCFGMD STV090x_Px_DMDCFGMD(2)
+#define STV090x_OFFST_Px_DVBS2_ENABLE_FIELD 7
+#define STV090x_WIDTH_Px_DVBS2_ENABLE_FIELD 1
+#define STV090x_OFFST_Px_DVBS1_ENABLE_FIELD 6
+#define STV090x_WIDTH_Px_DVBS1_ENABLE_FIELD 1
+#define STV090x_OFFST_Px_CFR_AUTOSCAN_FIELD 5 /* check */
+#define STV090x_WIDTH_Px_CFR_AUTOSCAN_FIELD 1
+#define STV090x_OFFST_Px_SCAN_ENABLE_FIELD 4 /* check */
+#define STV090x_WIDTH_Px_SCAN_ENABLE_FIELD 1
+#define STV090x_OFFST_Px_TUN_AUTOSCAN_FIELD 3
+#define STV090x_WIDTH_Px_TUN_AUTOSCAN_FIELD 1
+#define STV090x_OFFST_Px_NOFORCE_RELOCK_FIELD 2
+#define STV090x_WIDTH_Px_NOFORCE_RELOCK_FIELD 1
+#define STV090x_OFFST_Px_TUN_RNG_FIELD 0
+#define STV090x_WIDTH_Px_TUN_RNG_FIELD 2
+
+#define STV090x_Px_DMDCFG2(__x) (0xF415 - (__x - 1) * 0x200)
+#define STV090x_P1_DMDCFG2 STV090x_Px_DMDCFG2(1)
+#define STV090x_P2_DMDCFG2 STV090x_Px_DMDCFG2(2)
+#define STV090x_OFFST_Px_S1S2_SEQUENTIAL_FIELD 6
+#define STV090x_WIDTH_Px_S1S2_SEQUENTIAL_FIELD 1
+
+#define STV090x_Px_DMDISTATE(__x) (0xF416 - (__x - 1) * 0x200)
+#define STV090x_P1_DMDISTATE STV090x_Px_DMDISTATE(1)
+#define STV090x_P2_DMDISTATE STV090x_Px_DMDISTATE(2)
+#define STV090x_OFFST_Px_I2C_DEMOD_MODE_FIELD 0
+#define STV090x_WIDTH_Px_I2C_DEMOD_MODE_FIELD 5
+
+#define STV090x_Px_DMDTOM(__x) (0xF417 - (__x - 1) * 0x200) /* check */
+#define STV090x_P1_DMDTOM STV090x_Px_DMDTOM(1)
+#define STV090x_P2_DMDTOM STV090x_Px_DMDTOM(2)
+
+#define STV090x_Px_DMDSTATE(__x) (0xF41B - (__x - 1) * 0x200)
+#define STV090x_P1_DMDSTATE STV090x_Px_DMDSTATE(1)
+#define STV090x_P2_DMDSTATE STV090x_Px_DMDSTATE(2)
+#define STV090x_OFFST_Px_HEADER_MODE_FIELD 5
+#define STV090x_WIDTH_Px_HEADER_MODE_FIELD 2
+
+#define STV090x_Px_DMDFLYW(__x) (0xF41C - (__x - 1) * 0x200)
+#define STV090x_P1_DMDFLYW STV090x_Px_DMDFLYW(1)
+#define STV090x_P2_DMDFLYW STV090x_Px_DMDFLYW(2)
+#define STV090x_OFFST_Px_I2C_IRQVAL_FIELD 4
+#define STV090x_WIDTH_Px_I2C_IRQVAL_FIELD 4
+#define STV090x_OFFST_Px_FLYWHEEL_CPT_FIELD 0 /* check */
+#define STV090x_WIDTH_Px_FLYWHEEL_CPT_FIELD 4
+
+#define STV090x_Px_DSTATUS3(__x) (0xF41D - (__x - 1) * 0x200)
+#define STV090x_P1_DSTATUS3 STV090x_Px_DSTATUS3(1)
+#define STV090x_P2_DSTATUS3 STV090x_Px_DSTATUS3(2)
+#define STV090x_OFFST_Px_DEMOD_CFGMODE_FIELD 5
+#define STV090x_WIDTH_Px_DEMOD_CFGMODE_FIELD 2
+
+#define STV090x_Px_DMDCFG3(__x) (0xF41E - (__x - 1) * 0x200)
+#define STV090x_P1_DMDCFG3 STV090x_Px_DMDCFG3(1)
+#define STV090x_P2_DMDCFG3 STV090x_Px_DMDCFG3(2)
+#define STV090x_OFFST_Px_NOSTOP_FIFOFULL_FIELD 3
+#define STV090x_WIDTH_Px_NOSTOP_FIFOFULL_FIELD 1
+
+#define STV090x_Px_DMDCFG4(__x) (0xf41f - (__x - 1) * 0x200)
+#define STV090x_P1_DMDCFG4 STV090x_Px_DMDCFG4(1)
+#define STV090x_P2_DMDCFG4 STV090x_Px_DMDCFG4(2)
+
+#define STV090x_Px_CORRELMANT(__x) (0xF420 - (__x - 1) * 0x200)
+#define STV090x_P1_CORRELMANT STV090x_Px_CORRELMANT(1)
+#define STV090x_P2_CORRELMANT STV090x_Px_CORRELMANT(2)
+#define STV090x_OFFST_Px_CORREL_MANT_FIELD 0
+#define STV090x_WIDTH_Px_CORREL_MANT_FIELD 8
+
+#define STV090x_Px_CORRELABS(__x) (0xF421 - (__x - 1) * 0x200)
+#define STV090x_P1_CORRELABS STV090x_Px_CORRELABS(1)
+#define STV090x_P2_CORRELABS STV090x_Px_CORRELABS(2)
+#define STV090x_OFFST_Px_CORREL_ABS_FIELD 0
+#define STV090x_WIDTH_Px_CORREL_ABS_FIELD 8
+
+#define STV090x_Px_CORRELEXP(__x) (0xF422 - (__x - 1) * 0x200)
+#define STV090x_P1_CORRELEXP STV090x_Px_CORRELEXP(1)
+#define STV090x_P2_CORRELEXP STV090x_Px_CORRELEXP(2)
+#define STV090x_OFFST_Px_CORREL_ABSEXP_FIELD 4
+#define STV090x_WIDTH_Px_CORREL_ABSEXP_FIELD 4
+#define STV090x_OFFST_Px_CORREL_EXP_FIELD 0
+#define STV090x_WIDTH_Px_CORREL_EXP_FIELD 4
+
+#define STV090x_Px_PLHMODCOD(__x) (0xF424 - (__x - 1) * 0x200)
+#define STV090x_P1_PLHMODCOD STV090x_Px_PLHMODCOD(1)
+#define STV090x_P2_PLHMODCOD STV090x_Px_PLHMODCOD(2)
+#define STV090x_OFFST_Px_SPECINV_DEMOD_FIELD 7
+#define STV090x_WIDTH_Px_SPECINV_DEMOD_FIELD 1
+#define STV090x_OFFST_Px_PLH_MODCOD_FIELD 2
+#define STV090x_WIDTH_Px_PLH_MODCOD_FIELD 5
+#define STV090x_OFFST_Px_PLH_TYPE_FIELD 0
+#define STV090x_WIDTH_Px_PLH_TYPE_FIELD 2
+
+#define STV090x_Px_AGCK32(__x) (0xf42b - (__x - 1) * 0x200)
+#define STV090x_P1_AGCK32 STV090x_Px_AGCK32(1)
+#define STV090x_P2_AGCK32 STV090x_Px_AGCK32(2)
+
+#define STV090x_Px_AGC2O(__x) (0xF42C - (__x - 1) * 0x200)
+#define STV090x_P1_AGC2O STV090x_Px_AGC2O(1)
+#define STV090x_P2_AGC2O STV090x_Px_AGC2O(2)
+
+#define STV090x_Px_AGC2REF(__x) (0xF42D - (__x - 1) * 0x200)
+#define STV090x_P1_AGC2REF STV090x_Px_AGC2REF(1)
+#define STV090x_P2_AGC2REF STV090x_Px_AGC2REF(2)
+#define STV090x_OFFST_Px_AGC2_REF_FIELD 0
+#define STV090x_WIDTH_Px_AGC2_REF_FIELD 8
+
+#define STV090x_Px_AGC1ADJ(__x) (0xF42E - (__x - 1) * 0x200)
+#define STV090x_P1_AGC1ADJ STV090x_Px_AGC1ADJ(1)
+#define STV090x_P2_AGC1ADJ STV090x_Px_AGC1ADJ(2)
+#define STV090x_OFFST_Px_AGC1_ADJUSTED_FIELD 0
+#define STV090x_WIDTH_Px_AGC1_ADJUSTED_FIELD 7
+
+#define STV090x_Px_AGC2Iy(__x, __y) (0xF437 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_AGC2I0 STV090x_Px_AGC2Iy(1, 0)
+#define STV090x_P1_AGC2I1 STV090x_Px_AGC2Iy(1, 1)
+#define STV090x_P2_AGC2I0 STV090x_Px_AGC2Iy(2, 0)
+#define STV090x_P2_AGC2I1 STV090x_Px_AGC2Iy(2, 1)
+#define STV090x_OFFST_Px_AGC2_INTEGRATOR_FIELD 0
+#define STV090x_WIDTH_Px_AGC2_INTEGRATOR_FIELD 8
+
+#define STV090x_Px_CARCFG(__x) (0xF438 - (__x - 1) * 0x200)
+#define STV090x_P1_CARCFG STV090x_Px_CARCFG(1)
+#define STV090x_P2_CARCFG STV090x_Px_CARCFG(2)
+#define STV090x_OFFST_Px_EN_CAR2CENTER_FIELD 5
+#define STV090x_WIDTH_Px_EN_CAR2CENTER_FIELD 1
+#define STV090x_OFFST_Px_ROTATON_FIELD 2
+#define STV090x_WIDTH_Px_ROTATON_FIELD 1
+#define STV090x_OFFST_Px_PH_DET_ALGO_FIELD 0
+#define STV090x_WIDTH_Px_PH_DET_ALGO_FIELD 2
+
+#define STV090x_Px_ACLC(__x) (0xF439 - (__x - 1) * 0x200)
+#define STV090x_P1_ACLC STV090x_Px_ACLC(1)
+#define STV090x_P2_ACLC STV090x_Px_ACLC(2)
+#define STV090x_OFFST_Px_CAR_ALPHA_MANT_FIELD 4
+#define STV090x_WIDTH_Px_CAR_ALPHA_MANT_FIELD 2
+#define STV090x_OFFST_Px_CAR_ALPHA_EXP_FIELD 0
+#define STV090x_WIDTH_Px_CAR_ALPHA_EXP_FIELD 4
+
+#define STV090x_Px_BCLC(__x) (0xF43A - (__x - 1) * 0x200)
+#define STV090x_P1_BCLC STV090x_Px_BCLC(1)
+#define STV090x_P2_BCLC STV090x_Px_BCLC(2)
+#define STV090x_OFFST_Px_CAR_BETA_MANT_FIELD 4
+#define STV090x_WIDTH_Px_CAR_BETA_MANT_FIELD 2
+#define STV090x_OFFST_Px_CAR_BETA_EXP_FIELD 0
+#define STV090x_WIDTH_Px_CAR_BETA_EXP_FIELD 4
+
+#define STV090x_Px_CARFREQ(__x) (0xF43D - (__x - 1) * 0x200)
+#define STV090x_P1_CARFREQ STV090x_Px_CARFREQ(1)
+#define STV090x_P2_CARFREQ STV090x_Px_CARFREQ(2)
+#define STV090x_OFFST_Px_KC_COARSE_EXP_FIELD 4
+#define STV090x_WIDTH_Px_KC_COARSE_EXP_FIELD 4
+#define STV090x_OFFST_Px_BETA_FREQ_FIELD 0
+#define STV090x_WIDTH_Px_BETA_FREQ_FIELD 4
+
+#define STV090x_Px_CARHDR(__x) (0xF43E - (__x - 1) * 0x200)
+#define STV090x_P1_CARHDR STV090x_Px_CARHDR(1)
+#define STV090x_P2_CARHDR STV090x_Px_CARHDR(2)
+#define STV090x_OFFST_Px_FREQ_HDR_FIELD 0
+#define STV090x_WIDTH_Px_FREQ_HDR_FIELD 8
+
+#define STV090x_Px_LDT(__x) (0xF43F - (__x - 1) * 0x200)
+#define STV090x_P1_LDT STV090x_Px_LDT(1)
+#define STV090x_P2_LDT STV090x_Px_LDT(2)
+#define STV090x_OFFST_Px_CARLOCK_THRES_FIELD 0
+#define STV090x_WIDTH_Px_CARLOCK_THRES_FIELD 8
+
+#define STV090x_Px_LDT2(__x) (0xF440 - (__x - 1) * 0x200)
+#define STV090x_P1_LDT2 STV090x_Px_LDT2(1)
+#define STV090x_P2_LDT2 STV090x_Px_LDT2(2)
+#define STV090x_OFFST_Px_CARLOCK_THRES2_FIELD 0
+#define STV090x_WIDTH_Px_CARLOCK_THRES2_FIELD 8
+
+#define STV090x_Px_CFRICFG(__x) (0xF441 - (__x - 1) * 0x200)
+#define STV090x_P1_CFRICFG STV090x_Px_CFRICFG(1)
+#define STV090x_P2_CFRICFG STV090x_Px_CFRICFG(2)
+#define STV090x_OFFST_Px_NEG_CFRSTEP_FIELD 0
+#define STV090x_WIDTH_Px_NEG_CFRSTEP_FIELD 1
+
+#define STV090x_Pn_CFRUPy(__x, __y) (0xF443 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_CFRUP0 STV090x_Pn_CFRUPy(1, 0)
+#define STV090x_P1_CFRUP1 STV090x_Pn_CFRUPy(1, 1)
+#define STV090x_P2_CFRUP0 STV090x_Pn_CFRUPy(2, 0)
+#define STV090x_P2_CFRUP1 STV090x_Pn_CFRUPy(2, 1)
+#define STV090x_OFFST_Px_CFR_UP_FIELD 0
+#define STV090x_WIDTH_Px_CFR_UP_FIELD 8
+
+#define STV090x_Pn_CFRLOWy(__x, __y) (0xF447 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_CFRLOW0 STV090x_Pn_CFRLOWy(1, 0)
+#define STV090x_P1_CFRLOW1 STV090x_Pn_CFRLOWy(1, 1)
+#define STV090x_P2_CFRLOW0 STV090x_Pn_CFRLOWy(2, 0)
+#define STV090x_P2_CFRLOW1 STV090x_Pn_CFRLOWy(2, 1)
+#define STV090x_OFFST_Px_CFR_LOW_FIELD 0
+#define STV090x_WIDTH_Px_CFR_LOW_FIELD 8
+
+#define STV090x_Pn_CFRINITy(__x, __y) (0xF449 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_CFRINIT0 STV090x_Pn_CFRINITy(1, 0)
+#define STV090x_P1_CFRINIT1 STV090x_Pn_CFRINITy(1, 1)
+#define STV090x_P2_CFRINIT0 STV090x_Pn_CFRINITy(2, 0)
+#define STV090x_P2_CFRINIT1 STV090x_Pn_CFRINITy(2, 1)
+#define STV090x_OFFST_Px_CFR_INIT_FIELD 0
+#define STV090x_WIDTH_Px_CFR_INIT_FIELD 8
+
+#define STV090x_Px_CFRINC1(__x) (0xF44A - (__x - 1) * 0x200)
+#define STV090x_P1_CFRINC1 STV090x_Px_CFRINC1(1)
+#define STV090x_P2_CFRINC1 STV090x_Px_CFRINC1(2)
+#define STV090x_OFFST_Px_CFR_INC1_FIELD 0
+#define STV090x_WIDTH_Px_CFR_INC1_FIELD 7
+
+#define STV090x_Px_CFRINC0(__x) (0xF44B - (__x - 1) * 0x200)
+#define STV090x_P1_CFRINC0 STV090x_Px_CFRINC0(1)
+#define STV090x_P2_CFRINC0 STV090x_Px_CFRINC0(2)
+#define STV090x_OFFST_Px_CFR_INC0_FIELD 4
+#define STV090x_WIDTH_Px_CFR_INC0_FIELD 4
+
+#define STV090x_Pn_CFRy(__x, __y) (0xF44E - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_CFR0 STV090x_Pn_CFRy(1, 0)
+#define STV090x_P1_CFR1 STV090x_Pn_CFRy(1, 1)
+#define STV090x_P1_CFR2 STV090x_Pn_CFRy(1, 2)
+#define STV090x_P2_CFR0 STV090x_Pn_CFRy(2, 0)
+#define STV090x_P2_CFR1 STV090x_Pn_CFRy(2, 1)
+#define STV090x_P2_CFR2 STV090x_Pn_CFRy(2, 2)
+#define STV090x_OFFST_Px_CAR_FREQ_FIELD 0
+#define STV090x_WIDTH_Px_CAR_FREQ_FIELD 8
+
+#define STV090x_Px_LDI(__x) (0xF44F - (__x - 1) * 0x200)
+#define STV090x_P1_LDI STV090x_Px_LDI(1)
+#define STV090x_P2_LDI STV090x_Px_LDI(2)
+#define STV090x_OFFST_Px_LOCK_DET_INTEGR_FIELD 0
+#define STV090x_WIDTH_Px_LOCK_DET_INTEGR_FIELD 8
+
+#define STV090x_Px_TMGCFG(__x) (0xF450 - (__x - 1) * 0x200)
+#define STV090x_P1_TMGCFG STV090x_Px_TMGCFG(1)
+#define STV090x_P2_TMGCFG STV090x_Px_TMGCFG(2)
+#define STV090x_OFFST_Px_TMGLOCK_BETA_FIELD 6
+#define STV090x_WIDTH_Px_TMGLOCK_BETA_FIELD 2
+#define STV090x_OFFST_Px_DO_TIMING_FIELD 4
+#define STV090x_WIDTH_Px_DO_TIMING_FIELD 1
+#define STV090x_OFFST_Px_TMG_MINFREQ_FIELD 0
+#define STV090x_WIDTH_Px_TMG_MINFREQ_FIELD 2
+
+#define STV090x_Px_RTC(__x) (0xF451 - (__x - 1) * 0x200)
+#define STV090x_P1_RTC STV090x_Px_RTC(1)
+#define STV090x_P2_RTC STV090x_Px_RTC(2)
+#define STV090x_OFFST_Px_TMGALPHA_EXP_FIELD 4
+#define STV090x_WIDTH_Px_TMGALPHA_EXP_FIELD 4
+#define STV090x_OFFST_Px_TMGBETA_EXP_FIELD 0
+#define STV090x_WIDTH_Px_TMGBETA_EXP_FIELD 4
+
+#define STV090x_Px_RTCS2(__x) (0xF452 - (__x - 1) * 0x200)
+#define STV090x_P1_RTCS2 STV090x_Px_RTCS2(1)
+#define STV090x_P2_RTCS2 STV090x_Px_RTCS2(2)
+#define STV090x_OFFST_Px_TMGALPHAS2_EXP_FIELD 4
+#define STV090x_WIDTH_Px_TMGALPHAS2_EXP_FIELD 4
+#define STV090x_OFFST_Px_TMGBETAS2_EXP_FIELD 0
+#define STV090x_WIDTH_Px_TMGBETAS2_EXP_FIELD 4
+
+#define STV090x_Px_TMGTHRISE(__x) (0xF453 - (__x - 1) * 0x200)
+#define STV090x_P1_TMGTHRISE STV090x_Px_TMGTHRISE(1)
+#define STV090x_P2_TMGTHRISE STV090x_Px_TMGTHRISE(2)
+#define STV090x_OFFST_Px_TMGLOCK_THRISE_FIELD 0
+#define STV090x_WIDTH_Px_TMGLOCK_THRISE_FIELD 8
+
+#define STV090x_Px_TMGTHFALL(__x) (0xF454 - (__x - 1) * 0x200)
+#define STV090x_P1_TMGTHFALL STV090x_Px_TMGTHFALL(1)
+#define STV090x_P2_TMGTHFALL STV090x_Px_TMGTHFALL(2)
+#define STV090x_OFFST_Px_TMGLOCK_THFALL_FIELD 0
+#define STV090x_WIDTH_Px_TMGLOCK_THFALL_FIELD 8
+
+#define STV090x_Px_SFRUPRATIO(__x) (0xF455 - (__x - 1) * 0x200)
+#define STV090x_P1_SFRUPRATIO STV090x_Px_SFRUPRATIO(1)
+#define STV090x_P2_SFRUPRATIO STV090x_Px_SFRUPRATIO(2)
+#define STV090x_OFFST_Px_SFR_UPRATIO_FIELD 0
+#define STV090x_WIDTH_Px_SFR_UPRATIO_FIELD 8
+
+#define STV090x_Px_SFRLOWRATIO(__x) (0xF456 - (__x - 1) * 0x200)
+#define STV090x_P1_SFRLOWRATIO STV090x_Px_SFRLOWRATIO(1)
+#define STV090x_P2_SFRLOWRATIO STV090x_Px_SFRLOWRATIO(2)
+#define STV090x_OFFST_Px_SFR_LOWRATIO_FIELD 0
+#define STV090x_WIDTH_Px_SFR_LOWRATIO_FIELD 8
+
+#define STV090x_Px_KREFTMG(__x) (0xF458 - (__x - 1) * 0x200)
+#define STV090x_P1_KREFTMG STV090x_Px_KREFTMG(1)
+#define STV090x_P2_KREFTMG STV090x_Px_KREFTMG(2)
+#define STV090x_OFFST_Px_KREF_TMG_FIELD 0
+#define STV090x_WIDTH_Px_KREF_TMG_FIELD 8
+
+#define STV090x_Px_SFRSTEP(__x) (0xF459 - (__x - 1) * 0x200)
+#define STV090x_P1_SFRSTEP STV090x_Px_SFRSTEP(1)
+#define STV090x_P2_SFRSTEP STV090x_Px_SFRSTEP(2)
+#define STV090x_OFFST_Px_SFR_SCANSTEP_FIELD 4
+#define STV090x_WIDTH_Px_SFR_SCANSTEP_FIELD 4
+#define STV090x_OFFST_Px_SFR_CENTERSTEP_FIELD 0
+#define STV090x_WIDTH_Px_SFR_CENTERSTEP_FIELD 4
+
+#define STV090x_Px_TMGCFG2(__x) (0xF45A - (__x - 1) * 0x200)
+#define STV090x_P1_TMGCFG2 STV090x_Px_TMGCFG2(1)
+#define STV090x_P2_TMGCFG2 STV090x_Px_TMGCFG2(2)
+#define STV090x_OFFST_Px_SFRRATIO_FINE_FIELD 0
+#define STV090x_WIDTH_Px_SFRRATIO_FINE_FIELD 1
+
+#define STV090x_Px_SFRINIT1(__x) (0xF45E - (__x - 1) * 0x200)
+#define STV090x_P1_SFRINIT1 STV090x_Px_SFRINIT1(1)
+#define STV090x_P2_SFRINIT1 STV090x_Px_SFRINIT1(2)
+#define STV090x_OFFST_Px_SFR_INIT_FIELD 0
+#define STV090x_WIDTH_Px_SFR_INIT_FIELD 8
+
+#define STV090x_Px_SFRINIT0(__x) (0xF45F - (__x - 1) * 0x200)
+#define STV090x_P1_SFRINIT0 STV090x_Px_SFRINIT0(1)
+#define STV090x_P2_SFRINIT0 STV090x_Px_SFRINIT0(2)
+#define STV090x_OFFST_Px_SFR_INIT_FIELD 0
+#define STV090x_WIDTH_Px_SFR_INIT_FIELD 8
+
+#define STV090x_Px_SFRUP1(__x) (0xF460 - (__x - 1) * 0x200)
+#define STV090x_P1_SFRUP1 STV090x_Px_SFRUP1(1)
+#define STV090x_P2_SFRUP1 STV090x_Px_SFRUP1(2)
+#define STV090x_OFFST_Px_SYMB_FREQ_UP1_FIELD 0
+#define STV090x_WIDTH_Px_SYMB_FREQ_UP1_FIELD 7
+
+#define STV090x_Px_SFRUP0(__x) (0xF461 - (__x - 1) * 0x200)
+#define STV090x_P1_SFRUP0 STV090x_Px_SFRUP0(1)
+#define STV090x_P2_SFRUP0 STV090x_Px_SFRUP0(2)
+#define STV090x_OFFST_Px_SYMB_FREQ_UP0_FIELD 0
+#define STV090x_WIDTH_Px_SYMB_FREQ_UP0_FIELD 8
+
+#define STV090x_Px_SFRLOW1(__x) (0xF462 - (__x - 1) * 0x200)
+#define STV090x_P1_SFRLOW1 STV090x_Px_SFRLOW1(1)
+#define STV090x_P2_SFRLOW1 STV090x_Px_SFRLOW1(2)
+#define STV090x_OFFST_Px_SYMB_FREQ_LOW1_FIELD 0
+#define STV090x_WIDTH_Px_SYMB_FREQ_LOW1_FIELD 7
+
+#define STV090x_Px_SFRLOW0(__x) (0xF463 - (__x - 1) * 0x200)
+#define STV090x_P1_SFRLOW0 STV090x_Px_SFRLOW0(1)
+#define STV090x_P2_SFRLOW0 STV090x_Px_SFRLOW0(2)
+#define STV090x_OFFST_Px_SYMB_FREQ_LOW0_FIELD 0
+#define STV090x_WIDTH_Px_SYMB_FREQ_LOW0_FIELD 8
+
+#define STV090x_Px_SFRy(__x, __y) (0xF464 - (__x-1) * 0x200 + (3 - __y))
+#define STV090x_P1_SFR0 STV090x_Px_SFRy(1, 0)
+#define STV090x_P1_SFR1 STV090x_Px_SFRy(1, 1)
+#define STV090x_P1_SFR2 STV090x_Px_SFRy(1, 2)
+#define STV090x_P1_SFR3 STV090x_Px_SFRy(1, 3)
+#define STV090x_P2_SFR0 STV090x_Px_SFRy(2, 0)
+#define STV090x_P2_SFR1 STV090x_Px_SFRy(2, 1)
+#define STV090x_P2_SFR2 STV090x_Px_SFRy(2, 2)
+#define STV090x_P2_SFR3 STV090x_Px_SFRy(2, 3)
+#define STV090x_OFFST_Px_SYMB_FREQ_FIELD 0
+#define STV090x_WIDTH_Px_SYMB_FREQ_FIELD 32
+
+#define STV090x_Px_TMGREG2(__x) (0xF468 - (__x - 1) * 0x200)
+#define STV090x_P1_TMGREG2 STV090x_Px_TMGREG2(1)
+#define STV090x_P2_TMGREG2 STV090x_Px_TMGREG2(2)
+#define STV090x_OFFST_Px_TMGREG_FIELD 0
+#define STV090x_WIDTH_Px_TMGREG_FIELD 8
+
+#define STV090x_Px_TMGREG1(__x) (0xF469 - (__x - 1) * 0x200)
+#define STV090x_P1_TMGREG1 STV090x_Px_TMGREG1(1)
+#define STV090x_P2_TMGREG1 STV090x_Px_TMGREG1(2)
+#define STV090x_OFFST_Px_TMGREG_FIELD 0
+#define STV090x_WIDTH_Px_TMGREG_FIELD 8
+
+#define STV090x_Px_TMGREG0(__x) (0xF46A - (__x - 1) * 0x200)
+#define STV090x_P1_TMGREG0 STV090x_Px_TMGREG0(1)
+#define STV090x_P2_TMGREG0 STV090x_Px_TMGREG0(2)
+#define STV090x_OFFST_Px_TMGREG_FIELD 0
+#define STV090x_WIDTH_Px_TMGREG_FIELD 8
+
+#define STV090x_Px_TMGLOCKy(__x, __y) (0xF46C - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_TMGLOCK0 STV090x_Px_TMGLOCKy(1, 0)
+#define STV090x_P1_TMGLOCK1 STV090x_Px_TMGLOCKy(1, 1)
+#define STV090x_P2_TMGLOCK0 STV090x_Px_TMGLOCKy(2, 0)
+#define STV090x_P2_TMGLOCK1 STV090x_Px_TMGLOCKy(2, 1)
+#define STV090x_OFFST_Px_TMGLOCK_LEVEL_FIELD 0
+#define STV090x_WIDTH_Px_TMGLOCK_LEVEL_FIELD 8
+
+#define STV090x_Px_TMGOBS(__x) (0xF46D - (__x - 1) * 0x200)
+#define STV090x_P1_TMGOBS STV090x_Px_TMGOBS(1)
+#define STV090x_P2_TMGOBS STV090x_Px_TMGOBS(2)
+#define STV090x_OFFST_Px_ROLLOFF_STATUS_FIELD 6
+#define STV090x_WIDTH_Px_ROLLOFF_STATUS_FIELD 2
+
+#define STV090x_Px_EQUALCFG(__x) (0xF46F - (__x - 1) * 0x200)
+#define STV090x_P1_EQUALCFG STV090x_Px_EQUALCFG(1)
+#define STV090x_P2_EQUALCFG STV090x_Px_EQUALCFG(2)
+#define STV090x_OFFST_Px_EQUAL_ON_FIELD 6
+#define STV090x_WIDTH_Px_EQUAL_ON_FIELD 1
+#define STV090x_OFFST_Px_MU_EQUALDFE_FIELD 0
+#define STV090x_WIDTH_Px_MU_EQUALDFE_FIELD 3
+
+#define STV090x_Px_EQUAIy(__x, __y) (0xf470 - (__x - 1) * 0x200 + (__y - 1))
+#define STV090x_P1_EQUAI1 STV090x_Px_EQUAIy(1, 1)
+#define STV090x_P1_EQUAI2 STV090x_Px_EQUAIy(1, 2)
+#define STV090x_P1_EQUAI3 STV090x_Px_EQUAIy(1, 3)
+#define STV090x_P1_EQUAI4 STV090x_Px_EQUAIy(1, 4)
+#define STV090x_P1_EQUAI5 STV090x_Px_EQUAIy(1, 5)
+#define STV090x_P1_EQUAI6 STV090x_Px_EQUAIy(1, 6)
+#define STV090x_P1_EQUAI7 STV090x_Px_EQUAIy(1, 7)
+#define STV090x_P1_EQUAI8 STV090x_Px_EQUAIy(1, 8)
+
+#define STV090x_P2_EQUAI1 STV090x_Px_EQUAIy(2, 1)
+#define STV090x_P2_EQUAI2 STV090x_Px_EQUAIy(2, 2)
+#define STV090x_P2_EQUAI3 STV090x_Px_EQUAIy(2, 3)
+#define STV090x_P2_EQUAI4 STV090x_Px_EQUAIy(2, 4)
+#define STV090x_P2_EQUAI5 STV090x_Px_EQUAIy(2, 5)
+#define STV090x_P2_EQUAI6 STV090x_Px_EQUAIy(2, 6)
+#define STV090x_P2_EQUAI7 STV090x_Px_EQUAIy(2, 7)
+#define STV090x_P2_EQUAI8 STV090x_Px_EQUAIy(2, 8)
+#define STV090x_OFFST_Px_EQUA_ACCIy_FIELD 0
+#define STV090x_WIDTH_Px_EQUA_ACCIy_FIELD 8
+
+#define STV090x_Px_EQUAQy(__x, __y) (0xf471 - (__x - 1) * 0x200 + (__y - 1))
+#define STV090x_P1_EQUAQ1 STV090x_Px_EQUAQy(1, 1)
+#define STV090x_P1_EQUAQ2 STV090x_Px_EQUAQy(1, 2)
+#define STV090x_P1_EQUAQ3 STV090x_Px_EQUAQy(1, 3)
+#define STV090x_P1_EQUAQ4 STV090x_Px_EQUAQy(1, 4)
+#define STV090x_P1_EQUAQ5 STV090x_Px_EQUAQy(1, 5)
+#define STV090x_P1_EQUAQ6 STV090x_Px_EQUAQy(1, 6)
+#define STV090x_P1_EQUAQ7 STV090x_Px_EQUAQy(1, 7)
+#define STV090x_P1_EQUAQ8 STV090x_Px_EQUAQy(1, 8)
+
+#define STV090x_P2_EQUAQ1 STV090x_Px_EQUAQy(2, 1)
+#define STV090x_P2_EQUAQ2 STV090x_Px_EQUAQy(2, 2)
+#define STV090x_P2_EQUAQ3 STV090x_Px_EQUAQy(2, 3)
+#define STV090x_P2_EQUAQ4 STV090x_Px_EQUAQy(2, 4)
+#define STV090x_P2_EQUAQ5 STV090x_Px_EQUAQy(2, 5)
+#define STV090x_P2_EQUAQ6 STV090x_Px_EQUAQy(2, 6)
+#define STV090x_P2_EQUAQ7 STV090x_Px_EQUAQy(2, 7)
+#define STV090x_P2_EQUAQ8 STV090x_Px_EQUAQy(2, 8)
+#define STV090x_OFFST_Px_EQUA_ACCQy_FIELD 0
+#define STV090x_WIDTH_Px_EQUA_ACCQy_FIELD 8
+
+#define STV090x_Px_NNOSDATATy(__x, __y) (0xf481 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_NNOSDATAT0 STV090x_Px_NNOSDATATy(1, 0)
+#define STV090x_P1_NNOSDATAT1 STV090x_Px_NNOSDATATy(1, 1)
+#define STV090x_P2_NNOSDATAT0 STV090x_Px_NNOSDATATy(2, 0)
+#define STV090x_P2_NNOSDATAT1 STV090x_Px_NNOSDATATy(2, 1)
+#define STV090x_OFFST_Px_NOSDATAT_NORMED_FIELD 0
+#define STV090x_WIDTH_Px_NOSDATAT_NORMED_FIELD 8
+
+#define STV090x_Px_NNOSDATAy(__x, __y) (0xf483 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_NNOSDATA0 STV090x_Px_NNOSDATAy(1, 0)
+#define STV090x_P1_NNOSDATA1 STV090x_Px_NNOSDATAy(1, 1)
+#define STV090x_P2_NNOSDATA0 STV090x_Px_NNOSDATAy(2, 0)
+#define STV090x_P2_NNOSDATA1 STV090x_Px_NNOSDATAy(2, 1)
+#define STV090x_OFFST_Px_NOSDATA_NORMED_FIELD 0
+#define STV090x_WIDTH_Px_NOSDATA_NORMED_FIELD 8
+
+#define STV090x_Px_NNOSPLHTy(__x, __y) (0xf485 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_NNOSPLHT0 STV090x_Px_NNOSPLHTy(1, 0)
+#define STV090x_P1_NNOSPLHT1 STV090x_Px_NNOSPLHTy(1, 1)
+#define STV090x_P2_NNOSPLHT0 STV090x_Px_NNOSPLHTy(2, 0)
+#define STV090x_P2_NNOSPLHT1 STV090x_Px_NNOSPLHTy(2, 1)
+#define STV090x_OFFST_Px_NOSPLHT_NORMED_FIELD 0
+#define STV090x_WIDTH_Px_NOSPLHT_NORMED_FIELD 8
+
+#define STV090x_Px_NNOSPLHy(__x, __y) (0xf487 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_NNOSPLH0 STV090x_Px_NNOSPLHy(1, 0)
+#define STV090x_P1_NNOSPLH1 STV090x_Px_NNOSPLHy(1, 1)
+#define STV090x_P2_NNOSPLH0 STV090x_Px_NNOSPLHy(2, 0)
+#define STV090x_P2_NNOSPLH1 STV090x_Px_NNOSPLHy(2, 1)
+#define STV090x_OFFST_Px_NOSPLH_NORMED_FIELD 0
+#define STV090x_WIDTH_Px_NOSPLH_NORMED_FIELD 8
+
+#define STV090x_Px_NOSDATATy(__x, __y) (0xf489 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_NOSDATAT0 STV090x_Px_NOSDATATy(1, 0)
+#define STV090x_P1_NOSDATAT1 STV090x_Px_NOSDATATy(1, 1)
+#define STV090x_P2_NOSDATAT0 STV090x_Px_NOSDATATy(2, 0)
+#define STV090x_P2_NOSDATAT1 STV090x_Px_NOSDATATy(2, 1)
+#define STV090x_OFFST_Px_NOSDATAT_UNNORMED_FIELD 0
+#define STV090x_WIDTH_Px_NOSDATAT_UNNORMED_FIELD 8
+
+#define STV090x_Px_NOSDATAy(__x, __y) (0xf48b - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_NOSDATA0 STV090x_Px_NOSDATAy(1, 0)
+#define STV090x_P1_NOSDATA1 STV090x_Px_NOSDATAy(1, 1)
+#define STV090x_P2_NOSDATA0 STV090x_Px_NOSDATAy(2, 0)
+#define STV090x_P2_NOSDATA1 STV090x_Px_NOSDATAy(2, 1)
+#define STV090x_OFFST_Px_NOSDATA_UNNORMED_FIELD 0
+#define STV090x_WIDTH_Px_NOSDATA_UNNORMED_FIELD 8
+
+#define STV090x_Px_NOSPLHTy(__x, __y) (0xf48d - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_NOSPLHT0 STV090x_Px_NOSPLHTy(1, 0)
+#define STV090x_P1_NOSPLHT1 STV090x_Px_NOSPLHTy(1, 1)
+#define STV090x_P2_NOSPLHT0 STV090x_Px_NOSPLHTy(2, 0)
+#define STV090x_P2_NOSPLHT1 STV090x_Px_NOSPLHTy(2, 1)
+#define STV090x_OFFST_Px_NOSPLHT_UNNORMED_FIELD 0
+#define STV090x_WIDTH_Px_NOSPLHT_UNNORMED_FIELD 8
+
+#define STV090x_Px_NOSPLHy(__x, __y) (0xf48f - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_NOSPLH0 STV090x_Px_NOSPLHy(1, 0)
+#define STV090x_P1_NOSPLH1 STV090x_Px_NOSPLHy(1, 1)
+#define STV090x_P2_NOSPLH0 STV090x_Px_NOSPLHy(2, 0)
+#define STV090x_P2_NOSPLH1 STV090x_Px_NOSPLHy(2, 1)
+#define STV090x_OFFST_Px_NOSPLH_UNNORMED_FIELD 0
+#define STV090x_WIDTH_Px_NOSPLH_UNNORMED_FIELD 8
+
+#define STV090x_Px_CAR2CFG(__x) (0xf490 - (__x - 1) * 0x200)
+#define STV090x_P1_CAR2CFG STV090x_Px_CAR2CFG(1)
+#define STV090x_P2_CAR2CFG STV090x_Px_CAR2CFG(2)
+#define STV090x_OFFST_Px_PN4_SELECT_FIELD 6
+#define STV090x_WIDTH_Px_PN4_SELECT_FIELD 1
+#define STV090x_OFFST_Px_CFR2_STOPDVBS1_FIELD 5
+#define STV090x_WIDTH_Px_CFR2_STOPDVBS1_FIELD 1
+#define STV090x_OFFST_Px_ROTA2ON_FIELD 2
+#define STV090x_WIDTH_Px_ROTA2ON_FIELD 1
+#define STV090x_OFFST_Px_PH_DET_ALGO2_FIELD 0
+#define STV090x_WIDTH_Px_PH_DET_ALGO2_FIELD 2
+
+#define STV090x_Px_ACLC2(__x) (0xf491 - (__x - 1) * 0x200)
+#define STV090x_P1_ACLC2 STV090x_Px_ACLC2(1)
+#define STV090x_P2_ACLC2 STV090x_Px_ACLC2(2)
+#define STV090x_OFFST_Px_CAR2_ALPHA_MANT_FIELD 4
+#define STV090x_WIDTH_Px_CAR2_ALPHA_MANT_FIELD 2
+#define STV090x_OFFST_Px_CAR2_ALPHA_EXP_FIELD 0
+#define STV090x_WIDTH_Px_CAR2_ALPHA_EXP_FIELD 4
+
+#define STV090x_Px_BCLC2(__x) (0xf492 - (__x - 1) * 0x200)
+#define STV090x_P1_BCLC2 STV090x_Px_BCLC2(1)
+#define STV090x_P2_BCLC2 STV090x_Px_BCLC2(2)
+#define STV090x_OFFST_Px_CAR2_BETA_MANT_FIELD 4
+#define STV090x_WIDTH_Px_CAR2_BETA_MANT_FIELD 2
+#define STV090x_OFFST_Px_CAR2_BETA_EXP_FIELD 0
+#define STV090x_WIDTH_Px_CAR2_BETA_EXP_FIELD 4
+
+#define STV090x_Px_ACLC2S2Q(__x) (0xf497 - (__x - 1) * 0x200)
+#define STV090x_P1_ACLC2S2Q STV090x_Px_ACLC2S2Q(1)
+#define STV090x_P2_ACLC2S2Q STV090x_Px_ACLC2S2Q(2)
+#define STV090x_OFFST_Px_ENAB_SPSKSYMB_FIELD 7
+#define STV090x_WIDTH_Px_ENAB_SPSKSYMB_FIELD 1
+#define STV090x_OFFST_Px_CAR2S2_Q_ALPH_M_FIELD 4
+#define STV090x_WIDTH_Px_CAR2S2_Q_ALPH_M_FIELD 2
+#define STV090x_OFFST_Px_CAR2S2_Q_ALPH_E_FIELD 0
+#define STV090x_WIDTH_Px_CAR2S2_Q_ALPH_E_FIELD 4
+
+#define STV090x_Px_ACLC2S28(__x) (0xf498 - (__x - 1) * 0x200)
+#define STV090x_P1_ACLC2S28 STV090x_Px_ACLC2S28(1)
+#define STV090x_P2_ACLC2S28 STV090x_Px_ACLC2S28(2)
+#define STV090x_OFFST_Px_CAR2S2_8_ALPH_M_FIELD 4
+#define STV090x_WIDTH_Px_CAR2S2_8_ALPH_M_FIELD 2
+#define STV090x_OFFST_Px_CAR2S2_8_ALPH_E_FIELD 0
+#define STV090x_WIDTH_Px_CAR2S2_8_ALPH_E_FIELD 4
+
+#define STV090x_Px_ACLC2S216A(__x) (0xf499 - (__x - 1) * 0x200)
+#define STV090x_P1_ACLC2S216A STV090x_Px_ACLC2S216A(1)
+#define STV090x_P2_ACLC2S216A STV090x_Px_ACLC2S216A(2)
+#define STV090x_OFFST_Px_CAR2S2_16A_ALPH_M_FIELD 4
+#define STV090x_WIDTH_Px_CAR2S2_16A_ALPH_M_FIELD 2
+#define STV090x_OFFST_Px_CAR2S2_16A_ALPH_E_FIELD 0
+#define STV090x_WIDTH_Px_CAR2S2_16A_ALPH_E_FIELD 4
+
+#define STV090x_Px_ACLC2S232A(__x) (0xf499 - (__x - 1) * 0x200)
+#define STV090x_P1_ACLC2S232A STV090x_Px_ACLC2S232A(1)
+#define STV090x_P2_ACLC2S232A STV090x_Px_ACLC2S232A(2)
+#define STV090x_OFFST_Px_CAR2S2_32A_ALPH_M_FIELD 4
+#define STV090x_WIDTH_Px_CAR2S2_32A_ALPH_M_FIELD 2
+#define STV090x_OFFST_Px_CAR2S2_32A_ALPH_E_FIELD 0
+#define STV090x_WIDTH_Px_CAR2S2_32A_ALPH_E_FIELD 4
+
+#define STV090x_Px_BCLC2S2Q(__x) (0xf49c - (__x - 1) * 0x200)
+#define STV090x_P1_BCLC2S2Q STV090x_Px_BCLC2S2Q(1)
+#define STV090x_P2_BCLC2S2Q STV090x_Px_BCLC2S2Q(2)
+#define STV090x_OFFST_Px_CAR2S2_Q_BETA_M_FIELD 4
+#define STV090x_WIDTH_Px_CAR2S2_Q_BETA_M_FIELD 2
+#define STV090x_OFFST_Px_CAR2S2_Q_BETA_E_FIELD 0
+#define STV090x_WIDTH_Px_CAR2S2_Q_BETA_E_FIELD 4
+
+#define STV090x_Px_BCLC2S28(__x) (0xf49d - (__x - 1) * 0x200)
+#define STV090x_P1_BCLC2S28 STV090x_Px_BCLC2S28(1)
+#define STV090x_P2_BCLC2S28 STV090x_Px_BCLC2S28(2)
+#define STV090x_OFFST_Px_CAR2S2_8_BETA_M_FIELD 4
+#define STV090x_WIDTH_Px_CAR2S2_8_BETA_M_FIELD 2
+#define STV090x_OFFST_Px_CAR2S2_8_BETA_E_FIELD 0
+#define STV090x_WIDTH_Px_CAR2S2_8_BETA_E_FIELD 4
+
+#define STV090x_Px_BCLC2S216A(__x) (0xf49d - (__x - 1) * 0x200)
+#define STV090x_P1_BCLC2S216A STV090x_Px_BCLC2S216A(1)
+#define STV090x_P2_BCLC2S216A STV090x_Px_BCLC2S216A(2)
+#define STV090x_OFFST_Px_CAR2S2_16A_BETA_M_FIELD 4
+#define STV090x_WIDTH_Px_CAR2S2_16A_BETA_M_FIELD 2
+#define STV090x_OFFST_Px_CAR2S2_16A_BETA_E_FIELD 0
+#define STV090x_WIDTH_Px_CAR2S2_16A_BETA_E_FIELD 4
+
+#define STV090x_Px_BCLC2S232A(__x) (0xf49d - (__x - 1) * 0x200)
+#define STV090x_P1_BCLC2S232A STV090x_Px_BCLC2S232A(1)
+#define STV090x_P2_BCLC2S232A STV090x_Px_BCLC2S232A(2)
+#define STV090x_OFFST_Px_CAR2S2_32A_BETA_M_FIELD 4
+#define STV090x_WIDTH_Px_CAR2S2_32A_BETA_M_FIELD 2
+#define STV090x_OFFST_Px_CAR2S2_32A_BETA_E_FIELD 0
+#define STV090x_WIDTH_Px_CAR2S2_32A_BETA_E_FIELD 4
+
+#define STV090x_Px_PLROOT2(__x) (0xf4ac - (__x - 1) * 0x200)
+#define STV090x_P1_PLROOT2 STV090x_Px_PLROOT2(1)
+#define STV090x_P2_PLROOT2 STV090x_Px_PLROOT2(2)
+#define STV090x_OFFST_Px_PLSCRAMB_MODE_FIELD 2
+#define STV090x_WIDTH_Px_PLSCRAMB_MODE_FIELD 2
+#define STV090x_OFFST_Px_PLSCRAMB_ROOT_FIELD 0
+#define STV090x_WIDTH_Px_PLSCRAMB_ROOT_FIELD 2
+
+#define STV090x_Px_PLROOT1(__x) (0xf4ad - (__x - 1) * 0x200)
+#define STV090x_P1_PLROOT1 STV090x_Px_PLROOT1(1)
+#define STV090x_P2_PLROOT1 STV090x_Px_PLROOT1(2)
+#define STV090x_OFFST_Px_PLSCRAMB_ROOT1_FIELD 0
+#define STV090x_WIDTH_Px_PLSCRAMB_ROOT1_FIELD 8
+
+#define STV090x_Px_PLROOT0(__x) (0xf4ae - (__x - 1) * 0x200)
+#define STV090x_P1_PLROOT0 STV090x_Px_PLROOT0(1)
+#define STV090x_P2_PLROOT0 STV090x_Px_PLROOT0(2)
+#define STV090x_OFFST_Px_PLSCRAMB_ROOT0_FIELD 0
+#define STV090x_WIDTH_Px_PLSCRAMB_ROOT0_FIELD 8
+
+#define STV090x_Px_MODCODLST0(__x) (0xf4b0 - (__x - 1) * 0x200) /* check */
+#define STV090x_P1_MODCODLST0 STV090x_Px_MODCODLST0(1)
+#define STV090x_P2_MODCODLST0 STV090x_Px_MODCODLST0(2)
+
+#define STV090x_Px_MODCODLST1(__x) (0xf4b1 - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLST1 STV090x_Px_MODCODLST1(1)
+#define STV090x_P2_MODCODLST1 STV090x_Px_MODCODLST1(2)
+#define STV090x_OFFST_Px_DIS_MODCOD29_FIELD 4
+#define STV090x_WIDTH_Px_DIS_MODCOD29_FIELD 4
+#define STV090x_OFFST_Px_DIS_32PSK_9_10_FIELD 0
+#define STV090x_WIDTH_Px_DIS_32PSK_9_10_FIELD 4
+
+#define STV090x_Px_MODCODLST2(__x) (0xf4b2 - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLST2 STV090x_Px_MODCODLST2(1)
+#define STV090x_P2_MODCODLST2 STV090x_Px_MODCODLST2(2)
+#define STV090x_OFFST_Px_DIS_32PSK_8_9_FIELD 4
+#define STV090x_WIDTH_Px_DIS_32PSK_8_9_FIELD 4
+#define STV090x_OFFST_Px_DIS_32PSK_5_6_FIELD 0
+#define STV090x_WIDTH_Px_DIS_32PSK_5_6_FIELD 4
+
+#define STV090x_Px_MODCODLST3(__x) (0xf4b3 - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLST3 STV090x_Px_MODCODLST3(1)
+#define STV090x_P2_MODCODLST3 STV090x_Px_MODCODLST3(2)
+#define STV090x_OFFST_Px_DIS_32PSK_4_5_FIELD 4
+#define STV090x_WIDTH_Px_DIS_32PSK_4_5_FIELD 4
+#define STV090x_OFFST_Px_DIS_32PSK_3_4_FIELD 0
+#define STV090x_WIDTH_Px_DIS_32PSK_3_4_FIELD 4
+
+#define STV090x_Px_MODCODLST4(__x) (0xf4b4 - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLST4 STV090x_Px_MODCODLST4(1)
+#define STV090x_P2_MODCODLST4 STV090x_Px_MODCODLST4(2)
+#define STV090x_OFFST_Px_DIS_16PSK_9_10_FIELD 4
+#define STV090x_WIDTH_Px_DIS_16PSK_9_10_FIELD 4
+#define STV090x_OFFST_Px_DIS_16PSK_8_9_FIELD 0
+#define STV090x_WIDTH_Px_DIS_16PSK_8_9_FIELD 4
+
+#define STV090x_Px_MODCODLST5(__x) (0xf4b5 - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLST5 STV090x_Px_MODCODLST5(1)
+#define STV090x_P2_MODCODLST5 STV090x_Px_MODCODLST5(2)
+#define STV090x_OFFST_Px_DIS_16PSK_5_6_FIELD 4
+#define STV090x_WIDTH_Px_DIS_16PSK_5_6_FIELD 4
+#define STV090x_OFFST_Px_DIS_16PSK_4_5_FIELD 0
+#define STV090x_WIDTH_Px_DIS_16PSK_4_5_FIELD 4
+
+#define STV090x_Px_MODCODLST6(__x) (0xf4b6 - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLST6 STV090x_Px_MODCODLST6(1)
+#define STV090x_P2_MODCODLST6 STV090x_Px_MODCODLST6(2)
+#define STV090x_OFFST_Px_DIS_16PSK_3_4_FIELD 4
+#define STV090x_WIDTH_Px_DIS_16PSK_3_4_FIELD 4
+#define STV090x_OFFST_Px_DIS_16PSK_2_3_FIELD 0
+#define STV090x_WIDTH_Px_DIS_16PSK_2_3_FIELD 4
+
+#define STV090x_Px_MODCODLST7(__x) (0xf4b7 - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLST7 STV090x_Px_MODCODLST7(1)
+#define STV090x_P2_MODCODLST7 STV090x_Px_MODCODLST7(2)
+#define STV090x_OFFST_Px_DIS_8P_9_10_FIELD 4
+#define STV090x_WIDTH_Px_DIS_8P_9_10_FIELD 4
+#define STV090x_OFFST_Px_DIS_8P_8_9_FIELD 0
+#define STV090x_WIDTH_Px_DIS_8P_8_9_FIELD 4
+
+#define STV090x_Px_MODCODLST8(__x) (0xf4b8 - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLST8 STV090x_Px_MODCODLST8(1)
+#define STV090x_P2_MODCODLST8 STV090x_Px_MODCODLST8(2)
+#define STV090x_OFFST_Px_DIS_8P_5_6_FIELD 4
+#define STV090x_WIDTH_Px_DIS_8P_5_6_FIELD 4
+#define STV090x_OFFST_Px_DIS_8P_3_4_FIELD 0
+#define STV090x_WIDTH_Px_DIS_8P_3_4_FIELD 4
+
+#define STV090x_Px_MODCODLST9(__x) (0xf4b9 - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLST9 STV090x_Px_MODCODLST9(1)
+#define STV090x_P2_MODCODLST9 STV090x_Px_MODCODLST9(2)
+#define STV090x_OFFST_Px_DIS_8P_2_3_FIELD 4
+#define STV090x_WIDTH_Px_DIS_8P_2_3_FIELD 4
+#define STV090x_OFFST_Px_DIS_8P_3_5_FIELD 0
+#define STV090x_WIDTH_Px_DIS_8P_3_5_FIELD 4
+
+#define STV090x_Px_MODCODLSTA(__x) (0xf4ba - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLSTA STV090x_Px_MODCODLSTA(1)
+#define STV090x_P2_MODCODLSTA STV090x_Px_MODCODLSTA(2)
+#define STV090x_OFFST_Px_DIS_QP_9_10_FIELD 4
+#define STV090x_WIDTH_Px_DIS_QP_9_10_FIELD 4
+#define STV090x_OFFST_Px_DIS_QP_8_9_FIELD 0
+#define STV090x_WIDTH_Px_DIS_QP_8_9_FIELD 4
+
+#define STV090x_Px_MODCODLSTB(__x) (0xf4bb - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLSTB STV090x_Px_MODCODLSTB(1)
+#define STV090x_P2_MODCODLSTB STV090x_Px_MODCODLSTB(2)
+#define STV090x_OFFST_Px_DIS_QP_5_6_FIELD 4
+#define STV090x_WIDTH_Px_DIS_QP_5_6_FIELD 4
+#define STV090x_OFFST_Px_DIS_QP_4_5_FIELD 0
+#define STV090x_WIDTH_Px_DIS_QP_4_5_FIELD 4
+
+#define STV090x_Px_MODCODLSTC(__x) (0xf4bc - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLSTC STV090x_Px_MODCODLSTC(1)
+#define STV090x_P2_MODCODLSTC STV090x_Px_MODCODLSTC(2)
+#define STV090x_OFFST_Px_DIS_QP_3_4_FIELD 4
+#define STV090x_WIDTH_Px_DIS_QP_3_4_FIELD 4
+#define STV090x_OFFST_Px_DIS_QP_2_3_FIELD 0
+#define STV090x_WIDTH_Px_DIS_QP_2_3_FIELD 4
+
+#define STV090x_Px_MODCODLSTD(__x) (0xf4bd - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLSTD STV090x_Px_MODCODLSTD(1)
+#define STV090x_P2_MODCODLSTD STV090x_Px_MODCODLSTD(2)
+#define STV090x_OFFST_Px_DIS_QP_3_5_FIELD 4
+#define STV090x_WIDTH_Px_DIS_QP_3_5_FIELD 4
+#define STV090x_OFFST_Px_DIS_QP_1_2_FIELD 0
+#define STV090x_WIDTH_Px_DIS_QP_1_2_FIELD 4
+
+#define STV090x_Px_MODCODLSTE(__x) (0xf4be - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLSTE STV090x_Px_MODCODLSTE(1)
+#define STV090x_P2_MODCODLSTE STV090x_Px_MODCODLSTE(2)
+#define STV090x_OFFST_Px_DIS_QP_2_5_FIELD 4
+#define STV090x_WIDTH_Px_DIS_QP_2_5_FIELD 4
+#define STV090x_OFFST_Px_DIS_QP_1_3_FIELD 0
+#define STV090x_WIDTH_Px_DIS_QP_1_3_FIELD 4
+
+#define STV090x_Px_MODCODLSTF(__x) (0xf4bf - (__x - 1) * 0x200)
+#define STV090x_P1_MODCODLSTF STV090x_Px_MODCODLSTF(1)
+#define STV090x_P2_MODCODLSTF STV090x_Px_MODCODLSTF(2)
+#define STV090x_OFFST_Px_DIS_QP_1_4_FIELD 4
+#define STV090x_WIDTH_Px_DIS_QP_1_4_FIELD 4
+
+#define STV090x_Px_GAUSSR0(__x) (0xf4c0 - (__x - 1) * 0x200)
+#define STV090x_P1_GAUSSR0 STV090x_Px_GAUSSR0(1)
+#define STV090x_P2_GAUSSR0 STV090x_Px_GAUSSR0(2)
+#define STV090x_OFFST_Px_EN_CCIMODE_FIELD 7
+#define STV090x_WIDTH_Px_EN_CCIMODE_FIELD 1
+#define STV090x_OFFST_Px_R0_GAUSSIEN_FIELD 0
+#define STV090x_WIDTH_Px_R0_GAUSSIEN_FIELD 7
+
+#define STV090x_Px_CCIR0(__x) (0xf4c1 - (__x - 1) * 0x200)
+#define STV090x_P1_CCIR0 STV090x_Px_CCIR0(1)
+#define STV090x_P2_CCIR0 STV090x_Px_CCIR0(2)
+#define STV090x_OFFST_Px_CCIDETECT_PLH_FIELD 7
+#define STV090x_WIDTH_Px_CCIDETECT_PLH_FIELD 1
+#define STV090x_OFFST_Px_R0_CCI_FIELD 0
+#define STV090x_WIDTH_Px_R0_CCI_FIELD 7
+
+#define STV090x_Px_CCIQUANT(__x) (0xf4c2 - (__x - 1) * 0x200)
+#define STV090x_P1_CCIQUANT STV090x_Px_CCIQUANT(1)
+#define STV090x_P2_CCIQUANT STV090x_Px_CCIQUANT(2)
+#define STV090x_OFFST_Px_CCI_BETA_FIELD 5
+#define STV090x_WIDTH_Px_CCI_BETA_FIELD 3
+#define STV090x_OFFST_Px_CCI_QUANT_FIELD 0
+#define STV090x_WIDTH_Px_CCI_QUANT_FIELD 5
+
+#define STV090x_Px_CCITHRESH(__x) (0xf4c3 - (__x - 1) * 0x200)
+#define STV090x_P1_CCITHRESH STV090x_Px_CCITHRESH(1)
+#define STV090x_P2_CCITHRESH STV090x_Px_CCITHRESH(2)
+#define STV090x_OFFST_Px_CCI_THRESHOLD_FIELD 0
+#define STV090x_WIDTH_Px_CCI_THRESHOLD_FIELD 8
+
+#define STV090x_Px_CCIACC(__x) (0xf4c4 - (__x - 1) * 0x200)
+#define STV090x_P1_CCIACC STV090x_Px_CCIACC(1)
+#define STV090x_P2_CCIACC STV090x_Px_CCIACC(2)
+#define STV090x_OFFST_Px_CCI_VALUE_FIELD 0
+#define STV090x_WIDTH_Px_CCI_VALUE_FIELD 8
+
+#define STV090x_Px_DMDRESCFG(__x) (0xF4C6 - (__x - 1) * 0x200)
+#define STV090x_P1_DMDRESCFG STV090x_Px_DMDRESCFG(1)
+#define STV090x_P2_DMDRESCFG STV090x_Px_DMDRESCFG(2)
+#define STV090x_OFFST_Px_DMDRES_RESET_FIELD 7
+#define STV090x_WIDTH_Px_DMDRES_RESET_FIELD 1
+
+#define STV090x_Px_DMDRESADR(__x) (0xF4C7 - (__x - 1) * 0x200)
+#define STV090x_P1_DMDRESADR STV090x_Px_DMDRESADR(1)
+#define STV090x_P2_DMDRESADR STV090x_Px_DMDRESADR(2)
+#define STV090x_OFFST_Px_DMDRES_RESNBR_FIELD 0
+#define STV090x_WIDTH_Px_DMDRES_RESNBR_FIELD 4
+
+#define STV090x_Px_DMDRESDATAy(__x, __y) (0xF4C8 - (__x - 1) * 0x200 + (7 - __y))
+#define STV090x_P1_DMDRESDATA0 STV090x_Px_DMDRESDATAy(1, 0)
+#define STV090x_P1_DMDRESDATA1 STV090x_Px_DMDRESDATAy(1, 1)
+#define STV090x_P1_DMDRESDATA2 STV090x_Px_DMDRESDATAy(1, 2)
+#define STV090x_P1_DMDRESDATA3 STV090x_Px_DMDRESDATAy(1, 3)
+#define STV090x_P1_DMDRESDATA4 STV090x_Px_DMDRESDATAy(1, 4)
+#define STV090x_P1_DMDRESDATA5 STV090x_Px_DMDRESDATAy(1, 5)
+#define STV090x_P1_DMDRESDATA6 STV090x_Px_DMDRESDATAy(1, 6)
+#define STV090x_P1_DMDRESDATA7 STV090x_Px_DMDRESDATAy(1, 7)
+#define STV090x_P2_DMDRESDATA0 STV090x_Px_DMDRESDATAy(2, 0)
+#define STV090x_P2_DMDRESDATA1 STV090x_Px_DMDRESDATAy(2, 1)
+#define STV090x_P2_DMDRESDATA2 STV090x_Px_DMDRESDATAy(2, 2)
+#define STV090x_P2_DMDRESDATA3 STV090x_Px_DMDRESDATAy(2, 3)
+#define STV090x_P2_DMDRESDATA4 STV090x_Px_DMDRESDATAy(2, 4)
+#define STV090x_P2_DMDRESDATA5 STV090x_Px_DMDRESDATAy(2, 5)
+#define STV090x_P2_DMDRESDATA6 STV090x_Px_DMDRESDATAy(2, 6)
+#define STV090x_P2_DMDRESDATA7 STV090x_Px_DMDRESDATAy(2, 7)
+#define STV090x_OFFST_Px_DMDRES_DATA_FIELD 0
+#define STV090x_WIDTH_Px_DMDRES_DATA_FIELD 8
+
+#define STV090x_Px_FFEIy(__x, __y) (0xf4d0 - (__x - 1) * 0x200 + 0x2 * (__y - 1))
+#define STV090x_P1_FFEI1 STV090x_Px_FFEIy(1, 1)
+#define STV090x_P1_FFEI2 STV090x_Px_FFEIy(1, 2)
+#define STV090x_P1_FFEI3 STV090x_Px_FFEIy(1, 3)
+#define STV090x_P1_FFEI4 STV090x_Px_FFEIy(1, 4)
+#define STV090x_P2_FFEI1 STV090x_Px_FFEIy(2, 1)
+#define STV090x_P2_FFEI2 STV090x_Px_FFEIy(2, 2)
+#define STV090x_P2_FFEI3 STV090x_Px_FFEIy(2, 3)
+#define STV090x_P2_FFEI4 STV090x_Px_FFEIy(2, 4)
+#define STV090x_OFFST_Px_FFE_ACCIy_FIELD 0
+#define STV090x_WIDTH_Px_FFE_ACCIy_FIELD 8
+
+#define STV090x_Px_FFEQy(__x, __y) (0xf4d1 - (__x - 1) * 0x200 + 0x2 * (__y - 1))
+#define STV090x_P1_FFEQ1 STV090x_Px_FFEQy(1, 1)
+#define STV090x_P1_FFEQ2 STV090x_Px_FFEQy(1, 2)
+#define STV090x_P1_FFEQ3 STV090x_Px_FFEQy(1, 3)
+#define STV090x_P1_FFEQ4 STV090x_Px_FFEQy(1, 4)
+#define STV090x_P2_FFEQ1 STV090x_Px_FFEQy(2, 1)
+#define STV090x_P2_FFEQ2 STV090x_Px_FFEQy(2, 2)
+#define STV090x_P2_FFEQ3 STV090x_Px_FFEQy(2, 3)
+#define STV090x_P2_FFEQ4 STV090x_Px_FFEQy(2, 4)
+#define STV090x_OFFST_Px_FFE_ACCQy_FIELD 0
+#define STV090x_WIDTH_Px_FFE_ACCQy_FIELD 8
+
+#define STV090x_Px_FFECFG(__x) (0xf4d8 - (__x - 1) * 0x200)
+#define STV090x_P1_FFECFG STV090x_Px_FFECFG(1)
+#define STV090x_P2_FFECFG STV090x_Px_FFECFG(2)
+#define STV090x_OFFST_Px_EQUALFFE_ON_FIELD 6
+#define STV090x_WIDTH_Px_EQUALFFE_ON_FIELD 1
+
+#define STV090x_Px_SMAPCOEF7(__x) (0xf500 - (__x - 1) * 0x200)
+#define STV090x_P1_SMAPCOEF7 STV090x_Px_SMAPCOEF7(1)
+#define STV090x_P2_SMAPCOEF7 STV090x_Px_SMAPCOEF7(2)
+#define STV090x_OFFST_Px_DIS_QSCALE_FIELD 7
+#define STV090x_WIDTH_Px_DIS_QSCALE_FIELD 1
+#define STV090x_OFFST_Px_SMAPCOEF_Q_LLR12_FIELD 0
+#define STV090x_WIDTH_Px_SMAPCOEF_Q_LLR12_FIELD 7
+
+#define STV090x_Px_SMAPCOEF6(__x) (0xf501 - (__x - 1) * 0x200)
+#define STV090x_P1_SMAPCOEF6 STV090x_Px_SMAPCOEF6(1)
+#define STV090x_P2_SMAPCOEF6 STV090x_Px_SMAPCOEF6(2)
+#define STV090x_OFFST_Px_ADJ_8PSKLLR1_FIELD 2
+#define STV090x_WIDTH_Px_ADJ_8PSKLLR1_FIELD 1
+#define STV090x_OFFST_Px_OLD_8PSKLLR1_FIELD 1
+#define STV090x_WIDTH_Px_OLD_8PSKLLR1_FIELD 1
+#define STV090x_OFFST_Px_DIS_AB8PSK_FIELD 0
+#define STV090x_WIDTH_Px_DIS_AB8PSK_FIELD 1
+
+#define STV090x_Px_SMAPCOEF5(__x) (0xf502 - (__x - 1) * 0x200)
+#define STV090x_P1_SMAPCOEF5 STV090x_Px_SMAPCOEF5(1)
+#define STV090x_P2_SMAPCOEF5 STV090x_Px_SMAPCOEF5(2)
+#define STV090x_OFFST_Px_DIS_8SCALE_FIELD 7
+#define STV090x_WIDTH_Px_DIS_8SCALE_FIELD 1
+#define STV090x_OFFST_Px_SMAPCOEF_8P_LLR23_FIELD 0
+#define STV090x_WIDTH_Px_SMAPCOEF_8P_LLR23_FIELD 7
+
+#define STV090x_Px_DMDPLHSTAT(__x) (0xF520 - (__x - 1) * 0x200)
+#define STV090x_P1_DMDPLHSTAT STV090x_Px_DMDPLHSTAT(1)
+#define STV090x_P2_DMDPLHSTAT STV090x_Px_DMDPLHSTAT(2)
+#define STV090x_OFFST_Px_PLH_STATISTIC_FIELD 0
+#define STV090x_WIDTH_Px_PLH_STATISTIC_FIELD 8
+
+#define STV090x_Px_LOCKTIMEy(__x, __y) (0xF525 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_LOCKTIME0 STV090x_Px_LOCKTIMEy(1, 0)
+#define STV090x_P1_LOCKTIME1 STV090x_Px_LOCKTIMEy(1, 1)
+#define STV090x_P1_LOCKTIME2 STV090x_Px_LOCKTIMEy(1, 2)
+#define STV090x_P1_LOCKTIME3 STV090x_Px_LOCKTIMEy(1, 3)
+#define STV090x_P2_LOCKTIME0 STV090x_Px_LOCKTIMEy(2, 0)
+#define STV090x_P2_LOCKTIME1 STV090x_Px_LOCKTIMEy(2, 1)
+#define STV090x_P2_LOCKTIME2 STV090x_Px_LOCKTIMEy(2, 2)
+#define STV090x_P2_LOCKTIME3 STV090x_Px_LOCKTIMEy(2, 3)
+#define STV090x_OFFST_Px_DEMOD_LOCKTIME_FIELD 0
+#define STV090x_WIDTH_Px_DEMOD_LOCKTIME_FIELD 8
+
+#define STV090x_Px_TNRCFG(__x) (0xf4e0 - (__x - 1) * 0x200) /* check */
+#define STV090x_P1_TNRCFG STV090x_Px_TNRCFG(1)
+#define STV090x_P2_TNRCFG STV090x_Px_TNRCFG(2)
+
+#define STV090x_Px_TNRCFG2(__x) (0xf4e1 - (__x - 1) * 0x200)
+#define STV090x_P1_TNRCFG2 STV090x_Px_TNRCFG2(1)
+#define STV090x_P2_TNRCFG2 STV090x_Px_TNRCFG2(2)
+#define STV090x_OFFST_Px_TUN_IQSWAP_FIELD 7
+#define STV090x_WIDTH_Px_TUN_IQSWAP_FIELD 1
+
+#define STV090x_Px_VITSCALE(__x) (0xf532 - (__x - 1) * 0x200)
+#define STV090x_P1_VITSCALE STV090x_Px_VITSCALE(1)
+#define STV090x_P2_VITSCALE STV090x_Px_VITSCALE(2)
+#define STV090x_OFFST_Px_NVTH_NOSRANGE_FIELD 7
+#define STV090x_WIDTH_Px_NVTH_NOSRANGE_FIELD 1
+#define STV090x_OFFST_Px_VERROR_MAXMODE_FIELD 6
+#define STV090x_WIDTH_Px_VERROR_MAXMODE_FIELD 1
+#define STV090x_OFFST_Px_NSLOWSN_LOCKED_FIELD 3
+#define STV090x_WIDTH_Px_NSLOWSN_LOCKED_FIELD 1
+#define STV090x_OFFST_Px_DIS_RSFLOCK_FIELD 1
+#define STV090x_WIDTH_Px_DIS_RSFLOCK_FIELD 1
+
+#define STV090x_Px_FECM(__x) (0xf533 - (__x - 1) * 0x200)
+#define STV090x_P1_FECM STV090x_Px_FECM(1)
+#define STV090x_P2_FECM STV090x_Px_FECM(2)
+#define STV090x_OFFST_Px_DSS_DVB_FIELD 7
+#define STV090x_WIDTH_Px_DSS_DVB_FIELD 1
+#define STV090x_OFFST_Px_DSS_SRCH_FIELD 4
+#define STV090x_WIDTH_Px_DSS_SRCH_FIELD 1
+#define STV090x_OFFST_Px_SYNCVIT_FIELD 1
+#define STV090x_WIDTH_Px_SYNCVIT_FIELD 1
+#define STV090x_OFFST_Px_IQINV_FIELD 0
+#define STV090x_WIDTH_Px_IQINV_FIELD 1
+
+#define STV090x_Px_VTH12(__x) (0xf534 - (__x - 1) * 0x200)
+#define STV090x_P1_VTH12 STV090x_Px_VTH12(1)
+#define STV090x_P2_VTH12 STV090x_Px_VTH12(2)
+#define STV090x_OFFST_Px_VTH12_FIELD 0
+#define STV090x_WIDTH_Px_VTH12_FIELD 8
+
+#define STV090x_Px_VTH23(__x) (0xf535 - (__x - 1) * 0x200)
+#define STV090x_P1_VTH23 STV090x_Px_VTH23(1)
+#define STV090x_P2_VTH23 STV090x_Px_VTH23(2)
+#define STV090x_OFFST_Px_VTH23_FIELD 0
+#define STV090x_WIDTH_Px_VTH23_FIELD 8
+
+#define STV090x_Px_VTH34(__x) (0xf536 - (__x - 1) * 0x200)
+#define STV090x_P1_VTH34 STV090x_Px_VTH34(1)
+#define STV090x_P2_VTH34 STV090x_Px_VTH34(2)
+#define STV090x_OFFST_Px_VTH34_FIELD 0
+#define STV090x_WIDTH_Px_VTH34_FIELD 8
+
+#define STV090x_Px_VTH56(__x) (0xf537 - (__x - 1) * 0x200)
+#define STV090x_P1_VTH56 STV090x_Px_VTH56(1)
+#define STV090x_P2_VTH56 STV090x_Px_VTH56(2)
+#define STV090x_OFFST_Px_VTH56_FIELD 0
+#define STV090x_WIDTH_Px_VTH56_FIELD 8
+
+#define STV090x_Px_VTH67(__x) (0xf538 - (__x - 1) * 0x200)
+#define STV090x_P1_VTH67 STV090x_Px_VTH67(1)
+#define STV090x_P2_VTH67 STV090x_Px_VTH67(2)
+#define STV090x_OFFST_Px_VTH67_FIELD 0
+#define STV090x_WIDTH_Px_VTH67_FIELD 8
+
+#define STV090x_Px_VTH78(__x) (0xf539 - (__x - 1) * 0x200)
+#define STV090x_P1_VTH78 STV090x_Px_VTH78(1)
+#define STV090x_P2_VTH78 STV090x_Px_VTH78(2)
+#define STV090x_OFFST_Px_VTH78_FIELD 0
+#define STV090x_WIDTH_Px_VTH78_FIELD 8
+
+#define STV090x_Px_VITCURPUN(__x) (0xf53a - (__x - 1) * 0x200)
+#define STV090x_P1_VITCURPUN STV090x_Px_VITCURPUN(1)
+#define STV090x_P2_VITCURPUN STV090x_Px_VITCURPUN(2)
+#define STV090x_OFFST_Px_VIT_CURPUN_FIELD 0
+#define STV090x_WIDTH_Px_VIT_CURPUN_FIELD 5
+
+#define STV090x_Px_VERROR(__x) (0xf53b - (__x - 1) * 0x200)
+#define STV090x_P1_VERROR STV090x_Px_VERROR(1)
+#define STV090x_P2_VERROR STV090x_Px_VERROR(2)
+#define STV090x_OFFST_Px_REGERR_VIT_FIELD 0
+#define STV090x_WIDTH_Px_REGERR_VIT_FIELD 8
+
+#define STV090x_Px_PRVIT(__x) (0xf53c - (__x - 1) * 0x200)
+#define STV090x_P1_PRVIT STV090x_Px_PRVIT(1)
+#define STV090x_P2_PRVIT STV090x_Px_PRVIT(2)
+#define STV090x_OFFST_Px_DIS_VTHLOCK_FIELD 6
+#define STV090x_WIDTH_Px_DIS_VTHLOCK_FIELD 1
+#define STV090x_OFFST_Px_E7_8VIT_FIELD 5
+#define STV090x_WIDTH_Px_E7_8VIT_FIELD 1
+#define STV090x_OFFST_Px_E6_7VIT_FIELD 4
+#define STV090x_WIDTH_Px_E6_7VIT_FIELD 1
+#define STV090x_OFFST_Px_E5_6VIT_FIELD 3
+#define STV090x_WIDTH_Px_E5_6VIT_FIELD 1
+#define STV090x_OFFST_Px_E3_4VIT_FIELD 2
+#define STV090x_WIDTH_Px_E3_4VIT_FIELD 1
+#define STV090x_OFFST_Px_E2_3VIT_FIELD 1
+#define STV090x_WIDTH_Px_E2_3VIT_FIELD 1
+#define STV090x_OFFST_Px_E1_2VIT_FIELD 0
+#define STV090x_WIDTH_Px_E1_2VIT_FIELD 1
+
+#define STV090x_Px_VAVSRVIT(__x) (0xf53d - (__x - 1) * 0x200)
+#define STV090x_P1_VAVSRVIT STV090x_Px_VAVSRVIT(1)
+#define STV090x_P2_VAVSRVIT STV090x_Px_VAVSRVIT(2)
+#define STV090x_OFFST_Px_SNVIT_FIELD 4
+#define STV090x_WIDTH_Px_SNVIT_FIELD 2
+#define STV090x_OFFST_Px_TOVVIT_FIELD 2
+#define STV090x_WIDTH_Px_TOVVIT_FIELD 2
+#define STV090x_OFFST_Px_HYPVIT_FIELD 0
+#define STV090x_WIDTH_Px_HYPVIT_FIELD 2
+
+#define STV090x_Px_VSTATUSVIT(__x) (0xf53e - (__x - 1) * 0x200)
+#define STV090x_P1_VSTATUSVIT STV090x_Px_VSTATUSVIT(1)
+#define STV090x_P2_VSTATUSVIT STV090x_Px_VSTATUSVIT(2)
+#define STV090x_OFFST_Px_PRFVIT_FIELD 4
+#define STV090x_WIDTH_Px_PRFVIT_FIELD 1
+#define STV090x_OFFST_Px_LOCKEDVIT_FIELD 3
+#define STV090x_WIDTH_Px_LOCKEDVIT_FIELD 1
+
+#define STV090x_Px_VTHINUSE(__x) (0xf53f - (__x - 1) * 0x200)
+#define STV090x_P1_VTHINUSE STV090x_Px_VTHINUSE(1)
+#define STV090x_P2_VTHINUSE STV090x_Px_VTHINUSE(2)
+#define STV090x_OFFST_Px_VIT_INUSE_FIELD 0
+#define STV090x_WIDTH_Px_VIT_INUSE_FIELD 8
+
+#define STV090x_Px_KDIV12(__x) (0xf540 - (__x - 1) * 0x200)
+#define STV090x_P1_KDIV12 STV090x_Px_KDIV12(1)
+#define STV090x_P2_KDIV12 STV090x_Px_KDIV12(2)
+#define STV090x_OFFST_Px_K_DIVIDER_12_FIELD 0
+#define STV090x_WIDTH_Px_K_DIVIDER_12_FIELD 7
+
+#define STV090x_Px_KDIV23(__x) (0xf541 - (__x - 1) * 0x200)
+#define STV090x_P1_KDIV23 STV090x_Px_KDIV23(1)
+#define STV090x_P2_KDIV23 STV090x_Px_KDIV23(2)
+#define STV090x_OFFST_Px_K_DIVIDER_23_FIELD 0
+#define STV090x_WIDTH_Px_K_DIVIDER_23_FIELD 7
+
+#define STV090x_Px_KDIV34(__x) (0xf542 - (__x - 1) * 0x200)
+#define STV090x_P1_KDIV34 STV090x_Px_KDIV34(1)
+#define STV090x_P2_KDIV34 STV090x_Px_KDIV34(2)
+#define STV090x_OFFST_Px_K_DIVIDER_34_FIELD 0
+#define STV090x_WIDTH_Px_K_DIVIDER_34_FIELD 7
+
+#define STV090x_Px_KDIV56(__x) (0xf543 - (__x - 1) * 0x200)
+#define STV090x_P1_KDIV56 STV090x_Px_KDIV56(1)
+#define STV090x_P2_KDIV56 STV090x_Px_KDIV56(2)
+#define STV090x_OFFST_Px_K_DIVIDER_56_FIELD 0
+#define STV090x_WIDTH_Px_K_DIVIDER_56_FIELD 7
+
+#define STV090x_Px_KDIV67(__x) (0xf544 - (__x - 1) * 0x200)
+#define STV090x_P1_KDIV67 STV090x_Px_KDIV67(1)
+#define STV090x_P2_KDIV67 STV090x_Px_KDIV67(2)
+#define STV090x_OFFST_Px_K_DIVIDER_67_FIELD 0
+#define STV090x_WIDTH_Px_K_DIVIDER_67_FIELD 7
+
+#define STV090x_Px_KDIV78(__x) (0xf545 - (__x - 1) * 0x200)
+#define STV090x_P1_KDIV78 STV090x_Px_KDIV78(1)
+#define STV090x_P2_KDIV78 STV090x_Px_KDIV78(2)
+#define STV090x_OFFST_Px_K_DIVIDER_78_FIELD 0
+#define STV090x_WIDTH_Px_K_DIVIDER_78_FIELD 7
+
+#define STV090x_Px_PDELCTRL1(__x) (0xf550 - (__x - 1) * 0x200)
+#define STV090x_P1_PDELCTRL1 STV090x_Px_PDELCTRL1(1)
+#define STV090x_P2_PDELCTRL1 STV090x_Px_PDELCTRL1(2)
+#define STV090x_OFFST_Px_INV_MISMASK_FIELD 7
+#define STV090x_WIDTH_Px_INV_MISMASK_FIELD 1
+#define STV090x_OFFST_Px_FILTER_EN_FIELD 5
+#define STV090x_WIDTH_Px_FILTER_EN_FIELD 1
+#define STV090x_OFFST_Px_EN_MIS00_FIELD 1
+#define STV090x_WIDTH_Px_EN_MIS00_FIELD 1
+#define STV090x_OFFST_Px_ALGOSWRST_FIELD 0
+#define STV090x_WIDTH_Px_ALGOSWRST_FIELD 1
+
+#define STV090x_Px_PDELCTRL2(__x) (0xf551 - (__x - 1) * 0x200)
+#define STV090x_P1_PDELCTRL2 STV090x_Px_PDELCTRL2(1)
+#define STV090x_P2_PDELCTRL2 STV090x_Px_PDELCTRL2(2)
+#define STV090x_OFFST_Px_FORCE_CONTINUOUS 7
+#define STV090x_WIDTH_Px_FORCE_CONTINUOUS 1
+#define STV090x_OFFST_Px_RESET_UPKO_COUNT 6
+#define STV090x_WIDTH_Px_RESET_UPKO_COUNT 1
+#define STV090x_OFFST_Px_USER_PKTDELIN_NB 5
+#define STV090x_WIDTH_Px_USER_PKTDELIN_NB 1
+#define STV090x_OFFST_Px_FORCE_LOCKED 4
+#define STV090x_WIDTH_Px_FORCE_LOCKED 1
+#define STV090x_OFFST_Px_DATA_UNBBSCRAM 3
+#define STV090x_WIDTH_Px_DATA_UNBBSCRAM 1
+#define STV090x_OFFST_Px_FORCE_LONGPACKET 2
+#define STV090x_WIDTH_Px_FORCE_LONGPACKET 1
+#define STV090x_OFFST_Px_FRAME_MODE_FIELD 1
+#define STV090x_WIDTH_Px_FRAME_MODE_FIELD 1
+
+#define STV090x_Px_HYSTTHRESH(__x) (0xf554 - (__x - 1) * 0x200)
+#define STV090x_P1_HYSTTHRESH STV090x_Px_HYSTTHRESH(1)
+#define STV090x_P2_HYSTTHRESH STV090x_Px_HYSTTHRESH(2)
+#define STV090x_OFFST_Px_UNLCK_THRESH_FIELD 4
+#define STV090x_WIDTH_Px_UNLCK_THRESH_FIELD 4
+#define STV090x_OFFST_Px_DELIN_LCK_THRESH_FIELD 0
+#define STV090x_WIDTH_Px_DELIN_LCK_THRESH_FIELD 4
+
+#define STV090x_Px_ISIENTRY(__x) (0xf55e - (__x - 1) * 0x200)
+#define STV090x_P1_ISIENTRY STV090x_Px_ISIENTRY(1)
+#define STV090x_P2_ISIENTRY STV090x_Px_ISIENTRY(2)
+#define STV090x_OFFST_Px_ISI_ENTRY_FIELD 0
+#define STV090x_WIDTH_Px_ISI_ENTRY_FIELD 8
+
+#define STV090x_Px_ISIBITENA(__x) (0xf55f - (__x - 1) * 0x200)
+#define STV090x_P1_ISIBITENA STV090x_Px_ISIBITENA(1)
+#define STV090x_P2_ISIBITENA STV090x_Px_ISIBITENA(2)
+#define STV090x_OFFST_Px_ISI_BIT_EN_FIELD 0
+#define STV090x_WIDTH_Px_ISI_BIT_EN_FIELD 8
+
+#define STV090x_Px_MATSTRy(__x, __y) (0xf561 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_MATSTR0 STV090x_Px_MATSTRy(1, 0)
+#define STV090x_P1_MATSTR1 STV090x_Px_MATSTRy(1, 1)
+#define STV090x_P2_MATSTR0 STV090x_Px_MATSTRy(2, 0)
+#define STV090x_P2_MATSTR1 STV090x_Px_MATSTRy(2, 1)
+#define STV090x_OFFST_Px_MATYPE_CURRENT_FIELD 0
+#define STV090x_WIDTH_Px_MATYPE_CURRENT_FIELD 8
+
+#define STV090x_Px_UPLSTRy(__x, __y) (0xf563 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_UPLSTR0 STV090x_Px_UPLSTRy(1, 0)
+#define STV090x_P1_UPLSTR1 STV090x_Px_UPLSTRy(1, 1)
+#define STV090x_P2_UPLSTR0 STV090x_Px_UPLSTRy(2, 0)
+#define STV090x_P2_UPLSTR1 STV090x_Px_UPLSTRy(2, 1)
+#define STV090x_OFFST_Px_UPL_CURRENT_FIELD 0
+#define STV090x_WIDTH_Px_UPL_CURRENT_FIELD 8
+
+#define STV090x_Px_DFLSTRy(__x, __y) (0xf565 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_DFLSTR0 STV090x_Px_DFLSTRy(1, 0)
+#define STV090x_P1_DFLSTR1 STV090x_Px_DFLSTRy(1, 1)
+#define STV090x_P2_DFLSTR0 STV090x_Px_DFLSTRy(2, 0)
+#define STV090x_P2_DFLSTR1 STV090x_Px_DFLSTRy(2, 1)
+#define STV090x_OFFST_Px_DFL_CURRENT_FIELD 0
+#define STV090x_WIDTH_Px_DFL_CURRENT_FIELD 8
+
+#define STV090x_Px_SYNCSTR(__x) (0xf566 - (__x - 1) * 0x200)
+#define STV090x_P1_SYNCSTR STV090x_Px_SYNCSTR(1)
+#define STV090x_P2_SYNCSTR STV090x_Px_SYNCSTR(2)
+#define STV090x_OFFST_Px_SYNC_CURRENT_FIELD 0
+#define STV090x_WIDTH_Px_SYNC_CURRENT_FIELD 8
+
+#define STV090x_Px_SYNCDSTRy(__x, __y) (0xf568 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_SYNCDSTR0 STV090x_Px_SYNCDSTRy(1, 0)
+#define STV090x_P1_SYNCDSTR1 STV090x_Px_SYNCDSTRy(1, 1)
+#define STV090x_P2_SYNCDSTR0 STV090x_Px_SYNCDSTRy(2, 0)
+#define STV090x_P2_SYNCDSTR1 STV090x_Px_SYNCDSTRy(2, 1)
+#define STV090x_OFFST_Px_SYNCD_CURRENT_FIELD 0
+#define STV090x_WIDTH_Px_SYNCD_CURRENT_FIELD 8
+
+#define STV090x_Px_PDELSTATUS1(__x) (0xf569 - (__x - 1) * 0x200)
+#define STV090x_P1_PDELSTATUS1 STV090x_Px_PDELSTATUS1(1)
+#define STV090x_P2_PDELSTATUS1 STV090x_Px_PDELSTATUS1(2)
+#define STV090x_OFFST_Px_PKTDELIN_LOCK_FIELD 1
+#define STV090x_WIDTH_Px_PKTDELIN_LOCK_FIELD 1
+#define STV090x_OFFST_Px_FIRST_LOCK_FIELD 0
+#define STV090x_WIDTH_Px_FIRST_LOCK_FIELD 1
+
+#define STV090x_Px_PDELSTATUS2(__x) (0xf56a - (__x - 1) * 0x200)
+#define STV090x_P1_PDELSTATUS2 STV090x_Px_PDELSTATUS2(1)
+#define STV090x_P2_PDELSTATUS2 STV090x_Px_PDELSTATUS2(2)
+#define STV090x_OFFST_Px_FRAME_MODCOD_FIELD 2
+#define STV090x_WIDTH_Px_FRAME_MODCOD_FIELD 5
+#define STV090x_OFFST_Px_FRAME_TYPE_FIELD 0
+#define STV090x_WIDTH_Px_FRAME_TYPE_FIELD 2
+
+#define STV090x_Px_BBFCRCKO1(__x) (0xf56b - (__x - 1) * 0x200)
+#define STV090x_P1_BBFCRCKO1 STV090x_Px_BBFCRCKO1(1)
+#define STV090x_P2_BBFCRCKO1 STV090x_Px_BBFCRCKO1(2)
+#define STV090x_OFFST_Px_BBHCRC_KOCNT_FIELD 0
+#define STV090x_WIDTH_Px_BBHCRC_KOCNT_FIELD 8
+
+#define STV090x_Px_BBFCRCKO0(__x) (0xf56c - (__x - 1) * 0x200)
+#define STV090x_P1_BBFCRCKO0 STV090x_Px_BBFCRCKO0(1)
+#define STV090x_P2_BBFCRCKO0 STV090x_Px_BBFCRCKO0(2)
+#define STV090x_OFFST_Px_BBHCRC_KOCNT_FIELD 0
+#define STV090x_WIDTH_Px_BBHCRC_KOCNT_FIELD 8
+
+#define STV090x_Px_UPCRCKO1(__x) (0xf56d - (__x - 1) * 0x200)
+#define STV090x_P1_UPCRCKO1 STV090x_Px_UPCRCKO1(1)
+#define STV090x_P2_UPCRCKO1 STV090x_Px_UPCRCKO1(2)
+#define STV090x_OFFST_Px_PKTCRC_KOCNT_FIELD 0
+#define STV090x_WIDTH_Px_PKTCRC_KOCNT_FIELD 8
+
+#define STV090x_Px_UPCRCKO0(__x) (0xf56e - (__x - 1) * 0x200)
+#define STV090x_P1_UPCRCKO0 STV090x_Px_UPCRCKO0(1)
+#define STV090x_P2_UPCRCKO0 STV090x_Px_UPCRCKO0(2)
+#define STV090x_OFFST_Px_PKTCRC_KOCNT_FIELD 0
+#define STV090x_WIDTH_Px_PKTCRC_KOCNT_FIELD 8
+
+#define STV090x_NBITER_NFx(__x) (0xFA03 + (__x - 4) * 0x1)
+#define STV090x_NBITER_NF4 STV090x_NBITER_NFx(4)
+#define STV090x_NBITER_NF5 STV090x_NBITER_NFx(5)
+#define STV090x_NBITER_NF6 STV090x_NBITER_NFx(6)
+#define STV090x_NBITER_NF7 STV090x_NBITER_NFx(7)
+#define STV090x_NBITER_NF8 STV090x_NBITER_NFx(8)
+#define STV090x_NBITER_NF9 STV090x_NBITER_NFx(9)
+#define STV090x_NBITER_NF10 STV090x_NBITER_NFx(10)
+#define STV090x_NBITER_NF11 STV090x_NBITER_NFx(11)
+#define STV090x_NBITER_NF12 STV090x_NBITER_NFx(12)
+#define STV090x_NBITER_NF13 STV090x_NBITER_NFx(13)
+#define STV090x_NBITER_NF14 STV090x_NBITER_NFx(14)
+#define STV090x_NBITER_NF15 STV090x_NBITER_NFx(15)
+#define STV090x_NBITER_NF16 STV090x_NBITER_NFx(16)
+#define STV090x_NBITER_NF17 STV090x_NBITER_NFx(17)
+
+#define STV090x_NBITERNOERR 0xFA3F
+#define STV090x_OFFST_NBITER_STOP_CRIT_FIELD 0
+#define STV090x_WIDTH_NBITER_STOP_CRIT_FIELD 4
+
+#define STV090x_GAINLLR_NFx(__x) (0xFA43 + (__x - 4) * 0x1)
+#define STV090x_GAINLLR_NF4 STV090x_GAINLLR_NFx(4)
+#define STV090x_OFFST_GAINLLR_NF_QP_1_2_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_QP_1_2_FIELD 7
+
+#define STV090x_GAINLLR_NF5 STV090x_GAINLLR_NFx(5)
+#define STV090x_OFFST_GAINLLR_NF_QP_3_5_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_QP_3_5_FIELD 7
+
+#define STV090x_GAINLLR_NF6 STV090x_GAINLLR_NFx(6)
+#define STV090x_OFFST_GAINLLR_NF_QP_2_3_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_QP_2_3_FIELD 7
+
+#define STV090x_GAINLLR_NF7 STV090x_GAINLLR_NFx(7)
+#define STV090x_OFFST_GAINLLR_NF_QP_3_4_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_QP_3_4_FIELD 7
+
+#define STV090x_GAINLLR_NF8 STV090x_GAINLLR_NFx(8)
+#define STV090x_OFFST_GAINLLR_NF_QP_4_5_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_QP_4_5_FIELD 7
+
+#define STV090x_GAINLLR_NF9 STV090x_GAINLLR_NFx(9)
+#define STV090x_OFFST_GAINLLR_NF_QP_5_6_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_QP_5_6_FIELD 7
+
+#define STV090x_GAINLLR_NF10 STV090x_GAINLLR_NFx(10)
+#define STV090x_OFFST_GAINLLR_NF_QP_8_9_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_QP_8_9_FIELD 7
+
+#define STV090x_GAINLLR_NF11 STV090x_GAINLLR_NFx(11)
+#define STV090x_OFFST_GAINLLR_NF_QP_9_10_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_QP_9_10_FIELD 7
+
+#define STV090x_GAINLLR_NF12 STV090x_GAINLLR_NFx(12)
+#define STV090x_OFFST_GAINLLR_NF_8P_3_5_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_8P_3_5_FIELD 7
+
+#define STV090x_GAINLLR_NF13 STV090x_GAINLLR_NFx(13)
+#define STV090x_OFFST_GAINLLR_NF_8P_2_3_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_8P_2_3_FIELD 7
+
+#define STV090x_GAINLLR_NF14 STV090x_GAINLLR_NFx(14)
+#define STV090x_OFFST_GAINLLR_NF_8P_3_4_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_8P_3_4_FIELD 7
+
+#define STV090x_GAINLLR_NF15 STV090x_GAINLLR_NFx(15)
+#define STV090x_OFFST_GAINLLR_NF_8P_5_6_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_8P_5_6_FIELD 7
+
+#define STV090x_GAINLLR_NF16 STV090x_GAINLLR_NFx(16)
+#define STV090x_OFFST_GAINLLR_NF_8P_8_9_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_8P_8_9_FIELD 7
+
+#define STV090x_GAINLLR_NF17 STV090x_GAINLLR_NFx(17)
+#define STV090x_OFFST_GAINLLR_NF_8P_9_10_FIELD 0
+#define STV090x_WIDTH_GAINLLR_NF_8P_9_10_FIELD 7
+
+#define STV090x_GENCFG 0xFA86
+#define STV090x_OFFST_BROADCAST_FIELD 4
+#define STV090x_WIDTH_BROADCAST_FIELD 1
+#define STV090x_OFFST_PRIORITY_FIELD 1
+#define STV090x_WIDTH_PRIORITY_FIELD 1
+#define STV090x_OFFST_DDEMOD_FIELD 0
+#define STV090x_WIDTH_DDEMOD_FIELD 1
+
+#define STV090x_LDPCERRx(__x) (0xFA97 - (__x * 0x1))
+#define STV090x_LDPCERR0 STV090x_LDPCERRx(0)
+#define STV090x_LDPCERR1 STV090x_LDPCERRx(1)
+#define STV090x_OFFST_Px_LDPC_ERRORS_COUNTER_FIELD 0
+#define STV090x_WIDTH_Px_LDPC_ERRORS_COUNTER_FIELD 8
+
+#define STV090x_BCHERR 0xFA98
+#define STV090x_OFFST_Px_ERRORFLAG_FIELD 4
+#define STV090x_WIDTH_Px_ERRORFLAG_FIELD 1
+#define STV090x_OFFST_Px_BCH_ERRORS_COUNTER_FIELD 0
+#define STV090x_WIDTH_Px_BCH_ERRORS_COUNTER_FIELD 4
+
+#define STV090x_Px_TSSTATEM(__x) (0xF570 - (__x - 1) * 0x200)
+#define STV090x_P1_TSSTATEM STV090x_Px_TSSTATEM(1)
+#define STV090x_P2_TSSTATEM STV090x_Px_TSSTATEM(2)
+#define STV090x_OFFST_Px_TSDIL_ON_FIELD 7
+#define STV090x_WIDTH_Px_TSDIL_ON_FIELD 1
+#define STV090x_OFFST_Px_TSRS_ON_FIELD 5
+#define STV090x_WIDTH_Px_TSRS_ON_FIELD 1
+
+#define STV090x_Px_TSCFGH(__x) (0xF572 - (__x - 1) * 0x200)
+#define STV090x_P1_TSCFGH STV090x_Px_TSCFGH(1)
+#define STV090x_P2_TSCFGH STV090x_Px_TSCFGH(2)
+#define STV090x_OFFST_Px_TSFIFO_DVBCI_FIELD 7
+#define STV090x_WIDTH_Px_TSFIFO_DVBCI_FIELD 1
+#define STV090x_OFFST_Px_TSFIFO_SERIAL_FIELD 6
+#define STV090x_WIDTH_Px_TSFIFO_SERIAL_FIELD 1
+#define STV090x_OFFST_Px_TSFIFO_TEIUPDATE_FIELD 5
+#define STV090x_WIDTH_Px_TSFIFO_TEIUPDATE_FIELD 1
+#define STV090x_OFFST_Px_TSFIFO_DUTY50_FIELD 4
+#define STV090x_WIDTH_Px_TSFIFO_DUTY50_FIELD 1
+#define STV090x_OFFST_Px_TSFIFO_HSGNLOUT_FIELD 3
+#define STV090x_WIDTH_Px_TSFIFO_HSGNLOUT_FIELD 1
+#define STV090x_OFFST_Px_TSFIFO_ERRORMODE_FIELD 1
+#define STV090x_WIDTH_Px_TSFIFO_ERRORMODE_FIELD 2
+#define STV090x_OFFST_Px_RST_HWARE_FIELD 0
+#define STV090x_WIDTH_Px_RST_HWARE_FIELD 1
+
+#define STV090x_Px_TSCFGM(__x) (0xF573 - (__x - 1) * 0x200)
+#define STV090x_P1_TSCFGM STV090x_Px_TSCFGM(1)
+#define STV090x_P2_TSCFGM STV090x_Px_TSCFGM(2)
+#define STV090x_OFFST_Px_TSFIFO_MANSPEED_FIELD 6
+#define STV090x_WIDTH_Px_TSFIFO_MANSPEED_FIELD 2
+#define STV090x_OFFST_Px_TSFIFO_PERMDATA_FIELD 5
+#define STV090x_WIDTH_Px_TSFIFO_PERMDATA_FIELD 1
+#define STV090x_OFFST_Px_TSFIFO_INVDATA_FIELD 0
+#define STV090x_WIDTH_Px_TSFIFO_INVDATA_FIELD 1
+
+#define STV090x_Px_TSCFGL(__x) (0xF574 - (__x - 1) * 0x200)
+#define STV090x_P1_TSCFGL STV090x_Px_TSCFGL(1)
+#define STV090x_P2_TSCFGL STV090x_Px_TSCFGL(2)
+#define STV090x_OFFST_Px_TSFIFO_BCLKDEL1CK_FIELD 6
+#define STV090x_WIDTH_Px_TSFIFO_BCLKDEL1CK_FIELD 2
+#define STV090x_OFFST_Px_BCHERROR_MODE_FIELD 4
+#define STV090x_WIDTH_Px_BCHERROR_MODE_FIELD 2
+#define STV090x_OFFST_Px_TSFIFO_NSGNL2DATA_FIELD 3
+#define STV090x_WIDTH_Px_TSFIFO_NSGNL2DATA_FIELD 1
+#define STV090x_OFFST_Px_TSFIFO_EMBINDVB_FIELD 2
+#define STV090x_WIDTH_Px_TSFIFO_EMBINDVB_FIELD 1
+#define STV090x_OFFST_Px_TSFIFO_DPUNACT_FIELD 1
+#define STV090x_WIDTH_Px_TSFIFO_DPUNACT_FIELD 1
+
+#define STV090x_Px_TSINSDELH(__x) (0xF576 - (__x - 1) * 0x200)
+#define STV090x_P1_TSINSDELH STV090x_Px_TSINSDELH(1)
+#define STV090x_P2_TSINSDELH STV090x_Px_TSINSDELH(2)
+#define STV090x_OFFST_Px_TSDEL_SYNCBYTE_FIELD 7
+#define STV090x_WIDTH_Px_TSDEL_SYNCBYTE_FIELD 1
+#define STV090x_OFFST_Px_TSDEL_XXHEADER_FIELD 6
+#define STV090x_WIDTH_Px_TSDEL_XXHEADER_FIELD 1
+
+#define STV090x_Px_TSSPEED(__x) (0xF580 - (__x - 1) * 0x200)
+#define STV090x_P1_TSSPEED STV090x_Px_TSSPEED(1)
+#define STV090x_P2_TSSPEED STV090x_Px_TSSPEED(2)
+#define STV090x_OFFST_Px_TSFIFO_OUTSPEED_FIELD 0
+#define STV090x_WIDTH_Px_TSFIFO_OUTSPEED_FIELD 8
+
+#define STV090x_Px_TSSTATUS(__x) (0xF581 - (__x - 1) * 0x200)
+#define STV090x_P1_TSSTATUS STV090x_Px_TSSTATUS(1)
+#define STV090x_P2_TSSTATUS STV090x_Px_TSSTATUS(2)
+#define STV090x_OFFST_Px_TSFIFO_LINEOK_FIELD 7
+#define STV090x_WIDTH_Px_TSFIFO_LINEOK_FIELD 1
+#define STV090x_OFFST_Px_TSFIFO_ERROR_FIELD 6
+#define STV090x_WIDTH_Px_TSFIFO_ERROR_FIELD 1
+
+#define STV090x_Px_TSSTATUS2(__x) (0xF582 - (__x - 1) * 0x200)
+#define STV090x_P1_TSSTATUS2 STV090x_Px_TSSTATUS2(1)
+#define STV090x_P2_TSSTATUS2 STV090x_Px_TSSTATUS2(2)
+#define STV090x_OFFST_Px_TSFIFO_DEMODSEL_FIELD 7
+#define STV090x_WIDTH_Px_TSFIFO_DEMODSEL_FIELD 1
+#define STV090x_OFFST_Px_TSFIFOSPEED_STORE_FIELD 6
+#define STV090x_WIDTH_Px_TSFIFOSPEED_STORE_FIELD 1
+#define STV090x_OFFST_Px_DILXX_RESET_FIELD 5
+#define STV090x_WIDTH_Px_DILXX_RESET_FIELD 1
+#define STV090x_OFFST_Px_TSSERIAL_IMPOS_FIELD 5
+#define STV090x_WIDTH_Px_TSSERIAL_IMPOS_FIELD 1
+#define STV090x_OFFST_Px_SCRAMBDETECT_FIELD 1
+#define STV090x_WIDTH_Px_SCRAMBDETECT_FIELD 1
+
+#define STV090x_Px_TSBITRATEy(__x, __y) (0xF584 - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_TSBITRATE0 STV090x_Px_TSBITRATEy(1, 0)
+#define STV090x_P1_TSBITRATE1 STV090x_Px_TSBITRATEy(1, 1)
+#define STV090x_P2_TSBITRATE0 STV090x_Px_TSBITRATEy(2, 0)
+#define STV090x_P2_TSBITRATE1 STV090x_Px_TSBITRATEy(2, 1)
+#define STV090x_OFFST_Px_TSFIFO_BITRATE_FIELD 7
+#define STV090x_WIDTH_Px_TSFIFO_BITRATE_FIELD 8
+
+#define STV090x_Px_ERRCTRL1(__x) (0xF598 - (__x - 1) * 0x200)
+#define STV090x_P1_ERRCTRL1 STV090x_Px_ERRCTRL1(1)
+#define STV090x_P2_ERRCTRL1 STV090x_Px_ERRCTRL1(2)
+#define STV090x_OFFST_Px_ERR_SOURCE_FIELD 4
+#define STV090x_WIDTH_Px_ERR_SOURCE_FIELD 4
+#define STV090x_OFFST_Px_NUM_EVENT_FIELD 0
+#define STV090x_WIDTH_Px_NUM_EVENT_FIELD 3
+
+#define STV090x_Px_ERRCNT12(__x) (0xF599 - (__x - 1) * 0x200)
+#define STV090x_P1_ERRCNT12 STV090x_Px_ERRCNT12(1)
+#define STV090x_P2_ERRCNT12 STV090x_Px_ERRCNT12(2)
+#define STV090x_OFFST_Px_ERRCNT1_OLDVALUE_FIELD 7
+#define STV090x_WIDTH_Px_ERRCNT1_OLDVALUE_FIELD 1
+#define STV090x_OFFST_Px_ERR_CNT12_FIELD 0
+#define STV090x_WIDTH_Px_ERR_CNT12_FIELD 7
+
+#define STV090x_Px_ERRCNT11(__x) (0xF59A - (__x - 1) * 0x200)
+#define STV090x_P1_ERRCNT11 STV090x_Px_ERRCNT11(1)
+#define STV090x_P2_ERRCNT11 STV090x_Px_ERRCNT11(2)
+#define STV090x_OFFST_Px_ERR_CNT11_FIELD 0
+#define STV090x_WIDTH_Px_ERR_CNT11_FIELD 8
+
+#define STV090x_Px_ERRCNT10(__x) (0xF59B - (__x - 1) * 0x200)
+#define STV090x_P1_ERRCNT10 STV090x_Px_ERRCNT10(1)
+#define STV090x_P2_ERRCNT10 STV090x_Px_ERRCNT10(2)
+#define STV090x_OFFST_Px_ERR_CNT10_FIELD 0
+#define STV090x_WIDTH_Px_ERR_CNT10_FIELD 8
+
+#define STV090x_Px_ERRCTRL2(__x) (0xF59C - (__x - 1) * 0x200)
+#define STV090x_P1_ERRCTRL2 STV090x_Px_ERRCTRL2(1)
+#define STV090x_P2_ERRCTRL2 STV090x_Px_ERRCTRL2(2)
+#define STV090x_OFFST_Px_ERR_SOURCE2_FIELD 4
+#define STV090x_WIDTH_Px_ERR_SOURCE2_FIELD 4
+#define STV090x_OFFST_Px_NUM_EVENT2_FIELD 0
+#define STV090x_WIDTH_Px_NUM_EVENT2_FIELD 3
+
+#define STV090x_Px_ERRCNT22(__x) (0xF59D - (__x - 1) * 0x200)
+#define STV090x_P1_ERRCNT22 STV090x_Px_ERRCNT22(1)
+#define STV090x_P2_ERRCNT22 STV090x_Px_ERRCNT22(2)
+#define STV090x_OFFST_Px_ERRCNT2_OLDVALUE_FIELD 7
+#define STV090x_WIDTH_Px_ERRCNT2_OLDVALUE_FIELD 1
+#define STV090x_OFFST_Px_ERR_CNT2_FIELD 0
+#define STV090x_WIDTH_Px_ERR_CNT2_FIELD 7
+
+#define STV090x_Px_ERRCNT21(__x) (0xF59E - (__x - 1) * 0x200)
+#define STV090x_P1_ERRCNT21 STV090x_Px_ERRCNT21(1)
+#define STV090x_P2_ERRCNT21 STV090x_Px_ERRCNT21(2)
+#define STV090x_OFFST_Px_ERR_CNT21_FIELD 0
+#define STV090x_WIDTH_Px_ERR_CNT21_FIELD 8
+
+#define STV090x_Px_ERRCNT20(__x) (0xF59F - (__x - 1) * 0x200)
+#define STV090x_P1_ERRCNT20 STV090x_Px_ERRCNT20(1)
+#define STV090x_P2_ERRCNT20 STV090x_Px_ERRCNT20(2)
+#define STV090x_OFFST_Px_ERR_CNT20_FIELD 0
+#define STV090x_WIDTH_Px_ERR_CNT20_FIELD 8
+
+#define STV090x_Px_FECSPY(__x) (0xF5A0 - (__x - 1) * 0x200)
+#define STV090x_P1_FECSPY STV090x_Px_FECSPY(1)
+#define STV090x_P2_FECSPY STV090x_Px_FECSPY(2)
+#define STV090x_OFFST_Px_SPY_ENABLE_FIELD 7
+#define STV090x_WIDTH_Px_SPY_ENABLE_FIELD 1
+#define STV090x_OFFST_Px_BERMETER_DATAMAODE_FIELD 2
+#define STV090x_WIDTH_Px_BERMETER_DATAMAODE_FIELD 2
+
+#define STV090x_Px_FSPYCFG(__x) (0xF5A1 - (__x - 1) * 0x200)
+#define STV090x_P1_FSPYCFG STV090x_Px_FSPYCFG(1)
+#define STV090x_P2_FSPYCFG STV090x_Px_FSPYCFG(2)
+#define STV090x_OFFST_Px_RST_ON_ERROR_FIELD 5
+#define STV090x_WIDTH_Px_RST_ON_ERROR_FIELD 1
+#define STV090x_OFFST_Px_ONE_SHOT_FIELD 4
+#define STV090x_WIDTH_Px_ONE_SHOT_FIELD 1
+#define STV090x_OFFST_Px_I2C_MODE_FIELD 2
+#define STV090x_WIDTH_Px_I2C_MODE_FIELD 2
+
+#define STV090x_Px_FSPYDATA(__x) (0xF5A2 - (__x - 1) * 0x200)
+#define STV090x_P1_FSPYDATA STV090x_Px_FSPYDATA(1)
+#define STV090x_P2_FSPYDATA STV090x_Px_FSPYDATA(2)
+#define STV090x_OFFST_Px_SPY_STUFFING_FIELD 7
+#define STV090x_WIDTH_Px_SPY_STUFFING_FIELD 1
+#define STV090x_OFFST_Px_SPY_CNULLPKT_FIELD 5
+#define STV090x_WIDTH_Px_SPY_CNULLPKT_FIELD 1
+#define STV090x_OFFST_Px_SPY_OUTDATA_MODE_FIELD 0
+#define STV090x_WIDTH_Px_SPY_OUTDATA_MODE_FIELD 5
+
+#define STV090x_Px_FSPYOUT(__x) (0xF5A3 - (__x - 1) * 0x200)
+#define STV090x_P1_FSPYOUT STV090x_Px_FSPYOUT(1)
+#define STV090x_P2_FSPYOUT STV090x_Px_FSPYOUT(2)
+#define STV090x_OFFST_Px_FSPY_DIRECT_FIELD 7
+#define STV090x_WIDTH_Px_FSPY_DIRECT_FIELD 1
+#define STV090x_OFFST_Px_STUFF_MODE_FIELD 0
+#define STV090x_WIDTH_Px_STUFF_MODE_FIELD 3
+
+#define STV090x_Px_FSTATUS(__x) (0xF5A4 - (__x - 1) * 0x200)
+#define STV090x_P1_FSTATUS STV090x_Px_FSTATUS(1)
+#define STV090x_P2_FSTATUS STV090x_Px_FSTATUS(2)
+#define STV090x_OFFST_Px_SPY_ENDSIM_FIELD 7
+#define STV090x_WIDTH_Px_SPY_ENDSIM_FIELD 1
+#define STV090x_OFFST_Px_VALID_SIM_FIELD 6
+#define STV090x_WIDTH_Px_VALID_SIM_FIELD 1
+#define STV090x_OFFST_Px_FOUND_SIGNAL_FIELD 5
+#define STV090x_WIDTH_Px_FOUND_SIGNAL_FIELD 1
+#define STV090x_OFFST_Px_DSS_SYNCBYTE_FIELD 4
+#define STV090x_WIDTH_Px_DSS_SYNCBYTE_FIELD 1
+#define STV090x_OFFST_Px_RESULT_STATE_FIELD 0
+#define STV090x_WIDTH_Px_RESULT_STATE_FIELD 4
+
+#define STV090x_Px_FBERCPT4(__x) (0xF5A8 - (__x - 1) * 0x200)
+#define STV090x_P1_FBERCPT4 STV090x_Px_FBERCPT4(1)
+#define STV090x_P2_FBERCPT4 STV090x_Px_FBERCPT4(2)
+#define STV090x_OFFST_Px_FBERMETER_CPT_FIELD 0
+#define STV090x_WIDTH_Px_FBERMETER_CPT_FIELD 8
+
+#define STV090x_Px_FBERCPT3(__x) (0xF5A9 - (__x - 1) * 0x200)
+#define STV090x_P1_FBERCPT3 STV090x_Px_FBERCPT3(1)
+#define STV090x_P2_FBERCPT3 STV090x_Px_FBERCPT3(2)
+#define STV090x_OFFST_Px_FBERMETER_CPT_FIELD 0
+#define STV090x_WIDTH_Px_FBERMETER_CPT_FIELD 8
+
+#define STV090x_Px_FBERCPT2(__x) (0xF5AA - (__x - 1) * 0x200)
+#define STV090x_P1_FBERCPT2 STV090x_Px_FBERCPT2(1)
+#define STV090x_P2_FBERCPT2 STV090x_Px_FBERCPT2(2)
+#define STV090x_OFFST_Px_FBERMETER_CPT_FIELD 0
+#define STV090x_WIDTH_Px_FBERMETER_CPT_FIELD 8
+
+#define STV090x_Px_FBERCPT1(__x) (0xF5AB - (__x - 1) * 0x200)
+#define STV090x_P1_FBERCPT1 STV090x_Px_FBERCPT1(1)
+#define STV090x_P2_FBERCPT1 STV090x_Px_FBERCPT1(2)
+#define STV090x_OFFST_Px_FBERMETER_CPT_FIELD 0
+#define STV090x_WIDTH_Px_FBERMETER_CPT_FIELD 8
+
+#define STV090x_Px_FBERCPT0(__x) (0xF5AC - (__x - 1) * 0x200)
+#define STV090x_P1_FBERCPT0 STV090x_Px_FBERCPT0(1)
+#define STV090x_P2_FBERCPT0 STV090x_Px_FBERCPT0(2)
+#define STV090x_OFFST_Px_FBERMETER_CPT_FIELD 0
+#define STV090x_WIDTH_Px_FBERMETER_CPT_FIELD 8
+
+#define STV090x_Px_FBERERRy(__x, __y) (0xF5AF - (__x - 1) * 0x200 - __y * 0x1)
+#define STV090x_P1_FBERERR0 STV090x_Px_FBERERRy(1, 0)
+#define STV090x_P1_FBERERR1 STV090x_Px_FBERERRy(1, 1)
+#define STV090x_P1_FBERERR2 STV090x_Px_FBERERRy(1, 2)
+#define STV090x_P2_FBERERR0 STV090x_Px_FBERERRy(2, 0)
+#define STV090x_P2_FBERERR1 STV090x_Px_FBERERRy(2, 1)
+#define STV090x_P2_FBERERR2 STV090x_Px_FBERERRy(2, 2)
+#define STV090x_OFFST_Px_FBERMETER_CPT_ERR_FIELD 0
+#define STV090x_WIDTH_Px_FBERMETER_CPT_ERR_FIELD 8
+
+#define STV090x_Px_FSPYBER(__x) (0xF5B2 - (__x - 1) * 0x200)
+#define STV090x_P1_FSPYBER STV090x_Px_FSPYBER(1)
+#define STV090x_P2_FSPYBER STV090x_Px_FSPYBER(2)
+#define STV090x_OFFST_Px_FSPYBER_SYNCBYTE_FIELD 4
+#define STV090x_WIDTH_Px_FSPYBER_SYNCBYTE_FIELD 1
+#define STV090x_OFFST_Px_FSPYBER_UNSYNC_FIELD 3
+#define STV090x_WIDTH_Px_FSPYBER_UNSYNC_FIELD 1
+#define STV090x_OFFST_Px_FSPYBER_CTIME_FIELD 0
+#define STV090x_WIDTH_Px_FSPYBER_CTIME_FIELD 3
+
+#define STV090x_RCCFGH 0xf600
+
+#define STV090x_TSGENERAL 0xF630
+#define STV090x_OFFST_Px_MUXSTREAM_OUT_FIELD 3
+#define STV090x_WIDTH_Px_MUXSTREAM_OUT_FIELD 1
+#define STV090x_OFFST_Px_TSFIFO_PERMPARAL_FIELD 1
+#define STV090x_WIDTH_Px_TSFIFO_PERMPARAL_FIELD 2
+
+#define STV090x_TSGENERAL1X 0xf670
+#define STV090x_CFGEXT 0xfa80
+
+#define STV090x_TSTRES0 0xFF11
+#define STV090x_OFFST_FRESFEC_FIELD 7
+#define STV090x_WIDTH_FRESFEC_FIELD 1
+
+#define STV090x_Px_TSTDISRX(__x) (0xFF67 - (__x - 1) * 0x2)
+#define STV090x_P1_TSTDISRX STV090x_Px_TSTDISRX(1)
+#define STV090x_P2_TSTDISRX STV090x_Px_TSTDISRX(2)
+#define STV090x_OFFST_Px_TSTDISRX_SELECT_FIELD 3
+#define STV090x_WIDTH_Px_TSTDISRX_SELECT_FIELD 1
+
+#endif /* __STV090x_REG_H */
diff --git a/drivers/media/dvb/frontends/stv6110x.c b/drivers/media/dvb/frontends/stv6110x.c
new file mode 100644
index 00000000000..3d8a2e01c9c
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv6110x.c
@@ -0,0 +1,373 @@
+/*
+ STV6110(A) Silicon tuner driver
+
+ Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+ Copyright (C) ST Microelectronics
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include "dvb_frontend.h"
+
+#include "stv6110x_reg.h"
+#include "stv6110x.h"
+#include "stv6110x_priv.h"
+
+static unsigned int verbose;
+module_param(verbose, uint, 0644);
+MODULE_PARM_DESC(verbose, "Set Verbosity level");
+
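+/*
+ * Shadow copy of the tuner registers 0x00 - 0x07: fields are updated in
+ * this array and then written back to the device; stv6110x_init() loads
+ * these initial values into the chip.
+ */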
+static u8 stv6110x_regs[] = {0x07, 0x11, 0xdc, 0x85, 0x17, 0x01, 0xe6, 0x1e};
+
+static int stv6110x_read_reg(struct stv6110x_state *stv6110x, u8 reg, u8 *data)
+{
+ int ret;
+ const struct stv6110x_config *config = stv6110x->config;
+ u8 b0[] = { reg };
+ u8 b1[] = { 0 };
+ struct i2c_msg msg[] = {
+ { .addr = config->addr, .flags = 0, .buf = b0, .len = 1 },
+ { .addr = config->addr, .flags = I2C_M_RD, .buf = b1, .len = 1 }
+ };
+
+ ret = i2c_transfer(stv6110x->i2c, msg, 2);
+ if (ret != 2) {
+ dprintk(FE_ERROR, 1, "I/O Error");
+ return -EREMOTEIO;
+ }
+ *data = b1[0];
+
+ return 0;
+}
+
+static int stv6110x_write_reg(struct stv6110x_state *stv6110x, u8 reg, u8 data)
+{
+ int ret;
+ const struct stv6110x_config *config = stv6110x->config;
+ u8 buf[] = { reg, data };
+ struct i2c_msg msg = { .addr = config->addr, .flags = 0, .buf = buf, .len = 2 };
+
+ ret = i2c_transfer(stv6110x->i2c, &msg, 1);
+ if (ret != 1) {
+ dprintk(FE_ERROR, 1, "I/O Error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int stv6110x_init(struct dvb_frontend *fe)
+{
+ struct stv6110x_state *stv6110x = fe->tuner_priv;
+ int ret;
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(stv6110x_regs); i++) {
+ ret = stv6110x_write_reg(stv6110x, i, stv6110x_regs[i]);
+ if (ret < 0) {
+ dprintk(FE_ERROR, 1, "Initialization failed");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int stv6110x_set_frequency(struct dvb_frontend *fe, u32 frequency)
+{
+ struct stv6110x_state *stv6110x = fe->tuner_priv;
+ u32 rDiv, divider;
+ s32 pVal, pCalc, rDivOpt = 0;
+ u8 i;
+
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_K, (REFCLOCK_MHz - 16));
+
+ if (frequency <= 1023000) {
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_DIV4SEL, 1);
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_PRESC32_ON, 0);
+ pVal = 40;
+ } else if (frequency <= 1300000) {
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_DIV4SEL, 1);
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_PRESC32_ON, 1);
+ pVal = 40;
+ } else if (frequency <= 2046000) {
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_DIV4SEL, 0);
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_PRESC32_ON, 0);
+ pVal = 20;
+ } else {
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_DIV4SEL, 0);
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_PRESC32_ON, 1);
+ pVal = 20;
+ }
+
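+ /*
+  * Scan the four possible reference dividers R_DIV(0..3) = 2, 4, 8, 16;
+  * pCalc is the reference clock in 100 kHz units divided by the
+  * candidate R divider.
+  */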
+ for (rDiv = 0; rDiv <= 3; rDiv++) {
+ pCalc = (REFCLOCK_kHz / 100) / R_DIV(rDiv);
+
+ if ((abs((s32)(pCalc - pVal))) < (abs((s32)(1000 - pVal))))
+ rDivOpt = rDiv;
+ }
+
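+ /*
+  * Compute the PLL N divider from the requested frequency (kHz), the
+  * selected R divider and the prescaler factor pVal; pVal carries a
+  * factor of ten that the (divider + 5) / 10 step removes with rounding
+  * before the result is split across TNG1/TNG0 below.
+  */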
+ divider = (frequency * R_DIV(rDivOpt) * pVal) / REFCLOCK_kHz;
+ divider = (divider + 5) / 10;
+
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_R_DIV, rDivOpt);
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_N_DIV_11_8, MSB(divider));
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG0], TNG0_N_DIV_7_0, LSB(divider));
+
+ /* VCO Auto calibration */
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_STAT1], STAT1_CALVCO_STRT, 1);
+
+ stv6110x_write_reg(stv6110x, STV6110x_CTRL1, stv6110x_regs[STV6110x_CTRL1]);
+ stv6110x_write_reg(stv6110x, STV6110x_TNG1, stv6110x_regs[STV6110x_TNG1]);
+ stv6110x_write_reg(stv6110x, STV6110x_TNG0, stv6110x_regs[STV6110x_TNG0]);
+ stv6110x_write_reg(stv6110x, STV6110x_STAT1, stv6110x_regs[STV6110x_STAT1]);
+
+ for (i = 0; i < TRIALS; i++) {
+ stv6110x_read_reg(stv6110x, STV6110x_STAT1, &stv6110x_regs[STV6110x_STAT1]);
+ if (!STV6110x_GETFIELD(STAT1_CALVCO_STRT, stv6110x_regs[STV6110x_STAT1]))
+ break;
+ msleep(1);
+ }
+
+ return 0;
+}
+
+static int stv6110x_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct stv6110x_state *stv6110x = fe->tuner_priv;
+
+ stv6110x_read_reg(stv6110x, STV6110x_TNG1, &stv6110x_regs[STV6110x_TNG1]);
+ stv6110x_read_reg(stv6110x, STV6110x_TNG0, &stv6110x_regs[STV6110x_TNG0]);
+
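+ /*
+  * Reconstruct the tuned frequency from the 12-bit N divider spread
+  * across TNG1/TNG0, the reference clock and the programmed R and
+  * DIV4SEL dividers.
+  */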
+ *frequency = (MAKEWORD16(STV6110x_GETFIELD(TNG1_N_DIV_11_8, stv6110x_regs[STV6110x_TNG1]),
+ STV6110x_GETFIELD(TNG0_N_DIV_7_0, stv6110x_regs[STV6110x_TNG0]))) * REFCLOCK_kHz;
+
+ *frequency /= (1 << (STV6110x_GETFIELD(TNG1_R_DIV, stv6110x_regs[STV6110x_TNG1]) +
+ STV6110x_GETFIELD(TNG1_DIV4SEL, stv6110x_regs[STV6110x_TNG1])));
+
+ *frequency >>= 2;
+
+ return 0;
+}
+
+static int stv6110x_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth)
+{
+ struct stv6110x_state *stv6110x = fe->tuner_priv;
+ u32 halfbw;
+ u8 i;
+
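+ /*
+  * CTRL3_CF is programmed with (half-bandwidth in MHz - 5), clamped to
+  * the 0..31 range, i.e. low-pass cut-offs from 5 MHz to 36 MHz.
+  */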
+ halfbw = bandwidth >> 1;
+
+ if (halfbw > 36000000)
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL3], CTRL3_CF, 31); /* LPF */
+ else if (halfbw < 5000000)
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL3], CTRL3_CF, 0); /* LPF */
+ else
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL3], CTRL3_CF, ((halfbw / 1000000) - 5)); /* LPF */
+
+
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL3], CTRL3_RCCLK_OFF, 0x0); /* cal. clk activated */
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_STAT1], STAT1_CALRC_STRT, 0x1); /* LPF auto cal */
+
+ stv6110x_write_reg(stv6110x, STV6110x_CTRL3, stv6110x_regs[STV6110x_CTRL3]);
+ stv6110x_write_reg(stv6110x, STV6110x_STAT1, stv6110x_regs[STV6110x_STAT1]);
+
+ for (i = 0; i < TRIALS; i++) {
+ stv6110x_read_reg(stv6110x, STV6110x_STAT1, &stv6110x_regs[STV6110x_STAT1]);
+ if (!STV6110x_GETFIELD(STAT1_CALRC_STRT, stv6110x_regs[STV6110x_STAT1]))
+ break;
+ msleep(1);
+ }
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL3], CTRL3_RCCLK_OFF, 0x1); /* cal. done */
+ stv6110x_write_reg(stv6110x, STV6110x_CTRL3, stv6110x_regs[STV6110x_CTRL3]);
+
+ return 0;
+}
+
+static int stv6110x_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+{
+ struct stv6110x_state *stv6110x = fe->tuner_priv;
+
+ stv6110x_read_reg(stv6110x, STV6110x_CTRL3, &stv6110x_regs[STV6110x_CTRL3]);
+ *bandwidth = (STV6110x_GETFIELD(CTRL3_CF, stv6110x_regs[STV6110x_CTRL3]) + 5) * 2000000;
+
+ return 0;
+}
+
+static int stv6110x_set_refclock(struct dvb_frontend *fe, u32 refclock)
+{
+ struct stv6110x_state *stv6110x = fe->tuner_priv;
+
+ /* setup divider */
+ switch (refclock) {
+ default:
+ case 1:
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL2], CTRL2_CO_DIV, 0);
+ break;
+ case 2:
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL2], CTRL2_CO_DIV, 1);
+ break;
+ case 4:
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL2], CTRL2_CO_DIV, 2);
+ break;
+ case 8:
+ case 0:
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL2], CTRL2_CO_DIV, 3);
+ break;
+ }
+ stv6110x_write_reg(stv6110x, STV6110x_CTRL2, stv6110x_regs[STV6110x_CTRL2]);
+
+ return 0;
+}
+
+static int stv6110x_get_bbgain(struct dvb_frontend *fe, u32 *gain)
+{
+ struct stv6110x_state *stv6110x = fe->tuner_priv;
+
+ stv6110x_read_reg(stv6110x, STV6110x_CTRL2, &stv6110x_regs[STV6110x_CTRL2]);
+ *gain = 2 * STV6110x_GETFIELD(CTRL2_BBGAIN, stv6110x_regs[STV6110x_CTRL2]);
+
+ return 0;
+}
+
+static int stv6110x_set_bbgain(struct dvb_frontend *fe, u32 gain)
+{
+ struct stv6110x_state *stv6110x = fe->tuner_priv;
+
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL2], CTRL2_BBGAIN, gain / 2);
+ stv6110x_write_reg(stv6110x, STV6110x_CTRL2, stv6110x_regs[STV6110x_CTRL2]);
+
+ return 0;
+}
+
+static int stv6110x_set_mode(struct dvb_frontend *fe, enum tuner_mode mode)
+{
+ struct stv6110x_state *stv6110x = fe->tuner_priv;
+ int ret;
+
+ switch (mode) {
+ case TUNER_SLEEP:
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_SYN, 0);
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_RX, 0);
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_LPT, 0);
+ break;
+
+ case TUNER_WAKE:
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_SYN, 1);
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_RX, 1);
+ STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_LPT, 1);
+ break;
+ }
+
+ ret = stv6110x_write_reg(stv6110x, STV6110x_CTRL1, stv6110x_regs[STV6110x_CTRL1]);
+ if (ret < 0) {
+ dprintk(FE_ERROR, 1, "I/O Error");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int stv6110x_sleep(struct dvb_frontend *fe)
+{
+ return stv6110x_set_mode(fe, TUNER_SLEEP);
+}
+
+static int stv6110x_get_status(struct dvb_frontend *fe, u32 *status)
+{
+ struct stv6110x_state *stv6110x = fe->tuner_priv;
+
+ stv6110x_read_reg(stv6110x, STV6110x_STAT1, &stv6110x_regs[STV6110x_STAT1]);
+
+ if (STV6110x_GETFIELD(STAT1_LOCK, stv6110x_regs[STV6110x_STAT1]))
+ *status = TUNER_PHASELOCKED;
+ else
+ *status = 0;
+
+ return 0;
+}
+
+
+static int stv6110x_release(struct dvb_frontend *fe)
+{
+ struct stv6110x_state *stv6110x = fe->tuner_priv;
+
+ fe->tuner_priv = NULL;
+ kfree(stv6110x);
+
+ return 0;
+}
+
+static struct dvb_tuner_ops stv6110x_ops = {
+ .info = {
+ .name = "STV6110(A) Silicon Tuner",
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_step = 0,
+ },
+
+ .init = stv6110x_init,
+ .sleep = stv6110x_sleep,
+ .release = stv6110x_release
+};
+
+static struct stv6110x_devctl stv6110x_ctl = {
+ .tuner_init = stv6110x_init,
+ .tuner_set_mode = stv6110x_set_mode,
+ .tuner_set_frequency = stv6110x_set_frequency,
+ .tuner_get_frequency = stv6110x_get_frequency,
+ .tuner_set_bandwidth = stv6110x_set_bandwidth,
+ .tuner_get_bandwidth = stv6110x_get_bandwidth,
+ .tuner_set_bbgain = stv6110x_set_bbgain,
+ .tuner_get_bbgain = stv6110x_get_bbgain,
+ .tuner_set_refclk = stv6110x_set_refclock,
+ .tuner_get_status = stv6110x_get_status,
+};
+
+struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+ const struct stv6110x_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct stv6110x_state *stv6110x;
+
+ stv6110x = kzalloc(sizeof(struct stv6110x_state), GFP_KERNEL);
+ if (stv6110x == NULL)
+ goto error;
+
+ stv6110x->i2c = i2c;
+ stv6110x->config = config;
+ stv6110x->devctl = &stv6110x_ctl;
+
+ fe->tuner_priv = stv6110x;
+ fe->ops.tuner_ops = stv6110x_ops;
+
+ printk(KERN_INFO "%s: Attaching STV6110x\n", __func__);
+ return stv6110x->devctl;
+
+error:
+ kfree(stv6110x);
+ return NULL;
+}
+EXPORT_SYMBOL(stv6110x_attach);
+
+MODULE_AUTHOR("Manu Abraham");
+MODULE_DESCRIPTION("STV6110x Silicon tuner");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/stv6110x.h b/drivers/media/dvb/frontends/stv6110x.h
new file mode 100644
index 00000000000..a38257080e0
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv6110x.h
@@ -0,0 +1,71 @@
+/*
+ STV6110(A) Silicon tuner driver
+
+ Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+ Copyright (C) ST Microelectronics
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __STV6110x_H
+#define __STV6110x_H
+
+struct stv6110x_config {
+ u8 addr;
+ u32 refclk;
+};
+
+enum tuner_mode {
+ TUNER_SLEEP = 1,
+ TUNER_WAKE,
+};
+
+enum tuner_status {
+ TUNER_PHASELOCKED = 1,
+};
+
+struct stv6110x_devctl {
+ int (*tuner_init) (struct dvb_frontend *fe);
+ int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
+ int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
+ int (*tuner_get_frequency) (struct dvb_frontend *fe, u32 *frequency);
+ int (*tuner_set_bandwidth) (struct dvb_frontend *fe, u32 bandwidth);
+ int (*tuner_get_bandwidth) (struct dvb_frontend *fe, u32 *bandwidth);
+ int (*tuner_set_bbgain) (struct dvb_frontend *fe, u32 gain);
+ int (*tuner_get_bbgain) (struct dvb_frontend *fe, u32 *gain);
+ int (*tuner_set_refclk) (struct dvb_frontend *fe, u32 refclk);
+ int (*tuner_get_status) (struct dvb_frontend *fe, u32 *status);
+};
+
+
+#if defined(CONFIG_DVB_STV6110x) || (defined(CONFIG_DVB_STV6110x_MODULE) && defined(MODULE))
+
+extern struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+ const struct stv6110x_config *config,
+ struct i2c_adapter *i2c);
+
+#else
+static inline struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+ const struct stv6110x_config *config,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+#endif /* CONFIG_DVB_STV6110x */
+
+#endif /* __STV6110x_H */
diff --git a/drivers/media/dvb/frontends/stv6110x_priv.h b/drivers/media/dvb/frontends/stv6110x_priv.h
new file mode 100644
index 00000000000..7260da633d4
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv6110x_priv.h
@@ -0,0 +1,75 @@
+/*
+ STV6110(A) Silicon tuner driver
+
+ Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+ Copyright (C) ST Microelectronics
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __STV6110x_PRIV_H
+#define __STV6110x_PRIV_H
+
+#define FE_ERROR 0
+#define FE_NOTICE 1
+#define FE_INFO 2
+#define FE_DEBUG 3
+#define FE_DEBUGREG 4
+
+#define dprintk(__y, __z, format, arg...) do { \
+ if (__z) { \
+ if ((verbose > FE_ERROR) && (verbose > __y)) \
+ printk(KERN_ERR "%s: " format "\n", __func__ , ##arg); \
+ else if ((verbose > FE_NOTICE) && (verbose > __y)) \
+ printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \
+ else if ((verbose > FE_INFO) && (verbose > __y)) \
+ printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \
+ else if ((verbose > FE_DEBUG) && (verbose > __y)) \
+ printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \
+ } else { \
+ if (verbose > __y) \
+ printk(format, ##arg); \
+ } \
+} while (0)
+
+
+#define STV6110x_SETFIELD(mask, bitf, val) \
+ (mask = (mask & (~(((1 << STV6110x_WIDTH_##bitf) - 1) << \
+ STV6110x_OFFST_##bitf))) | \
+ (val << STV6110x_OFFST_##bitf))
+
+#define STV6110x_GETFIELD(bitf, val) \
+ ((val >> STV6110x_OFFST_##bitf) & \
+ ((1 << STV6110x_WIDTH_##bitf) - 1))
+
+#define MAKEWORD16(a, b) (((a) << 8) | (b))
+
+#define LSB(x) ((x & 0xff))
+#define MSB(y) ((y >> 8) & 0xff)
+
+#define TRIALS 10
+#define R_DIV(__div) (1 << (__div + 1))
+#define REFCLOCK_kHz (stv6110x->config->refclk / 1000)
+#define REFCLOCK_MHz (stv6110x->config->refclk / 1000000)
+
+struct stv6110x_state {
+ struct i2c_adapter *i2c;
+ const struct stv6110x_config *config;
+
+ struct stv6110x_devctl *devctl;
+};
+
+#endif /* __STV6110x_PRIV_H */
diff --git a/drivers/media/dvb/frontends/stv6110x_reg.h b/drivers/media/dvb/frontends/stv6110x_reg.h
new file mode 100644
index 00000000000..93e5c70e5fd
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv6110x_reg.h
@@ -0,0 +1,82 @@
+/*
+ STV6110(A) Silicon tuner driver
+
+ Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+ Copyright (C) ST Microelectronics
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __STV6110x_REG_H
+#define __STV6110x_REG_H
+
+#define STV6110x_CTRL1 0x00
+#define STV6110x_OFFST_CTRL1_K 3
+#define STV6110x_WIDTH_CTRL1_K 5
+#define STV6110x_OFFST_CTRL1_LPT 2
+#define STV6110x_WIDTH_CTRL1_LPT 1
+#define STV6110x_OFFST_CTRL1_RX 1
+#define STV6110x_WIDTH_CTRL1_RX 1
+#define STV6110x_OFFST_CTRL1_SYN 0
+#define STV6110x_WIDTH_CTRL1_SYN 1
+
+#define STV6110x_CTRL2 0x01
+#define STV6110x_OFFST_CTRL2_CO_DIV 6
+#define STV6110x_WIDTH_CTRL2_CO_DIV 2
+#define STV6110x_OFFST_CTRL2_RSVD 5
+#define STV6110x_WIDTH_CTRL2_RSVD 1
+#define STV6110x_OFFST_CTRL2_REFOUT_SEL 4
+#define STV6110x_WIDTH_CTRL2_REFOUT_SEL 1
+#define STV6110x_OFFST_CTRL2_BBGAIN 0
+#define STV6110x_WIDTH_CTRL2_BBGAIN 4
+
+#define STV6110x_TNG0 0x02
+#define STV6110x_OFFST_TNG0_N_DIV_7_0 0
+#define STV6110x_WIDTH_TNG0_N_DIV_7_0 8
+
+#define STV6110x_TNG1 0x03
+#define STV6110x_OFFST_TNG1_R_DIV 6
+#define STV6110x_WIDTH_TNG1_R_DIV 2
+#define STV6110x_OFFST_TNG1_PRESC32_ON 5
+#define STV6110x_WIDTH_TNG1_PRESC32_ON 1
+#define STV6110x_OFFST_TNG1_DIV4SEL 4
+#define STV6110x_WIDTH_TNG1_DIV4SEL 1
+#define STV6110x_OFFST_TNG1_N_DIV_11_8 0
+#define STV6110x_WIDTH_TNG1_N_DIV_11_8 4
+
+
+#define STV6110x_CTRL3 0x04
+#define STV6110x_OFFST_CTRL3_DCLOOP_OFF 7
+#define STV6110x_WIDTH_CTRL3_DCLOOP_OFF 1
+#define STV6110x_OFFST_CTRL3_RCCLK_OFF 6
+#define STV6110x_WIDTH_CTRL3_RCCLK_OFF 1
+#define STV6110x_OFFST_CTRL3_ICP 5
+#define STV6110x_WIDTH_CTRL3_ICP 1
+#define STV6110x_OFFST_CTRL3_CF 0
+#define STV6110x_WIDTH_CTRL3_CF 5
+
+#define STV6110x_STAT1 0x05
+#define STV6110x_OFFST_STAT1_CALVCO_STRT 2
+#define STV6110x_WIDTH_STAT1_CALVCO_STRT 1
+#define STV6110x_OFFST_STAT1_CALRC_STRT 1
+#define STV6110x_WIDTH_STAT1_CALRC_STRT 1
+#define STV6110x_OFFST_STAT1_LOCK 0
+#define STV6110x_WIDTH_STAT1_LOCK 1
+
+#define STV6110x_STAT2 0x06
+#define STV6110x_STAT3 0x07
+
+#endif /* __STV6110x_REG_H */
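A short illustration of how the register map above combines with the
STV6110x_SETFIELD/STV6110x_GETFIELD helpers from stv6110x_priv.h (sketch only;
the register values are hypothetical):

	u8 stat1 = 0x05;	/* hypothetical read-back of STV6110x_STAT1 */
	u8 ctrl2 = 0x00;

	/* LOCK lives in STAT1 bit 0 (offset 0, width 1) */
	if (STV6110x_GETFIELD(STAT1_LOCK, stat1))
		printk(KERN_INFO "stv6110x: PLL locked\n");

	/* program a baseband gain code of 6 into CTRL2[3:0] */
	STV6110x_SETFIELD(ctrl2, CTRL2_BBGAIN, 6);	/* ctrl2 is now 0x06 */
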
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c
index 2a8bbcd44cd..4302c563a6b 100644
--- a/drivers/media/dvb/frontends/tda10048.c
+++ b/drivers/media/dvb/frontends/tda10048.c
@@ -1,7 +1,7 @@
/*
NXP TDA10048HN DVB OFDM demodulator driver
- Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
+ Copyright (C) 2009 Steven Toth <stoth@kernellabs.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -25,6 +25,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <asm/div64.h>
#include "dvb_frontend.h"
#include "dvb_math.h"
#include "tda10048.h"
@@ -138,11 +139,20 @@ struct tda10048_state {
struct i2c_adapter *i2c;
- /* configuration settings */
- const struct tda10048_config *config;
+ /* We'll cache and update the attach config settings */
+ struct tda10048_config config;
struct dvb_frontend frontend;
int fwloaded;
+
+ u32 freq_if_hz;
+ u32 xtal_hz;
+ u32 pll_mfactor;
+ u32 pll_nfactor;
+ u32 pll_pfactor;
+ u32 sample_freq;
+
+ enum fe_bandwidth bandwidth;
};
static struct init_tab {
@@ -192,12 +202,26 @@ static struct init_tab {
{ TDA10048_CONF_C4_2, 0x04 },
};
+static struct pll_tab {
+ u32 clk_freq_khz;
+ u32 if_freq_khz;
+ u8 m, n, p;
+} pll_tab[] = {
+ { TDA10048_CLK_4000, TDA10048_IF_36130, 10, 0, 0 },
+ { TDA10048_CLK_16000, TDA10048_IF_3300, 10, 3, 0 },
+ { TDA10048_CLK_16000, TDA10048_IF_3500, 10, 3, 0 },
+ { TDA10048_CLK_16000, TDA10048_IF_4000, 10, 3, 0 },
+ { TDA10048_CLK_16000, TDA10048_IF_4300, 10, 3, 0 },
+ { TDA10048_CLK_16000, TDA10048_IF_36130, 10, 3, 0 },
+};
+
static int tda10048_writereg(struct tda10048_state *state, u8 reg, u8 data)
{
+ struct tda10048_config *config = &state->config;
int ret;
u8 buf[] = { reg, data };
struct i2c_msg msg = {
- .addr = state->config->demod_address,
+ .addr = config->demod_address,
.flags = 0, .buf = buf, .len = 2 };
dprintk(2, "%s(reg = 0x%02x, data = 0x%02x)\n", __func__, reg, data);
@@ -212,13 +236,14 @@ static int tda10048_writereg(struct tda10048_state *state, u8 reg, u8 data)
static u8 tda10048_readreg(struct tda10048_state *state, u8 reg)
{
+ struct tda10048_config *config = &state->config;
int ret;
u8 b0[] = { reg };
u8 b1[] = { 0 };
struct i2c_msg msg[] = {
- { .addr = state->config->demod_address,
+ { .addr = config->demod_address,
.flags = 0, .buf = b0, .len = 1 },
- { .addr = state->config->demod_address,
+ { .addr = config->demod_address,
.flags = I2C_M_RD, .buf = b1, .len = 1 } };
dprintk(2, "%s(reg = 0x%02x)\n", __func__, reg);
@@ -235,6 +260,7 @@ static u8 tda10048_readreg(struct tda10048_state *state, u8 reg)
static int tda10048_writeregbulk(struct tda10048_state *state, u8 reg,
const u8 *data, u16 len)
{
+ struct tda10048_config *config = &state->config;
int ret = -EREMOTEIO;
struct i2c_msg msg;
u8 *buf;
@@ -250,7 +276,7 @@ static int tda10048_writeregbulk(struct tda10048_state *state, u8 reg,
*buf = reg;
memcpy(buf + 1, data, len);
- msg.addr = state->config->demod_address;
+ msg.addr = config->demod_address;
msg.flags = 0;
msg.buf = buf;
msg.len = len + 1;
@@ -271,14 +297,206 @@ error:
return ret;
}
+static int tda10048_set_phy2(struct dvb_frontend *fe, u32 sample_freq_hz,
+ u32 if_hz)
+{
+ struct tda10048_state *state = fe->demodulator_priv;
+ u64 t;
+
+ dprintk(1, "%s()\n", __func__);
+
+ if (sample_freq_hz == 0)
+ return -EINVAL;
+
+ if (if_hz < (sample_freq_hz / 2)) {
+ /* PHY2 = (if2/fs) * 2^15 */
+ t = if_hz;
+ t *= 10;
+ t *= 32768;
+ do_div(t, sample_freq_hz);
+ t += 5;
+ do_div(t, 10);
+ } else {
+ /* PHY2 = ((IF1-fs)/fs) * 2^15 */
+ t = sample_freq_hz - if_hz;
+ t *= 10;
+ t *= 32768;
+ do_div(t, sample_freq_hz);
+ t += 5;
+ do_div(t, 10);
+ t = ~t + 1;
+ }
+
+ tda10048_writereg(state, TDA10048_FREQ_PHY2_LSB, (u8)t);
+ tda10048_writereg(state, TDA10048_FREQ_PHY2_MSB, (u8)(t >> 8));
+
+ return 0;
+}
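/*
 * Worked example for the PHY2 scaling above (not from this patch; values are
 * hypothetical): with if_hz = 4300000 (4.3 MHz IF) and
 * sample_freq_hz = 55000000 (the 16 MHz, M=10/N=3/P=0 entry of pll_tab),
 * the scale-by-ten rounding gives
 *	t = 4300000 * 10 * 32768 / 55000000 = 25618
 *	t = (25618 + 5) / 10                = 2562 = 0x0a02
 * so FREQ_PHY2_LSB is written with 0x02 and FREQ_PHY2_MSB with 0x0a.
 */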
+
+static int tda10048_set_wref(struct dvb_frontend *fe, u32 sample_freq_hz,
+ u32 bw)
+{
+ struct tda10048_state *state = fe->demodulator_priv;
+ u64 t, z;
+ u32 b = 8000000;
+
+ dprintk(1, "%s()\n", __func__);
+
+ if (sample_freq_hz == 0)
+ return -EINVAL;
+
+ if (bw == BANDWIDTH_6_MHZ)
+ b = 6000000;
+ else
+ if (bw == BANDWIDTH_7_MHZ)
+ b = 7000000;
+
+ /* WREF = (B / (7 * fs)) * 2^31 */
+ t = b * 10;
+ /* the next two lines multiply t by 2^31 (t *= 2147483648), split into */
+ /* two factors to avoid "decimal constant is unsigned only in ISO C90" */
+ t *= (2048 * 1024);
+ t *= 1024;
+ z = 7 * sample_freq_hz;
+ do_div(t, z);
+ t += 5;
+ do_div(t, 10);
+
+ tda10048_writereg(state, TDA10048_TIME_WREF_LSB, (u8)t);
+ tda10048_writereg(state, TDA10048_TIME_WREF_MID1, (u8)(t >> 8));
+ tda10048_writereg(state, TDA10048_TIME_WREF_MID2, (u8)(t >> 16));
+ tda10048_writereg(state, TDA10048_TIME_WREF_MSB, (u8)(t >> 24));
+
+ return 0;
+}
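/*
 * Worked example (not from this patch; values are hypothetical): for an
 * 8 MHz channel and sample_freq_hz = 55000000,
 *	t = 8000000 * 10 * 2^31 / (7 * 55000000) = 446230368
 *	t = (446230368 + 5) / 10                 = 44623037 = 0x02a8e4bd
 * which is then written byte-wise into the four TIME_WREF registers.
 */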
+
+static int tda10048_set_invwref(struct dvb_frontend *fe, u32 sample_freq_hz,
+ u32 bw)
+{
+ struct tda10048_state *state = fe->demodulator_priv;
+ u64 t;
+ u32 b = 8000000;
+
+ dprintk(1, "%s()\n", __func__);
+
+ if (sample_freq_hz == 0)
+ return -EINVAL;
+
+ if (bw == BANDWIDTH_6_MHZ)
+ b = 6000000;
+ else
+ if (bw == BANDWIDTH_7_MHZ)
+ b = 7000000;
+
+ /* INVWREF = ((7 * fs) / B) * 2^5 */
+ t = sample_freq_hz;
+ t *= 7;
+ t *= 32;
+ t *= 10;
+ do_div(t, b);
+ t += 5;
+ do_div(t, 10);
+
+ tda10048_writereg(state, TDA10048_TIME_INVWREF_LSB, (u8)t);
+ tda10048_writereg(state, TDA10048_TIME_INVWREF_MSB, (u8)(t >> 8));
+
+ return 0;
+}
+
+static int tda10048_set_bandwidth(struct dvb_frontend *fe,
+ enum fe_bandwidth bw)
+{
+ struct tda10048_state *state = fe->demodulator_priv;
+ dprintk(1, "%s(bw=%d)\n", __func__, bw);
+
+ /* Bandwidth setting may need to be adjusted */
+ switch (bw) {
+ case BANDWIDTH_6_MHZ:
+ case BANDWIDTH_7_MHZ:
+ case BANDWIDTH_8_MHZ:
+ tda10048_set_wref(fe, state->sample_freq, bw);
+ tda10048_set_invwref(fe, state->sample_freq, bw);
+ break;
+ default:
+ printk(KERN_ERR "%s() invalid bandwidth\n", __func__);
+ return -EINVAL;
+ }
+
+ state->bandwidth = bw;
+
+ return 0;
+}
+
+static int tda10048_set_if(struct dvb_frontend *fe, enum fe_bandwidth bw)
+{
+ struct tda10048_state *state = fe->demodulator_priv;
+ struct tda10048_config *config = &state->config;
+ int i;
+ u32 if_freq_khz;
+
+ dprintk(1, "%s(bw = %d)\n", __func__, bw);
+
+ /* based on target bandwidth and clk we calculate pll factors */
+ switch (bw) {
+ case BANDWIDTH_6_MHZ:
+ if_freq_khz = config->dtv6_if_freq_khz;
+ break;
+ case BANDWIDTH_7_MHZ:
+ if_freq_khz = config->dtv7_if_freq_khz;
+ break;
+ case BANDWIDTH_8_MHZ:
+ if_freq_khz = config->dtv8_if_freq_khz;
+ break;
+ default:
+ printk(KERN_ERR "%s() no default\n", __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pll_tab); i++) {
+ if ((pll_tab[i].clk_freq_khz == config->clk_freq_khz) &&
+ (pll_tab[i].if_freq_khz == if_freq_khz)) {
+
+ state->freq_if_hz = pll_tab[i].if_freq_khz * 1000;
+ state->xtal_hz = pll_tab[i].clk_freq_khz * 1000;
+ state->pll_mfactor = pll_tab[i].m;
+ state->pll_nfactor = pll_tab[i].n;
+ state->pll_pfactor = pll_tab[i].p;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(pll_tab)) {
+ printk(KERN_ERR "%s() Incorrect attach settings\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ dprintk(1, "- freq_if_hz = %d\n", state->freq_if_hz);
+ dprintk(1, "- xtal_hz = %d\n", state->xtal_hz);
+ dprintk(1, "- pll_mfactor = %d\n", state->pll_mfactor);
+ dprintk(1, "- pll_nfactor = %d\n", state->pll_nfactor);
+ dprintk(1, "- pll_pfactor = %d\n", state->pll_pfactor);
+
+ /* Calculate the sample frequency */
+ state->sample_freq = state->xtal_hz * (state->pll_mfactor + 45);
+ state->sample_freq /= (state->pll_nfactor + 1);
+ state->sample_freq /= (state->pll_pfactor + 4);
+ dprintk(1, "- sample_freq = %d\n", state->sample_freq);
+
+ /* Update the I/F */
+ tda10048_set_phy2(fe, state->sample_freq, state->freq_if_hz);
+
+ return 0;
+}
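/*
 * Example (not from this patch): with clk_freq_khz = TDA10048_CLK_16000 and
 * dtv8_if_freq_khz = TDA10048_IF_4300, pll_tab selects M=10, N=3, P=0, so
 *	sample_freq = 16000000 * (10 + 45) / (3 + 1) / (0 + 4) = 55000000 Hz
 * which is the value handed to tda10048_set_phy2() above.
 */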
+
static int tda10048_firmware_upload(struct dvb_frontend *fe)
{
struct tda10048_state *state = fe->demodulator_priv;
+ struct tda10048_config *config = &state->config;
const struct firmware *fw;
int ret;
int pos = 0;
int cnt;
- u8 wlen = state->config->fwbulkwritelen;
+ u8 wlen = config->fwbulkwritelen;
if ((wlen != TDA10048_BULKWRITE_200) && (wlen != TDA10048_BULKWRITE_50))
wlen = TDA10048_BULKWRITE_200;
@@ -289,7 +507,7 @@ static int tda10048_firmware_upload(struct dvb_frontend *fe)
TDA10048_DEFAULT_FIRMWARE);
ret = request_firmware(&fw, TDA10048_DEFAULT_FIRMWARE,
- &state->i2c->dev);
+ state->i2c->dev.parent);
if (ret) {
printk(KERN_ERR "%s: Upload failed. (file not found?)\n",
__func__);
@@ -484,8 +702,12 @@ static int tda10048_get_tps(struct tda10048_state *state,
static int tda10048_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct tda10048_state *state = fe->demodulator_priv;
+ struct tda10048_config *config = &state->config;
dprintk(1, "%s(%d)\n", __func__, enable);
+ if (config->disable_gate_access)
+ return 0;
+
if (enable)
return tda10048_writereg(state, TDA10048_CONF_C4_1,
tda10048_readreg(state, TDA10048_CONF_C4_1) | 0x02);
@@ -523,6 +745,12 @@ static int tda10048_set_frontend(struct dvb_frontend *fe,
dprintk(1, "%s(frequency=%d)\n", __func__, p->frequency);
+ /* Update the I/F pll's if the bandwidth changes */
+ if (p->u.ofdm.bandwidth != state->bandwidth) {
+ tda10048_set_if(fe, p->u.ofdm.bandwidth);
+ tda10048_set_bandwidth(fe, p->u.ofdm.bandwidth);
+ }
+
if (fe->ops.tuner_ops.set_params) {
if (fe->ops.i2c_gate_ctrl)
@@ -544,6 +772,7 @@ static int tda10048_set_frontend(struct dvb_frontend *fe,
static int tda10048_init(struct dvb_frontend *fe)
{
struct tda10048_state *state = fe->demodulator_priv;
+ struct tda10048_config *config = &state->config;
int ret = 0, i;
dprintk(1, "%s()\n", __func__);
@@ -556,10 +785,14 @@ static int tda10048_init(struct dvb_frontend *fe)
ret = tda10048_firmware_upload(fe);
/* Set either serial or parallel */
- tda10048_output_mode(fe, state->config->output_mode);
+ tda10048_output_mode(fe, config->output_mode);
+
+ /* Set inversion */
+ tda10048_set_inversion(fe, config->inversion);
- /* set inversion */
- tda10048_set_inversion(fe, state->config->inversion);
+ /* Establish default RF values */
+ tda10048_set_if(fe, BANDWIDTH_8_MHZ);
+ tda10048_set_bandwidth(fe, BANDWIDTH_8_MHZ);
/* Ensure we leave the gate closed */
tda10048_i2c_gate_ctrl(fe, 0);
@@ -812,6 +1045,45 @@ static void tda10048_release(struct dvb_frontend *fe)
kfree(state);
}
+static void tda10048_establish_defaults(struct dvb_frontend *fe)
+{
+ struct tda10048_state *state = fe->demodulator_priv;
+ struct tda10048_config *config = &state->config;
+
+ /* Validate/default the config */
+ if (config->dtv6_if_freq_khz == 0) {
+ config->dtv6_if_freq_khz = TDA10048_IF_4300;
+ printk(KERN_WARNING "%s() tda10048_config.dtv6_if_freq_khz "
+ "is not set (defaulting to %d)\n",
+ __func__,
+ config->dtv6_if_freq_khz);
+ }
+
+ if (config->dtv7_if_freq_khz == 0) {
+ config->dtv7_if_freq_khz = TDA10048_IF_4300;
+ printk(KERN_WARNING "%s() tda10048_config.dtv7_if_freq_khz "
+ "is not set (defaulting to %d)\n",
+ __func__,
+ config->dtv7_if_freq_khz);
+ }
+
+ if (config->dtv8_if_freq_khz == 0) {
+ config->dtv8_if_freq_khz = TDA10048_IF_4300;
+ printk(KERN_WARNING "%s() tda10048_config.dtv8_if_freq_khz "
+ "is not set (defaulting to %d)\n",
+ __func__,
+ config->dtv8_if_freq_khz);
+ }
+
+ if (config->clk_freq_khz == 0) {
+ config->clk_freq_khz = TDA10048_CLK_16000;
+ printk(KERN_WARNING "%s() tda10048_config.clk_freq_khz "
+ "is not set (defaulting to %d)\n",
+ __func__,
+ config->clk_freq_khz);
+ }
+}
+
static struct dvb_frontend_ops tda10048_ops;
struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
@@ -826,10 +1098,11 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
if (state == NULL)
goto error;
- /* setup the state */
- state->config = config;
+ /* setup the state and clone the config */
+ memcpy(&state->config, config, sizeof(*config));
state->i2c = i2c;
state->fwloaded = 0;
+ state->bandwidth = BANDWIDTH_8_MHZ;
/* check if the demod is present */
if (tda10048_readreg(state, TDA10048_IDENTITY) != 0x048)
@@ -840,6 +1113,17 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
+ /* Establish any defaults the user didn't pass */
+ tda10048_establish_defaults(&state->frontend);
+
+ /* Set the xtal and freq defaults */
+ if (tda10048_set_if(&state->frontend, BANDWIDTH_8_MHZ) != 0)
+ goto error;
+
+ /* Default bandwidth */
+ if (tda10048_set_bandwidth(&state->frontend, BANDWIDTH_8_MHZ) != 0)
+ goto error;
+
/* Leave the gate closed */
tda10048_i2c_gate_ctrl(&state->frontend, 0);
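With the config now cloned into the driver state and defaulted by
tda10048_establish_defaults(), a bridge driver only has to pass the values it
actually knows. A minimal attach sketch (not part of this patch; the demod
address, the adapter pointer and the second tda10048_attach() argument are
assumptions based on the existing API, and the IF/clock defines come from
tda10048.h below):

	static const struct tda10048_config demo_tda10048_config = {
		.demod_address    = 0x10,	/* hypothetical */
		.dtv8_if_freq_khz = TDA10048_IF_4300,
		.clk_freq_khz     = TDA10048_CLK_16000,
		/* dtv6/dtv7 IFs left at 0, defaulted to TDA10048_IF_4300 */
	};

	fe = tda10048_attach(&demo_tda10048_config, i2c_adapter);
	if (fe == NULL)
		return -ENODEV;	/* demod missing or attach-time PLL setup failed */
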
diff --git a/drivers/media/dvb/frontends/tda10048.h b/drivers/media/dvb/frontends/tda10048.h
index 0457b24601f..8828ceaf74b 100644
--- a/drivers/media/dvb/frontends/tda10048.h
+++ b/drivers/media/dvb/frontends/tda10048.h
@@ -1,7 +1,7 @@
/*
NXP TDA10048HN DVB OFDM demodulator driver
- Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
+ Copyright (C) 2009 Steven Toth <stoth@kernellabs.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -43,6 +43,25 @@ struct tda10048_config {
#define TDA10048_INVERSION_OFF 0
#define TDA10048_INVERSION_ON 1
u8 inversion;
+
+#define TDA10048_IF_3300 3300
+#define TDA10048_IF_3500 3500
+#define TDA10048_IF_3800 3800
+#define TDA10048_IF_4000 4000
+#define TDA10048_IF_4300 4300
+#define TDA10048_IF_4500 4500
+#define TDA10048_IF_4750 4750
+#define TDA10048_IF_36130 36130
+ u16 dtv6_if_freq_khz;
+ u16 dtv7_if_freq_khz;
+ u16 dtv8_if_freq_khz;
+
+#define TDA10048_CLK_4000 4000
+#define TDA10048_CLK_16000 16000
+ u16 clk_freq_khz;
+
+ /* Disable I2C gate access */
+ u8 disable_gate_access;
};
#if defined(CONFIG_DVB_TDA10048) || \
diff --git a/drivers/media/dvb/siano/Makefile b/drivers/media/dvb/siano/Makefile
index bcf93f4828b..c6644d90943 100644
--- a/drivers/media/dvb/siano/Makefile
+++ b/drivers/media/dvb/siano/Makefile
@@ -1,4 +1,4 @@
-sms1xxx-objs := smscoreapi.o sms-cards.o
+sms1xxx-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o
obj-$(CONFIG_DVB_SIANO_SMS1XXX) += sms1xxx.o
obj-$(CONFIG_DVB_SIANO_SMS1XXX) += smsusb.o
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c
index 63e4d0ec658..d8b15d583bd 100644
--- a/drivers/media/dvb/siano/sms-cards.c
+++ b/drivers/media/dvb/siano/sms-cards.c
@@ -18,6 +18,7 @@
*/
#include "sms-cards.h"
+#include "smsir.h"
static int sms_dbg;
module_param_named(cards_dbg, sms_dbg, int, 0644);
@@ -30,17 +31,14 @@ static struct sms_board sms_boards[] = {
[SMS1XXX_BOARD_SIANO_STELLAR] = {
.name = "Siano Stellar Digital Receiver",
.type = SMS_STELLAR,
- .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-stellar-dvbt-01.fw",
},
[SMS1XXX_BOARD_SIANO_NOVA_A] = {
.name = "Siano Nova A Digital Receiver",
.type = SMS_NOVA_A0,
- .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-nova-a-dvbt-01.fw",
},
[SMS1XXX_BOARD_SIANO_NOVA_B] = {
.name = "Siano Nova B Digital Receiver",
.type = SMS_NOVA_B0,
- .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-nova-b-dvbt-01.fw",
},
[SMS1XXX_BOARD_SIANO_VEGA] = {
.name = "Siano Vega Digital Receiver",
@@ -65,6 +63,9 @@ static struct sms_board sms_boards[] = {
.name = "Hauppauge WinTV MiniStick",
.type = SMS_NOVA_B0,
.fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw",
+ .board_cfg.leds_power = 26,
+ .board_cfg.led0 = 27,
+ .board_cfg.led1 = 28,
.led_power = 26,
.led_lo = 27,
.led_hi = 28,
@@ -74,7 +75,9 @@ static struct sms_board sms_boards[] = {
.type = SMS_NOVA_B0,
.fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw",
.lna_ctrl = 29,
+ .board_cfg.foreign_lna0_ctrl = 29,
.rf_switch = 17,
+ .board_cfg.rf_switch_uhf = 17,
},
[SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2] = {
.name = "Hauppauge WinTV MiniCard",
@@ -82,6 +85,16 @@ static struct sms_board sms_boards[] = {
.fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw",
.lna_ctrl = -1,
},
+ [SMS1XXX_BOARD_SIANO_NICE] = {
+ /* 11 */
+ .name = "Siano Nice Digital Receiver",
+ .type = SMS_NOVA_B0,
+ },
+ [SMS1XXX_BOARD_SIANO_VENICE] = {
+ /* 12 */
+ .name = "Siano Venice Digital Receiver",
+ .type = SMS_VEGA,
+ },
};
struct sms_board *sms_get_board(int id)
@@ -91,12 +104,179 @@ struct sms_board *sms_get_board(int id)
return &sms_boards[id];
}
EXPORT_SYMBOL_GPL(sms_get_board);
+static inline void sms_gpio_assign_11xx_default_led_config(
+ struct smscore_gpio_config *pGpioConfig) {
+ pGpioConfig->Direction = SMS_GPIO_DIRECTION_OUTPUT;
+ pGpioConfig->InputCharacteristics =
+ SMS_GPIO_INPUT_CHARACTERISTICS_NORMAL;
+ pGpioConfig->OutputDriving = SMS_GPIO_OUTPUT_DRIVING_4mA;
+ pGpioConfig->OutputSlewRate = SMS_GPIO_OUTPUT_SLEW_RATE_0_45_V_NS;
+ pGpioConfig->PullUpDown = SMS_GPIO_PULL_UP_DOWN_NONE;
+}
+
+int sms_board_event(struct smscore_device_t *coredev,
+ enum SMS_BOARD_EVENTS gevent) {
+ int board_id = smscore_get_board_id(coredev);
+ struct sms_board *board = sms_get_board(board_id);
+ struct smscore_gpio_config MyGpioConfig;
+
+ sms_gpio_assign_11xx_default_led_config(&MyGpioConfig);
+
+ switch (gevent) {
+ case BOARD_EVENT_POWER_INIT: /* including hotplug */
+ switch (board_id) {
+ case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
+ /* set I/O and turn off all LEDs */
+ smscore_gpio_configure(coredev,
+ board->board_cfg.leds_power,
+ &MyGpioConfig);
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.leds_power, 0);
+ smscore_gpio_configure(coredev, board->board_cfg.led0,
+ &MyGpioConfig);
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.led0, 0);
+ smscore_gpio_configure(coredev, board->board_cfg.led1,
+ &MyGpioConfig);
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.led1, 0);
+ break;
+ case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
+ case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
+ /* set I/O and turn off LNA */
+ smscore_gpio_configure(coredev,
+ board->board_cfg.foreign_lna0_ctrl,
+ &MyGpioConfig);
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.foreign_lna0_ctrl,
+ 0);
+ break;
+ }
+ break; /* BOARD_EVENT_POWER_INIT */
+
+ case BOARD_EVENT_POWER_SUSPEND:
+ switch (board_id) {
+ case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.leds_power, 0);
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.led0, 0);
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.led1, 0);
+ break;
+ case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
+ case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.foreign_lna0_ctrl,
+ 0);
+ break;
+ }
+ break; /* BOARD_EVENT_POWER_SUSPEND */
+
+ case BOARD_EVENT_POWER_RESUME:
+ switch (board_id) {
+ case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.leds_power, 1);
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.led0, 1);
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.led1, 0);
+ break;
+ case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
+ case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.foreign_lna0_ctrl,
+ 1);
+ break;
+ }
+ break; /* BOARD_EVENT_POWER_RESUME */
+
+ case BOARD_EVENT_BIND:
+ switch (board_id) {
+ case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.leds_power, 1);
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.led0, 1);
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.led1, 0);
+ break;
+ case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
+ case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.foreign_lna0_ctrl,
+ 1);
+ break;
+ }
+ break; /* BOARD_EVENT_BIND */
+
+ case BOARD_EVENT_SCAN_PROG:
+ break; /* BOARD_EVENT_SCAN_PROG */
+ case BOARD_EVENT_SCAN_COMP:
+ break; /* BOARD_EVENT_SCAN_COMP */
+ case BOARD_EVENT_EMERGENCY_WARNING_SIGNAL:
+ break; /* BOARD_EVENT_EMERGENCY_WARNING_SIGNAL */
+ case BOARD_EVENT_FE_LOCK:
+ switch (board_id) {
+ case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.led1, 1);
+ break;
+ }
+ break; /* BOARD_EVENT_FE_LOCK */
+ case BOARD_EVENT_FE_UNLOCK:
+ switch (board_id) {
+ case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.led1, 0);
+ break;
+ }
+ break; /* BOARD_EVENT_FE_UNLOCK */
+ case BOARD_EVENT_DEMOD_LOCK:
+ break; /* BOARD_EVENT_DEMOD_LOCK */
+ case BOARD_EVENT_DEMOD_UNLOCK:
+ break; /* BOARD_EVENT_DEMOD_UNLOCK */
+ case BOARD_EVENT_RECEPTION_MAX_4:
+ break; /* BOARD_EVENT_RECEPTION_MAX_4 */
+ case BOARD_EVENT_RECEPTION_3:
+ break; /* BOARD_EVENT_RECEPTION_3 */
+ case BOARD_EVENT_RECEPTION_2:
+ break; /* BOARD_EVENT_RECEPTION_2 */
+ case BOARD_EVENT_RECEPTION_1:
+ break; /* BOARD_EVENT_RECEPTION_1 */
+ case BOARD_EVENT_RECEPTION_LOST_0:
+ break; /* BOARD_EVENT_RECEPTION_LOST_0 */
+ case BOARD_EVENT_MULTIPLEX_OK:
+ switch (board_id) {
+ case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.led1, 1);
+ break;
+ }
+ break; /* BOARD_EVENT_MULTIPLEX_OK */
+ case BOARD_EVENT_MULTIPLEX_ERRORS:
+ switch (board_id) {
+ case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
+ smscore_gpio_set_level(coredev,
+ board->board_cfg.led1, 0);
+ break;
+ }
+ break; /* BOARD_EVENT_MULTIPLEX_ERRORS */
+
+ default:
+ sms_err("Unknown SMS board event");
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sms_board_event);
static int sms_set_gpio(struct smscore_device_t *coredev, int pin, int enable)
{
int lvl, ret;
u32 gpio;
- struct smscore_gpio_config gpioconfig = {
+ struct smscore_config_gpio gpioconfig = {
.direction = SMS_GPIO_DIRECTION_OUTPUT,
.pullupdown = SMS_GPIO_PULLUPDOWN_NONE,
.inputcharacteristics = SMS_GPIO_INPUTCHARACTERISTICS_NORMAL,
diff --git a/drivers/media/dvb/siano/sms-cards.h b/drivers/media/dvb/siano/sms-cards.h
index 64d74c59c33..38f062f6ad6 100644
--- a/drivers/media/dvb/siano/sms-cards.h
+++ b/drivers/media/dvb/siano/sms-cards.h
@@ -22,6 +22,7 @@
#include <linux/usb.h>
#include "smscoreapi.h"
+#include "smsir.h"
#define SMS_BOARD_UNKNOWN 0
#define SMS1XXX_BOARD_SIANO_STELLAR 1
@@ -34,10 +35,47 @@
#define SMS1XXX_BOARD_HAUPPAUGE_WINDHAM 8
#define SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD 9
#define SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2 10
+#define SMS1XXX_BOARD_SIANO_NICE 11
+#define SMS1XXX_BOARD_SIANO_VENICE 12
+
+struct sms_board_gpio_cfg {
+ int lna_vhf_exist;
+ int lna_vhf_ctrl;
+ int lna_uhf_exist;
+ int lna_uhf_ctrl;
+ int lna_uhf_d_ctrl;
+ int lna_sband_exist;
+ int lna_sband_ctrl;
+ int lna_sband_d_ctrl;
+ int foreign_lna0_ctrl;
+ int foreign_lna1_ctrl;
+ int foreign_lna2_ctrl;
+ int rf_switch_vhf;
+ int rf_switch_uhf;
+ int rf_switch_sband;
+ int leds_power;
+ int led0;
+ int led1;
+ int led2;
+ int led3;
+ int led4;
+ int ir;
+ int eeprom_wp;
+ int mrc_sense;
+ int mrc_pdn_resetn;
+ int mrc_gp0; /* mrcs spi int */
+ int mrc_gp1;
+ int mrc_gp2;
+ int mrc_gp3;
+ int mrc_gp4;
+ int host_spi_gsp_ts_int;
+};
struct sms_board {
enum sms_device_type_st type;
char *name, *fw[DEVICE_MODE_MAX];
+ struct sms_board_gpio_cfg board_cfg;
+ enum ir_kb_type ir_kb_type;
/* gpios */
int led_power, led_hi, led_lo, lna_ctrl, rf_switch;
@@ -45,6 +83,32 @@ struct sms_board {
struct sms_board *sms_get_board(int id);
+extern struct smscore_device_t *coredev;
+
+enum SMS_BOARD_EVENTS {
+ BOARD_EVENT_POWER_INIT,
+ BOARD_EVENT_POWER_SUSPEND,
+ BOARD_EVENT_POWER_RESUME,
+ BOARD_EVENT_BIND,
+ BOARD_EVENT_SCAN_PROG,
+ BOARD_EVENT_SCAN_COMP,
+ BOARD_EVENT_EMERGENCY_WARNING_SIGNAL,
+ BOARD_EVENT_FE_LOCK,
+ BOARD_EVENT_FE_UNLOCK,
+ BOARD_EVENT_DEMOD_LOCK,
+ BOARD_EVENT_DEMOD_UNLOCK,
+ BOARD_EVENT_RECEPTION_MAX_4,
+ BOARD_EVENT_RECEPTION_3,
+ BOARD_EVENT_RECEPTION_2,
+ BOARD_EVENT_RECEPTION_1,
+ BOARD_EVENT_RECEPTION_LOST_0,
+ BOARD_EVENT_MULTIPLEX_OK,
+ BOARD_EVENT_MULTIPLEX_ERRORS
+};
+
+int sms_board_event(struct smscore_device_t *coredev,
+ enum SMS_BOARD_EVENTS gevent);
+
int sms_board_setup(struct smscore_device_t *coredev);
#define SMS_LED_OFF 0
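The new sms_board_event() hook is intended to be called from the core and
frontend layers whenever a device-level event occurs; sms-cards.c then maps
it onto board-specific GPIO (LED/LNA) actions. A minimal caller sketch
(illustration only; "coredev" is whatever smscore_device_t the caller already
holds):

	/* e.g. in the DVB frontend code, once the demod reports lock */
	sms_board_event(coredev, BOARD_EVENT_FE_LOCK);

	/* ... and again when lock is lost */
	sms_board_event(coredev, BOARD_EVENT_FE_UNLOCK);
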
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index 7bd4d1dee2b..32be382f0e9 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -30,9 +30,13 @@
#include <linux/io.h>
#include <linux/firmware.h>
+#include <linux/wait.h>
+#include <asm/byteorder.h>
#include "smscoreapi.h"
#include "sms-cards.h"
+#include "smsir.h"
+#include "smsendian.h"
static int sms_dbg;
module_param_named(debug, sms_dbg, int, 0644);
@@ -58,42 +62,6 @@ struct smscore_client_t {
onremove_t onremove_handler;
};
-struct smscore_device_t {
- struct list_head entry;
-
- struct list_head clients;
- struct list_head subclients;
- spinlock_t clientslock;
-
- struct list_head buffers;
- spinlock_t bufferslock;
- int num_buffers;
-
- void *common_buffer;
- int common_buffer_size;
- dma_addr_t common_buffer_phys;
-
- void *context;
- struct device *device;
-
- char devpath[32];
- unsigned long device_flags;
-
- setmode_t setmode_handler;
- detectmode_t detectmode_handler;
- sendrequest_t sendrequest_handler;
- preload_t preload_handler;
- postload_t postload_handler;
-
- int mode, modes_supported;
-
- struct completion version_ex_done, data_download_done, trigger_done;
- struct completion init_device_done, reload_start_done, resume_done;
-
- int board_id;
- int led_state;
-};
-
void smscore_set_board_id(struct smscore_device_t *core, int id)
{
core->board_id = id;
@@ -384,6 +352,13 @@ int smscore_register_device(struct smsdevice_params_t *params,
init_completion(&dev->init_device_done);
init_completion(&dev->reload_start_done);
init_completion(&dev->resume_done);
+ init_completion(&dev->gpio_configuration_done);
+ init_completion(&dev->gpio_set_level_done);
+ init_completion(&dev->gpio_get_level_done);
+ init_completion(&dev->ir_init_done);
+
+ /* Buffer management */
+ init_waitqueue_head(&dev->buffer_mng_waitq);
/* alloc common buffer */
dev->common_buffer_size = params->buffer_size * params->num_buffers;
@@ -439,6 +414,71 @@ int smscore_register_device(struct smsdevice_params_t *params,
}
EXPORT_SYMBOL_GPL(smscore_register_device);
+
+static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
+ void *buffer, size_t size, struct completion *completion) {
+ int rc = coredev->sendrequest_handler(coredev->context, buffer, size);
+ if (rc < 0) {
+ sms_info("sendrequest returned error %d", rc);
+ return rc;
+ }
+
+ return wait_for_completion_timeout(completion,
+ msecs_to_jiffies(SMS_PROTOCOL_MAX_RAOUNDTRIP_MS)) ?
+ 0 : -ETIME;
+}
+
+/**
+ * Starts & enables IR operations
+ *
+ * @return 0 on success, < 0 on error.
+ */
+static int smscore_init_ir(struct smscore_device_t *coredev)
+{
+ int ir_io;
+ int rc;
+ void *buffer;
+
+ coredev->ir.input_dev = NULL;
+ ir_io = sms_get_board(smscore_get_board_id(coredev))->board_cfg.ir;
+ if (ir_io) { /* use the IR sub-module only if an IR port exists */
+ sms_info("IR loading");
+ rc = sms_ir_init(coredev);
+
+ if (rc != 0)
+ sms_err("Error initialization DTV IR sub-module");
+ else {
+ buffer = kmalloc(sizeof(struct SmsMsgData_ST2) +
+ SMS_DMA_ALIGNMENT,
+ GFP_KERNEL | GFP_DMA);
+ if (buffer) {
+ struct SmsMsgData_ST2 *msg =
+ (struct SmsMsgData_ST2 *)
+ SMS_ALIGN_ADDRESS(buffer);
+
+ SMS_INIT_MSG(&msg->xMsgHeader,
+ MSG_SMS_START_IR_REQ,
+ sizeof(struct SmsMsgData_ST2));
+ msg->msgData[0] = coredev->ir.controller;
+ msg->msgData[1] = coredev->ir.timeout;
+
+ smsendian_handle_tx_message(
+ (struct SmsMsgHdr_ST2 *)msg);
+ rc = smscore_sendrequest_and_wait(coredev, msg,
+ msg->xMsgHeader.msgLength,
+ &coredev->ir_init_done);
+
+ kfree(buffer);
+ } else
+ sms_err
+ ("Sending IR initialization message failed");
+ }
+ } else
+ sms_info("IR port has not been detected");
+
+ return 0;
+}
+
/**
* sets initial device mode and notifies client hotplugs that device is ready
*
@@ -459,6 +499,7 @@ int smscore_start_device(struct smscore_device_t *coredev)
kmutex_lock(&g_smscore_deviceslock);
rc = smscore_notify_callbacks(coredev, coredev->device, 1);
+ smscore_init_ir(coredev);
sms_info("device %p started, rc %d", coredev, rc);
@@ -468,29 +509,19 @@ int smscore_start_device(struct smscore_device_t *coredev)
}
EXPORT_SYMBOL_GPL(smscore_start_device);
-static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
- void *buffer, size_t size,
- struct completion *completion)
-{
- int rc = coredev->sendrequest_handler(coredev->context, buffer, size);
- if (rc < 0) {
- sms_info("sendrequest returned error %d", rc);
- return rc;
- }
-
- return wait_for_completion_timeout(completion,
- msecs_to_jiffies(10000)) ?
- 0 : -ETIME;
-}
static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
void *buffer, size_t size)
{
struct SmsFirmware_ST *firmware = (struct SmsFirmware_ST *) buffer;
struct SmsMsgHdr_ST *msg;
- u32 mem_address = firmware->StartAddress;
+ u32 mem_address;
u8 *payload = firmware->Payload;
int rc = 0;
+ firmware->StartAddress = le32_to_cpu(firmware->StartAddress);
+ firmware->Length = le32_to_cpu(firmware->Length);
+
+ mem_address = firmware->StartAddress;
sms_info("loading FW to addr 0x%x size %d",
mem_address, firmware->Length);
@@ -657,6 +688,9 @@ void smscore_unregister_device(struct smscore_device_t *coredev)
kmutex_lock(&g_smscore_deviceslock);
+ /* Release input device (IR) resources */
+ sms_ir_exit(coredev);
+
smscore_notify_clients(coredev);
smscore_notify_callbacks(coredev, NULL, 0);
@@ -664,7 +698,9 @@ void smscore_unregister_device(struct smscore_device_t *coredev)
* onresponse must no longer be called */
while (1) {
- while ((cb = smscore_getbuffer(coredev))) {
+ while (!list_empty(&coredev->buffers)) {
+ cb = (struct smscore_buffer_t *) coredev->buffers.next;
+ list_del(&cb->entry);
kfree(cb);
num_buffers++;
}
@@ -685,8 +721,10 @@ void smscore_unregister_device(struct smscore_device_t *coredev)
if (coredev->common_buffer)
dma_free_coherent(NULL, coredev->common_buffer_size,
- coredev->common_buffer,
- coredev->common_buffer_phys);
+ coredev->common_buffer, coredev->common_buffer_phys);
+
+ if (coredev->fw_buf != NULL)
+ kfree(coredev->fw_buf);
list_del(&coredev->entry);
kfree(coredev);
@@ -746,7 +784,7 @@ static char *smscore_fw_lkup[][SMS_NUM_OF_DEVICE_TYPES] = {
/*BDA*/
{"none", "dvb_nova_12mhz.inp", "dvb_nova_12mhz_b0.inp", "none"},
/*ISDBT*/
- {"none", "isdbt_nova_12mhz.inp", "dvb_nova_12mhz.inp", "none"},
+ {"none", "isdbt_nova_12mhz.inp", "isdbt_nova_12mhz_b0.inp", "none"},
/*ISDBTBDA*/
{"none", "isdbt_nova_12mhz.inp", "isdbt_nova_12mhz_b0.inp", "none"},
/*CMMB*/
@@ -870,7 +908,7 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
coredev->device_flags &= ~SMS_DEVICE_NOT_READY;
}
- if (rc != 0)
+ if (rc < 0)
sms_err("return error code %d.", rc);
return rc;
}
@@ -940,14 +978,11 @@ smscore_client_t *smscore_find_client(struct smscore_device_t *coredev,
*
*/
void smscore_onresponse(struct smscore_device_t *coredev,
- struct smscore_buffer_t *cb)
-{
- struct SmsMsgHdr_ST *phdr =
- (struct SmsMsgHdr_ST *)((u8 *) cb->p + cb->offset);
- struct smscore_client_t *client =
- smscore_find_client(coredev, phdr->msgType, phdr->msgDstId);
+ struct smscore_buffer_t *cb) {
+ struct SmsMsgHdr_ST *phdr = (struct SmsMsgHdr_ST *) ((u8 *) cb->p
+ + cb->offset);
+ struct smscore_client_t *client;
int rc = -EBUSY;
-
static unsigned long last_sample_time; /* = 0; */
static int data_total; /* = 0; */
unsigned long time_now = jiffies_to_msecs(jiffies);
@@ -965,6 +1000,16 @@ void smscore_onresponse(struct smscore_device_t *coredev,
}
data_total += cb->size;
+ /* Do we need to re-route? */
+ if ((phdr->msgType == MSG_SMS_HO_PER_SLICES_IND) ||
+ (phdr->msgType == MSG_SMS_TRANSMISSION_IND)) {
+ if (coredev->mode == DEVICE_MODE_DVBT_BDA)
+ phdr->msgDstId = DVBT_BDA_CONTROL_MSG_ID;
+ }
+
+
+ client = smscore_find_client(coredev, phdr->msgType, phdr->msgDstId);
+
/* If no client registered for type & id,
* check for control client where type is not registered */
if (client)
@@ -1009,6 +1054,35 @@ void smscore_onresponse(struct smscore_device_t *coredev,
case MSG_SMS_SLEEP_RESUME_COMP_IND:
complete(&coredev->resume_done);
break;
+ case MSG_SMS_GPIO_CONFIG_EX_RES:
+ sms_debug("MSG_SMS_GPIO_CONFIG_EX_RES");
+ complete(&coredev->gpio_configuration_done);
+ break;
+ case MSG_SMS_GPIO_SET_LEVEL_RES:
+ sms_debug("MSG_SMS_GPIO_SET_LEVEL_RES");
+ complete(&coredev->gpio_set_level_done);
+ break;
+ case MSG_SMS_GPIO_GET_LEVEL_RES:
+ {
+ u32 *msgdata = (u32 *) phdr;
+ coredev->gpio_get_res = msgdata[1];
+ sms_debug("MSG_SMS_GPIO_GET_LEVEL_RES gpio level %d",
+ coredev->gpio_get_res);
+ complete(&coredev->gpio_get_level_done);
+ break;
+ }
+ case MSG_SMS_START_IR_RES:
+ complete(&coredev->ir_init_done);
+ break;
+ case MSG_SMS_IR_SAMPLES_IND:
+ sms_ir_event(coredev,
+ (const char *)
+ ((char *)phdr
+ + sizeof(struct SmsMsgHdr_ST)),
+ (int)phdr->msgLength
+ - sizeof(struct SmsMsgHdr_ST));
+ break;
+
default:
break;
}
@@ -1030,12 +1104,24 @@ struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
struct smscore_buffer_t *cb = NULL;
unsigned long flags;
+ DEFINE_WAIT(wait);
+
spin_lock_irqsave(&coredev->bufferslock, flags);
- if (!list_empty(&coredev->buffers)) {
- cb = (struct smscore_buffer_t *) coredev->buffers.next;
- list_del(&cb->entry);
- }
+ /* This function must return a valid buffer. Since the buffer list is
+ * finite, we check whether a buffer is available; if not, we wait
+ * until one becomes available.
+ */
+
+ prepare_to_wait(&coredev->buffer_mng_waitq, &wait, TASK_INTERRUPTIBLE);
+
+ if (list_empty(&coredev->buffers))
+ schedule();
+
+ finish_wait(&coredev->buffer_mng_waitq, &wait);
+
+ cb = (struct smscore_buffer_t *) coredev->buffers.next;
+ list_del(&cb->entry);
spin_unlock_irqrestore(&coredev->bufferslock, flags);
@@ -1052,8 +1138,8 @@ EXPORT_SYMBOL_GPL(smscore_getbuffer);
*
*/
void smscore_putbuffer(struct smscore_device_t *coredev,
- struct smscore_buffer_t *cb)
-{
+ struct smscore_buffer_t *cb) {
+ wake_up_interruptible(&coredev->buffer_mng_waitq);
list_add_locked(&cb->entry, &coredev->buffers, &coredev->bufferslock);
}
EXPORT_SYMBOL_GPL(smscore_putbuffer);
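/*
 * Usage sketch for the pair above (illustration only, hypothetical caller):
 * smscore_getbuffer() now waits on buffer_mng_waitq when the free list is
 * empty and smscore_putbuffer() wakes that queue, so a bus driver does
 * roughly:
 *
 *	cb = smscore_getbuffer(coredev);   (may sleep until a buffer is free)
 *	... fill cb->p / cb->size and pass the buffer up ...
 *	smscore_putbuffer(coredev, cb);    (returns it and wakes any waiter)
 */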
@@ -1210,8 +1296,9 @@ int smsclient_sendrequest(struct smscore_client_t *client,
EXPORT_SYMBOL_GPL(smsclient_sendrequest);
+/* old GPIO management implementation */
int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin,
- struct smscore_gpio_config *pinconfig)
+ struct smscore_config_gpio *pinconfig)
{
struct {
struct SmsMsgHdr_ST hdr;
@@ -1280,35 +1367,254 @@ int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level)
&msg, sizeof(msg));
}
-static int __init smscore_module_init(void)
-{
- int rc = 0;
+/* new GPIO management implementation */
+static int GetGpioPinParams(u32 PinNum, u32 *pTranslatedPinNum,
+ u32 *pGroupNum, u32 *pGroupCfg) {
+
+ *pGroupCfg = 1;
+
+ if (PinNum >= 0 && PinNum <= 1) {
+ *pTranslatedPinNum = 0;
+ *pGroupNum = 9;
+ *pGroupCfg = 2;
+ } else if (PinNum >= 2 && PinNum <= 6) {
+ *pTranslatedPinNum = 2;
+ *pGroupNum = 0;
+ *pGroupCfg = 2;
+ } else if (PinNum >= 7 && PinNum <= 11) {
+ *pTranslatedPinNum = 7;
+ *pGroupNum = 1;
+ } else if (PinNum >= 12 && PinNum <= 15) {
+ *pTranslatedPinNum = 12;
+ *pGroupNum = 2;
+ *pGroupCfg = 3;
+ } else if (PinNum == 16) {
+ *pTranslatedPinNum = 16;
+ *pGroupNum = 23;
+ } else if (PinNum >= 17 && PinNum <= 24) {
+ *pTranslatedPinNum = 17;
+ *pGroupNum = 3;
+ } else if (PinNum == 25) {
+ *pTranslatedPinNum = 25;
+ *pGroupNum = 6;
+ } else if (PinNum >= 26 && PinNum <= 28) {
+ *pTranslatedPinNum = 26;
+ *pGroupNum = 4;
+ } else if (PinNum == 29) {
+ *pTranslatedPinNum = 29;
+ *pGroupNum = 5;
+ *pGroupCfg = 2;
+ } else if (PinNum == 30) {
+ *pTranslatedPinNum = 30;
+ *pGroupNum = 8;
+ } else if (PinNum == 31) {
+ *pTranslatedPinNum = 31;
+ *pGroupNum = 17;
+ } else
+ return -1;
- INIT_LIST_HEAD(&g_smscore_notifyees);
- INIT_LIST_HEAD(&g_smscore_devices);
- kmutex_init(&g_smscore_deviceslock);
+ *pGroupCfg <<= 24;
- INIT_LIST_HEAD(&g_smscore_registry);
- kmutex_init(&g_smscore_registrylock);
+ return 0;
+}
+
+int smscore_gpio_configure(struct smscore_device_t *coredev, u8 PinNum,
+ struct smscore_gpio_config *pGpioConfig) {
+
+ u32 totalLen;
+ u32 TranslatedPinNum;
+ u32 GroupNum;
+ u32 ElectricChar;
+ u32 groupCfg;
+ void *buffer;
+ int rc;
+
+ struct SetGpioMsg {
+ struct SmsMsgHdr_ST xMsgHeader;
+ u32 msgData[6];
+ } *pMsg;
+
+
+ if (PinNum > MAX_GPIO_PIN_NUMBER)
+ return -EINVAL;
+
+ if (pGpioConfig == NULL)
+ return -EINVAL;
+
+ totalLen = sizeof(struct SmsMsgHdr_ST) + (sizeof(u32) * 6);
+
+ buffer = kmalloc(totalLen + SMS_DMA_ALIGNMENT,
+ GFP_KERNEL | GFP_DMA);
+ if (!buffer)
+ return -ENOMEM;
+
+ pMsg = (struct SetGpioMsg *) SMS_ALIGN_ADDRESS(buffer);
+
+ pMsg->xMsgHeader.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
+ pMsg->xMsgHeader.msgDstId = HIF_TASK;
+ pMsg->xMsgHeader.msgFlags = 0;
+ pMsg->xMsgHeader.msgLength = (u16) totalLen;
+ pMsg->msgData[0] = PinNum;
+
+ if (!(coredev->device_flags & SMS_DEVICE_FAMILY2)) {
+ pMsg->xMsgHeader.msgType = MSG_SMS_GPIO_CONFIG_REQ;
+ if (GetGpioPinParams(PinNum, &TranslatedPinNum, &GroupNum,
+ &groupCfg) != 0)
+ return -EINVAL;
+
+ pMsg->msgData[1] = TranslatedPinNum;
+ pMsg->msgData[2] = GroupNum;
+ ElectricChar = (pGpioConfig->PullUpDown)
+ | (pGpioConfig->InputCharacteristics << 2)
+ | (pGpioConfig->OutputSlewRate << 3)
+ | (pGpioConfig->OutputDriving << 4);
+ pMsg->msgData[3] = ElectricChar;
+ pMsg->msgData[4] = pGpioConfig->Direction;
+ pMsg->msgData[5] = groupCfg;
+ } else {
+ pMsg->xMsgHeader.msgType = MSG_SMS_GPIO_CONFIG_EX_REQ;
+ pMsg->msgData[1] = pGpioConfig->PullUpDown;
+ pMsg->msgData[2] = pGpioConfig->OutputSlewRate;
+ pMsg->msgData[3] = pGpioConfig->OutputDriving;
+ pMsg->msgData[4] = pGpioConfig->Direction;
+ pMsg->msgData[5] = 0;
+ }
+
+ smsendian_handle_tx_message((struct SmsMsgHdr_ST *)pMsg);
+ rc = smscore_sendrequest_and_wait(coredev, pMsg, totalLen,
+ &coredev->gpio_configuration_done);
+
+ if (rc != 0) {
+ if (rc == -ETIME)
+ sms_err("smscore_gpio_configure timeout");
+ else
+ sms_err("smscore_gpio_configure error");
+ }
+ kfree(buffer);
+
+ return rc;
+}
+
+int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 PinNum,
+ u8 NewLevel) {
+
+ u32 totalLen;
+ int rc;
+ void *buffer;
+
+ struct SetGpioMsg {
+ struct SmsMsgHdr_ST xMsgHeader;
+ u32 msgData[3]; /* keep it 3 ! */
+ } *pMsg;
+
+ if ((NewLevel > 1) || (PinNum > MAX_GPIO_PIN_NUMBER))
+ return -EINVAL;
+ totalLen = sizeof(struct SmsMsgHdr_ST) +
+ (3 * sizeof(u32)); /* keep it 3 ! */
+ buffer = kmalloc(totalLen + SMS_DMA_ALIGNMENT,
+ GFP_KERNEL | GFP_DMA);
+ if (!buffer)
+ return -ENOMEM;
+ pMsg = (struct SetGpioMsg *) SMS_ALIGN_ADDRESS(buffer);
+ pMsg->xMsgHeader.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
+ pMsg->xMsgHeader.msgDstId = HIF_TASK;
+ pMsg->xMsgHeader.msgFlags = 0;
+ pMsg->xMsgHeader.msgType = MSG_SMS_GPIO_SET_LEVEL_REQ;
+ pMsg->xMsgHeader.msgLength = (u16) totalLen;
+ pMsg->msgData[0] = PinNum;
+ pMsg->msgData[1] = NewLevel;
+ /* Send message to SMS */
+ smsendian_handle_tx_message((struct SmsMsgHdr_ST *)pMsg);
+ rc = smscore_sendrequest_and_wait(coredev, pMsg, totalLen,
+ &coredev->gpio_set_level_done);
+
+ if (rc != 0) {
+ if (rc == -ETIME)
+ sms_err("smscore_gpio_set_level timeout");
+ else
+ sms_err("smscore_gpio_set_level error");
+ }
+ kfree(buffer);
return rc;
- sms_debug("rc %d", rc);
+}
+
+int smscore_gpio_get_level(struct smscore_device_t *coredev, u8 PinNum,
+ u8 *level) {
+
+ u32 totalLen;
+ int rc;
+ void *buffer;
+
+ struct SetGpioMsg {
+ struct SmsMsgHdr_ST xMsgHeader;
+ u32 msgData[2];
+ } *pMsg;
+
+
+ if (PinNum > MAX_GPIO_PIN_NUMBER)
+ return -EINVAL;
+
+ totalLen = sizeof(struct SmsMsgHdr_ST) + (2 * sizeof(u32));
+
+ buffer = kmalloc(totalLen + SMS_DMA_ALIGNMENT,
+ GFP_KERNEL | GFP_DMA);
+ if (!buffer)
+ return -ENOMEM;
+
+ pMsg = (struct SetGpioMsg *) SMS_ALIGN_ADDRESS(buffer);
+
+ pMsg->xMsgHeader.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
+ pMsg->xMsgHeader.msgDstId = HIF_TASK;
+ pMsg->xMsgHeader.msgFlags = 0;
+ pMsg->xMsgHeader.msgType = MSG_SMS_GPIO_GET_LEVEL_REQ;
+ pMsg->xMsgHeader.msgLength = (u16) totalLen;
+ pMsg->msgData[0] = PinNum;
+ pMsg->msgData[1] = 0;
+
+ /* Send message to SMS */
+ smsendian_handle_tx_message((struct SmsMsgHdr_ST *)pMsg);
+ rc = smscore_sendrequest_and_wait(coredev, pMsg, totalLen,
+ &coredev->gpio_get_level_done);
+
+ if (rc != 0) {
+ if (rc == -ETIME)
+ sms_err("smscore_gpio_get_level timeout");
+ else
+ sms_err("smscore_gpio_get_level error");
+ }
+ kfree(buffer);
+
+ /* It's a race between other gpio_get_level() calls and the copy of the
+ * single global 'coredev->gpio_get_res' to this function's 'level' output
+ */
+ *level = coredev->gpio_get_res;
return rc;
}
-static void __exit smscore_module_exit(void)
+static int __init smscore_module_init(void)
{
+ int rc = 0;
+ INIT_LIST_HEAD(&g_smscore_notifyees);
+ INIT_LIST_HEAD(&g_smscore_devices);
+ kmutex_init(&g_smscore_deviceslock);
+ INIT_LIST_HEAD(&g_smscore_registry);
+ kmutex_init(&g_smscore_registrylock);
+ return rc;
+}
-
+static void __exit smscore_module_exit(void)
+{
kmutex_lock(&g_smscore_deviceslock);
while (!list_empty(&g_smscore_notifyees)) {
struct smscore_device_notifyee_t *notifyee =
diff --git a/drivers/media/dvb/siano/smscoreapi.h b/drivers/media/dvb/siano/smscoreapi.h
index 548de9056e8..f1108c64e89 100644
--- a/drivers/media/dvb/siano/smscoreapi.h
+++ b/drivers/media/dvb/siano/smscoreapi.h
@@ -1,26 +1,26 @@
-/*
- * Driver for the Siano SMS1xxx USB dongle
- *
- * author: Anatoly Greenblat
- *
- * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation;
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- *
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __smscoreapi_h__
-#define __smscoreapi_h__
+/****************************************************************
+
+Siano Mobile Silicon, Inc.
+MDTV receiver kernel modules.
+Copyright (C) 2006-2008, Uri Shkolnik, Anatoly Greenblat
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+(at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+****************************************************************/
+
+#ifndef __SMS_CORE_API_H__
+#define __SMS_CORE_API_H__
#include <linux/version.h>
#include <linux/device.h>
@@ -28,14 +28,13 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
-#include <asm/page.h>
#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
-#include "dmxdev.h"
-#include "dvbdev.h"
-#include "dvb_demux.h"
-#include "dvb_frontend.h"
+#include <asm/page.h>
+#include "smsir.h"
#define kmutex_init(_p_) mutex_init(_p_)
#define kmutex_lock(_p_) mutex_lock(_p_)
@@ -46,13 +45,14 @@
#define min(a, b) (((a) < (b)) ? (a) : (b))
#endif
-#define SMS_ALLOC_ALIGNMENT 128
-#define SMS_DMA_ALIGNMENT 16
+#define SMS_PROTOCOL_MAX_RAOUNDTRIP_MS (10000)
+#define SMS_ALLOC_ALIGNMENT 128
+#define SMS_DMA_ALIGNMENT 16
#define SMS_ALIGN_ADDRESS(addr) \
((((uintptr_t)(addr)) + (SMS_DMA_ALIGNMENT-1)) & ~(SMS_DMA_ALIGNMENT-1))
-#define SMS_DEVICE_FAMILY2 1
-#define SMS_ROM_NO_RESPONSE 2
+#define SMS_DEVICE_FAMILY2 1
+#define SMS_ROM_NO_RESPONSE 2
#define SMS_DEVICE_NOT_READY 0x8000000
enum sms_device_type_st {
@@ -83,13 +83,13 @@ typedef void (*onremove_t)(void *context);
struct smscore_buffer_t {
/* public members, once passed to clients can be changed freely */
struct list_head entry;
- int size;
- int offset;
+ int size;
+ int offset;
/* private members, read-only for clients */
- void *p;
- dma_addr_t phys;
- unsigned long offset_in_common;
+ void *p;
+ dma_addr_t phys;
+ unsigned long offset_in_common;
};
struct smsdevice_params_t {
@@ -116,10 +116,63 @@ struct smsclient_params_t {
int data_type;
onresponse_t onresponse_handler;
onremove_t onremove_handler;
-
void *context;
};
+struct smscore_device_t {
+ struct list_head entry;
+
+ struct list_head clients;
+ struct list_head subclients;
+ spinlock_t clientslock;
+
+ struct list_head buffers;
+ spinlock_t bufferslock;
+ int num_buffers;
+
+ void *common_buffer;
+ int common_buffer_size;
+ dma_addr_t common_buffer_phys;
+
+ void *context;
+ struct device *device;
+
+ char devpath[32];
+ unsigned long device_flags;
+
+ setmode_t setmode_handler;
+ detectmode_t detectmode_handler;
+ sendrequest_t sendrequest_handler;
+ preload_t preload_handler;
+ postload_t postload_handler;
+
+ int mode, modes_supported;
+
+ /* host <--> device messages */
+ struct completion version_ex_done, data_download_done, trigger_done;
+ struct completion init_device_done, reload_start_done, resume_done;
+ struct completion gpio_configuration_done, gpio_set_level_done;
+ struct completion gpio_get_level_done, ir_init_done;
+
+ /* Buffer management */
+ wait_queue_head_t buffer_mng_waitq;
+
+ /* GPIO */
+ int gpio_get_res;
+
+ /* Target hardware board */
+ int board_id;
+
+ /* Firmware */
+ u8 *fw_buf;
+ u32 fw_buf_size;
+
+ /* Infrared (IR) */
+ struct ir_t ir;
+
+ int led_state;
+};
+
/* GPIO definitions for antenna frequency domain control (SMS8021) */
#define SMS_ANTENNA_GPIO_0 1
#define SMS_ANTENNA_GPIO_1 0
@@ -154,18 +207,15 @@ struct smsclient_params_t {
#define MSG_SMS_INIT_DEVICE_RES 579
#define MSG_SMS_ADD_PID_FILTER_REQ 601
#define MSG_SMS_ADD_PID_FILTER_RES 602
-#define MSG_SMS_REMOVE_PID_FILTER_REQ 603
-#define MSG_SMS_REMOVE_PID_FILTER_RES 604
-#define MSG_SMS_DAB_CHANNEL 607
-#define MSG_SMS_GET_PID_FILTER_LIST_REQ 608
-#define MSG_SMS_GET_PID_FILTER_LIST_RES 609
-#define MSG_SMS_GET_STATISTICS_REQ 615
-#define MSG_SMS_GET_STATISTICS_RES 616
-#define MSG_SMS_SET_ANTENNA_CONFIG_REQ 651
-#define MSG_SMS_SET_ANTENNA_CONFIG_RES 652
-#define MSG_SMS_GET_STATISTICS_EX_REQ 653
-#define MSG_SMS_GET_STATISTICS_EX_RES 654
-#define MSG_SMS_SLEEP_RESUME_COMP_IND 655
+#define MSG_SMS_REMOVE_PID_FILTER_REQ 603
+#define MSG_SMS_REMOVE_PID_FILTER_RES 604
+#define MSG_SMS_DAB_CHANNEL 607
+#define MSG_SMS_GET_PID_FILTER_LIST_REQ 608
+#define MSG_SMS_GET_PID_FILTER_LIST_RES 609
+#define MSG_SMS_HO_PER_SLICES_IND 630
+#define MSG_SMS_SET_ANTENNA_CONFIG_REQ 651
+#define MSG_SMS_SET_ANTENNA_CONFIG_RES 652
+#define MSG_SMS_SLEEP_RESUME_COMP_IND 655
#define MSG_SMS_DATA_DOWNLOAD_REQ 660
#define MSG_SMS_DATA_DOWNLOAD_RES 661
#define MSG_SMS_SWDOWNLOAD_TRIGGER_REQ 664
@@ -190,14 +240,31 @@ struct smsclient_params_t {
#define MSG_SMS_GPIO_CONFIG_EX_RES 713
#define MSG_SMS_ISDBT_TUNE_REQ 776
#define MSG_SMS_ISDBT_TUNE_RES 777
+#define MSG_SMS_TRANSMISSION_IND 782
+#define MSG_SMS_START_IR_REQ 800
+#define MSG_SMS_START_IR_RES 801
+#define MSG_SMS_IR_SAMPLES_IND 802
+#define MSG_SMS_SIGNAL_DETECTED_IND 827
+#define MSG_SMS_NO_SIGNAL_IND 828
#define SMS_INIT_MSG_EX(ptr, type, src, dst, len) do { \
(ptr)->msgType = type; (ptr)->msgSrcId = src; (ptr)->msgDstId = dst; \
(ptr)->msgLength = len; (ptr)->msgFlags = 0; \
} while (0)
+
#define SMS_INIT_MSG(ptr, type, len) \
SMS_INIT_MSG_EX(ptr, type, 0, HIF_TASK, len)
+enum SMS_DVB3_EVENTS {
+ DVB3_EVENT_INIT = 0,
+ DVB3_EVENT_SLEEP,
+ DVB3_EVENT_HOTPLUG,
+ DVB3_EVENT_FE_LOCK,
+ DVB3_EVENT_FE_UNLOCK,
+ DVB3_EVENT_UNC_OK,
+ DVB3_EVENT_UNC_ERR
+};
+
enum SMS_DEVICE_MODE {
DEVICE_MODE_NONE = -1,
DEVICE_MODE_DVBT = 0,
@@ -221,8 +288,13 @@ struct SmsMsgHdr_ST {
};
struct SmsMsgData_ST {
- struct SmsMsgHdr_ST xMsgHeader;
- u32 msgData[1];
+ struct SmsMsgHdr_ST xMsgHeader;
+ u32 msgData[1];
+};
+
+struct SmsMsgData_ST2 {
+ struct SmsMsgHdr_ST xMsgHeader;
+ u32 msgData[2];
};
struct SmsDataDownload_ST {
@@ -238,11 +310,12 @@ struct SmsVersionRes_ST {
u8 Step; /* 0 - Step A */
u8 MetalFix; /* 0 - Metal 0 */
- u8 FirmwareId; /* 0xFF - ROM, otherwise the
- * value indicated by
- * SMSHOSTLIB_DEVICE_MODES_E */
- u8 SupportedProtocols; /* Bitwise OR combination of
+ /* FirmwareId 0xFF if ROM, otherwise the
+ * value indicated by SMSHOSTLIB_DEVICE_MODES_E */
+ u8 FirmwareId;
+ /* SupportedProtocols Bitwise OR combination of
* supported protocols */
+ u8 SupportedProtocols;
u8 VersionMajor;
u8 VersionMinor;
@@ -264,86 +337,219 @@ struct SmsFirmware_ST {
u8 Payload[1];
};
-struct SMSHOSTLIB_STATISTICS_ST {
- u32 Reserved; /* Reserved */
+/* Statistics information returned as response for
+ * SmsHostApiGetStatistics_Req */
+struct SMSHOSTLIB_STATISTICS_S {
+ u32 Reserved; /* Reserved */
/* Common parameters */
- u32 IsRfLocked; /* 0 - not locked, 1 - locked */
- u32 IsDemodLocked; /* 0 - not locked, 1 - locked */
- u32 IsExternalLNAOn; /* 0 - external LNA off, 1 - external LNA on */
+ u32 IsRfLocked; /* 0 - not locked, 1 - locked */
+ u32 IsDemodLocked; /* 0 - not locked, 1 - locked */
+ u32 IsExternalLNAOn; /* 0 - external LNA off, 1 - external LNA on */
/* Reception quality */
- s32 SNR; /* dB */
- u32 BER; /* Post Viterbi BER [1E-5] */
- u32 FIB_CRC; /* CRC errors percentage, valid only for DAB */
- u32 TS_PER; /* Transport stream PER, 0xFFFFFFFF indicate N/A,
- * valid only for DVB-T/H */
- u32 MFER; /* DVB-H frame error rate in percentage,
- * 0xFFFFFFFF indicate N/A, valid only for DVB-H */
- s32 RSSI; /* dBm */
- s32 InBandPwr; /* In band power in dBM */
- s32 CarrierOffset; /* Carrier Offset in bin/1024 */
-
- /* Transmission parameters, valid only for DVB-T/H */
- u32 Frequency; /* Frequency in Hz */
- u32 Bandwidth; /* Bandwidth in MHz */
- u32 TransmissionMode; /* Transmission Mode, for DAB modes 1-4,
- * for DVB-T/H FFT mode carriers in Kilos */
- u32 ModemState; /* from SMS_DvbModemState_ET */
- u32 GuardInterval; /* Guard Interval, 1 divided by value */
- u32 CodeRate; /* Code Rate from SMS_DvbModemState_ET */
- u32 LPCodeRate; /* Low Priority Code Rate from SMS_DvbModemState_ET */
- u32 Hierarchy; /* Hierarchy from SMS_Hierarchy_ET */
- u32 Constellation; /* Constellation from SMS_Constellation_ET */
+ s32 SNR; /* dB */
+ u32 BER; /* Post Viterbi BER [1E-5] */
+ u32 FIB_CRC; /* CRC errors percentage, valid only for DAB */
+ u32 TS_PER; /* Transport stream PER,
+ 0xFFFFFFFF indicate N/A, valid only for DVB-T/H */
+ u32 MFER; /* DVB-H frame error rate in percentage,
+ 0xFFFFFFFF indicate N/A, valid only for DVB-H */
+ s32 RSSI; /* dBm */
+ s32 InBandPwr; /* In band power in dBM */
+ s32 CarrierOffset; /* Carrier Offset in bin/1024 */
+
+ /* Transmission parameters */
+ u32 Frequency; /* Frequency in Hz */
+ u32 Bandwidth; /* Bandwidth in MHz, valid only for DVB-T/H */
+ u32 TransmissionMode; /* Transmission Mode, for DAB modes 1-4,
+ for DVB-T/H FFT mode carriers in Kilos */
+ u32 ModemState; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET,
+ valid only for DVB-T/H */
+ u32 GuardInterval; /* Guard Interval from
+ SMSHOSTLIB_GUARD_INTERVALS_ET, valid only for DVB-T/H */
+ u32 CodeRate; /* Code Rate from SMSHOSTLIB_CODE_RATE_ET,
+ valid only for DVB-T/H */
+ u32 LPCodeRate; /* Low Priority Code Rate from
+ SMSHOSTLIB_CODE_RATE_ET, valid only for DVB-T/H */
+ u32 Hierarchy; /* Hierarchy from SMSHOSTLIB_HIERARCHY_ET,
+ valid only for DVB-T/H */
+ u32 Constellation; /* Constellation from
+ SMSHOSTLIB_CONSTELLATION_ET, valid only for DVB-T/H */
/* Burst parameters, valid only for DVB-H */
- u32 BurstSize; /* Current burst size in bytes */
- u32 BurstDuration; /* Current burst duration in mSec */
- u32 BurstCycleTime; /* Current burst cycle time in mSec */
- u32 CalculatedBurstCycleTime; /* Current burst cycle time in mSec,
- * as calculated by demodulator */
- u32 NumOfRows; /* Number of rows in MPE table */
- u32 NumOfPaddCols; /* Number of padding columns in MPE table */
- u32 NumOfPunctCols; /* Number of puncturing columns in MPE table */
- /* Burst parameters */
- u32 ErrorTSPackets; /* Number of erroneous transport-stream packets */
- u32 TotalTSPackets; /* Total number of transport-stream packets */
- u32 NumOfValidMpeTlbs; /* Number of MPE tables which do not include
- * errors after MPE RS decoding */
- u32 NumOfInvalidMpeTlbs; /* Number of MPE tables which include errors
- * after MPE RS decoding */
- u32 NumOfCorrectedMpeTlbs; /* Number of MPE tables which were corrected
- * by MPE RS decoding */
-
+ u32 BurstSize; /* Current burst size in bytes,
+ valid only for DVB-H */
+ u32 BurstDuration; /* Current burst duration in mSec,
+ valid only for DVB-H */
+ u32 BurstCycleTime; /* Current burst cycle time in mSec,
+ valid only for DVB-H */
+ u32 CalculatedBurstCycleTime;/* Current burst cycle time in mSec,
+ as calculated by demodulator, valid only for DVB-H */
+ u32 NumOfRows; /* Number of rows in MPE table,
+ valid only for DVB-H */
+ u32 NumOfPaddCols; /* Number of padding columns in MPE table,
+ valid only for DVB-H */
+ u32 NumOfPunctCols; /* Number of puncturing columns in MPE table,
+ valid only for DVB-H */
+ u32 ErrorTSPackets; /* Number of erroneous
+ transport-stream packets */
+ u32 TotalTSPackets; /* Total number of transport-stream packets */
+ u32 NumOfValidMpeTlbs; /* Number of MPE tables which do not include
+ errors after MPE RS decoding */
+ u32 NumOfInvalidMpeTlbs;/* Number of MPE tables which include errors
+ after MPE RS decoding */
+ u32 NumOfCorrectedMpeTlbs;/* Number of MPE tables which were
+ corrected by MPE RS decoding */
/* Common params */
- u32 BERErrorCount; /* Number of errornous SYNC bits. */
- u32 BERBitCount; /* Total number of SYNC bits. */
+	u32 BERErrorCount;	/* Number of erroneous SYNC bits. */
+ u32 BERBitCount; /* Total number of SYNC bits. */
/* Interface information */
- u32 SmsToHostTxErrors; /* Total number of transmission errors. */
+ u32 SmsToHostTxErrors; /* Total number of transmission errors. */
/* DAB/T-DMB */
- u32 PreBER; /* DAB/T-DMB only: Pre Viterbi BER [1E-5] */
+ u32 PreBER; /* DAB/T-DMB only: Pre Viterbi BER [1E-5] */
/* DVB-H TPS parameters */
- u32 CellId; /* TPS Cell ID in bits 15..0, bits 31..16 zero;
- * if set to 0xFFFFFFFF cell_id not yet recovered */
+ u32 CellId; /* TPS Cell ID in bits 15..0, bits 31..16 zero;
+ if set to 0xFFFFFFFF cell_id not yet recovered */
+ u32 DvbhSrvIndHP; /* DVB-H service indication info, bit 1 -
+ Time Slicing indicator, bit 0 - MPE-FEC indicator */
+ u32 DvbhSrvIndLP; /* DVB-H service indication info, bit 1 -
+ Time Slicing indicator, bit 0 - MPE-FEC indicator */
+ u32 NumMPEReceived; /* DVB-H, Num MPE section received */
+
+ u32 ReservedFields[10]; /* Reserved */
};
-struct SmsMsgStatisticsInfo_ST {
- u32 RequestResult;
+struct PID_STATISTICS_DATA_S {
+ struct PID_BURST_S {
+ u32 size;
+ u32 padding_cols;
+ u32 punct_cols;
+ u32 duration;
+ u32 cycle;
+ u32 calc_cycle;
+ } burst;
+
+ u32 tot_tbl_cnt;
+ u32 invalid_tbl_cnt;
+ u32 tot_cor_tbl;
+};
- struct SMSHOSTLIB_STATISTICS_ST Stat;
+struct PID_DATA_S {
+ u32 pid;
+ u32 num_rows;
+ struct PID_STATISTICS_DATA_S pid_statistics;
+};
- /* Split the calc of the SNR in DAB */
- u32 Signal; /* dB */
- u32 Noise; /* dB */
+#define CORRECT_STAT_RSSI(_stat) ((_stat).RSSI *= -1)
+#define CORRECT_STAT_BANDWIDTH(_stat) (_stat.Bandwidth = 8 - _stat.Bandwidth)
+#define CORRECT_STAT_TRANSMISSON_MODE(_stat) \
+ if (_stat.TransmissionMode == 0) \
+ _stat.TransmissionMode = 2; \
+ else if (_stat.TransmissionMode == 1) \
+ _stat.TransmissionMode = 8; \
+ else \
+ _stat.TransmissionMode = 4;
+
+struct TRANSMISSION_STATISTICS_S {
+ u32 Frequency; /* Frequency in Hz */
+ u32 Bandwidth; /* Bandwidth in MHz */
+ u32 TransmissionMode; /* FFT mode carriers in Kilos */
+ u32 GuardInterval; /* Guard Interval from
+ SMSHOSTLIB_GUARD_INTERVALS_ET */
+ u32 CodeRate; /* Code Rate from SMSHOSTLIB_CODE_RATE_ET */
+ u32 LPCodeRate; /* Low Priority Code Rate from
+ SMSHOSTLIB_CODE_RATE_ET */
+ u32 Hierarchy; /* Hierarchy from SMSHOSTLIB_HIERARCHY_ET */
+ u32 Constellation; /* Constellation from
+ SMSHOSTLIB_CONSTELLATION_ET */
+ /* DVB-H TPS parameters */
+ u32 CellId; /* TPS Cell ID in bits 15..0, bits 31..16 zero;
+ if set to 0xFFFFFFFF cell_id not yet recovered */
+ u32 DvbhSrvIndHP; /* DVB-H service indication info, bit 1 -
+ Time Slicing indicator, bit 0 - MPE-FEC indicator */
+ u32 DvbhSrvIndLP; /* DVB-H service indication info, bit 1 -
+ Time Slicing indicator, bit 0 - MPE-FEC indicator */
+ u32 IsDemodLocked; /* 0 - not locked, 1 - locked */
};
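/*
 * A minimal usage sketch (not part of the patch; the helper name and the
 * raw values are hypothetical): the CORRECT_STAT_* macros above rewrite
 * the raw codes reported by the firmware into the units documented here.
 */
static inline u32 smscoreapi_example_correct_stats(void)
{
	struct TRANSMISSION_STATISTICS_S tx = { .Bandwidth = 0,
						.TransmissionMode = 1 };

	CORRECT_STAT_BANDWIDTH(tx);		/* raw 0 -> 8 MHz */
	CORRECT_STAT_TRANSMISSON_MODE(tx);	/* raw 1 -> 8 (8K FFT mode) */

	return tx.Bandwidth;			/* 8 */
}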
+struct RECEPTION_STATISTICS_S {
+ u32 IsRfLocked; /* 0 - not locked, 1 - locked */
+ u32 IsDemodLocked; /* 0 - not locked, 1 - locked */
+ u32 IsExternalLNAOn; /* 0 - external LNA off, 1 - external LNA on */
+
+ u32 ModemState; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */
+ s32 SNR; /* dB */
+ u32 BER; /* Post Viterbi BER [1E-5] */
+ u32 BERErrorCount; /* Number of erronous SYNC bits. */
+ u32 BERBitCount; /* Total number of SYNC bits. */
+ u32 TS_PER; /* Transport stream PER,
+ 0xFFFFFFFF indicate N/A */
+ u32 MFER; /* DVB-H frame error rate in percentage,
+ 0xFFFFFFFF indicate N/A, valid only for DVB-H */
+ s32 RSSI; /* dBm */
+ s32 InBandPwr; /* In band power in dBM */
+ s32 CarrierOffset; /* Carrier Offset in bin/1024 */
+ u32 ErrorTSPackets; /* Number of erroneous
+ transport-stream packets */
+ u32 TotalTSPackets; /* Total number of transport-stream packets */
+
+ s32 MRC_SNR; /* dB */
+ s32 MRC_RSSI; /* dBm */
+ s32 MRC_InBandPwr; /* In band power in dBM */
+};
-struct smscore_gpio_config {
+
+/* Statistics information returned as response for
+ * SmsHostApiGetStatisticsEx_Req for DVB applications, SMS1100 and up */
+struct SMSHOSTLIB_STATISTICS_DVB_S {
+ /* Reception */
+ struct RECEPTION_STATISTICS_S ReceptionData;
+
+ /* Transmission parameters */
+ struct TRANSMISSION_STATISTICS_S TransmissionData;
+
+ /* Burst parameters, valid only for DVB-H */
+#define SRVM_MAX_PID_FILTERS 8
+ struct PID_DATA_S PidData[SRVM_MAX_PID_FILTERS];
+};
+
+struct SRVM_SIGNAL_STATUS_S {
+ u32 result;
+ u32 snr;
+ u32 tsPackets;
+ u32 etsPackets;
+ u32 constellation;
+ u32 hpCode;
+ u32 tpsSrvIndLP;
+ u32 tpsSrvIndHP;
+ u32 cellId;
+ u32 reason;
+
+ s32 inBandPower;
+ u32 requestId;
+};
+
+struct SMSHOSTLIB_I2C_REQ_ST {
+ u32 DeviceAddress; /* I2c device address */
+ u32 WriteCount; /* number of bytes to write */
+ u32 ReadCount; /* number of bytes to read */
+ u8 Data[1];
+};
+
+struct SMSHOSTLIB_I2C_RES_ST {
+ u32 Status; /* non-zero value in case of failure */
+ u32 ReadCount; /* number of bytes read */
+ u8 Data[1];
+};
+
+
+struct smscore_config_gpio {
#define SMS_GPIO_DIRECTION_INPUT 0
#define SMS_GPIO_DIRECTION_OUTPUT 1
u8 direction;
@@ -369,6 +575,47 @@ struct smscore_gpio_config {
u8 outputdriving;
};
+struct smscore_gpio_config {
+#define SMS_GPIO_DIRECTION_INPUT 0
+#define SMS_GPIO_DIRECTION_OUTPUT 1
+ u8 Direction;
+
+#define SMS_GPIO_PULL_UP_DOWN_NONE 0
+#define SMS_GPIO_PULL_UP_DOWN_PULLDOWN 1
+#define SMS_GPIO_PULL_UP_DOWN_PULLUP 2
+#define SMS_GPIO_PULL_UP_DOWN_KEEPER 3
+ u8 PullUpDown;
+
+#define SMS_GPIO_INPUT_CHARACTERISTICS_NORMAL 0
+#define SMS_GPIO_INPUT_CHARACTERISTICS_SCHMITT 1
+ u8 InputCharacteristics;
+
+#define SMS_GPIO_OUTPUT_SLEW_RATE_SLOW 1 /* 10xx */
+#define SMS_GPIO_OUTPUT_SLEW_RATE_FAST 0 /* 10xx */
+
+
+#define SMS_GPIO_OUTPUT_SLEW_RATE_0_45_V_NS 0 /* 11xx */
+#define SMS_GPIO_OUTPUT_SLEW_RATE_0_9_V_NS 1 /* 11xx */
+#define SMS_GPIO_OUTPUT_SLEW_RATE_1_7_V_NS 2 /* 11xx */
+#define SMS_GPIO_OUTPUT_SLEW_RATE_3_3_V_NS 3 /* 11xx */
+ u8 OutputSlewRate;
+
+#define SMS_GPIO_OUTPUT_DRIVING_S_4mA 0 /* 10xx */
+#define SMS_GPIO_OUTPUT_DRIVING_S_8mA 1 /* 10xx */
+#define SMS_GPIO_OUTPUT_DRIVING_S_12mA 2 /* 10xx */
+#define SMS_GPIO_OUTPUT_DRIVING_S_16mA 3 /* 10xx */
+
+#define SMS_GPIO_OUTPUT_DRIVING_1_5mA 0 /* 11xx */
+#define SMS_GPIO_OUTPUT_DRIVING_2_8mA 1 /* 11xx */
+#define SMS_GPIO_OUTPUT_DRIVING_4mA 2 /* 11xx */
+#define SMS_GPIO_OUTPUT_DRIVING_7mA 3 /* 11xx */
+#define SMS_GPIO_OUTPUT_DRIVING_10mA 4 /* 11xx */
+#define SMS_GPIO_OUTPUT_DRIVING_11mA 5 /* 11xx */
+#define SMS_GPIO_OUTPUT_DRIVING_14mA 6 /* 11xx */
+#define SMS_GPIO_OUTPUT_DRIVING_16mA 7 /* 11xx */
+ u8 OutputDriving;
+};
+
extern void smscore_registry_setmode(char *devpath, int mode);
extern int smscore_registry_getmode(char *devpath);
@@ -410,10 +657,19 @@ struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev);
extern void smscore_putbuffer(struct smscore_device_t *coredev,
struct smscore_buffer_t *cb);
+/* old GPIO management */
int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin,
- struct smscore_gpio_config *pinconfig);
+ struct smscore_config_gpio *pinconfig);
int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level);
+/* new GPIO management */
+extern int smscore_gpio_configure(struct smscore_device_t *coredev, u8 PinNum,
+ struct smscore_gpio_config *pGpioConfig);
+extern int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 PinNum,
+ u8 NewLevel);
+extern int smscore_gpio_get_level(struct smscore_device_t *coredev, u8 PinNum,
+ u8 *level);
+
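/*
 * A minimal usage sketch for the new GPIO API declared above (not part of
 * the patch; the pin number and the helper name are hypothetical): set up
 * a pin as an output and drive it high.
 */
static inline int smscore_example_gpio_high(struct smscore_device_t *coredev)
{
	struct smscore_gpio_config cfg = {
		.Direction = SMS_GPIO_DIRECTION_OUTPUT,
		.PullUpDown = SMS_GPIO_PULL_UP_DOWN_NONE,
		.InputCharacteristics = SMS_GPIO_INPUT_CHARACTERISTICS_NORMAL,
		.OutputSlewRate = SMS_GPIO_OUTPUT_SLEW_RATE_FAST,
		.OutputDriving = SMS_GPIO_OUTPUT_DRIVING_4mA,
	};
	int rc = smscore_gpio_configure(coredev, 21, &cfg);

	if (rc < 0)
		return rc;
	return smscore_gpio_set_level(coredev, 21, 1);
}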
void smscore_set_board_id(struct smscore_device_t *core, int id);
int smscore_get_board_id(struct smscore_device_t *core);
@@ -442,4 +698,4 @@ int smscore_led_state(struct smscore_device_t *core, int led);
dprintk(KERN_DEBUG, DBG_ADV, fmt, ##arg)
-#endif /* __smscoreapi_h__ */
+#endif /* __SMS_CORE_API_H__ */
diff --git a/drivers/media/dvb/siano/smsdvb.c b/drivers/media/dvb/siano/smsdvb.c
index ba080b95bef..3ee1c3902c5 100644
--- a/drivers/media/dvb/siano/smsdvb.c
+++ b/drivers/media/dvb/siano/smsdvb.c
@@ -1,28 +1,34 @@
-/*
- * Driver for the Siano SMS1xxx USB dongle
- *
- * Author: Uri Shkolni
- *
- * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation;
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- *
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+/****************************************************************
+
+Siano Mobile Silicon, Inc.
+MDTV receiver kernel modules.
+Copyright (C) 2006-2008, Uri Shkolnik
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+(at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+****************************************************************/
#include <linux/module.h>
#include <linux/init.h>
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+
#include "smscoreapi.h"
+#include "smsendian.h"
#include "sms-cards.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -39,12 +45,15 @@ struct smsdvb_client_t {
struct dvb_frontend frontend;
fe_status_t fe_status;
- int fe_ber, fe_snr, fe_unc, fe_signal_strength;
- struct completion tune_done, stat_done;
+ struct completion tune_done;
/* todo: save freq/band instead whole struct */
struct dvb_frontend_parameters fe_params;
+
+ struct SMSHOSTLIB_STATISTICS_DVB_S sms_stat_dvb;
+ int event_fe_state;
+ int event_unc_state;
};
static struct list_head g_smsdvb_clients;
@@ -54,11 +63,69 @@ static int sms_dbg;
module_param_named(debug, sms_dbg, int, 0644);
MODULE_PARM_DESC(debug, "set debug level (info=1, adv=2 (or-able))");
+/* Events that may come from DVB v3 adapter */
+static void sms_board_dvb3_event(struct smsdvb_client_t *client,
+ enum SMS_DVB3_EVENTS event) {
+
+ struct smscore_device_t *coredev = client->coredev;
+ switch (event) {
+ case DVB3_EVENT_INIT:
+ sms_debug("DVB3_EVENT_INIT");
+ sms_board_event(coredev, BOARD_EVENT_BIND);
+ break;
+ case DVB3_EVENT_SLEEP:
+ sms_debug("DVB3_EVENT_SLEEP");
+ sms_board_event(coredev, BOARD_EVENT_POWER_SUSPEND);
+ break;
+ case DVB3_EVENT_HOTPLUG:
+ sms_debug("DVB3_EVENT_HOTPLUG");
+ sms_board_event(coredev, BOARD_EVENT_POWER_INIT);
+ break;
+ case DVB3_EVENT_FE_LOCK:
+ if (client->event_fe_state != DVB3_EVENT_FE_LOCK) {
+ client->event_fe_state = DVB3_EVENT_FE_LOCK;
+ sms_debug("DVB3_EVENT_FE_LOCK");
+ sms_board_event(coredev, BOARD_EVENT_FE_LOCK);
+ }
+ break;
+ case DVB3_EVENT_FE_UNLOCK:
+ if (client->event_fe_state != DVB3_EVENT_FE_UNLOCK) {
+ client->event_fe_state = DVB3_EVENT_FE_UNLOCK;
+ sms_debug("DVB3_EVENT_FE_UNLOCK");
+ sms_board_event(coredev, BOARD_EVENT_FE_UNLOCK);
+ }
+ break;
+ case DVB3_EVENT_UNC_OK:
+ if (client->event_unc_state != DVB3_EVENT_UNC_OK) {
+ client->event_unc_state = DVB3_EVENT_UNC_OK;
+ sms_debug("DVB3_EVENT_UNC_OK");
+ sms_board_event(coredev, BOARD_EVENT_MULTIPLEX_OK);
+ }
+ break;
+ case DVB3_EVENT_UNC_ERR:
+ if (client->event_unc_state != DVB3_EVENT_UNC_ERR) {
+ client->event_unc_state = DVB3_EVENT_UNC_ERR;
+ sms_debug("DVB3_EVENT_UNC_ERR");
+ sms_board_event(coredev, BOARD_EVENT_MULTIPLEX_ERRORS);
+ }
+ break;
+
+ default:
+ sms_err("Unknown dvb3 api event");
+ break;
+ }
+}
+
static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
{
struct smsdvb_client_t *client = (struct smsdvb_client_t *) context;
- struct SmsMsgHdr_ST *phdr =
- (struct SmsMsgHdr_ST *)(((u8 *) cb->p) + cb->offset);
+ struct SmsMsgHdr_ST *phdr = (struct SmsMsgHdr_ST *) (((u8 *) cb->p)
+ + cb->offset);
+ u32 *pMsgData = (u32 *) phdr + 1;
+ /*u32 MsgDataLen = phdr->msgLength - sizeof(struct SmsMsgHdr_ST);*/
+ bool is_status_update = false;
+
+ smsendian_handle_rx_message((struct SmsMsgData_ST *) phdr);
switch (phdr->msgType) {
case MSG_SMS_DVBT_BDA_DATA:
@@ -70,43 +137,110 @@ static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
complete(&client->tune_done);
break;
- case MSG_SMS_GET_STATISTICS_RES:
- {
- struct SmsMsgStatisticsInfo_ST *p =
- (struct SmsMsgStatisticsInfo_ST *)(phdr + 1);
-
- if (p->Stat.IsDemodLocked) {
- client->fe_status = FE_HAS_SIGNAL |
- FE_HAS_CARRIER |
- FE_HAS_VITERBI |
- FE_HAS_SYNC |
- FE_HAS_LOCK;
-
- client->fe_snr = p->Stat.SNR;
- client->fe_ber = p->Stat.BER;
- client->fe_unc = p->Stat.BERErrorCount;
-
- if (p->Stat.InBandPwr < -95)
- client->fe_signal_strength = 0;
- else if (p->Stat.InBandPwr > -29)
- client->fe_signal_strength = 100;
- else
- client->fe_signal_strength =
- (p->Stat.InBandPwr + 95) * 3 / 2;
+ case MSG_SMS_SIGNAL_DETECTED_IND:
+ sms_info("MSG_SMS_SIGNAL_DETECTED_IND");
+ client->sms_stat_dvb.TransmissionData.IsDemodLocked = true;
+ is_status_update = true;
+ break;
+
+ case MSG_SMS_NO_SIGNAL_IND:
+ sms_info("MSG_SMS_NO_SIGNAL_IND");
+ client->sms_stat_dvb.TransmissionData.IsDemodLocked = false;
+ is_status_update = true;
+ break;
+
+ case MSG_SMS_TRANSMISSION_IND: {
+ sms_info("MSG_SMS_TRANSMISSION_IND");
+
+ pMsgData++;
+ memcpy(&client->sms_stat_dvb.TransmissionData, pMsgData,
+ sizeof(struct TRANSMISSION_STATISTICS_S));
+
+		/* No need to correct guard interval
+ * (as opposed to old statistics message).
+ */
+ CORRECT_STAT_BANDWIDTH(client->sms_stat_dvb.TransmissionData);
+ CORRECT_STAT_TRANSMISSON_MODE(
+ client->sms_stat_dvb.TransmissionData);
+ is_status_update = true;
+ break;
+ }
+ case MSG_SMS_HO_PER_SLICES_IND: {
+ struct RECEPTION_STATISTICS_S *pReceptionData =
+ &client->sms_stat_dvb.ReceptionData;
+ struct SRVM_SIGNAL_STATUS_S SignalStatusData;
+
+ /*sms_info("MSG_SMS_HO_PER_SLICES_IND");*/
+ pMsgData++;
+ SignalStatusData.result = pMsgData[0];
+ SignalStatusData.snr = pMsgData[1];
+ SignalStatusData.inBandPower = (s32) pMsgData[2];
+ SignalStatusData.tsPackets = pMsgData[3];
+ SignalStatusData.etsPackets = pMsgData[4];
+ SignalStatusData.constellation = pMsgData[5];
+ SignalStatusData.hpCode = pMsgData[6];
+ SignalStatusData.tpsSrvIndLP = pMsgData[7] & 0x03;
+ SignalStatusData.tpsSrvIndHP = pMsgData[8] & 0x03;
+ SignalStatusData.cellId = pMsgData[9] & 0xFFFF;
+ SignalStatusData.reason = pMsgData[10];
+ SignalStatusData.requestId = pMsgData[11];
+ pReceptionData->IsRfLocked = pMsgData[16];
+ pReceptionData->IsDemodLocked = pMsgData[17];
+ pReceptionData->ModemState = pMsgData[12];
+ pReceptionData->SNR = pMsgData[1];
+ pReceptionData->BER = pMsgData[13];
+ pReceptionData->RSSI = pMsgData[14];
+ CORRECT_STAT_RSSI(client->sms_stat_dvb.ReceptionData);
+
+ pReceptionData->InBandPwr = (s32) pMsgData[2];
+ pReceptionData->CarrierOffset = (s32) pMsgData[15];
+ pReceptionData->TotalTSPackets = pMsgData[3];
+ pReceptionData->ErrorTSPackets = pMsgData[4];
+
+ /* TS PER */
+ if ((SignalStatusData.tsPackets + SignalStatusData.etsPackets)
+ > 0) {
+ pReceptionData->TS_PER = (SignalStatusData.etsPackets
+ * 100) / (SignalStatusData.tsPackets
+ + SignalStatusData.etsPackets);
} else {
- client->fe_status = 0;
- client->fe_snr =
- client->fe_ber =
- client->fe_unc =
- client->fe_signal_strength = 0;
+ pReceptionData->TS_PER = 0;
}
- complete(&client->stat_done);
- break;
- } }
+ pReceptionData->BERBitCount = pMsgData[18];
+ pReceptionData->BERErrorCount = pMsgData[19];
+ pReceptionData->MRC_SNR = pMsgData[20];
+ pReceptionData->MRC_InBandPwr = pMsgData[21];
+ pReceptionData->MRC_RSSI = pMsgData[22];
+
+ is_status_update = true;
+ break;
+ }
+ }
smscore_putbuffer(client->coredev, cb);
+ if (is_status_update) {
+ if (client->sms_stat_dvb.ReceptionData.IsDemodLocked) {
+ client->fe_status = FE_HAS_SIGNAL | FE_HAS_CARRIER
+ | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ sms_board_dvb3_event(client, DVB3_EVENT_FE_LOCK);
+ if (client->sms_stat_dvb.ReceptionData.ErrorTSPackets
+ == 0)
+ sms_board_dvb3_event(client, DVB3_EVENT_UNC_OK);
+ else
+ sms_board_dvb3_event(client,
+ DVB3_EVENT_UNC_ERR);
+
+ } else {
+ /*client->fe_status =
+ (phdr->msgType == MSG_SMS_NO_SIGNAL_IND) ?
+ 0 : FE_HAS_SIGNAL;*/
+ client->fe_status = 0;
+ sms_board_dvb3_event(client, DVB3_EVENT_FE_UNLOCK);
+ }
+ }
+
return 0;
}
@@ -149,6 +283,7 @@ static int smsdvb_start_feed(struct dvb_demux_feed *feed)
PidMsg.xMsgHeader.msgLength = sizeof(PidMsg);
PidMsg.msgData[0] = feed->pid;
+ smsendian_handle_tx_message((struct SmsMsgHdr_ST *)&PidMsg);
return smsclient_sendrequest(client->smsclient,
&PidMsg, sizeof(PidMsg));
}
@@ -169,6 +304,7 @@ static int smsdvb_stop_feed(struct dvb_demux_feed *feed)
PidMsg.xMsgHeader.msgLength = sizeof(PidMsg);
PidMsg.msgData[0] = feed->pid;
+ smsendian_handle_tx_message((struct SmsMsgHdr_ST *)&PidMsg);
return smsclient_sendrequest(client->smsclient,
&PidMsg, sizeof(PidMsg));
}
@@ -177,7 +313,10 @@ static int smsdvb_sendrequest_and_wait(struct smsdvb_client_t *client,
void *buffer, size_t size,
struct completion *completion)
{
- int rc = smsclient_sendrequest(client->smsclient, buffer, size);
+ int rc;
+
+ smsendian_handle_tx_message((struct SmsMsgHdr_ST *)buffer);
+ rc = smsclient_sendrequest(client->smsclient, buffer, size);
if (rc < 0)
return rc;
@@ -186,83 +325,61 @@ static int smsdvb_sendrequest_and_wait(struct smsdvb_client_t *client,
0 : -ETIME;
}
-static int smsdvb_send_statistics_request(struct smsdvb_client_t *client)
-{
- struct SmsMsgHdr_ST Msg = { MSG_SMS_GET_STATISTICS_REQ,
- DVBT_BDA_CONTROL_MSG_ID,
- HIF_TASK, sizeof(struct SmsMsgHdr_ST), 0 };
- int ret = smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
- &client->stat_done);
- if (ret < 0)
- return ret;
-
- if (client->fe_status & FE_HAS_LOCK)
- sms_board_led_feedback(client->coredev,
- (client->fe_unc == 0) ?
- SMS_LED_HI : SMS_LED_LO);
- else
- sms_board_led_feedback(client->coredev, SMS_LED_OFF);
- return ret;
-}
-
static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat)
{
- struct smsdvb_client_t *client =
- container_of(fe, struct smsdvb_client_t, frontend);
- int rc = smsdvb_send_statistics_request(client);
+ struct smsdvb_client_t *client;
+ client = container_of(fe, struct smsdvb_client_t, frontend);
- if (!rc)
- *stat = client->fe_status;
+ *stat = client->fe_status;
- return rc;
+ return 0;
}
static int smsdvb_read_ber(struct dvb_frontend *fe, u32 *ber)
{
- struct smsdvb_client_t *client =
- container_of(fe, struct smsdvb_client_t, frontend);
- int rc = smsdvb_send_statistics_request(client);
+ struct smsdvb_client_t *client;
+ client = container_of(fe, struct smsdvb_client_t, frontend);
- if (!rc)
- *ber = client->fe_ber;
+ *ber = client->sms_stat_dvb.ReceptionData.BER;
- return rc;
+ return 0;
}
static int smsdvb_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
- struct smsdvb_client_t *client =
- container_of(fe, struct smsdvb_client_t, frontend);
- int rc = smsdvb_send_statistics_request(client);
+ struct smsdvb_client_t *client;
+ client = container_of(fe, struct smsdvb_client_t, frontend);
- if (!rc)
- *strength = client->fe_signal_strength;
+ if (client->sms_stat_dvb.ReceptionData.InBandPwr < -95)
+ *strength = 0;
+ else if (client->sms_stat_dvb.ReceptionData.InBandPwr > -29)
+ *strength = 100;
+ else
+ *strength =
+ (client->sms_stat_dvb.ReceptionData.InBandPwr
+ + 95) * 3 / 2;
- return rc;
+ return 0;
}
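/*
 * A worked sketch of the mapping above (the helper name is hypothetical):
 * in-band power is clamped to [-95, -29] dBm and scaled linearly, e.g.
 * -51 dBm -> (-51 + 95) * 3 / 2 = 66 out of 100.
 */
static inline u16 smsdvb_example_strength(s32 inband_pwr_dbm)
{
	if (inband_pwr_dbm < -95)
		return 0;
	if (inband_pwr_dbm > -29)
		return 100;
	return (inband_pwr_dbm + 95) * 3 / 2;
}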
static int smsdvb_read_snr(struct dvb_frontend *fe, u16 *snr)
{
- struct smsdvb_client_t *client =
- container_of(fe, struct smsdvb_client_t, frontend);
- int rc = smsdvb_send_statistics_request(client);
+ struct smsdvb_client_t *client;
+ client = container_of(fe, struct smsdvb_client_t, frontend);
- if (!rc)
- *snr = client->fe_snr;
+ *snr = client->sms_stat_dvb.ReceptionData.SNR;
- return rc;
+ return 0;
}
static int smsdvb_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
- struct smsdvb_client_t *client =
- container_of(fe, struct smsdvb_client_t, frontend);
- int rc = smsdvb_send_statistics_request(client);
+ struct smsdvb_client_t *client;
+ client = container_of(fe, struct smsdvb_client_t, frontend);
- if (!rc)
- *ucblocks = client->fe_unc;
+ *ucblocks = client->sms_stat_dvb.ReceptionData.ErrorTSPackets;
- return rc;
+ return 0;
}
static int smsdvb_get_tune_settings(struct dvb_frontend *fe,
@@ -286,12 +403,15 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe,
struct SmsMsgHdr_ST Msg;
u32 Data[3];
} Msg;
- int ret;
- Msg.Msg.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
- Msg.Msg.msgDstId = HIF_TASK;
- Msg.Msg.msgFlags = 0;
- Msg.Msg.msgType = MSG_SMS_RF_TUNE_REQ;
+ client->fe_status = FE_HAS_SIGNAL;
+ client->event_fe_state = -1;
+ client->event_unc_state = -1;
+
+ Msg.Msg.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
+ Msg.Msg.msgDstId = HIF_TASK;
+ Msg.Msg.msgFlags = 0;
+ Msg.Msg.msgType = MSG_SMS_RF_TUNE_REQ;
Msg.Msg.msgLength = sizeof(Msg);
Msg.Data[0] = fep->frequency;
Msg.Data[2] = 12000000;
@@ -307,24 +427,6 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe,
default: return -EINVAL;
}
- /* Disable LNA, if any. An error is returned if no LNA is present */
- ret = sms_board_lna_control(client->coredev, 0);
- if (ret == 0) {
- fe_status_t status;
-
- /* tune with LNA off at first */
- ret = smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
- &client->tune_done);
-
- smsdvb_read_status(fe, &status);
-
- if (status & FE_HAS_LOCK)
- return ret;
-
- /* previous tune didnt lock - enable LNA and tune again */
- sms_board_lna_control(client->coredev, 1);
- }
-
return smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
&client->tune_done);
}
@@ -349,8 +451,7 @@ static int smsdvb_init(struct dvb_frontend *fe)
struct smsdvb_client_t *client =
container_of(fe, struct smsdvb_client_t, frontend);
- sms_board_power(client->coredev, 1);
-
+ sms_board_dvb3_event(client, DVB3_EVENT_INIT);
return 0;
}
@@ -359,8 +460,7 @@ static int smsdvb_sleep(struct dvb_frontend *fe)
struct smsdvb_client_t *client =
container_of(fe, struct smsdvb_client_t, frontend);
- sms_board_led_feedback(client->coredev, SMS_LED_OFF);
- sms_board_power(client->coredev, 0);
+ sms_board_dvb3_event(client, DVB3_EVENT_SLEEP);
return 0;
}
@@ -485,7 +585,6 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
client->coredev = coredev;
init_completion(&client->tune_done);
- init_completion(&client->stat_done);
kmutex_lock(&g_smsdvb_clientslock);
@@ -493,8 +592,11 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
kmutex_unlock(&g_smsdvb_clientslock);
- sms_info("success");
+ client->event_fe_state = -1;
+ client->event_unc_state = -1;
+ sms_board_dvb3_event(client, DVB3_EVENT_HOTPLUG);
+ sms_info("success");
sms_board_setup(coredev);
return 0;
@@ -547,5 +649,5 @@ module_init(smsdvb_module_init);
module_exit(smsdvb_module_exit);
MODULE_DESCRIPTION("SMS DVB subsystem adaptation module");
-MODULE_AUTHOR("Siano Mobile Silicon, INC. (uris@siano-ms.com)");
+MODULE_AUTHOR("Siano Mobile Silicon, Inc. (uris@siano-ms.com)");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/siano/smsendian.c b/drivers/media/dvb/siano/smsendian.c
new file mode 100644
index 00000000000..457b6d02ef8
--- /dev/null
+++ b/drivers/media/dvb/siano/smsendian.c
@@ -0,0 +1,102 @@
+/****************************************************************
+
+ Siano Mobile Silicon, Inc.
+ MDTV receiver kernel modules.
+ Copyright (C) 2006-2009, Uri Shkolnik
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ ****************************************************************/
+
+#include <asm/byteorder.h>
+
+#include "smsendian.h"
+#include "smscoreapi.h"
+
+void smsendian_handle_tx_message(void *buffer)
+{
+#ifdef __BIG_ENDIAN
+ struct SmsMsgData_ST *msg = (struct SmsMsgData_ST *)buffer;
+ int i;
+ int msgWords;
+
+ switch (msg->xMsgHeader.msgType) {
+ case MSG_SMS_DATA_DOWNLOAD_REQ:
+ {
+ msg->msgData[0] = le32_to_cpu(msg->msgData[0]);
+ break;
+ }
+
+ default:
+ msgWords = (msg->xMsgHeader.msgLength -
+ sizeof(struct SmsMsgHdr_ST))/4;
+
+ for (i = 0; i < msgWords; i++)
+ msg->msgData[i] = le32_to_cpu(msg->msgData[i]);
+
+ break;
+ }
+#endif /* __BIG_ENDIAN */
+}
+EXPORT_SYMBOL_GPL(smsendian_handle_tx_message);
+
+void smsendian_handle_rx_message(void *buffer)
+{
+#ifdef __BIG_ENDIAN
+ struct SmsMsgData_ST *msg = (struct SmsMsgData_ST *)buffer;
+ int i;
+ int msgWords;
+
+ switch (msg->xMsgHeader.msgType) {
+ case MSG_SMS_GET_VERSION_EX_RES:
+ {
+ struct SmsVersionRes_ST *ver =
+ (struct SmsVersionRes_ST *) msg;
+ ver->ChipModel = le16_to_cpu(ver->ChipModel);
+ break;
+ }
+
+ case MSG_SMS_DVBT_BDA_DATA:
+ case MSG_SMS_DAB_CHANNEL:
+ case MSG_SMS_DATA_MSG:
+ {
+ break;
+ }
+
+ default:
+ {
+ msgWords = (msg->xMsgHeader.msgLength -
+ sizeof(struct SmsMsgHdr_ST))/4;
+
+ for (i = 0; i < msgWords; i++)
+ msg->msgData[i] = le32_to_cpu(msg->msgData[i]);
+
+ break;
+ }
+ }
+#endif /* __BIG_ENDIAN */
+}
+EXPORT_SYMBOL_GPL(smsendian_handle_rx_message);
+
+void smsendian_handle_message_header(void *msg)
+{
+#ifdef __BIG_ENDIAN
+ struct SmsMsgHdr_ST *phdr = (struct SmsMsgHdr_ST *)msg;
+
+ phdr->msgType = le16_to_cpu(phdr->msgType);
+ phdr->msgLength = le16_to_cpu(phdr->msgLength);
+ phdr->msgFlags = le16_to_cpu(phdr->msgFlags);
+#endif /* __BIG_ENDIAN */
+}
+EXPORT_SYMBOL_GPL(smsendian_handle_message_header);
diff --git a/drivers/media/dvb/siano/smsendian.h b/drivers/media/dvb/siano/smsendian.h
new file mode 100644
index 00000000000..1624d6fd367
--- /dev/null
+++ b/drivers/media/dvb/siano/smsendian.h
@@ -0,0 +1,32 @@
+/****************************************************************
+
+Siano Mobile Silicon, Inc.
+MDTV receiver kernel modules.
+Copyright (C) 2006-2009, Uri Shkolnik
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+(at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+****************************************************************/
+
+#ifndef __SMS_ENDIAN_H__
+#define __SMS_ENDIAN_H__
+
+#include <asm/byteorder.h>
+
+extern void smsendian_handle_tx_message(void *buffer);
+extern void smsendian_handle_rx_message(void *buffer);
+extern void smsendian_handle_message_header(void *msg);
+
+#endif /* __SMS_ENDIAN_H__ */
+
diff --git a/drivers/media/dvb/siano/smsir.c b/drivers/media/dvb/siano/smsir.c
new file mode 100644
index 00000000000..e3d776feeac
--- /dev/null
+++ b/drivers/media/dvb/siano/smsir.c
@@ -0,0 +1,301 @@
+/****************************************************************
+
+ Siano Mobile Silicon, Inc.
+ MDTV receiver kernel modules.
+ Copyright (C) 2006-2009, Uri Shkolnik
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ ****************************************************************/
+
+
+#include <linux/types.h>
+#include <linux/input.h>
+
+#include "smscoreapi.h"
+#include "smsir.h"
+#include "sms-cards.h"
+
+/* In order to add a new IR remote control -
+ * 1) Add it to the <enum ir_kb_type> @ smsir.h,
+ * 2) Add its map to keyboard_layout_maps below (sketch after the table)
+ * 3) Set your board (sms-cards sub-module) to use it
+ */
+
+static struct keyboard_layout_map_t keyboard_layout_maps[] = {
+ [SMS_IR_KB_DEFAULT_TV] = {
+ .ir_protocol = IR_RC5,
+ .rc5_kbd_address = KEYBOARD_ADDRESS_TV1,
+ .keyboard_layout_map = {
+ KEY_0, KEY_1, KEY_2,
+ KEY_3, KEY_4, KEY_5,
+ KEY_6, KEY_7, KEY_8,
+ KEY_9, 0, 0, KEY_POWER,
+ KEY_MUTE, 0, 0,
+ KEY_VOLUMEUP, KEY_VOLUMEDOWN,
+ KEY_BRIGHTNESSUP,
+ KEY_BRIGHTNESSDOWN, KEY_CHANNELUP,
+ KEY_CHANNELDOWN,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ }
+ },
+ [SMS_IR_KB_HCW_SILVER] = {
+ .ir_protocol = IR_RC5,
+ .rc5_kbd_address = KEYBOARD_ADDRESS_LIGHTING1,
+ .keyboard_layout_map = {
+ KEY_0, KEY_1, KEY_2,
+ KEY_3, KEY_4, KEY_5,
+ KEY_6, KEY_7, KEY_8,
+ KEY_9, KEY_TEXT, KEY_RED,
+ KEY_RADIO, KEY_MENU,
+ KEY_SUBTITLE,
+ KEY_MUTE, KEY_VOLUMEUP,
+ KEY_VOLUMEDOWN, KEY_PREVIOUS, 0,
+ KEY_UP, KEY_DOWN, KEY_LEFT,
+ KEY_RIGHT, KEY_VIDEO, KEY_AUDIO,
+ KEY_MHP, KEY_EPG, KEY_TV,
+ 0, KEY_NEXTSONG, KEY_EXIT,
+ KEY_CHANNELUP, KEY_CHANNELDOWN,
+ KEY_CHANNEL, 0,
+ KEY_PREVIOUSSONG, KEY_ENTER,
+ KEY_SLEEP, 0, 0, KEY_BLUE,
+ 0, 0, 0, 0, KEY_GREEN, 0,
+ KEY_PAUSE, 0, KEY_REWIND,
+ 0, KEY_FASTFORWARD, KEY_PLAY,
+ KEY_STOP, KEY_RECORD,
+ KEY_YELLOW, 0, 0, KEY_SELECT,
+ KEY_ZOOM, KEY_POWER, 0, 0
+ }
+ },
+ { } /* Terminating entry */
+};
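/*
 * Sketch of step 2 from the note above for a hypothetical new remote
 * (SMS_IR_KB_EXAMPLE would first be added to <enum ir_kb_type> in smsir.h;
 * the RC5 address and key choices are illustrative):
 *
 *	[SMS_IR_KB_EXAMPLE] = {
 *		.ir_protocol = IR_RC5,
 *		.rc5_kbd_address = KEYBOARD_ADDRESS_TV2,
 *		.keyboard_layout_map = {
 *			KEY_0, KEY_1, KEY_2, KEY_3, KEY_4,
 *			KEY_5, KEY_6, KEY_7, KEY_8, KEY_9,
 *			[12] = KEY_POWER, [13] = KEY_MUTE,
 *		},
 *	},
 *
 * The entry goes before the terminating { } above, and the board entry in
 * sms-cards points its ir_kb_type field at the new enum value (step 3).
 */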
+
+u32 ir_pos;
+u32 ir_word;
+u32 ir_toggle;
+
+#define RC5_PUSH_BIT(dst, bit, pos) \
+ { dst <<= 1; dst |= bit; pos++; }
+
+
+static void sms_ir_rc5_event(struct smscore_device_t *coredev,
+ u32 toggle, u32 addr, u32 cmd)
+{
+ bool toggle_changed;
+ u16 keycode;
+
+ sms_log("IR RC5 word: address %d, command %d, toggle %d",
+ addr, cmd, toggle);
+
+ toggle_changed = ir_toggle != toggle;
+ /* keep toggle */
+ ir_toggle = toggle;
+
+ if (addr !=
+ keyboard_layout_maps[coredev->ir.ir_kb_type].rc5_kbd_address)
+ return; /* Check for valid address */
+
+ keycode =
+ keyboard_layout_maps
+ [coredev->ir.ir_kb_type].keyboard_layout_map[cmd];
+
+ if (!toggle_changed &&
+ (keycode != KEY_VOLUMEUP && keycode != KEY_VOLUMEDOWN))
+ return; /* accept only repeated volume, reject other keys */
+
+ sms_log("kernel input keycode (from ir) %d", keycode);
+ input_report_key(coredev->ir.input_dev, keycode, 1);
+ input_sync(coredev->ir.input_dev);
+
+}
+
+/* decode raw bit pattern to RC5 code */
+/* taken from ir-functions.c */
+static u32 ir_rc5_decode(unsigned int code)
+{
+/* unsigned int org_code = code;*/
+ unsigned int pair;
+ unsigned int rc5 = 0;
+ int i;
+
+ for (i = 0; i < 14; ++i) {
+ pair = code & 0x3;
+ code >>= 2;
+
+ rc5 <<= 1;
+ switch (pair) {
+ case 0:
+ case 2:
+ break;
+ case 1:
+ rc5 |= 1;
+ break;
+ case 3:
+/* dprintk(1, "ir-common: ir_rc5_decode(%x) bad code\n", org_code);*/
+ sms_log("bad code");
+ return 0;
+ }
+ }
+/*
+ dprintk(1, "ir-common: code=%x, rc5=%x, start=%x,
+ toggle=%x, address=%x, "
+ "instr=%x\n", rc5, org_code, RC5_START(rc5),
+ RC5_TOGGLE(rc5), RC5_ADDR(rc5), RC5_INSTR(rc5));
+*/
+ return rc5;
+}
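/*
 * Worked example for ir_rc5_decode() above (the input value is
 * illustrative): each 2-bit pair yields one RC5 bit - 01 -> 1, 00 or
 * 10 -> 0, 11 -> invalid word.  The pair consumed first (the low end of
 * code) ends up in the most significant bit of the result, so an input
 * of 0x5555555 (fourteen 01 pairs) decodes to 0x3fff.
 */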
+
+static void sms_rc5_parse_word(struct smscore_device_t *coredev)
+{
+ #define RC5_START(x) (((x)>>12)&3)
+ #define RC5_TOGGLE(x) (((x)>>11)&1)
+ #define RC5_ADDR(x) (((x)>>6)&0x1F)
+ #define RC5_INSTR(x) ((x)&0x3F)
+
+ int i, j;
+ u32 rc5_word = 0;
+
+ /* Reverse the IR word direction */
+ for (i = 0 ; i < 28 ; i++)
+ RC5_PUSH_BIT(rc5_word, (ir_word>>i)&1, j)
+
+ rc5_word = ir_rc5_decode(rc5_word);
+ /* sms_log("temp = 0x%x, rc5_code = 0x%x", ir_word, rc5_word); */
+
+ sms_ir_rc5_event(coredev,
+ RC5_TOGGLE(rc5_word),
+ RC5_ADDR(rc5_word),
+ RC5_INSTR(rc5_word));
+}
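/*
 * Field layout assumed by the extraction macros above, worked on a
 * hypothetical 14-bit word 0x3045:
 *
 *	bits 13..12  start bits  RC5_START(0x3045)  = 3
 *	bit  11      toggle      RC5_TOGGLE(0x3045) = 0
 *	bits 10..6   address     RC5_ADDR(0x3045)   = 1 (KEYBOARD_ADDRESS_TV2)
 *	bits  5..0   command     RC5_INSTR(0x3045)  = 5
 */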
+
+
+static void sms_rc5_accumulate_bits(struct smscore_device_t *coredev,
+ s32 ir_sample)
+{
+ #define RC5_TIME_GRANULARITY 200
+ #define RC5_DEF_BIT_TIME 889
+ #define RC5_MAX_SAME_BIT_CONT 4
+ #define RC5_WORD_LEN 27 /* 28 bit */
+
+ u32 i, j;
+ s32 delta_time;
+ u32 time = (ir_sample > 0) ? ir_sample : (0-ir_sample);
+ u32 level = (ir_sample < 0) ? 0 : 1;
+
+ for (i = RC5_MAX_SAME_BIT_CONT; i > 0; i--) {
+ delta_time = time - (i*RC5_DEF_BIT_TIME) + RC5_TIME_GRANULARITY;
+ if (delta_time < 0)
+ continue; /* not so many consecutive bits */
+ if (delta_time > (2 * RC5_TIME_GRANULARITY)) {
+ /* timeout */
+ if (ir_pos == (RC5_WORD_LEN-1))
+ /* complete last bit */
+ RC5_PUSH_BIT(ir_word, level, ir_pos)
+
+ if (ir_pos == RC5_WORD_LEN)
+ sms_rc5_parse_word(coredev);
+ else if (ir_pos) /* timeout within a word */
+ sms_log("IR error parsing a word");
+
+ ir_pos = 0;
+ ir_word = 0;
+ /* sms_log("timeout %d", time); */
+ break;
+ }
+ /* The time is within the range of this number of bits */
+ for (j = 0 ; j < i ; j++)
+ RC5_PUSH_BIT(ir_word, level, ir_pos)
+
+ break;
+ }
+}
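/*
 * Worked example for the accumulation above (sample units assumed to be
 * microseconds): a level held for ~1800 matches i = 2, because
 * 2 * 889 - 200 <= 1800 <= 2 * 889 + 200, so two identical bits are
 * pushed; a level held longer than 4 * 889 + 200, or one that falls
 * between the tolerance windows, takes the timeout branch and closes the
 * 28-bit word.
 */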
+
+void sms_ir_event(struct smscore_device_t *coredev, const char *buf, int len)
+{
+ #define IR_DATA_RECEIVE_MAX_LEN 520 /* 128*4 + 4 + 4 */
+ u32 i;
+ enum ir_protocol ir_protocol =
+ keyboard_layout_maps[coredev->ir.ir_kb_type]
+ .ir_protocol;
+ s32 *samples;
+ int count = len>>2;
+	u32 BERErrorCount;	/* Number of erroneous SYNC bits. */
+ samples = (s32 *)buf;
+/* sms_log("IR buffer received, length = %d", count);*/
+
+ for (i = 0; i < count; i++)
+ if (ir_protocol == IR_RC5)
+ sms_rc5_accumulate_bits(coredev, samples[i]);
+ /* IR_RCMM not implemented */
+}
+
+int sms_ir_init(struct smscore_device_t *coredev)
+{
+ struct input_dev *input_dev;
+
+ sms_log("Allocating input device");
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ sms_err("Not enough memory");
+ return -ENOMEM;
+ }
+
+ coredev->ir.input_dev = input_dev;
+ coredev->ir.ir_kb_type =
+ sms_get_board(smscore_get_board_id(coredev))->ir_kb_type;
+ coredev->ir.keyboard_layout_map =
+ keyboard_layout_maps[coredev->ir.ir_kb_type].
+ keyboard_layout_map;
+ sms_log("IR remote keyboard type is %d", coredev->ir.ir_kb_type);
+
+ coredev->ir.controller = 0; /* Todo: vega/nova SPI number */
+ coredev->ir.timeout = IR_DEFAULT_TIMEOUT;
+ sms_log("IR port %d, timeout %d ms",
+ coredev->ir.controller, coredev->ir.timeout);
+
+ snprintf(coredev->ir.name,
+ IR_DEV_NAME_MAX_LEN,
+ "SMS IR w/kbd type %d",
+ coredev->ir.ir_kb_type);
+ input_dev->name = coredev->ir.name;
+ input_dev->phys = coredev->ir.name;
+ input_dev->dev.parent = coredev->device;
+
+ /* Key press events only */
+ input_dev->evbit[0] = BIT_MASK(EV_KEY);
+ input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0);
+
+ sms_log("Input device (IR) %s is set for key events", input_dev->name);
+
+ if (input_register_device(input_dev)) {
+ sms_err("Failed to register device");
+ input_free_device(input_dev);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+void sms_ir_exit(struct smscore_device_t *coredev)
+{
+ if (coredev->ir.input_dev)
+ input_unregister_device(coredev->ir.input_dev);
+
+ sms_log("");
+}
+
diff --git a/drivers/media/dvb/siano/smsir.h b/drivers/media/dvb/siano/smsir.h
new file mode 100644
index 00000000000..b7d703e2d33
--- /dev/null
+++ b/drivers/media/dvb/siano/smsir.h
@@ -0,0 +1,93 @@
+/****************************************************************
+
+Siano Mobile Silicon, Inc.
+MDTV receiver kernel modules.
+Copyright (C) 2006-2009, Uri Shkolnik
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+(at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+****************************************************************/
+
+#ifndef __SMS_IR_H__
+#define __SMS_IR_H__
+
+#include <linux/input.h>
+
+#define IR_DEV_NAME_MAX_LEN 23 /* "SMS IR kbd type nn\0" */
+#define IR_KEYBOARD_LAYOUT_SIZE 64
+#define IR_DEFAULT_TIMEOUT 100
+
+enum ir_kb_type {
+ SMS_IR_KB_DEFAULT_TV,
+ SMS_IR_KB_HCW_SILVER
+};
+
+enum rc5_keyboard_address {
+ KEYBOARD_ADDRESS_TV1 = 0,
+ KEYBOARD_ADDRESS_TV2 = 1,
+ KEYBOARD_ADDRESS_TELETEXT = 2,
+ KEYBOARD_ADDRESS_VIDEO = 3,
+ KEYBOARD_ADDRESS_LV1 = 4,
+ KEYBOARD_ADDRESS_VCR1 = 5,
+ KEYBOARD_ADDRESS_VCR2 = 6,
+ KEYBOARD_ADDRESS_EXPERIMENTAL = 7,
+ KEYBOARD_ADDRESS_SAT1 = 8,
+ KEYBOARD_ADDRESS_CAMERA = 9,
+ KEYBOARD_ADDRESS_SAT2 = 10,
+ KEYBOARD_ADDRESS_CDV = 12,
+ KEYBOARD_ADDRESS_CAMCORDER = 13,
+ KEYBOARD_ADDRESS_PRE_AMP = 16,
+ KEYBOARD_ADDRESS_TUNER = 17,
+ KEYBOARD_ADDRESS_RECORDER1 = 18,
+ KEYBOARD_ADDRESS_PRE_AMP1 = 19,
+ KEYBOARD_ADDRESS_CD_PLAYER = 20,
+ KEYBOARD_ADDRESS_PHONO = 21,
+ KEYBOARD_ADDRESS_SATA = 22,
+ KEYBOARD_ADDRESS_RECORDER2 = 23,
+ KEYBOARD_ADDRESS_CDR = 26,
+ KEYBOARD_ADDRESS_LIGHTING = 29,
+ KEYBOARD_ADDRESS_LIGHTING1 = 30, /* KEYBOARD_ADDRESS_HCW_SILVER */
+ KEYBOARD_ADDRESS_PHONE = 31,
+ KEYBOARD_ADDRESS_NOT_RC5 = 0xFFFF
+};
+
+enum ir_protocol {
+ IR_RC5,
+ IR_RCMM
+};
+
+struct keyboard_layout_map_t {
+ enum ir_protocol ir_protocol;
+ enum rc5_keyboard_address rc5_kbd_address;
+ u16 keyboard_layout_map[IR_KEYBOARD_LAYOUT_SIZE];
+};
+
+struct smscore_device_t;
+
+struct ir_t {
+ struct input_dev *input_dev;
+ enum ir_kb_type ir_kb_type;
+ char name[IR_DEV_NAME_MAX_LEN+1];
+ u16 *keyboard_layout_map;
+ u32 timeout;
+ u32 controller;
+};
+
+int sms_ir_init(struct smscore_device_t *coredev);
+void sms_ir_exit(struct smscore_device_t *coredev);
+void sms_ir_event(struct smscore_device_t *coredev,
+ const char *buf, int len);
+
+#endif /* __SMS_IR_H__ */
+
diff --git a/drivers/media/dvb/siano/smssdio.c b/drivers/media/dvb/siano/smssdio.c
new file mode 100644
index 00000000000..dfaa49a53f3
--- /dev/null
+++ b/drivers/media/dvb/siano/smssdio.c
@@ -0,0 +1,357 @@
+/*
+ * smssdio.c - Siano 1xxx SDIO interface driver
+ *
+ * Copyright 2008 Pierre Ossman
+ *
+ * Based on code by Siano Mobile Silicon, Inc.,
+ * Copyright (C) 2006-2008, Uri Shkolnik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ *
+ * This hardware is a bit odd in that all transfers should be done
+ * to/from the SMSSDIO_DATA register, yet the "increase address" bit
+ * always needs to be set.
+ *
+ * Also, buffers from the card are always aligned to 128 byte
+ * boundaries.
+ */
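/*
 * Worked example of the 128 byte rule above (the message length is
 * hypothetical): a 300 byte message arrives as one initial 128 byte block
 * holding the header; the remaining 300 - 128 = 172 bytes are rounded up
 * to ALIGN(172, 128) = 256 and fetched as two more full blocks, which is
 * what the interrupt handler below does.
 */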
+
+/*
+ * General cleanup notes:
+ *
+ * - only typedefs should be named *_t
+ *
+ * - use ERR_PTR and friends for smscore_register_device()
+ *
+ * - smscore_getbuffer should zero fields
+ *
+ * Fix stop command
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include "smscoreapi.h"
+#include "sms-cards.h"
+
+/* Registers */
+
+#define SMSSDIO_DATA 0x00
+#define SMSSDIO_INT 0x04
+
+static const struct sdio_device_id smssdio_ids[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR),
+ .driver_data = SMS1XXX_BOARD_SIANO_STELLAR},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_A0),
+ .driver_data = SMS1XXX_BOARD_SIANO_NOVA_A},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_B0),
+ .driver_data = SMS1XXX_BOARD_SIANO_NOVA_B},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VEGA_A0),
+ .driver_data = SMS1XXX_BOARD_SIANO_VEGA},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VENICE),
+ .driver_data = SMS1XXX_BOARD_SIANO_VEGA},
+ { /* end: all zeroes */ },
+};
+
+MODULE_DEVICE_TABLE(sdio, smssdio_ids);
+
+struct smssdio_device {
+ struct sdio_func *func;
+
+ struct smscore_device_t *coredev;
+
+ struct smscore_buffer_t *split_cb;
+};
+
+/*******************************************************************/
+/* Siano core callbacks */
+/*******************************************************************/
+
+static int smssdio_sendrequest(void *context, void *buffer, size_t size)
+{
+ int ret;
+ struct smssdio_device *smsdev;
+
+ smsdev = context;
+
+ sdio_claim_host(smsdev->func);
+
+ while (size >= smsdev->func->cur_blksize) {
+ ret = sdio_write_blocks(smsdev->func, SMSSDIO_DATA, buffer, 1);
+ if (ret)
+ goto out;
+
+ buffer += smsdev->func->cur_blksize;
+ size -= smsdev->func->cur_blksize;
+ }
+
+ if (size) {
+ ret = sdio_write_bytes(smsdev->func, SMSSDIO_DATA,
+ buffer, size);
+ }
+
+out:
+ sdio_release_host(smsdev->func);
+
+ return ret;
+}
+
+/*******************************************************************/
+/* SDIO callbacks */
+/*******************************************************************/
+
+static void smssdio_interrupt(struct sdio_func *func)
+{
+ int ret, isr;
+
+ struct smssdio_device *smsdev;
+ struct smscore_buffer_t *cb;
+ struct SmsMsgHdr_ST *hdr;
+ size_t size;
+
+ smsdev = sdio_get_drvdata(func);
+
+ /*
+ * The interrupt register has no defined meaning. It is just
+	 * a way of turning off the level-triggered interrupt.
+ */
+ isr = sdio_readb(func, SMSSDIO_INT, &ret);
+ if (ret) {
+ dev_err(&smsdev->func->dev,
+ "Unable to read interrupt register!\n");
+ return;
+ }
+
+ if (smsdev->split_cb == NULL) {
+ cb = smscore_getbuffer(smsdev->coredev);
+ if (!cb) {
+ dev_err(&smsdev->func->dev,
+ "Unable to allocate data buffer!\n");
+ return;
+ }
+
+ ret = sdio_read_blocks(smsdev->func, cb->p, SMSSDIO_DATA, 1);
+ if (ret) {
+ dev_err(&smsdev->func->dev,
+ "Error %d reading initial block!\n", ret);
+ return;
+ }
+
+ hdr = cb->p;
+
+ if (hdr->msgFlags & MSG_HDR_FLAG_SPLIT_MSG) {
+ smsdev->split_cb = cb;
+ return;
+ }
+
+ size = hdr->msgLength - smsdev->func->cur_blksize;
+ } else {
+ cb = smsdev->split_cb;
+ hdr = cb->p;
+
+ size = hdr->msgLength - sizeof(struct SmsMsgHdr_ST);
+
+ smsdev->split_cb = NULL;
+ }
+
+ if (hdr->msgLength > smsdev->func->cur_blksize) {
+ void *buffer;
+
+ size = ALIGN(size, 128);
+ buffer = cb->p + hdr->msgLength;
+
+ BUG_ON(smsdev->func->cur_blksize != 128);
+
+ /*
+ * First attempt to transfer all of it in one go...
+ */
+ ret = sdio_read_blocks(smsdev->func, buffer,
+ SMSSDIO_DATA, size / 128);
+ if (ret && ret != -EINVAL) {
+ smscore_putbuffer(smsdev->coredev, cb);
+ dev_err(&smsdev->func->dev,
+ "Error %d reading data from card!\n", ret);
+ return;
+ }
+
+ /*
+ * ..then fall back to one block at a time if that is
+ * not possible...
+ *
+ * (we have to do this manually because of the
+ * problem with the "increase address" bit)
+ */
+ if (ret == -EINVAL) {
+ while (size) {
+ ret = sdio_read_blocks(smsdev->func,
+ buffer, SMSSDIO_DATA, 1);
+ if (ret) {
+ smscore_putbuffer(smsdev->coredev, cb);
+ dev_err(&smsdev->func->dev,
+ "Error %d reading "
+ "data from card!\n", ret);
+ return;
+ }
+
+ buffer += smsdev->func->cur_blksize;
+ if (size > smsdev->func->cur_blksize)
+ size -= smsdev->func->cur_blksize;
+ else
+ size = 0;
+ }
+ }
+ }
+
+ cb->size = hdr->msgLength;
+ cb->offset = 0;
+
+ smscore_onresponse(smsdev->coredev, cb);
+}
+
+static int smssdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret;
+
+ int board_id;
+ struct smssdio_device *smsdev;
+ struct smsdevice_params_t params;
+
+ board_id = id->driver_data;
+
+ smsdev = kzalloc(sizeof(struct smssdio_device), GFP_KERNEL);
+ if (!smsdev)
+ return -ENOMEM;
+
+ smsdev->func = func;
+
+ memset(&params, 0, sizeof(struct smsdevice_params_t));
+
+ params.device = &func->dev;
+ params.buffer_size = 0x5000; /* ?? */
+ params.num_buffers = 22; /* ?? */
+ params.context = smsdev;
+
+ snprintf(params.devpath, sizeof(params.devpath),
+ "sdio\\%s", sdio_func_id(func));
+
+ params.sendrequest_handler = smssdio_sendrequest;
+
+ params.device_type = sms_get_board(board_id)->type;
+
+ if (params.device_type != SMS_STELLAR)
+ params.flags |= SMS_DEVICE_FAMILY2;
+ else {
+ /*
+ * FIXME: Stellar needs special handling...
+ */
+ ret = -ENODEV;
+ goto free;
+ }
+
+ ret = smscore_register_device(&params, &smsdev->coredev);
+ if (ret < 0)
+ goto free;
+
+ smscore_set_board_id(smsdev->coredev, board_id);
+
+ sdio_claim_host(func);
+
+ ret = sdio_enable_func(func);
+ if (ret)
+ goto release;
+
+ ret = sdio_set_block_size(func, 128);
+ if (ret)
+ goto disable;
+
+ ret = sdio_claim_irq(func, smssdio_interrupt);
+ if (ret)
+ goto disable;
+
+ sdio_set_drvdata(func, smsdev);
+
+ sdio_release_host(func);
+
+ ret = smscore_start_device(smsdev->coredev);
+ if (ret < 0)
+ goto reclaim;
+
+ return 0;
+
+reclaim:
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+disable:
+ sdio_disable_func(func);
+release:
+ sdio_release_host(func);
+ smscore_unregister_device(smsdev->coredev);
+free:
+ kfree(smsdev);
+
+ return ret;
+}
+
+static void smssdio_remove(struct sdio_func *func)
+{
+ struct smssdio_device *smsdev;
+
+ smsdev = sdio_get_drvdata(func);
+
+ /* FIXME: racy! */
+ if (smsdev->split_cb)
+ smscore_putbuffer(smsdev->coredev, smsdev->split_cb);
+
+ smscore_unregister_device(smsdev->coredev);
+
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+
+ kfree(smsdev);
+}
+
+static struct sdio_driver smssdio_driver = {
+ .name = "smssdio",
+ .id_table = smssdio_ids,
+ .probe = smssdio_probe,
+ .remove = smssdio_remove,
+};
+
+/*******************************************************************/
+/* Module functions */
+/*******************************************************************/
+
+int smssdio_module_init(void)
+{
+ int ret = 0;
+
+ printk(KERN_INFO "smssdio: Siano SMS1xxx SDIO driver\n");
+ printk(KERN_INFO "smssdio: Copyright Pierre Ossman\n");
+
+ ret = sdio_register_driver(&smssdio_driver);
+
+ return ret;
+}
+
+void smssdio_module_exit(void)
+{
+ sdio_unregister_driver(&smssdio_driver);
+}
+
+module_init(smssdio_module_init);
+module_exit(smssdio_module_exit);
+
+MODULE_DESCRIPTION("Siano SMS1xxx SDIO driver");
+MODULE_AUTHOR("Pierre Ossman");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 71c65f544c0..cb8a358b731 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -1,23 +1,23 @@
-/*
- * Driver for the Siano SMS1xxx USB dongle
- *
- * author: Anatoly Greenblat
- *
- * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation;
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- *
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+/****************************************************************
+
+Siano Mobile Silicon, Inc.
+MDTV receiver kernel modules.
+Copyright (C) 2005-2009, Uri Shkolnik, Anatoly Greenblat
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+(at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+****************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
@@ -26,6 +26,7 @@
#include "smscoreapi.h"
#include "sms-cards.h"
+#include "smsendian.h"
static int sms_dbg;
module_param_named(debug, sms_dbg, int, 0644);
@@ -64,15 +65,16 @@ static void smsusb_onresponse(struct urb *urb)
struct smsusb_urb_t *surb = (struct smsusb_urb_t *) urb->context;
struct smsusb_device_t *dev = surb->dev;
- if (urb->status < 0) {
- sms_err("error, urb status %d, %d bytes",
+ if (urb->status == -ESHUTDOWN) {
+ sms_err("error, urb status %d (-ESHUTDOWN), %d bytes",
urb->status, urb->actual_length);
return;
}
- if (urb->actual_length > 0) {
- struct SmsMsgHdr_ST *phdr = (struct SmsMsgHdr_ST *) surb->cb->p;
+ if ((urb->actual_length > 0) && (urb->status == 0)) {
+ struct SmsMsgHdr_ST *phdr = (struct SmsMsgHdr_ST *)surb->cb->p;
+ smsendian_handle_message_header(phdr);
if (urb->actual_length >= phdr->msgLength) {
surb->cb->size = phdr->msgLength;
@@ -109,7 +111,10 @@ static void smsusb_onresponse(struct urb *urb)
"msglen %d actual %d",
phdr->msgLength, urb->actual_length);
}
- }
+ } else
+ sms_err("error, urb status %d, %d bytes",
+ urb->status, urb->actual_length);
+
exit_and_resubmit:
smsusb_submit_urb(dev, surb);
@@ -176,6 +181,7 @@ static int smsusb_sendrequest(void *context, void *buffer, size_t size)
struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
int dummy;
+ smsendian_handle_message_header((struct SmsMsgHdr_ST *)buffer);
return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
buffer, size, &dummy, 1000);
}
@@ -333,8 +339,8 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
case SMS_VEGA:
dev->buffer_size = USB2_BUFFER_SIZE;
dev->response_alignment =
- dev->udev->ep_in[1]->desc.wMaxPacketSize -
- sizeof(struct SmsMsgHdr_ST);
+ le16_to_cpu(dev->udev->ep_in[1]->desc.wMaxPacketSize) -
+ sizeof(struct SmsMsgHdr_ST);
params.flags |= SMS_DEVICE_FAMILY2;
break;
@@ -479,7 +485,6 @@ static int smsusb_resume(struct usb_interface *intf)
}
struct usb_device_id smsusb_id_table[] = {
-#ifdef CONFIG_DVB_SIANO_SMS1XXX_SMS_IDS
{ USB_DEVICE(0x187f, 0x0010),
.driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
{ USB_DEVICE(0x187f, 0x0100),
@@ -490,7 +495,6 @@ struct usb_device_id smsusb_id_table[] = {
.driver_info = SMS1XXX_BOARD_SIANO_NOVA_B },
{ USB_DEVICE(0x187f, 0x0300),
.driver_info = SMS1XXX_BOARD_SIANO_VEGA },
-#endif
{ USB_DEVICE(0x2040, 0x1700),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_CATAMOUNT },
{ USB_DEVICE(0x2040, 0x1800),
@@ -521,8 +525,13 @@ struct usb_device_id smsusb_id_table[] = {
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0x5590),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
- { } /* Terminating entry */
-};
+ { USB_DEVICE(0x187f, 0x0202),
+ .driver_info = SMS1XXX_BOARD_SIANO_NICE },
+ { USB_DEVICE(0x187f, 0x0301),
+ .driver_info = SMS1XXX_BOARD_SIANO_VENICE },
+ { } /* Terminating entry */
+ };
+
MODULE_DEVICE_TABLE(usb, smsusb_id_table);
static struct usb_driver smsusb_driver = {
@@ -548,14 +557,14 @@ int smsusb_module_init(void)
void smsusb_module_exit(void)
{
- sms_debug("");
/* Regular USB Cleanup */
usb_deregister(&smsusb_driver);
+ sms_info("end");
}
module_init(smsusb_module_init);
module_exit(smsusb_module_exit);
-MODULE_DESCRIPTION("Driver for the Siano SMS1XXX USB dongle");
+MODULE_DESCRIPTION("Driver for the Siano SMS1xxx USB dongle");
MODULE_AUTHOR("Siano Mobile Silicon, INC. (uris@siano-ms.com)");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index e4d0900d512..53884814161 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -89,6 +89,7 @@
static void p_to_t(u8 const *buf, long int length, u16 pid,
u8 *counter, struct dvb_demux_feed *feed);
+static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, size_t len);
int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len)
@@ -192,8 +193,6 @@ int av7110_av_start_play(struct av7110 *av7110, int av)
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AV_PES, 0);
break;
}
- if (!ret)
- ret = av7110->playing;
return ret;
}
@@ -437,6 +436,45 @@ static void play_audio_cb(u8 *buf, int count, void *priv)
aux_ring_buffer_write(&av7110->aout, buf, count);
}
+
+#define FREE_COND_TS (dvb_ringbuffer_free(rb) >= 4096)
+
+static ssize_t ts_play(struct av7110 *av7110, const char __user *buf,
+ unsigned long count, int nonblock, int type)
+{
+ struct dvb_ringbuffer *rb;
+ u8 *kb;
+ unsigned long todo = count;
+
+ dprintk(2, "%s: type %d cnt %lu\n", __func__, type, count);
+
+ rb = (type) ? &av7110->avout : &av7110->aout;
+ kb = av7110->kbuf[type];
+
+ if (!kb)
+ return -ENOBUFS;
+
+ if (nonblock && !FREE_COND_TS)
+ return -EWOULDBLOCK;
+
+ while (todo >= TS_SIZE) {
+ if (!FREE_COND_TS) {
+ if (nonblock)
+ return count - todo;
+ if (wait_event_interruptible(rb->queue, FREE_COND_TS))
+ return count - todo;
+ }
+ if (copy_from_user(kb, buf, TS_SIZE))
+ return -EFAULT;
+ write_ts_to_decoder(av7110, type, kb, TS_SIZE);
+ todo -= TS_SIZE;
+ buf += TS_SIZE;
+ }
+
+ return count - todo;
+}
+
+
#define FREE_COND (dvb_ringbuffer_free(&av7110->avout) >= 20 * 1024 && \
dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024)
@@ -780,11 +818,37 @@ static void p_to_t(u8 const *buf, long int length, u16 pid, u8 *counter,
}
+static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, size_t len)
+{
+ struct ipack *ipack = &av7110->ipack[type];
+
+ if (buf[1] & TRANS_ERROR) {
+ av7110_ipack_reset(ipack);
+ return -1;
+ }
+
+ if (!(buf[3] & PAYLOAD))
+ return -1;
+
+ if (buf[1] & PAY_START)
+ av7110_ipack_flush(ipack);
+
+ if (buf[3] & ADAPT_FIELD) {
+ len -= buf[4] + 1;
+ buf += buf[4] + 1;
+ if (!len)
+ return 0;
+ }
+
+ av7110_ipack_instant_repack(buf + 4, len - 4, ipack);
+ return 0;
+}
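/*
 * Worked example for the adaptation-field handling above (the lengths are
 * illustrative): in a 188 byte TS packet whose adaptation_field_length
 * byte buf[4] is 7, the payload starts at offset 4 + 1 + 7 = 12, so
 * write_ts_to_decoder() hands 188 - 12 = 176 bytes to the packetizer.
 */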
+
+
int av7110_write_to_decoder(struct dvb_demux_feed *feed, const u8 *buf, size_t len)
{
struct dvb_demux *demux = feed->demux;
struct av7110 *av7110 = (struct av7110 *) demux->priv;
- struct ipack *ipack = &av7110->ipack[feed->pes_type];
dprintk(2, "av7110:%p, \n", av7110);
@@ -804,20 +868,7 @@ int av7110_write_to_decoder(struct dvb_demux_feed *feed, const u8 *buf, size_t l
return -1;
}
- if (!(buf[3] & 0x10)) /* no payload? */
- return -1;
- if (buf[1] & 0x40)
- av7110_ipack_flush(ipack);
-
- if (buf[3] & 0x20) { /* adaptation field? */
- len -= buf[4] + 1;
- buf += buf[4] + 1;
- if (!len)
- return 0;
- }
-
- av7110_ipack_instant_repack(buf + 4, len - 4, &av7110->ipack[feed->pes_type]);
- return 0;
+ return write_ts_to_decoder(av7110, feed->pes_type, buf, len);
}
@@ -916,6 +967,7 @@ static ssize_t dvb_video_write(struct file *file, const char __user *buf,
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
+ unsigned char c;
dprintk(2, "av7110:%p, \n", av7110);
@@ -925,7 +977,12 @@ static ssize_t dvb_video_write(struct file *file, const char __user *buf,
if (av7110->videostate.stream_source != VIDEO_SOURCE_MEMORY)
return -EPERM;
- return dvb_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
+ if (get_user(c, buf))
+ return -EFAULT;
+ if (c == 0x47 && count % TS_SIZE == 0)
+ return ts_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
+ else
+ return dvb_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
}
static unsigned int dvb_audio_poll(struct file *file, poll_table *wait)
@@ -952,6 +1009,7 @@ static ssize_t dvb_audio_write(struct file *file, const char __user *buf,
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
+ unsigned char c;
dprintk(2, "av7110:%p, \n", av7110);
@@ -959,7 +1017,13 @@ static ssize_t dvb_audio_write(struct file *file, const char __user *buf,
printk(KERN_ERR "not audio source memory\n");
return -EPERM;
}
- return dvb_aplay(av7110, buf, count, file->f_flags & O_NONBLOCK, 0);
+
+ if (get_user(c, buf))
+ return -EFAULT;
+ if (c == 0x47 && count % TS_SIZE == 0)
+ return ts_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 0);
+ else
+ return dvb_aplay(av7110, buf, count, file->f_flags & O_NONBLOCK, 0);
}
static u8 iframe_header[] = { 0x00, 0x00, 0x01, 0xe0, 0x00, 0x00, 0x80, 0x00, 0x00 };
@@ -1062,7 +1126,6 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
if (ret)
break;
}
-
if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY) {
if (av7110->playing == RP_AV) {
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0);
@@ -1122,20 +1185,16 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
case VIDEO_SET_DISPLAY_FORMAT:
{
video_displayformat_t format = (video_displayformat_t) arg;
-
switch (format) {
case VIDEO_PAN_SCAN:
av7110->display_panscan = VID_PAN_SCAN_PREF;
break;
-
case VIDEO_LETTER_BOX:
av7110->display_panscan = VID_VC_AND_PS_PREF;
break;
-
case VIDEO_CENTER_CUT_OUT:
av7110->display_panscan = VID_CENTRE_CUT_PREF;
break;
-
default:
ret = -EINVAL;
}
@@ -1183,7 +1242,8 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
case VIDEO_SLOWMOTION:
if (av7110->playing&RP_VIDEO) {
- ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Slow, 2, 0, 0);
+ if (av7110->trickmode != TRICK_SLOW)
+ ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Slow, 2, 0, 0);
if (!ret)
ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg);
} else {
@@ -1207,7 +1267,6 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
case VIDEO_CLEAR_BUFFER:
dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
av7110_ipack_reset(&av7110->ipack[1]);
-
if (av7110->playing == RP_AV) {
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
__Play, 2, AV_PES, 0);
@@ -1228,13 +1287,13 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
break;
case VIDEO_SET_STREAMTYPE:
-
break;
default:
ret = -ENOIOCTLCMD;
break;
}
+
return ret;
}
@@ -1309,7 +1368,6 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
case AUDIO_CHANNEL_SELECT:
av7110->audiostate.channel_select = (audio_channel_select_t) arg;
-
switch(av7110->audiostate.channel_select) {
case AUDIO_STEREO:
ret = audcom(av7110, AUDIO_CMD_STEREO);
@@ -1320,7 +1378,6 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220);
}
break;
-
case AUDIO_MONO_LEFT:
ret = audcom(av7110, AUDIO_CMD_MONO_L);
if (!ret) {
@@ -1330,7 +1387,6 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0200);
}
break;
-
case AUDIO_MONO_RIGHT:
ret = audcom(av7110, AUDIO_CMD_MONO_R);
if (!ret) {
@@ -1340,7 +1396,6 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0210);
}
break;
-
default:
ret = -EINVAL;
break;
@@ -1366,21 +1421,24 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
__Play, 2, AV_PES, 0);
break;
- case AUDIO_SET_ID:
+ case AUDIO_SET_ID:
break;
+
case AUDIO_SET_MIXER:
{
struct audio_mixer *amix = (struct audio_mixer *)parg;
-
ret = av7110_set_volume(av7110, amix->volume_left, amix->volume_right);
break;
}
+
case AUDIO_SET_STREAMTYPE:
break;
+
default:
ret = -ENOIOCTLCMD;
}
+
return ret;
}
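Note on the av7110_av.c hunks above: dvb_video_write() and dvb_audio_write() now dispatch on the first byte of the user buffer. A leading 0x47 MPEG-TS sync byte together with a length that is a whole number of TS packets selects the new ts_play() path; everything else still goes through the existing PES path. A minimal userspace sketch of hitting the TS path follows; the device node name and the 188-byte packet length (assumed to match TS_SIZE) are assumptions for illustration, not taken from this patch.

#include <fcntl.h>
#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

#define TS_PKT_LEN 188	/* assumed value of TS_SIZE */

/* Feed whole TS packets to the av7110 video device; pkts[0] must be the
 * 0x47 sync byte for the driver to pick the ts_play() path. */
static ssize_t feed_ts(const unsigned char *pkts, size_t n_pkts)
{
	int fd = open("/dev/dvb/adapter0/video0", O_WRONLY);	/* assumed node */
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, pkts, n_pkts * TS_PKT_LEN);
	close(fd);
	return ret;
}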
diff --git a/drivers/media/dvb/ttpci/av7110_hw.c b/drivers/media/dvb/ttpci/av7110_hw.c
index 5e3f88911a1..e162691b515 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.c
+++ b/drivers/media/dvb/ttpci/av7110_hw.c
@@ -1089,7 +1089,7 @@ int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc)
else {
int i, len = dc->x0-dc->color+1;
u8 __user *colors = (u8 __user *)dc->data;
- u8 r, g, b, blend;
+ u8 r, g = 0, b = 0, blend = 0;
ret = 0;
for (i = 0; i<len; i++) {
if (get_user(r, colors + i * 4) ||
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index 2210cff738e..ce64c6214cc 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -458,7 +458,7 @@ static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
dprintk(2, "VIDIOC_ENUMINPUT: %d\n", i->index);
if (av7110->analog_tuner_flags) {
- if (i->index < 0 || i->index >= 4)
+ if (i->index >= 4)
return -EINVAL;
} else {
if (i->index != 0)
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 855fe74b640..8ea91522767 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -1413,7 +1413,7 @@ static struct v4l2_input knc1_inputs[KNC1_INPUTS] = {
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
dprintk(1, "VIDIOC_ENUMINPUT %d.\n", i->index);
- if (i->index < 0 || i->index >= KNC1_INPUTS)
+ if (i->index >= KNC1_INPUTS)
return -EINVAL;
memcpy(i, &knc1_inputs[i->index], sizeof(struct v4l2_input));
return 0;
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index 83e9e7750c8..e48380c4899 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -47,6 +47,9 @@
#include "bsru6.h"
#include "bsbe1.h"
#include "tdhd1.h"
+#include "stv6110x.h"
+#include "stv090x.h"
+#include "isl6423.h"
static int diseqc_method;
module_param(diseqc_method, int, 0444);
@@ -425,6 +428,44 @@ static u8 read_pwm(struct budget* budget)
return pwm;
}
+static struct stv090x_config tt1600_stv090x_config = {
+ .device = STV0903,
+ .demod_mode = STV090x_SINGLE,
+ .clk_mode = STV090x_CLK_EXT,
+
+ .xtal = 27000000,
+ .address = 0x68,
+ .ref_clk = 27000000,
+
+ .ts1_mode = STV090x_TSMODE_DVBCI,
+ .ts2_mode = STV090x_TSMODE_SERIAL_CONTINUOUS,
+
+ .repeater_level = STV090x_RPTLEVEL_16,
+
+ .tuner_init = NULL,
+ .tuner_set_mode = NULL,
+ .tuner_set_frequency = NULL,
+ .tuner_get_frequency = NULL,
+ .tuner_set_bandwidth = NULL,
+ .tuner_get_bandwidth = NULL,
+ .tuner_set_bbgain = NULL,
+ .tuner_get_bbgain = NULL,
+ .tuner_set_refclk = NULL,
+ .tuner_get_status = NULL,
+};
+
+static struct stv6110x_config tt1600_stv6110x_config = {
+ .addr = 0x60,
+ .refclk = 27000000,
+};
+
+static struct isl6423_config tt1600_isl6423_config = {
+ .current_max = SEC_CURRENT_515m,
+ .curlim = SEC_CURRENT_LIM_ON,
+ .mod_extern = 1,
+ .addr = 0x08,
+};
+
static void frontend_init(struct budget *budget)
{
(void)alps_bsbe1_config; /* avoid warning */
@@ -566,6 +607,48 @@ static void frontend_init(struct budget *budget)
}
break;
}
+
+ case 0x101c: { /* TT S2-1600 */
+ struct stv6110x_devctl *ctl;
+ saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
+ msleep(50);
+ saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
+ msleep(250);
+
+ budget->dvb_frontend = dvb_attach(stv090x_attach,
+ &tt1600_stv090x_config,
+ &budget->i2c_adap,
+ STV090x_DEMODULATOR_0);
+
+ if (budget->dvb_frontend) {
+
+ ctl = dvb_attach(stv6110x_attach,
+ budget->dvb_frontend,
+ &tt1600_stv6110x_config,
+ &budget->i2c_adap);
+
+ tt1600_stv090x_config.tuner_init = ctl->tuner_init;
+ tt1600_stv090x_config.tuner_set_mode = ctl->tuner_set_mode;
+ tt1600_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency;
+ tt1600_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency;
+ tt1600_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth;
+ tt1600_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth;
+ tt1600_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain;
+ tt1600_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain;
+ tt1600_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk;
+ tt1600_stv090x_config.tuner_get_status = ctl->tuner_get_status;
+
+ dvb_attach(isl6423_attach,
+ budget->dvb_frontend,
+ &budget->i2c_adap,
+ &tt1600_isl6423_config);
+
+ } else {
+ dvb_frontend_detach(budget->dvb_frontend);
+ budget->dvb_frontend = NULL;
+ }
+ }
+ break;
}
if (budget->dvb_frontend == NULL) {
@@ -641,6 +724,7 @@ MAKE_BUDGET_INFO(ttbc, "TT-Budget/WinTV-NOVA-C PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbt, "TT-Budget/WinTV-NOVA-T PCI", BUDGET_TT);
MAKE_BUDGET_INFO(satel, "SATELCO Multimedia PCI", BUDGET_TT_HW_DISEQC);
MAKE_BUDGET_INFO(ttbs1401, "TT-Budget-S-1401 PCI", BUDGET_TT);
+MAKE_BUDGET_INFO(tt1600, "TT-Budget S2-1600 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(fsacs0, "Fujitsu Siemens Activy Budget-S PCI (rev GR/grundig frontend)", BUDGET_FS_ACTIVY);
MAKE_BUDGET_INFO(fsacs1, "Fujitsu Siemens Activy Budget-S PCI (rev AL/alps frontend)", BUDGET_FS_ACTIVY);
MAKE_BUDGET_INFO(fsact, "Fujitsu Siemens Activy Budget-T PCI (rev GR/Grundig frontend)", BUDGET_FS_ACTIVY);
@@ -653,6 +737,7 @@ static struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(satel, 0x13c2, 0x1013),
MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1016),
MAKE_EXTENSION_PCI(ttbs1401, 0x13c2, 0x1018),
+ MAKE_EXTENSION_PCI(tt1600, 0x13c2, 0x101c),
MAKE_EXTENSION_PCI(fsacs1,0x1131, 0x4f60),
MAKE_EXTENSION_PCI(fsacs0,0x1131, 0x4f61),
MAKE_EXTENSION_PCI(fsact1, 0x1131, 0x5f60),
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 61357620229..ed9cd7ad060 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -33,6 +33,10 @@
History:
+ Version 0.46:
+ Removed usb_dsbr100_open/close calls and radio->users counter. Also,
+ radio->muted changed to radio->status and suspend/resume calls updated.
+
Version 0.45:
Converted to v4l2_device.
@@ -100,8 +104,8 @@
*/
#include <linux/version.h> /* for KERNEL_VERSION MACRO */
-#define DRIVER_VERSION "v0.45"
-#define RADIO_VERSION KERNEL_VERSION(0, 4, 5)
+#define DRIVER_VERSION "v0.46"
+#define RADIO_VERSION KERNEL_VERSION(0, 4, 6)
#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>"
#define DRIVER_DESC "D-Link DSB-R100 USB FM radio driver"
@@ -121,13 +125,15 @@ devices, that would be 76 and 91. */
#define FREQ_MAX 108.0
#define FREQ_MUL 16000
+/* defines for radio->status */
+#define STARTED 0
+#define STOPPED 1
+
#define videodev_to_radio(d) container_of(d, struct dsbr100_device, videodev)
static int usb_dsbr100_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void usb_dsbr100_disconnect(struct usb_interface *intf);
-static int usb_dsbr100_open(struct file *file);
-static int usb_dsbr100_close(struct file *file);
static int usb_dsbr100_suspend(struct usb_interface *intf,
pm_message_t message);
static int usb_dsbr100_resume(struct usb_interface *intf);
@@ -145,9 +151,8 @@ struct dsbr100_device {
struct mutex lock; /* buffer locking */
int curfreq;
int stereo;
- int users;
int removed;
- int muted;
+ int status;
};
static struct usb_device_id usb_dsbr100_device_table [] = {
@@ -201,7 +206,7 @@ static int dsbr100_start(struct dsbr100_device *radio)
goto usb_control_msg_failed;
}
- radio->muted = 0;
+ radio->status = STARTED;
mutex_unlock(&radio->lock);
return (radio->transfer_buffer)[0];
@@ -244,7 +249,7 @@ static int dsbr100_stop(struct dsbr100_device *radio)
goto usb_control_msg_failed;
}
- radio->muted = 1;
+ radio->status = STOPPED;
mutex_unlock(&radio->lock);
return (radio->transfer_buffer)[0];
@@ -258,12 +263,12 @@ usb_control_msg_failed:
}
/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
-static int dsbr100_setfreq(struct dsbr100_device *radio, int freq)
+static int dsbr100_setfreq(struct dsbr100_device *radio)
{
int retval;
int request;
+ int freq = (radio->curfreq / 16 * 80) / 1000 + 856;
- freq = (freq / 16 * 80) / 1000 + 856;
mutex_lock(&radio->lock);
retval = usb_control_msg(radio->usbdev,
@@ -431,7 +436,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
radio->curfreq = f->frequency;
mutex_unlock(&radio->lock);
- retval = dsbr100_setfreq(radio, radio->curfreq);
+ retval = dsbr100_setfreq(radio);
if (retval < 0)
dev_warn(&radio->usbdev->dev, "Set frequency failed\n");
return 0;
@@ -473,7 +478,7 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- ctrl->value = radio->muted;
+ ctrl->value = radio->status;
return 0;
}
return -EINVAL;
@@ -543,65 +548,27 @@ static int vidioc_s_audio(struct file *file, void *priv,
return 0;
}
-static int usb_dsbr100_open(struct file *file)
-{
- struct dsbr100_device *radio = video_drvdata(file);
- int retval;
-
- lock_kernel();
- radio->users = 1;
- radio->muted = 1;
-
- retval = dsbr100_start(radio);
- if (retval < 0) {
- dev_warn(&radio->usbdev->dev,
- "Radio did not start up properly\n");
- radio->users = 0;
- unlock_kernel();
- return -EIO;
- }
-
- retval = dsbr100_setfreq(radio, radio->curfreq);
- if (retval < 0)
- dev_warn(&radio->usbdev->dev,
- "set frequency failed\n");
-
- unlock_kernel();
- return 0;
-}
-
-static int usb_dsbr100_close(struct file *file)
-{
- struct dsbr100_device *radio = video_drvdata(file);
- int retval;
-
- if (!radio)
- return -ENODEV;
-
- mutex_lock(&radio->lock);
- radio->users = 0;
- mutex_unlock(&radio->lock);
-
- if (!radio->removed) {
- retval = dsbr100_stop(radio);
- if (retval < 0) {
- dev_warn(&radio->usbdev->dev,
- "dsbr100_stop failed\n");
- }
-
- }
- return 0;
-}
-
/* Suspend device - stop device. */
static int usb_dsbr100_suspend(struct usb_interface *intf, pm_message_t message)
{
struct dsbr100_device *radio = usb_get_intfdata(intf);
int retval;
- retval = dsbr100_stop(radio);
- if (retval < 0)
- dev_warn(&intf->dev, "dsbr100_stop failed\n");
+ if (radio->status == STARTED) {
+ retval = dsbr100_stop(radio);
+ if (retval < 0)
+ dev_warn(&intf->dev, "dsbr100_stop failed\n");
+
+ /* dsbr100_stop() sets status to STOPPED.
+ * If we want the driver to start the radio on resume,
+ * we set status back to STARTED here.
+ * On resume we check status and start the radio if needed.
+ */
+
+ mutex_lock(&radio->lock);
+ radio->status = STARTED;
+ mutex_unlock(&radio->lock);
+ }
dev_info(&intf->dev, "going into suspend..\n");
@@ -614,9 +581,11 @@ static int usb_dsbr100_resume(struct usb_interface *intf)
struct dsbr100_device *radio = usb_get_intfdata(intf);
int retval;
- retval = dsbr100_start(radio);
- if (retval < 0)
- dev_warn(&intf->dev, "dsbr100_start failed\n");
+ if (radio->status == STARTED) {
+ retval = dsbr100_start(radio);
+ if (retval < 0)
+ dev_warn(&intf->dev, "dsbr100_start failed\n");
+ }
dev_info(&intf->dev, "coming out of suspend..\n");
@@ -636,8 +605,6 @@ static void usb_dsbr100_video_device_release(struct video_device *videodev)
/* File system interface */
static const struct v4l2_file_operations usb_dsbr100_fops = {
.owner = THIS_MODULE,
- .open = usb_dsbr100_open,
- .release = usb_dsbr100_close,
.ioctl = video_ioctl2,
};
@@ -695,9 +662,9 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
mutex_init(&radio->lock);
radio->removed = 0;
- radio->users = 0;
radio->usbdev = interface_to_usbdev(intf);
radio->curfreq = FREQ_MIN * FREQ_MUL;
+ radio->status = STOPPED;
video_set_drvdata(&radio->videodev, radio);
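The dsbr100 suspend/resume rework above keeps the user's intent in radio->status rather than the hardware state: suspend stops the hardware but writes STARTED back, so resume knows the radio was playing and should be powered up again. A minimal sketch of the same bookkeeping pattern, with hypothetical hw_stop()/hw_start() helpers standing in for the real USB control transfers:

enum radio_status { STARTED = 0, STOPPED = 1 };

struct radio { enum radio_status status; };

/* Stand-ins for the real USB control transfers. */
static void hw_stop(struct radio *r)  { r->status = STOPPED; }
static void hw_start(struct radio *r) { r->status = STARTED; }

static void radio_suspend(struct radio *r)
{
	if (r->status == STARTED) {
		hw_stop(r);
		/* hw_stop() left status == STOPPED; put STARTED back so
		 * resume knows the radio was playing before suspend. */
		r->status = STARTED;
	}
}

static void radio_resume(struct radio *r)
{
	if (r->status == STARTED)
		hw_start(r);
}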
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index cab19d05e02..837467f9380 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -64,6 +64,7 @@
#include <media/v4l2-ioctl.h>
#include <linux/usb.h>
#include <linux/version.h> /* for KERNEL_VERSION MACRO */
+#include <linux/mutex.h>
/* driver and module definitions */
#define DRIVER_AUTHOR "Alexey Klimov <klimov.linux@gmail.com>"
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 5cf6c45b91f..49c4aab95da 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -49,7 +49,6 @@ struct fmi
int io;
int curvol; /* 1 or 0 */
unsigned long curfreq; /* freq in kHz */
- __u32 flags;
struct mutex lock;
};
@@ -57,7 +56,7 @@ static struct fmi fmi_card;
static struct pnp_dev *dev;
/* freq is in 1/16 kHz to internal number, hw precision is 50 kHz */
-/* It is only useful to give freq in intervall of 800 (=0.05Mhz),
+/* It is only useful to give freq in interval of 800 (=0.05Mhz),
* other bits will be truncated, e.g 92.7400016 -> 92.7, but
* 92.7400017 -> 92.75
*/
@@ -142,7 +141,6 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
- int mult;
struct fmi *fmi = video_drvdata(file);
if (v->index > 0)
@@ -150,11 +148,10 @@ static int vidioc_g_tuner(struct file *file, void *priv,
strlcpy(v->name, "FM", sizeof(v->name));
v->type = V4L2_TUNER_RADIO;
- mult = (fmi->flags & V4L2_TUNER_CAP_LOW) ? 1 : 1000;
- v->rangelow = RSF16_MINFREQ / mult;
- v->rangehigh = RSF16_MAXFREQ / mult;
+ v->rangelow = RSF16_MINFREQ;
+ v->rangehigh = RSF16_MAXFREQ;
v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
- v->capability = fmi->flags & V4L2_TUNER_CAP_LOW;
+ v->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
v->audmode = V4L2_TUNER_MODE_STEREO;
v->signal = fmi_getsigstr(fmi);
return 0;
@@ -171,8 +168,6 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct fmi *fmi = video_drvdata(file);
- if (!(fmi->flags & V4L2_TUNER_CAP_LOW))
- f->frequency *= 1000;
if (f->frequency < RSF16_MINFREQ ||
f->frequency > RSF16_MAXFREQ)
return -EINVAL;
@@ -189,8 +184,6 @@ static int vidioc_g_frequency(struct file *file, void *priv,
f->type = V4L2_TUNER_RADIO;
f->frequency = fmi->curfreq;
- if (!(fmi->flags & V4L2_TUNER_CAP_LOW))
- f->frequency /= 1000;
return 0;
}
@@ -347,7 +340,6 @@ static int __init fmi_init(void)
return res;
}
- fmi->flags = V4L2_TUNER_CAP_LOW;
strlcpy(fmi->vdev.name, v4l2_dev->name, sizeof(fmi->vdev.name));
fmi->vdev.v4l2_dev = v4l2_dev;
fmi->vdev.fops = &fmi_fops;
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 935ff9bcdfc..a11414f648d 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -61,13 +61,12 @@ struct fmr2
int stereo; /* card is producing stereo audio */
unsigned long curfreq; /* freq in kHz */
int card_type;
- u32 flags;
};
static struct fmr2 fmr2_card;
/* hw precision is 12.5 kHz
- * It is only useful to give freq in intervall of 200 (=0.0125Mhz),
+ * It is only useful to give freq in interval of 200 (=0.0125Mhz),
* other bits will be truncated
*/
#define RSF16_ENCODE(x) ((x) / 200 + 856)
@@ -221,7 +220,6 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
- int mult;
struct fmr2 *fmr2 = video_drvdata(file);
if (v->index > 0)
@@ -230,13 +228,12 @@ static int vidioc_g_tuner(struct file *file, void *priv,
strlcpy(v->name, "FM", sizeof(v->name));
v->type = V4L2_TUNER_RADIO;
- mult = (fmr2->flags & V4L2_TUNER_CAP_LOW) ? 1 : 1000;
- v->rangelow = RSF16_MINFREQ / mult;
- v->rangehigh = RSF16_MAXFREQ / mult;
- v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
- v->capability = fmr2->flags&V4L2_TUNER_CAP_LOW;
- v->audmode = fmr2->stereo ? V4L2_TUNER_MODE_STEREO:
- V4L2_TUNER_MODE_MONO;
+ v->rangelow = RSF16_MINFREQ;
+ v->rangehigh = RSF16_MAXFREQ;
+ v->rxsubchans = fmr2->stereo ? V4L2_TUNER_SUB_STEREO :
+ V4L2_TUNER_SUB_MONO;
+ v->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
+ v->audmode = V4L2_TUNER_MODE_STEREO;
mutex_lock(&fmr2->lock);
v->signal = fmr2_getsigstr(fmr2);
mutex_unlock(&fmr2->lock);
@@ -254,8 +251,6 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct fmr2 *fmr2 = video_drvdata(file);
- if (!(fmr2->flags & V4L2_TUNER_CAP_LOW))
- f->frequency *= 1000;
if (f->frequency < RSF16_MINFREQ ||
f->frequency > RSF16_MAXFREQ)
return -EINVAL;
@@ -279,8 +274,6 @@ static int vidioc_g_frequency(struct file *file, void *priv,
f->type = V4L2_TUNER_RADIO;
f->frequency = fmr2->curfreq;
- if (!(fmr2->flags & V4L2_TUNER_CAP_LOW))
- f->frequency /= 1000;
return 0;
}
@@ -406,7 +399,6 @@ static int __init fmr2_init(void)
strlcpy(v4l2_dev->name, "sf16fmr2", sizeof(v4l2_dev->name));
fmr2->io = io;
fmr2->stereo = 1;
- fmr2->flags = V4L2_TUNER_CAP_LOW;
mutex_init(&fmr2->lock);
if (!request_region(fmr2->io, 2, "sf16fmr2")) {
diff --git a/drivers/media/radio/radio-si470x.c b/drivers/media/radio/radio-si470x.c
index bd945d04dc9..640421ceb24 100644
--- a/drivers/media/radio/radio-si470x.c
+++ b/drivers/media/radio/radio-si470x.c
@@ -1214,7 +1214,6 @@ static int si470x_fops_release(struct file *file)
usb_autopm_put_interface(radio->intf);
}
-unlock:
mutex_unlock(&radio->disconnect_lock);
done:
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 9d48da2fb01..94f440535c6 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -440,6 +440,24 @@ config VIDEO_ADV7175
To compile this driver as a module, choose M here: the
module will be called adv7175.
+config VIDEO_THS7303
+ tristate "THS7303 Video Amplifier"
+ depends on I2C
+ help
+ Support for TI THS7303 video amplifier
+
+ To compile this driver as a module, choose M here: the
+ module will be called ths7303.
+
+config VIDEO_ADV7343
+ tristate "ADV7343 video encoder"
+ depends on I2C
+ help
+ Support for Analog Devices I2C bus based ADV7343 encoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7343.
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -694,7 +712,7 @@ config VIDEO_CAFE_CCIC
config SOC_CAMERA
tristate "SoC camera support"
- depends on VIDEO_V4L2 && HAS_DMA
+ depends on VIDEO_V4L2 && HAS_DMA && I2C
select VIDEOBUF_GEN
help
SoC Camera is a common API to several cameras, not connecting
@@ -758,10 +776,14 @@ config VIDEO_MX1
---help---
This is a v4l2 driver for the i.MX1/i.MXL CMOS Sensor Interface
+config MX3_VIDEO
+ bool
+
config VIDEO_MX3
tristate "i.MX3x Camera Sensor Interface driver"
depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA
select VIDEOBUF_DMA_CONTIG
+ select MX3_VIDEO
---help---
This is a v4l2 driver for the i.MX3x Camera Sensor Interface
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 3f1a0350a56..7fb3add1b38 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -12,6 +12,8 @@ omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o
+# V4L2 core modules
+
obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-int-device.o
ifeq ($(CONFIG_COMPAT),y)
obj-$(CONFIG_VIDEO_DEV) += v4l2-compat-ioctl32.o
@@ -23,21 +25,15 @@ ifeq ($(CONFIG_VIDEO_V4L1_COMPAT),y)
obj-$(CONFIG_VIDEO_DEV) += v4l1-compat.o
endif
-obj-$(CONFIG_VIDEO_TUNER) += tuner.o
+# All i2c modules must come first:
-obj-$(CONFIG_VIDEO_BT848) += bt8xx/
-obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
+obj-$(CONFIG_VIDEO_TUNER) += tuner.o
obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
-
obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
obj-$(CONFIG_VIDEO_SAA5246A) += saa5246a.o
obj-$(CONFIG_VIDEO_SAA5249) += saa5249.o
-obj-$(CONFIG_VIDEO_CQCAM) += c-qcam.o
-obj-$(CONFIG_VIDEO_BWQCAM) += bw-qcam.o
-obj-$(CONFIG_VIDEO_W9966) += w9966.o
-
obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o
obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o
obj-$(CONFIG_VIDEO_TEA6420) += tea6420.o
@@ -49,16 +45,47 @@ obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
+obj-$(CONFIG_VIDEO_ADV7343) += adv7343.o
obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
obj-$(CONFIG_VIDEO_BT819) += bt819.o
obj-$(CONFIG_VIDEO_BT856) += bt856.o
obj-$(CONFIG_VIDEO_BT866) += bt866.o
obj-$(CONFIG_VIDEO_KS0127) += ks0127.o
+obj-$(CONFIG_VIDEO_THS7303) += ths7303.o
+obj-$(CONFIG_VIDEO_VINO) += indycam.o
+obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o
+obj-$(CONFIG_VIDEO_TVP514X) += tvp514x.o
+obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
+obj-$(CONFIG_VIDEO_CS5345) += cs5345.o
+obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o
+obj-$(CONFIG_VIDEO_M52790) += m52790.o
+obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o
+obj-$(CONFIG_VIDEO_WM8775) += wm8775.o
+obj-$(CONFIG_VIDEO_WM8739) += wm8739.o
+obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o
+obj-$(CONFIG_VIDEO_CX25840) += cx25840/
+obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
+obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
+obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
+obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
+obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
-obj-$(CONFIG_VIDEO_ZORAN) += zoran/
+obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
+obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
+obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
+obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
+obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
+obj-$(CONFIG_SOC_CAMERA_TW9910) += tw9910.o
+# And now the v4l2 drivers:
+
+obj-$(CONFIG_VIDEO_BT848) += bt8xx/
+obj-$(CONFIG_VIDEO_ZORAN) += zoran/
+obj-$(CONFIG_VIDEO_CQCAM) += c-qcam.o
+obj-$(CONFIG_VIDEO_BWQCAM) += bw-qcam.o
+obj-$(CONFIG_VIDEO_W9966) += w9966.o
obj-$(CONFIG_VIDEO_PMS) += pms.o
-obj-$(CONFIG_VIDEO_VINO) += vino.o indycam.o
+obj-$(CONFIG_VIDEO_VINO) += vino.o
obj-$(CONFIG_VIDEO_STRADIS) += stradis.o
obj-$(CONFIG_VIDEO_CPIA) += cpia.o
obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o
@@ -69,17 +96,7 @@ obj-$(CONFIG_VIDEO_CX88) += cx88/
obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
obj-$(CONFIG_VIDEO_CX231XX) += cx231xx/
obj-$(CONFIG_VIDEO_USBVISION) += usbvision/
-obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o
-obj-$(CONFIG_VIDEO_TVP514X) += tvp514x.o
obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
-obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
-obj-$(CONFIG_VIDEO_CS5345) += cs5345.o
-obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o
-obj-$(CONFIG_VIDEO_M52790) += m52790.o
-obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o
-obj-$(CONFIG_VIDEO_WM8775) += wm8775.o
-obj-$(CONFIG_VIDEO_WM8739) += wm8739.o
-obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o
obj-$(CONFIG_VIDEO_OVCAMCHIP) += ovcamchip/
obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
obj-$(CONFIG_VIDEO_MXB) += mxb.o
@@ -92,19 +109,12 @@ obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o
-obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
-obj-$(CONFIG_VIDEO_CX25840) += cx25840/
-obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
-obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
-obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
-
-obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
obj-$(CONFIG_USB_DABUSB) += dabusb.o
obj-$(CONFIG_USB_OV511) += ov511.o
@@ -134,24 +144,21 @@ obj-$(CONFIG_VIDEO_CX18) += cx18/
obj-$(CONFIG_VIDEO_VIVI) += vivi.o
obj-$(CONFIG_VIDEO_CX23885) += cx23885/
+obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
+obj-$(CONFIG_SOC_CAMERA) += soc_camera.o
+obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
+# soc-camera host drivers have to be linked after camera drivers
obj-$(CONFIG_VIDEO_MX1) += mx1_camera.o
obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
-obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
-obj-$(CONFIG_SOC_CAMERA) += soc_camera.o
-obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
-obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
-obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
-obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
-obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
-obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
-obj-$(CONFIG_SOC_CAMERA_TW9910) += tw9910.o
obj-$(CONFIG_VIDEO_AU0828) += au0828/
obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
+obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
+
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/video/adv7343.c b/drivers/media/video/adv7343.c
new file mode 100644
index 00000000000..30f5caf5dda
--- /dev/null
+++ b/drivers/media/video/adv7343.c
@@ -0,0 +1,534 @@
+/*
+ * adv7343 - ADV7343 Video Encoder Driver
+ *
+ * The encoder hardware does not support SECAM.
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/i2c.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+
+#include <media/adv7343.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-chip-ident.h>
+
+#include "adv7343_regs.h"
+
+MODULE_DESCRIPTION("ADV7343 video encoder driver");
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+struct adv7343_state {
+ struct v4l2_subdev sd;
+ u8 reg00;
+ u8 reg01;
+ u8 reg02;
+ u8 reg35;
+ u8 reg80;
+ u8 reg82;
+ int bright;
+ int hue;
+ int gain;
+ u32 output;
+ v4l2_std_id std;
+};
+
+static inline struct adv7343_state *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct adv7343_state, sd);
+}
+
+static inline int adv7343_write(struct v4l2_subdev *sd, u8 reg, u8 value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+static const u8 adv7343_init_reg_val[] = {
+ ADV7343_SOFT_RESET, ADV7343_SOFT_RESET_DEFAULT,
+ ADV7343_POWER_MODE_REG, ADV7343_POWER_MODE_REG_DEFAULT,
+
+ ADV7343_HD_MODE_REG1, ADV7343_HD_MODE_REG1_DEFAULT,
+ ADV7343_HD_MODE_REG2, ADV7343_HD_MODE_REG2_DEFAULT,
+ ADV7343_HD_MODE_REG3, ADV7343_HD_MODE_REG3_DEFAULT,
+ ADV7343_HD_MODE_REG4, ADV7343_HD_MODE_REG4_DEFAULT,
+ ADV7343_HD_MODE_REG5, ADV7343_HD_MODE_REG5_DEFAULT,
+ ADV7343_HD_MODE_REG6, ADV7343_HD_MODE_REG6_DEFAULT,
+ ADV7343_HD_MODE_REG7, ADV7343_HD_MODE_REG7_DEFAULT,
+
+ ADV7343_SD_MODE_REG1, ADV7343_SD_MODE_REG1_DEFAULT,
+ ADV7343_SD_MODE_REG2, ADV7343_SD_MODE_REG2_DEFAULT,
+ ADV7343_SD_MODE_REG3, ADV7343_SD_MODE_REG3_DEFAULT,
+ ADV7343_SD_MODE_REG4, ADV7343_SD_MODE_REG4_DEFAULT,
+ ADV7343_SD_MODE_REG5, ADV7343_SD_MODE_REG5_DEFAULT,
+ ADV7343_SD_MODE_REG6, ADV7343_SD_MODE_REG6_DEFAULT,
+ ADV7343_SD_MODE_REG7, ADV7343_SD_MODE_REG7_DEFAULT,
+ ADV7343_SD_MODE_REG8, ADV7343_SD_MODE_REG8_DEFAULT,
+
+ ADV7343_SD_HUE_REG, ADV7343_SD_HUE_REG_DEFAULT,
+ ADV7343_SD_CGMS_WSS0, ADV7343_SD_CGMS_WSS0_DEFAULT,
+ ADV7343_SD_BRIGHTNESS_WSS, ADV7343_SD_BRIGHTNESS_WSS_DEFAULT,
+};
+
+/*
+ * 2^32
+ * FSC(reg) = FSC (HZ) * --------
+ * 27000000
+ */
+static const struct adv7343_std_info stdinfo[] = {
+ {
+ /* FSC(Hz) = 3,579,545.45 Hz */
+ SD_STD_NTSC, 569408542, V4L2_STD_NTSC,
+ }, {
+ /* FSC(Hz) = 3,575,611.00 Hz */
+ SD_STD_PAL_M, 568782678, V4L2_STD_PAL_M,
+ }, {
+ /* FSC(Hz) = 3,582,056.00 Hz */
+ SD_STD_PAL_N, 569807903, V4L2_STD_PAL_Nc,
+ }, {
+ /* FSC(Hz) = 4,433,618.75 Hz */
+ SD_STD_PAL_N, 705268427, V4L2_STD_PAL_N,
+ }, {
+ /* FSC(Hz) = 4,433,618.75 Hz */
+ SD_STD_PAL_BDGHI, 705268427, V4L2_STD_PAL,
+ }, {
+ /* FSC(Hz) = 4,433,618.75 Hz */
+ SD_STD_NTSC, 705268427, V4L2_STD_NTSC_443,
+ }, {
+ /* FSC(Hz) = 4,433,618.75 Hz */
+ SD_STD_PAL_M, 705268427, V4L2_STD_PAL_60,
+ },
+};
+
+static int adv7343_setstd(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct adv7343_state *state = to_state(sd);
+ struct adv7343_std_info *std_info;
+ int output_idx, num_std;
+ char *fsc_ptr;
+ u8 reg, val;
+ int err = 0;
+ int i = 0;
+
+ output_idx = state->output;
+
+ std_info = (struct adv7343_std_info *)stdinfo;
+ num_std = ARRAY_SIZE(stdinfo);
+
+ for (i = 0; i < num_std; i++) {
+ if (std_info[i].stdid & std)
+ break;
+ }
+
+ if (i == num_std) {
+ v4l2_dbg(1, debug, sd,
+ "Invalid std or std is not supported: %llx\n",
+ (unsigned long long)std);
+ return -EINVAL;
+ }
+
+ /* Set the standard */
+ val = state->reg80 & (~(SD_STD_MASK));
+ val |= std_info[i].standard_val3;
+ err = adv7343_write(sd, ADV7343_SD_MODE_REG1, val);
+ if (err < 0)
+ goto setstd_exit;
+
+ state->reg80 = val;
+
+ /* Configure the input mode register */
+ val = state->reg01 & (~((u8) INPUT_MODE_MASK));
+ val |= SD_INPUT_MODE;
+ err = adv7343_write(sd, ADV7343_MODE_SELECT_REG, val);
+ if (err < 0)
+ goto setstd_exit;
+
+ state->reg01 = val;
+
+ /* Program the sub carrier frequency registers */
+ fsc_ptr = (unsigned char *)&std_info[i].fsc_val;
+ reg = ADV7343_FSC_REG0;
+ for (i = 0; i < 4; i++, reg++, fsc_ptr++) {
+ err = adv7343_write(sd, reg, *fsc_ptr);
+ if (err < 0)
+ goto setstd_exit;
+ }
+
+ val = state->reg80;
+
+ /* Filter settings */
+ if (std & (V4L2_STD_NTSC | V4L2_STD_NTSC_443))
+ val &= 0x03;
+ else if (std & ~V4L2_STD_SECAM)
+ val |= 0x04;
+
+ err = adv7343_write(sd, ADV7343_SD_MODE_REG1, val);
+ if (err < 0)
+ goto setstd_exit;
+
+ state->reg80 = val;
+
+setstd_exit:
+ if (err != 0)
+ v4l2_err(sd, "Error setting std, write failed\n");
+
+ return err;
+}
+
+static int adv7343_setoutput(struct v4l2_subdev *sd, u32 output_type)
+{
+ struct adv7343_state *state = to_state(sd);
+ unsigned char val;
+ int err = 0;
+
+ if (output_type > ADV7343_SVIDEO_ID) {
+ v4l2_dbg(1, debug, sd,
+ "Invalid output type or output type not supported:%d\n",
+ output_type);
+ return -EINVAL;
+ }
+
+ /* Enable Appropriate DAC */
+ val = state->reg00 & 0x03;
+
+ if (output_type == ADV7343_COMPOSITE_ID)
+ val |= ADV7343_COMPOSITE_POWER_VALUE;
+ else if (output_type == ADV7343_COMPONENT_ID)
+ val |= ADV7343_COMPONENT_POWER_VALUE;
+ else
+ val |= ADV7343_SVIDEO_POWER_VALUE;
+
+ err = adv7343_write(sd, ADV7343_POWER_MODE_REG, val);
+ if (err < 0)
+ goto setoutput_exit;
+
+ state->reg00 = val;
+
+ /* Enable YUV output */
+ val = state->reg02 | YUV_OUTPUT_SELECT;
+ err = adv7343_write(sd, ADV7343_MODE_REG0, val);
+ if (err < 0)
+ goto setoutput_exit;
+
+ state->reg02 = val;
+
+ /* configure SD DAC Output 2 and SD DAC Output 1 bit to zero */
+ val = state->reg82 & (SD_DAC_1_DI & SD_DAC_2_DI);
+ err = adv7343_write(sd, ADV7343_SD_MODE_REG2, val);
+ if (err < 0)
+ goto setoutput_exit;
+
+ state->reg82 = val;
+
+ /* configure ED/HD Color DAC Swap and ED/HD RGB Input Enable bit to
+ * zero */
+ val = state->reg35 & (HD_RGB_INPUT_DI & HD_DAC_SWAP_DI);
+ err = adv7343_write(sd, ADV7343_HD_MODE_REG6, val);
+ if (err < 0)
+ goto setoutput_exit;
+
+ state->reg35 = val;
+
+setoutput_exit:
+ if (err != 0)
+ v4l2_err(sd, "Error setting output, write failed\n");
+
+ return err;
+}
+
+static int adv7343_log_status(struct v4l2_subdev *sd)
+{
+ struct adv7343_state *state = to_state(sd);
+
+ v4l2_info(sd, "Standard: %llx\n", (unsigned long long)state->std);
+ v4l2_info(sd, "Output: %s\n", (state->output == 0) ? "Composite" :
+ ((state->output == 1) ? "Component" : "S-Video"));
+ return 0;
+}
+
+static int adv7343_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
+{
+ switch (qc->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(qc, ADV7343_BRIGHTNESS_MIN,
+ ADV7343_BRIGHTNESS_MAX, 1,
+ ADV7343_BRIGHTNESS_DEF);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(qc, ADV7343_HUE_MIN,
+ ADV7343_HUE_MAX, 1 ,
+ ADV7343_HUE_DEF);
+ case V4L2_CID_GAIN:
+ return v4l2_ctrl_query_fill(qc, ADV7343_GAIN_MIN,
+ ADV7343_GAIN_MAX, 1,
+ ADV7343_GAIN_DEF);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int adv7343_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct adv7343_state *state = to_state(sd);
+ int err = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ if (ctrl->value < ADV7343_BRIGHTNESS_MIN ||
+ ctrl->value > ADV7343_BRIGHTNESS_MAX) {
+ v4l2_dbg(1, debug, sd,
+ "invalid brightness settings %d\n",
+ ctrl->value);
+ return -ERANGE;
+ }
+
+ state->bright = ctrl->value;
+ err = adv7343_write(sd, ADV7343_SD_BRIGHTNESS_WSS,
+ state->bright);
+ break;
+
+ case V4L2_CID_HUE:
+ if (ctrl->value < ADV7343_HUE_MIN ||
+ ctrl->value > ADV7343_HUE_MAX) {
+ v4l2_dbg(1, debug, sd, "invalid hue settings %d\n",
+ ctrl->value);
+ return -ERANGE;
+ }
+
+ state->hue = ctrl->value;
+ err = adv7343_write(sd, ADV7343_SD_HUE_REG, state->hue);
+ break;
+
+ case V4L2_CID_GAIN:
+ if (ctrl->value < ADV7343_GAIN_MIN ||
+ ctrl->value > ADV7343_GAIN_MAX) {
+ v4l2_dbg(1, debug, sd, "invalid gain settings %d\n",
+ ctrl->value);
+ return -ERANGE;
+ }
+
+ if ((ctrl->value > POSITIVE_GAIN_MAX) &&
+ (ctrl->value < NEGATIVE_GAIN_MIN)) {
+ v4l2_dbg(1, debug, sd,
+ "gain settings not within the specified range\n");
+ return -ERANGE;
+ }
+
+ state->gain = ctrl->value;
+ err = adv7343_write(sd, ADV7343_DAC2_OUTPUT_LEVEL, state->gain);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (err < 0)
+ v4l2_err(sd, "Failed to set the encoder controls\n");
+
+ return err;
+}
+
+static int adv7343_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct adv7343_state *state = to_state(sd);
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = state->bright;
+ break;
+
+ case V4L2_CID_HUE:
+ ctrl->value = state->hue;
+ break;
+
+ case V4L2_CID_GAIN:
+ ctrl->value = state->gain;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int adv7343_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7343, 0);
+}
+
+static const struct v4l2_subdev_core_ops adv7343_core_ops = {
+ .log_status = adv7343_log_status,
+ .g_chip_ident = adv7343_g_chip_ident,
+ .g_ctrl = adv7343_g_ctrl,
+ .s_ctrl = adv7343_s_ctrl,
+ .queryctrl = adv7343_queryctrl,
+};
+
+static int adv7343_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct adv7343_state *state = to_state(sd);
+ int err = 0;
+
+ if (state->std == std)
+ return 0;
+
+ err = adv7343_setstd(sd, std);
+ if (!err)
+ state->std = std;
+
+ return err;
+}
+
+static int adv7343_s_routing(struct v4l2_subdev *sd,
+ u32 input, u32 output, u32 config)
+{
+ struct adv7343_state *state = to_state(sd);
+ int err = 0;
+
+ if (state->output == output)
+ return 0;
+
+ err = adv7343_setoutput(sd, output);
+ if (!err)
+ state->output = output;
+
+ return err;
+}
+
+static const struct v4l2_subdev_video_ops adv7343_video_ops = {
+ .s_std_output = adv7343_s_std_output,
+ .s_routing = adv7343_s_routing,
+};
+
+static const struct v4l2_subdev_ops adv7343_ops = {
+ .core = &adv7343_core_ops,
+ .video = &adv7343_video_ops,
+};
+
+static int adv7343_initialize(struct v4l2_subdev *sd)
+{
+ struct adv7343_state *state = to_state(sd);
+ int err = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adv7343_init_reg_val); i += 2) {
+
+ err = adv7343_write(sd, adv7343_init_reg_val[i],
+ adv7343_init_reg_val[i+1]);
+ if (err) {
+ v4l2_err(sd, "Error initializing\n");
+ return err;
+ }
+ }
+
+ /* Configure for default video standard */
+ err = adv7343_setoutput(sd, state->output);
+ if (err < 0) {
+ v4l2_err(sd, "Error setting output during init\n");
+ return -EINVAL;
+ }
+
+ err = adv7343_setstd(sd, state->std);
+ if (err < 0) {
+ v4l2_err(sd, "Error setting std during init\n");
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int adv7343_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adv7343_state *state;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ v4l_info(client, "chip found @ 0x%x (%s)\n",
+ client->addr << 1, client->adapter->name);
+
+ state = kzalloc(sizeof(struct adv7343_state), GFP_KERNEL);
+ if (state == NULL)
+ return -ENOMEM;
+
+ state->reg00 = 0x80;
+ state->reg01 = 0x00;
+ state->reg02 = 0x20;
+ state->reg35 = 0x00;
+ state->reg80 = ADV7343_SD_MODE_REG1_DEFAULT;
+ state->reg82 = ADV7343_SD_MODE_REG2_DEFAULT;
+
+ state->output = ADV7343_COMPOSITE_ID;
+ state->std = V4L2_STD_NTSC;
+
+ v4l2_i2c_subdev_init(&state->sd, client, &adv7343_ops);
+ return adv7343_initialize(&state->sd);
+}
+
+static int adv7343_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ v4l2_device_unregister_subdev(sd);
+ kfree(to_state(sd));
+
+ return 0;
+}
+
+static const struct i2c_device_id adv7343_id[] = {
+ {"adv7343", 0},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, adv7343_id);
+
+static struct i2c_driver adv7343_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "adv7343",
+ },
+ .probe = adv7343_probe,
+ .remove = adv7343_remove,
+ .id_table = adv7343_id,
+};
+
+static __init int init_adv7343(void)
+{
+ return i2c_add_driver(&adv7343_driver);
+}
+
+static __exit void exit_adv7343(void)
+{
+ i2c_del_driver(&adv7343_driver);
+}
+
+module_init(init_adv7343);
+module_exit(exit_adv7343);
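The FSC register values in the stdinfo[] table of adv7343.c come from the formula quoted in the driver, FSC(reg) = FSC(Hz) * 2^32 / 27000000. A quick standalone check of the NTSC entry, using the 3,579,545.45 Hz subcarrier frequency already written in the table's comment (nothing here is added beyond the arithmetic):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	double fsc_hz = 3579545.45;	/* NTSC colour subcarrier, from the comment */
	double reg = fsc_hz * 4294967296.0 / 27000000.0;	/* 2^32 / 27 MHz */

	printf("%u\n", (uint32_t)(reg + 0.5));	/* prints 569408542 */
	return 0;
}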
diff --git a/drivers/media/video/adv7343_regs.h b/drivers/media/video/adv7343_regs.h
new file mode 100644
index 00000000000..3431045b33d
--- /dev/null
+++ b/drivers/media/video/adv7343_regs.h
@@ -0,0 +1,185 @@
+/*
+ * ADV7343 encoder related structure and register definitions
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ADV7343_REGS_H
+#define ADV7343_REGS_H
+
+struct adv7343_std_info {
+ u32 standard_val3;
+ u32 fsc_val;
+ v4l2_std_id stdid;
+};
+
+/* Register offset macros */
+#define ADV7343_POWER_MODE_REG (0x00)
+#define ADV7343_MODE_SELECT_REG (0x01)
+#define ADV7343_MODE_REG0 (0x02)
+
+#define ADV7343_DAC2_OUTPUT_LEVEL (0x0b)
+
+#define ADV7343_SOFT_RESET (0x17)
+
+#define ADV7343_HD_MODE_REG1 (0x30)
+#define ADV7343_HD_MODE_REG2 (0x31)
+#define ADV7343_HD_MODE_REG3 (0x32)
+#define ADV7343_HD_MODE_REG4 (0x33)
+#define ADV7343_HD_MODE_REG5 (0x34)
+#define ADV7343_HD_MODE_REG6 (0x35)
+
+#define ADV7343_HD_MODE_REG7 (0x39)
+
+#define ADV7343_SD_MODE_REG1 (0x80)
+#define ADV7343_SD_MODE_REG2 (0x82)
+#define ADV7343_SD_MODE_REG3 (0x83)
+#define ADV7343_SD_MODE_REG4 (0x84)
+#define ADV7343_SD_MODE_REG5 (0x86)
+#define ADV7343_SD_MODE_REG6 (0x87)
+#define ADV7343_SD_MODE_REG7 (0x88)
+#define ADV7343_SD_MODE_REG8 (0x89)
+
+#define ADV7343_FSC_REG0 (0x8C)
+#define ADV7343_FSC_REG1 (0x8D)
+#define ADV7343_FSC_REG2 (0x8E)
+#define ADV7343_FSC_REG3 (0x8F)
+
+#define ADV7343_SD_CGMS_WSS0 (0x99)
+
+#define ADV7343_SD_HUE_REG (0xA0)
+#define ADV7343_SD_BRIGHTNESS_WSS (0xA1)
+
+/* Default values for the registers */
+#define ADV7343_POWER_MODE_REG_DEFAULT (0x10)
+#define ADV7343_HD_MODE_REG1_DEFAULT (0x3C) /* Changed Default
720p EAVSAV code */
+#define ADV7343_HD_MODE_REG2_DEFAULT (0x01) /* Changed Pixel data
+ valid */
+#define ADV7343_HD_MODE_REG3_DEFAULT (0x00) /* Color delay 0 clks */
+#define ADV7343_HD_MODE_REG4_DEFAULT (0xE8) /* Changed */
+#define ADV7343_HD_MODE_REG5_DEFAULT (0x08)
+#define ADV7343_HD_MODE_REG6_DEFAULT (0x00)
+#define ADV7343_HD_MODE_REG7_DEFAULT (0x00)
+#define ADV7343_SD_MODE_REG8_DEFAULT (0x00)
+#define ADV7343_SOFT_RESET_DEFAULT (0x02)
+#define ADV7343_COMPOSITE_POWER_VALUE (0x80)
+#define ADV7343_COMPONENT_POWER_VALUE (0x1C)
+#define ADV7343_SVIDEO_POWER_VALUE (0x60)
+#define ADV7343_SD_HUE_REG_DEFAULT (127)
+#define ADV7343_SD_BRIGHTNESS_WSS_DEFAULT (0x03)
+
+#define ADV7343_SD_CGMS_WSS0_DEFAULT (0x10)
+
+#define ADV7343_SD_MODE_REG1_DEFAULT (0x00)
+#define ADV7343_SD_MODE_REG2_DEFAULT (0xC9)
+#define ADV7343_SD_MODE_REG3_DEFAULT (0x10)
+#define ADV7343_SD_MODE_REG4_DEFAULT (0x01)
+#define ADV7343_SD_MODE_REG5_DEFAULT (0x02)
+#define ADV7343_SD_MODE_REG6_DEFAULT (0x0C)
+#define ADV7343_SD_MODE_REG7_DEFAULT (0x04)
+#define ADV7343_SD_MODE_REG8_DEFAULT (0x00)
+
+/* Bit masks for Mode Select Register */
+#define INPUT_MODE_MASK (0x70)
+#define SD_INPUT_MODE (0x00)
+#define HD_720P_INPUT_MODE (0x10)
+#define HD_1080I_INPUT_MODE (0x10)
+
+/* Bit masks for Mode Register 0 */
+#define TEST_PATTERN_BLACK_BAR_EN (0x04)
+#define YUV_OUTPUT_SELECT (0x20)
+#define RGB_OUTPUT_SELECT (0xDF)
+
+/* Bit masks for DAC output levels */
+#define DAC_OUTPUT_LEVEL_MASK (0xFF)
+#define POSITIVE_GAIN_MAX (0x40)
+#define POSITIVE_GAIN_MIN (0x00)
+#define NEGATIVE_GAIN_MAX (0xFF)
+#define NEGATIVE_GAIN_MIN (0xC0)
+
+/* Bit masks for soft reset register */
+#define SOFT_RESET (0x02)
+
+/* Bit masks for HD Mode Register 1 */
+#define OUTPUT_STD_MASK (0x03)
+#define OUTPUT_STD_SHIFT (0)
+#define OUTPUT_STD_EIA0_2 (0x00)
+#define OUTPUT_STD_EIA0_1 (0x01)
+#define OUTPUT_STD_FULL (0x02)
+#define EMBEDDED_SYNC (0x04)
+#define EXTERNAL_SYNC (0xFB)
+#define STD_MODE_SHIFT (3)
+#define STD_MODE_MASK (0x1F)
+#define STD_MODE_720P (0x05)
+#define STD_MODE_720P_25 (0x08)
+#define STD_MODE_720P_30 (0x07)
+#define STD_MODE_720P_50 (0x06)
+#define STD_MODE_1080I (0x0D)
+#define STD_MODE_1080I_25fps (0x0E)
+#define STD_MODE_1080P_24 (0x12)
+#define STD_MODE_1080P_25 (0x10)
+#define STD_MODE_1080P_30 (0x0F)
+#define STD_MODE_525P (0x00)
+#define STD_MODE_625P (0x03)
+
+/* Bit masks for SD Mode Register 1 */
+#define SD_STD_MASK (0x03)
+#define SD_STD_NTSC (0x00)
+#define SD_STD_PAL_BDGHI (0x01)
+#define SD_STD_PAL_M (0x02)
+#define SD_STD_PAL_N (0x03)
+#define SD_LUMA_FLTR_MASK (0x7)
+#define SD_LUMA_FLTR_SHIFT (0x2)
+#define SD_CHROMA_FLTR_MASK (0x7)
+#define SD_CHROMA_FLTR_SHIFT (0x5)
+
+/* Bit masks for SD Mode Register 2 */
+#define SD_PBPR_SSAF_EN (0x01)
+#define SD_PBPR_SSAF_DI (0xFE)
+#define SD_DAC_1_DI (0xFD)
+#define SD_DAC_2_DI (0xFB)
+#define SD_PEDESTAL_EN (0x08)
+#define SD_PEDESTAL_DI (0xF7)
+#define SD_SQUARE_PIXEL_EN (0x10)
+#define SD_SQUARE_PIXEL_DI (0xEF)
+#define SD_PIXEL_DATA_VALID (0x40)
+#define SD_ACTIVE_EDGE_EN (0x80)
+#define SD_ACTIVE_EDGE_DI (0x7F)
+
+/* Bit masks for HD Mode Register 6 */
+#define HD_RGB_INPUT_EN (0x02)
+#define HD_RGB_INPUT_DI (0xFD)
+#define HD_PBPR_SYNC_EN (0x04)
+#define HD_PBPR_SYNC_DI (0xFB)
+#define HD_DAC_SWAP_EN (0x08)
+#define HD_DAC_SWAP_DI (0xF7)
+#define HD_GAMMA_CURVE_A (0xEF)
+#define HD_GAMMA_CURVE_B (0x10)
+#define HD_GAMMA_EN (0x20)
+#define HD_GAMMA_DI (0xDF)
+#define HD_ADPT_FLTR_MODEB (0x40)
+#define HD_ADPT_FLTR_MODEA (0xBF)
+#define HD_ADPT_FLTR_EN (0x80)
+#define HD_ADPT_FLTR_DI (0x7F)
+
+#define ADV7343_BRIGHTNESS_MAX (127)
+#define ADV7343_BRIGHTNESS_MIN (0)
+#define ADV7343_BRIGHTNESS_DEF (3)
+#define ADV7343_HUE_MAX (255)
+#define ADV7343_HUE_MIN (0)
+#define ADV7343_HUE_DEF (127)
+#define ADV7343_GAIN_MAX (255)
+#define ADV7343_GAIN_MIN (0)
+#define ADV7343_GAIN_DEF (0)
+
+#endif
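A note on the register header just added: the *_EN constants are OR-masks that set a feature bit, while the matching *_DI constants are AND-masks (the bit cleared, all other bits set), so several disables can be combined with a single &, as adv7343_setoutput() does for the SD DACs. A small illustrative helper, meant to be compiled against adv7343_regs.h; the shadow-register argument is hypothetical:

#include <stdint.h>

/* Combine active-low *_DI masks with & to clear bits, *_EN masks with | to set. */
static uint8_t sd_mode2_example(uint8_t shadow)
{
	uint8_t val = shadow & (SD_DAC_1_DI & SD_DAC_2_DI);	/* SD DACs off */

	val |= SD_PEDESTAL_EN;	/* example of the set-bit direction */
	return val;
}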
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 053bbe8c8e3..830c4a933f6 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -136,9 +136,9 @@ int au0828_tuner_callback(void *priv, int component, int command, int arg)
/* Tuner Reset Command from xc5000 */
/* Drive the tuner into reset and out */
au0828_clear(dev, REG_001, 2);
- mdelay(200);
+ mdelay(10);
au0828_set(dev, REG_001, 2);
- mdelay(50);
+ mdelay(10);
return 0;
} else {
printk(KERN_ERR
diff --git a/drivers/media/video/au0828/au0828-core.c b/drivers/media/video/au0828/au0828-core.c
index a1e4c0d769a..3544a2f12f1 100644
--- a/drivers/media/video/au0828/au0828-core.c
+++ b/drivers/media/video/au0828/au0828-core.c
@@ -36,6 +36,11 @@ int au0828_debug;
module_param_named(debug, au0828_debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
+static unsigned int disable_usb_speed_check;
+module_param(disable_usb_speed_check, int, 0444);
+MODULE_PARM_DESC(disable_usb_speed_check,
+ "override min bandwidth requirement of 480M bps");
+
#define _AU0828_BULKPIPE 0x03
#define _BULKPIPESIZE 0xffff
@@ -181,6 +186,18 @@ static int au0828_usb_probe(struct usb_interface *interface,
le16_to_cpu(usbdev->descriptor.idProduct),
ifnum);
+ /*
+ * Make sure we have 480 Mbps of bandwidth; otherwise things like
+ * video streaming are unlikely to work, since 12 Mbps is generally
+ * not enough even for most digital TV streams.
+ */
+ if (usbdev->speed != USB_SPEED_HIGH && disable_usb_speed_check == 0) {
+ printk(KERN_ERR "au0828: Device initialization failed.\n");
+ printk(KERN_ERR "au0828: Device must be connected to a "
+ "high-speed USB 2.0 port.\n");
+ return -ENODEV;
+ }
+
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
printk(KERN_ERR "%s() Unable to allocate memory\n", __func__);
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 27bedc6c779..51527d7b55a 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -829,6 +829,9 @@ static int au0828_v4l2_close(struct file *filp)
au0828_uninit_isoc(dev);
+ /* Save some power by putting tuner to sleep */
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_standby);
+
/* When close the device, set the usb intf0 into alt0 to free
USB bandwidth */
ret = usb_set_interface(dev->usbdev, 0, 0);
@@ -910,11 +913,6 @@ static int au0828_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
rc = videobuf_mmap_mapper(&fh->vb_vidq, vma);
- dprintk(2, "vma start=0x%08lx, size=%ld, ret=%d\n",
- (unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
- rc);
-
return rc;
}
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 23b7499b318..5eb1464af67 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -3152,6 +3152,7 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
struct bttv_fh *fh = file->private_data;
struct bttv_buffer *buf;
enum v4l2_field field;
+ unsigned int rc = POLLERR;
if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
if (!check_alloc_btres(fh->btv,fh,RESOURCE_VBI))
@@ -3160,9 +3161,10 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
}
if (check_btres(fh,RESOURCE_VIDEO_STREAM)) {
+ mutex_lock(&fh->cap.vb_lock);
/* streaming capture */
if (list_empty(&fh->cap.stream))
- return POLLERR;
+ goto err;
buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream);
} else {
/* read() capture */
@@ -3191,11 +3193,12 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
poll_wait(file, &buf->vb.done, wait);
if (buf->vb.state == VIDEOBUF_DONE ||
buf->vb.state == VIDEOBUF_ERROR)
- return POLLIN|POLLRDNORM;
- return 0;
+ rc = POLLIN|POLLRDNORM;
+ else
+ rc = 0;
err:
mutex_unlock(&fh->cap.vb_lock);
- return POLLERR;
+ return rc;
}
static int bttv_open(struct file *file)
@@ -4166,7 +4169,6 @@ static struct video_device *vdev_init(struct bttv *btv,
if (NULL == vfd)
return NULL;
*vfd = *template;
- vfd->minor = -1;
vfd->v4l2_dev = &btv->c.v4l2_dev;
vfd->release = video_device_release;
vfd->debug = bttv_debug;
@@ -4629,7 +4631,7 @@ static int __init bttv_init_module(void)
#endif
if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
gbuffers = 2;
- if (gbufsize < 0 || gbufsize > BTTV_MAX_FBUF)
+ if (gbufsize > BTTV_MAX_FBUF)
gbufsize = BTTV_MAX_FBUF;
gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK;
if (bttv_verbose)
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index a99d92fac3d..ebd1ee9dc87 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -389,6 +389,27 @@ int __devinit init_bttv_i2c(struct bttv *btv)
}
if (0 == btv->i2c_rc && i2c_scan)
do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client);
+
+ /* Instantiate the IR receiver device, if present */
+ if (0 == btv->i2c_rc) {
+ struct i2c_board_info info;
+ /* The external IR receiver is at i2c address 0x34 (0x35 for
+ reads). Future Hauppauge cards will have an internal
+ receiver at 0x30 (0x31 for reads). In theory, both can be
+ fitted, and Hauppauge suggest an external overrides an
+ internal.
+
+ That's why we probe 0x1a (~0x34) first. CB
+ */
+ const unsigned short addr_list[] = {
+ 0x1a, 0x18, 0x4b, 0x64, 0x30,
+ I2C_CLIENT_END
+ };
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+ i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list);
+ }
return btv->i2c_rc;
}
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index d4099f5312a..0b4a8f309cf 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -1064,7 +1064,7 @@ static int ioctl_querymenu(void *arg,struct camera_data *cam)
switch(m->id) {
case CPIA2_CID_FLICKER_MODE:
- if(m->index < 0 || m->index >= NUM_FLICKER_CONTROLS)
+ if (m->index >= NUM_FLICKER_CONTROLS)
return -EINVAL;
strcpy(m->name, flicker_controls[m->index].name);
@@ -1082,14 +1082,14 @@ static int ioctl_querymenu(void *arg,struct camera_data *cam)
maximum = i;
}
}
- if(m->index < 0 || m->index > maximum)
+ if (m->index > maximum)
return -EINVAL;
strcpy(m->name, framerate_controls[m->index].name);
break;
}
case CPIA2_CID_LIGHTS:
- if(m->index < 0 || m->index >= NUM_LIGHTS_CONTROLS)
+ if (m->index >= NUM_LIGHTS_CONTROLS)
return -EINVAL;
strcpy(m->name, lights_controls[m->index].name);
diff --git a/drivers/media/video/cx18/cx18-audio.c b/drivers/media/video/cx18/cx18-audio.c
index 7a8ad5963de..35268923911 100644
--- a/drivers/media/video/cx18/cx18-audio.c
+++ b/drivers/media/video/cx18/cx18-audio.c
@@ -26,14 +26,18 @@
#include "cx18-cards.h"
#include "cx18-audio.h"
-#define CX18_AUDIO_ENABLE 0xc72014
+#define CX18_AUDIO_ENABLE 0xc72014
+#define CX18_AI1_MUX_MASK 0x30
+#define CX18_AI1_MUX_I2S1 0x00
+#define CX18_AI1_MUX_I2S2 0x10
+#define CX18_AI1_MUX_843_I2S 0x20
/* Selects the audio input and output according to the current
settings. */
int cx18_audio_set_io(struct cx18 *cx)
{
const struct cx18_card_audio_input *in;
- u32 val;
+ u32 u, v;
int err;
/* Determine which input to use */
@@ -52,9 +56,37 @@ int cx18_audio_set_io(struct cx18 *cx)
return err;
/* FIXME - this internal mux should be abstracted to a subdev */
- val = cx18_read_reg(cx, CX18_AUDIO_ENABLE) & ~0x30;
- val |= (in->audio_input > CX18_AV_AUDIO_SERIAL2) ? 0x20 :
- (in->audio_input << 4);
- cx18_write_reg_expect(cx, val | 0xb00, CX18_AUDIO_ENABLE, val, 0x30);
+ u = cx18_read_reg(cx, CX18_AUDIO_ENABLE);
+ v = u & ~CX18_AI1_MUX_MASK;
+ switch (in->audio_input) {
+ case CX18_AV_AUDIO_SERIAL1:
+ v |= CX18_AI1_MUX_I2S1;
+ break;
+ case CX18_AV_AUDIO_SERIAL2:
+ v |= CX18_AI1_MUX_I2S2;
+ break;
+ default:
+ v |= CX18_AI1_MUX_843_I2S;
+ break;
+ }
+ if (v == u) {
+ /* force a toggle of some AI1 MUX control bits */
+ u &= ~CX18_AI1_MUX_MASK;
+ switch (in->audio_input) {
+ case CX18_AV_AUDIO_SERIAL1:
+ u |= CX18_AI1_MUX_843_I2S;
+ break;
+ case CX18_AV_AUDIO_SERIAL2:
+ u |= CX18_AI1_MUX_843_I2S;
+ break;
+ default:
+ u |= CX18_AI1_MUX_I2S1;
+ break;
+ }
+ cx18_write_reg_expect(cx, u | 0xb00, CX18_AUDIO_ENABLE,
+ u, CX18_AI1_MUX_MASK);
+ }
+ cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE,
+ v, CX18_AI1_MUX_MASK);
return 0;
}
diff --git a/drivers/media/video/cx18/cx18-av-core.c b/drivers/media/video/cx18/cx18-av-core.c
index cf2bd888a42..536dedb23ba 100644
--- a/drivers/media/video/cx18/cx18-av-core.c
+++ b/drivers/media/video/cx18/cx18-av-core.c
@@ -99,9 +99,39 @@ int cx18_av_and_or4(struct cx18 *cx, u16 addr, u32 and_mask,
or_value);
}
-static void cx18_av_initialize(struct cx18 *cx)
+static int cx18_av_init(struct v4l2_subdev *sd, u32 val)
{
- struct cx18_av_state *state = &cx->av_state;
+ struct cx18 *cx = v4l2_get_subdevdata(sd);
+
+ /*
+ * The crystal freq used in calculations in this driver will be
+ * 28.636360 MHz.
+ * Aim to run the PLLs' VCOs near 400 MHz to minimize errors.
+ */
+
+ /*
+ * VDCLK Integer = 0x0f, Post Divider = 0x04
+ * AIMCLK Integer = 0x0e, Post Divider = 0x16
+ */
+ cx18_av_write4(cx, CXADEC_PLL_CTRL1, 0x160e040f);
+
+ /* VDCLK Fraction = 0x2be2fe */
+ /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz before post divide */
+ cx18_av_write4(cx, CXADEC_VID_PLL_FRAC, 0x002be2fe);
+
+ /* AIMCLK Fraction = 0x05227ad */
+ /* xtal * 0xe.2913d68/0x16 = 48000 * 384: 406 MHz pre post-div*/
+ cx18_av_write4(cx, CXADEC_AUX_PLL_FRAC, 0x005227ad);
+
+ /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x16 */
+ cx18_av_write(cx, CXADEC_I2S_MCLK, 0x56);
+ return 0;
+}
+
+static void cx18_av_initialize(struct v4l2_subdev *sd)
+{
+ struct cx18_av_state *state = to_cx18_av_state(sd);
+ struct cx18 *cx = v4l2_get_subdevdata(sd);
u32 v;
cx18_av_loadfw(cx);
@@ -150,6 +180,26 @@ static void cx18_av_initialize(struct cx18 *cx)
cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0x8000);
cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0);
+ /*
+ * Disable Video Auto-config of the Analog Front End and Video PLL.
+ *
+ * Since we only use BT.656 pixel mode, which works for both 525 and 625
+ * line systems, it's just easier for us to set registers
+ * 0x102 (CXADEC_CHIP_CTRL), 0x104-0x106 (CXADEC_AFE_CTRL),
+ * 0x108-0x109 (CXADEC_PLL_CTRL1), and 0x10c-0x10f (CXADEC_VID_PLL_FRAC)
+ * ourselves, than to run around cleaning up after the auto-config.
+ *
+ * (Note: my CX23418 chip doesn't seem to let the ACFG_DIS bit
+ * get set to 1, but OTOH, it doesn't seem to do AFE and VID PLL
+ * autoconfig either.)
+ *
+ * As a default, also turn off Dual mode for ADC2 and set ADC2 to CH3.
+ */
+ cx18_av_and_or4(cx, CXADEC_CHIP_CTRL, 0xFFFBFFFF, 0x00120000);
+
+ /* Set up the Video and Aux/Audio PLLs */
+ cx18_av_init(sd, 0);
+
/* set video to auto-detect */
/* Clear bits 11-12 to enable slow locking mode. Set autodetect mode */
/* set the comb notch = 1 */
@@ -176,12 +226,23 @@ static void cx18_av_initialize(struct cx18 *cx)
/* EncSetSignalStd(dwDevNum, pEnc->dwSigStd); */
/* EncSetVideoInput(dwDevNum, pEnc->VidIndSelection); */
- v = cx18_av_read4(cx, CXADEC_AFE_CTRL);
- v &= 0xFFFBFFFF; /* turn OFF bit 18 for droop_comp_ch1 */
- v &= 0xFFFF7FFF; /* turn OFF bit 9 for clamp_sel_ch1 */
- v &= 0xFFFFFFFE; /* turn OFF bit 0 for 12db_ch1 */
- /* v |= 0x00000001;*/ /* turn ON bit 0 for 12db_ch1 */
- cx18_av_write4(cx, CXADEC_AFE_CTRL, v);
+ /*
+ * Analog Front End (AFE)
+ * Default to luma on ch1/ADC1, chroma on ch2/ADC2, SIF on ch3/ADC2
+ * bypass_ch[1-3] use filter
+ * droop_comp_ch[1-3] disable
+ * clamp_en_ch[1-3] disable
+ * aud_in_sel ADC2
+ * luma_in_sel ADC1
+ * chroma_in_sel ADC2
+ * clamp_sel_ch[2-3] midcode
+ * clamp_sel_ch1 video decoder
+ * vga_sel_ch3 audio decoder
+ * vga_sel_ch[1-2] video decoder
+ * half_bw_ch[1-3] disable
+ * +12db_ch[1-3] disable
+ */
+ cx18_av_and_or4(cx, CXADEC_AFE_CTRL, 0xFF000000, 0x00005D00);
/* if(dwEnable && dw3DCombAvailable) { */
/* CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x7728021F); */
@@ -195,50 +256,18 @@ static void cx18_av_initialize(struct cx18 *cx)
static int cx18_av_reset(struct v4l2_subdev *sd, u32 val)
{
- struct cx18 *cx = v4l2_get_subdevdata(sd);
-
- cx18_av_initialize(cx);
- return 0;
-}
-
-static int cx18_av_init(struct v4l2_subdev *sd, u32 val)
-{
- struct cx18 *cx = v4l2_get_subdevdata(sd);
-
- /*
- * The crystal freq used in calculations in this driver will be
- * 28.636360 MHz.
- * Aim to run the PLLs' VCOs near 400 MHz to minimze errors.
- */
-
- /*
- * VDCLK Integer = 0x0f, Post Divider = 0x04
- * AIMCLK Integer = 0x0e, Post Divider = 0x16
- */
- cx18_av_write4(cx, CXADEC_PLL_CTRL1, 0x160e040f);
-
- /* VDCLK Fraction = 0x2be2fe */
- /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz before post divide */
- cx18_av_write4(cx, CXADEC_VID_PLL_FRAC, 0x002be2fe);
-
- /* AIMCLK Fraction = 0x05227ad */
- /* xtal * 0xe.2913d68/0x16 = 48000 * 384: 406 MHz pre post-div*/
- cx18_av_write4(cx, CXADEC_AUX_PLL_FRAC, 0x005227ad);
-
- /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x16 */
- cx18_av_write(cx, CXADEC_I2S_MCLK, 0x56);
+ cx18_av_initialize(sd);
return 0;
}
static int cx18_av_load_fw(struct v4l2_subdev *sd)
{
struct cx18_av_state *state = to_cx18_av_state(sd);
- struct cx18 *cx = v4l2_get_subdevdata(sd);
if (!state->is_initialized) {
/* initialize on first use */
state->is_initialized = 1;
- cx18_av_initialize(cx);
+ cx18_av_initialize(sd);
}
return 0;
}
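
The PLL writes in cx18_av_init() above encode the multiplier as an integer plus a 25-bit fraction, which is also how the debug code later in cx18_av_std_setup() reconstructs the VCO rate (note the '>> 25' there). As a worked check of the VDCLK values, under the 28.636360 MHz crystal stated in the comments (plain userspace C, just for the arithmetic):

#include <stdio.h>

/* Worked check: VDCLK integer 0x0f, 25-bit fraction 0x2be2fe, post div 4 */
int main(void)
{
        double xtal = 28636360.0;                       /* Hz */
        double mult = 0x0f + (double)0x2be2fe / (1 << 25);
        double vco  = xtal * mult;

        printf("VCO = %.3f MHz, after /4 post divide = %.3f MHz\n",
               vco / 1e6, vco / 4.0 / 1e6);             /* ~432 and ~108 */
        return 0;
}

This reproduces the "432 MHz before post divide" and "108 MHz" figures in the comments; the AIMCLK values work out the same way with the 0x16 post divider.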
@@ -248,8 +277,15 @@ void cx18_av_std_setup(struct cx18 *cx)
struct cx18_av_state *state = &cx->av_state;
struct v4l2_subdev *sd = &state->sd;
v4l2_std_id std = state->std;
+
+ /*
+ * Video ADC crystal clock to pixel clock SRC decimation ratio
+ * 28.636360 MHz/13.5 Mpps * 256 = 0x21f.07b
+ */
+ const int src_decimation = 0x21f;
+
int hblank, hactive, burst, vblank, vactive, sc;
- int vblank656, src_decimation;
+ int vblank656;
int luma_lpf, uv_lpf, comb;
u32 pll_int, pll_frac, pll_post;
@@ -259,40 +295,96 @@ void cx18_av_std_setup(struct cx18 *cx)
else
cx18_av_write(cx, 0x49f, 0x14);
+ /*
+ * Note: At the end of a field, there are 3 sets of half line duration
+ * (double horizontal rate) pulses:
+ *
+ * 5 (625) or 6 (525) half-lines to blank for the vertical retrace
+ * 5 (625) or 6 (525) vertical sync pulses of half line duration
+ * 5 (625) or 6 (525) half-lines of equalization pulses
+ */
if (std & V4L2_STD_625_50) {
- /* FIXME - revisit these for Sliced VBI */
+ /*
+ * The following relationships of half line counts should hold:
+ * 625 = vblank656 + vactive
+ * 10 = vblank656 - vblank = vsync pulses + equalization pulses
+ *
+ * vblank656: half lines after line 625/mid-313 of blanked video
+ * vblank: half lines, after line 5/317, of blanked video
+ * vactive: half lines of active video +
+ * 5 half lines after the end of active video
+ *
+ * As far as I can tell:
+ * vblank656 starts counting from the falling edge of the first
+ * vsync pulse (start of line 1 or mid-313)
+ * vblank starts counting after the 5 vsync pulses and
+ * 5 or 4 equalization pulses (start of line 6 or 318)
+ *
+ * For 625 line systems the driver will extract VBI information
+ * from lines 6-23 and lines 318-335 (but the slicer can only
+ * handle 17 lines, not the 18 in the vblank region).
+ * In addition, we need vblank656 and vblank to be one whole
+ * line longer, to cover line 24 and 336, so the SAV/EAV RP
+ * codes get generated such that the encoder can actually
+ * extract line 23 & 335 (WSS). We'll lose 1 line in each field
+ * at the top of the screen.
+ *
+ * It appears the 5 half lines that happen after active
+ * video must be included in vactive (579 instead of 574),
+ * otherwise the colors get badly displayed in various regions
+ * of the screen. I guess the chroma comb filter gets confused
+ * without them (at least when a PVR-350 is the PAL source).
+ */
+ vblank656 = 48; /* lines 1 - 24 & 313 - 336 */
+ vblank = 38; /* lines 6 - 24 & 318 - 336 */
+ vactive = 579; /* lines 24 - 313 & 337 - 626 */
+
+ /*
+ * For a 13.5 Mpps clock and 15,625 Hz line rate, a line is
+ * 864 pixels = 720 active + 144 blanking. ITU-R BT.601
+ * specifies 12 luma clock periods (~0.9 us * 13.5 Mpps) after
+ * the end of active video to start a horizontal line, so that
+ * leaves 132 pixels of hblank to ignore.
+ */
hblank = 132;
hactive = 720;
- burst = 93;
- vblank = 36;
- vactive = 580;
- vblank656 = 40;
- src_decimation = 0x21f;
+ /*
+ * Burst gate delay (for 625 line systems)
+ * Hsync leading edge to color burst rise = 5.6 us
+ * Color burst width = 2.25 us
+ * Gate width = 4 pixel clocks
+ * (5.6 us + 2.25/2 us) * 13.5 Mpps + 4/2 clocks = 92.79 clocks
+ */
+ burst = 93;
luma_lpf = 2;
if (std & V4L2_STD_PAL) {
uv_lpf = 1;
comb = 0x20;
- sc = 688739;
+ /* sc = 4433618.75 * src_decimation/28636360 * 2^13 */
+ sc = 688700;
} else if (std == V4L2_STD_PAL_Nc) {
uv_lpf = 1;
comb = 0x20;
- sc = 556453;
+ /* sc = 3582056.25 * src_decimation/28636360 * 2^13 */
+ sc = 556422;
} else { /* SECAM */
uv_lpf = 0;
comb = 0;
- sc = 672351;
+ /* (fr + fb)/2 = (4406260 + 4250000)/2 = 4328130 */
+ /* sc = 4328130 * src_decimation/28636360 * 2^13 */
+ sc = 672314;
}
} else {
/*
* The following relationships of half line counts should hold:
- * 525 = vsync + vactive + vblank656
- * 12 = vblank656 - vblank
+ * 525 = prevsync + vblank656 + vactive
+ * 12 = vblank656 - vblank = vsync pulses + equalization pulses
*
- * vsync: always 6 half-lines of vsync pulses
- * vactive: half lines of active video
+ * prevsync: 6 half-lines before the vsync pulses
* vblank656: half lines, after line 3/mid-266, of blanked video
* vblank: half lines, after line 9/272, of blanked video
+ * vactive: half lines of active video
*
* As far as I can tell:
* vblank656 starts counting from the falling edge of the first
@@ -319,20 +411,30 @@ void cx18_av_std_setup(struct cx18 *cx)
luma_lpf = 1;
uv_lpf = 1;
- src_decimation = 0x21f;
+ /*
+ * Burst gate delay (for 525 line systems)
+ * Hsync leading edge to color burst rise = 5.3 us
+ * Color burst width = 2.5 us
+ * Gate width = 4 pixel clocks
+ * (5.3 us + 2.5/2 us) * 13.5 Mpps + 4/2 clocks = 90.425 clocks
+ */
if (std == V4L2_STD_PAL_60) {
- burst = 0x5b;
+ burst = 90;
luma_lpf = 2;
comb = 0x20;
- sc = 688739;
+ /* sc = 4433618.75 * src_decimation/28636360 * 2^13 */
+ sc = 688700;
} else if (std == V4L2_STD_PAL_M) {
- burst = 0x61;
+ /* The 97 needs to be verified against PAL-M timings */
+ burst = 97;
comb = 0x20;
- sc = 555452;
+ /* sc = 3575611.49 * src_decimation/28636360 * 2^13 */
+ sc = 555421;
} else {
- burst = 0x5b;
+ burst = 90;
comb = 0x66;
- sc = 556063;
+ /* sc = 3579545.45.. * src_decimation/28636360 * 2^13 */
+ sc = 556032;
}
}
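
Every sc constant above follows the relation given in its comment, sc = f_subcarrier * src_decimation / 28636360 * 2^13, which the fsc debug computation further down inverts with do_div(). A worked check for the PAL value (plain userspace C, floating point instead of the driver's 64-bit integer math):

#include <stdio.h>

/* Worked check of sc for PAL B/G/H/I (colour subcarrier 4433618.75 Hz) */
int main(void)
{
        double fsc = 4433618.75;                /* Hz */
        double xtal = 28636360.0;               /* Hz */
        int src_decimation = 0x21f;             /* 543, set above */
        double sc = fsc * src_decimation / xtal * 8192.0;

        printf("sc = %.1f (rounded: %d)\n", sc, (int)(sc + 0.5));
        return 0;
}

This prints roughly 688700, matching the V4L2_STD_PAL value; the PAL-Nc, SECAM, NTSC and PAL-M constants check out against their commented subcarrier frequencies in the same way.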
@@ -344,23 +446,26 @@ void cx18_av_std_setup(struct cx18 *cx)
pll_int, pll_frac, pll_post);
if (pll_post) {
- int fin, fsc, pll;
+ int fsc, pll;
+ u64 tmp;
pll = (28636360L * ((((u64)pll_int) << 25) + pll_frac)) >> 25;
pll /= pll_post;
- CX18_DEBUG_INFO_DEV(sd, "PLL = %d.%06d MHz\n",
+ CX18_DEBUG_INFO_DEV(sd, "Video PLL = %d.%06d MHz\n",
pll / 1000000, pll % 1000000);
- CX18_DEBUG_INFO_DEV(sd, "PLL/8 = %d.%06d MHz\n",
+ CX18_DEBUG_INFO_DEV(sd, "Pixel rate = %d.%06d Mpixel/sec\n",
pll / 8000000, (pll / 8) % 1000000);
- fin = ((u64)src_decimation * pll) >> 12;
- CX18_DEBUG_INFO_DEV(sd, "ADC Sampling freq = %d.%06d MHz\n",
- fin / 1000000, fin % 1000000);
+ CX18_DEBUG_INFO_DEV(sd, "ADC XTAL/pixel clock decimation ratio "
+ "= %d.%03d\n", src_decimation / 256,
+ ((src_decimation % 256) * 1000) / 256);
- fsc = (((u64)sc) * pll) >> 24L;
+ tmp = 28636360 * (u64) sc;
+ do_div(tmp, src_decimation);
+ fsc = tmp >> 13;
CX18_DEBUG_INFO_DEV(sd,
- "Chroma sub-carrier freq = %d.%06d MHz\n",
- fsc / 1000000, fsc % 1000000);
+ "Chroma sub-carrier initial freq = %d.%06d "
+ "MHz\n", fsc / 1000000, fsc % 1000000);
CX18_DEBUG_INFO_DEV(sd, "hblank %i, hactive %i, vblank %i, "
"vactive %i, vblank656 %i, src_dec %i, "
@@ -470,16 +575,23 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
{
struct cx18_av_state *state = &cx->av_state;
struct v4l2_subdev *sd = &state->sd;
- u8 is_composite = (vid_input >= CX18_AV_COMPOSITE1 &&
- vid_input <= CX18_AV_COMPOSITE8);
- u8 reg;
- u8 v;
+
+ enum analog_signal_type {
+ NONE, CVBS, Y, C, SIF, Pb, Pr
+ } ch[3] = {NONE, NONE, NONE};
+
+ u8 afe_mux_cfg;
+ u8 adc2_cfg;
+ u32 afe_cfg;
+ int i;
CX18_DEBUG_INFO_DEV(sd, "decoder set video input %d, audio input %d\n",
vid_input, aud_input);
- if (is_composite) {
- reg = 0xf0 + (vid_input - CX18_AV_COMPOSITE1);
+ if (vid_input >= CX18_AV_COMPOSITE1 &&
+ vid_input <= CX18_AV_COMPOSITE8) {
+ afe_mux_cfg = 0xf0 + (vid_input - CX18_AV_COMPOSITE1);
+ ch[0] = CVBS;
} else {
int luma = vid_input & 0xf0;
int chroma = vid_input & 0xf00;
@@ -493,26 +605,45 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
vid_input);
return -EINVAL;
}
- reg = 0xf0 + ((luma - CX18_AV_SVIDEO_LUMA1) >> 4);
+ afe_mux_cfg = 0xf0 + ((luma - CX18_AV_SVIDEO_LUMA1) >> 4);
+ ch[0] = Y;
if (chroma >= CX18_AV_SVIDEO_CHROMA7) {
- reg &= 0x3f;
- reg |= (chroma - CX18_AV_SVIDEO_CHROMA7) >> 2;
+ afe_mux_cfg &= 0x3f;
+ afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA7) >> 2;
+ ch[2] = C;
} else {
- reg &= 0xcf;
- reg |= (chroma - CX18_AV_SVIDEO_CHROMA4) >> 4;
+ afe_mux_cfg &= 0xcf;
+ afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA4) >> 4;
+ ch[1] = C;
}
}
+ /* TODO: LeadTek WinFast DVR3100 H & WinFast PVR2100 can do Y/Pb/Pr */
switch (aud_input) {
case CX18_AV_AUDIO_SERIAL1:
case CX18_AV_AUDIO_SERIAL2:
/* do nothing, use serial audio input */
break;
- case CX18_AV_AUDIO4: reg &= ~0x30; break;
- case CX18_AV_AUDIO5: reg &= ~0x30; reg |= 0x10; break;
- case CX18_AV_AUDIO6: reg &= ~0x30; reg |= 0x20; break;
- case CX18_AV_AUDIO7: reg &= ~0xc0; break;
- case CX18_AV_AUDIO8: reg &= ~0xc0; reg |= 0x40; break;
+ case CX18_AV_AUDIO4:
+ afe_mux_cfg &= ~0x30;
+ ch[1] = SIF;
+ break;
+ case CX18_AV_AUDIO5:
+ afe_mux_cfg = (afe_mux_cfg & ~0x30) | 0x10;
+ ch[1] = SIF;
+ break;
+ case CX18_AV_AUDIO6:
+ afe_mux_cfg = (afe_mux_cfg & ~0x30) | 0x20;
+ ch[1] = SIF;
+ break;
+ case CX18_AV_AUDIO7:
+ afe_mux_cfg &= ~0xc0;
+ ch[2] = SIF;
+ break;
+ case CX18_AV_AUDIO8:
+ afe_mux_cfg = (afe_mux_cfg & ~0xc0) | 0x40;
+ ch[2] = SIF;
+ break;
default:
CX18_ERR_DEV(sd, "0x%04x is not a valid audio input!\n",
@@ -520,24 +651,65 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
return -EINVAL;
}
- cx18_av_write_expect(cx, 0x103, reg, reg, 0xf7);
+ /* Set up analog front end multiplexers */
+ cx18_av_write_expect(cx, 0x103, afe_mux_cfg, afe_mux_cfg, 0xf7);
/* Set INPUT_MODE to Composite (0) or S-Video (1) */
- cx18_av_and_or(cx, 0x401, ~0x6, is_composite ? 0 : 0x02);
+ cx18_av_and_or(cx, 0x401, ~0x6, ch[0] == CVBS ? 0 : 0x02);
/* Set CH_SEL_ADC2 to 1 if input comes from CH3 */
- v = cx18_av_read(cx, 0x102);
- if (reg & 0x80)
- v &= ~0x2;
+ adc2_cfg = cx18_av_read(cx, 0x102);
+ if (ch[2] == NONE)
+ adc2_cfg &= ~0x2; /* No sig on CH3, set ADC2 to CH2 for input */
else
- v |= 0x2;
+ adc2_cfg |= 0x2; /* Signal on CH3, set ADC2 to CH3 for input */
+
/* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2 and CH3 */
- if ((reg & 0xc0) != 0xc0 && (reg & 0x30) != 0x30)
- v |= 0x4;
+ if (ch[1] != NONE && ch[2] != NONE)
+ adc2_cfg |= 0x4; /* Set dual mode */
else
- v &= ~0x4;
- cx18_av_write_expect(cx, 0x102, v, v, 0x17);
+ adc2_cfg &= ~0x4; /* Clear dual mode */
+ cx18_av_write_expect(cx, 0x102, adc2_cfg, adc2_cfg, 0x17);
+
+ /* Configure the analog front end */
+ afe_cfg = cx18_av_read4(cx, CXADEC_AFE_CTRL);
+ afe_cfg &= 0xff000000;
+ afe_cfg |= 0x00005000; /* CHROMA_IN, AUD_IN: ADC2; LUMA_IN: ADC1 */
+ if (ch[1] != NONE && ch[2] != NONE)
+ afe_cfg |= 0x00000030; /* half_bw_ch[2-3] since in dual mode */
+
+ for (i = 0; i < 3; i++) {
+ switch (ch[i]) {
+ default:
+ case NONE:
+ /* CLAMP_SEL = Fixed to midcode clamp level */
+ afe_cfg |= (0x00000200 << i);
+ break;
+ case CVBS:
+ case Y:
+ if (i > 0)
+ afe_cfg |= 0x00002000; /* LUMA_IN_SEL: ADC2 */
+ break;
+ case C:
+ case Pb:
+ case Pr:
+ /* CLAMP_SEL = Fixed to midcode clamp level */
+ afe_cfg |= (0x00000200 << i);
+ if (i == 0 && ch[i] == C)
+ afe_cfg &= ~0x00001000; /* CHROMA_IN_SEL ADC1 */
+ break;
+ case SIF:
+ /*
+ * VGA_GAIN_SEL = Audio Decoder
+ * CLAMP_SEL = Fixed to midcode clamp level
+ */
+ afe_cfg |= (0x00000240 << i);
+ if (i == 0)
+ afe_cfg &= ~0x00004000; /* AUD_IN_SEL ADC1 */
+ break;
+ }
+ }
- /*cx18_av_and_or4(cx, 0x104, ~0x001b4180, 0x00004180);*/
+ cx18_av_write4(cx, CXADEC_AFE_CTRL, afe_cfg);
state->vid_input = vid_input;
state->aud_input = aud_input;
@@ -858,9 +1030,9 @@ static int cx18_av_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
* cx18_av_std_setup(), above standard values:
*
* 480 + 1 for 60 Hz systems
- * 576 + 4 for 50 Hz systems
+ * 576 + 3 for 50 Hz systems
*/
- Vlines = pix->height + (is_50Hz ? 4 : 1);
+ Vlines = pix->height + (is_50Hz ? 3 : 1);
/*
* Invalid height and width scaling requests are:
diff --git a/drivers/media/video/cx18/cx18-av-firmware.c b/drivers/media/video/cx18/cx18-av-firmware.c
index 49a55cc8d83..b9e8cc5d264 100644
--- a/drivers/media/video/cx18/cx18-av-firmware.c
+++ b/drivers/media/video/cx18/cx18-av-firmware.c
@@ -24,15 +24,63 @@
#include "cx18-io.h"
#include <linux/firmware.h>
-#define CX18_AUDIO_ENABLE 0xc72014
+#define CX18_AUDIO_ENABLE 0xc72014
+#define CX18_AI1_MUX_MASK 0x30
+#define CX18_AI1_MUX_I2S1 0x00
+#define CX18_AI1_MUX_I2S2 0x10
+#define CX18_AI1_MUX_843_I2S 0x20
+#define CX18_AI1_MUX_INVALID 0x30
+
#define FWFILE "v4l-cx23418-dig.fw"
+static int cx18_av_verifyfw(struct cx18 *cx, const struct firmware *fw)
+{
+ struct v4l2_subdev *sd = &cx->av_state.sd;
+ int ret = 0;
+ const u8 *data;
+ u32 size;
+ int addr;
+ u32 expected, dl_control;
+
+ /* Ensure we put the 8051 in reset and enable firmware upload mode */
+ dl_control = cx18_av_read4(cx, CXADEC_DL_CTL);
+ do {
+ dl_control &= 0x00ffffff;
+ dl_control |= 0x0f000000;
+ cx18_av_write4_noretry(cx, CXADEC_DL_CTL, dl_control);
+ dl_control = cx18_av_read4(cx, CXADEC_DL_CTL);
+ } while ((dl_control & 0xff000000) != 0x0f000000);
+
+ /* Read and auto increment until at address 0x0000 */
+ while (dl_control & 0x3fff)
+ dl_control = cx18_av_read4(cx, CXADEC_DL_CTL);
+
+ data = fw->data;
+ size = fw->size;
+ for (addr = 0; addr < size; addr++) {
+ dl_control &= 0xffff3fff; /* ignore top 2 bits of address */
+ expected = 0x0f000000 | ((u32)data[addr] << 16) | addr;
+ if (expected != dl_control) {
+ CX18_ERR_DEV(sd, "verification of %s firmware load "
+ "failed: expected %#010x got %#010x\n",
+ FWFILE, expected, dl_control);
+ ret = -EIO;
+ break;
+ }
+ dl_control = cx18_av_read4(cx, CXADEC_DL_CTL);
+ }
+ if (ret == 0)
+ CX18_INFO_DEV(sd, "verified load of %s firmware (%d bytes)\n",
+ FWFILE, size);
+ return ret;
+}
+
int cx18_av_loadfw(struct cx18 *cx)
{
struct v4l2_subdev *sd = &cx->av_state.sd;
const struct firmware *fw = NULL;
u32 size;
- u32 v;
+ u32 u, v;
const u8 *ptr;
int i;
int retries1 = 0;
@@ -95,6 +143,12 @@ int cx18_av_loadfw(struct cx18 *cx)
}
cx18_av_write4_expect(cx, CXADEC_DL_CTL,
+ 0x03000000 | fw->size, 0x03000000, 0x13000000);
+
+ CX18_INFO_DEV(sd, "loaded %s firmware (%d bytes)\n", FWFILE, size);
+
+ if (cx18_av_verifyfw(cx, fw) == 0)
+ cx18_av_write4_expect(cx, CXADEC_DL_CTL,
0x13000000 | fw->size, 0x13000000, 0x13000000);
/* Output to the 416 */
@@ -135,6 +189,28 @@ int cx18_av_loadfw(struct cx18 *cx)
cx18_write_reg_expect(cx, v & 0xFFFFFBFF, CX18_AUDIO_ENABLE,
0, 0x400);
+ /* Toggle the AI1 MUX */
+ v = cx18_read_reg(cx, CX18_AUDIO_ENABLE);
+ u = v & CX18_AI1_MUX_MASK;
+ v &= ~CX18_AI1_MUX_MASK;
+ if (u == CX18_AI1_MUX_843_I2S || u == CX18_AI1_MUX_INVALID) {
+ /* Switch to I2S1 */
+ v |= CX18_AI1_MUX_I2S1;
+ cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE,
+ v, CX18_AI1_MUX_MASK);
+ /* Switch back to the A/V decoder core I2S output */
+ v = (v & ~CX18_AI1_MUX_MASK) | CX18_AI1_MUX_843_I2S;
+ } else {
+ /* Switch to the A/V decoder core I2S output */
+ v |= CX18_AI1_MUX_843_I2S;
+ cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE,
+ v, CX18_AI1_MUX_MASK);
+ /* Switch back to I2S1 or I2S2 */
+ v = (v & ~CX18_AI1_MUX_MASK) | u;
+ }
+ cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE,
+ v, CX18_AI1_MUX_MASK);
+
/* Enable WW auto audio standard detection */
v = cx18_av_read4(cx, CXADEC_STD_DET_CTL);
v |= 0xFF; /* Auto by default */
@@ -143,7 +219,5 @@ int cx18_av_loadfw(struct cx18 *cx)
cx18_av_write4_expect(cx, CXADEC_STD_DET_CTL, v, v, 0x3F00FFFF);
release_firmware(fw);
-
- CX18_INFO_DEV(sd, "loaded %s firmware (%d bytes)\n", FWFILE, size);
return 0;
}
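
cx18_av_verifyfw() above reads the image back through the auto-incrementing CXADEC_DL_CTL window and compares it byte for byte with what request_firmware() handed over. A stripped-down sketch of the surrounding firmware lifecycle, with the device pointer and the verify step left as placeholders:

#include <linux/types.h>
#include <linux/device.h>
#include <linux/firmware.h>

/* Sketch: request, verify, release; verify() is a placeholder callback */
static int load_and_verify(struct device *dev,
                           int (*verify)(const u8 *data, size_t size))
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, "v4l-cx23418-dig.fw", dev);
        if (ret)
                return ret;                     /* image not available */

        ret = verify(fw->data, fw->size);       /* e.g. read-back compare */

        release_firmware(fw);                   /* nothing keeps fw around */
        return ret;
}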
diff --git a/drivers/media/video/cx18/cx18-av-vbi.c b/drivers/media/video/cx18/cx18-av-vbi.c
index 23b31670bf1..a51732bcca4 100644
--- a/drivers/media/video/cx18/cx18-av-vbi.c
+++ b/drivers/media/video/cx18/cx18-av-vbi.c
@@ -255,8 +255,8 @@ int cx18_av_vbi_s_fmt(struct cx18 *cx, struct v4l2_format *fmt)
}
cx18_av_write(cx, 0x43c, 0x16);
- /* FIXME - should match vblank set in cx18_av_std_setup() */
- cx18_av_write(cx, 0x474, is_pal ? 0x2a : 26);
+ /* Should match vblank set in cx18_av_std_setup() */
+ cx18_av_write(cx, 0x474, is_pal ? 38 : 26);
return 0;
}
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index 9bc22183784..c92a25036f0 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -340,13 +340,12 @@ static const struct cx18_card cx18_card_toshiba_qosmio_dvbt = {
static const struct cx18_card_pci_info cx18_pci_leadtek_pvr2100[] = {
{ PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6f27 }, /* PVR2100 */
- { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6690 }, /* DVR3100 H */
{ 0, 0, 0 }
};
static const struct cx18_card cx18_card_leadtek_pvr2100 = {
.type = CX18_CARD_LEADTEK_PVR2100,
- .name = "Leadtek WinFast PVR2100/DVR3100 H",
+ .name = "Leadtek WinFast PVR2100",
.comment = "Experimenters and photos needed for device to work well.\n"
"\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
@@ -365,15 +364,12 @@ static const struct cx18_card cx18_card_leadtek_pvr2100 = {
{ CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
},
.tuners = {
- /* XC3028 tuner */
+ /* XC2028 tuner */
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
.radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 },
.ddr = {
- /*
- * Pointer to proper DDR config values provided by
- * Terry Wu <terrywu at leadtek.com.tw>
- */
+ /* Pointer to proper DDR config values provided by Terry Wu */
.chip_config = 0x303,
.refresh = 0x3bb,
.timing1 = 0x24220e83,
@@ -392,6 +388,58 @@ static const struct cx18_card cx18_card_leadtek_pvr2100 = {
/* ------------------------------------------------------------------------- */
+/* Leadtek WinFast DVR3100 H */
+
+static const struct cx18_card_pci_info cx18_pci_leadtek_dvr3100h[] = {
+ { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6690 }, /* DVR3100 H */
+ { 0, 0, 0 }
+};
+
+static const struct cx18_card cx18_card_leadtek_dvr3100h = {
+ .type = CX18_CARD_LEADTEK_DVR3100H,
+ .name = "Leadtek WinFast DVR3100 H",
+ .comment = "Simultaneous DVB-T and Analog capture supported,\n"
+ "\texcept when capturing Analog from the antenna input.\n",
+ .v4l2_capabilities = CX18_CAP_ENCODER,
+ .hw_audio_ctrl = CX18_HW_418_AV,
+ .hw_muxer = CX18_HW_GPIO_MUX,
+ .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX |
+ CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL,
+ .video_inputs = {
+ { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 },
+ { CX18_CARD_INPUT_SVIDEO1, 1,
+ CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
+ { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 },
+ },
+ .audio_inputs = {
+ { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
+ { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
+ },
+ .tuners = {
+ /* XC3028 tuner */
+ { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
+ },
+ .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 },
+ .ddr = {
+ /* Pointer to proper DDR config values provided by Terry Wu */
+ .chip_config = 0x303,
+ .refresh = 0x3bb,
+ .timing1 = 0x24220e83,
+ .timing2 = 0x1f,
+ .tune_lane = 0,
+ .initial_emrs = 0x2,
+ },
+ .gpio_init.initial_value = 0x6,
+ .gpio_init.direction = 0x7,
+ .gpio_audio_input = { .mask = 0x7,
+ .tuner = 0x6, .linein = 0x2, .radio = 0x2 },
+ .xceive_pin = 1,
+ .pci_list = cx18_pci_leadtek_dvr3100h,
+ .i2c = &cx18_i2c_std,
+};
+
+/* ------------------------------------------------------------------------- */
+
static const struct cx18_card *cx18_card_list[] = {
&cx18_card_hvr1600_esmt,
&cx18_card_hvr1600_samsung,
@@ -400,6 +448,7 @@ static const struct cx18_card *cx18_card_list[] = {
&cx18_card_cnxt_raptor_pal,
&cx18_card_toshiba_qosmio_dvbt,
&cx18_card_leadtek_pvr2100,
+ &cx18_card_leadtek_dvr3100h,
};
const struct cx18_card *cx18_get_card(u16 index)
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
index 82fc2f9d402..8e35c3aed54 100644
--- a/drivers/media/video/cx18/cx18-controls.c
+++ b/drivers/media/video/cx18/cx18-controls.c
@@ -176,8 +176,10 @@ static int cx18_setup_vbi_fmt(struct cx18 *cx,
return -EBUSY;
if (fmt != V4L2_MPEG_STREAM_VBI_FMT_IVTV ||
- type != V4L2_MPEG_STREAM_TYPE_MPEG2_PS) {
- /* We don't do VBI insertion aside from IVTV format in a PS */
+ !(type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS ||
+ type == V4L2_MPEG_STREAM_TYPE_MPEG2_DVD ||
+ type == V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD)) {
+ /* Only IVTV fmt VBI insertion & only MPEG-2 PS type streams */
cx->vbi.insert_mpeg = V4L2_MPEG_STREAM_VBI_FMT_NONE;
CX18_DEBUG_INFO("disabled insertion of sliced VBI data into "
"the MPEG stream\n");
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 49b1c3d7b1a..92026e82e10 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -30,6 +30,7 @@
#include "cx18-irq.h"
#include "cx18-gpio.h"
#include "cx18-firmware.h"
+#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-av-core.h"
#include "cx18-scb.h"
@@ -151,7 +152,8 @@ MODULE_PARM_DESC(cardtype,
"\t\t\t 4 = Yuan MPC718\n"
"\t\t\t 5 = Conexant Raptor PAL/SECAM\n"
"\t\t\t 6 = Toshiba Qosmio DVB-T/Analog\n"
- "\t\t\t 7 = Leadtek WinFast PVR2100/DVR3100 H\n"
+ "\t\t\t 7 = Leadtek WinFast PVR2100\n"
+ "\t\t\t 8 = Leadtek WinFast DVR3100 H\n"
"\t\t\t 0 = Autodetect (default)\n"
"\t\t\t-1 = Ignore this card\n\t\t");
MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60");
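
With the DVR3100 H split off as its own entry, the card type can still be forced when autodetection is not possible, for example with "modprobe cx18 cardtype=8" or an "options cx18 cardtype=8" line in modprobe.conf; cardtype=0 keeps the autodetect behaviour described above.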
@@ -312,7 +314,7 @@ static void cx18_process_eeprom(struct cx18 *cx)
CX18_INFO("Autodetected %s\n", cx->card_name);
if (tv.tuner_type == TUNER_ABSENT)
- CX18_ERR("tveeprom cannot autodetect tuner!");
+ CX18_ERR("tveeprom cannot autodetect tuner!\n");
if (cx->options.tuner == -1)
cx->options.tuner = tv.tuner_type;
@@ -546,6 +548,40 @@ done:
cx->card_i2c = cx->card->i2c;
}
+static int __devinit cx18_create_in_workq(struct cx18 *cx)
+{
+ snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
+ cx->v4l2_dev.name);
+ cx->in_work_queue = create_singlethread_workqueue(cx->in_workq_name);
+ if (cx->in_work_queue == NULL) {
+ CX18_ERR("Unable to create incoming mailbox handler thread\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int __devinit cx18_create_out_workq(struct cx18 *cx)
+{
+ snprintf(cx->out_workq_name, sizeof(cx->out_workq_name), "%s-out",
+ cx->v4l2_dev.name);
+ cx->out_work_queue = create_workqueue(cx->out_workq_name);
+ if (cx->out_work_queue == NULL) {
+ CX18_ERR("Unable to create outgoing mailbox handler threads\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void __devinit cx18_init_in_work_orders(struct cx18 *cx)
+{
+ int i;
+ for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
+ cx->in_work_order[i].cx = cx;
+ cx->in_work_order[i].str = cx->epu_debug_str;
+ INIT_WORK(&cx->in_work_order[i].work, cx18_in_work_handler);
+ }
+}
+
/* Precondition: the cx18 structure has been memset to 0. Only
the dev and instance fields have been filled in.
No assumptions on the card type may be made here (see cx18_init_struct2
@@ -553,7 +589,7 @@ done:
*/
static int __devinit cx18_init_struct1(struct cx18 *cx)
{
- int i;
+ int ret;
cx->base_addr = pci_resource_start(cx->pci_dev, 0);
@@ -562,18 +598,18 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
mutex_init(&cx->epu2apu_mb_lock);
mutex_init(&cx->epu2cpu_mb_lock);
- cx->work_queue = create_singlethread_workqueue(cx->v4l2_dev.name);
- if (cx->work_queue == NULL) {
- CX18_ERR("Unable to create work hander thread\n");
- return -ENOMEM;
- }
+ ret = cx18_create_out_workq(cx);
+ if (ret)
+ return ret;
- for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
- cx->epu_work_order[i].cx = cx;
- cx->epu_work_order[i].str = cx->epu_debug_str;
- INIT_WORK(&cx->epu_work_order[i].work, cx18_epu_work_handler);
+ ret = cx18_create_in_workq(cx);
+ if (ret) {
+ destroy_workqueue(cx->out_work_queue);
+ return ret;
}
+ cx18_init_in_work_orders(cx);
+
/* start counting open_id at 1 */
cx->open_id = 1;
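
The init path above now sets up two queues: a single-threaded one so incoming mailbox work is handled strictly in order, and an ordinary workqueue for the outgoing buffer refills, with the already-created queue torn down again if the second allocation fails. A reduced sketch of that create/cleanup pairing, using hypothetical names:

#include <linux/errno.h>
#include <linux/workqueue.h>

struct mydev {
        struct workqueue_struct *in_wq;         /* ordered, single thread */
        struct workqueue_struct *out_wq;        /* may run work concurrently */
};

static int mydev_create_workqueues(struct mydev *d)
{
        d->out_wq = create_workqueue("mydev-out");
        if (d->out_wq == NULL)
                return -ENOMEM;

        d->in_wq = create_singlethread_workqueue("mydev-in");
        if (d->in_wq == NULL) {
                destroy_workqueue(d->out_wq);
                return -ENOMEM;
        }
        return 0;
}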
@@ -759,17 +795,17 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
retval = -ENODEV;
goto err;
}
- if (cx18_init_struct1(cx)) {
- retval = -ENOMEM;
+
+ retval = cx18_init_struct1(cx);
+ if (retval)
goto err;
- }
CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);
/* PCI Device Setup */
retval = cx18_setup_pci(cx, pci_dev, pci_id);
if (retval != 0)
- goto free_workqueue;
+ goto free_workqueues;
/* map io memory */
CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
@@ -943,8 +979,9 @@ free_map:
cx18_iounmap(cx);
free_mem:
release_mem_region(cx->base_addr, CX18_MEM_SIZE);
-free_workqueue:
- destroy_workqueue(cx->work_queue);
+free_workqueues:
+ destroy_workqueue(cx->in_work_queue);
+ destroy_workqueue(cx->out_work_queue);
err:
if (retval == 0)
retval = -ENODEV;
@@ -1053,11 +1090,19 @@ int cx18_init_on_first_open(struct cx18 *cx)
return 0;
}
-static void cx18_cancel_epu_work_orders(struct cx18 *cx)
+static void cx18_cancel_in_work_orders(struct cx18 *cx)
{
int i;
- for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++)
- cancel_work_sync(&cx->epu_work_order[i].work);
+ for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++)
+ cancel_work_sync(&cx->in_work_order[i].work);
+}
+
+static void cx18_cancel_out_work_orders(struct cx18 *cx)
+{
+ int i;
+ for (i = 0; i < CX18_MAX_STREAMS; i++)
+ if (&cx->streams[i].video_dev != NULL)
+ cancel_work_sync(&cx->streams[i].out_work_order);
}
static void cx18_remove(struct pci_dev *pci_dev)
@@ -1073,15 +1118,20 @@ static void cx18_remove(struct pci_dev *pci_dev)
if (atomic_read(&cx->tot_capturing) > 0)
cx18_stop_all_captures(cx);
- /* Interrupts */
+ /* Stop interrupts that cause incoming work to be queued */
cx18_sw1_irq_disable(cx, IRQ_CPU_TO_EPU | IRQ_APU_TO_EPU);
+
+ /* Incoming work can cause outgoing work, so clean up incoming first */
+ cx18_cancel_in_work_orders(cx);
+ cx18_cancel_out_work_orders(cx);
+
+ /* Stop ack interrupts that may have been needed for work to finish */
cx18_sw2_irq_disable(cx, IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK);
cx18_halt_firmware(cx);
- cx18_cancel_epu_work_orders(cx);
-
- destroy_workqueue(cx->work_queue);
+ destroy_workqueue(cx->in_work_queue);
+ destroy_workqueue(cx->out_work_queue);
cx18_streams_cleanup(cx, 1);
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index ece4f281ef4..c6a1e907f63 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -80,8 +80,9 @@
#define CX18_CARD_YUAN_MPC718 3 /* Yuan MPC718 */
#define CX18_CARD_CNXT_RAPTOR_PAL 4 /* Conexant Raptor PAL */
#define CX18_CARD_TOSHIBA_QOSMIO_DVBT 5 /* Toshiba Qosmio Internal DVB-T/Analog */
-#define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100/DVR3100 H */
-#define CX18_CARD_LAST 6
+#define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */
+#define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */
+#define CX18_CARD_LAST 7
#define CX18_ENC_STREAM_TYPE_MPG 0
#define CX18_ENC_STREAM_TYPE_TS 1
@@ -254,6 +255,7 @@ struct cx18_options {
#define CX18_F_S_INTERNAL_USE 5 /* this stream is used internally (sliced VBI processing) */
#define CX18_F_S_STREAMOFF 7 /* signal end of stream EOS */
#define CX18_F_S_APPL_IO 8 /* this stream is used read/written by an application */
+#define CX18_F_S_STOPPING 9 /* telling the fw to stop capturing */
/* per-cx18, i_flags */
#define CX18_F_I_LOADED_FW 0 /* Loaded firmware 1st time */
@@ -285,6 +287,7 @@ struct cx18_queue {
struct list_head list;
atomic_t buffers;
u32 bytesused;
+ spinlock_t lock;
};
struct cx18_dvb {
@@ -305,7 +308,7 @@ struct cx18_scb; /* forward reference */
#define CX18_MAX_MDL_ACKS 2
-#define CX18_MAX_EPU_WORK_ORDERS (CX18_MAX_FW_MDLS_PER_STREAM + 7)
+#define CX18_MAX_IN_WORK_ORDERS (CX18_MAX_FW_MDLS_PER_STREAM + 7)
/* CPU_DE_RELEASE_MDL can burst CX18_MAX_FW_MDLS_PER_STREAM orders in a group */
#define CX18_F_EWO_MB_STALE_UPON_RECEIPT 0x1
@@ -313,7 +316,7 @@ struct cx18_scb; /* forward reference */
#define CX18_F_EWO_MB_STALE \
(CX18_F_EWO_MB_STALE_UPON_RECEIPT | CX18_F_EWO_MB_STALE_WHILE_PROC)
-struct cx18_epu_work_order {
+struct cx18_in_work_order {
struct work_struct work;
atomic_t pending;
struct cx18 *cx;
@@ -337,7 +340,6 @@ struct cx18_stream {
unsigned mdl_offset;
u32 id;
- struct mutex qlock; /* locks access to the queues */
unsigned long s_flags; /* status flags, see above */
int dma; /* can be PCI_DMA_TODEVICE,
PCI_DMA_FROMDEVICE or
@@ -353,6 +355,8 @@ struct cx18_stream {
struct cx18_queue q_busy; /* busy buffers - in use by firmware */
struct cx18_queue q_full; /* full buffers - data for user apps */
+ struct work_struct out_work_order;
+
/* DVB / Digital Transport */
struct cx18_dvb dvb;
};
@@ -568,10 +572,14 @@ struct cx18 {
u32 sw2_irq_mask;
u32 hw2_irq_mask;
- struct workqueue_struct *work_queue;
- struct cx18_epu_work_order epu_work_order[CX18_MAX_EPU_WORK_ORDERS];
+ struct workqueue_struct *in_work_queue;
+ char in_workq_name[11]; /* "cx18-NN-in" */
+ struct cx18_in_work_order in_work_order[CX18_MAX_IN_WORK_ORDERS];
char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */
+ struct workqueue_struct *out_work_queue;
+ char out_workq_name[12]; /* "cx18-NN-out" */
+
/* i2c */
struct i2c_adapter i2c_adap[2];
struct i2c_algo_bit_data i2c_algo[2];
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index 3b86f57cd15..6ea3fe623ef 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -23,14 +23,20 @@
#include "cx18-version.h"
#include "cx18-dvb.h"
#include "cx18-io.h"
+#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-cards.h"
+#include "cx18-gpio.h"
#include "s5h1409.h"
#include "mxl5005s.h"
+#include "zl10353.h"
+#include "tuner-xc2028.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define CX18_REG_DMUX_NUM_PORT_0_CONTROL 0xd5a000
+#define CX18_CLOCK_ENABLE2 0xc71024
+#define CX18_DMUX_CLK_MASK 0x0080
static struct mxl5005s_config hauppauge_hvr1600_tuner = {
.i2c_address = 0xC6 >> 1,
@@ -57,7 +63,15 @@ static struct s5h1409_config hauppauge_hvr1600_config = {
.inversion = S5H1409_INVERSION_OFF,
.status_mode = S5H1409_DEMODLOCKING,
.mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
+};
+/* Information/confirmation of proper config values provided by Terry Wu */
+static struct zl10353_config leadtek_dvr3100h_demod = {
+ .demod_address = 0x1e >> 1, /* Datasheet suggested straps */
+ .if2 = 45600, /* 4.560 MHz IF from the XC3028 */
+ .parallel_ts = 1, /* Not a serial TS */
+ .no_tuner = 1, /* XC3028 is not behind the gate */
+ .disable_i2c_gate_ctrl = 1, /* Disable the I2C gate */
};
static int dvb_register(struct cx18_stream *stream);
@@ -98,6 +112,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
cx18_write_reg(cx, v, CX18_REG_DMUX_NUM_PORT_0_CONTROL);
break;
+ case CX18_CARD_LEADTEK_DVR3100H:
default:
/* Assumption - Parallel transport - Signalling
* undefined or default.
@@ -267,8 +282,7 @@ void cx18_dvb_unregister(struct cx18_stream *stream)
}
/* All the DVB attach calls go here, this function gets modified
- * for each new card. No other function in this file needs
- * to change.
+ * for each new card. cx18_dvb_start_feed() will also need changes.
*/
static int dvb_register(struct cx18_stream *stream)
{
@@ -289,6 +303,29 @@ static int dvb_register(struct cx18_stream *stream)
ret = 0;
}
break;
+ case CX18_CARD_LEADTEK_DVR3100H:
+ dvb->fe = dvb_attach(zl10353_attach,
+ &leadtek_dvr3100h_demod,
+ &cx->i2c_adap[1]);
+ if (dvb->fe != NULL) {
+ struct dvb_frontend *fe;
+ struct xc2028_config cfg = {
+ .i2c_adap = &cx->i2c_adap[1],
+ .i2c_addr = 0xc2 >> 1,
+ .ctrl = NULL,
+ };
+ static struct xc2028_ctrl ctrl = {
+ .fname = XC2028_DEFAULT_FIRMWARE,
+ .max_len = 64,
+ .demod = XC3028_FE_ZARLINK456,
+ .type = XC2028_AUTO,
+ };
+
+ fe = dvb_attach(xc2028_attach, dvb->fe, &cfg);
+ if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
+ fe->ops.tuner_ops.set_config(fe, &ctrl);
+ }
+ break;
default:
/* No Digital Tv Support */
break;
@@ -299,6 +336,8 @@ static int dvb_register(struct cx18_stream *stream)
return -1;
}
+ dvb->fe->callback = cx18_reset_tuner_gpio;
+
ret = dvb_register_frontend(&dvb->dvb_adapter, dvb->fe);
if (ret < 0) {
if (dvb->fe->ops.release)
@@ -306,5 +345,16 @@ static int dvb_register(struct cx18_stream *stream)
return ret;
}
+ /*
+ * The firmware seems to enable the TS DMUX clock
+ * under various circumstances. However, since we know we
+ * might use it, let's just turn it on ourselves here.
+ */
+ cx18_write_reg_expect(cx,
+ (CX18_DMUX_CLK_MASK << 16) | CX18_DMUX_CLK_MASK,
+ CX18_CLOCK_ENABLE2,
+ CX18_DMUX_CLK_MASK,
+ (CX18_DMUX_CLK_MASK << 16) | CX18_DMUX_CLK_MASK);
+
return ret;
}
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index b3889c0b269..29969c18949 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -265,8 +265,13 @@ static size_t cx18_copy_buf_to_user(struct cx18_stream *s,
* an MPEG-2 Program Pack start code, and provide only
* up to that point to the user, so it's easy to insert VBI data
* the next time around.
+ *
+ * This will not work for an MPEG-2 TS and has only been
+ * verified by analysis to work for an MPEG-2 PS. Helen Buus
+ * pointed out this works for the CX23416 MPEG-2 DVD compatible
+ * stream, and research indicates both the MPEG 2 SVCD and DVD
+ * stream types use an MPEG-2 PS container.
*/
- /* FIXME - This only works for an MPEG-2 PS, not a TS */
/*
* An MPEG-2 Program Stream (PS) is a series of
* MPEG-2 Program Packs terminated by an
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index 2226e5791e9..afe46c3d405 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -131,7 +131,7 @@ static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
* Functions that run in a work_queue work handling context
*/
-static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
+static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
{
u32 handle, mdl_ack_count, id;
struct cx18_mailbox *mb;
@@ -191,29 +191,30 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
if (buf == NULL) {
CX18_WARN("Could not find buf %d for stream %s\n",
id, s->name);
- /* Put as many buffers as possible back into fw use */
- cx18_stream_load_fw_queue(s);
continue;
}
- if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) {
- CX18_DEBUG_HI_DMA("TS recv bytesused = %d\n",
- buf->bytesused);
- dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
- buf->bytesused);
+ CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n",
+ s->name, buf->bytesused);
+
+ if (s->type != CX18_ENC_STREAM_TYPE_TS)
+ cx18_enqueue(s, buf, &s->q_full);
+ else {
+ if (s->dvb.enabled)
+ dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
+ buf->bytesused);
+ cx18_enqueue(s, buf, &s->q_free);
}
- /* Put as many buffers as possible back into fw use */
- cx18_stream_load_fw_queue(s);
- /* Put back TS buffer, since it was removed from all queues */
- if (s->type == CX18_ENC_STREAM_TYPE_TS)
- cx18_stream_put_buf_fw(s, buf);
}
+ /* Put as many buffers as possible back into fw use */
+ cx18_stream_load_fw_queue(s);
+
wake_up(&cx->dma_waitq);
if (s->id != -1)
wake_up(&s->waitq);
}
-static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order)
+static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order)
{
char *p;
char *str = order->str;
@@ -224,7 +225,7 @@ static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order)
CX18_INFO("FW version: %s\n", p - 1);
}
-static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order)
+static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order)
{
switch (order->rpu) {
case CPU:
@@ -253,18 +254,18 @@ static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order)
}
static
-void free_epu_work_order(struct cx18 *cx, struct cx18_epu_work_order *order)
+void free_in_work_order(struct cx18 *cx, struct cx18_in_work_order *order)
{
atomic_set(&order->pending, 0);
}
-void cx18_epu_work_handler(struct work_struct *work)
+void cx18_in_work_handler(struct work_struct *work)
{
- struct cx18_epu_work_order *order =
- container_of(work, struct cx18_epu_work_order, work);
+ struct cx18_in_work_order *order =
+ container_of(work, struct cx18_in_work_order, work);
struct cx18 *cx = order->cx;
epu_cmd(cx, order);
- free_epu_work_order(cx, order);
+ free_in_work_order(cx, order);
}
@@ -272,7 +273,7 @@ void cx18_epu_work_handler(struct work_struct *work)
* Functions that run in an interrupt handling context
*/
-static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
+static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
struct cx18_mailbox __iomem *ack_mb;
u32 ack_irq, req;
@@ -308,7 +309,7 @@ static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
return;
}
-static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
+static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
u32 handle, mdl_ack_offset, mdl_ack_count;
struct cx18_mailbox *mb;
@@ -334,7 +335,7 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
}
static
-int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
+int epu_debug_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
u32 str_offset;
char *str = order->str;
@@ -355,7 +356,7 @@ int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
}
static inline
-int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
+int epu_cmd_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
int ret = -1;
@@ -387,12 +388,12 @@ int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
}
static inline
-struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
+struct cx18_in_work_order *alloc_in_work_order_irq(struct cx18 *cx)
{
int i;
- struct cx18_epu_work_order *order = NULL;
+ struct cx18_in_work_order *order = NULL;
- for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
+ for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
/*
* We only need "pending" atomic to inspect its contents,
* and need not do a check and set because:
@@ -401,8 +402,8 @@ struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
* 2. "pending" is only set here, and we're serialized because
* we're called in an IRQ handler context.
*/
- if (atomic_read(&cx->epu_work_order[i].pending) == 0) {
- order = &cx->epu_work_order[i];
+ if (atomic_read(&cx->in_work_order[i].pending) == 0) {
+ order = &cx->in_work_order[i];
atomic_set(&order->pending, 1);
break;
}
@@ -414,7 +415,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
struct cx18_mailbox __iomem *mb;
struct cx18_mailbox *order_mb;
- struct cx18_epu_work_order *order;
+ struct cx18_in_work_order *order;
int submit;
switch (rpu) {
@@ -428,7 +429,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
return;
}
- order = alloc_epu_work_order_irq(cx);
+ order = alloc_in_work_order_irq(cx);
if (order == NULL) {
CX18_WARN("Unable to find blank work order form to schedule "
"incoming mailbox command processing\n");
@@ -461,7 +462,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
*/
submit = epu_cmd_irq(cx, order);
if (submit > 0) {
- queue_work(cx->work_queue, &order->work);
+ queue_work(cx->in_work_queue, &order->work);
}
}
@@ -478,9 +479,10 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
u32 __iomem *xpu_state;
wait_queue_head_t *waitq;
struct mutex *mb_lock;
- long int timeout, ret;
+ unsigned long int t0, timeout, ret;
int i;
char argstr[MAX_MB_ARGUMENTS*11+1];
+ DEFINE_WAIT(w);
if (info == NULL) {
CX18_WARN("unknown cmd %x\n", cmd);
@@ -562,25 +564,49 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
irq, info->name);
+
+ /* So we don't miss the wakeup, prepare to wait before notifying fw */
+ prepare_to_wait(waitq, &w, TASK_UNINTERRUPTIBLE);
cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);
- ret = wait_event_timeout(
- *waitq,
- cx18_readl(cx, &mb->ack) == cx18_readl(cx, &mb->request),
- timeout);
+ t0 = jiffies;
+ ack = cx18_readl(cx, &mb->ack);
+ if (ack != req) {
+ schedule_timeout(timeout);
+ ret = jiffies - t0;
+ ack = cx18_readl(cx, &mb->ack);
+ } else {
+ ret = jiffies - t0;
+ }
- if (ret == 0) {
- /* Timed out */
+ finish_wait(waitq, &w);
+
+ if (req != ack) {
mutex_unlock(mb_lock);
- CX18_DEBUG_WARN("sending %s timed out waiting %d msecs for RPU "
- "acknowledgement\n",
- info->name, jiffies_to_msecs(timeout));
+ if (ret >= timeout) {
+ /* Timed out */
+ CX18_DEBUG_WARN("sending %s timed out waiting %d msecs "
+ "for RPU acknowledgement\n",
+ info->name, jiffies_to_msecs(ret));
+ } else {
+ CX18_DEBUG_WARN("woken up before mailbox ack was ready "
+ "after submitting %s to RPU. only "
+ "waited %d msecs on req %u but awakened"
+ " with unmatched ack %u\n",
+ info->name,
+ jiffies_to_msecs(ret),
+ req, ack);
+ }
return -EINVAL;
}
- if (ret != timeout)
+ if (ret >= timeout)
+ CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment "
+ "sending %s; timed out waiting %d msecs\n",
+ info->name, jiffies_to_msecs(ret));
+ else
CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
- jiffies_to_msecs(timeout-ret), info->name);
+ jiffies_to_msecs(ret), info->name);
/* Collect data returned by the XPU */
for (i = 0; i < MAX_MB_ARGUMENTS; i++)
diff --git a/drivers/media/video/cx18/cx18-mailbox.h b/drivers/media/video/cx18/cx18-mailbox.h
index ce2b6686aa0..e23aaac5b28 100644
--- a/drivers/media/video/cx18/cx18-mailbox.h
+++ b/drivers/media/video/cx18/cx18-mailbox.h
@@ -95,6 +95,6 @@ int cx18_api_func(void *priv, u32 cmd, int in, int out,
void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu);
-void cx18_epu_work_handler(struct work_struct *work);
+void cx18_in_work_handler(struct work_struct *work);
#endif
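
The cx18_api_call() rework in cx18-mailbox.c above replaces wait_event_timeout() with an open-coded prepare_to_wait()/finish_wait() sequence so the task is already on the wait queue before the firmware is notified, per the "So we don't miss the wakeup" comment. A minimal sketch of that idiom, with the condition check and timeout accounting reduced to the essentials (placeholder names throughout):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);

/* Returns the jiffies left on the timeout; 0 means it fully elapsed */
static long wait_for_ack(int (*acked)(void), long timeout)
{
        long remaining = timeout;
        DEFINE_WAIT(w);

        /* Get on the queue *before* triggering the event source */
        prepare_to_wait(&my_waitq, &w, TASK_UNINTERRUPTIBLE);
        /* ... kick the hardware / remote processor here ... */
        if (!acked())
                remaining = schedule_timeout(timeout);
        finish_wait(&my_waitq, &w);

        return remaining;
}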
diff --git a/drivers/media/video/cx18/cx18-queue.c b/drivers/media/video/cx18/cx18-queue.c
index 3046b8e7434..fa1ed7897d9 100644
--- a/drivers/media/video/cx18/cx18-queue.c
+++ b/drivers/media/video/cx18/cx18-queue.c
@@ -23,8 +23,8 @@
*/
#include "cx18-driver.h"
-#include "cx18-streams.h"
#include "cx18-queue.h"
+#include "cx18-streams.h"
#include "cx18-scb.h"
void cx18_buf_swap(struct cx18_buffer *buf)
@@ -53,13 +53,13 @@ struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
buf->skipped = 0;
}
- mutex_lock(&s->qlock);
-
/* q_busy is restricted to a max buffer count imposed by firmware */
if (q == &s->q_busy &&
atomic_read(&q->buffers) >= CX18_MAX_FW_MDLS_PER_STREAM)
q = &s->q_free;
+ spin_lock(&q->lock);
+
if (to_front)
list_add(&buf->list, &q->list); /* LIFO */
else
@@ -67,7 +67,7 @@ struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
q->bytesused += buf->bytesused - buf->readpos;
atomic_inc(&q->buffers);
- mutex_unlock(&s->qlock);
+ spin_unlock(&q->lock);
return q;
}
@@ -75,7 +75,7 @@ struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
{
struct cx18_buffer *buf = NULL;
- mutex_lock(&s->qlock);
+ spin_lock(&q->lock);
if (!list_empty(&q->list)) {
buf = list_first_entry(&q->list, struct cx18_buffer, list);
list_del_init(&buf->list);
@@ -83,7 +83,7 @@ struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
buf->skipped = 0;
atomic_dec(&q->buffers);
}
- mutex_unlock(&s->qlock);
+ spin_unlock(&q->lock);
return buf;
}
@@ -94,9 +94,23 @@ struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
struct cx18_buffer *buf;
struct cx18_buffer *tmp;
struct cx18_buffer *ret = NULL;
-
- mutex_lock(&s->qlock);
+ LIST_HEAD(sweep_up);
+
+ /*
+ * We don't have to acquire multiple q locks here, because we are
+ * serialized by the single threaded work handler.
+ * Buffers from the firmware will thus remain in order as
+ * they are moved from q_busy to q_full or to the dvb ring buffer.
+ */
+ spin_lock(&s->q_busy.lock);
list_for_each_entry_safe(buf, tmp, &s->q_busy.list, list) {
+ /*
+ * We should find what the firmware told us is done,
+ * right at the front of the queue. If we don't, we likely have
+ * missed a buffer done message from the firmware.
+ * Once we skip a buffer repeatedly, relative to the size of
+ * q_busy, we have high confidence we've missed it.
+ */
if (buf->id != id) {
buf->skipped++;
if (buf->skipped >= atomic_read(&s->q_busy.buffers)-1) {
@@ -105,38 +119,41 @@ struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
"times - it must have dropped out of "
"rotation\n", s->name, buf->id,
buf->skipped);
- /* move it to q_free */
- list_move_tail(&buf->list, &s->q_free.list);
- buf->bytesused = buf->readpos = buf->b_flags =
- buf->skipped = 0;
+ /* Sweep it up to put it back into rotation */
+ list_move_tail(&buf->list, &sweep_up);
atomic_dec(&s->q_busy.buffers);
- atomic_inc(&s->q_free.buffers);
}
continue;
}
-
- buf->bytesused = bytesused;
- /* Sync the buffer before we release the qlock */
- cx18_buf_sync_for_cpu(s, buf);
- if (s->type == CX18_ENC_STREAM_TYPE_TS) {
- /*
- * TS doesn't use q_full. As we pull the buffer off of
- * the queue here, the caller will have to put it back.
- */
- list_del_init(&buf->list);
- } else {
- /* Move buffer from q_busy to q_full */
- list_move_tail(&buf->list, &s->q_full.list);
- set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
- s->q_full.bytesused += buf->bytesused;
- atomic_inc(&s->q_full.buffers);
- }
+ /*
+ * We pull the desired buffer off of the queue here. Something
+ * will have to put it back on a queue later.
+ */
+ list_del_init(&buf->list);
atomic_dec(&s->q_busy.buffers);
-
ret = buf;
break;
}
- mutex_unlock(&s->qlock);
+ spin_unlock(&s->q_busy.lock);
+
+ /*
+ * We found the buffer for which we were looking. Get it ready for
+ * the caller to put on q_full or in the dvb ring buffer.
+ */
+ if (ret != NULL) {
+ ret->bytesused = bytesused;
+ ret->skipped = 0;
+ /* readpos and b_flags were 0'ed when the buf went on q_busy */
+ cx18_buf_sync_for_cpu(s, ret);
+ if (s->type != CX18_ENC_STREAM_TYPE_TS)
+ set_bit(CX18_F_B_NEED_BUF_SWAP, &ret->b_flags);
+ }
+
+ /* Put any buffers the firmware is ignoring back into normal rotation */
+ list_for_each_entry_safe(buf, tmp, &sweep_up, list) {
+ list_del_init(&buf->list);
+ cx18_enqueue(s, buf, &s->q_free);
+ }
return ret;
}
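
The hunk above collects "lost" buffers onto a private list while only the q_busy lock is held and re-enqueues them onto q_free afterwards, so the two per-queue spinlocks are never held at the same time. A generic sketch of that collect-then-requeue pattern with placeholder types (the real code uses the cx18_buffer/cx18_queue pair and cx18_enqueue()):

#include <linux/list.h>
#include <linux/spinlock.h>

struct item { struct list_head list; };

static void sweep_stale(struct list_head *busy, spinlock_t *busy_lock,
                        struct list_head *free_q, spinlock_t *free_lock)
{
        struct item *it, *tmp;
        LIST_HEAD(sweep_up);

        spin_lock(busy_lock);
        list_for_each_entry_safe(it, tmp, busy, list)
                list_move_tail(&it->list, &sweep_up);   /* defer requeue */
        spin_unlock(busy_lock);

        /* Requeue outside busy_lock: the two locks are never nested */
        list_for_each_entry_safe(it, tmp, &sweep_up, list) {
                list_del_init(&it->list);
                spin_lock(free_lock);
                list_add_tail(&it->list, free_q);
                spin_unlock(free_lock);
        }
}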
@@ -148,7 +165,7 @@ static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
if (q == &s->q_free)
return;
- mutex_lock(&s->qlock);
+ spin_lock(&q->lock);
while (!list_empty(&q->list)) {
buf = list_first_entry(&q->list, struct cx18_buffer, list);
list_move_tail(&buf->list, &s->q_free.list);
@@ -156,7 +173,7 @@ static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
atomic_inc(&s->q_free.buffers);
}
cx18_queue_init(q);
- mutex_unlock(&s->qlock);
+ spin_unlock(&q->lock);
}
void cx18_flush_queues(struct cx18_stream *s)
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 0932b76b237..54d248e16d8 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -116,12 +116,16 @@ static void cx18_stream_init(struct cx18 *cx, int type)
s->buffers = cx->stream_buffers[type];
s->buf_size = cx->stream_buf_size[type];
- mutex_init(&s->qlock);
init_waitqueue_head(&s->waitq);
s->id = -1;
+ spin_lock_init(&s->q_free.lock);
cx18_queue_init(&s->q_free);
+ spin_lock_init(&s->q_busy.lock);
cx18_queue_init(&s->q_busy);
+ spin_lock_init(&s->q_full.lock);
cx18_queue_init(&s->q_full);
+
+ INIT_WORK(&s->out_work_order, cx18_out_work_handler);
}
static int cx18_prep_dev(struct cx18 *cx, int type)
@@ -367,9 +371,14 @@ static void cx18_vbi_setup(struct cx18_stream *s)
* Tell the encoder to capture 21-4+1=18 lines per field,
* since we want lines 10 through 21.
*
- * FIXME - revisit for 625/50 systems
+ * For 625/50 systems, according to the VIP 2 & BT.656 std:
+ * The EAV RP code's Field bit toggles on line 1, a few lines
+ * after the Vertical Blank bit has already toggled.
+ * (We've actually set the digitizer so that the Field bit
+ * toggles on line 2.) Tell the encoder to capture 23-2+1=22
+ * lines per field, since we want lines 6 through 23.
*/
- lines = cx->is_60hz ? (21 - 4 + 1) * 2 : 38;
+ lines = cx->is_60hz ? (21 - 4 + 1) * 2 : (23 - 2 + 1) * 2;
}
data[0] = s->handle;
@@ -431,14 +440,16 @@ static void cx18_vbi_setup(struct cx18_stream *s)
cx18_api(cx, CX18_CPU_SET_RAW_VBI_PARAM, 6, data);
}
-struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
- struct cx18_buffer *buf)
+static
+struct cx18_queue *_cx18_stream_put_buf_fw(struct cx18_stream *s,
+ struct cx18_buffer *buf)
{
struct cx18 *cx = s->cx;
struct cx18_queue *q;
/* Don't give it to the firmware, if we're not running a capture */
if (s->handle == CX18_INVALID_TASK_HANDLE ||
+ test_bit(CX18_F_S_STOPPING, &s->s_flags) ||
!test_bit(CX18_F_S_STREAMING, &s->s_flags))
return cx18_enqueue(s, buf, &s->q_free);
@@ -453,7 +464,8 @@ struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
return q;
}
-void cx18_stream_load_fw_queue(struct cx18_stream *s)
+static
+void _cx18_stream_load_fw_queue(struct cx18_stream *s)
{
struct cx18_queue *q;
struct cx18_buffer *buf;
@@ -467,11 +479,19 @@ void cx18_stream_load_fw_queue(struct cx18_stream *s)
buf = cx18_dequeue(s, &s->q_free);
if (buf == NULL)
break;
- q = cx18_stream_put_buf_fw(s, buf);
+ q = _cx18_stream_put_buf_fw(s, buf);
} while (atomic_read(&s->q_busy.buffers) < CX18_MAX_FW_MDLS_PER_STREAM
&& q == &s->q_busy);
}
+void cx18_out_work_handler(struct work_struct *work)
+{
+ struct cx18_stream *s =
+ container_of(work, struct cx18_stream, out_work_order);
+
+ _cx18_stream_load_fw_queue(s);
+}
+
int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
{
u32 data[MAX_MB_ARGUMENTS];
@@ -600,19 +620,20 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
/* Init all the cpu_mdls for this stream */
cx18_flush_queues(s);
- mutex_lock(&s->qlock);
+ spin_lock(&s->q_free.lock);
list_for_each_entry(buf, &s->q_free.list, list) {
cx18_writel(cx, buf->dma_handle,
&cx->scb->cpu_mdl[buf->id].paddr);
cx18_writel(cx, s->buf_size, &cx->scb->cpu_mdl[buf->id].length);
}
- mutex_unlock(&s->qlock);
- cx18_stream_load_fw_queue(s);
+ spin_unlock(&s->q_free.lock);
+ _cx18_stream_load_fw_queue(s);
/* begin_capture */
if (cx18_vapi(cx, CX18_CPU_CAPTURE_START, 1, s->handle)) {
CX18_DEBUG_WARN("Error starting capture!\n");
/* Ensure we're really not capturing before releasing MDLs */
+ set_bit(CX18_F_S_STOPPING, &s->s_flags);
if (s->type == CX18_ENC_STREAM_TYPE_MPG)
cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, 1);
else
@@ -622,6 +643,7 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle);
cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
s->handle = CX18_INVALID_TASK_HANDLE;
+ clear_bit(CX18_F_S_STOPPING, &s->s_flags);
if (atomic_read(&cx->tot_capturing) == 0) {
set_bit(CX18_F_I_EOS, &cx->i_flags);
cx18_write_reg(cx, 5, CX18_DSP0_INTERRUPT_MASK);
@@ -666,6 +688,7 @@ int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
if (atomic_read(&cx->tot_capturing) == 0)
return 0;
+ set_bit(CX18_F_S_STOPPING, &s->s_flags);
if (s->type == CX18_ENC_STREAM_TYPE_MPG)
cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, !gop_end);
else
@@ -689,6 +712,7 @@ int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
s->handle = CX18_INVALID_TASK_HANDLE;
+ clear_bit(CX18_F_S_STOPPING, &s->s_flags);
if (atomic_read(&cx->tot_capturing) > 0)
return 0;
diff --git a/drivers/media/video/cx18/cx18-streams.h b/drivers/media/video/cx18/cx18-streams.h
index 420e0a17294..1afc3fd9d82 100644
--- a/drivers/media/video/cx18/cx18-streams.h
+++ b/drivers/media/video/cx18/cx18-streams.h
@@ -28,10 +28,24 @@ int cx18_streams_setup(struct cx18 *cx);
int cx18_streams_register(struct cx18 *cx);
void cx18_streams_cleanup(struct cx18 *cx, int unregister);
+/* Related to submission of buffers to firmware */
+static inline void cx18_stream_load_fw_queue(struct cx18_stream *s)
+{
+ struct cx18 *cx = s->cx;
+ queue_work(cx->out_work_queue, &s->out_work_order);
+}
+
+static inline void cx18_stream_put_buf_fw(struct cx18_stream *s,
+ struct cx18_buffer *buf)
+{
+ /* Put buf on q_free; the out work handler will move buf(s) to q_busy */
+ cx18_enqueue(s, buf, &s->q_free);
+ cx18_stream_load_fw_queue(s);
+}
+
+void cx18_out_work_handler(struct work_struct *work);
+
/* Capture related */
-void cx18_stream_load_fw_queue(struct cx18_stream *s);
-struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
- struct cx18_buffer *buf);
int cx18_start_v4l2_encode_stream(struct cx18_stream *s);
int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end);
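
With these inline helpers, a completion path only hands a buffer back via cx18_stream_put_buf_fw(), which enqueues it on q_free and schedules the stream's out_work_order; the actual topping-up of q_busy toward CX18_MAX_FW_MDLS_PER_STREAM then happens in cx18_out_work_handler() on the out work queue, rather than synchronously in the incoming-mailbox handling path.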
diff --git a/drivers/media/video/cx18/cx18-version.h b/drivers/media/video/cx18/cx18-version.h
index bd9bd44da79..45494b094e7 100644
--- a/drivers/media/video/cx18/cx18-version.h
+++ b/drivers/media/video/cx18/cx18-version.h
@@ -24,7 +24,7 @@
#define CX18_DRIVER_NAME "cx18"
#define CX18_DRIVER_VERSION_MAJOR 1
-#define CX18_DRIVER_VERSION_MINOR 1
+#define CX18_DRIVER_VERSION_MINOR 2
#define CX18_DRIVER_VERSION_PATCHLEVEL 0
#define CX18_VERSION __stringify(CX18_DRIVER_VERSION_MAJOR) "." __stringify(CX18_DRIVER_VERSION_MINOR) "." __stringify(CX18_DRIVER_VERSION_PATCHLEVEL)
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 1be3881be99..6a9464079b4 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -29,7 +29,6 @@
#include <linux/bitmap.h>
#include <linux/usb.h>
#include <linux/i2c.h>
-#include <linux/version.h>
#include <linux/mm.h>
#include <linux/mutex.h>
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index c8a32b1b538..63d2239fd32 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -281,12 +281,12 @@ static void cx231xx_config_tuner(struct cx231xx *dev)
}
/* ----------------------------------------------------------------------- */
-void cx231xx_set_ir(struct cx231xx *dev, struct IR_i2c *ir)
+void cx231xx_register_i2c_ir(struct cx231xx *dev)
{
- if (disable_ir) {
- ir->get_key = NULL;
+ if (disable_ir)
return;
- }
+
+ /* REVISIT: instantiate IR device */
/* detect & configure */
switch (dev->model) {
diff --git a/drivers/media/video/cx231xx/cx231xx-i2c.c b/drivers/media/video/cx231xx/cx231xx-i2c.c
index b4a03d813e0..33219dc4d64 100644
--- a/drivers/media/video/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/video/cx231xx/cx231xx-i2c.c
@@ -424,34 +424,6 @@ static u32 functionality(struct i2c_adapter *adap)
return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
}
-/*
- * attach_inform()
- * gets called when a device attaches to the i2c bus
- * does some basic configuration
- */
-static int attach_inform(struct i2c_client *client)
-{
- struct cx231xx_i2c *bus = i2c_get_adapdata(client->adapter);
- struct cx231xx *dev = bus->dev;
-
- switch (client->addr << 1) {
- case 0x8e:
- {
- struct IR_i2c *ir = i2c_get_clientdata(client);
- dprintk1(1, "attach_inform: IR detected (%s).\n",
- ir->phys);
- cx231xx_set_ir(dev, ir);
- break;
- }
- break;
-
- default:
- break;
- }
-
- return 0;
-}
-
static struct i2c_algorithm cx231xx_algo = {
.master_xfer = cx231xx_i2c_xfer,
.functionality = functionality,
@@ -462,7 +434,6 @@ static struct i2c_adapter cx231xx_adap_template = {
.name = "cx231xx",
.id = I2C_HW_B_CX231XX,
.algo = &cx231xx_algo,
- .client_register = attach_inform,
};
static struct i2c_client cx231xx_client_template = {
@@ -537,6 +508,9 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus)
if (0 == bus->i2c_rc) {
if (i2c_scan)
cx231xx_do_i2c_scan(dev, &bus->i2c_client);
+
+ /* Instantiate the IR receiver device, if present */
+ cx231xx_register_i2c_ir(dev);
} else
cx231xx_warn("%s: i2c bus %d register FAILED\n",
dev->name, bus->nr);
diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c
index 97e304c3c79..48f22fa38e6 100644
--- a/drivers/media/video/cx231xx/cx231xx-input.c
+++ b/drivers/media/video/cx231xx/cx231xx-input.c
@@ -36,7 +36,7 @@ MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
#define i2cdprintk(fmt, arg...) \
if (ir_debug) { \
- printk(KERN_DEBUG "%s/ir: " fmt, ir->c.name , ## arg); \
+ printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
}
#define dprintk(fmt, arg...) \
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.c b/drivers/media/video/cx231xx/cx231xx-vbi.c
index 94180526909..e97b8023a65 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.c
@@ -26,7 +26,6 @@
#include <linux/bitmap.h>
#include <linux/usb.h>
#include <linux/i2c.h>
-#include <linux/version.h>
#include <linux/mm.h>
#include <linux/mutex.h>
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index aa4a23ef491..e38eb2d425f 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -738,7 +738,7 @@ extern void cx231xx_card_setup(struct cx231xx *dev);
extern struct cx231xx_board cx231xx_boards[];
extern struct usb_device_id cx231xx_id_table[];
extern const unsigned int cx231xx_bcount;
-void cx231xx_set_ir(struct cx231xx *dev, struct IR_i2c *ir);
+void cx231xx_register_i2c_ir(struct cx231xx *dev);
int cx231xx_tuner_callback(void *ptr, int component, int command, int arg);
/* Provided by cx231xx-input.c */
diff --git a/drivers/media/video/cx23885/cimax2.c b/drivers/media/video/cx23885/cimax2.c
index 9a6536998d9..08582e58bdb 100644
--- a/drivers/media/video/cx23885/cimax2.c
+++ b/drivers/media/video/cx23885/cimax2.c
@@ -312,7 +312,7 @@ static void netup_read_ci_status(struct work_struct *work)
"TS config = %02x\n", __func__, state->ci_i2c_addr, 0, buf[0],
buf[32]);
- if (buf[0] && 1)
+ if (buf[0] & 1)
state->status = DVB_CA_EN50221_POLL_CAM_PRESENT |
DVB_CA_EN50221_POLL_CAM_READY;
else
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 6f5df90af93..2943bfd32a9 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1742,7 +1742,6 @@ static struct video_device *cx23885_video_dev_alloc(
if (NULL == vfd)
return NULL;
*vfd = *template;
- vfd->minor = -1;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name,
type, cx23885_boards[tsport->dev->board].name);
vfd->parent = &pci->dev;
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 6d6293f7d42..ce29b5e34a1 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -181,6 +181,26 @@ struct cx23885_board cx23885_boards[] = {
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
},
+ [CX23885_BOARD_HAUPPAUGE_HVR1270] = {
+ .name = "Hauppauge WinTV-HVR1270",
+ .portc = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_HAUPPAUGE_HVR1275] = {
+ .name = "Hauppauge WinTV-HVR1275",
+ .portc = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_HAUPPAUGE_HVR1255] = {
+ .name = "Hauppauge WinTV-HVR1255",
+ .portc = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_HAUPPAUGE_HVR1210] = {
+ .name = "Hauppauge WinTV-HVR1210",
+ .portc = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_MYGICA_X8506] = {
+ .name = "Mygica X8506 DMB-TH",
+ .portb = CX23885_MPEG_DVB,
+ },
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -280,6 +300,30 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x1b55,
.subdevice = 0x2a2c,
.card = CX23885_BOARD_NETUP_DUAL_DVBS2_CI,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0x2211,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR1270,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0x2215,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR1275,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0x2251,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR1255,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0x2291,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR1210,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0x2295,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR1210,
+ }, {
+ .subvendor = 0x14f1,
+ .subdevice = 0x8651,
+ .card = CX23885_BOARD_MYGICA_X8506,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -321,6 +365,42 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
/* Make sure we support the board model */
switch (tv.model) {
+ case 22001:
+ /* WinTV-HVR1270 (PCIe, Retail, half height)
+ * ATSC/QAM and basic analog, IR Blast */
+ case 22009:
+ /* WinTV-HVR1210 (PCIe, Retail, half height)
+ * DVB-T and basic analog, IR Blast */
+ case 22011:
+ /* WinTV-HVR1270 (PCIe, Retail, half height)
+ * ATSC/QAM and basic analog, IR Recv */
+ case 22019:
+ /* WinTV-HVR1210 (PCIe, Retail, half height)
+ * DVB-T and basic analog, IR Recv */
+ case 22021:
+ /* WinTV-HVR1275 (PCIe, Retail, half height)
+ * ATSC/QAM and basic analog, IR Recv */
+ case 22029:
+ /* WinTV-HVR1210 (PCIe, Retail, half height)
+ * DVB-T and basic analog, IR Recv */
+ case 22101:
+ /* WinTV-HVR1270 (PCIe, Retail, full height)
+ * ATSC/QAM and basic analog, IR Blast */
+ case 22109:
+ /* WinTV-HVR1210 (PCIe, Retail, full height)
+ * DVB-T and basic analog, IR Blast */
+ case 22111:
+ /* WinTV-HVR1270 (PCIe, Retail, full height)
+ * ATSC/QAM and basic analog, IR Recv */
+ case 22119:
+ /* WinTV-HVR1210 (PCIe, Retail, full height)
+ * DVB-T and basic analog, IR Recv */
+ case 22121:
+ /* WinTV-HVR1275 (PCIe, Retail, full height)
+ * ATSC/QAM and basic analog, IR Recv */
+ case 22129:
+ /* WinTV-HVR1210 (PCIe, Retail, full height)
+ * DVB-T and basic analog, IR Recv */
case 71009:
/* WinTV-HVR1200 (PCIe, Retail, full height)
* DVB-T and basic analog */
@@ -619,6 +699,30 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* enable irq */
cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
+ case CX23885_BOARD_HAUPPAUGE_HVR1275:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1210:
+ /* GPIO-5 RF Control: 0 = RF1 Terrestrial, 1 = RF2 Cable */
+ /* GPIO-6 I2C Gate which can isolate the demod from the bus */
+ /* GPIO-9 Demod reset */
+
+ /* Put the parts into reset and back */
+ cx23885_gpio_enable(dev, GPIO_9 | GPIO_6 | GPIO_5, 1);
+ cx23885_gpio_set(dev, GPIO_9 | GPIO_6 | GPIO_5);
+ cx23885_gpio_clear(dev, GPIO_9);
+ mdelay(20);
+ cx23885_gpio_set(dev, GPIO_9);
+ break;
+ case CX23885_BOARD_MYGICA_X8506:
+ /* GPIO-1 reset XC5000 */
+ /* GPIO-2 reset LGS8GL5 */
+ cx_set(GP0_IO, 0x00060000);
+ cx_clear(GP0_IO, 0x00000006);
+ mdelay(100);
+ cx_set(GP0_IO, 0x00060006);
+ mdelay(100);
+ break;
}
}
@@ -631,6 +735,10 @@ int cx23885_ir_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_HVR1200:
case CX23885_BOARD_HAUPPAUGE_HVR1400:
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
+ case CX23885_BOARD_HAUPPAUGE_HVR1275:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1210:
/* FIXME: Implement me */
break;
case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
@@ -666,6 +774,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
case CX23885_BOARD_HAUPPAUGE_HVR1200:
case CX23885_BOARD_HAUPPAUGE_HVR1700:
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
+ case CX23885_BOARD_HAUPPAUGE_HVR1275:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1210:
if (dev->i2c_bus[0].i2c_rc == 0)
hauppauge_eeprom(dev, eeprom+0xc0);
break;
@@ -714,6 +826,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_MYGICA_X8506:
+ ts1->gen_ctrl_val = 0x5; /* Parallel */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
@@ -723,6 +840,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1400:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
+ case CX23885_BOARD_HAUPPAUGE_HVR1275:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1210:
default:
ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index beda42925ce..bf7bb1c412f 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -1700,9 +1700,13 @@ static irqreturn_t cx23885_irq(int irq, void *dev_id)
}
if (cx23885_boards[dev->board].cimax > 0 &&
- ((pci_status & PCI_MSK_GPIO0) || (pci_status & PCI_MSK_GPIO1)))
- /* handled += cx23885_irq_gpio(dev, pci_status); */
- handled += netup_ci_slot_status(dev, pci_status);
+ ((pci_status & PCI_MSK_GPIO0) ||
+ (pci_status & PCI_MSK_GPIO1))) {
+
+ if (cx23885_boards[dev->board].cimax > 0)
+ handled += netup_ci_slot_status(dev, pci_status);
+
+ }
if (ts1_status) {
if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
@@ -1729,6 +1733,88 @@ out:
return IRQ_RETVAL(handled);
}
+static inline int encoder_on_portb(struct cx23885_dev *dev)
+{
+ return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
+}
+
+static inline int encoder_on_portc(struct cx23885_dev *dev)
+{
+ return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
+}
+
+/* The mask represents 32 different GPIOs; the GPIOs are split across multiple
+ * registers depending on the board configuration (and on whether the
+ * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
+ * be pushed into the correct hardware register, regardless of its
+ * physical location. Certain registers are shared, so we sanity check
+ * and report errors if we think we're tampering with a GPIO that might
+ * be assigned to the encoder (and used for the host bus).
+ *
+ * GPIO 2 thru 0 - On the cx23885 bridge
+ * GPIO 18 thru 3 - On the cx23417 host bus interface
+ * GPIO 23 thru 19 - On the cx25840 a/v core
+ */
+void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
+{
+ if (mask & 0x7)
+ cx_set(GP0_IO, mask & 0x7);
+
+ if (mask & 0x0007fff8) {
+ if (encoder_on_portb(dev) || encoder_on_portc(dev))
+ printk(KERN_ERR
+ "%s: Setting GPIO on encoder ports\n",
+ dev->name);
+ cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
+ }
+
+ /* TODO: 23-19 */
+ if (mask & 0x00f80000)
+ printk(KERN_INFO "%s: Unsupported\n", dev->name);
+}
+
+void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
+{
+ if (mask & 0x00000007)
+ cx_clear(GP0_IO, mask & 0x7);
+
+ if (mask & 0x0007fff8) {
+ if (encoder_on_portb(dev) || encoder_on_portc(dev))
+ printk(KERN_ERR
+ "%s: Clearing GPIO moving on encoder ports\n",
+ dev->name);
+ cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
+ }
+
+ /* TODO: 23-19 */
+ if (mask & 0x00f80000)
+ printk(KERN_INFO "%s: Unsupported\n", dev->name);
+}
+
+void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
+{
+ if ((mask & 0x00000007) && asoutput)
+ cx_set(GP0_IO, (mask & 0x7) << 16);
+ else if ((mask & 0x00000007) && !asoutput)
+ cx_clear(GP0_IO, (mask & 0x7) << 16);
+
+ if (mask & 0x0007fff8) {
+ if (encoder_on_portb(dev) || encoder_on_portc(dev))
+ printk(KERN_ERR
+ "%s: Enabling GPIO on encoder ports\n",
+ dev->name);
+ }
+
+ /* MC417_OEN is active low for output, write 1 for an input */
+ if ((mask & 0x0007fff8) && asoutput)
+ cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
+
+ else if ((mask & 0x0007fff8) && !asoutput)
+ cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
+
+ /* TODO: 23-19 */
+}
+
static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
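The three GPIO helpers added above route a single 32-bit mask to the right registers: bits 0-2 go to GP0_IO on the bridge, bits 3-18 are shifted down by 3 and written to the cx23417 host-bus registers (MC417_RWD for level, MC417_OEN for direction, active low for output), and bits 19-23 are left as a TODO. For example, GPIO_9 (0x200) falls in the 0x0007fff8 range, so setting it writes 0x200 >> 3 = 0x40 into MC417_RWD. A hedged usage sketch that restates the HVR127x/HVR1210 reset sequence from cx23885_gpio_setup() above (the helper name is hypothetical; assumes <linux/delay.h> for mdelay):

static void hvr12xx_demod_reset(struct cx23885_dev *dev)
{
	/* drive GPIO 5 (RF switch), 6 (I2C gate) and 9 (demod reset) as outputs */
	cx23885_gpio_enable(dev, GPIO_9 | GPIO_6 | GPIO_5, 1);
	cx23885_gpio_set(dev, GPIO_9 | GPIO_6 | GPIO_5);

	cx23885_gpio_clear(dev, GPIO_9);	/* assert demod reset */
	mdelay(20);				/* hold it low for 20 ms */
	cx23885_gpio_set(dev, GPIO_9);		/* release reset */
}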
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 1dc070da865..e236df23370 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -49,8 +49,10 @@
#include "lnbh24.h"
#include "cx24116.h"
#include "cimax2.h"
+#include "lgs8gxx.h"
#include "netup-eeprom.h"
#include "netup-init.h"
+#include "lgdt3305.h"
static unsigned int debug;
@@ -122,7 +124,22 @@ static struct tda10048_config hauppauge_hvr1200_config = {
.demod_address = 0x10 >> 1,
.output_mode = TDA10048_SERIAL_OUTPUT,
.fwbulkwritelen = TDA10048_BULKWRITE_200,
- .inversion = TDA10048_INVERSION_ON
+ .inversion = TDA10048_INVERSION_ON,
+ .dtv6_if_freq_khz = TDA10048_IF_3300,
+ .dtv7_if_freq_khz = TDA10048_IF_3800,
+ .dtv8_if_freq_khz = TDA10048_IF_4300,
+ .clk_freq_khz = TDA10048_CLK_16000,
+};
+
+static struct tda10048_config hauppauge_hvr1210_config = {
+ .demod_address = 0x10 >> 1,
+ .output_mode = TDA10048_SERIAL_OUTPUT,
+ .fwbulkwritelen = TDA10048_BULKWRITE_200,
+ .inversion = TDA10048_INVERSION_ON,
+ .dtv6_if_freq_khz = TDA10048_IF_3300,
+ .dtv7_if_freq_khz = TDA10048_IF_3500,
+ .dtv8_if_freq_khz = TDA10048_IF_4000,
+ .clk_freq_khz = TDA10048_CLK_16000,
};
static struct s5h1409_config hauppauge_ezqam_config = {
@@ -194,6 +211,16 @@ static struct s5h1411_config dvico_s5h1411_config = {
.mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};
+static struct s5h1411_config hcw_s5h1411_config = {
+ .output_mode = S5H1411_SERIAL_OUTPUT,
+ .gpio = S5H1411_GPIO_OFF,
+ .vsb_if = S5H1411_IF_44000,
+ .qam_if = S5H1411_IF_4000,
+ .inversion = S5H1411_INVERSION_ON,
+ .status_mode = S5H1411_DEMODLOCKING,
+ .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
+};
+
static struct xc5000_config hauppauge_hvr1500q_tunerconfig = {
.i2c_address = 0x61,
.if_khz = 5380,
@@ -224,6 +251,32 @@ static struct tda18271_config hauppauge_hvr1200_tuner_config = {
.gate = TDA18271_GATE_ANALOG,
};
+static struct tda18271_config hauppauge_hvr1210_tuner_config = {
+ .gate = TDA18271_GATE_DIGITAL,
+};
+
+static struct tda18271_std_map hauppauge_hvr127x_std_map = {
+ .atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 4,
+ .if_lvl = 1, .rfagc_top = 0x58 },
+ .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 5,
+ .if_lvl = 1, .rfagc_top = 0x58 },
+};
+
+static struct tda18271_config hauppauge_hvr127x_config = {
+ .std_map = &hauppauge_hvr127x_std_map,
+};
+
+static struct lgdt3305_config hauppauge_lgdt3305_config = {
+ .i2c_addr = 0x0e,
+ .mpeg_mode = LGDT3305_MPEG_SERIAL,
+ .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE,
+ .tpvalid_polarity = LGDT3305_TP_VALID_HIGH,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 1,
+ .qam_if_khz = 4000,
+ .vsb_if_khz = 3250,
+};
+
static struct dibx000_agc_config xc3028_agc_config = {
BAND_VHF | BAND_UHF, /* band_caps */
@@ -368,10 +421,29 @@ static struct cx24116_config dvbworld_cx24116_config = {
.demod_address = 0x05,
};
+static struct lgs8gxx_config mygica_x8506_lgs8gl5_config = {
+ .prod = LGS8GXX_PROD_LGS8GL5,
+ .demod_address = 0x19,
+ .serial_ts = 0,
+ .ts_clk_pol = 1,
+ .ts_clk_gated = 1,
+ .if_clk_freq = 30400, /* 30.4 MHz */
+ .if_freq = 5380, /* 5.38 MHz */
+ .if_neg_center = 1,
+ .ext_adc = 0,
+ .adc_signed = 0,
+ .if_neg_edge = 0,
+};
+
+static struct xc5000_config mygica_x8506_xc5000_config = {
+ .i2c_address = 0x61,
+ .if_khz = 5380,
+};
+
static int dvb_register(struct cx23885_tsport *port)
{
struct cx23885_dev *dev = port->dev;
- struct cx23885_i2c *i2c_bus = NULL;
+ struct cx23885_i2c *i2c_bus = NULL, *i2c_bus2 = NULL;
struct videobuf_dvb_frontend *fe0;
int ret;
@@ -396,6 +468,29 @@ static int dvb_register(struct cx23885_tsport *port)
&hauppauge_generic_tunerconfig, 0);
}
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
+ case CX23885_BOARD_HAUPPAUGE_HVR1275:
+ i2c_bus = &dev->i2c_bus[0];
+ fe0->dvb.frontend = dvb_attach(lgdt3305_attach,
+ &hauppauge_lgdt3305_config,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_bus[1].i2c_adap,
+ &hauppauge_hvr127x_config);
+ }
+ break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ i2c_bus = &dev->i2c_bus[0];
+ fe0->dvb.frontend = dvb_attach(s5h1411_attach,
+ &hcw_s5h1411_config,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_bus[1].i2c_adap,
+ &hauppauge_tda18271_config);
+ }
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1800:
i2c_bus = &dev->i2c_bus[0];
switch (alt_tuner) {
@@ -496,6 +591,17 @@ static int dvb_register(struct cx23885_tsport *port)
&hauppauge_hvr1200_tuner_config);
}
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1210:
+ i2c_bus = &dev->i2c_bus[0];
+ fe0->dvb.frontend = dvb_attach(tda10048_attach,
+ &hauppauge_hvr1210_config,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_bus[1].i2c_adap,
+ &hauppauge_hvr1210_tuner_config);
+ }
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1400:
i2c_bus = &dev->i2c_bus[0];
fe0->dvb.frontend = dvb_attach(dib7000p_attach,
@@ -659,6 +765,19 @@ static int dvb_register(struct cx23885_tsport *port)
break;
}
break;
+ case CX23885_BOARD_MYGICA_X8506:
+ i2c_bus = &dev->i2c_bus[0];
+ i2c_bus2 = &dev->i2c_bus[1];
+ fe0->dvb.frontend = dvb_attach(lgs8gxx_attach,
+ &mygica_x8506_lgs8gl5_config,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(xc5000_attach,
+ fe0->dvb.frontend,
+ &i2c_bus2->i2c_adap,
+ &mygica_x8506_xc5000_config);
+ }
+ break;
default:
printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
" isn't supported yet\n",
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index 3421bd12056..384dec34134 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -357,6 +357,18 @@ int cx23885_i2c_register(struct cx23885_i2c *bus)
printk(KERN_WARNING "%s: i2c bus %d register FAILED\n",
dev->name, bus->nr);
+ /* Instantiate the IR receiver device, if present */
+ if (0 == bus->i2c_rc) {
+ struct i2c_board_info info;
+ const unsigned short addr_list[] = {
+ 0x6b, I2C_CLIENT_END
+ };
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+ i2c_new_probed_device(&bus->i2c_adap, &info, addr_list);
+ }
+
return bus->i2c_rc;
}
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 68068c6d098..66bbd2e7110 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -796,6 +796,7 @@ static unsigned int video_poll(struct file *file,
{
struct cx23885_fh *fh = file->private_data;
struct cx23885_buffer *buf;
+ unsigned int rc = POLLERR;
if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
if (!res_get(fh->dev, fh, RESOURCE_VBI))
@@ -803,23 +804,28 @@ static unsigned int video_poll(struct file *file,
return videobuf_poll_stream(file, &fh->vbiq, wait);
}
+ mutex_lock(&fh->vidq.vb_lock);
if (res_check(fh, RESOURCE_VIDEO)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
- return POLLERR;
+ goto done;
buf = list_entry(fh->vidq.stream.next,
struct cx23885_buffer, vb.stream);
} else {
/* read() capture */
buf = (struct cx23885_buffer *)fh->vidq.read_buf;
if (NULL == buf)
- return POLLERR;
+ goto done;
}
poll_wait(file, &buf->vb.done, wait);
if (buf->vb.state == VIDEOBUF_DONE ||
buf->vb.state == VIDEOBUF_ERROR)
- return POLLIN|POLLRDNORM;
- return 0;
+ rc = POLLIN|POLLRDNORM;
+ else
+ rc = 0;
+done:
+ mutex_unlock(&fh->vidq.vb_lock);
+ return rc;
}
static int video_release(struct file *file)
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index 85642831ea8..1a2ac518a3f 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -71,6 +71,22 @@
#define CX23885_BOARD_TEVII_S470 15
#define CX23885_BOARD_DVBWORLD_2005 16
#define CX23885_BOARD_NETUP_DUAL_DVBS2_CI 17
+#define CX23885_BOARD_HAUPPAUGE_HVR1270 18
+#define CX23885_BOARD_HAUPPAUGE_HVR1275 19
+#define CX23885_BOARD_HAUPPAUGE_HVR1255 20
+#define CX23885_BOARD_HAUPPAUGE_HVR1210 21
+#define CX23885_BOARD_MYGICA_X8506 22
+
+#define GPIO_0 0x00000001
+#define GPIO_1 0x00000002
+#define GPIO_2 0x00000004
+#define GPIO_3 0x00000008
+#define GPIO_4 0x00000010
+#define GPIO_5 0x00000020
+#define GPIO_6 0x00000040
+#define GPIO_7 0x00000080
+#define GPIO_8 0x00000100
+#define GPIO_9 0x00000200
/* Currently unsupported by the driver: PAL/H, NTSC/Kr, SECAM B/G/H/LC */
#define CX23885_NORMS (\
@@ -422,6 +438,11 @@ extern int cx23885_restart_queue(struct cx23885_tsport *port,
extern void cx23885_wakeup(struct cx23885_tsport *port,
struct cx23885_dmaqueue *q, u32 count);
+extern void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask);
+extern void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask);
+extern void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask,
+ int asoutput);
+
/* ----------------------------------------------------------- */
/* cx23885-cards.c */
diff --git a/drivers/media/video/cx88/Makefile b/drivers/media/video/cx88/Makefile
index b06b1275a9e..5b7e26761f0 100644
--- a/drivers/media/video/cx88/Makefile
+++ b/drivers/media/video/cx88/Makefile
@@ -1,5 +1,5 @@
cx88xx-objs := cx88-cards.o cx88-core.o cx88-i2c.o cx88-tvaudio.o \
- cx88-input.o
+ cx88-dsp.o cx88-input.o
cx8800-objs := cx88-video.o cx88-vbi.o
cx8802-objs := cx88-mpeg.o
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 0ccdf36626e..5a67445dd6e 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -871,7 +871,7 @@ static struct pci_driver cx88_audio_pci_driver = {
.name = "cx88_audio",
.id_table = cx88_audio_pci_tbl,
.probe = cx88_audio_initdev,
- .remove = cx88_audio_finidev,
+ .remove = __devexit_p(cx88_audio_finidev),
};
/****************************************************************************
@@ -881,7 +881,7 @@ static struct pci_driver cx88_audio_pci_driver = {
/*
* module init
*/
-static int cx88_audio_init(void)
+static int __init cx88_audio_init(void)
{
printk(KERN_INFO "cx2388x alsa driver version %d.%d.%d loaded\n",
(CX88_VERSION_CODE >> 16) & 0xff,
@@ -897,9 +897,8 @@ static int cx88_audio_init(void)
/*
* module remove
*/
-static void cx88_audio_fini(void)
+static void __exit cx88_audio_fini(void)
{
-
pci_unregister_driver(&cx88_audio_pci_driver);
}
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 6bbbfc66bb4..94b7a52629d 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1969,6 +1969,54 @@ static const struct cx88_board cx88_boards[] = {
},
.mpeg = CX88_MPEG_DVB,
},
+ [CX88_BOARD_HAUPPAUGE_IRONLY] = {
+ .name = "Hauppauge WinTV-IR Only",
+ .tuner_type = UNSET,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ },
+ [CX88_BOARD_WINFAST_DTV1800H] = {
+ .name = "Leadtek WinFast DTV1800 Hybrid",
+ .tuner_type = TUNER_XC2028,
+ .radio_type = TUNER_XC2028,
+ .tuner_addr = 0x61,
+ .radio_addr = 0x61,
+ /*
+ * GPIO setting
+ *
+ * 2: mute (0=off,1=on)
+ * 12: tuner reset pin
+ * 13: audio source (0=tuner audio,1=line in)
+ * 14: FM (0=on,1=off ???)
+ */
+ .input = {{
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6040, /* pin 13 = 0, pin 14 = 1 */
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
+ .gpio2 = 0x0000,
+ } },
+ .radio = {
+ .type = CX88_RADIO,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6000, /* pin 13 = 0, pin 14 = 0 */
+ .gpio2 = 0x0000,
+ },
+ .mpeg = CX88_MPEG_DVB,
+ },
};
/* ------------------------------------------------------------------ */
@@ -2382,6 +2430,14 @@ static const struct cx88_subid cx88_subids[] = {
.subvendor = 0x153b,
.subdevice = 0x1177,
.card = CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0x9290,
+ .card = CX88_BOARD_HAUPPAUGE_IRONLY,
+ }, {
+ .subvendor = 0x107d,
+ .subdevice = 0x6654,
+ .card = CX88_BOARD_WINFAST_DTV1800H,
},
};
@@ -2448,6 +2504,7 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
case 90500: /* Nova-T-PCI (oem) */
case 90501: /* Nova-T-PCI (oem/IR) */
case 92000: /* Nova-SE2 (OEM, No Video or IR) */
+ case 92900: /* WinTV-IROnly (No analog or digital Video inputs) */
case 94009: /* WinTV-HVR1100 (Video and IR Retail) */
case 94501: /* WinTV-HVR1100 (Video and IR OEM) */
case 96009: /* WinTV-HVR1300 (PAL Video, MPEG Video and IR RX) */
@@ -2579,6 +2636,23 @@ static int cx88_xc3028_geniatech_tuner_callback(struct cx88_core *core,
return -EINVAL;
}
+static int cx88_xc3028_winfast1800h_callback(struct cx88_core *core,
+ int command, int arg)
+{
+ switch (command) {
+ case XC2028_TUNER_RESET:
+ /* GPIO 12 (xc3028 tuner reset) */
+ cx_set(MO_GP1_IO, 0x1010);
+ mdelay(50);
+ cx_clear(MO_GP1_IO, 0x10);
+ mdelay(50);
+ cx_set(MO_GP1_IO, 0x10);
+ mdelay(50);
+ return 0;
+ }
+ return -EINVAL;
+}
+
/* ------------------------------------------------------------------- */
/* some Divco specific stuff */
static int cx88_pv_8000gt_callback(struct cx88_core *core,
@@ -2651,6 +2725,8 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
return cx88_dvico_xc2028_callback(core, command, arg);
+ case CX88_BOARD_WINFAST_DTV1800H:
+ return cx88_xc3028_winfast1800h_callback(core, command, arg);
}
switch (command) {
@@ -2690,10 +2766,22 @@ static int cx88_xc5000_tuner_callback(struct cx88_core *core,
switch (core->boardnr) {
case CX88_BOARD_PINNACLE_PCTV_HD_800i:
if (command == 0) { /* This is the reset command from xc5000 */
- /* Reset XC5000 tuner via SYS_RSTO_pin */
- cx_write(MO_SRST_IO, 0);
- msleep(10);
- cx_write(MO_SRST_IO, 1);
+
+ /* djh - According to the engineer at PCTV Systems,
+ the xc5000 reset pin is supposed to be on GPIO12.
+ However, despite three nights of effort, pulling
+ that GPIO low didn't reset the xc5000. While
pulling MO_SRST_IO low does reset the xc5000, it
also results in the s5h1409 being reset.
+ This causes tuning to always fail since the internal
+ state of the s5h1409 does not match the driver's
+ state. Given that the only two conditions in which
the driver performs a reset are during firmware load
+ and powering down the chip, I am taking out the
+ reset. We know that the chip is being reset
+ when the cx88 comes online, and not being able to
+ do power management for this board is worse than
+ not having any tuning at all. */
return 0;
} else {
err_printk(core, "xc5000: unknown tuner "
@@ -2825,6 +2913,16 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
cx_set(MO_GP0_IO, 0x00000080); /* 702 out of reset */
udelay(1000);
break;
+
+ case CX88_BOARD_WINFAST_DTV1800H:
+ /* GPIO 12 (xc3028 tuner reset) */
+ cx_set(MO_GP1_IO, 0x1010);
+ mdelay(50);
+ cx_clear(MO_GP1_IO, 0x10);
+ mdelay(50);
+ cx_set(MO_GP1_IO, 0x10);
+ mdelay(50);
+ break;
}
}
@@ -2845,6 +2943,7 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
core->i2c_algo.udelay = 16;
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
+ case CX88_BOARD_WINFAST_DTV1800H:
ctl->demod = XC3028_FE_ZARLINK456;
break;
case CX88_BOARD_KWORLD_ATSC_120:
@@ -2907,6 +3006,7 @@ static void cx88_card_setup(struct cx88_core *core)
case CX88_BOARD_HAUPPAUGE_HVR1300:
case CX88_BOARD_HAUPPAUGE_HVR4000:
case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
+ case CX88_BOARD_HAUPPAUGE_IRONLY:
if (0 == core->i2c_rc)
hauppauge_eeprom(core, eeprom);
break;
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index 0e149b22bd1..cf634606ba9 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -231,7 +231,7 @@ cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf)
* can use the whole SDRAM for the DMA fifos. To simplify things, we
* use a static memory layout. That surely will waste memory in case
* we don't use all DMA channels at the same time (which will be the
- * case most of the time). But that still gives us enougth FIFO space
+ * case most of the time). But that still gives us enough FIFO space
* to be able to deal with insane long pci latencies ...
*
* FIFO space allocations:
@@ -241,6 +241,7 @@ cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf)
* channel 24 (vbi) - 4.0k
* channels 25+26 (audio) - 4.0k
* channel 28 (mpeg) - 4.0k
+ * channel 27 (audio rds) - 3.0k
* TOTAL = 29.0k
*
* Every channel has 160 bytes control data (64 bytes instruction
@@ -337,6 +338,18 @@ struct sram_channel cx88_sram_channels[] = {
.cnt1_reg = MO_DMA28_CNT1,
.cnt2_reg = MO_DMA28_CNT2,
},
+ [SRAM_CH27] = {
+ .name = "audio rds",
+ .cmds_start = 0x1801C0,
+ .ctrl_start = 0x180860,
+ .cdt = 0x180860 + 64,
+ .fifo_start = 0x187400,
+ .fifo_size = 0x000C00,
+ .ptr1_reg = MO_DMA27_PTR1,
+ .ptr2_reg = MO_DMA27_PTR2,
+ .cnt1_reg = MO_DMA27_CNT1,
+ .cnt2_reg = MO_DMA27_CNT2,
+ },
};
int cx88_sram_channel_setup(struct cx88_core *core,
@@ -598,6 +611,7 @@ int cx88_reset(struct cx88_core *core)
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH25], 128, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH26], 128, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28], 188*4, 0);
+ cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH27], 128, 0);
/* misc init ... */
cx_write(MO_INPUT_FORMAT, ((1 << 13) | // agc enable
@@ -796,6 +810,8 @@ int cx88_start_audio_dma(struct cx88_core *core)
/* constant 128 made buzz in analog Nicam-stereo for bigger fifo_size */
int bpl = cx88_sram_channels[SRAM_CH25].fifo_size/4;
+ int rds_bpl = cx88_sram_channels[SRAM_CH27].fifo_size/AUD_RDS_LINES;
+
/* If downstream RISC is enabled, bail out; ALSA is managing DMA */
if (cx_read(MO_AUD_DMACNTRL) & 0x10)
return 0;
@@ -803,12 +819,14 @@ int cx88_start_audio_dma(struct cx88_core *core)
/* setup fifo + format */
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH25], bpl, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH26], bpl, 0);
+ cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH27],
+ rds_bpl, 0);
cx_write(MO_AUDD_LNGTH, bpl); /* fifo bpl size */
- cx_write(MO_AUDR_LNGTH, bpl); /* fifo bpl size */
+ cx_write(MO_AUDR_LNGTH, rds_bpl); /* fifo bpl size */
- /* start dma */
- cx_write(MO_AUD_DMACNTRL, 0x0003); /* Up and Down fifo enable */
+ /* enable Up, Down and Audio RDS fifo */
+ cx_write(MO_AUD_DMACNTRL, 0x0007);
return 0;
}
@@ -1010,7 +1028,6 @@ struct video_device *cx88_vdev_init(struct cx88_core *core,
if (NULL == vfd)
return NULL;
*vfd = *template;
- vfd->minor = -1;
vfd->v4l2_dev = &core->v4l2_dev;
vfd->parent = &pci->dev;
vfd->release = video_device_release;
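Worked numbers for the RDS FIFO sizing above, assuming the values introduced in this patch (SRAM_CH27 fifo_size = 0x000C00 and AUD_RDS_LINES = 4, defined in cx88.h further down): rds_bpl comes out to 3072 / 4 = 768 bytes per line, and read_rds_samples() in the new cx88-dsp.c below keeps the last AUD_RDS_LINES - 1 lines at one 16-bit sample per 4-byte slot, i.e. (768 / 4) * 3 = 576 samples per detection pass. As a self-contained check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned int fifo_size = 0x000C00;	/* SRAM_CH27 fifo_size      */
	const unsigned int aud_rds_lines = 4;		/* AUD_RDS_LINES            */
	unsigned int bpl = fifo_size / aud_rds_lines;	/* bytes per line:   768    */
	unsigned int spl = bpl / 4;			/* samples per line: 192    */
	unsigned int n = spl * (aud_rds_lines - 1);	/* samples read:     576    */

	printf("bpl=%u spl=%u samples=%u\n", bpl, spl, n);
	return 0;
}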
diff --git a/drivers/media/video/cx88/cx88-dsp.c b/drivers/media/video/cx88/cx88-dsp.c
new file mode 100644
index 00000000000..3e5eaf3fe2a
--- /dev/null
+++ b/drivers/media/video/cx88/cx88-dsp.c
@@ -0,0 +1,312 @@
+/*
+ *
+ * Stereo and SAP detection for cx88
+ *
+ * Copyright (c) 2009 Marton Balint <cus@fazekas.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <asm/div64.h>
+
+#include "cx88.h"
+#include "cx88-reg.h"
+
+#define INT_PI ((s32)(3.141592653589 * 32768.0))
+
+#define compat_remainder(a, b) \
+ ((float)(((s32)((a)*100))%((s32)((b)*100)))/100.0)
+
+#define baseband_freq(carrier, srate, tone) ((s32)( \
+ (compat_remainder(carrier + tone, srate)) / srate * 2 * INT_PI))
+
+/* We calculate the baseband frequencies of the carrier and the pilot tones
+ * based on the sampling rate of the audio RDS fifo. */
+
+#define FREQ_A2_CARRIER baseband_freq(54687.5, 2689.36, 0.0)
+#define FREQ_A2_DUAL baseband_freq(54687.5, 2689.36, 274.1)
+#define FREQ_A2_STEREO baseband_freq(54687.5, 2689.36, 117.5)
+
+/* The frequencies below are from the reference driver. They probably need
+ * further adjustments, because they are not tested at all. You may even need
+ * to play a bit with the registers of the chip to select the proper signal
+ * for the input of the audio RDS fifo, and measure its sampling rate to
+ * calculate the proper baseband frequencies... */
+
+#define FREQ_A2M_CARRIER ((s32)(2.114516 * 32768.0))
+#define FREQ_A2M_DUAL ((s32)(2.754916 * 32768.0))
+#define FREQ_A2M_STEREO ((s32)(2.462326 * 32768.0))
+
+#define FREQ_EIAJ_CARRIER ((s32)(1.963495 * 32768.0)) /* 5pi/8 */
+#define FREQ_EIAJ_DUAL ((s32)(2.562118 * 32768.0))
+#define FREQ_EIAJ_STEREO ((s32)(2.601053 * 32768.0))
+
+#define FREQ_BTSC_DUAL ((s32)(1.963495 * 32768.0)) /* 5pi/8 */
+#define FREQ_BTSC_DUAL_REF ((s32)(1.374446 * 32768.0)) /* 7pi/16 */
+
+#define FREQ_BTSC_SAP ((s32)(2.471532 * 32768.0))
+#define FREQ_BTSC_SAP_REF ((s32)(1.730072 * 32768.0))
+
+/* The spectrum of the signal should be empty between these frequencies. */
+#define FREQ_NOISE_START ((s32)(0.100000 * 32768.0))
+#define FREQ_NOISE_END ((s32)(1.200000 * 32768.0))
+
+static unsigned int dsp_debug;
+module_param(dsp_debug, int, 0644);
+MODULE_PARM_DESC(dsp_debug, "enable audio dsp debug messages");
+
+#define dprintk(level, fmt, arg...) if (dsp_debug >= level) \
+ printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
+
+static s32 int_cos(u32 x)
+{
+ u32 t2, t4, t6, t8;
+ s32 ret;
+ u16 period = x / INT_PI;
+ if (period % 2)
+ return -int_cos(x - INT_PI);
+ x = x % INT_PI;
+ if (x > INT_PI/2)
+ return -int_cos(INT_PI/2 - (x % (INT_PI/2)));
+ /* Now x is between 0 and INT_PI/2.
+ * To calculate cos(x) we use its Taylor polynomial. */
+ t2 = x*x/32768/2;
+ t4 = t2*x/32768*x/32768/3/4;
+ t6 = t4*x/32768*x/32768/5/6;
+ t8 = t6*x/32768*x/32768/7/8;
+ ret = 32768-t2+t4-t6+t8;
+ return ret;
+}
+
+static u32 int_goertzel(s16 x[], u32 N, u32 freq)
+{
+ /* We use the Goertzel algorithm to determine the power of the
+ * given frequency in the signal */
+ s32 s_prev = 0;
+ s32 s_prev2 = 0;
+ s32 coeff = 2*int_cos(freq);
+ u32 i;
+
+ u64 tmp;
+ u32 divisor;
+
+ for (i = 0; i < N; i++) {
+ s32 s = x[i] + ((s64)coeff*s_prev/32768) - s_prev2;
+ s_prev2 = s_prev;
+ s_prev = s;
+ }
+
+ tmp = (s64)s_prev2 * s_prev2 + (s64)s_prev * s_prev -
+ (s64)coeff * s_prev2 * s_prev / 32768;
+
+ /* XXX: N must be low enough so that N*N fits in s32.
+ * Else we need two divisions. */
+ divisor = N * N;
+ do_div(tmp, divisor);
+
+ return (u32) tmp;
+}
+
+static u32 freq_magnitude(s16 x[], u32 N, u32 freq)
+{
+ u32 sum = int_goertzel(x, N, freq);
+ return (u32)int_sqrt(sum);
+}
+
+static u32 noise_magnitude(s16 x[], u32 N, u32 freq_start, u32 freq_end)
+{
+ int i;
+ u32 sum = 0;
+ u32 freq_step;
+ int samples = 5;
+
+ if (N > 192) {
+ /* The last 192 samples are enough for noise detection */
+ x += (N-192);
+ N = 192;
+ }
+
+ freq_step = (freq_end - freq_start) / (samples - 1);
+
+ for (i = 0; i < samples; i++) {
+ sum += int_goertzel(x, N, freq_start);
+ freq_start += freq_step;
+ }
+
+ return (u32)int_sqrt(sum / samples);
+}
+
+static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N)
+{
+ s32 carrier, stereo, dual, noise;
+ s32 carrier_freq, stereo_freq, dual_freq;
+ s32 ret;
+
+ switch (core->tvaudio) {
+ case WW_BG:
+ case WW_DK:
+ carrier_freq = FREQ_A2_CARRIER;
+ stereo_freq = FREQ_A2_STEREO;
+ dual_freq = FREQ_A2_DUAL;
+ break;
+ case WW_M:
+ carrier_freq = FREQ_A2M_CARRIER;
+ stereo_freq = FREQ_A2M_STEREO;
+ dual_freq = FREQ_A2M_DUAL;
+ break;
+ case WW_EIAJ:
+ carrier_freq = FREQ_EIAJ_CARRIER;
+ stereo_freq = FREQ_EIAJ_STEREO;
+ dual_freq = FREQ_EIAJ_DUAL;
+ break;
+ default:
+ printk(KERN_WARNING "%s/0: unsupported audio mode %d for %s\n",
+ core->name, core->tvaudio, __func__);
+ return UNSET;
+ }
+
+ carrier = freq_magnitude(x, N, carrier_freq);
+ stereo = freq_magnitude(x, N, stereo_freq);
+ dual = freq_magnitude(x, N, dual_freq);
+ noise = noise_magnitude(x, N, FREQ_NOISE_START, FREQ_NOISE_END);
+
+ dprintk(1, "detect a2/a2m/eiaj: carrier=%d, stereo=%d, dual=%d, "
+ "noise=%d\n", carrier, stereo, dual, noise);
+
+ if (stereo > dual)
+ ret = V4L2_TUNER_SUB_STEREO;
+ else
+ ret = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
+
+ if (core->tvaudio == WW_EIAJ) {
+ /* EIAJ checks may need adjustments */
+ if ((carrier > max(stereo, dual)*2) &&
+ (carrier < max(stereo, dual)*6) &&
+ (carrier > 20 && carrier < 200) &&
+ (max(stereo, dual) > min(stereo, dual))) {
+ /* For EIAJ the carrier is always present,
+ so we probably don't need noise detection */
+ return ret;
+ }
+ } else {
+ if ((carrier > max(stereo, dual)*2) &&
+ (carrier < max(stereo, dual)*8) &&
+ (carrier > 20 && carrier < 200) &&
+ (noise < 10) &&
+ (max(stereo, dual) > min(stereo, dual)*2)) {
+ return ret;
+ }
+ }
+ return V4L2_TUNER_SUB_MONO;
+}
+
+static s32 detect_btsc(struct cx88_core *core, s16 x[], u32 N)
+{
+ s32 sap_ref = freq_magnitude(x, N, FREQ_BTSC_SAP_REF);
+ s32 sap = freq_magnitude(x, N, FREQ_BTSC_SAP);
+ s32 dual_ref = freq_magnitude(x, N, FREQ_BTSC_DUAL_REF);
+ s32 dual = freq_magnitude(x, N, FREQ_BTSC_DUAL);
+ dprintk(1, "detect btsc: dual_ref=%d, dual=%d, sap_ref=%d, sap=%d"
+ "\n", dual_ref, dual, sap_ref, sap);
+ /* FIXME: Currently not supported */
+ return UNSET;
+}
+
+static s16 *read_rds_samples(struct cx88_core *core, u32 *N)
+{
+ struct sram_channel *srch = &cx88_sram_channels[SRAM_CH27];
+ s16 *samples;
+
+ unsigned int i;
+ unsigned int bpl = srch->fifo_size/AUD_RDS_LINES;
+ unsigned int spl = bpl/4;
+ unsigned int sample_count = spl*(AUD_RDS_LINES-1);
+
+ u32 current_address = cx_read(srch->ptr1_reg);
+ u32 offset = (current_address - srch->fifo_start + bpl);
+
+ dprintk(1, "read RDS samples: current_address=%08x (offset=%08x), "
+ "sample_count=%d, aud_intstat=%08x\n", current_address,
+ current_address - srch->fifo_start, sample_count,
+ cx_read(MO_AUD_INTSTAT));
+
+ samples = kmalloc(sizeof(s16)*sample_count, GFP_KERNEL);
+ if (!samples)
+ return NULL;
+
+ *N = sample_count;
+
+ for (i = 0; i < sample_count; i++) {
+ offset = offset % (AUD_RDS_LINES*bpl);
+ samples[i] = cx_read(srch->fifo_start + offset);
+ offset += 4;
+ }
+
+ if (dsp_debug >= 2) {
+ dprintk(2, "RDS samples dump: ");
+ for (i = 0; i < sample_count; i++)
+ printk("%hd ", samples[i]);
+ printk(".\n");
+ }
+
+ return samples;
+}
+
+s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core)
+{
+ s16 *samples;
+ u32 N = 0;
+ s32 ret = UNSET;
+
+ /* If audio RDS fifo is disabled, we can't read the samples */
+ if (!(cx_read(MO_AUD_DMACNTRL) & 0x04))
+ return ret;
+ if (!(cx_read(AUD_CTL) & EN_FMRADIO_EN_RDS))
+ return ret;
+
+ /* Wait at least 500 ms after an audio standard change */
+ if (time_before(jiffies, core->last_change + msecs_to_jiffies(500)))
+ return ret;
+
+ samples = read_rds_samples(core, &N);
+
+ if (!samples)
+ return ret;
+
+ switch (core->tvaudio) {
+ case WW_BG:
+ case WW_DK:
+ ret = detect_a2_a2m_eiaj(core, samples, N);
+ break;
+ case WW_BTSC:
+ ret = detect_btsc(core, samples, N);
+ break;
+ }
+
+ kfree(samples);
+
+ if (UNSET != ret)
+ dprintk(1, "stereo/sap detection result:%s%s%s\n",
+ (ret & V4L2_TUNER_SUB_MONO) ? " mono" : "",
+ (ret & V4L2_TUNER_SUB_STEREO) ? " stereo" : "",
+ (ret & V4L2_TUNER_SUB_LANG2) ? " dual" : "");
+
+ return ret;
+}
+EXPORT_SYMBOL(cx88_dsp_detect_stereo_sap);
+
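The new cx88-dsp.c boils stereo/SAP detection down to running a Goertzel filter at a handful of known carrier and pilot frequencies over the samples pulled from the audio RDS FIFO, then comparing the resulting magnitudes. int_goertzel() does this in fixed point, with angular frequencies pre-scaled by 32768 and int_cos() approximated by a truncated Taylor series. For reference, a floating-point sketch of the same power measurement (hypothetical helper name, standard C only):

#include <math.h>
#include <stddef.h>

/* Power of the tone at angular frequency "freq" (radians per sample),
 * normalized by n*n exactly as int_goertzel() does above. */
static double goertzel_power(const short *x, size_t n, double freq)
{
	double coeff = 2.0 * cos(freq);
	double s_prev = 0.0, s_prev2 = 0.0;
	size_t i;

	for (i = 0; i < n; i++) {
		double s = (double)x[i] + coeff * s_prev - s_prev2;
		s_prev2 = s_prev;
		s_prev = s;
	}
	return (s_prev2 * s_prev2 + s_prev * s_prev -
		coeff * s_prev * s_prev2) / ((double)n * (double)n);
}

detect_a2_a2m_eiaj() then reduces to comparisons on the square roots of these powers: stereo is reported when the stereo pilot outranks the dual pilot and the carrier magnitude sits in a plausible window relative to the pilots (plus a noise-floor check between FREQ_NOISE_START and FREQ_NOISE_END outside the EIAJ case).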
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 9389cf290c1..c44e8760021 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -1014,6 +1014,7 @@ static int dvb_register(struct cx8802_dev *dev)
}
break;
case CX88_BOARD_PINNACLE_HYBRID_PCTV:
+ case CX88_BOARD_WINFAST_DTV1800H:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&cx88_pinnacle_hybrid_pctv,
&core->i2c_adap);
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 996b4ed5a4f..ee1ca39db06 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -180,6 +180,19 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
do_i2c_scan(core->name,&core->i2c_client);
} else
printk("%s: i2c register FAILED\n", core->name);
+
+ /* Instantiate the IR receiver device, if present */
+ if (0 == core->i2c_rc) {
+ struct i2c_board_info info;
+ const unsigned short addr_list[] = {
+ 0x18, 0x6b, 0x71,
+ I2C_CLIENT_END
+ };
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+ i2c_new_probed_device(&core->i2c_adap, &info, addr_list);
+ }
return core->i2c_rc;
}
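The cx88-i2c.c hunk above and the cx23885-i2c.c hunk earlier move IR receivers to new-style I2C device instantiation (the cx231xx hunks drop the legacy attach_inform()/client_register callback and leave instantiation as a REVISIT): the bridge driver probes a short list of candidate addresses and binds whichever responds to the "ir_video" driver. The pattern, extracted as a sketch (the adapter argument and address list are illustrative):

#include <linux/i2c.h>
#include <linux/string.h>

static void instantiate_ir_receiver(struct i2c_adapter *adap)
{
	struct i2c_board_info info;
	const unsigned short addr_list[] = { 0x18, 0x6b, 0x71, I2C_CLIENT_END };

	memset(&info, 0, sizeof(info));
	strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
	/* binds the first responding address to the "ir_video" i2c driver */
	i2c_new_probed_device(adap, &info, addr_list);
}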
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index ec05312a9b6..d91f5c51206 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -91,6 +91,8 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
gpio=(gpio & 0x7fd) + (auxgpio & 0xef);
break;
case CX88_BOARD_WINFAST_DTV1000:
+ case CX88_BOARD_WINFAST_DTV1800H:
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
gpio = (gpio & 0x6ff) | ((cx_read(MO_GP1_IO) << 8) & 0x900);
auxgpio = gpio;
break;
@@ -217,11 +219,13 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
case CX88_BOARD_PCHDTV_HD3000:
case CX88_BOARD_PCHDTV_HD5500:
+ case CX88_BOARD_HAUPPAUGE_IRONLY:
ir_codes = ir_codes_hauppauge_new;
ir_type = IR_TYPE_RC5;
ir->sampling = 1;
break;
case CX88_BOARD_WINFAST_DTV2000H:
+ case CX88_BOARD_WINFAST_DTV1800H:
ir_codes = ir_codes_winfast;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0x8f8;
@@ -230,6 +234,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
break;
case CX88_BOARD_WINFAST2000XP_EXPERT:
case CX88_BOARD_WINFAST_DTV1000:
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
ir_codes = ir_codes_winfast;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0x8f8;
@@ -459,6 +464,7 @@ void cx88_ir_irq(struct cx88_core *core)
case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
case CX88_BOARD_PCHDTV_HD3000:
case CX88_BOARD_PCHDTV_HD5500:
+ case CX88_BOARD_HAUPPAUGE_IRONLY:
ircode = ir_decode_biphase(ir->samples, ir->scount, 5, 7);
ir_dprintk("biphase decoded: %x\n", ircode);
/*
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 7dd506b987f..e8316cf7f32 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -163,6 +163,8 @@ static void set_audio_finish(struct cx88_core *core, u32 ctl)
/* unmute */
volume = cx_sread(SHADOW_AUD_VOL_CTL);
cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, volume);
+
+ core->last_change = jiffies;
}
/* ----------------------------------------------------------- */
@@ -745,6 +747,7 @@ void cx88_set_tvaudio(struct cx88_core *core)
break;
case WW_BG:
case WW_DK:
+ case WW_M:
case WW_I:
case WW_L:
/* prepare all dsp registers */
@@ -756,6 +759,7 @@ void cx88_set_tvaudio(struct cx88_core *core)
if (0 == cx88_detect_nicam(core)) {
/* fall back to fm / am mono */
set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
+ core->audiomode_current = V4L2_TUNER_MODE_MONO;
core->use_nicam = 0;
} else {
core->use_nicam = 1;
@@ -787,6 +791,7 @@ void cx88_set_tvaudio(struct cx88_core *core)
void cx88_newstation(struct cx88_core *core)
{
core->audiomode_manual = UNSET;
+ core->last_change = jiffies;
}
void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
@@ -805,12 +810,50 @@ void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
aud_ctl_names[cx_read(AUD_CTL) & 63]);
core->astat = reg;
-/* TODO
- Reading from AUD_STATUS is not enough
- for auto-detecting sap/dual-fm/nicam.
- Add some code here later.
-*/
+ t->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_SAP |
+ V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
+ t->rxsubchans = UNSET;
+ t->audmode = V4L2_TUNER_MODE_MONO;
+
+ switch (mode) {
+ case 0:
+ t->audmode = V4L2_TUNER_MODE_STEREO;
+ break;
+ case 1:
+ t->audmode = V4L2_TUNER_MODE_LANG2;
+ break;
+ case 2:
+ t->audmode = V4L2_TUNER_MODE_MONO;
+ break;
+ case 3:
+ t->audmode = V4L2_TUNER_MODE_SAP;
+ break;
+ }
+ switch (core->tvaudio) {
+ case WW_BTSC:
+ case WW_BG:
+ case WW_DK:
+ case WW_M:
+ case WW_EIAJ:
+ if (!core->use_nicam) {
+ t->rxsubchans = cx88_dsp_detect_stereo_sap(core);
+ break;
+ }
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+
+ /* If software stereo detection is not supported... */
+ if (UNSET == t->rxsubchans) {
+ t->rxsubchans = V4L2_TUNER_SUB_MONO;
+ /* If the hardware itself detected stereo, also return
+ stereo as an available subchannel */
+ if (V4L2_TUNER_MODE_STEREO == t->audmode)
+ t->rxsubchans |= V4L2_TUNER_SUB_STEREO;
+ }
return;
}
@@ -847,6 +890,7 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
break;
case WW_BG:
case WW_DK:
+ case WW_M:
case WW_I:
case WW_L:
if (1 == core->use_nicam) {
@@ -872,20 +916,18 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
} else {
/* TODO: Add A2 autodetection */
+ mask = 0x3f;
switch (mode) {
case V4L2_TUNER_MODE_MONO:
case V4L2_TUNER_MODE_LANG1:
- set_audio_standard_A2(core,
- EN_A2_FORCE_MONO1);
+ ctl = EN_A2_FORCE_MONO1;
break;
case V4L2_TUNER_MODE_LANG2:
- set_audio_standard_A2(core,
- EN_A2_FORCE_MONO2);
+ ctl = EN_A2_FORCE_MONO2;
break;
case V4L2_TUNER_MODE_STEREO:
case V4L2_TUNER_MODE_LANG1_LANG2:
- set_audio_standard_A2(core,
- EN_A2_FORCE_STEREO);
+ ctl = EN_A2_FORCE_STEREO;
break;
}
}
@@ -932,24 +974,39 @@ int cx88_audio_thread(void *data)
break;
try_to_freeze();
- /* just monitor the audio status for now ... */
- memset(&t, 0, sizeof(t));
- cx88_get_stereo(core, &t);
-
- if (UNSET != core->audiomode_manual)
- /* manually set, don't do anything. */
- continue;
-
- /* monitor signal */
- if (t.rxsubchans & V4L2_TUNER_SUB_STEREO)
- mode = V4L2_TUNER_MODE_STEREO;
- else
- mode = V4L2_TUNER_MODE_MONO;
- if (mode == core->audiomode_current)
- continue;
-
- /* automatically switch to best available mode */
- cx88_set_stereo(core, mode, 0);
+ switch (core->tvaudio) {
+ case WW_BG:
+ case WW_DK:
+ case WW_M:
+ case WW_I:
+ case WW_L:
+ if (core->use_nicam)
+ goto hw_autodetect;
+
+ /* just monitor the audio status for now ... */
+ memset(&t, 0, sizeof(t));
+ cx88_get_stereo(core, &t);
+
+ if (UNSET != core->audiomode_manual)
+ /* manually set, don't do anything. */
+ continue;
+
+ /* monitor signal and set stereo if available */
+ if (t.rxsubchans & V4L2_TUNER_SUB_STEREO)
+ mode = V4L2_TUNER_MODE_STEREO;
+ else
+ mode = V4L2_TUNER_MODE_MONO;
+ if (mode == core->audiomode_current)
+ continue;
+ /* automatically switch to best available mode */
+ cx88_set_stereo(core, mode, 0);
+ break;
+ default:
+hw_autodetect:
+ /* stereo autodetection is supported by hardware so
+ we don't need to do it manually. Do nothing. */
+ break;
+ }
}
dprintk("cx88: tvaudio thread exiting\n");
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index b993d42fe73..0ccac702bea 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -869,6 +869,7 @@ video_poll(struct file *file, struct poll_table_struct *wait)
{
struct cx8800_fh *fh = file->private_data;
struct cx88_buffer *buf;
+ unsigned int rc = POLLERR;
if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
if (!res_get(fh->dev,fh,RESOURCE_VBI))
@@ -876,22 +877,27 @@ video_poll(struct file *file, struct poll_table_struct *wait)
return videobuf_poll_stream(file, &fh->vbiq, wait);
}
+ mutex_lock(&fh->vidq.vb_lock);
if (res_check(fh,RESOURCE_VIDEO)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
- return POLLERR;
+ goto done;
buf = list_entry(fh->vidq.stream.next,struct cx88_buffer,vb.stream);
} else {
/* read() capture */
buf = (struct cx88_buffer*)fh->vidq.read_buf;
if (NULL == buf)
- return POLLERR;
+ goto done;
}
poll_wait(file, &buf->vb.done, wait);
if (buf->vb.state == VIDEOBUF_DONE ||
buf->vb.state == VIDEOBUF_ERROR)
- return POLLIN|POLLRDNORM;
- return 0;
+ rc = POLLIN|POLLRDNORM;
+ else
+ rc = 0;
+done:
+ mutex_unlock(&fh->vidq.vb_lock);
+ return rc;
}
static int video_release(struct file *file)
@@ -926,8 +932,10 @@ static int video_release(struct file *file)
file->private_data = NULL;
kfree(fh);
+ mutex_lock(&dev->core->lock);
if(atomic_dec_and_test(&dev->core->users))
call_all(dev->core, tuner, s_standby);
+ mutex_unlock(&dev->core->lock);
return 0;
}
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 7724d168fc0..9d83762163f 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -65,6 +65,8 @@
#define VBI_LINE_COUNT 17
#define VBI_LINE_LENGTH 2048
+#define AUD_RDS_LINES 4
+
/* need "shadow" registers for some write-only ones ... */
#define SHADOW_AUD_VOL_CTL 1
#define SHADOW_AUD_BAL_CTL 2
@@ -132,6 +134,7 @@ struct cx88_ctrl {
#define SRAM_CH25 4 /* audio */
#define SRAM_CH26 5
#define SRAM_CH28 6 /* mpeg */
+#define SRAM_CH27 7 /* audio rds */
/* more */
struct sram_channel {
@@ -232,6 +235,8 @@ extern struct sram_channel cx88_sram_channels[];
#define CX88_BOARD_TBS_8910 77
#define CX88_BOARD_PROF_6200 78
#define CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII 79
+#define CX88_BOARD_HAUPPAUGE_IRONLY 80
+#define CX88_BOARD_WINFAST_DTV1800H 81
enum cx88_itype {
CX88_VMUX_COMPOSITE1 = 1,
@@ -350,6 +355,7 @@ struct cx88_core {
u32 input;
u32 astat;
u32 use_nicam;
+ unsigned long last_change;
/* IR remote control state */
struct cx88_IR *ir;
@@ -652,6 +658,7 @@ extern void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl);
#define WW_I2SPT 8
#define WW_FM 9
#define WW_I2SADC 10
+#define WW_M 11
void cx88_set_tvaudio(struct cx88_core *core);
void cx88_newstation(struct cx88_core *core);
@@ -665,6 +672,11 @@ struct cx8802_dev *cx8802_get_device(int minor);
struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype);
/* ----------------------------------------------------------- */
+/* cx88-dsp.c */
+
+s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core);
+
+/* ----------------------------------------------------------- */
/* cx88-input.c */
int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci);
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index ba3709bec3f..ec2f45dde16 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -747,8 +747,14 @@ static const struct file_operations dabusb_fops =
.release = dabusb_release,
};
+static char *dabusb_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
static struct usb_class_driver dabusb_class = {
.name = "dabusb%d",
+ .nodename = dabusb_nodename,
.fops = &dabusb_fops,
.minor_base = DABUSB_MINOR,
};
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index 0131322475b..7bd8a70f0a0 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -339,6 +339,11 @@ static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream)
mutex_lock(&dev->lock);
dev->adev.users--;
em28xx_audio_analog_set(dev);
+ if (substream->runtime->dma_area) {
+ dprintk("freeing\n");
+ vfree(substream->runtime->dma_area);
+ substream->runtime->dma_area = NULL;
+ }
mutex_unlock(&dev->lock);
return 0;
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 7c70738479d..00cc791a9e4 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -49,6 +49,11 @@ static unsigned int disable_ir;
module_param(disable_ir, int, 0444);
MODULE_PARM_DESC(disable_ir, "disable infrared remote support");
+static unsigned int disable_usb_speed_check;
+module_param(disable_usb_speed_check, int, 0444);
+MODULE_PARM_DESC(disable_usb_speed_check,
+ "override min bandwidth requirement of 480M bps");
+
static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
@@ -104,6 +109,24 @@ static struct em28xx_reg_seq em2880_msi_digivox_ad_analog[] = {
/* Board - EM2870 Kworld 355u
Analog - No input analog */
+/* Board - EM2882 Kworld 315U digital */
+static struct em28xx_reg_seq em2882_kworld_315u_digital[] = {
+ {EM28XX_R08_GPIO, 0xff, 0xff, 10},
+ {EM28XX_R08_GPIO, 0xfe, 0xff, 10},
+ {EM2880_R04_GPO, 0x04, 0xff, 10},
+ {EM2880_R04_GPO, 0x0c, 0xff, 10},
+ {EM28XX_R08_GPIO, 0x7e, 0xff, 10},
+ { -1, -1, -1, -1},
+};
+
+static struct em28xx_reg_seq em2882_kworld_315u_tuner_gpio[] = {
+ {EM2880_R04_GPO, 0x08, 0xff, 10},
+ {EM2880_R04_GPO, 0x0c, 0xff, 10},
+ {EM2880_R04_GPO, 0x08, 0xff, 10},
+ {EM2880_R04_GPO, 0x0c, 0xff, 10},
+ { -1, -1, -1, -1},
+};
+
static struct em28xx_reg_seq kworld_330u_analog[] = {
{EM28XX_R08_GPIO, 0x6d, ~EM_GPIO_4, 10},
{EM2880_R04_GPO, 0x00, 0xff, 10},
@@ -140,6 +163,16 @@ static struct em28xx_reg_seq compro_mute_gpio[] = {
{ -1, -1, -1, -1},
};
+/* Terratec AV350 */
+static struct em28xx_reg_seq terratec_av350_mute_gpio[] = {
+ {EM28XX_R08_GPIO, 0xff, 0x7f, 10},
+ { -1, -1, -1, -1},
+};
+
+static struct em28xx_reg_seq terratec_av350_unmute_gpio[] = {
+ {EM28XX_R08_GPIO, 0xff, 0xff, 10},
+ { -1, -1, -1, -1},
+};
/*
* Board definitions
*/
@@ -992,16 +1025,17 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_LINE_IN,
} },
},
- [EM2860_BOARD_POINTNIX_INTRAORAL_CAMERA] = {
- .name = "PointNix Intra-Oral Camera",
+ [EM2860_BOARD_SAA711X_REFERENCE_DESIGN] = {
+ .name = "EM2860/SAA711X Reference Design",
.has_snapshot_button = 1,
- .tda9887_conf = TDA9887_PRESENT,
.tuner_type = TUNER_ABSENT,
.decoder = EM28XX_SAA711X,
.input = { {
.type = EM28XX_VMUX_SVIDEO,
.vmux = SAA7115_SVIDEO3,
- .amux = EM28XX_AMUX_VIDEO,
+ }, {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = SAA7115_COMPOSITE0,
} },
},
[EM2880_BOARD_MSI_DIGIVOX_AD] = {
@@ -1095,6 +1129,63 @@ struct em28xx_board em28xx_boards[] = {
.gpio = default_analog,
} },
},
+ [EM2882_BOARD_KWORLD_ATSC_315U] = {
+ .name = "KWorld ATSC 315U HDTV TV Box",
+ .valid = EM28XX_BOARD_NOT_VALIDATED,
+ .tuner_type = TUNER_THOMSON_DTT761X,
+ .tuner_gpio = em2882_kworld_315u_tuner_gpio,
+ .tda9887_conf = TDA9887_PRESENT,
+ .decoder = EM28XX_SAA711X,
+ .has_dvb = 1,
+ .dvb_gpio = em2882_kworld_315u_digital,
+ .xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE,
+ /* Analog mode - still not ready */
+ /*.input = { {
+ .type = EM28XX_VMUX_TELEVISION,
+ .vmux = SAA7115_COMPOSITE2,
+ .amux = EM28XX_AMUX_VIDEO,
+ .gpio = em2882_kworld_315u_analog,
+ .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO,
+ }, {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = SAA7115_COMPOSITE0,
+ .amux = EM28XX_AMUX_LINE_IN,
+ .gpio = em2882_kworld_315u_analog1,
+ .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = SAA7115_SVIDEO3,
+ .amux = EM28XX_AMUX_LINE_IN,
+ .gpio = em2882_kworld_315u_analog1,
+ .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO,
+ } }, */
+ },
+ [EM2880_BOARD_EMPIRE_DUAL_TV] = {
+ .name = "Empire dual TV",
+ .tuner_type = TUNER_XC2028,
+ .tuner_gpio = default_tuner_gpio,
+ .has_dvb = 1,
+ .dvb_gpio = default_digital,
+ .mts_firmware = 1,
+ .decoder = EM28XX_TVP5150,
+ .input = { {
+ .type = EM28XX_VMUX_TELEVISION,
+ .vmux = TVP5150_COMPOSITE0,
+ .amux = EM28XX_AMUX_VIDEO,
+ .gpio = default_analog,
+ }, {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = TVP5150_COMPOSITE1,
+ .amux = EM28XX_AMUX_LINE_IN,
+ .gpio = default_analog,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = TVP5150_SVIDEO,
+ .amux = EM28XX_AMUX_LINE_IN,
+ .gpio = default_analog,
+ } },
+ },
[EM2881_BOARD_DNT_DA2_HYBRID] = {
.name = "DNT DA2 Hybrid",
.valid = EM28XX_BOARD_NOT_VALIDATED,
@@ -1322,6 +1413,42 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
} },
},
+ [EM2860_BOARD_TERRATEC_GRABBY] = {
+ .name = "Terratec Grabby",
+ .vchannels = 2,
+ .tuner_type = TUNER_ABSENT,
+ .decoder = EM28XX_SAA711X,
+ .xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = SAA7115_COMPOSITE0,
+ .amux = EM28XX_AMUX_VIDEO2,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = SAA7115_SVIDEO3,
+ .amux = EM28XX_AMUX_VIDEO2,
+ } },
+ },
+ [EM2860_BOARD_TERRATEC_AV350] = {
+ .name = "Terratec AV350",
+ .vchannels = 2,
+ .tuner_type = TUNER_ABSENT,
+ .decoder = EM28XX_TVP5150,
+ .xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
+ .mute_gpio = terratec_av350_mute_gpio,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = TVP5150_COMPOSITE1,
+ .amux = EM28XX_AUDIO_SRC_LINE,
+ .gpio = terratec_av350_unmute_gpio,
+
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = TVP5150_SVIDEO,
+ .amux = EM28XX_AUDIO_SRC_LINE,
+ .gpio = terratec_av350_unmute_gpio,
+ } },
+ },
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -1355,6 +1482,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2880_BOARD_KWORLD_DVB_305U },
{ USB_DEVICE(0xeb1a, 0xe310),
.driver_info = EM2880_BOARD_MSI_DIGIVOX_AD },
+ { USB_DEVICE(0xeb1a, 0xa313),
+ .driver_info = EM2882_BOARD_KWORLD_ATSC_315U },
{ USB_DEVICE(0xeb1a, 0xa316),
.driver_info = EM2883_BOARD_KWORLD_HYBRID_330U },
{ USB_DEVICE(0xeb1a, 0xe320),
@@ -1385,6 +1514,10 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2870_BOARD_TERRATEC_XS },
{ USB_DEVICE(0x0ccd, 0x0047),
.driver_info = EM2880_BOARD_TERRATEC_PRODIGY_XS },
+ { USB_DEVICE(0x0ccd, 0x0084),
+ .driver_info = EM2860_BOARD_TERRATEC_AV350 },
+ { USB_DEVICE(0x0ccd, 0x0096),
+ .driver_info = EM2860_BOARD_TERRATEC_GRABBY },
{ USB_DEVICE(0x185b, 0x2870),
.driver_info = EM2870_BOARD_COMPRO_VIDEOMATE },
{ USB_DEVICE(0x185b, 0x2041),
@@ -1437,13 +1570,14 @@ static struct em28xx_hash_table em28xx_eeprom_hash[] = {
{0x6ce05a8f, EM2820_BOARD_PROLINK_PLAYTV_USB2, TUNER_YMEC_TVF_5533MF},
{0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF},
{0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028},
+ {0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028},
};
/* I2C devicelist hash table for devices with generic USB IDs */
static struct em28xx_hash_table em28xx_i2c_hash[] = {
{0xb06a32c3, EM2800_BOARD_TERRATEC_CINERGY_200, TUNER_LG_PAL_NEW_TAPC},
{0xf51200e3, EM2800_BOARD_VGEAR_POCKETTV, TUNER_LG_PAL_NEW_TAPC},
- {0x1ba50080, EM2860_BOARD_POINTNIX_INTRAORAL_CAMERA, TUNER_ABSENT},
+ {0x1ba50080, EM2860_BOARD_SAA711X_REFERENCE_DESIGN, TUNER_ABSENT},
{0xc51200e3, EM2820_BOARD_GADMEI_TVR200, TUNER_LG_PAL_NEW_TAPC},
};
@@ -1619,6 +1753,17 @@ void em28xx_pre_card_setup(struct em28xx *dev)
em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfd);
break;
+ case EM2882_BOARD_KWORLD_ATSC_315U:
+ em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xff);
+ msleep(10);
+ em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfe);
+ msleep(10);
+ em28xx_write_reg(dev, EM2880_R04_GPO, 0x00);
+ msleep(10);
+ em28xx_write_reg(dev, EM2880_R04_GPO, 0x08);
+ msleep(10);
+ break;
+
case EM2860_BOARD_KAIOMY_TVNPC_U2:
em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x07", 1);
em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
@@ -1664,6 +1809,7 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
ctl->mts = em28xx_boards[dev->model].mts_firmware;
switch (dev->model) {
+ case EM2880_BOARD_EMPIRE_DUAL_TV:
case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
ctl->demod = XC3028_FE_ZARLINK456;
break;
@@ -1835,12 +1981,20 @@ static int em28xx_hint_board(struct em28xx *dev)
}
/* ----------------------------------------------------------------------- */
-void em28xx_set_ir(struct em28xx *dev, struct IR_i2c *ir)
+void em28xx_register_i2c_ir(struct em28xx *dev)
{
- if (disable_ir) {
- ir->get_key = NULL;
- return ;
- }
+ struct i2c_board_info info;
+ struct IR_i2c_init_data init_data;
+ const unsigned short addr_list[] = {
+ 0x30, 0x47, I2C_CLIENT_END
+ };
+
+ if (disable_ir)
+ return;
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ memset(&init_data, 0, sizeof(struct IR_i2c_init_data));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
/* detect & configure */
switch (dev->model) {
@@ -1850,22 +2004,19 @@ void em28xx_set_ir(struct em28xx *dev, struct IR_i2c *ir)
break;
case (EM2800_BOARD_TERRATEC_CINERGY_200):
case (EM2820_BOARD_TERRATEC_CINERGY_250):
- ir->ir_codes = ir_codes_em_terratec;
- ir->get_key = em28xx_get_key_terratec;
- snprintf(ir->c.name, sizeof(ir->c.name),
- "i2c IR (EM28XX Terratec)");
+ init_data.ir_codes = ir_codes_em_terratec;
+ init_data.get_key = em28xx_get_key_terratec;
+ init_data.name = "i2c IR (EM28XX Terratec)";
break;
case (EM2820_BOARD_PINNACLE_USB_2):
- ir->ir_codes = ir_codes_pinnacle_grey;
- ir->get_key = em28xx_get_key_pinnacle_usb_grey;
- snprintf(ir->c.name, sizeof(ir->c.name),
- "i2c IR (EM28XX Pinnacle PCTV)");
+ init_data.ir_codes = ir_codes_pinnacle_grey;
+ init_data.get_key = em28xx_get_key_pinnacle_usb_grey;
+ init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
break;
case (EM2820_BOARD_HAUPPAUGE_WINTV_USB_2):
- ir->ir_codes = ir_codes_hauppauge_new;
- ir->get_key = em28xx_get_key_em_haup;
- snprintf(ir->c.name, sizeof(ir->c.name),
- "i2c IR (EM2840 Hauppauge)");
+ init_data.ir_codes = ir_codes_hauppauge_new;
+ init_data.get_key = em28xx_get_key_em_haup;
+ init_data.name = "i2c IR (EM2840 Hauppauge)";
break;
case (EM2820_BOARD_MSI_VOX_USB_2):
break;
@@ -1876,6 +2027,10 @@ void em28xx_set_ir(struct em28xx *dev, struct IR_i2c *ir)
case (EM2800_BOARD_GRABBEEX_USB2800):
break;
}
+
+ if (init_data.name)
+ info.platform_data = &init_data;
+ i2c_new_probed_device(&dev->i2c_adap, &info, addr_list);
}
void em28xx_card_setup(struct em28xx *dev)
@@ -1886,6 +2041,9 @@ void em28xx_card_setup(struct em28xx *dev)
if (em28xx_boards[dev->model].tuner_addr)
dev->tuner_addr = em28xx_boards[dev->model].tuner_addr;
+ if (em28xx_boards[dev->model].tda9887_conf)
+ dev->tda9887_conf = em28xx_boards[dev->model].tda9887_conf;
+
/* request some modules */
switch (dev->model) {
case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
@@ -1915,6 +2073,12 @@ void em28xx_card_setup(struct em28xx *dev)
#endif
break;
}
+ case EM2882_BOARD_KWORLD_ATSC_315U:
+ em28xx_write_reg(dev, 0x0d, 0x42);
+ msleep(10);
+ em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfd);
+ msleep(10);
+ break;
case EM2820_BOARD_KWORLD_PVRTV2800RF:
/* GPIO enables sound on KWORLD PVR TV 2800RF */
em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xf9);
@@ -2279,6 +2443,20 @@ static int em28xx_usb_probe(struct usb_interface *interface,
ifnum,
interface->altsetting->desc.bInterfaceNumber);
+ /*
+ * Make sure we have 480 Mbps of bandwidth, otherwise things like
+ * video stream wouldn't likely work, since 12 Mbps is generally
+ * not enough even for most Digital TV streams.
+ */
+ if (udev->speed != USB_SPEED_HIGH && disable_usb_speed_check == 0) {
+ printk(DRIVER_NAME ": Device initialization failed.\n");
+ printk(DRIVER_NAME ": Device must be connected to a high-speed"
+ " USB 2.0 port.\n");
+ em28xx_devused &= ~(1<<nr);
+ retval = -ENODEV;
+ goto err;
+ }
+
if (nr >= EM28XX_MAXBOARDS) {
printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
EM28XX_MAXBOARDS);
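
The bandwidth gate added to em28xx_usb_probe() above reduces to one predicate: accept the device only on a high-speed (480 Mbps) link, unless the new disable_usb_speed_check module parameter overrides the check. Below is a minimal userspace sketch of that decision only, assuming hypothetical stand-ins: speed_is_high models udev->speed == USB_SPEED_HIGH and override models the module parameter. It is not driver code.

/* Minimal sketch of the probe-time speed check; hypothetical parameters. */
#include <stdbool.h>
#include <stdio.h>

static bool em28xx_probe_allowed(bool speed_is_high, bool override)
{
	/* Isochronous video capture needs the 480 Mbps high-speed budget;
	 * a 12 Mbps full-speed link is refused unless the user overrides. */
	return speed_is_high || override;
}

int main(void)
{
	printf("full-speed, no override: %d\n", em28xx_probe_allowed(false, false));
	printf("full-speed, override:    %d\n", em28xx_probe_allowed(false, true));
	printf("high-speed:              %d\n", em28xx_probe_allowed(true, false));
	return 0;
}
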
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 192b76cdd5d..c8d7ce8fbd3 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -500,18 +500,21 @@ int em28xx_audio_setup(struct em28xx *dev)
/* See how this device is configured */
cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
- if (cfg < 0)
+ em28xx_info("Config register raw data: 0x%02x\n", cfg);
+ if (cfg < 0) {
+ /* Register read error? */
cfg = EM28XX_CHIPCFG_AC97; /* Be conservative */
- else
- em28xx_info("Config register raw data: 0x%02x\n", cfg);
-
- if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
- EM28XX_CHIPCFG_I2S_3_SAMPRATES) {
+ } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == 0x00) {
+ /* The device doesn't have vendor audio at all */
+ dev->has_alsa_audio = 0;
+ dev->audio_mode.has_audio = 0;
+ return 0;
+ } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
+ EM28XX_CHIPCFG_I2S_3_SAMPRATES) {
em28xx_info("I2S Audio (3 sample rates)\n");
dev->audio_mode.i2s_3rates = 1;
- }
- if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
- EM28XX_CHIPCFG_I2S_5_SAMPRATES) {
+ } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
+ EM28XX_CHIPCFG_I2S_5_SAMPRATES) {
em28xx_info("I2S Audio (5 sample rates)\n");
dev->audio_mode.i2s_5rates = 1;
}
@@ -938,7 +941,7 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
dev->isoc_ctl.transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
GFP_KERNEL);
if (!dev->isoc_ctl.transfer_buffer) {
- em28xx_errdev("cannot allocate memory for usbtransfer\n");
+ em28xx_errdev("cannot allocate memory for usb transfer\n");
kfree(dev->isoc_ctl.urb);
return -ENOMEM;
}
@@ -1012,6 +1015,41 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
}
EXPORT_SYMBOL_GPL(em28xx_init_isoc);
+/* Determine the packet size for the DVB stream for the given device
+ (underlying value programmed into the eeprom) */
+int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev)
+{
+ unsigned int chip_cfg2;
+ unsigned int packet_size = 564;
+
+ if (dev->chip_id == CHIP_ID_EM2874) {
+ /* FIXME - for now assume 564 like it was before, but the
+ em2874 code should be added to return the proper value... */
+ packet_size = 564;
+ } else {
+ /* TS max packet size stored in bits 1-0 of R01 */
+ chip_cfg2 = em28xx_read_reg(dev, EM28XX_R01_CHIPCFG2);
+ switch (chip_cfg2 & EM28XX_CHIPCFG2_TS_PACKETSIZE_MASK) {
+ case EM28XX_CHIPCFG2_TS_PACKETSIZE_188:
+ packet_size = 188;
+ break;
+ case EM28XX_CHIPCFG2_TS_PACKETSIZE_376:
+ packet_size = 376;
+ break;
+ case EM28XX_CHIPCFG2_TS_PACKETSIZE_564:
+ packet_size = 564;
+ break;
+ case EM28XX_CHIPCFG2_TS_PACKETSIZE_752:
+ packet_size = 752;
+ break;
+ }
+ }
+
+ em28xx_coredbg("dvb max packet size=%d\n", packet_size);
+ return packet_size;
+}
+EXPORT_SYMBOL_GPL(em28xx_isoc_dvb_max_packetsize);
+
/*
* em28xx_wake_i2c()
* configure i2c attached devices
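
The new em28xx_isoc_dvb_max_packetsize() reads bits 1:0 of chip configuration register 2 and maps them to a transport-stream packet size of 188, 376, 564 or 752 bytes, while the em2874 path stays hard-wired to 564 for now. Below is a standalone sketch of just that bit decode; the mask values are copied from the em28xx-reg.h hunk further down, and the surrounding program is a userspace stand-in, not driver code.

#include <stdio.h>

/* Values copied from the EM28XX_CHIPCFG2_* definitions in this patch. */
#define TS_PACKETSIZE_MASK 0x03
#define TS_PACKETSIZE_188  0x00
#define TS_PACKETSIZE_376  0x01
#define TS_PACKETSIZE_564  0x02
#define TS_PACKETSIZE_752  0x03

static int ts_packet_size(unsigned int chip_cfg2)
{
	switch (chip_cfg2 & TS_PACKETSIZE_MASK) {
	case TS_PACKETSIZE_188:
		return 188;
	case TS_PACKETSIZE_376:
		return 376;
	case TS_PACKETSIZE_564:
		return 564;
	default:	/* TS_PACKETSIZE_752 */
		return 752;
	}
}

int main(void)
{
	unsigned int cfg;

	for (cfg = 0; cfg < 4; cfg++)
		printf("cfg2 & 0x03 = %u -> %d byte packets\n",
		       cfg, ts_packet_size(cfg));
	return 0;
}
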
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index fcd25511209..563dd2b1c8e 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -25,6 +25,8 @@
#include "em28xx.h"
#include <media/v4l2-common.h>
#include <media/videobuf-vmalloc.h>
+#include <media/tuner.h>
+#include "tuner-simple.h"
#include "lgdt330x.h"
#include "zl10353.h"
@@ -46,7 +48,6 @@ if (debug >= level) \
} while (0)
#define EM28XX_DVB_NUM_BUFS 5
-#define EM28XX_DVB_MAX_PACKETSIZE 564
#define EM28XX_DVB_MAX_PACKETS 64
struct em28xx_dvb {
@@ -142,14 +143,17 @@ static int start_streaming(struct em28xx_dvb *dvb)
{
int rc;
struct em28xx *dev = dvb->adapter.priv;
+ int max_dvb_packet_size;
usb_set_interface(dev->udev, 0, 1);
rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
if (rc < 0)
return rc;
+ max_dvb_packet_size = em28xx_isoc_dvb_max_packetsize(dev);
+
return em28xx_init_isoc(dev, EM28XX_DVB_MAX_PACKETS,
- EM28XX_DVB_NUM_BUFS, EM28XX_DVB_MAX_PACKETSIZE,
+ EM28XX_DVB_NUM_BUFS, max_dvb_packet_size,
dvb_isoc_copy);
}
@@ -431,6 +435,7 @@ static int dvb_init(struct em28xx *dev)
case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
case EM2880_BOARD_TERRATEC_HYBRID_XS:
case EM2880_BOARD_KWORLD_DVB_310U:
+ case EM2880_BOARD_EMPIRE_DUAL_TV:
dvb->frontend = dvb_attach(zl10353_attach,
&em28xx_zl10353_with_xc3028,
&dev->i2c_adap);
@@ -448,6 +453,18 @@ static int dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM2882_BOARD_KWORLD_ATSC_315U:
+ dvb->frontend = dvb_attach(lgdt330x_attach,
+ &em2880_lgdt3303_dev,
+ &dev->i2c_adap);
+ if (dvb->frontend != NULL) {
+ if (!dvb_attach(simple_tuner_attach, dvb->frontend,
+ &dev->i2c_adap, 0x61, TUNER_THOMSON_DTT761X)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ }
+ break;
case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
#ifdef EM28XX_DRX397XD_SUPPORT
/* We don't have the config structure properly populated, so
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index f0bf1d960c7..2c86fcf089f 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -451,27 +451,6 @@ static u32 functionality(struct i2c_adapter *adap)
return I2C_FUNC_SMBUS_EMUL;
}
-/*
- * attach_inform()
- * gets called when a device attaches to the i2c bus
- * does some basic configuration
- */
-static int attach_inform(struct i2c_client *client)
-{
- struct em28xx *dev = client->adapter->algo_data;
- struct IR_i2c *ir = i2c_get_clientdata(client);
-
- switch (client->addr << 1) {
- case 0x60:
- case 0x8e:
- dprintk1(1, "attach_inform: IR detected (%s).\n", ir->phys);
- em28xx_set_ir(dev, ir);
- break;
- }
-
- return 0;
-}
-
static struct i2c_algorithm em28xx_algo = {
.master_xfer = em28xx_i2c_xfer,
.functionality = functionality,
@@ -482,7 +461,6 @@ static struct i2c_adapter em28xx_adap_template = {
.name = "em28xx",
.id = I2C_HW_B_EM28XX,
.algo = &em28xx_algo,
- .client_register = attach_inform,
};
static struct i2c_client em28xx_client_template = {
@@ -575,6 +553,9 @@ int em28xx_i2c_register(struct em28xx *dev)
if (i2c_scan)
em28xx_do_i2c_scan(dev);
+ /* Instantiate the IR receiver device, if present */
+ em28xx_register_i2c_ir(dev);
+
return 0;
}
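
With attach_inform() removed, the IR receiver is now instantiated explicitly: em28xx_register_i2c_ir() fills an i2c_board_info, points platform_data at an IR_i2c_init_data block, and passes i2c_new_probed_device() a 0x30/0x47 address list terminated by I2C_CLIENT_END, so the i2c core binds the first address that answers. Below is a minimal sketch of that probe-list idea, with a hypothetical probe_addr() callback standing in for the real bus transaction.

#include <stdbool.h>
#include <stdio.h>

#define ADDR_LIST_END 0xffff	/* stands in for I2C_CLIENT_END */

/* Hypothetical bus probe: in the driver this is an actual I2C transfer. */
static bool probe_addr(unsigned short addr)
{
	return addr == 0x47;	/* pretend only 0x47 answers */
}

/* Return the first responding address, or ADDR_LIST_END if none does. */
static unsigned short probe_first(const unsigned short *list,
				  bool (*probe)(unsigned short))
{
	for (; *list != ADDR_LIST_END; list++)
		if (probe(*list))
			return *list;
	return ADDR_LIST_END;
}

int main(void)
{
	const unsigned short addr_list[] = { 0x30, 0x47, ADDR_LIST_END };
	unsigned short found = probe_first(addr_list, probe_addr);

	if (found != ADDR_LIST_END)
		printf("IR receiver found at 0x%02x\n", found);
	else
		printf("no IR receiver on the bus\n");
	return 0;
}
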
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index a5abfd7a19f..7a0fe3816e3 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -40,7 +40,7 @@ MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
#define i2cdprintk(fmt, arg...) \
if (ir_debug) { \
- printk(KERN_DEBUG "%s/ir: " fmt, ir->c.name , ## arg); \
+ printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
}
#define dprintk(fmt, arg...) \
@@ -85,7 +85,7 @@ int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
unsigned char b;
/* poll IR chip */
- if (1 != i2c_master_recv(&ir->c, &b, 1)) {
+ if (1 != i2c_master_recv(ir->c, &b, 1)) {
i2cdprintk("read error\n");
return -EIO;
}
@@ -114,7 +114,7 @@ int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
unsigned char code;
/* poll IR chip */
- if (2 != i2c_master_recv(&ir->c, buf, 2))
+ if (2 != i2c_master_recv(ir->c, buf, 2))
return -EIO;
/* Does eliminate repeated parity code */
@@ -147,7 +147,7 @@ int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
/* poll IR chip */
- if (3 != i2c_master_recv(&ir->c, buf, 3)) {
+ if (3 != i2c_master_recv(ir->c, buf, 3)) {
i2cdprintk("read error\n");
return -EIO;
}
diff --git a/drivers/media/video/em28xx/em28xx-reg.h b/drivers/media/video/em28xx/em28xx-reg.h
index 24e39c56811..a2676d63cfd 100644
--- a/drivers/media/video/em28xx/em28xx-reg.h
+++ b/drivers/media/video/em28xx/em28xx-reg.h
@@ -27,6 +27,22 @@
#define EM28XX_CHIPCFG_AC97 0x10
#define EM28XX_CHIPCFG_AUDIOMASK 0x30
+#define EM28XX_R01_CHIPCFG2 0x01
+
+/* em28xx Chip Configuration 2 0x01 */
+#define EM28XX_CHIPCFG2_TS_PRESENT 0x10
+#define EM28XX_CHIPCFG2_TS_REQ_INTERVAL_MASK 0x0c /* bits 3-2 */
+#define EM28XX_CHIPCFG2_TS_REQ_INTERVAL_1MF 0x00
+#define EM28XX_CHIPCFG2_TS_REQ_INTERVAL_2MF 0x04
+#define EM28XX_CHIPCFG2_TS_REQ_INTERVAL_4MF 0x08
+#define EM28XX_CHIPCFG2_TS_REQ_INTERVAL_8MF 0x0c
+#define EM28XX_CHIPCFG2_TS_PACKETSIZE_MASK 0x03 /* bits 0-1 */
+#define EM28XX_CHIPCFG2_TS_PACKETSIZE_188 0x00
+#define EM28XX_CHIPCFG2_TS_PACKETSIZE_376 0x01
+#define EM28XX_CHIPCFG2_TS_PACKETSIZE_564 0x02
+#define EM28XX_CHIPCFG2_TS_PACKETSIZE_752 0x03
+
+
/* GPIO/GPO registers */
#define EM2880_R04_GPO 0x04 /* em2880-em2883 only */
#define EM28XX_R08_GPIO 0x08 /* em2820 or upper */
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 4c4e58004f5..8bf81be1da6 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -58,7 +58,7 @@
#define EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 16
#define EM2880_BOARD_PINNACLE_PCTV_HD_PRO 17
#define EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2 18
-#define EM2860_BOARD_POINTNIX_INTRAORAL_CAMERA 19
+#define EM2860_BOARD_SAA711X_REFERENCE_DESIGN 19
#define EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 20
#define EM2800_BOARD_GRABBEEX_USB2800 21
#define EM2750_BOARD_UNKNOWN 22
@@ -102,6 +102,10 @@
#define EM2860_BOARD_KAIOMY_TVNPC_U2 63
#define EM2860_BOARD_EASYCAP 64
#define EM2820_BOARD_IODATA_GVMVP_SZ 65
+#define EM2880_BOARD_EMPIRE_DUAL_TV 66
+#define EM2860_BOARD_TERRATEC_GRABBY 67
+#define EM2860_BOARD_TERRATEC_AV350 68
+#define EM2882_BOARD_KWORLD_ATSC_315U 69
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -615,6 +619,7 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
int num_bufs, int max_pkt_size,
int (*isoc_copy) (struct em28xx *dev, struct urb *urb));
void em28xx_uninit_isoc(struct em28xx *dev);
+int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev);
int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode);
int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio);
void em28xx_wake_i2c(struct em28xx *dev);
@@ -639,7 +644,7 @@ extern void em28xx_card_setup(struct em28xx *dev);
extern struct em28xx_board em28xx_boards[];
extern struct usb_device_id em28xx_id_table[];
extern const unsigned int em28xx_bcount;
-void em28xx_set_ir(struct em28xx *dev, struct IR_i2c *ir);
+void em28xx_register_i2c_ir(struct em28xx *dev);
int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
void em28xx_release_resources(struct em28xx *dev);
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index 00e6863ed66..480ec5c87d0 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -168,6 +168,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = fpix_mode;
cam->nmodes = 1;
+ cam->bulk = 1;
cam->bulk_size = FPIX_MAX_TRANSFER;
INIT_WORK(&dev->work_struct, dostream);
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index a2741d7dccf..f7e0355ad64 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1,7 +1,7 @@
/*
* Main USB camera driver
*
- * V4L2 by Jean-Francois Moine <http://moinejf.free.fr>
+ * Copyright (C) 2008-2009 Jean-Francois Moine (http://moinejf.free.fr)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -47,7 +47,7 @@ MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA USB Camera Driver");
MODULE_LICENSE("GPL");
-#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 5, 0)
+#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 6, 0)
#ifdef GSPCA_DEBUG
int gspca_debug = D_ERR | D_PROBE;
@@ -441,7 +441,7 @@ static void destroy_urbs(struct gspca_dev *gspca_dev)
* look for an input transfer endpoint in an alternate setting
*/
static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt,
- __u8 xfer)
+ int xfer)
{
struct usb_host_endpoint *ep;
int i, attr;
@@ -449,7 +449,8 @@ static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt,
for (i = 0; i < alt->desc.bNumEndpoints; i++) {
ep = &alt->endpoint[i];
attr = ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
- if (attr == xfer)
+ if (attr == xfer
+ && ep->desc.wMaxPacketSize != 0)
return ep;
}
return NULL;
@@ -467,37 +468,28 @@ static struct usb_host_endpoint *get_ep(struct gspca_dev *gspca_dev)
{
struct usb_interface *intf;
struct usb_host_endpoint *ep;
- int i, ret;
+ int xfer, i, ret;
intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
ep = NULL;
+ xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
+ : USB_ENDPOINT_XFER_ISOC;
i = gspca_dev->alt; /* previous alt setting */
-
- /* try isoc */
while (--i >= 0) {
- ep = alt_xfer(&intf->altsetting[i],
- USB_ENDPOINT_XFER_ISOC);
+ ep = alt_xfer(&intf->altsetting[i], xfer);
if (ep)
break;
}
-
- /* if no isoc, try bulk (alt 0 only) */
if (ep == NULL) {
- ep = alt_xfer(&intf->altsetting[0],
- USB_ENDPOINT_XFER_BULK);
- if (ep == NULL) {
- err("no transfer endpoint found");
- return NULL;
- }
- i = 0;
- gspca_dev->bulk = 1;
+ err("no transfer endpoint found");
+ return NULL;
}
PDEBUG(D_STREAM, "use alt %d ep 0x%02x",
i, ep->desc.bEndpointAddress);
- if (i > 0) {
+ if (gspca_dev->nbalt > 1) {
ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, i);
if (ret < 0) {
- err("set interface err %d", ret);
+ err("set alt %d err %d", i, ret);
return NULL;
}
}
@@ -517,13 +509,13 @@ static int create_urbs(struct gspca_dev *gspca_dev,
/* calculate the packet size and the number of packets */
psize = le16_to_cpu(ep->desc.wMaxPacketSize);
- if (!gspca_dev->bulk) { /* isoc */
+ if (!gspca_dev->cam.bulk) { /* isoc */
/* See paragraph 5.9 / table 5-11 of the usb 2.0 spec. */
psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
- npkt = ISO_MAX_SIZE / psize;
- if (npkt > ISO_MAX_PKT)
- npkt = ISO_MAX_PKT;
+ npkt = gspca_dev->cam.npkt;
+ if (npkt == 0)
+ npkt = 32; /* default value */
bsize = psize * npkt;
PDEBUG(D_STREAM,
"isoc %d pkts size %d = bsize:%d",
@@ -617,7 +609,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
goto out;
/* clear the bulk endpoint */
- if (gspca_dev->bulk)
+ if (gspca_dev->cam.bulk)
usb_clear_halt(gspca_dev->dev,
gspca_dev->urb[0]->pipe);
@@ -630,7 +622,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
gspca_dev->streaming = 1;
/* some bulk transfers are started by the subdriver */
- if (gspca_dev->bulk && gspca_dev->cam.bulk_nurbs == 0)
+ if (gspca_dev->cam.bulk && gspca_dev->cam.bulk_nurbs == 0)
break;
/* submit the URBs */
@@ -661,6 +653,8 @@ static int gspca_set_alt0(struct gspca_dev *gspca_dev)
{
int ret;
+ if (gspca_dev->alt == 0)
+ return 0;
ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0);
if (ret < 0)
PDEBUG(D_ERR|D_STREAM, "set alt 0 err %d", ret);
@@ -869,6 +863,32 @@ out:
return ret;
}
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct gspca_dev *gspca_dev = priv;
+ int i;
+ __u32 index = 0;
+
+ for (i = 0; i < gspca_dev->cam.nmodes; i++) {
+ if (fsize->pixel_format !=
+ gspca_dev->cam.cam_mode[i].pixelformat)
+ continue;
+
+ if (fsize->index == index) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width =
+ gspca_dev->cam.cam_mode[i].width;
+ fsize->discrete.height =
+ gspca_dev->cam.cam_mode[i].height;
+ return 0;
+ }
+ index++;
+ }
+
+ return -EINVAL;
+}
+
static void gspca_release(struct video_device *vfd)
{
struct gspca_dev *gspca_dev = container_of(vfd, struct gspca_dev, vdev);
@@ -989,43 +1009,54 @@ out:
return ret;
}
+static const struct ctrl *get_ctrl(struct gspca_dev *gspca_dev,
+ int id)
+{
+ const struct ctrl *ctrls;
+ int i;
+
+ for (i = 0, ctrls = gspca_dev->sd_desc->ctrls;
+ i < gspca_dev->sd_desc->nctrls;
+ i++, ctrls++) {
+ if (gspca_dev->ctrl_dis & (1 << i))
+ continue;
+ if (id == ctrls->qctrl.id)
+ return ctrls;
+ }
+ return NULL;
+}
+
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *q_ctrl)
{
struct gspca_dev *gspca_dev = priv;
- int i, ix;
+ const struct ctrl *ctrls;
+ int i;
u32 id;
- ix = -1;
+ ctrls = NULL;
id = q_ctrl->id;
if (id & V4L2_CTRL_FLAG_NEXT_CTRL) {
id &= V4L2_CTRL_ID_MASK;
id++;
for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id < id)
+ if (gspca_dev->ctrl_dis & (1 << i))
continue;
- if (ix < 0) {
- ix = i;
+ if (ctrls->qctrl.id < id)
continue;
+ if (ctrls != NULL) {
+ if (gspca_dev->sd_desc->ctrls[i].qctrl.id
+ > ctrls->qctrl.id)
+ continue;
}
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id
- > gspca_dev->sd_desc->ctrls[ix].qctrl.id)
- continue;
- ix = i;
- }
- }
- for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
- if (id == gspca_dev->sd_desc->ctrls[i].qctrl.id) {
- ix = i;
- break;
+ ctrls = &gspca_dev->sd_desc->ctrls[i];
}
+ } else {
+ ctrls = get_ctrl(gspca_dev, id);
}
- if (ix < 0)
+ if (ctrls == NULL)
return -EINVAL;
- memcpy(q_ctrl, &gspca_dev->sd_desc->ctrls[ix].qctrl,
- sizeof *q_ctrl);
- if (gspca_dev->ctrl_dis & (1 << ix))
- q_ctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
+ memcpy(q_ctrl, ctrls, sizeof *q_ctrl);
return 0;
}
@@ -1034,56 +1065,45 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
{
struct gspca_dev *gspca_dev = priv;
const struct ctrl *ctrls;
- int i, ret;
+ int ret;
- for (i = 0, ctrls = gspca_dev->sd_desc->ctrls;
- i < gspca_dev->sd_desc->nctrls;
- i++, ctrls++) {
- if (ctrl->id != ctrls->qctrl.id)
- continue;
- if (gspca_dev->ctrl_dis & (1 << i))
- return -EINVAL;
- if (ctrl->value < ctrls->qctrl.minimum
- || ctrl->value > ctrls->qctrl.maximum)
- return -ERANGE;
- PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value);
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
- if (gspca_dev->present)
- ret = ctrls->set(gspca_dev, ctrl->value);
- else
- ret = -ENODEV;
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
- }
- return -EINVAL;
+ ctrls = get_ctrl(gspca_dev, ctrl->id);
+ if (ctrls == NULL)
+ return -EINVAL;
+
+ if (ctrl->value < ctrls->qctrl.minimum
+ || ctrl->value > ctrls->qctrl.maximum)
+ return -ERANGE;
+ PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value);
+ if (mutex_lock_interruptible(&gspca_dev->usb_lock))
+ return -ERESTARTSYS;
+ if (gspca_dev->present)
+ ret = ctrls->set(gspca_dev, ctrl->value);
+ else
+ ret = -ENODEV;
+ mutex_unlock(&gspca_dev->usb_lock);
+ return ret;
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct gspca_dev *gspca_dev = priv;
-
const struct ctrl *ctrls;
- int i, ret;
+ int ret;
- for (i = 0, ctrls = gspca_dev->sd_desc->ctrls;
- i < gspca_dev->sd_desc->nctrls;
- i++, ctrls++) {
- if (ctrl->id != ctrls->qctrl.id)
- continue;
- if (gspca_dev->ctrl_dis & (1 << i))
- return -EINVAL;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
- if (gspca_dev->present)
- ret = ctrls->get(gspca_dev, &ctrl->value);
- else
- ret = -ENODEV;
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
- }
- return -EINVAL;
+ ctrls = get_ctrl(gspca_dev, ctrl->id);
+ if (ctrls == NULL)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&gspca_dev->usb_lock))
+ return -ERESTARTSYS;
+ if (gspca_dev->present)
+ ret = ctrls->get(gspca_dev, &ctrl->value);
+ else
+ ret = -ENODEV;
+ mutex_unlock(&gspca_dev->usb_lock);
+ return ret;
}
/*fixme: have an audio flag in gspca_dev?*/
@@ -1864,6 +1884,7 @@ static const struct v4l2_ioctl_ops dev_ioctl_ops = {
.vidioc_g_parm = vidioc_g_parm,
.vidioc_s_parm = vidioc_s_parm,
.vidioc_s_std = vidioc_s_std,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
.vidiocgmbuf = vidiocgmbuf,
#endif
@@ -1943,7 +1964,7 @@ int gspca_dev_probe(struct usb_interface *intf,
/* init video stuff */
memcpy(&gspca_dev->vdev, &gspca_template, sizeof gspca_template);
- gspca_dev->vdev.parent = &dev->dev;
+ gspca_dev->vdev.parent = &intf->dev;
gspca_dev->module = module;
gspca_dev->present = 1;
ret = video_register_device(&gspca_dev->vdev,
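
Much of the gspca.c churn above funnels the control ioctls through one helper: get_ctrl() walks the subdriver's control table, skips entries whose bit is set in ctrl_dis, and returns the matching descriptor, so vidioc_queryctrl/s_ctrl/g_ctrl no longer duplicate the loop. Below is a small sketch of that lookup over a hypothetical table, using a plain id field where the driver uses struct ctrl and V4L2 control ids.

#include <stdio.h>

/* Hypothetical, simplified stand-in for struct ctrl: just an id. */
struct ctrl_desc {
	unsigned int id;
};

/* Return the first enabled entry whose id matches, or NULL. ctrl_dis is a
 * bitmask of disabled entries, one bit per table index, as in gspca_dev. */
static const struct ctrl_desc *get_ctrl(const struct ctrl_desc *table,
					int nctrls, unsigned int ctrl_dis,
					unsigned int id)
{
	int i;

	for (i = 0; i < nctrls; i++) {
		if (ctrl_dis & (1u << i))
			continue;
		if (table[i].id == id)
			return &table[i];
	}
	return NULL;
}

int main(void)
{
	const struct ctrl_desc table[] = { { 0x100 }, { 0x101 }, { 0x102 } };
	unsigned int ctrl_dis = 1u << 1;	/* entry 1 disabled */

	printf("0x101 -> %s\n",
	       get_ctrl(table, 3, ctrl_dis, 0x101) ? "found" : "not found");
	printf("0x102 -> %s\n",
	       get_ctrl(table, 3, ctrl_dis, 0x102) ? "found" : "not found");
	return 0;
}
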
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 58e8ff02136..bd1faff8864 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -44,8 +44,6 @@ extern int gspca_debug;
#define GSPCA_MAX_FRAMES 16 /* maximum number of video frame buffers */
/* image transfers */
#define MAX_NURBS 4 /* max number of URBs */
-#define ISO_MAX_PKT 32 /* max number of packets in an ISOC transfer */
-#define ISO_MAX_SIZE 0x8000 /* max size of one URB buffer (32 Kb) */
/* device information - set at probe time */
struct cam {
@@ -56,6 +54,9 @@ struct cam {
* - cannot be > MAX_NURBS
* - when 0 and bulk_size != 0 means
* 1 URB and submit done by subdriver */
+ u8 bulk; /* image transfer by 0:isoc / 1:bulk */
+ u8 npkt; /* number of packets in an ISOC message
+ * 0 is the default value: 32 packets */
u32 input_flags; /* value for ENUM_INPUT status flags */
};
@@ -168,7 +169,6 @@ struct gspca_dev {
__u8 iface; /* USB interface number */
__u8 alt; /* USB alternate setting */
__u8 nbalt; /* number of USB alternate settings */
- u8 bulk; /* image transfer by 0:isoc / 1:bulk */
};
int gspca_dev_probe(struct usb_interface *intf,
diff --git a/drivers/media/video/gspca/m5602/Makefile b/drivers/media/video/gspca/m5602/Makefile
index 9fa3644f486..bf7a19a1e6d 100644
--- a/drivers/media/video/gspca/m5602/Makefile
+++ b/drivers/media/video/gspca/m5602/Makefile
@@ -2,9 +2,10 @@ obj-$(CONFIG_USB_M5602) += gspca_m5602.o
gspca_m5602-objs := m5602_core.o \
m5602_ov9650.o \
+ m5602_ov7660.o \
m5602_mt9m111.o \
m5602_po1030.o \
m5602_s5k83a.o \
m5602_s5k4aa.o
-EXTRA_CFLAGS += -Idrivers/media/video/gspca
\ No newline at end of file
+EXTRA_CFLAGS += -Idrivers/media/video/gspca
diff --git a/drivers/media/video/gspca/m5602/m5602_bridge.h b/drivers/media/video/gspca/m5602/m5602_bridge.h
index 8f1cea6fd3b..1127a405c9b 100644
--- a/drivers/media/video/gspca/m5602/m5602_bridge.h
+++ b/drivers/media/video/gspca/m5602/m5602_bridge.h
@@ -45,6 +45,15 @@
#define M5602_XB_SEN_CLK_DIV 0x15
#define M5602_XB_AUD_CLK_CTRL 0x16
#define M5602_XB_AUD_CLK_DIV 0x17
+#define M5602_OB_AC_LINK_STATE 0x22
+#define M5602_OB_PCM_SLOT_INDEX 0x24
+#define M5602_OB_GPIO_SLOT_INDEX 0x25
+#define M5602_OB_ACRX_STATUS_ADDRESS_H 0x28
+#define M5602_OB_ACRX_STATUS_DATA_L 0x29
+#define M5602_OB_ACRX_STATUS_DATA_H 0x2a
+#define M5602_OB_ACTX_COMMAND_ADDRESS 0x31
+#define M5602_OB_ACRX_COMMAND_DATA_L 0x32
+#define M5602_OB_ACTX_COMMAND_DATA_H 0X33
#define M5602_XB_DEVCTR1 0x41
#define M5602_XB_EPSETR0 0x42
#define M5602_XB_EPAFCTR 0x47
@@ -77,7 +86,18 @@
#define M5602_XB_GPIO_EN_L 0x75
#define M5602_XB_GPIO_DAT 0x76
#define M5602_XB_GPIO_DIR 0x77
-#define M5602_XB_MISC_CTL 0x70
+#define M5602_XB_SEN_CLK_CONTROL 0x80
+#define M5602_XB_SEN_CLK_DIVISION 0x81
+#define M5602_XB_CPR_CLK_CONTROL 0x82
+#define M5602_XB_CPR_CLK_DIVISION 0x83
+#define M5602_XB_MCU_CLK_CONTROL 0x84
+#define M5602_XB_MCU_CLK_DIVISION 0x85
+#define M5602_XB_DCT_CLK_CONTROL 0x86
+#define M5602_XB_DCT_CLK_DIVISION 0x87
+#define M5602_XB_EC_CLK_CONTROL 0x88
+#define M5602_XB_EC_CLK_DIVISION 0x89
+#define M5602_XB_LBUF_CLK_CONTROL 0x8a
+#define M5602_XB_LBUF_CLK_DIVISION 0x8b
#define I2C_BUSY 0x80
@@ -128,10 +148,10 @@ struct sd {
};
int m5602_read_bridge(
- struct sd *sd, u8 address, u8 *i2c_data);
+ struct sd *sd, const u8 address, u8 *i2c_data);
int m5602_write_bridge(
- struct sd *sd, u8 address, u8 i2c_data);
+ struct sd *sd, const u8 address, const u8 i2c_data);
int m5602_write_sensor(struct sd *sd, const u8 address,
u8 *i2c_data, const u8 len);
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index 1aac2985fee..8a5bba16ff3 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -17,6 +17,7 @@
*/
#include "m5602_ov9650.h"
+#include "m5602_ov7660.h"
#include "m5602_mt9m111.h"
#include "m5602_po1030.h"
#include "m5602_s5k83a.h"
@@ -35,7 +36,7 @@ static const __devinitdata struct usb_device_id m5602_table[] = {
MODULE_DEVICE_TABLE(usb, m5602_table);
/* Reads a byte from the m5602 */
-int m5602_read_bridge(struct sd *sd, u8 address, u8 *i2c_data)
+int m5602_read_bridge(struct sd *sd, const u8 address, u8 *i2c_data)
{
int err;
struct usb_device *udev = sd->gspca_dev.dev;
@@ -56,7 +57,7 @@ int m5602_read_bridge(struct sd *sd, u8 address, u8 *i2c_data)
}
/* Writes a byte to to the m5602 */
-int m5602_write_bridge(struct sd *sd, u8 address, u8 i2c_data)
+int m5602_write_bridge(struct sd *sd, const u8 address, const u8 i2c_data)
{
int err;
struct usb_device *udev = sd->gspca_dev.dev;
@@ -80,6 +81,17 @@ int m5602_write_bridge(struct sd *sd, u8 address, u8 i2c_data)
return (err < 0) ? err : 0;
}
+int m5602_wait_for_i2c(struct sd *sd)
+{
+ int err;
+ u8 data;
+
+ do {
+ err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, &data);
+ } while ((data & I2C_BUSY) && !err);
+ return err;
+}
+
int m5602_read_sensor(struct sd *sd, const u8 address,
u8 *i2c_data, const u8 len)
{
@@ -88,9 +100,7 @@ int m5602_read_sensor(struct sd *sd, const u8 address,
if (!len || len > sd->sensor->i2c_regW)
return -EINVAL;
- do {
- err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, i2c_data);
- } while ((*i2c_data & I2C_BUSY) && !err);
+ err = m5602_wait_for_i2c(sd);
if (err < 0)
return err;
@@ -103,21 +113,25 @@ int m5602_read_sensor(struct sd *sd, const u8 address,
if (err < 0)
return err;
+ /* Sensors with registers that are of only
+ one byte width are differently read */
+
+ /* FIXME: This works with the ov9650, but has issues with the po1030 */
if (sd->sensor->i2c_regW == 1) {
- err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, len);
+ err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 1);
if (err < 0)
return err;
err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x08);
- if (err < 0)
- return err;
} else {
err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x18 + len);
- if (err < 0)
- return err;
}
for (i = 0; (i < len) && !err; i++) {
+ err = m5602_wait_for_i2c(sd);
+ if (err < 0)
+ return err;
+
err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
PDEBUG(D_CONF, "Reading sensor register "
@@ -206,6 +220,11 @@ static int m5602_probe_sensor(struct sd *sd)
if (!sd->sensor->probe(sd))
return 0;
+ /* Try the ov7660 */
+ sd->sensor = &ov7660;
+ if (!sd->sensor->probe(sd))
+ return 0;
+
/* Try the s5k83a */
sd->sensor = &s5k83a;
if (!sd->sensor->probe(sd))
@@ -409,8 +428,9 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(force_sensor, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(force_sensor,
- "force detection of sensor, "
- "1 = OV9650, 2 = S5K83A, 3 = S5K4AA, 4 = MT9M111, 5 = PO1030");
+ "forces detection of a sensor, "
+ "1 = OV9650, 2 = S5K83A, 3 = S5K4AA, "
+ "4 = MT9M111, 5 = PO1030, 6 = OV7660");
module_param(dump_bridge, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup");
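
m5602_wait_for_i2c() above factors out the open-coded loop in m5602_read_sensor(): it keeps reading the bridge's I2C status register until the I2C_BUSY bit clears or the read itself fails, and the per-byte read path now waits before fetching each data byte. Below is a standalone sketch of that poll loop, with a hypothetical read_status() that simulates the register going idle after a few polls; the real driver polls a hardware register instead.

#include <stdio.h>

#define I2C_BUSY 0x80	/* same bit value as in m5602_bridge.h */

/* Hypothetical register read: reports busy for the first few polls. */
static int read_status(unsigned char *data)
{
	static int polls;

	*data = (++polls < 3) ? I2C_BUSY : 0x00;
	return 0;		/* 0 = success, negative = bus error */
}

/* Poll until the busy bit clears or the read itself fails. */
static int wait_for_i2c(void)
{
	int err;
	unsigned char data;

	do {
		err = read_status(&data);
	} while ((data & I2C_BUSY) && !err);
	return err;
}

int main(void)
{
	printf("wait_for_i2c() returned %d\n", wait_for_i2c());
	return 0;
}
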
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.c b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
index 7d3f9e348ef..8d071dff694 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
@@ -18,6 +18,23 @@
#include "m5602_mt9m111.h"
+static int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
+static int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
+static int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
+static int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int mt9m111_set_auto_white_balance(struct gspca_dev *gspca_dev,
+ __s32 val);
+static int mt9m111_get_auto_white_balance(struct gspca_dev *gspca_dev,
+ __s32 *val);
+static int mt9m111_get_green_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int mt9m111_set_green_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int mt9m111_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int mt9m111_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int mt9m111_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int mt9m111_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
+
static struct v4l2_pix_format mt9m111_modes[] = {
{
640,
@@ -32,6 +49,7 @@ static struct v4l2_pix_format mt9m111_modes[] = {
};
const static struct ctrl mt9m111_ctrls[] = {
+#define VFLIP_IDX 0
{
{
.id = V4L2_CID_VFLIP,
@@ -44,7 +62,9 @@ const static struct ctrl mt9m111_ctrls[] = {
},
.set = mt9m111_set_vflip,
.get = mt9m111_get_vflip
- }, {
+ },
+#define HFLIP_IDX 1
+ {
{
.id = V4L2_CID_HFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -56,7 +76,9 @@ const static struct ctrl mt9m111_ctrls[] = {
},
.set = mt9m111_set_hflip,
.get = mt9m111_get_hflip
- }, {
+ },
+#define GAIN_IDX 2
+ {
{
.id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -64,21 +86,80 @@ const static struct ctrl mt9m111_ctrls[] = {
.minimum = 0,
.maximum = (INITIAL_MAX_GAIN - 1) * 2 * 2 * 2,
.step = 1,
- .default_value = DEFAULT_GAIN,
+ .default_value = MT9M111_DEFAULT_GAIN,
.flags = V4L2_CTRL_FLAG_SLIDER
},
.set = mt9m111_set_gain,
.get = mt9m111_get_gain
- }
+ },
+#define AUTO_WHITE_BALANCE_IDX 3
+ {
+ {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto white balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ .set = mt9m111_set_auto_white_balance,
+ .get = mt9m111_get_auto_white_balance
+ },
+#define GREEN_BALANCE_IDX 4
+ {
+ {
+ .id = M5602_V4L2_CID_GREEN_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "green balance",
+ .minimum = 0x00,
+ .maximum = 0x7ff,
+ .step = 0x1,
+ .default_value = MT9M111_GREEN_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
+ },
+ .set = mt9m111_set_green_balance,
+ .get = mt9m111_get_green_balance
+ },
+#define BLUE_BALANCE_IDX 5
+ {
+ {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "blue balance",
+ .minimum = 0x00,
+ .maximum = 0x7ff,
+ .step = 0x1,
+ .default_value = MT9M111_BLUE_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
+ },
+ .set = mt9m111_set_blue_balance,
+ .get = mt9m111_get_blue_balance
+ },
+#define RED_BALANCE_IDX 6
+ {
+ {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "red balance",
+ .minimum = 0x00,
+ .maximum = 0x7ff,
+ .step = 0x1,
+ .default_value = MT9M111_RED_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
+ },
+ .set = mt9m111_set_red_balance,
+ .get = mt9m111_get_red_balance
+ },
};
-
static void mt9m111_dump_registers(struct sd *sd);
int mt9m111_probe(struct sd *sd)
{
u8 data[2] = {0x00, 0x00};
int i;
+ s32 *sensor_settings;
if (force_sensor) {
if (force_sensor == MT9M111_SENSOR) {
@@ -117,16 +198,27 @@ int mt9m111_probe(struct sd *sd)
return -ENODEV;
sensor_found:
+ sensor_settings = kmalloc(ARRAY_SIZE(mt9m111_ctrls) * sizeof(s32),
+ GFP_KERNEL);
+ if (!sensor_settings)
+ return -ENOMEM;
+
sd->gspca_dev.cam.cam_mode = mt9m111_modes;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(mt9m111_modes);
sd->desc->ctrls = mt9m111_ctrls;
sd->desc->nctrls = ARRAY_SIZE(mt9m111_ctrls);
+
+ for (i = 0; i < ARRAY_SIZE(mt9m111_ctrls); i++)
+ sensor_settings[i] = mt9m111_ctrls[i].qctrl.default_value;
+ sd->sensor_priv = sensor_settings;
+
return 0;
}
int mt9m111_init(struct sd *sd)
{
int i, err = 0;
+ s32 *sensor_settings = sd->sensor_priv;
/* Init the sensor */
for (i = 0; i < ARRAY_SIZE(init_mt9m111) && !err; i++) {
@@ -147,36 +239,154 @@ int mt9m111_init(struct sd *sd)
if (dump_sensor)
mt9m111_dump_registers(sd);
- return (err < 0) ? err : 0;
+ err = mt9m111_set_vflip(&sd->gspca_dev, sensor_settings[VFLIP_IDX]);
+ if (err < 0)
+ return err;
+
+ err = mt9m111_set_hflip(&sd->gspca_dev, sensor_settings[HFLIP_IDX]);
+ if (err < 0)
+ return err;
+
+ err = mt9m111_set_green_balance(&sd->gspca_dev,
+ sensor_settings[GREEN_BALANCE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = mt9m111_set_blue_balance(&sd->gspca_dev,
+ sensor_settings[BLUE_BALANCE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = mt9m111_set_red_balance(&sd->gspca_dev,
+ sensor_settings[RED_BALANCE_IDX]);
+ if (err < 0)
+ return err;
+
+ return mt9m111_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
}
-int mt9m111_power_down(struct sd *sd)
+int mt9m111_start(struct sd *sd)
{
- return 0;
+ int i, err = 0;
+ u8 data[2];
+ struct cam *cam = &sd->gspca_dev.cam;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ int width = cam->cam_mode[sd->gspca_dev.curr_mode].width - 1;
+ int height = cam->cam_mode[sd->gspca_dev.curr_mode].height;
+
+ for (i = 0; i < ARRAY_SIZE(start_mt9m111) && !err; i++) {
+ if (start_mt9m111[i][0] == BRIDGE) {
+ err = m5602_write_bridge(sd,
+ start_mt9m111[i][1],
+ start_mt9m111[i][2]);
+ } else {
+ data[0] = start_mt9m111[i][2];
+ data[1] = start_mt9m111[i][3];
+ err = m5602_write_sensor(sd,
+ start_mt9m111[i][1], data, 2);
+ }
+ }
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height >> 8) & 0xff);
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height & 0xff));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < 2 && !err; i++)
+ err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0);
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 2);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < 2 && !err; i++)
+ err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, 0);
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA,
+ (width >> 8) & 0xff);
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, width & 0xff);
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
+ if (err < 0)
+ return err;
+
+ switch (width) {
+ case 640:
+ PDEBUG(D_V4L2, "Configuring camera for VGA mode");
+ data[0] = MT9M111_RMB_OVER_SIZED;
+ data[1] = MT9M111_RMB_ROW_SKIP_2X |
+ MT9M111_RMB_COLUMN_SKIP_2X |
+ (sensor_settings[VFLIP_IDX] << 0) |
+ (sensor_settings[HFLIP_IDX] << 1);
+
+ err = m5602_write_sensor(sd,
+ MT9M111_SC_R_MODE_CONTEXT_B, data, 2);
+ break;
+
+ case 320:
+ PDEBUG(D_V4L2, "Configuring camera for QVGA mode");
+ data[0] = MT9M111_RMB_OVER_SIZED;
+ data[1] = MT9M111_RMB_ROW_SKIP_4X |
+ MT9M111_RMB_COLUMN_SKIP_4X |
+ (sensor_settings[VFLIP_IDX] << 0) |
+ (sensor_settings[HFLIP_IDX] << 1);
+ err = m5602_write_sensor(sd,
+ MT9M111_SC_R_MODE_CONTEXT_B, data, 2);
+ break;
+ }
+ return err;
}
-int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
+void mt9m111_disconnect(struct sd *sd)
+{
+ sd->sensor = NULL;
+ kfree(sd->sensor_priv);
+}
+
+static int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
{
- int err;
- u8 data[2] = {0x00, 0x00};
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
- err = m5602_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
- data, 2);
- *val = data[0] & MT9M111_RMB_MIRROR_ROWS;
+ *val = sensor_settings[VFLIP_IDX];
PDEBUG(D_V4L2, "Read vertical flip %d", *val);
- return err;
+ return 0;
}
-int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
+static int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u8 data[2] = {0x00, 0x00};
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
PDEBUG(D_V4L2, "Set vertical flip to %d", val);
+ sensor_settings[VFLIP_IDX] = val;
+
+ /* The mt9m111 is flipped by default */
+ val = !val;
+
/* Set the correct page map */
err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
if (err < 0)
@@ -186,34 +396,37 @@ int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
if (err < 0)
return err;
- data[0] = (data[0] & 0xfe) | val;
+ data[1] = (data[1] & 0xfe) | val;
err = m5602_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
data, 2);
return err;
}
-int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
+static int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
{
- int err;
- u8 data[2] = {0x00, 0x00};
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
- err = m5602_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
- data, 2);
- *val = data[0] & MT9M111_RMB_MIRROR_COLS;
+ *val = sensor_settings[HFLIP_IDX];
PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
- return err;
+ return 0;
}
-int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
+static int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u8 data[2] = {0x00, 0x00};
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
PDEBUG(D_V4L2, "Set horizontal flip to %d", val);
+ sensor_settings[HFLIP_IDX] = val;
+
+ /* The mt9m111 is flipped by default */
+ val = !val;
+
/* Set the correct page map */
err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
if (err < 0)
@@ -223,36 +436,62 @@ int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
if (err < 0)
return err;
- data[0] = (data[0] & 0xfd) | ((val << 1) & 0x02);
+ data[1] = (data[1] & 0xfd) | ((val << 1) & 0x02);
err = m5602_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
data, 2);
return err;
}
-int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
+static int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
{
- int err, tmp;
- u8 data[2] = {0x00, 0x00};
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
- err = m5602_read_sensor(sd, MT9M111_SC_GLOBAL_GAIN, data, 2);
- tmp = ((data[1] << 8) | data[0]);
+ *val = sensor_settings[GAIN_IDX];
+ PDEBUG(D_V4L2, "Read gain %d", *val);
- *val = ((tmp & (1 << 10)) * 2) |
- ((tmp & (1 << 9)) * 2) |
- ((tmp & (1 << 8)) * 2) |
- (tmp & 0x7f);
+ return 0;
+}
- PDEBUG(D_V4L2, "Read gain %d", *val);
+static int mt9m111_set_auto_white_balance(struct gspca_dev *gspca_dev,
+ __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+ int err;
+ u8 data[2];
+
+ err = m5602_read_sensor(sd, MT9M111_CP_OPERATING_MODE_CTL, data, 2);
+ if (err < 0)
+ return err;
+
+ sensor_settings[AUTO_WHITE_BALANCE_IDX] = val & 0x01;
+ data[1] = ((data[1] & 0xfd) | ((val & 0x01) << 1));
+ err = m5602_write_sensor(sd, MT9M111_CP_OPERATING_MODE_CTL, data, 2);
+
+ PDEBUG(D_V4L2, "Set auto white balance %d", val);
return err;
}
-int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val)
+static int mt9m111_get_auto_white_balance(struct gspca_dev *gspca_dev,
+ __s32 *val) {
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[AUTO_WHITE_BALANCE_IDX];
+ PDEBUG(D_V4L2, "Read auto white balance %d", *val);
+ return 0;
+}
+
+static int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val)
{
int err, tmp;
u8 data[2] = {0x00, 0x00};
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[GAIN_IDX] = val;
/* Set the correct page map */
err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
@@ -275,8 +514,8 @@ int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val)
else
tmp = val;
- data[1] = (tmp & 0xff00) >> 8;
- data[0] = (tmp & 0xff);
+ data[1] = (tmp & 0xff);
+ data[0] = (tmp & 0xff00) >> 8;
PDEBUG(D_V4L2, "tmp=%d, data[1]=%d, data[0]=%d", tmp,
data[1], data[0]);
@@ -286,6 +525,89 @@ int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
+static int mt9m111_set_green_balance(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ u8 data[2];
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[GREEN_BALANCE_IDX] = val;
+ data[1] = (val & 0xff);
+ data[0] = (val & 0xff00) >> 8;
+
+ PDEBUG(D_V4L2, "Set green balance %d", val);
+ err = m5602_write_sensor(sd, MT9M111_SC_GREEN_1_GAIN,
+ data, 2);
+ if (err < 0)
+ return err;
+
+ return m5602_write_sensor(sd, MT9M111_SC_GREEN_2_GAIN,
+ data, 2);
+}
+
+static int mt9m111_get_green_balance(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[GREEN_BALANCE_IDX];
+ PDEBUG(D_V4L2, "Read green balance %d", *val);
+ return 0;
+}
+
+static int mt9m111_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
+{
+ u8 data[2];
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[BLUE_BALANCE_IDX] = val;
+ data[1] = (val & 0xff);
+ data[0] = (val & 0xff00) >> 8;
+
+ PDEBUG(D_V4L2, "Set blue balance %d", val);
+
+ return m5602_write_sensor(sd, MT9M111_SC_BLUE_GAIN,
+ data, 2);
+}
+
+static int mt9m111_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[BLUE_BALANCE_IDX];
+ PDEBUG(D_V4L2, "Read blue balance %d", *val);
+ return 0;
+}
+
+static int mt9m111_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
+{
+ u8 data[2];
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[RED_BALANCE_IDX] = val;
+ data[1] = (val & 0xff);
+ data[0] = (val & 0xff00) >> 8;
+
+ PDEBUG(D_V4L2, "Set red balance %d", val);
+
+ return m5602_write_sensor(sd, MT9M111_SC_RED_GAIN,
+ data, 2);
+}
+
+static int mt9m111_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[RED_BALANCE_IDX];
+ PDEBUG(D_V4L2, "Read red balance %d", *val);
+ return 0;
+}
+
static void mt9m111_dump_registers(struct sd *sd)
{
u8 address, value[2] = {0x00, 0x00};
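
The mt9m111 changes stop reading flip/gain/balance values back from the sensor: probe allocates a sensor_settings array (one s32 per control, seeded with the V4L2 defaults), the set handlers record the requested value there before programming the chip, the get handlers return the cached copy, and init/start replay the cache into the hardware; disconnect frees it. Below is a tiny sketch of that cache pattern with hypothetical VFLIP/GAIN slots and a printf standing in for m5602_write_sensor().

#include <stdio.h>
#include <stdlib.h>

enum { VFLIP_IDX, GAIN_IDX, NCTRLS };	/* hypothetical control indices */

static const int defaults[NCTRLS] = { 0, 283 };

/* "Hardware" write: in the driver this is m5602_write_sensor(). */
static void write_sensor(int idx, int val)
{
	printf("write ctrl %d = %d to sensor\n", idx, val);
}

static int set_ctrl(int *cache, int idx, int val)
{
	cache[idx] = val;		/* remember what userspace asked for */
	write_sensor(idx, val);		/* then program the chip */
	return 0;
}

static int get_ctrl(const int *cache, int idx)
{
	return cache[idx];		/* no sensor read-back needed */
}

int main(void)
{
	int *cache = malloc(sizeof(defaults));
	int i;

	if (!cache)
		return 1;
	for (i = 0; i < NCTRLS; i++)
		cache[i] = defaults[i];	/* probe: seed with defaults */

	set_ctrl(cache, GAIN_IDX, 100);
	printf("cached gain = %d\n", get_ctrl(cache, GAIN_IDX));

	free(cache);			/* disconnect: kfree(sd->sensor_priv) */
	return 0;
}
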
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.h b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
index 00c6db02bdb..b3de7782309 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.h
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
@@ -37,7 +37,6 @@
#define MT9M111_SC_VBLANK_CONTEXT_A 0x08
#define MT9M111_SC_SHUTTER_WIDTH 0x09
#define MT9M111_SC_ROW_SPEED 0x0a
-
#define MT9M111_SC_EXTRA_DELAY 0x0b
#define MT9M111_SC_SHUTTER_DELAY 0x0c
#define MT9M111_SC_RESET 0x0d
@@ -50,9 +49,6 @@
#define MT9M111_SC_GREEN_2_GAIN 0x2e
#define MT9M111_SC_GLOBAL_GAIN 0x2f
-#define MT9M111_RMB_MIRROR_ROWS (1 << 0)
-#define MT9M111_RMB_MIRROR_COLS (1 << 1)
-
#define MT9M111_CONTEXT_CONTROL 0xc8
#define MT9M111_PAGE_MAP 0xf0
#define MT9M111_BYTEWISE_ADDRESS 0xf1
@@ -74,8 +70,37 @@
#define MT9M111_COLORPIPE 0x01
#define MT9M111_CAMERA_CONTROL 0x02
+#define MT9M111_RESET (1 << 0)
+#define MT9M111_RESTART (1 << 1)
+#define MT9M111_ANALOG_STANDBY (1 << 2)
+#define MT9M111_CHIP_ENABLE (1 << 3)
+#define MT9M111_CHIP_DISABLE (0 << 3)
+#define MT9M111_OUTPUT_DISABLE (1 << 4)
+#define MT9M111_SHOW_BAD_FRAMES (1 << 0)
+#define MT9M111_RESTART_BAD_FRAMES (1 << 1)
+#define MT9M111_SYNCHRONIZE_CHANGES (1 << 7)
+
+#define MT9M111_RMB_OVER_SIZED (1 << 0)
+#define MT9M111_RMB_MIRROR_ROWS (1 << 0)
+#define MT9M111_RMB_MIRROR_COLS (1 << 1)
+#define MT9M111_RMB_ROW_SKIP_2X (1 << 2)
+#define MT9M111_RMB_COLUMN_SKIP_2X (1 << 3)
+#define MT9M111_RMB_ROW_SKIP_4X (1 << 4)
+#define MT9M111_RMB_COLUMN_SKIP_4X (1 << 5)
+
+#define MT9M111_COLOR_MATRIX_BYPASS (1 << 4)
+#define MT9M111_SEL_CONTEXT_B (1 << 3)
+
+#define MT9M111_TRISTATE_PIN_IN_STANDBY (1 << 1)
+#define MT9M111_SOC_SOFT_STANDBY (1 << 0)
+
+#define MT9M111_2D_DEFECT_CORRECTION_ENABLE (1 << 0)
+
#define INITIAL_MAX_GAIN 64
-#define DEFAULT_GAIN 283
+#define MT9M111_DEFAULT_GAIN 283
+#define MT9M111_GREEN_GAIN_DEFAULT 0x20
+#define MT9M111_BLUE_GAIN_DEFAULT 0x20
+#define MT9M111_RED_GAIN_DEFAULT 0x20
/*****************************************************************************/
@@ -85,16 +110,10 @@ extern int dump_sensor;
int mt9m111_probe(struct sd *sd);
int mt9m111_init(struct sd *sd);
-int mt9m111_power_down(struct sd *sd);
+int mt9m111_start(struct sd *sd);
+void mt9m111_disconnect(struct sd *sd);
-int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
-int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
-int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
-int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
-int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
-int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val);
-
-const static struct m5602_sensor mt9m111 = {
+static const struct m5602_sensor mt9m111 = {
.name = "MT9M111",
.i2c_slave_id = 0xba,
@@ -102,7 +121,8 @@ const static struct m5602_sensor mt9m111 = {
.probe = mt9m111_probe,
.init = mt9m111_init,
- .power_down = mt9m111_power_down
+ .disconnect = mt9m111_disconnect,
+ .start = mt9m111_start,
};
static const unsigned char preinit_mt9m111[][4] =
@@ -117,7 +137,14 @@ static const unsigned char preinit_mt9m111[][4] =
{BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
{SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0xff, 0xf7},
+ {SENSOR, MT9M111_SC_RESET,
+ MT9M111_RESET |
+ MT9M111_RESTART |
+ MT9M111_ANALOG_STANDBY |
+ MT9M111_CHIP_DISABLE,
+ MT9M111_SHOW_BAD_FRAMES |
+ MT9M111_RESTART_BAD_FRAMES |
+ MT9M111_SYNCHRONIZE_CHANGES},
{BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
{BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
@@ -145,731 +172,42 @@ static const unsigned char init_mt9m111[][4] =
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d, 0x00},
- {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
{BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
-
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0xff, 0xff},
- {SENSOR, MT9M111_SC_RESET, 0xff, 0xff},
- {SENSOR, MT9M111_SC_RESET, 0xff, 0xde},
- {SENSOR, MT9M111_SC_RESET, 0xff, 0xff},
- {SENSOR, MT9M111_SC_RESET, 0xff, 0xf7},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xb3, 0x00},
-
- {SENSOR, MT9M111_CP_GLOBAL_CLK_CONTROL, 0xff, 0xff},
-
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x3e, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
{BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
{BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00},
-
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x05},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
- {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00, 0x10},
- {SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a},
- {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00, 0x01},
- {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00, 0x01},
- {SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00},
- {SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00},
- {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00},
- {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xcd, 0x00},
-
- {SENSOR, 0xcd, 0x00, 0x0e},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xd0, 0x00},
- {SENSOR, 0xd0, 0x00, 0x40},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02},
- {SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
- {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x07},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
- {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
- {SENSOR, 0x33, 0x03, 0x49},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
-
- {SENSOR, 0x33, 0x03, 0x49},
- {SENSOR, 0x34, 0xc0, 0x19},
- {SENSOR, 0x3f, 0x20, 0x20},
- {SENSOR, 0x40, 0x20, 0x20},
- {SENSOR, 0x5a, 0xc0, 0x0a},
- {SENSOR, 0x70, 0x7b, 0x0a},
- {SENSOR, 0x71, 0xff, 0x00},
- {SENSOR, 0x72, 0x19, 0x0e},
- {SENSOR, 0x73, 0x18, 0x0f},
- {SENSOR, 0x74, 0x57, 0x32},
- {SENSOR, 0x75, 0x56, 0x34},
- {SENSOR, 0x76, 0x73, 0x35},
- {SENSOR, 0x77, 0x30, 0x12},
- {SENSOR, 0x78, 0x79, 0x02},
- {SENSOR, 0x79, 0x75, 0x06},
- {SENSOR, 0x7a, 0x77, 0x0a},
- {SENSOR, 0x7b, 0x78, 0x09},
- {SENSOR, 0x7c, 0x7d, 0x06},
- {SENSOR, 0x7d, 0x31, 0x10},
- {SENSOR, 0x7e, 0x00, 0x7e},
- {SENSOR, 0x80, 0x59, 0x04},
- {SENSOR, 0x81, 0x59, 0x04},
- {SENSOR, 0x82, 0x57, 0x0a},
- {SENSOR, 0x83, 0x58, 0x0b},
- {SENSOR, 0x84, 0x47, 0x0c},
- {SENSOR, 0x85, 0x48, 0x0e},
- {SENSOR, 0x86, 0x5b, 0x02},
- {SENSOR, 0x87, 0x00, 0x5c},
- {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, 0x08},
- {SENSOR, 0x60, 0x00, 0x80},
- {SENSOR, 0x61, 0x00, 0x00},
- {SENSOR, 0x62, 0x00, 0x00},
- {SENSOR, 0x63, 0x00, 0x00},
- {SENSOR, 0x64, 0x00, 0x00},
-
- {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d},
- {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x18},
- {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x04},
- {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x08},
- {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x38},
- {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11},
- {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x38},
- {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11},
- {SENSOR, MT9M111_SC_R_MODE_CONTEXT_B, 0x01, 0x03},
- {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x03},
- {SENSOR, 0x30, 0x04, 0x00},
-
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x05, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x07, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0xf4},
- {SENSOR, MT9M111_SC_GLOBAL_GAIN, 0x00, 0xea},
-
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x05, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x07, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
-
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x09},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x0c},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x04},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xb3, 0x00},
- {SENSOR, MT9M111_CP_GLOBAL_CLK_CONTROL, 0x00, 0x03},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
{BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x3e, 0x00},
{BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
{BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00},
-
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x05},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
- {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00, 0x10},
- {SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a},
- {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00, 0x01},
- {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00, 0x01},
- {SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00},
- {SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00},
- {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00},
- {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00},
-
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xcd, 0x00},
- {SENSOR, 0xcd, 0x00, 0x0e},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xd0, 0x00},
- {SENSOR, 0xd0, 0x00, 0x40},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02},
- {SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
- {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x07},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
- {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
- {SENSOR, 0x33, 0x03, 0x49},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
-
- {SENSOR, 0x33, 0x03, 0x49},
- {SENSOR, 0x34, 0xc0, 0x19},
- {SENSOR, 0x3f, 0x20, 0x20},
- {SENSOR, 0x40, 0x20, 0x20},
- {SENSOR, 0x5a, 0xc0, 0x0a},
- {SENSOR, 0x70, 0x7b, 0x0a},
- {SENSOR, 0x71, 0xff, 0x00},
- {SENSOR, 0x72, 0x19, 0x0e},
- {SENSOR, 0x73, 0x18, 0x0f},
- {SENSOR, 0x74, 0x57, 0x32},
- {SENSOR, 0x75, 0x56, 0x34},
- {SENSOR, 0x76, 0x73, 0x35},
- {SENSOR, 0x77, 0x30, 0x12},
- {SENSOR, 0x78, 0x79, 0x02},
- {SENSOR, 0x79, 0x75, 0x06},
- {SENSOR, 0x7a, 0x77, 0x0a},
- {SENSOR, 0x7b, 0x78, 0x09},
- {SENSOR, 0x7c, 0x7d, 0x06},
- {SENSOR, 0x7d, 0x31, 0x10},
- {SENSOR, 0x7e, 0x00, 0x7e},
- {SENSOR, 0x80, 0x59, 0x04},
- {SENSOR, 0x81, 0x59, 0x04},
- {SENSOR, 0x82, 0x57, 0x0a},
- {SENSOR, 0x83, 0x58, 0x0b},
- {SENSOR, 0x84, 0x47, 0x0c},
- {SENSOR, 0x85, 0x48, 0x0e},
- {SENSOR, 0x86, 0x5b, 0x02},
- {SENSOR, 0x87, 0x00, 0x5c},
- {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, 0x08},
- {SENSOR, 0x60, 0x00, 0x80},
- {SENSOR, 0x61, 0x00, 0x00},
- {SENSOR, 0x62, 0x00, 0x00},
- {SENSOR, 0x63, 0x00, 0x00},
- {SENSOR, 0x64, 0x00, 0x00},
-
- {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d},
- {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x18},
- {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x04},
- {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x08},
- {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x38},
- {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11},
- {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x38},
- {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11},
- {SENSOR, MT9M111_SC_R_MODE_CONTEXT_B, 0x01, 0x03},
- {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x03},
- {SENSOR, 0x30, 0x04, 0x00},
-
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x05, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x07, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0xf4},
- {SENSOR, MT9M111_SC_GLOBAL_GAIN, 0x00, 0xea},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
-
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x09},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x0c},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x04},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
-
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xb3, 0x00},
- {SENSOR, MT9M111_CP_GLOBAL_CLK_CONTROL, 0x00, 0x03},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x3e, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
{BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
{BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
{BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
{BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x05},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
{SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
{SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
{SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
{SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
- {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00, 0x10},
+ {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00,
+ MT9M111_CP_OPERATING_MODE_CTL},
{SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a},
- {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00, 0x01},
- {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00, 0x01},
+ {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00,
+ MT9M111_2D_DEFECT_CORRECTION_ENABLE},
+ {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00,
+ MT9M111_2D_DEFECT_CORRECTION_ENABLE},
{SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00},
{SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00},
{SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00},
{SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00},
-
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xcd, 0x00},
{SENSOR, 0xcd, 0x00, 0x0e},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xd0, 0x00},
{SENSOR, 0xd0, 0x00, 0x40},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02},
- {SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
- {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x07},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
- {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
- {SENSOR, 0x33, 0x03, 0x49},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
-
- {SENSOR, 0x33, 0x03, 0x49},
- {SENSOR, 0x34, 0xc0, 0x19},
- {SENSOR, 0x3f, 0x20, 0x20},
- {SENSOR, 0x40, 0x20, 0x20},
- {SENSOR, 0x5a, 0xc0, 0x0a},
- {SENSOR, 0x70, 0x7b, 0x0a},
- {SENSOR, 0x71, 0xff, 0x00},
- {SENSOR, 0x72, 0x19, 0x0e},
- {SENSOR, 0x73, 0x18, 0x0f},
- {SENSOR, 0x74, 0x57, 0x32},
- {SENSOR, 0x75, 0x56, 0x34},
- {SENSOR, 0x76, 0x73, 0x35},
- {SENSOR, 0x77, 0x30, 0x12},
- {SENSOR, 0x78, 0x79, 0x02},
- {SENSOR, 0x79, 0x75, 0x06},
- {SENSOR, 0x7a, 0x77, 0x0a},
- {SENSOR, 0x7b, 0x78, 0x09},
- {SENSOR, 0x7c, 0x7d, 0x06},
- {SENSOR, 0x7d, 0x31, 0x10},
- {SENSOR, 0x7e, 0x00, 0x7e},
- {SENSOR, 0x80, 0x59, 0x04},
- {SENSOR, 0x81, 0x59, 0x04},
- {SENSOR, 0x82, 0x57, 0x0a},
- {SENSOR, 0x83, 0x58, 0x0b},
- {SENSOR, 0x84, 0x47, 0x0c},
- {SENSOR, 0x85, 0x48, 0x0e},
- {SENSOR, 0x86, 0x5b, 0x02},
- {SENSOR, 0x87, 0x00, 0x5c},
- {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, 0x08},
- {SENSOR, 0x60, 0x00, 0x80},
- {SENSOR, 0x61, 0x00, 0x00},
- {SENSOR, 0x62, 0x00, 0x00},
- {SENSOR, 0x63, 0x00, 0x00},
- {SENSOR, 0x64, 0x00, 0x00},
- {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d},
- {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x18},
- {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x04},
- {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x08},
- {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x38},
- {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11},
- {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x38},
- {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11},
- {SENSOR, MT9M111_SC_R_MODE_CONTEXT_B, 0x01, 0x03},
- {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x03},
- {SENSOR, 0x30, 0x04, 0x00},
-
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x05, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x07, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0xf4},
- {SENSOR, MT9M111_SC_GLOBAL_GAIN, 0x00, 0xea},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x09},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x0c},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x04},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xb3, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_CP_GLOBAL_CLK_CONTROL, 0x00, 0x03},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x3e, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x05},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
-
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
- {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00, 0x10},
- {SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a},
- {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00, 0x01},
- {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00, 0x01},
- {SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00},
- {SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00},
- {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00},
- {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00},
-
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xcd, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, 0xcd, 0x00, 0x0e},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xd0, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, 0xd0, 0x00, 0x40},
{SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02},
{SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x07},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
{SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, 0x33, 0x03, 0x49},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
-
- {SENSOR, 0x33, 0x03, 0x49},
- {SENSOR, 0x34, 0xc0, 0x19},
- {SENSOR, 0x3f, 0x20, 0x20},
- {SENSOR, 0x40, 0x20, 0x20},
- {SENSOR, 0x5a, 0xc0, 0x0a},
- {SENSOR, 0x70, 0x7b, 0x0a},
- {SENSOR, 0x71, 0xff, 0x00},
- {SENSOR, 0x72, 0x19, 0x0e},
- {SENSOR, 0x73, 0x18, 0x0f},
- {SENSOR, 0x74, 0x57, 0x32},
- {SENSOR, 0x75, 0x56, 0x34},
- {SENSOR, 0x76, 0x73, 0x35},
- {SENSOR, 0x77, 0x30, 0x12},
- {SENSOR, 0x78, 0x79, 0x02},
- {SENSOR, 0x79, 0x75, 0x06},
- {SENSOR, 0x7a, 0x77, 0x0a},
- {SENSOR, 0x7b, 0x78, 0x09},
- {SENSOR, 0x7c, 0x7d, 0x06},
- {SENSOR, 0x7d, 0x31, 0x10},
- {SENSOR, 0x7e, 0x00, 0x7e},
- {SENSOR, 0x80, 0x59, 0x04},
- {SENSOR, 0x81, 0x59, 0x04},
- {SENSOR, 0x82, 0x57, 0x0a},
- {SENSOR, 0x83, 0x58, 0x0b},
- {SENSOR, 0x84, 0x47, 0x0c},
- {SENSOR, 0x85, 0x48, 0x0e},
- {SENSOR, 0x86, 0x5b, 0x02},
- {SENSOR, 0x87, 0x00, 0x5c},
- {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, 0x08},
- {SENSOR, 0x60, 0x00, 0x80},
- {SENSOR, 0x61, 0x00, 0x00},
- {SENSOR, 0x62, 0x00, 0x00},
- {SENSOR, 0x63, 0x00, 0x00},
- {SENSOR, 0x64, 0x00, 0x00},
- {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d},
- {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x12},
- {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x00},
- {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x10},
- {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x60},
- {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11},
- {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x60},
- {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11},
- {SENSOR, MT9M111_SC_R_MODE_CONTEXT_B, 0x01, 0x0f},
- {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x0f},
- {SENSOR, 0x30, 0x04, 0x00},
-
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xe3, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x87, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
-
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0x90},
- {SENSOR, MT9M111_SC_GLOBAL_GAIN, 0x00, 0xe6},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x09},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x0c},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x04},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xb3, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_CP_GLOBAL_CLK_CONTROL, 0x00, 0x03},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x3e, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x05},
{SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
- {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00, 0x10},
- {SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a},
- {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00, 0x01},
- {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00, 0x01},
- {SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00},
- {SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00},
- {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00},
- {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00},
-
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xcd, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, 0xcd, 0x00, 0x0e},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xd0, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, 0xd0, 0x00, 0x40},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02},
- {SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x07},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
-
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
- {SENSOR, 0x33, 0x03, 0x49},
- {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
- {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
- {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
-
{SENSOR, 0x33, 0x03, 0x49},
{SENSOR, 0x34, 0xc0, 0x19},
{SENSOR, 0x3f, 0x20, 0x20},
@@ -898,25 +236,29 @@ static const unsigned char init_mt9m111[][4] =
{SENSOR, 0x85, 0x48, 0x0e},
{SENSOR, 0x86, 0x5b, 0x02},
{SENSOR, 0x87, 0x00, 0x5c},
- {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, 0x08},
+ {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, MT9M111_SEL_CONTEXT_B},
{SENSOR, 0x60, 0x00, 0x80},
{SENSOR, 0x61, 0x00, 0x00},
{SENSOR, 0x62, 0x00, 0x00},
{SENSOR, 0x63, 0x00, 0x00},
{SENSOR, 0x64, 0x00, 0x00},
- {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d},
- {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x12},
- {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x00},
- {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x10},
- {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x60},
- {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11},
- {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x60},
- {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11},
- {SENSOR, MT9M111_SC_R_MODE_CONTEXT_B, 0x01, 0x0f},
- {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x0f},
+ {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d}, /* 13 */
+ {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x12}, /* 18 */
+ {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x00}, /* 1024 */
+ {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x10}, /* 1296 */
+ {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x60}, /* 352 */
+ {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11}, /* 17 */
+ {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x60}, /* 352 */
+ {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11}, /* 17 */
+ {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x0f}, /* 271 */
{SENSOR, 0x30, 0x04, 0x00},
+	/* Set the number of blank rows to 400 */
+ {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0x90},
+};
+static const unsigned char start_mt9m111[][4] =
+{
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -928,25 +270,6 @@ static const unsigned char init_mt9m111[][4] =
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xe0, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00}, /* 639*/
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
-
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- /* Set number of blank rows chosen to 400 */
- {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0x90},
- /* Set the global gain to 283 (of 512) */
- {SENSOR, MT9M111_SC_GLOBAL_GAIN, 0x03, 0x63}
};
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.c b/drivers/media/video/gspca/m5602/m5602_ov7660.c
new file mode 100644
index 00000000000..7aafeb7cfa0
--- /dev/null
+++ b/drivers/media/video/gspca/m5602/m5602_ov7660.c
@@ -0,0 +1,227 @@
+/*
+ * Driver for the ov7660 sensor
+ *
+ * Copyright (C) 2009 Erik Andrén
+ * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
+ * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
+ *
+ * Portions of code to USB interface and ALi driver software,
+ * Copyright (c) 2006 Willem Duinker
+ * v4l2 interface modeled after the V4L2 driver
+ * for SN9C10x PC Camera Controllers
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+#include "m5602_ov7660.h"
+
+static int ov7660_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
+static int ov7660_set_gain(struct gspca_dev *gspca_dev, __s32 val);
+
+static const struct ctrl ov7660_ctrls[] = {
+#define GAIN_IDX 0
+ {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "gain",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 0x1,
+ .default_value = OV7660_DEFAULT_GAIN,
+ .flags = V4L2_CTRL_FLAG_SLIDER
+ },
+ .set = ov7660_set_gain,
+ .get = ov7660_get_gain
+ },
+};
+
+static struct v4l2_pix_format ov7660_modes[] = {
+ {
+ 640,
+ 480,
+ V4L2_PIX_FMT_SBGGR8,
+ V4L2_FIELD_NONE,
+ .sizeimage =
+ 640 * 480,
+ .bytesperline = 640,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0
+ }
+};
+
+static void ov7660_dump_registers(struct sd *sd);
+
+int ov7660_probe(struct sd *sd)
+{
+ int err = 0, i;
+ u8 prod_id = 0, ver_id = 0;
+
+ s32 *sensor_settings;
+
+ if (force_sensor) {
+ if (force_sensor == OV7660_SENSOR) {
+ info("Forcing an %s sensor", ov7660.name);
+ goto sensor_found;
+ }
+ /* If we want to force another sensor,
+ don't try to probe this one */
+ return -ENODEV;
+ }
+
+ /* Do the preinit */
+ for (i = 0; i < ARRAY_SIZE(preinit_ov7660) && !err; i++) {
+ u8 data[2];
+
+ if (preinit_ov7660[i][0] == BRIDGE) {
+ err = m5602_write_bridge(sd,
+ preinit_ov7660[i][1],
+ preinit_ov7660[i][2]);
+ } else {
+ data[0] = preinit_ov7660[i][2];
+ err = m5602_write_sensor(sd,
+ preinit_ov7660[i][1], data, 1);
+ }
+ }
+ if (err < 0)
+ return err;
+
+ if (m5602_read_sensor(sd, OV7660_PID, &prod_id, 1))
+ return -ENODEV;
+
+ if (m5602_read_sensor(sd, OV7660_VER, &ver_id, 1))
+ return -ENODEV;
+
+ info("Sensor reported 0x%x%x", prod_id, ver_id);
+
+ if ((prod_id == 0x76) && (ver_id == 0x60)) {
+ info("Detected a ov7660 sensor");
+ goto sensor_found;
+ }
+ return -ENODEV;
+
+sensor_found:
+ sensor_settings = kmalloc(
+ ARRAY_SIZE(ov7660_ctrls) * sizeof(s32), GFP_KERNEL);
+ if (!sensor_settings)
+ return -ENOMEM;
+
+ sd->gspca_dev.cam.cam_mode = ov7660_modes;
+ sd->gspca_dev.cam.nmodes = ARRAY_SIZE(ov7660_modes);
+ sd->desc->ctrls = ov7660_ctrls;
+ sd->desc->nctrls = ARRAY_SIZE(ov7660_ctrls);
+
+ for (i = 0; i < ARRAY_SIZE(ov7660_ctrls); i++)
+ sensor_settings[i] = ov7660_ctrls[i].qctrl.default_value;
+ sd->sensor_priv = sensor_settings;
+
+ return 0;
+}
+
+int ov7660_init(struct sd *sd)
+{
+ int i, err = 0;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ /* Init the sensor */
+	for (i = 0; i < ARRAY_SIZE(init_ov7660) && !err; i++) {
+ u8 data[2];
+
+ if (init_ov7660[i][0] == BRIDGE) {
+ err = m5602_write_bridge(sd,
+ init_ov7660[i][1],
+ init_ov7660[i][2]);
+ } else {
+ data[0] = init_ov7660[i][2];
+ err = m5602_write_sensor(sd,
+ init_ov7660[i][1], data, 1);
+ }
+	}
+	if (err < 0)
+		return err;
+
+ if (dump_sensor)
+ ov7660_dump_registers(sd);
+
+ err = ov7660_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
+ if (err < 0)
+ return err;
+
+ return err;
+}
+
+int ov7660_start(struct sd *sd)
+{
+ return 0;
+}
+
+int ov7660_stop(struct sd *sd)
+{
+ return 0;
+}
+
+void ov7660_disconnect(struct sd *sd)
+{
+ ov7660_stop(sd);
+
+ sd->sensor = NULL;
+ kfree(sd->sensor_priv);
+}
+
+static int ov7660_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[GAIN_IDX];
+ PDEBUG(D_V4L2, "Read gain %d", *val);
+ return 0;
+}
+
+static int ov7660_set_gain(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ u8 i2c_data;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ PDEBUG(D_V4L2, "Setting gain to %d", val);
+
+ sensor_settings[GAIN_IDX] = val;
+
+	/* The gain register is 8 bits wide */
+	i2c_data = val & 0xff;
+	err = m5602_write_sensor(sd, OV7660_GAIN, &i2c_data, 1);
+ return err;
+}
+
+static void ov7660_dump_registers(struct sd *sd)
+{
+ int address;
+ info("Dumping the ov7660 register state");
+ for (address = 0; address < 0xa9; address++) {
+ u8 value;
+ m5602_read_sensor(sd, address, &value, 1);
+ info("register 0x%x contains 0x%x",
+ address, value);
+ }
+
+ info("ov7660 register state dump complete");
+
+ info("Probing for which registers that are read/write");
+ for (address = 0; address < 0xff; address++) {
+ u8 old_value, ctrl_value;
+ u8 test_value[2] = {0xff, 0xff};
+
+ m5602_read_sensor(sd, address, &old_value, 1);
+ m5602_write_sensor(sd, address, test_value, 1);
+ m5602_read_sensor(sd, address, &ctrl_value, 1);
+
+ if (ctrl_value == test_value[0])
+ info("register 0x%x is writeable", address);
+ else
+ info("register 0x%x is read only", address);
+
+ /* Restore original value */
+ m5602_write_sensor(sd, address, &old_value, 1);
+ }
+}
diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.h b/drivers/media/video/gspca/m5602/m5602_ov7660.h
new file mode 100644
index 00000000000..3f2c169a93e
--- /dev/null
+++ b/drivers/media/video/gspca/m5602/m5602_ov7660.h
@@ -0,0 +1,279 @@
+/*
+ * Driver for the ov7660 sensor
+ *
+ * Copyright (C) 2009 Erik Andrén
+ * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
+ * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
+ *
+ * Portions of code to USB interface and ALi driver software,
+ * Copyright (c) 2006 Willem Duinker
+ * v4l2 interface modeled after the V4L2 driver
+ * for SN9C10x PC Camera Controllers
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+#ifndef M5602_OV7660_H_
+#define M5602_OV7660_H_
+
+#include "m5602_sensor.h"
+
+#define OV7660_GAIN 0x00
+#define OV7660_BLUE_GAIN 0x01
+#define OV7660_RED_GAIN 0x02
+#define OV7660_VREF 0x03
+#define OV7660_COM1 0x04
+#define OV7660_BAVE 0x05
+#define OV7660_GEAVE 0x06
+#define OV7660_AECHH 0x07
+#define OV7660_RAVE 0x08
+#define OV7660_COM2 0x09
+#define OV7660_PID 0x0a
+#define OV7660_VER 0x0b
+#define OV7660_COM3 0x0c
+#define OV7660_COM4 0x0d
+#define OV7660_COM5 0x0e
+#define OV7660_COM6 0x0f
+#define OV7660_AECH 0x10
+#define OV7660_CLKRC 0x11
+#define OV7660_COM7 0x12
+#define OV7660_COM8 0x13
+#define OV7660_COM9 0x14
+#define OV7660_COM10 0x15
+#define OV7660_RSVD16 0x16
+#define OV7660_HSTART 0x17
+#define OV7660_HSTOP 0x18
+#define OV7660_VSTART 0x19
+#define OV7660_VSTOP 0x1a
+#define OV7660_PSHFT 0x1b
+#define OV7660_MIDH 0x1c
+#define OV7660_MIDL 0x1d
+#define OV7660_MVFP 0x1e
+#define OV7660_LAEC 0x1f
+#define OV7660_BOS 0x20
+#define OV7660_GBOS 0x21
+#define OV7660_GROS 0x22
+#define OV7660_ROS 0x23
+#define OV7660_AEW 0x24
+#define OV7660_AEB 0x25
+#define OV7660_VPT 0x26
+#define OV7660_BBIAS 0x27
+#define OV7660_GbBIAS 0x28
+#define OV7660_RSVD29 0x29
+#define OV7660_RBIAS 0x2c
+#define OV7660_HREF 0x32
+#define OV7660_ADC 0x37
+#define OV7660_OFON 0x39
+#define OV7660_TSLB 0x3a
+#define OV7660_COM12 0x3c
+#define OV7660_COM13 0x3d
+#define OV7660_LCC1 0x62
+#define OV7660_LCC2 0x63
+#define OV7660_LCC3 0x64
+#define OV7660_LCC4 0x65
+#define OV7660_LCC5 0x66
+#define OV7660_HV 0x69
+#define OV7660_RSVDA1 0xa1
+
+#define OV7660_DEFAULT_GAIN 0x0e
+#define OV7660_DEFAULT_RED_GAIN 0x80
+#define OV7660_DEFAULT_BLUE_GAIN 0x80
+#define OV7660_DEFAULT_SATURATION 0x00
+#define OV7660_DEFAULT_EXPOSURE 0x20
+
+/* Kernel module parameters */
+extern int force_sensor;
+extern int dump_sensor;
+
+int ov7660_probe(struct sd *sd);
+int ov7660_init(struct sd *sd);
+int ov7660_start(struct sd *sd);
+int ov7660_stop(struct sd *sd);
+void ov7660_disconnect(struct sd *sd);
+
+static const struct m5602_sensor ov7660 = {
+ .name = "ov7660",
+ .i2c_slave_id = 0x42,
+ .i2c_regW = 1,
+ .probe = ov7660_probe,
+ .init = ov7660_init,
+ .start = ov7660_start,
+ .stop = ov7660_stop,
+ .disconnect = ov7660_disconnect,
+};
+
+static const unsigned char preinit_ov7660[][4] =
+{
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d},
+ {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x03},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x03},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+
+ {SENSOR, OV7660_OFON, 0x0c},
+ {SENSOR, OV7660_COM2, 0x11},
+ {SENSOR, OV7660_COM7, 0x05},
+
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00}
+};
+
+static const unsigned char init_ov7660[][4] =
+{
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d},
+ {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x03},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x03},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+
+ {SENSOR, OV7660_OFON, 0x0c},
+ {SENSOR, OV7660_COM2, 0x11},
+ {SENSOR, OV7660_COM7, 0x05},
+
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
+
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x02},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+
+ {SENSOR, OV7660_AECH, OV7660_DEFAULT_EXPOSURE},
+ {SENSOR, OV7660_COM1, 0x00},
+
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
+
+ {SENSOR, OV7660_COM7, 0x80},
+ {SENSOR, OV7660_CLKRC, 0x80},
+ {SENSOR, OV7660_BLUE_GAIN, 0x80},
+ {SENSOR, OV7660_RED_GAIN, 0x80},
+ {SENSOR, OV7660_COM9, 0x4c},
+ {SENSOR, OV7660_OFON, 0x43},
+ {SENSOR, OV7660_COM12, 0x28},
+ {SENSOR, OV7660_COM8, 0x00},
+ {SENSOR, OV7660_COM10, 0x40},
+ {SENSOR, OV7660_HSTART, 0x0c},
+ {SENSOR, OV7660_HSTOP, 0x61},
+ {SENSOR, OV7660_HREF, 0xa4},
+ {SENSOR, OV7660_PSHFT, 0x0b},
+ {SENSOR, OV7660_VSTART, 0x01},
+ {SENSOR, OV7660_VSTOP, 0x7a},
+ {SENSOR, OV7660_VREF, 0x00},
+ {SENSOR, OV7660_COM7, 0x05},
+ {SENSOR, OV7660_COM6, 0x4b},
+ {SENSOR, OV7660_BBIAS, 0x98},
+ {SENSOR, OV7660_GbBIAS, 0x98},
+ {SENSOR, OV7660_RSVD29, 0x98},
+ {SENSOR, OV7660_RBIAS, 0x98},
+ {SENSOR, OV7660_COM1, 0x00},
+ {SENSOR, OV7660_AECH, 0x00},
+ {SENSOR, OV7660_AECHH, 0x00},
+ {SENSOR, OV7660_ADC, 0x04},
+ {SENSOR, OV7660_COM13, 0x00},
+ {SENSOR, OV7660_RSVDA1, 0x23},
+ {SENSOR, OV7660_TSLB, 0x0d},
+ {SENSOR, OV7660_HV, 0x80},
+ {SENSOR, OV7660_LCC1, 0x00},
+ {SENSOR, OV7660_LCC2, 0x00},
+ {SENSOR, OV7660_LCC3, 0x10},
+ {SENSOR, OV7660_LCC4, 0x40},
+ {SENSOR, OV7660_LCC5, 0x01},
+
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+ {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81},
+ {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
+ {BRIDGE, M5602_XB_SIG_INI, 0x01},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x08},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0xe0}, /* 480 */
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x02},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x27}, /* 39 */
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x02},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0xa7}, /* 679 */
+ {BRIDGE, M5602_XB_SIG_INI, 0x00},
+
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+
+ {SENSOR, OV7660_AECH, 0x20},
+ {SENSOR, OV7660_COM1, 0x00},
+ {SENSOR, OV7660_OFON, 0x0c},
+ {SENSOR, OV7660_COM2, 0x11},
+ {SENSOR, OV7660_COM7, 0x05},
+ {SENSOR, OV7660_BLUE_GAIN, 0x80},
+ {SENSOR, OV7660_RED_GAIN, 0x80},
+
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}
+};
+
+#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.c b/drivers/media/video/gspca/m5602/m5602_ov9650.c
index fc4548fd441..c2739d6605a 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.c
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.c
@@ -18,44 +18,87 @@
#include "m5602_ov9650.h"
+static int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
+static int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
+static int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
+static int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
+static int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
+static int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev,
+ __s32 *val);
+static int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev,
+ __s32 val);
+static int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val);
+static int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int ov9650_get_auto_exposure(struct gspca_dev *gspca_dev, __s32 *val);
+static int ov9650_set_auto_exposure(struct gspca_dev *gspca_dev, __s32 val);
+
/* Vertically and horizontally flips the image if matched, needed for machines
where the sensor is mounted upside down */
static
const
struct dmi_system_id ov9650_flip_dmi_table[] = {
{
- .ident = "ASUS A6VC",
+ .ident = "ASUS A6Ja",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "A6VC")
+ DMI_MATCH(DMI_PRODUCT_NAME, "A6J")
}
},
{
- .ident = "ASUS A6VM",
+ .ident = "ASUS A6JC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "A6VM")
+ DMI_MATCH(DMI_PRODUCT_NAME, "A6JC")
}
},
{
- .ident = "ASUS A6JC",
+ .ident = "ASUS A6K",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "A6JC")
+ DMI_MATCH(DMI_PRODUCT_NAME, "A6K")
}
},
{
- .ident = "ASUS A6Ja",
+ .ident = "ASUS A6Kt",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "A6J")
+ DMI_MATCH(DMI_PRODUCT_NAME, "A6Kt")
}
},
{
- .ident = "ASUS A6Kt",
+ .ident = "ASUS A6VA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "A6Kt")
+ DMI_MATCH(DMI_PRODUCT_NAME, "A6VA")
+ }
+ },
+	{
+		.ident = "ASUS A6VC",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A6VC")
+ }
+ },
+ {
+ .ident = "ASUS A6VM",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A6VM")
+ }
+ },
+ {
+ .ident = "ASUS A7V",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A7V")
}
},
{
@@ -68,7 +111,7 @@ static
{}
};
-const static struct ctrl ov9650_ctrls[] = {
+static const struct ctrl ov9650_ctrls[] = {
#define EXPOSURE_IDX 0
{
{
@@ -102,6 +145,7 @@ const static struct ctrl ov9650_ctrls[] = {
#define RED_BALANCE_IDX 2
{
{
+ .id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "red balance",
.minimum = 0x00,
@@ -116,6 +160,7 @@ const static struct ctrl ov9650_ctrls[] = {
#define BLUE_BALANCE_IDX 3
{
{
+ .id = V4L2_CID_BLUE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "blue balance",
.minimum = 0x00,
@@ -182,7 +227,22 @@ const static struct ctrl ov9650_ctrls[] = {
},
.set = ov9650_set_auto_gain,
.get = ov9650_get_auto_gain
+ },
+#define AUTO_EXPOSURE_IDX 8
+ {
+ {
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
+ },
+ .set = ov9650_set_auto_exposure,
+ .get = ov9650_get_auto_exposure
}
+
};
static struct v4l2_pix_format ov9650_modes[] = {
@@ -289,12 +349,6 @@ sensor_found:
for (i = 0; i < ARRAY_SIZE(ov9650_ctrls); i++)
sensor_settings[i] = ov9650_ctrls[i].qctrl.default_value;
sd->sensor_priv = sensor_settings;
-
- if (dmi_check_system(ov9650_flip_dmi_table) && !err) {
- info("vflip quirk active");
- sensor_settings[VFLIP_IDX] = 1;
- }
-
return 0;
}
@@ -316,7 +370,8 @@ int ov9650_init(struct sd *sd)
err = m5602_write_bridge(sd, init_ov9650[i][1], data);
}
- err = ov9650_set_exposure(&sd->gspca_dev, sensor_settings[EXPOSURE_IDX]);
+ err = ov9650_set_exposure(&sd->gspca_dev,
+ sensor_settings[EXPOSURE_IDX]);
if (err < 0)
return err;
@@ -324,11 +379,13 @@ int ov9650_init(struct sd *sd)
if (err < 0)
return err;
- err = ov9650_set_red_balance(&sd->gspca_dev, sensor_settings[RED_BALANCE_IDX]);
+ err = ov9650_set_red_balance(&sd->gspca_dev,
+ sensor_settings[RED_BALANCE_IDX]);
if (err < 0)
return err;
- err = ov9650_set_blue_balance(&sd->gspca_dev, sensor_settings[BLUE_BALANCE_IDX]);
+ err = ov9650_set_blue_balance(&sd->gspca_dev,
+ sensor_settings[BLUE_BALANCE_IDX]);
if (err < 0)
return err;
@@ -340,11 +397,18 @@ int ov9650_init(struct sd *sd)
if (err < 0)
return err;
- err = ov9650_set_auto_white_balance(&sd->gspca_dev, sensor_settings[AUTO_WHITE_BALANCE_IDX]);
+ err = ov9650_set_auto_exposure(&sd->gspca_dev,
+ sensor_settings[AUTO_EXPOSURE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = ov9650_set_auto_white_balance(&sd->gspca_dev,
+ sensor_settings[AUTO_WHITE_BALANCE_IDX]);
if (err < 0)
return err;
- err = ov9650_set_auto_gain(&sd->gspca_dev, sensor_settings[AUTO_GAIN_CTRL_IDX]);
+ err = ov9650_set_auto_gain(&sd->gspca_dev,
+ sensor_settings[AUTO_GAIN_CTRL_IDX]);
return err;
}
@@ -360,7 +424,10 @@ int ov9650_start(struct sd *sd)
int ver_offs = cam->cam_mode[sd->gspca_dev.curr_mode].priv;
int hor_offs = OV9650_LEFT_OFFSET;
- if (sensor_settings[VFLIP_IDX])
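+	/* Machines in ov9650_flip_dmi_table have the sensor mounted
+	   upside down, so the effective vertical flip is inverted */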
+ if ((!dmi_check_system(ov9650_flip_dmi_table) &&
+ sensor_settings[VFLIP_IDX]) ||
+ (dmi_check_system(ov9650_flip_dmi_table) &&
+ !sensor_settings[VFLIP_IDX]))
ver_offs--;
if (width <= 320)
@@ -406,6 +473,14 @@ int ov9650_start(struct sd *sd)
if (err < 0)
return err;
+ err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 2);
+ if (err < 0)
+ return err;
+
err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA,
(hor_offs >> 8) & 0xff);
if (err < 0)
@@ -425,6 +500,10 @@ int ov9650_start(struct sd *sd)
if (err < 0)
return err;
+ err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
+ if (err < 0)
+ return err;
+
switch (width) {
case 640:
PDEBUG(D_V4L2, "Configuring camera for VGA mode");
@@ -467,32 +546,15 @@ int ov9650_stop(struct sd *sd)
return m5602_write_sensor(sd, OV9650_COM2, &data, 1);
}
-int ov9650_power_down(struct sd *sd)
-{
- int i, err = 0;
- for (i = 0; i < ARRAY_SIZE(power_down_ov9650) && !err; i++) {
- u8 data = power_down_ov9650[i][2];
- if (power_down_ov9650[i][0] == SENSOR)
- err = m5602_write_sensor(sd,
- power_down_ov9650[i][1], &data, 1);
- else
- err = m5602_write_bridge(sd, power_down_ov9650[i][1],
- data);
- }
-
- return err;
-}
-
void ov9650_disconnect(struct sd *sd)
{
ov9650_stop(sd);
- ov9650_power_down(sd);
sd->sensor = NULL;
kfree(sd->sensor_priv);
}
-int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
+static int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
@@ -502,7 +564,7 @@ int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
+static int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
@@ -532,7 +594,7 @@ int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
+static int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
@@ -542,7 +604,7 @@ int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val)
+static int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u8 i2c_data;
@@ -573,7 +635,7 @@ int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
+static int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
@@ -583,7 +645,7 @@ int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
+static int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u8 i2c_data;
@@ -599,7 +661,7 @@ int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
+static int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
@@ -610,7 +672,7 @@ int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
+static int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u8 i2c_data;
@@ -626,7 +688,7 @@ int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
+static int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
@@ -636,7 +698,7 @@ int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
+static int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u8 i2c_data;
@@ -646,13 +708,20 @@ int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
PDEBUG(D_V4L2, "Set horizontal flip to %d", val);
sensor_settings[HFLIP_IDX] = val;
- i2c_data = ((val & 0x01) << 5) | (sensor_settings[VFLIP_IDX] << 4);
+
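+	/* Preserve the vflip bit, inverting it on machines where the
+	   sensor is mounted upside down (see ov9650_flip_dmi_table) */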
+ if (!dmi_check_system(ov9650_flip_dmi_table))
+ i2c_data = ((val & 0x01) << 5) |
+ (sensor_settings[VFLIP_IDX] << 4);
+ else
+ i2c_data = ((val & 0x01) << 5) |
+ (!sensor_settings[VFLIP_IDX] << 4);
+
err = m5602_write_sensor(sd, OV9650_MVFP, &i2c_data, 1);
return err;
}
-int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
+static int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
@@ -663,7 +732,7 @@ int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
+static int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u8 i2c_data;
@@ -673,6 +742,9 @@ int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
PDEBUG(D_V4L2, "Set vertical flip to %d", val);
sensor_settings[VFLIP_IDX] = val;
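+	/* Invert the requested flip on machines where the sensor is
+	   mounted upside down (see ov9650_flip_dmi_table) */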
+ if (dmi_check_system(ov9650_flip_dmi_table))
+ val = !val;
+
i2c_data = ((val & 0x01) << 4) | (sensor_settings[VFLIP_IDX] << 5);
err = m5602_write_sensor(sd, OV9650_MVFP, &i2c_data, 1);
if (err < 0)
@@ -685,48 +757,38 @@ int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-int ov9650_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
+static int ov9650_get_auto_exposure(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
- *val = sensor_settings[GAIN_IDX];
- PDEBUG(D_V4L2, "Read gain %d", *val);
-
+ *val = sensor_settings[AUTO_EXPOSURE_IDX];
+ PDEBUG(D_V4L2, "Read auto exposure control %d", *val);
return 0;
}
-int ov9650_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
+static int ov9650_set_auto_exposure(struct gspca_dev *gspca_dev,
+ __s32 val)
{
int err;
u8 i2c_data;
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
- PDEBUG(D_V4L2, "Set gain to %d", val);
-
- sensor_settings[GAIN_IDX] = val;
+ PDEBUG(D_V4L2, "Set auto exposure control to %d", val);
- /* Read the OV9650_VREF register first to avoid
- corrupting the VREF high and low bits */
- err = m5602_read_sensor(sd, OV9650_VREF, &i2c_data, 1);
- if (err < 0)
- return err;
-
- /* Mask away all uninteresting bits */
- i2c_data = ((val & 0x0300) >> 2) | (i2c_data & 0x3F);
- err = m5602_write_sensor(sd, OV9650_VREF, &i2c_data, 1);
+ sensor_settings[AUTO_EXPOSURE_IDX] = val;
+ err = m5602_read_sensor(sd, OV9650_COM8, &i2c_data, 1);
if (err < 0)
return err;
- /* The 8 LSBs */
- i2c_data = val & 0xff;
- err = m5602_write_sensor(sd, OV9650_GAIN, &i2c_data, 1);
+ i2c_data = ((i2c_data & 0xfe) | ((val & 0x01) << 0));
- return err;
+ return m5602_write_sensor(sd, OV9650_COM8, &i2c_data, 1);
}
-int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev, __s32 *val)
+static int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev,
+ __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
@@ -735,7 +797,8 @@ int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val)
+static int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev,
+ __s32 val)
{
int err;
u8 i2c_data;
@@ -755,7 +818,7 @@ int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val)
+static int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
@@ -765,7 +828,7 @@ int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val)
+static int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u8 i2c_data;
@@ -780,9 +843,8 @@ int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val)
return err;
i2c_data = ((i2c_data & 0xfb) | ((val & 0x01) << 2));
- err = m5602_write_sensor(sd, OV9650_COM8, &i2c_data, 1);
- return err;
+ return m5602_write_sensor(sd, OV9650_COM8, &i2c_data, 1);
}
static void ov9650_dump_registers(struct sd *sd)
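
The flip handling added above folds both flags into a single OV9650_MVFP write, with bit 5 carrying the horizontal flip and bit 4 the vertical flip, and it inverts the vertical sense on machines matched by the flip DMI table. A minimal standalone sketch of that bit packing, assuming the bit positions shown in the hunk; mvfp_byte() is a made-up helper, not a driver function:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the MVFP byte built in ov9650_set_hflip(): bit 5 is the
 * horizontal flip, bit 4 the vertical flip, and the vertical sense
 * is inverted on DMI-quirked machines. */
static uint8_t mvfp_byte(bool hflip, bool vflip, bool dmi_quirk)
{
        if (dmi_quirk)
                vflip = !vflip;
        return (uint8_t)(((hflip ? 1 : 0) << 5) | ((vflip ? 1 : 0) << 4));
}

int main(void)
{
        /* hflip on, vflip off: 0x20 without the quirk, 0x30 with it */
        printf("0x%02x 0x%02x\n",
               mvfp_byte(true, false, false),
               mvfp_byte(true, false, true));
        return 0;
}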
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.h b/drivers/media/video/gspca/m5602/m5602_ov9650.h
index fcc54e4c0f4..c98c40d69e0 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.h
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.h
@@ -120,6 +120,10 @@
#define OV9650_SOFT_SLEEP (1 << 4)
#define OV9650_OUTPUT_DRIVE_2X (1 << 0)
+#define OV9650_DENOISE_ENABLE (1 << 5)
+#define OV9650_WHITE_PIXEL_ENABLE (1 << 1)
+#define OV9650_WHITE_PIXEL_OPTION (1 << 0)
+
#define OV9650_LEFT_OFFSET 0x62
#define GAIN_DEFAULT 0x14
@@ -137,29 +141,9 @@ int ov9650_probe(struct sd *sd);
int ov9650_init(struct sd *sd);
int ov9650_start(struct sd *sd);
int ov9650_stop(struct sd *sd);
-int ov9650_power_down(struct sd *sd);
void ov9650_disconnect(struct sd *sd);
-int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
-int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
-int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
-int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val);
-int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
-int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
-int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
-int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
-int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
-int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
-int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
-int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
-int ov9650_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
-int ov9650_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
-int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev, __s32 *val);
-int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val);
-int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val);
-int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val);
-
-const static struct m5602_sensor ov9650 = {
+static const struct m5602_sensor ov9650 = {
.name = "OV9650",
.i2c_slave_id = 0x60,
.i2c_regW = 1,
@@ -167,7 +151,6 @@ const static struct m5602_sensor ov9650 = {
.init = ov9650_init,
.start = ov9650_start,
.stop = ov9650_stop,
- .power_down = ov9650_power_down,
.disconnect = ov9650_disconnect,
};
@@ -219,7 +202,7 @@ static const unsigned char init_ov9650[][3] =
/* Reset chip */
{SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
/* One extra reset is needed in order to make the sensor behave
- properly when resuming from ram */
+ properly when resuming from ram, could be a timing issue */
{SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
/* Enable double clock */
@@ -229,8 +212,7 @@ static const unsigned char init_ov9650[][3] =
/* Set fast AGC/AEC algorithm with unlimited step size */
{SENSOR, OV9650_COM8, OV9650_FAST_AGC_AEC |
- OV9650_AEC_UNLIM_STEP_SIZE |
- OV9650_AWB_EN | OV9650_AGC_EN},
+ OV9650_AEC_UNLIM_STEP_SIZE},
{SENSOR, OV9650_CHLF, 0x10},
{SENSOR, OV9650_ARBLM, 0xbf},
@@ -301,8 +283,11 @@ static const unsigned char init_ov9650[][3] =
{SENSOR, OV9650_VREF, 0x10},
{SENSOR, OV9650_ADC, 0x04},
{SENSOR, OV9650_HV, 0x40},
+
/* Enable denoise, and white-pixel erase */
- {SENSOR, OV9650_COM22, 0x23},
+ {SENSOR, OV9650_COM22, OV9650_DENOISE_ENABLE |
+ OV9650_WHITE_PIXEL_ENABLE |
+ OV9650_WHITE_PIXEL_OPTION},
/* Enable VARIOPIXEL */
{SENSOR, OV9650_COM3, OV9650_VARIOPIXEL},
@@ -312,26 +297,6 @@ static const unsigned char init_ov9650[][3] =
{SENSOR, OV9650_COM2, OV9650_SOFT_SLEEP | OV9650_OUTPUT_DRIVE_2X},
};
-static const unsigned char power_down_ov9650[][3] =
-{
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {SENSOR, OV9650_COM7, 0x80},
- {SENSOR, OV9650_OFON, 0xf4},
- {SENSOR, OV9650_MVFP, 0x80},
- {SENSOR, OV9650_DBLV, 0x3f},
- {SENSOR, OV9650_RSVD36, 0x49},
- {SENSOR, OV9650_COM7, 0x05},
-
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x06},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
-};
-
static const unsigned char res_init_ov9650[][3] =
{
{SENSOR, OV9650_COM2, OV9650_OUTPUT_DRIVE_2X},
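
In the init table above, the COM22 write swaps the magic constant 0x23 for the named flags added to this header, and OR-ing those flags reproduces the old value exactly. A throwaway check of that arithmetic; the three defines are copied from the header hunk, everything else is scaffolding:

#include <assert.h>
#include <stdio.h>

#define OV9650_DENOISE_ENABLE     (1 << 5)
#define OV9650_WHITE_PIXEL_ENABLE (1 << 1)
#define OV9650_WHITE_PIXEL_OPTION (1 << 0)

int main(void)
{
        unsigned int com22 = OV9650_DENOISE_ENABLE |
                             OV9650_WHITE_PIXEL_ENABLE |
                             OV9650_WHITE_PIXEL_OPTION;

        /* 0x20 | 0x02 | 0x01 == 0x23, the value the old table wrote */
        assert(com22 == 0x23);
        printf("COM22 = 0x%02x\n", com22);
        return 0;
}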
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.c b/drivers/media/video/gspca/m5602/m5602_po1030.c
index eaddf488bad..8d74d8065b7 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.c
@@ -18,6 +18,29 @@
#include "m5602_po1030.h"
+static int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
+static int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
+static int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
+static int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int po1030_get_green_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int po1030_set_green_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
+static int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
+static int po1030_set_auto_white_balance(struct gspca_dev *gspca_dev,
+ __s32 val);
+static int po1030_get_auto_white_balance(struct gspca_dev *gspca_dev,
+ __s32 *val);
+static int po1030_set_auto_exposure(struct gspca_dev *gspca_dev,
+ __s32 val);
+static int po1030_get_auto_exposure(struct gspca_dev *gspca_dev,
+ __s32 *val);
+
static struct v4l2_pix_format po1030_modes[] = {
{
640,
@@ -27,11 +50,12 @@ static struct v4l2_pix_format po1030_modes[] = {
.sizeimage = 640 * 480,
.bytesperline = 640,
.colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 0
+ .priv = 2
}
};
-const static struct ctrl po1030_ctrls[] = {
+static const struct ctrl po1030_ctrls[] = {
+#define GAIN_IDX 0
{
{
.id = V4L2_CID_GAIN,
@@ -45,7 +69,9 @@ const static struct ctrl po1030_ctrls[] = {
},
.set = po1030_set_gain,
.get = po1030_get_gain
- }, {
+ },
+#define EXPOSURE_IDX 1
+ {
{
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -58,7 +84,9 @@ const static struct ctrl po1030_ctrls[] = {
},
.set = po1030_set_exposure,
.get = po1030_get_exposure
- }, {
+ },
+#define RED_BALANCE_IDX 2
+ {
{
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -71,7 +99,9 @@ const static struct ctrl po1030_ctrls[] = {
},
.set = po1030_set_red_balance,
.get = po1030_get_red_balance
- }, {
+ },
+#define BLUE_BALANCE_IDX 3
+ {
{
.id = V4L2_CID_BLUE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -84,7 +114,9 @@ const static struct ctrl po1030_ctrls[] = {
},
.set = po1030_set_blue_balance,
.get = po1030_get_blue_balance
- }, {
+ },
+#define HFLIP_IDX 4
+ {
{
.id = V4L2_CID_HFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -96,7 +128,9 @@ const static struct ctrl po1030_ctrls[] = {
},
.set = po1030_set_hflip,
.get = po1030_get_hflip
- }, {
+ },
+#define VFLIP_IDX 5
+ {
{
.id = V4L2_CID_VFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -108,14 +142,58 @@ const static struct ctrl po1030_ctrls[] = {
},
.set = po1030_set_vflip,
.get = po1030_get_vflip
- }
+ },
+#define AUTO_WHITE_BALANCE_IDX 6
+ {
+ {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto white balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ .set = po1030_set_auto_white_balance,
+ .get = po1030_get_auto_white_balance
+ },
+#define AUTO_EXPOSURE_IDX 7
+ {
+ {
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ .set = po1030_set_auto_exposure,
+ .get = po1030_get_auto_exposure
+ },
+#define GREEN_BALANCE_IDX 8
+ {
+ {
+ .id = M5602_V4L2_CID_GREEN_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "green balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 0x1,
+ .default_value = PO1030_GREEN_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
+ },
+ .set = po1030_set_green_balance,
+ .get = po1030_get_green_balance
+ },
};
static void po1030_dump_registers(struct sd *sd);
int po1030_probe(struct sd *sd)
{
- u8 prod_id = 0, ver_id = 0, i;
+ u8 dev_id_h = 0, i;
+ s32 *sensor_settings;
if (force_sensor) {
if (force_sensor == PO1030_SENSOR) {
@@ -139,28 +217,36 @@ int po1030_probe(struct sd *sd)
m5602_write_bridge(sd, preinit_po1030[i][1], data);
}
- if (m5602_read_sensor(sd, 0x3, &prod_id, 1))
+ if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1))
return -ENODEV;
- if (m5602_read_sensor(sd, 0x4, &ver_id, 1))
- return -ENODEV;
-
- if ((prod_id == 0x02) && (ver_id == 0xef)) {
+ if (dev_id_h == 0x30) {
info("Detected a po1030 sensor");
goto sensor_found;
}
return -ENODEV;
sensor_found:
+ sensor_settings = kmalloc(
+ ARRAY_SIZE(po1030_ctrls) * sizeof(s32), GFP_KERNEL);
+ if (!sensor_settings)
+ return -ENOMEM;
+
sd->gspca_dev.cam.cam_mode = po1030_modes;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(po1030_modes);
sd->desc->ctrls = po1030_ctrls;
sd->desc->nctrls = ARRAY_SIZE(po1030_ctrls);
+
+ for (i = 0; i < ARRAY_SIZE(po1030_ctrls); i++)
+ sensor_settings[i] = po1030_ctrls[i].qctrl.default_value;
+ sd->sensor_priv = sensor_settings;
+
return 0;
}
int po1030_init(struct sd *sd)
{
+ s32 *sensor_settings = sd->sensor_priv;
int i, err = 0;
/* Init the sensor */
@@ -185,47 +271,206 @@ int po1030_init(struct sd *sd)
return -EINVAL;
}
}
+ if (err < 0)
+ return err;
if (dump_sensor)
po1030_dump_registers(sd);
+ err = po1030_set_exposure(&sd->gspca_dev,
+ sensor_settings[EXPOSURE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = po1030_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
+ if (err < 0)
+ return err;
+
+ err = po1030_set_hflip(&sd->gspca_dev, sensor_settings[HFLIP_IDX]);
+ if (err < 0)
+ return err;
+
+ err = po1030_set_vflip(&sd->gspca_dev, sensor_settings[VFLIP_IDX]);
+ if (err < 0)
+ return err;
+
+ err = po1030_set_red_balance(&sd->gspca_dev,
+ sensor_settings[RED_BALANCE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = po1030_set_blue_balance(&sd->gspca_dev,
+ sensor_settings[BLUE_BALANCE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = po1030_set_green_balance(&sd->gspca_dev,
+ sensor_settings[GREEN_BALANCE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = po1030_set_auto_white_balance(&sd->gspca_dev,
+ sensor_settings[AUTO_WHITE_BALANCE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = po1030_set_auto_exposure(&sd->gspca_dev,
+ sensor_settings[AUTO_EXPOSURE_IDX]);
return err;
}
-int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
+int po1030_start(struct sd *sd)
{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 i2c_data;
- int err;
+ struct cam *cam = &sd->gspca_dev.cam;
+ int i, err = 0;
+ int width = cam->cam_mode[sd->gspca_dev.curr_mode].width;
+ int height = cam->cam_mode[sd->gspca_dev.curr_mode].height;
+ int ver_offs = cam->cam_mode[sd->gspca_dev.curr_mode].priv;
+ u8 data;
+
+ switch (width) {
+ case 320:
+ data = PO1030_SUBSAMPLING;
+ err = m5602_write_sensor(sd, PO1030_CONTROL3, &data, 1);
+ if (err < 0)
+ return err;
+
+ data = ((width + 3) >> 8) & 0xff;
+ err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_H, &data, 1);
+ if (err < 0)
+ return err;
+
+ data = (width + 3) & 0xff;
+ err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_L, &data, 1);
+ if (err < 0)
+ return err;
+
+ data = ((height + 1) >> 8) & 0xff;
+ err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_H, &data, 1);
+ if (err < 0)
+ return err;
+
+ data = (height + 1) & 0xff;
+ err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_L, &data, 1);
+
+ height += 6;
+ width -= 1;
+ break;
+
+ case 640:
+ data = 0;
+ err = m5602_write_sensor(sd, PO1030_CONTROL3, &data, 1);
+ if (err < 0)
+ return err;
+
+ data = ((width + 7) >> 8) & 0xff;
+ err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_H, &data, 1);
+ if (err < 0)
+ return err;
+
+ data = (width + 7) & 0xff;
+ err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_L, &data, 1);
+ if (err < 0)
+ return err;
+
+ data = ((height + 3) >> 8) & 0xff;
+ err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_H, &data, 1);
+ if (err < 0)
+ return err;
+
+ data = (height + 3) & 0xff;
+ err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_L, &data, 1);
+
+ height += 12;
+ width -= 2;
+ break;
+ }
+ err = m5602_write_bridge(sd, M5602_XB_SENSOR_TYPE, 0x0c);
+ if (err < 0)
+ return err;
- err = m5602_read_sensor(sd, PO1030_REG_INTEGLINES_H,
- &i2c_data, 1);
+ err = m5602_write_bridge(sd, M5602_XB_LINE_OF_FRAME_H, 0x81);
if (err < 0)
return err;
- *val = (i2c_data << 8);
- err = m5602_read_sensor(sd, PO1030_REG_INTEGLINES_M,
- &i2c_data, 1);
- *val |= i2c_data;
+ err = m5602_write_bridge(sd, M5602_XB_PIX_OF_LINE_H, 0x82);
+ if (err < 0)
+ return err;
- PDEBUG(D_V4L2, "Exposure read as %d", *val);
+ err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0x01);
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA,
+ ((ver_offs >> 8) & 0xff));
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (ver_offs & 0xff));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < 2 && !err; i++)
+ err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0);
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height >> 8) & 0xff);
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height & 0xff));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < 2 && !err; i++)
+ err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0);
+
+ for (i = 0; i < 2 && !err; i++)
+ err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
+
+ for (i = 0; i < 2 && !err; i++)
+ err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, 0);
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, (width >> 8) & 0xff);
+ if (err < 0)
+ return err;
+ err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, (width & 0xff));
+ if (err < 0)
+ return err;
+
+ err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
return err;
}
-int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
+static int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[EXPOSURE_IDX];
+ PDEBUG(D_V4L2, "Exposure read as %d", *val);
+ return 0;
+}
+
+static int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 i2c_data;
int err;
+ sensor_settings[EXPOSURE_IDX] = val;
PDEBUG(D_V4L2, "Set exposure to %d", val & 0xffff);
i2c_data = ((val & 0xff00) >> 8);
PDEBUG(D_V4L2, "Set exposure to high byte to 0x%x",
i2c_data);
- err = m5602_write_sensor(sd, PO1030_REG_INTEGLINES_H,
+ err = m5602_write_sensor(sd, PO1030_INTEGLINES_H,
&i2c_data, 1);
if (err < 0)
return err;
@@ -233,167 +478,256 @@ int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
i2c_data = (val & 0xff);
PDEBUG(D_V4L2, "Set exposure to low byte to 0x%x",
i2c_data);
- err = m5602_write_sensor(sd, PO1030_REG_INTEGLINES_M,
+ err = m5602_write_sensor(sd, PO1030_INTEGLINES_M,
&i2c_data, 1);
return err;
}
-int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
+static int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 i2c_data;
- int err;
+ s32 *sensor_settings = sd->sensor_priv;
- err = m5602_read_sensor(sd, PO1030_REG_GLOBALGAIN,
- &i2c_data, 1);
- *val = i2c_data;
+ *val = sensor_settings[GAIN_IDX];
PDEBUG(D_V4L2, "Read global gain %d", *val);
-
- return err;
+ return 0;
}
-int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
+static int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 i2c_data;
int err;
- err = m5602_read_sensor(sd, PO1030_REG_CONTROL2,
+ sensor_settings[GAIN_IDX] = val;
+
+ i2c_data = val & 0xff;
+ PDEBUG(D_V4L2, "Set global gain to %d", i2c_data);
+ err = m5602_write_sensor(sd, PO1030_GLOBALGAIN,
&i2c_data, 1);
+ return err;
+}
- *val = (i2c_data >> 7) & 0x01 ;
+static int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+ *val = sensor_settings[HFLIP_IDX];
PDEBUG(D_V4L2, "Read hflip %d", *val);
- return err;
+ return 0;
}
-int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
+static int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 i2c_data;
int err;
+ sensor_settings[HFLIP_IDX] = val;
+
PDEBUG(D_V4L2, "Set hflip %d", val);
- err = m5602_read_sensor(sd, PO1030_REG_CONTROL2, &i2c_data, 1);
+ err = m5602_read_sensor(sd, PO1030_CONTROL2, &i2c_data, 1);
if (err < 0)
return err;
i2c_data = (0x7f & i2c_data) | ((val & 0x01) << 7);
- err = m5602_write_sensor(sd, PO1030_REG_CONTROL2,
+ err = m5602_write_sensor(sd, PO1030_CONTROL2,
&i2c_data, 1);
return err;
}
-int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
+static int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 i2c_data;
- int err;
-
- err = m5602_read_sensor(sd, PO1030_REG_GLOBALGAIN,
- &i2c_data, 1);
-
- *val = (i2c_data >> 6) & 0x01;
+ s32 *sensor_settings = sd->sensor_priv;
+ *val = sensor_settings[VFLIP_IDX];
PDEBUG(D_V4L2, "Read vflip %d", *val);
- return err;
+ return 0;
}
-int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
+static int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 i2c_data;
int err;
+ sensor_settings[VFLIP_IDX] = val;
+
PDEBUG(D_V4L2, "Set vflip %d", val);
- err = m5602_read_sensor(sd, PO1030_REG_CONTROL2, &i2c_data, 1);
+ err = m5602_read_sensor(sd, PO1030_CONTROL2, &i2c_data, 1);
if (err < 0)
return err;
i2c_data = (i2c_data & 0xbf) | ((val & 0x01) << 6);
- err = m5602_write_sensor(sd, PO1030_REG_CONTROL2,
+ err = m5602_write_sensor(sd, PO1030_CONTROL2,
&i2c_data, 1);
return err;
}
-int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val)
+static int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[RED_BALANCE_IDX];
+ PDEBUG(D_V4L2, "Read red gain %d", *val);
+ return 0;
+}
+
+static int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 i2c_data;
int err;
+ sensor_settings[RED_BALANCE_IDX] = val;
+
i2c_data = val & 0xff;
- PDEBUG(D_V4L2, "Set global gain to %d", i2c_data);
- err = m5602_write_sensor(sd, PO1030_REG_GLOBALGAIN,
+ PDEBUG(D_V4L2, "Set red gain to %d", i2c_data);
+ err = m5602_write_sensor(sd, PO1030_RED_GAIN,
&i2c_data, 1);
return err;
}
-int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
+static int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 i2c_data;
- int err;
+ s32 *sensor_settings = sd->sensor_priv;
- err = m5602_read_sensor(sd, PO1030_REG_RED_GAIN,
- &i2c_data, 1);
- *val = i2c_data;
- PDEBUG(D_V4L2, "Read red gain %d", *val);
- return err;
+ *val = sensor_settings[BLUE_BALANCE_IDX];
+ PDEBUG(D_V4L2, "Read blue gain %d", *val);
+
+ return 0;
}
-int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
+static int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 i2c_data;
int err;
+ sensor_settings[BLUE_BALANCE_IDX] = val;
+
i2c_data = val & 0xff;
- PDEBUG(D_V4L2, "Set red gain to %d", i2c_data);
- err = m5602_write_sensor(sd, PO1030_REG_RED_GAIN,
+ PDEBUG(D_V4L2, "Set blue gain to %d", i2c_data);
+ err = m5602_write_sensor(sd, PO1030_BLUE_GAIN,
&i2c_data, 1);
+
return err;
}
-int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
+static int po1030_get_green_balance(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[GREEN_BALANCE_IDX];
+ PDEBUG(D_V4L2, "Read green gain %d", *val);
+
+ return 0;
+}
+
+static int po1030_set_green_balance(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 i2c_data;
int err;
- err = m5602_read_sensor(sd, PO1030_REG_BLUE_GAIN,
+ sensor_settings[GREEN_BALANCE_IDX] = val;
+ i2c_data = val & 0xff;
+ PDEBUG(D_V4L2, "Set green gain to %d", i2c_data);
+
+ err = m5602_write_sensor(sd, PO1030_GREEN_1_GAIN,
+ &i2c_data, 1);
+ if (err < 0)
+ return err;
+
+ return m5602_write_sensor(sd, PO1030_GREEN_2_GAIN,
&i2c_data, 1);
- *val = i2c_data;
- PDEBUG(D_V4L2, "Read blue gain %d", *val);
+}
- return err;
+static int po1030_get_auto_white_balance(struct gspca_dev *gspca_dev,
+ __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[AUTO_WHITE_BALANCE_IDX];
+ PDEBUG(D_V4L2, "Auto white balancing is %d", *val);
+
+ return 0;
}
-int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
+static int po1030_set_auto_white_balance(struct gspca_dev *gspca_dev,
+ __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 i2c_data;
int err;
- i2c_data = val & 0xff;
- PDEBUG(D_V4L2, "Set blue gain to %d", i2c_data);
- err = m5602_write_sensor(sd, PO1030_REG_BLUE_GAIN,
- &i2c_data, 1);
+ sensor_settings[AUTO_WHITE_BALANCE_IDX] = val;
+
+ err = m5602_read_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1);
+ if (err < 0)
+ return err;
+
+ PDEBUG(D_V4L2, "Set auto white balance to %d", val);
+ i2c_data = (i2c_data & 0xfe) | (val & 0x01);
+ err = m5602_write_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1);
return err;
}
-int po1030_power_down(struct sd *sd)
+static int po1030_get_auto_exposure(struct gspca_dev *gspca_dev,
+ __s32 *val)
{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[AUTO_EXPOSURE_IDX];
+ PDEBUG(D_V4L2, "Auto exposure is %d", *val);
return 0;
}
+static int po1030_set_auto_exposure(struct gspca_dev *gspca_dev,
+ __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+ u8 i2c_data;
+ int err;
+
+ sensor_settings[AUTO_EXPOSURE_IDX] = val;
+ err = m5602_read_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1);
+ if (err < 0)
+ return err;
+
+ PDEBUG(D_V4L2, "Set auto exposure to %d", val);
+ i2c_data = (i2c_data & 0xfd) | ((val & 0x01) << 1);
+ return m5602_write_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1);
+}
+
+void po1030_disconnect(struct sd *sd)
+{
+ sd->sensor = NULL;
+ kfree(sd->sensor_priv);
+}
+
static void po1030_dump_registers(struct sd *sd)
{
int address;
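
The new po1030_start() programs the sensor window by padding the mode geometry (width + 7 and height + 3 in the 640x480 branch) and splitting each padded value into a high/low register pair. A standalone sketch of that split, assuming the 0x0c-0x0f addresses from the PO1030_WINDOW* defines in the header; write_reg() is a stand-in for m5602_write_sensor() that only logs the writes:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for m5602_write_sensor(); just logs the write. */
static int write_reg(uint8_t reg, uint8_t val)
{
        printf("reg 0x%02x <- 0x%02x\n", reg, val);
        return 0;
}

/* Model of the 640-wide branch in po1030_start(): the padded window
 * size is split into a high byte and a low byte per register pair. */
static int set_window_640x480(void)
{
        int width = 640 + 7;    /* WINDOWWIDTH gets width + 7  */
        int height = 480 + 3;   /* WINDOWHEIGHT gets height + 3 */
        int err;

        err = write_reg(0x0c, (width >> 8) & 0xff);     /* WINDOWWIDTH_H  */
        if (err < 0)
                return err;
        err = write_reg(0x0d, width & 0xff);            /* WINDOWWIDTH_L  */
        if (err < 0)
                return err;
        err = write_reg(0x0e, (height >> 8) & 0xff);    /* WINDOWHEIGHT_H */
        if (err < 0)
                return err;
        return write_reg(0x0f, height & 0xff);          /* WINDOWHEIGHT_L */
}

int main(void)
{
        return set_window_640x480();
}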
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.h b/drivers/media/video/gspca/m5602/m5602_po1030.h
index c10b1233581..1ea380b2bbe 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.h
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.h
@@ -25,98 +25,123 @@
/*****************************************************************************/
-#define PO1030_REG_DEVID_H 0x00
-#define PO1030_REG_DEVID_L 0x01
-#define PO1030_REG_FRAMEWIDTH_H 0x04
-#define PO1030_REG_FRAMEWIDTH_L 0x05
-#define PO1030_REG_FRAMEHEIGHT_H 0x06
-#define PO1030_REG_FRAMEHEIGHT_L 0x07
-#define PO1030_REG_WINDOWX_H 0x08
-#define PO1030_REG_WINDOWX_L 0x09
-#define PO1030_REG_WINDOWY_H 0x0a
-#define PO1030_REG_WINDOWY_L 0x0b
-#define PO1030_REG_WINDOWWIDTH_H 0x0c
-#define PO1030_REG_WINDOWWIDTH_L 0x0d
-#define PO1030_REG_WINDOWHEIGHT_H 0x0e
-#define PO1030_REG_WINDOWHEIGHT_L 0x0f
-
-#define PO1030_REG_GLOBALIBIAS 0x12
-#define PO1030_REG_PIXELIBIAS 0x13
-
-#define PO1030_REG_GLOBALGAIN 0x15
-#define PO1030_REG_RED_GAIN 0x16
-#define PO1030_REG_GREEN_1_GAIN 0x17
-#define PO1030_REG_BLUE_GAIN 0x18
-#define PO1030_REG_GREEN_2_GAIN 0x19
-
-#define PO1030_REG_INTEGLINES_H 0x1a
-#define PO1030_REG_INTEGLINES_M 0x1b
-#define PO1030_REG_INTEGLINES_L 0x1c
-
-#define PO1030_REG_CONTROL1 0x1d
-#define PO1030_REG_CONTROL2 0x1e
-#define PO1030_REG_CONTROL3 0x1f
-#define PO1030_REG_CONTROL4 0x20
-
-#define PO1030_REG_PERIOD50_H 0x23
-#define PO1030_REG_PERIOD50_L 0x24
-#define PO1030_REG_PERIOD60_H 0x25
-#define PO1030_REG_PERIOD60_L 0x26
-#define PO1030_REG_REGCLK167 0x27
-#define PO1030_REG_DELTA50 0x28
-#define PO1030_REG_DELTA60 0x29
-
-#define PO1030_REG_ADCOFFSET 0x2c
+#define PO1030_DEVID_H 0x00
+#define PO1030_DEVID_L 0x01
+#define PO1030_FRAMEWIDTH_H 0x04
+#define PO1030_FRAMEWIDTH_L 0x05
+#define PO1030_FRAMEHEIGHT_H 0x06
+#define PO1030_FRAMEHEIGHT_L 0x07
+#define PO1030_WINDOWX_H 0x08
+#define PO1030_WINDOWX_L 0x09
+#define PO1030_WINDOWY_H 0x0a
+#define PO1030_WINDOWY_L 0x0b
+#define PO1030_WINDOWWIDTH_H 0x0c
+#define PO1030_WINDOWWIDTH_L 0x0d
+#define PO1030_WINDOWHEIGHT_H 0x0e
+#define PO1030_WINDOWHEIGHT_L 0x0f
+
+#define PO1030_GLOBALIBIAS 0x12
+#define PO1030_PIXELIBIAS 0x13
+
+#define PO1030_GLOBALGAIN 0x15
+#define PO1030_RED_GAIN 0x16
+#define PO1030_GREEN_1_GAIN 0x17
+#define PO1030_BLUE_GAIN 0x18
+#define PO1030_GREEN_2_GAIN 0x19
+
+#define PO1030_INTEGLINES_H 0x1a
+#define PO1030_INTEGLINES_M 0x1b
+#define PO1030_INTEGLINES_L 0x1c
+
+#define PO1030_CONTROL1 0x1d
+#define PO1030_CONTROL2 0x1e
+#define PO1030_CONTROL3 0x1f
+#define PO1030_CONTROL4 0x20
+
+#define PO1030_PERIOD50_H 0x23
+#define PO1030_PERIOD50_L 0x24
+#define PO1030_PERIOD60_H 0x25
+#define PO1030_PERIOD60_L 0x26
+#define PO1030_REGCLK167 0x27
+#define PO1030_FLICKER_DELTA50 0x28
+#define PO1030_FLICKERDELTA60 0x29
+
+#define PO1030_ADCOFFSET 0x2c
/* Gamma Correction Coeffs */
-#define PO1030_REG_GC0 0x2d
-#define PO1030_REG_GC1 0x2e
-#define PO1030_REG_GC2 0x2f
-#define PO1030_REG_GC3 0x30
-#define PO1030_REG_GC4 0x31
-#define PO1030_REG_GC5 0x32
-#define PO1030_REG_GC6 0x33
-#define PO1030_REG_GC7 0x34
+#define PO1030_GC0 0x2d
+#define PO1030_GC1 0x2e
+#define PO1030_GC2 0x2f
+#define PO1030_GC3 0x30
+#define PO1030_GC4 0x31
+#define PO1030_GC5 0x32
+#define PO1030_GC6 0x33
+#define PO1030_GC7 0x34
/* Color Transform Matrix */
-#define PO1030_REG_CT0 0x35
-#define PO1030_REG_CT1 0x36
-#define PO1030_REG_CT2 0x37
-#define PO1030_REG_CT3 0x38
-#define PO1030_REG_CT4 0x39
-#define PO1030_REG_CT5 0x3a
-#define PO1030_REG_CT6 0x3b
-#define PO1030_REG_CT7 0x3c
-#define PO1030_REG_CT8 0x3d
-
-#define PO1030_REG_AUTOCTRL1 0x3e
-#define PO1030_REG_AUTOCTRL2 0x3f
-
-#define PO1030_REG_YTARGET 0x40
-#define PO1030_REG_GLOBALGAINMIN 0x41
-#define PO1030_REG_GLOBALGAINMAX 0x42
+#define PO1030_CT0 0x35
+#define PO1030_CT1 0x36
+#define PO1030_CT2 0x37
+#define PO1030_CT3 0x38
+#define PO1030_CT4 0x39
+#define PO1030_CT5 0x3a
+#define PO1030_CT6 0x3b
+#define PO1030_CT7 0x3c
+#define PO1030_CT8 0x3d
+
+#define PO1030_AUTOCTRL1 0x3e
+#define PO1030_AUTOCTRL2 0x3f
+
+#define PO1030_YTARGET 0x40
+#define PO1030_GLOBALGAINMIN 0x41
+#define PO1030_GLOBALGAINMAX 0x42
+
+#define PO1030_AWB_RED_TUNING 0x47
+#define PO1030_AWB_BLUE_TUNING 0x48
/* Output format control */
-#define PO1030_REG_OUTFORMCTRL1 0x5a
-#define PO1030_REG_OUTFORMCTRL2 0x5b
-#define PO1030_REG_OUTFORMCTRL3 0x5c
-#define PO1030_REG_OUTFORMCTRL4 0x5d
-#define PO1030_REG_OUTFORMCTRL5 0x5e
+#define PO1030_OUTFORMCTRL1 0x5a
+#define PO1030_OUTFORMCTRL2 0x5b
+#define PO1030_OUTFORMCTRL3 0x5c
+#define PO1030_OUTFORMCTRL4 0x5d
+#define PO1030_OUTFORMCTRL5 0x5e
-/* Imaging coefficients */
-#define PO1030_REG_YBRIGHT 0x73
-#define PO1030_REG_YCONTRAST 0x74
-#define PO1030_REG_YSATURATION 0x75
+#define PO1030_EDGE_ENH_OFF 0x5f
+#define PO1030_EGA 0x60
-#define PO1030_HFLIP (1 << 7)
-#define PO1030_VFLIP (1 << 6)
+#define PO1030_Cb_U_GAIN 0x63
+#define PO1030_Cr_V_GAIN 0x64
+
+#define PO1030_YCONTRAST 0x74
+#define PO1030_YSATURATION 0x75
+
+#define PO1030_HFLIP (1 << 7)
+#define PO1030_VFLIP (1 << 6)
+
+#define PO1030_HREF_ENABLE (1 << 6)
+
+#define PO1030_RAW_RGB_BAYER 0x4
+
+#define PO1030_FRAME_EQUAL (1 << 3)
+#define PO1030_AUTO_SUBSAMPLING (1 << 4)
+
+#define PO1030_WEIGHT_WIN_2X (1 << 3)
+
+#define PO1030_SHUTTER_MODE (1 << 6)
+#define PO1030_AUTO_SUBSAMPLING (1 << 4)
+#define PO1030_FRAME_EQUAL (1 << 3)
+
+#define PO1030_SENSOR_RESET (1 << 5)
+
+#define PO1030_SUBSAMPLING (1 << 6)
/*****************************************************************************/
#define PO1030_GLOBAL_GAIN_DEFAULT 0x12
#define PO1030_EXPOSURE_DEFAULT 0x0085
-#define PO1030_BLUE_GAIN_DEFAULT 0x40
-#define PO1030_RED_GAIN_DEFAULT 0x40
+#define PO1030_BLUE_GAIN_DEFAULT 0x36
+#define PO1030_RED_GAIN_DEFAULT 0x36
+#define PO1030_GREEN_GAIN_DEFAULT 0x40
/*****************************************************************************/
@@ -126,20 +151,8 @@ extern int dump_sensor;
int po1030_probe(struct sd *sd);
int po1030_init(struct sd *sd);
-int po1030_power_down(struct sd *sd);
-
-int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
-int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
-int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
-int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val);
-int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
-int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
-int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
-int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
-int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
-int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
-int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
-int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
+int po1030_start(struct sd *sd);
+void po1030_disconnect(struct sd *sd);
static const struct m5602_sensor po1030 = {
.name = "PO1030",
@@ -149,7 +162,8 @@ static const struct m5602_sensor po1030 = {
.probe = po1030_probe,
.init = po1030_init,
- .power_down = po1030_power_down,
+ .start = po1030_start,
+ .disconnect = po1030_disconnect,
};
static const unsigned char preinit_po1030[][3] =
@@ -159,248 +173,103 @@ static const unsigned char preinit_po1030[][3] =
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d},
{BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
{BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
-
- {SENSOR, PO1030_REG_AUTOCTRL2, 0x24},
-
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
- {BRIDGE, M5602_XB_SIG_INI, 0x01},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x02},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xec},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x02},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x87},
- {BRIDGE, M5602_XB_SIG_INI, 0x00},
-
- {SENSOR, PO1030_REG_AUTOCTRL2, 0x24},
-
{BRIDGE, M5602_XB_GPIO_DIR, 0x05},
{BRIDGE, M5602_XB_GPIO_DAT, 0x04},
{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
{BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
{BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
+
+ {SENSOR, PO1030_AUTOCTRL2, PO1030_SENSOR_RESET | (1 << 2)},
+
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
{BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
{BRIDGE, M5602_XB_GPIO_DIR, 0x05},
{BRIDGE, M5602_XB_GPIO_DAT, 0x00}
};
-static const unsigned char init_po1030[][4] =
+static const unsigned char init_po1030[][3] =
{
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
- /*sequence 1*/
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d},
-
{BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
{BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
- /*end of sequence 1*/
-
- /*sequence 2 (same as stop sequence)*/
- {SENSOR, PO1030_REG_AUTOCTRL2, 0x24},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
-
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- /*end of sequence 2*/
- /*sequence 5*/
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
- {BRIDGE, M5602_XB_SIG_INI, 0x01},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x02},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xec},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x02},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x87},
- {BRIDGE, M5602_XB_SIG_INI, 0x00},
- /*end of sequence 5*/
-
- /*sequence 2 stop */
- {SENSOR, PO1030_REG_AUTOCTRL2, 0x24},
+ {SENSOR, PO1030_AUTOCTRL2, PO1030_SENSOR_RESET | (1 << 2)},
{BRIDGE, M5602_XB_GPIO_DIR, 0x05},
{BRIDGE, M5602_XB_GPIO_DAT, 0x04},
{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
{BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
{BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- /*end of sequence 2 stop */
-
-/* ---------------------------------
- * end of init - begin of start
- * --------------------------------- */
-
- /*sequence 3*/
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
- /*end of sequence 3*/
- /*sequence 4*/
{BRIDGE, M5602_XB_GPIO_DIR, 0x05},
{BRIDGE, M5602_XB_GPIO_DAT, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
- {SENSOR, PO1030_REG_AUTOCTRL2, 0x04},
+ {SENSOR, PO1030_AUTOCTRL2, 0x04},
+
+ {SENSOR, PO1030_OUTFORMCTRL2, PO1030_RAW_RGB_BAYER},
+ {SENSOR, PO1030_AUTOCTRL1, PO1030_WEIGHT_WIN_2X},
+
+ {SENSOR, PO1030_CONTROL2, 0x03},
+ {SENSOR, 0x21, 0x90},
+ {SENSOR, PO1030_YTARGET, 0x60},
+ {SENSOR, 0x59, 0x13},
+ {SENSOR, PO1030_OUTFORMCTRL1, PO1030_HREF_ENABLE},
+ {SENSOR, PO1030_EDGE_ENH_OFF, 0x00},
+ {SENSOR, PO1030_EGA, 0x80},
+ {SENSOR, 0x78, 0x14},
+ {SENSOR, 0x6f, 0x01},
+ {SENSOR, PO1030_GLOBALGAINMAX, 0x14},
+ {SENSOR, PO1030_Cb_U_GAIN, 0x38},
+ {SENSOR, PO1030_Cr_V_GAIN, 0x38},
+ {SENSOR, PO1030_CONTROL1, PO1030_SHUTTER_MODE |
+ PO1030_AUTO_SUBSAMPLING |
+ PO1030_FRAME_EQUAL},
+ {SENSOR, PO1030_GC0, 0x10},
+ {SENSOR, PO1030_GC1, 0x20},
+ {SENSOR, PO1030_GC2, 0x40},
+ {SENSOR, PO1030_GC3, 0x60},
+ {SENSOR, PO1030_GC4, 0x80},
+ {SENSOR, PO1030_GC5, 0xa0},
+ {SENSOR, PO1030_GC6, 0xc0},
+ {SENSOR, PO1030_GC7, 0xff},
/* Set the width to 751 */
- {SENSOR, PO1030_REG_FRAMEWIDTH_H, 0x02},
- {SENSOR, PO1030_REG_FRAMEWIDTH_L, 0xef},
+ {SENSOR, PO1030_FRAMEWIDTH_H, 0x02},
+ {SENSOR, PO1030_FRAMEWIDTH_L, 0xef},
/* Set the height to 540 */
- {SENSOR, PO1030_REG_FRAMEHEIGHT_H, 0x02},
- {SENSOR, PO1030_REG_FRAMEHEIGHT_L, 0x1c},
+ {SENSOR, PO1030_FRAMEHEIGHT_H, 0x02},
+ {SENSOR, PO1030_FRAMEHEIGHT_L, 0x1c},
/* Set the x window to 1 */
- {SENSOR, PO1030_REG_WINDOWX_H, 0x00},
- {SENSOR, PO1030_REG_WINDOWX_L, 0x01},
+ {SENSOR, PO1030_WINDOWX_H, 0x00},
+ {SENSOR, PO1030_WINDOWX_L, 0x01},
/* Set the y window to 1 */
- {SENSOR, PO1030_REG_WINDOWY_H, 0x00},
- {SENSOR, PO1030_REG_WINDOWY_L, 0x01},
-
- {SENSOR, PO1030_REG_WINDOWWIDTH_H, 0x02},
- {SENSOR, PO1030_REG_WINDOWWIDTH_L, 0x87},
- {SENSOR, PO1030_REG_WINDOWHEIGHT_H, 0x01},
- {SENSOR, PO1030_REG_WINDOWHEIGHT_L, 0xe3},
-
- {SENSOR, PO1030_REG_OUTFORMCTRL2, 0x04},
- {SENSOR, PO1030_REG_OUTFORMCTRL2, 0x04},
- {SENSOR, PO1030_REG_AUTOCTRL1, 0x08},
- {SENSOR, PO1030_REG_CONTROL2, 0x03},
- {SENSOR, 0x21, 0x90},
- {SENSOR, PO1030_REG_YTARGET, 0x60},
- {SENSOR, 0x59, 0x13},
- {SENSOR, PO1030_REG_OUTFORMCTRL1, 0x40},
- {SENSOR, 0x5f, 0x00},
- {SENSOR, 0x60, 0x80},
- {SENSOR, 0x78, 0x14},
- {SENSOR, 0x6f, 0x01},
- {SENSOR, PO1030_REG_CONTROL1, 0x18},
- {SENSOR, PO1030_REG_GLOBALGAINMAX, 0x14},
- {SENSOR, 0x63, 0x38},
- {SENSOR, 0x64, 0x38},
- {SENSOR, PO1030_REG_CONTROL1, 0x58},
- {SENSOR, PO1030_REG_RED_GAIN, 0x30},
- {SENSOR, PO1030_REG_GREEN_1_GAIN, 0x30},
- {SENSOR, PO1030_REG_BLUE_GAIN, 0x30},
- {SENSOR, PO1030_REG_GREEN_2_GAIN, 0x30},
- {SENSOR, PO1030_REG_GC0, 0x10},
- {SENSOR, PO1030_REG_GC1, 0x20},
- {SENSOR, PO1030_REG_GC2, 0x40},
- {SENSOR, PO1030_REG_GC3, 0x60},
- {SENSOR, PO1030_REG_GC4, 0x80},
- {SENSOR, PO1030_REG_GC5, 0xa0},
- {SENSOR, PO1030_REG_GC6, 0xc0},
- {SENSOR, PO1030_REG_GC7, 0xff},
- /*end of sequence 4*/
- /*sequence 5*/
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
- {BRIDGE, M5602_XB_SIG_INI, 0x01},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x02},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xec},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x02},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x7e},
- {BRIDGE, M5602_XB_SIG_INI, 0x00},
- /*end of sequence 5*/
-
- /*sequence 6*/
- /* Changing 40 in f0 the image becomes green in bayer mode and red in
- * rgb mode */
- {SENSOR, PO1030_REG_RED_GAIN, PO1030_RED_GAIN_DEFAULT},
- /* in changing 40 in f0 the image becomes green in bayer mode and red in
- * rgb mode */
- {SENSOR, PO1030_REG_BLUE_GAIN, PO1030_BLUE_GAIN_DEFAULT},
+ {SENSOR, PO1030_WINDOWY_H, 0x00},
+ {SENSOR, PO1030_WINDOWY_L, 0x01},
/* with a very low lighted environment increase the exposure but
* decrease the FPS (Frame Per Second) */
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- /* Controls high exposure more than SENSOR_LOW_EXPOSURE, use only in
- * low lighted environment (f0 is more than ff ?)*/
- {SENSOR, PO1030_REG_INTEGLINES_H, ((PO1030_EXPOSURE_DEFAULT >> 2)
- & 0xff)},
-
- /* Controls middle exposure, use only in high lighted environment */
- {SENSOR, PO1030_REG_INTEGLINES_M, PO1030_EXPOSURE_DEFAULT & 0xff},
-
- /* Controls clarity (not sure) */
- {SENSOR, PO1030_REG_INTEGLINES_L, 0x00},
- /* Controls gain (the image is more lighted) */
- {SENSOR, PO1030_REG_GLOBALGAIN, PO1030_GLOBAL_GAIN_DEFAULT},
-
- /* Sets the width */
- {SENSOR, PO1030_REG_FRAMEWIDTH_H, 0x02},
- {SENSOR, PO1030_REG_FRAMEWIDTH_L, 0xef}
- /*end of sequence 6*/
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
};
#endif
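
The preinit/init tables above are {BRIDGE|SENSOR, register, value} triplets that the init and start paths replay in order (the s5k4aa code later in this patch also uses a SENSOR_LONG variant that carries a fourth byte). A compact model of that dispatch loop, assuming stub write helpers in place of m5602_write_bridge()/m5602_write_sensor() and a made-up two-entry table:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum { BRIDGE, SENSOR };

/* Stubs standing in for m5602_write_bridge()/m5602_write_sensor(). */
static int write_bridge(unsigned char reg, unsigned char val)
{
        printf("bridge 0x%02x <- 0x%02x\n", reg, val);
        return 0;
}

static int write_sensor(unsigned char reg, unsigned char val)
{
        printf("sensor 0x%02x <- 0x%02x\n", reg, val);
        return 0;
}

/* A made-up table in the same {target, register, value} shape. */
static const unsigned char demo_table[][3] = {
        {BRIDGE, 0x10, 0x02},
        {SENSOR, 0x3e, 0x24},
};

int main(void)
{
        size_t i;
        int err = 0;

        for (i = 0; i < ARRAY_SIZE(demo_table) && !err; i++) {
                switch (demo_table[i][0]) {
                case BRIDGE:
                        err = write_bridge(demo_table[i][1], demo_table[i][2]);
                        break;
                case SENSOR:
                        err = write_sensor(demo_table[i][1], demo_table[i][2]);
                        break;
                default:
                        fprintf(stderr, "invalid table entry\n");
                        return 1;
                }
        }
        return err ? 1 : 0;
}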
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index 4306d596056..191bcd71897 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -18,6 +18,19 @@
#include "m5602_s5k4aa.h"
+static int s5k4aa_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
+static int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
+static int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
+static int s5k4aa_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
+static int s5k4aa_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
+static int s5k4aa_set_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int s5k4aa_get_noise(struct gspca_dev *gspca_dev, __s32 *val);
+static int s5k4aa_set_noise(struct gspca_dev *gspca_dev, __s32 val);
+static int s5k4aa_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
+static int s5k4aa_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
+
static
const
struct dmi_system_id s5k4aa_vflip_dmi_table[] = {
@@ -46,6 +59,18 @@ static
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
DMI_MATCH(DMI_PRODUCT_NAME, "GX700/GX705/EX700")
}
+ }, {
+ .ident = "MSI L735",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1717X")
+ }
+ }, {
+ .ident = "Lenovo Y300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "L3000 Y300"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Y300")
+ }
},
{ }
};
@@ -61,10 +86,22 @@ static struct v4l2_pix_format s5k4aa_modes[] = {
.bytesperline = 640,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0
+ },
+ {
+ 1280,
+ 1024,
+ V4L2_PIX_FMT_SBGGR8,
+ V4L2_FIELD_NONE,
+ .sizeimage =
+ 1280 * 1024,
+ .bytesperline = 1280,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0
}
};
-const static struct ctrl s5k4aa_ctrls[] = {
+static const struct ctrl s5k4aa_ctrls[] = {
+#define VFLIP_IDX 0
{
{
.id = V4L2_CID_VFLIP,
@@ -77,8 +114,9 @@ const static struct ctrl s5k4aa_ctrls[] = {
},
.set = s5k4aa_set_vflip,
.get = s5k4aa_get_vflip
-
- }, {
+ },
+#define HFLIP_IDX 1
+ {
{
.id = V4L2_CID_HFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -90,8 +128,9 @@ const static struct ctrl s5k4aa_ctrls[] = {
},
.set = s5k4aa_set_hflip,
.get = s5k4aa_get_hflip
-
- }, {
+ },
+#define GAIN_IDX 2
+ {
{
.id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -99,12 +138,14 @@ const static struct ctrl s5k4aa_ctrls[] = {
.minimum = 0,
.maximum = 127,
.step = 1,
- .default_value = 0xa0,
+ .default_value = S5K4AA_DEFAULT_GAIN,
.flags = V4L2_CTRL_FLAG_SLIDER
},
.set = s5k4aa_set_gain,
.get = s5k4aa_get_gain
- }, {
+ },
+#define EXPOSURE_IDX 3
+ {
{
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -117,7 +158,36 @@ const static struct ctrl s5k4aa_ctrls[] = {
},
.set = s5k4aa_set_exposure,
.get = s5k4aa_get_exposure
- }
+ },
+#define NOISE_SUPP_IDX 4
+ {
+ {
+ .id = V4L2_CID_PRIVATE_BASE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Noise suppression (smoothing)",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ .set = s5k4aa_set_noise,
+ .get = s5k4aa_get_noise
+ },
+#define BRIGHTNESS_IDX 5
+ {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 0x1f,
+ .step = 1,
+ .default_value = S5K4AA_DEFAULT_BRIGHTNESS,
+ },
+ .set = s5k4aa_set_brightness,
+ .get = s5k4aa_get_brightness
+ },
+
};
static void s5k4aa_dump_registers(struct sd *sd);
@@ -127,6 +197,7 @@ int s5k4aa_probe(struct sd *sd)
u8 prod_id[6] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
const u8 expected_prod_id[6] = {0x00, 0x10, 0x00, 0x4b, 0x33, 0x75};
int i, err = 0;
+ s32 *sensor_settings;
if (force_sensor) {
if (force_sensor == S5K4AA_SENSOR) {
@@ -185,10 +256,20 @@ int s5k4aa_probe(struct sd *sd)
info("Detected a s5k4aa sensor");
sensor_found:
+ sensor_settings = kmalloc(
+ ARRAY_SIZE(s5k4aa_ctrls) * sizeof(s32), GFP_KERNEL);
+ if (!sensor_settings)
+ return -ENOMEM;
+
sd->gspca_dev.cam.cam_mode = s5k4aa_modes;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(s5k4aa_modes);
sd->desc->ctrls = s5k4aa_ctrls;
sd->desc->nctrls = ARRAY_SIZE(s5k4aa_ctrls);
+
+ for (i = 0; i < ARRAY_SIZE(s5k4aa_ctrls); i++)
+ sensor_settings[i] = s5k4aa_ctrls[i].qctrl.default_value;
+ sd->sensor_priv = sensor_settings;
+
return 0;
}
@@ -197,9 +278,45 @@ int s5k4aa_start(struct sd *sd)
int i, err = 0;
u8 data[2];
struct cam *cam = &sd->gspca_dev.cam;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ switch (cam->cam_mode[sd->gspca_dev.curr_mode].width) {
+ case 1280:
+ PDEBUG(D_V4L2, "Configuring camera for SXGA mode");
+
+ for (i = 0; i < ARRAY_SIZE(SXGA_s5k4aa); i++) {
+ switch (SXGA_s5k4aa[i][0]) {
+ case BRIDGE:
+ err = m5602_write_bridge(sd,
+ SXGA_s5k4aa[i][1],
+ SXGA_s5k4aa[i][2]);
+ break;
+
+ case SENSOR:
+ data[0] = SXGA_s5k4aa[i][2];
+ err = m5602_write_sensor(sd,
+ SXGA_s5k4aa[i][1],
+ data, 1);
+ break;
+
+ case SENSOR_LONG:
+ data[0] = SXGA_s5k4aa[i][2];
+ data[1] = SXGA_s5k4aa[i][3];
+ err = m5602_write_sensor(sd,
+ SXGA_s5k4aa[i][1],
+ data, 2);
+ break;
+
+ default:
+ err("Invalid stream command, exiting init");
+ return -EINVAL;
+ }
+ }
+ err = s5k4aa_set_noise(&sd->gspca_dev, 0);
+ if (err < 0)
+ return err;
+ break;
- switch (cam->cam_mode[sd->gspca_dev.curr_mode].width)
- {
case 640:
PDEBUG(D_V4L2, "Configuring camera for VGA mode");
@@ -231,8 +348,37 @@ int s5k4aa_start(struct sd *sd)
return -EINVAL;
}
}
+ err = s5k4aa_set_noise(&sd->gspca_dev, 1);
+ if (err < 0)
+ return err;
+ break;
}
- return err;
+ if (err < 0)
+ return err;
+
+ err = s5k4aa_set_exposure(&sd->gspca_dev,
+ sensor_settings[EXPOSURE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = s5k4aa_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
+ if (err < 0)
+ return err;
+
+ err = s5k4aa_set_brightness(&sd->gspca_dev,
+ sensor_settings[BRIGHTNESS_IDX]);
+ if (err < 0)
+ return err;
+
+ err = s5k4aa_set_noise(&sd->gspca_dev, sensor_settings[NOISE_SUPP_IDX]);
+ if (err < 0)
+ return err;
+
+ err = s5k4aa_set_vflip(&sd->gspca_dev, sensor_settings[VFLIP_IDX]);
+ if (err < 0)
+ return err;
+
+ return s5k4aa_set_hflip(&sd->gspca_dev, sensor_settings[HFLIP_IDX]);
}
int s5k4aa_init(struct sd *sd)
@@ -270,62 +416,28 @@ int s5k4aa_init(struct sd *sd)
if (dump_sensor)
s5k4aa_dump_registers(sd);
- if (!err && dmi_check_system(s5k4aa_vflip_dmi_table)) {
- u8 data = 0x02;
- info("vertical flip quirk active");
- m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
- m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1);
- data |= S5K4AA_RM_V_FLIP;
- data &= ~S5K4AA_RM_H_FLIP;
- m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
-
- /* Decrement COLSTART to preserve color order (BGGR) */
- m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
- data--;
- m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
-
- /* Increment ROWSTART to preserve color order (BGGR) */
- m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
- data++;
- m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
- }
-
- return (err < 0) ? err : 0;
-}
-
-int s5k4aa_power_down(struct sd *sd)
-{
- return 0;
+ return err;
}
-int s5k4aa_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
+static int s5k4aa_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 data = S5K4AA_PAGE_MAP_2;
- int err;
-
- err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
- if (err < 0)
- return err;
+ s32 *sensor_settings = sd->sensor_priv;
- err = m5602_read_sensor(sd, S5K4AA_EXPOSURE_HI, &data, 1);
- if (err < 0)
- return err;
-
- *val = data << 8;
- err = m5602_read_sensor(sd, S5K4AA_EXPOSURE_LO, &data, 1);
- *val |= data;
+ *val = sensor_settings[EXPOSURE_IDX];
PDEBUG(D_V4L2, "Read exposure %d", *val);
- return err;
+ return 0;
}
-int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
+static int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 data = S5K4AA_PAGE_MAP_2;
int err;
+ sensor_settings[EXPOSURE_IDX] = val;
PDEBUG(D_V4L2, "Set exposure to %d", val);
err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
if (err < 0)
@@ -340,29 +452,26 @@ int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
+static int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 data = S5K4AA_PAGE_MAP_2;
- int err;
-
- err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
- if (err < 0)
- return err;
+ s32 *sensor_settings = sd->sensor_priv;
- err = m5602_read_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
- *val = (data & S5K4AA_RM_V_FLIP) >> 7;
+ *val = sensor_settings[VFLIP_IDX];
PDEBUG(D_V4L2, "Read vertical flip %d", *val);
- return err;
+ return 0;
}
-int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
+static int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 data = S5K4AA_PAGE_MAP_2;
int err;
+ sensor_settings[VFLIP_IDX] = val;
+
PDEBUG(D_V4L2, "Set vertical flip to %d", val);
err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
if (err < 0)
@@ -370,56 +479,48 @@ int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
if (err < 0)
return err;
- data = ((data & ~S5K4AA_RM_V_FLIP)
- | ((val & 0x01) << 7));
- err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
+
+ err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1);
if (err < 0)
return err;
- if (val) {
- err = m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
- if (err < 0)
- return err;
-
- data++;
- err = m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
- } else {
- err = m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
- if (err < 0)
- return err;
+ if (dmi_check_system(s5k4aa_vflip_dmi_table))
+ val = !val;
- data--;
- err = m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
- }
+ data = ((data & ~S5K4AA_RM_V_FLIP) | ((val & 0x01) << 7));
+ err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
+ if (err < 0)
+ return err;
+ err = m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
+ if (err < 0)
+ return err;
+ data = (data & 0xfe) | !val;
+ err = m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
return err;
}
-int s5k4aa_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
+static int s5k4aa_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 data = S5K4AA_PAGE_MAP_2;
- int err;
-
- err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
- if (err < 0)
- return err;
+ s32 *sensor_settings = sd->sensor_priv;
- err = m5602_read_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
- *val = (data & S5K4AA_RM_H_FLIP) >> 6;
+ *val = sensor_settings[HFLIP_IDX];
PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
- return err;
+ return 0;
}
-int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
+static int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 data = S5K4AA_PAGE_MAP_2;
int err;
- PDEBUG(D_V4L2, "Set horizontal flip to %d",
- val);
+ sensor_settings[HFLIP_IDX] = val;
+
+ PDEBUG(D_V4L2, "Set horizontal flip to %d", val);
err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
if (err < 0)
return err;
@@ -427,62 +528,116 @@ int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
if (err < 0)
return err;
+ err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1);
+ if (err < 0)
+ return err;
+
+ if (dmi_check_system(s5k4aa_vflip_dmi_table))
+ val = !val;
+
data = ((data & ~S5K4AA_RM_H_FLIP) | ((val & 0x01) << 6));
err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
if (err < 0)
return err;
- if (val) {
- err = m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
- if (err < 0)
- return err;
- data++;
- err = m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
- if (err < 0)
- return err;
- } else {
- err = m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
- if (err < 0)
- return err;
- data--;
- err = m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
- }
-
+ err = m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
+ if (err < 0)
+ return err;
+ data = (data & 0xfe) | !val;
+ err = m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
return err;
}
-int s5k4aa_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
+static int s5k4aa_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[GAIN_IDX];
+ PDEBUG(D_V4L2, "Read gain %d", *val);
+ return 0;
+}
+
+static int s5k4aa_set_gain(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 data = S5K4AA_PAGE_MAP_2;
int err;
+ sensor_settings[GAIN_IDX] = val;
+
+ PDEBUG(D_V4L2, "Set gain to %d", val);
err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
if (err < 0)
return err;
- err = m5602_read_sensor(sd, S5K4AA_GAIN_2, &data, 1);
- *val = data;
- PDEBUG(D_V4L2, "Read gain %d", *val);
+ data = val & 0xff;
+ err = m5602_write_sensor(sd, S5K4AA_GAIN, &data, 1);
return err;
}
-int s5k4aa_set_gain(struct gspca_dev *gspca_dev, __s32 val)
+static int s5k4aa_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[BRIGHTNESS_IDX];
+ PDEBUG(D_V4L2, "Read brightness %d", *val);
+ return 0;
+}
+
+static int s5k4aa_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
u8 data = S5K4AA_PAGE_MAP_2;
int err;
- PDEBUG(D_V4L2, "Set gain to %d", val);
+ sensor_settings[BRIGHTNESS_IDX] = val;
+
+ PDEBUG(D_V4L2, "Set brightness to %d", val);
err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
if (err < 0)
return err;
data = val & 0xff;
- err = m5602_write_sensor(sd, S5K4AA_GAIN_2, &data, 1);
+ return m5602_write_sensor(sd, S5K4AA_BRIGHTNESS, &data, 1);
+}
- return err;
+static int s5k4aa_get_noise(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[NOISE_SUPP_IDX];
+ PDEBUG(D_V4L2, "Read noise %d", *val);
+ return 0;
+}
+
+static int s5k4aa_set_noise(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+ u8 data = S5K4AA_PAGE_MAP_2;
+ int err;
+
+ sensor_settings[NOISE_SUPP_IDX] = val;
+
+ PDEBUG(D_V4L2, "Set noise to %d", val);
+ err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
+ if (err < 0)
+ return err;
+
+ data = val & 0x01;
+ return m5602_write_sensor(sd, S5K4AA_NOISE_SUPP, &data, 1);
+}
+
+void s5k4aa_disconnect(struct sd *sd)
+{
+ sd->sensor = NULL;
+ kfree(sd->sensor_priv);
}
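s5k4aa_disconnect() frees the per-sensor settings cache that the new control handlers rely on: the set handlers record the requested value before programming the hardware, the get handlers return the cached value without a USB round-trip, and init() can replay the cache after resume. A rough userspace analogue of that pattern, with made-up names:

#include <stdio.h>
#include <stdlib.h>

enum { GAIN_IDX, BRIGHTNESS_IDX, NOISE_SUPP_IDX, NCTRLS };

struct sensor {
	int *settings;			/* cached control values */
};

static int sensor_probe(struct sensor *s)
{
	s->settings = calloc(NCTRLS, sizeof(*s->settings));
	return s->settings ? 0 : -1;
}

static int get_gain(const struct sensor *s)
{
	return s->settings[GAIN_IDX];	/* no hardware access needed */
}

static void set_gain(struct sensor *s, int val)
{
	s->settings[GAIN_IDX] = val;	/* remember it ... */
	/* ... a real driver would program the gain register here */
}

static void sensor_disconnect(struct sensor *s)
{
	free(s->settings);		/* mirrors kfree(sd->sensor_priv) */
	s->settings = NULL;
}

int main(void)
{
	struct sensor s;

	if (sensor_probe(&s))
		return 1;
	set_gain(&s, 0x5f);
	printf("cached gain: 0x%02x\n", get_gain(&s));
	sensor_disconnect(&s);
	return 0;
}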
static void s5k4aa_dump_registers(struct sd *sd)
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
index ca854d4f947..4440da4e7f0 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
@@ -47,8 +47,9 @@
#define S5K4AA_H_BLANK_LO__ 0x1e
#define S5K4AA_EXPOSURE_HI 0x17
#define S5K4AA_EXPOSURE_LO 0x18
-#define S5K4AA_GAIN_1 0x1f /* (digital?) gain : 5 bits */
-#define S5K4AA_GAIN_2 0x20 /* (analogue?) gain : 7 bits */
+#define S5K4AA_BRIGHTNESS 0x1f /* (digital?) gain : 5 bits */
+#define S5K4AA_GAIN 0x20 /* (analogue?) gain : 7 bits */
+#define S5K4AA_NOISE_SUPP 0x37
#define S5K4AA_RM_ROW_SKIP_4X 0x08
#define S5K4AA_RM_ROW_SKIP_2X 0x04
@@ -57,6 +58,9 @@
#define S5K4AA_RM_H_FLIP 0x40
#define S5K4AA_RM_V_FLIP 0x80
+#define S5K4AA_DEFAULT_GAIN 0x5f
+#define S5K4AA_DEFAULT_BRIGHTNESS 0x10
+
/*****************************************************************************/
/* Kernel module parameters */
@@ -66,25 +70,17 @@ extern int dump_sensor;
int s5k4aa_probe(struct sd *sd);
int s5k4aa_init(struct sd *sd);
int s5k4aa_start(struct sd *sd);
-int s5k4aa_power_down(struct sd *sd);
-
-int s5k4aa_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
-int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
-int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
-int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
-int s5k4aa_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
-int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
-int s5k4aa_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
-int s5k4aa_set_gain(struct gspca_dev *gspca_dev, __s32 val);
+void s5k4aa_disconnect(struct sd *sd);
static const struct m5602_sensor s5k4aa = {
.name = "S5K4AA",
+ .i2c_slave_id = 0x5a,
+ .i2c_regW = 2,
+
.probe = s5k4aa_probe,
.init = s5k4aa_init,
.start = s5k4aa_start,
- .power_down = s5k4aa_power_down,
- .i2c_slave_id = 0x5a,
- .i2c_regW = 2,
+ .disconnect = s5k4aa_disconnect,
};
static const unsigned char preinit_s5k4aa[][4] =
@@ -179,30 +175,12 @@ static const unsigned char init_s5k4aa[][4] =
{SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
{SENSOR, 0x0c, 0x05, 0x00},
{SENSOR, 0x02, 0x0e, 0x00},
- {SENSOR, S5K4AA_GAIN_1, 0x0f, 0x00},
- {SENSOR, S5K4AA_GAIN_2, 0x00, 0x00},
- {SENSOR, S5K4AA_GLOBAL_GAIN__, 0x01, 0x00},
- {SENSOR, 0x11, 0x00, 0x00},
- {SENSOR, 0x12, 0x00, 0x00},
- {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
{SENSOR, S5K4AA_READ_MODE, 0xa0, 0x00},
{SENSOR, 0x37, 0x00, 0x00},
- {SENSOR, S5K4AA_ROWSTART_HI, 0x00, 0x00},
- {SENSOR, S5K4AA_ROWSTART_LO, 0x2a, 0x00},
- {SENSOR, S5K4AA_COLSTART_HI, 0x00, 0x00},
- {SENSOR, S5K4AA_COLSTART_LO, 0x0b, 0x00},
- {SENSOR, S5K4AA_WINDOW_HEIGHT_HI, 0x03, 0x00},
- {SENSOR, S5K4AA_WINDOW_HEIGHT_LO, 0xc4, 0x00},
- {SENSOR, S5K4AA_WINDOW_WIDTH_HI, 0x05, 0x00},
- {SENSOR, S5K4AA_WINDOW_WIDTH_LO, 0x08, 0x00},
- {SENSOR, S5K4AA_H_BLANK_HI__, 0x00, 0x00},
- {SENSOR, S5K4AA_H_BLANK_LO__, 0x48, 0x00},
- {SENSOR, S5K4AA_EXPOSURE_HI, 0x00, 0x00},
- {SENSOR, S5K4AA_EXPOSURE_LO, 0x43, 0x00},
- {SENSOR, 0x11, 0x04, 0x00},
- {SENSOR, 0x12, 0xc3, 0x00},
- {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
+};
+static const unsigned char VGA_s5k4aa[][4] =
+{
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -238,7 +216,7 @@ static const unsigned char init_s5k4aa[][4] =
{SENSOR, 0x37, 0x01, 0x00},
/* ROWSTART_HI, ROWSTART_LO : 10 + (1024-960)/2 = 42 = 0x002a */
{SENSOR, S5K4AA_ROWSTART_HI, 0x00, 0x00},
- {SENSOR, S5K4AA_ROWSTART_LO, 0x2a, 0x00},
+ {SENSOR, S5K4AA_ROWSTART_LO, 0x29, 0x00},
{SENSOR, S5K4AA_COLSTART_HI, 0x00, 0x00},
{SENSOR, S5K4AA_COLSTART_LO, 0x0c, 0x00},
/* window_height_hi, window_height_lo : 960 = 0x03c0 */
@@ -255,12 +233,9 @@ static const unsigned char init_s5k4aa[][4] =
{SENSOR, 0x12, 0xc3, 0x00},
{SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
{SENSOR, 0x02, 0x0e, 0x00},
- {SENSOR_LONG, S5K4AA_GLOBAL_GAIN__, 0x0f, 0x00},
- {SENSOR, S5K4AA_GAIN_1, 0x0b, 0x00},
- {SENSOR, S5K4AA_GAIN_2, 0xa0, 0x00}
};
-static const unsigned char VGA_s5k4aa[][4] =
+static const unsigned char SXGA_s5k4aa[][4] =
{
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
@@ -273,50 +248,42 @@ static const unsigned char VGA_s5k4aa[][4] =
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- /* VSYNC_PARA, VSYNC_PARA : img height 480 = 0x01e0 */
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xe0, 0x00},
+ /* VSYNC_PARA, VSYNC_PARA : img height 1024 = 0x0400 */
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
{BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
{BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- /* HSYNC_PARA, HSYNC_PARA : img width 640 = 0x0280 */
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x80, 0x00},
+ /* HSYNC_PARA, HSYNC_PARA : img width 1280 = 0x0500 */
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x05, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00}, /* 48 MHz */
{SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
- {SENSOR, S5K4AA_READ_MODE, S5K4AA_RM_H_FLIP | S5K4AA_RM_ROW_SKIP_2X
- | S5K4AA_RM_COL_SKIP_2X, 0x00},
- /* 0x37 : Fix image stability when light is too bright and improves
- * image quality in 640x480, but worsens it in 1280x1024 */
+ {SENSOR, S5K4AA_READ_MODE, S5K4AA_RM_H_FLIP, 0x00},
{SENSOR, 0x37, 0x01, 0x00},
- /* ROWSTART_HI, ROWSTART_LO : 10 + (1024-960)/2 = 42 = 0x002a */
{SENSOR, S5K4AA_ROWSTART_HI, 0x00, 0x00},
- {SENSOR, S5K4AA_ROWSTART_LO, 0x2a, 0x00},
+ {SENSOR, S5K4AA_ROWSTART_LO, 0x09, 0x00},
{SENSOR, S5K4AA_COLSTART_HI, 0x00, 0x00},
- {SENSOR, S5K4AA_COLSTART_LO, 0x0c, 0x00},
- /* window_height_hi, window_height_lo : 960 = 0x03c0 */
- {SENSOR, S5K4AA_WINDOW_HEIGHT_HI, 0x03, 0x00},
- {SENSOR, S5K4AA_WINDOW_HEIGHT_LO, 0xc0, 0x00},
- /* window_width_hi, window_width_lo : 1280 = 0x0500 */
+ {SENSOR, S5K4AA_COLSTART_LO, 0x0a, 0x00},
+ {SENSOR, S5K4AA_WINDOW_HEIGHT_HI, 0x04, 0x00},
+ {SENSOR, S5K4AA_WINDOW_HEIGHT_LO, 0x00, 0x00},
{SENSOR, S5K4AA_WINDOW_WIDTH_HI, 0x05, 0x00},
{SENSOR, S5K4AA_WINDOW_WIDTH_LO, 0x00, 0x00},
- {SENSOR, S5K4AA_H_BLANK_HI__, 0x00, 0x00},
- {SENSOR, S5K4AA_H_BLANK_LO__, 0xa8, 0x00}, /* helps to sync... */
+ {SENSOR, S5K4AA_H_BLANK_HI__, 0x01, 0x00},
+ {SENSOR, S5K4AA_H_BLANK_LO__, 0xa8, 0x00},
{SENSOR, S5K4AA_EXPOSURE_HI, 0x01, 0x00},
{SENSOR, S5K4AA_EXPOSURE_LO, 0x00, 0x00},
{SENSOR, 0x11, 0x04, 0x00},
{SENSOR, 0x12, 0xc3, 0x00},
{SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
{SENSOR, 0x02, 0x0e, 0x00},
- {SENSOR_LONG, S5K4AA_GLOBAL_GAIN__, 0x0f, 0x00},
- {SENSOR, S5K4AA_GAIN_1, 0x0b, 0x00},
- {SENSOR, S5K4AA_GAIN_2, 0xa0, 0x00}
};
+
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.c b/drivers/media/video/gspca/m5602/m5602_s5k83a.c
index 42c86aa4dc8..7127321ace8 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.c
@@ -16,8 +16,20 @@
*
*/
+#include <linux/kthread.h>
#include "m5602_s5k83a.h"
+static int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int s5k83a_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
+static int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
+static int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
+static int s5k83a_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
+static int s5k83a_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
+static int s5k83a_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
+static int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
+
static struct v4l2_pix_format s5k83a_modes[] = {
{
640,
@@ -32,68 +44,77 @@ static struct v4l2_pix_format s5k83a_modes[] = {
}
};
-const static struct ctrl s5k83a_ctrls[] = {
+static const struct ctrl s5k83a_ctrls[] = {
+#define GAIN_IDX 0
{
{
- .id = V4L2_CID_BRIGHTNESS,
+ .id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "brightness",
+ .name = "gain",
.minimum = 0x00,
.maximum = 0xff,
.step = 0x01,
- .default_value = S5K83A_DEFAULT_BRIGHTNESS,
+ .default_value = S5K83A_DEFAULT_GAIN,
.flags = V4L2_CTRL_FLAG_SLIDER
},
- .set = s5k83a_set_brightness,
- .get = s5k83a_get_brightness
+ .set = s5k83a_set_gain,
+ .get = s5k83a_get_gain
- }, {
+ },
+#define BRIGHTNESS_IDX 1
+ {
{
- .id = V4L2_CID_WHITENESS,
+ .id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "whiteness",
+ .name = "brightness",
.minimum = 0x00,
.maximum = 0xff,
.step = 0x01,
- .default_value = S5K83A_DEFAULT_WHITENESS,
+ .default_value = S5K83A_DEFAULT_BRIGHTNESS,
.flags = V4L2_CTRL_FLAG_SLIDER
},
- .set = s5k83a_set_whiteness,
- .get = s5k83a_get_whiteness,
- }, {
+ .set = s5k83a_set_brightness,
+ .get = s5k83a_get_brightness,
+ },
+#define EXPOSURE_IDX 2
+ {
{
- .id = V4L2_CID_GAIN,
+ .id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "gain",
+ .name = "exposure",
.minimum = 0x00,
- .maximum = S5K83A_MAXIMUM_GAIN,
+ .maximum = S5K83A_MAXIMUM_EXPOSURE,
.step = 0x01,
- .default_value = S5K83A_DEFAULT_GAIN,
+ .default_value = S5K83A_DEFAULT_EXPOSURE,
.flags = V4L2_CTRL_FLAG_SLIDER
},
- .set = s5k83a_set_gain,
- .get = s5k83a_get_gain
- }, {
+ .set = s5k83a_set_exposure,
+ .get = s5k83a_get_exposure
+ },
+#define HFLIP_IDX 3
+ {
{
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "horizontal flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "horizontal flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = s5k83a_set_hflip,
.get = s5k83a_get_hflip
- }, {
+ },
+#define VFLIP_IDX 4
+ {
{
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "vertical flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "vertical flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = s5k83a_set_vflip,
.get = s5k83a_get_vflip
@@ -101,9 +122,14 @@ const static struct ctrl s5k83a_ctrls[] = {
};
static void s5k83a_dump_registers(struct sd *sd);
+static int s5k83a_get_rotation(struct sd *sd, u8 *reg_data);
+static int s5k83a_set_led_indication(struct sd *sd, u8 val);
+static int s5k83a_set_flip_real(struct gspca_dev *gspca_dev,
+ __s32 vflip, __s32 hflip);
int s5k83a_probe(struct sd *sd)
{
+ struct s5k83a_priv *sens_priv;
u8 prod_id = 0, ver_id = 0;
int i, err = 0;
@@ -145,16 +171,36 @@ int s5k83a_probe(struct sd *sd)
info("Detected a s5k83a sensor");
sensor_found:
+ sens_priv = kmalloc(
+ sizeof(struct s5k83a_priv), GFP_KERNEL);
+ if (!sens_priv)
+ return -ENOMEM;
+
+ sens_priv->settings =
+ kmalloc(sizeof(s32)*ARRAY_SIZE(s5k83a_ctrls), GFP_KERNEL);
+ if (!sens_priv->settings) {
+ kfree(sens_priv);
+ return -ENOMEM;
+ }
+
sd->gspca_dev.cam.cam_mode = s5k83a_modes;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(s5k83a_modes);
sd->desc->ctrls = s5k83a_ctrls;
sd->desc->nctrls = ARRAY_SIZE(s5k83a_ctrls);
+
+ /* NULL the pointer; the thread isn't running yet */
+ sens_priv->rotation_thread = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(s5k83a_ctrls); i++)
+ sens_priv->settings[i] = s5k83a_ctrls[i].qctrl.default_value;
+
+ sd->sensor_priv = sens_priv;
return 0;
}
int s5k83a_init(struct sd *sd)
{
int i, err = 0;
+ s32 *sensor_settings =
+ ((struct s5k83a_priv *) sd->sensor_priv)->settings;
for (i = 0; i < ARRAY_SIZE(init_s5k83a) && !err; i++) {
u8 data[2] = {0x00, 0x00};
@@ -187,87 +233,138 @@ int s5k83a_init(struct sd *sd)
if (dump_sensor)
s5k83a_dump_registers(sd);
- return (err < 0) ? err : 0;
-}
+ err = s5k83a_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
+ if (err < 0)
+ return err;
-int s5k83a_start(struct sd *sd)
-{
- return s5k83a_set_led_indication(sd, 1);
-}
+ err = s5k83a_set_brightness(&sd->gspca_dev,
+ sensor_settings[BRIGHTNESS_IDX]);
+ if (err < 0)
+ return err;
-int s5k83a_stop(struct sd *sd)
-{
- return s5k83a_set_led_indication(sd, 0);
+ err = s5k83a_set_exposure(&sd->gspca_dev,
+ sensor_settings[EXPOSURE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = s5k83a_set_hflip(&sd->gspca_dev, sensor_settings[HFLIP_IDX]);
+ if (err < 0)
+ return err;
+
+ err = s5k83a_set_vflip(&sd->gspca_dev, sensor_settings[VFLIP_IDX]);
+
+ return err;
}
-int s5k83a_power_down(struct sd *sd)
+static int rotation_thread_function(void *data)
{
+ struct sd *sd = (struct sd *) data;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
+ u8 reg, previous_rotation = 0;
+ __s32 vflip, hflip;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!schedule_timeout(100)) {
+ if (mutex_lock_interruptible(&sd->gspca_dev.usb_lock))
+ break;
+
+ s5k83a_get_rotation(sd, &reg);
+ if (previous_rotation != reg) {
+ previous_rotation = reg;
+ info("Camera was flipped");
+
+ s5k83a_get_vflip((struct gspca_dev *) sd, &vflip);
+ s5k83a_get_hflip((struct gspca_dev *) sd, &hflip);
+
+ if (reg) {
+ vflip = !vflip;
+ hflip = !hflip;
+ }
+ s5k83a_set_flip_real((struct gspca_dev *) sd,
+ vflip, hflip);
+ }
+
+ mutex_unlock(&sd->gspca_dev.usb_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ /* restore the unrotated ("front") flip orientation */
+ if (previous_rotation) {
+ s5k83a_get_vflip((struct gspca_dev *) sd, &vflip);
+ s5k83a_get_hflip((struct gspca_dev *) sd, &hflip);
+ s5k83a_set_flip_real((struct gspca_dev *) sd, vflip, hflip);
+ }
+
+ sens_priv->rotation_thread = NULL;
return 0;
}
-static void s5k83a_dump_registers(struct sd *sd)
+int s5k83a_start(struct sd *sd)
{
- int address;
- u8 page, old_page;
- m5602_read_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1);
+ int i, err = 0;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
- for (page = 0; page < 16; page++) {
- m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1);
- info("Dumping the s5k83a register state for page 0x%x", page);
- for (address = 0; address <= 0xff; address++) {
- u8 val = 0;
- m5602_read_sensor(sd, address, &val, 1);
- info("register 0x%x contains 0x%x",
- address, val);
- }
+ /* Create another thread that polls the camera's GPIO ports to check
+ whether it has been rotated. This is how the Windows driver does it,
+ so we have to assume there is no better way of accomplishing this */
+ sens_priv->rotation_thread = kthread_create(rotation_thread_function,
+ sd, "rotation thread");
+ wake_up_process(sens_priv->rotation_thread);
+
+ /* Preinit the sensor */
+ for (i = 0; i < ARRAY_SIZE(start_s5k83a) && !err; i++) {
+ u8 data[2] = {start_s5k83a[i][2], start_s5k83a[i][3]};
+ if (start_s5k83a[i][0] == SENSOR)
+ err = m5602_write_sensor(sd, start_s5k83a[i][1],
+ data, 2);
+ else
+ err = m5602_write_bridge(sd, start_s5k83a[i][1],
+ data[0]);
}
- info("s5k83a register state dump complete");
+ if (err < 0)
+ return err;
- for (page = 0; page < 16; page++) {
- m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1);
- info("Probing for which registers that are read/write "
- "for page 0x%x", page);
- for (address = 0; address <= 0xff; address++) {
- u8 old_val, ctrl_val, test_val = 0xff;
+ return s5k83a_set_led_indication(sd, 1);
+}
- m5602_read_sensor(sd, address, &old_val, 1);
- m5602_write_sensor(sd, address, &test_val, 1);
- m5602_read_sensor(sd, address, &ctrl_val, 1);
+int s5k83a_stop(struct sd *sd)
+{
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
- if (ctrl_val == test_val)
- info("register 0x%x is writeable", address);
- else
- info("register 0x%x is read only", address);
+ if (sens_priv->rotation_thread)
+ kthread_stop(sens_priv->rotation_thread);
- /* Restore original val */
- m5602_write_sensor(sd, address, &old_val, 1);
- }
- }
- info("Read/write register probing complete");
- m5602_write_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1);
+ return s5k83a_set_led_indication(sd, 0);
}
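s5k83a_start() spawns the rotation-polling thread with kthread_create()/wake_up_process(), and s5k83a_stop() tears it down with kthread_stop(). A loose userspace analogue of that start/poll/stop lifecycle, using pthreads and atomic flags in place of the kthread API (illustrative only, not the driver's mechanism):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int stop_requested;	/* stands in for kthread_stop() */
static atomic_int rotated;		/* stands in for the GPIO rotation pin */

static void *rotation_poll(void *arg)
{
	int previous = 0;
	(void)arg;

	while (!atomic_load(&stop_requested)) {
		usleep(100 * 1000);	/* the driver sleeps 100 jiffies per pass */
		if (atomic_load(&rotated) != previous) {
			previous = atomic_load(&rotated);
			printf("camera was flipped, toggling v/h flip\n");
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, rotation_poll, NULL);
	atomic_store(&rotated, 1);	/* pretend the display was turned over */
	usleep(300 * 1000);
	atomic_store(&stop_requested, 1);
	pthread_join(thr, NULL);
	return 0;
}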
-int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
+void s5k83a_disconnect(struct sd *sd)
{
- int err;
- u8 data[2];
- struct sd *sd = (struct sd *) gspca_dev;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
- err = m5602_read_sensor(sd, S5K83A_BRIGHTNESS, data, 2);
- if (err < 0)
- return err;
+ s5k83a_stop(sd);
+
+ sd->sensor = NULL;
+ kfree(sens_priv->settings);
+ kfree(sens_priv);
+}
- data[1] = data[1] << 1;
- *val = data[1];
+static int s5k83a_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
- return err;
+ *val = sens_priv->settings[GAIN_IDX];
+ return 0;
}
-int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
+static int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u8 data[2];
struct sd *sd = (struct sd *) gspca_dev;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
+
+ sens_priv->settings[GAIN_IDX] = val;
data[0] = 0x00;
data[1] = 0x20;
@@ -283,89 +380,69 @@ int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
/* FIXME: This is not sane, we need to figure out the composition
of these registers */
- data[0] = val >> 3; /* brightness, high 5 bits */
- data[1] = val >> 1; /* brightness, high 7 bits */
- err = m5602_write_sensor(sd, S5K83A_BRIGHTNESS, data, 2);
+ data[0] = val >> 3; /* gain, high 5 bits */
+ data[1] = val >> 1; /* gain, high 7 bits */
+ err = m5602_write_sensor(sd, S5K83A_GAIN, data, 2);
return err;
}
-int s5k83a_get_whiteness(struct gspca_dev *gspca_dev, __s32 *val)
+static int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
{
- int err;
- u8 data;
struct sd *sd = (struct sd *) gspca_dev;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
- err = m5602_read_sensor(sd, S5K83A_WHITENESS, &data, 1);
- if (err < 0)
- return err;
-
- *val = data;
-
- return err;
+ *val = sens_priv->settings[BRIGHTNESS_IDX];
+ return 0;
}
-int s5k83a_set_whiteness(struct gspca_dev *gspca_dev, __s32 val)
+static int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u8 data[1];
struct sd *sd = (struct sd *) gspca_dev;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
+ sens_priv->settings[BRIGHTNESS_IDX] = val;
data[0] = val;
- err = m5602_write_sensor(sd, S5K83A_WHITENESS, data, 1);
-
+ err = m5602_write_sensor(sd, S5K83A_BRIGHTNESS, data, 1);
return err;
}
-int s5k83a_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
+static int s5k83a_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
{
- int err;
- u8 data[2];
struct sd *sd = (struct sd *) gspca_dev;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
- err = m5602_read_sensor(sd, S5K83A_GAIN, data, 2);
- if (err < 0)
- return err;
-
- data[1] = data[1] & 0x3f;
- if (data[1] > S5K83A_MAXIMUM_GAIN)
- data[1] = S5K83A_MAXIMUM_GAIN;
-
- *val = data[1];
-
- return err;
+ *val = sens_priv->settings[EXPOSURE_IDX];
+ return 0;
}
-int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val)
+static int s5k83a_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u8 data[2];
struct sd *sd = (struct sd *) gspca_dev;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
+ sens_priv->settings[EXPOSURE_IDX] = val;
data[0] = 0;
data[1] = val;
- err = m5602_write_sensor(sd, S5K83A_GAIN, data, 2);
+ err = m5602_write_sensor(sd, S5K83A_EXPOSURE, data, 2);
return err;
}
-int s5k83a_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
+static int s5k83a_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
{
- int err;
- u8 data[1];
struct sd *sd = (struct sd *) gspca_dev;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
- data[0] = 0x05;
- err = m5602_write_sensor(sd, S5K83A_PAGE_MAP, data, 1);
- if (err < 0)
- return err;
-
- err = m5602_read_sensor(sd, S5K83A_FLIP, data, 1);
- *val = (data[0] | 0x40) ? 1 : 0;
-
- return err;
+ *val = sens_priv->settings[VFLIP_IDX];
+ return 0;
}
-int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
+static int s5k83a_set_flip_real(struct gspca_dev *gspca_dev,
+ __s32 vflip, __s32 hflip)
{
int err;
u8 data[1];
@@ -376,69 +453,83 @@ int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
if (err < 0)
return err;
- err = m5602_read_sensor(sd, S5K83A_FLIP, data, 1);
- if (err < 0)
- return err;
+ /* bit six is vflip, bit seven is hflip */
+ data[0] = S5K83A_FLIP_MASK;
+ data[0] = (vflip) ? data[0] | 0x40 : data[0];
+ data[0] = (hflip) ? data[0] | 0x80 : data[0];
- /* set or zero six bit, seven is hflip */
- data[0] = (val) ? (data[0] & 0x80) | 0x40 | S5K83A_FLIP_MASK
- : (data[0] & 0x80) | S5K83A_FLIP_MASK;
err = m5602_write_sensor(sd, S5K83A_FLIP, data, 1);
if (err < 0)
return err;
- data[0] = (val) ? 0x0b : 0x0a;
+ data[0] = (vflip) ? 0x0b : 0x0a;
err = m5602_write_sensor(sd, S5K83A_VFLIP_TUNE, data, 1);
+ if (err < 0)
+ return err;
+ data[0] = (hflip) ? 0x0a : 0x0b;
+ err = m5602_write_sensor(sd, S5K83A_HFLIP_TUNE, data, 1);
return err;
}
-int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
+static int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
- u8 data[1];
+ u8 reg;
+ __s32 hflip;
struct sd *sd = (struct sd *) gspca_dev;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
- data[0] = 0x05;
- err = m5602_write_sensor(sd, S5K83A_PAGE_MAP, data, 1);
+ sens_priv->settings[VFLIP_IDX] = val;
+
+ s5k83a_get_hflip(gspca_dev, &hflip);
+
+ err = s5k83a_get_rotation(sd, &reg);
if (err < 0)
return err;
+ if (reg) {
+ val = !val;
+ hflip = !hflip;
+ }
- err = m5602_read_sensor(sd, S5K83A_FLIP, data, 1);
- *val = (data[0] | 0x80) ? 1 : 0;
-
+ err = s5k83a_set_flip_real(gspca_dev, val, hflip);
return err;
}
-int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
+static int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
+
+ *val = sens_priv->settings[HFLIP_IDX];
+ return 0;
+}
+
+static int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
- u8 data[1];
+ u8 reg;
+ __s32 vflip;
struct sd *sd = (struct sd *) gspca_dev;
+ struct s5k83a_priv *sens_priv = sd->sensor_priv;
- data[0] = 0x05;
- err = m5602_write_sensor(sd, S5K83A_PAGE_MAP, data, 1);
- if (err < 0)
- return err;
+ sens_priv->settings[HFLIP_IDX] = val;
- err = m5602_read_sensor(sd, S5K83A_FLIP, data, 1);
- if (err < 0)
- return err;
+ s5k83a_get_vflip(gspca_dev, &vflip);
- /* set or zero seven bit, six is vflip */
- data[0] = (val) ? (data[0] & 0x40) | 0x80 | S5K83A_FLIP_MASK
- : (data[0] & 0x40) | S5K83A_FLIP_MASK;
- err = m5602_write_sensor(sd, S5K83A_FLIP, data, 1);
+ err = s5k83a_get_rotation(sd, &reg);
if (err < 0)
return err;
+ if (reg) {
+ val = !val;
+ vflip = !vflip;
+ }
- data[0] = (val) ? 0x0a : 0x0b;
- err = m5602_write_sensor(sd, S5K83A_HFLIP_TUNE, data, 1);
-
+ err = s5k83a_set_flip_real(gspca_dev, vflip, val);
return err;
}
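Both flip handlers above cache the user's request, read the rotation GPIO, and invert both axes when the camera is physically turned over before calling s5k83a_set_flip_real(). The composition itself is just a conditional double inversion; a minimal sketch:

#include <stdio.h>

/* Effective flip = user's wish combined with physical rotation: turning the
 * camera over mirrors both axes at once, so both requests invert together. */
static void effective_flip(int user_vflip, int user_hflip, int is_rotated,
			   int *out_vflip, int *out_hflip)
{
	*out_vflip = is_rotated ? !user_vflip : user_vflip;
	*out_hflip = is_rotated ? !user_hflip : user_hflip;
}

int main(void)
{
	int v, h;

	effective_flip(0, 1, 1, &v, &h);	/* hflip requested, camera rotated */
	printf("program sensor with vflip=%d hflip=%d\n", v, h);
	return 0;
}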
-int s5k83a_set_led_indication(struct sd *sd, u8 val)
+static int s5k83a_set_led_indication(struct sd *sd, u8 val)
{
int err = 0;
u8 data[1];
@@ -454,5 +545,55 @@ int s5k83a_set_led_indication(struct sd *sd, u8 val)
err = m5602_write_bridge(sd, M5602_XB_GPIO_DAT, data[0]);
- return (err < 0) ? err : 0;
+ return err;
+}
+
+/* Get camera rotation on Acer notebooks */
+static int s5k83a_get_rotation(struct sd *sd, u8 *reg_data)
+{
+ int err = m5602_read_bridge(sd, M5602_XB_GPIO_DAT, reg_data);
+ *reg_data = (*reg_data & S5K83A_GPIO_ROTATION_MASK) ? 0 : 1;
+ return err;
+}
+
+static void s5k83a_dump_registers(struct sd *sd)
+{
+ int address;
+ u8 page, old_page;
+ m5602_read_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1);
+
+ for (page = 0; page < 16; page++) {
+ m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1);
+ info("Dumping the s5k83a register state for page 0x%x", page);
+ for (address = 0; address <= 0xff; address++) {
+ u8 val = 0;
+ m5602_read_sensor(sd, address, &val, 1);
+ info("register 0x%x contains 0x%x",
+ address, val);
+ }
+ }
+ info("s5k83a register state dump complete");
+
+ for (page = 0; page < 16; page++) {
+ m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1);
+ info("Probing for which registers that are read/write "
+ "for page 0x%x", page);
+ for (address = 0; address <= 0xff; address++) {
+ u8 old_val, ctrl_val, test_val = 0xff;
+
+ m5602_read_sensor(sd, address, &old_val, 1);
+ m5602_write_sensor(sd, address, &test_val, 1);
+ m5602_read_sensor(sd, address, &ctrl_val, 1);
+
+ if (ctrl_val == test_val)
+ info("register 0x%x is writeable", address);
+ else
+ info("register 0x%x is read only", address);
+
+ /* Restore original val */
+ m5602_write_sensor(sd, address, &old_val, 1);
+ }
+ }
+ info("Read/write register probing complete");
+ m5602_write_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1);
}
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.h b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
index 819ab25272b..7814b078acd 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
@@ -21,20 +21,21 @@
#include "m5602_sensor.h"
-#define S5K83A_FLIP 0x01
-#define S5K83A_HFLIP_TUNE 0x03
-#define S5K83A_VFLIP_TUNE 0x05
-#define S5K83A_WHITENESS 0x0a
-#define S5K83A_GAIN 0x18
-#define S5K83A_BRIGHTNESS 0x1b
-#define S5K83A_PAGE_MAP 0xec
-
-#define S5K83A_DEFAULT_BRIGHTNESS 0x71
-#define S5K83A_DEFAULT_WHITENESS 0x7e
-#define S5K83A_DEFAULT_GAIN 0x00
-#define S5K83A_MAXIMUM_GAIN 0x3c
-#define S5K83A_FLIP_MASK 0x10
+#define S5K83A_FLIP 0x01
+#define S5K83A_HFLIP_TUNE 0x03
+#define S5K83A_VFLIP_TUNE 0x05
+#define S5K83A_BRIGHTNESS 0x0a
+#define S5K83A_EXPOSURE 0x18
+#define S5K83A_GAIN 0x1b
+#define S5K83A_PAGE_MAP 0xec
+
+#define S5K83A_DEFAULT_GAIN 0x71
+#define S5K83A_DEFAULT_BRIGHTNESS 0x7e
+#define S5K83A_DEFAULT_EXPOSURE 0x00
+#define S5K83A_MAXIMUM_EXPOSURE 0x3c
+#define S5K83A_FLIP_MASK 0x10
#define S5K83A_GPIO_LED_MASK 0x10
+#define S5K83A_GPIO_ROTATION_MASK 0x40
/*****************************************************************************/
@@ -46,20 +47,7 @@ int s5k83a_probe(struct sd *sd);
int s5k83a_init(struct sd *sd);
int s5k83a_start(struct sd *sd);
int s5k83a_stop(struct sd *sd);
-int s5k83a_power_down(struct sd *sd);
-
-int s5k83a_set_led_indication(struct sd *sd, u8 val);
-
-int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
-int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
-int s5k83a_set_whiteness(struct gspca_dev *gspca_dev, __s32 val);
-int s5k83a_get_whiteness(struct gspca_dev *gspca_dev, __s32 *val);
-int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val);
-int s5k83a_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
-int s5k83a_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
-int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
-int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
-int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
+void s5k83a_disconnect(struct sd *sd);
static const struct m5602_sensor s5k83a = {
.name = "S5K83A",
@@ -67,11 +55,18 @@ static const struct m5602_sensor s5k83a = {
.init = s5k83a_init,
.start = s5k83a_start,
.stop = s5k83a_stop,
- .power_down = s5k83a_power_down,
+ .disconnect = s5k83a_disconnect,
.i2c_slave_id = 0x5a,
.i2c_regW = 2,
};
+struct s5k83a_priv {
+ /* We use another thread to periodically
+ probe the orientation of the camera */
+ struct task_struct *rotation_thread;
+ s32 *settings;
+};
+
static const unsigned char preinit_s5k83a[][4] =
{
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
@@ -108,8 +103,6 @@ static const unsigned char preinit_s5k83a[][4] =
{BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
{BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
{BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00},
-
- {SENSOR, S5K83A_PAGE_MAP, 0x00, 0x00}
};
/* This could probably be considerably shortened.
@@ -117,86 +110,8 @@ static const unsigned char preinit_s5k83a[][4] =
*/
static const unsigned char init_s5k83a[][4] =
{
- {SENSOR, S5K83A_PAGE_MAP, 0x04, 0x00},
- {SENSOR, 0xaf, 0x01, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x00, 0x00},
- {SENSOR, 0x7b, 0xff, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
- {SENSOR, 0x01, 0x50, 0x00},
- {SENSOR, 0x12, 0x20, 0x00},
- {SENSOR, 0x17, 0x40, 0x00},
- {SENSOR, S5K83A_BRIGHTNESS, 0x0f, 0x00},
- {SENSOR, 0x1c, 0x00, 0x00},
- {SENSOR, 0x02, 0x70, 0x00},
- {SENSOR, 0x03, 0x0b, 0x00},
- {SENSOR, 0x04, 0xf0, 0x00},
- {SENSOR, 0x05, 0x0b, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
-
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x87, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
-
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
- {SENSOR, 0x06, 0x71, 0x00},
- {SENSOR, 0x07, 0xe8, 0x00},
- {SENSOR, 0x08, 0x02, 0x00},
- {SENSOR, 0x09, 0x88, 0x00},
- {SENSOR, 0x14, 0x00, 0x00},
- {SENSOR, 0x15, 0x20, 0x00},
- {SENSOR, 0x19, 0x00, 0x00},
- {SENSOR, 0x1a, 0x98, 0x00},
- {SENSOR, 0x0f, 0x02, 0x00},
- {SENSOR, 0x10, 0xe5, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
- {SENSOR_LONG, 0x14, 0x00, 0x20},
- {SENSOR_LONG, 0x0d, 0x00, 0x7d},
- {SENSOR_LONG, 0x1b, 0x0d, 0x05},
-
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x87, 0x00},
-
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
+ /* The following sequence is useless after a clean boot
+ but is necessary after resume from suspend */
{BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
{BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
{BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00},
@@ -216,7 +131,7 @@ static const unsigned char init_s5k83a[][4] =
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00},
{BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x1c, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
{BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
{BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00},
{BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
@@ -225,109 +140,34 @@ static const unsigned char init_s5k83a[][4] =
{SENSOR, S5K83A_PAGE_MAP, 0x04, 0x00},
{SENSOR, 0xaf, 0x01, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
- /* ff ( init value )is very dark) || 71 and f0 better */
+ {SENSOR, S5K83A_PAGE_MAP, 0x00, 0x00},
{SENSOR, 0x7b, 0xff, 0x00},
{SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
{SENSOR, 0x01, 0x50, 0x00},
{SENSOR, 0x12, 0x20, 0x00},
{SENSOR, 0x17, 0x40, 0x00},
- {SENSOR, S5K83A_BRIGHTNESS, 0x0f, 0x00},
{SENSOR, 0x1c, 0x00, 0x00},
{SENSOR, 0x02, 0x70, 0x00},
- /* some values like 0x10 give a blue-purple image */
{SENSOR, 0x03, 0x0b, 0x00},
{SENSOR, 0x04, 0xf0, 0x00},
{SENSOR, 0x05, 0x0b, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
-
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
- /* under 80 don't work, highter depend on value */
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
-
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00},
-
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
-
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
{SENSOR, 0x06, 0x71, 0x00},
- {SENSOR, 0x07, 0xe8, 0x00},
+ {SENSOR, 0x07, 0xe8, 0x00}, /* 488 */
{SENSOR, 0x08, 0x02, 0x00},
- {SENSOR, 0x09, 0x88, 0x00},
+ {SENSOR, 0x09, 0x88, 0x00}, /* 648 */
{SENSOR, 0x14, 0x00, 0x00},
- {SENSOR, 0x15, 0x20, 0x00},
+ {SENSOR, 0x15, 0x20, 0x00}, /* 32 */
{SENSOR, 0x19, 0x00, 0x00},
- {SENSOR, 0x1a, 0x98, 0x00},
+ {SENSOR, 0x1a, 0x98, 0x00}, /* 152 */
{SENSOR, 0x0f, 0x02, 0x00},
- {SENSOR, 0x10, 0xe5, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
- {SENSOR_LONG, 0x14, 0x00, 0x20},
- {SENSOR_LONG, 0x0d, 0x00, 0x7d},
- {SENSOR_LONG, 0x1b, 0x0d, 0x05},
-
- /* The following sequence is useless after a clean boot
- but is necessary after resume from suspend */
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00},
-
- {SENSOR, S5K83A_PAGE_MAP, 0x04, 0x00},
- {SENSOR, 0xaf, 0x01, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x00, 0x00},
- {SENSOR, 0x7b, 0xff, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
- {SENSOR, 0x01, 0x50, 0x00},
- {SENSOR, 0x12, 0x20, 0x00},
- {SENSOR, 0x17, 0x40, 0x00},
- {SENSOR, S5K83A_BRIGHTNESS, 0x0f, 0x00},
- {SENSOR, 0x1c, 0x00, 0x00},
- {SENSOR, 0x02, 0x70, 0x00},
- {SENSOR, 0x03, 0x0b, 0x00},
- {SENSOR, 0x04, 0xf0, 0x00},
- {SENSOR, 0x05, 0x0b, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
+ {SENSOR, 0x10, 0xe5, 0x00}, /* 741 */
+ /* normal colors
+ (this is the value after boot, but it can differ after experimentation) */
+ {SENSOR, 0x00, 0x06, 0x00},
+};
+static const unsigned char start_s5k83a[][4] =
+{
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -340,7 +180,7 @@ static const unsigned char init_s5k83a[][4] =
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00}, /* 484 */
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
@@ -348,50 +188,10 @@ static const unsigned char init_s5k83a[][4] =
{BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00}, /* 639 */
{BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
-
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
- {SENSOR, 0x06, 0x71, 0x00},
- {SENSOR, 0x07, 0xe8, 0x00},
- {SENSOR, 0x08, 0x02, 0x00},
- {SENSOR, 0x09, 0x88, 0x00},
- {SENSOR, 0x14, 0x00, 0x00},
- {SENSOR, 0x15, 0x20, 0x00},
- {SENSOR, 0x19, 0x00, 0x00},
- {SENSOR, 0x1a, 0x98, 0x00},
- {SENSOR, 0x0f, 0x02, 0x00},
-
- {SENSOR, 0x10, 0xe5, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
- {SENSOR_LONG, 0x14, 0x00, 0x20},
- {SENSOR_LONG, 0x0d, 0x00, 0x7d},
- {SENSOR_LONG, 0x1b, 0x0d, 0x05},
-
- /* normal colors
- (this is value after boot, but after tries can be different) */
- {SENSOR, 0x00, 0x06, 0x00},
-
- /* set default brightness */
- {SENSOR_LONG, 0x14, 0x00, 0x20},
- {SENSOR_LONG, 0x0d, 0x01, 0x00},
- {SENSOR_LONG, 0x1b, S5K83A_DEFAULT_BRIGHTNESS >> 3,
- S5K83A_DEFAULT_BRIGHTNESS >> 1},
-
- /* set default whiteness */
- {SENSOR, S5K83A_WHITENESS, S5K83A_DEFAULT_WHITENESS, 0x00},
-
- /* set default gain */
- {SENSOR_LONG, 0x18, 0x00, S5K83A_DEFAULT_GAIN},
-
- /* set default flip */
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
- {SENSOR, S5K83A_FLIP, 0x00 | S5K83A_FLIP_MASK, 0x00},
- {SENSOR, S5K83A_HFLIP_TUNE, 0x0b, 0x00},
- {SENSOR, S5K83A_VFLIP_TUNE, 0x0a, 0x00}
-
};
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_sensor.h b/drivers/media/video/gspca/m5602/m5602_sensor.h
index 0d3026936f2..edff4f1f586 100644
--- a/drivers/media/video/gspca/m5602/m5602_sensor.h
+++ b/drivers/media/video/gspca/m5602/m5602_sensor.h
@@ -21,13 +21,17 @@
#include "m5602_bridge.h"
+#define M5602_V4L2_CID_GREEN_BALANCE (V4L2_CID_PRIVATE_BASE + 0)
+#define M5602_V4L2_CID_NOISE_SUPPRESION (V4L2_CID_PRIVATE_BASE + 1)
+
/* Enumerates all supported sensors */
enum sensors {
OV9650_SENSOR = 1,
S5K83A_SENSOR = 2,
S5K4AA_SENSOR = 3,
MT9M111_SENSOR = 4,
- PO1030_SENSOR = 5
+ PO1030_SENSOR = 5,
+ OV7660_SENSOR = 6,
};
/* Enumerates all possible instruction types */
@@ -61,9 +65,6 @@ struct m5602_sensor {
/* Executed when the device is disconnected */
void (*disconnect)(struct sd *sd);
-
- /* Performs a power down sequence */
- int (*power_down)(struct sd *sd);
};
#endif
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 2a901a4a6f0..30132513400 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -321,6 +321,7 @@ static const struct sd_desc sd_desc = {
/* -- module initialisation -- */
static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x08ca, 0x0111)},
+ {USB_DEVICE(0x093a, 0x010f)},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
@@ -347,8 +348,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- if (usb_register(&sd_driver) < 0)
- return -1;
+ int ret;
+
+ ret = usb_register(&sd_driver);
+ if (ret < 0)
+ return ret;
PDEBUG(D_PROBE, "registered");
return 0;
}
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 1fff37b7989..188866ac6ce 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -50,6 +50,13 @@ static int i2c_detect_tries = 10;
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
+ char bridge;
+#define BRIDGE_OV511 0
+#define BRIDGE_OV511PLUS 1
+#define BRIDGE_OV518 2
+#define BRIDGE_OV518PLUS 3
+#define BRIDGE_OV519 4
+
/* Determined by sensor type */
__u8 sif;
@@ -87,6 +94,9 @@ static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
+static void setbrightness(struct gspca_dev *gspca_dev);
+static void setcontrast(struct gspca_dev *gspca_dev);
+static void setcolors(struct gspca_dev *gspca_dev);
static struct ctrl sd_ctrls[] = {
{
@@ -164,7 +174,7 @@ static struct ctrl sd_ctrls[] = {
},
};
-static const struct v4l2_pix_format vga_mode[] = {
+static const struct v4l2_pix_format ov519_vga_mode[] = {
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3 / 8 + 590,
@@ -176,7 +186,7 @@ static const struct v4l2_pix_format vga_mode[] = {
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
-static const struct v4l2_pix_format sif_mode[] = {
+static const struct v4l2_pix_format ov519_sif_mode[] = {
{176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 176 * 144 * 3 / 8 + 590,
@@ -189,6 +199,47 @@ static const struct v4l2_pix_format sif_mode[] = {
.priv = 0},
};
+static const struct v4l2_pix_format ov518_vga_mode[] = {
+ {320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 8 + 590,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 1},
+ {640, 480, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480 * 3 / 8 + 590,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 0},
+};
+static const struct v4l2_pix_format ov518_sif_mode[] = {
+ {176, 144, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 40000,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 1},
+ {352, 288, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
+ .bytesperline = 352,
+ .sizeimage = 352 * 288 * 3 / 8 + 590,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 0},
+};
+
+
+/* Registers common to OV511 / OV518 */
+#define R51x_SYS_RESET 0x50
+#define R51x_SYS_INIT 0x53
+#define R51x_SYS_SNAP 0x52
+#define R51x_SYS_CUST_ID 0x5F
+#define R51x_COMP_LUT_BEGIN 0x80
+
+/* OV511 Camera interface register numbers */
+#define R511_SYS_LED_CTL 0x55 /* OV511+ only */
+#define OV511_RESET_NOREGS 0x3F /* All but OV511 & regs */
+
+/* OV518 Camera interface register numbers */
+#define R518_GPIO_OUT 0x56 /* OV518(+) only */
+#define R518_GPIO_CTL 0x57 /* OV518(+) only */
+
/* OV519 Camera interface register numbers */
#define OV519_R10_H_SIZE 0x10
#define OV519_R11_V_SIZE 0x11
@@ -224,6 +275,8 @@ static const struct v4l2_pix_format sif_mode[] = {
/* OV7610 registers */
#define OV7610_REG_GAIN 0x00 /* gain setting (5:0) */
+#define OV7610_REG_BLUE 0x01 /* blue channel balance */
+#define OV7610_REG_RED 0x02 /* red channel balance */
#define OV7610_REG_SAT 0x03 /* saturation */
#define OV8610_REG_HUE 0x04 /* 04 reserved */
#define OV7610_REG_CNT 0x05 /* Y contrast */
@@ -846,11 +899,12 @@ static unsigned char ov7670_abs_to_sm(unsigned char v)
static int reg_w(struct sd *sd, __u16 index, __u8 value)
{
int ret;
+ int req = (sd->bridge <= BRIDGE_OV511PLUS) ? 2 : 1;
sd->gspca_dev.usb_buf[0] = value;
ret = usb_control_msg(sd->gspca_dev.dev,
usb_sndctrlpipe(sd->gspca_dev.dev, 0),
- 1, /* REQ_IO (ov518/519) */
+ req,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index,
sd->gspca_dev.usb_buf, 1, 500);
@@ -864,10 +918,11 @@ static int reg_w(struct sd *sd, __u16 index, __u8 value)
static int reg_r(struct sd *sd, __u16 index)
{
int ret;
+ int req = (sd->bridge <= BRIDGE_OV511PLUS) ? 3 : 1;
ret = usb_control_msg(sd->gspca_dev.dev,
usb_rcvctrlpipe(sd->gspca_dev.dev, 0),
- 1, /* REQ_IO */
+ req,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, sd->gspca_dev.usb_buf, 1, 500);
@@ -924,6 +979,28 @@ static int reg_w_mask(struct sd *sd,
}
/*
+ * Writes an n-byte value to a single register. Only valid with certain
+ * registers (0x30 and 0xc4 - 0xce).
+ */
+static int ov518_reg_w32(struct sd *sd, __u16 index, u32 value, int n)
+{
+ int ret;
+
+ *((u32 *)sd->gspca_dev.usb_buf) = __cpu_to_le32(value);
+
+ ret = usb_control_msg(sd->gspca_dev.dev,
+ usb_sndctrlpipe(sd->gspca_dev.dev, 0),
+ 1 /* REG_IO */,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index,
+ sd->gspca_dev.usb_buf, n, 500);
+ if (ret < 0)
+ PDEBUG(D_ERR, "Write reg32 [%02x] %08x failed", index, value);
+ return ret;
+}
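ov518_reg_w32() stores __cpu_to_le32(value) into the USB buffer and sends the first n bytes, so the register value always goes out least-significant byte first. An equivalent byte-wise little-endian packing, shown standalone for clarity (the buffer and sample value here are just examples):

#include <stdio.h>
#include <stdint.h>

/* Pack 'value' into 'buf' least-significant byte first, which is the byte
 * order the __cpu_to_le32() store above produces before the control transfer. */
static void pack_le(uint8_t *buf, uint32_t value, int n)
{
	int i;

	for (i = 0; i < n; i++)
		buf[i] = (value >> (8 * i)) & 0xff;
}

int main(void)
{
	uint8_t buf[4];
	int i;

	pack_le(buf, 131098, 3);	/* 0x2001a, as written to reg 0xca */
	for (i = 0; i < 3; i++)
		printf("%02x ", buf[i]);
	printf("\n");			/* prints: 1a 00 02 */
	return 0;
}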
+
+
+/*
* The OV518 I2C I/O procedure is different, hence, this function.
* This is normally only called from i2c_w(). Note that this function
* always succeeds regardless of whether the sensor is present and working.
@@ -1014,20 +1091,47 @@ static inline int ov51x_stop(struct sd *sd)
{
PDEBUG(D_STREAM, "stopping");
sd->stopped = 1;
- return reg_w(sd, OV519_SYS_RESET1, 0x0f);
+ switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ return reg_w(sd, R51x_SYS_RESET, 0x3d);
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ return reg_w_mask(sd, R51x_SYS_RESET, 0x3a, 0x3a);
+ case BRIDGE_OV519:
+ return reg_w(sd, OV519_SYS_RESET1, 0x0f);
+ }
+
+ return 0;
}
/* Restarts OV511 after ov511_stop() is called. Has no effect if it is not
* actually stopped (for performance). */
static inline int ov51x_restart(struct sd *sd)
{
+ int rc;
+
PDEBUG(D_STREAM, "restarting");
if (!sd->stopped)
return 0;
sd->stopped = 0;
/* Reinitialize the stream */
- return reg_w(sd, OV519_SYS_RESET1, 0x00);
+ switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ return reg_w(sd, R51x_SYS_RESET, 0x00);
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ rc = reg_w(sd, 0x2f, 0x80);
+ if (rc < 0)
+ return rc;
+ return reg_w(sd, R51x_SYS_RESET, 0x00);
+ case BRIDGE_OV519:
+ return reg_w(sd, OV519_SYS_RESET1, 0x00);
+ }
+
+ return 0;
}
/* This does an initial reset of an OmniVision sensor and ensures that I2C
@@ -1287,16 +1391,161 @@ static int ov6xx0_configure(struct sd *sd)
/* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */
static void ov51x_led_control(struct sd *sd, int on)
{
- reg_w_mask(sd, OV519_GPIO_DATA_OUT0, !on, 1); /* 0 / 1 */
+ switch (sd->bridge) {
+ /* OV511 has no LED control */
+ case BRIDGE_OV511PLUS:
+ reg_w(sd, R511_SYS_LED_CTL, on ? 1 : 0);
+ break;
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ reg_w_mask(sd, R518_GPIO_OUT, on ? 0x02 : 0x00, 0x02);
+ break;
+ case BRIDGE_OV519:
+ reg_w_mask(sd, OV519_GPIO_DATA_OUT0, !on, 1); /* 0 / 1 */
+ break;
+ }
}
-/* this function is called at probe time */
-static int sd_config(struct gspca_dev *gspca_dev,
- const struct usb_device_id *id)
+/* OV518 quantization tables are 8x4 (instead of 8x8) */
+static int ov518_upload_quan_tables(struct sd *sd)
+{
+ const unsigned char yQuanTable518[] = {
+ 5, 4, 5, 6, 6, 7, 7, 7,
+ 5, 5, 5, 5, 6, 7, 7, 7,
+ 6, 6, 6, 6, 7, 7, 7, 8,
+ 7, 7, 6, 7, 7, 7, 8, 8
+ };
+
+ const unsigned char uvQuanTable518[] = {
+ 6, 6, 6, 7, 7, 7, 7, 7,
+ 6, 6, 6, 7, 7, 7, 7, 7,
+ 6, 6, 6, 7, 7, 7, 7, 8,
+ 7, 7, 7, 7, 7, 7, 8, 8
+ };
+
+ const unsigned char *pYTable = yQuanTable518;
+ const unsigned char *pUVTable = uvQuanTable518;
+ unsigned char val0, val1;
+ int i, rc, reg = R51x_COMP_LUT_BEGIN;
+
+ PDEBUG(D_PROBE, "Uploading quantization tables");
+
+ for (i = 0; i < 16; i++) {
+ val0 = *pYTable++;
+ val1 = *pYTable++;
+ val0 &= 0x0f;
+ val1 &= 0x0f;
+ val0 |= val1 << 4;
+ rc = reg_w(sd, reg, val0);
+ if (rc < 0)
+ return rc;
+
+ val0 = *pUVTable++;
+ val1 = *pUVTable++;
+ val0 &= 0x0f;
+ val1 &= 0x0f;
+ val0 |= val1 << 4;
+ rc = reg_w(sd, reg + 16, val0);
+ if (rc < 0)
+ return rc;
+
+ reg++;
+ }
+
+ return 0;
+}
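The upload loop above packs two 4-bit quantization coefficients into each byte, low nibble first, writing the Y table starting at R51x_COMP_LUT_BEGIN and the UV table 16 registers later. A standalone sketch of the nibble packing using the same Y table values:

#include <stdio.h>
#include <stdint.h>

/* Pack an 8x4 table of 4-bit coefficients two per byte, low nibble first,
 * the same way the upload loop combines val0 and val1 before reg_w(). */
static void pack_quan_table(const uint8_t *table, uint8_t *out, int pairs)
{
	int i;

	for (i = 0; i < pairs; i++)
		out[i] = (table[2 * i] & 0x0f) | ((table[2 * i + 1] & 0x0f) << 4);
}

int main(void)
{
	static const uint8_t y_table[32] = {
		5, 4, 5, 6, 6, 7, 7, 7,
		5, 5, 5, 5, 6, 7, 7, 7,
		6, 6, 6, 6, 7, 7, 7, 8,
		7, 7, 6, 7, 7, 7, 8, 8
	};
	uint8_t packed[16];
	int i;

	pack_quan_table(y_table, packed, 16);
	for (i = 0; i < 16; i++)
		printf("%02x ", packed[i]);	/* first byte: 0x45 (4 << 4 | 5) */
	printf("\n");
	return 0;
}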
+
+/* This initializes the OV518/OV518+ and the sensor */
+static int ov518_configure(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct cam *cam;
+ int rc;
+
+ /* For 518 and 518+ */
+ static struct ov_regvals init_518[] = {
+ { R51x_SYS_RESET, 0x40 },
+ { R51x_SYS_INIT, 0xe1 },
+ { R51x_SYS_RESET, 0x3e },
+ { R51x_SYS_INIT, 0xe1 },
+ { R51x_SYS_RESET, 0x00 },
+ { R51x_SYS_INIT, 0xe1 },
+ { 0x46, 0x00 },
+ { 0x5d, 0x03 },
+ };
+
+ static struct ov_regvals norm_518[] = {
+ { R51x_SYS_SNAP, 0x02 }, /* Reset */
+ { R51x_SYS_SNAP, 0x01 }, /* Enable */
+ { 0x31, 0x0f },
+ { 0x5d, 0x03 },
+ { 0x24, 0x9f },
+ { 0x25, 0x90 },
+ { 0x20, 0x00 },
+ { 0x51, 0x04 },
+ { 0x71, 0x19 },
+ { 0x2f, 0x80 },
+ };
+
+ static struct ov_regvals norm_518_p[] = {
+ { R51x_SYS_SNAP, 0x02 }, /* Reset */
+ { R51x_SYS_SNAP, 0x01 }, /* Enable */
+ { 0x31, 0x0f },
+ { 0x5d, 0x03 },
+ { 0x24, 0x9f },
+ { 0x25, 0x90 },
+ { 0x20, 0x60 },
+ { 0x51, 0x02 },
+ { 0x71, 0x19 },
+ { 0x40, 0xff },
+ { 0x41, 0x42 },
+ { 0x46, 0x00 },
+ { 0x33, 0x04 },
+ { 0x21, 0x19 },
+ { 0x3f, 0x10 },
+ { 0x2f, 0x80 },
+ };
+
+ /* First 5 bits of custom ID reg are a revision ID on OV518 */
+ PDEBUG(D_PROBE, "Device revision %d",
+ 0x1F & reg_r(sd, R51x_SYS_CUST_ID));
+
+ rc = write_regvals(sd, init_518, ARRAY_SIZE(init_518));
+ if (rc < 0)
+ return rc;
+
+ /* Set LED GPIO pin to output mode */
+ rc = reg_w_mask(sd, R518_GPIO_CTL, 0x00, 0x02);
+ if (rc < 0)
+ return rc;
+ switch (sd->bridge) {
+ case BRIDGE_OV518:
+ rc = write_regvals(sd, norm_518, ARRAY_SIZE(norm_518));
+ if (rc < 0)
+ return rc;
+ break;
+ case BRIDGE_OV518PLUS:
+ rc = write_regvals(sd, norm_518_p, ARRAY_SIZE(norm_518_p));
+ if (rc < 0)
+ return rc;
+ break;
+ }
+
+ rc = ov518_upload_quan_tables(sd);
+ if (rc < 0) {
+ PDEBUG(D_ERR, "Error uploading quantization tables");
+ return rc;
+ }
+
+ rc = reg_w(sd, 0x2f, 0x80);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static int ov519_configure(struct sd *sd)
+{
static const struct ov_regvals init_519[] = {
{ 0x5a, 0x6d }, /* EnableSystem */
{ 0x53, 0x9b },
@@ -1313,8 +1562,32 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* windows reads 0x55 at this point*/
};
- if (write_regvals(sd, init_519, ARRAY_SIZE(init_519)))
+ return write_regvals(sd, init_519, ARRAY_SIZE(init_519));
+}
+
+/* this function is called at probe time */
+static int sd_config(struct gspca_dev *gspca_dev,
+ const struct usb_device_id *id)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct cam *cam;
+ int ret = 0;
+
+ sd->bridge = id->driver_info;
+
+ switch (sd->bridge) {
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ ret = ov518_configure(gspca_dev);
+ break;
+ case BRIDGE_OV519:
+ ret = ov519_configure(sd);
+ break;
+ }
+
+ if (ret)
goto error;
+
ov51x_led_control(sd, 0); /* turn LED off */
/* Test for 76xx */
@@ -1360,12 +1633,26 @@ static int sd_config(struct gspca_dev *gspca_dev,
}
cam = &gspca_dev->cam;
- if (!sd->sif) {
- cam->cam_mode = vga_mode;
- cam->nmodes = ARRAY_SIZE(vga_mode);
- } else {
- cam->cam_mode = sif_mode;
- cam->nmodes = ARRAY_SIZE(sif_mode);
+ switch (sd->bridge) {
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ if (!sd->sif) {
+ cam->cam_mode = ov518_vga_mode;
+ cam->nmodes = ARRAY_SIZE(ov518_vga_mode);
+ } else {
+ cam->cam_mode = ov518_sif_mode;
+ cam->nmodes = ARRAY_SIZE(ov518_sif_mode);
+ }
+ break;
+ case BRIDGE_OV519:
+ if (!sd->sif) {
+ cam->cam_mode = ov519_vga_mode;
+ cam->nmodes = ARRAY_SIZE(ov519_vga_mode);
+ } else {
+ cam->cam_mode = ov519_sif_mode;
+ cam->nmodes = ARRAY_SIZE(ov519_sif_mode);
+ }
+ break;
}
sd->brightness = BRIGHTNESS_DEF;
sd->contrast = CONTRAST_DEF;
@@ -1422,6 +1709,106 @@ static int sd_init(struct gspca_dev *gspca_dev)
return 0;
}
+/* Sets up the OV518/OV518+ with the given image parameters
+ *
+ * OV518 needs a completely different approach, until we can figure out what
+ * the individual registers do. Also, only 15 FPS is supported now.
+ *
+ * Do not put any sensor-specific code in here (including I2C I/O functions)
+ */
+static int ov518_mode_init_regs(struct sd *sd)
+{
+ int hsegs, vsegs;
+
+ /******** Set the mode ********/
+
+ reg_w(sd, 0x2b, 0);
+ reg_w(sd, 0x2c, 0);
+ reg_w(sd, 0x2d, 0);
+ reg_w(sd, 0x2e, 0);
+ reg_w(sd, 0x3b, 0);
+ reg_w(sd, 0x3c, 0);
+ reg_w(sd, 0x3d, 0);
+ reg_w(sd, 0x3e, 0);
+
+ if (sd->bridge == BRIDGE_OV518) {
+ /* Set 8-bit (YVYU) input format */
+ reg_w_mask(sd, 0x20, 0x08, 0x08);
+
+ /* Set 12-bit (4:2:0) output format */
+ reg_w_mask(sd, 0x28, 0x80, 0xf0);
+ reg_w_mask(sd, 0x38, 0x80, 0xf0);
+ } else {
+ reg_w(sd, 0x28, 0x80);
+ reg_w(sd, 0x38, 0x80);
+ }
+
+ hsegs = sd->gspca_dev.width / 16;
+ vsegs = sd->gspca_dev.height / 4;
+
+ reg_w(sd, 0x29, hsegs);
+ reg_w(sd, 0x2a, vsegs);
+
+ reg_w(sd, 0x39, hsegs);
+ reg_w(sd, 0x3a, vsegs);
+
+ /* Windows driver does this here; who knows why */
+ reg_w(sd, 0x2f, 0x80);
+
+ /******** Set the framerate (to 30 FPS) ********/
+ if (sd->bridge == BRIDGE_OV518PLUS)
+ sd->clockdiv = 1;
+ else
+ sd->clockdiv = 0;
+
+ /* Mode independent, but framerate dependent, regs */
+ reg_w(sd, 0x51, 0x04); /* Clock divider; lower==faster */
+ reg_w(sd, 0x22, 0x18);
+ reg_w(sd, 0x23, 0xff);
+
+ if (sd->bridge == BRIDGE_OV518PLUS)
+ reg_w(sd, 0x21, 0x19);
+ else
+ reg_w(sd, 0x71, 0x17); /* Compression-related? */
+
+ /* FIXME: Sensor-specific */
+ /* Bit 5 is what matters here. Of course, it is "reserved" */
+ i2c_w(sd, 0x54, 0x23);
+
+ reg_w(sd, 0x2f, 0x80);
+
+ if (sd->bridge == BRIDGE_OV518PLUS) {
+ reg_w(sd, 0x24, 0x94);
+ reg_w(sd, 0x25, 0x90);
+ ov518_reg_w32(sd, 0xc4, 400, 2); /* 190h */
+ ov518_reg_w32(sd, 0xc6, 540, 2); /* 21ch */
+ ov518_reg_w32(sd, 0xc7, 540, 2); /* 21ch */
+ ov518_reg_w32(sd, 0xc8, 108, 2); /* 6ch */
+ ov518_reg_w32(sd, 0xca, 131098, 3); /* 2001ah */
+ ov518_reg_w32(sd, 0xcb, 532, 2); /* 214h */
+ ov518_reg_w32(sd, 0xcc, 2400, 2); /* 960h */
+ ov518_reg_w32(sd, 0xcd, 32, 2); /* 20h */
+ ov518_reg_w32(sd, 0xce, 608, 2); /* 260h */
+ } else {
+ reg_w(sd, 0x24, 0x9f);
+ reg_w(sd, 0x25, 0x90);
+ ov518_reg_w32(sd, 0xc4, 400, 2); /* 190h */
+ ov518_reg_w32(sd, 0xc6, 381, 2); /* 17dh */
+ ov518_reg_w32(sd, 0xc7, 381, 2); /* 17dh */
+ ov518_reg_w32(sd, 0xc8, 128, 2); /* 80h */
+ ov518_reg_w32(sd, 0xca, 183331, 3); /* 2cc23h */
+ ov518_reg_w32(sd, 0xcb, 746, 2); /* 2eah */
+ ov518_reg_w32(sd, 0xcc, 1750, 2); /* 6d6h */
+ ov518_reg_w32(sd, 0xcd, 45, 2); /* 2dh */
+ ov518_reg_w32(sd, 0xce, 851, 2); /* 353h */
+ }
+
+ reg_w(sd, 0x2f, 0x80);
+
+ return 0;
+}
+
+
/* Sets up the OV519 with the given image parameters
*
* OV519 needs a completely different approach, until we can figure out what
@@ -1740,6 +2127,11 @@ static int set_ov_sensor_window(struct sd *sd)
hwebase = 0x3a;
vwsbase = 0x05;
vwebase = 0x06;
+ if (qvga) {
+ /* HDG: this fixes U and V getting swapped */
+ hwsbase--;
+ vwsbase--;
+ }
break;
case SEN_OV7620:
hwsbase = 0x2f; /* From 7620.SET (spec is wrong) */
@@ -1855,15 +2247,28 @@ static int set_ov_sensor_window(struct sd *sd)
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- int ret;
+ int ret = 0;
- ret = ov519_mode_init_regs(sd);
+ switch (sd->bridge) {
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ ret = ov518_mode_init_regs(sd);
+ break;
+ case BRIDGE_OV519:
+ ret = ov519_mode_init_regs(sd);
+ break;
+ }
if (ret < 0)
goto out;
+
ret = set_ov_sensor_window(sd);
if (ret < 0)
goto out;
+ setcontrast(gspca_dev);
+ setbrightness(gspca_dev);
+ setcolors(gspca_dev);
+
ret = ov51x_restart(sd);
if (ret < 0)
goto out;
@@ -1882,7 +2287,30 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
ov51x_led_control(sd, 0);
}
-static void sd_pkt_scan(struct gspca_dev *gspca_dev,
+static void ov518_pkt_scan(struct gspca_dev *gspca_dev,
+ struct gspca_frame *frame, /* target */
+ __u8 *data, /* isoc packet */
+ int len) /* iso packet length */
+{
+ PDEBUG(D_STREAM, "ov518_pkt_scan: %d bytes", len);
+
+ if (len & 7) {
+ len--;
+ PDEBUG(D_STREAM, "packet number: %d\n", (int)data[len]);
+ }
+
+ /* A false positive here is likely, until OVT gives me
+ * the definitive SOF/EOF format */
+ if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) {
+ gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0);
+ gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, 0);
+ }
+
+ /* intermediate packet */
+ gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
+}
+
+static void ov519_pkt_scan(struct gspca_dev *gspca_dev,
struct gspca_frame *frame, /* target */
__u8 *data, /* isoc packet */
int len) /* iso packet length */
@@ -1926,6 +2354,27 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
data, len);
}
+static void sd_pkt_scan(struct gspca_dev *gspca_dev,
+ struct gspca_frame *frame, /* target */
+ __u8 *data, /* isoc packet */
+ int len) /* iso packet length */
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ break;
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ ov518_pkt_scan(gspca_dev, frame, data, len);
+ break;
+ case BRIDGE_OV519:
+ ov519_pkt_scan(gspca_dev, frame, data, len);
+ break;
+ }
+}
+
/* -- management routines -- */
static void setbrightness(struct gspca_dev *gspca_dev)
@@ -1970,6 +2419,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
break;
case SEN_OV6630:
i2c_w_mask(sd, OV7610_REG_CNT, val >> 4, 0x0f);
+ break;
case SEN_OV8610: {
static const __u8 ctab[] = {
0x03, 0x09, 0x0b, 0x0f, 0x53, 0x6f, 0x35, 0x7f
@@ -2136,19 +2586,21 @@ static const struct sd_desc sd_desc = {
/* -- module initialisation -- */
static const __devinitdata struct usb_device_id device_table[] = {
- {USB_DEVICE(0x041e, 0x4052)},
- {USB_DEVICE(0x041e, 0x405f)},
- {USB_DEVICE(0x041e, 0x4060)},
- {USB_DEVICE(0x041e, 0x4061)},
- {USB_DEVICE(0x041e, 0x4064)},
- {USB_DEVICE(0x041e, 0x4068)},
- {USB_DEVICE(0x045e, 0x028c)},
- {USB_DEVICE(0x054c, 0x0154)},
- {USB_DEVICE(0x054c, 0x0155)},
- {USB_DEVICE(0x05a9, 0x0519)},
- {USB_DEVICE(0x05a9, 0x0530)},
- {USB_DEVICE(0x05a9, 0x4519)},
- {USB_DEVICE(0x05a9, 0x8519)},
+ {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
+ {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0xa518), .driver_info = BRIDGE_OV518PLUS },
{}
};
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 19e0bc60de1..4b528b37291 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -60,10 +60,23 @@ struct sd {
static struct ctrl sd_ctrls[] = {
};
-static const struct v4l2_pix_format vga_mode[] = {
+static const struct v4l2_pix_format vga_yuyv_mode[] = {
{640, 480, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
.bytesperline = 640 * 2,
.sizeimage = 640 * 480 * 2,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0},
+};
+
+static const struct v4l2_pix_format vga_jpeg_mode[] = {
+ {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 8 + 590,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 1},
+ {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
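The two JPEG modes above size their buffers as width * height * 3 / 8 plus 590 bytes, which reads as a worst-case compressed-frame estimate plus header headroom (that interpretation is an assumption; the constants come straight from the initialisers). A quick check of both entries, with an illustrative helper name:

#include <stdio.h>

/* Mirrors the .sizeimage initialisers of vga_jpeg_mode above. */
static unsigned int jpeg_sizeimage(unsigned int w, unsigned int h)
{
	return w * h * 3 / 8 + 590;
}

int main(void)
{
	printf("320x240 -> %u bytes\n", jpeg_sizeimage(320, 240));	/* 29390 */
	printf("640x480 -> %u bytes\n", jpeg_sizeimage(640, 480));	/* 115790 */
	return 0;
}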
@@ -244,7 +257,7 @@ static const u8 bridge_init_ov965x[][2] = {
};
static const u8 sensor_init_ov965x[][2] = {
- {0x12, 0x80}, /* com7 - reset */
+	{0x12, 0x80}, /* com7 - SCCB reset */
{0x00, 0x00}, /* gain */
{0x01, 0x80}, /* blue */
{0x02, 0x80}, /* red */
@@ -254,10 +267,10 @@ static const u8 sensor_init_ov965x[][2] = {
{0x0e, 0x61}, /* com5 */
{0x0f, 0x42}, /* com6 */
{0x11, 0x00}, /* clkrc */
- {0x12, 0x02}, /* com7 */
+ {0x12, 0x02}, /* com7 - 15fps VGA YUYV */
{0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */
{0x14, 0x28}, /* com9 */
- {0x16, 0x24}, /* rsvd16 */
+ {0x16, 0x24}, /* reg16 */
{0x17, 0x1d}, /* hstart*/
{0x18, 0xbd}, /* hstop */
{0x19, 0x01}, /* vstrt */
@@ -269,24 +282,24 @@ static const u8 sensor_init_ov965x[][2] = {
{0x27, 0x08}, /* bbias */
{0x28, 0x08}, /* gbbias */
{0x29, 0x15}, /* gr com */
- {0x2a, 0x00},
- {0x2b, 0x00},
+ {0x2a, 0x00}, /* exhch */
+ {0x2b, 0x00}, /* exhcl */
{0x2c, 0x08}, /* rbias */
{0x32, 0xff}, /* href */
{0x33, 0x00}, /* chlf */
- {0x34, 0x3f}, /* arblm */
- {0x35, 0x00}, /* rsvd35 */
- {0x36, 0xf8}, /* rsvd36 */
- {0x38, 0x72}, /* acom38 */
- {0x39, 0x57}, /* ofon */
- {0x3a, 0x80}, /* tslb */
- {0x3b, 0xc4},
+ {0x34, 0x3f}, /* aref1 */
+ {0x35, 0x00}, /* aref2 */
+ {0x36, 0xf8}, /* aref3 */
+ {0x38, 0x72}, /* adc2 */
+ {0x39, 0x57}, /* aref4 */
+ {0x3a, 0x80}, /* tslb - yuyv */
+ {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */
{0x3d, 0x99}, /* com13 */
- {0x3f, 0xc1},
+ {0x3f, 0xc1}, /* edge */
{0x40, 0xc0}, /* com15 */
{0x41, 0x40}, /* com16 */
- {0x42, 0xc0},
- {0x43, 0x0a},
+ {0x42, 0xc0}, /* com17 */
+ {0x43, 0x0a}, /* rsvd */
{0x44, 0xf0},
{0x45, 0x46},
{0x46, 0x62},
@@ -297,22 +310,22 @@ static const u8 sensor_init_ov965x[][2] = {
{0x4c, 0x7f},
{0x4d, 0x7f},
{0x4e, 0x7f},
- {0x4f, 0x98},
+ {0x4f, 0x98}, /* matrix */
{0x50, 0x98},
{0x51, 0x00},
{0x52, 0x28},
{0x53, 0x70},
{0x54, 0x98},
- {0x58, 0x1a},
- {0x59, 0x85},
+ {0x58, 0x1a}, /* matrix coef sign */
+ {0x59, 0x85}, /* AWB control */
{0x5a, 0xa9},
{0x5b, 0x64},
{0x5c, 0x84},
{0x5d, 0x53},
{0x5e, 0x0e},
- {0x5f, 0xf0},
- {0x60, 0xf0},
- {0x61, 0xf0},
+ {0x5f, 0xf0}, /* AWB blue limit */
+ {0x60, 0xf0}, /* AWB red limit */
+ {0x61, 0xf0}, /* AWB green limit */
{0x62, 0x00}, /* lcc1 */
{0x63, 0x00}, /* lcc2 */
{0x64, 0x02}, /* lcc3 */
@@ -324,15 +337,15 @@ static const u8 sensor_init_ov965x[][2] = {
{0x6d, 0x55},
{0x6e, 0x00},
{0x6f, 0x9d},
- {0x70, 0x21},
+ {0x70, 0x21}, /* dnsth */
{0x71, 0x78},
- {0x72, 0x00},
- {0x73, 0x01},
- {0x74, 0x3a},
- {0x75, 0x35},
+ {0x72, 0x00}, /* poidx */
+ {0x73, 0x01}, /* pckdv */
+ {0x74, 0x3a}, /* xindx */
+ {0x75, 0x35}, /* yindx */
{0x76, 0x01},
{0x77, 0x02},
- {0x7a, 0x12},
+ {0x7a, 0x12}, /* gamma curve */
{0x7b, 0x08},
{0x7c, 0x16},
{0x7d, 0x30},
@@ -349,33 +362,33 @@ static const u8 sensor_init_ov965x[][2] = {
{0x88, 0xe6},
{0x89, 0xf2},
{0x8a, 0x03},
- {0x8c, 0x89},
+ {0x8c, 0x89}, /* com19 */
{0x14, 0x28}, /* com9 */
{0x90, 0x7d},
{0x91, 0x7b},
- {0x9d, 0x03},
- {0x9e, 0x04},
+ {0x9d, 0x03}, /* lcc6 */
+ {0x9e, 0x04}, /* lcc7 */
{0x9f, 0x7a},
{0xa0, 0x79},
{0xa1, 0x40}, /* aechm */
- {0xa4, 0x50},
+ {0xa4, 0x50}, /* com21 */
{0xa5, 0x68}, /* com26 */
- {0xa6, 0x4a},
- {0xa8, 0xc1}, /* acoma8 */
- {0xa9, 0xef}, /* acoma9 */
+ {0xa6, 0x4a}, /* AWB green */
+ {0xa8, 0xc1}, /* refa8 */
+ {0xa9, 0xef}, /* refa9 */
{0xaa, 0x92},
{0xab, 0x04},
- {0xac, 0x80},
+ {0xac, 0x80}, /* black level control */
{0xad, 0x80},
{0xae, 0x80},
{0xaf, 0x80},
{0xb2, 0xf2},
{0xb3, 0x20},
- {0xb4, 0x20},
+ {0xb4, 0x20}, /* ctrlb4 */
{0xb5, 0x00},
{0xb6, 0xaf},
{0xbb, 0xae},
- {0xbc, 0x7f},
+ {0xbc, 0x7f}, /* ADC channel offsets */
{0xdb, 0x7f},
{0xbe, 0x7f},
{0xbf, 0x7f},
@@ -384,7 +397,7 @@ static const u8 sensor_init_ov965x[][2] = {
{0xc2, 0x01},
{0xc3, 0x4e},
{0xc6, 0x85},
- {0xc7, 0x80},
+ {0xc7, 0x80}, /* com24 */
{0xc9, 0xe0},
{0xca, 0xe8},
{0xcb, 0xf0},
@@ -399,11 +412,11 @@ static const u8 sensor_init_ov965x[][2] = {
{0x58, 0x1a},
{0xff, 0x41}, /* read 41, write ff 00 */
{0x41, 0x40}, /* com16 */
- {0xc5, 0x03},
- {0x6a, 0x02},
+ {0xc5, 0x03}, /* 60 Hz banding filter */
+ {0x6a, 0x02}, /* 50 Hz banding filter */
- {0x12, 0x62}, /* com7 - VGA + CIF */
- {0x36, 0xfa}, /* rsvd36 */
+ {0x12, 0x62}, /* com7 - 30fps VGA YUV */
+ {0x36, 0xfa}, /* aref3 */
{0x69, 0x0a}, /* hv */
{0x8c, 0x89}, /* com22 */
{0x14, 0x28}, /* com9 */
@@ -442,8 +455,8 @@ static const u8 bridge_init_ov965x_2[][2] = {
{0x52, 0x3c},
{0x53, 0x00},
{0x54, 0x00},
- {0x55, 0x00},
- {0x57, 0x00},
+ {0x55, 0x00}, /* brightness */
+ {0x57, 0x00}, /* contrast 2 */
{0x5c, 0x00},
{0x5a, 0xa0},
{0x5b, 0x78},
@@ -489,9 +502,66 @@ static const u8 sensor_init_ov965x_2[][2] = {
{0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */
};
+static const u8 sensor_start_ov965x[][2] = {
+ {0x12, 0x62}, /* com7 - 30fps VGA YUV */
+ {0x36, 0xfa}, /* aref3 */
+ {0x69, 0x0a}, /* hv */
+ {0x8c, 0x89}, /* com22 */
+ {0x14, 0x28}, /* com9 */
+ {0x3e, 0x0c}, /* com14 */
+ {0x41, 0x40}, /* com16 */
+ {0x72, 0x00},
+ {0x73, 0x00},
+ {0x74, 0x3a},
+ {0x75, 0x35},
+ {0x76, 0x01},
+ {0xc7, 0x80}, /* com24 */
+ {0x03, 0x12}, /* vref */
+ {0x17, 0x16}, /* hstart */
+ {0x18, 0x02}, /* hstop */
+ {0x19, 0x01}, /* vstrt */
+ {0x1a, 0x3d}, /* vstop */
+ {0x32, 0xff}, /* href */
+ {0xc0, 0xaa},
+ {}
+};
+
static const u8 bridge_start_ov965x[][2] = {
+ {0x94, 0xaa},
+ {0xf1, 0x60},
+ {0xe5, 0x04},
+ {0xc0, 0x50},
+ {0xc1, 0x3c},
+ {0x8c, 0x00},
+ {0x8d, 0x1c},
+ {0x34, 0x05},
+ {}
+};
+
+static const u8 bridge_start_ov965x_vga[][2] = {
+ {0xc2, 0x0c},
+ {0xc3, 0xf9},
+ {0xda, 0x01},
+ {0x50, 0x00},
+ {0x51, 0xa0},
+ {0x52, 0x3c},
+ {0x53, 0x00},
+ {0x54, 0x00},
+ {0x55, 0x00},
+ {0x57, 0x00},
+ {0x5c, 0x00},
+ {0x5a, 0xa0},
+ {0x5b, 0x78},
+ {0x35, 0x02},
+ {0xd9, 0x10},
+ {0x94, 0x11},
+ {}
+};
+
+static const u8 bridge_start_ov965x_cif[][2] = {
{0xc2, 0x4c},
{0xc3, 0xf9},
+ {0xda, 0x00},
{0x50, 0x00},
{0x51, 0xa0},
{0x52, 0x78},
@@ -500,30 +570,54 @@ static const u8 bridge_start_ov965x[][2] = {
{0x55, 0x00},
{0x57, 0x00},
{0x5c, 0x00},
- {0x5a, 0x28},
- {0x5b, 0x1e},
- {0x35, 0x00},
- {0xd9, 0x21},
+ {0x5a, 0x50},
+ {0x5b, 0x3c},
+ {0x35, 0x02},
+ {0xd9, 0x10},
{0x94, 0x11},
+ {}
};
-static const u8 sensor_start_ov965x[][2] = {
- {0x3b, 0xe4},
+static const u8 sensor_start_ov965x_vga[][2] = {
+ {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */
+ {0x1e, 0x04}, /* mvfp */
+ {0x13, 0xe0}, /* com8 */
+ {0x00, 0x00},
+ {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */
+ {0x11, 0x03}, /* clkrc */
+ {0x6b, 0x5a}, /* dblv */
+ {0x6a, 0x05}, /* 50 Hz banding filter */
+ {0xc5, 0x07}, /* 60 Hz banding filter */
+ {0xa2, 0x4b}, /* bd50 */
+ {0xa3, 0x3e}, /* bd60 */
+
+ {0x2d, 0x00}, /* advfl */
+ {}
+};
+
+static const u8 sensor_start_ov965x_cif[][2] = {
+ {0x3b, 0xe4}, /* com11 - night mode 1/4 frame rate */
{0x1e, 0x04}, /* mvfp */
{0x13, 0xe0}, /* com8 */
{0x00, 0x00},
{0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */
{0x11, 0x01}, /* clkrc */
{0x6b, 0x5a}, /* dblv */
- {0x6a, 0x02},
- {0xc5, 0x03},
- {0xa2, 0x96},
- {0xa3, 0x7d},
+ {0x6a, 0x02}, /* 50 Hz banding filter */
+ {0xc5, 0x03}, /* 60 Hz banding filter */
+ {0xa2, 0x96}, /* bd50 */
+ {0xa3, 0x7d}, /* bd60 */
+
{0xff, 0x13}, /* read 13, write ff 00 */
{0x13, 0xe7},
- {0x3a, 0x80},
+ {0x3a, 0x80}, /* tslb - yuyv */
+ {}
+};
+
+static const u8 sensor_start_ov965x_2[][2] = {
{0xff, 0x42}, /* read 42, write ff 00 */
- {0x42, 0xc1},
+ {0x42, 0xc1}, /* com17 - 50 Hz filter */
+ {}
};
@@ -705,11 +799,17 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam = &gspca_dev->cam;
- cam->cam_mode = vga_mode;
- cam->nmodes = ARRAY_SIZE(vga_mode);
+ if (sd->sensor == SENSOR_OV772X) {
+ cam->cam_mode = vga_yuyv_mode;
+ cam->nmodes = ARRAY_SIZE(vga_yuyv_mode);
- cam->bulk_size = 16384;
- cam->bulk_nurbs = 2;
+ cam->bulk = 1;
+ cam->bulk_size = 16384;
+ cam->bulk_nurbs = 2;
+ } else { /* ov965x */
+ cam->cam_mode = vga_jpeg_mode;
+ cam->nmodes = ARRAY_SIZE(vga_jpeg_mode);
+ }
return 0;
}
@@ -778,6 +878,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
+ int mode;
switch (sd->sensor) {
case SENSOR_OV772X:
@@ -786,13 +887,28 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
default:
/* case SENSOR_OV965X: */
- reg_w_array(gspca_dev, bridge_start_ov965x,
- ARRAY_SIZE(bridge_start_ov965x));
+
sccb_w_array(gspca_dev, sensor_start_ov965x,
ARRAY_SIZE(sensor_start_ov965x));
+ reg_w_array(gspca_dev, bridge_start_ov965x,
+ ARRAY_SIZE(bridge_start_ov965x));
+ mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
+ if (mode != 0) { /* 320x240 */
+ reg_w_array(gspca_dev, bridge_start_ov965x_cif,
+ ARRAY_SIZE(bridge_start_ov965x_cif));
+ sccb_w_array(gspca_dev, sensor_start_ov965x_cif,
+ ARRAY_SIZE(sensor_start_ov965x_cif));
+ } else { /* 640x480 */
+ reg_w_array(gspca_dev, bridge_start_ov965x_vga,
+ ARRAY_SIZE(bridge_start_ov965x_vga));
+ sccb_w_array(gspca_dev, sensor_start_ov965x_vga,
+ ARRAY_SIZE(sensor_start_ov965x_vga));
+ }
+ sccb_w_array(gspca_dev, sensor_start_ov965x_2,
+ ARRAY_SIZE(sensor_start_ov965x_2));
+ ov534_reg_write(gspca_dev, 0xe0, 0x00);
ov534_reg_write(gspca_dev, 0xe0, 0x00);
ov534_set_led(gspca_dev, 1);
-/*fixme: other sensor start omitted*/
}
return 0;
}
@@ -832,9 +948,11 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, struct gspca_frame *frame,
__u32 this_pts;
u16 this_fid;
int remaining_len = len;
+ int payload_len;
+ payload_len = gspca_dev->cam.bulk ? 2048 : 2040;
do {
- len = min(remaining_len, 2040); /*fixme: was 2048*/
+ len = min(remaining_len, payload_len);
/* Payloads are prefixed with a UVC-style header. We
consider a frame to start when the FID toggles, or the PTS
@@ -864,30 +982,27 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, struct gspca_frame *frame,
/* If PTS or FID has changed, start a new frame. */
if (this_pts != sd->last_pts || this_fid != sd->last_fid) {
- gspca_frame_add(gspca_dev, FIRST_PACKET, frame,
- NULL, 0);
+ if (gspca_dev->last_packet_type == INTER_PACKET)
+ frame = gspca_frame_add(gspca_dev,
+ LAST_PACKET, frame,
+ NULL, 0);
sd->last_pts = this_pts;
sd->last_fid = this_fid;
- }
-
- /* Add the data from this payload */
- gspca_frame_add(gspca_dev, INTER_PACKET, frame,
+ gspca_frame_add(gspca_dev, FIRST_PACKET, frame,
data + 12, len - 12);
-
/* If this packet is marked as EOF, end the frame */
- if (data[1] & UVC_STREAM_EOF) {
+ } else if (data[1] & UVC_STREAM_EOF) {
sd->last_pts = 0;
-
- if (frame->data_end - frame->data !=
- gspca_dev->width * gspca_dev->height * 2) {
- PDEBUG(D_PACK, "short frame");
- goto discard;
- }
-
frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame,
- NULL, 0);
+ data + 12, len - 12);
+ } else {
+
+ /* Add the data from this payload */
+ gspca_frame_add(gspca_dev, INTER_PACKET, frame,
+ data + 12, len - 12);
}
+
/* Done this payload */
goto scan_next;
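The reworked loop above drives frame assembly from the 12-byte UVC-style header in front of every payload: a new frame starts when the FID bit or the PTS value changes, EOF closes it, and only data + 12 / len - 12 is image data. A stand-alone sketch of how those fields sit in the header, using the standard UVC payload-header bit layout (the struct and function names here are illustrative, and the fixed 12-byte length is taken from the skip above):

#include <stdint.h>
#include <stdbool.h>

/* Standard UVC bmHeaderInfo bits (second byte of the payload header). */
#define UVC_STREAM_FID	(1 << 0)	/* toggles on every new frame */
#define UVC_STREAM_EOF	(1 << 1)	/* last payload of the frame */
#define UVC_STREAM_PTS	(1 << 2)	/* PTS field is present */
#define UVC_STREAM_ERR	(1 << 6)	/* payload error */

struct uvc_hdr_fields {
	bool fid, eof, err;
	uint32_t pts;			/* little-endian, bytes 2..5 */
};

/* Returns false if the payload is too short for the 12-byte header. */
bool parse_uvc_header(const uint8_t *data, int len, struct uvc_hdr_fields *out)
{
	if (len < 12 || data[0] != 12)
		return false;
	out->fid = data[1] & UVC_STREAM_FID;
	out->eof = data[1] & UVC_STREAM_EOF;
	out->err = data[1] & UVC_STREAM_ERR;
	out->pts = (data[1] & UVC_STREAM_PTS)
		? data[2] | (data[3] << 8) | (data[4] << 16) |
		  ((uint32_t)data[5] << 24)
		: 0;
	return true;
}

Comparing fid and pts against the values remembered from the previous payload is what decides between FIRST_PACKET, INTER_PACKET and LAST_PACKET in the scanner above.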
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 153d0a91d4b..cf3af8de6e9 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -877,6 +877,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = sif_mode;
cam->nmodes = ARRAY_SIZE(sif_mode);
}
+ cam->npkt = 36; /* 36 packets per ISOC message */
+
sd->brightness = BRIGHTNESS_DEF;
sd->gain = GAIN_DEF;
sd->exposure = EXPOSURE_DEF;
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index c72e19d3ac3..dc6a6f11354 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -62,7 +62,6 @@ struct sd {
#define BRIDGE_SN9C105 1
#define BRIDGE_SN9C110 2
#define BRIDGE_SN9C120 3
-#define BRIDGE_SN9C325 4
u8 sensor; /* Type of image sensor chip */
#define SENSOR_HV7131R 0
#define SENSOR_MI0360 1
@@ -354,9 +353,9 @@ static const u8 sn_ov7648[0x1c] = {
static const u8 sn_ov7660[0x1c] = {
/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
- 0x00, 0x61, 0x40, 0x00, 0x1a, 0x20, 0x20, 0x20,
+ 0x00, 0x61, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
/* reg8 reg9 rega regb regc regd rege regf */
- 0x81, 0x21, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10,
+ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
0x03, 0x00, 0x01, 0x01, 0x08, 0x28, 0x1e, 0x20,
/* reg18 reg19 reg1a reg1b */
@@ -757,6 +756,7 @@ static const u8 ov7660_sensor_init[][8] = {
{0xc1, 0x21, 0x88, 0xaf, 0xc7, 0xdf, 0x00, 0x10}, /* gamma curve */
{0xc1, 0x21, 0x8b, 0x99, 0x99, 0xcf, 0x00, 0x10}, /* reserved */
{0xb1, 0x21, 0x92, 0x00, 0x00, 0x00, 0x00, 0x10}, /* DM_LNL/H */
+ {0xb1, 0x21, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x10},
/****** (some exchanges in the win trace) ******/
{0xa1, 0x21, 0x1e, 0x01, 0x00, 0x00, 0x00, 0x10}, /* MVFP */
/* bits[3..0]reserved */
@@ -1065,9 +1065,9 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
const u8 *reg9a;
static const u8 reg9a_def[] =
- {0x08, 0x40, 0x20, 0x10, 0x00, 0x04};
- static const u8 reg9a_sn9c325[] =
- {0x0a, 0x40, 0x38, 0x30, 0x00, 0x20};
+ {0x00, 0x40, 0x20, 0x00, 0x00, 0x00};
+ static const u8 reg9a_spec[] =
+ {0x00, 0x40, 0x38, 0x30, 0x00, 0x20};
static const u8 regd4[] = {0x60, 0x00, 0x00};
reg_w1(gspca_dev, 0xf1, 0x00);
@@ -1077,9 +1077,10 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
reg_w(gspca_dev, 0x01, &sn9c1xx[1], 2);
reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2);
reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5); /* jfm len was 3 */
- switch (sd->bridge) {
- case BRIDGE_SN9C325:
- reg9a = reg9a_sn9c325;
+ switch (sd->sensor) {
+ case SENSOR_OV7660:
+ case SENSOR_SP80708:
+ reg9a = reg9a_spec;
break;
default:
reg9a = reg9a_def;
@@ -1104,7 +1105,6 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
reg_w1(gspca_dev, 0x17, 0x64);
reg_w1(gspca_dev, 0x01, 0x42);
break;
-/*jfm: from win trace */
case SENSOR_OV7630:
reg_w1(gspca_dev, 0x01, 0x61);
reg_w1(gspca_dev, 0x17, 0xe2);
@@ -1114,18 +1114,15 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
case SENSOR_OV7648:
reg_w1(gspca_dev, 0x01, 0x63);
reg_w1(gspca_dev, 0x17, 0x20);
+ reg_w1(gspca_dev, 0x01, 0x62);
reg_w1(gspca_dev, 0x01, 0x42);
break;
-/*jfm: from win trace */
case SENSOR_OV7660:
- if (sd->bridge == BRIDGE_SN9C120) {
- reg_w1(gspca_dev, 0x01, 0x61);
- reg_w1(gspca_dev, 0x17, 0x20);
- reg_w1(gspca_dev, 0x01, 0x60);
- reg_w1(gspca_dev, 0x01, 0x40);
- break;
- }
- /* fall thru */
+ reg_w1(gspca_dev, 0x01, 0x61);
+ reg_w1(gspca_dev, 0x17, 0x20);
+ reg_w1(gspca_dev, 0x01, 0x60);
+ reg_w1(gspca_dev, 0x01, 0x40);
+ break;
case SENSOR_SP80708:
reg_w1(gspca_dev, 0x01, 0x63);
reg_w1(gspca_dev, 0x17, 0x20);
@@ -1134,6 +1131,9 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
mdelay(100);
reg_w1(gspca_dev, 0x02, 0x62);
break;
+/* case SENSOR_HV7131R: */
+/* case SENSOR_MI0360: */
+/* case SENSOR_MO4000: */
default:
reg_w1(gspca_dev, 0x01, 0x43);
reg_w1(gspca_dev, 0x17, 0x61);
@@ -1280,6 +1280,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam = &gspca_dev->cam;
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
+ cam->npkt = 24; /* 24 packets per ISOC message */
sd->bridge = id->driver_info >> 16;
sd->sensor = id->driver_info >> 8;
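The two shifts above unpack the per-device word that the table entries further down build with BSI(bridge, sensor, i2c_addr). The macro itself is not in this hunk, so the layout below is inferred from the shifts: bridge in bits 23..16, sensor in bits 15..8, sensor I2C address in bits 7..0, with the u8 fields truncating the high bits on assignment. A small sketch under that assumption, reusing the bridge/sensor values defined earlier in the file:

#include <stdio.h>

/* Assumed packing, inferred from the >> 16 and >> 8 in sd_config() above. */
#define BSI(bridge, sensor, i2c_addr) \
	(((bridge) << 16) | ((sensor) << 8) | (i2c_addr))

#define BRIDGE_SN9C105	1
#define SENSOR_MI0360	1

int main(void)
{
	unsigned long info = BSI(BRIDGE_SN9C105, SENSOR_MI0360, 0x5d);

	unsigned char bridge = info >> 16;	/* 1 */
	unsigned char sensor = info >> 8;	/* 1, high bits dropped by u8 */
	unsigned char i2c_addr = info;		/* 0x5d */

	printf("bridge=%u sensor=%u i2c=0x%02x\n", bridge, sensor, i2c_addr);
	return 0;
}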
@@ -1683,13 +1684,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_OV7648:
reg17 = 0x20;
break;
-/*jfm: from win trace */
case SENSOR_OV7660:
- if (sd->bridge == BRIDGE_SN9C120) {
- reg17 = 0xa0;
- break;
- }
- /* fall thru */
+ reg17 = 0xa0;
+ break;
default:
reg17 = 0x60;
break;
@@ -1714,16 +1711,17 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x9a, 0x0a);
reg_w1(gspca_dev, 0x99, 0x60);
break;
+ case SENSOR_OV7660:
+ reg_w1(gspca_dev, 0x9a, 0x05);
+ if (sd->bridge == BRIDGE_SN9C105)
+ reg_w1(gspca_dev, 0x99, 0xff);
+ else
+ reg_w1(gspca_dev, 0x99, 0x5b);
+ break;
case SENSOR_SP80708:
reg_w1(gspca_dev, 0x9a, 0x05);
reg_w1(gspca_dev, 0x99, 0x59);
break;
- case SENSOR_OV7660:
- if (sd->bridge == BRIDGE_SN9C120) {
- reg_w1(gspca_dev, 0x9a, 0x05);
- break;
- }
- /* fall thru */
default:
reg_w1(gspca_dev, 0x9a, 0x08);
reg_w1(gspca_dev, 0x99, 0x59);
@@ -2193,6 +2191,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0471, 0x0328), BSI(SN9C105, MI0360, 0x5d)},
{USB_DEVICE(0x0471, 0x0330), BSI(SN9C105, MI0360, 0x5d)},
{USB_DEVICE(0x06f8, 0x3004), BSI(SN9C105, OV7660, 0x21)},
+ {USB_DEVICE(0x06f8, 0x3008), BSI(SN9C105, OV7660, 0x21)},
{USB_DEVICE(0x0c45, 0x6040), BSI(SN9C102P, HV7131R, 0x11)},
/* bw600.inf:
{USB_DEVICE(0x0c45, 0x6040), BSI(SN9C102P, MI0360, 0x5d)}, */
@@ -2211,7 +2210,12 @@ static const __devinitdata struct usb_device_id device_table[] = {
#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
{USB_DEVICE(0x0c45, 0x60fe), BSI(SN9C105, OV7630, 0x21)},
#endif
+ {USB_DEVICE(0x0c45, 0x6100), BSI(SN9C120, MI0360, 0x5d)}, /*sn9c128*/
/* {USB_DEVICE(0x0c45, 0x6108), BSI(SN9C120, OM6801, 0x??)}, */
+ {USB_DEVICE(0x0c45, 0x610a), BSI(SN9C120, OV7648, 0x21)}, /*sn9c128*/
+ {USB_DEVICE(0x0c45, 0x610b), BSI(SN9C120, OV7660, 0x21)}, /*sn9c128*/
+ {USB_DEVICE(0x0c45, 0x610c), BSI(SN9C120, HV7131R, 0x11)}, /*sn9c128*/
+ {USB_DEVICE(0x0c45, 0x610e), BSI(SN9C120, OV7630, 0x21)}, /*sn9c128*/
/* {USB_DEVICE(0x0c45, 0x6122), BSI(SN9C110, ICM105C, 0x??)}, */
/* {USB_DEVICE(0x0c45, 0x6123), BSI(SN9C110, SanyoCCD, 0x??)}, */
{USB_DEVICE(0x0c45, 0x6128), BSI(SN9C110, OM6802, 0x21)}, /*sn9c325?*/
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index 6f38fa6d86b..8806b2ff82b 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -32,9 +32,6 @@ MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- __u8 packet[ISO_MAX_SIZE + 128];
- /* !! no more than 128 ff in an ISO packet */
-
unsigned char brightness;
unsigned char contrast;
unsigned char colors;
@@ -906,7 +903,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
int i;
- __u8 *s, *d;
static __u8 ffd9[] = {0xff, 0xd9};
	/* frames are JPEG 4:1:1 without 0xff escape */
@@ -930,22 +926,19 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
}
/* add 0x00 after 0xff */
- for (i = len; --i >= 0; )
- if (data[i] == 0xff)
- break;
- if (i < 0) { /* no 0xff */
- gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
- return;
- }
- s = data;
- d = sd->packet;
- for (i = 0; i < len; i++) {
- *d++ = *s++;
- if (s[-1] == 0xff)
- *d++ = 0x00;
- }
- gspca_frame_add(gspca_dev, INTER_PACKET, frame,
- sd->packet, d - sd->packet);
+ i = 0;
+ do {
+ if (data[i] == 0xff) {
+ gspca_frame_add(gspca_dev, INTER_PACKET, frame,
+ data, i + 1);
+ len -= i;
+ data += i;
+ *data = 0x00;
+ i = 0;
+ }
+ i++;
+ } while (i < len);
+ gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
}
static void setbrightness(struct gspca_dev *gspca_dev)
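The camera delivers the entropy-coded JPEG data without the byte stuffing the format requires, so every 0xff in the stream must be followed by a 0x00 before a decoder sees it. The rewritten loop above does this in place, flushing the data up to and including each 0xff and then reusing that byte slot as the stuffed 0x00 for the following chunk, which is why the sd->packet scratch buffer could be dropped. A simpler stand-alone version of the same transformation into a separate buffer (function name illustrative):

#include <stddef.h>
#include <stdint.h>

/*
 * Insert the 0x00 that JPEG requires after every 0xff in entropy-coded
 * data.  'out' must be able to hold up to 2 * len bytes.  Returns the
 * number of bytes written.
 */
size_t jpeg_stuff_ff(uint8_t *out, const uint8_t *in, size_t len)
{
	size_t o = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		out[o++] = in[i];
		if (in[i] == 0xff)
			out[o++] = 0x00;	/* stuffed byte */
	}
	return o;
}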
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index 2acec58b1b9..ea8c9fe2e96 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -637,19 +637,19 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = ARRAY_SIZE(vga_mode) - 1;
sd->brightness = BRIGHTNESS_DEF;
- if (sd->subtype == Nxultra) {
- if (write_vector(gspca_dev, spca505b_init_data))
- return -EIO;
- } else {
- if (write_vector(gspca_dev, spca505_init_data))
- return -EIO;
- }
return 0;
}
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (write_vector(gspca_dev,
+ sd->subtype == Nxultra
+ ? spca505b_init_data
+ : spca505_init_data))
+ return -EIO;
return 0;
}
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index adacf843766..2ed2669bac3 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -1,7 +1,7 @@
/*
* SPCA508 chip based cameras subdriver
*
- * V4L2 by Jean-Francois Moine <http://moinejf.free.fr>
+ * Copyright (C) 2009 Jean-Francois Moine <http://moinejf.free.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,9 +30,9 @@ MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- unsigned char brightness;
+ u8 brightness;
- char subtype;
+ u8 subtype;
#define CreativeVista 0
#define HamaUSBSightcam 1
#define HamaUSBSightcam2 2
@@ -86,58 +86,34 @@ static const struct v4l2_pix_format sif_mode[] = {
};
/* Frame packet header offsets for the spca508 */
-#define SPCA508_OFFSET_TYPE 1
-#define SPCA508_OFFSET_COMPRESS 2
-#define SPCA508_OFFSET_FRAMSEQ 8
-#define SPCA508_OFFSET_WIN1LUM 11
#define SPCA508_OFFSET_DATA 37
-#define SPCA508_SNAPBIT 0x20
-#define SPCA508_SNAPCTRL 0x40
-/*************** I2c ****************/
-#define SPCA508_INDEX_I2C_BASE 0x8800
-
/*
* Initialization data: this is the first set-up data written to the
* device (before the open data).
*/
static const u16 spca508_init_data[][2] =
{
- /* line URB value, index */
- /* 44274 1804 */ {0x0000, 0x870b},
-
- /* 44299 1805 */ {0x0020, 0x8112},
- /* Video drop enable, ISO streaming disable */
- /* 44324 1806 */ {0x0003, 0x8111},
- /* Reset compression & memory */
- /* 44349 1807 */ {0x0000, 0x8110},
- /* Disable all outputs */
- /* 44372 1808 */ /* READ {0x0000, 0x8114} -> 0000: 00 */
- /* 44398 1809 */ {0x0000, 0x8114},
- /* SW GPIO data */
- /* 44423 1810 */ {0x0008, 0x8110},
- /* Enable charge pump output */
- /* 44527 1811 */ {0x0002, 0x8116},
- /* 200 kHz pump clock */
- /* 44555 1812 */
- /* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE:) */
- /* 44590 1813 */ {0x0003, 0x8111},
- /* Reset compression & memory */
- /* 44615 1814 */ {0x0000, 0x8111},
- /* Normal mode (not reset) */
- /* 44640 1815 */ {0x0098, 0x8110},
- /* Enable charge pump output, sync.serial,external 2x clock */
- /* 44665 1816 */ {0x000d, 0x8114},
- /* SW GPIO data */
- /* 44690 1817 */ {0x0002, 0x8116},
- /* 200 kHz pump clock */
- /* 44715 1818 */ {0x0020, 0x8112},
- /* Video drop enable, ISO streaming disable */
+ {0x0000, 0x870b},
+
+ {0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
+ {0x0003, 0x8111}, /* Reset compression & memory */
+ {0x0000, 0x8110}, /* Disable all outputs */
+ /* READ {0x0000, 0x8114} -> 0000: 00 */
+ {0x0000, 0x8114}, /* SW GPIO data */
+ {0x0008, 0x8110}, /* Enable charge pump output */
+ {0x0002, 0x8116}, /* 200 kHz pump clock */
+ /* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE:) */
+ {0x0003, 0x8111}, /* Reset compression & memory */
+ {0x0000, 0x8111}, /* Normal mode (not reset) */
+ {0x0098, 0x8110},
+ /* Enable charge pump output, sync.serial,external 2x clock */
+ {0x000d, 0x8114}, /* SW GPIO data */
+ {0x0002, 0x8116}, /* 200 kHz pump clock */
+ {0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
/* --------------------------------------- */
- /* 44740 1819 */ {0x000f, 0x8402},
- /* memory bank */
- /* 44765 1820 */ {0x0000, 0x8403},
- /* ... address */
+ {0x000f, 0x8402}, /* memory bank */
+ {0x0000, 0x8403}, /* ... address */
/* --------------------------------------- */
/* 0x88__ is Synchronous Serial Interface. */
/* TBD: This table could be expressed more compactly */
@@ -145,446 +121,384 @@ static const u16 spca508_init_data[][2] =
/* TBD: Should see if the values in spca50x_i2c_data */
/* would work with the VQ110 instead of the values */
/* below. */
- /* 44790 1821 */ {0x00c0, 0x8804},
- /* SSI slave addr */
- /* 44815 1822 */ {0x0008, 0x8802},
- /* 375 Khz SSI clock */
- /* 44838 1823 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 44862 1824 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 44888 1825 */ {0x0008, 0x8802},
- /* 375 Khz SSI clock */
- /* 44913 1826 */ {0x0012, 0x8801},
- /* SSI reg addr */
- /* 44938 1827 */ {0x0080, 0x8800},
- /* SSI data to write */
- /* 44961 1828 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 44985 1829 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45009 1830 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 45035 1831 */ {0x0008, 0x8802},
- /* 375 Khz SSI clock */
- /* 45060 1832 */ {0x0012, 0x8801},
- /* SSI reg addr */
- /* 45085 1833 */ {0x0000, 0x8800},
- /* SSI data to write */
- /* 45108 1834 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45132 1835 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45156 1836 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 45182 1837 */ {0x0008, 0x8802},
- /* 375 Khz SSI clock */
- /* 45207 1838 */ {0x0011, 0x8801},
- /* SSI reg addr */
- /* 45232 1839 */ {0x0040, 0x8800},
- /* SSI data to write */
- /* 45255 1840 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45279 1841 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45303 1842 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 45329 1843 */ {0x0008, 0x8802},
- /* 45354 1844 */ {0x0013, 0x8801},
- /* 45379 1845 */ {0x0000, 0x8800},
- /* 45402 1846 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45426 1847 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45450 1848 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 45476 1849 */ {0x0008, 0x8802},
- /* 45501 1850 */ {0x0014, 0x8801},
- /* 45526 1851 */ {0x0000, 0x8800},
- /* 45549 1852 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45573 1853 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45597 1854 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 45623 1855 */ {0x0008, 0x8802},
- /* 45648 1856 */ {0x0015, 0x8801},
- /* 45673 1857 */ {0x0001, 0x8800},
- /* 45696 1858 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45720 1859 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45744 1860 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 45770 1861 */ {0x0008, 0x8802},
- /* 45795 1862 */ {0x0016, 0x8801},
- /* 45820 1863 */ {0x0003, 0x8800},
- /* 45843 1864 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45867 1865 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 45891 1866 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 45917 1867 */ {0x0008, 0x8802},
- /* 45942 1868 */ {0x0017, 0x8801},
- /* 45967 1869 */ {0x0036, 0x8800},
- /* 45990 1870 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46014 1871 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46038 1872 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 46064 1873 */ {0x0008, 0x8802},
- /* 46089 1874 */ {0x0018, 0x8801},
- /* 46114 1875 */ {0x00ec, 0x8800},
- /* 46137 1876 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46161 1877 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46185 1878 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 46211 1879 */ {0x0008, 0x8802},
- /* 46236 1880 */ {0x001a, 0x8801},
- /* 46261 1881 */ {0x0094, 0x8800},
- /* 46284 1882 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46308 1883 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46332 1884 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 46358 1885 */ {0x0008, 0x8802},
- /* 46383 1886 */ {0x001b, 0x8801},
- /* 46408 1887 */ {0x0000, 0x8800},
- /* 46431 1888 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46455 1889 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46479 1890 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 46505 1891 */ {0x0008, 0x8802},
- /* 46530 1892 */ {0x0027, 0x8801},
- /* 46555 1893 */ {0x00a2, 0x8800},
- /* 46578 1894 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46602 1895 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46626 1896 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 46652 1897 */ {0x0008, 0x8802},
- /* 46677 1898 */ {0x0028, 0x8801},
- /* 46702 1899 */ {0x0040, 0x8800},
- /* 46725 1900 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46749 1901 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46773 1902 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 46799 1903 */ {0x0008, 0x8802},
- /* 46824 1904 */ {0x002a, 0x8801},
- /* 46849 1905 */ {0x0084, 0x8800},
- /* 46872 1906 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46896 1907 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 46920 1908 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 46946 1909 */ {0x0008, 0x8802},
- /* 46971 1910 */ {0x002b, 0x8801},
- /* 46996 1911 */ {0x00a8, 0x8800},
- /* 47019 1912 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47043 1913 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47067 1914 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 47093 1915 */ {0x0008, 0x8802},
- /* 47118 1916 */ {0x002c, 0x8801},
- /* 47143 1917 */ {0x00fe, 0x8800},
- /* 47166 1918 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47190 1919 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47214 1920 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 47240 1921 */ {0x0008, 0x8802},
- /* 47265 1922 */ {0x002d, 0x8801},
- /* 47290 1923 */ {0x0003, 0x8800},
- /* 47313 1924 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47337 1925 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47361 1926 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 47387 1927 */ {0x0008, 0x8802},
- /* 47412 1928 */ {0x0038, 0x8801},
- /* 47437 1929 */ {0x0083, 0x8800},
- /* 47460 1930 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47484 1931 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47508 1932 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 47534 1933 */ {0x0008, 0x8802},
- /* 47559 1934 */ {0x0033, 0x8801},
- /* 47584 1935 */ {0x0081, 0x8800},
- /* 47607 1936 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47631 1937 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47655 1938 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 47681 1939 */ {0x0008, 0x8802},
- /* 47706 1940 */ {0x0034, 0x8801},
- /* 47731 1941 */ {0x004a, 0x8800},
- /* 47754 1942 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47778 1943 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47802 1944 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 47828 1945 */ {0x0008, 0x8802},
- /* 47853 1946 */ {0x0039, 0x8801},
- /* 47878 1947 */ {0x0000, 0x8800},
- /* 47901 1948 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47925 1949 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 47949 1950 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 47975 1951 */ {0x0008, 0x8802},
- /* 48000 1952 */ {0x0010, 0x8801},
- /* 48025 1953 */ {0x00a8, 0x8800},
- /* 48048 1954 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48072 1955 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48096 1956 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 48122 1957 */ {0x0008, 0x8802},
- /* 48147 1958 */ {0x0006, 0x8801},
- /* 48172 1959 */ {0x0058, 0x8800},
- /* 48195 1960 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48219 1961 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48243 1962 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 48269 1963 */ {0x0008, 0x8802},
- /* 48294 1964 */ {0x0000, 0x8801},
- /* 48319 1965 */ {0x0004, 0x8800},
- /* 48342 1966 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48366 1967 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48390 1968 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 48416 1969 */ {0x0008, 0x8802},
- /* 48441 1970 */ {0x0040, 0x8801},
- /* 48466 1971 */ {0x0080, 0x8800},
- /* 48489 1972 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48513 1973 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48537 1974 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 48563 1975 */ {0x0008, 0x8802},
- /* 48588 1976 */ {0x0041, 0x8801},
- /* 48613 1977 */ {0x000c, 0x8800},
- /* 48636 1978 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48660 1979 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48684 1980 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 48710 1981 */ {0x0008, 0x8802},
- /* 48735 1982 */ {0x0042, 0x8801},
- /* 48760 1983 */ {0x000c, 0x8800},
- /* 48783 1984 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48807 1985 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48831 1986 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 48857 1987 */ {0x0008, 0x8802},
- /* 48882 1988 */ {0x0043, 0x8801},
- /* 48907 1989 */ {0x0028, 0x8800},
- /* 48930 1990 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48954 1991 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 48978 1992 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 49004 1993 */ {0x0008, 0x8802},
- /* 49029 1994 */ {0x0044, 0x8801},
- /* 49054 1995 */ {0x0080, 0x8800},
- /* 49077 1996 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49101 1997 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49125 1998 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 49151 1999 */ {0x0008, 0x8802},
- /* 49176 2000 */ {0x0045, 0x8801},
- /* 49201 2001 */ {0x0020, 0x8800},
- /* 49224 2002 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49248 2003 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49272 2004 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 49298 2005 */ {0x0008, 0x8802},
- /* 49323 2006 */ {0x0046, 0x8801},
- /* 49348 2007 */ {0x0020, 0x8800},
- /* 49371 2008 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49395 2009 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49419 2010 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 49445 2011 */ {0x0008, 0x8802},
- /* 49470 2012 */ {0x0047, 0x8801},
- /* 49495 2013 */ {0x0080, 0x8800},
- /* 49518 2014 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49542 2015 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49566 2016 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 49592 2017 */ {0x0008, 0x8802},
- /* 49617 2018 */ {0x0048, 0x8801},
- /* 49642 2019 */ {0x004c, 0x8800},
- /* 49665 2020 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49689 2021 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49713 2022 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 49739 2023 */ {0x0008, 0x8802},
- /* 49764 2024 */ {0x0049, 0x8801},
- /* 49789 2025 */ {0x0084, 0x8800},
- /* 49812 2026 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49836 2027 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49860 2028 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 49886 2029 */ {0x0008, 0x8802},
- /* 49911 2030 */ {0x004a, 0x8801},
- /* 49936 2031 */ {0x0084, 0x8800},
- /* 49959 2032 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 49983 2033 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 50007 2034 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 50033 2035 */ {0x0008, 0x8802},
- /* 50058 2036 */ {0x004b, 0x8801},
- /* 50083 2037 */ {0x0084, 0x8800},
- /* 50106 2038 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
+ {0x00c0, 0x8804}, /* SSI slave addr */
+ {0x0008, 0x8802}, /* 375 Khz SSI clock */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802}, /* 375 Khz SSI clock */
+ {0x0012, 0x8801}, /* SSI reg addr */
+ {0x0080, 0x8800}, /* SSI data to write */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802}, /* 375 Khz SSI clock */
+ {0x0012, 0x8801}, /* SSI reg addr */
+ {0x0000, 0x8800}, /* SSI data to write */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802}, /* 375 Khz SSI clock */
+ {0x0011, 0x8801}, /* SSI reg addr */
+ {0x0040, 0x8800}, /* SSI data to write */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0013, 0x8801},
+ {0x0000, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0014, 0x8801},
+ {0x0000, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0015, 0x8801},
+ {0x0001, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0016, 0x8801},
+ {0x0003, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0017, 0x8801},
+ {0x0036, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0018, 0x8801},
+ {0x00ec, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x001a, 0x8801},
+ {0x0094, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x001b, 0x8801},
+ {0x0000, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0027, 0x8801},
+ {0x00a2, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0028, 0x8801},
+ {0x0040, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x002a, 0x8801},
+ {0x0084, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x002b, 0x8801},
+ {0x00a8, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x002c, 0x8801},
+ {0x00fe, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x002d, 0x8801},
+ {0x0003, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0038, 0x8801},
+ {0x0083, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0033, 0x8801},
+ {0x0081, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0034, 0x8801},
+ {0x004a, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0039, 0x8801},
+ {0x0000, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0010, 0x8801},
+ {0x00a8, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0006, 0x8801},
+ {0x0058, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0000, 0x8801},
+ {0x0004, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0040, 0x8801},
+ {0x0080, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0041, 0x8801},
+ {0x000c, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0042, 0x8801},
+ {0x000c, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0043, 0x8801},
+ {0x0028, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0044, 0x8801},
+ {0x0080, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0045, 0x8801},
+ {0x0020, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0046, 0x8801},
+ {0x0020, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0047, 0x8801},
+ {0x0080, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0048, 0x8801},
+ {0x004c, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x0049, 0x8801},
+ {0x0084, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x004a, 0x8801},
+ {0x0084, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x0008, 0x8802},
+ {0x004b, 0x8801},
+ {0x0084, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* --------------------------------------- */
- /* 50132 2039 */ {0x0012, 0x8700},
- /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
- /* 50157 2040 */ {0x0000, 0x8701},
- /* CKx1 clock delay adj */
- /* 50182 2041 */ {0x0000, 0x8701},
- /* CKx1 clock delay adj */
- /* 50207 2042 */ {0x0001, 0x870c},
- /* CKOx2 output */
+ {0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
+ {0x0000, 0x8701}, /* CKx1 clock delay adj */
+ {0x0000, 0x8701}, /* CKx1 clock delay adj */
+ {0x0001, 0x870c}, /* CKOx2 output */
/* --------------------------------------- */
- /* 50232 2043 */ {0x0080, 0x8600},
- /* Line memory read counter (L) */
- /* 50257 2044 */ {0x0001, 0x8606},
- /* reserved */
- /* 50282 2045 */ {0x0064, 0x8607},
- /* Line memory read counter (H) 0x6480=25,728 */
- /* 50307 2046 */ {0x002a, 0x8601},
- /* CDSP sharp interpolation mode,
+ {0x0080, 0x8600}, /* Line memory read counter (L) */
+ {0x0001, 0x8606}, /* reserved */
+ {0x0064, 0x8607}, /* Line memory read counter (H) 0x6480=25,728 */
+ {0x002a, 0x8601}, /* CDSP sharp interpolation mode,
* line sel for color sep, edge enhance enab */
- /* 50332 2047 */ {0x0000, 0x8602},
- /* optical black level for user settng = 0 */
- /* 50357 2048 */ {0x0080, 0x8600},
- /* Line memory read counter (L) */
- /* 50382 2049 */ {0x000a, 0x8603},
- /* optical black level calc mode: auto; optical black offset = 10 */
- /* 50407 2050 */ {0x00df, 0x865b},
- /* Horiz offset for valid pixels (L)=0xdf */
- /* 50432 2051 */ {0x0012, 0x865c},
- /* Vert offset for valid lines (L)=0x12 */
+	{0x0000, 0x8602},	/* optical black level for user setting = 0 */
+ {0x0080, 0x8600}, /* Line memory read counter (L) */
+ {0x000a, 0x8603}, /* optical black level calc mode:
+ * auto; optical black offset = 10 */
+ {0x00df, 0x865b}, /* Horiz offset for valid pixels (L)=0xdf */
+ {0x0012, 0x865c}, /* Vert offset for valid lines (L)=0x12 */
/* The following two lines seem to be the "wrong" resolution. */
/* But perhaps these indicate the actual size of the sensor */
/* rather than the size of the current video mode. */
- /* 50457 2052 */ {0x0058, 0x865d},
- /* Horiz valid pixels (*4) (L) = 352 */
- /* 50482 2053 */ {0x0048, 0x865e},
- /* Vert valid lines (*4) (L) = 288 */
-
- /* 50507 2054 */ {0x0015, 0x8608},
- /* A11 Coef ... */
- /* 50532 2055 */ {0x0030, 0x8609},
- /* 50557 2056 */ {0x00fb, 0x860a},
- /* 50582 2057 */ {0x003e, 0x860b},
- /* 50607 2058 */ {0x00ce, 0x860c},
- /* 50632 2059 */ {0x00f4, 0x860d},
- /* 50657 2060 */ {0x00eb, 0x860e},
- /* 50682 2061 */ {0x00dc, 0x860f},
- /* 50707 2062 */ {0x0039, 0x8610},
- /* 50732 2063 */ {0x0001, 0x8611},
- /* R offset for white balance ... */
- /* 50757 2064 */ {0x0000, 0x8612},
- /* 50782 2065 */ {0x0001, 0x8613},
- /* 50807 2066 */ {0x0000, 0x8614},
- /* 50832 2067 */ {0x005b, 0x8651},
- /* R gain for white balance ... */
- /* 50857 2068 */ {0x0040, 0x8652},
- /* 50882 2069 */ {0x0060, 0x8653},
- /* 50907 2070 */ {0x0040, 0x8654},
- /* 50932 2071 */ {0x0000, 0x8655},
- /* 50957 2072 */ {0x0001, 0x863f},
- /* Fixed gamma correction enable, USB control,
- * lum filter disable, lum noise clip disable */
- /* 50982 2073 */ {0x00a1, 0x8656},
- /* Window1 size 256x256, Windows2 size 64x64,
- * gamma look-up disable, new edge enhancement enable */
- /* 51007 2074 */ {0x0018, 0x8657},
- /* Edge gain high thresh */
- /* 51032 2075 */ {0x0020, 0x8658},
- /* Edge gain low thresh */
- /* 51057 2076 */ {0x000a, 0x8659},
- /* Edge bandwidth high threshold */
- /* 51082 2077 */ {0x0005, 0x865a},
- /* Edge bandwidth low threshold */
+ {0x0058, 0x865d}, /* Horiz valid pixels (*4) (L) = 352 */
+ {0x0048, 0x865e}, /* Vert valid lines (*4) (L) = 288 */
+
+ {0x0015, 0x8608}, /* A11 Coef ... */
+ {0x0030, 0x8609},
+ {0x00fb, 0x860a},
+ {0x003e, 0x860b},
+ {0x00ce, 0x860c},
+ {0x00f4, 0x860d},
+ {0x00eb, 0x860e},
+ {0x00dc, 0x860f},
+ {0x0039, 0x8610},
+ {0x0001, 0x8611}, /* R offset for white balance ... */
+ {0x0000, 0x8612},
+ {0x0001, 0x8613},
+ {0x0000, 0x8614},
+ {0x005b, 0x8651}, /* R gain for white balance ... */
+ {0x0040, 0x8652},
+ {0x0060, 0x8653},
+ {0x0040, 0x8654},
+ {0x0000, 0x8655},
+ {0x0001, 0x863f}, /* Fixed gamma correction enable, USB control,
+ * lum filter disable, lum noise clip disable */
+ {0x00a1, 0x8656}, /* Window1 size 256x256, Windows2 size 64x64,
+ * gamma look-up disable,
+ * new edge enhancement enable */
+ {0x0018, 0x8657}, /* Edge gain high thresh */
+ {0x0020, 0x8658}, /* Edge gain low thresh */
+ {0x000a, 0x8659}, /* Edge bandwidth high threshold */
+ {0x0005, 0x865a}, /* Edge bandwidth low threshold */
/* -------------------------------- */
- /* 51107 2078 */ {0x0030, 0x8112},
- /* Video drop enable, ISO streaming enable */
- /* 51130 2079 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 51154 2080 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 51180 2081 */ {0xa908, 0x8802},
- /* 51205 2082 */ {0x0034, 0x8801},
- /* SSI reg addr */
- /* 51230 2083 */ {0x00ca, 0x8800},
+ {0x0030, 0x8112}, /* Video drop enable, ISO streaming enable */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0xa908, 0x8802},
+ {0x0034, 0x8801}, /* SSI reg addr */
+ {0x00ca, 0x8800},
/* SSI data to write */
- /* 51253 2084 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 51277 2085 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 51301 2086 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 51327 2087 */ {0x1f08, 0x8802},
- /* 51352 2088 */ {0x0006, 0x8801},
- /* 51377 2089 */ {0x0080, 0x8800},
- /* 51400 2090 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0x1f08, 0x8802},
+ {0x0006, 0x8801},
+ {0x0080, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* ----- Read back coefs we wrote earlier. */
- /* 51424 2091 */ /* READ { 0, 0x0000, 0x8608 } -> 0000: 15 */
- /* 51448 2092 */ /* READ { 0, 0x0000, 0x8609 } -> 0000: 30 */
- /* 51472 2093 */ /* READ { 0, 0x0000, 0x860a } -> 0000: fb */
- /* 51496 2094 */ /* READ { 0, 0x0000, 0x860b } -> 0000: 3e */
- /* 51520 2095 */ /* READ { 0, 0x0000, 0x860c } -> 0000: ce */
- /* 51544 2096 */ /* READ { 0, 0x0000, 0x860d } -> 0000: f4 */
- /* 51568 2097 */ /* READ { 0, 0x0000, 0x860e } -> 0000: eb */
- /* 51592 2098 */ /* READ { 0, 0x0000, 0x860f } -> 0000: dc */
- /* 51616 2099 */ /* READ { 0, 0x0000, 0x8610 } -> 0000: 39 */
- /* 51640 2100 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 51664 2101 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
- /* 51690 2102 */ {0xb008, 0x8802},
- /* 51715 2103 */ {0x0006, 0x8801},
- /* 51740 2104 */ {0x007d, 0x8800},
- /* 51763 2105 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0000, 0x8608 } -> 0000: 15 */
+ /* READ { 0x0000, 0x8609 } -> 0000: 30 */
+ /* READ { 0x0000, 0x860a } -> 0000: fb */
+ /* READ { 0x0000, 0x860b } -> 0000: 3e */
+ /* READ { 0x0000, 0x860c } -> 0000: ce */
+ /* READ { 0x0000, 0x860d } -> 0000: f4 */
+ /* READ { 0x0000, 0x860e } -> 0000: eb */
+ /* READ { 0x0000, 0x860f } -> 0000: dc */
+ /* READ { 0x0000, 0x8610 } -> 0000: 39 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 08 */
+ {0xb008, 0x8802},
+ {0x0006, 0x8801},
+ {0x007d, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* This chunk is seemingly redundant with */
/* earlier commands (A11 Coef...), but if I disable it, */
/* the image appears too dark. Maybe there was some kind of */
/* reset since the earlier commands, so this is necessary again. */
- /* 51789 2106 */ {0x0015, 0x8608},
- /* 51814 2107 */ {0x0030, 0x8609},
- /* 51839 2108 */ {0xfffb, 0x860a},
- /* 51864 2109 */ {0x003e, 0x860b},
- /* 51889 2110 */ {0xffce, 0x860c},
- /* 51914 2111 */ {0xfff4, 0x860d},
- /* 51939 2112 */ {0xffeb, 0x860e},
- /* 51964 2113 */ {0xffdc, 0x860f},
- /* 51989 2114 */ {0x0039, 0x8610},
- /* 52014 2115 */ {0x0018, 0x8657},
-
- /* 52039 2116 */ {0x0000, 0x8508},
- /* Disable compression. */
+ {0x0015, 0x8608},
+ {0x0030, 0x8609},
+ {0xfffb, 0x860a},
+ {0x003e, 0x860b},
+ {0xffce, 0x860c},
+ {0xfff4, 0x860d},
+ {0xffeb, 0x860e},
+ {0xffdc, 0x860f},
+ {0x0039, 0x8610},
+ {0x0018, 0x8657},
+
+ {0x0000, 0x8508}, /* Disable compression. */
/* Previous line was:
- * 52039 2116 * { 0, 0x0021, 0x8508 }, * Enable compression. */
- /* 52064 2117 */ {0x0032, 0x850b},
- /* compression stuff */
- /* 52089 2118 */ {0x0003, 0x8509},
- /* compression stuff */
- /* 52114 2119 */ {0x0011, 0x850a},
- /* compression stuff */
- /* 52139 2120 */ {0x0021, 0x850d},
- /* compression stuff */
- /* 52164 2121 */ {0x0010, 0x850c},
- /* compression stuff */
- /* 52189 2122 */ {0x0003, 0x8500},
- /* *** Video mode: 160x120 */
- /* 52214 2123 */ {0x0001, 0x8501},
- /* Hardware-dominated snap control */
- /* 52239 2124 */ {0x0061, 0x8656},
- /* Window1 size 128x128, Windows2 size 128x128,
- * gamma look-up disable, new edge enhancement enable */
- /* 52264 2125 */ {0x0018, 0x8617},
- /* Window1 start X (*2) */
- /* 52289 2126 */ {0x0008, 0x8618},
- /* Window1 start Y (*2) */
- /* 52314 2127 */ {0x0061, 0x8656},
- /* Window1 size 128x128, Windows2 size 128x128,
- * gamma look-up disable, new edge enhancement enable */
- /* 52339 2128 */ {0x0058, 0x8619},
- /* Window2 start X (*2) */
- /* 52364 2129 */ {0x0008, 0x861a},
- /* Window2 start Y (*2) */
- /* 52389 2130 */ {0x00ff, 0x8615},
- /* High lum thresh for white balance */
- /* 52414 2131 */ {0x0000, 0x8616},
- /* Low lum thresh for white balance */
- /* 52439 2132 */ {0x0012, 0x8700},
- /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
- /* 52464 2133 */ {0x0012, 0x8700},
- /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
- /* 52487 2134 */ /* READ { 0, 0x0000, 0x8656 } -> 0000: 61 */
- /* 52513 2135 */ {0x0028, 0x8802},
- /* 375 Khz SSI clock, SSI r/w sync with VSYNC */
- /* 52536 2136 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 52560 2137 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 28 */
- /* 52586 2138 */ {0x1f28, 0x8802},
- /* 375 Khz SSI clock, SSI r/w sync with VSYNC */
- /* 52611 2139 */ {0x0010, 0x8801},
- /* SSI reg addr */
- /* 52636 2140 */ {0x003e, 0x8800},
- /* SSI data to write */
- /* 52659 2141 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 52685 2142 */ {0x0028, 0x8802},
- /* 52708 2143 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 52732 2144 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 28 */
- /* 52758 2145 */ {0x1f28, 0x8802},
- /* 52783 2146 */ {0x0000, 0x8801},
- /* 52808 2147 */ {0x001f, 0x8800},
- /* 52831 2148 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 52857 2149 */ {0x0001, 0x8602},
- /* optical black level for user settning = 1 */
+ {0x0021, 0x8508}, * Enable compression. */
+ {0x0032, 0x850b}, /* compression stuff */
+ {0x0003, 0x8509}, /* compression stuff */
+ {0x0011, 0x850a}, /* compression stuff */
+ {0x0021, 0x850d}, /* compression stuff */
+ {0x0010, 0x850c}, /* compression stuff */
+ {0x0003, 0x8500}, /* *** Video mode: 160x120 */
+ {0x0001, 0x8501}, /* Hardware-dominated snap control */
+ {0x0061, 0x8656}, /* Window1 size 128x128, Window2 size 128x128,
+ * gamma look-up disable,
+ * new edge enhancement enable */
+ {0x0018, 0x8617}, /* Window1 start X (*2) */
+ {0x0008, 0x8618}, /* Window1 start Y (*2) */
+ {0x0061, 0x8656}, /* Window1 size 128x128, Window2 size 128x128,
+ * gamma look-up disable,
+ * new edge enhancement enable */
+ {0x0058, 0x8619}, /* Window2 start X (*2) */
+ {0x0008, 0x861a}, /* Window2 start Y (*2) */
+ {0x00ff, 0x8615}, /* High lum thresh for white balance */
+ {0x0000, 0x8616}, /* Low lum thresh for white balance */
+ {0x0012, 0x8700}, /* Clock speed 48 MHz/(2+2)/2 = 6 MHz */
+ {0x0012, 0x8700}, /* Clock speed 48 MHz/(2+2)/2 = 6 MHz */
+ /* READ { 0x0000, 0x8656 } -> 0000: 61 */
+ {0x0028, 0x8802}, /* 375 kHz SSI clock, SSI r/w sync with VSYNC */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 28 */
+ {0x1f28, 0x8802}, /* 375 kHz SSI clock, SSI r/w sync with VSYNC */
+ {0x0010, 0x8801}, /* SSI reg addr */
+ {0x003e, 0x8800}, /* SSI data to write */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ {0x0028, 0x8802},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 28 */
+ {0x1f28, 0x8802},
+ {0x0000, 0x8801},
+ {0x001f, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ {0x0001, 0x8602}, /* optical black level for user setting = 1 */
/* Original: */
- /* 52882 2150 */ {0x0023, 0x8700},
- /* Clock speed 48Mhz/(3+2)/4= 2.4 Mhz */
- /* 52907 2151 */ {0x000f, 0x8602},
- /* optical black level for user settning = 15 */
-
- /* 52932 2152 */ {0x0028, 0x8802},
- /* 52955 2153 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 52979 2154 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 28 */
- /* 53005 2155 */ {0x1f28, 0x8802},
- /* 53030 2156 */ {0x0010, 0x8801},
- /* 53055 2157 */ {0x007b, 0x8800},
- /* 53078 2158 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
- /* 53104 2159 */ {0x002f, 0x8651},
- /* R gain for white balance ... */
- /* 53129 2160 */ {0x0080, 0x8653},
- /* 53152 2161 */ /* READ { 0, 0x0000, 0x8655 } -> 0000: 00 */
- /* 53178 2162 */ {0x0000, 0x8655},
-
- /* 53203 2163 */ {0x0030, 0x8112},
- /* Video drop enable, ISO streaming enable */
- /* 53228 2164 */ {0x0020, 0x8112},
- /* Video drop enable, ISO streaming disable */
- /* 53252 2165 */
- /* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE: (ALT=0) ) */
+ {0x0023, 0x8700}, /* Clock speed 48 MHz/(3+2)/4 = 2.4 MHz */
+ {0x000f, 0x8602}, /* optical black level for user setting = 15 */
+
+ {0x0028, 0x8802},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 28 */
+ {0x1f28, 0x8802},
+ {0x0010, 0x8801},
+ {0x007b, 0x8800},
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ {0x002f, 0x8651}, /* R gain for white balance ... */
+ {0x0080, 0x8653},
+ /* READ { 0x0000, 0x8655 } -> 0000: 00 */
+ {0x0000, 0x8655},
+
+ {0x0030, 0x8112}, /* Video drop enable, ISO streaming enable */
+ {0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
+ /* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE: (ALT=0) ) */
{}
};
@@ -592,27 +506,27 @@ static const u16 spca508_init_data[][2] =
* Initialization data for Intel EasyPC Camera CS110
*/
static const u16 spca508cs110_init_data[][2] = {
- {0x0000, 0x870b}, /* Reset CTL3 */
- {0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */
- {0x0000, 0x8111}, /* Normal operation on reset */
+ {0x0000, 0x870b}, /* Reset CTL3 */
+ {0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */
+ {0x0000, 0x8111}, /* Normal operation on reset */
{0x0090, 0x8110},
/* External Clock 2x & Synchronous Serial Interface Output */
- {0x0020, 0x8112}, /* Video Drop packet enable */
- {0x0000, 0x8114}, /* Software GPIO output data */
+ {0x0020, 0x8112}, /* Video Drop packet enable */
+ {0x0000, 0x8114}, /* Software GPIO output data */
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0003, 0x8114},
/* Initial sequence Synchronous Serial Interface */
- {0x000f, 0x8402}, /* Memory bank Address */
- {0x0000, 0x8403}, /* Memory bank Address */
- {0x00ba, 0x8804}, /* SSI Slave address */
- {0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */
- {0x0010, 0x8802}, /* 93.75kHz SSI Clock two DataByte */
+ {0x000f, 0x8402}, /* Memory bank Address */
+ {0x0000, 0x8403}, /* Memory bank Address */
+ {0x00ba, 0x8804}, /* SSI Slave address */
+ {0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */
+ {0x0010, 0x8802}, /* 93.75kHz SSI Clock two DataByte */
{0x0001, 0x8801},
- {0x000a, 0x8805},/* a - NWG: Dunno what this is about */
+ {0x000a, 0x8805}, /* a - NWG: Dunno what this is about */
{0x0000, 0x8800},
{0x0010, 0x8802},
@@ -646,459 +560,459 @@ static const u16 spca508cs110_init_data[][2] = {
{0x0000, 0x8800},
{0x0010, 0x8802},
- {0x0002, 0x8704}, /* External input CKIx1 */
- {0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */
- {0x009a, 0x8600}, /* Line memory Read Counter (L) */
- {0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */
- {0x0003, 0x865c}, /* 3 Vertical Offset for Valid Lines(L) */
- {0x0058, 0x865d}, /* 58 Horizontal Valid Pixel Window(L) */
+ {0x0002, 0x8704}, /* External input CKIx1 */
+ {0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */
+ {0x009a, 0x8600}, /* Line memory Read Counter (L) */
+ {0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */
+ {0x0003, 0x865c}, /* 3 Vertical Offset for Valid Lines(L) */
+ {0x0058, 0x865d}, /* 58 Horizontal Valid Pixel Window(L) */
- {0x0006, 0x8660}, /* Nibble data + input order */
+ {0x0006, 0x8660}, /* Nibble data + input order */
- {0x000a, 0x8602}, /* Optical black level set to 0x0a */
-/* 1945 */ {0x0000, 0x8603}, /* Optical black level Offset */
+ {0x000a, 0x8602}, /* Optical black level set to 0x0a */
+ {0x0000, 0x8603}, /* Optical black level Offset */
-/* 1962 * {0, 0x0000, 0x8611}, * 0 R Offset for white Balance */
-/* 1963 * {0, 0x0000, 0x8612}, * 1 Gr Offset for white Balance */
-/* 1964 * {0, 0x0000, 0x8613}, * 1f B Offset for white Balance */
-/* 1965 * {0, 0x0000, 0x8614}, * f0 Gb Offset for white Balance */
+/* {0x0000, 0x8611}, * 0 R Offset for white Balance */
+/* {0x0000, 0x8612}, * 1 Gr Offset for white Balance */
+/* {0x0000, 0x8613}, * 1f B Offset for white Balance */
+/* {0x0000, 0x8614}, * f0 Gb Offset for white Balance */
- {0x0040, 0x8651}, /* 2b BLUE gain for white balance good at all 60 */
- {0x0030, 0x8652}, /* 41 Gr Gain for white Balance (L) */
- {0x0035, 0x8653}, /* 26 RED gain for white balance */
- {0x0035, 0x8654}, /* 40Gb Gain for white Balance (L) */
+ {0x0040, 0x8651}, /* 2b BLUE gain for white balance good at all 60 */
+ {0x0030, 0x8652}, /* 41 Gr Gain for white Balance (L) */
+ {0x0035, 0x8653}, /* 26 RED gain for white balance */
+ {0x0035, 0x8654}, /* 40Gb Gain for white Balance (L) */
{0x0041, 0x863f},
/* Fixed Gamma correction enabled (makes colours look better) */
-/* 2422 */ {0x0000, 0x8655},
- /* High bits for white balance*****brightness control*** */
+ {0x0000, 0x8655},
+ /* High bits for white balance*****brightness control*** */
{}
};
static const u16 spca508_sightcam_init_data[][2] = {
 /* This line seems to set up the frame/canvas */
- /*368 */ {0x000f, 0x8402},
+ {0x000f, 0x8402},
 /* These 6 lines are needed to start up the webcam */
- /*398 */ {0x0090, 0x8110},
- /*399 */ {0x0001, 0x8114},
- /*400 */ {0x0001, 0x8114},
- /*401 */ {0x0001, 0x8114},
- /*402 */ {0x0003, 0x8114},
- /*403 */ {0x0080, 0x8804},
+ {0x0090, 0x8110},
+ {0x0001, 0x8114},
+ {0x0001, 0x8114},
+ {0x0001, 0x8114},
+ {0x0003, 0x8114},
+ {0x0080, 0x8804},
/* This part seems to make the pictures darker? (autobrightness?) */
- /*436 */ {0x0001, 0x8801},
- /*437 */ {0x0004, 0x8800},
- /*439 */ {0x0003, 0x8801},
- /*440 */ {0x00e0, 0x8800},
- /*442 */ {0x0004, 0x8801},
- /*443 */ {0x00b4, 0x8800},
- /*445 */ {0x0005, 0x8801},
- /*446 */ {0x0000, 0x8800},
-
- /*448 */ {0x0006, 0x8801},
- /*449 */ {0x00e0, 0x8800},
- /*451 */ {0x0007, 0x8801},
- /*452 */ {0x000c, 0x8800},
+ {0x0001, 0x8801},
+ {0x0004, 0x8800},
+ {0x0003, 0x8801},
+ {0x00e0, 0x8800},
+ {0x0004, 0x8801},
+ {0x00b4, 0x8800},
+ {0x0005, 0x8801},
+ {0x0000, 0x8800},
+
+ {0x0006, 0x8801},
+ {0x00e0, 0x8800},
+ {0x0007, 0x8801},
+ {0x000c, 0x8800},
 /* This section is just needed; it probably
* does something like the previous section,
* but the cam won't start if it's not included.
*/
- /*484 */ {0x0014, 0x8801},
- /*485 */ {0x0008, 0x8800},
- /*487 */ {0x0015, 0x8801},
- /*488 */ {0x0067, 0x8800},
- /*490 */ {0x0016, 0x8801},
- /*491 */ {0x0000, 0x8800},
- /*493 */ {0x0017, 0x8801},
- /*494 */ {0x0020, 0x8800},
- /*496 */ {0x0018, 0x8801},
- /*497 */ {0x0044, 0x8800},
+ {0x0014, 0x8801},
+ {0x0008, 0x8800},
+ {0x0015, 0x8801},
+ {0x0067, 0x8800},
+ {0x0016, 0x8801},
+ {0x0000, 0x8800},
+ {0x0017, 0x8801},
+ {0x0020, 0x8800},
+ {0x0018, 0x8801},
+ {0x0044, 0x8800},
/* Makes the picture darker - and the
* cam won't start if not included
*/
- /*505 */ {0x001e, 0x8801},
- /*506 */ {0x00ea, 0x8800},
- /*508 */ {0x001f, 0x8801},
- /*509 */ {0x0001, 0x8800},
- /*511 */ {0x0003, 0x8801},
- /*512 */ {0x00e0, 0x8800},
+ {0x001e, 0x8801},
+ {0x00ea, 0x8800},
+ {0x001f, 0x8801},
+ {0x0001, 0x8800},
+ {0x0003, 0x8801},
+ {0x00e0, 0x8800},
 /* seems to place the colors on top of each other #1 */
- /*517 */ {0x0006, 0x8704},
- /*518 */ {0x0001, 0x870c},
- /*519 */ {0x0016, 0x8600},
- /*520 */ {0x0002, 0x8606},
+ {0x0006, 0x8704},
+ {0x0001, 0x870c},
+ {0x0016, 0x8600},
+ {0x0002, 0x8606},
 /* if not included the picture becomes _very_ dark */
- /*521 */ {0x0064, 0x8607},
- /*522 */ {0x003a, 0x8601},
- /*523 */ {0x0000, 0x8602},
+ {0x0064, 0x8607},
+ {0x003a, 0x8601},
+ {0x0000, 0x8602},
 /* seems to place the colors on top of each other #2 */
- /*524 */ {0x0016, 0x8600},
- /*525 */ {0x0018, 0x8617},
- /*526 */ {0x0008, 0x8618},
- /*527 */ {0x00a1, 0x8656},
+ {0x0016, 0x8600},
+ {0x0018, 0x8617},
+ {0x0008, 0x8618},
+ {0x00a1, 0x8656},
/* webcam won't start if not included */
- /*528 */ {0x0007, 0x865b},
- /*529 */ {0x0001, 0x865c},
- /*530 */ {0x0058, 0x865d},
- /*531 */ {0x0048, 0x865e},
+ {0x0007, 0x865b},
+ {0x0001, 0x865c},
+ {0x0058, 0x865d},
+ {0x0048, 0x865e},
/* adjusts the colors */
- /*541 */ {0x0049, 0x8651},
- /*542 */ {0x0040, 0x8652},
- /*543 */ {0x004c, 0x8653},
- /*544 */ {0x0040, 0x8654},
+ {0x0049, 0x8651},
+ {0x0040, 0x8652},
+ {0x004c, 0x8653},
+ {0x0040, 0x8654},
{}
};
static const u16 spca508_sightcam2_init_data[][2] = {
-/* 35 */ {0x0020, 0x8112},
-
-/* 36 */ {0x000f, 0x8402},
-/* 37 */ {0x0000, 0x8403},
-
-/* 38 */ {0x0008, 0x8201},
-/* 39 */ {0x0008, 0x8200},
-/* 40 */ {0x0001, 0x8200},
-/* 43 */ {0x0009, 0x8201},
-/* 44 */ {0x0008, 0x8200},
-/* 45 */ {0x0001, 0x8200},
-/* 48 */ {0x000a, 0x8201},
-/* 49 */ {0x0008, 0x8200},
-/* 50 */ {0x0001, 0x8200},
-/* 53 */ {0x000b, 0x8201},
-/* 54 */ {0x0008, 0x8200},
-/* 55 */ {0x0001, 0x8200},
-/* 58 */ {0x000c, 0x8201},
-/* 59 */ {0x0008, 0x8200},
-/* 60 */ {0x0001, 0x8200},
-/* 63 */ {0x000d, 0x8201},
-/* 64 */ {0x0008, 0x8200},
-/* 65 */ {0x0001, 0x8200},
-/* 68 */ {0x000e, 0x8201},
-/* 69 */ {0x0008, 0x8200},
-/* 70 */ {0x0001, 0x8200},
-/* 73 */ {0x0007, 0x8201},
-/* 74 */ {0x0008, 0x8200},
-/* 75 */ {0x0001, 0x8200},
-/* 78 */ {0x000f, 0x8201},
-/* 79 */ {0x0008, 0x8200},
-/* 80 */ {0x0001, 0x8200},
-
-/* 84 */ {0x0018, 0x8660},
-/* 85 */ {0x0010, 0x8201},
-
-/* 86 */ {0x0008, 0x8200},
-/* 87 */ {0x0001, 0x8200},
-/* 90 */ {0x0011, 0x8201},
-/* 91 */ {0x0008, 0x8200},
-/* 92 */ {0x0001, 0x8200},
-
-/* 95 */ {0x0000, 0x86b0},
-/* 96 */ {0x0034, 0x86b1},
-/* 97 */ {0x0000, 0x86b2},
-/* 98 */ {0x0049, 0x86b3},
-/* 99 */ {0x0000, 0x86b4},
-/* 100 */ {0x0000, 0x86b4},
-
-/* 101 */ {0x0012, 0x8201},
-/* 102 */ {0x0008, 0x8200},
-/* 103 */ {0x0001, 0x8200},
-/* 106 */ {0x0013, 0x8201},
-/* 107 */ {0x0008, 0x8200},
-/* 108 */ {0x0001, 0x8200},
-
-/* 111 */ {0x0001, 0x86b0},
-/* 112 */ {0x00aa, 0x86b1},
-/* 113 */ {0x0000, 0x86b2},
-/* 114 */ {0x00e4, 0x86b3},
-/* 115 */ {0x0000, 0x86b4},
-/* 116 */ {0x0000, 0x86b4},
-
-/* 118 */ {0x0018, 0x8660},
-
-/* 119 */ {0x0090, 0x8110},
-/* 120 */ {0x0001, 0x8114},
-/* 121 */ {0x0001, 0x8114},
-/* 122 */ {0x0001, 0x8114},
-/* 123 */ {0x0003, 0x8114},
-
-/* 124 */ {0x0080, 0x8804},
-/* 157 */ {0x0003, 0x8801},
-/* 158 */ {0x0012, 0x8800},
-/* 160 */ {0x0004, 0x8801},
-/* 161 */ {0x0005, 0x8800},
-/* 163 */ {0x0005, 0x8801},
-/* 164 */ {0x0000, 0x8800},
-/* 166 */ {0x0006, 0x8801},
-/* 167 */ {0x0000, 0x8800},
-/* 169 */ {0x0007, 0x8801},
-/* 170 */ {0x0000, 0x8800},
-/* 172 */ {0x0008, 0x8801},
-/* 173 */ {0x0005, 0x8800},
-/* 175 */ {0x000a, 0x8700},
-/* 176 */ {0x000e, 0x8801},
-/* 177 */ {0x0004, 0x8800},
-/* 179 */ {0x0005, 0x8801},
-/* 180 */ {0x0047, 0x8800},
-/* 182 */ {0x0006, 0x8801},
-/* 183 */ {0x0000, 0x8800},
-/* 185 */ {0x0007, 0x8801},
-/* 186 */ {0x00c0, 0x8800},
-/* 188 */ {0x0008, 0x8801},
-/* 189 */ {0x0003, 0x8800},
-/* 191 */ {0x0013, 0x8801},
-/* 192 */ {0x0001, 0x8800},
-/* 194 */ {0x0009, 0x8801},
-/* 195 */ {0x0000, 0x8800},
-/* 197 */ {0x000a, 0x8801},
-/* 198 */ {0x0000, 0x8800},
-/* 200 */ {0x000b, 0x8801},
-/* 201 */ {0x0000, 0x8800},
-/* 203 */ {0x000c, 0x8801},
-/* 204 */ {0x0000, 0x8800},
-/* 206 */ {0x000e, 0x8801},
-/* 207 */ {0x0004, 0x8800},
-/* 209 */ {0x000f, 0x8801},
-/* 210 */ {0x0000, 0x8800},
-/* 212 */ {0x0010, 0x8801},
-/* 213 */ {0x0006, 0x8800},
-/* 215 */ {0x0011, 0x8801},
-/* 216 */ {0x0006, 0x8800},
-/* 218 */ {0x0012, 0x8801},
-/* 219 */ {0x0000, 0x8800},
-/* 221 */ {0x0013, 0x8801},
-/* 222 */ {0x0001, 0x8800},
-
-/* 224 */ {0x000a, 0x8700},
-/* 225 */ {0x0000, 0x8702},
-/* 226 */ {0x0000, 0x8703},
-/* 227 */ {0x00c2, 0x8704},
-/* 228 */ {0x0001, 0x870c},
-
-/* 229 */ {0x0044, 0x8600},
-/* 230 */ {0x0002, 0x8606},
-/* 231 */ {0x0064, 0x8607},
-/* 232 */ {0x003a, 0x8601},
-/* 233 */ {0x0008, 0x8602},
-/* 234 */ {0x0044, 0x8600},
-/* 235 */ {0x0018, 0x8617},
-/* 236 */ {0x0008, 0x8618},
-/* 237 */ {0x00a1, 0x8656},
-/* 238 */ {0x0004, 0x865b},
-/* 239 */ {0x0002, 0x865c},
-/* 240 */ {0x0058, 0x865d},
-/* 241 */ {0x0048, 0x865e},
-/* 242 */ {0x0012, 0x8608},
-/* 243 */ {0x002c, 0x8609},
-/* 244 */ {0x0002, 0x860a},
-/* 245 */ {0x002c, 0x860b},
-/* 246 */ {0x00db, 0x860c},
-/* 247 */ {0x00f9, 0x860d},
-/* 248 */ {0x00f1, 0x860e},
-/* 249 */ {0x00e3, 0x860f},
-/* 250 */ {0x002c, 0x8610},
-/* 251 */ {0x006c, 0x8651},
-/* 252 */ {0x0041, 0x8652},
-/* 253 */ {0x0059, 0x8653},
-/* 254 */ {0x0040, 0x8654},
-/* 255 */ {0x00fa, 0x8611},
-/* 256 */ {0x00ff, 0x8612},
-/* 257 */ {0x00f8, 0x8613},
-/* 258 */ {0x0000, 0x8614},
-/* 259 */ {0x0001, 0x863f},
-/* 260 */ {0x0000, 0x8640},
-/* 261 */ {0x0026, 0x8641},
-/* 262 */ {0x0045, 0x8642},
-/* 263 */ {0x0060, 0x8643},
-/* 264 */ {0x0075, 0x8644},
-/* 265 */ {0x0088, 0x8645},
-/* 266 */ {0x009b, 0x8646},
-/* 267 */ {0x00b0, 0x8647},
-/* 268 */ {0x00c5, 0x8648},
-/* 269 */ {0x00d2, 0x8649},
-/* 270 */ {0x00dc, 0x864a},
-/* 271 */ {0x00e5, 0x864b},
-/* 272 */ {0x00eb, 0x864c},
-/* 273 */ {0x00f0, 0x864d},
-/* 274 */ {0x00f6, 0x864e},
-/* 275 */ {0x00fa, 0x864f},
-/* 276 */ {0x00ff, 0x8650},
-/* 277 */ {0x0060, 0x8657},
-/* 278 */ {0x0010, 0x8658},
-/* 279 */ {0x0018, 0x8659},
-/* 280 */ {0x0005, 0x865a},
-/* 281 */ {0x0018, 0x8660},
-/* 282 */ {0x0003, 0x8509},
-/* 283 */ {0x0011, 0x850a},
-/* 284 */ {0x0032, 0x850b},
-/* 285 */ {0x0010, 0x850c},
-/* 286 */ {0x0021, 0x850d},
-/* 287 */ {0x0001, 0x8500},
-/* 288 */ {0x0000, 0x8508},
-/* 289 */ {0x0012, 0x8608},
-/* 290 */ {0x002c, 0x8609},
-/* 291 */ {0x0002, 0x860a},
-/* 292 */ {0x0039, 0x860b},
-/* 293 */ {0x00d0, 0x860c},
-/* 294 */ {0x00f7, 0x860d},
-/* 295 */ {0x00ed, 0x860e},
-/* 296 */ {0x00db, 0x860f},
-/* 297 */ {0x0039, 0x8610},
-/* 298 */ {0x0012, 0x8657},
-/* 299 */ {0x000c, 0x8619},
-/* 300 */ {0x0004, 0x861a},
-/* 301 */ {0x00a1, 0x8656},
-/* 302 */ {0x00c8, 0x8615},
-/* 303 */ {0x0032, 0x8616},
-
-/* 306 */ {0x0030, 0x8112},
-/* 313 */ {0x0020, 0x8112},
-/* 314 */ {0x0020, 0x8112},
-/* 315 */ {0x000f, 0x8402},
-/* 316 */ {0x0000, 0x8403},
-
-/* 317 */ {0x0090, 0x8110},
-/* 318 */ {0x0001, 0x8114},
-/* 319 */ {0x0001, 0x8114},
-/* 320 */ {0x0001, 0x8114},
-/* 321 */ {0x0003, 0x8114},
-/* 322 */ {0x0080, 0x8804},
-
-/* 355 */ {0x0003, 0x8801},
-/* 356 */ {0x0012, 0x8800},
-/* 358 */ {0x0004, 0x8801},
-/* 359 */ {0x0005, 0x8800},
-/* 361 */ {0x0005, 0x8801},
-/* 362 */ {0x0047, 0x8800},
-/* 364 */ {0x0006, 0x8801},
-/* 365 */ {0x0000, 0x8800},
-/* 367 */ {0x0007, 0x8801},
-/* 368 */ {0x00c0, 0x8800},
-/* 370 */ {0x0008, 0x8801},
-/* 371 */ {0x0003, 0x8800},
-/* 373 */ {0x000a, 0x8700},
-/* 374 */ {0x000e, 0x8801},
-/* 375 */ {0x0004, 0x8800},
-/* 377 */ {0x0005, 0x8801},
-/* 378 */ {0x0047, 0x8800},
-/* 380 */ {0x0006, 0x8801},
-/* 381 */ {0x0000, 0x8800},
-/* 383 */ {0x0007, 0x8801},
-/* 384 */ {0x00c0, 0x8800},
-/* 386 */ {0x0008, 0x8801},
-/* 387 */ {0x0003, 0x8800},
-/* 389 */ {0x0013, 0x8801},
-/* 390 */ {0x0001, 0x8800},
-/* 392 */ {0x0009, 0x8801},
-/* 393 */ {0x0000, 0x8800},
-/* 395 */ {0x000a, 0x8801},
-/* 396 */ {0x0000, 0x8800},
-/* 398 */ {0x000b, 0x8801},
-/* 399 */ {0x0000, 0x8800},
-/* 401 */ {0x000c, 0x8801},
-/* 402 */ {0x0000, 0x8800},
-/* 404 */ {0x000e, 0x8801},
-/* 405 */ {0x0004, 0x8800},
-/* 407 */ {0x000f, 0x8801},
-/* 408 */ {0x0000, 0x8800},
-/* 410 */ {0x0010, 0x8801},
-/* 411 */ {0x0006, 0x8800},
-/* 413 */ {0x0011, 0x8801},
-/* 414 */ {0x0006, 0x8800},
-/* 416 */ {0x0012, 0x8801},
-/* 417 */ {0x0000, 0x8800},
-/* 419 */ {0x0013, 0x8801},
-/* 420 */ {0x0001, 0x8800},
-/* 422 */ {0x000a, 0x8700},
-/* 423 */ {0x0000, 0x8702},
-/* 424 */ {0x0000, 0x8703},
-/* 425 */ {0x00c2, 0x8704},
-/* 426 */ {0x0001, 0x870c},
-/* 427 */ {0x0044, 0x8600},
-/* 428 */ {0x0002, 0x8606},
-/* 429 */ {0x0064, 0x8607},
-/* 430 */ {0x003a, 0x8601},
-/* 431 */ {0x0008, 0x8602},
-/* 432 */ {0x0044, 0x8600},
-/* 433 */ {0x0018, 0x8617},
-/* 434 */ {0x0008, 0x8618},
-/* 435 */ {0x00a1, 0x8656},
-/* 436 */ {0x0004, 0x865b},
-/* 437 */ {0x0002, 0x865c},
-/* 438 */ {0x0058, 0x865d},
-/* 439 */ {0x0048, 0x865e},
-/* 440 */ {0x0012, 0x8608},
-/* 441 */ {0x002c, 0x8609},
-/* 442 */ {0x0002, 0x860a},
-/* 443 */ {0x002c, 0x860b},
-/* 444 */ {0x00db, 0x860c},
-/* 445 */ {0x00f9, 0x860d},
-/* 446 */ {0x00f1, 0x860e},
-/* 447 */ {0x00e3, 0x860f},
-/* 448 */ {0x002c, 0x8610},
-/* 449 */ {0x006c, 0x8651},
-/* 450 */ {0x0041, 0x8652},
-/* 451 */ {0x0059, 0x8653},
-/* 452 */ {0x0040, 0x8654},
-/* 453 */ {0x00fa, 0x8611},
-/* 454 */ {0x00ff, 0x8612},
-/* 455 */ {0x00f8, 0x8613},
-/* 456 */ {0x0000, 0x8614},
-/* 457 */ {0x0001, 0x863f},
-/* 458 */ {0x0000, 0x8640},
-/* 459 */ {0x0026, 0x8641},
-/* 460 */ {0x0045, 0x8642},
-/* 461 */ {0x0060, 0x8643},
-/* 462 */ {0x0075, 0x8644},
-/* 463 */ {0x0088, 0x8645},
-/* 464 */ {0x009b, 0x8646},
-/* 465 */ {0x00b0, 0x8647},
-/* 466 */ {0x00c5, 0x8648},
-/* 467 */ {0x00d2, 0x8649},
-/* 468 */ {0x00dc, 0x864a},
-/* 469 */ {0x00e5, 0x864b},
-/* 470 */ {0x00eb, 0x864c},
-/* 471 */ {0x00f0, 0x864d},
-/* 472 */ {0x00f6, 0x864e},
-/* 473 */ {0x00fa, 0x864f},
-/* 474 */ {0x00ff, 0x8650},
-/* 475 */ {0x0060, 0x8657},
-/* 476 */ {0x0010, 0x8658},
-/* 477 */ {0x0018, 0x8659},
-/* 478 */ {0x0005, 0x865a},
-/* 479 */ {0x0018, 0x8660},
-/* 480 */ {0x0003, 0x8509},
-/* 481 */ {0x0011, 0x850a},
-/* 482 */ {0x0032, 0x850b},
-/* 483 */ {0x0010, 0x850c},
-/* 484 */ {0x0021, 0x850d},
-/* 485 */ {0x0001, 0x8500},
-/* 486 */ {0x0000, 0x8508},
-
-/* 487 */ {0x0012, 0x8608},
-/* 488 */ {0x002c, 0x8609},
-/* 489 */ {0x0002, 0x860a},
-/* 490 */ {0x0039, 0x860b},
-/* 491 */ {0x00d0, 0x860c},
-/* 492 */ {0x00f7, 0x860d},
-/* 493 */ {0x00ed, 0x860e},
-/* 494 */ {0x00db, 0x860f},
-/* 495 */ {0x0039, 0x8610},
-/* 496 */ {0x0012, 0x8657},
-/* 497 */ {0x0064, 0x8619},
+ {0x0020, 0x8112},
+
+ {0x000f, 0x8402},
+ {0x0000, 0x8403},
+
+ {0x0008, 0x8201},
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+ {0x0009, 0x8201},
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+ {0x000a, 0x8201},
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+ {0x000b, 0x8201},
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+ {0x000c, 0x8201},
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+ {0x000d, 0x8201},
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+ {0x000e, 0x8201},
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+ {0x0007, 0x8201},
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+ {0x000f, 0x8201},
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+
+ {0x0018, 0x8660},
+ {0x0010, 0x8201},
+
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+ {0x0011, 0x8201},
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+
+ {0x0000, 0x86b0},
+ {0x0034, 0x86b1},
+ {0x0000, 0x86b2},
+ {0x0049, 0x86b3},
+ {0x0000, 0x86b4},
+ {0x0000, 0x86b4},
+
+ {0x0012, 0x8201},
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+ {0x0013, 0x8201},
+ {0x0008, 0x8200},
+ {0x0001, 0x8200},
+
+ {0x0001, 0x86b0},
+ {0x00aa, 0x86b1},
+ {0x0000, 0x86b2},
+ {0x00e4, 0x86b3},
+ {0x0000, 0x86b4},
+ {0x0000, 0x86b4},
+
+ {0x0018, 0x8660},
+
+ {0x0090, 0x8110},
+ {0x0001, 0x8114},
+ {0x0001, 0x8114},
+ {0x0001, 0x8114},
+ {0x0003, 0x8114},
+
+ {0x0080, 0x8804},
+ {0x0003, 0x8801},
+ {0x0012, 0x8800},
+ {0x0004, 0x8801},
+ {0x0005, 0x8800},
+ {0x0005, 0x8801},
+ {0x0000, 0x8800},
+ {0x0006, 0x8801},
+ {0x0000, 0x8800},
+ {0x0007, 0x8801},
+ {0x0000, 0x8800},
+ {0x0008, 0x8801},
+ {0x0005, 0x8800},
+ {0x000a, 0x8700},
+ {0x000e, 0x8801},
+ {0x0004, 0x8800},
+ {0x0005, 0x8801},
+ {0x0047, 0x8800},
+ {0x0006, 0x8801},
+ {0x0000, 0x8800},
+ {0x0007, 0x8801},
+ {0x00c0, 0x8800},
+ {0x0008, 0x8801},
+ {0x0003, 0x8800},
+ {0x0013, 0x8801},
+ {0x0001, 0x8800},
+ {0x0009, 0x8801},
+ {0x0000, 0x8800},
+ {0x000a, 0x8801},
+ {0x0000, 0x8800},
+ {0x000b, 0x8801},
+ {0x0000, 0x8800},
+ {0x000c, 0x8801},
+ {0x0000, 0x8800},
+ {0x000e, 0x8801},
+ {0x0004, 0x8800},
+ {0x000f, 0x8801},
+ {0x0000, 0x8800},
+ {0x0010, 0x8801},
+ {0x0006, 0x8800},
+ {0x0011, 0x8801},
+ {0x0006, 0x8800},
+ {0x0012, 0x8801},
+ {0x0000, 0x8800},
+ {0x0013, 0x8801},
+ {0x0001, 0x8800},
+
+ {0x000a, 0x8700},
+ {0x0000, 0x8702},
+ {0x0000, 0x8703},
+ {0x00c2, 0x8704},
+ {0x0001, 0x870c},
+
+ {0x0044, 0x8600},
+ {0x0002, 0x8606},
+ {0x0064, 0x8607},
+ {0x003a, 0x8601},
+ {0x0008, 0x8602},
+ {0x0044, 0x8600},
+ {0x0018, 0x8617},
+ {0x0008, 0x8618},
+ {0x00a1, 0x8656},
+ {0x0004, 0x865b},
+ {0x0002, 0x865c},
+ {0x0058, 0x865d},
+ {0x0048, 0x865e},
+ {0x0012, 0x8608},
+ {0x002c, 0x8609},
+ {0x0002, 0x860a},
+ {0x002c, 0x860b},
+ {0x00db, 0x860c},
+ {0x00f9, 0x860d},
+ {0x00f1, 0x860e},
+ {0x00e3, 0x860f},
+ {0x002c, 0x8610},
+ {0x006c, 0x8651},
+ {0x0041, 0x8652},
+ {0x0059, 0x8653},
+ {0x0040, 0x8654},
+ {0x00fa, 0x8611},
+ {0x00ff, 0x8612},
+ {0x00f8, 0x8613},
+ {0x0000, 0x8614},
+ {0x0001, 0x863f},
+ {0x0000, 0x8640},
+ {0x0026, 0x8641},
+ {0x0045, 0x8642},
+ {0x0060, 0x8643},
+ {0x0075, 0x8644},
+ {0x0088, 0x8645},
+ {0x009b, 0x8646},
+ {0x00b0, 0x8647},
+ {0x00c5, 0x8648},
+ {0x00d2, 0x8649},
+ {0x00dc, 0x864a},
+ {0x00e5, 0x864b},
+ {0x00eb, 0x864c},
+ {0x00f0, 0x864d},
+ {0x00f6, 0x864e},
+ {0x00fa, 0x864f},
+ {0x00ff, 0x8650},
+ {0x0060, 0x8657},
+ {0x0010, 0x8658},
+ {0x0018, 0x8659},
+ {0x0005, 0x865a},
+ {0x0018, 0x8660},
+ {0x0003, 0x8509},
+ {0x0011, 0x850a},
+ {0x0032, 0x850b},
+ {0x0010, 0x850c},
+ {0x0021, 0x850d},
+ {0x0001, 0x8500},
+ {0x0000, 0x8508},
+ {0x0012, 0x8608},
+ {0x002c, 0x8609},
+ {0x0002, 0x860a},
+ {0x0039, 0x860b},
+ {0x00d0, 0x860c},
+ {0x00f7, 0x860d},
+ {0x00ed, 0x860e},
+ {0x00db, 0x860f},
+ {0x0039, 0x8610},
+ {0x0012, 0x8657},
+ {0x000c, 0x8619},
+ {0x0004, 0x861a},
+ {0x00a1, 0x8656},
+ {0x00c8, 0x8615},
+ {0x0032, 0x8616},
+
+ {0x0030, 0x8112},
+ {0x0020, 0x8112},
+ {0x0020, 0x8112},
+ {0x000f, 0x8402},
+ {0x0000, 0x8403},
+
+ {0x0090, 0x8110},
+ {0x0001, 0x8114},
+ {0x0001, 0x8114},
+ {0x0001, 0x8114},
+ {0x0003, 0x8114},
+ {0x0080, 0x8804},
+
+ {0x0003, 0x8801},
+ {0x0012, 0x8800},
+ {0x0004, 0x8801},
+ {0x0005, 0x8800},
+ {0x0005, 0x8801},
+ {0x0047, 0x8800},
+ {0x0006, 0x8801},
+ {0x0000, 0x8800},
+ {0x0007, 0x8801},
+ {0x00c0, 0x8800},
+ {0x0008, 0x8801},
+ {0x0003, 0x8800},
+ {0x000a, 0x8700},
+ {0x000e, 0x8801},
+ {0x0004, 0x8800},
+ {0x0005, 0x8801},
+ {0x0047, 0x8800},
+ {0x0006, 0x8801},
+ {0x0000, 0x8800},
+ {0x0007, 0x8801},
+ {0x00c0, 0x8800},
+ {0x0008, 0x8801},
+ {0x0003, 0x8800},
+ {0x0013, 0x8801},
+ {0x0001, 0x8800},
+ {0x0009, 0x8801},
+ {0x0000, 0x8800},
+ {0x000a, 0x8801},
+ {0x0000, 0x8800},
+ {0x000b, 0x8801},
+ {0x0000, 0x8800},
+ {0x000c, 0x8801},
+ {0x0000, 0x8800},
+ {0x000e, 0x8801},
+ {0x0004, 0x8800},
+ {0x000f, 0x8801},
+ {0x0000, 0x8800},
+ {0x0010, 0x8801},
+ {0x0006, 0x8800},
+ {0x0011, 0x8801},
+ {0x0006, 0x8800},
+ {0x0012, 0x8801},
+ {0x0000, 0x8800},
+ {0x0013, 0x8801},
+ {0x0001, 0x8800},
+ {0x000a, 0x8700},
+ {0x0000, 0x8702},
+ {0x0000, 0x8703},
+ {0x00c2, 0x8704},
+ {0x0001, 0x870c},
+ {0x0044, 0x8600},
+ {0x0002, 0x8606},
+ {0x0064, 0x8607},
+ {0x003a, 0x8601},
+ {0x0008, 0x8602},
+ {0x0044, 0x8600},
+ {0x0018, 0x8617},
+ {0x0008, 0x8618},
+ {0x00a1, 0x8656},
+ {0x0004, 0x865b},
+ {0x0002, 0x865c},
+ {0x0058, 0x865d},
+ {0x0048, 0x865e},
+ {0x0012, 0x8608},
+ {0x002c, 0x8609},
+ {0x0002, 0x860a},
+ {0x002c, 0x860b},
+ {0x00db, 0x860c},
+ {0x00f9, 0x860d},
+ {0x00f1, 0x860e},
+ {0x00e3, 0x860f},
+ {0x002c, 0x8610},
+ {0x006c, 0x8651},
+ {0x0041, 0x8652},
+ {0x0059, 0x8653},
+ {0x0040, 0x8654},
+ {0x00fa, 0x8611},
+ {0x00ff, 0x8612},
+ {0x00f8, 0x8613},
+ {0x0000, 0x8614},
+ {0x0001, 0x863f},
+ {0x0000, 0x8640},
+ {0x0026, 0x8641},
+ {0x0045, 0x8642},
+ {0x0060, 0x8643},
+ {0x0075, 0x8644},
+ {0x0088, 0x8645},
+ {0x009b, 0x8646},
+ {0x00b0, 0x8647},
+ {0x00c5, 0x8648},
+ {0x00d2, 0x8649},
+ {0x00dc, 0x864a},
+ {0x00e5, 0x864b},
+ {0x00eb, 0x864c},
+ {0x00f0, 0x864d},
+ {0x00f6, 0x864e},
+ {0x00fa, 0x864f},
+ {0x00ff, 0x8650},
+ {0x0060, 0x8657},
+ {0x0010, 0x8658},
+ {0x0018, 0x8659},
+ {0x0005, 0x865a},
+ {0x0018, 0x8660},
+ {0x0003, 0x8509},
+ {0x0011, 0x850a},
+ {0x0032, 0x850b},
+ {0x0010, 0x850c},
+ {0x0021, 0x850d},
+ {0x0001, 0x8500},
+ {0x0000, 0x8508},
+
+ {0x0012, 0x8608},
+ {0x002c, 0x8609},
+ {0x0002, 0x860a},
+ {0x0039, 0x860b},
+ {0x00d0, 0x860c},
+ {0x00f7, 0x860d},
+ {0x00ed, 0x860e},
+ {0x00db, 0x860f},
+ {0x0039, 0x8610},
+ {0x0012, 0x8657},
+ {0x0064, 0x8619},
 /* This line starts it all; it is not needed here */
 /* since it has been built into the driver */
/* jfm: don't start now */
-/* 590 * {0x0030, 0x8112}, */
+/* {0x0030, 0x8112}, */
{}
};
@@ -1109,14 +1023,14 @@ static const u16 spca508_vista_init_data[][2] = {
{0x0008, 0x8200}, /* Clear register */
{0x0000, 0x870b}, /* Reset CTL3 */
{0x0020, 0x8112}, /* Video Drop packet enable */
- {0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */
+ {0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */
{0x0000, 0x8110}, /* Disable everything */
{0x0000, 0x8114}, /* Software GPIO output data */
{0x0000, 0x8114},
{0x0003, 0x8111},
{0x0000, 0x8111},
- {0x0090, 0x8110}, /* Enable: SSI output, External 2X clock output */
+ {0x0090, 0x8110}, /* Enable: SSI output, External 2X clock output */
{0x0020, 0x8112},
{0x0000, 0x8114},
{0x0001, 0x8114},
@@ -1129,191 +1043,143 @@ static const u16 spca508_vista_init_data[][2] = {
{0x00ba, 0x8804}, /* SSI Slave address */
{0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802}, /* Will write 2 bytes (DATA1+DATA2) */
{0x0020, 0x8801}, /* Register address for SSI read/write */
{0x0044, 0x8805}, /* DATA2 */
{0x0004, 0x8800}, /* DATA1 -> write triggered */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0009, 0x8801},
{0x0042, 0x8805},
{0x0001, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x003c, 0x8801},
{0x0001, 0x8805},
{0x0000, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0001, 0x8801},
{0x000a, 0x8805},
{0x0000, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0002, 0x8801},
{0x0000, 0x8805},
{0x0000, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0003, 0x8801},
{0x0027, 0x8805},
{0x0001, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0004, 0x8801},
{0x0065, 0x8805},
{0x0001, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0005, 0x8801},
{0x0003, 0x8805},
{0x0000, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0006, 0x8801},
{0x001c, 0x8805},
{0x0000, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0007, 0x8801},
{0x002a, 0x8805},
{0x0000, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x000e, 0x8801},
{0x0000, 0x8805},
{0x0000, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0028, 0x8801},
{0x002e, 0x8805},
{0x0000, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0039, 0x8801},
{0x0013, 0x8805},
{0x0000, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x003b, 0x8801},
{0x000c, 0x8805},
{0x0000, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0035, 0x8801},
{0x0028, 0x8805},
{0x0000, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
- /* READ { 0, 0x0001, 0x8802 } ->
- 0000: 10 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
+ /* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0009, 0x8801},
{0x0042, 0x8805},
{0x0001, 0x8800},
- /* READ { 0, 0x0001, 0x8803 } ->
- 0000: 00 */
+ /* READ { 0x0001, 0x8803 } -> 0000: 00 */
{0x0050, 0x8703},
{0x0002, 0x8704}, /* External input CKIx1 */
{0x0001, 0x870c}, /* Select CKOx2 output */
{0x009a, 0x8600}, /* Line memory Read Counter (L) */
- {0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */
+ {0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */
{0x0023, 0x8601},
{0x0010, 0x8602},
{0x000a, 0x8603},
- {0x009A, 0x8600},
+ {0x009a, 0x8600},
{0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */
{0x0003, 0x865c}, /* Vertical offset for valid lines (L) */
{0x0058, 0x865d}, /* Horizontal valid pixels window (L) */
@@ -1329,7 +1195,7 @@ static const u16 spca508_vista_init_data[][2] = {
{0x0005, 0x860a}, /* ... */
{0x0025, 0x860b},
{0x00e1, 0x860c},
- {0x00fa, 0x860D},
+ {0x00fa, 0x860d},
{0x00f4, 0x860e},
{0x00e8, 0x860f},
{0x0025, 0x8610}, /* A33 Coef. */
@@ -1344,11 +1210,12 @@ static const u16 spca508_vista_init_data[][2] = {
{0x0040, 0x8654}, /* Gb gain for white balance (L) */
{0x0001, 0x863f}, /* Enable fixed gamma correction */
- {0x00a1, 0x8656}, /* Size - Window1: 256x256, Window2: 128x128 */
- /* UV division: UV no change, Enable New edge enhancement */
+ {0x00a1, 0x8656}, /* Size - Window1: 256x256, Window2: 128x128,
+ * UV division: UV no change,
+ * Enable New edge enhancement */
{0x0018, 0x8657}, /* Edge gain high threshold */
{0x0020, 0x8658}, /* Edge gain low threshold */
- {0x000A, 0x8659}, /* Edge bandwidth high threshold */
+ {0x000a, 0x8659}, /* Edge bandwidth high threshold */
{0x0005, 0x865a}, /* Edge bandwidth low threshold */
{0x0064, 0x8607}, /* UV filter enable */
@@ -1384,29 +1251,20 @@ static const u16 spca508_vista_init_data[][2] = {
{0x0000, 0x86b4},
{0x001e, 0x8660},
- /* READ { 0, 0x0000, 0x8608 } ->
- 0000: 13 */
- /* READ { 0, 0x0000, 0x8609 } ->
- 0000: 28 */
- /* READ { 0, 0x0000, 0x8610 } ->
- 0000: 05 */
- /* READ { 0, 0x0000, 0x8611 } ->
- 0000: 25 */
- /* READ { 0, 0x0000, 0x8612 } ->
- 0000: e1 */
- /* READ { 0, 0x0000, 0x8613 } ->
- 0000: fa */
- /* READ { 0, 0x0000, 0x8614 } ->
- 0000: f4 */
- /* READ { 0, 0x0000, 0x8615 } ->
- 0000: e8 */
- /* READ { 0, 0x0000, 0x8616 } ->
- 0000: 25 */
+ /* READ { 0x0000, 0x8608 } -> 0000: 13 */
+ /* READ { 0x0000, 0x8609 } -> 0000: 28 */
+ /* READ { 0x0000, 0x8610 } -> 0000: 05 */
+ /* READ { 0x0000, 0x8611 } -> 0000: 25 */
+ /* READ { 0x0000, 0x8612 } -> 0000: e1 */
+ /* READ { 0x0000, 0x8613 } -> 0000: fa */
+ /* READ { 0x0000, 0x8614 } -> 0000: f4 */
+ /* READ { 0x0000, 0x8615 } -> 0000: e8 */
+ /* READ { 0x0000, 0x8616 } -> 0000: 25 */
{}
};
static int reg_write(struct usb_device *dev,
- __u16 index, __u16 value)
+ u16 index, u16 value)
{
int ret;
@@ -1425,7 +1283,7 @@ static int reg_write(struct usb_device *dev,
/* read 1 byte */
/* returns: negative is error, pos or zero is data */
static int reg_read(struct gspca_dev *gspca_dev,
- __u16 index) /* wIndex */
+ u16 index) /* wIndex */
{
int ret;
@@ -1447,16 +1305,16 @@ static int reg_read(struct gspca_dev *gspca_dev,
}
static int write_vector(struct gspca_dev *gspca_dev,
- const u16 data[][2])
+ const u16 (*data)[2])
{
struct usb_device *dev = gspca_dev->dev;
- int ret, i = 0;
+ int ret;
- while (data[i][1] != 0) {
- ret = reg_write(dev, data[i][1], data[i][0]);
+ while ((*data)[1] != 0) {
+ ret = reg_write(dev, (*data)[1], (*data)[0]);
if (ret < 0)
return ret;
- i++;
+ data++;
}
return 0;
}
@@ -1468,6 +1326,15 @@ static int sd_config(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
int data1, data2;
+ const u16 (*init_data)[2];
+ static const u16 (*(init_data_tb[]))[2] = {
+ spca508_vista_init_data, /* CreativeVista 0 */
+ spca508_sightcam_init_data, /* HamaUSBSightcam 1 */
+ spca508_sightcam2_init_data, /* HamaUSBSightcam2 2 */
+ spca508cs110_init_data, /* IntelEasyPCCamera 3 */
+ spca508cs110_init_data, /* MicroInnovationIC200 4 */
+ spca508_init_data, /* ViewQuestVQ110 5 */
+ };
/* Read from global register the USB product and vendor IDs, just to
* prove that we can communicate with the device. This works, which
@@ -1491,37 +1358,13 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->subtype = id->driver_info;
sd->brightness = BRIGHTNESS_DEF;
- switch (sd->subtype) {
- case ViewQuestVQ110:
- if (write_vector(gspca_dev, spca508_init_data))
- return -1;
- break;
- default:
-/* case MicroInnovationIC200: */
-/* case IntelEasyPCCamera: */
- if (write_vector(gspca_dev, spca508cs110_init_data))
- return -1;
- break;
- case HamaUSBSightcam:
- if (write_vector(gspca_dev, spca508_sightcam_init_data))
- return -1;
- break;
- case HamaUSBSightcam2:
- if (write_vector(gspca_dev, spca508_sightcam2_init_data))
- return -1;
- break;
- case CreativeVista:
- if (write_vector(gspca_dev, spca508_vista_init_data))
- return -1;
- break;
- }
- return 0; /* success */
+ init_data = init_data_tb[sd->subtype];
+ return write_vector(gspca_dev, init_data);
}
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
-/* write_vector(gspca_dev, spca508_open_data); */
return 0;
}
@@ -1529,7 +1372,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
{
int mode;
- mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
+ mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
reg_write(gspca_dev->dev, 0x8500, mode);
switch (mode) {
case 0:
@@ -1554,7 +1397,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
struct gspca_frame *frame, /* target */
- __u8 *data, /* isoc packet */
+ u8 *data, /* isoc packet */
int len) /* iso packet length */
{
switch (data[0]) {
@@ -1567,7 +1410,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
data, len);
break;
case 0xff: /* drop */
-/* gspca_dev->last_packet_type = DISCARD_PACKET; */
break;
default:
data += 1;
@@ -1581,7 +1423,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- __u8 brightness = sd->brightness;
+ u8 brightness = sd->brightness;
/* MX seem contrast */
reg_write(gspca_dev->dev, 0x8651, brightness);
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index c99c5e34e21..27e82b35f3e 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -34,8 +34,8 @@ struct sd {
__u16 exposure; /* rev12a only */
#define EXPOSURE_MIN 1
-#define EXPOSURE_DEF 200
-#define EXPOSURE_MAX (4095 - 900) /* see set_exposure */
+#define EXPOSURE_DEF 700 /* == 10 fps */
+#define EXPOSURE_MAX (2047 + 325) /* see setexposure */
__u8 contrast; /* rev72a only */
#define CONTRAST_MIN 0x00
@@ -48,9 +48,9 @@ struct sd {
#define BRIGHTNESS_MAX 0x3f
__u8 white;
-#define WHITE_MIN 1
-#define WHITE_DEF 0x40
-#define WHITE_MAX 0x7f
+#define HUE_MIN 1
+#define HUE_DEF 0x40
+#define HUE_MAX 0x7f
__u8 autogain;
#define AUTOGAIN_MIN 0
@@ -58,9 +58,9 @@ struct sd {
#define AUTOGAIN_MAX 1
__u8 gain; /* rev12a only */
-#define GAIN_MIN 0x0
-#define GAIN_DEF 0x24
-#define GAIN_MAX 0x24
+#define GAIN_MIN 0
+#define GAIN_DEF 63
+#define GAIN_MAX 255
#define EXPO12A_DEF 3
__u8 expo12a; /* expo/gain? for rev 12a */
@@ -461,7 +461,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
}
sd->brightness = BRIGHTNESS_DEF;
sd->contrast = CONTRAST_DEF;
- sd->white = WHITE_DEF;
+ sd->white = HUE_DEF;
sd->exposure = EXPOSURE_DEF;
sd->autogain = AUTOGAIN_DEF;
sd->gain = GAIN_DEF;
@@ -549,8 +549,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
static void setexposure(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- int expo;
- int clock_divider;
+ int i, expo = 0;
/* Register 0x8309 controls exposure for the spca561,
the basic exposure setting goes from 1-2047, where 1 is completely
@@ -564,16 +563,22 @@ static void setexposure(struct gspca_dev *gspca_dev)
 configure a divider for the base framerate which is used at the
exposure setting of 1-300. These bits configure the base framerate
according to the following formula: fps = 60 / (value + 2) */
- if (sd->exposure < 2048) {
- expo = sd->exposure;
- clock_divider = 0;
- } else {
- /* Add 900 to make the 0 setting of the second part of the
- exposure equal to the 2047 setting of the first part. */
- expo = (sd->exposure - 2048) + 900;
- clock_divider = 3;
+
+ /* We choose to set the fixed framerate divisor bits as soon as
+ possible, because a high basic exposure setting without the fixed
+ divider, combined with high gains, makes the cam stop */
+ int table[] = { 0, 450, 550, 625, EXPOSURE_MAX };
+
+ for (i = 0; i < ARRAY_SIZE(table) - 1; i++) {
+ if (sd->exposure <= table[i + 1]) {
+ expo = sd->exposure - table[i];
+ if (i)
+ expo += 300;
+ expo |= i << 11;
+ break;
+ }
}
- expo |= clock_divider << 11;
+
gspca_dev->usb_buf[0] = expo;
gspca_dev->usb_buf[1] = expo >> 8;
reg_w_buf(gspca_dev, 0x8309, 2);
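The comment above gives the register model: an 11-bit basic exposure in the low bits of register 0x8309, with base-framerate divider bits or-ed in at bit 11 and fps = 60 / (value + 2). The new loop maps the control range onto that encoding through a threshold table. A stand-alone, host-side sketch of the same mapping, illustrative only (spca561_expo_reg() is an invented name; the thresholds and the +300 offset are copied from the hunk above):

/*
 * Stand-alone sketch (not driver code): reproduces the exposure mapping
 * above so the value written to register 0x8309 can be inspected on the
 * host.  spca561_expo_reg() is a made-up helper name.
 */
#include <stdio.h>

#define EXPOSURE_MAX (2047 + 325)
#define N_ELEMENTS(a) ((int)(sizeof(a) / sizeof((a)[0])))

static int spca561_expo_reg(int exposure)
{
	/* thresholds at which the base-framerate divider steps up */
	static const int table[] = { 0, 450, 550, 625, EXPOSURE_MAX };
	int i, expo = 0;

	for (i = 0; i < N_ELEMENTS(table) - 1; i++) {
		if (exposure <= table[i + 1]) {
			expo = exposure - table[i];
			if (i)
				expo += 300;	/* offset applied once a divider is selected, as in the driver */
			expo |= i << 11;	/* divider bits at bit 11; the comment above gives fps = 60 / (value + 2) */
			break;
		}
	}
	return expo;
}

int main(void)
{
	static const int samples[] = { 1, 450, 700 /* EXPOSURE_DEF */, EXPOSURE_MAX };
	int i;

	for (i = 0; i < N_ELEMENTS(samples); i++)
		printf("exposure %4d -> reg 0x8309 = 0x%04x\n",
		       samples[i], spca561_expo_reg(samples[i]));
	return 0;	/* e.g. 700 maps to 0x1977 */
}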
@@ -584,7 +589,16 @@ static void setgain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- gspca_dev->usb_buf[0] = sd->gain;
+ /* gain reg: low 6 bits hold a 0-63 gain; bits 6 and 7 each double the
+ sensitivity when set, so 31 with one of them set == 63, and 15
+ with both of them set == 63 */
+ if (sd->gain < 64)
+ gspca_dev->usb_buf[0] = sd->gain;
+ else if (sd->gain < 128)
+ gspca_dev->usb_buf[0] = (sd->gain / 2) | 0x40;
+ else
+ gspca_dev->usb_buf[0] = (sd->gain / 4) | 0xc0;
+
gspca_dev->usb_buf[1] = 0;
reg_w_buf(gspca_dev, 0x8335, 2);
}
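The gain comment above describes the encoding: a 6-bit linear gain in the low bits, with bits 6 and 7 each doubling the sensitivity, so the 0-255 control range folds into three ranges as in setgain(). A stand-alone sketch, illustrative only (the helper name is invented):

/* Stand-alone sketch of the gain encoding used in setgain() above. */
#include <stdio.h>

static unsigned int spca561_gain_reg(unsigned int gain)
{
	if (gain < 64)
		return gain;			/* plain 6-bit gain */
	else if (gain < 128)
		return (gain / 2) | 0x40;	/* bit 6: sensitivity x2 */
	else
		return (gain / 4) | 0xc0;	/* bits 6 and 7: sensitivity x4 */
}

int main(void)
{
	static const unsigned int samples[] = { 31, 63, 64, 127, 128, 255 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("gain %3u -> reg 0x8335 = 0x%02x\n",
		       samples[i], spca561_gain_reg(samples[i]));
	return 0;	/* 63 -> 0x3f, 64 -> 0x60, 255 -> 0xff */
}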
@@ -629,8 +643,7 @@ static int sd_start_12a(struct gspca_dev *gspca_dev)
reg_w_buf(gspca_dev, 0x8391, 8);
reg_w_buf(gspca_dev, 0x8390, 8);
setwhite(gspca_dev);
- setautogain(gspca_dev);
-/* setgain(gspca_dev); */
+ setgain(gspca_dev);
setexposure(gspca_dev);
return 0;
}
@@ -762,18 +775,6 @@ static void do_autogain(struct gspca_dev *gspca_dev)
i2c_write(gspca_dev, expotimes | pixelclk, 0x09);
}
break;
- case Rev012A:
- reg_r(gspca_dev, 0x8330, 2);
- if (gspca_dev->usb_buf[1] > 0x08) {
- gspca_dev->usb_buf[0] = ++sd->expo12a;
- gspca_dev->usb_buf[1] = 0;
- reg_w_buf(gspca_dev, 0x8339, 2);
- } else if (gspca_dev->usb_buf[1] < 0x02) {
- gspca_dev->usb_buf[0] = --sd->expo12a;
- gspca_dev->usb_buf[1] = 0;
- reg_w_buf(gspca_dev, 0x8339, 2);
- }
- break;
}
}
@@ -928,13 +929,13 @@ static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
static struct ctrl sd_ctrls_12a[] = {
{
{
- .id = V4L2_CID_DO_WHITE_BALANCE,
+ .id = V4L2_CID_HUE,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "White Balance",
- .minimum = WHITE_MIN,
- .maximum = WHITE_MAX,
+ .name = "Hue",
+ .minimum = HUE_MIN,
+ .maximum = HUE_MAX,
.step = 1,
- .default_value = WHITE_DEF,
+ .default_value = HUE_DEF,
},
.set = sd_setwhite,
.get = sd_getwhite,
@@ -954,19 +955,6 @@ static struct ctrl sd_ctrls_12a[] = {
},
{
{
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = AUTOGAIN_MIN,
- .maximum = AUTOGAIN_MAX,
- .step = 1,
- .default_value = AUTOGAIN_DEF,
- },
- .set = sd_setautogain,
- .get = sd_getautogain,
- },
- {
- {
.id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Gain",
@@ -983,13 +971,13 @@ static struct ctrl sd_ctrls_12a[] = {
static struct ctrl sd_ctrls_72a[] = {
{
{
- .id = V4L2_CID_DO_WHITE_BALANCE,
+ .id = V4L2_CID_HUE,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "White Balance",
- .minimum = WHITE_MIN,
- .maximum = WHITE_MAX,
+ .name = "Hue",
+ .minimum = HUE_MIN,
+ .maximum = HUE_MAX,
.step = 1,
- .default_value = WHITE_DEF,
+ .default_value = HUE_DEF,
},
.set = sd_setwhite,
.get = sd_getwhite,
@@ -1046,7 +1034,6 @@ static const struct sd_desc sd_desc_12a = {
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
-/* .dq_callback = do_autogain, * fixme */
};
static const struct sd_desc sd_desc_72a = {
.name = MODULE_NAME,
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 2e1cdf068fd..715a68f0156 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -309,6 +309,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
struct sd *dev = (struct sd *) gspca_dev;
/* We don't use the buffer gspca allocates so make it small. */
+ cam->bulk = 1;
cam->bulk_size = 64;
INIT_WORK(&dev->work_struct, sq905_dostream);
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index 0bcb74a1b14..91689250543 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -206,6 +206,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = 1;
/* We don't use the buffer gspca allocates so make it small. */
cam->bulk_size = 32;
+ cam->bulk = 1;
INIT_WORK(&dev->work_struct, sq905c_dostream);
return 0;
}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 9dff2e65b11..e573c340632 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -293,8 +293,6 @@ static void stv06xx_stopN(struct gspca_dev *gspca_dev)
goto out;
err = sd->sensor->stop(sd);
- if (err < 0)
- goto out;
out:
if (err < 0)
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
index 69c77c932fc..11a0c002f5d 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
@@ -80,12 +80,26 @@ static const struct ctrl vv6410_ctrl[] = {
.minimum = 0,
.maximum = 15,
.step = 1,
- .default_value = 0
+ .default_value = 10
},
.set = vv6410_set_analog_gain,
.get = vv6410_get_analog_gain
+ },
+#define EXPOSURE_IDX 3
+ {
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .minimum = 0,
+ .maximum = 32768,
+ .step = 1,
+ .default_value = 20000
+ },
+ .set = vv6410_set_exposure,
+ .get = vv6410_get_exposure
}
-};
+ };
static int vv6410_probe(struct sd *sd)
{
@@ -121,6 +135,7 @@ static int vv6410_probe(struct sd *sd)
static int vv6410_init(struct sd *sd)
{
int err = 0, i;
+ s32 *sensor_settings = sd->sensor_priv;
for (i = 0; i < ARRAY_SIZE(stv_bridge_init); i++) {
/* if NULL then len contains single value */
@@ -142,6 +157,16 @@ static int vv6410_init(struct sd *sd)
err = stv06xx_write_sensor_bytes(sd, (u8 *) vv6410_sensor_init,
ARRAY_SIZE(vv6410_sensor_init));
+ if (err < 0)
+ return err;
+
+ err = vv6410_set_exposure(&sd->gspca_dev,
+ sensor_settings[EXPOSURE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = vv6410_set_analog_gain(&sd->gspca_dev,
+ sensor_settings[GAIN_IDX]);
return (err < 0) ? err : 0;
}
@@ -318,3 +343,50 @@ static int vv6410_set_analog_gain(struct gspca_dev *gspca_dev, __s32 val)
return (err < 0) ? err : 0;
}
+
+static int vv6410_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[EXPOSURE_IDX];
+
+ PDEBUG(D_V4L2, "Read exposure %d", *val);
+
+ return 0;
+}
+
+static int vv6410_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+ unsigned int fine, coarse;
+
+ sensor_settings[EXPOSURE_IDX] = val;
+
+ val = (val * val >> 14) + val / 4;
+
+ fine = val % VV6410_CIF_LINELENGTH;
+ coarse = min(512, val / VV6410_CIF_LINELENGTH);
+
+ PDEBUG(D_V4L2, "Set coarse exposure to %d, fine expsure to %d",
+ coarse, fine);
+
+ err = stv06xx_write_sensor(sd, VV6410_FINEH, fine >> 8);
+ if (err < 0)
+ goto out;
+
+ err = stv06xx_write_sensor(sd, VV6410_FINEL, fine & 0xff);
+ if (err < 0)
+ goto out;
+
+ err = stv06xx_write_sensor(sd, VV6410_COARSEH, coarse >> 8);
+ if (err < 0)
+ goto out;
+
+ err = stv06xx_write_sensor(sd, VV6410_COARSEL, coarse & 0xff);
+
+out:
+ return err;
+}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
index 95ac55891bd..487d4055534 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
@@ -173,6 +173,8 @@
#define VV6410_SUBSAMPLE 0x01
#define VV6410_CROP_TO_QVGA 0x02
+#define VV6410_CIF_LINELENGTH 415
+
static int vv6410_probe(struct sd *sd);
static int vv6410_start(struct sd *sd);
static int vv6410_init(struct sd *sd);
@@ -187,6 +189,8 @@ static int vv6410_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
static int vv6410_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
static int vv6410_get_analog_gain(struct gspca_dev *gspca_dev, __s32 *val);
static int vv6410_set_analog_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int vv6410_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
+static int vv6410_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
const struct stv06xx_sensor stv06xx_sensor_vv6410 = {
.name = "ST VV6410",
@@ -242,12 +246,6 @@ static const u8 vv6410_sensor_init[][2] = {
/* Pre-clock generator divide off */
{VV6410_DATAFORMAT, BIT(7) | BIT(0)},
- /* Exposure registers */
- {VV6410_FINEH, VV6410_FINE_EXPOSURE >> 8},
- {VV6410_FINEL, VV6410_FINE_EXPOSURE & 0xff},
- {VV6410_COARSEH, VV6410_COARSE_EXPOSURE >> 8},
- {VV6410_COARSEL, VV6410_COARSE_EXPOSURE & 0xff},
- {VV6410_ANALOGGAIN, 0xf0 | VV6410_DEFAULT_GAIN},
{VV6410_CLKDIV, VV6410_CLK_DIV_2},
/* System registers */
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index c2b8c10c075..9623f294bda 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -32,9 +32,6 @@ MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- __u8 packet[ISO_MAX_SIZE + 128];
- /* !! no more than 128 ff in an ISO packet */
-
unsigned char brightness;
unsigned char contrast;
unsigned char colors;
@@ -1103,7 +1100,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
int i, sof = 0;
- unsigned char *s, *d;
static unsigned char ffd9[] = {0xff, 0xd9};
/* frames are jpeg 4.1.1 without 0xff escape */
@@ -1177,22 +1173,19 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
}
/* add 0x00 after 0xff */
- for (i = len; --i >= 0; )
- if (data[i] == 0xff)
- break;
- if (i < 0) { /* no 0xff */
- gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
- return;
- }
- s = data;
- d = sd->packet;
- for (i = 0; i < len; i++) {
- *d++ = *s++;
- if (s[-1] == 0xff)
- *d++ = 0x00;
- }
- gspca_frame_add(gspca_dev, INTER_PACKET, frame,
- sd->packet, d - sd->packet);
+ i = 0;
+ do {
+ if (data[i] == 0xff) {
+ gspca_frame_add(gspca_dev, INTER_PACKET, frame,
+ data, i + 1);
+ len -= i;
+ data += i;
+ *data = 0x00;
+ i = 0;
+ }
+ i++;
+ } while (i < len);
+ gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
}
static void setbrightness(struct gspca_dev *gspca_dev)
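The rewritten scan loop performs the JPEG 0xff byte-stuffing in place: each chunk up to and including an 0xff is forwarded, then the 0xff slot itself is reused to hold the stuffed 0x00 that opens the next chunk, so the old per-device copy buffer is no longer needed. A stand-alone sketch of the same walk, illustrative only (frame_add() stands in for gspca_frame_add()):

/* Stand-alone sketch of the in-place 0xff -> 0xff 0x00 stuffing above. */
#include <stdio.h>
#include <string.h>

static unsigned char out[32];
static int out_len;

/* stand-in for gspca_frame_add(): just append the chunk to a buffer */
static void frame_add(const unsigned char *data, int len)
{
	memcpy(out + out_len, data, len);
	out_len += len;
}

int main(void)
{
	unsigned char pkt[] = { 0x12, 0xff, 0x34, 0xff, 0x56 };
	unsigned char *data = pkt;
	int len = sizeof(pkt);
	int i = 0;

	do {
		if (data[i] == 0xff) {
			frame_add(data, i + 1);	/* emit up to and including the 0xff */
			len -= i;
			data += i;		/* data now points at the emitted 0xff... */
			*data = 0x00;		/* ...which is overwritten with the stuffed 0x00 */
			i = 0;
		}
		i++;
	} while (i < len);
	frame_add(data, len);			/* remaining bytes (here: the stuffed 0x00 and 0x56) */

	for (i = 0; i < out_len; i++)
		printf("%02x ", out[i]);
	printf("\n");				/* prints: 12 ff 00 34 ff 00 56 */
	return 0;
}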
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index f63e37e2e4f..404214b8cd2 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -697,7 +697,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
return -EINVAL;
}
- if (sd->sensor != SENSOR_OTHER) {
+ if (sd->sensor == SENSOR_OM6802) {
reg_w_buf(gspca_dev, n1, sizeof n1);
i = 5;
while (--i >= 0) {
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index e4e933c400b..26dd155efcc 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -42,7 +42,7 @@ struct sd {
char bridge;
#define BRIDGE_VC0321 0
#define BRIDGE_VC0323 1
- char sensor;
+ u8 sensor;
#define SENSOR_HV7131R 0
#define SENSOR_MI0360 1
#define SENSOR_MI1310_SOC 2
@@ -159,17 +159,17 @@ static const struct v4l2_pix_format vc0323_mode[] = {
.priv = 2},
};
static const struct v4l2_pix_format bi_mode[] = {
- {320, 240, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE,
+ {320, 240, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2},
- {640, 480, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE,
+ {640, 480, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480 * 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
- {1280, 1024, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE,
+ {1280, 1024, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
.bytesperline = 1280,
.sizeimage = 1280 * 1024 * 2,
.colorspace = V4L2_COLORSPACE_SRGB,
@@ -2453,6 +2453,17 @@ static int sd_config(struct gspca_dev *gspca_dev,
struct usb_device *dev = gspca_dev->dev;
struct cam *cam;
int sensor;
+ static u8 npkt[] = { /* number of packets per ISOC message */
+ 64, /* HV7131R 0 */
+ 32, /* MI0360 1 */
+ 32, /* MI1310_SOC 2 */
+ 64, /* MI1320 3 */
+ 128, /* MI1320_SOC 4 */
+ 32, /* OV7660 5 */
+ 64, /* OV7670 6 */
+ 128, /* PO1200 7 */
+ 128, /* PO3130NC 8 */
+ };
cam = &gspca_dev->cam;
sd->bridge = id->driver_info;
@@ -2508,6 +2519,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
case SENSOR_MI1320_SOC:
cam->cam_mode = bi_mode;
cam->nmodes = ARRAY_SIZE(bi_mode);
+ cam->input_flags = V4L2_IN_ST_VFLIP |
+ V4L2_IN_ST_HFLIP;
break;
default:
cam->cam_mode = vc0323_mode;
@@ -2515,6 +2528,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
break;
}
}
+ cam->npkt = npkt[sd->sensor];
sd->hflip = HFLIP_DEF;
sd->vflip = VFLIP_DEF;
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 4fe01d8b6c8..08422d315e6 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -6307,7 +6307,7 @@ static __u16 i2c_read(struct gspca_dev *gspca_dev,
retbyte = reg_r_i(gspca_dev, 0x0091); /* read status */
 retval = reg_r_i(gspca_dev, 0x0095); /* read low byte */
 retval |= reg_r_i(gspca_dev, 0x0096) << 8; /* read high byte */
- PDEBUG(D_USBO, "i2c r [%02x] -> %04x (%02x)",
+ PDEBUG(D_USBI, "i2c r [%02x] -> %04x (%02x)",
reg, retval, retbyte);
return retval;
}
@@ -6868,7 +6868,6 @@ static const struct sensor_by_chipset_revision chipset_revision_sensor[] = {
{0x8001, 0x13},
{0x8000, 0x14}, /* CS2102K */
{0x8400, 0x15}, /* TAS5130K */
- {0x4001, 0x16}, /* ADCM2700 */
};
static int vga_3wr_probe(struct gspca_dev *gspca_dev)
@@ -6904,12 +6903,15 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
retword |= reg_r(gspca_dev, 0x000a);
PDEBUG(D_PROBE, "probe 3wr vga 1 0x%04x", retword);
reg_r(gspca_dev, 0x0010);
- /* this is tested only once anyway */
- for (i = 0; i < ARRAY_SIZE(chipset_revision_sensor); i++) {
- if (chipset_revision_sensor[i].revision == retword) {
- sd->chip_revision = retword;
- send_unknown(dev, SENSOR_PB0330);
- return chipset_revision_sensor[i].internal_sensor_id;
+ /* value 0x4001 is meaningless */
+ if (retword != 0x4001) {
+ for (i = 0; i < ARRAY_SIZE(chipset_revision_sensor); i++) {
+ if (chipset_revision_sensor[i].revision == retword) {
+ sd->chip_revision = retword;
+ send_unknown(dev, SENSOR_PB0330);
+ return chipset_revision_sensor[i]
+ .internal_sensor_id;
+ }
}
}
@@ -6980,12 +6982,12 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
reg_w(dev, 0x01, 0x0001);
reg_w(dev, 0x03, 0x0012);
reg_w(dev, 0x01, 0x0012);
- reg_w(dev, 0x05, 0x0001);
+ reg_w(dev, 0x05, 0x0012);
reg_w(dev, 0xd3, 0x008b);
retword = i2c_read(gspca_dev, 0x01);
if (retword != 0) {
PDEBUG(D_PROBE, "probe 3wr vga type 0a ? ret: %04x", retword);
- return retword;
+ return 0x16; /* adcm2700 (6100/6200) */
}
return -1;
}
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index 3e6ffee8dfe..ccd47f57f42 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -181,7 +181,7 @@ static int hdpvr_submit_buffers(struct hdpvr_device *dev)
buff_list);
if (buf->status != BUFSTAT_AVAILABLE) {
v4l2_err(&dev->v4l2_dev,
- "buffer not marked as availbale\n");
+ "buffer not marked as available\n");
ret = -EFAULT;
goto err;
}
diff --git a/drivers/media/video/hexium_gemini.c b/drivers/media/video/hexium_gemini.c
index 8e1463ee1b6..71c211402eb 100644
--- a/drivers/media/video/hexium_gemini.c
+++ b/drivers/media/video/hexium_gemini.c
@@ -224,7 +224,7 @@ static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));
- if (i->index < 0 || i->index >= HEXIUM_INPUTS)
+ if (i->index >= HEXIUM_INPUTS)
return -EINVAL;
memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input));
diff --git a/drivers/media/video/hexium_orion.c b/drivers/media/video/hexium_orion.c
index 2bc39f62845..39d65ca41c6 100644
--- a/drivers/media/video/hexium_orion.c
+++ b/drivers/media/video/hexium_orion.c
@@ -325,7 +325,7 @@ static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));
- if (i->index < 0 || i->index >= HEXIUM_INPUTS)
+ if (i->index >= HEXIUM_INPUTS)
return -EINVAL;
memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input));
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 092c7da0f37..86f2fefe1ed 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -74,7 +74,7 @@ static int get_key_haup_common(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw,
int start, range, toggle, dev, code, ircode;
/* poll IR chip */
- if (size != i2c_master_recv(&ir->c,buf,size))
+ if (size != i2c_master_recv(ir->c, buf, size))
return -EIO;
/* split rc5 data block ... */
@@ -137,7 +137,7 @@ static int get_key_pixelview(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
unsigned char b;
/* poll IR chip */
- if (1 != i2c_master_recv(&ir->c,&b,1)) {
+ if (1 != i2c_master_recv(ir->c, &b, 1)) {
dprintk(1,"read error\n");
return -EIO;
}
@@ -151,7 +151,7 @@ static int get_key_pv951(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
unsigned char b;
/* poll IR chip */
- if (1 != i2c_master_recv(&ir->c,&b,1)) {
+ if (1 != i2c_master_recv(ir->c, &b, 1)) {
dprintk(1,"read error\n");
return -EIO;
}
@@ -171,7 +171,7 @@ static int get_key_fusionhdtv(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
unsigned char buf[4];
/* poll IR chip */
- if (4 != i2c_master_recv(&ir->c,buf,4)) {
+ if (4 != i2c_master_recv(ir->c, buf, 4)) {
dprintk(1,"read error\n");
return -EIO;
}
@@ -195,7 +195,7 @@ static int get_key_knc1(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
unsigned char b;
/* poll IR chip */
- if (1 != i2c_master_recv(&ir->c,&b,1)) {
+ if (1 != i2c_master_recv(ir->c, &b, 1)) {
dprintk(1,"read error\n");
return -EIO;
}
@@ -222,12 +222,12 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir,
u32 *ir_key, u32 *ir_raw)
{
unsigned char subaddr, key, keygroup;
- struct i2c_msg msg[] = { { .addr = ir->c.addr, .flags = 0,
+ struct i2c_msg msg[] = { { .addr = ir->c->addr, .flags = 0,
.buf = &subaddr, .len = 1},
- { .addr = ir->c.addr, .flags = I2C_M_RD,
+ { .addr = ir->c->addr, .flags = I2C_M_RD,
.buf = &key, .len = 1} };
subaddr = 0x0d;
- if (2 != i2c_transfer(ir->c.adapter, msg, 2)) {
+ if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
dprintk(1, "read error\n");
return -EIO;
}
@@ -237,7 +237,7 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir,
subaddr = 0x0b;
msg[1].buf = &keygroup;
- if (2 != i2c_transfer(ir->c.adapter, msg, 2)) {
+ if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
dprintk(1, "read error\n");
return -EIO;
}
@@ -286,7 +286,7 @@ static void ir_work(struct work_struct *work)
/* MSI TV@nywhere Plus requires more frequent polling
otherwise it will miss some keypresses */
- if (ir->c.adapter->id == I2C_HW_SAA7134 && ir->c.addr == 0x30)
+ if (ir->c->adapter->id == I2C_HW_SAA7134 && ir->c->addr == 0x30)
polling_interval = 50;
ir_key_poll(ir);
@@ -295,34 +295,15 @@ static void ir_work(struct work_struct *work)
/* ----------------------------------------------------------------------- */
-static int ir_attach(struct i2c_adapter *adap, int addr,
- unsigned short flags, int kind);
-static int ir_detach(struct i2c_client *client);
-static int ir_probe(struct i2c_adapter *adap);
-
-static struct i2c_driver driver = {
- .driver = {
- .name = "ir-kbd-i2c",
- },
- .id = I2C_DRIVERID_INFRARED,
- .attach_adapter = ir_probe,
- .detach_client = ir_detach,
-};
-
-static struct i2c_client client_template =
-{
- .name = "unset",
- .driver = &driver
-};
-
-static int ir_attach(struct i2c_adapter *adap, int addr,
- unsigned short flags, int kind)
+static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
IR_KEYTAB_TYPE *ir_codes = NULL;
- char *name;
+ const char *name = NULL;
int ir_type;
struct IR_i2c *ir;
struct input_dev *input_dev;
+ struct i2c_adapter *adap = client->adapter;
+ unsigned short addr = client->addr;
int err;
ir = kzalloc(sizeof(struct IR_i2c),GFP_KERNEL);
@@ -332,13 +313,9 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
goto err_out_free;
}
- ir->c = client_template;
+ ir->c = client;
ir->input = input_dev;
-
- ir->c.adapter = adap;
- ir->c.addr = addr;
-
- i2c_set_clientdata(&ir->c, ir);
+ i2c_set_clientdata(client, ir);
switch(addr) {
case 0x64:
@@ -403,44 +380,46 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
ir_codes = ir_codes_avermedia_cardbus;
break;
default:
- /* shouldn't happen */
- printk(DEVNAME ": Huh? unknown i2c address (0x%02x)?\n", addr);
+ dprintk(1, DEVNAME ": Unsupported i2c address 0x%02x\n", addr);
err = -ENODEV;
goto err_out_free;
}
- /* Sets name */
- snprintf(ir->c.name, sizeof(ir->c.name), "i2c IR (%s)", name);
- ir->ir_codes = ir_codes;
+ /* Let the caller override settings */
+ if (client->dev.platform_data) {
+ const struct IR_i2c_init_data *init_data =
+ client->dev.platform_data;
- /* register i2c device
- * At device register, IR codes may be changed to be
- * board dependent.
- */
- err = i2c_attach_client(&ir->c);
- if (err)
- goto err_out_free;
+ ir_codes = init_data->ir_codes;
+ name = init_data->name;
+ ir->get_key = init_data->get_key;
+ }
- /* If IR not supported or disabled, unregisters driver */
- if (ir->get_key == NULL) {
+ /* Make sure we are all set up before going on */
+ if (!name || !ir->get_key || !ir_codes) {
+ dprintk(1, DEVNAME ": Unsupported device at address 0x%02x\n",
+ addr);
err = -ENODEV;
- goto err_out_detach;
+ goto err_out_free;
}
- /* Phys addr can only be set after attaching (for ir->c.dev) */
+ /* Sets name */
+ snprintf(ir->name, sizeof(ir->name), "i2c IR (%s)", name);
+ ir->ir_codes = ir_codes;
+
snprintf(ir->phys, sizeof(ir->phys), "%s/%s/ir0",
- dev_name(&ir->c.adapter->dev),
- dev_name(&ir->c.dev));
+ dev_name(&adap->dev),
+ dev_name(&client->dev));
/* init + register input device */
ir_input_init(input_dev, &ir->ir, ir_type, ir->ir_codes);
input_dev->id.bustype = BUS_I2C;
- input_dev->name = ir->c.name;
+ input_dev->name = ir->name;
input_dev->phys = ir->phys;
err = input_register_device(ir->input);
if (err)
- goto err_out_detach;
+ goto err_out_free;
printk(DEVNAME ": %s detected at %s [%s]\n",
ir->input->name, ir->input->phys, adap->name);
@@ -451,135 +430,42 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
return 0;
- err_out_detach:
- i2c_detach_client(&ir->c);
err_out_free:
input_free_device(input_dev);
kfree(ir);
return err;
}
-static int ir_detach(struct i2c_client *client)
+static int ir_remove(struct i2c_client *client)
{
struct IR_i2c *ir = i2c_get_clientdata(client);
/* kill outstanding polls */
cancel_delayed_work_sync(&ir->work);
- /* unregister devices */
+ /* unregister device */
input_unregister_device(ir->input);
- i2c_detach_client(&ir->c);
/* free memory */
kfree(ir);
return 0;
}
-static int ir_probe(struct i2c_adapter *adap)
-{
-
- /* The external IR receiver is at i2c address 0x34 (0x35 for
- reads). Future Hauppauge cards will have an internal
- receiver at 0x30 (0x31 for reads). In theory, both can be
- fitted, and Hauppauge suggest an external overrides an
- internal.
-
- That's why we probe 0x1a (~0x34) first. CB
- */
-
- static const int probe_bttv[] = { 0x1a, 0x18, 0x4b, 0x64, 0x30, -1};
- static const int probe_saa7134[] = { 0x7a, 0x47, 0x71, 0x2d, -1 };
- static const int probe_em28XX[] = { 0x30, 0x47, -1 };
- static const int probe_cx88[] = { 0x18, 0x6b, 0x71, -1 };
- static const int probe_cx23885[] = { 0x6b, -1 };
- const int *probe;
- struct i2c_msg msg = {
- .flags = I2C_M_RD,
- .len = 0,
- .buf = NULL,
- };
- int i, rc;
-
- switch (adap->id) {
- case I2C_HW_B_BT848:
- probe = probe_bttv;
- break;
- case I2C_HW_B_CX2341X:
- probe = probe_bttv;
- break;
- case I2C_HW_SAA7134:
- probe = probe_saa7134;
- break;
- case I2C_HW_B_EM28XX:
- probe = probe_em28XX;
- break;
- case I2C_HW_B_CX2388x:
- probe = probe_cx88;
- break;
- case I2C_HW_B_CX23885:
- probe = probe_cx23885;
- break;
- default:
- return 0;
- }
-
- for (i = 0; -1 != probe[i]; i++) {
- msg.addr = probe[i];
- rc = i2c_transfer(adap, &msg, 1);
- dprintk(1,"probe 0x%02x @ %s: %s\n",
- probe[i], adap->name,
- (1 == rc) ? "yes" : "no");
- if (1 == rc) {
- ir_attach(adap, probe[i], 0, 0);
- return 0;
- }
- }
-
- /* Special case for MSI TV@nywhere Plus remote */
- if (adap->id == I2C_HW_SAA7134) {
- u8 temp;
-
- /* MSI TV@nywhere Plus controller doesn't seem to
- respond to probes unless we read something from
- an existing device. Weird... */
-
- msg.addr = 0x50;
- rc = i2c_transfer(adap, &msg, 1);
- dprintk(1, "probe 0x%02x @ %s: %s\n",
- msg.addr, adap->name,
- (1 == rc) ? "yes" : "no");
-
- /* Now do the probe. The controller does not respond
- to 0-byte reads, so we use a 1-byte read instead. */
- msg.addr = 0x30;
- msg.len = 1;
- msg.buf = &temp;
- rc = i2c_transfer(adap, &msg, 1);
- dprintk(1, "probe 0x%02x @ %s: %s\n",
- msg.addr, adap->name,
- (1 == rc) ? "yes" : "no");
- if (1 == rc)
- ir_attach(adap, msg.addr, 0, 0);
- }
-
- /* Special case for AVerMedia Cardbus remote */
- if (adap->id == I2C_HW_SAA7134) {
- unsigned char subaddr, data;
- struct i2c_msg msg[] = { { .addr = 0x40, .flags = 0,
- .buf = &subaddr, .len = 1},
- { .addr = 0x40, .flags = I2C_M_RD,
- .buf = &data, .len = 1} };
- subaddr = 0x0d;
- rc = i2c_transfer(adap, msg, 2);
- dprintk(1, "probe 0x%02x/0x%02x @ %s: %s\n",
- msg[0].addr, subaddr, adap->name,
- (2 == rc) ? "yes" : "no");
- if (2 == rc)
- ir_attach(adap, msg[0].addr, 0, 0);
- }
+static const struct i2c_device_id ir_kbd_id[] = {
+ /* Generic entry for any IR receiver */
+ { "ir_video", 0 },
+ /* IR device specific entries could be added here */
+ { }
+};
- return 0;
-}
+static struct i2c_driver driver = {
+ .driver = {
+ .name = "ir-kbd-i2c",
+ },
+ .probe = ir_probe,
+ .remove = ir_remove,
+ .id_table = ir_kbd_id,
+};
/* ----------------------------------------------------------------------- */
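The ir-kbd-i2c hunks above replace the legacy attach_adapter/detach_client binding (where the driver scanned known adapters and probed a fixed address list itself) with the standard new-style i2c model: the driver registers an id table ("ir_video"), and the bridge driver instantiates the client and passes board data through client->dev.platform_data (struct IR_i2c_init_data with ir_codes, name and get_key). The following is a minimal sketch of such a new-style i2c driver using the 2.6.30-era API; the "foo" names are placeholders, not anything from the patch.

	/* Sketch of a minimal new-style i2c chip driver following the model
	 * the patch converts ir-kbd-i2c to.  Binding happens by matching the
	 * client name against the id table, not by the driver probing bus
	 * addresses on its own. */
	#include <linux/module.h>
	#include <linux/i2c.h>
	#include <linux/slab.h>

	struct foo_state { int dummy; };

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		struct foo_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

		if (!state)
			return -ENOMEM;
		i2c_set_clientdata(client, state);
		dev_info(&client->dev, "bound at 0x%02x\n", client->addr);
		return 0;
	}

	static int foo_remove(struct i2c_client *client)
	{
		kfree(i2c_get_clientdata(client));
		return 0;
	}

	static const struct i2c_device_id foo_id[] = {
		{ "foo_video", 0 },
		{ }
	};
	MODULE_DEVICE_TABLE(i2c, foo_id);

	static struct i2c_driver foo_driver = {
		.driver = {
			.name = "foo",
		},
		.probe    = foo_probe,
		.remove   = foo_remove,
		.id_table = foo_id,
	};

	static int __init foo_init(void)
	{
		return i2c_add_driver(&foo_driver);
	}

	static void __exit foo_exit(void)
	{
		i2c_del_driver(&foo_driver);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");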
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index db2ac9a99ac..558f8a837ff 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -455,7 +455,7 @@ static void ivtv_process_eeprom(struct ivtv *itv)
break;
}
if (tv.tuner_type == TUNER_ABSENT)
- IVTV_ERR("tveeprom cannot autodetect tuner!");
+ IVTV_ERR("tveeprom cannot autodetect tuner!\n");
if (itv->options.tuner == -1)
itv->options.tuner = tv.tuner_type;
@@ -946,17 +946,14 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
if (itv == NULL)
return -ENOMEM;
itv->pdev = pdev;
- itv->instance = atomic_inc_return(&ivtv_instance) - 1;
+ itv->instance = v4l2_device_set_name(&itv->v4l2_dev, "ivtv",
+ &ivtv_instance);
retval = v4l2_device_register(&pdev->dev, &itv->v4l2_dev);
if (retval) {
kfree(itv);
return retval;
}
- /* "ivtv + PCI ID" is a bit of a mouthful, so use
- "ivtv + instance" instead. */
- snprintf(itv->v4l2_dev.name, sizeof(itv->v4l2_dev.name),
- "ivtv%d", itv->instance);
IVTV_INFO("Initializing card %d\n", itv->instance);
ivtv_process_options(itv);
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 9e3d32b8004..e52aa322b13 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -579,9 +579,11 @@ static struct i2c_client ivtv_i2c_client_template = {
.name = "ivtv internal",
};
-/* init + register i2c algo-bit adapter */
+/* init + register i2c adapter + instantiate IR receiver */
int init_ivtv_i2c(struct ivtv *itv)
{
+ int retval;
+
IVTV_DEBUG_I2C("i2c init\n");
/* Sanity checks for the I2C hardware arrays. They must be the
@@ -619,9 +621,37 @@ int init_ivtv_i2c(struct ivtv *itv)
ivtv_setsda(itv, 1);
if (itv->options.newi2c > 0)
- return i2c_add_adapter(&itv->i2c_adap);
+ retval = i2c_add_adapter(&itv->i2c_adap);
else
- return i2c_bit_add_bus(&itv->i2c_adap);
+ retval = i2c_bit_add_bus(&itv->i2c_adap);
+
+ /* Instantiate the IR receiver device, if present */
+ if (retval == 0) {
+ struct i2c_board_info info;
+ /* The external IR receiver is at i2c address 0x34 (0x35 for
+ reads). Future Hauppauge cards will have an internal
+ receiver at 0x30 (0x31 for reads). In theory, both can be
+ fitted, and Hauppauge suggest an external overrides an
+ internal.
+
+ That's why we probe 0x1a (~0x34) first. CB
+ */
+ const unsigned short addr_list[] = {
+ 0x1a, /* Hauppauge IR external */
+ 0x18, /* Hauppauge IR internal */
+ 0x71, /* Hauppauge IR (PVR150) */
+ 0x64, /* Pixelview IR */
+ 0x30, /* KNC ONE IR */
+ 0x6b, /* Adaptec IR */
+ I2C_CLIENT_END
+ };
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+ i2c_new_probed_device(&itv->i2c_adap, &info, addr_list);
+ }
+
+ return retval;
}
void exit_ivtv_i2c(struct ivtv *itv)
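The ivtv hunk above is the other half of that conversion: with ir-kbd-i2c no longer probing adapters itself, the bridge driver instantiates the IR client through i2c_new_probed_device(), using a board_info whose .type string ("ir_video") matches the id table and an I2C_CLIENT_END-terminated address list. A condensed sketch of that call sequence, assuming the three-argument form used in the patch; bridge_init_ir() is a placeholder name and the address list here is illustrative.

	/* Sketch: how a bridge driver hands an IR receiver to ir-kbd-i2c
	 * under the new binding model. */
	#include <linux/i2c.h>
	#include <linux/string.h>

	static void bridge_init_ir(struct i2c_adapter *adap)
	{
		static const unsigned short addr_list[] = {
			0x1a, 0x18, 0x30, I2C_CLIENT_END
		};
		struct i2c_board_info info;

		memset(&info, 0, sizeof(info));
		strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
		/* Returns the instantiated client, or NULL if nothing answered. */
		i2c_new_probed_device(adap, &info, addr_list);
	}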
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index c342a9fe983..99f3c39a118 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -709,7 +709,7 @@ static int ivtv_itvc(struct ivtv *itv, unsigned int cmd, void *arg)
else if (itv->has_cx23415 && regs->reg >= IVTV_DECODER_OFFSET &&
regs->reg < IVTV_DECODER_OFFSET + IVTV_DECODER_SIZE)
reg_start = itv->dec_mem - IVTV_DECODER_OFFSET;
- else if (regs->reg >= 0 && regs->reg < IVTV_ENCODER_SIZE)
+ else if (regs->reg < IVTV_ENCODER_SIZE)
reg_start = itv->enc_mem;
else
return -EINVAL;
diff --git a/drivers/media/video/ivtv/ivtv-queue.c b/drivers/media/video/ivtv/ivtv-queue.c
index ff7b7deded4..7fde36e6d22 100644
--- a/drivers/media/video/ivtv/ivtv-queue.c
+++ b/drivers/media/video/ivtv/ivtv-queue.c
@@ -230,7 +230,8 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
return -ENOMEM;
}
if (ivtv_might_use_dma(s)) {
- s->sg_handle = pci_map_single(itv->pdev, s->sg_dma, sizeof(struct ivtv_sg_element), s->dma);
+ s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
+ sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
ivtv_stream_sync_for_cpu(s);
}
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 684f62fa789..459c04cbf69 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -75,53 +75,50 @@ struct mt9m001 {
unsigned char autoexposure;
};
-static int reg_read(struct soc_camera_device *icd, const u8 reg)
+static int reg_read(struct i2c_client *client, const u8 reg)
{
- struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
- struct i2c_client *client = mt9m001->client;
s32 data = i2c_smbus_read_word_data(client, reg);
return data < 0 ? data : swab16(data);
}
-static int reg_write(struct soc_camera_device *icd, const u8 reg,
+static int reg_write(struct i2c_client *client, const u8 reg,
const u16 data)
{
- struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
- return i2c_smbus_write_word_data(mt9m001->client, reg, swab16(data));
+ return i2c_smbus_write_word_data(client, reg, swab16(data));
}
-static int reg_set(struct soc_camera_device *icd, const u8 reg,
+static int reg_set(struct i2c_client *client, const u8 reg,
const u16 data)
{
int ret;
- ret = reg_read(icd, reg);
+ ret = reg_read(client, reg);
if (ret < 0)
return ret;
- return reg_write(icd, reg, ret | data);
+ return reg_write(client, reg, ret | data);
}
-static int reg_clear(struct soc_camera_device *icd, const u8 reg,
+static int reg_clear(struct i2c_client *client, const u8 reg,
const u16 data)
{
int ret;
- ret = reg_read(icd, reg);
+ ret = reg_read(client, reg);
if (ret < 0)
return ret;
- return reg_write(icd, reg, ret & ~data);
+ return reg_write(client, reg, ret & ~data);
}
static int mt9m001_init(struct soc_camera_device *icd)
{
- struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
- struct soc_camera_link *icl = mt9m001->client->dev.platform_data;
+ struct i2c_client *client = to_i2c_client(icd->control);
+ struct soc_camera_link *icl = client->dev.platform_data;
int ret;
dev_dbg(icd->vdev->parent, "%s\n", __func__);
if (icl->power) {
- ret = icl->power(&mt9m001->client->dev, 1);
+ ret = icl->power(&client->dev, 1);
if (ret < 0) {
dev_err(icd->vdev->parent,
"Platform failed to power-on the camera.\n");
@@ -131,49 +128,53 @@ static int mt9m001_init(struct soc_camera_device *icd)
/* The camera could have been already on, we reset it additionally */
if (icl->reset)
- ret = icl->reset(&mt9m001->client->dev);
+ ret = icl->reset(&client->dev);
else
ret = -ENODEV;
if (ret < 0) {
/* Either no platform reset, or platform reset failed */
- ret = reg_write(icd, MT9M001_RESET, 1);
+ ret = reg_write(client, MT9M001_RESET, 1);
if (!ret)
- ret = reg_write(icd, MT9M001_RESET, 0);
+ ret = reg_write(client, MT9M001_RESET, 0);
}
/* Disable chip, synchronous option update */
if (!ret)
- ret = reg_write(icd, MT9M001_OUTPUT_CONTROL, 0);
+ ret = reg_write(client, MT9M001_OUTPUT_CONTROL, 0);
return ret;
}
static int mt9m001_release(struct soc_camera_device *icd)
{
- struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
- struct soc_camera_link *icl = mt9m001->client->dev.platform_data;
+ struct i2c_client *client = to_i2c_client(icd->control);
+ struct soc_camera_link *icl = client->dev.platform_data;
/* Disable the chip */
- reg_write(icd, MT9M001_OUTPUT_CONTROL, 0);
+ reg_write(client, MT9M001_OUTPUT_CONTROL, 0);
if (icl->power)
- icl->power(&mt9m001->client->dev, 0);
+ icl->power(&client->dev, 0);
return 0;
}
static int mt9m001_start_capture(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
+
/* Switch to master "normal" mode */
- if (reg_write(icd, MT9M001_OUTPUT_CONTROL, 2) < 0)
+ if (reg_write(client, MT9M001_OUTPUT_CONTROL, 2) < 0)
return -EIO;
return 0;
}
static int mt9m001_stop_capture(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
+
/* Stop sensor readout */
- if (reg_write(icd, MT9M001_OUTPUT_CONTROL, 0) < 0)
+ if (reg_write(client, MT9M001_OUTPUT_CONTROL, 0) < 0)
return -EIO;
return 0;
}
@@ -222,28 +223,29 @@ static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd)
static int mt9m001_set_crop(struct soc_camera_device *icd,
struct v4l2_rect *rect)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
int ret;
const u16 hblank = 9, vblank = 25;
/* Blanking and start values - default... */
- ret = reg_write(icd, MT9M001_HORIZONTAL_BLANKING, hblank);
+ ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank);
if (!ret)
- ret = reg_write(icd, MT9M001_VERTICAL_BLANKING, vblank);
+ ret = reg_write(client, MT9M001_VERTICAL_BLANKING, vblank);
/* The caller provides a supported format, as verified per
* call to icd->try_fmt() */
if (!ret)
- ret = reg_write(icd, MT9M001_COLUMN_START, rect->left);
+ ret = reg_write(client, MT9M001_COLUMN_START, rect->left);
if (!ret)
- ret = reg_write(icd, MT9M001_ROW_START, rect->top);
+ ret = reg_write(client, MT9M001_ROW_START, rect->top);
if (!ret)
- ret = reg_write(icd, MT9M001_WINDOW_WIDTH, rect->width - 1);
+ ret = reg_write(client, MT9M001_WINDOW_WIDTH, rect->width - 1);
if (!ret)
- ret = reg_write(icd, MT9M001_WINDOW_HEIGHT,
+ ret = reg_write(client, MT9M001_WINDOW_HEIGHT,
rect->height + icd->y_skip_top - 1);
if (!ret && mt9m001->autoexposure) {
- ret = reg_write(icd, MT9M001_SHUTTER_WIDTH,
+ ret = reg_write(client, MT9M001_SHUTTER_WIDTH,
rect->height + icd->y_skip_top + vblank);
if (!ret) {
const struct v4l2_queryctrl *qctrl =
@@ -312,16 +314,16 @@ static int mt9m001_get_chip_id(struct soc_camera_device *icd,
static int mt9m001_get_register(struct soc_camera_device *icd,
struct v4l2_dbg_register *reg)
{
- struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
+ struct i2c_client *client = to_i2c_client(icd->control);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
- if (reg->match.addr != mt9m001->client->addr)
+ if (reg->match.addr != client->addr)
return -ENODEV;
reg->size = 2;
- reg->val = reg_read(icd, reg->reg);
+ reg->val = reg_read(client, reg->reg);
if (reg->val > 0xffff)
return -EIO;
@@ -332,15 +334,15 @@ static int mt9m001_get_register(struct soc_camera_device *icd,
static int mt9m001_set_register(struct soc_camera_device *icd,
struct v4l2_dbg_register *reg)
{
- struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
+ struct i2c_client *client = to_i2c_client(icd->control);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
- if (reg->match.addr != mt9m001->client->addr)
+ if (reg->match.addr != client->addr)
return -ENODEV;
- if (reg_write(icd, reg->reg, reg->val) < 0)
+ if (reg_write(client, reg->reg, reg->val) < 0)
return -EIO;
return 0;
@@ -416,12 +418,13 @@ static struct soc_camera_ops mt9m001_ops = {
static int mt9m001_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
int data;
switch (ctrl->id) {
case V4L2_CID_VFLIP:
- data = reg_read(icd, MT9M001_READ_OPTIONS2);
+ data = reg_read(client, MT9M001_READ_OPTIONS2);
if (data < 0)
return -EIO;
ctrl->value = !!(data & 0x8000);
@@ -435,6 +438,7 @@ static int mt9m001_get_control(struct soc_camera_device *icd, struct v4l2_contro
static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
const struct v4l2_queryctrl *qctrl;
int data;
@@ -447,9 +451,9 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
switch (ctrl->id) {
case V4L2_CID_VFLIP:
if (ctrl->value)
- data = reg_set(icd, MT9M001_READ_OPTIONS2, 0x8000);
+ data = reg_set(client, MT9M001_READ_OPTIONS2, 0x8000);
else
- data = reg_clear(icd, MT9M001_READ_OPTIONS2, 0x8000);
+ data = reg_clear(client, MT9M001_READ_OPTIONS2, 0x8000);
if (data < 0)
return -EIO;
break;
@@ -463,7 +467,7 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
data = ((ctrl->value - qctrl->minimum) * 8 + range / 2) / range;
dev_dbg(&icd->dev, "Setting gain %d\n", data);
- data = reg_write(icd, MT9M001_GLOBAL_GAIN, data);
+ data = reg_write(client, MT9M001_GLOBAL_GAIN, data);
if (data < 0)
return -EIO;
} else {
@@ -481,8 +485,8 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
data = ((gain - 64) * 7 + 28) / 56 + 96;
dev_dbg(&icd->dev, "Setting gain from %d to %d\n",
- reg_read(icd, MT9M001_GLOBAL_GAIN), data);
- data = reg_write(icd, MT9M001_GLOBAL_GAIN, data);
+ reg_read(client, MT9M001_GLOBAL_GAIN), data);
+ data = reg_write(client, MT9M001_GLOBAL_GAIN, data);
if (data < 0)
return -EIO;
}
@@ -500,8 +504,8 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
range / 2) / range + 1;
dev_dbg(&icd->dev, "Setting shutter width from %d to %lu\n",
- reg_read(icd, MT9M001_SHUTTER_WIDTH), shutter);
- if (reg_write(icd, MT9M001_SHUTTER_WIDTH, shutter) < 0)
+ reg_read(client, MT9M001_SHUTTER_WIDTH), shutter);
+ if (reg_write(client, MT9M001_SHUTTER_WIDTH, shutter) < 0)
return -EIO;
icd->exposure = ctrl->value;
mt9m001->autoexposure = 0;
@@ -510,7 +514,7 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
case V4L2_CID_EXPOSURE_AUTO:
if (ctrl->value) {
const u16 vblank = 25;
- if (reg_write(icd, MT9M001_SHUTTER_WIDTH, icd->height +
+ if (reg_write(client, MT9M001_SHUTTER_WIDTH, icd->height +
icd->y_skip_top + vblank) < 0)
return -EIO;
qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE);
@@ -529,8 +533,9 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
* this wasn't our capture interface, so, we wait for the right one */
static int mt9m001_video_probe(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
- struct soc_camera_link *icl = mt9m001->client->dev.platform_data;
+ struct soc_camera_link *icl = client->dev.platform_data;
s32 data;
int ret;
unsigned long flags;
@@ -542,11 +547,11 @@ static int mt9m001_video_probe(struct soc_camera_device *icd)
return -ENODEV;
/* Enable the chip */
- data = reg_write(icd, MT9M001_CHIP_ENABLE, 1);
+ data = reg_write(client, MT9M001_CHIP_ENABLE, 1);
dev_dbg(&icd->dev, "write: %d\n", data);
/* Read out the chip version register */
- data = reg_read(icd, MT9M001_CHIP_VERSION);
+ data = reg_read(client, MT9M001_CHIP_VERSION);
/* must be 0x8411 or 0x8421 for colour sensor and 8431 for bw */
switch (data) {
@@ -604,10 +609,13 @@ ei2c:
static void mt9m001_video_remove(struct soc_camera_device *icd)
{
struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
+ struct soc_camera_link *icl = mt9m001->client->dev.platform_data;
dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9m001->client->addr,
icd->dev.parent, icd->vdev);
soc_camera_video_stop(icd);
+ if (icl->free_bus)
+ icl->free_bus(icl);
}
static int mt9m001_probe(struct i2c_client *client,
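The mt9m001 hunks above, and the mt9m111/mt9t031/mt9v022 ones that follow, all apply the same transformation: the register accessors now take the struct i2c_client directly, and callers recover it from the soc-camera device with to_i2c_client(icd->control) instead of dereferencing a driver-private ->client pointer. A minimal sketch of the resulting pattern, using names from the diff; some_sensor_op() is a placeholder caller, not a function from the patch.

	/* Sketch of the converted accessor pattern.  Register data on these
	 * Micron sensors is a byte-swapped 16-bit word, hence the swab16(). */
	static int reg_read(struct i2c_client *client, const u8 reg)
	{
		s32 data = i2c_smbus_read_word_data(client, reg);

		return data < 0 ? data : swab16(data);
	}

	static int some_sensor_op(struct soc_camera_device *icd)
	{
		/* The i2c client is reachable through the soc-camera device. */
		struct i2c_client *client = to_i2c_client(icd->control);

		return reg_read(client, MT9M001_CHIP_VERSION);
	}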
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index cdd1ddb5138..fc5e2de0376 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -113,10 +113,10 @@
* mt9m111: Camera control register addresses (0x200..0x2ff not implemented)
*/
-#define reg_read(reg) mt9m111_reg_read(icd, MT9M111_##reg)
-#define reg_write(reg, val) mt9m111_reg_write(icd, MT9M111_##reg, (val))
-#define reg_set(reg, val) mt9m111_reg_set(icd, MT9M111_##reg, (val))
-#define reg_clear(reg, val) mt9m111_reg_clear(icd, MT9M111_##reg, (val))
+#define reg_read(reg) mt9m111_reg_read(client, MT9M111_##reg)
+#define reg_write(reg, val) mt9m111_reg_write(client, MT9M111_##reg, (val))
+#define reg_set(reg, val) mt9m111_reg_set(client, MT9M111_##reg, (val))
+#define reg_clear(reg, val) mt9m111_reg_clear(client, MT9M111_##reg, (val))
#define MT9M111_MIN_DARK_ROWS 8
#define MT9M111_MIN_DARK_COLS 24
@@ -184,58 +184,55 @@ static int reg_page_map_set(struct i2c_client *client, const u16 reg)
return ret;
}
-static int mt9m111_reg_read(struct soc_camera_device *icd, const u16 reg)
+static int mt9m111_reg_read(struct i2c_client *client, const u16 reg)
{
- struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
- struct i2c_client *client = mt9m111->client;
int ret;
ret = reg_page_map_set(client, reg);
if (!ret)
ret = swab16(i2c_smbus_read_word_data(client, (reg & 0xff)));
- dev_dbg(&icd->dev, "read reg.%03x -> %04x\n", reg, ret);
+ dev_dbg(&client->dev, "read reg.%03x -> %04x\n", reg, ret);
return ret;
}
-static int mt9m111_reg_write(struct soc_camera_device *icd, const u16 reg,
+static int mt9m111_reg_write(struct i2c_client *client, const u16 reg,
const u16 data)
{
- struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
- struct i2c_client *client = mt9m111->client;
int ret;
ret = reg_page_map_set(client, reg);
if (!ret)
- ret = i2c_smbus_write_word_data(mt9m111->client, (reg & 0xff),
+ ret = i2c_smbus_write_word_data(client, (reg & 0xff),
swab16(data));
- dev_dbg(&icd->dev, "write reg.%03x = %04x -> %d\n", reg, data, ret);
+ dev_dbg(&client->dev, "write reg.%03x = %04x -> %d\n", reg, data, ret);
return ret;
}
-static int mt9m111_reg_set(struct soc_camera_device *icd, const u16 reg,
+static int mt9m111_reg_set(struct i2c_client *client, const u16 reg,
const u16 data)
{
int ret;
- ret = mt9m111_reg_read(icd, reg);
+ ret = mt9m111_reg_read(client, reg);
if (ret >= 0)
- ret = mt9m111_reg_write(icd, reg, ret | data);
+ ret = mt9m111_reg_write(client, reg, ret | data);
return ret;
}
-static int mt9m111_reg_clear(struct soc_camera_device *icd, const u16 reg,
+static int mt9m111_reg_clear(struct i2c_client *client, const u16 reg,
const u16 data)
{
int ret;
- ret = mt9m111_reg_read(icd, reg);
- return mt9m111_reg_write(icd, reg, ret & ~data);
+ ret = mt9m111_reg_read(client, reg);
+ return mt9m111_reg_write(client, reg, ret & ~data);
}
static int mt9m111_set_context(struct soc_camera_device *icd,
enum mt9m111_context ctxt)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
int valB = MT9M111_CTXT_CTRL_RESTART | MT9M111_CTXT_CTRL_DEFECTCOR_B
| MT9M111_CTXT_CTRL_RESIZE_B | MT9M111_CTXT_CTRL_CTRL2_B
| MT9M111_CTXT_CTRL_GAMMA_B | MT9M111_CTXT_CTRL_READ_MODE_B
@@ -252,6 +249,7 @@ static int mt9m111_set_context(struct soc_camera_device *icd,
static int mt9m111_setup_rect(struct soc_camera_device *icd,
struct v4l2_rect *rect)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
int ret, is_raw_format;
int width = rect->width;
@@ -296,6 +294,7 @@ static int mt9m111_setup_rect(struct soc_camera_device *icd,
static int mt9m111_setup_pixfmt(struct soc_camera_device *icd, u16 outfmt)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
int ret;
ret = reg_write(OUTPUT_FORMAT_CTRL2_A, outfmt);
@@ -357,12 +356,13 @@ static int mt9m111_setfmt_yuv(struct soc_camera_device *icd)
static int mt9m111_enable(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
- struct soc_camera_link *icl = mt9m111->client->dev.platform_data;
+ struct soc_camera_link *icl = client->dev.platform_data;
int ret;
if (icl->power) {
- ret = icl->power(&mt9m111->client->dev, 1);
+ ret = icl->power(&client->dev, 1);
if (ret < 0) {
dev_err(icd->vdev->parent,
"Platform failed to power-on the camera.\n");
@@ -378,8 +378,9 @@ static int mt9m111_enable(struct soc_camera_device *icd)
static int mt9m111_disable(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
- struct soc_camera_link *icl = mt9m111->client->dev.platform_data;
+ struct soc_camera_link *icl = client->dev.platform_data;
int ret;
ret = reg_clear(RESET, MT9M111_RESET_CHIP_ENABLE);
@@ -387,15 +388,15 @@ static int mt9m111_disable(struct soc_camera_device *icd)
mt9m111->powered = 0;
if (icl->power)
- icl->power(&mt9m111->client->dev, 0);
+ icl->power(&client->dev, 0);
return ret;
}
static int mt9m111_reset(struct soc_camera_device *icd)
{
- struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
- struct soc_camera_link *icl = mt9m111->client->dev.platform_data;
+ struct i2c_client *client = to_i2c_client(icd->control);
+ struct soc_camera_link *icl = client->dev.platform_data;
int ret;
ret = reg_set(RESET, MT9M111_RESET_RESET_MODE);
@@ -406,7 +407,7 @@ static int mt9m111_reset(struct soc_camera_device *icd)
| MT9M111_RESET_RESET_SOC);
if (icl->reset)
- icl->reset(&mt9m111->client->dev);
+ icl->reset(&client->dev);
return ret;
}
@@ -562,15 +563,14 @@ static int mt9m111_get_register(struct soc_camera_device *icd,
struct v4l2_dbg_register *reg)
{
int val;
-
- struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
+ struct i2c_client *client = to_i2c_client(icd->control);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff)
return -EINVAL;
- if (reg->match.addr != mt9m111->client->addr)
+ if (reg->match.addr != client->addr)
return -ENODEV;
- val = mt9m111_reg_read(icd, reg->reg);
+ val = mt9m111_reg_read(client, reg->reg);
reg->size = 2;
reg->val = (u64)val;
@@ -583,15 +583,15 @@ static int mt9m111_get_register(struct soc_camera_device *icd,
static int mt9m111_set_register(struct soc_camera_device *icd,
struct v4l2_dbg_register *reg)
{
- struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
+ struct i2c_client *client = to_i2c_client(icd->control);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff)
return -EINVAL;
- if (reg->match.addr != mt9m111->client->addr)
+ if (reg->match.addr != client->addr)
return -ENODEV;
- if (mt9m111_reg_write(icd, reg->reg, reg->val) < 0)
+ if (mt9m111_reg_write(client, reg->reg, reg->val) < 0)
return -EIO;
return 0;
@@ -672,6 +672,7 @@ static struct soc_camera_ops mt9m111_ops = {
static int mt9m111_set_flip(struct soc_camera_device *icd, int flip, int mask)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
int ret;
@@ -692,6 +693,7 @@ static int mt9m111_set_flip(struct soc_camera_device *icd, int flip, int mask)
static int mt9m111_get_global_gain(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
int data;
data = reg_read(GLOBAL_GAIN);
@@ -703,6 +705,7 @@ static int mt9m111_get_global_gain(struct soc_camera_device *icd)
static int mt9m111_set_global_gain(struct soc_camera_device *icd, int gain)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
u16 val;
if (gain > 63 * 2 * 2)
@@ -721,6 +724,7 @@ static int mt9m111_set_global_gain(struct soc_camera_device *icd, int gain)
static int mt9m111_set_autoexposure(struct soc_camera_device *icd, int on)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
int ret;
@@ -737,6 +741,7 @@ static int mt9m111_set_autoexposure(struct soc_camera_device *icd, int on)
static int mt9m111_set_autowhitebalance(struct soc_camera_device *icd, int on)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
int ret;
@@ -754,6 +759,7 @@ static int mt9m111_set_autowhitebalance(struct soc_camera_device *icd, int on)
static int mt9m111_get_control(struct soc_camera_device *icd,
struct v4l2_control *ctrl)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
int data;
@@ -898,6 +904,7 @@ static int mt9m111_release(struct soc_camera_device *icd)
*/
static int mt9m111_video_probe(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
s32 data;
int ret;
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index 2b0927bfd21..f72aeb7c4de 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -76,64 +76,61 @@ struct mt9t031 {
u16 yskip;
};
-static int reg_read(struct soc_camera_device *icd, const u8 reg)
+static int reg_read(struct i2c_client *client, const u8 reg)
{
- struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
- struct i2c_client *client = mt9t031->client;
s32 data = i2c_smbus_read_word_data(client, reg);
return data < 0 ? data : swab16(data);
}
-static int reg_write(struct soc_camera_device *icd, const u8 reg,
+static int reg_write(struct i2c_client *client, const u8 reg,
const u16 data)
{
- struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
- return i2c_smbus_write_word_data(mt9t031->client, reg, swab16(data));
+ return i2c_smbus_write_word_data(client, reg, swab16(data));
}
-static int reg_set(struct soc_camera_device *icd, const u8 reg,
+static int reg_set(struct i2c_client *client, const u8 reg,
const u16 data)
{
int ret;
- ret = reg_read(icd, reg);
+ ret = reg_read(client, reg);
if (ret < 0)
return ret;
- return reg_write(icd, reg, ret | data);
+ return reg_write(client, reg, ret | data);
}
-static int reg_clear(struct soc_camera_device *icd, const u8 reg,
+static int reg_clear(struct i2c_client *client, const u8 reg,
const u16 data)
{
int ret;
- ret = reg_read(icd, reg);
+ ret = reg_read(client, reg);
if (ret < 0)
return ret;
- return reg_write(icd, reg, ret & ~data);
+ return reg_write(client, reg, ret & ~data);
}
-static int set_shutter(struct soc_camera_device *icd, const u32 data)
+static int set_shutter(struct i2c_client *client, const u32 data)
{
int ret;
- ret = reg_write(icd, MT9T031_SHUTTER_WIDTH_UPPER, data >> 16);
+ ret = reg_write(client, MT9T031_SHUTTER_WIDTH_UPPER, data >> 16);
if (ret >= 0)
- ret = reg_write(icd, MT9T031_SHUTTER_WIDTH, data & 0xffff);
+ ret = reg_write(client, MT9T031_SHUTTER_WIDTH, data & 0xffff);
return ret;
}
-static int get_shutter(struct soc_camera_device *icd, u32 *data)
+static int get_shutter(struct i2c_client *client, u32 *data)
{
int ret;
- ret = reg_read(icd, MT9T031_SHUTTER_WIDTH_UPPER);
+ ret = reg_read(client, MT9T031_SHUTTER_WIDTH_UPPER);
*data = ret << 16;
if (ret >= 0)
- ret = reg_read(icd, MT9T031_SHUTTER_WIDTH);
+ ret = reg_read(client, MT9T031_SHUTTER_WIDTH);
*data |= ret & 0xffff;
return ret < 0 ? ret : 0;
@@ -141,12 +138,12 @@ static int get_shutter(struct soc_camera_device *icd, u32 *data)
static int mt9t031_init(struct soc_camera_device *icd)
{
- struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
- struct soc_camera_link *icl = mt9t031->client->dev.platform_data;
+ struct i2c_client *client = to_i2c_client(icd->control);
+ struct soc_camera_link *icl = client->dev.platform_data;
int ret;
if (icl->power) {
- ret = icl->power(&mt9t031->client->dev, 1);
+ ret = icl->power(&client->dev, 1);
if (ret < 0) {
dev_err(icd->vdev->parent,
"Platform failed to power-on the camera.\n");
@@ -155,44 +152,48 @@ static int mt9t031_init(struct soc_camera_device *icd)
}
/* Disable chip output, synchronous option update */
- ret = reg_write(icd, MT9T031_RESET, 1);
+ ret = reg_write(client, MT9T031_RESET, 1);
if (ret >= 0)
- ret = reg_write(icd, MT9T031_RESET, 0);
+ ret = reg_write(client, MT9T031_RESET, 0);
if (ret >= 0)
- ret = reg_clear(icd, MT9T031_OUTPUT_CONTROL, 2);
+ ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 2);
if (ret < 0 && icl->power)
- icl->power(&mt9t031->client->dev, 0);
+ icl->power(&client->dev, 0);
return ret >= 0 ? 0 : -EIO;
}
static int mt9t031_release(struct soc_camera_device *icd)
{
- struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
- struct soc_camera_link *icl = mt9t031->client->dev.platform_data;
+ struct i2c_client *client = to_i2c_client(icd->control);
+ struct soc_camera_link *icl = client->dev.platform_data;
/* Disable the chip */
- reg_clear(icd, MT9T031_OUTPUT_CONTROL, 2);
+ reg_clear(client, MT9T031_OUTPUT_CONTROL, 2);
if (icl->power)
- icl->power(&mt9t031->client->dev, 0);
+ icl->power(&client->dev, 0);
return 0;
}
static int mt9t031_start_capture(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
+
/* Switch to master "normal" mode */
- if (reg_set(icd, MT9T031_OUTPUT_CONTROL, 2) < 0)
+ if (reg_set(client, MT9T031_OUTPUT_CONTROL, 2) < 0)
return -EIO;
return 0;
}
static int mt9t031_stop_capture(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
+
/* Stop sensor readout */
- if (reg_clear(icd, MT9T031_OUTPUT_CONTROL, 2) < 0)
+ if (reg_clear(client, MT9T031_OUTPUT_CONTROL, 2) < 0)
return -EIO;
return 0;
}
@@ -200,14 +201,16 @@ static int mt9t031_stop_capture(struct soc_camera_device *icd)
static int mt9t031_set_bus_param(struct soc_camera_device *icd,
unsigned long flags)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
+
/* The caller should have queried our parameters, check anyway */
if (flags & ~MT9T031_BUS_PARAM)
return -EINVAL;
if (flags & SOCAM_PCLK_SAMPLE_FALLING)
- reg_clear(icd, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
+ reg_clear(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
else
- reg_set(icd, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
+ reg_set(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
return 0;
}
@@ -235,6 +238,7 @@ static void recalculate_limits(struct soc_camera_device *icd,
static int mt9t031_set_params(struct soc_camera_device *icd,
struct v4l2_rect *rect, u16 xskip, u16 yskip)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
int ret;
u16 xbin, ybin, width, height, left, top;
@@ -277,22 +281,22 @@ static int mt9t031_set_params(struct soc_camera_device *icd,
}
/* Disable register update, reconfigure atomically */
- ret = reg_set(icd, MT9T031_OUTPUT_CONTROL, 1);
+ ret = reg_set(client, MT9T031_OUTPUT_CONTROL, 1);
if (ret < 0)
return ret;
/* Blanking and start values - default... */
- ret = reg_write(icd, MT9T031_HORIZONTAL_BLANKING, hblank);
+ ret = reg_write(client, MT9T031_HORIZONTAL_BLANKING, hblank);
if (ret >= 0)
- ret = reg_write(icd, MT9T031_VERTICAL_BLANKING, vblank);
+ ret = reg_write(client, MT9T031_VERTICAL_BLANKING, vblank);
if (yskip != mt9t031->yskip || xskip != mt9t031->xskip) {
/* Binning, skipping */
if (ret >= 0)
- ret = reg_write(icd, MT9T031_COLUMN_ADDRESS_MODE,
+ ret = reg_write(client, MT9T031_COLUMN_ADDRESS_MODE,
((xbin - 1) << 4) | (xskip - 1));
if (ret >= 0)
- ret = reg_write(icd, MT9T031_ROW_ADDRESS_MODE,
+ ret = reg_write(client, MT9T031_ROW_ADDRESS_MODE,
((ybin - 1) << 4) | (yskip - 1));
}
dev_dbg(&icd->dev, "new physical left %u, top %u\n", left, top);
@@ -300,16 +304,16 @@ static int mt9t031_set_params(struct soc_camera_device *icd,
/* The caller provides a supported format, as guaranteed by
* icd->try_fmt_cap(), soc_camera_s_crop() and soc_camera_cropcap() */
if (ret >= 0)
- ret = reg_write(icd, MT9T031_COLUMN_START, left);
+ ret = reg_write(client, MT9T031_COLUMN_START, left);
if (ret >= 0)
- ret = reg_write(icd, MT9T031_ROW_START, top);
+ ret = reg_write(client, MT9T031_ROW_START, top);
if (ret >= 0)
- ret = reg_write(icd, MT9T031_WINDOW_WIDTH, width - 1);
+ ret = reg_write(client, MT9T031_WINDOW_WIDTH, width - 1);
if (ret >= 0)
- ret = reg_write(icd, MT9T031_WINDOW_HEIGHT,
+ ret = reg_write(client, MT9T031_WINDOW_HEIGHT,
height + icd->y_skip_top - 1);
if (ret >= 0 && mt9t031->autoexposure) {
- ret = set_shutter(icd, height + icd->y_skip_top + vblank);
+ ret = set_shutter(client, height + icd->y_skip_top + vblank);
if (ret >= 0) {
const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
const struct v4l2_queryctrl *qctrl =
@@ -324,7 +328,7 @@ static int mt9t031_set_params(struct soc_camera_device *icd,
/* Re-enable register update, commit all changes */
if (ret >= 0)
- ret = reg_clear(icd, MT9T031_OUTPUT_CONTROL, 1);
+ ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 1);
return ret < 0 ? ret : 0;
}
@@ -417,15 +421,15 @@ static int mt9t031_get_chip_id(struct soc_camera_device *icd,
static int mt9t031_get_register(struct soc_camera_device *icd,
struct v4l2_dbg_register *reg)
{
- struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
+ struct i2c_client *client = to_i2c_client(icd->control);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
- if (reg->match.addr != mt9t031->client->addr)
+ if (reg->match.addr != client->addr)
return -ENODEV;
- reg->val = reg_read(icd, reg->reg);
+ reg->val = reg_read(client, reg->reg);
if (reg->val > 0xffff)
return -EIO;
@@ -436,15 +440,15 @@ static int mt9t031_get_register(struct soc_camera_device *icd,
static int mt9t031_set_register(struct soc_camera_device *icd,
struct v4l2_dbg_register *reg)
{
- struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
+ struct i2c_client *client = to_i2c_client(icd->control);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
- if (reg->match.addr != mt9t031->client->addr)
+ if (reg->match.addr != client->addr)
return -ENODEV;
- if (reg_write(icd, reg->reg, reg->val) < 0)
+ if (reg_write(client, reg->reg, reg->val) < 0)
return -EIO;
return 0;
@@ -528,18 +532,19 @@ static struct soc_camera_ops mt9t031_ops = {
static int mt9t031_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
int data;
switch (ctrl->id) {
case V4L2_CID_VFLIP:
- data = reg_read(icd, MT9T031_READ_MODE_2);
+ data = reg_read(client, MT9T031_READ_MODE_2);
if (data < 0)
return -EIO;
ctrl->value = !!(data & 0x8000);
break;
case V4L2_CID_HFLIP:
- data = reg_read(icd, MT9T031_READ_MODE_2);
+ data = reg_read(client, MT9T031_READ_MODE_2);
if (data < 0)
return -EIO;
ctrl->value = !!(data & 0x4000);
@@ -553,6 +558,7 @@ static int mt9t031_get_control(struct soc_camera_device *icd, struct v4l2_contro
static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
const struct v4l2_queryctrl *qctrl;
int data;
@@ -565,17 +571,17 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro
switch (ctrl->id) {
case V4L2_CID_VFLIP:
if (ctrl->value)
- data = reg_set(icd, MT9T031_READ_MODE_2, 0x8000);
+ data = reg_set(client, MT9T031_READ_MODE_2, 0x8000);
else
- data = reg_clear(icd, MT9T031_READ_MODE_2, 0x8000);
+ data = reg_clear(client, MT9T031_READ_MODE_2, 0x8000);
if (data < 0)
return -EIO;
break;
case V4L2_CID_HFLIP:
if (ctrl->value)
- data = reg_set(icd, MT9T031_READ_MODE_2, 0x4000);
+ data = reg_set(client, MT9T031_READ_MODE_2, 0x4000);
else
- data = reg_clear(icd, MT9T031_READ_MODE_2, 0x4000);
+ data = reg_clear(client, MT9T031_READ_MODE_2, 0x4000);
if (data < 0)
return -EIO;
break;
@@ -589,7 +595,7 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro
data = ((ctrl->value - qctrl->minimum) * 8 + range / 2) / range;
dev_dbg(&icd->dev, "Setting gain %d\n", data);
- data = reg_write(icd, MT9T031_GLOBAL_GAIN, data);
+ data = reg_write(client, MT9T031_GLOBAL_GAIN, data);
if (data < 0)
return -EIO;
} else {
@@ -609,8 +615,8 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro
data = (((gain - 64 + 7) * 32) & 0xff00) | 0x60;
dev_dbg(&icd->dev, "Setting gain from 0x%x to 0x%x\n",
- reg_read(icd, MT9T031_GLOBAL_GAIN), data);
- data = reg_write(icd, MT9T031_GLOBAL_GAIN, data);
+ reg_read(client, MT9T031_GLOBAL_GAIN), data);
+ data = reg_write(client, MT9T031_GLOBAL_GAIN, data);
if (data < 0)
return -EIO;
}
@@ -628,10 +634,10 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro
range / 2) / range + 1;
u32 old;
- get_shutter(icd, &old);
+ get_shutter(client, &old);
dev_dbg(&icd->dev, "Setting shutter width from %u to %u\n",
old, shutter);
- if (set_shutter(icd, shutter) < 0)
+ if (set_shutter(client, shutter) < 0)
return -EIO;
icd->exposure = ctrl->value;
mt9t031->autoexposure = 0;
@@ -641,7 +647,7 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro
if (ctrl->value) {
const u16 vblank = MT9T031_VERTICAL_BLANK;
const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
- if (set_shutter(icd, icd->height +
+ if (set_shutter(client, icd->height +
icd->y_skip_top + vblank) < 0)
return -EIO;
qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE);
@@ -661,6 +667,7 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro
* this wasn't our capture interface, so, we wait for the right one */
static int mt9t031_video_probe(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
s32 data;
int ret;
@@ -672,11 +679,11 @@ static int mt9t031_video_probe(struct soc_camera_device *icd)
return -ENODEV;
/* Enable the chip */
- data = reg_write(icd, MT9T031_CHIP_ENABLE, 1);
+ data = reg_write(client, MT9T031_CHIP_ENABLE, 1);
dev_dbg(&icd->dev, "write: %d\n", data);
/* Read out the chip version register */
- data = reg_read(icd, MT9T031_CHIP_VERSION);
+ data = reg_read(client, MT9T031_CHIP_VERSION);
switch (data) {
case 0x1621:
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index 4d3b4813c32..be20d312b1d 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -91,51 +91,49 @@ struct mt9v022 {
u16 chip_control;
};
-static int reg_read(struct soc_camera_device *icd, const u8 reg)
+static int reg_read(struct i2c_client *client, const u8 reg)
{
- struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
- struct i2c_client *client = mt9v022->client;
s32 data = i2c_smbus_read_word_data(client, reg);
return data < 0 ? data : swab16(data);
}
-static int reg_write(struct soc_camera_device *icd, const u8 reg,
+static int reg_write(struct i2c_client *client, const u8 reg,
const u16 data)
{
- struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
- return i2c_smbus_write_word_data(mt9v022->client, reg, swab16(data));
+ return i2c_smbus_write_word_data(client, reg, swab16(data));
}
-static int reg_set(struct soc_camera_device *icd, const u8 reg,
+static int reg_set(struct i2c_client *client, const u8 reg,
const u16 data)
{
int ret;
- ret = reg_read(icd, reg);
+ ret = reg_read(client, reg);
if (ret < 0)
return ret;
- return reg_write(icd, reg, ret | data);
+ return reg_write(client, reg, ret | data);
}
-static int reg_clear(struct soc_camera_device *icd, const u8 reg,
+static int reg_clear(struct i2c_client *client, const u8 reg,
const u16 data)
{
int ret;
- ret = reg_read(icd, reg);
+ ret = reg_read(client, reg);
if (ret < 0)
return ret;
- return reg_write(icd, reg, ret & ~data);
+ return reg_write(client, reg, ret & ~data);
}
static int mt9v022_init(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
- struct soc_camera_link *icl = mt9v022->client->dev.platform_data;
+ struct soc_camera_link *icl = client->dev.platform_data;
int ret;
if (icl->power) {
- ret = icl->power(&mt9v022->client->dev, 1);
+ ret = icl->power(&client->dev, 1);
if (ret < 0) {
dev_err(icd->vdev->parent,
"Platform failed to power-on the camera.\n");
@@ -148,27 +146,27 @@ static int mt9v022_init(struct soc_camera_device *icd)
* if available. Soft reset is done in video_probe().
*/
if (icl->reset)
- icl->reset(&mt9v022->client->dev);
+ icl->reset(&client->dev);
/* Almost the default mode: master, parallel, simultaneous, and an
* undocumented bit 0x200, which is present in table 7, but not in 8,
* plus snapshot mode to disable scan for now */
mt9v022->chip_control |= 0x10;
- ret = reg_write(icd, MT9V022_CHIP_CONTROL, mt9v022->chip_control);
+ ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control);
if (!ret)
- ret = reg_write(icd, MT9V022_READ_MODE, 0x300);
+ ret = reg_write(client, MT9V022_READ_MODE, 0x300);
/* All defaults */
if (!ret)
/* AEC, AGC on */
- ret = reg_set(icd, MT9V022_AEC_AGC_ENABLE, 0x3);
+ ret = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x3);
if (!ret)
- ret = reg_write(icd, MT9V022_MAX_TOTAL_SHUTTER_WIDTH, 480);
+ ret = reg_write(client, MT9V022_MAX_TOTAL_SHUTTER_WIDTH, 480);
if (!ret)
/* default - auto */
- ret = reg_clear(icd, MT9V022_BLACK_LEVEL_CALIB_CTRL, 1);
+ ret = reg_clear(client, MT9V022_BLACK_LEVEL_CALIB_CTRL, 1);
if (!ret)
- ret = reg_write(icd, MT9V022_DIGITAL_TEST_PATTERN, 0);
+ ret = reg_write(client, MT9V022_DIGITAL_TEST_PATTERN, 0);
return ret;
}
@@ -186,10 +184,11 @@ static int mt9v022_release(struct soc_camera_device *icd)
static int mt9v022_start_capture(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
/* Switch to master "normal" mode */
mt9v022->chip_control &= ~0x10;
- if (reg_write(icd, MT9V022_CHIP_CONTROL,
+ if (reg_write(client, MT9V022_CHIP_CONTROL,
mt9v022->chip_control) < 0)
return -EIO;
return 0;
@@ -197,10 +196,11 @@ static int mt9v022_start_capture(struct soc_camera_device *icd)
static int mt9v022_stop_capture(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
/* Switch to snapshot mode */
mt9v022->chip_control |= 0x10;
- if (reg_write(icd, MT9V022_CHIP_CONTROL,
+ if (reg_write(client, MT9V022_CHIP_CONTROL,
mt9v022->chip_control) < 0)
return -EIO;
return 0;
@@ -209,8 +209,9 @@ static int mt9v022_stop_capture(struct soc_camera_device *icd)
static int mt9v022_set_bus_param(struct soc_camera_device *icd,
unsigned long flags)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
- struct soc_camera_link *icl = mt9v022->client->dev.platform_data;
+ struct soc_camera_link *icl = client->dev.platform_data;
unsigned int width_flag = flags & SOCAM_DATAWIDTH_MASK;
int ret;
u16 pixclk = 0;
@@ -243,14 +244,14 @@ static int mt9v022_set_bus_param(struct soc_camera_device *icd,
if (!(flags & SOCAM_VSYNC_ACTIVE_HIGH))
pixclk |= 0x2;
- ret = reg_write(icd, MT9V022_PIXCLK_FV_LV, pixclk);
+ ret = reg_write(client, MT9V022_PIXCLK_FV_LV, pixclk);
if (ret < 0)
return ret;
if (!(flags & SOCAM_MASTER))
mt9v022->chip_control &= ~0x8;
- ret = reg_write(icd, MT9V022_CHIP_CONTROL, mt9v022->chip_control);
+ ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control);
if (ret < 0)
return ret;
@@ -282,35 +283,36 @@ static unsigned long mt9v022_query_bus_param(struct soc_camera_device *icd)
static int mt9v022_set_crop(struct soc_camera_device *icd,
struct v4l2_rect *rect)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
int ret;
/* Like in example app. Contradicts the datasheet though */
- ret = reg_read(icd, MT9V022_AEC_AGC_ENABLE);
+ ret = reg_read(client, MT9V022_AEC_AGC_ENABLE);
if (ret >= 0) {
if (ret & 1) /* Autoexposure */
- ret = reg_write(icd, MT9V022_MAX_TOTAL_SHUTTER_WIDTH,
+ ret = reg_write(client, MT9V022_MAX_TOTAL_SHUTTER_WIDTH,
rect->height + icd->y_skip_top + 43);
else
- ret = reg_write(icd, MT9V022_TOTAL_SHUTTER_WIDTH,
+ ret = reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH,
rect->height + icd->y_skip_top + 43);
}
/* Setup frame format: defaults apart from width and height */
if (!ret)
- ret = reg_write(icd, MT9V022_COLUMN_START, rect->left);
+ ret = reg_write(client, MT9V022_COLUMN_START, rect->left);
if (!ret)
- ret = reg_write(icd, MT9V022_ROW_START, rect->top);
+ ret = reg_write(client, MT9V022_ROW_START, rect->top);
if (!ret)
/* Default 94, Phytec driver says:
* "width + horizontal blank >= 660" */
- ret = reg_write(icd, MT9V022_HORIZONTAL_BLANKING,
+ ret = reg_write(client, MT9V022_HORIZONTAL_BLANKING,
rect->width > 660 - 43 ? 43 :
660 - rect->width);
if (!ret)
- ret = reg_write(icd, MT9V022_VERTICAL_BLANKING, 45);
+ ret = reg_write(client, MT9V022_VERTICAL_BLANKING, 45);
if (!ret)
- ret = reg_write(icd, MT9V022_WINDOW_WIDTH, rect->width);
+ ret = reg_write(client, MT9V022_WINDOW_WIDTH, rect->width);
if (!ret)
- ret = reg_write(icd, MT9V022_WINDOW_HEIGHT,
+ ret = reg_write(client, MT9V022_WINDOW_HEIGHT,
rect->height + icd->y_skip_top);
if (ret < 0)
@@ -396,16 +398,16 @@ static int mt9v022_get_chip_id(struct soc_camera_device *icd,
static int mt9v022_get_register(struct soc_camera_device *icd,
struct v4l2_dbg_register *reg)
{
- struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
+ struct i2c_client *client = to_i2c_client(icd->control);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
- if (reg->match.addr != mt9v022->client->addr)
+ if (reg->match.addr != client->addr)
return -ENODEV;
reg->size = 2;
- reg->val = reg_read(icd, reg->reg);
+ reg->val = reg_read(client, reg->reg);
if (reg->val > 0xffff)
return -EIO;
@@ -416,15 +418,15 @@ static int mt9v022_get_register(struct soc_camera_device *icd,
static int mt9v022_set_register(struct soc_camera_device *icd,
struct v4l2_dbg_register *reg)
{
- struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
+ struct i2c_client *client = to_i2c_client(icd->control);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
- if (reg->match.addr != mt9v022->client->addr)
+ if (reg->match.addr != client->addr)
return -ENODEV;
- if (reg_write(icd, reg->reg, reg->val) < 0)
+ if (reg_write(client, reg->reg, reg->val) < 0)
return -EIO;
return 0;
@@ -517,29 +519,30 @@ static struct soc_camera_ops mt9v022_ops = {
static int mt9v022_get_control(struct soc_camera_device *icd,
struct v4l2_control *ctrl)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
int data;
switch (ctrl->id) {
case V4L2_CID_VFLIP:
- data = reg_read(icd, MT9V022_READ_MODE);
+ data = reg_read(client, MT9V022_READ_MODE);
if (data < 0)
return -EIO;
ctrl->value = !!(data & 0x10);
break;
case V4L2_CID_HFLIP:
- data = reg_read(icd, MT9V022_READ_MODE);
+ data = reg_read(client, MT9V022_READ_MODE);
if (data < 0)
return -EIO;
ctrl->value = !!(data & 0x20);
break;
case V4L2_CID_EXPOSURE_AUTO:
- data = reg_read(icd, MT9V022_AEC_AGC_ENABLE);
+ data = reg_read(client, MT9V022_AEC_AGC_ENABLE);
if (data < 0)
return -EIO;
ctrl->value = !!(data & 0x1);
break;
case V4L2_CID_AUTOGAIN:
- data = reg_read(icd, MT9V022_AEC_AGC_ENABLE);
+ data = reg_read(client, MT9V022_AEC_AGC_ENABLE);
if (data < 0)
return -EIO;
ctrl->value = !!(data & 0x2);
@@ -552,6 +555,7 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
struct v4l2_control *ctrl)
{
int data;
+ struct i2c_client *client = to_i2c_client(icd->control);
const struct v4l2_queryctrl *qctrl;
qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id);
@@ -562,17 +566,17 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
switch (ctrl->id) {
case V4L2_CID_VFLIP:
if (ctrl->value)
- data = reg_set(icd, MT9V022_READ_MODE, 0x10);
+ data = reg_set(client, MT9V022_READ_MODE, 0x10);
else
- data = reg_clear(icd, MT9V022_READ_MODE, 0x10);
+ data = reg_clear(client, MT9V022_READ_MODE, 0x10);
if (data < 0)
return -EIO;
break;
case V4L2_CID_HFLIP:
if (ctrl->value)
- data = reg_set(icd, MT9V022_READ_MODE, 0x20);
+ data = reg_set(client, MT9V022_READ_MODE, 0x20);
else
- data = reg_clear(icd, MT9V022_READ_MODE, 0x20);
+ data = reg_clear(client, MT9V022_READ_MODE, 0x20);
if (data < 0)
return -EIO;
break;
@@ -593,12 +597,12 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
/* The user wants to set gain manually, hope, she
* knows, what she's doing... Switch AGC off. */
- if (reg_clear(icd, MT9V022_AEC_AGC_ENABLE, 0x2) < 0)
+ if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0)
return -EIO;
dev_info(&icd->dev, "Setting gain from %d to %lu\n",
- reg_read(icd, MT9V022_ANALOG_GAIN), gain);
- if (reg_write(icd, MT9V022_ANALOG_GAIN, gain) < 0)
+ reg_read(client, MT9V022_ANALOG_GAIN), gain);
+ if (reg_write(client, MT9V022_ANALOG_GAIN, gain) < 0)
return -EIO;
icd->gain = ctrl->value;
}
@@ -614,13 +618,13 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
/* The user wants to set shutter width manually, hope,
* she knows, what she's doing... Switch AEC off. */
- if (reg_clear(icd, MT9V022_AEC_AGC_ENABLE, 0x1) < 0)
+ if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1) < 0)
return -EIO;
dev_dbg(&icd->dev, "Shutter width from %d to %lu\n",
- reg_read(icd, MT9V022_TOTAL_SHUTTER_WIDTH),
+ reg_read(client, MT9V022_TOTAL_SHUTTER_WIDTH),
shutter);
- if (reg_write(icd, MT9V022_TOTAL_SHUTTER_WIDTH,
+ if (reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH,
shutter) < 0)
return -EIO;
icd->exposure = ctrl->value;
@@ -628,17 +632,17 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
break;
case V4L2_CID_AUTOGAIN:
if (ctrl->value)
- data = reg_set(icd, MT9V022_AEC_AGC_ENABLE, 0x2);
+ data = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x2);
else
- data = reg_clear(icd, MT9V022_AEC_AGC_ENABLE, 0x2);
+ data = reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2);
if (data < 0)
return -EIO;
break;
case V4L2_CID_EXPOSURE_AUTO:
if (ctrl->value)
- data = reg_set(icd, MT9V022_AEC_AGC_ENABLE, 0x1);
+ data = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x1);
else
- data = reg_clear(icd, MT9V022_AEC_AGC_ENABLE, 0x1);
+ data = reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1);
if (data < 0)
return -EIO;
break;
@@ -650,8 +654,9 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
* this wasn't our capture interface, so, we wait for the right one */
static int mt9v022_video_probe(struct soc_camera_device *icd)
{
+ struct i2c_client *client = to_i2c_client(icd->control);
struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
- struct soc_camera_link *icl = mt9v022->client->dev.platform_data;
+ struct soc_camera_link *icl = client->dev.platform_data;
s32 data;
int ret;
unsigned long flags;
@@ -661,7 +666,7 @@ static int mt9v022_video_probe(struct soc_camera_device *icd)
return -ENODEV;
/* Read out the chip version register */
- data = reg_read(icd, MT9V022_CHIP_VERSION);
+ data = reg_read(client, MT9V022_CHIP_VERSION);
/* must be 0x1311 or 0x1313 */
if (data != 0x1311 && data != 0x1313) {
@@ -672,12 +677,12 @@ static int mt9v022_video_probe(struct soc_camera_device *icd)
}
/* Soft reset */
- ret = reg_write(icd, MT9V022_RESET, 1);
+ ret = reg_write(client, MT9V022_RESET, 1);
if (ret < 0)
goto ei2c;
/* 15 clock cycles */
udelay(200);
- if (reg_read(icd, MT9V022_RESET)) {
+ if (reg_read(client, MT9V022_RESET)) {
dev_err(&icd->dev, "Resetting MT9V022 failed!\n");
goto ei2c;
}
@@ -685,11 +690,11 @@ static int mt9v022_video_probe(struct soc_camera_device *icd)
/* Set monochrome or colour sensor type */
if (sensor_type && (!strcmp("colour", sensor_type) ||
!strcmp("color", sensor_type))) {
- ret = reg_write(icd, MT9V022_PIXEL_OPERATION_MODE, 4 | 0x11);
+ ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 4 | 0x11);
mt9v022->model = V4L2_IDENT_MT9V022IX7ATC;
icd->formats = mt9v022_colour_formats;
} else {
- ret = reg_write(icd, MT9V022_PIXEL_OPERATION_MODE, 0x11);
+ ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 0x11);
mt9v022->model = V4L2_IDENT_MT9V022IX7ATM;
icd->formats = mt9v022_monochrome_formats;
}
@@ -735,10 +740,13 @@ ei2c:
static void mt9v022_video_remove(struct soc_camera_device *icd)
{
struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
+ struct soc_camera_link *icl = mt9v022->client->dev.platform_data;
dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9v022->client->addr,
icd->dev.parent, icd->vdev);
soc_camera_video_stop(icd);
+ if (icl->free_bus)
+ icl->free_bus(icl);
}
static int mt9v022_probe(struct i2c_client *client,
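
The mt9v022 hunks above route every reg_read()/reg_write()/reg_set()/reg_clear() call through the i2c_client obtained with to_i2c_client(icd->control) instead of the soc_camera_device. A minimal sketch of client-based helpers of this kind, assuming 16-bit big-endian sensor registers; the helper names are illustrative, not the exact in-tree code:

	#include <linux/i2c.h>
	#include <linux/swab.h>

	/* Illustrative only: 16-bit register access through the I2C client. */
	static int sketch_reg_read(struct i2c_client *client, const u8 reg)
	{
		s32 data = i2c_smbus_read_word_data(client, reg);

		/* SMBus delivers the low byte first; the sensor is big-endian. */
		return data < 0 ? data : swab16(data);
	}

	static int sketch_reg_write(struct i2c_client *client, const u8 reg,
				    const u16 data)
	{
		return i2c_smbus_write_word_data(client, reg, swab16(data));
	}
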
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 86fab56c5a2..2d075205bdf 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -102,10 +102,10 @@ struct mx1_buffer {
* Interface. If anyone ever builds hardware to enable more than
* one camera, they will have to modify this driver too */
struct mx1_camera_dev {
+ struct soc_camera_host soc_host;
struct soc_camera_device *icd;
struct mx1_camera_pdata *pdata;
struct mx1_buffer *active;
- struct device *dev;
struct resource *res;
struct clk *clk;
struct list_head capture;
@@ -219,7 +219,7 @@ static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev)
int ret;
if (unlikely(!pcdev->active)) {
- dev_err(pcdev->dev, "DMA End IRQ with no active buffer\n");
+ dev_err(pcdev->soc_host.dev, "DMA End IRQ with no active buffer\n");
return -EFAULT;
}
@@ -229,7 +229,7 @@ static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev)
vbuf->size, pcdev->res->start +
CSIRXR, DMA_MODE_READ);
if (unlikely(ret))
- dev_err(pcdev->dev, "Failed to setup DMA sg list\n");
+ dev_err(pcdev->soc_host.dev, "Failed to setup DMA sg list\n");
return ret;
}
@@ -338,14 +338,14 @@ static void mx1_camera_dma_irq(int channel, void *data)
imx_dma_disable(channel);
if (unlikely(!pcdev->active)) {
- dev_err(pcdev->dev, "DMA End IRQ with no active buffer\n");
+ dev_err(pcdev->soc_host.dev, "DMA End IRQ with no active buffer\n");
goto out;
}
vb = &pcdev->active->vb;
buf = container_of(vb, struct mx1_buffer, vb);
WARN_ON(buf->inwork || list_empty(&vb->queue));
- dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(pcdev->soc_host.dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
mx1_camera_wakeup(pcdev, vb, buf);
@@ -366,7 +366,7 @@ static void mx1_camera_init_videobuf(struct videobuf_queue *q,
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct mx1_camera_dev *pcdev = ici->priv;
- videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, pcdev->dev,
+ videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, ici->dev,
&pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_NONE,
@@ -385,7 +385,7 @@ static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
* they get a nice Oops */
div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
- dev_dbg(pcdev->dev, "System clock %lukHz, target freq %dkHz, "
+ dev_dbg(pcdev->soc_host.dev, "System clock %lukHz, target freq %dkHz, "
"divisor %lu\n", lcdclk / 1000, mclk / 1000, div);
return div;
@@ -395,7 +395,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
{
unsigned int csicr1 = CSICR1_EN;
- dev_dbg(pcdev->dev, "Activate device\n");
+ dev_dbg(pcdev->soc_host.dev, "Activate device\n");
clk_enable(pcdev->clk);
@@ -411,7 +411,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
{
- dev_dbg(pcdev->dev, "Deactivate device\n");
+ dev_dbg(pcdev->soc_host.dev, "Deactivate device\n");
/* Disable all CSI interface */
__raw_writel(0x00, pcdev->base + CSICR1);
@@ -550,7 +550,7 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(&ici->dev, "Format %x not found\n", pix->pixelformat);
+ dev_warn(ici->dev, "Format %x not found\n", pix->pixelformat);
return -EINVAL;
}
@@ -633,12 +633,6 @@ static struct soc_camera_host_ops mx1_soc_camera_host_ops = {
.querycap = mx1_camera_querycap,
};
-/* Should be allocated dynamically too, but we have only one. */
-static struct soc_camera_host mx1_soc_camera_host = {
- .drv_name = DRIVER_NAME,
- .ops = &mx1_soc_camera_host_ops,
-};
-
static struct fiq_handler fh = {
.name = "csi_sof"
};
@@ -673,7 +667,6 @@ static int __init mx1_camera_probe(struct platform_device *pdev)
goto exit_put_clk;
}
- dev_set_drvdata(&pdev->dev, pcdev);
pcdev->res = res;
pcdev->clk = clk;
@@ -707,16 +700,15 @@ static int __init mx1_camera_probe(struct platform_device *pdev)
}
pcdev->irq = irq;
pcdev->base = base;
- pcdev->dev = &pdev->dev;
/* request dma */
pcdev->dma_chan = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_HIGH);
if (pcdev->dma_chan < 0) {
- dev_err(pcdev->dev, "Can't request DMA for MX1 CSI\n");
+ dev_err(&pdev->dev, "Can't request DMA for MX1 CSI\n");
err = -EBUSY;
goto exit_iounmap;
}
- dev_dbg(pcdev->dev, "got DMA channel %d\n", pcdev->dma_chan);
+ dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chan);
imx_dma_setup_handlers(pcdev->dma_chan, mx1_camera_dma_irq, NULL,
pcdev);
@@ -729,7 +721,7 @@ static int __init mx1_camera_probe(struct platform_device *pdev)
/* request irq */
err = claim_fiq(&fh);
if (err) {
- dev_err(pcdev->dev, "Camera interrupt register failed \n");
+ dev_err(&pdev->dev, "Camera interrupt register failed \n");
goto exit_free_dma;
}
@@ -746,10 +738,12 @@ static int __init mx1_camera_probe(struct platform_device *pdev)
mxc_set_irq_fiq(irq, 1);
enable_fiq(irq);
- mx1_soc_camera_host.priv = pcdev;
- mx1_soc_camera_host.dev.parent = &pdev->dev;
- mx1_soc_camera_host.nr = pdev->id;
- err = soc_camera_host_register(&mx1_soc_camera_host);
+ pcdev->soc_host.drv_name = DRIVER_NAME;
+ pcdev->soc_host.ops = &mx1_soc_camera_host_ops;
+ pcdev->soc_host.priv = pcdev;
+ pcdev->soc_host.dev = &pdev->dev;
+ pcdev->soc_host.nr = pdev->id;
+ err = soc_camera_host_register(&pcdev->soc_host);
if (err)
goto exit_free_irq;
@@ -777,7 +771,9 @@ exit:
static int __exit mx1_camera_remove(struct platform_device *pdev)
{
- struct mx1_camera_dev *pcdev = platform_get_drvdata(pdev);
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct mx1_camera_dev *pcdev = container_of(soc_host,
+ struct mx1_camera_dev, soc_host);
struct resource *res;
imx_dma_free(pcdev->dma_chan);
@@ -787,7 +783,7 @@ static int __exit mx1_camera_remove(struct platform_device *pdev)
clk_put(pcdev->clk);
- soc_camera_host_unregister(&mx1_soc_camera_host);
+ soc_camera_host_unregister(soc_host);
iounmap(pcdev->base);
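
The mx1_camera hunks above drop the static soc_camera_host and the platform drvdata pointer in favour of embedding the host in the per-device structure and recovering it with container_of() in the remove path. A generic sketch of that embed-and-recover pattern, with purely illustrative type names:

	#include <linux/device.h>	/* struct device */
	#include <linux/kernel.h>	/* container_of() */

	/* Host state embedded in the per-device structure instead of being static. */
	struct sketch_host {
		struct device *dev;
	};

	struct sketch_camera_dev {
		struct sketch_host soc_host;	/* embedded member, no extra allocation */
		int irq;
	};

	/* Recover the enclosing driver structure from the embedded host. */
	static struct sketch_camera_dev *to_sketch_camera_dev(struct sketch_host *host)
	{
		return container_of(host, struct sketch_camera_dev, soc_host);
	}
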
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 2d0781118eb..e605c076ed8 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -87,7 +87,6 @@ struct mx3_camera_buffer {
* @soc_host: embedded soc_host object
*/
struct mx3_camera_dev {
- struct device *dev;
/*
* i.MX3x is only supposed to handle one camera on its Camera Sensor
* Interface. If anyone ever builds hardware to enable more than one
@@ -431,7 +430,7 @@ static void mx3_camera_init_videobuf(struct videobuf_queue *q,
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
- videobuf_queue_dma_contig_init(q, &mx3_videobuf_ops, mx3_cam->dev,
+ videobuf_queue_dma_contig_init(q, &mx3_videobuf_ops, ici->dev,
&mx3_cam->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_NONE,
@@ -599,7 +598,8 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam,
*flags |= SOCAM_DATAWIDTH_4;
break;
default:
- dev_info(mx3_cam->dev, "Unsupported bus width %d\n", buswidth);
+ dev_info(mx3_cam->soc_host.dev, "Unsupported bus width %d\n",
+ buswidth);
return -EINVAL;
}
@@ -614,7 +614,7 @@ static int mx3_camera_try_bus_param(struct soc_camera_device *icd,
unsigned long bus_flags, camera_flags;
int ret = test_platform_param(mx3_cam, depth, &bus_flags);
- dev_dbg(&ici->dev, "requested bus width %d bit: %d\n", depth, ret);
+ dev_dbg(ici->dev, "requested bus width %d bit: %d\n", depth, ret);
if (ret < 0)
return ret;
@@ -637,7 +637,7 @@ static bool chan_filter(struct dma_chan *chan, void *arg)
if (!rq)
return false;
- pdata = rq->mx3_cam->dev->platform_data;
+ pdata = rq->mx3_cam->soc_host.dev->platform_data;
return rq->id == chan->chan_id &&
pdata->dma_dev == chan->device->dev;
@@ -697,7 +697,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, int idx,
xlate->cam_fmt = icd->formats + idx;
xlate->buswidth = buswidth;
xlate++;
- dev_dbg(&ici->dev, "Providing format %s using %s\n",
+ dev_dbg(ici->dev, "Providing format %s using %s\n",
mx3_camera_formats[0].name,
icd->formats[idx].name);
}
@@ -709,7 +709,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, int idx,
xlate->cam_fmt = icd->formats + idx;
xlate->buswidth = buswidth;
xlate++;
- dev_dbg(&ici->dev, "Providing format %s using %s\n",
+ dev_dbg(ici->dev, "Providing format %s using %s\n",
mx3_camera_formats[0].name,
icd->formats[idx].name);
}
@@ -722,7 +722,7 @@ passthrough:
xlate->cam_fmt = icd->formats + idx;
xlate->buswidth = buswidth;
xlate++;
- dev_dbg(&ici->dev,
+ dev_dbg(ici->dev,
"Providing format %s in pass-through mode\n",
icd->formats[idx].name);
}
@@ -829,7 +829,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(&ici->dev, "Format %x not found\n", pix->pixelformat);
+ dev_warn(ici->dev, "Format %x not found\n", pix->pixelformat);
return -EINVAL;
}
@@ -866,7 +866,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (pixfmt && !xlate) {
- dev_warn(&ici->dev, "Format %x not found\n", pixfmt);
+ dev_warn(ici->dev, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -933,11 +933,11 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
- dev_warn(&ici->dev, "Format %x not found\n", pixfmt);
+ dev_warn(ici->dev, "Format %x not found\n", pixfmt);
return -EINVAL;
}
- dev_dbg(&ici->dev, "requested bus width %d bit: %d\n",
+ dev_dbg(ici->dev, "requested bus width %d bit: %d\n",
icd->buswidth, ret);
if (ret < 0)
@@ -947,7 +947,7 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags);
if (!common_flags) {
- dev_dbg(&ici->dev, "no common flags: camera %lx, host %lx\n",
+ dev_dbg(ici->dev, "no common flags: camera %lx, host %lx\n",
camera_flags, bus_flags);
return -EINVAL;
}
@@ -1054,7 +1054,7 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
csi_reg_write(mx3_cam, sens_conf | dw, CSI_SENS_CONF);
- dev_dbg(&ici->dev, "Set SENS_CONF to %x\n", sens_conf | dw);
+ dev_dbg(ici->dev, "Set SENS_CONF to %x\n", sens_conf | dw);
return 0;
}
@@ -1074,7 +1074,7 @@ static struct soc_camera_host_ops mx3_soc_camera_host_ops = {
.set_bus_param = mx3_camera_set_bus_param,
};
-static int mx3_camera_probe(struct platform_device *pdev)
+static int __devinit mx3_camera_probe(struct platform_device *pdev)
{
struct mx3_camera_dev *mx3_cam;
struct resource *res;
@@ -1102,8 +1102,6 @@ static int mx3_camera_probe(struct platform_device *pdev)
goto eclkget;
}
- dev_set_drvdata(&pdev->dev, mx3_cam);
-
mx3_cam->pdata = pdev->dev.platform_data;
mx3_cam->platform_flags = mx3_cam->pdata->flags;
if (!(mx3_cam->platform_flags & (MX3_CAMERA_DATAWIDTH_4 |
@@ -1135,14 +1133,14 @@ static int mx3_camera_probe(struct platform_device *pdev)
}
mx3_cam->base = base;
- mx3_cam->dev = &pdev->dev;
soc_host = &mx3_cam->soc_host;
soc_host->drv_name = MX3_CAM_DRV_NAME;
soc_host->ops = &mx3_soc_camera_host_ops;
soc_host->priv = mx3_cam;
- soc_host->dev.parent = &pdev->dev;
+ soc_host->dev = &pdev->dev;
soc_host->nr = pdev->id;
+
err = soc_camera_host_register(soc_host);
if (err)
goto ecamhostreg;
@@ -1165,11 +1163,13 @@ egetres:
static int __devexit mx3_camera_remove(struct platform_device *pdev)
{
- struct mx3_camera_dev *mx3_cam = platform_get_drvdata(pdev);
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct mx3_camera_dev *mx3_cam = container_of(soc_host,
+ struct mx3_camera_dev, soc_host);
clk_put(mx3_cam->clk);
- soc_camera_host_unregister(&mx3_cam->soc_host);
+ soc_camera_host_unregister(soc_host);
iounmap(mx3_cam->base);
@@ -1194,11 +1194,11 @@ static struct platform_driver mx3_camera_driver = {
.name = MX3_CAM_DRV_NAME,
},
.probe = mx3_camera_probe,
- .remove = __exit_p(mx3_camera_remove),
+ .remove = __devexit_p(mx3_camera_remove),
};
-static int __devinit mx3_camera_init(void)
+static int __init mx3_camera_init(void)
{
return platform_driver_register(&mx3_camera_driver);
}
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index 3be5a71bdac..35890e8b243 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -453,7 +453,7 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));
- if (i->index < 0 || i->index >= MXB_INPUTS)
+ if (i->index >= MXB_INPUTS)
return -EINVAL;
memcpy(i, &mxb_inputs[i->index], sizeof(struct v4l2_input));
return 0;
@@ -616,7 +616,7 @@ static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct mxb *mxb = (struct mxb *)dev->ext_priv;
- if (a->index < 0 || a->index > MXB_INPUTS) {
+ if (a->index > MXB_INPUTS) {
DEB_D(("VIDIOC_G_AUDIO %d out of range.\n", a->index));
return -EINVAL;
}
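
Both mxb hunks simply drop "< 0" comparisons on index fields that are unsigned (__u32), since such tests can never be true. A one-line sketch of the remaining bound check, with illustrative names:

	#include <linux/errno.h>
	#include <linux/types.h>

	/* index is a __u32, so only the upper bound needs checking. */
	static int sketch_check_index(__u32 index, __u32 count)
	{
		return index >= count ? -EINVAL : 0;
	}
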
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 9af5532db14..08cfd3e4ae8 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -112,6 +112,8 @@ static int framedrop = -1;
static int fastset;
static int force_palette;
static int backlight;
+/* Bitmask marking allocated devices from 0 to OV511_MAX_UNIT_VIDEO */
+static unsigned long ov511_devused;
static int unit_video[OV511_MAX_UNIT_VIDEO];
static int remove_zeros;
static int mirror;
@@ -5720,7 +5722,7 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_interface_descriptor *idesc;
struct usb_ov511 *ov;
- int i;
+ int i, rc, nr;
PDEBUG(1, "probing for device...");
@@ -5845,33 +5847,41 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
ov->vdev->parent = &intf->dev;
video_set_drvdata(ov->vdev, ov);
- for (i = 0; i < OV511_MAX_UNIT_VIDEO; i++) {
- /* Minor 0 cannot be specified; assume user wants autodetect */
- if (unit_video[i] == 0)
- break;
+ mutex_lock(&ov->lock);
- if (video_register_device(ov->vdev, VFL_TYPE_GRABBER,
- unit_video[i]) >= 0) {
- break;
- }
- }
+ /* Check to see next free device and mark as used */
+ nr = find_first_zero_bit(&ov511_devused, OV511_MAX_UNIT_VIDEO);
+
+ /* Registers device */
+ if (unit_video[nr] != 0)
+ rc = video_register_device(ov->vdev, VFL_TYPE_GRABBER,
+ unit_video[nr]);
+ else
+ rc = video_register_device(ov->vdev, VFL_TYPE_GRABBER, -1);
- /* Use the next available one */
- if ((ov->vdev->minor == -1) &&
- video_register_device(ov->vdev, VFL_TYPE_GRABBER, -1) < 0) {
+ if (rc < 0) {
err("video_register_device failed");
+ mutex_unlock(&ov->lock);
goto error;
}
+ /* Mark device as used */
+ ov511_devused |= 1 << nr;
+ ov->nr = nr;
+
dev_info(&intf->dev, "Device at %s registered to minor %d\n",
ov->usb_path, ov->vdev->minor);
usb_set_intfdata(intf, ov);
if (ov_create_sysfs(ov->vdev)) {
err("ov_create_sysfs failed");
+ ov511_devused &= ~(1 << nr);
+ mutex_unlock(&ov->lock);
goto error;
}
+ mutex_lock(&ov->lock);
+
return 0;
error:
@@ -5906,10 +5916,16 @@ ov51x_disconnect(struct usb_interface *intf)
PDEBUG(3, "");
+ mutex_lock(&ov->lock);
usb_set_intfdata (intf, NULL);
- if (!ov)
+ if (!ov) {
+ mutex_unlock(&ov->lock);
return;
+ }
+
+ /* Free device number */
+ ov511_devused &= ~(1 << ov->nr);
if (ov->vdev)
video_unregister_device(ov->vdev);
@@ -5927,6 +5943,7 @@ ov51x_disconnect(struct usb_interface *intf)
ov->streaming = 0;
ov51x_unlink_isoc(ov);
+ mutex_unlock(&ov->lock);
ov->dev = NULL;
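
The ov511 probe/disconnect changes above replace the linear minor-number scan with a bitmask of used device numbers: the lowest free slot is claimed with find_first_zero_bit() under the device lock and released again on disconnect. A small sketch of that allocation scheme, with illustrative mask and limit names:

	#include <linux/bitops.h>	/* find_first_zero_bit() */

	#define SKETCH_MAX_UNITS	16	/* illustrative limit */

	static unsigned long sketch_devused;	/* one bit per registered unit */

	/* Claim the lowest free unit number; the caller is expected to hold the lock. */
	static int sketch_claim_unit(void)
	{
		int nr = find_first_zero_bit(&sketch_devused, SKETCH_MAX_UNITS);

		if (nr >= SKETCH_MAX_UNITS)
			return -1;	/* all units in use */
		sketch_devused |= 1UL << nr;
		return nr;
	}

	/* Release a unit number on disconnect. */
	static void sketch_release_unit(int nr)
	{
		sketch_devused &= ~(1UL << nr);
	}
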
diff --git a/drivers/media/video/ov511.h b/drivers/media/video/ov511.h
index 70d99e52329..c450c92468d 100644
--- a/drivers/media/video/ov511.h
+++ b/drivers/media/video/ov511.h
@@ -494,6 +494,9 @@ struct usb_ov511 {
int has_decoder; /* Device has a video decoder */
int pal; /* Device is designed for PAL resolution */
+ /* ov511 device number ID */
+ int nr; /* Stores a device number */
+
/* I2C interface */
struct mutex i2c_lock; /* Protect I2C controller regs */
unsigned char primary_i2c_slave; /* I2C write id of sensor */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
index 1cb6a260e8b..336a20eded0 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
@@ -71,6 +71,7 @@ static const struct pvr2_device_desc pvr2_device_29xxx = {
.flag_has_svideo = !0,
.signal_routing_scheme = PVR2_ROUTING_SCHEME_HAUPPAUGE,
.led_scheme = PVR2_LED_SCHEME_HAUPPAUGE,
+ .ir_scheme = PVR2_IR_SCHEME_29XXX,
};
@@ -284,6 +285,11 @@ static struct tda10048_config hauppauge_tda10048_config = {
.output_mode = TDA10048_PARALLEL_OUTPUT,
.fwbulkwritelen = TDA10048_BULKWRITE_50,
.inversion = TDA10048_INVERSION_ON,
+ .dtv6_if_freq_khz = TDA10048_IF_3300,
+ .dtv7_if_freq_khz = TDA10048_IF_3800,
+ .dtv8_if_freq_khz = TDA10048_IF_4300,
+ .clk_freq_khz = TDA10048_CLK_16000,
+ .disable_gate_access = 1,
};
static struct tda829x_config tda829x_no_probe = {
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.h b/drivers/media/video/pvrusb2/pvrusb2-devattr.h
index 3e553389cbc..ea04ecf8aa3 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.h
@@ -69,6 +69,7 @@ struct pvr2_string_table {
#define PVR2_ROUTING_SCHEME_HAUPPAUGE 0
#define PVR2_ROUTING_SCHEME_GOTVIEW 1
#define PVR2_ROUTING_SCHEME_ONAIR 2
+#define PVR2_ROUTING_SCHEME_AV400 3
#define PVR2_DIGITAL_SCHEME_NONE 0
#define PVR2_DIGITAL_SCHEME_HAUPPAUGE 1
@@ -78,8 +79,10 @@ struct pvr2_string_table {
#define PVR2_LED_SCHEME_HAUPPAUGE 1
#define PVR2_IR_SCHEME_NONE 0
-#define PVR2_IR_SCHEME_24XXX 1
-#define PVR2_IR_SCHEME_ZILOG 2
+#define PVR2_IR_SCHEME_24XXX 1 /* FX2-controlled IR */
+#define PVR2_IR_SCHEME_ZILOG 2 /* HVR-1950 style (must be taken out of reset) */
+#define PVR2_IR_SCHEME_24XXX_MCE 3 /* 24xxx MCE device */
+#define PVR2_IR_SCHEME_29XXX 4 /* Original 29xxx device */
/* This describes a particular hardware type (except for the USB device ID
which must live in a separate structure due to environmental
@@ -162,19 +165,9 @@ struct pvr2_device_desc {
ensure that it is found. */
unsigned int flag_has_wm8775:1;
- /* Indicate any specialized IR scheme that might need to be
- supported by this driver. If not set, then it is assumed that
- IR can work without help from the driver (which is frequently
- the case). This is otherwise set to one of
- PVR2_IR_SCHEME_xxxx. For "xxxx", the value "24XXX" indicates a
- Hauppauge 24xxx class device which has an FPGA-hosted IR
- receiver that can only be reached via FX2 command codes. In
- that case the pvrusb2 driver will emulate the behavior of the
- older 29xxx device's IR receiver (a "virtual" I2C chip) in terms
- of those command codes. For the value "ZILOG", we're dealing
- with an IR chip that must be taken out of reset via another FX2
- command code (which is the case for HVR-1950 devices). */
- unsigned int ir_scheme:2;
+ /* Indicate IR scheme of hardware. If not set, then it is assumed
+ that IR can work without any help from the driver. */
+ unsigned int ir_scheme:3;
/* These bits define which kinds of sources the device can handle.
Note: Digital tuner presence is inferred by the
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
index 5d75eb5211b..5b152ff20bd 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -200,6 +200,9 @@ struct pvr2_hdw {
int i2c_cx25840_hack_state;
int i2c_linked;
+ /* IR related */
+ unsigned int ir_scheme_active; /* IR scheme as seen from the outside */
+
/* Frequency table */
unsigned int freqTable[FREQTABLE_SIZE];
unsigned int freqProgSlot;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index add3395d324..0c745b142fb 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -142,6 +142,15 @@ static const unsigned char *module_i2c_addresses[] = {
};
+static const char *ir_scheme_names[] = {
+ [PVR2_IR_SCHEME_NONE] = "none",
+ [PVR2_IR_SCHEME_29XXX] = "29xxx",
+ [PVR2_IR_SCHEME_24XXX] = "24xxx (29xxx emulation)",
+ [PVR2_IR_SCHEME_24XXX_MCE] = "24xxx (MCE device)",
+ [PVR2_IR_SCHEME_ZILOG] = "Zilog",
+};
+
+
/* Define the list of additional controls we'll dynamically construct based
on query of the cx2341x module. */
struct pvr2_mpeg_ids {
@@ -2170,7 +2179,7 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
}
/* Take the IR chip out of reset, if appropriate */
- if (hdw->hdw_desc->ir_scheme == PVR2_IR_SCHEME_ZILOG) {
+ if (hdw->ir_scheme_active == PVR2_IR_SCHEME_ZILOG) {
pvr2_issue_simple_cmd(hdw,
FX2CMD_HCW_ZILOG_RESET |
(1 << 8) |
@@ -2451,6 +2460,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
GFP_KERNEL);
if (!hdw->controls) goto fail;
hdw->hdw_desc = hdw_desc;
+ hdw->ir_scheme_active = hdw->hdw_desc->ir_scheme;
for (idx = 0; idx < hdw->control_cnt; idx++) {
cptr = hdw->controls + idx;
cptr->hdw = hdw;
@@ -4809,6 +4819,12 @@ static unsigned int pvr2_hdw_report_unlocked(struct pvr2_hdw *hdw,int which,
stats.buffers_processed,
stats.buffers_failed);
}
+ case 6: {
+ unsigned int id = hdw->ir_scheme_active;
+ return scnprintf(buf, acnt, "ir scheme: id=%d %s", id,
+ (id >= ARRAY_SIZE(ir_scheme_names) ?
+ "?" : ir_scheme_names[id]));
+ }
default: break;
}
return 0;
@@ -4825,65 +4841,35 @@ static unsigned int pvr2_hdw_report_clients(struct pvr2_hdw *hdw,
unsigned int tcnt = 0;
unsigned int ccnt;
struct i2c_client *client;
- struct list_head *item;
- void *cd;
const char *p;
unsigned int id;
- ccnt = scnprintf(buf, acnt, "Associated v4l2-subdev drivers:");
+ ccnt = scnprintf(buf, acnt, "Associated v4l2-subdev drivers and I2C clients:\n");
tcnt += ccnt;
v4l2_device_for_each_subdev(sd, &hdw->v4l2_dev) {
id = sd->grp_id;
p = NULL;
if (id < ARRAY_SIZE(module_names)) p = module_names[id];
if (p) {
- ccnt = scnprintf(buf + tcnt, acnt - tcnt, " %s", p);
+ ccnt = scnprintf(buf + tcnt, acnt - tcnt, " %s:", p);
tcnt += ccnt;
} else {
ccnt = scnprintf(buf + tcnt, acnt - tcnt,
- " (unknown id=%u)", id);
+ " (unknown id=%u):", id);
tcnt += ccnt;
}
- }
- ccnt = scnprintf(buf + tcnt, acnt - tcnt, "\n");
- tcnt += ccnt;
-
- ccnt = scnprintf(buf + tcnt, acnt - tcnt, "I2C clients:\n");
- tcnt += ccnt;
-
- mutex_lock(&hdw->i2c_adap.clist_lock);
- list_for_each(item, &hdw->i2c_adap.clients) {
- client = list_entry(item, struct i2c_client, list);
- ccnt = scnprintf(buf + tcnt, acnt - tcnt,
- " %s: i2c=%02x", client->name, client->addr);
- tcnt += ccnt;
- cd = i2c_get_clientdata(client);
- v4l2_device_for_each_subdev(sd, &hdw->v4l2_dev) {
- if (cd == sd) {
- id = sd->grp_id;
- p = NULL;
- if (id < ARRAY_SIZE(module_names)) {
- p = module_names[id];
- }
- if (p) {
- ccnt = scnprintf(buf + tcnt,
- acnt - tcnt,
- " subdev=%s", p);
- tcnt += ccnt;
- } else {
- ccnt = scnprintf(buf + tcnt,
- acnt - tcnt,
- " subdev= id %u)",
- id);
- tcnt += ccnt;
- }
- break;
- }
+ client = v4l2_get_subdevdata(sd);
+ if (client) {
+ ccnt = scnprintf(buf + tcnt, acnt - tcnt,
+ " %s @ %02x\n", client->name,
+ client->addr);
+ tcnt += ccnt;
+ } else {
+ ccnt = scnprintf(buf + tcnt, acnt - tcnt,
+ " no i2c client\n");
+ tcnt += ccnt;
}
- ccnt = scnprintf(buf + tcnt, acnt - tcnt, "\n");
- tcnt += ccnt;
}
- mutex_unlock(&hdw->i2c_adap.clist_lock);
return tcnt;
}
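
The report_clients hunk above stops walking the adapter's raw client list and instead iterates the registered v4l2 sub-devices, fetching the I2C client behind each with v4l2_get_subdevdata(), which holds the client for sub-devices registered over I2C. A minimal sketch of that iteration, with the printing trimmed to pr_info():

	#include <linux/i2c.h>
	#include <linux/kernel.h>
	#include <media/v4l2-device.h>

	/* Walk the registered sub-devices and report the I2C client behind each. */
	static void sketch_list_subdevs(struct v4l2_device *v4l2_dev)
	{
		struct v4l2_subdev *sd;

		v4l2_device_for_each_subdev(sd, v4l2_dev) {
			struct i2c_client *client = v4l2_get_subdevdata(sd);

			if (client)
				pr_info("subdev %s at 0x%02x\n",
					client->name, client->addr);
			else
				pr_info("subdev with no i2c client\n");
		}
	}
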
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index 9af282f9e76..610bd848df2 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -42,6 +42,18 @@ static int ir_mode[PVR_NUM] = { [0 ... PVR_NUM-1] = 1 };
module_param_array(ir_mode, int, NULL, 0444);
MODULE_PARM_DESC(ir_mode,"specify: 0=disable IR reception, 1=normal IR");
+static int pvr2_disable_ir_video;
+module_param_named(disable_autoload_ir_video, pvr2_disable_ir_video,
+ int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(disable_autoload_ir_video,
+ "1=do not try to autoload ir_video IR receiver");
+
+/* Mapping of IR schemes to known I2C addresses - if any */
+static const unsigned char ir_video_addresses[] = {
+ [PVR2_IR_SCHEME_29XXX] = 0x18,
+ [PVR2_IR_SCHEME_24XXX] = 0x18,
+};
+
static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
u8 i2c_addr, /* I2C address we're talking to */
u8 *data, /* Data to write */
@@ -559,6 +571,31 @@ static void do_i2c_scan(struct pvr2_hdw *hdw)
printk(KERN_INFO "%s: i2c scan done.\n", hdw->name);
}
+static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
+{
+ struct i2c_board_info info;
+ unsigned char addr = 0;
+ if (pvr2_disable_ir_video) {
+ pvr2_trace(PVR2_TRACE_INFO,
+ "Automatic binding of ir_video has been disabled.");
+ return;
+ }
+ if (hdw->ir_scheme_active < ARRAY_SIZE(ir_video_addresses)) {
+ addr = ir_video_addresses[hdw->ir_scheme_active];
+ }
+ if (!addr) {
+ /* The device either doesn't support I2C-based IR or we
+ don't know (yet) how to operate IR on the device. */
+ return;
+ }
+ pvr2_trace(PVR2_TRACE_INFO,
+ "Binding ir_video to i2c address 0x%02x.", addr);
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+ info.addr = addr;
+ i2c_new_device(&hdw->i2c_adap, &info);
+}
+
void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
{
unsigned int idx;
@@ -574,7 +611,9 @@ void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
printk(KERN_INFO "%s: IR disabled\n",hdw->name);
hdw->i2c_func[0x18] = i2c_black_hole;
} else if (ir_mode[hdw->unit_number] == 1) {
- if (hdw->hdw_desc->ir_scheme == PVR2_IR_SCHEME_24XXX) {
+ if (hdw->ir_scheme_active == PVR2_IR_SCHEME_24XXX) {
+ /* Set up translation so that our IR looks like a
+ 29xxx device */
hdw->i2c_func[0x18] = i2c_24xxx_ir;
}
}
@@ -597,15 +636,23 @@ void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
i2c_add_adapter(&hdw->i2c_adap);
if (hdw->i2c_func[0x18] == i2c_24xxx_ir) {
/* Probe for a different type of IR receiver on this
- device. If present, disable the emulated IR receiver. */
+ device. This is really the only way to differentiate
+ older 24xxx devices from 24xxx variants that include an
+ IR blaster. If the IR blaster is present, the IR
+ receiver is part of that chip and thus we must disable
+ the emulated IR receiver. */
if (do_i2c_probe(hdw, 0x71)) {
pvr2_trace(PVR2_TRACE_INFO,
"Device has newer IR hardware;"
" disabling unneeded virtual IR device");
hdw->i2c_func[0x18] = NULL;
+ /* Remember that this is a different device... */
+ hdw->ir_scheme_active = PVR2_IR_SCHEME_24XXX_MCE;
}
}
if (i2c_scan) do_i2c_scan(hdw);
+
+ pvr2_i2c_register_ir(hdw);
}
void pvr2_i2c_core_done(struct pvr2_hdw *hdw)
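
The core-init hunk above binds the ir_video receiver by filling in an i2c_board_info and handing it to i2c_new_device() once the adapter is registered, keyed off the active IR scheme. A minimal sketch of that binding step, with the address passed in directly rather than looked up from the scheme table:

	#include <linux/i2c.h>
	#include <linux/string.h>

	/* Instantiate an "ir_video" client at addr on an already-registered adapter. */
	static void sketch_register_ir(struct i2c_adapter *adap, unsigned short addr)
	{
		struct i2c_board_info info;

		memset(&info, 0, sizeof(info));
		strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
		info.addr = addr;
		i2c_new_device(adap, &info);	/* returns NULL on failure; not checked here */
	}
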
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
index 299c1cbc383..6c23456e0bd 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
@@ -539,7 +539,7 @@ static void class_dev_destroy(struct pvr2_sysfs *sfp)
&sfp->attr_unit_number);
}
pvr2_sysfs_trace("Destroying class_dev id=%p",sfp->class_dev);
- sfp->class_dev->driver_data = NULL;
+ dev_set_drvdata(sfp->class_dev, NULL);
device_unregister(sfp->class_dev);
sfp->class_dev = NULL;
}
@@ -549,7 +549,7 @@ static ssize_t v4l_minor_number_show(struct device *class_dev,
struct device_attribute *attr, char *buf)
{
struct pvr2_sysfs *sfp;
- sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+ sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
return scnprintf(buf,PAGE_SIZE,"%d\n",
pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw,
@@ -561,7 +561,7 @@ static ssize_t bus_info_show(struct device *class_dev,
struct device_attribute *attr, char *buf)
{
struct pvr2_sysfs *sfp;
- sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+ sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
return scnprintf(buf,PAGE_SIZE,"%s\n",
pvr2_hdw_get_bus_info(sfp->channel.hdw));
@@ -572,7 +572,7 @@ static ssize_t hdw_name_show(struct device *class_dev,
struct device_attribute *attr, char *buf)
{
struct pvr2_sysfs *sfp;
- sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+ sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
return scnprintf(buf,PAGE_SIZE,"%s\n",
pvr2_hdw_get_type(sfp->channel.hdw));
@@ -583,7 +583,7 @@ static ssize_t hdw_desc_show(struct device *class_dev,
struct device_attribute *attr, char *buf)
{
struct pvr2_sysfs *sfp;
- sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+ sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
return scnprintf(buf,PAGE_SIZE,"%s\n",
pvr2_hdw_get_desc(sfp->channel.hdw));
@@ -595,7 +595,7 @@ static ssize_t v4l_radio_minor_number_show(struct device *class_dev,
char *buf)
{
struct pvr2_sysfs *sfp;
- sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+ sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
return scnprintf(buf,PAGE_SIZE,"%d\n",
pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw,
@@ -607,7 +607,7 @@ static ssize_t unit_number_show(struct device *class_dev,
struct device_attribute *attr, char *buf)
{
struct pvr2_sysfs *sfp;
- sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+ sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
return scnprintf(buf,PAGE_SIZE,"%d\n",
pvr2_hdw_get_unit_number(sfp->channel.hdw));
@@ -635,7 +635,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp,
class_dev->parent = &usb_dev->dev;
sfp->class_dev = class_dev;
- class_dev->driver_data = sfp;
+ dev_set_drvdata(class_dev, sfp);
ret = device_register(class_dev);
if (ret) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
@@ -792,7 +792,7 @@ static ssize_t debuginfo_show(struct device *class_dev,
struct device_attribute *attr, char *buf)
{
struct pvr2_sysfs *sfp;
- sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+ sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
pvr2_hdw_trigger_module_log(sfp->channel.hdw);
return pvr2_debugifc_print_info(sfp->channel.hdw,buf,PAGE_SIZE);
@@ -803,7 +803,7 @@ static ssize_t debugcmd_show(struct device *class_dev,
struct device_attribute *attr, char *buf)
{
struct pvr2_sysfs *sfp;
- sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+ sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
return pvr2_debugifc_print_status(sfp->channel.hdw,buf,PAGE_SIZE);
}
@@ -816,7 +816,7 @@ static ssize_t debugcmd_store(struct device *class_dev,
struct pvr2_sysfs *sfp;
int ret;
- sfp = (struct pvr2_sysfs *)class_dev->driver_data;
+ sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
ret = pvr2_debugifc_docmd(sfp->channel.hdw,buf,count);
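
The sysfs hunks above stop poking class_dev->driver_data directly and go through the dev_set_drvdata()/dev_get_drvdata() accessors, keeping the code independent of how struct device stores its private pointer. A sketch of the accessor pattern in a show() callback, with illustrative attribute and state names:

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/kernel.h>	/* scnprintf() */

	struct sketch_state {
		int unit_number;
	};

	/* Attribute callback fetching its private state via the drvdata accessor. */
	static ssize_t sketch_unit_show(struct device *dev,
					struct device_attribute *attr, char *buf)
	{
		struct sketch_state *st = dev_get_drvdata(dev);	/* paired with dev_set_drvdata() */

		if (!st)
			return -EINVAL;
		return scnprintf(buf, PAGE_SIZE, "%d\n", st->unit_number);
	}
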
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 9e0f2b07b93..2d8825e5b1b 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -90,7 +90,7 @@ static struct v4l2_capability pvr_capability ={
.driver = "pvrusb2",
.card = "Hauppauge WinTV pvr-usb2",
.bus_info = "usb",
- .version = KERNEL_VERSION(0,8,0),
+ .version = KERNEL_VERSION(0, 9, 0),
.capabilities = (V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
V4L2_CAP_READWRITE),
@@ -267,7 +267,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(&tmp,0,sizeof(tmp));
tmp.index = vi->index;
ret = 0;
- if ((vi->index < 0) || (vi->index >= fh->input_cnt)) {
+ if (vi->index >= fh->input_cnt) {
ret = -EINVAL;
break;
}
@@ -331,7 +331,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_S_INPUT:
{
struct v4l2_input *vi = (struct v4l2_input *)arg;
- if ((vi->index < 0) || (vi->index >= fh->input_cnt)) {
+ if (vi->index >= fh->input_cnt) {
ret = -ERANGE;
break;
}
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 7c542caf248..db25c3034c1 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -601,7 +601,7 @@ static void pwc_snapshot_button(struct pwc_device *pdev, int down)
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
if (pdev->button_dev) {
- input_report_key(pdev->button_dev, BTN_0, down);
+ input_report_key(pdev->button_dev, KEY_CAMERA, down);
input_sync(pdev->button_dev);
}
#endif
@@ -1783,7 +1783,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
return -ENOMEM;
}
memcpy(pdev->vdev, &pwc_template, sizeof(pwc_template));
- pdev->vdev->parent = &(udev->dev);
+ pdev->vdev->parent = &intf->dev;
strcpy(pdev->vdev->name, name);
video_set_drvdata(pdev->vdev, pdev);
@@ -1847,7 +1847,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
usb_to_input_id(pdev->udev, &pdev->button_dev->id);
pdev->button_dev->dev.parent = &pdev->udev->dev;
pdev->button_dev->evbit[0] = BIT_MASK(EV_KEY);
- pdev->button_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0);
+ pdev->button_dev->keybit[BIT_WORD(KEY_CAMERA)] = BIT_MASK(KEY_CAMERA);
rc = input_register_device(pdev->button_dev);
if (rc) {
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index bc0a464295c..2876ce08451 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -1107,7 +1107,7 @@ long pwc_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return -EINVAL;
if (buf->memory != V4L2_MEMORY_MMAP)
return -EINVAL;
- if (buf->index < 0 || buf->index >= pwc_mbufs)
+ if (buf->index >= pwc_mbufs)
return -EINVAL;
buf->flags |= V4L2_BUF_FLAG_QUEUED;
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index c639845460f..f60de40fd21 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -202,7 +202,7 @@ struct pxa_buffer {
};
struct pxa_camera_dev {
- struct device *dev;
+ struct soc_camera_host soc_host;
/* PXA27x is only supposed to handle one camera on its Quick Capture
* interface. If anyone ever builds hardware to enable more than
* one camera, they will have to modify this driver too */
@@ -261,7 +261,6 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
{
struct soc_camera_device *icd = vq->priv_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
- struct pxa_camera_dev *pcdev = ici->priv;
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
int i;
@@ -278,7 +277,7 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) {
if (buf->dmas[i].sg_cpu)
- dma_free_coherent(pcdev->dev, buf->dmas[i].sg_size,
+ dma_free_coherent(ici->dev, buf->dmas[i].sg_size,
buf->dmas[i].sg_cpu,
buf->dmas[i].sg_dma);
buf->dmas[i].sg_cpu = NULL;
@@ -338,14 +337,14 @@ static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
int dma_len = 0, xfer_len = 0;
if (pxa_dma->sg_cpu)
- dma_free_coherent(pcdev->dev, pxa_dma->sg_size,
+ dma_free_coherent(pcdev->soc_host.dev, pxa_dma->sg_size,
pxa_dma->sg_cpu, pxa_dma->sg_dma);
sglen = calculate_dma_sglen(*sg_first, dma->sglen,
*sg_first_ofs, size);
pxa_dma->sg_size = (sglen + 1) * sizeof(struct pxa_dma_desc);
- pxa_dma->sg_cpu = dma_alloc_coherent(pcdev->dev, pxa_dma->sg_size,
+ pxa_dma->sg_cpu = dma_alloc_coherent(pcdev->soc_host.dev, pxa_dma->sg_size,
&pxa_dma->sg_dma, GFP_KERNEL);
if (!pxa_dma->sg_cpu)
return -ENOMEM;
@@ -353,7 +352,7 @@ static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
pxa_dma->sglen = sglen;
offset = *sg_first_ofs;
- dev_dbg(pcdev->dev, "DMA: sg_first=%p, sglen=%d, ofs=%d, dma.desc=%x\n",
+ dev_dbg(pcdev->soc_host.dev, "DMA: sg_first=%p, sglen=%d, ofs=%d, dma.desc=%x\n",
*sg_first, sglen, *sg_first_ofs, pxa_dma->sg_dma);
@@ -376,7 +375,7 @@ static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
pxa_dma->sg_cpu[i].ddadr =
pxa_dma->sg_dma + (i + 1) * sizeof(struct pxa_dma_desc);
- dev_vdbg(pcdev->dev, "DMA: desc.%08x->@phys=0x%08x, len=%d\n",
+ dev_vdbg(pcdev->soc_host.dev, "DMA: desc.%08x->@phys=0x%08x, len=%d\n",
pxa_dma->sg_dma + i * sizeof(struct pxa_dma_desc),
sg_dma_address(sg) + offset, xfer_len);
offset = 0;
@@ -488,7 +487,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0, size_y,
&sg, &next_ofs);
if (ret) {
- dev_err(pcdev->dev,
+ dev_err(pcdev->soc_host.dev,
"DMA initialization for Y/RGB failed\n");
goto fail;
}
@@ -498,7 +497,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
ret = pxa_init_dma_channel(pcdev, buf, dma, 1, CIBR1,
size_u, &sg, &next_ofs);
if (ret) {
- dev_err(pcdev->dev,
+ dev_err(pcdev->soc_host.dev,
"DMA initialization for U failed\n");
goto fail_u;
}
@@ -508,7 +507,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
ret = pxa_init_dma_channel(pcdev, buf, dma, 2, CIBR2,
size_v, &sg, &next_ofs);
if (ret) {
- dev_err(pcdev->dev,
+ dev_err(pcdev->soc_host.dev,
"DMA initialization for V failed\n");
goto fail_v;
}
@@ -522,10 +521,10 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
return 0;
fail_v:
- dma_free_coherent(pcdev->dev, buf->dmas[1].sg_size,
+ dma_free_coherent(pcdev->soc_host.dev, buf->dmas[1].sg_size,
buf->dmas[1].sg_cpu, buf->dmas[1].sg_dma);
fail_u:
- dma_free_coherent(pcdev->dev, buf->dmas[0].sg_size,
+ dma_free_coherent(pcdev->soc_host.dev, buf->dmas[0].sg_size,
buf->dmas[0].sg_cpu, buf->dmas[0].sg_dma);
fail:
free_buffer(vq, buf);
@@ -549,7 +548,7 @@ static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev)
active = pcdev->active;
for (i = 0; i < pcdev->channels; i++) {
- dev_dbg(pcdev->dev, "%s (channel=%d) ddadr=%08x\n", __func__,
+ dev_dbg(pcdev->soc_host.dev, "%s (channel=%d) ddadr=%08x\n", __func__,
i, active->dmas[i].sg_dma);
DDADR(pcdev->dma_chans[i]) = active->dmas[i].sg_dma;
DCSR(pcdev->dma_chans[i]) = DCSR_RUN;
@@ -561,7 +560,7 @@ static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev)
int i;
for (i = 0; i < pcdev->channels; i++) {
- dev_dbg(pcdev->dev, "%s (channel=%d)\n", __func__, i);
+ dev_dbg(pcdev->soc_host.dev, "%s (channel=%d)\n", __func__, i);
DCSR(pcdev->dma_chans[i]) = 0;
}
}
@@ -597,7 +596,7 @@ static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev)
{
unsigned long cicr0, cifr;
- dev_dbg(pcdev->dev, "%s\n", __func__);
+ dev_dbg(pcdev->soc_host.dev, "%s\n", __func__);
/* Reset the FIFOs */
cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
__raw_writel(cifr, pcdev->base + CIFR);
@@ -617,7 +616,7 @@ static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev)
__raw_writel(cicr0, pcdev->base + CICR0);
pcdev->active = NULL;
- dev_dbg(pcdev->dev, "%s\n", __func__);
+ dev_dbg(pcdev->soc_host.dev, "%s\n", __func__);
}
static void pxa_videobuf_queue(struct videobuf_queue *vq,
@@ -686,7 +685,7 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
do_gettimeofday(&vb->ts);
vb->field_count++;
wake_up(&vb->done);
- dev_dbg(pcdev->dev, "%s dequeud buffer (vb=0x%p)\n", __func__, vb);
+ dev_dbg(pcdev->soc_host.dev, "%s dequeud buffer (vb=0x%p)\n", __func__, vb);
if (list_empty(&pcdev->capture)) {
pxa_camera_stop_capture(pcdev);
@@ -722,7 +721,7 @@ static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev)
for (i = 0; i < pcdev->channels; i++)
if (DDADR(pcdev->dma_chans[i]) != DDADR_STOP)
is_dma_stopped = 0;
- dev_dbg(pcdev->dev, "%s : top queued buffer=%p, dma_stopped=%d\n",
+ dev_dbg(pcdev->soc_host.dev, "%s : top queued buffer=%p, dma_stopped=%d\n",
__func__, pcdev->active, is_dma_stopped);
if (pcdev->active && is_dma_stopped)
pxa_camera_start_capture(pcdev);
@@ -747,12 +746,12 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
overrun |= CISR_IFO_1 | CISR_IFO_2;
if (status & DCSR_BUSERR) {
- dev_err(pcdev->dev, "DMA Bus Error IRQ!\n");
+ dev_err(pcdev->soc_host.dev, "DMA Bus Error IRQ!\n");
goto out;
}
if (!(status & (DCSR_ENDINTR | DCSR_STARTINTR))) {
- dev_err(pcdev->dev, "Unknown DMA IRQ source, "
+ dev_err(pcdev->soc_host.dev, "Unknown DMA IRQ source, "
"status: 0x%08x\n", status);
goto out;
}
@@ -776,7 +775,7 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
buf = container_of(vb, struct pxa_buffer, vb);
WARN_ON(buf->inwork || list_empty(&vb->queue));
- dev_dbg(pcdev->dev, "%s channel=%d %s%s(vb=0x%p) dma.desc=%x\n",
+ dev_dbg(pcdev->soc_host.dev, "%s channel=%d %s%s(vb=0x%p) dma.desc=%x\n",
__func__, channel, status & DCSR_STARTINTR ? "SOF " : "",
status & DCSR_ENDINTR ? "EOF " : "", vb, DDADR(channel));
@@ -787,7 +786,7 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
*/
if (camera_status & overrun &&
!list_is_last(pcdev->capture.next, &pcdev->capture)) {
- dev_dbg(pcdev->dev, "FIFO overrun! CISR: %x\n",
+ dev_dbg(pcdev->soc_host.dev, "FIFO overrun! CISR: %x\n",
camera_status);
pxa_camera_stop_capture(pcdev);
pxa_camera_start_capture(pcdev);
@@ -854,7 +853,7 @@ static u32 mclk_get_divisor(struct pxa_camera_dev *pcdev)
/* mclk <= ciclk / 4 (27.4.2) */
if (mclk > lcdclk / 4) {
mclk = lcdclk / 4;
- dev_warn(pcdev->dev, "Limiting master clock to %lu\n", mclk);
+ dev_warn(pcdev->soc_host.dev, "Limiting master clock to %lu\n", mclk);
}
/* We verify mclk != 0, so if anyone breaks it, here comes their Oops */
@@ -864,7 +863,7 @@ static u32 mclk_get_divisor(struct pxa_camera_dev *pcdev)
if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
pcdev->mclk = lcdclk / (2 * (div + 1));
- dev_dbg(pcdev->dev, "LCD clock %luHz, target freq %luHz, "
+ dev_dbg(pcdev->soc_host.dev, "LCD clock %luHz, target freq %luHz, "
"divisor %u\n", lcdclk, mclk, div);
return div;
@@ -884,12 +883,12 @@ static void pxa_camera_activate(struct pxa_camera_dev *pcdev)
struct pxacamera_platform_data *pdata = pcdev->pdata;
u32 cicr4 = 0;
- dev_dbg(pcdev->dev, "Registered platform device at %p data %p\n",
+ dev_dbg(pcdev->soc_host.dev, "Registered platform device at %p data %p\n",
pcdev, pdata);
if (pdata && pdata->init) {
- dev_dbg(pcdev->dev, "%s: Init gpios\n", __func__);
- pdata->init(pcdev->dev);
+ dev_dbg(pcdev->soc_host.dev, "%s: Init gpios\n", __func__);
+ pdata->init(pcdev->soc_host.dev);
}
/* disable all interrupts */
@@ -931,7 +930,7 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
struct videobuf_buffer *vb;
status = __raw_readl(pcdev->base + CISR);
- dev_dbg(pcdev->dev, "Camera interrupt status 0x%lx\n", status);
+ dev_dbg(pcdev->soc_host.dev, "Camera interrupt status 0x%lx\n", status);
if (!status)
return IRQ_NONE;
@@ -1259,7 +1258,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx,
xlate->cam_fmt = icd->formats + idx;
xlate->buswidth = buswidth;
xlate++;
- dev_dbg(&ici->dev, "Providing format %s using %s\n",
+ dev_dbg(ici->dev, "Providing format %s using %s\n",
pxa_camera_formats[0].name,
icd->formats[idx].name);
}
@@ -1274,7 +1273,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx,
xlate->cam_fmt = icd->formats + idx;
xlate->buswidth = buswidth;
xlate++;
- dev_dbg(&ici->dev, "Providing format %s packed\n",
+ dev_dbg(ici->dev, "Providing format %s packed\n",
icd->formats[idx].name);
}
break;
@@ -1286,7 +1285,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx,
xlate->cam_fmt = icd->formats + idx;
xlate->buswidth = icd->formats[idx].depth;
xlate++;
- dev_dbg(&ici->dev,
+ dev_dbg(ici->dev,
"Providing format %s in pass-through mode\n",
icd->formats[idx].name);
}
@@ -1315,11 +1314,11 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
icd->sense = NULL;
if (ret < 0) {
- dev_warn(&ici->dev, "Failed to crop to %ux%u@%u:%u\n",
+ dev_warn(ici->dev, "Failed to crop to %ux%u@%u:%u\n",
rect->width, rect->height, rect->left, rect->top);
} else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
if (sense.pixel_clock > sense.pixel_clock_max) {
- dev_err(&ici->dev,
+ dev_err(ici->dev,
"pixel clock %lu set by the camera too high!",
sense.pixel_clock);
return -EIO;
@@ -1347,7 +1346,7 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(&ici->dev, "Format %x not found\n", pix->pixelformat);
+ dev_warn(ici->dev, "Format %x not found\n", pix->pixelformat);
return -EINVAL;
}
@@ -1363,11 +1362,11 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
icd->sense = NULL;
if (ret < 0) {
- dev_warn(&ici->dev, "Failed to configure for format %x\n",
+ dev_warn(ici->dev, "Failed to configure for format %x\n",
pix->pixelformat);
} else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
if (sense.pixel_clock > sense.pixel_clock_max) {
- dev_err(&ici->dev,
+ dev_err(ici->dev,
"pixel clock %lu set by the camera too high!",
sense.pixel_clock);
return -EIO;
@@ -1395,7 +1394,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
- dev_warn(&ici->dev, "Format %x not found\n", pixfmt);
+ dev_warn(ici->dev, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -1552,13 +1551,7 @@ static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
.set_bus_param = pxa_camera_set_bus_param,
};
-/* Should be allocated dynamically too, but we have only one. */
-static struct soc_camera_host pxa_soc_camera_host = {
- .drv_name = PXA_CAM_DRV_NAME,
- .ops = &pxa_soc_camera_host_ops,
-};
-
-static int pxa_camera_probe(struct platform_device *pdev)
+static int __devinit pxa_camera_probe(struct platform_device *pdev)
{
struct pxa_camera_dev *pcdev;
struct resource *res;
@@ -1586,7 +1579,6 @@ static int pxa_camera_probe(struct platform_device *pdev)
goto exit_kfree;
}
- dev_set_drvdata(&pdev->dev, pcdev);
pcdev->res = res;
pcdev->pdata = pdev->dev.platform_data;
@@ -1607,7 +1599,6 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->mclk = 20000000;
}
- pcdev->dev = &pdev->dev;
pcdev->mclk_divisor = mclk_get_divisor(pcdev);
INIT_LIST_HEAD(&pcdev->capture);
@@ -1616,13 +1607,13 @@ static int pxa_camera_probe(struct platform_device *pdev)
/*
* Request the regions.
*/
- if (!request_mem_region(res->start, res->end - res->start + 1,
+ if (!request_mem_region(res->start, resource_size(res),
PXA_CAM_DRV_NAME)) {
err = -EBUSY;
goto exit_clk;
}
- base = ioremap(res->start, res->end - res->start + 1);
+ base = ioremap(res->start, resource_size(res));
if (!base) {
err = -ENOMEM;
goto exit_release;
@@ -1634,29 +1625,29 @@ static int pxa_camera_probe(struct platform_device *pdev)
err = pxa_request_dma("CI_Y", DMA_PRIO_HIGH,
pxa_camera_dma_irq_y, pcdev);
if (err < 0) {
- dev_err(pcdev->dev, "Can't request DMA for Y\n");
+ dev_err(&pdev->dev, "Can't request DMA for Y\n");
goto exit_iounmap;
}
pcdev->dma_chans[0] = err;
- dev_dbg(pcdev->dev, "got DMA channel %d\n", pcdev->dma_chans[0]);
+ dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chans[0]);
err = pxa_request_dma("CI_U", DMA_PRIO_HIGH,
pxa_camera_dma_irq_u, pcdev);
if (err < 0) {
- dev_err(pcdev->dev, "Can't request DMA for U\n");
+ dev_err(&pdev->dev, "Can't request DMA for U\n");
goto exit_free_dma_y;
}
pcdev->dma_chans[1] = err;
- dev_dbg(pcdev->dev, "got DMA channel (U) %d\n", pcdev->dma_chans[1]);
+ dev_dbg(&pdev->dev, "got DMA channel (U) %d\n", pcdev->dma_chans[1]);
err = pxa_request_dma("CI_V", DMA_PRIO_HIGH,
pxa_camera_dma_irq_v, pcdev);
if (err < 0) {
- dev_err(pcdev->dev, "Can't request DMA for V\n");
+ dev_err(&pdev->dev, "Can't request DMA for V\n");
goto exit_free_dma_u;
}
pcdev->dma_chans[2] = err;
- dev_dbg(pcdev->dev, "got DMA channel (V) %d\n", pcdev->dma_chans[2]);
+ dev_dbg(&pdev->dev, "got DMA channel (V) %d\n", pcdev->dma_chans[2]);
DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD;
DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD;
@@ -1666,14 +1657,17 @@ static int pxa_camera_probe(struct platform_device *pdev)
err = request_irq(pcdev->irq, pxa_camera_irq, 0, PXA_CAM_DRV_NAME,
pcdev);
if (err) {
- dev_err(pcdev->dev, "Camera interrupt register failed \n");
+ dev_err(&pdev->dev, "Camera interrupt register failed \n");
goto exit_free_dma;
}
- pxa_soc_camera_host.priv = pcdev;
- pxa_soc_camera_host.dev.parent = &pdev->dev;
- pxa_soc_camera_host.nr = pdev->id;
- err = soc_camera_host_register(&pxa_soc_camera_host);
+ pcdev->soc_host.drv_name = PXA_CAM_DRV_NAME;
+ pcdev->soc_host.ops = &pxa_soc_camera_host_ops;
+ pcdev->soc_host.priv = pcdev;
+ pcdev->soc_host.dev = &pdev->dev;
+ pcdev->soc_host.nr = pdev->id;
+
+ err = soc_camera_host_register(&pcdev->soc_host);
if (err)
goto exit_free_irq;
@@ -1690,7 +1684,7 @@ exit_free_dma_y:
exit_iounmap:
iounmap(base);
exit_release:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
exit_clk:
clk_put(pcdev->clk);
exit_kfree:
@@ -1701,7 +1695,9 @@ exit:
static int __devexit pxa_camera_remove(struct platform_device *pdev)
{
- struct pxa_camera_dev *pcdev = platform_get_drvdata(pdev);
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct pxa_camera_dev *pcdev = container_of(soc_host,
+ struct pxa_camera_dev, soc_host);
struct resource *res;
clk_put(pcdev->clk);
@@ -1711,12 +1707,12 @@ static int __devexit pxa_camera_remove(struct platform_device *pdev)
pxa_free_dma(pcdev->dma_chans[2]);
free_irq(pcdev->irq, pcdev);
- soc_camera_host_unregister(&pxa_soc_camera_host);
+ soc_camera_host_unregister(soc_host);
iounmap(pcdev->base);
res = pcdev->res;
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
kfree(pcdev);
@@ -1730,11 +1726,11 @@ static struct platform_driver pxa_camera_driver = {
.name = PXA_CAM_DRV_NAME,
},
.probe = pxa_camera_probe,
- .remove = __exit_p(pxa_camera_remove),
+ .remove = __devexit_p(pxa_camera_remove),
};
-static int __devinit pxa_camera_init(void)
+static int __init pxa_camera_init(void)
{
return platform_driver_register(&pxa_camera_driver);
}
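
Besides the host embedding, the pxa_camera hunks above replace the open-coded "res->end - res->start + 1" length with resource_size(), the canonical helper for struct resource sizes. A sketch of the request/map/release sequence built around it, with error handling trimmed to the essentials and an illustrative name parameter:

	#include <linux/io.h>
	#include <linux/ioport.h>

	/* Claim and map a platform MMIO resource. */
	static void __iomem *sketch_map_resource(struct resource *res, const char *name)
	{
		void __iomem *base;

		if (!request_mem_region(res->start, resource_size(res), name))
			return NULL;

		base = ioremap(res->start, resource_size(res));
		if (!base)
			release_mem_region(res->start, resource_size(res));
		return base;
	}

	static void sketch_unmap_resource(struct resource *res, void __iomem *base)
	{
		iounmap(base);
		release_mem_region(res->start, resource_size(res));
	}
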
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 30f4698be90..6be845ccc7d 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -77,6 +77,8 @@
#define MAX_CHANNELS 4
#define S2255_MARKER_FRAME 0x2255DA4AL
#define S2255_MARKER_RESPONSE 0x2255ACACL
+#define S2255_RESPONSE_SETMODE 0x01
+#define S2255_RESPONSE_FW 0x10
#define S2255_USB_XFER_SIZE (16 * 1024)
#define MAX_CHANNELS 4
#define MAX_PIPE_BUFFERS 1
@@ -107,6 +109,8 @@
#define SCALE_4CIFS 1 /* 640x480(NTSC) or 704x576(PAL) */
#define SCALE_2CIFS 2 /* 640x240(NTSC) or 704x288(PAL) */
#define SCALE_1CIFS 3 /* 320x240(NTSC) or 352x288(PAL) */
+/* SCALE_4CIFSI is the 2 fields interpolated into one */
+#define SCALE_4CIFSI 4 /* 640x480(NTSC) or 704x576(PAL) high quality */
#define COLOR_YUVPL 1 /* YUV planar */
#define COLOR_YUVPK 2 /* YUV packed */
@@ -178,9 +182,6 @@ struct s2255_bufferi {
struct s2255_dmaqueue {
struct list_head active;
- /* thread for acquisition */
- struct task_struct *kthread;
- int frame;
struct s2255_dev *dev;
int channel;
};
@@ -210,16 +211,11 @@ struct s2255_pipeinfo {
u32 max_transfer_size;
u32 cur_transfer_size;
u8 *transfer_buffer;
- u32 transfer_flags;;
u32 state;
- u32 prev_state;
- u32 urb_size;
void *stream_urb;
void *dev; /* back pointer to s2255_dev struct*/
u32 err_count;
- u32 buf_index;
u32 idx;
- u32 priority_set;
};
struct s2255_fmt; /*forward declaration */
@@ -239,13 +235,13 @@ struct s2255_dev {
struct list_head s2255_devlist;
struct timer_list timer;
struct s2255_fw *fw_data;
- int board_num;
- int is_open;
struct s2255_pipeinfo pipes[MAX_PIPE_BUFFERS];
struct s2255_bufferi buffer[MAX_CHANNELS];
struct s2255_mode mode[MAX_CHANNELS];
/* jpeg compression */
struct v4l2_jpegcompression jc[MAX_CHANNELS];
+ /* capture parameters (for high quality mode full size) */
+ struct v4l2_captureparm cap_parm[MAX_CHANNELS];
const struct s2255_fmt *cur_fmt[MAX_CHANNELS];
int cur_frame[MAX_CHANNELS];
int last_frame[MAX_CHANNELS];
@@ -297,9 +293,10 @@ struct s2255_fh {
int resources[MAX_CHANNELS];
};
-#define CUR_USB_FWVER 774 /* current cypress EEPROM firmware version */
+/* current cypress EEPROM firmware version */
+#define S2255_CUR_USB_FWVER ((3 << 8) | 6)
#define S2255_MAJOR_VERSION 1
-#define S2255_MINOR_VERSION 13
+#define S2255_MINOR_VERSION 14
#define S2255_RELEASE 0
#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
S2255_MINOR_VERSION, \
@@ -1027,9 +1024,16 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->type = f->type;
norm = norm_minw(fh->dev->vdev[fh->channel]);
if (fh->width > norm_minw(fh->dev->vdev[fh->channel])) {
- if (fh->height > norm_minh(fh->dev->vdev[fh->channel]))
- fh->mode.scale = SCALE_4CIFS;
- else
+ if (fh->height > norm_minh(fh->dev->vdev[fh->channel])) {
+ if (fh->dev->cap_parm[fh->channel].capturemode &
+ V4L2_MODE_HIGHQUALITY) {
+ fh->mode.scale = SCALE_4CIFSI;
+ dprintk(2, "scale 4CIFSI\n");
+ } else {
+ fh->mode.scale = SCALE_4CIFS;
+ dprintk(2, "scale 4CIFS\n");
+ }
+ } else
fh->mode.scale = SCALE_2CIFS;
} else {
@@ -1130,6 +1134,7 @@ static u32 get_transfer_size(struct s2255_mode *mode)
if (mode->format == FORMAT_NTSC) {
switch (mode->scale) {
case SCALE_4CIFS:
+ case SCALE_4CIFSI:
linesPerFrame = NUM_LINES_4CIFS_NTSC * 2;
pixelsPerLine = LINE_SZ_4CIFS_NTSC;
break;
@@ -1147,6 +1152,7 @@ static u32 get_transfer_size(struct s2255_mode *mode)
} else if (mode->format == FORMAT_PAL) {
switch (mode->scale) {
case SCALE_4CIFS:
+ case SCALE_4CIFSI:
linesPerFrame = NUM_LINES_4CIFS_PAL * 2;
pixelsPerLine = LINE_SZ_4CIFS_PAL;
break;
@@ -1502,6 +1508,33 @@ static int vidioc_s_jpegcomp(struct file *file, void *priv,
dprintk(2, "setting jpeg quality %d\n", jc->quality);
return 0;
}
+
+static int vidioc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *sp)
+{
+ struct s2255_fh *fh = priv;
+ struct s2255_dev *dev = fh->dev;
+ if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ sp->parm.capture.capturemode = dev->cap_parm[fh->channel].capturemode;
+ dprintk(2, "getting parm %d\n", sp->parm.capture.capturemode);
+ return 0;
+}
+
+static int vidioc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *sp)
+{
+ struct s2255_fh *fh = priv;
+ struct s2255_dev *dev = fh->dev;
+
+ if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ dev->cap_parm[fh->channel].capturemode = sp->parm.capture.capturemode;
+ dprintk(2, "setting param capture mode %d\n",
+ sp->parm.capture.capturemode);
+ return 0;
+}
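With vidioc_g_parm/vidioc_s_parm wired up, user space can request the interpolated full-size mode by setting V4L2_MODE_HIGHQUALITY before the full-size S_FMT. A hedged user-space sketch (the device node path is assumed):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int request_high_quality(const char *node)	/* e.g. "/dev/video0" (assumed) */
{
	struct v4l2_streamparm parm;
	int fd, ret;

	fd = open(node, O_RDWR);
	if (fd < 0)
		return -1;
	memset(&parm, 0, sizeof(parm));
	parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	parm.parm.capture.capturemode = V4L2_MODE_HIGHQUALITY;
	/* stored in dev->cap_parm[]; the next full-size S_FMT then
	 * selects SCALE_4CIFSI instead of SCALE_4CIFS */
	ret = ioctl(fd, VIDIOC_S_PARM, &parm);
	close(fd);
	return ret;
}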
static int s2255_open(struct file *file)
{
int minor = video_devdata(file)->minor;
@@ -1793,6 +1826,8 @@ static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
#endif
.vidioc_s_jpegcomp = vidioc_s_jpegcomp,
.vidioc_g_jpegcomp = vidioc_g_jpegcomp,
+ .vidioc_s_parm = vidioc_s_parm,
+ .vidioc_g_parm = vidioc_g_parm,
};
static struct video_device template = {
@@ -1818,7 +1853,6 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
INIT_LIST_HEAD(&dev->vidq[i].active);
dev->vidq[i].dev = dev;
dev->vidq[i].channel = i;
- dev->vidq[i].kthread = NULL;
/* register 4 video devices */
dev->vdev[i] = video_device_alloc();
memcpy(dev->vdev[i], &template, sizeof(struct video_device));
@@ -1839,7 +1873,9 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
return ret;
}
}
- printk(KERN_INFO "Sensoray 2255 V4L driver\n");
+ printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %d.%d\n",
+ S2255_MAJOR_VERSION,
+ S2255_MINOR_VERSION);
return ret;
}
@@ -1929,14 +1965,14 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
if (!(cc >= 0 && cc < MAX_CHANNELS))
break;
switch (pdword[2]) {
- case 0x01:
+ case S2255_RESPONSE_SETMODE:
/* check if channel valid */
/* set mode ready */
dev->setmode_ready[cc] = 1;
wake_up(&dev->wait_setmode[cc]);
dprintk(5, "setmode ready %d\n", cc);
break;
- case 0x10:
+ case S2255_RESPONSE_FW:
dev->chn_ready |= (1 << cc);
if ((dev->chn_ready & 0x0f) != 0x0f)
@@ -2172,10 +2208,15 @@ static int s2255_board_init(struct s2255_dev *dev)
/* query the firmware */
fw_ver = s2255_get_fx2fw(dev);
- printk(KERN_INFO "2255 usb firmware version %d \n", fw_ver);
- if (fw_ver < CUR_USB_FWVER)
+ printk(KERN_INFO "2255 usb firmware version %d.%d\n",
+ (fw_ver >> 8) & 0xff,
+ fw_ver & 0xff);
+
+ if (fw_ver < S2255_CUR_USB_FWVER)
dev_err(&dev->udev->dev,
- "usb firmware not up to date %d\n", fw_ver);
+ "usb firmware not up to date %d.%d\n",
+ (fw_ver >> 8) & 0xff,
+ fw_ver & 0xff);
for (j = 0; j < MAX_CHANNELS; j++) {
dev->b_acquire[j] = 0;
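S2255_CUR_USB_FWVER now packs major and minor into one word, (3 << 8) | 6, which is the same numeric value (774) the old flat CUR_USB_FWVER carried; the printks above decode it with the matching shift and mask. A small worked example:

#include <stdio.h>

#define FWVER_PACK(maj, min)	(((maj) << 8) | (min))
#define FWVER_MAJOR(v)		(((v) >> 8) & 0xff)
#define FWVER_MINOR(v)		((v) & 0xff)

int main(void)
{
	int cur = FWVER_PACK(3, 6);		/* 0x0306 == 774 */
	int fw_ver = FWVER_PACK(3, 5);		/* hypothetical value read from the device */

	printf("firmware %d.%d\n", FWVER_MAJOR(fw_ver), FWVER_MINOR(fw_ver));
	if (fw_ver < cur)			/* packed compare orders by major, then minor */
		printf("firmware not up to date\n");
	return 0;
}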
@@ -2240,8 +2281,10 @@ static void read_pipe_completion(struct urb *purb)
return;
}
status = purb->status;
- if (status != 0) {
- dprintk(2, "read_pipe_completion: err\n");
+ /* if shutting down, do not resubmit, exit immediately */
+ if (status == -ESHUTDOWN) {
+ dprintk(2, "read_pipe_completion: err shutdown\n");
+ pipe_info->err_count++;
return;
}
@@ -2250,9 +2293,13 @@ static void read_pipe_completion(struct urb *purb)
return;
}
- s2255_read_video_callback(dev, pipe_info);
+ if (status == 0)
+ s2255_read_video_callback(dev, pipe_info);
+ else {
+ pipe_info->err_count++;
+ dprintk(1, "s2255drv: failed URB %d\n", status);
+ }
- pipe_info->err_count = 0;
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
/* reuse urb */
usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
@@ -2264,7 +2311,6 @@ static void read_pipe_completion(struct urb *purb)
if (pipe_info->state != 0) {
if (usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL)) {
dev_err(&dev->udev->dev, "error submitting urb\n");
- usb_free_urb(pipe_info->stream_urb);
}
} else {
dprintk(2, "read pipe complete state 0\n");
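The reworked completion handler distinguishes -ESHUTDOWN (device going away, stop resubmitting) from transient errors (count them, still requeue the same URB). A stripped-down sketch of that bulk-URB resubmit pattern; the context struct is illustrative, and GFP_ATOMIC is used here as the usual choice in completion context:

#include <linux/usb.h>

struct pipe_ctx {			/* illustrative, not the driver's pipeinfo */
	struct usb_device *udev;
	unsigned int rd_ep;
	u32 state;
	u32 err_count;
};

static void bulk_read_complete(struct urb *urb)
{
	struct pipe_ctx *ctx = urb->context;

	if (urb->status == -ESHUTDOWN) {	/* shutting down: do not requeue */
		ctx->err_count++;
		return;
	}
	if (urb->status)			/* transient error: count it, requeue anyway */
		ctx->err_count++;
	/* else: hand urb->transfer_buffer / urb->actual_length to the consumer here */

	if (!ctx->state)			/* pipe was stopped while we ran */
		return;

	usb_fill_bulk_urb(urb, ctx->udev,
			  usb_rcvbulkpipe(ctx->udev, ctx->rd_ep),
			  urb->transfer_buffer, urb->transfer_buffer_length,
			  bulk_read_complete, ctx);
	if (usb_submit_urb(urb, GFP_ATOMIC))
		dev_err(&ctx->udev->dev, "error resubmitting urb\n");
}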
@@ -2283,8 +2329,7 @@ static int s2255_start_readpipe(struct s2255_dev *dev)
for (i = 0; i < MAX_PIPE_BUFFERS; i++) {
pipe_info->state = 1;
- pipe_info->buf_index = (u32) i;
- pipe_info->priority_set = 0;
+ pipe_info->err_count = 0;
pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pipe_info->stream_urb) {
dev_err(&dev->udev->dev,
@@ -2298,7 +2343,6 @@ static int s2255_start_readpipe(struct s2255_dev *dev)
pipe_info->cur_transfer_size,
read_pipe_completion, pipe_info);
- pipe_info->urb_size = sizeof(pipe_info->stream_urb);
dprintk(4, "submitting URB %p\n", pipe_info->stream_urb);
retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
if (retval) {
@@ -2403,8 +2447,6 @@ static void s2255_stop_readpipe(struct s2255_dev *dev)
if (pipe_info->state == 0)
continue;
pipe_info->state = 0;
- pipe_info->prev_state = 1;
-
}
}
@@ -2542,7 +2584,9 @@ static int s2255_probe(struct usb_interface *interface,
s2255_probe_v4l(dev);
usb_reset_device(dev->udev);
/* load 2255 board specific */
- s2255_board_init(dev);
+ retval = s2255_board_init(dev);
+ if (retval)
+ goto error;
dprintk(4, "before probe done %p\n", dev);
spin_lock_init(&dev->slock);
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 0ba68987bfc..5bcce092e80 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -44,6 +44,7 @@ config VIDEO_SAA7134_DVB
select DVB_LNBP21 if !DVB_FE_CUSTOMISE
select DVB_ZL10353 if !DVB_FE_CUSTOMISE
select DVB_LGDT3305 if !DVB_FE_CUSTOMISE
+ select DVB_TDA10048 if !DVB_FE_CUSTOMISE
select MEDIA_TUNER_TDA18271 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE
---help---
diff --git a/drivers/media/video/saa7134/Makefile b/drivers/media/video/saa7134/Makefile
index 3dbaa19a6d0..604158a8c23 100644
--- a/drivers/media/video/saa7134/Makefile
+++ b/drivers/media/video/saa7134/Makefile
@@ -3,8 +3,7 @@ saa7134-objs := saa7134-cards.o saa7134-core.o saa7134-i2c.o \
saa7134-ts.o saa7134-tvaudio.o saa7134-vbi.o \
saa7134-video.o saa7134-input.o
-obj-$(CONFIG_VIDEO_SAA7134) += saa7134.o saa7134-empress.o \
- saa6752hs.o
+obj-$(CONFIG_VIDEO_SAA7134) += saa6752hs.o saa7134.o saa7134-empress.o
obj-$(CONFIG_VIDEO_SAA7134_ALSA) += saa7134-alsa.o
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index fdb19449d26..06861b782b9 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -1669,6 +1669,39 @@ struct saa7134_board saa7134_boards[] = {
.amux = LINE1,
},
},
+ [SAA7134_BOARD_AVERMEDIA_CARDBUS_501] = {
+ /* Oldrich Jedlicka <oldium.pro@seznam.cz> */
+ .name = "AVerMedia Cardbus TV/Radio (E501R)",
+ .audio_clock = 0x187de7,
+ .tuner_type = TUNER_ALPS_TSBE5_PAL,
+ .radio_type = TUNER_TEA5767,
+ .tuner_addr = 0x61,
+ .radio_addr = 0x60,
+ .tda9887_conf = TDA9887_PRESENT,
+ .gpiomask = 0x08000000,
+ .inputs = { {
+ .name = name_tv,
+ .vmux = 1,
+ .amux = TV,
+ .tv = 1,
+ .gpio = 0x08000000,
+ }, {
+ .name = name_comp1,
+ .vmux = 3,
+ .amux = LINE1,
+ .gpio = 0x08000000,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ .gpio = 0x08000000,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE2,
+ .gpio = 0x00000000,
+ },
+ },
[SAA7134_BOARD_CINERGY400_CARDBUS] = {
.name = "Terratec Cinergy 400 mobile",
.audio_clock = 0x187de7,
@@ -3331,13 +3364,15 @@ struct saa7134_board saa7134_boards[] = {
},
},
[SAA7134_BOARD_HAUPPAUGE_HVR1110R3] = {
- .name = "Hauppauge WinTV-HVR1110r3",
+ .name = "Hauppauge WinTV-HVR1110r3 DVB-T/Hybrid",
.audio_clock = 0x00187de7,
.tuner_type = TUNER_PHILIPS_TDA8290,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tuner_config = 3,
+ .mpeg = SAA7134_MPEG_DVB,
+ .ts_type = SAA7134_MPEG_TS_SERIAL,
.gpiomask = 0x0800100, /* GPIO 21 is an INPUT */
.inputs = {{
.name = name_tv,
@@ -4006,7 +4041,7 @@ struct saa7134_board saa7134_boards[] = {
[SAA7134_BOARD_BEHOLD_505FM] = {
/* Beholder Intl. Ltd. 2008 */
/*Dmitry Belimov <d.belimov@gmail.com> */
- .name = "Beholder BeholdTV 505 FM/RDS",
+ .name = "Beholder BeholdTV 505 FM",
.audio_clock = 0x00200000,
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
.radio_type = UNSET,
@@ -4019,6 +4054,40 @@ struct saa7134_board saa7134_boards[] = {
.vmux = 3,
.amux = LINE2,
.tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 1,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ } },
+ .mute = {
+ .name = name_mute,
+ .amux = LINE1,
+ },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE2,
+ },
+ },
+ [SAA7134_BOARD_BEHOLD_505RDS] = {
+ /* Beholder Intl. Ltd. 2008 */
+ /*Dmitry Belimov <d.belimov@gmail.com> */
+ .name = "Beholder BeholdTV 505 RDS",
+ .audio_clock = 0x00200000,
+ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .gpiomask = 0x00008000,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 3,
+ .amux = LINE2,
+ .tv = 1,
},{
.name = name_comp1,
.vmux = 1,
@@ -4040,7 +4109,7 @@ struct saa7134_board saa7134_boards[] = {
[SAA7134_BOARD_BEHOLD_507_9FM] = {
/* Beholder Intl. Ltd. 2008 */
/*Dmitry Belimov <d.belimov@gmail.com> */
- .name = "Beholder BeholdTV 507 FM/RDS / BeholdTV 509 FM",
+ .name = "Beholder BeholdTV 507 FM / BeholdTV 509 FM",
.audio_clock = 0x00187de7,
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
.radio_type = UNSET,
@@ -4067,6 +4136,66 @@ struct saa7134_board saa7134_boards[] = {
.amux = LINE2,
},
},
+ [SAA7134_BOARD_BEHOLD_507RDS_MK5] = {
+ /* Beholder Intl. Ltd. 2008 */
+ /*Dmitry Belimov <d.belimov@gmail.com> */
+ .name = "Beholder BeholdTV 507 RDS",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .gpiomask = 0x00008000,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 3,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 1,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE2,
+ },
+ },
+ [SAA7134_BOARD_BEHOLD_507RDS_MK3] = {
+ /* Beholder Intl. Ltd. 2008 */
+ /*Dmitry Belimov <d.belimov@gmail.com> */
+ .name = "Beholder BeholdTV 507 RDS",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .gpiomask = 0x00008000,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 3,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 1,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE2,
+ },
+ },
[SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM] = {
/* Beholder Intl. Ltd. 2008 */
/*Dmitry Belimov <d.belimov@gmail.com> */
@@ -4101,9 +4230,121 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x000A8000,
},
},
- [SAA7134_BOARD_BEHOLD_607_9FM] = {
+ [SAA7134_BOARD_BEHOLD_607FM_MK3] = {
+ /* Andrey Melnikoff <temnota@kmv.ru> */
+ .name = "Beholder BeholdTV 607 FM",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 3,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 1,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE2,
+ },
+ },
+ [SAA7134_BOARD_BEHOLD_609FM_MK3] = {
+ /* Andrey Melnikoff <temnota@kmv.ru> */
+ .name = "Beholder BeholdTV 609 FM",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 3,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 1,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE2,
+ },
+ },
+ [SAA7134_BOARD_BEHOLD_607FM_MK5] = {
+ /* Andrey Melnikoff <temnota@kmv.ru> */
+ .name = "Beholder BeholdTV 607 FM",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 3,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 1,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE2,
+ },
+ },
+ [SAA7134_BOARD_BEHOLD_609FM_MK5] = {
/* Andrey Melnikoff <temnota@kmv.ru> */
- .name = "Beholder BeholdTV 607 / BeholdTV 609",
+ .name = "Beholder BeholdTV 609 FM",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 3,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 1,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE2,
+ },
+ },
+ [SAA7134_BOARD_BEHOLD_607RDS_MK3] = {
+ /* Andrey Melnikoff <temnota@kmv.ru> */
+ .name = "Beholder BeholdTV 607 RDS",
.audio_clock = 0x00187de7,
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
.radio_type = UNSET,
@@ -4115,6 +4356,90 @@ struct saa7134_board saa7134_boards[] = {
.vmux = 3,
.amux = TV,
.tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 1,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE2,
+ },
+ },
+ [SAA7134_BOARD_BEHOLD_609RDS_MK3] = {
+ /* Andrey Melnikoff <temnota@kmv.ru> */
+ .name = "Beholder BeholdTV 609 RDS",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 3,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 1,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE2,
+ },
+ },
+ [SAA7134_BOARD_BEHOLD_607RDS_MK5] = {
+ /* Andrey Melnikoff <temnota@kmv.ru> */
+ .name = "Beholder BeholdTV 607 RDS",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 3,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 1,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE2,
+ },
+ },
+ [SAA7134_BOARD_BEHOLD_609RDS_MK5] = {
+ /* Andrey Melnikoff <temnota@kmv.ru> */
+ .name = "Beholder BeholdTV 609 RDS",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 3,
+ .amux = TV,
+ .tv = 1,
},{
.name = name_comp1,
.vmux = 1,
@@ -4133,6 +4458,7 @@ struct saa7134_board saa7134_boards[] = {
/* Igor Kuznetsov <igk@igk.ru> */
/* Andrey Melnikoff <temnota@kmv.ru> */
/* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */
+ /* Alexey Osipov <lion-simba@pridelands.ru> */
.name = "Beholder BeholdTV M6",
.audio_clock = 0x00187de7,
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
@@ -4207,10 +4533,10 @@ struct saa7134_board saa7134_boards[] = {
/* Igor Kuznetsov <igk@igk.ru> */
/* Andrey Melnikoff <temnota@kmv.ru> */
/* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */
+ /* Alexey Osipov <lion-simba@pridelands.ru> */
.name = "Beholder BeholdTV M6 Extra",
.audio_clock = 0x00187de7,
- /* FIXME: Must be PHILIPS_FM1216ME_MK5*/
- .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
+ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -4465,7 +4791,6 @@ struct saa7134_board saa7134_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .mpeg = SAA7134_MPEG_DVB,
.inputs = {{
.name = name_tv,
.vmux = 3,
@@ -4753,6 +5078,44 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x01,
},
},
+ [SAA7134_BOARD_AVERMEDIA_STUDIO_507UA] = {
+ /* Andy Shevchenko <andy@smile.org.ua> */
+ .name = "Avermedia AVerTV Studio 507UA",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* Should be MK5 */
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .gpiomask = 0x03,
+ .inputs = { {
+ .name = name_tv,
+ .vmux = 1,
+ .amux = TV,
+ .tv = 1,
+ .gpio = 0x00,
+ }, {
+ .name = name_comp1,
+ .vmux = 3,
+ .amux = LINE1,
+ .gpio = 0x00,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ .gpio = 0x00,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = LINE2,
+ .gpio = 0x01,
+ },
+ .mute = {
+ .name = name_mute,
+ .amux = LINE1,
+ .gpio = 0x00,
+ },
+ },
};
const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -5027,6 +5390,13 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0xd6ee,
.driver_data = SAA7134_BOARD_AVERMEDIA_CARDBUS,
},{
+ /* AVerMedia CardBus */
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
+ .subvendor = 0x1461, /* Avermedia Technologies Inc */
+ .subdevice = 0xb7e9,
+ .driver_data = SAA7134_BOARD_AVERMEDIA_CARDBUS_501,
+ }, {
/* TransGear 3000TV */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7130,
@@ -5441,6 +5811,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.driver_data = SAA7134_BOARD_AVERMEDIA_STUDIO_507,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
+ .subvendor = 0x1461, /* Avermedia Technologies Inc */
+ .subdevice = 0xa11b,
+ .driver_data = SAA7134_BOARD_AVERMEDIA_STUDIO_507UA,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x1043,
.subdevice = 0x4876,
@@ -5647,14 +6023,8 @@ struct pci_device_id saa7134_pci_tbl[] = {
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7130,
.subvendor = 0x0000,
- .subdevice = 0x5051,
- .driver_data = SAA7134_BOARD_BEHOLD_505FM,
- },{
- .vendor = PCI_VENDOR_ID_PHILIPS,
- .device = PCI_DEVICE_ID_PHILIPS_SAA7130,
- .subvendor = 0x0000,
.subdevice = 0x505B,
- .driver_data = SAA7134_BOARD_BEHOLD_505FM,
+ .driver_data = SAA7134_BOARD_BEHOLD_505RDS,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7130,
@@ -5666,13 +6036,13 @@ struct pci_device_id saa7134_pci_tbl[] = {
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x0000,
.subdevice = 0x5071,
- .driver_data = SAA7134_BOARD_BEHOLD_507_9FM,
+ .driver_data = SAA7134_BOARD_BEHOLD_507RDS_MK3,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x0000,
.subdevice = 0x507B,
- .driver_data = SAA7134_BOARD_BEHOLD_507_9FM,
+ .driver_data = SAA7134_BOARD_BEHOLD_507RDS_MK5,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -5696,49 +6066,49 @@ struct pci_device_id saa7134_pci_tbl[] = {
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
.subvendor = 0x5ace,
.subdevice = 0x6070,
- .driver_data = SAA7134_BOARD_BEHOLD_607_9FM,
+ .driver_data = SAA7134_BOARD_BEHOLD_607FM_MK3,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
.subvendor = 0x5ace,
.subdevice = 0x6071,
- .driver_data = SAA7134_BOARD_BEHOLD_607_9FM,
+ .driver_data = SAA7134_BOARD_BEHOLD_607FM_MK5,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
.subvendor = 0x5ace,
.subdevice = 0x6072,
- .driver_data = SAA7134_BOARD_BEHOLD_607_9FM,
+ .driver_data = SAA7134_BOARD_BEHOLD_607RDS_MK3,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
.subvendor = 0x5ace,
.subdevice = 0x6073,
- .driver_data = SAA7134_BOARD_BEHOLD_607_9FM,
+ .driver_data = SAA7134_BOARD_BEHOLD_607RDS_MK5,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x5ace,
.subdevice = 0x6090,
- .driver_data = SAA7134_BOARD_BEHOLD_607_9FM,
+ .driver_data = SAA7134_BOARD_BEHOLD_609FM_MK3,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x5ace,
.subdevice = 0x6091,
- .driver_data = SAA7134_BOARD_BEHOLD_607_9FM,
+ .driver_data = SAA7134_BOARD_BEHOLD_609FM_MK5,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x5ace,
.subdevice = 0x6092,
- .driver_data = SAA7134_BOARD_BEHOLD_607_9FM,
+ .driver_data = SAA7134_BOARD_BEHOLD_609RDS_MK3,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x5ace,
.subdevice = 0x6093,
- .driver_data = SAA7134_BOARD_BEHOLD_607_9FM,
+ .driver_data = SAA7134_BOARD_BEHOLD_609RDS_MK5,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
@@ -5832,6 +6202,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
}, {
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x1461, /* Avermedia Technologies Inc */
+ .subdevice = 0xf736,
+ .driver_data = SAA7134_BOARD_AVERMEDIA_M103,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x1043,
.subdevice = 0x4878, /* REV:1.02G */
.driver_data = SAA7134_BOARD_ASUSTeK_TIGER_3IN1,
@@ -6114,7 +6490,6 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_VIDEOMATE_DVBT_300:
case SAA7134_BOARD_VIDEOMATE_DVBT_200:
case SAA7134_BOARD_VIDEOMATE_DVBT_200A:
- case SAA7134_BOARD_VIDEOMATE_T750:
case SAA7134_BOARD_MANLI_MTV001:
case SAA7134_BOARD_MANLI_MTV002:
case SAA7134_BOARD_BEHOLD_409FM:
@@ -6142,7 +6517,10 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_407FM:
case SAA7134_BOARD_BEHOLD_409:
case SAA7134_BOARD_BEHOLD_505FM:
+ case SAA7134_BOARD_BEHOLD_505RDS:
case SAA7134_BOARD_BEHOLD_507_9FM:
+ case SAA7134_BOARD_BEHOLD_507RDS_MK3:
+ case SAA7134_BOARD_BEHOLD_507RDS_MK5:
case SAA7134_BOARD_GENIUS_TVGO_A11MCE:
case SAA7134_BOARD_REAL_ANGEL_220:
case SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG:
@@ -6196,6 +6574,16 @@ int saa7134_board_init1(struct saa7134_dev *dev)
saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0xffffffff, 0xffffffff);
msleep(10);
break;
+ case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
+ /* power-down tuner chip */
+ saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x08400000, 0x08400000);
+ saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x08400000, 0);
+ msleep(10);
+ saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x08400000, 0x08400000);
+ saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x08400000, 0x08400000);
+ msleep(10);
+ dev->has_remote = SAA7134_REMOTE_I2C;
+ break;
case SAA7134_BOARD_AVERMEDIA_CARDBUS_506:
saa7134_set_gpio(dev, 23, 0);
msleep(10);
@@ -6253,7 +6641,14 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_UPMOST_PURPLE_TV:
case SAA7134_BOARD_MSI_TVATANYWHERE_PLUS:
case SAA7134_BOARD_HAUPPAUGE_HVR1110:
- case SAA7134_BOARD_BEHOLD_607_9FM:
+ case SAA7134_BOARD_BEHOLD_607FM_MK3:
+ case SAA7134_BOARD_BEHOLD_607FM_MK5:
+ case SAA7134_BOARD_BEHOLD_609FM_MK3:
+ case SAA7134_BOARD_BEHOLD_609FM_MK5:
+ case SAA7134_BOARD_BEHOLD_607RDS_MK3:
+ case SAA7134_BOARD_BEHOLD_607RDS_MK5:
+ case SAA7134_BOARD_BEHOLD_609RDS_MK3:
+ case SAA7134_BOARD_BEHOLD_609RDS_MK5:
case SAA7134_BOARD_BEHOLD_M6:
case SAA7134_BOARD_BEHOLD_M63:
case SAA7134_BOARD_BEHOLD_M6_EXTRA:
@@ -6635,6 +7030,7 @@ int saa7134_board_init2(struct saa7134_dev *dev)
switch (dev->board) {
case SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM:
+ case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
{
struct v4l2_priv_tun_config tea5767_cfg;
struct tea5767_ctrl ctl;
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 2def6fec814..94a023a14bb 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -331,6 +331,10 @@ void saa7134_buffer_next(struct saa7134_dev *dev,
dprintk("buffer_next %p\n",NULL);
saa7134_set_dmabits(dev);
del_timer(&q->timeout);
+
+ if (card_has_mpeg(dev))
+ if (dev->ts_started)
+ saa7134_ts_stop(dev);
}
}
@@ -416,6 +420,19 @@ int saa7134_set_dmabits(struct saa7134_dev *dev)
ctrl |= SAA7134_MAIN_CTRL_TE5;
irq |= SAA7134_IRQ1_INTE_RA2_1 |
SAA7134_IRQ1_INTE_RA2_0;
+
+ /* dma: setup channel 5 (= TS) */
+
+ saa_writeb(SAA7134_TS_DMA0, (dev->ts.nr_packets - 1) & 0xff);
+ saa_writeb(SAA7134_TS_DMA1,
+ ((dev->ts.nr_packets - 1) >> 8) & 0xff);
+ /* TSNOPIT=0, TSCOLAP=0 */
+ saa_writeb(SAA7134_TS_DMA2,
+ (((dev->ts.nr_packets - 1) >> 16) & 0x3f) | 0x00);
+ saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
+ saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_16 |
+ SAA7134_RS_CONTROL_ME |
+ (dev->ts.pt_ts.dma >> 12));
}
/* set task conditions + field handling */
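The channel-5 TS setup above splits (nr_packets - 1) across three byte-wide DMA registers: low byte, middle byte, and the six valid bits of the high byte. A worked example with the driver's default of 64 packets:

#include <stdio.h>

int main(void)
{
	unsigned int nr_packets = 64;		/* ts_nr_packets module default */
	unsigned int n = nr_packets - 1;	/* 63 == 0x3f */

	unsigned char dma0 = n & 0xff;		/* SAA7134_TS_DMA0 -> 0x3f */
	unsigned char dma1 = (n >> 8) & 0xff;	/* SAA7134_TS_DMA1 -> 0x00 */
	unsigned char dma2 = (n >> 16) & 0x3f;	/* SAA7134_TS_DMA2 -> 0x00 (TSNOPIT=0, TSCOLAP=0) */

	printf("DMA0=%#04x DMA1=%#04x DMA2=%#04x\n", dma0, dma1, dma2);
	return 0;
}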
@@ -775,7 +792,6 @@ static struct video_device *vdev_init(struct saa7134_dev *dev,
if (NULL == vfd)
return NULL;
*vfd = *template;
- vfd->minor = -1;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
vfd->debug = video_debug;
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 4eff1ca8593..31930f26ffc 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -48,6 +48,7 @@
#include "isl6405.h"
#include "lnbp21.h"
#include "tuner-simple.h"
+#include "tda10048.h"
#include "tda18271.h"
#include "lgdt3305.h"
#include "tda8290.h"
@@ -978,6 +979,18 @@ static struct lgdt3305_config hcw_lgdt3305_config = {
.vsb_if_khz = 3250,
};
+static struct tda10048_config hcw_tda10048_config = {
+ .demod_address = 0x10 >> 1,
+ .output_mode = TDA10048_SERIAL_OUTPUT,
+ .fwbulkwritelen = TDA10048_BULKWRITE_200,
+ .inversion = TDA10048_INVERSION_ON,
+ .dtv6_if_freq_khz = TDA10048_IF_3300,
+ .dtv7_if_freq_khz = TDA10048_IF_3500,
+ .dtv8_if_freq_khz = TDA10048_IF_4000,
+ .clk_freq_khz = TDA10048_CLK_16000,
+ .disable_gate_access = 1,
+};
+
static struct tda18271_std_map hauppauge_tda18271_std_map = {
.atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 4,
.if_lvl = 1, .rfagc_top = 0x58, },
@@ -1106,6 +1119,19 @@ static int dvb_init(struct saa7134_dev *dev)
&tda827x_cfg_2) < 0)
goto dettach_frontend;
break;
+ case SAA7134_BOARD_HAUPPAUGE_HVR1110R3:
+ fe0->dvb.frontend = dvb_attach(tda10048_attach,
+ &hcw_tda10048_config,
+ &dev->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(tda829x_attach, fe0->dvb.frontend,
+ &dev->i2c_adap, 0x4b,
+ &tda829x_no_probe);
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_adap,
+ &hcw_tda18271_config);
+ }
+ break;
case SAA7134_BOARD_PHILIPS_TIGER:
if (configure_tda827x_fe(dev, &philips_tiger_config,
&tda827x_cfg_0) < 0)
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 9db3472667e..add1757f893 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -255,6 +255,16 @@ static int empress_s_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
+static int empress_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7134_dev *dev = file->private_data;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.sizeimage = TS_PACKET_SIZE * dev->ts.nr_packets;
+
+ return 0;
+}
static int empress_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
@@ -450,6 +460,7 @@ static const struct v4l2_file_operations ts_fops =
static const struct v4l2_ioctl_ops ts_ioctl_ops = {
.vidioc_querycap = empress_querycap,
.vidioc_enum_fmt_vid_cap = empress_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = empress_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = empress_s_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = empress_g_fmt_vid_cap,
.vidioc_reqbufs = empress_reqbufs,
@@ -491,11 +502,8 @@ static void empress_signal_update(struct work_struct *work)
if (dev->nosignal) {
dprintk("no video signal\n");
- ts_reset_encoder(dev);
} else {
dprintk("video signal acquired\n");
- if (atomic_read(&dev->empress_users))
- ts_init_encoder(dev);
}
}
diff --git a/drivers/media/video/saa7134/saa7134-i2c.c b/drivers/media/video/saa7134/saa7134-i2c.c
index f3e285aa2fb..8096dace5f6 100644
--- a/drivers/media/video/saa7134/saa7134-i2c.c
+++ b/drivers/media/video/saa7134/saa7134-i2c.c
@@ -259,7 +259,7 @@ static int saa7134_i2c_xfer(struct i2c_adapter *i2c_adap,
/* workaround for a saa7134 i2c bug
* needed to talk to the mt352 demux
* thanks to pinnacle for the hint */
- int quirk = 0xfd;
+ int quirk = 0xfe;
d1printk(" [%02x quirk]",quirk);
i2c_send_byte(dev,START,quirk);
i2c_recv_byte(dev);
@@ -321,33 +321,6 @@ static u32 functionality(struct i2c_adapter *adap)
return I2C_FUNC_SMBUS_EMUL;
}
-static int attach_inform(struct i2c_client *client)
-{
- struct saa7134_dev *dev = client->adapter->algo_data;
-
- d1printk( "%s i2c attach [addr=0x%x,client=%s]\n",
- client->driver->driver.name, client->addr, client->name);
-
- /* Am I an i2c remote control? */
-
- switch (client->addr) {
- case 0x7a:
- case 0x47:
- case 0x71:
- case 0x2d:
- case 0x30:
- {
- struct IR_i2c *ir = i2c_get_clientdata(client);
- d1printk("%s i2c IR detected (%s).\n",
- client->driver->driver.name, ir->phys);
- saa7134_set_i2c_ir(dev,ir);
- break;
- }
- }
-
- return 0;
-}
-
static struct i2c_algorithm saa7134_algo = {
.master_xfer = saa7134_i2c_xfer,
.functionality = functionality,
@@ -358,7 +331,6 @@ static struct i2c_adapter saa7134_adap_template = {
.name = "saa7134",
.id = I2C_HW_SAA7134,
.algo = &saa7134_algo,
- .client_register = attach_inform,
};
static struct i2c_client saa7134_client_template = {
@@ -433,6 +405,9 @@ int saa7134_i2c_register(struct saa7134_dev *dev)
saa7134_i2c_eeprom(dev,dev->eedata,sizeof(dev->eedata));
if (i2c_scan)
do_i2c_scan(dev->name,&dev->i2c_client);
+
+ /* Instantiate the IR receiver device, if present */
+ saa7134_probe_i2c_ir(dev);
return 0;
}
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 8a106d36e72..6e219c2db84 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -60,7 +60,7 @@ MODULE_PARM_DESC(disable_other_ir, "disable full codes of "
#define dprintk(fmt, arg...) if (ir_debug) \
printk(KERN_DEBUG "%s/ir: " fmt, dev->name , ## arg)
#define i2cdprintk(fmt, arg...) if (ir_debug) \
- printk(KERN_DEBUG "%s/ir: " fmt, ir->c.name , ## arg)
+ printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg)
/* Helper functions for RC5 and NEC decoding at GPIO16 or GPIO18 */
static int saa7134_rc5_irq(struct saa7134_dev *dev);
@@ -134,10 +134,10 @@ static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, u32 *ir_key,
int gpio;
/* <dev> is needed to access GPIO. Used by the saa_readl macro. */
- struct saa7134_dev *dev = ir->c.adapter->algo_data;
+ struct saa7134_dev *dev = ir->c->adapter->algo_data;
if (dev == NULL) {
dprintk("get_key_msi_tvanywhere_plus: "
- "gir->c.adapter->algo_data is NULL!\n");
+ "gir->c->adapter->algo_data is NULL!\n");
return -EIO;
}
@@ -156,7 +156,7 @@ static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, u32 *ir_key,
/* GPIO says there is a button press. Get it. */
- if (1 != i2c_master_recv(&ir->c, &b, 1)) {
+ if (1 != i2c_master_recv(ir->c, &b, 1)) {
i2cdprintk("read error\n");
return -EIO;
}
@@ -179,7 +179,7 @@ static int get_key_purpletv(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
unsigned char b;
/* poll IR chip */
- if (1 != i2c_master_recv(&ir->c,&b,1)) {
+ if (1 != i2c_master_recv(ir->c, &b, 1)) {
i2cdprintk("read error\n");
return -EIO;
}
@@ -202,7 +202,7 @@ static int get_key_hvr1110(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
unsigned char buf[5], cod4, code3, code4;
/* poll IR chip */
- if (5 != i2c_master_recv(&ir->c,buf,5))
+ if (5 != i2c_master_recv(ir->c, buf, 5))
return -EIO;
cod4 = buf[4];
@@ -224,7 +224,7 @@ static int get_key_beholdm6xx(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
unsigned char data[12];
u32 gpio;
- struct saa7134_dev *dev = ir->c.adapter->algo_data;
+ struct saa7134_dev *dev = ir->c->adapter->algo_data;
/* rising SAA7134_GPIO_GPRESCAN reads the status */
saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
@@ -235,9 +235,9 @@ static int get_key_beholdm6xx(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
if (0x400000 & ~gpio)
return 0; /* No button press */
- ir->c.addr = 0x5a >> 1;
+ ir->c->addr = 0x5a >> 1;
- if (12 != i2c_master_recv(&ir->c, data, 12)) {
+ if (12 != i2c_master_recv(ir->c, data, 12)) {
i2cdprintk("read error\n");
return -EIO;
}
@@ -267,7 +267,7 @@ static int get_key_pinnacle(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw,
unsigned int start = 0,parity = 0,code = 0;
/* poll IR chip */
- if (4 != i2c_master_recv(&ir->c, b, 4)) {
+ if (4 != i2c_master_recv(ir->c, b, 4)) {
i2cdprintk("read error\n");
return -EIO;
}
@@ -447,6 +447,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_AVERMEDIA_STUDIO_305:
case SAA7134_BOARD_AVERMEDIA_STUDIO_307:
case SAA7134_BOARD_AVERMEDIA_STUDIO_507:
+ case SAA7134_BOARD_AVERMEDIA_STUDIO_507UA:
case SAA7134_BOARD_AVERMEDIA_GO_007_FM:
case SAA7134_BOARD_AVERMEDIA_M102:
case SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS:
@@ -506,7 +507,10 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_407FM:
case SAA7134_BOARD_BEHOLD_409:
case SAA7134_BOARD_BEHOLD_505FM:
+ case SAA7134_BOARD_BEHOLD_505RDS:
case SAA7134_BOARD_BEHOLD_507_9FM:
+ case SAA7134_BOARD_BEHOLD_507RDS_MK3:
+ case SAA7134_BOARD_BEHOLD_507RDS_MK5:
ir_codes = ir_codes_manli;
mask_keycode = 0x003f00;
mask_keyup = 0x004000;
@@ -678,55 +682,101 @@ void saa7134_input_fini(struct saa7134_dev *dev)
dev->remote = NULL;
}
-void saa7134_set_i2c_ir(struct saa7134_dev *dev, struct IR_i2c *ir)
+void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
{
+ struct i2c_board_info info;
+ struct IR_i2c_init_data init_data;
+ const unsigned short addr_list[] = {
+ 0x7a, 0x47, 0x71, 0x2d,
+ I2C_CLIENT_END
+ };
+
+ struct i2c_msg msg_msi = {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .len = 0,
+ .buf = NULL,
+ };
+
+ int rc;
+
if (disable_ir) {
- dprintk("Found supported i2c remote, but IR has been disabled\n");
- ir->get_key=NULL;
+ dprintk("IR has been disabled, not probing for i2c remote\n");
return;
}
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ memset(&init_data, 0, sizeof(struct IR_i2c_init_data));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+
switch (dev->board) {
case SAA7134_BOARD_PINNACLE_PCTV_110i:
case SAA7134_BOARD_PINNACLE_PCTV_310i:
- snprintf(ir->c.name, sizeof(ir->c.name), "Pinnacle PCTV");
+ init_data.name = "Pinnacle PCTV";
if (pinnacle_remote == 0) {
- ir->get_key = get_key_pinnacle_color;
- ir->ir_codes = ir_codes_pinnacle_color;
+ init_data.get_key = get_key_pinnacle_color;
+ init_data.ir_codes = ir_codes_pinnacle_color;
} else {
- ir->get_key = get_key_pinnacle_grey;
- ir->ir_codes = ir_codes_pinnacle_grey;
+ init_data.get_key = get_key_pinnacle_grey;
+ init_data.ir_codes = ir_codes_pinnacle_grey;
}
break;
case SAA7134_BOARD_UPMOST_PURPLE_TV:
- snprintf(ir->c.name, sizeof(ir->c.name), "Purple TV");
- ir->get_key = get_key_purpletv;
- ir->ir_codes = ir_codes_purpletv;
+ init_data.name = "Purple TV";
+ init_data.get_key = get_key_purpletv;
+ init_data.ir_codes = ir_codes_purpletv;
break;
case SAA7134_BOARD_MSI_TVATANYWHERE_PLUS:
- snprintf(ir->c.name, sizeof(ir->c.name), "MSI TV@nywhere Plus");
- ir->get_key = get_key_msi_tvanywhere_plus;
- ir->ir_codes = ir_codes_msi_tvanywhere_plus;
+ init_data.name = "MSI TV@nywhere Plus";
+ init_data.get_key = get_key_msi_tvanywhere_plus;
+ init_data.ir_codes = ir_codes_msi_tvanywhere_plus;
+ info.addr = 0x30;
+ /* MSI TV@nywhere Plus controller doesn't seem to
+ respond to probes unless we read something from
+ an existing device. Weird...
+ REVISIT: might no longer be needed */
+ rc = i2c_transfer(&dev->i2c_adap, &msg_msi, 1);
+ dprintk(KERN_DEBUG "probe 0x%02x @ %s: %s\n",
+ msg_msi.addr, dev->i2c_adap.name,
+ (1 == rc) ? "yes" : "no");
break;
case SAA7134_BOARD_HAUPPAUGE_HVR1110:
- snprintf(ir->c.name, sizeof(ir->c.name), "HVR 1110");
- ir->get_key = get_key_hvr1110;
- ir->ir_codes = ir_codes_hauppauge_new;
- break;
- case SAA7134_BOARD_BEHOLD_607_9FM:
+ init_data.name = "HVR 1110";
+ init_data.get_key = get_key_hvr1110;
+ init_data.ir_codes = ir_codes_hauppauge_new;
+ break;
+ case SAA7134_BOARD_BEHOLD_607FM_MK3:
+ case SAA7134_BOARD_BEHOLD_607FM_MK5:
+ case SAA7134_BOARD_BEHOLD_609FM_MK3:
+ case SAA7134_BOARD_BEHOLD_609FM_MK5:
+ case SAA7134_BOARD_BEHOLD_607RDS_MK3:
+ case SAA7134_BOARD_BEHOLD_607RDS_MK5:
+ case SAA7134_BOARD_BEHOLD_609RDS_MK3:
+ case SAA7134_BOARD_BEHOLD_609RDS_MK5:
case SAA7134_BOARD_BEHOLD_M6:
case SAA7134_BOARD_BEHOLD_M63:
case SAA7134_BOARD_BEHOLD_M6_EXTRA:
case SAA7134_BOARD_BEHOLD_H6:
- snprintf(ir->c.name, sizeof(ir->c.name), "BeholdTV");
- ir->get_key = get_key_beholdm6xx;
- ir->ir_codes = ir_codes_behold;
+ init_data.name = "BeholdTV";
+ init_data.get_key = get_key_beholdm6xx;
+ init_data.ir_codes = ir_codes_behold;
break;
- default:
- dprintk("Shouldn't get here: Unknown board %x for I2C IR?\n",dev->board);
+ case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
+ case SAA7134_BOARD_AVERMEDIA_CARDBUS_506:
+ info.addr = 0x40;
break;
}
+ if (init_data.name)
+ info.platform_data = &init_data;
+ /* No need to probe if address is known */
+ if (info.addr) {
+ i2c_new_device(&dev->i2c_adap, &info);
+ return;
+ }
+
+ /* Address not known, fallback to probing */
+ i2c_new_probed_device(&dev->i2c_adap, &info, addr_list);
}
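The new probe path registers the IR receiver directly when the board supplies a fixed i2c address and otherwise lets the i2c core probe a short address list. A reduced sketch of that decision, using the same i2c core calls as the hunk above; the helper and its arguments are illustrative:

#include <linux/i2c.h>
#include <linux/string.h>

static void register_ir_client(struct i2c_adapter *adap,
			       unsigned short fixed_addr,	/* 0 if unknown */
			       void *platform_data)
{
	const unsigned short addr_list[] = {
		0x7a, 0x47, 0x71, 0x2d, I2C_CLIENT_END
	};
	struct i2c_board_info info;

	memset(&info, 0, sizeof(info));
	strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
	info.platform_data = platform_data;

	if (fixed_addr) {			/* the board tells us where the chip lives */
		info.addr = fixed_addr;
		i2c_new_device(adap, &info);
		return;
	}
	/* otherwise let the i2c core probe the usual IR addresses */
	i2c_new_probed_device(adap, &info, addr_list);
}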
static int saa7134_rc5_irq(struct saa7134_dev *dev)
diff --git a/drivers/media/video/saa7134/saa7134-ts.c b/drivers/media/video/saa7134/saa7134-ts.c
index cc8b923afbc..3fa652279ac 100644
--- a/drivers/media/video/saa7134/saa7134-ts.c
+++ b/drivers/media/video/saa7134/saa7134-ts.c
@@ -65,35 +65,10 @@ static int buffer_activate(struct saa7134_dev *dev,
/* start DMA */
saa7134_set_dmabits(dev);
- mod_timer(&dev->ts_q.timeout, jiffies+BUFFER_TIMEOUT);
-
- if (dev->ts_state == SAA7134_TS_BUFF_DONE) {
- /* Clear TS cache */
- dev->buff_cnt = 0;
- saa_writeb(SAA7134_TS_SERIAL1, 0x00);
- saa_writeb(SAA7134_TS_SERIAL1, 0x03);
- saa_writeb(SAA7134_TS_SERIAL1, 0x00);
- saa_writeb(SAA7134_TS_SERIAL1, 0x01);
-
- /* TS clock non-inverted */
- saa_writeb(SAA7134_TS_SERIAL1, 0x00);
-
- /* Start TS stream */
- switch (saa7134_boards[dev->board].ts_type) {
- case SAA7134_MPEG_TS_PARALLEL:
- saa_writeb(SAA7134_TS_SERIAL0, 0x40);
- saa_writeb(SAA7134_TS_PARALLEL, 0xec);
- break;
- case SAA7134_MPEG_TS_SERIAL:
- saa_writeb(SAA7134_TS_SERIAL0, 0xd8);
- saa_writeb(SAA7134_TS_PARALLEL, 0x6c);
- saa_writeb(SAA7134_TS_PARALLEL_SERIAL, 0xbc);
- saa_writeb(SAA7134_TS_SERIAL1, 0x02);
- break;
- }
+ mod_timer(&dev->ts_q.timeout, jiffies+TS_BUFFER_TIMEOUT);
- dev->ts_state = SAA7134_TS_STARTED;
- }
+ if (!dev->ts_started)
+ saa7134_ts_start(dev);
return 0;
}
@@ -104,7 +79,6 @@ static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
struct saa7134_dev *dev = q->priv_data;
struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
unsigned int lines, llength, size;
- u32 control;
int err;
dprintk("buffer_prepare [%p,%s]\n",buf,v4l2_field_names[field]);
@@ -121,8 +95,11 @@ static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
}
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
+
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
+ dprintk("buffer_prepare: needs_init\n");
+
buf->vb.width = llength;
buf->vb.height = lines;
buf->vb.size = size;
@@ -139,23 +116,6 @@ static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
goto oops;
}
- dev->buff_cnt++;
-
- if (dev->buff_cnt == dev->ts.nr_bufs) {
- dev->ts_state = SAA7134_TS_BUFF_DONE;
- /* dma: setup channel 5 (= TS) */
- control = SAA7134_RS_CONTROL_BURST_16 |
- SAA7134_RS_CONTROL_ME |
- (buf->pt->dma >> 12);
-
- saa_writeb(SAA7134_TS_DMA0, (lines - 1) & 0xff);
- saa_writeb(SAA7134_TS_DMA1, ((lines - 1) >> 8) & 0xff);
- /* TSNOPIT=0, TSCOLAP=0 */
- saa_writeb(SAA7134_TS_DMA2, (((lines - 1) >> 16) & 0x3f) | 0x00);
- saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
- saa_writel(SAA7134_RS_CONTROL(5), control);
- }
-
buf->vb.state = VIDEOBUF_PREPARED;
buf->activate = buffer_activate;
buf->vb.field = field;
@@ -175,8 +135,7 @@ buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
if (0 == *count)
*count = dev->ts.nr_bufs;
*count = saa7134_buffer_count(*size,*count);
- dev->buff_cnt = 0;
- dev->ts_state = SAA7134_TS_STOPPED;
+
return 0;
}
@@ -193,11 +152,9 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
struct saa7134_dev *dev = q->priv_data;
- if (dev->ts_state == SAA7134_TS_STARTED) {
- /* Stop TS transport */
- saa_writeb(SAA7134_TS_PARALLEL, 0x6c);
- dev->ts_state = SAA7134_TS_STOPPED;
- }
+ if (dev->ts_started)
+ saa7134_ts_stop(dev);
+
saa7134_dma_free(q,buf);
}
@@ -214,7 +171,7 @@ EXPORT_SYMBOL_GPL(saa7134_ts_qops);
static unsigned int tsbufs = 8;
module_param(tsbufs, int, 0444);
-MODULE_PARM_DESC(tsbufs,"number of ts buffers, range 2-32");
+MODULE_PARM_DESC(tsbufs, "number of ts buffers for read/write IO, range 2-32");
static unsigned int ts_nr_packets = 64;
module_param(ts_nr_packets, int, 0444);
@@ -256,6 +213,7 @@ int saa7134_ts_init1(struct saa7134_dev *dev)
dev->ts_q.timeout.data = (unsigned long)(&dev->ts_q);
dev->ts_q.dev = dev;
dev->ts_q.need_two = 1;
+ dev->ts_started = 0;
saa7134_pgtable_alloc(dev->pci,&dev->ts.pt_ts);
/* init TS hw */
@@ -264,13 +222,67 @@ int saa7134_ts_init1(struct saa7134_dev *dev)
return 0;
}
+/* Stop TS streaming */
+int saa7134_ts_stop(struct saa7134_dev *dev)
+{
+ dprintk("TS stop\n");
+
+ BUG_ON(!dev->ts_started);
+
+ /* Stop TS stream */
+ switch (saa7134_boards[dev->board].ts_type) {
+ case SAA7134_MPEG_TS_PARALLEL:
+ saa_writeb(SAA7134_TS_PARALLEL, 0x6c);
+ dev->ts_started = 0;
+ break;
+ case SAA7134_MPEG_TS_SERIAL:
+ saa_writeb(SAA7134_TS_SERIAL0, 0x40);
+ dev->ts_started = 0;
+ break;
+ }
+ return 0;
+}
+
+/* Start TS streaming */
+int saa7134_ts_start(struct saa7134_dev *dev)
+{
+ dprintk("TS start\n");
+
+ BUG_ON(dev->ts_started);
+
+ saa_writeb(SAA7134_TS_SERIAL1, 0x00);
+ saa_writeb(SAA7134_TS_SERIAL1, 0x03);
+ saa_writeb(SAA7134_TS_SERIAL1, 0x00);
+ saa_writeb(SAA7134_TS_SERIAL1, 0x01);
+
+ /* TS clock non-inverted */
+ saa_writeb(SAA7134_TS_SERIAL1, 0x00);
+
+ /* Start TS stream */
+ switch (saa7134_boards[dev->board].ts_type) {
+ case SAA7134_MPEG_TS_PARALLEL:
+ saa_writeb(SAA7134_TS_SERIAL0, 0x40);
+ saa_writeb(SAA7134_TS_PARALLEL, 0xec);
+ break;
+ case SAA7134_MPEG_TS_SERIAL:
+ saa_writeb(SAA7134_TS_SERIAL0, 0xd8);
+ saa_writeb(SAA7134_TS_PARALLEL, 0x6c);
+ saa_writeb(SAA7134_TS_PARALLEL_SERIAL, 0xbc);
+ saa_writeb(SAA7134_TS_SERIAL1, 0x02);
+ break;
+ }
+
+ dev->ts_started = 1;
+
+ return 0;
+}
+
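Both helpers key off the single ts_started flag: callers test it before calling, and the helpers assert it with BUG_ON. A reduced user-space sketch of that pairing, with the register writes elided:

#include <assert.h>
#include <stdio.h>

static int ts_started;			/* mirrors dev->ts_started */

static void ts_start(void)
{
	assert(!ts_started);		/* the driver uses BUG_ON(dev->ts_started) */
	/* ... program the SAA7134_TS_* registers for the board's ts_type ... */
	ts_started = 1;
}

static void ts_stop(void)
{
	assert(ts_started);
	/* ... write the stop value for parallel or serial TS ... */
	ts_started = 0;
}

int main(void)
{
	if (!ts_started)		/* callers guard, helpers assert */
		ts_start();
	if (ts_started)
		ts_stop();
	return 0;
}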
int saa7134_ts_fini(struct saa7134_dev *dev)
{
saa7134_pgtable_free(dev->pci,&dev->ts.pt_ts);
return 0;
}
-
void saa7134_irq_ts_done(struct saa7134_dev *dev, unsigned long status)
{
enum v4l2_field field;
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 493cad94146..e305c1674ce 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1057,6 +1057,7 @@ static int buffer_prepare(struct videobuf_queue *q,
buf->vb.field = field;
buf->fmt = fh->fmt;
buf->pt = &fh->pt_cap;
+ dev->video_q.curr = NULL;
err = videobuf_iolock(q,&buf->vb,&dev->ovbuf);
if (err)
@@ -1423,11 +1424,13 @@ video_poll(struct file *file, struct poll_table_struct *wait)
{
struct saa7134_fh *fh = file->private_data;
struct videobuf_buffer *buf = NULL;
+ unsigned int rc = 0;
if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type)
return videobuf_poll_stream(file, &fh->vbi, wait);
if (res_check(fh,RESOURCE_VIDEO)) {
+ mutex_lock(&fh->cap.vb_lock);
if (!list_empty(&fh->cap.stream))
buf = list_entry(fh->cap.stream.next, struct videobuf_buffer, stream);
} else {
@@ -1446,13 +1449,14 @@ video_poll(struct file *file, struct poll_table_struct *wait)
}
if (!buf)
- return POLLERR;
+ goto err;
poll_wait(file, &buf->done, wait);
if (buf->state == VIDEOBUF_DONE ||
buf->state == VIDEOBUF_ERROR)
- return POLLIN|POLLRDNORM;
- return 0;
+ rc = POLLIN|POLLRDNORM;
+ mutex_unlock(&fh->cap.vb_lock);
+ return rc;
err:
mutex_unlock(&fh->cap.vb_lock);
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 0cbaf90d487..82268848f26 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -252,7 +252,7 @@ struct saa7134_format {
#define SAA7134_BOARD_BEHOLD_505FM 126
#define SAA7134_BOARD_BEHOLD_507_9FM 127
#define SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM 128
-#define SAA7134_BOARD_BEHOLD_607_9FM 129
+#define SAA7134_BOARD_BEHOLD_607FM_MK3 129
#define SAA7134_BOARD_BEHOLD_M6 130
#define SAA7134_BOARD_TWINHAN_DTV_DVB_3056 131
#define SAA7134_BOARD_GENIUS_TVGO_A11MCE 132
@@ -280,6 +280,18 @@ struct saa7134_format {
#define SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS 154
#define SAA7134_BOARD_HAUPPAUGE_HVR1120 155
#define SAA7134_BOARD_HAUPPAUGE_HVR1110R3 156
+#define SAA7134_BOARD_AVERMEDIA_STUDIO_507UA 157
+#define SAA7134_BOARD_AVERMEDIA_CARDBUS_501 158
+#define SAA7134_BOARD_BEHOLD_505RDS 159
+#define SAA7134_BOARD_BEHOLD_507RDS_MK3 160
+#define SAA7134_BOARD_BEHOLD_507RDS_MK5 161
+#define SAA7134_BOARD_BEHOLD_607FM_MK5 162
+#define SAA7134_BOARD_BEHOLD_609FM_MK3 163
+#define SAA7134_BOARD_BEHOLD_609FM_MK5 164
+#define SAA7134_BOARD_BEHOLD_607RDS_MK3 165
+#define SAA7134_BOARD_BEHOLD_607RDS_MK5 166
+#define SAA7134_BOARD_BEHOLD_609RDS_MK3 167
+#define SAA7134_BOARD_BEHOLD_609RDS_MK5 168
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
@@ -364,6 +376,7 @@ struct saa7134_board {
#define INTERLACE_OFF 2
#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
+#define TS_BUFFER_TIMEOUT msecs_to_jiffies(1000) /* 1 second */
struct saa7134_dev;
struct saa7134_dma;
@@ -480,12 +493,6 @@ struct saa7134_mpeg_ops {
void (*signal_change)(struct saa7134_dev *dev);
};
-enum saa7134_ts_status {
- SAA7134_TS_STOPPED,
- SAA7134_TS_BUFF_DONE,
- SAA7134_TS_STARTED,
-};
-
/* global device status */
struct saa7134_dev {
struct list_head devlist;
@@ -580,8 +587,7 @@ struct saa7134_dev {
/* SAA7134_MPEG_* */
struct saa7134_ts ts;
struct saa7134_dmaqueue ts_q;
- enum saa7134_ts_status ts_state;
- unsigned int buff_cnt;
+ int ts_started;
struct saa7134_mpeg_ops *mops;
/* SAA7134_MPEG_EMPRESS only */
@@ -739,6 +745,9 @@ void saa7134_ts_unregister(struct saa7134_mpeg_ops *ops);
int saa7134_ts_init_hw(struct saa7134_dev *dev);
+int saa7134_ts_start(struct saa7134_dev *dev);
+int saa7134_ts_stop(struct saa7134_dev *dev);
+
/* ----------------------------------------------------------- */
/* saa7134-vbi.c */
@@ -786,7 +795,7 @@ void saa7134_irq_oss_done(struct saa7134_dev *dev, unsigned long status);
int saa7134_input_init1(struct saa7134_dev *dev);
void saa7134_input_fini(struct saa7134_dev *dev);
void saa7134_input_irq(struct saa7134_dev *dev);
-void saa7134_set_i2c_ir(struct saa7134_dev *dev, struct IR_i2c *ir);
+void saa7134_probe_i2c_ir(struct saa7134_dev *dev);
void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir);
void saa7134_ir_stop(struct saa7134_dev *dev);
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index 5990ab38a12..c8f05297d0f 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -38,7 +38,7 @@ static const char version[] = "0.24";
static int flickerless;
static int video_nr = -1;
-static struct usb_device_id device_table [] = {
+static struct usb_device_id device_table[] = {
{ USB_DEVICE(0x03e8, 0x0004) },/* Endpoints/Aox SE401 */
{ USB_DEVICE(0x0471, 0x030b) },/* Philips PCVC665K */
{ USB_DEVICE(0x047d, 0x5001) },/* Kensington 67014 */
@@ -53,7 +53,8 @@ MODULE_AUTHOR("Jeroen Vreeken <pe1rxq@amsat.org>");
MODULE_DESCRIPTION("SE401 USB Camera Driver");
MODULE_LICENSE("GPL");
module_param(flickerless, int, 0);
-MODULE_PARM_DESC(flickerless, "Net frequency to adjust exposure time to (0/50/60)");
+MODULE_PARM_DESC(flickerless,
+ "Net frequency to adjust exposure time to (0/50/60)");
module_param(video_nr, int, 0);
static struct usb_driver se401_driver;
@@ -78,8 +79,8 @@ static void *rvmalloc(unsigned long size)
adr = (unsigned long) mem;
while (size > 0) {
SetPageReserved(vmalloc_to_page((void *)adr));
- adr += PAGE_SIZE;
- size -= PAGE_SIZE;
+ adr += PAGE_SIZE;
+ size -= PAGE_SIZE;
}
return mem;
@@ -95,8 +96,8 @@ static void rvfree(void *mem, unsigned long size)
adr = (unsigned long) mem;
while ((long) size > 0) {
ClearPageReserved(vmalloc_to_page((void *)adr));
- adr += PAGE_SIZE;
- size -= PAGE_SIZE;
+ adr += PAGE_SIZE;
+ size -= PAGE_SIZE;
}
vfree(mem);
}
@@ -112,7 +113,7 @@ static void rvfree(void *mem, unsigned long size)
static int se401_sndctrl(int set, struct usb_se401 *se401, unsigned short req,
unsigned short value, unsigned char *cp, int size)
{
- return usb_control_msg (
+ return usb_control_msg(
se401->dev,
set ? usb_sndctrlpipe(se401->dev, 0) : usb_rcvctrlpipe(se401->dev, 0),
req,
@@ -132,7 +133,7 @@ static int se401_set_feature(struct usb_se401 *se401, unsigned short selector,
and the param in index, but in the logs of the windows driver they do
this the other way around...
*/
- return usb_control_msg (
+ return usb_control_msg(
se401->dev,
usb_sndctrlpipe(se401->dev, 0),
SE401_REQ_SET_EXT_FEATURE,
@@ -152,7 +153,7 @@ static unsigned short se401_get_feature(struct usb_se401 *se401,
wrong here to....
*/
unsigned char cp[2];
- usb_control_msg (
+ usb_control_msg(
se401->dev,
usb_rcvctrlpipe(se401->dev, 0),
SE401_REQ_GET_EXT_FEATURE,
@@ -175,46 +176,51 @@ static unsigned short se401_get_feature(struct usb_se401 *se401,
static int se401_send_pict(struct usb_se401 *se401)
{
- se401_set_feature(se401, HV7131_REG_TITL, se401->expose_l);/* integration time low */
- se401_set_feature(se401, HV7131_REG_TITM, se401->expose_m);/* integration time mid */
- se401_set_feature(se401, HV7131_REG_TITU, se401->expose_h);/* integration time mid */
- se401_set_feature(se401, HV7131_REG_ARLV, se401->resetlevel);/* reset level value */
- se401_set_feature(se401, HV7131_REG_ARCG, se401->rgain);/* red color gain */
- se401_set_feature(se401, HV7131_REG_AGCG, se401->ggain);/* green color gain */
- se401_set_feature(se401, HV7131_REG_ABCG, se401->bgain);/* blue color gain */
+ /* integration time low */
+ se401_set_feature(se401, HV7131_REG_TITL, se401->expose_l);
+ /* integration time mid */
+ se401_set_feature(se401, HV7131_REG_TITM, se401->expose_m);
+ /* integration time mid */
+ se401_set_feature(se401, HV7131_REG_TITU, se401->expose_h);
+ /* reset level value */
+ se401_set_feature(se401, HV7131_REG_ARLV, se401->resetlevel);
+ /* red color gain */
+ se401_set_feature(se401, HV7131_REG_ARCG, se401->rgain);
+ /* green color gain */
+ se401_set_feature(se401, HV7131_REG_AGCG, se401->ggain);
+ /* blue color gain */
+ se401_set_feature(se401, HV7131_REG_ABCG, se401->bgain);
return 0;
}
static void se401_set_exposure(struct usb_se401 *se401, int brightness)
{
- int integration=brightness<<5;
-
- if (flickerless==50) {
- integration=integration-integration%106667;
- }
- if (flickerless==60) {
- integration=integration-integration%88889;
- }
- se401->brightness=integration>>5;
- se401->expose_h=(integration>>16)&0xff;
- se401->expose_m=(integration>>8)&0xff;
- se401->expose_l=integration&0xff;
+ int integration = brightness << 5;
+
+ if (flickerless == 50)
+ integration = integration-integration % 106667;
+ if (flickerless == 60)
+ integration = integration-integration % 88889;
+ se401->brightness = integration >> 5;
+ se401->expose_h = (integration >> 16) & 0xff;
+ se401->expose_m = (integration >> 8) & 0xff;
+ se401->expose_l = integration & 0xff;
}
static int se401_get_pict(struct usb_se401 *se401, struct video_picture *p)
{
- p->brightness=se401->brightness;
- if (se401->enhance) {
- p->whiteness=32768;
- } else {
- p->whiteness=0;
- }
- p->colour=65535;
- p->contrast=65535;
- p->hue=se401->rgain<<10;
- p->palette=se401->palette;
- p->depth=3; /* rgb24 */
+ p->brightness = se401->brightness;
+ if (se401->enhance)
+ p->whiteness = 32768;
+ else
+ p->whiteness = 0;
+
+ p->colour = 65535;
+ p->contrast = 65535;
+ p->hue = se401->rgain << 10;
+ p->palette = se401->palette;
+ p->depth = 3; /* rgb24 */
return 0;
}
@@ -223,20 +229,19 @@ static int se401_set_pict(struct usb_se401 *se401, struct video_picture *p)
{
if (p->palette != VIDEO_PALETTE_RGB24)
return 1;
- se401->palette=p->palette;
- if (p->hue!=se401->hue) {
- se401->rgain= p->hue>>10;
- se401->bgain= 0x40-(p->hue>>10);
- se401->hue=p->hue;
+ se401->palette = p->palette;
+ if (p->hue != se401->hue) {
+ se401->rgain = p->hue >> 10;
+ se401->bgain = 0x40-(p->hue >> 10);
+ se401->hue = p->hue;
}
- if (p->brightness!=se401->brightness) {
+ if (p->brightness != se401->brightness)
se401_set_exposure(se401, p->brightness);
- }
- if (p->whiteness>=32768) {
- se401->enhance=1;
- } else {
- se401->enhance=0;
- }
+
+ if (p->whiteness >= 32768)
+ se401->enhance = 1;
+ else
+ se401->enhance = 0;
se401_send_pict(se401);
se401_send_pict(se401);
return 0;
@@ -249,7 +254,7 @@ static int se401_set_pict(struct usb_se401 *se401, struct video_picture *p)
static void se401_auto_resetlevel(struct usb_se401 *se401)
{
unsigned int ahrc, alrc;
- int oldreset=se401->resetlevel;
+ int oldreset = se401->resetlevel;
/* For some reason this normally read-only register doesn't get reset
to zero after reading them just once...
@@ -258,24 +263,24 @@ static void se401_auto_resetlevel(struct usb_se401 *se401)
se401_get_feature(se401, HV7131_REG_HIREFNOL);
se401_get_feature(se401, HV7131_REG_LOREFNOH);
se401_get_feature(se401, HV7131_REG_LOREFNOL);
- ahrc=256*se401_get_feature(se401, HV7131_REG_HIREFNOH) +
+ ahrc = 256*se401_get_feature(se401, HV7131_REG_HIREFNOH) +
se401_get_feature(se401, HV7131_REG_HIREFNOL);
- alrc=256*se401_get_feature(se401, HV7131_REG_LOREFNOH) +
+ alrc = 256*se401_get_feature(se401, HV7131_REG_LOREFNOH) +
se401_get_feature(se401, HV7131_REG_LOREFNOL);
/* Not an exact science, but it seems to work pretty well... */
if (alrc > 10) {
- while (alrc>=10 && se401->resetlevel < 63) {
+ while (alrc >= 10 && se401->resetlevel < 63) {
se401->resetlevel++;
- alrc /=2;
+ alrc /= 2;
}
} else if (ahrc > 20) {
- while (ahrc>=20 && se401->resetlevel > 0) {
+ while (ahrc >= 20 && se401->resetlevel > 0) {
se401->resetlevel--;
- ahrc /=2;
+ ahrc /= 2;
}
}
- if (se401->resetlevel!=oldreset)
+ if (se401->resetlevel != oldreset)
se401_set_feature(se401, HV7131_REG_ARLV, se401->resetlevel);
return;
@@ -300,21 +305,22 @@ static void se401_button_irq(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __func__, urb->status);
+ dbg("%s - urb shutting down with status: %d",
+ __func__, urb->status);
return;
default:
- dbg("%s - nonzero urb status received: %d", __func__, urb->status);
+ dbg("%s - nonzero urb status received: %d",
+ __func__, urb->status);
goto exit;
}
- if (urb->actual_length >=2) {
+ if (urb->actual_length >= 2)
if (se401->button)
- se401->buttonpressed=1;
- }
+ se401->buttonpressed = 1;
exit:
- status = usb_submit_urb (urb, GFP_ATOMIC);
+ status = usb_submit_urb(urb, GFP_ATOMIC);
if (status)
- err ("%s - usb_submit_urb failed with result %d",
+ err("%s - usb_submit_urb failed with result %d",
__func__, status);
}
@@ -336,55 +342,52 @@ static void se401_video_irq(struct urb *urb)
keeps sending them forever...
*/
if (length && !urb->status) {
- se401->nullpackets=0;
- switch(se401->scratch[se401->scratch_next].state) {
- case BUFFER_READY:
- case BUFFER_BUSY: {
- se401->dropped++;
- break;
- }
- case BUFFER_UNUSED: {
- memcpy(se401->scratch[se401->scratch_next].data, (unsigned char *)urb->transfer_buffer, length);
- se401->scratch[se401->scratch_next].state=BUFFER_READY;
- se401->scratch[se401->scratch_next].offset=se401->bayeroffset;
- se401->scratch[se401->scratch_next].length=length;
- if (waitqueue_active(&se401->wq)) {
- wake_up_interruptible(&se401->wq);
- }
- se401->scratch_overflow=0;
- se401->scratch_next++;
- if (se401->scratch_next>=SE401_NUMSCRATCH)
- se401->scratch_next=0;
- break;
- }
- }
- se401->bayeroffset+=length;
- if (se401->bayeroffset>=se401->cheight*se401->cwidth) {
- se401->bayeroffset=0;
+ se401->nullpackets = 0;
+ switch (se401->scratch[se401->scratch_next].state) {
+ case BUFFER_READY:
+ case BUFFER_BUSY:
+ se401->dropped++;
+ break;
+ case BUFFER_UNUSED:
+ memcpy(se401->scratch[se401->scratch_next].data,
+ (unsigned char *)urb->transfer_buffer, length);
+ se401->scratch[se401->scratch_next].state
+ = BUFFER_READY;
+ se401->scratch[se401->scratch_next].offset
+ = se401->bayeroffset;
+ se401->scratch[se401->scratch_next].length = length;
+ if (waitqueue_active(&se401->wq))
+ wake_up_interruptible(&se401->wq);
+ se401->scratch_overflow = 0;
+ se401->scratch_next++;
+ if (se401->scratch_next >= SE401_NUMSCRATCH)
+ se401->scratch_next = 0;
+ break;
}
+ se401->bayeroffset += length;
+ if (se401->bayeroffset >= se401->cheight * se401->cwidth)
+ se401->bayeroffset = 0;
} else {
se401->nullpackets++;
- if (se401->nullpackets > SE401_MAX_NULLPACKETS) {
- if (waitqueue_active(&se401->wq)) {
+ if (se401->nullpackets > SE401_MAX_NULLPACKETS)
+ if (waitqueue_active(&se401->wq))
wake_up_interruptible(&se401->wq);
- }
- }
}
/* Resubmit urb for new data */
- urb->status=0;
- urb->dev=se401->dev;
- if(usb_submit_urb(urb, GFP_KERNEL))
+ urb->status = 0;
+ urb->dev = se401->dev;
+ if (usb_submit_urb(urb, GFP_KERNEL))
dev_info(&urb->dev->dev, "urb burned down\n");
return;
}
static void se401_send_size(struct usb_se401 *se401, int width, int height)
{
- int i=0;
- int mode=0x03; /* No compression */
- int sendheight=height;
- int sendwidth=width;
+ int i = 0;
+ int mode = 0x03; /* No compression */
+ int sendheight = height;
+ int sendwidth = width;
/* JangGu compression can only be used with the camera supported sizes,
but bayer seems to work with any size that fits on the sensor.
@@ -392,18 +395,21 @@ static void se401_send_size(struct usb_se401 *se401, int width, int height)
4 or 16 times subcapturing, if not we use uncompressed bayer data
but this will result in cutouts of the maximum size....
*/
- while (i<se401->sizes && !(se401->width[i]==width && se401->height[i]==height))
+ while (i < se401->sizes && !(se401->width[i] == width &&
+ se401->height[i] == height))
i++;
- while (i<se401->sizes) {
- if (se401->width[i]==width*2 && se401->height[i]==height*2) {
- sendheight=se401->height[i];
- sendwidth=se401->width[i];
- mode=0x40;
+ while (i < se401->sizes) {
+ if (se401->width[i] == width * 2 &&
+ se401->height[i] == height * 2) {
+ sendheight = se401->height[i];
+ sendwidth = se401->width[i];
+ mode = 0x40;
}
- if (se401->width[i]==width*4 && se401->height[i]==height*4) {
- sendheight=se401->height[i];
- sendwidth=se401->width[i];
- mode=0x42;
+ if (se401->width[i] == width * 4 &&
+ se401->height[i] == height * 4) {
+ sendheight = se401->height[i];
+ sendwidth = se401->width[i];
+ mode = 0x42;
}
i++;
}
@@ -412,13 +418,10 @@ static void se401_send_size(struct usb_se401 *se401, int width, int height)
se401_sndctrl(1, se401, SE401_REQ_SET_HEIGHT, sendheight, NULL, 0);
se401_set_feature(se401, SE401_OPERATINGMODE, mode);
- if (mode==0x03) {
- se401->format=FMT_BAYER;
- } else {
- se401->format=FMT_JANGGU;
- }
-
- return;
+ if (mode == 0x03)
+ se401->format = FMT_BAYER;
+ else
+ se401->format = FMT_JANGGU;
}
/*
@@ -429,29 +432,31 @@ static void se401_send_size(struct usb_se401 *se401, int width, int height)
static int se401_start_stream(struct usb_se401 *se401)
{
struct urb *urb;
- int err=0, i;
- se401->streaming=1;
+ int err = 0, i;
+ se401->streaming = 1;
se401_sndctrl(1, se401, SE401_REQ_CAMERA_POWER, 1, NULL, 0);
se401_sndctrl(1, se401, SE401_REQ_LED_CONTROL, 1, NULL, 0);
/* Set picture settings */
- se401_set_feature(se401, HV7131_REG_MODE_B, 0x05);/*windowed + pix intg */
+ /* windowed + pix intg */
+ se401_set_feature(se401, HV7131_REG_MODE_B, 0x05);
se401_send_pict(se401);
se401_send_size(se401, se401->cwidth, se401->cheight);
- se401_sndctrl(1, se401, SE401_REQ_START_CONTINUOUS_CAPTURE, 0, NULL, 0);
+ se401_sndctrl(1, se401, SE401_REQ_START_CONTINUOUS_CAPTURE,
+ 0, NULL, 0);
/* Do some memory allocation */
- for (i=0; i<SE401_NUMFRAMES; i++) {
- se401->frame[i].data=se401->fbuf + i * se401->maxframesize;
- se401->frame[i].curpix=0;
+ for (i = 0; i < SE401_NUMFRAMES; i++) {
+ se401->frame[i].data = se401->fbuf + i * se401->maxframesize;
+ se401->frame[i].curpix = 0;
}
- for (i=0; i<SE401_NUMSBUF; i++) {
- se401->sbuf[i].data=kmalloc(SE401_PACKETSIZE, GFP_KERNEL);
+ for (i = 0; i < SE401_NUMSBUF; i++) {
+ se401->sbuf[i].data = kmalloc(SE401_PACKETSIZE, GFP_KERNEL);
if (!se401->sbuf[i].data) {
- for(i = i - 1; i >= 0; i--) {
+ for (i = i - 1; i >= 0; i--) {
kfree(se401->sbuf[i].data);
se401->sbuf[i].data = NULL;
}
@@ -459,26 +464,26 @@ static int se401_start_stream(struct usb_se401 *se401)
}
}
- se401->bayeroffset=0;
- se401->scratch_next=0;
- se401->scratch_use=0;
- se401->scratch_overflow=0;
- for (i=0; i<SE401_NUMSCRATCH; i++) {
- se401->scratch[i].data=kmalloc(SE401_PACKETSIZE, GFP_KERNEL);
+ se401->bayeroffset = 0;
+ se401->scratch_next = 0;
+ se401->scratch_use = 0;
+ se401->scratch_overflow = 0;
+ for (i = 0; i < SE401_NUMSCRATCH; i++) {
+ se401->scratch[i].data = kmalloc(SE401_PACKETSIZE, GFP_KERNEL);
if (!se401->scratch[i].data) {
- for(i = i - 1; i >= 0; i--) {
+ for (i = i - 1; i >= 0; i--) {
kfree(se401->scratch[i].data);
se401->scratch[i].data = NULL;
}
goto nomem_sbuf;
}
- se401->scratch[i].state=BUFFER_UNUSED;
+ se401->scratch[i].state = BUFFER_UNUSED;
}
- for (i=0; i<SE401_NUMSBUF; i++) {
- urb=usb_alloc_urb(0, GFP_KERNEL);
- if(!urb) {
- for(i = i - 1; i >= 0; i--) {
+ for (i = 0; i < SE401_NUMSBUF; i++) {
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ for (i = i - 1; i >= 0; i--) {
usb_kill_urb(se401->urb[i]);
usb_free_urb(se401->urb[i]);
se401->urb[i] = NULL;
@@ -492,24 +497,24 @@ static int se401_start_stream(struct usb_se401 *se401)
se401_video_irq,
se401);
- se401->urb[i]=urb;
+ se401->urb[i] = urb;
- err=usb_submit_urb(se401->urb[i], GFP_KERNEL);
- if(err)
+ err = usb_submit_urb(se401->urb[i], GFP_KERNEL);
+ if (err)
err("urb burned down");
}
- se401->framecount=0;
+ se401->framecount = 0;
return 0;
nomem_scratch:
- for (i=0; i<SE401_NUMSCRATCH; i++) {
+ for (i = 0; i < SE401_NUMSCRATCH; i++) {
kfree(se401->scratch[i].data);
se401->scratch[i].data = NULL;
}
nomem_sbuf:
- for (i=0; i<SE401_NUMSBUF; i++) {
+ for (i = 0; i < SE401_NUMSBUF; i++) {
kfree(se401->sbuf[i].data);
se401->sbuf[i].data = NULL;
}
@@ -523,22 +528,23 @@ static int se401_stop_stream(struct usb_se401 *se401)
if (!se401->streaming || !se401->dev)
return 1;
- se401->streaming=0;
+ se401->streaming = 0;
se401_sndctrl(1, se401, SE401_REQ_STOP_CONTINUOUS_CAPTURE, 0, NULL, 0);
se401_sndctrl(1, se401, SE401_REQ_LED_CONTROL, 0, NULL, 0);
se401_sndctrl(1, se401, SE401_REQ_CAMERA_POWER, 0, NULL, 0);
- for (i=0; i<SE401_NUMSBUF; i++) if (se401->urb[i]) {
- usb_kill_urb(se401->urb[i]);
- usb_free_urb(se401->urb[i]);
- se401->urb[i]=NULL;
- kfree(se401->sbuf[i].data);
- }
- for (i=0; i<SE401_NUMSCRATCH; i++) {
+ for (i = 0; i < SE401_NUMSBUF; i++)
+ if (se401->urb[i]) {
+ usb_kill_urb(se401->urb[i]);
+ usb_free_urb(se401->urb[i]);
+ se401->urb[i] = NULL;
+ kfree(se401->sbuf[i].data);
+ }
+ for (i = 0; i < SE401_NUMSCRATCH; i++) {
kfree(se401->scratch[i].data);
- se401->scratch[i].data=NULL;
+ se401->scratch[i].data = NULL;
}
return 0;
@@ -546,9 +552,9 @@ static int se401_stop_stream(struct usb_se401 *se401)
static int se401_set_size(struct usb_se401 *se401, int width, int height)
{
- int wasstreaming=se401->streaming;
+ int wasstreaming = se401->streaming;
/* Check to see if we need to change */
- if (se401->cwidth==width && se401->cheight==height)
+ if (se401->cwidth == width && se401->cheight == height)
return 0;
/* Check for a valid mode */
@@ -556,16 +562,16 @@ static int se401_set_size(struct usb_se401 *se401, int width, int height)
return 1;
if ((width & 1) || (height & 1))
return 1;
- if (width>se401->width[se401->sizes-1])
+ if (width > se401->width[se401->sizes-1])
return 1;
- if (height>se401->height[se401->sizes-1])
+ if (height > se401->height[se401->sizes-1])
return 1;
/* Stop a current stream and start it again at the new size */
if (wasstreaming)
se401_stop_stream(se401);
- se401->cwidth=width;
- se401->cheight=height;
+ se401->cwidth = width;
+ se401->cheight = height;
if (wasstreaming)
se401_start_stream(se401);
return 0;
@@ -586,68 +592,68 @@ static int se401_set_size(struct usb_se401 *se401, int width, int height)
static inline void enhance_picture(unsigned char *frame, int len)
{
while (len--) {
- *frame=(((*frame^255)*(*frame^255))/255)^255;
+ *frame = (((*frame^255)*(*frame^255))/255)^255;
frame++;
}
}
static inline void decode_JangGu_integrate(struct usb_se401 *se401, int data)
{
- struct se401_frame *frame=&se401->frame[se401->curframe];
- int linelength=se401->cwidth*3;
+ struct se401_frame *frame = &se401->frame[se401->curframe];
+ int linelength = se401->cwidth * 3;
if (frame->curlinepix >= linelength) {
- frame->curlinepix=0;
- frame->curline+=linelength;
+ frame->curlinepix = 0;
+ frame->curline += linelength;
}
/* First three are absolute, all others relative.
* Format is rgb from right to left (mirrored image),
* we flip it to get bgr from left to right. */
- if (frame->curlinepix < 3) {
- *(frame->curline-frame->curlinepix)=1+data*4;
- } else {
- *(frame->curline-frame->curlinepix)=
- *(frame->curline-frame->curlinepix+3)+data*4;
- }
+ if (frame->curlinepix < 3)
+ *(frame->curline-frame->curlinepix) = 1 + data * 4;
+ else
+ *(frame->curline-frame->curlinepix) =
+ *(frame->curline-frame->curlinepix + 3) + data * 4;
frame->curlinepix++;
}
-static inline void decode_JangGu_vlc (struct usb_se401 *se401, unsigned char *data, int bit_exp, int packetlength)
+static inline void decode_JangGu_vlc(struct usb_se401 *se401,
+ unsigned char *data, int bit_exp, int packetlength)
{
- int pos=0;
- int vlc_cod=0;
- int vlc_size=0;
- int vlc_data=0;
+ int pos = 0;
+ int vlc_cod = 0;
+ int vlc_size = 0;
+ int vlc_data = 0;
int bit_cur;
int bit;
- data+=4;
+ data += 4;
while (pos < packetlength) {
- bit_cur=8;
+ bit_cur = 8;
while (bit_cur && bit_exp) {
- bit=((*data)>>(bit_cur-1))&1;
+ bit = ((*data) >> (bit_cur-1))&1;
if (!vlc_cod) {
if (bit) {
vlc_size++;
} else {
- if (!vlc_size) {
+ if (!vlc_size)
decode_JangGu_integrate(se401, 0);
- } else {
- vlc_cod=2;
- vlc_data=0;
+ else {
+ vlc_cod = 2;
+ vlc_data = 0;
}
}
} else {
- if (vlc_cod==2) {
+ if (vlc_cod == 2) {
if (!bit)
- vlc_data = -(1<<vlc_size) + 1;
+ vlc_data = -(1 << vlc_size) + 1;
vlc_cod--;
}
vlc_size--;
- vlc_data+=bit<<vlc_size;
+ vlc_data += bit << vlc_size;
if (!vlc_size) {
decode_JangGu_integrate(se401, vlc_data);
- vlc_cod=0;
+ vlc_cod = 0;
}
}
bit_cur--;
@@ -658,186 +664,188 @@ static inline void decode_JangGu_vlc (struct usb_se401 *se401, unsigned char *da
}
}
-static inline void decode_JangGu (struct usb_se401 *se401, struct se401_scratch *buffer)
+static inline void decode_JangGu(struct usb_se401 *se401,
+ struct se401_scratch *buffer)
{
- unsigned char *data=buffer->data;
- int len=buffer->length;
- int bit_exp=0, pix_exp=0, frameinfo=0, packetlength=0, size;
- int datapos=0;
+ unsigned char *data = buffer->data;
+ int len = buffer->length;
+ int bit_exp = 0, pix_exp = 0, frameinfo = 0, packetlength = 0, size;
+ int datapos = 0;
/* New image? */
if (!se401->frame[se401->curframe].curpix) {
- se401->frame[se401->curframe].curlinepix=0;
- se401->frame[se401->curframe].curline=
+ se401->frame[se401->curframe].curlinepix = 0;
+ se401->frame[se401->curframe].curline =
se401->frame[se401->curframe].data+
- se401->cwidth*3-1;
- if (se401->frame[se401->curframe].grabstate==FRAME_READY)
- se401->frame[se401->curframe].grabstate=FRAME_GRABBING;
- se401->vlcdatapos=0;
+ se401->cwidth * 3 - 1;
+ if (se401->frame[se401->curframe].grabstate == FRAME_READY)
+ se401->frame[se401->curframe].grabstate = FRAME_GRABBING;
+ se401->vlcdatapos = 0;
}
while (datapos < len) {
- size=1024-se401->vlcdatapos;
+ size = 1024 - se401->vlcdatapos;
if (size+datapos > len)
- size=len-datapos;
+ size = len-datapos;
memcpy(se401->vlcdata+se401->vlcdatapos, data+datapos, size);
- se401->vlcdatapos+=size;
- packetlength=0;
+ se401->vlcdatapos += size;
+ packetlength = 0;
if (se401->vlcdatapos >= 4) {
- bit_exp=se401->vlcdata[3]+(se401->vlcdata[2]<<8);
- pix_exp=se401->vlcdata[1]+((se401->vlcdata[0]&0x3f)<<8);
- frameinfo=se401->vlcdata[0]&0xc0;
- packetlength=((bit_exp+47)>>4)<<1;
+ bit_exp = se401->vlcdata[3] + (se401->vlcdata[2] << 8);
+ pix_exp = se401->vlcdata[1] +
+ ((se401->vlcdata[0] & 0x3f) << 8);
+ frameinfo = se401->vlcdata[0] & 0xc0;
+ packetlength = ((bit_exp + 47) >> 4) << 1;
if (packetlength > 1024) {
- se401->vlcdatapos=0;
- datapos=len;
- packetlength=0;
+ se401->vlcdatapos = 0;
+ datapos = len;
+ packetlength = 0;
se401->error++;
- se401->frame[se401->curframe].curpix=0;
+ se401->frame[se401->curframe].curpix = 0;
}
}
if (packetlength && se401->vlcdatapos >= packetlength) {
- decode_JangGu_vlc(se401, se401->vlcdata, bit_exp, packetlength);
- se401->frame[se401->curframe].curpix+=pix_exp*3;
- datapos+=size-(se401->vlcdatapos-packetlength);
- se401->vlcdatapos=0;
- if (se401->frame[se401->curframe].curpix>=se401->cwidth*se401->cheight*3) {
- if (se401->frame[se401->curframe].curpix==se401->cwidth*se401->cheight*3) {
- if (se401->frame[se401->curframe].grabstate==FRAME_GRABBING) {
- se401->frame[se401->curframe].grabstate=FRAME_DONE;
+ decode_JangGu_vlc(se401, se401->vlcdata, bit_exp,
+ packetlength);
+ se401->frame[se401->curframe].curpix += pix_exp * 3;
+ datapos += size-(se401->vlcdatapos-packetlength);
+ se401->vlcdatapos = 0;
+ if (se401->frame[se401->curframe].curpix >= se401->cwidth * se401->cheight * 3) {
+ if (se401->frame[se401->curframe].curpix == se401->cwidth * se401->cheight * 3) {
+ if (se401->frame[se401->curframe].grabstate == FRAME_GRABBING) {
+ se401->frame[se401->curframe].grabstate = FRAME_DONE;
se401->framecount++;
se401->readcount++;
}
- if (se401->frame[(se401->curframe+1)&(SE401_NUMFRAMES-1)].grabstate==FRAME_READY) {
- se401->curframe=(se401->curframe+1) & (SE401_NUMFRAMES-1);
- }
- } else {
+ if (se401->frame[(se401->curframe + 1) & (SE401_NUMFRAMES - 1)].grabstate == FRAME_READY)
+ se401->curframe = (se401->curframe + 1) & (SE401_NUMFRAMES - 1);
+ } else
se401->error++;
- }
- se401->frame[se401->curframe].curpix=0;
- datapos=len;
+ se401->frame[se401->curframe].curpix = 0;
+ datapos = len;
}
- } else {
- datapos+=size;
- }
+ } else
+ datapos += size;
}
}
-static inline void decode_bayer (struct usb_se401 *se401, struct se401_scratch *buffer)
+static inline void decode_bayer(struct usb_se401 *se401,
+ struct se401_scratch *buffer)
{
- unsigned char *data=buffer->data;
- int len=buffer->length;
- int offset=buffer->offset;
- int datasize=se401->cwidth*se401->cheight;
- struct se401_frame *frame=&se401->frame[se401->curframe];
+ unsigned char *data = buffer->data;
+ int len = buffer->length;
+ int offset = buffer->offset;
+ int datasize = se401->cwidth * se401->cheight;
+ struct se401_frame *frame = &se401->frame[se401->curframe];
+ unsigned char *framedata = frame->data, *curline, *nextline;
+ int width = se401->cwidth;
+ int blineoffset = 0, bline;
+ int linelength = width * 3, i;
- unsigned char *framedata=frame->data, *curline, *nextline;
- int width=se401->cwidth;
- int blineoffset=0, bline;
- int linelength=width*3, i;
+ if (frame->curpix == 0) {
+ if (frame->grabstate == FRAME_READY)
+ frame->grabstate = FRAME_GRABBING;
- if (frame->curpix==0) {
- if (frame->grabstate==FRAME_READY) {
- frame->grabstate=FRAME_GRABBING;
- }
- frame->curline=framedata+linelength;
- frame->curlinepix=0;
+ frame->curline = framedata + linelength;
+ frame->curlinepix = 0;
}
- if (offset!=frame->curpix) {
+ if (offset != frame->curpix) {
/* Regard frame as lost :( */
- frame->curpix=0;
+ frame->curpix = 0;
se401->error++;
return;
}
/* Check if we have too much data */
- if (frame->curpix+len > datasize) {
- len=datasize-frame->curpix;
- }
- if (se401->cheight%4)
- blineoffset=1;
- bline=frame->curpix/se401->cwidth+blineoffset;
-
- curline=frame->curline;
- nextline=curline+linelength;
- if (nextline >= framedata+datasize*3)
- nextline=curline;
+ if (frame->curpix + len > datasize)
+ len = datasize-frame->curpix;
+
+ if (se401->cheight % 4)
+ blineoffset = 1;
+ bline = frame->curpix / se401->cwidth + blineoffset;
+
+ curline = frame->curline;
+ nextline = curline + linelength;
+ if (nextline >= framedata+datasize * 3)
+ nextline = curline;
while (len) {
- if (frame->curlinepix>=width) {
- frame->curlinepix-=width;
- bline=frame->curpix/width+blineoffset;
- curline+=linelength*2;
- nextline+=linelength*2;
- if (curline >= framedata+datasize*3) {
+ if (frame->curlinepix >= width) {
+ frame->curlinepix -= width;
+ bline = frame->curpix / width + blineoffset;
+ curline += linelength*2;
+ nextline += linelength*2;
+ if (curline >= framedata+datasize * 3) {
frame->curlinepix++;
- curline-=3;
- nextline-=3;
+ curline -= 3;
+ nextline -= 3;
len--;
data++;
frame->curpix++;
}
if (nextline >= framedata+datasize*3)
- nextline=curline;
+ nextline = curline;
}
- if ((bline&1)) {
- if ((frame->curlinepix&1)) {
- *(curline+2)=*data;
- *(curline-1)=*data;
- *(nextline+2)=*data;
- *(nextline-1)=*data;
+ if (bline & 1) {
+ if (frame->curlinepix & 1) {
+ *(curline + 2) = *data;
+ *(curline - 1) = *data;
+ *(nextline + 2) = *data;
+ *(nextline - 1) = *data;
} else {
- *(curline+1)=
- (*(curline+1)+*data)/2;
- *(curline-2)=
- (*(curline-2)+*data)/2;
- *(nextline+1)=*data;
- *(nextline-2)=*data;
+ *(curline + 1) =
+ (*(curline + 1) + *data) / 2;
+ *(curline-2) =
+ (*(curline - 2) + *data) / 2;
+ *(nextline + 1) = *data;
+ *(nextline - 2) = *data;
}
} else {
- if ((frame->curlinepix&1)) {
- *(curline+1)=
- (*(curline+1)+*data)/2;
- *(curline-2)=
- (*(curline-2)+*data)/2;
- *(nextline+1)=*data;
- *(nextline-2)=*data;
+ if (frame->curlinepix & 1) {
+ *(curline + 1) =
+ (*(curline + 1) + *data) / 2;
+ *(curline - 2) =
+ (*(curline - 2) + *data) / 2;
+ *(nextline + 1) = *data;
+ *(nextline - 2) = *data;
} else {
- *curline=*data;
- *(curline-3)=*data;
- *nextline=*data;
- *(nextline-3)=*data;
+ *curline = *data;
+ *(curline - 3) = *data;
+ *nextline = *data;
+ *(nextline - 3) = *data;
}
}
frame->curlinepix++;
- curline-=3;
- nextline-=3;
+ curline -= 3;
+ nextline -= 3;
len--;
data++;
frame->curpix++;
}
- frame->curline=curline;
+ frame->curline = curline;
- if (frame->curpix>=datasize) {
+ if (frame->curpix >= datasize) {
/* Fix the top line */
- framedata+=linelength;
- for (i=0; i<linelength; i++) {
+ framedata += linelength;
+ for (i = 0; i < linelength; i++) {
framedata--;
- *framedata=*(framedata+linelength);
+ *framedata = *(framedata + linelength);
}
/* Fix the left side (green is already present) */
- for (i=0; i<se401->cheight; i++) {
- *framedata=*(framedata+3);
- *(framedata+1)=*(framedata+4);
- *(framedata+2)=*(framedata+5);
- framedata+=linelength;
+ for (i = 0; i < se401->cheight; i++) {
+ *framedata = *(framedata + 3);
+ *(framedata + 1) = *(framedata + 4);
+ *(framedata + 2) = *(framedata + 5);
+ framedata += linelength;
}
- frame->curpix=0;
- frame->grabstate=FRAME_DONE;
+ frame->curpix = 0;
+ frame->grabstate = FRAME_DONE;
se401->framecount++;
se401->readcount++;
- if (se401->frame[(se401->curframe+1)&(SE401_NUMFRAMES-1)].grabstate==FRAME_READY) {
- se401->curframe=(se401->curframe+1) & (SE401_NUMFRAMES-1);
+ if (se401->frame[(se401->curframe + 1) &
+ (SE401_NUMFRAMES - 1)].grabstate == FRAME_READY) {
+ se401->curframe = (se401->curframe+1) &
+ (SE401_NUMFRAMES-1);
}
}
}
@@ -845,72 +853,76 @@ static inline void decode_bayer (struct usb_se401 *se401, struct se401_scratch *
static int se401_newframe(struct usb_se401 *se401, int framenr)
{
DECLARE_WAITQUEUE(wait, current);
- int errors=0;
+ int errors = 0;
while (se401->streaming &&
- (se401->frame[framenr].grabstate==FRAME_READY ||
- se401->frame[framenr].grabstate==FRAME_GRABBING) ) {
- if(!se401->frame[framenr].curpix) {
+ (se401->frame[framenr].grabstate == FRAME_READY ||
+ se401->frame[framenr].grabstate == FRAME_GRABBING)) {
+ if (!se401->frame[framenr].curpix)
errors++;
- }
+
wait_interruptible(
- se401->scratch[se401->scratch_use].state!=BUFFER_READY,
- &se401->wq,
- &wait
- );
+ se401->scratch[se401->scratch_use].state != BUFFER_READY,
+ &se401->wq, &wait);
if (se401->nullpackets > SE401_MAX_NULLPACKETS) {
- se401->nullpackets=0;
+ se401->nullpackets = 0;
dev_info(&se401->dev->dev,
- "too many null length packets, restarting capture\n");
+ "too many null length packets, restarting capture\n");
se401_stop_stream(se401);
se401_start_stream(se401);
} else {
- if (se401->scratch[se401->scratch_use].state!=BUFFER_READY) {
- se401->frame[framenr].grabstate=FRAME_ERROR;
+ if (se401->scratch[se401->scratch_use].state !=
+ BUFFER_READY) {
+ se401->frame[framenr].grabstate = FRAME_ERROR;
return -EIO;
}
- se401->scratch[se401->scratch_use].state=BUFFER_BUSY;
- if (se401->format==FMT_JANGGU) {
- decode_JangGu(se401, &se401->scratch[se401->scratch_use]);
- } else {
- decode_bayer(se401, &se401->scratch[se401->scratch_use]);
- }
- se401->scratch[se401->scratch_use].state=BUFFER_UNUSED;
+ se401->scratch[se401->scratch_use].state = BUFFER_BUSY;
+ if (se401->format == FMT_JANGGU)
+ decode_JangGu(se401,
+ &se401->scratch[se401->scratch_use]);
+ else
+ decode_bayer(se401,
+ &se401->scratch[se401->scratch_use]);
+
+ se401->scratch[se401->scratch_use].state =
+ BUFFER_UNUSED;
se401->scratch_use++;
- if (se401->scratch_use>=SE401_NUMSCRATCH)
- se401->scratch_use=0;
+ if (se401->scratch_use >= SE401_NUMSCRATCH)
+ se401->scratch_use = 0;
if (errors > SE401_MAX_ERRORS) {
- errors=0;
+ errors = 0;
dev_info(&se401->dev->dev,
- "too many errors, restarting capture\n");
+ "too many errors, restarting capture\n");
se401_stop_stream(se401);
se401_start_stream(se401);
}
}
}
- if (se401->frame[framenr].grabstate==FRAME_DONE)
+ if (se401->frame[framenr].grabstate == FRAME_DONE)
if (se401->enhance)
- enhance_picture(se401->frame[framenr].data, se401->cheight*se401->cwidth*3);
+ enhance_picture(se401->frame[framenr].data,
+ se401->cheight * se401->cwidth * 3);
return 0;
}
-static void usb_se401_remove_disconnected (struct usb_se401 *se401)
+static void usb_se401_remove_disconnected(struct usb_se401 *se401)
{
int i;
se401->dev = NULL;
- for (i=0; i<SE401_NUMSBUF; i++)
+ for (i = 0; i < SE401_NUMSBUF; i++)
if (se401->urb[i]) {
usb_kill_urb(se401->urb[i]);
usb_free_urb(se401->urb[i]);
se401->urb[i] = NULL;
kfree(se401->sbuf[i].data);
}
- for (i=0; i<SE401_NUMSCRATCH; i++) {
+
+ for (i = 0; i < SE401_NUMSCRATCH; i++)
kfree(se401->scratch[i].data);
- }
+
if (se401->inturb) {
usb_kill_urb(se401->inturb);
usb_free_urb(se401->inturb);
@@ -965,11 +977,11 @@ static int se401_close(struct file *file)
dev_info(&se401->dev->dev, "device unregistered\n");
usb_se401_remove_disconnected(se401);
} else {
- for (i=0; i<SE401_NUMFRAMES; i++)
- se401->frame[i].grabstate=FRAME_UNUSED;
+ for (i = 0; i < SE401_NUMFRAMES; i++)
+ se401->frame[i].grabstate = FRAME_UNUSED;
if (se401->streaming)
se401_stop_stream(se401);
- se401->user=0;
+ se401->user = 0;
}
file->private_data = NULL;
return 0;
@@ -1065,7 +1077,7 @@ static long se401_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(vm, 0, sizeof(*vm));
vm->size = SE401_NUMFRAMES * se401->maxframesize;
vm->frames = SE401_NUMFRAMES;
- for (i=0; i<SE401_NUMFRAMES; i++)
+ for (i = 0; i < SE401_NUMFRAMES; i++)
vm->offsets[i] = se401->maxframesize * i;
return 0;
}
@@ -1083,16 +1095,16 @@ static long se401_do_ioctl(struct file *file, unsigned int cmd, void *arg)
/* Is this according to the v4l spec??? */
if (se401_set_size(se401, vm->width, vm->height))
return -EINVAL;
- se401->frame[vm->frame].grabstate=FRAME_READY;
+ se401->frame[vm->frame].grabstate = FRAME_READY;
if (!se401->streaming)
se401_start_stream(se401);
/* Set the picture properties */
- if (se401->framecount==0)
+ if (se401->framecount == 0)
se401_send_pict(se401);
/* Calibrate the reset level after a few frames. */
- if (se401->framecount%20==1)
+ if (se401->framecount % 20 == 1)
se401_auto_resetlevel(se401);
return 0;
@@ -1100,13 +1112,13 @@ static long se401_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOCSYNC:
{
int *frame = arg;
- int ret=0;
+ int ret = 0;
- if(*frame <0 || *frame >= SE401_NUMFRAMES)
+ if (*frame < 0 || *frame >= SE401_NUMFRAMES)
return -EINVAL;
- ret=se401_newframe(se401, *frame);
- se401->frame[*frame].grabstate=FRAME_UNUSED;
+ ret = se401_newframe(se401, *frame);
+ se401->frame[*frame].grabstate = FRAME_UNUSED;
return ret;
}
case VIDIOCGFBUF:
@@ -1147,36 +1159,36 @@ static long se401_ioctl(struct file *file,
static ssize_t se401_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- int realcount=count, ret=0;
+ int realcount = count, ret = 0;
struct video_device *dev = file->private_data;
struct usb_se401 *se401 = (struct usb_se401 *)dev;
- if (se401->dev == NULL)
+ if (se401->dev == NULL)
return -EIO;
if (realcount > se401->cwidth*se401->cheight*3)
- realcount=se401->cwidth*se401->cheight*3;
+ realcount = se401->cwidth*se401->cheight*3;
/* Shouldn't happen: */
- if (se401->frame[0].grabstate==FRAME_GRABBING)
+ if (se401->frame[0].grabstate == FRAME_GRABBING)
return -EBUSY;
- se401->frame[0].grabstate=FRAME_READY;
- se401->frame[1].grabstate=FRAME_UNUSED;
- se401->curframe=0;
+ se401->frame[0].grabstate = FRAME_READY;
+ se401->frame[1].grabstate = FRAME_UNUSED;
+ se401->curframe = 0;
if (!se401->streaming)
se401_start_stream(se401);
/* Set the picture properties */
- if (se401->framecount==0)
+ if (se401->framecount == 0)
se401_send_pict(se401);
/* Calibrate the reset level after a few frames. */
- if (se401->framecount%20==1)
+ if (se401->framecount % 20 == 1)
se401_auto_resetlevel(se401);
- ret=se401_newframe(se401, 0);
+ ret = se401_newframe(se401, 0);
- se401->frame[0].grabstate=FRAME_UNUSED;
+ se401->frame[0].grabstate = FRAME_UNUSED;
if (ret)
return ret;
if (copy_to_user(buf, se401->frame[0].data, realcount))
@@ -1195,11 +1207,12 @@ static int se401_mmap(struct file *file, struct vm_area_struct *vma)
mutex_lock(&se401->lock);
- if (se401->dev == NULL) {
+ if (se401->dev == NULL) {
mutex_unlock(&se401->lock);
return -EIO;
}
- if (size > (((SE401_NUMFRAMES * se401->maxframesize) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))) {
+ if (size > (((SE401_NUMFRAMES * se401->maxframesize) + PAGE_SIZE - 1)
+ & ~(PAGE_SIZE - 1))) {
mutex_unlock(&se401->lock);
return -EINVAL;
}
@@ -1210,10 +1223,10 @@ static int se401_mmap(struct file *file, struct vm_area_struct *vma)
mutex_unlock(&se401->lock);
return -EAGAIN;
}
- start += PAGE_SIZE;
- pos += PAGE_SIZE;
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
if (size > PAGE_SIZE)
- size -= PAGE_SIZE;
+ size -= PAGE_SIZE;
else
size = 0;
}
@@ -1223,7 +1236,7 @@ static int se401_mmap(struct file *file, struct vm_area_struct *vma)
}
static const struct v4l2_file_operations se401_fops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.open = se401_open,
.release = se401_close,
.read = se401_read,
@@ -1241,71 +1254,76 @@ static struct video_device se401_template = {
/***************************/
static int se401_init(struct usb_se401 *se401, int button)
{
- int i=0, rc;
+ int i = 0, rc;
unsigned char cp[0x40];
char temp[200];
+ int slen;
/* led on */
se401_sndctrl(1, se401, SE401_REQ_LED_CONTROL, 1, NULL, 0);
/* get camera descriptor */
- rc=se401_sndctrl(0, se401, SE401_REQ_GET_CAMERA_DESCRIPTOR, 0, cp, sizeof(cp));
- if (cp[1]!=0x41) {
+ rc = se401_sndctrl(0, se401, SE401_REQ_GET_CAMERA_DESCRIPTOR, 0,
+ cp, sizeof(cp));
+ if (cp[1] != 0x41) {
err("Wrong descriptor type");
return 1;
}
- sprintf (temp, "ExtraFeatures: %d", cp[3]);
+ slen = snprintf(temp, 200, "ExtraFeatures: %d", cp[3]);
- se401->sizes=cp[4]+cp[5]*256;
- se401->width=kmalloc(se401->sizes*sizeof(int), GFP_KERNEL);
+ se401->sizes = cp[4] + cp[5] * 256;
+ se401->width = kmalloc(se401->sizes*sizeof(int), GFP_KERNEL);
if (!se401->width)
return 1;
- se401->height=kmalloc(se401->sizes*sizeof(int), GFP_KERNEL);
+ se401->height = kmalloc(se401->sizes*sizeof(int), GFP_KERNEL);
if (!se401->height) {
kfree(se401->width);
return 1;
}
- for (i=0; i<se401->sizes; i++) {
- se401->width[i]=cp[6+i*4+0]+cp[6+i*4+1]*256;
- se401->height[i]=cp[6+i*4+2]+cp[6+i*4+3]*256;
+ for (i = 0; i < se401->sizes; i++) {
+ se401->width[i] = cp[6 + i * 4 + 0] + cp[6 + i * 4 + 1] * 256;
+ se401->height[i] = cp[6 + i * 4 + 2] + cp[6 + i * 4 + 3] * 256;
}
- sprintf (temp, "%s Sizes:", temp);
- for (i=0; i<se401->sizes; i++) {
- sprintf(temp, "%s %dx%d", temp, se401->width[i], se401->height[i]);
+ slen += snprintf(temp + slen, 200 - slen, " Sizes:");
+ for (i = 0; i < se401->sizes; i++) {
+ slen += snprintf(temp + slen, 200 - slen,
+ " %dx%d", se401->width[i], se401->height[i]);
}
dev_info(&se401->dev->dev, "%s\n", temp);
- se401->maxframesize=se401->width[se401->sizes-1]*se401->height[se401->sizes-1]*3;
+ se401->maxframesize = se401->width[se401->sizes - 1] *
+ se401->height[se401->sizes - 1] * 3;
- rc=se401_sndctrl(0, se401, SE401_REQ_GET_WIDTH, 0, cp, sizeof(cp));
- se401->cwidth=cp[0]+cp[1]*256;
- rc=se401_sndctrl(0, se401, SE401_REQ_GET_HEIGHT, 0, cp, sizeof(cp));
- se401->cheight=cp[0]+cp[1]*256;
+ rc = se401_sndctrl(0, se401, SE401_REQ_GET_WIDTH, 0, cp, sizeof(cp));
+ se401->cwidth = cp[0]+cp[1]*256;
+ rc = se401_sndctrl(0, se401, SE401_REQ_GET_HEIGHT, 0, cp, sizeof(cp));
+ se401->cheight = cp[0]+cp[1]*256;
if (!(cp[2] & SE401_FORMAT_BAYER)) {
err("Bayer format not supported!");
return 1;
}
/* set output mode (BAYER) */
- se401_sndctrl(1, se401, SE401_REQ_SET_OUTPUT_MODE, SE401_FORMAT_BAYER, NULL, 0);
+ se401_sndctrl(1, se401, SE401_REQ_SET_OUTPUT_MODE,
+ SE401_FORMAT_BAYER, NULL, 0);
- rc=se401_sndctrl(0, se401, SE401_REQ_GET_BRT, 0, cp, sizeof(cp));
- se401->brightness=cp[0]+cp[1]*256;
+ rc = se401_sndctrl(0, se401, SE401_REQ_GET_BRT, 0, cp, sizeof(cp));
+ se401->brightness = cp[0]+cp[1]*256;
/* some default values */
- se401->resetlevel=0x2d;
- se401->rgain=0x20;
- se401->ggain=0x20;
- se401->bgain=0x20;
+ se401->resetlevel = 0x2d;
+ se401->rgain = 0x20;
+ se401->ggain = 0x20;
+ se401->bgain = 0x20;
se401_set_exposure(se401, 20000);
- se401->palette=VIDEO_PALETTE_RGB24;
- se401->enhance=1;
- se401->dropped=0;
- se401->error=0;
- se401->framecount=0;
- se401->readcount=0;
+ se401->palette = VIDEO_PALETTE_RGB24;
+ se401->enhance = 1;
+ se401->dropped = 0;
+ se401->error = 0;
+ se401->framecount = 0;
+ se401->readcount = 0;
/* Start interrupt transfers for snapshot button */
if (button) {
- se401->inturb=usb_alloc_urb(0, GFP_KERNEL);
+ se401->inturb = usb_alloc_urb(0, GFP_KERNEL);
if (!se401->inturb) {
dev_info(&se401->dev->dev,
"Allocation of inturb failed\n");
@@ -1323,7 +1341,7 @@ static int se401_init(struct usb_se401 *se401, int button)
return 1;
}
} else
- se401->inturb=NULL;
+ se401->inturb = NULL;
/* Flash the led */
se401_sndctrl(1, se401, SE401_REQ_CAMERA_POWER, 1, NULL, 0);
@@ -1340,8 +1358,8 @@ static int se401_probe(struct usb_interface *intf,
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_interface_descriptor *interface;
struct usb_se401 *se401;
- char *camera_name=NULL;
- int button=1;
+ char *camera_name = NULL;
+ int button = 1;
/* We don't handle multi-config cameras */
if (dev->descriptor.bNumConfigurations != 1)
@@ -1350,22 +1368,22 @@ static int se401_probe(struct usb_interface *intf,
interface = &intf->cur_altsetting->desc;
/* Is it an se401? */
- if (le16_to_cpu(dev->descriptor.idVendor) == 0x03e8 &&
- le16_to_cpu(dev->descriptor.idProduct) == 0x0004) {
- camera_name="Endpoints/Aox SE401";
- } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x0471 &&
- le16_to_cpu(dev->descriptor.idProduct) == 0x030b) {
- camera_name="Philips PCVC665K";
- } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x047d &&
- le16_to_cpu(dev->descriptor.idProduct) == 0x5001) {
- camera_name="Kensington VideoCAM 67014";
- } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x047d &&
- le16_to_cpu(dev->descriptor.idProduct) == 0x5002) {
- camera_name="Kensington VideoCAM 6701(5/7)";
- } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x047d &&
- le16_to_cpu(dev->descriptor.idProduct) == 0x5003) {
- camera_name="Kensington VideoCAM 67016";
- button=0;
+ if (le16_to_cpu(dev->descriptor.idVendor) == 0x03e8 &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x0004) {
+ camera_name = "Endpoints/Aox SE401";
+ } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x0471 &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x030b) {
+ camera_name = "Philips PCVC665K";
+ } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x047d &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x5001) {
+ camera_name = "Kensington VideoCAM 67014";
+ } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x047d &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x5002) {
+ camera_name = "Kensington VideoCAM 6701(5/7)";
+ } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x047d &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x5003) {
+ camera_name = "Kensington VideoCAM 67016";
+ button = 0;
} else
return -ENODEV;
@@ -1378,7 +1396,8 @@ static int se401_probe(struct usb_interface *intf,
/* We found one */
dev_info(&intf->dev, "SE401 camera found: %s\n", camera_name);
- if ((se401 = kzalloc(sizeof(*se401), GFP_KERNEL)) == NULL) {
+ se401 = kzalloc(sizeof(*se401), GFP_KERNEL);
+ if (se401 == NULL) {
err("couldn't kmalloc se401 struct");
return -ENOMEM;
}
@@ -1396,12 +1415,14 @@ static int se401_probe(struct usb_interface *intf,
}
memcpy(&se401->vdev, &se401_template, sizeof(se401_template));
- memcpy(se401->vdev.name, se401->camera_name, strlen(se401->camera_name));
+ memcpy(se401->vdev.name, se401->camera_name,
+ strlen(se401->camera_name));
init_waitqueue_head(&se401->wq);
mutex_init(&se401->lock);
wmb();
- if (video_register_device(&se401->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
+ if (video_register_device(&se401->vdev,
+ VFL_TYPE_GRABBER, video_nr) < 0) {
kfree(se401);
err("video_register_device failed");
return -EIO;
@@ -1409,20 +1430,20 @@ static int se401_probe(struct usb_interface *intf,
dev_info(&intf->dev, "registered new video device: video%d\n",
se401->vdev.num);
- usb_set_intfdata (intf, se401);
+ usb_set_intfdata(intf, se401);
return 0;
}
static void se401_disconnect(struct usb_interface *intf)
{
- struct usb_se401 *se401 = usb_get_intfdata (intf);
+ struct usb_se401 *se401 = usb_get_intfdata(intf);
- usb_set_intfdata (intf, NULL);
+ usb_set_intfdata(intf, NULL);
if (se401) {
video_unregister_device(&se401->vdev);
- if (!se401->user){
+ if (!se401->user)
usb_se401_remove_disconnected(se401);
- } else {
+ else {
se401->frame[0].grabstate = FRAME_ERROR;
se401->frame[1].grabstate = FRAME_ERROR;
@@ -1435,10 +1456,10 @@ static void se401_disconnect(struct usb_interface *intf)
}
static struct usb_driver se401_driver = {
- .name = "se401",
- .id_table = device_table,
- .probe = se401_probe,
- .disconnect = se401_disconnect,
+ .name = "se401",
+ .id_table = device_table,
+ .probe = se401_probe,
+ .disconnect = se401_disconnect,
};
@@ -1451,9 +1472,10 @@ static struct usb_driver se401_driver = {
static int __init usb_se401_init(void)
{
- printk(KERN_INFO "SE401 usb camera driver version %s registering\n", version);
+ printk(KERN_INFO "SE401 usb camera driver version %s registering\n",
+ version);
if (flickerless)
- if (flickerless!=50 && flickerless!=60) {
+ if (flickerless != 50 && flickerless != 60) {
printk(KERN_ERR "Invallid flickerless value, use 0, 50 or 60.\n");
return -1;
}
diff --git a/drivers/media/video/se401.h b/drivers/media/video/se401.h
index 2ce685db5d8..bf7d2e9765b 100644
--- a/drivers/media/video/se401.h
+++ b/drivers/media/video/se401.h
@@ -2,7 +2,7 @@
#ifndef __LINUX_se401_H
#define __LINUX_se401_H
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/videodev.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -12,9 +12,10 @@
#ifdef se401_DEBUG
# define PDEBUG(level, fmt, args...) \
-if (debug >= level) info("[" __PRETTY_FUNCTION__ ":%d] " fmt, __LINE__ , ## args)
+if (debug >= level) \
+ info("[" __PRETTY_FUNCTION__ ":%d] " fmt, __LINE__ , ## args)
#else
-# define PDEBUG(level, fmt, args...) do {} while(0)
+# define PDEBUG(level, fmt, args...) do {} while (0)
#endif
/* An almost drop-in replacement for sleep_on_interruptible */
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index b5e37a530c6..d369e8409ab 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -81,7 +81,6 @@ struct sh_mobile_ceu_buffer {
};
struct sh_mobile_ceu_dev {
- struct device *dev;
struct soc_camera_host ici;
struct soc_camera_device *icd;
@@ -617,7 +616,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
xlate->cam_fmt = icd->formats + idx;
xlate->buswidth = icd->formats[idx].depth;
xlate++;
- dev_dbg(&ici->dev, "Providing format %s using %s\n",
+ dev_dbg(ici->dev, "Providing format %s using %s\n",
sh_mobile_ceu_formats[k].name,
icd->formats[idx].name);
}
@@ -630,7 +629,7 @@ add_single_format:
xlate->cam_fmt = icd->formats + idx;
xlate->buswidth = icd->formats[idx].depth;
xlate++;
- dev_dbg(&ici->dev,
+ dev_dbg(ici->dev,
"Providing format %s in pass-through mode\n",
icd->formats[idx].name);
}
@@ -657,7 +656,7 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
- dev_warn(&ici->dev, "Format %x not found\n", pixfmt);
+ dev_warn(ici->dev, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -684,7 +683,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
- dev_warn(&ici->dev, "Format %x not found\n", pixfmt);
+ dev_warn(ici->dev, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -782,7 +781,7 @@ static void sh_mobile_ceu_init_videobuf(struct videobuf_queue *q,
videobuf_queue_dma_contig_init(q,
&sh_mobile_ceu_videobuf_ops,
- &ici->dev, &pcdev->lock,
+ ici->dev, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
pcdev->is_interlaced ?
V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE,
@@ -829,7 +828,6 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
goto exit;
}
- platform_set_drvdata(pdev, pcdev);
INIT_LIST_HEAD(&pcdev->capture);
spin_lock_init(&pcdev->lock);
@@ -840,7 +838,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
goto exit_kfree;
}
- base = ioremap_nocache(res->start, res->end - res->start + 1);
+ base = ioremap_nocache(res->start, resource_size(res));
if (!base) {
err = -ENXIO;
dev_err(&pdev->dev, "Unable to ioremap CEU registers.\n");
@@ -850,13 +848,12 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
pcdev->irq = irq;
pcdev->base = base;
pcdev->video_limit = 0; /* only enabled if second resource exists */
- pcdev->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
err = dma_declare_coherent_memory(&pdev->dev, res->start,
res->start,
- (res->end - res->start) + 1,
+ resource_size(res),
DMA_MEMORY_MAP |
DMA_MEMORY_EXCLUSIVE);
if (!err) {
@@ -865,7 +862,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
goto exit_iounmap;
}
- pcdev->video_limit = (res->end - res->start) + 1;
+ pcdev->video_limit = resource_size(res);
}
/* request irq */
@@ -885,7 +882,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
}
pcdev->ici.priv = pcdev;
- pcdev->ici.dev.parent = &pdev->dev;
+ pcdev->ici.dev = &pdev->dev;
pcdev->ici.nr = pdev->id;
pcdev->ici.drv_name = dev_name(&pdev->dev);
pcdev->ici.ops = &sh_mobile_ceu_host_ops;
@@ -913,9 +910,11 @@ exit:
static int sh_mobile_ceu_remove(struct platform_device *pdev)
{
- struct sh_mobile_ceu_dev *pcdev = platform_get_drvdata(pdev);
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
+ struct sh_mobile_ceu_dev, ici);
- soc_camera_host_unregister(&pcdev->ici);
+ soc_camera_host_unregister(soc_host);
clk_put(pcdev->clk);
free_irq(pcdev->irq, pcdev);
if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 0e890cc2337..16f595d4337 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -16,19 +16,21 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/device.h>
-#include <linux/list.h>
#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
+#include <media/soc_camera.h>
#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
#include <media/videobuf-core.h>
-#include <media/soc_camera.h>
/* Default to VGA resolution */
#define DEFAULT_WIDTH 640
@@ -279,7 +281,7 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf,
return ret;
} else if (!icd->current_fmt ||
icd->current_fmt->fourcc != pix->pixelformat) {
- dev_err(&ici->dev,
+ dev_err(ici->dev,
"Host driver hasn't set up current format correctly!\n");
return -EINVAL;
}
@@ -794,7 +796,7 @@ static void scan_add_host(struct soc_camera_host *ici)
list_for_each_entry(icd, &devices, list) {
if (icd->iface == ici->nr) {
- icd->dev.parent = &ici->dev;
+ icd->dev.parent = ici->dev;
device_register_link(icd);
}
}
@@ -818,7 +820,7 @@ static int scan_add_device(struct soc_camera_device *icd)
list_for_each_entry(ici, &hosts, list) {
if (icd->iface == ici->nr) {
ret = 1;
- icd->dev.parent = &ici->dev;
+ icd->dev.parent = ici->dev;
break;
}
}
@@ -952,7 +954,6 @@ static void dummy_release(struct device *dev)
int soc_camera_host_register(struct soc_camera_host *ici)
{
- int ret;
struct soc_camera_host *ix;
if (!ici || !ici->ops ||
@@ -965,12 +966,10 @@ int soc_camera_host_register(struct soc_camera_host *ici)
!ici->ops->reqbufs ||
!ici->ops->add ||
!ici->ops->remove ||
- !ici->ops->poll)
+ !ici->ops->poll ||
+ !ici->dev)
return -EINVAL;
- /* Number might be equal to the platform device ID */
- dev_set_name(&ici->dev, "camera_host%d", ici->nr);
-
mutex_lock(&list_lock);
list_for_each_entry(ix, &hosts, list) {
if (ix->nr == ici->nr) {
@@ -979,26 +978,14 @@ int soc_camera_host_register(struct soc_camera_host *ici)
}
}
+ dev_set_drvdata(ici->dev, ici);
+
list_add_tail(&ici->list, &hosts);
mutex_unlock(&list_lock);
- ici->dev.release = dummy_release;
-
- ret = device_register(&ici->dev);
-
- if (ret)
- goto edevr;
-
scan_add_host(ici);
return 0;
-
-edevr:
- mutex_lock(&list_lock);
- list_del(&ici->list);
- mutex_unlock(&list_lock);
-
- return ret;
}
EXPORT_SYMBOL(soc_camera_host_register);
@@ -1012,7 +999,7 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
list_del(&ici->list);
list_for_each_entry(icd, &devices, list) {
- if (icd->dev.parent == &ici->dev) {
+ if (icd->dev.parent == ici->dev) {
device_unregister(&icd->dev);
/* Not before device_unregister(), .remove
* needs parent to call ici->ops->remove() */
@@ -1023,7 +1010,7 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
mutex_unlock(&list_lock);
- device_unregister(&ici->dev);
+ dev_set_drvdata(ici->dev, NULL);
}
EXPORT_SYMBOL(soc_camera_host_unregister);
@@ -1130,7 +1117,7 @@ int soc_camera_video_start(struct soc_camera_device *icd)
vdev = video_device_alloc();
if (!vdev)
goto evidallocd;
- dev_dbg(&ici->dev, "Allocated video_device %p\n", vdev);
+ dev_dbg(ici->dev, "Allocated video_device %p\n", vdev);
strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name));
@@ -1174,6 +1161,57 @@ void soc_camera_video_stop(struct soc_camera_device *icd)
}
EXPORT_SYMBOL(soc_camera_video_stop);
+static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
+{
+ struct soc_camera_link *icl = pdev->dev.platform_data;
+ struct i2c_adapter *adap;
+ struct i2c_client *client;
+
+ if (!icl)
+ return -EINVAL;
+
+ adap = i2c_get_adapter(icl->i2c_adapter_id);
+ if (!adap) {
+ dev_warn(&pdev->dev, "Cannot get adapter #%d. No driver?\n",
+ icl->i2c_adapter_id);
+ /* -ENODEV and -ENXIO do not produce an error on probe()... */
+ return -ENOENT;
+ }
+
+ icl->board_info->platform_data = icl;
+ client = i2c_new_device(adap, icl->board_info);
+ if (!client) {
+ i2c_put_adapter(adap);
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, client);
+
+ return 0;
+}
+
+static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
+{
+ struct i2c_client *client = platform_get_drvdata(pdev);
+
+ if (!client)
+ return -ENODEV;
+
+ i2c_unregister_device(client);
+ i2c_put_adapter(client->adapter);
+
+ return 0;
+}
+
+static struct platform_driver __refdata soc_camera_pdrv = {
+ .probe = soc_camera_pdrv_probe,
+ .remove = __devexit_p(soc_camera_pdrv_remove),
+ .driver = {
+ .name = "soc-camera-pdrv",
+ .owner = THIS_MODULE,
+ },
+};
+
static int __init soc_camera_init(void)
{
int ret = bus_register(&soc_camera_bus_type);
@@ -1183,8 +1221,14 @@ static int __init soc_camera_init(void)
if (ret)
goto edrvr;
+ ret = platform_driver_register(&soc_camera_pdrv);
+ if (ret)
+ goto epdr;
+
return 0;
+epdr:
+ driver_unregister(&ic_drv);
edrvr:
bus_unregister(&soc_camera_bus_type);
return ret;
@@ -1192,6 +1236,7 @@ edrvr:
static void __exit soc_camera_exit(void)
{
+ platform_driver_unregister(&soc_camera_pdrv);
driver_unregister(&ic_drv);
bus_unregister(&soc_camera_bus_type);
}
@@ -1202,3 +1247,4 @@ module_exit(soc_camera_exit);
MODULE_DESCRIPTION("Image capture bus driver");
MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:soc-camera-pdrv");
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index 1a6d39cbd6f..2e593704727 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -1137,7 +1137,7 @@ static int stk_vidioc_querybuf(struct file *filp,
struct stk_camera *dev = priv;
struct stk_sio_buffer *sbuf;
- if (buf->index < 0 || buf->index >= dev->n_sbufs)
+ if (buf->index >= dev->n_sbufs)
return -EINVAL;
sbuf = dev->sio_bufs + buf->index;
*buf = sbuf->v4lbuf;
@@ -1154,7 +1154,7 @@ static int stk_vidioc_qbuf(struct file *filp,
if (buf->memory != V4L2_MEMORY_MMAP)
return -EINVAL;
- if (buf->index < 0 || buf->index >= dev->n_sbufs)
+ if (buf->index >= dev->n_sbufs)
return -EINVAL;
sbuf = dev->sio_bufs + buf->index;
if (sbuf->v4lbuf.flags & V4L2_BUF_FLAG_QUEUED)
diff --git a/drivers/media/video/tda7432.c b/drivers/media/video/tda7432.c
index 005f8a46803..80f1cee23fa 100644
--- a/drivers/media/video/tda7432.c
+++ b/drivers/media/video/tda7432.c
@@ -20,20 +20,6 @@
* loudness - set between 0 and 15 for varying degrees of loudness effect
*
* maxvol - set maximum volume to +20db (1), default is 0db(0)
- *
- *
- * Revision: 0.7 - maxvol module parm to set maximium volume 0db or +20db
- * store if muted so we can return it
- * change balance only if flaged to
- * Revision: 0.6 - added tone controls
- * Revision: 0.5 - Fixed odd balance problem
- * Revision: 0.4 - added muting
- * Revision: 0.3 - Fixed silly reversed volume controls. :)
- * Revision: 0.2 - Cleaned up #defines
- * fixed volume control
- * Added I2C_DRIVERID_TDA7432
- * added loudness insmod control
- * Revision: 0.1 - initial version
*/
#include <linux/module.h>
diff --git a/drivers/media/video/tea6415c.c b/drivers/media/video/tea6415c.c
index d4a9ed45764..1585839bd0b 100644
--- a/drivers/media/video/tea6415c.c
+++ b/drivers/media/video/tea6415c.c
@@ -141,7 +141,6 @@ static const struct v4l2_subdev_ops tea6415c_ops = {
.video = &tea6415c_video_ops,
};
-/* this function is called by i2c_probe */
static int tea6415c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
diff --git a/drivers/media/video/tea6420.c b/drivers/media/video/tea6420.c
index ced6eadf347..0446524d354 100644
--- a/drivers/media/video/tea6420.c
+++ b/drivers/media/video/tea6420.c
@@ -112,7 +112,6 @@ static const struct v4l2_subdev_ops tea6420_ops = {
.audio = &tea6420_audio_ops,
};
-/* this function is called by i2c_probe */
static int tea6420_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
diff --git a/drivers/media/video/ths7303.c b/drivers/media/video/ths7303.c
new file mode 100644
index 00000000000..21781f8a0e8
--- /dev/null
+++ b/drivers/media/video/ths7303.c
@@ -0,0 +1,151 @@
+/*
+ * ths7303- THS7303 Video Amplifier driver
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/i2c.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-chip-ident.h>
+
+MODULE_DESCRIPTION("TI THS7303 video amplifier driver");
+MODULE_AUTHOR("Chaithrika U S");
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+/* following function is used to set ths7303 */
+static int ths7303_setvalue(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ int err = 0;
+ u8 val;
+ struct i2c_client *client;
+
+ client = v4l2_get_subdevdata(sd);
+
+ if (std & (V4L2_STD_ALL & ~V4L2_STD_SECAM)) {
+ val = 0x02;
+ v4l2_dbg(1, debug, sd, "setting value for SDTV format\n");
+ } else {
+ val = 0x00;
+ v4l2_dbg(1, debug, sd, "disabling all channels\n");
+ }
+
+ err |= i2c_smbus_write_byte_data(client, 0x01, val);
+ err |= i2c_smbus_write_byte_data(client, 0x02, val);
+ err |= i2c_smbus_write_byte_data(client, 0x03, val);
+
+ if (err)
+ v4l2_err(sd, "write failed\n");
+
+ return err;
+}
+
+static int ths7303_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
+{
+ return ths7303_setvalue(sd, norm);
+}
+
+static int ths7303_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_THS7303, 0);
+}
+
+static const struct v4l2_subdev_video_ops ths7303_video_ops = {
+ .s_std_output = ths7303_s_std_output,
+};
+
+static const struct v4l2_subdev_core_ops ths7303_core_ops = {
+ .g_chip_ident = ths7303_g_chip_ident,
+};
+
+static const struct v4l2_subdev_ops ths7303_ops = {
+ .core = &ths7303_core_ops,
+ .video = &ths7303_video_ops,
+};
+
+static int ths7303_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct v4l2_subdev *sd;
+ v4l2_std_id std_id = V4L2_STD_NTSC;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ v4l_info(client, "chip found @ 0x%x (%s)\n",
+ client->addr << 1, client->adapter->name);
+
+ sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
+ if (sd == NULL)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(sd, client, &ths7303_ops);
+
+ return ths7303_setvalue(sd, std_id);
+}
+
+static int ths7303_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ v4l2_device_unregister_subdev(sd);
+ kfree(sd);
+
+ return 0;
+}
+
+static const struct i2c_device_id ths7303_id[] = {
+ {"ths7303", 0},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, ths7303_id);
+
+static struct i2c_driver ths7303_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ths7303",
+ },
+ .probe = ths7303_probe,
+ .remove = ths7303_remove,
+ .id_table = ths7303_id,
+};
+
+static int __init ths7303_init(void)
+{
+ return i2c_add_driver(&ths7303_driver);
+}
+
+static void __exit ths7303_exit(void)
+{
+ i2c_del_driver(&ths7303_driver);
+}
+
+module_init(ths7303_init);
+module_exit(ths7303_exit);
+
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 78c377a399c..537594211a9 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -309,32 +309,6 @@ static void set_freq(struct i2c_client *c, unsigned long freq)
}
}
-static void tuner_i2c_address_check(struct tuner *t)
-{
- if ((t->type == UNSET || t->type == TUNER_ABSENT) ||
- ((t->i2c->addr < 0x64) || (t->i2c->addr > 0x6f)))
- return;
-
- /* We already know that the XC5000 can only be located at
- * i2c address 0x61, 0x62, 0x63 or 0x64 */
- if ((t->type == TUNER_XC5000) &&
- ((t->i2c->addr <= 0x64)) && (t->i2c->addr >= 0x61))
- return;
-
- tuner_warn("====================== WARNING! ======================\n");
- tuner_warn("Support for tuners in i2c address range 0x64 thru 0x6f\n");
- tuner_warn("will soon be dropped. This message indicates that your\n");
- tuner_warn("hardware has a %s tuner at i2c address 0x%02x.\n",
- t->name, t->i2c->addr);
- tuner_warn("To ensure continued support for your device, please\n");
- tuner_warn("send a copy of this message, along with full dmesg\n");
- tuner_warn("output to v4l-dvb-maintainer@linuxtv.org\n");
- tuner_warn("Please use subject line: \"obsolete tuner i2c address.\"\n");
- tuner_warn("driver: %s, addr: 0x%02x, type: %d (%s)\n",
- t->i2c->adapter->name, t->i2c->addr, t->type, t->name);
- tuner_warn("====================== WARNING! ======================\n");
-}
-
static struct xc5000_config xc5000_cfg;
static void set_type(struct i2c_client *c, unsigned int type,
@@ -438,18 +412,12 @@ static void set_type(struct i2c_client *c, unsigned int type,
break;
case TUNER_XC5000:
{
- struct dvb_tuner_ops *xc_tuner_ops;
-
xc5000_cfg.i2c_address = t->i2c->addr;
/* if_khz will be set when the digital dvb_attach() occurs */
xc5000_cfg.if_khz = 0;
if (!dvb_attach(xc5000_attach,
&t->fe, t->i2c->adapter, &xc5000_cfg))
goto attach_failed;
-
- xc_tuner_ops = &t->fe.ops.tuner_ops;
- if (xc_tuner_ops->init)
- xc_tuner_ops->init(&t->fe);
break;
}
default:
@@ -490,7 +458,6 @@ static void set_type(struct i2c_client *c, unsigned int type,
tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
c->adapter->name, c->driver->driver.name, c->addr << 1, type,
t->mode_mask);
- tuner_i2c_address_check(t);
return;
attach_failed:
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index e24a38c7fa4..ac02808106c 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -184,7 +184,7 @@ hauppauge_tuner[] =
{ TUNER_ABSENT, "Silicon TDA8275C1 8290 FM"},
{ TUNER_ABSENT, "Thompson DTT757"},
/* 80-89 */
- { TUNER_PHILIPS_FM1216ME_MK3, "Philips FQ1216LME MK3"},
+ { TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK3"},
{ TUNER_LG_PAL_NEW_TAPC, "LG TAPC G701D"},
{ TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"},
{ TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"},
@@ -210,7 +210,7 @@ hauppauge_tuner[] =
{ TUNER_TEA5767, "Philips TEA5768HL FM Radio"},
{ TUNER_ABSENT, "Panasonic ENV57H12D5"},
{ TUNER_PHILIPS_FM1236_MK3, "TCL MFNM05-4"},
- { TUNER_ABSENT, "TCL MNM05-4"},
+ { TUNER_PHILIPS_FM1236_MK3, "TCL MNM05-4"},
{ TUNER_PHILIPS_FM1216ME_MK3, "TCL MPE05-2"},
{ TUNER_ABSENT, "TCL MQNM05-4"},
{ TUNER_ABSENT, "LG TAPC-W701D"},
@@ -229,7 +229,7 @@ hauppauge_tuner[] =
{ TUNER_ABSENT, "Samsung THPD5222FG30A"},
/* 120-129 */
{ TUNER_XC2028, "Xceive XC3028"},
- { TUNER_ABSENT, "Philips FQ1216LME MK5"},
+ { TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK5"},
{ TUNER_ABSENT, "Philips FQD1216LME"},
{ TUNER_ABSENT, "Conexant CX24118A"},
{ TUNER_ABSENT, "TCL DMF11WIP"},
diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c
index 4262e60b811..3750f7fadb1 100644
--- a/drivers/media/video/tvp514x.c
+++ b/drivers/media/video/tvp514x.c
@@ -692,7 +692,7 @@ static int ioctl_s_routing(struct v4l2_int_device *s,
break; /* Input detected */
}
- if ((current_std == STD_INVALID) || (try_count <= 0))
+ if ((current_std == STD_INVALID) || (try_count < 0))
return -EINVAL;
decoder->current_std = current_std;
diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
index 900ec2129ca..31d57f2d09e 100644
--- a/drivers/media/video/usbvideo/konicawc.c
+++ b/drivers/media/video/usbvideo/konicawc.c
@@ -240,7 +240,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
input_dev->dev.parent = &dev->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY);
- input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0);
+ input_dev->keybit[BIT_WORD(KEY_CAMERA)] = BIT_MASK(KEY_CAMERA);
error = input_register_device(cam->input);
if (error) {
@@ -263,7 +263,7 @@ static void konicawc_unregister_input(struct konicawc *cam)
static void konicawc_report_buttonstat(struct konicawc *cam)
{
if (cam->input) {
- input_report_key(cam->input, BTN_0, cam->buttonsts);
+ input_report_key(cam->input, KEY_CAMERA, cam->buttonsts);
input_sync(cam->input);
}
}
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index fd112f0b9d3..803d3e4e29a 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -103,7 +103,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
input_dev->dev.parent = &dev->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY);
- input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0);
+ input_dev->keybit[BIT_WORD(KEY_CAMERA)] = BIT_MASK(KEY_CAMERA);
error = input_register_device(cam->input);
if (error) {
@@ -126,7 +126,7 @@ static void qcm_unregister_input(struct qcm *cam)
static void qcm_report_buttonstat(struct qcm *cam)
{
if (cam->input) {
- input_report_key(cam->input, BTN_0, cam->button_sts);
+ input_report_key(cam->input, KEY_CAMERA, cam->button_sts);
input_sync(cam->input);
}
}
diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
index 8bc03b9e131..6ba16abeebd 100644
--- a/drivers/media/video/usbvision/usbvision-core.c
+++ b/drivers/media/video/usbvision/usbvision-core.c
@@ -390,10 +390,9 @@ int usbvision_scratch_alloc(struct usb_usbvision *usbvision)
void usbvision_scratch_free(struct usb_usbvision *usbvision)
{
- if (usbvision->scratch != NULL) {
- vfree(usbvision->scratch);
- usbvision->scratch = NULL;
- }
+ vfree(usbvision->scratch);
+ usbvision->scratch = NULL;
+
}
/*
@@ -506,10 +505,9 @@ int usbvision_decompress_alloc(struct usb_usbvision *usbvision)
*/
void usbvision_decompress_free(struct usb_usbvision *usbvision)
{
- if (usbvision->IntraFrameBuffer != NULL) {
- vfree(usbvision->IntraFrameBuffer);
- usbvision->IntraFrameBuffer = NULL;
- }
+ vfree(usbvision->IntraFrameBuffer);
+ usbvision->IntraFrameBuffer = NULL;
+
}
/************************************************************
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index d7056a5b7f9..90b58914f98 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -541,7 +541,7 @@ static int vidioc_enum_input (struct file *file, void *priv,
struct usb_usbvision *usbvision = video_drvdata(file);
int chan;
- if ((vi->index >= usbvision->video_inputs) || (vi->index < 0) )
+ if (vi->index >= usbvision->video_inputs)
return -EINVAL;
if (usbvision->have_tuner) {
chan = vi->index;
@@ -1794,7 +1794,7 @@ static struct usb_driver usbvision_driver = {
.name = "usbvision",
.id_table = usbvision_table,
.probe = usbvision_probe,
- .disconnect = usbvision_disconnect
+ .disconnect = __devexit_p(usbvision_disconnect),
};
/*
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 0d7e38d6ff6..36a6ba92df2 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -1372,21 +1372,19 @@ end:
}
/*
- * Prune an entity of its bogus controls. This currently includes processing
- * unit auto controls for which no corresponding manual control is available.
- * Such auto controls make little sense if any, and are known to crash at
- * least the SiGma Micro webcam.
+ * Prune an entity of its bogus controls using a blacklist. Bogus controls
+ * are currently the ones that crash the camera or unconditionally return an
+ * error when queried.
*/
static void
-uvc_ctrl_prune_entity(struct uvc_entity *entity)
+uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity)
{
static const struct {
- u8 idx_manual;
- u8 idx_auto;
+ struct usb_device_id id;
+ u8 index;
} blacklist[] = {
- { 2, 11 }, /* Hue */
- { 6, 12 }, /* White Balance Temperature */
- { 7, 13 }, /* White Balance Component */
+ { { USB_DEVICE(0x1c4f, 0x3000) }, 6 }, /* WB Temperature */
+ { { USB_DEVICE(0x5986, 0x0241) }, 2 }, /* Hue */
};
u8 *controls;
@@ -1400,19 +1398,17 @@ uvc_ctrl_prune_entity(struct uvc_entity *entity)
size = entity->processing.bControlSize;
for (i = 0; i < ARRAY_SIZE(blacklist); ++i) {
- if (blacklist[i].idx_auto >= 8 * size ||
- blacklist[i].idx_manual >= 8 * size)
+ if (!usb_match_id(dev->intf, &blacklist[i].id))
continue;
- if (!uvc_test_bit(controls, blacklist[i].idx_auto) ||
- uvc_test_bit(controls, blacklist[i].idx_manual))
+ if (blacklist[i].index >= 8 * size ||
+ !uvc_test_bit(controls, blacklist[i].index))
continue;
- uvc_trace(UVC_TRACE_CONTROL, "Auto control %u/%u has no "
- "matching manual control, removing it.\n", entity->id,
- blacklist[i].idx_auto);
+ uvc_trace(UVC_TRACE_CONTROL, "%u/%u control is black listed, "
+ "removing it.\n", entity->id, blacklist[i].index);
- uvc_clear_bit(controls, blacklist[i].idx_auto);
+ uvc_clear_bit(controls, blacklist[i].index);
}
}
@@ -1442,8 +1438,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
bControlSize = entity->camera.bControlSize;
}
- if (dev->quirks & UVC_QUIRK_PRUNE_CONTROLS)
- uvc_ctrl_prune_entity(entity);
+ uvc_ctrl_prune_entity(dev, entity);
for (i = 0; i < bControlSize; ++i)
ncontrols += hweight8(bmControls[i]);
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 507dc85646b..89927b7aec2 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -289,10 +289,8 @@ static int uvc_parse_format(struct uvc_device *dev,
struct uvc_format_desc *fmtdesc;
struct uvc_frame *frame;
const unsigned char *start = buffer;
- unsigned char *_buffer;
unsigned int interval;
unsigned int i, n;
- int _buflen;
__u8 ftype;
format->type = buffer[2];
@@ -303,7 +301,7 @@ static int uvc_parse_format(struct uvc_device *dev,
case VS_FORMAT_FRAME_BASED:
n = buffer[2] == VS_FORMAT_UNCOMPRESSED ? 27 : 28;
if (buflen < n) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming"
+ uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d FORMAT error\n",
dev->udev->devnum,
alts->desc.bInterfaceNumber);
@@ -338,7 +336,7 @@ static int uvc_parse_format(struct uvc_device *dev,
case VS_FORMAT_MJPEG:
if (buflen < 11) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming"
+ uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d FORMAT error\n",
dev->udev->devnum,
alts->desc.bInterfaceNumber);
@@ -354,7 +352,7 @@ static int uvc_parse_format(struct uvc_device *dev,
case VS_FORMAT_DV:
if (buflen < 9) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming"
+ uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d FORMAT error\n",
dev->udev->devnum,
alts->desc.bInterfaceNumber);
@@ -372,7 +370,7 @@ static int uvc_parse_format(struct uvc_device *dev,
strlcpy(format->name, "HD-DV", sizeof format->name);
break;
default:
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming"
+ uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d: unknown DV format %u\n",
dev->udev->devnum,
alts->desc.bInterfaceNumber, buffer[8]);
@@ -401,7 +399,7 @@ static int uvc_parse_format(struct uvc_device *dev,
case VS_FORMAT_STREAM_BASED:
/* Not supported yet. */
default:
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming"
+ uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d unsupported format %u\n",
dev->udev->devnum, alts->desc.bInterfaceNumber,
buffer[2]);
@@ -413,20 +411,11 @@ static int uvc_parse_format(struct uvc_device *dev,
buflen -= buffer[0];
buffer += buffer[0];
- /* Count the number of frame descriptors to test the bFrameIndex
- * field when parsing the descriptors. We can't rely on the
- * bNumFrameDescriptors field as some cameras don't initialize it
- * properly.
- */
- for (_buflen = buflen, _buffer = buffer;
- _buflen > 2 && _buffer[2] == ftype;
- _buflen -= _buffer[0], _buffer += _buffer[0])
- format->nframes++;
-
/* Parse the frame descriptors. Only uncompressed, MJPEG and frame
* based formats have frame descriptors.
*/
while (buflen > 2 && buffer[2] == ftype) {
+ frame = &format->frame[format->nframes];
if (ftype != VS_FRAME_FRAME_BASED)
n = buflen > 25 ? buffer[25] : 0;
else
@@ -435,22 +424,12 @@ static int uvc_parse_format(struct uvc_device *dev,
n = n ? n : 3;
if (buflen < 26 + 4*n) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming"
+ uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d FRAME error\n", dev->udev->devnum,
alts->desc.bInterfaceNumber);
return -EINVAL;
}
- if (buffer[3] - 1 >= format->nframes) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming"
- "interface %d frame index %u out of range\n",
- dev->udev->devnum, alts->desc.bInterfaceNumber,
- buffer[3]);
- return -EINVAL;
- }
-
- frame = &format->frame[buffer[3] - 1];
-
frame->bFrameIndex = buffer[3];
frame->bmCapabilities = buffer[4];
frame->wWidth = get_unaligned_le16(&buffer[5]);
@@ -507,6 +486,7 @@ static int uvc_parse_format(struct uvc_device *dev,
10000000/frame->dwDefaultFrameInterval,
(100000000/frame->dwDefaultFrameInterval)%10);
+ format->nframes++;
buflen -= buffer[0];
buffer += buffer[0];
}
@@ -518,7 +498,7 @@ static int uvc_parse_format(struct uvc_device *dev,
if (buflen > 2 && buffer[2] == VS_COLORFORMAT) {
if (buflen < 6) {
- uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming"
+ uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d COLORFORMAT error\n",
dev->udev->devnum,
alts->desc.bInterfaceNumber);
@@ -664,7 +644,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
_buflen = buflen;
/* Count the format and frame descriptors. */
- while (_buflen > 2) {
+ while (_buflen > 2 && _buffer[1] == CS_INTERFACE) {
switch (_buffer[2]) {
case VS_FORMAT_UNCOMPRESSED:
case VS_FORMAT_MJPEG:
@@ -729,7 +709,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
streaming->nformats = nformats;
/* Parse the format descriptors. */
- while (buflen > 2) {
+ while (buflen > 2 && buffer[1] == CS_INTERFACE) {
switch (buffer[2]) {
case VS_FORMAT_UNCOMPRESSED:
case VS_FORMAT_MJPEG:
@@ -1316,7 +1296,7 @@ static int uvc_scan_chain_forward(struct uvc_video_device *video,
continue;
if (forward->extension.bNrInPins != 1) {
- uvc_trace(UVC_TRACE_DESCR, "Extension unit %d has"
+ uvc_trace(UVC_TRACE_DESCR, "Extension unit %d has "
"more than 1 input pin.\n", entity->id);
return -1;
}
@@ -1614,6 +1594,7 @@ static int uvc_probe(struct usb_interface *intf,
INIT_LIST_HEAD(&dev->entities);
INIT_LIST_HEAD(&dev->streaming);
kref_init(&dev->kref);
+ atomic_set(&dev->users, 0);
dev->udev = usb_get_dev(udev);
dev->intf = usb_get_intf(intf);
@@ -1927,7 +1908,7 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_STREAM_NO_FID },
- /* Lenovo Thinkpad SL500 */
+ /* Lenovo Thinkpad SL400/SL500 */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x17ef,
@@ -1936,6 +1917,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_STREAM_NO_FID },
+ /* Aveo Technology USB 2.0 Camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x1871,
+ .idProduct = 0x0306,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_EXTRAFIELDS },
/* Ecamm Pico iMage */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -1945,6 +1935,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_EXTRAFIELDS },
+ /* FSC WebCam V30S */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x18ec,
+ .idProduct = 0x3288,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_MINMAX },
/* Bodelin ProScopeHR */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_DEV_HI
@@ -1965,8 +1964,7 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_MINMAX
- | UVC_QUIRK_IGNORE_SELECTOR_UNIT
- | UVC_QUIRK_PRUNE_CONTROLS },
+ | UVC_QUIRK_IGNORE_SELECTOR_UNIT },
/* Generic USB Video Class */
{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) },
{}
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 0155752e4a5..f854698c406 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -172,6 +172,20 @@ int uvc_free_buffers(struct uvc_video_queue *queue)
return 0;
}
+/*
+ * Check if buffers have been allocated.
+ */
+int uvc_queue_allocated(struct uvc_video_queue *queue)
+{
+ int allocated;
+
+ mutex_lock(&queue->mutex);
+ allocated = queue->count != 0;
+ mutex_unlock(&queue->mutex);
+
+ return allocated;
+}
+
static void __uvc_query_buffer(struct uvc_buffer *buf,
struct v4l2_buffer *v4l2_buf)
{
diff --git a/drivers/media/video/uvc/uvc_status.c b/drivers/media/video/uvc/uvc_status.c
index 21d87124986..f152a990386 100644
--- a/drivers/media/video/uvc/uvc_status.c
+++ b/drivers/media/video/uvc/uvc_status.c
@@ -194,7 +194,7 @@ int uvc_status_init(struct uvc_device *dev)
dev->status, UVC_MAX_STATUS_SIZE, uvc_status_complete,
dev, interval);
- return usb_submit_urb(dev->int_urb, GFP_KERNEL);
+ return 0;
}
void uvc_status_cleanup(struct uvc_device *dev)
@@ -205,15 +205,30 @@ void uvc_status_cleanup(struct uvc_device *dev)
uvc_input_cleanup(dev);
}
-int uvc_status_suspend(struct uvc_device *dev)
+int uvc_status_start(struct uvc_device *dev)
+{
+ if (dev->int_urb == NULL)
+ return 0;
+
+ return usb_submit_urb(dev->int_urb, GFP_KERNEL);
+}
+
+void uvc_status_stop(struct uvc_device *dev)
{
usb_kill_urb(dev->int_urb);
+}
+
+int uvc_status_suspend(struct uvc_device *dev)
+{
+ if (atomic_read(&dev->users))
+ usb_kill_urb(dev->int_urb);
+
return 0;
}
int uvc_status_resume(struct uvc_device *dev)
{
- if (dev->int_urb == NULL)
+ if (dev->int_urb == NULL || atomic_read(&dev->users) == 0)
return 0;
return usb_submit_urb(dev->int_urb, GFP_NOIO);
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 2a80caa54fb..5e77cad2969 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -46,6 +46,8 @@ static int uvc_v4l2_query_menu(struct uvc_video_device *video,
struct uvc_menu_info *menu_info;
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
+ u32 index = query_menu->index;
+ u32 id = query_menu->id;
ctrl = uvc_find_control(video, query_menu->id, &mapping);
if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU)
@@ -54,6 +56,10 @@ static int uvc_v4l2_query_menu(struct uvc_video_device *video,
if (query_menu->index >= mapping->menu_count)
return -EINVAL;
+ memset(query_menu, 0, sizeof(*query_menu));
+ query_menu->id = id;
+ query_menu->index = index;
+
menu_info = &mapping->menu_info[query_menu->index];
strlcpy(query_menu->name, menu_info->name, sizeof query_menu->name);
return 0;
@@ -245,7 +251,7 @@ static int uvc_v4l2_set_format(struct uvc_video_device *video,
if (fmt->type != video->streaming->type)
return -EINVAL;
- if (uvc_queue_streaming(&video->queue))
+ if (uvc_queue_allocated(&video->queue))
return -EBUSY;
ret = uvc_v4l2_try_format(video, fmt, &probe, &format, &frame);
@@ -433,6 +439,15 @@ static int uvc_v4l2_open(struct file *file)
goto done;
}
+ if (atomic_inc_return(&video->dev->users) == 1) {
+ if ((ret = uvc_status_start(video->dev)) < 0) {
+ usb_autopm_put_interface(video->dev->intf);
+ atomic_dec(&video->dev->users);
+ kfree(handle);
+ goto done;
+ }
+ }
+
handle->device = video;
handle->state = UVC_HANDLE_PASSIVE;
file->private_data = handle;
@@ -467,6 +482,9 @@ static int uvc_v4l2_release(struct file *file)
kfree(handle);
file->private_data = NULL;
+ if (atomic_dec_return(&video->dev->users) == 0)
+ uvc_status_stop(video->dev);
+
usb_autopm_put_interface(video->dev->intf);
kref_put(&video->dev->kref, uvc_delete);
return 0;
@@ -512,7 +530,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(&xctrl, 0, sizeof xctrl);
xctrl.id = ctrl->id;
- uvc_ctrl_begin(video);
+ ret = uvc_ctrl_begin(video);
+ if (ret < 0)
+ return ret;
+
ret = uvc_ctrl_get(video, &xctrl);
uvc_ctrl_rollback(video);
if (ret >= 0)
@@ -529,7 +550,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
xctrl.id = ctrl->id;
xctrl.value = ctrl->value;
- uvc_ctrl_begin(video);
+ ret = uvc_ctrl_begin(video);
+ if (ret < 0)
+ return ret;
+
ret = uvc_ctrl_set(video, &xctrl);
if (ret < 0) {
uvc_ctrl_rollback(video);
@@ -548,7 +572,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_ext_control *ctrl = ctrls->controls;
unsigned int i;
- uvc_ctrl_begin(video);
+ ret = uvc_ctrl_begin(video);
+ if (ret < 0)
+ return ret;
+
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
ret = uvc_ctrl_get(video, ctrl);
if (ret < 0) {
@@ -648,7 +675,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_S_INPUT:
{
- u8 input = *(u32 *)arg + 1;
+ u32 input = *(u32 *)arg + 1;
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -660,7 +687,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
break;
}
- if (input > video->selector->selector.bNrInPins)
+ if (input == 0 || input > video->selector->selector.bNrInPins)
return -EINVAL;
return uvc_query_ctrl(video->dev, SET_CUR, video->selector->id,
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 6ce974d7362..01b633c7348 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -65,7 +65,8 @@ static void uvc_fixup_video_ctrl(struct uvc_video_device *video,
struct uvc_streaming_control *ctrl)
{
struct uvc_format *format;
- struct uvc_frame *frame;
+ struct uvc_frame *frame = NULL;
+ unsigned int i;
if (ctrl->bFormatIndex <= 0 ||
ctrl->bFormatIndex > video->streaming->nformats)
@@ -73,11 +74,15 @@ static void uvc_fixup_video_ctrl(struct uvc_video_device *video,
format = &video->streaming->format[ctrl->bFormatIndex - 1];
- if (ctrl->bFrameIndex <= 0 ||
- ctrl->bFrameIndex > format->nframes)
- return;
+ for (i = 0; i < format->nframes; ++i) {
+ if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) {
+ frame = &format->frame[i];
+ break;
+ }
+ }
- frame = &format->frame[ctrl->bFrameIndex - 1];
+ if (frame == NULL)
+ return;
if (!(format->flags & UVC_FMT_FLAG_COMPRESSED) ||
(ctrl->dwMaxVideoFrameSize == 0 &&
@@ -1089,7 +1094,7 @@ int uvc_video_init(struct uvc_video_device *video)
/* Zero bFrameIndex might be correct. Stream-based formats (including
* MPEG-2 TS and DV) do not support frames but have a dummy frame
* descriptor with bFrameIndex set to zero. If the default frame
- * descriptor is not found, use the first avalable frame.
+ * descriptor is not found, use the first available frame.
*/
for (i = format->nframes; i > 0; --i) {
frame = &format->frame[i-1];
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index e5014e668f9..3c78d3c1e4c 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -313,7 +313,6 @@ struct uvc_xu_control {
#define UVC_QUIRK_BUILTIN_ISIGHT 0x00000008
#define UVC_QUIRK_STREAM_NO_FID 0x00000010
#define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020
-#define UVC_QUIRK_PRUNE_CONTROLS 0x00000040
#define UVC_QUIRK_FIX_BANDWIDTH 0x00000080
/* Format flags */
@@ -634,6 +633,7 @@ struct uvc_device {
enum uvc_device_state state;
struct kref kref;
struct list_head list;
+ atomic_t users;
/* Video control interface */
__u16 uvc_version;
@@ -747,6 +747,7 @@ extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf);
extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue,
struct file *file, poll_table *wait);
+extern int uvc_queue_allocated(struct uvc_video_queue *queue);
static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
{
return queue->flags & UVC_QUEUE_STREAMING;
@@ -770,6 +771,8 @@ extern int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
/* Status */
extern int uvc_status_init(struct uvc_device *dev);
extern void uvc_status_cleanup(struct uvc_device *dev);
+extern int uvc_status_start(struct uvc_device *dev);
+extern void uvc_status_stop(struct uvc_device *dev);
extern int uvc_status_suspend(struct uvc_device *dev);
extern int uvc_status_resume(struct uvc_device *dev);
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index f576ef66b80..f96475626da 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -746,6 +746,7 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
const struct v4l2_subdev_ops *ops)
{
v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
/* the owner is the same as the i2c_client's driver owner */
sd->owner = client->driver->driver.owner;
/* i2c_client and v4l2_subdev point to one another */
@@ -897,8 +898,7 @@ const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
};
static const unsigned short tv_addrs[] = {
0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
- 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
- 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x60, 0x61, 0x62, 0x63, 0x64,
I2C_CLIENT_END
};
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 94aa485ade5..0d06e7cbd5b 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -49,6 +49,22 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
}
EXPORT_SYMBOL_GPL(v4l2_device_register);
+int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
+ atomic_t *instance)
+{
+ int num = atomic_inc_return(instance) - 1;
+ int len = strlen(basename);
+
+ if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
+ "%s-%d", basename, num);
+ else
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
+ "%s%d", basename, num);
+ return num;
+}
+EXPORT_SYMBOL_GPL(v4l2_device_set_name);
+
void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
{
if (v4l2_dev->dev) {
@@ -67,8 +83,21 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
v4l2_device_disconnect(v4l2_dev);
/* Unregister subdevs */
- list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list)
+ list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) {
v4l2_device_unregister_subdev(sd);
+#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
+ if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* We need to unregister the i2c client explicitly.
+ We cannot rely on i2c_del_adapter to always
+ unregister clients for us, since if the i2c bus
+ is a platform bus, then it is never deleted. */
+ if (client)
+ i2c_unregister_device(client);
+ }
+#endif
+ }
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister);
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index b7b05842cf2..f1ccf98c0a6 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -118,6 +118,7 @@ void videobuf_queue_core_init(struct videobuf_queue *q,
void *priv,
struct videobuf_qtype_ops *int_ops)
{
+ BUG_ON(!q);
memset(q, 0, sizeof(*q));
q->irqlock = irqlock;
q->dev = dev;
@@ -439,6 +440,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
}
req->count = retval;
+ retval = 0;
done:
mutex_unlock(&q->vb_lock);
@@ -454,7 +456,7 @@ int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
dprintk(1, "querybuf: Wrong type.\n");
goto done;
}
- if (unlikely(b->index < 0 || b->index >= VIDEO_MAX_FRAME)) {
+ if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
dprintk(1, "querybuf: index out of range.\n");
goto done;
}
@@ -495,7 +497,7 @@ int videobuf_qbuf(struct videobuf_queue *q,
dprintk(1, "qbuf: Wrong type.\n");
goto done;
}
- if (b->index < 0 || b->index >= VIDEO_MAX_FRAME) {
+ if (b->index >= VIDEO_MAX_FRAME) {
dprintk(1, "qbuf: index out of range.\n");
goto done;
}
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index 6109fb5f34e..d09ce83a942 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <media/videobuf-dma-contig.h>
@@ -25,6 +26,7 @@ struct videobuf_dma_contig_memory {
void *vaddr;
dma_addr_t dma_handle;
unsigned long size;
+ int is_userptr;
};
#define MAGIC_DC_MEM 0x0733ac61
@@ -108,6 +110,82 @@ static struct vm_operations_struct videobuf_vm_ops = {
.close = videobuf_vm_close,
};
+/**
+ * videobuf_dma_contig_user_put() - reset pointer to user space buffer
+ * @mem: per-buffer private videobuf-dma-contig data
+ *
+ * This function resets the user space pointer.
+ */
+static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
+{
+ mem->is_userptr = 0;
+ mem->dma_handle = 0;
+ mem->size = 0;
+}
+
+/**
+ * videobuf_dma_contig_user_get() - setup user space memory pointer
+ * @mem: per-buffer private videobuf-dma-contig data
+ * @vb: video buffer to map
+ *
+ * This function validates and sets up a pointer to user space memory.
+ * Only physically contiguous pfn-mapped memory is accepted.
+ *
+ * Returns 0 if successful.
+ */
+static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
+ struct videobuf_buffer *vb)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long prev_pfn, this_pfn;
+ unsigned long pages_done, user_address;
+ int ret;
+
+ mem->size = PAGE_ALIGN(vb->size);
+ mem->is_userptr = 0;
+ ret = -EINVAL;
+
+ down_read(&mm->mmap_sem);
+
+ vma = find_vma(mm, vb->baddr);
+ if (!vma)
+ goto out_up;
+
+ if ((vb->baddr + mem->size) > vma->vm_end)
+ goto out_up;
+
+ pages_done = 0;
+ prev_pfn = 0; /* kill warning */
+ user_address = vb->baddr;
+
+ while (pages_done < (mem->size >> PAGE_SHIFT)) {
+ ret = follow_pfn(vma, user_address, &this_pfn);
+ if (ret)
+ break;
+
+ if (pages_done == 0)
+ mem->dma_handle = this_pfn << PAGE_SHIFT;
+ else if (this_pfn != (prev_pfn + 1))
+ ret = -EFAULT;
+
+ if (ret)
+ break;
+
+ prev_pfn = this_pfn;
+ user_address += PAGE_SIZE;
+ pages_done++;
+ }
+
+ if (!ret)
+ mem->is_userptr = 1;
+
+ out_up:
+ up_read(&current->mm->mmap_sem);
+
+ return ret;
+}
+
static void *__videobuf_alloc(size_t size)
{
struct videobuf_dma_contig_memory *mem;
@@ -154,12 +232,11 @@ static int __videobuf_iolock(struct videobuf_queue *q,
case V4L2_MEMORY_USERPTR:
dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
- /* The only USERPTR currently supported is the one needed for
- read() method.
- */
+ /* handle pointer from user space */
if (vb->baddr)
- return -EINVAL;
+ return videobuf_dma_contig_user_get(mem, vb);
+ /* allocate memory for the read() method */
mem->size = PAGE_ALIGN(vb->size);
mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
&mem->dma_handle, GFP_KERNEL);
@@ -182,19 +259,6 @@ static int __videobuf_iolock(struct videobuf_queue *q,
return 0;
}
-static int __videobuf_sync(struct videobuf_queue *q,
- struct videobuf_buffer *buf)
-{
- struct videobuf_dma_contig_memory *mem = buf->priv;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
- DMA_FROM_DEVICE);
- return 0;
-}
-
static int __videobuf_mmap_free(struct videobuf_queue *q)
{
unsigned int i;
@@ -356,7 +420,6 @@ static struct videobuf_qtype_ops qops = {
.alloc = __videobuf_alloc,
.iolock = __videobuf_iolock,
- .sync = __videobuf_sync,
.mmap_free = __videobuf_mmap_free,
.mmap_mapper = __videobuf_mmap_mapper,
.video_copy_to_user = __videobuf_copy_to_user,
@@ -400,7 +463,7 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
So, it should free memory only if the memory were allocated for
read() operation.
*/
- if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
+ if (buf->memory != V4L2_MEMORY_USERPTR)
return;
if (!mem)
@@ -408,6 +471,13 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+ /* handle user space pointer case */
+ if (buf->baddr) {
+ videobuf_dma_contig_user_put(mem);
+ return;
+ }
+
+ /* read() method */
dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
mem->vaddr = NULL;
}
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index da1790e57a8..a8dd22ace3f 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -58,9 +58,10 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
struct page *pg;
int i;
- sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
+ sglist = vmalloc(nr_pages * sizeof(*sglist));
if (NULL == sglist)
return NULL;
+ memset(sglist, 0, nr_pages * sizeof(*sglist));
sg_init_table(sglist, nr_pages);
for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
pg = vmalloc_to_page(virt);
@@ -72,7 +73,7 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
return sglist;
err:
- kfree(sglist);
+ vfree(sglist);
return NULL;
}
@@ -84,7 +85,7 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
if (NULL == pages[0])
return NULL;
- sglist = kmalloc(nr_pages * sizeof(*sglist), GFP_KERNEL);
+ sglist = vmalloc(nr_pages * sizeof(*sglist));
if (NULL == sglist)
return NULL;
sg_init_table(sglist, nr_pages);
@@ -104,12 +105,12 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
nopage:
dprintk(2,"sgl: oops - no page\n");
- kfree(sglist);
+ vfree(sglist);
return NULL;
highmem:
dprintk(2,"sgl: oops - highmem page\n");
- kfree(sglist);
+ vfree(sglist);
return NULL;
}
@@ -230,7 +231,7 @@ int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
(dma->vmalloc,dma->nr_pages);
}
if (dma->bus_addr) {
- dma->sglist = kmalloc(sizeof(struct scatterlist), GFP_KERNEL);
+ dma->sglist = vmalloc(sizeof(*dma->sglist));
if (NULL != dma->sglist) {
dma->sglen = 1;
sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK;
@@ -248,10 +249,10 @@ int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
if (0 == dma->sglen) {
printk(KERN_WARNING
"%s: videobuf_map_sg failed\n",__func__);
- kfree(dma->sglist);
+ vfree(dma->sglist);
dma->sglist = NULL;
dma->sglen = 0;
- return -EIO;
+ return -ENOMEM;
}
}
return 0;
@@ -274,7 +275,7 @@ int videobuf_dma_unmap(struct videobuf_queue* q,struct videobuf_dmabuf *dma)
dma_unmap_sg(q->dev, dma->sglist, dma->nr_pages, dma->direction);
- kfree(dma->sglist);
+ vfree(dma->sglist);
dma->sglist = NULL;
dma->sglen = 0;
return 0;
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 43e0998adb5..97b082fe447 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -868,9 +868,9 @@ static void vino_sync_buffer(struct vino_framebuffer *fb)
dprintk("vino_sync_buffer():\n");
for (i = 0; i < fb->desc_table.page_count; i++)
- dma_sync_single(NULL,
- fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(NULL,
+ fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
+ PAGE_SIZE, DMA_FROM_DEVICE);
}
/* Framebuffer fifo functions (need to be locked externally) */
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index ea6c577b0eb..03dc2f3cf84 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -1022,7 +1022,7 @@ zr36057_init (struct zoran *zr)
zr->vbuf_bytesperline = 0;
/* Avoid nonsense settings from user for default input/norm */
- if (default_norm < 0 && default_norm > 2)
+ if (default_norm < 0 || default_norm > 2)
default_norm = 0;
if (default_norm == 0) {
zr->norm = V4L2_STD_PAL;
@@ -1477,7 +1477,7 @@ static struct pci_driver zoran_driver = {
.name = "zr36067",
.id_table = zr36067_pci_tbl,
.probe = zoran_probe,
- .remove = zoran_remove,
+ .remove = __devexit_p(zoran_remove),
};
static int __init zoran_init(void)
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index ac169c9eb18..fc976f42f43 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -882,9 +882,11 @@ static void zr364xx_disconnect(struct usb_interface *intf)
video_unregister_device(cam->vdev);
cam->vdev = NULL;
kfree(cam->buffer);
- if (cam->framebuf)
- vfree(cam->framebuf);
+ cam->buffer = NULL;
+ vfree(cam->framebuf);
+ cam->framebuf = NULL;
kfree(cam);
+ cam = NULL;
}
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index de143deb06f..7847bbc1440 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -672,15 +672,14 @@ try_again:
msb->req_sg);
if (!msb->seg_count) {
- chunk = __blk_end_request(msb->block_req, -ENOMEM,
- blk_rq_cur_bytes(msb->block_req));
+ chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
continue;
}
- t_sec = msb->block_req->sector << 9;
+ t_sec = blk_rq_pos(msb->block_req) << 9;
sector_div(t_sec, msb->page_size);
- count = msb->block_req->nr_sectors << 9;
+ count = blk_rq_bytes(msb->block_req);
count /= msb->page_size;
param.system = msb->system;
@@ -705,8 +704,8 @@ try_again:
return 0;
}
- dev_dbg(&card->dev, "elv_next\n");
- msb->block_req = elv_next_request(msb->queue);
+ dev_dbg(&card->dev, "blk_fetch\n");
+ msb->block_req = blk_fetch_request(msb->queue);
if (!msb->block_req) {
dev_dbg(&card->dev, "issue end\n");
return -EAGAIN;
@@ -745,7 +744,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
t_len *= msb->page_size;
}
} else
- t_len = msb->block_req->nr_sectors << 9;
+ t_len = blk_rq_bytes(msb->block_req);
dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
@@ -825,8 +824,8 @@ static void mspro_block_submit_req(struct request_queue *q)
return;
if (msb->eject) {
- while ((req = elv_next_request(q)) != NULL)
- __blk_end_request(req, -ENODEV, blk_rq_bytes(req));
+ while ((req = blk_fetch_request(q)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
return;
}
@@ -1243,7 +1242,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
- blk_queue_hardsect_size(msb->queue, msb->page_size);
+ blk_queue_logical_block_size(msb->queue, msb->page_size);
capacity = be16_to_cpu(sys_info->user_block_count);
capacity *= be16_to_cpu(sys_info->block_size);
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index 693e4b51135..fa9249b4971 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -130,7 +130,7 @@ mpi_ioc.h
* 08-08-01 01.02.01 Original release for v1.2 work.
* New format for FWVersion and ProductId in
* MSG_IOC_FACTS_REPLY and MPI_FW_HEADER.
- * 08-31-01 01.02.02 Addded event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
+ * 08-31-01 01.02.02 Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
* related structure and defines.
* Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED.
* Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE.
@@ -190,7 +190,7 @@ mpi_ioc.h
* 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED.
* Added MaxInitiators field to PortFacts reply.
* Added SAS Device Status Change ReasonCode for
- * asynchronous notificaiton.
+ * asynchronous notification.
* Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event
* data structure.
* Added new ImageType values for FWDownload and FWUpload
@@ -623,7 +623,7 @@ mpi_fc.h
* 11-02-00 01.01.01 Original release for post 1.0 work
* 12-04-00 01.01.02 Added messages for Common Transport Send and
* Primitive Send.
- * 01-09-01 01.01.03 Modifed some of the new flags to have an MPI prefix
+ * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix
* and modified the FcPrimitiveSend flags.
* 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger
* field.
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5d496a99e03..0df065275cd 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -146,7 +146,6 @@ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
-static DECLARE_WAIT_QUEUE_HEAD(mpt_waitq);
/*
* Driver Callback Index's
@@ -159,7 +158,8 @@ static u8 last_drv_idx;
* Forward protos...
*/
static irqreturn_t mpt_interrupt(int irq, void *bus_id);
-static int mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
+static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+ MPT_FRAME_HDR *reply);
static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
u32 *req, int replyBytes, u16 *u16reply, int maxwait,
int sleepFlag);
@@ -190,9 +190,9 @@ static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
-static void mpt_timer_expired(unsigned long data);
static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
-static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
+static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
+ int sleepFlag);
static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
@@ -207,8 +207,8 @@ static int procmpt_iocinfo_read(char *buf, char **start, off_t offset,
#endif
static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
-//int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
-static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers);
+static int ProcessEventNotification(MPT_ADAPTER *ioc,
+ EventNotificationReply_t *evReply, int *evHandlers);
static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
@@ -277,6 +277,56 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
}
/**
+ * mpt_is_discovery_complete - determine if discovery has completed
+ * @ioc: per adapter instance
+ *
+ * Returns 1 when discovery is complete, else zero.
+ */
+static int
+mpt_is_discovery_complete(MPT_ADAPTER *ioc)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasIOUnitPage0_t *buffer;
+ dma_addr_t dma_handle;
+ int rc = 0;
+
+ memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
+ memset(&cfg, 0, sizeof(CONFIGPARMS));
+ hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+ if ((mpt_config(ioc, &cfg)))
+ goto out;
+ if (!hdr.ExtPageLength)
+ goto out;
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer)
+ goto out;
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if ((mpt_config(ioc, &cfg)))
+ goto out_free_consistent;
+
+ if (!(buffer->PhyData[0].PortFlags &
+ MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
+ rc = 1;
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return rc;
+}
+
+/**
* mpt_fault_reset_work - work performed on workq after ioc fault
* @work: input argument, used to derive ioc
*
@@ -290,7 +340,7 @@ mpt_fault_reset_work(struct work_struct *work)
int rc;
unsigned long flags;
- if (ioc->diagPending || !ioc->active)
+ if (ioc->ioc_reset_in_progress || !ioc->active)
goto out;
ioc_raw_state = mpt_GetIocState(ioc, 0);
@@ -307,6 +357,12 @@ mpt_fault_reset_work(struct work_struct *work)
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
"reset (%04xh)\n", ioc->name, ioc_raw_state &
MPI_DOORBELL_DATA_MASK);
+ } else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
+ if ((mpt_is_discovery_complete(ioc))) {
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
+ "discovery_quiesce_io flag\n", ioc->name));
+ ioc->sas_discovery_quiesce_io = 0;
+ }
}
out:
@@ -317,11 +373,11 @@ mpt_fault_reset_work(struct work_struct *work)
ioc = ioc->alt_ioc;
/* rearm the timer */
- spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->reset_work_q)
queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
msecs_to_jiffies(MPT_POLLING_INTERVAL));
- spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
}
@@ -501,9 +557,9 @@ mpt_interrupt(int irq, void *bus_id)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
- * mpt_base_reply - MPT base driver's callback routine
+ * mptbase_reply - MPT base driver's callback routine
* @ioc: Pointer to MPT_ADAPTER structure
- * @mf: Pointer to original MPT request frame
+ * @req: Pointer to original MPT request frame
* @reply: Pointer to MPT reply frame (NULL if TurboReply)
*
* MPT base driver's callback routine; all base driver
@@ -514,122 +570,49 @@ mpt_interrupt(int irq, void *bus_id)
* should be freed, or 0 if it shouldn't.
*/
static int
-mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
+mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
{
+ EventNotificationReply_t *pEventReply;
+ u8 event;
+ int evHandlers;
int freereq = 1;
- u8 func;
- dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply() called\n", ioc->name));
-#ifdef CONFIG_FUSION_LOGGING
- if ((ioc->debug_level & MPT_DEBUG_MSG_FRAME) &&
- !(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) {
- dmfprintk(ioc, printk(MYIOC_s_INFO_FMT ": Original request frame (@%p) header\n",
- ioc->name, mf));
- DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)mf);
- }
-#endif
-
- func = reply->u.hdr.Function;
- dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, Function=%02Xh\n",
- ioc->name, func));
-
- if (func == MPI_FUNCTION_EVENT_NOTIFICATION) {
- EventNotificationReply_t *pEvReply = (EventNotificationReply_t *) reply;
- int evHandlers = 0;
- int results;
-
- results = ProcessEventNotification(ioc, pEvReply, &evHandlers);
- if (results != evHandlers) {
- /* CHECKME! Any special handling needed here? */
- devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Called %d event handlers, sum results = %d\n",
- ioc->name, evHandlers, results));
- }
-
- /*
- * Hmmm... It seems that EventNotificationReply is an exception
- * to the rule of one reply per request.
- */
- if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
+ switch (reply->u.hdr.Function) {
+ case MPI_FUNCTION_EVENT_NOTIFICATION:
+ pEventReply = (EventNotificationReply_t *)reply;
+ evHandlers = 0;
+ ProcessEventNotification(ioc, pEventReply, &evHandlers);
+ event = le32_to_cpu(pEventReply->Event) & 0xFF;
+ if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
freereq = 0;
- } else {
- devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n",
- ioc->name, pEvReply));
- }
-
-#ifdef CONFIG_PROC_FS
-// LogEvent(ioc, pEvReply);
-#endif
-
- } else if (func == MPI_FUNCTION_EVENT_ACK) {
- dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, EventAck reply received\n",
- ioc->name));
- } else if (func == MPI_FUNCTION_CONFIG) {
- CONFIGPARMS *pCfg;
- unsigned long flags;
-
- dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "config_complete (mf=%p,mr=%p)\n",
- ioc->name, mf, reply));
-
- pCfg = * ((CONFIGPARMS **)((u8 *) mf + ioc->req_sz - sizeof(void *)));
-
- if (pCfg) {
- /* disable timer and remove from linked list */
- del_timer(&pCfg->timer);
-
- spin_lock_irqsave(&ioc->FreeQlock, flags);
- list_del(&pCfg->linkage);
- spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-
- /*
- * If IOC Status is SUCCESS, save the header
- * and set the status code to GOOD.
- */
- pCfg->status = MPT_CONFIG_ERROR;
- if (reply) {
- ConfigReply_t *pReply = (ConfigReply_t *)reply;
- u16 status;
-
- status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
- dcprintk(ioc, printk(MYIOC_s_NOTE_FMT " IOCStatus=%04xh, IOCLogInfo=%08xh\n",
- ioc->name, status, le32_to_cpu(pReply->IOCLogInfo)));
-
- pCfg->status = status;
- if (status == MPI_IOCSTATUS_SUCCESS) {
- if ((pReply->Header.PageType &
- MPI_CONFIG_PAGETYPE_MASK) ==
- MPI_CONFIG_PAGETYPE_EXTENDED) {
- pCfg->cfghdr.ehdr->ExtPageLength =
- le16_to_cpu(pReply->ExtPageLength);
- pCfg->cfghdr.ehdr->ExtPageType =
- pReply->ExtPageType;
- }
- pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
-
- /* If this is a regular header, save PageLength. */
- /* LMP Do this better so not using a reserved field! */
- pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
- pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
- pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
- }
- }
-
- /*
- * Wake up the original calling thread
- */
- pCfg->wait_done = 1;
- wake_up(&mpt_waitq);
+ if (event != MPI_EVENT_EVENT_CHANGE)
+ break;
+ case MPI_FUNCTION_CONFIG:
+ case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
+ ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+ if (reply) {
+ ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ memcpy(ioc->mptbase_cmds.reply, reply,
+ min(MPT_DEFAULT_FRAME_SIZE,
+ 4 * reply->u.reply.MsgLength));
}
- } else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) {
- /* we should be always getting a reply frame */
- memcpy(ioc->persist_reply_frame, reply,
- min(MPT_DEFAULT_FRAME_SIZE,
- 4*reply->u.reply.MsgLength));
- del_timer(&ioc->persist_timer);
- ioc->persist_wait_done = 1;
- wake_up(&mpt_waitq);
- } else {
- printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n",
- ioc->name, func);
+ if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->mptbase_cmds.done);
+ } else
+ freereq = 0;
+ if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
+ freereq = 1;
+ break;
+ case MPI_FUNCTION_EVENT_ACK:
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "EventAck reply received\n", ioc->name));
+ break;
+ default:
+ printk(MYIOC_s_ERR_FMT
+ "Unexpected msg function (=%02Xh) reply received!\n",
+ ioc->name, reply->u.hdr.Function);
+ break;
}
/*
@@ -988,17 +971,21 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
/* Put Request back on FreeQ! */
spin_lock_irqsave(&ioc->FreeQlock, flags);
- mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */
+ if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
+ goto out;
+ /* signature to know if this mf is freed */
+ mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
#ifdef MFCNT
ioc->mfcnt--;
#endif
+ out:
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
- * mpt_add_sge - Place a simple SGE at address pAddr.
+ * mpt_add_sge - Place a simple 32 bit SGE at address pAddr.
* @pAddr: virtual address for SGE
* @flagslength: SGE flags and data transfer length
* @dma_addr: Physical address
@@ -1006,23 +993,116 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
* This routine places a MPT request frame back on the MPT adapter's
* FreeQ.
*/
-void
-mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
+static void
+mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
- if (sizeof(dma_addr_t) == sizeof(u64)) {
- SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+ SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
+ pSge->FlagsLength = cpu_to_le32(flagslength);
+ pSge->Address = cpu_to_le32(dma_addr);
+}
+
+/**
+ * mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr.
+ * @pAddr: virtual address for SGE
+ * @flagslength: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * This routine places a simple 64 bit scatter-gather element (SGE) at
+ * the virtual address pAddr.
+ **/
+static void
+mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
+{
+ SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+ pSge->Address.Low = cpu_to_le32
+ (lower_32_bits((unsigned long)(dma_addr)));
+ pSge->Address.High = cpu_to_le32
+ (upper_32_bits((unsigned long)dma_addr));
+ pSge->FlagsLength = cpu_to_le32
+ ((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
+}
+
+/**
+ * mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr (1078 workaround).
+ * @pAddr: virtual address for SGE
+ * @flagslength: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * This routine places a simple 64 bit SGE at the virtual address pAddr,
+ * applying the LSI SAS1078 36GB addressing errata workaround.
+ **/
+static void
+mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
+{
+ SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+ u32 tmp;
+
+ pSge->Address.Low = cpu_to_le32
+ (lower_32_bits((unsigned long)(dma_addr)));
+ tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
+
+ /*
+ * 1078 errata workaround for the 36GB limitation
+ */
+ if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) {
+ flagslength |=
+ MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
+ tmp |= (1<<31);
+ if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
+ printk(KERN_DEBUG "1078 P0M2 addressing for "
+ "addr = 0x%llx len = %d\n",
+ (unsigned long long)dma_addr,
+ MPI_SGE_LENGTH(flagslength));
+ }
+
+ pSge->Address.High = cpu_to_le32(tmp);
+ pSge->FlagsLength = cpu_to_le32(
+ (flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_add_chain - Place a 32 bit chain SGE at address pAddr.
+ * @pAddr: virtual address for SGE
+ * @next: nextChainOffset value (u32's)
+ * @length: length of next SGL segment
+ * @dma_addr: Physical address
+ *
+ */
+static void
+mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
+{
+ SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
+ pChain->Length = cpu_to_le16(length);
+ pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
+ pChain->NextChainOffset = next;
+ pChain->Address = cpu_to_le32(dma_addr);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr.
+ * @pAddr: virtual address for SGE
+ * @next: nextChainOffset value (u32's)
+ * @length: length of next SGL segment
+ * @dma_addr: Physical address
+ *
+ */
+static void
+mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
+{
+ SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
u32 tmp = dma_addr & 0xFFFFFFFF;
- pSge->FlagsLength = cpu_to_le32(flagslength);
- pSge->Address.Low = cpu_to_le32(tmp);
- tmp = (u32) ((u64)dma_addr >> 32);
- pSge->Address.High = cpu_to_le32(tmp);
+ pChain->Length = cpu_to_le16(length);
+ pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI_SGE_FLAGS_64_BIT_ADDRESSING);
- } else {
- SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
- pSge->FlagsLength = cpu_to_le32(flagslength);
- pSge->Address = cpu_to_le32(dma_addr);
- }
+ pChain->NextChainOffset = next;
+
+ pChain->Address.Low = cpu_to_le32(tmp);
+ tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
+ pChain->Address.High = cpu_to_le32(tmp);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1225,7 +1305,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
}
flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
flags_length |= ioc->HostPageBuffer_sz;
- mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
+ ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
return 0;
@@ -1534,21 +1614,42 @@ mpt_mapresources(MPT_ADAPTER *ioc)
pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
- ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
- ioc->name));
- } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
- ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
- ioc->name));
+ if (sizeof(dma_addr_t) > 4) {
+ const uint64_t required_mask = dma_get_required_mask
+ (&pdev->dev);
+ if (required_mask > DMA_BIT_MASK(32)
+ && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ && !pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(64))) {
+ ioc->dma_mask = DMA_BIT_MASK(64);
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
+ ioc->name));
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32))) {
+ ioc->dma_mask = DMA_BIT_MASK(32);
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
+ ioc->name));
+ } else {
+ printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
+ ioc->name, pci_name(pdev));
+ return r;
+ }
} else {
- printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
- ioc->name, pci_name(pdev));
- pci_release_selected_regions(pdev, ioc->bars);
- return r;
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32))) {
+ ioc->dma_mask = DMA_BIT_MASK(32);
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
+ ioc->name));
+ } else {
+ printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
+ ioc->name, pci_name(pdev));
+ return r;
+ }
}
mem_phys = msize = 0;
@@ -1632,6 +1733,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->id = mpt_ids++;
sprintf(ioc->name, "ioc%d", ioc->id);
+ dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));
/*
* set initial debug level
@@ -1650,14 +1752,36 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
return r;
}
+ /*
+ * Setting up proper handlers for scatter gather handling
+ */
+ if (ioc->dma_mask == DMA_BIT_MASK(64)) {
+ if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
+ ioc->add_sge = &mpt_add_sge_64bit_1078;
+ else
+ ioc->add_sge = &mpt_add_sge_64bit;
+ ioc->add_chain = &mpt_add_chain_64bit;
+ ioc->sg_addr_size = 8;
+ } else {
+ ioc->add_sge = &mpt_add_sge;
+ ioc->add_chain = &mpt_add_chain;
+ ioc->sg_addr_size = 4;
+ }
+ ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
+
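Choosing the SGE and chain builders once at attach time and caching them as function pointers keeps the per-I/O path free of DMA-mask branches. A simplified sketch of the same dispatch idea (the builder names and adapter struct are invented for illustration, not the driver's types):

#include <stdint.h>
#include <stdio.h>

typedef void (*add_sge_fn)(void *buf, uint32_t flagslength, uint64_t dma_addr);

static void add_sge_32(void *buf, uint32_t fl, uint64_t dma)
{
        (void)buf;
        printf("32-bit SGE: flags=0x%08x addr=0x%08x\n",
               (unsigned)fl, (unsigned)dma);
}

static void add_sge_64(void *buf, uint32_t fl, uint64_t dma)
{
        (void)buf;
        printf("64-bit SGE: flags=0x%08x addr=0x%016llx\n",
               (unsigned)fl, (unsigned long long)dma);
}

struct adapter {
        add_sge_fn add_sge;
        int sg_addr_size;       /* 4 or 8 address bytes per SGE      */
        int sge_size;           /* flags/length word + address bytes */
};

static void setup_sge_handlers(struct adapter *a, int use_64bit)
{
        a->add_sge = use_64bit ? add_sge_64 : add_sge_32;
        a->sg_addr_size = use_64bit ? 8 : 4;
        a->sge_size = 4 + a->sg_addr_size;
}

int main(void)
{
        struct adapter a;

        setup_sge_handlers(&a, 1);
        a.add_sge(NULL, 0xD1000000u, 0x12345678ABCDULL);
        printf("SGE size = %d bytes\n", a.sge_size);
        return 0;
}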
ioc->alloc_total = sizeof(MPT_ADAPTER);
ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
ioc->pcidev = pdev;
- ioc->diagPending = 0;
- spin_lock_init(&ioc->diagLock);
- spin_lock_init(&ioc->initializing_hba_lock);
+
+ spin_lock_init(&ioc->taskmgmt_lock);
+ mutex_init(&ioc->internal_cmds.mutex);
+ init_completion(&ioc->internal_cmds.done);
+ mutex_init(&ioc->mptbase_cmds.mutex);
+ init_completion(&ioc->mptbase_cmds.done);
+ mutex_init(&ioc->taskmgmt_cmds.mutex);
+ init_completion(&ioc->taskmgmt_cmds.done);
/* Initialize the event logging.
*/
@@ -1670,16 +1794,13 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->mfcnt = 0;
#endif
+ ioc->sh = NULL;
ioc->cached_fw = NULL;
/* Initialize SCSI Config Data structure
*/
memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
- /* Initialize the running configQ head.
- */
- INIT_LIST_HEAD(&ioc->configQ);
-
/* Initialize the fc rport list head.
*/
INIT_LIST_HEAD(&ioc->fc_rports);
@@ -1690,9 +1811,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* Initialize workqueue */
INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
- spin_lock_init(&ioc->fault_reset_work_lock);
- snprintf(ioc->reset_work_q_name, sizeof(ioc->reset_work_q_name),
+ snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
"mpt_poll_%d", ioc->id);
ioc->reset_work_q =
create_singlethread_workqueue(ioc->reset_work_q_name);
@@ -1767,11 +1887,14 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI_MANUFACTPAGE_DEVID_SAS1064:
case MPI_MANUFACTPAGE_DEVID_SAS1068:
ioc->errata_flag_1064 = 1;
+ ioc->bus_type = SAS;
+ break;
case MPI_MANUFACTPAGE_DEVID_SAS1064E:
case MPI_MANUFACTPAGE_DEVID_SAS1068E:
case MPI_MANUFACTPAGE_DEVID_SAS1078:
ioc->bus_type = SAS;
+ break;
}
@@ -1813,6 +1936,11 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
*/
mpt_detect_bound_ports(ioc, pdev);
+ INIT_LIST_HEAD(&ioc->fw_event_list);
+ spin_lock_init(&ioc->fw_event_lock);
+ snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
+ ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
+
if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
CAN_SLEEP)) != 0){
printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
@@ -1885,13 +2013,18 @@ mpt_detach(struct pci_dev *pdev)
/*
* Stop polling ioc for fault condition
*/
- spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
wq = ioc->reset_work_q;
ioc->reset_work_q = NULL;
- spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
cancel_delayed_work(&ioc->fault_reset_work);
destroy_workqueue(wq);
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->fw_event_q;
+ ioc->fw_event_q = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ destroy_workqueue(wq);
sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
remove_proc_entry(pname, NULL);
@@ -1994,6 +2127,21 @@ mpt_resume(struct pci_dev *pdev)
if (err)
return err;
+ if (ioc->dma_mask == DMA_BIT_MASK(64)) {
+ if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
+ ioc->add_sge = &mpt_add_sge_64bit_1078;
+ else
+ ioc->add_sge = &mpt_add_sge_64bit;
+ ioc->add_chain = &mpt_add_chain_64bit;
+ ioc->sg_addr_size = 8;
+ } else {
+
+ ioc->add_sge = &mpt_add_sge;
+ ioc->add_chain = &mpt_add_chain;
+ ioc->sg_addr_size = 4;
+ }
+ ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
+
printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
CHIPREG_READ32(&ioc->chip->Doorbell));
@@ -2091,12 +2239,16 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
ioc->active = 0;
if (ioc->alt_ioc) {
- if (ioc->alt_ioc->active)
+ if (ioc->alt_ioc->active ||
+ reason == MPT_HOSTEVENT_IOC_RECOVER) {
reset_alt_ioc_active = 1;
-
- /* Disable alt-IOC's reply interrupts (and FreeQ) for a bit ... */
- CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF);
- ioc->alt_ioc->active = 0;
+ /* Disable alt-IOC's reply interrupts
+ * (and FreeQ) for a bit
+ **/
+ CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
+ 0xFFFFFFFF);
+ ioc->alt_ioc->active = 0;
+ }
}
hard = 1;
@@ -2117,9 +2269,11 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
}
} else {
- printk(MYIOC_s_WARN_FMT "NOT READY!\n", ioc->name);
+ printk(MYIOC_s_WARN_FMT
+ "NOT READY WARNING!\n", ioc->name);
}
- return -1;
+ ret = -1;
+ goto out;
}
/* hard_reset_done = 0 if a soft reset was performed
@@ -2129,7 +2283,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
alt_ioc_ready = 1;
else
- printk(MYIOC_s_WARN_FMT "alt_ioc not ready!\n", ioc->alt_ioc->name);
+ printk(MYIOC_s_WARN_FMT
+ ": alt-ioc Not ready WARNING!\n",
+ ioc->alt_ioc->name);
}
for (ii=0; ii<5; ii++) {
@@ -2150,7 +2306,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
if (alt_ioc_ready) {
if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc));
+ "Initial Alt IocFacts failed rc=%x\n",
+ ioc->name, rc));
/* Retry - alt IOC was initialized once
*/
rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
@@ -2194,16 +2351,20 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
IRQF_SHARED, ioc->name, ioc);
if (rc < 0) {
printk(MYIOC_s_ERR_FMT "Unable to allocate "
- "interrupt %d!\n", ioc->name, ioc->pcidev->irq);
+ "interrupt %d!\n",
+ ioc->name, ioc->pcidev->irq);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
irq_allocated = 1;
ioc->pci_irq = ioc->pcidev->irq;
pci_set_master(ioc->pcidev); /* ?? */
- dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt "
- "%d\n", ioc->name, ioc->pcidev->irq));
+ pci_set_drvdata(ioc->pcidev, ioc);
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ "installed at interrupt %d\n", ioc->name,
+ ioc->pcidev->irq));
}
}
@@ -2212,17 +2373,22 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
* init as upper addresses are needed for init.
* If fails, continue with alt-ioc processing
*/
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n",
+ ioc->name));
if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
ret = -3;
/* May need to check/upload firmware & data here!
* If fails, continue with alt-ioc processing
*/
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n",
+ ioc->name));
if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
ret = -4;
// NEW!
if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
- printk(MYIOC_s_WARN_FMT ": alt_ioc (%d) FIFO mgmt alloc!\n",
+ printk(MYIOC_s_WARN_FMT
+ ": alt-ioc (%d) FIFO mgmt alloc WARNING!\n",
ioc->alt_ioc->name, rc);
alt_ioc_ready = 0;
reset_alt_ioc_active = 0;
@@ -2232,8 +2398,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
alt_ioc_ready = 0;
reset_alt_ioc_active = 0;
- printk(MYIOC_s_WARN_FMT "alt_ioc (%d) init failure!\n",
- ioc->alt_ioc->name, rc);
+ printk(MYIOC_s_WARN_FMT
+ ": alt-ioc: (%d) init failure WARNING!\n",
+ ioc->alt_ioc->name, rc);
}
}
@@ -2269,28 +2436,36 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
}
}
+ /* Enable MPT base driver management of EventNotification
+ * and EventAck handling.
+ */
+ if ((ret == 0) && (!ioc->facts.EventState)) {
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ "SendEventNotification\n",
+ ioc->name));
+ ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */
+ }
+
+ if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
+ rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag);
+
if (ret == 0) {
/* Enable! (reply interrupt) */
CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
ioc->active = 1;
}
-
- if (reset_alt_ioc_active && ioc->alt_ioc) {
- /* (re)Enable alt-IOC! (reply interrupt) */
- dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "alt_ioc reply irq re-enabled\n",
- ioc->alt_ioc->name));
- CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM);
- ioc->alt_ioc->active = 1;
+ if (rc == 0) { /* alt ioc */
+ if (reset_alt_ioc_active && ioc->alt_ioc) {
+ /* (re)Enable alt-IOC! (reply interrupt) */
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc"
+ "reply irq re-enabled\n",
+ ioc->alt_ioc->name));
+ CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
+ MPI_HIM_DIM);
+ ioc->alt_ioc->active = 1;
+ }
}
- /* Enable MPT base driver management of EventNotification
- * and EventAck handling.
- */
- if ((ret == 0) && (!ioc->facts.EventState))
- (void) SendEventNotification(ioc, 1); /* 1=Enable EventNotification */
-
- if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
- (void) SendEventNotification(ioc->alt_ioc, 1); /* 1=Enable EventNotification */
/* Add additional "reason" check before call to GetLanConfigPages
* (combined with GetIoUnitPage2 call). This prevents a somewhat
@@ -2306,8 +2481,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
mutex_init(&ioc->raid_data.inactive_list_mutex);
INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
- if (ioc->bus_type == SAS) {
+ switch (ioc->bus_type) {
+ case SAS:
/* clear persistency table */
if(ioc->facts.IOCExceptions &
MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
@@ -2321,8 +2497,15 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
*/
mpt_findImVolumes(ioc);
- } else if (ioc->bus_type == FC) {
- if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) &&
+ /* Check, and possibly reset, the coalescing value
+ */
+ mpt_read_ioc_pg_1(ioc);
+
+ break;
+
+ case FC:
+ if ((ioc->pfacts[0].ProtocolFlags &
+ MPI_PORTFACTS_PROTOCOL_LAN) &&
(ioc->lan_cnfg_page0.Header.PageLength == 0)) {
/*
* Pre-fetch the ports LAN MAC address!
@@ -2331,11 +2514,14 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
(void) GetLanConfigPages(ioc);
a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
- ioc->name, a[5], a[4], a[3], a[2], a[1], a[0]));
-
+ "LanAddr = %02X:%02X:%02X"
+ ":%02X:%02X:%02X\n",
+ ioc->name, a[5], a[4],
+ a[3], a[2], a[1], a[0]));
}
- } else {
+ break;
+
+ case SPI:
/* Get NVRAM and adapter maximums from SPP 0 and 2
*/
mpt_GetScsiPortSettings(ioc, 0);
@@ -2354,6 +2540,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
mpt_read_ioc_pg_1(ioc);
mpt_read_ioc_pg_4(ioc);
+
+ break;
}
GetIoUnitPage2(ioc);
@@ -2435,16 +2623,20 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
if (_pcidev == peer) {
/* Paranoia checks */
if (ioc->alt_ioc != NULL) {
- printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n",
- ioc->name, ioc->alt_ioc->name);
+ printk(MYIOC_s_WARN_FMT
+ "Oops, already bound (%s <==> %s)!\n",
+ ioc->name, ioc->name, ioc->alt_ioc->name);
break;
} else if (ioc_srch->alt_ioc != NULL) {
- printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n",
- ioc_srch->name, ioc_srch->alt_ioc->name);
+ printk(MYIOC_s_WARN_FMT
+ "Oops, already bound (%s <==> %s)!\n",
+ ioc_srch->name, ioc_srch->name,
+ ioc_srch->alt_ioc->name);
break;
}
- dprintk(ioc, printk(MYIOC_s_INFO_FMT "FOUND! binding to %s\n",
- ioc->name, ioc_srch->name));
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "FOUND! binding %s <==> %s\n",
+ ioc->name, ioc->name, ioc_srch->name));
ioc_srch->alt_ioc = ioc;
ioc->alt_ioc = ioc_srch;
}
@@ -2464,8 +2656,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
int ret;
if (ioc->cached_fw != NULL) {
- ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto "
- "adapter\n", __func__, ioc->name));
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: Pushing FW onto adapter\n", __func__, ioc->name));
if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
ioc->cached_fw, CAN_SLEEP)) < 0) {
printk(MYIOC_s_WARN_FMT
@@ -2474,11 +2666,30 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
}
}
+ /*
+ * Put the controller into ready state (if it's not already)
+ */
+ if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) {
+ if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET,
+ CAN_SLEEP)) {
+ if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY)
+ printk(MYIOC_s_ERR_FMT "%s: IOC msg unit "
+ "reset failed to put ioc in ready state!\n",
+ ioc->name, __func__);
+ } else
+ printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset "
+ "failed!\n", ioc->name, __func__);
+ }
+
+
/* Disable adapter interrupts! */
+ synchronize_irq(ioc->pcidev->irq);
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
+
/* Clear any lingering interrupt */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+ CHIPREG_READ32(&ioc->chip->IntStatus);
if (ioc->alloc != NULL) {
sz = ioc->alloc_sz;
@@ -2538,19 +2749,22 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
if((ret = mpt_host_page_access_control(ioc,
MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
printk(MYIOC_s_ERR_FMT
- "host page buffers free failed (%d)!\n",
- ioc->name, ret);
+ ": %s: host page buffers free failed (%d)!\n",
+ ioc->name, __func__, ret);
}
- dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "HostPageBuffer free @ %p, sz=%d bytes\n",
- ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz));
+ dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "HostPageBuffer free @ %p, sz=%d bytes\n",
+ ioc->name, ioc->HostPageBuffer,
+ ioc->HostPageBuffer_sz));
pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
ioc->HostPageBuffer = NULL;
ioc->HostPageBuffer_sz = 0;
ioc->alloc_total -= ioc->HostPageBuffer_sz;
}
-}
+ pci_set_drvdata(ioc->pcidev, NULL);
+}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_adapter_dispose - Free all resources associated with an MPT adapter
@@ -2690,8 +2904,12 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
}
/* Is it already READY? */
- if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)
+ if (!statefault &&
+ ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) {
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ "IOC is in READY state\n", ioc->name));
return 0;
+ }
/*
* Check to see if IOC is in FAULT state.
@@ -2764,8 +2982,9 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
ii++; cntdn--;
if (!cntdn) {
- printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n",
- ioc->name, (int)((ii+5)/HZ));
+ printk(MYIOC_s_ERR_FMT
+ "Wait IOC_READY state (0x%x) timeout(%d)!\n",
+ ioc->name, ioc_state, (int)((ii+5)/HZ));
return -ETIME;
}
@@ -2778,9 +2997,8 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
}
if (statefault < 3) {
- printk(MYIOC_s_INFO_FMT "Recovered from %s\n",
- ioc->name,
- statefault==1 ? "stuck handshake" : "IOC FAULT");
+ printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name,
+ statefault == 1 ? "stuck handshake" : "IOC FAULT");
}
return hard_reset_done;
@@ -2833,8 +3051,9 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
/* IOC *must* NOT be in RESET state! */
if (ioc->last_state == MPI_IOC_STATE_RESET) {
- printk(MYIOC_s_ERR_FMT "Can't get IOCFacts NOT READY! (%08x)\n",
- ioc->name, ioc->last_state );
+ printk(KERN_ERR MYNAM
+ ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
+ ioc->name, ioc->last_state);
return -44;
}
@@ -2896,7 +3115,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
* Old: u16{Major(4),Minor(4),SubMinor(8)}
* New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
*/
- if (facts->MsgVersion < 0x0102) {
+ if (facts->MsgVersion < MPI_VERSION_01_02) {
/*
* Handle old FC f/w style, convert to new...
*/
@@ -2908,9 +3127,11 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
facts->ProductID = le16_to_cpu(facts->ProductID);
+
if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
> MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
ioc->ir_firmware = 1;
+
facts->CurrentHostMfaHighAddr =
le32_to_cpu(facts->CurrentHostMfaHighAddr);
facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
@@ -2926,7 +3147,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
* to 14 in MPI-1.01.0x.
*/
if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
- facts->MsgVersion > 0x0100) {
+ facts->MsgVersion > MPI_VERSION_01_00) {
facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
}
@@ -3108,6 +3329,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
ioc_init.MaxDevices = (U8)ioc->devices_per_bus;
ioc_init.MaxBuses = (U8)ioc->number_of_buses;
+
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n",
ioc->name, ioc->facts.MsgVersion));
if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
@@ -3122,7 +3344,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
}
ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
- if (sizeof(dma_addr_t) == sizeof(u64)) {
+ if (ioc->sg_addr_size == sizeof(u64)) {
/* Save the upper 32-bits of the request
* (reply) and sense buffers.
*/
@@ -3325,11 +3547,10 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
FWUpload_t *prequest;
FWUploadReply_t *preply;
FWUploadTCSGE_t *ptcsge;
- int sgeoffset;
u32 flagsLength;
int ii, sz, reply_sz;
int cmdStatus;
-
+ int request_size;
/* If the image size is 0, we are done.
*/
if ((sz = ioc->facts.FWImageSize) == 0)
@@ -3364,42 +3585,41 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
ptcsge->ImageSize = cpu_to_le32(sz);
ptcsge++;
- sgeoffset = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + sizeof(FWUploadTCSGE_t);
-
flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
- mpt_add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
-
- sgeoffset += sizeof(u32) + sizeof(dma_addr_t);
- dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": Sending FW Upload (req @ %p) sgeoffset=%d \n",
- ioc->name, prequest, sgeoffset));
+ ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
+ request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) +
+ ioc->SGE_size;
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload "
+ " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest,
+ ioc->facts.FWImageSize, request_size));
DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest);
- ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest,
- reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag);
+ ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest,
+ reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag);
- dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Upload completed rc=%x \n", ioc->name, ii));
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed "
+ "rc=%x \n", ioc->name, ii));
cmdStatus = -EFAULT;
if (ii == 0) {
/* Handshake transfer was complete and successful.
* Check the Reply Frame.
*/
- int status, transfer_sz;
- status = le16_to_cpu(preply->IOCStatus);
- if (status == MPI_IOCSTATUS_SUCCESS) {
- transfer_sz = le32_to_cpu(preply->ActualImageSize);
- if (transfer_sz == sz)
+ int status;
+ status = le16_to_cpu(preply->IOCStatus) &
+ MPI_IOCSTATUS_MASK;
+ if (status == MPI_IOCSTATUS_SUCCESS &&
+ ioc->facts.FWImageSize ==
+ le32_to_cpu(preply->ActualImageSize))
cmdStatus = 0;
- }
}
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n",
ioc->name, cmdStatus));
if (cmdStatus) {
-
- ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": fw upload failed, freeing image \n",
- ioc->name));
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, "
+ "freeing image \n", ioc->name));
mpt_free_fw_memory(ioc);
}
kfree(prequest);
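The rewritten upload path derives the handshake request size from offsetof() on the request layout plus the per-adapter SGE size, rather than maintaining a running sgeoffset by hand. A small sketch of that computation under assumed sizes (the struct here is a stand-in, not the MPI FWUpload_t layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical request layout: a fixed header followed by the SGL area. */
struct fw_upload_req {
        uint8_t  header[12];
        uint32_t sgl[8];        /* transaction-context SGE + data SGE go here */
};

int main(void)
{
        size_t tcsge_size = 8;          /* assumed transaction-context SGE */
        size_t sge_size   = 4 + 8;      /* flags/length + 64-bit address   */
        size_t request_size = offsetof(struct fw_upload_req, sgl)
                              + tcsge_size + sge_size;

        printf("handshake request size = %zu bytes\n", request_size);
        return 0;
}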
@@ -3723,6 +3943,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
+
+ if (!ignore)
+ return 0;
+
drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
"address=%p\n", ioc->name, __func__,
&ioc->chip->Doorbell, &ioc->chip->Reset_1078));
@@ -3740,6 +3964,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
"looking for READY STATE: doorbell=%x"
" count=%d\n",
ioc->name, doorbell, count));
+
if (doorbell == MPI_IOC_STATE_READY) {
return 1;
}
@@ -3890,6 +4115,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
doorbell &= MPI_IOC_STATE_MASK;
+ drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "looking for READY STATE: doorbell=%x"
+ " count=%d\n", ioc->name, doorbell, count));
+
if (doorbell == MPI_IOC_STATE_READY) {
break;
}
@@ -3901,6 +4130,11 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
mdelay (1000);
}
}
+
+ if (doorbell != MPI_IOC_STATE_READY)
+ printk(MYIOC_s_ERR_FMT "Failed to come READY "
+ "after reset! IocState=%x", ioc->name,
+ doorbell);
}
}
@@ -4019,8 +4253,9 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
if (sleepFlag != CAN_SLEEP)
count *= 10;
- printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n",
- ioc->name, (int)((count+5)/HZ));
+ printk(MYIOC_s_ERR_FMT
+ "Wait IOC_READY state (0x%x) timeout(%d)!\n",
+ ioc->name, state, (int)((count+5)/HZ));
return -ETIME;
}
@@ -4090,24 +4325,29 @@ initChainBuffers(MPT_ADAPTER *ioc)
* num_sge = num sge in request frame + last chain buffer
* scale = num sge per chain buffer if no chain element
*/
- scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
- if (sizeof(dma_addr_t) == sizeof(u64))
- num_sge = scale + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32));
+ scale = ioc->req_sz / ioc->SGE_size;
+ if (ioc->sg_addr_size == sizeof(u64))
+ num_sge = scale + (ioc->req_sz - 60) / ioc->SGE_size;
else
- num_sge = 1+ scale + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32));
+ num_sge = 1 + scale + (ioc->req_sz - 64) / ioc->SGE_size;
- if (sizeof(dma_addr_t) == sizeof(u64)) {
+ if (ioc->sg_addr_size == sizeof(u64)) {
numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
- (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32));
+ (ioc->req_sz - 60) / ioc->SGE_size;
} else {
- numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
- (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32));
+ numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) +
+ scale + (ioc->req_sz - 64) / ioc->SGE_size;
}
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n",
ioc->name, num_sge, numSGE));
- if ( numSGE > MPT_SCSI_SG_DEPTH )
- numSGE = MPT_SCSI_SG_DEPTH;
+ if (ioc->bus_type == FC) {
+ if (numSGE > MPT_SCSI_FC_SG_DEPTH)
+ numSGE = MPT_SCSI_FC_SG_DEPTH;
+ } else {
+ if (numSGE > MPT_SCSI_SG_DEPTH)
+ numSGE = MPT_SCSI_SG_DEPTH;
+ }
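initChainBuffers() sizes the chain pool from how many SGEs fit in one request frame (scale) and the IOC's MaxChainDepth, now expressed in terms of ioc->SGE_size instead of sizeof(dma_addr_t). A rough sketch of the arithmetic with assumed numbers (the 60-byte reserve mirrors the 64-bit branch above):

#include <stdio.h>

int main(void)
{
        int req_sz = 128;               /* request frame size, assumed */
        int sge_size = 4 + 8;           /* flags/length + 64-bit address */
        int max_chain_depth = 8;        /* assumed IOC MaxChainDepth */

        /* SGEs that fit in one request frame */
        int scale = req_sz / sge_size;
        /* the last slot of each frame is reused as a chain element, hence -1 */
        int num_sge_max = (scale - 1) * (max_chain_depth - 1) + scale +
                          (req_sz - 60) / sge_size;

        printf("scale=%d, max SGEs per I/O=%d\n", scale, num_sge_max);
        return 0;
}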
num_chain = 1;
while (numSGE - num_sge > 0) {
@@ -4161,12 +4401,42 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
dma_addr_t alloc_dma;
u8 *mem;
int i, reply_sz, sz, total_size, num_chain;
+ u64 dma_mask;
+
+ dma_mask = 0;
/* Prime reply FIFO... */
if (ioc->reply_frames == NULL) {
if ( (num_chain = initChainBuffers(ioc)) < 0)
return -1;
+ /*
+ * 1078 errata workaround for the 36GB limitation
+ */
+ if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
+ ioc->dma_mask > DMA_35BIT_MASK) {
+ if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(ioc->pcidev,
+ DMA_BIT_MASK(32))) {
+ dma_mask = DMA_35BIT_MASK;
+ d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "setting 35 bit addressing for "
+ "Request/Reply/Chain and Sense Buffers\n",
+ ioc->name));
+ } else {
+ /* Resetting DMA mask to 64 bit */
+ pci_set_dma_mask(ioc->pcidev,
+ DMA_BIT_MASK(64));
+ pci_set_consistent_dma_mask(ioc->pcidev,
+ DMA_BIT_MASK(64));
+
+ printk(MYIOC_s_ERR_FMT
+ "failed setting 35 bit addressing for "
+ "Request/Reply/Chain and Sense Buffers\n",
+ ioc->name);
+ return -1;
+ }
+ }
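The SAS1078 errata block keeps the request, reply, chain and sense buffers below a 2^35-byte boundary by temporarily dropping to a 32-bit coherent mask for these allocations, then restoring the wider mask afterwards. A tiny sketch of the limit check only (mask values, no PCI calls):

#include <stdint.h>
#include <stdio.h>

#define MASK_OF(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        uint64_t current_mask = MASK_OF(64);
        uint64_t errata_limit = MASK_OF(35);    /* 1078 addressing boundary */

        /* Buffers the 1078 shares must sit below the 35-bit boundary;
         * dropping to a 32-bit mask for these allocations guarantees it. */
        if (current_mask > errata_limit)
                printf("restrict coherent allocations to mask 0x%llx\n",
                       (unsigned long long)MASK_OF(32));
        return 0;
}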
total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth);
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n",
@@ -4305,9 +4575,16 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
alloc_dma += ioc->reply_sz;
}
+ if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev,
+ ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
+ ioc->dma_mask))
+ d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "restoring 64 bit addressing\n", ioc->name));
+
return 0;
out_fail:
+
if (ioc->alloc != NULL) {
sz = ioc->alloc_sz;
pci_free_consistent(ioc->pcidev,
@@ -4324,6 +4601,13 @@ out_fail:
ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
ioc->sense_buf_pool = NULL;
}
+
+ if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev,
+ DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
+ DMA_BIT_MASK(64)))
+ d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "restoring 64 bit addressing\n", ioc->name));
+
return -1;
}
@@ -4759,7 +5043,14 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
SasIoUnitControlReply_t *sasIoUnitCntrReply;
MPT_FRAME_HDR *mf = NULL;
MPIHeader_t *mpi_hdr;
+ int ret = 0;
+ unsigned long timeleft;
+
+ mutex_lock(&ioc->mptbase_cmds.mutex);
+ /* init the internal cmd struct */
+ memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
+ INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
/* ensure garbage is not sent to fw */
switch(persist_opcode) {
@@ -4769,17 +5060,19 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
break;
default:
- return -1;
- break;
+ ret = -1;
+ goto out;
}
- printk("%s: persist_opcode=%x\n",__func__, persist_opcode);
+ printk(KERN_DEBUG "%s: persist_opcode=%x\n",
+ __func__, persist_opcode);
/* Get a MF for this command.
*/
if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
- printk("%s: no msg frames!\n",__func__);
- return -1;
+ printk(KERN_DEBUG "%s: no msg frames!\n", __func__);
+ ret = -1;
+ goto out;
}
mpi_hdr = (MPIHeader_t *) mf;
@@ -4789,27 +5082,42 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
sasIoUnitCntrReq->Operation = persist_opcode;
- init_timer(&ioc->persist_timer);
- ioc->persist_timer.data = (unsigned long) ioc;
- ioc->persist_timer.function = mpt_timer_expired;
- ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */;
- ioc->persist_wait_done=0;
- add_timer(&ioc->persist_timer);
mpt_put_msg_frame(mpt_base_index, ioc, mf);
- wait_event(mpt_waitq, ioc->persist_wait_done);
+ timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ);
+ if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ printk(KERN_DEBUG "%s: failed\n", __func__);
+ if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out;
+ if (!timeleft) {
+ printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n",
+ ioc->name, __func__);
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+ }
+ goto out;
+ }
+
+ if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+ ret = -1;
+ goto out;
+ }
sasIoUnitCntrReply =
- (SasIoUnitControlReply_t *)ioc->persist_reply_frame;
+ (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply;
if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
- printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
- __func__,
- sasIoUnitCntrReply->IOCStatus,
+ printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
+ __func__, sasIoUnitCntrReply->IOCStatus,
sasIoUnitCntrReply->IOCLogInfo);
- return -1;
- }
+ printk(KERN_DEBUG "%s: failed\n", __func__);
+ ret = -1;
+ } else
+ printk(KERN_DEBUG "%s: success\n", __func__);
+ out:
- printk("%s: success\n",__func__);
- return 0;
+ CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
+ mutex_unlock(&ioc->mptbase_cmds.mutex);
+ return ret;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
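The persist operation now serializes callers on mptbase_cmds.mutex, posts the frame, and blocks on a completion with a 10-second timeout instead of arming a timer and sleeping on a wait queue. A user-space analogue of that mutex-plus-completion pattern using pthreads (names and status values are illustrative; compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Loose analogue of the MPT_MGMT pattern: a mutex serializes issuers, a
 * condvar-based "completion" is signalled by the reply path, and the
 * issuer waits with a timeout instead of arming a separate timer. */
struct mgmt_cmd {
        pthread_mutex_t mutex;          /* one management command at a time */
        pthread_mutex_t lock;           /* protects completed/status        */
        pthread_cond_t  done;
        int             completed;
        int             status;
};

static void *reply_path(void *arg)
{
        struct mgmt_cmd *cmd = arg;

        pthread_mutex_lock(&cmd->lock);
        cmd->status = 1;                /* COMMAND_GOOD */
        cmd->completed = 1;
        pthread_cond_signal(&cmd->done);
        pthread_mutex_unlock(&cmd->lock);
        return NULL;
}

int main(void)
{
        struct mgmt_cmd cmd = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
                PTHREAD_COND_INITIALIZER, 0, 0
        };
        struct timespec deadline;
        pthread_t t;
        int rc = 0;

        pthread_mutex_lock(&cmd.mutex);                 /* serialize callers */
        pthread_create(&t, NULL, reply_path, &cmd);

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 10;                          /* 10 s, as above */

        pthread_mutex_lock(&cmd.lock);
        while (!cmd.completed && rc == 0)
                rc = pthread_cond_timedwait(&cmd.done, &cmd.lock, &deadline);
        pthread_mutex_unlock(&cmd.lock);

        printf("%s\n", cmd.status ? "success" : "timed out -> hard reset");
        pthread_join(t, NULL);
        pthread_mutex_unlock(&cmd.mutex);
        return 0;
}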
@@ -5394,17 +5702,20 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
* -ENOMEM if pci_alloc failed
**/
int
-mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk)
+mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
+ RaidPhysDiskPage0_t *phys_disk)
{
- CONFIGPARMS cfg;
- ConfigPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
dma_addr_t dma_handle;
pRaidPhysDiskPage0_t buffer = NULL;
int rc;
memset(&cfg, 0 , sizeof(CONFIGPARMS));
memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+ memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t));
+ hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION;
hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
@@ -5451,6 +5762,161 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t
}
/**
+ * mpt_raid_phys_disk_get_num_paths - returns the number of paths associated with this phys_disk_num
+ * @ioc: Pointer to an Adapter Structure
+ * @phys_disk_num: io unit unique phys disk num generated by the ioc
+ *
+ * Return:
+ * returns the number of paths
+ **/
+int
+mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
+{
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ dma_addr_t dma_handle;
+ pRaidPhysDiskPage1_t buffer = NULL;
+ int rc;
+
+ memset(&cfg, 0 , sizeof(CONFIGPARMS));
+ memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+
+ hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
+ hdr.PageNumber = 1;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+ if (mpt_config(ioc, &cfg) != 0) {
+ rc = 0;
+ goto out;
+ }
+
+ if (!hdr.PageLength) {
+ rc = 0;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
+ &dma_handle);
+
+ if (!buffer) {
+ rc = 0;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ cfg.pageAddr = phys_disk_num;
+
+ if (mpt_config(ioc, &cfg) != 0) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = buffer->NumPhysDiskPaths;
+ out:
+
+ if (buffer)
+ pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
+ dma_handle);
+
+ return rc;
+}
+EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths);
+
+/**
+ * mpt_raid_phys_disk_pg1 - returns phys disk page 1
+ * @ioc: Pointer to an Adapter Structure
+ * @phys_disk_num: io unit unique phys disk num generated by the ioc
+ * @phys_disk: requested payload data returned
+ *
+ * Return:
+ * 0 on success
+ * -EFAULT if read of config page header fails or data pointer not NULL
+ * -ENOMEM if pci_alloc failed
+ **/
+int
+mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
+ RaidPhysDiskPage1_t *phys_disk)
+{
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ dma_addr_t dma_handle;
+ pRaidPhysDiskPage1_t buffer = NULL;
+ int rc;
+ int i;
+ __le64 sas_address;
+
+ memset(&cfg, 0 , sizeof(CONFIGPARMS));
+ memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+ rc = 0;
+
+ hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
+ hdr.PageNumber = 1;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+ if (mpt_config(ioc, &cfg) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (!hdr.PageLength) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
+ &dma_handle);
+
+ if (!buffer) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ cfg.pageAddr = phys_disk_num;
+
+ if (mpt_config(ioc, &cfg) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths;
+ phys_disk->PhysDiskNum = phys_disk_num;
+ for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) {
+ phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID;
+ phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus;
+ phys_disk->Path[i].OwnerIdentifier =
+ buffer->Path[i].OwnerIdentifier;
+ phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags);
+ memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64));
+ sas_address = le64_to_cpu(sas_address);
+ memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64));
+ memcpy(&sas_address,
+ &buffer->Path[i].OwnerWWID, sizeof(__le64));
+ sas_address = le64_to_cpu(sas_address);
+ memcpy(&phys_disk->Path[i].OwnerWWID,
+ &sas_address, sizeof(__le64));
+ }
+
+ out:
+
+ if (buffer)
+ pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
+ dma_handle);
+
+ return rc;
+}
+EXPORT_SYMBOL(mpt_raid_phys_disk_pg1);
+
+
+/**
* mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
* @ioc: Pointer to an Adapter Structure
*
@@ -5775,30 +6241,28 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
* SendEventNotification - Send EventNotification (on or off) request to adapter
* @ioc: Pointer to MPT_ADAPTER structure
* @EvSwitch: Event switch flags
+ * @sleepFlag: Specifies whether the process can sleep
*/
static int
-SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch)
+SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag)
{
- EventNotification_t *evnp;
+ EventNotification_t evn;
+ MPIDefaultReply_t reply_buf;
- evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc);
- if (evnp == NULL) {
- devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n",
- ioc->name));
- return 0;
- }
- memset(evnp, 0, sizeof(*evnp));
-
- devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventNotification (%d) request %p\n", ioc->name, EvSwitch, evnp));
+ memset(&evn, 0, sizeof(EventNotification_t));
+ memset(&reply_buf, 0, sizeof(MPIDefaultReply_t));
- evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
- evnp->ChainOffset = 0;
- evnp->MsgFlags = 0;
- evnp->Switch = EvSwitch;
+ evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
+ evn.Switch = EvSwitch;
+ evn.MsgContext = cpu_to_le32(mpt_base_index << 16);
- mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)evnp);
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Sending EventNotification (%d) request %p\n",
+ ioc->name, EvSwitch, &evn));
- return 0;
+ return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t),
+ (u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30,
+ sleepFlag);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5814,7 +6278,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
- ioc->name,__func__));
+ ioc->name, __func__));
return -1;
}
@@ -5851,12 +6315,19 @@ int
mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
{
Config_t *pReq;
+ ConfigReply_t *pReply;
ConfigExtendedPageHeader_t *pExtHdr = NULL;
MPT_FRAME_HDR *mf;
- unsigned long flags;
- int ii, rc;
+ int ii;
int flagsLength;
- int in_isr;
+ long timeout;
+ int ret;
+ u8 page_type = 0, extend_page;
+ unsigned long timeleft;
+ unsigned long flags;
+ int in_isr;
+ u8 issue_hard_reset = 0;
+ u8 retry_count = 0;
/* Prevent calling wait_event() (below), if caller happens
* to be in ISR context, because that is fatal!
@@ -5866,15 +6337,43 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n",
ioc->name));
return -EPERM;
+ }
+
+ /* don't send a config page during diag reset */
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: busy with host reset\n", ioc->name, __func__));
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ /* don't send if no chance of success */
+ if (!ioc->active ||
+ mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) {
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: ioc not operational, %d, %xh\n",
+ ioc->name, __func__, ioc->active,
+ mpt_GetIocState(ioc, 0)));
+ return -EFAULT;
}
+ retry_config:
+ mutex_lock(&ioc->mptbase_cmds.mutex);
+ /* init the internal cmd struct */
+ memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
+ INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
+
/* Get and Populate a free Frame
*/
if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
- dcprintk(ioc, printk(MYIOC_s_WARN_FMT "mpt_config: no msg frames!\n",
- ioc->name));
- return -EAGAIN;
+ dcprintk(ioc, printk(MYIOC_s_WARN_FMT
+ "mpt_config: no msg frames!\n", ioc->name));
+ ret = -EAGAIN;
+ goto out;
}
+
pReq = (Config_t *)mf;
pReq->Action = pCfg->action;
pReq->Reserved = 0;
@@ -5900,7 +6399,9 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
pReq->ExtPageType = pExtHdr->ExtPageType;
pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
- /* Page Length must be treated as a reserved field for the extended header. */
+ /* Page Length must be treated as a reserved field for the
+ * extended header.
+ */
pReq->Header.PageLength = 0;
}
@@ -5913,78 +6414,91 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
else
flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
- if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) {
+ if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
+ MPI_CONFIG_PAGETYPE_EXTENDED) {
flagsLength |= pExtHdr->ExtPageLength * 4;
-
- dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n",
- ioc->name, pReq->ExtPageType, pReq->Header.PageNumber, pReq->Action));
- }
- else {
+ page_type = pReq->ExtPageType;
+ extend_page = 1;
+ } else {
flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
-
- dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n",
- ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
+ page_type = pReq->Header.PageType;
+ extend_page = 0;
}
- mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
-
- /* Append pCfg pointer to end of mf
- */
- *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg;
-
- /* Initalize the timer
- */
- init_timer_on_stack(&pCfg->timer);
- pCfg->timer.data = (unsigned long) ioc;
- pCfg->timer.function = mpt_timer_expired;
- pCfg->wait_done = 0;
-
- /* Set the timer; ensure 10 second minimum */
- if (pCfg->timeout < 10)
- pCfg->timer.expires = jiffies + HZ*10;
- else
- pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
-
- /* Add to end of Q, set timer and then issue this command */
- spin_lock_irqsave(&ioc->FreeQlock, flags);
- list_add_tail(&pCfg->linkage, &ioc->configQ);
- spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+ dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Sending Config request type 0x%x, page 0x%x and action %d\n",
+ ioc->name, page_type, pReq->Header.PageNumber, pReq->Action));
- add_timer(&pCfg->timer);
+ ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
+ timeout = (pCfg->timeout < 15) ? HZ*15 : HZ*pCfg->timeout;
mpt_put_msg_frame(mpt_base_index, ioc, mf);
- wait_event(mpt_waitq, pCfg->wait_done);
+ timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done,
+ timeout);
+ if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Failed Sending Config request type 0x%x, page 0x%x,"
+ " action %d, status %xh, time left %ld\n\n",
+ ioc->name, page_type, pReq->Header.PageNumber,
+ pReq->Action, ioc->mptbase_cmds.status, timeleft));
+ if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out;
+ if (!timeleft)
+ issue_hard_reset = 1;
+ goto out;
+ }
- /* mf has been freed - do not access */
+ if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+ ret = -1;
+ goto out;
+ }
+ pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply;
+ ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+ if (ret == MPI_IOCSTATUS_SUCCESS) {
+ if (extend_page) {
+ pCfg->cfghdr.ehdr->ExtPageLength =
+ le16_to_cpu(pReply->ExtPageLength);
+ pCfg->cfghdr.ehdr->ExtPageType =
+ pReply->ExtPageType;
+ }
+ pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
+ pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
+ pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
+ pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
- rc = pCfg->status;
+ }
- return rc;
-}
+ if (retry_count)
+ printk(MYIOC_s_INFO_FMT "Retry completed "
+ "ret=0x%x timeleft=%ld\n",
+ ioc->name, ret, timeleft);
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- * mpt_timer_expired - Callback for timer process.
- * Used only internal config functionality.
- * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long
- */
-static void
-mpt_timer_expired(unsigned long data)
-{
- MPT_ADAPTER *ioc = (MPT_ADAPTER *) data;
-
- dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired! \n", ioc->name));
+ dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n",
+ ret, le32_to_cpu(pReply->IOCLogInfo)));
- /* Perform a FW reload */
- if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
- printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name);
+out:
- /* No more processing.
- * Hard reset clean-up will wake up
- * process and free all resources.
- */
- dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired complete!\n", ioc->name));
+ CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
+ mutex_unlock(&ioc->mptbase_cmds.mutex);
+ if (issue_hard_reset) {
+ issue_hard_reset = 0;
+ printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+ ioc->name, __func__);
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+ /* attempt one retry for a timed out command */
+ if (!retry_count) {
+ printk(MYIOC_s_INFO_FMT
+ "Attempting Retry Config request"
+ " type 0x%x, page 0x%x,"
+ " action %d\n", ioc->name, page_type,
+ pCfg->cfghdr.hdr->PageNumber, pCfg->action);
+ retry_count++;
+ goto retry_config;
+ }
+ }
+ return ret;
- return;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
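On a timed-out config request the new mpt_config() issues a hard reset and then retries exactly once via the retry_config label. A compact sketch of that bounded-retry shape (issue_config() is a made-up stand-in for the post-and-wait step, with -2 modelling the timeout case):

#include <stdio.h>

/* Stand-in for posting the request and waiting on the completion. */
static int issue_config(int attempt)
{
        return attempt == 0 ? -2 : 0;   /* first try times out, retry works */
}

int main(void)
{
        int retry_count = 0;
        int ret;

        for (;;) {
                ret = issue_config(retry_count);
                if (ret != -2)
                        break;                          /* done or hard error */
                printf("timeout: issuing hard reset\n");
                if (retry_count++)
                        break;                          /* at most one retry */
                printf("retrying config request\n");
        }
        printf("ret=%d, retries=%d\n", ret, retry_count);
        return 0;
}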
@@ -5998,41 +6512,34 @@ mpt_timer_expired(unsigned long data)
static int
mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
- CONFIGPARMS *pCfg;
- unsigned long flags;
-
- dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- ": IOC %s_reset routed to MPT base driver!\n",
- ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
- reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
-
- if (reset_phase == MPT_IOC_SETUP_RESET) {
- ;
- } else if (reset_phase == MPT_IOC_PRE_RESET) {
- /* If the internal config Q is not empty -
- * delete timer. MF resources will be freed when
- * the FIFO's are primed.
- */
- spin_lock_irqsave(&ioc->FreeQlock, flags);
- list_for_each_entry(pCfg, &ioc->configQ, linkage)
- del_timer(&pCfg->timer);
- spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-
- } else {
- CONFIGPARMS *pNext;
-
- /* Search the configQ for internal commands.
- * Flush the Q, and wake up all suspended threads.
- */
- spin_lock_irqsave(&ioc->FreeQlock, flags);
- list_for_each_entry_safe(pCfg, pNext, &ioc->configQ, linkage) {
- list_del(&pCfg->linkage);
-
- pCfg->status = MPT_CONFIG_ERROR;
- pCfg->wait_done = 1;
- wake_up(&mpt_waitq);
+ switch (reset_phase) {
+ case MPT_IOC_SETUP_RESET:
+ ioc->taskmgmt_quiesce_io = 1;
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+ break;
+ case MPT_IOC_PRE_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT_IOC_POST_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
+/* wake up mptbase_cmds */
+ if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->mptbase_cmds.status |=
+ MPT_MGMT_STATUS_DID_IOCRESET;
+ complete(&ioc->mptbase_cmds.done);
}
- spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+/* wake up taskmgmt_cmds */
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->taskmgmt_cmds.status |=
+ MPT_MGMT_STATUS_DID_IOCRESET;
+ complete(&ioc->taskmgmt_cmds.done);
+ }
+ break;
+ default:
+ break;
}
return 1; /* currently means nothing really */
@@ -6344,6 +6851,59 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
*size = y;
}
+/**
+ * mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ *
+ * If -1 is returned, then it was not possible to set the flags
+ **/
+int
+mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
+{
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress ||
+ (ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) {
+ retval = -1;
+ goto out;
+ }
+ retval = 0;
+ ioc->taskmgmt_in_progress = 1;
+ ioc->taskmgmt_quiesce_io = 1;
+ if (ioc->alt_ioc) {
+ ioc->alt_ioc->taskmgmt_in_progress = 1;
+ ioc->alt_ioc->taskmgmt_quiesce_io = 1;
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return retval;
+}
+EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag);
+
+/**
+ * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+void
+mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ ioc->taskmgmt_in_progress = 0;
+ ioc->taskmgmt_quiesce_io = 0;
+ if (ioc->alt_ioc) {
+ ioc->alt_ioc->taskmgmt_in_progress = 0;
+ ioc->alt_ioc->taskmgmt_quiesce_io = 0;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+}
+EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag);
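The set/clear helpers above take taskmgmt_lock so that a task-management request, a host reset, and the alt_ioc flags cannot race one another. A user-space sketch of the same guarded test-and-set (single adapter, a pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t taskmgmt_lock = PTHREAD_MUTEX_INITIALIZER;
static int reset_in_progress;
static int taskmgmt_in_progress;

/* Returns 0 if the flag was taken, -1 if a reset or another TM owns it. */
static int set_taskmgmt_in_progress(void)
{
        int ret = -1;

        pthread_mutex_lock(&taskmgmt_lock);
        if (!reset_in_progress && !taskmgmt_in_progress) {
                taskmgmt_in_progress = 1;
                ret = 0;
        }
        pthread_mutex_unlock(&taskmgmt_lock);
        return ret;
}

static void clear_taskmgmt_in_progress(void)
{
        pthread_mutex_lock(&taskmgmt_lock);
        taskmgmt_in_progress = 0;
        pthread_mutex_unlock(&taskmgmt_lock);
}

int main(void)
{
        if (set_taskmgmt_in_progress() == 0) {
                printf("task management slot owned\n");
                clear_taskmgmt_in_progress();
        }
        return 0;
}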
/**
@@ -6397,7 +6957,9 @@ int
mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
{
int rc;
+ u8 cb_idx;
unsigned long flags;
+ unsigned long time_count;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name));
#ifdef MFCNT
@@ -6410,14 +6972,15 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
/* Reset the adapter. Prevent more than 1 call to
* mpt_do_ioc_recovery at any instant in time.
*/
- spin_lock_irqsave(&ioc->diagLock, flags);
- if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)){
- spin_unlock_irqrestore(&ioc->diagLock, flags);
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
return 0;
- } else {
- ioc->diagPending = 1;
}
- spin_unlock_irqrestore(&ioc->diagLock, flags);
+ ioc->ioc_reset_in_progress = 1;
+ if (ioc->alt_ioc)
+ ioc->alt_ioc->ioc_reset_in_progress = 1;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
/* FIXME: If do_ioc_recovery fails, repeat....
*/
@@ -6427,47 +6990,57 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
* Prevents timeouts occurring during a diagnostic reset...very bad.
* For all other protocol drivers, this is a no-op.
*/
- {
- u8 cb_idx;
- int r = 0;
-
- for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
- if (MptResetHandlers[cb_idx]) {
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling IOC reset_setup handler #%d\n",
- ioc->name, cb_idx));
- r += mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
- if (ioc->alt_ioc) {
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling alt-%s setup reset handler #%d\n",
- ioc->name, ioc->alt_ioc->name, cb_idx));
- r += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_SETUP_RESET);
- }
- }
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx]) {
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
+ if (ioc->alt_ioc)
+ mpt_signal_reset(cb_idx, ioc->alt_ioc,
+ MPT_IOC_SETUP_RESET);
}
}
- if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) {
- printk(MYIOC_s_WARN_FMT "Cannot recover rc = %d!\n", ioc->name, rc);
+ time_count = jiffies;
+ rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag);
+ if (rc != 0) {
+ printk(KERN_WARNING MYNAM
+ ": WARNING - (%d) Cannot recover %s\n", rc, ioc->name);
+ } else {
+ if (ioc->hard_resets < -1)
+ ioc->hard_resets++;
}
- ioc->reload_fw = 0;
- if (ioc->alt_ioc)
- ioc->alt_ioc->reload_fw = 0;
- spin_lock_irqsave(&ioc->diagLock, flags);
- ioc->diagPending = 0;
- if (ioc->alt_ioc)
- ioc->alt_ioc->diagPending = 0;
- spin_unlock_irqrestore(&ioc->diagLock, flags);
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ ioc->ioc_reset_in_progress = 0;
+ ioc->taskmgmt_quiesce_io = 0;
+ ioc->taskmgmt_in_progress = 0;
+ if (ioc->alt_ioc) {
+ ioc->alt_ioc->ioc_reset_in_progress = 0;
+ ioc->alt_ioc->taskmgmt_quiesce_io = 0;
+ ioc->alt_ioc->taskmgmt_in_progress = 0;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler rc = %d!\n", ioc->name, rc));
+ dtmprintk(ioc,
+ printk(MYIOC_s_DEBUG_FMT
+ "HardResetHandler: completed (%d seconds): %s\n", ioc->name,
+ jiffies_to_msecs(jiffies - time_count)/1000, ((rc == 0) ?
+ "SUCCESS" : "FAILED")));
return rc;
}
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#ifdef CONFIG_FUSION_LOGGING
static void
-EventDescriptionStr(u8 event, u32 evData0, char *evStr)
+mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
{
char *ds = NULL;
+ u32 evData0;
+ int ii;
+ u8 event;
+ char *evStr = ioc->evStr;
+
+ event = le32_to_cpu(pEventReply->Event) & 0xFF;
+ evData0 = le32_to_cpu(pEventReply->Data[0]);
switch(event) {
case MPI_EVENT_NONE:
@@ -6501,9 +7074,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
ds = "Loop State(LIP) Change";
else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
- ds = "Loop State(LPE) Change"; /* ??? */
+ ds = "Loop State(LPE) Change";
else
- ds = "Loop State(LPB) Change"; /* ??? */
+ ds = "Loop State(LPB) Change";
break;
case MPI_EVENT_LOGOUT:
ds = "Logout";
@@ -6703,28 +7276,65 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
}
case MPI_EVENT_IR2:
{
+ u8 id = (u8)(evData0);
+ u8 channel = (u8)(evData0 >> 8);
+ u8 phys_num = (u8)(evData0 >> 24);
u8 ReasonCode = (u8)(evData0 >> 16);
+
switch (ReasonCode) {
case MPI_EVENT_IR2_RC_LD_STATE_CHANGED:
- ds = "IR2: LD State Changed";
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: LD State Changed: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_PD_STATE_CHANGED:
- ds = "IR2: PD State Changed";
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: PD State Changed "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL:
- ds = "IR2: Bad Block Table Full";
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: Bad Block Table Full: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_PD_INSERTED:
- ds = "IR2: PD Inserted";
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: PD Inserted: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_PD_REMOVED:
- ds = "IR2: PD Removed";
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: PD Removed: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
- ds = "IR2: Foreign CFG Detected";
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: Foreign CFG Detected: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
break;
case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR:
- ds = "IR2: Rebuild Medium Error";
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: Rebuild Medium Error: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
+ break;
+ case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: Dual Port Added: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
+ break;
+ case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: Dual Port Removed: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
break;
default:
ds = "IR2";
@@ -6760,13 +7370,18 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
{
u8 reason = (u8)(evData0);
- u8 port_num = (u8)(evData0 >> 8);
- u16 handle = le16_to_cpu(evData0 >> 16);
- snprintf(evStr, EVENT_DESCR_STR_SZ,
- "SAS Initiator Device Status Change: reason=0x%02x "
- "port=%d handle=0x%04x",
- reason, port_num, handle);
+ switch (reason) {
+ case MPI_EVENT_SAS_INIT_RC_ADDED:
+ ds = "SAS Initiator Status Change: Added";
+ break;
+ case MPI_EVENT_SAS_INIT_RC_REMOVED:
+ ds = "SAS Initiator Status Change: Deleted";
+ break;
+ default:
+ ds = "SAS Initiator Status Change";
+ break;
+ }
break;
}
@@ -6814,6 +7429,24 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
break;
}
+ case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
+ {
+ u8 reason = (u8)(evData0);
+
+ switch (reason) {
+ case MPI_EVENT_SAS_EXP_RC_ADDED:
+ ds = "Expander Status Change: Added";
+ break;
+ case MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING:
+ ds = "Expander Status Change: Deleted";
+ break;
+ default:
+ ds = "Expander Status Change";
+ break;
+ }
+ break;
+ }
+
/*
* MPT base "custom" events may be added here...
*/
@@ -6823,8 +7456,20 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
}
if (ds)
strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
-}
+
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "MPT event:(%02Xh) : %s\n",
+ ioc->name, event, evStr));
+
+ devtverboseprintk(ioc, printk(KERN_DEBUG MYNAM
+ ": Event data:\n"));
+ for (ii = 0; ii < le16_to_cpu(pEventReply->EventDataLength); ii++)
+ devtverboseprintk(ioc, printk(" %08x",
+ le32_to_cpu(pEventReply->Data[ii])));
+ devtverboseprintk(ioc, printk(KERN_DEBUG "\n"));
+}
+#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* ProcessEventNotification - Route EventNotificationReply to all event handlers
@@ -6841,37 +7486,24 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
{
u16 evDataLen;
u32 evData0 = 0;
-// u32 evCtx;
int ii;
u8 cb_idx;
int r = 0;
int handlers = 0;
- char evStr[EVENT_DESCR_STR_SZ];
u8 event;
/*
* Do platform normalization of values
*/
event = le32_to_cpu(pEventReply->Event) & 0xFF;
-// evCtx = le32_to_cpu(pEventReply->EventContext);
evDataLen = le16_to_cpu(pEventReply->EventDataLength);
if (evDataLen) {
evData0 = le32_to_cpu(pEventReply->Data[0]);
}
- EventDescriptionStr(event, evData0, evStr);
- devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event:(%02Xh) : %s\n",
- ioc->name,
- event,
- evStr));
-
#ifdef CONFIG_FUSION_LOGGING
- devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- ": Event data:\n", ioc->name));
- for (ii = 0; ii < evDataLen; ii++)
- devtverboseprintk(ioc, printk(" %08x",
- le32_to_cpu(pEventReply->Data[ii])));
- devtverboseprintk(ioc, printk("\n"));
+ if (evDataLen)
+ mpt_display_event_info(ioc, pEventReply);
#endif
/*
@@ -6926,8 +7558,9 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
*/
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptEvHandlers[cb_idx]) {
- devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Routing Event to event handler #%d\n",
- ioc->name, cb_idx));
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Routing Event to event handler #%d\n",
+ ioc->name, cb_idx));
r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply);
handlers++;
}
@@ -7011,8 +7644,6 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
switch (info) {
case 0x00010000:
desc = "bug! MID not found";
- if (ioc->reload_fw == 0)
- ioc->reload_fw++;
break;
case 0x00020000:
@@ -7613,7 +8244,6 @@ EXPORT_SYMBOL(mpt_get_msg_frame);
EXPORT_SYMBOL(mpt_put_msg_frame);
EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri);
EXPORT_SYMBOL(mpt_free_msg_frame);
-EXPORT_SYMBOL(mpt_add_sge);
EXPORT_SYMBOL(mpt_send_handshake_request);
EXPORT_SYMBOL(mpt_verify_adapter);
EXPORT_SYMBOL(mpt_GetIocState);
@@ -7650,7 +8280,7 @@ fusion_init(void)
/* Register ourselves (mptbase) in order to facilitate
* EventNotification handling.
*/
- mpt_base_index = mpt_register(mpt_base_reply, MPTBASE_DRIVER);
+ mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER);
/* Register for hard reset handling callbacks.
*/
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b3e981d2a50..1c8514dc31c 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.04.07"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.07"
+#define MPT_LINUX_VERSION_COMMON "3.04.10"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.09"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -104,6 +104,7 @@
#endif
#define MPT_NAME_LENGTH 32
+#define MPT_KOBJ_NAME_LEN 20
#define MPT_PROCFS_MPTBASEDIR "mpt"
/* chg it to "driver/fusion" ? */
@@ -134,6 +135,7 @@
#define MPT_COALESCING_TIMEOUT 0x10
+
/*
* SCSI transfer rate defines.
*/
@@ -161,10 +163,10 @@
/*
* Set the MAX_SGE value based on user input.
*/
-#ifdef CONFIG_FUSION_MAX_SGE
-#if CONFIG_FUSION_MAX_SGE < 16
+#ifdef CONFIG_FUSION_MAX_SGE
+#if CONFIG_FUSION_MAX_SGE < 16
#define MPT_SCSI_SG_DEPTH 16
-#elif CONFIG_FUSION_MAX_SGE > 128
+#elif CONFIG_FUSION_MAX_SGE > 128
#define MPT_SCSI_SG_DEPTH 128
#else
#define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE
@@ -173,6 +175,18 @@
#define MPT_SCSI_SG_DEPTH 40
#endif
+#ifdef CONFIG_FUSION_MAX_FC_SGE
+#if CONFIG_FUSION_MAX_FC_SGE < 16
+#define MPT_SCSI_FC_SG_DEPTH 16
+#elif CONFIG_FUSION_MAX_FC_SGE > 256
+#define MPT_SCSI_FC_SG_DEPTH 256
+#else
+#define MPT_SCSI_FC_SG_DEPTH CONFIG_FUSION_MAX_FC_SGE
+#endif
+#else
+#define MPT_SCSI_FC_SG_DEPTH 40
+#endif
+
/* debug print string length used for events and iocstatus */
# define EVENT_DESCR_STR_SZ 100
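
The new CONFIG_FUSION_MAX_FC_SGE block mirrors the existing CONFIG_FUSION_MAX_SGE clamp above it: the Kconfig value is bounded to the range 16..256, and the depth falls back to 40 when the option is not set. For example, under an assumed CONFIG_FUSION_MAX_FC_SGE=512 the preprocessor resolves the block to

	#define MPT_SCSI_FC_SG_DEPTH	256	/* 512 clamped to the upper bound */

while a kernel built without the option gets MPT_SCSI_FC_SG_DEPTH 40.
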
@@ -431,38 +445,36 @@ do { \
* IOCTL structure and associated defines
*/
-#define MPT_IOCTL_STATUS_DID_IOCRESET 0x01 /* IOC Reset occurred on the current*/
-#define MPT_IOCTL_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */
-#define MPT_IOCTL_STATUS_TIMER_ACTIVE 0x04 /* The timer is running */
-#define MPT_IOCTL_STATUS_SENSE_VALID 0x08 /* Sense data is valid */
-#define MPT_IOCTL_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */
-#define MPT_IOCTL_STATUS_TMTIMER_ACTIVE 0x20 /* The TM timer is running */
-#define MPT_IOCTL_STATUS_TM_FAILED 0x40 /* User TM request failed */
-
#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */
-typedef struct _MPT_IOCTL {
- struct _MPT_ADAPTER *ioc;
- u8 ReplyFrame[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
- u8 sense[MPT_SENSE_BUFFER_ALLOC];
- int wait_done; /* wake-up value for this ioc */
- u8 rsvd;
- u8 status; /* current command status */
- u8 reset; /* 1 if bus reset allowed */
- u8 id; /* target for reset */
- struct mutex ioctl_mutex;
-} MPT_IOCTL;
-
-#define MPT_SAS_MGMT_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */
-#define MPT_SAS_MGMT_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */
-#define MPT_SAS_MGMT_STATUS_TM_FAILED 0x40 /* User TM request failed */
-
-typedef struct _MPT_SAS_MGMT {
+#define MPT_MGMT_STATUS_RF_VALID 0x01 /* The Reply Frame is VALID */
+#define MPT_MGMT_STATUS_COMMAND_GOOD 0x02 /* Command Status GOOD */
+#define MPT_MGMT_STATUS_PENDING 0x04 /* command is pending */
+#define MPT_MGMT_STATUS_DID_IOCRESET 0x08 /* IOC Reset occurred
+ on the current*/
+#define MPT_MGMT_STATUS_SENSE_VALID 0x10 /* valid sense info */
+#define MPT_MGMT_STATUS_TIMER_ACTIVE 0x20 /* obsolete */
+#define MPT_MGMT_STATUS_FREE_MF 0x40 /* free the mf from
+ complete routine */
+
+#define INITIALIZE_MGMT_STATUS(status) \
+ status = MPT_MGMT_STATUS_PENDING;
+#define CLEAR_MGMT_STATUS(status) \
+ status = 0;
+#define CLEAR_MGMT_PENDING_STATUS(status) \
+ status &= ~MPT_MGMT_STATUS_PENDING;
+#define SET_MGMT_MSG_CONTEXT(msg_context, value) \
+ msg_context = value;
+
+typedef struct _MPT_MGMT {
struct mutex mutex;
struct completion done;
u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
+ u8 sense[MPT_SENSE_BUFFER_ALLOC];
u8 status; /* current command status */
-}MPT_SAS_MGMT;
+ int completion_code;
+ u32 msg_context;
+} MPT_MGMT;
/*
* Event Structure and define
@@ -564,6 +576,10 @@ struct mptfc_rport_info
u8 flags;
};
+typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
+typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
+ dma_addr_t dma_addr);
+
/*
* Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
*/
@@ -573,6 +589,10 @@ typedef struct _MPT_ADAPTER
int pci_irq; /* This irq */
char name[MPT_NAME_LENGTH]; /* "iocN" */
char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */
+#ifdef CONFIG_FUSION_LOGGING
+ /* used in mpt_display_event_info */
+ char evStr[EVENT_DESCR_STR_SZ];
+#endif
char board_name[16];
char board_assembly[16];
char board_tracer[16];
@@ -600,6 +620,10 @@ typedef struct _MPT_ADAPTER
int reply_depth; /* Num Allocated reply frames */
int reply_sz; /* Reply frame size */
int num_chain; /* Number of chain buffers */
+ MPT_ADD_SGE add_sge; /* Pointer to add_sge
+ function */
+ MPT_ADD_CHAIN add_chain; /* Pointer to add_chain
+ function */
/* Pool of buffers for chaining. ReqToChain
* and ChainToChain track index of chain buffers.
* ChainBuffer (DMA) virt/phys addresses.
@@ -640,11 +664,8 @@ typedef struct _MPT_ADAPTER
RaidCfgData raid_data; /* Raid config. data */
SasCfgData sas_data; /* Sas config. data */
FcCfgData fc_data; /* Fc config. data */
- MPT_IOCTL *ioctl; /* ioctl data pointer */
struct proc_dir_entry *ioc_dentry;
struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
- spinlock_t diagLock; /* diagnostic reset lock */
- int diagPending;
u32 biosVersion; /* BIOS version from IO Unit Page 2 */
int eventTypes; /* Event logging parameters */
int eventContext; /* Next event context */
@@ -652,7 +673,6 @@ typedef struct _MPT_ADAPTER
struct _mpt_ioctl_events *events; /* pointer to event log */
u8 *cached_fw; /* Pointer to FW */
dma_addr_t cached_fw_dma;
- struct list_head configQ; /* linked list of config. requests */
int hs_reply_idx;
#ifndef MFCNT
u32 pad0;
@@ -665,9 +685,6 @@ typedef struct _MPT_ADAPTER
IOCFactsReply_t facts;
PortFactsReply_t pfacts[2];
FCPortPage0_t fc_port_page0[2];
- struct timer_list persist_timer; /* persist table timer */
- int persist_wait_done; /* persist completion flag */
- u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
LANPage0_t lan_cnfg_page0;
LANPage1_t lan_cnfg_page1;
@@ -682,23 +699,44 @@ typedef struct _MPT_ADAPTER
int aen_event_read_flag; /* flag to indicate event log was read*/
u8 FirstWhoInit;
u8 upload_fw; /* If set, do a fw upload */
- u8 reload_fw; /* Force a FW Reload on next reset */
u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
u8 pad1[4];
u8 DoneCtx;
u8 TaskCtx;
u8 InternalCtx;
- spinlock_t initializing_hba_lock;
- int initializing_hba_lock_flag;
struct list_head list;
struct net_device *netdev;
struct list_head sas_topology;
struct mutex sas_topology_mutex;
+
+ struct workqueue_struct *fw_event_q;
+ struct list_head fw_event_list;
+ spinlock_t fw_event_lock;
+ u8 fw_events_off; /* if '1', then ignore events */
+ char fw_event_q_name[MPT_KOBJ_NAME_LEN];
+
struct mutex sas_discovery_mutex;
u8 sas_discovery_runtime;
u8 sas_discovery_ignore_events;
+
+ /* port_info object for the host */
+ struct mptsas_portinfo *hba_port_info;
+ u64 hba_port_sas_addr;
+ u16 hba_port_num_phy;
+ struct list_head sas_device_info_list;
+ struct mutex sas_device_info_mutex;
+ u8 old_sas_discovery_protocal;
+ u8 sas_discovery_quiesce_io;
int sas_index; /* index refrencing */
- MPT_SAS_MGMT sas_mgmt;
+ MPT_MGMT sas_mgmt;
+ MPT_MGMT mptbase_cmds; /* for sending config pages */
+ MPT_MGMT internal_cmds;
+ MPT_MGMT taskmgmt_cmds;
+ MPT_MGMT ioctl_cmds;
+ spinlock_t taskmgmt_lock; /* diagnostic reset lock */
+ int taskmgmt_in_progress;
+ u8 taskmgmt_quiesce_io;
+ u8 ioc_reset_in_progress;
struct work_struct sas_persist_task;
struct work_struct fc_setup_reset_work;
@@ -707,15 +745,27 @@ typedef struct _MPT_ADAPTER
u8 fc_link_speed[2];
spinlock_t fc_rescan_work_lock;
struct work_struct fc_rescan_work;
- char fc_rescan_work_q_name[20];
+ char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN];
struct workqueue_struct *fc_rescan_work_q;
+
+ /* driver forced bus resets count */
+ unsigned long hard_resets;
+ /* fw/external bus resets count */
+ unsigned long soft_resets;
+ /* cmd timeouts */
+ unsigned long timeouts;
+
struct scsi_cmnd **ScsiLookup;
spinlock_t scsi_lookup_lock;
-
- char reset_work_q_name[20];
+ u64 dma_mask;
+ u32 broadcast_aen_busy;
+ char reset_work_q_name[MPT_KOBJ_NAME_LEN];
struct workqueue_struct *reset_work_q;
struct delayed_work fault_reset_work;
- spinlock_t fault_reset_work_lock;
+
+ u8 sg_addr_size;
+ u8 in_rescan;
+ u8 SGE_size;
} MPT_ADAPTER;
@@ -753,13 +803,14 @@ typedef struct _mpt_sge {
dma_addr_t Address;
} MptSge_t;
-#define mpt_addr_size() \
- ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SGE_FLAGS_64_BIT_ADDRESSING : \
- MPI_SGE_FLAGS_32_BIT_ADDRESSING)
-#define mpt_msg_flags() \
- ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
- MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32)
+#define mpt_msg_flags(ioc) \
+ (ioc->sg_addr_size == sizeof(u64)) ? \
+ MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
+ MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32
+
+#define MPT_SGE_FLAGS_64_BIT_ADDRESSING \
+ (MPI_SGE_FLAGS_64_BIT_ADDRESSING << MPI_SGE_FLAGS_SHIFT)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
@@ -835,22 +886,14 @@ typedef struct _MPT_SCSI_HOST {
/* Pool of memory for holding SCpnts before doing
* OS callbacks. freeQ is the free pool.
*/
- u8 tmPending;
- u8 resetPending;
u8 negoNvram; /* DV disabled, nego NVRAM */
u8 pad1;
- u8 tmState;
u8 rsvd[2];
MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */
struct scsi_cmnd *abortSCpnt;
MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */
- unsigned long hard_resets; /* driver forced bus resets count */
- unsigned long soft_resets; /* fw/external bus resets count */
- unsigned long timeouts; /* cmd timeouts */
ushort sel_timeout[MPT_MAX_FC_DEVICES];
char *info_kbuf;
- wait_queue_head_t scandv_waitq;
- int scandv_wait_done;
long last_queue_full;
u16 tm_iocstatus;
u16 spi_pending;
@@ -870,21 +913,16 @@ struct scsi_cmnd;
* Generic structure passed to the base mpt_config function.
*/
typedef struct _x_config_parms {
- struct list_head linkage; /* linked list */
- struct timer_list timer; /* timer function for this request */
union {
ConfigExtendedPageHeader_t *ehdr;
ConfigPageHeader_t *hdr;
} cfghdr;
dma_addr_t physAddr;
- int wait_done; /* wait for this request */
u32 pageAddr; /* properly formatted */
+ u16 status;
u8 action;
u8 dir;
u8 timeout; /* seconds */
- u8 pad1;
- u16 status;
- u16 pad2;
} CONFIGPARMS;
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -909,7 +947,6 @@ extern MPT_FRAME_HDR *mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc);
extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
-extern void mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr);
extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag);
extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
@@ -922,6 +959,12 @@ extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk);
+extern int mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
+ pRaidPhysDiskPage1_t phys_disk);
+extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc,
+ u8 phys_disk_num);
+extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
+extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
extern void mpt_halt_firmware(MPT_ADAPTER *ioc);
@@ -959,7 +1002,6 @@ extern int mpt_fwfault_debug;
#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000)
#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000)
#define MPT_SGE_FLAGS_DIRECTION (0x04000000)
-#define MPT_SGE_FLAGS_ADDRESSING (mpt_addr_size() << MPI_SGE_FLAGS_SHIFT)
#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000)
#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000)
@@ -972,14 +1014,12 @@ extern int mpt_fwfault_debug;
MPT_SGE_FLAGS_END_OF_BUFFER | \
MPT_SGE_FLAGS_END_OF_LIST | \
MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
- MPT_SGE_FLAGS_ADDRESSING | \
MPT_TRANSFER_IOC_TO_HOST)
#define MPT_SGE_FLAGS_SSIMPLE_WRITE \
(MPT_SGE_FLAGS_LAST_ELEMENT | \
MPT_SGE_FLAGS_END_OF_BUFFER | \
MPT_SGE_FLAGS_END_OF_LIST | \
MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
- MPT_SGE_FLAGS_ADDRESSING | \
MPT_TRANSFER_HOST_TO_IOC)
/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
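
Every user of the new MPT_MGMT bookkeeping follows the same reply-side convention: match the stored msg_context, record the status bits, and complete the waiter only while MPT_MGMT_STATUS_PENDING is still set, so a late reply after a timeout cannot signal a stale completion. A minimal sketch of that convention, using the internal_cmds slot purely as an example (the mydrv_reply name is illustrative):

/* Sketch of the reply-side MPT_MGMT convention; mirrors what mptctl.c
 * does for ioctl_cmds and taskmgmt_cmds further down. */
static int
mydrv_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
{
	if (!req || ioc->internal_cmds.msg_context != req->u.hdr.MsgContext)
		return 0;			/* not the frame we sent */

	ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
	if (reply) {
		ioc->internal_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
		memcpy(ioc->internal_cmds.reply, reply,
		    min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength));
	}

	if (ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING) {
		ioc->internal_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
		complete(&ioc->internal_cmds.done);	/* wake the issuer */
	}
	return 1;
}
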
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index c63817117c0..9b2e2198aee 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -84,6 +84,7 @@ MODULE_VERSION(my_VERSION);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS;
static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait );
@@ -127,10 +128,7 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags
struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
struct buflist *buflist, MPT_ADAPTER *ioc);
-static void mptctl_timeout_expired (MPT_IOCTL *ioctl);
-static int mptctl_bus_reset(MPT_IOCTL *ioctl);
-static int mptctl_set_tm_flags(MPT_SCSI_HOST *hd);
-static void mptctl_free_tm_flags(MPT_ADAPTER *ioc);
+static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function);
/*
* Reset Handler cleanup function
@@ -183,10 +181,10 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
int rc = 0;
if (nonblock) {
- if (!mutex_trylock(&ioc->ioctl->ioctl_mutex))
+ if (!mutex_trylock(&ioc->ioctl_cmds.mutex))
rc = -EAGAIN;
} else {
- if (mutex_lock_interruptible(&ioc->ioctl->ioctl_mutex))
+ if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex))
rc = -ERESTARTSYS;
}
return rc;
@@ -202,99 +200,78 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
static int
mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
{
- char *sense_data;
- int sz, req_index;
- u16 iocStatus;
- u8 cmd;
+ char *sense_data;
+ int req_index;
+ int sz;
- if (req)
- cmd = req->u.hdr.Function;
- else
- return 1;
- dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tcompleting mpi function (0x%02X), req=%p, "
- "reply=%p\n", ioc->name, req->u.hdr.Function, req, reply));
-
- if (ioc->ioctl) {
-
- if (reply==NULL) {
-
- dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_reply() NULL Reply "
- "Function=%x!\n", ioc->name, cmd));
+ if (!req)
+ return 0;
- ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
- ioc->ioctl->reset &= ~MPTCTL_RESET_OK;
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function "
+ "(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function,
+ req, reply));
- /* We are done, issue wake up
- */
- ioc->ioctl->wait_done = 1;
- wake_up (&mptctl_wait);
- return 1;
+ /*
+	 * Handle continuation of the same reply: process the first
+	 * reply and eat the other replies that come later.
+ */
+ if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext)
+ goto out_continuation;
- }
+ ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
- /* Copy the reply frame (which much exist
- * for non-SCSI I/O) to the IOC structure.
- */
- memcpy(ioc->ioctl->ReplyFrame, reply,
- min(ioc->reply_sz, 4*reply->u.reply.MsgLength));
- ioc->ioctl->status |= MPT_IOCTL_STATUS_RF_VALID;
+ if (!reply)
+ goto out;
- /* Set the command status to GOOD if IOC Status is GOOD
- * OR if SCSI I/O cmd and data underrun or recovered error.
- */
- iocStatus = le16_to_cpu(reply->u.reply.IOCStatus) & MPI_IOCSTATUS_MASK;
- if (iocStatus == MPI_IOCSTATUS_SUCCESS)
- ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
-
- if (iocStatus || reply->u.reply.IOCLogInfo)
- dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tiocstatus (0x%04X), "
- "loginfo (0x%08X)\n", ioc->name,
- iocStatus,
- le32_to_cpu(reply->u.reply.IOCLogInfo)));
-
- if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
- (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
-
- if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
- dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "\tscsi_status (0x%02x), scsi_state (0x%02x), "
- "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
- reply->u.sreply.SCSIStatus,
- reply->u.sreply.SCSIState,
- le16_to_cpu(reply->u.sreply.TaskTag),
- le32_to_cpu(reply->u.sreply.TransferCount)));
-
- ioc->ioctl->reset &= ~MPTCTL_RESET_OK;
-
- if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) ||
- (iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) {
- ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
- }
- }
+ ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength);
+ memcpy(ioc->ioctl_cmds.reply, reply, sz);
- /* Copy the sense data - if present
- */
- if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) &&
- (reply->u.sreply.SCSIState &
- MPI_SCSI_STATE_AUTOSENSE_VALID)){
+ if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo)
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name,
+ le16_to_cpu(reply->u.reply.IOCStatus),
+ le32_to_cpu(reply->u.reply.IOCLogInfo)));
+
+ if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+ (req->u.hdr.Function ==
+ MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+
+ if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "scsi_status (0x%02x), scsi_state (0x%02x), "
+ "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
+ reply->u.sreply.SCSIStatus,
+ reply->u.sreply.SCSIState,
+ le16_to_cpu(reply->u.sreply.TaskTag),
+ le32_to_cpu(reply->u.sreply.TransferCount)));
+
+ if (reply->u.sreply.SCSIState &
+ MPI_SCSI_STATE_AUTOSENSE_VALID) {
sz = req->u.scsireq.SenseBufferLength;
req_index =
le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
- sense_data =
- ((u8 *)ioc->sense_buf_pool +
+ sense_data = ((u8 *)ioc->sense_buf_pool +
(req_index * MPT_SENSE_BUFFER_ALLOC));
- memcpy(ioc->ioctl->sense, sense_data, sz);
- ioc->ioctl->status |= MPT_IOCTL_STATUS_SENSE_VALID;
+ memcpy(ioc->ioctl_cmds.sense, sense_data, sz);
+ ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID;
}
+ }
- if (cmd == MPI_FUNCTION_SCSI_TASK_MGMT)
- mptctl_free_tm_flags(ioc);
-
- /* We are done, issue wake up
- */
- ioc->ioctl->wait_done = 1;
- wake_up (&mptctl_wait);
+ out:
+ /* We are done, issue wake up
+ */
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT)
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->ioctl_cmds.done);
}
+
+ out_continuation:
+ if (reply && (reply->u.reply.MsgFlags &
+ MPI_MSGFLAGS_CONTINUATION_REPLY))
+ return 0;
return 1;
}
@@ -304,30 +281,66 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
* Expecting an interrupt, however timed out.
*
*/
-static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
+static void
+mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
- int rc = 1;
+ unsigned long flags;
- if (ioctl == NULL)
- return;
- dctlprintk(ioctl->ioc,
- printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
- ioctl->ioc->name, ioctl->ioc->id));
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
+ ioc->name, __func__));
- ioctl->wait_done = 0;
- if (ioctl->reset & MPTCTL_RESET_OK)
- rc = mptctl_bus_reset(ioctl);
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
- if (rc) {
- /* Issue a reset for this device.
- * The IOC is not responding.
- */
- dctlprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
- ioctl->ioc->name));
- mpt_HardResetHandler(ioctl->ioc, CAN_SLEEP);
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+ mpt_free_msg_frame(ioc, mf);
+ return;
}
- return;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ if (!mptctl_bus_reset(ioc, mf->u.hdr.Function))
+ return;
+
+ /* Issue a reset for this device.
+ * The IOC is not responding.
+ */
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
+ ioc->name));
+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+}
+
+static int
+mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+{
+ if (!mf)
+ return 0;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt completed (mf=%p, mr=%p)\n",
+ ioc->name, mf, mr));
+
+ ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+
+ if (!mr)
+ goto out;
+
+ ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ memcpy(ioc->taskmgmt_cmds.reply, mr,
+ min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
+ out:
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->taskmgmt_cmds.done);
+ return 1;
+ }
+ return 0;
}
/* mptctl_bus_reset
@@ -335,133 +348,150 @@ static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
* Bus reset code.
*
*/
-static int mptctl_bus_reset(MPT_IOCTL *ioctl)
+static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
- MPT_SCSI_HOST *hd;
+ SCSITaskMgmtReply_t *pScsiTmReply;
int ii;
- int retval=0;
-
-
- ioctl->reset &= ~MPTCTL_RESET_OK;
-
- if (ioctl->ioc->sh == NULL)
+ int retval;
+ unsigned long timeout;
+ unsigned long time_count;
+ u16 iocstatus;
+
+ /* bus reset is only good for SCSI IO, RAID PASSTHRU */
+ if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) ||
+ (function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
+ dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
+ "TaskMgmt, not SCSI_IO!!\n", ioc->name));
return -EPERM;
+ }
- hd = shost_priv(ioctl->ioc->sh);
- if (hd == NULL)
+ mutex_lock(&ioc->taskmgmt_cmds.mutex);
+ if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
return -EPERM;
+ }
- /* Single threading ....
- */
- if (mptctl_set_tm_flags(hd) != 0)
- return -EPERM;
+ retval = 0;
/* Send request
*/
- if ((mf = mpt_get_msg_frame(mptctl_id, ioctl->ioc)) == NULL) {
- dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt, no msg frames!!\n",
- ioctl->ioc->name));
-
- mptctl_free_tm_flags(ioctl->ioc);
- return -ENOMEM;
+ mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
+ if (mf == NULL) {
+ dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
+ "TaskMgmt, no msg frames!!\n", ioc->name));
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ retval = -ENOMEM;
+ goto mptctl_bus_reset_done;
}
- dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n",
- ioctl->ioc->name, mf));
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
+ ioc->name, mf));
pScsiTm = (SCSITaskMgmt_t *) mf;
- pScsiTm->TargetID = ioctl->id;
- pScsiTm->Bus = hd->port; /* 0 */
- pScsiTm->ChainOffset = 0;
+ memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
- pScsiTm->Reserved = 0;
pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
- pScsiTm->Reserved1 = 0;
pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
-
+ pScsiTm->TargetID = 0;
+ pScsiTm->Bus = 0;
+ pScsiTm->ChainOffset = 0;
+ pScsiTm->Reserved = 0;
+ pScsiTm->Reserved1 = 0;
+ pScsiTm->TaskMsgContext = 0;
for (ii= 0; ii < 8; ii++)
pScsiTm->LUN[ii] = 0;
-
for (ii=0; ii < 7; ii++)
pScsiTm->Reserved2[ii] = 0;
- pScsiTm->TaskMsgContext = 0;
- dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT
- "mptctl_bus_reset: issued.\n", ioctl->ioc->name));
-
- DBG_DUMP_TM_REQUEST_FRAME(ioctl->ioc, (u32 *)mf);
+ switch (ioc->bus_type) {
+ case FC:
+ timeout = 40;
+ break;
+ case SAS:
+ timeout = 30;
+ break;
+ case SPI:
+ default:
+ timeout = 2;
+ break;
+ }
- ioctl->wait_done=0;
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt type=%d timeout=%ld\n",
+ ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout));
- if ((ioctl->ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
- (ioctl->ioc->facts.MsgVersion >= MPI_VERSION_01_05))
- mpt_put_msg_frame_hi_pri(mptctl_id, ioctl->ioc, mf);
+ INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+ CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+ time_count = jiffies;
+ if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
+ (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
+ mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf);
else {
- retval = mpt_send_handshake_request(mptctl_id, ioctl->ioc,
- sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP);
+ retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
+ sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
if (retval != 0) {
- dfailprintk(ioctl->ioc, printk(MYIOC_s_ERR_FMT "_send_handshake FAILED!"
- " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd,
- hd->ioc, mf));
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "TaskMgmt send_handshake FAILED!"
+ " (ioc %p, mf %p, rc=%d) \n", ioc->name,
+ ioc, mf, retval));
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
goto mptctl_bus_reset_done;
}
}
/* Now wait for the command to complete */
- ii = wait_event_timeout(mptctl_wait,
- ioctl->wait_done == 1,
- HZ*5 /* 5 second timeout */);
+ ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt failed\n", ioc->name));
+ mpt_free_msg_frame(ioc, mf);
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ retval = 0;
+ else
+ retval = -1; /* return failure */
+ goto mptctl_bus_reset_done;
+ }
- if(ii <=0 && (ioctl->wait_done != 1 )) {
- mpt_free_msg_frame(hd->ioc, mf);
- ioctl->wait_done = 0;
+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt failed\n", ioc->name));
+ retval = -1; /* return failure */
+ goto mptctl_bus_reset_done;
+ }
+
+ pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
+ "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
+ "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
+ pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ le16_to_cpu(pScsiTmReply->IOCStatus),
+ le32_to_cpu(pScsiTmReply->IOCLogInfo),
+ pScsiTmReply->ResponseCode,
+ le32_to_cpu(pScsiTmReply->TerminationCount)));
+
+ iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+
+ if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
+ iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED ||
+ iocstatus == MPI_IOCSTATUS_SUCCESS)
+ retval = 0;
+ else {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt failed\n", ioc->name));
retval = -1; /* return failure */
}
-mptctl_bus_reset_done:
- mptctl_free_tm_flags(ioctl->ioc);
+ mptctl_bus_reset_done:
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
return retval;
}
-static int
-mptctl_set_tm_flags(MPT_SCSI_HOST *hd) {
- unsigned long flags;
-
- spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
-
- if (hd->tmState == TM_STATE_NONE) {
- hd->tmState = TM_STATE_IN_PROGRESS;
- hd->tmPending = 1;
- spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
- } else {
- spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
- return -EBUSY;
- }
-
- return 0;
-}
-
-static void
-mptctl_free_tm_flags(MPT_ADAPTER *ioc)
-{
- MPT_SCSI_HOST * hd;
- unsigned long flags;
-
- hd = shost_priv(ioc->sh);
- if (hd == NULL)
- return;
-
- spin_lock_irqsave(&ioc->FreeQlock, flags);
-
- hd->tmState = TM_STATE_NONE;
- hd->tmPending = 0;
- spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-
- return;
-}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptctl_ioc_reset
@@ -473,22 +503,23 @@ mptctl_free_tm_flags(MPT_ADAPTER *ioc)
static int
mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
- MPT_IOCTL *ioctl = ioc->ioctl;
- dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC %s_reset routed to IOCTL driver!\n", ioc->name,
- reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
- reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
-
- if(ioctl == NULL)
- return 1;
-
switch(reset_phase) {
case MPT_IOC_SETUP_RESET:
- ioctl->status |= MPT_IOCTL_STATUS_DID_IOCRESET;
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+ break;
+ case MPT_IOC_PRE_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
break;
case MPT_IOC_POST_RESET:
- ioctl->status &= ~MPT_IOCTL_STATUS_DID_IOCRESET;
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET;
+ complete(&ioc->ioctl_cmds.done);
+ }
break;
- case MPT_IOC_PRE_RESET:
default:
break;
}
@@ -642,7 +673,7 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
else
ret = -EINVAL;
- mutex_unlock(&iocp->ioctl->ioctl_mutex);
+ mutex_unlock(&iocp->ioctl_cmds.mutex);
return ret;
}
@@ -758,6 +789,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
int sge_offset = 0;
u16 iocstat;
pFWDownloadReply_t ReplyMsg = NULL;
+ unsigned long timeleft;
if (mpt_verify_adapter(ioc, &iocp) < 0) {
printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
@@ -841,8 +873,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
* 96 8
* 64 4
*/
- maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t))
- / (sizeof(dma_addr_t) + sizeof(u32));
+ maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) -
+ sizeof(FWDownloadTCSGE_t))
+ / iocp->SGE_size;
if (numfrags > maxfrags) {
ret = -EMLINK;
goto fwdl_out;
@@ -870,7 +903,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
if (nib == 0 || nib == 3) {
;
} else if (sgIn->Address) {
- mpt_add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
+ iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
n++;
if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) {
printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - "
@@ -882,7 +915,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
}
sgIn++;
bl++;
- sgOut += (sizeof(dma_addr_t) + sizeof(u32));
+ sgOut += iocp->SGE_size;
}
DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags);
@@ -891,16 +924,30 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
* Finally, perform firmware download.
*/
ReplyMsg = NULL;
+ SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext);
+ INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status)
mpt_put_msg_frame(mptctl_id, iocp, mf);
/* Now wait for the command to complete */
- ret = wait_event_timeout(mptctl_wait,
- iocp->ioctl->wait_done == 1,
- HZ*60);
+retry_wait:
+ timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60);
+ if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
+ if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+ mpt_free_msg_frame(iocp, mf);
+ goto fwdl_out;
+ }
+ if (!timeleft)
+ mptctl_timeout_expired(iocp, mf);
+ else
+ goto retry_wait;
+ goto fwdl_out;
+ }
- if(ret <=0 && (iocp->ioctl->wait_done != 1 )) {
- /* Now we need to reset the board */
- mptctl_timeout_expired(iocp->ioctl);
+ if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+ printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
+ mpt_free_msg_frame(iocp, mf);
ret = -ENODATA;
goto fwdl_out;
}
@@ -908,7 +955,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
if (sgl)
kfree_sgl(sgl, sgl_dma, buflist, iocp);
- ReplyMsg = (pFWDownloadReply_t)iocp->ioctl->ReplyFrame;
+ ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply;
iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK;
if (iocstat == MPI_IOCSTATUS_SUCCESS) {
printk(MYIOC_s_INFO_FMT "F/W update successfull!\n", iocp->name);
@@ -932,6 +979,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
return 0;
fwdl_out:
+
+ CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status);
+ SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0);
kfree_sgl(sgl, sgl_dma, buflist, iocp);
return ret;
}
@@ -1003,7 +1053,7 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
*
*/
sgl = sglbuf;
- sg_spill = ((ioc->req_sz - sge_offset)/(sizeof(dma_addr_t) + sizeof(u32))) - 1;
+ sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1;
while (bytes_allocd < bytes) {
this_alloc = min(alloc_sz, bytes-bytes_allocd);
buflist[buflist_ent].len = this_alloc;
@@ -1024,8 +1074,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
dma_addr_t dma_addr;
bytes_allocd += this_alloc;
- sgl->FlagsLength = (0x10000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|this_alloc);
- dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir);
+ sgl->FlagsLength = (0x10000000|sgdir|this_alloc);
+ dma_addr = pci_map_single(ioc->pcidev,
+ buflist[buflist_ent].kptr, this_alloc, dir);
sgl->Address = dma_addr;
fragcnt++;
@@ -1771,7 +1822,10 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
int msgContext;
u16 req_idx;
ulong timeout;
+ unsigned long timeleft;
struct scsi_device *sdev;
+ unsigned long flags;
+ u8 function;
/* bufIn and bufOut are used for user to kernel space transfers
*/
@@ -1784,24 +1838,23 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
__FILE__, __LINE__, iocnum);
return -ENODEV;
}
- if (!ioc->ioctl) {
- printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - "
- "No memory available during driver init.\n",
- __FILE__, __LINE__);
- return -ENOMEM;
- } else if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_IOCRESET) {
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - "
- "Busy with IOC Reset \n", __FILE__, __LINE__);
+ "Busy with diagnostic reset\n", __FILE__, __LINE__);
return -EBUSY;
}
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
/* Verify that the final request frame will not be too large.
*/
sz = karg.dataSgeOffset * 4;
if (karg.dataInSize > 0)
- sz += sizeof(dma_addr_t) + sizeof(u32);
+ sz += ioc->SGE_size;
if (karg.dataOutSize > 0)
- sz += sizeof(dma_addr_t) + sizeof(u32);
+ sz += ioc->SGE_size;
if (sz > ioc->req_sz) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
@@ -1827,10 +1880,12 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"Unable to read MF from mpt_ioctl_command struct @ %p\n",
ioc->name, __FILE__, __LINE__, mfPtr);
+ function = -1;
rc = -EFAULT;
goto done_free_mem;
}
hdr->MsgContext = cpu_to_le32(msgContext);
+ function = hdr->Function;
/* Verify that this request is allowed.
@@ -1838,7 +1893,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n",
ioc->name, hdr->Function, mf));
- switch (hdr->Function) {
+ switch (function) {
case MPI_FUNCTION_IOC_FACTS:
case MPI_FUNCTION_PORT_FACTS:
karg.dataOutSize = karg.dataInSize = 0;
@@ -1893,7 +1948,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
}
pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
- pScsiReq->MsgFlags |= mpt_msg_flags();
+ pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
/* verify that app has not requested
@@ -1935,8 +1990,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
pScsiReq->Control = cpu_to_le32(scsidir | qtag);
pScsiReq->DataLength = cpu_to_le32(dataSize);
- ioc->ioctl->reset = MPTCTL_RESET_OK;
- ioc->ioctl->id = pScsiReq->TargetID;
} else {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
@@ -1979,7 +2032,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
int dataSize;
pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
- pScsiReq->MsgFlags |= mpt_msg_flags();
+ pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
/* verify that app has not requested
@@ -2014,8 +2067,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
pScsiReq->Control = cpu_to_le32(scsidir | qtag);
pScsiReq->DataLength = cpu_to_le32(dataSize);
- ioc->ioctl->reset = MPTCTL_RESET_OK;
- ioc->ioctl->id = pScsiReq->TargetID;
} else {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"SCSI driver is not loaded. \n",
@@ -2026,20 +2077,17 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
break;
case MPI_FUNCTION_SCSI_TASK_MGMT:
- {
- MPT_SCSI_HOST *hd = NULL;
- if ((ioc->sh == NULL) || ((hd = shost_priv(ioc->sh)) == NULL)) {
- printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
- "SCSI driver not loaded or SCSI host not found. \n",
- ioc->name, __FILE__, __LINE__);
- rc = -EFAULT;
- goto done_free_mem;
- } else if (mptctl_set_tm_flags(hd) != 0) {
- rc = -EPERM;
- goto done_free_mem;
- }
- }
+ {
+ SCSITaskMgmt_t *pScsiTm;
+ pScsiTm = (SCSITaskMgmt_t *)mf;
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "\tTaskType=0x%x MsgFlags=0x%x "
+ "TaskMsgContext=0x%x id=%d channel=%d\n",
+ ioc->name, pScsiTm->TaskType, le32_to_cpu
+ (pScsiTm->TaskMsgContext), pScsiTm->MsgFlags,
+ pScsiTm->TargetID, pScsiTm->Bus));
break;
+ }
case MPI_FUNCTION_IOC_INIT:
{
@@ -2123,8 +2171,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
if (karg.dataInSize > 0) {
flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_END_OF_BUFFER |
- MPI_SGE_FLAGS_DIRECTION |
- mpt_addr_size() )
+ MPI_SGE_FLAGS_DIRECTION)
<< MPI_SGE_FLAGS_SHIFT;
} else {
flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
@@ -2141,8 +2188,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
/* Set up this SGE.
* Copy to MF and to sglbuf
*/
- mpt_add_sge(psge, flagsLength, dma_addr_out);
- psge += (sizeof(u32) + sizeof(dma_addr_t));
+ ioc->add_sge(psge, flagsLength, dma_addr_out);
+ psge += ioc->SGE_size;
/* Copy user data to kernel space.
*/
@@ -2175,18 +2222,25 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
/* Set up this SGE
* Copy to MF and to sglbuf
*/
- mpt_add_sge(psge, flagsLength, dma_addr_in);
+ ioc->add_sge(psge, flagsLength, dma_addr_in);
}
}
} else {
/* Add a NULL SGE
*/
- mpt_add_sge(psge, flagsLength, (dma_addr_t) -1);
+ ioc->add_sge(psge, flagsLength, (dma_addr_t) -1);
}
- ioc->ioctl->wait_done = 0;
+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext);
+ INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
+ mutex_lock(&ioc->taskmgmt_cmds.mutex);
+ if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ goto done_free_mem;
+ }
+
DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
@@ -2197,10 +2251,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP);
if (rc != 0) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "_send_handshake FAILED! (ioc %p, mf %p)\n",
+ "send_handshake FAILED! (ioc %p, mf %p)\n",
ioc->name, ioc, mf));
- mptctl_free_tm_flags(ioc);
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
rc = -ENODATA;
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
goto done_free_mem;
}
}
@@ -2210,36 +2265,47 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
/* Now wait for the command to complete */
timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
- timeout = wait_event_timeout(mptctl_wait,
- ioc->ioctl->wait_done == 1,
- HZ*timeout);
-
- if(timeout <=0 && (ioc->ioctl->wait_done != 1 )) {
- /* Now we need to reset the board */
-
- if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT)
- mptctl_free_tm_flags(ioc);
-
- mptctl_timeout_expired(ioc->ioctl);
- rc = -ENODATA;
+retry_wait:
+ timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
+ HZ*timeout);
+ if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ rc = -ETIME;
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n",
+ ioc->name, __func__));
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+ if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ goto done_free_mem;
+ }
+ if (!timeleft) {
+ if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ mptctl_timeout_expired(ioc, mf);
+ mf = NULL;
+ } else
+ goto retry_wait;
goto done_free_mem;
}
+ if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+
+
mf = NULL;
/* If a valid reply frame, copy to the user.
* Offset 2: reply length in U32's
*/
- if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) {
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) {
if (karg.maxReplyBytes < ioc->reply_sz) {
- sz = min(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]);
+ sz = min(karg.maxReplyBytes,
+ 4*ioc->ioctl_cmds.reply[2]);
} else {
- sz = min(ioc->reply_sz, 4*ioc->ioctl->ReplyFrame[2]);
+ sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]);
}
-
if (sz > 0) {
if (copy_to_user(karg.replyFrameBufPtr,
- &ioc->ioctl->ReplyFrame, sz)){
+ ioc->ioctl_cmds.reply, sz)){
printk(MYIOC_s_ERR_FMT
"%s@%d::mptctl_do_mpt_command - "
"Unable to write out reply frame %p\n",
@@ -2252,10 +2318,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
/* If valid sense data, copy to user.
*/
- if (ioc->ioctl->status & MPT_IOCTL_STATUS_SENSE_VALID) {
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) {
sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE);
if (sz > 0) {
- if (copy_to_user(karg.senseDataPtr, ioc->ioctl->sense, sz)) {
+ if (copy_to_user(karg.senseDataPtr,
+ ioc->ioctl_cmds.sense, sz)) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
"Unable to write sense data to user %p\n",
ioc->name, __FILE__, __LINE__,
@@ -2269,7 +2336,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
/* If the overall status is _GOOD and data in, copy data
* to user.
*/
- if ((ioc->ioctl->status & MPT_IOCTL_STATUS_COMMAND_GOOD) &&
+ if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) &&
(karg.dataInSize > 0) && (bufIn.kptr)) {
if (copy_to_user(karg.dataInBufPtr,
@@ -2284,9 +2351,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
done_free_mem:
- ioc->ioctl->status &= ~(MPT_IOCTL_STATUS_COMMAND_GOOD |
- MPT_IOCTL_STATUS_SENSE_VALID |
- MPT_IOCTL_STATUS_RF_VALID );
+ CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
/* Free the allocated memory.
*/
@@ -2336,6 +2402,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
MPT_FRAME_HDR *mf = NULL;
MPIHeader_t *mpi_hdr;
+ unsigned long timeleft;
+ int retval;
/* Reset long to int. Should affect IA64 and SPARC only
*/
@@ -2466,9 +2534,9 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
if (hd && (cim_rev == 1)) {
- karg.hard_resets = hd->hard_resets;
- karg.soft_resets = hd->soft_resets;
- karg.timeouts = hd->timeouts;
+ karg.hard_resets = ioc->hard_resets;
+ karg.soft_resets = ioc->soft_resets;
+ karg.timeouts = ioc->timeouts;
}
}
@@ -2476,8 +2544,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
* Gather ISTWI(Industry Standard Two Wire Interface) Data
*/
if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
- dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
- ioc->name,__func__));
+ dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
+ "%s, no msg frames!!\n", ioc->name, __func__));
goto out;
}
@@ -2498,22 +2566,29 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
if (!pbuf)
goto out;
- mpt_add_sge((char *)&IstwiRWRequest->SGL,
+ ioc->add_sge((char *)&IstwiRWRequest->SGL,
(MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
- ioc->ioctl->wait_done = 0;
+ retval = 0;
+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
+ IstwiRWRequest->MsgContext);
+ INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
mpt_put_msg_frame(mptctl_id, ioc, mf);
- rc = wait_event_timeout(mptctl_wait,
- ioc->ioctl->wait_done == 1,
- HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */);
-
- if(rc <=0 && (ioc->ioctl->wait_done != 1 )) {
- /*
- * Now we need to reset the board
- */
- mpt_free_msg_frame(ioc, mf);
- mptctl_timeout_expired(ioc->ioctl);
+retry_wait:
+ timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
+ HZ*MPT_IOCTL_DEFAULT_TIMEOUT);
+ if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ retval = -ETIME;
+ printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__);
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+ mpt_free_msg_frame(ioc, mf);
+ goto out;
+ }
+ if (!timeleft)
+ mptctl_timeout_expired(ioc, mf);
+ else
+ goto retry_wait;
goto out;
}
@@ -2526,10 +2601,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
* bays have drives in them
* pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3)
*/
- if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID)
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)
karg.rsvd = *(u32 *)pbuf;
out:
+ CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
+
if (pbuf)
pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
@@ -2753,7 +2831,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
- mutex_unlock(&iocp->ioctl->ioctl_mutex);
+ mutex_unlock(&iocp->ioctl_cmds.mutex);
return ret;
}
@@ -2807,7 +2885,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd,
*/
ret = mptctl_do_mpt_command (karg, &uarg->MF);
- mutex_unlock(&iocp->ioctl->ioctl_mutex);
+ mutex_unlock(&iocp->ioctl_cmds.mutex);
return ret;
}
@@ -2859,21 +2937,10 @@ static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long a
static int
mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- MPT_IOCTL *mem;
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
- /*
- * Allocate and inite a MPT_IOCTL structure
- */
- mem = kzalloc(sizeof(MPT_IOCTL), GFP_KERNEL);
- if (!mem) {
- mptctl_remove(pdev);
- return -ENOMEM;
- }
-
- ioc->ioctl = mem;
- ioc->ioctl->ioc = ioc;
- mutex_init(&ioc->ioctl->ioctl_mutex);
+ mutex_init(&ioc->ioctl_cmds.mutex);
+ init_completion(&ioc->ioctl_cmds.done);
return 0;
}
@@ -2887,9 +2954,6 @@ mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static void
mptctl_remove(struct pci_dev *pdev)
{
- MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
-
- kfree ( ioc->ioctl );
}
static struct mpt_pci_driver mptctl_driver = {
@@ -2929,6 +2993,7 @@ static int __init mptctl_init(void)
goto out_fail;
}
+ mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
mpt_reset_register(mptctl_id, mptctl_ioc_reset);
mpt_event_register(mptctl_id, mptctl_event_process);
@@ -2953,6 +3018,7 @@ static void mptctl_exit(void)
/* De-register callback handler from base module */
mpt_deregister(mptctl_id);
+ mpt_reset_deregister(mptctl_taskmgmt_id);
mpt_device_driver_deregister(MPTCTL_DRIVER);
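
The caller side of that bookkeeping is equally uniform across mptctl.c: set the message context, mark the status PENDING, post the frame, then wait on the completion while distinguishing an IOC reset, a genuine timeout and a spurious wakeup. Condensed into one hedged sketch (names as used above; the helper function itself is illustrative):

/* Sketch condensed from mptctl_do_fw_download()/mptctl_do_mpt_command():
 * issue a frame tracked through ioc->ioctl_cmds and wait for its reply. */
static int
mptctl_issue_and_wait_sketch(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
    MPIHeader_t *hdr, ulong timeout)
{
	unsigned long timeleft;
	int rc = 0;

	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext);
	INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
	mpt_put_msg_frame(mptctl_id, ioc, mf);

 retry_wait:
	timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
	    HZ * timeout);
	if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		rc = -ETIME;
		if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
			mpt_free_msg_frame(ioc, mf);	/* IOC was reset */
			goto out;
		}
		if (!timeleft)
			mptctl_timeout_expired(ioc, mf); /* real timeout */
		else
			goto retry_wait;	/* woken without COMMAND_GOOD */
		goto out;
	}
	/* success: .reply and .sense are valid per the RF/SENSE status bits */
 out:
	CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
	return rc;
}
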
diff --git a/drivers/message/fusion/mptdebug.h b/drivers/message/fusion/mptdebug.h
index 510b9f49209..28e47887928 100644
--- a/drivers/message/fusion/mptdebug.h
+++ b/drivers/message/fusion/mptdebug.h
@@ -58,6 +58,7 @@
#define MPT_DEBUG_FC 0x00080000
#define MPT_DEBUG_SAS 0x00100000
#define MPT_DEBUG_SAS_WIDE 0x00200000
+#define MPT_DEBUG_36GB_MEM 0x00400000
/*
* CONFIG_FUSION_LOGGING - enabled in Kconfig
@@ -135,6 +136,8 @@
#define dsaswideprintk(IOC, CMD) \
MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
+#define d36memprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM)
/*
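
With CONFIG_FUSION_LOGGING enabled, the new flag is consumed like every other MPT_DEBUG_* bit: the wrapper only evaluates its printk argument when the adapter's debug level has that bit set. A hypothetical call site (the message text is illustrative):

	d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	    "64 bit DMA mask rejected, falling back to 32 bit\n", ioc->name));
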
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index c3c24fdf9fb..e61df133a59 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1251,17 +1251,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* A slightly different algorithm is required for
* 64bit SGEs.
*/
- scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
- if (sizeof(dma_addr_t) == sizeof(u64)) {
+ scale = ioc->req_sz/ioc->SGE_size;
+ if (ioc->sg_addr_size == sizeof(u64)) {
numSGE = (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
- (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
- sizeof(u32));
+ (ioc->req_sz - 60) / ioc->SGE_size;
} else {
numSGE = 1 + (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
- (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
- sizeof(u32));
+ (ioc->req_sz - 64) / ioc->SGE_size;
}
if (numSGE < sh->sg_tablesize) {
@@ -1292,9 +1290,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Clear the TM flags
*/
- hd->tmPending = 0;
- hd->tmState = TM_STATE_NONE;
- hd->resetPending = 0;
hd->abortSCpnt = NULL;
/* Clear the pointer used to store
@@ -1312,8 +1307,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hd->timer.data = (unsigned long) hd;
hd->timer.function = mptscsih_timer_expired;
- init_waitqueue_head(&hd->scandv_waitq);
- hd->scandv_wait_done = 0;
hd->last_queue_full = 0;
sh->transportt = mptfc_transport_template;
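
The reworked arithmetic only substitutes the per-adapter ioc->SGE_size and ioc->sg_addr_size for the old compile-time sizeof() expressions; its shape is unchanged. A worked example of the 64-bit branch under assumed values (req_sz = 128, SGE_size = 12, sg_addr_size = 8, MaxChainDepth = 8, all illustrative; integer division throughout):

	scale  = 128 / 12                            = 10
	numSGE = (10 - 1) * (8 - 1)                  = 63
	       + 10               /* + scale */      = 73
	       + (128 - 60) / 12  /* + 68/12 = 5 */  = 78

which is the limit compared against sh->sg_tablesize just below.
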
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index c2804f26cb4..a9e48e28b1d 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -703,7 +703,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
printk (KERN_ERR "%s: no tx context available: %u\n",
__func__, priv->mpt_txfidx_tail);
- return 1;
+ return NETDEV_TX_BUSY;
}
mf = mpt_get_msg_frame(LanCtx, mpt_dev);
@@ -713,7 +713,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
printk (KERN_ERR "%s: Unable to alloc request frame\n",
__func__);
- return 1;
+ return NETDEV_TX_BUSY;
}
ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
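
The switch from a bare 1 to NETDEV_TX_BUSY matters because the network core interprets the return value of the transmit hook: NETDEV_TX_OK means the skb was consumed, while NETDEV_TX_BUSY asks the stack to requeue the skb and retry later. A minimal sketch of the convention for a hypothetical driver (mydrv names and helpers are assumptions, not mptlan code):

/* Sketch of the hard_start_xmit return convention used above. */
static int
mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);	/* hypothetical priv */

	if (!mydrv_tx_slot_available(priv))	/* hypothetical helper */
		return NETDEV_TX_BUSY;		/* stack requeues the skb */

	mydrv_queue_for_hardware(priv, skb);	/* hypothetical helper */
	return NETDEV_TX_OK;			/* skb is now owned by us */
}
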
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index a9019f081b9..20e0b447e8e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -93,8 +93,37 @@ static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS;
-
-static void mptsas_hotplug_work(struct work_struct *work);
+static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS;
+
+static void mptsas_firmware_event_work(struct work_struct *work);
+static void mptsas_send_sas_event(struct fw_event_work *fw_event);
+static void mptsas_send_raid_event(struct fw_event_work *fw_event);
+static void mptsas_send_ir2_event(struct fw_event_work *fw_event);
+static void mptsas_parse_device_info(struct sas_identify *identify,
+ struct mptsas_devinfo *device_info);
+static inline void mptsas_set_rphy(MPT_ADAPTER *ioc,
+ struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy);
+static struct mptsas_phyinfo *mptsas_find_phyinfo_by_sas_address
+ (MPT_ADAPTER *ioc, u64 sas_address);
+static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc,
+ struct mptsas_devinfo *device_info, u32 form, u32 form_specific);
+static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc,
+ struct mptsas_enclosure *enclosure, u32 form, u32 form_specific);
+static int mptsas_add_end_device(MPT_ADAPTER *ioc,
+ struct mptsas_phyinfo *phy_info);
+static void mptsas_del_end_device(MPT_ADAPTER *ioc,
+ struct mptsas_phyinfo *phy_info);
+static void mptsas_send_link_status_event(struct fw_event_work *fw_event);
+static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address
+ (MPT_ADAPTER *ioc, u64 sas_address);
+static void mptsas_expander_delete(MPT_ADAPTER *ioc,
+ struct mptsas_portinfo *port_info, u8 force);
+static void mptsas_send_expander_event(struct fw_event_work *fw_event);
+static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
+static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
+static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
+static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
+static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
@@ -218,30 +247,125 @@ static void mptsas_print_expander_pg1(MPT_ADAPTER *ioc, SasExpanderPage1_t *pg1)
le16_to_cpu(pg1->AttachedDevHandle)));
}
-static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
+/* inhibit sas firmware event handling */
+static void
+mptsas_fw_event_off(MPT_ADAPTER *ioc)
{
- struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
- return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ ioc->fw_events_off = 1;
+ ioc->sas_discovery_quiesce_io = 0;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+
}
-static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
+/* enable sas firmware event handling */
+static void
+mptsas_fw_event_on(MPT_ADAPTER *ioc)
{
- struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
- return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ ioc->fw_events_off = 0;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
-static struct mptsas_portinfo *
-mptsas_get_hba_portinfo(MPT_ADAPTER *ioc)
+/* queue a sas firmware event */
+static void
+mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
+ unsigned long delay)
{
- struct list_head *head = &ioc->sas_topology;
- struct mptsas_portinfo *pi = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_add_tail(&fw_event->list, &ioc->fw_event_list);
+ INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n",
+ ioc->name, __func__, fw_event));
+ queue_delayed_work(ioc->fw_event_q, &fw_event->work,
+ delay);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/* requeue a sas firmware event */
+static void
+mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
+ unsigned long delay)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
+ "(fw_event=0x%p)\n", ioc->name, __func__, fw_event));
+ fw_event->retries++;
+ queue_delayed_work(ioc->fw_event_q, &fw_event->work,
+ msecs_to_jiffies(delay));
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/* free memory associated with a sas firmware event */
+static void
+mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
+ ioc->name, __func__, fw_event));
+ list_del(&fw_event->list);
+ kfree(fw_event);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/* walk the firmware event queue, and either stop or wait for
+ * outstanding events to complete */
+static void
+mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event, *next;
+ struct mptsas_target_reset_event *target_reset_list, *n;
+ u8 flush_q;
+ MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+
+ /* flush the target_reset_list */
+ if (!list_empty(&hd->target_reset_list)) {
+ list_for_each_entry_safe(target_reset_list, n,
+ &hd->target_reset_list, list) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: removing target reset for id=%d\n",
+ ioc->name, __func__,
+ target_reset_list->sas_event_data.TargetID));
+ list_del(&target_reset_list->list);
+ kfree(target_reset_list);
+ }
+ }
+
+ if (list_empty(&ioc->fw_event_list) ||
+ !ioc->fw_event_q || in_interrupt())
+ return;
- /* always the first entry on sas_topology list */
+ flush_q = 0;
+ list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+ if (cancel_delayed_work(&fw_event->work))
+ mptsas_free_fw_event(ioc, fw_event);
+ else
+ flush_q = 1;
+ }
+ if (flush_q)
+ flush_workqueue(ioc->fw_event_q);
+}
- if (!list_empty(head))
- pi = list_entry(head->next, struct mptsas_portinfo, list);
- return pi;
+static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
+}
+
+static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+ return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
}
/*
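
Taken together, the helpers above give mptsas one lock-protected path for deferring firmware events onto the per-adapter ioc->fw_event_q. A hedged sketch of a producer (the fields copied into struct fw_event_work beyond list/work/retries are assumptions, as is the function name):

/* Sketch: deferring an MPI event from the event callback to the
 * fw_event workqueue via mptsas_add_fw_event() above. */
static int
mptsas_queue_fw_event_sketch(MPT_ADAPTER *ioc,
    EventNotificationReply_t *reply)
{
	struct fw_event_work *fw_event;

	if (ioc->fw_events_off)		/* a real caller would test this
					 * under ioc->fw_event_lock */
		return 0;

	fw_event = kzalloc(sizeof(*fw_event), GFP_ATOMIC);
	if (!fw_event)
		return 0;

	fw_event->ioc = ioc;				/* assumed field */
	fw_event->event = le32_to_cpu(reply->Event) & 0xFF; /* assumed field */
	mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
	return 1;
}
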
@@ -265,6 +389,38 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
return rc;
}
+/**
+ * mptsas_find_portinfo_by_sas_address -
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sas_address: SAS address to look up
+ *
+ * This function should be called with the sas_topology_mutex already held
+ *
+ **/
+static struct mptsas_portinfo *
+mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
+{
+ struct mptsas_portinfo *port_info, *rc = NULL;
+ int i;
+
+ if (sas_address >= ioc->hba_port_sas_addr &&
+ sas_address < (ioc->hba_port_sas_addr +
+ ioc->hba_port_num_phy))
+ return ioc->hba_port_info;
+
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_for_each_entry(port_info, &ioc->sas_topology, list)
+ for (i = 0; i < port_info->num_phys; i++)
+ if (port_info->phy_info[i].identify.sas_address ==
+ sas_address) {
+ rc = port_info;
+ goto out;
+ }
+ out:
+ mutex_unlock(&ioc->sas_topology_mutex);
+ return rc;
+}
+
/*
* Returns true if there is a scsi end device
*/
@@ -308,6 +464,7 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai
if(phy_info->port_details != port_details)
continue;
memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
+ mptsas_set_rphy(ioc, phy_info, NULL);
phy_info->port_details = NULL;
}
kfree(port_details);
@@ -379,6 +536,285 @@ starget)
phy_info->port_details->starget = starget;
}
+/**
+ * mptsas_add_device_component - add a device to the sas_device_info_list
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: fw mapped channel
+ * @id: fw mapped id
+ * @sas_address: SAS address of the device
+ * @device_info: bitfield describing the device capabilities
+ * @slot: enclosure slot of the device
+ * @enclosure_logical_id: logical id of the enclosure
+ *
+ **/
+static void
+mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
+ u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id)
+{
+ struct mptsas_device_info *sas_info, *next;
+ struct scsi_device *sdev;
+ struct scsi_target *starget;
+ struct sas_rphy *rphy;
+
+ /*
+ * Delete all matching devices out of the list
+ */
+ mutex_lock(&ioc->sas_device_info_mutex);
+ list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
+ list) {
+ if (!sas_info->is_logical_volume &&
+ (sas_info->sas_address == sas_address ||
+ (sas_info->fw.channel == channel &&
+ sas_info->fw.id == id))) {
+ list_del(&sas_info->list);
+ kfree(sas_info);
+ }
+ }
+
+ sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
+ if (!sas_info)
+ goto out;
+
+ /*
+ * Set Firmware mapping
+ */
+ sas_info->fw.id = id;
+ sas_info->fw.channel = channel;
+
+ sas_info->sas_address = sas_address;
+ sas_info->device_info = device_info;
+ sas_info->slot = slot;
+ sas_info->enclosure_logical_id = enclosure_logical_id;
+ INIT_LIST_HEAD(&sas_info->list);
+ list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
+
+ /*
+ * Set OS mapping
+ */
+ shost_for_each_device(sdev, ioc->sh) {
+ starget = scsi_target(sdev);
+ rphy = dev_to_rphy(starget->dev.parent);
+ if (rphy->identify.sas_address == sas_address) {
+ sas_info->os.id = starget->id;
+ sas_info->os.channel = starget->channel;
+ }
+ }
+
+ out:
+ mutex_unlock(&ioc->sas_device_info_mutex);
+ return;
+}
+
+/**
+ * mptsas_add_device_component_by_fw - add a device to the list by its fw mapping
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: fw mapped channel
+ * @id: fw mapped id
+ *
+ **/
+static void
+mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+ struct mptsas_devinfo sas_device;
+ struct mptsas_enclosure enclosure_info;
+ int rc;
+
+ rc = mptsas_sas_device_pg0(ioc, &sas_device,
+ (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ (channel << 8) + id);
+ if (rc)
+ return;
+
+ memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
+ mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
+ (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
+ MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
+ sas_device.handle_enclosure);
+
+ mptsas_add_device_component(ioc, sas_device.channel,
+ sas_device.id, sas_device.sas_address, sas_device.device_info,
+ sas_device.slot, enclosure_info.enclosure_logical_id);
+}
+
+/**
+ * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @starget: scsi_target of the RAID volume
+ *
+ **/
+static void
+mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
+ struct scsi_target *starget)
+{
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ dma_addr_t dma_handle;
+ pRaidVolumePage0_t buffer = NULL;
+ int i;
+ RaidPhysDiskPage0_t phys_disk;
+ struct mptsas_device_info *sas_info, *next;
+
+ memset(&cfg, 0 , sizeof(CONFIGPARMS));
+ memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+ hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
+ /* assumption: all volumes are on channel 0 */
+ cfg.pageAddr = starget->id;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.timeout = 10;
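+ /* Two-step config read: fetch the page header first to learn PageLength,
+ * then DMA the full RAID volume page 0 and walk its physical disks. */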
+
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ if (!hdr.PageLength)
+ goto out;
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
+ &dma_handle);
+
+ if (!buffer)
+ goto out;
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ if (!buffer->NumPhysDisks)
+ goto out;
+
+ /*
+ * Adding entry for hidden components
+ */
+ for (i = 0; i < buffer->NumPhysDisks; i++) {
+
+ if (mpt_raid_phys_disk_pg0(ioc,
+ buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
+ continue;
+
+ mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus,
+ phys_disk.PhysDiskID);
+
+ mutex_lock(&ioc->sas_device_info_mutex);
+ list_for_each_entry(sas_info, &ioc->sas_device_info_list,
+ list) {
+ if (!sas_info->is_logical_volume &&
+ (sas_info->fw.channel == phys_disk.PhysDiskBus &&
+ sas_info->fw.id == phys_disk.PhysDiskID)) {
+ sas_info->is_hidden_raid_component = 1;
+ sas_info->volume_id = starget->id;
+ }
+ }
+ mutex_unlock(&ioc->sas_device_info_mutex);
+
+ }
+
+ /*
+ * Delete all matching devices out of the list
+ */
+ mutex_lock(&ioc->sas_device_info_mutex);
+ list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
+ list) {
+ if (sas_info->is_logical_volume && sas_info->fw.id ==
+ starget->id) {
+ list_del(&sas_info->list);
+ kfree(sas_info);
+ }
+ }
+
+ sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
+ if (sas_info) {
+ sas_info->fw.id = starget->id;
+ sas_info->os.id = starget->id;
+ sas_info->os.channel = starget->channel;
+ sas_info->is_logical_volume = 1;
+ INIT_LIST_HEAD(&sas_info->list);
+ list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
+ }
+ mutex_unlock(&ioc->sas_device_info_mutex);
+
+ out:
+ if (buffer)
+ pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
+ dma_handle);
+}
+
+/**
+ * mptsas_add_device_component_starget - add the device attached to a scsi_target
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @starget: scsi_target of the attached end device
+ *
+ **/
+static void
+mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
+ struct scsi_target *starget)
+{
+ VirtTarget *vtarget;
+ struct sas_rphy *rphy;
+ struct mptsas_phyinfo *phy_info = NULL;
+ struct mptsas_enclosure enclosure_info;
+
+ rphy = dev_to_rphy(starget->dev.parent);
+ vtarget = starget->hostdata;
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (!phy_info)
+ return;
+
+ memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
+ mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
+ (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
+ MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
+ phy_info->attached.handle_enclosure);
+
+ mptsas_add_device_component(ioc, phy_info->attached.channel,
+ phy_info->attached.id, phy_info->attached.sas_address,
+ phy_info->attached.device_info,
+ phy_info->attached.slot, enclosure_info.enclosure_logical_id);
+}
+
+/**
+ * mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: os mapped channel
+ * @id: os mapped id
+ *
+ **/
+static void
+mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+ struct mptsas_device_info *sas_info, *next;
+
+ /*
+ * Set is_cached flag
+ */
+ list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
+ list) {
+ if (sas_info->os.channel == channel && sas_info->os.id == id)
+ sas_info->is_cached = 1;
+ }
+}
+
+/**
+ * mptsas_del_device_components - delete and free all entries on the sas_device_info_list
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+static void
+mptsas_del_device_components(MPT_ADAPTER *ioc)
+{
+ struct mptsas_device_info *sas_info, *next;
+
+ mutex_lock(&ioc->sas_device_info_mutex);
+ list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
+ list) {
+ list_del(&sas_info->list);
+ kfree(sas_info);
+ }
+ mutex_unlock(&ioc->sas_device_info_mutex);
+}
+
/*
* mptsas_setup_wide_ports
@@ -434,8 +870,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
* Forming a port
*/
if (!port_details) {
- port_details = kzalloc(sizeof(*port_details),
- GFP_KERNEL);
+ port_details = kzalloc(sizeof(struct
+ mptsas_portinfo_details), GFP_KERNEL);
if (!port_details)
goto out;
port_details->num_phys = 1;
@@ -523,15 +959,62 @@ mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id)
VirtTarget *vtarget = NULL;
shost_for_each_device(sdev, ioc->sh) {
- if ((vdevice = sdev->hostdata) == NULL)
+ vdevice = sdev->hostdata;
+ if ((vdevice == NULL) ||
+ (vdevice->vtarget == NULL))
+ continue;
+ if ((vdevice->vtarget->tflags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT ||
+ vdevice->vtarget->raidVolume))
continue;
if (vdevice->vtarget->id == id &&
- vdevice->vtarget->channel == channel)
+ vdevice->vtarget->channel == channel)
vtarget = vdevice->vtarget;
}
return vtarget;
}
+static void
+mptsas_queue_device_delete(MPT_ADAPTER *ioc,
+ MpiEventDataSasDeviceStatusChange_t *sas_event_data)
+{
+ struct fw_event_work *fw_event;
+ int sz;
+
+ sz = offsetof(struct fw_event_work, event_data) +
+ sizeof(MpiEventDataSasDeviceStatusChange_t);
+ fw_event = kzalloc(sz, GFP_ATOMIC);
+ if (!fw_event) {
+ printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
+ ioc->name, __func__, __LINE__);
+ return;
+ }
+ memcpy(fw_event->event_data, sas_event_data,
+ sizeof(MpiEventDataSasDeviceStatusChange_t));
+ fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE;
+ fw_event->ioc = ioc;
+ mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
+}
+
+static void
+mptsas_queue_rescan(MPT_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+ int sz;
+
+ sz = offsetof(struct fw_event_work, event_data);
+ fw_event = kzalloc(sz, GFP_ATOMIC);
+ if (!fw_event) {
+ printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
+ ioc->name, __func__, __LINE__);
+ return;
+ }
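+ /* event == -1 is a private marker meaning "rescan the topology after a
+ * host reset"; it is handled specially in mptsas_firmware_event_work(). */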
+ fw_event->event = -1;
+ fw_event->ioc = ioc;
+ mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
+}
+
+
/**
* mptsas_target_reset
*
@@ -550,13 +1033,21 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
-
- if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
- dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
- ioc->name,__func__, __LINE__));
+ if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
return 0;
+
+
+ mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
+ if (mf == NULL) {
+ dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
+ "%s, no msg frames @%d!!\n", ioc->name,
+ __func__, __LINE__));
+ goto out_fail;
}
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
+ ioc->name, mf));
+
/* Format the Request
*/
pScsiTm = (SCSITaskMgmt_t *) mf;
@@ -569,9 +1060,18 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
- mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf);
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n",
+ ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id));
+
+ mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
return 1;
+
+ out_fail:
+
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ return 0;
}
/**
@@ -602,11 +1102,12 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
vtarget->deleted = 1; /* block IO */
- target_reset_list = kzalloc(sizeof(*target_reset_list),
+ target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
GFP_ATOMIC);
if (!target_reset_list) {
- dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
- ioc->name,__func__, __LINE__));
+ dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
+ "%s, failed to allocate mem @%d..!!\n",
+ ioc->name, __func__, __LINE__));
return;
}
@@ -614,84 +1115,101 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
sizeof(*sas_event_data));
list_add_tail(&target_reset_list->list, &hd->target_reset_list);
- if (hd->resetPending)
- return;
+ target_reset_list->time_count = jiffies;
if (mptsas_target_reset(ioc, channel, id)) {
target_reset_list->target_reset_issued = 1;
- hd->resetPending = 1;
}
}
/**
- * mptsas_dev_reset_complete
- *
- * Completion for TARGET_RESET after NOT_RESPONDING_EVENT,
- * enable work queue to finish off removing device from upper layers.
- * then send next TARGET_RESET in the queue.
- *
- * @ioc
+ * mptsas_taskmgmt_complete - complete SAS task management function
+ * @ioc: Pointer to MPT_ADAPTER structure
*
+ * Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work
+ * queue to finish off removing device from upper layers. then send next
+ * TARGET_RESET in the queue.
**/
-static void
-mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
+static int
+mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
{
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
struct list_head *head = &hd->target_reset_list;
- struct mptsas_target_reset_event *target_reset_list;
- struct mptsas_hotplug_event *ev;
- EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
u8 id, channel;
- __le64 sas_address;
+ struct mptsas_target_reset_event *target_reset_list;
+ SCSITaskMgmtReply_t *pScsiTmReply;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: "
+ "(mf = %p, mr = %p)\n", ioc->name, mf, mr));
+
+ pScsiTmReply = (SCSITaskMgmtReply_t *)mr;
+ if (pScsiTmReply) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
+ "\ttask_type = 0x%02X, iocstatus = 0x%04X "
+ "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
+ "term_cmnds = %d\n", ioc->name,
+ pScsiTmReply->Bus, pScsiTmReply->TargetID,
+ pScsiTmReply->TaskType,
+ le16_to_cpu(pScsiTmReply->IOCStatus),
+ le32_to_cpu(pScsiTmReply->IOCLogInfo),
+ pScsiTmReply->ResponseCode,
+ le32_to_cpu(pScsiTmReply->TerminationCount)));
+
+ if (pScsiTmReply->ResponseCode)
+ mptscsih_taskmgmt_response_code(ioc,
+ pScsiTmReply->ResponseCode);
+ }
+
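+ /* Replies for QUERY_TASK / ABRT_TASK_SET belong to the taskmgmt_cmds
+ * path: stash the reply frame and wake the waiter instead of driving
+ * the target_reset_list state machine below. */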
+ if (pScsiTmReply && (pScsiTmReply->TaskType ==
+ MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType ==
+ MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) {
+ ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+ ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ memcpy(ioc->taskmgmt_cmds.reply, mr,
+ min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->taskmgmt_cmds.done);
+ return 1;
+ }
+ return 0;
+ }
+
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
if (list_empty(head))
- return;
+ return 1;
- target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, list);
+ target_reset_list = list_entry(head->next,
+ struct mptsas_target_reset_event, list);
- sas_event_data = &target_reset_list->sas_event_data;
- id = sas_event_data->TargetID;
- channel = sas_event_data->Bus;
- hd->resetPending = 0;
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt: completed (%d seconds)\n",
+ ioc->name, jiffies_to_msecs(jiffies -
+ target_reset_list->time_count)/1000));
+
+ id = pScsiTmReply->TargetID;
+ channel = pScsiTmReply->Bus;
+ target_reset_list->time_count = jiffies;
/*
* retry target reset
*/
if (!target_reset_list->target_reset_issued) {
- if (mptsas_target_reset(ioc, channel, id)) {
+ if (mptsas_target_reset(ioc, channel, id))
target_reset_list->target_reset_issued = 1;
- hd->resetPending = 1;
- }
- return;
+ return 1;
}
/*
* enable work queue to remove device from upper layers
*/
list_del(&target_reset_list->list);
+ if ((mptsas_find_vtarget(ioc, channel, id)) && !ioc->fw_events_off)
+ mptsas_queue_device_delete(ioc,
+ &target_reset_list->sas_event_data);
- ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
- if (!ev) {
- dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
- ioc->name,__func__, __LINE__));
- return;
- }
-
- INIT_WORK(&ev->work, mptsas_hotplug_work);
- ev->ioc = ioc;
- ev->handle = le16_to_cpu(sas_event_data->DevHandle);
- ev->parent_handle =
- le16_to_cpu(sas_event_data->ParentDevHandle);
- ev->channel = channel;
- ev->id =id;
- ev->phy_id = sas_event_data->PhyNum;
- memcpy(&sas_address, &sas_event_data->SASAddress,
- sizeof(__le64));
- ev->sas_address = le64_to_cpu(sas_address);
- ev->device_info = le32_to_cpu(sas_event_data->DeviceInfo);
- ev->event_type = MPTSAS_DEL_DEVICE;
- schedule_work(&ev->work);
- kfree(target_reset_list);
/*
* issue target reset to next device in the queue
@@ -699,34 +1217,19 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
head = &hd->target_reset_list;
if (list_empty(head))
- return;
+ return 1;
target_reset_list = list_entry(head->next, struct mptsas_target_reset_event,
list);
- sas_event_data = &target_reset_list->sas_event_data;
- id = sas_event_data->TargetID;
- channel = sas_event_data->Bus;
+ id = target_reset_list->sas_event_data.TargetID;
+ channel = target_reset_list->sas_event_data.Bus;
+ target_reset_list->time_count = jiffies;
- if (mptsas_target_reset(ioc, channel, id)) {
+ if (mptsas_target_reset(ioc, channel, id))
target_reset_list->target_reset_issued = 1;
- hd->resetPending = 1;
- }
-}
-/**
- * mptsas_taskmgmt_complete
- *
- * @ioc
- * @mf
- * @mr
- *
- **/
-static int
-mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
-{
- mptsas_dev_reset_complete(ioc);
- return mptscsih_taskmgmt_complete(ioc, mf, mr);
+ return 1;
}
/**
@@ -740,37 +1243,59 @@ static int
mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
MPT_SCSI_HOST *hd;
- struct mptsas_target_reset_event *target_reset_list, *n;
int rc;
rc = mptscsih_ioc_reset(ioc, reset_phase);
+ if ((ioc->bus_type != SAS) || (!rc))
+ return rc;
- if (ioc->bus_type != SAS)
- goto out;
-
- if (reset_phase != MPT_IOC_POST_RESET)
- goto out;
-
- if (!ioc->sh || !ioc->sh->hostdata)
- goto out;
hd = shost_priv(ioc->sh);
if (!hd->ioc)
goto out;
- if (list_empty(&hd->target_reset_list))
- goto out;
-
- /* flush the target_reset_list */
- list_for_each_entry_safe(target_reset_list, n,
- &hd->target_reset_list, list) {
- list_del(&target_reset_list->list);
- kfree(target_reset_list);
+ switch (reset_phase) {
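+ /* SETUP_RESET: stop accepting new firmware events. POST_RESET: wake any
+ * pending sas_mgmt waiter, flush the event queue, queue a full topology
+ * rescan and turn firmware events back on. */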
+ case MPT_IOC_SETUP_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+ mptsas_fw_event_off(ioc);
+ break;
+ case MPT_IOC_PRE_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT_IOC_POST_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET;
+ complete(&ioc->sas_mgmt.done);
+ }
+ mptsas_cleanup_fw_event_q(ioc);
+ mptsas_queue_rescan(ioc);
+ mptsas_fw_event_on(ioc);
+ break;
+ default:
+ break;
}
out:
return rc;
}
+
+/**
+ * enum device_state - TEST UNIT READY (TUR) state while adding a new device
+ * @DEVICE_RETRY: need to retry the TUR
+ * @DEVICE_ERROR: TUR returned an error, don't add device
+ * @DEVICE_READY: device can be added
+ *
+ */
+enum device_state{
+ DEVICE_RETRY,
+ DEVICE_ERROR,
+ DEVICE_READY,
+};
+
static int
mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
u32 form, u32 form_specific)
@@ -836,15 +1361,308 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
return error;
}
+/**
+ * mptsas_add_end_device - report a new end device to sas transport layer
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @phy_info: describes the attached device
+ *
+ * Return: 0 on success, non-zero on failure
+ *
+ **/
+static int
+mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
+{
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct sas_identify identify;
+ char *ds = NULL;
+ u8 fw_id;
+
+ if (!phy_info) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: exit at line=%d\n", ioc->name,
+ __func__, __LINE__));
+ return 1;
+ }
+
+ fw_id = phy_info->attached.id;
+
+ if (mptsas_get_rphy(phy_info)) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ return 2;
+ }
+
+ port = mptsas_get_port(phy_info);
+ if (!port) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ return 3;
+ }
+
+ if (phy_info->attached.device_info &
+ MPI_SAS_DEVICE_INFO_SSP_TARGET)
+ ds = "ssp";
+ if (phy_info->attached.device_info &
+ MPI_SAS_DEVICE_INFO_STP_TARGET)
+ ds = "stp";
+ if (phy_info->attached.device_info &
+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "sata";
+
+ printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d,"
+ " phy %d, sas_addr 0x%llx\n", ioc->name, ds,
+ phy_info->attached.channel, phy_info->attached.id,
+ phy_info->attached.phy_id, (unsigned long long)
+ phy_info->attached.sas_address);
+
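+ /* Build the sas_identify from the firmware device page, allocate an
+ * rphy on the (wide) port and register it with the SAS transport layer. */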
+ mptsas_parse_device_info(&identify, &phy_info->attached);
+ rphy = sas_end_device_alloc(port);
+ if (!rphy) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ return 5; /* non-fatal: an rphy can be added later */
+ }
+
+ rphy->identify = identify;
+ if (sas_rphy_add(rphy)) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ sas_rphy_free(rphy);
+ return 6;
+ }
+ mptsas_set_rphy(ioc, phy_info, rphy);
+ return 0;
+}
+
+/**
+ * mptsas_del_end_device - report a deleted end device to sas transport layer
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @phy_info: describes the attached device
+ *
+ **/
+static void
+mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
+{
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct mptsas_portinfo *port_info;
+ struct mptsas_phyinfo *phy_info_parent;
+ int i;
+ char *ds = NULL;
+ u8 fw_id;
+ u64 sas_address;
+
+ if (!phy_info)
+ return;
+
+ fw_id = phy_info->attached.id;
+ sas_address = phy_info->attached.sas_address;
+
+ if (!phy_info->port_details) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ return;
+ }
+ rphy = mptsas_get_rphy(phy_info);
+ if (!rphy) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ return;
+ }
+
+ if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR
+ || phy_info->attached.device_info
+ & MPI_SAS_DEVICE_INFO_SMP_INITIATOR
+ || phy_info->attached.device_info
+ & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
+ ds = "initiator";
+ if (phy_info->attached.device_info &
+ MPI_SAS_DEVICE_INFO_SSP_TARGET)
+ ds = "ssp";
+ if (phy_info->attached.device_info &
+ MPI_SAS_DEVICE_INFO_STP_TARGET)
+ ds = "stp";
+ if (phy_info->attached.device_info &
+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "sata";
+
+ dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT
+ "removing %s device: fw_channel %d, fw_id %d, phy %d,"
+ "sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel,
+ phy_info->attached.id, phy_info->attached.phy_id,
+ (unsigned long long) sas_address);
+
+ port = mptsas_get_port(phy_info);
+ if (!port) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ return;
+ }
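+ /* Detach every parent phy routed to this SAS address from the wide port
+ * before deleting the port object itself. */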
+ port_info = phy_info->portinfo;
+ phy_info_parent = port_info->phy_info;
+ for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) {
+ if (!phy_info_parent->phy)
+ continue;
+ if (phy_info_parent->attached.sas_address !=
+ sas_address)
+ continue;
+ dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev,
+ MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n",
+ ioc->name, phy_info_parent->phy_id,
+ phy_info_parent->phy);
+ sas_port_delete_phy(port, phy_info_parent->phy);
+ }
+
+ dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
+ "delete port %d, sas_addr (0x%llx)\n", ioc->name,
+ port->port_identifier, (unsigned long long)sas_address);
+ sas_port_delete(port);
+ mptsas_set_port(ioc, phy_info, NULL);
+ mptsas_port_delete(ioc, phy_info->port_details);
+}
+
+struct mptsas_phyinfo *
+mptsas_refreshing_device_handles(MPT_ADAPTER *ioc,
+ struct mptsas_devinfo *sas_device)
+{
+ struct mptsas_phyinfo *phy_info;
+ struct mptsas_portinfo *port_info;
+ int i;
+
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ sas_device->sas_address);
+ if (!phy_info)
+ goto out;
+ port_info = phy_info->portinfo;
+ if (!port_info)
+ goto out;
+ mutex_lock(&ioc->sas_topology_mutex);
+ for (i = 0; i < port_info->num_phys; i++) {
+ if (port_info->phy_info[i].attached.sas_address !=
+ sas_device->sas_address)
+ continue;
+ port_info->phy_info[i].attached.channel = sas_device->channel;
+ port_info->phy_info[i].attached.id = sas_device->id;
+ port_info->phy_info[i].attached.sas_address =
+ sas_device->sas_address;
+ port_info->phy_info[i].attached.handle = sas_device->handle;
+ port_info->phy_info[i].attached.handle_parent =
+ sas_device->handle_parent;
+ port_info->phy_info[i].attached.handle_enclosure =
+ sas_device->handle_enclosure;
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
+ out:
+ return phy_info;
+}
+
+/**
+ * mptsas_firmware_event_work - work thread for processing fw events
+ * @work: work queue payload containing info describing the event
+ * Context: user
+ *
+ */
+static void
+mptsas_firmware_event_work(struct work_struct *work)
+{
+ struct fw_event_work *fw_event =
+ container_of(work, struct fw_event_work, work.work);
+ MPT_ADAPTER *ioc = fw_event->ioc;
+
+ /* special rescan topology handling */
+ if (fw_event->event == -1) {
+ if (ioc->in_rescan) {
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: rescan ignored as it is in progress\n",
+ ioc->name, __func__));
+ return;
+ }
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after "
+ "reset\n", ioc->name, __func__));
+ ioc->in_rescan = 1;
+ mptsas_not_responding_devices(ioc);
+ mptsas_scan_sas_topology(ioc);
+ ioc->in_rescan = 0;
+ mptsas_free_fw_event(ioc, fw_event);
+ return;
+ }
+
+ /* events handling turned off during host reset */
+ if (ioc->fw_events_off) {
+ mptsas_free_fw_event(ioc, fw_event);
+ return;
+ }
+
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), "
+ "event = (0x%02x)\n", ioc->name, __func__, fw_event,
+ (fw_event->event & 0xFF)));
+
+ switch (fw_event->event) {
+ case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ mptsas_send_sas_event(fw_event);
+ break;
+ case MPI_EVENT_INTEGRATED_RAID:
+ mptsas_send_raid_event(fw_event);
+ break;
+ case MPI_EVENT_IR2:
+ mptsas_send_ir2_event(fw_event);
+ break;
+ case MPI_EVENT_PERSISTENT_TABLE_FULL:
+ mptbase_sas_persist_operation(ioc,
+ MPI_SAS_OP_CLEAR_NOT_PRESENT);
+ mptsas_free_fw_event(ioc, fw_event);
+ break;
+ case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
+ mptsas_broadcast_primative_work(fw_event);
+ break;
+ case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
+ mptsas_send_expander_event(fw_event);
+ break;
+ case MPI_EVENT_SAS_PHY_LINK_STATUS:
+ mptsas_send_link_status_event(fw_event);
+ break;
+ case MPI_EVENT_QUEUE_FULL:
+ mptsas_handle_queue_full_event(fw_event);
+ break;
+ }
+}
+
+
+
static int
mptsas_slave_configure(struct scsi_device *sdev)
{
+ struct Scsi_Host *host = sdev->host;
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+ VirtDevice *vdevice = sdev->hostdata;
- if (sdev->channel == MPTSAS_RAID_CHANNEL)
+ if (vdevice->vtarget->deleted) {
+ sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n");
+ vdevice->vtarget->deleted = 0;
+ }
+
+ /*
+ * RAID volumes are placed beyond the last expected port;
+ * don't send SAS mode pages for them.
+ */
+ if (sdev->channel == MPTSAS_RAID_CHANNEL) {
+ mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev));
goto out;
+ }
sas_read_port_mode_page(sdev);
+ mptsas_add_device_component_starget(ioc, scsi_target(sdev));
+
out:
return mptscsih_slave_configure(sdev);
}
@@ -875,9 +1693,18 @@ mptsas_target_alloc(struct scsi_target *starget)
* RAID volumes placed beyond the last expected port.
*/
if (starget->channel == MPTSAS_RAID_CHANNEL) {
- for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
- if (id == ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID)
- channel = ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus;
+ if (!ioc->raid_data.pIocPg2) {
+ kfree(vtarget);
+ return -ENXIO;
+ }
+ for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
+ if (id == ioc->raid_data.pIocPg2->
+ RaidVolume[i].VolumeID) {
+ channel = ioc->raid_data.pIocPg2->
+ RaidVolume[i].VolumeBus;
+ }
+ }
+ vtarget->raidVolume = 1;
goto out;
}
@@ -926,11 +1753,18 @@ mptsas_target_destroy(struct scsi_target *starget)
struct sas_rphy *rphy;
struct mptsas_portinfo *p;
int i;
- MPT_ADAPTER *ioc = hd->ioc;
+ MPT_ADAPTER *ioc = hd->ioc;
+ VirtTarget *vtarget;
if (!starget->hostdata)
return;
+ vtarget = starget->hostdata;
+
+ mptsas_del_device_component_by_os(ioc, starget->channel,
+ starget->id);
+
+
if (starget->channel == MPTSAS_RAID_CHANNEL)
goto out;
@@ -940,12 +1774,21 @@ mptsas_target_destroy(struct scsi_target *starget)
if (p->phy_info[i].attached.sas_address !=
rphy->identify.sas_address)
continue;
+
+ starget_printk(KERN_INFO, starget, MYIOC_s_FMT
+ "delete device: fw_channel %d, fw_id %d, phy %d, "
+ "sas_addr 0x%llx\n", ioc->name,
+ p->phy_info[i].attached.channel,
+ p->phy_info[i].attached.id,
+ p->phy_info[i].attached.phy_id, (unsigned long long)
+ p->phy_info[i].attached.sas_address);
+
mptsas_set_starget(&p->phy_info[i], NULL);
- goto out;
}
}
out:
+ vtarget->starget = NULL;
kfree(starget->hostdata);
starget->hostdata = NULL;
}
@@ -1008,6 +1851,8 @@ mptsas_slave_alloc(struct scsi_device *sdev)
static int
mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
+ MPT_SCSI_HOST *hd;
+ MPT_ADAPTER *ioc;
VirtDevice *vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
@@ -1016,6 +1861,12 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
return 0;
}
+ hd = shost_priv(SCpnt->device->host);
+ ioc = hd->ioc;
+
+ if (ioc->sas_discovery_quiesce_io)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
// scsi_print_command(SCpnt);
return mptscsih_qcmd(SCpnt,done);
@@ -1114,14 +1965,19 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
MPT_FRAME_HDR *reply)
{
- ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_COMMAND_GOOD;
+ ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
if (reply != NULL) {
- ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_RF_VALID;
+ ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID;
memcpy(ioc->sas_mgmt.reply, reply,
min(ioc->reply_sz, 4 * reply->u.reply.MsgLength));
}
- complete(&ioc->sas_mgmt.done);
- return 1;
+
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->sas_mgmt.done);
+ return 1;
+ }
+ return 0;
}
static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
@@ -1160,6 +2016,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET;
req->PhyNum = phy->identify.phy_identifier;
+ INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
@@ -1174,7 +2031,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
/* a reply frame is expected */
if ((ioc->sas_mgmt.status &
- MPT_IOCTL_STATUS_RF_VALID) == 0) {
+ MPT_MGMT_STATUS_RF_VALID) == 0) {
error = -ENXIO;
goto out_unlock;
}
@@ -1191,6 +2048,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
error = 0;
out_unlock:
+ CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
mutex_unlock(&ioc->sas_mgmt.mutex);
out:
return error;
@@ -1277,8 +2135,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
- ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
- rsp->bio->bi_vcnt, rsp->data_len);
+ ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
+ rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
return -EINVAL;
}
@@ -1295,7 +2153,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
smpreq = (SmpPassthroughRequest_t *)mf;
memset(smpreq, 0, sizeof(*smpreq));
- smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4);
+ smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
if (rphy)
@@ -1304,7 +2162,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct mptsas_portinfo *port_info;
mutex_lock(&ioc->sas_topology_mutex);
- port_info = mptsas_get_hba_portinfo(ioc);
+ port_info = ioc->hba_port_info;
if (port_info && port_info->phy_info)
sas_address =
port_info->phy_info[0].phy->identify.sas_address;
@@ -1319,26 +2177,32 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* request */
flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_END_OF_BUFFER |
- MPI_SGE_FLAGS_DIRECTION |
- mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT;
- flagsLength |= (req->data_len - 4);
+ MPI_SGE_FLAGS_DIRECTION)
+ << MPI_SGE_FLAGS_SHIFT;
+ flagsLength |= (blk_rq_bytes(req) - 4);
dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
- req->data_len, PCI_DMA_BIDIRECTIONAL);
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_out)
goto put_mf;
- mpt_add_sge(psge, flagsLength, dma_addr_out);
- psge += (sizeof(u32) + sizeof(dma_addr_t));
+ ioc->add_sge(psge, flagsLength, dma_addr_out);
+ psge += ioc->SGE_size;
/* response */
- flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
- flagsLength |= rsp->data_len + 4;
+ flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+ MPI_SGE_FLAGS_IOC_TO_HOST |
+ MPI_SGE_FLAGS_END_OF_BUFFER;
+
+ flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
+ flagsLength |= blk_rq_bytes(rsp) + 4;
dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
- rsp->data_len, PCI_DMA_BIDIRECTIONAL);
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_in)
goto unmap;
- mpt_add_sge(psge, flagsLength, dma_addr_in);
+ ioc->add_sge(psge, flagsLength, dma_addr_in);
+ INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
@@ -1351,30 +2215,32 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
mf = NULL;
- if (ioc->sas_mgmt.status & MPT_IOCTL_STATUS_RF_VALID) {
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
SmpPassthroughReply_t *smprep;
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
memcpy(req->sense, smprep, sizeof(*smprep));
req->sense_len = sizeof(*smprep);
- req->data_len = 0;
- rsp->data_len -= smprep->ResponseDataLength;
+ req->resid_len = 0;
+ rsp->resid_len -= smprep->ResponseDataLength;
} else {
- printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
+ printk(MYIOC_s_ERR_FMT
+ "%s: smp passthru reply failed to be returned\n",
ioc->name, __func__);
ret = -ENXIO;
}
unmap:
if (dma_addr_out)
- pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len,
+ pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
PCI_DMA_BIDIRECTIONAL);
if (dma_addr_in)
- pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len,
+ pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
PCI_DMA_BIDIRECTIONAL);
put_mf:
if (mf)
mpt_free_msg_frame(ioc, mf);
out_unlock:
+ CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
mutex_unlock(&ioc->sas_mgmt.mutex);
out:
return ret;
@@ -1438,7 +2304,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
port_info->num_phys = buffer->NumPhys;
port_info->phy_info = kcalloc(port_info->num_phys,
- sizeof(*port_info->phy_info),GFP_KERNEL);
+ sizeof(struct mptsas_phyinfo), GFP_KERNEL);
if (!port_info->phy_info) {
error = -ENOMEM;
goto out_free_consistent;
@@ -1600,10 +2466,6 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
__le64 sas_address;
int error=0;
- if (ioc->sas_discovery_runtime &&
- mptsas_is_end_device(device_info))
- goto out;
-
hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 0;
@@ -1644,6 +2506,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
mptsas_print_device_pg0(ioc, buffer);
+ memset(device_info, 0, sizeof(struct mptsas_devinfo));
device_info->handle = le16_to_cpu(buffer->DevHandle);
device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle);
device_info->handle_enclosure =
@@ -1675,7 +2538,9 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
SasExpanderPage0_t *buffer;
dma_addr_t dma_handle;
int i, error;
+ __le64 sas_address;
+ memset(port_info, 0, sizeof(struct mptsas_portinfo));
hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 0;
@@ -1721,18 +2586,23 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
}
/* save config data */
- port_info->num_phys = buffer->NumPhys;
+ port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
port_info->phy_info = kcalloc(port_info->num_phys,
- sizeof(*port_info->phy_info),GFP_KERNEL);
+ sizeof(struct mptsas_phyinfo), GFP_KERNEL);
if (!port_info->phy_info) {
error = -ENOMEM;
goto out_free_consistent;
}
+ memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
for (i = 0; i < port_info->num_phys; i++) {
port_info->phy_info[i].portinfo = port_info;
port_info->phy_info[i].handle =
le16_to_cpu(buffer->DevHandle);
+ port_info->phy_info[i].identify.sas_address =
+ le64_to_cpu(sas_address);
+ port_info->phy_info[i].identify.handle_parent =
+ le16_to_cpu(buffer->ParentDevHandle);
}
out_free_consistent:
@@ -1752,11 +2622,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
dma_addr_t dma_handle;
int error=0;
- if (ioc->sas_discovery_runtime &&
- mptsas_is_end_device(&phy_info->attached))
- goto out;
-
- hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
+ hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 1;
hdr.Reserved1 = 0;
@@ -1791,6 +2657,12 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
+
+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
+ error = -ENODEV;
+ goto out;
+ }
+
if (error)
goto out_free_consistent;
@@ -2010,16 +2882,21 @@ static int mptsas_probe_one_phy(struct device *dev,
goto out;
}
mptsas_set_port(ioc, phy_info, port);
- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "sas_port_alloc: port=%p dev=%p port_id=%d\n",
- ioc->name, port, dev, port->port_identifier));
+ devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev,
+ MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n",
+ ioc->name, port->port_identifier,
+ (unsigned long long)phy_info->
+ attached.sas_address));
}
- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_port_add_phy: phy_id=%d\n",
- ioc->name, phy_info->phy_id));
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "sas_port_add_phy: phy_id=%d\n",
+ ioc->name, phy_info->phy_id));
sas_port_add_phy(port, phy_info->phy);
phy_info->sas_port_add_phy = 0;
+ devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev,
+ MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name,
+ phy_info->phy_id, phy_info->phy));
}
-
if (!mptsas_get_rphy(phy_info) && port && !port->rphy) {
struct sas_rphy *rphy;
@@ -2032,18 +2909,17 @@ static int mptsas_probe_one_phy(struct device *dev,
* the adding/removing of devices that occur
* after start of day.
*/
- if (ioc->sas_discovery_runtime &&
- mptsas_is_end_device(&phy_info->attached))
- goto out;
+ if (mptsas_is_end_device(&phy_info->attached) &&
+ phy_info->attached.handle_parent) {
+ goto out;
+ }
mptsas_parse_device_info(&identify, &phy_info->attached);
if (scsi_is_host_device(parent)) {
struct mptsas_portinfo *port_info;
int i;
- mutex_lock(&ioc->sas_topology_mutex);
- port_info = mptsas_get_hba_portinfo(ioc);
- mutex_unlock(&ioc->sas_topology_mutex);
+ port_info = ioc->hba_port_info;
for (i = 0; i < port_info->num_phys; i++)
if (port_info->phy_info[i].identify.sas_address ==
@@ -2102,7 +2978,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
struct mptsas_portinfo *port_info, *hba;
int error = -ENOMEM, i;
- hba = kzalloc(sizeof(*port_info), GFP_KERNEL);
+ hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
if (! hba)
goto out;
@@ -2112,9 +2988,10 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
mptsas_sas_io_unit_pg1(ioc);
mutex_lock(&ioc->sas_topology_mutex);
- port_info = mptsas_get_hba_portinfo(ioc);
+ port_info = ioc->hba_port_info;
if (!port_info) {
- port_info = hba;
+ ioc->hba_port_info = port_info = hba;
+ ioc->hba_port_num_phy = port_info->num_phys;
list_add_tail(&port_info->list, &ioc->sas_topology);
} else {
for (i = 0; i < hba->num_phys; i++) {
@@ -2130,15 +3007,22 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
hba = NULL;
}
mutex_unlock(&ioc->sas_topology_mutex);
+#if defined(CPQ_CIM)
+ ioc->num_ports = port_info->num_phys;
+#endif
for (i = 0; i < port_info->num_phys; i++) {
mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
(MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
-
+ port_info->phy_info[i].identify.handle =
+ port_info->phy_info[i].handle;
mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
- port_info->phy_info[i].handle);
+ port_info->phy_info[i].identify.handle);
+ if (!ioc->hba_port_sas_addr)
+ ioc->hba_port_sas_addr =
+ port_info->phy_info[i].identify.sas_address;
port_info->phy_info[i].identify.phy_id =
port_info->phy_info[i].phy_id = i;
if (port_info->phy_info[i].attached.handle)
@@ -2163,248 +3047,721 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
return error;
}
-static int
-mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle)
+static void
+mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
{
- struct mptsas_portinfo *port_info, *p, *ex;
- struct device *parent;
- struct sas_rphy *rphy;
- int error = -ENOMEM, i, j;
-
- ex = kzalloc(sizeof(*port_info), GFP_KERNEL);
- if (!ex)
- goto out;
-
- error = mptsas_sas_expander_pg0(ioc, ex,
- (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
- MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
- if (error)
- goto out_free_port_info;
-
- *handle = ex->phy_info[0].handle;
-
- mutex_lock(&ioc->sas_topology_mutex);
- port_info = mptsas_find_portinfo_by_handle(ioc, *handle);
- if (!port_info) {
- port_info = ex;
- list_add_tail(&port_info->list, &ioc->sas_topology);
- } else {
- for (i = 0; i < ex->num_phys; i++) {
- port_info->phy_info[i].handle =
- ex->phy_info[i].handle;
- port_info->phy_info[i].port_id =
- ex->phy_info[i].port_id;
- }
- kfree(ex->phy_info);
- kfree(ex);
- ex = NULL;
- }
- mutex_unlock(&ioc->sas_topology_mutex);
-
+ struct mptsas_portinfo *parent;
+ struct device *parent_dev;
+ struct sas_rphy *rphy;
+ int i;
+ u64 sas_address; /* expander sas address */
+ u32 handle;
+
+ handle = port_info->phy_info[0].handle;
+ sas_address = port_info->phy_info[0].identify.sas_address;
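+ /* Refresh expander page 1 and device page 0 for every phy, then look up
+ * the parent rphy so the attached devices are probed under the correct
+ * parent node. */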
for (i = 0; i < port_info->num_phys; i++) {
mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
- (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
- MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle);
-
- if (port_info->phy_info[i].identify.handle) {
- mptsas_sas_device_pg0(ioc,
- &port_info->phy_info[i].identify,
- (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
- MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
- port_info->phy_info[i].identify.handle);
- port_info->phy_info[i].identify.phy_id =
- port_info->phy_info[i].phy_id;
- }
+ (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle);
+
+ mptsas_sas_device_pg0(ioc,
+ &port_info->phy_info[i].identify,
+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ port_info->phy_info[i].identify.handle);
+ port_info->phy_info[i].identify.phy_id =
+ port_info->phy_info[i].phy_id;
if (port_info->phy_info[i].attached.handle) {
mptsas_sas_device_pg0(ioc,
- &port_info->phy_info[i].attached,
- (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
- MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
- port_info->phy_info[i].attached.handle);
+ &port_info->phy_info[i].attached,
+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ port_info->phy_info[i].attached.handle);
port_info->phy_info[i].attached.phy_id =
port_info->phy_info[i].phy_id;
}
}
- parent = &ioc->sh->shost_gendev;
- for (i = 0; i < port_info->num_phys; i++) {
- mutex_lock(&ioc->sas_topology_mutex);
- list_for_each_entry(p, &ioc->sas_topology, list) {
- for (j = 0; j < p->num_phys; j++) {
- if (port_info->phy_info[i].identify.handle !=
- p->phy_info[j].attached.handle)
- continue;
- rphy = mptsas_get_rphy(&p->phy_info[j]);
- parent = &rphy->dev;
- }
- }
+ mutex_lock(&ioc->sas_topology_mutex);
+ parent = mptsas_find_portinfo_by_handle(ioc,
+ port_info->phy_info[0].identify.handle_parent);
+ if (!parent) {
mutex_unlock(&ioc->sas_topology_mutex);
+ return;
}
+ for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev;
+ i++) {
+ if (parent->phy_info[i].attached.sas_address == sas_address) {
+ rphy = mptsas_get_rphy(&parent->phy_info[i]);
+ parent_dev = &rphy->dev;
+ }
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
mptsas_setup_wide_ports(ioc, port_info);
-
for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
- mptsas_probe_one_phy(parent, &port_info->phy_info[i],
+ mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i],
ioc->sas_index, 0);
+}
- return 0;
+static void
+mptsas_expander_event_add(MPT_ADAPTER *ioc,
+ MpiEventDataSasExpanderStatusChange_t *expander_data)
+{
+ struct mptsas_portinfo *port_info;
+ int i;
+ __le64 sas_address;
- out_free_port_info:
- if (ex) {
- kfree(ex->phy_info);
- kfree(ex);
+ port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
+ if (!port_info)
+ BUG();
+ port_info->num_phys = (expander_data->NumPhys) ?
+ expander_data->NumPhys : 1;
+ port_info->phy_info = kcalloc(port_info->num_phys,
+ sizeof(struct mptsas_phyinfo), GFP_KERNEL);
+ if (!port_info->phy_info)
+ BUG();
+ memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
+ for (i = 0; i < port_info->num_phys; i++) {
+ port_info->phy_info[i].portinfo = port_info;
+ port_info->phy_info[i].handle =
+ le16_to_cpu(expander_data->DevHandle);
+ port_info->phy_info[i].identify.sas_address =
+ le64_to_cpu(sas_address);
+ port_info->phy_info[i].identify.handle_parent =
+ le16_to_cpu(expander_data->ParentDevHandle);
+ }
+
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_add_tail(&port_info->list, &ioc->sas_topology);
+ mutex_unlock(&ioc->sas_topology_mutex);
+
+ printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
+ "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
+ (unsigned long long)sas_address);
+
+ mptsas_expander_refresh(ioc, port_info);
+}
+
+/**
+ * mptsas_delete_expander_siblings - remove siblings attached to expander
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @parent: the parent port_info object
+ * @expander: the expander port_info object
+ **/
+static void
+mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo
+ *parent, struct mptsas_portinfo *expander)
+{
+ struct mptsas_phyinfo *phy_info;
+ struct mptsas_portinfo *port_info;
+ struct sas_rphy *rphy;
+ int i;
+
+ phy_info = expander->phy_info;
+ for (i = 0; i < expander->num_phys; i++, phy_info++) {
+ rphy = mptsas_get_rphy(phy_info);
+ if (!rphy)
+ continue;
+ if (rphy->identify.device_type == SAS_END_DEVICE)
+ mptsas_del_end_device(ioc, phy_info);
+ }
+
+ phy_info = expander->phy_info;
+ for (i = 0; i < expander->num_phys; i++, phy_info++) {
+ rphy = mptsas_get_rphy(phy_info);
+ if (!rphy)
+ continue;
+ if (rphy->identify.device_type ==
+ MPI_SAS_DEVICE_INFO_EDGE_EXPANDER ||
+ rphy->identify.device_type ==
+ MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
+ port_info = mptsas_find_portinfo_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (!port_info)
+ continue;
+ if (port_info == parent) /* backlink rphy */
+ continue;
+ /*
+ * Delete this expander even if its expander device page still
+ * exists, because the parent expander has already been deleted.
+ */
+ mptsas_expander_delete(ioc, port_info, 1);
+ }
+ }
+}
+
+
+/**
+ * mptsas_expander_delete - remove this expander
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @port_info: expander port_info struct
+ * @force: Flag to forcefully delete the expander
+ *
+ **/
+
+static void mptsas_expander_delete(MPT_ADAPTER *ioc,
+ struct mptsas_portinfo *port_info, u8 force)
+{
+
+ struct mptsas_portinfo *parent;
+ int i;
+ u64 expander_sas_address;
+ struct mptsas_phyinfo *phy_info;
+ struct mptsas_portinfo buffer;
+ struct mptsas_portinfo_details *port_details;
+ struct sas_port *port;
+
+ if (!port_info)
+ return;
+
+ /* see if expander is still there before deleting */
+ mptsas_sas_expander_pg0(ioc, &buffer,
+ (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
+ port_info->phy_info[0].identify.handle);
+
+ if (buffer.num_phys) {
+ kfree(buffer.phy_info);
+ if (!force)
+ return;
+ }
+
+
+ /*
+ * Obtain the port_info instance to the parent port
+ */
+ port_details = NULL;
+ expander_sas_address =
+ port_info->phy_info[0].identify.sas_address;
+ parent = mptsas_find_portinfo_by_handle(ioc,
+ port_info->phy_info[0].identify.handle_parent);
+ mptsas_delete_expander_siblings(ioc, parent, port_info);
+ if (!parent)
+ goto out;
+
+ /*
+ * Delete rphys in the parent that point
+ * to this expander.
+ */
+ phy_info = parent->phy_info;
+ port = NULL;
+ for (i = 0; i < parent->num_phys; i++, phy_info++) {
+ if (!phy_info->phy)
+ continue;
+ if (phy_info->attached.sas_address !=
+ expander_sas_address)
+ continue;
+ if (!port) {
+ port = mptsas_get_port(phy_info);
+ port_details = phy_info->port_details;
+ }
+ dev_printk(KERN_DEBUG, &phy_info->phy->dev,
+ MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name,
+ phy_info->phy_id, phy_info->phy);
+ sas_port_delete_phy(port, phy_info->phy);
+ }
+ if (port) {
+ dev_printk(KERN_DEBUG, &port->dev,
+ MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n",
+ ioc->name, port->port_identifier,
+ (unsigned long long)expander_sas_address);
+ sas_port_delete(port);
+ mptsas_port_delete(ioc, port_details);
}
out:
- return error;
+
+ printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, "
+ "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
+ (unsigned long long)expander_sas_address);
+
+ /*
+ * free link
+ */
+ list_del(&port_info->list);
+ kfree(port_info->phy_info);
+ kfree(port_info);
}
-/*
- * mptsas_delete_expander_phys
+
+/**
+ * mptsas_send_expander_event - handle expander status change events
+ * @fw_event: fw event work object carrying the expander event data
*
*
- * This will traverse topology, and remove expanders
- * that are no longer present
+ * This function handles adding, removing, and refreshing
+ * device handles within the expander objects.
*/
static void
-mptsas_delete_expander_phys(MPT_ADAPTER *ioc)
+mptsas_send_expander_event(struct fw_event_work *fw_event)
{
- struct mptsas_portinfo buffer;
- struct mptsas_portinfo *port_info, *n, *parent;
- struct mptsas_phyinfo *phy_info;
- struct sas_port * port;
+ MPT_ADAPTER *ioc;
+ MpiEventDataSasExpanderStatusChange_t *expander_data;
+ struct mptsas_portinfo *port_info;
+ __le64 sas_address;
int i;
- u64 expander_sas_address;
+ ioc = fw_event->ioc;
+ expander_data = (MpiEventDataSasExpanderStatusChange_t *)
+ fw_event->event_data;
+ memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
+ port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
+
+ if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
+ if (port_info) {
+ for (i = 0; i < port_info->num_phys; i++) {
+ port_info->phy_info[i].portinfo = port_info;
+ port_info->phy_info[i].handle =
+ le16_to_cpu(expander_data->DevHandle);
+ port_info->phy_info[i].identify.sas_address =
+ le64_to_cpu(sas_address);
+ port_info->phy_info[i].identify.handle_parent =
+ le16_to_cpu(expander_data->ParentDevHandle);
+ }
+ mptsas_expander_refresh(ioc, port_info);
+ } else if (!port_info && expander_data->NumPhys)
+ mptsas_expander_event_add(ioc, expander_data);
+ } else if (expander_data->ReasonCode ==
+ MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
+ mptsas_expander_delete(ioc, port_info, 0);
+
+ mptsas_free_fw_event(ioc, fw_event);
+}
+
+
+/**
+ * mptsas_expander_add - add a new expander found at the given handle
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @handle: firmware device handle of the expander
+ *
+ */
+struct mptsas_portinfo *
+mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
+{
+ struct mptsas_portinfo buffer, *port_info;
+ int i;
+
+ if ((mptsas_sas_expander_pg0(ioc, &buffer,
+ (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
+ return NULL;
+
+ port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
+ if (!port_info) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: exit at line=%d\n", ioc->name,
+ __func__, __LINE__));
+ return NULL;
+ }
+ port_info->num_phys = buffer.num_phys;
+ port_info->phy_info = buffer.phy_info;
+ for (i = 0; i < port_info->num_phys; i++)
+ port_info->phy_info[i].portinfo = port_info;
mutex_lock(&ioc->sas_topology_mutex);
- list_for_each_entry_safe(port_info, n, &ioc->sas_topology, list) {
+ list_add_tail(&port_info->list, &ioc->sas_topology);
+ mutex_unlock(&ioc->sas_topology_mutex);
+ printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
+ "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
+ (unsigned long long)buffer.phy_info[0].identify.sas_address);
+ mptsas_expander_refresh(ioc, port_info);
+ return port_info;
+}
- if (!(port_info->phy_info[0].identify.device_info &
- MPI_SAS_DEVICE_INFO_SMP_TARGET))
+static void
+mptsas_send_link_status_event(struct fw_event_work *fw_event)
+{
+ MPT_ADAPTER *ioc;
+ MpiEventDataSasPhyLinkStatus_t *link_data;
+ struct mptsas_portinfo *port_info;
+ struct mptsas_phyinfo *phy_info = NULL;
+ __le64 sas_address;
+ u8 phy_num;
+ u8 link_rate;
+
+ ioc = fw_event->ioc;
+ link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data;
+
+ memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64));
+ sas_address = le64_to_cpu(sas_address);
+ link_rate = link_data->LinkRates >> 4;
+ phy_num = link_data->PhyNum;
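+ /* A link coming up at 1.5 or 3.0 Gbps triggers a (re)probe of the owning
+ * port; any other rate only updates the negotiated link rate reported
+ * through the SAS transport layer. */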
+
+ port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
+ if (port_info) {
+ phy_info = &port_info->phy_info[phy_num];
+ if (phy_info)
+ phy_info->negotiated_link_rate = link_rate;
+ }
+
+ if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
+ link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
+
+ if (!port_info) {
+ if (ioc->old_sas_discovery_protocal) {
+ port_info = mptsas_expander_add(ioc,
+ le16_to_cpu(link_data->DevHandle));
+ if (port_info)
+ goto out;
+ }
+ goto out;
+ }
+
+ if (port_info == ioc->hba_port_info)
+ mptsas_probe_hba_phys(ioc);
+ else
+ mptsas_expander_refresh(ioc, port_info);
+ } else if (phy_info && phy_info->phy) {
+ if (link_rate == MPI_SAS_IOUNIT0_RATE_PHY_DISABLED)
+ phy_info->phy->negotiated_linkrate =
+ SAS_PHY_DISABLED;
+ else if (link_rate ==
+ MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
+ phy_info->phy->negotiated_linkrate =
+ SAS_LINK_RATE_FAILED;
+ else
+ phy_info->phy->negotiated_linkrate =
+ SAS_LINK_RATE_UNKNOWN;
+ }
+ out:
+ mptsas_free_fw_event(ioc, fw_event);
+}
+
+static void
+mptsas_not_responding_devices(MPT_ADAPTER *ioc)
+{
+ struct mptsas_portinfo buffer, *port_info;
+ struct mptsas_device_info *sas_info;
+ struct mptsas_devinfo sas_device;
+ u32 handle;
+ VirtTarget *vtarget = NULL;
+ struct mptsas_phyinfo *phy_info;
+ u8 found_expander;
+ int retval, retry_count;
+ unsigned long flags;
+
+ mpt_findImVolumes(ioc);
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: exiting due to a parallel reset \n", ioc->name,
+ __func__));
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ /* devices, logical volumes */
+ mutex_lock(&ioc->sas_device_info_mutex);
+ redo_device_scan:
+ list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
+ if (sas_info->is_cached)
continue;
+ if (!sas_info->is_logical_volume) {
+ sas_device.handle = 0;
+ retry_count = 0;
+retry_page:
+ retval = mptsas_sas_device_pg0(ioc, &sas_device,
+ (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID
+ << MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ (sas_info->fw.channel << 8) +
+ sas_info->fw.id);
+
+ if (sas_device.handle)
+ continue;
+ if (retval == -EBUSY) {
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ dfailprintk(ioc,
+ printk(MYIOC_s_DEBUG_FMT
+ "%s: exiting due to reset\n",
+ ioc->name, __func__));
+ spin_unlock_irqrestore
+ (&ioc->taskmgmt_lock, flags);
+ mutex_unlock(&ioc->
+ sas_device_info_mutex);
+ return;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock,
+ flags);
+ }
- if (mptsas_sas_expander_pg0(ioc, &buffer,
- (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
- MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
- port_info->phy_info[0].handle)) {
+ if (retval && (retval != -ENODEV)) {
+ if (retry_count < 10) {
+ retry_count++;
+ goto retry_page;
+ } else {
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: Config page retry exceeded retry "
+ "count deleting device 0x%llx\n",
+ ioc->name, __func__,
+ sas_info->sas_address));
+ }
+ }
- /*
- * Obtain the port_info instance to the parent port
- */
- parent = mptsas_find_portinfo_by_handle(ioc,
- port_info->phy_info[0].identify.handle_parent);
+ /* delete device */
+ vtarget = mptsas_find_vtarget(ioc,
+ sas_info->fw.channel, sas_info->fw.id);
- if (!parent)
- goto next_port;
+ if (vtarget)
+ vtarget->deleted = 1;
- expander_sas_address =
- port_info->phy_info[0].identify.sas_address;
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ sas_info->sas_address);
- /*
- * Delete rphys in the parent that point
- * to this expander. The transport layer will
- * cleanup all the children.
- */
- phy_info = parent->phy_info;
- for (i = 0; i < parent->num_phys; i++, phy_info++) {
- port = mptsas_get_port(phy_info);
- if (!port)
- continue;
- if (phy_info->attached.sas_address !=
- expander_sas_address)
- continue;
- dsaswideprintk(ioc,
- dev_printk(KERN_DEBUG, &port->dev,
- MYIOC_s_FMT "delete port (%d)\n", ioc->name,
- port->port_identifier));
- sas_port_delete(port);
- mptsas_port_delete(ioc, phy_info->port_details);
+ if (phy_info) {
+ mptsas_del_end_device(ioc, phy_info);
+ goto redo_device_scan;
}
- next_port:
+ } else
+ mptsas_volume_delete(ioc, sas_info->fw.id);
+ }
+ mutex_lock(&ioc->sas_device_info_mutex);
- phy_info = port_info->phy_info;
- for (i = 0; i < port_info->num_phys; i++, phy_info++)
- mptsas_port_delete(ioc, phy_info->port_details);
+ /* expanders */
+ mutex_lock(&ioc->sas_topology_mutex);
+ redo_expander_scan:
+ list_for_each_entry(port_info, &ioc->sas_topology, list) {
- list_del(&port_info->list);
- kfree(port_info->phy_info);
- kfree(port_info);
+ if (port_info->phy_info &&
+ (!(port_info->phy_info[0].identify.device_info &
+ MPI_SAS_DEVICE_INFO_SMP_TARGET)))
+ continue;
+ found_expander = 0;
+ handle = 0xFFFF;
+ while (!mptsas_sas_expander_pg0(ioc, &buffer,
+ (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) &&
+ !found_expander) {
+
+ handle = buffer.phy_info[0].handle;
+ if (buffer.phy_info[0].identify.sas_address ==
+ port_info->phy_info[0].identify.sas_address) {
+ found_expander = 1;
+ }
+ kfree(buffer.phy_info);
+ }
+
+ if (!found_expander) {
+ mptsas_expander_delete(ioc, port_info, 0);
+ goto redo_expander_scan;
}
- /*
- * Free this memory allocated from inside
- * mptsas_sas_expander_pg0
- */
- kfree(buffer.phy_info);
}
- mutex_unlock(&ioc->sas_topology_mutex);
+ mutex_lock(&ioc->sas_topology_mutex);
+}
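/*
 * [Editor's note -- illustrative sketch, not part of the patch] The scans
 * above jump back to redo_device_scan / redo_expander_scan whenever they
 * delete the entry the iterator is standing on: list_for_each_entry() must
 * not keep walking once its cursor has been unlinked and freed.  Restarting
 * from the list head is also safe if the delete helper removes more than
 * one entry.  Generic form of the pattern (all names hypothetical):
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_node {
	struct list_head list;
	int stale;
};

static void example_prune(struct list_head *head)
{
	struct example_node *n;

 restart:
	list_for_each_entry(n, head, list) {
		if (!n->stale)
			continue;
		list_del(&n->list);	/* the iterator is now invalid */
		kfree(n);
		goto restart;		/* resume from a valid cursor */
	}
}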
+
+/**
+ * mptsas_probe_expanders - add or refresh expanders found in the SAS topology
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+static void
+mptsas_probe_expanders(MPT_ADAPTER *ioc)
+{
+ struct mptsas_portinfo buffer, *port_info;
+ u32 handle;
+ int i;
+
+ handle = 0xFFFF;
+ while (!mptsas_sas_expander_pg0(ioc, &buffer,
+ (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
+
+ handle = buffer.phy_info[0].handle;
+ port_info = mptsas_find_portinfo_by_sas_address(ioc,
+ buffer.phy_info[0].identify.sas_address);
+
+ if (port_info) {
+ /* refreshing handles */
+ for (i = 0; i < buffer.num_phys; i++) {
+ port_info->phy_info[i].handle = handle;
+ port_info->phy_info[i].identify.handle_parent =
+ buffer.phy_info[0].identify.handle_parent;
+ }
+ mptsas_expander_refresh(ioc, port_info);
+ kfree(buffer.phy_info);
+ continue;
+ }
+
+ port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
+ if (!port_info) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: exit at line=%d\n", ioc->name,
+ __func__, __LINE__));
+ return;
+ }
+ port_info->num_phys = buffer.num_phys;
+ port_info->phy_info = buffer.phy_info;
+ for (i = 0; i < port_info->num_phys; i++)
+ port_info->phy_info[i].portinfo = port_info;
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_add_tail(&port_info->list, &ioc->sas_topology);
+ mutex_unlock(&ioc->sas_topology_mutex);
+ printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
+ "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
+ (unsigned long long)buffer.phy_info[0].identify.sas_address);
+ mptsas_expander_refresh(ioc, port_info);
+ }
}
-/*
- * Start of day discovery
- */
+static void
+mptsas_probe_devices(MPT_ADAPTER *ioc)
+{
+ u16 handle;
+ struct mptsas_devinfo sas_device;
+ struct mptsas_phyinfo *phy_info;
+
+ handle = 0xFFFF;
+ while (!(mptsas_sas_device_pg0(ioc, &sas_device,
+ MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+
+ handle = sas_device.handle;
+
+ if ((sas_device.device_info &
+ (MPI_SAS_DEVICE_INFO_SSP_TARGET |
+ MPI_SAS_DEVICE_INFO_STP_TARGET |
+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
+ continue;
+
+ phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
+ if (!phy_info)
+ continue;
+
+ if (mptsas_get_rphy(phy_info))
+ continue;
+
+ mptsas_add_end_device(ioc, phy_info);
+ }
+}
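/*
 * [Editor's note -- illustrative sketch, not part of the patch] The device
 * and expander walks above use the MPI "get next handle" page form: seed
 * the handle with 0xFFFF and feed the handle returned by each successful
 * config-page read into the next request; a failed read ends the walk.
 * Skeleton of the idiom, with a hypothetical page reader standing in for
 * the driver's pg0 helpers:
 */
#include <linux/types.h>

/* hypothetical: fills *handle with the next handle and returns 0, or
 * returns non-zero when there are no more objects to report */
static int example_read_next_handle(u16 *handle)
{
	return -1;	/* stub: end the walk immediately */
}

static void example_walk_handles(void)
{
	u16 handle = 0xFFFF;	/* sentinel: ask the firmware for the first object */

	while (!example_read_next_handle(&handle)) {
		/* ... consume the page that describes 'handle' ... */
	}
}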
+
+/**
+ * mptsas_scan_sas_topology - scan the HBA phys, expanders, devices and RAID volumes
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
static void
mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
{
- u32 handle = 0xFFFF;
+ struct scsi_device *sdev;
int i;
- mutex_lock(&ioc->sas_discovery_mutex);
mptsas_probe_hba_phys(ioc);
- while (!mptsas_probe_expander_phys(ioc, &handle))
- ;
+ mptsas_probe_expanders(ioc);
+ mptsas_probe_devices(ioc);
+
/*
Reporting RAID volumes.
*/
- if (!ioc->ir_firmware)
- goto out;
- if (!ioc->raid_data.pIocPg2)
- goto out;
- if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
- goto out;
+ if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 ||
+ !ioc->raid_data.pIocPg2->NumActiveVolumes)
+ return;
for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
+ sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
+ ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
+ if (sdev) {
+ scsi_device_put(sdev);
+ continue;
+ }
+ printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
+ "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
+ ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID);
scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
}
- out:
- mutex_unlock(&ioc->sas_discovery_mutex);
}
-/*
- * Work queue thread to handle Runtime discovery
- * Mere purpose is the hot add/delete of expanders
- *(Mutex UNLOCKED)
- */
+
static void
-__mptsas_discovery_work(MPT_ADAPTER *ioc)
+mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
{
- u32 handle = 0xFFFF;
+ MPT_ADAPTER *ioc;
+ EventDataQueueFull_t *qfull_data;
+ struct mptsas_device_info *sas_info;
+ struct scsi_device *sdev;
+ int depth;
+ int id = -1;
+ int channel = -1;
+ int fw_id, fw_channel;
+ u16 current_depth;
+
+
+ ioc = fw_event->ioc;
+ qfull_data = (EventDataQueueFull_t *)fw_event->event_data;
+ fw_id = qfull_data->TargetID;
+ fw_channel = qfull_data->Bus;
+ current_depth = le16_to_cpu(qfull_data->CurrentDepth);
+
+ /* if hidden raid component, look for the volume id */
+ mutex_lock(&ioc->sas_device_info_mutex);
+ if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) {
+ list_for_each_entry(sas_info, &ioc->sas_device_info_list,
+ list) {
+ if (sas_info->is_cached ||
+ sas_info->is_logical_volume)
+ continue;
+ if (sas_info->is_hidden_raid_component &&
+ (sas_info->fw.channel == fw_channel &&
+ sas_info->fw.id == fw_id)) {
+ id = sas_info->volume_id;
+ channel = MPTSAS_RAID_CHANNEL;
+ goto out;
+ }
+ }
+ } else {
+ list_for_each_entry(sas_info, &ioc->sas_device_info_list,
+ list) {
+ if (sas_info->is_cached ||
+ sas_info->is_hidden_raid_component ||
+ sas_info->is_logical_volume)
+ continue;
+ if (sas_info->fw.channel == fw_channel &&
+ sas_info->fw.id == fw_id) {
+ id = sas_info->os.id;
+ channel = sas_info->os.channel;
+ goto out;
+ }
+ }
- ioc->sas_discovery_runtime=1;
- mptsas_delete_expander_phys(ioc);
- mptsas_probe_hba_phys(ioc);
- while (!mptsas_probe_expander_phys(ioc, &handle))
- ;
- ioc->sas_discovery_runtime=0;
-}
+ }
-/*
- * Work queue thread to handle Runtime discovery
- * Mere purpose is the hot add/delete of expanders
- *(Mutex LOCKED)
- */
-static void
-mptsas_discovery_work(struct work_struct *work)
-{
- struct mptsas_discovery_event *ev =
- container_of(work, struct mptsas_discovery_event, work);
- MPT_ADAPTER *ioc = ev->ioc;
+ out:
+ mutex_unlock(&ioc->sas_device_info_mutex);
+
+ if (id != -1) {
+ shost_for_each_device(sdev, ioc->sh) {
+ if (sdev->id == id && sdev->channel == channel) {
+ if (current_depth > sdev->queue_depth) {
+ sdev_printk(KERN_INFO, sdev,
+ "strange observation, the queue "
+ "depth is (%d) meanwhile fw queue "
+ "depth (%d)\n", sdev->queue_depth,
+ current_depth);
+ continue;
+ }
+ depth = scsi_track_queue_full(sdev,
+ current_depth - 1);
+ if (depth > 0)
+ sdev_printk(KERN_INFO, sdev,
+ "Queue depth reduced to (%d)\n",
+ depth);
+ else if (depth < 0)
+ sdev_printk(KERN_INFO, sdev,
+ "Tagged Command Queueing is being "
+ "disabled\n");
+ else if (depth == 0)
+ sdev_printk(KERN_INFO, sdev,
+ "Queue depth not changed yet\n");
+ }
+ }
+ }
- mutex_lock(&ioc->sas_discovery_mutex);
- __mptsas_discovery_work(ioc);
- mutex_unlock(&ioc->sas_discovery_mutex);
- kfree(ev);
+ mptsas_free_fw_event(ioc, fw_event);
}
+
static struct mptsas_phyinfo *
mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
{
@@ -2429,69 +3786,80 @@ mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
return phy_info;
}
+/**
+ * mptsas_find_phyinfo_by_phys_disk_num - lookup phy_info for a hidden RAID component
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @phys_disk_num: physical disk number of the hidden RAID component
+ * @channel: firmware mapped channel of the physical disk
+ * @id: firmware mapped target id of the physical disk
+ *
+ **/
static struct mptsas_phyinfo *
-mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u8 channel, u8 id)
+mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
+ u8 channel, u8 id)
{
- struct mptsas_portinfo *port_info;
struct mptsas_phyinfo *phy_info = NULL;
+ struct mptsas_portinfo *port_info;
+ RaidPhysDiskPage1_t *phys_disk = NULL;
+ int num_paths;
+ u64 sas_address = 0;
int i;
- mutex_lock(&ioc->sas_topology_mutex);
- list_for_each_entry(port_info, &ioc->sas_topology, list) {
- for (i = 0; i < port_info->num_phys; i++) {
- if (!mptsas_is_end_device(
- &port_info->phy_info[i].attached))
- continue;
- if (port_info->phy_info[i].attached.id != id)
- continue;
- if (port_info->phy_info[i].attached.channel != channel)
- continue;
- phy_info = &port_info->phy_info[i];
- break;
+ phy_info = NULL;
+ if (!ioc->raid_data.pIocPg3)
+ return NULL;
+ /* dual port support */
+ num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
+ if (!num_paths)
+ goto out;
+ phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
+ (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
+ if (!phys_disk)
+ goto out;
+ mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk);
+ for (i = 0; i < num_paths; i++) {
+ if ((phys_disk->Path[i].Flags & 1) != 0)
+ /* entry no longer valid */
+ continue;
+ if ((id == phys_disk->Path[i].PhysDiskID) &&
+ (channel == phys_disk->Path[i].PhysDiskBus)) {
+ memcpy(&sas_address, &phys_disk->Path[i].WWID,
+ sizeof(u64));
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ sas_address);
+ goto out;
}
}
- mutex_unlock(&ioc->sas_topology_mutex);
- return phy_info;
-}
-static struct mptsas_phyinfo *
-mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
-{
- struct mptsas_portinfo *port_info;
- struct mptsas_phyinfo *phy_info = NULL;
- int i;
+ out:
+ kfree(phys_disk);
+ if (phy_info)
+ return phy_info;
+ /*
+ * Extra handling for the RAID0 case, where the sas_address in
+ * phys_disk_page_1 is not updated when a disk is hot swapped
+ */
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(port_info, &ioc->sas_topology, list) {
- for (i = 0; i < port_info->num_phys; i++) {
+ for (i = 0; i < port_info->num_phys && !phy_info; i++) {
if (!mptsas_is_end_device(
&port_info->phy_info[i].attached))
continue;
if (port_info->phy_info[i].attached.phys_disk_num == ~0)
continue;
- if (port_info->phy_info[i].attached.phys_disk_num != id)
- continue;
- if (port_info->phy_info[i].attached.channel != channel)
- continue;
- phy_info = &port_info->phy_info[i];
- break;
+ if ((port_info->phy_info[i].attached.phys_disk_num ==
+ phys_disk_num) &&
+ (port_info->phy_info[i].attached.id == id) &&
+ (port_info->phy_info[i].attached.channel ==
+ channel))
+ phy_info = &port_info->phy_info[i];
}
}
mutex_unlock(&ioc->sas_topology_mutex);
return phy_info;
}
-/*
- * Work queue thread to clear the persitency table
- */
-static void
-mptsas_persist_clear_table(struct work_struct *work)
-{
- MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
-
- mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
-}
-
static void
mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
{
@@ -2517,7 +3885,8 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
pRaidVolumePage0_t buffer = NULL;
RaidPhysDiskPage0_t phys_disk;
int i;
- struct mptsas_hotplug_event *ev;
+ struct mptsas_phyinfo *phy_info;
+ struct mptsas_devinfo sas_device;
memset(&cfg, 0 , sizeof(CONFIGPARMS));
memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
@@ -2557,20 +3926,16 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
continue;
- ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
- if (!ev) {
- printk(MYIOC_s_WARN_FMT "mptsas: lost hotplug event\n", ioc->name);
- goto out;
- }
+ if (mptsas_sas_device_pg0(ioc, &sas_device,
+ (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ (phys_disk.PhysDiskBus << 8) +
+ phys_disk.PhysDiskID))
+ continue;
- INIT_WORK(&ev->work, mptsas_hotplug_work);
- ev->ioc = ioc;
- ev->id = phys_disk.PhysDiskID;
- ev->channel = phys_disk.PhysDiskBus;
- ev->phys_disk_num_valid = 1;
- ev->phys_disk_num = phys_disk.PhysDiskNum;
- ev->event_type = MPTSAS_ADD_DEVICE;
- schedule_work(&ev->work);
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ sas_device.sas_address);
+ mptsas_add_end_device(ioc, phy_info);
}
out:
@@ -2582,417 +3947,386 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
* Work queue thread to handle SAS hotplug events
*/
static void
-mptsas_hotplug_work(struct work_struct *work)
+mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
+ struct mptsas_hotplug_event *hot_plug_info)
{
- struct mptsas_hotplug_event *ev =
- container_of(work, struct mptsas_hotplug_event, work);
-
- MPT_ADAPTER *ioc = ev->ioc;
struct mptsas_phyinfo *phy_info;
- struct sas_rphy *rphy;
- struct sas_port *port;
- struct scsi_device *sdev;
struct scsi_target * starget;
- struct sas_identify identify;
- char *ds = NULL;
struct mptsas_devinfo sas_device;
VirtTarget *vtarget;
- VirtDevice *vdevice;
+ int i;
- mutex_lock(&ioc->sas_discovery_mutex);
- switch (ev->event_type) {
- case MPTSAS_DEL_DEVICE:
+ switch (hot_plug_info->event_type) {
- phy_info = NULL;
- if (ev->phys_disk_num_valid) {
- if (ev->hidden_raid_component){
- if (mptsas_sas_device_pg0(ioc, &sas_device,
- (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
- MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
- (ev->channel << 8) + ev->id)) {
- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
- break;
- }
- phy_info = mptsas_find_phyinfo_by_sas_address(
- ioc, sas_device.sas_address);
- }else
- phy_info = mptsas_find_phyinfo_by_phys_disk_num(
- ioc, ev->channel, ev->phys_disk_num);
+ case MPTSAS_ADD_PHYSDISK:
+
+ if (!ioc->raid_data.pIocPg2)
+ break;
+
+ for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
+ if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
+ hot_plug_info->id) {
+ printk(MYIOC_s_WARN_FMT "firmware bug: unable "
+ "to add hidden disk - target_id matchs "
+ "volume_id\n", ioc->name);
+ mptsas_free_fw_event(ioc, fw_event);
+ return;
+ }
}
+ mpt_findImVolumes(ioc);
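	/* [Editor's note] intentional fall through: MPTSAS_ADD_PHYSDISK
	 * continues into MPTSAS_ADD_DEVICE so the new hidden physical disk
	 * is also probed and added as an end device */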
+ case MPTSAS_ADD_DEVICE:
+ memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
+ mptsas_sas_device_pg0(ioc, &sas_device,
+ (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ (hot_plug_info->channel << 8) +
+ hot_plug_info->id);
+
+ if (!sas_device.handle)
+ return;
+
+ phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
if (!phy_info)
- phy_info = mptsas_find_phyinfo_by_target(ioc,
- ev->channel, ev->id);
+ break;
- /*
- * Sanity checks, for non-existing phys and remote rphys.
- */
- if (!phy_info){
+ if (mptsas_get_rphy(phy_info))
+ break;
+
+ mptsas_add_end_device(ioc, phy_info);
+ break;
+
+ case MPTSAS_DEL_DEVICE:
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ hot_plug_info->sas_address);
+ mptsas_del_end_device(ioc, phy_info);
+ break;
+
+ case MPTSAS_DEL_PHYSDISK:
+
+ mpt_findImVolumes(ioc);
+
+ phy_info = mptsas_find_phyinfo_by_phys_disk_num(
+ ioc, hot_plug_info->phys_disk_num,
+ hot_plug_info->channel,
+ hot_plug_info->id);
+ mptsas_del_end_device(ioc, phy_info);
+ break;
+
+ case MPTSAS_ADD_PHYSDISK_REPROBE:
+
+ if (mptsas_sas_device_pg0(ioc, &sas_device,
+ (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ (hot_plug_info->channel << 8) + hot_plug_info->id)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
break;
}
- if (!phy_info->port_details) {
+
+ phy_info = mptsas_find_phyinfo_by_sas_address(
+ ioc, sas_device.sas_address);
+
+ if (!phy_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
break;
}
- rphy = mptsas_get_rphy(phy_info);
- if (!rphy) {
+
+ starget = mptsas_get_starget(phy_info);
+ if (!starget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
break;
}
- port = mptsas_get_port(phy_info);
- if (!port) {
+ vtarget = starget->hostdata;
+ if (!vtarget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
break;
}
- starget = mptsas_get_starget(phy_info);
- if (starget) {
- vtarget = starget->hostdata;
+ mpt_findImVolumes(ioc);
- if (!vtarget) {
- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
- break;
- }
+ starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hiding: "
+ "fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
+ ioc->name, hot_plug_info->channel, hot_plug_info->id,
+ hot_plug_info->phys_disk_num, (unsigned long long)
+ sas_device.sas_address);
- /*
- * Handling RAID components
- */
- if (ev->phys_disk_num_valid &&
- ev->hidden_raid_component) {
- printk(MYIOC_s_INFO_FMT
- "RAID Hidding: channel=%d, id=%d, "
- "physdsk %d \n", ioc->name, ev->channel,
- ev->id, ev->phys_disk_num);
- vtarget->id = ev->phys_disk_num;
- vtarget->tflags |=
- MPT_TARGET_FLAGS_RAID_COMPONENT;
- mptsas_reprobe_target(starget, 1);
- phy_info->attached.phys_disk_num =
- ev->phys_disk_num;
- break;
- }
- }
-
- if (phy_info->attached.device_info &
- MPI_SAS_DEVICE_INFO_SSP_TARGET)
- ds = "ssp";
- if (phy_info->attached.device_info &
- MPI_SAS_DEVICE_INFO_STP_TARGET)
- ds = "stp";
- if (phy_info->attached.device_info &
- MPI_SAS_DEVICE_INFO_SATA_DEVICE)
- ds = "sata";
-
- printk(MYIOC_s_INFO_FMT
- "removing %s device, channel %d, id %d, phy %d\n",
- ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
- dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
- "delete port (%d)\n", ioc->name, port->port_identifier);
- sas_port_delete(port);
- mptsas_port_delete(ioc, phy_info->port_details);
+ vtarget->id = hot_plug_info->phys_disk_num;
+ vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
+ phy_info->attached.phys_disk_num = hot_plug_info->phys_disk_num;
+ mptsas_reprobe_target(starget, 1);
break;
- case MPTSAS_ADD_DEVICE:
- if (ev->phys_disk_num_valid)
- mpt_findImVolumes(ioc);
+ case MPTSAS_DEL_PHYSDISK_REPROBE:
- /*
- * Refresh sas device pg0 data
- */
if (mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
- (ev->channel << 8) + ev->id)) {
+ (hot_plug_info->channel << 8) + hot_plug_info->id)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
+ "%s: fw_id=%d exit at line=%d\n",
+ ioc->name, __func__,
+ hot_plug_info->id, __LINE__));
break;
}
- __mptsas_discovery_work(ioc);
-
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
sas_device.sas_address);
-
- if (!phy_info || !phy_info->port_details) {
+ if (!phy_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
break;
}
starget = mptsas_get_starget(phy_info);
- if (starget && (!ev->hidden_raid_component)){
-
- vtarget = starget->hostdata;
-
- if (!vtarget) {
- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
- break;
- }
- /*
- * Handling RAID components
- */
- if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
- printk(MYIOC_s_INFO_FMT
- "RAID Exposing: channel=%d, id=%d, "
- "physdsk %d \n", ioc->name, ev->channel,
- ev->id, ev->phys_disk_num);
- vtarget->tflags &=
- ~MPT_TARGET_FLAGS_RAID_COMPONENT;
- vtarget->id = ev->id;
- mptsas_reprobe_target(starget, 0);
- phy_info->attached.phys_disk_num = ~0;
- }
+ if (!starget) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
break;
}
- if (mptsas_get_rphy(phy_info)) {
+ vtarget = starget->hostdata;
+ if (!vtarget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
- if (ev->channel) printk("%d\n", __LINE__);
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
break;
}
- port = mptsas_get_port(phy_info);
- if (!port) {
+ if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
break;
}
- memcpy(&phy_info->attached, &sas_device,
- sizeof(struct mptsas_devinfo));
-
- if (phy_info->attached.device_info &
- MPI_SAS_DEVICE_INFO_SSP_TARGET)
- ds = "ssp";
- if (phy_info->attached.device_info &
- MPI_SAS_DEVICE_INFO_STP_TARGET)
- ds = "stp";
- if (phy_info->attached.device_info &
- MPI_SAS_DEVICE_INFO_SATA_DEVICE)
- ds = "sata";
-
- printk(MYIOC_s_INFO_FMT
- "attaching %s device, channel %d, id %d, phy %d\n",
- ioc->name, ds, ev->channel, ev->id, ev->phy_id);
- mptsas_parse_device_info(&identify, &phy_info->attached);
- rphy = sas_end_device_alloc(port);
- if (!rphy) {
- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
- break; /* non-fatal: an rphy can be added later */
- }
+ mpt_findImVolumes(ioc);
- rphy->identify = identify;
- if (sas_rphy_add(rphy)) {
- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
- "%s: exit at line=%d\n", ioc->name,
- __func__, __LINE__));
- sas_rphy_free(rphy);
- break;
- }
- mptsas_set_rphy(ioc, phy_info, rphy);
+ starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Exposing:"
+ " fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
+ ioc->name, hot_plug_info->channel, hot_plug_info->id,
+ hot_plug_info->phys_disk_num, (unsigned long long)
+ sas_device.sas_address);
+
+ vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+ vtarget->id = hot_plug_info->id;
+ phy_info->attached.phys_disk_num = ~0;
+ mptsas_reprobe_target(starget, 0);
+ mptsas_add_device_component_by_fw(ioc,
+ hot_plug_info->channel, hot_plug_info->id);
break;
+
case MPTSAS_ADD_RAID:
- sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
- ev->id, 0);
- if (sdev) {
- scsi_device_put(sdev);
- break;
- }
- printk(MYIOC_s_INFO_FMT
- "attaching raid volume, channel %d, id %d\n",
- ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
- scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0);
+
mpt_findImVolumes(ioc);
+ printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
+ "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
+ hot_plug_info->id);
+ scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
+ hot_plug_info->id, 0);
break;
+
case MPTSAS_DEL_RAID:
- sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
- ev->id, 0);
- if (!sdev)
- break;
- printk(MYIOC_s_INFO_FMT
- "removing raid volume, channel %d, id %d\n",
- ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
- vdevice = sdev->hostdata;
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
+
mpt_findImVolumes(ioc);
+ printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
+ "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
+ hot_plug_info->id);
+ scsi_remove_device(hot_plug_info->sdev);
+ scsi_device_put(hot_plug_info->sdev);
break;
+
case MPTSAS_ADD_INACTIVE_VOLUME:
+
+ mpt_findImVolumes(ioc);
mptsas_adding_inactive_raid_components(ioc,
- ev->channel, ev->id);
+ hot_plug_info->channel, hot_plug_info->id);
break;
- case MPTSAS_IGNORE_EVENT:
+
default:
break;
}
- mutex_unlock(&ioc->sas_discovery_mutex);
- kfree(ev);
+ mptsas_free_fw_event(ioc, fw_event);
}
static void
-mptsas_send_sas_event(MPT_ADAPTER *ioc,
- EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
+mptsas_send_sas_event(struct fw_event_work *fw_event)
{
- struct mptsas_hotplug_event *ev;
- u32 device_info = le32_to_cpu(sas_event_data->DeviceInfo);
- __le64 sas_address;
+ MPT_ADAPTER *ioc;
+ struct mptsas_hotplug_event hot_plug_info;
+ EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
+ u32 device_info;
+ u64 sas_address;
+
+ ioc = fw_event->ioc;
+ sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
+ fw_event->event_data;
+ device_info = le32_to_cpu(sas_event_data->DeviceInfo);
if ((device_info &
- (MPI_SAS_DEVICE_INFO_SSP_TARGET |
- MPI_SAS_DEVICE_INFO_STP_TARGET |
- MPI_SAS_DEVICE_INFO_SATA_DEVICE )) == 0)
+ (MPI_SAS_DEVICE_INFO_SSP_TARGET |
+ MPI_SAS_DEVICE_INFO_STP_TARGET |
+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) {
+ mptsas_free_fw_event(ioc, fw_event);
+ return;
+ }
+
+ if (sas_event_data->ReasonCode ==
+ MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) {
+ mptbase_sas_persist_operation(ioc,
+ MPI_SAS_OP_CLEAR_NOT_PRESENT);
+ mptsas_free_fw_event(ioc, fw_event);
return;
+ }
switch (sas_event_data->ReasonCode) {
case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
-
- mptsas_target_reset_queue(ioc, sas_event_data);
- break;
-
case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
- ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
- if (!ev) {
- printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name);
- break;
- }
-
- INIT_WORK(&ev->work, mptsas_hotplug_work);
- ev->ioc = ioc;
- ev->handle = le16_to_cpu(sas_event_data->DevHandle);
- ev->parent_handle =
- le16_to_cpu(sas_event_data->ParentDevHandle);
- ev->channel = sas_event_data->Bus;
- ev->id = sas_event_data->TargetID;
- ev->phy_id = sas_event_data->PhyNum;
+ memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
+ hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle);
+ hot_plug_info.channel = sas_event_data->Bus;
+ hot_plug_info.id = sas_event_data->TargetID;
+ hot_plug_info.phy_id = sas_event_data->PhyNum;
memcpy(&sas_address, &sas_event_data->SASAddress,
- sizeof(__le64));
- ev->sas_address = le64_to_cpu(sas_address);
- ev->device_info = device_info;
-
+ sizeof(u64));
+ hot_plug_info.sas_address = le64_to_cpu(sas_address);
+ hot_plug_info.device_info = device_info;
if (sas_event_data->ReasonCode &
MPI_EVENT_SAS_DEV_STAT_RC_ADDED)
- ev->event_type = MPTSAS_ADD_DEVICE;
+ hot_plug_info.event_type = MPTSAS_ADD_DEVICE;
else
- ev->event_type = MPTSAS_DEL_DEVICE;
- schedule_work(&ev->work);
+ hot_plug_info.event_type = MPTSAS_DEL_DEVICE;
+ mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
break;
+
case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
- /*
- * Persistent table is full.
- */
- INIT_WORK(&ioc->sas_persist_task,
- mptsas_persist_clear_table);
- schedule_work(&ioc->sas_persist_task);
+ mptbase_sas_persist_operation(ioc,
+ MPI_SAS_OP_CLEAR_NOT_PRESENT);
+ mptsas_free_fw_event(ioc, fw_event);
break;
- /*
- * TODO, handle other events
- */
+
case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
- case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
+ /* TODO */
case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
- case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
- case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
- case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
- case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
+ /* TODO */
default:
+ mptsas_free_fw_event(ioc, fw_event);
break;
}
}
+
static void
-mptsas_send_raid_event(MPT_ADAPTER *ioc,
- EVENT_DATA_RAID *raid_event_data)
+mptsas_send_raid_event(struct fw_event_work *fw_event)
{
- struct mptsas_hotplug_event *ev;
- int status = le32_to_cpu(raid_event_data->SettingsStatus);
- int state = (status >> 8) & 0xff;
-
- if (ioc->bus_type != SAS)
- return;
-
- ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
- if (!ev) {
- printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name);
- return;
+ MPT_ADAPTER *ioc;
+ EVENT_DATA_RAID *raid_event_data;
+ struct mptsas_hotplug_event hot_plug_info;
+ int status;
+ int state;
+ struct scsi_device *sdev = NULL;
+ VirtDevice *vdevice = NULL;
+ RaidPhysDiskPage0_t phys_disk;
+
+ ioc = fw_event->ioc;
+ raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data;
+ status = le32_to_cpu(raid_event_data->SettingsStatus);
+ state = (status >> 8) & 0xff;
+
+ memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
+ hot_plug_info.id = raid_event_data->VolumeID;
+ hot_plug_info.channel = raid_event_data->VolumeBus;
+ hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum;
+
+ if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED ||
+ raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED ||
+ raid_event_data->ReasonCode ==
+ MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) {
+ sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
+ hot_plug_info.id, 0);
+ hot_plug_info.sdev = sdev;
+ if (sdev)
+ vdevice = sdev->hostdata;
}
- INIT_WORK(&ev->work, mptsas_hotplug_work);
- ev->ioc = ioc;
- ev->id = raid_event_data->VolumeID;
- ev->channel = raid_event_data->VolumeBus;
- ev->event_type = MPTSAS_IGNORE_EVENT;
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
+ "ReasonCode=%02x\n", ioc->name, __func__,
+ raid_event_data->ReasonCode));
switch (raid_event_data->ReasonCode) {
case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
- ev->phys_disk_num_valid = 1;
- ev->phys_disk_num = raid_event_data->PhysDiskNum;
- ev->event_type = MPTSAS_ADD_DEVICE;
+ hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE;
break;
case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
- ev->phys_disk_num_valid = 1;
- ev->phys_disk_num = raid_event_data->PhysDiskNum;
- ev->hidden_raid_component = 1;
- ev->event_type = MPTSAS_DEL_DEVICE;
+ hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE;
break;
case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
switch (state) {
case MPI_PD_STATE_ONLINE:
case MPI_PD_STATE_NOT_COMPATIBLE:
- ev->phys_disk_num_valid = 1;
- ev->phys_disk_num = raid_event_data->PhysDiskNum;
- ev->hidden_raid_component = 1;
- ev->event_type = MPTSAS_ADD_DEVICE;
+ mpt_raid_phys_disk_pg0(ioc,
+ raid_event_data->PhysDiskNum, &phys_disk);
+ hot_plug_info.id = phys_disk.PhysDiskID;
+ hot_plug_info.channel = phys_disk.PhysDiskBus;
+ hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
break;
+ case MPI_PD_STATE_FAILED:
case MPI_PD_STATE_MISSING:
case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
- ev->phys_disk_num_valid = 1;
- ev->phys_disk_num = raid_event_data->PhysDiskNum;
- ev->event_type = MPTSAS_DEL_DEVICE;
+ hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
break;
default:
break;
}
break;
case MPI_EVENT_RAID_RC_VOLUME_DELETED:
- ev->event_type = MPTSAS_DEL_RAID;
+ if (!sdev)
+ break;
+ vdevice->vtarget->deleted = 1; /* block IO */
+ hot_plug_info.event_type = MPTSAS_DEL_RAID;
break;
case MPI_EVENT_RAID_RC_VOLUME_CREATED:
- ev->event_type = MPTSAS_ADD_RAID;
+ if (sdev) {
+ scsi_device_put(sdev);
+ break;
+ }
+ hot_plug_info.event_type = MPTSAS_ADD_RAID;
break;
case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
+ if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) {
+ if (!sdev)
+ break;
+ vdevice->vtarget->deleted = 1; /* block IO */
+ hot_plug_info.event_type = MPTSAS_DEL_RAID;
+ break;
+ }
switch (state) {
case MPI_RAIDVOL0_STATUS_STATE_FAILED:
case MPI_RAIDVOL0_STATUS_STATE_MISSING:
- ev->event_type = MPTSAS_DEL_RAID;
+ if (!sdev)
+ break;
+ vdevice->vtarget->deleted = 1; /* block IO */
+ hot_plug_info.event_type = MPTSAS_DEL_RAID;
break;
case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
- ev->event_type = MPTSAS_ADD_RAID;
+ if (sdev) {
+ scsi_device_put(sdev);
+ break;
+ }
+ hot_plug_info.event_type = MPTSAS_ADD_RAID;
break;
default:
break;
@@ -3001,32 +4335,188 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc,
default:
break;
}
- schedule_work(&ev->work);
+
+ if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT)
+ mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
+ else
+ mptsas_free_fw_event(ioc, fw_event);
}
-static void
-mptsas_send_discovery_event(MPT_ADAPTER *ioc,
- EVENT_DATA_SAS_DISCOVERY *discovery_data)
+/**
+ * mptsas_issue_tm - send mptsas internal tm request
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @type: Task Management type
+ * @channel: channel number for task management
+ * @id: Logical Target ID for reset (if appropriate)
+ * @lun: Logical unit for reset (if appropriate)
+ * @task_context: Context for the task to be aborted
+ * @timeout: timeout for task management control
+ * @issue_reset: set to 1 when the caller should escalate to a host reset
+ *
+ * Returns 0 on success and -1 on failure.
+ *
+ */
+static int
+mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
+ int task_context, ulong timeout, u8 *issue_reset)
{
- struct mptsas_discovery_event *ev;
+ MPT_FRAME_HDR *mf;
+ SCSITaskMgmt_t *pScsiTm;
+ int retval;
+ unsigned long timeleft;
+
+ *issue_reset = 0;
+ mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
+ if (mf == NULL) {
+ retval = -1; /* return failure */
+ dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no "
+ "msg frames!!\n", ioc->name));
+ goto out;
+ }
- /*
- * DiscoveryStatus
- *
- * This flag will be non-zero when firmware
- * kicks off discovery, and return to zero
- * once its completed.
- */
- if (discovery_data->DiscoveryStatus)
- return;
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, "
+ "task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, "
+ "fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf,
+ type, timeout, channel, id, (unsigned long long)lun,
+ task_context));
+
+ pScsiTm = (SCSITaskMgmt_t *) mf;
+ memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
+ pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
+ pScsiTm->TaskType = type;
+ pScsiTm->MsgFlags = 0;
+ pScsiTm->TargetID = id;
+ pScsiTm->Bus = channel;
+ pScsiTm->ChainOffset = 0;
+ pScsiTm->Reserved = 0;
+ pScsiTm->Reserved1 = 0;
+ pScsiTm->TaskMsgContext = task_context;
+ int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
+
+ INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+ CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+ retval = 0;
+ mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
+
+ /* Now wait for the command to complete */
+ timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
+ timeout*HZ);
+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ retval = -1; /* return failure */
+ dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf));
+ mpt_free_msg_frame(ioc, mf);
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out;
+ *issue_reset = 1;
+ goto out;
+ }
- ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
- if (!ev)
+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+ retval = -1; /* return failure */
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt request: failed with no reply\n", ioc->name));
+ goto out;
+ }
+
+ out:
+ CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+ return retval;
+}
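/*
 * [Editor's note -- illustrative sketch, not part of the patch] Caller
 * contract for mptsas_issue_tm(): a zero return means the task management
 * request completed; on failure, when *issue_reset comes back set, the
 * caller is expected to escalate to a full adapter reset, exactly as
 * mptsas_broadcast_primative_work() below does.  Hypothetical usage,
 * reusing driver types already in scope:
 */
static void example_reset_target(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
	u8 issue_reset = 0;

	if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
	    channel, id, 0 /* lun */, 0 /* task_context */, 30 /* seconds */,
	    &issue_reset) && issue_reset)
		mpt_HardResetHandler(ioc, CAN_SLEEP);
}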
+
+/**
+ * mptsas_broadcast_primative_work - Handle broadcast primitives
+ * @fw_event: work queue payload containing info describing the event
+ *
+ * This is handled in workqueue context.
+ */
+static void
+mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
+{
+ MPT_ADAPTER *ioc = fw_event->ioc;
+ MPT_FRAME_HDR *mf;
+ VirtDevice *vdevice;
+ int ii;
+ struct scsi_cmnd *sc;
+ SCSITaskMgmtReply_t *pScsiTmReply;
+ u8 issue_reset;
+ int task_context;
+ u8 channel, id;
+ int lun;
+ u32 termination_count;
+ u32 query_count;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s - enter\n", ioc->name, __func__));
+
+ mutex_lock(&ioc->taskmgmt_cmds.mutex);
+ if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ mptsas_requeue_fw_event(ioc, fw_event, 1000);
return;
- INIT_WORK(&ev->work, mptsas_discovery_work);
- ev->ioc = ioc;
- schedule_work(&ev->work);
-};
+ }
+
+ issue_reset = 0;
+ termination_count = 0;
+ query_count = 0;
+ mpt_findImVolumes(ioc);
+ pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
+
+ for (ii = 0; ii < ioc->req_depth; ii++) {
+ if (ioc->fw_events_off)
+ goto out;
+ sc = mptscsih_get_scsi_lookup(ioc, ii);
+ if (!sc)
+ continue;
+ mf = MPT_INDEX_2_MFPTR(ioc, ii);
+ if (!mf)
+ continue;
+ task_context = mf->u.frame.hwhdr.msgctxu.MsgContext;
+ vdevice = sc->device->hostdata;
+ if (!vdevice || !vdevice->vtarget)
+ continue;
+ if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
+ continue; /* skip hidden raid components */
+ if (vdevice->vtarget->raidVolume)
+ continue; /* skip raid volumes */
+ channel = vdevice->vtarget->channel;
+ id = vdevice->vtarget->id;
+ lun = vdevice->lun;
+ if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
+ channel, id, (u64)lun, task_context, 30, &issue_reset))
+ goto out;
+ query_count++;
+ termination_count +=
+ le32_to_cpu(pScsiTmReply->TerminationCount);
+ if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) &&
+ (pScsiTmReply->ResponseCode ==
+ MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
+ pScsiTmReply->ResponseCode ==
+ MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
+ continue;
+ if (mptsas_issue_tm(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET,
+ channel, id, (u64)lun, 0, 30, &issue_reset))
+ goto out;
+ termination_count +=
+ le32_to_cpu(pScsiTmReply->TerminationCount);
+ }
+
+ out:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s - exit, query_count = %d termination_count = %d\n",
+ ioc->name, __func__, query_count, termination_count));
+
+ ioc->broadcast_aen_busy = 0;
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+
+ if (issue_reset) {
+ printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+ ioc->name, __func__);
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+ }
+ mptsas_free_fw_event(ioc, fw_event);
+}
/*
* mptsas_send_ir2_event - handle exposing hidden disk when
@@ -3037,76 +4527,159 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc,
*
*/
static void
-mptsas_send_ir2_event(MPT_ADAPTER *ioc, PTR_MPI_EVENT_DATA_IR2 ir2_data)
+mptsas_send_ir2_event(struct fw_event_work *fw_event)
{
- struct mptsas_hotplug_event *ev;
-
- if (ir2_data->ReasonCode !=
- MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED)
- return;
-
- ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
- if (!ev)
+ MPT_ADAPTER *ioc;
+ struct mptsas_hotplug_event hot_plug_info;
+ MPI_EVENT_DATA_IR2 *ir2_data;
+ u8 reasonCode;
+ RaidPhysDiskPage0_t phys_disk;
+
+ ioc = fw_event->ioc;
+ ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data;
+ reasonCode = ir2_data->ReasonCode;
+
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
+ "ReasonCode=%02x\n", ioc->name, __func__, reasonCode));
+
+ memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
+ hot_plug_info.id = ir2_data->TargetID;
+ hot_plug_info.channel = ir2_data->Bus;
+ switch (reasonCode) {
+ case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
+ hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME;
+ break;
+ case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
+ hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
+ hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
+ break;
+ case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
+ hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
+ mpt_raid_phys_disk_pg0(ioc,
+ ir2_data->PhysDiskNum, &phys_disk);
+ hot_plug_info.id = phys_disk.PhysDiskID;
+ hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
+ break;
+ default:
+ mptsas_free_fw_event(ioc, fw_event);
return;
-
- INIT_WORK(&ev->work, mptsas_hotplug_work);
- ev->ioc = ioc;
- ev->id = ir2_data->TargetID;
- ev->channel = ir2_data->Bus;
- ev->event_type = MPTSAS_ADD_INACTIVE_VOLUME;
-
- schedule_work(&ev->work);
-};
+ }
+ mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
+}
static int
mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
{
- int rc=1;
- u8 event = le32_to_cpu(reply->Event) & 0xFF;
+ u32 event = le32_to_cpu(reply->Event);
+ int sz, event_data_sz;
+ struct fw_event_work *fw_event;
+ unsigned long delay;
- if (!ioc->sh)
- goto out;
-
- /*
- * sas_discovery_ignore_events
- *
- * This flag is to prevent anymore processing of
- * sas events once mptsas_remove function is called.
- */
- if (ioc->sas_discovery_ignore_events) {
- rc = mptscsih_event_process(ioc, reply);
- goto out;
- }
+ /* events turned off due to host reset or driver unloading */
+ if (ioc->fw_events_off)
+ return 0;
+ delay = msecs_to_jiffies(1);
switch (event) {
+ case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
+ {
+ EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data =
+ (EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data;
+ if (broadcast_event_data->Primitive !=
+ MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
+ return 0;
+ if (ioc->broadcast_aen_busy)
+ return 0;
+ ioc->broadcast_aen_busy = 1;
+ break;
+ }
case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
- mptsas_send_sas_event(ioc,
- (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data);
+ {
+ EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
+ (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
+
+ if (sas_event_data->ReasonCode ==
+ MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
+ mptsas_target_reset_queue(ioc, sas_event_data);
+ return 0;
+ }
break;
- case MPI_EVENT_INTEGRATED_RAID:
- mptsas_send_raid_event(ioc,
- (EVENT_DATA_RAID *)reply->Data);
+ }
+ case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
+ {
+ MpiEventDataSasExpanderStatusChange_t *expander_data =
+ (MpiEventDataSasExpanderStatusChange_t *)reply->Data;
+
+ if (ioc->old_sas_discovery_protocal)
+ return 0;
+
+ if (expander_data->ReasonCode ==
+ MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING &&
+ ioc->device_missing_delay)
+ delay = HZ * ioc->device_missing_delay;
break;
+ }
+ case MPI_EVENT_SAS_DISCOVERY:
+ {
+ u32 discovery_status;
+ EventDataSasDiscovery_t *discovery_data =
+ (EventDataSasDiscovery_t *)reply->Data;
+
+ discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus);
+ ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0;
+ if (ioc->old_sas_discovery_protocal && !discovery_status)
+ mptsas_queue_rescan(ioc);
+ return 0;
+ }
+ case MPI_EVENT_INTEGRATED_RAID:
case MPI_EVENT_PERSISTENT_TABLE_FULL:
- INIT_WORK(&ioc->sas_persist_task,
- mptsas_persist_clear_table);
- schedule_work(&ioc->sas_persist_task);
- break;
- case MPI_EVENT_SAS_DISCOVERY:
- mptsas_send_discovery_event(ioc,
- (EVENT_DATA_SAS_DISCOVERY *)reply->Data);
- break;
case MPI_EVENT_IR2:
- mptsas_send_ir2_event(ioc,
- (PTR_MPI_EVENT_DATA_IR2)reply->Data);
+ case MPI_EVENT_SAS_PHY_LINK_STATUS:
+ case MPI_EVENT_QUEUE_FULL:
break;
default:
- rc = mptscsih_event_process(ioc, reply);
- break;
+ return 0;
}
- out:
- return rc;
+ event_data_sz = ((reply->MsgLength * 4) -
+ offsetof(EventNotificationReply_t, Data));
+ sz = offsetof(struct fw_event_work, event_data) + event_data_sz;
+ fw_event = kzalloc(sz, GFP_ATOMIC);
+ if (!fw_event) {
+ printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name,
+ __func__, __LINE__);
+ return 0;
+ }
+ memcpy(fw_event->event_data, reply->Data, event_data_sz);
+ fw_event->event = event;
+ fw_event->ioc = ioc;
+ mptsas_add_fw_event(ioc, fw_event, delay);
+ return 0;
+}
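/*
 * [Editor's note -- illustrative sketch, not part of the patch] The event
 * callback above runs in the MPT reply path, so the firmware event is
 * captured with GFP_ATOMIC into a work item whose trailing event_data[]
 * holds a private copy of the variable-length payload; MsgLength is in
 * 32-bit words, hence the "* 4" when computing the payload size.  Reduced
 * form of that capture step (names reused from the patch, driver types
 * assumed to be in scope):
 */
static struct fw_event_work *example_capture_event(MPT_ADAPTER *ioc,
	EventNotificationReply_t *reply)
{
	int event_data_sz = (reply->MsgLength * 4) -
			    offsetof(EventNotificationReply_t, Data);
	struct fw_event_work *fw_event;

	fw_event = kzalloc(offsetof(struct fw_event_work, event_data) +
			   event_data_sz, GFP_ATOMIC);	/* atomic: reply context */
	if (!fw_event)
		return NULL;
	memcpy(fw_event->event_data, reply->Data, event_data_sz);
	fw_event->ioc = ioc;
	return fw_event;
}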
+
+/* Delete a volume when no longer listed in ioc pg2
+ */
+static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id)
+{
+ struct scsi_device *sdev;
+ int i;
+
+ sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0);
+ if (!sdev)
+ return;
+ if (!ioc->raid_data.pIocPg2)
+ goto out;
+ if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
+ goto out;
+ for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
+ if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id)
+ goto release_sdev;
+ out:
+ printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
+ "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id);
+ scsi_remove_device(sdev);
+ release_sdev:
+ scsi_device_put(sdev);
}
static int
@@ -3128,6 +4701,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return r;
ioc = pci_get_drvdata(pdev);
+ mptsas_fw_event_off(ioc);
ioc->DoneCtx = mptsasDoneCtx;
ioc->TaskCtx = mptsasTaskCtx;
ioc->InternalCtx = mptsasInternalCtx;
@@ -3211,17 +4785,15 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* A slightly different algorithm is required for
* 64bit SGEs.
*/
- scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
- if (sizeof(dma_addr_t) == sizeof(u64)) {
+ scale = ioc->req_sz/ioc->SGE_size;
+ if (ioc->sg_addr_size == sizeof(u64)) {
numSGE = (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
- (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
- sizeof(u32));
+ (ioc->req_sz - 60) / ioc->SGE_size;
} else {
numSGE = 1 + (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
- (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
- sizeof(u32));
+ (ioc->req_sz - 64) / ioc->SGE_size;
}
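/*
 * [Editor's worked example -- all concrete numbers are assumed, not from
 * the patch] With 64-bit SGEs, ioc->SGE_size is 12 bytes (4 bytes of
 * flags/length plus an 8-byte address).  Assuming ioc->req_sz = 128 and
 * ioc->facts.MaxChainDepth = 128, the 64-bit branch above gives:
 *
 *   scale  = 128 / 12               = 10
 *   numSGE = (10 - 1) * (128 - 1)   = 1143
 *          +  10                    =   10
 *          + (128 - 60) / 12        =    5
 *                                    ------
 *                                     1158 scatter-gather entries,
 *
 * which the check below compares against the host template's sg_tablesize.
 */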
if (numSGE < sh->sg_tablesize) {
@@ -3251,9 +4823,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Clear the TM flags
*/
- hd->tmPending = 0;
- hd->tmState = TM_STATE_NONE;
- hd->resetPending = 0;
hd->abortSCpnt = NULL;
/* Clear the pointer used to store
@@ -3273,10 +4842,11 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->sas_data.ptClear = mpt_pt_clear;
- init_waitqueue_head(&hd->scandv_waitq);
- hd->scandv_wait_done = 0;
hd->last_queue_full = 0;
INIT_LIST_HEAD(&hd->target_reset_list);
+ INIT_LIST_HEAD(&ioc->sas_device_info_list);
+ mutex_init(&ioc->sas_device_info_mutex);
+
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
if (ioc->sas_data.ptClear==1) {
@@ -3291,8 +4861,11 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_mptsas_probe;
}
+ /* older firmware doesn't support expander events */
+ if ((ioc->facts.HeaderVersion >> 8) < 0xE)
+ ioc->old_sas_discovery_protocal = 1;
mptsas_scan_sas_topology(ioc);
-
+ mptsas_fw_event_on(ioc);
return 0;
out_mptsas_probe:
@@ -3301,12 +4874,25 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return error;
}
+void
+mptsas_shutdown(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+
+ mptsas_fw_event_off(ioc);
+ mptsas_cleanup_fw_event_q(ioc);
+}
+
static void __devexit mptsas_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct mptsas_portinfo *p, *n;
int i;
+ mptsas_shutdown(pdev);
+
+ mptsas_del_device_components(ioc);
+
ioc->sas_discovery_ignore_events = 1;
sas_remove_host(ioc->sh);
@@ -3315,11 +4901,12 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
list_del(&p->list);
for (i = 0 ; i < p->num_phys ; i++)
mptsas_port_delete(ioc, p->phy_info[i].port_details);
+
kfree(p->phy_info);
kfree(p);
}
mutex_unlock(&ioc->sas_topology_mutex);
-
+ ioc->hba_port_info = NULL;
mptscsih_remove(pdev);
}
@@ -3344,7 +4931,7 @@ static struct pci_driver mptsas_driver = {
.id_table = mptsas_pci_table,
.probe = mptsas_probe,
.remove = __devexit_p(mptsas_remove),
- .shutdown = mptscsih_shutdown,
+ .shutdown = mptsas_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
.resume = mptscsih_resume,
@@ -3364,10 +4951,12 @@ mptsas_init(void)
return -ENODEV;
mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
- mptsasTaskCtx = mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER);
+ mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
mptsasInternalCtx =
mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER);
+ mptsasDeviceResetCtx =
+ mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER);
mpt_event_register(mptsasDoneCtx, mptsas_event_process);
mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);
@@ -3392,6 +4981,7 @@ mptsas_exit(void)
mpt_deregister(mptsasInternalCtx);
mpt_deregister(mptsasTaskCtx);
mpt_deregister(mptsasDoneCtx);
+ mpt_deregister(mptsasDeviceResetCtx);
}
module_init(mptsas_init);
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 2b544e0877e..953c2bfcf6a 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -53,6 +53,7 @@ struct mptsas_target_reset_event {
struct list_head list;
EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data;
u8 target_reset_issued;
+ unsigned long time_count;
};
enum mptsas_hotplug_action {
@@ -60,12 +61,37 @@ enum mptsas_hotplug_action {
MPTSAS_DEL_DEVICE,
MPTSAS_ADD_RAID,
MPTSAS_DEL_RAID,
+ MPTSAS_ADD_PHYSDISK,
+ MPTSAS_ADD_PHYSDISK_REPROBE,
+ MPTSAS_DEL_PHYSDISK,
+ MPTSAS_DEL_PHYSDISK_REPROBE,
MPTSAS_ADD_INACTIVE_VOLUME,
MPTSAS_IGNORE_EVENT,
};
+struct mptsas_mapping {
+ u8 id;
+ u8 channel;
+};
+
+struct mptsas_device_info {
+ struct list_head list;
+ struct mptsas_mapping os; /* operating system mapping */
+ struct mptsas_mapping fw; /* firmware mapping */
+ u64 sas_address;
+ u32 device_info; /* specific bits for devices */
+ u16 slot; /* enclosure slot id */
+ u64 enclosure_logical_id; /* enclosure address */
+ u8 is_logical_volume; /* is this a logical volume */
+ /* this device belongs to a volume */
+ u8 is_hidden_raid_component;
+ /* only valid when is_hidden_raid_component is set */
+ u8 volume_id;
+ /* cached data for a removed device */
+ u8 is_cached;
+};
+
struct mptsas_hotplug_event {
- struct work_struct work;
MPT_ADAPTER *ioc;
enum mptsas_hotplug_action event_type;
u64 sas_address;
@@ -73,11 +99,18 @@ struct mptsas_hotplug_event {
u8 id;
u32 device_info;
u16 handle;
- u16 parent_handle;
u8 phy_id;
- u8 phys_disk_num_valid; /* hrc (hidden raid component) */
u8 phys_disk_num; /* hrc - unique index*/
- u8 hidden_raid_component; /* hrc - don't expose*/
+ struct scsi_device *sdev;
+};
+
+struct fw_event_work {
+ struct list_head list;
+ struct delayed_work work;
+ MPT_ADAPTER *ioc;
+ u32 event;
+ u8 retries;
+ u8 event_data[1];
};
struct mptsas_discovery_event {
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index e62c6bc4ad3..8440f78f696 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -80,7 +80,7 @@ MODULE_VERSION(my_VERSION);
/*
* Other private/forward protos...
*/
-static struct scsi_cmnd * mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
+struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i);
static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd);
static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd);
@@ -92,18 +92,24 @@ static int mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
SCSIIORequest_t *pReq, int req_idx);
static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
-static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
-static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
-static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
+int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id,
+ int lun, int ctx2abort, ulong timeout);
int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
+void
+mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
+static int mptscsih_get_completion_code(MPT_ADAPTER *ioc,
+ MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
+static int
+mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
+ SCSITaskMgmtReply_t *pScsiTmReply);
void mptscsih_remove(struct pci_dev *);
void mptscsih_shutdown(struct pci_dev *);
#ifdef CONFIG_PM
@@ -113,69 +119,6 @@ int mptscsih_resume(struct pci_dev *pdev);
#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- * mptscsih_add_sge - Place a simple SGE at address pAddr.
- * @pAddr: virtual address for SGE
- * @flagslength: SGE flags and data transfer length
- * @dma_addr: Physical address
- *
- * This routine places a MPT request frame back on the MPT adapter's
- * FreeQ.
- */
-static inline void
-mptscsih_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
-{
- if (sizeof(dma_addr_t) == sizeof(u64)) {
- SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
- u32 tmp = dma_addr & 0xFFFFFFFF;
-
- pSge->FlagsLength = cpu_to_le32(flagslength);
- pSge->Address.Low = cpu_to_le32(tmp);
- tmp = (u32) ((u64)dma_addr >> 32);
- pSge->Address.High = cpu_to_le32(tmp);
-
- } else {
- SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
- pSge->FlagsLength = cpu_to_le32(flagslength);
- pSge->Address = cpu_to_le32(dma_addr);
- }
-} /* mptscsih_add_sge() */
-
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- * mptscsih_add_chain - Place a chain SGE at address pAddr.
- * @pAddr: virtual address for SGE
- * @next: nextChainOffset value (u32's)
- * @length: length of next SGL segment
- * @dma_addr: Physical address
- *
- * This routine places a MPT request frame back on the MPT adapter's
- * FreeQ.
- */
-static inline void
-mptscsih_add_chain(char *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
-{
- if (sizeof(dma_addr_t) == sizeof(u64)) {
- SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
- u32 tmp = dma_addr & 0xFFFFFFFF;
-
- pChain->Length = cpu_to_le16(length);
- pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
-
- pChain->NextChainOffset = next;
-
- pChain->Address.Low = cpu_to_le32(tmp);
- tmp = (u32) ((u64)dma_addr >> 32);
- pChain->Address.High = cpu_to_le32(tmp);
- } else {
- SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
- pChain->Length = cpu_to_le16(length);
- pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
- pChain->NextChainOffset = next;
- pChain->Address = cpu_to_le32(dma_addr);
- }
-} /* mptscsih_add_chain() */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
@@ -281,10 +224,10 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
*/
nextSGEset:
- numSgeSlots = ((frm_sz - sgeOffset) / (sizeof(u32) + sizeof(dma_addr_t)) );
+ numSgeSlots = ((frm_sz - sgeOffset) / ioc->SGE_size);
numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots;
- sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | MPT_SGE_FLAGS_ADDRESSING | sgdir;
+ sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | sgdir;
/* Get first (num - 1) SG elements
* Skip any SG entries with a length of 0
@@ -293,17 +236,19 @@ nextSGEset:
for (ii=0; ii < (numSgeThisFrame-1); ii++) {
thisxfer = sg_dma_len(sg);
if (thisxfer == 0) {
- sg = sg_next(sg); /* Get next SG element from the OS */
+ /* Get next SG element from the OS */
+ sg = sg_next(sg);
sg_done++;
continue;
}
v2 = sg_dma_address(sg);
- mptscsih_add_sge(psge, sgflags | thisxfer, v2);
+ ioc->add_sge(psge, sgflags | thisxfer, v2);
- sg = sg_next(sg); /* Get next SG element from the OS */
- psge += (sizeof(u32) + sizeof(dma_addr_t));
- sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
+ /* Get next SG element from the OS */
+ sg = sg_next(sg);
+ psge += ioc->SGE_size;
+ sgeOffset += ioc->SGE_size;
sg_done++;
}
@@ -320,12 +265,8 @@ nextSGEset:
thisxfer = sg_dma_len(sg);
v2 = sg_dma_address(sg);
- mptscsih_add_sge(psge, sgflags | thisxfer, v2);
- /*
- sg = sg_next(sg);
- psge += (sizeof(u32) + sizeof(dma_addr_t));
- */
- sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
+ ioc->add_sge(psge, sgflags | thisxfer, v2);
+ sgeOffset += ioc->SGE_size;
sg_done++;
if (chainSge) {
@@ -334,7 +275,8 @@ nextSGEset:
* Update the chain element
* Offset and Length fields.
*/
- mptscsih_add_chain((char *)chainSge, 0, sgeOffset, ioc->ChainBufferDMA + chain_dma_off);
+ ioc->add_chain((char *)chainSge, 0, sgeOffset,
+ ioc->ChainBufferDMA + chain_dma_off);
} else {
/* The current buffer is the original MF
* and there is no Chain buffer.
@@ -367,7 +309,7 @@ nextSGEset:
* set properly).
*/
if (sg_done) {
- u32 *ptmp = (u32 *) (psge - (sizeof(u32) + sizeof(dma_addr_t)));
+ u32 *ptmp = (u32 *) (psge - ioc->SGE_size);
sgflags = le32_to_cpu(*ptmp);
sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT;
*ptmp = cpu_to_le32(sgflags);
@@ -381,8 +323,9 @@ nextSGEset:
* Old chain element is now complete.
*/
u8 nextChain = (u8) (sgeOffset >> 2);
- sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
- mptscsih_add_chain((char *)chainSge, nextChain, sgeOffset, ioc->ChainBufferDMA + chain_dma_off);
+ sgeOffset += ioc->SGE_size;
+ ioc->add_chain((char *)chainSge, nextChain, sgeOffset,
+ ioc->ChainBufferDMA + chain_dma_off);
} else {
/* The original MF buffer requires a chain buffer -
* set the offset.
@@ -592,14 +535,15 @@ mptscsih_info_scsiio(MPT_ADAPTER *ioc, struct scsi_cmnd *sc, SCSIIOReply_t * pSc
}
scsi_print_command(sc);
- printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d\n",
- ioc->name, pScsiReply->Bus, pScsiReply->TargetID);
+ printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %d\n",
+ ioc->name, pScsiReply->Bus, pScsiReply->TargetID, sc->device->lun);
printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, "
"resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow,
scsi_get_resid(sc));
printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, "
"sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag),
le32_to_cpu(pScsiReply->TransferCount), sc->result);
+
printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), "
"scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n",
ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus,
@@ -654,16 +598,14 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
req_idx_MR = (mr != NULL) ?
le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx;
+
+ /* Special case: an already freed message frame is received from the
+ * firmware. This happens while the IOC is being reset.
+ * Return immediately; there is nothing to do.
+ */
if ((req_idx != req_idx_MR) ||
- (mf->u.frame.linkage.arg1 == 0xdeadbeaf)) {
- printk(MYIOC_s_ERR_FMT "Received a mf that was already freed\n",
- ioc->name);
- printk (MYIOC_s_ERR_FMT
- "req_idx=%x req_idx_MR=%x mf=%p mr=%p sc=%p\n",
- ioc->name, req_idx, req_idx_MR, mf, mr,
- mptscsih_get_scsi_lookup(ioc, req_idx_MR));
+ (le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf))
return 0;
- }
sc = mptscsih_getclear_scsi_lookup(ioc, req_idx);
if (sc == NULL) {
@@ -810,12 +752,16 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
*/
case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
- case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
/* Linux handles an unsolicited DID_RESET better
* than an unsolicited DID_ABORT.
*/
sc->result = DID_RESET << 16;
+ case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
+ if (ioc->bus_type == FC)
+ sc->result = DID_ERROR << 16;
+ else
+ sc->result = DID_RESET << 16;
break;
case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
@@ -992,9 +938,9 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
scsi_dma_unmap(sc);
sc->result = DID_RESET << 16;
sc->host_scribble = NULL;
- sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
- "completing cmds: fw_channel %d, fw_id %d, sc=%p,"
- " mf = %p, idx=%x\n", ioc->name, channel, id, sc, mf, ii);
+ dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
+ "completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, "
+ "idx=%x\n", ioc->name, channel, id, sc, mf, ii));
sc->scsi_done(sc);
}
}
@@ -1053,9 +999,11 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
scsi_dma_unmap(sc);
sc->host_scribble = NULL;
sc->result = DID_NO_CONNECT << 16;
- sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT "completing cmds: fw_channel %d,"
- "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name, vdevice->vtarget->channel,
- vdevice->vtarget->id, sc, mf, ii);
+ dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device,
+ MYIOC_s_FMT "completing cmds: fw_channel %d, "
+ "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name,
+ vdevice->vtarget->channel, vdevice->vtarget->id,
+ sc, mf, ii));
sc->scsi_done(sc);
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
}
@@ -1346,7 +1294,6 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
MPT_FRAME_HDR *mf;
SCSIIORequest_t *pScsiReq;
VirtDevice *vdevice = SCpnt->device->hostdata;
- int lun;
u32 datalen;
u32 scsictl;
u32 scsidir;
@@ -1357,13 +1304,12 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
hd = shost_priv(SCpnt->device->host);
ioc = hd->ioc;
- lun = SCpnt->device->lun;
SCpnt->scsi_done = done;
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
ioc->name, SCpnt, done));
- if (hd->resetPending) {
+ if (ioc->taskmgmt_quiesce_io) {
dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n",
ioc->name, SCpnt));
return SCSI_MLQUEUE_HOST_BUSY;
@@ -1422,7 +1368,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
pScsiReq->CDBLength = SCpnt->cmd_len;
pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
pScsiReq->Reserved = 0;
- pScsiReq->MsgFlags = mpt_msg_flags();
+ pScsiReq->MsgFlags = mpt_msg_flags(ioc);
int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN);
pScsiReq->Control = cpu_to_le32(scsictl);
@@ -1448,7 +1394,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
*/
if (datalen == 0) {
/* Add a NULL SGE */
- mptscsih_add_sge((char *)&pScsiReq->SGL, MPT_SGE_FLAGS_SSIMPLE_READ | 0,
+ ioc->add_sge((char *)&pScsiReq->SGL,
+ MPT_SGE_FLAGS_SSIMPLE_READ | 0,
(dma_addr_t) -1);
} else {
/* Add a 32 or 64 bit SGE */
@@ -1528,8 +1475,8 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
- * mptscsih_TMHandler - Generic handler for SCSI Task Management.
- * @hd: Pointer to MPT SCSI HOST structure
+ * mptscsih_IssueTaskMgmt - Generic send Task Management function.
+ * @hd: Pointer to MPT_SCSI_HOST structure
* @type: Task Management type
* @channel: channel number for task management
* @id: Logical Target ID for reset (if appropriate)
@@ -1537,145 +1484,68 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
* @ctx2abort: Context for the task to be aborted (if appropriate)
* @timeout: timeout for task management control
*
- * Fall through to mpt_HardResetHandler if: not operational, too many
- * failed TM requests or handshake failure.
+ * Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
+ * or a non-interrupt thread. In the former, must not call schedule().
*
- * Remark: Currently invoked from a non-interrupt thread (_bh).
+ * Not all fields are meaningful for all task types.
*
- * Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC
- * will be active.
+ * Returns 0 for SUCCESS, or FAILED.
*
- * Returns 0 for SUCCESS, or %FAILED.
**/
int
-mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout)
+mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
+ int ctx2abort, ulong timeout)
{
- MPT_ADAPTER *ioc;
- int rc = -1;
+ MPT_FRAME_HDR *mf;
+ SCSITaskMgmt_t *pScsiTm;
+ int ii;
+ int retval;
+ MPT_ADAPTER *ioc = hd->ioc;
+ unsigned long timeleft;
+ u8 issue_hard_reset;
u32 ioc_raw_state;
- unsigned long flags;
-
- ioc = hd->ioc;
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler Entered!\n", ioc->name));
-
- // SJR - CHECKME - Can we avoid this here?
- // (mpt_HardResetHandler has this check...)
- spin_lock_irqsave(&ioc->diagLock, flags);
- if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)) {
- spin_unlock_irqrestore(&ioc->diagLock, flags);
- return FAILED;
- }
- spin_unlock_irqrestore(&ioc->diagLock, flags);
-
- /* Wait a fixed amount of time for the TM pending flag to be cleared.
- * If we time out and not bus reset, then we return a FAILED status
- * to the caller.
- * The call to mptscsih_tm_pending_wait() will set the pending flag
- * if we are
- * successful. Otherwise, reload the FW.
- */
- if (mptscsih_tm_pending_wait(hd) == FAILED) {
- if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler abort: "
- "Timed out waiting for last TM (%d) to complete! \n",
- ioc->name, hd->tmPending));
- return FAILED;
- } else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler target "
- "reset: Timed out waiting for last TM (%d) "
- "to complete! \n", ioc->name,
- hd->tmPending));
- return FAILED;
- } else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler bus reset: "
- "Timed out waiting for last TM (%d) to complete! \n",
- ioc->name, hd->tmPending));
- return FAILED;
- }
- } else {
- spin_lock_irqsave(&ioc->FreeQlock, flags);
- hd->tmPending |= (1 << type);
- spin_unlock_irqrestore(&ioc->FreeQlock, flags);
- }
+ unsigned long time_count;
+ issue_hard_reset = 0;
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) {
printk(MYIOC_s_WARN_FMT
- "TM Handler for type=%x: IOC Not operational (0x%x)!\n",
+ "TaskMgmt type=%x: IOC Not operational (0x%x)!\n",
ioc->name, type, ioc_raw_state);
- printk(MYIOC_s_WARN_FMT " Issuing HardReset!!\n", ioc->name);
+ printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
+ ioc->name, __func__);
if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0)
- printk(MYIOC_s_WARN_FMT "TMHandler: HardReset "
+ printk(MYIOC_s_WARN_FMT "TaskMgmt HardReset "
"FAILED!!\n", ioc->name);
- return FAILED;
+ return 0;
}
if (ioc_raw_state & MPI_DOORBELL_ACTIVE) {
printk(MYIOC_s_WARN_FMT
- "TM Handler for type=%x: ioc_state: "
+ "TaskMgmt type=%x: ioc_state: "
"DOORBELL_ACTIVE (0x%x)!\n",
ioc->name, type, ioc_raw_state);
return FAILED;
}
- /* Isse the Task Mgmt request.
- */
- if (hd->hard_resets < -1)
- hd->hard_resets++;
-
- rc = mptscsih_IssueTaskMgmt(hd, type, channel, id, lun,
- ctx2abort, timeout);
- if (rc)
- printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt failed!\n",
- ioc->name);
- else
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Issue of TaskMgmt Successful!\n",
- ioc->name));
-
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "TMHandler rc = %d!\n", ioc->name, rc));
-
- return rc;
-}
-
-
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- * mptscsih_IssueTaskMgmt - Generic send Task Management function.
- * @hd: Pointer to MPT_SCSI_HOST structure
- * @type: Task Management type
- * @channel: channel number for task management
- * @id: Logical Target ID for reset (if appropriate)
- * @lun: Logical Unit for reset (if appropriate)
- * @ctx2abort: Context for the task to be aborted (if appropriate)
- * @timeout: timeout for task management control
- *
- * Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
- * or a non-interrupt thread. In the former, must not call schedule().
- *
- * Not all fields are meaningfull for all task types.
- *
- * Returns 0 for SUCCESS, or FAILED.
- *
- **/
-static int
-mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout)
-{
- MPT_FRAME_HDR *mf;
- SCSITaskMgmt_t *pScsiTm;
- int ii;
- int retval;
- MPT_ADAPTER *ioc = hd->ioc;
+ mutex_lock(&ioc->taskmgmt_cmds.mutex);
+ if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+ mf = NULL;
+ retval = FAILED;
+ goto out;
+ }
/* Return Fail to calling function if no message frames available.
*/
if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n",
- ioc->name));
- return FAILED;
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "TaskMgmt no msg frames!!\n", ioc->name));
+ retval = FAILED;
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ goto out;
}
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n",
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
ioc->name, mf));
/* Format the Request
@@ -1699,11 +1569,14 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
pScsiTm->TaskMsgContext = ctx2abort;
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt: ctx2abort (0x%08x) "
- "type=%d\n", ioc->name, ctx2abort, type));
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt: ctx2abort (0x%08x) "
+ "task_type = 0x%02X, timeout = %ld\n", ioc->name, ctx2abort,
+ type, timeout));
DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm);
+ INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+ time_count = jiffies;
if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
(ioc->facts.MsgVersion >= MPI_VERSION_01_05))
mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf);
@@ -1711,47 +1584,50 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
retval = mpt_send_handshake_request(ioc->TaskCtx, ioc,
sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP);
if (retval) {
- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "send_handshake FAILED!"
- " (hd %p, ioc %p, mf %p, rc=%d) \n", ioc->name, hd,
- ioc, mf, retval));
- goto fail_out;
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "TaskMgmt handshake FAILED!(mf=%p, rc=%d) \n",
+ ioc->name, mf, retval));
+ mpt_free_msg_frame(ioc, mf);
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ goto out;
}
}
- if(mptscsih_tm_wait_for_completion(hd, timeout) == FAILED) {
- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "task management request TIMED OUT!"
- " (hd %p, ioc %p, mf %p) \n", ioc->name, hd,
- ioc, mf));
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
- ioc->name));
- retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rc=%d \n",
- ioc->name, retval));
- goto fail_out;
+ timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
+ timeout*HZ);
+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ retval = FAILED;
+ dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "TaskMgmt TIMED OUT!(mf=%p)\n", ioc->name, mf));
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out;
+ issue_hard_reset = 1;
+ goto out;
}
- /*
- * Handle success case, see if theres a non-zero ioc_status.
- */
- if (hd->tm_iocstatus == MPI_IOCSTATUS_SUCCESS ||
- hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
- hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
- retval = 0;
- else
- retval = FAILED;
+ retval = mptscsih_taskmgmt_reply(ioc, type,
+ (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply);
- return retval;
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt completed (%d seconds)\n",
+ ioc->name, jiffies_to_msecs(jiffies - time_count)/1000));
- fail_out:
+ out:
- /*
- * Free task management mf, and corresponding tm flags
- */
- mpt_free_msg_frame(ioc, mf);
- hd->tmPending = 0;
- hd->tmState = TM_STATE_NONE;
- return FAILED;
+ CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+ if (issue_hard_reset) {
+ printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+ ioc->name, __func__);
+ retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+ }
+
+ retval = (retval == 0) ? 0 : FAILED;
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ return retval;
}
+EXPORT_SYMBOL(mptscsih_IssueTaskMgmt);
static int
mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
@@ -1838,13 +1714,8 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
goto out;
}
- if (hd->resetPending) {
- retval = FAILED;
- goto out;
- }
-
- if (hd->timeouts < -1)
- hd->timeouts++;
+ if (ioc->timeouts < -1)
+ ioc->timeouts++;
if (mpt_fwfault_debug)
mpt_halt_firmware(ioc);
@@ -1861,22 +1732,30 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
hd->abortSCpnt = SCpnt;
- retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
- vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun,
- ctx2abort, mptscsih_get_tm_timeout(ioc));
+ retval = mptscsih_IssueTaskMgmt(hd,
+ MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ vdevice->vtarget->channel,
+ vdevice->vtarget->id, vdevice->lun,
+ ctx2abort, mptscsih_get_tm_timeout(ioc));
if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx &&
- SCpnt->serial_number == sn)
+ SCpnt->serial_number == sn) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "task abort: command still in active list! (sc=%p)\n",
+ ioc->name, SCpnt));
retval = FAILED;
+ } else {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "task abort: command cleared from active list! (sc=%p)\n",
+ ioc->name, SCpnt));
+ retval = SUCCESS;
+ }
out:
printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
- ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+ ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
- if (retval == 0)
- return SUCCESS;
- else
- return FAILED;
+ return retval;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1909,14 +1788,9 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
ioc->name, SCpnt);
scsi_print_command(SCpnt);
- if (hd->resetPending) {
- retval = FAILED;
- goto out;
- }
-
vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget) {
- retval = 0;
+ retval = SUCCESS;
goto out;
}
@@ -1927,9 +1801,11 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
goto out;
}
- retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
- vdevice->vtarget->channel, vdevice->vtarget->id, 0, 0,
- mptscsih_get_tm_timeout(ioc));
+ retval = mptscsih_IssueTaskMgmt(hd,
+ MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ vdevice->vtarget->channel,
+ vdevice->vtarget->id, 0, 0,
+ mptscsih_get_tm_timeout(ioc));
out:
printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
@@ -1972,12 +1848,16 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
ioc->name, SCpnt);
scsi_print_command(SCpnt);
- if (hd->timeouts < -1)
- hd->timeouts++;
+ if (ioc->timeouts < -1)
+ ioc->timeouts++;
vdevice = SCpnt->device->hostdata;
- retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
- vdevice->vtarget->channel, 0, 0, 0, mptscsih_get_tm_timeout(ioc));
+ if (!vdevice || !vdevice->vtarget)
+ return SUCCESS;
+ retval = mptscsih_IssueTaskMgmt(hd,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ vdevice->vtarget->channel, 0, 0, 0,
+ mptscsih_get_tm_timeout(ioc));
printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n",
ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
@@ -2001,8 +1881,9 @@ int
mptscsih_host_reset(struct scsi_cmnd *SCpnt)
{
MPT_SCSI_HOST * hd;
- int retval;
+ int status = SUCCESS;
MPT_ADAPTER *ioc;
+ int retval;
/* If we can't locate the host to reset, then we failed. */
if ((hd = shost_priv(SCpnt->device->host)) == NULL){
@@ -2021,86 +1902,71 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
/* If our attempts to reset the host failed, then return a failed
* status. The host will be taken off line by the SCSI mid-layer.
*/
- if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) {
- retval = FAILED;
- } else {
- /* Make sure TM pending is cleared and TM state is set to
- * NONE.
- */
- retval = 0;
- hd->tmPending = 0;
- hd->tmState = TM_STATE_NONE;
- }
+ retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+ if (retval < 0)
+ status = FAILED;
+ else
+ status = SUCCESS;
printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n",
ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
- return retval;
+ return status;
}
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- * mptscsih_tm_pending_wait - wait for pending task management request to complete
- * @hd: Pointer to MPT host structure.
- *
- * Returns {SUCCESS,FAILED}.
- */
static int
-mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd)
+mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
+ SCSITaskMgmtReply_t *pScsiTmReply)
{
- unsigned long flags;
- int loop_count = 4 * 10; /* Wait 10 seconds */
- int status = FAILED;
- MPT_ADAPTER *ioc = hd->ioc;
+ u16 iocstatus;
+ u32 termination_count;
+ int retval;
- do {
- spin_lock_irqsave(&ioc->FreeQlock, flags);
- if (hd->tmState == TM_STATE_NONE) {
- hd->tmState = TM_STATE_IN_PROGRESS;
- hd->tmPending = 1;
- spin_unlock_irqrestore(&ioc->FreeQlock, flags);
- status = SUCCESS;
- break;
- }
- spin_unlock_irqrestore(&ioc->FreeQlock, flags);
- msleep(250);
- } while (--loop_count);
+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+ retval = FAILED;
+ goto out;
+ }
- return status;
-}
+ DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- * mptscsih_tm_wait_for_completion - wait for completion of TM task
- * @hd: Pointer to MPT host structure.
- * @timeout: timeout value
- *
- * Returns {SUCCESS,FAILED}.
- */
-static int
-mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout )
-{
- unsigned long flags;
- int loop_count = 4 * timeout;
- int status = FAILED;
- MPT_ADAPTER *ioc = hd->ioc;
+ iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+ termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
- do {
- spin_lock_irqsave(&ioc->FreeQlock, flags);
- if(hd->tmPending == 0) {
- status = SUCCESS;
- spin_unlock_irqrestore(&ioc->FreeQlock, flags);
- break;
- }
- spin_unlock_irqrestore(&ioc->FreeQlock, flags);
- msleep(250);
- } while (--loop_count);
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt fw_channel = %d, fw_id = %d, task_type = 0x%02X,\n"
+ "\tiocstatus = 0x%04X, loginfo = 0x%08X, response_code = 0x%02X,\n"
+ "\tterm_cmnds = %d\n", ioc->name, pScsiTmReply->Bus,
+ pScsiTmReply->TargetID, type, le16_to_cpu(pScsiTmReply->IOCStatus),
+ le32_to_cpu(pScsiTmReply->IOCLogInfo), pScsiTmReply->ResponseCode,
+ termination_count));
- return status;
+ if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
+ pScsiTmReply->ResponseCode)
+ mptscsih_taskmgmt_response_code(ioc,
+ pScsiTmReply->ResponseCode);
+
+ if (iocstatus == MPI_IOCSTATUS_SUCCESS) {
+ retval = 0;
+ goto out;
+ }
+
+ retval = FAILED;
+ if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+ if (termination_count == 1)
+ retval = 0;
+ goto out;
+ }
+
+ if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
+ iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
+ retval = 0;
+
+ out:
+ return retval;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-static void
+void
mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
{
char *desc;
@@ -2134,6 +2000,7 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n",
ioc->name, response_code, desc);
}
+EXPORT_SYMBOL(mptscsih_taskmgmt_response_code);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
@@ -2150,97 +2017,28 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
* Returns 1 indicating alloc'd request frame ptr should be freed.
**/
int
-mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
+ MPT_FRAME_HDR *mr)
{
- SCSITaskMgmtReply_t *pScsiTmReply;
- SCSITaskMgmt_t *pScsiTmReq;
- MPT_SCSI_HOST *hd;
- unsigned long flags;
- u16 iocstatus;
- u8 tmType;
- u32 termination_count;
-
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed (mf=%p,mr=%p)\n",
- ioc->name, mf, mr));
- if (!ioc->sh) {
- dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
- "TaskMgmt Complete: NULL Scsi Host Ptr\n", ioc->name));
- return 1;
- }
-
- if (mr == NULL) {
- dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
- "ERROR! TaskMgmt Reply: NULL Request %p\n", ioc->name, mf));
- return 1;
- }
-
- hd = shost_priv(ioc->sh);
- pScsiTmReply = (SCSITaskMgmtReply_t*)mr;
- pScsiTmReq = (SCSITaskMgmt_t*)mf;
- tmType = pScsiTmReq->TaskType;
- iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
- termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr));
- if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
- pScsiTmReply->ResponseCode)
- mptscsih_taskmgmt_response_code(ioc,
- pScsiTmReply->ResponseCode);
- DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
+ ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
-#ifdef CONFIG_FUSION_LOGGING
- if ((ioc->debug_level & MPT_DEBUG_REPLY) ||
- (ioc->debug_level & MPT_DEBUG_TM ))
- printk("%s: ha=%d [%d:%d:0] task_type=0x%02X "
- "iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X "
- "term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus,
- pScsiTmReply->TargetID, pScsiTmReq->TaskType,
- le16_to_cpu(pScsiTmReply->IOCStatus),
- le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode,
- le32_to_cpu(pScsiTmReply->TerminationCount));
-#endif
- if (!iocstatus) {
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT " TaskMgmt SUCCESS\n", ioc->name));
- hd->abortSCpnt = NULL;
+ if (!mr)
goto out;
- }
-
- /* Error? (anything non-zero?) */
-
- /* clear flags and continue.
- */
- switch (tmType) {
-
- case MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
- if (termination_count == 1)
- iocstatus = MPI_IOCSTATUS_SCSI_TASK_TERMINATED;
- hd->abortSCpnt = NULL;
- break;
-
- case MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS:
-
- /* If an internal command is present
- * or the TM failed - reload the FW.
- * FC FW may respond FAILED to an ABORT
- */
- if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED ||
- hd->cmdPtr)
- if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
- printk(MYIOC_s_WARN_FMT " Firmware Reload FAILED!!\n", ioc->name);
- break;
-
- case MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
- default:
- break;
- }
+ ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ memcpy(ioc->taskmgmt_cmds.reply, mr,
+ min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
out:
- spin_lock_irqsave(&ioc->FreeQlock, flags);
- hd->tmPending = 0;
- hd->tmState = TM_STATE_NONE;
- hd->tm_iocstatus = iocstatus;
- spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-
- return 1;
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->taskmgmt_cmds.done);
+ return 1;
+ }
+ return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2290,8 +2088,10 @@ int
mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
struct inactive_raid_component_info *component_info;
- int i;
+ int i, j;
+ RaidPhysDiskPage1_t *phys_disk;
int rc = 0;
+ int num_paths;
if (!ioc->raid_data.pIocPg3)
goto out;
@@ -2303,6 +2103,45 @@ mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
}
}
+ if (ioc->bus_type != SAS)
+ goto out;
+
+ /*
+ * Check if dual path
+ */
+ for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+ num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
+ ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
+ if (num_paths < 2)
+ continue;
+ phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
+ (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
+ if (!phys_disk)
+ continue;
+ if ((mpt_raid_phys_disk_pg1(ioc,
+ ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
+ phys_disk))) {
+ kfree(phys_disk);
+ continue;
+ }
+ for (j = 0; j < num_paths; j++) {
+ if ((phys_disk->Path[j].Flags &
+ MPI_RAID_PHYSDISK1_FLAG_INVALID))
+ continue;
+ if ((phys_disk->Path[j].Flags &
+ MPI_RAID_PHYSDISK1_FLAG_BROKEN))
+ continue;
+ if ((id == phys_disk->Path[j].PhysDiskID) &&
+ (channel == phys_disk->Path[j].PhysDiskBus)) {
+ rc = 1;
+ kfree(phys_disk);
+ goto out;
+ }
+ }
+ kfree(phys_disk);
+ }
+
+
/*
* Check inactive list for matching phys disks
*/
@@ -2327,8 +2166,10 @@ u8
mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
struct inactive_raid_component_info *component_info;
- int i;
+ int i, j;
+ RaidPhysDiskPage1_t *phys_disk;
int rc = -ENXIO;
+ int num_paths;
if (!ioc->raid_data.pIocPg3)
goto out;
@@ -2340,6 +2181,44 @@ mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
}
}
+ if (ioc->bus_type != SAS)
+ goto out;
+
+ /*
+ * Check if dual path
+ */
+ for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+ num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
+ ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
+ if (num_paths < 2)
+ continue;
+ phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
+ (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
+ if (!phys_disk)
+ continue;
+ if ((mpt_raid_phys_disk_pg1(ioc,
+ ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
+ phys_disk))) {
+ kfree(phys_disk);
+ continue;
+ }
+ for (j = 0; j < num_paths; j++) {
+ if ((phys_disk->Path[j].Flags &
+ MPI_RAID_PHYSDISK1_FLAG_INVALID))
+ continue;
+ if ((phys_disk->Path[j].Flags &
+ MPI_RAID_PHYSDISK1_FLAG_BROKEN))
+ continue;
+ if ((id == phys_disk->Path[j].PhysDiskID) &&
+ (channel == phys_disk->Path[j].PhysDiskBus)) {
+ rc = phys_disk->PhysDiskNum;
+ kfree(phys_disk);
+ goto out;
+ }
+ }
+ kfree(phys_disk);
+ }
+
/*
* Check inactive list for matching phys disks
*/
@@ -2457,7 +2336,6 @@ mptscsih_slave_configure(struct scsi_device *sdev)
sdev->ppr, sdev->inquiry_len));
vdevice->configured_lun = 1;
- mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Queue depth=%d, tflags=%x\n",
@@ -2469,6 +2347,7 @@ mptscsih_slave_configure(struct scsi_device *sdev)
ioc->name, vtarget->negoFlags, vtarget->maxOffset,
vtarget->minSyncFactor));
+ mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"tagged %d, simple %d, ordered %d\n",
ioc->name,sdev->tagged_supported, sdev->simple_tags,
@@ -2542,15 +2421,13 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
}
/**
- * mptscsih_get_scsi_lookup
+ * mptscsih_get_scsi_lookup - retrieves scmd entry
* @ioc: Pointer to MPT_ADAPTER structure
* @i: index into the array
*
- * retrieves scmd entry from ScsiLookup[] array list
- *
* Returns the scsi_cmd pointer
- **/
-static struct scsi_cmnd *
+ */
+struct scsi_cmnd *
mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
{
unsigned long flags;
@@ -2562,15 +2439,15 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
return scmd;
}
+EXPORT_SYMBOL(mptscsih_get_scsi_lookup);
/**
- * mptscsih_getclear_scsi_lookup
+ * mptscsih_getclear_scsi_lookup - retrieves and clears scmd entry from ScsiLookup[] array list
* @ioc: Pointer to MPT_ADAPTER structure
* @i: index into the array
*
- * retrieves and clears scmd entry from ScsiLookup[] array list
- *
* Returns the scsi_cmd pointer
+ *
**/
static struct scsi_cmnd *
mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
@@ -2635,94 +2512,33 @@ int
mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
MPT_SCSI_HOST *hd;
- unsigned long flags;
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- ": IOC %s_reset routed to SCSI host driver!\n",
- ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
- reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
-
- /* If a FW reload request arrives after base installed but
- * before all scsi hosts have been attached, then an alt_ioc
- * may have a NULL sh pointer.
- */
if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL)
return 0;
- else
- hd = shost_priv(ioc->sh);
-
- if (reset_phase == MPT_IOC_SETUP_RESET) {
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Setup-Diag Reset\n", ioc->name));
-
- /* Clean Up:
- * 1. Set Hard Reset Pending Flag
- * All new commands go to doneQ
- */
- hd->resetPending = 1;
-
- } else if (reset_phase == MPT_IOC_PRE_RESET) {
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Diag Reset\n", ioc->name));
- /* 2. Flush running commands
- * Clean ScsiLookup (and associated memory)
- * AND clean mytaskQ
- */
-
- /* 2b. Reply to OS all known outstanding I/O commands.
- */
+ hd = shost_priv(ioc->sh);
+ switch (reset_phase) {
+ case MPT_IOC_SETUP_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+ break;
+ case MPT_IOC_PRE_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
mptscsih_flush_running_cmds(hd);
-
- /* 2c. If there was an internal command that
- * has not completed, configuration or io request,
- * free these resources.
- */
- if (hd->cmdPtr) {
- del_timer(&hd->timer);
- mpt_free_msg_frame(ioc, hd->cmdPtr);
- }
-
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Reset complete.\n", ioc->name));
-
- } else {
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Diag Reset\n", ioc->name));
-
- /* Once a FW reload begins, all new OS commands are
- * redirected to the doneQ w/ a reset status.
- * Init all control structures.
- */
-
- /* 2. Chain Buffer initialization
- */
-
- /* 4. Renegotiate to all devices, if SPI
- */
-
- /* 5. Enable new commands to be posted
- */
- spin_lock_irqsave(&ioc->FreeQlock, flags);
- hd->tmPending = 0;
- spin_unlock_irqrestore(&ioc->FreeQlock, flags);
- hd->resetPending = 0;
- hd->tmState = TM_STATE_NONE;
-
- /* 6. If there was an internal command,
- * wake this process up.
- */
- if (hd->cmdPtr) {
- /*
- * Wake up the original calling thread
- */
- hd->pLocal = &hd->localReply;
- hd->pLocal->completion = MPT_SCANDV_DID_RESET;
- hd->scandv_wait_done = 1;
- wake_up(&hd->scandv_waitq);
- hd->cmdPtr = NULL;
+ break;
+ case MPT_IOC_POST_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
+ if (ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->internal_cmds.status |=
+ MPT_MGMT_STATUS_DID_IOCRESET;
+ complete(&ioc->internal_cmds.done);
}
-
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Reset complete.\n", ioc->name));
-
+ break;
+ default:
+ break;
}
-
return 1; /* currently means nothing really */
}
@@ -2730,55 +2546,16 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
int
mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
- MPT_SCSI_HOST *hd;
u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
- devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
- ioc->name, event));
-
- if (ioc->sh == NULL ||
- ((hd = shost_priv(ioc->sh)) == NULL))
- return 1;
-
- switch (event) {
- case MPI_EVENT_UNIT_ATTENTION: /* 03 */
- /* FIXME! */
- break;
- case MPI_EVENT_IOC_BUS_RESET: /* 04 */
- case MPI_EVENT_EXT_BUS_RESET: /* 05 */
- if (hd && (ioc->bus_type == SPI) && (hd->soft_resets < -1))
- hd->soft_resets++;
- break;
- case MPI_EVENT_LOGOUT: /* 09 */
- /* FIXME! */
- break;
-
- case MPI_EVENT_RESCAN: /* 06 */
- break;
-
- /*
- * CHECKME! Don't think we need to do
- * anything for these, but...
- */
- case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
- case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
- /*
- * CHECKME! Falling thru...
- */
- break;
-
- case MPI_EVENT_INTEGRATED_RAID: /* 0B */
- break;
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "MPT event (=%02Xh) routed to SCSI host driver!\n",
+ ioc->name, event));
- case MPI_EVENT_NONE: /* 00 */
- case MPI_EVENT_LOG_DATA: /* 01 */
- case MPI_EVENT_STATE_CHANGE: /* 02 */
- case MPI_EVENT_EVENT_CHANGE: /* 0A */
- default:
- dprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": Ignoring event (=%02Xh)\n",
- ioc->name, event));
- break;
- }
+ if ((event == MPI_EVENT_IOC_BUS_RESET ||
+ event == MPI_EVENT_EXT_BUS_RESET) &&
+ (ioc->bus_type == SPI) && (ioc->soft_resets < -1))
+ ioc->soft_resets++;
return 1; /* currently means nothing really */
}
@@ -2809,153 +2586,44 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
* Used ONLY for DV and other internal commands.
*/
int
-mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+ MPT_FRAME_HDR *reply)
{
- MPT_SCSI_HOST *hd;
SCSIIORequest_t *pReq;
- int completionCode;
+ SCSIIOReply_t *pReply;
+ u8 cmd;
u16 req_idx;
+ u8 *sense_data;
+ int sz;
- hd = shost_priv(ioc->sh);
-
- if ((mf == NULL) ||
- (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) {
- printk(MYIOC_s_ERR_FMT
- "ScanDvComplete, %s req frame ptr! (=%p)\n",
- ioc->name, mf?"BAD":"NULL", (void *) mf);
- goto wakeup;
- }
-
- del_timer(&hd->timer);
- req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
- mptscsih_set_scsi_lookup(ioc, req_idx, NULL);
- pReq = (SCSIIORequest_t *) mf;
+ ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+ ioc->internal_cmds.completion_code = MPT_SCANDV_GOOD;
+ if (!reply)
+ goto out;
- if (mf != hd->cmdPtr) {
- printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p, idx=%d)\n",
- ioc->name, (void *)mf, (void *) hd->cmdPtr, req_idx);
+ pReply = (SCSIIOReply_t *) reply;
+ pReq = (SCSIIORequest_t *) req;
+ ioc->internal_cmds.completion_code =
+ mptscsih_get_completion_code(ioc, req, reply);
+ ioc->internal_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ memcpy(ioc->internal_cmds.reply, reply,
+ min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength));
+ cmd = reply->u.hdr.Function;
+ if (((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+ (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) &&
+ (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
+ req_idx = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
+ sense_data = ((u8 *)ioc->sense_buf_pool +
+ (req_idx * MPT_SENSE_BUFFER_ALLOC));
+ sz = min_t(int, pReq->SenseBufferLength,
+ MPT_SENSE_BUFFER_ALLOC);
+ memcpy(ioc->internal_cmds.sense, sense_data, sz);
}
- hd->cmdPtr = NULL;
-
- ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScanDvComplete (mf=%p,mr=%p,idx=%d)\n",
- ioc->name, mf, mr, req_idx));
-
- hd->pLocal = &hd->localReply;
- hd->pLocal->scsiStatus = 0;
-
- /* If target struct exists, clear sense valid flag.
- */
- if (mr == NULL) {
- completionCode = MPT_SCANDV_GOOD;
- } else {
- SCSIIOReply_t *pReply;
- u16 status;
- u8 scsi_status;
-
- pReply = (SCSIIOReply_t *) mr;
-
- status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
- scsi_status = pReply->SCSIStatus;
-
-
- switch(status) {
-
- case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
- completionCode = MPT_SCANDV_SELECTION_TIMEOUT;
- break;
-
- case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
- case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
- case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
- case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
- completionCode = MPT_SCANDV_DID_RESET;
- break;
-
- case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
- case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
- case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
- if (pReply->Function == MPI_FUNCTION_CONFIG) {
- ConfigReply_t *pr = (ConfigReply_t *)mr;
- completionCode = MPT_SCANDV_GOOD;
- hd->pLocal->header.PageVersion = pr->Header.PageVersion;
- hd->pLocal->header.PageLength = pr->Header.PageLength;
- hd->pLocal->header.PageNumber = pr->Header.PageNumber;
- hd->pLocal->header.PageType = pr->Header.PageType;
-
- } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
- /* If the RAID Volume request is successful,
- * return GOOD, else indicate that
- * some type of error occurred.
- */
- MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr;
- if (le16_to_cpu(pr->ActionStatus) == MPI_RAID_ACTION_ASTATUS_SUCCESS)
- completionCode = MPT_SCANDV_GOOD;
- else
- completionCode = MPT_SCANDV_SOME_ERROR;
- memcpy(hd->pLocal->sense, pr, sizeof(hd->pLocal->sense));
-
- } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
- u8 *sense_data;
- int sz;
-
- /* save sense data in global structure
- */
- completionCode = MPT_SCANDV_SENSE;
- hd->pLocal->scsiStatus = scsi_status;
- sense_data = ((u8 *)ioc->sense_buf_pool +
- (req_idx * MPT_SENSE_BUFFER_ALLOC));
-
- sz = min_t(int, pReq->SenseBufferLength,
- SCSI_STD_SENSE_BYTES);
- memcpy(hd->pLocal->sense, sense_data, sz);
-
- ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT " Check Condition, sense ptr %p\n",
- ioc->name, sense_data));
- } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
- if (pReq->CDB[0] == INQUIRY)
- completionCode = MPT_SCANDV_ISSUE_SENSE;
- else
- completionCode = MPT_SCANDV_DID_RESET;
- }
- else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
- completionCode = MPT_SCANDV_DID_RESET;
- else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
- completionCode = MPT_SCANDV_DID_RESET;
- else {
- completionCode = MPT_SCANDV_GOOD;
- hd->pLocal->scsiStatus = scsi_status;
- }
- break;
-
- case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
- if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
- completionCode = MPT_SCANDV_DID_RESET;
- else
- completionCode = MPT_SCANDV_SOME_ERROR;
- break;
-
- default:
- completionCode = MPT_SCANDV_SOME_ERROR;
- break;
-
- } /* switch(status) */
-
- } /* end of address reply case */
-
- hd->pLocal->completion = completionCode;
-
- /* MF and RF are freed in mpt_interrupt
- */
-wakeup:
- /* Free Chain buffers (will never chain) in scan or dv */
- //mptscsih_freeChainBuffers(ioc, req_idx);
-
- /*
- * Wake up the original calling thread
- */
- hd->scandv_wait_done = 1;
- wake_up(&hd->scandv_waitq);
-
+ out:
+ if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING))
+ return 0;
+ ioc->internal_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->internal_cmds.done);
return 1;
}
@@ -3004,6 +2672,95 @@ mptscsih_timer_expired(unsigned long data)
return;
}
+/**
+ * mptscsih_get_completion_code - map an internal command reply to an MPT_SCANDV_* completion code
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @req: Pointer to original MPT request frame
+ * @reply: Pointer to MPT reply frame (NULL if TurboReply)
+ *
+ **/
+static int
+mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+ MPT_FRAME_HDR *reply)
+{
+ SCSIIOReply_t *pReply;
+ MpiRaidActionReply_t *pr;
+ u8 scsi_status;
+ u16 status;
+ int completion_code;
+
+ pReply = (SCSIIOReply_t *)reply;
+ status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+ scsi_status = pReply->SCSIStatus;
+
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh,"
+ "IOCLogInfo=%08xh\n", ioc->name, status, pReply->SCSIState,
+ scsi_status, le32_to_cpu(pReply->IOCLogInfo)));
+
+ switch (status) {
+
+ case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
+ completion_code = MPT_SCANDV_SELECTION_TIMEOUT;
+ break;
+
+ case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
+ case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
+ case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
+ case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
+ completion_code = MPT_SCANDV_DID_RESET;
+ break;
+
+ case MPI_IOCSTATUS_BUSY:
+ case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ completion_code = MPT_SCANDV_BUSY;
+ break;
+
+ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
+ case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
+ case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
+ if (pReply->Function == MPI_FUNCTION_CONFIG) {
+ completion_code = MPT_SCANDV_GOOD;
+ } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
+ pr = (MpiRaidActionReply_t *)reply;
+ if (le16_to_cpu(pr->ActionStatus) ==
+ MPI_RAID_ACTION_ASTATUS_SUCCESS)
+ completion_code = MPT_SCANDV_GOOD;
+ else
+ completion_code = MPT_SCANDV_SOME_ERROR;
+ } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)
+ completion_code = MPT_SCANDV_SENSE;
+ else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
+ if (req->u.scsireq.CDB[0] == INQUIRY)
+ completion_code = MPT_SCANDV_ISSUE_SENSE;
+ else
+ completion_code = MPT_SCANDV_DID_RESET;
+ } else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
+ completion_code = MPT_SCANDV_DID_RESET;
+ else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
+ completion_code = MPT_SCANDV_DID_RESET;
+ else if (scsi_status == MPI_SCSI_STATUS_BUSY)
+ completion_code = MPT_SCANDV_BUSY;
+ else
+ completion_code = MPT_SCANDV_GOOD;
+ break;
+
+ case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
+ if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
+ completion_code = MPT_SCANDV_DID_RESET;
+ else
+ completion_code = MPT_SCANDV_SOME_ERROR;
+ break;
+ default:
+ completion_code = MPT_SCANDV_SOME_ERROR;
+ break;
+
+ } /* switch(status) */
+
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ " completionCode set to %08xh\n", ioc->name, completion_code));
+ return completion_code;
+}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
@@ -3030,22 +2787,27 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
{
MPT_FRAME_HDR *mf;
SCSIIORequest_t *pScsiReq;
- SCSIIORequest_t ReqCopy;
int my_idx, ii, dir;
- int rc, cmdTimeout;
- int in_isr;
+ int timeout;
char cmdLen;
char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- char cmd = io->cmd;
- MPT_ADAPTER *ioc = hd->ioc;
+ u8 cmd = io->cmd;
+ MPT_ADAPTER *ioc = hd->ioc;
+ int ret = 0;
+ unsigned long timeleft;
+ unsigned long flags;
- in_isr = in_interrupt();
- if (in_isr) {
- dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Internal SCSI IO request not allowed in ISR context!\n",
- ioc->name));
- return -EPERM;
+ /* don't send internal command during diag reset */
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: busy with host reset\n", ioc->name, __func__));
+ return MPT_SCANDV_BUSY;
}
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ mutex_lock(&ioc->internal_cmds.mutex);
/* Set command specific information
*/
@@ -3055,13 +2817,13 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
CDB[4] = io->size;
- cmdTimeout = 10;
+ timeout = 10;
break;
case TEST_UNIT_READY:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
- cmdTimeout = 10;
+ timeout = 10;
break;
case START_STOP:
@@ -3069,7 +2831,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
CDB[4] = 1; /*Spin up the disk */
- cmdTimeout = 15;
+ timeout = 15;
break;
case REQUEST_SENSE:
@@ -3077,7 +2839,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
CDB[0] = cmd;
CDB[4] = io->size;
dir = MPI_SCSIIO_CONTROL_READ;
- cmdTimeout = 10;
+ timeout = 10;
break;
case READ_BUFFER:
@@ -3096,7 +2858,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
CDB[6] = (io->size >> 16) & 0xFF;
CDB[7] = (io->size >> 8) & 0xFF;
CDB[8] = io->size & 0xFF;
- cmdTimeout = 10;
+ timeout = 10;
break;
case WRITE_BUFFER:
@@ -3111,21 +2873,21 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
CDB[6] = (io->size >> 16) & 0xFF;
CDB[7] = (io->size >> 8) & 0xFF;
CDB[8] = io->size & 0xFF;
- cmdTimeout = 10;
+ timeout = 10;
break;
case RESERVE:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
- cmdTimeout = 10;
+ timeout = 10;
break;
case RELEASE:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
- cmdTimeout = 10;
+ timeout = 10;
break;
case SYNCHRONIZE_CACHE:
@@ -3133,20 +2895,23 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
// CDB[1] = 0x02; /* set immediate bit */
- cmdTimeout = 10;
+ timeout = 10;
break;
default:
/* Error Case */
- return -EFAULT;
+ ret = -EFAULT;
+ goto out;
}
/* Get and Populate a free Frame
+ * MsgContext set in mpt_get_msg_frame call
*/
if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
- dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "No msg frames!\n",
- ioc->name));
- return -EBUSY;
+ dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: No msg frames!\n",
+ ioc->name, __func__));
+ ret = MPT_SCANDV_BUSY;
+ goto out;
}
pScsiReq = (SCSIIORequest_t *) mf;
@@ -3172,7 +2937,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
pScsiReq->Reserved = 0;
- pScsiReq->MsgFlags = mpt_msg_flags();
+ pScsiReq->MsgFlags = mpt_msg_flags(ioc);
/* MsgContext set in mpt_get_msg_frame call */
int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN);
@@ -3184,74 +2949,58 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
if (cmd == REQUEST_SENSE) {
pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
- ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Untagged! 0x%2x\n",
- ioc->name, cmd));
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: Untagged! 0x%02x\n", ioc->name, __func__, cmd));
}
- for (ii=0; ii < 16; ii++)
+ for (ii = 0; ii < 16; ii++)
pScsiReq->CDB[ii] = CDB[ii];
pScsiReq->DataLength = cpu_to_le32(io->size);
pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
+ (my_idx * MPT_SENSE_BUFFER_ALLOC));
- ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Command 0x%x for (%d:%d:%d)\n",
- ioc->name, cmd, io->channel, io->id, io->lun));
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%d\n",
+ ioc->name, __func__, cmd, io->channel, io->id, io->lun));
- if (dir == MPI_SCSIIO_CONTROL_READ) {
- mpt_add_sge((char *) &pScsiReq->SGL,
- MPT_SGE_FLAGS_SSIMPLE_READ | io->size,
- io->data_dma);
- } else {
- mpt_add_sge((char *) &pScsiReq->SGL,
- MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size,
- io->data_dma);
- }
-
- /* The ISR will free the request frame, but we need
- * the information to initialize the target. Duplicate.
- */
- memcpy(&ReqCopy, pScsiReq, sizeof(SCSIIORequest_t));
-
- /* Issue this command after:
- * finish init
- * add timer
- * Wait until the reply has been received
- * ScsiScanDvCtx callback function will
- * set hd->pLocal;
- * set scandv_wait_done and call wake_up
- */
- hd->pLocal = NULL;
- hd->timer.expires = jiffies + HZ*cmdTimeout;
- hd->scandv_wait_done = 0;
-
- /* Save cmd pointer, for resource free if timeout or
- * FW reload occurs
- */
- hd->cmdPtr = mf;
+ if (dir == MPI_SCSIIO_CONTROL_READ)
+ ioc->add_sge((char *) &pScsiReq->SGL,
+ MPT_SGE_FLAGS_SSIMPLE_READ | io->size, io->data_dma);
+ else
+ ioc->add_sge((char *) &pScsiReq->SGL,
+ MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, io->data_dma);
- add_timer(&hd->timer);
+ INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
- wait_event(hd->scandv_waitq, hd->scandv_wait_done);
-
- if (hd->pLocal) {
- rc = hd->pLocal->completion;
- hd->pLocal->skip = 0;
-
- /* Always set fatal error codes in some cases.
- */
- if (rc == MPT_SCANDV_SELECTION_TIMEOUT)
- rc = -ENXIO;
- else if (rc == MPT_SCANDV_SOME_ERROR)
- rc = -rc;
- } else {
- rc = -EFAULT;
- /* This should never happen. */
- ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "_do_cmd: Null pLocal!!!\n",
- ioc->name));
+ timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
+ timeout*HZ);
+ if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = MPT_SCANDV_DID_RESET;
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: TIMED OUT for cmd=0x%02x\n", ioc->name, __func__,
+ cmd));
+ if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+ mpt_free_msg_frame(ioc, mf);
+ goto out;
+ }
+ if (!timeleft) {
+ printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+ ioc->name, __func__);
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+ }
+ goto out;
}
- return rc;
+ ret = ioc->internal_cmds.completion_code;
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: success, rc=0x%02x\n",
+ ioc->name, __func__, ret));
+
+ out:
+ CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+ mutex_unlock(&ioc->internal_cmds.mutex);
+ return ret;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -3491,6 +3240,7 @@ struct device_attribute *mptscsih_host_attrs[] = {
&dev_attr_debug_level,
NULL,
};
+
EXPORT_SYMBOL(mptscsih_host_attrs);
EXPORT_SYMBOL(mptscsih_remove);
@@ -3516,6 +3266,5 @@ EXPORT_SYMBOL(mptscsih_event_process);
EXPORT_SYMBOL(mptscsih_ioc_reset);
EXPORT_SYMBOL(mptscsih_change_queue_depth);
EXPORT_SYMBOL(mptscsih_timer_expired);
-EXPORT_SYMBOL(mptscsih_TMHandler);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
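
The mptscsih.c hunks above converge on one pattern. The hand-rolled tmPending/tmState bookkeeping (FreeQlock plus 250 ms msleep polling in the removed mptscsih_tm_pending_wait() and mptscsih_tm_wait_for_completion()) gives way to the generic management-command machinery in ioc->taskmgmt_cmds and ioc->internal_cmds: take the per-IOC mutex, mark the command pending, post the frame, block in wait_for_completion_timeout(), and let the reply and IOC-reset callbacks set status bits and call complete(). The sketch below is a minimal rendering of that flow using only standard kernel completion and mutex primitives; the struct, field, and MGMT_STATUS_* names are illustrative stand-ins, not the driver's actual MPT_MGMT_STATUS bits or its INITIALIZE_MGMT_STATUS/CLEAR_MGMT_STATUS macros.

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MGMT_STATUS_PENDING      (1 << 0)  /* a waiter is blocked          */
#define MGMT_STATUS_COMMAND_GOOD (1 << 1)  /* a reply frame arrived        */
#define MGMT_STATUS_DID_IOCRESET (1 << 2)  /* woken early by an IOC reset  */

struct mgmt_cmds {
	struct mutex		mutex;	/* serializes management commands;
					 * mutex_init()'d at adapter setup */
	struct completion	done;	/* signalled by reply/reset path   */
	unsigned long		status;	/* MGMT_STATUS_* bits              */
};

/* Process context: post one request and wait up to @timeout seconds.
 * The real driver additionally guards @status with an adapter lock.
 */
static int mgmt_issue_and_wait(struct mgmt_cmds *cmds, unsigned long timeout)
{
	int rc = 0;

	mutex_lock(&cmds->mutex);
	cmds->status = MGMT_STATUS_PENDING;
	init_completion(&cmds->done);

	/* ... post the request frame here, e.g. via the adapter's
	 * high-priority or handshake path ... */

	wait_for_completion_timeout(&cmds->done, timeout * HZ);
	if (!(cmds->status & MGMT_STATUS_COMMAND_GOOD))
		rc = -ETIME;	/* timed out, or an IOC reset intervened */

	cmds->status = 0;
	mutex_unlock(&cmds->mutex);
	return rc;
}

/* Reply (or reset) context: record the outcome and wake the waiter. */
static void mgmt_wake(struct mgmt_cmds *cmds, bool good)
{
	cmds->status |= good ? MGMT_STATUS_COMMAND_GOOD :
			       MGMT_STATUS_DID_IOCRESET;
	if (cmds->status & MGMT_STATUS_PENDING) {
		cmds->status &= ~MGMT_STATUS_PENDING;
		complete(&cmds->done);
	}
}

Compared with the polling loops this replaces, the waiter sleeps on the completion instead of spinning on FreeQlock every 250 ms, and the MPT_IOC_POST_RESET path can unblock it early by setting the reset bit before calling complete().
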
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 319aa303337..eb3f677528a 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -60,6 +60,7 @@
#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008)
#define MPT_SCANDV_ISSUE_SENSE (0x00000010)
#define MPT_SCANDV_FALLBACK (0x00000020)
+#define MPT_SCANDV_BUSY (0x00000040)
#define MPT_SCANDV_MAX_RETRIES (10)
@@ -89,6 +90,7 @@
#endif
+
typedef struct _internal_cmd {
char *data; /* data pointer */
dma_addr_t data_dma; /* data dma address */
@@ -112,6 +114,8 @@ extern int mptscsih_resume(struct pci_dev *pdev);
extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
extern const char * mptscsih_info(struct Scsi_Host *SChost);
extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
+extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
+ u8 id, int lun, int ctx2abort, ulong timeout);
extern void mptscsih_slave_destroy(struct scsi_device *device);
extern int mptscsih_slave_configure(struct scsi_device *device);
extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
@@ -126,7 +130,8 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE
extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
extern void mptscsih_timer_expired(unsigned long data);
-extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern struct device_attribute *mptscsih_host_attrs[];
+extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
+extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
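
With mptscsih_IssueTaskMgmt() declared and exported here (and the mptscsih_TMHandler() prototype gone), sub-drivers now call the task-management entry point directly, as the mptspi.c hunk below does for its internal bus reset. A minimal hypothetical caller mirroring that invocation; the helper name and the hd pointer are assumptions for illustration only:

/* Hypothetical helper: issue a bus reset through the exported entry point,
 * the same way the mptspi_probe() hunk below does. 'hd' is assumed to be a
 * valid MPT_SCSI_HOST pointer, e.g. obtained via shost_priv().
 */
static int example_bus_reset(MPT_SCSI_HOST *hd)
{
	return mptscsih_IssueTaskMgmt(hd,
			MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
			0 /* channel */, 0 /* id */, 0 /* lun */,
			0 /* ctx2abort */, 5 /* timeout, seconds */);
}
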
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 61620144e49..c5b808fd55b 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -300,7 +300,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
(IOCPage4Ptr->Header.PageLength + ii) * 4;
- mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
+ ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
@@ -614,19 +614,24 @@ static void mptspi_read_parameters(struct scsi_target *starget)
spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
}
-static int
+int
mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
{
+ MPT_ADAPTER *ioc = hd->ioc;
MpiRaidActionRequest_t *pReq;
MPT_FRAME_HDR *mf;
- MPT_ADAPTER *ioc = hd->ioc;
+ int ret;
+ unsigned long timeleft;
+
+ mutex_lock(&ioc->internal_cmds.mutex);
/* Get and Populate a free Frame
*/
if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
- ddvprintk(ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n",
- ioc->name));
- return -EAGAIN;
+ dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT
+ "%s: no msg frames!\n", ioc->name, __func__));
+ ret = -EAGAIN;
+ goto out;
}
pReq = (MpiRaidActionRequest_t *)mf;
if (quiesce)
@@ -643,29 +648,36 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
pReq->Reserved2 = 0;
pReq->ActionDataWord = 0; /* Reserved for this action */
- mpt_add_sge((char *)&pReq->ActionDataSGE,
+ ioc->add_sge((char *)&pReq->ActionDataSGE,
MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n",
ioc->name, pReq->Action, channel, id));
- hd->pLocal = NULL;
- hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
- hd->scandv_wait_done = 0;
-
- /* Save cmd pointer, for resource free if timeout or
- * FW reload occurs
- */
- hd->cmdPtr = mf;
-
- add_timer(&hd->timer);
+ INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
- wait_event(hd->scandv_waitq, hd->scandv_wait_done);
+ timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ);
+ if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n",
+ ioc->name, __func__));
+ if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out;
+ if (!timeleft) {
+ printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+ ioc->name, __func__);
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+ }
+ goto out;
+ }
- if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0))
- return -1;
+ ret = ioc->internal_cmds.completion_code;
- return 0;
+ out:
+ CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+ mutex_unlock(&ioc->internal_cmds.mutex);
+ return ret;
}
static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
@@ -1423,17 +1435,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* A slightly different algorithm is required for
* 64bit SGEs.
*/
- scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
- if (sizeof(dma_addr_t) == sizeof(u64)) {
+ scale = ioc->req_sz/ioc->SGE_size;
+ if (ioc->sg_addr_size == sizeof(u64)) {
numSGE = (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
- (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
- sizeof(u32));
+ (ioc->req_sz - 60) / ioc->SGE_size;
} else {
numSGE = 1 + (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
- (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
- sizeof(u32));
+ (ioc->req_sz - 64) / ioc->SGE_size;
}
if (numSGE < sh->sg_tablesize) {
@@ -1464,9 +1474,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Clear the TM flags
*/
- hd->tmPending = 0;
- hd->tmState = TM_STATE_NONE;
- hd->resetPending = 0;
hd->abortSCpnt = NULL;
/* Clear the pointer used to store
@@ -1493,8 +1500,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mpt_saf_te));
ioc->spi_data.noQas = 0;
- init_waitqueue_head(&hd->scandv_waitq);
- hd->scandv_wait_done = 0;
hd->last_queue_full = 0;
hd->spi_pending = 0;
@@ -1514,7 +1519,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* issue internal bus reset
*/
if (ioc->spi_data.bus_reset)
- mptscsih_TMHandler(hd,
+ mptscsih_IssueTaskMgmt(hd,
MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
0, 0, 0, 0, 5);
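
The mptspi.c hunks above drop the driver-private scandv waitqueue/timer and reuse the shared ioc->internal_cmds tracker: a mutex serializes internal commands, the reply handler signals a completion, and the issuer waits with wait_for_completion_timeout(), escalating to mpt_HardResetHandler() only when no reply ever arrives. The fragment below is a minimal, hypothetical sketch of that wait pattern using only generic kernel primitives; my_cmd_ctx and my_issue_and_wait() are illustrative names, not MPT Fusion API.

        #include <linux/mutex.h>
        #include <linux/completion.h>
        #include <linux/jiffies.h>
        #include <linux/errno.h>

        struct my_cmd_ctx {
                struct mutex mutex;             /* one internal command at a time */
                struct completion done;         /* signalled from the reply handler */
                int completion_code;            /* filled in by the reply handler */
        };

        static int my_issue_and_wait(struct my_cmd_ctx *ctx)
        {
                unsigned long timeleft;
                int ret;

                mutex_lock(&ctx->mutex);
                init_completion(&ctx->done);    /* re-arm before issuing */

                /* ... hand the request frame to the hardware here ... */

                timeleft = wait_for_completion_timeout(&ctx->done, 10 * HZ);
                if (!timeleft)
                        ret = -ETIME;           /* no reply: caller resets the adapter */
                else
                        ret = ctx->completion_code;

                mutex_unlock(&ctx->mutex);
                return ret;
        }
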
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a443e136dc4..335d4c78a77 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
struct request_queue *q = req->q;
unsigned long flags;
- if (blk_end_request(req, error, nr_bytes)) {
- int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
-
- if (blk_pc_request(req))
- leftover = req->data_len;
-
+ if (blk_end_request(req, error, nr_bytes))
if (error)
- blk_end_request(req, -EIO, leftover);
- }
+ blk_end_request_all(req, -EIO);
spin_lock_irqsave(q->queue_lock, flags);
@@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req)
break;
case CACHE_SMARTFETCH:
- if (req->nr_sectors > 16)
+ if (blk_rq_sectors(req) > 16)
ctl_flags = 0x201F0008;
else
ctl_flags = 0x001F0000;
@@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req)
ctl_flags = 0x001F0010;
break;
case CACHE_SMARTBACK:
- if (req->nr_sectors > 16)
+ if (blk_rq_sectors(req) > 16)
ctl_flags = 0x001F0004;
else
ctl_flags = 0x001F0010;
break;
case CACHE_SMARTTHROUGH:
- if (req->nr_sectors > 16)
+ if (blk_rq_sectors(req) > 16)
ctl_flags = 0x001F0004;
else
ctl_flags = 0x001F0010;
@@ -800,8 +794,9 @@ static int i2o_block_transfer(struct request *req)
if (c->adaptec) {
u8 cmd[10];
u32 scsi_flags;
- u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;
+ u16 hwsec;
+ hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
memset(cmd, 0, 10);
sgl_offset = SGL_OFFSET_12;
@@ -827,22 +822,22 @@ static int i2o_block_transfer(struct request *req)
*mptr++ = cpu_to_le32(scsi_flags);
- *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
- *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
+ *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
+ *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
memcpy(mptr, cmd, 10);
mptr += 4;
- *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+ *mptr++ = cpu_to_le32(blk_rq_bytes(req));
} else
#endif
{
msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
*mptr++ = cpu_to_le32(ctl_flags);
- *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+ *mptr++ = cpu_to_le32(blk_rq_bytes(req));
*mptr++ =
- cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
+ cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
*mptr++ =
- cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
+ cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
}
if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -883,7 +878,7 @@ static void i2o_block_request_fn(struct request_queue *q)
struct request *req;
while (!blk_queue_plugged(q)) {
- req = elv_next_request(q);
+ req = blk_peek_request(q);
if (!req)
break;
@@ -896,7 +891,7 @@ static void i2o_block_request_fn(struct request_queue *q)
if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
if (!i2o_block_transfer(req)) {
- blkdev_dequeue_request(req);
+ blk_start_request(req);
continue;
} else
osm_info("transfer error\n");
@@ -922,8 +917,10 @@ static void i2o_block_request_fn(struct request_queue *q)
blk_stop_queue(q);
break;
}
- } else
- end_request(req, 0);
+ } else {
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
+ }
}
};
@@ -1082,7 +1079,7 @@ static int i2o_block_probe(struct device *dev)
*/
if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
!i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
- blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
+ blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
} else
osm_warn("unable to get blocksize of %s\n", gd->disk_name);
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 386da1566fc..cb73051e43d 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -35,7 +35,7 @@ struct pasic3_data {
*/
void pasic3_write_register(struct device *dev, u32 reg, u8 val)
{
- struct pasic3_data *asic = dev->driver_data;
+ struct pasic3_data *asic = dev_get_drvdata(dev);
int bus_shift = asic->bus_shift;
void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);
void __iomem *data = asic->mapping + (REG_DATA << bus_shift);
@@ -50,7 +50,7 @@ EXPORT_SYMBOL(pasic3_write_register); /* for leds-pasic3 */
*/
u8 pasic3_read_register(struct device *dev, u32 reg)
{
- struct pasic3_data *asic = dev->driver_data;
+ struct pasic3_data *asic = dev_get_drvdata(dev);
int bus_shift = asic->bus_shift;
void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);
void __iomem *data = asic->mapping + (REG_DATA << bus_shift);
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 7793932a513..082c197ab9b 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -443,7 +443,7 @@ static irqreturn_t pcf50633_irq(int irq, void *data)
dev_dbg(pcf->dev, "pcf50633_irq\n");
get_device(pcf->dev);
- disable_irq(pcf->irq);
+ disable_irq_nosync(pcf->irq);
schedule_work(&pcf->irq_work);
return IRQ_HANDLED;
@@ -618,7 +618,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pdev->dev.parent = pcf->dev;
pdev->dev.platform_data = &pdata->reg_init_data[i];
- pdev->dev.driver_data = pcf;
+ dev_set_drvdata(&pdev->dev, pcf);
pcf->regulator_pdev[i] = pdev;
platform_device_add(pdev);
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index e9f4323dd2c..875f7a87573 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -108,6 +108,10 @@ static int t7l66xb_mmc_disable(struct platform_device *mmc)
/*--------------------------------------------------------------------------*/
+static const struct tmio_mmc_data t7166xb_mmc_data = {
+ .hclk = 24000000,
+};
+
static const struct resource t7l66xb_mmc_resources[] = {
{
.start = 0x800,
@@ -149,6 +153,7 @@ static struct mfd_cell t7l66xb_cells[] = {
.name = "tmio-mmc",
.enable = t7l66xb_mmc_enable,
.disable = t7l66xb_mmc_disable,
+ .driver_data = &t7166xb_mmc_data,
.num_resources = ARRAY_SIZE(t7l66xb_mmc_resources),
.resources = t7l66xb_mmc_resources,
},
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 43222c12fec..c3993ac2054 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -75,6 +75,10 @@ static int tc6387xb_mmc_disable(struct platform_device *mmc)
/*--------------------------------------------------------------------------*/
+static const struct tmio_mmc_data tc6387xb_mmc_data = {
+ .hclk = 24000000,
+};
+
static struct resource tc6387xb_mmc_resources[] = {
{
.start = 0x800,
@@ -98,6 +102,7 @@ static struct mfd_cell tc6387xb_cells[] = {
.name = "tmio-mmc",
.enable = tc6387xb_mmc_enable,
.disable = tc6387xb_mmc_disable,
+ .driver_data = &tc6387xb_mmc_data,
.num_resources = ARRAY_SIZE(tc6387xb_mmc_resources),
.resources = tc6387xb_mmc_resources,
},
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 77a12fc8045..9d2abb5d6e2 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -136,6 +136,10 @@ static int tc6393xb_nand_enable(struct platform_device *nand)
return 0;
}
+static const struct tmio_mmc_data tc6393xb_mmc_data = {
+ .hclk = 24000000,
+};
+
static struct resource __devinitdata tc6393xb_nand_resources[] = {
{
.start = 0x1000,
@@ -351,6 +355,7 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = {
},
[TC6393XB_CELL_MMC] = {
.name = "tmio-mmc",
+ .driver_data = &tc6393xb_mmc_data,
.num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
.resources = tc6393xb_mmc_resources,
},
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index c2be3088e2e..fe24079387c 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -79,10 +79,6 @@ static int wm8350_phys_read(struct wm8350 *wm8350, u8 reg, int num_regs,
/* Cache is CPU endian */
dest[i - reg] = be16_to_cpu(dest[i - reg]);
- /* Satisfy non-volatile bits from cache */
- dest[i - reg] &= wm8350_reg_io_map[i].vol;
- dest[i - reg] |= wm8350->reg_cache[i];
-
/* Mask out non-readable bits */
dest[i - reg] &= wm8350_reg_io_map[i].readable;
}
@@ -182,9 +178,6 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
(wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable)
| src[i - reg];
- /* Don't store volatile bits */
- wm8350->reg_cache[i] &= ~wm8350_reg_io_map[i].vol;
-
src[i - reg] = cpu_to_be16(src[i - reg]);
}
@@ -1261,7 +1254,6 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int type, int mode)
(i < WM8350_CLOCK_CONTROL_1 || i > WM8350_AIF_TEST)) {
value = be16_to_cpu(wm8350->reg_cache[i]);
value &= wm8350_reg_io_map[i].readable;
- value &= ~wm8350_reg_io_map[i].vol;
wm8350->reg_cache[i] = value;
} else
wm8350->reg_cache[i] = reg_map[i];
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index cf30d06a010..7c21bf79156 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -265,7 +265,7 @@ static int wm8400_init(struct wm8400 *wm8400,
mutex_init(&wm8400->io_lock);
- wm8400->dev->driver_data = wm8400;
+ dev_set_drvdata(wm8400->dev, wm8400);
/* Check that this is actually a WM8400 */
ret = wm8400->read_dev(wm8400->io_data, WM8400_RESET_ID, 1, &reg);
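
The htc-pasic3.c, pcf50633-core.c and wm8400-core.c hunks all make the same substitution: poking dev->driver_data directly is replaced with the dev_set_drvdata()/dev_get_drvdata() accessors, which keeps drivers independent of the struct device layout. A minimal illustration of the pattern follows; my_chip, my_probe() and my_show_id() are invented names.

        #include <linux/device.h>
        #include <linux/slab.h>

        struct my_chip {
                int id;
        };

        static int my_probe(struct device *dev)
        {
                struct my_chip *chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);

                if (!chip)
                        return -ENOMEM;
                chip->id = 42;
                dev_set_drvdata(dev, chip);     /* instead of dev->driver_data = chip */
                return 0;
        }

        static void my_show_id(struct device *dev)
        {
                struct my_chip *chip = dev_get_drvdata(dev);    /* typed retrieval */

                dev_info(dev, "chip id %d\n", chip->id);
        }
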
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6d1ac180f6e..68ab39d7cb3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -235,5 +235,6 @@ config ISL29003
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
+source "drivers/misc/cb710/Kconfig"
endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7871f05dcb9..36f733cd60e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -21,3 +21,4 @@ obj-$(CONFIG_HP_ILO) += hpilo.o
obj-$(CONFIG_ISL29003) += isl29003.o
obj-$(CONFIG_C2PORT) += c2port/
obj-y += eeprom/
+obj-y += cb710/
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 0207dd59090..b5346b4db91 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/idr.h>
@@ -891,6 +892,7 @@ struct c2port_device *c2port_device_register(char *name,
return ERR_PTR(-EINVAL);
c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
+ kmemcheck_annotate_bitfield(c2dev, flags);
if (unlikely(!c2dev))
return ERR_PTR(-ENOMEM);
diff --git a/drivers/misc/cb710/Kconfig b/drivers/misc/cb710/Kconfig
new file mode 100644
index 00000000000..22429b8b106
--- /dev/null
+++ b/drivers/misc/cb710/Kconfig
@@ -0,0 +1,25 @@
+config CB710_CORE
+ tristate "ENE CB710/720 Flash memory card reader support"
+ depends on PCI
+ help
+ This option enables support for the PCI ENE CB710/720 Flash memory card
+ reader found in some laptops (e.g. some versions of the HP Compaq nx9500).
+
+ You will also have to select some flash card format drivers (MMC/SD,
+ MemoryStick).
+
+ This driver can also be built as a module. If so, the module
+ will be called cb710.
+
+config CB710_DEBUG
+ bool "Enable driver debugging"
+ depends on CB710_CORE != n
+ default n
+ help
+ This is an option for use by developers; most people should
+ say N here. This adds a lot of debugging output to dmesg.
+
+config CB710_DEBUG_ASSUMPTIONS
+ bool
+ depends on CB710_CORE != n
+ default y
diff --git a/drivers/misc/cb710/Makefile b/drivers/misc/cb710/Makefile
new file mode 100644
index 00000000000..7b80cbf1a60
--- /dev/null
+++ b/drivers/misc/cb710/Makefile
@@ -0,0 +1,8 @@
+ifeq ($(CONFIG_CB710_DEBUG),y)
+ EXTRA_CFLAGS += -DDEBUG
+endif
+
+obj-$(CONFIG_CB710_CORE) += cb710.o
+
+cb710-y := core.o sgbuf2.o
+cb710-$(CONFIG_CB710_DEBUG) += debug.o
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
new file mode 100644
index 00000000000..b14eab0f2ba
--- /dev/null
+++ b/drivers/misc/cb710/core.c
@@ -0,0 +1,357 @@
+/*
+ * cb710/core.c
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/cb710.h>
+
+static DEFINE_IDA(cb710_ida);
+static DEFINE_SPINLOCK(cb710_ida_lock);
+
+void cb710_pci_update_config_reg(struct pci_dev *pdev,
+ int reg, uint32_t mask, uint32_t xor)
+{
+ u32 rval;
+
+ pci_read_config_dword(pdev, reg, &rval);
+ rval = (rval & mask) ^ xor;
+ pci_write_config_dword(pdev, reg, rval);
+}
+EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg);
+
+/* Some magic writes based on Windows driver init code */
+static int __devinit cb710_pci_configure(struct pci_dev *pdev)
+{
+ unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+ struct pci_dev *pdev0 = pci_get_slot(pdev->bus, devfn);
+ u32 val;
+
+ cb710_pci_update_config_reg(pdev, 0x48,
+ ~0x000000FF, 0x0000003F);
+
+ pci_read_config_dword(pdev, 0x48, &val);
+ if (val & 0x80000000)
+ return 0;
+
+ if (!pdev0)
+ return -ENODEV;
+
+ if (pdev0->vendor == PCI_VENDOR_ID_ENE
+ && pdev0->device == PCI_DEVICE_ID_ENE_720) {
+ cb710_pci_update_config_reg(pdev0, 0x8C,
+ ~0x00F00000, 0x00100000);
+ cb710_pci_update_config_reg(pdev0, 0xB0,
+ ~0x08000000, 0x08000000);
+ }
+
+ cb710_pci_update_config_reg(pdev0, 0x8C,
+ ~0x00000F00, 0x00000200);
+ cb710_pci_update_config_reg(pdev0, 0x90,
+ ~0x00060000, 0x00040000);
+
+ pci_dev_put(pdev0);
+
+ return 0;
+}
+
+static irqreturn_t cb710_irq_handler(int irq, void *data)
+{
+ struct cb710_chip *chip = data;
+ struct cb710_slot *slot = &chip->slot[0];
+ irqreturn_t handled = IRQ_NONE;
+ unsigned nr;
+
+ spin_lock(&chip->irq_lock); /* incl. smp_rmb() */
+
+ for (nr = chip->slots; nr; ++slot, --nr) {
+ cb710_irq_handler_t handler_func = slot->irq_handler;
+ if (handler_func && handler_func(slot))
+ handled = IRQ_HANDLED;
+ }
+
+ spin_unlock(&chip->irq_lock);
+
+ return handled;
+}
+
+static void cb710_release_slot(struct device *dev)
+{
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+ struct cb710_slot *slot = cb710_pdev_to_slot(to_platform_device(dev));
+ struct cb710_chip *chip = cb710_slot_to_chip(slot);
+
+ /* slot struct can be freed now */
+ atomic_dec(&chip->slot_refs_count);
+#endif
+}
+
+static int __devinit cb710_register_slot(struct cb710_chip *chip,
+ unsigned slot_mask, unsigned io_offset, const char *name)
+{
+ int nr = chip->slots;
+ struct cb710_slot *slot = &chip->slot[nr];
+ int err;
+
+ dev_dbg(cb710_chip_dev(chip),
+ "register: %s.%d; slot %d; mask %d; IO offset: 0x%02X\n",
+ name, chip->platform_id, nr, slot_mask, io_offset);
+
+ /* slot->irq_handler == NULL here; this needs to be
+ * seen before platform_device_register() */
+ ++chip->slots;
+ smp_wmb();
+
+ slot->iobase = chip->iobase + io_offset;
+ slot->pdev.name = name;
+ slot->pdev.id = chip->platform_id;
+ slot->pdev.dev.parent = &chip->pdev->dev;
+ slot->pdev.dev.release = cb710_release_slot;
+
+ err = platform_device_register(&slot->pdev);
+
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+ atomic_inc(&chip->slot_refs_count);
+#endif
+
+ if (err) {
+ /* device_initialize() called from platform_device_register()
+ * wants this on error path */
+ platform_device_put(&slot->pdev);
+
+ /* slot->irq_handler == NULL here anyway, so no lock needed */
+ --chip->slots;
+ return err;
+ }
+
+ chip->slot_mask |= slot_mask;
+
+ return 0;
+}
+
+static void cb710_unregister_slot(struct cb710_chip *chip,
+ unsigned slot_mask)
+{
+ int nr = chip->slots - 1;
+
+ if (!(chip->slot_mask & slot_mask))
+ return;
+
+ platform_device_unregister(&chip->slot[nr].pdev);
+
+ /* complementary to spin_unlock() in cb710_set_irq_handler() */
+ smp_rmb();
+ BUG_ON(chip->slot[nr].irq_handler != NULL);
+
+ /* slot->irq_handler == NULL here, so no lock needed */
+ --chip->slots;
+ chip->slot_mask &= ~slot_mask;
+}
+
+void cb710_set_irq_handler(struct cb710_slot *slot,
+ cb710_irq_handler_t handler)
+{
+ struct cb710_chip *chip = cb710_slot_to_chip(slot);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->irq_lock, flags);
+ slot->irq_handler = handler;
+ spin_unlock_irqrestore(&chip->irq_lock, flags);
+}
+EXPORT_SYMBOL_GPL(cb710_set_irq_handler);
+
+#ifdef CONFIG_PM
+
+static int cb710_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct cb710_chip *chip = pci_get_drvdata(pdev);
+
+ free_irq(pdev->irq, chip);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ if (state.event & PM_EVENT_SLEEP)
+ pci_set_power_state(pdev, PCI_D3cold);
+ return 0;
+}
+
+static int cb710_resume(struct pci_dev *pdev)
+{
+ struct cb710_chip *chip = pci_get_drvdata(pdev);
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ return devm_request_irq(&pdev->dev, pdev->irq,
+ cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
+}
+
+#endif /* CONFIG_PM */
+
+static int __devinit cb710_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct cb710_chip *chip;
+ unsigned long flags;
+ u32 val;
+ int err;
+ int n = 0;
+
+ err = cb710_pci_configure(pdev);
+ if (err)
+ return err;
+
+ /* this is actually magic... */
+ pci_read_config_dword(pdev, 0x48, &val);
+ if (!(val & 0x80000000)) {
+ pci_write_config_dword(pdev, 0x48, val|0x71000000);
+ pci_read_config_dword(pdev, 0x48, &val);
+ }
+
+ dev_dbg(&pdev->dev, "PCI config[0x48] = 0x%08X\n", val);
+ if (!(val & 0x70000000))
+ return -ENODEV;
+ val = (val >> 28) & 7;
+ if (val & CB710_SLOT_MMC)
+ ++n;
+ if (val & CB710_SLOT_MS)
+ ++n;
+ if (val & CB710_SLOT_SM)
+ ++n;
+
+ chip = devm_kzalloc(&pdev->dev,
+ sizeof(*chip) + n * sizeof(*chip->slot), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, 0x0001, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ chip->pdev = pdev;
+ chip->iobase = pcim_iomap_table(pdev)[0];
+
+ pci_set_drvdata(pdev, chip);
+
+ err = devm_request_irq(&pdev->dev, pdev->irq,
+ cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (err)
+ return err;
+
+ do {
+ if (!ida_pre_get(&cb710_ida, GFP_KERNEL))
+ return -ENOMEM;
+
+ spin_lock_irqsave(&cb710_ida_lock, flags);
+ err = ida_get_new(&cb710_ida, &chip->platform_id);
+ spin_unlock_irqrestore(&cb710_ida_lock, flags);
+
+ if (err && err != -EAGAIN)
+ return err;
+ } while (err);
+
+
+ dev_info(&pdev->dev, "id %d, IO 0x%p, IRQ %d\n",
+ chip->platform_id, chip->iobase, pdev->irq);
+
+ if (val & CB710_SLOT_MMC) { /* MMC/SD slot */
+ err = cb710_register_slot(chip,
+ CB710_SLOT_MMC, 0x00, "cb710-mmc");
+ if (err)
+ return err;
+ }
+
+ if (val & CB710_SLOT_MS) { /* MemoryStick slot */
+ err = cb710_register_slot(chip,
+ CB710_SLOT_MS, 0x40, "cb710-ms");
+ if (err)
+ goto unreg_mmc;
+ }
+
+ if (val & CB710_SLOT_SM) { /* SmartMedia slot */
+ err = cb710_register_slot(chip,
+ CB710_SLOT_SM, 0x60, "cb710-sm");
+ if (err)
+ goto unreg_ms;
+ }
+
+ return 0;
+unreg_ms:
+ cb710_unregister_slot(chip, CB710_SLOT_MS);
+unreg_mmc:
+ cb710_unregister_slot(chip, CB710_SLOT_MMC);
+
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+ BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
+#endif
+ return err;
+}
+
+static void __devexit cb710_remove_one(struct pci_dev *pdev)
+{
+ struct cb710_chip *chip = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ cb710_unregister_slot(chip, CB710_SLOT_SM);
+ cb710_unregister_slot(chip, CB710_SLOT_MS);
+ cb710_unregister_slot(chip, CB710_SLOT_MMC);
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+ BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
+#endif
+
+ spin_lock_irqsave(&cb710_ida_lock, flags);
+ ida_remove(&cb710_ida, chip->platform_id);
+ spin_unlock_irqrestore(&cb710_ida_lock, flags);
+}
+
+static const struct pci_device_id cb710_pci_tbl[] = {
+ { PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_CB710_FLASH,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, }
+};
+
+static struct pci_driver cb710_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = cb710_pci_tbl,
+ .probe = cb710_probe,
+ .remove = __devexit_p(cb710_remove_one),
+#ifdef CONFIG_PM
+ .suspend = cb710_suspend,
+ .resume = cb710_resume,
+#endif
+};
+
+static int __init cb710_init_module(void)
+{
+ return pci_register_driver(&cb710_driver);
+}
+
+static void __exit cb710_cleanup_module(void)
+{
+ pci_unregister_driver(&cb710_driver);
+ ida_destroy(&cb710_ida);
+}
+
+module_init(cb710_init_module);
+module_exit(cb710_cleanup_module);
+
+MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>");
+MODULE_DESCRIPTION("ENE CB710 memory card reader driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cb710_pci_tbl);
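
cb710_probe() above allocates its platform_id with the classic ida_pre_get()/ida_get_new() retry loop: memory is preallocated outside the spinlock, the allocation itself happens under it, and -EAGAIN sends the loop back for another preallocation. The helper below restates just that idiom with invented names (my_ida, my_ida_lock, my_get_id()); note that later kernels replaced this API pair with ida_alloc()/ida_free().

        #include <linux/idr.h>
        #include <linux/spinlock.h>
        #include <linux/gfp.h>

        static DEFINE_IDA(my_ida);
        static DEFINE_SPINLOCK(my_ida_lock);

        static int my_get_id(int *id)
        {
                unsigned long flags;
                int err;

                do {
                        if (!ida_pre_get(&my_ida, GFP_KERNEL))
                                return -ENOMEM;         /* preallocate outside the lock */

                        spin_lock_irqsave(&my_ida_lock, flags);
                        err = ida_get_new(&my_ida, id); /* may still report -EAGAIN */
                        spin_unlock_irqrestore(&my_ida_lock, flags);
                } while (err == -EAGAIN);

                return err;
        }
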
diff --git a/drivers/misc/cb710/debug.c b/drivers/misc/cb710/debug.c
new file mode 100644
index 00000000000..02358d086e0
--- /dev/null
+++ b/drivers/misc/cb710/debug.c
@@ -0,0 +1,119 @@
+/*
+ * cb710/debug.c
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/cb710.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define CB710_REG_COUNT 0x80
+
+static const u16 allow[CB710_REG_COUNT/16] = {
+ 0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF,
+ 0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF,
+};
+static const char *const prefix[ARRAY_SIZE(allow)] = {
+ "MMC", "MMC", "MMC", "MMC",
+ "MS?", "MS?", "SM?", "SM?"
+};
+
+static inline int allow_reg_read(unsigned block, unsigned offset, unsigned bits)
+{
+ unsigned mask = (1 << bits/8) - 1;
+ offset *= bits/8;
+ return ((allow[block] >> offset) & mask) == mask;
+}
+
+#define CB710_READ_REGS_TEMPLATE(t) \
+static void cb710_read_regs_##t(void __iomem *iobase, \
+ u##t *reg, unsigned select) \
+{ \
+ unsigned i, j; \
+ \
+ for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \
+ if (!(select & (1 << i))) \
+ continue; \
+ \
+ for (j = 0; j < 0x10/(t/8); ++j) { \
+ if (!allow_reg_read(i, j, t)) \
+ continue; \
+ reg[j] = ioread##t(iobase \
+ + (i << 4) + (j * (t/8))); \
+ } \
+ } \
+}
+
+static const char cb710_regf_8[] = "%02X";
+static const char cb710_regf_16[] = "%04X";
+static const char cb710_regf_32[] = "%08X";
+static const char cb710_xes[] = "xxxxxxxx";
+
+#define CB710_DUMP_REGS_TEMPLATE(t) \
+static void cb710_dump_regs_##t(struct device *dev, \
+ const u##t *reg, unsigned select) \
+{ \
+ const char *const xp = &cb710_xes[8 - t/4]; \
+ const char *const format = cb710_regf_##t; \
+ \
+ char msg[100], *p; \
+ unsigned i, j; \
+ \
+ for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \
+ if (!(select & (1 << i))) \
+ continue; \
+ p = msg; \
+ for (j = 0; j < 0x10/(t/8); ++j) { \
+ *p++ = ' '; \
+ if (j == 8/(t/8)) \
+ *p++ = ' '; \
+ if (allow_reg_read(i, j, t)) \
+ p += sprintf(p, format, reg[j]); \
+ else \
+ p += sprintf(p, "%s", xp); \
+ } \
+ dev_dbg(dev, "%s 0x%02X %s\n", prefix[i], i << 4, msg); \
+ } \
+}
+
+#define CB710_READ_AND_DUMP_REGS_TEMPLATE(t) \
+static void cb710_read_and_dump_regs_##t(struct cb710_chip *chip, \
+ unsigned select) \
+{ \
+ u##t regs[CB710_REG_COUNT/sizeof(u##t)]; \
+ \
+ memset(&regs, 0, sizeof(regs)); \
+ cb710_read_regs_##t(chip->iobase, regs, select); \
+ cb710_dump_regs_##t(cb710_chip_dev(chip), regs, select); \
+}
+
+#define CB710_REG_ACCESS_TEMPLATES(t) \
+ CB710_READ_REGS_TEMPLATE(t) \
+ CB710_DUMP_REGS_TEMPLATE(t) \
+ CB710_READ_AND_DUMP_REGS_TEMPLATE(t)
+
+CB710_REG_ACCESS_TEMPLATES(8)
+CB710_REG_ACCESS_TEMPLATES(16)
+CB710_REG_ACCESS_TEMPLATES(32)
+
+void cb710_dump_regs(struct cb710_chip *chip, unsigned select)
+{
+ if (!(select & CB710_DUMP_REGS_MASK))
+ select = CB710_DUMP_REGS_ALL;
+ if (!(select & CB710_DUMP_ACCESS_MASK))
+ select |= CB710_DUMP_ACCESS_8;
+
+ if (select & CB710_DUMP_ACCESS_32)
+ cb710_read_and_dump_regs_32(chip, select);
+ if (select & CB710_DUMP_ACCESS_16)
+ cb710_read_and_dump_regs_16(chip, select);
+ if (select & CB710_DUMP_ACCESS_8)
+ cb710_read_and_dump_regs_8(chip, select);
+}
+EXPORT_SYMBOL_GPL(cb710_dump_regs);
+
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
new file mode 100644
index 00000000000..d38a7acdb6e
--- /dev/null
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -0,0 +1,150 @@
+/*
+ * cb710/sgbuf2.c
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cb710.h>
+
+static bool sg_dwiter_next(struct sg_mapping_iter *miter)
+{
+ if (sg_miter_next(miter)) {
+ miter->consumed = 0;
+ return true;
+ } else
+ return false;
+}
+
+static bool sg_dwiter_is_at_end(struct sg_mapping_iter *miter)
+{
+ return miter->length == miter->consumed && !sg_dwiter_next(miter);
+}
+
+static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter)
+{
+ size_t len, left = 4;
+ uint32_t data;
+ void *addr = &data;
+
+ do {
+ len = min(miter->length - miter->consumed, left);
+ memcpy(addr, miter->addr + miter->consumed, len);
+ miter->consumed += len;
+ left -= len;
+ if (!left)
+ return data;
+ addr += len;
+ } while (sg_dwiter_next(miter));
+
+ memset(addr, 0, left);
+ return data;
+}
+
+static inline bool needs_unaligned_copy(const void *ptr)
+{
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ return false;
+#else
+ return ((ptr - NULL) & 3) != 0;
+#endif
+}
+
+static bool sg_dwiter_get_next_block(struct sg_mapping_iter *miter, uint32_t **ptr)
+{
+ size_t len;
+
+ if (sg_dwiter_is_at_end(miter))
+ return true;
+
+ len = miter->length - miter->consumed;
+
+ if (likely(len >= 4 && !needs_unaligned_copy(
+ miter->addr + miter->consumed))) {
+ *ptr = miter->addr + miter->consumed;
+ miter->consumed += 4;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * cb710_sg_dwiter_read_next_block() - get next 32-bit word from sg buffer
+ * @miter: sg mapping iterator used for reading
+ *
+ * Description:
+ * Returns the 32-bit word starting at the byte pointed to by @miter@,
+ * handling any alignment issues. Bytes past the buffer's end
+ * are not accessed (read) but are returned as zeroes. @miter@
+ * is advanced by 4 bytes or to the end of the buffer, whichever is
+ * closer.
+ *
+ * Context:
+ * Same requirements as in sg_miter_next().
+ *
+ * Returns:
+ * 32-bit word just read.
+ */
+uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter)
+{
+ uint32_t *ptr = NULL;
+
+ if (likely(sg_dwiter_get_next_block(miter, &ptr)))
+ return ptr ? *ptr : 0;
+
+ return sg_dwiter_read_buffer(miter);
+}
+EXPORT_SYMBOL_GPL(cb710_sg_dwiter_read_next_block);
+
+static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data)
+{
+ size_t len, left = 4;
+ void *addr = &data;
+
+ do {
+ len = min(miter->length - miter->consumed, left);
+ memcpy(miter->addr, addr, len);
+ miter->consumed += len;
+ left -= len;
+ if (!left)
+ return;
+ addr += len;
+ flush_kernel_dcache_page(miter->page);
+ } while (sg_dwiter_next(miter));
+}
+
+/**
+ * cb710_sg_dwiter_write_next_block() - write next 32-bit word to sg buffer
+ * @miter: sg mapping iterator used for writing
+ *
+ * Description:
+ * Writes the 32-bit word starting at the byte pointed to by @miter@,
+ * handling any alignment issues. Bytes which would be written
+ * past the buffer's end are silently discarded. @miter@ is
+ * advanced by 4 bytes or to the end of the buffer, whichever is closer.
+ *
+ * Context:
+ * Same requirements as in sg_miter_next().
+ */
+void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data)
+{
+ uint32_t *ptr = NULL;
+
+ if (likely(sg_dwiter_get_next_block(miter, &ptr))) {
+ if (ptr)
+ *ptr = data;
+ else
+ return;
+ } else
+ sg_dwiter_write_slow(miter, data);
+
+ if (miter->length == miter->consumed)
+ flush_kernel_dcache_page(miter->page);
+}
+EXPORT_SYMBOL_GPL(cb710_sg_dwiter_write_next_block);
+
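
The sgbuf2.c helpers above give the MMC code a 32-bit-word view of an arbitrary scatterlist: aligned full words are accessed in place, words that straddle an element boundary (or sit at an unaligned address) take the slow byte-copy path, and reads past the end are padded with zeroes. A hedged usage sketch follows, assuming the same calling-context rules as sg_miter_next(); my_copy_dwords() is an invented name, and the flags value 0 mirrors what the cb710-mmc code itself passes.

        #include <linux/scatterlist.h>
        #include <linux/cb710.h>

        /* Copy the first 'count' 32-bit words of a scatterlist into 'out'. */
        static void my_copy_dwords(struct scatterlist *sg, unsigned int nents,
                                   u32 *out, size_t count)
        {
                struct sg_mapping_iter miter;
                size_t i;

                sg_miter_start(&miter, sg, nents, 0);
                for (i = 0; i < count; i++)
                        out[i] = cb710_sg_dwiter_read_next_block(&miter);
                sg_miter_stop(&miter);
        }
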
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 89fec052f3b..9118613af32 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -48,6 +48,20 @@ config EEPROM_LEGACY
This driver can also be built as a module. If so, the module
will be called eeprom.
+config EEPROM_MAX6875
+ tristate "Maxim MAX6874/5 power supply supervisor"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get read-only support for the user EEPROM of
+ the Maxim MAX6874/5 EEPROM-programmable, quad power-supply
+ sequencer/supervisor.
+
+ All other features of this chip should be accessed via i2c-dev.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6875.
+
+
config EEPROM_93CX6
tristate "EEPROM 93CX6 support"
help
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index 539dd8f8812..df3d68ffa9d 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_EEPROM_AT24) += at24.o
obj-$(CONFIG_EEPROM_AT25) += at25.o
obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o
+obj-$(CONFIG_EEPROM_MAX6875) += max6875.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
diff --git a/drivers/i2c/chips/max6875.c b/drivers/misc/eeprom/max6875.c
index 033d9d81ec8..3c0c58eed34 100644
--- a/drivers/i2c/chips/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -3,7 +3,7 @@
Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
- Based on i2c/chips/eeprom.c
+ Based on eeprom.c
The MAX6875 has a bank of registers and two banks of EEPROM.
Address ranges are defined as follows:
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 3cf61ece71d..348443bdb23 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -119,7 +119,7 @@ enclosure_register(struct device *dev, const char *name, int components,
edev->edev.class = &enclosure_class;
edev->edev.parent = get_device(dev);
edev->cb = cb;
- dev_set_name(&edev->edev, name);
+ dev_set_name(&edev->edev, "%s", name);
err = device_register(&edev->edev);
if (err)
goto err;
@@ -255,8 +255,8 @@ enclosure_component_register(struct enclosure_device *edev,
ecomp->number = number;
cdev = &ecomp->cdev;
cdev->parent = get_device(&edev->edev);
- if (name)
- dev_set_name(cdev, name);
+ if (name && name[0])
+ dev_set_name(cdev, "%s", name);
else
dev_set_name(cdev, "%u", number);
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index bbefe77c67a..3ce2920e2bf 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -302,7 +302,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
pnode = uv_node_to_pnode(nid);
if (bid < 0 || gru_base[bid])
continue;
- page = alloc_pages_node(nid, GFP_KERNEL, order);
+ page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
if (!page)
goto fail;
gru_base[bid] = page_address(page);
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 9172fcdee4e..c76677afda1 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -232,7 +232,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
mq->mmr_blade = uv_cpu_to_blade_id(cpu);
nid = cpu_to_node(cpu);
- page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
pg_order);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 6faefcffcb5..8d1c60a3f0d 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -450,7 +450,8 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
"packet\n", sizeof(struct xpnet_pending_msg));
dev->stats.tx_errors++;
- return -ENOMEM;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
/* get the beginning of the first cacheline and end of last */
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b25e9b6516a..adc205c49fb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -147,7 +147,8 @@ struct mmc_blk_request {
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
int err;
- __be32 blocks;
+ u32 result;
+ __be32 *blocks;
struct mmc_request mrq;
struct mmc_command cmd;
@@ -199,14 +200,21 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
mrq.cmd = &cmd;
mrq.data = &data;
- sg_init_one(&sg, &blocks, 4);
+ blocks = kmalloc(4, GFP_KERNEL);
+ if (!blocks)
+ return (u32)-1;
+
+ sg_init_one(&sg, blocks, 4);
mmc_wait_for_req(card->host, &mrq);
+ result = ntohl(*blocks);
+ kfree(blocks);
+
if (cmd.error || data.error)
- return (u32)-1;
+ result = (u32)-1;
- return ntohl(blocks);
+ return result;
}
static u32 get_card_status(struct mmc_card *card, struct request *req)
@@ -243,7 +251,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.mrq.cmd = &brq.cmd;
brq.mrq.data = &brq.data;
- brq.cmd.arg = req->sector;
+ brq.cmd.arg = blk_rq_pos(req);
if (!mmc_card_blockaddr(card))
brq.cmd.arg <<= 9;
brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
@@ -251,7 +259,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.stop.opcode = MMC_STOP_TRANSMISSION;
brq.stop.arg = 0;
brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- brq.data.blocks = req->nr_sectors;
+ brq.data.blocks = blk_rq_sectors(req);
/*
* The block layer doesn't support all sector count
@@ -301,7 +309,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
* Adjust the sg list so it is the same size as the
* request.
*/
- if (brq.data.blocks != req->nr_sectors) {
+ if (brq.data.blocks != blk_rq_sectors(req)) {
int i, data_size = brq.data.blocks << 9;
struct scatterlist *sg;
@@ -352,8 +360,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
printk(KERN_ERR "%s: error %d transferring data,"
" sector %u, nr %u, card status %#x\n",
req->rq_disk->disk_name, brq.data.error,
- (unsigned)req->sector,
- (unsigned)req->nr_sectors, status);
+ (unsigned)blk_rq_pos(req),
+ (unsigned)blk_rq_sectors(req), status);
}
if (brq.stop.error) {
@@ -521,7 +529,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
sprintf(md->disk->disk_name, "mmcblk%d", devidx);
- blk_queue_hardsect_size(md->queue.queue, 512);
+ blk_queue_logical_block_size(md->queue.queue, 512);
if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
/*
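
The mmc_sd_num_wr_blocks() change above stops building a scatterlist over an on-stack __be32: stack memory is not guaranteed to be DMA-safe, so the word is kmalloc()ed, the byte-swapped result is captured before the buffer is freed, and errors are reported afterwards. Below is a tiny sketch of the same "heap buffer for a 4-byte DMA transfer" pattern, with an invented my_map_word() helper.

        #include <linux/slab.h>
        #include <linux/scatterlist.h>

        /* Allocate a DMA-able 4-byte buffer and point a single-entry sg at it;
         * the caller reads the result and kfree()s the buffer after the transfer. */
        static __be32 *my_map_word(struct scatterlist *sg)
        {
                __be32 *buf = kmalloc(sizeof(*buf), GFP_KERNEL);

                if (!buf)
                        return NULL;
                sg_init_one(sg, buf, sizeof(*buf));     /* never sg_init_one() on the stack */
                return buf;
        }
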
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7a72e75d5c6..49e582356c6 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,7 +55,7 @@ static int mmc_queue_thread(void *d)
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
if (!blk_queue_plugged(q))
- req = elv_next_request(q);
+ req = blk_fetch_request(q);
mq->req = req;
spin_unlock_irq(q->queue_lock);
@@ -88,16 +88,11 @@ static void mmc_request(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
- int ret;
if (!mq) {
printk(KERN_ERR "MMC: killing requests for dead queue\n");
- while ((req = elv_next_request(q)) != NULL) {
- do {
- ret = __blk_end_request(req, -EIO,
- blk_rq_cur_bytes(req));
- } while (ret);
- }
+ while ((req = blk_fetch_request(q)) != NULL)
+ __blk_end_request_all(req, -EIO);
return;
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 26491173275..d84c880fac8 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -708,7 +708,13 @@ static void mmc_power_up(struct mmc_host *host)
*/
mmc_delay(10);
- host->ios.clock = host->f_min;
+ if (host->f_min > 400000) {
+ pr_warning("%s: Minimum clock frequency too high for "
+ "identification mode\n", mmc_hostname(host));
+ host->ios.clock = host->f_min;
+ } else
+ host->ios.clock = 400000;
+
host->ios.power_mode = MMC_POWER_ON;
mmc_set_ios(host);
@@ -855,61 +861,72 @@ void mmc_rescan(struct work_struct *work)
mmc_bus_get(host);
- if (host->bus_ops == NULL) {
- /*
- * Only we can add a new handler, so it's safe to
- * release the lock here.
- */
+ /* if there is a card registered, check whether it is still present */
+ if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
+ host->bus_ops->detect(host);
+
+ mmc_bus_put(host);
+
+
+ mmc_bus_get(host);
+
+ /* if there still is a card present, stop here */
+ if (host->bus_ops != NULL) {
mmc_bus_put(host);
+ goto out;
+ }
- if (host->ops->get_cd && host->ops->get_cd(host) == 0)
- goto out;
+ /* detect a newly inserted card */
- mmc_claim_host(host);
+ /*
+ * Only we can add a new handler, so it's safe to
+ * release the lock here.
+ */
+ mmc_bus_put(host);
- mmc_power_up(host);
- mmc_go_idle(host);
+ if (host->ops->get_cd && host->ops->get_cd(host) == 0)
+ goto out;
- mmc_send_if_cond(host, host->ocr_avail);
+ mmc_claim_host(host);
- /*
- * First we search for SDIO...
- */
- err = mmc_send_io_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_sdio(host, ocr))
- mmc_power_off(host);
- goto out;
- }
+ mmc_power_up(host);
+ mmc_go_idle(host);
- /*
- * ...then normal SD...
- */
- err = mmc_send_app_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_sd(host, ocr))
- mmc_power_off(host);
- goto out;
- }
+ mmc_send_if_cond(host, host->ocr_avail);
- /*
- * ...and finally MMC.
- */
- err = mmc_send_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_mmc(host, ocr))
- mmc_power_off(host);
- goto out;
- }
+ /*
+ * First we search for SDIO...
+ */
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (!err) {
+ if (mmc_attach_sdio(host, ocr))
+ mmc_power_off(host);
+ goto out;
+ }
- mmc_release_host(host);
- mmc_power_off(host);
- } else {
- if (host->bus_ops->detect && !host->bus_dead)
- host->bus_ops->detect(host);
+ /*
+ * ...then normal SD...
+ */
+ err = mmc_send_app_op_cond(host, 0, &ocr);
+ if (!err) {
+ if (mmc_attach_sd(host, ocr))
+ mmc_power_off(host);
+ goto out;
+ }
- mmc_bus_put(host);
+ /*
+ * ...and finally MMC.
+ */
+ err = mmc_send_op_cond(host, 0, &ocr);
+ if (!err) {
+ if (mmc_attach_mmc(host, ocr))
+ mmc_power_off(host);
+ goto out;
}
+
+ mmc_release_host(host);
+ mmc_power_off(host);
+
out:
if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
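
Two behavioural points in the core.c hunk above: mmc_power_up() now clamps the initial bus clock to the 400 kHz identification rate (warning when the host's f_min is already above it), and mmc_rescan() first asks an already-attached bus to detect removal before probing for a new card in SDIO, then SD, then MMC order. The helper below merely restates the clock selection; my_ident_clock() is an illustrative name.

        /* Card identification must run at <= 400 kHz; hosts that cannot divide
         * that low fall back to their minimum frequency (with a warning). */
        static unsigned int my_ident_clock(unsigned int f_min)
        {
                return (f_min > 400000) ? f_min : 400000;
        }
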
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index b4cf691f3f6..40111a6d8d5 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -83,6 +83,17 @@ config MMC_SDHCI_OF
If unsure, say N.
+config MMC_SDHCI_PLTFM
+ tristate "SDHCI support on the platform specific bus"
+ depends on MMC_SDHCI
+ help
+ This selects the platform specific bus support for Secure Digital Host
+ Controller Interface.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_OMAP
tristate "TI OMAP Multimedia Card Interface support"
depends on ARCH_OMAP
@@ -155,7 +166,7 @@ config MMC_ATMELMCI_DMA
config MMC_IMX
tristate "Motorola i.MX Multimedia Card Interface support"
- depends on ARCH_IMX
+ depends on ARCH_MX1
help
This selects the Motorola i.MX Multimedia card Interface.
If you have a i.MX platform with a Multimedia Card slot,
@@ -237,7 +248,20 @@ config MMC_SDRICOH_CS
config MMC_TMIO
tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
- depends on MFD_TMIO
+ depends on MFD_TMIO || MFD_ASIC3
help
This provides support for the SD/MMC cell found in TC6393XB,
- T7L66XB and also ipaq ASIC3
+ T7L66XB and also HTC ASIC3
+
+config MMC_CB710
+ tristate "ENE CB710 MMC/SD Interface support"
+ depends on PCI
+ select CB710_CORE
+ help
+ This option enables support for the MMC/SD part of the ENE CB710/720 Flash
+ memory card reader found in some laptops (e.g. some versions of
+ the HP Compaq nx9500).
+
+ This driver can also be built as a module. If so, the module
+ will be called cb710-mmc.
+
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 970a997620e..79da397c5fe 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
+obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
obj-$(CONFIG_MMC_OMAP) += omap.o
@@ -29,4 +30,8 @@ endif
obj-$(CONFIG_MMC_S3C) += s3cmci.o
obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
+obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
+ifeq ($(CONFIG_CB710_DEBUG),y)
+ CFLAGS_cb710-mmc.o += -DDEBUG
+endif
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index b58364ed6bb..fc8a0fe7c5c 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -7,6 +7,12 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+/*
+ * Superset of MCI IP registers integrated in Atmel AVR32 and AT91 Processors
+ * Registers and bitfields marked with [2] are only available in MCI2
+ */
+
#ifndef __DRIVERS_MMC_ATMEL_MCI_H__
#define __DRIVERS_MMC_ATMEL_MCI_H__
@@ -14,11 +20,17 @@
#define MCI_CR 0x0000 /* Control */
# define MCI_CR_MCIEN ( 1 << 0) /* MCI Enable */
# define MCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */
+# define MCI_CR_PWSEN ( 1 << 2) /* Power Save Enable */
+# define MCI_CR_PWSDIS ( 1 << 3) /* Power Save Disable */
# define MCI_CR_SWRST ( 1 << 7) /* Software Reset */
#define MCI_MR 0x0004 /* Mode */
# define MCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */
+# define MCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */
# define MCI_MR_RDPROOF ( 1 << 11) /* Read Proof */
# define MCI_MR_WRPROOF ( 1 << 12) /* Write Proof */
+# define MCI_MR_PDCFBYTE ( 1 << 13) /* Force Byte Transfer */
+# define MCI_MR_PDCPADV ( 1 << 14) /* Padding Value */
+# define MCI_MR_PDCMODE ( 1 << 15) /* PDC-oriented Mode */
#define MCI_DTOR 0x0008 /* Data Timeout */
# define MCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */
# define MCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */
@@ -28,6 +40,7 @@
# define MCI_SDCSEL_MASK ( 3 << 0)
# define MCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */
# define MCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */
+# define MCI_SDCBUS_8BIT ( 3 << 6) /* 8-bit data bus[2] */
# define MCI_SDCBUS_MASK ( 3 << 6)
#define MCI_ARGR 0x0010 /* Command Argument */
#define MCI_CMDR 0x0014 /* Command */
@@ -56,6 +69,9 @@
#define MCI_BLKR 0x0018 /* Block */
# define MCI_BCNT(x) ((x) << 0) /* Data Block Count */
# define MCI_BLKLEN(x) ((x) << 16) /* Data Block Length */
+#define MCI_CSTOR 0x001c /* Completion Signal Timeout[2] */
+# define MCI_CSTOCYC(x) ((x) << 0) /* CST cycles */
+# define MCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */
#define MCI_RSPR 0x0020 /* Response 0 */
#define MCI_RSPR1 0x0024 /* Response 1 */
#define MCI_RSPR2 0x0028 /* Response 2 */
@@ -83,7 +99,24 @@
# define MCI_DTOE ( 1 << 22) /* Data Time-Out Error */
# define MCI_OVRE ( 1 << 30) /* RX Overrun Error */
# define MCI_UNRE ( 1 << 31) /* TX Underrun Error */
+#define MCI_DMA 0x0050 /* DMA Configuration[2] */
+# define MCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */
+# define MCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */
+# define MCI_DMAEN ( 1 << 8) /* DMA Hardware Handshaking Enable */
+#define MCI_CFG 0x0054 /* Configuration[2] */
+# define MCI_CFG_FIFOMODE_1DATA ( 1 << 0) /* MCI Internal FIFO control mode */
+# define MCI_CFG_FERRCTRL_COR ( 1 << 4) /* Flow Error flag reset control mode */
+# define MCI_CFG_HSMODE ( 1 << 8) /* High Speed Mode */
+# define MCI_CFG_LSYNC ( 1 << 12) /* Synchronize on the last block */
+#define MCI_WPMR 0x00e4 /* Write Protection Mode[2] */
+# define MCI_WP_EN ( 1 << 0) /* WP Enable */
+# define MCI_WP_KEY (0x4d4349 << 8) /* WP Key */
+#define MCI_WPSR 0x00e8 /* Write Protection Status[2] */
+# define MCI_GET_WP_VS(x) ((x) & 0x0f)
+# define MCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff)
+#define MCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */
+/* This does not include the FIFO Aperture on MCI2 */
#define MCI_REGS_SIZE 0x100
/* Register access macros */
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index cf6a100bb38..7b603e4b41d 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -177,6 +177,7 @@ struct atmel_mci {
* available.
* @wp_pin: GPIO pin used for card write protect sending, or negative
* if not available.
+ * @detect_is_active_high: The state of the detect pin when it is active.
* @detect_timer: Timer used for debouncing @detect_pin interrupts.
*/
struct atmel_mci_slot {
@@ -196,6 +197,7 @@ struct atmel_mci_slot {
int detect_pin;
int wp_pin;
+ bool detect_is_active_high;
struct timer_list detect_timer;
};
@@ -924,7 +926,8 @@ static int atmci_get_cd(struct mmc_host *mmc)
struct atmel_mci_slot *slot = mmc_priv(mmc);
if (gpio_is_valid(slot->detect_pin)) {
- present = !gpio_get_value(slot->detect_pin);
+ present = !(gpio_get_value(slot->detect_pin) ^
+ slot->detect_is_active_high);
dev_dbg(&mmc->class_dev, "card is %spresent\n",
present ? "" : "not ");
}
@@ -1028,7 +1031,8 @@ static void atmci_detect_change(unsigned long data)
return;
enable_irq(gpio_to_irq(slot->detect_pin));
- present = !gpio_get_value(slot->detect_pin);
+ present = !(gpio_get_value(slot->detect_pin) ^
+ slot->detect_is_active_high);
present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
@@ -1456,6 +1460,7 @@ static int __init atmci_init_slot(struct atmel_mci *host,
slot->host = host;
slot->detect_pin = slot_data->detect_pin;
slot->wp_pin = slot_data->wp_pin;
+ slot->detect_is_active_high = slot_data->detect_is_active_high;
slot->sdc_reg = sdc_reg;
mmc->ops = &atmci_ops;
@@ -1477,7 +1482,8 @@ static int __init atmci_init_slot(struct atmel_mci *host,
if (gpio_request(slot->detect_pin, "mmc_detect")) {
dev_dbg(&mmc->class_dev, "no detect pin available\n");
slot->detect_pin = -EBUSY;
- } else if (gpio_get_value(slot->detect_pin)) {
+ } else if (gpio_get_value(slot->detect_pin) ^
+ slot->detect_is_active_high) {
clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
}
}
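
The atmel-mci.c hunks thread a detect_is_active_high flag from platform data into the slot and XOR it with the raw GPIO level, so card-detect switches of either polarity report presence correctly. A one-function restatement of that test follows, with the GPIO level normalised to 0/1; my_card_present() is a made-up name.

        #include <linux/types.h>

        static bool my_card_present(int gpio_level, bool detect_is_active_high)
        {
                /* level and flag must both be 0/1 for the XOR to be meaningful */
                return !((gpio_level ? 1 : 0) ^ (detect_is_active_high ? 1 : 0));
        }
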
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
new file mode 100644
index 00000000000..11efefb1af5
--- /dev/null
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -0,0 +1,804 @@
+/*
+ * cb710/mmc.c
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "cb710-mmc.h"
+
+static const u8 cb710_clock_divider_log2[8] = {
+/* 1, 2, 4, 8, 16, 32, 128, 512 */
+ 0, 1, 2, 3, 4, 5, 7, 9
+};
+#define CB710_MAX_DIVIDER_IDX \
+ (ARRAY_SIZE(cb710_clock_divider_log2) - 1)
+
+static const u8 cb710_src_freq_mhz[16] = {
+ 33, 10, 20, 25, 30, 35, 40, 45,
+ 50, 55, 60, 65, 70, 75, 80, 85
+};
+
+static void cb710_mmc_set_clock(struct mmc_host *mmc, int hz)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+ struct pci_dev *pdev = cb710_slot_to_chip(slot)->pdev;
+ u32 src_freq_idx;
+ u32 divider_idx;
+ int src_hz;
+
+ /* this is magic, unverifiable for me, unless I get an
+ * MMC card with cables connected to the bus signals */
+ pci_read_config_dword(pdev, 0x48, &src_freq_idx);
+ src_freq_idx = (src_freq_idx >> 16) & 0xF;
+ src_hz = cb710_src_freq_mhz[src_freq_idx] * 1000000;
+
+ for (divider_idx = 0; divider_idx < CB710_MAX_DIVIDER_IDX; ++divider_idx) {
+ if (hz >= src_hz >> cb710_clock_divider_log2[divider_idx])
+ break;
+ }
+
+ if (src_freq_idx)
+ divider_idx |= 0x8;
+
+ cb710_pci_update_config_reg(pdev, 0x40, ~0xF0000000, divider_idx << 28);
+
+ dev_dbg(cb710_slot_dev(slot),
+ "clock set to %d Hz, wanted %d Hz; flag = %d\n",
+ src_hz >> cb710_clock_divider_log2[divider_idx & 7],
+ hz, (divider_idx & 8) != 0);
+}
+
+static void __cb710_mmc_enable_irq(struct cb710_slot *slot,
+ unsigned short enable, unsigned short mask)
+{
+ /* clear global IE
+ * - it gets set later if any interrupt sources are enabled */
+ mask |= CB710_MMC_IE_IRQ_ENABLE;
+
+ /* looks like the interrupt is fired whenever
+ * WORD[0x0C] & WORD[0x10] != 0;
+ * -> bit 15 of port 0x0C seems to be the global interrupt enable
+ */
+
+ enable = (cb710_read_port_16(slot, CB710_MMC_IRQ_ENABLE_PORT)
+ & ~mask) | enable;
+
+ if (enable)
+ enable |= CB710_MMC_IE_IRQ_ENABLE;
+
+ cb710_write_port_16(slot, CB710_MMC_IRQ_ENABLE_PORT, enable);
+}
+
+static void cb710_mmc_enable_irq(struct cb710_slot *slot,
+ unsigned short enable, unsigned short mask)
+{
+ struct cb710_mmc_reader *reader = mmc_priv(cb710_slot_to_mmc(slot));
+ unsigned long flags;
+
+ spin_lock_irqsave(&reader->irq_lock, flags);
+ /* this is the only thing irq_lock protects */
+ __cb710_mmc_enable_irq(slot, enable, mask);
+ spin_unlock_irqrestore(&reader->irq_lock, flags);
+}
+
+static void cb710_mmc_reset_events(struct cb710_slot *slot)
+{
+ cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, 0xFF);
+ cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, 0xFF);
+ cb710_write_port_8(slot, CB710_MMC_STATUS2_PORT, 0xFF);
+}
+
+static int cb710_mmc_is_card_inserted(struct cb710_slot *slot)
+{
+ return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
+ & CB710_MMC_S3_CARD_DETECTED;
+}
+
+static void cb710_mmc_enable_4bit_data(struct cb710_slot *slot, int enable)
+{
+ dev_dbg(cb710_slot_dev(slot), "configuring %d-data-line%s mode\n",
+ enable ? 4 : 1, enable ? "s" : "");
+ if (enable)
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT,
+ CB710_MMC_C1_4BIT_DATA_BUS, 0);
+ else
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT,
+ 0, CB710_MMC_C1_4BIT_DATA_BUS);
+}
+
+static int cb710_check_event(struct cb710_slot *slot, u8 what)
+{
+ u16 status;
+
+ status = cb710_read_port_16(slot, CB710_MMC_STATUS_PORT);
+
+ if (status & CB710_MMC_S0_FIFO_UNDERFLOW) {
+ /* it is just a guess, so log it */
+ dev_dbg(cb710_slot_dev(slot),
+ "CHECK : ignoring bit 6 in status %04X\n", status);
+ cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT,
+ CB710_MMC_S0_FIFO_UNDERFLOW);
+ status &= ~CB710_MMC_S0_FIFO_UNDERFLOW;
+ }
+
+ if (status & CB710_MMC_STATUS_ERROR_EVENTS) {
+ dev_dbg(cb710_slot_dev(slot),
+ "CHECK : returning EIO on status %04X\n", status);
+ cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, status & 0xFF);
+ cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT,
+ CB710_MMC_S1_RESET);
+ return -EIO;
+ }
+
+ /* 'what' is a bit in MMC_STATUS1 */
+ if ((status >> 8) & what) {
+ cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, what);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int cb710_wait_for_event(struct cb710_slot *slot, u8 what)
+{
+ int err = 0;
+ unsigned limit = 2000000; /* FIXME: real timeout */
+
+#ifdef CONFIG_CB710_DEBUG
+ u32 e, x;
+ e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
+#endif
+
+ while (!(err = cb710_check_event(slot, what))) {
+ if (!--limit) {
+ cb710_dump_regs(cb710_slot_to_chip(slot),
+ CB710_DUMP_REGS_MMC);
+ err = -ETIMEDOUT;
+ break;
+ }
+ udelay(1);
+ }
+
+#ifdef CONFIG_CB710_DEBUG
+ x = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
+
+ limit = 2000000 - limit;
+ if (limit > 100)
+ dev_dbg(cb710_slot_dev(slot),
+ "WAIT10: waited %d loops, what %d, entry val %08X, exit val %08X\n",
+ limit, what, e, x);
+#endif
+ return err < 0 ? err : 0;
+}
+
+
+static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask)
+{
+ unsigned limit = 500000; /* FIXME: real timeout */
+ int err = 0;
+
+#ifdef CONFIG_CB710_DEBUG
+ u32 e, x;
+ e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
+#endif
+
+ while (cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & mask) {
+ if (!--limit) {
+ cb710_dump_regs(cb710_slot_to_chip(slot),
+ CB710_DUMP_REGS_MMC);
+ err = -ETIMEDOUT;
+ break;
+ }
+ udelay(1);
+ }
+
+#ifdef CONFIG_CB710_DEBUG
+ x = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
+
+ limit = 500000 - limit;
+ if (limit > 100)
+ dev_dbg(cb710_slot_dev(slot),
+ "WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n",
+ limit, mask, e, x);
+#endif
+ return err;
+}
+
+static void cb710_mmc_set_transfer_size(struct cb710_slot *slot,
+ size_t count, size_t blocksize)
+{
+ cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ cb710_write_port_32(slot, CB710_MMC_TRANSFER_SIZE_PORT,
+ ((count - 1) << 16)|(blocksize - 1));
+
+ dev_vdbg(cb710_slot_dev(slot), "set up for %zu block%s of %zu bytes\n",
+ count, count == 1 ? "" : "s", blocksize);
+}
+
+static void cb710_mmc_fifo_hack(struct cb710_slot *slot)
+{
+ /* without this, received data is prepended with 8-bytes of zeroes */
+ u32 r1, r2;
+ int ok = 0;
+
+ r1 = cb710_read_port_32(slot, CB710_MMC_DATA_PORT);
+ r2 = cb710_read_port_32(slot, CB710_MMC_DATA_PORT);
+ if (cb710_read_port_8(slot, CB710_MMC_STATUS0_PORT)
+ & CB710_MMC_S0_FIFO_UNDERFLOW) {
+ cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT,
+ CB710_MMC_S0_FIFO_UNDERFLOW);
+ ok = 1;
+ }
+
+ dev_dbg(cb710_slot_dev(slot),
+ "FIFO-read-hack: expected STATUS0 bit was %s\n",
+ ok ? "set." : "NOT SET!");
+ dev_dbg(cb710_slot_dev(slot),
+ "FIFO-read-hack: dwords ignored: %08X %08X - %s\n",
+ r1, r2, (r1|r2) ? "BAD (NOT ZERO)!" : "ok");
+}
+
+static int cb710_mmc_receive_pio(struct cb710_slot *slot,
+ struct sg_mapping_iter *miter, size_t dw_count)
+{
+ if (!(cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & CB710_MMC_S2_FIFO_READY)) {
+ int err = cb710_wait_for_event(slot,
+ CB710_MMC_S1_PIO_TRANSFER_DONE);
+ if (err)
+ return err;
+ }
+
+ cb710_sg_dwiter_write_from_io(miter,
+ slot->iobase + CB710_MMC_DATA_PORT, dw_count);
+
+ return 0;
+}
+
+static bool cb710_is_transfer_size_supported(struct mmc_data *data)
+{
+ return !(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8));
+}
+
+static int cb710_mmc_receive(struct cb710_slot *slot, struct mmc_data *data)
+{
+ struct sg_mapping_iter miter;
+ size_t len, blocks = data->blocks;
+ int err = 0;
+
+ /* TODO: I don't know how/if the hardware handles non-16B-boundary blocks
+ * except single 8B block */
+ if (unlikely(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8)))
+ return -EINVAL;
+
+ sg_miter_start(&miter, data->sg, data->sg_len, 0);
+
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
+ 15, CB710_MMC_C2_READ_PIO_SIZE_MASK);
+
+ cb710_mmc_fifo_hack(slot);
+
+ while (blocks-- > 0) {
+ len = data->blksz;
+
+ while (len >= 16) {
+ err = cb710_mmc_receive_pio(slot, &miter, 4);
+ if (err)
+ goto out;
+ len -= 16;
+ }
+
+ if (!len)
+ continue;
+
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
+ len - 1, CB710_MMC_C2_READ_PIO_SIZE_MASK);
+
+ len = (len >= 8) ? 4 : 2;
+ err = cb710_mmc_receive_pio(slot, &miter, len);
+ if (err)
+ goto out;
+ }
+out:
+ cb710_sg_miter_stop_writing(&miter);
+ return err;
+}
+
+static int cb710_mmc_send(struct cb710_slot *slot, struct mmc_data *data)
+{
+ struct sg_mapping_iter miter;
+ size_t len, blocks = data->blocks;
+ int err = 0;
+
+ /* TODO: I don't know how/if the hardware handles multiple
+ * non-16B-boundary blocks */
+ if (unlikely(data->blocks > 1 && data->blksz & 15))
+ return -EINVAL;
+
+ sg_miter_start(&miter, data->sg, data->sg_len, 0);
+
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
+ 0, CB710_MMC_C2_READ_PIO_SIZE_MASK);
+
+ while (blocks-- > 0) {
+ len = (data->blksz + 15) >> 4;
+ do {
+ if (!(cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT)
+ & CB710_MMC_S2_FIFO_EMPTY)) {
+ err = cb710_wait_for_event(slot,
+ CB710_MMC_S1_PIO_TRANSFER_DONE);
+ if (err)
+ goto out;
+ }
+ cb710_sg_dwiter_read_to_io(&miter,
+ slot->iobase + CB710_MMC_DATA_PORT, 4);
+ } while (--len);
+ }
+out:
+ sg_miter_stop(&miter);
+ return err;
+}
+
+static u16 cb710_encode_cmd_flags(struct cb710_mmc_reader *reader,
+ struct mmc_command *cmd)
+{
+ unsigned int flags = cmd->flags;
+ u16 cb_flags = 0;
+
+ /* The Windows driver returned 0 for commands for which no response
+ * is expected. As it happens, only two such commands were used
+ * (MMC_GO_IDLE_STATE and MMC_GO_INACTIVE_STATE), so it might
+ * just as well be a bug in that driver.
+ *
+ * The original driver set bit 14 for MMC/SD application
+ * commands. There's no difference 'on the wire', and
+ * it apparently works without it anyway.
+ */
+
+ switch (flags & MMC_CMD_MASK) {
+ case MMC_CMD_AC: cb_flags = CB710_MMC_CMD_AC; break;
+ case MMC_CMD_ADTC: cb_flags = CB710_MMC_CMD_ADTC; break;
+ case MMC_CMD_BC: cb_flags = CB710_MMC_CMD_BC; break;
+ case MMC_CMD_BCR: cb_flags = CB710_MMC_CMD_BCR; break;
+ }
+
+ if (flags & MMC_RSP_BUSY)
+ cb_flags |= CB710_MMC_RSP_BUSY;
+
+ cb_flags |= cmd->opcode << CB710_MMC_CMD_CODE_SHIFT;
+
+ if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
+ cb_flags |= CB710_MMC_DATA_READ;
+
+ if (flags & MMC_RSP_PRESENT) {
+ /* The Windows driver set 01 at bits 4:3, except for
+ * MMC_SET_BLOCKLEN where it set 10. Maybe the
+ * hardware can do something special with this
+ * command? The original driver looks buggy/incomplete
+ * anyway, so we ignore this for now.
+ *
+ * I assume that 00 here means no response is expected.
+ */
+ cb_flags |= CB710_MMC_RSP_PRESENT;
+
+ if (flags & MMC_RSP_136)
+ cb_flags |= CB710_MMC_RSP_136;
+ if (!(flags & MMC_RSP_CRC))
+ cb_flags |= CB710_MMC_RSP_NO_CRC;
+ }
+
+ return cb_flags;
+}
+
+static void cb710_receive_response(struct cb710_slot *slot,
+ struct mmc_command *cmd)
+{
+ unsigned rsp_opcode, wanted_opcode;
+
+ /* Looks like the final byte with the CRC is always stripped (same as SDHCI) */
+ if (cmd->flags & MMC_RSP_136) {
+ u32 resp[4];
+
+ resp[0] = cb710_read_port_32(slot, CB710_MMC_RESPONSE3_PORT);
+ resp[1] = cb710_read_port_32(slot, CB710_MMC_RESPONSE2_PORT);
+ resp[2] = cb710_read_port_32(slot, CB710_MMC_RESPONSE1_PORT);
+ resp[3] = cb710_read_port_32(slot, CB710_MMC_RESPONSE0_PORT);
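+ /* the top byte of RESPONSE3 holds the command index; it is
+ * shifted out when the response words are repacked below */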
+ rsp_opcode = resp[0] >> 24;
+
+ cmd->resp[0] = (resp[0] << 8)|(resp[1] >> 24);
+ cmd->resp[1] = (resp[1] << 8)|(resp[2] >> 24);
+ cmd->resp[2] = (resp[2] << 8)|(resp[3] >> 24);
+ cmd->resp[3] = (resp[3] << 8);
+ } else {
+ rsp_opcode = cb710_read_port_32(slot, CB710_MMC_RESPONSE1_PORT) & 0x3F;
+ cmd->resp[0] = cb710_read_port_32(slot, CB710_MMC_RESPONSE0_PORT);
+ }
+
+ wanted_opcode = (cmd->flags & MMC_RSP_OPCODE) ? cmd->opcode : 0x3F;
+ if (rsp_opcode != wanted_opcode)
+ cmd->error = -EILSEQ;
+}
+
+static int cb710_mmc_transfer_data(struct cb710_slot *slot,
+ struct mmc_data *data)
+{
+ int error, to;
+
+ if (data->flags & MMC_DATA_READ)
+ error = cb710_mmc_receive(slot, data);
+ else
+ error = cb710_mmc_send(slot, data);
+
+ to = cb710_wait_for_event(slot, CB710_MMC_S1_DATA_TRANSFER_DONE);
+ if (!error)
+ error = to;
+
+ if (!error)
+ data->bytes_xfered = data->blksz * data->blocks;
+ return error;
+}
+
+static int cb710_mmc_command(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+ struct cb710_mmc_reader *reader = mmc_priv(mmc);
+ struct mmc_data *data = cmd->data;
+
+ u16 cb_cmd = cb710_encode_cmd_flags(reader, cmd);
+ dev_dbg(cb710_slot_dev(slot), "cmd request: 0x%04X\n", cb_cmd);
+
+ if (data) {
+ if (!cb710_is_transfer_size_supported(data)) {
+ data->error = -EINVAL;
+ return -1;
+ }
+ cb710_mmc_set_transfer_size(slot, data->blocks, data->blksz);
+ }
+
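+ /* write the encoded command and its argument, waiting out the busy
+ * bits between writes; setting CONFIG0 bit 0 apparently starts execution */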
+ cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20|CB710_MMC_S2_BUSY_10);
+ cb710_write_port_16(slot, CB710_MMC_CMD_TYPE_PORT, cb_cmd);
+ cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ cb710_write_port_32(slot, CB710_MMC_CMD_PARAM_PORT, cmd->arg);
+ cb710_mmc_reset_events(slot);
+ cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x01, 0);
+
+ cmd->error = cb710_wait_for_event(slot, CB710_MMC_S1_COMMAND_SENT);
+ if (cmd->error)
+ return -1;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ cb710_receive_response(slot, cmd);
+ if (cmd->error)
+ return -1;
+ }
+
+ if (data)
+ data->error = cb710_mmc_transfer_data(slot, data);
+ return 0;
+}
+
+static void cb710_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+ struct cb710_mmc_reader *reader = mmc_priv(mmc);
+
+ WARN_ON(reader->mrq != NULL);
+
+ reader->mrq = mrq;
+ cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0);
+
+ if (cb710_mmc_is_card_inserted(slot)) {
+ if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop)
+ cb710_mmc_command(mmc, mrq->stop);
+ mdelay(1);
+ } else {
+ mrq->cmd->error = -ENOMEDIUM;
+ }
+
+ tasklet_schedule(&reader->finish_req_tasklet);
+}
+
+static int cb710_mmc_powerup(struct cb710_slot *slot)
+{
+#ifdef CONFIG_CB710_DEBUG
+ struct cb710_chip *chip = cb710_slot_to_chip(slot);
+#endif
+ int err;
+
+ /* a lot of magic; see comment in cb710_mmc_set_clock() */
+ dev_dbg(cb710_slot_dev(slot), "bus powerup\n");
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ if (unlikely(err))
+ return err;
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x80, 0);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0x80, 0);
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ mdelay(1);
+ dev_dbg(cb710_slot_dev(slot), "after delay 1\n");
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ if (unlikely(err))
+ return err;
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x09, 0);
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ mdelay(1);
+ dev_dbg(cb710_slot_dev(slot), "after delay 2\n");
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ if (unlikely(err))
+ return err;
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, 0x08);
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ mdelay(2);
+ dev_dbg(cb710_slot_dev(slot), "after delay 3\n");
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x06, 0);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x70, 0);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, 0x80, 0);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0x03, 0);
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ if (unlikely(err))
+ return err;
+ /* This port behaves weirdly: quick byte reads of 0x08,0x09 return
+ * 0xFF,0x00 after writing 0xFFFF to 0x08; it works correctly when
+ * read/written from userspace... What am I missing here?
+ * (It doesn't depend on the write-to-read delay.) */
+ cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0xFFFF);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x06, 0);
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ dev_dbg(cb710_slot_dev(slot), "bus powerup finished\n");
+
+ return cb710_check_event(slot, 0);
+}
+
+static void cb710_mmc_powerdown(struct cb710_slot *slot)
+{
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, 0x81);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0, 0x80);
+}
+
+static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+ struct cb710_mmc_reader *reader = mmc_priv(mmc);
+ int err;
+
+ cb710_mmc_set_clock(mmc, ios->clock);
+
+ if (!cb710_mmc_is_card_inserted(slot)) {
+ dev_dbg(cb710_slot_dev(slot),
+ "no card inserted - ignoring bus powerup request\n");
+ ios->power_mode = MMC_POWER_OFF;
+ }
+
+ if (ios->power_mode != reader->last_power_mode)
+ switch (ios->power_mode) {
+ case MMC_POWER_ON:
+ err = cb710_mmc_powerup(slot);
+ if (err) {
+ dev_warn(cb710_slot_dev(slot),
+ "powerup failed (%d)- retrying\n", err);
+ cb710_mmc_powerdown(slot);
+ udelay(1);
+ err = cb710_mmc_powerup(slot);
+ if (err)
+ dev_warn(cb710_slot_dev(slot),
+ "powerup retry failed (%d) - expect errors\n",
+ err);
+ }
+ reader->last_power_mode = MMC_POWER_ON;
+ break;
+ case MMC_POWER_OFF:
+ cb710_mmc_powerdown(slot);
+ reader->last_power_mode = MMC_POWER_OFF;
+ break;
+ case MMC_POWER_UP:
+ default:
+ /* ignore */;
+ }
+
+ cb710_mmc_enable_4bit_data(slot, ios->bus_width != MMC_BUS_WIDTH_1);
+
+ cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0);
+}
+
+static int cb710_mmc_get_ro(struct mmc_host *mmc)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+
+ return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
+ & CB710_MMC_S3_WRITE_PROTECTED;
+}
+
+static int cb710_mmc_irq_handler(struct cb710_slot *slot)
+{
+ struct mmc_host *mmc = cb710_slot_to_mmc(slot);
+ struct cb710_mmc_reader *reader = mmc_priv(mmc);
+ u32 status, config1, config2, irqen;
+
+ status = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
+ irqen = cb710_read_port_32(slot, CB710_MMC_IRQ_ENABLE_PORT);
+ config2 = cb710_read_port_32(slot, CB710_MMC_CONFIGB_PORT);
+ config1 = cb710_read_port_32(slot, CB710_MMC_CONFIG_PORT);
+
+ dev_dbg(cb710_slot_dev(slot), "interrupt; status: %08X, "
+ "ie: %08X, c2: %08X, c1: %08X\n",
+ status, irqen, config2, config1);
+
+ if (status & (CB710_MMC_S1_CARD_CHANGED << 8)) {
+ /* ack the event */
+ cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT,
+ CB710_MMC_S1_CARD_CHANGED);
+ if ((irqen & CB710_MMC_IE_CISTATUS_MASK)
+ == CB710_MMC_IE_CISTATUS_MASK)
+ mmc_detect_change(mmc, HZ/5);
+ } else {
+ dev_dbg(cb710_slot_dev(slot), "unknown interrupt (test)\n");
+ spin_lock(&reader->irq_lock);
+ __cb710_mmc_enable_irq(slot, 0, CB710_MMC_IE_TEST_MASK);
+ spin_unlock(&reader->irq_lock);
+ }
+
+ return 1;
+}
+
+static void cb710_mmc_finish_request_tasklet(unsigned long data)
+{
+ struct mmc_host *mmc = (void *)data;
+ struct cb710_mmc_reader *reader = mmc_priv(mmc);
+ struct mmc_request *mrq = reader->mrq;
+
+ reader->mrq = NULL;
+ mmc_request_done(mmc, mrq);
+}
+
+static const struct mmc_host_ops cb710_mmc_host = {
+ .request = cb710_mmc_request,
+ .set_ios = cb710_mmc_set_ios,
+ .get_ro = cb710_mmc_get_ro
+};
+
+#ifdef CONFIG_PM
+
+static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
+ struct mmc_host *mmc = cb710_slot_to_mmc(slot);
+ int err;
+
+ err = mmc_suspend_host(mmc, state);
+ if (err)
+ return err;
+
+ cb710_mmc_enable_irq(slot, 0, ~0);
+ return 0;
+}
+
+static int cb710_mmc_resume(struct platform_device *pdev)
+{
+ struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
+ struct mmc_host *mmc = cb710_slot_to_mmc(slot);
+
+ cb710_mmc_enable_irq(slot, 0, ~0);
+
+ return mmc_resume_host(mmc);
+}
+
+#endif /* CONFIG_PM */
+
+static int __devinit cb710_mmc_init(struct platform_device *pdev)
+{
+ struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
+ struct cb710_chip *chip = cb710_slot_to_chip(slot);
+ struct mmc_host *mmc;
+ struct cb710_mmc_reader *reader;
+ int err;
+ u32 val;
+
+ mmc = mmc_alloc_host(sizeof(*reader), cb710_slot_dev(slot));
+ if (!mmc)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, mmc);
+
+ /* harmless (maybe) magic */
+ pci_read_config_dword(chip->pdev, 0x48, &val);
+ val = cb710_src_freq_mhz[(val >> 16) & 0xF];
+ dev_dbg(cb710_slot_dev(slot), "source frequency: %dMHz\n", val);
+ val *= 1000000;
+
+ mmc->ops = &cb710_mmc_host;
+ mmc->f_max = val;
+ mmc->f_min = val >> cb710_clock_divider_log2[CB710_MAX_DIVIDER_IDX];
+ mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
+ mmc->caps = MMC_CAP_4_BIT_DATA;
+
+ reader = mmc_priv(mmc);
+
+ tasklet_init(&reader->finish_req_tasklet,
+ cb710_mmc_finish_request_tasklet, (unsigned long)mmc);
+ spin_lock_init(&reader->irq_lock);
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+
+ cb710_mmc_enable_irq(slot, 0, ~0);
+ cb710_set_irq_handler(slot, cb710_mmc_irq_handler);
+
+ err = mmc_add_host(mmc);
+ if (unlikely(err))
+ goto err_free_mmc;
+
+ dev_dbg(cb710_slot_dev(slot), "mmc_hostname is %s\n",
+ mmc_hostname(mmc));
+
+ cb710_mmc_enable_irq(slot, CB710_MMC_IE_CARD_INSERTION_STATUS, 0);
+
+ return 0;
+
+err_free_mmc:
+ dev_dbg(cb710_slot_dev(slot), "mmc_add_host() failed: %d\n", err);
+
+ mmc_free_host(mmc);
+ return err;
+}
+
+static int __devexit cb710_mmc_exit(struct platform_device *pdev)
+{
+ struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
+ struct mmc_host *mmc = cb710_slot_to_mmc(slot);
+ struct cb710_mmc_reader *reader = mmc_priv(mmc);
+
+ cb710_mmc_enable_irq(slot, 0, CB710_MMC_IE_CARD_INSERTION_STATUS);
+
+ mmc_remove_host(mmc);
+
+ /* IRQs should be disabled now, but let's stay on the safe side */
+ cb710_mmc_enable_irq(slot, 0, ~0);
+ cb710_set_irq_handler(slot, NULL);
+
+ /* clear config ports - just in case */
+ cb710_write_port_32(slot, CB710_MMC_CONFIG_PORT, 0);
+ cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0);
+
+ tasklet_kill(&reader->finish_req_tasklet);
+
+ mmc_free_host(mmc);
+ return 0;
+}
+
+static struct platform_driver cb710_mmc_driver = {
+ .driver.name = "cb710-mmc",
+ .probe = cb710_mmc_init,
+ .remove = __devexit_p(cb710_mmc_exit),
+#ifdef CONFIG_PM
+ .suspend = cb710_mmc_suspend,
+ .resume = cb710_mmc_resume,
+#endif
+};
+
+static int __init cb710_mmc_init_module(void)
+{
+ return platform_driver_register(&cb710_mmc_driver);
+}
+
+static void __exit cb710_mmc_cleanup_module(void)
+{
+ platform_driver_unregister(&cb710_mmc_driver);
+}
+
+module_init(cb710_mmc_init_module);
+module_exit(cb710_mmc_cleanup_module);
+
+MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>");
+MODULE_DESCRIPTION("ENE CB710 memory card reader driver - MMC/SD part");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cb710-mmc");
diff --git a/drivers/mmc/host/cb710-mmc.h b/drivers/mmc/host/cb710-mmc.h
new file mode 100644
index 00000000000..e845c776bdd
--- /dev/null
+++ b/drivers/mmc/host/cb710-mmc.h
@@ -0,0 +1,104 @@
+/*
+ * cb710/cb710-mmc.h
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef LINUX_CB710_MMC_H
+#define LINUX_CB710_MMC_H
+
+#include <linux/cb710.h>
+
+/* per-MMC-reader structure */
+struct cb710_mmc_reader {
+ struct tasklet_struct finish_req_tasklet;
+ struct mmc_request *mrq;
+ spinlock_t irq_lock;
+ unsigned char last_power_mode;
+};
+
+/* some device struct walking */
+
+static inline struct mmc_host *cb710_slot_to_mmc(struct cb710_slot *slot)
+{
+ return dev_get_drvdata(&slot->pdev.dev);
+}
+
+static inline struct cb710_slot *cb710_mmc_to_slot(struct mmc_host *mmc)
+{
+ struct platform_device *pdev = container_of(mmc_dev(mmc),
+ struct platform_device, dev);
+ return cb710_pdev_to_slot(pdev);
+}
+
+/* registers (this might be all wrong ;) */
+
+#define CB710_MMC_DATA_PORT 0x00
+
+#define CB710_MMC_CONFIG_PORT 0x04
+#define CB710_MMC_CONFIG0_PORT 0x04
+#define CB710_MMC_CONFIG1_PORT 0x05
+#define CB710_MMC_C1_4BIT_DATA_BUS 0x40
+#define CB710_MMC_CONFIG2_PORT 0x06
+#define CB710_MMC_C2_READ_PIO_SIZE_MASK 0x0F /* N-1 */
+#define CB710_MMC_CONFIG3_PORT 0x07
+
+#define CB710_MMC_CONFIGB_PORT 0x08
+
+#define CB710_MMC_IRQ_ENABLE_PORT 0x0C
+#define CB710_MMC_IE_TEST_MASK 0x00BF
+#define CB710_MMC_IE_CARD_INSERTION_STATUS 0x1000
+#define CB710_MMC_IE_IRQ_ENABLE 0x8000
+#define CB710_MMC_IE_CISTATUS_MASK \
+ (CB710_MMC_IE_CARD_INSERTION_STATUS|CB710_MMC_IE_IRQ_ENABLE)
+
+#define CB710_MMC_STATUS_PORT 0x10
+#define CB710_MMC_STATUS_ERROR_EVENTS 0x60FF
+#define CB710_MMC_STATUS0_PORT 0x10
+#define CB710_MMC_S0_FIFO_UNDERFLOW 0x40
+#define CB710_MMC_STATUS1_PORT 0x11
+#define CB710_MMC_S1_COMMAND_SENT 0x01
+#define CB710_MMC_S1_DATA_TRANSFER_DONE 0x02
+#define CB710_MMC_S1_PIO_TRANSFER_DONE 0x04
+#define CB710_MMC_S1_CARD_CHANGED 0x10
+#define CB710_MMC_S1_RESET 0x20
+#define CB710_MMC_STATUS2_PORT 0x12
+#define CB710_MMC_S2_FIFO_READY 0x01
+#define CB710_MMC_S2_FIFO_EMPTY 0x02
+#define CB710_MMC_S2_BUSY_10 0x10
+#define CB710_MMC_S2_BUSY_20 0x20
+#define CB710_MMC_STATUS3_PORT 0x13
+#define CB710_MMC_S3_CARD_DETECTED 0x02
+#define CB710_MMC_S3_WRITE_PROTECTED 0x04
+
+#define CB710_MMC_CMD_TYPE_PORT 0x14
+#define CB710_MMC_RSP_TYPE_MASK 0x0007
+#define CB710_MMC_RSP_R1 (0)
+#define CB710_MMC_RSP_136 (5)
+#define CB710_MMC_RSP_NO_CRC (2)
+#define CB710_MMC_RSP_PRESENT_MASK 0x0018
+#define CB710_MMC_RSP_NONE (0 << 3)
+#define CB710_MMC_RSP_PRESENT (1 << 3)
+#define CB710_MMC_RSP_PRESENT_X (2 << 3)
+#define CB710_MMC_CMD_TYPE_MASK 0x0060
+#define CB710_MMC_CMD_BC (0 << 5)
+#define CB710_MMC_CMD_BCR (1 << 5)
+#define CB710_MMC_CMD_AC (2 << 5)
+#define CB710_MMC_CMD_ADTC (3 << 5)
+#define CB710_MMC_DATA_READ 0x0080
+#define CB710_MMC_CMD_CODE_MASK 0x3F00
+#define CB710_MMC_CMD_CODE_SHIFT 8
+#define CB710_MMC_IS_APP_CMD 0x4000
+#define CB710_MMC_RSP_BUSY 0x8000
+
+#define CB710_MMC_CMD_PARAM_PORT 0x18
+#define CB710_MMC_TRANSFER_SIZE_PORT 0x1C
+#define CB710_MMC_RESPONSE0_PORT 0x20
+#define CB710_MMC_RESPONSE1_PORT 0x24
+#define CB710_MMC_RESPONSE2_PORT 0x28
+#define CB710_MMC_RESPONSE3_PORT 0x2C
+
+#endif /* LINUX_CB710_MMC_H */
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index f48349d18c9..240608cc7ae 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -97,6 +97,14 @@
*/
#define r1b_timeout (HZ * 3)
+/* One of the critical speed parameters is the amount of data which may
+ * be transferred in one command. If this value is too low, the SD card
+ * controller has to do multiple partial block writes (argggh!). With
+ * today's (2008) SD cards there is little speed gain if we transfer more
+ * than 64 KBytes at a time. So use this value until there is some indication
+ * that we should do more here.
+ */
+#define MMC_SPI_BLOCKSATONCE 128
/****************************************************************************/
@@ -327,15 +335,16 @@ checkstatus:
/* Status byte: the entire seven-bit R1 response. */
if (cmd->resp[0] != 0) {
- if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS
- | R1_SPI_ILLEGAL_COMMAND)
+ if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
& cmd->resp[0])
- value = -EINVAL;
+ value = -EFAULT; /* Bad address */
+ else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
+ value = -ENOSYS; /* Function not implemented */
else if (R1_SPI_COM_CRC & cmd->resp[0])
- value = -EILSEQ;
+ value = -EILSEQ; /* Illegal byte sequence */
else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
& cmd->resp[0])
- value = -EIO;
+ value = -EIO; /* I/O error */
/* else R1_SPI_IDLE, "it's resetting" */
}
@@ -1366,6 +1375,10 @@ static int mmc_spi_probe(struct spi_device *spi)
mmc->ops = &mmc_spi_ops;
mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
+ mmc->max_hw_segs = MMC_SPI_BLOCKSATONCE;
+ mmc->max_phys_segs = MMC_SPI_BLOCKSATONCE;
+ mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
+ mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
mmc->caps = MMC_CAP_SPI;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 36875dcfa49..e1aa8471ab1 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -490,7 +490,7 @@ static void mmci_check_status(unsigned long data)
mod_timer(&host->timer, jiffies + HZ);
}
-static int __devinit mmci_probe(struct amba_device *dev, void *id)
+static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
{
struct mmc_platform_data *plat = dev->dev.platform_data;
struct mmci_host *host;
@@ -546,7 +546,7 @@ static int __devinit mmci_probe(struct amba_device *dev, void *id)
host->mclk = clk_get_rate(host->clk);
DBG(host, "eventual mclk rate: %u Hz\n", host->mclk);
}
- host->base = ioremap(dev->res.start, SZ_4K);
+ host->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!host->base) {
ret = -ENOMEM;
goto clk_disable;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index c643d0fe118..b56d72ff06e 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -64,6 +64,31 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
unsigned int tmout;
int tmout_index;
+ /*
+ * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE
+ * register is sometimes not set until a while after certain
+ * "unusual" data block sizes are used (such as with the SWITCH
+ * command), even though the XFER_DONE interrupt was raised.
+ * And if another data transfer starts before this bit settles
+ * (which eventually happens by itself), the new transfer
+ * simply fails with a timeout.
+ */
+ if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) {
+ unsigned long t = jiffies + HZ;
+ unsigned int hw_state, count = 0;
+ do {
+ if (time_after(jiffies, t)) {
+ dev_warn(host->dev, "FIFO_EMPTY bit missing\n");
+ break;
+ }
+ hw_state = mvsd_read(MVSD_HW_STATE);
+ count++;
+ } while (!(hw_state & (1 << 13)));
+ dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit "
+ "(hw=0x%04x, count=%d, jiffies=%ld)\n",
+ hw_state, count, jiffies - (t - HZ));
+ }
+
/* If timeout=0 then maximum timeout index is used. */
tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk);
tmout += data->timeout_clks;
@@ -620,9 +645,18 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->bus_width == MMC_BUS_WIDTH_4)
ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS;
+ /*
+ * The HI_SPEED_EN bit causes trouble with many (but not all)
+ * high-speed SD, SDHC and SDIO cards. Not enabling that bit
+ * makes all cards work. So let's just ignore that bit for now
+ * and revisit this issue if problems caused by not enabling
+ * this bit are ever reported.
+ */
+#if 0
if (ios->timing == MMC_TIMING_MMC_HS ||
ios->timing == MMC_TIMING_SD_HS)
ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN;
+#endif
host->ctrl = ctrl_reg;
mvsd_write(MVSD_HOST_CTRL, ctrl_reg);
@@ -882,3 +916,4 @@ module_param(nodma, int, 0);
MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre");
MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mvsdio");
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index b4a615c55f2..bc14bb1b057 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -140,6 +140,8 @@ struct mxcmci_host {
struct work_struct datawork;
};
+static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
+
static inline int mxcmci_use_dma(struct mxcmci_host *host)
{
return host->do_dma;
@@ -160,7 +162,7 @@ static void mxcmci_softreset(struct mxcmci_host *host)
writew(0xff, host->base + MMC_REG_RES_TO);
}
-static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
+static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
{
unsigned int nob = data->blocks;
unsigned int blksz = data->blksz;
@@ -168,6 +170,7 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
#ifdef HAS_DMA
struct scatterlist *sg;
int i;
+ int ret;
#endif
if (data->flags & MMC_DATA_STREAM)
nob = 0xffff;
@@ -183,7 +186,7 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 3 || sg->length & 3) {
host->do_dma = 0;
- return;
+ return 0;
}
}
@@ -192,23 +195,30 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, host->dma_dir);
- imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, datasize,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_READ);
+ ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
+ datasize,
+ host->res->start + MMC_REG_BUFFER_ACCESS,
+ DMA_MODE_READ);
} else {
host->dma_dir = DMA_TO_DEVICE;
host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, host->dma_dir);
- imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, datasize,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_WRITE);
+ ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
+ datasize,
+ host->res->start + MMC_REG_BUFFER_ACCESS,
+ DMA_MODE_WRITE);
}
+ if (ret) {
+ dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret);
+ return ret;
+ }
wmb();
imx_dma_enable(host->dma);
#endif /* HAS_DMA */
+ return 0;
}
static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
@@ -345,8 +355,11 @@ static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
stat = readl(host->base + MMC_REG_STATUS);
if (stat & STATUS_ERR_MASK)
return stat;
- if (time_after(jiffies, timeout))
+ if (time_after(jiffies, timeout)) {
+ mxcmci_softreset(host);
+ mxcmci_set_clk_rate(host, host->clock);
return STATUS_TIME_OUT_READ;
+ }
if (stat & mask)
return 0;
cpu_relax();
@@ -531,6 +544,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
{
struct mxcmci_host *host = mmc_priv(mmc);
unsigned int cmdat = host->cmdat;
+ int error;
WARN_ON(host->req != NULL);
@@ -540,7 +554,12 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
host->do_dma = 1;
#endif
if (req->data) {
- mxcmci_setup_data(host, req->data);
+ error = mxcmci_setup_data(host, req->data);
+ if (error) {
+ req->cmd->error = error;
+ goto out;
+ }
+
cmdat |= CMD_DAT_CONT_DATA_ENABLE;
@@ -548,7 +567,9 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
cmdat |= CMD_DAT_CONT_WRITE;
}
- if (mxcmci_start_cmd(host, req->cmd, cmdat))
+ error = mxcmci_start_cmd(host, req->cmd, cmdat);
+out:
+ if (error)
mxcmci_finish_request(host, req);
}
@@ -724,7 +745,7 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_clk_put;
}
- mmc->f_min = clk_get_rate(host->clk) >> 7;
+ mmc->f_min = clk_get_rate(host->clk) >> 16;
mmc->f_max = clk_get_rate(host->clk) >> 1;
/* recommended in data sheet */
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index bfa25c01c87..e7a331de573 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -822,7 +822,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
del_timer(&host->cmd_abort_timer);
host->abort = 1;
OMAP_MMC_WRITE(host, IE, 0);
- disable_irq(host->irq);
+ disable_irq_nosync(host->irq);
schedule_work(&host->cmd_abort_work);
return IRQ_HANDLED;
}
@@ -1593,7 +1593,6 @@ static int mmc_omap_resume(struct platform_device *pdev)
#endif
static struct platform_driver mmc_omap_driver = {
- .probe = mmc_omap_probe,
.remove = mmc_omap_remove,
.suspend = mmc_omap_suspend,
.resume = mmc_omap_resume,
@@ -1605,7 +1604,7 @@ static struct platform_driver mmc_omap_driver = {
static int __init mmc_omap_init(void)
{
- return platform_driver_register(&mmc_omap_driver);
+ return platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
}
static void __exit mmc_omap_exit(void)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e62a22a7f00..1cf9cfb3b64 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -680,7 +680,7 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
host->dma_ch = -1;
/*
* DMA Callback: run in interrupt context.
- * mutex_unlock will through a kernel warning if used.
+ * mutex_unlock will throw a kernel warning if used.
*/
up(&host->sem);
}
@@ -1073,7 +1073,6 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
- mmc->ocr_avail = mmc_slot(host).ocr_mask;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
if (pdata->slots[host->slot_id].wires >= 8)
@@ -1110,13 +1109,14 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
goto err_irq;
}
+ /* initialize power supplies, gpios, etc */
if (pdata->init != NULL) {
if (pdata->init(&pdev->dev) != 0) {
- dev_dbg(mmc_dev(host->mmc),
- "Unable to configure MMC IRQs\n");
+ dev_dbg(mmc_dev(host->mmc), "late init error\n");
goto err_irq_cd_init;
}
}
+ mmc->ocr_avail = mmc_slot(host).ocr_mask;
/* Request IRQ for card detect */
if ((mmc_slot(host).card_detect_irq)) {
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 430095725f9..d7d7109ef47 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -27,6 +27,7 @@
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
+#include <linux/regulator/consumer.h>
#include <asm/sizes.h>
@@ -67,8 +68,42 @@ struct pxamci_host {
unsigned int dma_dir;
unsigned int dma_drcmrrx;
unsigned int dma_drcmrtx;
+
+ struct regulator *vcc;
};
+static inline void pxamci_init_ocr(struct pxamci_host *host)
+{
+#ifdef CONFIG_REGULATOR
+ host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
+
+ if (IS_ERR(host->vcc))
+ host->vcc = NULL;
+ else {
+ host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
+ if (host->pdata && host->pdata->ocr_mask)
+ dev_warn(mmc_dev(host->mmc),
+ "ocr_mask/setpower will not be used\n");
+ }
+#endif
+ if (host->vcc == NULL) {
+ /* fall-back to platform data */
+ host->mmc->ocr_avail = host->pdata ?
+ host->pdata->ocr_mask :
+ MMC_VDD_32_33 | MMC_VDD_33_34;
+ }
+}
+
+static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
+{
+#ifdef CONFIG_REGULATOR
+ if (host->vcc)
+ mmc_regulator_set_ocr(host->vcc, vdd);
+#endif
+ if (!host->vcc && host->pdata && host->pdata->setpower)
+ host->pdata->setpower(mmc_dev(host->mmc), vdd);
+}
+
static void pxamci_stop_clock(struct pxamci_host *host)
{
if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
@@ -438,8 +473,7 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->power_mode != ios->power_mode) {
host->power_mode = ios->power_mode;
- if (host->pdata && host->pdata->setpower)
- host->pdata->setpower(mmc_dev(mmc), ios->vdd);
+ pxamci_set_power(host, ios->vdd);
if (ios->power_mode == MMC_POWER_ON)
host->cmdat |= CMDAT_INIT;
@@ -562,9 +596,8 @@ static int pxamci_probe(struct platform_device *pdev)
mmc->f_max = (cpu_is_pxa300() || cpu_is_pxa310()) ? 26000000
: host->clkrate;
- mmc->ocr_avail = host->pdata ?
- host->pdata->ocr_mask :
- MMC_VDD_32_33|MMC_VDD_33_34;
+ pxamci_init_ocr(host);
+
mmc->caps = 0;
host->cmdat = 0;
if (!cpu_is_pxa25x()) {
@@ -661,6 +694,9 @@ static int pxamci_remove(struct platform_device *pdev)
if (mmc) {
struct pxamci_host *host = mmc_priv(mmc);
+ if (host->vcc)
+ regulator_put(host->vcc);
+
if (host->pdata && host->pdata->exit)
host->pdata->exit(&pdev->dev, mmc);
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2db166b7096..4eb4f37544a 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -17,6 +17,7 @@
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/cpufreq.h>
+#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/io.h>
@@ -789,7 +790,7 @@ static void s3cmci_dma_setup(struct s3cmci_host *host,
last_source = source;
- s3c2410_dma_devconfig(host->dma, source, 3,
+ s3c2410_dma_devconfig(host->dma, source,
host->mem->start + host->sdidata);
if (!setup_ok) {
@@ -1121,7 +1122,7 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_OFF:
default:
s3c2410_gpio_setpin(S3C2410_GPE5, 0);
- s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_OUTP);
+ s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPIO_OUTPUT);
if (host->is2440)
mci_con |= S3C2440_SDICON_SDRESET;
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c
index 3ff4ac3abe8..128c614d11a 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of.c
@@ -55,7 +55,13 @@ static u32 esdhc_readl(struct sdhci_host *host, int reg)
static u16 esdhc_readw(struct sdhci_host *host, int reg)
{
- return in_be16(host->ioaddr + (reg ^ 0x2));
+ u16 ret;
+
+ if (unlikely(reg == SDHCI_HOST_VERSION))
+ ret = in_be16(host->ioaddr + reg);
+ else
+ ret = in_be16(host->ioaddr + (reg ^ 0x2));
+ return ret;
}
static u8 esdhc_readb(struct sdhci_host *host, int reg)
@@ -277,6 +283,7 @@ static int __devexit sdhci_of_remove(struct of_device *ofdev)
static const struct of_device_id sdhci_of_match[] = {
{ .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, },
{ .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, },
+ { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, },
{ .compatible = "generic-sdhci", },
{},
};
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
new file mode 100644
index 00000000000..297f40ae6ad
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -0,0 +1,168 @@
+/*
+ * sdhci-pltfm.c Support for SDHCI platform devices
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * SDHCI platform devices
+ *
+ * Inspired by sdhci-pci.c, by Pierre Ossman
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/platform_device.h>
+
+#include <linux/mmc/host.h>
+
+#include <linux/io.h>
+
+#include "sdhci.h"
+
+/*****************************************************************************\
+ * *
+ * SDHCI core callbacks *
+ * *
+\*****************************************************************************/
+
+static struct sdhci_ops sdhci_pltfm_ops = {
+};
+
+/*****************************************************************************\
+ * *
+ * Device probing/removal *
+ * *
+\*****************************************************************************/
+
+static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct resource *iomem;
+ int ret;
+
+ BUG_ON(pdev == NULL);
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (resource_size(iomem) != 0x100)
+ dev_err(&pdev->dev, "Invalid iomem size. You may "
+ "experience problems.\n");
+
+ if (pdev->dev.parent)
+ host = sdhci_alloc_host(pdev->dev.parent, 0);
+ else
+ host = sdhci_alloc_host(&pdev->dev, 0);
+
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ goto err;
+ }
+
+ host->hw_name = "platform";
+ host->ops = &sdhci_pltfm_ops;
+ host->irq = platform_get_irq(pdev, 0);
+
+ if (!request_mem_region(iomem->start, resource_size(iomem),
+ mmc_hostname(host->mmc))) {
+ dev_err(&pdev->dev, "cannot request region\n");
+ ret = -EBUSY;
+ goto err_request;
+ }
+
+ host->ioaddr = ioremap(iomem->start, resource_size(iomem));
+ if (!host->ioaddr) {
+ dev_err(&pdev->dev, "failed to remap registers\n");
+ ret = -ENOMEM;
+ goto err_remap;
+ }
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_add_host;
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+
+err_add_host:
+ iounmap(host->ioaddr);
+err_remap:
+ release_mem_region(iomem->start, resource_size(iomem));
+err_request:
+ sdhci_free_host(host);
+err:
+ printk(KERN_ERR"Probing of sdhci-pltfm failed: %d\n", ret);
+ return ret;
+}
+
+static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int dead;
+ u32 scratch;
+
+ dead = 0;
+ scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
+ if (scratch == (u32)-1)
+ dead = 1;
+
+ sdhci_remove_host(host, dead);
+ iounmap(host->ioaddr);
+ release_mem_region(iomem->start, resource_size(iomem));
+ sdhci_free_host(host);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_pltfm_driver = {
+ .driver = {
+ .name = "sdhci",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_pltfm_probe,
+ .remove = __devexit_p(sdhci_pltfm_remove),
+};
+
+/*****************************************************************************\
+ * *
+ * Driver init/exit *
+ * *
+\*****************************************************************************/
+
+static int __init sdhci_drv_init(void)
+{
+ return platform_driver_register(&sdhci_pltfm_driver);
+}
+
+static void __exit sdhci_drv_exit(void)
+{
+ platform_driver_unregister(&sdhci_pltfm_driver);
+}
+
+module_init(sdhci_drv_init);
+module_exit(sdhci_drv_exit);
+
+MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sdhci");
+
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9234be2226e..35789c6edc1 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -78,6 +78,11 @@ static void sdhci_dumpregs(struct sdhci_host *host)
sdhci_readl(host, SDHCI_CAPABILITIES),
sdhci_readl(host, SDHCI_MAX_CURRENT));
+ if (host->flags & SDHCI_USE_ADMA)
+ printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+ readl(host->ioaddr + SDHCI_ADMA_ERROR),
+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
+
printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
}
@@ -1005,12 +1010,34 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
u8 pwr;
- if (host->power == power)
+ if (power == (unsigned short)-1)
+ pwr = 0;
+ else {
+ switch (1 << power) {
+ case MMC_VDD_165_195:
+ pwr = SDHCI_POWER_180;
+ break;
+ case MMC_VDD_29_30:
+ case MMC_VDD_30_31:
+ pwr = SDHCI_POWER_300;
+ break;
+ case MMC_VDD_32_33:
+ case MMC_VDD_33_34:
+ pwr = SDHCI_POWER_330;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ if (host->pwr == pwr)
return;
- if (power == (unsigned short)-1) {
+ host->pwr = pwr;
+
+ if (pwr == 0) {
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
- goto out;
+ return;
}
/*
@@ -1020,35 +1047,16 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
- pwr = SDHCI_POWER_ON;
-
- switch (1 << power) {
- case MMC_VDD_165_195:
- pwr |= SDHCI_POWER_180;
- break;
- case MMC_VDD_29_30:
- case MMC_VDD_30_31:
- pwr |= SDHCI_POWER_300;
- break;
- case MMC_VDD_32_33:
- case MMC_VDD_33_34:
- pwr |= SDHCI_POWER_330;
- break;
- default:
- BUG();
- }
-
/*
* At least the Marvell CaFe chip gets confused if we set the voltage
* and set turn on power at the same time, so set the voltage first.
*/
if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
- sdhci_writeb(host, pwr & ~SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ pwr |= SDHCI_POWER_ON;
-out:
- host->power = power;
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
}
/*****************************************************************************\
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 65c6f996bbd..2de08349c3c 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -255,7 +255,7 @@ struct sdhci_host {
unsigned int timeout_clk; /* Timeout freq (KHz) */
unsigned int clock; /* Current clock (MHz) */
- unsigned short power; /* Current voltage */
+ u8 pwr; /* Current voltage */
struct mmc_request *mrq; /* Current request */
struct mmc_command *cmd; /* Current command */
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 63fbd5b7d31..91991b460c4 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -10,7 +10,7 @@
*
* Driver for the MMC / SD / SDIO cell found in:
*
- * TC6393XB TC6391XB TC6387XB T7L66XB
+ * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
*
* This driver draws mainly on scattered spec sheets, Reverse engineering
* of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
@@ -35,69 +35,47 @@
#include "tmio_mmc.h"
-/*
- * Fixme - documentation conflicts on what the clock values are for the
- * various dividers.
- * One document I have says that its a divisor of a 24MHz clock, another 33.
- * This probably depends on HCLK for a given platform, so we may need to
- * require HCLK be passed to us from the MFD core.
- *
- */
-
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
- void __iomem *cnf = host->cnf;
- void __iomem *ctl = host->ctl;
u32 clk = 0, clock;
if (new_clock) {
- for (clock = 46875, clk = 0x100; new_clock >= (clock<<1); ) {
+ for (clock = host->mmc->f_min, clk = 0x80000080;
+ new_clock >= (clock<<1); clk >>= 1)
clock <<= 1;
- clk >>= 1;
- }
- if (clk & 0x1)
- clk = 0x20000;
-
- clk >>= 2;
- tmio_iowrite8((clk & 0x8000) ? 0 : 1, cnf + CNF_SD_CLK_MODE);
clk |= 0x100;
}
- tmio_iowrite16(clk, ctl + CTL_SD_CARD_CLK_CTL);
+ sd_config_write8(host, CNF_SD_CLK_MODE, clk >> 22);
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
- void __iomem *ctl = host->ctl;
-
- tmio_iowrite16(0x0000, ctl + CTL_CLK_AND_WAIT_CTL);
+ sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
msleep(10);
- tmio_iowrite16(tmio_ioread16(ctl + CTL_SD_CARD_CLK_CTL) & ~0x0100,
- ctl + CTL_SD_CARD_CLK_CTL);
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10);
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
- void __iomem *ctl = host->ctl;
-
- tmio_iowrite16(tmio_ioread16(ctl + CTL_SD_CARD_CLK_CTL) | 0x0100,
- ctl + CTL_SD_CARD_CLK_CTL);
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10);
- tmio_iowrite16(0x0100, ctl + CTL_CLK_AND_WAIT_CTL);
+ sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
msleep(10);
}
static void reset(struct tmio_mmc_host *host)
{
- void __iomem *ctl = host->ctl;
-
/* FIXME - should we set stop clock reg here */
- tmio_iowrite16(0x0000, ctl + CTL_RESET_SD);
- tmio_iowrite16(0x0000, ctl + CTL_RESET_SDIO);
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
+ sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
msleep(10);
- tmio_iowrite16(0x0001, ctl + CTL_RESET_SD);
- tmio_iowrite16(0x0001, ctl + CTL_RESET_SDIO);
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
+ sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
msleep(10);
}
@@ -129,13 +107,12 @@ tmio_mmc_finish_request(struct tmio_mmc_host *host)
static int
tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
- void __iomem *ctl = host->ctl;
struct mmc_data *data = host->data;
int c = cmd->opcode;
/* Command 12 is handled by hardware */
if (cmd->opcode == 12 && !cmd->arg) {
- tmio_iowrite16(0x001, ctl + CTL_STOP_INTERNAL_ACTION);
+ sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
return 0;
}
@@ -160,18 +137,18 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
if (data) {
c |= DATA_PRESENT;
if (data->blocks > 1) {
- tmio_iowrite16(0x100, ctl + CTL_STOP_INTERNAL_ACTION);
+ sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
c |= TRANSFER_MULTI;
}
if (data->flags & MMC_DATA_READ)
c |= TRANSFER_READ;
}
- enable_mmc_irqs(ctl, TMIO_MASK_CMD);
+ enable_mmc_irqs(host, TMIO_MASK_CMD);
/* Fire off the command */
- tmio_iowrite32(cmd->arg, ctl + CTL_ARG_REG);
- tmio_iowrite16(c, ctl + CTL_SD_CMD);
+ sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
+ sd_ctrl_write16(host, CTL_SD_CMD, c);
return 0;
}
@@ -183,7 +160,6 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
*/
static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
- void __iomem *ctl = host->ctl;
struct mmc_data *data = host->data;
unsigned short *buf;
unsigned int count;
@@ -206,9 +182,9 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
/* Transfer the data */
if (data->flags & MMC_DATA_READ)
- tmio_ioread16_rep(ctl + CTL_SD_DATA_PORT, buf, count >> 1);
+ sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
else
- tmio_iowrite16_rep(ctl + CTL_SD_DATA_PORT, buf, count >> 1);
+ sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
host->sg_off += count;
@@ -222,7 +198,6 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
- void __iomem *ctl = host->ctl;
struct mmc_data *data = host->data;
struct mmc_command *stop;
@@ -251,13 +226,13 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
*/
if (data->flags & MMC_DATA_READ)
- disable_mmc_irqs(ctl, TMIO_MASK_READOP);
+ disable_mmc_irqs(host, TMIO_MASK_READOP);
else
- disable_mmc_irqs(ctl, TMIO_MASK_WRITEOP);
+ disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
if (stop) {
if (stop->opcode == 12 && !stop->arg)
- tmio_iowrite16(0x000, ctl + CTL_STOP_INTERNAL_ACTION);
+ sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
else
BUG();
}
@@ -268,9 +243,8 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
unsigned int stat)
{
- void __iomem *ctl = host->ctl, *addr;
struct mmc_command *cmd = host->cmd;
- int i;
+ int i, addr;
if (!host->cmd) {
pr_debug("Spurious CMD irq\n");
@@ -284,8 +258,8 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
* modify the order of the response for short response command types.
*/
- for (i = 3, addr = ctl + CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
- cmd->resp[i] = tmio_ioread32(addr);
+ for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
+ cmd->resp[i] = sd_ctrl_read32(host, addr);
if (cmd->flags & MMC_RSP_136) {
cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
@@ -307,9 +281,9 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
*/
if (host->data && !cmd->error) {
if (host->data->flags & MMC_DATA_READ)
- enable_mmc_irqs(ctl, TMIO_MASK_READOP);
+ enable_mmc_irqs(host, TMIO_MASK_READOP);
else
- enable_mmc_irqs(ctl, TMIO_MASK_WRITEOP);
+ enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
} else {
tmio_mmc_finish_request(host);
}
@@ -321,20 +295,19 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
- void __iomem *ctl = host->ctl;
unsigned int ireg, irq_mask, status;
pr_debug("MMC IRQ begin\n");
- status = tmio_ioread32(ctl + CTL_STATUS);
- irq_mask = tmio_ioread32(ctl + CTL_IRQ_MASK);
+ status = sd_ctrl_read32(host, CTL_STATUS);
+ irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
ireg = status & TMIO_MASK_IRQ & ~irq_mask;
pr_debug_status(status);
pr_debug_status(ireg);
if (!ireg) {
- disable_mmc_irqs(ctl, status & ~irq_mask);
+ disable_mmc_irqs(host, status & ~irq_mask);
pr_debug("tmio_mmc: Spurious irq, disabling! "
"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
@@ -346,7 +319,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
while (ireg) {
/* Card insert / remove attempts */
if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
- ack_mmc_irqs(ctl, TMIO_STAT_CARD_INSERT |
+ ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
TMIO_STAT_CARD_REMOVE);
mmc_detect_change(host->mmc, 0);
}
@@ -358,25 +331,25 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
/* Command completion */
if (ireg & TMIO_MASK_CMD) {
- ack_mmc_irqs(ctl, TMIO_MASK_CMD);
+ ack_mmc_irqs(host, TMIO_MASK_CMD);
tmio_mmc_cmd_irq(host, status);
}
/* Data transfer */
if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
- ack_mmc_irqs(ctl, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
+ ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
tmio_mmc_pio_irq(host);
}
/* Data transfer completion */
if (ireg & TMIO_STAT_DATAEND) {
- ack_mmc_irqs(ctl, TMIO_STAT_DATAEND);
+ ack_mmc_irqs(host, TMIO_STAT_DATAEND);
tmio_mmc_data_irq(host);
}
/* Check status - keep going until we've handled it all */
- status = tmio_ioread32(ctl + CTL_STATUS);
- irq_mask = tmio_ioread32(ctl + CTL_IRQ_MASK);
+ status = sd_ctrl_read32(host, CTL_STATUS);
+ irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
ireg = status & TMIO_MASK_IRQ & ~irq_mask;
pr_debug("Status at end of loop: %08x\n", status);
@@ -391,8 +364,6 @@ out:
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
struct mmc_data *data)
{
- void __iomem *ctl = host->ctl;
-
pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
data->blksz, data->blocks);
@@ -407,8 +378,8 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
host->data = data;
/* Set transfer length / blocksize */
- tmio_iowrite16(data->blksz, ctl + CTL_SD_XFER_LEN);
- tmio_iowrite16(data->blocks, ctl + CTL_XFER_BLK_COUNT);
+ sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
+ sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
return 0;
}
@@ -449,8 +420,6 @@ fail:
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
- void __iomem *cnf = host->cnf;
- void __iomem *ctl = host->ctl;
if (ios->clock)
tmio_mmc_set_clock(host, ios->clock);
@@ -458,12 +427,12 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* Power sequence - OFF -> ON -> UP */
switch (ios->power_mode) {
case MMC_POWER_OFF: /* power down SD bus */
- tmio_iowrite8(0x00, cnf + CNF_PWR_CTL_2);
+ sd_config_write8(host, CNF_PWR_CTL_2, 0x00);
tmio_mmc_clk_stop(host);
break;
case MMC_POWER_ON: /* power up SD bus */
- tmio_iowrite8(0x02, cnf + CNF_PWR_CTL_2);
+ sd_config_write8(host, CNF_PWR_CTL_2, 0x02);
break;
case MMC_POWER_UP: /* start bus clock */
tmio_mmc_clk_start(host);
@@ -472,10 +441,10 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
- tmio_iowrite16(0x80e0, ctl + CTL_SD_MEM_CARD_OPT);
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
break;
case MMC_BUS_WIDTH_4:
- tmio_iowrite16(0x00e0, ctl + CTL_SD_MEM_CARD_OPT);
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
break;
}
@@ -486,9 +455,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
- void __iomem *ctl = host->ctl;
- return (tmio_ioread16(ctl + CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1;
+ return (sd_ctrl_read16(host, CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1;
}
static struct mmc_host_ops tmio_mmc_ops = {
@@ -518,13 +486,8 @@ static int tmio_mmc_resume(struct platform_device *dev)
struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
struct mmc_host *mmc = platform_get_drvdata(dev);
struct tmio_mmc_host *host = mmc_priv(mmc);
- void __iomem *cnf = host->cnf;
int ret = 0;
- /* Enable the MMC/SD Control registers */
- tmio_iowrite16(SDCREN, cnf + CNF_CMD);
- tmio_iowrite32(dev->resource[0].start & 0xfffe, cnf + CNF_CTL_BASE);
-
/* Tell the MFD core we are ready to be enabled */
if (cell->enable) {
ret = cell->enable(dev);
@@ -532,6 +495,11 @@ static int tmio_mmc_resume(struct platform_device *dev)
goto out;
}
+ /* Enable the MMC/SD Control registers */
+ sd_config_write16(host, CNF_CMD, SDCREN);
+ sd_config_write32(host, CNF_CTL_BASE,
+ (dev->resource[0].start >> host->bus_shift) & 0xfffe);
+
mmc_resume_host(mmc);
out:
@@ -545,20 +513,25 @@ out:
static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct tmio_mmc_data *pdata;
struct resource *res_ctl, *res_cnf;
struct tmio_mmc_host *host;
struct mmc_host *mmc;
- int ret = -ENOMEM;
+ int ret = -EINVAL;
if (dev->num_resources != 3)
goto out;
res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
res_cnf = platform_get_resource(dev, IORESOURCE_MEM, 1);
- if (!res_ctl || !res_cnf) {
- ret = -EINVAL;
+ if (!res_ctl || !res_cnf)
goto out;
- }
+
+ pdata = cell->driver_data;
+ if (!pdata || !pdata->hclk)
+ goto out;
+
+ ret = -ENOMEM;
mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
if (!mmc)
@@ -568,6 +541,9 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
host->mmc = mmc;
platform_set_drvdata(dev, mmc);
+ /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
+ host->bus_shift = resource_size(res_ctl) >> 10;
+
host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
if (!host->ctl)
goto host_free;
@@ -578,15 +554,10 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
mmc->ops = &tmio_mmc_ops;
mmc->caps = MMC_CAP_4_BIT_DATA;
- mmc->f_min = 46875; /* 24000000 / 512 */
- mmc->f_max = 24000000;
+ mmc->f_max = pdata->hclk;
+ mmc->f_min = mmc->f_max / 512;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
- /* Enable the MMC/SD Control registers */
- tmio_iowrite16(SDCREN, host->cnf + CNF_CMD);
- tmio_iowrite32(dev->resource[0].start & 0xfffe,
- host->cnf + CNF_CTL_BASE);
-
/* Tell the MFD core we are ready to be enabled */
if (cell->enable) {
ret = cell->enable(dev);
@@ -594,14 +565,19 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
goto unmap_cnf;
}
+ /* Enable the MMC/SD Control registers */
+ sd_config_write16(host, CNF_CMD, SDCREN);
+ sd_config_write32(host, CNF_CTL_BASE,
+ (dev->resource[0].start >> host->bus_shift) & 0xfffe);
+
/* Disable SD power during suspend */
- tmio_iowrite8(0x01, host->cnf + CNF_PWR_CTL_3);
+ sd_config_write8(host, CNF_PWR_CTL_3, 0x01);
/* The below is required but why? FIXME */
- tmio_iowrite8(0x1f, host->cnf + CNF_STOP_CLK_CTL);
+ sd_config_write8(host, CNF_STOP_CLK_CTL, 0x1f);
/* Power down SD bus*/
- tmio_iowrite8(0x0, host->cnf + CNF_PWR_CTL_2);
+ sd_config_write8(host, CNF_PWR_CTL_2, 0x00);
tmio_mmc_clk_stop(host);
reset(host);
@@ -612,22 +588,20 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
else
goto unmap_cnf;
- disable_mmc_irqs(host->ctl, TMIO_MASK_ALL);
+ disable_mmc_irqs(host, TMIO_MASK_ALL);
- ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED, "tmio-mmc",
- host);
+ ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
+ IRQF_TRIGGER_FALLING, "tmio-mmc", host);
if (ret)
goto unmap_cnf;
- set_irq_type(host->irq, IRQ_TYPE_EDGE_FALLING);
-
mmc_add_host(mmc);
printk(KERN_INFO "%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
(unsigned long)host->ctl, host->irq);
/* Unmask the IRQs we want to know about */
- enable_mmc_irqs(host->ctl, TMIO_MASK_IRQ);
+ enable_mmc_irqs(host, TMIO_MASK_IRQ);
return 0;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 9c831ab2ece..9fa99859497 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -83,34 +83,36 @@
TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
-#define enable_mmc_irqs(ctl, i) \
+
+#define enable_mmc_irqs(host, i) \
do { \
u32 mask;\
- mask = tmio_ioread32((ctl) + CTL_IRQ_MASK); \
+ mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
mask &= ~((i) & TMIO_MASK_IRQ); \
- tmio_iowrite32(mask, (ctl) + CTL_IRQ_MASK); \
+ sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
} while (0)
-#define disable_mmc_irqs(ctl, i) \
+#define disable_mmc_irqs(host, i) \
do { \
u32 mask;\
- mask = tmio_ioread32((ctl) + CTL_IRQ_MASK); \
+ mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
mask |= ((i) & TMIO_MASK_IRQ); \
- tmio_iowrite32(mask, (ctl) + CTL_IRQ_MASK); \
+ sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
} while (0)
-#define ack_mmc_irqs(ctl, i) \
+#define ack_mmc_irqs(host, i) \
do { \
u32 mask;\
- mask = tmio_ioread32((ctl) + CTL_STATUS); \
+ mask = sd_ctrl_read32((host), CTL_STATUS); \
mask &= ~((i) & TMIO_MASK_IRQ); \
- tmio_iowrite32(mask, (ctl) + CTL_STATUS); \
+ sd_ctrl_write32((host), CTL_STATUS, mask); \
} while (0)
struct tmio_mmc_host {
void __iomem *cnf;
void __iomem *ctl;
+ unsigned long bus_shift;
struct mmc_command *cmd;
struct mmc_request *mrq;
struct mmc_data *data;
@@ -123,6 +125,63 @@ struct tmio_mmc_host {
unsigned int sg_off;
};
+#include <linux/io.h>
+
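+/* Register offsets are scaled by bus_shift, so the same register layout
+ * works on chips that space their registers twice as far apart. */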
+static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift));
+}
+
+static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ readsw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift)) |
+ readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+}
+
+static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
+ u16 val)
+{
+ writew(val, host->ctl + (addr << host->bus_shift));
+}
+
+static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ writesw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
+ u32 val)
+{
+ writew(val, host->ctl + (addr << host->bus_shift));
+ writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+}
+
+static inline void sd_config_write8(struct tmio_mmc_host *host, int addr,
+ u8 val)
+{
+ writeb(val, host->cnf + (addr << host->bus_shift));
+}
+
+static inline void sd_config_write16(struct tmio_mmc_host *host, int addr,
+ u16 val)
+{
+ writew(val, host->cnf + (addr << host->bus_shift));
+}
+
+static inline void sd_config_write32(struct tmio_mmc_host *host, int addr,
+ u32 val)
+{
+ writew(val, host->cnf + (addr << host->bus_shift));
+ writew(val >> 16, host->cnf + ((addr + 2) << host->bus_shift));
+}
+
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
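Aside (illustrative, not from the patch): the new sd_ctrl_*/sd_config_* helpers hide the bus stride behind host->bus_shift, and 32-bit registers are built from two 16-bit accesses. The shift arithmetic alone, for a hypothetical register at nominal offset 0x1c:

	/* with bus_shift == 1, nominal offset 0x1c maps to byte offset 0x38,
	 * and the upper 16 bits live at nominal offset 0x1e -> byte 0x3c */
	static inline unsigned long reg_byte_offset(int addr, unsigned long bus_shift)
	{
		return (unsigned long)addr << bus_shift;
	}
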
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 7d04fb9ddca..b8e35a0b4d7 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -154,7 +154,8 @@ config MTD_AFS_PARTS
You will still need the parsing functions to be called by the driver
for your particular device. It won't happen automatically. The
- 'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example.
+ 'armflash' map driver (CONFIG_MTD_ARM_INTEGRATOR) does this, for
+ example.
config MTD_OF_PARTS
tristate "Flash partition map based on OF description"
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 6fde0a2e356..325fab92a62 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -49,7 +49,7 @@ config MTD_MS02NV
If you want to compile this driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>.
- The module will be called ms02-nv.ko.
+ The module will be called ms02-nv.
config MTD_DATAFLASH
tristate "Support for AT45xxx DataFlash"
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 62dee54af0a..43976aa4dbb 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -178,7 +178,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
/* Calculate flash page address; use block erase (for speed) if
* we're at a block boundary and need to erase the whole block.
*/
- pageaddr = div_u64(instr->len, priv->page_size);
+ pageaddr = div_u64(instr->addr, priv->page_size);
do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
pageaddr = pageaddr << priv->page_offset;
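Aside (sketch with illustrative names): the fix above derives the page index from the erase start address, not the erase length; block erase is then chosen when that index is block-aligned and the request covers at least one block.

	#include <linux/math64.h>

	static int pick_block_erase(u64 addr, u64 len, u32 page_size, u32 blocksize)
	{
		u64 pageaddr = div_u64(addr, page_size);	/* addr, not len */

		return (pageaddr & 0x7) == 0 && len >= blocksize;
	}
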
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a49a9c8f2cb..aaac3b6800b 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -47,40 +47,41 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
unsigned long block, nsect;
char *buf;
- block = req->sector << 9 >> tr->blkshift;
- nsect = req->current_nr_sectors << 9 >> tr->blkshift;
+ block = blk_rq_pos(req) << 9 >> tr->blkshift;
+ nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
buf = req->buffer;
if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
req->cmd[0] == REQ_LB_OP_DISCARD)
- return !tr->discard(dev, block, nsect);
+ return tr->discard(dev, block, nsect);
if (!blk_fs_request(req))
- return 0;
+ return -EIO;
- if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
- return 0;
+ if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
+ get_capacity(req->rq_disk))
+ return -EIO;
switch(rq_data_dir(req)) {
case READ:
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->readsect(dev, block, buf))
- return 0;
- return 1;
+ return -EIO;
+ return 0;
case WRITE:
if (!tr->writesect)
- return 0;
+ return -EIO;
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->writesect(dev, block, buf))
- return 0;
- return 1;
+ return -EIO;
+ return 0;
default:
printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
- return 0;
+ return -EIO;
}
}
@@ -88,19 +89,18 @@ static int mtd_blktrans_thread(void *arg)
{
struct mtd_blktrans_ops *tr = arg;
struct request_queue *rq = tr->blkcore_priv->rq;
+ struct request *req = NULL;
/* we might get involved when memory gets low, so use PF_MEMALLOC */
current->flags |= PF_MEMALLOC;
spin_lock_irq(rq->queue_lock);
+
while (!kthread_should_stop()) {
- struct request *req;
struct mtd_blktrans_dev *dev;
- int res = 0;
-
- req = elv_next_request(rq);
+ int res;
- if (!req) {
+ if (!req && !(req = blk_fetch_request(rq))) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(rq->queue_lock);
schedule();
@@ -119,8 +119,13 @@ static int mtd_blktrans_thread(void *arg)
spin_lock_irq(rq->queue_lock);
- end_request(req, res);
+ if (!__blk_end_request_cur(req, res))
+ req = NULL;
}
+
+ if (req)
+ __blk_end_request_all(req, -EIO);
+
spin_unlock_irq(rq->queue_lock);
return 0;
@@ -373,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
}
tr->blkcore_priv->rq->queuedata = tr;
- blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
+ blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
if (tr->discard)
blk_queue_set_discard(tr->blkcore_priv->rq,
blktrans_discard_request);
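Aside (hedged sketch, not a drop-in for the driver code): the mtd_blkdevs hunks move from elv_next_request()/end_request() to blk_fetch_request()/__blk_end_request_cur(), and do_blktrans_request() now returns 0 or -errno. The fetch/complete shape, with a hypothetical handle_one_chunk() helper and the locking omitted:

	#include <linux/blkdev.h>

	static void drain_queue(struct request_queue *rq)
	{
		struct request *req = NULL;

		while ((req = req ? req : blk_fetch_request(rq))) {
			/* handle_one_chunk() is hypothetical, returning 0 or -errno */
			int res = handle_one_chunk(req);

			if (!__blk_end_request_cur(req, res))
				req = NULL;	/* all sectors done, fetch a new request */
		}
	}
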
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 890936d0275..f3276897859 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -260,7 +260,7 @@ config MTD_NAND_BASLER_EXCITE
help
This enables the driver for the NAND flash device found on the
Basler eXcite Smart Camera. If built as a module, the driver
- will be named "excite_nandflash.ko".
+ will be named excite_nandflash.
config MTD_NAND_CAFE
tristate "NAND support for OLPC CAFÉ chip"
@@ -282,7 +282,7 @@ config MTD_NAND_CS553X
controller is enabled for NAND, and currently requires that
the controller be in MMIO mode.
- If you say "m", the module will be called "cs553x_nand.ko".
+ If you say "m", the module will be called cs553x_nand.
config MTD_NAND_ATMEL
tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32"
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 0119220de7d..02700f769b8 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -407,16 +407,17 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
}
info->chip.ecc.mode = ecc_mode;
- info->clk = clk_get(&pdev->dev, "AEMIFCLK");
+ info->clk = clk_get(&pdev->dev, "aemif");
if (IS_ERR(info->clk)) {
ret = PTR_ERR(info->clk);
- dev_dbg(&pdev->dev, "unable to get AEMIFCLK, err %d\n", ret);
+ dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
goto err_clk;
}
ret = clk_enable(info->clk);
if (ret < 0) {
- dev_dbg(&pdev->dev, "unable to enable AEMIFCLK, err %d\n", ret);
+ dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
+ ret);
goto err_clk_enable;
}
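Aside (sketch only): with the clkdev connection id renamed to "aemif" in the hunk above, the usual clk_get() error propagation looks like this; the function name is illustrative.

	#include <linux/clk.h>
	#include <linux/err.h>

	static int example_get_clock(struct device *dev, struct clk **out)
	{
		struct clk *clk = clk_get(dev, "aemif");

		if (IS_ERR(clk))
			return PTR_ERR(clk);
		*out = clk;
		return clk_enable(clk);
	}
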
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index f3548d04801..40c26080ecd 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -831,6 +831,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
break;
case NAND_CMD_READID:
+ host->col_addr = 0;
send_read_id(host);
break;
@@ -867,6 +868,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
mtd->priv = this;
mtd->owner = THIS_MODULE;
mtd->dev.parent = &pdev->dev;
+ mtd->name = "mxc_nand";
/* 50 us command delay time */
this->chip_delay = 5;
@@ -882,8 +884,10 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->verify_buf = mxc_nand_verify_buf;
host->clk = clk_get(&pdev->dev, "nfc");
- if (IS_ERR(host->clk))
+ if (IS_ERR(host->clk)) {
+ err = PTR_ERR(host->clk);
goto eclk;
+ }
clk_enable(host->clk);
host->clk_act = 1;
@@ -896,7 +900,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
host->regs = ioremap(res->start, res->end - res->start + 1);
if (!host->regs) {
- err = -EIO;
+ err = -ENOMEM;
goto eres;
}
@@ -1011,30 +1015,35 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct mtd_info *info = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
int ret = 0;
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
- if (info)
- ret = info->suspend(info);
-
- /* Disable the NFC clock */
- clk_disable(nfc_clk); /* FIXME */
+ if (mtd) {
+ ret = mtd->suspend(mtd);
+ /* Disable the NFC clock */
+ clk_disable(host->clk);
+ }
return ret;
}
static int mxcnd_resume(struct platform_device *pdev)
{
- struct mtd_info *info = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
int ret = 0;
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
- /* Enable the NFC clock */
- clk_enable(nfc_clk); /* FIXME */
- if (info)
- info->resume(info);
+ if (mtd) {
+ /* Enable the NFC clock */
+ clk_enable(host->clk);
+ mtd->resume(mtd);
+ }
return ret;
}
@@ -1055,13 +1064,7 @@ static struct platform_driver mxcnd_driver = {
static int __init mxc_nd_init(void)
{
- /* Register the device driver structure. */
- pr_info("MXC MTD nand Driver\n");
- if (platform_driver_probe(&mxcnd_driver, mxcnd_probe) != 0) {
- printk(KERN_ERR "Driver register failed for mxcnd_driver\n");
- return -ENODEV;
- }
- return 0;
+ return platform_driver_probe(&mxcnd_driver, mxcnd_probe);
}
static void __exit mxc_nd_cleanup(void)
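Aside (illustrative only): the suspend/resume hunks above recover the driver host by walking platform drvdata -> mtd_info -> nand_chip -> host, so the NFC clock can be toggled without a file-scope clock variable. A trimmed sketch, with the host structure abbreviated to the one member the walk needs:

	#include <linux/platform_device.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/nand.h>
	#include <linux/clk.h>

	struct example_host { struct clk *clk; };	/* stand-in for the driver host */

	static struct example_host *host_from_pdev(struct platform_device *pdev)
	{
		struct mtd_info *mtd = platform_get_drvdata(pdev);
		struct nand_chip *chip = mtd ? mtd->priv : NULL;

		return chip ? chip->priv : NULL;
	}
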
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index f2e9de1414d..6391e3dc800 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -39,7 +39,6 @@
#include <mach/gpmc.h>
#include <mach/onenand.h>
#include <mach/gpio.h>
-#include <mach/pm.h>
#include <mach/dma.h>
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 1c5344aa57c..367bec63620 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -281,7 +281,7 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr)
autoirq = probe_irq_off(irq_mask);
if (autoirq == 0) {
- printk(KERN_WARNING "%s probe at %#x failed to detect IRQ line.\n",
+ pr_warning("%s probe at %#x failed to detect IRQ line.\n",
mname, ioaddr);
release_region(ioaddr, EL1_IO_EXTENT);
return -EAGAIN;
@@ -297,16 +297,16 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr)
if (autoirq)
dev->irq = autoirq;
- printk(KERN_INFO "%s: %s EtherLink at %#lx, using %sIRQ %d.\n",
+ pr_info("%s: %s EtherLink at %#lx, using %sIRQ %d.\n",
dev->name, mname, dev->base_addr,
autoirq ? "auto":"assigned ", dev->irq);
#ifdef CONFIG_IP_MULTICAST
- printk(KERN_WARNING "WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n");
+ pr_warning("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n");
#endif
if (el_debug)
- printk(KERN_DEBUG "%s", version);
+ pr_debug("%s", version);
lp = netdev_priv(dev);
memset(lp, 0, sizeof(struct net_local));
@@ -343,7 +343,7 @@ static int el_open(struct net_device *dev)
unsigned long flags;
if (el_debug > 2)
- printk(KERN_DEBUG "%s: Doing el_open()...", dev->name);
+ pr_debug("%s: Doing el_open()...\n", dev->name);
retval = request_irq(dev->irq, &el_interrupt, 0, dev->name, dev);
if (retval)
@@ -374,7 +374,7 @@ static void el_timeout(struct net_device *dev)
int ioaddr = dev->base_addr;
if (el_debug)
- printk(KERN_DEBUG "%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
+ pr_debug("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
dev->name, inb(TX_STATUS),
inb(AX_STATUS), inb(RX_STATUS));
dev->stats.tx_errors++;
@@ -483,14 +483,13 @@ static int el_start_xmit(struct sk_buff *skb, struct net_device *dev)
lp->loading = 0;
dev->trans_start = jiffies;
if (el_debug > 2)
- printk(KERN_DEBUG " queued xmit.\n");
+ pr_debug(" queued xmit.\n");
dev_kfree_skb(skb);
return 0;
}
/* A receive upset our load, despite our best efforts */
if (el_debug > 2)
- printk(KERN_DEBUG "%s: burped during tx load.\n",
- dev->name);
+ pr_debug("%s: burped during tx load.\n", dev->name);
spin_lock_irqsave(&lp->lock, flags);
} while (1);
}
@@ -540,11 +539,10 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
*/
if (el_debug > 3)
- printk(KERN_DEBUG "%s: el_interrupt() aux=%#02x",
- dev->name, axsr);
+ pr_debug("%s: el_interrupt() aux=%#02x\n", dev->name, axsr);
if (lp->loading == 1 && !lp->txing)
- printk(KERN_WARNING "%s: Inconsistent state loading while not in tx\n",
+ pr_warning("%s: Inconsistent state loading while not in tx\n",
dev->name);
if (lp->txing) {
@@ -555,19 +553,17 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
int txsr = inb(TX_STATUS);
if (lp->loading == 1) {
- if (el_debug > 2) {
- printk(KERN_DEBUG "%s: Interrupt while loading [",
- dev->name);
- printk(" txsr=%02x gp=%04x rp=%04x]\n",
- txsr, inw(GP_LOW), inw(RX_LOW));
- }
+ if (el_debug > 2)
+ pr_debug("%s: Interrupt while loading [txsr=%02x gp=%04x rp=%04x]\n",
+ dev->name, txsr, inw(GP_LOW), inw(RX_LOW));
+
/* Force a reload */
lp->loading = 2;
spin_unlock(&lp->lock);
goto out;
}
if (el_debug > 6)
- printk(KERN_DEBUG " txsr=%02x gp=%04x rp=%04x",
+ pr_debug("%s: txsr=%02x gp=%04x rp=%04x\n", dev->name,
txsr, inw(GP_LOW), inw(RX_LOW));
if ((axsr & 0x80) && (txsr & TX_READY) == 0) {
@@ -576,7 +572,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
* on trying or reset immediately ?
*/
if (el_debug > 1)
- printk(KERN_DEBUG "%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x gp=%03x rp=%03x.\n",
+ pr_debug("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x gp=%03x rp=%03x.\n",
dev->name, txsr, axsr,
inw(ioaddr + EL1_DATAPTR),
inw(ioaddr + EL1_RXPTR));
@@ -587,7 +583,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
* Timed out
*/
if (el_debug)
- printk(KERN_DEBUG "%s: Transmit failed 16 times, Ethernet jammed?\n", dev->name);
+ pr_debug("%s: Transmit failed 16 times, Ethernet jammed?\n", dev->name);
outb(AX_SYS, AX_CMD);
lp->txing = 0;
dev->stats.tx_aborted_errors++;
@@ -598,7 +594,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
*/
if (el_debug > 6)
- printk(KERN_DEBUG " retransmitting after a collision.\n");
+ pr_debug("%s: retransmitting after a collision.\n", dev->name);
/*
* Poor little chip can't reset its own start
* pointer
@@ -616,9 +612,8 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
*/
dev->stats.tx_packets++;
if (el_debug > 6)
- printk(KERN_DEBUG " Tx succeeded %s\n",
- (txsr & TX_RDY) ? "." :
- "but tx is busy!");
+ pr_debug("%s: Tx succeeded %s\n", dev->name,
+ (txsr & TX_RDY) ? "." : "but tx is busy!");
/*
* This is safe the interrupt is atomic WRT itself.
*/
@@ -633,7 +628,8 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
int rxsr = inb(RX_STATUS);
if (el_debug > 5)
- printk(KERN_DEBUG " rxsr=%02x txsr=%02x rp=%04x", rxsr, inb(TX_STATUS), inw(RX_LOW));
+ pr_debug("%s: rxsr=%02x txsr=%02x rp=%04x\n",
+ dev->name, rxsr, inb(TX_STATUS), inw(RX_LOW));
/*
* Just reading rx_status fixes most errors.
*/
@@ -643,7 +639,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
/* Handled to avoid board lock-up. */
dev->stats.rx_length_errors++;
if (el_debug > 5)
- printk(KERN_DEBUG " runt.\n");
+ pr_debug("%s: runt.\n", dev->name);
} else if (rxsr & RX_GOOD) {
/*
* Receive worked.
@@ -654,12 +650,10 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
* Nothing? Something is broken!
*/
if (el_debug > 2)
- printk(KERN_DEBUG "%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
+ pr_debug("%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
dev->name, rxsr);
el_reset(dev);
}
- if (el_debug > 3)
- printk(KERN_DEBUG ".\n");
}
/*
@@ -695,11 +689,11 @@ static void el_receive(struct net_device *dev)
pkt_len = inw(RX_LOW);
if (el_debug > 4)
- printk(KERN_DEBUG " el_receive %d.\n", pkt_len);
+ pr_debug(" el_receive %d.\n", pkt_len);
if (pkt_len < 60 || pkt_len > 1536) {
if (el_debug)
- printk(KERN_DEBUG "%s: bogus packet, length=%d\n",
+ pr_debug("%s: bogus packet, length=%d\n",
dev->name, pkt_len);
dev->stats.rx_over_errors++;
return;
@@ -718,8 +712,7 @@ static void el_receive(struct net_device *dev)
outw(0x00, GP_LOW);
if (skb == NULL) {
- printk(KERN_INFO "%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ pr_info("%s: Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
return;
} else {
@@ -753,7 +746,7 @@ static void el_reset(struct net_device *dev)
int ioaddr = dev->base_addr;
if (el_debug > 2)
- printk(KERN_INFO "3c501 reset...");
+ pr_info("3c501 reset...\n");
outb(AX_RESET, AX_CMD); /* Reset the chip */
/* Aux control, irq and loopback enabled */
outb(AX_LOOP, AX_CMD);
@@ -787,7 +780,7 @@ static int el1_close(struct net_device *dev)
int ioaddr = dev->base_addr;
if (el_debug > 2)
- printk(KERN_INFO "%s: Shutting down Ethernet card at %#x.\n",
+ pr_info("%s: Shutting down Ethernet card at %#x.\n",
dev->name, ioaddr);
netif_stop_queue(dev);
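Aside (generic sketch, not tied to this driver): the printk(KERN_<LEVEL> ...) calls are being converted to the pr_<level>() helpers, which bake the level in. One assumption worth noting: pr_debug() can compile away unless DEBUG or dynamic debug is enabled, whereas the old KERN_DEBUG printks were always built in.

	#include <linux/kernel.h>

	static void example_report(const char *name, int irq)
	{
		pr_info("%s: using IRQ %d\n", name, irq);	/* was printk(KERN_INFO ...) */
		pr_debug("%s: probe details follow\n", name);	/* was printk(KERN_DEBUG ...) */
	}
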
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 4f08bd99583..134638a9759 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -234,16 +234,16 @@ el2_probe1(struct net_device *dev, int ioaddr)
}
if (ei_debug && version_printed++ == 0)
- printk(version);
+ pr_debug("%s", version);
dev->base_addr = ioaddr;
- printk("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr);
+ pr_info("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr);
/* Retrieve and print the ethernet address. */
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(ioaddr + i);
- printk("%pM", dev->dev_addr);
+ pr_cont("%pM", dev->dev_addr);
/* Map the 8390 back into the window. */
outb(ECNTRL_THIN, ioaddr + 0x406);
@@ -256,7 +256,8 @@ el2_probe1(struct net_device *dev, int ioaddr)
outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
/* Probe for, turn on and clear the board's shared memory. */
- if (ei_debug > 2) printk(" memory jumpers %2.2x ", membase_reg);
+ if (ei_debug > 2)
+ pr_cont(" memory jumpers %2.2x ", membase_reg);
outb(EGACFR_NORM, ioaddr + 0x405); /* Enable RAM */
/* This should be probed for (or set via an ioctl()) at run-time.
@@ -268,7 +269,7 @@ el2_probe1(struct net_device *dev, int ioaddr)
#else
ei_status.interface_num = dev->mem_end & 0xf;
#endif
- printk(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex");
+ pr_cont(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex");
if ((membase_reg & 0xf0) == 0) {
dev->mem_start = 0;
@@ -292,7 +293,7 @@ el2_probe1(struct net_device *dev, int ioaddr)
writel(test_val, mem_base + i);
if (readl(mem_base) != 0xba5eba5e
|| readl(mem_base + i) != test_val) {
- printk("3c503: memory failure or memory address conflict.\n");
+ pr_warning("3c503: memory failure or memory address conflict.\n");
dev->mem_start = 0;
ei_status.name = "3c503-PIO";
iounmap(mem_base);
@@ -344,7 +345,7 @@ el2_probe1(struct net_device *dev, int ioaddr)
if (dev->irq == 2)
dev->irq = 9;
else if (dev->irq > 5 && dev->irq != 9) {
- printk("3c503: configured interrupt %d invalid, will use autoIRQ.\n",
+ pr_warning("3c503: configured interrupt %d invalid, will use autoIRQ.\n",
dev->irq);
dev->irq = 0;
}
@@ -359,7 +360,7 @@ el2_probe1(struct net_device *dev, int ioaddr)
goto out1;
if (dev->mem_start)
- printk("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n",
+ pr_info("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n",
dev->name, ei_status.name, (wordlength+1)<<3,
dev->mem_start, dev->mem_end-1);
@@ -367,7 +368,7 @@ el2_probe1(struct net_device *dev, int ioaddr)
{
ei_status.tx_start_page = EL2_MB1_START_PG;
ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
- printk("\n%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n",
+ pr_info("%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n",
dev->name, ei_status.name, (wordlength+1)<<3);
}
release_region(ioaddr + 0x400, 8);
@@ -435,15 +436,16 @@ static void
el2_reset_8390(struct net_device *dev)
{
if (ei_debug > 1) {
- printk("%s: Resetting the 3c503 board...", dev->name);
- printk("%#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR),
+ pr_debug("%s: Resetting the 3c503 board...", dev->name);
+ pr_cont(" %#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR),
E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR));
}
outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL);
ei_status.txing = 0;
outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
el2_init_card(dev);
- if (ei_debug > 1) printk("done\n");
+ if (ei_debug > 1)
+ pr_cont("done\n");
}
/* Initialize the 3c503 GA registers after a reset. */
@@ -529,7 +531,7 @@ el2_block_output(struct net_device *dev, int count,
{
if(!boguscount--)
{
- printk("%s: FIFO blocked in el2_block_output.\n", dev->name);
+ pr_notice("%s: FIFO blocked in el2_block_output.\n", dev->name);
el2_reset_8390(dev);
goto blocked;
}
@@ -581,7 +583,7 @@ el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_pag
{
if(!boguscount--)
{
- printk("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name);
+ pr_notice("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name);
memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr));
el2_reset_8390(dev);
goto blocked;
@@ -645,7 +647,7 @@ el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring
{
if(!boguscount--)
{
- printk("%s: FIFO blocked in el2_block_input.\n", dev->name);
+ pr_notice("%s: FIFO blocked in el2_block_input.\n", dev->name);
el2_reset_8390(dev);
goto blocked;
}
@@ -707,7 +709,7 @@ init_module(void)
for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
if (io[this_dev] == 0) {
if (this_dev != 0) break; /* only autoprobe 1st one */
- printk(KERN_NOTICE "3c503.c: Presently autoprobing (not recommended) for a single card.\n");
+ pr_notice("3c503.c: Presently autoprobing (not recommended) for a single card.\n");
}
dev = alloc_eip_netdev();
if (!dev)
@@ -720,7 +722,7 @@ init_module(void)
continue;
}
free_netdev(dev);
- printk(KERN_WARNING "3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]);
+ pr_warning("3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]);
break;
}
if (found)
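Aside (sketch): several hunks above print one logical line in pieces; the opener carries the log level and the continuations use pr_cont() so no new prefix is emitted mid-line. Names and strings below are illustrative.

	#include <linux/kernel.h>
	#include <linux/types.h>

	static void example_banner(const char *name, const u8 *addr)
	{
		pr_info("%s: example card at i/o base, node ", name);
		pr_cont("%pM", addr);
		pr_cont(", using internal xcvr.\n");
	}
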
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 2de1c9cd7bd..f71b3540275 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -126,26 +126,25 @@
*
*********************************************************/
-static const char filename[] = __FILE__;
+#define filename __FILE__
-static const char timeout_msg[] = "*** timeout at %s:%s (line %d) ***\n";
+#define timeout_msg "*** timeout at %s:%s (line %d) ***\n"
#define TIMEOUT_MSG(lineno) \
- printk(timeout_msg, filename,__func__,(lineno))
+ pr_notice(timeout_msg, filename, __func__, (lineno))
-static const char invalid_pcb_msg[] =
-"*** invalid pcb length %d at %s:%s (line %d) ***\n";
+#define invalid_pcb_msg "*** invalid pcb length %d at %s:%s (line %d) ***\n"
#define INVALID_PCB_MSG(len) \
- printk(invalid_pcb_msg, (len),filename,__func__,__LINE__)
+ pr_notice(invalid_pcb_msg, (len), filename, __func__, __LINE__)
-static char search_msg[] __initdata = KERN_INFO "%s: Looking for 3c505 adapter at address %#x...";
+#define search_msg "%s: Looking for 3c505 adapter at address %#x..."
-static char stilllooking_msg[] __initdata = "still looking...";
+#define stilllooking_msg "still looking..."
-static char found_msg[] __initdata = "found.\n";
+#define found_msg "found.\n"
-static char notfound_msg[] __initdata = "not found (reason = %d)\n";
+#define notfound_msg "not found (reason = %d)\n"
-static char couldnot_msg[] __initdata = KERN_INFO "%s: 3c505 not found\n";
+#define couldnot_msg "%s: 3c505 not found\n"
/*********************************************************
*
@@ -284,7 +283,7 @@ static inline void adapter_reset(struct net_device *dev)
outb_control(orig_hcr, dev);
if (!start_receive(dev, &adapter->tx_pcb))
- printk(KERN_ERR "%s: start receive command failed \n", dev->name);
+ pr_err("%s: start receive command failed\n", dev->name);
}
/* Check to make sure that a DMA transfer hasn't timed out. This should
@@ -296,7 +295,9 @@ static inline void check_3c505_dma(struct net_device *dev)
elp_device *adapter = netdev_priv(dev);
if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) {
unsigned long flags, f;
- printk(KERN_ERR "%s: DMA %s timed out, %d bytes left\n", dev->name, adapter->current_dma.direction ? "download" : "upload", get_dma_residue(dev->dma));
+ pr_err("%s: DMA %s timed out, %d bytes left\n", dev->name,
+ adapter->current_dma.direction ? "download" : "upload",
+ get_dma_residue(dev->dma));
spin_lock_irqsave(&adapter->lock, flags);
adapter->dmaing = 0;
adapter->busy = 0;
@@ -321,7 +322,7 @@ static inline bool send_pcb_slow(unsigned int base_addr, unsigned char byte)
if (inb_status(base_addr) & HCRE)
return false;
}
- printk(KERN_WARNING "3c505: send_pcb_slow timed out\n");
+ pr_warning("3c505: send_pcb_slow timed out\n");
return true;
}
@@ -333,7 +334,7 @@ static inline bool send_pcb_fast(unsigned int base_addr, unsigned char byte)
if (inb_status(base_addr) & HCRE)
return false;
}
- printk(KERN_WARNING "3c505: send_pcb_fast timed out\n");
+ pr_warning("3c505: send_pcb_fast timed out\n");
return true;
}
@@ -386,7 +387,7 @@ static bool send_pcb(struct net_device *dev, pcb_struct * pcb)
/* Avoid contention */
if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) {
if (elp_debug >= 3) {
- printk(KERN_DEBUG "%s: send_pcb entered while threaded\n", dev->name);
+ pr_debug("%s: send_pcb entered while threaded\n", dev->name);
}
return false;
}
@@ -424,14 +425,15 @@ static bool send_pcb(struct net_device *dev, pcb_struct * pcb)
case ASF_PCB_NAK:
#ifdef ELP_DEBUG
- printk(KERN_DEBUG "%s: send_pcb got NAK\n", dev->name);
+ pr_debug("%s: send_pcb got NAK\n", dev->name);
#endif
goto abort;
}
}
if (elp_debug >= 1)
- printk(KERN_DEBUG "%s: timeout waiting for PCB acknowledge (status %02x)\n", dev->name, inb_status(dev->base_addr));
+ pr_debug("%s: timeout waiting for PCB acknowledge (status %02x)\n",
+ dev->name, inb_status(dev->base_addr));
goto abort;
sti_abort:
@@ -481,7 +483,7 @@ static bool receive_pcb(struct net_device *dev, pcb_struct * pcb)
while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
if (time_after_eq(jiffies, timeout)) {
TIMEOUT_MSG(__LINE__);
- printk(KERN_INFO "%s: status %02x\n", dev->name, stat);
+ pr_info("%s: status %02x\n", dev->name, stat);
return false;
}
pcb->length = inb_command(dev->base_addr);
@@ -518,7 +520,7 @@ static bool receive_pcb(struct net_device *dev, pcb_struct * pcb)
/* safety check total length vs data length */
if (total_length != (pcb->length + 2)) {
if (elp_debug >= 2)
- printk(KERN_WARNING "%s: mangled PCB received\n", dev->name);
+ pr_warning("%s: mangled PCB received\n", dev->name);
set_hsf(dev, HSF_PCB_NAK);
return false;
}
@@ -527,7 +529,7 @@ static bool receive_pcb(struct net_device *dev, pcb_struct * pcb)
if (test_and_set_bit(0, (void *) &adapter->busy)) {
if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) {
set_hsf(dev, HSF_PCB_NAK);
- printk(KERN_WARNING "%s: PCB rejected, transfer in progress and backlog full\n", dev->name);
+ pr_warning("%s: PCB rejected, transfer in progress and backlog full\n", dev->name);
pcb->command = 0;
return true;
} else {
@@ -552,7 +554,7 @@ static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb)
elp_device *adapter = netdev_priv(dev);
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: restarting receiver\n", dev->name);
+ pr_debug("%s: restarting receiver\n", dev->name);
tx_pcb->command = CMD_RECEIVE_PACKET;
tx_pcb->length = sizeof(struct Rcv_pkt);
tx_pcb->data.rcv_pkt.buf_seg
@@ -586,7 +588,7 @@ static void receive_packet(struct net_device *dev, int len)
skb = dev_alloc_skb(rlen + 2);
if (!skb) {
- printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
+ pr_warning("%s: memory squeeze, dropping packet\n", dev->name);
target = adapter->dma_buffer;
adapter->current_dma.target = NULL;
/* FIXME: stats */
@@ -604,7 +606,8 @@ static void receive_packet(struct net_device *dev, int len)
/* if this happens, we die */
if (test_and_set_bit(0, (void *) &adapter->dmaing))
- printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction);
+ pr_err("%s: rx blocked, DMA in progress, dir %d\n",
+ dev->name, adapter->current_dma.direction);
adapter->current_dma.direction = 0;
adapter->current_dma.length = rlen;
@@ -623,14 +626,14 @@ static void receive_packet(struct net_device *dev, int len)
release_dma_lock(flags);
if (elp_debug >= 3) {
- printk(KERN_DEBUG "%s: rx DMA transfer started\n", dev->name);
+ pr_debug("%s: rx DMA transfer started\n", dev->name);
}
if (adapter->rx_active)
adapter->rx_active--;
if (!adapter->busy)
- printk(KERN_WARNING "%s: receive_packet called, busy not set.\n", dev->name);
+ pr_warning("%s: receive_packet called, busy not set.\n", dev->name);
}
/******************************************************
@@ -655,12 +658,13 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
* has a DMA transfer finished?
*/
if (inb_status(dev->base_addr) & DONE) {
- if (!adapter->dmaing) {
- printk(KERN_WARNING "%s: phantom DMA completed\n", dev->name);
- }
- if (elp_debug >= 3) {
- printk(KERN_DEBUG "%s: %s DMA complete, status %02x\n", dev->name, adapter->current_dma.direction ? "tx" : "rx", inb_status(dev->base_addr));
- }
+ if (!adapter->dmaing)
+ pr_warning("%s: phantom DMA completed\n", dev->name);
+
+ if (elp_debug >= 3)
+ pr_debug("%s: %s DMA complete, status %02x\n", dev->name,
+ adapter->current_dma.direction ? "tx" : "rx",
+ inb_status(dev->base_addr));
outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
if (adapter->current_dma.direction) {
@@ -682,7 +686,7 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
int t = adapter->rx_backlog.length[adapter->rx_backlog.out];
adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out);
if (elp_debug >= 2)
- printk(KERN_DEBUG "%s: receiving backlogged packet (%d)\n", dev->name, t);
+ pr_debug("%s: receiving backlogged packet (%d)\n", dev->name, t);
receive_packet(dev, t);
} else {
adapter->busy = 0;
@@ -713,21 +717,23 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
len = adapter->irx_pcb.data.rcv_resp.pkt_len;
dlen = adapter->irx_pcb.data.rcv_resp.buf_len;
if (adapter->irx_pcb.data.rcv_resp.timeout != 0) {
- printk(KERN_ERR "%s: interrupt - packet not received correctly\n", dev->name);
+ pr_err("%s: interrupt - packet not received correctly\n", dev->name);
} else {
if (elp_debug >= 3) {
- printk(KERN_DEBUG "%s: interrupt - packet received of length %i (%i)\n", dev->name, len, dlen);
+ pr_debug("%s: interrupt - packet received of length %i (%i)\n",
+ dev->name, len, dlen);
}
if (adapter->irx_pcb.command == 0xff) {
if (elp_debug >= 2)
- printk(KERN_DEBUG "%s: adding packet to backlog (len = %d)\n", dev->name, dlen);
+ pr_debug("%s: adding packet to backlog (len = %d)\n",
+ dev->name, dlen);
adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen;
adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in);
} else {
receive_packet(dev, dlen);
}
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: packet received\n", dev->name);
+ pr_debug("%s: packet received\n", dev->name);
}
break;
@@ -737,7 +743,7 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
case CMD_CONFIGURE_82586_RESPONSE:
adapter->got[CMD_CONFIGURE_82586] = 1;
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: interrupt - configure response received\n", dev->name);
+ pr_debug("%s: interrupt - configure response received\n", dev->name);
break;
/*
@@ -746,7 +752,7 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
case CMD_CONFIGURE_ADAPTER_RESPONSE:
adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1;
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: Adapter memory configuration %s.\n", dev->name,
+ pr_debug("%s: Adapter memory configuration %s.\n", dev->name,
adapter->irx_pcb.data.failed ? "failed" : "succeeded");
break;
@@ -756,7 +762,7 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
case CMD_LOAD_MULTICAST_RESPONSE:
adapter->got[CMD_LOAD_MULTICAST_LIST] = 1;
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: Multicast address list loading %s.\n", dev->name,
+ pr_debug("%s: Multicast address list loading %s.\n", dev->name,
adapter->irx_pcb.data.failed ? "failed" : "succeeded");
break;
@@ -766,7 +772,7 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
case CMD_SET_ADDRESS_RESPONSE:
adapter->got[CMD_SET_STATION_ADDRESS] = 1;
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: Ethernet address setting %s.\n", dev->name,
+ pr_debug("%s: Ethernet address setting %s.\n", dev->name,
adapter->irx_pcb.data.failed ? "failed" : "succeeded");
break;
@@ -783,7 +789,7 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
adapter->got[CMD_NETWORK_STATISTICS] = 1;
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: interrupt - statistics response received\n", dev->name);
+ pr_debug("%s: interrupt - statistics response received\n", dev->name);
break;
/*
@@ -791,17 +797,17 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
*/
case CMD_TRANSMIT_PACKET_COMPLETE:
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: interrupt - packet sent\n", dev->name);
+ pr_debug("%s: interrupt - packet sent\n", dev->name);
if (!netif_running(dev))
break;
switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
case 0xffff:
dev->stats.tx_aborted_errors++;
- printk(KERN_INFO "%s: transmit timed out, network cable problem?\n", dev->name);
+ pr_info("%s: transmit timed out, network cable problem?\n", dev->name);
break;
case 0xfffe:
dev->stats.tx_fifo_errors++;
- printk(KERN_INFO "%s: transmit timed out, FIFO underrun\n", dev->name);
+ pr_info("%s: transmit timed out, FIFO underrun\n", dev->name);
break;
}
netif_wake_queue(dev);
@@ -811,11 +817,12 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
* some unknown PCB
*/
default:
- printk(KERN_DEBUG "%s: unknown PCB received - %2.2x\n", dev->name, adapter->irx_pcb.command);
+ pr_debug("%s: unknown PCB received - %2.2x\n",
+ dev->name, adapter->irx_pcb.command);
break;
}
} else {
- printk(KERN_WARNING "%s: failed to read PCB on interrupt\n", dev->name);
+ pr_warning("%s: failed to read PCB on interrupt\n", dev->name);
adapter_reset(dev);
}
}
@@ -844,13 +851,13 @@ static int elp_open(struct net_device *dev)
int retval;
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: request to open device\n", dev->name);
+ pr_debug("%s: request to open device\n", dev->name);
/*
* make sure we actually found the device
*/
if (adapter == NULL) {
- printk(KERN_ERR "%s: Opening a non-existent physical device\n", dev->name);
+ pr_err("%s: Opening a non-existent physical device\n", dev->name);
return -EAGAIN;
}
/*
@@ -880,17 +887,17 @@ static int elp_open(struct net_device *dev)
* install our interrupt service routine
*/
if ((retval = request_irq(dev->irq, &elp_interrupt, 0, dev->name, dev))) {
- printk(KERN_ERR "%s: could not allocate IRQ%d\n", dev->name, dev->irq);
+ pr_err("%s: could not allocate IRQ%d\n", dev->name, dev->irq);
return retval;
}
if ((retval = request_dma(dev->dma, dev->name))) {
free_irq(dev->irq, dev);
- printk(KERN_ERR "%s: could not allocate DMA%d channel\n", dev->name, dev->dma);
+ pr_err("%s: could not allocate DMA%d channel\n", dev->name, dev->dma);
return retval;
}
adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE);
if (!adapter->dma_buffer) {
- printk(KERN_ERR "%s: could not allocate DMA buffer\n", dev->name);
+ pr_err("%s: could not allocate DMA buffer\n", dev->name);
free_dma(dev->dma);
free_irq(dev->irq, dev);
return -ENOMEM;
@@ -906,7 +913,7 @@ static int elp_open(struct net_device *dev)
* configure adapter memory: we need 10 multicast addresses, default==0
*/
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: sending 3c505 memory configuration command\n", dev->name);
+ pr_debug("%s: sending 3c505 memory configuration command\n", dev->name);
adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
adapter->tx_pcb.data.memconf.cmd_q = 10;
adapter->tx_pcb.data.memconf.rcv_q = 20;
@@ -917,7 +924,7 @@ static int elp_open(struct net_device *dev)
adapter->tx_pcb.length = sizeof(struct Memconf);
adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0;
if (!send_pcb(dev, &adapter->tx_pcb))
- printk(KERN_ERR "%s: couldn't send memory configuration command\n", dev->name);
+ pr_err("%s: couldn't send memory configuration command\n", dev->name);
else {
unsigned long timeout = jiffies + TIMEOUT;
while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && time_before(jiffies, timeout));
@@ -930,13 +937,13 @@ static int elp_open(struct net_device *dev)
* configure adapter to receive broadcast messages and wait for response
*/
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: sending 82586 configure command\n", dev->name);
+ pr_debug("%s: sending 82586 configure command\n", dev->name);
adapter->tx_pcb.command = CMD_CONFIGURE_82586;
adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
adapter->tx_pcb.length = 2;
adapter->got[CMD_CONFIGURE_82586] = 0;
if (!send_pcb(dev, &adapter->tx_pcb))
- printk(KERN_ERR "%s: couldn't send 82586 configure command\n", dev->name);
+ pr_err("%s: couldn't send 82586 configure command\n", dev->name);
else {
unsigned long timeout = jiffies + TIMEOUT;
while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout));
@@ -952,7 +959,7 @@ static int elp_open(struct net_device *dev)
*/
prime_rx(dev);
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: %d receive PCBs active\n", dev->name, adapter->rx_active);
+ pr_debug("%s: %d receive PCBs active\n", dev->name, adapter->rx_active);
/*
* device is now officially open!
@@ -982,7 +989,7 @@ static bool send_packet(struct net_device *dev, struct sk_buff *skb)
if (test_and_set_bit(0, (void *) &adapter->busy)) {
if (elp_debug >= 2)
- printk(KERN_DEBUG "%s: transmit blocked\n", dev->name);
+ pr_debug("%s: transmit blocked\n", dev->name);
return false;
}
@@ -1004,7 +1011,7 @@ static bool send_packet(struct net_device *dev, struct sk_buff *skb)
}
/* if this happens, we die */
if (test_and_set_bit(0, (void *) &adapter->dmaing))
- printk(KERN_DEBUG "%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);
+ pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);
adapter->current_dma.direction = 1;
adapter->current_dma.start_time = jiffies;
@@ -1030,7 +1037,7 @@ static bool send_packet(struct net_device *dev, struct sk_buff *skb)
release_dma_lock(flags);
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: DMA transfer started\n", dev->name);
+ pr_debug("%s: DMA transfer started\n", dev->name);
return true;
}
@@ -1044,9 +1051,10 @@ static void elp_timeout(struct net_device *dev)
int stat;
stat = inb_status(dev->base_addr);
- printk(KERN_WARNING "%s: transmit timed out, lost %s?\n", dev->name, (stat & ACRF) ? "interrupt" : "command");
+ pr_warning("%s: transmit timed out, lost %s?\n", dev->name,
+ (stat & ACRF) ? "interrupt" : "command");
if (elp_debug >= 1)
- printk(KERN_DEBUG "%s: status %#02x\n", dev->name, stat);
+ pr_debug("%s: status %#02x\n", dev->name, stat);
dev->trans_start = jiffies;
dev->stats.tx_dropped++;
netif_wake_queue(dev);
@@ -1068,7 +1076,7 @@ static int elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
check_3c505_dma(dev);
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: request to send packet of length %d\n", dev->name, (int) skb->len);
+ pr_debug("%s: request to send packet of length %d\n", dev->name, (int) skb->len);
netif_stop_queue(dev);
@@ -1077,13 +1085,13 @@ static int elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (!send_packet(dev, skb)) {
if (elp_debug >= 2) {
- printk(KERN_DEBUG "%s: failed to transmit packet\n", dev->name);
+ pr_debug("%s: failed to transmit packet\n", dev->name);
}
spin_unlock_irqrestore(&adapter->lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: packet of length %d sent\n", dev->name, (int) skb->len);
+ pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len);
/*
* start the transmit timeout
@@ -1107,7 +1115,7 @@ static struct net_device_stats *elp_get_stats(struct net_device *dev)
elp_device *adapter = netdev_priv(dev);
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: request for stats\n", dev->name);
+ pr_debug("%s: request for stats\n", dev->name);
/* If the device is closed, just return the latest stats we have,
- we cannot ask from the adapter without interrupts */
@@ -1119,7 +1127,7 @@ static struct net_device_stats *elp_get_stats(struct net_device *dev)
adapter->tx_pcb.length = 0;
adapter->got[CMD_NETWORK_STATISTICS] = 0;
if (!send_pcb(dev, &adapter->tx_pcb))
- printk(KERN_ERR "%s: couldn't send get statistics command\n", dev->name);
+ pr_err("%s: couldn't send get statistics command\n", dev->name);
else {
unsigned long timeout = jiffies + TIMEOUT;
while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout));
@@ -1169,7 +1177,7 @@ static int elp_close(struct net_device *dev)
elp_device *adapter = netdev_priv(dev);
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: request to close device\n", dev->name);
+ pr_debug("%s: request to close device\n", dev->name);
netif_stop_queue(dev);
@@ -1213,7 +1221,7 @@ static void elp_set_mc_list(struct net_device *dev)
unsigned long flags;
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: request to set multicast list\n", dev->name);
+ pr_debug("%s: request to set multicast list\n", dev->name);
spin_lock_irqsave(&adapter->lock, flags);
@@ -1228,7 +1236,7 @@ static void elp_set_mc_list(struct net_device *dev)
}
adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
if (!send_pcb(dev, &adapter->tx_pcb))
- printk(KERN_ERR "%s: couldn't send set_multicast command\n", dev->name);
+ pr_err("%s: couldn't send set_multicast command\n", dev->name);
else {
unsigned long timeout = jiffies + TIMEOUT;
while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && time_before(jiffies, timeout));
@@ -1247,14 +1255,14 @@ static void elp_set_mc_list(struct net_device *dev)
* and wait for response
*/
if (elp_debug >= 3)
- printk(KERN_DEBUG "%s: sending 82586 configure command\n", dev->name);
+ pr_debug("%s: sending 82586 configure command\n", dev->name);
adapter->tx_pcb.command = CMD_CONFIGURE_82586;
adapter->tx_pcb.length = 2;
adapter->got[CMD_CONFIGURE_82586] = 0;
if (!send_pcb(dev, &adapter->tx_pcb))
{
spin_unlock_irqrestore(&adapter->lock, flags);
- printk(KERN_ERR "%s: couldn't send 82586 configure command\n", dev->name);
+ pr_err("%s: couldn't send 82586 configure command\n", dev->name);
}
else {
unsigned long timeout = jiffies + TIMEOUT;
@@ -1283,17 +1291,17 @@ static int __init elp_sense(struct net_device *dev)
orig_HSR = inb_status(addr);
if (elp_debug > 0)
- printk(search_msg, name, addr);
+ pr_debug(search_msg, name, addr);
if (orig_HSR == 0xff) {
if (elp_debug > 0)
- printk(notfound_msg, 1);
+ pr_cont(notfound_msg, 1);
goto out;
}
/* Wait for a while; the adapter may still be booting up */
if (elp_debug > 0)
- printk(stilllooking_msg);
+ pr_cont(stilllooking_msg);
if (orig_HSR & DIR) {
/* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */
@@ -1301,7 +1309,7 @@ static int __init elp_sense(struct net_device *dev)
msleep(300);
if (inb_status(addr) & DIR) {
if (elp_debug > 0)
- printk(notfound_msg, 2);
+ pr_cont(notfound_msg, 2);
goto out;
}
} else {
@@ -1310,7 +1318,7 @@ static int __init elp_sense(struct net_device *dev)
msleep(300);
if (!(inb_status(addr) & DIR)) {
if (elp_debug > 0)
- printk(notfound_msg, 3);
+ pr_cont(notfound_msg, 3);
goto out;
}
}
@@ -1318,7 +1326,7 @@ static int __init elp_sense(struct net_device *dev)
* It certainly looks like a 3c505.
*/
if (elp_debug > 0)
- printk(found_msg);
+ pr_cont(found_msg);
return 0;
out:
@@ -1349,7 +1357,7 @@ static int __init elp_autodetect(struct net_device *dev)
/* could not find an adapter */
if (elp_debug > 0)
- printk(couldnot_msg, dev->name);
+ pr_debug(couldnot_msg, dev->name);
return 0; /* Because of this, the layer above will return -ENODEV */
}
@@ -1424,16 +1432,16 @@ static int __init elplus_setup(struct net_device *dev)
/* Nope, it's ignoring the command register. This means that
* either it's still booting up, or it's died.
*/
- printk(KERN_ERR "%s: command register wouldn't drain, ", dev->name);
+ pr_err("%s: command register wouldn't drain, ", dev->name);
if ((inb_status(dev->base_addr) & 7) == 3) {
/* If the adapter status is 3, it *could* still be booting.
* Give it the benefit of the doubt for 10 seconds.
*/
- printk("assuming 3c505 still starting\n");
+ pr_cont("assuming 3c505 still starting\n");
timeout = jiffies + 10*HZ;
while (time_before(jiffies, timeout) && (inb_status(dev->base_addr) & 7));
if (inb_status(dev->base_addr) & 7) {
- printk(KERN_ERR "%s: 3c505 failed to start\n", dev->name);
+ pr_err("%s: 3c505 failed to start\n", dev->name);
} else {
okay = 1; /* It started */
}
@@ -1441,7 +1449,7 @@ static int __init elplus_setup(struct net_device *dev)
/* Otherwise, it must just be in a strange
* state. We probably need to kick it.
*/
- printk("3c505 is sulking\n");
+ pr_cont("3c505 is sulking\n");
}
}
for (tries = 0; tries < 5 && okay; tries++) {
@@ -1454,18 +1462,19 @@ static int __init elplus_setup(struct net_device *dev)
adapter->tx_pcb.length = 0;
cookie = probe_irq_on();
if (!send_pcb(dev, &adapter->tx_pcb)) {
- printk(KERN_ERR "%s: could not send first PCB\n", dev->name);
+ pr_err("%s: could not send first PCB\n", dev->name);
probe_irq_off(cookie);
continue;
}
if (!receive_pcb(dev, &adapter->rx_pcb)) {
- printk(KERN_ERR "%s: could not read first PCB\n", dev->name);
+ pr_err("%s: could not read first PCB\n", dev->name);
probe_irq_off(cookie);
continue;
}
if ((adapter->rx_pcb.command != CMD_ADDRESS_RESPONSE) ||
(adapter->rx_pcb.length != 6)) {
- printk(KERN_ERR "%s: first PCB wrong (%d, %d)\n", dev->name, adapter->rx_pcb.command, adapter->rx_pcb.length);
+ pr_err("%s: first PCB wrong (%d, %d)\n", dev->name,
+ adapter->rx_pcb.command, adapter->rx_pcb.length);
probe_irq_off(cookie);
continue;
}
@@ -1474,32 +1483,32 @@ static int __init elplus_setup(struct net_device *dev)
/* It's broken. Do a hard reset to re-initialise the board,
* and try again.
*/
- printk(KERN_INFO "%s: resetting adapter\n", dev->name);
+ pr_info("%s: resetting adapter\n", dev->name);
outb_control(adapter->hcr_val | FLSH | ATTN, dev);
outb_control(adapter->hcr_val & ~(FLSH | ATTN), dev);
}
- printk(KERN_ERR "%s: failed to initialise 3c505\n", dev->name);
+ pr_err("%s: failed to initialise 3c505\n", dev->name);
goto out;
okay:
if (dev->irq) { /* Is there a preset IRQ? */
int rpt = probe_irq_off(cookie);
if (dev->irq != rpt) {
- printk(KERN_WARNING "%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt);
+ pr_warning("%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt);
}
/* if dev->irq == probe_irq_off(cookie), all is well */
} else /* No preset IRQ; just use what we can detect */
dev->irq = probe_irq_off(cookie);
switch (dev->irq) { /* Legal, sane? */
case 0:
- printk(KERN_ERR "%s: IRQ probe failed: check 3c505 jumpers.\n",
+ pr_err("%s: IRQ probe failed: check 3c505 jumpers.\n",
dev->name);
goto out;
case 1:
case 6:
case 8:
case 13:
- printk(KERN_ERR "%s: Impossible IRQ %d reported by probe_irq_off().\n",
+ pr_err("%s: Impossible IRQ %d reported by probe_irq_off().\n",
dev->name, dev->irq);
goto out;
}
@@ -1521,7 +1530,7 @@ static int __init elplus_setup(struct net_device *dev)
dev->dma = dev->mem_start & 7;
}
else {
- printk(KERN_WARNING "%s: warning, DMA channel not specified, using default\n", dev->name);
+ pr_warning("%s: warning, DMA channel not specified, using default\n", dev->name);
dev->dma = ELP_DMA;
}
}
@@ -1529,11 +1538,8 @@ static int __init elplus_setup(struct net_device *dev)
/*
* print remainder of startup message
*/
- printk(KERN_INFO "%s: 3c505 at %#lx, irq %d, dma %d, "
- "addr %pM, ",
- dev->name, dev->base_addr, dev->irq, dev->dma,
- dev->dev_addr);
-
+ pr_info("%s: 3c505 at %#lx, irq %d, dma %d, addr %pM, ",
+ dev->name, dev->base_addr, dev->irq, dev->dma, dev->dev_addr);
/*
* read more information from the adapter
*/
@@ -1544,9 +1550,10 @@ static int __init elplus_setup(struct net_device *dev)
!receive_pcb(dev, &adapter->rx_pcb) ||
(adapter->rx_pcb.command != CMD_ADAPTER_INFO_RESPONSE) ||
(adapter->rx_pcb.length != 10)) {
- printk("not responding to second PCB\n");
+ pr_cont("not responding to second PCB\n");
}
- printk("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers, adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz);
+ pr_cont("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers,
+ adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz);
/*
* reconfigure the adapter memory to better suit our purposes
@@ -1563,10 +1570,10 @@ static int __init elplus_setup(struct net_device *dev)
!receive_pcb(dev, &adapter->rx_pcb) ||
(adapter->rx_pcb.command != CMD_CONFIGURE_ADAPTER_RESPONSE) ||
(adapter->rx_pcb.length != 2)) {
- printk(KERN_ERR "%s: could not configure adapter memory\n", dev->name);
+ pr_err("%s: could not configure adapter memory\n", dev->name);
}
if (adapter->rx_pcb.data.configure) {
- printk(KERN_ERR "%s: adapter configuration failed\n", dev->name);
+ pr_err("%s: adapter configuration failed\n", dev->name);
}
dev->netdev_ops = &elp_netdev_ops;
@@ -1631,17 +1638,17 @@ int __init init_module(void)
dev->dma = dma[this_dev];
} else {
dev->dma = ELP_DMA;
- printk(KERN_WARNING "3c505.c: warning, using default DMA channel,\n");
+ pr_warning("3c505.c: warning, using default DMA channel,\n");
}
if (io[this_dev] == 0) {
if (this_dev) {
free_netdev(dev);
break;
}
- printk(KERN_NOTICE "3c505.c: module autoprobe not recommended, give io=xx.\n");
+ pr_notice("3c505.c: module autoprobe not recommended, give io=xx.\n");
}
if (elplus_setup(dev) != 0) {
- printk(KERN_WARNING "3c505.c: Failed to register card at 0x%x.\n", io[this_dev]);
+ pr_warning("3c505.c: Failed to register card at 0x%x.\n", io[this_dev]);
free_netdev(dev);
break;
}
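Aside (hypothetical driver sketch): the elp_start_xmit hunk above returns NETDEV_TX_BUSY instead of a bare 1 when the packet cannot be queued, so the core requeues the skb. The general shape of that return convention:

	#include <linux/netdevice.h>

	static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (!try_queue_for_tx(skb, dev))	/* hypothetical helper */
			return NETDEV_TX_BUSY;		/* core will retry this skb */

		return NETDEV_TX_OK;
	}
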
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index fbbaf826def..96b86659381 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -364,7 +364,7 @@ static const struct net_device_ops netdev_ops = {
static int __init el16_probe1(struct net_device *dev, int ioaddr)
{
- static unsigned char init_ID_done, version_printed;
+ static unsigned char init_ID_done;
int i, irq, irqval, retval;
struct net_local *lp;
@@ -391,10 +391,7 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
goto out;
}
- if (net_debug && version_printed++ == 0)
- printk(version);
-
- printk("%s: 3c507 at %#x,", dev->name, ioaddr);
+ pr_info("%s: 3c507 at %#x,", dev->name, ioaddr);
/* We should make a few more checks here, like the first three octets of
the S.A. for the manufacturer's code. */
@@ -403,7 +400,8 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
irqval = request_irq(irq, &el16_interrupt, 0, DRV_NAME, dev);
if (irqval) {
- printk(KERN_ERR "3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval);
+ pr_cont("\n");
+ pr_err("3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval);
retval = -EAGAIN;
goto out;
}
@@ -414,7 +412,7 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
outb(0x01, ioaddr + MISC_CTRL);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(ioaddr + i);
- printk(" %pM", dev->dev_addr);
+ pr_cont(" %pM", dev->dev_addr);
if (mem_start)
net_debug = mem_start & 7;
@@ -443,18 +441,18 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
dev->if_port = (inb(ioaddr + ROM_CONFIG) & 0x80) ? 1 : 0;
dev->irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
- printk(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq,
+ pr_cont(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq,
dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1);
if (net_debug)
- printk(version);
+ pr_debug("%s", version);
lp = netdev_priv(dev);
memset(lp, 0, sizeof(*lp));
spin_lock_init(&lp->lock);
lp->base = ioremap(dev->mem_start, RX_BUF_END);
if (!lp->base) {
- printk(KERN_ERR "3c507: unable to remap memory\n");
+ pr_err("3c507: unable to remap memory\n");
retval = -EAGAIN;
goto out1;
}
@@ -488,20 +486,20 @@ static void el16_tx_timeout (struct net_device *dev)
void __iomem *shmem = lp->base;
if (net_debug > 1)
- printk ("%s: transmit timed out, %s? ", dev->name,
+ pr_debug("%s: transmit timed out, %s? ", dev->name,
readw(shmem + iSCB_STATUS) & 0x8000 ? "IRQ conflict" :
"network cable problem");
/* Try to restart the adaptor. */
if (lp->last_restart == dev->stats.tx_packets) {
if (net_debug > 1)
- printk ("Resetting board.\n");
+ pr_cont("Resetting board.\n");
/* Completely reset the adaptor. */
init_82586_mem (dev);
lp->tx_pkts_in_ring = 0;
} else {
/* Issue the channel attention signal and hope it "gets better". */
if (net_debug > 1)
- printk ("Kicking board.\n");
+ pr_cont("Kicking board.\n");
writew(0xf000 | CUC_START | RX_START, shmem + iSCB_CMD);
outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
lp->last_restart = dev->stats.tx_packets;
@@ -553,7 +551,8 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
void __iomem *shmem;
if (dev == NULL) {
- printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ pr_err("%s: net_interrupt(): irq %d for unknown device.\n",
+ dev->name, irq);
return IRQ_NONE;
}
@@ -566,7 +565,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
status = readw(shmem+iSCB_STATUS);
if (net_debug > 4) {
- printk("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status);
+ pr_debug("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status);
}
/* Disable the 82586's input to the interrupt line. */
@@ -577,7 +576,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
unsigned short tx_status = readw(shmem+lp->tx_reap);
if (!(tx_status & 0x8000)) {
if (net_debug > 5)
- printk("Tx command incomplete (%#x).\n", lp->tx_reap);
+ pr_debug("Tx command incomplete (%#x).\n", lp->tx_reap);
break;
}
/* Tx unsuccessful or some interesting status bit set. */
@@ -591,7 +590,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
}
dev->stats.tx_packets++;
if (net_debug > 5)
- printk("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status);
+ pr_debug("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status);
lp->tx_reap += TX_BUF_SIZE;
if (lp->tx_reap > RX_BUF_START - TX_BUF_SIZE)
lp->tx_reap = TX_BUF_START;
@@ -606,7 +605,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
if (status & 0x4000) { /* Packet received. */
if (net_debug > 5)
- printk("Received packet, rx_head %04x.\n", lp->rx_head);
+ pr_debug("Received packet, rx_head %04x.\n", lp->rx_head);
el16_rx(dev);
}
@@ -615,7 +614,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
if ((status & 0x0700) != 0x0200 && netif_running(dev)) {
if (net_debug)
- printk("%s: Command unit stopped, status %04x, restarting.\n",
+ pr_debug("%s: Command unit stopped, status %04x, restarting.\n",
dev->name, status);
/* If this ever occurs we should really re-write the idle loop, reset
the Tx list, and do a complete restart of the command unit.
@@ -627,7 +626,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
/* The Rx unit is not ready, it must be hung. Restart the receiver by
initializing the rx buffers, and issuing an Rx start command. */
if (net_debug)
- printk("%s: Rx unit stopped, status %04x, restarting.\n",
+ pr_debug("%s: Rx unit stopped, status %04x, restarting.\n",
dev->name, status);
init_rx_bufs(dev);
writew(RX_BUF_START,shmem+iSCB_RFA);
@@ -753,9 +752,8 @@ static void init_82586_mem(struct net_device *dev)
int boguscnt = 50;
while (readw(shmem+iSCB_STATUS) == 0)
if (--boguscnt == 0) {
- printk("%s: i82586 initialization timed out with status %04x, "
- "cmd %04x.\n", dev->name,
- readw(shmem+iSCB_STATUS), readw(shmem+iSCB_CMD));
+ pr_warning("%s: i82586 initialization timed out with status %04x, cmd %04x.\n",
+ dev->name, readw(shmem+iSCB_STATUS), readw(shmem+iSCB_CMD));
break;
}
/* Issue channel-attn -- the 82586 won't start. */
@@ -765,7 +763,7 @@ static void init_82586_mem(struct net_device *dev)
/* Disable loopback and enable interrupts. */
outb(0x84, ioaddr + MISC_CTRL);
if (net_debug > 4)
- printk("%s: Initialized 82586, status %04x.\n", dev->name,
+ pr_debug("%s: Initialized 82586, status %04x.\n", dev->name,
readw(shmem+iSCB_STATUS));
return;
}
@@ -810,7 +808,7 @@ static void hardware_send_packet(struct net_device *dev, void *buf, short length
lp->tx_head = TX_BUF_START;
if (net_debug > 4) {
- printk("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n",
+ pr_debug("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n",
dev->name, ioaddr, length, tx_block, lp->tx_head);
}
@@ -838,7 +836,7 @@ static void el16_rx(struct net_device *dev)
if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22
|| (pkt_len & 0xC000) != 0xC000) {
- printk(KERN_ERR "%s: Rx frame at %#x corrupted, "
+ pr_err("%s: Rx frame at %#x corrupted, "
"status %04x cmd %04x next %04x "
"data-buf @%04x %04x.\n",
dev->name, rx_head, frame_status, rfd_cmd,
@@ -858,8 +856,7 @@ static void el16_rx(struct net_device *dev)
pkt_len &= 0x3fff;
skb = dev_alloc_skb(pkt_len+2);
if (skb == NULL) {
- printk(KERN_ERR "%s: Memory squeeze, "
- "dropping packet.\n",
+ pr_err("%s: Memory squeeze, dropping packet.\n",
dev->name);
dev->stats.rx_dropped++;
break;
@@ -926,7 +923,7 @@ MODULE_PARM_DESC(irq, "(ignored)");
int __init init_module(void)
{
if (io == 0)
- printk("3c507: You should not use auto-probing with insmod!\n");
+ pr_notice("3c507: You should not use auto-probing with insmod!\n");
dev_3c507 = el16_probe(-1);
return IS_ERR(dev_3c507) ? PTR_ERR(dev_3c507) : 0;
}
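Aside (sketch of the idiom, constants and names illustrative): init_82586_mem() above polls a shared-memory status word but bounds the loop with a counter so a dead board cannot hang the probe; factored out, the pattern looks roughly like this.

	#include <linux/io.h>
	#include <linux/errno.h>

	static int wait_for_status(void __iomem *status_reg)
	{
		int boguscnt = 50;

		while (readw(status_reg) == 0)
			if (--boguscnt == 0)
				return -ETIMEDOUT;	/* board never came up */
		return 0;
	}
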
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index fbb37192199..d2137efbd45 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -257,7 +257,7 @@ static int el3_isa_id_sequence(__be16 *phys_addr)
&& !memcmp(phys_addr, el3_devs[i]->dev_addr,
ETH_ALEN)) {
if (el3_debug > 3)
- printk(KERN_DEBUG "3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
+ pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
phys_addr[0] & 0xff, phys_addr[0] >> 8,
phys_addr[1] & 0xff, phys_addr[1] >> 8,
phys_addr[2] & 0xff, phys_addr[2] >> 8);
@@ -480,9 +480,13 @@ static int pnp_registered;
#ifdef CONFIG_EISA
static struct eisa_device_id el3_eisa_ids[] = {
+ { "TCM5090" },
+ { "TCM5091" },
{ "TCM5092" },
{ "TCM5093" },
+ { "TCM5094" },
{ "TCM5095" },
+ { "TCM5098" },
{ "" }
};
MODULE_DEVICE_TABLE(eisa, el3_eisa_ids);
@@ -574,19 +578,18 @@ static int __devinit el3_common_init(struct net_device *dev)
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR "Failed to register 3c5x9 at %#3.3lx, IRQ %d.\n",
+ pr_err("Failed to register 3c5x9 at %#3.3lx, IRQ %d.\n",
dev->base_addr, dev->irq);
release_region(dev->base_addr, EL3_IO_EXTENT);
return err;
}
- printk(KERN_INFO "%s: 3c5x9 found at %#3.3lx, %s port, "
- "address %pM, IRQ %d.\n",
+ pr_info("%s: 3c5x9 found at %#3.3lx, %s port, address %pM, IRQ %d.\n",
dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)],
dev->dev_addr, dev->irq);
if (el3_debug > 0)
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
return 0;
}
@@ -625,8 +628,8 @@ static int __init el3_mca_probe(struct device *device)
irq = pos5 & 0x0f;
- printk(KERN_INFO "3c529: found %s at slot %d\n",
- el3_mca_adapter_names[mdev->index], slot + 1);
+ pr_info("3c529: found %s at slot %d\n",
+ el3_mca_adapter_names[mdev->index], slot + 1);
/* claim the slot */
strncpy(mdev->name, el3_mca_adapter_names[mdev->index],
@@ -638,7 +641,7 @@ static int __init el3_mca_probe(struct device *device)
irq = mca_device_transform_irq(mdev, irq);
ioaddr = mca_device_transform_ioport(mdev, ioaddr);
if (el3_debug > 2) {
- printk(KERN_DEBUG "3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port);
+ pr_debug("3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port);
}
EL3WINDOW(0);
for (i = 0; i < 3; i++)
@@ -653,11 +656,11 @@ static int __init el3_mca_probe(struct device *device)
netdev_boot_setup_check(dev);
el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_MCA);
- device->driver_data = dev;
+ dev_set_drvdata(device, dev);
err = el3_common_init(dev);
if (err) {
- device->driver_data = NULL;
+ dev_set_drvdata(device, NULL);
free_netdev(dev);
return -ENOMEM;
}
@@ -721,12 +724,12 @@ static int __init el3_eisa_probe (struct device *device)
/* This remove works for all device types.
*
- * The net dev must be stored in the driver_data field */
+ * The net dev must be stored in the driver data field */
static int __devexit el3_device_remove (struct device *device)
{
struct net_device *dev;
- dev = device->driver_data;
+ dev = dev_get_drvdata(device);
el3_common_remove (dev);
return 0;
@@ -761,7 +764,7 @@ static ushort id_read_eeprom(int index)
word = (word << 1) + (inb(id_port) & 0x01);
if (el3_debug > 3)
- printk(KERN_DEBUG " 3c509 EEPROM word %d %#4.4x.\n", index, word);
+ pr_debug(" 3c509 EEPROM word %d %#4.4x.\n", index, word);
return word;
}
@@ -783,13 +786,13 @@ el3_open(struct net_device *dev)
EL3WINDOW(0);
if (el3_debug > 3)
- printk(KERN_DEBUG "%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name,
+ pr_debug("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name,
dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS));
el3_up(dev);
if (el3_debug > 3)
- printk(KERN_DEBUG "%s: Opened 3c509 IRQ %d status %4.4x.\n",
+ pr_debug("%s: Opened 3c509 IRQ %d status %4.4x.\n",
dev->name, dev->irq, inw(ioaddr + EL3_STATUS));
return 0;
@@ -801,8 +804,7 @@ el3_tx_timeout (struct net_device *dev)
int ioaddr = dev->base_addr;
/* Transmitter timeout, serious problems. */
- printk(KERN_WARNING "%s: transmit timed out, Tx_status %2.2x status %4.4x "
- "Tx FIFO room %d.\n",
+ pr_warning("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d.\n",
dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
inw(ioaddr + TX_FREE));
dev->stats.tx_errors++;
@@ -826,7 +828,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
if (el3_debug > 4) {
- printk(KERN_DEBUG "%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
+ pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
dev->name, skb->len, inw(ioaddr + EL3_STATUS));
}
#if 0
@@ -835,7 +837,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
ushort status = inw(ioaddr + EL3_STATUS);
if (status & 0x0001 /* IRQ line active, missed one. */
&& inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */
- printk(KERN_DEBUG "%s: Missed interrupt, status then %04x now %04x"
+ pr_debug("%s: Missed interrupt, status then %04x now %04x"
" Tx %2.2x Rx %4.4x.\n", dev->name, status,
inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
inw(ioaddr + RX_STATUS));
@@ -909,7 +911,7 @@ el3_interrupt(int irq, void *dev_id)
if (el3_debug > 4) {
status = inw(ioaddr + EL3_STATUS);
- printk(KERN_DEBUG "%s: interrupt, status %4.4x.\n", dev->name, status);
+ pr_debug("%s: interrupt, status %4.4x.\n", dev->name, status);
}
while ((status = inw(ioaddr + EL3_STATUS)) &
@@ -920,7 +922,7 @@ el3_interrupt(int irq, void *dev_id)
if (status & TxAvailable) {
if (el3_debug > 5)
- printk(KERN_DEBUG " TX room bit was handled.\n");
+ pr_debug(" TX room bit was handled.\n");
/* There's room in the FIFO for a full-sized packet. */
outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
netif_wake_queue (dev);
@@ -958,7 +960,7 @@ el3_interrupt(int irq, void *dev_id)
}
if (--i < 0) {
- printk(KERN_ERR "%s: Infinite loop in interrupt, status %4.4x.\n",
+ pr_err("%s: Infinite loop in interrupt, status %4.4x.\n",
dev->name, status);
/* Clear all interrupts. */
outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
@@ -969,7 +971,7 @@ el3_interrupt(int irq, void *dev_id)
}
if (el3_debug > 4) {
- printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n", dev->name,
+ pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name,
inw(ioaddr + EL3_STATUS));
}
spin_unlock(&lp->lock);
@@ -1017,7 +1019,7 @@ static void update_stats(struct net_device *dev)
int ioaddr = dev->base_addr;
if (el3_debug > 5)
- printk(" Updating the statistics.\n");
+ pr_debug(" Updating the statistics.\n");
/* Turn off statistics updates while reading. */
outw(StatsDisable, ioaddr + EL3_CMD);
/* Switch to the stats window, and read everything. */
@@ -1047,7 +1049,7 @@ el3_rx(struct net_device *dev)
short rx_status;
if (el3_debug > 5)
- printk(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ pr_debug(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
while ((rx_status = inw(ioaddr + RX_STATUS)) > 0) {
if (rx_status & 0x4000) { /* Error, update stats. */
@@ -1069,7 +1071,7 @@ el3_rx(struct net_device *dev)
skb = dev_alloc_skb(pkt_len+5);
if (el3_debug > 4)
- printk("Receiving packet size %d status %4.4x.\n",
+ pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
if (skb != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte */
@@ -1088,12 +1090,12 @@ el3_rx(struct net_device *dev)
outw(RxDiscard, ioaddr + EL3_CMD);
dev->stats.rx_dropped++;
if (el3_debug)
- printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n",
dev->name, pkt_len);
}
inw(ioaddr + EL3_STATUS); /* Delay. */
while (inw(ioaddr + EL3_STATUS) & 0x1000)
- printk(KERN_DEBUG " Waiting for 3c509 to discard packet, status %x.\n",
+ pr_debug(" Waiting for 3c509 to discard packet, status %x.\n",
inw(ioaddr + EL3_STATUS) );
}
@@ -1114,7 +1116,7 @@ set_multicast_list(struct net_device *dev)
static int old;
if (old != dev->mc_count) {
old = dev->mc_count;
- printk("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count);
+ pr_debug("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count);
}
}
spin_lock_irqsave(&lp->lock, flags);
@@ -1137,7 +1139,7 @@ el3_close(struct net_device *dev)
struct el3_private *lp = netdev_priv(dev);
if (el3_debug > 2)
- printk("%s: Shutting down ethercard.\n", dev->name);
+ pr_debug("%s: Shutting down ethercard.\n", dev->name);
el3_down(dev);
@@ -1384,30 +1386,30 @@ el3_up(struct net_device *dev)
EL3WINDOW(4);
net_diag = inw(ioaddr + WN4_NETDIAG);
net_diag = (net_diag | FD_ENABLE); /* temporarily assume full-duplex will be set */
- printk("%s: ", dev->name);
+ pr_info("%s: ", dev->name);
switch (dev->if_port & 0x0c) {
case 12:
/* force full-duplex mode if 3c5x9b */
if (sw_info & 0x000f) {
- printk("Forcing 3c5x9b full-duplex mode");
+ pr_cont("Forcing 3c5x9b full-duplex mode");
break;
}
case 8:
/* set full-duplex mode based on eeprom config setting */
if ((sw_info & 0x000f) && (sw_info & 0x8000)) {
- printk("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)");
+ pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)");
break;
}
default:
/* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */
- printk("Setting 3c5x9/3c5x9B half-duplex mode");
+ pr_cont("Setting 3c5x9/3c5x9B half-duplex mode");
net_diag = (net_diag & ~FD_ENABLE); /* disable full duplex */
}
outw(net_diag, ioaddr + WN4_NETDIAG);
- printk(" if_port: %d, sw_info: %4.4x\n", dev->if_port, sw_info);
+ pr_cont(" if_port: %d, sw_info: %4.4x\n", dev->if_port, sw_info);
if (el3_debug > 3)
- printk("%s: 3c5x9 net diag word is now: %4.4x.\n", dev->name, net_diag);
+ pr_debug("%s: 3c5x9 net diag word is now: %4.4x.\n", dev->name, net_diag);
/* Enable link beat and jabber check. */
outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA);
}
@@ -1451,7 +1453,7 @@ el3_suspend(struct device *pdev, pm_message_t state)
struct el3_private *lp;
int ioaddr;
- dev = pdev->driver_data;
+ dev = dev_get_drvdata(pdev);
lp = netdev_priv(dev);
ioaddr = dev->base_addr;
@@ -1475,7 +1477,7 @@ el3_resume(struct device *pdev)
struct el3_private *lp;
int ioaddr;
- dev = pdev->driver_data;
+ dev = dev_get_drvdata(pdev);
lp = netdev_priv(dev);
ioaddr = dev->base_addr;
@@ -1535,7 +1537,7 @@ static int __init el3_init_module(void)
}
if (id_port >= 0x200) {
id_port = 0;
- printk(KERN_ERR "No I/O port available for 3c509 activation.\n");
+ pr_err("No I/O port available for 3c509 activation.\n");
} else {
ret = isa_register_driver(&el3_isa_driver, EL3_MAX_CARDS);
if (!ret)
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 167bf23066e..3e00fa8ea65 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -420,7 +420,7 @@ int init_module(void)
if (debug >= 0)
corkscrew_debug = debug;
if (corkscrew_debug)
- printk(version);
+ pr_debug("%s", version);
while (corkscrew_scan(-1))
found++;
return found ? 0 : -ENODEV;
@@ -437,7 +437,7 @@ struct net_device *tc515_probe(int unit)
if (corkscrew_debug > 0 && !printed) {
printed = 1;
- printk(version);
+ pr_debug("%s", version);
}
return dev;
@@ -516,7 +516,7 @@ static struct net_device *corkscrew_scan(int unit)
if (pnp_device_attach(idev) < 0)
continue;
if (pnp_activate_dev(idev) < 0) {
- printk("pnp activate failed (out of resources?)\n");
+ pr_warning("pnp activate failed (out of resources?)\n");
pnp_device_detach(idev);
continue;
}
@@ -531,9 +531,9 @@ static struct net_device *corkscrew_scan(int unit)
continue;
}
if(corkscrew_debug)
- printk ("ISAPNP reports %s at i/o 0x%x, irq %d\n",
+ pr_debug("ISAPNP reports %s at i/o 0x%x, irq %d\n",
(char*) corkscrew_isapnp_adapters[i].driver_data, ioaddr, irq);
- printk(KERN_INFO "3c515 Resource configuration register %#4.4x, DCR %4.4x.\n",
+ pr_info("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n",
inl(ioaddr + 0x2002), inw(ioaddr + 0x2000));
/* irq = inw(ioaddr + 0x2002) & 15; */ /* Use the irq from isapnp */
SET_NETDEV_DEV(dev, &idev->dev);
@@ -552,7 +552,7 @@ no_pnp:
if (!check_device(ioaddr))
continue;
- printk(KERN_INFO "3c515 Resource configuration register %#4.4x, DCR %4.4x.\n",
+ pr_info("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n",
inl(ioaddr + 0x2002), inw(ioaddr + 0x2000));
err = corkscrew_setup(dev, ioaddr, NULL, cards_found++);
if (!err)
@@ -625,7 +625,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
list_add(&vp->list, &root_corkscrew_dev);
#endif
- printk(KERN_INFO "%s: 3Com %s at %#3x,", dev->name, vp->product_name, ioaddr);
+ pr_info("%s: 3Com %s at %#3x,", dev->name, vp->product_name, ioaddr);
spin_lock_init(&vp->lock);
@@ -648,19 +648,19 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
}
checksum = (checksum ^ (checksum >> 8)) & 0xff;
if (checksum != 0x00)
- printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
- printk(" %pM", dev->dev_addr);
+ pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+ pr_cont(" %pM", dev->dev_addr);
if (eeprom[16] == 0x11c7) { /* Corkscrew */
if (request_dma(dev->dma, "3c515")) {
- printk(", DMA %d allocation failed", dev->dma);
+ pr_cont(", DMA %d allocation failed", dev->dma);
dev->dma = 0;
} else
- printk(", DMA %d", dev->dma);
+ pr_cont(", DMA %d", dev->dma);
}
- printk(", IRQ %d\n", dev->irq);
+ pr_cont(", IRQ %d\n", dev->irq);
/* Tell them about an invalid IRQ. */
if (corkscrew_debug && (dev->irq <= 0 || dev->irq > 15))
- printk(KERN_WARNING " *** Warning: this IRQ is unlikely to work! ***\n");
+ pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n");
{
char *ram_split[] = { "5:3", "3:1", "1:1", "3:5" };
@@ -669,9 +669,9 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
vp->available_media = inw(ioaddr + Wn3_Options);
config = inl(ioaddr + Wn3_Config);
if (corkscrew_debug > 1)
- printk(KERN_INFO " Internal config register is %4.4x, transceivers %#x.\n",
+ pr_info(" Internal config register is %4.4x, transceivers %#x.\n",
config, inw(ioaddr + Wn3_Options));
- printk(KERN_INFO " %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
+ pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
8 << config & Ram_size,
config & Ram_width ? "word" : "byte",
ram_split[(config & Ram_split) >> Ram_split_shift],
@@ -682,7 +682,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
dev->if_port = vp->default_media;
}
if (vp->media_override != 7) {
- printk(KERN_INFO " Media override to transceiver type %d (%s).\n",
+ pr_info(" Media override to transceiver type %d (%s).\n",
vp->media_override,
media_tbl[vp->media_override].name);
dev->if_port = vp->media_override;
@@ -718,7 +718,7 @@ static int corkscrew_open(struct net_device *dev)
if (vp->media_override != 7) {
if (corkscrew_debug > 1)
- printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
+ pr_info("%s: Media override to transceiver %d (%s).\n",
dev->name, vp->media_override,
media_tbl[vp->media_override].name);
dev->if_port = vp->media_override;
@@ -729,7 +729,7 @@ static int corkscrew_open(struct net_device *dev)
dev->if_port = media_tbl[dev->if_port].next;
if (corkscrew_debug > 1)
- printk("%s: Initial media type %s.\n",
+ pr_debug("%s: Initial media type %s.\n",
dev->name, media_tbl[dev->if_port].name);
init_timer(&vp->timer);
@@ -744,7 +744,7 @@ static int corkscrew_open(struct net_device *dev)
outl(config, ioaddr + Wn3_Config);
if (corkscrew_debug > 1) {
- printk("%s: corkscrew_open() InternalConfig %8.8x.\n",
+ pr_debug("%s: corkscrew_open() InternalConfig %8.8x.\n",
dev->name, config);
}
@@ -777,7 +777,7 @@ static int corkscrew_open(struct net_device *dev)
if (corkscrew_debug > 1) {
EL3WINDOW(4);
- printk("%s: corkscrew_open() irq %d media status %4.4x.\n",
+ pr_debug("%s: corkscrew_open() irq %d media status %4.4x.\n",
dev->name, dev->irq, inw(ioaddr + Wn4_Media));
}
@@ -814,8 +814,7 @@ static int corkscrew_open(struct net_device *dev)
if (vp->full_bus_master_rx) { /* Boomerang bus master. */
vp->cur_rx = vp->dirty_rx = 0;
if (corkscrew_debug > 2)
- printk("%s: Filling in the Rx ring.\n",
- dev->name);
+ pr_debug("%s: Filling in the Rx ring.\n", dev->name);
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
if (i < (RX_RING_SIZE - 1))
@@ -877,7 +876,7 @@ static void corkscrew_timer(unsigned long data)
int ok = 0;
if (corkscrew_debug > 1)
- printk("%s: Media selection timer tick happened, %s.\n",
+ pr_debug("%s: Media selection timer tick happened, %s.\n",
dev->name, media_tbl[dev->if_port].name);
spin_lock_irqsave(&vp->lock, flags);
@@ -894,12 +893,12 @@ static void corkscrew_timer(unsigned long data)
if (media_status & Media_LnkBeat) {
ok = 1;
if (corkscrew_debug > 1)
- printk("%s: Media %s has link beat, %x.\n",
+ pr_debug("%s: Media %s has link beat, %x.\n",
dev->name,
media_tbl[dev->if_port].name,
media_status);
} else if (corkscrew_debug > 1)
- printk("%s: Media %s is has no link beat, %x.\n",
+ pr_debug("%s: Media %s is has no link beat, %x.\n",
dev->name,
media_tbl[dev->if_port].name,
media_status);
@@ -907,7 +906,7 @@ static void corkscrew_timer(unsigned long data)
break;
default: /* Other media types handled by Tx timeouts. */
if (corkscrew_debug > 1)
- printk("%s: Media %s is has no indication, %x.\n",
+ pr_debug("%s: Media %s is has no indication, %x.\n",
dev->name,
media_tbl[dev->if_port].name,
media_status);
@@ -925,12 +924,12 @@ static void corkscrew_timer(unsigned long data)
if (dev->if_port == 8) { /* Go back to default. */
dev->if_port = vp->default_media;
if (corkscrew_debug > 1)
- printk("%s: Media selection failing, using default %s port.\n",
+ pr_debug("%s: Media selection failing, using default %s port.\n",
dev->name,
media_tbl[dev->if_port].name);
} else {
if (corkscrew_debug > 1)
- printk("%s: Media selection failed, now trying %s port.\n",
+ pr_debug("%s: Media selection failed, now trying %s port.\n",
dev->name,
media_tbl[dev->if_port].name);
vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
@@ -953,7 +952,7 @@ static void corkscrew_timer(unsigned long data)
spin_unlock_irqrestore(&vp->lock, flags);
if (corkscrew_debug > 1)
- printk("%s: Media selection timer finished, %s.\n",
+ pr_debug("%s: Media selection timer finished, %s.\n",
dev->name, media_tbl[dev->if_port].name);
#endif /* AUTOMEDIA */
@@ -966,23 +965,21 @@ static void corkscrew_timeout(struct net_device *dev)
struct corkscrew_private *vp = netdev_priv(dev);
int ioaddr = dev->base_addr;
- printk(KERN_WARNING
- "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+ pr_warning("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
dev->name, inb(ioaddr + TxStatus),
inw(ioaddr + EL3_STATUS));
/* Slight code bloat to be user friendly. */
if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
- printk(KERN_WARNING
- "%s: Transmitter encountered 16 collisions -- network"
+ pr_warning("%s: Transmitter encountered 16 collisions --"
" network cable problem?\n", dev->name);
#ifndef final_version
- printk(" Flags; bus-master %d, full %d; dirty %d current %d.\n",
+ pr_debug(" Flags; bus-master %d, full %d; dirty %d current %d.\n",
vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx,
vp->cur_tx);
- printk(" Down list %8.8x vs. %p.\n", inl(ioaddr + DownListPtr),
+ pr_debug(" Down list %8.8x vs. %p.\n", inl(ioaddr + DownListPtr),
&vp->tx_ring[0]);
for (i = 0; i < TX_RING_SIZE; i++) {
- printk(" %d: %p length %8.8x status %8.8x\n", i,
+ pr_debug(" %d: %p length %8.8x status %8.8x\n", i,
&vp->tx_ring[i],
vp->tx_ring[i].length, vp->tx_ring[i].status);
}
@@ -1017,13 +1014,13 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
int i;
if (vp->tx_full) /* No room to transmit with */
- return 1;
+ return NETDEV_TX_BUSY;
if (vp->cur_tx != 0)
prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];
else
prev_entry = NULL;
if (corkscrew_debug > 3)
- printk("%s: Trying to send a packet, Tx index %d.\n",
+ pr_debug("%s: Trying to send a packet, Tx index %d.\n",
dev->name, vp->cur_tx);
/* vp->tx_full = 1; */
vp->tx_skbuff[entry] = skb;
@@ -1102,7 +1099,7 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
if (corkscrew_debug > 2)
- printk("%s: Tx error, status %2.2x.\n",
+ pr_debug("%s: Tx error, status %2.2x.\n",
dev->name, tx_status);
if (tx_status & 0x04)
dev->stats.tx_fifo_errors++;
@@ -1143,7 +1140,7 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
status = inw(ioaddr + EL3_STATUS);
if (corkscrew_debug > 4)
- printk("%s: interrupt, status %4.4x, timer %d.\n",
+ pr_debug("%s: interrupt, status %4.4x, timer %d.\n",
dev->name, status, latency);
if ((status & 0xE000) != 0xE000) {
static int donedidthis;
@@ -1151,7 +1148,7 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
Ignore a single early interrupt, but don't hang the machine for
other interrupt problems. */
if (donedidthis++ > 100) {
- printk(KERN_ERR "%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n",
+ pr_err("%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n",
dev->name, status, netif_running(dev));
free_irq(dev->irq, dev);
dev->irq = -1;
@@ -1160,14 +1157,14 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
do {
if (corkscrew_debug > 5)
- printk("%s: In interrupt loop, status %4.4x.\n",
+ pr_debug("%s: In interrupt loop, status %4.4x.\n",
dev->name, status);
if (status & RxComplete)
corkscrew_rx(dev);
if (status & TxAvailable) {
if (corkscrew_debug > 5)
- printk(" TX room bit was handled.\n");
+ pr_debug(" TX room bit was handled.\n");
/* There's room in the FIFO for a full-sized packet. */
outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
netif_wake_queue(dev);
@@ -1212,19 +1209,20 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
if (status & StatsFull) { /* Empty statistics. */
static int DoneDidThat;
if (corkscrew_debug > 4)
- printk("%s: Updating stats.\n", dev->name);
+ pr_debug("%s: Updating stats.\n", dev->name);
update_stats(ioaddr, dev);
/* DEBUG HACK: Disable statistics as an interrupt source. */
/* This occurs when we have the wrong media type! */
if (DoneDidThat == 0 && inw(ioaddr + EL3_STATUS) & StatsFull) {
int win, reg;
- printk("%s: Updating stats failed, disabling stats as an"
- " interrupt source.\n", dev->name);
+ pr_notice("%s: Updating stats failed, disabling stats as an interrupt source.\n",
+ dev->name);
for (win = 0; win < 8; win++) {
EL3WINDOW(win);
- printk("\n Vortex window %d:", win);
+ pr_notice("Vortex window %d:", win);
for (reg = 0; reg < 16; reg++)
- printk(" %2.2x", inb(ioaddr + reg));
+ pr_cont(" %2.2x", inb(ioaddr + reg));
+ pr_cont("\n");
}
EL3WINDOW(7);
outw(SetIntrEnb | TxAvailable |
@@ -1246,9 +1244,8 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
}
if (--i < 0) {
- printk(KERN_ERR "%s: Too much work in interrupt, status %4.4x. "
- "Disabling functions (%4.4x).\n", dev->name,
- status, SetStatusEnb | ((~status) & 0x7FE));
+ pr_err("%s: Too much work in interrupt, status %4.4x. Disabling functions (%4.4x).\n",
+ dev->name, status, SetStatusEnb | ((~status) & 0x7FE));
/* Disable all pending interrupts. */
outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
@@ -1262,7 +1259,7 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
spin_unlock(&lp->lock);
if (corkscrew_debug > 4)
- printk("%s: exiting interrupt, status %4.4x.\n", dev->name, status);
+ pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status);
return IRQ_HANDLED;
}
@@ -1273,13 +1270,13 @@ static int corkscrew_rx(struct net_device *dev)
short rx_status;
if (corkscrew_debug > 5)
- printk(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ pr_debug(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
if (rx_status & 0x4000) { /* Error, update stats. */
unsigned char rx_error = inb(ioaddr + RxErrors);
if (corkscrew_debug > 2)
- printk(" Rx error: status %2.2x.\n",
+ pr_debug(" Rx error: status %2.2x.\n",
rx_error);
dev->stats.rx_errors++;
if (rx_error & 0x01)
@@ -1299,7 +1296,7 @@ static int corkscrew_rx(struct net_device *dev)
skb = dev_alloc_skb(pkt_len + 5 + 2);
if (corkscrew_debug > 4)
- printk("Receiving packet size %d status %4.4x.\n",
+ pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
if (skb != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
@@ -1318,7 +1315,7 @@ static int corkscrew_rx(struct net_device *dev)
break;
continue;
} else if (corkscrew_debug)
- printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len);
+ pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len);
}
outw(RxDiscard, ioaddr + EL3_CMD);
dev->stats.rx_dropped++;
@@ -1338,13 +1335,13 @@ static int boomerang_rx(struct net_device *dev)
int rx_status;
if (corkscrew_debug > 5)
- printk(" In boomerang_rx(), status %4.4x, rx_status %4.4x.\n",
+ pr_debug(" In boomerang_rx(), status %4.4x, rx_status %4.4x.\n",
inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
while ((rx_status = vp->rx_ring[entry].status) & RxDComplete) {
if (rx_status & RxDError) { /* Error, update stats. */
unsigned char rx_error = rx_status >> 16;
if (corkscrew_debug > 2)
- printk(" Rx error: status %2.2x.\n",
+ pr_debug(" Rx error: status %2.2x.\n",
rx_error);
dev->stats.rx_errors++;
if (rx_error & 0x01)
@@ -1364,7 +1361,7 @@ static int boomerang_rx(struct net_device *dev)
dev->stats.rx_bytes += pkt_len;
if (corkscrew_debug > 4)
- printk("Receiving packet size %d status %4.4x.\n",
+ pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
/* Check if the packet is long enough to just accept without
@@ -1385,7 +1382,7 @@ static int boomerang_rx(struct net_device *dev)
temp = skb_put(skb, pkt_len);
/* Remove this checking code for final release. */
if (isa_bus_to_virt(vp->rx_ring[entry].addr) != temp)
- printk("%s: Warning -- the skbuff addresses do not match"
+ pr_warning("%s: Warning -- the skbuff addresses do not match"
" in boomerang_rx: %p vs. %p / %p.\n",
dev->name,
isa_bus_to_virt(vp->
@@ -1427,12 +1424,11 @@ static int corkscrew_close(struct net_device *dev)
netif_stop_queue(dev);
if (corkscrew_debug > 1) {
- printk("%s: corkscrew_close() status %4.4x, Tx status %2.2x.\n",
+ pr_debug("%s: corkscrew_close() status %4.4x, Tx status %2.2x.\n",
dev->name, inw(ioaddr + EL3_STATUS),
inb(ioaddr + TxStatus));
- printk("%s: corkscrew close stats: rx_nocopy %d rx_copy %d"
- " tx_queued %d.\n", dev->name, rx_nocopy, rx_copy,
- queued_packet);
+ pr_debug("%s: corkscrew close stats: rx_nocopy %d rx_copy %d tx_queued %d.\n",
+ dev->name, rx_nocopy, rx_copy, queued_packet);
}
del_timer(&vp->timer);
@@ -1534,7 +1530,7 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
if (corkscrew_debug > 3)
- printk("%s: Setting promiscuous mode.\n",
+ pr_debug("%s: Setting promiscuous mode.\n",
dev->name);
new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm;
} else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 8f734d74b51..cdd955c4014 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -176,7 +176,7 @@ sizeof(nop_cmd) = 8;
if(!p->scb->cmd) break; \
DELAY_16(); \
if(i == 1023) { \
- printk(KERN_WARNING "%s:%d: scb_cmd timed out .. resetting i82586\n",\
+ pr_warning("%s:%d: scb_cmd timed out .. resetting i82586\n",\
dev->name,__LINE__); \
elmc_id_reset586(); } } }
@@ -291,7 +291,7 @@ static int elmc_open(struct net_device *dev)
ret = request_irq(dev->irq, &elmc_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM,
dev->name, dev);
if (ret) {
- printk(KERN_ERR "%s: couldn't get irq %d\n", dev->name, dev->irq);
+ pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq);
elmc_id_reset586();
return ret;
}
@@ -371,9 +371,9 @@ static void alloc586(struct net_device *dev)
DELAY(2);
- if (p->iscp->busy) {
- printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name);
- }
+ if (p->iscp->busy)
+ pr_err("%s: Init-Problems (alloc).\n", dev->name);
+
memset((char *) p->scb, 0, sizeof(struct scb_struct));
}
@@ -470,7 +470,7 @@ static int __init do_elmc_probe(struct net_device *dev)
mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev);
/* if we get this far, adapter has been found - carry on */
- printk(KERN_INFO "%s: 3c523 adapter found in slot %d\n", dev->name, slot + 1);
+ pr_info("%s: 3c523 adapter found in slot %d\n", dev->name, slot + 1);
/* Now we extract configuration info from the card.
The 3c523 provides information in two of the POS registers, but
@@ -507,7 +507,7 @@ static int __init do_elmc_probe(struct net_device *dev)
memset(pr, 0, sizeof(struct priv));
pr->slot = slot;
- printk(KERN_INFO "%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
+ pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
dev->base_addr);
/* Determine if we're using the on-board transceiver (i.e. coax) or
@@ -529,7 +529,7 @@ static int __init do_elmc_probe(struct net_device *dev)
size = 0x4000; /* check for 16K mem */
if (!check586(dev, dev->mem_start, size)) {
- printk(KERN_ERR "%s: memprobe, Can't find memory at 0x%lx!\n", dev->name,
+ pr_err("%s: memprobe, Can't find memory at 0x%lx!\n", dev->name,
dev->mem_start);
retval = -ENODEV;
goto err_out;
@@ -546,7 +546,7 @@ static int __init do_elmc_probe(struct net_device *dev)
pr->num_recv_buffs = NUM_RECV_BUFFS_16;
/* dump all the assorted information */
- printk(KERN_INFO "%s: IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->name,
+ pr_info("%s: IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->name,
dev->irq, dev->if_port ? "ex" : "in",
dev->mem_start, dev->mem_end - 1);
@@ -555,7 +555,7 @@ static int __init do_elmc_probe(struct net_device *dev)
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(dev->base_addr + i);
- printk(KERN_INFO "%s: hardware address %pM\n",
+ pr_info("%s: hardware address %pM\n",
dev->name, dev->dev_addr);
dev->netdev_ops = &netdev_ops;
@@ -660,7 +660,7 @@ static int init586(struct net_device *dev)
}
if ((cfg_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_COMPL | STAT_OK)) {
- printk(KERN_WARNING "%s (elmc): configure command failed: %x\n", dev->name, cfg_cmd->cmd_status);
+ pr_warning("%s (elmc): configure command failed: %x\n", dev->name, cfg_cmd->cmd_status);
return 1;
}
/*
@@ -686,7 +686,8 @@ static int init586(struct net_device *dev)
}
if ((ias_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_OK | STAT_COMPL)) {
- printk(KERN_WARNING "%s (elmc): individual address setup command failed: %04x\n", dev->name, ias_cmd->cmd_status);
+ pr_warning("%s (elmc): individual address setup command failed: %04x\n",
+ dev->name, ias_cmd->cmd_status);
return 1;
}
/*
@@ -707,7 +708,7 @@ static int init586(struct net_device *dev)
s = jiffies;
while (!(tdr_cmd->cmd_status & STAT_COMPL)) {
if (time_after(jiffies, s + 30*HZ/100)) {
- printk(KERN_WARNING "%s: %d Problems while running the TDR.\n", dev->name, __LINE__);
+ pr_warning("%s: %d Problems while running the TDR.\n", dev->name, __LINE__);
result = 1;
break;
}
@@ -723,14 +724,14 @@ static int init586(struct net_device *dev)
if (result & TDR_LNK_OK) {
/* empty */
} else if (result & TDR_XCVR_PRB) {
- printk(KERN_WARNING "%s: TDR: Transceiver problem!\n", dev->name);
+ pr_warning("%s: TDR: Transceiver problem!\n", dev->name);
} else if (result & TDR_ET_OPN) {
- printk(KERN_WARNING "%s: TDR: No correct termination %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
+ pr_warning("%s: TDR: No correct termination %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
} else if (result & TDR_ET_SRT) {
if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
- printk(KERN_WARNING "%s: TDR: Detected a short circuit %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
+ pr_warning("%s: TDR: Detected a short circuit %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
} else {
- printk(KERN_WARNING "%s: TDR: Unknown status %04x\n", dev->name, result);
+ pr_warning("%s: TDR: Unknown status %04x\n", dev->name, result);
}
}
/*
@@ -774,11 +775,11 @@ static int init586(struct net_device *dev)
/* I don't understand this: do we really need memory after the init? */
int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
if (len <= 0) {
- printk(KERN_ERR "%s: Ooooops, no memory for MC-Setup!\n", dev->name);
+ pr_err("%s: Ooooops, no memory for MC-Setup!\n", dev->name);
} else {
if (len < num_addrs) {
num_addrs = len;
- printk(KERN_WARNING "%s: Sorry, can only apply %d MC-Address(es).\n",
+ pr_warning("%s: Sorry, can only apply %d MC-Address(es).\n",
dev->name, num_addrs);
}
mc_cmd = (struct mcsetup_cmd_struct *) ptr;
@@ -799,7 +800,7 @@ static int init586(struct net_device *dev)
break;
}
if (!(mc_cmd->cmd_status & STAT_COMPL)) {
- printk(KERN_WARNING "%s: Can't apply multicast-address-list.\n", dev->name);
+ pr_warning("%s: Can't apply multicast-address-list.\n", dev->name);
}
}
}
@@ -812,7 +813,7 @@ static int init586(struct net_device *dev)
p->xmit_buffs[i] = (struct tbd_struct *) ptr; /* TBD */
ptr = (char *) ptr + sizeof(struct tbd_struct);
if ((void *) ptr > (void *) p->iscp) {
- printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n", dev->name);
+ pr_err("%s: not enough shared-mem for your configuration!\n", dev->name);
return 1;
}
memset((char *) (p->xmit_cmds[i]), 0, sizeof(struct transmit_cmd_struct));
@@ -936,7 +937,8 @@ elmc_interrupt(int irq, void *dev_id)
if (stat & STAT_CNA) {
/* CU went 'not ready' */
if (netif_running(dev)) {
- printk(KERN_WARNING "%s: oops! CU has left active state. stat: %04x/%04x.\n", dev->name, (int) stat, (int) p->scb->status);
+ pr_warning("%s: oops! CU has left active state. stat: %04x/%04x.\n",
+ dev->name, (int) stat, (int) p->scb->status);
}
}
#endif
@@ -951,7 +953,8 @@ elmc_interrupt(int irq, void *dev_id)
p->scb->cmd = RUC_RESUME;
elmc_attn586();
} else {
- printk(KERN_WARNING "%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n", dev->name, (int) stat, (int) p->scb->status);
+ pr_warning("%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n",
+ dev->name, (int) stat, (int) p->scb->status);
elmc_rnr_int(dev);
}
}
@@ -995,11 +998,11 @@ static void elmc_rcv_int(struct net_device *dev)
dev->stats.rx_dropped++;
}
} else {
- printk(KERN_WARNING "%s: received oversized frame.\n", dev->name);
+ pr_warning("%s: received oversized frame.\n", dev->name);
dev->stats.rx_dropped++;
}
} else { /* frame !(ok), only with 'save-bad-frames' */
- printk(KERN_WARNING "%s: oops! rfd-error-status: %04x\n", dev->name, status);
+ pr_warning("%s: oops! rfd-error-status: %04x\n", dev->name, status);
dev->stats.rx_errors++;
}
p->rfd_top->status = 0;
@@ -1028,7 +1031,7 @@ static void elmc_rnr_int(struct net_device *dev)
alloc_rfa(dev, (char *) p->rfd_first);
startrecv586(dev); /* restart RU */
- printk(KERN_WARNING "%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->status);
+ pr_warning("%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->status);
}
@@ -1043,7 +1046,7 @@ static void elmc_xmt_int(struct net_device *dev)
status = p->xmit_cmds[p->xmit_last]->cmd_status;
if (!(status & STAT_COMPL)) {
- printk(KERN_WARNING "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
+ pr_warning("%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
}
if (status & STAT_OK) {
dev->stats.tx_packets++;
@@ -1051,18 +1054,18 @@ static void elmc_xmt_int(struct net_device *dev)
} else {
dev->stats.tx_errors++;
if (status & TCMD_LATECOLL) {
- printk(KERN_WARNING "%s: late collision detected.\n", dev->name);
+ pr_warning("%s: late collision detected.\n", dev->name);
dev->stats.collisions++;
} else if (status & TCMD_NOCARRIER) {
dev->stats.tx_carrier_errors++;
- printk(KERN_WARNING "%s: no carrier detected.\n", dev->name);
+ pr_warning("%s: no carrier detected.\n", dev->name);
} else if (status & TCMD_LOSTCTS) {
- printk(KERN_WARNING "%s: loss of CTS detected.\n", dev->name);
+ pr_warning("%s: loss of CTS detected.\n", dev->name);
} else if (status & TCMD_UNDERRUN) {
dev->stats.tx_fifo_errors++;
- printk(KERN_WARNING "%s: DMA underrun detected.\n", dev->name);
+ pr_warning("%s: DMA underrun detected.\n", dev->name);
} else if (status & TCMD_MAXCOLL) {
- printk(KERN_WARNING "%s: Max. collisions exceeded.\n", dev->name);
+ pr_warning("%s: Max. collisions exceeded.\n", dev->name);
dev->stats.collisions += 16;
}
}
@@ -1099,10 +1102,11 @@ static void elmc_timeout(struct net_device *dev)
struct priv *p = netdev_priv(dev);
/* COMMAND-UNIT active? */
if (p->scb->status & CU_ACTIVE) {
-#ifdef DEBUG
- printk("%s: strange ... timeout with CU active?!?\n", dev->name);
- printk("%s: X0: %04x N0: %04x N1: %04x %d\n", dev->name, (int) p->xmit_cmds[0]->cmd_status, (int) p->nop_cmds[0]->cmd_status, (int) p->nop_cmds[1]->cmd_status, (int) p->nop_point);
-#endif
+ pr_debug("%s: strange ... timeout with CU active?!?\n", dev->name);
+ pr_debug("%s: X0: %04x N0: %04x N1: %04x %d\n", dev->name,
+ (int)p->xmit_cmds[0]->cmd_status,
+ (int)p->nop_cmds[0]->cmd_status,
+ (int)p->nop_cmds[1]->cmd_status, (int)p->nop_point);
p->scb->cmd = CUC_ABORT;
elmc_attn586();
WAIT_4_SCB_CMD();
@@ -1112,10 +1116,10 @@ static void elmc_timeout(struct net_device *dev)
WAIT_4_SCB_CMD();
netif_wake_queue(dev);
} else {
-#ifdef DEBUG
- printk("%s: xmitter timed out, try to restart! stat: %04x\n", dev->name, p->scb->status);
- printk("%s: command-stats: %04x %04x\n", dev->name, p->xmit_cmds[0]->cmd_status, p->xmit_cmds[1]->cmd_status);
-#endif
+ pr_debug("%s: xmitter timed out, try to restart! stat: %04x\n",
+ dev->name, p->scb->status);
+ pr_debug("%s: command-stats: %04x %04x\n", dev->name,
+ p->xmit_cmds[0]->cmd_status, p->xmit_cmds[1]->cmd_status);
elmc_close(dev);
elmc_open(dev);
}
@@ -1162,7 +1166,7 @@ static int elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
break;
}
if (i == 15) {
- printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
+ pr_warning("%s: Can't start transmit-command.\n", dev->name);
}
}
#else
@@ -1287,11 +1291,12 @@ int __init init_module(void)
free_netdev(dev);
if (io[this_dev]==0)
break;
- printk(KERN_WARNING "3c523.c: No 3c523 card found at io=%#x\n",io[this_dev]);
+ pr_warning("3c523.c: No 3c523 card found at io=%#x\n",io[this_dev]);
}
if(found==0) {
- if(io[0]==0) printk(KERN_NOTICE "3c523.c: No 3c523 cards found\n");
+ if (io[0]==0)
+ pr_notice("3c523.c: No 3c523 cards found\n");
return -ENXIO;
} else return 0;
}
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index b61073c42bf..aaa8a9f405d 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -125,8 +125,6 @@ static const char* cardname = DRV_NAME;
#define NET_DEBUG 2
#endif
-#undef DEBUG_IRQ
-
static unsigned int mc32_debug = NET_DEBUG;
/* The number of low I/O ports used by the ethercard. */
@@ -351,15 +349,15 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
/* Time to play MCA games */
if (mc32_debug && version_printed++ == 0)
- printk(KERN_DEBUG "%s", version);
+ pr_debug("%s", version);
- printk(KERN_INFO "%s: %s found in slot %d:", dev->name, cardname, slot);
+ pr_info("%s: %s found in slot %d: ", dev->name, cardname, slot);
POS = mca_read_stored_pos(slot, 2);
if(!(POS&1))
{
- printk(" disabled.\n");
+ pr_cont("disabled.\n");
return -ENODEV;
}
@@ -370,7 +368,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
POS = mca_read_stored_pos(slot, 4);
if(!(POS&1))
{
- printk("memory window disabled.\n");
+ pr_cont("memory window disabled.\n");
return -ENODEV;
}
@@ -379,7 +377,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
i=(POS>>4)&3;
if(i==3)
{
- printk("invalid memory window.\n");
+ pr_cont("invalid memory window.\n");
return -ENODEV;
}
@@ -392,11 +390,11 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
{
- printk("io 0x%3lX, which is busy.\n", dev->base_addr);
+ pr_cont("io 0x%3lX, which is busy.\n", dev->base_addr);
return -EBUSY;
}
- printk("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
+ pr_cont("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
dev->base_addr, dev->irq, dev->mem_start, i/1024);
@@ -416,7 +414,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
dev->dev_addr[i] = mca_read_pos(slot,3);
}
- printk("%s: Address %pM", dev->name, dev->dev_addr);
+ pr_info("%s: Address %pM ", dev->name, dev->dev_addr);
mca_write_pos(slot, 6, 0);
mca_write_pos(slot, 7, 0);
@@ -424,9 +422,9 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
POS = mca_read_stored_pos(slot, 4);
if(POS&2)
- printk(" : BNC port selected.\n");
+ pr_cont(": BNC port selected.\n");
else
- printk(" : AUI port selected.\n");
+ pr_cont(": AUI port selected.\n");
POS=inb(dev->base_addr+HOST_CTRL);
POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
@@ -447,7 +445,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
if (err) {
release_region(dev->base_addr, MC32_IO_EXTENT);
- printk(KERN_ERR "%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
+ pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
goto err_exit_ports;
}
@@ -463,7 +461,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
i++;
if(i == 1000)
{
- printk(KERN_ERR "%s: failed to boot adapter.\n", dev->name);
+ pr_err("%s: failed to boot adapter.\n", dev->name);
err = -ENODEV;
goto err_exit_irq;
}
@@ -475,10 +473,10 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
if(base>0)
{
if(base < 0x0C)
- printk(KERN_ERR "%s: %s%s.\n", dev->name, failures[base-1],
+ pr_err("%s: %s%s.\n", dev->name, failures[base-1],
base<0x0A?" test failure":"");
else
- printk(KERN_ERR "%s: unknown failure %d.\n", dev->name, base);
+ pr_err("%s: unknown failure %d.\n", dev->name, base);
err = -ENODEV;
goto err_exit_irq;
}
@@ -494,7 +492,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
udelay(50);
if(n>100)
{
- printk(KERN_ERR "%s: mailbox read fail (%d).\n", dev->name, i);
+ pr_err("%s: mailbox read fail (%d).\n", dev->name, i);
err = -ENODEV;
goto err_exit_irq;
}
@@ -527,7 +525,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
init_completion(&lp->execution_cmd);
init_completion(&lp->xceiver_cmd);
- printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
+ pr_info("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
dev->netdev_ops = &netdev_ops;
@@ -939,7 +937,7 @@ static int mc32_open(struct net_device *dev)
*/
if(mc32_command(dev, 8, descnumbuffs, 4)) {
- printk("%s: %s rejected our buffer configuration!\n",
+ pr_info("%s: %s rejected our buffer configuration!\n",
dev->name, cardname);
mc32_close(dev);
return -ENOBUFS;
@@ -995,7 +993,7 @@ static int mc32_open(struct net_device *dev)
static void mc32_timeout(struct net_device *dev)
{
- printk(KERN_WARNING "%s: transmit timed out?\n", dev->name);
+ pr_warning("%s: transmit timed out?\n", dev->name);
/* Try to restart the adaptor. */
netif_wake_queue(dev);
}
@@ -1032,7 +1030,7 @@ static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
if(atomic_read(&lp->tx_count)==0) {
- return 1;
+ return NETDEV_TX_BUSY;
}
if (skb_padto(skb, ETH_ZLEN)) {
@@ -1335,11 +1333,9 @@ static irqreturn_t mc32_interrupt(int irq, void *dev_id)
{
status=inb(ioaddr+HOST_CMD);
-#ifdef DEBUG_IRQ
- printk("Status TX%d RX%d EX%d OV%d BC%d\n",
+ pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n",
(status&7), (status>>3)&7, (status>>6)&1,
(status>>7)&1, boguscount);
-#endif
switch(status&7)
{
@@ -1354,7 +1350,7 @@ static irqreturn_t mc32_interrupt(int irq, void *dev_id)
complete(&lp->xceiver_cmd);
break;
default:
- printk("%s: strange tx ack %d\n", dev->name, status&7);
+ pr_notice("%s: strange tx ack %d\n", dev->name, status&7);
}
status>>=3;
switch(status&7)
@@ -1376,7 +1372,7 @@ static irqreturn_t mc32_interrupt(int irq, void *dev_id)
mc32_start_transceiver(dev);
break;
default:
- printk("%s: strange rx ack %d\n",
+ pr_notice("%s: strange rx ack %d\n",
dev->name, status&7);
}
status>>=3;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index c5669840242..c34aee91250 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -828,14 +828,14 @@ static int vortex_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
err = pci_enable_device(pdev);
if (err) {
- printk(KERN_WARNING "%s: Could not enable device \n",
+ pr_warning("%s: Could not enable device\n",
dev->name);
return err;
}
pci_set_master(pdev);
if (request_irq(dev->irq, vp->full_bus_master_rx ?
&boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) {
- printk(KERN_WARNING "%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
+ pr_warning("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
pci_disable_device(pdev);
return -EBUSY;
}
@@ -894,7 +894,7 @@ static int __devexit vortex_eisa_remove(struct device *device)
dev = eisa_get_drvdata(edev);
if (!dev) {
- printk("vortex_eisa_remove called for Compaq device!\n");
+ pr_err("vortex_eisa_remove called for Compaq device!\n");
BUG();
}
@@ -1051,7 +1051,7 @@ static int __devinit vortex_probe1(struct device *gendev,
struct eisa_device *edev = NULL;
if (!printed_version) {
- printk (version);
+ pr_info("%s", version);
printed_version = 1;
}
@@ -1068,7 +1068,7 @@ static int __devinit vortex_probe1(struct device *gendev,
dev = alloc_etherdev(sizeof(*vp));
retval = -ENOMEM;
if (!dev) {
- printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n");
+ pr_err(PFX "unable to allocate etherdev, aborting\n");
goto out;
}
SET_NETDEV_DEV(dev, gendev);
@@ -1100,9 +1100,9 @@ static int __devinit vortex_probe1(struct device *gendev,
print_info = (vortex_debug > 1);
if (print_info)
- printk (KERN_INFO "See Documentation/networking/vortex.txt\n");
+ pr_info("See Documentation/networking/vortex.txt\n");
- printk(KERN_INFO "%s: 3Com %s %s at %p.\n",
+ pr_info("%s: 3Com %s %s at %p.\n",
print_name,
pdev ? "PCI" : "EISA",
vci->name,
@@ -1144,10 +1144,9 @@ static int __devinit vortex_probe1(struct device *gendev,
chip only. */
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
if (pci_latency < new_latency) {
- printk(KERN_INFO "%s: Overriding PCI latency"
- " timer (CFLT) setting of %d, new value is %d.\n",
+ pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n",
print_name, pci_latency, new_latency);
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
}
}
}
@@ -1236,17 +1235,17 @@ static int __devinit vortex_probe1(struct device *gendev,
checksum = (checksum ^ (checksum >> 8)) & 0xff;
}
if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
- printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+ pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
for (i = 0; i < 3; i++)
((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
if (print_info)
- printk(" %pM", dev->dev_addr);
+ pr_cont(" %pM", dev->dev_addr);
/* Unfortunately an all zero eeprom passes the checksum and this
gets found in the wild in failure cases. Crypto is hard 8) */
if (!is_valid_ether_addr(dev->dev_addr)) {
retval = -EINVAL;
- printk(KERN_ERR "*** EEPROM MAC address is invalid.\n");
+ pr_err("*** EEPROM MAC address is invalid.\n");
goto free_ring; /* With every pack */
}
EL3WINDOW(2);
@@ -1254,17 +1253,17 @@ static int __devinit vortex_probe1(struct device *gendev,
iowrite8(dev->dev_addr[i], ioaddr + i);
if (print_info)
- printk(", IRQ %d\n", dev->irq);
+ pr_cont(", IRQ %d\n", dev->irq);
/* Tell them about an invalid IRQ. */
if (dev->irq <= 0 || dev->irq >= nr_irqs)
- printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
+ pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n",
dev->irq);
EL3WINDOW(4);
step = (ioread8(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
if (print_info) {
- printk(KERN_INFO " product code %02x%02x rev %02x.%d date %02d-"
- "%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
+ pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n",
+ eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
}
@@ -1279,8 +1278,7 @@ static int __devinit vortex_probe1(struct device *gendev,
}
if (print_info) {
- printk(KERN_INFO "%s: CardBus functions mapped "
- "%16.16llx->%p\n",
+ pr_info("%s: CardBus functions mapped %16.16llx->%p\n",
print_name,
(unsigned long long)pci_resource_start(pdev, 2),
vp->cb_fn_base);
@@ -1307,7 +1305,7 @@ static int __devinit vortex_probe1(struct device *gendev,
if (vp->info1 & 0x8000) {
vp->full_duplex = 1;
if (print_info)
- printk(KERN_INFO "Full duplex capable\n");
+ pr_info("Full duplex capable\n");
}
{
@@ -1319,9 +1317,9 @@ static int __devinit vortex_probe1(struct device *gendev,
vp->available_media = 0x40;
config = ioread32(ioaddr + Wn3_Config);
if (print_info) {
- printk(KERN_DEBUG " Internal config register is %4.4x, "
- "transceivers %#x.\n", config, ioread16(ioaddr + Wn3_Options));
- printk(KERN_INFO " %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
+ pr_debug(" Internal config register is %4.4x, transceivers %#x.\n",
+ config, ioread16(ioaddr + Wn3_Options));
+ pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
8 << RAM_SIZE(config),
RAM_WIDTH(config) ? "word" : "byte",
ram_split[RAM_SPLIT(config)],
@@ -1336,7 +1334,7 @@ static int __devinit vortex_probe1(struct device *gendev,
}
if (vp->media_override != 7) {
- printk(KERN_INFO "%s: Media override to transceiver type %d (%s).\n",
+ pr_info("%s: Media override to transceiver type %d (%s).\n",
print_name, vp->media_override,
media_tbl[vp->media_override].name);
dev->if_port = vp->media_override;
@@ -1369,8 +1367,8 @@ static int __devinit vortex_probe1(struct device *gendev,
if (mii_status && mii_status != 0xffff) {
vp->phys[phy_idx++] = phyx;
if (print_info) {
- printk(KERN_INFO " MII transceiver found at address %d,"
- " status %4x.\n", phyx, mii_status);
+ pr_info(" MII transceiver found at address %d, status %4x.\n",
+ phyx, mii_status);
}
if ((mii_status & 0x0040) == 0)
mii_preamble_required++;
@@ -1378,7 +1376,7 @@ static int __devinit vortex_probe1(struct device *gendev,
}
mii_preamble_required--;
if (phy_idx == 0) {
- printk(KERN_WARNING" ***WARNING*** No MII transceivers found!\n");
+ pr_warning(" ***WARNING*** No MII transceivers found!\n");
vp->phys[0] = 24;
} else {
vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
@@ -1394,7 +1392,7 @@ static int __devinit vortex_probe1(struct device *gendev,
if (vp->capabilities & CapBusMaster) {
vp->full_bus_master_tx = 1;
if (print_info) {
- printk(KERN_INFO " Enabling bus-master transmits and %s receives.\n",
+ pr_info(" Enabling bus-master transmits and %s receives.\n",
(vp->info2 & 1) ? "early" : "whole-frame" );
}
vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
@@ -1414,7 +1412,7 @@ static int __devinit vortex_probe1(struct device *gendev,
dev->netdev_ops = &vortex_netdev_ops;
if (print_info) {
- printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n",
+ pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n",
print_name,
(dev->features & NETIF_F_SG) ? "en":"dis",
(dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
@@ -1442,7 +1440,7 @@ free_region:
if (vp->must_free_region)
release_region(dev->base_addr, vci->io_size);
free_netdev(dev);
- printk(KERN_ERR PFX "vortex_probe1 fails. Returns %d\n", retval);
+ pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
out:
return retval;
}
@@ -1464,13 +1462,13 @@ issue_and_wait(struct net_device *dev, int cmd)
for (i = 0; i < 100000; i++) {
if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) {
if (vortex_debug > 1)
- printk(KERN_INFO "%s: command 0x%04x took %d usecs\n",
+ pr_info("%s: command 0x%04x took %d usecs\n",
dev->name, cmd, i * 10);
return;
}
udelay(10);
}
- printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n",
+ pr_err("%s: command 0x%04x did not complete! Status=0x%x\n",
dev->name, cmd, ioread16(ioaddr + EL3_STATUS));
}
@@ -1480,7 +1478,7 @@ vortex_set_duplex(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
- printk(KERN_INFO "%s: setting %s-duplex.\n",
+ pr_info("%s: setting %s-duplex.\n",
dev->name, (vp->full_duplex) ? "full" : "half");
EL3WINDOW(3);
@@ -1522,7 +1520,7 @@ vortex_up(struct net_device *dev)
pci_restore_state(VORTEX_PCI(vp));
err = pci_enable_device(VORTEX_PCI(vp));
if (err) {
- printk(KERN_WARNING "%s: Could not enable device \n",
+ pr_warning("%s: Could not enable device\n",
dev->name);
goto err_out;
}
@@ -1533,14 +1531,14 @@ vortex_up(struct net_device *dev)
config = ioread32(ioaddr + Wn3_Config);
if (vp->media_override != 7) {
- printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
+ pr_info("%s: Media override to transceiver %d (%s).\n",
dev->name, vp->media_override,
media_tbl[vp->media_override].name);
dev->if_port = vp->media_override;
} else if (vp->autoselect) {
if (vp->has_nway) {
if (vortex_debug > 1)
- printk(KERN_INFO "%s: using NWAY device table, not %d\n",
+ pr_info("%s: using NWAY device table, not %d\n",
dev->name, dev->if_port);
dev->if_port = XCVR_NWAY;
} else {
@@ -1549,13 +1547,13 @@ vortex_up(struct net_device *dev)
while (! (vp->available_media & media_tbl[dev->if_port].mask))
dev->if_port = media_tbl[dev->if_port].next;
if (vortex_debug > 1)
- printk(KERN_INFO "%s: first available media type: %s\n",
+ pr_info("%s: first available media type: %s\n",
dev->name, media_tbl[dev->if_port].name);
}
} else {
dev->if_port = vp->default_media;
if (vortex_debug > 1)
- printk(KERN_INFO "%s: using default media %s\n",
+ pr_info("%s: using default media %s\n",
dev->name, media_tbl[dev->if_port].name);
}
@@ -1570,13 +1568,13 @@ vortex_up(struct net_device *dev)
vp->rx_oom_timer.function = rx_oom_timer;
if (vortex_debug > 1)
- printk(KERN_DEBUG "%s: Initial media type %s.\n",
+ pr_debug("%s: Initial media type %s.\n",
dev->name, media_tbl[dev->if_port].name);
vp->full_duplex = vp->mii.force_media;
config = BFINS(config, dev->if_port, 20, 4);
if (vortex_debug > 6)
- printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
+ pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config);
iowrite32(config, ioaddr + Wn3_Config);
if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
@@ -1602,7 +1600,7 @@ vortex_up(struct net_device *dev)
if (vortex_debug > 1) {
EL3WINDOW(4);
- printk(KERN_DEBUG "%s: vortex_up() irq %d media status %4.4x.\n",
+ pr_debug("%s: vortex_up() irq %d media status %4.4x.\n",
dev->name, dev->irq, ioread16(ioaddr + Wn4_Media));
}
@@ -1704,13 +1702,13 @@ vortex_open(struct net_device *dev)
/* Use the now-standard shared IRQ implementation. */
if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
&boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
- printk(KERN_ERR "%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
+ pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
goto err;
}
if (vp->full_bus_master_rx) { /* Boomerang bus master. */
if (vortex_debug > 2)
- printk(KERN_DEBUG "%s: Filling in the Rx ring.\n", dev->name);
+ pr_debug("%s: Filling in the Rx ring.\n", dev->name);
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
@@ -1728,7 +1726,7 @@ vortex_open(struct net_device *dev)
}
if (i != RX_RING_SIZE) {
int j;
- printk(KERN_EMERG "%s: no memory for rx ring\n", dev->name);
+ pr_emerg("%s: no memory for rx ring\n", dev->name);
for (j = 0; j < i; j++) {
if (vp->rx_skbuff[j]) {
dev_kfree_skb(vp->rx_skbuff[j]);
@@ -1750,7 +1748,7 @@ err_free_irq:
free_irq(dev->irq, dev);
err:
if (vortex_debug > 1)
- printk(KERN_ERR "%s: vortex_open() fails: returning %d\n", dev->name, retval);
+ pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval);
out:
return retval;
}
@@ -1766,9 +1764,9 @@ vortex_timer(unsigned long data)
int media_status, old_window;
if (vortex_debug > 2) {
- printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n",
+ pr_debug("%s: Media selection timer tick happened, %s.\n",
dev->name, media_tbl[dev->if_port].name);
- printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
+ pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
}
disable_irq_lockdep(dev->irq);
@@ -1781,12 +1779,12 @@ vortex_timer(unsigned long data)
netif_carrier_on(dev);
ok = 1;
if (vortex_debug > 1)
- printk(KERN_DEBUG "%s: Media %s has link beat, %x.\n",
+ pr_debug("%s: Media %s has link beat, %x.\n",
dev->name, media_tbl[dev->if_port].name, media_status);
} else {
netif_carrier_off(dev);
if (vortex_debug > 1) {
- printk(KERN_DEBUG "%s: Media %s has no link beat, %x.\n",
+ pr_debug("%s: Media %s has no link beat, %x.\n",
dev->name, media_tbl[dev->if_port].name, media_status);
}
}
@@ -1802,7 +1800,7 @@ vortex_timer(unsigned long data)
break;
default: /* Other media types handled by Tx timeouts. */
if (vortex_debug > 1)
- printk(KERN_DEBUG "%s: Media %s has no indication, %x.\n",
+ pr_debug("%s: Media %s has no indication, %x.\n",
dev->name, media_tbl[dev->if_port].name, media_status);
ok = 1;
}
@@ -1822,13 +1820,11 @@ vortex_timer(unsigned long data)
if (dev->if_port == XCVR_Default) { /* Go back to default. */
dev->if_port = vp->default_media;
if (vortex_debug > 1)
- printk(KERN_DEBUG "%s: Media selection failing, using default "
- "%s port.\n",
+ pr_debug("%s: Media selection failing, using default %s port.\n",
dev->name, media_tbl[dev->if_port].name);
} else {
if (vortex_debug > 1)
- printk(KERN_DEBUG "%s: Media selection failed, now trying "
- "%s port.\n",
+ pr_debug("%s: Media selection failed, now trying %s port.\n",
dev->name, media_tbl[dev->if_port].name);
next_tick = media_tbl[dev->if_port].wait;
}
@@ -1843,13 +1839,13 @@ vortex_timer(unsigned long data)
iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
ioaddr + EL3_CMD);
if (vortex_debug > 1)
- printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config);
+ pr_debug("wrote 0x%08x to Wn3_Config\n", config);
/* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */
}
leave_media_alone:
if (vortex_debug > 2)
- printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
+ pr_debug("%s: Media selection timer finished, %s.\n",
dev->name, media_tbl[dev->if_port].name);
EL3WINDOW(old_window);
@@ -1865,21 +1861,21 @@ static void vortex_tx_timeout(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
- printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+ pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
dev->name, ioread8(ioaddr + TxStatus),
ioread16(ioaddr + EL3_STATUS));
EL3WINDOW(4);
- printk(KERN_ERR " diagnostics: net %04x media %04x dma %08x fifo %04x\n",
+ pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n",
ioread16(ioaddr + Wn4_NetDiag),
ioread16(ioaddr + Wn4_Media),
ioread32(ioaddr + PktStatus),
ioread16(ioaddr + Wn4_FIFODiag));
/* Slight code bloat to be user friendly. */
if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
- printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
+ pr_err("%s: Transmitter encountered 16 collisions --"
" network cable problem?\n", dev->name);
if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
- printk(KERN_ERR "%s: Interrupt posted but not delivered --"
+ pr_err("%s: Interrupt posted but not delivered --"
" IRQ blocked by another device?\n", dev->name);
/* Bad idea here.. but we might as well handle a few events. */
{
@@ -1903,7 +1899,7 @@ static void vortex_tx_timeout(struct net_device *dev)
dev->stats.tx_errors++;
if (vp->full_bus_master_tx) {
- printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
+ pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name);
if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
ioaddr + DownListPtr);
@@ -1938,7 +1934,7 @@ vortex_error(struct net_device *dev, int status)
unsigned char tx_status = 0;
if (vortex_debug > 2) {
- printk(KERN_ERR "%s: vortex_error(), status=0x%x\n", dev->name, status);
+ pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status);
}
if (status & TxComplete) { /* Really "TxError" for us. */
@@ -1946,10 +1942,10 @@ vortex_error(struct net_device *dev, int status)
/* Presumably a tx-timeout. We must merely re-enable. */
if (vortex_debug > 2
|| (tx_status != 0x88 && vortex_debug > 0)) {
- printk(KERN_ERR "%s: Transmit error, Tx status register %2.2x.\n",
+ pr_err("%s: Transmit error, Tx status register %2.2x.\n",
dev->name, tx_status);
if (tx_status == 0x82) {
- printk(KERN_ERR "Probably a duplex mismatch. See "
+ pr_err("Probably a duplex mismatch. See "
"Documentation/networking/vortex.txt\n");
}
dump_tx_ring(dev);
@@ -1975,13 +1971,13 @@ vortex_error(struct net_device *dev, int status)
if (status & StatsFull) { /* Empty statistics. */
static int DoneDidThat;
if (vortex_debug > 4)
- printk(KERN_DEBUG "%s: Updating stats.\n", dev->name);
+ pr_debug("%s: Updating stats.\n", dev->name);
update_stats(ioaddr, dev);
/* HACK: Disable statistics as an interrupt source. */
/* This occurs when we have the wrong media type! */
if (DoneDidThat == 0 &&
ioread16(ioaddr + EL3_STATUS) & StatsFull) {
- printk(KERN_WARNING "%s: Updating statistics failed, disabling "
+ pr_warning("%s: Updating statistics failed, disabling "
"stats as an interrupt source.\n", dev->name);
EL3WINDOW(5);
iowrite16(SetIntrEnb | (ioread16(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
@@ -1998,7 +1994,7 @@ vortex_error(struct net_device *dev, int status)
u16 fifo_diag;
EL3WINDOW(4);
fifo_diag = ioread16(ioaddr + Wn4_FIFODiag);
- printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
+ pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
dev->name, fifo_diag);
/* Adapter failure requires Tx/Rx reset and reinit. */
if (vp->full_bus_master_tx) {
@@ -2006,7 +2002,7 @@ vortex_error(struct net_device *dev, int status)
/* 0x80000000 PCI master abort. */
/* 0x40000000 PCI target abort. */
if (vortex_debug)
- printk(KERN_ERR "%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
+ pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
/* In this case, blow the card away */
/* Must not enter D3 or we can't legally issue the reset! */
@@ -2075,7 +2071,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) {
if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
if (vortex_debug > 2)
- printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
+ pr_debug("%s: Tx error, status %2.2x.\n",
dev->name, tx_status);
if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
@@ -2101,17 +2097,17 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
if (vortex_debug > 6) {
- printk(KERN_DEBUG "boomerang_start_xmit()\n");
- printk(KERN_DEBUG "%s: Trying to send a packet, Tx index %d.\n",
+ pr_debug("boomerang_start_xmit()\n");
+ pr_debug("%s: Trying to send a packet, Tx index %d.\n",
dev->name, vp->cur_tx);
}
if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
if (vortex_debug > 0)
- printk(KERN_WARNING "%s: BUG! Tx Ring full, refusing to send buffer.\n",
+ pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
dev->name);
netif_stop_queue(dev);
- return 1;
+ return NETDEV_TX_BUSY;
}
vp->tx_skbuff[entry] = skb;
@@ -2204,7 +2200,7 @@ vortex_interrupt(int irq, void *dev_id)
status = ioread16(ioaddr + EL3_STATUS);
if (vortex_debug > 6)
- printk("vortex_interrupt(). status=0x%4x\n", status);
+ pr_debug("vortex_interrupt(). status=0x%4x\n", status);
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs cause this */
@@ -2219,19 +2215,19 @@ vortex_interrupt(int irq, void *dev_id)
goto handler_exit;
if (vortex_debug > 4)
- printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
+ pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
dev->name, status, ioread8(ioaddr + Timer));
do {
if (vortex_debug > 5)
- printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
+ pr_debug("%s: In interrupt loop, status %4.4x.\n",
dev->name, status);
if (status & RxComplete)
vortex_rx(dev);
if (status & TxAvailable) {
if (vortex_debug > 5)
- printk(KERN_DEBUG " TX room bit was handled.\n");
+ pr_debug(" TX room bit was handled.\n");
/* There's room in the FIFO for a full-sized packet. */
iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
netif_wake_queue (dev);
@@ -2263,8 +2259,8 @@ vortex_interrupt(int irq, void *dev_id)
}
if (--work_done < 0) {
- printk(KERN_WARNING "%s: Too much work in interrupt, status "
- "%4.4x.\n", dev->name, status);
+ pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
+ dev->name, status);
/* Disable all pending interrupts. */
do {
vp->deferred |= status;
@@ -2281,7 +2277,7 @@ vortex_interrupt(int irq, void *dev_id)
} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
if (vortex_debug > 4)
- printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
+ pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
spin_unlock(&vp->lock);
@@ -2313,14 +2309,14 @@ boomerang_interrupt(int irq, void *dev_id)
status = ioread16(ioaddr + EL3_STATUS);
if (vortex_debug > 6)
- printk(KERN_DEBUG "boomerang_interrupt. status=0x%4x\n", status);
+ pr_debug("boomerang_interrupt. status=0x%4x\n", status);
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs can cause this */
if (status == 0xffff) { /* h/w no longer present (hotplug)? */
if (vortex_debug > 1)
- printk(KERN_DEBUG "boomerang_interrupt(1): status = 0xffff\n");
+ pr_debug("boomerang_interrupt(1): status = 0xffff\n");
goto handler_exit;
}
@@ -2330,16 +2326,16 @@ boomerang_interrupt(int irq, void *dev_id)
}
if (vortex_debug > 4)
- printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
+ pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
dev->name, status, ioread8(ioaddr + Timer));
do {
if (vortex_debug > 5)
- printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
+ pr_debug("%s: In interrupt loop, status %4.4x.\n",
dev->name, status);
if (status & UpComplete) {
iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
if (vortex_debug > 5)
- printk(KERN_DEBUG "boomerang_interrupt->boomerang_rx\n");
+ pr_debug("boomerang_interrupt->boomerang_rx\n");
boomerang_rx(dev);
}
@@ -2374,7 +2370,7 @@ boomerang_interrupt(int irq, void *dev_id)
dev_kfree_skb_irq(skb);
vp->tx_skbuff[entry] = NULL;
} else {
- printk(KERN_DEBUG "boomerang_interrupt: no skb!\n");
+ pr_debug("boomerang_interrupt: no skb!\n");
}
/* dev->stats.tx_packets++; Counted below. */
dirty_tx++;
@@ -2382,7 +2378,7 @@ boomerang_interrupt(int irq, void *dev_id)
vp->dirty_tx = dirty_tx;
if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
if (vortex_debug > 6)
- printk(KERN_DEBUG "boomerang_interrupt: wake queue\n");
+ pr_debug("boomerang_interrupt: wake queue\n");
netif_wake_queue (dev);
}
}
@@ -2392,8 +2388,8 @@ boomerang_interrupt(int irq, void *dev_id)
vortex_error(dev, status);
if (--work_done < 0) {
- printk(KERN_WARNING "%s: Too much work in interrupt, status "
- "%4.4x.\n", dev->name, status);
+ pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
+ dev->name, status);
/* Disable all pending interrupts. */
do {
vp->deferred |= status;
@@ -2413,7 +2409,7 @@ boomerang_interrupt(int irq, void *dev_id)
} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
if (vortex_debug > 4)
- printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
+ pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
spin_unlock(&vp->lock);
@@ -2428,13 +2424,13 @@ static int vortex_rx(struct net_device *dev)
short rx_status;
if (vortex_debug > 5)
- printk(KERN_DEBUG "vortex_rx(): status %4.4x, rx_status %4.4x.\n",
+ pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n",
ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
if (rx_status & 0x4000) { /* Error, update stats. */
unsigned char rx_error = ioread8(ioaddr + RxErrors);
if (vortex_debug > 2)
- printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+ pr_debug(" Rx error: status %2.2x.\n", rx_error);
dev->stats.rx_errors++;
if (rx_error & 0x01) dev->stats.rx_over_errors++;
if (rx_error & 0x02) dev->stats.rx_length_errors++;
@@ -2448,7 +2444,7 @@ static int vortex_rx(struct net_device *dev)
skb = dev_alloc_skb(pkt_len + 5);
if (vortex_debug > 4)
- printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+ pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
if (skb != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
@@ -2478,8 +2474,8 @@ static int vortex_rx(struct net_device *dev)
break;
continue;
} else if (vortex_debug > 0)
- printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
- "size %d.\n", dev->name, pkt_len);
+ pr_notice("%s: No memory to allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
dev->stats.rx_dropped++;
}
issue_and_wait(dev, RxDiscard);
@@ -2498,7 +2494,7 @@ boomerang_rx(struct net_device *dev)
int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
if (vortex_debug > 5)
- printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
+ pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
if (--rx_work_limit < 0)
@@ -2506,7 +2502,7 @@ boomerang_rx(struct net_device *dev)
if (rx_status & RxDError) { /* Error, update stats. */
unsigned char rx_error = rx_status >> 16;
if (vortex_debug > 2)
- printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+ pr_debug(" Rx error: status %2.2x.\n", rx_error);
dev->stats.rx_errors++;
if (rx_error & 0x01) dev->stats.rx_over_errors++;
if (rx_error & 0x02) dev->stats.rx_length_errors++;
@@ -2520,7 +2516,7 @@ boomerang_rx(struct net_device *dev)
dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
if (vortex_debug > 4)
- printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+ pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
/* Check if the packet is long enough to just accept without
@@ -2566,7 +2562,7 @@ boomerang_rx(struct net_device *dev)
if (skb == NULL) {
static unsigned long last_jif;
if (time_after(jiffies, last_jif + 10 * HZ)) {
- printk(KERN_WARNING "%s: memory shortage\n", dev->name);
+ pr_warning("%s: memory shortage\n", dev->name);
last_jif = jiffies;
}
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
@@ -2598,7 +2594,7 @@ rx_oom_timer(unsigned long arg)
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
boomerang_rx(dev);
if (vortex_debug > 1) {
- printk(KERN_DEBUG "%s: rx_oom_timer %s\n", dev->name,
+ pr_debug("%s: rx_oom_timer %s\n", dev->name,
((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
}
spin_unlock_irq(&vp->lock);
@@ -2655,9 +2651,9 @@ vortex_close(struct net_device *dev)
vortex_down(dev, 1);
if (vortex_debug > 1) {
- printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
+ pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
- printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
+ pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d"
" tx_queued %d Rx pre-checksummed %d.\n",
dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
}
@@ -2666,8 +2662,7 @@ vortex_close(struct net_device *dev)
if (vp->rx_csumhits &&
(vp->drv_flags & HAS_HWCKSM) == 0 &&
(vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
- printk(KERN_WARNING "%s supports hardware checksums, and we're "
- "not using them!\n", dev->name);
+ pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name);
}
#endif
@@ -2717,16 +2712,16 @@ dump_tx_ring(struct net_device *dev)
int i;
int stalled = ioread32(ioaddr + PktStatus) & 0x04; /* Possible racy. But it's only debug stuff */
- printk(KERN_ERR " Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
+ pr_err(" Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
vp->full_bus_master_tx,
vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
- printk(KERN_ERR " Transmit list %8.8x vs. %p.\n",
+ pr_err(" Transmit list %8.8x vs. %p.\n",
ioread32(ioaddr + DownListPtr),
&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
issue_and_wait(dev, DownStall);
for (i = 0; i < TX_RING_SIZE; i++) {
- printk(KERN_ERR " %d: @%p length %8.8x status %8.8x\n", i,
+ pr_err(" %d: @%p length %8.8x status %8.8x\n", i,
&vp->tx_ring[i],
#if DO_ZEROCOPY
le32_to_cpu(vp->tx_ring[i].frag[0].length),
@@ -2970,7 +2965,7 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
if (vortex_debug > 3)
- printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
+ pr_notice("%s: Setting promiscuous mode.\n", dev->name);
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
} else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
@@ -3145,8 +3140,7 @@ static void acpi_set_WOL(struct net_device *dev)
iowrite16(RxEnable, ioaddr + EL3_CMD);
if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
- printk(KERN_INFO "%s: WOL not supported.\n",
- pci_name(VORTEX_PCI(vp)));
+ pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));
vp->enable_wol = 0;
return;
@@ -3164,7 +3158,7 @@ static void __devexit vortex_remove_one(struct pci_dev *pdev)
struct vortex_private *vp;
if (!dev) {
- printk("vortex_remove_one called for Compaq device!\n");
+ pr_err("vortex_remove_one called for Compaq device!\n");
BUG();
}
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 7a331acc34a..69f5b7d298a 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -541,7 +541,7 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
if (!TX_BUFFS_AVAIL)
- return -1;
+ return NETDEV_TX_LOCKED;
netif_stop_queue (dev);
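The 7990 change above (like the a2065, at91_ether and ether3 hunks later in this diff) replaces magic integer returns from the transmit path with the named NETDEV_TX_* codes. A hedged sketch of the convention; ring_has_room() is a hypothetical stand-in for whatever ring-space test a real driver uses.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical stand-in for a driver's ring-space test. */
static bool ring_has_room(struct net_device *dev)
{
        return true;
}

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (!ring_has_room(dev)) {
                netif_stop_queue(dev);
                /*
                 * NETDEV_TX_BUSY tells the core to requeue the skb without
                 * freeing it; the bare 1 and -1 returns it replaces conveyed
                 * the same intent far less clearly.  NETDEV_TX_LOCKED is
                 * meant for lockless (LLTX) drivers that could not take
                 * their private TX lock.
                 */
                return NETDEV_TX_BUSY;
        }

        /* ... hand the packet to the hardware here ... */
        return NETDEV_TX_OK;
}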
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 02330f3d5a5..50efde11ea6 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -471,8 +471,7 @@ static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
u32 status, u32 len)
{
if (netif_msg_rx_err (cp))
- printk (KERN_DEBUG
- "%s: rx err, slot %d status 0x%x len %d\n",
+ pr_debug("%s: rx err, slot %d status 0x%x len %d\n",
cp->dev->name, rx_tail, status, len);
cp->dev->stats.rx_errors++;
if (status & RxErrFrame)
@@ -547,7 +546,7 @@ rx_status_loop:
}
if (netif_msg_rx_status(cp))
- printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
+ pr_debug("%s: rx slot %d status 0x%x len %d\n",
dev->name, rx_tail, status, len);
buflen = cp->rx_buf_sz + NET_IP_ALIGN;
@@ -626,7 +625,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
return IRQ_NONE;
if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
+ pr_debug("%s: intr, status %04x cmd %02x cpcmd %04x\n",
dev->name, status, cpr8(Cmd), cpr16(CpCmd));
cpw16(IntrStatus, status & ~cp_rx_intr_mask);
@@ -658,7 +657,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
- printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
+ pr_err("%s: PCI bus error, status=%04x, PCI status=%04x\n",
dev->name, status, pci_status);
/* TODO: reset hardware */
@@ -705,7 +704,7 @@ static void cp_tx (struct cp_private *cp)
if (status & LastFrag) {
if (status & (TxError | TxFIFOUnder)) {
if (netif_msg_tx_err(cp))
- printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
+ pr_debug("%s: tx err, status 0x%x\n",
cp->dev->name, status);
cp->dev->stats.tx_errors++;
if (status & TxOWC)
@@ -722,7 +721,7 @@ static void cp_tx (struct cp_private *cp)
cp->dev->stats.tx_packets++;
cp->dev->stats.tx_bytes += skb->len;
if (netif_msg_tx_done(cp))
- printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
+ pr_debug("%s: tx done, slot %d\n", cp->dev->name, tx_tail);
}
dev_kfree_skb_irq(skb);
}
@@ -755,9 +754,9 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&cp->lock, intr_flags);
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
+ pr_err(PFX "%s: BUG! Tx Ring full when queue awake!\n",
dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
#if CP_VLAN_TAG_USED
@@ -882,7 +881,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
}
cp->tx_head = entry;
if (netif_msg_tx_queued(cp))
- printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
+ pr_debug("%s: tx queued, slot %d, skblen %d\n",
dev->name, entry, skb->len);
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
@@ -996,7 +995,7 @@ static void cp_reset_hw (struct cp_private *cp)
schedule_timeout_uninterruptible(10);
}
- printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
+ pr_err("%s: hardware reset timeout\n", cp->dev->name);
}
static inline void cp_start_hw (struct cp_private *cp)
@@ -1166,7 +1165,7 @@ static int cp_open (struct net_device *dev)
int rc;
if (netif_msg_ifup(cp))
- printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
+ pr_debug("%s: enabling interface\n", dev->name);
rc = cp_alloc_rings(cp);
if (rc)
@@ -1201,7 +1200,7 @@ static int cp_close (struct net_device *dev)
napi_disable(&cp->napi);
if (netif_msg_ifdown(cp))
- printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
+ pr_debug("%s: disabling interface\n", dev->name);
spin_lock_irqsave(&cp->lock, flags);
@@ -1224,7 +1223,7 @@ static void cp_tx_timeout(struct net_device *dev)
unsigned long flags;
int rc;
- printk(KERN_WARNING "%s: Transmit timeout, status %2x %4x %4x %4x\n",
+ pr_warning("%s: Transmit timeout, status %2x %4x %4x %4x\n",
dev->name, cpr8(Cmd), cpr16(CpCmd),
cpr16(IntrStatus), cpr16(IntrMask));
@@ -1873,7 +1872,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
#ifndef MODULE
static int version_printed;
if (version_printed++ == 0)
- printk("%s", version);
+ pr_info("%s", version);
#endif
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
@@ -1995,8 +1994,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_out_iomap;
- printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
- "%pM, IRQ %d\n",
+ pr_info("%s: RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
dev->name,
dev->base_addr,
dev->dev_addr,
@@ -2113,7 +2111,7 @@ static struct pci_driver cp_driver = {
static int __init cp_init (void)
{
#ifdef MODULE
- printk("%s", version);
+ pr_info("%s", version);
#endif
return pci_register_driver(&cp_driver);
}
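The 8139cp hunks keep their netif_msg_*() gating and only swap the printk for pr_debug()/pr_err(). A small sketch of that gating, assuming a hypothetical private struct that carries the usual msg_enable bitmask.

#include <linux/netdevice.h>

struct example_priv {
        struct net_device *dev;
        u32 msg_enable;         /* NETIF_MSG_* bitmask used by netif_msg_*() */
};

static void report_rx_error(struct example_priv *cp, int slot, u32 status)
{
        /* netif_msg_rx_err(cp) tests cp->msg_enable & NETIF_MSG_RX_ERR */
        if (netif_msg_rx_err(cp))
                pr_debug("%s: rx err, slot %d status 0x%x\n",
                         cp->dev->name, slot, status);
}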
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 1fc45431a62..8ae72ec1445 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -126,19 +126,12 @@
#undef RTL8139_NDEBUG
-#if RTL8139_DEBUG
-/* note: prints function name for you */
-# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
-#else
-# define DPRINTK(fmt, args...)
-#endif
-
#ifdef RTL8139_NDEBUG
# define assert(expr) do {} while (0)
#else
# define assert(expr) \
if(unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
+ pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
#expr, __FILE__, __func__, __LINE__); \
}
#endif
@@ -784,8 +777,8 @@ static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
/* set this immediately, we need to know before
* we talk to the chip directly */
- DPRINTK("PIO region size == 0x%02X\n", pio_len);
- DPRINTK("MMIO region size == 0x%02lX\n", mmio_len);
+ pr_debug("PIO region size == 0x%02lX\n", pio_len);
+ pr_debug("MMIO region size == 0x%02lX\n", mmio_len);
retry:
if (use_io) {
@@ -865,19 +858,17 @@ retry:
}
/* if unknown chip, assume array element #0, original RTL-8139 in this case */
- dev_printk (KERN_DEBUG, &pdev->dev,
- "unknown chip version, assuming RTL-8139\n");
- dev_printk (KERN_DEBUG, &pdev->dev,
- "TxConfig = 0x%lx\n", RTL_R32 (TxConfig));
+ dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n");
+ dev_dbg(&pdev->dev, "TxConfig = 0x%lx\n", RTL_R32 (TxConfig));
tp->chipset = 0;
match:
- DPRINTK ("chipset id (%d) == index %d, '%s'\n",
+ pr_debug("chipset id (%d) == index %d, '%s'\n",
version, i, rtl_chip_info[i].name);
if (tp->chipset >= CH_8139B) {
u8 new_tmp8 = tmp8 = RTL_R8 (Config1);
- DPRINTK("PCI PM wakeup\n");
+ pr_debug("PCI PM wakeup\n");
if ((rtl_chip_info[tp->chipset].flags & HasLWake) &&
(tmp8 & LWAKE))
new_tmp8 &= ~LWAKE;
@@ -896,7 +887,7 @@ match:
}
}
} else {
- DPRINTK("Old chip wakeup\n");
+ pr_debug("Old chip wakeup\n");
tmp8 = RTL_R8 (Config1);
tmp8 &= ~(SLEEP | PWRDN);
RTL_W8 (Config1, tmp8);
@@ -949,7 +940,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
{
static int printed_version;
if (!printed_version++)
- printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
+ pr_info(RTL8139_DRIVER_NAME "\n");
}
#endif
@@ -965,7 +956,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
pdev->device == PCI_DEVICE_ID_REALTEK_8139 &&
pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS &&
pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) {
- printk(KERN_INFO "8139too: OQO Model 2 detected. Forcing PIO\n");
+ pr_info("8139too: OQO Model 2 detected. Forcing PIO\n");
use_io = 1;
}
@@ -1018,21 +1009,20 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
tp->mii.reg_num_mask = 0x1f;
/* dev is fully set up and ready to use now */
- DPRINTK("about to register device named %s (%p)...\n", dev->name, dev);
+ pr_debug("about to register device named %s (%p)...\n", dev->name, dev);
i = register_netdev (dev);
if (i) goto err_out;
pci_set_drvdata (pdev, dev);
- printk (KERN_INFO "%s: %s at 0x%lx, "
- "%pM, IRQ %d\n",
+ pr_info("%s: %s at 0x%lx, %pM, IRQ %d\n",
dev->name,
board_info[ent->driver_data].name,
dev->base_addr,
dev->dev_addr,
dev->irq);
- printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n",
+ pr_debug("%s: Identified 8139 chip type '%s'\n",
dev->name, rtl_chip_info[tp->chipset].name);
/* Find the connected MII xcvrs.
@@ -1046,14 +1036,12 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
if (mii_status != 0xffff && mii_status != 0x0000) {
u16 advertising = mdio_read(dev, phy, 4);
tp->phys[phy_idx++] = phy;
- printk(KERN_INFO "%s: MII transceiver %d status 0x%4.4x "
- "advertising %4.4x.\n",
+ pr_info("%s: MII transceiver %d status 0x%4.4x advertising %4.4x.\n",
dev->name, phy, mii_status, advertising);
}
}
if (phy_idx == 0) {
- printk(KERN_INFO "%s: No MII transceivers found! Assuming SYM "
- "transceiver.\n",
+ pr_info("%s: No MII transceivers found! Assuming SYM transceiver.\n",
dev->name);
tp->phys[0] = 32;
}
@@ -1073,13 +1061,13 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0)
tp->mii.full_duplex = full_duplex[board_idx];
if (tp->mii.full_duplex) {
- printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
+ pr_info("%s: Media type forced to Full Duplex.\n", dev->name);
/* Changing the MII-advertised media because might prevent
re-connection. */
tp->mii.force_media = 1;
}
if (tp->default_port) {
- printk(KERN_INFO " Forcing %dMbps %s-duplex operation.\n",
+ pr_info(" Forcing %dMbps %s-duplex operation.\n",
(option & 0x20 ? 100 : 10),
(option & 0x10 ? "full" : "half"));
mdio_write(dev, tp->phys[0], 0,
@@ -1342,7 +1330,7 @@ static int rtl8139_open (struct net_device *dev)
netif_start_queue (dev);
if (netif_msg_ifup(tp))
- printk(KERN_DEBUG "%s: rtl8139_open() ioaddr %#llx IRQ %d"
+ pr_debug("%s: rtl8139_open() ioaddr %#llx IRQ %d"
" GP Pins %2.2x %s-duplex.\n", dev->name,
(unsigned long long)pci_resource_start (tp->pci_dev, 1),
dev->irq, RTL_R8 (MediaStatus),
@@ -1404,7 +1392,7 @@ static void rtl8139_hw_start (struct net_device *dev)
RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic);
}
- DPRINTK("init buffer addresses\n");
+ pr_debug("init buffer addresses\n");
/* Lock Config[01234] and BMCR register writes */
RTL_W8 (Cfg9346, Cfg9346_Lock);
@@ -1566,14 +1554,13 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
tp->mii.full_duplex = duplex;
if (mii_lpa) {
- printk (KERN_INFO
- "%s: Setting %s-duplex based on MII #%d link"
+ pr_info("%s: Setting %s-duplex based on MII #%d link"
" partner ability of %4.4x.\n",
dev->name,
tp->mii.full_duplex ? "full" : "half",
tp->phys[0], mii_lpa);
} else {
- printk(KERN_INFO"%s: media is unconnected, link down, or incompatible connection\n",
+ pr_info("%s: media is unconnected, link down, or incompatible connection\n",
dev->name);
}
#if 0
@@ -1588,11 +1575,11 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
rtl8139_tune_twister (dev, tp);
- DPRINTK ("%s: Media selection tick, Link partner %4.4x.\n",
+ pr_debug("%s: Media selection tick, Link partner %4.4x.\n",
dev->name, RTL_R16 (NWayLPAR));
- DPRINTK ("%s: Other registers are IntMask %4.4x IntStatus %4.4x\n",
+ pr_debug("%s: Other registers are IntMask %4.4x IntStatus %4.4x\n",
dev->name, RTL_R16 (IntrMask), RTL_R16 (IntrStatus));
- DPRINTK ("%s: Chip config %2.2x %2.2x.\n",
+ pr_debug("%s: Chip config %2.2x %2.2x.\n",
dev->name, RTL_R8 (Config0),
RTL_R8 (Config1));
}
@@ -1652,14 +1639,14 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
int i;
u8 tmp8;
- printk (KERN_DEBUG "%s: Transmit timeout, status %2.2x %4.4x %4.4x "
- "media %2.2x.\n", dev->name, RTL_R8 (ChipCmd),
+ pr_debug("%s: Transmit timeout, status %2.2x %4.4x %4.4x media %2.2x.\n",
+ dev->name, RTL_R8 (ChipCmd),
RTL_R16(IntrStatus), RTL_R16(IntrMask), RTL_R8(MediaStatus));
/* Emit info to figure out what went wrong. */
- printk (KERN_DEBUG "%s: Tx queue start entry %ld dirty entry %ld.\n",
+ pr_debug("%s: Tx queue start entry %ld dirty entry %ld.\n",
dev->name, tp->cur_tx, tp->dirty_tx);
for (i = 0; i < NUM_TX_DESC; i++)
- printk (KERN_DEBUG "%s: Tx descriptor %d is %8.8lx.%s\n",
+ pr_debug("%s: Tx descriptor %d is %8.8lx.%s\n",
dev->name, i, RTL_R32 (TxStatus0 + (i * 4)),
i == tp->dirty_tx % NUM_TX_DESC ?
" (queue head)" : "");
@@ -1741,7 +1728,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&tp->lock, flags);
if (netif_msg_tx_queued(tp))
- printk (KERN_DEBUG "%s: Queued Tx packet size %u to slot %d.\n",
+ pr_debug("%s: Queued Tx packet size %u to slot %d.\n",
dev->name, len, entry);
return 0;
@@ -1772,7 +1759,7 @@ static void rtl8139_tx_interrupt (struct net_device *dev,
if (txstatus & (TxOutOfWindow | TxAborted)) {
/* There was an major error, log it. */
if (netif_msg_tx_err(tp))
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ pr_debug("%s: Transmit error, Tx status %8.8x.\n",
dev->name, txstatus);
dev->stats.tx_errors++;
if (txstatus & TxAborted) {
@@ -1803,7 +1790,7 @@ static void rtl8139_tx_interrupt (struct net_device *dev,
#ifndef RTL8139_NDEBUG
if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
- printk (KERN_ERR "%s: Out-of-sync dirty pointer, %ld vs. %ld.\n",
+ pr_err("%s: Out-of-sync dirty pointer, %ld vs. %ld.\n",
dev->name, dirty_tx, tp->cur_tx);
dirty_tx += NUM_TX_DESC;
}
@@ -1828,12 +1815,12 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
#endif
if (netif_msg_rx_err (tp))
- printk(KERN_DEBUG "%s: Ethernet frame had errors, status %8.8x.\n",
+ pr_debug("%s: Ethernet frame had errors, status %8.8x.\n",
dev->name, rx_status);
dev->stats.rx_errors++;
if (!(rx_status & RxStatusOK)) {
if (rx_status & RxTooLong) {
- DPRINTK ("%s: Oversized Ethernet frame, status %4.4x!\n",
+ pr_debug("%s: Oversized Ethernet frame, status %4.4x!\n",
dev->name, rx_status);
/* A.C.: The chip hangs here. */
}
@@ -1866,7 +1853,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
break;
}
if (tmp_work <= 0)
- printk (KERN_WARNING PFX "rx stop wait too long\n");
+ pr_warning(PFX "rx stop wait too long\n");
/* restart receive */
tmp_work = 200;
while (--tmp_work > 0) {
@@ -1877,7 +1864,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
break;
}
if (tmp_work <= 0)
- printk (KERN_WARNING PFX "tx/rx enable wait too long\n");
+ pr_warning(PFX "tx/rx enable wait too long\n");
/* and reinitialize all rx related registers */
RTL_W8_F (Cfg9346, Cfg9346_Unlock);
@@ -1888,7 +1875,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
RTL_W32 (RxConfig, tp->rx_config);
tp->cur_rx = 0;
- DPRINTK("init buffer addresses\n");
+ pr_debug("init buffer addresses\n");
/* Lock Config[01234] and BMCR register writes */
RTL_W8 (Cfg9346, Cfg9346_Lock);
@@ -1942,7 +1929,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
unsigned int cur_rx = tp->cur_rx;
unsigned int rx_size = 0;
- DPRINTK ("%s: In rtl8139_rx(), current %4.4x BufAddr %4.4x,"
+ pr_debug("%s: In rtl8139_rx(), current %4.4x BufAddr %4.4x,"
" free to %4.4x, Cmd %2.2x.\n", dev->name, (u16)cur_rx,
RTL_R16 (RxBufAddr),
RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
@@ -1962,17 +1949,17 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
pkt_size = rx_size - 4;
if (netif_msg_rx_status(tp))
- printk(KERN_DEBUG "%s: rtl8139_rx() status %4.4x, size %4.4x,"
+ pr_debug("%s: rtl8139_rx() status %4.4x, size %4.4x,"
" cur %4.4x.\n", dev->name, rx_status,
rx_size, cur_rx);
#if RTL8139_DEBUG > 2
{
int i;
- DPRINTK ("%s: Frame contents ", dev->name);
+ pr_debug("%s: Frame contents ", dev->name);
for (i = 0; i < 70; i++)
- printk (" %2.2x",
+ pr_cont(" %2.2x",
rx_ring[ring_offset + i]);
- printk (".\n");
+ pr_cont(".\n");
}
#endif
@@ -1984,12 +1971,12 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
if (!tp->fifo_copy_timeout)
tp->fifo_copy_timeout = jiffies + 2;
else if (time_after(jiffies, tp->fifo_copy_timeout)) {
- DPRINTK ("%s: hung FIFO. Reset.", dev->name);
+ pr_debug("%s: hung FIFO. Reset.", dev->name);
rx_size = 0;
goto no_early_rx;
}
if (netif_msg_intr(tp)) {
- printk(KERN_DEBUG "%s: fifo copy in progress.",
+ pr_debug("%s: fifo copy in progress.",
dev->name);
}
tp->xstats.early_rx++;
@@ -2033,8 +2020,7 @@ no_early_rx:
netif_receive_skb (skb);
} else {
if (net_ratelimit())
- printk (KERN_WARNING
- "%s: Memory squeeze, dropping packet.\n",
+ pr_warning("%s: Memory squeeze, dropping packet.\n",
dev->name);
dev->stats.rx_dropped++;
}
@@ -2049,12 +2035,10 @@ no_early_rx:
if (unlikely(!received || rx_size == 0xfff0))
rtl8139_isr_ack(tp);
-#if RTL8139_DEBUG > 1
- DPRINTK ("%s: Done rtl8139_rx(), current %4.4x BufAddr %4.4x,"
+ pr_debug("%s: Done rtl8139_rx(), current %4.4x BufAddr %4.4x,"
" free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
RTL_R16 (RxBufAddr),
RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
-#endif
tp->cur_rx = cur_rx;
@@ -2075,7 +2059,7 @@ static void rtl8139_weird_interrupt (struct net_device *dev,
void __iomem *ioaddr,
int status, int link_changed)
{
- DPRINTK ("%s: Abnormal interrupt, status %8.8x.\n",
+ pr_debug("%s: Abnormal interrupt, status %8.8x.\n",
dev->name, status);
assert (dev != NULL);
@@ -2104,7 +2088,7 @@ static void rtl8139_weird_interrupt (struct net_device *dev,
pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status);
- printk (KERN_ERR "%s: PCI Bus error %4.4x.\n",
+ pr_err("%s: PCI Bus error %4.4x.\n",
dev->name, pci_cmd_status);
}
}
@@ -2198,7 +2182,7 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
out:
spin_unlock (&tp->lock);
- DPRINTK ("%s: exiting interrupt, intr_status=%#4.4x.\n",
+ pr_debug("%s: exiting interrupt, intr_status=%#4.4x.\n",
dev->name, RTL_R16 (IntrStatus));
return IRQ_RETVAL(handled);
}
@@ -2249,7 +2233,7 @@ static int rtl8139_close (struct net_device *dev)
napi_disable(&tp->napi);
if (netif_msg_ifdown(tp))
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was 0x%4.4x.\n",
+ pr_debug("%s: Shutting down ethercard, status was 0x%4.4x.\n",
dev->name, RTL_R16 (IntrStatus));
spin_lock_irqsave (&tp->lock, flags);
@@ -2292,11 +2276,11 @@ static int rtl8139_close (struct net_device *dev)
other threads or interrupts aren't messing with the 8139. */
static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
- struct rtl8139_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->mmio_addr;
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
- spin_lock_irq(&np->lock);
- if (rtl_chip_info[np->chipset].flags & HasLWake) {
+ spin_lock_irq(&tp->lock);
+ if (rtl_chip_info[tp->chipset].flags & HasLWake) {
u8 cfg3 = RTL_R8 (Config3);
u8 cfg5 = RTL_R8 (Config5);
@@ -2317,7 +2301,7 @@ static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (cfg5 & Cfg5_BWF)
wol->wolopts |= WAKE_BCAST;
}
- spin_unlock_irq(&np->lock);
+ spin_unlock_irq(&tp->lock);
}
@@ -2326,19 +2310,19 @@ static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
aren't messing with the 8139. */
static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
- struct rtl8139_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->mmio_addr;
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
u32 support;
u8 cfg3, cfg5;
- support = ((rtl_chip_info[np->chipset].flags & HasLWake)
+ support = ((rtl_chip_info[tp->chipset].flags & HasLWake)
? (WAKE_PHY | WAKE_MAGIC
| WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)
: 0);
if (wol->wolopts & ~support)
return -EINVAL;
- spin_lock_irq(&np->lock);
+ spin_lock_irq(&tp->lock);
cfg3 = RTL_R8 (Config3) & ~(Cfg3_LinkUp | Cfg3_Magic);
if (wol->wolopts & WAKE_PHY)
cfg3 |= Cfg3_LinkUp;
@@ -2359,87 +2343,87 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts & WAKE_BCAST)
cfg5 |= Cfg5_BWF;
RTL_W8 (Config5, cfg5); /* need not unlock via Cfg9346 */
- spin_unlock_irq(&np->lock);
+ spin_unlock_irq(&tp->lock);
return 0;
}
static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- struct rtl8139_private *np = netdev_priv(dev);
+ struct rtl8139_private *tp = netdev_priv(dev);
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
- info->regdump_len = np->regs_len;
+ strcpy(info->bus_info, pci_name(tp->pci_dev));
+ info->regdump_len = tp->regs_len;
}
static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct rtl8139_private *np = netdev_priv(dev);
- spin_lock_irq(&np->lock);
- mii_ethtool_gset(&np->mii, cmd);
- spin_unlock_irq(&np->lock);
+ struct rtl8139_private *tp = netdev_priv(dev);
+ spin_lock_irq(&tp->lock);
+ mii_ethtool_gset(&tp->mii, cmd);
+ spin_unlock_irq(&tp->lock);
return 0;
}
static int rtl8139_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct rtl8139_private *np = netdev_priv(dev);
+ struct rtl8139_private *tp = netdev_priv(dev);
int rc;
- spin_lock_irq(&np->lock);
- rc = mii_ethtool_sset(&np->mii, cmd);
- spin_unlock_irq(&np->lock);
+ spin_lock_irq(&tp->lock);
+ rc = mii_ethtool_sset(&tp->mii, cmd);
+ spin_unlock_irq(&tp->lock);
return rc;
}
static int rtl8139_nway_reset(struct net_device *dev)
{
- struct rtl8139_private *np = netdev_priv(dev);
- return mii_nway_restart(&np->mii);
+ struct rtl8139_private *tp = netdev_priv(dev);
+ return mii_nway_restart(&tp->mii);
}
static u32 rtl8139_get_link(struct net_device *dev)
{
- struct rtl8139_private *np = netdev_priv(dev);
- return mii_link_ok(&np->mii);
+ struct rtl8139_private *tp = netdev_priv(dev);
+ return mii_link_ok(&tp->mii);
}
static u32 rtl8139_get_msglevel(struct net_device *dev)
{
- struct rtl8139_private *np = netdev_priv(dev);
- return np->msg_enable;
+ struct rtl8139_private *tp = netdev_priv(dev);
+ return tp->msg_enable;
}
static void rtl8139_set_msglevel(struct net_device *dev, u32 datum)
{
- struct rtl8139_private *np = netdev_priv(dev);
- np->msg_enable = datum;
+ struct rtl8139_private *tp = netdev_priv(dev);
+ tp->msg_enable = datum;
}
static int rtl8139_get_regs_len(struct net_device *dev)
{
- struct rtl8139_private *np;
+ struct rtl8139_private *tp;
/* TODO: we are too slack to do reg dumping for pio, for now */
if (use_io)
return 0;
- np = netdev_priv(dev);
- return np->regs_len;
+ tp = netdev_priv(dev);
+ return tp->regs_len;
}
static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
{
- struct rtl8139_private *np;
+ struct rtl8139_private *tp;
/* TODO: we are too slack to do reg dumping for pio, for now */
if (use_io)
return;
- np = netdev_priv(dev);
+ tp = netdev_priv(dev);
regs->version = RTL_REGS_VER;
- spin_lock_irq(&np->lock);
- memcpy_fromio(regbuf, np->mmio_addr, regs->len);
- spin_unlock_irq(&np->lock);
+ spin_lock_irq(&tp->lock);
+ memcpy_fromio(regbuf, tp->mmio_addr, regs->len);
+ spin_unlock_irq(&tp->lock);
}
static int rtl8139_get_sset_count(struct net_device *dev, int sset)
@@ -2454,12 +2438,12 @@ static int rtl8139_get_sset_count(struct net_device *dev, int sset)
static void rtl8139_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
{
- struct rtl8139_private *np = netdev_priv(dev);
+ struct rtl8139_private *tp = netdev_priv(dev);
- data[0] = np->xstats.early_rx;
- data[1] = np->xstats.tx_buf_mapped;
- data[2] = np->xstats.tx_timeouts;
- data[3] = np->xstats.rx_lost_in_ring;
+ data[0] = tp->xstats.early_rx;
+ data[1] = tp->xstats.tx_buf_mapped;
+ data[2] = tp->xstats.tx_timeouts;
+ data[3] = tp->xstats.rx_lost_in_ring;
}
static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2486,15 +2470,15 @@ static const struct ethtool_ops rtl8139_ethtool_ops = {
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct rtl8139_private *np = netdev_priv(dev);
+ struct rtl8139_private *tp = netdev_priv(dev);
int rc;
if (!netif_running(dev))
return -EINVAL;
- spin_lock_irq(&np->lock);
- rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
- spin_unlock_irq(&np->lock);
+ spin_lock_irq(&tp->lock);
+ rc = generic_mii_ioctl(&tp->mii, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&tp->lock);
return rc;
}
@@ -2527,7 +2511,7 @@ static void __set_rx_mode (struct net_device *dev)
int i, rx_mode;
u32 tmp;
- DPRINTK ("%s: rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n",
+ pr_debug("%s: rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n",
dev->name, dev->flags, RTL_R32 (RxConfig));
/* Note: do not reorder, GCC is clever about common statements. */
@@ -2643,7 +2627,7 @@ static int __init rtl8139_init_module (void)
* even if no 8139 board is found.
*/
#ifdef MODULE
- printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
+ pr_info(RTL8139_DRIVER_NAME "\n");
#endif
return pci_register_driver(&rtl8139_pci_driver);
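One hunk in the 8139too conversion above rewrites a piecewise frame dump as pr_debug() followed by pr_cont(). A sketch of that idiom; the buffer and length are illustrative, not taken from the driver.

#include <linux/kernel.h>
#include <linux/types.h>

static void dump_frame(const char *name, const u8 *buf, int len)
{
        int i;

        pr_debug("%s: Frame contents ", name);
        for (i = 0; i < len; i++)
                pr_cont(" %2.2x", buf[i]);      /* KERN_CONT: continue the line */
        pr_cont(".\n");
}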
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index cca94b9c08a..77547545509 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -122,13 +122,13 @@ static char version[] __initdata =
#define ISCP_BUSY 0x00010000
#define MACH_IS_APRICOT 0
#else
-#define WSWAPrfd(x) ((struct i596_rfd *)(x))
-#define WSWAPrbd(x) ((struct i596_rbd *)(x))
-#define WSWAPiscp(x) ((struct i596_iscp *)(x))
-#define WSWAPscb(x) ((struct i596_scb *)(x))
-#define WSWAPcmd(x) ((struct i596_cmd *)(x))
-#define WSWAPtbd(x) ((struct i596_tbd *)(x))
-#define WSWAPchar(x) ((char *)(x))
+#define WSWAPrfd(x) ((struct i596_rfd *)((long)x))
+#define WSWAPrbd(x) ((struct i596_rbd *)((long)x))
+#define WSWAPiscp(x) ((struct i596_iscp *)((long)x))
+#define WSWAPscb(x) ((struct i596_scb *)((long)x))
+#define WSWAPcmd(x) ((struct i596_cmd *)((long)x))
+#define WSWAPtbd(x) ((struct i596_tbd *)((long)x))
+#define WSWAPchar(x) ((char *)((long)x))
#define ISCP_BUSY 0x0001
#define MACH_IS_APRICOT 1
#endif
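The 82596 WSWAP*() macros gain an intermediate (long) cast on the non-swapping branch, presumably so that turning a 32-bit descriptor token into a pointer does not warn on 64-bit builds. A stand-in illustration under that assumption; the struct name is hypothetical.

#include <linux/types.h>

struct example_rfd;     /* opaque stand-in for struct i596_rfd */

static struct example_rfd *to_rfd(u32 token)
{
        /* Widening through long first avoids int-to-pointer-cast warnings. */
        return (struct example_rfd *)(long)token;
}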
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index ec3e22e6306..21153dea8eb 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -74,14 +74,8 @@ EXPORT_SYMBOL(ei_netdev_ops);
struct net_device *__alloc_ei_netdev(int size)
{
struct net_device *dev = ____alloc_ei_netdev(size);
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
- if (dev) {
- dev->hard_start_xmit = ei_start_xmit;
- dev->get_stats = ei_get_stats;
- dev->set_multicast_list = ei_set_multicast_list;
- dev->tx_timeout = ei_tx_timeout;
- }
-#endif
+ if (dev)
+ dev->netdev_ops = &ei_netdev_ops;
return dev;
}
EXPORT_SYMBOL(__alloc_ei_netdev);
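The 8390 and 8390p changes drop the CONFIG_COMPAT_NET_DEV_OPS field assignments in favour of pointing dev->netdev_ops at an ops table (the Kconfig hunk further down removes the compat option itself). A generic sketch of the shape involved; the function names are placeholders, not the 8390 helpers.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_open(struct net_device *dev)  { return 0; }
static int example_stop(struct net_device *dev)  { return 0; }

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* Placeholder: a real driver queues the skb to hardware here. */
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
        .ndo_open       = example_open,
        .ndo_stop       = example_stop,
        .ndo_start_xmit = example_xmit,
};

static void example_setup(struct net_device *dev)
{
        /* Replaces dev->hard_start_xmit = ..., dev->get_stats = ..., etc. */
        dev->netdev_ops = &example_netdev_ops;
}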
diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c
index da863c91d1d..d225c291fd9 100644
--- a/drivers/net/8390p.c
+++ b/drivers/net/8390p.c
@@ -79,14 +79,8 @@ EXPORT_SYMBOL(eip_netdev_ops);
struct net_device *__alloc_eip_netdev(int size)
{
struct net_device *dev = ____alloc_ei_netdev(size);
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
- if (dev) {
- dev->hard_start_xmit = eip_start_xmit;
- dev->get_stats = eip_get_stats;
- dev->set_multicast_list = eip_set_multicast_list;
- dev->tx_timeout = eip_tx_timeout;
- }
-#endif
+ if (dev)
+ dev->netdev_ops = &eip_netdev_ops;
return dev;
}
EXPORT_SYMBOL(__alloc_eip_netdev);
@@ -97,16 +91,15 @@ void NS8390p_init(struct net_device *dev, int startp)
}
EXPORT_SYMBOL(NS8390p_init);
-#if defined(MODULE)
-
-int init_module(void)
+static int __init NS8390p_init_module(void)
{
return 0;
}
-void cleanup_module(void)
+static void __exit NS8390p_cleanup_module(void)
{
}
-#endif /* MODULE */
+module_init(NS8390p_init_module);
+module_exit(NS8390p_cleanup_module);
MODULE_LICENSE("GPL");
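The 8390p hunk above converts the old #if defined(MODULE) init_module()/cleanup_module() pair into module_init()/module_exit(), which also does the right thing when the code is built in. A minimal sketch of the same shape.

#include <linux/init.h>
#include <linux/module.h>

static int __init example_init_module(void)
{
        return 0;       /* nothing to do at load time */
}

static void __exit example_cleanup_module(void)
{
}

module_init(example_init_module);
module_exit(example_cleanup_module);

MODULE_LICENSE("GPL");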
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 214a92d1ef7..3b6383168c6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1,4 +1,3 @@
-
#
# Network device configuration
#
@@ -26,15 +25,6 @@ menuconfig NETDEVICES
# that for each of the symbols.
if NETDEVICES
-config COMPAT_NET_DEV_OPS
- default y
- bool "Enable older network device API compatibility"
- ---help---
- This option enables kernel compatibility with older network devices
- that do not use net_device_ops interface.
-
- If unsure, say Y.
-
config IFB
tristate "Intermediate Functional Block support"
depends on NET_CLS_ACT
@@ -526,15 +516,16 @@ config STNIC
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on SUPERH && \
- (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7763 || \
- CPU_SUBTYPE_SH7619)
+ (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
+ CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
+ CPU_SUBTYPE_SH7724)
select CRC32
select MII
select MDIO_BITBANG
select PHYLIB
help
Renesas SuperH Ethernet device driver.
- This driver support SH7710, SH7712, SH7763 and SH7619.
+ This driver supports SH7710, SH7712, SH7763, SH7619, and SH7724.
config SUNLANCE
tristate "Sun LANCE support"
@@ -927,6 +918,16 @@ config NET_NETX
To compile this driver as a module, choose M here. The module
will be called netx-eth.
+config TI_DAVINCI_EMAC
+ tristate "TI DaVinci EMAC Support"
+ depends on ARM && ARCH_DAVINCI
+ select PHYLIB
+ help
+ This driver supports TI's DaVinci Ethernet.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_emac_driver. This is recommended.
+
config DM9000
tristate "DM9000 support"
depends on ARM || BLACKFIN || MIPS
@@ -1000,7 +1001,7 @@ config SMC911X
config SMSC911X
tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
- depends on ARM || SUPERH
+ depends on ARM || SUPERH || BLACKFIN
select CRC32
select MII
select PHYLIB
@@ -1722,6 +1723,11 @@ config TLAN
Please email feedback to <torben.mathiasen@compaq.com>.
+config KS8842
+ tristate "Micrel KSZ8842"
+ help
+ This platform driver is for Micrel KSZ8842 chip.
+
config VIA_RHINE
tristate "VIA Rhine support"
depends on NET_PCI && PCI
@@ -1858,8 +1864,8 @@ config 68360_ENET
the Motorola 68360 processor.
config FEC
- bool "FEC ethernet controller (of ColdFire CPUs)"
- depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27
+ bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+ depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -1880,7 +1886,7 @@ config FEC_MPC52xx
---help---
This option enables support for the MPC5200's on-chip
Fast Ethernet Controller
- If compiled as module, it will be called 'fec_mpc52xx.ko'.
+ If compiled as module, it will be called fec_mpc52xx.
config FEC_MPC52xx_MDIO
bool "MPC52xx FEC MDIO bus driver"
@@ -1892,7 +1898,7 @@ config FEC_MPC52xx_MDIO
(Motorola? industry standard).
If your board uses an external PHY connected to FEC, enable this.
If not sure, enable.
- If compiled as module, it will be called 'fec_mpc52xx_phy.ko'.
+ If compiled as module, it will be called fec_mpc52xx_phy.
config NE_H8300
tristate "NE2000 compatible support for H8/300"
@@ -2200,7 +2206,7 @@ config SKGE_DEBUG
depends on SKGE && DEBUG_FS
help
This option adds the ability to dump driver state for debugging.
- The file debugfs/skge/ethX displays the state of the internal
+ The file /sys/kernel/debug/skge/ethX displays the state of the internal
transmit and receive rings.
If unsure, say N.
@@ -2226,7 +2232,7 @@ config SKY2_DEBUG
depends on SKY2 && DEBUG_FS
help
This option adds the ability to dump driver state for debugging.
- The file debugfs/sky2/ethX displays the state of the internal
+ The file /sys/kernel/debug/sky2/ethX displays the state of the internal
transmit and receive rings.
If unsure, say N.
@@ -2264,6 +2270,17 @@ config BNX2
To compile this driver as a module, choose M here: the module
will be called bnx2. This is recommended.
+config CNIC
+ tristate "Broadcom CNIC support"
+ depends on BNX2
+ depends on UIO
+ help
+ This driver supports offload features of Broadcom NetXtremeII
+ gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cnic. This is recommended.
+
config SPIDER_NET
tristate "Spider Gigabit Ethernet driver"
depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
@@ -2351,7 +2368,7 @@ config UGETH_TX_ON_DEMAND
config MV643XX_ETH
tristate "Marvell Discovery (643XX) and Orion ethernet support"
- depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) || PLAT_ORION
+ depends on MV64X60 || PPC32 || PLAT_ORION
select INET_LRO
select PHYLIB
help
@@ -2362,6 +2379,14 @@ config MV643XX_ETH
Some boards that use the Discovery chipset are the Momenco
Ocelot C and Jaguar ATX and Pegasos II.
+config XILINX_LL_TEMAC
+ tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
+ select PHYLIB
+ depends on PPC_DCR_NATIVE
+ help
+ This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
+ core used in Xilinx Spartan and Virtex FPGAs
+
config QLA3XXX
tristate "QLogic QLA3XXX Network Driver Support"
depends on PCI
@@ -2435,10 +2460,14 @@ menuconfig NETDEV_10000
if NETDEV_10000
+config MDIO
+ tristate
+
config CHELSIO_T1
tristate "Chelsio 10Gb Ethernet support"
depends on PCI
select CRC32
+ select MDIO
help
This driver supports Chelsio gigabit and 10-gigabit
Ethernet cards. More information about adapter features and
@@ -2471,6 +2500,7 @@ config CHELSIO_T3
tristate "Chelsio Communications T3 10Gb Ethernet support"
depends on CHELSIO_T3_DEPENDS
select FW_LOADER
+ select MDIO
help
This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
adapters.
@@ -2506,6 +2536,7 @@ config ENIC
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI && INET
+ select MDIO
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
@@ -2668,6 +2699,7 @@ config TEHUTI
config BNX2X
tristate "Broadcom NetXtremeII 10Gb support"
depends on PCI
+ select FW_LOADER
select ZLIB_INFLATE
select LIBCRC32C
help
@@ -2704,6 +2736,8 @@ source "drivers/net/wan/Kconfig"
source "drivers/atm/Kconfig"
+source "drivers/ieee802154/Kconfig"
+
source "drivers/s390/net/Kconfig"
config XEN_NETDEV_FRONTEND
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1fc4602a6ff..d366fb2b40e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -2,6 +2,8 @@
# Makefile for the Linux network (ethercard) device drivers.
#
+obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
@@ -73,6 +75,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BNX2) += bnx2.o
+obj-$(CONFIG_CNIC) += cnic.o
obj-$(CONFIG_BNX2X) += bnx2x.o
bnx2x-objs := bnx2x_main.o bnx2x_link.o
spidernet-y += spider_net.o spider_net_ethtool.o
@@ -84,6 +87,7 @@ obj-$(CONFIG_TC35815) += tc35815.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
obj-$(CONFIG_SKFP) += skfp/
+obj-$(CONFIG_KS8842) += ks8842.o
obj-$(CONFIG_VIA_RHINE) += via-rhine.o
obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
@@ -95,6 +99,7 @@ obj-$(CONFIG_SH_ETH) += sh_eth.o
#
obj-$(CONFIG_MII) += mii.o
+obj-$(CONFIG_MDIO) += mdio.o
obj-$(CONFIG_PHYLIB) += phy/
obj-$(CONFIG_SUNDANCE) += sundance.o
@@ -102,7 +107,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o
obj-$(CONFIG_NET) += Space.o loopback.o
obj-$(CONFIG_SEEQ8005) += seeq8005.o
obj-$(CONFIG_NET_SB1000) += sb1000.o
-obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
+obj-$(CONFIG_MAC8390) += mac8390.o
obj-$(CONFIG_APNE) += apne.o 8390.o
obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
obj-$(CONFIG_HP100) += hp100.o
@@ -134,6 +139,8 @@ obj-$(CONFIG_AX88796) += ax88796.o
obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
+obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
obj-$(CONFIG_QLA3XXX) += qla3xxx.o
obj-$(CONFIG_QLGE) += qlge/
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index 02f64d57864..85a18175730 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -564,7 +564,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
if (!TX_BUFFS_AVAIL){
local_irq_restore(flags);
- return -1;
+ return NETDEV_TX_LOCKED;
}
#ifdef DEBUG_DRIVER
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 57bc7152785..08419ee1029 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -2573,7 +2573,6 @@ restart:
netif_wake_queue(dev);
}
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
overflow:
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index da64ba88d7f..78cea5e80b1 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -39,6 +39,7 @@
static const char version[] = KERN_INFO "ipddp.c:v0.01 8/28/97 Bradford W. Johnson <johns393@maroon.tc.umn.edu>\n";
static struct ipddp_route *ipddp_route_list;
+static DEFINE_SPINLOCK(ipddp_route_lock);
#ifdef CONFIG_IPDDP_ENCAP
static int ipddp_mode = IPDDP_ENCAP;
@@ -50,7 +51,7 @@ static int ipddp_mode = IPDDP_DECAP;
static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev);
static int ipddp_create(struct ipddp_route *new_rt);
static int ipddp_delete(struct ipddp_route *rt);
-static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt);
+static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt);
static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops ipddp_netdev_ops = {
@@ -71,6 +72,7 @@ static struct net_device * __init ipddp_init(void)
if (!dev)
return ERR_PTR(-ENOMEM);
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
strcpy(dev->name, "ipddp%d");
if (version_printed++ == 0)
@@ -113,11 +115,13 @@ static struct net_device * __init ipddp_init(void)
*/
static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
{
- __be32 paddr = ((struct rtable*)skb->dst)->rt_gateway;
+ __be32 paddr = skb_rtable(skb)->rt_gateway;
struct ddpehdr *ddp;
struct ipddp_route *rt;
struct atalk_addr *our_addr;
+ spin_lock(&ipddp_route_lock);
+
/*
* Find appropriate route to use, based only on IP number.
*/
@@ -126,8 +130,10 @@ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
if(rt->ip == paddr)
break;
}
- if(rt == NULL)
+ if(rt == NULL) {
+ spin_unlock(&ipddp_route_lock);
return 0;
+ }
our_addr = atalk_find_dev_addr(rt->dev);
@@ -173,6 +179,8 @@ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
if(aarp_send_ddp(rt->dev, skb, &rt->at, NULL) < 0)
dev_kfree_skb(skb);
+ spin_unlock(&ipddp_route_lock);
+
return 0;
}
@@ -195,7 +203,9 @@ static int ipddp_create(struct ipddp_route *new_rt)
return -ENETUNREACH;
}
- if (ipddp_find_route(rt)) {
+ spin_lock_bh(&ipddp_route_lock);
+ if (__ipddp_find_route(rt)) {
+ spin_unlock_bh(&ipddp_route_lock);
kfree(rt);
return -EEXIST;
}
@@ -203,6 +213,8 @@ static int ipddp_create(struct ipddp_route *new_rt)
rt->next = ipddp_route_list;
ipddp_route_list = rt;
+ spin_unlock_bh(&ipddp_route_lock);
+
return 0;
}
@@ -215,6 +227,7 @@ static int ipddp_delete(struct ipddp_route *rt)
struct ipddp_route **r = &ipddp_route_list;
struct ipddp_route *tmp;
+ spin_lock_bh(&ipddp_route_lock);
while((tmp = *r) != NULL)
{
if(tmp->ip == rt->ip
@@ -222,19 +235,21 @@ static int ipddp_delete(struct ipddp_route *rt)
&& tmp->at.s_node == rt->at.s_node)
{
*r = tmp->next;
+ spin_unlock_bh(&ipddp_route_lock);
kfree(tmp);
return 0;
}
r = &tmp->next;
}
+ spin_unlock_bh(&ipddp_route_lock);
return (-ENOENT);
}
/*
* Find a routing entry, we only return a FULL match
*/
-static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt)
+static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt)
{
struct ipddp_route *f;
@@ -252,7 +267,7 @@ static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt)
static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct ipddp_route __user *rt = ifr->ifr_data;
- struct ipddp_route rcp;
+ struct ipddp_route rcp, rcp2, *rp;
if(!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -266,9 +281,19 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return (ipddp_create(&rcp));
case SIOCFINDIPDDPRT:
- if(copy_to_user(rt, ipddp_find_route(&rcp), sizeof(struct ipddp_route)))
- return -EFAULT;
- return 0;
+ spin_lock_bh(&ipddp_route_lock);
+ rp = __ipddp_find_route(&rcp);
+ if (rp)
+ memcpy(&rcp2, rp, sizeof(rcp2));
+ spin_unlock_bh(&ipddp_route_lock);
+
+ if (rp) {
+ if (copy_to_user(rt, &rcp2,
+ sizeof(struct ipddp_route)))
+ return -EFAULT;
+ return 0;
+ } else
+ return -ENOENT;
case SIOCDELIPDDPRT:
return (ipddp_delete(&rcp));
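
The ipddp hunks above put the driver's single route list under a spinlock and split the lookup into __ipddp_find_route(), which callers must invoke with ipddp_route_lock held. A minimal sketch of that pattern follows; it is illustrative only, and the names (demo_route, demo_route_lock, __demo_find_route, demo_add_route) are made up rather than taken from the driver.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_route {
        u32 ip;
        struct list_head node;
};

static LIST_HEAD(demo_route_list);
static DEFINE_SPINLOCK(demo_route_lock);

/* Caller must hold demo_route_lock. */
static struct demo_route *__demo_find_route(u32 ip)
{
        struct demo_route *r;

        list_for_each_entry(r, &demo_route_list, node)
                if (r->ip == ip)
                        return r;
        return NULL;
}

static int demo_add_route(u32 ip)
{
        struct demo_route *r = kmalloc(sizeof(*r), GFP_KERNEL);

        if (!r)
                return -ENOMEM;
        r->ip = ip;

        spin_lock_bh(&demo_route_lock);
        if (__demo_find_route(ip)) {
                spin_unlock_bh(&demo_route_lock);
                kfree(r);
                return -EEXIST;
        }
        list_add(&r->node, &demo_route_list);
        spin_unlock_bh(&demo_route_lock);
        return 0;
}

Note also how the ioctl hunk copies the matched entry into a stack buffer before dropping the lock, so copy_to_user() is never called with the spinlock held.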
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 78cc7146913..b642647170b 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1220,7 +1220,7 @@ static int __init ltpc_setup(char *str)
if (ints[0] > 2) {
dma = ints[3];
}
- /* ignore any other paramters */
+ /* ignore any other parameters */
}
return 1;
}
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 7f4bc8ae546..2e7419a6119 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -829,7 +829,7 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
} else {
printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
- return 1; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
+ return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
on this skb, he also reports -ENETDOWN and printk's, so either
we free and return(0) or don't free and return 1 */
}
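
Several hunks in this merge (7990, at91_ether, ether3, au1000_eth) replace hard-coded 0/1/-1 returns from the xmit handler with the NETDEV_TX_* constants. A hedged sketch of the convention, for a hypothetical driver (demo_ring_full and the stand-in "consume" step are assumptions, not code from any file above):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>

static bool demo_ring_full(struct net_device *dev)
{
        return false;   /* placeholder: a real driver checks its TX ring */
}

static int demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (demo_ring_full(dev)) {
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;  /* core requeues; skb must not be freed */
        }

        /* a real driver would map the skb and post it to hardware here */
        dev_kfree_skb(skb);             /* stand-in for "consumed by hardware" */
        return NETDEV_TX_OK;            /* skb was consumed */
}

NETDEV_TX_OK means the skb was taken over by the driver; NETDEV_TX_BUSY hands it back to the core untouched, which is why returning a bare 1 and freeing the skb (as the old at91_ether comment worried about) mixes the two contracts.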
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index b72b3d639f6..fbf4645417d 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -253,7 +253,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
skb = dev_alloc_skb(length + 2);
if (likely(skb != NULL)) {
skb_reserve(skb, 2);
- dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
+ dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
length, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
skb_put(skb, length);
@@ -331,7 +331,7 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
ep->descs->tdesc[entry].tdesc1 =
TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
- dma_sync_single(NULL, ep->descs->tdesc[entry].buf_addr,
+ dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
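
The ep93xx and ixp4xx hunks switch from the old dma_sync_single() to dma_sync_single_for_cpu(), which names the ownership transfer explicitly. As a rough illustration (buffer names are placeholders, not the drivers' code), the usual pairing around a CPU access to a streaming mapping looks like this:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Give the buffer to the CPU, copy it out, then hand it back to the
 * device before the mapping is reused for another receive. */
static void demo_copy_rx(struct device *dev, dma_addr_t buf_dma,
                         const void *buf_cpu, void *dst, size_t len)
{
        dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
        memcpy(dst, buf_cpu, len);
        dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
}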
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index ec8a1ae1e88..455037134aa 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -526,7 +526,7 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
if (priv(dev)->tx_tail == next_ptr) {
local_irq_restore(flags);
- return 1; /* unable to queue */
+ return NETDEV_TX_BUSY; /* unable to queue */
}
dev->trans_start = jiffies;
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index a740053d3af..6f42ad72891 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -456,7 +456,8 @@ static inline void queue_put_desc(unsigned int queue, u32 phys,
debug_desc(phys, desc);
BUG_ON(phys & 0x1F);
qmgr_put_entry(queue, phys);
- BUG_ON(qmgr_stat_overflow(queue));
+ /* Don't check for queue overflow here, we've allocated sufficient
+ length and queues >= 32 don't support this check anyway. */
}
@@ -512,8 +513,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
#endif
napi_complete(napi);
qmgr_enable_irq(rxq);
- if (!qmgr_stat_empty(rxq) &&
- napi_reschedule(napi)) {
+ if (!qmgr_stat_below_low_watermark(rxq) &&
+ napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
printk(KERN_DEBUG "%s: eth_poll"
" napi_reschedule successed\n",
@@ -561,8 +562,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
- dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
- RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
+ RX_BUFF_SIZE, DMA_FROM_DEVICE);
memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
@@ -630,9 +631,9 @@ static void eth_txdone_irq(void *unused)
port->tx_buff_tab[n_desc] = NULL;
}
- start = qmgr_stat_empty(port->plat->txreadyq);
+ start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
queue_put_desc(port->plat->txreadyq, phys, desc);
- if (start) {
+ if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
port->netdev->name);
@@ -708,13 +709,14 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
dev->trans_start = jiffies;
- if (qmgr_stat_empty(txreadyq)) {
+ if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
netif_stop_queue(dev);
/* we could miss TX ready interrupt */
- if (!qmgr_stat_empty(txreadyq)) {
+ /* really empty in fact */
+ if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
printk(KERN_DEBUG "%s: eth_xmit ready again\n",
dev->name);
@@ -814,29 +816,29 @@ static int request_queues(struct port *port)
int err;
err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
- "%s:RX-free", port->netdev->name);
+ "%s:RX-free", port->netdev->name);
if (err)
return err;
err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
- "%s:RX", port->netdev->name);
+ "%s:RX", port->netdev->name);
if (err)
goto rel_rxfree;
err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
- "%s:TX", port->netdev->name);
+ "%s:TX", port->netdev->name);
if (err)
goto rel_rx;
err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
- "%s:TX-ready", port->netdev->name);
+ "%s:TX-ready", port->netdev->name);
if (err)
goto rel_tx;
/* TX-done queue handles skbs sent out by the NPEs */
if (!ports_open) {
err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
- "%s:TX-done", DRV_NAME);
+ "%s:TX-done", DRV_NAME);
if (err)
goto rel_txready;
}
@@ -1149,7 +1151,7 @@ static int __devinit eth_init_one(struct platform_device *pdev)
struct net_device *dev;
struct eth_plat_info *plat = pdev->dev.platform_data;
u32 regs_phys;
- char phy_id[BUS_ID_SIZE];
+ char phy_id[MII_BUS_ID_SIZE + 3];
int err;
if (!(dev = alloc_etherdev(sizeof(struct port))))
@@ -1207,7 +1209,7 @@ static int __devinit eth_init_one(struct platform_device *pdev)
__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
udelay(50);
- snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy);
port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if ((err = IS_ERR(port->phydev)))
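
The eth_xmit hunk above keeps the usual stop/re-check dance: stop the queue when the TX resource looks exhausted, then test again and wake if a completion slipped in, so a lost "TX ready" event cannot strand the queue. A generic sketch of that shape follows; struct demo_priv, demo_tx_space_left and demo_tx_maybe_stop are invented for illustration, and the memory barrier is a common addition rather than something taken from this driver.

#include <linux/netdevice.h>
#include <linux/types.h>

struct demo_priv {
        unsigned int tx_free;   /* descriptors still available */
};

static bool demo_tx_space_left(const struct demo_priv *priv)
{
        return priv->tx_free != 0;
}

/* Called at the end of a hypothetical ndo_start_xmit after queueing one frame. */
static void demo_tx_maybe_stop(struct net_device *dev, struct demo_priv *priv)
{
        if (demo_tx_space_left(priv))
                return;

        netif_stop_queue(dev);
        /* A completion may have freed space between the check and the stop;
         * re-check and wake so the queue cannot stay stalled forever. */
        smp_mb();
        if (demo_tx_space_left(priv))
                netif_wake_queue(dev);
}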
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 45c5b7332cd..e4afbd628c2 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -271,7 +271,7 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct atl1c_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
- WAKE_MCAST | WAKE_BCAST | WAKE_MCAST))
+ WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
return -EOPNOTSUPP;
/* these settings will always override what we currently have */
adapter->wol = 0;
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 83a12125b94..cd547a205fb 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -164,6 +164,24 @@ static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
}
/*
+ * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
+ * of the idle status register until the device is actually idle
+ */
+static u32 atl1c_wait_until_idle(struct atl1c_hw *hw)
+{
+ int timeout;
+ u32 data;
+
+ for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
+ AT_READ_REG(hw, REG_IDLE_STATUS, &data);
+ if ((data & IDLE_STATUS_MASK) == 0)
+ return 0;
+ msleep(1);
+ }
+ return data;
+}
+
+/*
* atl1c_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -220,11 +238,11 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
/* link down */
if (netif_carrier_ok(netdev)) {
hw->hibernate = true;
- atl1c_set_aspm(hw, false);
if (atl1c_stop_mac(hw) != 0)
if (netif_msg_hw(adapter))
dev_warn(&pdev->dev,
"stop mac failed\n");
+ atl1c_set_aspm(hw, false);
}
netif_carrier_off(netdev);
} else {
@@ -240,10 +258,10 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
adapter->link_duplex != duplex) {
adapter->link_speed = speed;
adapter->link_duplex = duplex;
+ atl1c_set_aspm(hw, true);
atl1c_enable_tx_ctrl(hw);
atl1c_enable_rx_ctrl(hw);
atl1c_setup_mac_ctrl(adapter);
- atl1c_set_aspm(hw, true);
if (netif_msg_link(adapter))
dev_info(&pdev->dev,
"%s: %s NIC Link is Up<%d Mbps %s>\n",
@@ -1106,7 +1124,6 @@ static void atl1c_configure_dma(struct atl1c_adapter *adapter)
static int atl1c_stop_mac(struct atl1c_hw *hw)
{
u32 data;
- int timeout;
AT_READ_REG(hw, REG_RXQ_CTRL, &data);
data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN |
@@ -1117,25 +1134,13 @@ static int atl1c_stop_mac(struct atl1c_hw *hw)
data &= ~TXQ_CTRL_EN;
AT_WRITE_REG(hw, REG_TWSI_CTRL, data);
- for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
- AT_READ_REG(hw, REG_IDLE_STATUS, &data);
- if ((data & (IDLE_STATUS_RXQ_NO_IDLE |
- IDLE_STATUS_TXQ_NO_IDLE)) == 0)
- break;
- msleep(1);
- }
+ atl1c_wait_until_idle(hw);
AT_READ_REG(hw, REG_MAC_CTRL, &data);
data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
AT_WRITE_REG(hw, REG_MAC_CTRL, data);
- for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
- AT_READ_REG(hw, REG_IDLE_STATUS, &data);
- if ((data & IDLE_STATUS_MASK) == 0)
- return 0;
- msleep(1);
- }
- return data;
+ return (int)atl1c_wait_until_idle(hw);
}
static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
@@ -1178,8 +1183,6 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
{
struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
struct pci_dev *pdev = adapter->pdev;
- u32 idle_status_data = 0;
- int timeout = 0;
int ret;
AT_WRITE_REG(hw, REG_IMR, 0);
@@ -1198,15 +1201,10 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
AT_WRITE_FLUSH(hw);
msleep(10);
/* Wait at least 10ms for All module to be Idle */
- for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
- AT_READ_REG(hw, REG_IDLE_STATUS, &idle_status_data);
- if ((idle_status_data & IDLE_STATUS_MASK) == 0)
- break;
- msleep(1);
- }
- if (timeout >= AT_HW_MAX_IDLE_DELAY) {
+
+ if (atl1c_wait_until_idle(hw)) {
dev_err(&pdev->dev,
- "MAC state machine cann't be idle since"
+ "MAC state machine can't be idle since"
" disabled for 10ms second\n");
return -1;
}
@@ -1242,9 +1240,7 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
- pm_ctrl_data &= PM_CTRL_SERDES_PD_EX_L1;
- pm_ctrl_data |= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
- pm_ctrl_data |= ~PM_CTRL_SERDES_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
PM_CTRL_L1_ENTRY_TIMER_SHIFT);
@@ -1254,19 +1250,11 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
- if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) {
- pm_ctrl_data |= AT_ASPM_L1_TIMER <<
- PM_CTRL_L1_ENTRY_TIMER_SHIFT;
- pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
- } else
- pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
-
- if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
- pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
- else
- pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
-
+ pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
+ pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
} else {
+ pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
@@ -2123,7 +2111,6 @@ static int atl1c_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
atl1c_tx_map(adapter, skb, tpd, type);
atl1c_tx_queue(adapter, skb, tpd, type);
- netdev->trans_start = jiffies;
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
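
The atl1c change folds two copies of the same polling loop into one atl1c_wait_until_idle() helper and returns the raw idle bits so the caller can report what stayed busy. A hedged generic sketch of that de-duplication (the register pointer, mask and DEMO_MAX_IDLE_POLLS bound are placeholders; the real driver polls REG_IDLE_STATUS via AT_READ_REG up to AT_HW_MAX_IDLE_DELAY times):

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_MAX_IDLE_POLLS     10      /* placeholder bound */

/* Poll an idle-status register; return 0 once idle, otherwise the last
 * non-idle value so the caller can log which blocks were still busy. */
static u32 demo_wait_until_idle(void __iomem *idle_reg, u32 busy_mask)
{
        u32 data = 0;
        int i;

        for (i = 0; i < DEMO_MAX_IDLE_POLLS; i++) {
                data = readl(idle_reg);
                if (!(data & busy_mask))
                        return 0;
                msleep(1);
        }
        return data;
}

Returning the status word instead of a bare error code is what lets atl1c_stop_mac() and atl1c_reset_mac() share the helper while keeping their different failure handling.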
diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h
index 2bf63b4368e..ba48220df16 100644
--- a/drivers/net/atl1e/atl1e.h
+++ b/drivers/net/atl1e/atl1e.h
@@ -429,7 +429,6 @@ struct atl1e_adapter {
struct mii_if_info mii; /* MII interface info */
struct atl1e_hw hw;
struct atl1e_hw_stats hw_stats;
- struct net_device_stats net_stats;
bool have_msi;
u32 wol;
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index fb57b750866..9fc6d6d9060 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -37,6 +37,7 @@ char atl1e_driver_version[] = DRV_VERSION;
*/
static struct pci_device_id atl1e_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
/* required last entry */
{ 0 }
};
@@ -1153,7 +1154,7 @@ static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw_stats *hw_stats = &adapter->hw_stats;
- struct net_device_stats *net_stats = &adapter->net_stats;
+ struct net_device_stats *net_stats = &netdev->stats;
net_stats->rx_packets = hw_stats->rx_ok;
net_stats->tx_packets = hw_stats->tx_ok;
@@ -1181,7 +1182,7 @@ static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
net_stats->tx_window_errors = hw_stats->tx_late_col;
- return &adapter->net_stats;
+ return net_stats;
}
static void atl1e_update_hw_stats(struct atl1e_adapter *adapter)
@@ -1309,7 +1310,7 @@ static irqreturn_t atl1e_intr(int irq, void *data)
/* link event */
if (status & (ISR_GPHY | ISR_MANUAL)) {
- adapter->net_stats.tx_carrier_errors++;
+ netdev->stats.tx_carrier_errors++;
atl1e_link_chg_event(adapter);
break;
}
@@ -1601,7 +1602,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
}
if (skb_is_gso(skb)) {
- if (skb->protocol == ntohs(ETH_P_IP) ||
+ if (skb->protocol == htons(ETH_P_IP) ||
(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
proto_hdr_len = skb_transport_offset(skb) +
tcp_hdrlen(skb);
@@ -1794,8 +1795,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));
tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
- if (tx_buffer->skb)
- BUG();
+ BUG_ON(tx_buffer->skb);
tx_buffer->skb = NULL;
tx_buffer->length =
@@ -1878,7 +1878,7 @@ static int atl1e_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
TPD_VLAN_SHIFT;
}
- if (skb->protocol == ntohs(ETH_P_8021Q))
+ if (skb->protocol == htons(ETH_P_8021Q))
tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT;
if (skb_network_offset(skb) != ETH_HLEN)
@@ -1894,7 +1894,7 @@ static int atl1e_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
atl1e_tx_map(adapter, skb, tpd);
atl1e_tx_queue(adapter, tpd_req, tpd);
- netdev->trans_start = jiffies;
+ netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
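
Both atl1e and atl1 correct skb->protocol comparisons from ntohs(ETH_P_IP) to htons(ETH_P_IP): the field is stored in network byte order, so the host-order constant is what needs converting, and writing it that way keeps the endianness annotations consistent for sparse. A trivial sketch (demo_is_ipv4 is a made-up helper):

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static bool demo_is_ipv4(const struct sk_buff *skb)
{
        /* skb->protocol is __be16; convert the constant, not the field. */
        return skb->protocol == htons(ETH_P_IP);
}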
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0ab22540bf5..94d7325caf4 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -82,6 +82,12 @@
#include "atl1.h"
+#define ATLX_DRIVER_VERSION "2.1.3"
+MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
+ Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ATLX_DRIVER_VERSION);
+
/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"
@@ -2207,8 +2213,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
nr_frags = skb_shinfo(skb)->nr_frags;
next_to_use = atomic_read(&tpd_ring->next_to_use);
buffer_info = &tpd_ring->buffer_info[next_to_use];
- if (unlikely(buffer_info->skb))
- BUG();
+ BUG_ON(buffer_info->skb);
/* put skb in last TPD */
buffer_info->skb = NULL;
@@ -2274,8 +2279,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
ATL1_MAX_TX_BUF_LEN;
for (i = 0; i < nseg; i++) {
buffer_info = &tpd_ring->buffer_info[next_to_use];
- if (unlikely(buffer_info->skb))
- BUG();
+ BUG_ON(buffer_info->skb);
+
buffer_info->skb = NULL;
buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
ATL1_MAX_TX_BUF_LEN : buf_len;
@@ -2377,7 +2382,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
mss = skb_shinfo(skb)->gso_size;
if (mss) {
- if (skb->protocol == ntohs(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP)) {
proto_hdr_len = (skb_transport_offset(skb) +
tcp_hdrlen(skb));
if (unlikely(proto_hdr_len > len)) {
@@ -2432,7 +2437,6 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
atl1_tx_queue(adapter, count, ptpd);
atl1_update_mailbox(adapter);
mmiowb();
- netdev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h
index 297a03da6b7..14054b75aa6 100644
--- a/drivers/net/atlx/atlx.h
+++ b/drivers/net/atlx/atlx.h
@@ -29,12 +29,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#define ATLX_DRIVER_VERSION "2.1.3"
-MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
- Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(ATLX_DRIVER_VERSION);
-
#define ATLX_ERR_PHY 2
#define ATLX_ERR_PHY_SPEED 7
#define ATLX_ERR_PHY_RES 8
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index d58c105fc77..d3c734f4d67 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -957,7 +957,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
/* We've wrapped around and the transmitter is still busy */
netif_stop_queue(dev);
aup->tx_full = 1;
- return 1;
+ return NETDEV_TX_BUSY;
}
else if (buff_stat & TX_T_DONE) {
update_tx_stats(dev, ptxd->status);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index b70b81ec34c..36d4d377ec2 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -782,7 +782,7 @@ static int b44_rx(struct b44 *bp, int budget)
drop_it:
b44_recycle_rx(bp, cons, bp->rx_prod);
drop_it_no_recycle:
- bp->stats.rx_dropped++;
+ bp->dev->stats.rx_dropped++;
goto next_pkt;
}
@@ -1647,7 +1647,7 @@ static int b44_close(struct net_device *dev)
static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &bp->stats;
+ struct net_device_stats *nstat = &dev->stats;
struct b44_hw_stats *hwstat = &bp->hw_stats;
/* Convert HW stats into netdevice stats. */
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index e678498de6d..e1905a49279 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -97,7 +97,7 @@
#define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. + Status */
#define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */
#define DMARX_STAT_SMASK 0x0000f000 /* State Mask */
-#define DMARX_STAT_SDISABLED 0x00000000 /* State Disbaled */
+#define DMARX_STAT_SDISABLED 0x00000000 /* State Disabled */
#define DMARX_STAT_SACTIVE 0x00001000 /* State Active */
#define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */
#define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */
@@ -384,7 +384,6 @@ struct b44 {
struct timer_list timer;
- struct net_device_stats stats;
struct b44_hw_stats hw_stats;
struct ssb_device *sdev;
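
atl1e and b44 both drop their private struct net_device_stats copies and count into the net_device's embedded dev->stats instead. A minimal sketch of the pattern with a hypothetical counter and get_stats hook (demo_count_rx_drop and demo_get_stats are invented names):

#include <linux/netdevice.h>

static void demo_count_rx_drop(struct net_device *dev)
{
        dev->stats.rx_dropped++;        /* embedded stats, no private copy */
}

static struct net_device_stats *demo_get_stats(struct net_device *dev)
{
        return &dev->stats;             /* also the core's default source */
}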
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index c49ddd08b2a..b4bb06fdf30 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -35,8 +35,22 @@
#define DRV_VER "2.0.348"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
+#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define DRV_DESC BE_NAME "Driver"
+#define BE_VENDOR_ID 0x19a2
+#define BE_DEVICE_ID1 0x211
+#define OC_DEVICE_ID1 0x700
+#define OC_DEVICE_ID2 0x701
+
+static inline char *nic_name(struct pci_dev *pdev)
+{
+ if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2)
+ return OC_NAME;
+ else
+ return BE_NAME;
+}
+
/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN 64
#define BE_MAX_JUMBO_FRAME_SIZE 9018
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 30d0c81c989..66bb56874d9 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -28,10 +28,10 @@ static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
-#define BE_VENDOR_ID 0x19a2
-#define BE2_DEVICE_ID_1 0x0211
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
- { PCI_DEVICE(BE_VENDOR_ID, BE2_DEVICE_ID_1) },
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -168,6 +168,7 @@ static void netdev_stats_update(struct be_adapter *adapter)
struct be_port_rxf_stats *port_stats =
&rxf_stats->port[adapter->port_num];
struct net_device_stats *dev_stats = &adapter->stats.net_stats;
+ struct be_erx_stats *erx_stats = &hw_stats->erx;
dev_stats->rx_packets = port_stats->rx_total_frames;
dev_stats->tx_packets = port_stats->tx_unicastframes +
@@ -181,29 +182,33 @@ static void netdev_stats_update(struct be_adapter *adapter)
dev_stats->rx_errors = port_stats->rx_crc_errors +
port_stats->rx_alignment_symbol_errors +
port_stats->rx_in_range_errors +
- port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;
-
- /* packet transmit problems */
- dev_stats->tx_errors = 0;
-
- /* no space in linux buffers */
- dev_stats->rx_dropped = 0;
-
- /* no space available in linux */
- dev_stats->tx_dropped = 0;
-
- dev_stats->multicast = port_stats->tx_multicastframes;
- dev_stats->collisions = 0;
+ port_stats->rx_out_range_errors +
+ port_stats->rx_frame_too_long +
+ port_stats->rx_dropped_too_small +
+ port_stats->rx_dropped_too_short +
+ port_stats->rx_dropped_header_too_small +
+ port_stats->rx_dropped_tcp_length +
+ port_stats->rx_dropped_runt +
+ port_stats->rx_tcp_checksum_errs +
+ port_stats->rx_ip_checksum_errs +
+ port_stats->rx_udp_checksum_errs;
+
+ /* no space in linux buffers: best possible approximation */
+ dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];
/* detailed rx errors */
dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
- port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;
+ port_stats->rx_out_range_errors +
+ port_stats->rx_frame_too_long;
+
/* receive ring buffer overflow */
dev_stats->rx_over_errors = 0;
+
dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
/* frame alignment errors */
dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
+
/* receiver fifo overrun */
/* drops_no_pbuf is no per i/f, it's per BE card */
dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
@@ -211,6 +216,16 @@ static void netdev_stats_update(struct be_adapter *adapter)
rxf_stats->rx_drops_no_pbuf;
/* receiver missed packetd */
dev_stats->rx_missed_errors = 0;
+
+ /* packet transmit problems */
+ dev_stats->tx_errors = 0;
+
+ /* no space available in linux */
+ dev_stats->tx_dropped = 0;
+
+ dev_stats->multicast = port_stats->tx_multicastframes;
+ dev_stats->collisions = 0;
+
/* detailed tx_errors */
dev_stats->tx_aborted_errors = 0;
dev_stats->tx_carrier_errors = 0;
@@ -337,13 +352,10 @@ static void be_tx_stats_update(struct be_adapter *adapter,
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
- int cnt = 0;
- while (skb) {
- if (skb->len > skb->data_len)
- cnt++;
- cnt += skb_shinfo(skb)->nr_frags;
- skb = skb_shinfo(skb)->frag_list;
- }
+ int cnt = (skb->len > skb->data_len);
+
+ cnt += skb_shinfo(skb)->nr_frags;
+
/* to account for hdr wrb */
cnt++;
if (cnt & 1) {
@@ -409,31 +421,28 @@ static int make_tx_wrbs(struct be_adapter *adapter,
hdr = queue_head_node(txq);
queue_head_inc(txq);
- while (skb) {
- if (skb->len > skb->data_len) {
- int len = skb->len - skb->data_len;
- busaddr = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- wrb = queue_head_node(txq);
- wrb_fill(wrb, busaddr, len);
- be_dws_cpu_to_le(wrb, sizeof(*wrb));
- queue_head_inc(txq);
- copied += len;
- }
+ if (skb->len > skb->data_len) {
+ int len = skb->len - skb->data_len;
+ busaddr = pci_map_single(pdev, skb->data, len,
+ PCI_DMA_TODEVICE);
+ wrb = queue_head_node(txq);
+ wrb_fill(wrb, busaddr, len);
+ be_dws_cpu_to_le(wrb, sizeof(*wrb));
+ queue_head_inc(txq);
+ copied += len;
+ }
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag =
- &skb_shinfo(skb)->frags[i];
- busaddr = pci_map_page(pdev, frag->page,
- frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
- wrb = queue_head_node(txq);
- wrb_fill(wrb, busaddr, frag->size);
- be_dws_cpu_to_le(wrb, sizeof(*wrb));
- queue_head_inc(txq);
- copied += frag->size;
- }
- skb = skb_shinfo(skb)->frag_list;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag =
+ &skb_shinfo(skb)->frags[i];
+ busaddr = pci_map_page(pdev, frag->page,
+ frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE);
+ wrb = queue_head_node(txq);
+ wrb_fill(wrb, busaddr, frag->size);
+ be_dws_cpu_to_le(wrb, sizeof(*wrb));
+ queue_head_inc(txq);
+ copied += frag->size;
}
if (dummy_wrb) {
@@ -478,8 +487,6 @@ static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt);
- netdev->trans_start = jiffies;
-
be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
return NETDEV_TX_OK;
}
@@ -637,6 +644,22 @@ static void be_rx_stats_update(struct be_adapter *adapter,
stats->be_rx_bytes += pktsize;
}
+static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
+{
+ u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
+
+ l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
+ ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
+ ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
+ if (ip_version) {
+ tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
+ udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
+ }
+ ipv6_chk = (ip_version && (tcpf || udpf));
+
+ return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
+}
+
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
@@ -720,7 +743,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
if (pktsize <= rx_frag_size) {
BUG_ON(num_rcvd != 1);
- return;
+ goto done;
}
/* More frags present for this completion */
@@ -742,6 +765,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
memset(page_info, 0, sizeof(*page_info));
}
+done:
be_rx_stats_update(adapter, pktsize, num_rcvd);
return;
}
@@ -752,9 +776,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
{
struct sk_buff *skb;
u32 vtp, vid;
- int l4_cksm;
- l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
@@ -769,10 +791,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb_fill_rx_data(adapter, skb, rxcp);
- if (l4_cksm && adapter->rx_csum)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
+ if (do_pkt_csum(rxcp, adapter->rx_csum))
skb->ip_summed = CHECKSUM_NONE;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, adapter->netdev);
@@ -854,12 +876,19 @@ static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
- rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
-
queue_tail_inc(&adapter->rx_obj.cq);
return rxcp;
}
+/* To reset the valid bit, we need to reset the whole word as
+ * when walking the queue the valid entries are little-endian
+ * and invalid entries are host endian
+ */
+static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
+{
+ rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
+}
+
static inline struct page *be_alloc_pages(u32 size)
{
gfp_t alloc_flags = GFP_ATOMIC;
@@ -991,6 +1020,7 @@ static void be_rx_q_clean(struct be_adapter *adapter)
/* First cleanup pending rx completions */
while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
be_rx_compl_discard(adapter, rxcp);
+ be_rx_compl_reset(rxcp);
be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1);
}
@@ -1026,8 +1056,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
struct be_queue_info *q;
q = &adapter->tx_obj.q;
- if (q->created)
+ if (q->created) {
be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ);
+
+ /* No more tx completions can be rcvd now; clean up if there
+ * are any pending completions or pending tx requests */
+ be_tx_q_clean(adapter);
+ }
be_queue_free(adapter, q);
q = &adapter->tx_obj.cq;
@@ -1035,10 +1070,6 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
be_queue_free(adapter, q);
- /* No more tx completions can be rcvd now; clean up if there are
- * any pending completions or pending tx requests */
- be_tx_q_clean(adapter);
-
q = &adapter->tx_eq.q;
if (q->created)
be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
@@ -1272,6 +1303,8 @@ int be_poll_rx(struct napi_struct *napi, int budget)
be_rx_compl_process_lro(adapter, rxcp);
else
be_rx_compl_process(adapter, rxcp);
+
+ be_rx_compl_reset(rxcp);
}
lro_flush_all(&adapter->rx_obj.lro_mgr);
@@ -1527,7 +1560,7 @@ static int be_close(struct net_device *netdev)
struct be_eq_obj *tx_eq = &adapter->tx_eq;
int vec;
- cancel_delayed_work(&adapter->work);
+ cancel_delayed_work_sync(&adapter->work);
netif_stop_queue(netdev);
netif_carrier_off(netdev);
@@ -1626,10 +1659,12 @@ static void be_netdev_init(struct net_device *netdev)
netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ NETIF_F_IPV6_CSUM;
netdev->flags |= IFF_MULTICAST;
+ adapter->rx_csum = true;
+
BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
@@ -1859,7 +1894,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status != 0)
goto stats_clean;
- dev_info(&pdev->dev, BE_NAME " port %d\n", adapter->port_num);
+ dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
return 0;
stats_clean:
@@ -1873,7 +1908,7 @@ rel_reg:
disable_dev:
pci_disable_device(pdev);
do_none:
- dev_warn(&pdev->dev, BE_NAME " initialization failed\n");
+ dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
return status;
}
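
The be2net hunks centralize the RX checksum decision in do_pkt_csum(): CHECKSUM_UNNECESSARY is set only when offload is enabled and the hardware actually validated the packet, otherwise the stack is asked to verify it. A simplified sketch of that decision with hypothetical flag arguments (the real helper decodes l4_cksm/ipcksm/ip_version bits from the completion descriptor):

#include <linux/skbuff.h>

static void demo_set_rx_csum(struct sk_buff *skb, bool hw_csum_ok, bool offload_on)
{
        if (offload_on && hw_csum_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb->ip_summed = CHECKSUM_NONE; /* let the stack verify it */
}

The same file also switches be_close() to cancel_delayed_work_sync(), which waits for a running instance of the periodic worker to finish instead of merely cancelling a pending one, so the worker cannot still be executing while the queues are torn down.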
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 9f971ed6b58..c15fc281f79 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -194,13 +194,13 @@ static int desc_list_init(void)
struct dma_descriptor *b = &(r->desc_b);
/* allocate a new skb for next time receive */
- new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+ new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
printk(KERN_NOTICE DRV_NAME
": init: low on mem - packet dropped\n");
goto init_error;
}
- skb_reserve(new_skb, 2);
+ skb_reserve(new_skb, NET_IP_ALIGN);
r->skb = new_skb;
/*
@@ -566,9 +566,9 @@ static void adjust_tx_list(void)
*/
if (current_tx_ptr->next->next == tx_list_head) {
while (tx_list_head->status.status_word == 0) {
- mdelay(1);
+ udelay(10);
if (tx_list_head->status.status_word != 0
- || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
+ || !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) {
goto adjust_head;
}
if (timeout_cnt-- < 0) {
@@ -606,93 +606,41 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
u16 *data;
-
+ u32 data_align = (unsigned long)(skb->data) & 0x3;
current_tx_ptr->skb = skb;
- if (ANOMALY_05000285) {
- /*
- * TXDWA feature is not avaible to older revision < 0.3 silicon
- * of BF537
- *
- * Only if data buffer is ODD WORD alignment, we do not
- * need to memcpy
- */
- u32 data_align = (u32)(skb->data) & 0x3;
- if (data_align == 0x2) {
- /* move skb->data to current_tx_ptr payload */
- data = (u16 *)(skb->data) - 1;
- *data = (u16)(skb->len);
- current_tx_ptr->desc_a.start_addr = (u32)data;
- /* this is important! */
- blackfin_dcache_flush_range((u32)data,
- (u32)((u8 *)data + skb->len + 4));
- } else {
- *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
- memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
- skb->len);
- current_tx_ptr->desc_a.start_addr =
- (u32)current_tx_ptr->packet;
- if (current_tx_ptr->status.status_word != 0)
- current_tx_ptr->status.status_word = 0;
- blackfin_dcache_flush_range(
- (u32)current_tx_ptr->packet,
- (u32)(current_tx_ptr->packet + skb->len + 2));
- }
+ if (data_align == 0x2) {
+ /* move skb->data to current_tx_ptr payload */
+ data = (u16 *)(skb->data) - 1;
+ *data = (u16)(skb->len);
+ current_tx_ptr->desc_a.start_addr = (u32)data;
+ /* this is important! */
+ blackfin_dcache_flush_range((u32)data,
+ (u32)((u8 *)data + skb->len + 4));
} else {
- /*
- * TXDWA feature is avaible to revision < 0.3 silicon of
- * BF537 and always avaible to BF52x
- */
- u32 data_align = (u32)(skb->data) & 0x3;
- if (data_align == 0x0) {
- u16 sysctl = bfin_read_EMAC_SYSCTL();
- sysctl |= TXDWA;
- bfin_write_EMAC_SYSCTL(sysctl);
-
- /* move skb->data to current_tx_ptr payload */
- data = (u16 *)(skb->data) - 2;
- *data = (u16)(skb->len);
- current_tx_ptr->desc_a.start_addr = (u32)data;
- /* this is important! */
- blackfin_dcache_flush_range(
- (u32)data,
- (u32)((u8 *)data + skb->len + 4));
- } else if (data_align == 0x2) {
- u16 sysctl = bfin_read_EMAC_SYSCTL();
- sysctl &= ~TXDWA;
- bfin_write_EMAC_SYSCTL(sysctl);
-
- /* move skb->data to current_tx_ptr payload */
- data = (u16 *)(skb->data) - 1;
- *data = (u16)(skb->len);
- current_tx_ptr->desc_a.start_addr = (u32)data;
- /* this is important! */
- blackfin_dcache_flush_range(
- (u32)data,
- (u32)((u8 *)data + skb->len + 4));
- } else {
- u16 sysctl = bfin_read_EMAC_SYSCTL();
- sysctl &= ~TXDWA;
- bfin_write_EMAC_SYSCTL(sysctl);
-
- *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
- memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
- skb->len);
- current_tx_ptr->desc_a.start_addr =
- (u32)current_tx_ptr->packet;
- if (current_tx_ptr->status.status_word != 0)
- current_tx_ptr->status.status_word = 0;
- blackfin_dcache_flush_range(
- (u32)current_tx_ptr->packet,
- (u32)(current_tx_ptr->packet + skb->len + 2));
- }
+ *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
+ memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
+ skb->len);
+ current_tx_ptr->desc_a.start_addr =
+ (u32)current_tx_ptr->packet;
+ if (current_tx_ptr->status.status_word != 0)
+ current_tx_ptr->status.status_word = 0;
+ blackfin_dcache_flush_range(
+ (u32)current_tx_ptr->packet,
+ (u32)(current_tx_ptr->packet + skb->len + 2));
}
+ /* make sure the internal data buffers in the core are drained
+ * so that the DMA descriptors are completely written when the
+ * DMA engine goes to fetch them below
+ */
+ SSYNC();
+
/* enable this packet's dma */
current_tx_ptr->desc_a.config |= DMAEN;
/* tx dma is running, just return */
- if (bfin_read_DMA2_IRQ_STATUS() & 0x08)
+ if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
goto out;
/* tx dma is not running */
@@ -718,7 +666,7 @@ static void bfin_mac_rx(struct net_device *dev)
/* allocate a new skb for next time receive */
skb = current_rx_ptr->skb;
- new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+ new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
printk(KERN_NOTICE DRV_NAME
": rx: low on mem - packet dropped\n");
@@ -726,7 +674,7 @@ static void bfin_mac_rx(struct net_device *dev)
goto out;
}
/* reserve 2 bytes for RXDWA padding */
- skb_reserve(new_skb, 2);
+ skb_reserve(new_skb, NET_IP_ALIGN);
current_rx_ptr->skb = new_skb;
current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
@@ -979,22 +927,7 @@ static int bfin_mac_open(struct net_device *dev)
return 0;
}
-static const struct net_device_ops bfin_mac_netdev_ops = {
- .ndo_open = bfin_mac_open,
- .ndo_stop = bfin_mac_close,
- .ndo_start_xmit = bfin_mac_hard_start_xmit,
- .ndo_set_mac_address = bfin_mac_set_mac_address,
- .ndo_tx_timeout = bfin_mac_timeout,
- .ndo_set_multicast_list = bfin_mac_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = bfin_mac_poll,
-#endif
-};
-
/*
- *
* this makes the board clean up everything that it can
* and not talk to the outside world. Caused by
* an 'ifconfig ethX down'
@@ -1019,11 +952,26 @@ static int bfin_mac_close(struct net_device *dev)
return 0;
}
+static const struct net_device_ops bfin_mac_netdev_ops = {
+ .ndo_open = bfin_mac_open,
+ .ndo_stop = bfin_mac_close,
+ .ndo_start_xmit = bfin_mac_hard_start_xmit,
+ .ndo_set_mac_address = bfin_mac_set_mac_address,
+ .ndo_tx_timeout = bfin_mac_timeout,
+ .ndo_set_multicast_list = bfin_mac_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bfin_mac_poll,
+#endif
+};
+
static int __devinit bfin_mac_probe(struct platform_device *pdev)
{
struct net_device *ndev;
struct bfin_mac_local *lp;
- int rc, i;
+ struct platform_device *pd;
+ int rc;
ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
if (!ndev) {
@@ -1048,13 +996,6 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
goto out_err_probe_mac;
}
- /* set the GPIO pins to Ethernet mode */
- rc = peripheral_request_list(pin_req, DRV_NAME);
- if (rc) {
- dev_err(&pdev->dev, "Requesting peripherals failed!\n");
- rc = -EFAULT;
- goto out_err_setup_pin_mux;
- }
/*
* Is it valid? (Did bootloader initialize it?)
@@ -1070,26 +1011,14 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
setup_mac_addr(ndev->dev_addr);
- /* MDIO bus initial */
- lp->mii_bus = mdiobus_alloc();
- if (lp->mii_bus == NULL)
- goto out_err_mdiobus_alloc;
-
- lp->mii_bus->priv = ndev;
- lp->mii_bus->read = bfin_mdiobus_read;
- lp->mii_bus->write = bfin_mdiobus_write;
- lp->mii_bus->reset = bfin_mdiobus_reset;
- lp->mii_bus->name = "bfin_mac_mdio";
- snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "0");
- lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
- for (i = 0; i < PHY_MAX_ADDR; ++i)
- lp->mii_bus->irq[i] = PHY_POLL;
-
- rc = mdiobus_register(lp->mii_bus);
- if (rc) {
- dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
- goto out_err_mdiobus_register;
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
+ rc = -ENODEV;
+ goto out_err_probe_mac;
}
+ pd = pdev->dev.platform_data;
+ lp->mii_bus = platform_get_drvdata(pd);
+ lp->mii_bus->priv = ndev;
rc = mii_probe(ndev);
if (rc) {
@@ -1108,7 +1037,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
/* now, enable interrupts */
/* register irq handler */
rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
- IRQF_DISABLED | IRQF_SHARED, "EMAC_RX", ndev);
+ IRQF_DISABLED, "EMAC_RX", ndev);
if (rc) {
dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
rc = -EBUSY;
@@ -1131,11 +1060,8 @@ out_err_reg_ndev:
out_err_request_irq:
out_err_mii_probe:
mdiobus_unregister(lp->mii_bus);
-out_err_mdiobus_register:
mdiobus_free(lp->mii_bus);
-out_err_mdiobus_alloc:
peripheral_free_list(pin_req);
-out_err_setup_pin_mux:
out_err_probe_mac:
platform_set_drvdata(pdev, NULL);
free_netdev(ndev);
@@ -1150,8 +1076,7 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- mdiobus_unregister(lp->mii_bus);
- mdiobus_free(lp->mii_bus);
+ lp->mii_bus->priv = NULL;
unregister_netdev(ndev);
@@ -1189,6 +1114,74 @@ static int bfin_mac_resume(struct platform_device *pdev)
#define bfin_mac_resume NULL
#endif /* CONFIG_PM */
+static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
+{
+ struct mii_bus *miibus;
+ int rc, i;
+
+ /*
+ * We are setting up a network card,
+ * so set the GPIO pins to Ethernet mode
+ */
+ rc = peripheral_request_list(pin_req, DRV_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "Requesting peripherals failed!\n");
+ return rc;
+ }
+
+ rc = -ENOMEM;
+ miibus = mdiobus_alloc();
+ if (miibus == NULL)
+ goto out_err_alloc;
+ miibus->read = bfin_mdiobus_read;
+ miibus->write = bfin_mdiobus_write;
+ miibus->reset = bfin_mdiobus_reset;
+
+ miibus->parent = &pdev->dev;
+ miibus->name = "bfin_mii_bus";
+ snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
+ miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (miibus->irq == NULL)
+ goto out_err_alloc;
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ miibus->irq[i] = PHY_POLL;
+
+ rc = mdiobus_register(miibus);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+ goto out_err_mdiobus_register;
+ }
+
+ platform_set_drvdata(pdev, miibus);
+ return 0;
+
+out_err_mdiobus_register:
+ mdiobus_free(miibus);
+out_err_alloc:
+ peripheral_free_list(pin_req);
+
+ return rc;
+}
+
+static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
+{
+ struct mii_bus *miibus = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
+ mdiobus_unregister(miibus);
+ mdiobus_free(miibus);
+ peripheral_free_list(pin_req);
+ return 0;
+}
+
+static struct platform_driver bfin_mii_bus_driver = {
+ .probe = bfin_mii_bus_probe,
+ .remove = __devexit_p(bfin_mii_bus_remove),
+ .driver = {
+ .name = "bfin_mii_bus",
+ .owner = THIS_MODULE,
+ },
+};
+
static struct platform_driver bfin_mac_driver = {
.probe = bfin_mac_probe,
.remove = __devexit_p(bfin_mac_remove),
@@ -1202,7 +1195,11 @@ static struct platform_driver bfin_mac_driver = {
static int __init bfin_mac_init(void)
{
- return platform_driver_register(&bfin_mac_driver);
+ int ret;
+ ret = platform_driver_register(&bfin_mii_bus_driver);
+ if (!ret)
+ return platform_driver_register(&bfin_mac_driver);
+ return -ENODEV;
}
module_init(bfin_mac_init);
@@ -1210,6 +1207,7 @@ module_init(bfin_mac_init);
static void __exit bfin_mac_cleanup(void)
{
platform_driver_unregister(&bfin_mac_driver);
+ platform_driver_unregister(&bfin_mii_bus_driver);
}
module_exit(bfin_mac_cleanup);
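
The Blackfin change splits MDIO bus setup out of the MAC probe into a separate bfin_mii_bus platform driver, so module init now registers two platform drivers and the MAC probe finds the bus through platform data. Note that the init above maps a failed first registration to -ENODEV and leaves the MII bus driver registered if the MAC registration fails; the sketch below shows the stricter shape of that pattern, with made-up driver names, and is not the driver's code.

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver demo_mii_bus_driver;      /* defined elsewhere */
static struct platform_driver demo_mac_driver;          /* defined elsewhere */

static int __init demo_init(void)
{
        int ret;

        ret = platform_driver_register(&demo_mii_bus_driver);
        if (ret)
                return ret;

        ret = platform_driver_register(&demo_mac_driver);
        if (ret)
                platform_driver_unregister(&demo_mii_bus_driver);
        return ret;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
        platform_driver_unregister(&demo_mac_driver);
        platform_driver_unregister(&demo_mii_bus_driver);
}
module_exit(demo_exit);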
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 44d015f70d1..9578a3dfac0 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1247,6 +1247,16 @@ static const struct ethtool_ops bmac_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
+static const struct net_device_ops bmac_netdev_ops = {
+ .ndo_open = bmac_open,
+ .ndo_stop = bmac_close,
+ .ndo_start_xmit = bmac_output,
+ .ndo_set_multicast_list = bmac_set_multicast,
+ .ndo_set_mac_address = bmac_set_address,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
int j, rev, ret;
@@ -1308,12 +1318,8 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
bmac_enable_and_reset_chip(dev);
bmwrite(dev, INTDISABLE, DisableAll);
- dev->open = bmac_open;
- dev->stop = bmac_close;
+ dev->netdev_ops = &bmac_netdev_ops;
dev->ethtool_ops = &bmac_ethtool_ops;
- dev->hard_start_xmit = bmac_output;
- dev->set_multicast_list = bmac_set_multicast;
- dev->set_mac_address = bmac_set_address;
bmac_get_station_address(dev, addr);
if (bmac_verify_checksum(dev) != 0)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index b0cb29d4cc0..7e3738112c4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -48,7 +48,12 @@
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
+#include <linux/list.h>
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "cnic_if.h"
+#endif
#include "bnx2.h"
#include "bnx2_fw.h"
@@ -315,6 +320,158 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
spin_unlock_bh(&bp->indirect_lock);
}
+#ifdef BCM_CNIC
+static int
+bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ struct drv_ctl_io *io = &info->data.io;
+
+ switch (info->cmd) {
+ case DRV_CTL_IO_WR_CMD:
+ bnx2_reg_wr_ind(bp, io->offset, io->data);
+ break;
+ case DRV_CTL_IO_RD_CMD:
+ io->data = bnx2_reg_rd_ind(bp, io->offset);
+ break;
+ case DRV_CTL_CTX_WR_CMD:
+ bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
+{
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ int sb_id;
+
+ if (bp->flags & BNX2_FLAG_USING_MSIX) {
+ cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+ bnapi->cnic_present = 0;
+ sb_id = bp->irq_nvecs;
+ cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+ } else {
+ cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+ bnapi->cnic_tag = bnapi->last_status_idx;
+ bnapi->cnic_present = 1;
+ sb_id = 0;
+ cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+ }
+
+ cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
+ cp->irq_arr[0].status_blk = (void *)
+ ((unsigned long) bnapi->status_blk.msi +
+ (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+ cp->irq_arr[0].status_blk_num = sb_id;
+ cp->num_irq = 1;
+}
+
+static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+ void *data)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ if (ops == NULL)
+ return -EINVAL;
+
+ if (cp->drv_state & CNIC_DRV_STATE_REGD)
+ return -EBUSY;
+
+ bp->cnic_data = data;
+ rcu_assign_pointer(bp->cnic_ops, ops);
+
+ cp->num_irq = 0;
+ cp->drv_state = CNIC_DRV_STATE_REGD;
+
+ bnx2_setup_cnic_irq_info(bp);
+
+ return 0;
+}
+
+static int bnx2_unregister_cnic(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ cp->drv_state = 0;
+ bnapi->cnic_present = 0;
+ rcu_assign_pointer(bp->cnic_ops, NULL);
+ synchronize_rcu();
+ return 0;
+}
+
+struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ cp->drv_owner = THIS_MODULE;
+ cp->chip_id = bp->chip_id;
+ cp->pdev = bp->pdev;
+ cp->io_base = bp->regview;
+ cp->drv_ctl = bnx2_drv_ctl;
+ cp->drv_register_cnic = bnx2_register_cnic;
+ cp->drv_unregister_cnic = bnx2_unregister_cnic;
+
+ return cp;
+}
+EXPORT_SYMBOL(bnx2_cnic_probe);
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+ struct cnic_ops *c_ops;
+ struct cnic_ctl_info info;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops) {
+ info.cmd = CNIC_CTL_STOP_CMD;
+ c_ops->cnic_ctl(bp->cnic_data, &info);
+ }
+ rcu_read_unlock();
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+ struct cnic_ops *c_ops;
+ struct cnic_ctl_info info;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops) {
+ if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+ bnapi->cnic_tag = bnapi->last_status_idx;
+ }
+ info.cmd = CNIC_CTL_START_CMD;
+ c_ops->cnic_ctl(bp->cnic_data, &info);
+ }
+ rcu_read_unlock();
+}
+
+#else
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+}
+
+#endif
+
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
@@ -488,6 +645,7 @@ bnx2_napi_enable(struct bnx2 *bp)
static void
bnx2_netif_stop(struct bnx2 *bp)
{
+ bnx2_cnic_stop(bp);
bnx2_disable_int_sync(bp);
if (netif_running(bp->dev)) {
bnx2_napi_disable(bp);
@@ -504,6 +662,7 @@ bnx2_netif_start(struct bnx2 *bp)
netif_tx_wake_all_queues(bp->dev);
bnx2_napi_enable(bp);
bnx2_enable_int(bp);
+ bnx2_cnic_start(bp);
}
}
}
@@ -545,8 +704,7 @@ bnx2_free_rx_mem(struct bnx2 *bp)
rxr->rx_desc_mapping[j]);
rxr->rx_desc_ring[j] = NULL;
}
- if (rxr->rx_buf_ring)
- vfree(rxr->rx_buf_ring);
+ vfree(rxr->rx_buf_ring);
rxr->rx_buf_ring = NULL;
for (j = 0; j < bp->rx_max_pg_ring; j++) {
@@ -556,8 +714,7 @@ bnx2_free_rx_mem(struct bnx2 *bp)
rxr->rx_pg_desc_mapping[j]);
rxr->rx_pg_desc_ring[j] = NULL;
}
- if (rxr->rx_pg_ring)
- vfree(rxr->rx_pg_ring);
+ vfree(rxr->rx_pg_ring);
rxr->rx_pg_ring = NULL;
}
}
@@ -2630,14 +2787,15 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
tx_buf = &txr->tx_buf_ring[sw_ring_cons];
skb = tx_buf->skb;
+ /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
+ prefetch(&skb->end);
+
/* partial BD completions possible with TSO packets */
- if (skb_is_gso(skb)) {
+ if (tx_buf->is_gso) {
u16 last_idx, last_ring_idx;
- last_idx = sw_cons +
- skb_shinfo(skb)->nr_frags + 1;
- last_ring_idx = sw_ring_cons +
- skb_shinfo(skb)->nr_frags + 1;
+ last_idx = sw_cons + tx_buf->nr_frags + 1;
+ last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
last_idx++;
}
@@ -2649,7 +2807,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
tx_buf->skb = NULL;
- last = skb_shinfo(skb)->nr_frags;
+ last = tx_buf->nr_frags;
for (i = 0; i < last; i++) {
sw_cons = NEXT_TX_BD(sw_cons);
@@ -2662,7 +2820,8 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
if (tx_pkt == budget)
break;
- hw_cons = bnx2_get_hw_tx_cons(bnapi);
+ if (hw_cons == sw_cons)
+ hw_cons = bnx2_get_hw_tx_cons(bnapi);
}
txr->hw_tx_cons = hw_cons;
@@ -3164,6 +3323,11 @@ bnx2_has_work(struct bnx2_napi *bnapi)
if (bnx2_has_fast_work(bnapi))
return 1;
+#ifdef BCM_CNIC
+ if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
+ return 1;
+#endif
+
if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
(sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
return 1;
@@ -3193,6 +3357,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
bp->idle_chk_status_idx = bnapi->last_status_idx;
}
+#ifdef BCM_CNIC
+static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+ struct cnic_ops *c_ops;
+
+ if (!bnapi->cnic_present)
+ return;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
+ bnapi->status_blk.msi);
+ rcu_read_unlock();
+}
+#endif
+
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
struct status_block *sblk = bnapi->status_blk.msi;
@@ -3267,6 +3448,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
+#ifdef BCM_CNIC
+ bnx2_poll_cnic(bp, bnapi);
+#endif
+
/* bnapi->last_status_idx is used below to tell the hw how
* much work has been processed, so we must read it before
* checking for more work.
@@ -3308,7 +3493,7 @@ bnx2_set_rx_mode(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
u32 rx_mode, sort_mode;
- struct dev_addr_list *uc_ptr;
+ struct netdev_hw_addr *ha;
int i;
if (!netif_running(dev))
@@ -3367,21 +3552,19 @@ bnx2_set_rx_mode(struct net_device *dev)
sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
}
- uc_ptr = NULL;
if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
BNX2_RPM_SORT_USER0_PROM_VLAN;
} else if (!(dev->flags & IFF_PROMISC)) {
- uc_ptr = dev->uc_list;
-
/* Add all entries into to the match filter list */
- for (i = 0; i < dev->uc_count; i++) {
- bnx2_set_mac_addr(bp, uc_ptr->da_addr,
+ i = 0;
+ list_for_each_entry(ha, &dev->uc_list, list) {
+ bnx2_set_mac_addr(bp, ha->addr,
i + BNX2_START_UNICAST_ADDRESS_INDEX);
sort_mode |= (1 <<
(i + BNX2_START_UNICAST_ADDRESS_INDEX));
- uc_ptr = uc_ptr->next;
+ i++;
}
}
@@ -4632,8 +4815,11 @@ bnx2_init_chip(struct bnx2 *bp)
val = REG_RD(bp, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
- if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
- val |= BNX2_MQ_CONFIG_HALT_DIS;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
+ if (CHIP_REV(bp) == CHIP_REV_Ax)
+ val |= BNX2_MQ_CONFIG_HALT_DIS;
+ }
REG_WR(bp, BNX2_MQ_CONFIG, val);
@@ -5486,7 +5672,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
dev_kfree_skb(skb);
return -EIO;
}
- map = skb_shinfo(skb)->dma_maps[0];
+ map = skb_shinfo(skb)->dma_head;
REG_WR(bp, BNX2_HC_COMMAND,
bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
@@ -6166,7 +6352,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
sp = skb_shinfo(skb);
- mapping = sp->dma_maps[0];
+ mapping = sp->dma_head;
tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = skb;
@@ -6179,6 +6365,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
last_frag = skb_shinfo(skb)->nr_frags;
+ tx_buf->nr_frags = last_frag;
+ tx_buf->is_gso = skb_is_gso(skb);
for (i = 0; i < last_frag; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -6188,7 +6376,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd = &txr->tx_desc_ring[ring_prod];
len = frag->size;
- mapping = sp->dma_maps[i + 1];
+ mapping = sp->dma_maps[i];
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -6207,7 +6395,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
mmiowb();
txr->tx_prod = prod;
- dev->trans_start = jiffies;
if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
netif_tx_stop_queue(txq);
@@ -7471,7 +7658,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
INIT_WORK(&bp->reset_task, bnx2_reset_task);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
- mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
+ mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
dev->mem_end = dev->mem_start + mem_len;
dev->irq = pdev->irq;
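
The CNIC hooks added to bnx2 publish the offload driver's ops through an RCU-protected pointer: registration uses rcu_assign_pointer(), the poll and stop/start paths read it under rcu_read_lock()/rcu_dereference(), and unregistration clears the pointer and calls synchronize_rcu() before the caller may release anything the ops reference. A stripped-down sketch of that lifecycle (struct demo_ops and the demo_* functions are hypothetical):

#include <linux/errno.h>
#include <linux/rcupdate.h>

struct demo_ops {
        void (*handler)(void *data);
};

static struct demo_ops *demo_ops_ptr;
static void *demo_ops_data;

static int demo_register(struct demo_ops *ops, void *data)
{
        if (!ops)
                return -EINVAL;
        demo_ops_data = data;
        rcu_assign_pointer(demo_ops_ptr, ops);  /* publish after data is set */
        return 0;
}

static void demo_unregister(void)
{
        rcu_assign_pointer(demo_ops_ptr, NULL);
        synchronize_rcu();      /* readers that saw the old ops have finished */
}

static void demo_call(void)
{
        struct demo_ops *ops;

        rcu_read_lock();
        ops = rcu_dereference(demo_ops_ptr);
        if (ops)
                ops->handler(demo_ops_data);
        rcu_read_unlock();
}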
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5b570e17c83..f1edfaa9e56 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -361,6 +361,9 @@ struct l2_fhdr {
#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28)
#define BNX2_L2CTX_HOST_BDIDX 0x00000004
+#define BNX2_L2CTX_STATUSB_NUM_SHIFT 16
+#define BNX2_L2CTX_STATUSB_NUM(sb_id) \
+ (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
#define BNX2_L2CTX_HOST_BSEQ 0x00000008
#define BNX2_L2CTX_NX_BSEQ 0x0000000c
#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010
@@ -5900,6 +5903,7 @@ struct l2_fhdr {
#define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
#define BNX2_RXP_SCRATCH 0x000e0000
+#define BNX2_RXP_SCRATCH_RXP_FLOOD 0x000e0024
#define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038
#define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c
#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128
@@ -6552,6 +6556,8 @@ struct sw_pg {
struct sw_tx_bd {
struct sk_buff *skb;
+ unsigned short is_gso;
+ unsigned short nr_frags;
};
#define SW_RXBD_RING_SIZE (sizeof(struct sw_bd) * RX_DESC_CNT)
@@ -6678,6 +6684,11 @@ struct bnx2_napi {
u32 last_status_idx;
u32 int_num;
+#ifdef BCM_CNIC
+ u32 cnic_tag;
+ int cnic_present;
+#endif
+
struct bnx2_rx_ring_info rx_ring;
struct bnx2_tx_ring_info tx_ring;
};
@@ -6727,6 +6738,11 @@ struct bnx2 {
int tx_ring_size;
u32 tx_wake_thresh;
+#ifdef BCM_CNIC
+ struct cnic_ops *cnic_ops;
+ void *cnic_data;
+#endif
+
/* End of fields used in the performance code paths. */
unsigned int current_interval;
@@ -6885,6 +6901,10 @@ struct bnx2 {
u32 idle_chk_status_idx;
+#ifdef BCM_CNIC
+ struct cnic_eth_dev cnic_eth_dev;
+#endif
+
const struct firmware *mips_firmware;
const struct firmware *rv2p_firmware;
};
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index a329bee2555..8678457849f 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -965,6 +965,21 @@ struct bnx2x {
int gunzip_outlen;
#define FW_BUF_SIZE 0x8000
+ struct raw_op *init_ops;
+ /* Init blocks offsets inside init_ops */
+ u16 *init_ops_offsets;
+ /* Data blob - has 32 bit granularity */
+ u32 *init_data;
+ /* Zipped PRAM blobs - raw data */
+ const u8 *tsem_int_table_data;
+ const u8 *tsem_pram_data;
+ const u8 *usem_int_table_data;
+ const u8 *usem_pram_data;
+ const u8 *xsem_int_table_data;
+ const u8 *xsem_pram_data;
+ const u8 *csem_int_table_data;
+ const u8 *csem_pram_data;
+ const struct firmware *firmware;
};
diff --git a/drivers/net/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x_fw_file_hdr.h
new file mode 100644
index 00000000000..3f5ee5d7cc2
--- /dev/null
+++ b/drivers/net/bnx2x_fw_file_hdr.h
@@ -0,0 +1,37 @@
+/* bnx2x_fw_file_hdr.h: FW binary file header structure.
+ *
+ * Copyright (c) 2007-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Based on the original idea of John Wright <john.wright@hp.com>.
+ */
+
+#ifndef BNX2X_INIT_FILE_HDR_H
+#define BNX2X_INIT_FILE_HDR_H
+
+struct bnx2x_fw_file_section {
+ __be32 len;
+ __be32 offset;
+};
+
+struct bnx2x_fw_file_hdr {
+ struct bnx2x_fw_file_section init_ops;
+ struct bnx2x_fw_file_section init_ops_offsets;
+ struct bnx2x_fw_file_section init_data;
+ struct bnx2x_fw_file_section tsem_int_table_data;
+ struct bnx2x_fw_file_section tsem_pram_data;
+ struct bnx2x_fw_file_section usem_int_table_data;
+ struct bnx2x_fw_file_section usem_pram_data;
+ struct bnx2x_fw_file_section csem_int_table_data;
+ struct bnx2x_fw_file_section csem_pram_data;
+ struct bnx2x_fw_file_section xsem_int_table_data;
+ struct bnx2x_fw_file_section xsem_pram_data;
+ struct bnx2x_fw_file_section fw_version;
+};
+
+#endif /* BNX2X_INIT_FILE_HDR_H */
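The header above only describes the on-image layout; the len/offset fields of each section are big-endian. A minimal sketch of how a loader could resolve one section inside a request_firmware() image follows; the helper name and the bounds check are illustrative assumptions, not code from this patch:

	/* Illustrative only; assumes <linux/firmware.h> and <linux/kernel.h>.
	 * Returns a pointer into fw->data for one section, or NULL if the
	 * section does not fit inside the image. */
	static const u8 *bnx2x_fw_section(const struct firmware *fw,
					  const struct bnx2x_fw_file_section *s,
					  u32 *len)
	{
		u32 off = be32_to_cpu(s->offset);

		*len = be32_to_cpu(s->len);
		if (off > fw->size || *len > fw->size - off)
			return NULL;	/* corrupt or truncated image */
		return fw->data + off;
	}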
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index 39ba2936c0c..3ba4d888068 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -1,4 +1,5 @@
/* bnx2x_init.h: Broadcom Everest network driver.
+ * Structures and macros needed during initialization.
*
* Copyright (c) 2007-2009 Broadcom Corporation
*
@@ -8,6 +9,7 @@
*
* Maintained by: Eilon Greenstein <eilong@broadcom.com>
* Written by: Eliezer Tamir
+ * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
*/
#ifndef BNX2X_INIT_H
@@ -45,33 +47,71 @@
#define OP_WR_64 0x8 /* write 64 bit pattern */
#define OP_WB 0x9 /* copy a string using DMAE */
-/* Operation specific for E1 */
-#define OP_RD_E1 0xa /* read single register */
-#define OP_WR_E1 0xb /* write single register */
-#define OP_IW_E1 0xc /* write single register using mailbox */
-#define OP_SW_E1 0xd /* copy a string to the device */
-#define OP_SI_E1 0xe /* copy a string using mailbox */
-#define OP_ZR_E1 0xf /* clear memory */
-#define OP_ZP_E1 0x10 /* unzip then copy with DMAE */
-#define OP_WR_64_E1 0x11 /* write 64 bit pattern on E1 */
-#define OP_WB_E1 0x12 /* copy a string using DMAE */
-
-/* Operation specific for E1H */
-#define OP_RD_E1H 0x13 /* read single register */
-#define OP_WR_E1H 0x14 /* write single register */
-#define OP_IW_E1H 0x15 /* write single register using mailbox */
-#define OP_SW_E1H 0x16 /* copy a string to the device */
-#define OP_SI_E1H 0x17 /* copy a string using mailbox */
-#define OP_ZR_E1H 0x18 /* clear memory */
-#define OP_ZP_E1H 0x19 /* unzip then copy with DMAE */
-#define OP_WR_64_E1H 0x1a /* write 64 bit pattern on E1H */
-#define OP_WB_E1H 0x1b /* copy a string using DMAE */
-
/* FPGA and EMUL specific operations */
-#define OP_WR_EMUL_E1H 0x1c /* write single register on E1H Emul */
-#define OP_WR_EMUL 0x1d /* write single register on Emulation */
-#define OP_WR_FPGA 0x1e /* write single register on FPGA */
-#define OP_WR_ASIC 0x1f /* write single register on ASIC */
+#define OP_WR_EMUL 0xa /* write single register on Emulation */
+#define OP_WR_FPGA 0xb /* write single register on FPGA */
+#define OP_WR_ASIC 0xc /* write single register on ASIC */
+
+/* Init stages */
+#define COMMON_STAGE 0
+#define PORT0_STAGE 1
+#define PORT1_STAGE 2
+/* Never reorder FUNCx stages !!! */
+#define FUNC0_STAGE 3
+#define FUNC1_STAGE 4
+#define FUNC2_STAGE 5
+#define FUNC3_STAGE 6
+#define FUNC4_STAGE 7
+#define FUNC5_STAGE 8
+#define FUNC6_STAGE 9
+#define FUNC7_STAGE 10
+#define STAGE_IDX_MAX 11
+
+#define STAGE_START 0
+#define STAGE_END 1
+
+
+/* Indices of blocks */
+#define PRS_BLOCK 0
+#define SRCH_BLOCK 1
+#define TSDM_BLOCK 2
+#define TCM_BLOCK 3
+#define BRB1_BLOCK 4
+#define TSEM_BLOCK 5
+#define PXPCS_BLOCK 6
+#define EMAC0_BLOCK 7
+#define EMAC1_BLOCK 8
+#define DBU_BLOCK 9
+#define MISC_BLOCK 10
+#define DBG_BLOCK 11
+#define NIG_BLOCK 12
+#define MCP_BLOCK 13
+#define UPB_BLOCK 14
+#define CSDM_BLOCK 15
+#define USDM_BLOCK 16
+#define CCM_BLOCK 17
+#define UCM_BLOCK 18
+#define USEM_BLOCK 19
+#define CSEM_BLOCK 20
+#define XPB_BLOCK 21
+#define DQ_BLOCK 22
+#define TIMERS_BLOCK 23
+#define XSDM_BLOCK 24
+#define QM_BLOCK 25
+#define PBF_BLOCK 26
+#define XCM_BLOCK 27
+#define XSEM_BLOCK 28
+#define CDU_BLOCK 29
+#define DMAE_BLOCK 30
+#define PXP_BLOCK 31
+#define CFC_BLOCK 32
+#define HC_BLOCK 33
+#define PXP2_BLOCK 34
+#define MISC_AEU_BLOCK 35
+
+/* Returns the index of start or end of a specific block stage in ops array*/
+#define BLOCK_OPS_IDX(block, stage, end) \
+ (2*(((block)*STAGE_IDX_MAX) + (stage)) + (end))
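As a worked example of the indexing above: the start of the TSEM block ops for the PORT0 stage is BLOCK_OPS_IDX(TSEM_BLOCK, PORT0_STAGE, STAGE_START) = 2*((5*11) + 1) + 0 = 112, and the matching end marker sits at index 113; both values are offsets into the init_ops_offsets table loaded from the firmware file.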
struct raw_op {
@@ -118,292 +158,6 @@ union init_op {
struct raw_op raw;
};
-#include "bnx2x_init_values.h"
-
-static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
-static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len);
-
-static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
- u32 len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- REG_WR(bp, addr + i*4, data[i]);
- if (!(i % 10000)) {
- touch_softlockup_watchdog();
- cpu_relax();
- }
- }
-}
-
-static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
- u16 len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- REG_WR_IND(bp, addr + i*4, data[i]);
- if (!(i % 10000)) {
- touch_softlockup_watchdog();
- cpu_relax();
- }
- }
-}
-
-static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
-{
- int offset = 0;
-
- if (bp->dmae_ready) {
- while (len > DMAE_LEN32_WR_MAX) {
- bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
- addr + offset, DMAE_LEN32_WR_MAX);
- offset += DMAE_LEN32_WR_MAX * 4;
- len -= DMAE_LEN32_WR_MAX;
- }
- bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
- addr + offset, len);
- } else
- bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len);
-}
-
-static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
-{
- u32 buf_len = (((len * 4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len * 4));
- u32 buf_len32 = buf_len / 4;
- int i;
-
- memset(bp->gunzip_buf, fill, buf_len);
-
- for (i = 0; i < len; i += buf_len32) {
- u32 cur_len = min(buf_len32, len - i);
-
- bnx2x_write_big_buf(bp, addr + i * 4, cur_len);
- }
-}
-
-static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
- u32 len64)
-{
- u32 buf_len32 = FW_BUF_SIZE / 4;
- u32 len = len64 * 2;
- u64 data64 = 0;
- int i;
-
- /* 64 bit value is in a blob: first low DWORD, then high DWORD */
- data64 = HILO_U64((*(data + 1)), (*data));
- len64 = min((u32)(FW_BUF_SIZE/8), len64);
- for (i = 0; i < len64; i++) {
- u64 *pdata = ((u64 *)(bp->gunzip_buf)) + i;
-
- *pdata = data64;
- }
-
- for (i = 0; i < len; i += buf_len32) {
- u32 cur_len = min(buf_len32, len - i);
-
- bnx2x_write_big_buf(bp, addr + i * 4, cur_len);
- }
-}
-
-/*********************************************************
- There are different blobs for each PRAM section.
- In addition, each blob write operation is divided into a few operations
- in order to decrease the amount of phys. contiguous buffer needed.
- Thus, when we select a blob the address may be with some offset
- from the beginning of PRAM section.
- The same holds for the INT_TABLE sections.
-**********************************************************/
-#define IF_IS_INT_TABLE_ADDR(base, addr) \
- if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
-
-#define IF_IS_PRAM_ADDR(base, addr) \
- if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
-
-static const u32 *bnx2x_sel_blob(u32 addr, const u32 *data, int is_e1)
-{
- IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
- data = is_e1 ? tsem_int_table_data_e1 :
- tsem_int_table_data_e1h;
- else
- IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
- data = is_e1 ? csem_int_table_data_e1 :
- csem_int_table_data_e1h;
- else
- IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
- data = is_e1 ? usem_int_table_data_e1 :
- usem_int_table_data_e1h;
- else
- IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
- data = is_e1 ? xsem_int_table_data_e1 :
- xsem_int_table_data_e1h;
- else
- IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
- data = is_e1 ? tsem_pram_data_e1 : tsem_pram_data_e1h;
- else
- IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
- data = is_e1 ? csem_pram_data_e1 : csem_pram_data_e1h;
- else
- IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
- data = is_e1 ? usem_pram_data_e1 : usem_pram_data_e1h;
- else
- IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
- data = is_e1 ? xsem_pram_data_e1 : xsem_pram_data_e1h;
-
- return data;
-}
-
-static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
- u32 len, int gunzip, int is_e1, u32 blob_off)
-{
- int offset = 0;
-
- data = bnx2x_sel_blob(addr, data, is_e1) + blob_off;
-
- if (gunzip) {
- int rc;
-#ifdef __BIG_ENDIAN
- int i, size;
- u32 *temp;
-
- temp = kmalloc(len, GFP_KERNEL);
- size = (len / 4) + ((len % 4) ? 1 : 0);
- for (i = 0; i < size; i++)
- temp[i] = swab32(data[i]);
- data = temp;
-#endif
- rc = bnx2x_gunzip(bp, (u8 *)data, len);
- if (rc) {
- BNX2X_ERR("gunzip failed ! rc %d\n", rc);
-#ifdef __BIG_ENDIAN
- kfree(temp);
-#endif
- return;
- }
- len = bp->gunzip_outlen;
-#ifdef __BIG_ENDIAN
- kfree(temp);
- for (i = 0; i < len; i++)
- ((u32 *)bp->gunzip_buf)[i] =
- swab32(((u32 *)bp->gunzip_buf)[i]);
-#endif
- } else {
- if ((len * 4) > FW_BUF_SIZE) {
- BNX2X_ERR("LARGE DMAE OPERATION ! "
- "addr 0x%x len 0x%x\n", addr, len*4);
- return;
- }
- memcpy(bp->gunzip_buf, data, len * 4);
- }
-
- if (bp->dmae_ready) {
- while (len > DMAE_LEN32_WR_MAX) {
- bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
- addr + offset, DMAE_LEN32_WR_MAX);
- offset += DMAE_LEN32_WR_MAX * 4;
- len -= DMAE_LEN32_WR_MAX;
- }
- bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
- addr + offset, len);
- } else
- bnx2x_init_ind_wr(bp, addr, bp->gunzip_buf, len);
-}
-
-static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
-{
- int is_e1 = CHIP_IS_E1(bp);
- int is_e1h = CHIP_IS_E1H(bp);
- int is_emul_e1h = (CHIP_REV_IS_EMUL(bp) && is_e1h);
- int hw_wr, i;
- union init_op *op;
- u32 op_type, addr, len;
- const u32 *data, *data_base;
-
- if (CHIP_REV_IS_FPGA(bp))
- hw_wr = OP_WR_FPGA;
- else if (CHIP_REV_IS_EMUL(bp))
- hw_wr = OP_WR_EMUL;
- else
- hw_wr = OP_WR_ASIC;
-
- if (is_e1)
- data_base = init_data_e1;
- else /* CHIP_IS_E1H(bp) */
- data_base = init_data_e1h;
-
- for (i = op_start; i < op_end; i++) {
-
- op = (union init_op *)&(init_ops[i]);
-
- op_type = op->str_wr.op;
- addr = op->str_wr.offset;
- len = op->str_wr.data_len;
- data = data_base + op->str_wr.data_off;
-
- /* careful! it must be in order */
- if (unlikely(op_type > OP_WB)) {
-
- /* If E1 only */
- if (op_type <= OP_WB_E1) {
- if (is_e1)
- op_type -= (OP_RD_E1 - OP_RD);
-
- /* If E1H only */
- } else if (op_type <= OP_WB_E1H) {
- if (is_e1h)
- op_type -= (OP_RD_E1H - OP_RD);
- }
-
- /* HW/EMUL specific */
- if (op_type == hw_wr)
- op_type = OP_WR;
-
- /* EMUL on E1H is special */
- if ((op_type == OP_WR_EMUL_E1H) && is_emul_e1h)
- op_type = OP_WR;
- }
-
- switch (op_type) {
- case OP_RD:
- REG_RD(bp, addr);
- break;
- case OP_WR:
- REG_WR(bp, addr, op->write.val);
- break;
- case OP_SW:
- bnx2x_init_str_wr(bp, addr, data, len);
- break;
- case OP_WB:
- bnx2x_init_wr_wb(bp, addr, data, len, 0, is_e1, 0);
- break;
- case OP_SI:
- bnx2x_init_ind_wr(bp, addr, data, len);
- break;
- case OP_ZR:
- bnx2x_init_fill(bp, addr, 0, op->zero.len);
- break;
- case OP_ZP:
- bnx2x_init_wr_wb(bp, addr, data, len, 1, is_e1,
- op->str_wr.data_off);
- break;
- case OP_WR_64:
- bnx2x_init_wr_64(bp, addr, data, len);
- break;
- default:
- /* happens whenever an op is of a diff HW */
-#if 0
- DP(NETIF_MSG_HW, "skipping init operation "
- "index %d[%d:%d]: type %d addr 0x%x "
- "len %d(0x%x)\n",
- i, op_start, op_end, op_type, addr, len, len);
-#endif
- break;
- }
- }
-}
-
-
/****************************************************************************
* PXP
****************************************************************************/
@@ -567,111 +321,6 @@ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
PXP2_REG_RQ_BW_WR_UBOUND30}
};
-static void bnx2x_init_pxp(struct bnx2x *bp)
-{
- u16 devctl;
- int r_order, w_order;
- u32 val, i;
-
- pci_read_config_word(bp->pdev,
- bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
- DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
- w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
- if (bp->mrrs == -1)
- r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
- else {
- DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
- r_order = bp->mrrs;
- }
-
- if (r_order > MAX_RD_ORD) {
- DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
- r_order, MAX_RD_ORD);
- r_order = MAX_RD_ORD;
- }
- if (w_order > MAX_WR_ORD) {
- DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
- w_order, MAX_WR_ORD);
- w_order = MAX_WR_ORD;
- }
- if (CHIP_REV_IS_FPGA(bp)) {
- DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
- w_order = 0;
- }
- DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);
-
- for (i = 0; i < NUM_RD_Q-1; i++) {
- REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
- REG_WR(bp, read_arb_addr[i].add,
- read_arb_data[i][r_order].add);
- REG_WR(bp, read_arb_addr[i].ubound,
- read_arb_data[i][r_order].ubound);
- }
-
- for (i = 0; i < NUM_WR_Q-1; i++) {
- if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
- (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
-
- REG_WR(bp, write_arb_addr[i].l,
- write_arb_data[i][w_order].l);
-
- REG_WR(bp, write_arb_addr[i].add,
- write_arb_data[i][w_order].add);
-
- REG_WR(bp, write_arb_addr[i].ubound,
- write_arb_data[i][w_order].ubound);
- } else {
-
- val = REG_RD(bp, write_arb_addr[i].l);
- REG_WR(bp, write_arb_addr[i].l,
- val | (write_arb_data[i][w_order].l << 10));
-
- val = REG_RD(bp, write_arb_addr[i].add);
- REG_WR(bp, write_arb_addr[i].add,
- val | (write_arb_data[i][w_order].add << 10));
-
- val = REG_RD(bp, write_arb_addr[i].ubound);
- REG_WR(bp, write_arb_addr[i].ubound,
- val | (write_arb_data[i][w_order].ubound << 7));
- }
- }
-
- val = write_arb_data[NUM_WR_Q-1][w_order].add;
- val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
- val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
- REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
-
- val = read_arb_data[NUM_RD_Q-1][r_order].add;
- val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
- val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
- REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
-
- REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
- REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
- REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
- REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
-
- if (r_order == MAX_RD_ORD)
- REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
-
- REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
-
- if (CHIP_IS_E1H(bp)) {
- val = ((w_order == 0) ? 2 : 3);
- REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
- REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
- REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
- REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
- REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
- REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
- REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
- REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
- REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
- REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
- REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
- }
-}
-
/****************************************************************************
* CDU
@@ -695,128 +344,12 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
(0x80 | ((_type) & 0xf << 3) | (CDU_CRC8(_cid, _region, _type) & 0x7))
#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
-/*****************************************************************************
- * Description:
- * Calculates crc 8 on a word value: polynomial 0-1-2-8
- * Code was translated from Verilog.
- ****************************************************************************/
-static u8 calc_crc8(u32 data, u8 crc)
-{
- u8 D[32];
- u8 NewCRC[8];
- u8 C[8];
- u8 crc_res;
- u8 i;
-
- /* split the data into 31 bits */
- for (i = 0; i < 32; i++) {
- D[i] = data & 1;
- data = data >> 1;
- }
-
- /* split the crc into 8 bits */
- for (i = 0; i < 8; i++) {
- C[i] = crc & 1;
- crc = crc >> 1;
- }
-
- NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
- D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
- C[6] ^ C[7];
- NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
- D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
- D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
- NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
- D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
- C[0] ^ C[1] ^ C[4] ^ C[5];
- NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
- D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
- C[1] ^ C[2] ^ C[5] ^ C[6];
- NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
- D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
- C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
- NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
- D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
- C[3] ^ C[4] ^ C[7];
- NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
- D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
- C[5];
- NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
- D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
- C[6];
-
- crc_res = 0;
- for (i = 0; i < 8; i++)
- crc_res |= (NewCRC[i] << i);
-
- return crc_res;
-}
/* registers addresses are not in order
so these arrays help simplify the code */
-static const int cm_start[E1H_FUNC_MAX][9] = {
- {MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START,
- XCM_FUNC0_START, TSEM_FUNC0_START, USEM_FUNC0_START, CSEM_FUNC0_START,
- XSEM_FUNC0_START},
- {MISC_FUNC1_START, TCM_FUNC1_START, UCM_FUNC1_START, CCM_FUNC1_START,
- XCM_FUNC1_START, TSEM_FUNC1_START, USEM_FUNC1_START, CSEM_FUNC1_START,
- XSEM_FUNC1_START},
- {MISC_FUNC2_START, TCM_FUNC2_START, UCM_FUNC2_START, CCM_FUNC2_START,
- XCM_FUNC2_START, TSEM_FUNC2_START, USEM_FUNC2_START, CSEM_FUNC2_START,
- XSEM_FUNC2_START},
- {MISC_FUNC3_START, TCM_FUNC3_START, UCM_FUNC3_START, CCM_FUNC3_START,
- XCM_FUNC3_START, TSEM_FUNC3_START, USEM_FUNC3_START, CSEM_FUNC3_START,
- XSEM_FUNC3_START},
- {MISC_FUNC4_START, TCM_FUNC4_START, UCM_FUNC4_START, CCM_FUNC4_START,
- XCM_FUNC4_START, TSEM_FUNC4_START, USEM_FUNC4_START, CSEM_FUNC4_START,
- XSEM_FUNC4_START},
- {MISC_FUNC5_START, TCM_FUNC5_START, UCM_FUNC5_START, CCM_FUNC5_START,
- XCM_FUNC5_START, TSEM_FUNC5_START, USEM_FUNC5_START, CSEM_FUNC5_START,
- XSEM_FUNC5_START},
- {MISC_FUNC6_START, TCM_FUNC6_START, UCM_FUNC6_START, CCM_FUNC6_START,
- XCM_FUNC6_START, TSEM_FUNC6_START, USEM_FUNC6_START, CSEM_FUNC6_START,
- XSEM_FUNC6_START},
- {MISC_FUNC7_START, TCM_FUNC7_START, UCM_FUNC7_START, CCM_FUNC7_START,
- XCM_FUNC7_START, TSEM_FUNC7_START, USEM_FUNC7_START, CSEM_FUNC7_START,
- XSEM_FUNC7_START}
-};
-
-static const int cm_end[E1H_FUNC_MAX][9] = {
- {MISC_FUNC0_END, TCM_FUNC0_END, UCM_FUNC0_END, CCM_FUNC0_END,
- XCM_FUNC0_END, TSEM_FUNC0_END, USEM_FUNC0_END, CSEM_FUNC0_END,
- XSEM_FUNC0_END},
- {MISC_FUNC1_END, TCM_FUNC1_END, UCM_FUNC1_END, CCM_FUNC1_END,
- XCM_FUNC1_END, TSEM_FUNC1_END, USEM_FUNC1_END, CSEM_FUNC1_END,
- XSEM_FUNC1_END},
- {MISC_FUNC2_END, TCM_FUNC2_END, UCM_FUNC2_END, CCM_FUNC2_END,
- XCM_FUNC2_END, TSEM_FUNC2_END, USEM_FUNC2_END, CSEM_FUNC2_END,
- XSEM_FUNC2_END},
- {MISC_FUNC3_END, TCM_FUNC3_END, UCM_FUNC3_END, CCM_FUNC3_END,
- XCM_FUNC3_END, TSEM_FUNC3_END, USEM_FUNC3_END, CSEM_FUNC3_END,
- XSEM_FUNC3_END},
- {MISC_FUNC4_END, TCM_FUNC4_END, UCM_FUNC4_END, CCM_FUNC4_END,
- XCM_FUNC4_END, TSEM_FUNC4_END, USEM_FUNC4_END, CSEM_FUNC4_END,
- XSEM_FUNC4_END},
- {MISC_FUNC5_END, TCM_FUNC5_END, UCM_FUNC5_END, CCM_FUNC5_END,
- XCM_FUNC5_END, TSEM_FUNC5_END, USEM_FUNC5_END, CSEM_FUNC5_END,
- XSEM_FUNC5_END},
- {MISC_FUNC6_END, TCM_FUNC6_END, UCM_FUNC6_END, CCM_FUNC6_END,
- XCM_FUNC6_END, TSEM_FUNC6_END, USEM_FUNC6_END, CSEM_FUNC6_END,
- XSEM_FUNC6_END},
- {MISC_FUNC7_END, TCM_FUNC7_END, UCM_FUNC7_END, CCM_FUNC7_END,
- XCM_FUNC7_END, TSEM_FUNC7_END, USEM_FUNC7_END, CSEM_FUNC7_END,
- XSEM_FUNC7_END},
-};
-
-static const int hc_limits[E1H_FUNC_MAX][2] = {
- {HC_FUNC0_START, HC_FUNC0_END},
- {HC_FUNC1_START, HC_FUNC1_END},
- {HC_FUNC2_START, HC_FUNC2_END},
- {HC_FUNC3_START, HC_FUNC3_END},
- {HC_FUNC4_START, HC_FUNC4_END},
- {HC_FUNC5_START, HC_FUNC5_END},
- {HC_FUNC6_START, HC_FUNC6_END},
- {HC_FUNC7_START, HC_FUNC7_END}
+static const int cm_blocks[9] = {
+ MISC_BLOCK, TCM_BLOCK, UCM_BLOCK, CCM_BLOCK, XCM_BLOCK,
+ TSEM_BLOCK, USEM_BLOCK, CSEM_BLOCK, XSEM_BLOCK
};
#endif /* BNX2X_INIT_H */
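A minimal sketch of how the cm_blocks[] table above is presumably meant to be combined with the consecutive FUNCx stages; the wrapper below is illustrative only and assumes the bnx2x_init_block() helper added in bnx2x_init_ops.h further down in this diff:

	/* Illustrative only: per-function CM init relies on the FUNCx stages
	 * being consecutive, hence the "never reorder" comment above. */
	static void bnx2x_init_cm_func(struct bnx2x *bp, int func)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(cm_blocks); i++)
			bnx2x_init_block(bp, cm_blocks[i], FUNC0_STAGE + func);
	}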
diff --git a/drivers/net/bnx2x_init_ops.h b/drivers/net/bnx2x_init_ops.h
new file mode 100644
index 00000000000..32552b9366c
--- /dev/null
+++ b/drivers/net/bnx2x_init_ops.h
@@ -0,0 +1,442 @@
+/* bnx2x_init_ops.h: Broadcom Everest network driver.
+ * Static functions needed during initialization.
+ * This file is "included" in bnx2x_main.c.
+ *
+ * Copyright (c) 2007-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ */
+#ifndef BNX2X_INIT_OPS_H
+#define BNX2X_INIT_OPS_H
+
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
+
+static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
+ u32 len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ REG_WR(bp, addr + i*4, data[i]);
+ if (!(i % 10000)) {
+ touch_softlockup_watchdog();
+ cpu_relax();
+ }
+ }
+}
+
+static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
+ u16 len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ REG_WR_IND(bp, addr + i*4, data[i]);
+ if (!(i % 10000)) {
+ touch_softlockup_watchdog();
+ cpu_relax();
+ }
+ }
+}
+
+static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
+{
+ int offset = 0;
+
+ if (bp->dmae_ready) {
+ while (len > DMAE_LEN32_WR_MAX) {
+ bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
+ addr + offset, DMAE_LEN32_WR_MAX);
+ offset += DMAE_LEN32_WR_MAX * 4;
+ len -= DMAE_LEN32_WR_MAX;
+ }
+ bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
+ addr + offset, len);
+ } else
+ bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len);
+}
+
+static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+{
+ u32 buf_len = (((len * 4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len * 4));
+ u32 buf_len32 = buf_len / 4;
+ int i;
+
+ memset(bp->gunzip_buf, fill, buf_len);
+
+ for (i = 0; i < len; i += buf_len32) {
+ u32 cur_len = min(buf_len32, len - i);
+
+ bnx2x_write_big_buf(bp, addr + i * 4, cur_len);
+ }
+}
+
+static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
+ u32 len64)
+{
+ u32 buf_len32 = FW_BUF_SIZE / 4;
+ u32 len = len64 * 2;
+ u64 data64 = 0;
+ int i;
+
+ /* 64 bit value is in a blob: first low DWORD, then high DWORD */
+ data64 = HILO_U64((*(data + 1)), (*data));
+ len64 = min((u32)(FW_BUF_SIZE/8), len64);
+ for (i = 0; i < len64; i++) {
+ u64 *pdata = ((u64 *)(bp->gunzip_buf)) + i;
+
+ *pdata = data64;
+ }
+
+ for (i = 0; i < len; i += buf_len32) {
+ u32 cur_len = min(buf_len32, len - i);
+
+ bnx2x_write_big_buf(bp, addr + i * 4, cur_len);
+ }
+}
+
+/*********************************************************
+ There are different blobs for each PRAM section.
+ In addition, each blob write operation is divided into a few operations
+ in order to decrease the amount of physically contiguous buffer needed.
+ Thus, when we select a blob, the address may be at some offset
+ from the beginning of the PRAM section.
+ The same holds for the INT_TABLE sections.
+**********************************************************/
+#define IF_IS_INT_TABLE_ADDR(base, addr) \
+ if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
+
+#define IF_IS_PRAM_ADDR(base, addr) \
+ if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
+
+static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
+{
+ IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
+ data = bp->tsem_int_table_data;
+ else IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
+ data = bp->csem_int_table_data;
+ else IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
+ data = bp->usem_int_table_data;
+ else IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
+ data = bp->xsem_int_table_data;
+ else IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
+ data = bp->tsem_pram_data;
+ else IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
+ data = bp->csem_pram_data;
+ else IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
+ data = bp->usem_pram_data;
+ else IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
+ data = bp->xsem_pram_data;
+
+ return data;
+}
+
+static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
+{
+ int offset = 0;
+
+ if (bp->dmae_ready) {
+ while (len > DMAE_LEN32_WR_MAX) {
+ bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
+ addr + offset, DMAE_LEN32_WR_MAX);
+ offset += DMAE_LEN32_WR_MAX * 4;
+ len -= DMAE_LEN32_WR_MAX;
+ }
+ bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
+ addr + offset, len);
+ } else
+ bnx2x_init_ind_wr(bp, addr, bp->gunzip_buf, len);
+}
+
+static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
+ u32 len)
+{
+ /* This is needed for NO_ZIP mode, currently supported
+ in little endian mode only */
+ data = (const u32*)bnx2x_sel_blob(bp, addr, (const u8*)data);
+
+ if ((len * 4) > FW_BUF_SIZE) {
+ BNX2X_ERR("LARGE DMAE OPERATION ! "
+ "addr 0x%x len 0x%x\n", addr, len*4);
+ return;
+ }
+ memcpy(bp->gunzip_buf, data, len * 4);
+
+ bnx2x_write_big_buf_wb(bp, addr, len);
+}
+
+static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr,
+ u32 len, u32 blob_off)
+{
+ int rc, i;
+ const u8 *data = NULL;
+
+ data = bnx2x_sel_blob(bp, addr, data) + 4*blob_off;
+
+ if (data == NULL) {
+ panic("Blob not found for addr 0x%x\n", addr);
+ return;
+ }
+
+ rc = bnx2x_gunzip(bp, data, len);
+ if (rc) {
+ BNX2X_ERR("gunzip failed ! addr 0x%x rc %d\n", addr, rc);
+ BNX2X_ERR("blob_offset=0x%x\n", blob_off);
+ return;
+ }
+
+ /* gunzip_outlen is in dwords */
+ len = bp->gunzip_outlen;
+ for (i = 0; i < len; i++)
+ ((u32 *)bp->gunzip_buf)[i] =
+ cpu_to_le32(((u32 *)bp->gunzip_buf)[i]);
+
+ bnx2x_write_big_buf_wb(bp, addr, len);
+}
+
+static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
+{
+ int hw_wr, i;
+ u16 op_start =
+ bp->init_ops_offsets[BLOCK_OPS_IDX(block, stage, STAGE_START)];
+ u16 op_end =
+ bp->init_ops_offsets[BLOCK_OPS_IDX(block, stage, STAGE_END)];
+ union init_op *op;
+ u32 op_type, addr, len;
+ const u32 *data, *data_base;
+
+ /* If empty block */
+ if (op_start == op_end)
+ return;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ hw_wr = OP_WR_FPGA;
+ else if (CHIP_REV_IS_EMUL(bp))
+ hw_wr = OP_WR_EMUL;
+ else
+ hw_wr = OP_WR_ASIC;
+
+ data_base = bp->init_data;
+
+ for (i = op_start; i < op_end; i++) {
+
+ op = (union init_op *)&(bp->init_ops[i]);
+
+ op_type = op->str_wr.op;
+ addr = op->str_wr.offset;
+ len = op->str_wr.data_len;
+ data = data_base + op->str_wr.data_off;
+
+ /* HW/EMUL specific */
+ if (unlikely((op_type > OP_WB) && (op_type == hw_wr)))
+ op_type = OP_WR;
+
+ switch (op_type) {
+ case OP_RD:
+ REG_RD(bp, addr);
+ break;
+ case OP_WR:
+ REG_WR(bp, addr, op->write.val);
+ break;
+ case OP_SW:
+ bnx2x_init_str_wr(bp, addr, data, len);
+ break;
+ case OP_WB:
+ bnx2x_init_wr_wb(bp, addr, data, len);
+ break;
+ case OP_SI:
+ bnx2x_init_ind_wr(bp, addr, data, len);
+ break;
+ case OP_ZR:
+ bnx2x_init_fill(bp, addr, 0, op->zero.len);
+ break;
+ case OP_ZP:
+ bnx2x_init_wr_zp(bp, addr, len,
+ op->str_wr.data_off);
+ break;
+ case OP_WR_64:
+ bnx2x_init_wr_64(bp, addr, data, len);
+ break;
+ default:
+ /* happens whenever an op is of a diff HW */
+#if 0
+ DP(NETIF_MSG_HW, "skipping init operation "
+ "index %d[%d:%d]: type %d addr 0x%x "
+ "len %d(0x%x)\n",
+ i, op_start, op_end, op_type, addr, len, len);
+#endif
+ break;
+ }
+ }
+}
+
+/* PXP */
+static void bnx2x_init_pxp(struct bnx2x *bp)
+{
+ u16 devctl;
+ int r_order, w_order;
+ u32 val, i;
+
+ pci_read_config_word(bp->pdev,
+ bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
+ DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
+ w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+ if (bp->mrrs == -1)
+ r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+ else {
+ DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
+ r_order = bp->mrrs;
+ }
+
+ if (r_order > MAX_RD_ORD) {
+ DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
+ r_order, MAX_RD_ORD);
+ r_order = MAX_RD_ORD;
+ }
+ if (w_order > MAX_WR_ORD) {
+ DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
+ w_order, MAX_WR_ORD);
+ w_order = MAX_WR_ORD;
+ }
+ if (CHIP_REV_IS_FPGA(bp)) {
+ DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
+ w_order = 0;
+ }
+ DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);
+
+ for (i = 0; i < NUM_RD_Q-1; i++) {
+ REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
+ REG_WR(bp, read_arb_addr[i].add,
+ read_arb_data[i][r_order].add);
+ REG_WR(bp, read_arb_addr[i].ubound,
+ read_arb_data[i][r_order].ubound);
+ }
+
+ for (i = 0; i < NUM_WR_Q-1; i++) {
+ if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
+ (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
+
+ REG_WR(bp, write_arb_addr[i].l,
+ write_arb_data[i][w_order].l);
+
+ REG_WR(bp, write_arb_addr[i].add,
+ write_arb_data[i][w_order].add);
+
+ REG_WR(bp, write_arb_addr[i].ubound,
+ write_arb_data[i][w_order].ubound);
+ } else {
+
+ val = REG_RD(bp, write_arb_addr[i].l);
+ REG_WR(bp, write_arb_addr[i].l,
+ val | (write_arb_data[i][w_order].l << 10));
+
+ val = REG_RD(bp, write_arb_addr[i].add);
+ REG_WR(bp, write_arb_addr[i].add,
+ val | (write_arb_data[i][w_order].add << 10));
+
+ val = REG_RD(bp, write_arb_addr[i].ubound);
+ REG_WR(bp, write_arb_addr[i].ubound,
+ val | (write_arb_data[i][w_order].ubound << 7));
+ }
+ }
+
+ val = write_arb_data[NUM_WR_Q-1][w_order].add;
+ val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
+ val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
+ REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
+
+ val = read_arb_data[NUM_RD_Q-1][r_order].add;
+ val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
+ val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
+ REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
+
+ REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
+ REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
+ REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
+ REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
+
+ if (r_order == MAX_RD_ORD)
+ REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
+
+ REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
+
+ if (CHIP_IS_E1H(bp)) {
+ val = ((w_order == 0) ? 2 : 3);
+ REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
+ REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
+ }
+}
+
+/*****************************************************************************
+ * Description:
+ * Calculates crc 8 on a word value: polynomial 0-1-2-8
+ * Code was translated from Verilog.
+ ****************************************************************************/
+static u8 calc_crc8(u32 data, u8 crc)
+{
+ u8 D[32];
+ u8 NewCRC[8];
+ u8 C[8];
+ u8 crc_res;
+ u8 i;
+
+ /* split the data into 32 bits */
+ for (i = 0; i < 32; i++) {
+ D[i] = data & 1;
+ data = data >> 1;
+ }
+
+ /* split the crc into 8 bits */
+ for (i = 0; i < 8; i++) {
+ C[i] = crc & 1;
+ crc = crc >> 1;
+ }
+
+ NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
+ D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
+ C[6] ^ C[7];
+ NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
+ D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
+ D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
+ NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
+ D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
+ C[0] ^ C[1] ^ C[4] ^ C[5];
+ NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
+ D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
+ C[1] ^ C[2] ^ C[5] ^ C[6];
+ NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
+ D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
+ C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
+ NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
+ D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
+ C[3] ^ C[4] ^ C[7];
+ NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
+ D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
+ C[5];
+ NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
+ D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
+ C[6];
+
+ crc_res = 0;
+ for (i = 0; i < 8; i++)
+ crc_res |= (NewCRC[i] << i);
+
+ return crc_res;
+}
+
+#endif /* BNX2X_INIT_OPS_H */
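Taken together, a caller would drive the table-based init roughly as below. This is only a usage sketch based on the helpers and stage/block defines above; the real call sites live in bnx2x_main.c (which includes this header) and are not shown in this diff:

	/* Illustrative only: common HW first, then the per-port stage. */
	static void bnx2x_init_prs_example(struct bnx2x *bp, int port)
	{
		bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
		bnx2x_init_block(bp, PRS_BLOCK, port ? PORT1_STAGE : PORT0_STAGE);
	}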
diff --git a/drivers/net/bnx2x_init_values.h b/drivers/net/bnx2x_init_values.h
deleted file mode 100644
index 1f22c9ab66d..00000000000
--- a/drivers/net/bnx2x_init_values.h
+++ /dev/null
@@ -1,16322 +0,0 @@
-#ifndef __BNX2X_INIT_VALUES_H__
-#define __BNX2X_INIT_VALUES_H__
-
-/* bnx2x_init_values.h: Broadcom NX2 10G network driver.
- *
- * Copyright (c) 2007-2009 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, except as noted below.
- *
- * This file contains firmware data derived from proprietary unpublished
- * source code, Copyright (c) 2007-2009 Broadcom Corporation.
- *
- * Permission is hereby granted for the distribution of this firmware data
- * in hexadecimal or equivalent format, provided this copyright notice is
- * accompanying it.
- *
- *
- * This array contains the list of operations needed to initialize the chip.
- *
- * For each block in the chip there are three init stages:
- * common - HW used by both ports,
- * port1 and port2 - initialization for a specific Ethernet port.
- * When a port is opened or closed, the management CPU tells the driver
- * whether to init/disable common HW in addition to the port HW.
- * This way the first port going up will first initializes the common HW,
- * and the last port going down also resets the common HW
- *
- * For each init stage/block there is a list of actions needed in a format:
- * {operation, register, data}
- * where:
- * OP_WR - write a value to the chip.
- * OP_RD - read a register (usually a clear on read register).
- * OP_SW - string write, write a section of consecutive addresses to the chip.
- * OP_SI - copy a string using indirect writes.
- * OP_ZR - clear a range of memory.
- * OP_ZP - unzip and copy using DMAE.
- * OP_WB - string copy using DMAE.
- *
- * The #defines mark the stages.
- *
- */
-
-static const struct raw_op init_ops[] = {
-#define PRS_COMMON_START 0
- {OP_WR, PRS_REG_INC_VALUE, 0xf},
- {OP_WR, PRS_REG_EVENT_ID_1, 0x45},
- {OP_WR, PRS_REG_EVENT_ID_2, 0x84},
- {OP_WR, PRS_REG_EVENT_ID_3, 0x6},
- {OP_WR, PRS_REG_NO_MATCH_EVENT_ID, 0x4},
- {OP_WR, PRS_REG_CM_HDR_TYPE_0, 0x0},
- {OP_WR, PRS_REG_CM_HDR_TYPE_1, 0x12170000},
- {OP_WR, PRS_REG_CM_HDR_TYPE_2, 0x22170000},
- {OP_WR, PRS_REG_CM_HDR_TYPE_3, 0x32170000},
- {OP_ZR, PRS_REG_CM_HDR_TYPE_4, 0x5},
- {OP_WR, PRS_REG_CM_HDR_LOOPBACK_TYPE_1, 0x12150000},
- {OP_WR, PRS_REG_CM_HDR_LOOPBACK_TYPE_2, 0x22150000},
- {OP_WR, PRS_REG_CM_HDR_LOOPBACK_TYPE_3, 0x32150000},
- {OP_ZR, PRS_REG_CM_HDR_LOOPBACK_TYPE_4, 0x4},
- {OP_WR, PRS_REG_CM_NO_MATCH_HDR, 0x2100000},
- {OP_WR, PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0, 0x100000},
- {OP_WR, PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1, 0x10100000},
- {OP_WR, PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2, 0x20100000},
- {OP_WR, PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3, 0x30100000},
- {OP_ZR_E1, PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4, 0x4},
- {OP_WR_E1H, PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4, 0x40100000},
- {OP_ZR_E1H, PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5, 0x3},
- {OP_WR, PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0, 0x100000},
- {OP_WR, PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1, 0x12140000},
- {OP_WR, PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2, 0x22140000},
- {OP_WR, PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3, 0x32140000},
- {OP_ZR_E1, PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4, 0x4},
- {OP_WR_E1H, PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4, 0x42140000},
- {OP_ZR_E1H, PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5, 0x3},
- {OP_RD, PRS_REG_NUM_OF_PACKETS, 0x0},
- {OP_RD, PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES, 0x0},
- {OP_RD, PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES, 0x0},
- {OP_RD, PRS_REG_NUM_OF_DEAD_CYCLES, 0x0},
- {OP_WR_E1H, PRS_REG_FCOE_TYPE, 0x8906},
- {OP_WR, PRS_REG_FLUSH_REGIONS_TYPE_0, 0xff},
- {OP_WR, PRS_REG_FLUSH_REGIONS_TYPE_1, 0xff},
- {OP_WR, PRS_REG_FLUSH_REGIONS_TYPE_2, 0xff},
- {OP_WR, PRS_REG_FLUSH_REGIONS_TYPE_3, 0xff},
- {OP_WR, PRS_REG_FLUSH_REGIONS_TYPE_4, 0xff},
- {OP_WR, PRS_REG_FLUSH_REGIONS_TYPE_5, 0xff},
- {OP_WR, PRS_REG_FLUSH_REGIONS_TYPE_6, 0xff},
- {OP_WR, PRS_REG_FLUSH_REGIONS_TYPE_7, 0xff},
- {OP_WR, PRS_REG_PURE_REGIONS, 0x3e},
- {OP_WR, PRS_REG_PACKET_REGIONS_TYPE_0, 0x0},
- {OP_WR, PRS_REG_PACKET_REGIONS_TYPE_1, 0x3f},
- {OP_WR, PRS_REG_PACKET_REGIONS_TYPE_2, 0x3f},
- {OP_WR, PRS_REG_PACKET_REGIONS_TYPE_3, 0x3f},
- {OP_WR_E1, PRS_REG_PACKET_REGIONS_TYPE_4, 0x0},
- {OP_WR_E1H, PRS_REG_PACKET_REGIONS_TYPE_4, 0x3f},
- {OP_WR, PRS_REG_PACKET_REGIONS_TYPE_5, 0x3f},
- {OP_WR, PRS_REG_PACKET_REGIONS_TYPE_6, 0x3f},
- {OP_WR, PRS_REG_PACKET_REGIONS_TYPE_7, 0x3f},
-#define PRS_COMMON_END 52
-#define SRCH_COMMON_START 52
- {OP_WR_E1H, SRC_REG_E1HMF_ENABLE, 0x1},
-#define SRCH_COMMON_END 53
-#define TSDM_COMMON_START 53
- {OP_WR_E1, TSDM_REG_CFC_RSP_START_ADDR, 0x411},
- {OP_WR_E1H, TSDM_REG_CFC_RSP_START_ADDR, 0x211},
- {OP_WR_E1, TSDM_REG_CMP_COUNTER_START_ADDR, 0x400},
- {OP_WR_E1H, TSDM_REG_CMP_COUNTER_START_ADDR, 0x200},
- {OP_WR_E1, TSDM_REG_Q_COUNTER_START_ADDR, 0x404},
- {OP_WR_E1H, TSDM_REG_Q_COUNTER_START_ADDR, 0x204},
- {OP_WR_E1, TSDM_REG_PCK_END_MSG_START_ADDR, 0x419},
- {OP_WR_E1H, TSDM_REG_PCK_END_MSG_START_ADDR, 0x219},
- {OP_WR, TSDM_REG_CMP_COUNTER_MAX0, 0xffff},
- {OP_WR, TSDM_REG_CMP_COUNTER_MAX1, 0xffff},
- {OP_WR, TSDM_REG_CMP_COUNTER_MAX2, 0xffff},
- {OP_WR, TSDM_REG_CMP_COUNTER_MAX3, 0xffff},
- {OP_ZR_E1, TSDM_REG_AGG_INT_EVENT_0, 0x2},
- {OP_WR_E1H, TSDM_REG_AGG_INT_EVENT_0, 0x20},
- {OP_WR_E1H, TSDM_REG_AGG_INT_EVENT_1, 0x0},
- {OP_WR, TSDM_REG_AGG_INT_EVENT_2, 0x34},
- {OP_WR, TSDM_REG_AGG_INT_EVENT_3, 0x35},
- {OP_ZR_E1, TSDM_REG_AGG_INT_EVENT_4, 0x7c},
- {OP_ZR_E1H, TSDM_REG_AGG_INT_EVENT_4, 0x1c},
- {OP_WR_E1H, TSDM_REG_AGG_INT_T_0, 0x1},
- {OP_ZR_E1H, TSDM_REG_AGG_INT_T_1, 0x5f},
- {OP_WR, TSDM_REG_ENABLE_IN1, 0x7ffffff},
- {OP_WR, TSDM_REG_ENABLE_IN2, 0x3f},
- {OP_WR, TSDM_REG_ENABLE_OUT1, 0x7ffffff},
- {OP_WR, TSDM_REG_ENABLE_OUT2, 0xf},
- {OP_RD, TSDM_REG_NUM_OF_Q0_CMD, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_Q1_CMD, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_Q3_CMD, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_Q4_CMD, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_Q5_CMD, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_Q6_CMD, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_Q7_CMD, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_Q8_CMD, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_Q9_CMD, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_Q10_CMD, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_Q11_CMD, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_PKT_END_MSG, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_PXP_ASYNC_REQ, 0x0},
- {OP_RD, TSDM_REG_NUM_OF_ACK_AFTER_PLACE, 0x0},
- {OP_WR_E1, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1},
- {OP_WR_ASIC, TSDM_REG_TIMER_TICK, 0x3e8},
- {OP_WR_EMUL, TSDM_REG_TIMER_TICK, 0x1},
- {OP_WR_FPGA, TSDM_REG_TIMER_TICK, 0xa},
-#define TSDM_COMMON_END 96
-#define TCM_COMMON_START 96
- {OP_WR, TCM_REG_XX_MAX_LL_SZ, 0x20},
- {OP_WR, TCM_REG_XX_OVFL_EVNT_ID, 0x32},
- {OP_WR, TCM_REG_TQM_TCM_HDR_P, 0x2150020},
- {OP_WR, TCM_REG_TQM_TCM_HDR_S, 0x2150020},
- {OP_WR, TCM_REG_TM_TCM_HDR, 0x30},
- {OP_WR, TCM_REG_ERR_TCM_HDR, 0x8100000},
- {OP_WR, TCM_REG_ERR_EVNT_ID, 0x33},
- {OP_WR, TCM_REG_EXPR_EVNT_ID, 0x30},
- {OP_WR, TCM_REG_STOP_EVNT_ID, 0x31},
- {OP_WR, TCM_REG_STORM_WEIGHT, 0x2},
- {OP_WR, TCM_REG_PRS_WEIGHT, 0x5},
- {OP_WR, TCM_REG_PBF_WEIGHT, 0x6},
- {OP_WR, TCM_REG_USEM_WEIGHT, 0x2},
- {OP_WR, TCM_REG_CSEM_WEIGHT, 0x2},
- {OP_WR, TCM_REG_CP_WEIGHT, 0x0},
- {OP_WR, TCM_REG_TSDM_WEIGHT, 0x5},
- {OP_WR, TCM_REG_TQM_P_WEIGHT, 0x2},
- {OP_WR, TCM_REG_TQM_S_WEIGHT, 0x2},
- {OP_WR, TCM_REG_TM_WEIGHT, 0x2},
- {OP_WR, TCM_REG_TCM_TQM_USE_Q, 0x1},
- {OP_WR, TCM_REG_GR_ARB_TYPE, 0x1},
- {OP_WR, TCM_REG_GR_LD0_PR, 0x1},
- {OP_WR, TCM_REG_GR_LD1_PR, 0x2},
- {OP_WR, TCM_REG_CFC_INIT_CRD, 0x1},
- {OP_WR, TCM_REG_FIC0_INIT_CRD, 0x40},
- {OP_WR, TCM_REG_FIC1_INIT_CRD, 0x40},
- {OP_WR, TCM_REG_TQM_INIT_CRD, 0x20},
- {OP_WR, TCM_REG_XX_INIT_CRD, 0x13},
- {OP_WR, TCM_REG_XX_MSG_NUM, 0x20},
- {OP_ZR, TCM_REG_XX_TABLE, 0xa},
- {OP_SW, TCM_REG_XX_DESCR_TABLE, 0x200000},
- {OP_WR, TCM_REG_N_SM_CTX_LD_0, 0x7},
- {OP_WR, TCM_REG_N_SM_CTX_LD_1, 0x7},
- {OP_WR, TCM_REG_N_SM_CTX_LD_2, 0x8},
- {OP_WR, TCM_REG_N_SM_CTX_LD_3, 0x8},
- {OP_ZR_E1, TCM_REG_N_SM_CTX_LD_4, 0x4},
- {OP_WR_E1H, TCM_REG_N_SM_CTX_LD_4, 0x1},
- {OP_ZR_E1H, TCM_REG_N_SM_CTX_LD_5, 0x3},
- {OP_WR, TCM_REG_TCM_REG0_SZ, 0x6},
- {OP_WR_E1, TCM_REG_PHYS_QNUM0_0, 0xd},
- {OP_WR_E1, TCM_REG_PHYS_QNUM0_1, 0x2d},
- {OP_WR_E1, TCM_REG_PHYS_QNUM1_0, 0x7},
- {OP_WR_E1, TCM_REG_PHYS_QNUM1_1, 0x27},
- {OP_WR_E1, TCM_REG_PHYS_QNUM2_0, 0x7},
- {OP_WR_E1, TCM_REG_PHYS_QNUM2_1, 0x27},
- {OP_WR_E1, TCM_REG_PHYS_QNUM3_0, 0x7},
- {OP_WR_E1, TCM_REG_PHYS_QNUM3_1, 0x27},
- {OP_WR, TCM_REG_TCM_STORM0_IFEN, 0x1},
- {OP_WR, TCM_REG_TCM_STORM1_IFEN, 0x1},
- {OP_WR, TCM_REG_TCM_TQM_IFEN, 0x1},
- {OP_WR, TCM_REG_STORM_TCM_IFEN, 0x1},
- {OP_WR, TCM_REG_TQM_TCM_IFEN, 0x1},
- {OP_WR, TCM_REG_TSDM_IFEN, 0x1},
- {OP_WR, TCM_REG_TM_TCM_IFEN, 0x1},
- {OP_WR, TCM_REG_PRS_IFEN, 0x1},
- {OP_WR, TCM_REG_PBF_IFEN, 0x1},
- {OP_WR, TCM_REG_USEM_IFEN, 0x1},
- {OP_WR, TCM_REG_CSEM_IFEN, 0x1},
- {OP_WR, TCM_REG_CDU_AG_WR_IFEN, 0x1},
- {OP_WR, TCM_REG_CDU_AG_RD_IFEN, 0x1},
- {OP_WR, TCM_REG_CDU_SM_WR_IFEN, 0x1},
- {OP_WR, TCM_REG_CDU_SM_RD_IFEN, 0x1},
- {OP_WR, TCM_REG_TCM_CFC_IFEN, 0x1},
-#define TCM_COMMON_END 159
-#define TCM_FUNC0_START 159
- {OP_WR_E1H, TCM_REG_PHYS_QNUM0_0, 0xd},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM1_0, 0x7},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM2_0, 0x7},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM3_0, 0x7},
-#define TCM_FUNC0_END 163
-#define TCM_FUNC1_START 163
- {OP_WR_E1H, TCM_REG_PHYS_QNUM0_1, 0x2d},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM1_1, 0x27},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM2_1, 0x27},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM3_1, 0x27},
-#define TCM_FUNC1_END 167
-#define TCM_FUNC2_START 167
- {OP_WR_E1H, TCM_REG_PHYS_QNUM0_0, 0x1d},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM1_0, 0x17},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM2_0, 0x17},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM3_0, 0x17},
-#define TCM_FUNC2_END 171
-#define TCM_FUNC3_START 171
- {OP_WR_E1H, TCM_REG_PHYS_QNUM0_1, 0x3d},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM1_1, 0x37},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM2_1, 0x37},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM3_1, 0x37},
-#define TCM_FUNC3_END 175
-#define TCM_FUNC4_START 175
- {OP_WR_E1H, TCM_REG_PHYS_QNUM0_0, 0x4d},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM1_0, 0x47},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM2_0, 0x47},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM3_0, 0x47},
-#define TCM_FUNC4_END 179
-#define TCM_FUNC5_START 179
- {OP_WR_E1H, TCM_REG_PHYS_QNUM0_1, 0x6d},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM1_1, 0x67},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM2_1, 0x67},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM3_1, 0x67},
-#define TCM_FUNC5_END 183
-#define TCM_FUNC6_START 183
- {OP_WR_E1H, TCM_REG_PHYS_QNUM0_0, 0x5d},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM1_0, 0x57},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM2_0, 0x57},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM3_0, 0x57},
-#define TCM_FUNC6_END 187
-#define TCM_FUNC7_START 187
- {OP_WR_E1H, TCM_REG_PHYS_QNUM0_1, 0x7d},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM1_1, 0x77},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM2_1, 0x77},
- {OP_WR_E1H, TCM_REG_PHYS_QNUM3_1, 0x77},
-#define TCM_FUNC7_END 191
-#define BRB1_COMMON_START 191
- {OP_SW, BRB1_REG_LL_RAM, 0x2000020},
- {OP_WR, BRB1_REG_SOFT_RESET, 0x1},
- {OP_RD, BRB1_REG_NUM_OF_FULL_CYCLES_4, 0x0},
- {OP_SW, BRB1_REG_FREE_LIST_PRS_CRDT, 0x30220},
- {OP_WR, BRB1_REG_SOFT_RESET, 0x0},
-#define BRB1_COMMON_END 196
-#define BRB1_PORT0_START 196
- {OP_WR_E1, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0xb8},
- {OP_WR_E1, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 0x114},
- {OP_RD, BRB1_REG_NUM_OF_PAUSE_CYCLES_0, 0x0},
- {OP_RD, BRB1_REG_NUM_OF_FULL_CYCLES_0, 0x0},
-#define BRB1_PORT0_END 200
-#define BRB1_PORT1_START 200
- {OP_WR_E1, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0xb8},
- {OP_WR_E1, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 0x114},
- {OP_RD, BRB1_REG_NUM_OF_PAUSE_CYCLES_1, 0x0},
- {OP_RD, BRB1_REG_NUM_OF_FULL_CYCLES_1, 0x0},
-#define BRB1_PORT1_END 204
-#define TSEM_COMMON_START 204
- {OP_RD, TSEM_REG_MSG_NUM_FIC0, 0x0},
- {OP_RD, TSEM_REG_MSG_NUM_FIC1, 0x0},
- {OP_RD, TSEM_REG_MSG_NUM_FOC0, 0x0},
- {OP_RD, TSEM_REG_MSG_NUM_FOC1, 0x0},
- {OP_RD, TSEM_REG_MSG_NUM_FOC2, 0x0},
- {OP_RD, TSEM_REG_MSG_NUM_FOC3, 0x0},
- {OP_WR, TSEM_REG_ARB_ELEMENT0, 0x1},
- {OP_WR, TSEM_REG_ARB_ELEMENT1, 0x2},
- {OP_WR, TSEM_REG_ARB_ELEMENT2, 0x3},
- {OP_WR, TSEM_REG_ARB_ELEMENT3, 0x0},
- {OP_WR, TSEM_REG_ARB_ELEMENT4, 0x4},
- {OP_WR, TSEM_REG_ARB_CYCLE_SIZE, 0x1},
- {OP_WR, TSEM_REG_TS_0_AS, 0x0},
- {OP_WR, TSEM_REG_TS_1_AS, 0x1},
- {OP_WR, TSEM_REG_TS_2_AS, 0x4},
- {OP_WR, TSEM_REG_TS_3_AS, 0x0},
- {OP_WR, TSEM_REG_TS_4_AS, 0x1},
- {OP_WR, TSEM_REG_TS_5_AS, 0x3},
- {OP_WR, TSEM_REG_TS_6_AS, 0x0},
- {OP_WR, TSEM_REG_TS_7_AS, 0x1},
- {OP_WR, TSEM_REG_TS_8_AS, 0x4},
- {OP_WR, TSEM_REG_TS_9_AS, 0x0},
- {OP_WR, TSEM_REG_TS_10_AS, 0x1},
- {OP_WR, TSEM_REG_TS_11_AS, 0x3},
- {OP_WR, TSEM_REG_TS_12_AS, 0x0},
- {OP_WR, TSEM_REG_TS_13_AS, 0x1},
- {OP_WR, TSEM_REG_TS_14_AS, 0x4},
- {OP_WR, TSEM_REG_TS_15_AS, 0x0},
- {OP_WR, TSEM_REG_TS_16_AS, 0x4},
- {OP_WR, TSEM_REG_TS_17_AS, 0x3},
- {OP_ZR, TSEM_REG_TS_18_AS, 0x2},
- {OP_WR, TSEM_REG_ENABLE_IN, 0x3fff},
- {OP_WR, TSEM_REG_ENABLE_OUT, 0x3ff},
- {OP_WR, TSEM_REG_FIC0_DISABLE, 0x0},
- {OP_WR, TSEM_REG_FIC1_DISABLE, 0x0},
- {OP_WR, TSEM_REG_PAS_DISABLE, 0x0},
- {OP_WR, TSEM_REG_THREADS_LIST, 0xff},
- {OP_ZR, TSEM_REG_PASSIVE_BUFFER, 0x400},
- {OP_WR, TSEM_REG_FAST_MEMORY + 0x18bc0, 0x1},
- {OP_WR, TSEM_REG_FAST_MEMORY + 0x18000, 0x34},
- {OP_WR, TSEM_REG_FAST_MEMORY + 0x18040, 0x18},
- {OP_WR, TSEM_REG_FAST_MEMORY + 0x18080, 0xc},
- {OP_WR, TSEM_REG_FAST_MEMORY + 0x180c0, 0x20},
- {OP_WR_ASIC, TSEM_REG_FAST_MEMORY + 0x18300, 0x7a120},
- {OP_WR_EMUL, TSEM_REG_FAST_MEMORY + 0x18300, 0x138},
- {OP_WR_FPGA, TSEM_REG_FAST_MEMORY + 0x18300, 0x1388},
- {OP_WR, TSEM_REG_FAST_MEMORY + 0x183c0, 0x1f4},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x2000, 0xb2},
- {OP_WR_E1H, TSEM_REG_FAST_MEMORY + 0x11480, 0x1},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x23c8, 0xc1},
- {OP_WR_EMUL_E1H, TSEM_REG_FAST_MEMORY + 0x11480, 0x0},
- {OP_SW_E1, TSEM_REG_FAST_MEMORY + 0x23c8 + 0x304, 0x10223},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x1000, 0x2b3},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1020, 0xc8},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x1000 + 0xacc, 0x10223},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1000, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0xa020, 0xc8},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1c18, 0x4},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0xa000, 0x2},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1c10, 0x2},
- {OP_WR_E1H, TSEM_REG_FAST_MEMORY + 0x1ad0, 0x0},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x800, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x1ad8, 0x4},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x808, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3678, 0x6},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x810, 0x4},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3670, 0x2},
- {OP_SW_E1, TSEM_REG_FAST_MEMORY + 0x1fb0, 0x40224},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5000, 0x2},
- {OP_SW_E1, TSEM_REG_FAST_MEMORY + 0x4cb0, 0x80228},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5008, 0x4},
- {OP_ZP_E1, TSEM_REG_INT_TABLE, 0x930000},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5018, 0x4},
- {OP_WR_64_E1, TSEM_REG_INT_TABLE + 0x360, 0x140230},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5028, 0x4},
- {OP_ZP_E1, TSEM_REG_PRAM, 0x324f0000},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5038, 0x4},
- {OP_ZP_E1, TSEM_REG_PRAM + 0x8000, 0x33250c94},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5048, 0x4},
- {OP_ZP_E1, TSEM_REG_PRAM + 0x10000, 0xe4d195e},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5058, 0x4},
- {OP_WR_64_E1, TSEM_REG_PRAM + 0x11e00, 0x5c400232},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5068, 0x4},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5078, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x4000, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x4008, 0x2},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x62c0, 0x200224},
- {OP_ZP_E1H, TSEM_REG_INT_TABLE, 0x9b0000},
- {OP_WR_64_E1H, TSEM_REG_INT_TABLE + 0x398, 0xd0244},
- {OP_ZP_E1H, TSEM_REG_PRAM, 0x325e0000},
- {OP_ZP_E1H, TSEM_REG_PRAM + 0x8000, 0x35960c98},
- {OP_ZP_E1H, TSEM_REG_PRAM + 0x10000, 0x1aea19fe},
- {OP_WR_64_E1H, TSEM_REG_PRAM + 0x143d0, 0x57860246},
-#define TSEM_COMMON_END 297
-#define TSEM_PORT0_START 297
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x22c8, 0x20},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x2000, 0x16c},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x4000, 0x16c},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0xb000, 0x28},
- {OP_WR_E1, TSEM_REG_FAST_MEMORY + 0x4b60, 0x0},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0xb140, 0xc},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1400, 0xa},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x32c0, 0x12},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1450, 0x6},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3350, 0x64},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1500, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x8108, 0x2},
- {OP_SW_E1, TSEM_REG_FAST_MEMORY + 0x1500 + 0x8, 0x50234},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1500 + 0x1c, 0x7},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1570, 0x12},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x9c0, 0x4c},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x800, 0x2},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x820, 0xe},
- {OP_SW_E1, TSEM_REG_FAST_MEMORY + 0x1fb0, 0x20239},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x2908, 0x2},
-#define TSEM_PORT0_END 317
-#define TSEM_PORT1_START 317
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x2348, 0x20},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x25b0, 0x16c},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x45b0, 0x16c},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0xb0a0, 0x28},
- {OP_WR_E1, TSEM_REG_FAST_MEMORY + 0x4b64, 0x0},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0xb170, 0xc},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1428, 0xa},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3308, 0x12},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1468, 0x6},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x34e0, 0x64},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1538, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x8110, 0x2},
- {OP_SW_E1, TSEM_REG_FAST_MEMORY + 0x1538 + 0x8, 0x5023b},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x1538 + 0x1c, 0x7},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x15b8, 0x12},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0xaf0, 0x4c},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x808, 0x2},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x858, 0xe},
- {OP_SW_E1, TSEM_REG_FAST_MEMORY + 0x1fb8, 0x20240},
- {OP_ZR_E1, TSEM_REG_FAST_MEMORY + 0x2910, 0x2},
-#define TSEM_PORT1_END 337
-#define TSEM_FUNC0_START 337
- {OP_WR_E1H, TSEM_REG_FAST_MEMORY + 0x2b60, 0x0},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3000, 0x2},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x3000 + 0x8, 0x50248},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3000 + 0x1c, 0x7},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x31c0, 0x8},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5000, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5080, 0x12},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x4000, 0x2},
-#define TSEM_FUNC0_END 345
-#define TSEM_FUNC1_START 345
- {OP_WR_E1H, TSEM_REG_FAST_MEMORY + 0x2b64, 0x0},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3038, 0x2},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x3038 + 0x8, 0x5024d},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3038 + 0x1c, 0x7},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x31e0, 0x8},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5010, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x50c8, 0x12},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x4008, 0x2},
-#define TSEM_FUNC1_END 353
-#define TSEM_FUNC2_START 353
- {OP_WR_E1H, TSEM_REG_FAST_MEMORY + 0x2b68, 0x0},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3070, 0x2},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x3070 + 0x8, 0x50252},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3070 + 0x1c, 0x7},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3200, 0x8},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5020, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5110, 0x12},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x4010, 0x20257},
-#define TSEM_FUNC2_END 361
-#define TSEM_FUNC3_START 361
- {OP_WR_E1H, TSEM_REG_FAST_MEMORY + 0x2b6c, 0x0},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x30a8, 0x2},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x30a8 + 0x8, 0x50259},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x30a8 + 0x1c, 0x7},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3220, 0x8},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5030, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5158, 0x12},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x4018, 0x2025e},
-#define TSEM_FUNC3_END 369
-#define TSEM_FUNC4_START 369
- {OP_WR_E1H, TSEM_REG_FAST_MEMORY + 0x2b70, 0x0},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x30e0, 0x2},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x30e0 + 0x8, 0x50260},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x30e0 + 0x1c, 0x7},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3240, 0x8},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5040, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x51a0, 0x12},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x4020, 0x20265},
-#define TSEM_FUNC4_END 377
-#define TSEM_FUNC5_START 377
- {OP_WR_E1H, TSEM_REG_FAST_MEMORY + 0x2b74, 0x0},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3118, 0x2},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x3118 + 0x8, 0x50267},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3118 + 0x1c, 0x7},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3260, 0x8},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5050, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x51e8, 0x12},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x4028, 0x2026c},
-#define TSEM_FUNC5_END 385
-#define TSEM_FUNC6_START 385
- {OP_WR_E1H, TSEM_REG_FAST_MEMORY + 0x2b78, 0x0},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3150, 0x2},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x3150 + 0x8, 0x5026e},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3150 + 0x1c, 0x7},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3280, 0x8},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5060, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5230, 0x12},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x4030, 0x20273},
-#define TSEM_FUNC6_END 393
-#define TSEM_FUNC7_START 393
- {OP_WR_E1H, TSEM_REG_FAST_MEMORY + 0x2b7c, 0x0},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3188, 0x2},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x3188 + 0x8, 0x50275},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x3188 + 0x1c, 0x7},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x32a0, 0x8},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5070, 0x2},
- {OP_ZR_E1H, TSEM_REG_FAST_MEMORY + 0x5278, 0x12},
- {OP_SW_E1H, TSEM_REG_FAST_MEMORY + 0x4038, 0x2027a},
-#define TSEM_FUNC7_END 401
-#define MISC_COMMON_START 401
- {OP_WR_E1, MISC_REG_GRC_TIMEOUT_EN, 0x1},
- {OP_WR, MISC_REG_PLL_STORM_CTRL_1, 0x71d2911},
- {OP_WR, MISC_REG_PLL_STORM_CTRL_2, 0x0},
- {OP_WR, MISC_REG_PLL_STORM_CTRL_3, 0x9c0424},
- {OP_WR, MISC_REG_PLL_STORM_CTRL_4, 0x0},
- {OP_WR, MISC_REG_LCPLL_CTRL_1, 0x209},
- {OP_WR_E1, MISC_REG_SPIO, 0xff000000},
-#define MISC_COMMON_END 408
-#define MISC_FUNC0_START 408
- {OP_WR_E1H, MISC_REG_NIG_WOL_P0, 0x0},
-#define MISC_FUNC0_END 409
-#define MISC_FUNC1_START 409
- {OP_WR_E1H, MISC_REG_NIG_WOL_P1, 0x0},
-#define MISC_FUNC1_END 410
-#define MISC_FUNC2_START 410
- {OP_WR_E1H, MISC_REG_NIG_WOL_P0, 0x0},
-#define MISC_FUNC2_END 411
-#define MISC_FUNC3_START 411
- {OP_WR_E1H, MISC_REG_NIG_WOL_P1, 0x0},
-#define MISC_FUNC3_END 412
-#define MISC_FUNC4_START 412
- {OP_WR_E1H, MISC_REG_NIG_WOL_P0, 0x0},
-#define MISC_FUNC4_END 413
-#define MISC_FUNC5_START 413
- {OP_WR_E1H, MISC_REG_NIG_WOL_P1, 0x0},
-#define MISC_FUNC5_END 414
-#define MISC_FUNC6_START 414
- {OP_WR_E1H, MISC_REG_NIG_WOL_P0, 0x0},
-#define MISC_FUNC6_END 415
-#define MISC_FUNC7_START 415
- {OP_WR_E1H, MISC_REG_NIG_WOL_P1, 0x0},
-#define MISC_FUNC7_END 416
-#define NIG_COMMON_START 416
- {OP_WR, NIG_REG_PBF_LB_IN_EN, 0x1},
- {OP_WR, NIG_REG_PRS_REQ_IN_EN, 0x1},
- {OP_WR, NIG_REG_EGRESS_DEBUG_IN_EN, 0x1},
- {OP_WR, NIG_REG_BRB_LB_OUT_EN, 0x1},
- {OP_WR, NIG_REG_PRS_EOP_OUT_EN, 0x1},
-#define NIG_COMMON_END 421
-#define NIG_PORT0_START 421
- {OP_WR, NIG_REG_LLH0_CM_HEADER, 0x300000},
- {OP_WR, NIG_REG_LLH0_EVENT_ID, 0x28},
- {OP_WR, NIG_REG_LLH0_ERROR_MASK, 0x0},
- {OP_WR, NIG_REG_LLH0_XCM_MASK, 0x4},
- {OP_WR, NIG_REG_LLH0_BRB1_NOT_MCP, 0x1},
- {OP_WR, NIG_REG_STATUS_INTERRUPT_PORT0, 0x0},
- {OP_WR_E1H, NIG_REG_LLH0_CLS_TYPE, 0x1},
- {OP_WR, NIG_REG_LLH0_XCM_INIT_CREDIT, 0x30},
- {OP_WR, NIG_REG_BRB0_PAUSE_IN_EN, 0x1},
- {OP_WR, NIG_REG_EGRESS_PBF0_IN_EN, 0x1},
- {OP_WR, NIG_REG_BRB0_OUT_EN, 0x1},
- {OP_WR, NIG_REG_XCM0_OUT_EN, 0x1},
-#define NIG_PORT0_END 433
-#define NIG_PORT1_START 433
- {OP_WR, NIG_REG_LLH1_CM_HEADER, 0x300000},
- {OP_WR, NIG_REG_LLH1_EVENT_ID, 0x28},
- {OP_WR, NIG_REG_LLH1_ERROR_MASK, 0x0},
- {OP_WR, NIG_REG_LLH1_XCM_MASK, 0x4},
- {OP_WR, NIG_REG_LLH1_BRB1_NOT_MCP, 0x1},
- {OP_WR, NIG_REG_STATUS_INTERRUPT_PORT1, 0x0},
- {OP_WR_E1H, NIG_REG_LLH1_CLS_TYPE, 0x1},
- {OP_WR, NIG_REG_LLH1_XCM_INIT_CREDIT, 0x30},
- {OP_WR, NIG_REG_BRB1_PAUSE_IN_EN, 0x1},
- {OP_WR, NIG_REG_EGRESS_PBF1_IN_EN, 0x1},
- {OP_WR, NIG_REG_BRB1_OUT_EN, 0x1},
- {OP_WR, NIG_REG_XCM1_OUT_EN, 0x1},
-#define NIG_PORT1_END 445
-#define UPB_COMMON_START 445
- {OP_WR, GRCBASE_UPB + PB_REG_CONTROL, 0x20},
-#define UPB_COMMON_END 446
-#define CSDM_COMMON_START 446
- {OP_WR_E1, CSDM_REG_CFC_RSP_START_ADDR, 0xa11},
- {OP_WR_E1H, CSDM_REG_CFC_RSP_START_ADDR, 0x211},
- {OP_WR_E1, CSDM_REG_CMP_COUNTER_START_ADDR, 0xa00},
- {OP_WR_E1H, CSDM_REG_CMP_COUNTER_START_ADDR, 0x200},
- {OP_WR_E1, CSDM_REG_Q_COUNTER_START_ADDR, 0xa04},
- {OP_WR_E1H, CSDM_REG_Q_COUNTER_START_ADDR, 0x204},
- {OP_WR, CSDM_REG_CMP_COUNTER_MAX0, 0xffff},
- {OP_WR, CSDM_REG_CMP_COUNTER_MAX1, 0xffff},
- {OP_WR, CSDM_REG_CMP_COUNTER_MAX2, 0xffff},
- {OP_WR, CSDM_REG_CMP_COUNTER_MAX3, 0xffff},
- {OP_WR, CSDM_REG_AGG_INT_EVENT_0, 0xc6},
- {OP_WR, CSDM_REG_AGG_INT_EVENT_1, 0x0},
- {OP_WR, CSDM_REG_AGG_INT_EVENT_2, 0x34},
- {OP_WR, CSDM_REG_AGG_INT_EVENT_3, 0x35},
- {OP_ZR, CSDM_REG_AGG_INT_EVENT_4, 0x1c},
- {OP_WR, CSDM_REG_AGG_INT_T_0, 0x1},
- {OP_ZR, CSDM_REG_AGG_INT_T_1, 0x5f},
- {OP_WR, CSDM_REG_ENABLE_IN1, 0x7ffffff},
- {OP_WR, CSDM_REG_ENABLE_IN2, 0x3f},
- {OP_WR, CSDM_REG_ENABLE_OUT1, 0x7ffffff},
- {OP_WR, CSDM_REG_ENABLE_OUT2, 0xf},
- {OP_RD, CSDM_REG_NUM_OF_Q0_CMD, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_Q1_CMD, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_Q3_CMD, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_Q4_CMD, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_Q5_CMD, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_Q6_CMD, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_Q7_CMD, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_Q8_CMD, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_Q9_CMD, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_Q10_CMD, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_Q11_CMD, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_PKT_END_MSG, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_PXP_ASYNC_REQ, 0x0},
- {OP_RD, CSDM_REG_NUM_OF_ACK_AFTER_PLACE, 0x0},
- {OP_WR_E1, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1},
- {OP_WR_ASIC, CSDM_REG_TIMER_TICK, 0x3e8},
- {OP_WR_EMUL, CSDM_REG_TIMER_TICK, 0x1},
- {OP_WR_FPGA, CSDM_REG_TIMER_TICK, 0xa},
-#define CSDM_COMMON_END 485
-#define USDM_COMMON_START 485
- {OP_WR_E1, USDM_REG_CFC_RSP_START_ADDR, 0xa11},
- {OP_WR_E1H, USDM_REG_CFC_RSP_START_ADDR, 0x411},
- {OP_WR_E1, USDM_REG_CMP_COUNTER_START_ADDR, 0xa00},
- {OP_WR_E1H, USDM_REG_CMP_COUNTER_START_ADDR, 0x400},
- {OP_WR_E1, USDM_REG_Q_COUNTER_START_ADDR, 0xa04},
- {OP_WR_E1H, USDM_REG_Q_COUNTER_START_ADDR, 0x404},
- {OP_WR_E1, USDM_REG_PCK_END_MSG_START_ADDR, 0xa21},
- {OP_WR_E1H, USDM_REG_PCK_END_MSG_START_ADDR, 0x421},
- {OP_WR, USDM_REG_CMP_COUNTER_MAX0, 0xffff},
- {OP_WR, USDM_REG_CMP_COUNTER_MAX1, 0xffff},
- {OP_WR, USDM_REG_CMP_COUNTER_MAX2, 0xffff},
- {OP_WR, USDM_REG_CMP_COUNTER_MAX3, 0xffff},
- {OP_WR, USDM_REG_AGG_INT_EVENT_0, 0x46},
- {OP_WR, USDM_REG_AGG_INT_EVENT_1, 0x5},
- {OP_WR, USDM_REG_AGG_INT_EVENT_2, 0x34},
- {OP_WR, USDM_REG_AGG_INT_EVENT_3, 0x35},
- {OP_ZR_E1, USDM_REG_AGG_INT_EVENT_4, 0x5c},
- {OP_WR_E1H, USDM_REG_AGG_INT_EVENT_4, 0x7},
- {OP_ZR_E1H, USDM_REG_AGG_INT_EVENT_5, 0x5b},
- {OP_WR, USDM_REG_AGG_INT_MODE_0, 0x1},
- {OP_ZR_E1, USDM_REG_AGG_INT_MODE_1, 0x1f},
- {OP_ZR_E1H, USDM_REG_AGG_INT_MODE_1, 0x3},
- {OP_WR_E1H, USDM_REG_AGG_INT_MODE_4, 0x1},
- {OP_ZR_E1H, USDM_REG_AGG_INT_MODE_5, 0x1b},
- {OP_WR, USDM_REG_ENABLE_IN1, 0x7ffffff},
- {OP_WR, USDM_REG_ENABLE_IN2, 0x3f},
- {OP_WR, USDM_REG_ENABLE_OUT1, 0x7ffffff},
- {OP_WR, USDM_REG_ENABLE_OUT2, 0xf},
- {OP_RD, USDM_REG_NUM_OF_Q0_CMD, 0x0},
- {OP_RD, USDM_REG_NUM_OF_Q1_CMD, 0x0},
- {OP_RD, USDM_REG_NUM_OF_Q2_CMD, 0x0},
- {OP_RD, USDM_REG_NUM_OF_Q3_CMD, 0x0},
- {OP_RD, USDM_REG_NUM_OF_Q4_CMD, 0x0},
- {OP_RD, USDM_REG_NUM_OF_Q5_CMD, 0x0},
- {OP_RD, USDM_REG_NUM_OF_Q6_CMD, 0x0},
- {OP_RD, USDM_REG_NUM_OF_Q7_CMD, 0x0},
- {OP_RD, USDM_REG_NUM_OF_Q8_CMD, 0x0},
- {OP_RD, USDM_REG_NUM_OF_Q9_CMD, 0x0},
- {OP_RD, USDM_REG_NUM_OF_Q10_CMD, 0x0},
- {OP_RD, USDM_REG_NUM_OF_Q11_CMD, 0x0},
- {OP_RD, USDM_REG_NUM_OF_PKT_END_MSG, 0x0},
- {OP_RD, USDM_REG_NUM_OF_PXP_ASYNC_REQ, 0x0},
- {OP_RD, USDM_REG_NUM_OF_ACK_AFTER_PLACE, 0x0},
- {OP_WR_E1, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1},
- {OP_WR_ASIC, USDM_REG_TIMER_TICK, 0x3e8},
- {OP_WR_EMUL, USDM_REG_TIMER_TICK, 0x1},
- {OP_WR_FPGA, USDM_REG_TIMER_TICK, 0xa},
-#define USDM_COMMON_END 532
-#define CCM_COMMON_START 532
- {OP_WR, CCM_REG_XX_OVFL_EVNT_ID, 0x32},
- {OP_WR, CCM_REG_CQM_CCM_HDR_P, 0x2150020},
- {OP_WR, CCM_REG_CQM_CCM_HDR_S, 0x2150020},
- {OP_WR, CCM_REG_ERR_CCM_HDR, 0x8100000},
- {OP_WR, CCM_REG_ERR_EVNT_ID, 0x33},
- {OP_WR, CCM_REG_STORM_WEIGHT, 0x2},
- {OP_WR, CCM_REG_TSEM_WEIGHT, 0x0},
- {OP_WR, CCM_REG_XSEM_WEIGHT, 0x5},
- {OP_WR, CCM_REG_USEM_WEIGHT, 0x5},
- {OP_ZR, CCM_REG_PBF_WEIGHT, 0x2},
- {OP_WR, CCM_REG_CSDM_WEIGHT, 0x2},
- {OP_WR, CCM_REG_CQM_P_WEIGHT, 0x3},
- {OP_WR, CCM_REG_CQM_S_WEIGHT, 0x2},
- {OP_WR, CCM_REG_CCM_CQM_USE_Q, 0x1},
- {OP_WR, CCM_REG_CNT_AUX1_Q, 0x2},
- {OP_WR, CCM_REG_CNT_AUX2_Q, 0x2},
- {OP_WR, CCM_REG_INV_DONE_Q, 0x1},
- {OP_WR, CCM_REG_GR_ARB_TYPE, 0x1},
- {OP_WR, CCM_REG_GR_LD0_PR, 0x1},
- {OP_WR, CCM_REG_GR_LD1_PR, 0x2},
- {OP_WR, CCM_REG_CFC_INIT_CRD, 0x1},
- {OP_WR, CCM_REG_CQM_INIT_CRD, 0x20},
- {OP_WR, CCM_REG_FIC0_INIT_CRD, 0x40},
- {OP_WR, CCM_REG_FIC1_INIT_CRD, 0x40},
- {OP_WR, CCM_REG_XX_INIT_CRD, 0x3},
- {OP_WR, CCM_REG_XX_MSG_NUM, 0x18},
- {OP_ZR, CCM_REG_XX_TABLE, 0x12},
- {OP_SW_E1, CCM_REG_XX_DESCR_TABLE, 0x240242},
- {OP_SW_E1H, CCM_REG_XX_DESCR_TABLE, 0x24027c},
- {OP_WR, CCM_REG_N_SM_CTX_LD_0, 0x1},
- {OP_WR, CCM_REG_N_SM_CTX_LD_1, 0x2},
- {OP_WR, CCM_REG_N_SM_CTX_LD_2, 0x8},
- {OP_WR, CCM_REG_N_SM_CTX_LD_3, 0x8},
- {OP_ZR, CCM_REG_N_SM_CTX_LD_4, 0x4},
- {OP_WR, CCM_REG_CCM_REG0_SZ, 0x4},
- {OP_WR_E1, CCM_REG_QOS_PHYS_QNUM0_0, 0x9},
- {OP_WR_E1, CCM_REG_QOS_PHYS_QNUM0_1, 0x29},
- {OP_WR_E1, CCM_REG_QOS_PHYS_QNUM1_0, 0xa},
- {OP_WR_E1, CCM_REG_QOS_PHYS_QNUM1_1, 0x2a},
- {OP_WR_E1, CCM_REG_QOS_PHYS_QNUM2_0, 0x7},
- {OP_WR_E1, CCM_REG_QOS_PHYS_QNUM2_1, 0x27},
- {OP_WR_E1, CCM_REG_QOS_PHYS_QNUM3_0, 0x7},
- {OP_WR_E1, CCM_REG_QOS_PHYS_QNUM3_1, 0x27},
- {OP_WR_E1, CCM_REG_PHYS_QNUM1_0, 0xc},
- {OP_WR_E1, CCM_REG_PHYS_QNUM1_1, 0x2c},
- {OP_WR_E1, CCM_REG_PHYS_QNUM2_0, 0xc},
- {OP_WR_E1, CCM_REG_PHYS_QNUM2_1, 0x2c},
- {OP_WR_E1, CCM_REG_PHYS_QNUM3_0, 0xc},
- {OP_WR_E1, CCM_REG_PHYS_QNUM3_1, 0x2c},
- {OP_WR, CCM_REG_CCM_STORM0_IFEN, 0x1},
- {OP_WR, CCM_REG_CCM_STORM1_IFEN, 0x1},
- {OP_WR, CCM_REG_CCM_CQM_IFEN, 0x1},
- {OP_WR, CCM_REG_STORM_CCM_IFEN, 0x1},
- {OP_WR, CCM_REG_CQM_CCM_IFEN, 0x1},
- {OP_WR, CCM_REG_CSDM_IFEN, 0x1},
- {OP_WR, CCM_REG_TSEM_IFEN, 0x1},
- {OP_WR, CCM_REG_XSEM_IFEN, 0x1},
- {OP_WR, CCM_REG_USEM_IFEN, 0x1},
- {OP_WR, CCM_REG_PBF_IFEN, 0x1},
- {OP_WR, CCM_REG_CDU_AG_WR_IFEN, 0x1},
- {OP_WR, CCM_REG_CDU_AG_RD_IFEN, 0x1},
- {OP_WR, CCM_REG_CDU_SM_WR_IFEN, 0x1},
- {OP_WR, CCM_REG_CDU_SM_RD_IFEN, 0x1},
- {OP_WR, CCM_REG_CCM_CFC_IFEN, 0x1},
-#define CCM_COMMON_END 596
-#define CCM_FUNC0_START 596
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM0_0, 0x9},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM1_0, 0xa},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM2_0, 0x7},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM3_0, 0x7},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM1_0, 0xc},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM2_0, 0xb},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM3_0, 0x7},
-#define CCM_FUNC0_END 603
-#define CCM_FUNC1_START 603
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM0_1, 0x29},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM1_1, 0x2a},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM2_1, 0x27},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM3_1, 0x27},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM1_1, 0x2c},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM2_1, 0x2b},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM3_1, 0x27},
-#define CCM_FUNC1_END 610
-#define CCM_FUNC2_START 610
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM0_0, 0x19},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM1_0, 0x1a},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM2_0, 0x17},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM3_0, 0x17},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM1_0, 0x1c},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM2_0, 0x1b},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM3_0, 0x17},
-#define CCM_FUNC2_END 617
-#define CCM_FUNC3_START 617
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM0_1, 0x39},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM1_1, 0x3a},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM2_1, 0x37},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM3_1, 0x37},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM1_1, 0x3c},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM2_1, 0x3b},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM3_1, 0x37},
-#define CCM_FUNC3_END 624
-#define CCM_FUNC4_START 624
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM0_0, 0x49},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM1_0, 0x4a},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM2_0, 0x47},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM3_0, 0x47},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM1_0, 0x4c},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM2_0, 0x4b},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM3_0, 0x47},
-#define CCM_FUNC4_END 631
-#define CCM_FUNC5_START 631
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM0_1, 0x69},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM1_1, 0x6a},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM2_1, 0x67},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM3_1, 0x67},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM1_1, 0x6c},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM2_1, 0x6b},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM3_1, 0x67},
-#define CCM_FUNC5_END 638
-#define CCM_FUNC6_START 638
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM0_0, 0x59},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM1_0, 0x5a},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM2_0, 0x57},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM3_0, 0x57},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM1_0, 0x5c},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM2_0, 0x5b},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM3_0, 0x57},
-#define CCM_FUNC6_END 645
-#define CCM_FUNC7_START 645
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM0_1, 0x79},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM1_1, 0x7a},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM2_1, 0x77},
- {OP_WR_E1H, CCM_REG_QOS_PHYS_QNUM3_1, 0x77},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM1_1, 0x7c},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM2_1, 0x7b},
- {OP_WR_E1H, CCM_REG_PHYS_QNUM3_1, 0x77},
-#define CCM_FUNC7_END 652
-#define UCM_COMMON_START 652
- {OP_WR, UCM_REG_XX_OVFL_EVNT_ID, 0x32},
- {OP_WR, UCM_REG_UQM_UCM_HDR_P, 0x2150020},
- {OP_WR, UCM_REG_UQM_UCM_HDR_S, 0x2150020},
- {OP_WR, UCM_REG_TM_UCM_HDR, 0x30},
- {OP_WR, UCM_REG_ERR_UCM_HDR, 0x8100000},
- {OP_WR, UCM_REG_ERR_EVNT_ID, 0x33},
- {OP_WR, UCM_REG_EXPR_EVNT_ID, 0x30},
- {OP_WR, UCM_REG_STOP_EVNT_ID, 0x31},
- {OP_WR, UCM_REG_STORM_WEIGHT, 0x2},
- {OP_WR, UCM_REG_TSEM_WEIGHT, 0x4},
- {OP_WR, UCM_REG_CSEM_WEIGHT, 0x0},
- {OP_WR, UCM_REG_XSEM_WEIGHT, 0x2},
- {OP_WR, UCM_REG_DORQ_WEIGHT, 0x2},
- {OP_WR, UCM_REG_CP_WEIGHT, 0x0},
- {OP_WR, UCM_REG_USDM_WEIGHT, 0x2},
- {OP_WR, UCM_REG_UQM_P_WEIGHT, 0x7},
- {OP_WR, UCM_REG_UQM_S_WEIGHT, 0x2},
- {OP_WR, UCM_REG_TM_WEIGHT, 0x2},
- {OP_WR, UCM_REG_UCM_UQM_USE_Q, 0x1},
- {OP_WR, UCM_REG_INV_CFLG_Q, 0x1},
- {OP_WR, UCM_REG_GR_ARB_TYPE, 0x1},
- {OP_WR, UCM_REG_GR_LD0_PR, 0x1},
- {OP_WR, UCM_REG_GR_LD1_PR, 0x2},
- {OP_WR, UCM_REG_CFC_INIT_CRD, 0x1},
- {OP_WR, UCM_REG_FIC0_INIT_CRD, 0x40},
- {OP_WR, UCM_REG_FIC1_INIT_CRD, 0x40},
- {OP_WR, UCM_REG_TM_INIT_CRD, 0x4},
- {OP_WR, UCM_REG_UQM_INIT_CRD, 0x20},
- {OP_WR, UCM_REG_XX_INIT_CRD, 0xe},
- {OP_WR, UCM_REG_XX_MSG_NUM, 0x1b},
- {OP_ZR, UCM_REG_XX_TABLE, 0x12},
- {OP_SW_E1, UCM_REG_XX_DESCR_TABLE, 0x1b0266},
- {OP_SW_E1H, UCM_REG_XX_DESCR_TABLE, 0x1b02a0},
- {OP_WR, UCM_REG_N_SM_CTX_LD_0, 0x10},
- {OP_WR, UCM_REG_N_SM_CTX_LD_1, 0x7},
- {OP_WR, UCM_REG_N_SM_CTX_LD_2, 0xf},
- {OP_WR, UCM_REG_N_SM_CTX_LD_3, 0x10},
- {OP_ZR_E1, UCM_REG_N_SM_CTX_LD_4, 0x4},
- {OP_WR_E1H, UCM_REG_N_SM_CTX_LD_4, 0xb},
- {OP_ZR_E1H, UCM_REG_N_SM_CTX_LD_5, 0x3},
- {OP_WR, UCM_REG_UCM_REG0_SZ, 0x3},
- {OP_WR_E1, UCM_REG_PHYS_QNUM0_0, 0xf},
- {OP_WR_E1, UCM_REG_PHYS_QNUM0_1, 0x2f},
- {OP_WR_E1, UCM_REG_PHYS_QNUM1_0, 0xe},
- {OP_WR_E1, UCM_REG_PHYS_QNUM1_1, 0x2e},
- {OP_WR, UCM_REG_UCM_STORM0_IFEN, 0x1},
- {OP_WR, UCM_REG_UCM_STORM1_IFEN, 0x1},
- {OP_WR, UCM_REG_UCM_UQM_IFEN, 0x1},
- {OP_WR, UCM_REG_STORM_UCM_IFEN, 0x1},
- {OP_WR, UCM_REG_UQM_UCM_IFEN, 0x1},
- {OP_WR, UCM_REG_USDM_IFEN, 0x1},
- {OP_WR, UCM_REG_TM_UCM_IFEN, 0x1},
- {OP_WR, UCM_REG_UCM_TM_IFEN, 0x1},
- {OP_WR, UCM_REG_TSEM_IFEN, 0x1},
- {OP_WR, UCM_REG_CSEM_IFEN, 0x1},
- {OP_WR, UCM_REG_XSEM_IFEN, 0x1},
- {OP_WR, UCM_REG_DORQ_IFEN, 0x1},
- {OP_WR, UCM_REG_CDU_AG_WR_IFEN, 0x1},
- {OP_WR, UCM_REG_CDU_AG_RD_IFEN, 0x1},
- {OP_WR, UCM_REG_CDU_SM_WR_IFEN, 0x1},
- {OP_WR, UCM_REG_CDU_SM_RD_IFEN, 0x1},
- {OP_WR, UCM_REG_UCM_CFC_IFEN, 0x1},
-#define UCM_COMMON_END 714
-#define UCM_FUNC0_START 714
- {OP_WR_E1H, UCM_REG_PHYS_QNUM0_0, 0xf},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM1_0, 0xe},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM2_0, 0x0},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM3_0, 0x0},
-#define UCM_FUNC0_END 718
-#define UCM_FUNC1_START 718
- {OP_WR_E1H, UCM_REG_PHYS_QNUM0_1, 0x2f},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM1_1, 0x2e},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM2_1, 0x0},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM3_1, 0x0},
-#define UCM_FUNC1_END 722
-#define UCM_FUNC2_START 722
- {OP_WR_E1H, UCM_REG_PHYS_QNUM0_0, 0x1f},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM1_0, 0x1e},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM2_0, 0x0},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM3_0, 0x0},
-#define UCM_FUNC2_END 726
-#define UCM_FUNC3_START 726
- {OP_WR_E1H, UCM_REG_PHYS_QNUM0_1, 0x3f},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM1_1, 0x3e},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM2_1, 0x0},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM3_1, 0x0},
-#define UCM_FUNC3_END 730
-#define UCM_FUNC4_START 730
- {OP_WR_E1H, UCM_REG_PHYS_QNUM0_0, 0x4f},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM1_0, 0x4e},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM2_0, 0x0},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM3_0, 0x0},
-#define UCM_FUNC4_END 734
-#define UCM_FUNC5_START 734
- {OP_WR_E1H, UCM_REG_PHYS_QNUM0_1, 0x6f},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM1_1, 0x6e},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM2_1, 0x0},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM3_1, 0x0},
-#define UCM_FUNC5_END 738
-#define UCM_FUNC6_START 738
- {OP_WR_E1H, UCM_REG_PHYS_QNUM0_0, 0x5f},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM1_0, 0x5e},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM2_0, 0x0},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM3_0, 0x0},
-#define UCM_FUNC6_END 742
-#define UCM_FUNC7_START 742
- {OP_WR_E1H, UCM_REG_PHYS_QNUM0_1, 0x7f},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM1_1, 0x7e},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM2_1, 0x0},
- {OP_WR_E1H, UCM_REG_PHYS_QNUM3_1, 0x0},
-#define UCM_FUNC7_END 746
-#define USEM_COMMON_START 746
- {OP_RD, USEM_REG_MSG_NUM_FIC0, 0x0},
- {OP_RD, USEM_REG_MSG_NUM_FIC1, 0x0},
- {OP_RD, USEM_REG_MSG_NUM_FOC0, 0x0},
- {OP_RD, USEM_REG_MSG_NUM_FOC1, 0x0},
- {OP_RD, USEM_REG_MSG_NUM_FOC2, 0x0},
- {OP_RD, USEM_REG_MSG_NUM_FOC3, 0x0},
- {OP_WR, USEM_REG_ARB_ELEMENT0, 0x1},
- {OP_WR, USEM_REG_ARB_ELEMENT1, 0x2},
- {OP_WR, USEM_REG_ARB_ELEMENT2, 0x3},
- {OP_WR, USEM_REG_ARB_ELEMENT3, 0x0},
- {OP_WR, USEM_REG_ARB_ELEMENT4, 0x4},
- {OP_WR, USEM_REG_ARB_CYCLE_SIZE, 0x1},
- {OP_WR, USEM_REG_TS_0_AS, 0x0},
- {OP_WR, USEM_REG_TS_1_AS, 0x1},
- {OP_WR, USEM_REG_TS_2_AS, 0x4},
- {OP_WR, USEM_REG_TS_3_AS, 0x0},
- {OP_WR, USEM_REG_TS_4_AS, 0x1},
- {OP_WR, USEM_REG_TS_5_AS, 0x3},
- {OP_WR, USEM_REG_TS_6_AS, 0x0},
- {OP_WR, USEM_REG_TS_7_AS, 0x1},
- {OP_WR, USEM_REG_TS_8_AS, 0x4},
- {OP_WR, USEM_REG_TS_9_AS, 0x0},
- {OP_WR, USEM_REG_TS_10_AS, 0x1},
- {OP_WR, USEM_REG_TS_11_AS, 0x3},
- {OP_WR, USEM_REG_TS_12_AS, 0x0},
- {OP_WR, USEM_REG_TS_13_AS, 0x1},
- {OP_WR, USEM_REG_TS_14_AS, 0x4},
- {OP_WR, USEM_REG_TS_15_AS, 0x0},
- {OP_WR, USEM_REG_TS_16_AS, 0x4},
- {OP_WR, USEM_REG_TS_17_AS, 0x3},
- {OP_ZR, USEM_REG_TS_18_AS, 0x2},
- {OP_WR, USEM_REG_ENABLE_IN, 0x3fff},
- {OP_WR, USEM_REG_ENABLE_OUT, 0x3ff},
- {OP_WR, USEM_REG_FIC0_DISABLE, 0x0},
- {OP_WR, USEM_REG_FIC1_DISABLE, 0x0},
- {OP_WR, USEM_REG_PAS_DISABLE, 0x0},
- {OP_WR, USEM_REG_THREADS_LIST, 0xffff},
- {OP_ZR, USEM_REG_PASSIVE_BUFFER, 0x800},
- {OP_WR, USEM_REG_FAST_MEMORY + 0x18bc0, 0x1},
- {OP_WR, USEM_REG_FAST_MEMORY + 0x18000, 0x1a},
- {OP_WR, USEM_REG_FAST_MEMORY + 0x18040, 0x4e},
- {OP_WR, USEM_REG_FAST_MEMORY + 0x18080, 0x10},
- {OP_WR, USEM_REG_FAST_MEMORY + 0x180c0, 0x20},
- {OP_WR_ASIC, USEM_REG_FAST_MEMORY + 0x18300, 0x7a120},
- {OP_WR_EMUL, USEM_REG_FAST_MEMORY + 0x18300, 0x138},
- {OP_WR_FPGA, USEM_REG_FAST_MEMORY + 0x18300, 0x1388},
- {OP_WR, USEM_REG_FAST_MEMORY + 0x183c0, 0x1f4},
- {OP_WR_ASIC, USEM_REG_FAST_MEMORY + 0x18380, 0x1dcd6500},
- {OP_WR_EMUL, USEM_REG_FAST_MEMORY + 0x18380, 0x4c4b4},
- {OP_WR_FPGA, USEM_REG_FAST_MEMORY + 0x18380, 0x4c4b40},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5000, 0xc2},
- {OP_WR_EMUL_E1H, USEM_REG_FAST_MEMORY + 0x11480, 0x0},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1020, 0xc8},
- {OP_WR_E1H, USEM_REG_FAST_MEMORY + 0x11480, 0x1},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1000, 0x2},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x2000, 0x102},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4640, 0x40},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x8980, 0xc8},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x57f0, 0x4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x8960, 0x2},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x57d8, 0x5},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3228, 0x4},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x57d8 + 0x14, 0x10281},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3200, 0x9},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1c60, 0x20},
- {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3200 + 0x24, 0x102bb},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x20282},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2},
- {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x102bc},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0xc, 0x3},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b68, 0x2},
- {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x6b68 + 0x8, 0x202bd},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b10, 0x2},
- {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x74c0, 0x202bf},
- {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x100284},
- {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c00, 0x1002c1},
- {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x0},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x100294},
- {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c40, 0x1002d1},
- {OP_ZP_E1, USEM_REG_INT_TABLE, 0xc30000},
- {OP_ZP_E1H, USEM_REG_INT_TABLE, 0xd20000},
- {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x1302a4},
- {OP_WR_64_E1H, USEM_REG_INT_TABLE + 0x3a8, 0xb02e1},
- {OP_ZP_E1, USEM_REG_PRAM, 0x314c0000},
- {OP_ZP_E1H, USEM_REG_PRAM, 0x31b60000},
- {OP_ZP_E1, USEM_REG_PRAM + 0x8000, 0x35ef0c53},
- {OP_ZP_E1H, USEM_REG_PRAM + 0x8000, 0x36500c6e},
- {OP_ZP_E1, USEM_REG_PRAM + 0x10000, 0x361319cf},
- {OP_ZP_E1H, USEM_REG_PRAM + 0x10000, 0x37591a02},
- {OP_ZP_E1, USEM_REG_PRAM + 0x18000, 0x7112754},
- {OP_ZP_E1H, USEM_REG_PRAM + 0x18000, 0x286127d9},
- {OP_WR_64_E1, USEM_REG_PRAM + 0x18ee0, 0x4e2402a6},
- {OP_WR_64_E1H, USEM_REG_PRAM + 0x1ff40, 0x401802e3},
-#define USEM_COMMON_END 842
-#define USEM_PORT0_START 842
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1400, 0xa0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9000, 0xa0},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1900, 0x10},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9500, 0x40},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1980, 0x30},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9700, 0x3c},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4740, 0xb4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x2450, 0xb4},
- {OP_WR_E1, USEM_REG_FAST_MEMORY + 0x1d90, 0x0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x2ad0, 0x2},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1b40, 0x4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3080, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1b60, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x8000, 0x12c},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5318, 0x98},
- {OP_WR_E1H, USEM_REG_FAST_MEMORY + 0x3238, 0x0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc},
-#define USEM_PORT0_END 876
-#define USEM_PORT1_START 876
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1680, 0xa0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9280, 0xa0},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1940, 0x10},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9600, 0x40},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1a40, 0x30},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x97f0, 0x3c},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4a10, 0xb4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x2720, 0xb4},
- {OP_WR_E1, USEM_REG_FAST_MEMORY + 0x1d94, 0x0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x2ad8, 0x2},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1b50, 0x4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3100, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1be0, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x84b0, 0x12c},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5578, 0x98},
- {OP_WR_E1H, USEM_REG_FAST_MEMORY + 0x323c, 0x0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5080, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc},
-#define USEM_PORT1_END 910
-#define USEM_FUNC0_START 910
- {OP_WR_E1H, USEM_REG_FAST_MEMORY + 0x2a30, 0x0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3000, 0x4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4018, 0x2},
-#define USEM_FUNC0_END 913
-#define USEM_FUNC1_START 913
- {OP_WR_E1H, USEM_REG_FAST_MEMORY + 0x2a34, 0x0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3010, 0x4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4028, 0x2},
-#define USEM_FUNC1_END 916
-#define USEM_FUNC2_START 916
- {OP_WR_E1H, USEM_REG_FAST_MEMORY + 0x2a38, 0x0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3020, 0x4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4038, 0x2},
-#define USEM_FUNC2_END 919
-#define USEM_FUNC3_START 919
- {OP_WR_E1H, USEM_REG_FAST_MEMORY + 0x2a3c, 0x0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3030, 0x4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4048, 0x2},
-#define USEM_FUNC3_END 922
-#define USEM_FUNC4_START 922
- {OP_WR_E1H, USEM_REG_FAST_MEMORY + 0x2a40, 0x0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3040, 0x4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4058, 0x2},
-#define USEM_FUNC4_END 925
-#define USEM_FUNC5_START 925
- {OP_WR_E1H, USEM_REG_FAST_MEMORY + 0x2a44, 0x0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3050, 0x4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4068, 0x2},
-#define USEM_FUNC5_END 928
-#define USEM_FUNC6_START 928
- {OP_WR_E1H, USEM_REG_FAST_MEMORY + 0x2a48, 0x0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3060, 0x4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4078, 0x2},
-#define USEM_FUNC6_END 931
-#define USEM_FUNC7_START 931
- {OP_WR_E1H, USEM_REG_FAST_MEMORY + 0x2a4c, 0x0},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3070, 0x4},
- {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4088, 0x2},
-#define USEM_FUNC7_END 934
-#define CSEM_COMMON_START 934
- {OP_RD, CSEM_REG_MSG_NUM_FIC0, 0x0},
- {OP_RD, CSEM_REG_MSG_NUM_FIC1, 0x0},
- {OP_RD, CSEM_REG_MSG_NUM_FOC0, 0x0},
- {OP_RD, CSEM_REG_MSG_NUM_FOC1, 0x0},
- {OP_RD, CSEM_REG_MSG_NUM_FOC2, 0x0},
- {OP_RD, CSEM_REG_MSG_NUM_FOC3, 0x0},
- {OP_WR, CSEM_REG_ARB_ELEMENT0, 0x1},
- {OP_WR, CSEM_REG_ARB_ELEMENT1, 0x2},
- {OP_WR, CSEM_REG_ARB_ELEMENT2, 0x3},
- {OP_WR, CSEM_REG_ARB_ELEMENT3, 0x0},
- {OP_WR, CSEM_REG_ARB_ELEMENT4, 0x4},
- {OP_WR, CSEM_REG_ARB_CYCLE_SIZE, 0x1},
- {OP_WR, CSEM_REG_TS_0_AS, 0x0},
- {OP_WR, CSEM_REG_TS_1_AS, 0x1},
- {OP_WR, CSEM_REG_TS_2_AS, 0x4},
- {OP_WR, CSEM_REG_TS_3_AS, 0x0},
- {OP_WR, CSEM_REG_TS_4_AS, 0x1},
- {OP_WR, CSEM_REG_TS_5_AS, 0x3},
- {OP_WR, CSEM_REG_TS_6_AS, 0x0},
- {OP_WR, CSEM_REG_TS_7_AS, 0x1},
- {OP_WR, CSEM_REG_TS_8_AS, 0x4},
- {OP_WR, CSEM_REG_TS_9_AS, 0x0},
- {OP_WR, CSEM_REG_TS_10_AS, 0x1},
- {OP_WR, CSEM_REG_TS_11_AS, 0x3},
- {OP_WR, CSEM_REG_TS_12_AS, 0x0},
- {OP_WR, CSEM_REG_TS_13_AS, 0x1},
- {OP_WR, CSEM_REG_TS_14_AS, 0x4},
- {OP_WR, CSEM_REG_TS_15_AS, 0x0},
- {OP_WR, CSEM_REG_TS_16_AS, 0x4},
- {OP_WR, CSEM_REG_TS_17_AS, 0x3},
- {OP_ZR, CSEM_REG_TS_18_AS, 0x2},
- {OP_WR, CSEM_REG_ENABLE_IN, 0x3fff},
- {OP_WR, CSEM_REG_ENABLE_OUT, 0x3ff},
- {OP_WR, CSEM_REG_FIC0_DISABLE, 0x0},
- {OP_WR, CSEM_REG_FIC1_DISABLE, 0x0},
- {OP_WR, CSEM_REG_PAS_DISABLE, 0x0},
- {OP_WR, CSEM_REG_THREADS_LIST, 0xffff},
- {OP_ZR, CSEM_REG_PASSIVE_BUFFER, 0x800},
- {OP_WR, CSEM_REG_FAST_MEMORY + 0x18bc0, 0x1},
- {OP_WR, CSEM_REG_FAST_MEMORY + 0x18000, 0x10},
- {OP_WR, CSEM_REG_FAST_MEMORY + 0x18040, 0x12},
- {OP_WR, CSEM_REG_FAST_MEMORY + 0x18080, 0x30},
- {OP_WR, CSEM_REG_FAST_MEMORY + 0x180c0, 0xe},
- {OP_WR, CSEM_REG_FAST_MEMORY + 0x183c0, 0x1f4},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x5000, 0x42},
- {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11480, 0x1},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1020, 0xc8},
- {OP_WR_EMUL_E1H, CSEM_REG_FAST_MEMORY + 0x11480, 0x0},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1000, 0x2},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x1000, 0x42},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2000, 0xc0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x7020, 0xc8},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3070, 0x80},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x7000, 0x2},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x4280, 0x4},
- {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11e8, 0x0},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x25c0, 0x240},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3000, 0xc0},
- {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a8},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4070, 0x80},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x5280, 0x4},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6700, 0x100},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x9000, 0x400},
- {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x6b08, 0x2002e5},
- {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x13fffff},
- {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002b0},
- {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c00, 0x100305},
- {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x0},
- {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002c0},
- {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c40, 0x100315},
- {OP_ZP_E1, CSEM_REG_INT_TABLE, 0x710000},
- {OP_ZP_E1H, CSEM_REG_INT_TABLE, 0x740000},
- {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002d0},
- {OP_WR_64_E1H, CSEM_REG_INT_TABLE + 0x380, 0x100325},
- {OP_ZP_E1, CSEM_REG_PRAM, 0x32290000},
- {OP_ZP_E1H, CSEM_REG_PRAM, 0x32260000},
- {OP_ZP_E1, CSEM_REG_PRAM + 0x8000, 0x23630c8b},
- {OP_ZP_E1H, CSEM_REG_PRAM + 0x8000, 0x246e0c8a},
- {OP_WR_64_E1, CSEM_REG_PRAM + 0xc930, 0x654002d2},
- {OP_WR_64_E1H, CSEM_REG_PRAM + 0xcbb0, 0x64f00327},
-#define CSEM_COMMON_END 1014
-#define CSEM_PORT0_START 1014
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1400, 0xa0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8000, 0xa0},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1900, 0x10},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8500, 0x40},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1980, 0x30},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8700, 0x3c},
- {OP_WR_E1, CSEM_REG_FAST_MEMORY + 0x5118, 0x0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4040, 0x6},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2300, 0xe},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3040, 0x6},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2410, 0x30},
-#define CSEM_PORT0_END 1025
-#define CSEM_PORT1_START 1025
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1680, 0xa0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8280, 0xa0},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1940, 0x10},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8600, 0x40},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1a40, 0x30},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x87f0, 0x3c},
- {OP_WR_E1, CSEM_REG_FAST_MEMORY + 0x511c, 0x0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4058, 0x6},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2338, 0xe},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3058, 0x6},
- {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x24d0, 0x30},
-#define CSEM_PORT1_END 1036
-#define CSEM_FUNC0_START 1036
- {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1148, 0x0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3300, 0x2},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6040, 0x30},
-#define CSEM_FUNC0_END 1039
-#define CSEM_FUNC1_START 1039
- {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x114c, 0x0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3308, 0x2},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6100, 0x30},
-#define CSEM_FUNC1_END 1042
-#define CSEM_FUNC2_START 1042
- {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1150, 0x0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3310, 0x2},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x61c0, 0x30},
-#define CSEM_FUNC2_END 1045
-#define CSEM_FUNC3_START 1045
- {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1154, 0x0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3318, 0x2},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6280, 0x30},
-#define CSEM_FUNC3_END 1048
-#define CSEM_FUNC4_START 1048
- {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1158, 0x0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3320, 0x2},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6340, 0x30},
-#define CSEM_FUNC4_END 1051
-#define CSEM_FUNC5_START 1051
- {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x115c, 0x0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3328, 0x2},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6400, 0x30},
-#define CSEM_FUNC5_END 1054
-#define CSEM_FUNC6_START 1054
- {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1160, 0x0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3330, 0x2},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x64c0, 0x30},
-#define CSEM_FUNC6_END 1057
-#define CSEM_FUNC7_START 1057
- {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1164, 0x0},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3338, 0x2},
- {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6580, 0x30},
-#define CSEM_FUNC7_END 1060
-#define XPB_COMMON_START 1060
- {OP_WR, GRCBASE_XPB + PB_REG_CONTROL, 0x20},
-#define XPB_COMMON_END 1061
-#define DQ_COMMON_START 1061
- {OP_WR, DORQ_REG_MODE_ACT, 0x2},
- {OP_WR, DORQ_REG_NORM_CID_OFST, 0x3},
- {OP_WR, DORQ_REG_OUTST_REQ, 0x4},
- {OP_WR, DORQ_REG_DPM_CID_ADDR, 0x8},
- {OP_WR, DORQ_REG_RSP_INIT_CRD, 0x2},
- {OP_WR, DORQ_REG_NORM_CMHEAD_TX, 0x90},
- {OP_WR, DORQ_REG_CMHEAD_RX, 0x90},
- {OP_WR, DORQ_REG_SHRT_CMHEAD, 0x800090},
- {OP_WR, DORQ_REG_ERR_CMHEAD, 0x8140000},
- {OP_WR, DORQ_REG_AGG_CMD0, 0x8a},
- {OP_WR, DORQ_REG_AGG_CMD1, 0x80},
- {OP_WR, DORQ_REG_AGG_CMD2, 0x90},
- {OP_WR, DORQ_REG_AGG_CMD3, 0x80},
- {OP_WR, DORQ_REG_SHRT_ACT_CNT, 0x6},
- {OP_WR, DORQ_REG_DQ_FIFO_FULL_TH, 0x7d0},
- {OP_WR, DORQ_REG_DQ_FIFO_AFULL_TH, 0x76c},
- {OP_WR, DORQ_REG_REGN, 0x7c1004},
- {OP_WR, DORQ_REG_IF_EN, 0xf},
-#define DQ_COMMON_END 1079
-#define TIMERS_COMMON_START 1079
- {OP_ZR, TM_REG_CLIN_PRIOR0_CLIENT, 0x2},
- {OP_WR, TM_REG_LIN_SETCLR_FIFO_ALFULL_THR, 0x1c},
- {OP_WR, TM_REG_CFC_AC_CRDCNT_VAL, 0x1},
- {OP_WR, TM_REG_CFC_CLD_CRDCNT_VAL, 0x1},
- {OP_WR, TM_REG_CLOUT_CRDCNT0_VAL, 0x1},
- {OP_WR, TM_REG_CLOUT_CRDCNT1_VAL, 0x1},
- {OP_WR, TM_REG_CLOUT_CRDCNT2_VAL, 0x1},
- {OP_WR, TM_REG_EXP_CRDCNT_VAL, 0x1},
- {OP_WR_E1, TM_REG_PCIARB_CRDCNT_VAL, 0x1},
- {OP_WR_E1H, TM_REG_PCIARB_CRDCNT_VAL, 0x2},
- {OP_WR_ASIC, TM_REG_TIMER_TICK_SIZE, 0x3d090},
- {OP_WR_EMUL, TM_REG_TIMER_TICK_SIZE, 0x9c},
- {OP_WR_FPGA, TM_REG_TIMER_TICK_SIZE, 0x9c4},
- {OP_WR, TM_REG_CL0_CONT_REGION, 0x8},
- {OP_WR, TM_REG_CL1_CONT_REGION, 0xc},
- {OP_WR, TM_REG_CL2_CONT_REGION, 0x10},
- {OP_WR, TM_REG_TM_CONTEXT_REGION, 0x20},
- {OP_WR, TM_REG_EN_TIMERS, 0x1},
- {OP_WR, TM_REG_EN_REAL_TIME_CNT, 0x1},
- {OP_WR, TM_REG_EN_CL0_INPUT, 0x1},
- {OP_WR, TM_REG_EN_CL1_INPUT, 0x1},
- {OP_WR, TM_REG_EN_CL2_INPUT, 0x1},
-#define TIMERS_COMMON_END 1101
-#define TIMERS_PORT0_START 1101
- {OP_WR, TM_REG_LIN0_LOGIC_ADDR, 0x0},
- {OP_WR, TM_REG_LIN0_PHY_ADDR_VALID, 0x0},
- {OP_ZR, TM_REG_LIN0_PHY_ADDR, 0x2},
-#define TIMERS_PORT0_END 1104
-#define TIMERS_PORT1_START 1104
- {OP_WR, TM_REG_LIN1_LOGIC_ADDR, 0x0},
- {OP_WR, TM_REG_LIN1_PHY_ADDR_VALID, 0x0},
- {OP_ZR, TM_REG_LIN1_PHY_ADDR, 0x2},
-#define TIMERS_PORT1_END 1107
-#define XSDM_COMMON_START 1107
- {OP_WR_E1, XSDM_REG_CFC_RSP_START_ADDR, 0x614},
- {OP_WR_E1H, XSDM_REG_CFC_RSP_START_ADDR, 0x424},
- {OP_WR_E1, XSDM_REG_CMP_COUNTER_START_ADDR, 0x600},
- {OP_WR_E1H, XSDM_REG_CMP_COUNTER_START_ADDR, 0x410},
- {OP_WR_E1, XSDM_REG_Q_COUNTER_START_ADDR, 0x604},
- {OP_WR_E1H, XSDM_REG_Q_COUNTER_START_ADDR, 0x414},
- {OP_WR, XSDM_REG_CMP_COUNTER_MAX0, 0xffff},
- {OP_WR, XSDM_REG_CMP_COUNTER_MAX1, 0xffff},
- {OP_WR, XSDM_REG_CMP_COUNTER_MAX2, 0xffff},
- {OP_WR, XSDM_REG_CMP_COUNTER_MAX3, 0xffff},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_0, 0x20},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_1, 0x20},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_2, 0x34},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_3, 0x35},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_4, 0x23},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_5, 0x24},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_6, 0x25},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_7, 0x26},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_8, 0x27},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_9, 0x29},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_10, 0x2a},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_11, 0x2b},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_12, 0x2c},
- {OP_WR, XSDM_REG_AGG_INT_EVENT_13, 0x2d},
- {OP_ZR, XSDM_REG_AGG_INT_EVENT_14, 0x52},
- {OP_WR, XSDM_REG_AGG_INT_MODE_0, 0x1},
- {OP_ZR, XSDM_REG_AGG_INT_MODE_1, 0x1f},
- {OP_WR, XSDM_REG_ENABLE_IN1, 0x7ffffff},
- {OP_WR, XSDM_REG_ENABLE_IN2, 0x3f},
- {OP_WR, XSDM_REG_ENABLE_OUT1, 0x7ffffff},
- {OP_WR, XSDM_REG_ENABLE_OUT2, 0xf},
- {OP_RD, XSDM_REG_NUM_OF_Q0_CMD, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_Q1_CMD, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_Q3_CMD, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_Q4_CMD, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_Q5_CMD, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_Q6_CMD, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_Q7_CMD, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_Q8_CMD, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_Q9_CMD, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_Q10_CMD, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_Q11_CMD, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_PKT_END_MSG, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_PXP_ASYNC_REQ, 0x0},
- {OP_RD, XSDM_REG_NUM_OF_ACK_AFTER_PLACE, 0x0},
- {OP_WR_E1, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1},
- {OP_WR_ASIC, XSDM_REG_TIMER_TICK, 0x3e8},
- {OP_WR_EMUL, XSDM_REG_TIMER_TICK, 0x1},
- {OP_WR_FPGA, XSDM_REG_TIMER_TICK, 0xa},
-#define XSDM_COMMON_END 1156
-#define QM_COMMON_START 1156
- {OP_WR, QM_REG_ACTCTRINITVAL_0, 0x6},
- {OP_WR, QM_REG_ACTCTRINITVAL_1, 0x5},
- {OP_WR, QM_REG_ACTCTRINITVAL_2, 0xa},
- {OP_WR, QM_REG_ACTCTRINITVAL_3, 0x5},
- {OP_WR, QM_REG_PCIREQAT, 0x2},
- {OP_WR, QM_REG_CMINITCRD_0, 0x4},
- {OP_WR, QM_REG_CMINITCRD_1, 0x4},
- {OP_WR, QM_REG_CMINITCRD_2, 0x4},
- {OP_WR, QM_REG_CMINITCRD_3, 0x4},
- {OP_WR, QM_REG_CMINITCRD_4, 0x4},
- {OP_WR, QM_REG_CMINITCRD_5, 0x4},
- {OP_WR, QM_REG_CMINITCRD_6, 0x4},
- {OP_WR, QM_REG_CMINITCRD_7, 0x4},
- {OP_WR, QM_REG_OUTLDREQ, 0x4},
- {OP_WR, QM_REG_CTXREG_0, 0x7c},
- {OP_WR, QM_REG_CTXREG_1, 0x3d},
- {OP_WR, QM_REG_CTXREG_2, 0x3f},
- {OP_WR, QM_REG_CTXREG_3, 0x9c},
- {OP_WR, QM_REG_ENSEC, 0x7},
- {OP_ZR, QM_REG_QVOQIDX_0, 0x5},
- {OP_WR, QM_REG_WRRWEIGHTS_0, 0x1010101},
- {OP_WR, QM_REG_QVOQIDX_5, 0x0},
- {OP_WR, QM_REG_QVOQIDX_6, 0x4},
- {OP_WR, QM_REG_QVOQIDX_7, 0x4},
- {OP_WR, QM_REG_QVOQIDX_8, 0x2},
- {OP_WR, QM_REG_WRRWEIGHTS_1, 0x8012004},
- {OP_WR, QM_REG_QVOQIDX_9, 0x5},
- {OP_WR, QM_REG_QVOQIDX_10, 0x5},
- {OP_WR, QM_REG_QVOQIDX_11, 0x5},
- {OP_WR, QM_REG_QVOQIDX_12, 0x5},
- {OP_WR, QM_REG_WRRWEIGHTS_2, 0x20081001},
- {OP_WR, QM_REG_QVOQIDX_13, 0x8},
- {OP_WR, QM_REG_QVOQIDX_14, 0x6},
- {OP_WR, QM_REG_QVOQIDX_15, 0x7},
- {OP_WR, QM_REG_QVOQIDX_16, 0x0},
- {OP_WR, QM_REG_WRRWEIGHTS_3, 0x1010120},
- {OP_ZR, QM_REG_QVOQIDX_17, 0x4},
- {OP_WR, QM_REG_WRRWEIGHTS_4, 0x1010101},
- {OP_ZR_E1, QM_REG_QVOQIDX_21, 0x4},
- {OP_WR_E1H, QM_REG_QVOQIDX_21, 0x0},
- {OP_WR_E1, QM_REG_WRRWEIGHTS_5, 0x1010101},
- {OP_WR_E1H, QM_REG_QVOQIDX_22, 0x4},
- {OP_ZR_E1, QM_REG_QVOQIDX_25, 0x4},
- {OP_WR_E1H, QM_REG_QVOQIDX_23, 0x4},
- {OP_WR_E1, QM_REG_WRRWEIGHTS_6, 0x1010101},
- {OP_WR_E1H, QM_REG_QVOQIDX_24, 0x2},
- {OP_ZR_E1, QM_REG_QVOQIDX_29, 0x3},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_5, 0x8012004},
- {OP_WR_E1H, QM_REG_QVOQIDX_25, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_26, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_27, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_28, 0x5},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_6, 0x20081001},
- {OP_WR_E1H, QM_REG_QVOQIDX_29, 0x8},
- {OP_WR_E1H, QM_REG_QVOQIDX_30, 0x6},
- {OP_WR_E1H, QM_REG_QVOQIDX_31, 0x7},
- {OP_WR, QM_REG_QVOQIDX_32, 0x1},
- {OP_WR_E1, QM_REG_WRRWEIGHTS_7, 0x1010101},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_7, 0x1010120},
- {OP_WR, QM_REG_QVOQIDX_33, 0x1},
- {OP_WR, QM_REG_QVOQIDX_34, 0x1},
- {OP_WR, QM_REG_QVOQIDX_35, 0x1},
- {OP_WR, QM_REG_QVOQIDX_36, 0x1},
- {OP_WR, QM_REG_WRRWEIGHTS_8, 0x1010101},
- {OP_WR, QM_REG_QVOQIDX_37, 0x1},
- {OP_WR, QM_REG_QVOQIDX_38, 0x4},
- {OP_WR, QM_REG_QVOQIDX_39, 0x4},
- {OP_WR, QM_REG_QVOQIDX_40, 0x2},
- {OP_WR, QM_REG_WRRWEIGHTS_9, 0x8012004},
- {OP_WR, QM_REG_QVOQIDX_41, 0x5},
- {OP_WR, QM_REG_QVOQIDX_42, 0x5},
- {OP_WR, QM_REG_QVOQIDX_43, 0x5},
- {OP_WR, QM_REG_QVOQIDX_44, 0x5},
- {OP_WR, QM_REG_WRRWEIGHTS_10, 0x20081001},
- {OP_WR, QM_REG_QVOQIDX_45, 0x8},
- {OP_WR, QM_REG_QVOQIDX_46, 0x6},
- {OP_WR, QM_REG_QVOQIDX_47, 0x7},
- {OP_WR, QM_REG_QVOQIDX_48, 0x1},
- {OP_WR, QM_REG_WRRWEIGHTS_11, 0x1010120},
- {OP_WR, QM_REG_QVOQIDX_49, 0x1},
- {OP_WR, QM_REG_QVOQIDX_50, 0x1},
- {OP_WR, QM_REG_QVOQIDX_51, 0x1},
- {OP_WR, QM_REG_QVOQIDX_52, 0x1},
- {OP_WR, QM_REG_WRRWEIGHTS_12, 0x1010101},
- {OP_WR, QM_REG_QVOQIDX_53, 0x1},
- {OP_WR_E1, QM_REG_QVOQIDX_54, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_54, 0x4},
- {OP_WR_E1, QM_REG_QVOQIDX_55, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_55, 0x4},
- {OP_WR_E1, QM_REG_QVOQIDX_56, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_56, 0x2},
- {OP_WR_E1, QM_REG_WRRWEIGHTS_13, 0x1010101},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_13, 0x8012004},
- {OP_WR_E1, QM_REG_QVOQIDX_57, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_57, 0x5},
- {OP_WR_E1, QM_REG_QVOQIDX_58, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_58, 0x5},
- {OP_WR_E1, QM_REG_QVOQIDX_59, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_59, 0x5},
- {OP_WR_E1, QM_REG_QVOQIDX_60, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_60, 0x5},
- {OP_WR_E1, QM_REG_WRRWEIGHTS_14, 0x1010101},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_14, 0x20081001},
- {OP_WR_E1, QM_REG_QVOQIDX_61, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_61, 0x8},
- {OP_WR_E1, QM_REG_QVOQIDX_62, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_62, 0x6},
- {OP_WR_E1, QM_REG_QVOQIDX_63, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_63, 0x7},
- {OP_WR_E1, QM_REG_WRRWEIGHTS_15, 0x1010101},
- {OP_WR_E1H, QM_REG_QVOQIDX_64, 0x0},
- {OP_WR_E1, QM_REG_VOQQMASK_0_LSB, 0xffff003f},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_15, 0x1010120},
- {OP_ZR_E1, QM_REG_VOQQMASK_0_MSB, 0x2},
- {OP_ZR_E1H, QM_REG_QVOQIDX_65, 0x4},
- {OP_WR_E1, QM_REG_VOQQMASK_1_MSB, 0xffff003f},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_16, 0x1010101},
- {OP_WR_E1, QM_REG_VOQQMASK_2_LSB, 0x100},
- {OP_WR_E1H, QM_REG_QVOQIDX_69, 0x0},
- {OP_WR_E1, QM_REG_VOQQMASK_2_MSB, 0x100},
- {OP_WR_E1H, QM_REG_QVOQIDX_70, 0x4},
- {OP_WR_E1H, QM_REG_QVOQIDX_71, 0x4},
- {OP_WR_E1H, QM_REG_QVOQIDX_72, 0x2},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_17, 0x8012004},
- {OP_WR_E1H, QM_REG_QVOQIDX_73, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_74, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_75, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_76, 0x5},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_18, 0x20081001},
- {OP_WR_E1H, QM_REG_QVOQIDX_77, 0x8},
- {OP_WR_E1H, QM_REG_QVOQIDX_78, 0x6},
- {OP_WR_E1H, QM_REG_QVOQIDX_79, 0x7},
- {OP_WR_E1H, QM_REG_QVOQIDX_80, 0x0},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_19, 0x1010120},
- {OP_ZR_E1H, QM_REG_QVOQIDX_81, 0x4},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_20, 0x1010101},
- {OP_WR_E1H, QM_REG_QVOQIDX_85, 0x0},
- {OP_WR_E1H, QM_REG_QVOQIDX_86, 0x4},
- {OP_WR_E1H, QM_REG_QVOQIDX_87, 0x4},
- {OP_WR_E1H, QM_REG_QVOQIDX_88, 0x2},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_21, 0x8012004},
- {OP_WR_E1H, QM_REG_QVOQIDX_89, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_90, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_91, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_92, 0x5},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_22, 0x20081001},
- {OP_WR_E1H, QM_REG_QVOQIDX_93, 0x8},
- {OP_WR_E1H, QM_REG_QVOQIDX_94, 0x6},
- {OP_WR_E1H, QM_REG_QVOQIDX_95, 0x7},
- {OP_WR_E1H, QM_REG_QVOQIDX_96, 0x1},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_23, 0x1010120},
- {OP_WR_E1H, QM_REG_QVOQIDX_97, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_98, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_99, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_100, 0x1},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_24, 0x1010101},
- {OP_WR_E1H, QM_REG_QVOQIDX_101, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_102, 0x4},
- {OP_WR_E1H, QM_REG_QVOQIDX_103, 0x4},
- {OP_WR_E1H, QM_REG_QVOQIDX_104, 0x2},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_25, 0x8012004},
- {OP_WR_E1H, QM_REG_QVOQIDX_105, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_106, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_107, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_108, 0x5},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_26, 0x20081001},
- {OP_WR_E1H, QM_REG_QVOQIDX_109, 0x8},
- {OP_WR_E1H, QM_REG_QVOQIDX_110, 0x6},
- {OP_WR_E1H, QM_REG_QVOQIDX_111, 0x7},
- {OP_WR_E1H, QM_REG_QVOQIDX_112, 0x1},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_27, 0x1010120},
- {OP_WR_E1H, QM_REG_QVOQIDX_113, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_114, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_115, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_116, 0x1},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_28, 0x1010101},
- {OP_WR_E1H, QM_REG_QVOQIDX_117, 0x1},
- {OP_WR_E1H, QM_REG_QVOQIDX_118, 0x4},
- {OP_WR_E1H, QM_REG_QVOQIDX_119, 0x4},
- {OP_WR_E1H, QM_REG_QVOQIDX_120, 0x2},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_29, 0x8012004},
- {OP_WR_E1H, QM_REG_QVOQIDX_121, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_122, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_123, 0x5},
- {OP_WR_E1H, QM_REG_QVOQIDX_124, 0x5},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_30, 0x20081001},
- {OP_WR_E1H, QM_REG_QVOQIDX_125, 0x8},
- {OP_WR_E1H, QM_REG_QVOQIDX_126, 0x6},
- {OP_WR_E1H, QM_REG_QVOQIDX_127, 0x7},
- {OP_WR_E1H, QM_REG_WRRWEIGHTS_31, 0x1010120},
- {OP_WR_E1H, QM_REG_VOQQMASK_0_LSB, 0x3f003f},
- {OP_WR_E1H, QM_REG_VOQQMASK_0_MSB, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_0_LSB_EXT_A, 0x3f003f},
- {OP_WR_E1H, QM_REG_VOQQMASK_0_MSB_EXT_A, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_1_LSB, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_1_MSB, 0x3f003f},
- {OP_WR_E1H, QM_REG_VOQQMASK_1_LSB_EXT_A, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_1_MSB_EXT_A, 0x3f003f},
- {OP_WR_E1H, QM_REG_VOQQMASK_2_LSB, 0x1000100},
- {OP_WR_E1H, QM_REG_VOQQMASK_2_MSB, 0x1000100},
- {OP_WR_E1H, QM_REG_VOQQMASK_2_LSB_EXT_A, 0x1000100},
- {OP_WR_E1H, QM_REG_VOQQMASK_2_MSB_EXT_A, 0x1000100},
- {OP_ZR, QM_REG_VOQQMASK_3_LSB, 0x2},
- {OP_WR_E1, QM_REG_VOQQMASK_4_LSB, 0xc0},
- {OP_WR_E1H, QM_REG_VOQQMASK_3_LSB_EXT_A, 0x0},
- {OP_WR_E1, QM_REG_VOQQMASK_4_MSB, 0xc0},
- {OP_WR_E1H, QM_REG_VOQQMASK_3_MSB_EXT_A, 0x0},
- {OP_WR_E1, QM_REG_VOQQMASK_5_LSB, 0x1e00},
- {OP_WR_E1H, QM_REG_VOQQMASK_4_LSB, 0xc000c0},
- {OP_WR_E1, QM_REG_VOQQMASK_5_MSB, 0x1e00},
- {OP_WR_E1H, QM_REG_VOQQMASK_4_MSB, 0xc000c0},
- {OP_WR_E1, QM_REG_VOQQMASK_6_LSB, 0x4000},
- {OP_WR_E1H, QM_REG_VOQQMASK_4_LSB_EXT_A, 0xc000c0},
- {OP_WR_E1, QM_REG_VOQQMASK_6_MSB, 0x4000},
- {OP_WR_E1H, QM_REG_VOQQMASK_4_MSB_EXT_A, 0xc000c0},
- {OP_WR_E1, QM_REG_VOQQMASK_7_LSB, 0x8000},
- {OP_WR_E1H, QM_REG_VOQQMASK_5_LSB, 0x1e001e00},
- {OP_WR_E1, QM_REG_VOQQMASK_7_MSB, 0x8000},
- {OP_WR_E1H, QM_REG_VOQQMASK_5_MSB, 0x1e001e00},
- {OP_WR_E1, QM_REG_VOQQMASK_8_LSB, 0x2000},
- {OP_WR_E1H, QM_REG_VOQQMASK_5_LSB_EXT_A, 0x1e001e00},
- {OP_WR_E1, QM_REG_VOQQMASK_8_MSB, 0x2000},
- {OP_WR_E1H, QM_REG_VOQQMASK_5_MSB_EXT_A, 0x1e001e00},
- {OP_ZR_E1, QM_REG_VOQQMASK_9_LSB, 0x7},
- {OP_WR_E1H, QM_REG_VOQQMASK_6_LSB, 0x40004000},
- {OP_WR_E1H, QM_REG_VOQQMASK_6_MSB, 0x40004000},
- {OP_WR_E1H, QM_REG_VOQQMASK_6_LSB_EXT_A, 0x40004000},
- {OP_WR_E1H, QM_REG_VOQQMASK_6_MSB_EXT_A, 0x40004000},
- {OP_WR_E1H, QM_REG_VOQQMASK_7_LSB, 0x80008000},
- {OP_WR_E1H, QM_REG_VOQQMASK_7_MSB, 0x80008000},
- {OP_WR_E1H, QM_REG_VOQQMASK_7_LSB_EXT_A, 0x80008000},
- {OP_WR_E1H, QM_REG_VOQQMASK_7_MSB_EXT_A, 0x80008000},
- {OP_WR_E1H, QM_REG_VOQQMASK_8_LSB, 0x20002000},
- {OP_WR_E1H, QM_REG_VOQQMASK_8_MSB, 0x20002000},
- {OP_WR_E1H, QM_REG_VOQQMASK_8_LSB_EXT_A, 0x20002000},
- {OP_WR_E1H, QM_REG_VOQQMASK_8_MSB_EXT_A, 0x20002000},
- {OP_ZR_E1H, QM_REG_VOQQMASK_9_LSB, 0x2},
- {OP_WR_E1H, QM_REG_VOQQMASK_9_LSB_EXT_A, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_9_MSB_EXT_A, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_10_LSB, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_10_MSB, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_10_LSB_EXT_A, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_10_MSB_EXT_A, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_11_LSB, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_11_MSB, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_11_LSB_EXT_A, 0x0},
- {OP_WR_E1H, QM_REG_VOQQMASK_11_MSB_EXT_A, 0x0},
- {OP_WR_E1H, QM_REG_VOQPORT_0, 0x0},
- {OP_WR, QM_REG_VOQPORT_1, 0x1},
- {OP_ZR, QM_REG_VOQPORT_2, 0xa},
- {OP_WR, QM_REG_CMINTVOQMASK_0, 0xc08},
- {OP_WR, QM_REG_CMINTVOQMASK_1, 0x40},
- {OP_WR, QM_REG_CMINTVOQMASK_2, 0x100},
- {OP_WR, QM_REG_CMINTVOQMASK_3, 0x20},
- {OP_WR, QM_REG_CMINTVOQMASK_4, 0x17},
- {OP_WR, QM_REG_CMINTVOQMASK_5, 0x80},
- {OP_WR, QM_REG_CMINTVOQMASK_6, 0x200},
- {OP_WR, QM_REG_CMINTVOQMASK_7, 0x0},
- {OP_WR_E1, QM_REG_HWAEMPTYMASK_LSB, 0xffff01ff},
- {OP_WR_E1H, QM_REG_HWAEMPTYMASK_LSB, 0x1ff01ff},
- {OP_WR_E1, QM_REG_HWAEMPTYMASK_MSB, 0xffff01ff},
- {OP_WR_E1H, QM_REG_HWAEMPTYMASK_MSB, 0x1ff01ff},
- {OP_WR_E1H, QM_REG_HWAEMPTYMASK_LSB_EXT_A, 0x1ff01ff},
- {OP_WR_E1H, QM_REG_HWAEMPTYMASK_MSB_EXT_A, 0x1ff01ff},
- {OP_WR, QM_REG_ENBYPVOQMASK, 0x13},
- {OP_WR, QM_REG_VOQCREDITAFULLTHR, 0x13f},
- {OP_WR, QM_REG_VOQINITCREDIT_0, 0x140},
- {OP_WR, QM_REG_VOQINITCREDIT_1, 0x140},
- {OP_ZR, QM_REG_VOQINITCREDIT_2, 0x2},
- {OP_WR, QM_REG_VOQINITCREDIT_4, 0xc0},
- {OP_ZR, QM_REG_VOQINITCREDIT_5, 0x7},
- {OP_WR, QM_REG_TASKCRDCOST_0, 0x48},
- {OP_WR, QM_REG_TASKCRDCOST_1, 0x48},
- {OP_ZR, QM_REG_TASKCRDCOST_2, 0x2},
- {OP_WR, QM_REG_TASKCRDCOST_4, 0x48},
- {OP_ZR, QM_REG_TASKCRDCOST_5, 0x7},
- {OP_WR, QM_REG_BYTECRDINITVAL, 0x8000},
- {OP_WR, QM_REG_BYTECRDCOST, 0x25e4},
- {OP_WR, QM_REG_BYTECREDITAFULLTHR, 0x7fff},
- {OP_WR_E1, QM_REG_ENBYTECRD_LSB, 0x7},
- {OP_WR_E1H, QM_REG_ENBYTECRD_LSB, 0x70007},
- {OP_WR_E1, QM_REG_ENBYTECRD_MSB, 0x7},
- {OP_WR_E1H, QM_REG_ENBYTECRD_MSB, 0x70007},
- {OP_WR_E1H, QM_REG_ENBYTECRD_LSB_EXT_A, 0x70007},
- {OP_WR_E1H, QM_REG_ENBYTECRD_MSB_EXT_A, 0x70007},
- {OP_WR, QM_REG_BYTECRDPORT_LSB, 0x0},
- {OP_WR, QM_REG_BYTECRDPORT_MSB, 0xffffffff},
- {OP_WR_E1, QM_REG_FUNCNUMSEL_LSB, 0x0},
- {OP_WR_E1H, QM_REG_BYTECRDPORT_LSB_EXT_A, 0x0},
- {OP_WR_E1, QM_REG_FUNCNUMSEL_MSB, 0xffffffff},
- {OP_WR_E1H, QM_REG_BYTECRDPORT_MSB_EXT_A, 0xffffffff},
- {OP_WR_E1H, QM_REG_PQ2PCIFUNC_0, 0x0},
- {OP_WR_E1H, QM_REG_PQ2PCIFUNC_1, 0x2},
- {OP_WR_E1H, QM_REG_PQ2PCIFUNC_2, 0x1},
- {OP_WR_E1H, QM_REG_PQ2PCIFUNC_3, 0x3},
- {OP_WR_E1H, QM_REG_PQ2PCIFUNC_4, 0x4},
- {OP_WR_E1H, QM_REG_PQ2PCIFUNC_5, 0x6},
- {OP_WR_E1H, QM_REG_PQ2PCIFUNC_6, 0x5},
- {OP_WR_E1H, QM_REG_PQ2PCIFUNC_7, 0x7},
- {OP_WR, QM_REG_CMINTEN, 0xff},
-#define QM_COMMON_END 1456
-#define PBF_COMMON_START 1456
- {OP_WR, PBF_REG_INIT, 0x1},
- {OP_WR, PBF_REG_INIT_P4, 0x1},
- {OP_WR, PBF_REG_MAC_LB_ENABLE, 0x1},
- {OP_WR, PBF_REG_IF_ENABLE_REG, 0x7fff},
- {OP_WR, PBF_REG_INIT_P4, 0x0},
- {OP_WR, PBF_REG_INIT, 0x0},
- {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P4, 0x0},
-#define PBF_COMMON_END 1463
-#define PBF_PORT0_START 1463
- {OP_WR, PBF_REG_INIT_P0, 0x1},
- {OP_WR, PBF_REG_MAC_IF0_ENABLE, 0x1},
- {OP_WR, PBF_REG_INIT_P0, 0x0},
- {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P0, 0x0},
-#define PBF_PORT0_END 1467
-#define PBF_PORT1_START 1467
- {OP_WR, PBF_REG_INIT_P1, 0x1},
- {OP_WR, PBF_REG_MAC_IF1_ENABLE, 0x1},
- {OP_WR, PBF_REG_INIT_P1, 0x0},
- {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P1, 0x0},
-#define PBF_PORT1_END 1471
-#define XCM_COMMON_START 1471
- {OP_WR, XCM_REG_XX_OVFL_EVNT_ID, 0x32},
- {OP_WR, XCM_REG_XQM_XCM_HDR_P, 0x3150020},
- {OP_WR, XCM_REG_XQM_XCM_HDR_S, 0x3150020},
- {OP_WR, XCM_REG_TM_XCM_HDR, 0x1000030},
- {OP_WR, XCM_REG_ERR_XCM_HDR, 0x8100000},
- {OP_WR, XCM_REG_ERR_EVNT_ID, 0x33},
- {OP_WR, XCM_REG_EXPR_EVNT_ID, 0x30},
- {OP_WR, XCM_REG_STOP_EVNT_ID, 0x31},
- {OP_WR, XCM_REG_STORM_WEIGHT, 0x3},
- {OP_WR, XCM_REG_TSEM_WEIGHT, 0x6},
- {OP_WR, XCM_REG_CSEM_WEIGHT, 0x3},
- {OP_WR, XCM_REG_USEM_WEIGHT, 0x3},
- {OP_WR, XCM_REG_DORQ_WEIGHT, 0x2},
- {OP_WR, XCM_REG_PBF_WEIGHT, 0x0},
- {OP_WR, XCM_REG_NIG0_WEIGHT, 0x2},
- {OP_WR, XCM_REG_CP_WEIGHT, 0x0},
- {OP_WR, XCM_REG_XSDM_WEIGHT, 0x6},
- {OP_WR, XCM_REG_XQM_P_WEIGHT, 0x4},
- {OP_WR, XCM_REG_XQM_S_WEIGHT, 0x2},
- {OP_WR, XCM_REG_TM_WEIGHT, 0x2},
- {OP_WR, XCM_REG_XCM_XQM_USE_Q, 0x1},
- {OP_WR, XCM_REG_XQM_BYP_ACT_UPD, 0x6},
- {OP_WR, XCM_REG_UNA_GT_NXT_Q, 0x0},
- {OP_WR, XCM_REG_AUX1_Q, 0x2},
- {OP_WR, XCM_REG_AUX_CNT_FLG_Q_19, 0x1},
- {OP_WR, XCM_REG_GR_ARB_TYPE, 0x1},
- {OP_WR, XCM_REG_GR_LD0_PR, 0x1},
- {OP_WR, XCM_REG_GR_LD1_PR, 0x2},
- {OP_WR, XCM_REG_CFC_INIT_CRD, 0x1},
- {OP_WR, XCM_REG_FIC0_INIT_CRD, 0x40},
- {OP_WR, XCM_REG_FIC1_INIT_CRD, 0x40},
- {OP_WR, XCM_REG_TM_INIT_CRD, 0x4},
- {OP_WR, XCM_REG_XQM_INIT_CRD, 0x20},
- {OP_WR, XCM_REG_XX_INIT_CRD, 0x2},
- {OP_WR_E1, XCM_REG_XX_MSG_NUM, 0x1f},
- {OP_WR_E1H, XCM_REG_XX_MSG_NUM, 0x20},
- {OP_ZR, XCM_REG_XX_TABLE, 0x12},
- {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02d4},
- {OP_SW_E1H, XCM_REG_XX_DESCR_TABLE, 0x1f0329},
- {OP_WR, XCM_REG_N_SM_CTX_LD_0, 0xf},
- {OP_WR, XCM_REG_N_SM_CTX_LD_1, 0x7},
- {OP_WR, XCM_REG_N_SM_CTX_LD_2, 0xb},
- {OP_WR, XCM_REG_N_SM_CTX_LD_3, 0xe},
- {OP_ZR_E1, XCM_REG_N_SM_CTX_LD_4, 0x4},
- {OP_WR_E1H, XCM_REG_N_SM_CTX_LD_4, 0xe},
- {OP_ZR_E1H, XCM_REG_N_SM_CTX_LD_5, 0x3},
- {OP_WR, XCM_REG_XCM_REG0_SZ, 0x4},
- {OP_WR, XCM_REG_XCM_STORM0_IFEN, 0x1},
- {OP_WR, XCM_REG_XCM_STORM1_IFEN, 0x1},
- {OP_WR, XCM_REG_XCM_XQM_IFEN, 0x1},
- {OP_WR, XCM_REG_STORM_XCM_IFEN, 0x1},
- {OP_WR, XCM_REG_XQM_XCM_IFEN, 0x1},
- {OP_WR, XCM_REG_XSDM_IFEN, 0x1},
- {OP_WR, XCM_REG_TM_XCM_IFEN, 0x1},
- {OP_WR, XCM_REG_XCM_TM_IFEN, 0x1},
- {OP_WR, XCM_REG_TSEM_IFEN, 0x1},
- {OP_WR, XCM_REG_CSEM_IFEN, 0x1},
- {OP_WR, XCM_REG_USEM_IFEN, 0x1},
- {OP_WR, XCM_REG_DORQ_IFEN, 0x1},
- {OP_WR, XCM_REG_PBF_IFEN, 0x1},
- {OP_WR, XCM_REG_NIG0_IFEN, 0x1},
- {OP_WR, XCM_REG_NIG1_IFEN, 0x1},
- {OP_WR, XCM_REG_CDU_AG_WR_IFEN, 0x1},
- {OP_WR, XCM_REG_CDU_AG_RD_IFEN, 0x1},
- {OP_WR, XCM_REG_CDU_SM_WR_IFEN, 0x1},
- {OP_WR, XCM_REG_CDU_SM_RD_IFEN, 0x1},
- {OP_WR, XCM_REG_XCM_CFC_IFEN, 0x1},
-#define XCM_COMMON_END 1538
-#define XCM_PORT0_START 1538
- {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
- {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
- {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
- {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10, 0x0},
- {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD00, 0x2},
- {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD10, 0x2},
- {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
- {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
-#define XCM_PORT0_END 1546
-#define XCM_PORT1_START 1546
- {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
- {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
- {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
- {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11, 0x0},
- {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD01, 0x2},
- {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD11, 0x2},
- {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
- {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
-#define XCM_PORT1_END 1554
-#define XCM_FUNC0_START 1554
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD00, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD10, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
- {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC0_END 1563
-#define XCM_FUNC1_START 1563
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD01, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD11, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
- {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC1_END 1572
-#define XCM_FUNC2_START 1572
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD00, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD10, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
- {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC2_END 1581
-#define XCM_FUNC3_START 1581
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD01, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD11, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
- {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC3_END 1590
-#define XCM_FUNC4_START 1590
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD00, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD10, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
- {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC4_END 1599
-#define XCM_FUNC5_START 1599
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD01, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD11, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
- {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC5_END 1608
-#define XCM_FUNC6_START 1608
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD00, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD10, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
- {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC6_END 1617
-#define XCM_FUNC7_START 1617
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
- {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11, 0x0},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD01, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_CMD11, 0x2},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
- {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
- {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC7_END 1626
-#define XSEM_COMMON_START 1626
- {OP_RD, XSEM_REG_MSG_NUM_FIC0, 0x0},
- {OP_RD, XSEM_REG_MSG_NUM_FIC1, 0x0},
- {OP_RD, XSEM_REG_MSG_NUM_FOC0, 0x0},
- {OP_RD, XSEM_REG_MSG_NUM_FOC1, 0x0},
- {OP_RD, XSEM_REG_MSG_NUM_FOC2, 0x0},
- {OP_RD, XSEM_REG_MSG_NUM_FOC3, 0x0},
- {OP_WR, XSEM_REG_ARB_ELEMENT0, 0x1},
- {OP_WR, XSEM_REG_ARB_ELEMENT1, 0x2},
- {OP_WR, XSEM_REG_ARB_ELEMENT2, 0x3},
- {OP_WR, XSEM_REG_ARB_ELEMENT3, 0x0},
- {OP_WR, XSEM_REG_ARB_ELEMENT4, 0x4},
- {OP_WR, XSEM_REG_ARB_CYCLE_SIZE, 0x1},
- {OP_WR, XSEM_REG_TS_0_AS, 0x0},
- {OP_WR, XSEM_REG_TS_1_AS, 0x1},
- {OP_WR, XSEM_REG_TS_2_AS, 0x4},
- {OP_WR, XSEM_REG_TS_3_AS, 0x0},
- {OP_WR, XSEM_REG_TS_4_AS, 0x1},
- {OP_WR, XSEM_REG_TS_5_AS, 0x3},
- {OP_WR, XSEM_REG_TS_6_AS, 0x0},
- {OP_WR, XSEM_REG_TS_7_AS, 0x1},
- {OP_WR, XSEM_REG_TS_8_AS, 0x4},
- {OP_WR, XSEM_REG_TS_9_AS, 0x0},
- {OP_WR, XSEM_REG_TS_10_AS, 0x1},
- {OP_WR, XSEM_REG_TS_11_AS, 0x3},
- {OP_WR, XSEM_REG_TS_12_AS, 0x0},
- {OP_WR, XSEM_REG_TS_13_AS, 0x1},
- {OP_WR, XSEM_REG_TS_14_AS, 0x4},
- {OP_WR, XSEM_REG_TS_15_AS, 0x0},
- {OP_WR, XSEM_REG_TS_16_AS, 0x4},
- {OP_WR, XSEM_REG_TS_17_AS, 0x3},
- {OP_ZR, XSEM_REG_TS_18_AS, 0x2},
- {OP_WR, XSEM_REG_ENABLE_IN, 0x3fff},
- {OP_WR, XSEM_REG_ENABLE_OUT, 0x3ff},
- {OP_WR, XSEM_REG_FIC0_DISABLE, 0x0},
- {OP_WR, XSEM_REG_FIC1_DISABLE, 0x0},
- {OP_WR, XSEM_REG_PAS_DISABLE, 0x0},
- {OP_WR, XSEM_REG_THREADS_LIST, 0xffff},
- {OP_ZR, XSEM_REG_PASSIVE_BUFFER, 0x800},
- {OP_WR, XSEM_REG_FAST_MEMORY + 0x18bc0, 0x1},
- {OP_WR, XSEM_REG_FAST_MEMORY + 0x18000, 0x0},
- {OP_WR, XSEM_REG_FAST_MEMORY + 0x18040, 0x18},
- {OP_WR, XSEM_REG_FAST_MEMORY + 0x18080, 0xc},
- {OP_WR, XSEM_REG_FAST_MEMORY + 0x180c0, 0x66},
- {OP_WR_ASIC, XSEM_REG_FAST_MEMORY + 0x18300, 0x7a120},
- {OP_WR_EMUL, XSEM_REG_FAST_MEMORY + 0x18300, 0x138},
- {OP_WR_FPGA, XSEM_REG_FAST_MEMORY + 0x18300, 0x1388},
- {OP_WR, XSEM_REG_FAST_MEMORY + 0x183c0, 0x1f4},
- {OP_WR_ASIC, XSEM_REG_FAST_MEMORY + 0x18340, 0x1f4},
- {OP_WR_EMUL, XSEM_REG_FAST_MEMORY + 0x18340, 0x0},
- {OP_WR_FPGA, XSEM_REG_FAST_MEMORY + 0x18340, 0x5},
- {OP_WR_EMUL, XSEM_REG_FAST_MEMORY + 0x18380, 0x4c4b4},
- {OP_WR_ASIC, XSEM_REG_FAST_MEMORY + 0x18380, 0x1dcd6500},
- {OP_WR_EMUL_E1H, XSEM_REG_FAST_MEMORY + 0x11480, 0x0},
- {OP_WR_FPGA, XSEM_REG_FAST_MEMORY + 0x18380, 0x4c4b40},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3d60, 0x4},
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x11480, 0x1},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d60 + 0x10, 0x202f3},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x29c8, 0x4},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3000, 0x48},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29c8 + 0x10, 0x20348},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x1020, 0xc8},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2080, 0x48},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x1000, 0x2},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9020, 0xc8},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3128, 0x8e},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9000, 0x2},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3368, 0x0},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x21a8, 0x86},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202f5},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2000, 0x20},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402f7},
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x23c8, 0x0},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3e20, 0x202fb},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x23d0, 0x2034a},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1518, 0x1},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2498, 0x4034c},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1830, 0x0},
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2c20, 0x0},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1838, 0x0},
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2c10, 0x0},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202fd},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2c08, 0x20350},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4ac0, 0x2},
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3010, 0x1},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b00, 0x4},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x4040, 0x10},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f48, 0x202ff},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x4000, 0x100352},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6ac0, 0x2},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b00, 0x4},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x8408, 0x20362},
- {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x0},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100301},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100364},
- {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80311},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80374},
- {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x2000000},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x80319},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8037c},
- {OP_ZP_E1, XSEM_REG_INT_TABLE, 0xb50000},
- {OP_ZP_E1H, XSEM_REG_INT_TABLE, 0xbd0000},
- {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130321},
- {OP_WR_64_E1H, XSEM_REG_INT_TABLE + 0x3a8, 0xb0384},
- {OP_ZP_E1, XSEM_REG_PRAM, 0x33660000},
- {OP_ZP_E1H, XSEM_REG_PRAM, 0x34060000},
- {OP_ZP_E1, XSEM_REG_PRAM + 0x8000, 0x38b30cda},
- {OP_ZP_E1H, XSEM_REG_PRAM + 0x8000, 0x37960d02},
- {OP_ZP_E1, XSEM_REG_PRAM + 0x10000, 0x3bb11b07},
- {OP_ZP_E1H, XSEM_REG_PRAM + 0x10000, 0x3bc31ae8},
- {OP_ZP_E1, XSEM_REG_PRAM + 0x18000, 0x2a2629f4},
- {OP_ZP_E1H, XSEM_REG_PRAM + 0x18000, 0x382629d9},
- {OP_WR_64_E1, XSEM_REG_PRAM + 0x1d6c0, 0x45280323},
- {OP_ZP_E1H, XSEM_REG_PRAM + 0x20000, 0x124537e3},
- {OP_WR_64_E1H, XSEM_REG_PRAM + 0x22220, 0x3bbc0386},
-#define XSEM_COMMON_END 1741
-#define XSEM_PORT0_START 1741
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3ba0, 0x14},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc000, 0xfc},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c40, 0x24},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x24a8, 0x14},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x1400, 0xa},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2548, 0x24},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x1450, 0x6},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2668, 0x24},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3378, 0xfc},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2788, 0x24},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b58, 0x0},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x28a8, 0x24},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d78, 0x20325},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa000, 0x28},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d88, 0x100327},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa140, 0xc},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1500, 0x0},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29e0, 0x20388},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1508, 0x1},
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3000, 0x1},
- {OP_ZR, XSEM_REG_FAST_MEMORY + 0x5020, 0x2},
- {OP_ZR, XSEM_REG_FAST_MEMORY + 0x5030, 0x2},
- {OP_ZR, XSEM_REG_FAST_MEMORY + 0x5000, 0x2},
- {OP_ZR, XSEM_REG_FAST_MEMORY + 0x5010, 0x2},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x5040, 0x0},
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x5208, 0x1},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x5048, 0xe},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ac8, 0x2038a},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50b8, 0x1},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b10, 0x42},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x20337},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d20, 0x4},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b10, 0x42},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d20, 0x4},
-#define XSEM_PORT0_END 1775
-#define XSEM_PORT1_START 1775
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3bf0, 0x14},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc3f0, 0xfc},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3cd0, 0x24},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x24f8, 0x14},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x1428, 0xa},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x25d8, 0x24},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x1468, 0x6},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x26f8, 0x24},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3768, 0xfc},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2818, 0x24},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b5c, 0x0},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2938, 0x24},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d80, 0x20339},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa0a0, 0x28},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3dc8, 0x10033b},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa170, 0xc},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1504, 0x0},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29e8, 0x2038c},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x150c, 0x1},
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3004, 0x1},
- {OP_ZR, XSEM_REG_FAST_MEMORY + 0x5028, 0x2},
- {OP_ZR, XSEM_REG_FAST_MEMORY + 0x5038, 0x2},
- {OP_ZR, XSEM_REG_FAST_MEMORY + 0x5008, 0x2},
- {OP_ZR, XSEM_REG_FAST_MEMORY + 0x5018, 0x2},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x5044, 0x0},
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x520c, 0x1},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x5080, 0xe},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ad0, 0x2038e},
- {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50bc, 0x1},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6c18, 0x42},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2034b},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d30, 0x4},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4c18, 0x42},
- {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d30, 0x4},
-#define XSEM_PORT1_END 1809
-#define XSEM_FUNC0_START 1809
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e0, 0x0},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29f0, 0x100390},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5048, 0xe},
-#define XSEM_FUNC0_END 1812
-#define XSEM_FUNC1_START 1812
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e4, 0x0},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a30, 0x1003a0},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5080, 0xe},
-#define XSEM_FUNC1_END 1815
-#define XSEM_FUNC2_START 1815
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e8, 0x0},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a70, 0x1003b0},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50b8, 0xe},
-#define XSEM_FUNC2_END 1818
-#define XSEM_FUNC3_START 1818
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7ec, 0x0},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2ab0, 0x1003c0},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50f0, 0xe},
-#define XSEM_FUNC3_END 1821
-#define XSEM_FUNC4_START 1821
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f0, 0x0},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2af0, 0x1003d0},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5128, 0xe},
-#define XSEM_FUNC4_END 1824
-#define XSEM_FUNC5_START 1824
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f4, 0x0},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2b30, 0x1003e0},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5160, 0xe},
-#define XSEM_FUNC5_END 1827
-#define XSEM_FUNC6_START 1827
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f8, 0x0},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2b70, 0x1003f0},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5198, 0xe},
-#define XSEM_FUNC6_END 1830
-#define XSEM_FUNC7_START 1830
- {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7fc, 0x0},
- {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2bb0, 0x100400},
- {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x51d0, 0xe},
-#define XSEM_FUNC7_END 1833
-#define CDU_COMMON_START 1833
- {OP_WR, CDU_REG_CDU_CONTROL0, 0x1},
- {OP_WR_E1H, CDU_REG_MF_MODE, 0x1},
- {OP_WR, CDU_REG_CDU_CHK_MASK0, 0x3d000},
- {OP_WR, CDU_REG_CDU_CHK_MASK1, 0x3d},
- {OP_WB_E1, CDU_REG_L1TT, 0x200034d},
- {OP_WB_E1H, CDU_REG_L1TT, 0x2000410},
- {OP_WB_E1, CDU_REG_MATT, 0x20054d},
- {OP_WB_E1H, CDU_REG_MATT, 0x280610},
- {OP_ZR_E1, CDU_REG_MATT + 0x80, 0x2},
- {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6056d},
- {OP_ZR, CDU_REG_MATT + 0xa0, 0x18},
-#define CDU_COMMON_END 1844
-#define DMAE_COMMON_START 1844
- {OP_ZR, DMAE_REG_CMD_MEM, 0xe0},
- {OP_WR, DMAE_REG_CRC16C_INIT, 0x0},
- {OP_WR, DMAE_REG_CRC16T10_INIT, 0x1},
- {OP_WR_E1, DMAE_REG_PXP_REQ_INIT_CRD, 0x1},
- {OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2},
- {OP_WR, DMAE_REG_PCI_IFEN, 0x1},
- {OP_WR, DMAE_REG_GRC_IFEN, 0x1},
-#define DMAE_COMMON_END 1851
-#define PXP_COMMON_START 1851
- {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50573},
- {OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50638},
- {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x50578},
- {OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5063d},
- {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5057d},
- {OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x20, 0x50642},
-#define PXP_COMMON_END 1857
-#define CFC_COMMON_START 1857
- {OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100},
- {OP_WR, CFC_REG_CONTROL0, 0x10},
- {OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff},
- {OP_WR, CFC_REG_INTERFACES, 0x280000},
- {OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a},
- {OP_WR, CFC_REG_INTERFACES, 0x0},
-#define CFC_COMMON_END 1863
-#define HC_COMMON_START 1863
- {OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4},
-#define HC_COMMON_END 1864
-#define HC_PORT0_START 1864
- {OP_WR_E1, HC_REG_CONFIG_0, 0x1080},
- {OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2},
- {OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10},
- {OP_WR_E1, HC_REG_LEADING_EDGE_0, 0xffff},
- {OP_WR_E1, HC_REG_TRAILING_EDGE_0, 0xffff},
- {OP_WR_E1, HC_REG_AGG_INT_0, 0x0},
- {OP_WR_E1, HC_REG_ATTN_IDX, 0x0},
- {OP_ZR_E1, HC_REG_ATTN_BIT, 0x2},
- {OP_WR_E1, HC_REG_VQID_0, 0x2b5},
- {OP_WR_E1, HC_REG_PCI_CONFIG_0, 0x0},
- {OP_ZR_E1, HC_REG_P0_PROD_CONS, 0x4a},
- {OP_WR_E1, HC_REG_INT_MASK, 0x1ffff},
- {OP_ZR_E1, HC_REG_PBA_COMMAND, 0x2},
- {OP_WR_E1, HC_REG_CONFIG_0, 0x1a80},
- {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS, 0x24},
- {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
- {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
- {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_PORT0_END 1882
-#define HC_PORT1_START 1882
- {OP_WR_E1, HC_REG_CONFIG_1, 0x1080},
- {OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2},
- {OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10},
- {OP_WR_E1, HC_REG_LEADING_EDGE_1, 0xffff},
- {OP_WR_E1, HC_REG_TRAILING_EDGE_1, 0xffff},
- {OP_WR_E1, HC_REG_AGG_INT_1, 0x0},
- {OP_WR_E1, HC_REG_ATTN_IDX + 0x4, 0x0},
- {OP_ZR_E1, HC_REG_ATTN_BIT + 0x8, 0x2},
- {OP_WR_E1, HC_REG_VQID_1, 0x2b5},
- {OP_WR_E1, HC_REG_PCI_CONFIG_1, 0x0},
- {OP_ZR_E1, HC_REG_P1_PROD_CONS, 0x4a},
- {OP_WR_E1, HC_REG_INT_MASK + 0x4, 0x1ffff},
- {OP_ZR_E1, HC_REG_PBA_COMMAND + 0x8, 0x2},
- {OP_WR_E1, HC_REG_CONFIG_1, 0x1a80},
- {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x90, 0x24},
- {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
- {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
- {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_PORT1_END 1900
-#define HC_FUNC0_START 1900
- {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
- {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0},
- {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
- {OP_WR_E1H, HC_REG_ATTN_IDX, 0x0},
- {OP_ZR_E1H, HC_REG_ATTN_BIT, 0x2},
- {OP_WR_E1H, HC_REG_VQID_0, 0x2b5},
- {OP_WR_E1H, HC_REG_PCI_CONFIG_0, 0x0},
- {OP_ZR_E1H, HC_REG_P0_PROD_CONS, 0x4a},
- {OP_WR_E1H, HC_REG_INT_MASK, 0x1ffff},
- {OP_ZR_E1H, HC_REG_PBA_COMMAND, 0x2},
- {OP_WR_E1H, HC_REG_CONFIG_0, 0x1a80},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS, 0x24},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC0_END 1915
-#define HC_FUNC1_START 1915
- {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
- {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1},
- {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
- {OP_WR_E1H, HC_REG_ATTN_IDX + 0x4, 0x0},
- {OP_ZR_E1H, HC_REG_ATTN_BIT + 0x8, 0x2},
- {OP_WR_E1H, HC_REG_VQID_1, 0x2b5},
- {OP_WR_E1H, HC_REG_PCI_CONFIG_1, 0x0},
- {OP_ZR_E1H, HC_REG_P1_PROD_CONS, 0x4a},
- {OP_WR_E1H, HC_REG_INT_MASK + 0x4, 0x1ffff},
- {OP_ZR_E1H, HC_REG_PBA_COMMAND + 0x8, 0x2},
- {OP_WR_E1H, HC_REG_CONFIG_1, 0x1a80},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x90, 0x24},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC1_END 1930
-#define HC_FUNC2_START 1930
- {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
- {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2},
- {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
- {OP_WR_E1H, HC_REG_ATTN_IDX, 0x0},
- {OP_ZR_E1H, HC_REG_ATTN_BIT, 0x2},
- {OP_WR_E1H, HC_REG_VQID_0, 0x2b5},
- {OP_WR_E1H, HC_REG_PCI_CONFIG_0, 0x0},
- {OP_ZR_E1H, HC_REG_P0_PROD_CONS, 0x4a},
- {OP_WR_E1H, HC_REG_INT_MASK, 0x1ffff},
- {OP_ZR_E1H, HC_REG_PBA_COMMAND, 0x2},
- {OP_WR_E1H, HC_REG_CONFIG_0, 0x1a80},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS, 0x24},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC2_END 1945
-#define HC_FUNC3_START 1945
- {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
- {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3},
- {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
- {OP_WR_E1H, HC_REG_ATTN_IDX + 0x4, 0x0},
- {OP_ZR_E1H, HC_REG_ATTN_BIT + 0x8, 0x2},
- {OP_WR_E1H, HC_REG_VQID_1, 0x2b5},
- {OP_WR_E1H, HC_REG_PCI_CONFIG_1, 0x0},
- {OP_ZR_E1H, HC_REG_P1_PROD_CONS, 0x4a},
- {OP_WR_E1H, HC_REG_INT_MASK + 0x4, 0x1ffff},
- {OP_ZR_E1H, HC_REG_PBA_COMMAND + 0x8, 0x2},
- {OP_WR_E1H, HC_REG_CONFIG_1, 0x1a80},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x90, 0x24},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC3_END 1960
-#define HC_FUNC4_START 1960
- {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
- {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4},
- {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
- {OP_WR_E1H, HC_REG_ATTN_IDX, 0x0},
- {OP_ZR_E1H, HC_REG_ATTN_BIT, 0x2},
- {OP_WR_E1H, HC_REG_VQID_0, 0x2b5},
- {OP_WR_E1H, HC_REG_PCI_CONFIG_0, 0x0},
- {OP_ZR_E1H, HC_REG_P0_PROD_CONS, 0x4a},
- {OP_WR_E1H, HC_REG_INT_MASK, 0x1ffff},
- {OP_ZR_E1H, HC_REG_PBA_COMMAND, 0x2},
- {OP_WR_E1H, HC_REG_CONFIG_0, 0x1a80},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS, 0x24},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC4_END 1975
-#define HC_FUNC5_START 1975
- {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
- {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5},
- {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
- {OP_WR_E1H, HC_REG_ATTN_IDX + 0x4, 0x0},
- {OP_ZR_E1H, HC_REG_ATTN_BIT + 0x8, 0x2},
- {OP_WR_E1H, HC_REG_VQID_1, 0x2b5},
- {OP_WR_E1H, HC_REG_PCI_CONFIG_1, 0x0},
- {OP_ZR_E1H, HC_REG_P1_PROD_CONS, 0x4a},
- {OP_WR_E1H, HC_REG_INT_MASK + 0x4, 0x1ffff},
- {OP_ZR_E1H, HC_REG_PBA_COMMAND + 0x8, 0x2},
- {OP_WR_E1H, HC_REG_CONFIG_1, 0x1a80},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x90, 0x24},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC5_END 1990
-#define HC_FUNC6_START 1990
- {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
- {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6},
- {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
- {OP_WR_E1H, HC_REG_ATTN_IDX, 0x0},
- {OP_ZR_E1H, HC_REG_ATTN_BIT, 0x2},
- {OP_WR_E1H, HC_REG_VQID_0, 0x2b5},
- {OP_WR_E1H, HC_REG_PCI_CONFIG_0, 0x0},
- {OP_ZR_E1H, HC_REG_P0_PROD_CONS, 0x4a},
- {OP_WR_E1H, HC_REG_INT_MASK, 0x1ffff},
- {OP_ZR_E1H, HC_REG_PBA_COMMAND, 0x2},
- {OP_WR_E1H, HC_REG_CONFIG_0, 0x1a80},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS, 0x24},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC6_END 2005
-#define HC_FUNC7_START 2005
- {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
- {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7},
- {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
- {OP_WR_E1H, HC_REG_ATTN_IDX + 0x4, 0x0},
- {OP_ZR_E1H, HC_REG_ATTN_BIT + 0x8, 0x2},
- {OP_WR_E1H, HC_REG_VQID_1, 0x2b5},
- {OP_WR_E1H, HC_REG_PCI_CONFIG_1, 0x0},
- {OP_ZR_E1H, HC_REG_P1_PROD_CONS, 0x4a},
- {OP_WR_E1H, HC_REG_INT_MASK + 0x4, 0x1ffff},
- {OP_ZR_E1H, HC_REG_PBA_COMMAND + 0x8, 0x2},
- {OP_WR_E1H, HC_REG_CONFIG_1, 0x1a80},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x90, 0x24},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
- {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC7_END 2020
-#define PXP2_COMMON_START 2020
- {OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1},
- {OP_WR, PXP2_REG_PGL_CONTROL0, 0xe38340},
- {OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10},
- {OP_WR_E1H, PXP2_REG_RQ_ELT_DISABLE, 0x1},
- {OP_WR_E1H, PXP2_REG_WR_REV_MODE, 0x0},
- {OP_WR, PXP2_REG_PGL_INT_TSDM_0, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_TSDM_1, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_TSDM_2, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_TSDM_3, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_TSDM_4, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_TSDM_5, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_TSDM_6, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_TSDM_7, 0xffffffff},
- {OP_WR_E1, PXP2_REG_PGL_INT_USDM_1, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_USDM_2, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_USDM_3, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_USDM_4, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_USDM_5, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_USDM_6, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_USDM_7, 0xffffffff},
- {OP_WR_E1H, PXP2_REG_PGL_INT_XSDM_1, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_XSDM_2, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_XSDM_3, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_XSDM_4, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_XSDM_5, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_XSDM_6, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_XSDM_7, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_CSDM_0, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_CSDM_1, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_CSDM_2, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_CSDM_3, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_CSDM_4, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_CSDM_5, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_CSDM_6, 0xffffffff},
- {OP_WR, PXP2_REG_PGL_INT_CSDM_7, 0xffffffff},
- {OP_WR_E1, PXP2_REG_PGL_INT_XSDM_0, 0xffff3330},
- {OP_WR_E1H, PXP2_REG_PGL_INT_XSDM_0, 0xff802000},
- {OP_WR_E1, PXP2_REG_PGL_INT_XSDM_1, 0xffff3340},
- {OP_WR_E1H, PXP2_REG_PGL_INT_USDM_0, 0xf0005000},
- {OP_WR_E1, PXP2_REG_PGL_INT_USDM_0, 0xf0003000},
- {OP_WR_E1H, PXP2_REG_PGL_INT_USDM_1, 0xf0008000},
- {OP_WR, PXP2_REG_RD_MAX_BLKS_VQ6, 0x8},
- {OP_WR, PXP2_REG_RD_MAX_BLKS_VQ9, 0x8},
- {OP_WR, PXP2_REG_RD_MAX_BLKS_VQ10, 0x8},
- {OP_WR, PXP2_REG_RD_MAX_BLKS_VQ11, 0x2},
- {OP_WR, PXP2_REG_RD_MAX_BLKS_VQ17, 0x4},
- {OP_WR, PXP2_REG_RD_MAX_BLKS_VQ18, 0x5},
- {OP_WR, PXP2_REG_RD_MAX_BLKS_VQ19, 0x4},
- {OP_WR, PXP2_REG_RD_MAX_BLKS_VQ22, 0x0},
- {OP_WR, PXP2_REG_RD_START_INIT, 0x1},
- {OP_WR, PXP2_REG_WR_DMAE_TH, 0x3f},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD0, 0x40},
- {OP_WR, PXP2_REG_PSWRQ_BW_ADD1, 0x1808},
- {OP_WR, PXP2_REG_PSWRQ_BW_ADD2, 0x803},
- {OP_WR, PXP2_REG_PSWRQ_BW_ADD3, 0x803},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD4, 0x40},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD5, 0x3},
- {OP_WR, PXP2_REG_PSWRQ_BW_ADD6, 0x803},
- {OP_WR, PXP2_REG_PSWRQ_BW_ADD7, 0x803},
- {OP_WR, PXP2_REG_PSWRQ_BW_ADD8, 0x803},
- {OP_WR, PXP2_REG_PSWRQ_BW_ADD9, 0x10003},
- {OP_WR, PXP2_REG_PSWRQ_BW_ADD10, 0x803},
- {OP_WR, PXP2_REG_PSWRQ_BW_ADD11, 0x803},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD12, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD13, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD14, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD15, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD16, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD17, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD18, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD19, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD20, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD22, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD23, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD24, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD25, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD26, 0x3},
- {OP_WR, PXP2_REG_RQ_BW_RD_ADD27, 0x3},
- {OP_WR, PXP2_REG_PSWRQ_BW_ADD28, 0x2403},
- {OP_WR, PXP2_REG_RQ_BW_WR_ADD29, 0x2f},
- {OP_WR, PXP2_REG_RQ_BW_WR_ADD30, 0x9},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND0, 0x19},
- {OP_WR, PXP2_REG_PSWRQ_BW_UB1, 0x184},
- {OP_WR, PXP2_REG_PSWRQ_BW_UB2, 0x183},
- {OP_WR, PXP2_REG_PSWRQ_BW_UB3, 0x306},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND4, 0x19},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND5, 0x6},
- {OP_WR, PXP2_REG_PSWRQ_BW_UB6, 0x306},
- {OP_WR, PXP2_REG_PSWRQ_BW_UB7, 0x306},
- {OP_WR, PXP2_REG_PSWRQ_BW_UB8, 0x306},
- {OP_WR, PXP2_REG_PSWRQ_BW_UB9, 0xc86},
- {OP_WR, PXP2_REG_PSWRQ_BW_UB10, 0x306},
- {OP_WR, PXP2_REG_PSWRQ_BW_UB11, 0x306},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND12, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND13, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND14, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND15, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND16, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND17, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND18, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND19, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND20, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND22, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND23, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND24, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND25, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND26, 0x6},
- {OP_WR, PXP2_REG_RQ_BW_RD_UBOUND27, 0x6},
- {OP_WR, PXP2_REG_PSWRQ_BW_UB28, 0x306},
- {OP_WR, PXP2_REG_RQ_BW_WR_UBOUND29, 0x13},
- {OP_WR, PXP2_REG_RQ_BW_WR_UBOUND30, 0x6},
- {OP_WR, PXP2_REG_PSWRQ_BW_L1, 0x1004},
- {OP_WR, PXP2_REG_PSWRQ_BW_L2, 0x1004},
- {OP_WR, PXP2_REG_PSWRQ_BW_RD, 0x106440},
- {OP_WR, PXP2_REG_PSWRQ_BW_WR, 0x106440},
- {OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1},
- {OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1},
-#define PXP2_COMMON_END 2137
-#define MISC_AEU_COMMON_START 2137
- {OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE3_NIG_0, 0x5555},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_NIG_0, 0xf0000000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE1_PXP_0, 0x55540000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE2_PXP_0, 0x55555555},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE3_PXP_0, 0x5555},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_0, 0xf0000000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_1, 0x55540000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_1, 0x55555555},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE3_NIG_1, 0x5555},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_NIG_1, 0xf0000000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE1_PXP_1, 0x0},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE2_PXP_1, 0x10000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE3_PXP_1, 0x5014},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0},
- {OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00},
- {OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3},
-#define MISC_AEU_COMMON_END 2156
-#define MISC_AEU_PORT0_START 2156
- {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000},
- {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff55fff},
- {OP_WR, MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0, 0xffff},
- {OP_WR_E1, MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, 0x500003e0},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, 0xf00003e0},
- {OP_WR, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1, 0x0},
- {OP_WR, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1, 0xa000},
- {OP_ZR, MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1, 0x5},
- {OP_WR, MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2, 0xfe00000},
- {OP_ZR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3, 0x14},
- {OP_ZR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3, 0x7},
- {OP_WR_E1, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4, 0x400},
- {OP_WR_E1, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555},
- {OP_ZR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5, 0x3},
- {OP_WR_E1, MISC_REG_AEU_ENABLE3_NIG_0, 0x5555},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5, 0x1000},
- {OP_WR_E1, MISC_REG_AEU_ENABLE4_NIG_0, 0x0},
- {OP_ZR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6, 0x3},
- {OP_WR_E1, MISC_REG_AEU_ENABLE1_PXP_0, 0x55540000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6, 0x4000},
- {OP_WR_E1, MISC_REG_AEU_ENABLE2_PXP_0, 0x55555555},
- {OP_ZR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7, 0x3},
- {OP_WR_E1, MISC_REG_AEU_ENABLE3_PXP_0, 0x5555},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7, 0x10000},
- {OP_WR_E1, MISC_REG_AEU_ENABLE4_PXP_0, 0x0},
- {OP_ZR_E1H, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x4},
- {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0},
- {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3},
- {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7},
-#define MISC_AEU_PORT0_END 2188
-#define MISC_AEU_PORT1_START 2188
- {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000},
- {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff55fff},
- {OP_WR, MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0, 0xffff},
- {OP_WR_E1, MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, 0x500003e0},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, 0xf00003e0},
- {OP_WR, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1, 0x0},
- {OP_WR, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1, 0xa000},
- {OP_ZR, MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1, 0x5},
- {OP_WR, MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2, 0xfe00000},
- {OP_ZR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3, 0x14},
- {OP_ZR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3, 0x7},
- {OP_WR_E1, MISC_REG_AEU_ENABLE1_NIG_1, 0x55540000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4, 0x800},
- {OP_WR_E1, MISC_REG_AEU_ENABLE2_NIG_1, 0x55555555},
- {OP_ZR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5, 0x3},
- {OP_WR_E1, MISC_REG_AEU_ENABLE3_NIG_1, 0x5555},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5, 0x2000},
- {OP_WR_E1, MISC_REG_AEU_ENABLE4_NIG_1, 0x0},
- {OP_ZR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6, 0x3},
- {OP_WR_E1, MISC_REG_AEU_ENABLE1_PXP_1, 0x55540000},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6, 0x8000},
- {OP_WR_E1, MISC_REG_AEU_ENABLE2_PXP_1, 0x55555555},
- {OP_ZR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7, 0x3},
- {OP_WR_E1, MISC_REG_AEU_ENABLE3_PXP_1, 0x5555},
- {OP_WR_E1H, MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7, 0x20000},
- {OP_WR_E1, MISC_REG_AEU_ENABLE4_PXP_1, 0x0},
- {OP_ZR_E1H, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x4},
- {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0},
- {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3},
- {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7},
-#define MISC_AEU_PORT1_END 2220
-
-};
-
-static const u32 init_data_e1[] = {
- 0x00010000, 0x000204c0, 0x00030980, 0x00040e40, 0x00051300, 0x000617c0,
- 0x00071c80, 0x00082140, 0x00092600, 0x000a2ac0, 0x000b2f80, 0x000c3440,
- 0x000d3900, 0x000e3dc0, 0x000f4280, 0x00104740, 0x00114c00, 0x001250c0,
- 0x00135580, 0x00145a40, 0x00155f00, 0x001663c0, 0x00176880, 0x00186d40,
- 0x00197200, 0x001a76c0, 0x001b7b80, 0x001c8040, 0x001d8500, 0x001e89c0,
- 0x001f8e80, 0x00209340, 0x00002000, 0x00004000, 0x00006000, 0x00008000,
- 0x0000a000, 0x0000c000, 0x0000e000, 0x00010000, 0x00012000, 0x00014000,
- 0x00016000, 0x00018000, 0x0001a000, 0x0001c000, 0x0001e000, 0x00020000,
- 0x00022000, 0x00024000, 0x00026000, 0x00028000, 0x0002a000, 0x0002c000,
- 0x0002e000, 0x00030000, 0x00032000, 0x00034000, 0x00036000, 0x00038000,
- 0x0003a000, 0x0003c000, 0x0003e000, 0x00040000, 0x00042000, 0x00044000,
- 0x00046000, 0x00048000, 0x0004a000, 0x0004c000, 0x0004e000, 0x00050000,
- 0x00052000, 0x00054000, 0x00056000, 0x00058000, 0x0005a000, 0x0005c000,
- 0x0005e000, 0x00060000, 0x00062000, 0x00064000, 0x00066000, 0x00068000,
- 0x0006a000, 0x0006c000, 0x0006e000, 0x00070000, 0x00072000, 0x00074000,
- 0x00076000, 0x00078000, 0x0007a000, 0x0007c000, 0x0007e000, 0x00080000,
- 0x00082000, 0x00084000, 0x00086000, 0x00088000, 0x0008a000, 0x0008c000,
- 0x0008e000, 0x00090000, 0x00092000, 0x00094000, 0x00096000, 0x00098000,
- 0x0009a000, 0x0009c000, 0x0009e000, 0x000a0000, 0x000a2000, 0x000a4000,
- 0x000a6000, 0x000a8000, 0x000aa000, 0x000ac000, 0x000ae000, 0x000b0000,
- 0x000b2000, 0x000b4000, 0x000b6000, 0x000b8000, 0x000ba000, 0x000bc000,
- 0x000be000, 0x000c0000, 0x000c2000, 0x000c4000, 0x000c6000, 0x000c8000,
- 0x000ca000, 0x000cc000, 0x000ce000, 0x000d0000, 0x000d2000, 0x000d4000,
- 0x000d6000, 0x000d8000, 0x000da000, 0x000dc000, 0x000de000, 0x000e0000,
- 0x000e2000, 0x000e4000, 0x000e6000, 0x000e8000, 0x000ea000, 0x000ec000,
- 0x000ee000, 0x000f0000, 0x000f2000, 0x000f4000, 0x000f6000, 0x000f8000,
- 0x000fa000, 0x000fc000, 0x000fe000, 0x00100000, 0x00102000, 0x00104000,
- 0x00106000, 0x00108000, 0x0010a000, 0x0010c000, 0x0010e000, 0x00110000,
- 0x00112000, 0x00114000, 0x00116000, 0x00118000, 0x0011a000, 0x0011c000,
- 0x0011e000, 0x00120000, 0x00122000, 0x00124000, 0x00126000, 0x00128000,
- 0x0012a000, 0x0012c000, 0x0012e000, 0x00130000, 0x00132000, 0x00134000,
- 0x00136000, 0x00138000, 0x0013a000, 0x0013c000, 0x0013e000, 0x00140000,
- 0x00142000, 0x00144000, 0x00146000, 0x00148000, 0x0014a000, 0x0014c000,
- 0x0014e000, 0x00150000, 0x00152000, 0x00154000, 0x00156000, 0x00158000,
- 0x0015a000, 0x0015c000, 0x0015e000, 0x00160000, 0x00162000, 0x00164000,
- 0x00166000, 0x00168000, 0x0016a000, 0x0016c000, 0x0016e000, 0x00170000,
- 0x00172000, 0x00174000, 0x00176000, 0x00178000, 0x0017a000, 0x0017c000,
- 0x0017e000, 0x00180000, 0x00182000, 0x00184000, 0x00186000, 0x00188000,
- 0x0018a000, 0x0018c000, 0x0018e000, 0x00190000, 0x00192000, 0x00194000,
- 0x00196000, 0x00198000, 0x0019a000, 0x0019c000, 0x0019e000, 0x001a0000,
- 0x001a2000, 0x001a4000, 0x001a6000, 0x001a8000, 0x001aa000, 0x001ac000,
- 0x001ae000, 0x001b0000, 0x001b2000, 0x001b4000, 0x001b6000, 0x001b8000,
- 0x001ba000, 0x001bc000, 0x001be000, 0x001c0000, 0x001c2000, 0x001c4000,
- 0x001c6000, 0x001c8000, 0x001ca000, 0x001cc000, 0x001ce000, 0x001d0000,
- 0x001d2000, 0x001d4000, 0x001d6000, 0x001d8000, 0x001da000, 0x001dc000,
- 0x001de000, 0x001e0000, 0x001e2000, 0x001e4000, 0x001e6000, 0x001e8000,
- 0x001ea000, 0x001ec000, 0x001ee000, 0x001f0000, 0x001f2000, 0x001f4000,
- 0x001f6000, 0x001f8000, 0x001fa000, 0x001fc000, 0x001fe000, 0x00200000,
- 0x00202000, 0x00204000, 0x00206000, 0x00208000, 0x0020a000, 0x0020c000,
- 0x0020e000, 0x00210000, 0x00212000, 0x00214000, 0x00216000, 0x00218000,
- 0x0021a000, 0x0021c000, 0x0021e000, 0x00220000, 0x00222000, 0x00224000,
- 0x00226000, 0x00228000, 0x0022a000, 0x0022c000, 0x0022e000, 0x00230000,
- 0x00232000, 0x00234000, 0x00236000, 0x00238000, 0x0023a000, 0x0023c000,
- 0x0023e000, 0x00240000, 0x00242000, 0x00244000, 0x00246000, 0x00248000,
- 0x0024a000, 0x0024c000, 0x0024e000, 0x00250000, 0x00252000, 0x00254000,
- 0x00256000, 0x00258000, 0x0025a000, 0x0025c000, 0x0025e000, 0x00260000,
- 0x00262000, 0x00264000, 0x00266000, 0x00268000, 0x0026a000, 0x0026c000,
- 0x0026e000, 0x00270000, 0x00272000, 0x00274000, 0x00276000, 0x00278000,
- 0x0027a000, 0x0027c000, 0x0027e000, 0x00280000, 0x00282000, 0x00284000,
- 0x00286000, 0x00288000, 0x0028a000, 0x0028c000, 0x0028e000, 0x00290000,
- 0x00292000, 0x00294000, 0x00296000, 0x00298000, 0x0029a000, 0x0029c000,
- 0x0029e000, 0x002a0000, 0x002a2000, 0x002a4000, 0x002a6000, 0x002a8000,
- 0x002aa000, 0x002ac000, 0x002ae000, 0x002b0000, 0x002b2000, 0x002b4000,
- 0x002b6000, 0x002b8000, 0x002ba000, 0x002bc000, 0x002be000, 0x002c0000,
- 0x002c2000, 0x002c4000, 0x002c6000, 0x002c8000, 0x002ca000, 0x002cc000,
- 0x002ce000, 0x002d0000, 0x002d2000, 0x002d4000, 0x002d6000, 0x002d8000,
- 0x002da000, 0x002dc000, 0x002de000, 0x002e0000, 0x002e2000, 0x002e4000,
- 0x002e6000, 0x002e8000, 0x002ea000, 0x002ec000, 0x002ee000, 0x002f0000,
- 0x002f2000, 0x002f4000, 0x002f6000, 0x002f8000, 0x002fa000, 0x002fc000,
- 0x002fe000, 0x00300000, 0x00302000, 0x00304000, 0x00306000, 0x00308000,
- 0x0030a000, 0x0030c000, 0x0030e000, 0x00310000, 0x00312000, 0x00314000,
- 0x00316000, 0x00318000, 0x0031a000, 0x0031c000, 0x0031e000, 0x00320000,
- 0x00322000, 0x00324000, 0x00326000, 0x00328000, 0x0032a000, 0x0032c000,
- 0x0032e000, 0x00330000, 0x00332000, 0x00334000, 0x00336000, 0x00338000,
- 0x0033a000, 0x0033c000, 0x0033e000, 0x00340000, 0x00342000, 0x00344000,
- 0x00346000, 0x00348000, 0x0034a000, 0x0034c000, 0x0034e000, 0x00350000,
- 0x00352000, 0x00354000, 0x00356000, 0x00358000, 0x0035a000, 0x0035c000,
- 0x0035e000, 0x00360000, 0x00362000, 0x00364000, 0x00366000, 0x00368000,
- 0x0036a000, 0x0036c000, 0x0036e000, 0x00370000, 0x00372000, 0x00374000,
- 0x00376000, 0x00378000, 0x0037a000, 0x0037c000, 0x0037e000, 0x00380000,
- 0x00382000, 0x00384000, 0x00386000, 0x00388000, 0x0038a000, 0x0038c000,
- 0x0038e000, 0x00390000, 0x00392000, 0x00394000, 0x00396000, 0x00398000,
- 0x0039a000, 0x0039c000, 0x0039e000, 0x003a0000, 0x003a2000, 0x003a4000,
- 0x003a6000, 0x003a8000, 0x003aa000, 0x003ac000, 0x003ae000, 0x003b0000,
- 0x003b2000, 0x003b4000, 0x003b6000, 0x003b8000, 0x003ba000, 0x003bc000,
- 0x003be000, 0x003c0000, 0x003c2000, 0x003c4000, 0x003c6000, 0x003c8000,
- 0x003ca000, 0x003cc000, 0x003ce000, 0x003d0000, 0x003d2000, 0x003d4000,
- 0x003d6000, 0x003d8000, 0x003da000, 0x003dc000, 0x003de000, 0x003e0000,
- 0x003e2000, 0x003e4000, 0x003e6000, 0x003e8000, 0x003ea000, 0x003ec000,
- 0x003ee000, 0x003f0000, 0x003f2000, 0x003f4000, 0x003f6000, 0x003f8000,
- 0x003fa000, 0x003fc000, 0x003fe000, 0x003fe001, 0x00000000, 0x000001ff,
- 0x00000200, 0x00000001, 0x00000003, 0x00bebc20, 0x00000003, 0x00bebc20,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0x00000000, 0x00007ff8, 0x00000000, 0x00003500,
- 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000003,
- 0x00bebc20, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
- 0x00000003, 0x00bebc20, 0x00002000, 0x000040c0, 0x00006180, 0x00008240,
- 0x0000a300, 0x0000c3c0, 0x0000e480, 0x00010540, 0x00012600, 0x000146c0,
- 0x00016780, 0x00018840, 0x0001a900, 0x0001c9c0, 0x0001ea80, 0x00020b40,
- 0x00022c00, 0x00024cc0, 0x00026d80, 0x00028e40, 0x0002af00, 0x0002cfc0,
- 0x0002f080, 0x00031140, 0x00033200, 0x000352c0, 0x00037380, 0x00039440,
- 0x0003b500, 0x0003d5c0, 0x0003f680, 0x00041740, 0x00043800, 0x000458c0,
- 0x00047980, 0x00049a40, 0x00008000, 0x00010380, 0x00018700, 0x00020a80,
- 0x00028e00, 0x00031180, 0x00039500, 0x00041880, 0x00049c00, 0x00051f80,
- 0x0005a300, 0x00062680, 0x0006aa00, 0x00072d80, 0x0007b100, 0x00083480,
- 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280, 0x000ac600, 0x000b4980,
- 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780, 0x000ddb00, 0x00001900,
- 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8,
- 0x00000000, 0x00001500, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x00000000, 0x00007ff8, 0x00000000, 0x00001500, 0x00001000, 0x00002080,
- 0x00003100, 0x00004180, 0x00005200, 0x00006280, 0x00007300, 0x00008380,
- 0x00009400, 0x0000a480, 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680,
- 0x0000f700, 0x00010780, 0x00011800, 0x00012880, 0x00013900, 0x00014980,
- 0x00015a00, 0x00016a80, 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80,
- 0x0001bd00, 0x0001cd80, 0x0001de00, 0x0001ee80, 0x0001ff00, 0x10000000,
- 0x000028ad, 0x00000000, 0x00010001, 0x00350804, 0xccccccc1, 0xffffffff,
- 0xffffffff, 0x7058103c, 0x00000000, 0xcccc0201, 0xcccccccc, 0x00000000,
- 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000,
- 0x00003500, 0x000e01b7, 0x011600d6, 0x0000ffff, 0x00000000, 0x0000ffff,
- 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
- 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
- 0x00000000, 0x00100000, 0x00000000, 0x007201bb, 0x012300f3, 0x0000ffff,
- 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
- 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
- 0x00000000, 0x0000ffff, 0x00000000, 0x00100000, 0x00000000, 0xfffffff3,
- 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
- 0xcdcdcdcd, 0xfffffff1, 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
- 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
- 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7,
- 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c,
- 0xcdcdcdcd, 0xfffffff5, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x310fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1,
- 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
- 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
- 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
- 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
- 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7, 0x30efffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
- 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
- 0xcdcdcdcd, 0xfffffff3, 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
- 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
- 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
- 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa,
- 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c,
- 0xcdcdcdcd, 0xffffff97, 0x056fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
- 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
- 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
- 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
- 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
- 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffff8a, 0x042fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97,
- 0x05cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c,
- 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x300fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1,
- 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
- 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
- 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
- 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
- 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x040fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
- 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
- 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
- 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
- 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
- 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
- 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
- 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
- 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0008cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
- 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c,
- 0xcdcdcdcd, 0x00100000, 0x00070100, 0x00028170, 0x000b8198, 0x00020250,
- 0x00010270, 0x000f0280, 0x00010370, 0x00080000, 0x00080080, 0x00028100,
- 0x000b8128, 0x000201e0, 0x00010200, 0x00070210, 0x00020280, 0x000f0000,
- 0x000800f0, 0x00028170, 0x000b8198, 0x00020250, 0x00010270, 0x000b8280,
- 0x00080338, 0x00100000, 0x00080100, 0x00028180, 0x000b81a8, 0x00020260,
- 0x00018280, 0x000e8298, 0x00080380, 0x00028000, 0x000b8028, 0x000200e0,
- 0x00010100, 0x00008110, 0x00000118, 0xcccccccc, 0xcccccccc, 0xcccccccc,
- 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc,
- 0x00002000, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000
-};
-
-static const u32 init_data_e1h[] = {
- 0x00010000, 0x000204c0, 0x00030980, 0x00040e40, 0x00051300, 0x000617c0,
- 0x00071c80, 0x00082140, 0x00092600, 0x000a2ac0, 0x000b2f80, 0x000c3440,
- 0x000d3900, 0x000e3dc0, 0x000f4280, 0x00104740, 0x00114c00, 0x001250c0,
- 0x00135580, 0x00145a40, 0x00155f00, 0x001663c0, 0x00176880, 0x00186d40,
- 0x00197200, 0x001a76c0, 0x001b7b80, 0x001c8040, 0x001d8500, 0x001e89c0,
- 0x001f8e80, 0x00209340, 0x00002000, 0x00004000, 0x00006000, 0x00008000,
- 0x0000a000, 0x0000c000, 0x0000e000, 0x00010000, 0x00012000, 0x00014000,
- 0x00016000, 0x00018000, 0x0001a000, 0x0001c000, 0x0001e000, 0x00020000,
- 0x00022000, 0x00024000, 0x00026000, 0x00028000, 0x0002a000, 0x0002c000,
- 0x0002e000, 0x00030000, 0x00032000, 0x00034000, 0x00036000, 0x00038000,
- 0x0003a000, 0x0003c000, 0x0003e000, 0x00040000, 0x00042000, 0x00044000,
- 0x00046000, 0x00048000, 0x0004a000, 0x0004c000, 0x0004e000, 0x00050000,
- 0x00052000, 0x00054000, 0x00056000, 0x00058000, 0x0005a000, 0x0005c000,
- 0x0005e000, 0x00060000, 0x00062000, 0x00064000, 0x00066000, 0x00068000,
- 0x0006a000, 0x0006c000, 0x0006e000, 0x00070000, 0x00072000, 0x00074000,
- 0x00076000, 0x00078000, 0x0007a000, 0x0007c000, 0x0007e000, 0x00080000,
- 0x00082000, 0x00084000, 0x00086000, 0x00088000, 0x0008a000, 0x0008c000,
- 0x0008e000, 0x00090000, 0x00092000, 0x00094000, 0x00096000, 0x00098000,
- 0x0009a000, 0x0009c000, 0x0009e000, 0x000a0000, 0x000a2000, 0x000a4000,
- 0x000a6000, 0x000a8000, 0x000aa000, 0x000ac000, 0x000ae000, 0x000b0000,
- 0x000b2000, 0x000b4000, 0x000b6000, 0x000b8000, 0x000ba000, 0x000bc000,
- 0x000be000, 0x000c0000, 0x000c2000, 0x000c4000, 0x000c6000, 0x000c8000,
- 0x000ca000, 0x000cc000, 0x000ce000, 0x000d0000, 0x000d2000, 0x000d4000,
- 0x000d6000, 0x000d8000, 0x000da000, 0x000dc000, 0x000de000, 0x000e0000,
- 0x000e2000, 0x000e4000, 0x000e6000, 0x000e8000, 0x000ea000, 0x000ec000,
- 0x000ee000, 0x000f0000, 0x000f2000, 0x000f4000, 0x000f6000, 0x000f8000,
- 0x000fa000, 0x000fc000, 0x000fe000, 0x00100000, 0x00102000, 0x00104000,
- 0x00106000, 0x00108000, 0x0010a000, 0x0010c000, 0x0010e000, 0x00110000,
- 0x00112000, 0x00114000, 0x00116000, 0x00118000, 0x0011a000, 0x0011c000,
- 0x0011e000, 0x00120000, 0x00122000, 0x00124000, 0x00126000, 0x00128000,
- 0x0012a000, 0x0012c000, 0x0012e000, 0x00130000, 0x00132000, 0x00134000,
- 0x00136000, 0x00138000, 0x0013a000, 0x0013c000, 0x0013e000, 0x00140000,
- 0x00142000, 0x00144000, 0x00146000, 0x00148000, 0x0014a000, 0x0014c000,
- 0x0014e000, 0x00150000, 0x00152000, 0x00154000, 0x00156000, 0x00158000,
- 0x0015a000, 0x0015c000, 0x0015e000, 0x00160000, 0x00162000, 0x00164000,
- 0x00166000, 0x00168000, 0x0016a000, 0x0016c000, 0x0016e000, 0x00170000,
- 0x00172000, 0x00174000, 0x00176000, 0x00178000, 0x0017a000, 0x0017c000,
- 0x0017e000, 0x00180000, 0x00182000, 0x00184000, 0x00186000, 0x00188000,
- 0x0018a000, 0x0018c000, 0x0018e000, 0x00190000, 0x00192000, 0x00194000,
- 0x00196000, 0x00198000, 0x0019a000, 0x0019c000, 0x0019e000, 0x001a0000,
- 0x001a2000, 0x001a4000, 0x001a6000, 0x001a8000, 0x001aa000, 0x001ac000,
- 0x001ae000, 0x001b0000, 0x001b2000, 0x001b4000, 0x001b6000, 0x001b8000,
- 0x001ba000, 0x001bc000, 0x001be000, 0x001c0000, 0x001c2000, 0x001c4000,
- 0x001c6000, 0x001c8000, 0x001ca000, 0x001cc000, 0x001ce000, 0x001d0000,
- 0x001d2000, 0x001d4000, 0x001d6000, 0x001d8000, 0x001da000, 0x001dc000,
- 0x001de000, 0x001e0000, 0x001e2000, 0x001e4000, 0x001e6000, 0x001e8000,
- 0x001ea000, 0x001ec000, 0x001ee000, 0x001f0000, 0x001f2000, 0x001f4000,
- 0x001f6000, 0x001f8000, 0x001fa000, 0x001fc000, 0x001fe000, 0x00200000,
- 0x00202000, 0x00204000, 0x00206000, 0x00208000, 0x0020a000, 0x0020c000,
- 0x0020e000, 0x00210000, 0x00212000, 0x00214000, 0x00216000, 0x00218000,
- 0x0021a000, 0x0021c000, 0x0021e000, 0x00220000, 0x00222000, 0x00224000,
- 0x00226000, 0x00228000, 0x0022a000, 0x0022c000, 0x0022e000, 0x00230000,
- 0x00232000, 0x00234000, 0x00236000, 0x00238000, 0x0023a000, 0x0023c000,
- 0x0023e000, 0x00240000, 0x00242000, 0x00244000, 0x00246000, 0x00248000,
- 0x0024a000, 0x0024c000, 0x0024e000, 0x00250000, 0x00252000, 0x00254000,
- 0x00256000, 0x00258000, 0x0025a000, 0x0025c000, 0x0025e000, 0x00260000,
- 0x00262000, 0x00264000, 0x00266000, 0x00268000, 0x0026a000, 0x0026c000,
- 0x0026e000, 0x00270000, 0x00272000, 0x00274000, 0x00276000, 0x00278000,
- 0x0027a000, 0x0027c000, 0x0027e000, 0x00280000, 0x00282000, 0x00284000,
- 0x00286000, 0x00288000, 0x0028a000, 0x0028c000, 0x0028e000, 0x00290000,
- 0x00292000, 0x00294000, 0x00296000, 0x00298000, 0x0029a000, 0x0029c000,
- 0x0029e000, 0x002a0000, 0x002a2000, 0x002a4000, 0x002a6000, 0x002a8000,
- 0x002aa000, 0x002ac000, 0x002ae000, 0x002b0000, 0x002b2000, 0x002b4000,
- 0x002b6000, 0x002b8000, 0x002ba000, 0x002bc000, 0x002be000, 0x002c0000,
- 0x002c2000, 0x002c4000, 0x002c6000, 0x002c8000, 0x002ca000, 0x002cc000,
- 0x002ce000, 0x002d0000, 0x002d2000, 0x002d4000, 0x002d6000, 0x002d8000,
- 0x002da000, 0x002dc000, 0x002de000, 0x002e0000, 0x002e2000, 0x002e4000,
- 0x002e6000, 0x002e8000, 0x002ea000, 0x002ec000, 0x002ee000, 0x002f0000,
- 0x002f2000, 0x002f4000, 0x002f6000, 0x002f8000, 0x002fa000, 0x002fc000,
- 0x002fe000, 0x00300000, 0x00302000, 0x00304000, 0x00306000, 0x00308000,
- 0x0030a000, 0x0030c000, 0x0030e000, 0x00310000, 0x00312000, 0x00314000,
- 0x00316000, 0x00318000, 0x0031a000, 0x0031c000, 0x0031e000, 0x00320000,
- 0x00322000, 0x00324000, 0x00326000, 0x00328000, 0x0032a000, 0x0032c000,
- 0x0032e000, 0x00330000, 0x00332000, 0x00334000, 0x00336000, 0x00338000,
- 0x0033a000, 0x0033c000, 0x0033e000, 0x00340000, 0x00342000, 0x00344000,
- 0x00346000, 0x00348000, 0x0034a000, 0x0034c000, 0x0034e000, 0x00350000,
- 0x00352000, 0x00354000, 0x00356000, 0x00358000, 0x0035a000, 0x0035c000,
- 0x0035e000, 0x00360000, 0x00362000, 0x00364000, 0x00366000, 0x00368000,
- 0x0036a000, 0x0036c000, 0x0036e000, 0x00370000, 0x00372000, 0x00374000,
- 0x00376000, 0x00378000, 0x0037a000, 0x0037c000, 0x0037e000, 0x00380000,
- 0x00382000, 0x00384000, 0x00386000, 0x00388000, 0x0038a000, 0x0038c000,
- 0x0038e000, 0x00390000, 0x00392000, 0x00394000, 0x00396000, 0x00398000,
- 0x0039a000, 0x0039c000, 0x0039e000, 0x003a0000, 0x003a2000, 0x003a4000,
- 0x003a6000, 0x003a8000, 0x003aa000, 0x003ac000, 0x003ae000, 0x003b0000,
- 0x003b2000, 0x003b4000, 0x003b6000, 0x003b8000, 0x003ba000, 0x003bc000,
- 0x003be000, 0x003c0000, 0x003c2000, 0x003c4000, 0x003c6000, 0x003c8000,
- 0x003ca000, 0x003cc000, 0x003ce000, 0x003d0000, 0x003d2000, 0x003d4000,
- 0x003d6000, 0x003d8000, 0x003da000, 0x003dc000, 0x003de000, 0x003e0000,
- 0x003e2000, 0x003e4000, 0x003e6000, 0x003e8000, 0x003ea000, 0x003ec000,
- 0x003ee000, 0x003f0000, 0x003f2000, 0x003f4000, 0x003f6000, 0x003f8000,
- 0x003fa000, 0x003fc000, 0x003fe000, 0x003fe001, 0x00000000, 0x000001ff,
- 0x00000200, 0x00000001, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000, 0x00007ff8,
- 0x00000000, 0x00003500, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
- 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
- 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000003,
- 0x00bebc20, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
- 0x00000003, 0x00bebc20, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
- 0xffffffff, 0x00000003, 0x00bebc20, 0xffffffff, 0x00000000, 0xffffffff,
- 0x00000000, 0xffffffff, 0x00000003, 0x00bebc20, 0xffffffff, 0x00000000,
- 0xffffffff, 0x00000000, 0xffffffff, 0x00000003, 0x00bebc20, 0xffffffff,
- 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000003, 0x00bebc20,
- 0x00002000, 0x000040c0, 0x00006180, 0x00008240, 0x0000a300, 0x0000c3c0,
- 0x0000e480, 0x00010540, 0x00012600, 0x000146c0, 0x00016780, 0x00018840,
- 0x0001a900, 0x0001c9c0, 0x0001ea80, 0x00020b40, 0x00022c00, 0x00024cc0,
- 0x00026d80, 0x00028e40, 0x0002af00, 0x0002cfc0, 0x0002f080, 0x00031140,
- 0x00033200, 0x000352c0, 0x00037380, 0x00039440, 0x0003b500, 0x0003d5c0,
- 0x0003f680, 0x00041740, 0x00043800, 0x000458c0, 0x00047980, 0x00049a40,
- 0x00008000, 0x00010380, 0x00018700, 0x00020a80, 0x00028e00, 0x00031180,
- 0x00039500, 0x00041880, 0x00049c00, 0x00051f80, 0x0005a300, 0x00062680,
- 0x0006aa00, 0x00072d80, 0x0007b100, 0x00083480, 0x0008b800, 0x00093b80,
- 0x0009bf00, 0x000a4280, 0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080,
- 0x000cd400, 0x000d5780, 0x000ddb00, 0x00001900, 0x00000028, 0x00100000,
- 0x00000000, 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000,
- 0x00007ff8, 0x00000000, 0x00001500, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x00000000, 0x00007ff8, 0x00000000, 0x00001500, 0x00001000,
- 0x00002080, 0x00003100, 0x00004180, 0x00005200, 0x00006280, 0x00007300,
- 0x00008380, 0x00009400, 0x0000a480, 0x0000b500, 0x0000c580, 0x0000d600,
- 0x0000e680, 0x0000f700, 0x00010780, 0x00011800, 0x00012880, 0x00013900,
- 0x00014980, 0x00015a00, 0x00016a80, 0x00017b00, 0x00018b80, 0x00019c00,
- 0x0001ac80, 0x0001bd00, 0x0001cd80, 0x0001de00, 0x0001ee80, 0x0001ff00,
- 0x10000000, 0x000028ad, 0x00000000, 0x00010001, 0x00350804, 0xccccccc5,
- 0xffffffff, 0xffffffff, 0x7058103c, 0x00000000, 0xcccc0201, 0xcccccccc,
- 0xcccc0201, 0xcccccccc, 0xcccc0201, 0xcccccccc, 0xcccc0201, 0xcccccccc,
- 0xcccc0201, 0xcccccccc, 0xcccc0201, 0xcccccccc, 0xcccc0201, 0xcccccccc,
- 0xcccc0201, 0xcccccccc, 0x00000000, 0xffffffff, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x00000000, 0x00007ff8, 0x00000000, 0x00003500, 0x000e0232, 0x011600d6,
- 0x00100000, 0x00000000, 0x00720236, 0x012300f3, 0x00100000, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000,
- 0x0000ffff, 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x30efffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd,
- 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3,
- 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3,
- 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd,
- 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3,
- 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x302fffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd,
- 0xfffffff3, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3,
- 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd,
- 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3,
- 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd,
- 0xfffffff7, 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3,
- 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x31efffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd,
- 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3,
- 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff,
- 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd,
- 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3,
- 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x056fffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd,
- 0xfffffff5, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3,
- 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd,
- 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3,
- 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3,
- 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd,
- 0xffffff8a, 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3,
- 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x310fffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd,
- 0xfffffff3, 0x316fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3,
- 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x302fffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd,
- 0xfffffff6, 0x30bfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf314, 0xf3cf3cf3,
- 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd,
- 0xfffffff7, 0x31cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3,
- 0x0020cf3c, 0xcdcdcdcd, 0xfffffff0, 0x307fffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd,
- 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3,
- 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd,
- 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3,
- 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd,
- 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3,
- 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd,
- 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3,
- 0x0002cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd,
- 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3,
- 0x0010cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd,
- 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3,
- 0x0000cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd,
- 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3,
- 0x0004cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf3cc, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff,
- 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd,
- 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3,
- 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3,
- 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, 0x00070100,
- 0x00028170, 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, 0x00010370,
- 0x00080000, 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, 0x00010200,
- 0x00070210, 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, 0x000b8198,
- 0x00020250, 0x00010270, 0x000b8280, 0x00080338, 0x00100000, 0x00080100,
- 0x00028180, 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, 0x00080380,
- 0x000b0000, 0x000100b0, 0x000280c0, 0x000580e8, 0x00020140, 0x00010160,
- 0x000e0170, 0x00038250, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc,
- 0x00002000, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000,
- 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x04002000
-};
-
-static const u32 tsem_int_table_data_e1[] = {
- 0x00088b1f, 0x00000000, 0x51fbff00, 0x03f0c0cf, 0x19d9458a, 0x1138fc18,
- 0x5980a1fc, 0xd8181998, 0x88039880, 0x81b8803d, 0x91a18191, 0xdafd7891,
- 0xbf760862, 0x6ec30330, 0x0211e620, 0x1082239a, 0xf354029f, 0x0f5fc806,
- 0x6512b315, 0x3a263860, 0x06a77ef0, 0x298d2ade, 0xc1124536, 0x1e4586de,
- 0x93476f19, 0xca8922ff, 0xff4041df, 0x65296340, 0x229dbe54, 0x04a65e84,
- 0xe4d1a5a1, 0xd7f2a1ed, 0x5192fea1, 0x0dee6ec6, 0xf8003ca8, 0x6065495c,
- 0x00606549
-};
-
-static const u32 tsem_pram_data_e1[] = {
- 0x00088b1f, 0x00000000, 0x7dedff00, 0xd554780b, 0x733ef0b5, 0x49999cce,
- 0x424e4cce, 0x4c22f212, 0x21a08812, 0x8a80af0c, 0x2201277f, 0x282039f5,
- 0x4201d458, 0xd4837908, 0xcdedaf4b, 0x11102484, 0x0547f435, 0x5088768b,
- 0x340da2d1, 0x0ec160d2, 0x6d7b1420, 0xc0faf06f, 0x480bf5ea, 0xb12f3141,
- 0xcbc57e20, 0xe7dad6bf, 0x264ce664, 0xafdbd880, 0xb4fdffff, 0xece7d9b8,
- 0xebdaf7b3, 0x7b5ad7b5, 0x75923ded, 0xc7bf9302, 0x03fc45d8, 0x8c4d4fe5,
- 0xf2c109b1, 0x80f66667, 0xc9b18727, 0x473afebd, 0x6d55633c, 0x9da23b19,
- 0x99ec258c, 0x50281421, 0x2c23d5fe, 0xbfdf8250, 0xf097c365, 0x4219b6ff,
- 0xd0977c3e, 0xc3e611e9, 0x02d5e4fb, 0x933a876b, 0xea0c319c, 0x4b19b98c,
- 0x126b2c64, 0x4b223fa3, 0xe0ff828d, 0x8f392573, 0x27fc34b1, 0x61467574,
- 0xc678c0ae, 0x5c531e32, 0x38a9d0d4, 0x843f7815, 0xbcd2c67f, 0x0731b725,
- 0xbc03cf63, 0xbcf071c9, 0x61de3a5a, 0xc22eefe5, 0x3af3a1df, 0xae58cd6a,
- 0x32773e30, 0x416589a7, 0xf3e33ebf, 0x3d6346ac, 0x4fe3d99b, 0xec3fc346,
- 0x8c517ecc, 0xb1138f30, 0xcb3c018c, 0x632f0e54, 0x5467b2bd, 0x6a9b1f10,
- 0x2c32a258, 0x852f0f58, 0x56338765, 0xed975e03, 0xf8165c16, 0x2f6c39fe,
- 0xbed7e631, 0x15ead66b, 0xa362593e, 0xb50404a1, 0x67eff4e4, 0x11bfcb11,
- 0x2b30fef1, 0xea4ab2a0, 0xaf805595, 0xfb765aaa, 0x67edfe95, 0x54e88933,
- 0x5bf6b929, 0x5adefc61, 0xf1866ae6, 0x02d59997, 0x91fb2fac, 0x7a83e5d3,
- 0x21476366, 0x36d0fa83, 0xc1198981, 0x9e00afef, 0x007f91db, 0xa52b2a1e,
- 0x740642de, 0xb02bddec, 0x871c0ce6, 0x776cbbf7, 0x0c65f306, 0x6658e70c,
- 0x32e73826, 0x74893a41, 0xd2073071, 0xf324bcc1, 0xcea84289, 0xb0f3dc54,
- 0x9de29f7f, 0xe7f4ed87, 0x38e60b49, 0xf25700bd, 0x57a06650, 0x9d3ab5b1,
- 0xea563265, 0xbe442969, 0x8decb538, 0x9e025855, 0xf7c02413, 0x08a78b7c,
- 0x5798a9fb, 0xa780cd9d, 0x8d1859e6, 0xff2db43f, 0x0cf80933, 0xc4c5f3ba,
- 0x76313f21, 0xf04f843c, 0x5a11f1cf, 0xde981c5d, 0xb08b2b97, 0x05f1091f,
- 0x1f011772, 0x26a0423f, 0x544d28e0, 0x29fc573a, 0xd0a0d86b, 0xc3595bfa,
- 0xe50069c6, 0x3f0cbc11, 0x0843ac61, 0x1cbd4be5, 0xb0ff3bba, 0x3c303b94,
- 0x04f3d3ff, 0xab7e1274, 0x1fc8b700, 0xbfd97615, 0xca61f812, 0xf7c0e6ab,
- 0x362a3f80, 0xd40b7e0b, 0xe8dbaf4d, 0x5e26f77d, 0x3765d80f, 0x16d74ff2,
- 0xbdf00ba8, 0x1bf1fda5, 0x155ebf91, 0x3b763359, 0xfa84ca22, 0x3b06a4e5,
- 0xc0750f1b, 0x578db147, 0xbbef362b, 0x692fbf1f, 0xf41b30b3, 0xf17bb157,
- 0x7d42371d, 0xc33361f1, 0x6b5b1526, 0x2fa814e5, 0x582d0bf1, 0xf0e029ef,
- 0xfe8976f9, 0xc7bed0ad, 0x1b389ed1, 0xc6ba73e8, 0x63329cca, 0xc4f6f675,
- 0xa567c059, 0xf8851dfe, 0xc176e95d, 0xf29f6885, 0xc943d6ea, 0xb917af38,
- 0x8b6d977c, 0x7f1d4e66, 0xab64f7f0, 0xbc1ef818, 0x64f3065f, 0xd662efd0,
- 0xb07a8854, 0xd93aeb62, 0xebe20f69, 0xe34a8d74, 0x8bdfc9f4, 0x577c0e30,
- 0xc2e08af9, 0x989a5629, 0x7bb0e517, 0xf3ce6db7, 0x56afce0a, 0xbce3b6ce,
- 0x6ed8cb56, 0x2efe7fb1, 0xfea5cd54, 0xbc2172cd, 0x59b4dd2f, 0xdb910e2e,
- 0x8c836db2, 0x1ac86d70, 0xd7208dec, 0xd7807841, 0x52f583b2, 0x0fa803d4,
- 0x233cbf25, 0x852acf84, 0xb2cbe258, 0x1a73226d, 0xddd388f8, 0x13fc2ef7,
- 0x6bdbe5f0, 0x74eabc27, 0x1aa7d3eb, 0x1aa8cedd, 0xbcdea51d, 0xf8f81119,
- 0xc003a471, 0x827884d3, 0x9f06ad72, 0x97cb3263, 0x9b877fa0, 0x3cbb02a9,
- 0xd999365f, 0xf563067c, 0xd2dd07b0, 0xd99750f5, 0xf582e652, 0x7bb1fa50,
- 0x0493d42a, 0x9f3867ef, 0xc25bc17a, 0x2496abd7, 0x9579e00f, 0xfce0e6c0,
- 0x30d0218f, 0xc0c955af, 0xa6f7809f, 0x8bcf7969, 0x3c7b0682, 0xb17f8bd6,
- 0x39fa8326, 0xa198fd43, 0xf7cbac9f, 0x324e16a3, 0x9616a3f4, 0x8dfcfde8,
- 0x595db1fa, 0x37854d3f, 0x2c29e118, 0x0491fbd5, 0xbf6f5768, 0x37a979b2,
- 0x73c2364f, 0x13f53973, 0xbcc5d53b, 0x1ae7ae0a, 0xb776b03f, 0xed95ef08,
- 0xb0d763b1, 0x5028be50, 0x0edb8005, 0x17ca1252, 0x0ca29331, 0x8eeb187c,
- 0x27a1d433, 0x4175e4f5, 0x958813fd, 0x45397c69, 0x9f90bd26, 0x43faa562,
- 0x85e90a29, 0x5ef06e87, 0x8e20eb83, 0x2ea192cd, 0x11ab477b, 0x14e01af8,
- 0x60961258, 0x7ff52417, 0xf1ccdabf, 0x217a9050, 0x5f7c18fb, 0x8ecdea65,
- 0xa635fa85, 0x6f116bfc, 0xe5eb812e, 0x63559128, 0x230b28bb, 0x233ab4fd,
- 0x085f79ef, 0x00871a9e, 0xbf6085fd, 0x126f668d, 0xdaf8d7f2, 0xc0127c67,
- 0xcc086ab7, 0x12ba0527, 0x60f8099e, 0xa43d5a3d, 0x69f10938, 0x43be4191,
- 0xfc6197c7, 0x06ff1a1c, 0xc71ba3c4, 0xbe3fe84f, 0x61927325, 0x897a6b7c,
- 0x0cab7c61, 0x4fbf03e3, 0xc6d66dad, 0xc0ddfc07, 0x99a134d8, 0x97a4b7c6,
- 0x65abf8d0, 0x3f186256, 0x5bfc6faa, 0x819efe70, 0x83799fe6, 0xa5e9fe71,
- 0x7ccbf9c6, 0x5f6ab3fe, 0xa28fc6d2, 0x5e16cff9, 0x2f4ff3e2, 0xf0b7f3e5,
- 0x77c6fb7e, 0x5effe1f4, 0x8077bf9c, 0x93592df1, 0x5a1ff38d, 0x85bf9c6e,
- 0xbb545f8f, 0xa15f1b53, 0x3b0917f1, 0x9687fcf9, 0xc5b2f8d3, 0x923e42eb,
- 0xfdaaa353, 0x81a44f68, 0x1d260c40, 0x0472a6e5, 0x1fa00518, 0x04990a86,
- 0x9c5143c7, 0x0145ceef, 0x7d4129bf, 0x5120fcc7, 0x352acfa0, 0x9edb2f9e,
- 0xa59ea32f, 0x376889f7, 0x6d453ff1, 0x6c37ad22, 0x1c3fc5bd, 0x136eede0,
- 0x5a2f587d, 0xa45f937f, 0xe5db8ef5, 0xf005c660, 0x1c9e5ff9, 0xaf3a1cd5,
- 0x935172f0, 0x418764f9, 0xbe3c388e, 0x1aa23602, 0x26476be0, 0x9fac1098,
- 0xc60a3d04, 0x7a0e3b2f, 0x6653cb14, 0x009c8f6e, 0x50e4cb3d, 0xf6e5a9b9,
- 0x93bd8f88, 0x5bbaf303, 0xc4a4ff83, 0xb9727df1, 0x9ffc47e0, 0x4654b75b,
- 0x09a8b4fd, 0x7910e98c, 0xd4e8d2af, 0x6fe2dbbb, 0x4e9f0816, 0x243dcfc4,
- 0x97fd8c8a, 0xde2fd766, 0xddf0c830, 0x486fe63c, 0xd70bf682, 0xc2a21fce,
- 0xc7307ffc, 0x53ed1632, 0x13548490, 0x354b31c9, 0xa7316f81, 0x15399d75,
- 0x31ccf72a, 0xb9faec1b, 0x07fd635e, 0xb77ac625, 0xa1366fd9, 0x6044b1bd,
- 0xfbdfa19b, 0xf5ef8daa, 0x71093671, 0x78daae9c, 0x78722777, 0x2c72c3ef,
- 0xae89563e, 0x5bfcabf7, 0x87aa9e1d, 0xeb402ccd, 0x43024765, 0xc812fa3a,
- 0x7caaf35e, 0xdef0e1de, 0x3dbab66f, 0x3ffdcf00, 0xe19f0912, 0x1ebc77f0,
- 0x1d8136ed, 0x4be1b1d7, 0xe1b7da33, 0xff8709f3, 0xf3e11581, 0xf7d46551,
- 0xcfe17df3, 0xf3849f39, 0xfe5b5553, 0xed2113a0, 0x3fbe5a2a, 0x7f0844e8,
- 0x619b6d95, 0xcff12fa8, 0xbc5fb435, 0xfde1be61, 0x8625a6a2, 0x971b0bf7,
- 0x7df3ea1a, 0x7fb4323f, 0xe1ad4560, 0x8fd57dfd, 0xa0ffde18, 0x3ea19d64,
- 0xd0d1bbd7, 0x9b399efe, 0xaf4def0d, 0xe513bc8f, 0x915deea8, 0x5671bae1,
- 0xda38f939, 0xc9156783, 0xb4f8f485, 0xe0e48926, 0xca938d74, 0x5671b6ee,
- 0xc583d72f, 0x5e9c4c0c, 0x71af9ce1, 0x4665ea32, 0xc2aff3fa, 0xea0f9f05,
- 0x849c137f, 0xcc83713f, 0x02c2c002, 0xe3e3eb8b, 0x7de4315e, 0x6fde65c7,
- 0xd71f4100, 0x11d8bdff, 0xf35579f9, 0x046aa1fc, 0xb72821ff, 0xb9d7152c,
- 0x0f7d6d1e, 0x302e5f7f, 0xc673e84f, 0xd79c9256, 0xb94ceb69, 0xf941f5cc,
- 0xf402e4ce, 0x40e5cbbf, 0xc6a5f576, 0xa2a4016b, 0x3ceb449e, 0xa401f901,
- 0x592fc0c0, 0xdceef906, 0x955c1227, 0xaf4013a8, 0xc19e63ae, 0x8133d426,
- 0x8d77e903, 0x49dc3842, 0xa04b6f54, 0x66b3bd75, 0x1213a0fa, 0xe543c7d2,
- 0xa87335a7, 0xa5e3593c, 0x094d44f2, 0xaa6bc795, 0x59a8eca9, 0x35c7e541,
- 0xaa3f2a3e, 0xcff2a469, 0x1e544d35, 0xe540d9ad, 0x2a7e357b, 0x54dc6bbb,
- 0xba2bf9f6, 0xecd78dfe, 0x80afcd55, 0x67ea8795, 0x43d4b9b4, 0x739276db,
- 0xf9ca1257, 0x365ce519, 0x8e67e3da, 0x31996382, 0xf9c2be30, 0xba3a606d,
- 0xf6295ec9, 0xf5c7fd03, 0xe28b6f7f, 0xd899b274, 0xd9a67074, 0xe17fc323,
- 0x7543905a, 0x16065909, 0x7b0cd724, 0xed617e84, 0x8e5d7a43, 0x9bddcc4e,
- 0x71e8133b, 0xe5bf99f2, 0xda75bf61, 0x407517fa, 0x059875a4, 0xbd21779e,
- 0xeb46f042, 0x9aabef5a, 0xfe0cbfbe, 0xd7f0faba, 0xfbfe8e9e, 0xfea54141,
- 0x985ab4cf, 0x13dc7884, 0x67e607ed, 0xef3f0e67, 0x965c7940, 0x444f5264,
- 0x565e83c0, 0x4aa864b4, 0x66d3fae8, 0x12699fac, 0x7ad0b7a6, 0x35998cec,
- 0x554af90a, 0xfa430c4f, 0xf7a95127, 0x56492cb3, 0x9ebc804f, 0xc31596de,
- 0x5f3f6fd7, 0x12c7b707, 0x485f82bf, 0xf0deffed, 0x0e8fd40c, 0xecad630f,
- 0x03ea2b13, 0xedf59778, 0xfd2672fd, 0x6c57e295, 0xda1cf98f, 0x7bf8160b,
- 0x1207e291, 0xab7ef5d5, 0x659f445a, 0x6edf3e34, 0x73be0f18, 0xc5f73eea,
- 0xadf14bcc, 0xc4864ec4, 0xf169d611, 0x366db2c6, 0xc4aeb8d5, 0x6d55ea1a,
- 0x9d61aac9, 0xfc807fc1, 0x42416ab3, 0xb8d729be, 0x1a5247a8, 0xdcaf8005,
- 0xab7ea4e4, 0xc2aede04, 0xe17b21da, 0xa72b5751, 0x9df040db, 0x94ec39ae,
- 0xac4bde40, 0x7c0201e0, 0xa7232d25, 0x6aeb9ea2, 0x7b444bad, 0xf33c4cb0,
- 0xf7c22790, 0x8d024d3b, 0xee6fc4b7, 0x1aa3ae35, 0x57e8307f, 0x167e4e7f,
- 0xd4864e53, 0xcedc4d3b, 0x80f7ef0e, 0xf0e5efd6, 0x48fdb953, 0xfece8de1,
- 0xfb6caa78, 0x1c92f642, 0x82f2ffc1, 0x57f1d278, 0x401cf26e, 0xedba5a7d,
- 0x9fa3d918, 0x0fd97d9a, 0xf53f425f, 0x44929f9f, 0x5e7d5271, 0x6129303e,
- 0x7c5a059c, 0x9ef7f817, 0xfd3d610f, 0x6ccddfec, 0x2ec7c00d, 0x6f782b40,
- 0x13335fd6, 0xa17d4133, 0x5ad05b56, 0xcfdd7146, 0x2a8edc4c, 0xd1071e62,
- 0xa1e53581, 0x4cc5d91d, 0x5d4f11d3, 0xb8c76dec, 0x329c3a10, 0x1667a4c9,
- 0x18ed1a36, 0xf50d7fb0, 0xf58c1bc5, 0x11333662, 0x7af149f5, 0x340bf333,
- 0xf7fbc33f, 0x7fe12a4d, 0x855eab31, 0xf9a4aaeb, 0x26540b23, 0x157c02a5,
- 0x6f3679bf, 0x4c5fc63e, 0xe3d10fdc, 0x38e24b23, 0x7ef1a5fc, 0x3fef0dd9,
- 0xf7771d69, 0x8b06488d, 0x89c6157f, 0x6d730c58, 0xfbf1fd65, 0x870fe16d,
- 0xfbaf5f57, 0x383469c5, 0xdb826dc4, 0x2f7f811f, 0xeba67c68, 0xd5445beb,
- 0x8ddc17e0, 0x21bf10f5, 0xb2e2d446, 0xee911322, 0x67d5aa5d, 0x14f7a18a,
- 0xb4edf7b7, 0xde80eab8, 0x0679373f, 0x4ec81389, 0xed2165c8, 0xd2f26e7e,
- 0xade14ef3, 0x995bb462, 0x3b45c814, 0xe218d614, 0x07e35953, 0x8b91ca31,
- 0x833b66f1, 0x6fe81475, 0x9fa0f841, 0xdb92f655, 0xbbc62e58, 0xbfa1de41,
- 0x87851cc7, 0x94dba805, 0x59fd8656, 0x7cf18c92, 0xaf58d39b, 0x1d207fc2,
- 0x3f8a3046, 0xc2908f45, 0xa86018f0, 0xe32eed0f, 0x84e851f3, 0x201fdd91,
- 0x8b10763e, 0x02556943, 0x26cf8c22, 0xfe7d9d49, 0x7bfa2656, 0x2f8a7e1c,
- 0x2ae5fb40, 0xc5a95f6f, 0x507f97cf, 0x12ee3830, 0x3ceaa8fa, 0x21419068,
- 0x7fbe3d75, 0x71fe625e, 0x3a6f41df, 0xd10d5561, 0xde4d739b, 0xd40f4869,
- 0x009f0893, 0x760d78e3, 0x2a1a9a60, 0xf8c20be7, 0x88e1aae3, 0x5dfd0667,
- 0x2ddef26e, 0xe387f426, 0xea4ffbaa, 0x4882ffd7, 0xfeeabcf3, 0xbfebf587,
- 0x15ff5232, 0x977979bf, 0xf4a9f80f, 0x803a4f57, 0x5267d999, 0xe80ba253,
- 0xc1b5be5e, 0xa9f97281, 0x5d2073e4, 0xd38bf33f, 0x16e7c923, 0xb74f9751,
- 0xff11e422, 0x7e9d1f10, 0xd03bd1e9, 0xd5b73aae, 0x607c52ac, 0x0160259b,
- 0x5d6caca6, 0x9b63e5c2, 0xafaf18e5, 0xb3f902fe, 0x2fa8cdaa, 0x32785f4a,
- 0x40cde311, 0x0e7fcd49, 0xb9507f90, 0x68852fe5, 0x5eb15577, 0xdfe17bd2,
- 0xe3e7f8e1, 0x2f981ec8, 0xaf7ff4c7, 0x6955f3ef, 0x7d6aa978, 0x84910bf4,
- 0xcdfc83a5, 0x9a25f6f0, 0x68a4ffbd, 0xdf38a78f, 0xf9113644, 0xf307c74c,
- 0xeebf7b73, 0x8f73a7c5, 0x5b9d3c01, 0xe421ddfe, 0xd727f284, 0x165a677b,
- 0xe735fcfe, 0x0dd4f2c0, 0xaed4317a, 0x6767d7d6, 0xeca7e69b, 0x39b965e1,
- 0xb03f40e0, 0xe5d9b37c, 0xfcbeba43, 0xc19e2ffc, 0x1607ccb8, 0xbfe870d7,
- 0xef83e5ec, 0xb2f500dd, 0xdbf8e61d, 0x211434f8, 0x2a974831, 0x6c62bbf8,
- 0xbfa50e90, 0x473b283e, 0x8e3fe7f1, 0x6ca3d20b, 0x8d993ec7, 0x298f8fea,
- 0x0ee4fb2d, 0x5a5d0225, 0x3fa2158e, 0x57e2f751, 0xcfafea32, 0xe0d8175e,
- 0xdcf8088c, 0x62ec907c, 0x51d113c4, 0xdd1f53a3, 0x0157dac2, 0xeabf505d,
- 0xff7f0a74, 0xc7fe9a2f, 0xc4399cfe, 0x86bcafcf, 0xb67f28fb, 0x25fe70b8,
- 0xc0e83caf, 0xaa929c79, 0xdb3f5f39, 0x7186e890, 0x44becc4b, 0xbcfe4a95,
- 0x121fb9e4, 0xf23e2dbf, 0x7f8a44c3, 0x89b27730, 0x325f056c, 0xa6d16fce,
- 0x62bf34d9, 0x2bbe25e6, 0xe0f45679, 0x8959e0fe, 0x4111df4d, 0xae24522e,
- 0x5b354f8f, 0xa7654d70, 0x7dc0e170, 0xff45b785, 0x2dff8a56, 0x0fcaf8a5,
- 0xb620fdf5, 0xad67ea8c, 0x885f4e9c, 0xac1abefa, 0xbafca13c, 0xd23b7565,
- 0xf710f4e7, 0x571b8c60, 0xe1a7c931, 0xfd08b843, 0x0da6478a, 0x4e61f4e6,
- 0x0efb4f29, 0x7c14fdf4, 0xcddbed8e, 0xe1ae5b6e, 0x2331763b, 0x6d72fe38,
- 0x0a3b807c, 0x8953d5ed, 0x9845ff60, 0xafa4a15f, 0x85576037, 0x7c8857f0,
- 0xf2df7973, 0x5d6f9708, 0xa633fdde, 0xbebffbe3, 0xbf07e5c3, 0xe0057b43,
- 0xf7a60c0a, 0xa40966fb, 0x102c2c0f, 0x98b7ae49, 0xbe36b935, 0xe004f9d7,
- 0xeddd7096, 0x3fec17e3, 0x764ff08e, 0xf87af16c, 0x565f442e, 0xfe8e78e1,
- 0xbb72e9fd, 0x04ff9358, 0x6cff28c9, 0x81fb9713, 0x8f1f09fd, 0xbffd984b,
- 0x15e50678, 0xaff713e4, 0x7b365fcb, 0x6f9fde70, 0xbddef03f, 0xb79fa720,
- 0xd46a72e8, 0x5a72e19f, 0x8b0273b2, 0x639fa724, 0x3924421a, 0xe511e785,
- 0x3e20e954, 0x4fe947fe, 0x85377f1d, 0x87d74fe1, 0x5c31e103, 0x7459fe1f,
- 0xf087d446, 0xd7961de7, 0xbe74ff9f, 0xf4adf9d3, 0x29431597, 0xa5f3a63e,
- 0x7c7d77ce, 0xbf5df3a9, 0x7f01a378, 0x182defe1, 0x3cb80183, 0x4714cdbb,
- 0xf7c3df28, 0x43bbe17f, 0x4f09e3a9, 0x58c65a6e, 0xf8d4a1d3, 0xac3fbad0,
- 0xded612de, 0x84f7561d, 0xd586f7b5, 0xcbed0dab, 0x3cc80edb, 0x686025af,
- 0x8678ae27, 0x1228327d, 0x5fb9fabf, 0xec85fada, 0x7a50be43, 0x4af37be9,
- 0x3c63b3e6, 0xf148af7c, 0xf1c01e91, 0x67a7182a, 0xf31f867b, 0x3c6c1a24,
- 0xf6a3d4ee, 0x6c5ed03a, 0xdef5e588, 0xea157904, 0xaf79fd3d, 0x7af5c78d,
- 0xd88ebd3c, 0xd8edfb10, 0x76f4911e, 0x8f4d9f87, 0x8ac267e4, 0xc1d47242,
- 0xe3cf7a06, 0x2544d3fd, 0x5fc6057d, 0x8617449a, 0xef6a8a74, 0x419e6071,
- 0x3bac9ecf, 0x45f3c0e7, 0x8db48a6f, 0x7b7683d8, 0x2dc7920c, 0x77f88725,
- 0x53df329f, 0xbf7e3193, 0x4579fb87, 0x11ecc36b, 0xa9f896b6, 0xa32e5958,
- 0xecbf053e, 0x82ff71b8, 0xd6a945cb, 0xe326c95f, 0xdc7bfd7b, 0xbdfb45c1,
- 0xbdf18b74, 0x34078b57, 0x863272eb, 0x71fcd18d, 0xf4d28f1e, 0xe73134f2,
- 0x8f4039ce, 0xc322c39e, 0x7b33fbfd, 0x99c7a244, 0x9ebf7ced, 0x23d7f6e2,
- 0x3e92f77f, 0x89d4f1d4, 0xac0f24f2, 0xfd5f3aaf, 0x9187bcaf, 0x987d766f,
- 0x3b2833fb, 0xfd907d77, 0xe6ffac5b, 0x590ff4fd, 0x5e53f6ff, 0x493ed1b7,
- 0xe276ebcf, 0x7fbd9ef7, 0xeb187f4c, 0x9f142dbb, 0xabfd79ee, 0x9e9fe45c,
- 0xd4129695, 0x80433d77, 0xec1f68fe, 0x83b72afd, 0xa27ad7d6, 0x99251fdb,
- 0xfe7b47db, 0x8f10b1f6, 0xed0acc25, 0x49a3d786, 0xb35eaa9e, 0x67ad3c51,
- 0xa17957ef, 0x9d77ff76, 0x7fb6a54f, 0x736763d9, 0xb17c2276, 0xeaa7df7c,
- 0x5f3fd7b7, 0xad17f98b, 0xf085e4fb, 0xbeefca7e, 0xda3d45c9, 0x43db93b3,
- 0xe78ee6dd, 0x68ee7f70, 0x6695dcfd, 0x0a3773c0, 0x9bac0a55, 0xa014cf0d,
- 0xcbc7f4dc, 0xbc271437, 0xfcf47c52, 0xc10f835f, 0xdd9df5cd, 0xde70156f,
- 0x21fc7f5f, 0x2dd785ea, 0x7cfa97c4, 0x25a96f3f, 0xf372e57b, 0x9c799876,
- 0x799dffe4, 0x992b810b, 0x3ff328f7, 0x2d7fbd37, 0xe12e8939, 0xcf9fd072,
- 0xf5443ef7, 0x822eed97, 0xfdf90af7, 0xf9f27ff6, 0xefba6b7f, 0x2e3bba04,
- 0x7ff2ef3f, 0x4c0f79f2, 0xd7ef37f7, 0x7e62aee8, 0xbeefd54b, 0xadbef821,
- 0x4ffb5b9e, 0xcd03ef2e, 0xd7ebb75f, 0x994d5c98, 0xaf439f18, 0xc569d601,
- 0xc311b33e, 0xcc466b85, 0x14ced154, 0xf52bf6c3, 0xfb99af72, 0x5bee224d,
- 0x086f9c62, 0xcd31f38f, 0x3d2da28f, 0x8a525a18, 0x94958ec9, 0x96bedc55,
- 0xc06eed5c, 0x176b9acb, 0x887728b8, 0xc5ea3d8d, 0x1764da7a, 0xfcc3afc5,
- 0xb9817ac9, 0xa56fb005, 0x8c396f6b, 0x84798dfe, 0xa5c96029, 0xab9618f2,
- 0x59a4b8b5, 0x8f7e0d95, 0xdbac6392, 0x45bac69c, 0x38cacfeb, 0xe624d6fc,
- 0x38b4f8c7, 0x798abf72, 0x8918e26d, 0xb1b75009, 0x17fa1f26, 0xf7e32496,
- 0x1ec0311b, 0x5abdcf12, 0xe1f6f63c, 0x1bbb6c71, 0x44d238f1, 0x2e4a6b71,
- 0xfcb8bc25, 0x8d9e786b, 0x55d788d5, 0x094f88ed, 0x7f6e5ffd, 0x34ccdf91,
- 0xbad2597f, 0xdc984690, 0xcd3b6336, 0x9d2cbe4f, 0xcbd25d38, 0x332d3a35,
- 0x43a745d0, 0xe818fa04, 0xdbf9e3a2, 0x2e9c27d2, 0x6fdff8e1, 0x07fe1276,
- 0x974e97a2, 0x50cdc534, 0xbf9acd5e, 0x49fd1530, 0x1879a7ac, 0xfe6b33ed,
- 0x66ef1482, 0xea93a43e, 0xf42f4862, 0xb7e002db, 0xf3fb7efd, 0x7aea2714,
- 0x081dd8e9, 0x456fd94f, 0x75fc0566, 0x00b3b76f, 0x65f92f7e, 0x5b4b43f4,
- 0x33fb8a45, 0x57779029, 0x6eafbec8, 0xcafd97f7, 0xdd29f34e, 0x06dff169,
- 0xfaee97df, 0xb0ec9724, 0xd7e4bd95, 0x38125ef8, 0xb91d7ddd, 0x2a5bafb8,
- 0x9c23ff71, 0x9e65625f, 0x3bb9d027, 0x6dc60e7a, 0xdf3c6d84, 0x1ee5b4b6,
- 0x7f90c5b3, 0x91dbd666, 0x28bdf05e, 0xc213be50, 0xa9e79a17, 0x2f9d1dfb,
- 0x36be1c0a, 0x76f31fb3, 0xe8edb74b, 0xe953f8c6, 0xc30b8b51, 0xbedb55d2,
- 0x298dfda0, 0xd1d97abf, 0x68b6fe41, 0xf3f43f88, 0xc489b7fb, 0x15ad951f,
- 0x9d4087e4, 0x25b2a2f8, 0x72ebfc72, 0xafd969fe, 0x01f2eef6, 0xfd7ecb0a,
- 0x5be30382, 0x3ab7dba7, 0x76dfffc8, 0x3f5bb8e9, 0x91e5bf3f, 0xa7f9fa4b,
- 0xbfe01ff1, 0xc58720dc, 0x220db649, 0xcbe0047f, 0xadef948b, 0xcbd95bf3,
- 0x38c39f67, 0xcfcee774, 0xcb78439f, 0xe7cbfbff, 0xaf609fd0, 0x88add4db,
- 0xa5de9797, 0xddfe9e38, 0x9dc7992c, 0x817c5fbb, 0x1fdd9fe2, 0x7f01df80,
- 0x4a9ef7ba, 0x7bb27f47, 0x1889d7c7, 0x77be592f, 0xef9c60da, 0x0ca757f2,
- 0x88f41166, 0xfadf225e, 0x1afe20af, 0xad03af4e, 0xe9efc807, 0xdfa37a02,
- 0x69b717d9, 0x3071e0a9, 0xd9af16a7, 0xddce391e, 0x7ad33e2f, 0x8d379dd7,
- 0x72ecd2c7, 0xd3882bb2, 0x1c7403bc, 0xdbf4057d, 0x7fe688fe, 0x175fa646,
- 0xb4e803fe, 0x677e8c2c, 0xfcfd175b, 0x3ad77c19, 0x4d38c068, 0xa6f00ae0,
- 0x27bfd1c7, 0xa3227fbb, 0x25fced7c, 0x6e90c5c5, 0xbb44c1b7, 0x8e9b3e5f,
- 0x9a9f0ffb, 0xe7e7ef7b, 0xc62a2c32, 0xbef74a1b, 0xfdd3f24f, 0x538a11ea,
- 0xdd9e2d33, 0xf0fefacd, 0x5396a3f8, 0xcec5b559, 0xfe1a3be3, 0x74e3fe31,
- 0xce2d73d0, 0x8f58f9c3, 0xf7140cff, 0xde799569, 0xafdf1e88, 0x16a1f2d8,
- 0x4bd2094f, 0x3a748498, 0x08fdc976, 0xe22a0f1d, 0x1c686261, 0xad7c7233,
- 0xd1adde2f, 0xf187e90b, 0x2538becb, 0x6e30d3d4, 0x67de17e5, 0xf741f411,
- 0xecff1e66, 0xa3ce4736, 0xbf9ae3d2, 0xff53970a, 0x2f33c595, 0xc5b7ff07,
- 0x52765f8f, 0xea78e1bf, 0x8f5910bc, 0x75f32dbf, 0xc5f7aaff, 0xbe4305ca,
- 0x7bc9b941, 0xa2b1f904, 0xcfcc98f5, 0x52f3f0a5, 0xb53b7cfa, 0x97ced1bc,
- 0xad778a44, 0x7a40396a, 0xe85f5c3c, 0xf0e1a6fb, 0xade0d09e, 0x44ebe36c,
- 0x5fbb4ee7, 0xf73a7e81, 0xae7e26ef, 0xe28c7edc, 0xfdb6876d, 0x5908ee5a,
- 0xf09d7157, 0xf9dfc087, 0x1e5cbeca, 0xec79e5b7, 0xd19ce3e1, 0x9ed561f4,
- 0x6d54e3c8, 0x4e30c2ff, 0xe645af99, 0xc7a5fdeb, 0xefb92d3b, 0xb74efec1,
- 0x79e6199b, 0xa7116e9e, 0xf5173a47, 0xea6eb6ae, 0xbd707ee7, 0x23fd7ca4,
- 0xf8cc5cfc, 0x1f28a3b7, 0xe991f97e, 0x63c4c61f, 0xfe878409, 0x9f396b5a,
- 0x9dc4feb6, 0x3d6ef48a, 0xeb88af72, 0x6fc42ed4, 0xf79f9d88, 0x3b14f54e,
- 0x8fbc85e8, 0x5d91e33c, 0xc4cdf5c3, 0xfc0326a3, 0x5c60ce30, 0x1fa1ea07,
- 0xf982a73c, 0xe88f086e, 0xf08e9c28, 0xb549aa88, 0x61e7fd09, 0x5e743c56,
- 0xacd7ef40, 0xb003cf1a, 0xd89ce30b, 0x1e4cd24f, 0x8bbe4c47, 0x3cfd3e97,
- 0xbf94714e, 0xa059e60e, 0xf02cd378, 0x7d7d927d, 0xe78641c6, 0xde5126bf,
- 0x78c59676, 0x554bac6e, 0xdfd0267c, 0x596ce4cf, 0xf949d937, 0x4258d707,
- 0x20580ff3, 0x6f51f917, 0x587defd7, 0x7e1a427e, 0x1a88fd02, 0x7e5a597c,
- 0x21d610c2, 0xc9bd7da0, 0x0e27cfce, 0xf84de255, 0xbd9af3fd, 0xec3f9fa9,
- 0xec315e77, 0xcee5e29c, 0xc94fc627, 0x05d43a5f, 0xcf7ab4fc, 0x8f5fcc14,
- 0x477f27bf, 0x12798aaa, 0xe7829e7f, 0xc85fe257, 0xf50c931f, 0x9c1fab13,
- 0xc8fff54a, 0xf50e931f, 0x8dff1a6f, 0xca04cbd7, 0x3d582d57, 0xebf22a7a,
- 0x2cf9cb55, 0xce5188c0, 0x75bc40ef, 0x7bba3e79, 0x1ff31391, 0x5f8e657a,
- 0x4376c1e6, 0xa5bc7126, 0x40ed1f29, 0x75350d3c, 0xfe7f22d4, 0xb157ef7c,
- 0x4c9c17a8, 0xaca8fc25, 0xc1fa455e, 0x2f1749a3, 0x9e174791, 0x93eba64f,
- 0x01f78acd, 0xf991e384, 0xe05ce823, 0xad332533, 0x98cf5f3c, 0x78d4c0c7,
- 0x469167a2, 0x1ad3844f, 0x58bf8f92, 0x5cfcc9f4, 0xafa6934c, 0x9e38aaa6,
- 0x2f172be0, 0xe1edf75e, 0x4be7507a, 0xc8695ced, 0x1087c679, 0x7de29f5e,
- 0xffd092c8, 0x161de33c, 0x9f7c0acf, 0x1d3e7c5c, 0x9f3f5bf9, 0x91ce7e18,
- 0x48a6bd49, 0x76b35ff6, 0x782071ce, 0x2aef563d, 0x77f3371c, 0x41879dce,
- 0xc91746be, 0x1ccd4c2e, 0x827c62bd, 0x5215cfc4, 0x4695439e, 0x971324bf,
- 0xa310f643, 0x8b1ed67d, 0xb9bf46e0, 0xf49541e7, 0x6dd15a77, 0x44f557e4,
- 0x3e60b467, 0xfcc3cf50, 0x4c66295c, 0xd90d1ca3, 0xbf401313, 0x5ce0e40f,
- 0x339c1c98, 0x71c673ae, 0xdb3a2e7b, 0x553fa83a, 0xb00c0feb, 0xe0b9e0c4,
- 0xe5eb911e, 0xc1271a9e, 0xf6f98431, 0xa57efae1, 0x18b56de3, 0xde02fdcf,
- 0xe8bc405f, 0xc0e5ff78, 0xa3457bf4, 0xe8912bdf, 0x74fe7e17, 0x6bfbc5be,
- 0xf8aa7f6c, 0x15742ad0, 0x869dcfc3, 0x6a4ab5d3, 0x78ae88da, 0x682f9ee2,
- 0x36e9d19d, 0x412b857a, 0xe3a0641a, 0xe8efd8eb, 0x10985f3d, 0x4f7e9d5e,
- 0xfdef900f, 0xcf5f0b07, 0x9c4bc7c2, 0x8de291d3, 0x1060feb4, 0xcf58212e,
- 0xf8d1e6b5, 0xde3c4d83, 0x10bdbfd3, 0x7fd03e71, 0x30f2a5e6, 0x2fde5573,
- 0x50fdffe0, 0xf110ffbe, 0xfe22223f, 0x03bc463f, 0xf6c63ffe, 0x4fff8057,
- 0xdd7f852e, 0xf4d32fae, 0x2bee1942, 0xc5fcff01, 0x13313339, 0x15142dc6,
- 0x5b25ffe2, 0xd21f6d45, 0x62725b7d, 0xb92c8f50, 0x199f7abe, 0xb3c970f8,
- 0x9169f102, 0x73fd5e76, 0x784b5ced, 0x1cf15f5e, 0xcbc9e91f, 0x49f9f7e7,
- 0x336d950f, 0xe410bf3e, 0xc7ca74db, 0x93f3f1f5, 0xe11726a2, 0x68a33ff8,
- 0xd628a2fa, 0x0e9c2ed0, 0x864daa43, 0x29e4f03c, 0x9f90fd7d, 0xbe726497,
- 0x3fb5f14f, 0x07c61998, 0xf8fc52dc, 0xbc63e200, 0xcdfb1530, 0xef56e4d7,
- 0xe30ae86f, 0x33e19f3a, 0x6acbfb9e, 0x1bfb9e34, 0x686294de, 0x4c86cd7f,
- 0xfe91fbc3, 0xafef0d6b, 0x50d636db, 0x8372d51f, 0xb6e8fda1, 0x4c7d4302,
- 0xfb4316e0, 0x1a678771, 0xefda13ea, 0x789fb435, 0xfde18174, 0x86a51df5,
- 0xba7e37f7, 0xa9bf50cc, 0xed0d5ff7, 0x5eb88b03, 0x94935fdc, 0x740ae786,
- 0x2a5fecbc, 0xbe7c549b, 0xe6a4db34, 0x744f3e86, 0xccb1733b, 0x357fa373,
- 0xb14cbc90, 0xae85a726, 0x6cccf9cf, 0xac536bd0, 0x55817ac6, 0xe07497e3,
- 0xcdfe2017, 0xa55ca356, 0x026bcf56, 0x2ca59bb3, 0xd2f94619, 0x58b75f20,
- 0xc5bb7eb9, 0xedfa657a, 0xaa7f6c4e, 0xece74032, 0x444be5c3, 0xd2a2caf9,
- 0xcca9fb47, 0x1b96f945, 0x4d3e466d, 0xccaf401a, 0xc3cf1ab3, 0x9b61ea8b,
- 0xfd139064, 0xd0a8f9f9, 0xdbe2c2e7, 0xa0676f28, 0x05a4e57e, 0x7d016eb7,
- 0xa97b2729, 0x9aaf9fd4, 0x97988a63, 0x3bf590f7, 0xfa1b49fa, 0x210f4fd9,
- 0xde92bb7e, 0xccf30a7e, 0x92f5e72a, 0x316c94bf, 0x775825e2, 0x5479e60d,
- 0x508e7e4e, 0x54dcffe5, 0xae2b78d3, 0x173cfaf7, 0x1133307d, 0x06d203f3,
- 0xd55e9ff0, 0x4d5cf1e6, 0xc345adc9, 0x14d53fb0, 0x7a45bf7a, 0xfd10aaa0,
- 0x1e5aa198, 0xc8a653f4, 0xd53f4af9, 0x0a8c19fa, 0x5503ed14, 0xaba0fb21,
- 0x048723f8, 0x9377f8f3, 0x17c8a7f7, 0xe7b7460b, 0x6ecb73f1, 0xabb24cd7,
- 0xd75f8254, 0xe78ae943, 0xe92bd429, 0x66731249, 0xf06787f4, 0x06636f3c,
- 0x338fafbc, 0xf7aa14b9, 0xe59e380a, 0x820d8cc7, 0x5bffeaf1, 0xf7eaf824,
- 0x3cf94f5f, 0xe29aa516, 0x993cfaa7, 0xc7afdfb6, 0xcd26cf8b, 0xd63c418a,
- 0x6e77db5f, 0xacfd6187, 0x7ec76de2, 0xd337aecf, 0x8205f5b8, 0x6f3ebde1,
- 0x741dc369, 0xe15d2b1e, 0x79862ef3, 0x3496dc23, 0x73fd570e, 0xb8234b02,
- 0x43d2f7fe, 0x8aecbfe8, 0x0dbc6176, 0xe889b1f0, 0x6738ceed, 0xf30f27d2,
- 0x34a418aa, 0x25fb21af, 0x96fa1fb2, 0x4cdbaf95, 0xf688959d, 0x4635f254,
- 0x66f5c5b8, 0xc51707f2, 0xe0af942a, 0xd1a159af, 0x55ef38d5, 0x9cef3349,
- 0xbdd4f00b, 0xfd4c3223, 0x2d347671, 0x15df043d, 0xfcbb46b7, 0xef78fece,
- 0x01387272, 0x6e10b7ee, 0xcd214371, 0xdc21da9b, 0x33d8173f, 0xde77c819,
- 0x14ff44e9, 0x977dc313, 0xd3fff870, 0xefc4e3ca, 0x879a5558, 0xeb7c549c,
- 0x16584196, 0xb51ddc91, 0x7fceb80e, 0xfcebe568, 0xeb08d687, 0xfdeee5fc,
- 0x7ed39f28, 0x6dd14b08, 0xeadaecd2, 0x197b50ec, 0xec49d6f4, 0x318baf57,
- 0x7ec5ce3f, 0x3ccbb607, 0x9719d94d, 0x1ed0cbb7, 0xedd3fe43, 0x8d5abfb1,
- 0x9e886476, 0xb3d34afd, 0x9ea15d5f, 0x4eed80bd, 0x68881bdd, 0x91c824c7,
- 0x1b38dcf5, 0x2a957fc7, 0x1be3ca33, 0x6d9f9e20, 0x0e1f6c8b, 0xce5d0eb4,
- 0xd68cc8cf, 0xefd754f9, 0xc560dfdc, 0x9c2fed32, 0x247fe0d9, 0xe329f71b,
- 0xedde5187, 0xaea4fd40, 0xf10d83f8, 0x886de3ab, 0xc04f88cd, 0xf9c6d3a3,
- 0x648b69c4, 0x1e03960f, 0x0e3c064d, 0xd903f6c7, 0xdfad5783, 0x6ee78c4b,
- 0x837c35ba, 0xbc71979e, 0xce3ebbae, 0xe545b8c3, 0xe7a473ec, 0x399c7aaf,
- 0xf74475c6, 0x011c61bf, 0xd77ddfe7, 0xb7716b7f, 0xd5178a0e, 0x0c4d7abd,
- 0x0a2aa7c6, 0x9586f4fa, 0xa49985db, 0x5976bb07, 0x576708dc, 0xece7cf1c,
- 0x10f135da, 0xaf3e39c6, 0x85299c39, 0xaf5d2dd8, 0x4333c91c, 0x8f3b5d7f,
- 0x209449e4, 0xd17e27e7, 0xfc5c57df, 0x128f3ccf, 0xc7d0273c, 0x0ebe566f,
- 0xf8f1b7ad, 0xc798d3c9, 0xf7b19a36, 0xfaf963a3, 0x1c7a6891, 0xe3bf479f,
- 0x7f7ea1f3, 0xc7e57054, 0xdd262b00, 0xab664c55, 0x00a68de5, 0x6e0a8f96,
- 0xbe55e311, 0x4d3cfc91, 0x911b1652, 0x4728fd01, 0x8e0803db, 0x8dd7a3a3,
- 0xa7ea29db, 0x0b2c3eea, 0xc402ef14, 0x2baf5c7b, 0x37fc7d09, 0xa1b38d87,
- 0xe14d215f, 0x7e77b77a, 0x29ef1f94, 0x8fb9344a, 0x6767aa80, 0xc58c2f0e,
- 0xe68b7ff7, 0xd258f2f2, 0x8e5c2689, 0x38a03970, 0x5a669bd4, 0x710d96df,
- 0xef822f27, 0xe7884d97, 0x9805e97e, 0x9639c87d, 0x0967e3c7, 0x00b90714,
- 0x14e78178, 0xb53e02fa, 0x3317cc4b, 0x89b0f416, 0x0bf055b9, 0xee4293ec,
- 0x68ffc1d9, 0x4d60fc76, 0x4d38f6ff, 0x45e0333e, 0xd5db1976, 0x5c0cee09,
- 0xfc81774f, 0xb4fa030d, 0x7ca958d5, 0x184d46ad, 0xa7b8f206, 0xafea8926,
- 0x0ca938d7, 0xe7a08cd4, 0x545c6bfb, 0xa7b5368f, 0x552c7ad3, 0xfe879baf,
- 0x0b12fdf5, 0xbc23b7bd, 0x8e567bc4, 0x97f9c887, 0x0093e62e, 0x3d352fae,
- 0x38d6fb52, 0xc6a7afe2, 0xfd31d400, 0x8ad9ffbf, 0x054d643f, 0xa3fa983d,
- 0x415359c3, 0x78129f8e, 0x181e02fe, 0x552e748e, 0xb7329f2f, 0xf0a9a2c4,
- 0x4df31b6b, 0xc8bcc24d, 0x8e153fde, 0xe8f6cc61, 0x1d4ea69f, 0xfdc7f309,
- 0xf6c614fe, 0xc7abdeda, 0xfba9d873, 0x818bab3f, 0xfc6dc8f9, 0x82899f3e,
- 0x7d4056a7, 0x0bf39b33, 0x98b53c93, 0x3773d9cf, 0xbefee273, 0x1b5ece3e,
- 0xdb7613b7, 0x51399db8, 0xe6c1fce0, 0x411e5aca, 0xbae86e6f, 0xef3ede11,
- 0x8f74d7c0, 0x2fb78c0f, 0x2f5e30d5, 0xfa0711c5, 0xd7ff752e, 0xe95c8eb4,
- 0x491f3d77, 0xc4847cfc, 0xe6ba2bf3, 0xfda66e26, 0xba1f6878, 0x4723da8f,
- 0xd2ded7b6, 0xed0681d6, 0x659fb7be, 0xa68372f0, 0xf5c62457, 0x05d16bff,
- 0x61b947bf, 0x5fda38de, 0x09e2d3aa, 0xe57a3a93, 0x2f7e76f9, 0x13d43e5c,
- 0x3c04784e, 0xd1a5d70f, 0x1e605533, 0x6048ffcc, 0x28028433, 0xdbcf36dd,
- 0xc2d24fd8, 0xd197be4d, 0x7165d85e, 0xe3a3d9a2, 0xd903eeab, 0x6121fa8a,
- 0x8fda31cf, 0x643a13aa, 0x8f97f7a4, 0xcb714d7c, 0x3275733a, 0x1b176dd5,
- 0xbdb4feb0, 0x99d6237a, 0x6fa9ebef, 0xd60c3f00, 0x13e3e1ce, 0x90bb63d4,
- 0x3b5829e8, 0x7e06ef57, 0xf5e665d8, 0x4b391f2f, 0x5ccbb1fc, 0xe18d9f3d,
- 0xef987af5, 0xaa6f58ae, 0x52905409, 0xe1fd65fb, 0xb7aeb09b, 0x77da92ff,
- 0x3f6e54db, 0xa40cde2a, 0x855d5f7f, 0x2ea43dba, 0xf2b33af1, 0xc55f9f52,
- 0x7f29af81, 0xe70f72b8, 0xce1bc447, 0xc3cdef13, 0x7a8ce975, 0xa4eb51d4,
- 0xd73673b8, 0x9f0301d7, 0xb753e7e9, 0x2a2cbc98, 0x085ecf44, 0xce42ff5b,
- 0x84a8b1d9, 0x38b4fe90, 0xbb033a76, 0x6f182b34, 0x2a2e64cd, 0x950f3187,
- 0xee96dbd9, 0xbb2db447, 0x7a89d5af, 0xfad73e2b, 0x68e89780, 0xd09e1ff4,
- 0xe8fae2f4, 0x01a31bfc, 0x7782f59e, 0xf17a888d, 0xe74ab365, 0xb2243a2f,
- 0x35b53b37, 0x5330ad8c, 0xf73b5cbd, 0x5de95e6d, 0xeef51233, 0x19ab9322,
- 0x4f0cb3d7, 0x2fbe19be, 0xaae19ddb, 0xe7bcc165, 0xc496d8b2, 0xef304ab1,
- 0xaa3faf59, 0xb546d9f1, 0xd2f786c9, 0x9543fbe8, 0x71d2eb6d, 0x7db3fb7f,
- 0xbd3c901f, 0x7c41951f, 0x447ad15c, 0xbf69daf6, 0x29f3c64c, 0xd6fad99f,
- 0xbef75a3f, 0x0b74196c, 0xeb2ff5ea, 0x79b8744d, 0x06f43d20, 0x38373fe8,
- 0xac6a87c8, 0x5dc2fe54, 0xde9a2d7d, 0xd3295105, 0xbfb4606c, 0x9156e990,
- 0xb9ade68f, 0x7c0ac1a6, 0x8eac82f4, 0x179b3e44, 0x5b7c4b95, 0xbe722b16,
- 0xdd07b964, 0x0dcfb692, 0x6535d4f5, 0x3c53bed3, 0x6f68c8be, 0x88526d54,
- 0xc80ec00f, 0xdca83e47, 0x7ee1f260, 0x4edac9a2, 0x45e77de7, 0x368fcd31,
- 0xaf5c4f9e, 0x6cdf3be9, 0xdcbddf1e, 0xf392c539, 0x511e7451, 0xe51fbfaf,
- 0x9128be24, 0x55ffb47c, 0xe741de72, 0x38fbbf18, 0xc879c356, 0x0428e3fe,
- 0x794e3e87, 0x57d21c70, 0x27ec1fe5, 0x6b8df83f, 0x4dd67b75, 0x3be5ac71,
- 0x9b05fcb4, 0xf3f6307b, 0x1827a6b4, 0x7780b1fa, 0xeeafa329, 0x1ccf4093,
- 0xf933d3ec, 0x90d3530d, 0x6cf7c1be, 0xb90f4192, 0xc1d3a025, 0xfbeead28,
- 0x51f6e47e, 0xf533ed61, 0xa986ccfa, 0xf3c3fb93, 0xb7582db4, 0x1be97b42,
- 0xe11d5f4b, 0xa1f573a3, 0xe3e9c5fd, 0xe9fd4bdf, 0xb51a97fc, 0xf80fa0f0,
- 0x27ac60ef, 0x7a7cf31f, 0xca273367, 0x8d4966e1, 0x21d08572, 0x37af8a35,
- 0x35afe725, 0xf7883fe0, 0x4ffd035c, 0xfb0d38d5, 0x968b8b8a, 0x549879d1,
- 0xc7cc5e9d, 0xca6d77cf, 0xfe27ec32, 0x16ae387f, 0x7000f70e, 0xc307fd4c,
- 0x5e30c231, 0x031c8e10, 0x870b577c, 0x7a6b3b18, 0x8f6bda06, 0x280f2898,
- 0x66b3e4d1, 0x6ff65da2, 0x0ca78a26, 0x880f3c67, 0xfb0de329, 0x2c7b99b4,
- 0x9efe31f3, 0xd2c3b129, 0xb7279458, 0xa9d716b3, 0x5f64489f, 0x39ab73c1,
- 0x726e870e, 0x204ffde3, 0x665fd495, 0x1f82b7b9, 0x82fbe6f2, 0x70d33efa,
- 0xbde3e595, 0xe7a39ba0, 0xfb8d18df, 0xaa1e7366, 0xe7983cc2, 0x352ff195,
- 0x92bc8330, 0x2c20d21d, 0xabe33b25, 0x701b87a8, 0x147076bf, 0xf6045bb7,
- 0xe4e1c370, 0xbcb9ef48, 0xf0f35b9e, 0x341b8c34, 0x8b5e74af, 0xcdb353fe,
- 0xf31fb2df, 0xe8531e76, 0x47acd6de, 0x50998dfd, 0x6899d717, 0xfd6314bd,
- 0xdf719adb, 0x176cc9fd, 0x68cd11da, 0x47fb60bf, 0x566bf214, 0xf0d338d5,
- 0x6d3a927e, 0x76b2d81d, 0xc1798ab2, 0x87148af5, 0x732addeb, 0x773c5cf5,
- 0x8dd1ee31, 0x51ebd60d, 0x071ab98f, 0x949ebca9, 0x339aeb8c, 0x7b463ad3,
- 0xae68c80f, 0xb1a8a675, 0x6ff0fac1, 0x3d20fac0, 0x9dc1b1b3, 0x2b2efd86,
- 0x883bfad5, 0xe2178a71, 0xef68bc34, 0xd2f4fb33, 0xcbcc06c6, 0xf5836b12,
- 0xaddf30a9, 0x353d72cf, 0xfb47fa18, 0xc3e3e60b, 0x877b057b, 0xca4e744b,
- 0x603b6306, 0x9991b67e, 0x0cd7dfb4, 0x2bef117a, 0xdfc91dee, 0xf234fbeb,
- 0xd3ed1370, 0x6f5c7b60, 0xbdbcc96c, 0x57df833c, 0x8a15db20, 0x307ac037,
- 0x149ef315, 0xf371ffa6, 0x8fb6a4b8, 0x01f78511, 0x75a3f084, 0xf73a32ed,
- 0xdc661d69, 0x8cd3979f, 0x16798975, 0xaa27bfa0, 0x4637e7c6, 0x8341f96b,
- 0x6dda0a58, 0x133e4dc3, 0xfcc91bb6, 0x02c75e54, 0x628d8f64, 0x8d1f541d,
- 0x1e16635e, 0x41d2725b, 0x2dcc71ab, 0x47aa38f2, 0x71313b44, 0xf2471eae,
- 0x74be414e, 0x61766b47, 0x7e3fef0a, 0xf7ca2cc6, 0xcdef69f5, 0x313d8633,
- 0x7eb5e60d, 0x50ceddc6, 0x2f5f4efb, 0x48adfb75, 0x68cbf983, 0xa0bda221,
- 0x364575db, 0xe5984f5e, 0xc6147a7e, 0x26cddb11, 0x58ad9fbc, 0x6f16638b,
- 0xbb444fc8, 0x5cdf68ac, 0x44d787b2, 0xd16cbb73, 0x77b219fa, 0x9af79e14,
- 0xfe7f764f, 0x183109ba, 0xb9b06fa7, 0xfe47ae62, 0xdfbf23fd, 0xb1ed4af5,
- 0x0a6bc4e6, 0xc19b1e50, 0x8ffbc6af, 0xcadd9a0e, 0x758be418, 0x6ccec8c3,
- 0xc7d6f28e, 0xffcdbf42, 0x41d574ea, 0xf73667fd, 0x4e3823f8, 0x8f7f1443,
- 0x6f2126f7, 0x3ff7edf6, 0x1e8c5c2f, 0xfdf0fc78, 0x91b183fe, 0x43bdf1ef,
- 0xfed05fc3, 0xd9dd90f0, 0xdcc4f660, 0xcbbdf087, 0xead679e7, 0x112f5189,
- 0x86b4fb67, 0xd67603e7, 0xe64058b8, 0xbd62d1f7, 0xdd3f68a4, 0x22d25caf,
- 0x097cdc50, 0x3a3d226b, 0xeed5894c, 0xc5e631a7, 0x4dbd1696, 0x795b4bed,
- 0x42d632fb, 0xe17a75ed, 0x8301b5bd, 0xf0ce3a1d, 0xdcd11dfd, 0x78acdd6f,
- 0xfffb431f, 0xc4d91991, 0x1167c04f, 0x07e22df3, 0x31b977d8, 0x371faf9a,
- 0xf6c8db62, 0xb863f128, 0xe05cb87d, 0xc207b3e7, 0x9379d553, 0xde0a3d61,
- 0x7ad3219f, 0x49f9add4, 0xeb7c9387, 0x7ff9846f, 0x244af59f, 0x23bddaa7,
- 0x425f8275, 0x33befaf9, 0x2d1bdeec, 0xa6ba680f, 0xcb43fe25, 0x6b3ef9ef,
- 0xfd0dc379, 0xf402cedc, 0x9eed61dd, 0x48a41412, 0xafd01979, 0x3adc1f41,
- 0xddbf58c2, 0x39dfac65, 0x7d3df229, 0xb4d46b6b, 0xd6aa7bd0, 0xf38e700e,
- 0x575e8d2d, 0x0b12df7f, 0x0b7bebcf, 0xf9815ee9, 0xc17fb494, 0x1b14faf3,
- 0xd91ea0f3, 0xfb46cfbf, 0xbc8a5521, 0x55164319, 0xbde2efe0, 0x9f9d8af0,
- 0x17ff4037, 0x998a05e3, 0x97de0fb7, 0x1251f7c4, 0x3c5a493c, 0x633495c3,
- 0x1b87b7d4, 0x0bca2afd, 0xdc46d896, 0x27e60d77, 0xe8edef1a, 0x0f38fe8a,
- 0xcf9863fd, 0xe84f8c6c, 0x3e7d5a71, 0x2c7f28ff, 0x39cfbb5a, 0x346b6e9c,
- 0xa30ba8f9, 0x39d23d73, 0xb973df07, 0x87bb52f9, 0x43c129ff, 0x26896879,
- 0xb3ac171e, 0x9c528eb9, 0xea0ff344, 0xf306b8b4, 0x56ef5d7d, 0x1095ecc2,
- 0xed7d22ff, 0x39f5f5d7, 0x38f377e4, 0x156a2d88, 0x360dc3ae, 0xfaa66ebd,
- 0x00a6e6d3, 0x5aaffdf9, 0x831f189c, 0x6718ddef, 0x411dcdf3, 0x065d5d7e,
- 0x0e7bc14b, 0xb4cbb0de, 0x6abdd6fe, 0xd8f5c669, 0xdb76f36d, 0xba49f242,
- 0xf9d1efec, 0xf9fd08a6, 0xf90dfd0c, 0xf8c1fd76, 0xfd7788ad, 0xa7f61bfd,
- 0xceff2202, 0x691bf949, 0xa17b1fed, 0x42cbb89e, 0xf7fd4f71, 0xe7cfca0f,
- 0x7a8cfdd1, 0x4e14137f, 0xd51ace87, 0x8e5d8f90, 0xe507377a, 0x67f49da0,
- 0x8320cb45, 0x69f5d4de, 0x681bf9c5, 0x57482cbf, 0xb5212e2d, 0x38aa5e93,
- 0xbc68bfff, 0x349e903b, 0xf7e9124e, 0x3549c781, 0x38e0e3e8, 0xf7700c93,
- 0xc132671f, 0xdac38fa1, 0x9fe70ca2, 0xae3ccceb, 0x33986672, 0x1e823626,
- 0x83714bd1, 0x60ff70c9, 0x62b2e49d, 0x4eca75ff, 0x287f3435, 0xef005f7c,
- 0xf02164c5, 0x867ac1d5, 0xf30b72c1, 0x3d405e52, 0x072213f6, 0x62a5d9a2,
- 0xf8c262bf, 0xc62ef9e9, 0xbdb945fe, 0x33727d10, 0x8bd82df2, 0x7e3b583f,
- 0x49ec0f95, 0x4769ecc9, 0x93764aba, 0x6b68f313, 0x45c51315, 0x64ac92eb,
- 0xca12adff, 0xbd739455, 0x2243a95f, 0x8f7df438, 0x9f29e681, 0xc33cd077,
- 0x9ffb3bef, 0x76a14f1b, 0x3b57eefd, 0x0f93d730, 0x93545bfd, 0x4e5d450b,
- 0x2127d853, 0x47ef0ff4, 0xec4520da, 0x8e9dbc27, 0x6ce901ff, 0x3d2070ee,
- 0x88a5e3ba, 0x951b1079, 0x310798ef, 0x271f4c2f, 0xe00ecd1c, 0x238742f7,
- 0xe5f48323, 0xa5f50546, 0xdc9fccfd, 0xb4cbd24e, 0x7fafadef, 0x56a984e6,
- 0xbc6c4f9e, 0xfe206f57, 0x7d62f1d3, 0xf3c641f3, 0x7f94bd53, 0x483c3972,
- 0xfb43cb82, 0xe2297600, 0x425836be, 0x4938fc86, 0x354fe631, 0x63e21e48,
- 0x56d54df3, 0x5b4927da, 0x927dc569, 0x063392d5, 0x37ded8fd, 0xf983fec1,
- 0xbe793a35, 0x6bff786a, 0xb4661ef0, 0xc4531573, 0x06a379fb, 0x34baafb0,
- 0x77d9acfa, 0xf501fe69, 0x3775c1e1, 0x71524e2a, 0x399b35dd, 0x4bad03bc,
- 0xe103bf7a, 0xfc7dd1eb, 0x392c1bf7, 0xffae3f56, 0x4a35e3e1, 0xbc3e4f1e,
- 0x7078046f, 0xcd31acdf, 0x546df84d, 0xd5fd1525, 0xcc5111e8, 0x656fea1a,
- 0x67f7a2cc, 0xccf1e3e2, 0x8f8c6b22, 0xe3a3979c, 0x1df2b769, 0x186f1ded,
- 0x8f7f0e5f, 0x7c21e8e2, 0xe84ca9d9, 0x91bb97cf, 0xfc381317, 0x5aa6f7a1,
- 0xbc458633, 0x7f1fb27f, 0xde4df186, 0xf7e74664, 0x19b94306, 0xeed56efa,
- 0xb5c0aa43, 0xf2f4fc0b, 0xc2a6147b, 0xe477e6fa, 0xc939405f, 0xcfcfdf6b,
- 0xf026b4d1, 0xefb51e7b, 0x625dc7c3, 0x12c8fee7, 0xcc654abb, 0xf12ff751,
- 0xdb5a37f7, 0x5e789ca3, 0xb22e35b1, 0xd763931f, 0xb49a68f3, 0xb41d0873,
- 0xcfb7a14f, 0xcddcc79b, 0x1b3dc50a, 0x257fe0ad, 0xa9ffb5f4, 0x82023fb6,
- 0xf9dce30b, 0xf99aebdb, 0x7f3e51f9, 0xfb62ab8c, 0x5c8ec4c4, 0x740efc41,
- 0xf70cc6db, 0x3b5855db, 0xe7ce51b2, 0x7699bc9a, 0x68eaa718, 0x5bd62247,
- 0x4e9d3edf, 0xb71d6f7e, 0x47777b37, 0x3fa0dc53, 0x79f96a7d, 0xcc29f3f2,
- 0xdd3fa837, 0xad00a5ca, 0xd3e7e5ab, 0xcd1c6f99, 0x0b8d67db, 0xc2d3bfbe,
- 0xc8b1a4d8, 0x884f1c00, 0x30f4489f, 0x7a064ddb, 0x675e3c80, 0x7881e8d7,
- 0x76b7a3ea, 0x76a74bef, 0xcd71e0bf, 0xf72a615f, 0xa5fbf5d7, 0xe5f7678e,
- 0x5b7dd05b, 0x13f736e5, 0x3b0b4f78, 0xe6be5222, 0xfefc29c2, 0x771685c4,
- 0x0b57da0c, 0x6a1453ff, 0xf3937f69, 0x7f0d22fd, 0xf745cc9d, 0xdad6f80e,
- 0x984ebef8, 0x54f1967f, 0xc23fb8fe, 0x44da8bf7, 0x305f1bca, 0xbf61f145,
- 0xbf05ed79, 0xe2f9fdc4, 0x773a68c6, 0xae7bf0be, 0x2dd76893, 0xc4e6c27c,
- 0xfe597bdf, 0x327e6641, 0xc9fcbbf4, 0xdf07d50c, 0x24f0af45, 0x27a05d59,
- 0x8853db1f, 0x3083457e, 0x0630be7f, 0x0ec7cafc, 0x98b8c2ac, 0xcf94cdf4,
- 0xf47f506d, 0x615677b1, 0x18b7e9bc, 0xf63b26e7, 0x63b442dd, 0xf4d0425f,
- 0xc1c03ffd, 0x5a4d874b, 0xa9aac4fd, 0x4daff2a0, 0x2a293568, 0x562765fa,
- 0xc4d939e1, 0x1318f738, 0xfd8ef78c, 0x6fec42b3, 0xcea313c1, 0xe7f7806d,
- 0xbec817ed, 0xdf693a00, 0x014f370b, 0xb278bbbb, 0x582d975e, 0x86668f8f,
- 0x3339be38, 0xd9da3e38, 0x8b7c7aad, 0x718cdd45, 0xd505e3f7, 0x3fe82453,
- 0xceaea1c3, 0xc4d8c919, 0x1baaf9f5, 0xc476cfc6, 0x98f18cdd, 0x837dbed6,
- 0x1f18d5f8, 0x03de656b, 0xd37f7866, 0xb66e43e3, 0x07e36202, 0x30a8c679,
- 0x29d641ff, 0xf4dfa7ab, 0x4463e41d, 0x0d59f3bf, 0x939204f6, 0xf4d28843,
- 0xc2defc15, 0xe34ef93e, 0x1f57db7c, 0xc3cfc1d1, 0x5e24f5e8, 0x59195d6f,
- 0xc312dc5f, 0x2a35baf9, 0x4d66fbf2, 0x03be003f, 0x8b6d7f14, 0x08cf0ff6,
- 0x1f900fea, 0xa79224eb, 0x807f5c75, 0x93b4cd1c, 0xefec46dc, 0xab6bcadb,
- 0xab79af76, 0xf8c68ee4, 0x30c7ae82, 0xcce38682, 0xfa0d3bb5, 0x017daf39,
- 0x3151d7e9, 0xc9c7872a, 0xc53093f1, 0xb59847a8, 0xdf743b3d, 0x3cda77fe,
- 0xe3d3ae33, 0x7439013f, 0x8ef969df, 0x9f9e4bfc, 0x7f38c72d, 0x9c1dbd88,
- 0x678a54c7, 0xfa17ebf4, 0xbbfd6a73, 0x5f04ae72, 0x15f065ea, 0xdcbaf84f,
- 0x19fd833c, 0xfbfd2fe3, 0xc17db593, 0x223ae326, 0xa4f38647, 0x7f8027ce,
- 0x2cfe8853, 0x9ea9f4d6, 0x6bbdd107, 0x064fe524, 0x31e7e5c7, 0xc4bfbe81,
- 0x81f25ddf, 0x963d453e, 0xc56fb927, 0x71ed79f6, 0xc0efda2a, 0xbf1af56f,
- 0x3a6fc849, 0xaae35b9d, 0xebbe69f3, 0xfa4bb204, 0x3ada41d9, 0xd024d79a,
- 0x9e0d99ef, 0x87e715e7, 0x7beba7c0, 0xeb55bfe8, 0xe7e73c7f, 0xf9f8fb67,
- 0xcd1f943e, 0x0eae679f, 0x1c3d5fba, 0x732885fe, 0xd5eb2109, 0xeb8a36ab,
- 0x5abb78eb, 0x85edd7fe, 0x25d701f2, 0xf8377d07, 0x84e1c068, 0x1e518b76,
- 0x7cb55f50, 0xb664f742, 0x7f0c89b6, 0x8de700b2, 0xd74f237a, 0x8795be93,
- 0x5bb2240a, 0x49d97a82, 0x37ae8bf6, 0xb06b5603, 0x2bb5b9fd, 0xaf51eb0b,
- 0xc7ecf76a, 0xc7c5a08b, 0x7348e106, 0x9277bb70, 0xddfc4597, 0x5dbb1cb8,
- 0x8f3423d7, 0xe80a2b8d, 0x6d2a855e, 0xe1d49d3d, 0x345378a3, 0x104271e2,
- 0x7d7afa1f, 0x15e3a5a7, 0x8197bced, 0xd83af51f, 0xaca69525, 0x7ef8ff60,
- 0xe7df0866, 0x841f3839, 0xeabbe7d3, 0x0f83c027, 0x0d43f0f1, 0xc75009ef,
- 0x04982d7b, 0xfde617eb, 0x6b5c50cc, 0x56e04232, 0x104e3eb8, 0x5d8f5c4d,
- 0x07e22e46, 0x58f7ed31, 0x41cf24ec, 0xe5bb707c, 0xe7168701, 0x3f29eed5,
- 0xa1390268, 0xcf3e367d, 0xd4c95dee, 0xddad6cef, 0x6f2f4bdf, 0x7496f2d6,
- 0xb5e2b17b, 0xf846cdd4, 0x1cbb7f79, 0x47792f1c, 0x8ed063dd, 0x5c51ee19,
- 0xb5e41fe8, 0xf98fdccf, 0xf07bd924, 0x7c820407, 0xe5c2dd9e, 0xc34bb814,
- 0x43db9bcb, 0xd874adfb, 0xbbfb3975, 0x28fb23cc, 0xcba457e7, 0x5a72217d,
- 0xbe3850e3, 0xc4167e60, 0xa3d464a7, 0x83f7f97a, 0xcf1fb3c3, 0x3e039525,
- 0x78eb6d50, 0xbf996d6b, 0x7c7286d2, 0x8e50b994, 0x9be65bbb, 0x315d2146,
- 0xa219cc11, 0x79f3d61f, 0xf02876e8, 0xe5f20b65, 0x1a10c73a, 0x7751f86f,
- 0x1fbc0e74, 0xc613768e, 0x7eec5b7d, 0x0efc23f4, 0xe2072eef, 0x14ef5673,
- 0x8cad4fdc, 0x85b5aeae, 0xdf6bef3c, 0x8f89ed06, 0x1fee22bd, 0xf8c03f95,
- 0x7cdaf1bd, 0x02cfa9a0, 0x20c855e3, 0x0f5459a7, 0x655ade20, 0x3ef7bede,
- 0x5611a5fa, 0xf97bf229, 0x95befd60, 0x536df3f8, 0xfd17d21c, 0xcb93b424,
- 0xee1a4f45, 0x16c1b27b, 0xe003ae17, 0x1fbf1077, 0x06c931b4, 0x4a1efffd,
- 0xc4d3d3d4, 0x6b8e74a5, 0xfb40965a, 0x8f7fba44, 0xa4fd0b7a, 0x4d17bf3b,
- 0x3c2ec8fc, 0xd3e30c38, 0x4e9cd109, 0xeb558fd0, 0x1b1d6ac7, 0x48ffbf94,
- 0x8efe491f, 0x141dbebe, 0xe0e7ea04, 0xd735d74d, 0xd841f885, 0x83ffea1f,
- 0xa5b48cc2, 0x1f9affc8, 0x7c61f647, 0x7b45dd70, 0xce21ac65, 0xebe0d3e6,
- 0x0a3e4748, 0x3ed147b5, 0x7d67b3ed, 0x0cdf57f3, 0xf4e2767a, 0x715e0096,
- 0x6e8acb5f, 0x6fe817fe, 0x36b3d81a, 0x8d3ec3c0, 0xf8660e2f, 0x3d4fdc44,
- 0xfba05c38, 0x79450fdc, 0xe095aa73, 0x5c5a8938, 0x33cb5120, 0xe5e28c4f,
- 0x1eb8620f, 0xaf863cfc, 0xbca837d7, 0x1c88e89e, 0xbdad9847, 0xeb89a9ff,
- 0x91f935f7, 0x684fd1f3, 0xedaaf5fa, 0xfc211cde, 0x8e13eebe, 0xdd7dda09,
- 0xe3c76f94, 0xfe340bfd, 0xe8873b06, 0xf8abb414, 0x876e9d7e, 0x4d9f6d9e,
- 0xf78213f5, 0x2c047bbd, 0x9f904f3d, 0xf148aef4, 0x6f58e2a1, 0x55a5e622,
- 0xcb8e4544, 0xf4f0d20c, 0x38a30d59, 0xcfdc29ef, 0xb7bfe6cb, 0x1b25f585,
- 0x1d7ba49e, 0xe29f994a, 0x1a1b45bd, 0xc8aebf68, 0x80a7302f, 0xc78799fe,
- 0x943a3581, 0x710d116e, 0xb552c5e7, 0xe4139b1c, 0xae1dfc5b, 0x9b25f7d0,
- 0x57ea19c6, 0x1816976f, 0x7a823bf2, 0xbfec90a7, 0x3e757ef0, 0x8f4fb70f,
- 0xf10d38d7, 0x861b05ee, 0x78cdf217, 0x73217ede, 0x36177cd1, 0x11ef1966,
- 0xb2b84c6e, 0x1ec1827b, 0x538445f5, 0x3fa8fd26, 0xf7f80e3f, 0x17f7c485,
- 0xe08c3a7c, 0x7bc38bf8, 0x8e018c5d, 0xc433e668, 0xdff326cf, 0xf8f1b429,
- 0xef8ff023, 0x7e407652, 0x07f577cc, 0x1fc01dae, 0xfa87fdc8, 0xb9937903,
- 0x024d65fe, 0x9c4ce0e5, 0x66cfb46a, 0x6e75f0c7, 0xc1b44c76, 0x679a2b3e,
- 0x6b57376c, 0x776b5737, 0xa84a38b9, 0xa164265d, 0xe9fde33e, 0x71951fbf,
- 0xffca6dfa, 0xedc310dc, 0x6eaa9fd7, 0xbbf40c6f, 0xd0773b56, 0xefdade6f,
- 0xf735ad4a, 0x5e5285e5, 0x1e6fe6e0, 0x09dfc3ac, 0xf601dde9, 0xfcc1e83f,
- 0x7ba0df67, 0x6eeff4ba, 0xb432d15f, 0xa954f008, 0x8bb973c9, 0x15dcafcf,
- 0x3f418790, 0xae1ce529, 0x827d96b4, 0x5a5df214, 0x22b76f09, 0x2ff06cc6,
- 0xffd5a27f, 0x6549c635, 0x1840495a, 0x3652ed06, 0xec24116d, 0x0b577f0f,
- 0x66a949e0, 0x93dfd0fe, 0xedcf194a, 0x6fc7db9a, 0x04e61616, 0x9a68d0f1,
- 0x5c62a391, 0x85da3135, 0xaa47cceb, 0x50f10eb7, 0xce8051fe, 0xa8fc4c49,
- 0xbdbb425d, 0x23d7755b, 0x7078eb7f, 0xae0a6a8a, 0x711fd1a3, 0xfcfa9b38,
- 0x30bebe62, 0x4766a8f6, 0xe976eb37, 0x42edc661, 0x5712a75a, 0x3efc308e,
- 0xb69c6261, 0xef32244e, 0x37da854e, 0xa327a232, 0xccae24f9, 0x2c76e66e,
- 0x2cb4f7a7, 0x3e7c16cf, 0xb1c8f06d, 0xd647df78, 0xbf8480da, 0x26dafe3b,
- 0x51fbc453, 0xc3fc359a, 0xf9c59e3c, 0x9cb8f3e9, 0x99dbd05e, 0xe84f8807,
- 0x3d3d10f2, 0xd8c1dfb2, 0xff3ef4e3, 0xe97bf8d9, 0x997acafe, 0xc74e4f7e,
- 0x69eab77f, 0xb4a6bc41, 0x3708166c, 0xdc7a78da, 0x5ca39f0e, 0xe78d971e,
- 0xfb94073a, 0x16873b15, 0xbd7dca85, 0x1cd9f44c, 0xdbafe796, 0xe1887ff7,
- 0x5890eaeb, 0x4bc43ecf, 0x8a3adb65, 0xc2d92cef, 0x4067f77f, 0xfe215cfc,
- 0x07ee1284, 0xfbe1cf94, 0x4acff95e, 0x7ab2ff44, 0xe5acfbfe, 0xb8f077db,
- 0x0b998fce, 0xcb7943f5, 0x0728a10e, 0x8773077f, 0x9f0428b0, 0xca7ee5a9,
- 0xaec6bf83, 0x5bd71fcb, 0x718efce1, 0x0e53b462, 0xe87eb8d9, 0xb87c57cc,
- 0xf669dc1f, 0xcebb6396, 0xe61768e4, 0xfff8e977, 0xe99b8efd, 0xa77db5dc,
- 0xa3658025, 0x8ac939a9, 0xcad70efb, 0x6eabe42e, 0xe9127bd5, 0x5a792713,
- 0x78d8fbf1, 0xbe16abbb, 0x58d85a75, 0x3c72b955, 0x8fe30c4c, 0x72e63f89,
- 0xf43aa493, 0x24b186bc, 0xeb9daa37, 0x67d7132d, 0x1e7ccc87, 0xf787193b,
- 0xb3ee3107, 0xe3878724, 0xf294dc68, 0xca1f9d00, 0x70d3bd38, 0x5fc830ce,
- 0x1b7dbfee, 0x2c3fff7e, 0x00284ba1, 0x0000284b, 0x00088b1f, 0x00000000,
- 0x7dd5ff00, 0xc5547c7b, 0xbddcf8f5, 0x0dd90afb, 0xdc3bf79b, 0x24280c10,
- 0x01049e6c, 0x878424d9, 0x28026e20, 0x47796bc8, 0xd2026c92, 0x6dfad0fe,
- 0xe5318316, 0x16d46b6b, 0x882ea945, 0x835ab696, 0xb80d06a2, 0x47d62228,
- 0x2d8aa523, 0x2220a5da, 0xfb1b6484, 0x6fcb56c0, 0xec9999ce, 0x0f0d9bde,
- 0x7cf9fb6b, 0xdcc98fe1, 0xe6739f79, 0x9ce7339c, 0xb5331d99, 0x228ca8cd,
- 0x389aa6a4, 0xfd2d34bc, 0x84e90ee9, 0xf00b9085, 0x8321226f, 0xf121326c,
- 0xbd3ca484, 0x582c524d, 0x1c5a7fbe, 0x052627d6, 0xaed84bbe, 0xa0af9686,
- 0x84225ba9, 0xa0e4258c, 0x31cc7881, 0x2ae8b484, 0xfd68d947, 0x7b488496,
- 0x663b2d13, 0x68db4573, 0x9d63967f, 0xeab4ac07, 0xa33e6398, 0xda4bf68b,
- 0x4264c7e9, 0x121230de, 0x8c1ddb41, 0x6dd3ed60, 0x42229074, 0x07891b26,
- 0x233d1bbf, 0xc1dfda14, 0xbfb083a1, 0xa6eab797, 0xc37b697a, 0x19d8b220,
- 0x9b74ef32, 0x6ca5db0e, 0xe8e921dd, 0x5a48b8f7, 0x513f321e, 0x595b01ef,
- 0xcc67cc26, 0x43844ed4, 0xae3d56dd, 0x1ce8c2a7, 0xb05466b6, 0x8fe868de,
- 0xe6ed7bd6, 0xfa7e8c4f, 0xbd2fc7fd, 0xbf50246f, 0xc1b47255, 0x37df5bfc,
- 0xa9a1c9ce, 0x4932e7e7, 0x613a6420, 0xf0bf36ff, 0xdfa151be, 0x70a6efa7,
- 0x76eaf5a2, 0xa32fd2ef, 0xc5a95769, 0x24268d23, 0x8b6bccf3, 0xdefd2148,
- 0x74112266, 0x5736dd6e, 0xfbce8d91, 0x0e535dcd, 0x58845149, 0x20f9339f,
- 0xc421f015, 0x06b4e19f, 0x3a5225e7, 0x722fce30, 0x10b3fd2a, 0x55ef46b2,
- 0x95ddf099, 0xb7eb4559, 0xccf830c6, 0x42e0cc1f, 0x157bec2c, 0x0b80975f,
- 0x8c0a7f1d, 0xdce5915f, 0x2454fd01, 0x329bd846, 0xaff99e61, 0xebdc2999,
- 0x823d92ec, 0x672b8d56, 0x579d0cf0, 0x79b84e65, 0xe2f1c06d, 0x44f8d963,
- 0x7180cfef, 0xefb2f109, 0xf9216932, 0x3873f6c1, 0x1f77e9be, 0x3a864efb,
- 0xd7f38273, 0x4d836fe2, 0xe861fac2, 0xa3ac116c, 0xc7cf98f6, 0x0f53ace8,
- 0xe6799674, 0xd043e230, 0xeab6cfcd, 0xd5fd1531, 0x5fd88cd9, 0x708f4d9d,
- 0x1a780cd9, 0x2c03534c, 0xcc1ba69a, 0xc7850f5e, 0x1f26f39b, 0x7e297292,
- 0x20fa316e, 0xb4b7437d, 0x48dfca16, 0xe8b7e361, 0xdf216b74, 0x34859772,
- 0x6e1d5e21, 0x376bcf98, 0x6a7e2147, 0x26bd1ae7, 0x9f8fcfda, 0x63de5897,
- 0x67fa12f1, 0x72f66bad, 0x22e24768, 0xfe024fec, 0x738c075c, 0x4ae9fdac,
- 0x51bf6e79, 0xb7a7dfa1, 0xf027fdb1, 0xe8c6db50, 0x0107ec4b, 0xbd3c20d7,
- 0xf024fdaa, 0x8db07f31, 0x17a505d1, 0xcecd7780, 0x3349db08, 0x0102eb1b,
- 0xe5568cfa, 0x6d93dbeb, 0xf3044727, 0xd007f035, 0x81620ec1, 0x79f2d679,
- 0xeeb9d3ce, 0x6755fd83, 0x50341ff6, 0x0d1f16d4, 0x881f7ee0, 0xffd66b7e,
- 0x0164fb3a, 0x0e93ab21, 0xadf62a61, 0x7f7aa861, 0x6f78e56f, 0x7bb69482,
- 0x4d4dd382, 0xcbee1b61, 0xb80d939a, 0x19532d9f, 0x231cb35f, 0x133c508e,
- 0xdebe43f2, 0x457db17b, 0x9964db64, 0x71e2bdc2, 0xfa44d6c9, 0xaba44fcf,
- 0xa36ce224, 0xf5b67dfd, 0x0025a8f5, 0xc82eafdf, 0x37ae98a4, 0x3777ad82,
- 0x3b5d3be7, 0xde91c029, 0xcc248c1b, 0x3fb72f7a, 0x01223be2, 0xec386b3c,
- 0x1f2e9ebb, 0x17a529fb, 0xf6c1cecf, 0xcf92e8ab, 0xf6eb3d3e, 0xbbbce94f,
- 0xcbb44c76, 0x4e4d3640, 0x7c409fa4, 0xa53b7d84, 0x4c99389f, 0x37de2895,
- 0x351ebdb4, 0xfcfbb68e, 0x81f3a397, 0xf7cdbf6f, 0xe5e799fd, 0xbb9700f5,
- 0x75ecf67e, 0x95e35e50, 0xce304d62, 0x95ffc7ce, 0xdea9fb42, 0x26100f51,
- 0xd57cbf4d, 0x74f5a7e8, 0xfbec6dde, 0xa836c1ce, 0xf713f9f7, 0x6ef0075f,
- 0xf2c26b6a, 0xc36c4f33, 0x7b3f6bfc, 0x683fdf76, 0x94675abd, 0x799dea1d,
- 0xa5e23f7e, 0xc077a5d6, 0xeba2077a, 0x33f6bbcd, 0xe126d97e, 0x1973237e,
- 0x2d74131f, 0x9ff3f7e8, 0x2a1b1e2d, 0x16f7c437, 0x833b9723, 0xcb391c98,
- 0x994d6ff7, 0xb3d205a5, 0x85cc44cf, 0xfe8dbaf5, 0xcf9868ec, 0x1f174628,
- 0xb6d47871, 0x2db831ad, 0xf9fb1ed8, 0x487bee80, 0x9e7d2873, 0x6b4ef4a2,
- 0x0050b8ed, 0xeb0afde3, 0x11257ad3, 0x2f37be14, 0x7cc12e38, 0x18354722,
- 0xbf9e706a, 0xd574e564, 0xebc5e5a1, 0xe96a3d18, 0x059b0be0, 0x907d8beb,
- 0x4d32bbb2, 0xb2603e41, 0xd6ce3e33, 0x6aed5297, 0xd7efd2b2, 0x65d973af,
- 0x4f97c78a, 0x4a9c9e1f, 0x211b3ff3, 0x65fe2015, 0x8f39c989, 0x5ba9c705,
- 0xd04e465f, 0xd9a2eaf9, 0x11b0497e, 0x07c8b5fd, 0xd7ba31b6, 0xf205bd13,
- 0x89f200fa, 0x072e16ba, 0x1279b077, 0x96632073, 0xae59db15, 0x83ce098d,
- 0x3058b3b6, 0x29b8c81f, 0x678ee407, 0xc51dca2e, 0x233df388, 0xe4f101f8,
- 0xee09cc2e, 0x711d04b7, 0x8c13fe01, 0x7e30b534, 0x4ffb4953, 0x8a4d2e8f,
- 0x09a60a2e, 0x16b95883, 0xf2f0b74e, 0x923b451f, 0x3b5e01a2, 0x60f25563,
- 0xdaf2be20, 0x6e4cddcd, 0xe91c72bf, 0x08740dd0, 0xaa4ebbe3, 0x6472f6e4,
- 0xf70e94ae, 0x5c3a471c, 0xabf8cede, 0xb8d4bdbb, 0x259ba68c, 0xefe61732,
- 0x7404f4e6, 0x9bf9a764, 0x7d71a3a2, 0xd9fcebee, 0x39baa7c0, 0x61ef75dd,
- 0x4e86f6f4, 0xbcf801e7, 0x36e47db1, 0xf95a0790, 0x113b5528, 0x6f8a6edf,
- 0xd67a0493, 0x67a0c3ba, 0x3b1355d5, 0x6ef757ec, 0x081a1fbe, 0x7b7707ee,
- 0x2fe872e5, 0x5d9a6e35, 0x992f5096, 0xeef34a9c, 0xfb04525a, 0xd96b652d,
- 0xd20ba01e, 0x20ab912e, 0x0ddced5f, 0xab8fafed, 0x2099cbb3, 0x9927b6f7,
- 0x47ebfd69, 0xcdfb6314, 0xf4294ae9, 0xb620a9e7, 0x706f2127, 0x9cdb3ca1,
- 0xf20ed23c, 0x88daedce, 0x0651177a, 0xb89ca1d9, 0xbea16bf4, 0x6c80b3db,
- 0xe3ecc292, 0xcb03923d, 0x08de91bd, 0x3b7a7eda, 0xeddc7fd3, 0x139f1f6c,
- 0xa1a911f0, 0xd78c1173, 0x788982fd, 0xbd9d20a4, 0x930a67b9, 0x509a2fb3,
- 0x746d9ece, 0x34541390, 0x10568c8f, 0x358e02af, 0xa03a99ad, 0x9966425b,
- 0xf8cb1e60, 0x4c0d5a3c, 0x3973446e, 0x9b2f7590, 0x1fbe72c7, 0x78eb4796,
- 0xaffbe46a, 0x9b05c995, 0xe0e41727, 0x921e3e39, 0x73970245, 0xe5bae874,
- 0xb527dc3e, 0xe2feb34f, 0xdbbc8e4c, 0xe855e842, 0x6c7a5a47, 0x6ca6e3e2,
- 0x3301203d, 0xa55ad949, 0x57d7e7da, 0x5d3d5f13, 0x755bf5e7, 0xa8b989be,
- 0xfc82dd37, 0x6c91837c, 0x77f7fa97, 0x5be3344f, 0x3cceb115, 0xcdd7e409,
- 0xfa3fd416, 0x8720e9f6, 0xb8f4022e, 0x84dec97c, 0xea1670ab, 0x09d3bd68,
- 0xede03b56, 0x5a8e1c47, 0xbdafab7d, 0xd19c0f46, 0x4325c9ea, 0x4186e969,
- 0x549e0173, 0xa198b75f, 0xa7b68d9d, 0x8fb612f5, 0xf213627b, 0xf7ec26ab,
- 0xeb1b68f6, 0xd859d627, 0xcb2e2c00, 0x27c98eb9, 0x7adc3dab, 0x9e85a3d2,
- 0x074f28bd, 0x82db33f4, 0xafc7e1fb, 0xbd194b48, 0x01284151, 0x41fdb23d,
- 0x5278fee8, 0x3d447a41, 0xf4e0ddca, 0xb647a786, 0xaf54dc5f, 0xd29b3d02,
- 0x27a65ae3, 0x4fdb085b, 0xa7232e8c, 0x7c007932, 0x159f542b, 0xefbb54fb,
- 0xedf6fd05, 0xefdccbfb, 0x1fb606dd, 0x801c29bb, 0xfce8fbde, 0x9be74665,
- 0x48fdd036, 0xfdd137cb, 0xffc214d8, 0x981fe7b5, 0x04079c27, 0x73663dbf,
- 0x745f0076, 0x19ab7f6f, 0x47f396c9, 0xc83fcbf7, 0x65de98be, 0x5fb4ca86,
- 0xa1afdd33, 0xe0a663f4, 0xea63e6b6, 0xb9113901, 0x8947ad5e, 0x266883f4,
- 0xdeb1bebf, 0xa3d24238, 0x55f1c3de, 0x007d73ab, 0x46f41e7c, 0x6324114a,
- 0x131bd33d, 0xde00e640, 0x417a47b6, 0x20484e3f, 0xccc2e7ae, 0x67df3f67,
- 0xfce29f02, 0xeac51090, 0xff7df2f7, 0x4ff7af29, 0x4b9c869e, 0x3a283f70,
- 0x0f2271d7, 0x3b448f2c, 0x172c2f3a, 0xd73be9f3, 0x137cd998, 0xbedecaee,
- 0xc18f0429, 0x3e75757d, 0x5eeb37e0, 0x25a97e9f, 0x8b908e38, 0xf981ba5a,
- 0xf581fc83, 0xb81a8e54, 0x7e7eeb5e, 0x71d0d33e, 0x7b5fdf04, 0xb482b8a8,
- 0x2bfbe0d5, 0x5635c7ee, 0x2522fa02, 0x9d2c4707, 0xe1befd57, 0x088d1f6c,
- 0xeafb4364, 0x314dd758, 0x9331dfb4, 0xdfa0fef8, 0xa09ce0ab, 0x0c531e27,
- 0xf5e0dde0, 0xd3f9e087, 0x8f7fd618, 0xe991ecd5, 0xd623b8fe, 0x3ded0d15,
- 0x03eec465, 0xe5077339, 0x896fb027, 0x4df808af, 0x0147f13d, 0x5d58c79c,
- 0xbf4031f1, 0xc9366772, 0xf257fca2, 0x63f7c2e7, 0x84b95111, 0xfb71f3f6,
- 0x85799f6f, 0xcaf85ab6, 0x03df85b9, 0x22317afa, 0x18e2e803, 0x513b97d5,
- 0x830656df, 0x944bddbe, 0xfc30b6c1, 0xcc0ba457, 0x20beb33a, 0x55663f98,
- 0x7a442f9f, 0x6fc30c4d, 0xe214f9fd, 0x09fe8589, 0x627bbf9e, 0x1f5df614,
- 0x56ea8230, 0x78833e7f, 0xeb1f7f68, 0x985b7548, 0x30c53771, 0xc84ddb7a,
- 0xf7e570d4, 0x0f7671f1, 0xb030fb65, 0x7f02f24d, 0xe1bf5eac, 0x07e532cf,
- 0x3eacebd5, 0xe831694c, 0x8f36d56f, 0xc7f3474f, 0xd11f8c0c, 0xc5cdb37f,
- 0xeb47b941, 0x0e03c7e9, 0x4b20f43c, 0x2e52e0f9, 0xbc3596b7, 0x13d825f9,
- 0x3cc4f5aa, 0xdcb3f69e, 0x4a983cec, 0x2cb33e8b, 0xbfb6028f, 0x25b73bf2,
- 0x5c4a5c80, 0x32eccad0, 0xf40d9264, 0x4331c95e, 0x317910be, 0xfa8f4b3d,
- 0xe29fe231, 0xe7188beb, 0x82f7c04c, 0x80f504c1, 0x7a45b705, 0xd07c213d,
- 0xa5a1e1cc, 0x7eb84f9b, 0x767feda5, 0xfe81196c, 0x739ca23a, 0xdc163e81,
- 0xcfc54e76, 0x53ff25ba, 0xad5d028f, 0x649fdab1, 0x433865dd, 0x623bfee8,
- 0xc43af3bc, 0x675e4f53, 0xa04cfaf6, 0x87c640df, 0x30eacf60, 0x940a3cd9,
- 0xa2cf111b, 0x291a9fdd, 0xe60ecbe3, 0x361e9e97, 0x196be819, 0xde0337b1,
- 0x1244b597, 0x033f084f, 0xc3e81805, 0x3f609e7d, 0x3cde3b4b, 0xaddcfc0a,
- 0x2c9d23f7, 0x8f105b35, 0x9c7af3ee, 0x74316907, 0x8a7b45f9, 0x8e5123fb,
- 0x9e7d60db, 0x9f47c67b, 0x263f491a, 0x7eb8efd2, 0xfdf0edd7, 0xe228c8a1,
- 0x4b78fa00, 0xf3a70ef6, 0x832b35ef, 0xeabc2863, 0x460e948d, 0x734e3763,
- 0xd243fe88, 0x77fc6aac, 0xcc25f5bc, 0x65d9b967, 0xc2be3904, 0x32a7cf41,
- 0x6457c9e0, 0xef8a151b, 0x9185f2f1, 0x45ef8f97, 0x6c7fbf7c, 0x661ff4a4,
- 0xe1c7dd1f, 0xa5ea23df, 0x71f27db0, 0x0e7ea906, 0x4a686bd2, 0x943c7ddb,
- 0x3e79f3e7, 0x604bd79b, 0x9f7c1df9, 0xeacb9c7c, 0xc5f1c769, 0x3b36fe30,
- 0xe015b1d6, 0x8cf9db45, 0x22fb1740, 0x16b7a8bc, 0xfc077e52, 0xea3a6d6d,
- 0xedaf94ad, 0xc4d97ac0, 0x4cf58d17, 0x03485728, 0xcfb07be5, 0xcf84148b,
- 0x274a52a6, 0xd7b03cb4, 0x04aedb64, 0xa9f02af1, 0x8c7b63c5, 0xc3c9eff4,
- 0xd43e9251, 0x61e5428e, 0xa3b30c7b, 0xd27f6118, 0x9e991899, 0x85f10c36,
- 0x3e5f2274, 0x608c6ebe, 0xf4fba078, 0x7bd418b5, 0xfba1397d, 0x97c704e5,
- 0x0f1f67e0, 0x8ab5e352, 0x5e6de1e3, 0x8d8c516f, 0x007b4fc9, 0x31c7ddf7,
- 0x165c7eac, 0x0f424393, 0xcb8703ff, 0xc8549a4d, 0xd5533195, 0x1c4dc5fa,
- 0x44c5379f, 0x087ebc09, 0x88f215f3, 0x345f17f2, 0x5e0b0fdd, 0x217f2135,
- 0xb0d9031b, 0x63bd68ff, 0x2c0a6e58, 0xe472a58a, 0x18dfaa26, 0x22ddb1f3,
- 0x3cefdf68, 0x85d04a3f, 0x3e9c8095, 0x1853d625, 0xe54f7e40, 0x296bae1d,
- 0xba437ca1, 0xd037fe34, 0xd694c93a, 0xd8560c87, 0x03a64ef9, 0x208fed02,
- 0xe147f40a, 0xb850e7fe, 0xc63bc76b, 0x319fe0e9, 0x96f11e92, 0x9f70f247,
- 0x7585ffbd, 0x9cf2ed21, 0x51d1eccd, 0xceb8ffbe, 0xe9fa0175, 0xd42dfdba,
- 0x7d198fcb, 0x3096add9, 0x63df46e5, 0xf2c1490f, 0x3d973fc5, 0x762fca46,
- 0xe4fd7677, 0xa5eeba66, 0x861db29d, 0x5fe77a5c, 0x7a031ddf, 0x0ec1a775,
- 0xdf2a46e7, 0xbcc3d5ef, 0x791e981b, 0x83a6a74c, 0x9f55dfb3, 0x1962d273,
- 0x83dea8be, 0x3be09cfa, 0x4adf7e42, 0x5c81577c, 0x461c465f, 0x66b4ff48,
- 0xe1420cd5, 0x0eb2c2b7, 0x6be7d1f9, 0xbc1ea1a7, 0x7cb07892, 0x4fe18b59,
- 0x4561f2a1, 0xeabf3aab, 0x56fe7561, 0x67df3aaf, 0x2f0f2e7f, 0x32c7a7df,
- 0xd94b3e3a, 0xb03fe03e, 0x3d05e4c1, 0x98af5b48, 0x3ca4cbd7, 0x24f813b1,
- 0x4760fb95, 0x48e11758, 0x6ebebe04, 0xc5e2696f, 0xe7f57cec, 0x10de21af,
- 0x37a8237d, 0x6c6f12e4, 0x63cb7eff, 0xf4238415, 0xe071f0ab, 0x2cb671bc,
- 0xc6263afc, 0x10653d0a, 0x8bbe27f6, 0x45e3827e, 0xcfea3ce1, 0x8f98079b,
- 0x5d9fdb05, 0x1b9c7e19, 0xa9b1c6fa, 0x7db064e4, 0xfc6b931c, 0xa3e82d1c,
- 0x7e127cfe, 0x07a17917, 0x31a5db07, 0xd9a48afd, 0xf906454e, 0x8b63b094,
- 0x224270fd, 0x3bb464e6, 0xebcfcfdd, 0x5fb05cf6, 0x083d009f, 0x6514e3f6,
- 0x714e9f9f, 0x66c6f7d9, 0xf70687eb, 0x941d768b, 0x43f8ceae, 0x58b2eef8,
- 0x6b8e1c6b, 0x57187627, 0xdba2fbd0, 0xe8bb062b, 0x6778ff7c, 0x546ba279,
- 0x30f53f28, 0xe85189bf, 0x9c95165f, 0xbc391a25, 0xd42dfdac, 0x95bea8bb,
- 0xd3e74c0d, 0xbd23b7bd, 0x04ce24af, 0xbd9ef3a0, 0xff5c33c3, 0xf315fc86,
- 0x6259e599, 0x9ba79820, 0x2abfee98, 0x1d599f3e, 0x99d3ef4c, 0xc71c061d,
- 0x025b1441, 0x7cd9a51e, 0xd3d4f329, 0xf9be84fc, 0x908e9183, 0x12fe7cf1,
- 0x9df0a7b4, 0xd4b253c0, 0x36f30495, 0xbc74b8c1, 0x824de208, 0x5dac69b4,
- 0xe4a27481, 0x3079b3d4, 0x0d264f3c, 0x675f5069, 0x9d6ccbf6, 0xd0090674,
- 0x985e0fbe, 0xc6fcf2b7, 0x568bbdd9, 0x158b77c0, 0xcf9188f9, 0xe52c4763,
- 0x2fe46687, 0xc9b75866, 0xfd1cfbe2, 0x94f53c64, 0x3d8fb829, 0x3bda0943,
- 0x02369106, 0xe72c5ae3, 0x1b58b03c, 0x8f9049b1, 0xe0faf5b0, 0x66ce70fc,
- 0x2346e8f1, 0x0b19dc9f, 0x677a527c, 0xf285e025, 0x19ff6665, 0xc3ef5ca8,
- 0x73e0b773, 0x4d7e7c31, 0x1325cf9c, 0xe2588706, 0xb01ef1c0, 0x40419cae,
- 0x49d65984, 0xd44b7ed0, 0x25eff454, 0x7969e000, 0x8abd54ec, 0xd53bc56f,
- 0xeab9c4f9, 0x9d4bb27c, 0x3f93d337, 0xcf928be8, 0x16217499, 0xc02359cf,
- 0xfce062de, 0xebcf99a2, 0x3d74a408, 0xfd3e71ef, 0xbb13f58d, 0x8eeebcfa,
- 0xe3b41231, 0xd0cd9825, 0xde57d53f, 0xcafa658b, 0x97281e27, 0xf3cfc803,
- 0xc0f9b626, 0x837cd3ae, 0x7e8bbf64, 0x1b2ab66f, 0x3d5494f4, 0xc76624d3,
- 0xf54adbd1, 0x865b7cb4, 0xe689becd, 0x1d7ec1eb, 0xd5b7f30b, 0x49f68d3c,
- 0x0247ab13, 0xdc8fe0cd, 0x8366c9b6, 0x81e5717a, 0x8bd046c9, 0x76db1057,
- 0xbfbe8612, 0xce86fba2, 0x53c809f7, 0x5e9c48bc, 0xf3290f22, 0x7cbc0d71,
- 0xc59dfc07, 0xe18b13f2, 0x65feca5c, 0x3a75e475, 0x90e0eec0, 0x77d62e51,
- 0xa16ebde4, 0xcecd65e3, 0xb3bc3b43, 0x0973f99c, 0x3463f7df, 0x905c6afb,
- 0x8f3cb1ce, 0x707587e7, 0x899c6ebb, 0x746d6bf9, 0x36666288, 0x8c71fac0,
- 0xfc44edfe, 0x5af2fb63, 0xfd07fc12, 0x47fb0795, 0x5829343e, 0x7467dc1c,
- 0xbdb37ca8, 0x1bd696a9, 0x87edf7a3, 0xa78e3c7d, 0xa37d3c79, 0xbf9406f4,
- 0x628b3a9d, 0x6d7db1d4, 0x861302ce, 0x14b80653, 0x04d07edb, 0x7c904cfd,
- 0xbd4c6698, 0x5fa609bf, 0xd147edc5, 0x06ef4649, 0x12a68eba, 0x77b4678a,
- 0xd820f9fc, 0x4907e5a5, 0x88e9e001, 0x0739c841, 0x7a15d39e, 0x3a1afb1c,
- 0xfd406c98, 0x2a932ffc, 0xd5013bf0, 0xd2bbce8c, 0xbe2116c6, 0x755f2c0f,
- 0xe36a3f28, 0xb337e464, 0x6abf32a6, 0x57b35cfd, 0x10ce4092, 0xbbd277e6,
- 0xb10a6f32, 0xc43f0897, 0x773206fb, 0x44f39857, 0x4fd31c6d, 0x3b3066ab,
- 0xa346fd6f, 0x1d537fca, 0x335eecc7, 0x8c8f26e3, 0xfcc47943, 0x5c7179a7,
- 0x46411b6f, 0xe10a9f00, 0xaf504523, 0x81edf4ab, 0x9cba8cf8, 0x8e3999f3,
- 0xfdea0302, 0xbe08df9c, 0xf6c41cee, 0xd2423cf3, 0x9367ae81, 0x87ffe406,
- 0x7e456f4b, 0x18f11373, 0x951f6f00, 0x310278b1, 0xd55359ef, 0xa71cd27a,
- 0x5cf37f3a, 0xf0c51ead, 0xe79dc621, 0x09579752, 0x5aaa783d, 0xfff05e0f,
- 0xb9468abe, 0xdd54f89a, 0xe7e02185, 0xc000f660, 0xe873f30b, 0x1bdc6e91,
- 0xbebc3f99, 0x388def3d, 0x127767d8, 0x4976cfbe, 0xb2ffd1cb, 0x82115a4b,
- 0xaf5ad4ff, 0xb8647204, 0x987dd855, 0x7934e4de, 0xc3df83f7, 0x91128359,
- 0x49e3cebc, 0x493c400e, 0xadbe907e, 0x57165665, 0x30b51c41, 0xa45b349f,
- 0x16fef41d, 0x65da3e5d, 0x2aaca25b, 0x0da6fa3b, 0x13d4054a, 0x44c558f6,
- 0x6e9c6df2, 0xd7779dc2, 0xbafe31c5, 0x71766259, 0x9e333ccf, 0x9cb3e32b,
- 0xbc413f2a, 0x9d828e4c, 0x229c6470, 0x63fda3ea, 0x5c95c1b3, 0x44af5340,
- 0xd2171197, 0xd11b265e, 0x4fe1a8ae, 0xb476b1f1, 0xce0fc7eb, 0x9cfd3b41,
- 0x2fb43c8e, 0xfd844b12, 0xffd8292a, 0xf4dbd99c, 0xdf53a710, 0xdcbf4db1,
- 0x7bbba412, 0x8d5cb8e2, 0xaf409124, 0xe7ba767d, 0x7ba7684c, 0x5ffce75c,
- 0xa35acba0, 0x2171ed0f, 0x07df8af4, 0x38ab8b92, 0x9cca18bd, 0xf9d056f3,
- 0xb46bbcf5, 0x1d19f9c3, 0xe7ffb46d, 0x9da344f7, 0x50455f51, 0x652c6d3e,
- 0xec07a47c, 0x9f1d745f, 0xf9e35745, 0x8ad4a360, 0x663ba3f2, 0x6af1d232,
- 0x073c01cb, 0x74a56f57, 0x569f2218, 0x6be750ef, 0xdf9e2748, 0x47ee9f6b,
- 0x0517c8cf, 0x56ef761f, 0xd4c323b7, 0xcb8f377c, 0x7da77c8b, 0x7d4c0556,
- 0x9d8b3dae, 0x8641e9c3, 0x1de8a3ae, 0x2eefb723, 0x008a9db0, 0x7494fb56,
- 0xc7adfd31, 0xfb665a7a, 0x2283c99f, 0x6e568e3e, 0xedbd7115, 0xe0bfca3a,
- 0xb0f42afc, 0x07f2c222, 0x92721d74, 0x42df382e, 0x7fe84ede, 0x7c084e42,
- 0x52109695, 0xa743f742, 0xc9434fe0, 0xed37c050, 0x0489f71f, 0xd8314391,
- 0x7dce07bf, 0x9478f8e3, 0xb7203c1e, 0x17b33a3d, 0x6a59abe8, 0x06693940,
- 0x9ba69f3d, 0x83ca1724, 0x323daa97, 0x692c7bc0, 0xd6833598, 0x8cee6f1b,
- 0x3a513804, 0x3f4a1239, 0xb197c44d, 0x11d1524b, 0xbf457796, 0x848e961d,
- 0xc47df33c, 0x74e998f4, 0x054fd0c5, 0x7a687e38, 0xc8e76240, 0xeb1f9629,
- 0xbadbfda1, 0x53274869, 0xf30f5789, 0xf1a9b0ab, 0x9d49253f, 0xa3f4a69f,
- 0x70c38c0f, 0x1f4e7870, 0x08772ea1, 0xfcd4477e, 0x857c932e, 0xc1f9187a,
- 0x5d80efc1, 0x193d7221, 0xd50dfa01, 0x6a1f7144, 0xb8ebe0e9, 0xf26fdd6f,
- 0xbf185c75, 0x13b70f49, 0x2bfc4b5f, 0xd3fa969f, 0xeb1bf759, 0x7a10a4e5,
- 0x65e2fb14, 0x907f8b03, 0xa9fe655e, 0xe5193312, 0x642afa03, 0x3fb14576,
- 0x8cfa05ae, 0x315dd209, 0xcf119eff, 0x19086ce9, 0xa1367402, 0x5327c23d,
- 0xfef1e4bc, 0xcbba05ae, 0xe223ea0f, 0xd254d15c, 0xa532be8f, 0x705fd42c,
- 0xa47d09df, 0xd26b6a40, 0xb91f0267, 0x1be609a7, 0xe3434f42, 0x512f808b,
- 0xda7dc27a, 0xcbaace4f, 0xf026f435, 0x52e5f42e, 0x367480d2, 0xea60a75b,
- 0xf2855cab, 0x5b32dcb5, 0xacdfed0f, 0x0936f462, 0xea0b31e8, 0xdca5e9ab,
- 0xe96bceac, 0x0ba88e91, 0x9f4b571d, 0x10dbd103, 0x5f2137a0, 0x6f4d2f63,
- 0x75bfe3d3, 0x7f1e9b7a, 0xd2d37a11, 0xbb5fe099, 0xa0e56c22, 0x4b57d73f,
- 0xde0a0f28, 0xf904d61d, 0xa89975e1, 0xb68aef4f, 0xdf5d7ea3, 0x3b0bcac0,
- 0xbdc4321d, 0xe5e3ad64, 0xc872ce99, 0xe5a7afd7, 0x23a2ebb4, 0xd8662e2c,
- 0xef3cac9d, 0xadd786ae, 0x587867a0, 0x6f3f97fb, 0xb968a396, 0x2fb799b7,
- 0x7c872d6d, 0xff6b0b7d, 0x46a9f819, 0xb79a7c43, 0x7d5fbe09, 0x1d9da66f,
- 0x3efe99ab, 0x9777af91, 0xd8ebdcf4, 0x5bb595ae, 0x883cd075, 0xe99ff9e0,
- 0x75f1d7e5, 0xd6cadc4e, 0xfae27719, 0x8ba50aa9, 0xea0f0115, 0xb574a76f,
- 0x06dfc8c5, 0x4a973f38, 0x4e9069ad, 0x5217eca1, 0x22ded987, 0xcc7e650f,
- 0x7b8874ed, 0x45fd99e2, 0xce20fff8, 0x9f4432a0, 0xd99e27b8, 0x84bd4563,
- 0xa0018a18, 0xa8ac4787, 0xa4ff0b5f, 0x812221ef, 0x723587bc, 0xbfac243d,
- 0x03564a72, 0x865311ea, 0x5fa53f08, 0x9b8e94bf, 0xc98be177, 0x3f43ece1,
- 0xe69ee3e2, 0x7a7d1371, 0x075337b2, 0x880bb174, 0xf4800524, 0x7f41afde,
- 0x487e05db, 0xa43ca426, 0xf2caf004, 0xbd7fe35b, 0x853c65a9, 0xef41aeaf,
- 0x2196a101, 0xccc9dc61, 0x1c1be24e, 0x53fe7fd5, 0xc467c4f4, 0xff362638,
- 0x1e544d95, 0x7e31d123, 0x8fe38736, 0xe90abf8c, 0xd3c73677, 0x2a7f05cf,
- 0x9fc00520, 0xddbc70e6, 0x347aa664, 0x8356c3f2, 0x133c6f86, 0xcb6a720f,
- 0xbabbfa80, 0xaa57c35c, 0xb92bb8f9, 0x092b7ede, 0x4a727ea0, 0x7a7a62f2,
- 0x6bdbd30b, 0xbf50472c, 0xe98479e9, 0x8fc4b5ed, 0xbed68ffa, 0x9d53b359,
- 0xe7536baf, 0xf9d5dbeb, 0x8e0f1c9e, 0xaea5b3d3, 0xd58aec18, 0x768bbf0f,
- 0x5fc16aec, 0x42fe6abc, 0x5fc67115, 0xed06ba1e, 0x6f98df51, 0x6f4f4d3c,
- 0xd0b8aac5, 0xfa9c02df, 0xb22f35ec, 0x613714f8, 0xeb1e8276, 0x4e70f988,
- 0xa4791a25, 0x9c7927e5, 0x86fb089f, 0x84792fe0, 0x8e7a23c9, 0x8241d77d,
- 0xd7ab5c7c, 0xbd73c4f5, 0x84d39f97, 0xfa0793fe, 0x35d3d00f, 0x4164480d,
- 0xdb7f6439, 0x9fc88724, 0xc0668579, 0xee99be73, 0x6c978067, 0x545971c9,
- 0xca05f0f4, 0x7bcf8199, 0xbd0d72ea, 0x2dd6f388, 0x81665e9c, 0xff8e8527,
- 0xe781c97b, 0x32b7c3ba, 0x5963997a, 0xd1faf487, 0xe22f466a, 0xe7e577dc,
- 0x4dfc873c, 0x747d79ce, 0x5ef54112, 0x17491e9a, 0x8fe67ce7, 0x1d1897af,
- 0xf87d55d4, 0x82cfe818, 0x246e1c2f, 0x3e10faf1, 0x64cd34d1, 0x934a3b06,
- 0xaa839d81, 0xbc5c7fa6, 0x38cf8434, 0x13134a22, 0x88ee5940, 0x5d6cfb47,
- 0x669c7aa4, 0x7fbbf3a9, 0x4fd2f380, 0xd44ed123, 0x954130fb, 0x70eb77a4,
- 0x6b3edb8c, 0x31527e60, 0x7707559f, 0x7ec3f3ee, 0x08ebff5c, 0x359f953d,
- 0x5ac5f792, 0xe4c4b65a, 0x6a7186ca, 0xb3233e74, 0x5fa27663, 0xb03b8c57,
- 0xf283d65d, 0x931af8d5, 0x106901c3, 0xefd2e1c6, 0xb5fea1d9, 0x4f8c89d1,
- 0x2aeb11df, 0xca0f8848, 0xa43f614b, 0xc6a49652, 0xc126fd04, 0x376606f8,
- 0x5c710bf9, 0x6361be32, 0xb69e7d88, 0x568f1b0f, 0xf5f8c096, 0x61ce29f7,
- 0x1da307de, 0xd39f30e5, 0x9a7b8fdc, 0x474efdfb, 0xcce03f31, 0x752cfabe,
- 0xf734f4f1, 0x518e9e13, 0xbd4487fb, 0x8ecc09a5, 0x6cd1b272, 0xbf07a8de,
- 0x1c6f313a, 0x21ec4946, 0x93c60353, 0xea78602c, 0x6dbd13d9, 0xa5ff8853,
- 0xfbf4a12e, 0xd87f9f30, 0xba43379c, 0x31b8f5bc, 0x23df0ed4, 0x0f77f3b1,
- 0x089e97a7, 0xeb718a96, 0x0fbf2a12, 0xe799ec78, 0xe5f63c47, 0x13bc8ac7,
- 0xc099effb, 0xfe5f6bf8, 0xb84f2c3b, 0xf5d843dd, 0x69f7f207, 0xd0547bfd,
- 0x044073b0, 0xfd6bcfb3, 0x3f050bf3, 0x05f9fee3, 0xec2d1f9c, 0x45827e60,
- 0x4a9cd266, 0x247717cb, 0x946e73b2, 0x9fe55b27, 0x515e44f7, 0xcba0863c,
- 0x3ecef49e, 0x2123f67f, 0xee3aecfe, 0xeb13accf, 0xdf5eaddb, 0xbadf0903,
- 0x8481fb3f, 0x6d6cfe30, 0xa08bc3dc, 0xe20b0c47, 0xc18dad61, 0x2dae42ad,
- 0xef6f7b07, 0xe8718272, 0xd65adf6b, 0xb5e0f604, 0x2a0b003f, 0xa7d431f2,
- 0x079c38eb, 0x53bc575a, 0x9d951447, 0x4251107d, 0x8aec9fe6, 0x4351e551,
- 0x4d03890f, 0xb5514ead, 0xaa186f4f, 0xfd643faa, 0x4cf2aa35, 0x9f2abe4f,
- 0xaaad72d5, 0x65ad55fe, 0x87f0fcaa, 0xcfd557af, 0xa3074323, 0x0c90ecfd,
- 0xb57221b6, 0x3e554ab7, 0xaa2de772, 0x86919ff6, 0xbd69e3cd, 0x79fe42dd,
- 0x8aa39d1c, 0x39ce7183, 0xed554b6d, 0x62b6a49b, 0x9f46f01f, 0xbd6893e4,
- 0xcb5f1c15, 0x62ff993b, 0x556afb74, 0xd8a367ff, 0x5fad183d, 0xa32e7696,
- 0x912ed61e, 0x7efea447, 0xfb8eeada, 0xa8ee219b, 0xf296bfbf, 0x356eda4d,
- 0xfe80bf3d, 0x8e6fd5a6, 0x5d3f7026, 0x7461490a, 0x0b0ba5ad, 0x5bbd7fea,
- 0xe627b465, 0xc687ec91, 0xc40cbc23, 0x5fc7f4ab, 0xae76612f, 0x76ada7de,
- 0xf559ff88, 0x47a432d6, 0x2e9a9253, 0x5d351422, 0xd3508e44, 0xa6aed585,
- 0x6a25c183, 0x3dc2d03a, 0x0ba6a1da, 0x43ffa7e2, 0x2ae02de0, 0x553b1ee0,
- 0x785a374d, 0xda0093e7, 0xc95eddd9, 0xfc6123ee, 0x70dbede2, 0x1fd2975d,
- 0xf86a89f5, 0x34701c16, 0x2c6e1059, 0x61e84cbe, 0x68ffae26, 0xaf4213fd,
- 0x7ae44b89, 0xf847ef15, 0x0f259a17, 0xfe7d51ea, 0x865f12c2, 0xa7f4132f,
- 0x44ecc206, 0xf0c4a4ce, 0x3efc4676, 0xc0519d90, 0x2620f675, 0x03886b2f,
- 0x882be1ed, 0xc9ddf90b, 0x3ed15bca, 0x63f2cab4, 0xad9ebbf4, 0x35527a62,
- 0xbf9f22f1, 0xaa02b8e2, 0xfb109287, 0x528e16ab, 0x02b3e487, 0x20f2e1bf,
- 0x79087485, 0x1378c2e0, 0x62e6864a, 0xad471f95, 0x30df82e7, 0xbd097f84,
- 0xe0278c57, 0x189af829, 0xe41a44cf, 0x1a17d824, 0x6846473e, 0x3b6a48fd,
- 0xe0f3b08d, 0x22fe2160, 0x44265dad, 0xe9916e0f, 0xcc90f238, 0x57f14226,
- 0xdda07360, 0xce7488af, 0xe625ef87, 0xc2bd26b6, 0x935713ed, 0x80c4fb3e,
- 0xfe12eaf2, 0xc91e59e8, 0x7f6668ff, 0x6bf0b43f, 0xf89fe5d3, 0x5e3c6d03,
- 0x55ef1052, 0x078f0f2f, 0xa76fea7a, 0x857d1fb4, 0x02eb23f6, 0xc3f6a1b1,
- 0x9206fbbe, 0x1373d71f, 0x424ddc71, 0x693710f4, 0x133f3c9b, 0x4e54c4ec,
- 0xecca4146, 0xab5da458, 0x8ed1eb07, 0x012ba3ac, 0x2121afba, 0x60ccda7e,
- 0xfee5e639, 0x74371179, 0x42e27d29, 0x3fc78da2, 0xc22778b0, 0x5f38a0f9,
- 0x139e740e, 0xcf701471, 0x2221fc16, 0x9139a84e, 0x4973839f, 0xf74ff42e,
- 0xb444a6db, 0x29fdd01f, 0xbec7ee85, 0xef2c22b8, 0xeb351fb7, 0xb2147117,
- 0x42ed10b5, 0x7bd742cb, 0x7a10f019, 0x3ea86fcb, 0x1234f780, 0x0dd7a615,
- 0x0381f63a, 0x553b0b8a, 0x46f59e71, 0x7b01807a, 0x8e3c8bc1, 0xafad4daf,
- 0xe3c89b3f, 0xe739f893, 0x181af052, 0x4eee3c1f, 0xf532e3e0, 0x27771130,
- 0x7f42f8e0, 0xc151efb8, 0xd8bcefb5, 0xe13df707, 0x72e02ee0, 0x29f3a8ae,
- 0x3d6c97c0, 0xeef00092, 0x83ee7288, 0xfd47e9fa, 0x705d24a7, 0x1772155e,
- 0xb6f6cbc6, 0xcf3f78cb, 0x5bc5813d, 0xc8c8f7b9, 0x4c3bba1a, 0x6ead37e8,
- 0xc2f51d7f, 0x68ca46ae, 0xa6761ea9, 0xbfa90e91, 0x0340bd88, 0x47ce81ef,
- 0x681f3d62, 0x75e22fd6, 0x7f7f3ae8, 0x3efc3809, 0xe42a1c1c, 0x9ee18351,
- 0x919c5f57, 0x1551903e, 0x75da1f42, 0xb0f4e66c, 0x013cef56, 0x4cef1dfd,
- 0x55fd0cdb, 0x53071dc8, 0x24eb3e00, 0xd1357fbc, 0x26cbeec4, 0x2574fbf3,
- 0xdc3b06fe, 0xb34a4e37, 0xe1a9fde0, 0xe09d91df, 0xdd78fe17, 0x4f70316d,
- 0x5c3ff44c, 0x4efde1d2, 0xe4fb9e42, 0x57f0428e, 0xa26fb02e, 0x162685b9,
- 0xddf99197, 0xe5165f48, 0x891aaf23, 0x61a3f619, 0x6bde0368, 0x68cb4409,
- 0xc9938cb7, 0x313ba024, 0x561f716f, 0xfafb877c, 0x7c52ef70, 0x2814d89f,
- 0x0ed34b58, 0xdd620f4e, 0x0503cb13, 0xfc20960d, 0x16a65c45, 0x3c385b3e,
- 0x66c6f1dc, 0xcdbb17d0, 0x52c9fce2, 0x9f23d362, 0xfb666759, 0xe255d8e6,
- 0xc2b3edfc, 0xe11dd4b9, 0xf6063c18, 0x7932fbdf, 0xb17f2692, 0xe3470639,
- 0xc7d415fa, 0xbb58d9d7, 0xbf071e6e, 0x44b2a3ee, 0x6f8e5f88, 0xa66f1f0a,
- 0x5c73b124, 0x8beff72d, 0x1f7ab5ef, 0x42d58dc6, 0xf04176bc, 0x6bc695fb,
- 0x01fc788b, 0x27ad10e1, 0xef78d1fa, 0x7ab179dc, 0x0e4bfcaf, 0x3ebebe7b,
- 0x0fe22749, 0xcf4defd1, 0x0be3615b, 0xf9d8934b, 0xe807182a, 0x7dc37bc7,
- 0x4a7b85f1, 0xce45c913, 0x0ef3eefb, 0x889fdc55, 0x9f9f7737, 0xfd7e7184,
- 0x29fda5fa, 0xe15c6096, 0x92dff040, 0x86e5e6c8, 0xd082bf78, 0xdefb0aef,
- 0x8f1c4e37, 0xc147f3df, 0xbb45fd3e, 0x7feaf78c, 0x75374871, 0xd779987b,
- 0x5db83127, 0x7dc7af13, 0x1c47d233, 0x2f8cc2db, 0x35fb89ea, 0x76aa9ee2,
- 0xfccbbb7e, 0x10fe608b, 0xbc6e1c47, 0xc48e9ca5, 0x30c777bc, 0x99f7b87c,
- 0xed059ef0, 0x77bde317, 0xe257f8c7, 0xf1a9b0be, 0xd7b73b7c, 0x9fdebeec,
- 0xa6bcf781, 0x40c33b31, 0x728de0f4, 0xd8f504fd, 0x4a65699b, 0x06fac53f,
- 0xffb216c9, 0x410f452e, 0x7854ebb8, 0x23770fed, 0xfbf457e2, 0x4fbe61f9,
- 0xe702c389, 0xbe5c25b7, 0x8351d92d, 0x1f38affa, 0x8cb0fe7d, 0x139f17f1,
- 0xbcf927e6, 0xcff3c255, 0xeb211752, 0xf95a1f29, 0x88334164, 0x844925b9,
- 0xbec3175c, 0x46e909df, 0xf97c9fb5, 0x87ecfdbd, 0xaeae5424, 0x57280d20,
- 0xdd58fe56, 0xfbdc9aae, 0xb49fd067, 0xdce1fbfe, 0xb1d6272e, 0x479e8939,
- 0x2629ef40, 0xd61f20c5, 0x3185f93e, 0x1f78194a, 0x777ca69c, 0xaff81e98,
- 0x46aed319, 0x48bfa61b, 0x44963c72, 0xc927b7f1, 0x9ed20db5, 0xbdff59f7,
- 0x7e4cbdb5, 0xb9438d6c, 0x2efd64d5, 0x087975f2, 0xc0f7ebe3, 0x1e14093b,
- 0x97d31326, 0x720d7412, 0x5e2dea14, 0xf1e387a4, 0x6266aa6b, 0x74049ede,
- 0x29cbf71f, 0xefce3153, 0x800e9197, 0x4752a6f7, 0x3625d81e, 0x997bb255,
- 0xb3f31366, 0xf6317f7b, 0x0c837035, 0x0cbfbb6b, 0x8eb6c1ce, 0xf7b03efd,
- 0x7cfee8b4, 0x5a57e210, 0x8bf163ae, 0xfeff3a09, 0xe2f190d4, 0xe049bd23,
- 0x691f8f87, 0x388f38c4, 0x21af34b9, 0xcdca5e24, 0x9edbd171, 0x1ec27685,
- 0xb61bb083, 0xa8bde045, 0x8183a1da, 0x18435c8f, 0x1bdc459e, 0xdf5421cc,
- 0x8470a2ed, 0xbf914ba0, 0x19f24da6, 0x12fdf0a2, 0xded4e3f4, 0x0f984be5,
- 0x7ae7ea72, 0xf98983f4, 0x5c9abdc5, 0x212efe06, 0x364be69f, 0xfae34766,
- 0xfe223cfc, 0x5cd97f31, 0xb8f3274e, 0x923f3f1e, 0xb1297bc1, 0x5bbb4067,
- 0x474a24cc, 0xf2fcd4b7, 0x675c22b6, 0xf41a218d, 0x19f7e105, 0x6f08cf58,
- 0x653f72ff, 0xa58df765, 0x72743640, 0x62e7c286, 0xf9c030fb, 0xdd9b3bb2,
- 0x88c323bb, 0xee8cfc03, 0x81b7c37d, 0x8834c27d, 0x7f29aff9, 0xa3e49732,
- 0x73866d5e, 0x44afadd4, 0x2f22f8f0, 0xc8ec4fbf, 0xfa9cf883, 0x8c7cb4aa,
- 0xc01a3913, 0x0dba3777, 0xc7dc0cfe, 0x5448ef94, 0xf2da0e36, 0xb87a19fd,
- 0xf54299af, 0xd92f9a35, 0x0fcb2f72, 0xd1d4aff5, 0x4f2d92fc, 0xfdd3d0cd,
- 0x7fc6bee1, 0x5b35f20a, 0xbe7cb176, 0xf34ca57f, 0x655e5bcd, 0x4960e1f5,
- 0x2dc1ec09, 0x68786707, 0xb9a267ff, 0xf1fbb7bc, 0xe5fbb59e, 0x3b50bae1,
- 0xe332636b, 0x9db8675b, 0x59264cf8, 0x1ef0055c, 0xb2febe11, 0x5a0f1d64,
- 0xac54c569, 0x4927b457, 0xf325dbe1, 0xf7e56e71, 0x924627a3, 0xb7e60896,
- 0x3c5144f3, 0x8e18e81c, 0x93afc77e, 0x6e9e9862, 0x38fbe3f3, 0x4f011fa2,
- 0x77189fd1, 0xe067c835, 0xac789acb, 0x3f8664ec, 0x1c46ce3a, 0xdd839867,
- 0xcb4aae2b, 0xbc51fc03, 0xf4de39e9, 0x8dbbfcec, 0x1bf68fcd, 0xa0728b9d,
- 0x07f8ec00, 0x33f5a2be, 0xd2d6f383, 0x93324149, 0x3136b72f, 0x3a206b7f,
- 0x6269e90b, 0xa5677f24, 0x668ebd50, 0xe4c6870e, 0x466f7e68, 0xc2512bc0,
- 0x1c389a71, 0xcf78fcd3, 0x5dd74af2, 0x775a1ff1, 0xf01cbe08, 0xf681ca5e,
- 0xf5b3b7ab, 0xcd7bf0c4, 0xd4188cfe, 0xf557eef7, 0xa6836677, 0xb8c1097d,
- 0x164c7736, 0x8227bfb6, 0xd93bf198, 0x332ed7de, 0xa0ade997, 0x2c778acf,
- 0x123ae788, 0x7bec47ea, 0xc58da2af, 0x39d70667, 0xd3af90a3, 0x6369d7c6,
- 0xe8aaf4eb, 0x64091c95, 0xa7f6b6cc, 0xf7f83ee3, 0x9f2a37f5, 0xe7daa7f7,
- 0x7d83fae1, 0xd65e103d, 0xd6f93a73, 0xa9e622f0, 0x9c1f6781, 0xf013f335,
- 0xed8dfd84, 0x52e9a946, 0x5926b3cc, 0xfb35939c, 0x8e1bf33b, 0xd5daca57,
- 0xb9e2cedd, 0xeba6a289, 0x3a99ddba, 0xed102b88, 0x1027c16a, 0x3dffb41f,
- 0x89edcc9a, 0x806d2469, 0x93c7c4f8, 0x0ddac28b, 0x9cf6bbf1, 0xe2cd13d8,
- 0x8af6b5d5, 0x277b789e, 0x9cf4afdc, 0x8c0aef63, 0xc06fd8d3, 0x259cf4cf,
- 0x5f282ed8, 0x9c73f9d4, 0x3fb7f63f, 0x7e603205, 0xe7b2a685, 0x47d53b15,
- 0x7e431cb6, 0xecb8385d, 0x7f9a4cb6, 0xc88ff935, 0xcb530bcf, 0xfca4c8be,
- 0x9fb27f7c, 0x7d9647e5, 0x5bf21431, 0x44feacfc, 0xefc0f3c7, 0x633fc789,
- 0xaf507252, 0x41592d78, 0x3ae5c9b8, 0x02647402, 0xc7ae8625, 0xe309aaf4,
- 0x075c04f6, 0xba4d0b4a, 0x6ff77086, 0x07a3eedf, 0x812957e6, 0xf3b0153f,
- 0x5f803b71, 0xf403b2af, 0xdf7bb144, 0x8eff7b3d, 0x483e5df7, 0x1e027576,
- 0x16bb23ea, 0x5dfcd265, 0x1fa09f91, 0x3dd0724f, 0xc515f601, 0xf9d01646,
- 0x9c9b5d4a, 0xd5913fa0, 0xe11eb376, 0x745eedca, 0xfc28178d, 0xf3f7d95e,
- 0x61b2a5ef, 0xb18f309c, 0xefd40f9c, 0xe06ff2fb, 0x633fadf7, 0x7af983b1,
- 0xdb96c76c, 0xdb1aff40, 0x4c97f6f1, 0x39fbb30e, 0xc163de62, 0x97bf49ae,
- 0xfe709bb4, 0x5eae3b63, 0xfdc7f501, 0xe80b23b6, 0x5f31c264, 0x9e85b013,
- 0xaaa52fbd, 0xcf90e5ee, 0x39d71a2e, 0xc04ddfa0, 0x54aa53e3, 0xf478460d,
- 0x857c7832, 0xf1dd67f1, 0x68fd9b87, 0x7fdf54fc, 0xfa3afaa2, 0x20c97b91,
- 0xc433f83b, 0xbdad7a7d, 0x5d2568f4, 0x213efd1f, 0xa2106740, 0x6f3c4f4f,
- 0x98248ca6, 0xaad1252f, 0x5939b97c, 0x96c2bf55, 0x929f2aa9, 0x7caab574,
- 0xcaa7929a, 0x56311f4f, 0x7b06ff55, 0x637f2aa9, 0xfd5534c9, 0x2aa5474a,
- 0x536be79f, 0xd4382fd5, 0x423f2eae, 0xfe43c064, 0x90ebfb51, 0xa0749d16,
- 0x74f8b5d9, 0x8e90ebc3, 0x87030bfd, 0xed7c5ed6, 0xe1d7b6f9, 0x6b17b0bb,
- 0xfb0933ef, 0xc5b2cdf1, 0x276f0c6b, 0x0567d24e, 0x75901fdf, 0x18d33eec,
- 0x94eaebab, 0xa67de0a2, 0xa62f6089, 0x7c58a848, 0x00e347ee, 0x3d980b4f,
- 0x062028ed, 0x68adbee3, 0x686a3c87, 0x2c0fe678, 0xf81d200e, 0x5cc084de,
- 0x9e27bad5, 0x5dd6a977, 0xe0d56e4a, 0x5f2a8d69, 0x555dbb61, 0x06d24a7f,
- 0xe534f955, 0xdd3c1a07, 0x60dfcaaf, 0xd3c1a2df, 0x7e9e0d36, 0xf4172aae,
- 0x5aedc1dd, 0x451ec0fb, 0xcefef1d3, 0x75c3c072, 0x8f8803a7, 0x72d6ce92,
- 0x47b5d3c0, 0x855f10db, 0xb039673e, 0x0d43e2cb, 0xa3ea43af, 0xf76831e7,
- 0xa612635a, 0xb4151a07, 0x1c6c1d6b, 0x46a1e981, 0x75ff7e3b, 0xefa60963,
- 0x7d303a34, 0xa62a71af, 0x4c4e8d9d, 0xb0db1adb, 0xed8d73fe, 0xdb162ecc,
- 0x3a45def7, 0x75ba07d8, 0x8278377e, 0x77923f17, 0xeecbf003, 0xc86efe41,
- 0x37ec45df, 0x25f9a24c, 0xbee844c0, 0x374f29fc, 0x80023aa4, 0xca1c3757,
- 0x18f1828a, 0x1e473a6d, 0xa477e1e8, 0x3ea1f84c, 0x37bb909d, 0xd16c9338,
- 0x79a66f2c, 0xb3c63644, 0x83a1f84d, 0x2033d712, 0x33a9d04a, 0xf7c806e1,
- 0x7772b044, 0x401b84ca, 0xfec6ff0f, 0xff3f4773, 0x61291df9, 0x9ccfe7fc,
- 0xd760ac56, 0x70d5fc39, 0x30e3c02b, 0x48396fb7, 0x4d09619e, 0x0679f54b,
- 0x0747b390, 0x80f0b7b8, 0xe009b4ae, 0xcfb3a6d0, 0xebef78c1, 0xb7e8040d,
- 0x5fe7624a, 0xca95cf51, 0x0dcf41e4, 0x1d4f3c26, 0x605639d1, 0x7814995c,
- 0x3dbce00c, 0xdee112a5, 0x00640d63, 0x4e29bcfc, 0x3c0f8f96, 0xf243d926,
- 0x079f0606, 0xb6fc6e52, 0x3858f3e1, 0x62913cf8, 0xcfb6fa63, 0x807a0e91,
- 0x74a91fc8, 0x3e7ec1d4, 0x0ab8ea52, 0x4d38cfef, 0xad3aff6c, 0x3ed0abde,
- 0x139a28e4, 0x9219e762, 0xd1f7606a, 0xd82262e1, 0x71916f3b, 0x39e16b9f,
- 0x7be99521, 0xce702748, 0xe789179c, 0xf63a2382, 0xfeb6819e, 0xb3d70e02,
- 0xe3dbc283, 0xbd32a616, 0xdd566ca2, 0xfed02f33, 0x6045d67a, 0xe1ce3dd7,
- 0x34f58fa8, 0x342ae850, 0x8e70dd3d, 0x1cec9b95, 0x9dbfe824, 0xf2d17f0f,
- 0xfb6e3a67, 0xfd68efeb, 0xda45d64f, 0xaed88651, 0xc2ddf841, 0x358c2f2b,
- 0xb0b4fea3, 0xe40cbd2a, 0x07ee7ce2, 0x27d5645c, 0x1f503ba0, 0x1727846d,
- 0x9af25b97, 0xcac90429, 0x3c234ab8, 0xb69d5525, 0xd5d219a6, 0xc237eec3,
- 0xa3b52913, 0x1a833576, 0x70b7475b, 0xff3ff211, 0x5e748dbb, 0x0acbd78b,
- 0xfb89cf3b, 0x145735ae, 0xf7e822cf, 0xa23b8f08, 0xeaf3c040, 0xd16f0e22,
- 0xd787116e, 0x3fae1489, 0x0b9c90e6, 0x67d63f6a, 0x0b5def40, 0xc01ecddf,
- 0xe72ea0cf, 0x4607e3fa, 0xfdf6ae36, 0xe2ee3112, 0x3afe7654, 0x0994a462,
- 0x4c2de4fa, 0xfdcee40e, 0xba22aee2, 0x9b0edcfe, 0x1615e30e, 0x7800bdd7,
- 0x78e76de8, 0xd8769034, 0x4b3cb34f, 0x221df93e, 0x8f754e26, 0xd2f252f7,
- 0x0f2b69de, 0x7e859795, 0xbca87967, 0x96b4092c, 0xf2145c83, 0x7d4ff851,
- 0x2f9cd58d, 0x3d3f2037, 0xf31eb8d0, 0x983d1b07, 0xe16c6a1e, 0xcb15b97c,
- 0x987c69df, 0xe72f65f3, 0x7bf13bcb, 0x4c5ce347, 0x58ba35f7, 0x9519ca3e,
- 0xecc4fb0a, 0x37c4f8c2, 0xf80898b6, 0x4c3b7ae7, 0x77419f18, 0xc68f63c4,
- 0x0991fc41, 0xe36a71ef, 0xbe7cb490, 0xf7761e8f, 0x0525377b, 0x1f8be9c6,
- 0xc8717d02, 0x9874f4c0, 0x4be05628, 0xe4fe8ff9, 0x0fafd006, 0xf4158a2b,
- 0xcdd482e9, 0xac50ef2c, 0x7c2f9a06, 0x8a5de794, 0x22ac04d5, 0x5be421f5,
- 0x358a3d87, 0x1f8be682, 0x7b95887d, 0xc7f33a09, 0x06f5ba3d, 0x2259f00f,
- 0x51fd801d, 0x449cce6e, 0xd79505c5, 0x5f6007a5, 0x325a494c, 0xcec5f609,
- 0x63e90514, 0x00e1b29a, 0xd88fa7e4, 0xfaab87a6, 0x46de4b0e, 0x57165768,
- 0x937687a9, 0x846cd6d2, 0xdd879376, 0x376d0faf, 0x8daed475, 0xdf619f90,
- 0x74fd07a6, 0xf8b1f027, 0x2f223f60, 0x169f05ca, 0x3ba37271, 0x720f289c,
- 0x0f289ddb, 0x66ca5c04, 0xf60dde57, 0xc976facf, 0x663cc126, 0x0a417d4b,
- 0x86e89310, 0xbf2949f7, 0x97f53ebe, 0x5db40dd6, 0xc5afe43d, 0x7c370ffc,
- 0xa6fbfcbb, 0xfbfc30d4, 0x41dfbe43, 0xfbdc43de, 0x0ef64687, 0x2e1c33cc,
- 0xac4b1df5, 0x712a8ef8, 0x7acf4159, 0x97f3cb9e, 0x9fe5a520, 0x7f8bb4f1,
- 0x70e7407f, 0x52ab38c1, 0x3fb21fb9, 0x6549a87f, 0x8f735ffb, 0xce728064,
- 0x6d52ea1f, 0xea3cf7aa, 0x76bfbb08, 0x6d3b38a9, 0xee36b89c, 0x3b693564,
- 0x1fecc8e8, 0xf70b526f, 0xc7fab2dc, 0x6a788b7f, 0xdf781c6d, 0xe3106ab9,
- 0xd78173a7, 0x496eba50, 0xde4f901d, 0xdc38097b, 0xfd0e0e1b, 0x9c5843d6,
- 0x2438bf7b, 0x5daeef1e, 0xb3c57117, 0xfc798d77, 0x827d76bb, 0xafaffa2e,
- 0xcf5fc195, 0xbbf54fe0, 0x157e60c7, 0xec04ad8b, 0xe7fca156, 0x3fe11777,
- 0x6df67e5a, 0x179bc9f1, 0x65bbe1d7, 0xf3d8f861, 0xb898f8e1, 0x854dafcf,
- 0xf3f70a9e, 0xde121e20, 0x7e738239, 0x1f6ba265, 0x1653d3f4, 0xce0e9bf8,
- 0xddec71a1, 0xaed183f0, 0x325df0fd, 0x1106b8b2, 0xe75a7c97, 0x6de815f9,
- 0x19fa3def, 0x106eb3f2, 0xbc8f7416, 0x97a0f341, 0x7ce6c3fd, 0x4647e013,
- 0xefa18df8, 0x7f140f8f, 0xff2e1bfb, 0xdf6ca7fa, 0x9ed43889, 0xa0665f6d,
- 0xfb12eefd, 0x54782062, 0x70fde3c8, 0x24b7a43f, 0x9767e512, 0xdef07c44,
- 0x02695771, 0xe95d2dea, 0xdfa43d46, 0x2bb8b9f6, 0xd7fb1b3d, 0x92bb8f9e,
- 0xdc6ccc4b, 0x7d2153c9, 0x2154f92f, 0xf0dfbdc8, 0xa376cbf8, 0xb03de0a7,
- 0x408d8f0f, 0x928dfeff, 0xa0e010bf, 0xbd77573d, 0xc94ca0b5, 0xa6be7fed,
- 0xeb049beb, 0xbbae3dab, 0xe95dbe1b, 0xfd76bb79, 0xb9c408df, 0x15ff5b8f,
- 0xaeeff781, 0x19ddfcfc, 0x5e80ab93, 0x12026ae1, 0xf545d319, 0x54bf9a09,
- 0x8939d6fc, 0x4333d39e, 0x6f25e5ce, 0x9e2e38b3, 0xfde7cf6a, 0xed5c09de,
- 0x960dc751, 0x5e308afa, 0xe147f645, 0x7b1d60bd, 0x73c4436f, 0xf3cfc9bd,
- 0x243ebcf4, 0x9d70ed0d, 0xfaf3aa19, 0xfa2d1f1d, 0x0881f21e, 0xa678ef8c,
- 0xcc3c44fd, 0xf19d8ff5, 0x0945793f, 0xedbd77d2, 0x9df4246f, 0xbdf5dac0,
- 0x86d77caa, 0x7056fbe0, 0x5ff6ab1f, 0x2a25ca32, 0x4f105ac7, 0xdae335e4,
- 0xda4af910, 0xef07dcfe, 0x55df94d1, 0xbe50c002, 0xd074cf36, 0x3cfc9576,
- 0x8de44844, 0x92aeda0e, 0x67891f9f, 0xe78f2b25, 0xb09f3df5, 0xff28ba7c,
- 0x7fac4ce3, 0x1c6fe895, 0x79164f2b, 0x22d5dfde, 0x9623789f, 0x25f68a67,
- 0xc6239f2c, 0xaded64af, 0xade4feac, 0x7a099cff, 0x33f7e08d, 0xe09cb8d2,
- 0xfe5152ef, 0xba2b4cef, 0xba8ebc68, 0xf107cb9c, 0x1dc5550e, 0x85df22d3,
- 0x6126dc7d, 0x9ded61ec, 0x9fe7b406, 0x0dcb698d, 0xe22f2fbd, 0x56e59c7a,
- 0x7baaf38c, 0x9976f871, 0x1e813bc7, 0xe9fb9cb3, 0xfa0b642b, 0x65ef7a61,
- 0x3d207e7c, 0x97fae570, 0x57ecf855, 0x46d3dfce, 0xef1d2547, 0xf1130fdc,
- 0xd619027e, 0xee02f189, 0x27c44934, 0x116abde0, 0x45971757, 0x8e7e701c,
- 0xbd82297b, 0x13b27aa7, 0x97caeff9, 0x6ebb7d98, 0xef28ebcb, 0xda0aca96,
- 0xea1ebe97, 0xe9f0075a, 0x2f77b2b6, 0x87dfae57, 0xc531f4fb, 0x8160fdc6,
- 0xbcf0a151, 0x3dfc3fb3, 0x8517f169, 0x3ffcbabf, 0x5dad7f0c, 0x5fd7dc5d,
- 0x7ff1857d, 0x07e656dd, 0xfd3c73b6, 0x7314a749, 0xf0023d78, 0x33a2b73a,
- 0x20865a3b, 0x778c4eee, 0x29df90ab, 0x6f56f382, 0xde009583, 0xdccea5bb,
- 0x5fe0c30f, 0x9f38ae7b, 0x69715441, 0xf7c3fb3b, 0xf7d04be9, 0xfa4dffd3,
- 0x8dfbd44e, 0x1f1bf076, 0x15e5ef65, 0x18c9fe77, 0x4ef41f1a, 0x0f073b1a,
- 0x317e676e, 0x2c171711, 0x713c32b7, 0xd55d53ff, 0xffc1e33b, 0xc4c3f624,
- 0xfe06d248, 0x76d74a04, 0xa076d74e, 0x41bf416b, 0x176d143f, 0xfa41be06,
- 0x912e2c25, 0xfdb7e9c3, 0x25e1fae1, 0x7fe1fae0, 0xa9bbae13, 0xdbfe8c3e,
- 0x4ed02217, 0x3ff385a3, 0xbc055c38, 0x0e2fd323, 0xfff4c8e7, 0xd3239c0c,
- 0x439d9515, 0xf9207bc0, 0x988fc5a4, 0x5a7d9877, 0xe9f64df4, 0x4f9c1923,
- 0x8f18fde9, 0x4c7bc5ab, 0x27bc3f7a, 0xef1c5fa4, 0x30fff2ea, 0xd370b5de,
- 0x9fdcb5a6, 0x985efdab, 0xfbbf203e, 0x7ff8e056, 0xb0b04fcd, 0x7caabf61,
- 0x54b7faf1, 0xef3c4be5, 0x415f965f, 0x5bfffbd8, 0x57e105fe, 0x2afcf998,
- 0xb51ff81d, 0xc43741f2, 0xc7a023f9, 0x7afe5f1b, 0xf93087d4, 0xb78c6cea,
- 0xe5af8dba, 0xff7ff18b, 0xbe947be4, 0xf66068be, 0x3ffb63d6, 0xfd392837,
- 0x79e165e9, 0xd084711d, 0xeff5c2d9, 0x5acf401b, 0x54fbd848, 0xa60ffada,
- 0xc6307b33, 0x30be5149, 0x710c4cd7, 0x9dcc1f94, 0xcf4beecb, 0x0c630705,
- 0x2d173ea6, 0x3e27a99f, 0xd2bb8f78, 0xd099f7a2, 0xffbea9ef, 0x75efe26e,
- 0xd558b893, 0x21d9783d, 0x9cdf9c63, 0xbd2fe612, 0xf786d2c5, 0xba1de787,
- 0x60f7dceb, 0x14f46f2f, 0x7177b8b1, 0xbaf7f0ff, 0x3e83f12b, 0x9c2a7a08,
- 0x5fee24fb, 0xff40635f, 0x2addb6ba, 0xca669fbe, 0xd9a38b12, 0xc533e2c3,
- 0x3beba4fe, 0xf767ca64, 0x48f7e060, 0xbe0c1b2a, 0x67a0b9e1, 0xbb3d47ef,
- 0x2e4364be, 0x927eafe0, 0x1b94dc74, 0xafd4ef3c, 0xd7b60bff, 0x7a6d67f2,
- 0x4fd5f6b9, 0xbd210f1a, 0xb1d17388, 0xb5e61f46, 0x1305d6f6, 0x93fd17ff,
- 0x6de2ff63, 0x57dc7482, 0xd0c70f8e, 0x3b1bfb75, 0x827b2dff, 0xbb0823b0,
- 0x214ed682, 0xbf7888f8, 0xe36e97d2, 0xa296b2ef, 0x59e883df, 0xc1edf82a,
- 0xd7f9fcfe, 0x27d717e9, 0x61ffe5d5, 0xf4b97cfe, 0xe4c49b5f, 0x8fee96aa,
- 0xed0cfd56, 0xb5be82cf, 0x6200bff4, 0x0bf8e852, 0x27bc25ea, 0xab55773e,
- 0xcdf49c61, 0xe5b57de9, 0x9b830664, 0xb15c2e46, 0x3c78503c, 0x0be78c87,
- 0x0ec6bc93, 0x87810f76, 0xefd5d3af, 0x48bc5303, 0xab971719, 0xfafff2ea,
- 0xa2e4e039, 0x8917266f, 0xcb489f4a, 0xe56eb7e8, 0xfe56eb12, 0xb90eeb9b,
- 0xd648b4a7, 0x40e3ef05, 0x81c435fc, 0xfdd978f6, 0x56ccead4, 0x5203ddfc,
- 0x7e028812, 0x995eeb77, 0xde8e9efd, 0x790e4fa1, 0xf88bad3f, 0x5c7a003e,
- 0x3bbd9e35, 0x769b8da4, 0xd5ea78f3, 0xbf78dd96, 0x9a0efb51, 0xf376cb88,
- 0xa1efb4fe, 0x9de2f689, 0x687bed03, 0x1c783253, 0x67c93e76, 0xe7172971,
- 0xcf20dda3, 0xdd35fc43, 0x5da2355f, 0x4ac7c6e1, 0x05a4f3ee, 0xa32732f1,
- 0x9799df1f, 0xa3be7171, 0xd32fff2e, 0x81dd6caf, 0x94aed8f7, 0x3a64477e,
- 0x827f40dc, 0x9bf1f72f, 0x6fc84c57, 0x3df33bc4, 0xf7c19e6b, 0x24be7a87,
- 0xd4bd97e8, 0xbb13e33b, 0x09c435d8, 0x564dbd27, 0xc771d78a, 0x1baf683c,
- 0xf785c47b, 0xe3bcc776, 0x927f8880, 0xc329c077, 0x233e85bc, 0xff209bf4,
- 0x78d9f7e2, 0xf2e5d59c, 0x37f52bb9, 0x8cfca7fe, 0xb75d5ee2, 0xbf0457c1,
- 0x13d9e0e7, 0xa103bf81, 0x1993ff9d, 0x93be8bba, 0x7ae511dc, 0x8b34f012,
- 0x5ef109cb, 0xdecc8572, 0x94edef49, 0xeeb85ed1, 0xabfb7e7f, 0xdb4b9547,
- 0xbd6dea15, 0x2265cf61, 0x7bb1d7ad, 0x0ae5f92d, 0x79c249c6, 0xc287ec0d,
- 0x7fada89e, 0x297b294a, 0xe35bed03, 0xdeda3df1, 0x067cd987, 0xfafd57c2,
- 0x603f8e00, 0x7ebf1f39, 0xf42d916f, 0x19f3959d, 0xdb43e77d, 0xdf107329,
- 0xff174b97, 0x332dced7, 0xa891e265, 0xbc91cd7c, 0x4c25ef4c, 0x3a405dff,
- 0x02445d31, 0xbb08ba98, 0xd894b0cf, 0x9c4e5d31, 0x5c55ae98, 0x3718195d,
- 0xf8c04814, 0x39ff17d3, 0xdce6e80a, 0x215ae375, 0xf2dbf146, 0xbef56923,
- 0xbd853c9e, 0x3fc2f497, 0xd9f4077f, 0xcb863ff3, 0xa2471be2, 0x1848641c,
- 0xa57bd33f, 0x54d7bb32, 0x17dc5df2, 0x325dfde2, 0x96e1f989, 0xfee26627,
- 0xc4cdf209, 0x826f826d, 0xb8e7393f, 0xdcc3c58e, 0xc67be12a, 0x6e1d7eed,
- 0x99159cb1, 0xb9ddf85e, 0x3cacddf6, 0x2bda2ea7, 0xd8ce7cd1, 0x26be5608,
- 0x4ff70bda, 0xee3f1216, 0xfb70c5cb, 0x4bfb8644, 0xdf8a2f8c, 0x8227a00b,
- 0xbf6d12a1, 0xeeccc3a2, 0x30ff1051, 0x70b9ffe0, 0x4f170a7e, 0xf1f18433,
- 0xbf1943d1, 0xb6a65d1f, 0xe5e50c87, 0xdf6529f7, 0x0cb47807, 0x810d6471,
- 0x9b9d31fa, 0x3b042c1e, 0x9f808e90, 0xb7b1b3e6, 0xb4535fa3, 0x1b29517b,
- 0x959ff501, 0x67bfbc58, 0x4c6fbf2f, 0xfea10902, 0x794ab5b1, 0x1a2bd42e,
- 0x40d3f8c2, 0x51df8570, 0x2f398674, 0xd1d1f88a, 0xbb436670, 0xd277c13c,
- 0x7e58959e, 0x6f5be3f2, 0x5f4168dc, 0xe3f40782, 0x44d71517, 0x91b46f18,
- 0xffcc0b10, 0x1b7efca7, 0xbb00c869, 0x4cc7aae8, 0x3e2fd03a, 0xee18e2a2,
- 0x9e81fcf7, 0x98fd21b7, 0xfd219b9e, 0x43373d23, 0x9b9e9c7a, 0xcf413d21,
- 0x38ae90cd, 0xccc7876f, 0x8e2189c9, 0xf0bfd0b9, 0x5be769f7, 0xd7f18439,
- 0x1af7f1be, 0x14fbfc71, 0x0fb7c217, 0xfe087bdf, 0xd1b3edde, 0x8b989481,
- 0xdecf5bd0, 0x749fa3bf, 0xff08b820, 0x0a7cb6a3, 0x6eee6bc7, 0xdcffca3d,
- 0xcad47f76, 0x872b7a90, 0xce7db118, 0xa0a3270b, 0x1f5b6eff, 0x5d121d7c,
- 0xcb1d4cf2, 0xb9fc7caf, 0x828e371b, 0x0fbe3bf9, 0xf9f74174, 0xdb73486a,
- 0xfc00fb7f, 0x1fed2a5e, 0xa47a24e3, 0x2f3c66c0, 0x85a1d668, 0xb3d75883,
- 0xa09b9dd1, 0x0c2fb3fd, 0xbe509585, 0x01bed843, 0xadc2923a, 0xe7ae0377,
- 0x10a05346, 0x78dd0bee, 0xa71b0e41, 0x8a529f7d, 0x7a064e70, 0xcaddfe43,
- 0x025f4653, 0x7f74df6f, 0xe223fb6b, 0xa5e015fd, 0xafa5c80a, 0xac2f40a2,
- 0xed04f1e4, 0x06fd87bb, 0x2439e9d6, 0xdabdf813, 0xfdea31d1, 0x49e38bb7,
- 0x8d7b39a4, 0xb3d7c04e, 0x830d4f7b, 0x7e2989f7, 0xc57bc186, 0x455c86db,
- 0x31ef7f42, 0x8f97ec67, 0xf587183c, 0xe769f7f1, 0x10156d91, 0xbcdf3337,
- 0xd2580dff, 0x0af1db42, 0x9c599b88, 0x74841d24, 0x19399289, 0x7dc465e2,
- 0x4a236583, 0x9a96c20f, 0x7d44af61, 0x4c9814ae, 0x37287c88, 0xd847f247,
- 0x558a3c85, 0xa524a7e5, 0x534feaaa, 0xd3e554b2, 0x95548c47, 0xd867718b,
- 0x46f5540b, 0xc28604c7, 0x83ae8e79, 0xa4ff03bd, 0x3986f18c, 0x7eb91c2f,
- 0x7ccfcd24, 0x43be0e6a, 0x9f2f2cf9, 0x17b95cf9, 0xe143d1f0, 0xd50aa469,
- 0x6c1f92e9, 0xd33a107e, 0xf76a179c, 0xa1d0713e, 0xe179c65c, 0x8184e712,
- 0xed164fde, 0xca04e7b5, 0xbe001f37, 0xd9c5fa39, 0x3edbf036, 0x14cbf63b,
- 0xcdfd4788, 0x0caefe10, 0x440c3f3f, 0x43fb7d37, 0xbcdd1852, 0xc3279325,
- 0xf2440dd0, 0xe9643a32, 0x7eecc3cc, 0xb5f295de, 0xa1af814f, 0x4243c0bd,
- 0x6b577f7f, 0xb46d1bdf, 0xb036e6ff, 0xc32bbd47, 0x7bbea50d, 0x6fe12b93,
- 0x64ef4839, 0x93bbb6f9, 0x07bbf0e3, 0xb73761f4, 0xb8520df7, 0xffbb553e,
- 0x60c2e4ee, 0x5c775939, 0x29dc9f55, 0x1bf2ab35, 0x7bf9d533, 0xdb439b4b,
- 0xad8fa40f, 0xd189787c, 0x6ef70395, 0x37bfb0a5, 0xfff3e62c, 0xd5800901,
- 0x00800020, 0x00000000, 0x00088b1f, 0x00000000, 0x19b5ff00, 0x6554540b,
- 0xcef7bbfa, 0x7860190b, 0x77421028, 0x840a4498, 0xb0285789, 0xd71ea08d,
- 0x8f4c1ada, 0xe5935654, 0xe04d7903, 0xe3d8f4b6, 0xb68fad18, 0x695b6d07,
- 0x9656356b, 0xb139e4e7, 0x36254644, 0xeb495bae, 0x66a254d6, 0x42d899e4,
- 0x7a26704c, 0xf75a9d6d, 0x5efffefb, 0x5b602e67, 0xe74ed69d, 0xffffef9f,
- 0x27ef7ffb, 0x70ca1490, 0xf60800ce, 0x9970ccac, 0xd6f70601, 0x8018e41a,
- 0xe2eff4d3, 0xf9b8daf0, 0xcf03837a, 0x3a380057, 0x01cfe3cc, 0x031401d6,
- 0x7b81295c, 0x5914f3a2, 0xff60e760, 0x2fbdf165, 0x00490801, 0xde8d179e,
- 0xe38456ed, 0x6e67065c, 0xf08d9ef8, 0xd918064e, 0xbfefff72, 0x25f5c22a,
- 0x8c0ee3b0, 0xbd0d43c4, 0x5c75d79b, 0x9fcd7114, 0x77d1d704, 0x98809679,
- 0x55706b80, 0x00e207b4, 0x3c636a9a, 0x679e3e62, 0x2e990e86, 0xa6f1c804,
- 0x01529bb5, 0xc0228a8e, 0x78074439, 0xe3ef14a0, 0x245f041b, 0xa4ef39a7,
- 0x67c8b9df, 0xe7c72fcc, 0x35b03245, 0xe99432bf, 0x7ed77bc6, 0xbf1ce787,
- 0xce91a203, 0x8f4a803f, 0x2e5c4c03, 0x19bdbb04, 0x3f233ffe, 0x1812bd4d,
- 0xf0619f90, 0x605aba1d, 0x25a9ccd7, 0xa7e92b00, 0x061dfb4c, 0x196fd890,
- 0xb2957f70, 0xe57cb2bf, 0xf08b10a5, 0x2040fd56, 0xa70fbd9b, 0x7fd5fff1,
- 0xf198376f, 0xda53f57c, 0x530f427d, 0xfbe52f90, 0x77b3f03a, 0x3de277eb,
- 0x235ef853, 0x8499fddc, 0x697636ef, 0xc7245fbf, 0xe0e60d24, 0x3828028f,
- 0x916db12d, 0xf1366bde, 0xdf4ae6f7, 0xea097bbd, 0xc925e9b1, 0xd1adc46e,
- 0x00e2d3dd, 0xa83983f9, 0xf2f1d4e4, 0x7a8cda6b, 0xa7c30c2a, 0xbdf34b76,
- 0xca175f74, 0x5e319ea8, 0xe2c960ce, 0x5f1c2cf8, 0x0bd7c573, 0xf735f101,
- 0x7ed0d0e8, 0x1146cf00, 0xcf3c08e4, 0xf2ae31ed, 0x07f4c39a, 0xfcc79c2d,
- 0x66132f9e, 0x1d75f2e7, 0x1ced01d1, 0x98bf702f, 0x093e8021, 0x60b541f1,
- 0x5f4b8edf, 0xc925724f, 0x9f004fbf, 0x589ec05c, 0x5cfd4264, 0x7da39fc1,
- 0x8fe02433, 0xe11d6c94, 0xfdc8a859, 0x7c7fd28a, 0x8b77e023, 0x434f167b,
- 0xd750e00a, 0x0cd43eb2, 0x0c6d906c, 0xe8f012df, 0xf17d2f49, 0xa416f4ce,
- 0x604932f3, 0x1f17dff6, 0x7f10161a, 0xd0b4455f, 0xeff7b026, 0xbe55f7a4,
- 0x74cef4fa, 0x8e6be337, 0x26f51065, 0x4e902e38, 0x47978f47, 0x183aae58,
- 0x712bf554, 0x9c7f2177, 0xfb3edd2b, 0xd1dbf412, 0x223160a5, 0x684b97bd,
- 0xabc30838, 0x49ab8fdc, 0xae5133d6, 0xe8813f82, 0x6e17966c, 0xe4bbf191,
- 0x97417bd6, 0x1d372118, 0x2f5533f7, 0xd3c71caa, 0xf72de2a1, 0xd1bf903b,
- 0x9fb3021d, 0xecccbb46, 0xe4fcb1b3, 0x7f104b83, 0x0bb25fa3, 0xeeb2cf48,
- 0x7e4847bd, 0x403b6b1d, 0xe7401e77, 0x2eb66eda, 0x527d9933, 0xd98be31e,
- 0x414688ad, 0x6056ad7c, 0x7c35a7c6, 0x0c7be61e, 0x7c3f51bc, 0x3804ce06,
- 0x9e7ceb18, 0xfbf35bf8, 0x1096fbcf, 0xe3ca1bb5, 0xad901f99, 0x3fa61b9d,
- 0x466000df, 0xa3db43fa, 0x3c304fbd, 0x6125af68, 0x27b13d7b, 0x9e0b70eb,
- 0xcf53789f, 0xc55f75cc, 0x416ea7c3, 0x68f6cd78, 0x0913e726, 0xf3a549e0,
- 0x3d7ba627, 0x18a56178, 0xb9216fea, 0xb7d8c677, 0x47f1f9e3, 0x13bb3fa7,
- 0xa767dae1, 0xf24c89d9, 0x7de5191f, 0xda011d3c, 0x7d953e7f, 0xef1c2907,
- 0xa52fc50a, 0x57dc6667, 0x24cfe74e, 0xaa8f09da, 0xa347a1bf, 0xfaf2801c,
- 0x16797c12, 0x8293bd34, 0x87c45067, 0xc79dc4eb, 0xdbcbb404, 0xc483ae0c,
- 0xfe47eaf7, 0xf90ca87a, 0x37e67aa9, 0xfa44cf0f, 0x0c9bdf6a, 0xaf584ef4,
- 0x6c40e2e3, 0xdffc9720, 0x915ffdc6, 0x288edd4f, 0x7111c7cb, 0x5e23a1a6,
- 0xa7753703, 0x888e3e5a, 0x65c07537, 0x8e82f50c, 0xfc57a9f8, 0x1d745eaa,
- 0x201aba25, 0x8422e3c0, 0x0557640c, 0x78a0e092, 0xc8c7ba10, 0xced63fe4,
- 0xd1916546, 0x8b822f51, 0x224179e2, 0x6fc85298, 0xc17e5c5d, 0x713fa67b,
- 0x9266ca17, 0xfbf54135, 0x223385ac, 0xf1bfb3ed, 0xcdcfb215, 0xc39f6646,
- 0x8d2f3c51, 0x391ff3c5, 0xc2bf7d9e, 0x87f305f4, 0xf5a4f933, 0x17385adf,
- 0x0aef141c, 0x9bc2e4da, 0xf4516d70, 0x29c52c72, 0x1dee4958, 0xc03aeff7,
- 0xe6f5b9e4, 0xbf7784a3, 0x0e0fe999, 0x0507b970, 0xf7d5279e, 0xa0429856,
- 0xb77a242b, 0xfdd57a31, 0xf3872dc1, 0xe0fef85c, 0x8079390b, 0xf24cbcf6,
- 0xe505fe0f, 0x7fc62a73, 0x3b5c36ec, 0x853bbff7, 0x7a58e0f2, 0xd9475beb,
- 0x2c3c2a7b, 0x4f5438f0, 0xf7fee07c, 0xa264ce99, 0x8b08c313, 0xcd77030f,
- 0x2b9bf260, 0xfe91c6e5, 0xf4b42783, 0x7d53a58d, 0x7d4fa55f, 0xcd37bd5f,
- 0x8e39322a, 0x9479fc18, 0x90cc5ff7, 0xc48d7c35, 0xcde2f3fa, 0x75e16fb4,
- 0x11c20244, 0x3f4356f6, 0x2635bf51, 0x5f48db8c, 0x6f608d4d, 0xa1efb603,
- 0xcfab89b8, 0x29f5e785, 0x03722b2f, 0x78ed09a7, 0x906762b1, 0x6eecc894,
- 0x5ebb7011, 0xabedc4ac, 0xe80f43a0, 0x75c4472f, 0xfda035bf, 0xa03c770d,
- 0x5dc64577, 0x74f151cf, 0xbb8f4fce, 0xa6c84a9e, 0x31623df8, 0x9e66bab3,
- 0x7bb1d055, 0xf74e90e9, 0x43eedcf0, 0x1a74f03b, 0xcf486ded, 0x8173a2e3,
- 0x6f53e515, 0x1f91e5f0, 0x2e3ddfa8, 0xb0fed8ba, 0xe9056070, 0x3cbe741b,
- 0x1bc4ef56, 0x233ac040, 0x77eed938, 0x25f65d50, 0xd412b8b0, 0xe2274d43,
- 0xdd12aa2d, 0x3c70e69d, 0x5167e354, 0x5ca3ee0e, 0xf55f682b, 0xa29bb457,
- 0x30fded7c, 0xb8f85a3f, 0x515dfb95, 0x98eee8a2, 0x576e5fc0, 0x1d06dcf4,
- 0x9e67f712, 0xb914cb47, 0x4522dd63, 0x4a8d2dd6, 0x9ac60f9c, 0x53f2123c,
- 0x82f51bb6, 0xb7ce9970, 0x028c5697, 0x9dfa8ad0, 0x5efe3e05, 0x539e36b7,
- 0xf2ca590e, 0xcbcb0e72, 0xaf7514b3, 0x7a40dc17, 0xc2fe5135, 0x2fc12ef5,
- 0x082bfad1, 0x1601d5b1, 0x06a8ad1d, 0x2701ceb6, 0x9b81e75b, 0x9da1f3ad,
- 0x83a00bad, 0x9f8297ad, 0xaf8170ad, 0xb83e580d, 0xf98832dd, 0x52bd7e18,
- 0xb45cbca4, 0xfc7ae264, 0x675e4571, 0xf9460797, 0x8f2f9f92, 0x614ae079,
- 0x2e4d9d74, 0xdd99b353, 0x923172ab, 0x5177e27d, 0xb5a14de0, 0x9dec0202,
- 0xf190c98b, 0xeb20d99d, 0x0702ae08, 0x48fee783, 0x5c069479, 0x04ee573a,
- 0xd89aa972, 0x7628764e, 0x5d364c52, 0x87d7d61c, 0x54171e56, 0x49d201bd,
- 0x3ebc60f9, 0x2ce88321, 0x5cfcae8a, 0xdaf190c6, 0x3a9df7b6, 0x8545a38c,
- 0x84362d95, 0xe53f590f, 0xf3e55970, 0xd91f0899, 0x9ba9d276, 0x595e7649,
- 0x1563b7a8, 0x39d86eba, 0x184dcf07, 0x4e50a3fc, 0x52824d92, 0xb1515d5c,
- 0x677e8079, 0x74ebac94, 0xdbc657e8, 0x5719c580, 0xd59f8014, 0xe46568b3,
- 0x7ebc3527, 0x7ae1635d, 0xbd706bdb, 0x60da7e4a, 0xc8cac5f2, 0x73759ad3,
- 0x6e5f2993, 0x3ff7f030, 0xbce72c9a, 0x22327282, 0x9841533e, 0x44f73c4f,
- 0xe4101c21, 0x186b5696, 0xf7c08fff, 0x77c21fcf, 0x5deb8a6b, 0x594dcf16,
- 0x534ffa40, 0x676779f2, 0x245d8a59, 0x3f8616fc, 0xd2d59310, 0x5207478a,
- 0x7bd21f9c, 0x776e18c1, 0x5eb95a1f, 0x329ab6ce, 0x27581f1d, 0xfbae0fd8,
- 0xe28f977a, 0x084827d0, 0xd599f3be, 0x34744035, 0x428c8189, 0xd11dd7d4,
- 0x27640cc7, 0xa80b1daa, 0x7fc8a5bc, 0x06bc039b, 0x96e51fe6, 0x2d98f533,
- 0xe5c107ec, 0xa5ca1ef8, 0xd2ace9c8, 0x8d4971e3, 0xa5ad1f7b, 0x3c2af6ae,
- 0xa2413c21, 0xad0a49a7, 0x1538bb20, 0x13e19eff, 0xb953a7e6, 0xabf12a3d,
- 0x6def4f67, 0x42741c69, 0xed361f84, 0x0fabe6f9, 0xa34ddf50, 0x0f5d3b66,
- 0x1696dffb, 0x1ab00bea, 0xd517c4d4, 0x1b75672f, 0x955fd47d, 0x397f3eed,
- 0xaf78abdd, 0xfa7146df, 0x1b40fee2, 0x9ec1cb95, 0x2fdc69c3, 0x6bfe3ad4,
- 0x93e4e7e1, 0x750512b9, 0x905ecbac, 0xabe446bc, 0x050bea99, 0xdc7c20f6,
- 0x47137684, 0xa9bf79f7, 0xbd0d95f6, 0x2fa819af, 0xc43c67c1, 0xff42ad6f,
- 0x5c37adaa, 0xad62f54a, 0xd62fdb57, 0x13d77bf5, 0xb43b75dd, 0xd9bf6afb,
- 0x4f5f7ca4, 0xea8d9af5, 0x26bded3e, 0xa8f337ea, 0x67eed3fe, 0xa6fd2a66,
- 0xa940ff92, 0x772a5733, 0xeb333f88, 0x235e0cff, 0x68fcd6f2, 0x7042dd8b,
- 0xe2ced9ba, 0x6606fd6d, 0xd87deac7, 0x446cc89a, 0x8ddadbf5, 0x5567eac0,
- 0x1b4ff98e, 0xfc8fa41d, 0x5d5993a7, 0xab3cf58c, 0xf09e3047, 0x22651efe,
- 0x173ddda0, 0x1114ca1f, 0xd93d73cf, 0x5fb7aa76, 0x7f80eb5e, 0x4fa6179d,
- 0x4ae7f7ad, 0xd40cab3b, 0x7d88dd47, 0xf7356e14, 0x4266d93e, 0xd01379b8,
- 0x3b2d240d, 0xe2a6ea8e, 0xe97812fd, 0x503ef01d, 0x1a976b8e, 0xbeb0524f,
- 0x9447908f, 0x09e4093c, 0xa2a379fa, 0x04ece8b7, 0xfd8c79cd, 0x71f7cd1c,
- 0xefda99a5, 0x6e7f0e3d, 0xfe73c509, 0x02e3bc7d, 0xc136fdcd, 0xcd91f76d,
- 0xbc3ff004, 0xb188ae57, 0xcd82bfc2, 0x17e7121d, 0x886fee68, 0xec9fb79d,
- 0xdd0bfcbb, 0x4b703b18, 0x01657764, 0x2f504780, 0xe7b586b3, 0x21db07c5,
- 0x1d47f116, 0x57998dd4, 0x16ea8078, 0x5fddaca3, 0x54c5daae, 0x4399aa98,
- 0xb73b919c, 0x9121d1fb, 0x9c0945d9, 0x117970f7, 0x5e0f59c9, 0x46dcbc79,
- 0x66fba569, 0x598bf303, 0x8a762dbd, 0x5a9d9347, 0x68d727f9, 0xd2ea9fe5,
- 0x956d3bca, 0x6ee9de56, 0x6dcfbcad, 0xead7cad5, 0xb6cfcad1, 0xfee69671,
- 0x0d4af6b4, 0x02f37d3c, 0xbdf3fdcd, 0xce70350b, 0xf734ab8e, 0xd32c7467,
- 0xaf77e79c, 0x76ab9cd6, 0x2e0ed636, 0xb18f35f4, 0x08c41133, 0x567aabfd,
- 0xff70a0ed, 0xff3c1aad, 0x1f6ffd6f, 0xc8a7ffa3, 0xbdde31d7, 0x37743b15,
- 0xd165ebb9, 0x999dae3c, 0x59eb9427, 0x9cb6f6bf, 0x77431558, 0x54e6fc95,
- 0xff941bf2, 0x0e16c391, 0x7df6fddb, 0x8e0acf14, 0xae88ab38, 0x1f7b80a2,
- 0xa192f385, 0xba38aaf6, 0x32d900ef, 0xe89c8f85, 0x02a5e5fe, 0x857e34e8,
- 0x3cb094f0, 0x93acf0af, 0xd972f0e2, 0x1636de77, 0x4be9c96e, 0xa120f3c2,
- 0x6a85e794, 0x22e81447, 0xeca152e4, 0x29e92503, 0xf3abf961, 0xc007ec23,
- 0x6880fd40, 0x2759b6f2, 0x9bec77aa, 0xb98f5625, 0x86f67d58, 0xa57362e0,
- 0x067f2645, 0x25dff7c5, 0xc4cedebb, 0xb44a76b8, 0x139eaccb, 0x54c46d02,
- 0xa4a31890, 0x76afa9e5, 0xed871f50, 0xff2bcd2c, 0xb56d4774, 0x6f919727,
- 0xb6f09409, 0x67f5fe99, 0xa2648eba, 0xcb0407fc, 0xf6fc42fa, 0x332759ad,
- 0x25197f28, 0xcd014494, 0xe9471c4f, 0x24dcccfc, 0x78c3f919, 0x0f65101b,
- 0xab73c289, 0x9390fac0, 0x08ccd8f4, 0x67a8ddff, 0xbe940b79, 0x5e451aec,
- 0x54b65f6a, 0x4f803fc1, 0x7e78c2ac, 0x4c1bf74d, 0xc329752e, 0x6dca1cca,
- 0x37fbe0b7, 0x5017354c, 0x428e0a5f, 0xd5e7ef3b, 0xfa4d2d3e, 0xed557929,
- 0xbf76cf6b, 0xd348652e, 0xc4cec32f, 0x6391e709, 0xbe615ee7, 0x9b03fbf9,
- 0x1a1e59a2, 0xe6ce94d8, 0x4ff7e18b, 0x71aff7b1, 0x93b1a3bf, 0xd7df5aeb,
- 0x9ef5dfd8, 0x81e7348f, 0x0d3e90a4, 0x4aec0ff9, 0xca35779d, 0x6305e46f,
- 0xa8abf509, 0x715b38b7, 0x3dc0fdf8, 0xbe10e7d3, 0xa7e7cdff, 0x6bfdbe4c,
- 0xa6ee6cfd, 0xed6c79f2, 0xd6070611, 0x963ad806, 0x55bfb54f, 0x20dfc357,
- 0x90e5ea9b, 0xd13ec930, 0x1b31e6bc, 0x67fa83ef, 0x7ca4a94f, 0xd06e9fdc,
- 0xc38b35f1, 0x0e26a5a9, 0x6b4f9bcf, 0x48de6f50, 0x00bf6dfd, 0x61ed010e,
- 0xec7ce0a9, 0x3abbe47a, 0xa36b95d9, 0x2875727a, 0xd0fe874f, 0x56b81f94,
- 0x1ebe7d40, 0xfe27caef, 0x53062c05, 0x599d1813, 0x890c37d4, 0x63013c9e,
- 0x764f4dc1, 0x3ffd5357, 0x8a73c934, 0xadad7632, 0x83fa9aab, 0x8ff70321,
- 0x847c9e5b, 0x16b7c3f9, 0xe77a4cd7, 0x31f24113, 0xf0497e7b, 0x16f6676e,
- 0xf88c2c30, 0xaf3a82bb, 0xfa9b7ea1, 0x5f14609b, 0xdff8db67, 0xacc72a5f,
- 0xae3c6d4b, 0x316b42dd, 0x2ceee5e9, 0xe8ca7bfa, 0x5f902366, 0x48873e20,
- 0x5cf87af8, 0x40f20a76, 0x6f560d75, 0x78285fac, 0xf8a3e8d5, 0x1675cea1,
- 0xbdecacdb, 0xd79f1b24, 0xf467dd24, 0xeefcb1b6, 0x3a46a8cf, 0xef58f5db,
- 0x1070df50, 0x846d1cd8, 0x7a1161e6, 0xfde36c59, 0xc008dc17, 0xd1b793cf,
- 0xc0ef4379, 0xdf278a31, 0x24aeba67, 0x72880c39, 0x8693c509, 0xdeacbdd8,
- 0xecc9b66b, 0xf197a43b, 0x4ee1718f, 0x382fcfd2, 0xfa455c0f, 0x7805306d,
- 0x03ccaadd, 0x91b726cf, 0x7287fee5, 0x9ecbfcd9, 0xf4a9f441, 0xb6cdfd22,
- 0x4a9db988, 0x1f9df87f, 0xae750bf6, 0x5b83c2a5, 0x9f916436, 0xbcf85ea1,
- 0x5bae1251, 0x0ff7c138, 0x963efa2a, 0xfe10f4e2, 0xf73e6ed5, 0xfb4ddb0b,
- 0xcf4947cd, 0x271666bd, 0x58fdf6cf, 0xf4f61b3e, 0x9f207932, 0x117f12d7,
- 0xc91bde7c, 0xe1cf48e7, 0x9fe57287, 0x9d305fcf, 0xb8291dff, 0x190c9bed,
- 0xfdd86fbf, 0xbbb211c6, 0xdf1e7506, 0x18339da5, 0x9aff6127, 0x1d1cd130,
- 0x5d444fb3, 0x9fbf5466, 0x8f7ed3aa, 0xa9cd3905, 0xa9dfc9bb, 0xbff0717e,
- 0xb8f9a98b, 0x846be94b, 0x1f2cebc0, 0x87f23e10, 0xe58782f5, 0xd18e7545,
- 0x23de6026, 0x585abadd, 0x35ce909e, 0xbb9ec9db, 0xe4fd06d0, 0x34473a07,
- 0xe98f5f9b, 0xb835bfa3, 0xd6fc9176, 0x4c9953bc, 0x9be7767d, 0x8d7e940b,
- 0xc562a9e7, 0x3f098b2e, 0xc71b9fd1, 0xa148b5ec, 0xf584c4af, 0x7c852650,
- 0x15e7c48f, 0xd363fcca, 0xef3e9aeb, 0x685932bf, 0x001e0053, 0x00000000
-};
-
-static const u32 usem_int_table_data_e1[] = {
- 0x00088b1f, 0x00000000, 0x51fbff00, 0x03f0c0cf, 0x1915c58a, 0x19d44418,
- 0x18344c18, 0x20685618, 0xb58969c4, 0x9fd329b8, 0x90c0c2c9, 0x40b9c40d,
- 0x7cc40f9c, 0xfc0c0c4c, 0x17ebc44c, 0xf5b04514, 0x84181904, 0x026ffc80,
- 0x85d70c0c, 0x8bbe1818, 0x03083030, 0xf1402ef9, 0x01ce2004, 0x58a06f62,
- 0x045e900b, 0x2c40ddc4, 0x7cdf8a22, 0x6bf20251, 0x37f95185, 0x847bf8d1,
- 0x1057ebf0, 0x47af2fc1, 0x161b1e40, 0x3e3f22d1, 0x3bd02922, 0x015f5810,
- 0xc7265f95, 0x0f27d0c0, 0xb8a87f8c, 0x4bfc9201, 0x0e5cbb20, 0x6096f6c2,
- 0xf2062860, 0x9bb0150d, 0x2f9403eb, 0x857dca01, 0xcc0003ca, 0x688cbacc,
- 0x00688cba
-};
-
-static const u32 usem_pram_data_e1[] = {
- 0x00088b1f, 0x00000000, 0x7de5ff00, 0x4514780b, 0x74f570b6, 0x9924ccf7,
- 0xf2124c99, 0x00493de0, 0x210e0311, 0x9970c044, 0x46a2c024, 0x141a020d,
- 0x84920275, 0x7e889790, 0x33feaeec, 0xa8a08901, 0x17acb9f1, 0x376141dd,
- 0x0c06e8b2, 0x60e03518, 0xba2eb300, 0xe7c045c1, 0x36bc8026, 0xb9f04324,
- 0x9d6f5ecb, 0xdd33d553, 0xc7c4099d, 0xffef7fde, 0xa55f9f8f, 0x71ebaaba,
- 0xe9d4e7de, 0x278f3242, 0xdf210a64, 0x9f968fc1, 0xf890848b, 0x206f5950,
- 0x8841922e, 0x6df466d7, 0x88cfa64c, 0x68e67a23, 0xd11d985a, 0xef435837,
- 0x9ed5cbe9, 0x186934a4, 0x1c790f21, 0x23e21258, 0x5ab54e84, 0x16f50520,
- 0xe0933ba8, 0x213e3a7d, 0x0ff8e8fd, 0x30f8e246, 0x1a18992d, 0xe0311067,
- 0xf9ad537b, 0x421eb089, 0xc3e6476a, 0x213b16fc, 0x9f9f7bfd, 0xa7eb888e,
- 0xf7c10def, 0xb59efa24, 0x6529efef, 0x13fd0e57, 0xf66ddff6, 0xf108146d,
- 0x71c7fcef, 0x8699ef58, 0x5d130e39, 0xd9b7766f, 0xbeea053f, 0xb6899f6c,
- 0xc0fb6ee7, 0xe5fed126, 0xda0944db, 0xf0233b0d, 0x8275cefd, 0xa695ea0f,
- 0xa4ae47d6, 0x9136c0f5, 0xf8d30f3c, 0x4092062d, 0x613371c8, 0xeb15873f,
- 0x5aac1145, 0x6dd77ebe, 0xa3495f30, 0xf7d04489, 0x2e88378f, 0x28d92fa8,
- 0xaf9d08f8, 0x8c07b5fc, 0xc578e803, 0xc23ea13e, 0x1f1beb41, 0x3ee83a33,
- 0xd8fcef97, 0x7ce146d6, 0x0e2663da, 0xa4a3ee23, 0x52908d3e, 0x03e996df,
- 0x9ba5fbe8, 0xbe802705, 0x4ed9dd74, 0x83695f58, 0xe3a45c3c, 0x228bae2d,
- 0xee6c918e, 0xd7cc08ef, 0xdbe7147c, 0xf9830f26, 0x65dfce4a, 0x6ee52404,
- 0xca2e9193, 0xdf603ae9, 0x3da7bcca, 0x053e716b, 0xd22fe59e, 0x8abf68f9,
- 0x6c270597, 0xdc40c2b1, 0xd7ce8eb0, 0xb05e0a22, 0x8daafec0, 0x7deaac23,
- 0x7d876e14, 0x1bd6fa94, 0x7d605ba7, 0x69458deb, 0xd716f1a1, 0x7d76a7fd,
- 0xdd13536b, 0xf3e3bd69, 0x748287b4, 0x4a5ea844, 0x4fe90b88, 0x8e8119f2,
- 0x85e7e795, 0xe2f7c418, 0xcc38913a, 0x3a15f1a1, 0xe9c07ef0, 0xf1958774,
- 0x12dbc701, 0x842ce03b, 0xc73b6eb7, 0x54c814a3, 0x8f94af8f, 0x7ae83e00,
- 0x2c763d6a, 0x6124f71f, 0x81c7a4f9, 0x33da7b70, 0x1eb5cb12, 0xc67f3e27,
- 0x75cb0133, 0xcf96171e, 0xe9606679, 0xbf63e4f3, 0x58053de7, 0xf1b8f06e,
- 0x253d6ff9, 0xaa79d658, 0xcf26f9f0, 0x7b372c32, 0x65fcf8bc, 0xeb2c2acf,
- 0x6e58b53d, 0xb2d17c05, 0x7ec3e3c1, 0x58753ddb, 0xf1ea7a36, 0x469eebf9,
- 0x3870d72c, 0xb648b2da, 0x7360e144, 0x3b453b11, 0x6573cd89, 0x9b1eb4cb,
- 0xf309eacf, 0x5a46d9bc, 0x3ad3704f, 0x3280cb85, 0xd689b67f, 0xf6b15407,
- 0x1c92f721, 0xf10fad33, 0x3594f6b2, 0x5a089cae, 0xed65a94f, 0x7379d623,
- 0x847d6922, 0xda8fb58f, 0xa289cfec, 0xacf551f5, 0xc9134c7d, 0x18fad0b5,
- 0xefa7ab3f, 0x695ae573, 0xd595bd3d, 0xe6f13f33, 0xb33d68da, 0xa93fbd8d,
- 0x2c3a27c3, 0x55b0f13d, 0x14202c76, 0xb9de4d57, 0x9bca892e, 0x5dc746ad,
- 0x7bc849c4, 0x892ef9de, 0xd90687ca, 0xde6ded85, 0x746eacc5, 0x77b77b61,
- 0x2edff629, 0xaa5db1bb, 0xb7db0fbe, 0x9ed8dd1b, 0xed835d50, 0xdb17b28d,
- 0x8a3f5647, 0x2f468ded, 0x5eaa4fb6, 0x65d7f58b, 0x55e7b61f, 0xaffec7af,
- 0xfed87d1b, 0x5c9bfdf8, 0x05fd6953, 0xe41dbdc1, 0x5dc10ade, 0x9e815242,
- 0xee4093ea, 0xf9f970d5, 0x6d1d101c, 0x446fe9af, 0x6efadc3e, 0xbfc00be6,
- 0xf507ebf8, 0x983edfa2, 0x89bce38c, 0x1c74c8e3, 0xa4e3e2f1, 0x334137bf,
- 0x257bfa4e, 0x382d38ca, 0xc6df444e, 0x24defad9, 0x2bde7aed, 0x0fd9c655,
- 0xdac2b4fc, 0xffa57db7, 0x9ebb4b39, 0xe329973f, 0x89eb847c, 0x1a7adbe9,
- 0xa7c2d03e, 0x7c2083e1, 0xf138e28e, 0xc64f5b7d, 0x327c2d41, 0xd3e1060e,
- 0xeff4e381, 0x070d38db, 0x3869f0b5, 0x97cf8418, 0x7dc19f08, 0x21c657db,
- 0x0e327c2d, 0x5ff3e105, 0xbee49eb8, 0x53fdb38d, 0x7fb67c2d, 0x498f841a,
- 0xef0cf580, 0x3f32bedb, 0xf327c2d3, 0xec7c20b3, 0x7da5ce38, 0x67fb671b,
- 0xff6cf85a, 0xfe9f082c, 0xbee8ce38, 0xaff32bed, 0xfe64f85a, 0x149f0835,
- 0xb633e001, 0xfc69eb6f, 0xc69f0b5c, 0xb9f083cf, 0xdf19c70c, 0x384cf5b7,
- 0x84cf85ae, 0x64f841e3, 0xf626bee0, 0xe3c69c6d, 0x1e34f85a, 0x3267c20f,
- 0x6fb9338e, 0x42709afb, 0x27099f0b, 0x8e99f082, 0xebbb64e3, 0xc7465198,
- 0xce3ef6b1, 0xf0b467eb, 0x104cfd79, 0x38e3d73e, 0xa938e8d3, 0x52671f17,
- 0x933e16a7, 0x29f0833a, 0xeaae71c0, 0x77af38db, 0x7af3e16a, 0x853e1067,
- 0x7db5ce38, 0x2ea4d7db, 0x75267c2d, 0x3b8cf831, 0xa36b9550, 0x3a1754ed,
- 0x95cafa45, 0x40972e1d, 0xd59da2eb, 0x8093bb45, 0xf62a225d, 0x89756906,
- 0xe6cb7df4, 0x8907f498, 0xb9c8eeda, 0x4ac7e813, 0xddb531ad, 0x52213d11,
- 0xb8c4e763, 0xb8f53562, 0xfd340319, 0x3453f3e3, 0xa30589ed, 0xddfded34,
- 0xc0fa9ae9, 0xfe9a4992, 0x3472ab83, 0xaecba1f5, 0xf64ff4d6, 0xa7a9a0de,
- 0xd359baae, 0x7ced787f, 0x6b25fb4d, 0x97ed354b, 0xea6896fa, 0x42fdd597,
- 0xfd747fd3, 0xe5fb4d72, 0xda6a0f8d, 0xd71ffac7, 0x3cb5c7d4, 0xbe3fe9a3,
- 0xfb4d79f5, 0x69378715, 0x6db627da, 0x3cafd4d5, 0xcecebaf9, 0x5f8fd8b3,
- 0xd022ebc6, 0x7f76613d, 0xf8f7e67f, 0x1bac46ad, 0x8ed05807, 0x722d65df,
- 0x35f8a31c, 0xd1c0b5be, 0x7017e28f, 0xf6457e0a, 0xda4b9280, 0x267bf3e8,
- 0xd3b12fb9, 0xdd18f7e7, 0xd8c3db97, 0x041e94a7, 0xc3a50492, 0xfdbea500,
- 0x63d19901, 0x23f3c0ce, 0x95fbe9da, 0x1888c086, 0xb312cf5a, 0x777e811b,
- 0x8a2fcc0a, 0xf4154914, 0xf411348b, 0xce481e0b, 0xa2abc17c, 0x298355d7,
- 0x71a10a1f, 0x24eec957, 0xaade33b0, 0x1d82f76e, 0xee983352, 0xcd21006b,
- 0x4bdfbb42, 0x0607b4fd, 0xe9b92517, 0xb5232678, 0x1bf3d5d3, 0x3869fce9,
- 0xebd00b7f, 0x7c7f307b, 0x294df9cf, 0x9bf33413, 0xe6689487, 0xfce91b37,
- 0xafde4276, 0xa7e7c53e, 0xc8449848, 0x3853845f, 0x094869bf, 0x9180ffce,
- 0xf9ea2bfa, 0x1ff38323, 0xd67ffd86, 0x2653fedb, 0xa43ff769, 0x237fbb54,
- 0xea91ffdb, 0x48fe7cb3, 0xe151ffdc, 0x90ffdb2c, 0x137fb652, 0xbf38371b,
- 0x93ff082d, 0x2f677f30, 0xb3529bf3, 0x5a1ffbb4, 0x89bfdda6, 0xfd5ddfcd,
- 0x82df9f2d, 0xbe139ff3, 0x5a1ffb65, 0x4d1bf386, 0x8fd0276e, 0x95646071,
- 0x1d4fce8f, 0x04490761, 0x40f3a172, 0xfd163239, 0x148497e4, 0x913dc75c,
- 0x8abede8c, 0xf286f4a4, 0x58fccbc6, 0x996c951f, 0xb4e2efd4, 0xb67378be,
- 0xb612e411, 0x21657f53, 0x8d1dea5f, 0x7937d222, 0x4e1be4d2, 0x28d9dbd7,
- 0xc77ea17a, 0x207d9393, 0xe44d2aff, 0x4f787cf6, 0xfe787e22, 0x9fa353f9,
- 0x5f4bef57, 0x8f95ac2e, 0x2df8a9b7, 0x90e547e8, 0x98165591, 0x1fa9aed7,
- 0xa1107ea1, 0xd427c5fe, 0x249420cf, 0xbe6cca8f, 0x04bfa8cf, 0x2fea36f5,
- 0xd5213b41, 0x75e25fb8, 0xae8c307d, 0x3f289f3f, 0x26f3ee17, 0x9e3a2964,
- 0xc8f7f8c4, 0xf807c11a, 0x14597db8, 0x3d687e05, 0x4d38de85, 0xfb0fee9d,
- 0x7c12ae07, 0x86380bcf, 0x9333e01b, 0xbd60478f, 0x67605eb4, 0xe8a21cef,
- 0xb0514fef, 0x8a40d560, 0xb338d08b, 0xa44e4b88, 0x5a59cfef, 0xa2e84328,
- 0x2f061055, 0x2059fa01, 0xe0119753, 0x69278de7, 0x7ae8389c, 0xf1b3b66f,
- 0x63e4ba67, 0x14f5aa6d, 0x3e5766c8, 0xa1414f5d, 0x359feb88, 0xd1fedb43,
- 0x619728ce, 0x3d572d0c, 0xc9ccf381, 0x54dfb6d7, 0x75cfada0, 0xef54eb6b,
- 0x6ef7a0c9, 0xa73fd129, 0x9cfba160, 0x5d2b8417, 0xade75d28, 0x56b70e1c,
- 0x5e0d5ec9, 0xda556dc1, 0x26e0a021, 0x5f7c2fb8, 0xf6ffd9ec, 0xa17ada65,
- 0xb05f5b57, 0xa73e374a, 0xff1ce39b, 0x683e8047, 0x83e80279, 0xa718f3c2,
- 0xd55c908f, 0xc18dc7d2, 0x50fa306d, 0x625c71f6, 0x8ebe3828, 0xdac2c7d1,
- 0x50f4f0a8, 0x9707a418, 0x7a14be9a, 0xc3d38db8, 0x8a1e9ca7, 0x6ae96ad7,
- 0x4cadf12e, 0xb2dfefa2, 0xe9e21766, 0xe8c850a0, 0x3d0a97e1, 0xc1e869b4,
- 0x8f41e9cd, 0x3d38dbbf, 0x4673f6dc, 0xb67c7a0f, 0x5b687a71, 0x29264e7b,
- 0xf14af13d, 0x3c53a9a0, 0x376ef0e8, 0x7c503d02, 0xf987a584, 0x8dd37dc1,
- 0xde02fad3, 0x9c5f03d6, 0x053db01e, 0x4e104ec0, 0xcf6a7ef8, 0x684e54fe,
- 0xc36bc5ea, 0x31b2bfe3, 0x6a98ee3b, 0x57f5e55d, 0xf5531dc5, 0x53375c5b,
- 0x9be45f53, 0xbc5fe9ab, 0xbed350b6, 0xa69176b0, 0x3baac17d, 0xef42f535,
- 0x6ffd35cf, 0x69ad565f, 0xb56ab5bf, 0x9296fda6, 0xb9f534c7, 0xfa6b5fee,
- 0x5eb054df, 0xff273ed3, 0xacfb4d45, 0xf5345b19, 0x34d7ae99, 0x2f3b0dfd,
- 0x31e81ebd, 0xc048a0eb, 0x71e638fe, 0xf71fdd20, 0xa4f2c48c, 0x717c89c7,
- 0xcb0133da, 0x4fcd8d7d, 0x7cbb495c, 0x26882819, 0x91167f4c, 0x2cf64218,
- 0xcaaf5777, 0x504e7d02, 0x7971feef, 0x1077b551, 0x07065395, 0xad6f39c2,
- 0x11c7f891, 0xe2528022, 0x50c407f3, 0x6d1b567b, 0x3dbdeb8f, 0xb614ad6f,
- 0x0a4dab3d, 0xe3e56e3b, 0x61ed4385, 0x7e2133e2, 0x3ec10326, 0xb9f6e2ea,
- 0x204cbdbe, 0x84e7eaf9, 0x8ffe472c, 0x5cfd3031, 0xa2726466, 0x1cff6b6c,
- 0x7fba8362, 0xe685d544, 0x95cca7ab, 0xaa91f408, 0xb5e2e3e1, 0x3ce46997,
- 0x3c6de881, 0xcd38d9d0, 0x5775c5f3, 0xd8ebde6a, 0x2bbae225, 0x7d508640,
- 0x07e48c3d, 0xfd97a00c, 0x8d6643cd, 0xceba7586, 0x355e401a, 0xd8fda3c6,
- 0x771199e1, 0xe63c024c, 0x53d27963, 0x8f71e580, 0x9e63cb1b, 0x788f2c12,
- 0xf36cb0aa, 0x33f2c32c, 0x4fcb178f, 0xfcb0ab3c, 0xe58b53c8, 0x2c5acf61,
- 0x587c7a0f, 0x61d4f01e, 0x1ea7bef9, 0x234f56cb, 0x171e9d96, 0x29bf0a96,
- 0xddd3d0f0, 0x4fa7ae49, 0x805dfd03, 0x07c4e2ce, 0x0a0d57aa, 0x8ae259d1,
- 0x59bdab87, 0xd6f371a1, 0x249d389a, 0x0861e868, 0x81e364f8, 0x5e9c4c5e,
- 0xade7b7c2, 0x9ed82f95, 0x1d8726d5, 0x7ff9f2bf, 0xf9cdbd0d, 0xa216f491,
- 0x7a3a81a7, 0x3d18bd4a, 0x87c7124d, 0xc18e69e8, 0xa71ff4ce, 0x1f6087a7,
- 0x628ffc61, 0xabc6ea30, 0x14573aec, 0x73f3678b, 0xb2d16a07, 0x7b9f3678,
- 0x15ae1d05, 0x48df3fed, 0xc8f773c0, 0x78808fbf, 0xaa61f77d, 0xdf7b9328,
- 0x7a5e9e9f, 0xfd0bbb87, 0x551f22a0, 0xb67ae3a2, 0xb8324447, 0x0cc81846,
- 0xeac84e92, 0xce75bfe1, 0x44bac34f, 0x9f7de162, 0xf3986673, 0x4f670228,
- 0xeb871789, 0x6357eb0a, 0xcfe3bc60, 0xca1323fc, 0xe6148639, 0x688b34df,
- 0x0bff13df, 0x506f804e, 0x499c991f, 0x0c0f0710, 0x3ed1da37, 0xf0b459f0,
- 0x667ad2f3, 0x29ab7e08, 0xdf515a8c, 0xfc2126d7, 0xa2357d86, 0x77e01f7f,
- 0x317f3931, 0x0df7ce78, 0xf3c097f8, 0x62f1f262, 0x1cb89172, 0xfdd2372f,
- 0x9d1ced77, 0xe403a0fe, 0xade185e0, 0xd7e95f9c, 0x7d28bcf3, 0x4aee3a2f,
- 0x75b8e850, 0x88f3a108, 0xefc71da2, 0xc984b86e, 0xdbeec52f, 0x05f5f266,
- 0xb59267af, 0xf9c53de3, 0xbbaf194f, 0xf81b05e5, 0x0c924b5b, 0xf06163f6,
- 0xf5e594a0, 0xda8b76c9, 0xd6d46cfe, 0x19856bbb, 0x3a2768f5, 0x068f67a2,
- 0x1fb7613d, 0x3b72061b, 0x9c654b09, 0xfb464925, 0x4ef72a31, 0x3346a5d7,
- 0xe4c7cf3f, 0x9418e6c1, 0x663da717, 0xaf53ffec, 0x9471b7a7, 0x40d210f5,
- 0x1e419a8e, 0xded4b6d3, 0xcf4af7fb, 0x3c4617d2, 0xbf926df7, 0x4cd0fb02,
- 0xfc0effff, 0x5d61220d, 0x5eb2b719, 0x872e9af4, 0xf3359be7, 0xca1cf7d1,
- 0x2977eb19, 0x891be217, 0x96e8571c, 0x5c638bea, 0x8bae2c9f, 0xcfed6bb0,
- 0xb2bd4d18, 0x635dbfb1, 0x541fc28f, 0x412a92fe, 0xe329fa02, 0x5925d3e4,
- 0x7d740956, 0x1090426f, 0xfd6056e8, 0x466dcdca, 0x95ce03b7, 0x5b7ade19,
- 0x4cdfc51f, 0x897c9289, 0x61cf16b9, 0x9d2fea4b, 0x4ce67a0a, 0x88048ac3,
- 0xbb162c1b, 0xf30fc9ef, 0x79312cbe, 0x0727c8bf, 0xa73df6b4, 0x55c7420f,
- 0x945c639e, 0xeaab8e58, 0x8f16061d, 0x009b8fd1, 0x1c5fc6e3, 0xf79b153e,
- 0x413b074f, 0xc68aa70f, 0xca3746b3, 0x665ffa45, 0xfc0a4e08, 0x13f314de,
- 0x697e3a24, 0x2dc63b5d, 0x11deca4e, 0xbc80bcbf, 0x4b3fb9ec, 0xd528f1d2,
- 0x15a4bcbe, 0x813f99e6, 0x371c444f, 0xf8e54c7d, 0x04c93fa9, 0x4f3910e4,
- 0x4862657b, 0x063bfb86, 0xf90cc862, 0x161bf33b, 0x4fbc01e7, 0xe547bf81,
- 0xdf241de6, 0xb5207886, 0x9cdfe099, 0x00198621, 0x7de433a7, 0x1e1538db,
- 0x87232572, 0x94829533, 0xf1c14c1e, 0xefc1fddd, 0xb4d41532, 0xb9e0638a,
- 0xc23ff8a4, 0x72471877, 0x50b7980b, 0x897b9ec0, 0x97e81ab9, 0x3f1a51fa,
- 0x9a51fa8d, 0x34a3f53a, 0x437222f5, 0x1425e402, 0x9f13293d, 0xb4d27080,
- 0x88ec928f, 0x555262c7, 0xafd1f603, 0xbe6dfc79, 0xe903489d, 0x7d5fcb3e,
- 0xee90b336, 0xc5896ae7, 0xf57394fd, 0x0e0cb145, 0xa33e25a5, 0x7f73d2cb,
- 0xddc61b47, 0x8c3f2548, 0x6fcfcec7, 0x051e6236, 0xdc994ab4, 0x7de949f4,
- 0xa20d89ff, 0xfd06c22b, 0xbd6573e8, 0xff7fdfc9, 0xf212fdfa, 0xb4a3ee38,
- 0xfa01266e, 0xc02791eb, 0x09d723f7, 0x7e185f2e, 0xaf74e42a, 0x013ba7e8,
- 0xf143e37e, 0xf574fbac, 0x4abe1cd5, 0xfb5d29fd, 0xfd2f960f, 0xbcbe4688,
- 0x93efba29, 0x027933c6, 0x00a417f0, 0x1e0fd7f2, 0xf8c7a93f, 0x7f1f81ab,
- 0x6907f1b2, 0xff8e9ef9, 0xfeba4fd4, 0xbfd63d61, 0xcbfadc3e, 0xbb697d5f,
- 0xd297ea97, 0x23653c3f, 0x494edf94, 0x8a4e09b5, 0x2b8db2f7, 0x3b902e6d,
- 0x7c06d792, 0xce27ca71, 0x870f4708, 0x06a57cb8, 0x78804588, 0xc749fc2b,
- 0xa1e9bd3f, 0xf89ec0eb, 0xc76ca36a, 0xc99d6a38, 0x5cf4a26f, 0x8f28b0d1,
- 0x3b7ac18c, 0x16703fab, 0x51b2bfe8, 0xd3a6a23e, 0x953ddfd2, 0x94357900,
- 0x921a0652, 0x7fa3f4a0, 0xf3e1b152, 0x546f60e9, 0xabf20092, 0xff983dfe,
- 0x87d4589b, 0x9ee8c685, 0x499818ed, 0xafd9f780, 0xb3a1e1b2, 0x6afe0da2,
- 0xbe9038c3, 0x37f7097f, 0x3f6b245a, 0x82b9273c, 0x54f2015c, 0x31f10781,
- 0x8bc5637f, 0x4ed31abf, 0xe1b1d3d7, 0x0541364f, 0xea4fc527, 0x9bf05fae,
- 0xcbbd1dd7, 0x87ca4eef, 0xd156bb6a, 0xee468e4f, 0xb2b13527, 0xaedbf64c,
- 0x6ca4f2a4, 0xd2f4ecbd, 0xbd2f65c7, 0x2067dbf0, 0xc9a1fd7e, 0xbf8e9c39,
- 0x073da5f8, 0xe2fad127, 0x3cbf722d, 0x908b0db6, 0x254bc210, 0x7a597ffd,
- 0xafa50f08, 0x463a31ec, 0xc6f7e7b3, 0x78b1f086, 0x19232ecb, 0xb91e13dd,
- 0x6d8cbbf5, 0xa9783096, 0x760e9f6f, 0xbf178adf, 0x3bbc8236, 0x892053b5,
- 0xb9e061c9, 0x85da0f7e, 0x23c42700, 0x6f1053c7, 0x1495feba, 0x145fa02f,
- 0x37dbe93c, 0xe3027971, 0x9e90d239, 0xce9d7e5f, 0xc991f25f, 0x792ffb09,
- 0xec1b2ef9, 0xc55484e3, 0x95d9fae8, 0x4adf2009, 0x1d826f64, 0xe4bcf64b,
- 0xde91bbb9, 0x9ddff78a, 0xae92e2c0, 0x95d406fe, 0xbfb0fede, 0xe0e6eb85,
- 0x1b888afe, 0x59e4aee9, 0xcb47df31, 0x6332252f, 0xb8be184b, 0xf2726656,
- 0x2354f3c4, 0x509dfdd0, 0x22fbc0f4, 0x627703d7, 0xbe6ef3dc, 0x62316a17,
- 0xc3334bfd, 0xc0d930d3, 0x2bd32ce5, 0xf0dd7a41, 0xd38bea00, 0x70895d83,
- 0xd21956be, 0xc437a80f, 0x1912d3fd, 0xc19e987e, 0xc99ea8f7, 0xbfd0e785,
- 0x8c457655, 0x7a87bcd1, 0x297f9945, 0x3bae9f9f, 0x13b5ec12, 0x8ffde109,
- 0xdf0acb9f, 0x4542bb53, 0x66fe2a5f, 0x3561befc, 0xdf856fe3, 0x4067337d,
- 0x180717fb, 0xa07bc0ff, 0xfcbae967, 0x6bbed889, 0xc0f280b6, 0xf8e1fc44,
- 0xf9cc837a, 0xd0aedb5a, 0x24b75d09, 0x05e8ab44, 0xf8458fc6, 0x7a2f5ea3,
- 0x92a7be92, 0x9f309597, 0x0a78e7ad, 0xfe7316df, 0xf96b219f, 0xef8bac9f,
- 0x99653e53, 0x33edfbec, 0x3f94afc0, 0x57e00ebd, 0xfdf61f39, 0x07cdcdb7,
- 0xfa7ca66f, 0x4f857ab7, 0xcf53f2b5, 0xef5b25a7, 0x7a9f4026, 0xa0dbfd3e,
- 0x4f96122a, 0x7cb0f3e9, 0x7feda83a, 0xf02a7e54, 0x45f802ab, 0xc8a7e085,
- 0xa8e0dec3, 0xd9568797, 0xd21e5611, 0xd99201df, 0xba14fe93, 0xf4ade853,
- 0x4143e5f7, 0x52e904ee, 0x5d20bba1, 0xdfa7742a, 0xfa7e16af, 0xc13249f6,
- 0x26bfe575, 0xd65cffa3, 0xe942f2c4, 0x66077b7b, 0xa85c9c20, 0xdeed48c6,
- 0x75d4fd81, 0xaf5eae9f, 0xd5d2efeb, 0xddfd75f3, 0xa6957aba, 0xb363597f,
- 0xfdfe2091, 0xb2e27e9a, 0xb8d1c867, 0x63eaf470, 0x59c3597e, 0xc2e817a3,
- 0xe5f7c012, 0x790bc4e5, 0x221d8186, 0x28ba05c6, 0x7e206b8c, 0xe7d939b1,
- 0x656372a5, 0x743f8b04, 0x58b603d9, 0x618cae5a, 0xb1bc40f5, 0xf29ee406,
- 0xe2c1103c, 0x01f95d8a, 0x9fca8c5b, 0x51126f6a, 0xc8f9b76e, 0x167e708c,
- 0x47c828d5, 0x76b7a46d, 0x4d35ee76, 0x06590cb4, 0xd15aa571, 0x1df83609,
- 0xe0d937d5, 0x29aba9dc, 0xfa02faf5, 0x451fe17c, 0x8ae97b4c, 0x75818db4,
- 0xea4baf11, 0x4e6ff2af, 0x59d3ef12, 0x9a0b7f74, 0xc7739f98, 0x423d9d1a,
- 0x153d20d6, 0x411a9659, 0xe27f529e, 0x86bcf688, 0x1f2945f1, 0xb69d64af,
- 0xd29f795d, 0xa5f3fa01, 0x6eedb41b, 0xa07f0fee, 0x3258c9f0, 0x2ca3fb96,
- 0xf2e5c30e, 0xfaed3134, 0xae8f8a02, 0x68989116, 0x4449bbbd, 0x166f747c,
- 0x234f107c, 0xb7e478de, 0x7940120a, 0x03a9c0a8, 0xbbd8c9f8, 0xf6d8eabc,
- 0x812fe669, 0x3c7fb66e, 0xd9693fe6, 0x11c3fdb1, 0xd5fd406f, 0x263e4343,
- 0xeee7fb3d, 0xff73c08c, 0xa4569dae, 0xef1daef7, 0xd425f90e, 0x6139335f,
- 0xbf3b5d9d, 0xd3f4031e, 0x0254dcb5, 0x306baef2, 0xd8aec78b, 0x3698f5f1,
- 0x33f7afd4, 0xfe6461fd, 0x3fd37761, 0x41fa133e, 0x76057749, 0xe428cec3,
- 0x23d7caa7, 0x6bbbdf30, 0x6e99d7c7, 0x9fb74fcb, 0xd1e79e0a, 0x5b052565,
- 0x2c317d61, 0x8df2a18f, 0xd3542f89, 0x761bcbf9, 0xf6d01719, 0x39ff37dc,
- 0xd768e406, 0x750f6656, 0x41cf6dca, 0xf194431e, 0xe29972dc, 0x8a9813ab,
- 0x33bfcd3a, 0xdf02e466, 0x3fcb84f4, 0x57d63e31, 0xaffca478, 0x62e08781,
- 0x33eacef2, 0x6fefb00b, 0xe318fb3f, 0x4ba4269a, 0xab7fbf65, 0xe57b46af,
- 0xfecefb62, 0x82df6e7f, 0x76fb2fbf, 0xfbef9bff, 0x7abbd738, 0xe749e83e,
- 0xfac1ee3b, 0xae27564b, 0xeef48fb7, 0xffc7fd85, 0x7bffeefb, 0xc52b7de3,
- 0xfbe2edbb, 0x5affcdfe, 0x36f1ffbc, 0x7e3b778e, 0x3fe6f3d7, 0x57df7d71,
- 0x6ff9bdce, 0xf6def78e, 0xadf5d8af, 0x067b2a86, 0xa9015f5d, 0x316182b5,
- 0x92dbbe37, 0xd76c80e1, 0x8f30fd73, 0xc34bce0e, 0x23014df8, 0x904d0bf3,
- 0xfb7e0747, 0x0b89411c, 0x1d751fa2, 0x1bae1fb7, 0xc8768254, 0x9987ae75,
- 0xb55520dd, 0xadfed366, 0x989c0b39, 0x0fd24973, 0x7b3ea1bb, 0xfd6baf32,
- 0xe204f7c9, 0x7f1da812, 0x2d35ce5d, 0xef5ed760, 0xa5eed112, 0x1fbbda25,
- 0x9ece990c, 0x3dfad04f, 0xfadadd73, 0xfada054d, 0x8dde3e1c, 0xc6ffad84,
- 0x7107c17c, 0x13f65355, 0x356710f1, 0x9089942d, 0x5542f90c, 0x98bfc12b,
- 0x7f7daf93, 0xe3c1f81f, 0x531c3c7f, 0x971c0a4d, 0xb6485c20, 0xfa48dce8,
- 0xeebe4700, 0x7d63d9d6, 0x244ccf90, 0x8de38327, 0x838c4ee5, 0xe65b7f73,
- 0x5596cbef, 0xb2cfc0ad, 0xfc56cfce, 0x4de0dee5, 0xe38dffb8, 0x2fe084a4,
- 0xbff444bb, 0xadff9d65, 0xe2fbe5e0, 0x8fe3c143, 0x3a97cbc5, 0xb8b20cbc,
- 0xccabf008, 0x16a985fd, 0xca0fab27, 0xc8de7827, 0x9fa905fd, 0x42d3788b,
- 0xcc8bdf79, 0x5f386ce6, 0x30148f33, 0x1afd71de, 0x3983f511, 0xe575c04d,
- 0x8f31904f, 0x7e4373f8, 0x6f3be026, 0xbf1515dc, 0x0270cb60, 0xb871173c,
- 0xb42a9117, 0xeff9c95f, 0x84cacbbe, 0xf6820673, 0x211722ef, 0xadbf52d7,
- 0x47f04b28, 0x4b157852, 0x0e9d1bc4, 0x1c816f71, 0xb5bdc4d1, 0x47fdf875,
- 0x8b9c6842, 0xbe4deff5, 0x5fe5d4fc, 0x323b3ca9, 0x1e41fa8c, 0x5849bb9e,
- 0xaff156dc, 0x70626f6f, 0x7c132cac, 0x7e774829, 0xee755f39, 0x83e71cf8,
- 0x0e3bdebf, 0xa3f664e5, 0xefd1a3a3, 0x6fd03977, 0x437ee4a8, 0x0d11d7b7,
- 0xa6eeae71, 0x9b3e7e54, 0x55cb536b, 0xfdcfdca5, 0x7e40bfee, 0xa6bb2d21,
- 0xf822b50a, 0xf1802471, 0x08d4ef5c, 0xfe4c31b2, 0x1931ffeb, 0x6154fd38,
- 0x627e001c, 0xfd91e026, 0xd1abe98a, 0xadbc2ab1, 0x3494f6b9, 0xe01e27a6,
- 0x0e754477, 0x0be163f9, 0x75c03e7e, 0xf4db447f, 0xadfb30fc, 0x5638b135,
- 0xe2df5bde, 0x34379c77, 0x9700cb25, 0x9a8aa61b, 0x2655fb73, 0x238d97ee,
- 0x795d60e3, 0x9b655d6f, 0x11697808, 0x66ddf09d, 0xa359d365, 0xb079cc3b,
- 0x3e309e1e, 0xca8fd5c4, 0xe78022ce, 0x082387d1, 0xf2efd1f8, 0xf834c673,
- 0x430d226e, 0x2b882e3a, 0xb5c39afe, 0x72c764d3, 0x1cb65e56, 0x6b4de74f,
- 0xa3858fb0, 0x7fbc01d4, 0xe3211827, 0xbd878921, 0x501bf4a7, 0x1bb5fac6,
- 0xf6e115ed, 0xed823c3f, 0xef3898bf, 0xfbfc2099, 0x51fb2de1, 0xec073fab,
- 0xae3ca983, 0x09ae83d2, 0x0cf747f2, 0x1bfa07c1, 0xe7ac61fc, 0xdf84efb1,
- 0xbeb3be55, 0x8559e981, 0xde70f5ee, 0xccace1cb, 0xe2c4dc7f, 0xfce27faf,
- 0x8159c0a5, 0x4abd7eaf, 0xa6af2a7f, 0xd751e3a8, 0x197d8e37, 0x268a9bec,
- 0x43a06ec1, 0x466d4855, 0xc6cac3e0, 0x6f3864c7, 0xe360eb99, 0x8c7f2912,
- 0xd3a09f3a, 0x9a3bf2c4, 0x0dbcafd3, 0x1adce4e8, 0xe242e813, 0x0dace3ba,
- 0x06bbf0e4, 0x7be19fe7, 0x227bef6a, 0x790178d9, 0xcea7b1f5, 0x8f525535,
- 0x9ceb8c9b, 0x432d9655, 0xb296ecf8, 0x8aa2725d, 0xb14d3a9e, 0x1f3caed8,
- 0x8748e650, 0x5c3e74e7, 0x1311f787, 0xd242ef81, 0xfbf15bea, 0x62aa5bd4,
- 0xd4961d18, 0x3d6232ef, 0xf32bfb19, 0xf9e8f329, 0x369e7669, 0x6e4178ef,
- 0x4b8020db, 0xd4bfc99e, 0x1f4e8619, 0x8ee72b33, 0xb84f53df, 0xf98edd28,
- 0x2c4f48ff, 0x1b44f455, 0xef2bb9e6, 0x3e3335cf, 0xc489e957, 0x49627a70,
- 0xf01123b5, 0x6a4764b0, 0x92c93022, 0x084b4e3f, 0xf03e27a7, 0x43e373b0,
- 0xaeeebbfc, 0x5c4f54d9, 0xb313d2ae, 0xf44e9023, 0x959ae5ef, 0xe88b8d73,
- 0xff6f0509, 0x03e6f2c3, 0xf86113d0, 0x56b346fb, 0x54d8dc4f, 0xc6e27a88,
- 0xfbe6a2d9, 0x17cd6baa, 0x09d913d3, 0x3b2eb173, 0xcfd0c2a3, 0x39bfdc07,
- 0x61fbf3c4, 0x6b17d01e, 0x9b8476cc, 0x26f46f5e, 0x55f6c7d7, 0x825fffae,
- 0x97d722b3, 0x43f761cc, 0x8243a4f4, 0x2e64bcf4, 0xd002eb95, 0x4beb9323,
- 0x4e71e56e, 0x07cd9c9d, 0x96a19b39, 0x0afdecb3, 0x950bd337, 0x9abfc98d,
- 0xae6fdb47, 0xda669d95, 0xdc90e6ff, 0xddcb54cd, 0xe1096635, 0x96ee43dc,
- 0xae7bb930, 0x4fda1c69, 0x53efedbd, 0x3ca96f2e, 0x430ce61b, 0xa2f3051f,
- 0x1ddfdf34, 0xda76fb1d, 0xef2fc02b, 0xfb7e788a, 0xf603b739, 0x033b7d8d,
- 0x7c53e9ec, 0xfcb8db3d, 0x5cf8f4eb, 0xc81ca953, 0xd3d983bd, 0x84459be5,
- 0xe303455f, 0x91618aff, 0x4fad073e, 0xcf16ff4f, 0x60ef1761, 0x0774b0e7,
- 0xfc2ad979, 0xadb77e2f, 0x1605ce06, 0x1cf017af, 0x005fbe19, 0x78731678,
- 0xe3173916, 0xdaf14a39, 0xe53fe7f0, 0xa6a2dfbc, 0x5f0f18f5, 0x2bf6d3e4,
- 0x29f5761f, 0x4cf247f0, 0xaf790f0c, 0xc9f5be08, 0xeba0cf90, 0xe78d5ce7,
- 0xdaf5e547, 0x7ef907e7, 0xb4fe8a17, 0xe4fbe857, 0xd3bfcccf, 0x1ea0fbc3,
- 0x123f252e, 0x5c29fb2e, 0x70f5fe32, 0xd40e0de9, 0x01f3c7be, 0x2b0e3c3d,
- 0x649d2572, 0x6e7e1f00, 0xf15ca170, 0x42e143be, 0xb1e3bbe7, 0x40fb9ae1,
- 0xbcb8738e, 0xf4d7706b, 0x02157e87, 0xb75e2bf6, 0xad024f65, 0x6327abcf,
- 0x78a178e1, 0x070f56ad, 0x03837ef5, 0xaabcfaf5, 0xc7e4022d, 0xfbdfe42e,
- 0xc31ef895, 0x5d6e547b, 0xed3cf546, 0xe4dd20bf, 0xfdf07386, 0x9b940111,
- 0x47c6dd0a, 0x8e25fb30, 0x03a3a1cb, 0x3f515302, 0xe4760a35, 0xd3047f47,
- 0x704f1457, 0xc4f34bbf, 0xbc73a7af, 0x2352b8b0, 0xc52563c3, 0xf4f3f01b,
- 0x63af5e26, 0x4a2de076, 0x39f05ebd, 0xbbab9004, 0x8c5f7664, 0x8e04e240,
- 0x847e9504, 0x04ecd5f3, 0xb4779e15, 0x28dffa33, 0x8f81f138, 0xf043f7fe,
- 0x33dd2bdc, 0xf4afa63b, 0x3e29c0ae, 0xfde78b42, 0xe379ad11, 0x9bf9c1fa,
- 0x737bc4e0, 0x9cf0629a, 0xbb2d58e8, 0xbf9525ff, 0xf00be7a7, 0xd1f7d43b,
- 0x438584cc, 0x088cf37e, 0x79c50fcc, 0xa65d9853, 0x7bfa4f96, 0xfe6fbe80,
- 0x45692551, 0xc7942c7b, 0x04de7ee8, 0x3410cde3, 0x09104ef9, 0x09ff3a03,
- 0x7cd5bfc6, 0x9249f181, 0xef96f31d, 0xedcf2bb2, 0x5e2cbbec, 0x799c1e6f,
- 0xef851e68, 0x13a255b9, 0x0c9ba85c, 0x2059838e, 0x09941e6f, 0x7db9cbf0,
- 0x81e98bb2, 0x853bd428, 0xefc51eef, 0x0e590ad9, 0x8fbc291a, 0x7ca397e9,
- 0xbd511f3d, 0x0a5882ed, 0xf70f1fa9, 0xee76cce0, 0x7880e69a, 0x95fec64b,
- 0x2b39ce70, 0x20f07ed7, 0xbb3573e0, 0xaac8e907, 0xeed3f45c, 0x7e4053ba,
- 0x06f14c3b, 0x9d99cfef, 0x9d20f07c, 0xf155d6f7, 0xcf396e78, 0x9c237f15,
- 0x27dbce63, 0xdfb41d3a, 0x16adeb82, 0x66bcc738, 0x2adc48a4, 0x5a4adcf8,
- 0xaf33be31, 0xb039ceea, 0x0faa12ee, 0xf72864c5, 0xfa844b4f, 0xafdf013a,
- 0xf08dbaf4, 0xdaeb66bd, 0x3a0cc6b3, 0x07ed117f, 0xc75caf60, 0x106e55fa,
- 0x5dfa909c, 0xca18f4d4, 0x020da2cb, 0x4ad3345e, 0x1b335fed, 0xe3a667d7,
- 0xefa3aeb6, 0xbc317ad3, 0x886ee30f, 0x27ce11b9, 0xf13ae92b, 0x78d509fd,
- 0xdf445d2e, 0x75a4cda3, 0xde389dc3, 0x810275a2, 0x264b03ef, 0x01323f24,
- 0x3b27dbc6, 0xaed0fda1, 0xdd611b5f, 0xfaf9d1b7, 0x8817f1d3, 0x3fa0d36d,
- 0xc760a5f9, 0x46ea87fb, 0x0baddc60, 0x542f0c36, 0xe63ac70d, 0x0c103b13,
- 0x651f3a92, 0x4efb0447, 0x755968b8, 0xcdabec0d, 0x406c9f1b, 0x046b3aba,
- 0xc28c7e3d, 0x3c874ddb, 0x2e79646a, 0xc6a3decd, 0xfd1c9536, 0xdf152228,
- 0x1b31a3bf, 0x921626f3, 0x709bcc7c, 0xa47b3357, 0x0f84657d, 0xb2ed6ae2,
- 0x6f3c087d, 0xe7c2695e, 0xfd312f9d, 0x0db839d3, 0xd627dbe3, 0x5fbe04c9,
- 0xf52eb15c, 0x89e3c069, 0x9bd232cd, 0x1bcb2cf8, 0xc5897e28, 0xb1d79ac9,
- 0x352c4f71, 0xa0b57bb2, 0x5f8bd69d, 0xb40af254, 0x51c8ac72, 0x2f4a728a,
- 0x6b72f497, 0x46d593d1, 0xf21ed920, 0x9474e96a, 0xef54a557, 0x90f5a100,
- 0x306a8357, 0x5797aa6e, 0xc8a2c495, 0x5f8874a9, 0xbe0f9616, 0xf1d02d6d,
- 0x8183ecad, 0x5f6bff56, 0xb77a053e, 0x49e04acc, 0x5a7f6485, 0x80417460,
- 0x6d35e65f, 0xa0f812f9, 0xde045e64, 0xf01c75c7, 0x26b37c00, 0x8bafcf0e,
- 0x6f08857c, 0x78b72f25, 0x95ce96ad, 0xb75ce3f8, 0xfd6a5c48, 0x5c451255,
- 0xee877eaa, 0xe74af4a3, 0xf051bdb9, 0x743ca881, 0xbbdefb5f, 0xc76eb033,
- 0xf6e32d75, 0xd22b1ac1, 0x9f9fa1f3, 0x7af4095d, 0x0102bddb, 0xdd789d7b,
- 0x2b0fa035, 0x880e0ad6, 0xcbdf170f, 0xefa62fde, 0xfd0fbac3, 0xa515d19a,
- 0x7fd0076f, 0xf6c7bac5, 0x820dff62, 0xb43fc603, 0x89ed10be, 0xb048af12,
- 0xfa3b437d, 0x614f540f, 0x57b3599e, 0xaffa004c, 0x0764dfb8, 0x23a14953,
- 0xf9db2e85, 0xecc4087d, 0xbe8b331c, 0x15160ab7, 0xc51e93df, 0x4fa82102,
- 0xbd2a4e81, 0x5fdace8c, 0x36409d92, 0xf64ba717, 0xae09fc1d, 0xe767c55f,
- 0x74ef874c, 0x047ea98a, 0x975e6d3a, 0xe23b046b, 0xfcc04f84, 0x49749e8a,
- 0x4a3b6a2c, 0x0567cba1, 0xd07baf26, 0x7b0e8ea3, 0x4ba71dd0, 0x5128fe85,
- 0xb6595fa3, 0xfdf0264f, 0xdd1e9ebd, 0x908dfd72, 0x32f851ff, 0x6f5d0fdd,
- 0xd1cfcd6e, 0x48e0575e, 0x2f5009eb, 0x8fde5892, 0x12e50fc2, 0xdefb0fcb,
- 0xbfcdd3eb, 0x87e78229, 0x2c883f01, 0xe74fc97f, 0x0bf37b7d, 0x66792f98,
- 0x6875fb53, 0xe941bcdf, 0xd9bfe000, 0x7fe8fc4a, 0x950e50f8, 0x6f988836,
- 0xe67f244d, 0xaedf952c, 0xfe2cbfe4, 0x5de11583, 0xcbe4d5ba, 0x6e97efa3,
- 0xf3063a3f, 0x30079611, 0x49bbc7bf, 0xde1fe760, 0xfdf031b6, 0xf2c4a950,
- 0xc176f470, 0xe71648f0, 0xaa2d78ea, 0xa136ec00, 0x57b3dd8e, 0x6c937f5a,
- 0x0775c552, 0x2f38ca45, 0x63e787ed, 0xb8678a5e, 0xa7d6011f, 0xc94ddfbe,
- 0x9178e9bf, 0xa3695fd1, 0x695cf68c, 0xc84e96dd, 0x6c3d2cc3, 0xb2b73f42,
- 0xfc11acee, 0xbc3fdcd8, 0x55f3f784, 0x3c9a8a36, 0x6ff91ebc, 0x04dfa275,
- 0xe50d5bf9, 0xeb437e78, 0x0faf58ed, 0xb17a079f, 0x7fc5ad3c, 0x4bddb1d8,
- 0x95f8b841, 0xbef0d9df, 0x368dd6be, 0xeea9eefc, 0x3da7f9c2, 0x57eaddf2,
- 0xeddf305f, 0x0941fc34, 0x5d48baeb, 0x931d60be, 0x997d746b, 0xc4e261f9,
- 0xa5eb7b41, 0x7c839f2f, 0x7fad8eec, 0x5bd60e7b, 0x9ee75ff6, 0xb21abc83,
- 0xf60ec233, 0x0fa6e548, 0x79559f30, 0xd99224af, 0xfe2f9d7f, 0x3b80f30e,
- 0x6139343f, 0x271dceb4, 0xd1bd8086, 0x04dbe5b9, 0xba5defc8, 0x73c61a63,
- 0x4dcb6e96, 0x05e26124, 0xeb74094a, 0xb21e43eb, 0x0dcd5f1e, 0x73ff98cd,
- 0xe41be286, 0xb17f3043, 0x83e59cbe, 0xe2de783a, 0xdef9f0e6, 0x1b14f23d,
- 0x193f5d66, 0xb30362e4, 0xaf9a2e0b, 0xf1e28078, 0x6fe03ef2, 0x4f2e6af3,
- 0x0cefe3c2, 0x54f141fc, 0x57cda913, 0xe161b2fc, 0xb6819e6f, 0xf35eb886,
- 0x807dfd15, 0x3e4f929b, 0x58f5d22b, 0xd6cdf024, 0x287e63df, 0x01cf2de0,
- 0x21cbf9f2, 0x293e1710, 0x10fe3007, 0xefc261d8, 0x1c774b2c, 0xdce9fe42,
- 0xb5f31fb6, 0xeeebcf09, 0xcb1fc124, 0x9eb1e5c3, 0x96560dca, 0x0fe1c8e7,
- 0x1b0e9079, 0x8ef9e73e, 0x3c958e8c, 0x1ff2dec8, 0xf853225b, 0x4f2c2a7d,
- 0xc7c37cb3, 0x8430af3c, 0xe002612f, 0x7982fda9, 0xec1c0aa5, 0xd42b8700,
- 0xf3e1bcf9, 0xdf79834c, 0x3f805f1c, 0x7cc1d390, 0x041f5a5d, 0xe13e9bf9,
- 0x0f230910, 0x3cb3d73a, 0xfc394ee0, 0x7e4accf7, 0x4f9eb36f, 0xebcb7d7d,
- 0xbffad8bd, 0x221a7c2f, 0x1eae381d, 0xc3e71d4e, 0x9ce3f1cb, 0xf7f07fef,
- 0xfe826521, 0x81cf93fb, 0xda1bcb1e, 0x899f788d, 0xfdf13fe2, 0x5a78f076,
- 0xf31126c7, 0x9bea6b3e, 0xe5eb8e51, 0x273f0545, 0x1bf7afd1, 0x468adebe,
- 0xb63a27a6, 0x3a167d5a, 0xaf5bc74e, 0x6c573d21, 0xd3bcc76e, 0x18af75bd,
- 0xcbe754db, 0xaa7c27a0, 0xbcbc77f5, 0x6dca0c6b, 0x7e98bf7e, 0x9b14631e,
- 0xfc29b2a7, 0xcf9b953c, 0xec59e66a, 0x2dda37ad, 0xb4ef9f17, 0xc609bae4,
- 0xeccadd3b, 0xe2f5d3a5, 0xf9985f0c, 0x49bd714f, 0x0f3b2d48, 0xef12faa5,
- 0x7bb2b359, 0x245b44d5, 0xad4b5fda, 0xf7470e13, 0x7988d283, 0x79aa2b2f,
- 0xce7dcc9e, 0x0af7c024, 0xd604d5eb, 0xabe012bb, 0xbb65e3f2, 0x26dd809b,
- 0xa867e527, 0xf59459cf, 0x8819fb1b, 0xa5ff854f, 0xe08919d6, 0x3bc9127f,
- 0x5d71d608, 0xe812d7ca, 0x8b0f56bf, 0x47f393df, 0x56f3c187, 0x8e27bec9,
- 0x553adb8e, 0xb5e84270, 0x34fbdd27, 0x2e998671, 0xf8b17ded, 0x5fdd1c0d,
- 0xf680d3b7, 0xb7e0d5a0, 0x1ac42ed3, 0x0e849ad1, 0xa43883ac, 0x0214fc04,
- 0xf41e21f3, 0xfdc16798, 0xcd4236c8, 0x94e7e853, 0x449ff734, 0xf774904f,
- 0xc30390a1, 0x385abd28, 0x39712abd, 0x8e9bfb2b, 0xacf786db, 0x6a91a4c4,
- 0x5635df20, 0x4f0dda37, 0xe869fbf0, 0x41e4246f, 0xa74892c4, 0x3a26fefa,
- 0x38fbe6af, 0x1ea6af3a, 0xe3d6d2e3, 0xc2c74866, 0xbe3897e9, 0x05a2e98e,
- 0xcd3310e1, 0x796bf8a0, 0xeb4213c3, 0x4e27898e, 0x6b5fb43f, 0x2801efc1,
- 0xfbd5797b, 0xebb458b0, 0xc8d3eba6, 0x36b4e889, 0x53ad81af, 0x1c4edcf3,
- 0xa5dfee02, 0xabd74e7d, 0x714617a9, 0x09112779, 0xdcc59de4, 0xbbfc384a,
- 0x5fbef035, 0x013837da, 0x5fb843f0, 0xcf9025ed, 0x41f7cd12, 0xb7f4d7e7,
- 0xcd807443, 0x0c871daf, 0x43bfabb2, 0xbfeb9cbd, 0x25fc9651, 0x329e9451,
- 0x62615687, 0x49125676, 0x9ba5efc3, 0x4a7ca1fa, 0x79839659, 0xa32d4227,
- 0x086b61ab, 0x00b0cdf2, 0x3fba9bcf, 0x9351ed12, 0xd2237899, 0xe2367be1,
- 0x1661d395, 0x7043cf1f, 0xa1d0372c, 0xd69455a4, 0x814d7be8, 0x8a42c31c,
- 0xa2687a84, 0xb94657e3, 0x9f28a3aa, 0x2774ccd6, 0xf6c2c619, 0x3bb95ede,
- 0x52f6b90b, 0xaa1a0888, 0x1969eef7, 0xef04228c, 0xbdde20f1, 0x6bc6807b,
- 0xec153a95, 0x6d7f9df0, 0x32eeb064, 0x9b5f7953, 0x3e8feac2, 0xdc2577b4,
- 0xcf6e9337, 0x49f6147f, 0x7851f6cc, 0xd58fe5de, 0x9bddf3dc, 0x39d6517e,
- 0x173f8372, 0x3abe05ee, 0x1df5eec0, 0x777e0d6e, 0xa3787a8f, 0xd1fba945,
- 0xd86fc88f, 0xbcad728f, 0x1bb63f7e, 0xa1cb4ea0, 0x81dcfe71, 0x472f9f74,
- 0x74969a1d, 0xa1af1d29, 0xd93e0930, 0xd0ea357f, 0x30eef81e, 0x578f8704,
- 0x82141eee, 0xc3fcb483, 0xd5932d3d, 0x0f70f870, 0x857d21d8, 0x7d0fabc3,
- 0xebe87088, 0x6c5d29be, 0x935cbc07, 0xd6fbc06d, 0xc16c38ae, 0x45fd0af8,
- 0xe3e23ea0, 0xbbe5efac, 0xa11fc56c, 0xd354ece3, 0x429ef297, 0x4dd1bd57,
- 0x628fb12a, 0xe96a6cf7, 0xf5ca2ba7, 0x25abac36, 0x358f75ca, 0xf78ff71e,
- 0x76e35c00, 0x0b8ea73b, 0xb8b135d1, 0xeefdc1c7, 0x5bf32ca3, 0xf2f254a3,
- 0x6225ec9a, 0x81c9c40a, 0xfb43e217, 0x8de65f89, 0x19f74118, 0xde6032ef,
- 0xfb8b28fc, 0x2b63cc20, 0x7721f808, 0x1ee20aec, 0xbe218827, 0xd9abe650,
- 0x9af5fcc1, 0x396f8342, 0x6015ef3b, 0xb47d10bc, 0xc023e8e9, 0x1db1a3e8,
- 0xa9a2a3e9, 0x6347d19d, 0x06644e75, 0x3f03373b, 0xc60496bf, 0xa23bde02,
- 0x51269abe, 0x151a1ffa, 0x91ba01a8, 0xe5a6e7bd, 0x3d708a5d, 0x7c3f161d,
- 0x7abeca7d, 0x4b6d6fc3, 0xe06943ca, 0xe033ee4f, 0x37029779, 0x858c5fa5,
- 0x58b6c5e5, 0xfc035fdd, 0xedb4272a, 0x6d6fed85, 0x36f31f79, 0xd86efb6c,
- 0x66dbef05, 0x42390c53, 0x5beed6f3, 0xd767f78e, 0xd87dd806, 0x05e024d1,
- 0x3c3bdebd, 0xe104ff18, 0x1a4f7601, 0xf91da3c0, 0x5eff1365, 0x7051ab2c,
- 0x1a1f9e81, 0x890deec5, 0xebf3b0b4, 0xd7111621, 0xcec2c439, 0xbdf7e259,
- 0x95bf8225, 0xfb01bf75, 0xa69943c7, 0xbcfdffde, 0xaf89a4bf, 0xe854f82b,
- 0x28d24a89, 0x4ce47fb8, 0xa2d13b8c, 0xa8d89fa1, 0xca78d514, 0x88788f7b,
- 0x47aa0ef3, 0xff3dd8af, 0x94abd02e, 0xfc00be7e, 0x968dfb04, 0x4a6e0023,
- 0x70cfa07f, 0x0defa1f8, 0x1e60f987, 0xecb617b0, 0x3de2225e, 0x6bc47d1a,
- 0x491bdc26, 0x914667fb, 0xfdf4adaa, 0x425a68cc, 0x13d78e3d, 0x2b207d79,
- 0x1a713f53, 0x3d7cfa04, 0xb64ebb8f, 0xc931fcf0, 0xd186ea3e, 0x5c766750,
- 0xea245256, 0xe16f8505, 0xb738080e, 0x75f9f896, 0x00c5204a, 0x58f85378,
- 0x56fc180a, 0x2452b4aa, 0x18f20187, 0x7dd0f1de, 0x9dcbbd4b, 0xdf652fab,
- 0xcf9fea71, 0xc1f9ebe4, 0x3d17f5f6, 0x099eefbe, 0xb7372df0, 0xad03723d,
- 0x0e7a5c57, 0x479707cf, 0xa1795c55, 0xebf42f7b, 0xdbfce77f, 0x4d874051,
- 0x7ec298df, 0xe1c3550f, 0xc029e2d6, 0xbdf932f7, 0xc951242e, 0xb1e55633,
- 0xcbb71b42, 0xf8299510, 0xffaced1c, 0xf8081140, 0xf0b14af4, 0x9e32547e,
- 0xe5d14e1f, 0x9f87fead, 0xc132f9c5, 0x71250030, 0xcf3628c9, 0x324ecc37,
- 0x9ce0ffd3, 0x173f1a69, 0xaf72477b, 0xdef1d2b0, 0x357cec21, 0x19aaae33,
- 0xc6c4c4ea, 0xfbda26cf, 0x6b13320a, 0xb8c2c6a7, 0xdf18c9d1, 0xdaeb35ca,
- 0xc812bd53, 0x8c9a9269, 0x88d38979, 0x7a633eca, 0x08eeed03, 0xe3902eba,
- 0x742c8cd7, 0xe7ede54e, 0x4d43ffe8, 0x2a99f6e4, 0xc72ae37f, 0x8d54e75c,
- 0xf78b52fd, 0x9fe9fd01, 0xf2e1ede2, 0xfce33e2c, 0x77c9e2df, 0x1eb3f9c2,
- 0xe10e399e, 0x27a3ab7e, 0xd470f5dc, 0xab815dc3, 0x9fd98e1e, 0x86c6a738,
- 0x2e1e9381, 0x48ed48d3, 0xdc0a63d0, 0xc77970f5, 0x9de2e8cc, 0xabde1334,
- 0xe0c48ef8, 0x23350ef8, 0x9ba41ea1, 0x01ef1168, 0xf5e28cce, 0xbab04737,
- 0x64a8ef82, 0xae1eb7c4, 0xc666387a, 0x58e3f7f1, 0x2f00bdff, 0xe202e509,
- 0xcc9ba147, 0x31ddaad4, 0xec507a2a, 0x2e1c69eb, 0xfb46bed4, 0x8ade3739,
- 0xafb02671, 0xe08425a7, 0xd6ce5b96, 0xf2dc3b95, 0xf4a9f83c, 0x72bfbc75,
- 0xff3fe10a, 0xca2f3c14, 0xce0555e4, 0xdad1be53, 0xa69ad3c9, 0xc338829f,
- 0x7efb9350, 0x1080cfa0, 0x1475e783, 0x707447ee, 0xd767bed7, 0xf870f5d6,
- 0xb131fc03, 0x3a14bc27, 0xe3c2c0fb, 0x20c8034e, 0xbcc797b0, 0xf20d1196,
- 0x86c5349b, 0x715c4b8d, 0x0e383f38, 0x63a457a7, 0xcf83b881, 0x423fa9ed,
- 0x5aa3ed8e, 0x36c72552, 0x96d3ee8b, 0x4c729ef8, 0x8e64d3c1, 0xbbc4c987,
- 0x934be20b, 0xd045f489, 0xed0e481f, 0xe7d95bbf, 0xf6103240, 0xc1c58379,
- 0x67df3fe4, 0xbcd4df70, 0xf8b3f984, 0x0b6659fc, 0x02435f7e, 0xce104752,
- 0x91df7829, 0x5a1b18d7, 0x4fbf9287, 0x66115486, 0x7bc3cdcf, 0xc85c29ae,
- 0x75a15177, 0x8482ac98, 0x75f0fdf7, 0x7b33a99c, 0x5bad8f21, 0x9d814774,
- 0x1bec21d7, 0xfc93fb83, 0xe9ff4023, 0xee35cfdb, 0xda7680ae, 0xf1fb08af,
- 0x59162ff5, 0xc3b021fc, 0xb93e7be4, 0xb2953b77, 0x8509ea35, 0xfbe0b37e,
- 0xde81cbd4, 0xdd6103fd, 0xd1d54053, 0xf659e772, 0x6376431d, 0xecf304ba,
- 0xff7966e9, 0x8125c439, 0xdfa7ab7d, 0xa1a74bef, 0x08604578, 0x5d8a29f8,
- 0x1eead536, 0x57cfa569, 0x9b66eff3, 0x05188ece, 0xf4c1f978, 0x4f4b4074,
- 0xba511d0a, 0xecf00f1a, 0x69fcf8ca, 0xcddfc730, 0x7a009738, 0x43dfb151,
- 0x026b8a76, 0x3b37a7ac, 0x3dd5897e, 0x3b017ac5, 0x2e67b15e, 0xdd9087e6,
- 0x74a641d4, 0xcbc71b74, 0xccab733c, 0x600fcdee, 0x48eafdfe, 0x473b5ee5,
- 0xb791d824, 0x54d94ead, 0xd586e707, 0x6b7efe2c, 0x83f1ac95, 0x86dd90dd,
- 0x66fbb3ef, 0x6da65f6f, 0xfe604cff, 0xfb57a17e, 0x3412589c, 0x9cf6edf7,
- 0xc035e0d7, 0x3123c3bc, 0xaf3f413f, 0x79fa7f7e, 0xe6adf3f5, 0x636ad57e,
- 0xcaa748ed, 0x6560592e, 0xaaf750f6, 0xcae0ebc7, 0xe7e0deba, 0x93480f36,
- 0xb5abf754, 0x082c8ead, 0x328c6f77, 0x79ef029e, 0x00e2cfc4, 0xc3dff255,
- 0xe59b83dd, 0xde81f166, 0x98161de6, 0xe1a0768a, 0xf608bf49, 0x4100b0fc,
- 0xe02b3bfb, 0xf8d02c32, 0xee4fcc01, 0xbdc007e1, 0xd65eb426, 0x01f98bb0,
- 0x4fe8d5f5, 0x027f5194, 0x387accf4, 0x17c01e9b, 0x41fe8694, 0xfc85dfd0,
- 0xcfb82ca9, 0x12c3fb92, 0x5ad497a5, 0xf5a37fd6, 0x096ae35d, 0x927755fc,
- 0xe363294c, 0x98d7efc3, 0x93555fa9, 0xf508224c, 0xf8785eab, 0xc06be9fd,
- 0x09758e5f, 0xad287f70, 0xf7fa58d7, 0xf8debfdd, 0xb81afef0, 0x65fa946f,
- 0x7f2aaceb, 0x78fe37b7, 0x0b918c04, 0x3dba47f0, 0xa1f0237f, 0x8a68af12,
- 0x111f7ce0, 0x8068bba4, 0x57274297, 0x7e8f9b2f, 0xe725c445, 0x589e4087,
- 0x16ffc716, 0x46c38f2b, 0x129ff7b1, 0xbf905bdd, 0x023f3adf, 0x8cefcbe2,
- 0x5cfe8a3c, 0xe7c59593, 0xd04a79f7, 0xb9f0527e, 0x3f702acb, 0xf8103fc9,
- 0xcdf1daed, 0x6497e071, 0x43de02c9, 0x9efb24da, 0x7900cf4c, 0x82bedb34,
- 0xa26b296e, 0x6b13bf11, 0xbc0ff08a, 0x35a7655b, 0x9b4cfbf1, 0xa81deece,
- 0x661e63ce, 0x4565ddcd, 0x308bf3fa, 0xeec6567e, 0x7befa0c3, 0xfd212fd3,
- 0x7f4012e1, 0x13fbf390, 0x61a8fcf0, 0x42e20d8f, 0x827efc1d, 0x09cd0d33,
- 0xbe5969f3, 0xf0362fe5, 0x70e7ded7, 0xe2b52338, 0xdb654a3d, 0x83ffbc44,
- 0x0b709bce, 0x161ba7c4, 0xf7efb264, 0x8a6777d8, 0x67ff4cfe, 0x167cc0f3,
- 0xc8efc434, 0xb2727b14, 0xf583ebd9, 0x188b5535, 0xbbac9e2f, 0xbf0a6c37,
- 0x5f6f4c37, 0x712ab49e, 0x87cc04e9, 0xf3676a5a, 0x594fc6f4, 0x9f12c4df,
- 0x24b2df8a, 0xcfeb08bf, 0xc08126fa, 0x5a679715, 0x03c89e52, 0xcec89e3f,
- 0x44ac30e1, 0xcafda376, 0x6fe89fcf, 0xbe43dc37, 0x6cde5793, 0x4aeb3738,
- 0xfdfb6624, 0x77600997, 0xe0b1fbb9, 0x8a07d322, 0x1be5377d, 0x6f557c6d,
- 0x8d34e6e7, 0x41207fbd, 0xa77fb378, 0x55eec022, 0x06ff61eb, 0x7bb587a6,
- 0xe10c7909, 0xf52dbe90, 0x2947b81a, 0x3b068ffd, 0x7c02dbcc, 0x3cf80f11,
- 0x54fcf787, 0xd08dff10, 0xda20d15e, 0x927eb0fd, 0x95f01971, 0x70283762,
- 0x6cdf3c69, 0x91d7431e, 0x5f60d675, 0x3378874e, 0xcffa05f0, 0x2fbcfee3,
- 0x5357f40e, 0x8273c240, 0xa3bbb3ac, 0x60e4a816, 0x3e0379d3, 0x5bfcf37f,
- 0xd881e7da, 0x2dbde0cb, 0xef17e606, 0x28f7fbde, 0x9e6f4b90, 0x82f1c6ef,
- 0xbb4f4376, 0x6b91e118, 0x67718315, 0xc44c5db5, 0xb79003ac, 0xaa9a2d35,
- 0xbb77a131, 0x7213fb37, 0xb4f495bf, 0x1ec0721f, 0x11d8c311, 0x3e74ce1c,
- 0x6f48f809, 0xe36e5c57, 0x24c1e6f1, 0x6ca3ee1d, 0x18b71f3c, 0x8a6e1fbf,
- 0x7d472fb2, 0xaea5eccc, 0xbf1fa7dd, 0x13ef9e06, 0xe711fdd9, 0xb7a7e445,
- 0xda1e2e76, 0x2a5be2a6, 0x5f4f1139, 0xf9db3ff5, 0xd5ffed06, 0x4716489e,
- 0xa9979c97, 0xf50877bf, 0xf97871f7, 0x38f3b007, 0xd7b800ef, 0x2fdb07e9,
- 0xfafc3d1d, 0xc3da1b3c, 0x1ea0e981, 0x376bac46, 0xac5bfe02, 0x90d9d7f9,
- 0x00a3eadf, 0x33925e2e, 0xf3804c90, 0x7403927a, 0x7626a815, 0xb5b627f0,
- 0x97800c18, 0x98c98d8c, 0x9f89b3a7, 0xafaa2e57, 0xe8ab2635, 0xef3f1162,
- 0x877b293a, 0x1f9eaac4, 0x72f931b0, 0x6fe94f72, 0x43e34f30, 0xc61e1913,
- 0x31b06cde, 0xd18afb7d, 0xe866cd7b, 0xdf80c477, 0xfbdeed0d, 0xcfb85efa,
- 0xc761d178, 0x3dff8858, 0xbf607362, 0x6a9b7dc6, 0xf1b7cf20, 0x5452d013,
- 0x14286caf, 0x37de740d, 0xefc58388, 0x1fd0180e, 0x893dc60f, 0xf9f60e2b,
- 0x1bcf5f10, 0xf9cd062d, 0xdefc1bd8, 0x326d7bdd, 0x89b12fb6, 0x2dadc788,
- 0x37a07139, 0xbbeeac5a, 0x8e2fa06d, 0x12c0d5b5, 0x629f7894, 0xb03c4daf,
- 0xf4d19af5, 0x061d89cf, 0x71dba03b, 0xf61179e0, 0x484938eb, 0xd87a408c,
- 0x8f3c746f, 0x3a37bc7a, 0x1d7478d8, 0x2abf016f, 0xff9aa7f0, 0x7f5a34a1,
- 0x858a55a3, 0xfe052efc, 0xf9943eca, 0xb5c4f51d, 0xe4ac82de, 0xb8817ada,
- 0x3a5f92b3, 0x7f944ece, 0xd2e9012f, 0x6a3d7368, 0xda0ef663, 0x30f727d7,
- 0xae15e8bb, 0xe1524947, 0x829f8050, 0x8b1b7aa7, 0xe9eb2a33, 0xdbdffe6d,
- 0xca549e98, 0x9bd74bef, 0xe9178764, 0x0fcb1b66, 0xe6fd7877, 0x6b23c854,
- 0xcf7fc54c, 0xfb679549, 0x93e7a47b, 0x1b057bda, 0xf8060cdb, 0x8762ac0e,
- 0x6de68310, 0x20ebed1f, 0x48f5499d, 0x09878d7f, 0xdd6971f2, 0x48ee2c9d,
- 0x7fbf0de5, 0x81d1cf0d, 0x7a79f87f, 0x1396817f, 0x18afd69b, 0xdbf5f088,
- 0x7042af80, 0x65037ca2, 0xe811ad55, 0x5652d175, 0xe973f316, 0xc8dd6b45,
- 0x58eb459f, 0x5b1ee17e, 0x0ce7ae32, 0x886257a7, 0x27f9001c, 0x657f0e2e,
- 0x6e9c79dd, 0xd13c354f, 0xfa52f30c, 0x73e013fa, 0x4f63e352, 0x7a1572c3,
- 0xc641b53d, 0xb5224a71, 0x7cadf6e7, 0x18076dbc, 0x20dc4fbd, 0x5ffdc0e7,
- 0x0ec4bd13, 0x3637ec71, 0x7e41146c, 0x9cf401bf, 0xf41cf8cc, 0x0f3dc9ed,
- 0xf402fd46, 0x4081bb5c, 0x2315fcbc, 0xe2a2d929, 0x5febdd8b, 0x696ff0d5,
- 0x1835f55d, 0x158b6f97, 0x7c2bce40, 0x15fdc0ab, 0xe42af3a3, 0xfc74ebff,
- 0x2fbd953f, 0xdfb03b47, 0x9fff9c13, 0x9ff94198, 0x10d98a3e, 0xaf38c023,
- 0x07dfc318, 0xa40ff5f7, 0x6d5ca0bf, 0xbf5721eb, 0xcdb57266, 0xafe74a96,
- 0xe31bb6b8, 0x71f5c10c, 0xaa6279ba, 0x02c93742, 0x6296bfc0, 0x8bc01870,
- 0x99e815c2, 0xdfb7cdda, 0xd95c8690, 0x0f39e122, 0xf9e2473f, 0x0277ca1f,
- 0xe486bf0a, 0x6eb7c299, 0xd5d83473, 0x8056ab53, 0xe7971f5d, 0x253cfa45,
- 0x52c687b8, 0xf80a767d, 0x383c8c38, 0x57c7e5c1, 0xf762fbb2, 0x7a000fee,
- 0x221ea12e, 0x13ac87ec, 0xa51780ec, 0xdd8eb1dd, 0x5a3c3ac3, 0x2bbf9cd0,
- 0xef0c1ff5, 0x41fb8f40, 0x7c634787, 0x3c84603a, 0xa63e11d5, 0x839088c7,
- 0x570c40fc, 0x5b3e000f, 0xcdd7f544, 0x4a75fd2a, 0x553a45ae, 0x347970e0,
- 0xc55f5f93, 0x95ff7db0, 0xe21585d3, 0x2f73aa85, 0x024890ea, 0x605167e3,
- 0xf8c22e7c, 0xba4cf6a7, 0xdafde90a, 0x076ef4f0, 0xfe162aff, 0x48f5a73d,
- 0xec33319f, 0x13cb1df8, 0x46223392, 0xb0f91f63, 0x57f1bddc, 0xfd01d7e8,
- 0x46f7bf02, 0xff4025ff, 0xfbdd0c24, 0xf0abdd28, 0xa501d0a7, 0x55a26b8b,
- 0x174f41f2, 0x02a379ca, 0xe7745797, 0x33474f95, 0x9780b079, 0x66f25e8e,
- 0x60fcfd94, 0x81ec9f3c, 0xcf0cf9e7, 0x7c02be21, 0x67e1f22b, 0x3dafe117,
- 0xaf6c76d1, 0xe60174f9, 0x73f2d4a9, 0x9f3e7aab, 0xd1a5eeb2, 0x4b7ce682,
- 0xca34ee7f, 0x5a2bbee2, 0x48ea3e77, 0x34c7d10b, 0xd92ecf6e, 0xff73c268,
- 0x7802d02d, 0xbdfecddb, 0xbb3de56c, 0x0a4f0d44, 0x4cd115ea, 0xe1f42b8e,
- 0xc3246658, 0x55f2ffa8, 0xc86ee172, 0xab59f9ff, 0xdd346f37, 0x77a316c7,
- 0x07e8b5f6, 0xf182af38, 0xd833caaf, 0xc6241f21, 0xde201765, 0x7c0a2481,
- 0x33cd8fb8, 0x1979d52e, 0xe70ec9fa, 0x0f079038, 0xf8297c73, 0x9bd0e055,
- 0xaf0deb9e, 0xe27c387b, 0x8e5ebb79, 0x9efe7314, 0x5c5dfbc2, 0xe045788e,
- 0x2f711b27, 0x59fd6117, 0xe505fe4c, 0x73c11648, 0x727597c9, 0x5e9fe5e4,
- 0x91acbc8e, 0xc2da1323, 0x0bfa11ca, 0x1bf1ffcb, 0x4b1da216, 0x63b7870e,
- 0x3d334677, 0x93fa0f1c, 0xb2f78bf6, 0x3987def3, 0xc48a52dd, 0x0ddac9bc,
- 0x5713cc26, 0x366d90e4, 0xaf7f81c9, 0x16fa06d9, 0x1f76cd7a, 0xc5e70c9e,
- 0x9c074649, 0x03929f5f, 0xd453f788, 0xfa45af01, 0xc7fbc7c8, 0x0223c8c3,
- 0x47ef12f1, 0x9cefd083, 0x9f213626, 0x5046992c, 0x78deee7f, 0x4955d205,
- 0x74008a5e, 0x23175a5b, 0x54ec7b40, 0x94e401bb, 0x962b8fb2, 0x71b0c819,
- 0xd40707c8, 0x6f0ae472, 0xcdbbfe15, 0x49dce343, 0x47210c72, 0x33fb3d1f,
- 0x8da8bc75, 0x22f1d5d6, 0xfbe07f5a, 0x468e8bc4, 0x78801e07, 0xae6c3fd1,
- 0x94779876, 0xea28ff25, 0x912ac4b7, 0xc5d6e4a8, 0x4a1fb7ec, 0x2e3c952f,
- 0x34a95a45, 0x81c4d04e, 0x6073ab6f, 0x582bf81c, 0xe380c54f, 0x7870c34f,
- 0x7b5846af, 0x1fa3ad9b, 0x13e40b97, 0x97e8f882, 0xfc42255f, 0x063b9145,
- 0x2fe2ac7b, 0xa197900b, 0x037fdbf0, 0xd7b57d1c, 0x00008000, 0x00088b1f,
- 0x00000000, 0x7dcdff00, 0xd5947809, 0xe77df0d5, 0x24cb2d9d, 0x7642c993,
- 0x03bbec26, 0x4eb1ab09, 0x04906008, 0x8d441007, 0xb0900938, 0xd2910364,
- 0x208196d6, 0x2b188a06, 0x0eb154b5, 0x4551fa14, 0xdb1ab61b, 0x1a4013a0,
- 0xad563414, 0x61a5afd8, 0x80891d91, 0xe58ad3fd, 0x7bdce73b, 0x264ef333,
- 0xe79f6a02, 0xcdcf0f0f, 0xcf7bde5d, 0x773dfb3d, 0x79632d49, 0x9b19223b,
- 0xed50b390, 0xccc9a906, 0x831b163c, 0x43632f1f, 0x087a3e79, 0x6b9859e6,
- 0xf26b6320, 0x01de7975, 0x9fc75d8c, 0xa269fd3b, 0x678e3e15, 0x30d6fe6c,
- 0xb53349eb, 0x7fe1d767, 0x268c5d79, 0x649f595f, 0x9fc7d93d, 0x24bf8f9f,
- 0x4c33ffc1, 0x2b1812cf, 0xe1a7e263, 0x70ffdfcb, 0xdfc7b418, 0x54dec655,
- 0x805689dd, 0x12dcdca7, 0xd8ceddd5, 0x13c7f933, 0x52d491dc, 0x223ddf87,
- 0xb97178c6, 0x71debde3, 0x55de798c, 0x0fc816d9, 0x4a3d1936, 0x64e2fc34,
- 0xe8826b76, 0x06947e0f, 0x041967c5, 0x55437ff9, 0xfe97632c, 0xc5f4a3ad,
- 0x32f2b89f, 0x7a8afbe0, 0xb439639d, 0x185068ab, 0xd1cc62cb, 0x19cd7995,
- 0x4785d58c, 0x5fd0620d, 0xa748bece, 0x292fca01, 0x8329af66, 0x870800d1,
- 0xa23fc241, 0xd41b7d35, 0xd8983757, 0xca5ec86a, 0xe119ac61, 0xd9a23bd5,
- 0x077e3f00, 0x0fa67e1d, 0x6bca00cc, 0x630b2e1d, 0xff78c29b, 0x7b64c29b,
- 0xb483347b, 0xecd192db, 0xd7a7c004, 0xf40d1633, 0x6983c481, 0x036e92f6,
- 0x1f3099f4, 0x8018f33d, 0x3eadd94e, 0xadd2f115, 0x9163aed5, 0xadf471fe,
- 0x4ddbe68c, 0x6039d76f, 0xf7813afc, 0x543b4d9d, 0xd873e03d, 0x156ada98,
- 0xf728d2d2, 0xa8f3c458, 0x2c6aaaec, 0xa8f7cd8f, 0xa59f686e, 0x00b3ed54,
- 0xe678d85e, 0xcce502d8, 0x848f8307, 0x18d616fa, 0x2398d905, 0x2828ce2d,
- 0xd9c6751f, 0x1de60038, 0xf60a62ed, 0x1a32d8c7, 0xcba45fbf, 0x38b2fd85,
- 0xfd0052a7, 0x2307fb0c, 0x767ec8bc, 0xec568d36, 0xf35c6f00, 0xc01ec518,
- 0xe24d305f, 0xfeccf5ab, 0x73d7ec53, 0xba40e748, 0x7398ebda, 0xfaefb423,
- 0x2c31bf72, 0xd2bedfca, 0xb778d0aa, 0x46b9ddbb, 0xd879f346, 0x9c8fdf0a,
- 0x0067ddee, 0xf1cf8678, 0x03da33ec, 0x519733e6, 0x1e2d297f, 0x11d2479a,
- 0x8ba60e77, 0xcfd4312d, 0xc6be33a8, 0x4baed001, 0xca07cc15, 0x543f9ad3,
- 0x5569af10, 0x71d20acd, 0x825ed367, 0x3f79b537, 0xea11814f, 0x7201dd20,
- 0x8f301755, 0xcd1d16c5, 0xa145b163, 0x99c5e574, 0x7fa82e89, 0x6c5eeac4,
- 0xc79fea19, 0x62771cca, 0xbebff415, 0x7cfb5ba9, 0xb7bc027b, 0x278867c1,
- 0xab72dfd9, 0xb269fa91, 0xf3847fb8, 0x72de9cab, 0x476f7900, 0x48a39fd3,
- 0x88e21a2e, 0x8e8709be, 0x96123d13, 0x78a0ce00, 0x0a85a1fd, 0x85f5aa83,
- 0x23a53340, 0xddb8279e, 0x8899318e, 0xf89f806f, 0x09311ac7, 0x8c62f2f8,
- 0x91c706bb, 0xd91427ed, 0xb45f5022, 0xcf77c5e7, 0x598e652e, 0xfbcf003d,
- 0x6eb2778b, 0x12f77748, 0xbb7b7ba4, 0x21878f81, 0xb38b313f, 0xa4bbfb16,
- 0xce38e9b0, 0xf942984b, 0x979e064b, 0x95abfaf8, 0xeb11ebe3, 0x7896c5a4,
- 0xf02b278e, 0xbc614bf1, 0x7a1eb05d, 0xb3bfcad7, 0xcd617e0b, 0x1e705f9c,
- 0xbb64bfa8, 0xdbdfa2dd, 0x3d5cee9d, 0x08cd93e7, 0xde11c8e7, 0xc3b7c87d,
- 0x90fb3cf3, 0xe098d3ff, 0x27ce3c93, 0xcfbefcc1, 0x467ea36a, 0x8865699d,
- 0xb9852cf7, 0xd32ee902, 0xa1ae10d7, 0x89a3dfc8, 0xb61e407f, 0x52c378e3,
- 0x800354aa, 0x13acc712, 0xdec67758, 0x5da9dab6, 0x3ee708e6, 0xd3ab37e4,
- 0x207cf245, 0x2be54ac2, 0xc07f3833, 0xfc407ff7, 0x2ed3cb05, 0x7e01a394,
- 0x7e438a39, 0x8fc8ec8c, 0xbcf013bf, 0x63a5e42d, 0x613feeff, 0x3acd71ff,
- 0x0c9d02a7, 0x65e9123f, 0xa2f874d0, 0x8fe1152a, 0x00ffc063, 0x358e6da0,
- 0xa52ce507, 0x5d20a1cc, 0x875c229b, 0x230fd4ad, 0x4732b9bd, 0xef86ade7,
- 0x893b46e6, 0x3ed8df7e, 0x366667ec, 0xc01f6fcc, 0xe115fc1e, 0xb6b07b03,
- 0x6ff4f508, 0x3d4469ff, 0xd7f2976a, 0x38a22434, 0x48ae3f80, 0xf7a7e0bd,
- 0x410bd271, 0x73d60ef4, 0xebf6f4c1, 0x039ead05, 0xf4e05d7c, 0x9f226770,
- 0x522f35c5, 0xead07eb9, 0xd1e8ceb5, 0x3a083f41, 0x7e708a5c, 0x0c5cf881,
- 0x7ae38832, 0x583eb346, 0x0e1d99de, 0x76a4898b, 0x1c3b45ff, 0xd1df1316,
- 0xadcceb0e, 0x47c01507, 0x576a8379, 0x8fd50a61, 0xff489cd9, 0x8714eb17,
- 0x41a4e509, 0x71787e25, 0x4bd93865, 0xbb67ef09, 0xdbb107a3, 0xcff21520,
- 0x8ec9dddc, 0xf188c656, 0xf18790ed, 0xd679592f, 0xc5277568, 0x1793e804,
- 0x4308be1b, 0x644ff73c, 0x62c64e03, 0x0ed6a70b, 0xf0290883, 0x537f09bd,
- 0x6b83af10, 0x69ac1454, 0x95269dee, 0xfb1dd6e8, 0xfe5ace1b, 0xbd700d9c,
- 0xceb46dbc, 0xf537c81d, 0x05ec6757, 0xb8232bf8, 0xf7b4be5d, 0x33e42f5a,
- 0x92f77f59, 0x3f7bd262, 0x13192b07, 0xeedf1fa0, 0x61eb8273, 0xd53d6856,
- 0x2a76871f, 0x850023d7, 0x7a3ee651, 0x70d427f4, 0x27f5aa87, 0x7baa3988,
- 0x7f107e02, 0xbf84e18d, 0x77209eea, 0x48e70419, 0xc5bf7fec, 0xfca092c8,
- 0xb623c80a, 0x037f7093, 0xe2074778, 0xaa6f0fd1, 0xe08454f5, 0x5f0fd50b,
- 0x0e905d3b, 0xd16abfd0, 0xb12e3cd3, 0xb3d20770, 0x4864e8ce, 0xe151cf7f,
- 0x55ba081f, 0xd4bb9e08, 0x1b88f430, 0x91e2f7e6, 0x7f7e0754, 0x984afea9,
- 0x46fd122e, 0x979c14f7, 0x851ad69a, 0xc35f70f6, 0x032dbe95, 0xc54dbf48,
- 0x4e872bf2, 0xbe03dcd8, 0x8466fcd1, 0x7e7687a6, 0x9ac8fa95, 0x6150ff40,
- 0x7cc17e7c, 0xfe3434a4, 0xf244194b, 0x7a1f50c7, 0x1ab799d2, 0xa072bde6,
- 0x997ba7e0, 0x983ee51d, 0xabab46de, 0x6e402622, 0xaaed15ff, 0x9c70b842,
- 0xcf342194, 0x741f9885, 0x41a66678, 0xc31bf387, 0x7f4a6ed0, 0xb0effd0f,
- 0x4eff7e21, 0xfe7dafce, 0x3a0daf3d, 0x3fbe68c4, 0xf80071c6, 0xaf803ae5,
- 0xeac18c12, 0xfb79c08c, 0x62e51e88, 0x48e6e57e, 0x61f30a9f, 0x5a170cbb,
- 0xd6e81ea4, 0x4fa07e11, 0x19e7c20d, 0x46e238ed, 0xd235fefa, 0xd1f88d51,
- 0x5fd15b37, 0xc6acf985, 0x28a2367a, 0x2fdfe711, 0xf505b9d2, 0x18cb4bfc,
- 0x57a5f3f1, 0x4c137798, 0x1d7efb76, 0xd646cbf1, 0x9fbef88f, 0x570cf2ee,
- 0x3af3c924, 0xb9ab3b84, 0xb77a8756, 0x43aed7a7, 0xb9b7da7d, 0x3e23a74e,
- 0x8622ff70, 0x5799e4fc, 0xff5f00f4, 0x37ea556d, 0x686f07e4, 0xaf40ef7e,
- 0x92f481cd, 0x7c16cb78, 0x43788d90, 0xbfaad083, 0xe3f662d0, 0x52c3e80d,
- 0x63fb1d6c, 0xf11d25ac, 0xe9faf8d4, 0x5fd02d5e, 0x67a71bc5, 0xfe2a5e20,
- 0x18262260, 0xd1be87f3, 0xe59fefc8, 0x2e67f684, 0xe872ad2c, 0x257a9fc1,
- 0x85d91ba6, 0x446cd632, 0x6ebca13f, 0xa01f4381, 0x76ffa5cf, 0x8f9cd0c8,
- 0xaa1a1ffd, 0xe78065b2, 0x5de18e2b, 0xd13b0858, 0xbfa12fee, 0x472fb006,
- 0xafcf11d8, 0xff734567, 0x20f646d1, 0xa5a1fdb8, 0x7a631c73, 0xa1dfde74,
- 0xbdd361c1, 0x907e7df0, 0x8634741f, 0x18b6b5db, 0x2141876e, 0x8bce113b,
- 0x3d78b7bf, 0xc8717450, 0xfd0c5147, 0x641f5dcc, 0x2e8f807a, 0xfe47e6b2,
- 0x35767288, 0xf971d692, 0x5cca3f83, 0xe5cbf166, 0xdcf8231d, 0x70ed78ef,
- 0xe7da1d94, 0xe717df6f, 0x1ddfc009, 0x9db57f4c, 0x5c7033af, 0x7d33e346,
- 0x07a42f4b, 0x04d9e3fb, 0xaff9a43b, 0xf9c715bf, 0x74874120, 0xdbf4245e,
- 0xc028b0ef, 0x525fcedf, 0xf3247c41, 0xe6666de5, 0xebc80d8d, 0x04b88d9b,
- 0x648b6f2e, 0x37b7685e, 0x7a4712c6, 0xa3259926, 0x5f14e99e, 0xe0cf8937,
- 0x43cf8972, 0xa3a6cf83, 0x2759f15f, 0x1b6f70fe, 0xf7ced76f, 0xd74fc14b,
- 0x7ceff3fb, 0x0aec3ea5, 0x8d845df1, 0x638ae9d3, 0xf7f41764, 0x1ebe8df2,
- 0xe4ed10d6, 0x4f1832fa, 0x8d2b5f6f, 0xdf7f7ec0, 0x050bca66, 0xfba6de97,
- 0x21ca7e76, 0x1fc02a80, 0x53baa6dc, 0x9c1fc26d, 0xbe8af90c, 0xbad1c657,
- 0xd08bf25a, 0x4f30777b, 0x306fca23, 0xf77a487f, 0xfadd01b0, 0x296672a3,
- 0x318bb748, 0x4237ce76, 0x047ca41e, 0x9d3831e3, 0x50ee6460, 0x62af80eb,
- 0x98c35e24, 0x23e1f407, 0xdf640d1b, 0xf8fb5f0f, 0xf7e04cde, 0xd60c1bd3,
- 0x9ce430e5, 0x3ea5f617, 0x0673f503, 0xe40aac9b, 0x3d7ddb4f, 0x5f4bee50,
- 0x0fc85d50, 0x923c37a7, 0xf3d21330, 0x0065bf20, 0x0346e947, 0xf8d9cf95,
- 0x159f9528, 0x42fa5d72, 0x97d47f7c, 0x4eaff787, 0xf9ce3a40, 0x486989ea,
- 0x6b46ed97, 0x9690c1ff, 0x631c536d, 0x6f3df002, 0xbf269873, 0xe0e6c75d,
- 0x995d243c, 0x47af6ee2, 0xa6d53f13, 0x1f7176fb, 0x04dfd69f, 0xa0aa72ff,
- 0x3029c4e3, 0x57a18aef, 0x77dfafc8, 0xc9ec9c20, 0xf92a919b, 0xffc1be8f,
- 0x20db9def, 0xde95fb9f, 0xa09dbe41, 0x674158f3, 0xb7a45cb0, 0x9f3ccc4b,
- 0xfc9ebafb, 0x4a8b5ccf, 0x91f686f8, 0x7bfea163, 0xf40fbda2, 0x5321ca2c,
- 0x848dd7b3, 0x0da36376, 0xf7d0ed0c, 0x19f2f37c, 0xdb3bc9d8, 0x025bc7f4,
- 0xd59a33e6, 0x387f41f6, 0xca821987, 0xcf2c7a9d, 0xc7cb6b35, 0x2cfbb8f6,
- 0xad630ffa, 0x77c972da, 0xfab4d88f, 0x7381df62, 0xf3009b3f, 0x92a4392b,
- 0x7d237cff, 0xafc22efc, 0x5dbf3a47, 0x3bbe0db8, 0xf872b02c, 0x4915e9da,
- 0x142357c8, 0xd3acf186, 0xe3cb42d7, 0x665ea41f, 0x7e3edf40, 0xe63816fa,
- 0xdc51e7b6, 0xa0b119ef, 0xdff08f3d, 0x7afb1ebc, 0x8d9b753d, 0xed8233c7,
- 0xfcdcb046, 0xfe46ecb7, 0x3dd2b7e0, 0x837f4a16, 0x5ced85e9, 0x89eb06ef,
- 0xe1213b60, 0xbcb7860a, 0x0fb1a74d, 0x191ea15f, 0x8c27681a, 0xc3be2764,
- 0x6d8370f5, 0x30f5c768, 0x2f8431f8, 0xad3d30cf, 0x6adfc1c3, 0x220376c4,
- 0x0039b1ed, 0xb76b0ad2, 0x167fb208, 0x405b7ef7, 0xb6ffaa1c, 0x8dea1d5a,
- 0x418cf8f6, 0xeb91a71b, 0x9d8477d5, 0x1cfa13a8, 0xceb4abd6, 0x4eee18df,
- 0xc9b8e3d4, 0x8c96c746, 0x48f9f88e, 0xcfccefb4, 0xbeac7e95, 0x8fb1fb86,
- 0x389a5d58, 0xa893eb19, 0x5fa2fb89, 0xd9edf28e, 0xfcf34e1b, 0x420d7154,
- 0xe733d439, 0xf4016ddf, 0xf87036a9, 0x8f89e183, 0x319276e5, 0xb207f917,
- 0xd0e8f67c, 0xd5895c7a, 0x679cfb53, 0xff474fef, 0x07c6d3ed, 0x7ebf51d2,
- 0xede7141a, 0x169e1e4c, 0xb0a4faf2, 0x2e223123, 0x11ca14f0, 0x9dda41e4,
- 0x55fbe1db, 0x57245d4b, 0x9d3a5d3f, 0x2e9667d2, 0xb613faa1, 0x9adf1a0c,
- 0x775767f3, 0x35c9d3c0, 0xd989d92a, 0x18af5746, 0x59a4eef8, 0x1385f147,
- 0xbb799155, 0xd19cb122, 0xb3f6727a, 0x0278b613, 0x6c277ee2, 0x6743a27a,
- 0xfd8f8cf3, 0x9cbdfa66, 0xbda144b5, 0x23d7e76a, 0x2db5ec1d, 0xac7fe316,
- 0x55db0125, 0xb6bdcd2a, 0xb7c71d29, 0x2c7c6732, 0xf59197f2, 0x0ba9b947,
- 0xb828f6e7, 0xe72c4a5d, 0x1fa1bfc3, 0x8a7efd0b, 0x9e842b76, 0x2f3a0bb1,
- 0xb1ed38b4, 0xc922efa4, 0x81b8275f, 0x728cbe5f, 0x60b84776, 0xd313d65d,
- 0x7c785d61, 0x2ac5b16e, 0xf0a9c3f8, 0x5fab8e75, 0x5bf8886c, 0x609ef167,
- 0x7811de38, 0xc9038e0d, 0x8681c654, 0xbcf9f2c6, 0xe26ab36d, 0x0eff1842,
- 0x79f9ff15, 0x1ba3d72c, 0x2a3c5bc3, 0x7c03c3ca, 0xdb25de3d, 0x2b7c60a3,
- 0x0736bf25, 0xc5775fe3, 0x2da5b8f2, 0xeb889dcf, 0xa344e4f6, 0x3736e303,
- 0xea038f2b, 0xc05f7c9c, 0xd63823ad, 0x7ac27b37, 0xcd095c01, 0xa4231cee,
- 0x0d317153, 0xaac2edf8, 0xa0dd2196, 0x6b23211d, 0x9157b87f, 0x92686c8b,
- 0x4028f429, 0x6772861a, 0x454f728c, 0x559b0cba, 0xfa718d55, 0xffbe3294,
- 0x77715670, 0x9c9f01b5, 0xa43c2b7d, 0xe8225c3f, 0xc9d194cf, 0x36b3fa68,
- 0xacae081b, 0x819f3fcd, 0xf9b59f7c, 0xa012bd2e, 0x1ac79687, 0xdf67fe68,
- 0xe295d79a, 0x58989f16, 0xea7a10cf, 0x1e5f8287, 0x69d87410, 0x8b7fcd0c,
- 0xac6fbb45, 0x55405bcf, 0xe2f9f569, 0x5b3880d8, 0xd265f945, 0xed6393e7,
- 0xfaf3ce34, 0x80c903ba, 0x3167e7cf, 0xe68a0787, 0xf148dd6d, 0xbac69dd7,
- 0xe4efb8f1, 0xa431d0a9, 0x7f03fefb, 0x7e8c9038, 0x60fb2ce4, 0xbfda23f4,
- 0x4ad2f43f, 0x0e7e7f2c, 0x4ff88c1b, 0x60ad3ef2, 0x619fe63c, 0x7faf84b5,
- 0x65b1316e, 0xbf7db718, 0x210ffc6e, 0xc74cbebf, 0xcf04bf50, 0xe832461b,
- 0x19f53112, 0xfd382374, 0xba4a0f51, 0x91190303, 0x886e921e, 0xee90edbf,
- 0xc11af2df, 0x2a9ae12f, 0xb6f3dee2, 0x7c8f1f6e, 0x53665c92, 0xe1204e30,
- 0x793d91ba, 0xd9dbf631, 0x0ed0ef93, 0x221b12bf, 0x0ec21d2e, 0x610d88ef,
- 0x2fed8387, 0x637738b3, 0xe1d91bbb, 0x36ff82e1, 0x7fed06c6, 0x08597ee0,
- 0xed8d0dba, 0x1fdf0f10, 0xe3ff621b, 0xdef0f146, 0x43ff72b1, 0xc97f0f1b,
- 0xad6ff5c8, 0x293c3d8d, 0x1374bc84, 0xb5d6ebe3, 0xc67ed11a, 0xf5212835,
- 0x518b62d9, 0x1c7e1fee, 0x125fb01b, 0xe0438fac, 0xefb628f7, 0x17b7cdbb,
- 0xe9890d5b, 0x5bee8111, 0xdeb0ebbf, 0x29ce310d, 0x22fb3f81, 0xf6a6fb46,
- 0x0afa462a, 0x141e5ef2, 0x371429f4, 0x934f38b0, 0xbec1badd, 0x1cd69dd5,
- 0x3ee483f2, 0xd6ab54d5, 0x938b8c4f, 0x239f9ced, 0xd89d699c, 0x3c097f89,
- 0x70dbfcc1, 0x4c45b8dc, 0xf7231fb3, 0x3caae261, 0xcd0b318a, 0x6af8a311,
- 0x32f758d7, 0x1d6bb403, 0x209b1cc9, 0xcd685d1f, 0x9b6a7d41, 0xfdc468e3,
- 0x7917959a, 0x02e57e3c, 0xfd4e5de6, 0x7732f9f0, 0x17ca2f37, 0x2e302dd2,
- 0xe1c71110, 0x72e38888, 0x1cd27606, 0x8f0ae1c7, 0x9293b00b, 0x3e75ba1e,
- 0x1adae895, 0xd0054eda, 0x77df46d5, 0x50efd742, 0x3771ed2e, 0x8cf71fe7,
- 0x19be05bb, 0x7c737718, 0x4b4c14f3, 0x8f04dfec, 0x0ff38997, 0x2cf80f1e,
- 0xf826ee2b, 0xee2deaec, 0x50881e8f, 0xa5dc5dbe, 0xc4275e23, 0x6b77d085,
- 0x487d75f2, 0x4770b35f, 0x233abb28, 0x7dbff22e, 0xa086645e, 0x179d2df7,
- 0x2d27f179, 0xdcb3a446, 0x06f090f7, 0xe591139f, 0x264a0d15, 0xf6733fae,
- 0xc075a35d, 0xd37fc6a5, 0x79e3a03a, 0x42ee3e0f, 0x678003e4, 0xcea03aac,
- 0x7977f207, 0xea56919b, 0x22ffb32a, 0x4df6e31b, 0x53b5e606, 0xefb6337b,
- 0x7efb78c7, 0x633656db, 0xd31faadc, 0x547ac47b, 0xa8fdceb9, 0x7ee39468,
- 0x8d5b2a4d, 0xbebcadf9, 0x6197998a, 0x847a3a5c, 0xf003d98e, 0x670ce319,
- 0xc67c00f6, 0x7934d9e6, 0x2f9e4eb9, 0x25778dc6, 0x32efbe6b, 0x7da69bbd,
- 0xa69fbb92, 0x10ce653e, 0x6aad3e4d, 0x577da694, 0xf981cfb0, 0x9addcf0c,
- 0x266bddf6, 0x6b3df26b, 0x3fb4d01f, 0xcd9eaacd, 0x9c7a79c6, 0xce003dcd,
- 0xdece0259, 0xdf358beb, 0x0dd5d7f5, 0xf920a1f3, 0xffee1f14, 0x326c16cd,
- 0xfa73c44b, 0xfa69e77a, 0x5e6aff3d, 0x9df80293, 0x19386b5d, 0x7c219f18,
- 0xea4b7e00, 0x8c1cf615, 0x5b5eba5b, 0xe9e1a73f, 0xce902995, 0xe7cb6af5,
- 0x829cbee1, 0xdf2dadfc, 0x823d73ad, 0x71ed9deb, 0xe228ce22, 0x7f03ac3d,
- 0x97b8d244, 0x9778f037, 0xc41de9ea, 0xb13a5a1f, 0x762fc96f, 0x44a62fc1,
- 0xd984bf2d, 0x7a52fcb5, 0x1f30e770, 0x88ff88eb, 0xe47c413e, 0x78017f81,
- 0xf895f897, 0x7cb438b7, 0x9de15df5, 0xdb91af31, 0x3ff96d0d, 0xb5f1b4e2,
- 0xe3ee917e, 0xd4aeb7a8, 0xbf71522f, 0x7df1e58d, 0x57f52bfb, 0x6bb21d07,
- 0xdb3ad7f6, 0x17173a4f, 0xfb175718, 0x3f709749, 0x137636f1, 0xfefb89fb,
- 0xd900f885, 0x1f801b77, 0xb98708d8, 0xcc30ff7d, 0xac147fbf, 0x760a6537,
- 0xb238cbe3, 0xd77dfe27, 0x587af86a, 0x085c5ff4, 0x9387dcaf, 0xf9e472e7,
- 0xe7c91621, 0x3c01fb29, 0xf143f1a8, 0x022d9ffc, 0xf7dbadd7, 0x96f5a42f,
- 0xc4c43f3c, 0x7e10bfb9, 0x3aefe93f, 0xfc8935fc, 0x6e78f081, 0x6d3123cf,
- 0xf9c407f7, 0x3bcf692e, 0xb7ee47eb, 0x7b2a9675, 0x6c3fda55, 0x9b1dc255,
- 0x5e93d842, 0xfd72dff1, 0x1c43a675, 0xe07c57ba, 0x2bd1ebfa, 0x00aed007,
- 0xb962fbde, 0x5af602c5, 0x5febdbf1, 0xbf114444, 0xbd541eb6, 0xb2e0a1bd,
- 0x036d1ed9, 0x6984363c, 0xab4c88ed, 0xf403c9cc, 0x3daf58b1, 0x11f727e7,
- 0x6367db83, 0x27fd80fb, 0x8fe34ef8, 0xfdc21cac, 0x6e78ecb5, 0xe488bfdf,
- 0x9114c1fe, 0x175a0caf, 0xbf70d655, 0xe9f8236c, 0x4cfd010d, 0xfb50b789,
- 0x533911a5, 0xc08e29e2, 0x332bbbd7, 0x5627f214, 0x21f90a2a, 0x5347a267,
- 0x9cb8b5fd, 0x623fc4c9, 0xbbe8299c, 0x6edaffc1, 0x38bb1e1c, 0x37fb238f,
- 0xe6f6f3c7, 0x4e94d1f8, 0xb88ab789, 0xf21fb5b3, 0x93846547, 0xa1aa35fa,
- 0xc7a6a5f7, 0xb5ad79f3, 0x6a27e930, 0x84e3fee2, 0xbf38c4de, 0xf9a3fb89,
- 0x71fc1363, 0x7f66a67e, 0xf1138c68, 0x773e857d, 0x53ba9ea2, 0x78b3bdd0,
- 0x758fb3bf, 0x75276e3f, 0x12fbf78c, 0x1dfbc643, 0x0fde28e3, 0x5e71ea5f,
- 0xa47f71c1, 0x2fcd8443, 0xb7c48f3f, 0xd45483db, 0x7746f54f, 0x5abbae35,
- 0x0eff8377, 0x2dbc7dc5, 0xf0301fb4, 0xae077750, 0xb06656af, 0xb7e416bd,
- 0x7fa18c65, 0x5ba2df5f, 0x6bf5061e, 0xf58acc3c, 0x75f03723, 0xbb7ae95b,
- 0x8069a703, 0x8865b075, 0x8bac1f5f, 0xd7bc218f, 0xc46e4cb7, 0xf73581fd,
- 0xa19a1b32, 0x1b9b565a, 0x06d0fe85, 0xf8dd7216, 0x09b39094, 0xd4557b39,
- 0xdfe8f117, 0x53d3a087, 0x0fa04e82, 0x879d22d6, 0xf0afbab7, 0x4fae38fd,
- 0xfae22548, 0x1bcebf74, 0x373f3d6a, 0xf21d773a, 0xe86f40c1, 0x0b5ac3f5,
- 0x1175b7e7, 0xdda8e7ae, 0xb5ee7e2e, 0x59dfea54, 0xba07a63a, 0x9d74114f,
- 0xdf9f81b2, 0x5faed760, 0xc89fa557, 0x7a867fa8, 0x8b5ef4a5, 0xa543e317,
- 0x821e190d, 0x7a38a5cb, 0x7868bea2, 0xd2f985df, 0xc62b2cac, 0x657f9c23,
- 0xe43ea9ca, 0xbfae3262, 0xd494ecf4, 0xe3c76427, 0xb269df68, 0x16b5b7e0,
- 0x0d473f3b, 0xfc008e28, 0xb6586e97, 0x7cefcf17, 0xfb466bf4, 0xd6d4474b,
- 0x54d9d861, 0x2192ce40, 0x2159ea98, 0xfef82bc5, 0x1fa2bdd5, 0x5c61bfcf,
- 0xa9dc7fb3, 0x3e69070d, 0x63cc0c47, 0xb4ba3e06, 0xfa3b3ee7, 0xcde32bdb,
- 0xfb93c714, 0xf75547b2, 0xf48464e3, 0xda0815ad, 0x56110dfd, 0x43fc7f8c,
- 0x82465df0, 0xc7d21f7b, 0x057c7cbd, 0x6596c1da, 0xa3c474a6, 0x1a92797b,
- 0xd7ba7007, 0xd1ef342a, 0x788c93cb, 0x8ae6617e, 0x4f50e3c1, 0x95ce610d,
- 0x2879df18, 0xd3948596, 0x9d7cf1f3, 0x60655c58, 0x849a68c6, 0x172fe311,
- 0xbf8a146b, 0x015bfee0, 0x988e67d0, 0xf7aaf95f, 0xec3f88a3, 0x1e186ff0,
- 0x733fe5cb, 0x1e494aaa, 0xe46bcec1, 0x19d58ca7, 0x799fa093, 0xdb39606b,
- 0xf1fdec8d, 0xc043fe0e, 0x3af2ef79, 0x5b257d6d, 0x7f9d39f3, 0x5befe061,
- 0xeb682ed0, 0xe8f2953f, 0xaba40e60, 0x8cc5b17b, 0x0b4684e7, 0x51ad14bc,
- 0x665fa8ac, 0xbf8c68ae, 0x9b33f20f, 0xb4f20754, 0xe8eb0bc4, 0x8f39d3ef,
- 0x693d4dcb, 0x6ed9703d, 0xdced82ef, 0x2bff5c51, 0x2c70f77a, 0x3dd6287f,
- 0xadf1d71c, 0x9e18589f, 0x06d6399b, 0x1ec618fe, 0xc5106ccd, 0xd06cac1f,
- 0x5a3b00a4, 0x6026b064, 0x16c3dfbc, 0xb7be3226, 0x779c0e65, 0xf90d79a5,
- 0x07e89581, 0x1f91f9f2, 0x672c50f8, 0x15d7d6d5, 0xeac39f3a, 0x9a07e40d,
- 0xd6781f85, 0xae009b5e, 0x6e3d7317, 0xde2d3920, 0x687e4316, 0x235c8af1,
- 0xaef16ff2, 0xe043d218, 0x63d87e07, 0xc78e9f98, 0x481e6456, 0xeb8b7e84,
- 0x96aa1c32, 0xb8ff5a7e, 0x5ba498e6, 0x4b4fbf47, 0xe8a193b7, 0x94d2f406,
- 0xff0824a7, 0x3d8bd04e, 0x477e4b16, 0x7ab782e1, 0xcb9e019a, 0xee746155,
- 0x83ff33a8, 0xbed1c6a5, 0xe6175c9e, 0xdfafb725, 0xcafcdf68, 0xb744a19a,
- 0xfde57a60, 0xb470e667, 0x16afec27, 0xa1ec7a86, 0xc9e1ecee, 0xc2b0fe50,
- 0xeaa1ebe5, 0xa72879f1, 0x6f5c0959, 0x6614b7be, 0xfdec6468, 0xece666a5,
- 0x4b07d8c5, 0x68ff94ad, 0x3fe52269, 0xf4a76a5e, 0x287da593, 0x68e2293d,
- 0x0180ce52, 0x25475f88, 0x951af970, 0x6cdee220, 0x8caa2251, 0x307cffbc,
- 0xa1c56754, 0x4d8c27d2, 0x271eec63, 0xdfc02320, 0x046f7e99, 0xdf3db912,
- 0x4b8eb062, 0x9571ff44, 0xd8befa42, 0x3da6bb75, 0x3c4625f8, 0x64facdff,
- 0x9ce9cbfa, 0x1938dd98, 0xfcfef0f8, 0x69fb4d58, 0xfc9a2935, 0xcd3b04e4,
- 0x775e527b, 0x8503f94d, 0xa2f935fd, 0x9e024036, 0xf8db302f, 0xb7d8aafe,
- 0xd7c6cc67, 0xf6de56eb, 0x2ef0d56a, 0xaf7807df, 0x7d50321e, 0x5d243d30,
- 0x31d7ad67, 0x73368037, 0xfa0dcc3d, 0x933b593d, 0x11fceae4, 0x95fdd90b,
- 0x1ddf32db, 0xdb63f901, 0x7ebfb40c, 0x5aec456c, 0xb63e3fdc, 0x418a3e2d,
- 0x32a95eea, 0x7ac1fa1f, 0xe80591ab, 0xcb15dcb7, 0x9156fce8, 0xf940e4d7,
- 0xf9efda2f, 0xd1dbcc95, 0x5120441f, 0xbd543e3e, 0xe7e8853e, 0x14befc24,
- 0xf902dde6, 0xf1afa033, 0x16ccf8c8, 0x8519d70e, 0xd9fcc0ad, 0xfaf5bfb0,
- 0xb171c03e, 0x8744b6a0, 0x50f501eb, 0x1dcbe93c, 0x2dbe432a, 0xfda0605c,
- 0xa91fb9bb, 0x94cf311b, 0xde9c2921, 0x64479d2a, 0x7fa8992f, 0x021c0ad6,
- 0xd6fd16ed, 0x77d689b4, 0x0f77e44c, 0xfc16aef4, 0x26747c89, 0x08633986,
- 0x3de08558, 0x46fc7ef7, 0xd3e3f7ac, 0xfde7083b, 0x32b5dea5, 0x5cebf801,
- 0xe9107789, 0x49e2c7b5, 0x7ef182ae, 0x96f5c8d2, 0xf140e507, 0x9cbf4beb,
- 0x9d3e272d, 0x38247069, 0x25655f48, 0xb93abea1, 0x98e740c6, 0x3519de99,
- 0x3b3fd689, 0x38e58f88, 0x71f6f527, 0x9ac9df08, 0x96fb860c, 0xf2546bc5,
- 0x78ff9007, 0xffe72f7b, 0x1b3755a7, 0x28d6fd0e, 0x23a5d66e, 0x60b23cdf,
- 0x9a639d38, 0xc141d621, 0x9033e07a, 0x7f2f7755, 0x7e1ede7e, 0xb56586ce,
- 0x181defe8, 0x7c158f38, 0xbfde44bc, 0xd650073c, 0xd1474fed, 0xd4dcbee5,
- 0xf0ed1a3d, 0xe2550fd9, 0x6addb3b3, 0x3d022587, 0x0ef6e82f, 0xcfe43efb,
- 0xe94e7817, 0xb854ff21, 0xae02677b, 0xd26b7457, 0x3e786e95, 0xdbdac4f6,
- 0xaf73bbe1, 0xe6241c18, 0x7f49e24b, 0x6e697bcc, 0x8ef3c0d7, 0x2f97f51d,
- 0x38bfef99, 0xe7c01493, 0x38dd7b7c, 0x775c00eb, 0xe21d84bb, 0xe6e3b1f8,
- 0xbac5e02a, 0xc343c14e, 0xcb50c4ec, 0x3d773c6a, 0x3024c413, 0xec42784e,
- 0x7a101f8f, 0x1109fa45, 0xa67e785d, 0x7b37f38e, 0x0dbf2155, 0xbffa347e,
- 0x944f9c52, 0xebea5f7a, 0xfb4b5632, 0xf81247c1, 0x8ab9c639, 0xe055da97,
- 0xa8a82ece, 0x420f2b12, 0x79356d97, 0x97d419bd, 0x480fd035, 0xfbe762ee,
- 0x9e57c643, 0x652db645, 0x417e7ccd, 0xbc19ceed, 0x1d19cd25, 0x75894ded,
- 0xf51b0ae3, 0xfc60706f, 0x69c854a4, 0xf5e2ad79, 0xdd9079f1, 0xd98af194,
- 0xdb066ec2, 0x28f60ea7, 0x0ecd0ec8, 0x56acb3b2, 0xde01576b, 0xc2471c48,
- 0x2ba70c1b, 0x987842c2, 0xbda17007, 0x0f7b712d, 0x15b8244c, 0x03b25007,
- 0x35cca53c, 0x01e70626, 0x477b3ef5, 0xd5e782f8, 0xcf315e01, 0x1c4bb860,
- 0xff70fb9f, 0x5187cf18, 0x1e6829bc, 0xfec11e92, 0x894ba49a, 0xa4b7e387,
- 0x7e7a2141, 0x3207eeda, 0x7e491b8a, 0x9d500581, 0x44d77bf6, 0xeba567d4,
- 0xf866ff40, 0xd3f247f9, 0x8517563c, 0x7cc152fc, 0xbe50932b, 0x46d3757c,
- 0xe1bedfa2, 0x49ca237d, 0x53d7cda1, 0x5c288317, 0xb4bcd3fb, 0x1044f580,
- 0xd7446f9e, 0x9c378a6f, 0xbdc65e95, 0x37b34f00, 0x41b0ceab, 0x1ce2769a,
- 0x60e48791, 0xc463c78e, 0xb11cc6f8, 0x9c11bfee, 0x6e7e7a95, 0x76f086fc,
- 0x5b7d27e2, 0x68baf3b8, 0x9fbf62fd, 0x943a33d5, 0xaff76a9e, 0xdffaec82,
- 0x217e3c0c, 0xbd55f1f5, 0x5025e293, 0x2aaefc2e, 0x079b8f2b, 0x2cb4f1e9,
- 0xc225e3d2, 0x4e71cbae, 0xf149dfad, 0xf768d9b7, 0x5d3fca03, 0xfee293b7,
- 0x476657c6, 0x6a7dffa1, 0x78b5ccfd, 0xb6d4ebbe, 0x4be76499, 0xde76939f,
- 0x1ff40c6d, 0xbd17d772, 0xdfe463f8, 0x9f1461ad, 0x16efd92f, 0xabd01eeb,
- 0x9ebeced0, 0x8eb651eb, 0xb452d5eb, 0x1ec80387, 0x8b76c6f6, 0x51bddc4b,
- 0xc9ca020e, 0xe99e2e4e, 0xe81eddfe, 0x11cbf177, 0xc2a10f0e, 0xdf91d3ea,
- 0x3da1f56e, 0xf1f85f2c, 0x0ee73c51, 0x1fa3fff6, 0xd90ffb48, 0x92f77a8d,
- 0xc26f282e, 0x1bab97ee, 0x27e8add3, 0x5ff13b08, 0x3439bfc0, 0xfe1087fe,
- 0x01ff118b, 0x6bc720fb, 0x06679e38, 0x1ac4ffe1, 0xdb95974e, 0xfae147ba,
- 0x774d78f1, 0x6f8eb3f2, 0x9ff849eb, 0xcff01ab5, 0x3f5a5fe3, 0x8ff006ab,
- 0x3fc408eb, 0xdfbc5bc0, 0xe0eff02e, 0x78e1aff8, 0x3a786b67, 0x3d9e03ab,
- 0xf1618e74, 0xf40e4daf, 0xf984ce1b, 0xb33a9fc8, 0x01d5655d, 0x83f4987e,
- 0xbfb560be, 0xe24d7f42, 0xe6af6e7f, 0x0a7fa841, 0x1453fe9f, 0x76ee61d2,
- 0x80efa41e, 0x82ece67f, 0xfc7fb8fe, 0x95e9bf76, 0x7e01f12e, 0xa9d24dd3,
- 0xd918b982, 0x1d3f86c5, 0xe28375c1, 0x786d4f84, 0xd7dc468f, 0xd7ded7a8,
- 0x851933c0, 0x382f8c36, 0x278466cc, 0x8a1ef835, 0xf91c619b, 0x9f3f3d9f,
- 0xbce490b1, 0x6dbe7355, 0xba19f322, 0xa758ffa0, 0xf2c6fdd0, 0x39513945,
- 0x55d4e30c, 0x2ee9c704, 0x78fce68a, 0xff23aab9, 0xfdc8ccb1, 0xf93fb948,
- 0xde0ff0ae, 0x25d7c2ed, 0xd2b175ef, 0x18c29777, 0x164b81d9, 0x23ef17a3,
- 0x112a7ac0, 0x31ecf7c7, 0x228edd11, 0xd57f804c, 0x6036e228, 0x1f4f9102,
- 0x1f4f9c64, 0x38a26c8c, 0x5e54ac39, 0xd0f8bb40, 0xfc839312, 0x8a68b8da,
- 0xf4203ed7, 0x2b9183dd, 0x1e1f685d, 0x313a348a, 0x783d7e85, 0x1a30ce88,
- 0x0df18786, 0xafba46c9, 0x796e6853, 0x0fbe9705, 0xd4789e27, 0x06dea00c,
- 0x55c637ee, 0xd75dd7e0, 0xc5d23b63, 0x9c6d1f4f, 0x743c32a7, 0xe055a3dc,
- 0x293a714b, 0xbe82639e, 0x2b8e01c3, 0xdcb12f68, 0x9df1e56e, 0x8b27400d,
- 0xe5c1be9f, 0x03f7c3cc, 0xe35cf0ca, 0x6419c628, 0x935fe104, 0xfaf2332f,
- 0x26afc4f0, 0xec66cbac, 0x103ecccf, 0x3a8656e9, 0x45f7d704, 0x978c5ed1,
- 0x25f183df, 0xe27aa61b, 0xe67ac997, 0x26b4f1d1, 0x89cccf12, 0x8144d378,
- 0x9823a9cf, 0xeef01a2f, 0x778da83b, 0xf9d3e7dd, 0xef7e037a, 0x911afbd1,
- 0xbb66753f, 0xb8e30ac1, 0xb82194f2, 0x7e5b292e, 0x25a6f073, 0xa5d7f39a,
- 0xcfc75e42, 0x80feb585, 0x041e21c7, 0xcdbc4561, 0xeafdb3d0, 0x8807ce10,
- 0xdd9b4a97, 0x2816ed43, 0x71950f14, 0xc1ff4936, 0x1d207dd0, 0x41f0141f,
- 0xf851353f, 0xbd3431bd, 0xf5ca26fa, 0xabad1d73, 0x9bb90be7, 0x9b6de52f,
- 0x27c6de56, 0x82bda9da, 0xede0307e, 0x26769141, 0xff44e317, 0x5cb912e3,
- 0x78e2dd64, 0x9d35b22c, 0x7908b7ce, 0xfef13729, 0x88f7f8e5, 0x8da24c74,
- 0x00f095fa, 0x7ec71ebf, 0x9001a074, 0x56d64f5f, 0x87347f93, 0x33b5c405,
- 0x5f24edfc, 0xebb7cc77, 0x4c75dd11, 0x10b3ad8d, 0xc2f7caf5, 0x1f47a81c,
- 0xacf5d634, 0x4ed8ec8a, 0xb214dbee, 0x3d230366, 0xc261e229, 0x23558cfe,
- 0xa1407ce9, 0x5e0fefc2, 0x7478b1ca, 0x31a718d1, 0x630b6910, 0xf13f3a14,
- 0xf27fb137, 0x89e2f450, 0x5185d728, 0x33a63d46, 0xfbf52b58, 0xe8b76b77,
- 0x33de18fb, 0x0e763156, 0x77c88f03, 0xa8d5b8c2, 0x8d7e7877, 0x4aa29b33,
- 0x21c77f22, 0xdee1e027, 0xca7f406b, 0x09df2f7f, 0x4b7a5ffd, 0xe93bb3d4,
- 0x0b8f4bfb, 0xf2e4f63e, 0x7d65cffc, 0x92afcf1e, 0xff3cf9f5, 0xe45feca4,
- 0x5faa0e9f, 0x4bff5416, 0x3ebc5f9e, 0x7e83df3f, 0xd2ce68eb, 0xd214a385,
- 0xbe847b53, 0x23ee5c28, 0xf6ea16fc, 0xf33474f2, 0xdc6eb2e9, 0x6ba2536e,
- 0xd51fda0f, 0xf682d272, 0x2764d5f7, 0x704f9fe5, 0x75c44b2f, 0x63c524d4,
- 0xa1fb7d44, 0xc4c1ec97, 0xed1fce4e, 0xf8e3c2e9, 0xbfa0929b, 0x05fb7ea1,
- 0x8c4eacfd, 0x1c7ef5bf, 0x80ae1c49, 0x26f99e7e, 0x23a7fcf0, 0x49be1b3c,
- 0xb592b33f, 0x487f48fd, 0x86be7549, 0xe32763f3, 0xe645e734, 0x8d11c4ff,
- 0xe78627f1, 0xf3f50045, 0x67a5d797, 0x5ff3ff42, 0x04bd3d7d, 0xe76125fd,
- 0xcc74da2b, 0xdb34f789, 0x1be7a518, 0x7de2313f, 0x8d5d8ab3, 0xaed071c6,
- 0x00dcd212, 0x9c04cab8, 0x93f4e760, 0x8373ec03, 0x3d8c1bd0, 0x5f37f7cd,
- 0x7a47ab3d, 0x9dd5d7ce, 0xf6ed0e7a, 0x3ee42528, 0xddf166cd, 0x17ce4ed1,
- 0xe87a15ef, 0x11b39a6a, 0x8c6bf9e7, 0xb73e4021, 0x60fb93ba, 0x855f1c49,
- 0x1ccdeec2, 0x01db3166, 0xe43f43cf, 0x1b2554fb, 0x3c608632, 0xd184cdf8,
- 0x6d7e24ef, 0xc795b53c, 0x3c781b53, 0xbcd6d0b5, 0x33379408, 0x31ad9526,
- 0x84d8c1df, 0x0339485f, 0xf36f8591, 0x9d5f324c, 0xc79b263f, 0x767f30bb,
- 0x506b63fd, 0xb9c29a6e, 0xf0d0fb1f, 0x17a8e181, 0x7422325a, 0x37e79056,
- 0x498c8be3, 0x8efcb143, 0x639e5871, 0x222af4b2, 0x8be232fc, 0xc75de337,
- 0x3db05f90, 0x46dc41c6, 0x307dfc5f, 0x2adbf70f, 0x0f75a79d, 0xda87708a,
- 0xc087fa77, 0x4ac931ab, 0x19901369, 0x870917fd, 0xf9fc1f1c, 0xdfd0cd45,
- 0x8349e5c9, 0x9f71db57, 0x3490c725, 0x467e3fd4, 0xe072fb82, 0xac7527f8,
- 0x6e292e17, 0x024b8e16, 0x5dee030e, 0xd95dbe03, 0xac06732a, 0x898f26f3,
- 0xcdefe4d0, 0xc0ce658f, 0x29bded38, 0xc2f4fc9a, 0x0ff69aee, 0xa9afeacc,
- 0x6735302f, 0xb1f80555, 0x1e49fb9d, 0x62d2a782, 0xee7f4709, 0xfc5f0def,
- 0xfff441e5, 0xf40eab36, 0xd9eee755, 0xa1db97f2, 0x4e3c65d5, 0x3b47f145,
- 0xc9cecbc5, 0xa77a28f3, 0xe85f703e, 0xf3a66b22, 0x03ed9d3e, 0x7cee75c9,
- 0x773a7eeb, 0x03ef5df6, 0xeb124af5, 0x086c21dd, 0x55cbc3da, 0xaebc511f,
- 0xefcf9222, 0x8cc7ebe2, 0xfb8a0fb8, 0xdf81d78a, 0xd10fc2ef, 0x50f36c3f,
- 0xfeb73b3c, 0x1d9bed18, 0xba7ae448, 0x1e817522, 0x42551def, 0x1c68768a,
- 0xe068abe8, 0x3646e697, 0xbbee1770, 0x1fb85866, 0xb19936de, 0x1e2be458,
- 0x884a69df, 0xf3d77ba1, 0xbca8f26b, 0xfa9c2da2, 0x1e6d4f7f, 0x5f09e747,
- 0xa1ff6853, 0xe5a1e520, 0x29b87e78, 0x11e033dc, 0x77e0b718, 0xde21e577,
- 0xf3f1762a, 0x9fea05b5, 0x5a4016b3, 0xdf479b56, 0x49aca817, 0x42617f01,
- 0x05bfb72e, 0xaf21f368, 0x46acb30e, 0x7d1aabbb, 0x3d479ebd, 0x9e90b463,
- 0x807b6e89, 0x336cafc6, 0x24f7f7d3, 0xd0d77f71, 0xdb1ae1c2, 0xc972a2e6,
- 0xba93530f, 0x860fd669, 0x7c7acdf8, 0xc2d0c397, 0x6dddfda8, 0xf39528fd,
- 0xafcfbb7d, 0x21ae3ee9, 0x14b8eafe, 0x71dbf798, 0x90dc958a, 0xa57c3d20,
- 0x0b34786a, 0xc7daa7a1, 0xf4e7e369, 0xdcfc6d4c, 0x738a615e, 0x31c0127e,
- 0x3d16b1f1, 0x48b107ee, 0x35f115b3, 0x7b60c471, 0xa97c8049, 0x2237ef7b,
- 0xdeeb0c7d, 0x369da237, 0x501b9a41, 0x33ce2e5f, 0x05e9eb04, 0x2f4f5249,
- 0x1dda54a3, 0xf9067576, 0x82ac33a9, 0xccfc8501, 0x7e54fa10, 0x1a6b4cdf,
- 0x1e6ee3a0, 0xc5347fc7, 0x45f502bd, 0xbe9b0e73, 0x7366f483, 0x7cc3ee3a,
- 0x9d03f057, 0x89f90acd, 0x751e742d, 0x0ebb08e2, 0x04abffe3, 0x6f8e525c,
- 0xa2c58f34, 0x5b2a7bfd, 0x77befd82, 0x4ecd9de7, 0x47f1afe8, 0x0f689999,
- 0x25cfb8e4, 0xcc297bf1, 0xd70d7ada, 0x446f9583, 0x0bde51d8, 0xfb863170,
- 0xb44c7b9d, 0xda72814e, 0xc37ca8cf, 0x89ef09b4, 0x7ac14654, 0x7dcfc615,
- 0x7c87cc33, 0x9866f8dc, 0xef46ed1e, 0x4873f774, 0x7b37dccf, 0xa1f5c18f,
- 0x67a4c1b3, 0x9f38f7e4, 0x09db3d27, 0x5bd237bc, 0xe5267cc1, 0xe5c35dd3,
- 0x69f048a5, 0xb73f90b1, 0x552ba390, 0xaf0e485d, 0xe340063c, 0xca63e93e,
- 0x27e85dc0, 0x7ef42dd8, 0x02cb9493, 0x79410f5c, 0x3a18ff41, 0x8dfd2bff,
- 0x6b2c3960, 0xe5bf52b3, 0xef21766d, 0xb94bca36, 0x6372162b, 0x2cc67e12,
- 0x53bbe7c1, 0x5e20efdc, 0xee41aa0a, 0x3f1f68a3, 0xcfc9e50b, 0xf22b4637,
- 0x624df017, 0x54fc8049, 0xd0cfde37, 0xb9c5313f, 0x41666f88, 0xcc9fe81a,
- 0xac4ff76e, 0xfe0012e3, 0x74322b89, 0x9da9df78, 0xe6f9db7f, 0x8f7ff8e6,
- 0x48e29790, 0x67f44f5f, 0x0e61550d, 0x397e873c, 0x2cf7ef8e, 0xb8f1c55c,
- 0x45cae0d0, 0x2fe162ff, 0xa1978a15, 0x697a81df, 0x632a91d8, 0xfee51c60,
- 0x0ecc41b6, 0xd6d29878, 0x1337d283, 0x0f1147dc, 0x04d36d45, 0xa37d06be,
- 0x4ea1c5fd, 0x7832471e, 0x198d8e4d, 0xab724718, 0x6933e748, 0xe04cfacc,
- 0x1d7ade7e, 0xbee4c3c5, 0x992b8ca3, 0x807cbcf0, 0x5f42fd1d, 0x37ef4e9b,
- 0x7299c704, 0xb0bfddb8, 0xbf7640d9, 0xc3cff1ac, 0x2e7f5074, 0xed05d9c3,
- 0x777a97c9, 0xbcf5f21b, 0x0d57dec9, 0xe4ff9f90, 0x7691a8ce, 0xf4eb3e3f,
- 0x47689ebe, 0x188f00eb, 0xa35baaef, 0xbfb979e6, 0xc9f7c113, 0x7e4b7f38,
- 0x79f3e60e, 0x9f88dd6d, 0xc89954ae, 0xbe015d0e, 0x48760165, 0xe7a91dda,
- 0x7c91fda5, 0xfae7ae5d, 0xda323cab, 0xb408c9eb, 0x395c933b, 0xf87d77c8,
- 0xddbf1a79, 0x1476b4d9, 0x38a5c1ca, 0xed28b73a, 0xbc1cb046, 0xd31c14b6,
- 0x8eed1d5e, 0xcf52ebd4, 0xfa35bb4b, 0xfe772fe5, 0x8a55cd15, 0xe032407b,
- 0xb73d6eeb, 0x2deb775f, 0xe3633fc4, 0x6f91e926, 0x1e8f5e6e, 0x98f47a13,
- 0x74568f46, 0x27060762, 0x741c9adf, 0x4cf3ed15, 0x3d447fdc, 0xa3dfd81f,
- 0xe0c7a329, 0x7327c63c, 0xc15dfb3b, 0x1ffe99dd, 0xfa6b7c9f, 0x88b2587f,
- 0xff40ddf7, 0xfd732617, 0x99efac1f, 0x73a7bf95, 0x2f5f4f69, 0x0ca383da,
- 0xc1da03ec, 0x16fb0886, 0x5ec80f61, 0xbf7b4784, 0x4d5c1ece, 0x7888599b,
- 0x1e0f610a, 0xf616fe4a, 0x94a1f240, 0x5227291b, 0x48e5822e, 0x9f84c5ca,
- 0xe9113ac2, 0x57bf1ef4, 0x6cf7ae50, 0x7b46fda3, 0xcf9460c4, 0x976e3f76,
- 0xc1d6f242, 0xf1f9084e, 0x6ed68bcb, 0xf290b948, 0x5ca7e522, 0x20ecc5c8,
- 0xd6a7d8b9, 0xf70a333d, 0x6bdd92cb, 0x72fde393, 0xd823b652, 0x57ca743e,
- 0xb3645918, 0xf218aae3, 0x81a43955, 0x44ea657c, 0xbe499e1e, 0xd968bf35,
- 0x6fd3f24d, 0xf4fcfbfe, 0xe9f84e9b, 0x3f0dbe7f, 0x3f63f475, 0x3c03b2ce,
- 0xdf40b257, 0x6df9f866, 0xfb8c3bc2, 0x7af9dd0f, 0x0497d600, 0x8d319377,
- 0xae133ee2, 0x2f8a2ab3, 0x7494be0a, 0x6cc0fe96, 0x93387711, 0xe1077ae2,
- 0x82c0f5c5, 0x447bd39b, 0xb35c5367, 0xb327d711, 0x03f40d21, 0x9b7ff33a,
- 0xbdef516f, 0x3e749371, 0xfb9dfc96, 0x4392c78d, 0xb1c78dfb, 0xb5d29bfc,
- 0xf8899720, 0x300b8fde, 0x29e138de, 0x0acb6791, 0x46d38f10, 0x3c9ffac8,
- 0xfa81ece9, 0x38b8c981, 0x53de39c5, 0xf4b3de45, 0x3f6818f0, 0x8fe619e1,
- 0xd8fd439b, 0xb8f6e8ec, 0xedfcc288, 0x3b1fe795, 0x51e886bc, 0xddcce5cb,
- 0x3ffbe7a2, 0x79059f22, 0x447e404b, 0x6f037e50, 0x616fa51f, 0x9045e781,
- 0x9c21949f, 0xff00aece, 0x7fdf8601, 0x5585ed05, 0xa8d71861, 0xd6f8db8c,
- 0xf56bd415, 0xb545ed0a, 0x14d581e3, 0xfbc55338, 0x735f94b9, 0xc23e09d8,
- 0xbe78bb03, 0x93ed4a4f, 0x37e8bd1e, 0xa1bddce0, 0xb9d2714e, 0xb78a1183,
- 0x7c47465b, 0xdad149f7, 0x6bd1fc27, 0x1e7867bf, 0x6fbe56ef, 0x8dd6cfbf,
- 0x6e99f7be, 0xb2fbfc61, 0xfe7f9f43, 0xefe70a5e, 0x21d15931, 0xa8a5c7eb,
- 0x37111fbc, 0x68a1f1a0, 0x7be85d4a, 0xb8c513b0, 0x425fb8cd, 0x24f6dcf8,
- 0x28d9b7e2, 0x93f2fdf1, 0x196377c8, 0x60cfc4d3, 0x9fdfca7c, 0x8d4172f2,
- 0x19d74f9e, 0x9bafaf84, 0xa91480ef, 0xf2f8fdbf, 0xf4cbd104, 0x8476f835,
- 0xfb5db7c0, 0xe8ebefbd, 0xf7c3ac35, 0x48e76f82, 0xf86a763e, 0x22ef5a3d,
- 0x538e8cfe, 0x5747e8ac, 0xe0d6c2b8, 0x9f2fc17a, 0x6cbe27bf, 0x1d03fb96,
- 0x02b5efe4, 0x890abf94, 0x4febcf47, 0x74bafca2, 0x3cb9eded, 0x7335f3b6,
- 0x52bc01f6, 0x7fbe0fc8, 0x27faf9e4, 0x7c092e31, 0xd794f541, 0x2c1dc007,
- 0xf941758f, 0x15f920ec, 0x03fa47f2, 0xa9e001ed, 0xb8b7ea27, 0x007b4663,
- 0xd04cfc9f, 0x27b8a2e9, 0xfb9a3cda, 0x78ddcd93, 0xfb8523ba, 0x7c09cf8f,
- 0x9bed126e, 0x90f003c3, 0x1f1fe1aa, 0xe6025bae, 0x3fba784d, 0x93fbce4e,
- 0x63d07c82, 0xbf5e36cd, 0x27a3e52d, 0x7cfed93d, 0xa6af7f70, 0x7880527c,
- 0xf1fff7f1, 0xee23f461, 0x91db7817, 0x27b0e472, 0xc139d052, 0xf0214eff,
- 0x5163bd07, 0x74a1fc8e, 0x761f8fe4, 0xd04f4fe4, 0x774ef4f7, 0x3a7bdf67,
- 0xfa0cef7e, 0xea3de19e, 0xd05eff9b, 0xae883f2d, 0x1d744179, 0x942f9413,
- 0xff46af79, 0x5c5cbd4a, 0x09e3f4ff, 0x54df1871, 0x9fbf38e8, 0xbd934f9f,
- 0xfc956f99, 0xf230e67b, 0xcf9f9fab, 0xd7279e12, 0xce79ba09, 0x787a893d,
- 0x51e1ea12, 0xd414fcfe, 0x5f3f9443, 0x7e61fb80, 0xea81757b, 0xed91abef,
- 0x7afd922f, 0xefec8560, 0x3a4bca33, 0xb225cf32, 0xbd0bf777, 0xa4ad3cc3,
- 0xf035e7f7, 0xfe7d3fef, 0x2b5fc3f3, 0x7841b50f, 0xf79410d9, 0x775fb504,
- 0x22b6fb03, 0x82c7fbe8, 0xe504d7ea, 0x6be507d7, 0xcd17dfb4, 0x8b0e4852,
- 0xc9fdf046, 0xe60cb960, 0xda522f8f, 0xf6ed63e3, 0xff24895c, 0x1357234e,
- 0xfafe79aa, 0xa829fff3, 0xa7c80c89, 0xb21e89b0, 0xc05a30ef, 0xb9d041fd,
- 0xc1f8151e, 0xbcde89d0, 0xecde99d8, 0x79ef6c13, 0xd7f03ffd, 0xd22fda24,
- 0x54fb25f8, 0x9551be6d, 0xbbbd4770, 0x5f603228, 0xe2265995, 0xfdf3baba,
- 0x6389889b, 0x22fc0352, 0x6744f84f, 0xfb653cc0, 0x575d5f71, 0xd4f8bc71,
- 0x719b89f0, 0xd8b4687f, 0x14f1b4d7, 0xf68aa5ec, 0x6fc452ba, 0x5d39e1c6,
- 0xebee176c, 0x0fd030b9, 0x70bd7562, 0x523f8d9e, 0xd550bbf9, 0x53c01f40,
- 0x79d3b311, 0xea7899d3, 0x47d43bbe, 0x0df92f47, 0x47efb77c, 0xd3b412ea,
- 0xd2c49747, 0xa945a639, 0x17dcefdc, 0x366135dd, 0x1e231be4, 0xf8db7d26,
- 0xf74a58c4, 0xad95ceaa, 0x353ae856, 0x5f646279, 0x3c268fac, 0x4fc43639,
- 0x9dfc065c, 0x38276a99, 0x426b36dc, 0xabdbfa3d, 0x085fb40d, 0xfc457e2d,
- 0x3069a4f3, 0x968bb7ae, 0xb0fda7f5, 0x1a4f9fe2, 0x8ab0f787, 0x3ed0371f,
- 0x4ace8b49, 0xeaf72271, 0x6a2e74b1, 0x7a910ad6, 0xcdf4a2ee, 0x1abafcff,
- 0xbeb569ef, 0x0efbd0a0, 0x7bad5c77, 0x9b9e1067, 0xf74ee9ac, 0x7580de2e,
- 0x3efb9e00, 0x17b7bebe, 0x089f21cf, 0xa473a2ab, 0x8bee9ed0, 0xbfb35e95,
- 0xdb0b313b, 0xd0e5d6ab, 0xa39414fe, 0x956a7cff, 0x41ef09ba, 0xc2594515,
- 0xa9f6fcf1, 0x75c518af, 0xa26e3d4e, 0x478e2277, 0x8fac67ba, 0xe8b8fba3,
- 0x8a53b3e9, 0x3c014a3d, 0x67d0da4c, 0x96126262, 0xfb7d049b, 0x1e011544,
- 0xfced748a, 0x24a4f643, 0xa527e786, 0x05b899f6, 0x97ea71e6, 0xd87ce9bd,
- 0xe4e754c1, 0x2fd7c054, 0x52539cd2, 0x6e5e11e3, 0x0ffcdeb7, 0xe3fc8fdf,
- 0xe404e285, 0x395287af, 0x56d1bf5f, 0xbef8109c, 0x3d45c977, 0x469bc1f1,
- 0x277a22fb, 0xc14d3c30, 0x41f03675, 0x67cdc62c, 0xf00b7589, 0x219d92f3,
- 0xa9e1abfc, 0xf1abd12a, 0x48d9f1a7, 0x0f5f3f5f, 0x19f5177f, 0x5f2037ad,
- 0x2d7321b1, 0xa693b9da, 0x0f44ec25, 0x91fe7ed6, 0x3c230bed, 0x7985b619,
- 0x7e3032c3, 0xfc871cea, 0x7aeb12cd, 0xc804b64d, 0x77ff68a1, 0xfbe73f0f,
- 0xeb8f6877, 0xfe7bbfbe, 0x9fb812d7, 0x4697db21, 0x5ce83c59, 0xf458b69c,
- 0xfaa38f48, 0x3cf0a7a5, 0x060bc95a, 0x2f2503b2, 0x8ad63fc4, 0x06f807fa,
- 0xf3c16be3, 0x81aa63fa, 0x933a31c7, 0xad18cf4b, 0x1331b25c, 0xc99dbe71,
- 0xdc86cd65, 0x3b239b89, 0x2b52cb97, 0x5ee34fd7, 0x9651efe0, 0x71e8b50e,
- 0x0bf4737f, 0x7ba16d6f, 0x86c20b74, 0xd289bde0, 0xaf444c17, 0x63c58b16,
- 0x13ef0dbd, 0x9031f458, 0xedca9b3e, 0x9f90bd69, 0xcde9955c, 0x7ba52843,
- 0xf0df7212, 0x9e6792ec, 0xe797e3c5, 0x70da7798, 0x2765dfc0, 0xbbcbd3e7,
- 0x063f8a54, 0x1f9623ef, 0x61cc69dc, 0x788fb137, 0x32c3fd83, 0xdfde22d6,
- 0xf07dfd0d, 0xa9abe1fe, 0x0687fbc1, 0x2a5e4fba, 0x37730ff6, 0xe9770428,
- 0x3fcf7e12, 0x8dc79637, 0xfa052e4f, 0x1ede691b, 0x0f3c10eb, 0x294eedcd,
- 0xf866bc53, 0x0b0daef5, 0x13d98e28, 0x6e7184f1, 0x89e26ef1, 0xae3a4730,
- 0x085e4bf3, 0xfe96fc23, 0x7e5fee6a, 0xfaf78599, 0xd702e20a, 0x2f91fbd5,
- 0xde197e38, 0x9cfd941f, 0x7a63f65e, 0x45f731a7, 0x7cc31f58, 0xabd8634a,
- 0xbd2067f7, 0x9f71d292, 0x5d2bb653, 0xbc5ea3fe, 0x61afac1d, 0x4ab45d1d,
- 0xecc7efe5, 0x041d9136, 0x01644f59, 0xad672cf7, 0x72346838, 0x8712c63b,
- 0xeedaff09, 0x1c769fbf, 0x6eeaf1ea, 0x2687b8a5, 0x51ef27f1, 0xfbf8e915,
- 0x853a8574, 0x268157ee, 0x97497ba3, 0x7b5fb953, 0x679f953a, 0xd0774a28,
- 0xb5f29cfd, 0x0cf2c726, 0x7dfb4fde, 0x95df584f, 0x684b9aeb, 0x26f7f33f,
- 0xae54ab8a, 0x45867308, 0x3b17f3f1, 0x6af9d006, 0x51f011bd, 0xe2fae766,
- 0xa56f98b2, 0x745dd27d, 0x8ecf419f, 0xdcbea1e8, 0x7963f5c2, 0x923de00c,
- 0x186e8a33, 0xe8c767be, 0x9bf624dc, 0x7c602834, 0x3f439445, 0xbf7f28f6,
- 0x83563a4d, 0xdf6c1b71, 0x31e21077, 0x8474da76, 0xe49515ef, 0x76b49019,
- 0xaf7dfa04, 0xdeb899a8, 0x6fb55a2e, 0xf9dfea1c, 0x8de307db, 0xeaf45609,
- 0xe63f6407, 0x88b7fa0b, 0x90c56773, 0x22b677c7, 0xb93cb8d2, 0x8a2f1e55,
- 0xef345348, 0xf2fac910, 0xbf1e0655, 0x8a8f3d07, 0xc7d97ca5, 0xa5b96fd4,
- 0x6ff50139, 0x30c36ef9, 0x6951d3d4, 0x978b2e5c, 0x011f65a5, 0x44362abe,
- 0x6583bbd3, 0xe623029e, 0xca156acb, 0x4f8bbffb, 0xbf3d3e47, 0xe428b5e2,
- 0xbe61139f, 0x5a97689e, 0xe8398417, 0xc795a1de, 0xe8afceeb, 0x6695f749,
- 0xf82d9b59, 0x45acc19e, 0xbfa86ddd, 0x467d5a8f, 0x975a3fac, 0xd3bcc3a1,
- 0xbcc6cd6a, 0xe51b7f53, 0x2df38bcf, 0xa57c83f4, 0x6d973a70, 0xbfc467db,
- 0xcc44324f, 0x1f2be2f7, 0x4ff8c2f4, 0x2f737a79, 0x07c02bb4, 0xcf1052bd,
- 0x9764292f, 0xf3f1b62b, 0x2a0f92ee, 0xee400f90, 0xa83e09e6, 0xf7daf5d8,
- 0x902a1e51, 0xf23a43fe, 0x7ef3f011, 0x71b1df2a, 0xefdfe31c, 0x76913e47,
- 0x0c2bf20c, 0xf1df8c36, 0xa71a667c, 0xe18f943f, 0xfc019ee5, 0x39fb5d1c,
- 0x741c8d04, 0x1a575f46, 0xcbc587b7, 0x5d6fa44c, 0x5a1e5c69, 0x428eed56,
- 0x657c5dfa, 0x67dc01df, 0x5601df29, 0x1e421eda, 0x712a3e04, 0x3f0451fe,
- 0x389517f9, 0xfcff28df, 0xc85ef9db, 0xf3e32561, 0xd4adf393, 0x7acbf98b,
- 0xc124fdf1, 0xe04c652f, 0x2e6f576b, 0x50ce4277, 0x0ebde98e, 0x4db73f31,
- 0xf7e50efb, 0x22dcfcc5, 0x28bad665, 0x17d20fc4, 0x103e0ff5, 0x04fa9469,
- 0xbedc5c9a, 0x4bf1ce91, 0x57f8497a, 0xdf403809, 0x7e6c6339, 0xf274b63a,
- 0xe095644e, 0x778f639b, 0xfc07af49, 0xcf4adf9d, 0xeabf116c, 0x7c93c603,
- 0x7dcbc723, 0x38ddd279, 0x87dfe86f, 0x8f71cbfd, 0xd8f4227a, 0xe5c651cf,
- 0xe52fc243, 0xa1fab732, 0x614707bb, 0xee968bbb, 0x9cde7003, 0xacd2ab8e,
- 0xfd13bdf4, 0x59def805, 0xfadc50af, 0x1fe75898, 0xa8b5fc7b, 0x7e7e01e2,
- 0x277c427f, 0x307f0cf9, 0x6de1263e, 0x7038f1b2, 0x0f52dc30, 0x9f73b849,
- 0x2c6eefb8, 0xbee3f097, 0x4a9f2051, 0x957e4a3c, 0xf982f5f7, 0xda4e7896,
- 0xfe73b54f, 0x463d4cae, 0x943a77e7, 0xfc27f707, 0x5ce213a2, 0xe33c6b79,
- 0x8915ff71, 0x8af735fb, 0xb40c8b18, 0xc08ed23f, 0x807d1acf, 0xa7bf69fd,
- 0x8b3ce716, 0xd690bb74, 0xc73ffa8d, 0xd0398cea, 0xe699d96f, 0x9dcef871,
- 0xb8ef43f4, 0x31a353ea, 0x9f8bb55e, 0x52fc9377, 0xc93f6176, 0xffb8b941,
- 0x83eab454, 0xefc8e182, 0xbed035bf, 0x63e3d14e, 0xcfdfe88d, 0x187332dd,
- 0x41ef01a2, 0xbb3f5ea0, 0xbf266879, 0x984d6059, 0x3621f786, 0x01ed333f,
- 0xaf559f28, 0xd80dbbd2, 0xd16fca0f, 0x5f1499a3, 0x52d6113d, 0xf8fd0a30,
- 0xf456a81f, 0x32df6fe3, 0x7f6c31f4, 0x0c6ba5bb, 0x39b1b63d, 0x7d4ef296,
- 0xe907d934, 0x8073caf7, 0xc7dee2d5, 0x3fde845f, 0xe22a9edc, 0x2f755ffc,
- 0xdd6f8fdc, 0xacef4618, 0x75ed1a14, 0x4f6f1c3e, 0x54675a17, 0x23eaf11a,
- 0xbf255bdd, 0x99918e6c, 0xb9508693, 0x0fca0939, 0x451f9a1a, 0x51f0723b,
- 0x3f7c60cb, 0x86d7a992, 0x9a7f7315, 0x6ac63bef, 0x70912ddf, 0x0fc6247c,
- 0xdf7e4fee, 0x1b08eb84, 0xefa44fdd, 0xedf8aecf, 0x6783b434, 0xe1b4f6b7,
- 0x09ef4fbc, 0xa703fba3, 0xcf77da0d, 0x57def56e, 0x79297df0, 0x9a74f56f,
- 0xfc938fd6, 0x377bc37e, 0x839ed37f, 0xa5b9d1bc, 0xdd194b0b, 0x8d397efb,
- 0x2263f7d1, 0xe789dabe, 0x4d6a4b08, 0x7307bc56, 0xf71ef912, 0xfcab76b3,
- 0xcb99a5fe, 0x99ddc9c1, 0xe4b7bf74, 0x525f3c6f, 0x3ef178a7, 0x9fa7fef2,
- 0xb30af3a0, 0xabc4cfc1, 0xf3feed09, 0xa1a7a7ba, 0xb8765c38, 0xfe7de257,
- 0xf9f95bcb, 0x32ef0e8a, 0x2079dc1c, 0xdfb9dec9, 0xcbfb5dfc, 0x9f110e32,
- 0x2fcfbdae, 0x4fd72cf1, 0x8c3f026f, 0xdbc7e218, 0x90e74b67, 0xa9617cbf,
- 0xca4bd29b, 0x23b7b5b1, 0xe9a25b1f, 0xeb1fc0be, 0xbdf1519f, 0x835df2a8,
- 0x783ae1af, 0xbd346454, 0xd2d9f293, 0x7a8fb425, 0xa5156961, 0x0e32de92,
- 0x46aec777, 0xfab3eefa, 0x9fbc06cc, 0xb46446fb, 0x90b603b0, 0xdeeced74,
- 0xb9d79cb1, 0x4afa701f, 0x9d6dcfb8, 0x6af38519, 0x61b63e7c, 0x2235d224,
- 0x5f7e8ada, 0x724738a9, 0xabf73d6a, 0x7fa398cf, 0x3df40f93, 0x024a61b3,
- 0xbb3737be, 0x5869def1, 0xb147b25e, 0xb1c07ae2, 0xae145267, 0xb7d53edb,
- 0xa8fde144, 0x7bcbd74f, 0x3bfa5e55, 0xd8f2a354, 0xca3f6c14, 0x0cf667a7,
- 0x7c8d75be, 0x9e82f232, 0x879ebfee, 0x5c938a72, 0x0938a70b, 0xb9e2c4fc,
- 0xf3afd991, 0x3afb4af8, 0x6ef3ac57, 0x347ef317, 0x31f726dd, 0x04773ca8,
- 0x61bd3f2f, 0xefec44e7, 0x158366ec, 0xb36cfee1, 0x079ffa81, 0xc01d33eb,
- 0x5f2b667b, 0xd71e60f7, 0xf2b767cb, 0x3abccdf5, 0x5f29bebe, 0xfbf27060,
- 0xcc7e5aa2, 0xef47680d, 0x5ef274fb, 0x9f273cc8, 0x97b03cdf, 0xbe60df38,
- 0x7475618d, 0x9e9bec8f, 0xdbe60d17, 0x855f92f9, 0xd7e7687e, 0xe044f8ce,
- 0xf91de513, 0xbcc3f255, 0xdedf7cf5, 0x07383756, 0x47f24ef9, 0xd5593bf0,
- 0x57dfc646, 0x7bd385d4, 0xed2293b8, 0x530fb406, 0x20c65ae2, 0xe74e3e5a,
- 0xab9a807e, 0x37bde273, 0xf90a6d56, 0x748acece, 0x744557ee, 0xdeefc465,
- 0xcf3f2b74, 0xc97fee29, 0x86922614, 0x84527b76, 0x343b0bed, 0xaefd3a79,
- 0xcfc0f47b, 0x3db76e93, 0xd8c1ddda, 0xee0bd32f, 0x267cc3d1, 0x7776da65,
- 0xbff3fc83, 0xb7f3c09a, 0x201a86d9, 0x78994cbf, 0xd7c818cf, 0x4a9f3ba7,
- 0xece7180f, 0x38692e96, 0x19ff283f, 0xbbc5d796, 0xa5698e7f, 0x49760fb8,
- 0x24b41d69, 0xfdfedfc3, 0x7fa3963d, 0x2df3fbb4, 0x18ee9606, 0x7f097980,
- 0xdd25b4e8, 0xf8f4abf9, 0x8cc5e58e, 0xf5e74e3d, 0x0e1efc3c, 0x09ccb8fc,
- 0xf38a4f78, 0x97bcb15b, 0x2f741353, 0x9fefc1c7, 0xf252fbc9, 0xff5f543e,
- 0xb70db27d, 0x92ec9f72, 0x2a1dff81, 0x0fbda9c5, 0x89fc34a6, 0xf6a9f9de,
- 0xa23096b0, 0x4d9ef683, 0x7753be39, 0xa20bdd1b, 0x6f4c87fb, 0x57337d27,
- 0xac683bfa, 0x29dff987, 0xfcfc8960, 0x1f82a3c1, 0x3d652f4f, 0x9fe8807a,
- 0xca39b77f, 0x985c700e, 0xfa85ebe8, 0xfddc3220, 0xe9d7c427, 0x0ba9d50d,
- 0xaa1ef0e3, 0xed0f91c9, 0x7df978cf, 0xb7e132ce, 0x42f1cdb2, 0x3bbea82f,
- 0x46535da1, 0x87684ddf, 0x757c17de, 0xfe97c321, 0x192bd423, 0xed049d7c,
- 0xde5d3ce8, 0xb1e2bbf2, 0x4fed85dd, 0x700d98f4, 0xbc06009b, 0xfa1e35a7,
- 0xf7a3611c, 0xfb4a98aa, 0x7efe5ecf, 0xa2799e92, 0xd5eef86c, 0x3ee2e933,
- 0x9413dd11, 0xeff89274, 0x1a181740, 0x999d59fd, 0xbeee1019, 0xfdfd036c,
- 0xf91f492f, 0xddc2e2ce, 0x7742fe3c, 0xe31e4524, 0xe2cda748, 0xb0759fef,
- 0xbf916f8b, 0xd1e9620a, 0x330d9fe8, 0x11c9db43, 0xf5d88d1b, 0xff2f6d77,
- 0x92fc2e9d, 0xe0706cc1, 0xa2a5923b, 0xd0b558cd, 0x2d5bef8e, 0x7bd3378c,
- 0xe85f0b25, 0x96c1fc4e, 0xb60590fc, 0x89621b63, 0xa866565f, 0xf5b9ff84,
- 0x2a1dde8c, 0xf3a64fa8, 0x36f5f994, 0x12daa34a, 0x6fecfca9, 0x7ae2c9de,
- 0xe0a7d389, 0xa68cba7f, 0xd3ff4b73, 0xd9d6529b, 0x6691a77b, 0xf0bbcfba,
- 0x0b6bb720, 0xc7bfcaa7, 0xbbafefc2, 0xc8ae3804, 0xb90b8a1a, 0xe753dc3a,
- 0xf0ba57ef, 0xe77e00be, 0x677d5034, 0xfc4a57ef, 0x82dd049a, 0x7a9cb3df,
- 0xde913330, 0x59ef147f, 0x9aed174e, 0xceb3bdc5, 0x4fbcb5de, 0x6a227bb4,
- 0xddf2135c, 0xc8696774, 0x9ef52d77, 0xcffa0730, 0x7a48d486, 0x373ee147,
- 0xf59a97f8, 0xf11e8b4c, 0xbf9b2d30, 0xba753973, 0xfd90deb6, 0x4373e939,
- 0x92778776, 0x4d8fa80c, 0xecfb6d2e, 0xf50ec2ae, 0xf6bf7d65, 0x2354d15d,
- 0xbe3d4fd9, 0x69f90b3b, 0xf4515de2, 0xbfffd10f, 0xff10b4ec, 0xb83bc49b,
- 0x112b2ccd, 0xbeadc2f5, 0xdbfffb27, 0xefc1b1fb, 0x03bf06c4, 0x42eb7fdb,
- 0xdd607e4d, 0x6fed350f, 0xa9ae5fab, 0xad5bec1f, 0xfa6ccfa9, 0xb43f2699,
- 0xfb4d39f9, 0x693647e1, 0xbcb647ea, 0xfdbfa9a4, 0xfe4d0ecc, 0x683fd68e,
- 0xb6d9dfda, 0x61cf9357, 0xe7da68ef, 0xe4d03f9a, 0x57ff5ac7, 0xc4aefed3,
- 0xf1fa9a13, 0xfa9af3f6, 0x68ae7d09, 0x2f8e05f2, 0x373fed35, 0x457757ba,
- 0x0cea22bf, 0x433aadf1, 0xf944f861, 0x6629a6e5, 0xba6b07d4, 0xf8247d0a,
- 0xd3b0b96f, 0x7ba307ac, 0x6cd563ac, 0x3d5ff11f, 0xc1a0bbff, 0xde7f4f76,
- 0x1ff8c4e5, 0x6bdf8d7b, 0x527f068b, 0x8c7e301f, 0xff02ccd3, 0x9e6c5dee,
- 0x778f9355, 0x77da6a25, 0xd4d76e99, 0x68fbb927, 0x38e653ea, 0xaab4f934,
- 0x5df69a11, 0xf9353897, 0xa69e4f0c, 0x971af77d, 0x76b3df26, 0xef7da6ba,
- 0x7d4d6ef5, 0x4d1cef5f, 0x55adff7d, 0xbac0fc9a, 0xb7f69a25, 0xf5347bd5,
- 0x9a357d83, 0x5aa6ccfa, 0xf3687e4d, 0xe1fb4d7a, 0xfa9abc47, 0x355b2d91,
- 0xa99fb7f5, 0x68efe4d3, 0xbfb4d7ad, 0xc9a7cdb3, 0x9a83b0e7, 0xf7e6b9f6,
- 0xd6b1f935, 0xefed344f, 0xa9a63c4a, 0xab3f6f1f, 0xce7e97a9, 0x6b9f3e84,
- 0x53df85cb, 0xe697f8e0, 0x6ef7f6fb, 0x1f742877, 0x6ed75cea, 0x96c57df2,
- 0x24fd1530, 0x6c99c517, 0xf5111078, 0xb727de15, 0x8539f3f1, 0x238aaf14,
- 0x944c887f, 0x810bcf1d, 0xab8510df, 0xf40c8cb6, 0xfefc23ab, 0xccf5ea5b,
- 0xdadff79b, 0xd5dcff84, 0xf288beee, 0xfb6eaf31, 0x989f7a38, 0x1c225679,
- 0x77bffdf2, 0x956dde83, 0x19f378e9, 0x17f00fa6, 0xa6d5860f, 0x389af90e,
- 0x05f378c1, 0xb6c1ef86, 0x22e22bf7, 0x17bdc7f8, 0x16bff406, 0xf7f817d6,
- 0x9a976b23, 0xffc76fe9, 0xf295a96c, 0x522696eb, 0x3a00fffe, 0x0047bf29,
- 0x000047bf, 0x00088b1f, 0x00000000, 0x7dedff00, 0x4554780b, 0xeedd7096,
- 0x9d248fdb, 0x777579d0, 0x493cdc9e, 0xf09d0848, 0x3a3e00d8, 0xc0406021,
- 0x490435e6, 0x41a8c1a4, 0xf881ba43, 0xbb75744f, 0xb2021031, 0x10d191b3,
- 0x0186c195, 0x099d1964, 0x09d1a32e, 0xe09af09a, 0x1c604c3a, 0x3719d9c5,
- 0x8ee2a3a0, 0xcfe31e10, 0x7fc38fee, 0x937ba9ce, 0x380e9dbe, 0xf7ff3afe,
- 0xb4fbfa3f, 0xab755538, 0x739d554e, 0x2aaa3cea, 0x89895ead, 0xde6d6322,
- 0xf39f4a1c, 0xc99899da, 0x316f36d8, 0x0ebddbc1, 0x72defd82, 0x9d7a774a,
- 0x5bcbbf94, 0xaf1ef041, 0xdebdd28b, 0x79f74a1a, 0x92fe543d, 0x9fe081b7,
- 0xb6947d7a, 0xff299b7b, 0xc10b6f15, 0x046dbc07, 0x53f5eabf, 0x4bdde1da,
- 0x76de1be9, 0x76f4ef2a, 0xb7a6fc10, 0x6f2ee08b, 0xbc87c10f, 0xf11f04bd,
- 0x98f8269e, 0x1ed28fb7, 0xbe9467ef, 0xf2a7eded, 0x0957bc77, 0xdbbf8ebe,
- 0xcd2afc19, 0x2631e49f, 0x7ae69730, 0xc9ccc21e, 0x00f63226, 0xff824bfe,
- 0xcc8846e2, 0x8cfdd8c2, 0xb0d73eff, 0x9413769a, 0x3ff2fe77, 0x74c60285,
- 0x1f8d8ca5, 0xf662690f, 0xc634c6ce, 0xe675b026, 0xd318724f, 0x63d75740,
- 0x4e09fb07, 0xd6191319, 0x2d75dfc3, 0x58c7dffe, 0x94bffc3c, 0x834c78e5,
- 0x14095369, 0x55befb42, 0x1a777f82, 0x3e20d755, 0xbf1c8dab, 0x95135a69,
- 0xf82c3e57, 0x5563020d, 0x8be9f322, 0x587dfb18, 0xc11b0154, 0x2e8f25d8,
- 0x2172c447, 0x04830edc, 0x443a8ff8, 0x2c5d7e0e, 0x9da5b18e, 0xd29b1d86,
- 0x84aaf106, 0xfdf035ef, 0x0c1a562b, 0x3a3d64ab, 0x1df203c4, 0x9ec618da,
- 0x4ba6b8b7, 0x8a60ff90, 0xf687a7c6, 0xed9fc999, 0x2dec648c, 0xe6066b8b,
- 0x77dcf25f, 0xc07f1b0c, 0x7ec6cf6c, 0xe2ba1b66, 0xfdff4117, 0x9df6b5c7,
- 0xe1f3f0d2, 0x6c67296e, 0xdfca0ddc, 0x02ec973c, 0xa2ffca3c, 0x9ffce175,
- 0xf85645d0, 0x492f01f3, 0x1a4a78e0, 0xeadb3a55, 0x7cf8825a, 0x47b9e919,
- 0xe5f85303, 0xacf6ab6d, 0xae552181, 0x82e11aca, 0x2582eeef, 0xcd8c1963,
- 0x418e9265, 0x38e67cf9, 0x2d4d069a, 0x17822e64, 0x6fa51f31, 0xb1aabbc5,
- 0x4fccc59d, 0x21b26bb0, 0x82b8d435, 0x78cb72f1, 0x947c65b9, 0xabab71f4,
- 0x20e5e38e, 0xba4c4ebc, 0xa5c71b23, 0x5596f5e0, 0x58737aa2, 0xefc476ff,
- 0x3f1783cb, 0xd2863211, 0x174077c7, 0xeb1e6826, 0x73ac3d31, 0xf7d18ea3,
- 0x7e088f8d, 0x2e856ba4, 0xb791992a, 0x0be418fb, 0xf4027481, 0xd2fc8451,
- 0x92373a9f, 0x0949e4e8, 0x92707c1a, 0x0b1fa7d6, 0xb3f8d4e3, 0x87d12d05,
- 0x1efc005e, 0x48fa0388, 0xea0e9e1f, 0xe00d219a, 0x67afa01f, 0xaff3bdb0,
- 0xce0e5dff, 0x49cdfb97, 0xa357ce12, 0xe801d606, 0xd6b6f7d8, 0x99925bbe,
- 0x94ed6014, 0x6ba47e31, 0x73edda26, 0xb17ae0c7, 0xdfa84ea5, 0xfb0d65ad,
- 0xfbf687f3, 0xb417ae2a, 0x2ba6c27f, 0x9cbb53f7, 0x9ff295cf, 0x63c5fae2,
- 0x5eadebca, 0x16ad69f5, 0xe8713ff0, 0x131a5e9c, 0x6f0d1c62, 0xe00666e7,
- 0x3338eeef, 0xe44261dd, 0x45e7f2fa, 0x949fe60e, 0x1fa144e9, 0xb5d23ead,
- 0x178814c3, 0xf9e817ef, 0xd13744e7, 0x4419cf40, 0xe0f89fcf, 0x690639d3,
- 0xfb4822c4, 0x20ba6a60, 0xd660bd75, 0x672cdd23, 0x99ab4a76, 0x6007d293,
- 0x9d7e91f9, 0x38f4a7be, 0x76b20fef, 0xbe2bafca, 0x7cf482d7, 0x8a95c6bf,
- 0xa5eb2d70, 0x2feb377c, 0x2dfcc1b3, 0xd47b5e6c, 0x6fcf5806, 0x025a6a79,
- 0xff3cefcc, 0xce98a3b2, 0xe27c25dd, 0x7a3f8893, 0xc13eaf10, 0xfa113eb3,
- 0x92a5fbbf, 0x3f9049f5, 0x85d7cb47, 0x175f2bfd, 0x8f047e45, 0x97a1f81b,
- 0xe341cb8f, 0xab9546d2, 0x09f2a1f8, 0x9da010e6, 0xff0683fe, 0xfe00b4ce,
- 0xdfe87e28, 0x898e5093, 0x7ae0f7fd, 0xc434dfbb, 0x083f7ae0, 0x29c71be2,
- 0x806b1838, 0x2ad690fe, 0x882a8863, 0xe1621fde, 0xb9f7ac76, 0xa79fdf4c,
- 0xcfefa230, 0x4060e605, 0xa67417bf, 0x5ae50166, 0x0456cbaa, 0xbe60acf8,
- 0x6ee50626, 0x38054bcd, 0xd725234f, 0x1fd744a7, 0xa61537b5, 0xa7f7fca1,
- 0xf7ff280a, 0x698dec19, 0x5d84fdaa, 0xe321408f, 0x3f9f3861, 0x0ec776c0,
- 0x3e284bc5, 0x14429ff6, 0xf33d0663, 0xbf888b19, 0xebe444d9, 0x57f13d44,
- 0x066d4e23, 0x3c94e3f6, 0x9feda1a6, 0xf9edf190, 0xef744b4a, 0x16ecf183,
- 0xf7c3f542, 0x60f8432b, 0x2c3f243f, 0xa7d1fa8c, 0xfa563e71, 0x64bea663,
- 0x6a75d027, 0x86974b84, 0xc716c182, 0xcb998be5, 0x4fb49e97, 0xe67d7133,
- 0xc8a8de4c, 0xdd8f6a65, 0x09d1e5c6, 0xf843e49d, 0x1d9a4944, 0x9c80d724,
- 0x97f8abbd, 0xbb8f11a7, 0x2d2ea68c, 0xdd16eca4, 0x7d94fa9f, 0xff707d30,
- 0x7fd94bbd, 0xeffbe16f, 0xefb5991c, 0xf1babb12, 0x3aecc67b, 0xfd972fc4,
- 0x6d8f995e, 0xf22adfb8, 0x0b7ec871, 0xf9c5eb03, 0x832b2513, 0xc7c0b574,
- 0xdc253abf, 0x36c47c8f, 0xf4f8d2e7, 0xfe0cfcb2, 0x58c15ade, 0x13c651a6,
- 0x19704bfe, 0x20ce6659, 0x5663549c, 0x9b1e29c1, 0x06c8feaa, 0xe69e5549,
- 0x679551cb, 0x7055db34, 0xaab14b56, 0x8736a8fe, 0x97f5ce0a, 0xede7eaab,
- 0x31e0aa75, 0xfaaa15ed, 0xaa5c3b63, 0x1aaec2f2, 0x1eb8f955, 0xd09e0a8f,
- 0xffaaa0db, 0xaa7da777, 0xcd7d49f2, 0x9f29f2aa, 0x5be0a8b5, 0xf554dbfb,
- 0x57eabf6f, 0x7fb09795, 0x354f9556, 0xd3c157ee, 0xeaabafcc, 0x16fb80bf,
- 0xb61dcff0, 0x0cfe556e, 0xbbeab8e9, 0xaa4e733b, 0x75e417e0, 0x270e2df6,
- 0xe72d68be, 0xfb6cd1fb, 0x5876aa07, 0xfebe68e7, 0x7b6f3c61, 0x510b1fcf,
- 0xf4d76e4e, 0x4add21a7, 0xf222735a, 0xdcf16671, 0x2c664043, 0x14aab1db,
- 0xf94e5bc5, 0x1d308753, 0x8a5fdced, 0x715f926c, 0x05a610f2, 0xd0a58bae,
- 0x44d7b31c, 0xc8b4c61f, 0x8ad53853, 0x3dcc34a8, 0xdf44e98c, 0x5728a9aa,
- 0x3f5c785f, 0x2a28f91a, 0x074fa146, 0x827d67d1, 0x7eb9f886, 0xf28fc9b1,
- 0x24c532c7, 0x96139032, 0xb1d65a1f, 0x9a8b4c02, 0x1af66d31, 0xfa0f5f99,
- 0xba519780, 0xfe666bc9, 0xaf67206c, 0x93fae08d, 0x43a795f7, 0x44ab233b,
- 0xe699aa33, 0x48258f51, 0xe5f5bed0, 0x765d9c96, 0xb0edd6c6, 0x42e5d7cf,
- 0xc59e19ff, 0x4e01a23b, 0x30058e63, 0x2398167f, 0x25b19936, 0xfa76df3b,
- 0x97c287f2, 0x244d9d33, 0x5cf6c417, 0x7694ffd4, 0x6e7e9637, 0xdf2198d7,
- 0x11e74bf7, 0xf1adf798, 0xc21df4e1, 0xa75f9925, 0xb60b475d, 0xf69ef90b,
- 0x4cfc7c10, 0x977fdf1a, 0x0a7d73a4, 0x66eb8487, 0xacc6cbe5, 0x808ca7a0,
- 0xa93adcb9, 0xf0ea0ce1, 0xa027d9a2, 0xe0d7627c, 0x892b907b, 0xe0832958,
- 0x4e01c3db, 0x6c625840, 0xc7eb967f, 0xc5d5afd8, 0xfa00d9d8, 0xc351a849,
- 0xcf9e615b, 0xa3e20e66, 0x89f1aa5b, 0x8fdd0630, 0xf43b769d, 0xc60aadb3,
- 0x3d56f00b, 0x7a9437d7, 0xf68c34a3, 0xf8d32ff7, 0xcdaece7e, 0xf49ea3b7,
- 0xe79e3ca5, 0x3e8bfdbc, 0xeb7b44ce, 0x3589a52a, 0xf205c11e, 0xb4e95acd,
- 0xae9ea0fb, 0x8423e611, 0x348bf2de, 0x1dd7be91, 0xa090447e, 0x1269a7de,
- 0x7e9589c1, 0x0d52771a, 0xbf801bb4, 0x6609f44d, 0xe63db7c8, 0x9b3f1013,
- 0xf06769d9, 0x885a331d, 0x3cc8d7fe, 0xcbf86b61, 0x0f418fa2, 0x769d79d3,
- 0x13bba7ac, 0x271f33e8, 0xb86663d4, 0x47e3c94e, 0xa3eb10b0, 0x0d3e86a4,
- 0x9f40dc73, 0xd47d4a8f, 0xddc5fb43, 0xe80b2926, 0xa6c585fb, 0xda211d8a,
- 0x9f9e54fb, 0xfeea310d, 0x1f3e5a2d, 0x6cbedc8a, 0xf0a8ebe6, 0xa6689af1,
- 0x526be392, 0x846f5bc6, 0xc8cdb5af, 0x6ef8015c, 0xd0cc7e85, 0x55abe1be,
- 0xe83f1c66, 0x4fea4ed0, 0xc764f2e4, 0x6ec8728f, 0x21c75e0a, 0x3f9408f4,
- 0xa3e908a2, 0x7c08dfcf, 0xdbd2a868, 0xfeb26f51, 0x7b809343, 0x116b2273,
- 0x5358be50, 0xdc447aca, 0xae5744d7, 0x793e8433, 0xd654ba33, 0x839fd94f,
- 0x3d774bd7, 0x9c12fde3, 0x31d57fef, 0x1f56c7d4, 0x05718f8f, 0x3a4c53d0,
- 0xd662cba7, 0x98fe45a2, 0xbd29dacc, 0x7a52f585, 0xd4a7eb1b, 0xa622ccc1,
- 0x694ecca5, 0x3a527319, 0x2d28799d, 0xce942d67, 0xce94ed64, 0x8294bd62,
- 0x93a9433d, 0x9aeec999, 0xa637fb07, 0xd293980b, 0xa50f31ef, 0x7e9ce999,
- 0xc10b582b, 0x4a76b377, 0x24ef803b, 0x8c01fa3d, 0x9c827694, 0x904df4c3,
- 0x79769873, 0xa3652625, 0xd287201f, 0xa53b5e23, 0x94c5bcc7, 0x541d78f6,
- 0x396f6def, 0x9d78efa5, 0x56f09e94, 0x75ebda50, 0xde53bd51, 0xf5df4a1a,
- 0xcf7d287a, 0x69e940db, 0xdfd28faf, 0x1d299b79, 0xc67e07a5, 0x2be9cdf9,
- 0xf53e7d44, 0x0243bd23, 0xe591ecf9, 0xe68761d2, 0xb8f1dc99, 0x40661a6e,
- 0x235b283a, 0x7359fd20, 0xa43f0e34, 0x0b56729b, 0xe1ba42f7, 0x0cdfd704,
- 0x0fd248d6, 0x82f56dad, 0x0876ed04, 0x5808da7d, 0xa9e9e9cb, 0x9f208d73,
- 0xbad93a4b, 0x19c23b08, 0xee81194f, 0xfd3ce876, 0x64bca03b, 0x08d734a7,
- 0xc8e4d67b, 0x8fb1ff53, 0xe93e54c3, 0x03346b08, 0xfb3d2b3c, 0xe38138a9,
- 0x7befc281, 0xaee6733b, 0x253c44cd, 0x34c5f3c2, 0xbf512c30, 0xfa44c47e,
- 0xd9db6175, 0x93c00d42, 0x68454c1a, 0xebd40617, 0xae1fba42, 0xf2b9705e,
- 0x9655b6dd, 0x0d879c46, 0x0e5a809f, 0x7d19a959, 0xd5f3ab85, 0x5ebe6aed,
- 0x837b4cff, 0x09eb03fe, 0x7f6f6837, 0x7d32a396, 0x3a7effc2, 0xdd6fa60f,
- 0x6bf68ff5, 0x8e640fc8, 0x5774fdc1, 0xfd339734, 0x04ee9fa0, 0xc8c577bc,
- 0x1a4cc6f0, 0x5c43ca17, 0xd29b0db7, 0x77803efd, 0x1f573985, 0xd47ff2c7,
- 0xd14277d8, 0xa6d770ae, 0xc933825c, 0xc621b13f, 0x8f931393, 0x97df3cfa,
- 0x0d1f935a, 0xbff80293, 0xafcb9d39, 0xe95cb7ad, 0x447681dd, 0xc16586b3,
- 0x979b3b77, 0x7681cc6f, 0xf419bc7e, 0xdc478007, 0x0b31bd5a, 0x6a37d3ca,
- 0x19656a26, 0x78fc7887, 0x4d9ef573, 0x8f255fa8, 0x7686c98e, 0x99796433,
- 0xb857db1a, 0x55bce806, 0x95a7b6af, 0x7e649ff4, 0xce6686a9, 0x954d6c82,
- 0x3a1df4ed, 0xdfb81156, 0xefc64cfd, 0xd53c5957, 0x57fdcedd, 0xfee19186,
- 0x25fc6027, 0xc3bfb923, 0xfe48ceb2, 0x7ff24b0e, 0xca03bc9d, 0x7634db2d,
- 0x6b44e954, 0xea1d5aed, 0x3b598bfb, 0x810ee3f2, 0xe14bef7f, 0x0a28217a,
- 0xec0373e9, 0xd3deadf2, 0xfc03328b, 0x9f0ab684, 0xc97bf680, 0x0fa15ac8,
- 0x07e49ff7, 0x2dfbb405, 0x15c12e15, 0x8781f689, 0xb965cfca, 0xf80e927e,
- 0xfbe12ad9, 0xb992fd40, 0xa15d7012, 0xd2768adf, 0x605158f3, 0xf8b3a77f,
- 0x3b07ac3c, 0x75d5f499, 0x782bc27a, 0x97654ebe, 0x95f6c01e, 0xff2e0c75,
- 0xabb7067e, 0x5d013d77, 0xe24c1f20, 0xdd89f376, 0xce3fa489, 0xa0770d69,
- 0xa9e274fd, 0x6f0465d1, 0x3d40ec99, 0xc9eec4f2, 0xb39d31d3, 0x3bd52665,
- 0xb41eb021, 0xb96ce787, 0xd4fcc45b, 0xdb0bc00c, 0x7a27a33b, 0x41e5b39c,
- 0xcb47fbf9, 0xe358bcb0, 0x91da09fb, 0x22fb8dc5, 0x1c9e4ed0, 0xa6124af9,
- 0xf57d3427, 0x5dffd0dd, 0xdc2bf666, 0xe31b9d9e, 0xfe1cb1fe, 0x74a9f731,
- 0xbfd7ea1d, 0xf5a74f4b, 0xc41ccf9c, 0xec835007, 0xf6f65bba, 0x8a4d061e,
- 0x3fded6dd, 0xe65ccc15, 0xc11f30e8, 0x5ecbb4b4, 0xd657d386, 0xe9823ce3,
- 0x194f5ef4, 0xa7e52faa, 0xba608e5b, 0x3bf5d724, 0xff179f9c, 0xbcaf7be1,
- 0xc7c8f008, 0x07a8cdeb, 0x37d13e65, 0xebf92763, 0x21f90c74, 0x45a7926b,
- 0x746c048b, 0x787ca0c6, 0x835169f6, 0xad21b9fb, 0xc3610c25, 0xb41ae97d,
- 0xb745770b, 0xb3b8d0d4, 0xe3ce19c2, 0x210fb0db, 0x3b45645d, 0x809f9063,
- 0x3e1d692f, 0x10d3e993, 0x634731ed, 0x1fcf187e, 0xe1ce17a8, 0x5af503f3,
- 0x27ea04c2, 0x13fb2046, 0xbfdc06d4, 0xddfef26d, 0x38093cc6, 0xcf48d61f,
- 0x64ebf40e, 0x67fe7095, 0xb02e9c91, 0xfa44d37a, 0x96cd9c84, 0xb89dcf16,
- 0xe897560e, 0x3ba40beb, 0xf40706d8, 0x19827491, 0x055bd7da, 0x219c830e,
- 0x7e87f0bc, 0x42c98b0c, 0xbf828d7a, 0x0bda02b5, 0x1c7033d0, 0xdf834cf3,
- 0x9b177429, 0x4235c7c1, 0x65acbfa6, 0xdf053dbc, 0x9f2e0cb8, 0x83496f01,
- 0x7c6568f8, 0x009a1c60, 0xd1ea027c, 0x66f006f8, 0x05d21b76, 0xa7817a48,
- 0xc703124c, 0x8471ad5f, 0x7a645da1, 0x4fe8fbf5, 0x02394289, 0xf753fe78,
- 0x69f22324, 0x327238f0, 0x46df08d2, 0x6f5187d0, 0xfce2e99f, 0x11e958db,
- 0x18423ec8, 0x3a2c9fc7, 0x35681fd0, 0x017416ad, 0xf326b5e3, 0xa5667a7d,
- 0xcae51ba7, 0x3b08e2cc, 0xac9df7d2, 0xf3999d7d, 0xc04c91f7, 0xf81b146f,
- 0x41c4d899, 0x4f47f0bd, 0x97127bbf, 0x97b59934, 0xf9fee2e4, 0x70b3fdf8,
- 0x3a3eb6fd, 0x4ff107b3, 0x3baa79cc, 0x5e741a06, 0x197dd794, 0x6ca4ebf7,
- 0x1c0e3f13, 0xe0079247, 0x8d44bf40, 0x41e81cba, 0x65742b05, 0x71a21a13,
- 0xe444d1bf, 0x7a819ccf, 0xe486944a, 0x6129c83b, 0x33fd4841, 0xf9e4ae88,
- 0xaee7d657, 0x375a4c8c, 0x03af89ab, 0x171bb7eb, 0x5583ca46, 0x6dfa09d6,
- 0x011f3604, 0xa658df74, 0xd7680bf5, 0xac28fd62, 0x919de2cf, 0x8769d743,
- 0x03399a4e, 0x1ac3f4e5, 0x74366ef3, 0x7edc815e, 0xc3ed7a4b, 0xf90e5471,
- 0x7dfc91a2, 0x436af801, 0xb8f5eac1, 0xac6dfd59, 0xb48cee6b, 0xc989f9a1,
- 0x3ee2660b, 0x68cb589b, 0x47cb83fd, 0x575ea63d, 0xa3c737b2, 0xbbf988ff,
- 0xce7ac1be, 0x8655de4c, 0xbaf07cfa, 0x1a97742a, 0x105f7a1b, 0xa65f3933,
- 0x954fd846, 0x92215e93, 0x114ce09e, 0xa2ed03e3, 0x1f7c190d, 0x1335fdba,
- 0x23ec43ed, 0x5cb71fa3, 0x93ef577b, 0xa50eb0b9, 0xae683457, 0x1cefa3b0,
- 0x484ce1fa, 0xddeafda7, 0xf50affd5, 0x1a657da6, 0xb06ff987, 0x10e819be,
- 0x9d80f5f1, 0x2fdaf001, 0xa3a0b4f6, 0xd992b1a4, 0xfa55df71, 0xf3f723d9,
- 0x49df9d0b, 0x645735f2, 0x820e493b, 0x2f401e65, 0x7e896900, 0x56731978,
- 0x68dd90b6, 0x5907cccd, 0xf6bd6e8d, 0xfe5fb425, 0xefd0cf49, 0xd242af38,
- 0xe7f8c095, 0x65ba4a55, 0x5917c7e8, 0xf311ea39, 0x3f94ec2b, 0xf3ced2bf,
- 0x417f3c8d, 0x17f28385, 0xbe783a54, 0x3e61f209, 0x7bf85616, 0x42eed0bd,
- 0x41a48c53, 0x93a5783f, 0x50355f97, 0x0f603bf9, 0x87c90264, 0xde49f987,
- 0xe1a67cb7, 0xb84f12f1, 0x0d5c005f, 0x123d4591, 0x1a7ddfce, 0x8624167b,
- 0x01999114, 0xe9c2b67e, 0xf3f942d7, 0x13809fcc, 0x6af546f9, 0xbfe43667,
- 0x719a2c91, 0xc0386f7c, 0xabf9c66e, 0xe9de6915, 0x351bf007, 0xd8cf14c9,
- 0x7dadfb21, 0x67e48c2b, 0xfc875d05, 0xa2456cac, 0xf2f0c7ac, 0x7ecdcb6d,
- 0x0e40c7d4, 0xdff63ad2, 0x001e9227, 0x1cedf0f2, 0xca5e8bd4, 0x484987b4,
- 0xb37e6ab7, 0xa12f2f54, 0x19992af6, 0x9559f2ed, 0x97c6bdbc, 0xea0d7e34,
- 0x3379a655, 0xf71a0e91, 0xafda3b32, 0x2a9f56cf, 0x9b78458b, 0x87e5cedd,
- 0x56342e5a, 0xe12e396f, 0x731a3c78, 0x7ee7d516, 0xe94f46ad, 0xc830ad2e,
- 0xd2deb4cf, 0x2a18fca0, 0xd83dec6f, 0xa955424e, 0xb3d4137d, 0x8fd138c0,
- 0xf3cf4dbd, 0x1488f77d, 0x8b54b067, 0x4182e65d, 0xa7914efe, 0x187c82d0,
- 0xc545f245, 0x525fa35f, 0xf9923fc3, 0xc45df814, 0x3176f971, 0x5d95c93b,
- 0x27207aea, 0x52af2835, 0x67f9d4da, 0x2fc504b2, 0xe0879aa5, 0xb87ab679,
- 0xe71e0bdc, 0xc2bab8e0, 0x34b8d146, 0xd7285c7c, 0x74d73f20, 0x2b73f288,
- 0xa124bb45, 0x853f6bbc, 0x92f51fe3, 0x9e00bac8, 0x13345d55, 0x66d669e9,
- 0x6643b90d, 0x0b925eb3, 0xe2b71b37, 0x671e08df, 0x6436f8f0, 0x1cdf6968,
- 0x1bd707d0, 0x7d6c1f49, 0xb8c1a4c4, 0xe15be822, 0x973857e8, 0x099e1a55,
- 0x532230f3, 0x6bafd7d6, 0x34693cb9, 0xacea9d11, 0xfa04e58f, 0xa5d1841f,
- 0xeebeb023, 0x72bcec7a, 0x68e91b8a, 0xac72c1dc, 0x93639658, 0x2a7c60fa,
- 0x933873ff, 0xd6f7e602, 0x7c1518fe, 0x5544d5fb, 0x2e1012fd, 0x8d53e581,
- 0x69f2aa79, 0xf82a71e6, 0x5514db0e, 0x54d219fd, 0xce677c15, 0xb3faaa9d,
- 0xf055f3ed, 0x544bc55d, 0xee3ae7f5, 0x2fcf9555, 0xbe55487f, 0x0546b9d0,
- 0x7bf8aa2f, 0xaec5fd55, 0x92f95546, 0xe555279a, 0x9293f6c1, 0x3f726696,
- 0xe79b56cf, 0x8be483a4, 0x0e46704b, 0x14a967ad, 0x3dd703fb, 0x53eb822a,
- 0xe51af5c1, 0xe04178f5, 0x48feb1fd, 0xe29df20d, 0x91ed637f, 0xd75cff2a,
- 0x3ec6c9e4, 0xec72fa13, 0xb2dd23ef, 0xc51fdc13, 0xc53bfcfc, 0x04fdd57f,
- 0x56f683d4, 0x715ba647, 0xabbf43bf, 0xf9fd0f4b, 0xb7bfe435, 0xb40bd736,
- 0xe7ae6e0f, 0x7aab957e, 0xc00d8ae6, 0x7ef38583, 0x828e78e5, 0x70447327,
- 0xcf1860ff, 0x95fb26df, 0x52bf439e, 0xb03f6336, 0x17787e28, 0xac509bb5,
- 0x467fa1f7, 0x7a08f6a8, 0x662e5b94, 0x07284836, 0x97ff5397, 0x5823ec2b,
- 0xd2567988, 0x398daf56, 0xbd5b7da0, 0xc61ead6f, 0x69399756, 0xbcd0b3cb,
- 0x1fc8c3cd, 0x94e668f3, 0xe7816b2d, 0xa25321ff, 0xf9104975, 0xf75f3f3c,
- 0xd03db9c6, 0xa5956b5f, 0x706ae508, 0xf3bb9279, 0x941bb1e2, 0xd5f2bf3f,
- 0xd1bfee0f, 0x671d75f7, 0x87ca029a, 0x24568bf5, 0x1d004732, 0x7ed08648,
- 0x74c81ff9, 0x27ac9ee5, 0x5a33d63f, 0xcfc893d7, 0xd7ff2617, 0xddf0bdc4,
- 0x3dcae979, 0xfe43a7a8, 0xea1f300f, 0xcf9ebcd1, 0x29e744bc, 0xbf40f352,
- 0x2c3bfd47, 0x941edd34, 0x95ad17db, 0x5a07e8fd, 0xd097a91d, 0x7e324fce,
- 0x0eea7e46, 0x83abb71d, 0x07e7f7ed, 0xb276edec, 0xedcbf7bf, 0xad5396ec,
- 0x8ccf7f99, 0xe45a263e, 0xf98840f8, 0x09b546d0, 0xab12af28, 0xf9024fc5,
- 0x4f523ba2, 0x498b047e, 0x6a983bf6, 0xc5017e54, 0x791dd06f, 0x6f7480bf,
- 0x7e415ae9, 0x6017f47a, 0x75e5077b, 0x2417f609, 0xe8b4ff19, 0xb51bd8e3,
- 0x23f206cf, 0x04fd1084, 0x27f8a1e1, 0xa3f1fb82, 0xccfb813e, 0x27d270ca,
- 0x397dc782, 0x6650e4b3, 0xff48f23c, 0x7e503bc9, 0x7ed1d29f, 0x84bd1c38,
- 0x1fcce149, 0x266f7ed0, 0x7223efb7, 0x91f95462, 0x0fe644f6, 0x64b35ff4,
- 0x321127f3, 0x48cc5b0c, 0x51c6df7d, 0xbcdbdfd8, 0xbb3a13bf, 0xd841f48d,
- 0x7bfed0f6, 0xeee7e64f, 0xab69465c, 0x01b7976c, 0x9cfa631f, 0xd9fa4cd7,
- 0x843fe036, 0x4a257bb7, 0x4a1cdefd, 0x54ed7a77, 0x4c5bcbbe, 0x83af1ef0,
- 0x72debde0, 0x9d79f74a, 0xb792ff94, 0x7a9fe082, 0x7bb6945d, 0x15ff286b,
- 0x03e087af, 0x5f8206de, 0x44507d45, 0xa533753f, 0xa85b786f, 0x46dbd3bc,
- 0x3f5e9bf0, 0xbdde5da5, 0xdbc87f94, 0x6f11f04e, 0xbcc7c107, 0x78f7045d,
- 0xdb7d287b, 0xeff94bdb, 0xdc134f78, 0xb1938f2b, 0xfa2f5ee6, 0x84a25c2b,
- 0xaeb3ddfa, 0xd7ea336a, 0x331a65b6, 0x9736f38a, 0x39fa0566, 0x78f255b6,
- 0x86956d9f, 0x87a040fc, 0xb39f51fa, 0xf8fa7b2d, 0x405f2a34, 0x49b15be5,
- 0xabac4a30, 0x56cafeb8, 0x159ea9ea, 0x77c1020f, 0xfd2b6adb, 0xb25b805a,
- 0xbadca9c3, 0xedbb244a, 0x3a278d79, 0x245deb9b, 0xae3aadbf, 0x1074283f,
- 0x22e9fb21, 0x974a44b2, 0xa3fc7365, 0x23ef4232, 0xfc4a5919, 0xc707b9a4,
- 0xf182ab67, 0xf555be21, 0x5a5f8c46, 0x49f183ef, 0x3aae3fa4, 0xc3247804,
- 0xc07f1178, 0x6cc2fc87, 0xad97d719, 0xaf08eb55, 0x94f365e4, 0xda346778,
- 0xfa9e6de9, 0xa56f84b9, 0x36cd7e48, 0xfbe8a1d7, 0xe7ca4e50, 0xe1f1ac7c,
- 0x50fe7f4e, 0xf082327c, 0xddb8c6df, 0xd2fa0c90, 0x00e310fe, 0xc7a9478b,
- 0x198f78fd, 0x26f1fa43, 0xb2627acb, 0x3d8ff9c0, 0x6dff122c, 0xc1f7a067,
- 0x7e81cf77, 0x503943e7, 0xfa156fba, 0x72cd1eab, 0x3c02cc4d, 0x3f8d247e,
- 0x531c0224, 0xaf4fdbce, 0x1ea0f717, 0xae7deb0d, 0x3e37f209, 0xb689d9b8,
- 0x6966ec71, 0xdb5bb22d, 0xd01357b2, 0xa82c95ff, 0xcb4947d7, 0x3bb63c65,
- 0xd5cf7b78, 0xb64fe48b, 0x76903aae, 0x865bf784, 0xfa8e26fd, 0x61ba187d,
- 0x1a481632, 0x55c91914, 0x2fec45e2, 0xe5cda1ec, 0x66fc5fd8, 0x215fd287,
- 0xe775f7f6, 0x65e3e469, 0xbc085756, 0x37279386, 0xb38b2393, 0x8e48e322,
- 0x9424b124, 0xb8ebcf23, 0xe49df2dc, 0x861d24fa, 0x1fb43499, 0xc4ae7e25,
- 0xe8e383d3, 0x243d3235, 0x61c74219, 0xfde80d57, 0xddb7aa3e, 0xe749c9b1,
- 0x0b3e304b, 0x6c75ab45, 0xeaf3ae05, 0xdaf37092, 0xa4a0f410, 0x018e505b,
- 0x468bdfdf, 0xb9b3b523, 0xe1a3b79c, 0x56cadced, 0x32625c11, 0x6bc842fb,
- 0x6e5a7d25, 0x4e2c81e7, 0xbe0f49b7, 0xae18f1b4, 0x3a3adbef, 0xc69cf413,
- 0x04ed11bf, 0xc92d09df, 0xfb6d44d7, 0x78013d8e, 0x79e3cb35, 0xb5cfef4e,
- 0x7b40ff23, 0x915728f9, 0x7fc9f25f, 0xcc0566c1, 0x5addf2bb, 0xa3fd8fd9,
- 0x29a21b6b, 0x2eb55cbe, 0x36a3f50c, 0xb07a2466, 0x85d13cea, 0x4560f401,
- 0x1f410cba, 0x4af4b2fb, 0x3254d53c, 0x7c32e311, 0x728a9e5f, 0x583ef009,
- 0xe1fb451d, 0xdeef65d4, 0x5be01639, 0xd47688ae, 0xe5ab9ecb, 0xfd2713e7,
- 0x26e74351, 0xdb9c7cde, 0xb7fdb77d, 0x7bf41eb2, 0xb3e9c827, 0x7a41e768,
- 0x77b3334b, 0x52dcbac1, 0xcdff226d, 0xb3c557ad, 0xb91a7681, 0x7da3c7a4,
- 0x5335fdc1, 0x803ca1fa, 0xa67d26fe, 0xaa330fae, 0xa1233d45, 0xab6f9d4f,
- 0x1a66703a, 0xcb56d3f4, 0xf6bb6e3f, 0x7d8471fe, 0x93f21af5, 0xf429f500,
- 0x4dfe4093, 0xe78097d1, 0x191a3716, 0x32eb73c9, 0x41e519a3, 0xe49c25b7,
- 0x1362db5f, 0x321ba1e9, 0x9ea18757, 0x8faaa86c, 0xdea474c6, 0xa77d03b7,
- 0x7dffbbdd, 0xe4812d9e, 0x817f3f2d, 0xfa0c5984, 0xae7abdc0, 0xb6faf8e4,
- 0x7944cb4c, 0xc3f2a1a9, 0x8b7dffbb, 0xac077d0f, 0x3490f085, 0x7e0f912c,
- 0xe29bbe00, 0x2fdf02d4, 0x444e6740, 0xabae74f8, 0x4243e507, 0x8b1baf9c,
- 0x56ce9ab6, 0xcae2f9d2, 0x5d33e238, 0x31e987b5, 0x18c8b2bf, 0x9f0ae7c8,
- 0xfffcb5fc, 0xbbe71cb4, 0x59b81c99, 0xf7e337bb, 0xbe73a59f, 0x369ece19,
- 0x154f230c, 0x9acf18fb, 0x4b6c8a63, 0x7d59eafb, 0xf6eafd48, 0xc567ce6c,
- 0x643e7e7a, 0x72f4c54e, 0xfaf8c38a, 0x96c73c44, 0xa9f9cd93, 0x714912cd,
- 0xd674b6e2, 0xdf037185, 0xf7f70d72, 0xdafff1c4, 0x3fc9d32a, 0xa2782ad4,
- 0x3e78197d, 0xeb8695c2, 0x147306d7, 0xa6b7fd09, 0xfcceb972, 0xabc6331e,
- 0x409a9da1, 0xf3dec1fc, 0x9198e9a9, 0x0bfe4c3e, 0xa49ef12d, 0xa3b7f00f,
- 0x17fd1c5f, 0xdadddfc1, 0xbf6f86af, 0x984b86aa, 0x354f054e, 0x6669e1aa,
- 0xe6c96c35, 0x361dfa7a, 0x490cfe75, 0x1ca7270d, 0x73f0d309, 0x7f01a6ed,
- 0xbbf044b3, 0xa07e0199, 0x01769a0d, 0xafa525e7, 0xadb2fc96, 0x93dfdca9,
- 0x73f6af5c, 0xb596b224, 0x6df90231, 0xfcb6f954, 0x73fa0255, 0x7cead56c,
- 0x8c150798, 0xbff48acb, 0x9f95d724, 0x02e9718d, 0x4b3aee7e, 0xc5cde9eb,
- 0x238b6875, 0xcf934a3e, 0x1fdd0d05, 0x95b8d0b7, 0x7c0f9076, 0xfa45e301,
- 0xe62f840f, 0xc76726ba, 0x05f03e73, 0x7df997ee, 0xa503e43a, 0x41f8e1bf,
- 0x3962c7e1, 0xab9ca26e, 0xa5b722b9, 0x97a9ab9c, 0x0475c7cf, 0x92b9c79d,
- 0x81e7465c, 0x09aef4ae, 0xc877c50f, 0xce6cbe67, 0x946e9877, 0xdf20d7cf,
- 0xef9cf4f9, 0xd5f0df30, 0xfd28672a, 0xafbcbcf0, 0xcc37b43c, 0xf8c071fc,
- 0xab47b656, 0x82477760, 0xd3bcf039, 0x740ff843, 0x3a3337ba, 0x0f2b5a47,
- 0xbf338be7, 0xe908d3db, 0x0b975a5b, 0xd66567d7, 0x338f3d59, 0xfe51ebf5,
- 0xdff7979e, 0x38dd820d, 0xdb94c05a, 0xbb668da5, 0xccbdfd11, 0x57f2a19f,
- 0xd5877e28, 0x31acd22b, 0x0188ff80, 0x4cd3823f, 0x33e34de7, 0x35118fc9,
- 0x2b3771d5, 0xccf73c04, 0xa2046f98, 0x554e85dc, 0x658fc790, 0x97d0fd97,
- 0x035e383c, 0xce02abfa, 0x0f441923, 0x3573b923, 0xe7014995, 0xff655bca,
- 0x9b1f00c5, 0x7f609796, 0x7896cc3f, 0xd679863c, 0xda9fff72, 0x6ab8665f,
- 0xd833b553, 0xdfaab27e, 0x722fd956, 0xfed5f6f3, 0xf4d5c337, 0x0ecfda61,
- 0xb21cdf6a, 0xd576c1fd, 0x11afbc7e, 0x3e436f1f, 0xb5ffce18, 0x1ba7965f,
- 0xf45e9e6c, 0xd818e97f, 0x06071495, 0x6a8898fa, 0xc369ff23, 0x5d5379f9,
- 0xa60fdab2, 0x1f927e63, 0x90d647cf, 0x3e6048fc, 0x9859df8a, 0x47d7f287,
- 0xbbf1e74d, 0x1371b960, 0x8798c179, 0xfd00dea1, 0x3f57f2a9, 0x46d3f7c2,
- 0x3c42eec1, 0xfff3c783, 0x89abdd8a, 0xfa4bdf1c, 0x64b84a73, 0xdbde81b8,
- 0xe32575ea, 0x3dc21bf7, 0xfeeb1835, 0x762ebb3c, 0x1b3de83d, 0x7c08c630,
- 0x71bb3576, 0xfb84cfbe, 0xf58fa724, 0x27a21a71, 0x1d96ddcb, 0xb27a00da,
- 0xa8695de8, 0xf2345af7, 0xa1fc2f39, 0x65bd6cbf, 0xfd10a6e7, 0x029bfa2b,
- 0x8f002b5b, 0x956dba00, 0xfe81e6f5, 0xd0ee2496, 0x3163b406, 0x3fa90ab8,
- 0xd2a35ce5, 0xeb37d35f, 0xf7ecab6f, 0xdeb3fa52, 0x559f1ea3, 0x59f1a1ef,
- 0xe947fde5, 0x2f8e2b6f, 0xb4dd5f04, 0x1e8ed93f, 0x7aa96fe8, 0x8783567c,
- 0x72d567c6, 0x7f40e1ff, 0xbfacdf4d, 0x931ef936, 0xe1d3e64d, 0x0e37fdce,
- 0x1a76ab9c, 0x29b4ae51, 0x7f7940fe, 0xbfb414bb, 0x37f796ad, 0x714bfbe5,
- 0xbddfe4e9, 0x18df33d4, 0xfdc35f6a, 0x247ac98c, 0x2648f593, 0xcf0891eb,
- 0x3ea67a99, 0xf0b0e81e, 0x22f9b6c5, 0x1e73e3ec, 0x153e333a, 0xf62f19e0,
- 0x5f6117cf, 0x99e2f39a, 0xc9f04903, 0xccbe2fcb, 0xe6787fcb, 0x9b94324a,
- 0x2fcdef7c, 0x962a3803, 0xde0ac7ef, 0x8db7bf4a, 0xbba37ae2, 0xbfee73e1,
- 0xfb8af904, 0x87aa6356, 0x42f17fa1, 0x32d47f72, 0xe94eefa8, 0x310f2cb7,
- 0x6eea4a2f, 0x467e4f38, 0xff287914, 0x40bfe1e6, 0xee53cfb4, 0xefab9467,
- 0x6d454467, 0x8c2ffd22, 0xef28cdf9, 0xba0e8c22, 0x28d6f84f, 0x642c1fbe,
- 0x376e0af6, 0x39f25948, 0x31cfba0d, 0xd76e58bf, 0xb1cfcb7c, 0xb1c6a17c,
- 0xdfd9ed1e, 0x478aaa4e, 0x173958f5, 0x7f783797, 0x1bf5d772, 0xbe0e1f9c,
- 0x23e7393f, 0x23c2eb81, 0x3ba5bfb8, 0xb9f7e257, 0xa886fc02, 0x9f370ef7,
- 0x7db8a3b5, 0x0e9f383f, 0x0e319f3e, 0xfee69f3e, 0xfeb0b5a8, 0xcb7ee8ee,
- 0x5d9fc413, 0xc86be721, 0x5fcd06bc, 0xd8a628fd, 0xb7a45a79, 0x32b7cd9b,
- 0xc1f9caaf, 0xa2ca4146, 0x794feafd, 0x3183fd2a, 0x8e2a14bb, 0xb819f483,
- 0x4e6deabf, 0x5d19fe86, 0xcf073d9c, 0x45fe3795, 0xde5cf21b, 0xd63e796e,
- 0xb9d38546, 0x3d28c6ce, 0x51dddbd9, 0x97fa1933, 0x9a74d345, 0x72a8b297,
- 0x8da96f3a, 0x8f68ed65, 0xd7239f06, 0xce9bb88e, 0x68ad6901, 0x703a4041,
- 0xa461d92e, 0xe8b989f8, 0xc5c8e786, 0x8f239d9b, 0x91cfc999, 0x7425e947,
- 0xea45b459, 0x7f239e43, 0xf7df146c, 0x713c71bb, 0x278c3f88, 0xa21c706d,
- 0x31d1c799, 0xf5f2023f, 0x7f231766, 0xc7de3cba, 0xea5075e4, 0x861ecc1b,
- 0xb7ab40f2, 0x68a7a84b, 0x613ef9c3, 0x4e5869b0, 0xf7c8bf08, 0xfbe8e947,
- 0x34cd724f, 0xd7231aaf, 0x5afce7cb, 0x7a4fe908, 0x4fe29475, 0xc3674d0a,
- 0x0d3cc9d1, 0xf0a7473f, 0x9e9302e1, 0x302df7ce, 0x0fd14ad9, 0xef0800b2,
- 0xfa8098b3, 0xf7e22db7, 0x0dbb4581, 0x5337e7e9, 0x6ff9ff6e, 0xb9f9ba18,
- 0x43353e7c, 0x4ee3f747, 0x8f61cc09, 0xd57e7a19, 0x869e8ce9, 0x66bb9a6e,
- 0xec87599d, 0xeff2e2f7, 0x867b6115, 0x167553e3, 0x779fec89, 0xa85ca146,
- 0x0f1f5b4e, 0x42caa2f0, 0x94dfdd1d, 0x08ee44e3, 0xd19fc72e, 0xa28e4eba,
- 0x51e7d178, 0x624748a5, 0xa3e22a7f, 0xea0289c4, 0x9e2a37bd, 0xd7e1cdea,
- 0x18dea0c5, 0x7f67d394, 0x1eb462ef, 0x127a50b6, 0x302c4fda, 0x07cb236e,
- 0xf302a3b4, 0x01627ed1, 0x2f2c27eb, 0x9085f6b6, 0x609a4a17, 0x0d5292db,
- 0xd0909e7c, 0xff92a942, 0xee5b43ac, 0x57aeb10f, 0xc6fd75e5, 0x601b4c76,
- 0x8ed0713b, 0x2ae5fb01, 0xd517dbce, 0x61e32ca7, 0x17657548, 0xe9b18ec9,
- 0x88933e15, 0x1e6106bf, 0x45fb04af, 0xd5b0de75, 0xfba14beb, 0x6d38f2fe,
- 0xa346d62b, 0x1c55031d, 0xbcdb9f48, 0x9965296d, 0xcec8e383, 0x8d13e991,
- 0x17a889b6, 0x5c725ff3, 0x97f40ccb, 0x74349b24, 0x5326013e, 0x6e612f75,
- 0x38b4f459, 0xccdd4534, 0x2ed0926e, 0x3f3d25ef, 0xf896bae1, 0x62f0e120,
- 0xa4ab7186, 0xef0176b8, 0x23f40d03, 0x97ae6fdf, 0x5c57b9e3, 0x048e3465,
- 0xcefec0eb, 0xf38d0ec8, 0x747945cd, 0x518e69e5, 0xe5d58fc2, 0xdcfc71c9,
- 0xe1d5fdc2, 0x3ae8097e, 0x4057b6a9, 0x380c0138, 0x5c27c5f4, 0x426b4c6f,
- 0x045af279, 0x6db58bf9, 0xf207cc99, 0x45745527, 0xfb3ce0e5, 0xc9a6ffa5,
- 0xdf0b0790, 0x9c207cad, 0xa136b3f4, 0x25d90665, 0x285869f7, 0x1c98cf3f,
- 0xc828c7ee, 0x609ffce1, 0x1ea94d6b, 0x44d3e987, 0xcf975f37, 0x1db39067,
- 0x9f11d9f4, 0x1c58fa2d, 0x759a7bd4, 0x9b73e9a1, 0x7ec0efdc, 0x185fcf21,
- 0x7f911973, 0x3b9382b5, 0xdbeef739, 0xce82cfb7, 0x55e74613, 0x8d7ac936,
- 0x4e444ed2, 0xd846153b, 0x45cf0c1d, 0xdf07660a, 0x9b3ecc07, 0xf9abd60f,
- 0x7b486d2b, 0x791cc1eb, 0xb26b491f, 0x4cb2fd61, 0x94fd0dbb, 0x1a55f7c1,
- 0x70da26d3, 0x0c671a7d, 0x7031a9d0, 0xcb4ee85f, 0x53cf1f7e, 0x2c747a99,
- 0x70591cf0, 0x2173c7aa, 0x972837b1, 0x1d3797cf, 0x9b42e7e2, 0x78bd454c,
- 0x14c9a10e, 0xfec74457, 0xa99cfc4c, 0x3a64df52, 0x64def246, 0x1356f1e7,
- 0xa2020fee, 0x3ba1e7c7, 0x83cd18ed, 0x0e5a6f52, 0x7b26ab97, 0x5bfbc317,
- 0xd53f76e1, 0x3e9d130b, 0x161fe00d, 0x4ffee8c7, 0x55f39198, 0x318fac04,
- 0x7e95ef80, 0xd1d1cb6f, 0x172535bc, 0x7e1096f5, 0x6704de22, 0x1b22b64d,
- 0xdf38dbed, 0xb4636fd0, 0xaf9cdf42, 0x5be7971f, 0x3e51a769, 0x434f7aff,
- 0x58b2fcce, 0xd73913f4, 0xa6bb60e4, 0x83b446e5, 0x6fe391b4, 0x6186ded8,
- 0x285e6bdb, 0xf238f7b7, 0xc8b23685, 0x50e9ed8c, 0xc3a0845e, 0xfe76e467,
- 0x30ede357, 0x97df74b3, 0x9f305a60, 0xa3de942d, 0xa38c46a4, 0xddfa1b8f,
- 0xebbf9637, 0x90247ed4, 0xfc717bbf, 0x1ecf932b, 0xee1ca12b, 0xdb2eda3f,
- 0x619d8a27, 0x2ededb91, 0x3ee87bf0, 0xef4edc21, 0x8776c836, 0x0f3d4c25,
- 0xf941f783, 0xe0ae5c3a, 0xc72ee6e7, 0x046b8a31, 0x77e5f3f3, 0xb40e8a3e,
- 0x73ef504f, 0x04bb7364, 0x1c4bb453, 0x610d9fb2, 0x9da01bf4, 0x80fdf0a2,
- 0xfa42b75f, 0x3f005ba7, 0x0d6fc803, 0x7b242bb4, 0xc90dbe88, 0xe7c8697d,
- 0x053bf2a9, 0x5cd7f643, 0x4785a72e, 0xfe434b3f, 0x4feb8b2e, 0xf982efc0,
- 0xae4785fc, 0x5e78e026, 0xce703198, 0xab43e286, 0x7bf3a717, 0x1cffc7d9,
- 0xfed10aed, 0x457feecd, 0x86b4dd9d, 0x5ce5c0f4, 0xdf5bfc0f, 0x8cbedb07,
- 0x821d9deb, 0xcda4efda, 0x5fe77ae5, 0x7acc7de4, 0x2df7a455, 0x2eb965ce,
- 0x0ab61f20, 0x7f28e5e6, 0xe7e11f53, 0x5e3f56e6, 0x2103d9ff, 0xed1841f8,
- 0x804e73fa, 0xacdc5c3d, 0xa35a3f27, 0x8dc47d29, 0xafc31ce2, 0x71cb87b3,
- 0x1e4b7d79, 0x24f3f235, 0x5b17ae2e, 0x39d3f75c, 0x23f3e095, 0x243bd5c3,
- 0xbea53dcb, 0x7947db01, 0xfe479a62, 0x23684b80, 0x964dbfa0, 0x7678c17b,
- 0xf3c864e2, 0x12b90463, 0x18456875, 0xed93abb6, 0x3f8aedb1, 0xe8c8a2a3,
- 0xb75ef808, 0x1a58c8b8, 0x8c8d2f40, 0x6e0ddf9f, 0x01dcceff, 0xafa55a7d,
- 0x459f70e8, 0xf89c50df, 0x433901a7, 0xcab8658a, 0x0ffa518e, 0x70163f3e,
- 0xf38c2c5e, 0x5a67b729, 0xe818dcf1, 0x962b6bb7, 0x6df9c48f, 0x582f5d15,
- 0x3ffa1e61, 0xe739d030, 0xecbe650f, 0xb872f993, 0x43b1f3f5, 0xf6318ca0,
- 0x97823b04, 0xc64e80d6, 0x68fe408f, 0xfb94d4cc, 0x5cb38c43, 0xfd08671e,
- 0xafdcd6fa, 0xe6975fa7, 0x51afd086, 0xc504767f, 0xd461021f, 0xf4211d9b,
- 0xec8213eb, 0xedcf900f, 0x1d49d09c, 0x270f307d, 0x6f611d6e, 0x919ee894,
- 0xf6045ec3, 0x32678bc3, 0x239f6f51, 0x23c7bc0c, 0x608c31db, 0x221623b0,
- 0x6799735e, 0xc50203df, 0x48ee5a51, 0x5c04eea3, 0xa7ad9b8f, 0x3711fb24,
- 0xdc613ac2, 0xb2daf5a1, 0x0ce2a119, 0x274038ff, 0xef02ba28, 0x042ac007,
- 0xc48695c4, 0x63e282cb, 0x38194e18, 0x5a1dabff, 0x7562aeff, 0x35e36afc,
- 0xac30bfee, 0x0427fe60, 0x2eaa1f3c, 0x0bae5f9f, 0xe2a9cf8e, 0xba9d0b5a,
- 0x887c7c60, 0x6f8cb26f, 0x2907b6ca, 0x452d3e0e, 0x64acfde4, 0xc707cf64,
- 0xbd850cbd, 0xc3ea2d0e, 0xf7f48c15, 0x9e363f60, 0x5eff2517, 0x09cf6305,
- 0x7d5ade74, 0x59e9e4df, 0x1cfbf8e7, 0x8fd8e52d, 0x50c3532d, 0xafbe0e3f,
- 0xf09bc4a0, 0x7de04c98, 0x22dde569, 0xe10d62cb, 0x53b77cc7, 0x8797e2b9,
- 0x4b12a6fa, 0x55eeb9c0, 0xdd3ea06c, 0x1e85f2a3, 0x59d6d6f7, 0x1c4fef14,
- 0x82ec29ef, 0xa33b24a2, 0x66aa4879, 0x3cc00764, 0x86fae2a8, 0xce345d94,
- 0xc58c354b, 0xaaa038f2, 0x7076013c, 0xbf184fa4, 0xe30fce2a, 0x76c1c154,
- 0x4deccde7, 0x0de64a43, 0xfccdf9e5, 0x5e39068d, 0x646e46f8, 0x633be91c,
- 0x77d418d8, 0xa69f8b2c, 0xeb04b8f5, 0x34597e81, 0x03c54284, 0x68fb4337,
- 0x5f90ba07, 0xc4dfd1e2, 0x30caef54, 0xc9f74887, 0xb38e6284, 0xf1e4eb10,
- 0xfc467ce1, 0x9507086d, 0x9859f80b, 0xbce0706c, 0xc3000c1b, 0x333d0590,
- 0x56be4026, 0x15af37f6, 0x33308898, 0x4fdc8ddb, 0xe36bbca8, 0x866d77a7,
- 0xff7407eb, 0xcfec30be, 0xfa53bfc7, 0xdb8ef219, 0xfb27be0d, 0x7dfc1ccd,
- 0x951df919, 0xff9b87fe, 0xfb6ddee1, 0x2777d4c3, 0x3267bf70, 0x20e56bd1,
- 0x4cbc12bd, 0x1efc47a6, 0x4abdc2e5, 0x0c77adb1, 0xfb3c37de, 0xfd1d8470,
- 0xf0e303e0, 0xefe5a9dd, 0xed75fdd1, 0x9361e00d, 0x8c0fbe48, 0xafef6e4b,
- 0x02fd171d, 0xbc06dae4, 0x4fe78434, 0x279d192a, 0xcf4ab8b0, 0x000e7489,
- 0x1392a0c6, 0x4013ef01, 0xf10e1b7b, 0xf15baf73, 0xb0710b9d, 0xd02bd45f,
- 0x4255b06e, 0x483b087e, 0xbf19ab49, 0xd5e0ac1f, 0x1a3d07a8, 0xf5c352e9,
- 0x3ea6d9e8, 0xe935cf11, 0xe445fae2, 0x2e68e71f, 0x37e807c7, 0x0bf9d19e,
- 0xce7c926d, 0xdfc4a471, 0xfbe4cb4d, 0x84cfd29b, 0xfe591ad1, 0x47fbd2f7,
- 0xd54e23b3, 0xf2c6764b, 0x442fc5f6, 0x11ecfcff, 0x00d3eaa7, 0xe167d82e,
- 0x2ba9d270, 0xecc66669, 0xd10bacfb, 0xef3b3f9f, 0x807495d4, 0x78d9f68b,
- 0x464614f9, 0x097df8ce, 0x319fd10b, 0xd559b461, 0xd7385bfe, 0xd53cdff1,
- 0xcf21341a, 0x5a36df63, 0xe8c94f39, 0xf3b3ad3e, 0x2e31a797, 0x47f654e9,
- 0xdf482e87, 0xde0b2281, 0x717f9323, 0x9d0f1451, 0x9dd9d329, 0x1452eef8,
- 0x68bb2dfa, 0xfa0d477e, 0x6bfe9fe9, 0x4382f4fc, 0xccca9779, 0x1bff812a,
- 0x2c2b981b, 0x99ee8023, 0x9f9d7343, 0x81b8a603, 0xffd28f82, 0x6960bc10,
- 0xbc4098c5, 0x0a3b51b6, 0x0d0a175e, 0xc70587a4, 0xfd082e87, 0x549cf1db,
- 0x2fd6cfc4, 0x10ba75c2, 0x002a9fd9, 0x9eb79e1c, 0x7c42edd6, 0x1ec176ff,
- 0x3af8eb00, 0xa22bef56, 0xd6a3fc75, 0x027a3d51, 0xdfc5dbbe, 0xe396686f,
- 0xbbc64f50, 0x019f687c, 0x32b9d1dd, 0xfc9e8a5f, 0x05e36548, 0xb03fa047,
- 0xcbf7346e, 0x69d3925c, 0x49cf1686, 0x01f74ba0, 0xfa85ad81, 0xf5976bee,
- 0xeecc9fa8, 0x3d47a33c, 0x19c0eeba, 0xad4bde3d, 0xa9da1843, 0x8477a6db,
- 0x32feffd2, 0x97f2ab5a, 0xd1d77629, 0xe8f5ec94, 0x16abab77, 0xeade1ee8,
- 0x6e1fb9d7, 0x251c32f5, 0xb2df7763, 0x55b8fc9f, 0x98f5eece, 0x477d652e,
- 0xc588ddf1, 0xdceaad2e, 0x089a92c0, 0xcabe3c55, 0x9782ab13, 0xc0eac0b6,
- 0x21b4bb3e, 0xc31df549, 0xef82cf5d, 0xd0fe0e28, 0xd790affe, 0x736db171,
- 0x16bbfa2f, 0xcc7ad4bb, 0x62b5ceec, 0x11ede20c, 0xfece1aec, 0x84b77e12,
- 0x00d2108f, 0x7de60abd, 0xa3d7075a, 0x648c2aea, 0x0c22549f, 0xab595af3,
- 0x7393e3f5, 0x54f45347, 0xa810eeee, 0xbb972ddc, 0x576e046b, 0xa975f2cb,
- 0x8392e38c, 0x75d1a727, 0x37a39f52, 0xa5df0033, 0x88e7e515, 0x437d651e,
- 0xc4855719, 0x290d8df5, 0xddb0e9ef, 0x5817dfa5, 0x4b8d1a6c, 0xa92d47a5,
- 0x1e55c351, 0xd59e905d, 0xbdf943a7, 0xff9d41b4, 0xbef2d1f3, 0x90f2c79e,
- 0x2d18de9f, 0x07189f88, 0xbca53fe1, 0x81ef0277, 0x0f3ad25d, 0xead67d22,
- 0xdf700abb, 0x1175a4bb, 0x7e4394ac, 0xb8afdf1d, 0xce01a2be, 0x751befc3,
- 0xa7de330e, 0x8814adf5, 0x00b0976e, 0xb06d27bd, 0xa1dac13e, 0x7722bf70,
- 0xe4f59cc3, 0x98283d0f, 0x13286f51, 0x83de221d, 0x8fc61c6f, 0x83e6517e,
- 0x1f2fac13, 0x2246caf1, 0x9eca7585, 0x99f6823f, 0xf996b7ec, 0x3d948e30,
- 0xc4bf204e, 0x4371809d, 0xc922a611, 0x37b03991, 0xad9a4184, 0xf920af6d,
- 0xe66f563e, 0x58da62ef, 0xfc70bca0, 0xbe701333, 0x0bb27510, 0xfde87b93,
- 0x9b6d39bd, 0xcc5ca22f, 0x3d1470cc, 0x193df805, 0xf27ffbc6, 0x16bfc175,
- 0x3fcba9da, 0x0ad3f246, 0xd6fd81e4, 0xda3964d9, 0x5acff6a1, 0xc121ae51,
- 0x7ea7cacc, 0x32f89413, 0xc3ed15d0, 0x9fa4a55d, 0x9f7ee558, 0xddb8942d,
- 0x2e7ca0fc, 0xe2d6f98d, 0xd1befaf9, 0xf495f28c, 0xcaabfb1b, 0x5e283104,
- 0xe61a3750, 0x4fada579, 0x5be9de75, 0xe901bf6c, 0x187db82c, 0x395e2abe,
- 0x8a2d7b4d, 0xcc1d77ee, 0xfee8deb0, 0x3ea8d694, 0x8733a4b7, 0x53bbdd07,
- 0xf6d99fcb, 0x777ba0a4, 0xeef74119, 0xddee82f2, 0x2df9efa9, 0xba094e74,
- 0xe7bea777, 0x54cdb067, 0xe6930fd5, 0x9c88e0a9, 0x23f555bb, 0xe7d5cbed,
- 0xbcff8eae, 0x77c74842, 0x2e7d7d06, 0xe0668798, 0xc198e28d, 0x0a2f247d,
- 0x3e1a432a, 0x6984cc2e, 0x0ba1a173, 0x9139a4e9, 0xe386e7db, 0xa0badb0f,
- 0x9788e31d, 0xfc6fe88c, 0x452fe089, 0x9f28c4fb, 0x18fd1953, 0xaf33ed53,
- 0x52be7015, 0x5cf2dbd2, 0x7d61f3b3, 0x494af7f2, 0xdee01dfc, 0x463eb8d1,
- 0xf9036c7c, 0xaf7e55e6, 0x476cac78, 0x5f3ced28, 0x0f353258, 0xcc7fcfbe,
- 0xece28331, 0xf07efc1d, 0xa78f3d4e, 0xe7e55ef8, 0xfa3bf35b, 0xfe008e34,
- 0xf5c7993e, 0xa59c6790, 0xeba91f14, 0xf9165d84, 0x1eaceda9, 0x75d465ef,
- 0x7ee14776, 0xab3ce056, 0x60c9bf47, 0x89cfbcbe, 0xaf68e1fd, 0x743dacff,
- 0x9e6d6d3c, 0x98e3f89e, 0x1eabd72c, 0x91bb72d6, 0xb9e16639, 0x575e4571,
- 0xabe3e7ed, 0xe7e7a85a, 0x1d1a3160, 0x9c836e42, 0xc396ab3e, 0x707ef50e,
- 0xca3d7f8b, 0xaadf900e, 0x4c174b7a, 0x12b7bb27, 0x31f236e4, 0x797573c8,
- 0x17c91372, 0x7caae790, 0x3c80de77, 0xdf0ebfcf, 0x426a769d, 0x8170f38e,
- 0xda78f348, 0xffe69535, 0xf4479819, 0x64333da7, 0xde7cf8de, 0xc65ebe6c,
- 0xb7580fb5, 0x2ebf7ae0, 0xf2f6ebe0, 0x7dfaf230, 0x0a775d79, 0x4c2845bd,
- 0xa270cedd, 0x634625db, 0x387b9e09, 0x1365f296, 0x7eaae1c6, 0xfd3dd02e,
- 0x6e3eeb7f, 0xbf5ffd41, 0xfd05a8ff, 0xa3f6ee61, 0x594db016, 0x8b5a4ead,
- 0x85d03c7a, 0xba9ef51d, 0x7fc22e8e, 0x6eb8fb65, 0x21d95fcc, 0xee3dfbd0,
- 0xd3a71703, 0x135f5938, 0x0e770bc6, 0xf28fd783, 0x37befeb8, 0x307ee24f,
- 0xfb89f417, 0xdf8301fd, 0x801d202f, 0x5ebc425c, 0x35e5f017, 0xb1a373b6,
- 0xf51e9422, 0x1f7cf26f, 0xe867a8d9, 0xef85afaf, 0x52c71c71, 0x488de98e,
- 0xb205fb47, 0x27edddb5, 0xfb604617, 0xfa863af5, 0x17df8285, 0xe21fbd29,
- 0xd6a83ff8, 0x3580a7a1, 0xbe696b56, 0x52d7a83f, 0x74fbda3b, 0xc15e2f04,
- 0xae1c55cf, 0x0929a8fe, 0x56a0fdda, 0x530c7ba0, 0x31f3fbef, 0xe0af1784,
- 0x3d305afa, 0x4c1af42b, 0x3bf4c19f, 0xbe6b4e84, 0xc91e9d19, 0xe4d5ef93,
- 0xe7dc02b1, 0x59a7dd61, 0xe11e7e91, 0xae2be3f7, 0x7fa1d61d, 0x6cc9bd33,
- 0x31e0027f, 0x0b7aff5a, 0xbcc389ca, 0x8776b0bc, 0x748de5c0, 0x5261f237,
- 0xbda19eb3, 0x971e7efb, 0x7ae3ef4b, 0xfad0e3c5, 0x31af8c05, 0xe51d37b1,
- 0x4f1c9a52, 0x2ac7457a, 0x905c53f6, 0xaf319bcb, 0x96ef54d9, 0xbd71e537,
- 0x40839143, 0xe4fdaee6, 0xc44f6d97, 0xcb2f147b, 0xe55341e7, 0xfa32b2f6,
- 0xf485cfa7, 0x672f4caa, 0xf8143c64, 0x6cff4cb2, 0x1f68cfdb, 0x59c72d9f,
- 0xd1743666, 0x833ccc79, 0x879453eb, 0xabdf52b3, 0x39ae7dc5, 0x3a7cedfd,
- 0x25378d67, 0xc9fbe226, 0xd27877fc, 0x60d6dba7, 0x9c372fdc, 0xe0ec31d6,
- 0x381e7453, 0x35bc6b2c, 0x49439c54, 0x7a2557dd, 0x6aeeb08f, 0x3c0aa53a,
- 0x7d39d0f7, 0x1701d91f, 0x5c9f13a7, 0x2ddea9b5, 0x7bc654fc, 0x5d3ef28c,
- 0x80f3f727, 0x8b8b169c, 0xf329eb06, 0x1c659e80, 0x2ed6586f, 0xab35af5a,
- 0x75a92158, 0xcefe29ec, 0x0149ed66, 0xa458bddf, 0x304ce873, 0x608defdf,
- 0xfc8b1f25, 0x3657241d, 0x748c4dc6, 0xd784fb2f, 0xd7af704c, 0xaf29f046,
- 0x61ade944, 0x687c88ce, 0x4f11e167, 0xe1dbce16, 0xde275694, 0x8ef0bc57,
- 0x336ba64e, 0xd32e2fbf, 0x2d4ee5da, 0xeb9c6233, 0xc2e3e800, 0x5cf7fe20,
- 0xddfba44d, 0xa0fec3bf, 0xbf0d959f, 0xaf677921, 0xed1d38b9, 0x33dac2a2,
- 0x1bbb084d, 0xf51d3cfe, 0xff610f89, 0xf41ebd20, 0x72809b7b, 0xd3bd24e4,
- 0x8635795d, 0xd35979da, 0xcbf91d3f, 0xf740a6f7, 0xb8ec3e6f, 0xfdf2b7a8,
- 0x7873dc89, 0xf2fd1c2f, 0xb9a3f2b3, 0xd42ce91c, 0x243afad0, 0xddeb3bd5,
- 0x0a2bffa3, 0x20d8c3d7, 0x7e0a7afd, 0xc865a3df, 0xfb4a9bbf, 0xe421cca3,
- 0xdff469ef, 0xb071f365, 0x15e5a1ec, 0x9e165f7a, 0xe990ac93, 0x3cbcb838,
- 0xa13fc07f, 0xebefca73, 0xa1fe0108, 0x3fdfc62a, 0x2ba7bb05, 0xaab8f81c,
- 0x67f92a73, 0x0dc23caa, 0xd1c333fd, 0xeb4dd7e1, 0x28cff718, 0x818dda30,
- 0x1bb44d7e, 0x1f7faa99, 0x252b077f, 0x74f786f3, 0x14897ddc, 0x666bafcf,
- 0x795aef14, 0xe7a53475, 0x2120df5f, 0xfbc318ff, 0x6b7ee95b, 0x8cec86b6,
- 0xfeb53e95, 0xcb80a533, 0x7c052985, 0x60f95cde, 0x01b0e52f, 0xa2ec097b,
- 0x5db9f37b, 0xa9887514, 0x7ce0a5ec, 0xf28c97b2, 0x13101d3d, 0xe04257ae,
- 0x6941477d, 0xca977e0d, 0xd9ce885a, 0x72fefc3b, 0xe4d738f0, 0xf8553e73,
- 0x35f2317a, 0xe7076fd2, 0xe776e0eb, 0x41ac9415, 0x4c38da3e, 0xe65cab7c,
- 0x8c6d95fb, 0x16ddcbe6, 0x05f4f343, 0x0296736b, 0x78017e5d, 0x0bd23de0,
- 0x3444674e, 0xe5c17f7c, 0x3df3c08c, 0x6ba81c8a, 0x36efa70b, 0xc58bce66,
- 0x48cda373, 0xf9ded0f7, 0x9c46a98e, 0x38da7be7, 0x7dfd29ff, 0x1638b5d8,
- 0xae8269f9, 0xf2876f5f, 0x5133cee4, 0x784df7a8, 0xf22c7e7d, 0xfe7870e1,
- 0xcf1918fd, 0x1e12c486, 0x70bd8797, 0x004f6ca2, 0xba27ee7e, 0x87fa9de8,
- 0x6fd1181a, 0x56ffabf7, 0xbe7c7c87, 0xfb8cc233, 0x230717e6, 0x0bcabde8,
- 0x3de81a6f, 0x7df95a9a, 0x7d742fd5, 0x7cbf4873, 0x22f74b5d, 0x47fe6fdd,
- 0xfba3f22b, 0x3b9d0355, 0xc373a87c, 0x6af7dc45, 0xc39c673b, 0x5f6b5677,
- 0xc3f2dbfa, 0xd25486ca, 0x3ff8148b, 0xbe2746bc, 0xe45e7bf3, 0x8ea1e272,
- 0xf77d217f, 0x0b976ba1, 0x17ca7cf0, 0x44c7ff07, 0xb7ee6b94, 0x79444f2e,
- 0xe5c5fee6, 0x7eee5889, 0x4a13c22d, 0xc05eb95f, 0x7eca3efd, 0xa8aef699,
- 0x90ad0ff2, 0x5d6d23e9, 0x3d07bd0b, 0x7b02e4e1, 0x130fbbe0, 0xac4e34b2,
- 0x65b7e13f, 0x7c05f7b1, 0xa457aeb7, 0x93f8e338, 0x01d51165, 0x814f7974,
- 0x7e2ff14e, 0x930ebf23, 0xf019c7df, 0x77f2f4e3, 0x8af6367f, 0x1aa37dfc,
- 0x83a1ee81, 0x5ee432fe, 0x8affad1a, 0x88df7a58, 0xc73bce89, 0x8b38478c,
- 0x3e6205de, 0x00ccd1e3, 0xaf71693d, 0x7400cba6, 0xe869d154, 0x57befd22,
- 0xacb0433c, 0xf173dd3f, 0x2134c8d3, 0xe7267f84, 0xe39434d4, 0xf81a7ec1,
- 0x8ade2245, 0x71e955be, 0xc1fe20a5, 0x15a59bfe, 0x287f9077, 0xd6937fd8,
- 0x8f666a2b, 0xe9dff7c7, 0xf78c2f3a, 0x8bf22369, 0xfae287a2, 0xffc1081d,
- 0x3970c66b, 0xbad5f0a6, 0x5dff1e5e, 0xcc6a5bd7, 0xe656b484, 0x9db003ab,
- 0x75fda0fa, 0xa377f39d, 0xb5dd611f, 0x60631145, 0x3bbdd6be, 0xfc813d8f,
- 0x66352e6f, 0xeed777e1, 0x1fcc2e30, 0xfee097a4, 0xf2e18ca6, 0x184f4072,
- 0x97cbdf66, 0xed5bfaf1, 0xe463397f, 0x87ff6cf3, 0x87607fef, 0xedc31d8f,
- 0x4fdc25f0, 0xe0f0edc2, 0xc791437d, 0x792ebb4a, 0x63ca879d, 0xc04877fe,
- 0xec79265e, 0xbd51231a, 0xb1b9cee4, 0xbbbf4764, 0x0d325620, 0xb4433f6d,
- 0x87f2626b, 0x71e8afb6, 0xa8e39f6e, 0x871ced11, 0x1749fb96, 0x99bef408,
- 0x9db9f7a2, 0xeee3e902, 0xeb1f9d4a, 0x2ef57829, 0x85d57ee0, 0x4c3f9d0e,
- 0x150fd418, 0xdca071fd, 0xa2b9502e, 0x637a2a4f, 0x71fb93af, 0xe5e78c17,
- 0x16b986b7, 0xd87d7ce1, 0x13f94615, 0xc67cf126, 0x0093479c, 0x813df45e,
- 0xfacbff11, 0xc1a742a4, 0xcea5e6fa, 0xd7be1b8c, 0x4fa64db3, 0x8352cf85,
- 0xbfd943f8, 0xdd39f39a, 0x1dfee1d7, 0x7ff7fb8b, 0xe0f2f4aa, 0x7eff6176,
- 0x466ff108, 0x1fbe121d, 0x19d15bff, 0x4aff3eca, 0x5efe0bd6, 0x23debf59,
- 0x72af8825, 0x7db8764e, 0x6fd64eb5, 0xce6f2b86, 0xbcfc7aff, 0xdfdf1e47,
- 0x8e779f8a, 0x9f43bff9, 0xf50bd01d, 0x4779c7a1, 0xc64fc941, 0x6b86bcc0,
- 0x739e743d, 0x1ecdf7b8, 0xb055f595, 0x7c745573, 0x41af4534, 0xc35dc8bb,
- 0xd1cd03fd, 0x7ec9afd2, 0xf7a5ae39, 0xef921e57, 0xd5783e73, 0xea2d694c,
- 0xd92e90c9, 0xd9de927b, 0xc6a3525d, 0xb77d19bf, 0x34aafdf8, 0xb157eb8a,
- 0x552bf232, 0xf19b4675, 0x7c737fbc, 0x2be431fb, 0xb4ad6edf, 0xc6feb03b,
- 0x237d72d1, 0x44d5b7f7, 0x618fdfd3, 0x95407a41, 0x57df5827, 0x645305eb,
- 0xfa70d13f, 0xc3ba2fc1, 0xd1e95cb8, 0x39d79f12, 0xd055dff9, 0xe0b97277,
- 0xbf23086e, 0x886bda09, 0x820d67d7, 0x5f3ddf1f, 0x4cfbd729, 0x13ff25ec,
- 0xcaa0d3c5, 0x2ca7ceb9, 0xd4dfb20e, 0xbb95a3f1, 0x33724b7e, 0x73148e38,
- 0x0d0b8e40, 0xd2268dc4, 0x60959079, 0xbc9fe24f, 0x5dd24b72, 0x1f80ff38,
- 0x71dc93f3, 0x9730fca8, 0xfb223e7a, 0x3cebc973, 0x7e2433fe, 0xc7933cd4,
- 0x95ddf08f, 0xa5c87e10, 0xc51d8f23, 0xb374a3bf, 0xc75d51b8, 0x3247ba49,
- 0xd208ff44, 0x518dae3b, 0x8a4719fb, 0xb23b8fdc, 0x7fa33053, 0x8f3ca46a,
- 0x9c31b77f, 0x7fe9eaf4, 0x79458ea8, 0xf9d3aca0, 0xcf5e7cb9, 0x04a5707f,
- 0x07f89bbf, 0xe3061cd1, 0xf507c5fa, 0xb0e249bf, 0xbbf09464, 0x63b408fa,
- 0x7561ffce, 0x2febce04, 0x2fbfc520, 0x3169daf1, 0x2bd23fa4, 0x0fd82dcb,
- 0xbfc80ba3, 0x45a5fb2c, 0x07230fd8, 0xdfc139fd, 0x6bbf976d, 0xfc853306,
- 0xf8f3af98, 0x6b82737e, 0xa6cfc126, 0xa61efe33, 0xffcefc83, 0xfe8d7402,
- 0x2c183ca2, 0x1a7f9fc6, 0xbc61d6ce, 0xf5de39a8, 0xb3d1f136, 0x08f9091a,
- 0x9338a7be, 0xfb44bf6f, 0xc6194e23, 0xfcf40a73, 0x0cea465d, 0xbcb46bd6,
- 0xc760098e, 0xb0093c62, 0xe6f18059, 0x53ae393c, 0x6c44ce10, 0x6b9d49ff,
- 0xf997be13, 0x5f68b764, 0x8f604c4b, 0x83bf77c5, 0x83824be3, 0x3895c5c7,
- 0x1f1f1293, 0x6024ce2e, 0xee9c053f, 0x8715b9f8, 0x25737180, 0xe7bec49e,
- 0x635720a4, 0xf7ce87b1, 0x48c8533f, 0x718b7fae, 0x21f9259e, 0xc83548e2,
- 0xd5cf2463, 0x394dc571, 0xfe9d10af, 0x3dc4f1b5, 0x1cb7d863, 0x13f3e2ff,
- 0xe8f8f78c, 0xb7b2164b, 0x51effabe, 0x5e76cfb4, 0x42f0fda6, 0x8fbe41bf,
- 0xecfdc7d0, 0x2ff23a9d, 0x2dbb005c, 0x3d9f603e, 0x3ddbcf9a, 0xe89539d4,
- 0x596f40ff, 0x2f3fd604, 0x777c6fe7, 0x48d9fcc1, 0xbd22bd97, 0xe3e78b07,
- 0xd03b0ed0, 0xbd404e2b, 0x975c5be5, 0xb1098fef, 0xd0f0c62e, 0xcbe44cef,
- 0xc05f947c, 0x07ea63e9, 0xd772fedc, 0x598a38f3, 0x72ee0b96, 0x63c1c93d,
- 0xf1411629, 0xc5435b9f, 0xa7ef5e59, 0x856aa37d, 0xbe1cf740, 0xa3a59c71,
- 0x82b7de5b, 0xd3e5d3fc, 0xe70e6e49, 0x9b880eb7, 0x15b7caab, 0x23879c0c,
- 0x815475ff, 0xe31dc699, 0xb3c12c29, 0xbff8ca87, 0x73b875c5, 0xfb3a086c,
- 0x695fdb16, 0x9fdb6eff, 0xb6ddf792, 0x6e3be6bf, 0x77de6d7f, 0xf1dee71b,
- 0xf57fe31d, 0xccff6e3b, 0xaf3f3c77, 0x73ffffe5, 0x70700300, 0x5cf83f9c,
- 0xdfbc7040, 0x1f3efc70, 0x36106d96, 0x6a4177f7, 0x42b57a8c, 0x462d5100,
- 0x0877a2ef, 0x71e24d66, 0x46658db0, 0xf0610eff, 0x5f32807c, 0x1db90b4b,
- 0xe8bfce61, 0x4e79858b, 0xc596f475, 0x126c9fa2, 0x82fd877f, 0x132acefc,
- 0x18cef4cd, 0xfbc04cd4, 0xa161cdf1, 0x7261aae3, 0x23edc37b, 0xdc9617cf,
- 0xfe9e3631, 0xfbc8436c, 0xfec82f1d, 0x23077347, 0xa51324bf, 0xafd47984,
- 0xd0fa8094, 0x3f3c2388, 0x76398baa, 0xda525e10, 0xa507b23c, 0x8b6fc434,
- 0x998def1c, 0xfe869d99, 0x0bd018e8, 0x14f81e31, 0x8e2905c6, 0x4a7aa665,
- 0xe58c1f7f, 0x31d16dfd, 0xe55bee81, 0xac4cf3f2, 0x9e2aa0f9, 0xebae8ef7,
- 0xc50265e3, 0xfe2396f3, 0xe574d2bf, 0x89934cef, 0xff37e618, 0x8ce29980,
- 0x48bd2627, 0x8956777f, 0xf331adef, 0x6ed1872a, 0x1cd63c06, 0xbbf4471c,
- 0xf7a4d0e4, 0x7733e9f1, 0x750f68a6, 0x17f1514f, 0x1dfd969c, 0x8940bf8a,
- 0xf65efd6b, 0xfe887c93, 0x221fba17, 0xab1dea51, 0x27f7b6f9, 0xfc10d8ef,
- 0xfee045bf, 0xb8afdf21, 0xde9c687f, 0x6e78119e, 0xaf3cb372, 0xdc78bacb,
- 0xecf7f142, 0xee33c793, 0x01976d11, 0x4d71d2f6, 0xcfb8f463, 0x094a6b8f,
- 0xe1c0b4c7, 0xf07de0d7, 0x35fbd5c5, 0xa285faca, 0xb2b1d6a3, 0x9fa7a31f,
- 0x12ff7ae2, 0x60ffe714, 0x30bf3f2c, 0xf1e390af, 0x0824d801, 0x1b12c9af,
- 0x711c7d51, 0x2a1b598b, 0x7605549e, 0xacb9e231, 0xccade399, 0x34975577,
- 0x7339fb15, 0xd309bfb4, 0xa9b49aa2, 0xbdaf6fe4, 0x9bfa84d8, 0x63e5dafb,
- 0x4cbf93b7, 0x713da12f, 0xefda9f4f, 0xa5a30ab6, 0xfd2784f1, 0x0cdf5a9d,
- 0x71e3ec91, 0x9fa24433, 0xb230aa98, 0x4c74635f, 0x2e78426b, 0xca8a8bd7,
- 0xcc22a6e3, 0x3e934416, 0x4523d458, 0xe83f97df, 0x51998b07, 0x74f38f1f,
- 0xe2aaf8b3, 0xcc35bb4f, 0xe7ae1af1, 0x5d5f1b5f, 0x74bbcef9, 0x971954ae,
- 0x9c4a2f32, 0xaaf8b874, 0x5dc1ce72, 0x2e75457e, 0x4ff62dc3, 0xb8e3ff47,
- 0xf2f46783, 0xfe334b39, 0xfe25611d, 0x8bf5ee3a, 0xe2557714, 0x45cf6eac,
- 0x3be1abd2, 0x0e3f7e3d, 0x06213f96, 0xf1e217df, 0x9ef157d7, 0xffef1e94,
- 0xd4bee999, 0xc5fdf21b, 0xe63f184e, 0xa1cde9bf, 0xa76bcbb4, 0x98b790f4,
- 0x45e77752, 0x387ed78a, 0x53ef1d56, 0xb49779e2, 0xd56333ce, 0x0e0923e7,
- 0xc369f78e, 0xddc6bfb9, 0x97bfa3a3, 0x48c77ee1, 0x73a4bbf2, 0xfaebdd19,
- 0x246ba4f2, 0xf1875de2, 0x38ba00b6, 0xe187b0ef, 0x9de7613b, 0xb38c30f6,
- 0xa73eeb8d, 0x752ff3c7, 0xfd1eaf62, 0x12718061, 0x1f744d1e, 0xf7cfc8b8,
- 0xdfd09676, 0x5c69bae3, 0x136fe3ac, 0xe59ffb17, 0x00210141, 0x00002101,
- 0x00088b1f, 0x00000000, 0x17b5ff00, 0x67534c5d, 0xb7b77cf4, 0x0bdf96e5,
- 0x72223f2a, 0xa29862c1, 0xa740c2dc, 0xac741631, 0x4a8a5c13, 0x66b50102,
- 0x2d09922e, 0xcbc2ccd9, 0x01893ad2, 0xb8b2c3dd, 0x26a9f364, 0x87b6e883,
- 0x6ead93fa, 0x840b0b55, 0xc3e22e25, 0x52c9719c, 0xba33330d, 0xd8dc6281,
- 0xdf39d85c, 0xd8bdb5bd, 0xa4da2cbd, 0xdf3f3d39, 0xfff9df39, 0x0730000a,
- 0xb956e7da, 0xf84ce706, 0xffc6cfa5, 0x4fa43d0f, 0xbddde38c, 0x1726eb4b,
- 0x3ebbf1c4, 0x036935c9, 0xce9c5df8, 0xf6002b94, 0x1c5ace87, 0xaccf1f04,
- 0xedfc2176, 0x4ab5dfc2, 0xd2f7803a, 0xee202d4c, 0x96a666fd, 0xd2afdef8,
- 0x4fe9027a, 0x8ed77bdb, 0x70045e97, 0x106b4196, 0xc12e55f9, 0xb8e08271,
- 0x4546fac3, 0x25c06e5c, 0x80fce938, 0xe89b34ad, 0xd9a55cc7, 0xf5f4137b,
- 0x00674539, 0xdaaf39e9, 0x09f489ac, 0x06a019bb, 0xc15c0ce9, 0x556bf624,
- 0x7fdfdb81, 0x4a4f2ad2, 0xdbf11c7a, 0xe036cc1d, 0x3c8150de, 0xf7c71e59,
- 0xfc028fd3, 0xd422666e, 0xb9e97402, 0xd77264b8, 0x3bafa8aa, 0x52816a3f,
- 0x5c5b4ca2, 0xdf5e30ab, 0x12acfd4b, 0xbefcf860, 0x170568e2, 0xa0172095,
- 0xe0373f4d, 0xb19c5bdc, 0x3d7fc010, 0x4698ca05, 0x4adf9113, 0x2a8e143f,
- 0xa769da22, 0xb6e5785f, 0x4c27f7fc, 0xcdba11ab, 0xbdc268fe, 0xc3829dda,
- 0x2eedc99a, 0x7920ce6d, 0x212fce31, 0xb74bd3a5, 0x51cc22ad, 0xf413e428,
- 0x025ac217, 0x36ada8d2, 0x0406ef6e, 0x7ca1b07e, 0xa488539b, 0x70d69da0,
- 0x6f50044a, 0xdf045395, 0xcd7b088b, 0x7c461281, 0x2be726f1, 0xbf11366f,
- 0x042aae4f, 0xcbe7e3f4, 0x09417916, 0x3464dfbf, 0xf3aa8e21, 0x3c52fed3,
- 0x953176af, 0x34ef6adc, 0x4193f1f2, 0xe3d6ee7e, 0xfc3433b2, 0x5ff72172,
- 0xcb9e0136, 0x8e38031d, 0x18f67776, 0x756eedcf, 0x3f7461de, 0x337fa0f8,
- 0x904d81ca, 0x6bde8907, 0x3b3aa394, 0x001ff18d, 0x43d62670, 0x0d41d537,
- 0xe7e1c641, 0x45d513e8, 0x582a0615, 0xbf373f11, 0xf974467e, 0xf062faf9,
- 0x5117f10c, 0xd80d567d, 0x024dbf28, 0xf5e10194, 0x9f412d92, 0x9eb9155f,
- 0xfc01650f, 0xde17d728, 0x7e30c83e, 0x915f8f7d, 0x4a6e505a, 0xd2ca97ae,
- 0x32f7b8e3, 0x401653d6, 0x036cf0f5, 0x9858fbc5, 0x794d1581, 0x28bef1f3,
- 0x6eb17d57, 0xbc4ead9c, 0x31dfe6a7, 0x3a44f9cc, 0x0d9a81d5, 0x61205f9a,
- 0x5817fcbc, 0x55ed1060, 0x491c3d35, 0xb2b9f8a3, 0xec8532d5, 0x23a50ed8,
- 0x6b126dee, 0x3c2ee118, 0x160ef840, 0xf6e7f090, 0x005b9d2b, 0xd8b754ed,
- 0x5bdc52af, 0x15aacc3a, 0x40dade88, 0x50596bf4, 0xff122937, 0xc4a97bc4,
- 0x55aa672f, 0x9eb3be21, 0x445f8df8, 0xade7aa44, 0x7865a514, 0x8d28cbb7,
- 0xeac1b37d, 0x551f500e, 0xf411a337, 0x3587476f, 0x84b5fe40, 0xce90cd9b,
- 0xbea041eb, 0xedcf5b7b, 0x328f45b7, 0x099f48a3, 0x5f916757, 0x16d3cc0b,
- 0xea356115, 0x7841c82e, 0xb005bee1, 0xb4e2bc1f, 0x73b94645, 0x05ac4c3e,
- 0xc49f4be0, 0x8327c861, 0xd7c03963, 0xc30e7cbe, 0x2fd3853e, 0x8c39cafa,
- 0x470e54f8, 0xa8356b7c, 0x959e3cca, 0xdd02fbe1, 0x7059a727, 0x7e6f9b25,
- 0xf9623ebd, 0x29ef0baa, 0xba3e7ba6, 0x93962eea, 0x5a20c7c8, 0x6ad6142c,
- 0x6730c9fd, 0x06c3e75e, 0xe881e3b2, 0x60a73a61, 0x460d61be, 0xef308421,
- 0xaa6febbf, 0xd96f9b52, 0xf193b802, 0x04faf467, 0x3cd02287, 0xfad24b7a,
- 0x5fc5787f, 0x48529e3c, 0xf31eb2b9, 0x413719e6, 0xd48fd20a, 0xbe7f736b,
- 0x4f18eb9e, 0xc0ddb8c0, 0xcf9c1278, 0xcbf8b56f, 0x74ad1f28, 0xa7e7248f,
- 0x0ac4faf4, 0xb432bb30, 0x4fbc329c, 0x600e6c47, 0x8f30b220, 0x4916211d,
- 0x19df3ec5, 0x244774de, 0x96e0c3d8, 0xd73c7e71, 0x46e6d87d, 0xb6f389a5,
- 0xf9268d33, 0xd54a5c13, 0x34592ce2, 0xf6d56fd8, 0x8b5d935a, 0xb56e0c73,
- 0x9471756b, 0xfbeb059a, 0x381679e8, 0xa9e7fd88, 0x734ebb18, 0xd001cf2c,
- 0x93c7c38b, 0xd1311e9f, 0xef990f3c, 0xd8e3993d, 0x0c8a791f, 0x3794391d,
- 0x41f6260c, 0x24077e81, 0x63946dc0, 0x6e611c91, 0x192dca25, 0x9f7d259f,
- 0x2f6e15d8, 0x01905777, 0xfe0450ba, 0x465f3483, 0x1a77502d, 0x639d77ed,
- 0x3bfa9070, 0xf70502e0, 0x1bf1e8fd, 0xdd214f87, 0xb4c98e73, 0x5783ba8e,
- 0x16ef9ef9, 0x78ff7827, 0xbc2d77c2, 0x3537831e, 0x0239887a, 0x403af249,
- 0x6eed1dfb, 0x6e687f46, 0x05301f74, 0xc36ed052, 0xbdafad39, 0x41404b0e,
- 0x1de8ac7c, 0xe79df091, 0x1cd1e419, 0xa5162653, 0x1980ff11, 0xafdc2502,
- 0xfd84baa6, 0xfc855f84, 0xd8cfed8e, 0xf4c0f1fe, 0x6bd52158, 0x0a235327,
- 0x8cb73fdf, 0x922f05fb, 0xed2fdf1b, 0xcd3361a1, 0x0643dc1b, 0x868eddb9,
- 0x713f343b, 0x4be918cf, 0xf9e7df0e, 0x53af2880, 0x7e062fdc, 0xe1633d8d,
- 0x58092c6d, 0x38731509, 0xca5536cf, 0x6eaddf3c, 0x1f18dcc6, 0x91757049,
- 0xc991709f, 0xc7e76f34, 0xf5aed1ab, 0x419fa341, 0xc877e62f, 0xd0c7d5a2,
- 0xa3f0163a, 0x94a2237c, 0x7b8ba1f7, 0x1cfd0cfc, 0x40aa62aa, 0x8c2120f1,
- 0x3e3d16c7, 0x6e1f802f, 0x70a9fc4b, 0xe01aae6c, 0x2b61ac35, 0x5ea353d6,
- 0x6e01000f, 0xd4e3f9a7, 0x8e23bd08, 0x2af9839a, 0xf9f8977f, 0xe055677d,
- 0xe814d4fc, 0x6bafd649, 0x6ab375ec, 0xf33dc90d, 0x7b2ed3f7, 0x47d3f792,
- 0x3b2276f8, 0x244ced7b, 0x2bbe30bf, 0x15efbfa9, 0x411ccb5d, 0xb5c981f4,
- 0x5975ce8f, 0xbea9ab22, 0x1cdc981f, 0xee91f18f, 0x8b82fda1, 0x91f516ef,
- 0x87cf8c00, 0xcc67e523, 0xa2769794, 0x125beec7, 0xe9b8e397, 0x0aa1fc69,
- 0xa1efdacf, 0xfda79aca, 0x17fa867e, 0x7f57cfab, 0x1dbcb26e, 0xa497f9a2,
- 0xe7d27e7e, 0x9912c01f, 0x3893cbf7, 0x6487b683, 0xaa3c75e0, 0xa67ef5ef,
- 0xc87b48fc, 0x06199dff, 0xed5cf895, 0x7ff4d0cf, 0xc18beba6, 0x9bf61d51,
- 0xf6f9b935, 0xfc39f427, 0x8a71c98b, 0xa4f2bdee, 0xdf824a00, 0xa0a982ab,
- 0x195df824, 0xf6e031ca, 0x7c8362c8, 0x81a978f5, 0xc2b1335f, 0xbd735fc0,
- 0x14bf80ca, 0x5f90675d, 0x01bd6bff, 0xc5bd30df, 0xcff1bf20, 0xd63c066d,
- 0xb95875bf, 0x4936e789, 0x0ae41baf, 0x157e8d3b, 0x2afd18f5, 0x15fa35ee,
- 0x37e25bd3, 0xdfacb7fa, 0xf64e5fe8, 0xde855c83, 0x91fcffa1, 0x2060fb49,
- 0x34f3c33c, 0x13029d35, 0xa8c44734, 0x66c21fb6, 0xf0a617be, 0x84a2e2ff,
- 0xd7a72c3c, 0x0919da3f, 0xb9d2b32f, 0x2b369d0f, 0x512edbbd, 0xfe3e600d,
- 0x2e76d705, 0x000ee017, 0x00000000
-};
-
-static const u32 csem_int_table_data_e1[] = {
- 0x00088b1f, 0x00000000, 0xe3e3ff00, 0x51f86066, 0xb8d3c10f, 0x72361818,
- 0x0143f821, 0x684333b7, 0x0606163e, 0xc77e2001, 0x9ef0c0c8, 0x38330491,
- 0x207eec10, 0x27880abb, 0x7dcf5071, 0xe52f1143, 0x5f5d9fa1, 0x153d76a0,
- 0x837f7818, 0x031083b0, 0x03309b83, 0x8408b483, 0x55045fbf, 0xc10851de,
- 0x99412e7e, 0xfa819f5d, 0xbbeb8d01, 0x00038031, 0x00000000
-};
-
-static const u32 csem_pram_data_e1[] = {
- 0x00088b1f, 0x00000000, 0x7de5ff00, 0xd5547c0b, 0x733ef7b5, 0x9993331e,
- 0x420f264c, 0x084f0042, 0x21842a20, 0x38880840, 0x8d069009, 0x8808089a,
- 0x420100ca, 0xa9113248, 0x676d5e97, 0x6ad11422, 0xa36d2d1b, 0x4101da97,
- 0x180d45a3, 0x1d0340e8, 0x5abc414c, 0x5b4a0a8d, 0x3c3141b5, 0xe878490a,
- 0xef5bd6c5, 0x33ef6b5e, 0x42667399, 0xfddfb6a2, 0x7f17dfbe, 0xecfb36fe,
- 0x7b5ef673, 0x7b5affad, 0x231fb5ed, 0xd313c659, 0x057c8415, 0x7213d77f,
- 0xf4842448, 0x0b3b4eeb, 0x108e3a68, 0xb0398e7f, 0xb4242055, 0x24edefef,
- 0x4db39085, 0x267355b3, 0x98c7fb21, 0xf2d3d743, 0x80fc81b3, 0xdd4f9699,
- 0xd121c479, 0x514ed57c, 0x3ed37282, 0xb1df7e2b, 0xde400851, 0xf1fd6e6b,
- 0xb5df34b5, 0x699b2453, 0x376424d5, 0xda425491, 0xa9fd842d, 0x6a5f98f1,
- 0x4daad965, 0xf682effb, 0x626683ca, 0x37b7dfa5, 0xafc86e89, 0x08042adc,
- 0xf76aaf6d, 0x5a00ca83, 0xd080b2df, 0x7e695568, 0x1a8a63eb, 0xfb03c84f,
- 0xb368ecfe, 0x67da7213, 0xfd82aa21, 0x491c6f28, 0x7b604548, 0x7dfa00e1,
- 0x65c136c5, 0x6c57f5a2, 0xf401d73c, 0xc3c93455, 0x8adf5a44, 0x47511b06,
- 0x22bfb6b0, 0x07cb5ed0, 0x794eded8, 0xfe57b428, 0x110a3de5, 0x1bb29fa1,
- 0x74a2abbe, 0x76b5bf40, 0xb1eafb4f, 0x559f8d3d, 0xe8f6d1cf, 0x0a2fd57b,
- 0xb562e82e, 0x8e807889, 0xb9d6dd8e, 0x7fa1db4f, 0xf8f0cab5, 0xdc2c7ec8,
- 0x08a8fd05, 0xed0a526c, 0x6526df40, 0xfaeec8e9, 0x87fc3456, 0xacfabe9e,
- 0x72889efe, 0x47da7a63, 0x3bbc3a59, 0xbb88415f, 0xa44bd691, 0xaa3a5280,
- 0x4207f9fb, 0xa1e32122, 0x96a8917e, 0xe4a9faee, 0x23fe0711, 0x0f949ff4,
- 0x81f1bdfe, 0x72dd99ad, 0x9904e95b, 0xbcedcb75, 0xea51cb93, 0x5fac8dca,
- 0xf20c7f4b, 0xf9d4f4a0, 0xee3e989c, 0x8374c34b, 0xfdbe454f, 0xd30237dc,
- 0x9f0b9f7a, 0xc3cbe93f, 0x8dcfa374, 0x22be53e9, 0x12be034c, 0x6fb36f7c,
- 0x7c5ba62e, 0x8cfe7c1e, 0x06d31caf, 0x7f3e0d5f, 0xd31ab7de, 0x3e3f3e6d,
- 0x1eb7d17f, 0x1d5f46d3, 0x5e403ba6, 0x05f26d34, 0xbe5dbdf0, 0xbe834c06,
- 0xc7be7c46, 0x11f4c417, 0x64c747ce, 0x3e512f92, 0x49c4dc38, 0x8a924ec5,
- 0xcd32f9dd, 0x7cb09527, 0x7cfe1dea, 0x3d53e685, 0x32f94f34, 0x6f9432a0,
- 0x3501f9a6, 0xfdf07cac, 0xf342c0a4, 0x7cacfd83, 0x02ee23c8, 0xa90fcd2b,
- 0x37c3e563, 0x68e20bfa, 0x9580787e, 0x542dbd5f, 0xabf9a360, 0x7679591b,
- 0xa76a9933, 0xcb10ecf9, 0x9ee1bce7, 0x39f34f1a, 0xfb9f2cad, 0x83aa7f81,
- 0xd8db73e6, 0x04ce93f7, 0x2d1ed544, 0xbab21d87, 0x6d595098, 0x4b70cff4,
- 0x515e6913, 0xca2e21ef, 0x6eadff1f, 0x43f29d29, 0xc8796376, 0x9bcb1f3f,
- 0xbf963714, 0xfcc32fe3, 0xe583d92e, 0x2c55fdc7, 0xfcb078af, 0x98bdff73,
- 0x2c7eca0f, 0x58fad4b7, 0xf963f15e, 0x58f5da80, 0x80391eff, 0x1f6b23e5,
- 0x4a3df2c3, 0x5f1fcb00, 0x1a7ba4fb, 0x3c11afcd, 0xd23c073f, 0x01649cb4,
- 0x4ad31a9e, 0x09d69e28, 0xf7e02e64, 0x8a3a0007, 0x0ae975cb, 0x3f8ee1ea,
- 0xfa0d3ee4, 0x90297f8b, 0xc3ccfa5f, 0xaffdf899, 0xd6991eb0, 0x4f5ef623,
- 0xba799bce, 0x2cde727a, 0x8069ead7, 0xfb58d6f7, 0x378ecf56, 0x79e9e927,
- 0x67ab42b3, 0xdf13d23b, 0xbce57eb7, 0xcf4f5935, 0x3d5a955b, 0xc49e907b,
- 0x74d3d1be, 0xa69fcf44, 0xb4fe6123, 0xfafd3d20, 0xf7b8cf46, 0xf719fcf4,
- 0x6f3f985e, 0x7de93d60, 0x3de9a7ab, 0xde9a7f3d, 0x08e7f30b, 0xdf506bf6,
- 0x7dee35fa, 0xbdc67f3d, 0x47cfe61f, 0xdf664f48, 0xa1f5d9ea, 0x3ebb3f9e,
- 0x04e7f30c, 0x6fac33d6, 0x48fdcafd, 0x8fdc9fcf, 0xc2e9fcc2, 0x5beaae7a,
- 0xd23ebb3d, 0x47d767f3, 0x817cfe61, 0x5bea8cf5, 0xa2ff72bf, 0x5fee4fe7,
- 0x0931fcc2, 0xbe98cf50, 0x54fc13d1, 0xa7e09fcf, 0xb0d8fe61, 0xa37df19e,
- 0xcf5daf27, 0x30f6bc9f, 0x9004527f, 0xd5bec4fb, 0xf3d76c13, 0xe61ed827,
- 0xe7ac20cf, 0x2bf5beba, 0x3f9e84ef, 0xfcc22779, 0xcafd8e19, 0xf40f7aa7,
- 0x7c4f5a10, 0x39ecf5cf, 0x9ecfe7ab, 0x8cfe61b3, 0xd3a67ac6, 0xaf7ab27a,
- 0x9e875267, 0xc23a933f, 0x7ac3c9fc, 0x9eadf466, 0xfe7a1d3d, 0xf308e9ec,
- 0x73f91f27, 0x35fadf53, 0x9fcf53a9, 0x3f8c9d49, 0x4cd70f63, 0x3a72d075,
- 0xfa44ba16, 0xdc67b5c9, 0x45e6816e, 0x4e8bcb27, 0x44bf0117, 0xd20dfcd4,
- 0xf7e916ea, 0x39896df6, 0xbf48930f, 0xfa14a0a3, 0xb1bd4f15, 0x2123bf48,
- 0xe7e74e2f, 0x7493ba24, 0x01a2e4f9, 0x95fbf7ba, 0xf795d10c, 0xaeb57b9f,
- 0xa393dd3c, 0x4f9467cb, 0xa83fbdd2, 0xbf9740a6, 0xba0df562, 0xb7fd33f7,
- 0xeb59f2ea, 0x3fbdd76f, 0xae916eac, 0x0afacafc, 0x8155f95d, 0x35fcba95,
- 0x7baeff0d, 0x03e3547f, 0xc1d1f2ba, 0x63e57587, 0xf2eb8f42, 0xa93d0f63,
- 0xeb7c7f7b, 0x84f95d66, 0xcaebcfa3, 0xd0edb627, 0xb93dafe5, 0xd9e7e0c7,
- 0xf0d7ed9d, 0x27b808bc, 0x574fefcc, 0xc50bdfd0, 0x0657982b, 0xdf8fd1d8,
- 0x3d54bf1f, 0xbcabe54e, 0xa14d58b2, 0x129905f2, 0x0fe7ae3a, 0x8db2bf28,
- 0x9277bf3e, 0x7d274ae7, 0x19e2af7e, 0x9fe18ced, 0x24083c52, 0x04d5520d,
- 0x41fcb1a9, 0x20b1e199, 0xc7e1cbe3, 0x535ef7e8, 0x9a44f0d7, 0xd7e62fef,
- 0x129e5e03, 0x38d3884c, 0x7bc0d491, 0xf3826671, 0x73330782, 0xe047f69f,
- 0xaa20fd75, 0xbd774287, 0x1a4f65eb, 0x6b9b19f8, 0x1f83f6df, 0xed106eb2,
- 0x9e4200d7, 0xf90ede16, 0x07efd287, 0xd0f34d2d, 0xf50accfa, 0x57db23d3,
- 0xb123fb68, 0xff6806fd, 0x37da1ec5, 0xb5d3c90f, 0xae5c196f, 0xd0a2df6b,
- 0x1fd49df6, 0x6f23fda8, 0x12610a9f, 0x7f0b2f21, 0x83cdf6c4, 0x07fdb1cb,
- 0x895f3a15, 0xdc2e3f6c, 0x77f4107e, 0x3e3fb41f, 0x4c87ff46, 0x707ff7d2,
- 0x0affbe85, 0xb52bffeb, 0x71fb78c7, 0xe116ffd8, 0xe0ffeb18, 0x337fd60a,
- 0xbedc37ab, 0x43ffcc23, 0x7b37ffd0, 0x64d67fea, 0xa8afff7d, 0xccdff7d4,
- 0xf6a77fda, 0x8edf6f14, 0x9c2bbfed, 0xa2bffd62, 0xcc57db12, 0x47e0171e,
- 0x09ab88c9, 0x40c9f6d0, 0x03fa986a, 0x903b685c, 0xa0a2488e, 0x6151e426,
- 0x6f71d208, 0xe7dbc31c, 0x686f1471, 0x9cf8fc6f, 0xcb65a804, 0x8e2ef3a5,
- 0xf2db15f5, 0x584bb015, 0x4b2be74e, 0x4165a938, 0x364df111, 0x28c30398,
- 0xd1411dbd, 0x0db2f90f, 0xfec005d7, 0x4164cd79, 0x91277c09, 0xf4ff3c10,
- 0x4736a367, 0x2e98cbf6, 0xdbdb93a9, 0x3c2df422, 0x23202a8f, 0xd37d286a,
- 0xbbe30401, 0x79d31ff3, 0x8bf3a110, 0x833ce80f, 0x227e4850, 0xa23ef6b3,
- 0xb7c825f3, 0x9412f9d1, 0xdf8b5213, 0xa70eead1, 0x384bfa51, 0x4c4b1ffd,
- 0x43be3f5f, 0x7ea9f808, 0x31bdfe7e, 0xb05d4f38, 0xde9946da, 0x7f42c74d,
- 0xfb4f2eb2, 0x753907e5, 0x360736ed, 0x155fc80b, 0xaa8a55f8, 0xf5e2c849,
- 0xddd79419, 0xc54fbfef, 0x7f594e9e, 0xfdcb711b, 0xae77fbe9, 0x69390612,
- 0xe6ddb3bb, 0xb84e361c, 0xd7a84c33, 0x4794c324, 0x634dadc8, 0x29035a64,
- 0xb71fcb75, 0xb33bb445, 0x5d9f9f48, 0xc877cfa2, 0xe944d96e, 0xc81528ea,
- 0x9cef5a26, 0xad72fab9, 0x8929bb1d, 0x57b799c9, 0x9be8ca92, 0x3c0e6903,
- 0xe79ca276, 0x1ab7d93a, 0x32c5de3d, 0x3c82c29d, 0xc808fdfd, 0xef829fd5,
- 0xfd64eedd, 0x573bd236, 0x684bb8b6, 0x142ee73f, 0x236f8003, 0x67da7ffd,
- 0x69b29b73, 0x32a7feba, 0xc7f74531, 0x8f3cff48, 0x328d3fb1, 0xcbf3c38c,
- 0x6e19cf8d, 0x3ffcb9da, 0xf4d07c06, 0xd283e004, 0xa3e39c7f, 0x7c32aedb,
- 0x9b9ecd78, 0x4f5d0f01, 0xcae50488, 0xc71b72f1, 0x3d3a92cb, 0xc8f1082e,
- 0x7365c720, 0x23879c85, 0xe3873070, 0x1ebd5960, 0xbf127737, 0x2e431e9c,
- 0xfa6c36f3, 0xdd1a9a61, 0x6e390fbf, 0x1fb23fe6, 0x4fdd13f9, 0xb8bba726,
- 0xd1acee9c, 0xe5c6df97, 0xeb97277a, 0x0ec7fadc, 0x6e41f350, 0xe8320357,
- 0x4d3ef7bb, 0x793a6d9e, 0x8d3cb87a, 0xe5c5de74, 0x98f7d779, 0xb6f48d3c,
- 0xd5b67971, 0x90c9905f, 0xf48d7a68, 0xd51d582d, 0xb9e4051f, 0x9e5b1fd0,
- 0xf27e60de, 0x55e788f1, 0x8f92338d, 0x8ad9e847, 0x7e5d5286, 0xee9e6079,
- 0x0bf565fd, 0xea4be575, 0x17caeb96, 0x975bbfaf, 0x9effe85f, 0xab05fdee,
- 0x77e5756b, 0x2ba43cd6, 0x98fe5f9f, 0xf3cf3f2e, 0x39fdee84, 0xcae93773,
- 0xa73c9767, 0xb4599f2b, 0x752f975d, 0x6fbdd6ef, 0x2dd577da, 0x8e37cf80,
- 0x89fc0488, 0x30275ccf, 0x4b99f82e, 0xc73bc176, 0xbae22a7d, 0xd30237dd,
- 0xe2173e93, 0x0f2fb4fe, 0x8b608ed3, 0x3a09c61d, 0x89252e2e, 0x6e9bccd4,
- 0x103f5dae, 0xe38269c6, 0xacd316e9, 0x4264ff5a, 0x91526d7e, 0x0af5c5df,
- 0x44258d09, 0xd386c180, 0x944625d1, 0x5e52f5b7, 0x727f0d4f, 0x5b717974,
- 0xc7dee7ec, 0x8ce15e17, 0xd6e97711, 0x7e019253, 0xbbf7274f, 0x0295e58d,
- 0x4e7c8929, 0xd7a803c8, 0x5bb8f8e7, 0xaffd30a9, 0x405e91dc, 0x0d0f901c,
- 0x7b5cb9af, 0xfde4148d, 0x3a592701, 0x7f565e3d, 0x13bece8d, 0x03f6ca88,
- 0x3dd58dbc, 0xad70d15f, 0xfb33bbee, 0x1abba445, 0x1844c56c, 0x375e4b16,
- 0xb50e1d81, 0x0a399e0c, 0x61bf74e8, 0xd82a7182, 0x33c2fd0f, 0xe3e4a2be,
- 0x04afc812, 0xb9be93d3, 0x3cfbb698, 0x95f71e98, 0xafb1fa63, 0xdf36d306,
- 0xf23f4c6a, 0xc0fd31f9, 0x3fd31eb7, 0x3d30eaf9, 0xf4c7abea, 0xd3005f3d,
- 0x4c06bec3, 0x6235f1df, 0x620beada, 0x6373e1da, 0x6f5de9aa, 0xc7c93bb8,
- 0xbf80d3e1, 0x78eb3818, 0xfaed5560, 0x3b38ddc9, 0xd6afba6f, 0xcf9bb03f,
- 0x8e66f5c5, 0x1e1d5487, 0x336080be, 0xf297ace2, 0xb0e3997a, 0x9763efa7,
- 0x6bff377b, 0xc4de36f0, 0x8a6fccfc, 0x653c6eb7, 0xd594f018, 0x89fa9e1b,
- 0x34f1bbe3, 0x7e64e4de, 0x8fd3c70f, 0xe6311fa0, 0x84e00515, 0xd7f08bf4,
- 0xa71636dc, 0xd409ebe6, 0x6a716553, 0xfa1af6de, 0x75fa2b6e, 0x6e301181,
- 0xb7f11f9c, 0x3f8710e1, 0x274cd47f, 0xcff73d7d, 0xf99e9388, 0xcdf9c6ee,
- 0xbc1d00d2, 0x5b9746c6, 0xad5f18e3, 0xe8445226, 0xb0d0f6b8, 0x468b5c67,
- 0x49225e62, 0x55c07df0, 0x5ea31f1a, 0x7f032bea, 0x9aebe37b, 0xa1b3cff8,
- 0xff27f6be, 0x78e90b9e, 0xe397f313, 0xdbf4445a, 0x1769e849, 0x2e3c37f0,
- 0x10695d99, 0xa61aeedf, 0x086fd138, 0xf3fcf5d8, 0x376e7cd3, 0x3ca21eff,
- 0xb771c56a, 0x37f90d2e, 0xe9e1f3f4, 0x6f5fe07d, 0x7e37bdf6, 0xf80c0a2e,
- 0x37b3f097, 0x5d9bd8fb, 0xc5f56e24, 0xe77fed20, 0x0f3951be, 0x870760ab,
- 0x0f5a79b0, 0xa5d6d6fc, 0xff7cf48c, 0x33b8e26b, 0xeb71c355, 0xf9db4260,
- 0xf9cbeb88, 0x40132ba6, 0x9c1eb42e, 0xe4ed741f, 0x722f7e7e, 0xafa99b3d,
- 0x2fac6bcf, 0x68c22141, 0xa2bc6eda, 0x6a420490, 0x22a3cf44, 0x6d5c6fce,
- 0xec1b887e, 0xa8d6b9b3, 0xfd3c6f30, 0x1a854866, 0xf385f1f5, 0xc0769e87,
- 0x9a656c3c, 0x863c79c9, 0xaab4b51c, 0x8f69d331, 0xf504af9c, 0x4275f3f9,
- 0xe7ce2351, 0xa35984d4, 0x3a719c60, 0xbb050f8f, 0x56a9869f, 0xc534cacf,
- 0xddda9c79, 0x09590dd3, 0x6017fe6c, 0x225b64f6, 0x3ed39bda, 0xcfffbe0b,
- 0x7aa7a7a6, 0x69087a34, 0xc0589f22, 0xacb2d39e, 0x51efdf9e, 0x0a8ba41f,
- 0xb34937c4, 0x37cc39fc, 0xaffff4ad, 0x12bd4086, 0xbd7ab5e6, 0x3cdfa28d,
- 0xd36f9e1a, 0x53bf47b5, 0xf857b5b5, 0x93bd67ae, 0x7b86bb48, 0xf957bf2a,
- 0x068af0fa, 0xc7ef53ab, 0xdef2ea26, 0xe753ba60, 0xc03fc2d7, 0x2411af71,
- 0x87255fc0, 0x5f64a75f, 0x0cda780f, 0xbf9843c1, 0x2abdec08, 0x51e29b4b,
- 0x015a5d51, 0x34f28b9f, 0xfe73abfc, 0xa9d4ed4c, 0x7a0265d7, 0x54a4be46,
- 0x20a9f2e5, 0x4c44cde2, 0xbf98bf34, 0x4569a8bd, 0xa75b8c31, 0x50fe6c4c,
- 0x301c4a46, 0x75272e3f, 0x3d4f10b9, 0xa0454c4b, 0x3bc9679f, 0xfa11b18e,
- 0xb0544e78, 0xf5ebc5d2, 0xdf9d3c7e, 0xeacbf2eb, 0xe4a5f9f5, 0x075854d6,
- 0x2641ba5f, 0x86ec0101, 0xe898fcbe, 0x91977ac4, 0x6e30759a, 0xbbc79cff,
- 0x94893916, 0xfaabe941, 0x17732fcd, 0x892e3a52, 0xd6d5fc6c, 0x31279771,
- 0x05662bfa, 0xff7d3714, 0xb7b1a693, 0xbf440b51, 0x7583ac0f, 0x71297f6d,
- 0xf2d1256d, 0x1bf4bafb, 0x81fcd5e9, 0x4e64f5a8, 0xe0834a47, 0x63b0c40e,
- 0x3d30248a, 0x6aeef6e3, 0x838ba9c9, 0x223e42e4, 0xf18df20e, 0x78744294,
- 0x067a2cda, 0x7e19e34b, 0xd4820f00, 0x4dbe78a5, 0xf55169fd, 0xfbf52d5f,
- 0x903fd627, 0xabab9fd6, 0x4a9ff73f, 0xfa28d0ff, 0x5fd5620b, 0x76179bf5,
- 0xa93da9f9, 0x0e67e978, 0x53c9c742, 0x8baa5d52, 0xeb72b5ca, 0x9a6e1d0f,
- 0x0efc949e, 0x80a9ebc0, 0xde4b1458, 0x76f2c3ab, 0xbb8805db, 0xfd693fc1,
- 0x11ff9fa7, 0xdf3e23c6, 0x9e313b2a, 0x990e60d6, 0x37563ea9, 0xd9262f2d,
- 0xf2c63f98, 0x21139e0f, 0xafe3d41f, 0xa59fd9e2, 0x8a0afec2, 0x0a5f1e14,
- 0xbd5b3fa1, 0x1c42d3e5, 0xea17489f, 0x683bf191, 0xda1252ff, 0x424a85bf,
- 0x13a53974, 0xce5e04e3, 0x8fd36f17, 0xf80e89ce, 0xdfce1b57, 0x7397ef8d,
- 0x14b974ff, 0xa36ed29f, 0x26409eff, 0xf8d43d80, 0x3fcc0241, 0x2fdcf35d,
- 0x7a518fb2, 0xdd796cce, 0x49f04421, 0x8df3a3d3, 0x47b57f8b, 0x76ded9ed,
- 0xf48b3d50, 0x4af1b483, 0xbabf720d, 0x7295a599, 0xd52d71c8, 0xb24dcafb,
- 0x571f4fcb, 0x33f4f0be, 0xf31c424f, 0x30d7668f, 0x178aff5a, 0x8937bc0e,
- 0x976c57e6, 0x37598a53, 0xb76a42ec, 0xfff4bc5c, 0x72dd39d5, 0x80f978a8,
- 0xcf628ea2, 0x96ea4fef, 0xfbed8ac7, 0x9a2a3215, 0xf71b531f, 0x18f66d2b,
- 0x563c6972, 0x9ac27e08, 0x814bfee7, 0x7da9e2f8, 0x199fe902, 0x0e9b9f83,
- 0x7c001fa0, 0x39723942, 0x7866e585, 0x02e54bdf, 0x93935af6, 0x6e46fcb1,
- 0x753ea04f, 0xc5f9e224, 0xf2fdb43d, 0xd05d993f, 0xef17f2ff, 0x4e3f4071,
- 0xd70c3548, 0x2a61fcbf, 0xd972afd8, 0x94c4ea9f, 0xea7f2f3d, 0x65b788bb,
- 0x15377f6f, 0x9dc449e3, 0xc7b1c26e, 0x5bfd0e9f, 0xf63671b2, 0x20f1296f,
- 0x297fcaee, 0xfa680496, 0xb68a4499, 0x0e8bd0c7, 0x362717ae, 0xf68c4753,
- 0x1fcc04ef, 0x23bf5ec1, 0x422abee2, 0xffa026ee, 0xa17de5df, 0x3fe6021e,
- 0x1d3930b3, 0xaf5c4c90, 0x8919b7a8, 0xb60f2dd7, 0x0f4e7c82, 0x7587f772,
- 0xfc912f16, 0x57c21f9c, 0xedf15eb4, 0x7c99fde9, 0xf2e52a88, 0x3f38e8ef,
- 0xfbc39e15, 0x9adbb2ad, 0x7c8efbad, 0xfeddd995, 0xa53ede2a, 0xe7c60e3b,
- 0xfdb1a913, 0xa6b201d5, 0x8e9c74f7, 0x7e8457c0, 0xf2df7d33, 0x4d6fd310,
- 0x45a503df, 0x30f17e50, 0x0ef81fd3, 0x574a3fc6, 0xef963fa8, 0x3da0259f,
- 0x58e6f3a0, 0xe645bd7a, 0xd72fad7a, 0x96e94270, 0xbc088484, 0xc849fd40,
- 0x595f5c7f, 0x177e81ba, 0xd0bd2f61, 0x3eba239e, 0x71976f4d, 0xfa053ffd,
- 0xfeb74d7f, 0xef4c8d93, 0xfc9ff67b, 0x07236e2c, 0xf412799e, 0x5fa7ea95,
- 0x4b957d05, 0xdd7fdfa0, 0xeddef2d6, 0xdaff4f54, 0xdea9e9a8, 0xb4f51a7e,
- 0x1278c77c, 0x6aff4f4b, 0x7a989177, 0xd4c79f4a, 0xe21b7b53, 0xfd94fff8,
- 0x2897f8d4, 0xd9a7f9eb, 0x68f89ec3, 0x4a2f87b4, 0x68d3f22a, 0x61dfe90f,
- 0x3f8d1b92, 0xde1a770d, 0x755d7e2a, 0x309dd805, 0x85dc352e, 0x2ee1a971,
- 0xe3ab7e2a, 0xcffcb19f, 0x74a26b10, 0x4fd4b7cd, 0xa5847981, 0x2bbce08b,
- 0xeb88967f, 0x048b96a6, 0xc880bef5, 0x7df06fb8, 0xf078cd53, 0xfdfca743,
- 0x52f7b3b6, 0xbf13e77a, 0xebe5d6cc, 0xbfebf464, 0xc9abeafc, 0x73b73fb4,
- 0xeca7cefe, 0x63ca89be, 0xe28424ae, 0x24f39d28, 0x82484fe2, 0x3e40acf8,
- 0x7e63a08e, 0x7f0f3eb9, 0xebbb5253, 0xaffdede9, 0xa6f90f3b, 0xb63edbe9,
- 0xa42ef576, 0x2979b143, 0x20a54f12, 0xef3fca57, 0x04302138, 0x549b52ed,
- 0x7aaf3112, 0xb79ddb9c, 0x0f1ccda7, 0x9cfe4bfe, 0xe70c0951, 0xeb7e7183,
- 0xbcebf6e5, 0xbb004d5f, 0x0b39be7e, 0x0fe7afe7, 0xb58f8e2d, 0xe385b26f,
- 0x575f00ec, 0x750bb4e9, 0x277dc522, 0xc5ff42e9, 0xad92b76f, 0xbefdd631,
- 0xe4f89b2f, 0xfe8dcb83, 0x736e5489, 0xd1e70e39, 0x2272134f, 0xa6eb36e4,
- 0x5a239253, 0x71f7f00e, 0xdbeb3cc4, 0x0d7017f2, 0xb6b16dfc, 0x88c49615,
- 0xf384fdd7, 0x5f2bca6f, 0x69d5fee0, 0xf012f9cd, 0xd45d9a71, 0xd41e39c5,
- 0x192475f4, 0x794d7409, 0xe3a3f965, 0x87bd8e29, 0x57ec1744, 0x539f36f5,
- 0xbbe6313c, 0xfd427e46, 0x901e47e0, 0x23afc89d, 0xe012c972, 0x56d991eb,
- 0xef96eb02, 0x58aaae2a, 0xb3374e74, 0x523cc878, 0xe138d9f2, 0xe6fe3eff,
- 0x150f89cf, 0xb79c7e50, 0xd1c0fdb3, 0x7f7a63f8, 0x6a0429de, 0xc8a1c005,
- 0x004229f2, 0xc48564e2, 0x0164e8f3, 0x48fafdf5, 0x2c1f95fb, 0xd5f6017d,
- 0x4e0b3754, 0x96af2d13, 0xb1c014da, 0x025db837, 0x9546fcff, 0x20de31b8,
- 0x159a8cd5, 0x203ad711, 0x96afc84b, 0x277eddbf, 0x2cc2f7f0, 0xdcba0133,
- 0x6039cf23, 0x3cd0bfdc, 0xa7a0f516, 0x47c1fd7e, 0xa0934ca6, 0x30f8821e,
- 0xc530a1e2, 0x9ecfcba6, 0xa8ba064a, 0xdb98a6dc, 0x90c571ee, 0xe18532df,
- 0xcdf6cfac, 0x7d99fff2, 0x1bed4c9b, 0x691cb5c3, 0x512b46df, 0x2c7fadf6,
- 0x56b2b6fb, 0x58b80fed, 0x3fab37b9, 0x6be575c8, 0xb2c5fa4b, 0xd8faaf6f,
- 0xbe35fdfc, 0x2073bb0f, 0x23a36fb5, 0x6a40dbec, 0x6dc462df, 0xffcd15d3,
- 0x59bec5ec, 0xeff477fe, 0x316fb055, 0xd1523bfa, 0xe6a2b7db, 0x456fb45a,
- 0x7edd4503, 0xcf852ca8, 0x6fb47ae7, 0x1b367f0b, 0x16cbb2f3, 0x57c03f03,
- 0x71af6fb0, 0x80ed073b, 0x38a45b9d, 0xdabefdb1, 0xdabed2b9, 0x3e25ffb9,
- 0xe56e766b, 0x239e7620, 0xcecc871a, 0x7664ccad, 0x665ee56e, 0x630e56e7,
- 0xdf68ce76, 0x1beca20a, 0x047abefb, 0x8be71efd, 0x83b8bf99, 0x95cf1796,
- 0x7efa165d, 0x5f9daab1, 0x691f19a8, 0x122916ef, 0xdeca39f2, 0x39e1ceb9,
- 0xddecde90, 0x86ef605b, 0x0a1b1da2, 0xc7c4647a, 0xba6d430d, 0xbe4772fd,
- 0x4bf5ff68, 0x107f2fa0, 0xbefd9e71, 0xf68bcd89, 0x163ed17d, 0xa7376ef4,
- 0xc98551e7, 0x47e7c3b3, 0x24753a7b, 0x43aaf7d3, 0x544e3871, 0xbfac0937,
- 0x15010bf7, 0x5dbf81c6, 0x9df2f9c2, 0x797cd97b, 0x83f1998e, 0xcec89bfc,
- 0x2c16505d, 0xdcc3c08c, 0xd718154b, 0x0b112b9c, 0x0e0baff8, 0xfc0a70dd,
- 0xd69705d6, 0x00bbbfa3, 0x89bec39e, 0x32eb6ded, 0x2e77bb68, 0xa1de7017,
- 0x1798efed, 0x3f7a97b6, 0xec737e76, 0x38531a7d, 0x13ee533d, 0xa72fb002,
- 0x2f107db7, 0x239bfd72, 0xc8bf21b6, 0xf8cc625b, 0x985ef6a4, 0x97c62e4f,
- 0x6fcc55aa, 0xf289f30e, 0xf9a2154d, 0xbc5d2544, 0x2f9bb530, 0xfda4ee77,
- 0xbfb9e94d, 0xf7a2df1a, 0x3e71b8e1, 0xd7bf80b3, 0x3e341f13, 0xf39ff547,
- 0xb8a9fa9d, 0x8fc8c79f, 0x5ccecd46, 0x5e814643, 0x3cf26fbe, 0xf4701e3a,
- 0x942f949f, 0x0de6dbce, 0xd9e79dd3, 0x9372f9c5, 0x54b4d8e7, 0x6a48f815,
- 0x97b76700, 0x4a903f6f, 0x3b8b77fb, 0xe6420733, 0x78a6ffb3, 0x62d0fe20,
- 0xa83b42ed, 0xcccd30e1, 0x8e57f870, 0x9c0323c3, 0x09bc70d3, 0xafd44e0a,
- 0xf1cec190, 0xca5e647d, 0x6f5fd067, 0x144f8f90, 0x859fa09f, 0x720578da,
- 0xa9bcffa1, 0x5bc5c999, 0x6e5ca023, 0x81075d26, 0x8229d5ef, 0x2aab442b,
- 0x21ff6e0c, 0xd57ab7ec, 0x9e839f4a, 0xcdae0b97, 0xf4f61d8c, 0x963898d4,
- 0x3718e162, 0x9b8c4609, 0x24bde00b, 0xc668b7d8, 0x1efceff6, 0x3f4647b3,
- 0xf5b98a65, 0xe533d008, 0x09b264df, 0x7fe8061e, 0xfa303811, 0xf07a5131,
- 0xa4fdf735, 0x2afdfb72, 0xfc0ec1c8, 0x1fc052ee, 0xf8d886f2, 0x257b95a3,
- 0x4df21a75, 0x8e904393, 0x1c98e32f, 0xc8dfa960, 0x3cb45be7, 0x6fde1fe0,
- 0x7ef86416, 0x324f9506, 0xfa6a9a2d, 0x7786a0e2, 0x2fcd1ab3, 0x47a42788,
- 0x6fb00dd8, 0xd596eb91, 0x4f91b7c1, 0xeea2ac37, 0x27cd1a99, 0x07603e4d,
- 0x28afc72f, 0x8fee09fd, 0x9bea7fb9, 0x599a4fea, 0xacfb3faf, 0x868faf5d,
- 0x015eda38, 0x4e690aed, 0xf20fc5d4, 0xd65e6ccc, 0xce20f562, 0x5d935ebb,
- 0xfb68d59b, 0x0b971573, 0xcf2257cc, 0x0e854def, 0xd3b1bac1, 0xfad13e4a,
- 0x2ce1843d, 0xd5783c72, 0xf5f941ea, 0x5e04ff54, 0x98fe4f7f, 0xb40eff96,
- 0x5516fb07, 0xf1c67b7d, 0xf5278b48, 0x6669afd6, 0x69be3f66, 0xbe76b4bf,
- 0x9783baee, 0x4dfc62b4, 0x2cd87f5b, 0xc32e7e7a, 0xd048b8fc, 0xca5073ad,
- 0x9ff2fd71, 0xe558ff50, 0xfc43f532, 0xfcf94428, 0x31a7b6ea, 0xf9f67d5e,
- 0xfe833763, 0x48adf8ac, 0x3a9f542c, 0x1093c5b6, 0x80a207db, 0x24d1509f,
- 0x91167ae2, 0x2333b942, 0xd6420cfc, 0x14bc7e30, 0x62a7768f, 0x4c503987,
- 0xf5d8afbf, 0xddc43649, 0xf6601e3a, 0xc73cffcf, 0x1b2dbfa3, 0x242bfbd6,
- 0x946f8eb6, 0x9f1cbdcf, 0xe6db7667, 0x1a16fd82, 0x8ad779d8, 0x39b239c6,
- 0x6550ce22, 0x7b5c5996, 0x59f70db7, 0x70139ffb, 0x17d0329f, 0xdb52ce79,
- 0x39e679ff, 0x822b9766, 0xcdce0072, 0xef6c3456, 0xc5783880, 0xaffa3351,
- 0x0a7386de, 0x5f53a7f8, 0x23fd017a, 0xc5d4506f, 0x8fe2a341, 0x0cc8620d,
- 0x2aa6b3f1, 0x7f3403b4, 0xb18df30c, 0xc5bdf0e3, 0xb4bc56c9, 0xb29f9777,
- 0xcfcbc570, 0x6cdcf03a, 0xc60756eb, 0x1f2e1b21, 0x378a8fff, 0xd9743e36,
- 0x8e69e378, 0xf5995f8f, 0x21a435eb, 0x9490e319, 0x1892dcbe, 0xee308b71,
- 0x29ecf85f, 0xb33b0f58, 0x014fafe3, 0xb8ff95bd, 0xe07dd4f0, 0x3ab3ed8f,
- 0x3ef6bc61, 0x13d7047f, 0x7376efb4, 0xee78ef3d, 0xe9875c59, 0x05d9a3f8,
- 0x3dec75b5, 0x23d61831, 0x917fb63a, 0x55db710a, 0x3ce3a77b, 0xa9ced56d,
- 0x798c49fd, 0x6e029680, 0x07587d03, 0xa5abca32, 0xd03065a9, 0x1bca9679,
- 0xfc70b65c, 0xc58ab1b8, 0x371e55e3, 0xbd7b16de, 0xdc4e2a2d, 0xeb96f334,
- 0x926efc60, 0x43e92a5d, 0x9530f8bc, 0xc83ee8e3, 0x9a43db6f, 0xbf298fbd,
- 0x2a0ff0b3, 0xf20df7a7, 0xc969acfb, 0xb2849eaf, 0xe31ee4a6, 0xf03ea1c5,
- 0xdbcf5b4d, 0x6c7f7662, 0xf1d9bd06, 0x83cf8c6b, 0x835ce8dc, 0xd9f0bc74,
- 0x9cb38860, 0xc2eebb94, 0xcd7b33fd, 0x62aa2fb8, 0x3fa8fbef, 0xc6df3b1d,
- 0xd7c232f5, 0xf8483ad5, 0xf083ad8f, 0x4b779c39, 0x73338b3c, 0x5a1c43fe,
- 0x1e73e075, 0xd638666f, 0xc53dd0e2, 0x6c2dd39f, 0xceb6913f, 0xcfe5b558,
- 0xf5c4310a, 0xd3903c85, 0x8baecbb1, 0x02707c6a, 0xae44261f, 0xf38ec4a7,
- 0xb8ddd787, 0xe07e40bc, 0x4bd7857f, 0xc4207e68, 0xfbc203cf, 0xe83d8624,
- 0x61d6c4d8, 0xfbd8e43a, 0x95f5b50f, 0x4f418b14, 0x575b7d03, 0xaffe8de9,
- 0x26191fcb, 0xf9a3e39b, 0xd8cbe674, 0x45827c76, 0x61fb76f8, 0xf5a8852a,
- 0xcfac0b7f, 0x60531d37, 0x638da1fe, 0xf0cf7f5a, 0xe799f279, 0xb1ef3c45,
- 0xed05b1ae, 0x545ed1b8, 0x341f1613, 0xd3833bd2, 0xe3641d5b, 0xf11d99c4,
- 0xe7ad3b01, 0x11bb2bcc, 0x8edbd5cf, 0xdf5a7e29, 0x959786c1, 0x52fd88b6,
- 0x22044e30, 0xe2f33fe8, 0x7eb66cfe, 0xc6e5e6c4, 0x76f0e676, 0xdbb1cdbc,
- 0xa73b6ef1, 0xbf85676b, 0x77e76151, 0x69dedf2e, 0xea073dc9, 0x132add3b,
- 0xba7fbfd8, 0xc40a2c51, 0x45927f68, 0x4cf2e2d6, 0x8978e86f, 0xd9e31de7,
- 0x493146f9, 0x557aff41, 0x84d1de7c, 0x15154814, 0x6a6b14e2, 0x35d2fed9,
- 0x81a577df, 0xde24d7bc, 0x756deb86, 0x533afe06, 0xbf10f99c, 0x192b4e70,
- 0xda357007, 0x49b7449b, 0xf8aaff47, 0x1e70fea2, 0xc7f72d7f, 0xf3c2e488,
- 0xb0a3a297, 0x3a23fa08, 0x8c6b4e4d, 0x5ed905f5, 0x7c113c42, 0xf3fa9d91,
- 0xb1cbe492, 0xeda3d42a, 0xb4b4a9a3, 0xa742c41e, 0x4b0ab71b, 0xfcddec79,
- 0x77ec4e59, 0x4f3e36e7, 0x99d51370, 0xc183f6ca, 0x23b9d1b8, 0xc68c9b21,
- 0xe29b890f, 0x0b9fc6d1, 0x07eff6db, 0x41d6b47b, 0xcf5489dc, 0x31555728,
- 0x8e8dce0b, 0x805908b1, 0xfe6f73d3, 0x6d6bd696, 0xd0fe7116, 0x85f6d769,
- 0xcf4113ba, 0xdc11e46c, 0xfe5007ff, 0x4e9bb92a, 0x0240bee3, 0xdf76a4af,
- 0xb45d312b, 0xfce7bce3, 0x10071646, 0x98bfd5c9, 0x2470e2cc, 0xf1f3f400,
- 0xf3a70564, 0x7ef0a43c, 0x3ae78299, 0x72f6bd96, 0x3930886e, 0xb92bf244,
- 0xb4be7f48, 0x86c8eade, 0x77e27975, 0x9939c2b7, 0xbf5f5e32, 0x53b9fd12,
- 0xd07e7eaa, 0x4f5c25d3, 0xf0b4baff, 0x32bfd810, 0xebb452cf, 0xcfeae7fd,
- 0x59f71aa5, 0x2fb0bd21, 0x93ddea8c, 0xabbef442, 0xd6c7e6f2, 0x7a397cc1,
- 0x011915ff, 0x482ac9fb, 0xe7ce0b9c, 0x97d068ac, 0x79bb03ef, 0xd893b015,
- 0xebef172f, 0xc36dbf95, 0xdc79317f, 0x0aebf965, 0xe2dbfbc1, 0xecb2ae31,
- 0x9f68c5b5, 0xfe7a69cc, 0x7f3d555a, 0x7c8c236d, 0xde39f3d4, 0x369be7a5,
- 0xcf89ecff, 0xf3fa7909, 0x4b3e46be, 0x021bc784, 0xb5110e2c, 0x09b82dbf,
- 0xd9f236e9, 0x0b76d7c8, 0x7ce2f75f, 0x96257fa5, 0x81b26654, 0x842974ee,
- 0xa3d4617a, 0x80487b0c, 0x094a0f7f, 0xae57e2d4, 0x69efb478, 0x19423fbc,
- 0x7a06cefb, 0x2ebd0224, 0xd13823d4, 0x46de7607, 0x5bb6703e, 0x909f5841,
- 0x59e9aeaf, 0xfe94f79e, 0xfa22d8fe, 0xefc046d5, 0x762bd468, 0x0489376e,
- 0x9e8264d6, 0x6e309cba, 0x6a2cfa37, 0x9ff70499, 0x286cb510, 0xc4e27bdc,
- 0x1f7cc7f4, 0x8248e74a, 0x431b82fb, 0x3ec15317, 0xeba738fd, 0xde7bb066,
- 0xec04a425, 0x6c71cde0, 0xa798b29e, 0xdc2ab77f, 0xed7b4eae, 0x59bef87a,
- 0x785ce156, 0xcb56f367, 0x4507c830, 0x9bc47fbc, 0xb4157758, 0x332cd1df,
- 0x812ccaba, 0xd8bf1061, 0x1dbb632b, 0x7cf19365, 0x6b6fd865, 0x3a40fb66,
- 0x2c713f9b, 0x1de7b08e, 0x055e9fc1, 0x4c27acf1, 0x0c9ff7b1, 0xfdf89ab7,
- 0xfc70df35, 0x7fda8c6d, 0xf6cadc37, 0x0878fdc7, 0xe97547ed, 0x330e3e79,
- 0xcb96b87d, 0x6f4bf7f9, 0x6ceffec0, 0x47185416, 0x6e3bc50a, 0x39fb451c,
- 0xe3c488f1, 0x392e2c30, 0x9bbfbf8e, 0x1f80d2b8, 0x0223af13, 0x3fc4f3e8,
- 0x35b7dc12, 0xce2966ae, 0xa0eb7eab, 0x7586cf38, 0xb05b8a52, 0xe8ec6773,
- 0x956f8fbf, 0x9c4cb874, 0x798169e6, 0x04a384e6, 0xa384eded, 0x123cf2fa,
- 0x369db110, 0x4f6bf3fd, 0x5605f3ea, 0x49076f8e, 0xf86f7c05, 0x3aba4452,
- 0x01eb88bb, 0xd9676f5a, 0xa6efbc00, 0xda9c22f3, 0x6297e9a5, 0x7e71ac51,
- 0xf281e026, 0xe08509eb, 0x52c4f2f8, 0x5fefc63b, 0x05715a59, 0x7f9feb8c,
- 0xac4573cf, 0x7611e073, 0xafed8129, 0xe2502772, 0x0ad7ae29, 0xe975df8c,
- 0xebf63125, 0x70d6386b, 0x7d39e1ad, 0xd6279c69, 0xad0fc4be, 0x58290fcc,
- 0x6050423a, 0x204fb65e, 0x078ed7c0, 0x373d973c, 0x640f27e3, 0xe0de3d00,
- 0x2db3f405, 0x963b82cd, 0xeacab19f, 0x07bef360, 0x1fb0b5fc, 0x94aa5eef,
- 0xae7b1f60, 0xabce0377, 0x2ba0876e, 0x5ba9677c, 0x9770e788, 0x8866c858,
- 0xe9754b67, 0xee03c6b7, 0x4758cea7, 0xc4b27de9, 0x427e7284, 0xfefd2176,
- 0xbe30422a, 0x03b5e3c0, 0x229b266f, 0xc0ff23cf, 0x95b0ff9a, 0xd5b0fbd6,
- 0xf6497289, 0xdfa004b8, 0xe5ffdff5, 0xbceebd00, 0x0e394664, 0xed1ff3e0,
- 0x4f9cadbb, 0x6bdce3a3, 0x3ca4ff01, 0xe5c3f505, 0xc965a871, 0xe5012f70,
- 0x54758c7b, 0xf24d2fcd, 0xe946db50, 0xf09bb249, 0x76e22e7e, 0xdf6d4eab,
- 0x45efe119, 0x090903e8, 0x69a5f604, 0x166854f6, 0xff14f142, 0x10f62ce3,
- 0xca9233cc, 0x0929bf69, 0xf5616256, 0xc2674ab5, 0x9f71aae7, 0x2e65c53b,
- 0xb8226e1c, 0xd2360daf, 0x9d9afd42, 0xdaf3ecd5, 0x66f68244, 0x605263ed,
- 0xa367ad0d, 0xf88566d4, 0x2d5ba649, 0x4944ad80, 0xbde78a92, 0x7d339507,
- 0x32fd65b4, 0x50afec31, 0x0ff31f65, 0xf5ac4171, 0xfcc38fba, 0xfc00bcf2,
- 0xf5d43eca, 0x2e35b80a, 0x75cf6b2b, 0x79fe82c0, 0x3f706ed8, 0xbb8f90ac,
- 0xb1353f13, 0xeba102fd, 0xb69dbf71, 0x1bb4668e, 0x15de0f54, 0x953eaf41,
- 0x75cf9e08, 0xb572ffd3, 0xf611772d, 0xb9938f83, 0xf4169ac1, 0x2d91c4dd,
- 0x5f6bc780, 0xa238bbc7, 0x3784c8ec, 0xde806b3c, 0x8c3f6a6f, 0xa2131878,
- 0x33e5c8fa, 0x2a7b8552, 0x943ce19c, 0xfcf95ab7, 0x949bdb8e, 0xd1fd71d2,
- 0x3338e5af, 0x6bd357f2, 0xace7bea3, 0xb198b576, 0xc13200d6, 0xba110ed8,
- 0xc0b350e9, 0x8221bd1e, 0x4f99df7f, 0xbae942f1, 0xfc0ef417, 0x19818bf3,
- 0xcd0d9b80, 0x7e82b1df, 0xa3f1e37c, 0xb11d7e99, 0xa227fabf, 0xf6cac3a7,
- 0x0f803b87, 0xf22ffbd5, 0x728423f1, 0xea95cb0e, 0x443c6447, 0xe711bdc3,
- 0x81ac5e9a, 0xf0b2330b, 0xf9011c7c, 0x0a234288, 0x3a364cd6, 0x7f9aaf81,
- 0xcfec26bd, 0x9eb9e226, 0xded64e55, 0xcbf3591a, 0xefe8165d, 0xfe82f8aa,
- 0xf5175b97, 0x97f3a25e, 0x5e30f388, 0x67e70ba9, 0x090a4bde, 0x7e0b7f24,
- 0xb9eb6905, 0xc4a57e3a, 0x0757e0c5, 0xfff8d1e8, 0x6d93fb27, 0x6a83cfec,
- 0x25ae7f77, 0x51b6df9e, 0x4f7b90bb, 0x05c33a7d, 0x89169c39, 0xd95ebe0b,
- 0x51e77e93, 0x2dd706fd, 0x7a0d9f4a, 0xea99b13c, 0x8f5a29b1, 0xde09725d,
- 0x6bdae75b, 0xd4677b43, 0x5fbc8beb, 0x7be31b5e, 0x1199e88d, 0x5371f307,
- 0x5dc060d7, 0x8b924ab6, 0x41d9b129, 0xb6afad91, 0x91c6eb26, 0x8fddf1e8,
- 0x2ff35276, 0xf8d2bf35, 0xe4afd85e, 0xcf1686e3, 0xff148214, 0xc7f7dfb4,
- 0x5f01e679, 0xfcd938d7, 0x829a0e13, 0xab03fcbf, 0xcfa00ee7, 0x0710fb46,
- 0x42231aeb, 0x125e2c99, 0x4f5be9d2, 0xe83365fa, 0xc248634f, 0xc74fcc71,
- 0xcf4261b1, 0x4b8bf4f4, 0xab52f464, 0xc0ca7e02, 0xab1ac25c, 0x7bafb826,
- 0x47bb3660, 0xbb0e6066, 0xe3dff1f7, 0x7dcc7ad8, 0xd0dc6c71, 0x4b581fd2,
- 0xc97e81ee, 0x739bf112, 0x40dd39be, 0xef0fc42a, 0x0ddf738f, 0xb914b0fc,
- 0x4e191fb1, 0xde2812e2, 0x0146a432, 0x552437f6, 0x1b8b02aa, 0x40ee3612,
- 0x28cbeb4a, 0xdc61f356, 0x16b6a0ca, 0xcd1920e2, 0x6cf5cfb1, 0x5dbf9388,
- 0xea29abbc, 0x3886ca3c, 0x541f6fe6, 0x8b937f68, 0x709479d9, 0x0f9286dc,
- 0xbf88edfc, 0xf1db3b0d, 0xf50f5b19, 0x1b3e3227, 0x30205fe7, 0xf3c497e8,
- 0x6488543f, 0x91f9c24d, 0x99df04df, 0xb23f382c, 0xd8235711, 0x58e297cf,
- 0xe09d755c, 0x9ff90f5a, 0xb8165d48, 0xffc3476f, 0xeadf278f, 0x8e2be892,
- 0xc367ceeb, 0xc3ea959c, 0x8eaf20f3, 0xaf87e7c9, 0x0d741ec5, 0x8b737866,
- 0x9aeffdba, 0x4eb282dc, 0x8788566e, 0x78ebda44, 0x1df7e705, 0xe60b9c42,
- 0x05cb1d43, 0xe160a746, 0x7d2403e7, 0x43db869c, 0x27781c6d, 0xb7e9132e,
- 0xbc18ff11, 0xb2e2cc1f, 0xfd1a3d00, 0x86a473e1, 0x73c69e73, 0xe15969a0,
- 0xee3a4db8, 0xe0512d93, 0xbe722f8e, 0x97fe86d9, 0xdff4c1d7, 0x60fe8788,
- 0x6e732e3c, 0x15abf292, 0xae5fdbe4, 0xdb7b0449, 0x0ee3ceff, 0x0f91af90,
- 0xcc7f829d, 0xfadf5841, 0x9f731c83, 0xa5a69688, 0x7f05280d, 0x0d3d2d34,
- 0xbd79ed53, 0x07198a5d, 0xeef5cd3c, 0x9eb5e31b, 0xc3f30c58, 0xb39fd0d3,
- 0x73de779e, 0x6177baa3, 0x8e3ebc7c, 0xd15ebbf1, 0x8026a8f8, 0x5de7759e,
- 0x927eec09, 0x90ff7644, 0x8373c993, 0x492a52cf, 0xd1b9ef91, 0xdc47e91f,
- 0x1181e633, 0xad9320d8, 0xbf166b88, 0x5065ced1, 0x97e4c743, 0xb9fae8c8,
- 0xec294c6f, 0xcc6d919f, 0x30dc5d2e, 0x254bdb1d, 0x5387580f, 0x312d2ebe,
- 0x73ae20b9, 0x1c9fd176, 0x517c1470, 0xe02e6b46, 0x91e888fd, 0xd431e36d,
- 0x65b7f38d, 0x88409573, 0x9ea52a42, 0x9726241a, 0xb8eeb819, 0xff7314d0,
- 0xe6c6ddb3, 0x4e71cddf, 0x06d2d34c, 0x8d8d9697, 0x39440fcc, 0x7aa47869,
- 0xae365e98, 0xc5309cfb, 0x6fb40a47, 0x994ea7d4, 0x40dd6104, 0x9c2987b3,
- 0xd63d9aff, 0xf40521f4, 0x4b01d60c, 0xe4d47da8, 0x9abf8439, 0xfc4af18a,
- 0xfd5f9a13, 0x8e03e602, 0xded00aeb, 0x8f5ffd51, 0xd98232fa, 0xd3d82b6f,
- 0xe7653888, 0xc107f7de, 0xdf87dffa, 0xeb0f10bf, 0xba917f40, 0x9e7ec1e2,
- 0x78ddffbc, 0xe1f43738, 0x715e2cfd, 0x351e6197, 0xfaecc6c1, 0x6c92b6a3,
- 0xdf02d7e8, 0x227ae77f, 0xb3564970, 0x01e58d75, 0x5c9e70d1, 0x554f2f7f,
- 0x9ee1b263, 0x5bd63f1e, 0x32e7efc5, 0xcb92a1ca, 0x4db6f961, 0xed0a864a,
- 0x7eb6dc7b, 0xa08f6e70, 0x14d93cbd, 0x8d11f38f, 0xc6db459e, 0xfa773028,
- 0xaf99b34b, 0x6ccab77f, 0xc317705c, 0xf387967a, 0xc1e748cd, 0x6fac367d,
- 0xfed8cfc9, 0xadb48594, 0x7abed4d5, 0x0b79d99a, 0x7c963f63, 0xa7f616a8,
- 0x90f25bc2, 0x7862f380, 0x4e3a7eff, 0x883f5679, 0x7ec65eeb, 0x31cdef1a,
- 0xe6d55f9e, 0xfcb8cb53, 0xf50788cb, 0x6b5cea2f, 0x369f5b33, 0x1fceffc6,
- 0xff61b2ca, 0x91fe3c6d, 0x79c0264b, 0x86871de8, 0x3eee5c24, 0x1efc33e5,
- 0x1a57b826, 0x823e93d2, 0x654db669, 0xbf3464f7, 0xaae02f7d, 0xef1db83a,
- 0xf6bafb19, 0x94f9aaa6, 0x6b30f603, 0x91fa8776, 0xa43f01ab, 0x97f9d04c,
- 0x282e9bd5, 0xf8cbbe6c, 0x7e7f0770, 0x1b37f407, 0x3fab59f6, 0x7ce54c9a,
- 0x317ca547, 0xbf321ef2, 0xa6ee7f4d, 0xfad5fdaf, 0x2b5bf5a9, 0xd1fbe2af,
- 0xf869df8c, 0x2c78e6cf, 0x6eba52db, 0x9ad16500, 0xf30627bd, 0x4e7868f3,
- 0x1197c347, 0x4948cb3f, 0x8110ba61, 0xb147e693, 0xcb3c9a5f, 0xfef216ea,
- 0xbaf8d3fb, 0x8fc41a5e, 0x118dc37a, 0xdb721d1f, 0x2367d060, 0xb87a3a3e,
- 0x3627e45f, 0xa99427e6, 0x678842ee, 0x9d763751, 0x394fc233, 0xbc1dd529,
- 0x0b6cf40f, 0xf8c479c3, 0xd7b99ccb, 0x57cf1c65, 0xe1fdabc6, 0xdb943d3e,
- 0x6017d844, 0x5a7bc3f6, 0xb73eed3b, 0x7c04a86b, 0xb4f4c22f, 0x7435c4af,
- 0xd2f97768, 0xc0382e27, 0x411cee8f, 0x11d38e30, 0x77f7a7af, 0xf9e91136,
- 0xa543971d, 0x3f8ffad4, 0xd2da8d83, 0xda1dfb8a, 0xa071e4bf, 0xb145cd2f,
- 0xfe27f9fa, 0x86f7e11d, 0x4cf5c5dd, 0xe02c979b, 0x99abbb72, 0xda2f67ad,
- 0x30f28a58, 0x437ad7ee, 0xd0661670, 0x638d3caa, 0x51bf9014, 0xdf5c4b3e,
- 0x51607dc1, 0xf08f2272, 0xdc1e017d, 0x2f5653a7, 0x28c3dc62, 0x93bb7cbf,
- 0x6b57a39d, 0x42b71e70, 0x790ebd8a, 0xb5f4dda5, 0xc7ec63fa, 0xed0126c2,
- 0x65fe91a4, 0x0dd76a5b, 0xd67fd020, 0x847ee8bf, 0xe25da206, 0x55fad4fd,
- 0xff785c46, 0x50fad440, 0x7224f5db, 0xe6f2f53f, 0xbf2fb8c5, 0xb09ff69a,
- 0x85c61238, 0x7a35fa34, 0x1428fd8d, 0x8816c8cf, 0x5c16aee3, 0xefa39f60,
- 0xeeeb7327, 0xb0d85c3f, 0xafc8207f, 0x66fb625d, 0xce02fddb, 0x203f3b0b,
- 0xbd607d6c, 0x3d69eb80, 0xed99afca, 0x8e59ea07, 0xe7dc0a35, 0x8851b657,
- 0xf565c729, 0x11a1aef9, 0x60796a76, 0x8ea86b86, 0x1fdc46de, 0x54a1a2b2,
- 0x9d807cf7, 0xfdb893c7, 0x48df4dee, 0x38cc624d, 0x9a5fa693, 0x97fa69be,
- 0x3cd9576f, 0xcc1c0a47, 0xfebb04c8, 0xa84bce18, 0xcedabe5d, 0xd1e607d7,
- 0xb89faa57, 0x581f5ba0, 0xb3e4c792, 0xe7c62c44, 0xca57d93d, 0x1e7d9d07,
- 0x7fefd767, 0xd1fdb6f8, 0x85e9a1fa, 0x5ea33fd6, 0xc5aff918, 0x30728cd5,
- 0x82e81ff2, 0x7b1bc1f9, 0x0407816d, 0xa74677d0, 0xe915af7f, 0x702b48ec,
- 0x3fc839c1, 0xce9407c0, 0x31dbf4ea, 0x9496235f, 0xc75e71da, 0xfae34de7,
- 0xe1c83f16, 0xab4ed67e, 0x473b2c7b, 0xb80c9254, 0x36db5298, 0xf8a4e2d3,
- 0x2174647c, 0xe53a2bdc, 0x1099b6b1, 0xc8f25efa, 0x5fb84298, 0xb6d6ca7e,
- 0xea2e2d77, 0x2d8ed5d7, 0x57e5fb84, 0xf680dd80, 0x10db9607, 0x89e53d57,
- 0x9579f2e7, 0x5cce3fbd, 0xcbdd9f32, 0x292bcacc, 0x3c742d8f, 0xfc3c3cab,
- 0x959fa0f7, 0x1045c5ad, 0x8884a9b7, 0xaacf1d37, 0x2d908edc, 0x93e78d9b,
- 0x6cdb7be8, 0xf076559e, 0xc5ad2eeb, 0xb0f2adf1, 0x0e44fa84, 0x44a8b8b1,
- 0xdfa56afb, 0x7ea38dad, 0xdaefe52f, 0xe60b8b24, 0x1fb9f4af, 0x936bb5e6,
- 0xf11aaadc, 0x7c1f935c, 0x35ac7407, 0xfdfb75b1, 0x684f0daf, 0xbf78ccbb,
- 0xc32647df, 0x67c5a43c, 0x9e58d4c1, 0x7cf138af, 0xc589be44, 0xbfe58c35,
- 0x512cecd2, 0xd5170033, 0xaf161487, 0x4fe07fa6, 0x17a61156, 0xeb9d84d5,
- 0xf04c3aaa, 0xa1f54b1c, 0x02c73c42, 0xd63ffcfb, 0x735ff0e1, 0x571ba58e,
- 0x92054f8c, 0xc08a5930, 0x55bb34f5, 0x9bfb1797, 0x887f4046, 0x7382e718,
- 0xa9866218, 0xbfed0766, 0x294d0578, 0x4ba7243f, 0xea5afb84, 0xe1114ea6,
- 0x3235a9be, 0x62623fdf, 0xf6b5cce7, 0xb47f389a, 0x64bdb4f4, 0xecfdc807,
- 0x36b9ad5b, 0xf3817eb6, 0x6bbfad61, 0x60113704, 0xe08075de, 0x149c80f9,
- 0xb3fc02d2, 0xe01e7711, 0x127b3ded, 0x375b37e7, 0x90197fa6, 0x97a6ccba,
- 0x57f78a55, 0xfa84591f, 0xdd304ae0, 0x6ff34993, 0x7cb197d9, 0x2642e986,
- 0xf6dbf085, 0xc6bf9672, 0xcd086174, 0xce5f6bbf, 0x49c0b0f2, 0xf623b607,
- 0x48ff428a, 0x7de38768, 0xc01afb9b, 0xf16264b7, 0x0515e917, 0x7db453d7,
- 0x44e96b21, 0x1ef8f00b, 0xa7884dd8, 0xdafc16d4, 0x02eaf106, 0x3dfb0ab7,
- 0x837bf336, 0xf1aa69be, 0x7099b455, 0xf02903fe, 0x4fce43ff, 0xc9a42e4d,
- 0x8bec0d7d, 0x6bee8d25, 0xb4ed1d60, 0xf606d3dd, 0xad1bec66, 0x377ec053,
- 0x5ca7f3bd, 0x0afc9fbc, 0x37d8cf56, 0xf6909ce8, 0x066fece7, 0x8f5c8a6d,
- 0xffcf6d3e, 0x62a3be14, 0x5054ff40, 0xaa0ee49d, 0x44dc6b87, 0xbe60b255,
- 0x362f9fac, 0xb69f6611, 0x82ce4ad9, 0x10c0c95c, 0xf6455b8e, 0x6bc84e92,
- 0x26b49cf6, 0x9b5f69f5, 0x845abd92, 0x23293971, 0xd8117478, 0x7bb1dfff,
- 0x1bb7043d, 0x14be73f7, 0xed8cd7be, 0xfec1b10f, 0x177fd010, 0xf2a7f349,
- 0x2f8b17f8, 0x88dc9493, 0x07ef3079, 0xec1a7efd, 0x63609f37, 0xbf6807fb,
- 0x63abb044, 0x58e98a88, 0x81656788, 0xdf983d28, 0xde8f1a43, 0xdc7feb47,
- 0x9649e2ca, 0x0f8ba206, 0x38b3c766, 0x8b074ccb, 0x8dc9c60f, 0x10ff82b7,
- 0xc1388e77, 0xa5fce87f, 0x22fee122, 0xae2379c9, 0x11341d8f, 0x3959f7a0,
- 0x99f584e0, 0x7524abae, 0xafaee933, 0xab5bcb93, 0xbd4571f7, 0x7372e20a,
- 0x7d3fcc1d, 0x7ee09f36, 0x4a53aee8, 0x934dd600, 0x91352c32, 0xf92b9ff1,
- 0x0b390f77, 0xfe21e332, 0x408e37bc, 0xc5f613fb, 0xbf0e4099, 0x056396de,
- 0x3c04bc3c, 0x32487ca2, 0xfa86ef3b, 0x7a502e92, 0xdb39f133, 0x63a92f27,
- 0x277f2812, 0xe15760dd, 0xad67a9be, 0x7ee0378d, 0x784c17d0, 0x8a4beebf,
- 0x2ff214ab, 0x609d579d, 0x6ccd9f7f, 0xbcfec38c, 0xbcfec260, 0xf575d714,
- 0xc3f74a4a, 0x4a7c5823, 0xec0911b2, 0x502a64b2, 0x73cfa6ef, 0x493cd9e3,
- 0xe3cbd3b3, 0x2e3cfe99, 0x200f3c10, 0x7f51f299, 0x9d3afdb4, 0xebf7045d,
- 0x2ad59bf4, 0x4ae38e02, 0x4851b8b5, 0xe4cf7884, 0xdb7e8212, 0xe78cbb64,
- 0x307f6072, 0xc29b1b9c, 0x0f94efed, 0x813d712a, 0xdc30e04a, 0xe762d97f,
- 0x75f168f7, 0x4e39ebc5, 0x226d23ae, 0x5dd791f8, 0x07882e22, 0x8a497f5d,
- 0xe74e47e9, 0x10233ef5, 0x97be8129, 0x0095517f, 0xef44f977, 0x2faf4de7,
- 0xe0e3053c, 0x39e18b54, 0x8f4889f8, 0x4ff7cf1b, 0x4fee1a77, 0xd6f3e78e,
- 0xe47064c1, 0x1d6d313e, 0xf238ba68, 0x8b53f409, 0x65f01714, 0x3efdddbb,
- 0x5bfa7bac, 0x38b97265, 0xb63e33e2, 0xa5c46f9f, 0xa1ff5c38, 0x2e03b4f8,
- 0xf3afaeae, 0xafbf695b, 0x07eaca1e, 0xb95d821c, 0xc92997f2, 0xf1cde760,
- 0xa968decc, 0x97c03b86, 0xcbe18133, 0x3f5bba39, 0xe267c557, 0x01be9a7b,
- 0x3d9ea0ee, 0xac7e6072, 0xd78a6575, 0xedd78055, 0xb3f38276, 0xe65ba51f,
- 0xed699e82, 0xf86789ec, 0x6aeba637, 0x7587ee57, 0x6c67ff1d, 0xe03698bc,
- 0xde391292, 0x3864e87f, 0x3375aaf3, 0xf3fda66f, 0xbf5c65da, 0x8be7e549,
- 0x1e92f122, 0x770d1781, 0xef16761d, 0x7533f0d1, 0x3c22bbf7, 0x2119fa9f,
- 0x9e124cce, 0x99f86887, 0x9c3867e2, 0x138d8872, 0xddc7e1bd, 0xe6069ce5,
- 0x87370a31, 0x280a6e18, 0xb7f7f03e, 0x118fc07a, 0x8af54f37, 0x3cf12f9b,
- 0x53bdaafc, 0x186ebb2f, 0x7df7e9f4, 0xb689d31a, 0x8f82fbd8, 0x56ef7e72,
- 0x06cc6eea, 0x8993717b, 0x48937f9d, 0xe714adc6, 0x7bb4e3bb, 0x20b26be5,
- 0xf6625dbf, 0x19b37b67, 0xbb76cfed, 0xd77183c7, 0xcb9e0b34, 0x0da79226,
- 0x06bbf5f6, 0xdfc8d458, 0xc72ef8d1, 0x9a1ca361, 0xeffc0278, 0xf3fc98d7,
- 0x1b81ca6e, 0x197a48ec, 0x93b8c393, 0xe21126ca, 0xfdfc8e42, 0xe04faf80,
- 0xc3c02d4e, 0x25ef0571, 0xa0ae3eaa, 0xbecdfc5d, 0xbe210bb7, 0x9e1feda9,
- 0xc2ede7b0, 0x3ef09182, 0xdc6bd9c3, 0x3c796aae, 0xd276014a, 0xc4fd2f70,
- 0xfd2679f0, 0x8025f2bd, 0x72eeec9f, 0x73eb7de3, 0x1d1784e0, 0x736d6e77,
- 0x60b3074d, 0x4e7d6fbc, 0x9b1f9d88, 0x1d3dd47d, 0x7dd4654c, 0xb70f1ceb,
- 0x715dd691, 0xdffdf462, 0x163e7c74, 0xd7fbf10f, 0xc1c40a43, 0x6b9a951b,
- 0x26a27cd8, 0xa68fec0b, 0xf947f877, 0x08e1f8d6, 0xc3ff7ddd, 0x88f3f8f8,
- 0x38f7624b, 0x4eb829b5, 0xdf6128f8, 0xeb71f235, 0x39f7ae2b, 0x0f9c5eab,
- 0xb96c70f6, 0x576dd71b, 0xfe7c4539, 0x64b21213, 0x125cbc81, 0xca3477ae,
- 0x4213d4ea, 0x2a447bf7, 0xe5a76119, 0x8dc5fa7b, 0x70eff685, 0xf00e4c78,
- 0x43aea42b, 0x4984ddf0, 0x6ec16339, 0x4dacc7db, 0xa34c7186, 0xb455d29b,
- 0x874dded5, 0xc7275746, 0x858e1dc9, 0xf678c726, 0xe0a1d81d, 0x7c0b76b1,
- 0x71a0e1af, 0xff2639ef, 0xfc803c08, 0x2c20f1bb, 0x07c6bc80, 0xfcb54f1b,
- 0xa10f1f3f, 0x1e9f2081, 0x087100f1, 0x43c2b7c7, 0xc355f016, 0xaed7f503,
- 0xfe68fe02, 0xf6c2a35a, 0x0d796a1d, 0x7cf64507, 0x10a5cf7c, 0xf6443fdc,
- 0x6d39d959, 0x57ae224f, 0x6d64ecbc, 0x3ed87e74, 0x6498ba98, 0xebf983b2,
- 0x8c89efda, 0x57e80909, 0xe106a18e, 0x06e3f40e, 0x71743da2, 0x4176917e,
- 0x71377b80, 0xf806796f, 0xd064a197, 0xe2e6318b, 0x877fe010, 0xf8064a6d,
- 0xc9b63987, 0x5daf8059, 0x0ebd5623, 0x01aed643, 0x516f2fe5, 0xcd27fdbb,
- 0x6af0b5ee, 0x746ff6b5, 0xbd74fb30, 0x020f9d9b, 0x87491dbb, 0x2c41ff66,
- 0x9a108740, 0x86cafa63, 0x0db9baf6, 0x41d039ec, 0x205fbaf9, 0xe41fe04d,
- 0x5329dc9b, 0x17ebcfc1, 0xcece8092, 0x860c5fbd, 0xf3716538, 0xde8147ab,
- 0xd43d6d62, 0x350f5fa5, 0xbf9ad1fa, 0x342bcda3, 0xcf9b487f, 0xe504df82,
- 0xd3b9fcd9, 0x26add6cc, 0x6e782c87, 0xeaf3f1f9, 0xbfae6e55, 0x3147ed12,
- 0xa42dc7ef, 0xeed2e915, 0xf399b91d, 0x9d10f8a3, 0x39436687, 0x3e513721,
- 0x1defc4dc, 0xbfa5c9b9, 0xbc96e157, 0x688ef7d8, 0xbef6f4f5, 0xd6b87c71,
- 0x6df46eb0, 0x763e43d3, 0xfa7fc8cf, 0xd2773cd8, 0xc12fd110, 0xce979a80,
- 0x327069ee, 0x62521c3e, 0xb9fb578f, 0xb5324efe, 0xe5b9c365, 0x8efe7bfd,
- 0x7b2be3e3, 0xf81efefe, 0x494f1389, 0x4eb5f604, 0x012e353a, 0xd2f3e7d7,
- 0xab593d70, 0x9369fbd7, 0x88e7b08d, 0x5777ed1f, 0xdc7a520e, 0xc167d00a,
- 0x1e67b7f4, 0x77d429ff, 0x2244b8f1, 0x2a144dcf, 0xc710a19e, 0xbfbfe42d,
- 0xfe605073, 0x907e5692, 0xf3c5eeff, 0x21b0b71c, 0xc48fbb42, 0x9be7211c,
- 0x9a193df8, 0x593a6f5c, 0x8eca4a74, 0xfdfc0f96, 0x5a0be233, 0xf399a15c,
- 0x11fcb48f, 0xa77cb7cf, 0xfc096ee9, 0x89a4eff3, 0xeed11dc4, 0xe0ce950c,
- 0x2f1ce223, 0x4a7e0c3b, 0xbd3f73cb, 0xde2b8e99, 0x4adc4437, 0xfede6de8,
- 0x5be4367a, 0x8ff798c5, 0x9dfbf918, 0xfef1bbe6, 0x2a092191, 0xfb5bd3d2,
- 0x38a6ae93, 0x27720d19, 0x65cbcfcc, 0xe51877f7, 0xff9ecafb, 0xd91b6e48,
- 0x927bbf33, 0xbf0e51bf, 0xeb80533d, 0xc3c9bd4e, 0xb47ddb88, 0xbfda7e9f,
- 0x9fa7ed10, 0xf8af63fa, 0xbdff989c, 0x1af30d2f, 0x9f7bbd5e, 0x4e7f064e,
- 0x64f4e9c3, 0xbb899b86, 0xec4c9a7f, 0xf232f2dd, 0xf30773ed, 0xd710e66b,
- 0x3fe99bec, 0x78ae0a77, 0xd27e17b4, 0x4ab3fb0a, 0x0e738d38, 0x41ffa217,
- 0xe31079f8, 0x1c473638, 0x7bdee789, 0xa19bddfd, 0x5b9832f9, 0xbf0578e3,
- 0x36b802c7, 0x6f45538e, 0xb2e049d4, 0x98ab4a15, 0xba7ee31d, 0xce519746,
- 0xf6094847, 0x8d3a27a3, 0xabd23038, 0xefdae7f9, 0x4c7e50c0, 0xddf5a24f,
- 0xff63e6aa, 0xce61c6a2, 0xdfc69e97, 0x9c95fa9e, 0x1cdee3c0, 0x4e4e17ff,
- 0xdbd80b7b, 0xe519be93, 0xecd72c66, 0x90342147, 0xfc1fa983, 0xa9092191,
- 0x97850fa8, 0xe3a6e214, 0x3fa8190c, 0xaf8a4eff, 0xe3ddbc40, 0xc27acf64,
- 0x1325f55b, 0xe37724f1, 0x663e0e4e, 0x96eddc03, 0x1f106cea, 0xba5ecebd,
- 0xda6f5406, 0xf0b305a7, 0x1336879e, 0xf4d99081, 0x102d73e6, 0x251ef047,
- 0xa0a03e78, 0xc054ff79, 0x56a5efe9, 0x5e75479c, 0x08b1e424, 0xbcd8333f,
- 0x587e06c6, 0x6f9d8530, 0x7448983b, 0x27f790fb, 0x8f06bc30, 0x1dd9bf9f,
- 0x2f12fca3, 0x29dc499f, 0xc72e38d9, 0x70a3fc63, 0x3ffb09bd, 0x481d704d,
- 0x9e65bf01, 0xce85f380, 0xa40e13bd, 0xee3b7a50, 0x333b444c, 0xdde0f83d,
- 0xc6882a4e, 0xcf8b1eff, 0x780a16bb, 0xeaa140cf, 0x59617cc2, 0x98dfee26,
- 0x99e48ff0, 0x433e50c5, 0x74cdf9c6, 0x788252b1, 0x2134d9d4, 0x7b8be607,
- 0xf85ab684, 0x297f5c1d, 0xfd38876b, 0x7cff30c9, 0x0c7938a7, 0xaa9c76fe,
- 0xefe187e5, 0x3cc31203, 0x0bced56d, 0x7cb4cdfb, 0xf7016cd5, 0x66774eef,
- 0x7e047f10, 0x7953d70a, 0x6cb8a58e, 0xb8005fee, 0x3fa4599e, 0xc4bb3837,
- 0xeb910246, 0xef138216, 0xf7c83b21, 0xfc097769, 0xe7ae7c5e, 0xcfbc7037,
- 0xfbac9b99, 0x3f3094ae, 0x6453bbd3, 0xb3e471f7, 0xf4d6a1df, 0x87ae46eb,
- 0xf1f9f037, 0x8e864f3c, 0xd288628e, 0x5b3f4088, 0x62e5be47, 0xa5cf0e1b,
- 0xe78707e2, 0x1c6ef6d3, 0x8ad79861, 0x17116b95, 0x6f1d6c69, 0x1382ec36,
- 0xdfea6ec1, 0x9f4f25bb, 0x97c7a18f, 0x3d9a13b5, 0xc457bd98, 0xf9d9c375,
- 0x8902cf94, 0xaebfd3f2, 0x726098bd, 0xd078801d, 0x78dc97ad, 0xbd944e8f,
- 0x1be8a0f8, 0x50df8bd9, 0x7aa3cc1d, 0x1f0264fd, 0x41910737, 0x8ba63760,
- 0x380a7edb, 0xa6e3a4ea, 0xb48e0639, 0xef9b4771, 0x063d5217, 0x6c3390dc,
- 0xa5fa659d, 0xa4efdc34, 0x76c3f7cd, 0xae25fa84, 0x6fd2fe18, 0xf774b212,
- 0x4aed847a, 0xe7225f84, 0x64bf7b7e, 0x9547f53a, 0x206fa01c, 0x7b458ff1,
- 0x42c97186, 0xf10518d8, 0x1873b4c1, 0x744af1e7, 0xf3e38afe, 0xb02f88d3,
- 0xb752427f, 0x15fd187e, 0xd63cce2d, 0x7a53f9a9, 0x8de3849c, 0xa57dd8b3,
- 0x810a4e1a, 0xb3ffa09f, 0xf4eb8c44, 0xfdad1b86, 0xa212f0de, 0x564c08f6,
- 0xfae1788f, 0xf2f175f7, 0x197c6d17, 0x05f6864a, 0x343e1a27, 0x9bc718bc,
- 0x83c434ff, 0xba6f1a7a, 0x0e6f197a, 0xaed1908c, 0x7cd7e6f7, 0xeaf3e4ea,
- 0x37a62704, 0x7f1b355c, 0xd82c6897, 0xf937f011, 0x6b8e15b8, 0x1c9c33d5,
- 0xc1245b68, 0xec1791fa, 0x1cb76647, 0xfff078b4, 0x81c4c600, 0x008000d5,
- 0x00000000, 0x00088b1f, 0x00000000, 0x7cbdff00, 0xd5547c0b, 0x67b7efb9,
- 0x332479ef, 0x49926649, 0x1d843c32, 0x09212108, 0x11bc210e, 0x11084937,
- 0x88a80ca2, 0x1f01d68f, 0x4d092060, 0x6f53d5ad, 0x52812133, 0xbd583d6c,
- 0xf4f47bd6, 0x41ed5837, 0x0108750d, 0x26702783, 0xd0f098a0, 0x7ac0f820,
- 0x452968da, 0x5a18921b, 0xf5cf6a0f, 0xf6b7df7c, 0x8326664e, 0x73def7a5,
- 0xd62e9f87, 0xf5af6b5e, 0x5ff9efad, 0x4a9b5adf, 0x500a9fc0, 0x982015b3,
- 0xe1f95006, 0x06484a86, 0x01203be8, 0x96d40314, 0xfe1b41c9, 0xb1fdb4b5,
- 0x0cdd45c7, 0x51df8956, 0xf5c02661, 0x950b7e20, 0x06e6c50e, 0xbc95e658,
- 0xd6801672, 0xe1bfde2e, 0x62a82592, 0xf9af7afd, 0xe89b1ee3, 0x5fff35fb,
- 0xb800c803, 0x197f7f51, 0x2ffb0ffd, 0x1860a4d3, 0x3b365d5f, 0x978dffba,
- 0xa6913fca, 0x62d72c02, 0x3e4fc39e, 0x22f2a793, 0xac24becc, 0x8fb74457,
- 0xcfb92673, 0xe32ff1d9, 0x32d7fc61, 0x65f3c5c0, 0xaff7516f, 0xc8ed77e9,
- 0x523d48f2, 0xace80166, 0xdb1971b7, 0x7f016e5e, 0x4d2581c8, 0x0065c450,
- 0x9d0a0152, 0x581998b8, 0x0d0164bf, 0x136308f9, 0x3ab9f7e0, 0xdf86131d,
- 0x07e3883f, 0x8e30dc70, 0x3801c81f, 0xfae4ef54, 0xae1ef56b, 0x2f82935f,
- 0x877fe3a4, 0xb8a8b0bf, 0xdfedc59e, 0xdf38fc51, 0x8f0a4be2, 0x93d69b3b,
- 0x71830b4f, 0xf73f9eb4, 0x0de3fd16, 0xb6bfdc30, 0xbf8b33fd, 0xf9ebf2f0,
- 0xa7e7f35b, 0xcd3f3ca8, 0xcf0c15ab, 0x180144bb, 0x9a5ceb83, 0x635fcd1b,
- 0xc6f9589e, 0xfeb96fca, 0x56fcac7e, 0x90c7acc4, 0x32a2f40f, 0xffc38a8d,
- 0xa6d0264b, 0x437e5e5f, 0xb3f8b1f2, 0xdb2305bd, 0x72c97e01, 0x974f1e1a,
- 0xbe2b7417, 0x19974142, 0xb752baf5, 0x40efd40b, 0xb2b9ca0a, 0x4889c213,
- 0x2be7f5d8, 0xf4a8513e, 0x9471f04a, 0x6c7c033e, 0x1e1e5bc8, 0xe425d17f,
- 0xbcf0da6b, 0x7764148b, 0x0b1f3d11, 0xa8eb3881, 0xea1471a1, 0x6c39601f,
- 0x155a61d9, 0x635c75c2, 0x547d3b81, 0xe0328a26, 0x937db894, 0xf6e3c4d3,
- 0x8266c5ff, 0x3e5e01ab, 0x48025998, 0x3cead79f, 0x42011306, 0xbff7e066,
- 0xdfb9524e, 0x240824e6, 0x74a0fce0, 0x945ec8db, 0xb45c8012, 0x25b4653b,
- 0x364388d8, 0xd91f612b, 0x8157bf61, 0x2fdbb406, 0x050ef78d, 0xc914076e,
- 0x4c738fe6, 0x7ef4598d, 0xf61e7bb8, 0xa8a4559b, 0xcb8bb0db, 0xda877eaa,
- 0x9d580efb, 0xfeff5ef4, 0x699877ef, 0x1780bf9c, 0x0ed78429, 0xe7da8158,
- 0xf241cc3b, 0xe3e48b1f, 0xfdeb09b2, 0xec560003, 0xb8557f3f, 0xd76e5014,
- 0x13a37740, 0xc724b9cb, 0x8d7bf238, 0xfece9bd6, 0xe50a31c3, 0xd39740b4,
- 0x956fdd02, 0x8fd9901e, 0x41c92694, 0x53f61ac8, 0x5a32ead7, 0x3a256c80,
- 0x7ae1b416, 0x4bdafdb1, 0xb4bf5c4a, 0xe1771f48, 0x9321419f, 0x7ad1085c,
- 0xf84da8bb, 0x14d012a3, 0x6d4b29c0, 0x97e4ed31, 0xce459e90, 0xc973e7e8,
- 0xef5ea277, 0xf84bf18e, 0xf7bcd133, 0x8bbcc53b, 0xf726bfe4, 0xb65ffae0,
- 0x43cdef68, 0x9a4e077a, 0x0e7c59e1, 0x6732b1e7, 0xd0da5f24, 0x973983ae,
- 0x07fd8bb7, 0xe9153513, 0xc7f49979, 0x9ad1f541, 0xfdf2f53a, 0x940cf6a7,
- 0xb9787760, 0x13028579, 0x7b4f1d58, 0x3df98632, 0xff3aaf82, 0x22d9f963,
- 0xf91d9341, 0x5bddb0bc, 0x0465ae46, 0x8f913b1f, 0x80c95ef8, 0x295cf3c6,
- 0x78fb0d1f, 0x46a25cf9, 0xb9f2fefa, 0xffcd1ad1, 0xfa6be92a, 0x3feffeb0,
- 0xfe48983b, 0x2aa8f913, 0x2e0317a8, 0xd659f79b, 0xa1f7a15f, 0x6dc13951,
- 0xf5d16f58, 0x81c4e5fc, 0xefc4f9a3, 0x67de4deb, 0xe3651a69, 0xe48515c4,
- 0x9c96b737, 0x0d2477c5, 0x7ed107a4, 0x7709d524, 0xfb401960, 0xa40b7d21,
- 0x1993eb02, 0x36f34042, 0x9534f06a, 0x29abe60b, 0x1c41efc4, 0xe341dbb3,
- 0xe7a429a9, 0x338bd7f0, 0xf923857d, 0x0a8f52de, 0xd6b4f385, 0x1b2534db,
- 0x93bd5f60, 0x1ef56deb, 0x57ad5f2e, 0x2c74dcb9, 0x8e20c1f9, 0x3459ab53,
- 0x7df86d25, 0x8fb0aa57, 0x8867e805, 0x052005ae, 0x0b73a803, 0x5ef5d61e,
- 0x249b8291, 0x27ae1392, 0x97289f82, 0x08dc9ec1, 0x4d2936fe, 0x35b26693,
- 0xf28bbcb0, 0x063efc4d, 0x59f3e0ff, 0xdca3fb23, 0x4baaeaa3, 0xbae67e62,
- 0x961724b4, 0x52f8743f, 0x6e916ca8, 0xb8017c39, 0x5bf512ee, 0xfe380be5,
- 0x7842f0f4, 0x74c3e310, 0x15f10c14, 0xfd13b77f, 0x402c3a33, 0xca80c1f2,
- 0xd3a45976, 0x492e7da3, 0x323e70e7, 0xe0c7af9d, 0x30cad67b, 0xe83ed1e9,
- 0x17be2349, 0xb4767cdb, 0xc843a428, 0xf66e9355, 0x9cde3848, 0x26578b74,
- 0xa6fa26d2, 0x75fe446f, 0xd884e3a4, 0x6323a3bf, 0xa6f68aac, 0x4fbc6f29,
- 0x1cd9ad76, 0xcc2b8161, 0x71113dbd, 0x719e133c, 0x9f22bc52, 0xbdf235f6,
- 0x01c977f3, 0x73886bf6, 0x9e758fc8, 0x7186f64a, 0xd92e6b9e, 0x7fd256bc,
- 0xf5ed117a, 0x54f26260, 0x5dd0cff2, 0xd6b8ebc6, 0x6f91d626, 0xd8bb17f6,
- 0x9f0f859f, 0x6dfef7c0, 0xe9fc8010, 0xca1fdd35, 0x9a94f480, 0x907b32f2,
- 0xe4d2c3ce, 0xfc9f2a14, 0x9701725d, 0x897b92a7, 0x764d073f, 0xa77de695,
- 0x46ce2d5e, 0x72ded17d, 0xd61d0098, 0x0a1cb782, 0xa1ea2795, 0xe5f51b38,
- 0x971b7aad, 0x5c9deb27, 0x70f7aa9e, 0x49db1879, 0x76c75e9f, 0xb037b7d2,
- 0xd85dea9d, 0x1c31e54e, 0xdba2ca97, 0x96bc7893, 0xb42c92db, 0xd0c9838e,
- 0xfa4fcc6c, 0xb70c4e14, 0x3fe01782, 0xab6afb1e, 0x978c2aed, 0x34a17f56,
- 0xb3f4f90d, 0xac142aaf, 0xee3c5b1f, 0xbb60bfa1, 0x1fa667f9, 0x9bde90db,
- 0x8f5a394e, 0xaa9edd63, 0xfa41d8fc, 0x9d112c79, 0xb9b59923, 0xa63a434b,
- 0xcd1c6f93, 0x3660d76c, 0x7af4d3d6, 0xd95d7cc0, 0xa595807c, 0x42e0f93d,
- 0xaf597125, 0x577f21b3, 0xa3dffdda, 0xabb025ba, 0xaa2aeca2, 0x7b248863,
- 0xb641cc97, 0x7b92b1f7, 0x95bbc605, 0x127fff0e, 0xd17b0dbf, 0x91a43fa4,
- 0x49a9987f, 0xbcb372e0, 0xe1a49383, 0x95bf46e7, 0x49ff458e, 0x25debafa,
- 0x98cd7db3, 0xc741692f, 0xfa35fb48, 0x96ab38cd, 0x9e66fc91, 0x72a26f2f,
- 0xeb8b8559, 0x9ab355be, 0x1ccbfec8, 0x94bf7b97, 0x65d07fdf, 0x512dbd66,
- 0x389c5fb0, 0x8e9bf468, 0x7bb8464a, 0xf8312ba0, 0x69f5e92c, 0xd612637f,
- 0x8ab27917, 0x0337dd64, 0xb63b5dd7, 0xebe57b51, 0xde576cad, 0xf2e12f24,
- 0x6990a899, 0x21d0bed8, 0x2a64c379, 0x4a6a3ff3, 0xe4a776a3, 0xd42db625,
- 0xa6289da6, 0xbbd96cf6, 0xd5a7ea90, 0xa7510a32, 0x2ef38594, 0xb9c5bf60,
- 0xbe1b5e29, 0xd0fc2a77, 0x85dfd245, 0x04de49b9, 0xf04af3f2, 0x7baa1ae9,
- 0x9506dead, 0x6958f1d6, 0x4aa38d32, 0x5bd6135e, 0x1ffd286e, 0x2f894744,
- 0xda892405, 0xd7e10a5d, 0xc3f77f27, 0x6bf62732, 0xd9012719, 0xde70a9cf,
- 0x85fe889f, 0x026a899d, 0xb38b69f9, 0xb16bde8c, 0x5c4dce48, 0xff5295f7,
- 0x7d598e40, 0x7e3dc71e, 0x41e39d9b, 0x8c055be5, 0x36df8e83, 0xe542e386,
- 0xa84bf35b, 0x78126e3c, 0xcaadf2bd, 0x41376e0a, 0xe54a9c98, 0xf7e2dfed,
- 0xb0fe97b6, 0x1fe38dcc, 0xdf62ef1b, 0x562a7282, 0xf624fc22, 0xbfde8907,
- 0x77fb9abd, 0x9d0f9948, 0x0b17fec5, 0x38ac8a52, 0x213bb97f, 0x58c57faa,
- 0x497d5457, 0x2562a8f6, 0x16564fc2, 0x6fd54564, 0xb38a9628, 0xe6fbfa23,
- 0x57d54427, 0xb38ab994, 0x54dffa23, 0xbeaa2a39, 0xa8aca6f2, 0x1cd342fa,
- 0x61ea3fe1, 0xf3b0997e, 0x20d965f9, 0xa9fc898c, 0x037df453, 0x3b26efdb,
- 0xa9f812ef, 0x09f885c8, 0x55d652e5, 0xd8b971cc, 0x00affbae, 0x92667b74,
- 0x2ebf17ff, 0x83de8f1c, 0x4999fe20, 0x16fc4878, 0x201e2376, 0xdc60a47c,
- 0xcca7a08e, 0x2bc71d11, 0xff27cd40, 0x84b9d15b, 0x67b954f1, 0x319a1979,
- 0xbdfa31f2, 0x8161f90a, 0x2ee86243, 0xa5c732ea, 0xf46c62b7, 0xabae06fb,
- 0x316be9d0, 0x09529bf9, 0x6eb4058b, 0x7ac635c2, 0xb72e3a0c, 0x8ef90a9e,
- 0xdfff1432, 0xfd2c575c, 0x7f791b00, 0xa0d74b11, 0x8881c74b, 0x760718a7,
- 0xc43e32a6, 0x3e1e3b9f, 0x254f7030, 0xa45737fa, 0xee48737f, 0xf79785e5,
- 0x97b8c66f, 0xb8c67bc3, 0x2cd0fbc6, 0xfbc6fcf3, 0x2955cf43, 0xd89df3fe,
- 0x73ce293f, 0xf9ff14b7, 0x17fdd173, 0x03a36a28, 0x4828f8ed, 0xfbf100de,
- 0x2410aab8, 0x35ad3d2b, 0x43e8b951, 0x747842bf, 0x1340d9f1, 0x837af395,
- 0xfeb8adc7, 0x956572eb, 0x1e060df2, 0x8cd75d8a, 0x3a4fb154, 0xabb5497a,
- 0x5012ded8, 0x68bfb4ed, 0xd3b55879, 0x8345aa3e, 0x6a2fda0a, 0x691b4d13,
- 0x63e5a1bf, 0x549cf589, 0x52f3e7e1, 0xca318e05, 0x5e7f6567, 0xb6d5962a,
- 0x8b09479e, 0x60317cd4, 0x94703021, 0x48c15e70, 0x0954779c, 0xa3be683a,
- 0xbe1d070f, 0xa57e3f08, 0x29146f38, 0xa7e93a5b, 0x91fa45a4, 0x16825b52,
- 0x391637e9, 0xaff5515d, 0xc7d8aa9c, 0xbd45467b, 0x159a9d77, 0x6bf10fd5,
- 0x46a0e12e, 0xe42ae40b, 0x5e894d47, 0x7cb177c2, 0xfd7d17fc, 0xe093875e,
- 0xaadd9813, 0x97138fec, 0x7f82a5bc, 0xa457cfb1, 0x6fea7d8a, 0x6e583d68,
- 0x35f4154f, 0xe2013ec9, 0x3e933710, 0x4f71d6c4, 0xc3c68aba, 0x77a43d91,
- 0x22e9bb5e, 0x4ef86f42, 0x29d7d5ea, 0xe9decfa8, 0xc44eefa6, 0x8ba923fa,
- 0x699e7d44, 0xffac04cf, 0xd45cb534, 0x374d36af, 0x057e017b, 0xc32baf8b,
- 0xe7dbba7a, 0xfb79c54f, 0x12ad8ecd, 0x24c519e2, 0x787b9141, 0x0588fbff,
- 0x160a29e3, 0x21734c95, 0xd8ab76ad, 0xa024d72f, 0x25fea3ff, 0xaafc4a9a,
- 0xdce43049, 0x1d714193, 0x9134ceee, 0x6ff630bf, 0x87be0b76, 0xfcd8e7ef,
- 0xc25f42b5, 0xa5fa2ac9, 0xca2ff7fb, 0x9ed8bd24, 0xe7f5e4ea, 0x21e799ef,
- 0x7d74857f, 0x7ffdfa97, 0x3c536ba4, 0x62cc9aba, 0xfc96de9e, 0xf9bb8a52,
- 0xd21c51e3, 0xbb70bcb0, 0x4213cfcf, 0xcb8c2ba8, 0xf5848f35, 0x4bc59746,
- 0x90fb7d38, 0x6cacaf7c, 0xb213f3d4, 0x9c8de85d, 0x1bb8da62, 0xfaeb9fcb,
- 0x3bcd14f2, 0x1819e683, 0x01f51cb7, 0x56e3039c, 0xe93a8a14, 0x0d172c83,
- 0xf81ede10, 0xf7979256, 0x7e0763c7, 0xbfac03fb, 0x97be4777, 0x8907f470,
- 0x82ab3f82, 0xb07b1a13, 0xbc1acd3e, 0x3e748c2a, 0x967fe694, 0x6b36e0f4,
- 0x9beb1270, 0x8d126363, 0xfbc0692f, 0x877f6310, 0x32d538fb, 0x1fee2471,
- 0x88d82f39, 0x11ccb7cf, 0x45e6367c, 0xee28da9e, 0x3c5b37bc, 0xf9c0e837,
- 0x06ff885b, 0xc074433d, 0x03c72758, 0x45a47910, 0x61e443b2, 0xbce260ff,
- 0x1f192d49, 0x359cf7d6, 0xcfaebef1, 0xe68057d8, 0x9b933d58, 0x27399e78,
- 0x6c4cc936, 0xd4ccff00, 0xb6af4859, 0xfa7de7ad, 0x7de8ee8c, 0x7f4eacfa,
- 0x9bdf03a9, 0x7cfc7ef5, 0xdc7d3cc8, 0x299410d9, 0x89e9cf5e, 0xdf4f37ee,
- 0x1f3bcfa7, 0x54cdfcfd, 0x868dbe3d, 0x8e7991d1, 0x33ff7d3a, 0x0a427c78,
- 0x3af929e9, 0x516ef919, 0x3a9d9865, 0xeb5a37ec, 0xf90e5fb1, 0x85971b47,
- 0x11779da7, 0xa8f56dfd, 0x93f216d0, 0x7b0d2e1e, 0x770d3c2a, 0x7988d4c2,
- 0xf837bd74, 0xe0bef838, 0xdc9eb4e3, 0xbac7cf9f, 0x410cd3b9, 0x739d7875,
- 0x820d3e75, 0xd3a0f3e0, 0xd99e7cc2, 0x61e9cbf5, 0x98f5647c, 0x5f588d40,
- 0x6a089a82, 0xe7b53f84, 0x65d546a6, 0xce3a3e43, 0x9f8fae98, 0x351d1de4,
- 0x0c37818e, 0x5a9abdf9, 0x643849de, 0xe31d63ba, 0x13d9948e, 0x3e2dc6a2,
- 0x64272b1e, 0x2476e11c, 0xd7089ff2, 0x70a2569f, 0x80772cc6, 0x3fb30038,
- 0xc9706254, 0x68ec9976, 0xe7e8d9ee, 0xc8cbb4e6, 0x84fc72ed, 0xef9f3fa9,
- 0xe9f9fbfe, 0x99f9a2d2, 0xcfcd1156, 0x3f345f74, 0xf3455733, 0x9a3f946d,
- 0x4dd6632f, 0x6abf6a89, 0x7d545163, 0xa37383fc, 0x006167fa, 0xdc4fac8c,
- 0xffaa24ba, 0xa22beda4, 0xba9f93ea, 0x5e7faa2d, 0x7b544d70, 0x2baacefe,
- 0xa8617f92, 0xb11faa2e, 0xf20df4d7, 0x1fedbabf, 0xeff9e6a2, 0x34a2ff96,
- 0xcbaf2f3d, 0x438e997f, 0xc7fa1a5c, 0x4538e1a2, 0x93f215e9, 0xbe37f6ae,
- 0xa04bfca0, 0x53bce544, 0x1b780960, 0xd909edb1, 0x1b5f9127, 0xa136ac83,
- 0xc13268df, 0x8ff7f23a, 0x879c4c9a, 0x2abcec34, 0xaa8be04d, 0x9ff7e134,
- 0xf8a69b46, 0xeccff066, 0x78449ede, 0xa0993643, 0xe29ae29b, 0xc671bf65,
- 0x716be101, 0x7034793e, 0x7934ab5e, 0xa899b2f6, 0x329bf287, 0xd1e51fef,
- 0x7d71507e, 0x3f1fc78d, 0x8b9ff941, 0xfa4a9b0e, 0x03faa72a, 0x7d70b714,
- 0x2461e7a1, 0x3eaf795e, 0x49f6893c, 0x67957d46, 0xbfea66e3, 0x93858eea,
- 0xf43aff68, 0x498e8efc, 0x88721165, 0x388adf5c, 0x697be8f8, 0xc4673f22,
- 0x6aacdb87, 0x6bca0f63, 0x3ffc2c9a, 0xa5ef85c6, 0x903ffae1, 0x9c0b361e,
- 0xbad33015, 0xde724e31, 0xba8085df, 0xe194cb0e, 0x8d7ea478, 0x84c1f54a,
- 0x26f9dc68, 0x08fb4411, 0xffc72a5b, 0xbb7f0d5b, 0x36f98f2e, 0x3fee88e8,
- 0xc144f778, 0xd7236fcf, 0x2c67df90, 0xdd061d62, 0xfc3f75e7, 0x7b87eea9,
- 0xe446ffc9, 0x78edd79b, 0xf5f0893c, 0xa7815c99, 0xe8cfe78d, 0x4d267d72,
- 0x3bf5e9e0, 0x79919c13, 0x2b90eac7, 0x36db5eaa, 0x423679ca, 0xeb1bd70f,
- 0xad0a4bf0, 0x3f6e8363, 0xc93d9dd5, 0xf1e301b7, 0xdc70d3a7, 0xbf171d9a,
- 0x37de8fbd, 0x3888640d, 0x6ef9e01a, 0xd7180f79, 0x4415f2df, 0xd297d2e1,
- 0x25e57e44, 0x95cb0ba2, 0xdf513858, 0x776c6256, 0xfcc6f951, 0x7c3bb272,
- 0xbeb0961e, 0x84cc13ac, 0xb3fb96d3, 0x53f4c4cc, 0x2f677de0, 0x9a5f6d2e,
- 0x04dcf3ca, 0xc5c5333e, 0x9d5dbf77, 0xc686fd44, 0x4319c633, 0x2bd459b9,
- 0x419cc057, 0x246b9af9, 0x9f380dc6, 0xeefb9e8d, 0xe299b318, 0xbbdf31af,
- 0x0ef0bfe0, 0xf00ac5ea, 0xf8c6b8f2, 0x7e51a900, 0x90cb03f8, 0xbf24ed1c,
- 0x98532eba, 0x677539f2, 0xd1fe7cac, 0xebf030df, 0xdfce9007, 0x57b17d55,
- 0xbb7fba42, 0xfb57e07a, 0x4fadf9f0, 0xb5c5dfa1, 0x87fddb66, 0xc3ce22d3,
- 0x917ddcce, 0xc8f10ced, 0x33924ff3, 0x25eac97a, 0xeaa0f8fb, 0xbc489ca5,
- 0x616d227f, 0xb307bd27, 0x479ee2d3, 0xab2beec4, 0x6ed93f48, 0x8699c655,
- 0xeba7de5e, 0x930bdd65, 0xd25d79f9, 0xfd447ad0, 0x57f39979, 0xbcac0775,
- 0xd7477fbf, 0xbf17aa87, 0x9f991998, 0x54bd416b, 0x7274a873, 0x2a9ce749,
- 0xf1cf66f2, 0x6fd4a9c1, 0x21efeca3, 0xe978393d, 0xefaa64e3, 0x5e4bf379,
- 0x6574dc0f, 0xf323f7f4, 0x9b5cf922, 0xf92ef034, 0x567dea93, 0xa550f295,
- 0xdd8ef2fb, 0x4e9af58e, 0x9eb83bf4, 0x239eae0b, 0x6a7ed34f, 0x979404bf,
- 0xb02709aa, 0x9569af73, 0xaac79482, 0x65bddb6a, 0x9aabd60c, 0xba931797,
- 0x3f0b26d6, 0x0522d356, 0xa7f03c69, 0x748a7f31, 0x357d28db, 0xb00b1fef,
- 0x18a8efbd, 0x787d5cff, 0x4ff459ea, 0x9d9f2d7d, 0xb4363a23, 0x763f3959,
- 0x196c292e, 0xd85ef383, 0xea02852d, 0xb9f6ddb0, 0x5d6a3ec4, 0xcd0b31e8,
- 0xe5b9eebb, 0xfb0f04d9, 0x90b49df6, 0xfad75c7f, 0xbcfeb958, 0x20fb8458,
- 0x9dfac5db, 0xf322931d, 0xfc21f749, 0x81f8c883, 0xf70805f4, 0x09e39082,
- 0xdaab3e5d, 0xbb123627, 0x7dedafd0, 0xe6bf625a, 0x3bdf9374, 0xffdfebb8,
- 0xffdde695, 0xff98932f, 0x7fc657c5, 0x4f1805f0, 0xc0f3e8d4, 0xda1653f9,
- 0x9ab4d61f, 0x173bc5cf, 0x77dbbefa, 0xf0c67b95, 0x571636fe, 0x3880b16f,
- 0xc10099b7, 0x381e29e7, 0x91ab2c06, 0x07511f9e, 0x0e7147a4, 0xdd60d658,
- 0x71467646, 0xc608e3c2, 0xb9d717db, 0xf70f9e15, 0x9de31b08, 0xedead657,
- 0x858f6aa5, 0x9e3570f2, 0x75e90b5e, 0x7113f84d, 0x51f6df5e, 0xbf6716b9,
- 0x7e16ed5c, 0xc303d0d3, 0xa7d2ebde, 0x87985db0, 0xc6c860e2, 0x66b85f76,
- 0xdd53e280, 0xda0ed47e, 0x03fcb817, 0x6f607744, 0x2beb4a82, 0xccc9f7dd,
- 0xfe79889e, 0x5187dba4, 0xa3f979af, 0xe7e266a9, 0x2aa68f9e, 0xfbcfc349,
- 0xe2eb1c32, 0xddaa55e5, 0x4e6f4e22, 0xb68f9df6, 0x186f0483, 0xb769ade7,
- 0xda07f9f3, 0xe28e96c1, 0x75b83b48, 0xf3d20edd, 0x168bd976, 0xcb6d3501,
- 0x3fdc0dfd, 0x91e24bc9, 0x480616db, 0x8fa7650f, 0x8afe13f5, 0xcfd4cdf3,
- 0xae52267d, 0xe91b7fdb, 0x4f08e291, 0xd46bcc8a, 0xd17e7a98, 0xb75c9121,
- 0xaf9a266b, 0xc96fd808, 0xef0f5f0e, 0x54338a9f, 0x9e1ec7da, 0xa7c40e51,
- 0x38901e9f, 0xf54e6b5f, 0xf922cd6f, 0xbfb9951e, 0x010bf0e5, 0x9bee973a,
- 0x9f44c506, 0x4e9fdeb1, 0xe38c830b, 0x2f042e6c, 0xff79bb4d, 0xdfd21e2d,
- 0x611fa69f, 0x86f7617c, 0x234afc80, 0xe7ca06e2, 0x61420bd8, 0xd721271a,
- 0x3ce32c57, 0xf3276e79, 0x7bf562cb, 0x4d941f29, 0x4d875c75, 0xfa23adf1,
- 0xb144ee6c, 0x657b1e9f, 0xb23eba79, 0x1bce857f, 0xe92f9cfd, 0xed3d0b33,
- 0x5f180abb, 0x399d1df3, 0xbe09a4d4, 0x6b2e76c1, 0x46a63adb, 0xf044dbc6,
- 0xdf79e8db, 0x44bc5bfe, 0xd75139bf, 0xb6448cf7, 0xfff9d88b, 0xe499f499,
- 0x35b35a6e, 0x7c4cf88b, 0x7bf64753, 0xf067ef5a, 0x2e7f1403, 0xaa922bd9,
- 0x8d0fcbc5, 0x4e3c9777, 0xea9cdd72, 0xf01575cb, 0xc300acf1, 0x76b8c49e,
- 0x57fffd87, 0xe91fff64, 0xf8a2dcfc, 0x4ddfc46d, 0xf16cdd7a, 0x7fb64ea9,
- 0x0ff34a9f, 0xfc746fad, 0xf6e9b5f5, 0xff8265ba, 0x4fb7d71b, 0x15399f5f,
- 0x4bc68df5, 0x4e67ee06, 0xff48060e, 0x78a0929a, 0xec3e3e13, 0xdd90f72a,
- 0xc5f4396d, 0xa6f7b238, 0x2807af24, 0xd66075af, 0x2dfbb22a, 0xfd636fa3,
- 0xbc85f4ff, 0x9e623922, 0xad256913, 0x697ebfe4, 0xcfe2cbac, 0x42fc75a5,
- 0x5d6323d7, 0xabdf6ee9, 0x11bc88ee, 0x915f758c, 0x9fbe80fe, 0x17f1eaf3,
- 0xff9c73f7, 0x2f4487eb, 0xd6ee279d, 0x3e02b771, 0x783b1bd6, 0x1a55c655,
- 0x1f7ba31c, 0xd8261d8d, 0xe4a6cb46, 0x5d39c454, 0x36326cb2, 0xb70d9a7d,
- 0x4c6c74e6, 0xebba6fed, 0xbd213496, 0x0b2021fb, 0xaf9d167c, 0xa4d24d00,
- 0x6bc53cc8, 0x14f3f155, 0xc91bf8cf, 0x9e56ff2e, 0xb4d84e62, 0xf7f292da,
- 0xbace2be7, 0x9f3df7a3, 0x2a2655ba, 0x31f42b94, 0x8a0c4e14, 0xa74c17bc,
- 0xf133537d, 0x067bff12, 0x2df6cad7, 0x5cdae5f5, 0xfbee4f68, 0x8b1d6db7,
- 0xd62d9776, 0xfa46f54f, 0x3dc0a77b, 0xd78ff943, 0x46ee30a6, 0xfef3273f,
- 0x12d3a9df, 0xc462b723, 0x88f7c06b, 0x27f2a352, 0x265f23e1, 0x4aed7e93,
- 0xb922df55, 0x7cc9740c, 0xe941244f, 0xd8e2b8b1, 0x03c23ed9, 0xd38bf7e8,
- 0xd6c43ec5, 0x6d17ce26, 0x317ec6aa, 0x269ce9c1, 0x79c5d847, 0x579fc8b7,
- 0x9c60f91f, 0x05dc3105, 0x02e4f1d3, 0xdec37f1c, 0xc34a6496, 0xf6f33fad,
- 0x5392ba4c, 0xe91df8a2, 0xa54ffeb8, 0xc236391d, 0x53835ba9, 0xf0a13f89,
- 0x93f21484, 0x94040a85, 0xf1f138a2, 0x14de22a9, 0x87c53cd7, 0x3ed850ff,
- 0xe5e12d7d, 0x2f58b0da, 0xa6c661cd, 0x7ae036bc, 0x2a635b5f, 0x6ffb578d,
- 0xbf934607, 0xe05d297a, 0x4d2f179a, 0xa02bef11, 0x74acff50, 0x41e7ae97,
- 0x5f0be513, 0x7d8cf3d2, 0xcfdc4c97, 0x6db0bf98, 0x3d44eca1, 0x3f6f0828,
- 0x76cd79c4, 0x4df24aab, 0x3f040e78, 0x771813da, 0x0c5c1ed6, 0x5dfae714,
- 0x7db095e0, 0x6fc0e257, 0x0e061f49, 0x773a5558, 0x876e704d, 0x977f1c54,
- 0x0f2b4f7e, 0x31c5b215, 0x2f98dcfc, 0x94e7fda1, 0xe0bd2413, 0x6504dff3,
- 0x88e35c3b, 0xd31277e4, 0xd5eba25d, 0x89b0c4ae, 0x260645ce, 0xe9875ee1,
- 0x7813a61a, 0x3cce835e, 0x2cbbfbf6, 0xe07ee703, 0xa49cba66, 0xe8207907,
- 0x2ac9f684, 0xa704d1f9, 0x7048ebc6, 0x3480deaa, 0xa9daae92, 0x6ed0fc08,
- 0x35e7251e, 0x6baed877, 0x36eb8713, 0x135d85b4, 0xff8f3f40, 0x8f3a36d5,
- 0x8393aa1f, 0xfefc3d99, 0x6c2fe92d, 0xf9cb1a6b, 0xa28c80bd, 0x949963b6,
- 0x7954eb4a, 0xb545e87d, 0x07c122f4, 0xb767afba, 0xfea1f689, 0xaa0ee7a8,
- 0xaf3f443b, 0x925f69d5, 0x8742f6c5, 0xf07843d7, 0xee7c297e, 0xded7b45a,
- 0x05297cc7, 0xd35eaeb8, 0xb19a07a1, 0x87569fdf, 0xe93ee872, 0x1aff5673,
- 0x3e57cfd2, 0x2148f35c, 0x665ddbf9, 0x9df9256f, 0x18cfc09e, 0x5668bf87,
- 0x1dd845b7, 0xe0c81d8c, 0xf33180fb, 0xf4d99ed4, 0xfdd7c233, 0xce9525fb,
- 0x03cdfbbd, 0x9aee88e3, 0xe85a3ff6, 0xfeed64be, 0x4b4f188f, 0x9a1afe19,
- 0x035b766f, 0x039b6bdf, 0x1b5ef9db, 0x9fbe43fa, 0x7f08f218, 0xe73a7832,
- 0x390886d9, 0xec2e913c, 0xe3fc060d, 0xebb55e29, 0xf6907af6, 0xedee58dd,
- 0x8d2275b8, 0x07cf57bf, 0xc035ef18, 0xf7c431a1, 0x971af04d, 0xa0fef3c6,
- 0x967a4152, 0x4eedaeff, 0xa3ffa22d, 0x3dbf02e9, 0x66702e9a, 0x89bf8676,
- 0xf1df5de8, 0xd69925b1, 0x8e343679, 0x4521b9f6, 0xf1c6714d, 0x1bf64323,
- 0xcab54f63, 0xd7fa8580, 0x2bf6079e, 0x14a6ef28, 0xef4bdf94, 0x8d594db6,
- 0x4ca2f7cf, 0xe2ae2852, 0xefe294fe, 0x0c6fb35e, 0xe7786ff5, 0xc7e4a9f3,
- 0xe74f481e, 0x607e7ff5, 0x1be278fd, 0x22497fea, 0x606ed9d9, 0x69d866bf,
- 0xbe29d901, 0x5ffa405f, 0xf63cf54c, 0xdd2cbbcb, 0xf72ed84b, 0x12ec809a,
- 0x01ed9ff5, 0xf849dff5, 0xf689c0ae, 0x664fefc0, 0x6dd5df3b, 0xa32718d2,
- 0xf5c49f5f, 0xbea91b92, 0xc9ccbbab, 0x05f74e76, 0xc35ec88a, 0x109c7887,
- 0x9d1224c0, 0xfca8be04, 0xe9993b88, 0xe7e5fc36, 0x86e3d06e, 0x75c5e29a,
- 0xca3965d8, 0xa4e2223e, 0xf4711ba8, 0x9edb33c9, 0xee817b40, 0x0fdc28d4,
- 0xffbf0bef, 0xd5563d5d, 0x59773ebd, 0xeeb3ae33, 0x07edcc84, 0x585e7df5,
- 0x7a4393d7, 0x86e2867e, 0xefdc5d3a, 0xb033e7a5, 0x675df74e, 0x094b1b6a,
- 0xd8aecd63, 0xb9ca987e, 0x412950be, 0xbaabf9c8, 0x99859b0f, 0xb5fe93e7,
- 0x9f2eba1e, 0x9ff5cdee, 0x920242b1, 0xc0e5fad8, 0xb5c45f6f, 0xf910703d,
- 0xe14e271f, 0x10ff5872, 0xcf018d6f, 0xc97ef6c8, 0x4bbd23f0, 0x5afb1f48,
- 0x123ae1e5, 0x4d8a8352, 0x3fd277f5, 0xbac016dd, 0x9f7b433f, 0x99c7e254,
- 0x3f49f7fe, 0x997e4ffb, 0xf7b96641, 0xb7c7d8b4, 0xd27cb45a, 0xb809d69f,
- 0xa7a38db5, 0x3c8ba5af, 0xe215b14f, 0xa57e4b9e, 0x009cc1c7, 0xfef1223c,
- 0x5106765c, 0x655a17ee, 0xe7f882cd, 0xce56043c, 0xe7a37f23, 0xfd01ef6f,
- 0x941fe1a7, 0xf57abdcf, 0x182bc21e, 0x627deeb7, 0x89bfb9c4, 0xbb6d69f4,
- 0x6b84ff04, 0xfc4cf75f, 0x5e46097b, 0x1db6beeb, 0x3ca0c647, 0xee8b9eab,
- 0x9ee861b7, 0xbdf7887b, 0xefa39a1b, 0xae957c1b, 0x308996e3, 0xea0b7091,
- 0xb307fb13, 0xd4569d02, 0xfd3257df, 0x7e5e109f, 0x84f824c9, 0xf2ed957e,
- 0x671eb713, 0x1a3f844a, 0x8f7c00b8, 0xe8676f28, 0x99d7dd30, 0xa5e59cdb,
- 0xa22ae639, 0x165b6e7d, 0x825019c7, 0x9837dd32, 0x19cbf9ad, 0xd3b4075f,
- 0x9d642f74, 0x89c127ee, 0xa3bf287f, 0x8e9d6baf, 0x460beee7, 0x82073dd3,
- 0xdacf348d, 0xd1fee29d, 0xbff19dfa, 0xc7d5a813, 0xc9d4f0d9, 0xac0e9423,
- 0xabf7450e, 0xdb53f9d5, 0x05bcfd16, 0x9f181278, 0xfccf7d11, 0xfc43ef4a,
- 0xc5f02cc9, 0x4638fadb, 0xc219b5fb, 0x7920d67f, 0x410ddf24, 0xb37a4839,
- 0x0b240a4e, 0xd58e0b7c, 0x865df748, 0x65f7e8be, 0x73f9d6ef, 0xbc2fae97,
- 0xfabf7c2e, 0xe7482919, 0x24ff7780, 0x427bfe9a, 0x1f49cbab, 0x5d6af562,
- 0x6df199b3, 0x10b46c15, 0xabd58dbe, 0xd97d21aa, 0x1638a16e, 0x7e354fab,
- 0x0b7432e9, 0x4b7ffe5c, 0x98737af7, 0xffeb4cf3, 0x3ca5e4a7, 0xdfadbdd4,
- 0x745e50b1, 0xf9087da4, 0xa27a1956, 0x5defebaf, 0x7c86edcf, 0x9614c0a7,
- 0x15da456f, 0xf1a283ee, 0x50cb6ef4, 0x7cf3929e, 0x38139d83, 0x51e86e3f,
- 0x3ae538ec, 0x14fc5f2e, 0xada17970, 0xa2f84ed4, 0x2bc9b8ff, 0x4c7f9ca9,
- 0x4c3e442e, 0xe103203f, 0x89f6fe8d, 0xfe90078f, 0x37c513a2, 0xfa7cbe52,
- 0xd83f8c09, 0xf8b3a7be, 0x115fe457, 0x7d91a4e1, 0x4ecaec2f, 0xc783bdfb,
- 0x7fddf8e1, 0x4af1e8bc, 0xcbc7c3f2, 0x095c698a, 0x787ee9db, 0x1a871e91,
- 0x7bd9178e, 0xfdd1e222, 0xbedf2219, 0x47fbf3c5, 0x78b05f69, 0xf94bd9b7,
- 0xff66c58e, 0xafa404a6, 0xdbc905e8, 0xe91db380, 0x8d24f43e, 0x52a4f68b,
- 0xeef166be, 0xf6a21def, 0xd3125dda, 0x28bedfdf, 0xc7c5177f, 0xb8a5eca0,
- 0x577edfd7, 0x7384bdc1, 0x7053dc0e, 0x477cc457, 0x595f8552, 0xe399957c,
- 0xfb68ef71, 0xcf292b2f, 0xff60eda9, 0x1eeafbd0, 0xb0577f0b, 0xae0f024b,
- 0xf0e788f3, 0xbd0f89af, 0x085a4efb, 0x9eb697e4, 0xf1c4f4b4, 0x017946cf,
- 0xce6171b7, 0xf0b76ebf, 0x7fbdca3d, 0x6b7ad0b0, 0xf388fb9e, 0x126d5569,
- 0x67aaa7fb, 0x794aa0e4, 0x58af039e, 0x9a6b3f48, 0x8accc825, 0x3b73376c,
- 0xffaa51f9, 0x65f859b1, 0xe8d7ef43, 0xbde86e7a, 0x746b0761, 0xd6d775ff,
- 0x17bf3d68, 0xf1a32db4, 0x579a7bfd, 0x7ba03df3, 0x5ff7c7b6, 0x4b5c1f86,
- 0x2b8c52fb, 0xee8a9f6b, 0x7e90df77, 0xc7bfd807, 0x3257c9fa, 0x3cd8f3f4,
- 0x1efd23c8, 0xaf19af0c, 0x7e7d3da0, 0xc43c6ab2, 0xd71e3bc9, 0x8f1180f1,
- 0x8e26c307, 0xfafdb167, 0x285a2eda, 0xfe76d68e, 0x796bf199, 0x872c67fb,
- 0xbe0ef6ca, 0xf83d7aaf, 0xaf1f1037, 0x093bb76f, 0x08f65747, 0x6ed25fbb,
- 0x6a25060d, 0x7c91350f, 0xef8a0ffb, 0x4aaf023b, 0xf740a38c, 0x7e756b7f,
- 0xb7f9d5ae, 0x6367f716, 0xbe8adee8, 0x772fbeff, 0xe2ba11c7, 0xdf3fa351,
- 0x993875d0, 0x8404b8f8, 0x12125a7f, 0x8790c9fe, 0xc6465f11, 0x936f81d1,
- 0x5bb7d73c, 0x52f8bef9, 0x6d2d3fdd, 0xc593df16, 0x51fe8bfa, 0xddff9176,
- 0x8d8de82b, 0xd1ad35a7, 0xd232bbfd, 0xea0fdfd9, 0xc313f878, 0xa8f2e8df,
- 0xe53b001c, 0xb2a897e7, 0x756fc837, 0xe77f342b, 0x63cf8954, 0x2989d53b,
- 0x194beef1, 0xce3a9be5, 0x28cbea37, 0x43fd864f, 0xc88eedce, 0x998dfc7d,
- 0xcd54af02, 0xcf240391, 0xb2cf7d88, 0xfbb8aca2, 0xba44c81e, 0xd77b18df,
- 0xee45a64f, 0x391db47b, 0xf23f67a4, 0xf7495e3c, 0x859b6a8c, 0x8d9be87f,
- 0x8e10a6e3, 0xb5c5c607, 0xed20d6cd, 0x4f516a4f, 0xd2307bf8, 0x76bfb20f,
- 0x07e91169, 0x991c1bee, 0x3a72cf48, 0x2ff5c4aa, 0x8e38c36f, 0xfb2f2fc3,
- 0x2baaf58c, 0xf91bac5b, 0x1f6f1996, 0x6a1fec61, 0x6a9c092e, 0x7a51af3c,
- 0xd3511f78, 0xb749ee2e, 0x5c62fa1e, 0x7fdbd216, 0xadc63fe0, 0xa64172c6,
- 0x25adbf88, 0xbee98fa1, 0x6cbbe7cf, 0x90e8f171, 0xd89a97bf, 0x3dcc6a0e,
- 0xf6578c19, 0x859c2e23, 0x4fedf37e, 0x7ad1df79, 0xca4ee128, 0xf1adf5c7,
- 0x837e778f, 0x13601a9c, 0xd04ddae3, 0x9ee125c4, 0xd6b81c48, 0xdd0fee29,
- 0xaa49c7ab, 0x7d7a8c3f, 0x505df62f, 0x03eecf14, 0x2349e65b, 0x1bf0dded,
- 0xafbebad5, 0x85b1d82f, 0xf163e08e, 0x476eb551, 0x2e39fc88, 0x1ba2e2c6,
- 0xb4f5a333, 0xc644ede1, 0xb25dfdb9, 0xc8c47293, 0x268c5fd7, 0x7ef887dc,
- 0xeb3c9c42, 0xf65ebaa7, 0x0dbee7ea, 0x9be665b3, 0x0d288e69, 0x63fc851c,
- 0xe93fe97d, 0xcb1c5208, 0x04caf83f, 0x9dfaff92, 0x2445e7d1, 0xdfdf3f3f,
- 0xaafcb20a, 0x433e7643, 0x3a5fda32, 0xd214caf8, 0xbfb786b3, 0xf97211cc,
- 0xbee2c722, 0x29467d3c, 0x6bff6249, 0x77fec492, 0x4c93d2af, 0xfddaaefb,
- 0xfef22fb2, 0xbbe721bc, 0x4501fd75, 0x65d7f3ec, 0x686f5caa, 0x3a617a17,
- 0x7f9c4a20, 0x98f3c8a5, 0xed11fe79, 0xdf2fa825, 0xe880b710, 0x9ad7d942,
- 0x6aef5807, 0x9e989a4f, 0x198393da, 0x05ef11d9, 0xf24cdc68, 0x801b0d7b,
- 0xc81d88fd, 0xefa52ebf, 0xf4beb10f, 0x59c604fd, 0x523c2647, 0x64adce29,
- 0xc0f5bf20, 0x239355db, 0x3d05fffd, 0xc1f103ee, 0x09e72843, 0x87cebf31,
- 0xcf42b04d, 0xae3d0813, 0xee2c71d2, 0x8ec89aaf, 0x5757f763, 0x0e1b938a,
- 0xc7f28158, 0xf9581fb5, 0x07e7d03e, 0x1dfef2d6, 0x9327f92e, 0xffbf019f,
- 0x381f2a6a, 0x3d3276b8, 0x7de0fcff, 0x53de7357, 0xaf9514da, 0x1e7d1008,
- 0x797f51c2, 0x97c276f7, 0x90e30a5d, 0xefd3aea2, 0x9eeaf9d2, 0x9b7f3356,
- 0xc677bd59, 0xc22481e3, 0xe94b454f, 0x9d3d9dbb, 0xe21feb0a, 0xd947032a,
- 0x289d4719, 0xe3851c66, 0x165c82f8, 0x71a4ab1f, 0x5314944f, 0xc9db0937,
- 0x1c274d9f, 0x55c618e5, 0x9cff3d06, 0xdfd9f829, 0x13f9c091, 0x3d082609,
- 0xba28ec83, 0xcebeb437, 0x9b436438, 0x86bdf51c, 0x4ceedef1, 0xd7b0ae71,
- 0xa490b3d0, 0x16ed1eb1, 0x3db686e5, 0xf58eb419, 0x49eb10e6, 0xb459b343,
- 0xd02512ef, 0xfe0ccdf9, 0xfc4038d8, 0x508f4639, 0xc12c4b2e, 0x77db84b1,
- 0xea079d0f, 0xe28239f7, 0xe7635d5e, 0x686efe44, 0x3b8f73b0, 0x4e39fc5e,
- 0xda4833e7, 0xcfb12cd1, 0xf3af6d89, 0xd1c67c88, 0x1e2fc233, 0x1c9fe85d,
- 0xa0063fcf, 0x25fbec44, 0xe6ad7e5d, 0x86e7788a, 0xdf223d1b, 0xe06c73b4,
- 0xef71e928, 0x373e04ee, 0xfc8975ec, 0x974b696c, 0xac9fdf42, 0x5066956f,
- 0xfb9f5a3c, 0x829f7f1e, 0x83f662f5, 0x6e7cfde1, 0xbbbcdd5a, 0xc7438b5c,
- 0x1f111eb5, 0x01dfe897, 0xbd57d08f, 0xd37814fd, 0x23da7df5, 0xaa735e74,
- 0xdf14ecd2, 0x4aef8999, 0xf9fdd06f, 0x199bd78d, 0xd5029d37, 0xc43fce2c,
- 0xdd32c539, 0xfba0df2f, 0x3bdf11f9, 0xe99b033f, 0x7bf6229c, 0xdfafc367,
- 0x02bf8fac, 0xf6371e34, 0x759bf5bb, 0x2fb3783c, 0x4ca5f9fa, 0x40e6071d,
- 0x10f3f90e, 0x1fbe17c6, 0x71c09791, 0xa4e1697a, 0xbe8338b8, 0xeff55703,
- 0xe434a9c0, 0x1ef790de, 0x1da3bfc7, 0xdfcd7e67, 0x80499c13, 0xc5ce7109,
- 0xe19fe386, 0xf2126e3f, 0xaa2dcfd6, 0x4346829d, 0x077017ee, 0x331f9916,
- 0xe7463ee4, 0x3df1ffff, 0x22505ca0, 0xcacf25f3, 0x3a1e31fe, 0x32f5b09f,
- 0x7015c6fe, 0x8d75c41c, 0xce35d7d3, 0x2edf111a, 0x571d5a8c, 0x97d7877f,
- 0xc471101a, 0x9e4765cd, 0x75627195, 0xfbefe248, 0x5f62c3aa, 0xf6265d5b,
- 0x02ad39b5, 0x49bcaea7, 0xbb126a0a, 0x9f44d3c3, 0x6ccb3fa1, 0x7a180f02,
- 0x5d2fc432, 0xf09da87f, 0x3e3a0dc1, 0xc9a3d588, 0xf94e5dd0, 0xc9345be6,
- 0x472af74b, 0x6abf3f7c, 0xeee4ebda, 0xa83fa72f, 0x22effc6f, 0xf7d1eac4,
- 0x0f12fe3d, 0x07bad3e5, 0xb8e21657, 0x38fef347, 0xe6a6f48e, 0x2df37632,
- 0x3b13888f, 0x7ca3811d, 0x3f45a4df, 0x6bc96fef, 0x747e21eb, 0x5ec7e302,
- 0x3a96f757, 0xbcf9ab81, 0xc09c3abf, 0xb75643df, 0xb0bbf9db, 0xc486d6ea,
- 0xdb7fe429, 0xeb1daf65, 0xcb27ad95, 0xc5959f88, 0xad4a3ee2, 0xb38bdc5d,
- 0x10b7fdb9, 0x3b7bd77d, 0x2df9cfa2, 0xfba8bf3d, 0xf58f3d06, 0xa3091b6b,
- 0xf9f7ffa5, 0x5f3a8d9b, 0x662ebaea, 0xfea9c14c, 0x26afe690, 0xfd7fec7d,
- 0xd71d0b45, 0xa5794ed5, 0x6d88ae9c, 0xefdca8f4, 0x268ee6fa, 0xf41af34a,
- 0xf93a6477, 0x707351f9, 0x82d2e32c, 0xd30fbb2b, 0xa98dffb0, 0x3afaa714,
- 0x225aa6ba, 0x0eb7bbf4, 0xf91c73fd, 0xb7498035, 0x494b855b, 0x25e3a15a,
- 0x07b5c761, 0x3e50670e, 0xb7ec97a2, 0x9277ca80, 0xdeacb1b3, 0x12792a7d,
- 0xa2bd96e1, 0xed19c21c, 0xa503f732, 0xa4e6e727, 0x3336fa48, 0x6242177d,
- 0x9c9e801c, 0xdb230b1b, 0xdb5fff1b, 0x27a4d3ff, 0x289eb6d5, 0x84f837fb,
- 0x3efe7cbb, 0xcad07037, 0xbce808f7, 0xc72115c6, 0x347f7457, 0x644b370b,
- 0x7ce873a7, 0x3b41227a, 0xbaa0ee05, 0x95256102, 0xb2c38bce, 0x96377fb8,
- 0x7d4cf7fd, 0xb84a59fe, 0xcee9ee83, 0xe737cfd1, 0x5bf73742, 0x6a5c0300,
- 0x8befe4e0, 0xe5cbc04b, 0x29785b6c, 0x5082f1c1, 0x099fbf88, 0x73ae2867,
- 0xe63bfd36, 0xe61ef238, 0x0ce7029b, 0x0e74c87e, 0xc51bee26, 0xd75ba7ae,
- 0xf09a59c7, 0xe81d043b, 0x086cee7e, 0x35fbb9f1, 0xbf9fa3d0, 0x3cc75fc7,
- 0xb9d1f7dc, 0x3b6835db, 0x208fcc89, 0xeb4f9ff7, 0xa7ade391, 0x198b2967,
- 0x9f449cca, 0x7d30ce14, 0x75f2bf6d, 0x4d6dd3f4, 0x35df57ca, 0xe283f7d0,
- 0x6f4b73e1, 0xd3ef1bfb, 0xd349627d, 0x97b1bef8, 0x7da6c7be, 0x1de9584e,
- 0x001363f2, 0xe6b3ddfe, 0xc66f6437, 0x07e01af7, 0xfb11e7be, 0xf72d06bd,
- 0x3df0c67c, 0xedfee335, 0x950ae0ce, 0x72ac6fee, 0xaddff8db, 0x9b8db229,
- 0x87ca794e, 0x21bffcd2, 0x6a40fabd, 0x2883c49e, 0x638c19c1, 0x83b2bd75,
- 0x2fd69b36, 0x22f7c669, 0xf432fac0, 0x006401ef, 0xb763b77f, 0x1e69ef8a,
- 0xa5b43d41, 0x74abf6b2, 0x2e1e1a0f, 0x6f2f0955, 0xf4bcd971, 0xe895ae5d,
- 0xdbd30fb5, 0x2ff71368, 0xb16bfb8c, 0x19b46c7c, 0x8daf4a79, 0x77cc7671,
- 0xfcda773e, 0xbfe89bff, 0x9324a93f, 0xcbfe6070, 0x3f7c3df3, 0x7758c129,
- 0xc3eb8d89, 0x37191af7, 0x6a7dd136, 0x1d31aba4, 0xe1e21ef8, 0xba149ca4,
- 0x25d8d51f, 0xa63753ee, 0x9b7fd807, 0xf3fb12fd, 0x4ed2cec6, 0x0e83078a,
- 0x6eabdf0b, 0xaa34f974, 0x4421d207, 0x3ef89ffe, 0x69bbe0df, 0xfd21ef98,
- 0xed91505e, 0xde5157d7, 0x4a1b9a7f, 0x6169bde9, 0xc0f1126d, 0xe5061ef8,
- 0x4e9519e0, 0x7befa3cf, 0xfbe4e281, 0x501c8617, 0x9d995831, 0xb90d7cba,
- 0xf8ef8d38, 0x3bea2535, 0x8f5e437e, 0x98b90a24, 0x44cc26e9, 0x71a5577e,
- 0xf061e563, 0x8f00bffd, 0x30b1ae40, 0x0030b1ae
-};
-
-static const u32 xsem_int_table_data_e1[] = {
- 0x00088b1f, 0x00000000, 0x243bff00, 0xa3f0c0c3, 0x4aef811e, 0xf1303031,
- 0x12d18aa2, 0x6064e3ef, 0x6062e010, 0xfbe20530, 0x330c0c3c, 0x204cf480,
- 0x6066e516, 0x1ae20310, 0xc40dde20, 0x19f8807b, 0x1039fc50, 0x1be200ef,
- 0xbefd103c, 0xfe0c0c4c, 0xc4081c40, 0x95fc40c1, 0x1be18181, 0x73f6f103,
- 0x4c30330a, 0x2ff04715, 0x249fd903, 0xc1ffe7e9, 0xe90c4386, 0xa071df6b,
- 0x10acf37d, 0x7b20467c, 0x9aaa15be, 0xcdf85605, 0xbf268858, 0x18bf8d08,
- 0x0372fe8f, 0x4d5afe54, 0x81b5b334, 0xcd4909e9, 0x6efc4d3a, 0x40aac741,
- 0x3101a9ff, 0x5ff1ad00, 0x000368ca, 0x00000000
-};
-
-static const u32 xsem_pram_data_e1[] = {
- 0x00088b1f, 0x00000000, 0x7de5ff00, 0xd5947809, 0xe77df0d5, 0x9926665d,
- 0x6c81bc99, 0x44d84eac, 0x4242740b, 0x61db101a, 0x8311688b, 0x9817054b,
- 0x264f6408, 0xdac7f520, 0x0040cfef, 0x8b435151, 0x03b45a35, 0x341b0504,
- 0x024180d8, 0x2d82e00e, 0x5d6ad8d5, 0xc8a0d2da, 0x1b80931a, 0xcefc5dfe,
- 0xc9bef739, 0x82264efb, 0xf5ffffb5, 0xf8f8fefb, 0xf77bee5c, 0xce73ddb3,
- 0xe28ef73d, 0x8b94c718, 0xff12fb18, 0xdd98c7be, 0xd71b18c6, 0x2356329d,
- 0xcf2d4ad2, 0x03d058cd, 0xac62ccff, 0x9785addc, 0x653633a7, 0x5ef91a87,
- 0x4837e412, 0x6de43b61, 0xde549d7b, 0x23e79ebe, 0xa0cfcd6e, 0xc83aa3fc,
- 0xdeded04b, 0x5065c0f2, 0x6643b9de, 0x91dbb11b, 0xc2963671, 0xe30731d8,
- 0xcf90597f, 0xc9c0ac66, 0xf61be5b3, 0x45f6c5cd, 0x84e6764d, 0x1577cafe,
- 0xf20cbcce, 0x86550785, 0x2f37ca55, 0xbe43fad3, 0x60352c38, 0x9f3263be,
- 0x1ca7685f, 0x3bf50cde, 0x37292d3c, 0x553b18b8, 0x8d5e60e5, 0x4b776ab1,
- 0x18a3f5ca, 0xcf6f092b, 0xf52576c5, 0x3a970f92, 0x97e6c765, 0xb6bae1fb,
- 0x97bb3e4a, 0xf12dd2b1, 0xcf923bcc, 0xfff84be1, 0xf91ca358, 0x862f941e,
- 0xb7e83275, 0x32e4d590, 0xab5fc719, 0xf0dddd79, 0xd3a5553b, 0x7cbe4638,
- 0xed038c2b, 0x7c969e2a, 0x1b0ac4b8, 0xf8c0340b, 0xb39cbbed, 0x7d70b937,
- 0x6e11b4cb, 0x1addd75c, 0xe70c2bd6, 0x717a74ef, 0x5cb41b7e, 0xbf592f28,
- 0xd5182b41, 0x96e95fdd, 0xd579d6be, 0x980d4d0e, 0x53d3a3ca, 0x47798c55,
- 0x184be774, 0xbf4037f3, 0xb36b094c, 0xff7f7746, 0x96322580, 0xcccbfd8c,
- 0x1feee8eb, 0x43df4920, 0x013fc16f, 0x6e3da15e, 0xb781ab82, 0xdbfc3ac5,
- 0xb3b78ddb, 0xd2a3c0ba, 0xdfee6d99, 0x3c401f48, 0x106a7cc0, 0x11ae904e,
- 0x644cf3f3, 0xdfe81493, 0xc4ba67e3, 0x918f5f7a, 0x9f2ca8d6, 0x420b58c1,
- 0x78cafda3, 0x116c6bc8, 0x93f631f3, 0xa9fed915, 0x059fbf90, 0x57bce2e6,
- 0xc4228148, 0x690d6313, 0x24abbf48, 0xffd71b36, 0x94349c03, 0xf407eaaf,
- 0x99af7009, 0x9649bd96, 0x3093dcc4, 0xbdc4f05f, 0x214fd4e9, 0x3f42a3f5,
- 0x8fdfcf43, 0xe9639b9e, 0x322dcf47, 0x7ea4a9fa, 0x4fd6179c, 0xeb04ee4d,
- 0x8c4b727c, 0x7ea4ee7e, 0x2eb617dc, 0xd6898afd, 0x46515cf9, 0xf54e24fd,
- 0x97d400b1, 0x2d607a0d, 0x5dca197e, 0xef6389f5, 0x98ba6665, 0xdff912bc,
- 0xa6625c0d, 0x389c848b, 0xfa261d0b, 0xd0f258fb, 0x5bec7e93, 0x44830f22,
- 0x5f1a3512, 0x5c91afeb, 0x41dfd498, 0xbbfa3bf9, 0x31dc2e48, 0x0eb17021,
- 0x59b1c0f3, 0x816bc83f, 0xe831eb6f, 0xb129e61a, 0x36bd4c34, 0xefcba34c,
- 0x261c3956, 0x88dbf80f, 0x683ed023, 0x08911ceb, 0xa4d2f5f9, 0x365c205f,
- 0x27c11b33, 0x5895664e, 0x48cece2f, 0x9e9ddd3e, 0x826429bd, 0x8041ead3,
- 0xdfbba97f, 0x0d206ad5, 0xbb2b6be9, 0xb6f48421, 0xa7ac106a, 0x854f633f,
- 0x14848fae, 0x20a83f68, 0xd769e10d, 0xe3cdbf80, 0x28fe306b, 0x1c19ff1a,
- 0x3f8e0777, 0xf6f8e7ae, 0xf1963921, 0x2c8b831d, 0x18343be3, 0x177de81f,
- 0x0f8c55bb, 0xd8c09bf0, 0x685ba1f3, 0x22e0fb7c, 0x8d6eff1a, 0x8fc65915,
- 0x05ff1aeb, 0xdea5f71c, 0x82643fd6, 0x4b83fd75, 0xf8d7ebac, 0x0b655ffa,
- 0xb471f8c5, 0x16875ffe, 0x2e0ff5f0, 0xf3b7ebe1, 0xf7c6bb7e, 0x75ffc174,
- 0xdeadf71c, 0xa2743fd6, 0x9517fd75, 0xe76fd759, 0xcacbbfe3, 0x1a2ef8c5,
- 0x1950bdff, 0x6545ff5f, 0x0f66be34, 0xb48e90f8, 0x61957101, 0x040d9f18,
- 0x19254886, 0x6474a7e5, 0xfda0c61f, 0xe084363a, 0xa71c4770, 0x80e2cfbb,
- 0xf2dd5cde, 0xa381858e, 0x08559e40, 0x9fad2f9a, 0xa59ca1a4, 0x0b7b9072,
- 0x341754c5, 0xb0dfb4c9, 0x70f0f675, 0x937b6f98, 0x82fcc21c, 0x65879775,
- 0xbcd8efda, 0xb0a76c3c, 0xf0ff7e08, 0xcd1bd1a1, 0x174e8aeb, 0x5ac7a8d6,
- 0xe7c47c2d, 0x46cc9a13, 0xa6fcc256, 0xeb8c1121, 0x39031fce, 0xbe7e40ca,
- 0xd314720f, 0x078c2ae3, 0x9c828fdf, 0x6f999a65, 0x3c7df196, 0x991cdec4,
- 0xff827282, 0xbc38531d, 0xc2912d8f, 0xd6cfff08, 0x3e50d22e, 0x0789ac2d,
- 0x23cf77a0, 0xb9d4f028, 0x28de1ecb, 0x08bd3c11, 0x15891b9f, 0xa033fb19,
- 0xb6801fb5, 0xa5afb6b0, 0xbc2c2ddd, 0xcfea0d32, 0x483f935f, 0x3cdff587,
- 0xc6563edb, 0x07f983fd, 0xd98f88d8, 0x630e5b00, 0xaad71bb3, 0xe3732003,
- 0x56fdf56c, 0x0139fb53, 0x22e6fe6b, 0x0d5dbe6b, 0x37d436ab, 0x01b6258a,
- 0xd2b5bdfa, 0x938c6e5a, 0x78e1f528, 0x20fde315, 0x56f7cf85, 0x67e2c74c,
- 0x573af09d, 0xf98d6de7, 0x5952ef04, 0xd6cbf684, 0x4ff84664, 0x9d017aa6,
- 0xbe7a3baf, 0x3f875573, 0xf733e60f, 0x19e0994e, 0xbdd6ff3d, 0x18db7ef1,
- 0x563f6b48, 0xd7a30491, 0x79ff3d13, 0xa3d7a34b, 0x1f961e93, 0xe9cefe8a,
- 0x829e9a24, 0x1efa934d, 0x6f2bd535, 0x51efb2b8, 0x6e957d13, 0x17c96599,
- 0xea58e787, 0x5be6d617, 0x14d617ca, 0xafe7ca5b, 0xe7c9645e, 0xd4b4ee87,
- 0x9974b79f, 0xcad6fca5, 0x37e52c7b, 0xe4b5ad17, 0xb11e04e7, 0xf671bfd4,
- 0x6db94b06, 0x20d725ef, 0x25bf551f, 0x3e37dc33, 0x007b1d75, 0x5d4fc1f5,
- 0x71f10f8a, 0xf88d2aa2, 0x979554e0, 0x4ca6f6d2, 0xc1cb8047, 0x16715402,
- 0xad086b2e, 0x9972889e, 0x5e2cfc91, 0x1a1433b6, 0xe4adda08, 0xb5c9f825,
- 0x09008b1a, 0x174fac4b, 0xa6ca771d, 0x94f3d6f2, 0x67a302d7, 0x5b972cf6,
- 0x6e0f7f63, 0xe508bfcc, 0x00f26f4e, 0x3ee006fe, 0xf7f621d7, 0x8932f2e8,
- 0x33bf99e5, 0xfcf89cb6, 0x65b3909c, 0x6507971a, 0xcc9bf6cf, 0x8c8f983c,
- 0xf1aafca8, 0x28a8001a, 0x4f3b5127, 0x2e007a46, 0x50cbd0b0, 0xedb77f0b,
- 0x155e6993, 0xa3c61328, 0x235bc9c8, 0x1c899ca1, 0x68dd7c18, 0x7eff879c,
- 0x29aef909, 0xb6f5f699, 0x8f7de9aa, 0x3c9f882a, 0xcd544f4a, 0x5558f4a6,
- 0x55a3d280, 0x5fbe9445, 0x6b694955, 0x0f4a52d5, 0xfd288557, 0x4a6ad553,
- 0xa1aaabdf, 0x5aaa9df4, 0x1550ff4a, 0xcabdb4a6, 0x0fc1a94f, 0x4937f25d,
- 0xaec2abe8, 0xf2879d80, 0x565ac567, 0xe216dd40, 0xf21a5f73, 0x67d759f9,
- 0x3f1f5023, 0xa19d8efb, 0xbc1bdfbe, 0x77ade9a2, 0x5f49fa3c, 0xfe030829,
- 0x3b967b33, 0x9c9e38e3, 0x819d3636, 0xf867ba3c, 0x46dbbe13, 0x92415e51,
- 0x37c90d81, 0xdfa31dcb, 0x75f08c61, 0x7f313c3e, 0x016b98e7, 0xfd1ec71f,
- 0x7bf6366b, 0x717fec4e, 0x87da6407, 0x0f936459, 0x1b8265f1, 0x3b9d6bed,
- 0x0f3fbc84, 0x82ea3efc, 0x0660cb5f, 0x274904e9, 0x68db3bfa, 0xb1c20a67,
- 0xc40c3e39, 0xdc1ececf, 0x5c7e41e4, 0x8fd3669c, 0x918380c6, 0xeba43796,
- 0xd3fef32e, 0xad9fcd64, 0x9037a691, 0x5c26f63c, 0x4ae91a3f, 0x430e8fd7,
- 0x6a51a7fc, 0x4d38b3f4, 0xbf028fd3, 0x3432da9e, 0x61dfef81, 0x27d60fbe,
- 0x5d82bd12, 0xd5fff548, 0x1fade9f3, 0x358c3e63, 0x281c0fb2, 0xe86ca00f,
- 0x1e9dedf9, 0xd13e5778, 0xcd5f10f2, 0x2fa867ea, 0x5fffc1c4, 0x157ec10e,
- 0x06fd1bca, 0xfd90e41b, 0xdebdf8db, 0xf3b41e32, 0xb637361a, 0x156e9deb,
- 0x2ba0cc76, 0x1a1a5790, 0x3a2764cb, 0x92beeb75, 0x9b07c968, 0x7d96e9fa,
- 0xfc01ff06, 0x4152820f, 0x541329ba, 0x56b8a1d4, 0x23bf304b, 0x2f013f28,
- 0x2bd78941, 0x016ab477, 0x0b63912f, 0xe5b719ea, 0xbde77418, 0xef208ff1,
- 0x6546fe4f, 0xdff962f7, 0xae505660, 0xf720c51a, 0x2f9f906c, 0x9635b772,
- 0x77b940ce, 0xf9f7c6d2, 0x83cf2c05, 0xab114f46, 0xe73d46c9, 0x8e9b6623,
- 0xfb11fff4, 0x64d3279d, 0x1a6cf86f, 0x6afe73ad, 0xfa0b3eeb, 0x585f2599,
- 0xb017cd6b, 0xde90536b, 0x2ca9d60b, 0x5a5f212c, 0x9d36bdcb, 0x95642dfa,
- 0x41dec8ab, 0xbdc6057b, 0x00ca674d, 0x73f95f98, 0x43e7658f, 0xae363bfe,
- 0x7bf61ba7, 0xff3e1f71, 0xe6b0a492, 0x1a0ff287, 0x19707f33, 0x55e1f6c3,
- 0x6e42a728, 0x7accdbe6, 0xceebefe2, 0xc6be7a3f, 0x8fc3d27e, 0x6f21e620,
- 0x18e1fc91, 0x9b34da76, 0x10adf424, 0xbae037a5, 0xbe6b7a4d, 0x2f918380,
- 0x19dfca8f, 0xe9a7ff95, 0xe859892d, 0xc81488ed, 0xfa7325b7, 0x52417d42,
- 0x37c0b53a, 0x47ad3fe9, 0xd2ffe5ff, 0xff4207fe, 0xeffe96d9, 0x3ff697fc,
- 0x57fcc7ac, 0xfcbfeac6, 0x433b6db9, 0x9e4a6f20, 0x60c3c879, 0xd4a93a1f,
- 0xef00f8a4, 0x7a579b65, 0x284e0e90, 0xf3d20f21, 0x3d3cb0c9, 0x46ec3d16,
- 0xa23f207a, 0x5da125df, 0xfe84ff85, 0xdf753d4f, 0x3e67dc4c, 0x64eaacdb,
- 0x47b16d2f, 0x15afc0ec, 0xd833c58d, 0xe11fca18, 0xc8cfbbf9, 0xd2b26f98,
- 0x0f93c967, 0x0dffa0a5, 0x13d84528, 0x2a97d211, 0xc5cc3eea, 0x3ac8287d,
- 0xdba2cf89, 0xfae7f8e1, 0xd7cc7c90, 0xc3967a12, 0x0fcf493c, 0x1b04b80a,
- 0xa3233bef, 0x72b593ef, 0xfd54683f, 0xd4ffa122, 0xc749dcdb, 0xdf6274c0,
- 0x7941df61, 0x8583c069, 0xbd53feb9, 0x4731e1f5, 0x756d3e60, 0x2648f1bf,
- 0x767c1938, 0x2ffe61b6, 0x2ef6f79e, 0x9d85db8f, 0x4c2eddd7, 0x424dbced,
- 0x69e66b6f, 0xa0f11a99, 0x41b65c5e, 0x73e085f5, 0x4f4db6d9, 0xea768a3c,
- 0x70df76bb, 0xdb74bfe8, 0x00987f4b, 0x6aedd2f9, 0x0cbe0cd2, 0x8dd29497,
- 0xbffc2097, 0x1e376c74, 0x92f57e4a, 0xbfcd1de6, 0x374fb8ff, 0xefb74a3e,
- 0x9ee8d8d3, 0xb0d298f0, 0xc5756b4f, 0x91c34bc0, 0xaaafd45c, 0xa1ae7eb7,
- 0xaf72793d, 0x63d352c4, 0x4bc373c0, 0x9c2027a4, 0x4f028f08, 0xaa8b785d,
- 0xa0bc041f, 0x14f0373c, 0x9080fe5d, 0x779fd83f, 0xd3f3d114, 0xf95fa3cf,
- 0xd70fbb3d, 0xedf4f45f, 0x71c75c1e, 0xf5d392a5, 0xfc532b63, 0x1a916e30,
- 0xe529d14a, 0xff1e8bdf, 0xe15bd121, 0xe14c3f1b, 0x9fbfa0f6, 0x50a6f68d,
- 0xc2df5c3f, 0xfad037f5, 0xe880580a, 0x3931e8ae, 0xa7e90c2f, 0x3dbe9b0a,
- 0xc8645f06, 0x6f8e2f9c, 0xd60b8504, 0x12ed1c77, 0x4efeb3f4, 0x7f266f0e,
- 0xfc8622c8, 0x3fc343ff, 0xfe5316ce, 0x93c70753, 0xbe09b643, 0x4ccf6d02,
- 0xb77775af, 0x0d4e7e20, 0x1fa2a7f2, 0x9094c82d, 0x25f48780, 0x31bf2bf4,
- 0x87e3952d, 0xd16c9579, 0x2980f40e, 0xb7eb1df8, 0x18e77ea8, 0x65f63b4b,
- 0x4bf8f77a, 0xef30f8c4, 0x8fbfdc38, 0x86a2b0a7, 0x55bbeb18, 0xf61ef836,
- 0x997d1371, 0x7bf39bf8, 0xbf9c3ddd, 0xcf0f7e0d, 0x9fe58e9a, 0x07f9c9bc,
- 0x08fee1db, 0x52a48af3, 0x25ebbef9, 0x16760792, 0x75d7c589, 0x6b6be0aa,
- 0x302741b9, 0x304a456f, 0xdff60bd1, 0xfe4fe087, 0xd07582b3, 0x832f24ce,
- 0x67a79e38, 0x16df067f, 0x251ffe0a, 0xad9ff95b, 0xbbdf7ce7, 0x7da2157e,
- 0x3f0d4cae, 0x2bf228f1, 0x8bc867f0, 0xe1e6aafc, 0x1fd74fba, 0xc04dc3f8,
- 0x0c1ba7a7, 0xcfcab53e, 0x7a2d3e68, 0xf484b376, 0xa7a7cd19, 0x891b408b,
- 0x3df0a7c5, 0xb5169f26, 0x9f953ffe, 0x8faefc06, 0x0f219f82, 0xb463837b,
- 0xf3633c3c, 0xe5a33c12, 0xc9bdfc21, 0x9fd27bb2, 0xbd067741, 0xfcb2fc95,
- 0x1817f222, 0x5dd06974, 0x741a5d17, 0x62ffc3d7, 0xf9745f81, 0xa005a460,
- 0x2a5dbc93, 0xcd6555d9, 0xda0c3cfc, 0x8b203cbd, 0x6da7e4e0, 0x3cbcd58c,
- 0x0f9cc920, 0xa21be547, 0xf2a3fbea, 0x03f55179, 0x4b4af951, 0x03a6b2fd,
- 0x47fc231a, 0x4becf52c, 0xc41f651f, 0xfd8c87b0, 0x4678d826, 0x0c5d210b,
- 0xde50d4ec, 0x0541cba9, 0x1ee0ff45, 0x0ec5ec99, 0x182af180, 0x3d3ea3a7,
- 0xb1b9021c, 0xf0edc9d2, 0xd3d3bd0e, 0x8995d207, 0xac99df3c, 0x67b942ad,
- 0x7274ef7c, 0x5e47473f, 0xe4d127a7, 0x2cd238a7, 0x0d6edc93, 0xfcb1e9b3,
- 0x10a3b020, 0x6f595bde, 0x39ffdce5, 0x5e0d764d, 0x8d68f68a, 0x771343ec,
- 0x3d1bb2c1, 0x68e92ae8, 0x6ccd7a1e, 0x55f0bef8, 0x3a558e99, 0x70633ce2,
- 0x57e42c5e, 0x89ec99fa, 0xb3f42c13, 0x6ccfe889, 0x82c576e6, 0x08fa5135,
- 0x54f4435b, 0xa1a25ae0, 0xe03b053c, 0x973fab7d, 0xe88945f3, 0xb5bd68af,
- 0x278f68dc, 0x95df685a, 0x03cb59d4, 0x65da3ec2, 0xcb0649f0, 0x86833920,
- 0x42e7da0f, 0x47934f2e, 0xf01b335d, 0x665071d1, 0xb6799e78, 0x9c7c2659,
- 0x97e7e7ad, 0xfc4c942c, 0x493ac156, 0x6a701a1e, 0x7ad27e43, 0x3b6d5797,
- 0x177d9cfe, 0x3fcecdd2, 0xa439f98e, 0x1ddf3b6d, 0xcff31bb1, 0x8ff83dbd,
- 0x29fd67ac, 0x37e49d7b, 0x26ebd8ee, 0xf3d8ef7a, 0x6177c1db, 0xfc9d977e,
- 0x3f7b1d75, 0xd7f4831f, 0x845edcb1, 0x1063aef2, 0xf7aa87b7, 0x95a63e7c,
- 0x8415d7f6, 0xc6b2a3b7, 0xbfe6a3f5, 0x3092961e, 0xdea15585, 0xf89e37b0,
- 0x93bcf829, 0x7b1d0bfc, 0xb80ecf3e, 0xa9eb75fe, 0x1d39e710, 0x59fb366e,
- 0xf7225f9c, 0x8f770783, 0xcf0cb4df, 0x69f71ba3, 0xca7ca1f3, 0x831f7f0b,
- 0x59e3b472, 0xabb50fae, 0x79e38fad, 0xe1fc8745, 0xdce01532, 0x9fd54c8e,
- 0x1194fbb1, 0x62a34393, 0xe4efe5c1, 0x7850b937, 0x4fb70a68, 0x21e1fdff,
- 0x73bcb9f9, 0xae121d87, 0x7fc38bfb, 0xd7b0b944, 0xac69744e, 0x32c374f7,
- 0xb7ce57d2, 0x7fcae1ee, 0xd0b83a17, 0x0b914b75, 0x7fd4f759, 0x5aff7c73,
- 0xa2fcd2f6, 0x9d27a0f9, 0xf283b9ef, 0x9a3edcdb, 0xfbd205ef, 0x7869ff82,
- 0xeef0167f, 0xabbaff39, 0xddd7cdff, 0x9d5fde3b, 0xe3aef02b, 0x85f07f79,
- 0xbefcd3bf, 0xf4db9cae, 0x0dee94df, 0x9b15febd, 0xa80ccdf7, 0xb8d59d3f,
- 0xb2b8b150, 0x19abff7c, 0xc0cafa50, 0xdf388903, 0x37e751c9, 0xc2a6fa46,
- 0x92c9a6ed, 0x9641ec8d, 0x907b0928, 0x840351db, 0x496030fe, 0x0321e901,
- 0xf5cfde06, 0x0ebbc506, 0x195f1fcf, 0xb147df3c, 0x3bec6c14, 0x97ebcc01,
- 0x9ed5bc8b, 0xfcc4b9fc, 0x603a3478, 0xa20ff7f0, 0xa0703b5e, 0xa42f7a4c,
- 0xe8beefa4, 0x97bfce99, 0xe7bb1669, 0x0ed5af4a, 0xdab24dca, 0x7f45534b,
- 0xff9f18d3, 0x0e149734, 0x0f261c03, 0xfa1269fa, 0x570f2747, 0x5cf90499,
- 0xf6815816, 0xb59754c5, 0xc33c06bf, 0xe1cdf719, 0x380d5768, 0x8797590e,
- 0x93241c70, 0x70bcf49f, 0xdc1379c4, 0x2f18c232, 0x871f14c3, 0x1f729f63,
- 0xbd859df9, 0x904d474d, 0x5aab5c57, 0xb4159f90, 0xc81c3997, 0xe5ec36ce,
- 0x84cde0de, 0x2188adfd, 0x032bf80d, 0xdad3bf65, 0x06dfd91f, 0x3e2ebe5e,
- 0x537e3c0c, 0x851bbcbc, 0x45ae8197, 0x74ebf20a, 0x62fa17b4, 0x1f9e0cfd,
- 0xdce85218, 0x570bd84d, 0xa4291778, 0x50cc0cf7, 0x8e855f10, 0x1cf0aba6,
- 0x6893e1c4, 0xd9b7171e, 0x64707f68, 0x91f70449, 0xdfe50ab5, 0x10e3e9b0,
- 0x7b2f33df, 0x0c9df4ee, 0x8d7ee6fd, 0xf1e53dc7, 0x7dbf8ff3, 0x8c44e5f9,
- 0x2c781417, 0x46afff84, 0x167aff3f, 0x2d380389, 0xa1285854, 0x25bdfa0f,
- 0xefc5bef6, 0xdfe3cd6d, 0xfbba5377, 0xdefd5f39, 0x3abfcae4, 0xd8e40e7d,
- 0x5df300fb, 0xe1a30e98, 0xfd5db315, 0xaf0e669d, 0x1e1621e0, 0xe42ae3c2,
- 0xe139d33c, 0x9ffe821d, 0x99b3d3fb, 0xea4ed768, 0xe5c09518, 0x17b230eb,
- 0x385ec282, 0x3e09bccb, 0xa1d7ca17, 0x79fe7409, 0x8ac77650, 0xec2adc2b,
- 0x7c625fed, 0x751da409, 0xfb4646ac, 0x1f92758d, 0x60e75437, 0xb7c4639b,
- 0x9f86abe4, 0xfe0de04a, 0x8f3c6ce4, 0x842fe3e1, 0xbc6609a6, 0xa938cdb5,
- 0x0789e98c, 0x74df7bfc, 0xe14df51e, 0xc23df68b, 0x35b3ab77, 0xfc862f37,
- 0x6fc05db8, 0xc82ffee6, 0xf36979ff, 0xefd21b06, 0xcb3675a5, 0x2aa96af9,
- 0x6cb1b1ec, 0xe66bce2c, 0x0b3ba77e, 0xf65072f1, 0xb0673c61, 0x88168cf9,
- 0x0d182e71, 0xfd1fec4f, 0x9d555be9, 0xdab76bf8, 0xbae11f30, 0x430723fb,
- 0x56977a3b, 0x9e6b1a53, 0x9e7cec03, 0xedc3590b, 0xfdffd263, 0xc3a93be0,
- 0xf68c9915, 0x7e131d67, 0x63fa688f, 0xb767c744, 0xafee30b0, 0x5ca2af68,
- 0x68cf5b38, 0xdc90077f, 0xf037768f, 0x7eccf7fb, 0xb69b8b9c, 0x82f512ff,
- 0xc749668a, 0xa50a8623, 0x6d3f4355, 0xc65129b0, 0x6bc3387d, 0xa3eabbc4,
- 0xc5f137af, 0xf8215556, 0xfbb0981e, 0xe3f71c66, 0x9ea18d36, 0xd3b17fe2,
- 0xc7f8fb83, 0x07c499cd, 0x57ebadbd, 0xa9aaec55, 0x972a3748, 0x30f4d187,
- 0x92ce3eaa, 0xf40e4cbf, 0x699afe47, 0x8be25d6f, 0x6befbf81, 0xbc173892,
- 0xfe16f940, 0xbdbe4cff, 0x80b7c869, 0xa3e2679c, 0xf90f5abe, 0xbe4b1a96,
- 0xa9bc962d, 0x7bc5f708, 0x5e22a686, 0x26aabf17, 0xdb6f92c7, 0xe739f8aa,
- 0xb94e2233, 0xdbe411a3, 0xadf24db7, 0x8c5be411, 0x5fbf9078, 0xff0b7ca8,
- 0x36dff0d7, 0xd6316f94, 0xf9566bab, 0x08f9a636, 0x8d31b7c9, 0xaf3c4b36,
- 0x7c9f3b5d, 0x47af9293, 0xeaa0f8fd, 0x3f418f8b, 0xfe3e84db, 0xc44b888c,
- 0xe5ce5071, 0x7f8d3a6a, 0xe2ee72a1, 0xff73950b, 0xe4367045, 0x8c1de2dc,
- 0xd27b9ce2, 0x8b739721, 0x939c8177, 0x9cb91e90, 0x7187bc5b, 0x7c945cbf,
- 0xbe43d91b, 0x115faa31, 0x4d83ede1, 0x02df0fe9, 0xabf58dfd, 0x1fd4073e,
- 0xc6ef2ddb, 0xae5de599, 0x7adc10a6, 0x1783bbc8, 0x0876ef26, 0x6c720779,
- 0x68d7ca08, 0x5b35f0fa, 0x8f77c1e3, 0x2bff5e3f, 0xcadf97e4, 0x86f8de74,
- 0xbd0f9f8d, 0x1678fe36, 0x6263d7d2, 0xdc848b3e, 0x510aaf6b, 0xe45e53ff,
- 0xf1772beb, 0x468af138, 0xd4561fae, 0xfc2f614b, 0x17c27733, 0x78bfce1a,
- 0x2152c48f, 0xebcecb3f, 0xf38d27b3, 0xc3233632, 0x720b0f44, 0xbfcfc94a,
- 0xf40e6140, 0x72ba97e3, 0x5bfea24f, 0xfefdc39a, 0xf17be2a5, 0xd0abadab,
- 0x88fc5dff, 0x46a5e744, 0xe711ab7c, 0xa26e23db, 0x77f91979, 0x6627e3a3,
- 0x99a9b88a, 0x1bc294bf, 0x947fc462, 0x7fe6b16e, 0x966ff822, 0x62f04adc,
- 0xe331cbaf, 0x5d7a8c38, 0xe0283a70, 0x7ed32754, 0x802705da, 0x27bd379b,
- 0x33d3009c, 0xe1ba5232, 0x585bfc52, 0xebef566f, 0xbd16e035, 0x5d7fc36e,
- 0x0de9fa2a, 0x7b9c060e, 0x1dc05fac, 0x939202ec, 0xa758f0d1, 0x33b5f975,
- 0xe8094e31, 0xe84ce486, 0x181700d7, 0x70d26f2f, 0x47977dcb, 0xd664fd05,
- 0xe1829a4c, 0xae30b30c, 0x6a0bca1c, 0x0fdcbd17, 0x1dee31e0, 0x4edc58ef,
- 0x0f609b2f, 0x56ec39e0, 0x41c92767, 0x35db0e83, 0x141c07ae, 0xbfddddf0,
- 0xf9d93272, 0x7a8e924d, 0x24e23048, 0xae0106b8, 0xa828ff78, 0x8106fc70,
- 0x0eff911e, 0x8719f23c, 0x8bc91ee3, 0x635c9dfe, 0x40bc42bf, 0x8e66ccfd,
- 0x22586097, 0x4eb164bc, 0xa8a97f3a, 0x84117c95, 0x8206d35f, 0xe19f219f,
- 0x93c665cf, 0x58957e89, 0x4c954bf4, 0xa8a965fb, 0xf35ed337, 0x99e7a407,
- 0x036caa4f, 0xfbfe1b81, 0x4f3e7a2a, 0x9e34501c, 0xb6bc04da, 0x085d3c21,
- 0x77ae4eb7, 0xfba0be45, 0x50794650, 0x39e05909, 0x5f73d1e5, 0x0671af09,
- 0x518e90bc, 0xcbe735be, 0x92682f98, 0x9ef4df58, 0x50f3c5eb, 0xef17bf33,
- 0x7ffbc239, 0x18b1f24c, 0xba60beeb, 0x8b9e85ee, 0xbcf16e80, 0x8eba37a4,
- 0xd23b5386, 0xe9bab7f3, 0xe76735f9, 0xe7a44ca1, 0x43f7123d, 0xce5a2734,
- 0x1e763d35, 0x09d5b5f7, 0xeeeb0f74, 0x6df5557c, 0xcb90c6f4, 0x7ed1ee82,
- 0xbb37df30, 0xf1821704, 0x141c5ba2, 0x18f347ef, 0xb353f5c2, 0x64e6de7c,
- 0xf567d7c9, 0x2edbbfde, 0x37278cc5, 0x3f2323f4, 0xf8c3c71a, 0x6f1826f9,
- 0xd178f764, 0x2fe183fb, 0xfabadf38, 0xbad9bfdb, 0x6ebe718c, 0xb590543c,
- 0xb88d5e10, 0xe05223a8, 0x73ea2a47, 0xfc4b25d3, 0xc85ccc15, 0x7fdc6fad,
- 0x7bdc96aa, 0xa74d2cff, 0xebb774fb, 0xe89d77c6, 0xe2674f33, 0xdbe26f7c,
- 0xec7fefb8, 0x5998e7e7, 0x44d6f636, 0xab263fdc, 0xe3eae90c, 0xa1f92a43,
- 0xe3ca9e3f, 0xedf9af6e, 0x3e5d0501, 0x3898f0d7, 0x8938cd76, 0xb037acec,
- 0x87a8f1eb, 0x047f983b, 0xef132fc1, 0xd5d465ed, 0x5f02f14c, 0xae12dd8f,
- 0xa537d4c6, 0xc435e933, 0x219924db, 0x7dfc5ade, 0x7f20b8a7, 0x032c87fb,
- 0x1fa0a05d, 0x07b1fb50, 0x9bd63259, 0x33264fe0, 0x27e37c66, 0xf3075e6f,
- 0xf6487f18, 0xacdea179, 0x03172e03, 0x7ae47a7d, 0xe0980b1b, 0xab858135,
- 0xfe34dfd1, 0x9c3affa8, 0x3be81177, 0xa8a44f78, 0x07b54379, 0x6a25ebf3,
- 0x72050c1f, 0x6d9eb03d, 0xbf7267b5, 0xea017285, 0xf5c13761, 0x4eb05ebc,
- 0x728861f2, 0x8a5af341, 0x93a82a2f, 0xb6e142e8, 0xbc1cc4b0, 0xfa03b0df,
- 0xda1eb6dd, 0x05fee167, 0xbadfb1ed, 0x875e6f33, 0x49e60772, 0xb9f9ebed,
- 0xf3b4017c, 0x2dd4bf22, 0xd78afea2, 0xb3ef0c4b, 0x7df3d514, 0xea90e8a9,
- 0x7dc6f2c3, 0x3b7ed08f, 0x97ebc603, 0x6450fb8e, 0xca0bdd2a, 0x2fba7494,
- 0xf84c1a19, 0x7f1c60eb, 0xf220fa6c, 0xabbc520b, 0x98a0c097, 0x64fe63e3,
- 0xb3f2421f, 0xeb4cb7c0, 0x96bcfd0b, 0x236e9c93, 0xe7433aba, 0xde0147ed,
- 0x1bf78001, 0x40e55e22, 0xab5a07ce, 0xc2fc5f69, 0x9b9f943e, 0x38a24d34,
- 0x8f2c858e, 0x8e6638e2, 0xebe6fae7, 0xe8c33b97, 0xed5d7be7, 0x0fdf881c,
- 0xefa76e5c, 0x1cb8dbed, 0x4ad41f6e, 0xebe3f6fe, 0x58f78655, 0x24cbd7aa,
- 0xabd78e2f, 0x7e563f74, 0x1431c78c, 0xc7fae36e, 0xdfbcf581, 0xe1b7a8e3,
- 0x439607b9, 0x11ab70be, 0xb4e5c0e7, 0xe8167f61, 0x90c2fe3c, 0xdd62e5bb,
- 0x75a1f63d, 0xd92434cb, 0x2e5b7968, 0x8b0971e4, 0x2d1cbd90, 0x1cf1cb77,
- 0x197c83dd, 0xeae6271d, 0x0e9063b6, 0x5d105f22, 0x19521c57, 0x07d231da,
- 0x9714b96d, 0x73a247b6, 0x92cdf18b, 0x8a1ae31f, 0x4987b1de, 0xcfe70eff,
- 0x2fee11fb, 0x8619daef, 0x3c431f2c, 0xb1ca9c80, 0xa7e9fe77, 0x1be4fdf0,
- 0x02088c0e, 0xb827c9ba, 0x59be711b, 0x4f3c799b, 0x52eb1bd6, 0x9b2f3ef0,
- 0x40e497da, 0x02ccad61, 0x52713926, 0x13775ff2, 0xd4741fdf, 0x667c0c77,
- 0xb8053569, 0xc1cb7ebf, 0x66ef55f7, 0xd8646315, 0x043d84ef, 0xf51f81f6,
- 0x6b5de29b, 0x1d133453, 0x9b59ef14, 0x55f7c322, 0x1aa61e22, 0x576857f7,
- 0x8a71f1ac, 0x06fa4af7, 0x3f4638d3, 0x2edabf46, 0x1ee86ede, 0x7fd6f69e,
- 0xd68fce31, 0xf286a9f9, 0x1e9fba22, 0x4fd2f7e3, 0xadcfefce, 0x395e6093,
- 0xac14cbaf, 0x8cc1f39e, 0x1d73e6a1, 0xcafc3523, 0x98da6fd9, 0x9cbf04df,
- 0xbf31c53f, 0xe38ddf09, 0x7f01ef80, 0x61493757, 0x23f984a7, 0xe516795a,
- 0x0af3e475, 0x98ae7fe1, 0x1aebcfca, 0x8158de33, 0x4b233f7c, 0xa1607a22,
- 0x8937d680, 0xfa2f35f4, 0x78b30bfd, 0x8f6842fb, 0x08f8d8fc, 0x13904fe7,
- 0x1523945e, 0x52e6d7eb, 0x8bc93afd, 0xc86e37fd, 0x28a47d27, 0x5e546647,
- 0xa3f48477, 0x4923a6a2, 0xb7c0c7c8, 0xa81f50eb, 0x51d76898, 0x68fc1b8e,
- 0xe2ebf7f0, 0xe8732b3e, 0xc67ef1a3, 0x8be646ff, 0x21f2d7c1, 0xf583d72e,
- 0x39831f9c, 0xdf5f5f9c, 0x2f0ae2a6, 0x9f89bf84, 0xfce16f8f, 0x43763f2a,
- 0x2f87de2e, 0xc3efccdc, 0x453f581f, 0x78fa7d43, 0xa6fda258, 0xbd737f27,
- 0xabbf9a35, 0x7defbe25, 0xe0d2fd5d, 0xe18f836f, 0xaf1c6aef, 0xef14f80c,
- 0xf5f3464f, 0x0fe1b54c, 0xaf7cc3b7, 0x03d61a79, 0xddf29df3, 0x26ad3d3b,
- 0x14f90939, 0x3c2817cc, 0x2b90cb9e, 0xfb8f0ae5, 0x8bf3dafb, 0xb76c3f1a,
- 0x677e47fa, 0xebf1e44f, 0xfdd12a75, 0x07b3f8f9, 0x7f205d9f, 0x9d5f285e,
- 0xf18d3f8f, 0x7ed63de5, 0x2de8277e, 0x8076bfba, 0xfae0046b, 0x8a26fd4a,
- 0xa9e8fdf3, 0x3f113323, 0xce81bba5, 0xddeabe91, 0x4e72822c, 0x557ed309,
- 0x3dd328d2, 0xb244a601, 0xf99b73af, 0x956dc798, 0x7a26e33e, 0x1ed6792a,
- 0x4bed019e, 0x86307e76, 0x779668e2, 0xe85a5c52, 0x872fa129, 0x1f20c75a,
- 0x7e3c2894, 0x042e5a2c, 0xd70fdc1c, 0x367e2bb6, 0xc2da4fae, 0x69288fdf,
- 0x9bb551b8, 0x746e25ce, 0x3a3d46a8, 0x1d010dd7, 0x9c5dfde3, 0x02ddf99f,
- 0x6dac6e23, 0x2513972b, 0x81d92a4d, 0xb7b65b25, 0xc970fc63, 0x77f2763b,
- 0xa19cb705, 0x9e5310fb, 0x94fe46d9, 0x396c9360, 0xb711eb7f, 0xd88e45b9,
- 0xb1b0ee31, 0x211dce9c, 0x0df2847f, 0xcf97e3b1, 0xfa585f17, 0x219d9ecc,
- 0xafe96100, 0x7ca0f49e, 0xd9b2d539, 0x7e877083, 0xba6e32bf, 0xc3669c47,
- 0xa7195f3d, 0x01ff0a79, 0x438f738c, 0x9c4635d8, 0xa4be96eb, 0xb2fcc247,
- 0x739c62f3, 0xdbe5e974, 0xef1c7d3e, 0x7b3db411, 0x6f9c71fa, 0x7e3e3fd9,
- 0xd8c1e31c, 0x77ed275e, 0xa2dea7c1, 0xdda36c38, 0xb5dfb126, 0xe2927d6f,
- 0xb5dfdbd7, 0xf6b7b0fb, 0x6307c73d, 0xf6fb603c, 0x6fd0522f, 0x34cf64b9,
- 0xd92eebf2, 0xc5952531, 0x148c2fd8, 0x017c36e9, 0xf8d1f3f1, 0x8078d03a,
- 0x023ed6e2, 0xebfca37c, 0x286dbb8a, 0x5ecc71e7, 0x1f5b6f11, 0x8f429efb,
- 0xebc6daf8, 0xe51cb9ae, 0x1fd7237e, 0xfb671bcf, 0xdbf1e026, 0x7abf8017,
- 0x1b102fc3, 0x3fe11f7f, 0xa2fb4f00, 0xe369f7f0, 0x8a3c5fc2, 0x7b7d8d65,
- 0x5c8db38d, 0x7b8cdc00, 0xfc380f10, 0x88c91c87, 0x5c525fe7, 0x866de233,
- 0x4f9ff717, 0x91fde307, 0x5ce76a13, 0xc7b85fba, 0x68bc63ae, 0x89b79ec9,
- 0xbc77da7f, 0x26687e41, 0xb3b423ee, 0x3f1e3fae, 0xd675892e, 0xae7c79b9,
- 0xefc63f80, 0x5df43d7a, 0x3a72e9c4, 0x7b77e236, 0x9d95db8b, 0xee36cf5c,
- 0xfdd79467, 0x76c3c451, 0x41ca4be3, 0xcaf91d3c, 0x0e036878, 0xddc390ba,
- 0xe3b1e871, 0x2beebba3, 0xfe217c08, 0x8aea1f70, 0xc0fc8733, 0x7a37e4b1,
- 0x0790c59b, 0xfc781bed, 0xfb1abb7a, 0x9711b05e, 0x3e688ffe, 0x3c585f0d,
- 0xffe6f9fa, 0x81d3e175, 0x4f724fca, 0x2dfdb538, 0x774f1cb6, 0x9eecdf4a,
- 0x7d149961, 0x1cebca6f, 0x473b7187, 0xbb83b434, 0xdeb16cec, 0x6f57e136,
- 0x78a64a7b, 0x070d55e5, 0xcb478e48, 0x2f7c0a8b, 0x337cc625, 0x9a2b7cc5,
- 0xf6d13ef8, 0x82ec3bac, 0xeff6da7e, 0xa8ae7a22, 0xd2f1423f, 0x95f96f2f,
- 0x8b8a6ad6, 0x6c227dc0, 0xd3db7f9a, 0xdc90c6fb, 0x44b2ef16, 0x8c49338f,
- 0x3d8e6c13, 0x8cfeb04e, 0x50d29c65, 0xcebba683, 0xc529de3f, 0x77f6237f,
- 0x83eb85aa, 0xe9e41aa7, 0x0fe318e1, 0x77d6aa73, 0xa4bf60a1, 0xfcdc67f7,
- 0x13d919bd, 0x12e5fa47, 0x093bce77, 0x146a9849, 0x4cbd55af, 0x87fd77c4,
- 0x5df10d2f, 0xe18abeed, 0x7c22577c, 0x551e7e4d, 0x4bd4300f, 0x2cbcb4d5,
- 0xcb442bf4, 0xc6c92d1b, 0x2ffc26c2, 0xf54f4f71, 0x644779d2, 0x70f3e220,
- 0xfb1571c6, 0x9f944bfd, 0x5b0a2fea, 0xfbf30a95, 0x2df2484b, 0xdfd97e83,
- 0xe794183e, 0x977fd9ee, 0xd963f11e, 0xb0aa57e2, 0xbb461e77, 0xa4e0bd84,
- 0xaf7906f8, 0x08f9de93, 0xf5a4ee3e, 0xf9f1d81e, 0xff5fa413, 0xb9f8cec2,
- 0x202f580f, 0xde750aaf, 0x9f1df1c7, 0xd457fe3f, 0x8c31b19f, 0xd438f85f,
- 0x3ea1bb47, 0x8e9cec9e, 0xa0c61367, 0x85f0a45f, 0x3ca266bb, 0xf4e78c2b,
- 0x6a4cf858, 0x3bfa1ab8, 0xc7f3cc96, 0x77a20db6, 0x759f4a25, 0x81e8dc53,
- 0xe055cf1d, 0x4cfed4be, 0xa97dc33c, 0xe7a105fd, 0x53bc52ff, 0xcd7aab55,
- 0x4cc5e3f1, 0xf3ef9af1, 0x12de6294, 0x2fc8c4af, 0xfe847c41, 0xba04a6aa,
- 0xaffa0407, 0xd48f9fa5, 0x447c3bce, 0xfcfea1c7, 0x3fa453fe, 0x7ae883ca,
- 0xcebdf946, 0x395ee221, 0xd77573b4, 0x059d6a0f, 0x9fd35f3c, 0xaa1693cb,
- 0xf853b0ff, 0xcf79458f, 0x87fd797e, 0xeeeefc84, 0x10afb787, 0xd344d0ef,
- 0x6fd146ef, 0x5fbf96e9, 0x959deb1c, 0xefa7f318, 0xc858943f, 0x76ca7a86,
- 0x857fbd27, 0x7de028ef, 0x3f1a9d85, 0x44f32ad7, 0xdd9b9de8, 0xc5971e42,
- 0x9ddde845, 0x581e62fe, 0xd1ccf31f, 0x9fbd34fa, 0x61939cf6, 0x0bf9595c,
- 0xbbe23f6e, 0xc7e7e77c, 0x71859e7e, 0x1714f189, 0xffc2fe5f, 0xb7283a22,
- 0x3f22e647, 0xb3b76e74, 0xdbe7c88d, 0xfc8c1d5f, 0x0646a6da, 0xf38af9f7,
- 0xbb06f2ba, 0x58bffef5, 0xdaaaa9c7, 0xbbbef0cb, 0xa4b6af71, 0x57f33917,
- 0x5fa3d727, 0x725ffa71, 0xdf5bbf12, 0x35553a73, 0x59df133b, 0x3897ea30,
- 0x77c5cb62, 0x137c5e51, 0x71eaee39, 0x9c5df53f, 0x25ac47cf, 0x153efd05,
- 0x3ca377a0, 0x3e92d92f, 0xfb04fd10, 0x043bc69d, 0x8fdc04de, 0xb25eabf6,
- 0xef3531a7, 0x7caa2733, 0xf430ca99, 0x54cc8cdf, 0x1b1d1d60, 0x67f414ce,
- 0x9cea4718, 0x9dee4b1d, 0x3ad3c676, 0x98f61c4a, 0x09cf9df7, 0x8d0f7aba,
- 0x1e3e64fe, 0xee3187f2, 0x724c357e, 0x2f388ef8, 0xbb012bd5, 0x6e97bf08,
- 0x79699399, 0xa1a9457f, 0x04a7c07a, 0x4eb662cf, 0x6fc932cb, 0xa5e2bc63,
- 0x264a9959, 0x4ffe577e, 0xddcabd17, 0x24a16678, 0x01150bd2, 0x9faeb663,
- 0xa2887ee5, 0x7e1039e6, 0x15d3cb1a, 0x86a54919, 0x358e3df8, 0x7502ea50,
- 0x5653bf8b, 0x9e4e50c9, 0xe8f6e710, 0x1bb4c2ef, 0xdc92775e, 0x6c620d0f,
- 0x07be3a78, 0x4dd04de1, 0x10f153f0, 0x5763e61e, 0x1797192a, 0x93b47433,
- 0x7d8050a5, 0x5d54f409, 0xe9133d35, 0x18fcfe11, 0x07a64907, 0xbfa3cfcc,
- 0x41ea05fc, 0x3dfca740, 0x70b2efbf, 0xc3188035, 0x3aafc981, 0xe877f199,
- 0xccc3bf56, 0x71a5c6f6, 0x68e77892, 0x1731c91f, 0x84ebfeeb, 0x94558a3c,
- 0xff09d5b7, 0x4473c1c8, 0xa822ce97, 0x58bbc617, 0xd7521e24, 0x9994e528,
- 0x90593dff, 0x3ea9d137, 0xcd1e5ad1, 0xa2d72017, 0xf8206dc6, 0x449fbf41,
- 0x5449fbf5, 0x557c9fbf, 0x829c06bf, 0x7c3e8d8e, 0x556562a7, 0x1f1b809e,
- 0x06e021cf, 0xef8919f0, 0x9c5b31e8, 0x13879c4f, 0x6cc4e35c, 0xf01f58ec,
- 0x4e971e5a, 0x612fb978, 0x41c6276f, 0x865aa4b3, 0x890aef76, 0x97f0d3ef,
- 0x63b532e7, 0xf0f883ba, 0xb94e311e, 0xc30af380, 0xbea3d53f, 0x1eaff493,
- 0xd5bd9df5, 0xdf5d8b8f, 0xe27cdbdd, 0xb127cf63, 0xe26bfe97, 0x75fff663,
- 0x0bc552e5, 0xe786bf6f, 0x49f25db6, 0xf55bd007, 0xd438f2cc, 0xfd8f1333,
- 0x70be95a3, 0xd031d33d, 0xa438ef8a, 0xb8b7a06f, 0xd1efd661, 0x7d1bd274,
- 0x40758be5, 0xe00fd5de, 0x7f189370, 0xf73f1d6a, 0xf70965be, 0xe87981d3,
- 0x8b37fac7, 0x0a3937fa, 0xc470dfc9, 0xbabfdfc6, 0x91eb96a5, 0xa658fcb7,
- 0xef1ee9f3, 0x803fe151, 0x2a5d6edf, 0xb1e92385, 0x7562d7de, 0x9e6e3f78,
- 0xbdfa3f18, 0xe6f300aa, 0xbd6396ae, 0xd2e9c557, 0xfd1757b8, 0x9602c54e,
- 0xb321de27, 0xd4eb3f7e, 0xcd67e4bb, 0x2c7deea8, 0x1ee816b4, 0x7ba04ff5,
- 0xebc7973c, 0xe2171462, 0x485c78d1, 0x4f3a20cb, 0xd9dfe433, 0xdc04de91,
- 0x307f99af, 0xeff3aeff, 0xa6f4af8b, 0x2a1def9a, 0xef2ce2e7, 0xbeab54f5,
- 0xea6f9434, 0xc15fd039, 0x0dced654, 0x39535e61, 0xfcc4ade8, 0x7249be18,
- 0xa3094d2a, 0x3cf4545c, 0xaf7a8856, 0xa8c3ef78, 0xd55d25af, 0x0ebe67a5,
- 0x4b8e6fff, 0xbc5cbe26, 0xb6f74bd6, 0xdbcf8b3f, 0x7b24f489, 0x57017c72,
- 0xed087a4f, 0xc6aeebe0, 0x5cf7a775, 0xf3e6b54f, 0x157dbe91, 0x6ffc8080,
- 0x4fce073e, 0xf6df7bb6, 0xa37efce2, 0xf4bbf432, 0x44ef8f84, 0x86cfca18,
- 0x887c5007, 0x0463309c, 0xbc46bf8e, 0x6ababa6f, 0xaa2ba524, 0xbc794898,
- 0xaf2a8adc, 0xea447348, 0xaa74e917, 0x1a556f77, 0xf95557e5, 0xebe03528,
- 0xfbd0d5f0, 0x7c618e93, 0xf02e877c, 0xf0136497, 0xe5fe900b, 0x73a4ab48,
- 0x195ea745, 0xd8df77c1, 0x7077d840, 0xc10d7a59, 0xf919d357, 0xe53ea135,
- 0x548798ed, 0xf8419dad, 0x564af452, 0xc56fe508, 0x5c228e07, 0x2d4bde9d,
- 0xd3da5aef, 0x7fbd46c6, 0x836f051a, 0xb871b6de, 0x7a712fdf, 0xfdfb868b,
- 0xe3178b6a, 0xb2bfee22, 0x82295deb, 0x1c43983b, 0xe7e6bfee, 0xdee13db4,
- 0x09be7b32, 0xe9eceeee, 0x04fbf704, 0x7016ef9c, 0xa87816ad, 0x46acf016,
- 0xa47e53b8, 0x3a44fbc8, 0xc47bd29d, 0x112bb7fb, 0x08ca901f, 0x7d878077,
- 0x30c98854, 0xf179ba1e, 0x7412eecf, 0x1fc8dbab, 0xfcf2bb2a, 0x127b7708,
- 0x05c3bb87, 0x06ba2078, 0x3cb17bb8, 0x5d54f044, 0x9f50ce1c, 0x15e441e3,
- 0x7e267b84, 0xb800f8e7, 0x871f8df7, 0x058270f2, 0xe4d25ace, 0x23fb830b,
- 0x124b7f7c, 0xbf0d1e7c, 0xafbf8d24, 0x6f77f06b, 0x88b571fa, 0xef16aeb7,
- 0x511c2227, 0xe00879af, 0x4f0e34c3, 0xc8a47dfe, 0x164d28fb, 0xe0357ff6,
- 0x161fcb01, 0x19c210fa, 0x73c51fd3, 0x56c93cfb, 0x38454fb9, 0xf2efe1f5,
- 0x1b10a4b0, 0x6878cd98, 0x85a5fe2f, 0xc6f31798, 0x4ad7aff9, 0x44aeb23f,
- 0x7de903bd, 0xdd6de918, 0xcf0daaab, 0x57a5e43f, 0xcb67ee59, 0x5e89fdb3,
- 0x42407f82, 0xc13ee6bd, 0x5e916b87, 0x1edc7e68, 0xe0e8295e, 0xeefef16e,
- 0xee7e823d, 0xa0fcfc17, 0x7e3e3741, 0x74cd643f, 0x9498c71e, 0x20ef1361,
- 0x4e399846, 0xc0d7d130, 0xd9179834, 0xe94a5e36, 0x8ef64b9d, 0x7e36f89d,
- 0x7d23f314, 0x6773be25, 0x9b43bf98, 0xa1f50fd7, 0xd9dde344, 0x39c4de89,
- 0xa23d3174, 0x6520f788, 0xc4b64f6e, 0xdab5df7c, 0x7e81dfb7, 0x13e6de99,
- 0xafa7f3ef, 0x6be2f184, 0xf54764dd, 0x59cde40d, 0x2d577908, 0xcb0c55d8,
- 0xc1707cb9, 0xf44d0bbc, 0x5f8bdf11, 0x595d7547, 0xf64af480, 0xa93fa438,
- 0xf844ea1d, 0xe8bdac02, 0xcd487abf, 0xc1fb87eb, 0xfdb18943, 0xb9e2b370,
- 0x7593c4bf, 0xb09fc5a9, 0x3b53f54e, 0xde55a27d, 0x43fb13ac, 0xfe56a99f,
- 0xcb83f630, 0xc59f7f44, 0x0f147e07, 0x6e729674, 0xa4cfc92a, 0x0fef636b,
- 0xc216aad5, 0x47aa39f7, 0xe50ebe73, 0xee6f5c6b, 0x9d187721, 0x8c0d9c83,
- 0x2ffe88d8, 0x3396be83, 0x8c42e72e, 0xbac693eb, 0x3ef4867e, 0x9d01f702,
- 0x01f71ee8, 0x34c3cbcc, 0x5b949ba2, 0x587fe46c, 0x5eecf825, 0x046b5fc1,
- 0x7c803d5f, 0x81df0235, 0x13fa51f6, 0x90eb4fc1, 0x12dd20ff, 0x78f5ce9f,
- 0x6197bef1, 0x8c772cc7, 0xab1cf7a5, 0x0fda365d, 0x61ed7794, 0xfb05da17,
- 0xf7e8ebc6, 0x7a59dd91, 0x59e58dc0, 0x43c03f0e, 0x91d2cc4a, 0x2f733e5e,
- 0xe2abe8b8, 0xf4fd015b, 0x72a2b0ae, 0x949ea47f, 0x451fdec7, 0x9ef453a7,
- 0xcb5faa80, 0x70fd3fd0, 0xe113a552, 0x8f3c70fc, 0xcfcdaad3, 0x3e03b826,
- 0xe29bc5b6, 0x5f6752a5, 0x1f1ef5d5, 0x2f70ff76, 0x12ec12ef, 0x0c0d82f2,
- 0xf92661b1, 0x7bde2b50, 0x1b10de64, 0xcb139269, 0xf432e787, 0x0f94f5eb,
- 0xcdcbc8bd, 0x90ae5e5c, 0x434bcb62, 0x65e4377f, 0xd2315cb4, 0xb74d1795,
- 0x5d54a7dc, 0x66f2463e, 0xc1ff6c64, 0xfcede3ef, 0xf3e66a1d, 0x2cf7a7e5,
- 0xdefd7807, 0x83d42f0c, 0xf7c57f0c, 0x5a27ac62, 0xcab89e3e, 0xf40357d8,
- 0xcd02d03e, 0xaff8f0fb, 0xfbad57dc, 0xd5b9e141, 0x9c140fa4, 0xa8b4a1cb,
- 0x11f13a76, 0x9318d3c3, 0xfd7c7c9d, 0xdf883d0f, 0x7ce2b5e3, 0xc61abd7c,
- 0x1b7da4b8, 0x0e607aef, 0xe3c175c0, 0xe38f9d32, 0x14d90d3d, 0xbcb2c2df,
- 0xef1ba7a4, 0xdf0f1e23, 0x31c731ee, 0xe9d4fc14, 0x37bffd47, 0x1f582c7a,
- 0x8d537fdb, 0x83bb44ff, 0x5da246f9, 0x675dfa01, 0x9fbff504, 0x18dde938,
- 0x18da1f23, 0x8fc733ef, 0xf15be918, 0xdc21ced3, 0x33f78adf, 0xab9dd217,
- 0xf4d79d0f, 0xdc7c17ce, 0xc733e863, 0x4547d273, 0x7dc0359c, 0xca7fc596,
- 0x098f49cf, 0xf473187f, 0xca271e52, 0x7b94bdbd, 0xd3133c78, 0xd3ff9763,
- 0x3d51a47e, 0xb821b4ec, 0xd3daf2fe, 0x2fa5deab, 0x6280f3d6, 0x60bdc711,
- 0x79787c63, 0x2059be0c, 0x102f07ee, 0x7d065de7, 0xd0df7f15, 0x465d39b3,
- 0xd4f4add3, 0x1fa74425, 0x5fc9541f, 0x87f4015b, 0xd4f4987a, 0x698f7a78,
- 0x3d9713d9, 0xf9c00b78, 0x95324393, 0x10739d97, 0x87ffb65d, 0x3bfce28f,
- 0xefd2ab7f, 0x70727c26, 0xb7ebc51a, 0xbb9ef78b, 0xedc5cef8, 0x5c5c1bda,
- 0xbb9c14ae, 0x8bbbe897, 0x35f3c2ae, 0x7382dbe4, 0x565e900f, 0x4ebdef80,
- 0x788f75c0, 0xe8229279, 0x1fb60277, 0xea1cbb92, 0x3e72b615, 0x15431361,
- 0x10de8fe4, 0x0637bbef, 0xf2e0df3f, 0x0b45e25a, 0xff50d75b, 0x24c3f40c,
- 0x02c85c53, 0x8507fbf1, 0xe584b878, 0x21a5887a, 0xd6f908df, 0x77d6ff4d,
- 0xd69ddbb0, 0xc2e91a75, 0xb04e75ae, 0xebdd61d8, 0x73e72efc, 0xb6146fa1,
- 0x7579f0e6, 0xe3e7e7ad, 0x7ba7ad63, 0x7da9131d, 0xf1ec627b, 0x1fe317ef,
- 0x9bc22aab, 0xbc44ac5b, 0xbe105bb7, 0xd5f7c35d, 0xf627d60a, 0x2aeb4c91,
- 0xc4b52dac, 0xe6d4152b, 0x857afbd3, 0xdbb5f3eb, 0x1753fb4c, 0x97fe425e,
- 0xc6eb0d27, 0x2e6bfdcb, 0xaea8bc23, 0x7e913bbc, 0x7c553d8f, 0xec5e7bc7,
- 0x84e9e397, 0x49531a16, 0x4f76eaf2, 0xf74dfbac, 0x56a9e347, 0x80ec09e0,
- 0x0c9e132c, 0x3fb744fc, 0x8efcb9b6, 0xe21cf0f7, 0x509afe11, 0x0a2bf2ef,
- 0xe1b1e97f, 0xd7a0a4f7, 0x040fa063, 0xdf7e082e, 0xf5cbc4b2, 0x6f813f0a,
- 0x77c63e03, 0x08c2e7d0, 0x379127b7, 0x4e3fb193, 0x5b83e70f, 0x1e82f8f7,
- 0xcf5ec526, 0x59eb5d6b, 0xc7a042ca, 0x3eb8d49c, 0xee7afbd1, 0x05c72162,
- 0x4f8ba7ac, 0xfa7c2df3, 0x385b9ada, 0x9ffc0a9f, 0xd10f7fdc, 0xbc70b1f5,
- 0x7a54e30f, 0x486bcf5b, 0xd062e307, 0x1b10b0c1, 0x8f7ffd95, 0xfe684e0e,
- 0x3ee08f11, 0xee15fb02, 0xde4fc5d3, 0xbf3cc63f, 0xfd27e2b7, 0x9aa5f69e,
- 0x0ea73b0b, 0xf41df64d, 0xfb277ed2, 0x434961aa, 0x633f815d, 0xa3cf12fb,
- 0x6e9fa19f, 0xc6cfe245, 0x578bd2c4, 0xc72e0556, 0xaaea8bf3, 0x8fd1cba2,
- 0x973d0aaa, 0xce9b9ea1, 0xd623177f, 0xcdcc7d7b, 0xaab2bbf1, 0x983fce92,
- 0x94f4c423, 0xd80077ee, 0x47f24c89, 0x7eab2e2b, 0x1d01a9a6, 0x789adffd,
- 0x025540a7, 0xd0b3d3ab, 0x1194945f, 0x1fefc132, 0xb2ee7e2e, 0x8ebf6d26,
- 0xe8761ae7, 0x8e60fd1f, 0x8b59ca81, 0x6545ecff, 0x93c05f41, 0x1c731cf1,
- 0x124bf06b, 0x46cf13f9, 0x5acf13f9, 0xf30491e6, 0x5b3b0afe, 0x1ce87871,
- 0x8fc26152, 0x5f01e02c, 0x5bbb1f68, 0x1e5cebe7, 0x607d21ef, 0xb23645d5,
- 0xff09d6eb, 0x2fd0cff4, 0xe851704d, 0xe0c301bc, 0xc096beb0, 0x73c4e1d7,
- 0xaded190c, 0xf8dde508, 0x4f4c9762, 0x4f984a1f, 0xf3095e64, 0x1564597d,
- 0x931d0fda, 0x3f20ef5a, 0xe69033a0, 0xd67ebc06, 0x84b0e5af, 0x817912e7,
- 0xb5ce118b, 0x0bc32872, 0x77217ff5, 0xc1bd6066, 0x1f4ef450, 0xf7845e9e,
- 0xf3a60212, 0x8eb421ce, 0x33dcbcee, 0x2552d17f, 0x6feffcea, 0x5fcb9b17,
- 0xc62ffa71, 0xf93b5479, 0x1f4960fb, 0x57be7448, 0x94ee75dd, 0x64f2fb47,
- 0xd123c737, 0x41d5d205, 0x559d37ae, 0xc87d09bc, 0xf3323d9f, 0x7c3ccb45,
- 0xec22bc96, 0x53a03ad1, 0xe832a73c, 0xb7e29863, 0xaf0d1d21, 0x3c66ef81,
- 0xfb77d0c6, 0xf079e8b8, 0x31b9fa98, 0xe897183f, 0xcb973e55, 0x7a28e3f7,
- 0xf9f12dd8, 0xdced8a87, 0x1921646e, 0x20c73af1, 0xcf05653c, 0x8fe33227,
- 0x8d77b5ec, 0x82b62773, 0x0b9e75e7, 0xb9cfc6de, 0xa733e7a3, 0x3c16eef9,
- 0xdcd399ef, 0x7b9e3e6f, 0xf8ba29ce, 0xab774c43, 0x51e3fb9d, 0x2250fc5e,
- 0xb7a6f67d, 0x21e3545e, 0x46e6f1dd, 0xae609f7a, 0x73bfef9b, 0xf88aa759,
- 0xe35d300f, 0x5a6156e7, 0x27cc6ddf, 0xfa2a0df2, 0x84896e1d, 0x721ab86f,
- 0xbe18beb7, 0xb9bdd51a, 0x425f3b07, 0xf41a05b4, 0x321b3a5c, 0x5ef935da,
- 0xaede79e6, 0xf14e0e0b, 0x838a47fb, 0x6ebdd5d6, 0x58c1c107, 0x7c24d427,
- 0xbc49c174, 0xa5c066cf, 0x37f1dd6b, 0xf8fe855f, 0x1cdce4de, 0x790fe8c7,
- 0x579f3c9c, 0xc13e9c34, 0x0b939022, 0xb91ffedd, 0xb77afca1, 0xcb4c2eff,
- 0x95839d1c, 0x5537c421, 0x0390af98, 0x1c00a517, 0xb8141a3e, 0xd1e11938,
- 0xf482f7d2, 0x2e4ec3fe, 0x1ba70e0f, 0x1bab6d7a, 0xc8bd9be9, 0xf7e35af5,
- 0x38ed5632, 0x008dff7e, 0xe55e8f9f, 0xcb95cdef, 0x22ee6bde, 0x62ccb2ff,
- 0x7a4621d9, 0x1e0df858, 0x7bc4db76, 0xf39d7cd1, 0x79ef0e14, 0xcf74f18d,
- 0xd952de6e, 0xbcfdbef2, 0x957e3ca5, 0xae632d29, 0x2b09d8b7, 0xecf75d04,
- 0xd11afd07, 0x3a5fccfd, 0xd92dde99, 0xc893e9a9, 0xa738f1cf, 0xfd53f9b3,
- 0x4ffdc632, 0x1f912d58, 0x6f51fc69, 0xe7e42b53, 0xc87d1387, 0x5ceff3ae,
- 0x97ef899c, 0x3f70b454, 0x28bcaa67, 0x7d203fe3, 0xefdf3f68, 0xe108bf31,
- 0x5d7bf2e4, 0x369af3a6, 0x474332b5, 0xe5d5be7a, 0x7f907947, 0xc45dffbd,
- 0x30e7e1a7, 0x40e7ddf6, 0xce38c00f, 0x39bc5bfc, 0x0df91f48, 0x0ea53f4e,
- 0x5c81c4a6, 0x039857cb, 0x298d54d3, 0x98e34c4e, 0xe5ecd303, 0x9daa7f42,
- 0x2783fa45, 0xf7c34f27, 0x9f9bf3c4, 0x17db14fc, 0x927d912a, 0x9bf29e89,
- 0xee82fd5f, 0x28f3f9c3, 0x5b5bf74e, 0x39dd8b34, 0xff9ac1ba, 0x9f359376,
- 0x33e6b111, 0x03f35a0f, 0x7d266d70, 0x1e1eff73, 0x9a7a529f, 0x1e7defdf,
- 0x3c22a060, 0x3e49d9b6, 0xce6d294e, 0xd38778c6, 0x586dbb4f, 0xdceffbbe,
- 0xeadceeee, 0xf72577a4, 0xe2c717bd, 0x6a1df092, 0x3f2c46e7, 0x8d90d71d,
- 0x4fa845fe, 0xbbb04b71, 0x71bb408e, 0xbd06a5e7, 0x69be2986, 0xfdf83fe7,
- 0xf6adce94, 0x9c51c630, 0xd3e4f5de, 0xb7a79f48, 0xec7d200e, 0xc3642979,
- 0x70f7437e, 0xf3884ffc, 0xe954e458, 0x7e3dab16, 0x1d01e842, 0xe879b527,
- 0xd6b8ba17, 0xc24beeba, 0xa8a77cd6, 0x7d331b7f, 0x10f8c74f, 0xdc34aaef,
- 0x7ec53767, 0xc173fdbf, 0xa53d4f17, 0x8b2b7419, 0x37991ef7, 0xe2befba5,
- 0xdb73eeee, 0x8e800325, 0x4316114a, 0x4697bfba, 0x0f85db8d, 0x38927fd1,
- 0xadfceb2f, 0x5539f04c, 0xef896455, 0xaafbd2e8, 0x4d53697c, 0xed4151fb,
- 0xd27c94be, 0xdca5f757, 0xca67da66, 0xb9481eb6, 0xdb6bfa19, 0xebefd0a2,
- 0xe279cd95, 0xf8490981, 0xf9c06e5d, 0xe7fa3962, 0xf25e1ada, 0x8bd03d60,
- 0xbde39ac7, 0xa5c76b0a, 0xe9f1354d, 0x834ba5d7, 0x0e74ca9f, 0x5a72e79f,
- 0x461ef172, 0x1dff2d69, 0x1b86d2be, 0xe11abbae, 0x4fdf9ebe, 0x9e5856cf,
- 0xb653fc0c, 0x8fef86fd, 0xc7c1ef86, 0x3e70f9c6, 0x2d24f8d9, 0x3fadbbde,
- 0xee76f743, 0x44ff71b5, 0x7b465c34, 0xfdef7e38, 0xe508ab81, 0xa2f7f412,
- 0x3fb52c6d, 0xe47d0368, 0x6bd0e4c4, 0xd3954337, 0x570fe1ac, 0x95965c53,
- 0x253e809e, 0x373ab7d4, 0xf48dbe4b, 0xb5f67975, 0x97fb5aa8, 0x943a3c51,
- 0x8a37b4c7, 0xa27ed6c3, 0x47a57cf1, 0x68f77e32, 0xa25e99a4, 0x119df5eb,
- 0x2ab7dfa2, 0xcd720a5b, 0x1d97efa4, 0xd878a1ad, 0x19c871e8, 0xc13b8a68,
- 0x7c4bd1fd, 0x027a14ec, 0x22d91c93, 0xcf287c5e, 0x48a7bbd3, 0xa5104097,
- 0x5ea9b297, 0x6cb5eb84, 0x4bcfa63a, 0x99640791, 0x33f203cb, 0x79278b6d,
- 0x3eb7a6d0, 0xa3a473f1, 0x1e74d1fa, 0xefdc5fd6, 0xe846f36e, 0x77e358bd,
- 0x9e74da71, 0x5276692f, 0x8fe9d53c, 0xcb979234, 0xeb138e30, 0xd9b4ed3f,
- 0xbd3b5f40, 0x8c64efbd, 0xe06b6e0b, 0x6a9f8aef, 0xfb593be8, 0x9a37fd8a,
- 0xf17d8def, 0xdf13fe08, 0x8bf8f54f, 0xacdfb83d, 0xa9d371e0, 0x2a332b7d,
- 0x68c9e063, 0x6655a497, 0x79d2bda0, 0x91b7cbab, 0x0ae953eb, 0xc8d3ddf9,
- 0xfc2f92af, 0x79e9f749, 0xbea07877, 0xe1faea1b, 0xe5073eff, 0xfc77e88d,
- 0xa1ddf640, 0x1f3eed0f, 0xa015905f, 0x03f4e3fb, 0x7cfe3036, 0xa0144585,
- 0xafb8f9b7, 0xc93ca18c, 0x25ebf9cf, 0xb05c238a, 0x7c1ebac6, 0x1fbcdae7,
- 0x292fe116, 0xb0b8c50e, 0xd68b4a06, 0x718bcb5b, 0xbc50f0ff, 0x94cbc517,
- 0x19cb59d4, 0xad1f908b, 0x7b4bfeac, 0xca7db82c, 0x6bf9f58c, 0xa3b9163c,
- 0x50f9dc92, 0x95f5a3ce, 0xd20037a2, 0x1cfec49b, 0x6d2733bc, 0x4b1da401,
- 0xc82da427, 0x7cee7bc4, 0x8ed3231f, 0x8fbf98f8, 0x45953a57, 0x7177f106,
- 0xa58fee18, 0x346452cd, 0xe42c1de6, 0xe3bfcb19, 0x6eb820fb, 0x63313df3,
- 0xf35eb8dd, 0xc726f677, 0x10cf71ab, 0x2f2e6bd7, 0xab287013, 0x3afc5bb7,
- 0x90a5ea13, 0x7f78dbfb, 0x55da95a9, 0xb7697e34, 0x5ff62bf4, 0x1399b7d8,
- 0x68a135f0, 0x798b8c83, 0x878e4ba8, 0x0f75892f, 0xdf80b0e5, 0xf4143743,
- 0x107d7e43, 0x02fecff5, 0x9f2439c0, 0x45809f84, 0x5c5e301c, 0xca9e5766,
- 0xfcf397b3, 0x8ae04f3e, 0xaf6081da, 0x873a01a9, 0xbde72f9a, 0x471bf78c,
- 0x425e0626, 0xf9b7ccbc, 0xb7c42b07, 0x17e411fe, 0xf0e1ca5d, 0x4bcad8f0,
- 0xc5d9632a, 0x933fbf02, 0x3e5ce81a, 0xa3ef8bb4, 0xd54eacee, 0x8bfef7ec,
- 0xfb094790, 0x4f5ee95b, 0xee890ee7, 0x1d223f13, 0x8fb6fba1, 0x6865e95c,
- 0x9e01a7c7, 0x223eee50, 0x5ef815b1, 0xace3565e, 0x9e212f45, 0x787ce2b3,
- 0x38de5fde, 0xd1772e49, 0x15beb9fa, 0xcf01b95f, 0x8de75283, 0x0e748b1c,
- 0x2d779eeb, 0xe3d0ff7c, 0xb9faf1a5, 0x96d3de38, 0x8f17d287, 0x16f3f1c2,
- 0x387c84bd, 0xa1ee8e3a, 0xdcac02a7, 0x8d5d287d, 0xcb1a547a, 0x60fa87d8,
- 0x58ef0cda, 0xe43fb9f4, 0xdb24e3e1, 0x50b379e4, 0xf3cb1bcf, 0xf92c7e7d,
- 0xae111237, 0xed5271a5, 0x17ce3a6c, 0xe85ea3bf, 0xcb823f40, 0x9529ef84,
- 0xea7e3819, 0x69269e50, 0x3c0f8a11, 0x99bde064, 0xd12de9c7, 0x25e9c919,
- 0x454a4c5f, 0x1b50ee3b, 0xbf20d418, 0xec340d0e, 0xf48b88cc, 0x792361f0,
- 0x2df7617d, 0x5ff7e0c9, 0xfa1b0692, 0xa1d7af0f, 0x513213fa, 0xf5c1beae,
- 0x7e5c4ff1, 0xa65f57f2, 0x4fbf8ec5, 0x0a78e048, 0x61638b9e, 0x3838ff93,
- 0x13d233fb, 0x2c3ce783, 0xf027da6c, 0x1c734dbd, 0x7ed15977, 0x72841dfa,
- 0x01f742d6, 0x71ac1bca, 0xc5c597e7, 0xf3a0d71e, 0x0b1b39e0, 0x1e3225f5,
- 0x21d7f688, 0xc2d573a9, 0x4b17ba24, 0x73c13be7, 0xc6990b1a, 0x63eb9e0f,
- 0x33cd77cd, 0xaee9fa85, 0x9af5acc7, 0x2c7aa73e, 0xe383d5a7, 0xb4adebb4,
- 0x29cf048f, 0x6fd8ced4, 0x90b6c23a, 0xeedcfc0a, 0xe21de5e5, 0x6fa8cf3c,
- 0x5b3ed7f4, 0xf8def713, 0x556de8bb, 0x2e007820, 0x219f368f, 0xf359747f,
- 0x418b823e, 0x89f0dd0f, 0x7c132167, 0x225baa52, 0x69ede036, 0x7bb86e0f,
- 0xc990d814, 0x871f05fb, 0x9feddd36, 0xbbf8e508, 0x8ebe260f, 0x73e2eacf,
- 0xd90f3312, 0x42172e64, 0x9d4f7775, 0xfa8bca68, 0x9b43ed5d, 0x340a98af,
- 0xb78a3595, 0x27c1e6bf, 0x4e4e595e, 0xdf111a8b, 0xcf3839df, 0xa23bca6d,
- 0x8fc82ef2, 0xa8f5e536, 0xf2a8a4f2, 0x40fca8b4, 0x1dcdbf5e, 0x40fb2c19,
- 0xd4f83fde, 0x9efbf815, 0x7322fd6d, 0x358f05f1, 0xf02bb6f2, 0xb3cb7cf7,
- 0x56d3eff8, 0xc93e4b34, 0xba2b3f24, 0xb55bf3ce, 0x0e10a3f9, 0x1ca32f2d,
- 0x74926b28, 0x0d5debd4, 0xc79423db, 0xbb535e76, 0x963f6e06, 0x76a11c35,
- 0xd23ff6bd, 0xef7c7c46, 0xdea9f7c8, 0xd89fcef9, 0x5befeb0c, 0x751e76f5,
- 0x4e2c0a44, 0x7f7eee48, 0xdbfb6dbd, 0x0a2fbfdd, 0xf9fbdbdf, 0x8d1c900f,
- 0xc41e07ba, 0xf9bab8f9, 0xbde380c4, 0x7bd2cf23, 0xdd1c7fb4, 0x03aa16fc,
- 0x2cce4bf1, 0x9fdda053, 0x438f8fcb, 0xb4df0b7d, 0x67e37dc5, 0xdc12f852,
- 0x81651e8f, 0x5bdc704b, 0xc3ef87cb, 0xac384c9f, 0xef863eb6, 0xf0786f95,
- 0x98142be5, 0xeb7761df, 0xb709fc20, 0xf06f094d, 0x7f2b727c, 0x1dff4551,
- 0xa0f0e7e1, 0xc83641d6, 0xa6c0993f, 0xeb787e53, 0x3d7f9863, 0x3faf04e1,
- 0xac9075ba, 0xf27209a3, 0xdf90a98a, 0xf7e6c7ac, 0x37ae04da, 0x369bf31b,
- 0x8453ffdc, 0xfbe32b0b, 0x76335e0d, 0x5b9d3c80, 0x66db6a72, 0x1fdcc780,
- 0xdb9c93c5, 0x55dd76dc, 0x50870479, 0xa611c61e, 0x7ed3780a, 0x215d7248,
- 0xee1fa27d, 0xf1ba7d81, 0xe2dc1ee9, 0xd2e2fda5, 0x37a8edca, 0x451dcea4,
- 0xc7a145fb, 0xddc9e92f, 0x52193869, 0x3ae92fa8, 0x8db2eb97, 0xef5223e2,
- 0xf51e9372, 0x033df8a0, 0x0fa23be0, 0x36cbc097, 0x096d9eb1, 0x80f6bfdc,
- 0xf3f51a67, 0xc47fe49e, 0x42ca6321, 0xc5a7f24e, 0xf372be91, 0x202b9006,
- 0x7989d7d7, 0x097b5baf, 0x466e29f9, 0xfc28f13f, 0x67d7efee, 0xbffeb754,
- 0xaef2032e, 0xdd6ebb6e, 0xf33f7f2c, 0x0dbaf6b9, 0x5b90dcbc, 0x7b79d2eb,
- 0xad4abf63, 0x3ea4e3e6, 0x5c46c978, 0x28c2bc47, 0xece17b7e, 0xd1971b11,
- 0xad2d7ffe, 0xfdfdf99f, 0x5a3f0ed0, 0x7a86c43c, 0xc29f81a9, 0x1257fee1,
- 0x89373d60, 0xf035efdf, 0xb74f8c64, 0x907f51e2, 0x3b407f41, 0xeb403135,
- 0x0c627d07, 0xfae06e0f, 0x3316d204, 0xad2997c1, 0xfffbf0e3, 0xfdb53820,
- 0x4ff4e02b, 0xc947779f, 0x0c379021, 0x7a5f2ffb, 0xedff72dc, 0x23d1013f,
- 0x8000702a, 0x00008000, 0x00088b1f, 0x00000000, 0x7dc5ff00, 0xd554780b,
- 0x733ef0b5, 0x9992bcce, 0x924c2664, 0x84e3c849, 0x71100840, 0xf0444312,
- 0x51888431, 0xd4503b69, 0x20717ad8, 0x9926023c, 0x4b62d5a8, 0x79120cff,
- 0x22341809, 0x0281c150, 0x06f55bc5, 0x701a8c45, 0xaf7a8a44, 0xb7ad8ef6,
- 0xff7f6c57, 0x4a8f8808, 0xd2f5a232, 0x5ad7fd97, 0xce64ef7b, 0xb6b4a924,
- 0xdc3ef9bd, 0xd9cfb3ee, 0xd7b5ed7b, 0xf6b5af6b, 0x4cf5a61e, 0xd8c05c0a,
- 0xce6a2566, 0x7c2c64ae, 0x61edf148, 0x67cf07f0, 0xf7fb193b, 0x95d5a0b4,
- 0xf19fd8c9, 0xe6c60aef, 0x2729bf7e, 0x8ded0658, 0x61cb1823, 0xcfbbf52c,
- 0xd500ded4, 0x7a3f4bb9, 0x3f7c0f7c, 0x8ca97bf7, 0x43ef03e9, 0xf7c18c8f,
- 0x5b6dbcef, 0xd2842ecf, 0x793519d2, 0xf986bca0, 0xef7bc056, 0x27435898,
- 0xbe0ef7f4, 0x5563097a, 0x35e5bbdf, 0xdbb19136, 0x93632a5d, 0xf8ab5b18,
- 0x258a9873, 0xbbe0db0b, 0x644b2cf0, 0x7d63114f, 0x0cd85311, 0x23b875fd,
- 0xb8c1175b, 0xf995d71d, 0x1f5fd0c2, 0xef867e63, 0xf7a54b2d, 0x4c3ddc3a,
- 0x744bf6c3, 0x0ec24017, 0x19faa17e, 0x5a37e3d4, 0x7814bb22, 0x76c2ce5e,
- 0x3e325f6c, 0xf3fa8612, 0x7f7d0e30, 0x0f644a63, 0x8f82cfb6, 0xa371dea0,
- 0xfe861237, 0x47622cec, 0xfa763a78, 0x8c1c3273, 0x05acaae5, 0x8228efe1,
- 0x2b59943a, 0x0701cdd9, 0x713fcfe2, 0x0f007396, 0x6899775f, 0x3d95a93e,
- 0xf437ff4f, 0x7ddbd6c7, 0xe1b0a063, 0xcf6f58ab, 0x3b997826, 0x66f88558,
- 0xb803fc1a, 0x89c81fea, 0xcce7c3ac, 0xb1eb8557, 0x479fe9da, 0x5520fff0,
- 0xb61ff847, 0x0ab635b3, 0x66d61528, 0x0bfc00cb, 0x43fb5878, 0x37630c00,
- 0x978000e3, 0xd670d7ff, 0x2bf3c3a9, 0x07ad0a5d, 0x556df9fc, 0xbc67cd8c,
- 0xd4f2fe7d, 0x58899577, 0x1a6b51aa, 0xa5735b3c, 0xef46c7bf, 0xe28f3fb1,
- 0x0bfa0da5, 0x25b7f132, 0x09ba44ee, 0xd86977e2, 0xba9defff, 0xfdf0eb03,
- 0xf8765c44, 0xfbe074be, 0xf9a3173a, 0xfc3955cf, 0x475535ac, 0xc8fc4afc,
- 0xd7c24eb2, 0x713f90fe, 0x724e393c, 0xfcbaf7bf, 0x00be2237, 0x0f74d1ef,
- 0x75a545e2, 0x63d7864d, 0x43b06f89, 0xd556dcfb, 0x33e0377d, 0xf349ccb8,
- 0x9cdef095, 0x158cbf1d, 0x74ffee0f, 0x8695736a, 0xe9c65ff3, 0x02b72d9d,
- 0x62f112e2, 0x5d03a819, 0x63f1642c, 0xe7886526, 0x8445f45a, 0xc247517f,
- 0x2fdff4f7, 0x49ef89ac, 0x92ba617e, 0x2ba0bf04, 0xdd70d15d, 0x375f0a82,
- 0x373e258b, 0x6e183650, 0x82d5cf89, 0x245d24ee, 0xe2357be3, 0x7c60d27b,
- 0xf93d2c7b, 0x99706fd8, 0xdc94f095, 0xfa11633f, 0xf03b21ee, 0x16ddd00f,
- 0xdd7a2145, 0x646a5772, 0x3da7f225, 0x19f117be, 0xad157c3a, 0x1492ef77,
- 0xdb2bc19d, 0x9441302c, 0x9c9d8733, 0x4a7a4b8f, 0x617a9f90, 0x9f587ece,
- 0x9dd5d7de, 0xc1024561, 0xeaebf358, 0xdeffa42e, 0xfde6af67, 0x19d548ac,
- 0x618843d4, 0xfe143718, 0xbc032b43, 0x2cc5349e, 0xe0175e34, 0xff09c257,
- 0x435fe17a, 0xe8bc80c5, 0xbf1c06c3, 0x9807cb8b, 0xe71a12e1, 0x1d6c29db,
- 0x526cdb8c, 0x6f6826fc, 0xb23e3a5e, 0x4361c392, 0x20146a3e, 0x9b589b35,
- 0xf7c03152, 0x2513a6cd, 0x36f195b7, 0x85bbde0d, 0xf78175f1, 0x5f002a8e,
- 0x4c7d6da3, 0xfa45ba45, 0xd8a5f687, 0xff80345e, 0xf9bff5e6, 0x7cdfc213,
- 0x8d1748c0, 0xb1e7198f, 0x8a9e9134, 0xb524e806, 0xd589e392, 0x8018c8b0,
- 0xa9ea29d7, 0x8a88b1b5, 0xcd565a78, 0x66e9024e, 0x8199e91e, 0xecace3f4,
- 0x0ca1f364, 0x11fcc07d, 0xf74a7b80, 0xc24697ce, 0x58daeefb, 0xb4374e3e,
- 0x4e8bb6ec, 0xbeb0345d, 0xba4bce12, 0x41cca937, 0x0077d1e3, 0x83bf7f8a,
- 0xfb371bde, 0x6ef2c482, 0xa4d32efa, 0xe0f6bd12, 0xcd1be423, 0x38c0a39f,
- 0x328c979b, 0x82a67ae1, 0xbcb9437c, 0xaeec7db0, 0x7f678415, 0x07b7ca09,
- 0x09ce80d3, 0x942abe9f, 0xeaeea52f, 0xbfa0bb5f, 0xffd5dca7, 0x138e258d,
- 0x47f4e270, 0xa648be90, 0x23bd64e7, 0x6e2ecbdf, 0x0c716f29, 0x17f01eff,
- 0x35be2de1, 0x4d0fcb9d, 0xffa2faa1, 0x475cef30, 0x374469f0, 0x222f6db7,
- 0xdac0dc79, 0x591ade89, 0xbbf482c4, 0x69c2c6c8, 0xe08b1740, 0x172874b5,
- 0x3d32b16f, 0x4496fd61, 0x2146f58c, 0x31616e0f, 0x7d96fefa, 0x2c35ed49,
- 0x26f684ea, 0x62a68b1b, 0x6f6c69d9, 0xef417166, 0x3e26b67f, 0xbe7c1d67,
- 0x77758b37, 0x20d444d7, 0x1dab7bcc, 0xbe065774, 0x33eb4ed0, 0x5a4ef965,
- 0x7f0af4cf, 0x4fa83dd1, 0x76fb355e, 0x27bedd01, 0xe7e24bd6, 0xbbf5575e,
- 0xd514f788, 0xdf8ff344, 0x9cfc2563, 0xeb8dbd02, 0xdd9b9f7f, 0xdeca1b3c,
- 0x7a3bdd56, 0x55c9a234, 0x1839db30, 0xb39eebcf, 0xb8dd2037, 0xb7c226dd,
- 0x65cc7e7c, 0xddd57aa6, 0x522bff02, 0x088abb23, 0x9e9113dd, 0x7d4946aa,
- 0x82ce26ca, 0x5666bbfa, 0xe6f509d7, 0xec411deb, 0x07ed07af, 0xb9b817a8,
- 0x5b7028de, 0x2eb27e68, 0x06e92be7, 0x52fea1c6, 0x84a5e22e, 0xc85b8c63,
- 0x7f89b641, 0xe6eff427, 0x493b2925, 0xafd3f6ff, 0x96fd1189, 0xd7882c05,
- 0x795c84e7, 0x3695ca4e, 0x9a2b972b, 0xa832ee6b, 0x9941f73f, 0x95e01d6f,
- 0x83de8295, 0x4a3f1bef, 0x5e9e91d1, 0xef5fe8f9, 0x9fb419fd, 0x5ba2c0fd,
- 0xd2017da1, 0x73dbac18, 0x6047970a, 0x8fd4307f, 0x38f6c08d, 0xb1c1e36e,
- 0xb41c7f64, 0xc846fb08, 0xe0b4c6cf, 0x4fd085d6, 0x3e9993e3, 0xbcf664a0,
- 0x5ee8f239, 0xf7643f6c, 0x1b7e81ed, 0x1b5e3853, 0xdf491b9f, 0x4e0f613f,
- 0x733cc377, 0x3c9f91ad, 0x05e37281, 0x677bd92b, 0xc2117459, 0xaed3b67f,
- 0x70277be0, 0x4250e95e, 0xeaf3829b, 0x043d9276, 0x10b39a7c, 0x36b93bbe,
- 0xe04bcf9b, 0x0986e4e9, 0xcd9b17c4, 0xb3f0bc7c, 0xc27fbd0a, 0x0391981c,
- 0x974f13f5, 0x534056fb, 0xb512c05f, 0xee873d00, 0xbd5e76e3, 0x7881df49,
- 0x09538762, 0xa11eb95e, 0x62678b8d, 0xf2e9687b, 0x1c7ca3af, 0xde51cf8e,
- 0xd9b904b3, 0xc805e2cb, 0x461adb4f, 0x9f514675, 0x91f38f74, 0x585e655a,
- 0x171ba07c, 0x9994f77f, 0xc87e35e3, 0xbe7cf44c, 0xb57ce3fd, 0x707fae2a,
- 0x65c8109c, 0xe48ff926, 0x1d5d4272, 0xfec8ceaa, 0x2d973d0c, 0xfb633aec,
- 0x1de0ceeb, 0xe6bfa06e, 0x9de7fffb, 0x38df8465, 0x9ed645c9, 0xca797c49,
- 0x748b9fec, 0x5a89aeeb, 0xf3e827e6, 0xeaaf820d, 0x63fec2ad, 0x1f224b51,
- 0x6aaaf6ca, 0x72cbdd23, 0x677fa0bd, 0xc1f7cb8c, 0x1fc126ed, 0x1ea2b4df,
- 0x6641c29b, 0x1c47a885, 0xfd4ebfd8, 0x6a20f94f, 0xe17a87a9, 0x9165a8f2,
- 0x004f9128, 0xcc1b51df, 0x2756d4fb, 0x03406fe7, 0x3268b3f6, 0x48b97d23,
- 0x02b5fb05, 0x3d5deb9f, 0x9fa72eb0, 0xfafa7376, 0xf0056023, 0xf7898f3d,
- 0x82ef60ac, 0x4da67b1c, 0x607ee289, 0x2eeb16de, 0xb5f2117b, 0x7ae2755f,
- 0x8e48576e, 0x569b6bcd, 0x4bea15b2, 0xeb405c0f, 0xd399369f, 0x9b88d2e5,
- 0x2114e2f4, 0x11adeb1f, 0x3fdfd90b, 0x01f21851, 0x74764ff4, 0x53947c23,
- 0x280f1831, 0x10f40dd7, 0x5d83a849, 0xcb93a534, 0x9cf20655, 0x524bd825,
- 0x671ca3de, 0x919ff649, 0x1f52c23e, 0xf4c7b971, 0x527b946c, 0x0aebdf2e,
- 0xae49b974, 0xf5c6ce8d, 0x72e4f585, 0x7842bf73, 0xa42d626d, 0xe81ea44f,
- 0x91e8571f, 0xeb986ad3, 0x2a26eb2b, 0xd117775f, 0x27594b78, 0x57a913e9,
- 0xd783a386, 0x7e0e84bf, 0xed007486, 0xe8b01f8d, 0xe35e3065, 0x9a6d1672,
- 0x44e74bc8, 0x4f5c8fd2, 0xc49eb8da, 0xf43ce532, 0x4c7eb265, 0x7ac987d6,
- 0xf5c2db30, 0x69675c9d, 0xe74f64cf, 0xe594c076, 0xfcc19511, 0x3ff9a636,
- 0x60349cce, 0xb2de84d7, 0xb10bf4d9, 0xe0a3601e, 0xd369c5bd, 0xf686ca6e,
- 0xc8fcc690, 0x9780a957, 0xbb7cf09d, 0xb36b7bb0, 0x2d7bdd39, 0x5acd7e9c,
- 0x703b9580, 0xcfb4625d, 0x1732678d, 0xe7c4a3d2, 0x577c2776, 0xfd71df81,
- 0x8dbdffd4, 0x7c24a5b5, 0x9d9cfd5f, 0xcfe7eae4, 0xb256233a, 0x6e858247,
- 0x57eb2fe8, 0x7b53b256, 0x3e92739f, 0xd0fa15ed, 0xd128f683, 0x5fae2acb,
- 0x21275d71, 0x648f5535, 0x35cafb48, 0x4a045123, 0xcdc9acfb, 0xf6c1aced,
- 0x8a2f6890, 0xcae9cf5d, 0xcfccfb4a, 0x62773ddd, 0x6861e01d, 0x588fff9f,
- 0xe981d0e7, 0xe787dfe5, 0xc7c380a1, 0x472c1fb1, 0xdfaab57a, 0xf406deb9,
- 0x1e5d4335, 0xd1e60bbf, 0xa170ef94, 0xfc29ab9e, 0x846f2ff5, 0xcc0787fa,
- 0xbceb8acc, 0x1b1d7a67, 0x5dfcbc35, 0xe89f3112, 0x5b97e049, 0x4deb19f6,
- 0xa87ac69d, 0xc82f58cb, 0x4f4e7a70, 0xfbe1d920, 0xd5b5d033, 0x4ec1f2da,
- 0xbcfad780, 0xa7d1f495, 0x067160ee, 0xfc535bca, 0x5ad9e218, 0xf444f1f8,
- 0x3fba846f, 0x02b4b051, 0x38cac4ed, 0x3faf91fe, 0x742bcfb7, 0x01b7810d,
- 0x899b31fc, 0x3ad60cf0, 0x85ce492f, 0x2c789f04, 0x845f0ffe, 0x9d9a2ff9,
- 0x9e9bbae2, 0xaa4e67f8, 0x60927b42, 0x572da164, 0x2dfb887f, 0xae096f78,
- 0xf059e9ff, 0xb0d5c01e, 0x46c3eb85, 0xbe434b16, 0xd3bb066d, 0x477c4b06,
- 0x5b875f06, 0x5155e2da, 0x8c8edff8, 0xa70871da, 0x802d27f6, 0xfb1ab1f4,
- 0xaf93b04d, 0xe07ae0cd, 0xd7767d96, 0xdd1e0329, 0x2ba87a86, 0x912a75c7,
- 0x61fc84ff, 0xb8968a79, 0x09577a1f, 0x6bf88b5e, 0xa658f5b2, 0xfc57f8c1,
- 0xf79233e9, 0xa7cd978b, 0x5d80c47d, 0xb4f9256d, 0x3d20aef8, 0xdf2b697f,
- 0x6ade9e17, 0xa7ef11d7, 0xe3fdf3b7, 0xc4473ce2, 0x857bff06, 0x8bdbdb9b,
- 0x19453be1, 0xbc64c78c, 0x55f0ffbd, 0xf7a3a45e, 0xdc2f1f19, 0xf78fedc9,
- 0x62af805d, 0xf7f60efe, 0xefd8177e, 0xaaf9e20a, 0x2e2e510f, 0xd0faaede,
- 0x9c87604e, 0xbc598fe4, 0x0b71cafd, 0xa7dfd81d, 0x5fa1a623, 0xe895c768,
- 0x02f7c289, 0x89d7ee11, 0xb5cf015d, 0x1d93171d, 0x95bb7477, 0xedb6e9c3,
- 0xf15c79c5, 0x4af89527, 0xda20b133, 0xff161dc7, 0xfd67e438, 0x70333de1,
- 0x959fde5d, 0x1ff7a26b, 0xf10b9857, 0x85965ee1, 0x5a2b17cf, 0x61b7f900,
- 0x899992cb, 0x866a4ef6, 0xad35eb4a, 0x8f567970, 0x7ae9f883, 0xdb4da3d4,
- 0xfc707f81, 0xf2ff6fd9, 0x3f224f46, 0x4cc55edd, 0xc3cbf609, 0xc27a235f,
- 0x6be028bf, 0x57d7c0b1, 0xaa6b2be5, 0x12968be4, 0xef8ba394, 0x0d71296e,
- 0xf3f45761, 0x0f9cc3c5, 0xb850cbef, 0xc32f20ff, 0x6e18cfb0, 0x3528e65f,
- 0xf9b15e91, 0x940f7f98, 0xc62db0d9, 0x0c81f7fd, 0x640fda8d, 0xfb5f7b70,
- 0x5efb6ddd, 0xcd4d7987, 0x80cf6e08, 0x97f3217a, 0x71ebbae3, 0xb1983557,
- 0xbbb26288, 0x6e56c3d8, 0x8edc6ac7, 0xf6c99cde, 0x8fd84bae, 0x997f6277,
- 0x62f61724, 0xffc7bd3f, 0xdaf10c78, 0x44f1f05c, 0xe7420fb4, 0x1fa0b33e,
- 0x90add1cd, 0x02f8773c, 0xd425e90c, 0x282d8b3d, 0x21c3901b, 0xc913d71f,
- 0x0653faf3, 0x3c84efdf, 0xfde7ea71, 0x3b7f9c11, 0xb73e9cdc, 0x3f214b2d,
- 0xc44f36e4, 0xb5ff408e, 0xa5ed2a7c, 0x67efc225, 0x5f94fddb, 0x288efa45,
- 0xd75831ae, 0x35f47f39, 0x30a88dcc, 0x8bbb87ce, 0x77d9dfc9, 0x854f9a60,
- 0x1feec736, 0x788519ad, 0xf0fb32ea, 0x27af877d, 0xe1f7e70d, 0x386993ee,
- 0x2add86bf, 0xf221bbcd, 0xb2c2d649, 0x33f0b901, 0xca3af825, 0x1c52e601,
- 0x0f94be38, 0x34feee1f, 0xfb83816b, 0x567f480b, 0xa7978d0e, 0x8e7ed3dc,
- 0x39715fdf, 0x1e8f975f, 0xee5046b7, 0xfd07647d, 0x3f6f2096, 0x777d7272,
- 0xefee432a, 0xdf500b3a, 0x2a6fd7f4, 0xf89ca135, 0xdfcf8b8e, 0x402ce963,
- 0x628dfc7e, 0x1ae967ec, 0x6c808b13, 0x5006b25f, 0xa3e8f203, 0x40299f47,
- 0x9ee1ff79, 0x27e60a67, 0x9dfc97c0, 0x78e57b2c, 0xfd266fc2, 0x344b1ea8,
- 0xef9bc70f, 0xe57f72b3, 0xf9547e30, 0xfdc2db3e, 0xab3e7da1, 0xbf28748e,
- 0xf1486beb, 0xc7db7da3, 0xe6ff246c, 0xacbf0b77, 0x83da3fdf, 0x7df918fb,
- 0x1e15ff52, 0x974a4f4e, 0xd483fe42, 0xe3c938b2, 0x7bde59bf, 0xe4133d60,
- 0xbff3ac3d, 0xd9345f21, 0x3246c7e2, 0xe20b0fa2, 0xd0ecd2cd, 0xad653f50,
- 0xbf15f4e4, 0x2afb44c3, 0x81d7ae4b, 0x928aecf2, 0x30133694, 0x6d2527a2,
- 0xf201c622, 0xfde94b73, 0xfba73570, 0x7f231670, 0x516b0e5b, 0xb8bb5e48,
- 0x326f3edf, 0xea83ad73, 0x9edc11ac, 0x16df32f1, 0x5dd698b5, 0xc490ff41,
- 0x480bd487, 0x3f1f283f, 0x3f446c52, 0x7642b74a, 0x9cea6aa7, 0x470efd8b,
- 0xc847134f, 0xa7a73ffd, 0x893ffafe, 0x188f269e, 0x3a829e8e, 0x36d793d1,
- 0x3f093d34, 0xcfbf79f1, 0xbfdc01c2, 0xdd7c0b66, 0xfc4ee427, 0x9bf49b0e,
- 0x99df382b, 0x3699bfa2, 0xfd455447, 0x9c7f33e1, 0x187dffe8, 0x27dc5fec,
- 0x0efda0e5, 0xf395c8e5, 0x16375a74, 0x417f41eb, 0xd236c5ea, 0x629af78f,
- 0x64af6845, 0x8fd96f8f, 0xb73d9174, 0xed2562ac, 0xa8dcf2ff, 0x92b7500f,
- 0x7559741f, 0x0488d4e0, 0xdf10e17b, 0xe5df34cf, 0xbbd3e7a7, 0x1a107db9,
- 0x387b216d, 0x7184a5d4, 0x96039b9d, 0x6b41da08, 0x095fee15, 0x3cebb06f,
- 0xa0afa3ee, 0x740fcfb8, 0x488a171e, 0x8ecf6fff, 0x35bd23ef, 0x91efc838,
- 0xf83a3f1c, 0x1fc7ca85, 0x8efc68c3, 0xeaaf1e72, 0xfee75ab8, 0x1d7eb115,
- 0x6e39fcb3, 0x18d607b4, 0x85c659f9, 0x18809cfa, 0x7afe6f18, 0xae43ad3b,
- 0xd2ccf1ca, 0x0cff5c6d, 0x67e40b96, 0x162c9ace, 0xfa6fc446, 0x05241cf9,
- 0xbb4573d6, 0xcb39378c, 0x79684502, 0xb7e9bbbf, 0xbd00a4af, 0x8e9fa166,
- 0x68c2ca6f, 0x987f93f7, 0xfc7dc9d7, 0xc90009a0, 0x5cffd6c5, 0x42697357,
- 0xf03fbc5d, 0x6b6bdcfd, 0xf6ef48f2, 0xa0d823b7, 0x6fdd658f, 0x351e31d7,
- 0xbd81aed0, 0xf852eb6b, 0xb5c761f7, 0x3c068e57, 0xfffc7228, 0x7f47eb5c,
- 0xc6199da1, 0x4b4f844b, 0xfd0cdfea, 0x7ef9743b, 0x784af6bc, 0x590ac1e1,
- 0x1fad66bf, 0x6ccfbbe0, 0x2a508fec, 0x1c8ac7be, 0x6dacd6ef, 0x0de7d6eb,
- 0x276005d8, 0x39911aec, 0x7eac1f60, 0x85fb667b, 0xebaa5630, 0x5dea0b53,
- 0x1e43e7a9, 0x648fec85, 0xe4203d7e, 0x572cdfc3, 0xf47641f5, 0x57241181,
- 0x4b20667b, 0xe483fec2, 0x80796876, 0xf6073da1, 0xdbc21748, 0xd3e3fc7b,
- 0xef7b422f, 0x1b1fdfec, 0xb77be3a9, 0x37bb45ce, 0xba234e74, 0x2353ed0d,
- 0x4dadd00a, 0x71e80bf0, 0x7a480b8a, 0x7412ea5a, 0x8c330eea, 0x521c7473,
- 0xbe898526, 0x83ae0a5a, 0xf8f7ccb5, 0xe438425f, 0xde7932be, 0xf7c11ebf,
- 0xead2a10f, 0xffd825ad, 0x9da1856e, 0x44f4aeb0, 0x37da02d6, 0x30b59bbd,
- 0x5d25bbe1, 0x973e120f, 0x890f33ec, 0xe08f5fc7, 0x67e6de71, 0xdca3f69f,
- 0x90a228f8, 0x22d0684c, 0xbd40a76b, 0x9684c1a1, 0xcc8cab20, 0x1ce291bb,
- 0x06361675, 0xed0477e6, 0xdf638426, 0x7053c337, 0x814ff0de, 0x8b02db47,
- 0xab6e9f48, 0x4e2839b3, 0x56ff3a54, 0xc52b83c2, 0x5758788f, 0xe3839d5a,
- 0xe19acf34, 0xd808f8a2, 0x7cded871, 0x2fc8b7d7, 0xd75ae124, 0x3b45ae65,
- 0xb9ec0aa7, 0xfcee00a7, 0x7d6a6b8c, 0xd523c7c0, 0x3bb7e5fd, 0xb43e5e30,
- 0x266eff1c, 0x25368ba7, 0x2546fe10, 0xa25941f9, 0x8bb0b35f, 0x8d9f8ddb,
- 0x2be218b0, 0xf6e08d9f, 0xd8e1a5ec, 0xc39f2474, 0xda2a4f5a, 0xffe621d3,
- 0x2ccff704, 0xd0f7d0e0, 0xe479713a, 0x3fc9d569, 0xdcf4f366, 0x5fb6217b,
- 0x85fed1aa, 0xe00f3d69, 0x3d6bbdb8, 0xf04c75be, 0x2aefd601, 0xbd4f7da3,
- 0x3278a827, 0x7d70bcf9, 0xaabd9f7b, 0xd288fd1c, 0xa8fb5aee, 0x5ae6f49d,
- 0x2d7f7ea7, 0x69dfd3ca, 0x16c01f3c, 0x7e874d8f, 0x833efe96, 0x8f6b5dc6,
- 0x0ff5c5ac, 0xbda954ef, 0x607411d8, 0xb9be4bbf, 0x2bd22265, 0x504a4473,
- 0xed3d9a8f, 0xb618e90c, 0xb12fe42b, 0xe30c0279, 0xf3c3d3c4, 0x45f50534,
- 0x1d333d92, 0x5fb455fd, 0xb6bf90f1, 0x18c7f05a, 0x36d382a0, 0x3fb68562,
- 0x6fb0188f, 0x1ce3f95e, 0xbb4a1f0d, 0xfee364c7, 0x14ba9af3, 0x5b2df686,
- 0x42663f15, 0xc627bd7f, 0xea6e0ed0, 0xa69c6854, 0xb82d27d2, 0x93a5b96f,
- 0xec4877f3, 0x057f0033, 0xf18c5768, 0xa07b0aa4, 0x5839fb8d, 0xc448d8ec,
- 0x3d7c63f3, 0x12f78319, 0xad8cabfc, 0xf440f2e0, 0x86711167, 0x0257e126,
- 0x3eed484d, 0xef5bb48f, 0xf3b7fd16, 0x59bb8c52, 0xc455f989, 0xf062b96b,
- 0x2af2cb87, 0x91fa7df1, 0xdfe40cf8, 0xfa17d038, 0xb32ab697, 0xcc29fd6e,
- 0xf1a4eb6f, 0xbcc1343e, 0x056a4e40, 0x0452073e, 0x7d7657bc, 0x86e90332,
- 0xd7e232d5, 0x6a69aee6, 0x6fc78393, 0x4ec85ed0, 0xed89d96d, 0x929737d9,
- 0xc8f8c67e, 0x35fe786c, 0x1cb3e70f, 0x79d7f707, 0x61170eef, 0x48fd33bc,
- 0xe29fd6e0, 0x90a417f8, 0xd61b35bf, 0xe54199c6, 0x796ff41a, 0xa88bf959,
- 0xf533bd7f, 0xc647840a, 0x382e30f5, 0x38e48cab, 0x8a7c33d5, 0x3794177f,
- 0x0e34dc78, 0x116aefec, 0x711e7c5e, 0xd119c40c, 0x62e3876f, 0xdecb0f1c,
- 0x7e46a89c, 0x7be7d07e, 0xacbb4e90, 0xccd7c42e, 0x42df97ae, 0xc6d33b7d,
- 0x9aee293a, 0xd3c4b764, 0x3a4419be, 0x7a733bed, 0xd7083dc7, 0x947d0775,
- 0x8f6492cf, 0xb739715c, 0x93da3fcb, 0xec3f23aa, 0xec1b7212, 0x797f1e47,
- 0x9323ef9c, 0xf6efc8ed, 0x23b72faf, 0x777febfb, 0x17c3923b, 0x28f3c3de,
- 0x890f7ca4, 0xc35f5f7d, 0x5fd434d9, 0xaed23d2b, 0xd8f7ca4d, 0xba3df22d,
- 0xd0f7ce87, 0x0ff8f276, 0xee494186, 0xae222dba, 0x43cef7e5, 0x84bb87ff,
- 0x7b2bc0e4, 0x3748c353, 0x64ed3b67, 0x4b6e6997, 0x7b6ae114, 0x3cc11ada,
- 0xe0453688, 0x578890eb, 0x2c1c4459, 0x834744a3, 0x4025a0e2, 0xa4fc3f72,
- 0xd2abc799, 0x85ee8f22, 0x1ce1e37a, 0xd462cf11, 0x54d9f87f, 0x8a6377c2,
- 0x37fd8e9e, 0x8608fbfe, 0x183eaa71, 0x09f9d70f, 0x87bb707b, 0x7fbc5ae2,
- 0xe75582f6, 0x8968b3f1, 0x33b0f01b, 0x93bfedc2, 0x05a8a6a5, 0x8993bde6,
- 0x32f59f3c, 0xd9e7c5ae, 0x8f2461b7, 0x8231a0cb, 0xdcb5b2fb, 0xe32f545f,
- 0x1fb98755, 0xa5c45eae, 0xf45ece4e, 0xb458a608, 0xb8b52d29, 0xdd7cfb4f,
- 0xdf9ff234, 0x2a2cff79, 0xfd18d7eb, 0x3fd08bfd, 0x3f103c5f, 0xf9f77e96,
- 0xf6e167cc, 0xd0ff80f3, 0xbe8cf604, 0xfd46ab46, 0x775cc459, 0xc3d03ea3,
- 0x43177ab0, 0x1a2506d9, 0x19600af1, 0x7ad6d3de, 0x57bd7052, 0x6f4899b7,
- 0x7e4cdb85, 0xcbd24f7c, 0x67ae1eab, 0xbd70f5df, 0xe08ea803, 0x336cb238,
- 0xe883788b, 0x4533056f, 0xbc2934b1, 0x6ad2ed5c, 0xb2dfd287, 0x8b7336bf,
- 0xcdda1a6d, 0x77f226f9, 0xd7f16e7f, 0xa338bf91, 0xcda9d684, 0x96a6b2d9,
- 0x7d2f13b7, 0xa8078f6e, 0x1b8d32fd, 0x40cf4c96, 0x8782fa79, 0xf19217c7,
- 0x58ddb0b6, 0x2cc38e4f, 0xf5eb1889, 0xa736382c, 0x120dc798, 0xf420bf9f,
- 0x8aebe7c4, 0x167dbe04, 0xfe72bb94, 0xde38f22b, 0x1cbcf94c, 0x875d5f95,
- 0xe30a3074, 0x725845ca, 0x84cc2abf, 0x99751fe0, 0x48fa2147, 0xcdcdb782,
- 0x96bc29e9, 0x5aff21fb, 0xfee193f8, 0xf82593d0, 0xb8c5efd8, 0x42cbbd65,
- 0xf5f3e871, 0x1a64a497, 0xf5fdb7f7, 0x96feffbf, 0xaad7e7ef, 0x45c6bf22,
- 0x8bbe6972, 0xe2a4e5cf, 0xe7c51bdb, 0x701fb00e, 0x0ad56b1e, 0xbfd32394,
- 0x3479a0e9, 0x8239061f, 0x3de5ac71, 0x9bd7a805, 0x304a7bab, 0x5f8c935e,
- 0xf2935684, 0xef9bde0c, 0xda08fd81, 0xf18c6b0d, 0x55f1c1b8, 0x2666617f,
- 0xee4e7bee, 0xd7f5197f, 0x2fe93b7e, 0xad17bff8, 0x16cfffc8, 0x388b5c91,
- 0x2e316383, 0xee894a14, 0xda1171f8, 0x6a8edc31, 0x44eeb6ea, 0x5b7e713b,
- 0xff0f7c42, 0xef18deb3, 0xc8326b63, 0x8c3aceef, 0x4e1e1bcf, 0xfc506efa,
- 0x65d69daf, 0x0e3825b3, 0x10f41df9, 0x599c1de5, 0xbeb86262, 0x116faaf5,
- 0x7633a92a, 0xc67e7da3, 0x466bd7c9, 0x3e3fd4a6, 0xebfabfb2, 0x57d9d683,
- 0x6f5ceb82, 0x7ec4c162, 0x619d709a, 0x3ac51b7d, 0xe2fa799f, 0x8fe75c12,
- 0x825c5ff7, 0xdcd9bceb, 0x29be47ee, 0x41af0775, 0x32ba9cfc, 0xe615f640,
- 0x9d1f20ef, 0x94de3d2f, 0xfd378f44, 0x2eb1e8e3, 0x59321f31, 0xe3f4363f,
- 0x3d1fa8cb, 0xe50f3d16, 0xf59b7cc1, 0xfc49ea2f, 0x679fd21b, 0xe3ff92b3,
- 0xdd3f1ff0, 0xdd18f948, 0xb227d92a, 0xb05ac3bf, 0xc3a3ed18, 0x617d796f,
- 0xd3fef865, 0xb5e5fd85, 0x59fa30b1, 0xda40bee4, 0x78f2c997, 0x03cf98b9,
- 0x8cdffe99, 0x3ce21bf8, 0x20dc7fd2, 0x3e45b1de, 0x6eef76e5, 0xc7fdc7c5,
- 0x6e4fe49d, 0x85cf343e, 0x220fe2fe, 0xfcdc7fdc, 0x8ff93974, 0x8a5e4497,
- 0xeb193e62, 0x1fb85c56, 0xddfbb259, 0x9003cc34, 0xe7d85efb, 0xf9499fde,
- 0xeffdc635, 0x69939107, 0x70003798, 0xa7708fbf, 0xa416e27a, 0xffd866fb,
- 0xb360de61, 0xfd863f16, 0xd9187b36, 0xc65d9505, 0xef8ca0fd, 0x48580b6e,
- 0x56eb6eee, 0xb3af2822, 0x3ca2c12c, 0xcf2c6bd4, 0x80753a75, 0xfe9922cf,
- 0x077e9017, 0x5dfeeae2, 0xa7fe9758, 0x44e9ad9e, 0xe6663dbf, 0xdb3ca177,
- 0x4cf214ea, 0xdbf6f923, 0x7591dd87, 0x6fc37609, 0x584edc23, 0x34c1f322,
- 0xbd60d516, 0x1ff1cd16, 0x7946f5c7, 0xf7924d1f, 0xaf197589, 0x494585bf,
- 0x9eb1352f, 0xc4b0bcfe, 0xf9e392f7, 0xf4370718, 0x75ca5ac3, 0xe4dd93f4,
- 0xab9c51f0, 0x6356cfeb, 0x9bef987b, 0x55961f81, 0x5cafd00c, 0xcdbf4907,
- 0x6a1d1fc2, 0xe90ab138, 0xb8a5d437, 0xe8dd7f70, 0x08dbf4e3, 0x9bcc2ffb,
- 0xd677e64d, 0x1d13cc69, 0xdd7fff8e, 0xff9c77cf, 0x9cac3d7f, 0x5bf8c7fa,
- 0x3f97efdf, 0xfce5ef9c, 0x7fbeffa5, 0xe70add9e, 0x2579a4cb, 0xfd935bef,
- 0x79df72ee, 0x8bf0f2be, 0xf17c7c24, 0x70c7c60f, 0x6b2530f2, 0x7242fde8,
- 0x96afe35c, 0xf8e0ff87, 0x1e8723c6, 0xa881334a, 0xf6145258, 0x6fcebb72,
- 0x50cf1788, 0x371e7e1d, 0x453e7955, 0x9f5ddc72, 0x9ceee221, 0x5a7a5f9f,
- 0x126af5a5, 0x43a6e1f1, 0xdabe1f4e, 0xf14c58e0, 0xe9f8e4d9, 0x067fb652,
- 0x1c72512e, 0xb8f911b5, 0xa9e3cbf8, 0x3d396517, 0x56749f24, 0xc90a7984,
- 0x392f5a79, 0x4c111d3e, 0x32eaef58, 0x410c08d8, 0xcc2d36d7, 0x4f9e1232,
- 0xf4f6e2cb, 0xc89f224b, 0x05e7e16b, 0xfaa673ee, 0x35e31f71, 0x7ca77cf1,
- 0xd67a604f, 0x02ef9424, 0xc0decf9f, 0x4797804c, 0x4f249dcc, 0x1f0e4dc2,
- 0xc7e93fca, 0x10effd91, 0xa5f38b71, 0x19676ff3, 0xfebfbce9, 0x85fbe24f,
- 0xdaffbe92, 0xc016ddb8, 0x181f818d, 0x186befd1, 0x4c1a2a67, 0x33f5c3b7,
- 0xcb2efd1c, 0x459edc0a, 0xa44ff178, 0x94f3e6fc, 0xf195a92f, 0x622fe893,
- 0xfa3aac07, 0xfce26d37, 0x239f5b0b, 0xaabeebf6, 0xcd979459, 0x73c88fcf,
- 0x8fd9c336, 0x9ff9fb1b, 0xf5fd8fdb, 0xc8fd93bf, 0x9c7681dc, 0x7e69a9e7,
- 0xed5ffbca, 0x5f71f804, 0xc875a8f8, 0xdcf22cf3, 0x5df997c5, 0xcdc3f6f6,
- 0xb0ef6ae9, 0xe563ff70, 0x34ff93e7, 0x34fbf9f9, 0xdf0ea3f4, 0x7be4584f,
- 0x9e22fcc6, 0xf82576c6, 0x9e3143b7, 0x8092f7af, 0x447f3e57, 0x6cf4c5e8,
- 0xa75c51ff, 0x6cdb8f8f, 0x9e4c1d2c, 0x9b172e4a, 0x2f8a0e03, 0xbcf27734,
- 0x8ce5e697, 0xfa3377e4, 0xafd0cb4f, 0xbda18b38, 0xf8233537, 0xf57efc33,
- 0x9ab08edb, 0xd9f77e8e, 0xdebbed0c, 0xa9febce3, 0x6c73f83b, 0xa1ef2d0f,
- 0xecc74678, 0xaaef288f, 0xae9fcfea, 0x7824adaa, 0xb1e08fc9, 0x7cadcf1a,
- 0x0fe3c591, 0xbf43fcb2, 0x5f93f13c, 0x9e06e3ff, 0xf8f21d9f, 0x908fe85d,
- 0x3d54f17e, 0xe6a67e46, 0xab68dc03, 0xd87cf952, 0x9e88f3f7, 0x2b4c0f9a,
- 0xeb29ff47, 0x9da397b4, 0x072bf55b, 0xbe3ef7ed, 0x24f2972b, 0x0fc13dea,
- 0x2b3a3e51, 0x5da35723, 0xa0e3085a, 0xfef1fbbd, 0x6e11cbab, 0xa336ff29,
- 0xdfc61cff, 0xc7fa34ec, 0xd9e78d99, 0x4f9bdd2f, 0x4762cbfb, 0x6fec53f0,
- 0x11c39db3, 0xa3ce23de, 0x3279bf08, 0xcfbe4bff, 0x4e344d56, 0x146db7f6,
- 0x6172972f, 0x6d0599b3, 0xe4f203c5, 0xf22e9678, 0x635e444c, 0xa79e2cf7,
- 0x8f0eea22, 0x81f3bf31, 0x238f2bcc, 0xb4b3c73c, 0xe61f96af, 0x402f9418,
- 0x187e62fd, 0x9c5805c5, 0xe7a92dfc, 0xca3d1db8, 0x727a2165, 0x9c51df02,
- 0xed09e806, 0xb1e34bf9, 0x6b9e78e9, 0x52f44774, 0x73e1f3c4, 0x3693c226,
- 0xd38a35eb, 0xbc7c2ba4, 0x6b3df5e2, 0xc61fa2c7, 0xb865e6f5, 0xabbb7aa4,
- 0x353a511a, 0x4d9f9023, 0x52968cfb, 0xbffaaf1c, 0xf29f3ccc, 0x17f92a50,
- 0x291a0a3a, 0x6df7b29f, 0x4e3ce1ab, 0x77661972, 0xa8e4fe53, 0xbc505fae,
- 0x194079c0, 0x72762f32, 0xd6f083b6, 0xa0cd8e1c, 0x3d4b9b1c, 0x43b56724,
- 0xb538a2bf, 0x094356b6, 0x7eccdac4, 0x0b439e13, 0xefe460af, 0x32e1d31c,
- 0x8a157ae1, 0xc3be98fc, 0xa418987f, 0x0f8c8afd, 0x937e7844, 0xee1567b9,
- 0x3d15b4c3, 0x3d718797, 0xcd4db80e, 0xbc5fd865, 0x872dfc99, 0x4fd21952,
- 0xddf15761, 0xb85ea153, 0xb0f145c6, 0x296ee7be, 0xcc2aff24, 0xf310cc73,
- 0xf336ca2c, 0x9aa50ffe, 0x9b699f50, 0xd5470f98, 0x04d559dc, 0x875083f7,
- 0x3c8a467e, 0x453f1955, 0xa9ea143f, 0xd7e0ecf4, 0xffb63f30, 0x14b2d21c,
- 0xe3490e7e, 0xb2e712fe, 0xf3c71fb7, 0xfc2c3b3d, 0xc391e79c, 0xdf936613,
- 0xe0a6ad6f, 0xe6ee7c3a, 0x663794f9, 0x42e70c9b, 0x847ff006, 0x1d4756f2,
- 0x6bd705fa, 0xaff61630, 0x68bd3999, 0xd13b3d6b, 0x2d38b8bf, 0x97ba7948,
- 0x8a5545b5, 0x3d4eb99f, 0xec27ab50, 0x3d70b673, 0xef8fbcd3, 0x35f793cb,
- 0x6e5fa65f, 0x2dda36cb, 0x927ff5fd, 0xfe83f2dd, 0xfdc6fff1, 0xf3ee330a,
- 0xe497ed92, 0x3b20dd7d, 0xe1bfa93c, 0x8f9918ec, 0x0e303d52, 0xd29da7b7,
- 0xf21b0a4e, 0xe21daa75, 0xd8cda17c, 0x5c23e5ff, 0xbf995f34, 0x5f52b593,
- 0xf1fa7981, 0xd7f2301f, 0x70a23cc9, 0x09f7cf35, 0xee746c9b, 0x29dacbd0,
- 0x8b24687f, 0xd3f90a2a, 0x6d7ce87a, 0xc111e636, 0x47b2eb77, 0x99dbbf51,
- 0xf24a8d6f, 0x6e105752, 0x509fe63b, 0xeb646abe, 0x3ae71c22, 0xc7638d07,
- 0x308b93fc, 0x15bfc085, 0x06f4ebe5, 0x05c85f1e, 0x9bf48c7d, 0xa1d39a3b,
- 0xa1e78ebd, 0x99133cc8, 0x50f12217, 0x3b9bf02d, 0xca115176, 0xf04ab5c1,
- 0x99ae7151, 0x4e0dffcb, 0xec195e71, 0x79c0cf6b, 0xfbc63ee4, 0xf898370f,
- 0xaf376479, 0xf9d18c77, 0xf82fa079, 0x728ebb39, 0x2832779e, 0xaf09253f,
- 0xc9e53f32, 0xe411e0b4, 0xcbf3e497, 0xd6124e92, 0x6f47ce8f, 0xfaedc43b,
- 0xcd0b06ea, 0x35c96fa3, 0x5bec97e4, 0xffa86262, 0x9cadbbab, 0xdbc692ee,
- 0x51616756, 0xd88b57ec, 0xf5ca7e51, 0x16bb32ff, 0xfd14b5d6, 0x6b5a47be,
- 0x38053a2f, 0xa01ada3e, 0x74aecfb2, 0xf9d217aa, 0xa928b9d2, 0xf9f2d89e,
- 0xe60e5ab5, 0x140bc4db, 0xb7e79920, 0xea93347e, 0xb077f199, 0xcf16eafe,
- 0xc39034d7, 0x3be2637d, 0xffff7814, 0xdbc47f79, 0x19731691, 0x87a1f77b,
- 0x5f1d8666, 0x91fddf41, 0x9c71e725, 0xc4d3bd41, 0xc8c5567a, 0x5df4245f,
- 0x5e879725, 0x25ec8587, 0x653fc8af, 0x151f2235, 0x5f503b23, 0x714e5399,
- 0x0fd3077b, 0x8f3db0c4, 0x7682cf9f, 0x6395ffa2, 0x26da7fb8, 0x49e582e8,
- 0x184f54bf, 0x22cadbe5, 0x2fdf7d37, 0x23580f8f, 0xc7c63fdc, 0x0be9e37f,
- 0xf325be28, 0xe573196a, 0xf280b9cf, 0xfbf50c6c, 0xa61ce2ec, 0x73c7cae6,
- 0xc2cd2bec, 0x36f515ee, 0x8bc53067, 0x043e44f3, 0x2b8dee7b, 0x7c865bbf,
- 0xf7e0e674, 0xbb5ca347, 0x82fb02ca, 0xeed8e421, 0xfac72e1c, 0x0a358ecc,
- 0x0ce3577c, 0xfac2f08e, 0x773a95bf, 0xa8291e06, 0x82cd311b, 0xf8581747,
- 0x9af11551, 0x3dc4e829, 0xaee7c387, 0x77982809, 0xc7ced2ba, 0x7e6a0ccd,
- 0x9c10f73a, 0x70075ca3, 0xbcb854af, 0xc9d28f86, 0xd3d52a8e, 0xa3611cf1,
- 0x0aaad738, 0xe74971b9, 0xf5ed1526, 0x35beb79f, 0xe006d355, 0x878885fb,
- 0x78c8a2ff, 0xcbdca23e, 0xce5962ed, 0x900bf556, 0xf0ad56fe, 0x12f336bc,
- 0xf8e9ce7c, 0x9565b4ca, 0x557da798, 0xfa254b6a, 0x4f3d0769, 0x0965e787,
- 0xaf7f2b75, 0xc02f16d1, 0xed0be673, 0x8f8849c1, 0x7bbde7cf, 0x3b87ca27,
- 0x0778df73, 0x30de24f3, 0xaf91390b, 0x98eef588, 0x79eb069e, 0xf22dd8ac,
- 0x0f446c3c, 0x3f72c6c7, 0xdb1e7f64, 0xfaf28538, 0xe7e1ce66, 0xb165af49,
- 0x9ef81eb6, 0x3ed0159c, 0xd9117263, 0xde453391, 0x02b78cc5, 0x9817e869,
- 0x435c4371, 0x5d9c3a39, 0xacd79574, 0xc3f024f7, 0x7eb1bad5, 0xfeb1a96d,
- 0xfeb19f35, 0x1a7fc98d, 0xd97f589b, 0xf48d9fee, 0xf3c2bcf4, 0x3097c33e,
- 0x47bdd117, 0x32fda309, 0x715c99e1, 0x27e7970e, 0x77d8abfe, 0x5199333d,
- 0xdb3ed67b, 0x20373c2e, 0x68aad7fd, 0xb665e09f, 0xef7e0cc2, 0xb8bbea03,
- 0x3d1cf222, 0x0761d314, 0x3f71756f, 0x816bbc2e, 0xd3ce0e7e, 0x46aa3f89,
- 0x4e9052da, 0x76261cb2, 0xa552f239, 0x239aec8d, 0xb9e4ee82, 0x058d2aea,
- 0x8602cf30, 0x1be5839c, 0xe88bd766, 0xc87f0ea7, 0x69dbe718, 0x8a5be51a,
- 0x415b1831, 0x5d0487f5, 0xfff2911f, 0x57589a07, 0xef1840e0, 0x1e5cfaec,
- 0x74652e87, 0x656cfe5e, 0xdd25d509, 0xae01fe4b, 0x41a18d1e, 0xe927f445,
- 0x0b4ea7f9, 0xbf7a83d1, 0x34bbf849, 0xea590213, 0xb8807092, 0xc93d42c9,
- 0x7195fec8, 0x6c9b7cba, 0x7ef8fcd7, 0x7e75930e, 0xd44b2bde, 0xaa6a29a1,
- 0xf2fcfd40, 0x3b7f48ef, 0x879c417c, 0xe746cd25, 0xa3e38693, 0x767f825d,
- 0x4b91f125, 0xe8f89abb, 0x3b92cea7, 0x9c6bee08, 0xa5afd42e, 0x0cba9730,
- 0x2a23fd2a, 0x4fce3bf0, 0x474f8a6a, 0xed66f9be, 0x825aa9f1, 0x9bef869c,
- 0xc6a9f7f8, 0x7df0bdef, 0x69f7c246, 0xe0d97df0, 0x6d3b77ef, 0xec44d351,
- 0x21ef4aa7, 0xc9a5187d, 0xade6235e, 0xaeeff166, 0xa3b17911, 0xd47de0d3,
- 0x3cc6a55b, 0x67d93613, 0xa4e4fca3, 0x94ab92bc, 0x57c839e5, 0xba9adb57,
- 0x97bfa07b, 0x655e3ac5, 0x82e67e0a, 0x2dcf2fe7, 0x57fc8a39, 0x98688b6b,
- 0x8b2d77d7, 0xedc5c2ae, 0xc47c169b, 0xe1f24c5d, 0x6693e459, 0x03363835,
- 0xedcc67ea, 0x222d6fa3, 0x9bce79fe, 0x495e1839, 0xa45e1227, 0x493fa417,
- 0xf770c92f, 0x3bd7e7af, 0x3b6f1129, 0x4a5851d8, 0x16b18abb, 0xe7ad779e,
- 0x880bcc0f, 0x65ad8668, 0x2f35da34, 0xdc93092f, 0x00fdcab8, 0x9cbd65e5,
- 0x944ffc1b, 0x7e177567, 0xcbe1a052, 0x716379de, 0xc6f000df, 0x95e137fe,
- 0xbca0e3f6, 0x862d0fe3, 0xf8e1e3bc, 0x421b8e4c, 0xfcc550bb, 0x47949de4,
- 0xf0e39a1c, 0x109179e1, 0x5256c1e6, 0xc2ec8afc, 0x6f6e14e7, 0xb09b1587,
- 0x3f37cee3, 0xaf7b7199, 0x33dedfa9, 0x9477d7e4, 0x6e3bb873, 0x35fde5d5,
- 0xc79c5edc, 0x4b393db8, 0x51742fb4, 0xefc3ccbb, 0x86fdc9d8, 0xd58f4f1d,
- 0x03b05fb1, 0xe9607ec9, 0x322ccec2, 0x7f839bde, 0xde40d64a, 0xb73366f8,
- 0x7c99b75f, 0x62e79455, 0x9ce2f7e1, 0x318cfe91, 0xff64fc7c, 0x628e6e62,
- 0xd7f74f88, 0xc5fe4eff, 0x3f307752, 0x585bfa07, 0x4c502fe6, 0x33f71708,
- 0x1c3e7d02, 0xd6fe9863, 0xfeed1a32, 0x9bcb59ab, 0xcfa262a6, 0xcfa41ce2,
- 0xc59f4009, 0x9d1cfa06, 0x24e5c993, 0xb3e925c2, 0x934b2e9f, 0x1782caf3,
- 0xee1d3f60, 0xa3355fce, 0xf56ddcbd, 0x9e5abcf1, 0x072c9827, 0xa059e012,
- 0xaa3c7871, 0xc24cf04a, 0x39e19371, 0xa4493e1f, 0xf9c66ccf, 0xc97e3861,
- 0x59d858d6, 0x2f2fc031, 0x083583db, 0xacdf3f7f, 0x41a3fbe2, 0xa20eab70,
- 0xe46febaf, 0x2a509471, 0x4f1847d7, 0x67f181c9, 0x5faff189, 0xfaf0e9cb,
- 0xdd10b17f, 0x8451f802, 0x9f68b67f, 0x7944ddd5, 0xcc6bb354, 0x00ed1227,
- 0x13a9b719, 0x94f1cdef, 0xe605919b, 0x4bc634cc, 0xf1e26eea, 0x8b58eb9a,
- 0x7ebde119, 0x3a9fb18e, 0xcc13feb6, 0xc9bbab0b, 0xd3983cf8, 0x8b19671f,
- 0xc2a39671, 0xfc07d08f, 0xd3f4fc82, 0xdf0fc5cc, 0xf63bf40a, 0xe8156587,
- 0x6b32ba77, 0x504deec9, 0xf1b6f1ff, 0xd669ff54, 0x06f3bfb7, 0xa128efb6,
- 0xb9260d7a, 0xf9083ee1, 0xd54df511, 0xc114d78d, 0xa2ed47b8, 0x39f95bdf,
- 0xb04a6f02, 0xe7bc1663, 0xf28a389d, 0xbc846b21, 0xdbc85be9, 0xc9d0bdaf,
- 0xd66f5e53, 0x9358dfc9, 0x79f5efc2, 0x2bbffcad, 0x66b73f30, 0x3c57a894,
- 0x47eb1e3f, 0x1ee84f95, 0x8726de80, 0xce6ff886, 0x09da7ee5, 0x1c744aa5,
- 0x4326a5e0, 0xe7c4dff4, 0xd1e2b335, 0x9a8fe45c, 0x04cfde6a, 0xc5b5b4df,
- 0x2ce99ef8, 0x6d737bd5, 0x250f2869, 0x70e7b59f, 0xa9ed2cfd, 0x67fbe080,
- 0x8db6effc, 0x36037e80, 0x0c5387c5, 0xdf977e4c, 0xe7e0e55f, 0x22fe02cb,
- 0xd3bcfd0d, 0xb37f3cfd, 0x0722b6da, 0x0f5fe7f7, 0x5e712a5a, 0x51f95b2e,
- 0x27d26fb4, 0xf9449b4f, 0x466636be, 0xdf74073d, 0xf1787c41, 0x48690527,
- 0xdd230e6c, 0x259aba4b, 0x48c6bde9, 0xdbafc0d7, 0x5cbf3272, 0xdae81b5e,
- 0xc3d7403e, 0xd700b9d9, 0xa9ae098f, 0x3fd5fd46, 0x265f124e, 0xbc9c3b8f,
- 0x7944bfc0, 0x275f003a, 0x661e5ff1, 0x7f2315c5, 0xecfe4bb6, 0xa62c3842,
- 0x29b39d18, 0xe9e37c54, 0x7630ea60, 0x1f2f8486, 0x8315ae7d, 0x4d3fc2e1,
- 0xcffc7fda, 0x3cf2fb83, 0xf9df0ed1, 0x5c00c659, 0x242bfe30, 0x7871570f,
- 0x5c2d2bb6, 0x5c7b08be, 0xe59be7ee, 0xf38cbb7c, 0xe7e14aad, 0x77e8bdde,
- 0xf97bdf21, 0xcfb87ef8, 0x7ce41d63, 0xcf6f4242, 0x169c05b1, 0xee75db98,
- 0x1b9fa27a, 0xb469f858, 0x8339968f, 0x74f117ff, 0x843e14c4, 0xcc859cfb,
- 0xa057f14f, 0x1272778f, 0x6f090dec, 0x879a9e32, 0xf3c9a791, 0xc0cfc649,
- 0xbec8c5ba, 0xfad8e793, 0xf39e7be7, 0xc7897694, 0x6577f462, 0x8654ff08,
- 0x452147f0, 0xc99e7ea1, 0x2fb87c52, 0xc853ee98, 0x6cc998be, 0xd6679724,
- 0xe19e7d72, 0xd487f6e0, 0xc7f1c3fb, 0x92339715, 0x991b3357, 0xe7c73738,
- 0xd9874c95, 0x2c0ee7ac, 0xadf4da67, 0xd1ef900b, 0xe007d14e, 0x7ee0e99f,
- 0x215cecec, 0x24cf583f, 0xf5fdf3d4, 0x67be7a41, 0xe49fca12, 0x9e66c9fb,
- 0x8cd2fee3, 0x99ca8c73, 0xa12777be, 0xeda8cf9e, 0xcc728499, 0x737bf2e9,
- 0x1fdc0d27, 0xd1c78c98, 0x871926b9, 0x4cfa2f3b, 0x3f191fe6, 0xc706736b,
- 0xcfd238e2, 0x55894671, 0x1e154be1, 0x583fe897, 0x76ec7484, 0xff434d15,
- 0xe81b1cc5, 0xcbba1823, 0x1730bc15, 0x008fede9, 0xf10e6e0f, 0x75e3c055,
- 0x347bc135, 0xe1e12c3e, 0x2b878136, 0xfb3d5e85, 0x89f14ab0, 0x43e97f1a,
- 0xbd084218, 0xe2116662, 0x45e93f57, 0x59d8bf79, 0xf8a2a030, 0xb24e63fe,
- 0x5abf1633, 0xb195f14f, 0x4f4c4c5b, 0x22e393d0, 0x327b8629, 0x30cbec26,
- 0xcce82fd4, 0x617fbc35, 0x7b4328d7, 0xa1bc7479, 0x2a57a2fd, 0xb149f50c,
- 0x2ff78629, 0x50daab7e, 0x1ae7a4bf, 0x3f53fbc3, 0x83a86d98, 0xde70d0c6,
- 0xd8559f7b, 0x65f79836, 0xbe196ff1, 0x19b6cdff, 0xf3bc60af, 0xb2f7d8a4,
- 0x0a563fb0, 0xe9205878, 0x637cdcf6, 0xaa25517f, 0xf3df2154, 0x79eb05bf,
- 0xc034f6be, 0x0bbf625c, 0xc4f68ddb, 0x7ac62de2, 0x55a6f743, 0xe7071b28,
- 0xe354b479, 0x79c93955, 0x79ae351e, 0x50cfa426, 0xe317d82e, 0x62ec1110,
- 0x8e1b33cc, 0xfd6cbfcf, 0x2fe8d84b, 0xfefe7f5b, 0xf5d90b16, 0xaa7d5f8b,
- 0xac8f0a2a, 0xdbfae35e, 0xa7fae375, 0xdfd71a96, 0x7fae33e9, 0xfae364fa,
- 0xf5c6fdbb, 0x5c6b511f, 0x7180ccff, 0x8cebb3fd, 0x34139feb, 0x06c8ffae,
- 0xb7e7fae3, 0x70bbd718, 0x8b3d7199, 0xed0d4bc2, 0xe52febc9, 0xdb9af165,
- 0x7a01ef8c, 0x2074094d, 0xa04168e9, 0xc677f281, 0x30cf7fb4, 0xc03d324e,
- 0xf744d1be, 0x6e3ec1bf, 0xf4fe729b, 0xfc8ad201, 0xf1f4c9bd, 0xeec2b96f,
- 0xfa017414, 0x0ae513a8, 0x614f17d8, 0xd8563759, 0xeda181fb, 0x12bf290b,
- 0xbee279f5, 0x36b93878, 0x7d894f48, 0x0a7ab0f2, 0x81f6c9f7, 0xf470f27d,
- 0x8fb31c7b, 0x4f5a8ec8, 0x5d1ba57e, 0xf14f8e1c, 0xcfe825d1, 0xf2f4827d,
- 0x32700071, 0xe71c7bc0, 0x4f1fe303, 0x8dfe4099, 0xd928b36f, 0x1982ffed,
- 0xcdd83096, 0xe2ada898, 0x953a4f3d, 0x2f6f42f5, 0x858ffe14, 0xb95be0f6,
- 0x9851efdc, 0xc5e6e385, 0x7b14bce8, 0xdd79c46d, 0xe3027123, 0x4f782008,
- 0xca011c61, 0x9ba73b5e, 0xf8ae52d3, 0xc049ee99, 0x4ae9fce7, 0x6ab5d929,
- 0xcd3de50a, 0xfd2141bf, 0x787fff34, 0x2cfa1719, 0x4cb95cb9, 0xd85db3e2,
- 0x482edc03, 0x149ac3c7, 0x07416277, 0xc2e7cde1, 0xb8fe8675, 0xfc47ff1f,
- 0x0ae3cd7c, 0xd06726d3, 0x675a3331, 0x0f3c6f5c, 0x9c38daa5, 0x0d999b8f,
- 0x058139f1, 0x57bc0ec9, 0xfbc6d103, 0x7bfbcf1b, 0xd3fa1463, 0x7cfff554,
- 0xcfe306ad, 0x9ffa2e38, 0xfe863e1d, 0x63ebd566, 0xf41abfe8, 0xba14c0fc,
- 0xcebfa324, 0x87f87bf8, 0xc27e9178, 0xbb2f413c, 0xbdfb8c9d, 0x2386fc92,
- 0x98c09055, 0xe7e663bf, 0x16dabdc3, 0xe63c9fbe, 0x984d635b, 0x37666337,
- 0x32fbe79e, 0x6df3087b, 0xc7e4274f, 0x790cbac3, 0x3cb4e98f, 0xb657efae,
- 0x71865c36, 0x07115ea2, 0x7835a7bb, 0xaab7ae19, 0x0e2f28f9, 0x277257ef,
- 0x40f16f7c, 0x6a9f027b, 0xdd43068e, 0x5e1008ec, 0x10b6b556, 0x3dc598dd,
- 0x6ef3ab50, 0xfc130573, 0x148e1712, 0xcc2d86f6, 0x984c595c, 0xc1f7f0c6,
- 0x55f38674, 0xc4e9da5d, 0x0bd5a87d, 0x2bca25fd, 0xfa2590ad, 0xb6b4d7ad,
- 0x94abdc10, 0xb33b1060, 0x3b51668e, 0xe1aadbe0, 0xbe785459, 0x67922af5,
- 0x3e6de716, 0x3867f7bc, 0xdabd59fc, 0xc87289e7, 0x8728d23f, 0x5db56fee,
- 0xd95af88e, 0x93c68cba, 0x756f6b78, 0x8df58e5d, 0xbed15ead, 0xb54dda82,
- 0x71f482ba, 0x8621681a, 0xc7ad1671, 0xb03d7189, 0x03c61933, 0x045b5ecd,
- 0xe1eac899, 0x05a3b2f0, 0x5d731f44, 0x8df4061c, 0xbc7d03e5, 0xfb4cccb3,
- 0x1df3dbd3, 0xafedfef4, 0x0fd1798f, 0x1e01607c, 0xf98aff51, 0xf0238ff4,
- 0x88721167, 0xf200c169, 0x23572595, 0x6b86747f, 0xdfe3ad36, 0xbbc49fda,
- 0x1f28cec5, 0x20ef92d4, 0xe17cebbb, 0xf7148d0c, 0x27bc0950, 0x44f00fe3,
- 0x8176f472, 0xe2efdba7, 0xefdbbe7e, 0xcec79460, 0xd55fc196, 0x148d5d00,
- 0x059b25f2, 0x056f6ca8, 0xeb8252c1, 0x0b416365, 0x49d675e8, 0xbfd1db2f,
- 0xcd7eed83, 0x4deaa18d, 0x58599e35, 0xaa7c9c6e, 0xbef1d91d, 0x618b3f4b,
- 0xc474be89, 0x4581ab97, 0xc51fccf0, 0x1d783d8e, 0x89a7af35, 0x5e3ab4f6,
- 0xe9ed174f, 0xf7cf5e01, 0x20d4a93f, 0x97f14960, 0xd5c6477c, 0x2dbe51a3,
- 0xb27cfc60, 0x3b60e7b8, 0x91c2f213, 0xdb06ab31, 0xbe74626f, 0x50ffb640,
- 0x4e38e7b4, 0x394629ac, 0xfdf8c73c, 0x6c878156, 0xe9f689c7, 0xb6319f14,
- 0xa64af8cb, 0xf42bece3, 0xfdb1997d, 0xd3bdf141, 0x1d363671, 0xc71b47db,
- 0x53da20df, 0x08ef3c1d, 0x473f21c4, 0x15efda2f, 0xc76c76ed, 0xed8d4bf1,
- 0x2bf8c56b, 0x43ad0095, 0xf8899f3f, 0x9d568ee4, 0xf67cc9ea, 0x7e0cc8ea,
- 0x99f0598e, 0xf21b196b, 0x23abfcdc, 0x9a4cbe79, 0x0ff7e3de, 0x7b650f21,
- 0xb8a34b86, 0x10b5eaaf, 0x421d591d, 0xec3e088e, 0x8b4a2397, 0xf717138f,
- 0x3d197f97, 0x8e7ee103, 0xfe00acf2, 0xbef39e90, 0x04c16263, 0x98db9679,
- 0xef0982c2, 0x1864177b, 0xe53759ea, 0xccf7de1a, 0x77686519, 0xb4378e54,
- 0xf13e58b3, 0xae826dc7, 0x0c2aca52, 0x02bef2ed, 0x923e73a6, 0xf479b576,
- 0x677bc314, 0xa474f54b, 0xcb57d3ed, 0x367cabfb, 0x39b70b94, 0x1f236547,
- 0x3275a4fe, 0xef20df1a, 0x8974a26c, 0xc9474bd2, 0xa2eab656, 0xc7f54a5e,
- 0x7aaf3cdc, 0xd78fab65, 0x6f9d188b, 0x4b5ad95e, 0x2f81c7f5, 0x56d7fcb1,
- 0xa07b953f, 0xfb0e160e, 0x43aff411, 0x7fa13b3f, 0xfd023fac, 0xfa1db963,
- 0xd087f2c7, 0x856fb63f, 0x10feb17e, 0x83e585c0, 0x7f3e0fd0, 0xfac7fa00,
- 0xd500d6a6, 0xa3ad6bef, 0x20d686fa, 0x36b6f795, 0xadb5f3d0, 0xdd5f542d,
- 0x7bca8cba, 0xae54c35a, 0xed435d6c, 0x1f37a116, 0x1f1fe713, 0x7dcfc69b,
- 0x467faf27, 0xeed5f29e, 0x4504a8bb, 0x93c8655e, 0x7dc3c8c8, 0x2f31a775,
- 0x7047f110, 0x931e39d8, 0xe02b18e1, 0x2b62cf18, 0xa0dcb952, 0x2e98eb38,
- 0xc0a9dddf, 0xf85b23a7, 0xc57517ba, 0x81a73da9, 0xcc56378f, 0x2cc27993,
- 0xef1f8aaf, 0x957af7c5, 0x8f2a7558, 0x831f9337, 0x553f0179, 0x9133dce2,
- 0x7084c479, 0x7f419369, 0x1fb3cc27, 0x270e6e5f, 0x78286de2, 0x7c78f22b,
- 0xe4ecad43, 0xb6d49bf7, 0x52cd9147, 0xbf742dfc, 0x102ead89, 0x04f4a0d5,
- 0xc24f7482, 0x603373f8, 0xed720593, 0x874ce5dc, 0xb8d1af32, 0x505dcaff,
- 0xa8e7d861, 0xfd3236e7, 0xccb7e822, 0x194a4fb8, 0x3be19b7d, 0x0cf7de5b,
- 0xddcf16ed, 0xb7f9f686, 0x22f2ad14, 0x8a32473e, 0xafd7593b, 0xd7003960,
- 0x044d8ec2, 0xbde05057, 0xfdf9d157, 0xefc5fbed, 0xfffb0844, 0x2f812ec7,
- 0xd7da7adb, 0x6af3758e, 0x84d87a49, 0xa7e3e7fa, 0xee8e49d2, 0xfe44c47d,
- 0x622c71ed, 0x01337942, 0xbf6c48cc, 0x56726f00, 0x151ccae9, 0x574ce5eb,
- 0x8b117eb1, 0xec08bed3, 0x17a43aff, 0xb11fffb0, 0x7e4cfd43, 0xbf3cc7f0,
- 0xba7a4a2b, 0x8967ac44, 0xd16d07dc, 0x73c3fd92, 0x635fc8f3, 0x26d6073c,
- 0xdc2b01f6, 0xef18d955, 0xff261ded, 0xd6fde9ce, 0x1eb005d4, 0x2a3a9cce,
- 0xba0df3e5, 0xd173c869, 0xbf2abecb, 0x623d016c, 0x086597e4, 0x9fca99ff,
- 0xaabba167, 0xf89c7479, 0xe52ccda6, 0x728e9e98, 0x4beea06c, 0x3ba2bb27,
- 0x89a3f087, 0x1adbd9f5, 0x3900fca4, 0xd377ed4f, 0x560f985c, 0xf74ff222,
- 0xb53adeb5, 0x661f2126, 0x52dfa0b7, 0x3d45ab16, 0xf2665a5a, 0xaf27a845,
- 0x1f617fc1, 0x73c7eedd, 0xf3055afa, 0xc71e18a1, 0xbabce9da, 0x794a85f6,
- 0x4b91da40, 0xb0760e98, 0xe16511c3, 0x997e81b2, 0x477c83f0, 0x5ccff5c8,
- 0xad0f3435, 0x3e47da40, 0x33ecddd4, 0x74863f87, 0x998759f7, 0x6309e5b3,
- 0x9183a97b, 0x6a545b9e, 0xa60ae889, 0x3dd3b423, 0x850eda8b, 0x1c2115e5,
- 0x50f3bdd3, 0x254c1f8f, 0x52dd4dba, 0x7bc35745, 0x79f5c14c, 0x1e99c700,
- 0x1a49a0fd, 0xd35ef057, 0xb42ecb04, 0x5e22c71b, 0xa5d19c70, 0x6285c784,
- 0x575afbbe, 0x159e9e10, 0xefcb68f3, 0x87873bc7, 0xcf78fcf9, 0xe90ea2e9,
- 0x73ce31bf, 0x37f56543, 0x95c6def0, 0x74d85531, 0x33ae7c46, 0x53ce26c7,
- 0x3802e006, 0x5538459f, 0xbffc9b84, 0x8f4d7e7c, 0xc819bf70, 0xbb8a5ab1,
- 0xc4b1324e, 0xa32cabb8, 0xc8cdfe42, 0x4ebc1173, 0x0ba9dcef, 0xdfac4a2e,
- 0xb8fd7444, 0x761fe7c2, 0xcd2f3c6a, 0xfd13d7ef, 0x7ee4ebc4, 0x487f9c59,
- 0xdff6f011, 0xe79e36e3, 0x6c583a61, 0x0d2de60b, 0x25fe1fb7, 0xc4a4ca2e,
- 0x18e830fd, 0x649d5718, 0x759f3ca3, 0x7f4b8f32, 0x337cc743, 0x8afcf12c,
- 0x6ae9d04d, 0xe4af31a7, 0x77b71e5c, 0xe8f589ce, 0xc6be71b3, 0x567964b8,
- 0xdd17bdc2, 0x91ffe4f5, 0x6ebecc71, 0xfd2e0f9e, 0x09044b6b, 0x78e51df4,
- 0xa5587be8, 0xee92e5f8, 0xa17df96b, 0x7ba01e87, 0x1d2fa156, 0xbc5ea1e9,
- 0xe8c6bf5f, 0x3bb059ef, 0x1f452ab0, 0xc45a57ec, 0x48d7e4c2, 0xfb2527f6,
- 0x279b7e91, 0x563e69f6, 0x93ca718e, 0xa58f8beb, 0x88a67ee0, 0x91dbb9d1,
- 0x8f7493ea, 0xedfb5831, 0x2b294f78, 0x24f4f7e3, 0x0525fbaf, 0x3ef3c216,
- 0x1c63aeb9, 0x37d867e4, 0xbefff389, 0x7d7ecf17, 0x96f3f40c, 0x81ce18ae,
- 0x099ba5bc, 0xae81e38f, 0xac64ecb1, 0xa0fba876, 0x5f4df3b2, 0xa5c0bced,
- 0x68d7dfc6, 0x9639eaea, 0xcbbc93aa, 0xfa7807ca, 0xad3efd8e, 0x4e27f88c,
- 0x575107bf, 0xb841e1c4, 0x7ecbfcf1, 0x137327d3, 0xbe7c60e9, 0x7c83edc6,
- 0xbf9357d3, 0xceb84635, 0x7599bd94, 0x1c6b870d, 0x3d1bd5c2, 0xb7689591,
- 0x0ff6b848, 0xa5fabf73, 0x934f155f, 0x0120fa8d, 0x12ff534f, 0x865bf69e,
- 0xc0966786, 0xf28e4fcf, 0xb70642fb, 0xd5fd457f, 0xccf78461, 0x51ebb163,
- 0xb3bc023c, 0x53de7f08, 0x15331459, 0x14acebf8, 0xe42959f4, 0xa8ce8f8e,
- 0x17dbd0b0, 0xa75e7e53, 0x7743dbc6, 0x79e08a2e, 0x0b8bdf02, 0xe0dfd10a,
- 0x7ba56e3c, 0x7949429e, 0x2f5cb222, 0x573fb1e6, 0xab0f1219, 0x78bcb9f3,
- 0xd4f9b5f1, 0x596fbddf, 0xed83ca19, 0x5ef5df22, 0xef0da757, 0xc47895ef,
- 0x2194057a, 0x2a117c4b, 0x3f2b7bba, 0x218ee9b3, 0xf1f133c6, 0x1be24bee,
- 0x572c9e39, 0xbb7297e4, 0xe2aa3954, 0x2efb8739, 0xd9ee937d, 0x2352afba,
- 0xe337fb99, 0xfe127d7f, 0x7d666fa3, 0x8acbd39d, 0x2aa3f47c, 0x037da5ef,
- 0xfe2521ea, 0x9fc432b3, 0x0ff34825, 0x3b656dbd, 0x6a78c195, 0xe72bf414,
- 0xe12fe727, 0x80b65e76, 0x7ae099ce, 0xc5abe020, 0xa44f52fb, 0xf387bd0b,
- 0xa9713d22, 0x4c89cfc6, 0xef5d68d2, 0xce853c9b, 0xf917ecf5, 0x2c4bf114,
- 0xc32efe43, 0xf76f0233, 0x0c45fb9e, 0x770da9ef, 0x055ee99b, 0xbf78f5a6,
- 0x7a147409, 0x9a7ba35d, 0xf16a9e81, 0x5f17a8de, 0xfb869b3b, 0x66f903e8,
- 0x3e40fb82, 0xe77d8797, 0x6f3a5ad4, 0x2e67daf3, 0x9ffe8135, 0x5d8557ce,
- 0x39e18b12, 0xfc158d9a, 0xd84380dc, 0x4ed78e01, 0x8efc2035, 0x2ab3dfc9,
- 0x4f767a09, 0xdbbe95be, 0xf9678968, 0x48740e30, 0xdfee5165, 0xfaeffba1,
- 0x38ba0bcf, 0x8f2b9a1e, 0xe8e09d92, 0x3de2f7e6, 0x5c13af9d, 0x7841f708,
- 0x33ee329e, 0x57b63d57, 0x06a378c2, 0xf8ba77be, 0xe24ca67a, 0x77dcd0f7,
- 0x785dcfe9, 0xc2ba5eef, 0x4be8fee8, 0x3906cc0a, 0xd7c45f5e, 0x1632b3a1,
- 0xe5fc851b, 0xb6af28d2, 0x0d9b7a33, 0x17c97aed, 0xf4a947a3, 0x97916add,
- 0x29727abe, 0xe3a499f1, 0x57c105b9, 0x02366e91, 0x76493c5f, 0x9d302c4e,
- 0xc72ae3f0, 0x95c7e3fc, 0x3fb71d58, 0x2fd42e2c, 0x819326a5, 0x380b8adf,
- 0xec12c8dd, 0xee77b252, 0xf52cb410, 0xc6c45b7d, 0xb7ffbf30, 0xa3a566e7,
- 0x2796da5d, 0x0dbf3ced, 0x095db5f1, 0x38e117a4, 0xbed2a5ff, 0x4ed7c5ef,
- 0x6db8e034, 0xdbe42f30, 0x58ea7e7b, 0x9b49f68f, 0x2a7c4fcb, 0x26dcd7c7,
- 0x9fbc1eb1, 0x2bff16fd, 0xbb8c6fe8, 0x47447df9, 0x43635ca6, 0x4cf787fa,
- 0xad7bcfd8, 0xd81a0bf8, 0x07f90d4f, 0xcf66ba16, 0xb116efc0, 0x6bb42ae0,
- 0x86a87517, 0x11ee177b, 0xb9e162f2, 0xd84d1614, 0xfc9c2c1f, 0xb6373a2d,
- 0x8b1a5cf0, 0x8f75bd82, 0x8f7d1738, 0x0ac45738, 0x443b3d38, 0xdfe515e9,
- 0x24bef7cc, 0xcbf995bd, 0xdf39f3bd, 0xb7e802b3, 0x14b1e949, 0xa71b10c1,
- 0xebce9efc, 0x62ead7bc, 0x780492d1, 0x0473cd07, 0xd38c268b, 0x1b39deff,
- 0x7d6067cd, 0xcc0efd39, 0x787841d7, 0x784bd5af, 0x91ffbe1c, 0xf40e3c06,
- 0xc780ca3f, 0xd097fdc1, 0x96b5eff8, 0x13ff497e, 0x375fe986, 0xefd03fc0,
- 0xf7b71a68, 0xa71ffcc8, 0x891ddf02, 0xf9edbdde, 0x331dfc4a, 0x017efd2b,
- 0xa6aceaeb, 0x338f8473, 0xe77a673a, 0x83bddfe1, 0xee779af9, 0x8c70f8b2,
- 0xae7c733e, 0x333d086f, 0x78c2b99a, 0xc72c979f, 0x7cd31417, 0xb4db9f92,
- 0x32f30b88, 0x0b9aa56d, 0xc6d11f7f, 0x71bb9162, 0x236952df, 0x42e32739,
- 0xe53fdfb2, 0x3b3cf2a9, 0x6f7907c9, 0x979112cb, 0x5e08ec57, 0xd4beeb81,
- 0x13fcded4, 0xd283f6f8, 0xcc373a36, 0x33f69b98, 0xe1a36e32, 0xa315bf2f,
- 0xf0c0305f, 0x7a13cdfc, 0xf77e0670, 0x18271043, 0xd82f51c7, 0xb5bcc41d,
- 0xb8df620d, 0xfaf31ec1, 0x0de6e412, 0xb357da05, 0xe893da17, 0xf72fd276,
- 0xdedd3946, 0x227e8050, 0xfcc4f635, 0xedb7cc22, 0xe976f711, 0x55778a16,
- 0xfefba31e, 0xff7dcbe6, 0x8ebce49a, 0xd1da377c, 0x8694ce32, 0x6b687779,
- 0xbef7e46c, 0x7af2a7d5, 0xa08c7ffd, 0xffb01073, 0xf9432d49, 0xfee55fb2,
- 0x3adf1120, 0x67d0e62b, 0xf94058db, 0xe41f22f7, 0x744bf76c, 0x35d9e4af,
- 0xebfd6863, 0xdd3ef85f, 0x973fb70b, 0x43beb1eb, 0xf563d24e, 0xde155f56,
- 0x973fb85b, 0x5a9dabdb, 0xc122ad34, 0x579d5d7e, 0xe3f9f24d, 0x894c6335,
- 0x0aef8ff1, 0xfa51b079, 0x1578b537, 0xb8f9679e, 0xd233fa32, 0xf4ec07a3,
- 0x83a6513b, 0xfba2abbf, 0xe95a372d, 0x0faa0177, 0x875cdb8e, 0x8d34c3fb,
- 0x074d91f2, 0x99cf04ed, 0x2d7efc75, 0x5de516fc, 0xf491c1d3, 0xa9f556fb,
- 0x6c27ca71, 0x6095f14a, 0xe0feafdc, 0xffd7ba54, 0x057186e4, 0x644eff28,
- 0xaf7405ea, 0xf43c06f1, 0x6343f509, 0x17b5f3f9, 0xe75579d3, 0x50e0d4c6,
- 0x8fa9107e, 0xdaefef92, 0xdbde5b9f, 0x6719f885, 0x7dfca1f3, 0xb8f1f14f,
- 0xd9f12bef, 0xf915c53a, 0xd77ad2f9, 0xbc7dfa69, 0x6f778efa, 0x43dcf462,
- 0x73f72b74, 0x0bd5e3c9, 0x12dfb57d, 0x4bbf8b9f, 0xe052bf56, 0x624efc5d,
- 0x7e9ca9bd, 0x7de24faf, 0xb73dd263, 0x24fd084f, 0x69af93cb, 0xd8205f74,
- 0x1fe80511, 0x4ad8793a, 0xe3727674, 0x2eaf3a04, 0x9c07abc7, 0x4f2f80b8,
- 0x7113e864, 0xf2169f5d, 0xdf257eaa, 0x30e7caa7, 0xe77f0c7e, 0xa5487e30,
- 0x573eaebc, 0x92e30d3c, 0xef593df2, 0x4ab74f9f, 0x3fb597bf, 0x6f5a45f7,
- 0xf48627a0, 0x49c17a64, 0x82f43a7a, 0xe7ae1775, 0x8c2c95c7, 0x5bf0cffe,
- 0x8ec7f7fc, 0xfd92b21c, 0xef9c77dc, 0x9fdd076f, 0xe01788e6, 0x17a1a1fd,
- 0xeb1c9246, 0xe0ef8fcb, 0xdf9db6f6, 0xf046f8ab, 0xcdff1162, 0xe611f711,
- 0xf9f79da0, 0xed147b21, 0x9e29283c, 0xbdbb6610, 0x2a70fcf4, 0x940fff43,
- 0xca63cf14, 0xeefd2f01, 0xdf176b0c, 0x3bfbe8c7, 0x5163820b, 0x0c59df2e,
- 0xfbf03bfd, 0xe7def453, 0x521782a3, 0x5cb18a0c, 0xf52e5e31, 0xa42c1f9b,
- 0x5e67cae7, 0xed2a7bfe, 0xed463da2, 0x62f06b21, 0x9cafc79a, 0xf310e76c,
- 0xeef7aafc, 0x41182d1d, 0x06e128eb, 0x98dd933c, 0x05b3978c, 0x3b7bf126,
- 0x74c91f1c, 0x5f9ebfd9, 0x5ffa1f02, 0x21f6bce8, 0xc6eb8ebe, 0x591cb859,
- 0x63ec813b, 0xc86296d6, 0x88715b9f, 0xbbd0c3df, 0xdceb49c1, 0x843ff667,
- 0x86fe28fa, 0xed7bf199, 0x3f4022c0, 0xf9a1e4c5, 0x4f9123e5, 0xcf2d64bf,
- 0xd1e8fbb3, 0x39e60bf7, 0xfbf25cf0, 0xd2c97386, 0xa49d085f, 0xe1c63fa1,
- 0x97e6d3d3, 0x255d7b71, 0x7aeff8f8, 0xf4eb346d, 0x0de16bbb, 0x711fc8e3,
- 0xa41fa2d2, 0x775b87df, 0x4ae90981, 0x77f2172d, 0xbc9f9f13, 0x7bde4b2f,
- 0x8a9f8c95, 0x7f9262c2, 0x33782c4d, 0x827ea1de, 0x61c259e9, 0x91fff746,
- 0x6984df7b, 0xe29f9a88, 0xf485acb6, 0xf67be0c2, 0xbbf25f2c, 0xf5bbff6c,
- 0xf42b5c2a, 0xd38795ef, 0xe07eaf50, 0x3a46fbe2, 0x5d7a8f7d, 0xc36cf3e5,
- 0x83af300f, 0x4e14a605, 0x879276a2, 0x5dbc281f, 0xf341b9ca, 0x7ec17a47,
- 0xebe83df0, 0x7fee429a, 0x710cae63, 0xc16c2bc8, 0x9fa94d3c, 0xc2668a42,
- 0xdca7f373, 0xe3d21a7a, 0x19c5b5a9, 0xc60f302b, 0xda2567ad, 0x8ca3a1d5,
- 0x78eb8bce, 0xf8c5f8df, 0x89fe32fb, 0x9fdbf7e8, 0x243bf461, 0xb4e4f3c3,
- 0xed89b99e, 0x9eb90a73, 0xf6285f9b, 0xe44c8599, 0x8de32657, 0xb162fbaf,
- 0xf0796aee, 0x2687da72, 0x4b78b7bd, 0xa487a0ba, 0x7fcfede7, 0x1ea0f297,
- 0x4913de36, 0xef185fb1, 0x612c5783, 0x532c42e5, 0x61bdaf28, 0x0d739713,
- 0xb92cceaf, 0x5ee9d78b, 0xef3b6985, 0x59e3dfa1, 0x12caebb0, 0xad16e5c6,
- 0x05c7a483, 0xb412fe05, 0x555be2e3, 0xc345f217, 0x51e09516, 0x56d73ecd,
- 0x8b7a7e78, 0x06bbec99, 0xbce9cf01, 0x9bad1a60, 0xe00b34c6, 0x4c16b4fd,
- 0xab75fb23, 0xa4a1ddf8, 0x6f7bc079, 0xe043156a, 0x99d7c6c7, 0x043c53f0,
- 0xa261bc1d, 0xbf805a75, 0xec70890b, 0x79bfc854, 0x33783f3a, 0xfec14fd6,
- 0xabe9cf1f, 0x7dd0969b, 0x3ef8c936, 0x7d120b6d, 0x91f3162e, 0x471e4cf8,
- 0xf10fb3e2, 0x2bd2f689, 0x85ef4853, 0x6291541e, 0x9fdd8bd9, 0xda7cbed2,
- 0x99df2ba1, 0x8137cf8a, 0x9ea3a2dd, 0x6fb3f51d, 0xa35b9ea3, 0xfe7a8c52,
- 0x962ce2a1, 0xff71afa4, 0x39f5ff39, 0x3d04a669, 0xf1c83ea9, 0xa4e73aa4,
- 0xcff7a42d, 0xebe91d7e, 0xaacbee84, 0xf1ef7848, 0x83b8fec2, 0x37eabd61,
- 0xc70d21fd, 0xa66f5869, 0xe975c979, 0x7ca079c3, 0x208d4f6b, 0xc5e5f7fd,
- 0x4eb95fa9, 0xeb8df4fd, 0xde09e920, 0x9742affb, 0xf24c2718, 0xb05154e8,
- 0x9d0437ca, 0xc6007a2c, 0x38b6fec5, 0xf3d2fee9, 0xa3e2571d, 0x5f05bd7f,
- 0x71d71d53, 0xd1f91147, 0x74686f4d, 0xfb44e0c8, 0xc7c80555, 0xd02fa0f5,
- 0xc4af516f, 0x647ba1df, 0x1ee9590f, 0xb74dfae6, 0xd5e38cdc, 0xb09ab71f,
- 0xabe1d4fd, 0x495c1226, 0x2a780aeb, 0xfbde8a18, 0xbe05e28a, 0x3d81fc04,
- 0xe72dfc41, 0xc1370093, 0xba29fb01, 0x08afa88f, 0x0e4133d6, 0xedc859bc,
- 0x65df782e, 0xf7e6fa21, 0xd7cff561, 0x575f3495, 0x64df7cd2, 0x17ccdf44,
- 0x782cf9a4, 0xe7c26adb, 0xb6bfee0b, 0xa73df704, 0x79a74dd6, 0x4ff0ca9d,
- 0x553afe44, 0x210ffd00, 0x0fdc172f, 0x0bfb2cf3, 0xd3b76cf3, 0xcf3b0d7a,
- 0xf60ef88c, 0x89f9e617, 0xcf3943a0, 0x0d3d34f7, 0xcf43f9f3, 0x2efd0caa,
- 0xc6c0fca1, 0xbf4ebca9, 0x73321713, 0x4a6d087a, 0xdedf7ca5, 0x04fbfe6e,
- 0x78c00e52, 0x260f49e7, 0xd55387a2, 0xd0c9e6fa, 0x3f07ffdf, 0x00b71737,
- 0x0000b717, 0x00088b1f, 0x00000000, 0x7dcdff00, 0xd554780b, 0x733effb5,
- 0x64932666, 0x08124c92, 0x3c984081, 0x49849009, 0x768a8802, 0x02d10478,
- 0x89794f0d, 0x42100793, 0x69b5a05e, 0x0240cd6b, 0x350d45a2, 0x01d45a2a,
- 0x0da2a281, 0xbc150a0a, 0x45622a03, 0xb68b57c5, 0x514026e5, 0x5ea0c679,
- 0xffd7b5ae, 0x4e7dadfa, 0x5490ce72, 0x7dfbdedb, 0xdf1f7cff, 0x5afd9f66,
- 0xd7b5ed7b, 0xe7bdaf5e, 0xcbaa3dc2, 0xe4e216ef, 0xff6f05dd, 0xf7a517bc,
- 0xfc812eaa, 0x02cddf58, 0x2cc38ff9, 0x934a14fe, 0xfe70a68b, 0xa56b9b65,
- 0xa689c422, 0xa141fdbf, 0x367d85fc, 0x0bf2a0b7, 0xcef74529, 0xea8f7e46,
- 0x2fed415a, 0x259f680c, 0xd8b97386, 0x5deae544, 0x2e509a23, 0xcae61e1e,
- 0x0df2f2a0, 0x09644261, 0x6856fbfe, 0x3be9237f, 0x6ba50b52, 0x34786f55,
- 0xfd2f4da5, 0x6fa5c944, 0x578f3756, 0xf724abf3, 0xb97e34dd, 0xf9ed5855,
- 0x399edca8, 0xf388472d, 0x03136d7b, 0x233f32d9, 0xb03fcb87, 0x5d8d7952,
- 0x7bf85af8, 0x0e6a7dad, 0xd2b6e28f, 0x0fa98b38, 0xfae94a91, 0xea60ef18,
- 0x938d317f, 0x8b7de342, 0x0e5c1fbf, 0xe7cb0edf, 0x9fa88a0b, 0xf6626d7b,
- 0xf55dd973, 0x4ddb34f4, 0x6d46f61d, 0x553b577b, 0x3cc62588, 0x2422a1e1,
- 0x477914bf, 0xa432be57, 0x7ca42abc, 0xa7cfbf47, 0x1f2f3914, 0x1fc6037f,
- 0x12c785d1, 0x2c3b8f0d, 0x72eb1df4, 0x44d8128f, 0xbab12dff, 0xfe09cbed,
- 0x9b88c6e7, 0x55a94fd1, 0x47cdae38, 0x5f9e0ebd, 0x86345589, 0x87bbe3e5,
- 0xa6f3e9eb, 0xe2c31afe, 0xced11f28, 0x89bd53e3, 0xda9e4376, 0xab9ae09b,
- 0x4ba7a3c1, 0x4ebb85b7, 0x7fa0f3ea, 0xa6b12d55, 0xbeeecd7c, 0x7c0693ae,
- 0xfa7fc52f, 0x3449bfd2, 0x8bc091fb, 0xdddaabfc, 0xe892307a, 0xcdf14da7,
- 0x17bf5375, 0x9d1d7e0e, 0x1f75e6ed, 0x9beb4459, 0x7f3ea6af, 0xbedbe698,
- 0x426cf547, 0xfdd86fbf, 0xe7fd0c4a, 0x6e0f8b9b, 0x2fd5efa7, 0xddbb89a7,
- 0xc50ff35d, 0x17dd85e3, 0xfbe953a3, 0x08f643f9, 0xb9e68c31, 0x9dac7e37,
- 0x5c4762e8, 0xaf78b77b, 0xef5dd6d5, 0x35b5bcef, 0xdea1aa7a, 0x3d1ad179,
- 0x252f40cd, 0x8bd62fbb, 0x5f9fc65c, 0xefc2efcd, 0xc7bd7d89, 0xf4bdf4aa,
- 0x5ac6fdf7, 0xc35fbd28, 0xf736bddb, 0x6e096b13, 0xcf309eff, 0xfcfb8216,
- 0x682db31b, 0x728dfe7f, 0xcbcdbd2d, 0x244f5fe9, 0x8b68fc63, 0xbc7d77a4,
- 0xf70b6e35, 0x7b696a53, 0x8fda1aec, 0x93ebbfa5, 0x55d290d2, 0xcfc96a5e,
- 0xb76f3ac4, 0xdc002fb5, 0x46b115e7, 0xe7eaeb89, 0xdb9bb77e, 0xc9f3df9f,
- 0x270fe063, 0x4f901dee, 0x41f94d64, 0x3f2a3eb9, 0xa64151ff, 0x7f3dfa8b,
- 0xf80f5f4f, 0xfb9b76c2, 0xbbb7a636, 0xf7d83ca2, 0xa7187f7a, 0xb9effd16,
- 0xe90fa462, 0xca5e1247, 0xb8e4fae7, 0xabbf257e, 0xfb8b68df, 0x17e0d1be,
- 0xb679b7bf, 0xeffcb250, 0x31627d69, 0x6ee7ec3f, 0x4d74a1d6, 0xf89b7098,
- 0xbb864ac9, 0x22ec7844, 0x030b964d, 0x228995bd, 0xc9b0befe, 0xf1117dfc,
- 0xbc5f7c09, 0x5cbdfcde, 0xa56ead34, 0x98092df9, 0x3fda0bbf, 0x6bec04fe,
- 0x5ec5affd, 0xff7a0514, 0x4f54cc36, 0x40fe35f8, 0x255f091f, 0xc7c11fb4,
- 0x8102519e, 0xbbdf5895, 0xe8679e54, 0xa775debc, 0x2287bb70, 0xab4167ff,
- 0x5b8f5d61, 0xf0913584, 0x1c2edc7a, 0x47e8f989, 0xf5fa8508, 0x6c0b0bef,
- 0x0f7726a1, 0x04b92afe, 0xc231af7c, 0x5bbe0822, 0x427ce05d, 0x49f7d8ac,
- 0x5d29fa32, 0x356de853, 0xe6c177c0, 0xd4e7e87d, 0xe25dfdbe, 0xf8ef54d7,
- 0x58d340d8, 0xddf8eb82, 0x1ba1a7aa, 0xb1af4eb8, 0xe1014088, 0x02212ee3,
- 0xf27f94f1, 0x5a62e493, 0x7cb6cf97, 0xa4625c92, 0x36cc457f, 0x129ea3d2,
- 0xef3ebc24, 0xfcd2ef61, 0x305cd38e, 0x5d7ad09f, 0x57866beb, 0x2ab867c7,
- 0x87a3bb6a, 0x54f740cf, 0x5577f59e, 0xf7efb178, 0xcfa002d9, 0x3c5f8dc8,
- 0xb9f40f38, 0xa14ae6d7, 0xa9f66bb1, 0x2de6955e, 0x0c81a8f6, 0x4f7b639c,
- 0x9ce05744, 0x52e053d9, 0xd9aef30d, 0x7ce116a3, 0x62fbbf39, 0xf7366942,
- 0x44edc533, 0xa03c07fb, 0xf8e09edf, 0xf7fd5b9b, 0xccd79ff8, 0x7f65ceff,
- 0xf8037090, 0x9953c6c0, 0x07c4efe0, 0x79a6e6ff, 0xc6fccf9e, 0xad10bc8e,
- 0x047f4123, 0xe09bb45f, 0x2af810bc, 0x8e5c105a, 0xd037aabd, 0xa5443acd,
- 0xff4f19d8, 0x09137c32, 0xf82e84be, 0x94a89e08, 0x56705d11, 0x38eddbbb,
- 0xa2382f67, 0x694fc173, 0x12f29342, 0x0dc9f3b4, 0x493a5afe, 0xbf1d7f10,
- 0x70cae167, 0x8a4d0bc8, 0x97be8c27, 0xdcf955ed, 0x191bdb2a, 0x3f1816c0,
- 0xd2f9c9c8, 0x1840d547, 0xa3a827e5, 0xea9b30ff, 0xf547be2f, 0xfb148ed0,
- 0x07f6de37, 0x8375d61b, 0x881ca04d, 0x344b5c03, 0xfdec96b8, 0x8d0a67e1,
- 0x2ddf78b3, 0xe2ce3007, 0x170f1407, 0xe8edf3a8, 0x8a43c977, 0x67405788,
- 0x3a46e3b6, 0xce64d85b, 0x0bad122f, 0xc1fc1554, 0x9ef09dfc, 0x96fd415c,
- 0xdd730eef, 0xa102f18d, 0x4abc42e5, 0xff68d4f0, 0x3b58cacf, 0xcfd500f7,
- 0x3c6768da, 0xe2170f9c, 0xb5abf608, 0x0d204135, 0x1d6326b6, 0x6bf9efcf,
- 0xe6b1d632, 0x2995ff77, 0xcebf59ba, 0xaa37b4fd, 0x2148a9d7, 0xc4514eae,
- 0xfba0a9b7, 0xba8a538b, 0x201b6fa9, 0x9b36c5f7, 0xca879fbf, 0x8afc6c0f,
- 0x3d85c1f9, 0x6eae81be, 0xbf6bb7e8, 0x892dc3fa, 0x02e18348, 0xd716f0e9,
- 0x2427d80d, 0x35be620f, 0xdb4f569b, 0x97f0f944, 0x44b9386c, 0xf793864d,
- 0x42c7f60b, 0x3efabf43, 0x80be06d9, 0x0fe48474, 0x1cf201b6, 0x7b9daa3d,
- 0x4c77f6db, 0x9e06e8c9, 0xd71b6ea3, 0x5c100db3, 0xd17759ef, 0x845bd05c,
- 0xbe0d1fc5, 0x775ebc2d, 0xa2b3c22f, 0x5d4357d1, 0x89ef82ad, 0xfed389a2,
- 0x47d385a2, 0x5139f085, 0xc05c785f, 0x620310b3, 0xf3de3f81, 0xccbecf8c,
- 0xadac7f37, 0xe8357821, 0x022b1fc4, 0xeaffe85b, 0x63508746, 0x46b5ed20,
- 0xce5451a3, 0x5206fbb4, 0xd503edb9, 0x1dde40a2, 0xc7a7afa0, 0xa5f7fd85,
- 0x718c4673, 0x2e1ff6fd, 0xaafe8899, 0x8c22a976, 0xd3b73bfa, 0xd44e8226,
- 0x7c02fda7, 0x9ec2fe1c, 0x57b4be02, 0x6b979011, 0x35ef8f28, 0x80deada3,
- 0xa4772f01, 0xd50f18d8, 0x9cf29929, 0x4d0f7ab6, 0xbbcf7ea2, 0xeb90ec4a,
- 0x26cb876d, 0x25b1bce2, 0xd701d896, 0x1d63c3fe, 0x58b75829, 0xd59bef2b,
- 0x8f71f57b, 0x547fd0b8, 0x6f0c07ec, 0x4445b663, 0xa3c6bb53, 0xa6460161,
- 0x30a1f243, 0xc18daf0d, 0x37770ef1, 0xe317e3eb, 0x2a28c493, 0x044ea9df,
- 0x255ce79c, 0x2da3ac2e, 0x0aeee896, 0x4ef36f6a, 0x0dfa9abc, 0x078b8d8b,
- 0xd2526b7c, 0x8abf3d78, 0xbf3865ad, 0xcfc2f122, 0x08b6b7fb, 0x65fec6f9,
- 0xe9bee32a, 0x35cfde21, 0xd47e390e, 0x51cf953e, 0xe343a571, 0x4abbefdb,
- 0xc9059c69, 0x0f4147be, 0xc46dfc9d, 0x39fc02ab, 0xf96162cb, 0xe735f1ac,
- 0xbd21f05a, 0xa48a6cfc, 0x563d7d37, 0xc0bab2f5, 0xe8d51e3f, 0xc8ab9573,
- 0xa7986f91, 0xc345b7ad, 0x78039157, 0x3a2f61da, 0x9da5f384, 0x3cdce3be,
- 0x01cd3890, 0x59ea853d, 0x7aa9cfd0, 0xf47631da, 0xfc01c33b, 0x3255ea2e,
- 0x8b16ed01, 0x6fd8d5da, 0x22abd78b, 0x8dabe068, 0xa2ee3936, 0x1822114b,
- 0x22ee35df, 0x7f6e3f5a, 0xe9045a29, 0x970c6ddc, 0xe4a31af5, 0x452379fe,
- 0xb50f9fee, 0xf969b4f9, 0x7baf997f, 0x5cfd7ccc, 0x96b61fec, 0x46fefe48,
- 0xa55bf50a, 0xa1484eb4, 0xec4eadf9, 0xa7bf03b0, 0x98225dea, 0xb2f78069,
- 0xa08bc679, 0x7acedc61, 0x9f3c1c97, 0x3370f5ae, 0x75f35dfc, 0xd54e1e32,
- 0x7b1e3227, 0x84c93fee, 0x53feaec7, 0xbcbc784d, 0xff1e4cff, 0x77d67d54,
- 0x87b43ff4, 0xf826ddfe, 0xe74dfabb, 0xdf6ec571, 0x3b97f040, 0x980fc45d,
- 0x505f886e, 0x3df15db9, 0xdbe4357e, 0xe404c28f, 0x7dc2afbe, 0xfe065c83,
- 0x65ce0df0, 0x4982e7d6, 0xd17eb7e0, 0xeecd69cc, 0xded0138b, 0xfb017c4d,
- 0x11af2f49, 0xf8270ced, 0x3583e885, 0x7cb9bab4, 0xf4106fb7, 0x4e8e1d22,
- 0xa775e679, 0x20fb42df, 0xa3dfdf6c, 0x582f8fb1, 0x2878a7db, 0x46abd96d,
- 0x7c75f71a, 0xb1a7a4aa, 0x6dc8be03, 0x68d5efa5, 0xb6ef7c47, 0x020e9797,
- 0xdac3aa7e, 0x81f70173, 0x8b96ff97, 0x7dabe473, 0xfbe2074b, 0xf2cfaa36,
- 0xdabdbd87, 0x5d2fe863, 0x5196fcb8, 0x5533a95d, 0xefd0ffee, 0xf435f6db,
- 0x89db1c03, 0x0b9209bd, 0x0a2aebab, 0x0d83c64e, 0xff07ef79, 0x58b7c412,
- 0x5d3dda1f, 0xeef9031d, 0x28799243, 0x948f7c3f, 0x100f861b, 0xe40a4b01,
- 0x83670fbe, 0xb08f544f, 0x470aa0bb, 0x79611de4, 0xc384623b, 0xbffd50a5,
- 0xdbe397f7, 0xa1a42f21, 0xe83c617d, 0xf164caf9, 0x1b6dc9ef, 0xf04ff642,
- 0x34edeabd, 0x3ada955e, 0xfbc29c65, 0x7c153f28, 0xde2fadfb, 0x04d7f4d6,
- 0x9c01cfac, 0xfbcf046b, 0xa9653f5a, 0xcf502bbe, 0xe7bd68e7, 0x7337aa0a,
- 0xe864df56, 0x7e9ac547, 0x52112626, 0x947e64ae, 0xe5c83b03, 0x83d4b6e7,
- 0x3e7c525e, 0xd7dfc1e0, 0x35f9c1e1, 0xfa384934, 0x62455e12, 0x8d1a7520,
- 0xa2d5213b, 0xc0140fdb, 0xe39b443d, 0x965afd33, 0x1939f2c7, 0xfd1d0388,
- 0x4b4ee3b3, 0xfc064ef5, 0x670ffd7a, 0x75f1cbcd, 0xf2e86e73, 0xb2deddbc,
- 0x57b6700c, 0xfc7f5939, 0xeed44500, 0x41615989, 0x8a7a1a35, 0xf9f40d73,
- 0x0e0b91a3, 0x8c7c26f6, 0x43cdf28f, 0xbc6fb3ff, 0xcaf2357e, 0x72f77881,
- 0xeb7173d0, 0xc2efd648, 0xbca3377d, 0x981be1d2, 0x373c5340, 0x062837c0,
- 0xbc517c87, 0x9451275c, 0xfc2dd453, 0x45629e12, 0xe518df8e, 0xc53f472b,
- 0x5d8f2396, 0x23d66f81, 0x8d106f6c, 0x739fb9bd, 0x9dde5176, 0xe1e3affc,
- 0xfd8f3698, 0x25ed7136, 0x75fedf9a, 0x82069e31, 0x1bceecb7, 0xba50d417,
- 0x109452d0, 0xdf704d54, 0x575d4a96, 0xf6078bfa, 0x9cee7925, 0x4ebb834d,
- 0xcaeadb83, 0x50e7ee34, 0x79741bb8, 0xeb0a17e5, 0xf947fc83, 0x81b1fd17,
- 0x661f29bb, 0x74c1f8b9, 0x16395b9c, 0x886c93b6, 0x7a45bda0, 0x9c1a3be4,
- 0x73bed28f, 0x35f7f1c4, 0x21189ef8, 0x683e27db, 0x507ec009, 0xaf0f7634,
- 0x4ed513d3, 0x15634a8f, 0x721db70b, 0xb2f0a950, 0x31d7fcfe, 0x4ad7db7f,
- 0xf54e6af3, 0xddbc07dd, 0xfe496f1c, 0x0df0e180, 0x6b790a9d, 0x3fc79cb4,
- 0x1e793790, 0xc6cb84d3, 0x6b589942, 0xcd31fc81, 0x50b2a725, 0xba63fb91,
- 0xde30daf0, 0x7ae3e14c, 0x7bb79def, 0x07bc4fa1, 0xbee0885d, 0xfbf9f851,
- 0x74e0111c, 0xfd72089e, 0xe72b449b, 0x7d137c75, 0xd24bdcdc, 0x547d27c7,
- 0xc056ffc6, 0xee646efd, 0xc79a3a80, 0x650687d4, 0xdcd57eb0, 0x5df3f19b,
- 0xe8d54f70, 0x6feff686, 0xbfd88eb1, 0x699fa833, 0xdfec8437, 0xf5f5bdf1,
- 0xfb47f8cc, 0xa74748e8, 0x4bece4fe, 0x5b4eb878, 0x4bbcebd2, 0xc2127f59,
- 0x8fd1f2c5, 0xa85b7db4, 0x5a2f453a, 0xd26e239f, 0x89960897, 0x62259663,
- 0x10afafbe, 0x3ad0156f, 0x15463ebe, 0xcbd35c0d, 0xfd68a6ed, 0xe13fe94d,
- 0x0f5bd833, 0xe1af608b, 0x92f0aed4, 0x8218d5ef, 0xbdba8a7b, 0x27ca9631,
- 0xc6bd27f6, 0xefaeefc0, 0xfd0ac3b5, 0xf37486ee, 0x66d949f7, 0x68b267fd,
- 0x94fe3152, 0xfb7ae6e1, 0xb3a22781, 0xfea553f7, 0x21888622, 0x0b7af2df,
- 0x1d3e718b, 0x2fda2e93, 0x69111c10, 0x2eb18fbe, 0x75ff27cb, 0x5615c601,
- 0xb7ea9f39, 0x1debb655, 0x288b7927, 0xa7ac9c50, 0xca532293, 0x4a7fc825,
- 0xd3d203f2, 0xcf4e6ef5, 0xf28f79d2, 0xce1ef5f3, 0x9d15a417, 0x6b25bf40,
- 0x7d62b73e, 0xf8970cee, 0x9a696f2e, 0x762ab000, 0x5a441cb7, 0x809a2c16,
- 0xa1d17663, 0xef4883da, 0x6adbd4ec, 0xe1d3be59, 0xd3a345be, 0xfda6a25b,
- 0xe1d68730, 0x87aa3d96, 0x4da603f3, 0x790bdc9f, 0xa27be71b, 0x444baaa3,
- 0x54572433, 0x012fdeab, 0xdb6f7797, 0xdf9a78c1, 0x18a3f527, 0xbdeacfee,
- 0xfa99f70c, 0x4a4e9c89, 0xe81bfa2b, 0x673e2bcc, 0xf6f26c79, 0xd5126b36,
- 0x5e2af42f, 0xbd6bdbec, 0xfda01022, 0xcf26deb7, 0x0e74f581, 0xc98f1f60,
- 0xade8f699, 0x67da7c02, 0x6671a34b, 0x10a4b0de, 0x55194ce3, 0x1bbd456c,
- 0x201921bf, 0xbf4e8ba5, 0xf9f5ee8b, 0x76eb6957, 0xf18565ee, 0x2d33b1d8,
- 0xf2c17206, 0x907d695d, 0xf5d4bf12, 0xade048dd, 0xe18592e3, 0xc11a38ec,
- 0x6ede86fa, 0xe648aef9, 0xd807cb3b, 0xb760c203, 0x2ceb433c, 0x99e6f20c,
- 0x36e27e67, 0xe2672b9e, 0x18fabe5a, 0x922fedfc, 0x736491bf, 0xbff011ea,
- 0xa03cfdfe, 0x9c9af393, 0x24467a4d, 0xcd6abfce, 0x84fe08ae, 0x690899fc,
- 0x6cf91dcf, 0x2fec58d2, 0x4e1c478c, 0xf32bfce8, 0x7f5287d9, 0x6f1aeeee,
- 0x8fc2cb5b, 0xb9fca11f, 0xa36fc580, 0x689ce9f3, 0xf03bff39, 0xb64ecddf,
- 0x10a9ddeb, 0xb77f383c, 0x967ce3f4, 0xc3a88d62, 0xbc022bf9, 0xb714b3bf,
- 0x55f88eb5, 0xf70f73e5, 0x44bdfd03, 0xe5451838, 0x648bad2f, 0xcd92f67e,
- 0xfd0eac73, 0xfbfb2a3d, 0x3d3fbdcd, 0x3de91bbe, 0xc53ff955, 0x31c5a4f2,
- 0xb57ecbfe, 0xd3da0864, 0x52fa2ef9, 0xa8bf4f7f, 0x3f69c304, 0x5fef34e7,
- 0x1be097d9, 0xd2cda1b6, 0xcdbcd28f, 0xa1d2034a, 0x04386e03, 0x690df3bb,
- 0xb05f6e6e, 0xc24d453d, 0xf817ec36, 0xee7c07f8, 0xec81e59b, 0xf6c7cfe6,
- 0xc1725d13, 0xf4e5a510, 0x2c358246, 0x8fbe68f9, 0xf3c4d7f1, 0xfa77b5d9,
- 0xe604f3fb, 0xe0071241, 0xf87bb62e, 0x37f80a1c, 0xf9cf3d62, 0xd7ac3cb2,
- 0x061ff915, 0x25f39d9d, 0xbd2e7078, 0x55fc7a40, 0xcacc7fa8, 0xf3717cf3,
- 0xebb0d1bb, 0x1c7e90c4, 0x5e487f0e, 0xafd404fb, 0x7e16e01a, 0xb1e03f6a,
- 0x3f0226eb, 0x6d773bd5, 0x1d2a7ee4, 0xe5b9e81e, 0x4c1e2ebb, 0xbd789c82,
- 0x0e8064f0, 0x2ffcca77, 0x2a3d7bc7, 0xe37cb54d, 0xa47f3297, 0xa445f388,
- 0xbf515a75, 0x0f47ca54, 0x689fe769, 0xc8fe65cd, 0x5aeeeda4, 0x31525faf,
- 0xf8a7d7ca, 0xadaec2fb, 0xde749b9f, 0xab61a555, 0x23fb65d8, 0x3325e763,
- 0x79e7be5e, 0x2dced767, 0xec87bfbd, 0xfd4e1fc2, 0x3b036cc0, 0x3cbb06fa,
- 0x7f10b7a7, 0xe7e8bd01, 0x0bfb8d34, 0xef6a5fec, 0x524d3f05, 0x849d9c95,
- 0xc287a48f, 0xbef8ce58, 0x598ecfe3, 0x2eb6e7cd, 0x4844d567, 0x0567c83e,
- 0xe5185fb5, 0x5bf30bbb, 0xc2f39da8, 0x1c86ceea, 0x7bca2332, 0x3a57be37,
- 0x073193da, 0x45ef3f3a, 0xb9255abe, 0x7720cd2b, 0xd7bee339, 0xe51759f9,
- 0xbe7cc66d, 0x91b1fd6f, 0x520c6704, 0xad22cf5d, 0x9c7e5ba3, 0xfc05bdba,
- 0xc93081f8, 0x980aa38f, 0x0bf3e91f, 0xda15ffed, 0x33e23ef7, 0xe279d1af,
- 0x738fdc23, 0x7af2b18e, 0x3ca96abe, 0xa170e748, 0x6c84011c, 0x79ce1d8d,
- 0x6fe1146f, 0xbe1553bc, 0x3d45f46e, 0x1e7af5a5, 0x2bd099f8, 0xb0af54d2,
- 0xfb1dac57, 0x5bcefbf7, 0x8d47d6d5, 0xbba9f9d0, 0x37767097, 0x5bcf396f,
- 0x01f447c2, 0xdf2ac65e, 0x5b0ebe6f, 0xc3ac41d1, 0x9f1e6738, 0x539ce38b,
- 0xc304f98f, 0x43df187f, 0x1f3a19e7, 0xf27ef8e1, 0xe1158fd2, 0x7dff60b7,
- 0xdafb676e, 0xd3eb5b5e, 0xee7c25bf, 0x81ed925d, 0xf005e89d, 0x68cff05e,
- 0x7bb3cb3b, 0x7c1f84a3, 0x84d4ef65, 0x2f45ec00, 0xdf044f41, 0x79642de8,
- 0x77bfacb8, 0x7f53950e, 0x5f68c2fd, 0x2f345ec0, 0x069feb42, 0x7d15fb46,
- 0x52837db8, 0xda0e66fb, 0xbe6488af, 0xf9203473, 0x706c7378, 0x9bdf4a9e,
- 0xa814cf3f, 0xd6045477, 0xeddea2a4, 0xe5435fd2, 0x6fd43549, 0x122192f3,
- 0xf951ffce, 0xf3ce68dc, 0x32738df6, 0x4cfb671f, 0x5f6f54f3, 0x697aaff8,
- 0x51bcd5eb, 0x3d83f796, 0x9429e514, 0x9b63f4a7, 0xab573fac, 0x974691f7,
- 0x0f99cfb4, 0x5a44a6f8, 0xd2fde741, 0x99d2c24b, 0xc2fb7929, 0x5948f3ef,
- 0xe728db06, 0xc29f5c56, 0xcbd9a3f4, 0x5b7c6b66, 0x1c0d0fef, 0xcb2f2123,
- 0xb70b39bf, 0x679e8384, 0x41b78796, 0xbe5a3bee, 0xfb7a0a32, 0x0c1a9f4c,
- 0xa3395c83, 0xaf29dbd0, 0xaf5c62a5, 0xd74cb71c, 0xc0a582ff, 0x0eedcbeb,
- 0xf7cb3771, 0x3ea302c1, 0x556fafa5, 0xa37898cb, 0x27615cde, 0x2cf042fe,
- 0x9a6df6d1, 0x7d3bd7d3, 0xebe9f404, 0xf4fa8de3, 0xe40f65a7, 0xc6d2fef2,
- 0x8def5b3b, 0xb90e993f, 0xfaa7226d, 0x0f7a9d78, 0x57e74dd6, 0x25db97a0,
- 0xfb6b08bd, 0xdf298b47, 0x07887b30, 0xd389eaf6, 0x1d6f4c1c, 0x1f2ce12d,
- 0xf08ecc37, 0xb0d76ec2, 0x06ed8c2b, 0x83fb6b37, 0x1ba628f4, 0xee28beff,
- 0xf32ed2a7, 0x3065cf95, 0x0f60bcdd, 0xe515b93c, 0x7fb3872f, 0x8e5a32b6,
- 0x24bbf95b, 0x29bd2e8d, 0x5fb17aab, 0x32a5e98e, 0xe5c31dc2, 0x7a678fbf,
- 0x57dc367a, 0x7d127c00, 0xd2bc5db2, 0x258ccc1e, 0xe8d31ffc, 0xfb237a0f,
- 0x98a36af5, 0xf796856e, 0xf59e3cfd, 0x797f9238, 0x437ddf3c, 0x7bb3ef39,
- 0x3ec42efb, 0xf0c91e5a, 0xf2c9731c, 0xbcb19563, 0xb7dfe60f, 0xc5fc30f4,
- 0xbcec8ac7, 0x55ed8e5f, 0x73fade59, 0x9f841a9d, 0x9acfa75b, 0x86dfc725,
- 0xe1baa8f9, 0xd4dde4a3, 0x5595fccf, 0x7e6ed093, 0x7053edc5, 0xf69af6de,
- 0x4369fb29, 0x4afcf3f7, 0x97d47fb3, 0xf98c9dee, 0x760764d5, 0x1e7ea41c,
- 0x116a739c, 0xee9fd3d6, 0x3ef5869b, 0xe17d6f6f, 0xeb77044f, 0x5eb23f8a,
- 0x6fb47d74, 0xd85fbe26, 0x5fae1bf3, 0x16bff2bd, 0xb6afce41, 0xcfd17a4a,
- 0x7e8d1ae3, 0x1e6f8645, 0x7cae57ea, 0xc881f59d, 0x7f9223ec, 0xf8fdfede,
- 0x5bbde9ce, 0xdda8505e, 0xd5bd88d2, 0x5c81aa9c, 0x94676b9c, 0x0692b460,
- 0xed3d4a7c, 0x8d182bac, 0xa459f175, 0x5b74b89c, 0x4b181f88, 0x7fd9a091,
- 0x16d2dda8, 0xf242bf19, 0x19f41d82, 0x7d297f5a, 0xf3df7ae7, 0x9168857b,
- 0xdf7763df, 0xf75cb57b, 0xfc387060, 0xb09b19ae, 0xfd76e7ee, 0x77e8d326,
- 0x872f4d0d, 0xacffce57, 0xeace5fb6, 0xbdb665fb, 0xb9005ed3, 0x7d33fceb,
- 0xfe76744c, 0x3f9cc1c9, 0x112bb4ad, 0x577f974a, 0x72465a78, 0x65b78d7c,
- 0x07e7e424, 0xe34befb5, 0xaf82465b, 0x384e7ce9, 0xff59725a, 0x85c96acf,
- 0x47f3abbe, 0xc992d451, 0x992d03df, 0x4582ff68, 0x8ff853da, 0x3a78a8e0,
- 0xd2dde369, 0x0cb7e10f, 0x26e87e47, 0x1a71f5e4, 0xe91f25c2, 0xa58fbe69,
- 0xf9e63b4b, 0xbd9bf639, 0x7ad07f54, 0x707ca9bb, 0x9adf9cc0, 0x03e7ed2e,
- 0xfa6fe243, 0xc3fbeda9, 0x213c8e70, 0x3fd7f5ba, 0xd4f38da3, 0x3c719d53,
- 0x0acfa6aa, 0x6e1f4eb7, 0x80dbdf29, 0xf13a8fef, 0xf1126b7d, 0x6398a5e5,
- 0x9e5fcf2a, 0xfb494f67, 0xbe790bcd, 0xe7179c24, 0xff821781, 0x67ca5885,
- 0x7892be43, 0x3e3af3d5, 0xf2bfe743, 0xb04945a3, 0xae947edf, 0x7d3cf9db,
- 0xde3a9740, 0xefa46c1a, 0x2e3f4364, 0x8569382f, 0x2c49ecde, 0xdaec1ab3,
- 0xd81cf3eb, 0xe4378b57, 0xd6748c39, 0x10e0adb0, 0xece21c49, 0x2007ab36,
- 0xf7035837, 0xd86f7e42, 0x05f8b6a6, 0x54bc3b97, 0xc0f3acff, 0x68b7a8db,
- 0xe43e6c43, 0xe2fdbd6d, 0xcb1223f5, 0xdd0e700c, 0x9c875e66, 0xd87e7316,
- 0x04fe736e, 0xffde80ce, 0xa0bcbb7c, 0xc8705f39, 0x83e4ff9c, 0x9c81675b,
- 0xbdff55cb, 0x3f89f42a, 0xf384384d, 0x5e17d3f1, 0x2cfac68c, 0x41d94bfd,
- 0x71a149e8, 0x0a6eb422, 0x56ff08f4, 0xb2e3e985, 0xf81e9178, 0x810f1e81,
- 0x93a4619d, 0x683fe10a, 0xfebcb94d, 0xcb32d119, 0x5955744b, 0x64e0bcb7,
- 0x57bf5741, 0x43b3acb6, 0xb71d0bce, 0x9c2ffda7, 0x6aec375e, 0xd964a3c5,
- 0xc5637555, 0xd62df809, 0xf4013bbe, 0x854bfc57, 0xe7e28ae5, 0xbe0abd07,
- 0xce63b6df, 0x9ace9c0d, 0xedd0f8c8, 0x2af78b7d, 0xbbca28c1, 0x5dba4946,
- 0x7d615eb8, 0x1cd1a4bd, 0x36b65747, 0xd9a6de24, 0xf778be5c, 0x9f86fffe,
- 0x18f1dcbe, 0x1e5c33c7, 0x05781827, 0xa7c55b7a, 0x153ded82, 0x330dbf9b,
- 0x2d79cb97, 0xbfe1e7d4, 0xaee1e396, 0xcf3a5592, 0xd0c893b7, 0x574bf0f9,
- 0xe1066a51, 0x8de6aafd, 0xbd9b94aa, 0x7597ecc5, 0xb0ce39da, 0x918221ca,
- 0x28a1cf04, 0xdf45d7bb, 0x628fae2f, 0x18516179, 0x9cf99fc9, 0x438e708a,
- 0xdda0c4f4, 0x965477a9, 0x25e2aa83, 0x571eaa1e, 0x56c5b002, 0xfc8a48c1,
- 0x04bf3213, 0xd25526de, 0x3f00d78f, 0x1798cdd6, 0xab6f524d, 0x03f706b4,
- 0xc0e79d2f, 0x4abd7336, 0x38d1e79a, 0xefe659c8, 0xeb9da2d5, 0x36feecd7,
- 0x60ddf8cc, 0x527d65df, 0xc8556bd7, 0x7aa70433, 0xb3cb0447, 0x402398c4,
- 0xef14ab57, 0x29150ec3, 0xefa0e39d, 0x2f964e25, 0xe7946ee6, 0x838c3347,
- 0x0dd78390, 0xae67b966, 0xbe04cde2, 0xd670bac4, 0x1fdca9a7, 0x64e58bde,
- 0x4e484396, 0x7daa3966, 0x12e39d8e, 0x825af39b, 0x1f9ac790, 0x0ad0f148,
- 0xf3e47f32, 0x96709ee8, 0x4aa79a7b, 0x5d79a7b9, 0x31ff6e1f, 0x390beb40,
- 0xf9b9e25a, 0xb71e1c9e, 0x89db2a9f, 0x200f523e, 0x7a133ab9, 0x41e5c102,
- 0x67472eb9, 0xb04afac5, 0x9bfcfaee, 0xe0a3db63, 0xf1e512c7, 0xd86e4bdf,
- 0xed879da2, 0xf003c2eb, 0x8a549c53, 0xf60c5a0e, 0xe4325ba9, 0xac7231f9,
- 0x8f56ab77, 0x966519fd, 0x80ff77a9, 0xd07b29e0, 0xe9486ee2, 0xf5e1075a,
- 0x27fe6266, 0x83e785d7, 0x958e46ee, 0x7963a7f6, 0x0e479f92, 0x73bf1c17,
- 0xedefd6bb, 0x9a531619, 0x48f44118, 0x942c9cfd, 0xe1a8edf6, 0xa4076c45,
- 0xbbb9ba35, 0x19359038, 0xb32a9cf2, 0x9afefd17, 0xa4e9e06e, 0xde52f18b,
- 0xd94cb93d, 0x72f2e124, 0x5fc8dfbc, 0xf0fdb385, 0xe8de76a4, 0x347441eb,
- 0x8d71cf82, 0xe8d3ad9f, 0x74cf5d66, 0xb98ba263, 0x3741b790, 0xd8b9e473,
- 0xfada5749, 0x7af0dd12, 0x485d13b7, 0x6be211ba, 0x4b742fb4, 0xdf443b79,
- 0x7bb7d4ea, 0x0e88b7d0, 0x6de8c89e, 0x074217a8, 0xf1181ac2, 0x252cfc8f,
- 0x2a4762a3, 0x564904b4, 0x2cdc47e1, 0xd7c646ce, 0x4ac0d65d, 0x55bfa782,
- 0xab00cbae, 0x653a3ba4, 0x5cf911fc, 0xd9f88bdf, 0x0b17fbe2, 0x4e2fd52f,
- 0x4ab76c12, 0x2626f927, 0xa409db8e, 0xb0aa0770, 0xf68a50e7, 0xffb231c5,
- 0xc46fa262, 0x5e7f51b3, 0xdcc69c4b, 0x2a7fc246, 0xfb407e58, 0xef6e7ce8,
- 0x5ef6c8b7, 0xd303a52b, 0x5f6a3ee4, 0x2bf8c615, 0x264073be, 0x58d264e8,
- 0x66249d33, 0x4f41394a, 0xbadd331b, 0x7e0890dc, 0x9838d250, 0xf08699cf,
- 0xd85daaa2, 0x7d48cfa7, 0x4df578a3, 0xbe004793, 0xd83de367, 0xeca7a7c2,
- 0x494aff60, 0x7f1f9ce3, 0x5b653d08, 0x98df7f38, 0x780f7be9, 0x17b8ad3f,
- 0x250fa2ec, 0x27b15f33, 0xcb5f7b52, 0xa3bf73d4, 0xc77f8a74, 0xa62390db,
- 0xfeb950cc, 0xb9ed2114, 0x338e51a2, 0x35b9ffd9, 0x3c9bfa91, 0x94f0e15a,
- 0xf25770b6, 0xb455f832, 0xd1a63df5, 0x98b84377, 0x70139cfd, 0xcd4cc80d,
- 0x24821f86, 0xe4e46ed4, 0xfd5a9901, 0xc806ca31, 0xc9c70343, 0x47d7a7fd,
- 0x2d37e83f, 0xcfb7ee53, 0xe7a77db4, 0xbf5caf09, 0x5b584d6c, 0x5b52345a,
- 0xf3a51070, 0x039ec6b2, 0x2a94999f, 0xde07ac26, 0x4d8aaa7f, 0x317b6f0c,
- 0x4ca885f3, 0x8f82f837, 0xfba65914, 0xee99836d, 0xb7b4c6db, 0xb6f949dd,
- 0x7ed2392d, 0x0bf3e9a7, 0x82e5825d, 0xef9231b6, 0x64896fb5, 0xd5cc73fe,
- 0xed27151a, 0x97dfac5d, 0xd7f1246a, 0xe17b86ba, 0xda752e37, 0x2c7e70db,
- 0xfb3a607c, 0x499b4bfb, 0xfd5c47bf, 0x4f7eb35a, 0x6658787a, 0xa87afa37,
- 0x019a0e5e, 0x55ad951d, 0x36070e98, 0x997dec78, 0xcc2e29cf, 0x4c19ccaf,
- 0xf32fff07, 0xf05c7384, 0x37fb7a65, 0xb091f784, 0xba0e0b1f, 0xd7c83a1a,
- 0x3716e30b, 0x1a7eb315, 0xfe63ed99, 0xbc255035, 0xd4cf1d10, 0xec126fe8,
- 0xd64be0a0, 0x97d8bed6, 0xdf6865af, 0x4e995ce3, 0xbe70eba6, 0x7366d06f,
- 0xb6be0265, 0x9c16e155, 0x4a5693b7, 0xe77da6fa, 0xbb80bdc0, 0xfc0222ac,
- 0x7ed6b8e0, 0xd16b0afe, 0x77f7ca46, 0x1c546b08, 0xe98f2be8, 0xfbe5903b,
- 0xe9f7eb0c, 0xc828c42f, 0xd76ba50d, 0xfa34b849, 0xe323d610, 0x4f9c69e3,
- 0xc3e4a4b7, 0x53d3a0ae, 0x766c6b20, 0x5d0308e6, 0x1e9850cc, 0x60873e68,
- 0x55979d9f, 0xfa728f92, 0x32f41fbe, 0x1d306c69, 0x6a85d871, 0x76abc725,
- 0xe20f0a24, 0x3b443a87, 0x241d23cb, 0xea3f808f, 0x27e7467c, 0x475e1744,
- 0xeb7ad742, 0x3d6b657c, 0xde784681, 0xf4e0ef56, 0xb0977aa9, 0x5ed84f12,
- 0x67f9c89f, 0xb69cddeb, 0xf7f167d4, 0x9c3deae7, 0x8a3f59df, 0x7bd42ff3,
- 0x7ebbbf39, 0xabbfa722, 0x84efe22f, 0x3a4be61f, 0x93f9d1df, 0x9f3a5f4e,
- 0x05aba50a, 0x33dc6684, 0x0fccf6a0, 0x79883e75, 0xf8bbf258, 0xa9cebe93,
- 0x113f914a, 0xceb450fc, 0x5428ff01, 0x22f33ecf, 0xdca3b9e1, 0x1da1f1c9,
- 0x0ec1f242, 0x8dcf83a7, 0x0dd8bb64, 0x32c340fb, 0x869ddb6f, 0xb95e7873,
- 0xba06ac22, 0x5c36a9bd, 0x7d740d58, 0x29ac5d73, 0xfdeebf3f, 0xff50fad7,
- 0x2df18b3f, 0xbb1cfac2, 0x7fa3d4e3, 0xdf8fefa4, 0xeb033a71, 0x796c704e,
- 0x1edee308, 0x2ad1c1a1, 0xceddbae1, 0x08c0d2f2, 0xcfe14fa9, 0x2d738861,
- 0xfb7d894e, 0x31e70437, 0xda061d6d, 0xc146b35d, 0x754ab32e, 0x1f2aa4ad,
- 0xfbe8bd63, 0x2af5b59f, 0x7c630ba9, 0xfb6a3496, 0x4cff18d4, 0x57de3cf8,
- 0x78a75cb0, 0xe91f8085, 0x41ec2ff8, 0x71c41192, 0x685011c5, 0xad94851c,
- 0xae17b0f9, 0xe428fd79, 0x10eea574, 0x873ed5cb, 0x8428e393, 0xd8d676df,
- 0x9077529f, 0xfed689eb, 0xc8c838a6, 0xf83b5ee1, 0xeb1b6805, 0x40759256,
- 0x268b9f60, 0x94f1ef85, 0xd6cbdf69, 0xe2f8a628, 0xe655337b, 0xac32878b,
- 0x14ca0e9c, 0x89a5139a, 0x5e9d29cf, 0x3c707f89, 0x9e535963, 0x2f81917d,
- 0x4bdf6897, 0x78a62cb3, 0x321943c7, 0x98106a2e, 0x6777a505, 0x3217daa5,
- 0xdc7373df, 0x24bf5ebd, 0xea757f2b, 0xe883f470, 0x8ff6aea0, 0xddb95a64,
- 0x84970ca1, 0x59e741c7, 0x74e9c714, 0x72e82e7b, 0xeff8a7cc, 0xeb3f0c95,
- 0x3ecf1559, 0x7bfc2cfd, 0xe30a7f15, 0x9862a9f3, 0x0ec8df66, 0xf2ce9c8c,
- 0x8ca9d78f, 0xd72dfc84, 0x7fd3fc7f, 0x547c4689, 0x57694ecd, 0x690db4a5,
- 0x6ce5edf5, 0xd0f6ab57, 0x9f0388fe, 0x5fcdfb35, 0xd14ff67d, 0xf897acad,
- 0x5bf8b493, 0xdeaaf78e, 0xc0e38279, 0xd0afb8f4, 0x37ca3576, 0x71eaa7ac,
- 0xb63fba01, 0xf78d9f70, 0x851ecd4d, 0x5d9a98f5, 0xe7c01317, 0x4aadf66a,
- 0x272e2ee8, 0x5e3d5fb4, 0xd5bfb740, 0xc39fb588, 0xc384487f, 0xffad0ff8,
- 0xaf546676, 0x62054353, 0x3b60aed5, 0x1c705588, 0x8e2d72c7, 0xc19023b3,
- 0xe49ee17e, 0xc4f59aed, 0xedbf49f0, 0x1063bab0, 0x88417caf, 0x53addd89,
- 0x7e7920fc, 0x047da39f, 0xed85f0bf, 0x1791cb2a, 0x7ef147b6, 0xff7edea8,
- 0x55dbc441, 0x8dd8566f, 0x308e2557, 0xaa77aade, 0x0fe061c2, 0xc50bb035,
- 0xd2e70cf7, 0x416aa3a7, 0x145a4b5f, 0xadf816ef, 0xde98ee1d, 0x35da7806,
- 0xd61a5afa, 0x45d79232, 0xf814ff83, 0x5cce419a, 0x8e7ef7b2, 0xe77aa413,
- 0x6df9ae59, 0x51b3c724, 0xce036f0f, 0xd80a1b33, 0x65a4bd4f, 0x05cb3547,
- 0x105d23db, 0xccdb4e69, 0xd1be29f7, 0x9346fbc6, 0x4fc0ced3, 0xaacff682,
- 0x4e030f2c, 0x5ad3cbec, 0xcedeab3c, 0xb3b64832, 0x0dfb7868, 0xae88e7f6,
- 0xda5a4bfa, 0xfea9d3a2, 0xe7dfbb27, 0xee4839d4, 0x23ce25e3, 0x3f4738b9,
- 0xf7c919d9, 0xf9d93eed, 0xd12f09eb, 0xc65ae778, 0xfb0c52f4, 0x1b20e06c,
- 0x41b73fd7, 0x25f301c6, 0x5a0fd611, 0xb6673e78, 0x27ac2927, 0x9f84df03,
- 0x8f9f3b33, 0xf63ef0b6, 0xab717bff, 0xc86b15b3, 0xb2eb847d, 0x5bfe8047,
- 0xed11fbb5, 0x7fe5fd9a, 0xed6affa7, 0xa4529b7b, 0x3bef238d, 0xc20e3d08,
- 0xd53beda5, 0x796efbc9, 0x2fef9d94, 0xbee6182c, 0xe8f81e71, 0x518e329b,
- 0x043f77f4, 0x3bc16ffd, 0x596f6cf1, 0x41f7736e, 0x36c38bfe, 0x282c13f6,
- 0xa1f51cf0, 0xed9d8c7e, 0x75b1224a, 0x6dadec04, 0xa8be5229, 0x933b435c,
- 0xe88fdc50, 0xacf84fcd, 0x8f84580c, 0xa50793f2, 0x523ebaf2, 0xd9daf16e,
- 0xfbe413ff, 0xc2ecc792, 0xfe2f7b8f, 0x5d7ea4e7, 0x0efd2a99, 0x517f608f,
- 0xcc17195a, 0x4edd878c, 0x69ca9ba3, 0x9fa06e54, 0x4cc14dca, 0xed674fc8,
- 0x72891ed2, 0xf9954de8, 0x46835eb2, 0xfd07e8a7, 0xceb8a5b6, 0x2bbbbcb3,
- 0xb0ef404a, 0xed2518bc, 0xdf09bf21, 0xaf386614, 0xe9241a6f, 0x07f341b8,
- 0xf848b7fa, 0xf8e41700, 0x4ca6f625, 0xb8fab9c8, 0x789ba24b, 0x78dab3bc,
- 0xa49a224b, 0xc78b44ff, 0xdf1e7d43, 0xd3bfd826, 0xcb1864fe, 0x1efba7cb,
- 0x9fe30179, 0xc730727e, 0x04a5b411, 0xf2e6ed16, 0xaee38e70, 0x082c2a78,
- 0x327b3f78, 0x9f58ed8a, 0xf61ca4d9, 0xcb025459, 0x1ea28761, 0x3afe805c,
- 0xe0298736, 0xc2299fa3, 0x2df2889e, 0xa80e59bd, 0x3f63afa8, 0x17b10549,
- 0x4e94afc8, 0xd9f7dc84, 0x276cc196, 0x950decfa, 0xf7d0292f, 0x13eef835,
- 0xc67e0b4d, 0x3ff4bff2, 0x6fea7e9e, 0xaff3bb83, 0xdb366c54, 0xd7f5f4c3,
- 0x2418efcb, 0x0c7703ed, 0xcac97a92, 0x7fe92e41, 0x30796c8b, 0xf929e795,
- 0xeb03ad03, 0x131fb47f, 0xd63f6f60, 0x714127b2, 0xccc1cf02, 0xdbef035f,
- 0x7a759ea4, 0x42fd8dbb, 0x79462f15, 0xe774fed9, 0xb0ef7c15, 0x78729542,
- 0xdbde4585, 0x99eed0ab, 0xb2674456, 0xcdf08b7d, 0x16fb6d7a, 0x8e471b55,
- 0x01d6d9fb, 0xd4109fd2, 0xf0e42ff2, 0x7149b83d, 0x2c69e6e2, 0x05c86d5f,
- 0x8e6e2f5e, 0x1a79f8e4, 0x87b88bc7, 0x5cfd9f8a, 0x116633e2, 0xae7407eb,
- 0xd4ae7f31, 0x9dd573f8, 0x5ee0c757, 0xe5477881, 0xad1712fb, 0xbfe03e9e,
- 0x4f7af8a1, 0x5f4b63e3, 0x41f99478, 0xc417ed25, 0xfcd52d59, 0x529f4bcb,
- 0xba5c9e58, 0x9887eafa, 0x136dbeef, 0xb6705fb8, 0xe368fee5, 0xf8d5ec7a,
- 0x0f7b5767, 0xf54a5fd7, 0xa337fb65, 0x36ad9e19, 0x5d09e00f, 0xfeddefc7,
- 0x6a5ff529, 0xf242d15f, 0xf3e5ee45, 0xdc8e28bf, 0x95f027f6, 0x676ce1ed,
- 0x7be7f5a3, 0x2d6c6746, 0x33ff308b, 0xbf3384cf, 0xf72b1339, 0x9e7427fd,
- 0xe780edfa, 0x5db1f787, 0x02e9bded, 0x80e2d0e7, 0xae1d5fb9, 0xafdf1afd,
- 0xc0c4f78a, 0xeb45534f, 0x7d68fe81, 0xdfedc47e, 0xd0fb71b1, 0x768e3cf9,
- 0xf4fb0c23, 0x9ba64899, 0xbae4fd33, 0x1076799c, 0xb16d679f, 0xde09de92,
- 0x674be864, 0x1b8a6562, 0x0a87a03e, 0x3387a497, 0x1ed85a63, 0xc1d94670,
- 0xb64d9b69, 0xfcd3a8ab, 0x897f44da, 0x26d01fa0, 0xb58ea9ec, 0x80c72047,
- 0xe7cc8fcf, 0xcefb8834, 0xd07376a1, 0xbebfce29, 0x84dcf259, 0xcf3e0578,
- 0xe3f61b7e, 0x733df0b4, 0x564f269f, 0x4fdcbf6e, 0xdcaa7588, 0xeb3fb0ae,
- 0x02fe7692, 0xddba5eea, 0xa972e89f, 0x6e22f15f, 0xb2e3696b, 0xa5fba025,
- 0x81e6ebb5, 0x2c79f5ee, 0xb75feed5, 0xd3c32a29, 0x27fb4e16, 0x74f1f2fb,
- 0x8a59aafc, 0x5c279f1e, 0x7b45f9fa, 0x2dcb2c38, 0x4db288e9, 0xa3655fd4,
- 0x49c796b2, 0x395bf5d1, 0xb84f1d3f, 0x5efb02cd, 0xb9df769b, 0x70ebf9a7,
- 0x524f04f6, 0x6d4fe496, 0x6f9ef229, 0xbef25bfb, 0x0a6fc5f6, 0x81fee262,
- 0xfa91c83c, 0x7f047f7a, 0x72106816, 0x96e7c68f, 0x704eaec2, 0xfb306e66,
- 0x9b36b273, 0x9bb643f3, 0xcd3bd9f3, 0xe6bddcf9, 0x7355e7bc, 0x7b7185de,
- 0xfa09e177, 0xa103e236, 0xbe85236f, 0xfa94ceed, 0xb7d0f236, 0xc6df4291,
- 0xc8dbe877, 0x1e46df43, 0xd0f236fa, 0xdf4291b7, 0x1a39f7c6, 0x7b352a9e,
- 0xe381d629, 0xeb84f6d4, 0x5fbc00f1, 0x6049cc2e, 0x52ac2a3e, 0x24b0bd1f,
- 0x4ec7e59b, 0x0754d33b, 0x38f499db, 0x7ca2b7a7, 0xc37ab2e3, 0x75647b7f,
- 0xc76e14df, 0xdf9ae7f6, 0x6573fb49, 0xf613b87e, 0x9f55d68e, 0x13d886ac,
- 0x444df288, 0xf6b60bfd, 0x556fc0ad, 0x6fec2bdd, 0x7ee15d6f, 0xfd7207e7,
- 0xc7a2eed8, 0xdce3152d, 0x7d26df03, 0x7b6fc649, 0x6a3b461c, 0xac485fa8,
- 0x3f21e435, 0xa1f39b35, 0x35f0207e, 0x355f69af, 0x6e3c1d31, 0xbe0bef6b,
- 0x0749e27f, 0xa13af3df, 0x6cc4edd9, 0xba63cb17, 0x8f29df3c, 0xc26ca3dd,
- 0x2b24bdf9, 0x1c77db8e, 0xd3dd30ca, 0x2f4b4d0b, 0xdabcb3e5, 0x58ab5f99,
- 0x8cd94e30, 0xaf59fb15, 0x73e68c4e, 0xb60d16e9, 0x0dff9041, 0x440e0f70,
- 0xe868e898, 0xbf10b8b6, 0xfd533f65, 0x4353bb61, 0x4c4396fc, 0x16d7d3e5,
- 0x17b5ef98, 0x1f2a6d5d, 0x878cef68, 0x96ff4873, 0xd64b9f06, 0xe7cce8b9,
- 0x4bd686f9, 0x847f3c56, 0x25bc56bf, 0x9a32d3e5, 0x7b40b787, 0xfe1f239f,
- 0xe014ef39, 0x16c5bcaf, 0x54bef38b, 0x9d44873b, 0xb72e5cf9, 0xae2bbf0a,
- 0x5dcaee8b, 0xed4b1bc2, 0x2536b4bf, 0x71dd1d33, 0x14255934, 0x2472192d,
- 0xe57d0837, 0x8246278d, 0x8dd4aaae, 0xbf2a2ec9, 0xc56c0955, 0x71d3ec45,
- 0xa4ae123b, 0xf8b4fff2, 0x75a2c91e, 0xef05b64a, 0xba3e0f9d, 0xbde04d23,
- 0x9f6b1391, 0x3cb8e68f, 0x6d92fe8d, 0xc1d008ae, 0xf59526c6, 0xd4cdd814,
- 0xd93a2eb8, 0x011f14d8, 0x8c7ca6bd, 0x717297e2, 0x0bdf49b4, 0x9aa147c5,
- 0xe69b8efd, 0x2a90f17b, 0xf36a40ef, 0xf592ee5c, 0xaf6cba45, 0xe7d7b3df,
- 0x47d3527b, 0x96be362b, 0x3f7d0b79, 0x2e7f783b, 0xd65784ae, 0x2f79e6ef,
- 0xdadf6cb5, 0x1d7bd297, 0xd7985dbe, 0x45c6b7f2, 0x7ce3495f, 0x87c65db9,
- 0x89f9b5ff, 0x4fc4fcc7, 0xc27a6cef, 0x8541fb0e, 0x1e876035, 0x46e4fe5c,
- 0xae18e23b, 0xbf91b6db, 0xcc56da2f, 0xbefd00f6, 0x7c7aa7da, 0xf28f86b3,
- 0x1706cab1, 0x60ffb3cd, 0x23ca4fc9, 0xa14b1c64, 0xdc35fa3a, 0x4db8b813,
- 0x66bcc3c9, 0xe3b0ed14, 0x1a3bff1e, 0x37dd09df, 0x17a7871d, 0xabbd86f6,
- 0xdf20f145, 0x89993ed3, 0x0f4472cb, 0x39b92fd3, 0x0869719d, 0x0a1fd5eb,
- 0x8e70bed2, 0xd7e88764, 0xcf695587, 0xad123c43, 0xd83db973, 0x85e43a6b,
- 0x0ffbe597, 0x8d0e7455, 0x2ede87ca, 0x169455c4, 0x744d568d, 0xd23e335e,
- 0x0b660fb4, 0xfea2bec3, 0xa67e8966, 0x69498cfc, 0xce37faa6, 0x67e21a18,
- 0x672aefce, 0xe99575fe, 0xdbaa975d, 0xe3856efd, 0xbf9ac67b, 0xd33b7a53,
- 0xf6a60dd9, 0xe99a6255, 0x9b25975d, 0x133c65df, 0x8fa1d995, 0x41f76b95,
- 0xb4dafb66, 0x9dfcadef, 0xafda6226, 0x7f3cd303, 0x8da5e794, 0x1dba08fd,
- 0x9ffe367d, 0x1979ddd6, 0x898f19d1, 0x3bd85bf8, 0xe479667c, 0x42a9eb08,
- 0x669543ae, 0x92caaa8f, 0xff2aa8f6, 0x9be23b11, 0x24d6ff09, 0x93754942,
- 0xaa46f6c2, 0x938a48ef, 0x7df1dbf9, 0x075ef9a7, 0xf284bbfa, 0xf6316a43,
- 0x35796857, 0x42e71bcd, 0xb047ec35, 0x6bfde983, 0x19f4eafd, 0xb43ecf7a,
- 0x3760df1f, 0x6d8ae7ef, 0x23f60e3b, 0x8a2f4cbc, 0x497c43cd, 0x5b54cb65,
- 0x74e5f671, 0x8bfdf3a6, 0x7fb616e5, 0x857e1c75, 0xdca3ace9, 0xddf191ea,
- 0xd51f18ff, 0x0d6f695e, 0x3445bfa5, 0x1799f81f, 0x013c54ed, 0x5e03d645,
- 0x432e81fa, 0x0dd492f0, 0x753c74be, 0x7ca42de2, 0x1c33fe31, 0x004a11f1,
- 0xbc32f27c, 0xfad478e8, 0x2c13e45f, 0x013c1267, 0x3fa42786, 0xff742a31,
- 0x20efc2dd, 0x84506c74, 0x3c6ef94b, 0xed24bb4c, 0xe4d30336, 0x2977dbbc,
- 0xb3ebc81e, 0x7bf9592e, 0xc914bd27, 0x9319f538, 0x93b3fbcc, 0x7ee48a7e,
- 0x81297999, 0xd36ffddb, 0x8fd177d1, 0x8d099fee, 0x5defd6ff, 0xddac1a2d,
- 0x46211a4b, 0xc44ffb7c, 0x4072ebaf, 0xe749e74f, 0xf0d2e379, 0xf6ed4cef,
- 0x951e5e1c, 0x3edaf1ca, 0xf3a88ecd, 0x4ed69f63, 0xca98b71f, 0xefa005dd,
- 0x17b009fe, 0x4b4c6eaf, 0xe317be36, 0x3b63655b, 0x788af26f, 0x5dc38b4e,
- 0x8a473809, 0x4d77c857, 0x27d5df8e, 0xe388fe01, 0x26bd6cab, 0x7fc7d751,
- 0xe4593c3a, 0xe9c85f20, 0xb3617589, 0x3d78768d, 0xed2c1a6d, 0x7e39fa35,
- 0x7a3872ce, 0xaadff636, 0x0508d15d, 0xfdeaba5a, 0xcf3a2e91, 0x8f7c5957,
- 0xef7f660f, 0x85cf3e46, 0xec2e636b, 0x513cba70, 0x0fb0aab1, 0xffcafdce,
- 0x9c31e579, 0xb615befb, 0x9fa3c804, 0x840aa07d, 0xc679d0ce, 0xe1a7c472,
- 0xdd3fae92, 0x10f7c912, 0x1784553f, 0xea25a6fe, 0xe9975cfe, 0x0b3bdd5c,
- 0x41deffe3, 0xb8c08ee3, 0x1889ad81, 0xc4cfe3d7, 0x1915beb8, 0x57d9dd31,
- 0xbd66b4f4, 0xeab6f394, 0xeecd64f6, 0x791ef90c, 0x6270f7c8, 0x1e47be41,
- 0xc8523df2, 0x4dbef8f7, 0x6c0c2ff3, 0x7e83cf68, 0xb22b1fea, 0x75bcf96a,
- 0xbf7e4166, 0x80f08945, 0x78a2dcff, 0xc1bc70fa, 0xc4def966, 0xd57590f2,
- 0x49da6bd3, 0xedd03306, 0x84392e0f, 0x8c6e6e31, 0xc7e53588, 0xea9afa39,
- 0x4c52ba17, 0x0e25d7e5, 0xef5f9536, 0x7fe533ce, 0xa9a57598, 0x18cf64fe,
- 0xa347fe53, 0x9fd537ae, 0xca6a9dea, 0xc7389f4f, 0x51667f54, 0xc6fca9b1,
- 0xe54c4bd9, 0x4ccb7c73, 0x3fe579f9, 0xa9bfd535, 0xdca98576, 0x9c565c2b,
- 0x77a17b77, 0x865fde11, 0x2de945de, 0x8d38656f, 0x9c7a3aeb, 0x7977dba5,
- 0x5072694e, 0x36dea1bf, 0xd7ee25d0, 0x68070e80, 0xc0be67f7, 0x7e1bd2b9,
- 0xf522a0c9, 0x417d23de, 0x42f5a137, 0x1e4747cb, 0x35a6e8b8, 0x7a2a7a2c,
- 0xc4f895a6, 0x140e2be8, 0xf57e51a4, 0xad338553, 0x5f465fc4, 0x54f9e07d,
- 0xb53d37ca, 0x3b0f964a, 0x51e51170, 0xf320df4f, 0x1a8b05d3, 0x7af90c8f,
- 0x31e69f86, 0x6ba907fa, 0xd26ed23d, 0xa769bc6d, 0x80f0883d, 0x01e04d78,
- 0x13e89069, 0x4fa201e9, 0x7d12afa4, 0x710ba596, 0xe913e890, 0xf13fd221,
- 0x7fa4f7fd, 0xfa4c3d22, 0x49b7d227, 0x847a44ff, 0xefa44ff4, 0xf4e6cfd5,
- 0xb71f7a83, 0x397d43fb, 0x6beb47a7, 0xf5c7fbf9, 0xb9fe9c75, 0x5ddfcfde,
- 0x433b7443, 0xba31ed90, 0x7e1aabff, 0x76edd847, 0xfd1acedb, 0x9e578ac2,
- 0xb2bfbaa7, 0x46b456a1, 0x5ab69f62, 0xba394f63, 0xf33d90fc, 0x6538ab5b,
- 0x0f5ebf90, 0xe10d26f7, 0x98bbfbf1, 0x8fdf6b6f, 0xf203e989, 0x520a9d69,
- 0x9f28297d, 0x2aba5f99, 0x0fc7f886, 0x81b5a9be, 0x74b43e5e, 0x0bfb7f7f,
- 0x87da4e8d, 0x8c873378, 0xebbbf2e8, 0xf8ceef7e, 0x1e5f831a, 0x8e3d6cfe,
- 0xf3c7f747, 0x473e219f, 0xf1eb93f7, 0xef6ad741, 0x707ee923, 0xef47fe3d,
- 0x6092f0b7, 0x1acd77be, 0x512dcb2b, 0x38c41156, 0x35cf9c7a, 0xc43c968e,
- 0xe9535c3b, 0x3eb738e4, 0x23ef1df4, 0xde2f3d52, 0x25fcdb99, 0xf2edcfd5,
- 0xe6d1f7bb, 0x8f3ce541, 0xb63e4bc2, 0xc5a35ed7, 0x43bda01d, 0x753bf396,
- 0xfc831188, 0xe1db2941, 0x7da8551e, 0x001c577e, 0xfaca47ea, 0xfe97f441,
- 0xdd178d15, 0xc4068fb5, 0x6e21e4b5, 0x93b5406c, 0x8c36c6e3, 0xb9bdf209,
- 0xded7b1e9, 0x4e22aad3, 0xd8c7e638, 0xfaa74f47, 0xefcf0266, 0xf00f73ee,
- 0x5c5a7e50, 0x53a46f5a, 0xebb5de43, 0x62e67d66, 0xe97c755d, 0x4bd7d778,
- 0x9e21b7c7, 0x5dfcf18a, 0x6eba17eb, 0xafa5eba1, 0x2ea5e153, 0x977f03fc,
- 0xfad74faf, 0xfffb3ec1, 0xb70cfacb, 0xcc7df0b6, 0x07874ab0, 0x7ad77d70,
- 0x262edcdd, 0xf1783ee0, 0xa441f725, 0x5f7cd24b, 0x8a967f80, 0xc05c1331,
- 0x81cd4b91, 0xdeb4279d, 0xef59eff2, 0x077a90e1, 0x7cd60ae2, 0xbd79769d,
- 0x1af521dd, 0x7ed60b62, 0x374ced2d, 0xe43d7ce2, 0xed1fb1b6, 0x1e3d70cd,
- 0xa3ccebc3, 0xf1a71c75, 0x96a243a3, 0xd7120fcf, 0xf1df58fc, 0xcb8f52da,
- 0xee72cc5e, 0x6e59a778, 0x514df09c, 0xd09dc623, 0xd68e442b, 0x6837e1fc,
- 0x9a275efd, 0x7bd0dfb8, 0xf26837f2, 0xf93de869, 0xf26886e7, 0x01488a65,
- 0x3c2f1a9f, 0xbded2e12, 0xd92ef183, 0x2f46926f, 0x8c556e69, 0x9f083d06,
- 0x55f1cc15, 0x8b9077bc, 0xbf9cfd71, 0xfd38045e, 0x3f7208a1, 0x22918993,
- 0x6c2123ff, 0x79039df2, 0xf55ab33f, 0x2ce427fd, 0xc2e65efd, 0xd8abdf70,
- 0x4e44af77, 0x28f78f99, 0x4b8c15c6, 0xe303bb47, 0x1c9f2cdd, 0xbcb1a470,
- 0x8b657d84, 0x6f7fdaeb, 0x6e92f9d0, 0x9106f795, 0x3a60dee2, 0xe9d7966d,
- 0x86fe41fc, 0xed0215ac, 0x71cdb826, 0x1ffb7a38, 0x6b0ac6ba, 0xfdfccf41,
- 0xfa7bb987, 0x66f3aec3, 0xb0f1a0ec, 0x7b19bf83, 0xf7cc78c5, 0x163ed8cd,
- 0x39d98699, 0xbd3179cd, 0xa74e793b, 0x26f1aee7, 0xcf7c6b08, 0x1af1d678,
- 0xcf1d4f8a, 0x2f5e02ff, 0x2fd78774, 0x37290b06, 0xa23e4a47, 0xac2ab7e3,
- 0x1f671ef3, 0xa5445c63, 0x33e9e243, 0xb87b8c80, 0x3cf15775, 0xfd17a93e,
- 0x7545ef81, 0xd43cb8a3, 0xc736250e, 0xc77f5e70, 0xef83ccec, 0x7c122746,
- 0xea7c39d7, 0xd9cbc250, 0x492660fc, 0x0d93e230, 0xfaef7c0d, 0x07aab3dc,
- 0xeaa57ff2, 0x4f01eb5e, 0x3f655379, 0x95e6f8a0, 0xc509f864, 0xa3e3eee6,
- 0xaf121d2b, 0x32b3feba, 0xa7f788ad, 0xca241570, 0x3de4d5f7, 0xe81e5d72,
- 0x8be3261f, 0x3dccfbf5, 0x232307c9, 0x1feb1f24, 0xbeaef926, 0xf38ed49e,
- 0x48cfd449, 0xf74953e2, 0xb03eb045, 0x1a4de5db, 0x78ca7f0a, 0x75a79261,
- 0x97a3bfbd, 0x5af03f7c, 0x68d0f991, 0x643f8fca, 0xef2a0113, 0xf14c2f18,
- 0x946f8564, 0xfa16bbf2, 0x44b098fd, 0xfc0cafd2, 0x611ddc20, 0x32346f7a,
- 0x8f94ce2b, 0xd533f4f2, 0x6a95198f, 0x0ef58f2a, 0x4fc79531, 0x7be537cc,
- 0xaa655d17, 0x58f667df, 0xefafbe53, 0xa4fd5306, 0xbca669f2, 0xe49297ce,
- 0x76a03127, 0xc0fda9ae, 0xfd5312ba, 0xf9857f14, 0xf7bf54fd, 0x9e025648,
- 0x697f9477, 0xbc87966b, 0x96116aaf, 0x90ef5d91, 0xde5a3afc, 0x5ce89907,
- 0x6491efc4, 0x0cccfd34, 0x8a8dfa8f, 0x46292673, 0xae24f7e1, 0x60f99b73,
- 0x3f39a261, 0x2af795de, 0x2b9c6e96, 0xa828fe34, 0x6a929e80, 0xf3efa6ad,
- 0x347c6a86, 0x9eb2e457, 0xefeb5dfe, 0xff5a621b, 0x4e5d7c50, 0x05e7cf0f,
- 0x6f9ed2dd, 0xf9ec179c, 0x9ec0bc46, 0x9ec3cc6f, 0x7b0fac6f, 0xf61cb1be,
- 0xc179637c, 0x8c1d3321, 0x83a66238, 0xe99a8e2b, 0xd0cfa740, 0xd5b2baf5,
- 0xa7c21f4c, 0xb07f94f8, 0x6a7f575e, 0xf5d09fa6, 0xd78b0921, 0x1153826f,
- 0xfbfca48f, 0x642eb5eb, 0x87d73abd, 0xf6f90df0, 0xbe50e0da, 0x48785b27,
- 0xe4b77815, 0xd79e6d51, 0x27e5ad0d, 0x7d5bb6c3, 0xae7de6bf, 0x5f4355d1,
- 0x871d5749, 0x1f070f9e, 0xffa4347d, 0x02f9fbd7, 0xfe3b3fa8, 0xf41db262,
- 0x45df2ed1, 0xee1f036b, 0x49b715dd, 0x77bcba7a, 0xe39533f2, 0x9a598e18,
- 0x82ebf014, 0xf036df18, 0x6bb39bcb, 0x1367f815, 0x458ed145, 0x8ff9cb88,
- 0x17e464b0, 0x77f7940c, 0xdf2b7645, 0xe2f9d07f, 0x17192482, 0xf35432d8,
- 0xdc647af3, 0xa543f3ce, 0xe3ca24dd, 0xe2e338e2, 0x83bf796c, 0x05e499d4,
- 0xe1c7c039, 0x47c041f0, 0x69df85b3, 0x39b9b56c, 0x86ffc46e, 0x83f7e02a,
- 0x72f90b1c, 0x35df2680, 0x0070e47c, 0x85eb41fe, 0x1c1d9ce9, 0x5be29870,
- 0x99736e87, 0x892d39f2, 0x76c3faa6, 0x79e54dbb, 0x79532cc1, 0x298f21c1,
- 0x28c8e23f, 0x8e2bfd53, 0x2bf94d7a, 0xea9a275b, 0x9169fd5f, 0xda249f29,
- 0x077e061f, 0x3e8f522a, 0xe2553433, 0x7e382dc9, 0xb19dc16f, 0xa6a7def2,
- 0xba72eb5f, 0xd3ecf71e, 0xde5439e8, 0x71f7681f, 0x5f43751c, 0x187be1e1,
- 0x129f4d0e, 0xa7c03d66, 0xde772dc5, 0xe39bf0a3, 0xe578f596, 0xd3417d4e,
- 0xdd39740f, 0x35ecaf10, 0x082f2bc6, 0xe03dfeeb, 0x3dd6d2ff, 0x7681fe14,
- 0x9f86df39, 0x88def9da, 0x3c28570e, 0xda38b533, 0xefee8e2d, 0x25b2d8f5,
- 0x1465259d, 0xfef41dfd, 0xcad3cd98, 0x365fdd6f, 0x4fd5a79e, 0x0bf0d5f4,
- 0xfba567bb, 0x305bc7bb, 0xbe67ce4c, 0xd4f372c5, 0x32cf8e6c, 0x3bdf83bd,
- 0x51fdea5b, 0xf3b74ab9, 0xe323105b, 0xdd8c44a7, 0x79edfa0d, 0x44cf893c,
- 0xfbe468bb, 0xff9c5a47, 0xee5b6b4d, 0xf3454419, 0x7bda4ded, 0xe81b0160,
- 0x9258d261, 0xd884f2c3, 0xb35765b6, 0xc7a2e493, 0x3f6cacea, 0x35ddbfe9,
- 0xceb8248b, 0x49031bfb, 0x407d01ef, 0xb4be81b6, 0x71e54721, 0x1b9ec1ae,
- 0x4df6fd81, 0xfb3d9c67, 0xced08405, 0x754fa90b, 0x874c6f43, 0xaffe3cfa,
- 0xf143ede3, 0x42adeba4, 0xd74c338b, 0xd7f1d2e0, 0xf0d07769, 0x707fdfa7,
- 0x6d15b32f, 0x8a3df272, 0xd2db8889, 0x4725afd7, 0x86ad65d2, 0x3f03d40f,
- 0xa15ea9ea, 0xfb031d9a, 0xc7708772, 0x43a1bab8, 0xc2bffd3d, 0x9eff8e8b,
- 0xc4f557ee, 0xc2f9421f, 0xeab7dd77, 0xd9ff4e89, 0x3e06577b, 0xb468e321,
- 0x5ba40975, 0x677d3fd8, 0x6f3f7994, 0x8d7696cc, 0xa2beda71, 0x6799ddf3,
- 0xf60217e6, 0xaa5586f1, 0x39e42fc6, 0xc4529c5e, 0x3a8b11ef, 0xe533cb17,
- 0xcfd62f1a, 0xc80f85d7, 0xf5aee0f7, 0x455c2516, 0x9b6692f9, 0x15a048d8,
- 0xf38fbbfb, 0x1b93bf66, 0x449efe71, 0x3726cdc0, 0x49b4b71a, 0xb94fec09,
- 0xc5ecbdfc, 0x5c7ef94e, 0x8bd9bbe8, 0x2f17fd35, 0x86df602a, 0xcfee65fa,
- 0x8f6bced6, 0xe12b7cf0, 0x0ba5bd67, 0x02de8d7e, 0xe8311df2, 0xf3b4aac3,
- 0x8e969c70, 0x24fc3737, 0xc2795df1, 0xce16446f, 0xb76b46f3, 0xf89d7471,
- 0x9d2afdd5, 0x1cc3c27e, 0x171a1faf, 0x4affd670, 0x1a7e7774, 0xd3d37fdd,
- 0xdff4e83e, 0xa85f877c, 0x1e8969c3, 0xff043e05, 0x57e8dbbf, 0x8248f3f9,
- 0x3127fd90, 0x6ab48ff0, 0xb98d89fd, 0xf44fdddd, 0xf1c5d8ad, 0xbe2cab10,
- 0xf25c40ff, 0xef1fea19, 0x3ce5deb0, 0xf8fb8090, 0xd3bb3493, 0xe4d6f3e5,
- 0x7ef2d1b0, 0xe55f8740, 0x2898eef5, 0x1f1373ef, 0xba77d815, 0x08aa7f1f,
- 0xfbaa42f6, 0x466b7c49, 0x3f75fe31, 0x14707149, 0x61b339c3, 0x484f0db1,
- 0x7b7c3eb6, 0xcbbb8461, 0x403ad27f, 0xe4f4e7c6, 0xc97e9947, 0xb8cdfdc3,
- 0x73f71574, 0xca453b2c, 0x2cf78d6b, 0xd90d9d03, 0x67986c26, 0xdffbadab,
- 0x23929819, 0xf852d1f7, 0xe21b7bd2, 0xac28567e, 0x84572c07, 0x7cc4071e,
- 0x6c5fcf5e, 0x3285f2d2, 0x992acb7f, 0xc5ded56f, 0x3bd9aada, 0x98a44f30,
- 0xedf0f40f, 0xf3043d9a, 0xe0798a40, 0xf21af83b, 0x83c86be0, 0xbe0f21af,
- 0x0d7c1486, 0x51444bdf, 0xfd2a9e72, 0xfb0de33d, 0xf19efe03, 0xfe09b906,
- 0xfe1e631e, 0xf87d631e, 0xf0e58c7b, 0xe1cb18f7, 0xe1e631ef, 0x87d631ef,
- 0x8798c7bf, 0x1f58c7bf, 0x1e631efe, 0x7d631efe, 0xe58c7bf8, 0xcb18f7f0,
- 0xe631efe1, 0xd631efe1, 0x58c7bf87, 0x629d737e, 0xacdd07f2, 0xba503bbd,
- 0xa3e98e3e, 0x2928b539, 0x3ff7d687, 0xc7e7ff23, 0xf3ac54b6, 0x6efc25de,
- 0x47845560, 0x44d373ae, 0x2116eeb9, 0x0e7db9d7, 0x76edf3af, 0xf1942f99,
- 0x03f4a1c2, 0x74f8cabf, 0x0a4157e9, 0xf8520abf, 0xafc29055, 0xfd2ade32,
- 0x57e1482a, 0x55f877c1, 0x82afc290, 0xa4157e14, 0x8520abf0, 0xfc29055f,
- 0xbf07682a, 0x55f8520a, 0x157e1df0, 0xe0abf0a4, 0x038231fb, 0x2e1d15fe,
- 0x26e9f9c8, 0xd0e8943d, 0x4cba87a4, 0xc6f9c879, 0x8df390fa, 0x8df390e5,
- 0x8df390e5, 0xc6f9c879, 0x8df390fa, 0x88f11bf9, 0xef296f71, 0xde41db1b,
- 0x9a73e637, 0x86c1affc, 0xf9c37935, 0x46b69157, 0x2e298f29, 0x9e72eb92,
- 0x58ff059a, 0x1c92b86b, 0x763921eb, 0x8d676fc5, 0x89f892bf, 0x2b976f16,
- 0x6f582dda, 0xbad37bf6, 0xe38282f9, 0x3bfe7383, 0x7f75cb91, 0xbad1ff2b,
- 0xac14ede7, 0x90cb6c37, 0x3dede970, 0xd522a5c2, 0xbbe577eb, 0xbe3a17af,
- 0xa5f98f5e, 0x28520e01, 0x910710f3, 0x1f33bb77, 0x2d5bedf1, 0x5d0b8c8a,
- 0x4372e329, 0xe349aae9, 0x3b2d5b4b, 0xbb50440e, 0x83c562f6, 0x5699077b,
- 0x0f96c871, 0x43e6d53c, 0x89cba89e, 0x6b83e2d5, 0x903c42af, 0x7f6eb70e,
- 0xf7c13e24, 0x0bb746dd, 0x738d197b, 0x8ec217cd, 0x64985bdf, 0xda04f297,
- 0xea1cdf41, 0x0c9bbc57, 0x4732bdf4, 0xff3e7b9e, 0xe8caabb2, 0xb5edd0fb,
- 0xdd1ee157, 0xeed908a4, 0x33478f37, 0x2a34a71e, 0xc9fef09b, 0x8bc2ede8,
- 0xd4bfb1fb, 0xddba1ee0, 0x71e32f65, 0x4eff7c5d, 0x4378b7ef, 0x2c17df32,
- 0x13e3ad16, 0xf0a127d9, 0xde3ae6fb, 0x62af7e68, 0x0fe2c47e, 0x2ffc8ec1,
- 0x7121c3df, 0xc3dcfbf1, 0x6c7866b9, 0xc4dbe221, 0xe40ef95e, 0xabc5e1e4,
- 0xbe5c137a, 0xf2077c80, 0x5f6a2f9a, 0xf966fce3, 0xcf93240e, 0xcf8f8648,
- 0x857e41c3, 0xe298ffdf, 0xe5e968df, 0x15e0bdc6, 0xdfd404b6, 0x0b503c2e,
- 0xaf790906, 0xb6579f55, 0x57096238, 0x95e22f40, 0x11fd0378, 0x7a0198e7,
- 0xf8d8f3c9, 0x56a9907d, 0x46fc0d97, 0x5d8a7397, 0x7542ec72, 0xf7df0c74,
- 0x2e13910b, 0xb8e6f636, 0x8e3d7f8d, 0x93ef83a4, 0xce15df2f, 0xf88f8572,
- 0xb9e6f807, 0x5977193c, 0x91c3ecb9, 0x38aed496, 0x8129d392, 0xf3df78ad,
- 0xe88eea74, 0x11cb0a51, 0xdb5fc087, 0x7be79f1a, 0x5eabc582, 0x1710fbe3,
- 0x96881f1b, 0x7fa4aaf7, 0x9bc34ca5, 0xa85a6f6e, 0x16994fad, 0x5ebc743f,
- 0xe74bcfbe, 0x71f7bf30, 0x72c6d1b1, 0x28bca846, 0x880feb84, 0xb46f2b73,
- 0xc5d3ddf1, 0x27578b6b, 0x777eee8a, 0xcfe32bc5, 0x349771b5, 0xa84e38da,
- 0x2b477b5c, 0x965877f4, 0xd2e8726a, 0x27efe66d, 0x4fe44272, 0xad44fdfd,
- 0xaf5bf14e, 0xa83964cc, 0xee71c6d6, 0xb94857f3, 0x0110ec48, 0xbf9867dc,
- 0xef059c62, 0xe5dfcc58, 0xd0dd28f5, 0x7c9a3bd1, 0x12830934, 0x8a5ea7de,
- 0xed32be60, 0xcfce3f52, 0xe9c894ba, 0xe4b2978d, 0xd11dac77, 0xbca16b4b,
- 0x26beff5f, 0x1c44cd17, 0xb2e40165, 0x1dd74c3e, 0x2218d744, 0x4357e357,
- 0x302bcabc, 0xf9b24fc3, 0x734e2156, 0xf7c146d2, 0x7e432691, 0x90d982a2,
- 0x2fb00aa7, 0x3ef90a52, 0xef8544c6, 0xbe1e4cb7, 0xb407db4c, 0x11f3042f,
- 0x37c3f77e, 0xf1006a7e, 0x3b7db337, 0x7d77582c, 0x6dfcf83f, 0x64e60efc,
- 0x7bc762bf, 0xcf4d3e81, 0x743de9d6, 0xd4882c5f, 0x694e63ec, 0x8fb4eaff,
- 0x9c7e5801, 0xc446f227, 0x2d802eb3, 0xb0b69fb3, 0x6ca8aa84, 0x1b9c9e59,
- 0x94236379, 0x6ed37fac, 0x9162df32, 0xfe489ff7, 0x717578e3, 0xe50fb0a4,
- 0xf92ad78b, 0xe1d79788, 0xd8d87afa, 0x777ac349, 0xe74941d1, 0x03eeccfe,
- 0x5df960bd, 0x4cff2619, 0xf84a5a68, 0x105a3ec6, 0xfc63fad3, 0x5c84d0d2,
- 0x889f1de1, 0xf9f5c877, 0x02e2208a, 0xdbf96ae7, 0x8ff2bd9e, 0x278c9c6b,
- 0xedcd8fce, 0x3fe22377, 0x38fe65fe, 0x7f1ab15f, 0xf14daa37, 0x3de57379,
- 0xdf5809c8, 0x7e76ecc8, 0xa66660c7, 0x212f9e4b, 0x72db9676, 0x845df2dd,
- 0x694b7b10, 0x846f3cb6, 0x8f3c9dd5, 0xb66e3afd, 0x3ef3ea57, 0x33df336e,
- 0x5cead3d5, 0xbff83769, 0xa0f127ba, 0x7ffbde45, 0xf1d812c4, 0xa06bd122,
- 0xe6016fb7, 0xbde208fb, 0xcf1f7e19, 0xa3477f4f, 0xef694f93, 0x74f4e865,
- 0xdf22eeac, 0xda712897, 0x9e81aadd, 0x7c28d6ca, 0x4ea4b11f, 0xadf627de,
- 0xfcbde451, 0x8f57ec1e, 0xdf9af9cf, 0x17fc7a51, 0xc350b211, 0x91fb7cef,
- 0x5d291cd7, 0xdd53fdf8, 0x7f926caf, 0x6f103306, 0x9c36b73f, 0x7afe41df,
- 0x018fd158, 0x87b6647f, 0xf11269d7, 0x1a9fd434, 0xbe29fbe3, 0x07238897,
- 0xf05c6f61, 0x7df8b7f3, 0x29bd9ae1, 0xf161eefc, 0x78d5bcee, 0x61ebe547,
- 0x89723bbe, 0x2e9d8b7e, 0xea90e43b, 0x973e58c6, 0xeed1f417, 0xf6768ba9,
- 0xf3ebe9e7, 0x0794ae60, 0x9c723c8a, 0x453e9a0b, 0x354db308, 0x7fe793d5,
- 0xedd43ee8, 0x95f6a1a2, 0xbe2af7d0, 0x7b029ecb, 0x5760dd89, 0xb631edd5,
- 0x7073e3ac, 0x77b7cf9e, 0xaf8dbbff, 0x4d4776eb, 0xa9d85d9f, 0xe84f66cf,
- 0x9d7fb903, 0xbd1d82e2, 0x11bee9cc, 0x8765f5f1, 0x14f0886c, 0xf2dce293,
- 0xe4bb92dd, 0xb72525bb, 0x9b3f31e6, 0xc7c9358f, 0xbc5187bd, 0x9402cb8f,
- 0xdfbcf7c6, 0x657bade7, 0x53d01ec0, 0xe0f7cbd0, 0x7b5e5ccb, 0x2a2e419c,
- 0xef4c526d, 0x17f9e818, 0x721da573, 0xc019a93e, 0xa6b74ddf, 0xd086f1d8,
- 0x108f127c, 0x46baf837, 0xaa3b7a27, 0x932571c7, 0xf941c552, 0x9dc114ae,
- 0xfe33c722, 0x9349514e, 0xaa063df9, 0xfedd72cf, 0xafee28dc, 0x4ef345d8,
- 0x3862ea41, 0xa78c0f09, 0x3fa87bf6, 0xebc38f37, 0xd80c1a51, 0x8f983760,
- 0xcfca1678, 0x186c0575, 0xbe51a4a7, 0x2b5be28f, 0x89adf1c7, 0x2e40e7b5,
- 0x03ecbab0, 0xa360f28c, 0xbe78cf7c, 0x0411fbf6, 0x65cbcb2f, 0xf3dc2784,
- 0x867defd2, 0x8358abe2, 0xd629acbf, 0xad365f2c, 0xe67d61b3, 0xc35ef0f4,
- 0x971d1a97, 0xfbe18e34, 0x2f5665f5, 0x69f097df, 0xb2ebfef8, 0x27e1bbf0,
- 0x90fcd399, 0x8d79052d, 0x3c097d2f, 0x829e06f7, 0xd23efc19, 0x678e70b2,
- 0xe26ae39e, 0xfbe276f9, 0xe40fc201, 0xe3115715, 0x8fc4e89b, 0x66b2c038,
- 0xc176faf3, 0x65df2513, 0xfde62e6f, 0xb3eac8b7, 0xac708cbb, 0xfb819fdf,
- 0xdf68735b, 0xe127fde3, 0xcc7df1f2, 0xc8dc0f53, 0x3806fbbf, 0x8f28e781,
- 0x3bde027b, 0xba61ba22, 0x4167e9d6, 0xfacfdc81, 0xacc35178, 0xec3f0ed2,
- 0xf7a9f6bb, 0x06f3e420, 0xffd86f5a, 0x76a7da7b, 0x71a4005c, 0xddf265ed,
- 0xab7dd0ba, 0x9f7d57ef, 0xdf56fbea, 0x8ad98fcf, 0x174a8982, 0xbdf3a1df,
- 0xf06eb9f6, 0x77834934, 0x84aeb6ae, 0xc18557d7, 0x986237ae, 0x53f3dfdc,
- 0xfd1ffbcd, 0xbd54dfa1, 0x03306c7c, 0xfaf53df7, 0xe3f3b8fa, 0x03ec3b64,
- 0x30ea5bd0, 0xe5fd674b, 0xe262df5c, 0xfc9da51f, 0x04aafb63, 0x0a1d8ff0,
- 0xdf96e1fb, 0xe5bee523, 0xe17bc8cf, 0xf8bf9667, 0x77aa2cf6, 0x0b8dc1fa,
- 0xca2ff78c, 0x2c5f9282, 0xfc0b7924, 0x0f269163, 0xe5f4cc6c, 0xe7ec330d,
- 0xc99ffbb2, 0xdb98fec4, 0x6f0a7792, 0xf9bcd1a8, 0x3f3fa3bd, 0x5787d2f3,
- 0x5dcebbb9, 0x6b251e7d, 0x3b15b5de, 0xf493788d, 0x0756777e, 0x38a55b9f,
- 0x6953e8b7, 0x61f33163, 0x8f7a4891, 0xa1eeb5d2, 0xa8dc50f4, 0x5a579cb0,
- 0xfea03237, 0xd2ebf22f, 0x8df953b5, 0xf4bd7d28, 0x73f45f77, 0xa67f8757,
- 0x35734eb6, 0x4c3d9e7a, 0xe5b9c3ee, 0xbc45f629, 0xf8ec53ff, 0x5a39cdbd,
- 0xf3329cf9, 0x72f5cef7, 0xd5fbf275, 0x9fb827d8, 0x58eaf584, 0xcbf6936b,
- 0x57ad97ed, 0x1d02e432, 0x9d9ac58e, 0x6f66a172, 0xcbc9a45c, 0x8d8b5eb4,
- 0x71f8b5eb, 0xcfa55eb9, 0xbc17f5be, 0xd3f34653, 0xef93a3de, 0xecb4940a,
- 0xb39b75e0, 0x852d9d66, 0x5864fdea, 0xa8fe298a, 0x05cf36b4, 0x35dcfd3b,
- 0x07e06d1b, 0xf126193f, 0xf3f74a16, 0x1d62f8b5, 0x148f38af, 0xaf473f83,
- 0x83b83e0f, 0x5975b9c6, 0x77f1b478, 0x3d1a02ed, 0xc6c6d697, 0xdad2bc61,
- 0x7fa17be2, 0xe840e7e9, 0x2e113bfe, 0xf50945f7, 0x6279fd87, 0x55ef878c,
- 0x377ed7cf, 0x43feb42f, 0xdafddd1a, 0x4cbfdf26, 0x92796c8a, 0x226a3be3,
- 0xe913e03c, 0x8c01ade9, 0xdc067a0b, 0x2f7c8b76, 0xf5b2bd33, 0xa241c073,
- 0x20f5c73c, 0x105b7bc5, 0xbf2813ed, 0x79ef22c9, 0xdd32aaa5, 0xa607e1a7,
- 0x5e8b9437, 0x5f1efcad, 0x2151bbe7, 0x9b176af4, 0x1fdde0d7, 0xefc12f7b,
- 0xa9f4e9a6, 0xa7d3a3f7, 0xcd3dbf4e, 0x9cf7d76f, 0xcd1489df, 0x49e592ce,
- 0x0bb8d076, 0xbfc41bee, 0xf35df16b, 0x35ef6bb4, 0x4c37ce76, 0x65c7defe,
- 0xfa974df3, 0x6df3cf52, 0x66e84c2e, 0xd4f3a1be, 0x431e89f7, 0x694fe9df,
- 0x1394fe92, 0xd4639d0a, 0x70b2c4ee, 0xe7d3ab8e, 0xbff5dba5, 0x8f87bdef,
- 0xf669c586, 0xc828d6aa, 0xdfb5f397, 0x99411151, 0x86a9f7c7, 0xaff9faef,
- 0xf3a75fbf, 0xdab593e9, 0xef5a28f9, 0x75167ec1, 0xf50f7c69, 0x2889d358,
- 0x627313bc, 0x6f4f848c, 0xde4dab77, 0x76511673, 0xbbcfde37, 0xfe3cd6e9,
- 0x8d8594bd, 0xbd9f587d, 0xf98d2ea9, 0x587e4dc2, 0x1ea74517, 0x83e348b0,
- 0xaf16b791, 0xd7f502be, 0xc2eff0a7, 0x4e7a742d, 0x43f6e7ad, 0x2313903d,
- 0x53daa79b, 0xfa961e59, 0xbb17ee82, 0x7c451155, 0x127c0d4f, 0x3fb2cbfb,
- 0xe2cfe71d, 0x983f52f5, 0xe06b1164, 0x50d81d3f, 0xf82fa134, 0xfc8ac3c4,
- 0xe60d9a3d, 0x0e86bcf7, 0x7c02fa2a, 0xbc5df426, 0x5e2d6d5b, 0xe482a7e8,
- 0x16d1d80e, 0x9e588b3a, 0x93e345bf, 0xf8db9ac7, 0xaf1d7ebd, 0x78bfe08f,
- 0x7ec42aa7, 0x1a276a14, 0xff07d9a8, 0x7b8faf1f, 0x0080003d, 0x00000000,
- 0x00088b1f, 0x00000000, 0x7cb5ff00, 0x65545c0b, 0xce7bf8da, 0xc0cc2b99,
- 0x1245c880, 0x85848b87, 0xd780c034, 0x508151da, 0xb9bba0bb, 0x658e21ba,
- 0x500665ca, 0xd7775ddb, 0xa6a18cfe, 0x45fa7d66, 0x80ed65a6, 0x76c36a97,
- 0xa8283448, 0x59990bc9, 0x6a37627f, 0x5b63b92f, 0xba402de6, 0x6dbfedfc,
- 0xbcf3cf7d, 0x511730e7, 0x6fbefddb, 0x797bf9fc, 0x9f5ef3df, 0xcfbcf3fb,
- 0x8c346339, 0x63181b75, 0x418e8b2a, 0x89486839, 0x8d8c9964, 0x615f6909,
- 0xf6c3894c, 0xc645db25, 0x7cfb4046, 0x1962c893, 0xfa31d72b, 0x1fbf8f7d,
- 0x3c1f7631, 0x42f3c337, 0x07d634a9, 0xcc37d7fd, 0xd6c972a0, 0xf1ae6dc2,
- 0x2509258c, 0x606393b1, 0xb66ae3c0, 0xce258a07, 0x2b307719, 0x8da4d3cc,
- 0x73c325d2, 0xc7292bb5, 0x496f9fe0, 0x0c4943e3, 0xd4699dc6, 0x7b4377cf,
- 0x414e6981, 0xba5f8c14, 0x325b2a33, 0x6f5dfbfb, 0xc91b1811, 0x4673a558,
- 0xcc614b1c, 0x67e1ddf1, 0x1fb0a94c, 0xf304d398, 0x7709e57f, 0xa38ba0bb,
- 0x82492dae, 0xb3aa3c23, 0x7fa058a7, 0x6f31d895, 0x4e73cc12, 0xa04def70,
- 0x5338e6fe, 0xe5a1fac0, 0xccc63ae9, 0xff398ce9, 0xcf3487cf, 0x3b89e2e7,
- 0x8778c016, 0xce047f73, 0x1ff8f553, 0x6c3201f2, 0xb2cf689d, 0x8dbce1e4,
- 0x1c124d7b, 0x56637b74, 0xbee39c09, 0xda46c7c7, 0xf79f2f33, 0xa8b668b6,
- 0xdbcbda04, 0x6957c118, 0xd48ee85f, 0x5eeddc20, 0x696131a6, 0x28689a62,
- 0x956c48cf, 0x52dbca07, 0x06b9a2d8, 0xe10dbb7f, 0x899eeb00, 0x025492dc,
- 0x9f7b15ed, 0x79433248, 0xa5ebc8d6, 0x9c7a7f7b, 0xddff4045, 0xd5e20d5a,
- 0x0b1a62ae, 0x25d7bb8c, 0xb250dcd8, 0x12c668f2, 0x7d6ea310, 0x59b19199,
- 0xaf9a7096, 0x630e7b62, 0xc17dfeb9, 0xab3f6a73, 0x8fb8c562, 0xd903f531,
- 0x8bfca1cb, 0xe21f7bca, 0x7ab52ff3, 0xf1192b8b, 0xfcbc2662, 0x84548b65,
- 0x05fbaeed, 0xfac05636, 0xac1a637e, 0x6ac9165f, 0xf4a17c71, 0xcf049b57,
- 0xb2101c57, 0x787b30b5, 0x1ee6b766, 0xd1169103, 0xd389b559, 0x75768ad9,
- 0x7fe02251, 0x2d408b45, 0x3357950e, 0x2f2abe1c, 0x80e6664d, 0xb6b656fd,
- 0x8eb0cc68, 0x09ce19a3, 0x539e1dfd, 0x5e78a59a, 0x7cb185b6, 0x89fe27c0,
- 0xa53e6bf8, 0x3f0037b9, 0x2e1cadd5, 0xbede56ce, 0x3b900338, 0x1362b699,
- 0x26dea0d2, 0x6b3ebd50, 0xaa35fcc2, 0xfcfe7ac0, 0x9a586935, 0x354c4e08,
- 0xf0025490, 0xa3d194dc, 0xd73bfc41, 0x5d42f3ca, 0x0d5eb01d, 0xde48e512,
- 0xf1c06a9e, 0x3caf1a66, 0xd146b677, 0x6c33af78, 0x3609bb03, 0x61506d51,
- 0x4434ef59, 0x56b3b960, 0x715950cc, 0x09166173, 0x1611dfe0, 0x6122c591,
- 0x3d5eaaca, 0x3b501240, 0xf6845870, 0x83fb48dc, 0x0c719f48, 0x33e802b8,
- 0x06057991, 0xf9dfdff4, 0x7fce2e59, 0x17df18cb, 0x5eaeb60c, 0x017e7d33,
- 0x8b26c3d0, 0x5f4c8e7c, 0xc8f1d22e, 0xb0af4043, 0x6cd5a7bf, 0xcf073e83,
- 0x42e6c257, 0x08c2cc3b, 0xb984dfcf, 0xafdfc0f7, 0xce226f31, 0x54de74cf,
- 0xcc9af71c, 0xc35a25a7, 0xf5f60106, 0x63fb149b, 0x053b8fb8, 0x116cf8f5,
- 0xc58d2071, 0xfe6afd7e, 0xc2796d9c, 0x4ffc03a6, 0xa367e8e7, 0x4062e645,
- 0xf1519aa2, 0xc163a406, 0x60ab6366, 0x0f3307fd, 0x2e80cbdd, 0x31d01e1e,
- 0x5eaf9c2d, 0x7d3dcfcf, 0x0094ec20, 0x2a8f46fd, 0xf4165916, 0x2958ab37,
- 0x29943ff4, 0x9fd8557a, 0x9fd8dce9, 0xbe6d0ae9, 0x8196590c, 0x7a88d0fc,
- 0x78eff51b, 0xca011bf3, 0xe1e2e944, 0xe2573848, 0x3d4b052f, 0x3c1b29f4,
- 0x85fff4fd, 0x5352f4b2, 0xfca1efcb, 0x19bda475, 0xb0727751, 0x501d94f8,
- 0xa3c9b0f6, 0xf9f264b3, 0xf1dff702, 0xa2226fab, 0xfabeff45, 0x7bef4e07,
- 0xcea58ad9, 0xe9b8c022, 0x9b57921d, 0xfa7ef975, 0xa9e361e3, 0x39e18fd4,
- 0x5ba20dfb, 0xe1ffb010, 0xd37f710f, 0x14af0675, 0x17d4e381, 0x38ff3a7c,
- 0x751d1e66, 0x284646fa, 0xb1913ef8, 0x8c3cdc58, 0x7068b81f, 0x6fb9c137,
- 0xa14ff991, 0x488b1eaf, 0xf191e9f8, 0xe991c0a4, 0x7e9122b2, 0x5e8ad5eb,
- 0x4cb385f0, 0xe8d67f3f, 0x5e0cff38, 0xff386d12, 0xcdd6b960, 0x8ec18a60,
- 0x5727d254, 0x007fa792, 0xd740ca79, 0xfb6a97bc, 0xf90fc233, 0x78fc074a,
- 0xc02f0c97, 0x25f9c33f, 0x64bbfe79, 0x1065ddb8, 0x127e9c39, 0x6e992702,
- 0xf650ba14, 0x5863982f, 0x9f1fb469, 0x47ddd7ec, 0x8f099323, 0xe383ffe3,
- 0xdfc213ef, 0x01d62737, 0xff51b11b, 0xfc784598, 0x579fc05f, 0xf80bfeb4,
- 0x9fc50eeb, 0xe23d7f57, 0xd2d171ef, 0x0880fc84, 0x26d2a1c8, 0x0658f9c2,
- 0x932ffe23, 0xe09b2cc2, 0xffe4767c, 0x8fef889b, 0xfc2bdf22, 0x37d8a63e,
- 0x49e4f51e, 0x7dc19ec6, 0xb50f44f9, 0x10a766a7, 0xbac75f9f, 0xf01db013,
- 0xf117379c, 0xe876d82e, 0xfb7d2c24, 0x5df5865e, 0x1ecb0615, 0x5bbf6f38,
- 0xfe61a974, 0x8ef72886, 0xc36eb0a5, 0x1716995d, 0xd59633b6, 0x3ca07286,
- 0xbe7a82cc, 0x85cb8a21, 0xa4e90586, 0x277e0f87, 0x73c2ad25, 0x34b3082c,
- 0x6c86ff41, 0xa01323df, 0x6cd630de, 0x57d74171, 0x1dbe9605, 0xd1d1cfc7,
- 0x38730d3a, 0x323c36dd, 0x057a8dda, 0x7af3cff4, 0x1e593fa8, 0x0d9cf0c6,
- 0x6884b68d, 0x2297cba0, 0x61b3a3ec, 0x4e24a3ff, 0xd0c90dd7, 0xa136e50e,
- 0x387dc164, 0x6fbe78df, 0x17b082df, 0xfa2cc2f7, 0xd0591034, 0xb195727e,
- 0x42a5e509, 0x73a57797, 0x75c63779, 0xc9c2076a, 0xd4659d35, 0x937ffcc2,
- 0xbb814be9, 0x556c6360, 0x0afe1fb4, 0x70c8c59d, 0x7a735617, 0xe5974d73,
- 0xd7733562, 0xe65f080b, 0x96f8070b, 0x42414169, 0xadd93469, 0xe5faefb8,
- 0xf7091780, 0x527fe1cf, 0x619ee564, 0x2847e7f8, 0x4c88f32c, 0x328fe404,
- 0x0ca3f9c6, 0x6fe908e9, 0x0051d015, 0x75df31fd, 0x65c3a751, 0x6bf1823a,
- 0x70d3258d, 0x3f85cfbd, 0x89e363e4, 0x87f34a7f, 0x3656675b, 0xf40b0397,
- 0x9fd899f6, 0x51148eed, 0xf130ae5f, 0x7f644ef5, 0xc5f51a36, 0x79bd5fc9,
- 0x23c7961d, 0x86cb6e4a, 0x7c16dbfe, 0xfce2845d, 0x353e096f, 0x0b6fe39e,
- 0xb468fffe, 0xbf56ca43, 0x43fef449, 0x192859f7, 0xaf4e5032, 0x15a67d04,
- 0xb074e4b0, 0x74463ef2, 0x896ac890, 0xfd65e36e, 0x47a30c05, 0x00f37539,
- 0xbcca8ed8, 0x9747fff0, 0xce896c74, 0xa2c69f5f, 0x7433ea82, 0x0bfd4109,
- 0xcf41c94d, 0x171f8d6f, 0x39a67cf4, 0x3b3ea83b, 0xff505263, 0x82d32ddb,
- 0x9c4e77ea, 0x8e7fd419, 0xdd504e6d, 0x1f244cc6, 0x738aea3a, 0xe1bb013f,
- 0xdc92bab5, 0xe397544f, 0xd0f6aa63, 0xe805b37e, 0x5f3bdb24, 0x91ea0935,
- 0x43265687, 0x11cb537d, 0xf43c4f5e, 0x66e03245, 0xf9d1cb73, 0x53dbd02a,
- 0x45f43c6f, 0x6d1fd40a, 0xa7cfe2e9, 0x40a362c0, 0x39bcb6ac, 0x9af805df,
- 0x8adf629b, 0xa9779af8, 0xf98a28f6, 0xbdccd7f7, 0xb942592f, 0x06746730,
- 0xc53727ec, 0xbf3e10fe, 0x759ef62b, 0x1ffb0091, 0x46ab5b7d, 0x5d98bf3c,
- 0xa1e915b2, 0xf3a722ff, 0x91978853, 0x96097eff, 0x8a61d903, 0x3c92bd9c,
- 0x03adf854, 0x1512c6f4, 0xa54749c2, 0xe544ceb7, 0x2a78baa1, 0xd999d48f,
- 0x57638012, 0xc795065d, 0x7ed42cea, 0x95226ebc, 0x546cea27, 0x4c575bbe,
- 0x095d7765, 0xb2ff0a95, 0x6a0d96bd, 0x2a974224, 0x4fceb4ea, 0x63d2578f,
- 0x69d92820, 0xf9d09f05, 0x39adbd4d, 0x9d75ce9b, 0x6959e909, 0x81f9da3f,
- 0x9dfa0459, 0x3d11d56a, 0x9b74354f, 0x475e5675, 0x22fbb8b9, 0xbc118af3,
- 0x5fb02a9f, 0xfd1b278c, 0x030ea998, 0x841b4bf5, 0xefd8597e, 0xf79404cd,
- 0x16356b53, 0x509e63b4, 0xf61e9733, 0x90df146f, 0x20e31d70, 0x05d86cca,
- 0x903ad518, 0x6ba07157, 0x63f439f1, 0x0f77283a, 0x4746f5f2, 0xf9472ed7,
- 0x60ccba34, 0x95f2e7a9, 0x08e24be4, 0x2fee026f, 0xda86a571, 0x198f628d,
- 0x9d5f60e5, 0x69d21677, 0x0ae72c81, 0x30ef5c34, 0x80bde411, 0x576dfb7c,
- 0xedf3da22, 0xd02f9edc, 0xfa44ec96, 0xfe7156ae, 0xf77a3165, 0xd5475c1e,
- 0xf42cf15d, 0xb44d89eb, 0xc568426c, 0x6069641f, 0x14d617b7, 0xeddca267,
- 0xaf885b61, 0x13fd5ec1, 0x338fdbd2, 0x9945df81, 0xf89e6048, 0x4da6f0e2,
- 0xd7adda02, 0xce5d21e7, 0x300f9ad5, 0xf65f07e0, 0xdcfbd100, 0x7a1e7348,
- 0xafecbe0a, 0x7cce3d79, 0x26e78040, 0x00bf3036, 0xe9cf55ea, 0xf0049f4d,
- 0x1513d3a9, 0x4b69af54, 0xc0127d30, 0x07f855a7, 0x09b747fa, 0x0ebb942a,
- 0x04a72b79, 0xb7c405fb, 0x0d73f8b3, 0x0680dfdf, 0xd98bd8ed, 0xb06fa266,
- 0x0fef4997, 0x9bba34cc, 0x2371bf60, 0xa057b36a, 0xce6994fd, 0xea497ec3,
- 0x63f40881, 0x1555bffa, 0x19efc9bb, 0x78c9f888, 0xd33d7d3f, 0x287b5121,
- 0x70e6b5dd, 0xcd26af7f, 0x50f119b0, 0x51d3af1d, 0x88b171d9, 0x92569d91,
- 0x4db97686, 0xd0cbfdc3, 0xb8737bf1, 0xaf5e491e, 0xa240b921, 0x0b6cb608,
- 0xc5dd4a63, 0xf8d63226, 0xbf21b08b, 0x34c9c007, 0xc83f6563, 0x41476235,
- 0x1b7f505a, 0x8e7a1ff6, 0x8ff31cf6, 0xbb49794f, 0x5e4aad63, 0x2965e90f,
- 0x183fa373, 0x72abc72a, 0xc0b965a3, 0x8c74fcf1, 0x9cf111be, 0xe77d0126,
- 0x504cc950, 0xb2338626, 0xd75f1337, 0x773e91bb, 0xa7fd39eb, 0x2ddcbc89,
- 0x309afef2, 0x577a42e6, 0xb94d87fc, 0x79b929f6, 0xe6978f34, 0x91b25a91,
- 0x879813ae, 0x24ec57d6, 0x43d81fa5, 0xaa02127a, 0xd7c47481, 0x161c4954,
- 0xa672279e, 0x448ce515, 0x4fb1b0fe, 0x5f77e403, 0x40aac478, 0x78e355fc,
- 0xe9b8e043, 0xef194e34, 0x4fb2255c, 0x245c9047, 0x89a6723a, 0x4b562fe4,
- 0xfe8088ec, 0xa0d6eb16, 0xf5c6539e, 0x14fc1c82, 0x9047f0f0, 0x087e588b,
- 0xffd710f2, 0x2c43c833, 0x10f20aff, 0x3c824fdb, 0xf207d2c4, 0x063fdb10,
- 0xbce58879, 0xe4568dbb, 0x69b69a9f, 0x7e20d3ec, 0x017ddb50, 0xc369def5,
- 0xa64391d3, 0xe1e4dea3, 0xe1cbaf9f, 0x2def457e, 0xa0fd9f1c, 0x00ffd1bf,
- 0x8a6b5cba, 0xef1eb2be, 0x1ef9b237, 0x7cb6d380, 0x27f4876e, 0x1cd2faf0,
- 0x226dd535, 0xb7e38edb, 0xfaf86be5, 0x3e396229, 0x79f345b7, 0xb245d37b,
- 0x9e4ea69f, 0x9e0724b6, 0x3da162db, 0xbdef5fc7, 0x8db73f81, 0x7a43ede2,
- 0x253e7e56, 0xeff8a4d7, 0x652fdce9, 0xbd012764, 0xff4afcd3, 0x3e7226f7,
- 0x4eefed0c, 0xc3dd8b13, 0x992de3c0, 0xd02f896f, 0x483e469e, 0x6a8be00e,
- 0x4285f133, 0x55fd0a87, 0xa73872e5, 0x073a6569, 0x43f042dd, 0x3a72831d,
- 0x25f64f4e, 0xe0c6c5c0, 0x9905bb3c, 0x01f9425f, 0x0a07d44a, 0xa07c283f,
- 0xf0227bd0, 0x3fed1099, 0xf421cdc7, 0xca8f94aa, 0x358ee8e7, 0xdaf09cfe,
- 0xdfa136a1, 0x87cc3377, 0xc1c513e3, 0xfe46ef77, 0xd19a358c, 0xc87c2cf5,
- 0x1c9c3b50, 0xc2effaeb, 0x570b9143, 0x7065c780, 0x5ff2f0d0, 0x39c90385,
- 0xbf48e394, 0xfdadc8c3, 0x5db2d139, 0x44f7e9ce, 0xd21f6d8e, 0x57a0bf51,
- 0xafd0dfa1, 0xd9dac367, 0x7f39f2db, 0x05547428, 0x89eb103a, 0xcbd4ce78,
- 0x9e729ee5, 0x632e73a1, 0xc2d2ff24, 0x9e287b78, 0xc8057395, 0x0139c3bf,
- 0xb8c61ff1, 0xd81d717d, 0x4d4fe817, 0x3ed335c9, 0x567e47fa, 0xcff995b6,
- 0x6f0e477f, 0x88944a7f, 0x32fd141e, 0x8b482ed4, 0xe90664e6, 0x3f42661d,
- 0xfc4b53af, 0xe67f05ee, 0x3e871825, 0x3fe49b21, 0x71e320bf, 0x1158fe70,
- 0xafbebbed, 0x63d42e0d, 0x4e06a37d, 0x6fbe300a, 0x6436183b, 0x136ed897,
- 0x07deef40, 0xd4f5063a, 0x2c7b9005, 0x7273b19d, 0x73cfa11c, 0x73fa24f1,
- 0xa6243b35, 0x3b4a5003, 0xf4879abe, 0x6d53b4b4, 0xf6d0b8a2, 0x3d218ec7,
- 0xcc156e9e, 0xb5f49768, 0xbfa1f1c2, 0xd6e8c1b3, 0xa822ff43, 0x0665ff63,
- 0xd7ccdb8d, 0x4383f8a3, 0x7d53ef5e, 0x85c60a75, 0x1350ec66, 0x2f3e97dc,
- 0x3b3ed1b9, 0x0aa57dbc, 0x02dda7eb, 0x4571838b, 0x04aeead2, 0x2d3e57a8,
- 0xce30b458, 0x8b3a2861, 0xe7ca82f1, 0xf30bc599, 0xc918b657, 0xa0da5dcf,
- 0x6017d8fe, 0xb7bb945f, 0x29eed06a, 0x5edc19df, 0x0eb83bb9, 0xeff922f8,
- 0x00f68668, 0x8e6dfcf9, 0xcdec8631, 0xed1b2c71, 0xe3c0d64d, 0xbf7c8ab9,
- 0x71756edc, 0xcb82ba39, 0x8cbec66d, 0xea5f7f45, 0x63e92afc, 0x91cfaf03,
- 0xfea0a7eb, 0x2f1c57f9, 0xe5aa5c0a, 0x604f3fb5, 0xb79c0f56, 0x426f53ba,
- 0xfdbabfff, 0xb71811ef, 0x51f309af, 0x22bd9f7c, 0xdbded099, 0x10b926d8,
- 0xfd0ecebe, 0x56e0112e, 0x8d3de07d, 0x5f7088d9, 0xadcaf64e, 0xe31eb5cc,
- 0x64515edc, 0xdd4744dd, 0xb666a714, 0x4f38a581, 0x84debb5f, 0x2bf54076,
- 0x8ddd741f, 0xd30203f5, 0xfb471e23, 0xee964e88, 0x2da58640, 0xfbe426dd,
- 0xbc4244e9, 0x913d9e22, 0x8865af84, 0xe033c477, 0x212cf11d, 0x51bc713e,
- 0x2274e6e3, 0x73f4fd51, 0xde0abf1c, 0x1acda48b, 0xe46caf6c, 0x63b19e78,
- 0xa230b6cf, 0xe9933503, 0x7a7ef080, 0xa6bbc727, 0x14669d73, 0x08636fd2,
- 0x876bb41d, 0x77f700e8, 0xe0841d19, 0x8df002bf, 0x0e942072, 0x93dff142,
- 0xd806e5c3, 0x429fa7df, 0x7e08dbfb, 0x23c7f301, 0x1ef55646, 0xcaf5818d,
- 0x973bcfa2, 0xf8d8fb43, 0x08da5897, 0xe4157f8a, 0xe66bfc62, 0x5d10eb3b,
- 0xc85eb33d, 0x57a97a46, 0x813cf6fb, 0x70a88b71, 0x27f4e38a, 0x32f9d9cf,
- 0xfa20bb9c, 0xd816a49b, 0x5a6bcd7f, 0xc928deba, 0x35ec8715, 0xf129978c,
- 0xc533c074, 0x5a1d9cc9, 0x2d6b171e, 0x73fa05b4, 0x5cfc09fd, 0xb2819a85,
- 0xc017e8e5, 0x608eb87c, 0x555e7ee3, 0x28178f07, 0x1295597e, 0x8e0e6837,
- 0x77a6081b, 0x2e455fa8, 0xc6a55f8f, 0x3b466cdd, 0xb3017ebc, 0x4d5ea587,
- 0xcc78f03f, 0x74dfb2d3, 0x0af1fb45, 0x1bb1427a, 0x6e382b99, 0x65a9c8a1,
- 0x8f1ef913, 0x6ab8e2fe, 0x76fdc604, 0x8a8f840b, 0x704f900e, 0xdab70ade,
- 0xf802166d, 0x19ffee49, 0xedc01bd2, 0xa0f6ab8c, 0x19dd2128, 0x990ebf68,
- 0x8e91ab3e, 0xd695bce0, 0x9cf02237, 0x38ae590e, 0x7f148dda, 0x8a5b64ab,
- 0x612aee90, 0xf430ef3d, 0x0b5cf954, 0xa2d729f9, 0xaccec527, 0xb7ee0d6e,
- 0xce487731, 0xe01eff8c, 0x396401bb, 0x992e7bd7, 0xf12332fe, 0x99906fde,
- 0x879f9123, 0xedc09ff4, 0x5cfe4537, 0x3c581dbf, 0x6e71de60, 0xac05531d,
- 0xa673e37f, 0x4f78faa0, 0x9bff507c, 0xcf41ccda, 0x4119bdb3, 0x598f73cf,
- 0xdd79ea82, 0x4ffa8313, 0x5416d0f8, 0x0e2be49f, 0x4ce53fea, 0x307d5049,
- 0x9573ce13, 0xe35bc1fb, 0x33fea085, 0xf9a0facd, 0x05446767, 0x320d07d5,
- 0x52bb647c, 0xce5f77b1, 0x686ef6e5, 0x76f7c0a9, 0x8a3af04b, 0xc4e77ebf,
- 0xd8e6f5e0, 0xa1fbd782, 0xfa0bd978, 0xc27e0554, 0xb15fa073, 0x13f81dfc,
- 0x9a13f02a, 0xfac09fc1, 0x604fe08b, 0x027f01e9, 0x7f025fdb, 0xe0adeb02,
- 0x20fd604f, 0x6f583ff8, 0xf2a62bab, 0x6a12ba95, 0xbafc16bf, 0xc98f75e4,
- 0xd7971eeb, 0x74e177fd, 0x5def9e42, 0x9c6ebe79, 0xdcffcd2f, 0xbc563c59,
- 0x2d3cfc04, 0x87c6acfa, 0x5f0ac37e, 0x062dc611, 0xc61892de, 0x7ddd99a5,
- 0xf5062eac, 0x338a08d9, 0xb78192b3, 0x7d52ae31, 0xc9b4d520, 0x0fecfa8c,
- 0xbef3e2ef, 0x7c8cc956, 0xefeda879, 0x85cf3811, 0x5f74614b, 0xef107a4a,
- 0x5461bf1b, 0x31cf04df, 0x05b33a0e, 0x37e90804, 0x0bb7e90f, 0x4ddd4a69,
- 0xcd25bdf7, 0x53c41a2d, 0x3c29f215, 0x0ea3cb7f, 0x635cfe7e, 0xf61373d0,
- 0xe54ab287, 0xb9c238a6, 0x2f3e6536, 0xf1a477f5, 0xcf73cf7b, 0x053f5e1d,
- 0xe2cb6ff5, 0x5e7f7811, 0xec5efcd5, 0xc7c157bd, 0xc85f07e7, 0x76cafba4,
- 0x9fe0cf98, 0x96aed9cf, 0x8eff7ceb, 0x68a296b4, 0x02cd1c54, 0x05c50b1b,
- 0x5fee85b6, 0x38f6daaa, 0x65556e50, 0x0066addc, 0x248f7e9f, 0xbdf837c7,
- 0xb8c4c391, 0x22f9e71d, 0x5f57ef02, 0xc3bd1cf7, 0x38edf886, 0xf6eb811c,
- 0xfe414ab7, 0xb72b68d3, 0x355b478f, 0xad0bbf84, 0x5ef02387, 0x30e7a377,
- 0x4bcabb87, 0xd7243fe7, 0x7824a1fb, 0xfe7449b7, 0xd0624b8a, 0x5586763d,
- 0xb66679a2, 0xb9e2358d, 0xbd7c3c7a, 0xb799ab1c, 0x825e2da7, 0xbf1e3ffb,
- 0x7b224f20, 0xca350410, 0xef661bf1, 0xcaa43b41, 0xf386d923, 0x79e40aef,
- 0x4e9cd4bb, 0xdfdb4adf, 0x78f8f285, 0xd26c88f1, 0x699f8a11, 0x916f76e5,
- 0x7a869ec6, 0x3660c7a4, 0x583f9d22, 0xfc446a9c, 0xdb12c21e, 0xec69778b,
- 0xe6c7bc06, 0xba98f5eb, 0x7aee9023, 0xb425735a, 0xf2f99477, 0xe88775e5,
- 0x2e6f087b, 0xbbb953c2, 0xf8f380d1, 0x73dbc7f1, 0x7fea26ac, 0x3cdefddc,
- 0x327aabb4, 0xe92ec9c2, 0xa7f230d2, 0x5db99aab, 0xd891d3dc, 0x924f7818,
- 0xf2fd9563, 0xdaf0910c, 0xfb96a6d7, 0x0587bddd, 0x39d353f5, 0xde0eec2f,
- 0xea7dc98f, 0x1dc7b451, 0xf50c4b4f, 0x1b5d43a2, 0x6cffe78b, 0xcafef067,
- 0xd43b3865, 0x00d8d8de, 0x0fb4757a, 0xe8818df1, 0x13e15dbc, 0xc153e133,
- 0xb7064f63, 0xf9e27ae7, 0x65a3a4bd, 0xe5f5d10f, 0x64eea4f1, 0x7f121ff4,
- 0xe778a3a9, 0xc567fcb5, 0xaff0086e, 0xea8bfa18, 0x313cd4e2, 0xbc1f6fc5,
- 0xb6ab6b96, 0xabbf448e, 0x1ecafc84, 0xac2cf606, 0x3a7b44e9, 0xb61ff292,
- 0xad83bae1, 0x76bfba6a, 0x7d66daea, 0xa0d97602, 0xf6051805, 0x2d53b83d,
- 0x0cd97bdf, 0x952ed768, 0xcccfdaed, 0xf4097cf6, 0x488d73af, 0x866473e7,
- 0x27d31f91, 0x5bcfbdcb, 0xbb24ef92, 0x426498e1, 0x8786687f, 0x668ec1db,
- 0x7b56d76e, 0x2a63ff92, 0x6acdedda, 0x8a60d4cc, 0xed94e21d, 0xddb2d390,
- 0x80cd7a7b, 0x4e9bfc86, 0x8c963d81, 0xbb5b2a79, 0x33b9e112, 0xb3d91673,
- 0xec99a94e, 0x83db6589, 0x6fb007ed, 0xcf55ea82, 0x7217da85, 0xdc13cb7c,
- 0xdb87f8ae, 0xc67643ac, 0x0a67f438, 0xc871a9a5, 0x78c82f0f, 0x959bfc55,
- 0xb7942de3, 0x9e6551b3, 0x1021c4a5, 0xfb9d355e, 0x6edf702b, 0xfc859847,
- 0x10af558d, 0xe4bffb5c, 0x3c7f4c7e, 0x33f47823, 0x36fedf0e, 0x6a35e74e,
- 0x48f98dc1, 0x34b64035, 0xaf704e9f, 0x6bb338c1, 0x13e48230, 0x17c8c563,
- 0x7f70162b, 0x5f3186d5, 0x7e156b28, 0xb7abd108, 0x54527ca8, 0xb3689c80,
- 0x02bf50a6, 0x3ea3b30d, 0x90b4695b, 0xe3a31b7e, 0x6ae17a76, 0xc3d2364f,
- 0xb5c7f018, 0x31f8f101, 0xfb010186, 0xc7807eed, 0xc27895ff, 0x1d49951c,
- 0x191fa015, 0x95fd435b, 0xf0a241f9, 0xdf9ad7f1, 0xfc1bf304, 0x23bc03f3,
- 0x585fde11, 0x93b7a42d, 0xfff728e6, 0xca97c4b5, 0xbd002e79, 0x63181c61,
- 0xc7378834, 0x9fc837ce, 0xf5eeb273, 0x8228ae38, 0xf708a3ef, 0x5e30f583,
- 0xe79651fe, 0xbcbc79b1, 0xf9152e4d, 0x7b6790d5, 0x95f50adf, 0x593ff679,
- 0x8899ae49, 0xb4ca573e, 0x9c4cf602, 0xbbf9186f, 0xf0891de2, 0x03f005fa,
- 0x5ac2fbf2, 0x64169cc1, 0xaf3a712f, 0x3cec4de1, 0x454ef4f3, 0xdd6cb838,
- 0xc6d79819, 0x5a2d7f38, 0x05a737a5, 0xbd2007de, 0x0251ffa3, 0x2fd1f2ff,
- 0x5372bfe3, 0x57946dc5, 0x8cd62ab5, 0xc83517f7, 0xa6d7290f, 0x87ba50ff,
- 0xb7a44edf, 0xa0b5a66a, 0x0b69a97e, 0xa1516e7d, 0x084dfe3d, 0x74c9245f,
- 0xe81768da, 0x5bf1bd7e, 0x95a38f15, 0xbb37140a, 0x4e911a0b, 0xcafd87e9,
- 0x7e71b9a7, 0xfba0b3cc, 0x391886ae, 0xee862bef, 0x9f227ee1, 0xf7952eff,
- 0xa3efe40d, 0xc6324f6a, 0xf93a8e71, 0xe602d53d, 0x52dd7ba1, 0x6f654dd6,
- 0x2f01f578, 0x8d1aee4d, 0x7607f87e, 0x1a91c52d, 0x511936d7, 0xb5b166ce,
- 0x031fbc26, 0xfbee7936, 0x1bdb2bdc, 0x9468f7a1, 0xc7e7959f, 0x92bccaf3,
- 0x83a03cf8, 0xf8a38033, 0x57c7cb9c, 0x02207fbd, 0xfefe7ee1, 0xca7ef3fd,
- 0xb9020fd0, 0xd654f305, 0xbdd064b6, 0x67758b8c, 0xabf1fbe4, 0x1fc153e0,
- 0xf08399b4, 0x0eb9670a, 0x0d9f33e0, 0xd36e63e4, 0xc0a9f0b5, 0xf03d4939,
- 0xe8250463, 0xa1439231, 0x5ee05678, 0x3cd56acd, 0xeefb56fe, 0xb3ffe802,
- 0xed19a2b5, 0x7ef0cbca, 0xf578fc0d, 0x28d791fc, 0x257b3f90, 0x5a78297c,
- 0xff54bcc8, 0xfb8f8a30, 0xf144a170, 0x7c69ffc3, 0x4bd9e435, 0xb7c2aef2,
- 0x257a7ca1, 0x2ab45cbe, 0xf8437f84, 0x2d2f8874, 0x7f002ff0, 0xc41155f6,
- 0x1d7e0a8f, 0x5193f066, 0x2d1875ff, 0x3e39766e, 0xfdc5dfd1, 0xec0ab654,
- 0xf087e149, 0xe3fc8fab, 0xad017c50, 0x541f50d8, 0xa9f305e7, 0xc92bc782,
- 0x7dee452d, 0x4d47bcc4, 0x7067dd02, 0x9685fe3e, 0xfb94e9f2, 0xe2dee50c,
- 0x6fae3cd1, 0xc08fcb42, 0xa27bc16e, 0x233d194d, 0xd5ebded1, 0x7bf0f328,
- 0x74d68fd7, 0xcd7cc68f, 0xb2f1a68f, 0x4cf3c357, 0x8c2dba94, 0x4a3f03c7,
- 0x6e10bad0, 0xa6e3091f, 0x91f01da3, 0x783bcbf0, 0xc6634e3d, 0xe9b882fa,
- 0xaaf7a826, 0x83e1f895, 0x5376fcb2, 0x0982d1f9, 0x604fd405, 0x661e1047,
- 0x3ad09581, 0x0c5d1082, 0xe7e8f9fb, 0x5ef07363, 0x03ff3940, 0xf48e3c79,
- 0x431cf91b, 0x095ff6f1, 0x045f6f14, 0xf784c3aa, 0x289fd302, 0x93f6814e,
- 0xa6cd4ebf, 0xc115cafb, 0x4dfea3eb, 0x56e538a6, 0x4b96e79a, 0x99c5ea03,
- 0xe90d7dfe, 0xbc8b82cd, 0xa0d8c97d, 0xf45b667c, 0xbb27ee38, 0x8f512353,
- 0xf06d7a29, 0x60437ed8, 0x1cacf51c, 0x8faa1e77, 0x5029247b, 0xf7b1be2f,
- 0xa79c74e1, 0xb2cae35c, 0xf98f3018, 0x1fb424a9, 0x943ef7ca, 0xdeed764e,
- 0x9fe8e98d, 0x9c3d3794, 0xb87aa36e, 0x1523fc9d, 0x7df9f73f, 0x0a6d7693,
- 0x9d6fa3b0, 0x52fbedc0, 0x68851bdd, 0xf1bacedf, 0xb71875f3, 0x816bfb1f,
- 0x590b70e2, 0xb7d43af7, 0xaf9c1965, 0xe48e8358, 0xf747cc3f, 0xddc67ba4,
- 0xfee51bbe, 0xabbf8cf1, 0xcdfea097, 0x657cd153, 0x0e39bdcc, 0x041b6fe3,
- 0xfbf9bf8a, 0xd77ba68f, 0x4d056bc5, 0x51ea0c7c, 0xc7fd779e, 0xe38228bc,
- 0xbbb21b3d, 0x356cbdb0, 0x75a59f6f, 0x3ce0f6b7, 0xcf28684f, 0x4a65140a,
- 0xf63dc049, 0x3df8f31f, 0x72cda2dd, 0xcebcb1be, 0x8db16dd8, 0xa3fce781,
- 0xfe567f8c, 0x3b83c901, 0xb6dcbc65, 0xf123bdfa, 0xdaa46f30, 0x2b9467fe,
- 0x3a7e3eef, 0x4159e50d, 0xa44cd9f4, 0xfb1ab1f3, 0xee781593, 0x2bc52d26,
- 0x528959ad, 0xb9cff41c, 0x5863da7f, 0xf8bd5a2e, 0xfb8c0ac9, 0x1f91d76e,
- 0x856feca6, 0x5fd11660, 0xd3f8dc3d, 0x676e107b, 0xa479e71e, 0x8faf1a2b,
- 0xe2fe5abb, 0x1f3c75b3, 0x1d3af9fc, 0x99d42f95, 0xb88e1998, 0xea9e2eaf,
- 0x5333afbf, 0xfa73e6c9, 0xaea6f252, 0xa5ee5f34, 0xb65bca30, 0x1e51d06e,
- 0xf6d6ae7d, 0x6bc01e55, 0xa6ce0dec, 0xc6b9f28d, 0x576cf8c2, 0x83840cf2,
- 0x2ef2e375, 0xe54f6134, 0xedeaff71, 0x64490d9e, 0xf53eaf87, 0xcf3c054c,
- 0x0ef92fcc, 0x5e76ebc7, 0xe1c178a4, 0x10ca87f6, 0x5a4de5de, 0xdcf93e7e,
- 0x8f4f1e67, 0xbce3127b, 0x7fc9a96f, 0xea79d9af, 0xde508d99, 0x26b979bb,
- 0xe87fde90, 0xf795bf79, 0xbabe2d73, 0x75fe1c12, 0x89387abe, 0xf809a7f8,
- 0xe7dff32a, 0x6bd59aab, 0xf3737e08, 0xc3e6c64b, 0x5da8cffe, 0x674fc849,
- 0x744cddc6, 0x0814eaee, 0x571c8afe, 0xf3e6a7c6, 0xd12ae8fb, 0x48ec99b3,
- 0x057f8e59, 0xf7c2239e, 0xd2cff68d, 0xaafe8ed1, 0xcf55c16d, 0x3d0eb07d,
- 0xe99b8c22, 0x9e7d0ca8, 0x62a7b1af, 0xc8be715e, 0xe7d0e7ed, 0x712be7b7,
- 0xc5b71e78, 0xa790109f, 0x458d84e2, 0x13fea346, 0x972061bc, 0x9eb76d9d,
- 0x06ffa155, 0xc1c7e17e, 0x38fb2876, 0x695faf40, 0xab2ad7bf, 0x5f951e71,
- 0x50f84eee, 0x3d00de64, 0xc617c93e, 0x0eeb01bc, 0xa6d02bf9, 0x049efba1,
- 0x37880b92, 0x6bea87cc, 0xf584d71f, 0xc78e1ab3, 0x97e083be, 0x930cb8f1,
- 0x3ce7fdf2, 0x0b3e7edb, 0x79e86ce5, 0x4f395fab, 0x7b68e1f4, 0xe740a2e8,
- 0x9556799f, 0xbff3ed75, 0x167cf7b1, 0xdaacefe2, 0x57fe8f97, 0x8597c69f,
- 0x957ea878, 0x157ff3cb, 0xc57b4a0e, 0x138f0d06, 0x23f2260a, 0x140ba50b,
- 0xc63be807, 0xedd500e3, 0x0e9e48bb, 0x4c78f076, 0x145dd88d, 0xda8bf187,
- 0xcf285d53, 0x5fb9e306, 0x28dbf306, 0xd65521cf, 0xa481aa83, 0x1ed72039,
- 0xee07a219, 0xfe419e0f, 0x1e3ce0d5, 0x6561e507, 0x2bb43385, 0x433f21ce,
- 0xe4aad97b, 0x67e748f5, 0x507323dc, 0xf59dbe3e, 0xdf8a6a8f, 0x926e1ebd,
- 0x30958ec8, 0x4e0878c7, 0x4b70e743, 0x2987d13c, 0xb3387176, 0xd1abf405,
- 0xbf24ef98, 0x9fbf38fa, 0xf381b9ab, 0xf3857b13, 0x1bed7393, 0xf980f89a,
- 0x93f3257e, 0x7ace4f62, 0x97c41ffd, 0x9edae70f, 0x470cbe40, 0xc5f38859,
- 0xb51e5247, 0x72871ef2, 0x90e8691f, 0x8962f8fd, 0x49b4d79e, 0x58e5ef0c,
- 0x0af291bc, 0xca16d98a, 0x3cc59cb7, 0x8f5e7953, 0xf029d1ef, 0x52ebf67c,
- 0x81a1e62e, 0xdbf74d3d, 0x34f66145, 0x15ead3cc, 0x1eae1905, 0x7d009ceb,
- 0xf73e7ef6, 0x024aa757, 0x8481b1e9, 0x6653b270, 0xe29677ce, 0x5a7e957e,
- 0x9833eb91, 0xe62cec87, 0x7bc2c81e, 0xc81de655, 0xa556fbc2, 0xc3d7bcdf,
- 0x322f496b, 0xc05e758f, 0x79f3a73a, 0xbc0bf3bc, 0x5279071f, 0x16c3df23,
- 0x436edf4a, 0x27456b7f, 0x15ec7fbf, 0x3cc03f93, 0x19c01733, 0xf01e7af1,
- 0x63fd436a, 0xf21b3667, 0x6e91f8cf, 0x7d9a9fe4, 0xbd7cc302, 0x136d76ea,
- 0xd5aa85c6, 0xe7a458be, 0x6bef6d8d, 0xf06b9e90, 0x8e3c765e, 0xdcbc7267,
- 0x35bf7944, 0xf5efc626, 0x00ffaeda, 0x63ce76e3, 0x553a8b5c, 0x003c51ef,
- 0x57ae36e5, 0xfe42dad5, 0xc97983bf, 0x073477ff, 0x3c60cdd3, 0xf287e78f,
- 0x737ce9c9, 0xdccc7f10, 0x1e63f9ce, 0x681b73e6, 0x675fc538, 0x5d91c7c6,
- 0xf1c71fe2, 0xafce9360, 0x3fa63b43, 0xfc7ca045, 0x5ad730fe, 0xe95c8fb4,
- 0x7fe9aedc, 0xbd2b05ed, 0x5b73a3f7, 0x3cf98c7f, 0x733bddb2, 0x149556c3,
- 0x1d999fb0, 0x1fd8de3c, 0x120fcac6, 0x21d7edde, 0x25b97c3e, 0xbe31f267,
- 0x777819a3, 0x4483c60a, 0x3d0d1f1e, 0x6b8e5d87, 0xfdfc671c, 0xb3df6528,
- 0x3cb91313, 0xa3a26e63, 0x5bbf392e, 0xc651d7c9, 0x504132df, 0x9fb8d1ee,
- 0xfaae5824, 0xd7de1a5e, 0xde2b9e60, 0x3bf960d7, 0xef161f39, 0xa2feb06b,
- 0xf78b0f9c, 0xf78ed835, 0x956f5835, 0xbef161f3, 0x5f78eb06, 0x722be583,
- 0xdc71ec3e, 0x6df1e52a, 0xa2e90a7c, 0x946d790b, 0x1d1bdebf, 0xdc151f52,
- 0x085a37bf, 0xa8128fae, 0xc6c269bf, 0x78ff30a7, 0xc52f9293, 0xc3b446cc,
- 0xfbf9c22a, 0x7b06a91a, 0xf3f404ec, 0x45ed778b, 0x9bf81e7f, 0x882b9fde,
- 0xef4a505e, 0x1968c497, 0x27dba3ca, 0x804f47d8, 0xcd8ec7f1, 0x724adb48,
- 0xecf74f52, 0x31bb3fbd, 0x6f7ae292, 0x25824c49, 0xaee465a7, 0x9a7d42b5,
- 0x85fa2041, 0x512a6ef9, 0x67984cfc, 0xab62c746, 0x1307cb8a, 0x9285cbda,
- 0xb3ae0963, 0x8b9f4122, 0xd588f872, 0x51fb819e, 0x6a345de0, 0x45da1a59,
- 0x07f36a5d, 0xbd759f18, 0x57e866a3, 0xa53c7129, 0x69d693df, 0x99bfb8f9,
- 0x736ba919, 0x36607e70, 0x0d367794, 0xd9fdc66b, 0x9ea170c4, 0x855997e4,
- 0x027f20f2, 0x66e99ba7, 0x357b1f6e, 0xfa345566, 0x62d361f5, 0x4bb75ef0,
- 0x1931f3c6, 0x3c65c7cf, 0x81573de3, 0x6173df8a, 0x585cf789, 0x2da3e686,
- 0x4db12fcf, 0x240f7820, 0xcdb3e605, 0x0097df92, 0xd4f7a10a, 0xf31c729c,
- 0x34ee594b, 0xcadc95e6, 0x9e6879c7, 0x778e392a, 0x6f7c1d5e, 0x8ff38ca8,
- 0xbdce5467, 0x166bcb89, 0xbf79e1a9, 0xe61731d7, 0x397dcf15, 0x7d50e281,
- 0x06a5d16e, 0x5e05cfbc, 0x5ec67947, 0x8b2efee6, 0x77337ce7, 0xabf51c7f,
- 0xcea9e397, 0x7c74eee8, 0xe64fa3af, 0xf07ea52f, 0xe4979e3b, 0x81ba81cc,
- 0xf3a123de, 0xa776e739, 0x4fbd4147, 0x5f488bd0, 0xbcfe26ce, 0x673fe647,
- 0x7c9f3c8a, 0x24f3a61e, 0x3bfa3e21, 0xf7c24e5f, 0xbc193710, 0xf7fcb94b,
- 0x66dc93a6, 0x6fcc74de, 0x4e782f3a, 0xadf1be62, 0x98a5296d, 0xed5e7f6f,
- 0xb3eb0679, 0x5d78d2db, 0x0a693968, 0x93d23e7c, 0xb7c8f984, 0x6296a5b6,
- 0x91f9f23e, 0x5944cf2d, 0x97eaea02, 0xcd73e24e, 0x98a56983, 0xeb5b3c9f,
- 0x7487563e, 0x5f4e7bf3, 0x4e077dfa, 0xdaaa7c23, 0x7d3efdb9, 0x2f0b81df,
- 0x603e7ea1, 0x7582bca4, 0xdf099213, 0x6ecfedf5, 0x6beb7a46, 0x6c3f1351,
- 0x99ff7e6a, 0x2d3f50df, 0x6b665985, 0xc90c563d, 0xba1db494, 0x3b35f71b,
- 0xd625f3dc, 0x89bc4f17, 0x047b31f6, 0x13a5ebe7, 0x4f914a73, 0xfbec0efb,
- 0xd9b3ea05, 0xe2d86091, 0x13d05e7e, 0x8ff72a7e, 0x40b8c02b, 0x0cbbb19f,
- 0x9dfeafe7, 0xec1fe796, 0x718e9b8f, 0xc09cff82, 0x6e2a31f3, 0xfa8492d8,
- 0xf9e57c56, 0xf8c1bfbd, 0x74f4efe6, 0xcd1353df, 0xa94abded, 0xe1397f31,
- 0xcc5ed76f, 0x5e7be95b, 0x15aff74f, 0xe2b39318, 0x2963ac7b, 0x3feaff3e,
- 0x78865eff, 0xee769428, 0x772ff6c5, 0x438445eb, 0x7916c68f, 0x8f61f233,
- 0x0acefc9a, 0x3bd15eb9, 0xdc27ef82, 0x8bf84457, 0xfa8492d9, 0xafc472b6,
- 0xcf7dc0ee, 0x833cb696, 0x567b0f7e, 0x670e774b, 0xc1b8079f, 0xd47bb3b8,
- 0xfb034998, 0x5e90aa30, 0xca5b0bce, 0xf7f9c49e, 0xf5e77ef0, 0xae9e085b,
- 0x5c33d73d, 0xd431fa0f, 0xad85e7c7, 0xf767ed41, 0x7e859b3c, 0xcddfb9e9,
- 0xa154ff26, 0x2a5f8573, 0xf4836b77, 0x58824922, 0x3fca5b8c, 0x02198b93,
- 0x0ee759e2, 0x927be8ee, 0x6b9fc7f9, 0x00357b56, 0x5b2d0aa3, 0x0acdf98b,
- 0xde26543f, 0xabe78c5a, 0xd1d31b14, 0xf5c8a97e, 0x9c5f2195, 0x3c5d33d5,
- 0xd65b7bf4, 0x6bad955b, 0x7c1da0e6, 0x45fcfda5, 0xc95f4796, 0x9b55f87c,
- 0xdf781dde, 0xb5fe5a18, 0xa84f998e, 0xb585f5fc, 0x95e5b25a, 0x78d6def9,
- 0x55afe81c, 0x71f97347, 0x8337fa12, 0xac93fb1c, 0x0aad16ef, 0x04eeffee,
- 0xad594f9e, 0xfea3a5f1, 0x7b7fe653, 0x87db8982, 0xca3a5f2a, 0xb371d0ab,
- 0xbcde5925, 0xa9bc9020, 0xd14e156c, 0xd20824fb, 0x2b3c3f58, 0xe6372e75,
- 0x87870d13, 0x0f197cf8, 0x70f1e612, 0xd66caabe, 0x4f30f4db, 0x022f9855,
- 0xd18725ed, 0x50768117, 0x6b122bdf, 0xa317d192, 0x5fe29fcc, 0x5653894f,
- 0xfd36d478, 0x71c1eb70, 0x865e4fd0, 0x5628037f, 0x537ce44f, 0xeb4e16de,
- 0x1f2f9331, 0x677cb6e9, 0xa73c38f0, 0xfb1f8029, 0x06c576da, 0xf70cebfa,
- 0xa751f74d, 0xfffa6f88, 0x07bd31f9, 0xb5b2295e, 0x4e794f78, 0x4083efe0,
- 0x3907de9d, 0x5677df27, 0x43086d7e, 0x95f739fa, 0x4fb8f883, 0x7b9c91f0,
- 0x7246cdb1, 0x23ed4fac, 0x626bb739, 0xf947af11, 0xbd6e50ca, 0x24a7f138,
- 0x7c12fa3f, 0xd29ef865, 0x2f8ea566, 0xd01d81cc, 0xe490b6d1, 0xe7896cc9,
- 0x6a5db051, 0x71138f13, 0xe1932f78, 0xde86d8c4, 0xd443f23a, 0x3a97b466,
- 0x42f72a27, 0x25f9ff1a, 0x31f53f3f, 0xe456bef3, 0xc50372cf, 0x6abec07d,
- 0xc0ed097e, 0x8c16b105, 0x4dc287a7, 0x898c5fb9, 0xe22966fe, 0x7da31de9,
- 0xbc3cc4ec, 0x9f91f4cf, 0xf64ed401, 0x5a6bf57c, 0xd5fb37f2, 0x7e517b03,
- 0x71f68db9, 0x0e9e0113, 0x547af74d, 0xbed41771, 0x67ed342e, 0x1c91bd21,
- 0x97d6f4e1, 0x07bf8bb9, 0x4e4fdf22, 0xfa00f192, 0x7d22358c, 0x87cf1c08,
- 0x409ff6db, 0x9dd7d379, 0x943a724a, 0x3db6ce9f, 0xe3728116, 0xaeefd043,
- 0x87c07880, 0x7ca59f8b, 0x0798f980, 0x6b3de502, 0xf837cf83, 0x2fc98fed,
- 0xda039b64, 0x821b1e91, 0x4170a1eb, 0xf106f4ba, 0x7df68b81, 0xc01bf23f,
- 0x777a6aa7, 0xa861c235, 0x52ea173e, 0x4850fd40, 0x2b3fa43d, 0xfdc3f6e1,
- 0xebcd1813, 0x25eded18, 0xcf1f7e3f, 0xbd07adef, 0xe103f546, 0xee2d3f74,
- 0xc315bd03, 0x8bea16f5, 0x8a6e88aa, 0xd01df55f, 0x83cabdd2, 0xe1259fed,
- 0x291e81f9, 0xfed5534e, 0xf7fa2c72, 0x699826eb, 0xb18fd07e, 0xa0731794,
- 0x11fd163d, 0x7a21dda5, 0x0cf7e5d2, 0x287e50b2, 0x9bc94eed, 0x42e5794e,
- 0x5f3e1677, 0x8719cb27, 0xeb097508, 0x10e1ce8c, 0x76b3df38, 0x287ce489,
- 0xf4c956de, 0x8b21f20a, 0xe15e39f2, 0x1e4503f1, 0xe572c854, 0x7267b610,
- 0xf72937fe, 0x84cf7852, 0x9c8c3f7c, 0x23815af8, 0xcef8a5ab, 0xbccd13fd,
- 0x79a78f2e, 0xe789fc79, 0x3e7abded, 0xc713252b, 0x45f79599, 0xd528f2fd,
- 0x0e505f2f, 0x781c93c4, 0xe70b1d50, 0xb0467caa, 0x8a90c3d4, 0xd04535d2,
- 0x2d532d9d, 0x1ab8a22f, 0x2e1ae7e6, 0x1d2857df, 0xf8e44e45, 0x1fb914ad,
- 0xa057b95e, 0x6a73a5fb, 0xf2076f7e, 0x580fde86, 0x08a6b056, 0x18beeff9,
- 0x5fd026a7, 0x4bfcd6be, 0xcbeb821b, 0xc514d76a, 0x95e7c30d, 0x8d76b6ab,
- 0x57cf9764, 0x8a2c6f61, 0xaede5f35, 0x5e9cfbec, 0xf574e7d9, 0xcc7840c8,
- 0x1234535f, 0x7fba65ca, 0xdcfc1e17, 0x5eb5dae7, 0x5d2d57ca, 0xb7c4f743,
- 0x8a0daa5c, 0xbe6b5eb3, 0x7ba6e60a, 0x3fed2b90, 0xde0874b4, 0x312b8c5e,
- 0xc43dafe0, 0x0137c067, 0xf4cbc5eb, 0x289f143a, 0xfd063c5f, 0x3c5f6653,
- 0x113e3026, 0x339be25a, 0xb88ae522, 0x2c3e04a8, 0x0f030cb3, 0x89d1ffa5,
- 0x3f1ace3c, 0xa8f1d391, 0x5e3b55d9, 0xe9d71462, 0xa57bf941, 0x22fe4cf7,
- 0xbf07efef, 0x9c233267, 0x9bd4ebf7, 0xdfd3fe30, 0x87c50df9, 0xe619d7fb,
- 0x03077bf3, 0x5de7804b, 0x029be3f1, 0x26abc0e3, 0xdc243b71, 0x9a9dedca,
- 0xe3d5f50c, 0xa1c6994b, 0x1bd912f8, 0xdf74e199, 0x05f6d7d2, 0x8efeb7e9,
- 0x8bde133c, 0x0dbb75f4, 0xff356fc8, 0xa8b17ffc, 0x3e3afb87, 0xbeb4bca5,
- 0x7ca33f6f, 0xde728db5, 0x3936a1e0, 0x6cdbabdf, 0xe2fef6c4, 0xbf6117bf,
- 0x8fe9724d, 0x98f8f02f, 0x83f12a52, 0x3bf9ff00, 0x4c7b53d4, 0x54c51f1b,
- 0x2db0ca99, 0x7fc0bf24, 0xe1bd468e, 0xe3982b8f, 0x04c38b86, 0x54d215c6,
- 0x2580ae3c, 0xd2d215c6, 0xeb015c78, 0x12c05718, 0x8ed80ae3, 0x63ac0571,
- 0x18eb015c, 0xc63ac057, 0xb8c4b015, 0x297fb602, 0x0fd1e7bf, 0x3dc78b82,
- 0x57f7240d, 0xc0337e62, 0xbf27656f, 0x47b95abf, 0x7c617ba2, 0xdff503cf,
- 0xad3b7965, 0x678ede72, 0xd607dd2b, 0x882c901c, 0xc41f7c22, 0x3bf98e95,
- 0x1e314703, 0x5822d354, 0x7db198d6, 0x6363ed3e, 0xa507de47, 0x6d47bed8,
- 0x4f44cd11, 0x828d9f1c, 0xd57c8f5b, 0x57e287b1, 0x949d7105, 0x2cd35fef,
- 0xcebb8c30, 0xd93f72d3, 0x0e6b4129, 0x1d92abdd, 0x9376d4ed, 0x9cf552ff,
- 0xef7c0e60, 0x45e266bb, 0x7dca1392, 0x88fe52f5, 0xf37a3d39, 0x6b3d2092,
- 0xf85779e3, 0x29c7ec27, 0x1d9377e7, 0x2d176012, 0x6c6c7bc5, 0xf7a5be97,
- 0xe0e09c4d, 0x9dae108e, 0xd1d95577, 0xb5f31eb2, 0x55fe687e, 0xdef924e1,
- 0x47bf9b3a, 0x92f8fc52, 0xbd60077d, 0xb25a2dc1, 0x609f6e53, 0x1e526dbf,
- 0x28d819e6, 0x53b472ee, 0x27d67e4f, 0x8a0faf5b, 0xd74a14bb, 0x8563dc51,
- 0x7dfd205a, 0x3b9f6ac4, 0x4672e809, 0x6f14bdf1, 0x246f1199, 0xf0e8563e,
- 0x4fc96b49, 0xe315de70, 0x0a9877f9, 0x34e7ecb5, 0xa5cf9c9f, 0x5eb0a9e0,
- 0x2cf7e370, 0xbd0b3316, 0xc008b03f, 0xe5517187, 0x188b1ffb, 0xe850afcf,
- 0x8156c56a, 0xbcbe8ae8, 0x84bdf010, 0x103df845, 0x6af77fd0, 0x289bfdb9,
- 0xc4f27bf2, 0xe28f3c4d, 0xfca7663e, 0x7d486591, 0xd7d7e912, 0x0f6ed07c,
- 0xdb892ebf, 0x9c38e6f7, 0x3571f17f, 0xc61707e5, 0x276e9edd, 0xf9405f3d,
- 0x6ef78213, 0xaf1bfbe9, 0xf3833f26, 0xb9aa8e27, 0x927801dc, 0xf485d59f,
- 0x3b1ef14a, 0xcaebc795, 0xbf229ca9, 0x5efa161f, 0xcbe77f43, 0xb0be382d,
- 0xf84160c6, 0xc03df15f, 0xffbfabb0, 0xebf394e6, 0x5c9ec512, 0x6be3c233,
- 0x51e10583, 0xef49d718, 0xf3caa589, 0xb25b9c3f, 0x8b69f109, 0x1f5c2e60,
- 0x00b669ef, 0x40f08b7c, 0x3dd1c602, 0x66cf7c36, 0x513053a0, 0x7265f53e,
- 0xcc3aeb74, 0x1349a953, 0x2daf184c, 0x1852db0a, 0xa517f30b, 0x7677d324,
- 0x1c78da6a, 0x4e4a0fb6, 0x92dc5307, 0x62aac7c0, 0xf1cf7e31, 0x3fb5cbe8,
- 0xdfcf293d, 0xb3e65b3e, 0x782f8efe, 0xe3b5f11e, 0xfbe9bbb4, 0xef4f4c1c,
- 0xf60fd3af, 0xd1c6fb35, 0xe855af14, 0xae39b068, 0x45a8e909, 0xa5a513d6,
- 0x8ed1a9ef, 0x37b3cc68, 0x96ca1c23, 0x4b175c5c, 0x4e3e1ce7, 0x87dfe4a6,
- 0xe502c9c6, 0xde46ffed, 0x5aba89fd, 0x5baddf2a, 0x9d776545, 0xe8079e8b,
- 0xbf08238b, 0x7d6f101f, 0xf130d27b, 0x4733a656, 0xbe05be92, 0x90edf1e7,
- 0xbbf02af8, 0x33efc018, 0xd9eea23a, 0x8348f680, 0xcdf782c6, 0xd2bcff98,
- 0xfde1f240, 0x59c6c349, 0x2819f815, 0xf4c83b2e, 0xe9878839, 0x112aa919,
- 0x926dff9e, 0x44e71410, 0x2273c1be, 0x7c8960df, 0xf9f952a3, 0xa30d69e6,
- 0x7da194ef, 0x40fb22e0, 0xbf5df84f, 0xd40ae406, 0x20a665ef, 0x442172b9,
- 0xf32380ae, 0xac5f7a33, 0x0936cb43, 0xe9cffcfe, 0x19ca5c50, 0xb878ce46,
- 0xe61d199c, 0xb3be9e79, 0xf9461d50, 0x44efc0c5, 0xf2374767, 0xac25bdbc,
- 0x461dd684, 0x3e7bd0de, 0x38188ef7, 0xbff414f9, 0xd90361f7, 0x9613eb39,
- 0xe477ffe6, 0x71279fcd, 0xdf813c78, 0xed5d5073, 0x059b0e7b, 0xb15d8fbc,
- 0xbe236590, 0xc439ef95, 0x7bf218c1, 0x39eff5ff, 0x123c988c, 0x0ae0e7bf,
- 0xdc439efa, 0x30e7be15, 0x6171b26a, 0x461cf7e0, 0x30b11e4d, 0xc60e7bf8,
- 0xb29df885, 0x39efc1ff, 0xbbce4fa4, 0x370e7bfc, 0xa0ff364e, 0x3d8039ef,
- 0x4fe3899b, 0x63c6d8e4, 0x3b9f2899, 0xe3e5fdf4, 0x97643df2, 0xb2c52f68,
- 0xa2a5c228, 0xc56dd176, 0x73d16df2, 0x5f03b63f, 0x1578444c, 0x037d963d,
- 0x5125977e, 0xf2bca1eb, 0x49b6ad27, 0xb96bff38, 0x3cf8c9b6, 0x2f5fde34,
- 0x73f60f9f, 0xcd0bf60b, 0xe2e8531f, 0x0e5cc9ae, 0xa9cb93cd, 0xc7bfc3f5,
- 0xbf326f3f, 0xbed6be4f, 0xe077bf61, 0xd9feca7a, 0xdc56c596, 0x4ef7ec0f,
- 0xb7ec27dc, 0x88664b04, 0xf80f7a0e, 0xc9757a72, 0x2e7462fb, 0x3fb38f90,
- 0x9528f6e1, 0x14772ae9, 0x1ea30dda, 0x7ee14770, 0xe80e3547, 0xdf907df1,
- 0x68f406ba, 0xf576fc3f, 0x7fdb1e80, 0xdaf9e36b, 0x9f6ff3a1, 0xf491fdcc,
- 0x7e3403a9, 0x4dddfc0c, 0x1440c1fe, 0xe5e96dda, 0x14ae3447, 0xf66d4f7a,
- 0x93764127, 0x8c7bf899, 0x01e636dc, 0x8c3d2191, 0x1e78c1d9, 0xe90c4163,
- 0xef1d4ab9, 0xfdc0f95a, 0x38d1d222, 0x803bec37, 0xaedfaefe, 0xaf7e0c1a,
- 0xaae34bd7, 0x5ec025e4, 0x64ee7a0b, 0x9474bf3c, 0x5ed08f2c, 0x39d9efc4,
- 0x77d08fcb, 0x0b8ec0a6, 0x79be8f99, 0x594f9430, 0xe6bfb3ad, 0x62be5333,
- 0x77a5bf50, 0xfa3d66ff, 0x5ba5e5b5, 0x2f17e435, 0xa58f5969, 0x269ef1c3,
- 0x197d918e, 0xcbec1fd4, 0xfb65faa0, 0xf547d90c, 0xc7d717fd, 0xd02eb30d,
- 0x13a456c1, 0xaa8e0e85, 0x4fd0e33d, 0xb30cea3d, 0x3c1df7f0, 0x269a5df4,
- 0x5df4bdf9, 0xcadd39ff, 0x5d3965f3, 0x3f1df8c0, 0xa9ea8e8c, 0xaf1df2bf,
- 0x1c8a956b, 0x8dcefa1c, 0x5e28b986, 0x7248cb39, 0xfff646ae, 0xc89897aa,
- 0xbaf8511e, 0xb539e55a, 0x18c5fb1e, 0x8a9bfe79, 0x7aabf7d1, 0xcf9451c4,
- 0xcfac3f4d, 0xacfba54a, 0x178f7ad8, 0x5db81de7, 0x15b9efa0, 0x5ab97384,
- 0x9e389628, 0x4447e785, 0xa7815dfe, 0x98487e51, 0x37bfc147, 0xbf972530,
- 0xf8cf103f, 0xca98db77, 0x369df21c, 0xed7a1e39, 0xeff1b4ef, 0xf7e7cd92,
- 0x25510d72, 0xf9f1cba5, 0xf9e81b82, 0x1f52efc1, 0x2afee2b1, 0x71b4ef94,
- 0x4e61bf7b, 0x557f3d06, 0x3d41af30, 0x5d807ae1, 0x7a6d3be9, 0xef80fbe4,
- 0xf887bdb3, 0xd9f7d6ba, 0x1c0fef94, 0x23c3705d, 0xbd1c397b, 0x3355ef78,
- 0xafe418cb, 0x3c3117ff, 0x56c0d29e, 0x000056c0
-};
-
-static const u32 tsem_int_table_data_e1h[] = {
- 0x00088b1f, 0x00000000, 0x51fbff00, 0x03f0c0cf, 0x3370278a, 0x45e39c30,
- 0x8381e9f0, 0x5fd32918, 0x50c0cec6, 0x4055c401, 0x3f880bbc, 0x7c3032b1,
- 0xff5e2566, 0xdb042935, 0x21818248, 0x88d7881e, 0x49a83031, 0xa41dc422,
- 0x03261819, 0xb150a1f9, 0x5f3a4047, 0x0f77328a, 0x80a69c16, 0x872ae629,
- 0x9163a760, 0x6819c647, 0x50e54bf2, 0xf40499f9, 0xa2be340f, 0xa2ffca8e,
- 0xa013a10a, 0xe4d157e2, 0x3be542bf, 0xa6bafea0, 0x4edcdd8e, 0xc35dfd32,
- 0xfc01a102, 0x9847b099, 0x009847b0
-};
-
-static const u32 tsem_pram_data_e1h[] = {
- 0x00088b1f, 0x00000000, 0x7dedff00, 0xd554780b, 0x733ef0b5, 0x7993331e,
- 0x31e424e4, 0x1e4e1081, 0x03086820, 0xb78a8884, 0xf6301027, 0xd57876d2,
- 0xaf0e2d58, 0x6b86f210, 0x7fb6bd2d, 0x78490806, 0x111fc1a8, 0x29e1d5ad,
- 0x311d82f6, 0x0388b622, 0xbdabd228, 0x557c5837, 0x0bd11feb, 0x2a44908a,
- 0xe5b5ef6a, 0x3ef6b5ee, 0x09939cc9, 0xafdb7b04, 0x7cfdffff, 0xc7ecee9f,
- 0xf5ed7bd9, 0xfdad6bda, 0x8131c918, 0xe4230da4, 0x06fbfc22, 0x109d9221,
- 0x9d37a132, 0xd72120e3, 0x66924218, 0x92ddfd2f, 0xbadc4214, 0xa349bfe2,
- 0xe4febba9, 0x5f5f9a1a, 0xa4aff0ec, 0x4d1f5b4d, 0x2d096a78, 0x2fa1efbf,
- 0x7cd04fc3, 0xad79ecfa, 0xce7eb4c5, 0x5d0fc924, 0x10ab1a1d, 0xd8e7ed02,
- 0x520176c2, 0x9097e581, 0x927fa130, 0x45f8e8af, 0x9fe8b884, 0x37fe9724,
- 0xa5b21c89, 0xd1e43e60, 0x99029ea0, 0x31364846, 0x7e6055fe, 0xf509fc36,
- 0xd0677744, 0x779e75ef, 0x7fec13e5, 0xdb0a5f8e, 0xf27347e9, 0x4dc87203,
- 0x6d08c6d3, 0xfb1dc749, 0xf908395d, 0xe4990cc0, 0xd8b68763, 0x0fca6ffe,
- 0xd214754c, 0x080b67fe, 0x49d9f9ff, 0xcc82da36, 0x3be1eda6, 0x1a1f4e22,
- 0xd99f67cc, 0xe0dfb8e8, 0x40dfdddf, 0x5fc0a7ff, 0xe5a1094b, 0x9e0c1ccb,
- 0xc5d9d9e4, 0x1989ce30, 0xbf4316d7, 0x3213a673, 0x9d4d5688, 0x4490f63d,
- 0x891aebf3, 0xcddb4edf, 0xcf1c0d29, 0xcbf8d981, 0xe80293a7, 0x60ee8e97,
- 0xffc45f7d, 0x64ef38eb, 0xcc3fde3e, 0xfa102b1e, 0x8a97f641, 0xa0725da9,
- 0x4bd153f9, 0x2d056a48, 0xee53191f, 0xd09a769f, 0xe6e9e4f2, 0x721340f6,
- 0xc425211d, 0x1aea7293, 0xc91677fa, 0xad4228ba, 0xee8c0b6a, 0x4a6b0fe5,
- 0x22f9fdff, 0x12fa01d0, 0x6a1fd6fb, 0xa0e9e99c, 0x7b375af8, 0xf4d08796,
- 0x0f2cc0f5, 0xd9f5efe0, 0xd68f10b3, 0x676673f6, 0xfb5e0227, 0x06feef1b,
- 0xfa08d03a, 0xb5b3c73d, 0xea48907e, 0x39fbf423, 0x31c3d13a, 0x9301dffd,
- 0x0964254d, 0xb34d1b30, 0xd2a6d525, 0xe88d6cfa, 0x95e33891, 0x3b69b102,
- 0x28994992, 0x0ff352e3, 0x2637bb61, 0x71c34ad9, 0xef0773e6, 0xb81a55c7,
- 0x6fa50e67, 0x751274d6, 0x2912ba51, 0x75e2c6aa, 0x1a548a5d, 0xddc7038e,
- 0x68425e1c, 0xe1c0cf7b, 0x02d86571, 0x47329fd0, 0xf5c2c40c, 0x42678003,
- 0x4ca24a6f, 0x18f8e6ee, 0xfbbf72fd, 0x338f506c, 0x8e00bb8f, 0xecaa9009,
- 0x0c8f0e3b, 0xb201937c, 0x43fc8296, 0xe02463c2, 0xb91eccfb, 0xe5070124,
- 0x17f0a18f, 0xa42dae15, 0x1412d47b, 0x47ec56b2, 0x0324a4f4, 0xde1157bd,
- 0x4f51290f, 0x42157e51, 0xd78b84ab, 0x129e6a5c, 0x35f06539, 0x193ba9f0,
- 0x5e1cbfc7, 0xe518a048, 0x88d43152, 0xe307c078, 0xed4df92b, 0xbacba502,
- 0x947ee5d1, 0x90218a6f, 0xe17e4333, 0x3bf457df, 0xbdccff37, 0xca91d29c,
- 0xd3396b91, 0x2cdf8a8f, 0x8e92eb67, 0xc82008b5, 0x0f49b964, 0x40241210,
- 0xeb6f0377, 0xbc78a2be, 0xfb5bbf04, 0xe7c86fdd, 0xc9983260, 0x053f9327,
- 0x79bd07fa, 0xf0077ed3, 0x5edf3af5, 0x5d6bcc07, 0x7a825efe, 0xe8c7d38a,
- 0x5daf95d7, 0x81d6ce52, 0xee808d7c, 0x9ea748c3, 0xc6a7d1f2, 0xa6767e02,
- 0x748bf72a, 0x08933b7e, 0x6b75d9ea, 0xd02b7848, 0xb5e594b5, 0xb5e1696f,
- 0x9d3b8e74, 0xd698bebc, 0xed06bcd3, 0x3adbbea9, 0x68870b49, 0x8d8b6b7e,
- 0x85ab1fb5, 0x311f9a45, 0x8a148491, 0xc2c56d4f, 0xbe5a4e95, 0x17eaa375,
- 0x34fd5eb5, 0x2d6725a2, 0xdeb2dfb4, 0xbdf00aa6, 0x3b23ad8f, 0x443849f3,
- 0x39b1dfc7, 0x69239bf9, 0xf9b2143e, 0xf24fd387, 0xd945faf8, 0x022844a5,
- 0x9c3a31b4, 0xae5c7921, 0xa9e50204, 0x7ecb7cbc, 0xa48ebfff, 0x5d363df0,
- 0x45a0aff2, 0x4ccfd63b, 0x769e28d0, 0x3f048168, 0xc0f07f4d, 0xccefcc7c,
- 0xdd00260c, 0x9f9c6e76, 0xc0f14c00, 0x461b99e6, 0xd2617b41, 0x6ce91d61,
- 0x1f47ffd6, 0x00ec3a28, 0x9e994b38, 0x7e0d6a4f, 0xbcf9a253, 0x89b91249,
- 0xadcf214e, 0x1fdbdc21, 0x4f5811b2, 0x8b6de8fb, 0xcdd44ebe, 0xb4eb5779,
- 0x5d7f818f, 0x2009160f, 0x86353bda, 0x923741e7, 0xbeec8af0, 0xc85cefe3,
- 0x5d2913ea, 0xbf2e0e80, 0x96675962, 0x4756f85a, 0x3c717079, 0x1c755ab9,
- 0x897a488f, 0x578bd690, 0x62469ffa, 0xf07ae9f0, 0xe9f58128, 0x8163e78a,
- 0x7ca05d27, 0x9cab830f, 0x1c1eb9ef, 0x6357caf7, 0x21b1f1e3, 0x7fa01eef,
- 0x6177c522, 0x50eb0424, 0x0a24617a, 0x57c9e2fc, 0x9a0bb034, 0xc5c93109,
- 0x7cfbd1a1, 0x1337201d, 0xaf583e22, 0x837e8dd7, 0x81568fc9, 0x6fe630f8,
- 0xb7e4f7b4, 0xc1fb5893, 0xf87287a8, 0x697853ce, 0xcb87bb39, 0x847f4f59,
- 0x732447ad, 0x4f75c21c, 0x5e7561e1, 0xc84c7d1c, 0x97cb940a, 0x817ae049,
- 0xedcddea6, 0x6c06a751, 0x7900dfae, 0x910240b7, 0x134ddf38, 0x42d27ad8,
- 0xe9a05d74, 0x162f44a3, 0x664b9d70, 0x2eb2d7fa, 0xbc9fd1f5, 0x16e9165d,
- 0xd468df40, 0x8734803e, 0x23cd31f8, 0x92474c01, 0x26af4c56, 0x49f34c11,
- 0xa405a63b, 0xaf43531b, 0x422a89f8, 0xfac4c47e, 0xa4e720c9, 0xe58b63da,
- 0x3264d0bb, 0x3cce3eb2, 0x73782d9a, 0xf8ebf890, 0xe75f021d, 0xfc8aa648,
- 0xf17af843, 0xe818b06e, 0xfc2854c5, 0xefccdbbb, 0x0ebe24e2, 0x693dea38,
- 0xdd335780, 0xcd54548d, 0xf06d5a7c, 0x1c2444a9, 0xc41f32c7, 0xd6b8cc63,
- 0xfb7356fe, 0x9c9f98c6, 0x491f23a2, 0x50cf666e, 0x71bdf438, 0x0828d1f0,
- 0xab419e7c, 0x29b23adc, 0xb6494bbf, 0x6b9a5ac8, 0xe8764f74, 0x893f2f38,
- 0x7ceb6747, 0xbe2af0ac, 0xa9b3c181, 0xcb0a9e07, 0xc297fa3a, 0xfea64ff3,
- 0x1db24ca4, 0x86a20bd6, 0xbc56de70, 0x42def9f6, 0xd2e4fe8f, 0x57d66991,
- 0x02d78a4a, 0x069f8587, 0xe37c7f68, 0xe47fea4b, 0x211bfac3, 0xa9ec2a41,
- 0xe60101f9, 0x1a40ad7f, 0xc85e2913, 0xa0eda268, 0x6af5cd70, 0x698b8ecc,
- 0x91f41937, 0x74c90b66, 0xee92cfbe, 0xe43ed023, 0xf41d9127, 0x6df367b3,
- 0x7d695ba5, 0x63b69faa, 0x1e4005fd, 0x0b6d1dae, 0x9fd1b940, 0xf5329e4e,
- 0x112efd47, 0xe836e1f0, 0xe4c3f163, 0x1fb436b1, 0x6fa27c95, 0xc75f2389,
- 0x1d157cb9, 0x9eb0ea71, 0xb094cab8, 0x8ffa1b33, 0xb53e4314, 0x5d61f89a,
- 0xe6cdd49f, 0x30e1c651, 0x59fa9de6, 0x6a7e0040, 0xc03b31f7, 0x3fd04855,
- 0x74da3254, 0xb4dd1ff0, 0x7e42f50e, 0x50531754, 0xd83fa23f, 0x5f3d9e1f,
- 0xd00bed3d, 0xb9d47d7e, 0x92be076a, 0x9fa0e9fc, 0xc8568f57, 0xf8bdfa60,
- 0xfff63b3b, 0x1862a7b9, 0x47cf9989, 0x0bb1d10d, 0xa670c1a9, 0x67a618e1,
- 0xed31da1b, 0xd30b786c, 0x594ae9fe, 0xebceda8e, 0xc0d7e27e, 0x3f0f101e,
- 0xc00f3447, 0x5c75d51d, 0x12a21c17, 0x82802eb3, 0xd36fff06, 0xdaf0bcd8,
- 0x086bc46c, 0x67d54de6, 0xe0367878, 0x9e000a5b, 0xc2223887, 0x77867cd2,
- 0x01d03fc0, 0x3c8e8bca, 0x5cf102b7, 0x9e02c64d, 0x0ca4c937, 0x13e03cf4,
- 0x827d89d6, 0x03800eed, 0xfe69f3f2, 0xc1fa8ffa, 0xf78659fb, 0x9e29978c,
- 0x4b494e00, 0x8eadc664, 0x913b95f2, 0x27d7120b, 0xf9927abd, 0xbdf6bdd9,
- 0xcf00eff6, 0xac894696, 0x68a6bc85, 0xf7b7114a, 0x15824a41, 0xdcdfc1d3,
- 0x28fd3e56, 0xe2b90bd8, 0x5a648757, 0x3a5eb43e, 0x4f2311d9, 0xfd746559,
- 0xb0c276a4, 0xecf2e6e5, 0x074f3990, 0xb7347f40, 0xc43e5cc5, 0x87aef5c6,
- 0xad1e4078, 0xe8b65af3, 0xd3fd7157, 0xea240d55, 0x02f26396, 0x1791a7c3,
- 0x423b13bc, 0x46cb7e74, 0x4c038c11, 0xb3f5b1b7, 0x7ca3be15, 0xd74f2f71,
- 0x5231fa5b, 0x46dba269, 0x193d30c6, 0xddba7d6f, 0x46ce3c56, 0x4db06bfb,
- 0x72a3671e, 0x042292e4, 0x5b9777ec, 0x93f9e3d1, 0x8e126dd3, 0xebb5209f,
- 0xfbab8522, 0x4adfe23a, 0xee4cbef1, 0x2c7c828e, 0x21fcd62b, 0x30fe4750,
- 0xd51263ca, 0x15f66261, 0x700253c1, 0x6c4a7850, 0xfb044e38, 0x0738a6e9,
- 0xcc997dfb, 0x4a365dfa, 0xf6376823, 0x3655e703, 0xd80ce119, 0x34fd0904,
- 0x1555974e, 0xa34df739, 0x9f554ed9, 0x2667fd05, 0x78c16705, 0x7e6fb946,
- 0x1963fa29, 0x2bf4ccc3, 0x0167d477, 0x2206547f, 0x4e7f80d3, 0x926c5f7b,
- 0x36b7bce9, 0xf144bf10, 0x702b9eff, 0x7357f41b, 0xb008fd7b, 0x5e06df7f,
- 0x3b2a0098, 0xa5ac7a80, 0xa25fa564, 0x148972e5, 0xca3f2995, 0x25bf0f13,
- 0x2a1a4017, 0x938b3886, 0x477d33d4, 0x35224bf5, 0xf5231cbd, 0xa27e218f,
- 0x72d03c80, 0x60256f55, 0xdba8380f, 0xef863270, 0x1e734c94, 0x7cd8fa6c,
- 0x72e37a7f, 0x27e79829, 0x09fe0387, 0x49dde9da, 0x26927fcc, 0xde000bfa,
- 0xa6ffaf7b, 0xc4fcf006, 0x67f93366, 0x5a4d7f56, 0x79696ffe, 0xfee26810,
- 0xd5fcb4b6, 0x80f1bcb4, 0x979c5697, 0x9432ca63, 0x24f0050b, 0xbf07f932,
- 0xe41d67be, 0xcf5bf878, 0x572f58db, 0xf835bc37, 0xa720e1c3, 0xa43f831d,
- 0x052d59c4, 0x3e42ff91, 0x1c28b5f0, 0x098f9992, 0x62134f78, 0x18176e52,
- 0x5a9c9bf6, 0xdc947488, 0x988ea9d2, 0x8fc0ddf5, 0xc1ddfcb2, 0xf4878e1f,
- 0x78ad4c6d, 0x0120a455, 0xd768d03f, 0xe018b713, 0xe234effa, 0x1fc479a9,
- 0xa47338a5, 0x7a6a78b0, 0xf30a285e, 0x09fd5272, 0x233dd3eb, 0xf0c56b5f,
- 0x3b0fc581, 0x3f20ad94, 0x61b68cb4, 0x7865b35c, 0xf90290e4, 0xd1406a0c,
- 0x5dca677a, 0x4c6d8821, 0xe37c63cd, 0xd4f5b3f4, 0x7e8c3c35, 0xbdbfa198,
- 0xad5e1f1c, 0x67b3f51c, 0x31ded9a2, 0x2b2701d6, 0xdb36f4a2, 0xa694ce51,
- 0xbe82c54f, 0x9b539a7e, 0x69e51a76, 0x233fb9a4, 0xe36ed1de, 0xc96b8bf1,
- 0x593768b5, 0xe1fd12e2, 0x71f86bcd, 0xdcbcd913, 0xfc1849d3, 0x5189f4e6,
- 0xcfd79bc4, 0x9bd866f0, 0x9bbf3e1e, 0xfc04234a, 0xf3f89ebe, 0xeb6f0ccd,
- 0x7fa28baf, 0x29ec7eb7, 0x85bd67b4, 0xce3f261d, 0xa1ca1ef8, 0x76c4fcfe,
- 0x18f6a1a8, 0x23711bcb, 0x19e9a78a, 0x61ebbd69, 0xaddbb72b, 0x92275374,
- 0x4763d062, 0x8315a747, 0x84b7bede, 0x3cab4ec2, 0x6297587a, 0x7ef2adf0,
- 0xdf867cd4, 0x7cd8cb7b, 0xd3f47b69, 0x93989def, 0xf266e304, 0x7f11b0f8,
- 0xebcbf1ee, 0x284f78cd, 0x2d85c4b4, 0x67eab77e, 0xcc4b52f4, 0x5f39abe5,
- 0x57a7222b, 0xe8ce192a, 0x4f3e3035, 0x84bcf832, 0xdf3279f1, 0xd12f4837,
- 0x921d010e, 0x4dff994a, 0x4df37e2c, 0x5e8c1d29, 0x00c88f37, 0xa9fefc3f,
- 0x97663edd, 0x7126ba30, 0xc6efbc7e, 0x71c80211, 0x40268972, 0xc33e727d,
- 0x13c53253, 0xa5c977ac, 0x4ef6efcc, 0x525db0d4, 0x0b5ba309, 0x5fe14c7c,
- 0x898a7403, 0x5b4b3b1f, 0x6b7fa075, 0x741d4ef1, 0xe1c53259, 0xd4cf0a20,
- 0xd81b0a52, 0x805fb49d, 0xc51853d7, 0x49525ab8, 0xfca5d870, 0x82a2427a,
- 0x10f887cd, 0xe94b686c, 0x7e58253c, 0x20b26268, 0xa7ef7ed0, 0x80f1c07c,
- 0x8f173c71, 0x785fdfa3, 0xfddcf512, 0xc8364df0, 0x1cebf6a9, 0x36a5279a,
- 0xf707f83f, 0xf108e697, 0xbcc6e7e2, 0x2acb83fb, 0x35f812c7, 0x9572fc31,
- 0x16dee3c8, 0x22475dfe, 0x57c82e6a, 0xa5f76f48, 0x830feaf5, 0x65e47aff,
- 0xfecbe312, 0x87a189f4, 0xb21824dd, 0x1fd5f765, 0xdef8ce92, 0x7940179e,
- 0x24ef03eb, 0x8a52afc6, 0x9136fcb9, 0x5e867fdb, 0x140792d1, 0x00a12727,
- 0xd72277ae, 0x02da4f4f, 0xf054c50b, 0x4eb8c3bb, 0xae74c8b8, 0xca9f1937,
- 0xe6a6ff9c, 0x4ae2bd33, 0x9776b089, 0x903aa78d, 0xa834d6df, 0xc65a1fa8,
- 0x439ed5de, 0x7866b0f1, 0x73be1f93, 0xb9196b8c, 0x3cdbf68d, 0x2b70797c,
- 0x30fdc00c, 0xac22bfd4, 0x8d5f8c83, 0x0e03307d, 0x61d21992, 0x7e09cc9f,
- 0x2467e011, 0x24905fdb, 0xeb8fb512, 0x4795df1d, 0x6967cde1, 0x0dc19f28,
- 0xb6ebc9fe, 0x6f9c2ae0, 0xa2946b7e, 0xc99bce9b, 0x42216acb, 0xebcc225c,
- 0xb21e7c72, 0x9f8ebe31, 0x025df7f6, 0xe1c4d3ec, 0xdb22dddf, 0x472d59a7,
- 0x7e085088, 0xf289ed1b, 0x6a6ceac9, 0xa381c797, 0xd56bf391, 0xc3f8672e,
- 0x89ab7e18, 0x75f29924, 0x1fc233fb, 0x7711fac6, 0x514fb80a, 0x4358d1cd,
- 0xcf00b23a, 0xf61a29ef, 0x48eb6b57, 0xe9177c83, 0x78025b4e, 0x8b8f0310,
- 0x5277fd74, 0x90b8c4be, 0xd6eb607b, 0x81a7f0e5, 0x4085d10c, 0x0fa9e4cb,
- 0x178e9879, 0x69e307dc, 0xd2bf2b2a, 0xf5f0e371, 0x4d7cb10b, 0x6a5957c4,
- 0x147f6be5, 0x129ddaf9, 0x9fb77ac5, 0x8fc1b5f1, 0xad1f9c89, 0xbf8c687c,
- 0xd6afc79e, 0x335f3e72, 0x708549a1, 0x9b51dadf, 0x2ce69e30, 0x7c015cda,
- 0x25aa32de, 0x64de7c0c, 0x17df35f0, 0xdb2efea1, 0xa35ad31f, 0x72264eb0,
- 0xeed456be, 0xeb7b9006, 0x38935d7c, 0xbe0534bc, 0x5f39b806, 0x93f807bf,
- 0xae08ddfd, 0x076bd7c1, 0x45bad7cc, 0xfed39b6f, 0xc2d927fc, 0xf1891ed7,
- 0xd7c067e7, 0xb30a4f00, 0x963edc63, 0xdfea7eca, 0x06e7e9c7, 0x6b99f989,
- 0xe5ac1cb9, 0x1fb6b072, 0x223ff839, 0x9afe03f3, 0xc98b3072, 0xe5a24381,
- 0x8cae9389, 0xd3b0251f, 0x9e7bc8e3, 0xe1b5fc24, 0xbc4334de, 0x27fe3c72,
- 0xbe84cd30, 0xcfd234f4, 0xe4c0e744, 0x73c98f3b, 0x76be727f, 0xfe387c68,
- 0xa7f46453, 0x26922ef8, 0xffbe395c, 0x7215705a, 0x43f0367e, 0xb728c582,
- 0x3a667a31, 0x86a92bc0, 0xd304799e, 0x28ad79a9, 0x81be86a7, 0x2bfa0a58,
- 0x3bc62b6a, 0x68ad999c, 0x98f3399f, 0x24fc31be, 0xb88d7ceb, 0x33d71378,
- 0x02217949, 0x853d9feb, 0x9eaa3f70, 0xcc9951f9, 0xd5e2301f, 0x18a8fd86,
- 0xbbd0d47e, 0x79a79f70, 0x97da3d78, 0x3bd637cd, 0x36c3ede2, 0xf077eefb,
- 0x3ed35f4d, 0x0e5a1aeb, 0xbbc9aefd, 0xc3b3c49f, 0xfb18798c, 0x6e3e4e46,
- 0xa8b51e73, 0x32df5c60, 0xc76adefc, 0xfeb26734, 0x7ae7be7a, 0xbbd1ee57,
- 0x54768f2c, 0x6f9f8c6e, 0x03e0c7e9, 0xa343e885, 0xe4183710, 0x4ce7091e,
- 0x3f04849a, 0x924dc7d8, 0x90729d20, 0x5aa97b1e, 0xff401689, 0x9cbc6627,
- 0x3db2f685, 0xbce406b8, 0x0380abe9, 0x53aba9e2, 0x96c6f35e, 0xe6f19d9f,
- 0xa56cc1b7, 0x7ee2d5df, 0xb148838e, 0xdc2763fb, 0x4b7ed1db, 0xe9c737c8,
- 0x09ae149c, 0xcc13c402, 0xbf8f0ae5, 0x72e69928, 0xa45fc799, 0xe7394b14,
- 0xbe34b90f, 0xeb93795e, 0x7602df15, 0x1cc4e74e, 0x713bd716, 0x30674f2b,
- 0xdbc73efe, 0xc234f375, 0x535feef5, 0xf6e5251f, 0xe29befc2, 0x9922f8c7,
- 0x3ebb4765, 0x2b9d852b, 0x26e7b120, 0xa26b5fe0, 0xccfc6244, 0xff9781a1,
- 0x61d4f108, 0x06cabe0b, 0xea156b3c, 0xdb465861, 0xc79c6ca5, 0x28f74263,
- 0x3ae6cf18, 0x02f5fb8b, 0x6a72e345, 0xdbf40b12, 0xef8521db, 0x3a52c0bd,
- 0x91057b8f, 0x37c0ecbf, 0x8c29d022, 0xb1448ba9, 0x303a43de, 0xe61b089f,
- 0x5187e0b9, 0x33c44bbf, 0xc61b614c, 0xf6cc7e8d, 0x7888d643, 0x03a8836e,
- 0xedc9bfb4, 0x4297f82a, 0x1ae77e88, 0x40be77f8, 0xc4c8316b, 0xc608b2e0,
- 0x7cedad93, 0xa039d853, 0xaffb7a3e, 0x5237df4f, 0x548f8173, 0x1ce8372c,
- 0x694ef514, 0x5ff2df42, 0x12f38189, 0xdc58abc6, 0x2a5f5acd, 0xde4017e9,
- 0x0bc2e5ac, 0x7e053a7f, 0x6467aaf4, 0x3ca7a3f4, 0x41bf4723, 0x045b35f3,
- 0x076d53f4, 0x770fe89d, 0x51f2c23e, 0xfb43a28f, 0x8c05db73, 0xdbd9d507,
- 0xe201bdef, 0x0488cf8a, 0x19708dfd, 0xe29d8bee, 0x59fb2673, 0x80079cbe,
- 0x9a1cb66a, 0xec57df0b, 0x77d813b7, 0xf16e79a8, 0xf9a06fb7, 0xf464c7c5,
- 0xac766a51, 0x657f6050, 0x40885849, 0xc1326bbe, 0xc4fb43f1, 0xbeda0ef0,
- 0x369edbcb, 0x2bfb0dc7, 0x455da20e, 0xa7b4f6e1, 0xa9f4a6cd, 0x0d353f0c,
- 0x2ddcabbe, 0x98ffc1f8, 0xa38eccf2, 0x9abbf419, 0xa73c0427, 0x45cbb550,
- 0x9cba18b4, 0xdfa3136a, 0x9d33f880, 0x8bc5f827, 0xc034eb49, 0x0d8ecd7b,
- 0xb713168a, 0x9230d33c, 0x7cdef668, 0x1c7413cd, 0x93ed5dfa, 0x58a61f82,
- 0xe041236b, 0x9ce7ce8f, 0x30dcdb65, 0x08ca15bf, 0x59abcc0f, 0xc38b7681,
- 0x7ec1f6f3, 0x8b78657a, 0x57d68d32, 0x2945b23e, 0xaaa7e18f, 0x2d10d75d,
- 0x6d4e3f86, 0xd4bf4dce, 0xae867b7a, 0x54f0b15b, 0x88b7a612, 0xc4665614,
- 0xc8767bec, 0x453f9c49, 0xdf06ff53, 0x18275e87, 0xef3d0ac7, 0x741abc41,
- 0xbd32a65b, 0xbcedd060, 0x7528e9ca, 0xb388cd17, 0xd02f7aaf, 0x2af10ec1,
- 0x1bfbd315, 0x6c6e987c, 0x9b3e90d0, 0xe1577f00, 0x7b9606be, 0x88629127,
- 0xa275788b, 0x3c9f52c2, 0x21962f5d, 0xf960124e, 0x8283dbb4, 0x1bf98976,
- 0x383926e9, 0xb75c820f, 0x964c4fc9, 0x49bebae8, 0x7e252e09, 0x77b4be3a,
- 0x6075c972, 0xc8e705a7, 0x1d9fb0a9, 0xe8f160ac, 0xa2e40f88, 0x7ccbef89,
- 0xd6f2664b, 0x7c60a194, 0x8bf7d364, 0x41eda1b6, 0x7d365e18, 0xec277cbf,
- 0xe67ed1ab, 0x1aae54ca, 0xee4fda65, 0x9b49fbe5, 0x4fd4d13b, 0xa30adcda,
- 0x8fd8c59f, 0x6cfd6073, 0x19bd7b9a, 0x4695b99e, 0x8fd8f53f, 0x4af36067,
- 0x199263bb, 0xb1aa3b9e, 0xfddbf49f, 0x06881f68, 0xdc7690ff, 0x4eef7517,
- 0xc496b71a, 0x1fe4d1f2, 0x2a1e78c3, 0x947cb155, 0x24c13138, 0xb27a494f,
- 0x74a83f29, 0x6a7da9b0, 0x9f54c720, 0x85b5765e, 0xbfa8dd2f, 0x8a814f31,
- 0xab38b126, 0xbd415832, 0x81d83c53, 0xbb06bbf9, 0x75dcef51, 0xb02af07d,
- 0xd05600f7, 0xc12977f3, 0xbe1c6a31, 0x0812f3a1, 0x1b34cfc6, 0x32ec7da4,
- 0x7939efbe, 0xde2357c8, 0xd9d7dfa7, 0xd12447ba, 0xf14a43fc, 0xadef3023,
- 0x8de3b332, 0x9d824338, 0xce6bfea0, 0x8a16da2e, 0x50cb6bc1, 0x6f422fca,
- 0xd0722dbf, 0x73b8fc07, 0x4a576ff5, 0xa1a6be14, 0x7a6e1e0a, 0xe1edfe5c,
- 0x9b99b510, 0x00fb9687, 0xea8576ff, 0x15f1d88f, 0x8e3e7e3a, 0xdfc61bbf,
- 0xd93b332b, 0x673dbe3a, 0x477c69a2, 0x7c698556, 0xa7c74287, 0xf56fb1f2,
- 0x8a7c7c3b, 0x7909ebbf, 0xc7077e56, 0xe05567b7, 0xa90acdf8, 0xd09f8d30,
- 0x9001fe33, 0xcdfdc39f, 0xf37ae73f, 0xcd2ab3fc, 0xfcd857f3, 0x80feae8f,
- 0xf3809f8f, 0x80fe597f, 0x92ab3fcd, 0xfacedfcd, 0xdbdf19ed, 0x6157ff83,
- 0x37f5affe, 0xe649dcff, 0x36ab0ff9, 0xc6cedfcf, 0x27f5637f, 0x8e377c7c,
- 0x09fca6ff, 0x6ab0ff9b, 0x07b15f1c, 0xca47c0fd, 0xf07a8490, 0xa461a99f,
- 0xa35d4061, 0x923a40e3, 0xc837a9c5, 0x5dc70839, 0x3eef8c09, 0xe6fca04f,
- 0x24a7d5ee, 0xea90254c, 0xd3ce5acb, 0x158bbb55, 0xd0842fe6, 0x41c4585f,
- 0xc45fb85d, 0xb46c8cf5, 0x78538787, 0x218bf73b, 0x78dc2fc8, 0xac5b9e23,
- 0x77dc13b3, 0xfa3056a7, 0xc0f1ff1f, 0xf3b1ade8, 0x5a8ba6b2, 0x13fecb65,
- 0xef38c783, 0xa6ab2454, 0xa78829fc, 0x127c3d50, 0x9e9a1ffc, 0xb478e996,
- 0x83f043fc, 0x1b958aae, 0x768c2e76, 0xce9f2277, 0xfe1cbbeb, 0xf7c31253,
- 0xdc053ba9, 0xc37cf847, 0xd9f40552, 0xc99756a2, 0x74e3a86f, 0x8ece7eea,
- 0x79c68dfb, 0x14505bbe, 0xc1fdf909, 0xbb051d6f, 0x41e6f171, 0x276eefa6,
- 0x1470d5e1, 0x79fac173, 0xff3a5543, 0xe3eccadb, 0xeb6b8c44, 0x9c1c5843,
- 0x49b8810c, 0x7906db59, 0x52dccd08, 0xf9ec9b26, 0xfce6835b, 0x7ce6156d,
- 0x6d961ca7, 0x112ccf60, 0xefda16c8, 0xcb65ebf7, 0x8e3600b9, 0x76e64957,
- 0xbbef1b25, 0xb4198694, 0x8e90fbe8, 0xe9156283, 0xdf65573a, 0xddd4f01a,
- 0xae28932d, 0xb8a91dc7, 0x454a140f, 0xb2a82dbf, 0x9de15b79, 0x00f43b2b,
- 0x8fee6785, 0x15a7c444, 0x2e838efe, 0xf3ed46dd, 0x907fcd8e, 0x9d6c7e21,
- 0x3bfe158f, 0xa3e75b96, 0xcc3eb02a, 0x19cb590b, 0xa9f9589f, 0x36f17f6e,
- 0xf37b3db3, 0xb5fb58b6, 0x530cd76a, 0x56f8497e, 0x9bc5fb53, 0x17ea99e7,
- 0xd5312eb5, 0x6a59682f, 0xfd0bcfca, 0x8efed4c8, 0xf54c2be5, 0x635fafdf,
- 0x62adbfaa, 0x6b7f2983, 0xfb5321f0, 0x98b6ca5b, 0x47076dea, 0x68e4077d,
- 0x9e22ebd5, 0x3ee0bdfd, 0x9bd82f75, 0xee12dc17, 0xed447e73, 0x33839015,
- 0xb4815ed4, 0xf478ff73, 0x2c1ea9a7, 0xf9a24d87, 0x2d5598ca, 0x0997a099,
- 0x6572cfea, 0x24d0aac2, 0xd7b0178a, 0x9b887ca9, 0x041d1215, 0x0f724cce,
- 0x318b771f, 0xa9971df5, 0x7d054cdf, 0xb17adf7c, 0xdd797e23, 0xd4c379d6,
- 0xaf6a7e38, 0xb02192dc, 0xce5975ce, 0xb569189f, 0xb1a73ce5, 0xeb817dbf,
- 0xe9856ad9, 0x19bdea83, 0x86736193, 0xe223e5fa, 0xf9ec9b9d, 0x0db0223e,
- 0x53feb048, 0x98861f81, 0x76aa6bf6, 0xfbe49ae5, 0xe11121ec, 0x409d05aa,
- 0x5217eaf5, 0x72028d60, 0x35923d16, 0xa1c0346b, 0xef4055af, 0x75c54fec,
- 0xe9436cfd, 0x3fafd836, 0xf4c010d3, 0x4c3286a3, 0x3104354f, 0x02a1b0fd,
- 0xf50d93d3, 0x2c347698, 0x86bdf4c7, 0x36efa610, 0xbbfa60b4, 0xdf4c5686,
- 0xe98cd86a, 0x4c610d1b, 0x4c741b3b, 0xd1e8790d, 0x0f6e01bf, 0x84d71b1b,
- 0x2e7cc3db, 0xe73da98d, 0xf7ff70c2, 0x11f3fbbc, 0x7f9fef60, 0x6cfb8ecb,
- 0xcacbe1fd, 0x51d3fd6f, 0x3ca57b47, 0x639d083c, 0x8133bd6b, 0x2369c9d1,
- 0xec1c64a5, 0xcff4ecff, 0xc4bcc7cf, 0xfd84f52e, 0xfa713c33, 0xfa5d7885,
- 0xf444e9e5, 0x949982ee, 0xfe1ea71e, 0xf7e822af, 0x607fac02, 0x71111c07,
- 0x6dd1261d, 0x95d7a07e, 0xe1784de2, 0x2f1059f7, 0xbf83d73b, 0x8b882cf6,
- 0xd8a8ab5d, 0x3b36f9ff, 0xf9d2f881, 0xcfbc2e03, 0x36d8fe75, 0x32e3c82f,
- 0x3d8e189f, 0x6b4d6147, 0xb761d922, 0xe736efc9, 0xa50ab7cf, 0x689a1e78,
- 0x0347b389, 0x3861a95f, 0xe0d9d20f, 0x67e8d4c3, 0x71c14f99, 0x65caecce,
- 0x5e7e3371, 0x424eec83, 0x277ea57e, 0x869fffb6, 0x0c7ebfa7, 0x23690878,
- 0x5e08381f, 0xdb98bb71, 0xf97efb7f, 0x814cfa49, 0x20392af8, 0x82f60e7f,
- 0x1177fe9d, 0x6122651c, 0x2eb7e8de, 0x63ec8622, 0x69b77ca0, 0x669dfa3c,
- 0x7317f4f8, 0xe6b7c52e, 0x042234fb, 0x9c46bf68, 0x80fe0aa4, 0x02f530f3,
- 0x788cabc6, 0x6ee49749, 0xc6f4c611, 0xbc78ea4b, 0xa1d1c6b8, 0xb9d16904,
- 0xee4bdcb6, 0x38699e9f, 0xaf39c2a6, 0x02ab470a, 0x64454fce, 0xb9e80954,
- 0xd2ab6d73, 0x12ac1ec0, 0x2f15dfcf, 0x67679b7e, 0x6125bd71, 0x8ebcdbb9,
- 0xb420fe72, 0x1d9ca35f, 0x8a9f7472, 0x31b59fc9, 0xefdadb3b, 0xf6c0fb04,
- 0x1f776a2d, 0xd683769f, 0xfbb54f17, 0x3e30553f, 0x5c53112e, 0x7bdfc0c9,
- 0x9d82604a, 0xd772a9e2, 0x7e8f68fb, 0xdc468724, 0xd4fd097d, 0x9f6e7e77,
- 0x88727ce9, 0xf338c3b4, 0x8881f227, 0x743bcb76, 0x15f8fd3d, 0xe4db9d99,
- 0xf67b80e7, 0xfebef995, 0xdf9b9e02, 0x84e78556, 0x8ff7de3e, 0x242dff68,
- 0x81d22f01, 0x1d8116b4, 0x4ad88e79, 0xc8e74f01, 0x31f1de6b, 0x3a95b874,
- 0x05ef404a, 0x863b662d, 0xfca6bfda, 0x7ce6cde2, 0x06999939, 0xa37ca4fa,
- 0xce02c24c, 0xb37f54cf, 0x177ec55d, 0x5826fa93, 0x8fee2557, 0x348957cc,
- 0x9ad5ea84, 0x34567f67, 0xb411c28f, 0x88c83f33, 0x66492cbf, 0x4a656076,
- 0xc27d8158, 0x7fc6da0f, 0x832006f7, 0x7104dc3d, 0xfc489228, 0xfc5f483b,
- 0x56fc295e, 0xbaf58d78, 0xc3881c4f, 0xe212ee21, 0xdad603ee, 0x86cf6e05,
- 0xe7c03d30, 0xc705f86d, 0x7615f92c, 0x2452de7e, 0x9cfdd022, 0x12d908ea,
- 0x851fdeb1, 0xbb5edc78, 0x7106957f, 0x7b6ac0fc, 0xdfdc6f6a, 0x0f649768,
- 0xe159f962, 0xf73c72d5, 0xf00b4520, 0xef8f21de, 0xc41f9c2b, 0x9c016f04,
- 0xdc40f523, 0x3a75ce1e, 0xdf215bbd, 0xf3c0a9e4, 0xef20751f, 0xbc7bbfb4,
- 0x9c418ed3, 0xa35a41da, 0xbb5fe53a, 0xa01fb2cf, 0x902ef399, 0x5067d04b,
- 0x09a2e780, 0x235cb9c6, 0x7e3cedcd, 0x2e31fe73, 0x4381748f, 0xdf39ee3e,
- 0xe14770e6, 0x2899093c, 0xb7f1ed9d, 0x63dfc072, 0x56fdc3f0, 0xcaa77f68,
- 0x7df2d7da, 0x9edf3bdd, 0x097c9e9c, 0xa5f7547d, 0xeb31c240, 0xadd49d48,
- 0x7dd61f00, 0xd7f96129, 0x5d1a9bd6, 0xe6f46355, 0x59f04a2b, 0x27a604fb,
- 0x8e813e01, 0x64dfbb13, 0x027087bd, 0x2f4261f8, 0x386d7c3f, 0x7f498852,
- 0x8f04a7bf, 0x6be383c5, 0xd3ebeff7, 0xbbe009ff, 0xf3feb1ff, 0x11dff4fa,
- 0x679afe0f, 0xd10becf7, 0x17f2b5f5, 0x10e1780f, 0x6b52c7d3, 0xacef9c1a,
- 0x0bdab1de, 0x1491f972, 0x3f5d02f2, 0xef718b0e, 0x0c3ba6e7, 0x70ddd3be,
- 0xf396b08f, 0xaf9f99dd, 0x2ff836fb, 0x5590ef9f, 0xb2ec0f8c, 0x2994ed47,
- 0x3096db6b, 0x7f65d8f9, 0xf19be59f, 0x7edd53bc, 0x7ca8beb3, 0x0d3481fc,
- 0xb885550f, 0xff451cff, 0x7ffb76a0, 0xabbb034c, 0xdea3748e, 0xe3077eb3,
- 0xf68c997f, 0xb1253cc0, 0x0e71bd6f, 0xa5e38eab, 0xde1d6dae, 0xbb6baf13,
- 0x9a1334f9, 0xad3bf40e, 0xbb7055df, 0x9f30e1df, 0x42f034df, 0x5feeccc2,
- 0x0583c4f5, 0xceb853ef, 0x2bc57f6e, 0x29f2c7e4, 0x23e77fd3, 0xa5fb1e0b,
- 0x755cbfd6, 0x6b18fd52, 0xb4eb1b5f, 0x2bfb9fb6, 0x2d5a5807, 0x0fe0bd47,
- 0xf1916f56, 0x9ef57bbc, 0x861f3b6d, 0x76dbccf8, 0x7fda70ff, 0xde979ed5,
- 0xabe69eb8, 0xa653929e, 0xe1b869b3, 0x9f41a29e, 0xf2dffa3a, 0xa8f4bd71,
- 0x4525fa7c, 0x7fcacd1b, 0x974bdc9c, 0x290ea7aa, 0x7c5f7464, 0x521c894c,
- 0x85b67ed8, 0xa7f7913e, 0xeab15f81, 0xd0142f89, 0x141f1f5f, 0x1c767a48,
- 0x09796e7c, 0x13c063ed, 0xabd393d2, 0xf6967e87, 0xa82e9475, 0xb2f42d17,
- 0xaf98fbb6, 0xc42fd007, 0x3f7f29e3, 0xb7d84bf9, 0x0fdd9df9, 0xf278a878,
- 0x9f30bc9f, 0xe33d52d3, 0x4ead9ff3, 0x1520fb83, 0xc54d2872, 0x3c579594,
- 0xbfe403fa, 0x907fc7c5, 0xeac5cec6, 0x575cfc19, 0xd173cc06, 0x9e707323,
- 0x7c8824e5, 0x7633f158, 0x4482fccf, 0x238a9f4a, 0xd839f727, 0xab7a763a,
- 0x4f11371d, 0xfb0242c3, 0xe189af18, 0x640f181d, 0x607f0de7, 0x65913fec,
- 0x60e0bf98, 0x30e45af4, 0xa5d1f1ee, 0xb77f9624, 0x9d03b737, 0xd3d50cce,
- 0x475c8e21, 0x87c710a4, 0x2bf69170, 0xce9a9f1e, 0x52681dcf, 0xea8eff11,
- 0x0e5029ff, 0xdd65dbed, 0x8efa6059, 0x1deecc5c, 0x3e3e4eff, 0xec013b85,
- 0xd469d3d7, 0xbf338c1e, 0x9d630e03, 0x0c5676a6, 0x3bc8c59f, 0x6bedff93,
- 0xf21bbc98, 0x0a519e1f, 0x609d9ff5, 0x81df0472, 0x8ae142fd, 0xa1fd63a6,
- 0x07ea02d9, 0x93c6893b, 0x9732edf3, 0x95bd7373, 0x12fc285f, 0x7c3dc7ac,
- 0x2324e303, 0x2dbed01d, 0x8fbf48df, 0xae30abd5, 0x86bf6fb7, 0xfae62f1c,
- 0x1f45ad60, 0x07524790, 0x2d5b7fb4, 0x0c3e7787, 0x35e54bf2, 0xcafc81a4,
- 0x27c15ef8, 0xf1bc8fbb, 0x9fb72a3d, 0x35b90c44, 0xdcab55eb, 0x409dec56,
- 0x27edc9e2, 0x2b893f6e, 0xae3cb4b7, 0x903bbadc, 0x7ad6ff9f, 0xd3e3c0d5,
- 0x7a1a3c16, 0x5a7c3fb2, 0x1db7e4f5, 0x493d5c5a, 0x45209dff, 0xe0d1f97d,
- 0x2aff8343, 0xcf06a5ff, 0xa9f0f50b, 0x7c3d87c1, 0x9f61f06a, 0x8f09a478,
- 0xe1bbfad6, 0xc0853a6f, 0xcfc63273, 0xfdb00fab, 0xd1ddfa67, 0x2f888521,
- 0xd239971d, 0x4a48747a, 0xc96c3e6c, 0x75ed2c47, 0x69603e4b, 0xebe4b41f,
- 0xf7abed4d, 0xb9d8511f, 0x9da9a89e, 0xe4a7fcb8, 0x01f13883, 0x6baa1d63,
- 0x010954fb, 0xf1bbb87f, 0x0925797b, 0xfe5e2079, 0xf2f188bc, 0x26e38a2e,
- 0xeed74e3a, 0x608f7c6c, 0x57c593b5, 0x2f6ed4ba, 0x93ab93d8, 0x553bbe58,
- 0x683d0269, 0x593b7794, 0xd02bafdc, 0xb18a4ded, 0x203fdeef, 0x08ef1ea2,
- 0x7e78d293, 0xa140de28, 0xfd20edf8, 0x80fdb3d5, 0x61d7b02e, 0x30ac84bc,
- 0xd154e3f0, 0xe21cb59d, 0xde22ad35, 0xe2b85b6b, 0x239c2f16, 0xfb903ae9,
- 0xbe5a329d, 0xdb22d7e8, 0x52e90ca6, 0xf81f8c46, 0x9a6d0911, 0xf5fec024,
- 0x059fe47a, 0xb85f9807, 0x797c7d70, 0x95dfe4a8, 0x4054efbb, 0xee7f52ef,
- 0x87beec64, 0x23c54fd1, 0xff03f296, 0x15a6e5c8, 0xdb951fe3, 0x1e41f5cd,
- 0xe4ec183f, 0x92c7bee7, 0xb77fdcb1, 0x9e0fdec5, 0xa77fe62a, 0xeba7d28c,
- 0x3ce04898, 0x2203f9c1, 0x6efce7d2, 0xe3007e76, 0xbf01d7fc, 0xe7b12b77,
- 0x7c82cea9, 0x1ebfd55d, 0xefccfb3b, 0x3e06ee8b, 0xc14ef7da, 0x767a694f,
- 0x7e23dbdf, 0xdf67f905, 0xf4877acc, 0xa0e53f6d, 0xba55f713, 0xff907a0e,
- 0x4dff9ebb, 0x7f90ddd6, 0xecf18ece, 0x145f83ae, 0xad753f00, 0x1e8057b4,
- 0xefe7e2ec, 0x45ff3d56, 0xbfae0741, 0xa9c7488d, 0x9f4fe64e, 0xef5ff03f,
- 0xf381fdc1, 0xc0ace807, 0x42e838be, 0xa5fbaaf9, 0x5d6fe313, 0x14517fcf,
- 0xa5fc27eb, 0xfbe5a9f3, 0x521e5d9d, 0x4be017b6, 0x7544fb62, 0x1b6ebabf,
- 0xd3512fbc, 0x40594876, 0xf0bca7eb, 0xafd002a7, 0xdd997b5d, 0x3c7729d4,
- 0x8179fb0a, 0xca340f35, 0x209d5e94, 0xff698364, 0x0128de6b, 0x978bea39,
- 0x715c613f, 0xfc58f8a0, 0x043e0d7f, 0x4f3fe99d, 0x29854c18, 0xdef8ff07,
- 0x0e27a03b, 0x8d2f91da, 0x59127ef9, 0xe5ccf681, 0xfff4dde6, 0xe885bcdc,
- 0x03bde640, 0xefe13de6, 0xc0d7de77, 0xbed4a1a1, 0xcf97d072, 0xf30bbf9f,
- 0x847e3c7b, 0xfefc8077, 0xfcf9dfd2, 0x7bee98af, 0x97bddd29, 0x7f87f79f,
- 0x9feef3e7, 0xcb9ebfee, 0x79c2aee9, 0xfe17ba98, 0xa95df084, 0xfe12939e,
- 0xbfbde5be, 0xfef61bf9, 0x35bf9b5a, 0x93cf1b27, 0x70b0c03a, 0x40b6667b,
- 0xc8ed7178, 0x9dd82a99, 0xd5ef3f62, 0x7e60484c, 0xf7b02895, 0x1650c821,
- 0xcfdc240f, 0xf80d6382, 0xdd9b880e, 0x4ddc933b, 0xc9137768, 0xbc53aedf,
- 0xbe7abdac, 0x911acf1f, 0x21056f71, 0x8fc9399f, 0xbf8b6ef1, 0x5d1028d9,
- 0x74aef6a0, 0x818f37f5, 0xb48f23df, 0x9805ed45, 0x33690fbe, 0xaaca638f,
- 0xdc87f262, 0x53bce6f9, 0xbcede733, 0xf061073f, 0x0c247c3b, 0xd4716cf1,
- 0xd3ce1561, 0x02256389, 0x4938a54f, 0x0efc086b, 0x7dfccfbb, 0x7ee10252,
- 0xc7865fef, 0xa716048a, 0xed718512, 0x9471e03a, 0x78dce30d, 0xe2f11b48,
- 0x08baf7c7, 0xf17baff7, 0xf80ed4da, 0x663fc094, 0xd5fa17f6, 0x12d3fb84,
- 0x691a42ef, 0x76e69dd3, 0x2cbe8fdc, 0xb25d189d, 0x969d39aa, 0xd062e899,
- 0x4c7d0e21, 0x7cf03174, 0xd2b1f4a5, 0xebff8ac5, 0xf75f1813, 0x2e832f47,
- 0xef1d6e99, 0x7fdcc9d7, 0x133f0128, 0x820199f2, 0x7fdcabfb, 0xbc745290,
- 0x7eaa24a7, 0x7e0097bc, 0xe01a945c, 0x86deef2f, 0x3f73a4f1, 0x00dbff7f,
- 0x44afd54f, 0xebf8a0e2, 0x5121eddc, 0x2b0a5ef8, 0xb6569fe0, 0x404fb889,
- 0xacd168a4, 0xc3627d98, 0x85faa80f, 0xb953e707, 0x4e9e1ed7, 0xf7dcafbf,
- 0x61da0141, 0xcfd1bb2b, 0x605cfd09, 0x14750f74, 0xa5703ec0, 0x2b11fcc4,
- 0x6049acbf, 0xcfb396f1, 0xae20cab9, 0xe762ec23, 0xb7232b7f, 0x7398f6c8,
- 0x9904a14d, 0x739e9bc5, 0x3d085ea1, 0x28f9e021, 0xdcf62f80, 0x9c87e1a9,
- 0xf060427d, 0x9df197ed, 0x4f5dca98, 0xaa7c4275, 0x1fbb2df2, 0x5dbaf8cc,
- 0xc87ee29f, 0xf1ec5f94, 0x2e1fa076, 0x7d12e29a, 0x44bb01e2, 0xb6da8fe4,
- 0x6843f41a, 0x36a2f91e, 0x2beffdc2, 0x7eab57f4, 0xb83efcf1, 0xf4f55670,
- 0x7f885ee3, 0xe1df699d, 0xaec0b8c5, 0xfae7c74b, 0x3591fff8, 0xd6ffffdc,
- 0x3b4f7669, 0x067fffbe, 0xf176a0fe, 0xfb9609d3, 0xb106bbab, 0xb44914f7,
- 0x71ef52e8, 0xf0b9ed55, 0xcfafc428, 0x51e4fdee, 0xcffabb80, 0xfc14787f,
- 0xa9d0720a, 0xba827dc2, 0xf18ebf9f, 0xdfbbe33e, 0xf9d03d70, 0x2f18e3c4,
- 0xfa9b7ced, 0xe75ff41f, 0xc0b3a7f3, 0xea7ceccf, 0x4f10698f, 0xa9f9f3b9,
- 0x9dce6ef8, 0x27494ccf, 0x9189f471, 0x0786ff02, 0xd2b5af10, 0x11db48ed,
- 0x51ce7ff4, 0xd9ff83ba, 0xd489d713, 0xc69978b0, 0xe3bb39e3, 0xc4fbc7c7,
- 0x7d66da6f, 0x4842c6e7, 0x0646bf65, 0x4139c710, 0x006639e9, 0xe3cddc74,
- 0x5d6f9175, 0x0e738e32, 0x3af4a0fe, 0x85e3a16b, 0x3d8f45b6, 0x836d750c,
- 0x44e38dfa, 0x233f8007, 0x413fbefe, 0xe40122ff, 0x60bfef68, 0x37e80cfc,
- 0x73b84e9d, 0xd82c85cf, 0xee48f8bf, 0x85ef8b9e, 0x21576f3c, 0xfcf9511e,
- 0x9d4f289b, 0xf8c71ccf, 0xf071e136, 0x8ff3d24e, 0x99f92bc5, 0x1eedbacc,
- 0x74e1ff16, 0xe690f880, 0x071e72c5, 0xc46d7c62, 0x0b8bfa87, 0x73b1718d,
- 0x40bec627, 0x7877f6cd, 0x6e97a5bc, 0xd7a044c2, 0xcfdc97fb, 0x02a0f030,
- 0x8d8dae1e, 0xc38fc67b, 0x2d3dc4f5, 0x427a0374, 0xae27b3bc, 0x7b13d849,
- 0xcde16175, 0xcef47178, 0xda5e2c2d, 0x8f3fc729, 0xaf41c465, 0x9fe25976,
- 0x3fc581e1, 0x2b8f372f, 0x9760d3c5, 0xf15afd86, 0xf8abf675, 0xae5bfdfa,
- 0xe85f9d81, 0xdaad7f77, 0x3de61f7c, 0x05dd3825, 0xef6d6bfb, 0xa7cf042b,
- 0x2b73b374, 0xf967be7c, 0x3fb3d3f9, 0xd62e3117, 0xfa823914, 0x8c3faadd,
- 0xbc2b57fd, 0x6b787077, 0xe3f5b3f7, 0xd84f5eec, 0x7b39fb4d, 0xebe439f8,
- 0x63efddda, 0xaee8ee5c, 0x988e95a7, 0x8fd616f5, 0xce7ccc70, 0x80a8793e,
- 0xdfe379c5, 0x3171ab1b, 0x40eeb37b, 0x76ea71fc, 0xa71a6a7f, 0x9851142a,
- 0xf1e9701d, 0x2dfa48ce, 0x7699dfd0, 0x3a4ddf19, 0xbee31113, 0xc9701c17,
- 0x95a11f7c, 0x475d0fc9, 0x71c09ef1, 0xf0a77f76, 0x573c04b5, 0xd2f1e77f,
- 0xb7fde077, 0x8358a93b, 0x82b6dfdd, 0x4fdb69f1, 0xef4021f4, 0x13dfe3b6,
- 0x07dafd61, 0xffb18df8, 0x4fd44e8b, 0x50fd50f1, 0x10a06ef4, 0x77aad5d8,
- 0x66cb7dae, 0x938e9f82, 0x2342eb0e, 0x83ee07ed, 0xbc39b9c0, 0x569dee57,
- 0xaa88f00a, 0x7d0a754d, 0xf9bb21e7, 0xafde557d, 0x21ef9aac, 0x6bc6af90,
- 0x266f8fd0, 0xcc664b8b, 0x9f165232, 0x87f7190c, 0xabc213ca, 0xaf7f7ce8,
- 0x28bb3706, 0x8d9e7c5e, 0x78112fd9, 0x06a26a27, 0x2e63d3f6, 0x4bc1de7e,
- 0x83bcecbc, 0x7aa3643b, 0xf6ff61fd, 0x9e08930d, 0x7c06d867, 0x819e73d9,
- 0x2f800b44, 0xea7d768d, 0x321de72d, 0x5a2a13e1, 0x17338722, 0xfd5025ee,
- 0x77866ab2, 0xf11fe42b, 0xb2cdf7b0, 0x3821f748, 0xdb8db0df, 0x1560d9a3,
- 0xe77df6e7, 0x6fbeebec, 0x78a53296, 0x853f30e9, 0x535ff5d3, 0xca7c5ce1,
- 0x0d4fc52d, 0x2d856951, 0xa3dc1a63, 0x78bc7949, 0xcfcd066c, 0x34f4ff5f,
- 0xe2f7ffac, 0xff3459b1, 0xb20e7b93, 0xcf012afa, 0xaeb0bec1, 0xeff01a76,
- 0xd9fb96d7, 0xb9663314, 0x77f0057f, 0x77eef02e, 0xae706917, 0xefe15ba2,
- 0xdda00465, 0x7f0c89d0, 0x9f77ee99, 0x98657f02, 0xf00563ad, 0x7fcf63eb,
- 0x16f78491, 0x2aa1fbf9, 0xdce15469, 0x789a2d15, 0xf3e712f1, 0xccfbbcfa,
- 0x498dcbf5, 0x3f8177c1, 0xbc8de79a, 0xef1d1a6f, 0x0e718ae1, 0xf2d00e76,
- 0x829aa34e, 0xe9c85d74, 0xcb3df84a, 0x8b3e7c45, 0x02c290c5, 0x7f5caddf,
- 0x5751ef19, 0x1ef4578b, 0xdbab8735, 0x67fbefd5, 0x3378007f, 0x007f8293,
- 0x3fdf59fc, 0xb815c57b, 0xe519f500, 0x5c7de25f, 0xeb333de3, 0xea4468bb,
- 0xfb3efb8e, 0x5a2efe56, 0x9bfef07f, 0x69ba283b, 0x682f39ff, 0x39518f7b,
- 0xf150b31b, 0x31cfd1c3, 0xd2fd0bcb, 0x0426c220, 0x1fa71076, 0xf788cf3c,
- 0xb822ee1f, 0x1765db5f, 0xbbaff074, 0x7d4549fe, 0x3a70b99e, 0xae5d5ffa,
- 0x339c08ec, 0xc35bbaea, 0x018a3d7d, 0xe869e401, 0xf828c481, 0x3e1e5487,
- 0xe7c3c8b7, 0x8f37fe66, 0xdb5175db, 0xa9fd81df, 0xa06a3fbc, 0xed8bdcc5,
- 0xee9e9912, 0x8e10d06a, 0x087424a1, 0x8e81fdd6, 0xebe6e397, 0x46cfa737,
- 0x9eeb9e9b, 0xee1ff880, 0x28ffc18e, 0xcc74bf77, 0xe63a3377, 0xd1d0e3bb,
- 0x7bdd6efa, 0x6858e0ae, 0x28fcba77, 0x7d675fbe, 0xdf4aceaf, 0x21f5a93a,
- 0xed2b3b78, 0xfeb8a7c0, 0xb31bf418, 0x29d22c7c, 0x00939e86, 0x3e07318e,
- 0x06bc01b5, 0xe7ec1f1d, 0x9ba9793c, 0xf1d673ad, 0x063acad2, 0x9fb4e307,
- 0xf74cd6e5, 0x451c0a0e, 0x69ca897e, 0x87c58dba, 0xb7efb6fa, 0x9d4b9cff,
- 0x59502cf3, 0x73ff14bf, 0xfe064f00, 0xfbf3756f, 0x7ff17dea, 0xf8a3b43b,
- 0x3fe61dbf, 0xbbcffc00, 0xfb0dfe14, 0x87eb8abf, 0xa29fd82a, 0xda1ff47c,
- 0x3a1cb4cc, 0x8dd134ee, 0x8764b072, 0xd931f7c8, 0x3e865ffb, 0xfd5d7259,
- 0xb917b821, 0xf20267e4, 0xfefe42eb, 0xfdfc27e5, 0xc1b9eb4b, 0xea10b2f2,
- 0x1f9fcbcb, 0xaa3ea30c, 0xff2e65da, 0xbbcf7767, 0xde785997, 0xad352417,
- 0x2b4ebdf7, 0xb9bbcffe, 0xb4e858de, 0xd72f7bc5, 0xdbd68748, 0x33cce74c,
- 0x83f4f4a6, 0xa6cfa0fe, 0xb8f189bd, 0xc409957f, 0xa5297413, 0x2019de10,
- 0xf4094285, 0xd7e5cc5b, 0x72d86fe8, 0x1d9ffaf3, 0xcc43df32, 0x43df316d,
- 0xbe6ade1c, 0x66d57887, 0x51c43df3, 0xc43df361, 0x338d766b, 0xae4747e5,
- 0xb31fb537, 0x3f29b27f, 0x534dfa36, 0x66c7f1fb, 0xda13f29a, 0x7f6a67bf,
- 0x4df35bed, 0x5475d7f5, 0xf86fea9a, 0x7f299968, 0x9b3ff763, 0x311747da,
- 0x61b878fd, 0x5bdc1179, 0x7872f030, 0xa15362a9, 0x9b08e97c, 0x9e5b105a,
- 0x90056ba6, 0x7fe0e916, 0x532f27f5, 0x8a2a3f1c, 0x898ba75d, 0x536fd26c,
- 0xd47ce61c, 0x8f7dfe6d, 0x35722c97, 0xcb527ea4, 0xbc1d9a08, 0x63cf0f24,
- 0xc63af953, 0x01b1dbf5, 0xa0dfb7ea, 0x9b46d57c, 0x2f956f20, 0x9a01bde3,
- 0xad8fd886, 0x1f9f77b0, 0xd4f99a72, 0xf95e8fd7, 0x0738db9d, 0x53facad4,
- 0x264bc6cb, 0x3fca3139, 0x172d8c2a, 0x73c74f16, 0xabf5fd40, 0xb35c16f8,
- 0x3bbc107d, 0xa3530f3c, 0x16cca9bc, 0x67dfdcf7, 0x7fd0bfee, 0x8ec7dcda,
- 0x7cf00cfa, 0x26daf7ce, 0x7b56bf38, 0x51fa377b, 0x5e337619, 0x5decf37a,
- 0x479e0363, 0xac3e481a, 0xb6fdc6f7, 0xebe13445, 0xd177ce36, 0xe0d333db,
- 0x3f66419c, 0x87a155fe, 0x1b47ebe9, 0x042cd742, 0xe98711c2, 0xb456795e,
- 0x39f81a6e, 0xeb79f8c3, 0xe7f8b64d, 0x0c3c92a0, 0x48a0e92f, 0xdd505ec1,
- 0x2b9c2f68, 0x4f0bd77f, 0x0f68dcc7, 0x38e87926, 0x17b4c7f3, 0xbad4fbb1,
- 0x5ee8e67f, 0x947d0db8, 0x026c0ee5, 0x93e592fd, 0x17dd9688, 0x83dbf49e,
- 0x788ef02d, 0xefcdb263, 0x00b77cc6, 0x4c7fd9e3, 0x5e3a20c8, 0x84459fe3,
- 0x75f0f1af, 0xeec83e78, 0xabb8c7ae, 0xcfc24f31, 0xe6711809, 0x89d9be0b,
- 0xabc6cf80, 0xe107ee7b, 0xdc17907b, 0xf7e876c1, 0xc71946c2, 0x982c9c6d,
- 0x4b798dea, 0xf78dbf9b, 0x6cdcb2be, 0xde74e5de, 0x933db700, 0xa8f8d5c1,
- 0xae113240, 0x28f4bdff, 0x5cfb35f8, 0xc507def8, 0xc7c010c7, 0x73fb3d39,
- 0x3a79319b, 0xee419aaf, 0x9ed16bad, 0x4ea7ef89, 0x8fa1e637, 0xe33bd134,
- 0xe2fc332a, 0xc6fd1946, 0x3c6e4945, 0x886e10ef, 0xe79afa72, 0xf99be4df,
- 0x5c295b9d, 0xf8884eab, 0xd1d6b6de, 0xfd28f4ba, 0xe5cdbcae, 0xf17d6b0f,
- 0xf0634e7f, 0x7ba7f852, 0x4f36de40, 0x79e3f9c3, 0x31ff806a, 0x0d9c3c81,
- 0x452c17fc, 0x236c183e, 0xc7d36fb0, 0x7930f04e, 0xd951ed9e, 0x72be357b,
- 0x85be8726, 0xfda76cd7, 0x5dfa7995, 0xb0f36ff7, 0x4f36ff75, 0x72ff759c,
- 0x975707ef, 0x54851fb5, 0x37b445d4, 0x111fa908, 0x3d53476b, 0x1bfd0e56,
- 0xc7f13179, 0xff6fd005, 0x65bf9a76, 0xede5f644, 0x80cfb034, 0xecfb0d0f,
- 0xaed98f4e, 0x3fbbd18c, 0xfbbd30f4, 0xf4c0cf43, 0xfda18fee, 0xe345efe9,
- 0x4aa935da, 0xb4f7bd7c, 0xac1fdd87, 0xe7821553, 0x0fd9fb03, 0xdae5c9d8,
- 0xd6feef3a, 0x1ff9cba5, 0x4f832e39, 0xfcfefacd, 0xf4112d31, 0x4207ca54,
- 0x86b777dc, 0x3c6dbf2c, 0xad9b6bd0, 0xf7cc3378, 0x78fe1667, 0xf59f4d68,
- 0x18f1cc2b, 0x6ac78b8e, 0x099b478a, 0x973c3b8f, 0xb6bf0fb0, 0xdf30afbe,
- 0xf8e4e9f3, 0x09bf7aa6, 0xeb373bc6, 0x6b8822bd, 0x1cf9ced4, 0x1ed7fef5,
- 0xaebccca7, 0x1d397e18, 0xa1e00567, 0x799fd673, 0xe303ae7c, 0xb99eab4a,
- 0x58e2112a, 0x7a0d9335, 0xaa7df044, 0x4839f9f3, 0xecfb7397, 0x2e79c03a,
- 0x3d139d99, 0xa3daefb7, 0xa4f8f710, 0x2c7258e1, 0x793dcf7d, 0xfda648bc,
- 0xbc7bdb9d, 0x7703c248, 0xd43bd361, 0x5b7f1735, 0xdef77014, 0x37e1e84b,
- 0x368f5b07, 0xe57c593a, 0x37173841, 0x6abfda86, 0x7a65f212, 0x285eed9a,
- 0x1dbf1d17, 0x395c21fc, 0xa62fe63f, 0xd04ab259, 0x7ec11633, 0xee373cd3,
- 0xc972f967, 0x9dfa6cc8, 0xe387ad12, 0x6af9c4cc, 0x04efc098, 0xf1444eb8,
- 0xef3c02f7, 0x3ef86076, 0x02b243f1, 0xbdffc29e, 0x78093c1a, 0x09ab3a4f,
- 0x77c41a89, 0x471e6e8a, 0xd30a6953, 0x3ab7ddbf, 0x848cf7f0, 0x53aaaf05,
- 0x3cdafea8, 0xdd8efd93, 0x920b63f9, 0x55691fc0, 0x0df2de99, 0xb463e1eb,
- 0x6be7078b, 0xe6231737, 0x40577c75, 0x921d1955, 0x8a6b6c33, 0xf9c78c35,
- 0x25fe98f3, 0x3dff1a52, 0x1eb7ca29, 0x1c43a6cd, 0xc5cef8d8, 0xfc522bbf,
- 0x7803cebf, 0xf0fcdb46, 0xabfb8f4e, 0x92ca386f, 0x28ccfb69, 0x945e5fbe,
- 0xc39de1c0, 0xfbef07de, 0xd345e34e, 0xd4b5187d, 0x27b74efc, 0xe214b4d0,
- 0x6defd0fc, 0x8bcef8c9, 0x38a29eb9, 0x0bbb9691, 0x0aaee5cd, 0x47e18f9a,
- 0xf6d677eb, 0xae925f86, 0x37f63832, 0xbf0f1038, 0x79fd506c, 0x340e8f94,
- 0xb07f30f8, 0xda34c341, 0x0fcb1230, 0x2f31f837, 0x986cf4da, 0x7faffa4f,
- 0xeebb1490, 0x9d54efc2, 0x75c3e18c, 0xf9ade472, 0x88df8f80, 0xaf15af88,
- 0xdefe478e, 0x3a8e9a34, 0x7367bd86, 0xfefec632, 0xd257f2e9, 0x0d5f284a,
- 0xd3afd1f9, 0xefc3c64a, 0x17f3b096, 0xf63a4170, 0x257aee1e, 0xe093f9a0,
- 0x34c63477, 0xfbc2863b, 0xc95bc1a4, 0xfd239458, 0xbbf089c5, 0x6c337b0b,
- 0x949a7eb4, 0x07f8ecd7, 0x64deab80, 0xe7e7afe7, 0x6c9f1f33, 0xbce3afd6,
- 0x0a66ff4c, 0x71bce3c5, 0x537e09f8, 0x39acfd00, 0x3307b9c6, 0xbf54891e,
- 0xeb8a76e1, 0xdfb3efe6, 0xc754485f, 0xf7bde1fb, 0xcec35ecd, 0xbec5f7a9,
- 0xff99bee7, 0xe1e2440e, 0x29176825, 0x103bf63e, 0x0f7b0a29, 0x4d930489,
- 0xe5cdedef, 0x0606ccfd, 0x1f57d09a, 0x15fde6cf, 0xfb0e791d, 0x8897df83,
- 0xf087eff5, 0xccd8c3df, 0x3574d3f5, 0xed4dec50, 0x5bd4676d, 0x7759ebbf,
- 0x0bd1216d, 0x890143f6, 0xed04bd80, 0xd4bb698a, 0xdc02030f, 0xd7ce33ef,
- 0xcdf80b3e, 0x93ddfdef, 0xebc3027d, 0x8a7d3c93, 0xd4494eff, 0xe3079813,
- 0x123c16fb, 0xca7603da, 0x6206bb3e, 0x8fe47452, 0xf8947ef8, 0xe701c53b,
- 0xbc0f53fc, 0xd9e5eecf, 0xbf38143a, 0x7c5bd2b8, 0x4551c413, 0xf3033ea5,
- 0x816f4ef7, 0xfa9e83fc, 0xdda3d887, 0x1f0137c5, 0x06f7f3a4, 0x01000408,
- 0xe1aa080e, 0x43fd638f, 0x64654eb3, 0x2bf2cc9f, 0x873637bd, 0x3f5f2132,
- 0xf208f80f, 0x7dbfb48d, 0xc021c149, 0x86e16e47, 0x423763e6, 0x037f68de,
- 0x5789ffb6, 0x3b8ffe65, 0x4af60d98, 0x79a55e4f, 0x625e4c4f, 0xa960e279,
- 0x239abf31, 0xd073c47f, 0x3bd807b5, 0x6607a95d, 0xd4cf3008, 0xff1033fd,
- 0xbcb7d5e7, 0x878c342b, 0x04eb608f, 0xf41b978b, 0xa6e1ee30, 0x79f783d9,
- 0x9d61f863, 0xc4c76cd7, 0x0fbc27dc, 0x1fd3ddf0, 0x36cbaf8f, 0xf74a9ef6,
- 0xc6efd88f, 0x547b030d, 0x86b9b884, 0xa3dc2e69, 0x70c1fea7, 0x98395c12,
- 0xcde35fd6, 0xb55e3a41, 0xa9cdfb3b, 0xa75f2f5a, 0x80772964, 0xf8b54e3e,
- 0x3f5cd935, 0x7f8b508f, 0x372b4893, 0xf0b5c10a, 0x8f686c9e, 0x937de2ac,
- 0x4728195d, 0x228eb967, 0x5f3183f5, 0xcbbfefcd, 0x476b832b, 0xc4591e81,
- 0x76556ef4, 0xdfa658a0, 0x8d973d57, 0xf0670bbf, 0x2452555d, 0xf7bb9c6d,
- 0x3791b75c, 0x21e4477e, 0xef09d5b5, 0x5afb18f2, 0x6437fbb5, 0xc7fd3ec1,
- 0x3b46de87, 0x36624971, 0x76d359c2, 0x69df815c, 0x7ca36da9, 0xd8fbbf47,
- 0xa3c763d9, 0xc887f25f, 0xbe026fa0, 0xc368d0fe, 0xd9fdddcb, 0xfd522f55,
- 0xea85d3a6, 0xd3038368, 0xd5fd7a9d, 0xf09c0ae0, 0x49c9b82e, 0x1cf1e9f9,
- 0xeafe055d, 0xf51eb7bc, 0x3a8ae3d8, 0xd3aff80a, 0xe066c3fb, 0x9124aae7,
- 0x5f0febf3, 0x7ef8f3d6, 0xb9efcd3d, 0xbfa6edb7, 0xa160df68, 0x7ba9fed9,
- 0x41fc8dc4, 0x02df92ed, 0xb66ed51f, 0xb3fd6085, 0xbef1da39, 0x1be82773,
- 0xfbe65fd4, 0xa6051b99, 0x5873430f, 0xf772fa1c, 0x74be2b34, 0xfd8c7091,
- 0xe99124bd, 0x84290aab, 0x5f1576fb, 0x2ffeb17a, 0xba69c71f, 0x1c77da0f,
- 0xe31bd637, 0x838ef754, 0xf6fd527c, 0x5fcff461, 0xafb0dabc, 0x77ffd475,
- 0x61cfc53e, 0xe874f538, 0xd0d941e7, 0xa4cfd427, 0x1ee78678, 0xf9243e79,
- 0x47a97908, 0x2e6dacff, 0xf6fa04c9, 0x7eb313d6, 0xb74a25d2, 0x3e781297,
- 0xc9737f74, 0x13ed38a4, 0x8c73ed2c, 0x7fb14ffc, 0x3a0f3c2b, 0xda0ef37b,
- 0xbd93fa5e, 0x6df00f27, 0x4bfa59b0, 0x86dfc636, 0xb7fc19fd, 0x65fbbbc7,
- 0xa5e9ef78, 0x10f140a4, 0x2068df66, 0x15245b87, 0xc0d1877f, 0x3ebe1ef3,
- 0x8d797c55, 0x78bdff09, 0x9f95302f, 0xfb4cd06e, 0x2e178ba1, 0x7b3f7bc3,
- 0x41563af8, 0xdb3eec42, 0x7da6b923, 0xc70fffd0, 0xb8a385d7, 0xfe4a381f,
- 0x328c70c6, 0x385e81e9, 0x5fe54df2, 0x69310e17, 0xec366976, 0xa1b1ad53,
- 0x0b66909f, 0x6ec07239, 0xf7ec5ffb, 0x0734201a, 0x61a899e7, 0xce57da6c,
- 0x079f6039, 0x448e79c6, 0x40e3498e, 0x056d749e, 0x913f53ac, 0xe5a1afd0,
- 0x8619390e, 0xfde1d56e, 0xd8952a2b, 0xb0754e5f, 0xe6f51986, 0xab876277,
- 0xe5a258a4, 0xb879d5e3, 0xdd94e5ee, 0xfdf60755, 0xcfe17736, 0xbc931357,
- 0x5ab27da2, 0xc7f68a58, 0x9b17c49a, 0x3d38df61, 0x0233d981, 0x58f9b179,
- 0x267abc46, 0xbdf72daf, 0x31c41378, 0xaed4aeb6, 0xab6e7f06, 0x0c391c33,
- 0x6e3df9eb, 0xdcaa3dfc, 0xf816e175, 0x83710abd, 0x314bd70d, 0x2b6bbc29,
- 0x6078179c, 0x8bec1f84, 0x40d760ad, 0x5c6e35fb, 0x421a5bff, 0xe2af2f68,
- 0xf5f6c0eb, 0xe7385493, 0x9296e8dc, 0x916bbd61, 0xb8056feb, 0x64efba5e,
- 0xd03ae3b3, 0x4196fe63, 0x80cbef3c, 0x71cb4d75, 0x14f5e58c, 0x68575b19,
- 0x3485c183, 0x44ffc3f2, 0x33d412ad, 0x6b0ed127, 0x04ab2f74, 0xbfd68e3b,
- 0x7a78a0ec, 0x85e3993c, 0xe8f4e7d7, 0xa0cd93a5, 0xbd15c507, 0xef3a12f9,
- 0x7a633f1d, 0x3fda09ea, 0xe7452bbc, 0x18778213, 0xe84947bb, 0xc1242587,
- 0x74d0c76f, 0x9dfb0e74, 0xbd17aa5b, 0x3849c7e8, 0x491f635f, 0x0f7ec519,
- 0x7ac3da1d, 0x83919093, 0x0cca98fb, 0xe2835ef6, 0x66fde371, 0xdd5677e3,
- 0xb3a7f8c4, 0x67df2978, 0x43de5971, 0x895fae70, 0x784ee99b, 0x4f862137,
- 0x0b7bcb15, 0x07cc0912, 0xcd544b7b, 0xe449bfe5, 0xd8834afb, 0xb70eadef,
- 0xa32ed085, 0x35e98452, 0xd9f68a24, 0xf981d268, 0xe66de031, 0x0d2c35c2,
- 0x278d8e2f, 0xbbe2c8ba, 0xc7ec1101, 0x788d5bb4, 0x44f5f04c, 0x96bfdeba,
- 0xfc36e3c8, 0x66538c77, 0x7d7ee214, 0x5fab1266, 0x0fbbc815, 0x338dbcec,
- 0xc69c61a6, 0xf58cefb0, 0x16dfb0d2, 0xab579872, 0xc17b5c42, 0x3a16ebb0,
- 0xb6753e59, 0x8868e0fe, 0x0b53b66b, 0x2fee1b34, 0xf96af8b3, 0xb034e2e6,
- 0xba1e16cb, 0x4ce4ed15, 0x8b65d995, 0x8250ce36, 0xdadee1a3, 0x8a07f43c,
- 0x84484c37, 0x4ed5847d, 0x8575e27f, 0xfbe267e8, 0xc7b503b9, 0x355da6b6,
- 0xab71e419, 0xca90128d, 0xeeb24bfb, 0xb17c0b1e, 0xcced0f21, 0xc6c109ad,
- 0xd6efda24, 0xabfcd67f, 0x4ea7fe68, 0x8c38f0bd, 0xd5813ab3, 0xc051bbc3,
- 0x12e73d9b, 0xc9bbe1b6, 0x3d6f8dfd, 0x1252f097, 0xa8ca7f7b, 0x7e536fff,
- 0x80007d7c, 0x00008000, 0x00088b1f, 0x00000000, 0x7dbdff00, 0xd5947c0b,
- 0x66fdf895, 0x666579be, 0xf263c992, 0xc2130922, 0x9e4e5e4b, 0x124c2280,
- 0x4cb443c2, 0xe8202a10, 0xa79034f0, 0xbadc5d48, 0x30040cff, 0x5706b650,
- 0x84ea2b11, 0xbbaac562, 0x351ba341, 0xb88080ea, 0xda446dd6, 0x8ffd16d2,
- 0x4810154a, 0xfed2b58a, 0x39cf65dd, 0x7cccdef7, 0xb5f01993, 0x3efeb4ff,
- 0xe7ddf7ee, 0xce73df79, 0x12e973bd, 0x2c614dfc, 0x5630a494, 0x51c1d8ca,
- 0x618cf58c, 0x24dea98c, 0x064dcf06, 0x34e2d26f, 0xa37efac6, 0xf5e1bb67,
- 0x926f6617, 0x25d8c6c3, 0x79fa2ed1, 0xa0b199aa, 0xcdb3b189, 0xc11c166e,
- 0x336699d8, 0x1f966b95, 0x9fa0c698, 0xb9850e9a, 0xc55b19f2, 0x3f6336da,
- 0x69923baf, 0x3d0155dc, 0xf4648e0b, 0x5bfe0977, 0xd528fcbd, 0xebc9dd5f,
- 0xaa0eb2d7, 0x4f3192bf, 0xf76b3c07, 0x1cd0595a, 0x51df5fae, 0x4a1f69ac,
- 0xd7bf51d2, 0x3b06fb25, 0x5bdd8c9c, 0x2abefc3d, 0x5d569f78, 0xfae1f662,
- 0x13e38e58, 0x87afa8ab, 0xebe63af7, 0x77aeb188, 0x49de5c04, 0xac4e8a82,
- 0x32b1d0ed, 0xe03ad6c6, 0x1ec62e9f, 0x6c7cd850, 0x15f7f6b7, 0xc2632919,
- 0x9e2ad6ed, 0xf898c70c, 0xa8bc6a70, 0x88d48167, 0xc467dab2, 0x345e35f5,
- 0x0ef3fbd2, 0x63075fb3, 0x3ff4c91e, 0x7fac2d70, 0x1953eb26, 0x909f53cc,
- 0xcd5d8e38, 0x71258b58, 0xba673e37, 0x08a17d0c, 0x53333038, 0xf8337e71,
- 0xc4ebf62b, 0x71944769, 0xed403ed8, 0xa98ed967, 0xc473400c, 0x8303735e,
- 0x90b037f7, 0xce060aca, 0x28fdfa0f, 0x2359dfb2, 0xdd1be5b5, 0x09ecf2da,
- 0x7e6332da, 0x9a4fd782, 0x1a4eca9b, 0x04fefdc2, 0x0311d6a6, 0x5802b72e,
- 0x5eed7eb1, 0x0464e04b, 0x923beb1e, 0xaec6e535, 0x88c9c0ac, 0x1ff16a71,
- 0x135ff059, 0x589d775f, 0xdf4607f7, 0x5bc00ead, 0x71190b3d, 0x46c140dd,
- 0xacc658ef, 0x7c45e616, 0xf16fd81d, 0xcfe812b0, 0x00b76e66, 0xabbb1b7c,
- 0xdef02595, 0x30ef876a, 0xe196273f, 0xfaf03569, 0x0e88058c, 0x39da8fe0,
- 0x5bed99bd, 0xb713e415, 0xcdfedaab, 0xff00dde7, 0x4f0293dd, 0x24ac3d95,
- 0x0ea3ac46, 0x5e1e91d6, 0x07eb39c7, 0x582f8865, 0xa5ec6fcf, 0xa6461748,
- 0xf3d78524, 0x5cb5e1ad, 0x5dc6af0b, 0x1837a236, 0x1844bde7, 0xcea761a7,
- 0xceb2846f, 0x7e4463dc, 0x3ce917b9, 0x1c72de06, 0x6f3c4c4a, 0xe0c73c43,
- 0xfcaceda7, 0x1cc7e43e, 0xc7181fe4, 0x6527eeb6, 0x8f818b27, 0xfa1737aa,
- 0x054dbea0, 0x547186fe, 0x08a9bff8, 0x6d3bd9c8, 0xe9d03255, 0xc3e73b29,
- 0x7a2e890d, 0x0abc50ee, 0xe75d35e2, 0xd5fa47c9, 0xcc43b827, 0x66a3f1c1,
- 0xcb601942, 0xf962c1ae, 0xefe81ebd, 0xcb1b9821, 0xf0098416, 0xcbc2c878,
- 0xefca392f, 0x6ee308dd, 0x0de5e64a, 0xd5b9bd3f, 0x16ca093f, 0xe5439e59,
- 0x416d0c87, 0xffdde03d, 0x44f5c982, 0x28f7b53e, 0x32305e58, 0x4d4c04f0,
- 0x2365843f, 0x4226a65e, 0xf4b720e7, 0x9b769a2f, 0xeed05ea0, 0xa78427fb,
- 0xfb39ff5c, 0x26363cf1, 0x4d7e7f66, 0xacdf797f, 0x53bf183f, 0xfbf87577,
- 0x419dc4d4, 0xc2e76f3d, 0xe7a72c76, 0x9ff43f03, 0x6d89a2fe, 0x0623e285,
- 0xda179b76, 0x632c5dd5, 0xd43908c1, 0xb679f023, 0x5b7241d9, 0xf943afbe,
- 0xa9f9063d, 0x7d70e487, 0xfe46aa47, 0x7f5cb94a, 0x7f4d5af1, 0xc27c8e39,
- 0x43dabd57, 0xe2feadf2, 0x121efa64, 0x736cf4f2, 0xf889e926, 0xc8d20ce3,
- 0x1338f0f5, 0x72f8b059, 0xefa24ce3, 0xcbe61c72, 0x06bdb922, 0xa4240ce7,
- 0x87bc8237, 0x750c2e71, 0x465f2525, 0x0276d99f, 0x67ac4591, 0x63341a9b,
- 0x07dcdbff, 0xfc18be1c, 0xb844e520, 0x1edb51bd, 0x811b23f4, 0x4863edf4,
- 0x7aacffb7, 0xd4fa91cc, 0x5038f066, 0x8af6a86f, 0x34372e23, 0x86647e4d,
- 0x5169280a, 0xd97f3d39, 0xd1e881b6, 0x411f706e, 0x3aeddafc, 0xafbe1f97,
- 0x483ce9b7, 0x730e2d27, 0x51bdf100, 0x7281c1b9, 0x076bf643, 0xb8f38f31,
- 0x7a421b60, 0xd57ade3d, 0x0f54898b, 0xa9199266, 0xc6f7ff1f, 0x0f17c583,
- 0x31bd8f1c, 0xfa0b88f1, 0x6e744b57, 0x910be00b, 0x1d0471fa, 0xdf63f744,
- 0x779cc6a9, 0xff093d74, 0x8436f8b8, 0xe7fe1112, 0x8119ba6d, 0x4d2ef318,
- 0x9e9bbe56, 0xdf25145a, 0x3c3b7f93, 0xd095ed88, 0x49f20a0f, 0xa32696fe,
- 0x90bc2dfc, 0x3922e958, 0x73b5ee9f, 0xb6e385b1, 0x7d9a8ed8, 0xed988fa4,
- 0x3ebfd913, 0xb849fd3e, 0x64aade18, 0x04601f88, 0x4ae37a86, 0x412bb070,
- 0x1597e22f, 0xd3e50caa, 0xa76f3a40, 0x77d7f4f9, 0xeb11e743, 0x0edd516f,
- 0x75fd6ba4, 0x99ca331e, 0xe6398ef5, 0xaa379410, 0x20f9d9ae, 0x02defa87,
- 0xb7d6127b, 0x1cc8ad29, 0xc15a8f8b, 0x2df9c46e, 0xd669d64d, 0x1d669f23,
- 0xf7e8dfe7, 0x9eb9925b, 0xb13fdd68, 0x9f1253c7, 0x79d2407e, 0xcb9b8f51,
- 0x5856c754, 0xe7c0de9f, 0x0763896b, 0xc1e37eca, 0x0b7ecb6b, 0xa87569c9,
- 0x44f68039, 0x549b78f4, 0xbe078f77, 0xf8f9826f, 0xc7c39cb1, 0x3aa254df,
- 0x16d74376, 0x47330257, 0x4b3837d4, 0x6c17fe08, 0xdf0f4c69, 0xc63d2137,
- 0xd3849798, 0x68efe4bc, 0xe8a2f93f, 0xc3b7f732, 0x5d0d10d9, 0xcba2c6b6,
- 0x4b37a879, 0x3999fcf9, 0x9343fe72, 0x8c7c8ebe, 0x49b29ba6, 0x977c83cc,
- 0x5876cf01, 0x66caa76f, 0x807d43ec, 0x9844d6b6, 0xe636cddf, 0x393e9900,
- 0x32677cb5, 0xe7acb7ac, 0x3f219180, 0x1d693a74, 0xf47da275, 0x8e14a6b8,
- 0x838a533f, 0x3dc7499e, 0xf8014f4f, 0xb824f676, 0x99d4f814, 0x0630fa02,
- 0xbc088f7c, 0x9fc6d633, 0xf21c686a, 0xb65fac8f, 0xc3767988, 0x6b0250f3,
- 0xb2bd8335, 0xc607c84f, 0x154fe7ee, 0x99c61718, 0xdde8cafd, 0xf58a2f64,
- 0x80b5021f, 0x43d3d3f5, 0x77fd14e3, 0x585edbc0, 0xeade047d, 0xdbc221cd,
- 0x6bdf46db, 0xe3c69f08, 0x772c74a1, 0x87933d1e, 0x70b670f8, 0x25efb5fb,
- 0x6ee9006b, 0xd3d67e20, 0x0e2eeafc, 0x81e2fe62, 0x8353ab78, 0x2eeb5fa4,
- 0x5877d1e6, 0x129cdfb7, 0x6c1b3efe, 0x2eb00986, 0x4270e666, 0x47c2c4f8,
- 0x896be0bb, 0xa3eb500f, 0x081f02ed, 0x3ec48fa7, 0x4eef1fa7, 0x780666e3,
- 0xefe66b3f, 0xdf246799, 0xfe7d0987, 0x005e4251, 0x0d1f33f7, 0x7a46ce07,
- 0x91e95d50, 0xe2cea77a, 0xac3e6fb9, 0x67abbd9b, 0x5963c04f, 0xed15ada5,
- 0x4f2665ab, 0xcb5dda12, 0xeb2bd3de, 0xc48409f3, 0x3c042b07, 0x5df732cf,
- 0x36ff3f88, 0xc209e38b, 0x97ff769f, 0xf8bca3bf, 0xa2bf681d, 0x9346ef4b,
- 0xcda56971, 0xf596fd8f, 0x876d3c54, 0xad5b2bf7, 0x2efd062e, 0x04fee29b,
- 0xf765bded, 0xf21b5c59, 0xa438bf71, 0x5dfa0b3c, 0x03be60fb, 0x9d7581e5,
- 0x06ecd337, 0xeb7ee093, 0x6de02b4c, 0x8964b293, 0x7dac0852, 0x1516ca7f,
- 0x57feacf0, 0x281667e5, 0xc17f755f, 0x8c75f316, 0x1cb282bd, 0xd0ff59ed,
- 0xfedf6899, 0xf646a712, 0x7f6fb72e, 0xca274479, 0x82a65bc5, 0xece5180a,
- 0xce60d9d4, 0xb78a532b, 0xf4fc6198, 0x0ea14f14, 0x32935bc6, 0xb7f62661,
- 0x7fb1bef4, 0x77df09dd, 0xa99acca4, 0x44de13e6, 0xcaccef7b, 0x926ed0a1,
- 0x54b3cf2c, 0x56697f42, 0x27a7b616, 0x090fbf00, 0x77f68f8f, 0xbdfdbf67,
- 0x1cd4e660, 0x53454bc0, 0xbffd0aa5, 0x96db729e, 0x36315731, 0x877281fe,
- 0x983fc607, 0x15a664b2, 0x9469dff0, 0xf16d97f5, 0xcc9d58c0, 0xbe0186f7,
- 0xbafef1ff, 0x9fa86699, 0x742dea96, 0xcd53fac0, 0x91996ff7, 0x65f81dfb,
- 0x4fbd12a4, 0x1271482c, 0x90461cdf, 0x33ebade6, 0xf82afd72, 0x71d3873d,
- 0x9424797f, 0x4ce511ed, 0x10dcee5e, 0x4e9c7e5b, 0x33b972e5, 0x8f77f621,
- 0xef004b90, 0xb3e90ea0, 0x96181acb, 0xc027961f, 0xc409e64f, 0x3cb9db5f,
- 0x639abe01, 0xd98e3e27, 0x5fe537df, 0xd3f53b80, 0xe8ac7aa9, 0x0d7290bf,
- 0x71c56666, 0x69764cbe, 0xcf5975e4, 0xbfb79252, 0xeb7c154a, 0x5671f0e2,
- 0x330ae56a, 0xb8470cf7, 0xc58b76c8, 0x4b47f33a, 0x88d052ff, 0xe2d6b9fc,
- 0xacf9c0c9, 0xc19e57dd, 0xd787632e, 0xf8d72c5d, 0x98ebc24b, 0xf407ef4a,
- 0x44b9fd0b, 0x5f31eeff, 0x57c95e07, 0x7d1a5780, 0xc33ad2bf, 0x3bfd69fd,
- 0x7ee3fb03, 0xc57a019e, 0x1b9e7b18, 0x415eb853, 0xe422ebf8, 0x5f214ada,
- 0x3d916a41, 0xabc5fe7e, 0x8fcd6f76, 0xf503771c, 0x7a7df80f, 0x412fee0a,
- 0x2b1ca2be, 0xb0b33d53, 0x8a4f597e, 0xa079ed03, 0xde828db7, 0x8937a454,
- 0xa694cee7, 0x76ef5a72, 0x863bb1c5, 0x3913f81d, 0x83c536af, 0x22c649f6,
- 0xc4497e9f, 0x87fca4fc, 0xff453bfe, 0x1f9e9c25, 0xc5e7a7ed, 0x2fc23fc8,
- 0xdf40dcc4, 0x5fce0763, 0x08ce36c1, 0x2582f5fd, 0x7091f380, 0x2ff4b6fb,
- 0xb8ba87ed, 0xda912a7a, 0x260f1c65, 0x219ea0ba, 0xb9f9c5c5, 0x6bc4e3e3,
- 0xe9755f51, 0xebe2e299, 0x2651b946, 0xe7bfa4e5, 0x764a8548, 0xbb2e584c,
- 0x72919ec8, 0x87366833, 0xbdbfdfeb, 0x9446c667, 0x2fbe26ab, 0x1483d34f,
- 0xe3cf0a2f, 0x2fae14e5, 0x2798f827, 0x24781d96, 0x08e52ed9, 0x75e1e3ad,
- 0x5863ec95, 0xd7e85d9f, 0xf93f2109, 0x664c9eb3, 0xf0e13ec2, 0xb67bfa90,
- 0xdcaff429, 0x47a5a64f, 0xa53b0659, 0xee105741, 0x89cfbf1f, 0x8263fba0,
- 0xf3f1a20e, 0xc013e55d, 0x48708fc1, 0xc872e14d, 0xed056509, 0x17f4150b,
- 0xb55cffc0, 0x3f35bdcd, 0x2e36de62, 0x1d7a9d8f, 0xe0576397, 0xfe54b48b,
- 0x3b25efbd, 0x1e1873f1, 0x2ff98edc, 0x1d76f701, 0xc17dc1ab, 0x09f01d21,
- 0xaf8da2d2, 0x598bfcf3, 0x29e7efd4, 0xeffeb93a, 0x1bb72e45, 0xd7ce3c61,
- 0xbfd63f64, 0x3b5da83c, 0xdcdfde7f, 0x658f4e54, 0x68853dc7, 0xdaf30cc7,
- 0x285fb8dc, 0xc07399eb, 0xdde1e500, 0x4b6b038a, 0xc1de1f3a, 0x4ba814f9,
- 0x67ade5c0, 0xa5b5fb22, 0xbefa3a72, 0x38fc31d6, 0xcbc457a7, 0xcd3e08e2,
- 0x7871b54d, 0x084cfac1, 0x472b554f, 0x9bfbf3e5, 0x0180f787, 0xf0075bd6,
- 0xe12db140, 0x64b1d9d6, 0xf8f90583, 0x1f237338, 0xa258ef9f, 0xdd788b1b,
- 0xebbec8c9, 0xf00f0f97, 0x147b4875, 0xe903b2e8, 0x5cd4eaf2, 0x71d86f1a,
- 0x7c0c758f, 0x1376861f, 0xec3b95fa, 0x4c57dc01, 0x4dcdf54e, 0xedce7845,
- 0x4a2064ab, 0xeb4dac1a, 0xfbef112d, 0x7d4177eb, 0x7e9f63a2, 0x36f3ce2c,
- 0xc2bab6c6, 0xfaa981eb, 0x6f5a1fd1, 0xa02d3e04, 0xd6e11072, 0xfbf5e469,
- 0xcf8a3316, 0x8cfbe834, 0xf7f41df8, 0xa3cfa22c, 0x2fc414ba, 0x6ed09bee,
- 0x737bd3ae, 0x67d7dc14, 0x07a8dc98, 0xf4421f60, 0x54c2c87e, 0x26fff40b,
- 0x574e513c, 0x8fd8efe9, 0x502aaaf0, 0xae242ddc, 0x60dc85da, 0xcd37dc41,
- 0x7a25629e, 0x39dd5f64, 0xbff51ab4, 0xfa09e395, 0xcc7e8b31, 0xff30a2e6,
- 0x2aef3afd, 0xfd48ffda, 0xa8514876, 0x8dd7439f, 0xb112ddde, 0x50fec22f,
- 0xaa521ffe, 0xfa40ee73, 0xb23afb17, 0x7ac2d30f, 0x2295ed17, 0xfdde245b,
- 0x3ca17e62, 0x7b48a3a4, 0x7ed3ed14, 0xb1dfd67b, 0xd9c83a65, 0x823a33f1,
- 0xd152073a, 0x01ff33f3, 0xf60551d6, 0x70e005ac, 0x33972a5b, 0xfafdf287,
- 0x30df9c44, 0x3abc3f58, 0xd31be09c, 0x938f0b64, 0xe90ffc2e, 0x482fee46,
- 0xbfcfe04f, 0xe73b72a7, 0x1e5c6927, 0x978d21fe, 0xfb6313d3, 0xf510be2b,
- 0x1f971354, 0xfcb9cb5b, 0x41b7ae8f, 0xedadfbf4, 0x668a31de, 0xef760f7f,
- 0xf76e5486, 0x885fb91a, 0x0a9613c7, 0x873b06fa, 0xabb614f0, 0xda503c78,
- 0xccf45fa1, 0xf372f2a3, 0xbfe8d97b, 0xe69fedbf, 0xb6f487dd, 0xf27a37f2,
- 0xf1149c57, 0xbec99582, 0xbfdc64f4, 0xabf6c427, 0x75d6273c, 0x1f9199ba,
- 0x1b8c53f8, 0x893dec82, 0xaf284371, 0x34e0f9da, 0x2cd5bfe4, 0xe5e0fe40,
- 0x5c51373b, 0x61407970, 0x69f7052e, 0x1627ee5e, 0x47bd58f8, 0xdf4bf1af,
- 0x931936ae, 0x77e984c7, 0x6e369b45, 0x58abb358, 0xf6f6fe53, 0x440b2569,
- 0xfcb0a1c8, 0xa3ef4699, 0x1ef495fb, 0xd47ea76d, 0xc3f8d2ec, 0x461cee97,
- 0x97f597eb, 0x77ad3731, 0x66816b59, 0xf395acde, 0x49f9bc49, 0xd4bb45ba,
- 0x469e731f, 0x008ffbcd, 0xff08fefe, 0x4353d2ff, 0xa92d1e69, 0xba40ffbe,
- 0xcac3cf09, 0xdabf1afc, 0xb027c724, 0x7ee16656, 0xff23a049, 0x5081ece5,
- 0x6fffa72a, 0x71d1f70f, 0xfee305f6, 0xca0beebf, 0xcc397126, 0x0c3614da,
- 0x6aa7e31e, 0xa0bfb4ed, 0xa7c52bbe, 0x616beb95, 0xa45f2d47, 0xd45177ad,
- 0x5dea28bb, 0x6912bfc9, 0x2805299f, 0x0d7f78d7, 0xcff38f82, 0xa0ba351c,
- 0x6e34f8de, 0x3d3916c7, 0x6cef5097, 0x438e24b3, 0x4b36ca7f, 0x52f5005e,
- 0x38bafaff, 0x48336d1d, 0x033b8a3f, 0x5ffed6e1, 0x88a8b677, 0x0fb07dc7,
- 0xfe587900, 0x8a1641e8, 0xae375390, 0x01cb3c53, 0xef2176de, 0x7d7cdcea,
- 0x71a0ee75, 0x5d93f428, 0xd395e7c7, 0x23515fb1, 0x08d4e5da, 0x419a5f7f,
- 0xc1cccfb3, 0x8d254e32, 0x9a9ce3cb, 0xea1432a0, 0xa3daef65, 0x875fd8a8,
- 0xbb337a42, 0x41130009, 0xdc25d957, 0xa4b1b9be, 0x2636595d, 0x5d56870c,
- 0xe00718f5, 0xeadd35a7, 0x8f737d46, 0x0d2437d3, 0x182279e7, 0xf8fcedc4,
- 0xda2a3d13, 0x7a753fef, 0x619cd20a, 0x8ef4b838, 0xd6a7d46c, 0x6d9e7c13,
- 0xae63fbfd, 0x4da22867, 0xdcddabfe, 0x23b3d19e, 0xa487db8c, 0x8c37d2af,
- 0xe727bd24, 0xf4229e97, 0x3df33a45, 0xadfaa367, 0x68c9f08c, 0x861be93d,
- 0x2cf6e6ef, 0x815577c7, 0xafbe0f77, 0xa8aab8ca, 0x6c17de05, 0xa14baa0b,
- 0x62bbcbdd, 0x85cb83fb, 0x4c75813e, 0x75c9f5c2, 0xc82e495c, 0x7e38867a,
- 0xd60fc90a, 0x1ff7b119, 0x2f7d2230, 0x216d347f, 0xa36eb7ce, 0x30949991,
- 0xf4ea7ffc, 0x64f9c6ce, 0x082bb477, 0x93dfe91b, 0x1dff4ae3, 0xfcfe477e,
- 0x96f928c8, 0xfe5c5fd3, 0x2cf7a75f, 0x1b67948f, 0x6927848d, 0xc7e7873f,
- 0x69d5fded, 0xeabecf5c, 0xf9c12ef4, 0x276d7434, 0x3daacf7f, 0xf9631a1f,
- 0xe2f9a3ab, 0x6a54704a, 0x6ea0bef8, 0xeb8039be, 0x25547f2f, 0x68dda83a,
- 0xdd5938a4, 0x9fb8fb33, 0x46e61ee7, 0xb1d75139, 0xe30e594f, 0x4fb33ed6,
- 0xd7011159, 0x05017541, 0x2ec233e7, 0xafcb90f9, 0x3f47ba68, 0x872dda32,
- 0x9c4e5c2d, 0x15f9b72d, 0x8359eb80, 0x9deb0eaf, 0xfdbab2cd, 0xbc3c61f9,
- 0x11fa6fb9, 0x3fb037cc, 0xb3e20a67, 0xd33bb755, 0x475af50c, 0x5f48dd19,
- 0x36fa753f, 0x54525c23, 0x4fb6276f, 0xd7bbb34e, 0x89975b43, 0xfbbca115,
- 0x1f1870ba, 0xefe32745, 0x85d3fce1, 0x91db8872, 0x813fc845, 0xa7b4bb34,
- 0x361dae48, 0xc73c75f0, 0x9eff7cf8, 0xe89079ea, 0x8d0a48f8, 0x54175d9b,
- 0x7f9d9fd0, 0x6b9239e6, 0x9d30ffb2, 0xe4891e79, 0xb3cf2bdf, 0x56f488c3,
- 0xf950e42b, 0x8f947ba3, 0x4bfde623, 0x6e91f430, 0xba019fb2, 0xfdf7d1b4,
- 0x3ea8d333, 0xbdf0b933, 0xe16aed42, 0x79088afb, 0x4311d723, 0xf5c3ecee,
- 0x1cf44ed8, 0xc82772e4, 0xbe628dfd, 0x157cf8d1, 0x9a7c1fe5, 0xde99ea06,
- 0x3123fd1b, 0x48787a2e, 0xe527f502, 0xbedf3440, 0xe51ce82a, 0xfe46cea5,
- 0xe52bb25a, 0x22bd64fc, 0x34fec567, 0xc10f4382, 0x4a977ea1, 0x7a32a9eb,
- 0xc111de87, 0xfd16bf0f, 0x03f9b81d, 0x07fdca23, 0xa25dfdfe, 0x8536fac7,
- 0xedacf1e2, 0xd43ce35d, 0xf4a7fe47, 0xe2ce6768, 0xf1db0126, 0xa784bbc2,
- 0x2e5c9d59, 0x53ee77d7, 0x74c2d997, 0xc0e67f9a, 0xf56748ad, 0xfb86261d,
- 0xdfbfa022, 0xe9a27c20, 0xca47c254, 0xb4f878dd, 0x9472e0ce, 0x47e48df9,
- 0x3e54fd85, 0xdca429fa, 0x4eaa7bfe, 0xfbf809f8, 0x1cbc690b, 0xa7df1fa6,
- 0xdcb08f08, 0xe45f10b5, 0x603a299f, 0x5bb87dc6, 0x5a7f3f21, 0xe4678725,
- 0xc6c7aabc, 0x54ce9b97, 0x50d437a1, 0x587b1cde, 0x7fcbf7be, 0x810bfed1,
- 0xe41f786f, 0xd410d9ef, 0xd1fe72f3, 0x7277cbf8, 0xce831b7d, 0x11d71bfe,
- 0x3da4dfad, 0xc9e6e920, 0x8f46aa5d, 0xea469dd8, 0x731b0ecf, 0x728ec79c,
- 0x61d8cf1e, 0xc76cfae0, 0xe500737a, 0x780b9bc9, 0xa17d995e, 0x43cf8831,
- 0xf875a5ba, 0x1f3650fd, 0x5ba755bf, 0x0dd6e583, 0x7842d636, 0x4b3a24f5,
- 0x19127e91, 0x1e5c8f97, 0x973cf03e, 0x5b7e7567, 0xeb3fc180, 0x5397737c,
- 0xe2cd39dc, 0xf333b8c6, 0xb3ce341d, 0xd72ba40f, 0x2e8dfb73, 0x83ab3f78,
- 0x03c49ff3, 0x2fa253c8, 0xae120fc9, 0xb8727861, 0xf8927e4b, 0x67f4bbf8,
- 0xa77cbd03, 0xda3daabc, 0x46dbdc78, 0xbe4ed5df, 0x706c3acf, 0xc7869fa1,
- 0x6fd1e217, 0xb3b7baed, 0xb51d824f, 0x7a2df33a, 0xfd85be4a, 0x27bf86ac,
- 0x3817bc09, 0x74afbeb9, 0x2da9ba72, 0xacbe20e9, 0xfc44d93d, 0xe5c19b6c,
- 0x5684ed9a, 0xcff6331e, 0xa8dbbd62, 0x63b5955d, 0x5477e61a, 0x7038ae3d,
- 0x6e4f1fbf, 0xfcf0aede, 0xbd774fb8, 0x72f98891, 0xf2b02bec, 0xe1629e31,
- 0xe4eb9deb, 0x5eece272, 0x7bd13800, 0xf672f193, 0x63fa95fb, 0xc20a63c1,
- 0xfac056b3, 0x139533ec, 0xec7e84ff, 0x61ee49bd, 0x9ecfe4b0, 0x3feee9b9,
- 0xeecfbcc1, 0xf746e299, 0x978b5a65, 0x69cfa7e8, 0x037ee371, 0xf40cf7c4,
- 0x78efae17, 0x685af123, 0xabe9fa77, 0xd76e508b, 0x799e798a, 0xe10ebf5e,
- 0x7fc9e1b2, 0x6bdf8c9b, 0x09aa4a03, 0xf8fbd9c7, 0xbe63677f, 0xf2469eea,
- 0xfd3cb517, 0x17f311ba, 0xfe768174, 0x03926f7e, 0xf9fe9deb, 0xef08c9f6,
- 0xfa168e96, 0xede7e67e, 0xe4f03e54, 0x77f6bdbf, 0x1378e3f4, 0xccef58dd,
- 0x2f09fbf3, 0xf2953e75, 0xf5d1e2db, 0xb9fb7d8e, 0x8f3d44bc, 0xcb9f307c,
- 0x92d74931, 0x793f4f7e, 0xbd48c4db, 0x0ab7df21, 0x026b4e7f, 0x099f23d7,
- 0xe0adadfe, 0x9ecfcef1, 0xfda7ccb5, 0x1c343181, 0x6375c5f7, 0x2ddc5d38,
- 0x71d751e0, 0x46c1a187, 0xdf9fa9ed, 0x73e3df02, 0x27e7d02c, 0x64852923,
- 0xd7ca25cf, 0x0faa57f9, 0xbfa026f5, 0x1f45e6e7, 0xeb1f382a, 0x2d432698,
- 0xfe69f3cd, 0xf91d561d, 0x6c6d6cbd, 0x38bffb3f, 0xe02ec26d, 0x59dd907c,
- 0x3922e39d, 0x8b0f7260, 0x6bb387b0, 0x27182d7c, 0xcfad7ebe, 0xbe3c07ad,
- 0xb4e8ea7c, 0x727e5041, 0xf84c52a4, 0xcf5c2bd7, 0x4e346df9, 0x81c1fa3d,
- 0x3e0b768f, 0xf5307749, 0xdc03921a, 0x32a5501f, 0xaa7e46d5, 0xf513923e,
- 0xee0f42ad, 0x736e7e11, 0x764dde5f, 0x26bb676a, 0xed061f62, 0xf72a366c,
- 0xf310fde5, 0xe7f8674a, 0x1971de93, 0xa467be69, 0xf27e603c, 0x8b7e4a7b,
- 0xbca11bf6, 0x57d1fcc1, 0x285de59d, 0x3d33e51f, 0xefe4bf8e, 0x74b8fbe2,
- 0x15ca174f, 0xa35537b6, 0x95c9e20f, 0xe79f3703, 0x0f95fb7a, 0x901b0e89,
- 0xdf7c710e, 0x5ede8d49, 0xe815c24e, 0x890fa5f0, 0x7177970e, 0x55d0daf9,
- 0xd4fdc46e, 0x69d7e10e, 0x7484f410, 0xbfe8fb84, 0x9c69933a, 0xe1a1c986,
- 0x0bcea728, 0x12ff3bba, 0x3a43b7a7, 0x0e01f91d, 0xdedd1eed, 0x8fd40ca2,
- 0xf0978d4a, 0xe6f800dd, 0xa3daf376, 0xd7d56768, 0x6bf23730, 0x07e0a743,
- 0xbe117bb0, 0xcbd55d0d, 0x6941bfb1, 0x0ddfc933, 0x41b4c976, 0x63a86e50,
- 0xd96fc8a5, 0x425e2abb, 0x7c5d60ba, 0x035d9e3f, 0x4dd22cf6, 0x9f5bbd5a,
- 0xdb8f7a8f, 0x2c8fd7bd, 0x723bf6a3, 0xf2e7145d, 0xf38a3157, 0x19fb40ef,
- 0x68d65df5, 0x36fb3f6e, 0x5cc5dd92, 0x6537e409, 0xd60d753e, 0x622ff06f,
- 0xcd11d794, 0xb4292fc7, 0x5808680f, 0x56681f29, 0x1a7fb717, 0x9f1f38ba,
- 0x4f91f093, 0xe4adcf43, 0xc4c17f8f, 0xd0fe11fc, 0xce9a5f37, 0x268becf5,
- 0x359fb3d2, 0xb0c7d87b, 0xbc85b7a0, 0x6dac7097, 0xdd21d7cb, 0x09937632,
- 0xcc4cb1e7, 0xf5c0cda3, 0x03d0b246, 0x3cf8db05, 0x77e174d4, 0xa789d74c,
- 0xe2e79b51, 0xe17f93f0, 0xc88f189c, 0x39e85d22, 0x797eba18, 0xeea4f890,
- 0x9a5fde19, 0x677853c9, 0xfb4abd04, 0x0b1e9a28, 0xbbd9b8c4, 0x4c282a0e,
- 0xd1cd77b2, 0x35f5f9f0, 0xeb08d82c, 0x2e977ebc, 0xd91c6538, 0x9e490b4d,
- 0x58b237af, 0xd2232af9, 0x9556fdb9, 0xbf442dea, 0x36cdd576, 0xb93fa477,
- 0xb3a3c12a, 0x24571da0, 0x12ded8fd, 0x4a9d1b8c, 0x6f11bbb7, 0xd25dd2e3,
- 0x5bd8d30e, 0xdcdff703, 0xcda1bc0e, 0x1ff70e3f, 0xf309f581, 0xda09ae29,
- 0x0edcfc8d, 0xf7fb8f68, 0x9cfed7dd, 0xb85bdfef, 0x070402ff, 0x6f5499c9,
- 0x9d8ffa09, 0x5cb5de05, 0xb1ddd93f, 0x93aaf3d6, 0xcdc0e7af, 0x3ee216b7,
- 0x8dae61b1, 0x5781e1f8, 0xde9ca594, 0xbee21140, 0x83f9c7da, 0xe13c7f01,
- 0x3b247a22, 0x1bc9a1aa, 0x9c8f4d88, 0xaec01a5f, 0xe81b3b42, 0x93b70d71,
- 0x13f38c6d, 0xbfb94ba7, 0x6915b947, 0x8d3e4acf, 0xbf497be6, 0x35ff7cfd,
- 0x81bcfdf9, 0x8079e3f3, 0x3f7bd203, 0xe9ccbf9d, 0xe07aeb7c, 0x15bef847,
- 0x4225b1f8, 0x7fef47fe, 0x1fb89e70, 0x1ef6f290, 0x3a5177a7, 0x6fdda1ca,
- 0xab50c66d, 0x2f0d8f94, 0x397e196f, 0x958ccbbb, 0xf0335fa1, 0x60fd246b,
- 0x445ed68c, 0x09ea3f4f, 0xfed8baeb, 0x6dd2bb8f, 0x3f3fcddf, 0xaee52f30,
- 0x7981d2f4, 0x73a8e929, 0xadbb6f10, 0xa58fb401, 0x3e7a291f, 0x499f044b,
- 0x03fdca7c, 0xfeb021c6, 0x37ef82fd, 0x888f7a89, 0x84bec467, 0xf3a5c1f8,
- 0xf679487d, 0x6c91f471, 0xb5f97df7, 0x2f5238e4, 0x8c8dd346, 0xaa7ae0a3,
- 0xd7afc4c7, 0xcfec5fb6, 0x29f8e8a2, 0x800b5fc1, 0x6dde7037, 0xc417e086,
- 0x19d8778f, 0x99f813e5, 0x71e31527, 0x7ce08daf, 0x386fb234, 0xfa66b7b6,
- 0x92ba44be, 0x8f7c5d7e, 0x115aaa71, 0x6bddacbf, 0x7f9e4408, 0xd18337b9,
- 0xb376bd38, 0x91fc0f5f, 0x87dbe99b, 0xcc15fded, 0xf33d441d, 0x2b8a168f,
- 0x8a5b8181, 0x8591a6f2, 0xefce1112, 0x205efb25, 0x0cf7d5fa, 0x39e3bf47,
- 0xdf0e3425, 0xdafe109f, 0xfb5fc213, 0xcf7bd77e, 0x17ad02be, 0xf16e72bf,
- 0x5ece918f, 0x7d21e4b4, 0xe7f4b09c, 0xdda125bf, 0x92b9817d, 0x690e772e,
- 0x55ef5991, 0x2767fc23, 0x7ffdb1ec, 0x373c0b6f, 0x532b788a, 0xb7c3ed19,
- 0x19f97067, 0xe3c4bf3e, 0xb4ebefad, 0x1cf937fe, 0xdfe1c193, 0x501ce719,
- 0xd7df0661, 0xfce10e74, 0x50aed7d8, 0xdd66afbc, 0x41eb3ffa, 0xb5acd42e,
- 0x9f09e907, 0x71ab8ed1, 0x283a83f8, 0xb970309f, 0xcd737f04, 0x07fd6165,
- 0xc849d4f5, 0xc5077e33, 0xfd0a96bf, 0xcde9acb5, 0xa15fa1bf, 0x49e66546,
- 0xf2c71845, 0x9f041e1e, 0x4f2db53e, 0xf2bff144, 0xa3a67747, 0xd458ca7e,
- 0xa6c1f226, 0x55fed03a, 0x2fe8373c, 0x6f672f59, 0x132764a9, 0x1ede001d,
- 0x22f6f0cc, 0xe24a2f1f, 0x7745c17e, 0x08fe035a, 0xf057b879, 0x7cfd3461,
- 0xcdbce710, 0x01151997, 0xaa616fec, 0xefd46ff7, 0x7234e79d, 0xfa12bced,
- 0x5f4823c5, 0x4fddd877, 0x22f2eef0, 0x05d263f3, 0x0e316bfc, 0xb7b1a204,
- 0xbaf867b0, 0xbee3a47c, 0x5fa1dfbc, 0xf0e4dbab, 0x91bbf0bb, 0xf176cbbe,
- 0xe3f141e2, 0xd2232e40, 0x8545c347, 0xf76961e8, 0xae51c79b, 0xa221a837,
- 0x1b66a593, 0xa2e1b1e1, 0xd5a9e9ca, 0xdb243670, 0xa4d7ee03, 0x6032d27a,
- 0x277bfe9e, 0x093c5325, 0x17c389f8, 0xe39c452a, 0x37c332f8, 0xf7e00328,
- 0x36f13590, 0x305d7fdc, 0x8f3092bb, 0xf408d1ad, 0xe7753570, 0x43d84735,
- 0xa5aec72c, 0xfeb7ee83, 0x6e5af386, 0xaf5119f6, 0xd134f0af, 0x4945b179,
- 0x98c752c1, 0xd24fbc48, 0xf5fd1a5f, 0xc8497ef1, 0x3c8df797, 0x9d20646f,
- 0xcf6c3c73, 0x39b2f510, 0xc79fbcbf, 0xe7c39e6a, 0xbb427828, 0x728887f7,
- 0x170f2891, 0x7979d2f5, 0xf797e09a, 0xd17af1c7, 0xe3152ce3, 0x99e1997c,
- 0x7682708c, 0xc7d22341, 0x673a166b, 0xfe404ac4, 0x935bbca1, 0xc82c764b,
- 0x770869b9, 0x4c982718, 0x67a44cf5, 0x41c81358, 0xa96f67c0, 0xb5b3e08b,
- 0xe305e81c, 0x289aaf41, 0x2bf1241e, 0xcf8d1af1, 0x77ca1985, 0x181f4fcb,
- 0x51985ebf, 0xcd7e303a, 0x17c250d8, 0xf38c4bd1, 0xf5f0f11f, 0x03c73b6b,
- 0x0cf2ebf0, 0xb1c183d2, 0x49b37c91, 0xb5ca51c0, 0x78b413f7, 0xe39d3cfd,
- 0xbbd45ea1, 0xda1f7684, 0xe23eeed1, 0x7853f7bf, 0xae02b4fd, 0xfe5da497,
- 0x5f2e024f, 0x529f6ba6, 0xd1fb44cf, 0xe2f214bf, 0x47ee74cb, 0x8ddebc07,
- 0x09fa95f3, 0x19304ce4, 0xebd178f8, 0x6c339226, 0xd5e51f63, 0x83ffbd40,
- 0xea645af0, 0xf41535bb, 0xdec10fca, 0xbb511631, 0x0647284b, 0x48c7efec,
- 0xd433b0ba, 0x0e19addb, 0xf4941f6e, 0x48d7b8f1, 0x0ca5aebe, 0x657287e4,
- 0x210c63fc, 0x6bf0e40b, 0xc142bf22, 0x6214aebc, 0x07f61767, 0x8c97ff70,
- 0xa3dc30d2, 0xe8213bc7, 0x450cd1e0, 0xb215bc25, 0xaddbc442, 0x5f6e7eef,
- 0x139e5d0c, 0x4d9365e7, 0xe7ec76be, 0x916fddec, 0x18c27e9d, 0x8b66de1c,
- 0x7c18ddf1, 0xfc2521ec, 0xf4a7b6fd, 0x98d5e37e, 0xc1f67e7f, 0x4bdd619b,
- 0x1c6ef47b, 0x5f3de972, 0xed1e33bc, 0xc5031eec, 0x6fdf406b, 0x4dde7153,
- 0xa13caedc, 0x67c1d2d3, 0x3df76977, 0x907a0baf, 0xdca572e7, 0x157cfa91,
- 0x93797373, 0xcb1bb890, 0x3b71dd1f, 0xce5dcb9d, 0x003d87bc, 0x666bd3f7,
- 0x3b7c5d92, 0xcf5e51f3, 0x39ed56b2, 0xda6d15db, 0x073bedf2, 0x3925c39c,
- 0xfd102abc, 0x2ba5f85e, 0xf3fbeb63, 0x333f2e82, 0x7afdf88a, 0xfdf8cc53,
- 0x09fc85ef, 0xb4fbd9c7, 0x2f57fbf1, 0x8c6fbf1f, 0xed87df8a, 0x3df8e888,
- 0x6113f7ef, 0x91bf606f, 0x2acffc71, 0xa3e3af62, 0x58b7690c, 0x9f19f935,
- 0x125bb00c, 0x45e37be9, 0xd8f99c4b, 0x2b67fde8, 0xd7d38f63, 0x5f368e3f,
- 0xe79edc20, 0xb5fdc809, 0x8bc693a4, 0x6567a459, 0xe1c7da59, 0x3df43976,
- 0xf4babcfa, 0xf8be4b6f, 0x2ce66158, 0x176d47f2, 0xf29bbc76, 0xbb463f33,
- 0xba72f908, 0x4c10b5eb, 0x278fd971, 0x8487570e, 0x1d7a26f9, 0x42675f40,
- 0x98d8f211, 0xfd234f69, 0x3a6e66eb, 0xfee82797, 0xe8abbbd6, 0xbdffcf7c,
- 0x322332a7, 0x53f72a6e, 0x8f69460d, 0x96fa34d9, 0x65e3e945, 0x1d916f5d,
- 0x70d7f606, 0x7a7f3e14, 0xc3d26ef5, 0x1d37992b, 0xd5f7bde9, 0xe56e7411,
- 0x3961eadb, 0x4bfc9b9f, 0x35bf3c0c, 0x3987ec8d, 0x75373e62, 0x67503b73,
- 0x1c6818f6, 0xd239730f, 0xe7cd8b69, 0x8554420b, 0xf3fd75f2, 0x631de747,
- 0x15fcc493, 0xbdf00f6c, 0x52d93c4e, 0xdbce265f, 0x31eae384, 0x8990260d,
- 0x6e8996cf, 0x28a73e17, 0x83cf955e, 0x6b1a79e3, 0x3afac1ca, 0xd7cf4873,
- 0xc7483309, 0xa2f0fdf6, 0x37945db2, 0xeb70bdce, 0xb36f7c0a, 0x8a313c93,
- 0x04cf4c79, 0x6dfc88b9, 0xb75c6666, 0x4d3c16c9, 0xf4f133f1, 0x8f88b857,
- 0xf91843fd, 0x07581241, 0xd9b53f9d, 0xb171f9ce, 0xb050e60e, 0x085c716c,
- 0x9ce27ee2, 0xf3c7e6c3, 0x672f20c4, 0x5f3fa265, 0xc44ad5f9, 0xee67c80b,
- 0x5df78a17, 0x11f3f20d, 0x88a5de42, 0x7908b5f9, 0x8adcc597, 0xe480c7b8,
- 0x95f6fddd, 0x267ee037, 0xa3f7798d, 0x71387bbc, 0x32fdc506, 0x93e68f98,
- 0xc8d5433a, 0x5b57a72d, 0x46af98ce, 0xe7053bcb, 0xbed3e597, 0x9413b337,
- 0xbd04a497, 0x69529799, 0xfd12361f, 0x37210631, 0x3e78598e, 0xcf3166c2,
- 0x8853333b, 0x78598e6e, 0xb6b7c25e, 0x7ddadc5b, 0x3f0a437d, 0xdd3e7fbf,
- 0xc0f1c66c, 0xe113b98e, 0x07cc7607, 0xdfce167e, 0x41d29c2c, 0x6f9b0279,
- 0xe01bdc54, 0x6bbb66fd, 0xeba40abd, 0x7f70a99f, 0xc728ea8f, 0x3cf69c7c,
- 0x44f31b87, 0xf7be451b, 0xf6a38b66, 0x9a63f219, 0xb7d63d78, 0xed96e319,
- 0xac0e1d5b, 0x37d95697, 0xaafb88cd, 0x9bb1cc15, 0xaff7a0c5, 0xbe608f80,
- 0xe032c73f, 0x63f41149, 0x85bae23d, 0xefbe20d6, 0xf0177fc1, 0xdc3294f2,
- 0x82bff2bf, 0x773a42ee, 0xd27a3cc9, 0x85dd8d97, 0xbc60d86f, 0xe485b982,
- 0xf256f7af, 0x72132fb8, 0x571cbc60, 0x61b5d9f0, 0x85efa7ba, 0x5bb43ca2,
- 0x4cd37ff8, 0x80ebae3c, 0x91c4ca21, 0xc69fac43, 0xe4cdc1f9, 0x6bd3e71f,
- 0xfb0ff858, 0x4ff70cab, 0xf86a6972, 0x60ef9873, 0xbbb322a8, 0x8dea0ea5,
- 0x91fb8357, 0xcf15afaf, 0x543cf142, 0xe5e9237e, 0x65cd9d1e, 0x373e71d4,
- 0xc3aba017, 0x94b847ab, 0x60b4b639, 0x4129d73f, 0xd8cde67a, 0x9fd382de,
- 0xc94cfc23, 0x9e00c699, 0x00996b37, 0xc8bf2678, 0xffa30f9f, 0xac2fff5a,
- 0xcc74f118, 0x279ef520, 0xb9ffe789, 0x0ed53d68, 0xce97593e, 0xc22e7b33,
- 0xcf3f28f9, 0x8b74d0c6, 0xa997fef8, 0xd7974955, 0x300f3cbb, 0xe5cf4b25,
- 0x871e0ce3, 0xa66f9c6d, 0x7146e5bc, 0x7d53030f, 0xf08c3ff9, 0xc5b8c8af,
- 0x7f3606bb, 0x46fd8c5f, 0x07b7164a, 0xfdc0dce6, 0x1f228dc2, 0xcc4f7e47,
- 0xbfb27ee2, 0x377b4e64, 0x1e3dec93, 0x949dfd6f, 0x48d9235f, 0xf2115f94,
- 0x5f248fe5, 0x278edfca, 0x85dfb47f, 0x29e799fc, 0x40bed036, 0xef2921c8,
- 0x68ef22bd, 0x1018f301, 0x1ee23d4f, 0xabe5a395, 0xccdd0e48, 0xf7fef47c,
- 0x00ff3c15, 0xfd80d308, 0x6b4334dd, 0x6af3cd3f, 0x8fcf37cb, 0x3e38afb6,
- 0x7c0bb8e4, 0xa473efda, 0x6b433c9d, 0x87fa2f27, 0xcf4992af, 0xfea2fc67,
- 0x3e70e0d2, 0x3e546351, 0x27c88351, 0x3c2aec6a, 0x4f911694, 0xf3cdd8d4,
- 0xaeba1a89, 0xedc44f94, 0xb029af29, 0xde24e31f, 0xec94d25a, 0x727f910d,
- 0x8a4ff310, 0xaf6e74c2, 0x0c3cf3b0, 0xa2bca1e6, 0xce95871c, 0x8aeab45d,
- 0x9adb8fc8, 0xddf8d768, 0x5577aeb7, 0xbfe93d61, 0x4843f995, 0xc3f6b137,
- 0x5f7c59d9, 0x7ee143b3, 0x3302ff74, 0x059d3bed, 0xc15c4d5e, 0xd1c767a9,
- 0xa66ef8af, 0x3f27d64b, 0x2f830ec9, 0x019e18ab, 0x8979d185, 0xb67af6fe,
- 0x5190fc91, 0xea99ca72, 0x338038a6, 0xf5f92c69, 0x921775e7, 0x2cd9923f,
- 0xb84a61ee, 0xdb7379ff, 0x69ebcc55, 0x97576eec, 0x3774c2db, 0x43ec4b36,
- 0xf48accac, 0x7bd7efda, 0xbd7e44ce, 0x30b79364, 0x19d7da0b, 0x8bbfe483,
- 0x1f7a57a0, 0x2ede5f4f, 0xe150c5e8, 0xe602b05d, 0xd18efbd7, 0x0a6bfb8d,
- 0xcfff41cc, 0x7e4c94bf, 0x230e7549, 0x9cb1b53d, 0xf45e93cb, 0x3e3ac193,
- 0xfaf7e64d, 0xa466ac6c, 0x4ca7cf5f, 0xc55e3a23, 0xe83d8702, 0x7405db47,
- 0xd2cf7918, 0x8eb96d1e, 0xa9fe9075, 0x3d00667b, 0xf2947c93, 0xf4b99eb8,
- 0x741b8c06, 0xa1b3db6a, 0x285c395a, 0xbcf147f4, 0xff982da9, 0xf2e3ac50,
- 0x0f772d91, 0xf5fb439a, 0x2c4f8e45, 0xa17f7228, 0xebaece5c, 0x73d29fb5,
- 0xa17ae7fe, 0xad1ff454, 0x3d854abf, 0x17e41937, 0x7da56fee, 0x7ca9f506,
- 0xcfadd750, 0x59e785d4, 0xbb49da22, 0x890bea50, 0x8c09f2af, 0xbbb579e1,
- 0x1a7c84b2, 0xbe2086c2, 0x13e27fa1, 0x29583705, 0xf71fbfd4, 0xf18ad785,
- 0xa8fc1e80, 0x733afdbf, 0xb36be900, 0xd2fa44d2, 0x3c3ff68c, 0xd23e2ab7,
- 0xe35eff0b, 0x4c7d23d7, 0xdd374d64, 0x9926f500, 0xb728ac7b, 0x31fe4e80,
- 0x585a7e92, 0xa29f6f30, 0x7efa23a1, 0x98edf936, 0x5af52474, 0x9edcf7f0,
- 0x74bcc599, 0x0fe793cf, 0xa83aeedc, 0xd8267df0, 0xeed1079f, 0x93375b7a,
- 0x2c2664e8, 0x3955eb03, 0x9e9ff8b4, 0xdf472da9, 0x5ad0c50f, 0x48744328,
- 0x9c540678, 0x51bdf44f, 0x98f7291e, 0xb3e5ee56, 0xf8de78af, 0x423fc396,
- 0x1d7583ff, 0x0ef6891b, 0x8fc4aa58, 0x27b0f4d1, 0xb78f7beb, 0x118ac9ec,
- 0x7e15e96f, 0x68586be5, 0x40ca33e5, 0x687f3b7a, 0x82333e9d, 0xbc7f252e,
- 0x1379e06c, 0xae3cebca, 0xc6ccc135, 0x252e1104, 0xf928ffdc, 0xd3a41ae3,
- 0x95fd3094, 0xa487f789, 0x06cc197c, 0xd75dbd23, 0x01ea0965, 0xbfa2853f,
- 0x8afe906b, 0xdc89f6c2, 0x184cb477, 0x98f96740, 0x4cf60e17, 0xb495d201,
- 0x8e958a4f, 0xfde73c16, 0xb6f74bfc, 0xe8237ce0, 0xc3842dea, 0x8ae80559,
- 0xf4036afc, 0x147bf6ad, 0x4bfc49dd, 0x359cba7b, 0x6e677ed1, 0xc1b3d702,
- 0xcf49dbea, 0xba24b882, 0xa70e737b, 0x21e62abb, 0x5a672bba, 0x6b34a97a,
- 0x0974d1cb, 0x74b4ea23, 0x8bba23e7, 0x7478d7a6, 0x4ee91837, 0x2bd4dbea,
- 0x48e7ddd3, 0x8fc5df77, 0x3eee9039, 0xa7c672cf, 0x937a68bb, 0xc59f6cf2,
- 0x75768951, 0xfa428d63, 0xa19df8a1, 0x511e582d, 0xf6764f9f, 0xd93764be,
- 0xfc4d8ddd, 0x478f497b, 0x0533798f, 0x7cf7c56b, 0x1b96256f, 0xe877c1d7,
- 0xcf18f4ba, 0x0f7a19ab, 0xa1b85eff, 0xbfbc0de9, 0xdbcfd1a0, 0x337a7cbf,
- 0x6c0a879d, 0xdbcbed16, 0xd8dcb12a, 0x8a7fdbca, 0x31abae71, 0x82d02dbf,
- 0x96daafef, 0x6fdbe6ef, 0x114fd76e, 0x3ca7ddeb, 0x6bb506f7, 0x3f6eede3,
- 0xe4604e6c, 0xfaf6e027, 0x29ceb164, 0xd33af8bb, 0x867ebe3e, 0x4563e80b,
- 0xb9ec9f9f, 0x6575744f, 0xece2ef24, 0xb695cbb3, 0x474e7c1c, 0xbc47a639,
- 0x43f55bbb, 0x37741c78, 0x3675710c, 0xf8c8d3f7, 0x5429e621, 0x5717d847,
- 0x443b7367, 0x614f4bd6, 0xfd8d7e74, 0x72fe4ecc, 0x3e0cdf19, 0x07d414c4,
- 0x976146b8, 0x4e778cc4, 0xbb22682f, 0x1960fa62, 0x4ba90ce7, 0x4bc2dc61,
- 0xe58b4f9f, 0xe6cb2a47, 0x9129f3f6, 0xd7df2177, 0x4875f204, 0x4890fb17,
- 0x90f0fce8, 0xdb9ed077, 0xe734cc97, 0xf3e5f6dc, 0xbcecd4f2, 0x01cd6e7f,
- 0x549aeaf8, 0xe58bf7bc, 0xf2b79429, 0x4c161e83, 0xff381c4a, 0x1a0b2ae9,
- 0xcf8a0f29, 0x518b657f, 0xe5c257dc, 0x3f813cda, 0xd1186936, 0xa7815cef,
- 0x17a16cea, 0xca31598b, 0x6f0279a9, 0x1937343f, 0x336ebeb8, 0x798fc944,
- 0x1e3f28d9, 0x6bcf6b8e, 0x97c947bf, 0x4aee311a, 0x62afc761, 0xfd3fe47d,
- 0xa7f1d844, 0xfc76e61e, 0xfe41d66a, 0x719fded3, 0x961ebe3b, 0xed12f487,
- 0xabddad07, 0x6877d72d, 0xdea00dcb, 0xaec6ffb1, 0x8633b928, 0x1f7ba5fb,
- 0x3f432fae, 0x77cecd64, 0x235dffc8, 0x2b404ce5, 0xe740c067, 0x51a1e672,
- 0xd6fabfe4, 0xd819e3f2, 0xf26995d5, 0x76c9ef4b, 0x5e77f846, 0x0d2e79e4,
- 0x7a11fb8c, 0xc16f23ed, 0xd28b2c7d, 0xa572d14f, 0xe915c850, 0x5bf290ff,
- 0xc6429bca, 0x77ec14f5, 0x9b60f1df, 0xe1ef3b28, 0xc4afa16c, 0x4dfa4dfe,
- 0x88547da4, 0xd7d211f6, 0xba2226a8, 0xbf727eb0, 0xe847d26d, 0x662505a5,
- 0x19917bc4, 0xa6e11bf8, 0xd9f58db7, 0x68f1f434, 0x83f7e5f6, 0xf451efda,
- 0x0b63f723, 0x84dcfd05, 0xff181891, 0xb81f720f, 0x5ccfd38a, 0xf4843de6,
- 0x9cd5fbf3, 0xf0bee47e, 0x3d03ef9b, 0xa07dc8fa, 0xdd7e4fdc, 0xdd3f60fd,
- 0x863ec058, 0xe95cb1e7, 0xfdf1e017, 0x45d38546, 0x3ddf9cfd, 0xf2c2c556,
- 0x733368e2, 0xb97086ab, 0x8cc9a665, 0x0316977e, 0x99cd2fdf, 0xd2d97ef5,
- 0xb413f908, 0xb8401f97, 0xf9276eb1, 0xdaadeb77, 0x3d2b3671, 0xf711fd77,
- 0xc95a8f55, 0x08e174af, 0xc2e59323, 0x0f9411ae, 0xc78199fe, 0x6e7ce55b,
- 0xb5f7bc3f, 0xb95bd410, 0xcc6296ee, 0x0255a82f, 0xb7c1d62f, 0x77beac0f,
- 0x756bbf91, 0x672f7141, 0xe427c50f, 0xbafdf1b8, 0xc6c7927e, 0xf73f5c03,
- 0xdf940929, 0x4787ef0b, 0x38fee9c6, 0xdcefcb88, 0xb807df4c, 0x5b47a91e,
- 0xf827f3fa, 0x1b30b413, 0xa7ce1294, 0x704de708, 0x985fcb7f, 0x13798ae7,
- 0x04e2231b, 0xbe663ed0, 0xcf2179db, 0x01726147, 0xba6567cc, 0x7fd717a7,
- 0x4f85c44a, 0xa9e23889, 0xfc571e44, 0xfe7d7f7b, 0x7ae20db4, 0x3b8894e8,
- 0xc6d14a9e, 0x44bd649b, 0x6cd93cf1, 0x60346ef6, 0x37799ebc, 0x3d70fe82,
- 0x7ca26a64, 0x5f1c4595, 0x6b252fbd, 0x1ac811ee, 0x106a3e54, 0xd399fbb4,
- 0xe3041fab, 0x9ed36f2c, 0xf715ea83, 0xba0af6db, 0xa7f0b5e9, 0x506b371c,
- 0xd76c28f5, 0xb3df80bb, 0x289e5fda, 0xbfea5ecb, 0xc910baf7, 0x8ff2fea3,
- 0x3a9ce242, 0xf8c893f7, 0x61f8bc40, 0xfa30bc74, 0xbc74699c, 0x37e4b17f,
- 0x245fef11, 0x6ee2d4e4, 0xd5b86f1e, 0x820f36cc, 0x6775b7ef, 0x26b8fc50,
- 0x135c3fd1, 0xbf5bf7ef, 0xa4879e51, 0x9edcdecf, 0xcff8f8fe, 0xf5e3e222,
- 0x2f5a3e22, 0x5da9d7d7, 0xcf0164df, 0x3e3e31ef, 0x60739079, 0x9f3a3c7c,
- 0x33b445e2, 0xac3c610c, 0x55b87071, 0xd12aebe9, 0x39617c4f, 0x35f73ca3,
- 0x416b2d6f, 0x2d9af23f, 0x615596e2, 0x380bd777, 0xd3975bc7, 0x8d2e63ab,
- 0xb9813ebf, 0xe593d622, 0xc994d14b, 0x33c88f92, 0xad93541d, 0x34db9f69,
- 0xf07f5344, 0xef9a51ba, 0x4d22fef9, 0x1af5a0b9, 0x6d61fd4d, 0x88f29a15,
- 0xea6bd79d, 0x4921b217, 0xa23b6fe4, 0xbb125f47, 0xf347302a, 0x85def47d,
- 0xb29ff69a, 0xa0931da6, 0x7b45a75e, 0x7bf3332f, 0x8594c67a, 0x76c7a9f3,
- 0xc3f4d52c, 0x29504a82, 0xf633df0b, 0x79a7b899, 0x878f0f79, 0x5ab5dd5c,
- 0xbead4e33, 0x1d18f08e, 0x7d622def, 0x086e37e4, 0x1b20ee28, 0xd55dff18,
- 0xaeed4ed5, 0x78c8f764, 0x2241c6c8, 0xb2cd97de, 0xd3475d39, 0xc3ec8d85,
- 0xe4f4bb0b, 0xc3642c7f, 0xf0449a1f, 0x6b80b032, 0x302efe20, 0xf0bb17ee,
- 0x798ddd8c, 0xb5bfb745, 0xca63e68c, 0xde05d2d4, 0x5eb9181f, 0x5d2d48eb,
- 0x3a5addd8, 0xa5a09a48, 0x27786883, 0xc174b47b, 0xcbbff042, 0x86753bc0,
- 0xb7fe6e96, 0xf081dce0, 0x5be186b5, 0x62d3d07c, 0xbcf1b823, 0x999f6935,
- 0x1d143d84, 0x4e7690d7, 0xbbb09070, 0x2e323f45, 0x93f159b1, 0x771fd55d,
- 0x0bcc109c, 0xd53da3e6, 0xc95fb8c4, 0x6fe5107b, 0xbd24bf0c, 0x9497e78f,
- 0xc09b9d70, 0xf32d67f5, 0x663efc92, 0x93a4fee1, 0x9fc8abd5, 0x913592b4,
- 0x9cd9fddd, 0xe8efc2e9, 0xca577aa7, 0x9f901853, 0x8b29f600, 0x06aed7f0,
- 0x1fef1a8b, 0xd2a7868a, 0x78ed04ad, 0x4b75986e, 0xfca3e07d, 0xedde0f15,
- 0x68ff89a9, 0x51e65fed, 0xb497e522, 0x1278e587, 0xc9ae529e, 0x49ac4c71,
- 0x117c899f, 0xc8d8e725, 0xd693f8e8, 0x41fd239f, 0xe3238e32, 0x9bfed14c,
- 0x55b8bb08, 0xc1d2718f, 0xce782df9, 0x21e1d8b7, 0xeb29613f, 0x4fdf05b9,
- 0xa8b47730, 0xb8bfe12e, 0x2ffbf58a, 0x2babeae2, 0xf26fb5c4, 0xf247a12b,
- 0x65b3c607, 0xff961c7c, 0xf1a11ff1, 0x8b95c524, 0xf2f398f3, 0xd3d22708,
- 0xfba6bf40, 0x8fd02bf8, 0x8a97d75e, 0x3130aaf5, 0xfdde223d, 0xf5a212a6,
- 0x367990fc, 0x6fdb9e45, 0x6ed31d60, 0xe8327f21, 0xa727bc49, 0x58bbfb93,
- 0xac3db457, 0x0759bf92, 0xafba412b, 0xde711165, 0x2a77f0fc, 0x659607eb,
- 0xec95d21e, 0x82f1a789, 0xe0a3ca72, 0xc84c53f9, 0x8cffb941, 0xf993cfb7,
- 0x4f7ef218, 0x94547799, 0x1c75f823, 0x03fd871b, 0x94e595bf, 0xf8017fa2,
- 0xc7e48953, 0xfa253cfe, 0xa31f803e, 0x31f32fd6, 0xf212f6f4, 0x5c8b2d1b,
- 0x8780d7ef, 0x9c7e5bb4, 0xc8e9e8c3, 0x3eb3d171, 0x3d6fc4e1, 0x47a4b9f8,
- 0x68da3f93, 0xfa0d0d84, 0x79145e13, 0xbb5a1d81, 0x9d1c96fd, 0xf7df1b80,
- 0xf15e095c, 0xf8f221f1, 0xaebe3e04, 0xf8f3261d, 0x0c971c24, 0x0d0afb84,
- 0x8ff7fb5c, 0x115f70fd, 0x2e12ee0b, 0x7e7b4ae7, 0xdbd2f881, 0xbc23a58f,
- 0x577aa617, 0xe3dcfd63, 0x5d144fa1, 0xfa555e78, 0x7b8794b3, 0xe74f4ba1,
- 0xe3ae1aff, 0x25e1b072, 0xf386893d, 0xb38a26dd, 0x41bd74d6, 0x66d9e176,
- 0xfc06b410, 0x72180cbd, 0x08fc65d8, 0xb78ef051, 0xd9eb924f, 0xe5f62390,
- 0xef32fd62, 0x39b9d607, 0x475e6627, 0xdfb41c3f, 0xf4aa1c05, 0x1cf09263,
- 0x92be7f59, 0x791797da, 0x3af45f5f, 0xfe78722a, 0xf7b329db, 0xc3efec3e,
- 0xd81d9c9e, 0xfbbe955f, 0x67c23670, 0xbbe742ad, 0x79f898a6, 0x5e712cdc,
- 0xc9a8426f, 0x3399e78d, 0x667df873, 0xf7a47e23, 0xfdf91c62, 0xe0a677b5,
- 0xffd93439, 0xe7474508, 0xee7b1947, 0xc12c7f83, 0x3722e6f8, 0xd8bf6fde,
- 0xba2fee24, 0x002c1acb, 0x91baf239, 0x186547df, 0x66e744d2, 0xe4f592fa,
- 0xb3d34cd7, 0x727748a9, 0xadbcf2de, 0x01f78f7c, 0x7cb15e60, 0xafa5a53f,
- 0x7bfa0d78, 0x75c83d04, 0x2beca94f, 0xe185b26e, 0xa33cf32f, 0xf82553e1,
- 0x1b43ee43, 0xd05b5ef0, 0xbccde7bf, 0x6daf7d1c, 0x5874feff, 0xf6363dff,
- 0x837eb8c8, 0x1e3493c5, 0x6bdc51f9, 0x278717fb, 0xa477df89, 0xbcf6a7bd,
- 0xbf97b7c8, 0xdeef1253, 0x51ccff9b, 0x827b97be, 0xcb85e5e2, 0x7078ffbc,
- 0x99564869, 0x739d357a, 0xdeae5ef7, 0x8fadff2b, 0x28bedfb9, 0xd3dff7be,
- 0xbcec6af9, 0xceb15ec5, 0x88617e70, 0xe7f729ce, 0x7e54be2d, 0x1a477bd9,
- 0x08d23ef0, 0xf6bcc838, 0xb48fbc06, 0xd0bf4638, 0xe576cb9f, 0xc4aa877b,
- 0x8f301abc, 0x3df23530, 0x38f0b7a7, 0x302f2269, 0xfb7c8894, 0x9d3d2ecb,
- 0x11e5ffe3, 0x31dd8cdd, 0xcb9b9de6, 0xa9ca32e1, 0x0e8d1f12, 0xe3b5fbf2,
- 0x79e793d5, 0xe8d8b9e4, 0x845675f9, 0x63f9c56e, 0xf1ac729e, 0x3f7cb91c,
- 0x99dde72b, 0xf0d1f1c2, 0x9bdf899a, 0xf27cf2aa, 0x4ad094ce, 0x168713e2,
- 0x1e04c3cf, 0xe50faf3b, 0x99b2b9d1, 0x8f10efdc, 0xfaa7261e, 0xc8be51ca,
- 0xd4caee7b, 0xc111c526, 0x87a2578f, 0x1af5dc30, 0xbb840ebc, 0x7ecbbe91,
- 0x7c151f4f, 0xdc363ccb, 0xfa112dcf, 0x5a9ec96e, 0x7ee587b2, 0x3d4ef4e9,
- 0x669c6fe5, 0xba72ae3c, 0xd0fda14d, 0xeb631f52, 0x7cad2e40, 0xaa3378a2,
- 0xb09925ae, 0x1efe1496, 0xe9661f7f, 0xbaf8a8c6, 0xbd495fd8, 0xbe5fc72a,
- 0xd7176b4d, 0x467f6db0, 0x51f4e9fb, 0xb0563f2e, 0x2b3fd226, 0x57f6d01f,
- 0xa7d667b3, 0xbce8cf38, 0x8fc21180, 0xec572d37, 0x1958ca57, 0x4f358fea,
- 0x9fc46e7c, 0x17c78960, 0x434ffbf8, 0x7ddff187, 0x8efe27c2, 0xc96f3e08,
- 0x1720fcf5, 0xf2ef3efd, 0xf5ceaef9, 0x7f45c9cd, 0x7e7cbbc9, 0x702eea17,
- 0x8d2bb33d, 0xfd8b5bf4, 0xfaff922e, 0xdffe5cc1, 0x0035f78b, 0x71264c9c,
- 0x49aeba6f, 0x4df6489f, 0x43ece880, 0x7e731de7, 0x26776540, 0xd3da43f2,
- 0xbf6067d3, 0x1eada7ce, 0x72d6df5c, 0x2ffa214f, 0xf3f356b6, 0x8f1366a1,
- 0x397e07df, 0x538600f1, 0xaf6c7686, 0xa39d1740, 0xfd107f7b, 0x3fbaad42,
- 0x98bcc61f, 0x5f3cc9d6, 0xda175d32, 0x3c62283f, 0x899035a7, 0x68e77df1,
- 0x9a4e6f7e, 0xcb90bcbe, 0x36276e7f, 0xe85c5c08, 0x627f202a, 0x17fa41af,
- 0x2e742c3b, 0x2eff8eab, 0xf1c628c6, 0x70eaf32f, 0xf176910e, 0xf9a666d9,
- 0x1d51869d, 0x4890d04a, 0x3c50a8f7, 0x30436ea8, 0x67d416fd, 0x4f14a94f,
- 0x7b4bf393, 0xe302ab3d, 0xdd8f5266, 0xbfb838a2, 0x0bfdd84d, 0x71e3fc98,
- 0x9e82f9b7, 0xff7b0816, 0xf7872b16, 0xc3cee652, 0x2ccd0b76, 0x76ac0751,
- 0x2947654b, 0x90d6b3ad, 0x8c85768c, 0x47b7c51f, 0x1ac9fb62, 0xe7e7617e,
- 0xe1be959e, 0xf419c9d8, 0x3f865d41, 0x693c85db, 0xffcfce18, 0x83cca1b1,
- 0xd786caf1, 0x30efd046, 0x3c781299, 0x1996e41c, 0x843a11ef, 0xbdd61efa,
- 0xc88b15ea, 0x73880a5b, 0xd8defccd, 0x453939c6, 0x78f11e74, 0x71ffdd1f,
- 0x23ca10de, 0x847906fe, 0x92bea637, 0xe01fbbfb, 0xd90debbd, 0xf941d760,
- 0x789fee74, 0x25684879, 0x92991bde, 0x47978977, 0x72c13e85, 0x6cc25e4e,
- 0x5685172a, 0x3e3ca1ea, 0x7197932f, 0x36040a03, 0x6cf06f35, 0xc92dda3f,
- 0x87702ec2, 0xc377bce8, 0x917423cb, 0x1e7758fe, 0x84676797, 0x3e4cf93f,
- 0xc20e6dbb, 0xfaec647b, 0xded27969, 0xc9f5a36e, 0x2cf6caf5, 0xffbb7633,
- 0x957e7210, 0xc53da738, 0x3c2d5aca, 0x044fe7fe, 0x1eec1c3c, 0x7fcbf141,
- 0xfde14dcd, 0x4f74423b, 0xfa27ec8f, 0xb8e41c77, 0x7bfe15ab, 0xf2a3e759,
- 0xcb977ebc, 0xb38c2be5, 0xea19c691, 0xe79be7e1, 0xfe2214e4, 0xc0aa6f00,
- 0xc9f7ca9b, 0xd07e7ced, 0x13fefe2f, 0xf46287e8, 0x0afc1ffb, 0xe787e7ea,
- 0x27274d1d, 0x5aab38c5, 0x2fb6dc78, 0x69e880b9, 0xf7dc558a, 0xc75da252,
- 0x9f6e3cf8, 0xc592f3ba, 0xaf091d79, 0x69c6850c, 0x09cc7011, 0xf6339f0d,
- 0xaffbaf9b, 0x8eeb4bff, 0xef0797c0, 0xc5d8396d, 0x9f1e37ef, 0xf485f1c7,
- 0xe076429c, 0x677f4d76, 0x9fb547d6, 0xae5f28c0, 0xda80a521, 0x6fee2bdf,
- 0x61d9f795, 0xdc71fcbb, 0x57bb45be, 0xfcc1a7de, 0x1c645993, 0x8cdeffb2,
- 0x70bf710e, 0x286f3aff, 0x8874eb11, 0xef223a75, 0xcfe7ec33, 0x73797d63,
- 0xb8e903ed, 0xfdfdfe1f, 0xfd12298d, 0x563cf869, 0xf1d7c87f, 0x1e5572f0,
- 0x1bdf2797, 0xa33ce45e, 0xe70fd9e0, 0x7849f9a8, 0xbb537f22, 0x94ba5a31,
- 0xf411ccf3, 0xf3bf8039, 0xe278e5bf, 0xbbfad97e, 0x7338be79, 0xbebae969,
- 0xf9875f33, 0x07ddb209, 0x34a0c078, 0x21bc27ec, 0x7327ec4f, 0xbc9c4e58,
- 0x339e6f47, 0xde917fa6, 0x7b27ef47, 0xd9efe593, 0xbd1afd69, 0x397bfe4e,
- 0x7efc73fe, 0xfca36b83, 0x7a8f183c, 0x176e16ce, 0xf2da2fd6, 0x037e0263,
- 0x797d54fd, 0x5312bf38, 0x3bf8f3c5, 0x4af3ab5b, 0x7760ab67, 0xdc552494,
- 0xa6e50f99, 0xfc93c6c7, 0xf154192e, 0xf3c83e79, 0xe04d8f35, 0x7c5747df,
- 0x79cb043e, 0x8a797913, 0xf79479e7, 0x9eccb460, 0x2bb6159a, 0xd173ccee,
- 0x1d4afab8, 0x42676bdd, 0x63d60623, 0xf944dd7a, 0xf1ae1280, 0x5d26259e,
- 0xbff3ac53, 0x03d3f4f7, 0xc04cbf8f, 0x1fd0ba7f, 0x2105fc28, 0xe82fe355,
- 0xcfe7e44d, 0x92fd5f7b, 0x9bdcd7ce, 0x079f5be7, 0x67ad1bed, 0xf287cd6f,
- 0x1407eb06, 0x603c53ef, 0xc61fa087, 0x608b60e5, 0xadd9cabe, 0xd89fb45e,
- 0x2559b76a, 0x18ff0ab8, 0xf007a9de, 0xa7b5e57b, 0x0f883c7e, 0x4c79c5e3,
- 0xf1a07214, 0x15fdbfb5, 0x678dc4e3, 0x1b346e6c, 0x7c3f7627, 0x135fda27,
- 0x1bfedfbb, 0x3cfcc3d7, 0x44f79c90, 0xfbc27ff4, 0x9a265d09, 0xabeec4ff,
- 0x6bfd6056, 0x402eebdf, 0x44eeb93a, 0x42c505b0, 0x3effabef, 0x880e7348,
- 0x7cd97a7d, 0x39f1b8d1, 0x013f8f09, 0x1e0893a0, 0xf8f0e73f, 0xced9f28a,
- 0x0e6e8f3b, 0x7b549391, 0x78ec0ade, 0x605f3b8a, 0x7b8c6e09, 0xd1fd73c8,
- 0xd5e3d57e, 0x7f783eb0, 0x20cc1f5d, 0x729e9ec2, 0x24820cd1, 0xc9672e5c,
- 0x39a5729a, 0xcabf534b, 0x3ef9af91, 0xcd2af33d, 0x42ae99f7, 0xc8d6794d,
- 0x37fa9a89, 0xe535cbba, 0x6a6613d9, 0xaa7b57ea, 0x60c2e535, 0xf17ea687,
- 0xf7ed2e91, 0x788f4c63, 0x7eee3a28, 0x27a59f03, 0x2d9ee43d, 0x06f4d53b,
- 0x85fe273e, 0xafda2383, 0x8ec5cd7c, 0xf4bdf037, 0x9f9f472b, 0x9be67e92,
- 0x3180b6f5, 0x53393fb6, 0x83df082d, 0xf3f1d7a0, 0xfb25ef53, 0x937880fe,
- 0xa0a6a7e7, 0x5090195f, 0x9fb9db75, 0x5c32efe2, 0x6efdc458, 0x943111c7,
- 0x7f276e4f, 0xd34371e5, 0x79e47fb3, 0xf728b903, 0xaf9bebae, 0x8b94f759,
- 0xcbbacd3e, 0x3c28aad9, 0x5729a1dd, 0xd4d6ee39, 0x5eb99e9f, 0x6ba67df3,
- 0xb69e144b, 0xba37ca6b, 0x53c28e1f, 0x9e9e147b, 0xf4977cd2, 0x5ffc2ddd,
- 0x0adfa1af, 0x3d42939e, 0xc9878895, 0x1f08faa7, 0x2ada7a13, 0x1cd74f11,
- 0x957c20ef, 0x2895bd04, 0x348ecb2e, 0x20cf81bd, 0xddb0cbec, 0x7a4ce681,
- 0x91766c1e, 0xf370ebff, 0x691e7a48, 0xfffbd376, 0xcf409e68, 0xe87b355f,
- 0x33cd9ff9, 0xecd3d9e8, 0x734767a5, 0xae7fd507, 0xcffb8bb9, 0xddeffb52,
- 0xc1bf45c8, 0x01a85d79, 0xc18ec623, 0x9e9e495b, 0xc79d084f, 0xeb7f06f2,
- 0x3420a9ec, 0x901d67f0, 0x9e207f2f, 0xfb4f2e45, 0x1e0e7860, 0x19fdf8cc,
- 0x43a37f22, 0x3b4429e7, 0x0fe3f47c, 0x32e6a7de, 0x37bd69b6, 0x183a7f13,
- 0x132866cb, 0x90f7e9b2, 0x93cfbc1e, 0xa8b3d8c3, 0xf7bee84a, 0xb9636707,
- 0xe79efc57, 0x241fc7e0, 0xc75bfc3b, 0xe2033dd9, 0xc707a3ec, 0x7b3fb388,
- 0x7f4765ca, 0x46afd1cf, 0x471e11d8, 0xc3cbdf85, 0x3bf80d0a, 0xa16ae51c,
- 0xf1ecf501, 0x5421e738, 0xe0c97953, 0x7bb75c52, 0x5f91e51e, 0xe1dfa06f,
- 0xfdf0a39b, 0xec97acaf, 0xf7a0fae2, 0xa7ed1346, 0x2b3ce98e, 0x957a3f90,
- 0xce10c7be, 0x14fe3d37, 0x7b5ea9e9, 0xafc21e5f, 0xbcbdf1fa, 0x4c7becec,
- 0x0d944771, 0x8f953e1c, 0x6fdb393f, 0x33e15efb, 0x0d11d71b, 0x1d83b99f,
- 0xf4115f7c, 0x201da2e4, 0x3daac7cb, 0x2c5714f5, 0x75c30cf7, 0x389af51c,
- 0x81ed7aff, 0xf237e461, 0xf08bce94, 0x360cf4ff, 0xa2e7fad0, 0xe83f44de,
- 0xd6207d42, 0x1f9e15ef, 0xa23dedcc, 0x2e67dc13, 0xe0bed2b0, 0xa3df8e98,
- 0xeedf5d10, 0xcfa3fbe2, 0x9c87dce2, 0xdc57be28, 0x0becff54, 0x5aef5fd0,
- 0xebdd750b, 0x58f6411d, 0x1740136f, 0x54f7ba68, 0xddfe39d3, 0xa12016e4,
- 0xf82def38, 0x6cfef82f, 0xfc5f9df7, 0xb07fea06, 0x1d26ead6, 0xf04518e2,
- 0x2f2a285b, 0xfd93354e, 0x6e4e78b4, 0x5bc5ea05, 0x56f0bc44, 0x614e9abb,
- 0xe8d93543, 0xdb967e80, 0x0403370a, 0x6ab4cad9, 0x5364fe23, 0x9b1dce4d,
- 0xe87ab9f1, 0x58f22376, 0x9b7d98dd, 0x07ad8cd1, 0xf9252e16, 0x19a77a17,
- 0x3adb75e6, 0xe7ef8bbd, 0xdadaf9e4, 0x22f7946f, 0xc898f7e9, 0xd020024f,
- 0xa5c9357e, 0x4949f668, 0x181f4e82, 0x6331e22c, 0xda4bd8d9, 0xe44ed778,
- 0xdf8bb3a3, 0x1e271e19, 0xec2efda1, 0x7b8adfc7, 0x4fefa39e, 0x5c3ae391,
- 0xcc75ff7c, 0xf6899408, 0x07ae42e4, 0x7cffcdc6, 0x7fac2943, 0x4743b75c,
- 0xd5b70d79, 0xb87880bf, 0x68f025d0, 0x4fc85d60, 0x7c97f937, 0x4c421bf0,
- 0xf0dce69a, 0xbba5f495, 0xa91951c7, 0xde424b2f, 0x397d48ca, 0x12adafa1,
- 0x8fd4a2f5, 0x211ae6c1, 0x8e489b8f, 0x475e6c1e, 0x7ecdc3e5, 0x6e691e7a,
- 0x8db8f215, 0x79aaffbe, 0x01c790a7, 0xd2f78f21, 0xf9e6eefb, 0xd9ad7cf4,
- 0xa985cf47, 0x6e11d7be, 0xd73b8e32, 0xdcbca3ec, 0x189b0f43, 0x742ef1c6,
- 0x038f289b, 0x47b1e238, 0xc94f30d2, 0xa344e744, 0xe5a295f3, 0x743d3f7c,
- 0xe5b7b80e, 0xfef7e46d, 0x17b4233c, 0xa7a7c707, 0x4765cca3, 0xe0f1f4b9,
- 0x8e8724f3, 0xb4765cba, 0x2e3e4fa7, 0xdd971eca, 0x0d3fe500, 0x71fd25ee,
- 0x9bb2e7d4, 0xe3c9fca0, 0xdfbbfcbd, 0xf940b765, 0xfc7dc1d3, 0xd051807b,
- 0x2fe0ecff, 0x33958e48, 0x47f220e5, 0x2a7f39a5, 0xd7d00ba8, 0x2f9107e5,
- 0x992e67a6, 0x60b17c8a, 0x72e88bb0, 0x41fd6ba6, 0xd91acf2c, 0x1515e2ae,
- 0x46e15b1e, 0x5691576c, 0xa9bb62ad, 0xc46c7739, 0x6e86d376, 0xcddb2357,
- 0x236fb318, 0xb7706f96, 0x2e9f68ab, 0x1a563940, 0xe59647ee, 0x65a72977,
- 0xddd5dd3e, 0xe307d25e, 0x0fa4bcba, 0xe62a5c24, 0xff426f56, 0xe2976367,
- 0xa9a5dfc0, 0x27f4ab98, 0x62ee6033, 0x0a93de03, 0x3dfffaf2, 0x87ac77ae,
- 0xf9623675, 0xadb72a1a, 0xedfa06ff, 0xe2f6007f, 0x8000e831, 0x00008000,
- 0x00088b1f, 0x00000000, 0x3cd5ff00, 0xe5547809, 0x9dcee7b5, 0x4c92642d,
- 0xe2420836, 0x96249964, 0x26b6432c, 0x0c486410, 0xf90130ee, 0x10196544,
- 0x044816c2, 0x7eab17eb, 0xe0171a19, 0xd16b8d69, 0xb5c46faa, 0x61e4b6af,
- 0x1d0958d4, 0x87d252aa, 0xb410553a, 0x88a47479, 0x119099f0, 0xc7d278dc,
- 0xf7fce73b, 0x24cee666, 0xbefd1480, 0xff938607, 0xcfd9fbfe, 0x005ffff9,
- 0xb3f85380, 0x34607f02, 0x3d2538fe, 0x0468048c, 0xd1bf67f1, 0x0395b26d,
- 0x8c0734ac, 0x412fa3a0, 0x6010aba3, 0x76960eaf, 0x24a40014, 0x0b7afcc3,
- 0x74b08d96, 0x5de7960b, 0x05480368, 0x20bada68, 0xda85816e, 0x6066e3bb,
- 0xe21745fb, 0x52ce38af, 0xe0546e05, 0x9bd40a93, 0x4f34899c, 0x1fd24d9a,
- 0x856fc7ca, 0x064a7850, 0x3d9ab7e8, 0x448004ba, 0x0dac7b93, 0xfb9bedc7,
- 0x8758834e, 0x31d688af, 0xeb1ebd6c, 0x6c3200e3, 0xca1f1e56, 0x5e541982,
- 0xc846ed09, 0xcff5b846, 0xda22a4fb, 0x2327c597, 0x5f434e84, 0xf9ec5f20,
- 0x63fe1654, 0xe0543d06, 0x6e03e97a, 0x70d79969, 0xea566020, 0x743967f1,
- 0x226670ec, 0x7ed45ede, 0x18bf046f, 0x7dfb43bf, 0xbe8de215, 0xbf697537,
- 0x0f5b5403, 0x0230438d, 0xd9dfb8cb, 0x855efe12, 0x878f0dff, 0xc3c06749,
- 0x10f0a1a4, 0x1af01fb5, 0xd7de8e41, 0xa66ecb0a, 0xc0bee473, 0xd1fcca9d,
- 0xf7218108, 0xf445f2a7, 0xfe05dafe, 0x25ff02f5, 0x5a74e736, 0x38fcdd5f,
- 0x8e005390, 0x4d79356d, 0xa9ffdc99, 0x1744293d, 0x2d837593, 0x5b5b3e9a,
- 0x00de8367, 0xcdd5adb0, 0x14bad7d0, 0x3f8076f4, 0xf97336b5, 0x2e16d6ad,
- 0x30f568ef, 0x8ebad9dc, 0xdb5a5fe1, 0xeb577eb9, 0xdbbf2e46, 0x6fe865ea,
- 0x5fbf917d, 0x86657f3b, 0xff4d12db, 0x45cec9d5, 0x7b9e8fc4, 0x3e9913aa,
- 0xc853f7b8, 0xc81a9eab, 0xc344b66f, 0x9d658301, 0x8f65c094, 0xd9e1f711,
- 0xc36de316, 0x8c73fd7d, 0x19c689bb, 0xad7e09a8, 0x4a437c43, 0x30d0f180,
- 0xbe9abdd2, 0x3a1c464b, 0xe4f56689, 0xeb0efe12, 0x6407bf3c, 0xd39b56d9,
- 0x383e47a5, 0x17558f1a, 0x6582dc00, 0x78f3ce0f, 0xfa85abaa, 0x48c07e41,
- 0x3fa29ce3, 0x097c7193, 0x3af07766, 0x0e1ca210, 0xb1e808c4, 0xd18f2c49,
- 0x40ddc850, 0x60f99fba, 0x1913e9e0, 0xcb3382b6, 0xa8f870c5, 0x9e03ab93,
- 0x6567a432, 0xd584c3ac, 0xa4f8127d, 0x46538412, 0xd38e00d6, 0x13d46e97,
- 0x5644149c, 0x9fc3c750, 0x6322dfc9, 0x14cbf186, 0x6b94d448, 0xed35a3cd,
- 0x9a99aceb, 0x5cf4befa, 0xb4d2ff1e, 0xfaed348b, 0xc59a5746, 0x232f53bc,
- 0x6f7465d8, 0x0974805c, 0x9b1c9956, 0x98bdfeb0, 0x062822f8, 0xbf1b1698,
- 0x317be089, 0x376a5e19, 0x14c9c0ed, 0x934e7dc0, 0x108b237d, 0xc6db9755,
- 0x99edc76b, 0x76c36353, 0x6957e657, 0x25c121d7, 0x1dabdfb4, 0xa7df3453,
- 0xe9ae5e57, 0x93fb32ef, 0xcef5ad08, 0xc6f7cd7a, 0x9f41af96, 0xe5b1300c,
- 0x014be824, 0x343afbc6, 0xecee94e3, 0xbf1fb4d2, 0xeb12641b, 0x378e7d92,
- 0x741f9609, 0x152ac4ca, 0x24df9b87, 0x3fa5f558, 0x4e01f683, 0x9f5159b0,
- 0x83dc8a4b, 0x963d0247, 0xcae52927, 0x812db7a0, 0xf21270e8, 0x89bf48f5,
- 0x6321c230, 0xbff2a963, 0x4c1f104e, 0x32002052, 0x16abf607, 0x3a79804b,
- 0x4598d78b, 0x1be5056a, 0xc906c1f5, 0x3847a691, 0x50540e47, 0x8a0428f9,
- 0x973aca08, 0x2a6bf89e, 0xb2a58f34, 0x0469dc09, 0xbf74483f, 0x2fdc7c4f,
- 0xe2c4da05, 0xcfc4c982, 0x68ff3f75, 0x17ac6dde, 0x5c261c62, 0xfebaefdf,
- 0xfd6b4a91, 0x91f53fbb, 0x7e216c37, 0xcd7eb26f, 0x8dcb5278, 0x7f19c72d,
- 0x2d6fb96a, 0x92e177cc, 0x9f680ddb, 0xaffe3610, 0xed0a76cc, 0x53a23ada,
- 0xb8c9fc23, 0x1ed420ba, 0xc418be3a, 0xbfea3177, 0x04d35a7a, 0x5ddfd7c2,
- 0x6f576c4e, 0x9e087210, 0x1c4ef545, 0x29463ea7, 0xa7d44f83, 0xb2a1ed2a,
- 0x6ae9cb9e, 0x6f68957d, 0x587bc757, 0x96ad1fb4, 0x522ef195, 0x9ca2cdcb,
- 0xa0f7cea9, 0xe5aa1728, 0x397170ff, 0x613fb44e, 0x29ef559b, 0x53ffe908,
- 0x0f18ddaa, 0xa06ec9e3, 0xc933903d, 0x1abb481e, 0xeda21f98, 0xb15eb685,
- 0x237ff317, 0x44c58c7b, 0xdf63cb8f, 0x90eeb921, 0xf1bae480, 0x8bd38376,
- 0xd49e7ff3, 0x12c6b451, 0xb7e7dfc2, 0x6abb7c08, 0xb6329ce3, 0xde6af95f,
- 0x3923e226, 0x85c8f76e, 0x563addf8, 0x4ea77ac7, 0xe88138b6, 0xfbd0af7b,
- 0xd848ff6a, 0x8686f2d0, 0xb2f5519e, 0x6cc1f3c4, 0xfaf9c1fc, 0xf4f51bef,
- 0x8e59c206, 0x9370973f, 0xdcf42999, 0x95add448, 0xea36f3f8, 0x3dd23359,
- 0x3a9c1b25, 0x1af29f6c, 0xf1a267ea, 0xc7d0bf41, 0x24f8c5af, 0x8f959c14,
- 0x6ff6c9bd, 0x8e528ca1, 0xcf2d7bf8, 0x7c89979d, 0x9fce347a, 0xd0db7c4b,
- 0x75d78db2, 0xe3cadd8d, 0x10583583, 0xcce2cf8e, 0x9e97fc28, 0x3fa5ff01,
- 0x8271fe26, 0x75cfa403, 0xbeb47428, 0x78b796e5, 0xef95cbbf, 0x43325fac,
- 0x472cb8ed, 0x39c68f97, 0x0e1f0832, 0x74074e85, 0x5f050ab6, 0xfabe1357,
- 0x1373fb17, 0xf9f0ecb1, 0x3ed9ba47, 0x88972aec, 0x4071cdee, 0xf92a3f74,
- 0x410ef68b, 0x0ed68eff, 0x5a5bcbb6, 0x84be18bb, 0x55bad073, 0x8d10dd7c,
- 0xc067f9df, 0x131ffcef, 0x01f4a3bf, 0xadfb03a3, 0x4e3c07da, 0x1180ff85,
- 0x2b4cf4c2, 0x27e225f1, 0xfae24ba1, 0x2120b93a, 0x138fa9a0, 0x738e52fe,
- 0xcae9f13c, 0x37aabb19, 0x98792148, 0x311d0b1c, 0x0ed7cc49, 0xc41daf85,
- 0x22ff2ad7, 0x11ea963f, 0xc49d6fcf, 0x37e0437e, 0xf3c0bfc4, 0x3bba78a8,
- 0x881baf1a, 0xabc1a78e, 0xc607feb6, 0xc7fa276b, 0xaf8237a4, 0x653778c4,
- 0x77f9e346, 0x7c555e0a, 0x7fe4536f, 0x236f3c38, 0x7f9ea73c, 0xb4cb6f3c,
- 0xf891b8f1, 0x453ece1e, 0xf75d47d2, 0x7e5a7210, 0x2d3a722e, 0xdff8aadb,
- 0x461677e8, 0xa26dfdd3, 0xf74359bb, 0x53c8339e, 0x4f298fcf, 0x848b7891,
- 0x3ab8128d, 0x3fdfd12c, 0xdf561ccb, 0x4e3c179d, 0xf8d6ce0a, 0x75bf9367,
- 0x29b3fc6b, 0xfd696118, 0xe5349bd8, 0x9aadeb3a, 0xada697f6, 0x6e5fd4d5,
- 0xbfa9af5b, 0x4d01ff32, 0x63c76af9, 0xd3e67e11, 0xe77afa9a, 0x73f5346f,
- 0xea6bb79b, 0x68f4b7e7, 0xfe7817ea, 0x78ab29aa, 0x1c0adadc, 0x356b6d2f,
- 0x65d3f12b, 0xfe03ab0c, 0x8b83125a, 0xdfd9070f, 0xd7b7f4ac, 0xafb24bb2,
- 0x32d1fd83, 0xc296ab9f, 0xcd9d8aaa, 0xb5f4126f, 0x2d78955a, 0xad5be18d,
- 0x3befd636, 0xee19f35a, 0xf128756c, 0x5dc3255a, 0x4e254ead, 0xb48c3173,
- 0xbb66af0b, 0x3d2164a7, 0x44bcbda6, 0xe980b807, 0xc167f87f, 0x77bc5a5e,
- 0xdce28ebc, 0x8e5eca35, 0xffb37f57, 0xe3afe436, 0x950726cd, 0x311fb77b,
- 0xf9a30ad8, 0x32b7ee65, 0x6cf8c338, 0x0d5fcb6e, 0x45cd67e4, 0x0863ba0f,
- 0xa59a9d39, 0xf7207e63, 0xcc2f5003, 0x05218336, 0xd9ecc1f5, 0xf80da392,
- 0x189207bb, 0x3f58dcfa, 0x133e0f4d, 0x7a2deb96, 0x979e299f, 0x3fa332e6,
- 0x132c4b3d, 0x50f07a4c, 0x7324a43d, 0x7d0d7e9c, 0x383ca1b4, 0xbf18b865,
- 0x1c3f22cf, 0xe96f5dfb, 0xbddd9030, 0xfa2fe76e, 0xb87ac36f, 0xf1ce53d3,
- 0x25625c3d, 0x3e792dfd, 0x371a9dd6, 0x3520bcbc, 0x5f1850e1, 0x4e19254f,
- 0xe1d1f8a5, 0x7adc5277, 0x9cf1c193, 0x05dfba2d, 0xc9a77eca, 0xc16ffb12,
- 0x4dde4dd7, 0x713e4852, 0xb84d3b7f, 0x01e226cf, 0xc1b367da, 0x7e445797,
- 0xb3f676f6, 0x16d72a9b, 0x2e9a9d11, 0x15313e91, 0xa69de285, 0x8fd92f96,
- 0x91cfd48f, 0x90f14cf0, 0xfe656fc3, 0x93badfdb, 0x8b8d45f9, 0x5c7d5b7a,
- 0x6ab80b2e, 0x39a3e3c3, 0xa64dff93, 0x4beaa7b8, 0x3c4cff11, 0x53fbdce9,
- 0xd5a077dc, 0x17e52fff, 0x9a82bcd4, 0x5bcc8867, 0xde647aa8, 0x6e0975ad,
- 0x2c9aefdc, 0x94f30651, 0x5ea3a674, 0xdedf5540, 0x9765159a, 0xbd3ac8ef,
- 0xffd6d67e, 0x6cf9fac0, 0x4d31bff9, 0xbe48d5d8, 0x05e337df, 0xe553ad03,
- 0x9ffe48ef, 0xfe707d43, 0xd7924d39, 0xf67a7a83, 0x44c37692, 0x7df9dce9,
- 0x88a0e5b9, 0xc2ed238b, 0x3607b6f7, 0x2b5c9ba6, 0x92617c73, 0x1e50726f,
- 0x4743a544, 0xcf3add34, 0x1fceb740, 0x76d16e93, 0x73bce0f1, 0xf1a08bb3,
- 0x8acbdfcf, 0xc83e27f2, 0xa91fbd3a, 0xf781ffad, 0x636b1d6d, 0xef4472b8,
- 0xea27593c, 0x01627c45, 0x80eff21d, 0x74b8e657, 0x3c7b6669, 0x195033c5,
- 0xb77d278c, 0xf2065bac, 0xd4ef410f, 0x5cfe468f, 0x1f67f0a2, 0x7e243ef8,
- 0x7ef7055c, 0x25980941, 0x22ae4bd5, 0x09f4b73d, 0xfc4f0843, 0x27f393af,
- 0xcb7e94ab, 0xda469d2d, 0xb567f2ef, 0x1a71d4ed, 0x1fc88a5f, 0x19fa5f57,
- 0x73f0ddb0, 0xa7556fde, 0x5977dfb6, 0x62872971, 0x084b1659, 0xa7d267ef,
- 0x1baec947, 0x5b27cac2, 0x3f20bf39, 0x9e03e0f3, 0xc4fa21b1, 0x71f14764,
- 0xf9e9f65c, 0x23ff5b58, 0xf71cb1f3, 0xf9e891ac, 0xbf78f97d, 0xeb4599dd,
- 0xa0f3f556, 0xc4c379de, 0xdca0677a, 0x37a4f226, 0x9e2daefd, 0x3804e4f8,
- 0x2a161d88, 0x6a85d2f9, 0x369d74be, 0xcfb93a5f, 0x49b979c7, 0x716dbd07,
- 0xbd89f748, 0x55dbce1e, 0x59b776ed, 0x5dd3fcb0, 0xa3fc994e, 0x64b96ff1,
- 0xdfeab75a, 0x484efea8, 0xbeb0e58f, 0xf66bbce3, 0xd53bebd9, 0x30f6aa2e,
- 0x06560ed2, 0xfec96bdb, 0xe3b6f84d, 0x2be7824d, 0xf7691eaf, 0x7a138bde,
- 0x57c1a8e2, 0x8cfc97be, 0xcf1c63d7, 0xaffaf441, 0xc67e16cb, 0x0302cf4c,
- 0x1f091fcb, 0x450cdbca, 0x764fae6e, 0xf54d975f, 0x99d0bd8a, 0xa3b2069d,
- 0x23df7275, 0xf95f12d7, 0xde1bce65, 0xbf07c77c, 0xccedb5ff, 0xeb2685f9,
- 0x863ff671, 0x749a338b, 0xa2f8a6e0, 0x4a56d6a4, 0x6dc50c7e, 0xf3544794,
- 0x9fed918a, 0xc8acd7b0, 0xb5573ce6, 0xd9eb49df, 0x1c33d628, 0xbe5a9abd,
- 0x8731e4d0, 0xbba9bf9b, 0x4c30badc, 0x16ca5e3e, 0xdbd22ef1, 0xa21cc87a,
- 0xc5f2d9f7, 0x65f8b7ff, 0x091e0c9a, 0xe4a16ced, 0x082ffe15, 0xf80e783f,
- 0x2083ce19, 0xf9583743, 0x38216a7c, 0x17022e18, 0xdbbb7cc3, 0x60337c4b,
- 0xbe248e08, 0xbbf57fea, 0xfe9be202, 0x57b6278b, 0x5f3bf9c1, 0x06fff48a,
- 0xfe78dbc6, 0xdbbe5781, 0x7039fc43, 0xf4ace187, 0x6764eac6, 0xe451f127,
- 0x53bbcb79, 0x01d73e64, 0x8b967afd, 0x03e8ab7a, 0x1b72a497, 0xe64cd8eb,
- 0x1cde908b, 0x37aab4f5, 0xc2bf6017, 0xd7ac744f, 0xb5e49960, 0x80aed363,
- 0x70ac458e, 0xa657a671, 0x2fa8a772, 0x95e8995c, 0x15585cb0, 0x4a6fea23,
- 0xc52642f4, 0xcb960143, 0x0fac00f9, 0x8bd08017, 0xf7938237, 0x96419189,
- 0x6f17fd84, 0x3e709735, 0xa4390216, 0x8e288bff, 0xb74e221a, 0x2f7908e6,
- 0xbefa12ce, 0x179b46b8, 0xc95b30f9, 0x4a6bdb1b, 0xe7561072, 0x1d12f738,
- 0x1bfc938b, 0x7ca32a1e, 0xa1360e90, 0x75567d7c, 0xaa779f2b, 0x35ed1afd,
- 0xcdbf84bd, 0xe2bc7012, 0x9f7936e9, 0x238ce713, 0xf2cdcfc3, 0xa807b5ed,
- 0x7b5a11dd, 0xc578f0f9, 0x47f71c7e, 0xd78795cb, 0x1bb14c57, 0x43f1fbe7,
- 0xe5cbcf25, 0xebe679ff, 0xf80b3bfa, 0x7e43c14c, 0x7ddda372, 0x046fd79c,
- 0x9ffad0b0, 0x40147114, 0x47e50673, 0xd2653c97, 0x11f19f91, 0x03c8639a,
- 0xddc61bf8, 0xe31bff04, 0xc77f8267, 0x27e099f8, 0xfc133f18, 0x04cfc607,
- 0x1e3b7f17, 0x8d802283, 0x4e4b4e39, 0x301ce879, 0x1c86bd72, 0xf9c1cf81,
- 0x7f3c8dbb, 0xf8cddd9d, 0xebf7a41d, 0x9973a5e0, 0x0bc189cd, 0x6fc34e92,
- 0xf0dfdd03, 0x7870e9f9, 0x2325cf51, 0xe853bfeb, 0x67a9d45a, 0x15d45ffb,
- 0x6b086f88, 0x918e4177, 0x395f0beb, 0x1fe34f18, 0xd7e20eb5, 0xa796e129,
- 0x82f944e9, 0x08332d67, 0x677675bf, 0xe51bed09, 0x51f6833c, 0x83c1f5d4,
- 0x7c78cb13, 0x75a364ab, 0xb8e51f04, 0x911f6221, 0xb5f75078, 0x81bfd139,
- 0x257e6ffa, 0x7a82768b, 0x10dc8407, 0xed43491f, 0xcff32d77, 0xccb05374,
- 0x7e9deb8a, 0x0a1e6e4a, 0x29f675ff, 0x5477e78c, 0xc7cf537e, 0xe952fe15,
- 0x1df2ae76, 0x6b901e39, 0x6ae77e84, 0xb236a5d2, 0xc9e3274b, 0xee57ac2f,
- 0xe6757419, 0xf4fb47bc, 0x6ec194ec, 0xc8d63e63, 0x5f8562df, 0x6dd85854,
- 0x36bd1174, 0x59ca1f77, 0xe4fa6164, 0xb3f6c62c, 0xbc9aec72, 0x89d56167,
- 0xe14a1fc7, 0xea9a56f6, 0xceba783a, 0x60dfe38a, 0x6d36fede, 0x93f5ae69,
- 0xcf2c44ba, 0x1076934f, 0x9ccb605c, 0xe5a3649a, 0x37fd797a, 0x41d94fe6,
- 0xa3957522, 0x29b9fd9e, 0xecc264a4, 0x9639ad43, 0xcb071f0e, 0x4d64d675,
- 0x31b4d2f9, 0xdb97f69a, 0x57f535b2, 0xd4d38fe6, 0xe55ef3ab, 0x2bb4d528,
- 0x51660a4e, 0x60fb875c, 0x64e780b9, 0x7da25daf, 0x1c62beef, 0x3f3da796,
- 0xca6e41cb, 0xda7ea566, 0xd12f5a96, 0x69f10063, 0x70ea8744, 0x1f47fae5,
- 0xe222c1a2, 0x67eec686, 0xe76d3876, 0x130c3710, 0x7100ac2f, 0xdc5c3f8e,
- 0xa02f1910, 0xf4907cfe, 0xd7abf167, 0xdc70db58, 0x2e2755cd, 0xef3ee1b0,
- 0x8238c1cc, 0x47128dec, 0x833fc70c, 0x04d92272, 0xf0c8f4ff, 0x03fc10dc,
- 0x1f850bf0, 0x3ef69c05, 0xdf0533fa, 0x4a7dbf5f, 0x07f38dfa, 0x57efc51e,
- 0xafbe4460, 0x6f78e8d8, 0xdea7e50d, 0xf9ce8191, 0x03ae24e1, 0x77e81beb,
- 0xb0f7c439, 0x46bbe9e8, 0x0f77f0a7, 0x94aba6b9, 0x731fdffc, 0x71aabf14,
- 0xcba35dfb, 0xf370f542, 0x70f69a27, 0xa6b774bb, 0xb6a6677c, 0xbb94ef7c,
- 0xe70ffbc3, 0x5c6c3597, 0x9ca37460, 0x392c3c36, 0x8be843bd, 0x28a4f5c1,
- 0x097ba1fa, 0xdb43dbeb, 0x716d1997, 0xe157edd1, 0x6cb79478, 0xaadec618,
- 0x193aea7a, 0x7faa879a, 0x71cf9d47, 0x08fbe77e, 0xcdf64f71, 0xc3b09af3,
- 0x8db8251f, 0x2fb27ce1, 0xf47b9c47, 0x1dc0f63f, 0xee79d35f, 0xeabe22eb,
- 0xa3f6144f, 0x9d33f3a8, 0x08f1610e, 0xd6455fc1, 0xd2ec8e80, 0x0f58d2c0,
- 0x2d359ca0, 0x8ae8f38b, 0x7e088fc9, 0x5e4f1e68, 0x9a0f2dc7, 0xeb1fce91,
- 0x3cbb1a74, 0x9616c140, 0xd9fd9efb, 0xf7e93b8c, 0xe92f7102, 0x2fb3ed7c,
- 0xad7d8407, 0x1e637a11, 0xb7629ff1, 0xd3565500, 0xc768305d, 0x5e10cf48,
- 0xbf6b9ca8, 0xc987fdf5, 0x7a11efcf, 0x370af283, 0xb4e05fb0, 0x1d0df368,
- 0xbbf8e72c, 0xd73efc2d, 0xb78feb59, 0xf3968dda, 0xd84647a8, 0x218779b5,
- 0x57fbc5f4, 0x5e9b3d22, 0xd05ef997, 0x04f6adb1, 0x911fd308, 0x28da6c78,
- 0xe5ab9cb3, 0xcbe55ba3, 0x830f6cbc, 0xd185f519, 0x83a706f8, 0x55b57fe5,
- 0xd27ab7cc, 0x4ec83337, 0x8bd6eeaf, 0xcdc32f4b, 0x5b064eb3, 0xb79b3cd8,
- 0x5d929699, 0x603fd753, 0xf749f455, 0x0a534957, 0x843b5c04, 0xc0bf457d,
- 0x417e4290, 0xecf1c4bd, 0x3551ee9e, 0x123da784, 0x3c256cf8, 0x5c6635c4,
- 0x1c314cdf, 0x5e50b77d, 0xf9f2d214, 0xd71aa59e, 0xacecf1aa, 0xf27659ee,
- 0x5cec3f63, 0x072907b8, 0xccd9ec93, 0x8782d74f, 0x893deefd, 0x93ca74ae,
- 0x3fee534c, 0xf60c49ec, 0x67bdbf91, 0xf744ac52, 0x3bfc7019, 0x5f757c69,
- 0xf4ae0c7b, 0x9297de66, 0x44fe752f, 0xf67b153e, 0x67aaa271, 0xa7dbe4eb,
- 0xf7e7495c, 0xf14ac073, 0xf85974ec, 0x2147f0d5, 0x6d5eaacf, 0x6c49fc92,
- 0xa633edaa, 0xa34f11db, 0xe4a97e5a, 0xf735db7d, 0xafd6cf00, 0x5fa38c1e,
- 0x567f9894, 0xb1bc4439, 0x47f3faaa, 0x57f9c5ae, 0xfe4fbc61, 0xedaec2dd,
- 0x917f08fb, 0x7c60fdf1, 0x7be62ff5, 0xd733e3a3, 0xcddfda32, 0x73df1c77,
- 0x69c7442d, 0xea817183, 0xcb173e5f, 0x58a8d6e7, 0xfc287c85, 0xee5b647a,
- 0x7f5835de, 0x66062fe4, 0xfd2737d7, 0x58ade8ea, 0x58b9f9be, 0x2491f3c6,
- 0x391d90f0, 0xebcf29b2, 0x744b06c1, 0x6fb25ec1, 0xcf8e25b0, 0x327b3e69,
- 0x512fb7ef, 0x76b7f27e, 0x7e4843f6, 0x7cc2e86c, 0xe88ff746, 0xb9a3cbfb,
- 0xbcc5b81f, 0xca5c8a0d, 0x5fde142f, 0x242ff54d, 0xa5427e73, 0x6327e1df,
- 0x7dac149c, 0x2b22be05, 0x92e7deb2, 0x5bdd10e7, 0x697ba15c, 0xf97c84bf,
- 0x06f708d5, 0xc7f2f7da, 0x89c3f3ff, 0x17da9fb8, 0x1555feae, 0x07d96fae,
- 0x84e340a9, 0xdfb43ae8, 0xb66b6c70, 0x3f057f90, 0xf18c5e2b, 0x27d30c1e,
- 0x35123b2b, 0xd1aea6e5, 0x7a6183de, 0x729ef068, 0x04e7c4a3, 0xed5c8a8d,
- 0xeddbba37, 0xfba5d794, 0x713f4ca9, 0xce84b51f, 0xf7ff08ab, 0x91ff1899,
- 0x5da2f67c, 0xf3566739, 0x4e60c93c, 0x0b75b923, 0x97dc0dfc, 0x31e289b7,
- 0xf1493d9f, 0x33b9823b, 0xb3f9c30a, 0x9c1bd577, 0x7b8931e7, 0x9f43318f,
- 0x13da77eb, 0xf9837db9, 0x699e4d4b, 0x4d09cb55, 0x015567fe, 0xd2501fb0,
- 0x89bd2b66, 0xba1487ce, 0x7aee9fdc, 0xf1def18d, 0x18bbabc0, 0xf2dc4f9f,
- 0xd3bbcd31, 0x5d83f4c2, 0x11163c4a, 0xbf98356f, 0x69efe450, 0xe87e7f8d,
- 0x9445bdb1, 0xcaf438ff, 0xd3ca22b8, 0x87fd942e, 0xf7c99d7e, 0x86dff69c,
- 0xf7cecb70, 0xb91e2f39, 0x0c7ba8de, 0x8d43d092, 0xba648fb4, 0xc77ae3ff,
- 0xf42e72ce, 0x850d81c3, 0x1f353f8c, 0xe864703d, 0xa75d5078, 0x3ebabe3a,
- 0xddb57c75, 0x5f22ecdf, 0x17f46fd0, 0x415ffd91, 0xfb6130e7, 0x17f78fea,
- 0xbbe85a25, 0x4913f855, 0xb7e30e58, 0x2abcf57f, 0xf06a7cf2, 0x47c9565c,
- 0xfdadd3ed, 0xaff91199, 0x8321e3af, 0x7eef5c3b, 0x752a73cf, 0xdafdeabd,
- 0xfd85ab7e, 0x2576939b, 0xe90e9f1f, 0x1deb0ed4, 0xb7bff23e, 0xc7e41c98,
- 0x905c9a27, 0x3469f88b, 0x9afef8fa, 0xe4d12fda, 0x7991efe6, 0x234481f9,
- 0x81e5bdd0, 0xf7fce554, 0x42bd602b, 0x9c16e871, 0xd756acb7, 0xd212c5bc,
- 0x2cfeaa87, 0x0bdf871b, 0x3ea91ee8, 0xdf2027e4, 0xdb6b1594, 0x2da3f687,
- 0xa0ae7bff, 0x6eb25963, 0x24b938c8, 0x1b65c857, 0x9e42c874, 0xa89d1d85,
- 0xbf0d1de0, 0xacd7ee1c, 0x40fcf452, 0x0743d9bc, 0x8ab713bb, 0xf513ab7b,
- 0x5104edbe, 0x124b400b, 0x89f4995d, 0xfab4baf1, 0x1cc86c9c, 0xd8e3cbb6,
- 0xc8504f2e, 0xccd4b77d, 0xf7435bf7, 0xc132fd5c, 0xd0c8218b, 0x413d5609,
- 0x4bc30324, 0x2a70c5c0, 0x0cbc3334, 0x015e19da, 0x02af0c1d, 0x89f8433f,
- 0x1ed80daf, 0xff656edc, 0x3a25f2e9, 0xd765cbb2, 0xcf97ed0e, 0x3c35f052,
- 0x57cb503e, 0xa63e3b9c, 0x8772f72a, 0x7cd8cbc5, 0x2f157be6, 0xec4fe226,
- 0x34de0317, 0xc0209382, 0x46264ce3, 0xdd78a6ce, 0x6b7ef7c6, 0xbae5c9c1,
- 0x6547c4b3, 0x1777adc0, 0xda97810c, 0x9e64f9a1, 0xb90327c2, 0xe951f8c0,
- 0x8dae48f5, 0x30dd2acb, 0x7b234c50, 0xc84f4d10, 0xc59675a0, 0xa7a71cd6,
- 0x5f3fb722, 0x7a8f5336, 0x66b4acb4, 0x5f90a90d, 0xe5154ee6, 0xd32f3589,
- 0xf3fb7d51, 0x71728a97, 0x81c1a94d, 0xcbdae85e, 0xfa825e66, 0x27dcadd9,
- 0xa011a337, 0xb672637b, 0x2b761738, 0x724dcaf7, 0x2fba5ae6, 0x580dba66,
- 0x0063b8df, 0x22d7ecfc, 0xb8bc91b7, 0xb37b1c45, 0x3aafd055, 0x2e4b7dd6,
- 0x2fb333d9, 0xb87486de, 0x28dd6d79, 0x18f79bb2, 0xa49ff8c5, 0x81bdc6da,
- 0xc49932f8, 0xbf7556bd, 0xc189ef58, 0xadb24072, 0xad0cc969, 0x59c75d4b,
- 0xe9d77bcc, 0x774fcde6, 0x023d206f, 0x15c70736, 0xbefb1463, 0x34c4cb65,
- 0x1d7030f4, 0x20475e56, 0x0b4d772e, 0xee91dd56, 0x1bc6847a, 0xf87c1388,
- 0x7529d108, 0x75cbe878, 0xc43b943e, 0xdfb581f7, 0xbd23eabc, 0x042e29f4,
- 0xae4df9c7, 0x475a71fb, 0x51902e93, 0x45fbf48a, 0x05007a2c, 0x9096be21,
- 0x0f107398, 0x1af04e6d, 0x77945e59, 0x99f9ca36, 0x8b03172a, 0x4fc43c71,
- 0x8b55cb99, 0xcc5422ee, 0x60937e6e, 0x16bdbba5, 0x179e686b, 0x724d3d68,
- 0xe4f881b0, 0xc607e268, 0xc76e594f, 0xf454fe0a, 0x9eb6f759, 0xa288c6db,
- 0xb2e4c20b, 0xaf89db69, 0x6f7486ff, 0x5071663a, 0x3e3c90f3, 0xe79c46b6,
- 0x46fa345a, 0x5d09d395, 0x9c383e26, 0xc5a2f5b8, 0x07d26ed4, 0xb7a4d9bd,
- 0xda06f727, 0xe81e7dc8, 0xc8463bde, 0x7fd7ea89, 0x7d8b9d23, 0xfc8cccf9,
- 0xf9104091, 0x839c5e23, 0x8af4fc73, 0xad5f0738, 0xeca93de4, 0x788b7de1,
- 0x573b8d54, 0xde2dbcf6, 0x6ff07985, 0x122bbd05, 0x713e967f, 0x6fc455e3,
- 0xd48bef91, 0x6949b478, 0x7bd7a2b7, 0xf227ed37, 0xc51351bd, 0xbfc345a3,
- 0xa27a6a37, 0xc8d1e396, 0xb9bd46f7, 0xb14afe4a, 0xa1f8a08f, 0xb436944b,
- 0x87c70b3f, 0xf709af46, 0x35c9fab6, 0xd3952b0e, 0xc7d60e2d, 0xe10e3d57,
- 0x6489e1ed, 0x9e5519e6, 0xd893ad97, 0x5af156bf, 0x49c9bcfc, 0x7e7e28eb,
- 0xe8cdfb14, 0xe97b8bef, 0xde913e4a, 0x837bd012, 0x2cc189cd, 0xb9ba67ba,
- 0xfe41df3b, 0x9d5a1ff8, 0x68430ff0, 0x503c6a7d, 0x4b54bf27, 0x2d8e87ef,
- 0x3f0b38c2, 0xa679eead, 0xd5e58ccd, 0x1055e9eb, 0x13f384a5, 0xdf2aafc8,
- 0xc6978329, 0xd543ef01, 0x39153b08, 0xc0e9a537, 0x188f8616, 0x0db008db,
- 0x29300f91, 0x4fcd8bf4, 0x39c7d4d2, 0x4ff4d02e, 0xd4d2cca8, 0xf788c5b7,
- 0xfa71e032, 0xef422a0b, 0x3f9d1248, 0x0083f4d1, 0xbd08adfe, 0x7e27b917,
- 0x4f7e4eea, 0x3ce06fe2, 0x2565f245, 0x7926f69a, 0x5dfc4cab, 0x435ea5fd,
- 0xa6529fcf, 0xb956f11b, 0x9c1cfd1b, 0xb69f4267, 0x01e0263e, 0x5bb317ea,
- 0x984bc9af, 0x85f2da6c, 0x629e4a79, 0xe4d58c2c, 0x2ef10629, 0x61d8fef8,
- 0x0545f31d, 0xbaaafce4, 0x006cf08c, 0x6f1a2daf, 0x5df840c7, 0x1c5ac6c7,
- 0x426a96f4, 0xc932cbcf, 0x8b4baa41, 0xb5b351ff, 0xd6ed1ff8, 0x52cc7be2,
- 0x9bd3be2d, 0xb0bef8b5, 0x4cd78b45, 0xd96f168f, 0xda6826eb, 0x346bdbdb,
- 0x578dbce5, 0xe45fda68, 0x8f29a19d, 0x4d7af17b, 0x858ec2fb, 0xaee2fa9a,
- 0x957a9ae5, 0x83e386cf, 0x11d5bef1, 0xdd03a6fc, 0x74a0f869, 0xf335cfe7,
- 0x9ff450fd, 0x05e8a79f, 0x2bd7fe85, 0xf8dde576, 0xb892f7c3, 0x7a158b4e,
- 0x50df3070, 0xee78e06d, 0x6d725b3d, 0xc505e91c, 0x7b14b9de, 0x894f3907,
- 0x1d9455b2, 0xa2bf717e, 0x36ab9f64, 0x2cf746de, 0x8a8afdc4, 0x09b5edc3,
- 0x1de23ed4, 0x494af4d6, 0xf91dde73, 0x67a66b2e, 0xd1209cad, 0x3f7144bd,
- 0x841ef231, 0xdeab3efa, 0x2739a319, 0x96bdee1f, 0x78c25f9a, 0x2f148c01,
- 0x3efccce8, 0x721fda31, 0x5fd9946c, 0xf6ffb373, 0x79cdc967, 0xfecc6ba7,
- 0xecefe20c, 0x79f5eeb1, 0x3aa3b788, 0x3dfd8d87, 0x00d6fde5, 0x2fdf8c6d,
- 0x3ca4ccf5, 0xbb8600e9, 0xdfd4d794, 0xbd2663b3, 0x5be5c45b, 0xac221503,
- 0xe68f3fa6, 0x71a41fb9, 0xbbd6480e, 0xbbefb14b, 0x20cf3e13, 0x73a2667b,
- 0xff3e12fe, 0xfc2f387e, 0xee147bfc, 0x406df50f, 0x79f0f367, 0x8e25cfee,
- 0xc1a73e53, 0x77e4c2aa, 0x4f79419d, 0xb9b6e22a, 0xb8d4be0a, 0xff328e6d,
- 0x29621c01, 0x5db9f3ea, 0xacbc667f, 0xe253eb48, 0x158766bd, 0xac71bfc7,
- 0x7bef84a5, 0x221d4b39, 0xb8e357de, 0xb1f7f231, 0x87ba67b3, 0x88ed22b5,
- 0x97869e26, 0x57cc8792, 0x42c5e646, 0x1fe316fb, 0x76cb1f58, 0x74fac0e8,
- 0xc6863fce, 0x85f9ca3f, 0xfbdacdb9, 0x3e5bf3dd, 0x02dff0d2, 0xfa9a27cf,
- 0x30d04a40, 0x781ff706, 0x1bd4f475, 0xaffd7f10, 0x9e282a98, 0xe4f5ba45,
- 0x0fc68795, 0x897797dc, 0xb4ce1bf0, 0x1d79cfcf, 0x5afdcabd, 0x5bfb9f75,
- 0xd03971f7, 0xefb8881a, 0xbfc4d1e4, 0x09fe342a, 0x0e8fc9f2, 0x431c234a,
- 0xe76673e5, 0x57a9388b, 0xabda7779, 0x55ed3bbc, 0x2af689de, 0x957b42ef,
- 0xbbeaa177, 0x9c095edd, 0x8e64bf74, 0x816a7ae4, 0x0f76efc4, 0x969b3ed1,
- 0x5de88b07, 0x60715363, 0xbbf3249e, 0x7df423dd, 0x45bb7788, 0x69f49592,
- 0x6ef435ea, 0xcf08f0f7, 0xd9d8b251, 0x50d789f9, 0x28d870bc, 0xc5f4a3e4,
- 0x7bd2666f, 0xdd53c7bd, 0x3fbf89c7, 0xf31f6495, 0x9f12ebf3, 0x6033832b,
- 0xef16719c, 0x13cbbf7a, 0xf32734e7, 0x0a89b9c4, 0xbd0be647, 0x9cb04a56,
- 0x2e6f72e8, 0x68dabbbf, 0x385777e2, 0x7617539f, 0x7df2221f, 0x4d1fc95c,
- 0x6a1fd23e, 0x5f2127de, 0xbbd3379c, 0xca675a39, 0x6ef126fd, 0xa0cfe02e,
- 0x0f747677, 0xbdf245ca, 0x1de29c1b, 0xf0a6de5e, 0xf90930f7, 0x45bd3dca,
- 0xb7a779a0, 0x7ec0e7b2, 0xe5af27d2, 0x53f393b9, 0x56c7ef43, 0x7f676fc6,
- 0xc6df20af, 0x797ae2f3, 0xee17a8ff, 0xc79cfa24, 0x7e56617c, 0xd7fff479,
- 0xf5bc3860, 0x5a3ff62f, 0xf77a1990, 0xaddef616, 0xaf2c3f20, 0x343faea7,
- 0x42e5e9ce, 0xe5a97bcd, 0x93dd8f43, 0x7486724b, 0x5948f911, 0x88633f9a,
- 0x05f3d3fe, 0x774feffd, 0x97f9a04d, 0x60bc8b37, 0x6fda6f88, 0xdce898f9,
- 0x3ef21963, 0x723fdabf, 0xf5b72b9f, 0x9f703e30, 0x617f68f3, 0xf08cbcfb,
- 0xbfb3eea5, 0x05a3ef7b, 0xdfdf73b6, 0x6e777df7, 0x6d71811a, 0x767950d2,
- 0x7e613eb4, 0x3fed4999, 0x8fedaf56, 0xb1e5390e, 0xd1de48bb, 0xe254e96f,
- 0xc4cbb21e, 0xee5daa17, 0x2920e254, 0x02ed83b0, 0x71724bcd, 0xfb61e4b7,
- 0xda3be6a8, 0x947fb424, 0x61af7057, 0x685f217b, 0x775f1071, 0x49f9ef22,
- 0x58cef41e, 0xc44773ce, 0x758b3fd7, 0xbc7844df, 0xa335aff9, 0xf2defe28,
- 0x38942b97, 0x72abd84e, 0x69cb3f85, 0x4147da7e, 0x996df302, 0x730abeb2,
- 0x55c9fb33, 0x5e4591e0, 0xac93c943, 0xdff1c1df, 0x141a01ff, 0x43d0aaaf,
- 0x000043d0
-};
-
-static const u32 usem_int_table_data_e1h[] = {
- 0x00088b1f, 0x00000000, 0x51fbff00, 0x03f0c0cf, 0x1894738a, 0x18357a18,
- 0x326b3618, 0x31686830, 0x20318830, 0xaf8568e4, 0x9fa65371, 0x8181959b,
- 0x81f98817, 0xd7881058, 0x6c303133, 0xff5e2260, 0xfb045111, 0xc303209e,
- 0x197f2051, 0x6614ee90, 0x64055860, 0x2fe2031f, 0x1080be40, 0x100c8303,
- 0x606115ff, 0xc1d20530, 0xc4036c40, 0x9bf145c7, 0x7c80827f, 0xbf2a08bc,
- 0x279f8d1b, 0x25ff5f8c, 0x0ff2fc11, 0xc363c808, 0xc7e41632, 0x7a052247,
- 0x29370207, 0xca8ff2a2, 0x543c3033, 0x51d06060, 0x919bf082, 0x6280ede4,
- 0xec21e4c7, 0xb8c09229, 0x28b5ca07, 0x2a773762, 0x5004fe50, 0x34894bce,
- 0x41d3dcf7, 0x8434afe5, 0x3ebc00d0, 0x03a8e414, 0x000003a8
-};
-
-static const u32 usem_pram_data_e1h[] = {
- 0x00088b1f, 0x00000000, 0x7de5ff00, 0xc5547c0b, 0x3d9cf8b9, 0x926eece7,
- 0x2126cddd, 0x26c2bc21, 0xb80d4401, 0x41a00c40, 0x94520f37, 0xa2a1e1a8,
- 0x24786c22, 0xf622ef21, 0xddbf1f62, 0x52c488f0, 0xd4c51f1b, 0xb051768b,
- 0x40368bd1, 0x5c1758d0, 0x6d8b459e, 0xd5a045e8, 0x5e40137a, 0xa540b206,
- 0xcffd45b6, 0xdd9ccdf7, 0x26364e73, 0xf6ffef6a, 0x9fdbfffe, 0xcccce61d,
- 0x7cdf3337, 0x9be6bdf3, 0x91be6489, 0xf21387d8, 0x256efc25, 0x108951e4,
- 0x9b4e1892, 0x9fc6933c, 0xedb10994, 0x3bea247e, 0x9e0c8499, 0xd146706b,
- 0x64085fa2, 0x21064b4e, 0x758d65c9, 0x793dfa46, 0xe8bc7d91, 0x9819c308,
- 0xbdbae47c, 0x720c8409, 0x8266f6d3, 0x7fe92fbf, 0xc637b73f, 0x79c87491,
- 0xe131a285, 0xbc9a4afa, 0x877bfa48, 0x149bbeaa, 0x48dda675, 0xfd51fbfb,
- 0x9085b720, 0x4678c31f, 0x40cba942, 0x78b8e8b2, 0xfd2e6f5d, 0xaf0e3a3f,
- 0x295be8cd, 0x1c1a2210, 0x242444b1, 0xa0482326, 0x679fbe9d, 0x49d7ed3c,
- 0xaa36a708, 0x49e74ac8, 0x0896b4c8, 0x808972f5, 0x76cd8878, 0x78e97b25,
- 0xe22e7d57, 0x220d57bc, 0x27d69da4, 0x684ed392, 0x61c4953a, 0x232dc7bd,
- 0xd57e4206, 0x07123fae, 0x199b1b85, 0xdc155e24, 0x0e210f06, 0xc7afc513,
- 0x8f2ae98c, 0xc4c9a79d, 0xab210d71, 0x574c0e3d, 0xfa634679, 0xdb27d0d8,
- 0xf9d00673, 0xe9d8320c, 0x63834848, 0x1191dda6, 0x2dc72786, 0x0938e730,
- 0xae2926c9, 0x2f5a54c4, 0x4dc840d1, 0xf342dc84, 0x19082375, 0x5084ec49,
- 0xbc933dfc, 0x019dca2c, 0x45e60779, 0x7cc37103, 0x2e033263, 0xc62de291,
- 0x1c95cf69, 0x1bf8efe1, 0x785fdf80, 0xc1d6152c, 0xefcc1b7d, 0xf0ece219,
- 0x46dd9e02, 0x7f5a26e7, 0x5e4ceca5, 0x468ae14a, 0xad832eda, 0x1e1094bb,
- 0x1fc04cdf, 0xc9cc8fed, 0x30d7acd5, 0xb4224718, 0xeb05dc73, 0x75b27164,
- 0x1574095a, 0xf2e61d61, 0x77655875, 0x38fd19d9, 0x11fa2449, 0xb1fa5aed,
- 0x8fe96864, 0xba9fb17e, 0xbf69e79e, 0x3bed424d, 0xbd3ce953, 0x4dce0c4b,
- 0xa160dbf4, 0x0af80051, 0x80c425bc, 0x41f92afd, 0x63a05265, 0x179e1ab4,
- 0xf6f7e9cb, 0xefcf730e, 0x52e2062d, 0x2a5372e8, 0xcec4777c, 0x6626e3a0,
- 0x6b4cec47, 0xe987f1cf, 0xc925d924, 0x4c0eef40, 0xac47041b, 0x477ce300,
- 0x9c049b24, 0xf6a4727f, 0x08edd3ff, 0x377ae1f1, 0xda5475f0, 0x3d203c51,
- 0x468b1f79, 0x3450883c, 0x17e70c39, 0xad17cf1c, 0x4a434f45, 0x5868e348,
- 0x875f9a5f, 0x79a663f4, 0xb09be62b, 0x3fca1edc, 0xacc9e58b, 0xba5e5d3e,
- 0x55e3bd18, 0x6eda1759, 0x4291c203, 0x38445e70, 0x9bbf5096, 0x94203f30,
- 0x1fd625ff, 0xb7f7eaca, 0xceed251f, 0xf9c29e0d, 0x003b2ebf, 0xd6dbc29f,
- 0x2e94adc0, 0x7c106e0e, 0xec0f9a26, 0x75fe418c, 0x9f0cfd7e, 0xd767e289,
- 0xea0b9338, 0x8398df9f, 0xefedbcf9, 0x5e5a24db, 0x20945db5, 0xd679d86f,
- 0x5bf141d6, 0xfc7f6a63, 0xb83dfa66, 0x602f245d, 0x9f71d377, 0x48b4e29d,
- 0x92f9633e, 0xdaad9628, 0xc01e6bb0, 0x91336b2d, 0xeaa70a28, 0x6f3bd2cd,
- 0x03d2f9a6, 0x552a8132, 0x838cea9b, 0x4f897e69, 0x8afc8168, 0x3f601f9d,
- 0x0ca4b9dd, 0xe039f7f6, 0x752f945b, 0xee93dadb, 0x7dcbdfa6, 0x7ea00a5b,
- 0x09c166f9, 0x7ebe5c4b, 0xbf0087d7, 0x211e55bc, 0xcd15f852, 0xdaa1c431,
- 0x17db792f, 0xade309df, 0x9426477a, 0xabb291ed, 0x1891190f, 0xe02346a4,
- 0x13d36ab5, 0xe79be046, 0x1fb0073c, 0x2f559f05, 0xbb687ed3, 0x2704d7ea,
- 0xc0daad4c, 0x3785cdf8, 0x68bce6a3, 0x19d57981, 0x37fb4147, 0xbd42351f,
- 0xebf15f52, 0xdf180e51, 0x8c016306, 0x6306fd73, 0x566f8a89, 0x3356ff34,
- 0xe94d53ae, 0xbd19dd03, 0xce39e7af, 0x397c95b7, 0xdf484015, 0x4338cf92,
- 0x339e87c5, 0x57fd21c4, 0x11c48b34, 0xdf781f81, 0x8760fbe7, 0xbe03f195,
- 0xc0ed4b5e, 0x75ebc21a, 0x4fd7cec9, 0x88cd660a, 0xe71ee7c0, 0xcb91a3f2,
- 0x7e41278f, 0x2e69f4d0, 0xf971d63f, 0xe271e4d1, 0x933d67f7, 0x71ef5d30,
- 0x67bcfa61, 0x4f3ea61a, 0xc17bd611, 0x8dd30733, 0x7f7e371e, 0x698653c9,
- 0xbf16a7b3, 0x8e59e2bf, 0x178f66e9, 0x59e6bfbf, 0xa78b6983, 0x9eadd311,
- 0x3d5b4c26, 0xbaf7ac3e, 0x36d319a7, 0xff7e0b4f, 0x530da7b5, 0xe98027bf,
- 0x9a5f584e, 0x98ed3c06, 0xc31cf6ee, 0x03a7af74, 0x9cf7eddb, 0xc72d74c1,
- 0xe49b2dbb, 0x366f1448, 0x18394117, 0x91cae85f, 0x88be3e69, 0x7ae693e5,
- 0x9f348c73, 0x8a79a6e4, 0x8195c1c7, 0x0fcd131c, 0x29e565ae, 0x66b9243f,
- 0xb2f14f9a, 0xaeb5b4f2, 0x4f9a28dc, 0xa3e5646b, 0xa3737bd6, 0x8f947e69,
- 0x39b75f95, 0xf3431b90, 0xf2b0b5d7, 0x67927eb1, 0x01b1f9a1, 0xd07f1f96,
- 0xf9a56795, 0x9f2cedf1, 0xcf37a1f5, 0x1d59f346, 0x5d4dfdac, 0x9a58bc81,
- 0xcac829bf, 0xf24ab96f, 0x2e4003ed, 0xb5cf980b, 0xd1c7e4e4, 0xe59dae7c,
- 0x4b16860b, 0xb7f4088e, 0x0858ee53, 0xb0867c22, 0x7e5125d1, 0xf1d8d3b3,
- 0x647d1510, 0x4baaf0a1, 0x2033fca2, 0xfe504593, 0x963af0b0, 0x19648c07,
- 0xbc2a3f94, 0x65bbe58d, 0x5cff9607, 0x46f2c038, 0x87ff9607, 0x7bf30870,
- 0xef961765, 0xf2c4fe10, 0xff961746, 0xf981385e, 0xcb1bb2fd, 0x962e853b,
- 0xfcb1ba37, 0x5753e949, 0xdf2fed3e, 0xdc2e9ee0, 0x5ddb7208, 0x6a597286,
- 0x597e0649, 0x04fff9cf, 0xaf2d0640, 0x3944641f, 0xacf3f3b8, 0xd3814517,
- 0x97c800f6, 0x05fa04bc, 0x485b3385, 0xc2827d04, 0x7386fb11, 0x349317cb,
- 0xa2f96e70, 0x20f3814c, 0x1fea89c2, 0xdc5f9d9c, 0x17cf1da4, 0x129c0ae5,
- 0x7fb5979c, 0xcbe5baf3, 0xbe78ed6c, 0xd4e054ac, 0xfeb89c20, 0x4f20278d,
- 0xc809c0d4, 0xe59c0aa5, 0x7fb12708, 0x271971e3, 0x8cb8e06b, 0x7538144b,
- 0xff506708, 0x378c04e0, 0xac63c76b, 0x863ce050, 0xbfd61e78, 0x534cb979,
- 0x5531e3b4, 0xc29e7029, 0x0ff6a4f1, 0x16ab6ece, 0x21adbb3f, 0x3847acfc,
- 0xaf37fb23, 0x3f169b5c, 0x7e10b6b9, 0x6b9c2136, 0xb76707fb, 0xdd9f8b4d,
- 0x5e7e10b6, 0xe98cfc43, 0x6372bcdf, 0x8dc9f8b5, 0x0de7e10d, 0xfeb8cf1c,
- 0xa26f678d, 0x137b3f16, 0x2009f843, 0x37fb1b9c, 0x2d24fc9e, 0x4293f27e,
- 0xe10779f8, 0x9c1fee4c, 0xfc5a49bd, 0xe10a4dec, 0x67080fe7, 0x95e6ff4a,
- 0x9f8b503f, 0xfc2181fc, 0x7270807c, 0xa94ccddc, 0xbdac70a4, 0x4c3fd9c3,
- 0xc3fd9f8b, 0x8939f842, 0x3852a670, 0x29c37de9, 0xa7e2d148, 0x9f842520,
- 0xb6e708f3, 0x9fd9c1fe, 0xfecfc5a2, 0xae7e1094, 0xfa3b9c20, 0xe182af37,
- 0x0c14fc5a, 0x55b9f867, 0x8672871a, 0xd8274eca, 0x9a19dfe7, 0xc13212e3,
- 0xd179624e, 0xf7a024ee, 0x433e8a88, 0xad225dda, 0x371cd96f, 0xd6a231fe,
- 0x068d726b, 0xaa56cf4a, 0x9af5a9e5, 0x1ad149d8, 0x15ce2a3d, 0x4c27c9af,
- 0x9fa9ac1b, 0x29a69458, 0x3ae7381f, 0xf720f94d, 0x487e4d78, 0xfa9a4dd9,
- 0x35736ac3, 0x6fcbe1f9, 0xf54fd4d7, 0xd3e4d4ce, 0xa9afdcd7, 0x8171b23f,
- 0xa69afca6, 0xb5f94d72, 0xfc9aa5be, 0xd7dfcdf5, 0xb2d31fd4, 0x437e5342,
- 0xf29a63db, 0x35278171, 0x9e0709f9, 0xb1bfd4d5, 0xf94d05fd, 0x68af63c4,
- 0x6c7727ca, 0x3e6fe4d5, 0xfd4d6bf3, 0x9addc129, 0xbd9fadfc, 0x439fa9ab,
- 0x6fab53fe, 0xd4d03f9b, 0xa13f6a9f, 0xf24eff29, 0x553d3a27, 0xccaf1f6b,
- 0x32afcc21, 0x01afd988, 0xc6f311ab, 0xa76616c1, 0x271c4b58, 0x7718fd29,
- 0xa00c742f, 0x033403f4, 0xe0ce5176, 0xe83a6bb2, 0xe4ddeff7, 0xbf4ec6be,
- 0xbee8cf7f, 0xbf411ec1, 0x9026f4a1, 0x061d4864, 0x8fe5f548, 0x73de8cca,
- 0xd51d5c87, 0x18d7db49, 0x68e2a382, 0xe73123fe, 0xf9c7a03e, 0x83ee0306,
- 0x42d49168, 0x411368bd, 0xd4d1e9bb, 0xaabd17ac, 0x1866b0fd, 0x308433d5,
- 0x3bb235dc, 0xc328f519, 0x9bd03af8, 0x79d187ea, 0xd164260d, 0xbcbb718a,
- 0x61fb6823, 0x8fe0c925, 0x3f991930, 0x91bfd424, 0xfd819ff6, 0x6bfe812f,
- 0x94dfe97a, 0xbfd34936, 0xd34ca539, 0xfb48d9bf, 0x90f213b7, 0xbfde26e1,
- 0xcb36fd19, 0xfec64c56, 0xd865294d, 0x6a46a3ff, 0x8ffba977, 0x88fff50e,
- 0x31ebf681, 0xc7b9bb30, 0xd26ed3fc, 0x5ca53fce, 0x9b237f3b, 0x2e434aff,
- 0xe71a3fef, 0x0e456abf, 0x394a7f9b, 0xc189bf9b, 0x0b6ff50d, 0xfa01bfe1,
- 0xfd2f69ff, 0xb5b3d29b, 0xa95e1ff3, 0xf589bf9d, 0x76e194ff, 0xfb05bfde,
- 0x6dc57a7f, 0x2bc3fe6c, 0xc9a37fb1, 0x31fa04ed, 0xf5ae890e, 0x50c9fed1,
- 0x040d256a, 0x40fda172, 0xfd1e3a3a, 0xd21a7708, 0x8bdf1c70, 0x2576f466,
- 0xf21bd29a, 0xc3b32f33, 0x5273947d, 0xd39aaabb, 0x1ce6c57a, 0x2c3df023,
- 0x3164224f, 0x36a2ea1f, 0x3c9be911, 0xa48df26d, 0x26d0bde3, 0xe8bf217a,
- 0xe03e29e9, 0x322635af, 0x49da08bf, 0x3fdf0024, 0x1798d9fe, 0xa7d2f3d4,
- 0x53e51b8b, 0xde45fc91, 0x96ba325c, 0x1002ef8e, 0x9c7f2a81, 0x186071da,
- 0x1eed487f, 0x139fed42, 0x23efeb32, 0xbe41ef6a, 0x687bda83, 0xe63a9338,
- 0xdebaf357, 0x5fc7411f, 0x0b9f9444, 0x6e2f79e7, 0x624f0a29, 0x5af8f7fb,
- 0x31eb07c1, 0x450c797f, 0xc53c787e, 0xbe4d04de, 0x07eac4c6, 0x2f7c136f,
- 0x0dc30808, 0xe1c199f8, 0xcd2f1811, 0x36b3b1cf, 0xb1de7747, 0x7dd1b05f,
- 0x5d0866b4, 0x599c308b, 0xa40f25c4, 0x565eefed, 0xa76b832c, 0x8186105d,
- 0xa166b09f, 0x3768024c, 0x0649137b, 0xde3a0e27, 0xfe0cedfb, 0x527c970c,
- 0x239b47ed, 0x74455d9b, 0x458a733c, 0x69acff1c, 0xed3ff2da, 0xc619718c,
- 0x53cd74d2, 0x45ddcfd8, 0xe77bf2da, 0xb9f7f368, 0x572a79b5, 0x0fab9065,
- 0x53bfe994, 0xdf8124b4, 0x001bf431, 0xe3907ce3, 0xd95ac1e1, 0x6a5759ab,
- 0x72951b76, 0x09b88470, 0x8bdb0bce, 0x5fcfeb3d, 0x7b17cdae, 0x6b85f9b5,
- 0xdd39f074, 0x7febe29c, 0xa683e81c, 0x283e81a7, 0xfa724f3e, 0x2d55f308,
- 0x9a13dc7d, 0x650fa306, 0xb624a71e, 0x33d6a704, 0x18dc58fa, 0x8a1e9913,
- 0x52e0f443, 0x0f4297d3, 0xfc3d38f3, 0x6943d399, 0xeeae9693, 0xc9d5be23,
- 0x6b03fdb4, 0x1d3ae177, 0x3d198a11, 0x87a140fc, 0xb83d0d0e, 0xd7e83d39,
- 0x87a71e6f, 0x7a308f79, 0x0767afd0, 0xa68e87a7, 0xeb4932cb, 0x1d74aeb9,
- 0x0eba7934, 0xa3b775ba, 0x47ae8a1f, 0x10587a44, 0x389d379a, 0x65e42fcd,
- 0xd38bd63d, 0x01a79603, 0x09e209da, 0xacf7a7db, 0x2684f94f, 0x7914da5f,
- 0x36da6bfd, 0xfaa93dac, 0xf2f2d55e, 0xbb7fb55a, 0x268b79a2, 0xa6f7c4bf,
- 0xd3697ea6, 0x717e4d2e, 0xfa9a3be3, 0xd21cd70b, 0x7fbd8be4, 0xbf9fd4d4,
- 0xfe5353bc, 0x4d59ed60, 0x176503f9, 0xdcfbf935, 0xbfd4d37f, 0x13f08e77,
- 0xa2eefe75, 0xeba89fa8, 0xa7169acf, 0x0d70cfc9, 0xec37d4d2, 0xa02ef6bc,
- 0x8bef83c7, 0x4f8ff404, 0xffd1a79d, 0x7653a9f9, 0x1e939d42, 0x5383ee07,
- 0x9e98d19e, 0x9f7138f1, 0xc24cf39c, 0x1edb42f4, 0x2a15c80b, 0xc8e04b47,
- 0x96e574a6, 0x20d935ba, 0xdfca09d7, 0xd46f958a, 0xb2128779, 0x8a639414,
- 0x2326c2ef, 0x44204c09, 0xc7c4e100, 0xe5551415, 0x37947d1d, 0xd0914151,
- 0x23b0bcb3, 0x279af7f2, 0x23db878b, 0xf7847f9c, 0x7a021935, 0xe72f7752,
- 0x29029524, 0x64277f52, 0xad81f205, 0x34a94f5c, 0xb9517e32, 0xb12e5075,
- 0xaa303e41, 0x6bfaabf6, 0x206c9ba1, 0x66ba49d0, 0x5d36973f, 0x04f7e1af,
- 0x4089cfbc, 0xdf34136f, 0xd66f9a2b, 0x25daebb4, 0x81abb8e2, 0xfdb95097,
- 0x527a292a, 0xd8153e04, 0x0c6b3293, 0x14f5d38c, 0x5112dbe6, 0x34f2ec8f,
- 0xc7f385af, 0x4c169e73, 0x30da78cf, 0xac09e53d, 0x93c07385, 0x1e98039e,
- 0xda63b4f7, 0xe98639e1, 0x4c0e9e47, 0xc19cf43f, 0x209e4ff4, 0x9f3cc7a6,
- 0x43c47a61, 0x0e70027e, 0xfe98cc7b, 0xb4c763c1, 0xe98c93dd, 0xfb0f8f05,
- 0x5f5df651, 0xcb867774, 0x7f4073ed, 0xbb6ce811, 0xcd6eac78, 0xbd9d30d0,
- 0xa423f2b9, 0x85cf0533, 0x0f4e264d, 0x087a1a49, 0x2377ed80, 0xe51f4bd0,
- 0xc3a7324d, 0x2e47dade, 0xbfe179af, 0x7caede87, 0xd30b7a4b, 0x3d1d10d3,
- 0x4f45f7a5, 0xe9e9aa1f, 0xe4cec18a, 0x1fa7a720, 0x7d6b73f3, 0x3b1251bf,
- 0x77e90ca7, 0x800cdbae, 0xdcfca5aa, 0x98699084, 0x5efbc4bf, 0xa3b5c149,
- 0x0d1be81e, 0xe8d2eb72, 0x73828fbf, 0xe8c74b87, 0x3ef6a7e2, 0xf49d3d3f,
- 0xda17778e, 0xba3e2543, 0xc61109e8, 0x1af0cd1b, 0xc8d32065, 0x461a4278,
- 0x905ce4be, 0xc4897981, 0x739f7b43, 0x3af38446, 0xc06b7542, 0x5d785f6f,
- 0x1632bd69, 0xf67a9de0, 0x5e90591f, 0xf475fae1, 0x7cf5111e, 0x09d17812,
- 0x25c90df8, 0xfc193393, 0x4691c1f3, 0x6409ea3b, 0xde7f16ba, 0x6bf835db,
- 0xcfce5548, 0x7f11931b, 0x1a8cae23, 0xf5fc0fd4, 0xc2fbdfc5, 0xf79802e7,
- 0xfb3e037f, 0xf17d8fe2, 0xbc72e245, 0xdfe746d6, 0xf8cc73bd, 0x07c01cc7,
- 0xcadf1abf, 0x3c7e9c79, 0xb5f492f9, 0x493bb8e8, 0x14f6e3a1, 0x24d85b88,
- 0xa5fe11da, 0x61626bd6, 0xfa70425d, 0xede14f58, 0xbd80a73f, 0x0b553e7f,
- 0xef074b2f, 0x6d1781d2, 0x4d2d7760, 0x3270d85e, 0x69283ac2, 0x835d4b55,
- 0xb687ad45, 0x1a97f369, 0x382d6985, 0xecec472e, 0x6a272031, 0x5edf181b,
- 0x6702a98e, 0xf6097269, 0x3d39ae73, 0x3e7bd996, 0xc7360fc6, 0xe738bf21,
- 0xfffac329, 0x06f4f1ec, 0x210f1947, 0x1984e4ad, 0x4b2d31f8, 0xaf7ebdef,
- 0xc5fd2cf4, 0x7edde788, 0x89e80af9, 0x97dffa66, 0x09107760, 0x9adc0af3,
- 0xba6bd116, 0xd66fbe01, 0x39db47f4, 0x2fe23394, 0xdf6abed3, 0x4294e448,
- 0xd45e54b7, 0xe780bd83, 0xd41a4b9b, 0xcb55597a, 0x9af93577, 0xd41a47b6,
- 0xfc055c7a, 0x525ff283, 0x3740482b, 0xba8c9c95, 0x811acb25, 0x8218af8e,
- 0x088f804f, 0xb9955eac, 0x9fa6e8c3, 0x75b2b7ce, 0xe77c6deb, 0x8854e63c,
- 0x3733283b, 0x597239e2, 0x20f9b5cb, 0xaeb7ce67, 0x5bf9024c, 0x5efa5e21,
- 0xbff68451, 0x5f2e0494, 0xcf56a0e4, 0x2841f4e7, 0xe9bcd55c, 0x17c212cf,
- 0x981977cd, 0xb70f465f, 0xbe91cb8d, 0xf9bd53f0, 0x8ecdc150, 0x6eaa789e,
- 0x237468bb, 0xcbffa45f, 0xc149e30a, 0xd454b15f, 0x9771d126, 0xffa27737,
- 0x1dec8ce2, 0xe0af83f5, 0x67e73d97, 0xa3153e49, 0xb497979a, 0x260b3f43,
- 0xe3888ef1, 0x1f280fde, 0x995bfb3f, 0x57221f00, 0x742cad69, 0xc7bb70cd,
- 0x61590c40, 0x6b4167bb, 0xfbe02145, 0xd1d3ebec, 0x8a445bee, 0xa40eb86c,
- 0x9cec0b31, 0x7180c4f3, 0x1fc9e755, 0xeb54136c, 0xe464b1f0, 0x305ca678,
- 0x1034bb89, 0xd8107bbe, 0xf539a65f, 0xf858e2ed, 0x9076292e, 0x15ca25f0,
- 0x17f180b7, 0x2f77d873, 0xfd01d731, 0x634a7b52, 0x4a7b51af, 0x94f6a793,
- 0x66425f26, 0x453c4126, 0xf132d3c9, 0x4d278809, 0x8ec941f9, 0xb5252be3,
- 0xfd1fa0f5, 0x16e1fb9c, 0x843489d8, 0x37aa87ca, 0xa62acb9e, 0x9892e9f3,
- 0x0f894bff, 0xc5654258, 0x5e5e5a20, 0x2cba33e6, 0xa277e73d, 0x943dc275,
- 0x163c03f3, 0xd1bf7007, 0x886be2c7, 0xaae2ca43, 0xf3ef5a4f, 0xba2ad81f,
- 0x8f506ca2, 0x9bc6577e, 0xafe7f5fa, 0x4f212f9f, 0x9bca3ce2, 0x561c6067,
- 0x3e07788f, 0x7059b91f, 0x27ec22f9, 0xf1341651, 0xc332092e, 0x1ad7e29f,
- 0x0f5fcbae, 0x6bdf9579, 0x1bfcaea2, 0x440197f3, 0x258a5fc3, 0x913970f7,
- 0xbfc013f1, 0x7f710520, 0xc9f8637d, 0x2f5fe03d, 0x844ff8df, 0xefbb543f,
- 0xff89fc29, 0xe88ff1d4, 0x48bcfe31, 0x78deabf0, 0x778875f5, 0xcfd5ff2c,
- 0xb7ee846c, 0x926352b3, 0xe57bae93, 0x1736d5a6, 0x6bc91df0, 0xd6717d83,
- 0x448b4e27, 0x7997df0f, 0x59711fdc, 0xc2bbf004, 0x73fc293f, 0x4ef8fcaf,
- 0x6caea3b0, 0x4fe8ed92, 0x4c05366d, 0x5bab9e94, 0xb1921497, 0xf964ef98,
- 0xfd022f47, 0xc7c9b4d7, 0xfe5a74d5, 0x41259fcb, 0x94a5107c, 0x48648621,
- 0x549f6803, 0x00b8fc6f, 0x121a8df4, 0x6faafdc4, 0x167ffd07, 0xd8b0f28f,
- 0x1cb3db18, 0xd049341d, 0x5a6afd9e, 0xb4517c7f, 0x69db7a41, 0xb2d46fe0,
- 0xbe90bf44, 0x3bb7195d, 0x00eb642a, 0xc913abe4, 0x0aabe044, 0xd98f885e,
- 0x7e5f2b1d, 0x1d276c66, 0xdfc6d757, 0xbe1504da, 0x52e97b14, 0xfc5fc2cb,
- 0x1f3bd1da, 0xa3f740bf, 0xe455bf5a, 0xdb8da394, 0x56967e4b, 0xc41b01ca,
- 0x906e5573, 0xb9fa5e9d, 0x5f17d5f2, 0xd9fc167a, 0x4739347f, 0xaf15f852,
- 0x13723bee, 0x916c57cd, 0x69b21407, 0xf0a48458, 0xffd0d4fa, 0xaf0f8f96,
- 0x7db6f947, 0xf618c746, 0xe1f187fd, 0xb2d8a975, 0xf4c648cb, 0xcb5e47c4,
- 0x2587632e, 0x74b4fac2, 0xb7fe8016, 0x1abfe5f2, 0xdd9def81, 0xa9819029,
- 0x61b9f011, 0x8087ea1f, 0xbe4f5c27, 0xd3f5a15e, 0xeba1afd5, 0xbf467202,
- 0x857afa5e, 0x5c00bf9c, 0xf4a1b388, 0x21afdafd, 0x264fee0e, 0xeca1e027,
- 0x306c87b5, 0x2d52d39f, 0x56e7f3a3, 0x2bbf0127, 0x7649bd93, 0xb2f7d94c,
- 0xfa461fe7, 0x70f43d2b, 0xba57f312, 0x76501906, 0x3cc1077a, 0x1bbfaf17,
- 0xdc4259e7, 0xcf658748, 0x9a3e4589, 0x69912a7e, 0x93ec225b, 0x9c9f7c4b,
- 0x2e2e817e, 0x4569e79d, 0xe441fc2e, 0xe8bbe172, 0xcf987d0f, 0x98906a85,
- 0xd6ccd4ff, 0xd06c80eb, 0xfd10d1c8, 0xe5147c6e, 0xae61b9c7, 0x56ee7081,
- 0x28179613, 0x5343c447, 0x6275b207, 0x51db0772, 0xdf0793b9, 0xecbb7ea1,
- 0x69ab1a8a, 0xe9467287, 0x9fef197f, 0xf41a3b8e, 0x9149d3b7, 0x46e191ed,
- 0x3d500f85, 0xaa674543, 0xfff6cefd, 0xdfb606c6, 0x9beffd95, 0xffca0d31,
- 0x23ed9872, 0x97720768, 0x10302b8e, 0x16cd77cb, 0x48983f90, 0xdf3ed220,
- 0xae7df328, 0x0bd3d72d, 0xc424bf1d, 0xfa06e8aa, 0x4075c789, 0x34f25f79,
- 0xa3e2af6d, 0xadafd035, 0x1f655c27, 0x707e7297, 0xf5c1f81e, 0xd6407e61,
- 0x5e2bf627, 0xbdf652b4, 0x5fec2cf4, 0x0ebd5fba, 0x1172bfd8, 0xcd93ffcc,
- 0x4fdc8e7b, 0x2d27edf7, 0xf2d5beca, 0x2dbefd55, 0xc630fadb, 0xedf7eb9f,
- 0x496b4327, 0xbf4b7dc4, 0x43b7dc47, 0xf847fe3b, 0x24c782aa, 0x2aaf96a3,
- 0x7c37b27c, 0x16e4f9ea, 0xd57881d9, 0x3bfa49f3, 0xd27ab24c, 0x0a8742a3,
- 0x47ff95fd, 0x47e070d5, 0xe8553a21, 0x0aa74430, 0xade7ea1d, 0xfcbea3c5,
- 0x522df023, 0x537a297a, 0x4a95f3c6, 0xa6038b3f, 0xc90ff6ed, 0x50b97c44,
- 0xfdc691cc, 0xf3a80643, 0x45be5d3e, 0xcba5df57, 0xbbeae917, 0x4d5af975,
- 0x3db6cafd, 0x10748246, 0x9713d4d0, 0x31393bff, 0xfadd1221, 0xf5a1bf98,
- 0x39ee11a2, 0xd88258d4, 0xbc415e5e, 0xdc191f10, 0x51b9e221, 0x3d71c537,
- 0xa73637f8, 0x8f94bcfa, 0x7e628eac, 0xd07b6e85, 0xd5d34f16, 0x4c1f2c71,
- 0x1f03d634, 0xa307be54, 0xbbb1df98, 0x538b6828, 0x7e9d5bd9, 0x8778f911,
- 0x0dbdf22f, 0xdd70692a, 0xd7b97a3b, 0x758f9ca1, 0x62c6db47, 0x3e29d17f,
- 0xa2a9c7a0, 0xf8396525, 0x27451aa1, 0xea8bece8, 0x79c1b5ee, 0xfd3fb755,
- 0xfbf439ae, 0x164477e2, 0x4975e38e, 0xd0765483, 0x975de219, 0xf05c402d,
- 0x859fa45a, 0x2d17667e, 0x3f791b5a, 0x2576f394, 0xe0466596, 0x390ed4bb,
- 0x51ef3eae, 0xd7ad94e0, 0xe74ff77d, 0xed11a6fb, 0xda8df2ff, 0x9fb73772,
- 0xfce4647f, 0xc7191c67, 0xbfe657ed, 0xaa7b7ce1, 0x1cd77198, 0x07dd3eba,
- 0xf9a26244, 0xd7117e4b, 0xc166d2e7, 0xe237fe07, 0x57fdc44d, 0xf546824d,
- 0xd82e887d, 0xcef4d75f, 0xffb6bac7, 0x4007eb39, 0x307faf47, 0xda69dff6,
- 0x471ffaf5, 0x37f905bd, 0x407ca68e, 0xdbcfd67a, 0x02f18519, 0x48ad3bdd,
- 0xde3bddff, 0xe401f94b, 0x613934df, 0xff3bdd9c, 0xa9e8163a, 0x0ca9857b,
- 0x30f760f8, 0xb95dbfe6, 0x3b63afe3, 0xc2aefe50, 0x1a366c75, 0xffc99f99,
- 0xdee08f8f, 0x9d3f25c2, 0x3c17e815, 0xd7e6beae, 0xf0823e51, 0xbea59aff,
- 0xe76df40f, 0x43e3cd53, 0xce64adba, 0x1c5f1856, 0x79a87f8b, 0x758bf26b,
- 0xd6aaffb4, 0xb485c65d, 0xcfedef3c, 0xfd1c62c7, 0x5d37a656, 0x879e3b90,
- 0xc66d87f8, 0x2994acef, 0x26c02f9e, 0x5df646a3, 0xb07935a6, 0xe5e27a8f,
- 0xf5a7f473, 0x1aef1f17, 0xaffd7d99, 0xc5c10f8f, 0x67d59ff8, 0x7fde6016,
- 0xf403e8fb, 0x2e909a83, 0xd27ef995, 0x4cfa8752, 0xfb3ef35f, 0xfa0e86bf,
- 0xd0216f3b, 0x4d9b799f, 0xfdebdaff, 0xeb81d1b8, 0x3d07dfa2, 0xf7c77ce9,
- 0xd5aaff30, 0x27ede389, 0xfd368fbd, 0x5beeb63f, 0xfadbbae9, 0xfe77f79e,
- 0xfbdde7c5, 0x8f33bfba, 0xce1dbbaf, 0xfede6b53, 0x75f7c71c, 0xffe95cf9,
- 0xf457ba52, 0xad4376fd, 0x6f8e933d, 0x82b4690e, 0xe25f7162, 0x03a64b25,
- 0x0f2f70a2, 0x383bf8c0, 0x57608d5f, 0xbf989935, 0x581824cd, 0x82dddabc,
- 0x3b4447e2, 0x5fae4eeb, 0x0cb439dc, 0x5d3b90f5, 0x827d413f, 0x39edb548,
- 0xee7b7eb4, 0x6ac62742, 0xdcc7f192, 0x853d9f90, 0xe6f6b5e7, 0x12e204ef,
- 0x677f7ea8, 0xa003f4ee, 0x12e5dee7, 0x29a60f51, 0xa7efbfea, 0x93efb014,
- 0x7dd0f6b4, 0xdefe6d6e, 0xddfcda39, 0x843dde1c, 0x229eff8d, 0x587e07c1,
- 0x8953ce53, 0x2d3597e0, 0x0a908996, 0xabb54671, 0xbbb8eec1, 0x197edb48,
- 0x7f2a6728, 0xe3c537e3, 0x5c78436f, 0x3921788a, 0xed03de22, 0xdc7c8f13,
- 0xf9c7b3ed, 0x489ace20, 0x7bc7864e, 0x37e89caa, 0xccb8f6e7, 0x3596cbdb,
- 0x659fb0ed, 0xd8ae1d9d, 0x9be1bdd3, 0xe11cfb70, 0x2ff185a4, 0xcfb444bb,
- 0xae7d9d65, 0x5dd7cbac, 0xbf1d650f, 0x073cbaed, 0x786372eb, 0x5c5a865d,
- 0xccabf604, 0x8b7c87ed, 0x1687e593, 0xe8ae7611, 0x00d2fac3, 0x8466f117,
- 0x9897d1e3, 0x9ce02b8b, 0x405a3ccc, 0x6b5dc87f, 0xe60f9445, 0x55c7010c,
- 0xf8c64934, 0x450d8fe3, 0x85deb059, 0x7f562613, 0x09c30d9f, 0xf1c25cf8,
- 0x516a425e, 0xbf57257f, 0x0b3b32fc, 0xea0825ce, 0x845c4bbf, 0xb8ed41dc,
- 0x10216c92, 0x126af10d, 0x078ec6f1, 0x8e40b93f, 0xdae4fc6b, 0x496efc3c,
- 0xabe9c30a, 0xe5fc6f7e, 0x17fe9d9f, 0x29c767e5, 0x3a427491, 0xd497bb12,
- 0x3f7cf776, 0xa54d63c3, 0xba434be0, 0xfbeac80b, 0xb66605d3, 0x87ebfe0f,
- 0xe64f9013, 0x2c4c74fc, 0xe26407f3, 0x6e1ac47d, 0x3af25c37, 0x760fc162,
- 0xcfce9fc9, 0x6a6cf373, 0x87a4aabe, 0x9ffe1db8, 0xd3489710, 0x51a17cee,
- 0x048b3b04, 0xcdebffd1, 0xc57bc28f, 0xcca376fa, 0x6bafbe06, 0x3f002fd1,
- 0xf2afee11, 0xa6d29479, 0x75aaf1b1, 0x59c77b5b, 0x75cf6c69, 0xd571df80,
- 0xc5b7ddf9, 0x80fdd82f, 0x1d5143d7, 0xe6114505, 0xafe3893b, 0xf0dff770,
- 0x7f4ceafe, 0x99780caa, 0xce6a2f99, 0xee669df9, 0x264098cf, 0x770ab8c0,
- 0x0c9b66df, 0x744072eb, 0x1ad57dc2, 0x3ba345ff, 0x1eb1f9c2, 0xfc1f009f,
- 0x676427f0, 0xc4f3e009, 0xd808a24f, 0x73f1efd5, 0xcfb80cc9, 0x1eae8191,
- 0x731df817, 0xb84fcccc, 0x74c76cce, 0x9c8e6156, 0x68cdfb48, 0x50838fb5,
- 0xfebcc1b5, 0xcb03b33a, 0x4a76f087, 0xac95c1bd, 0x5ed1a75e, 0xbffe691d,
- 0x79f8128b, 0x937bf399, 0x75bd7f84, 0x35a9f9cb, 0x41fa0b90, 0xd167e2be,
- 0xfdc135d3, 0xe09a6971, 0xf60df903, 0x050be630, 0x53de1fd4, 0x64cfafee,
- 0x7ba15672, 0xcdab9e3d, 0x1e02f7da, 0x887f78e7, 0x678287d3, 0xf1fabf05,
- 0xbf29f52a, 0x9fed55ba, 0xdc6dabba, 0x54dfa06b, 0x81bb05fb, 0x3a96aa82,
- 0x6b0fb08c, 0xe1909eda, 0x073ee67c, 0xa2a44b83, 0x83f8eab1, 0x0f166675,
- 0xcdfc67b2, 0x2e4e80f3, 0x2c8135af, 0x1745ae24, 0xdf87c06d, 0x0f7f3833,
- 0xdfde6c71, 0x57d36489, 0x3c0fabf0, 0x597cf2f6, 0xe715370d, 0x4732ab33,
- 0x03da7706, 0x81e4bb95, 0x675a7a2a, 0xe572c78e, 0x074ae0fb, 0xfb420fbe,
- 0x1f7871c3, 0x4fb81137, 0x8ed2d192, 0xa81f6cf7, 0x5874638d, 0x88cbb2d6,
- 0x7bcd54f9, 0xfa3f4a22, 0x50bb34fd, 0x30bcaf1b, 0xe02863af, 0x5814d0a5,
- 0xa7408ce6, 0x7394a393, 0x2729afca, 0xc76e945c, 0x27247ffe, 0xa2722996,
- 0x95e3e303, 0xe7df02f6, 0x3031392a, 0x5a589c92, 0xeb0818e7, 0xfa4774b0,
- 0x4b24d840, 0x149a99df, 0xefbc4e49, 0x47cce761, 0xeef67df9, 0xdc4e54d9,
- 0xb31392a0, 0xf44e90a3, 0xf6513eed, 0x42725f49, 0xcbffb759, 0x907de6fc,
- 0xf7f61113, 0x71393a17, 0xe518bf8f, 0x45b33dc4, 0x4facbdcd, 0x391394fb,
- 0x5e61f749, 0xa044c676, 0x7db9f79f, 0x0bdf9473, 0x5e407e80, 0x11d94664,
- 0xd1b97a6f, 0x53ff5f1b, 0xfffaf97f, 0xbe159e10, 0x3fda94be, 0x7448d4a6,
- 0x979e9048, 0xbaf947de, 0xf8c8f400, 0xb907d2fa, 0x67c7539f, 0x66cf808b,
- 0x6b2cf9aa, 0x7266ed5f, 0xf52da6a1, 0xfcb4b999, 0x09d946fe, 0x30fef2a2,
- 0xf352cddf, 0x5ea8ccdd, 0xe3196ef8, 0x13fd97bb, 0x6dea7f50, 0xfce67f7e,
- 0x11b3f296, 0x31f408ce, 0xe68c6e67, 0x7189dede, 0x06d8c9bf, 0x113de5fb,
- 0xe63f79cf, 0x71c1c07a, 0x7d04c9bf, 0x9e3e49fa, 0x2f3d7c79, 0x3e794878,
- 0x22de5fc5, 0x6d574fa6, 0xd57f1116, 0x57ff6cad, 0x073e0b37, 0xff8c17cd,
- 0x56d79e1b, 0xb5e760ec, 0x39850674, 0x78aff5aa, 0xce1ea3af, 0x08362a09,
- 0xf70cde78, 0xa59f001e, 0xce259f1c, 0x29573c0a, 0xc1fad26d, 0x2f6da21b,
- 0x1e101b90, 0x78abc3fa, 0x105342fd, 0x7f0a1dcf, 0x20373829, 0xabb6d49f,
- 0x59fb711a, 0x9a2a4fb0, 0x0d9ddf75, 0x3704ae78, 0xadaf41d4, 0x7b96d16a,
- 0x8b17bcf8, 0x146d8c81, 0xb33df93e, 0xf778f4f3, 0xf653f1e8, 0x8873f12d,
- 0x9ff327e2, 0xfe259f87, 0xf05e785d, 0xc7a2f175, 0x3f145d47, 0x324f181f,
- 0x6c7e3f61, 0xf17f2170, 0xbbe22bbe, 0x189e8b4e, 0xe50f79a9, 0xef2f1ca7,
- 0x83f5dc19, 0x80b55ea0, 0x6fd74af5, 0xf34497d9, 0x38c85b73, 0x6d286638,
- 0x90e3cad2, 0xa0756ffc, 0xbb5b9f7c, 0xdab88045, 0x60befcc5, 0x7c09ef99,
- 0xe5c1e63f, 0xfad3de54, 0xdc9bc70b, 0x287e5e70, 0x53fc882a, 0x409ddfa1,
- 0xfa6f94bf, 0x0c4e38f9, 0x9ca3e60e, 0xb8ec136a, 0x4c51032f, 0x8275d16e,
- 0x4cf66d77, 0x5f016bfe, 0x6a5fe617, 0x4358eb63, 0x75f60dd7, 0x5be79f81,
- 0x80f83e98, 0xe73b22ff, 0xd992eff4, 0x490238fb, 0x44e77e40, 0x8f915cfb,
- 0x939aee70, 0x1ddf5aa4, 0x37fe8c9d, 0xe0fc4e09, 0x36fbffa3, 0xbacbdbeb,
- 0x7f4cfa2a, 0xd3c157d7, 0x1f3add4e, 0x9c2f6a8a, 0x8272e780, 0xf20bdf13,
- 0xd179e1c7, 0xfef646c9, 0x1fae41cd, 0xd805f7e9, 0x78f7ea1f, 0x21c0c066,
- 0x145a7a2e, 0xbf6295c6, 0x533f4c69, 0x3bfd29f3, 0x6e375f48, 0xa3b486a9,
- 0x7f8e165e, 0x066edf74, 0xd04d3fb9, 0x19d149c4, 0x27ffbee1, 0xbea397c0,
- 0x36493e02, 0xc8797ca3, 0x19eebece, 0x1ef02466, 0x3235e5da, 0x06bdd1d2,
- 0xe06c91c7, 0x04c98f2f, 0x83e5c7f6, 0x3b9d1e37, 0xa445ca00, 0x26f7e28f,
- 0xa56b8354, 0x91aac598, 0x5c9847e2, 0xfffbc13a, 0xaed79513, 0xa097786b,
- 0x7ce27b07, 0xb674bf70, 0x0754d773, 0x0f325bf8, 0xcf7386ad, 0x23f67959,
- 0x1b1d6047, 0xc74fbbd8, 0x9da1e4d7, 0x0577d0f6, 0xe8c767dc, 0xbb9c017a,
- 0x391ea767, 0xe0f7c742, 0xb73c449a, 0xbc4aeb9c, 0xe731ce07, 0x0e9d141e,
- 0x5de16f5a, 0x7f9c0b56, 0x122d1f4e, 0x0f2f0ab7, 0xeff47692, 0xeefdadcc,
- 0xca4fb0dd, 0xf7843ca8, 0x87fcca9c, 0xfc840ea6, 0xd7db017a, 0xb846dd78,
- 0x74ded9e7, 0xfb4998d1, 0x00f5a12f, 0x59eb35dd, 0x028d9abd, 0xe25817be,
- 0x64f9027f, 0x1f0146c9, 0xfaa769c4, 0xf5c199af, 0xbed12981, 0xf9a7b69e,
- 0x700f6852, 0x03ddc637, 0x743593e7, 0x527ed89c, 0x97539e1d, 0x98b476d0,
- 0x2efc6fb4, 0x5eb25bc2, 0xb0af7020, 0xe3ea526c, 0x41de0093, 0x47e509d9,
- 0x08dcfd77, 0xfb479ee3, 0x2fe1441b, 0x42a63b14, 0xc103f27d, 0x152eef8e,
- 0xbafb8b5e, 0x623bef9b, 0x14e2cd1f, 0xefdca204, 0x2af1d493, 0xb869d125,
- 0xd7964a9f, 0x3abcc3d5, 0x06a989bc, 0x468ba7a4, 0x2927e390, 0x7f54ddbe,
- 0x7164613d, 0x27bc7b30, 0xfe2a1d8c, 0x1d3a32b9, 0xcb6a2fdb, 0x98b33e41,
- 0x853c6114, 0x23e98ebb, 0x7ca3ab9d, 0x94eb57e0, 0x7df029ed, 0x7e136acf,
- 0x932af0bf, 0x7586173b, 0xf4fb7fa0, 0xbdc0997a, 0xd3eb95c7, 0x9e3ac1d7,
- 0xe13b2cde, 0x8e7c40dc, 0xbe9411c5, 0x7b64fcca, 0xa7b833d6, 0xcb7f199b,
- 0x02eeee7b, 0x94a5dbe0, 0x7e70994f, 0xe2563e5a, 0xf53e4728, 0x4be3145c,
- 0xe7a57e7a, 0x1b8d6553, 0x65a1cb22, 0xd0e59105, 0x37ee7ad0, 0x6b9f2c8d,
- 0xbbe98588, 0xf078c9e3, 0x0d571663, 0xf404d79b, 0x70bb20fc, 0xf3781ed0,
- 0xdc819fd3, 0x3be2b4ad, 0x7d92d565, 0x83e8c09d, 0xeb9fff00, 0xec29f2df,
- 0x51b9b287, 0x29d90ef8, 0x47d80f58, 0x1cf0136b, 0x8c57c4bb, 0x79e25778,
- 0xf4b49b4b, 0xef21f8ae, 0xa5c48b73, 0x31655f96, 0x77aaa5c4, 0x6e9447f4,
- 0xdbf33f69, 0xcab01f04, 0xdcea3fa3, 0xe62670ff, 0xe5ae38ed, 0x6b583f5c,
- 0xfa3eba45, 0x40d5c5c8, 0x287ef3b7, 0x89dbd010, 0xf20f5dd7, 0x7049bab0,
- 0xf4b89f80, 0x297dac87, 0x3ee30f6d, 0x5f4673fa, 0xb01dce94, 0x3fd7aa0f,
- 0x6feb1fb1, 0xff001c14, 0xd46286e8, 0x22bcca27, 0x9d8df2c1, 0x79504768,
- 0x7b68f185, 0xec1262bc, 0x2655ea83, 0x1562b83b, 0xb3e8527a, 0x40a7bf93,
- 0xb3b1cecc, 0xb4673b68, 0x9eb2f903, 0x4239c52e, 0xfd029e48, 0xbd1978d4,
- 0x3b2abe75, 0x4df48b61, 0xbd1eec9f, 0xd0227f42, 0xfe98665f, 0x33cfacf6,
- 0xf69e3d02, 0x91671809, 0xf40bf407, 0x9fe2b2f9, 0x1eb51621, 0xea7d17d5,
- 0xfd07b0e9, 0xe958cbc1, 0xe854fa61, 0xc7874f51, 0x8c0f7225, 0x51d5a1fb,
- 0xfabc8f38, 0xce59ef37, 0xcf9e19f6, 0x923fd5e5, 0x44bafb04, 0x0a407162,
- 0x1624ab5e, 0x5c63d637, 0x0719d74f, 0xba7c67f5, 0x0e8acc71, 0x20f1edc9,
- 0x4a3ea9b7, 0xdf9fb402, 0xe3117e6c, 0xd2bfcf2b, 0x37ea2779, 0x003a516f,
- 0x92b66ffc, 0x3e21da00, 0xade547e4, 0x1369c622, 0x4b359029, 0x712bbf65,
- 0xb2bf1531, 0xb7cbbe22, 0xb4857c9a, 0x27ebf97d, 0xc25c62c6, 0x7b7fa60a,
- 0x39d8115f, 0x2c63b788, 0x972a3bdc, 0x5e8e7c58, 0x345df82f, 0xdebbc3b3,
- 0x7a03998c, 0xdeccd09b, 0xb96f2c17, 0xfca93657, 0x65a2fbbb, 0x939e1ea5,
- 0xc33ae979, 0x3e300afb, 0x4a76fbf5, 0xcbd74e5c, 0xdbcafe8c, 0xcae7d464,
- 0x805cb79b, 0x63f2cd78, 0x6b77b426, 0xc11a2f9a, 0xd5fbcd9e, 0xc41decab,
- 0xa7135136, 0xae3e23d7, 0x20980c59, 0x0ab5abae, 0xbe6871cf, 0x3c2df313,
- 0xc66bed1e, 0x6afc556f, 0x8a5eebc7, 0xbf6d8ac2, 0x857be1b4, 0x786c9bed,
- 0xabf9aa5f, 0xc47b55e3, 0xf034b5c3, 0x4e3bb878, 0x918fe413, 0xc1cee1e3,
- 0x7d21283f, 0x878dad16, 0xbf8ccd23, 0x456671d1, 0xa4350f7a, 0xe9fabd77,
- 0xddcf10f3, 0xe793f5b5, 0x5f95dc61, 0xe21e7e5c, 0x6882a86c, 0xe6a53d07,
- 0xfe31f795, 0x1f176955, 0x97a0edc3, 0xc3f1897f, 0x7268fe17, 0x3b0bb402,
- 0xfe80a74e, 0xdfcb6171, 0x782e2092, 0xe8a98ed9, 0x3db659df, 0xedbddf0f,
- 0xfa059ecd, 0x7e23f5f5, 0x7b6ebf59, 0x3c60f75a, 0xeeba79d0, 0x714227c0,
- 0x2c552d8e, 0xe3c1d47e, 0x8e47371b, 0xcf11ef17, 0xaeeb3ad8, 0x5b1fc029,
- 0x67d09e99, 0xba61e35c, 0x63f4379e, 0xf8f3371d, 0xbe3b96d5, 0x3cf1ae32,
- 0xd7ae83fc, 0x7c6d18b8, 0x8bada7e8, 0x942cf17f, 0x1af426b5, 0x07e818d7,
- 0xf47894fe, 0xd95da3cf, 0xdeb08931, 0xa4f181ff, 0x0e857eb0, 0xa69fd7e0,
- 0x8cf8bf80, 0x44bf441c, 0xef098770, 0x38ef966b, 0xc2e9fb8a, 0xef1843f6,
- 0xf85c784d, 0xb1fa1205, 0x03ee5df8, 0x6568d9aa, 0x1d7c90f1, 0x61d25e20,
- 0x2e3cefc3, 0x12b5d192, 0x9c5bd92f, 0x3498b2e4, 0x8b0b3efe, 0xf8daace7,
- 0x0c3f8f01, 0x09309baf, 0x2bf3abd6, 0x3c1550e3, 0x130f01d8, 0x91c071d5,
- 0x714352e3, 0x8375bac0, 0x1e3e04f5, 0xf9a5f38a, 0xe9ef10a1, 0x8a468e13,
- 0x81b0bf2b, 0x905f0fc5, 0x568fdf23, 0x626b82e2, 0xd7d6fb74, 0xbee078bf,
- 0xf175f1e6, 0xa1c63f34, 0x5f051ba1, 0xf323575d, 0x846327f7, 0x85b7646f,
- 0x0a549bee, 0x4f2cfbfa, 0x997dc0c4, 0xbe234ed6, 0x55d8a293, 0xdc5dbe7c,
- 0x8af5d6df, 0xf5a27188, 0xbf907b2d, 0x3547f9eb, 0x8fd1473f, 0x6f1f2377,
- 0x1f931b75, 0x3ead5b5d, 0xb5d3850b, 0xeba44dfb, 0x189dcdea, 0x79bfbaa7,
- 0x7543b19b, 0xc3f20cfe, 0xd77f1aa7, 0x22c6bb6b, 0xa2efe73f, 0xcad9fbe9,
- 0x3f8d63f9, 0x9b353f72, 0xa9fa6a2f, 0xed18b6f0, 0x3bfbbe96, 0x05eeb927,
- 0x99dae778, 0x45ae751e, 0xd08be69c, 0x6462cfff, 0x3237576e, 0xa395383f,
- 0x959c3ef8, 0xda2707bd, 0x5b2f51a2, 0x0c749d66, 0x665d21de, 0xcc5c6235,
- 0x471e3aca, 0x326723f3, 0xeb4af7ac, 0xbbc604ed, 0xb2abf01a, 0x9bbb65ef,
- 0x27273e80, 0xcfaa6517, 0x1bf19461, 0x4f881807, 0xd3ad8fbd, 0x4a7d8206,
- 0x1850ef24, 0x7f699dc7, 0xb62e4095, 0x9f7f987b, 0x02313d5c, 0xf64bb7be,
- 0x6e14097d, 0x04e0be7b, 0xba4f6cd7, 0x7e43f067, 0x1df69706, 0x4753bc5d,
- 0x69c977e7, 0x6ad13d40, 0x1569d808, 0x4f6a8d62, 0x41d70346, 0x5e12521c,
- 0x8978c0a5, 0x11e63cc7, 0x46d43f70, 0xf50adaa8, 0x5de6929c, 0x9249d895,
- 0x72159dee, 0x57a50478, 0xc557a70b, 0xff65675f, 0xf03b70a6, 0x0cb896fd,
- 0xbbe40752, 0x7d46ead6, 0x0b9b29e2, 0xdfd23fef, 0x88938868, 0xf54ed127,
- 0x5fb450bb, 0xda3b7dcd, 0xe71f26af, 0xa613e6d2, 0xc9232749, 0x8e3e049b,
- 0x7882d102, 0xe06a9b88, 0x70abb2fa, 0xdc785264, 0x19c48971, 0x2c3bf280,
- 0x6e808f78, 0x1e7aaf2f, 0xdc768b16, 0x3e1a7c74, 0xe1d19b11, 0x6a71b035,
- 0x0389dbbe, 0xe3a747df, 0xaf1bae3d, 0x70b56f26, 0x0911277e, 0xdcc59df8,
- 0x2ffe384e, 0x4abdf069, 0x224f8a2a, 0x38c2644c, 0xde2130e9, 0x83cf9625,
- 0x6fa9aff6, 0xeb01ef07, 0x0c870b5f, 0xb5efabb5, 0xe0e5c0de, 0x8a514b11,
- 0xac93ee85, 0xb818b5e1, 0xb82c9d9d, 0x65a04e4e, 0x9fc801fe, 0x61e796d2,
- 0xc8d08a5c, 0x5ac46ae8, 0x4466fb82, 0xad55c790, 0xd53d468a, 0xa8de2664,
- 0x8e3f70f1, 0xd0f1e578, 0x3f9f8f8b, 0x28185639, 0xa50d6928, 0x1fceda35,
- 0xa62c35cc, 0x4502d08b, 0x8b10fe14, 0x5ff40c6b, 0x6c9dc33f, 0x1bd30718,
- 0x2877bf56, 0xc16bdaee, 0x1ba86863, 0x14c0ffba, 0x2ef82164, 0xdd0ef108,
- 0x51b8720f, 0x0ec653c9, 0x463406ef, 0x332ef287, 0x698df7e5, 0xdaf37f2c,
- 0x7ef0f5a1, 0xe8bda74d, 0xc78c0b66, 0xcf0ebf6c, 0x9af102c3, 0x937bd5f7,
- 0x4738ca2f, 0xc1e40c6f, 0x07b7c7fb, 0x23c5fbd9, 0xfc7ba1ad, 0x59378f28,
- 0xed007c94, 0x43d6a28b, 0x5aaad7c8, 0x01b9610e, 0x9e1d34f2, 0x421bc810,
- 0x65bf79e7, 0xb5f2414c, 0x716b5e14, 0xb1c13c32, 0xbf6632d0, 0xb8c3c1a4,
- 0x61d9bdfa, 0x6e2b483c, 0x86e1e1d0, 0x858785af, 0x87a86f6b, 0x3bfa55f7,
- 0x0b174a6f, 0x73d35f00, 0xdb8ef80c, 0xf41663c4, 0x145fd0bf, 0x670e23ca,
- 0xbcf72f43, 0xc2853b15, 0x97d35729, 0xaba14ed2, 0x9d26e8de, 0xef63afd8,
- 0xe7e96a83, 0x87ae2ddf, 0x8b32de00, 0x8896dd2b, 0x0577c7f3, 0x8b8bbba6,
- 0x74c213f2, 0x09ee2c4d, 0x11feaef0, 0x5a1f82cb, 0xe9dafe54, 0x34c54b39,
- 0x011fde3a, 0x3c31b823, 0xdee5c49f, 0x9e7451a8, 0xc6032e89, 0xb8b11fd5,
- 0xdb3b4057, 0x6200609c, 0xf71226cb, 0xf70c4104, 0xfa8feca3, 0xaf8ecc1d,
- 0x56f8362f, 0xfc60f3b0, 0xfa611dbb, 0x47d1d0e8, 0x5b47d190, 0xb547d227,
- 0x47d193bf, 0x346e5d5b, 0x3b03573f, 0xc70491ba, 0x5937bac1, 0x61a40b0a,
- 0x82626e94, 0xb23740f5, 0xc282b6f7, 0x3aebc428, 0xf58f5f18, 0xe652e3e1,
- 0x36fadbd5, 0x71fb481e, 0x0799d806, 0xa61e780d, 0xb7a94de0, 0x46e2c2c3,
- 0x7ee6cdff, 0x99a9fa11, 0x961fb1d4, 0x3f81eb6f, 0xed875c60, 0xd05dcce0,
- 0xae9b36de, 0x6fdc2390, 0xf815c1e3, 0x7f23784e, 0x04963bd7, 0x7bd720bd,
- 0x3fdb07c7, 0xde807c21, 0xe8f80193, 0x0af2fdc4, 0xdd9667bf, 0xce40b826,
- 0xef618f0f, 0x616ebc90, 0x16bbf576, 0xaf4ca791, 0x5bb83c49, 0x7b2bfb04,
- 0x27eb0195, 0x9ea19a42, 0x5f5e2aff, 0x95ddf8d2, 0x89e85575, 0x7818d272,
- 0x78f9e4cf, 0xfd2d1ef7, 0xa9a4c7c4, 0x1eed29e0, 0x45a290f7, 0xee265094,
- 0xc810f05d, 0x3e3f4a55, 0x7231bb4d, 0x442de014, 0x233e943f, 0x776d0fad,
- 0x7c6bad88, 0x5b0be80c, 0xf1152ef6, 0xae7e8e5d, 0x47bc166b, 0xc567eb49,
- 0xb4adda90, 0x4d2c567d, 0xf3c09f21, 0x087e7903, 0x743fd34b, 0x7dfa041a,
- 0x9ecbf703, 0x922578e5, 0x6399bafd, 0xb8f4cf20, 0x9418e4ec, 0x85beca0b,
- 0x5ce05467, 0xdbe7e25b, 0x0334c169, 0x8fb29bac, 0x6021a0a5, 0x4d2b42a5,
- 0xce201872, 0xbb8f5de1, 0x7b0bce08, 0x5fb73be3, 0xea81eecb, 0xc7ce7fbf,
- 0xebf58df3, 0x2fb8f4bf, 0xb7e039fe, 0xf8f1dcca, 0xbeaf9a4c, 0x69fa32f6,
- 0xdf54c457, 0x68ef55fc, 0xeeffc7e8, 0x00c4dfd5, 0x333ffa1d, 0x353fbc45,
- 0xf1e8cace, 0xb60e6780, 0x04efe2cb, 0x1c4a8192, 0x15ce2ab2, 0x865eb8ca,
- 0xdf86994b, 0x05bacf51, 0xa7ac2059, 0xf6878a57, 0xae78a931, 0xae2eab78,
- 0xbb2f3955, 0x80fcc025, 0x42beb159, 0x74faefec, 0x3e49f7f6, 0x9992fa63,
- 0xa1ce0ffe, 0x3373f2a6, 0x5efb31d7, 0x09598710, 0x3f08f98d, 0x5afd1536,
- 0xfefb4439, 0xaf40d641, 0xf4f28064, 0x67978454, 0x13a795d6, 0x6b915bf4,
- 0x4df9c3ae, 0x0cfd2a25, 0x13a346e5, 0xd1f4f1f8, 0xf7f9fe94, 0xe1bff4cb,
- 0x867f3226, 0xcdb8ceca, 0xbe515f71, 0xc4696f46, 0xfb7e8577, 0x523f38b9,
- 0x2163c5fc, 0x4115173c, 0x8f1e803c, 0x95610eb9, 0xdf13d1d5, 0xf1e9fc7a,
- 0x8f51e0ad, 0x9d0f9cb7, 0x9e8ef1f9, 0x23e3d1f8, 0x5be97e7c, 0x7530eb0b,
- 0x4bf288b4, 0xfc4f7bc1, 0xedfaf5b0, 0x009eb4b5, 0x0df142f4, 0x30174cf8,
- 0x862b27be, 0x3a6c4fd3, 0x140b37e2, 0x0f4801c0, 0x55e3d798, 0x04acc78f,
- 0xd5813ede, 0x1af80fdf, 0xed00fc89, 0xb2dd6521, 0xc6f76bd2, 0xe8c4de41,
- 0xa7abd14c, 0xbd50bc72, 0xbce6f51a, 0x9bfa2b44, 0x9a9eaf40, 0x5e5bc614,
- 0xde571b15, 0x600bcb48, 0xfc0afe9f, 0x0608b1d1, 0x95bcc5df, 0x56e05ff0,
- 0xe2ca5f3c, 0xb7df0559, 0x93d5a67e, 0xce0bfaa7, 0xdc3dfe82, 0x05655ee4,
- 0x87108c7f, 0xff1479e7, 0xd7f875c7, 0xf36bb3b6, 0x0afc7c7a, 0x27b931fe,
- 0xab7a14de, 0x7cfd72b3, 0xfa0a1930, 0x3337d8f2, 0x137f8163, 0xbf447c7f,
- 0x4563c4c4, 0x6e9c3821, 0xe2059e91, 0xa7b77d0e, 0x763e0b7f, 0x15494a90,
- 0x7253bb1f, 0xd8deb3af, 0xeb298f93, 0xc7af8535, 0x05dde214, 0x4429a67e,
- 0x1ec84bfa, 0xffcf0d48, 0xb3f72b79, 0x6043210d, 0x1c585f9e, 0x79f3ff8c,
- 0xd54de686, 0x9b3fe84b, 0xb66d9ff7, 0x121bfde0, 0x7842ba94, 0x93f7c34e,
- 0xf0f8c6bc, 0xbd656438, 0xd3096a53, 0xfbe2e6e7, 0xe42f14d9, 0x38f098cb,
- 0x090559b6, 0xe3e20fdf, 0xf66cdf38, 0xd3db9c42, 0x2b0dfb92, 0x83843df8,
- 0x527e7163, 0x3fc81460, 0xb6e5f97d, 0x4e50d5bd, 0x3d6095eb, 0x910afe3e,
- 0x3b821ff9, 0x5217be4c, 0xe953d77b, 0xa13946f6, 0xdc3671d0, 0xe801bd7b,
- 0xb8c2283e, 0x655cfcfd, 0xcf7645e7, 0xabec45c1, 0xf6f0a7ae, 0x02f7625d,
- 0x81db28f2, 0xedfa7a7a, 0x49f34e8b, 0xd3b41e70, 0xcf22abd3, 0x5bfdf81b,
- 0x68bb3f45, 0xeb0fbf75, 0xe3efc0de, 0xba8e2ffa, 0x1be7d60e, 0x2d61d5d3,
- 0x44f4295d, 0xf438ebe9, 0xf1f99dd3, 0xdfcb503a, 0x049cbccb, 0xe3d151f2,
- 0xeb8afac3, 0x9bd7a863, 0xe6dc3f3e, 0x857eb2ee, 0x35e8af3e, 0x6423d97d,
- 0xd34644af, 0xebe3ce71, 0x2edcd733, 0x41c68bb3, 0x886fd38c, 0x73cbde56,
- 0x393e8344, 0x95ed21af, 0xac573866, 0x07f97e63, 0x3d8d656b, 0xbedb86ec,
- 0x67bd9ed8, 0xb6b97f3b, 0xef8a23fc, 0xcef8f141, 0x868ed6ad, 0x7e1491fd,
- 0xbf67b7cf, 0xc9c82f06, 0x31a32379, 0xafdf4e7e, 0x7efa7e7e, 0x7035fdf5,
- 0xd8c1aafb, 0xd2a9da38, 0x1636ec4f, 0xcd697d47, 0x14f0cd93, 0x73f86f5e,
- 0xd4b8879b, 0xa3ae6f7f, 0xa44e7a21, 0x781468f7, 0xc2e7be0a, 0x0fe098cb,
- 0x1eff95a8, 0x6cdd9de9, 0xf413f337, 0x6088efd6, 0x8683ea3e, 0xe82203a7,
- 0x048223f5, 0xa0ecf0f5, 0x6350b0cb, 0xb97b300f, 0xef003d87, 0x657cd0a4,
- 0x0f662eeb, 0x5da1d4b4, 0x0aed4951, 0xe01b3b90, 0x7cc1901d, 0x27681940,
- 0x885eec84, 0x7786caaf, 0x0c5db932, 0x37a5a74f, 0x1f33fab2, 0x8c35374f,
- 0x9dd5bb00, 0xce932f0c, 0xa8fb5db4, 0xf557da99, 0x508a24a9, 0x76c8aafb,
- 0x4ed677ed, 0xde3bdca2, 0xa4bbc025, 0x65a8ff34, 0xecedff72, 0x8fd1b699,
- 0xda81bf82, 0x54d9b767, 0xfb6f6f76, 0x039b66e3, 0xfedbd9f1, 0xcd17b4e8,
- 0xdf8a3804, 0xe421f282, 0xbe28fe07, 0xf442a3ff, 0x53780b17, 0xe5f6e5e8,
- 0x88ced10b, 0x0a6ae6ba, 0x736593c4, 0xb8db703a, 0xc186365f, 0x706189a9,
- 0x376e4a26, 0xfbbdd3f4, 0x3bf028ea, 0x04fa3bbf, 0x2b7e79da, 0xf41fdbf3,
- 0xe3a91594, 0x026d6bdb, 0x10c097a7, 0xdc777fd8, 0xec0127b8, 0xc0596c95,
- 0xb2fd4ef7, 0x99f1e9ef, 0xdb669fc0, 0x6629d257, 0xfde2364f, 0x60547f32,
- 0x3d2ac3c8, 0x4ddfcfce, 0x3fdc5ff5, 0xdcff1d44, 0xb5abcd6e, 0xc022f831,
- 0x7bdccecf, 0xde80f40c, 0xae584ee4, 0x2b9004b8, 0x24a1eae4, 0xdd6a5f3c,
- 0x689f0367, 0xe0e77f17, 0xa49eeb7c, 0xdaad3405, 0xb0369ff2, 0x3e38eae3,
- 0xf8ed4bce, 0x76e5535e, 0xa113df11, 0x003d26fd, 0x85d6eb7e, 0x1fdeec59,
- 0xc8a6afbb, 0xef9ff1d5, 0x9cc45219, 0x4cff7889, 0x98a737d1, 0x9f183ebd,
- 0x1dfd8ebf, 0xf7ba29cc, 0xefe56ec9, 0xcff5e993, 0xe2b26c67, 0xa9f8c32f,
- 0x4fd67665, 0xfb2bfb6f, 0x53e204b3, 0x296cbfed, 0xb3f8c220, 0xefc24b3f,
- 0x3da40fe7, 0x3df60855, 0x01239d91, 0xa2f644ac, 0xa7cfcb7e, 0xbc2f6fe8,
- 0x6793be7b, 0x3cbd6ede, 0x6e2466eb, 0xd004d72a, 0x9803dcc7, 0x07c991d3,
- 0xf5bbff45, 0x55dfad33, 0x722b07af, 0x6e3fd732, 0x780210d5, 0xfbc2cc77,
- 0x8c240713, 0xc9ebd2f7, 0xbd058353, 0x204fc3e8, 0x4d6d6d7d, 0xfe7b018f,
- 0x009f60df, 0xc65eb6dd, 0x49aa83f7, 0xc7781ee0, 0xb455baa9, 0x24fc6007,
- 0x67d87af3, 0xe0a13e8a, 0x1eeaeda5, 0x7fda58f3, 0x7d8345f6, 0x76f10505,
- 0x57682ff0, 0xc1fdf84f, 0xfe679004, 0x07e784a0, 0xc6f717d9, 0x45f1542c,
- 0x7e0355ff, 0x6eeafb7f, 0xfc60f7ed, 0x8b6ed0f5, 0x7bcbd981, 0x063dfff7,
- 0x57dbd4f8, 0x90bc212f, 0x6f53d13e, 0xcf3bfd46, 0xd1fc862a, 0x8899bd6a,
- 0xde2187d9, 0x152c6a6b, 0xfaef426b, 0x6487ce70, 0xd4f49595, 0x2778422b,
- 0xd0f0092e, 0xf0561f43, 0x3e33d3ac, 0x773d71b2, 0xef0f1918, 0x4d5db65e,
- 0xc77bc0a8, 0x0afb18fe, 0x8f4ccf94, 0xee7bdeea, 0xfdf0e9c9, 0x9ddec8a0,
- 0x7ee22ab8, 0x62b3bdda, 0xdf6a8778, 0xeb8a4e52, 0xc5d06576, 0xfad16f0b,
- 0x3345f657, 0xbce4baff, 0x224f7d5e, 0xf1c7df5c, 0xcecc1815, 0xe183bce9,
- 0xac6fa8bd, 0xb0f574cf, 0x4361fff7, 0x0058393d, 0xfad462e2, 0xffac234e,
- 0x3e8ad9bf, 0x25a5fc67, 0x978bf806, 0xb4210ee4, 0x9268b2df, 0xac15740b,
- 0x33b07623, 0x18301fb6, 0x37325d60, 0xcf5c62a5, 0x731cfc2b, 0x31a34b49,
- 0x4b174555, 0x19d839f8, 0xaf129bd9, 0x9632befa, 0xfb3397f1, 0x71817fca,
- 0x91347f1d, 0xd0f41191, 0xfe4cb185, 0xa77a336e, 0xc77a866f, 0x68df780d,
- 0xef5ffeef, 0x97937787, 0x038c731c, 0x5894e0bf, 0x0eda0fb6, 0x710f52be,
- 0x821f6dc1, 0x6672a2d4, 0xa068b163, 0x1c45befd, 0x9877afcc, 0x90d6bdfd,
- 0x12b8943f, 0xc5379e60, 0xd6fdbe32, 0xdce68301, 0xbefe2dec, 0x8cb1f5ef,
- 0x32c6a50f, 0x03d6dabf, 0x700b09b9, 0xc447b9ff, 0x64f01f82, 0x456c3d47,
- 0xe8a80fc5, 0x6f0f936f, 0xfe863e9d, 0xc1977273, 0x38edcb0e, 0xf30882fc,
- 0x24219c75, 0xf43d204e, 0xac6f0a39, 0x5d5bde1a, 0xc71d1e0c, 0x0aafd86e,
- 0xfe04a9fe, 0xbfcd0671, 0x43c52ed5, 0x7f82983c, 0xff4a1e65, 0xdae2168e,
- 0xe4adc160, 0xfe15fada, 0x31efe568, 0xe513b04c, 0x5d2033ef, 0x6eb9b76e,
- 0x6f72c589, 0xeccfe40a, 0x36d27a63, 0x72494f2e, 0x9fc070f1, 0x79f34eb2,
- 0xb7a78caa, 0xe3e7effe, 0x4bafd254, 0x0ec8afd7, 0x76cdd22f, 0xf0ee4f16,
- 0x10e9cc06, 0x8e96d677, 0x154a9f7f, 0x8e3f7b97, 0xfdf68cff, 0x830eb6c2,
- 0x558fee01, 0x50e210ef, 0xbea7eddd, 0xa933a21d, 0xedafe905, 0x5c7dc118,
- 0xe65dfcde, 0x9f72a48f, 0xe786ebbf, 0xf87fb0eb, 0x8120fb39, 0x1bfd93a6,
- 0xfc410e56, 0xafb06c06, 0xfba278c2, 0x46dad206, 0x92ebd023, 0xe52d2c65,
- 0x9a4bd5e7, 0x4b3fb8dc, 0xc478b3d6, 0x4ca9b165, 0x257a733e, 0xf8c1e886,
- 0x7b4dbade, 0x5c1ff08f, 0x7d6d9e7e, 0x4bf40f6a, 0x80fbebe9, 0x9eda9b8f,
- 0x55c30758, 0x46ecf5e8, 0x4594e119, 0x3df3cf6b, 0xb8cfccb1, 0xf7a330fd,
- 0x5ce51bc9, 0xb26c4f78, 0xbfc1d897, 0x4dc6d6fd, 0xb7eff022, 0xd9539e80,
- 0x7b79073e, 0x3163df66, 0xd6f907df, 0x47e02069, 0x652466df, 0x62fb545b,
- 0x343786ef, 0xb9bcb7fb, 0xebfa3a6e, 0xe005e23b, 0x55becaf3, 0xe8cdbef0,
- 0xfff70dbc, 0x9ffdba9d, 0xc2b7dacf, 0x4f7ad0ba, 0xd4fffdf1, 0xe9ffba34,
- 0x718db8a7, 0x66d9c002, 0xdccefc8c, 0xfe90df4b, 0xa3b57c42, 0x297d5f07,
- 0x6c1ff996, 0xeaffb4e9, 0xffa12e3b, 0x4e3e3421, 0x2be54f37, 0x002a9374,
- 0x0ae91bfe, 0x517c008e, 0x533d02bc, 0xccfe7a3d, 0x0795dc0c, 0xe1ff3c24,
- 0x2e3c4be7, 0x6a947704, 0x99f986bf, 0x6fdd6fb2, 0xf2b238c0, 0x9e82576b,
- 0x45e8509f, 0xb82b3cfa, 0x7953c787, 0x71f61676, 0x8eb07e04, 0xec8ae3e3,
- 0xff7debee, 0x65cf40c1, 0xf98462d4, 0x1d82f590, 0x772945d6, 0xbdef67ac,
- 0x94f0e61e, 0x2dbf87ef, 0xde3d83be, 0xf0cc3d21, 0x5c42341d, 0x931f28eb,
- 0xc5c84463, 0xb70c40fd, 0xb67d860f, 0xfbafca84, 0xacebf255, 0xaa7488dc,
- 0xce23e1e0, 0xb1c4aeb7, 0xc7ce3f79, 0x5d1f07e9, 0x82ff0aa2, 0x3f6c0924,
- 0xe7db028b, 0xbb3fc022, 0x90dba6ef, 0x4c88efde, 0xb7b076ef, 0x47efe1e2,
- 0x8cfa406d, 0x7bc7669b, 0xce494f2f, 0x7e8d38a8, 0xef74c3e4, 0xced067a6,
- 0xfd85fa03, 0x97fd1bde, 0x0893fd01, 0x74a3ef74, 0x853f5aaf, 0xfc5d280e,
- 0x7ca5f01d, 0x7285d3d7, 0x65e0a90e, 0x731cee9b, 0xe9e19daa, 0x8d5200ff,
- 0x8000b56e, 0x00008000, 0x00088b1f, 0x00000000, 0x7dbdff00, 0xd5547c09,
- 0xf37df8d5, 0x3332c966, 0xb2764c99, 0xa00c4930, 0x80490e2c, 0x084ed8b0,
- 0xc3884a20, 0x93a0d752, 0x364b0900, 0x94569510, 0x8b062081, 0xb62d1518,
- 0xef858320, 0x106d1b43, 0x268358a8, 0x622d1110, 0xa57fb1dc, 0x41459041,
- 0xed1fa822, 0xe73bf587, 0x33337bdc, 0x6a02266f, 0xefc7e1ff, 0xdf77dee6,
- 0xcf7ece5d, 0x6ec5f739, 0x843631d3, 0xae630731, 0x7d8c01e6, 0xf4bf3f8f,
- 0xa12d8c97, 0xcf3773e7, 0x8224715e, 0xf0f7cfd1, 0x5e79babf, 0xc776bd50,
- 0xe91c57af, 0xc5cfafe7, 0xffe0525f, 0x8967a671, 0x47f1558c, 0x89ef4cac,
- 0xfbc025b5, 0x6cd69d11, 0xe266b498, 0x03846319, 0x8319789f, 0x1c636ff6,
- 0x6faa3e54, 0x1f2bd3de, 0xe9cc31ca, 0x9b39a685, 0x998f1533, 0x1b3605b1,
- 0x27f967ad, 0xb0dd93d3, 0xc6c646de, 0xc59bbae3, 0xd9dd727c, 0x99c68536,
- 0x8f7329e0, 0x66678389, 0xa07b9e4f, 0x6607f9ff, 0x886d964f, 0x70a13ebf,
- 0x4018eabe, 0x7ec48c4b, 0x0c985563, 0xc9bf9fe6, 0x326533e4, 0xc49f05b6,
- 0xa3db2ac3, 0xfd329b7c, 0xccf4d9ef, 0x9633b7a5, 0x75a4cb6b, 0xb15aeac1,
- 0x646858ee, 0xdfbcc197, 0x9bb59666, 0x1dd5d1e1, 0xb081e2de, 0xef462d8f,
- 0x389989d4, 0x1c1af398, 0x9d629aff, 0x6a5801ed, 0x38a6cbac, 0x6992d007,
- 0xcf8fb65e, 0xeced9793, 0xeea1b2cf, 0x2db9e670, 0xf8128d8c, 0xc930ad62,
- 0x421c3f70, 0xd9e1c6f1, 0x20175e13, 0x1750867c, 0xd5e27e7a, 0x667d1ba8,
- 0x0144f3ff, 0x78632fbf, 0xd3c1adf9, 0x1d0e66db, 0x553e6027, 0xcacc6649,
- 0x5c554b3c, 0xdb6628ff, 0x773ff0e4, 0x1c2e628c, 0x57189a7f, 0x8ffacbb6,
- 0x10aeafa6, 0x78869afa, 0x22e88ead, 0xd053eaba, 0xbcc07469, 0x8bdd194c,
- 0xee98fac1, 0x80ac5eda, 0xbd71eaba, 0x29554800, 0xf6c7ebe0, 0xd7338d84,
- 0x01d99e23, 0x0cc9cb13, 0xcdb567dd, 0xd1b6e1c0, 0xe866fa30, 0xb865e097,
- 0x838bc003, 0xde3585e3, 0xdc6bf88a, 0xd7886268, 0xdc39636f, 0x68d953ac,
- 0x42b7f403, 0xc6f7247f, 0x57039ea9, 0xd931bd41, 0x9e6050e8, 0xc22e8613,
- 0xa0ab89fb, 0xadc3e88d, 0xbc4fbe20, 0xdf9ae23a, 0xa277bb40, 0x8bc0e58e,
- 0x05b08d5b, 0xa7bfc3a4, 0x482936f9, 0x27c42da7, 0xc1b2c7f3, 0xbede7826,
- 0xd5d11869, 0x052ce2ac, 0xe6ce76e9, 0xdb9f0558, 0x2a74d8c7, 0xd863eff3,
- 0x13e89542, 0x16dea0f8, 0xde618f80, 0xd5e1153a, 0x6c93ce3b, 0x11ec6588,
- 0x23e335db, 0x32ce81d2, 0xcc1b2459, 0x5f5043d8, 0xb46b7009, 0x4b1614bb,
- 0x0a7306f5, 0x236321f1, 0x0cadae08, 0xd518d4f0, 0x6045775d, 0x7f5e2cff,
- 0x86eaf1a4, 0x9ec63486, 0x22357189, 0xbb303e9e, 0x87f9c11f, 0x44a9d526,
- 0x7fae82e7, 0x127ece80, 0x2c2ed4bd, 0xe0f38762, 0x0e660ef7, 0x47901ba4,
- 0xf8066eb7, 0x9b3de895, 0xbd355d70, 0xc38e3e8e, 0x657cb6d5, 0x7ba184f4,
- 0x21788eb9, 0x8cc7d817, 0xaeef11f0, 0x7ecdcb53, 0x66c5b3b9, 0x52ed4c17,
- 0xfcfc5b26, 0x79eb7286, 0x593b9733, 0x98229a50, 0x7ec762d7, 0xac23a23e,
- 0xe5fae4c7, 0xdd62cad1, 0x3e185f78, 0xeb890d7e, 0xaccf9467, 0xf2ff7fe5,
- 0x1fd0a19b, 0x9f89577f, 0xc4fce35e, 0xc5a3f5e5, 0x3c22269f, 0xbdf09390,
- 0xeb1cc5db, 0xeadf3a97, 0x5fb9748b, 0x3ea09b65, 0x2af464d8, 0xe66db831,
- 0xf18a25b0, 0x945b0d90, 0xa820cb56, 0x2e614bff, 0x7fe9c3c0, 0xbfae54a9,
- 0x133e3dd7, 0xa1d9afde, 0x7e852e73, 0xf10a2d35, 0xf337b383, 0x371d0045,
- 0x31075947, 0xb770af18, 0xf68065d9, 0xc9044ad3, 0xd7009c65, 0x2feb9061,
- 0x8268cae8, 0xc02df5e7, 0x5d3d507f, 0xf5c6a808, 0x6c319dea, 0x83be1f80,
- 0x1fb47cde, 0x1bda00ec, 0x4f67c89d, 0x9ff1f245, 0x81f0cc8a, 0xbe528310,
- 0x4a0188e7, 0x6e825961, 0x62c67af4, 0x78504f00, 0x30b51cb1, 0x3c07a5db,
- 0x35e67a2b, 0x7546be00, 0x9f08aab5, 0x8e872eac, 0xe9f3fd99, 0x3bed18bb,
- 0x39d0ef4b, 0xd6d5f403, 0x7aabcfbd, 0x31d0eb80, 0x942af58d, 0x6f9e3189,
- 0x4623de10, 0x7b63d5d5, 0xf557b15c, 0xa57cff40, 0xe905b76e, 0x44e61aec,
- 0x24f92ab9, 0xc1b593ae, 0x81ec5972, 0xda15974e, 0xb5771c47, 0xc7fd800d,
- 0xe38299b8, 0x7c0dfd49, 0x0e9162c6, 0x44ca5de3, 0x3fe86718, 0x8e47c118,
- 0x68ef75b3, 0xbe3ef905, 0x18e28c63, 0x6be74a10, 0x38b7e398, 0xce9f77ae,
- 0x3475f889, 0x9406d733, 0x62ba7f13, 0xf64bc20c, 0xf5cb1def, 0xa78f48ca,
- 0xee154408, 0x1de70627, 0xb76fa37e, 0x6c33d601, 0xd1b77796, 0xd99fb011,
- 0xed4bc68c, 0x523ed2f4, 0xb10a780e, 0xd2c6b6cc, 0xb78e2338, 0x7e800db6,
- 0x1fb07549, 0x78ef4f68, 0xa6b8414f, 0x882875d7, 0x777badcf, 0x3ad37ad1,
- 0xf82979da, 0x176179eb, 0x5603db74, 0xdb23ed1d, 0xf2cdf5ca, 0x666f5c53,
- 0xadb27ac7, 0x2c7dfe09, 0xd724f42f, 0xf66bf8c1, 0x17ef7566, 0x2895eee5,
- 0x52e403e4, 0xc691ac6e, 0xe9eacbb9, 0xbdabfb8f, 0xd20f73b1, 0xdf7bf3e0,
- 0xb47f12b1, 0x137e13c4, 0xf84a7df7, 0xba485c48, 0x37d9ad98, 0xb179415f,
- 0xa67014a6, 0xca59c284, 0x7eb8358d, 0xf45b1fa2, 0x27cfa024, 0xd8829eed,
- 0xcca57b40, 0x20159d1e, 0x8f7be497, 0x19ee662f, 0xed6190b0, 0x16595be5,
- 0xb17ef7c9, 0xc27df201, 0x9122fbc0, 0x2e2c9a9e, 0x569fb617, 0xcf388dd8,
- 0xfb425833, 0x975e0633, 0xe0407af0, 0x52763b33, 0x870875b6, 0xf8e80d93,
- 0x4fde4899, 0x8dee8f2b, 0x83bb09d5, 0xe0eb0bd3, 0x08fb82f4, 0xc666bfb4,
- 0x748c3c72, 0x9cf48a79, 0xfee2a64e, 0x76f81786, 0x1b0edd23, 0xf48dddef,
- 0x9d04ce9f, 0xa274e78c, 0x63dbf7a5, 0x711d7cb9, 0xdc218bbc, 0x0ee615f3,
- 0xc654ef67, 0xbe29bb72, 0x0fd13463, 0xce1943f0, 0x4eb53437, 0x66dca035,
- 0x7fa809b6, 0xa0eb137e, 0xae05797e, 0x366f48dd, 0x79f89b97, 0xe9c6d9be,
- 0x07d39326, 0x1c273fbc, 0x3e436d3f, 0xa3e7e01a, 0x7347a478, 0x26de3d28,
- 0x011bf4e0, 0x3df1d0df, 0xf1f8e177, 0x02973a1d, 0x603b0c83, 0x3ae831f6,
- 0x49e8016c, 0xf40653e4, 0xcfb40b0f, 0x9f2215b2, 0x153956a5, 0xb302ebc4,
- 0xd7dc1efb, 0xf98d9472, 0x5f3acf48, 0x6c97bc3d, 0x7efe1173, 0x67689eb4,
- 0x90686c11, 0x2c40f59d, 0x9557e3b7, 0xfa7ebca1, 0x53ca235b, 0xbdbfe487,
- 0x01e5d1b9, 0x50c7f1f4, 0x0e123f7c, 0xa7395adf, 0x2e5fb7c5, 0x8039cb04,
- 0x0f8e5ad7, 0x77c7e76e, 0x7cb9502c, 0x3d6b9608, 0x3e4423e1, 0xc1fc3c08,
- 0xeb84daec, 0x4045c6cc, 0xfacd1de9, 0xfa4f7b61, 0xf358c5f8, 0x2f67b34f,
- 0x8c5f8fac, 0xc7d5084d, 0xd7ae88ba, 0x75478015, 0x6150ea8b, 0xec8bf50b,
- 0xfaffc42e, 0x7689c566, 0x9516eee1, 0xf2fd61e8, 0x729bd228, 0x8f6e5fbd,
- 0x84aec419, 0x6b3ff052, 0x5a7d258f, 0x5fe62b1b, 0xfcc0bf06, 0xbabc6da7,
- 0x3d79e6dd, 0x6fad0e50, 0xe11226f9, 0x71593fdc, 0x4abb411d, 0x85818fab,
- 0x84df7814, 0xd70829bf, 0x322a0dc1, 0xad9ef5d6, 0xf6f09526, 0x8c378e2e,
- 0x0a33be75, 0xa6a7f5d6, 0x7b40bad1, 0xaf8eeceb, 0xeb05dd6f, 0xfae68daf,
- 0xeb9a36b9, 0xcebbd3e7, 0xbb7a41f7, 0x9274e176, 0x9ef4c37b, 0xdce5038f,
- 0x730efd1b, 0x6d4acc39, 0xd4e3fb47, 0x68fbe52e, 0xa235a100, 0x0aed1e9f,
- 0x06a9ca35, 0x1c4415db, 0x1f0039f5, 0x145b5f84, 0x073eafc1, 0xd6932ef8,
- 0xd9d70279, 0x14b2316e, 0xfc0b3f68, 0xbae4fd88, 0xcf7ac47f, 0x9a6f5f84,
- 0xa0b468ed, 0xbebfd50b, 0x1e20bc6e, 0xa2d57c60, 0x669c7da7, 0x67c44e51,
- 0x10c5d1b3, 0xc283deff, 0xab78103e, 0xba773411, 0x0f01c865, 0x81fd6fec,
- 0x7fbe2734, 0x48d9c77a, 0x47ed199d, 0x7d876a8f, 0x565de9a9, 0x5bbb87d4,
- 0x39de5ece, 0xc19be480, 0xbb3beb0e, 0x33ddb0ef, 0xcfed47e0, 0xfd88bf6a,
- 0x0dcdead5, 0xaa227681, 0x1a945259, 0xda5a523a, 0x220ca9fe, 0x0f2863fe,
- 0x57cc19bd, 0x3e5efb0f, 0x54d3d050, 0xd7be40e6, 0xcdbb7be2, 0xe00f619d,
- 0xb736cddb, 0xf385d703, 0x3ed2e6a4, 0xe83eb0b3, 0x0679e9cd, 0x0c6fee1d,
- 0xed29bd43, 0xb44fb93d, 0xfd7323c7, 0x5ef693fb, 0xce73fef7, 0x68c7da0f,
- 0x74ec83df, 0xc52c740e, 0x282d5a73, 0x03320785, 0x784adff7, 0xacfb16a9,
- 0xaab7308e, 0xcdb62fb0, 0xca53b270, 0x7dd0ac83, 0xf3869e00, 0x780f387d,
- 0x377fcf10, 0x4e11eb3b, 0xda1b0efa, 0x91afb0af, 0x50560fb8, 0xf8708c96,
- 0x8c9aecc0, 0x2599fef2, 0xf4fae88c, 0x14f7ccb3, 0xbb4b7a6c, 0x0366e88e,
- 0xffe882bb, 0x5b99dd0a, 0xebe266dd, 0x7e7ef8b1, 0xe50ea347, 0xa1cf8e6f,
- 0x797ab943, 0x474eb347, 0x2f19e0f4, 0x0f5674e0, 0x54dfebeb, 0xba48fea7,
- 0x2e9e1498, 0xabf6f042, 0xb849bc40, 0x30f4128d, 0x3286e10a, 0xb27f4bb9,
- 0x370fc233, 0xb54b0bc0, 0x58c78e87, 0xa9c20667, 0x7d63c5ed, 0x9abc613a,
- 0x8add8f97, 0xabf9a970, 0xcc618889, 0x418f2f5f, 0x30b2ffba, 0x72735dbc,
- 0x0f5b9d71, 0x3dab78c4, 0x157be48b, 0xbda230eb, 0xaddd7200, 0xd2eb900f,
- 0xe867db8e, 0xf7f1c628, 0x12d9510d, 0xe759f380, 0x461624b0, 0x3f72c85f,
- 0x00396dda, 0xe155f47f, 0xfcc5e4f9, 0xda476e18, 0xb25afc83, 0x9678b7dc,
- 0xe7a18c7b, 0x8e0df6f1, 0xb14e7f24, 0xc87a1ec6, 0xa78dedce, 0x81cf5ced,
- 0x3ae17660, 0xe2c5fd4f, 0xa9d342f3, 0x0cadf7dc, 0xdb6a74e0, 0xa05e79bb,
- 0xe3ac53a3, 0xdf9097f6, 0xb9d336be, 0xfbf078ae, 0x7e2c4b79, 0xfb8abcfe,
- 0x2f55d852, 0x407569bc, 0xbb2de2ff, 0xf001bde3, 0xbfe98abb, 0x4ceba6ee,
- 0x8fef979c, 0x20fb3be9, 0x274bd83e, 0x16501d12, 0x3c6af8bf, 0x373803a7,
- 0x14ff450d, 0x633f8fe7, 0x5ddd8053, 0x3dc6b0bc, 0x1fc02f26, 0xf9922e73,
- 0x0efaeb02, 0xab891e23, 0xfda999cf, 0x9e25bb76, 0xd62f4de4, 0x8f4de50b,
- 0x7848baf0, 0x3c25db6a, 0x913e0df7, 0x97c5f681, 0x07c5d83d, 0xaf7ec1ec,
- 0xdc7b1637, 0xf5fc7263, 0xdea77dbf, 0x5ef30ee6, 0xacff188b, 0xda127873,
- 0xd6c6c57d, 0xe1f9c2cb, 0x0b5339e4, 0x57ce37a7, 0x7fb8e346, 0x1f1a6739,
- 0xeddb5d6e, 0x3fe69fba, 0x0aab07d8, 0xf44b0bb0, 0xdced379e, 0x326ab05f,
- 0x3f2e6bd4, 0xd9ce8e3b, 0xdc845e92, 0xdb798abb, 0xf4b57f90, 0xbefdf121,
- 0x4b2a5bc4, 0x693dbc45, 0x11ba7077, 0x43f920fc, 0x396d470b, 0xe13cc8c1,
- 0x75cf03ce, 0x799235c3, 0xbede1e41, 0xfe7a40c1, 0x4e1ff3cd, 0x3f7a03cd,
- 0x5b60d6b6, 0x5ace530a, 0x51ed3bb7, 0x360da7f4, 0x1fa814d9, 0x865eb473,
- 0x4eb95f7c, 0x1c3d2375, 0xe27b7b5b, 0x00bf2874, 0x7582adfe, 0x50306e54,
- 0x770dacfb, 0xc109fb50, 0x12eb95d7, 0x7cbca3fa, 0x4481fdb3, 0xeface3c4,
- 0xf11ebfee, 0xeb68c5b2, 0xaaca183f, 0x263dc526, 0x3673de00, 0xb7d50646,
- 0xdc2cd8e8, 0x532bc487, 0x1dcfdd3c, 0xdc631eb0, 0x6b63c44f, 0x18d51f88,
- 0x92ccedc7, 0x9aefb029, 0xbec96721, 0x8a27bddf, 0x662bc9f4, 0xd6c8ec95,
- 0xb7f3ffc1, 0xf45ea12b, 0x3649de9d, 0x58f3c09e, 0xce866781, 0x8a8fa040,
- 0x7a2f5e08, 0x68764f51, 0x78254591, 0x1d5c82b7, 0x65de4a43, 0x7efd0b1e,
- 0x4753df21, 0xfd0e1ead, 0x1818c6c6, 0xb45fa1ea, 0xa06629ce, 0x4e797f27,
- 0xc06769ff, 0x9733467e, 0x42e2ed1e, 0x5f6a0866, 0x0a7d6511, 0x4c185aed,
- 0x28b0fa8f, 0xd8f9f822, 0x813daecb, 0x5b58c4f6, 0xbfa4bb70, 0xbfa0389c,
- 0xb1b97efe, 0x8fe7cf7b, 0x567d804c, 0x9ec953ec, 0xde5c90be, 0x41cafb8f,
- 0xda51bf7c, 0x29bdef04, 0xf9e6f3a0, 0x9052cb63, 0x86140abe, 0xe763ae7e,
- 0x5ce5f3c3, 0x018c9f41, 0xf1f9fb9e, 0xbb39d13c, 0x7f6c8ebb, 0xeebcc15d,
- 0xe2ffae3a, 0xcb0fd9e5, 0xf73b4eab, 0x87f5686f, 0x53fe7f96, 0xf078a3fa,
- 0x0b5e19ab, 0xe585ff95, 0xfe5c2dc9, 0x5685e56a, 0x6975c85f, 0x4e7d5bd7,
- 0xcf13d20d, 0xd7e27909, 0xfe7a4617, 0x6b8f9e33, 0x9e8486e2, 0x219ec18f,
- 0xea877978, 0x760e5ce9, 0x6fd8edd7, 0x5711d980, 0x665a4b08, 0xb9468acb,
- 0xdcf164d6, 0x97e01719, 0xa8d4dfea, 0xedd1f943, 0xfb0431af, 0xf5f9e46d,
- 0xffbe389e, 0x875e38f2, 0xfb9d2bf3, 0x92794437, 0x2f6f38f5, 0x7e3033a3,
- 0x85fbcb25, 0xaf7690fc, 0x9da577b4, 0x7d59ed2b, 0xb367b703, 0x7e41dd63,
- 0x5fafd633, 0xed183f90, 0xef73e474, 0xe7da28ae, 0x107b8a67, 0x731569f2,
- 0xa017143d, 0x7c0deb07, 0xbcf5dafd, 0x13ec2716, 0x413b26e6, 0x1d1ccfb6,
- 0x8d6f9e5e, 0xc2ff53d3, 0x40ca6fc7, 0xb6b3ee3b, 0xce50339b, 0xfff1a67e,
- 0x0f0fc4f6, 0x507d7e0b, 0xe15893e8, 0xf9087827, 0xedc0ea08, 0xfef02dae,
- 0xe26ec58a, 0xbe2edfab, 0x1747ca91, 0x8fe7a42f, 0xfcd265c5, 0xdc77c2b6,
- 0x23a700d9, 0x17d2552b, 0x3eee8d33, 0x91def0b1, 0x1bf91d08, 0xc799d70e,
- 0x3b63336f, 0xcce5f3a3, 0xf26c27c7, 0x1f01f806, 0xe90c7dc3, 0x09af9f79,
- 0x9d2a5ebc, 0x2b4eb356, 0x7a6eadea, 0xb7a2103d, 0xa72f11db, 0xf6226759,
- 0xfa3a55ab, 0xf3a12934, 0xaf5cc83f, 0x2366bf61, 0xd37c8feb, 0x92f5c51c,
- 0x029bdfb6, 0x9ed0ab7f, 0xd433f285, 0xcce4615f, 0x7b5e823b, 0xc4ef5d5b,
- 0x9333bc91, 0x56eb4edd, 0x3ef44e29, 0xb4e707ce, 0xc639f10f, 0x975fd089,
- 0xc775b56d, 0xc779f855, 0x185c71fd, 0xfe4f5c7f, 0xee3aed1f, 0xd570e789,
- 0x296485fa, 0xc4c742fd, 0xd9c5167e, 0xe844fc82, 0x0fe70f0f, 0x48de3aba,
- 0xd510dbfb, 0xa8b8c5e9, 0x3e7e66f7, 0x55e51a4f, 0x5a95dfd0, 0xfe8858dc,
- 0x71c67fb0, 0xbc7da63f, 0xf1cf1927, 0x4e8d92bd, 0x2c5ce3f4, 0x76680e3f,
- 0x478138f1, 0xd358eb47, 0x03ec8de1, 0xf0ec4aeb, 0x34a11a14, 0xf8d226c4,
- 0x096eac49, 0x11fa0de2, 0xa3dbf230, 0x743c8a7c, 0x213c9743, 0x91a56147,
- 0xaddb77c8, 0xbc3c531e, 0x5d467437, 0x53edfa3d, 0xe23c78ab, 0x6de9e284,
- 0x1b8d0ecf, 0x5e3932cb, 0xbb88aff0, 0xd0346fe6, 0x3e37f2ba, 0xfcd1f146,
- 0x95eb578d, 0x1f943900, 0x27fea06f, 0x9bd39d5e, 0x851f18fc, 0x880ebf8f,
- 0x4662f4e7, 0x8a10e11f, 0xbfa58687, 0x313ff17e, 0xf609dcae, 0xef3cb050,
- 0x20243f17, 0x0cfaa16f, 0x3c7d73c9, 0xc07f23d7, 0xd358057c, 0x7cc8661f,
- 0x8fe40d85, 0xbe6c6add, 0xde51e306, 0xbc431d1a, 0x2bf9efb7, 0xcd676bc5,
- 0xb6bc5070, 0xcf7dfdfb, 0x3ff6c72c, 0x118362cc, 0x87be29fe, 0x7ccff415,
- 0xc9e3c91b, 0xb1326edf, 0xd9a5b025, 0x3c53376f, 0x8e954d78, 0xeed17ca1,
- 0xe832661b, 0xa0db5c78, 0x47f8e02b, 0x0ef12839, 0x7c446404, 0xfc21bc48,
- 0x6e3198b6, 0x5fad02fc, 0x7e80cbae, 0xc7dc04f0, 0x4e906b96, 0x15d9557a,
- 0x75c817f4, 0x4c9f485d, 0xfa69ff8a, 0xc3d467e4, 0x9c5fc5b7, 0x74819e1e,
- 0x5ac3d5ae, 0x7164dbfd, 0x0bbf6255, 0x4213e1e9, 0xd7894dfe, 0x7e3043c4,
- 0xc76f0616, 0xf00c1c84, 0xff19df8f, 0x1fe3fc23, 0xe3091ca1, 0xc8c67f1f,
- 0x9da36fe5, 0x0c2936dd, 0xe627bcbf, 0x32eeadd7, 0x6f8cfea2, 0xb3cac251,
- 0xc651ab6c, 0xfae05713, 0x2b44fc64, 0xb7c0c38f, 0xde5ffb11, 0x7b0bbad4,
- 0x165ec6e5, 0x15dbe68d, 0x37b9c3df, 0x046b3ccc, 0x30163b9f, 0xea3aa38a,
- 0xa82bc918, 0xb093b8fb, 0x0023f295, 0xb8e04f1f, 0x9d638a17, 0x1d9215b6,
- 0x8656e324, 0xf8f3e0d8, 0xcfd9789e, 0x6fc447f5, 0xe27a0f7a, 0xb05d624f,
- 0x5fb067ff, 0x141602fc, 0x9971919f, 0x62af3af8, 0xc47547cc, 0x3bdadfc8,
- 0xf304b3ce, 0x3386ba3f, 0x8f5053e7, 0xa215b529, 0x72cfb53c, 0x66fc61b6,
- 0x9fee29f9, 0xf98f3e5f, 0x79012a96, 0xca2c9e05, 0x05bc4203, 0x0a7607fa,
- 0xac53b6f9, 0x5fe4f41b, 0x5abaf9e3, 0xa4f403e2, 0x756e87c4, 0xed784aab,
- 0x02a7af0a, 0x43fedaf0, 0xf919fd78, 0xe48b9df2, 0x852f0fcb, 0x9171fe6e,
- 0x8c4bf374, 0x3b090c6f, 0x578867df, 0x67407ced, 0xbef88347, 0x78db3a07,
- 0xf9d2f189, 0xa7a89fe6, 0x5dfae142, 0x5c28427c, 0xfaeb1657, 0x5166bc91,
- 0xd57a70ce, 0x4ffe6096, 0x12d891e7, 0x1e7a5c86, 0x3b7bfac9, 0x1967888d,
- 0xd5eb9107, 0xf6c88fe6, 0x0f25169a, 0xbb859fcf, 0xe039d3cf, 0xe8bfdb74,
- 0x079a101c, 0x0f9de7c2, 0x39fc8de4, 0xdcd01d56, 0xa677fc0a, 0x752748c9,
- 0x377e3995, 0x7bf7fd0a, 0xa0e7cc0d, 0xffd811fe, 0x5ffbc60a, 0x03bcb4d1,
- 0x305757fd, 0x479c57bd, 0x1db9ef95, 0x7c7c8315, 0xf2955ec5, 0xfe66d8bf,
- 0x2c458ebc, 0x4c74ff41, 0x02398e81, 0x8aeb19e0, 0x78008e62, 0x07b78dc6,
- 0x7939e5d5, 0xc13798ae, 0xfde82bdd, 0x82b31ba4, 0xae78a7fa, 0x3a9e7a08,
- 0x9ea8372b, 0xa822375e, 0xa385ef7f, 0xde99ea83, 0x67fa836b, 0xaa0e4c37,
- 0x1cde34e7, 0xab18ffd4, 0xec147c7b, 0x5bfa3fb3, 0xebcc3f00, 0xd60cb8ea,
- 0xb7ac433b, 0xa0f29aef, 0x5afd81f7, 0x53381e31, 0xcf17c990, 0x00abdf8d,
- 0xff08671c, 0x39bee5ce, 0x55cfe341, 0x677e88ab, 0x048e1a36, 0xacf469fa,
- 0xc02a4f6b, 0x38a7c6b9, 0xb95ea15b, 0xecf9f826, 0xbde81b1d, 0xf6e0bed9,
- 0xeb839bb9, 0xd29cde83, 0xb901e79f, 0x4672bf29, 0x87dfb042, 0x9989d085,
- 0xb599f4e4, 0x7aa5ffdc, 0x24e86afb, 0x27ec5f18, 0xd0438dd2, 0x9839298d,
- 0xc11d226e, 0xfaf95374, 0x0708a1d6, 0xfba3fc2d, 0x0bc9e869, 0xfe007eff,
- 0x7d306716, 0xfb4ee169, 0xec567583, 0x95efd850, 0x6acfb850, 0x2a797879,
- 0x633b850b, 0x1fd7233e, 0x688fedc1, 0xd981fde0, 0x89ca18f3, 0xa8163bb3,
- 0xe3638718, 0xecff3c79, 0xed07af8e, 0x2f898473, 0x97c484e5, 0xf80425d6,
- 0xe97c647f, 0xaf19c634, 0x67111764, 0x844ffddc, 0x8f00c807, 0x0b11f301,
- 0x71dd119c, 0x3c7f48c5, 0x653f2b49, 0xb9e3fa09, 0xfe2bd204, 0xbc3d6c78,
- 0x03e6343d, 0xc653ae37, 0x8ebe64e5, 0x930a2bcc, 0x8497fc9f, 0xb40fcc04,
- 0x75f9e347, 0x3a7cfb4b, 0x22bcc977, 0xc302f226, 0x4fa4ffd8, 0x14d7f19a,
- 0x26760bb2, 0x0afebd70, 0x176dd78e, 0xa92fbc88, 0x471a7bd7, 0x2a7adfc6,
- 0xc52a5d95, 0x2894bb13, 0xf46153e7, 0x3f0947ca, 0xe685bf51, 0xa8147f8f,
- 0x38ee6a47, 0x0bf507bb, 0x6c63fdc0, 0x6f4058b7, 0xfdbd7e2b, 0xd12b68bb,
- 0xaaebd76b, 0x9c342faf, 0xd8c47089, 0x222d8e01, 0xe445afdf, 0xd6f5c9fb,
- 0xf4baa445, 0x9c1ed879, 0x8ae327ff, 0xc28dfdda, 0x3afe71fe, 0x976417bc,
- 0x76e5cf2c, 0x870169db, 0x276e160d, 0x9db91318, 0x1f39ab50, 0xfb7036d5,
- 0xacd7a40c, 0x8c718096, 0xdfea1689, 0x4c685cdf, 0xdf8471c4, 0x42a67965,
- 0x4572c576, 0x77e47b21, 0xfcf4d1b6, 0x868509cf, 0x6fffe844, 0x6011af31,
- 0x16636588, 0x93438f5f, 0xe892fe17, 0x68c252ff, 0x36898d2a, 0x5db67fe1,
- 0x6a91f481, 0x5da91976, 0x8aa71f0d, 0x1afbe44e, 0xb464f229, 0x42718071,
- 0xdfdfa26e, 0xe2d2edc4, 0xfe4189b1, 0xf18cf7fe, 0xd0afdfc2, 0x53942eeb,
- 0xef7814bf, 0xeaefffc9, 0xf1476b67, 0xaffff5ad, 0x7ffee48c, 0xf1081f19,
- 0x85c73bff, 0x1e23fff5, 0xfe7eeda2, 0x2dd893e9, 0x3e50d29f, 0xea7a30ed,
- 0xb5d5fcf1, 0x6878ec1d, 0xa4ebe7ee, 0x7dc0c0b8, 0x673c4ed9, 0xbb64cf2d,
- 0x4b6f482c, 0x7f3b4318, 0x696ea747, 0xe95f3c24, 0x3e7c3665, 0xbf3f2d7c,
- 0x3871ae55, 0x0e529b71, 0xfdc89d5c, 0x1f380687, 0xbdf1725b, 0x27186dc9,
- 0xd9953958, 0xb2350d51, 0xda2b22c6, 0xd8596b45, 0x4168e3f3, 0x770a134f,
- 0x4276fe51, 0x0fb7cf17, 0x7693a7ed, 0x73e2217e, 0x2ce79758, 0x7971cf8f,
- 0xcf112242, 0x5f3aeda9, 0xb27d73b8, 0xa46ae8ad, 0xf0be81af, 0xc29188e7,
- 0xbb32e64f, 0x03f8e227, 0xd87cf1b7, 0x677f294a, 0xf01e98e8, 0x75e0453e,
- 0xd79e5aca, 0xff7fcec1, 0x45ed2abd, 0x9433e346, 0x2d53952b, 0x5513f43e,
- 0x697a64b6, 0x99c56e5d, 0xe5a2f283, 0x2bd6376d, 0xe86df3f3, 0x9dfd7097,
- 0x91fb4729, 0xfcb8c9a9, 0x52a3b3d2, 0x8f1e909e, 0x64d438a7, 0xf1236f41,
- 0x72409cb0, 0xc034657e, 0x92ed72bb, 0xf3d7985d, 0xc5191ffe, 0x5da80e9b,
- 0x5fb07143, 0xe6167001, 0xa567a962, 0xa648b7e4, 0x718aa671, 0x7982ffbc,
- 0xfef3fc23, 0xda75e5aa, 0xf30301cf, 0x9d1d0046, 0x038379de, 0x09e5eeed,
- 0x93cfe411, 0xaa83da71, 0x42523ee7, 0x5c1aaefa, 0xd12d4dda, 0x9c83f436,
- 0x3a61f9de, 0xb93e4dc6, 0xed05ace7, 0xbbe87fd8, 0x7c2f982c, 0x16bbd17c,
- 0x93f1c017, 0x9de52a45, 0x86f42add, 0x897dfb84, 0x271fee1c, 0xdc2117ba,
- 0xddfc5b6f, 0x2a3cc0d0, 0xe88130b6, 0xa54fb679, 0x9cb75c90, 0x168d1cdd,
- 0xbabd774a, 0x2ea82e39, 0x89b540f5, 0xbf3b85d5, 0x69e824bc, 0x6306d376,
- 0xf7e703aa, 0x38fa42ac, 0x7474e7ae, 0xe7c59b7c, 0xa7aff4cd, 0x11b069bc,
- 0xb47f83ed, 0xea2b5898, 0x81877d33, 0x8abb49e7, 0x8f0ba015, 0x9d76bfc0,
- 0xa68e5e7b, 0xc63e66f1, 0xcc1947e5, 0xebe010b5, 0x724d9969, 0x998fbb41,
- 0x25c6389f, 0xae675c01, 0xe3fa1850, 0xc022ffb0, 0xfde1679f, 0xa5fda15f,
- 0x379967dd, 0xf9480e58, 0x3cf02955, 0x79f821bd, 0x696fcb65, 0x43cc199c,
- 0x27602079, 0x735f9e69, 0xd80af50c, 0x67ed4147, 0x519b66f6, 0x14496f8f,
- 0x6ca1edf1, 0xfef0f7d8, 0x18ec88ae, 0x927e184f, 0x36c5c9e5, 0x17cfd622,
- 0x7c795047, 0x5172a331, 0xaae3a722, 0x44efb796, 0xc766a97b, 0x738e2316,
- 0x9681f1c9, 0x999bd613, 0xe896ef5f, 0x1e21af3d, 0x28252cd9, 0xe1b3504e,
- 0x32ec91ac, 0xd1529f01, 0x00f08a5e, 0x1cc2dc3c, 0x503cf1ab, 0xc7f0655a,
- 0x5bffe0e9, 0x60c3bf72, 0xd1da80bf, 0xb5173991, 0xb94fb95f, 0x5abcf96f,
- 0xf30c7a05, 0x4583e4be, 0xd8cd1f42, 0x2f796938, 0xcefd5100, 0xfc9d1963,
- 0x78f649bc, 0x516bc091, 0x8f64fdc4, 0xe3d88517, 0xb7dc3dec, 0xe64ac7a4,
- 0xa5a23d24, 0xcc07493b, 0x23f7a06c, 0x4ddebd84, 0x120f2d6f, 0xcc3d245c,
- 0xe4685d5c, 0xc8c79be6, 0xb26f4aed, 0xa326d7f0, 0x30e816bf, 0xba063ec8,
- 0xed6baa0b, 0x9d0d2ff1, 0xb41f2819, 0xe7ccb876, 0x60e7c2d4, 0x9a5fcf22,
- 0xb38d70e1, 0x8073c00d, 0x85f9397b, 0x10ca46f1, 0x3dc6dbe4, 0x6edf2377,
- 0x0ff37ce4, 0x92b30f41, 0xf049113c, 0x9af4b3dc, 0xe77fc41a, 0xf2065312,
- 0x8a6967b3, 0x3dfd0f1a, 0xc39214ab, 0x18a697a3, 0x97d3ce11, 0x7c871e0d,
- 0x345ccb9d, 0x1638b4fe, 0x9c186ed6, 0x7947e5c4, 0x6319819d, 0xf4462908,
- 0x506a9f0b, 0x9a8c2fc8, 0x6be02264, 0xf557c096, 0xa4adb78a, 0x6ff01caf,
- 0xe01f5e28, 0x2aeab33f, 0xee701f09, 0x52c7cb08, 0x66b154af, 0x3b8c0a63,
- 0xd11b6567, 0x82479c31, 0x95b2d7fa, 0x66eaaf84, 0xd5dd7b4d, 0xb9d83f84,
- 0xb589cf11, 0xf79f823b, 0x8276124f, 0xbe913993, 0x225a773d, 0xb471af30,
- 0x4ad94f80, 0x6b7286c5, 0x84b6b2fa, 0x7d3ea01b, 0xed84eaaf, 0x8587c374,
- 0x1f2823fa, 0x1cdecced, 0x719c57f2, 0x9fb09238, 0x52edcac7, 0x8619ef45,
- 0x3ce2d3ed, 0xd8cfd0c3, 0x618591f5, 0x73a1df7e, 0x11e4732a, 0xfca20f95,
- 0x341b1b2a, 0x2fbec029, 0x3112d833, 0xf365e91e, 0xb83dc593, 0xd4f84088,
- 0x7e24ac46, 0x887e45a8, 0x34c3f3f0, 0xdda17b8c, 0x7942a2f3, 0x30476c84,
- 0x34709fbd, 0x9091c226, 0x9033efb9, 0x8a6f2e9c, 0xf971f3a5, 0xea2322b2,
- 0xb16df2d5, 0xf9c11f64, 0x86cbbdf4, 0x6879f1fa, 0x649ede59, 0x39bcb718,
- 0xe306a9d3, 0x19dcf431, 0x0d39ceb0, 0x8efe2487, 0x23bf80f4, 0x7807f0e1,
- 0x4f2f6a0b, 0xe0497212, 0x238a2d8d, 0xef3a5abd, 0x58089cd5, 0x48ba99d7,
- 0xe9ec93ca, 0x8c66c47c, 0xe903fca0, 0xabdf1abf, 0x7944cfdb, 0x0e57673e,
- 0xf169fc21, 0xd3dffe36, 0xcbbda187, 0x68616efe, 0x66cb4d67, 0xf6864db7,
- 0x6fce14db, 0x19afead3, 0xddc4768f, 0xbd9e5c49, 0xd5d07205, 0x41b7b197,
- 0x0e40ba0e, 0x11f20bbe, 0x4e2fefeb, 0x8b87faa6, 0xb47e541d, 0x47951fb8,
- 0xf25fbf84, 0xd102101d, 0x2e2492ed, 0xc9249717, 0x6482faf8, 0xffc66934,
- 0x3a21ddfb, 0x6c67de1b, 0x9d36309b, 0xc15cebb1, 0xa9c6f7fa, 0xd3b5fac1,
- 0x8c971b60, 0x0a15dbe7, 0xeee5a3d9, 0xe1fd4191, 0x7bc464dc, 0x9e5fac2b,
- 0x86f49dbf, 0xe81938c3, 0xb8e2a6f1, 0x37d7fea0, 0x9fd506a4, 0xfad07248,
- 0x0ecf8d26, 0xb4b9bf6a, 0xecbd507f, 0xa431919c, 0xf1014777, 0x679102cf,
- 0x8e770704, 0xfbef1b17, 0xa4b0dd75, 0x5fbd60bb, 0x73d033ef, 0x5e243e2d,
- 0x9951ac67, 0xbe9bc607, 0x2c1fe406, 0x74e348f1, 0x0e2fafe2, 0xb25ce858,
- 0x7f6f29bc, 0x350fd401, 0xe80f2819, 0x55b8b2d0, 0x6a1daf68, 0x2832b04a,
- 0x6155cb3d, 0x2c8cd8d7, 0x2d15de40, 0xa6f9425b, 0x93933e65, 0xef7697ec,
- 0xc0eeb293, 0x122e2af3, 0xd53de4f9, 0xfe10aad7, 0x67df045c, 0xe15ee922,
- 0xf127b071, 0x4ef4e12a, 0x0ae3aebe, 0x13f5865b, 0xb9f71f91, 0x6a71d60b,
- 0x0e856362, 0xbeea15fd, 0xcf9f9204, 0xb6e90cab, 0xf2819e74, 0xafbf660c,
- 0x533ac06e, 0xbe395372, 0x66a3ca55, 0x7c6898cf, 0x021405d6, 0x6d3c16fd,
- 0xae01ea7a, 0xfb7d78ff, 0x8203df40, 0x8e8e913e, 0x2c5732c7, 0xf5a1562d,
- 0xc276fe7e, 0x87ee7c06, 0xee106fa7, 0xfdc1bd7b, 0xefe01266, 0x20df0af3,
- 0x128b6f92, 0x70b4af88, 0xf2e4623f, 0xa07c8bcb, 0x9fa6e5f8, 0x7e9296cf,
- 0x26706e98, 0xe56e493a, 0x3adca0a7, 0x7640d2b2, 0xacbbd330, 0x3e8f4893,
- 0xe72c744f, 0x8c77a239, 0x691df5c3, 0x5f9864c8, 0xa552be51, 0x1fd20af3,
- 0xe7e3efef, 0x0f5587d4, 0xdbcc24d8, 0x525878a0, 0x3edcdd28, 0x5de5186f,
- 0x596ded6a, 0xeabc097e, 0xe64bb009, 0x8179e4ec, 0x3f53bf2c, 0x3ee06075,
- 0x3cf9f196, 0x4af94105, 0x1daf5955, 0x2ec8eb34, 0xc273c18c, 0xa254f789,
- 0x8d5913b3, 0x9f019343, 0x86bb7527, 0x2ff505de, 0xea8f7a26, 0xda223ea1,
- 0xeb842ff3, 0x0ccadd5c, 0xfaf30b25, 0x091aaf0f, 0xacfe7def, 0x2e6c49c1,
- 0xcf39fcec, 0x43fdeecc, 0x1bd815f6, 0xe1ddb8bd, 0xcffbc14a, 0x1acd1d73,
- 0xc7c3807b, 0x00204981, 0x056adb17, 0x7876376d, 0x3c01e588, 0x31d74f57,
- 0xe079e04f, 0xb1e3c8b0, 0xa873f324, 0x24a61bb4, 0x5d333973, 0xbd9be9c7,
- 0x037a70ac, 0x0f502afb, 0x173db948, 0xbf55aaf3, 0xbb72901e, 0xfe52358a,
- 0xdb99238a, 0x1f15b32f, 0x9835ddd9, 0x2bfd0289, 0x56683c6c, 0xf3e4d9b5,
- 0xb698d265, 0x7a60fb40, 0x73279732, 0x8e3ecf8c, 0x3ac95be4, 0xfd353ef8,
- 0x2af8373e, 0xf543323a, 0xc36d854f, 0x6f8d3b32, 0xb79fa270, 0x756bf052,
- 0x256be2a3, 0x51eb853b, 0x299d57e9, 0x3f56a8f4, 0x8d25e885, 0xd1a99a3e,
- 0x5831acbb, 0xe97ac2cf, 0xaf5cb2de, 0x68ee9d76, 0x0e61c10b, 0x6ff50bac,
- 0xa7b7c785, 0x38add684, 0xeb21a301, 0x31ee64a9, 0x780fb8b1, 0x5d08eade,
- 0x215f769f, 0xd4fb15eb, 0xf9c2bbae, 0xbbe62b55, 0x04293554, 0x243eed2f,
- 0x9c76b43e, 0x97854bc4, 0x45a2b7c3, 0xd67e7c21, 0xf2321f6d, 0x61ba4b6f,
- 0xe02ff0cd, 0xb047497d, 0x6f9412af, 0x1e41ab95, 0xd3d28667, 0x456cd61c,
- 0xad61a9ba, 0xdf9053cb, 0x1faf9b5a, 0xb5867e8e, 0xa9f9d36f, 0xa39aded4,
- 0x7a50058b, 0xbf8e5773, 0x17e74fca, 0x5cfc278a, 0xe9ddc3f0, 0xe01e9fa5,
- 0xeaa37335, 0xd4141c8a, 0x7a3fe44f, 0xbcb38e09, 0x63fa235e, 0xe53f0967,
- 0xab59d685, 0x2e5ff7e7, 0xcf015eb8, 0x0fdd7b9f, 0x7764f727, 0x76173f85,
- 0x3d1a5bdb, 0xfb81b1ff, 0xfe7ca55b, 0xff22ef5a, 0x3b0bb42e, 0xe3c6ceac,
- 0x9eb91a7d, 0xbd724f9e, 0x3979e842, 0x5b35d1cf, 0x13305e29, 0xf68080ed,
- 0xa56f56cf, 0xd9f1a578, 0xcbb45699, 0xbe99ae8f, 0x8f52f30a, 0xbd274d36,
- 0xa97bf95f, 0x40c63df7, 0x4d68a27b, 0x907bfad9, 0x5247378e, 0xdda2b15c,
- 0x2045ce23, 0x639fa167, 0x6cab970d, 0x95b39735, 0x90071fa8, 0xfd8bec7e,
- 0xf4f0acee, 0x68087e45, 0xe779bd27, 0x779fcf74, 0xfe77b8c2, 0x25e9da29,
- 0x755ad850, 0x5adbcf64, 0xcf563f51, 0xde20f0ff, 0xe07b01d3, 0x7b4b5ffd,
- 0x0645aa00, 0xea325b4f, 0x54076121, 0x6c321bab, 0xbd083768, 0x7f801ff1,
- 0xe7f87844, 0x11cbfc12, 0xdbf101fe, 0x9c381b4f, 0x7fe06ba7, 0xcbc72578,
- 0x2b6ca6f2, 0x6d3f2f9e, 0xebbb26c8, 0x821ca6d3, 0x00078cff, 0x1f5fdcfe,
- 0x0035776c, 0x817cf1fc, 0x96f587f0, 0xfc043bb7, 0x0dfe1c9d, 0x256cee1c,
- 0xc084672f, 0x57f94bd9, 0xe4d1fe77, 0x78e23b44, 0x57d92b64, 0xaca8b7c7,
- 0x932ec03e, 0xac37d176, 0xdfb449f1, 0xf767fc24, 0xf28418e9, 0xd7e1f4a7,
- 0xd98788ac, 0x3150785b, 0x95ccfe04, 0xeec7f144, 0xa8edbbf9, 0xdd5f116f,
- 0xe245d476, 0x19b986a9, 0xd796c7e9, 0x4239e00e, 0x45bf7171, 0xb66fbc8d,
- 0x19bef823, 0xda9592e6, 0x8ce2c5d1, 0xa4f5c088, 0x79d3ef69, 0xbebdc618,
- 0xd6eaf9cd, 0x5de7c4f9, 0x25b97941, 0xfe80d5ac, 0xd0acd636, 0x05eaed3d,
- 0x8642d685, 0x5a3ab571, 0x09453387, 0x56aecbca, 0x564f6475, 0x447c4519,
- 0x892455e4, 0xdc29497e, 0x629c902f, 0x2474f2a3, 0xda707d23, 0x7de1f462,
- 0x255f382a, 0x9ddefce3, 0x41dbc226, 0x5fc0135c, 0x333f0a35, 0xb7a442a8,
- 0xd3a7185b, 0x297bcc5b, 0x972707ae, 0xa71fa875, 0xc939342d, 0xa18f8dce,
- 0x42cdd2f8, 0x5e62ddde, 0x313d46ee, 0x61746ccd, 0x317ae309, 0x1a30d7f9,
- 0x0df987a6, 0xef748cbd, 0xf3a234b0, 0xdf123c59, 0x8ef87c96, 0x95ea5808,
- 0x1fa38e60, 0xf6dd7e18, 0x5e285c7d, 0xf609f8fc, 0x8546d74b, 0x170c8bf7,
- 0x2e5c5070, 0x609b3ff2, 0xc75828ec, 0x6f0714a8, 0x3fb8d877, 0xc9e02d7a,
- 0xb6afc7e2, 0xfc79799d, 0xb1f30ab2, 0xbc58f7f9, 0x5d541f24, 0xc8cabd50,
- 0x62794f9f, 0x34eaf143, 0x8e61dec6, 0x2ab78881, 0xee782194, 0x61f18b2f,
- 0xf43aef7c, 0xd4b1db87, 0xd64cbd13, 0x978eaf33, 0x0e3b660d, 0x25e3fdc7,
- 0x2abf9e02, 0x0031ad60, 0x83d7d6f7, 0xb437f3fd, 0x02fb5aea, 0xb7d3af7c,
- 0xea7d21b6, 0x1b061c22, 0xaf1ee3cc, 0xa539e2e4, 0xadc5e96a, 0xa689e2b7,
- 0xa0a3d5e2, 0xacf9e3ae, 0xfee6bf5d, 0x59805b89, 0xeab76e11, 0xcb9abb59,
- 0x52e100e9, 0xa978776b, 0xe2853cdf, 0x202e3237, 0x86183ff1, 0x2de3c40f,
- 0x4e307d62, 0x37df1583, 0x7cd7c786, 0x8ccc2f81, 0x15cfb846, 0xf257377c,
- 0xbf30535b, 0x0478823d, 0x2ed0553e, 0x22ddb806, 0xe2f89eca, 0x476e14a1,
- 0xcfc2943f, 0xe25cf7ef, 0x95dccc7c, 0x82797941, 0x7c4daa5f, 0x75ff397c,
- 0xc5307ca2, 0xf5cbe34e, 0x8a7ebd00, 0x0240e8e3, 0x691ebd20, 0x68fb27ac,
- 0xc3880b16, 0x49fbfa65, 0x77ad0eba, 0xf7dd11cb, 0x66bb1a58, 0x5f95e820,
- 0x8e513a44, 0xce6c62de, 0xf68733a9, 0x52e67193, 0xefa061d6, 0x993f4903,
- 0x55633fd0, 0x9535e533, 0xf41efc2a, 0x578b42e9, 0xc7e27607, 0x067bbe22,
- 0xa79c46ec, 0xfca15634, 0x773169af, 0x975f2096, 0x2c7a8d5a, 0xdff1163f,
- 0x8d17595b, 0x2b2d89ff, 0xdf8606e6, 0xe631504d, 0xa4f80b07, 0xadfb05d8,
- 0xebc3f947, 0xe4695d2b, 0x4ff5128d, 0x665c61f7, 0x03a079ea, 0x762e1faa,
- 0x5fa7bcc1, 0x91d9cf15, 0xfa7e7c62, 0xdecf413e, 0xe9de34e3, 0x53f3e4c7,
- 0xf90af9fc, 0x7f5e2bfc, 0xea2f8e50, 0x2f9e6b0f, 0x827f3cd1, 0xf89f5bd7,
- 0xf68bd8aa, 0xda4e68eb, 0x921423a5, 0x37214657, 0x12b6974a, 0xbb754b9c,
- 0xb59a3978, 0xdc2ff2ea, 0xdb8a64ef, 0xf7809298, 0x50588fce, 0xd533ff0f,
- 0x7e814c65, 0xfe79269f, 0x499ce586, 0x7288d78a, 0xe91f3df7, 0xf21e9285,
- 0xae428d27, 0xa6e5ea9f, 0x831ea5f7, 0xf1c3de71, 0xbb9c752f, 0xe4f9c743,
- 0x4d738cc7, 0x4738c86a, 0xfebecc7f, 0x24e1ca9c, 0x517691c7, 0xe94bba71,
- 0x663afc85, 0x70bf9064, 0x18cebdee, 0xcc314bf4, 0xcfd00377, 0x919cda5d,
- 0x7dcfe307, 0x0a8cc5ee, 0xbe849bb4, 0x32a739af, 0xdaa3c817, 0x5f950a73,
- 0xc4647db4, 0xdb35b37b, 0xa0e38cda, 0x45942d5f, 0x12aaeb01, 0xa77d04eb,
- 0xa7a01c9f, 0x4274bd8d, 0xf3cc3c6e, 0xa13b9e32, 0xd5ce7c47, 0x3dfd1f35,
- 0x4a11efea, 0x97f2f8d8, 0x7b077f62, 0xee611fb8, 0x6151e5c5, 0xee34576f,
- 0x390614f3, 0xfaa2fc60, 0xbf02f9f0, 0x1528eb12, 0x50fa6ffa, 0xe003844b,
- 0x1e3de43c, 0x9b58af5f, 0x62fe8b99, 0x04e8f263, 0x7e8f5b82, 0x67ee2ed6,
- 0xd67ee16d, 0x02c77b52, 0xbb994efc, 0xf883ec57, 0xbf09b2dd, 0x2c067c93,
- 0x378eef0b, 0x89bf7ac9, 0x3f714663, 0x7ef3d618, 0xf220f99f, 0x6e28decd,
- 0x361b4ecf, 0x43e50a30, 0xceb46667, 0x4facfc06, 0x4af7732a, 0xc6abf6c0,
- 0xc9a17b61, 0xe08ca7d2, 0xf42e88cb, 0xb8feee49, 0xfe8fec4e, 0x35b18f11,
- 0x87d806e1, 0xee950de7, 0x45067803, 0xcbc5c27b, 0xe5c5ef2f, 0x8d97bbb9,
- 0x0757b1e0, 0xaff5cceb, 0x760fce43, 0x180e82fd, 0x94d2c8ef, 0x30380fc1,
- 0x5d78531e, 0xfcff04af, 0xa33c92b3, 0xa23fe702, 0x68f07d63, 0x18ebb171,
- 0x1d71f02e, 0xf74a06c2, 0x80b7a028, 0x31e4def3, 0xa9f541d1, 0x15cc71c5,
- 0x9a9dbfe8, 0xb1bf541a, 0xffa83b34, 0x07fa33cd, 0xea689f3d, 0xb00bafc8,
- 0x97b67b67, 0x5a9f04bc, 0xed0a258b, 0xbe5a4fba, 0xb416e0f9, 0x4eaa3703,
- 0xef6759b4, 0x48c1f4dc, 0xf00675e8, 0x497e471b, 0x3ed1f907, 0x7a747997,
- 0x7e303e97, 0x50758a74, 0xd73ca2be, 0x68dc6c5e, 0xf29dadae, 0x78ae7f09,
- 0xe25ebcd1, 0x72cb9bfc, 0xa99e3f51, 0x79fc9f7b, 0xcfbe6635, 0xe21e2f53,
- 0xbd442ddf, 0x357f7402, 0xe2f688be, 0xc0fc1039, 0x0e3b45ff, 0x86cd9c51,
- 0xf7b44dc1, 0xf1e9e6d3, 0x942d64fe, 0x81c65fa8, 0xba018adc, 0x036368e9,
- 0x48de61b7, 0xe5db85a6, 0xbf90a6e7, 0xf7146f7c, 0x8c4decfb, 0xcc48efa5,
- 0xda86b86d, 0xd345ce6b, 0x1da96ff8, 0xfa2f28e3, 0x13e28505, 0x2da75313,
- 0x8decfdc5, 0x1db80ef2, 0xe7f89de6, 0xdc23c7ab, 0x7bf1b66a, 0x77d51302,
- 0x5941e7f2, 0xbd0d78d5, 0x7b65513f, 0x621fb015, 0xe342ddce, 0x001b444e,
- 0xcb50eaea, 0x5597a803, 0x3f9f6b63, 0x5a31bca0, 0x9ed4de48, 0x26f7a48b,
- 0xfdfd7114, 0x0c4f217b, 0x635d7c9d, 0xe7a24dcf, 0xbd7c2e92, 0xf7f282b2,
- 0xf51b090d, 0x4321bdf8, 0xaffd42a7, 0x4a9fd73d, 0xfa37dfb9, 0x2fbe91fd,
- 0x7af84860, 0xbe8de61c, 0xc8d8af11, 0xe3c8d12c, 0x62c375b3, 0xd5791858,
- 0x9e39cf3d, 0xc73a6c6f, 0x3bff8ff3, 0xe417f5cd, 0x9d4592c3, 0x31178c7a,
- 0xe10d8473, 0xd8305255, 0x67c01151, 0x8c9baeea, 0xeac31f88, 0xafa88db7,
- 0x0516504d, 0xf916af3c, 0xdd795a58, 0xfc83e422, 0x3fd49b8e, 0xd106792c,
- 0x4136ade7, 0x9e8f82a0, 0xdf2c7c16, 0x0e33a0ef, 0xf73b71e0, 0x79a6b7f3,
- 0xd13942de, 0x7c6ec851, 0x4767e49e, 0x7e69c606, 0x9e03f5ae, 0x1bb21423,
- 0xbeaf293b, 0xc3a1c0a4, 0xdd7bc0f8, 0xf3f3877a, 0x26d1f685, 0xca96fdf6,
- 0x6fb8e216, 0x1d97f2fd, 0xedaf182e, 0xd43cbcee, 0xcfde720b, 0x0b7bf17a,
- 0x1e75d6cc, 0x2fb587cf, 0xef903b47, 0x0ce1e227, 0x4d3ba2e3, 0x76816e74,
- 0xbed4778a, 0xef0a7a6c, 0xe15ad5bb, 0x17f30afc, 0x8ad912bd, 0x4af6dc74,
- 0xc7f51ea4, 0x9f3dbd1d, 0x5e8a7c21, 0xcf063be9, 0x9316ae8b, 0xe3fd919e,
- 0xb6f24a3a, 0x485ef587, 0x4ce9697e, 0x81a7a7f2, 0xa0994bf3, 0x7fc172d3,
- 0x6747c11e, 0x0f8892eb, 0x45822ecf, 0x91753c63, 0xda1b7032, 0x7d0b0e0b,
- 0xe5f24e97, 0x411f3c44, 0xb1fec4fb, 0xeaa18f74, 0x1f106c5c, 0x14cb9f56,
- 0xed471dfc, 0x05cadf92, 0x7d84989f, 0xb5d04331, 0x3f245bb4, 0x624f74b2,
- 0x6263ec2a, 0x95bdf03d, 0xc8f9fee2, 0x31ba7e4f, 0x80be90da, 0x022b926e,
- 0xedbea7f8, 0x8a768626, 0x744dde29, 0x40ca0333, 0xb766653b, 0x971b629d,
- 0x592feb08, 0x7bc3a996, 0x6a79f29b, 0x7fd1a5ea, 0x27c8aa44, 0xa0e6b730,
- 0x8ba86b5d, 0x9a7de274, 0x1e3cf133, 0x87057f34, 0xbb837ce3, 0xf98ced08,
- 0x3fe4284f, 0xe503fb43, 0x7503d0d2, 0xcabf40c6, 0x5106e74d, 0xb9ca7d3c,
- 0x2634a8cb, 0x38472e7b, 0x5ebb9d94, 0x8fd06be0, 0x710e3f8a, 0xc192bf71,
- 0xc86e7267, 0x4792bf44, 0x4f1f3c45, 0x0347b667, 0x546b7de3, 0xb8c98ff2,
- 0x9933f4a4, 0x804fdef0, 0xdf46fd3e, 0xa3df4198, 0x0a9dfad1, 0xb93ffeb9,
- 0xbffa40d1, 0xa19dedaa, 0x79fe783a, 0xf5092ba6, 0xa7837ac9, 0x6733f708,
- 0x0339efb2, 0xb83ee7d4, 0xe1dc3ad5, 0xbeb36787, 0x69f50735, 0xfd11c033,
- 0xd52bcd5f, 0xf76e3ef1, 0x1a38f344, 0xcf496fa7, 0x739f3a5a, 0x73e09b2e,
- 0x7a44ceb6, 0x2dd02ce8, 0x5243d00b, 0x2f3948ef, 0xeba48ff5, 0x5f973d6a,
- 0xca18e6d5, 0x9debf323, 0xa442ee49, 0x1f3c7ebb, 0x9e77afd0, 0xf902355a,
- 0xe53c74e0, 0x21bd4b5d, 0xc585e0f9, 0x244794c9, 0x03e492f5, 0x7ca5cf29,
- 0xf52f7497, 0xfd3fe8d6, 0xc3bfd6ef, 0x3cd293af, 0x7d02a577, 0xfae7ab5d,
- 0x85bd5aeb, 0xfc6c67e8, 0xcdd23d24, 0x43d1cbc5, 0x0a1e8e42, 0x3d35a392,
- 0xa94581e8, 0x1c5e7e52, 0x7c11cc1f, 0xedc4de33, 0x81f39448, 0x8da6b5e3,
- 0xb1f7087e, 0xd9eb953a, 0x3ddd6b9f, 0xa9d5ffc9, 0x97ff2697, 0xfc9c5ea4,
- 0xfe54efff, 0xbcad0a9d, 0xa3710fc7, 0xf3bd3fbc, 0x83ea1f72, 0x03f40c83,
- 0x6896c1ea, 0x0fa126c7, 0xc5445f48, 0x1f487ef8, 0xfe9ef614, 0xf5e1a9df,
- 0xff6c66c1, 0xd240fab1, 0xc91be497, 0xb42f9227, 0x62f9247c, 0xbce16fc2,
- 0x8b7a7888, 0xd6aa8fdf, 0x0e289b7b, 0x8c1e88e9, 0xc76dd9fc, 0xdf88536d,
- 0xe084f442, 0x04bf3f1f, 0x1fc90deb, 0xff245f92, 0x98fe0b54, 0xf243f829,
- 0xf9b56ec5, 0x0a9811d1, 0x08f9e690, 0x93e488e5, 0xfab5e7aa, 0x42ba47a1,
- 0x199b234f, 0xae90c75f, 0xa40ca1aa, 0xfc0f532b, 0xf512f070, 0xa48bbec2,
- 0xf3feafa7, 0x0f55f4f4, 0xa7e674f4, 0x1da853d0, 0x03be61fb, 0x1f3673d6,
- 0x9fa66de4, 0x8dfc26de, 0x9b21fb71, 0x46f6079f, 0x35ee7df1, 0x67ff93a6,
- 0xcf3cf7c2, 0x3303da57, 0xd241bf85, 0x01d81e69, 0xf8fc4369, 0xbcc0f3c7,
- 0x847b938b, 0x877c5367, 0x3327cf11, 0x03b40ca1, 0x537fb27a, 0xee4d2ed3,
- 0x17fe8a45, 0xb480f3c2, 0x1c6edd9f, 0xaf1e1690, 0x027ed303, 0xa262275e,
- 0xb22bf92a, 0x00a35c4b, 0xba890f3f, 0xc2094fb7, 0xf902faf1, 0x2e55b2fc,
- 0xd607ca3d, 0xf475ddfd, 0xcaa5bc77, 0x31fece7a, 0x8f099eb0, 0x1d1fbf32,
- 0x151d8f9e, 0x511fedd3, 0x717fbf98, 0x95fed65a, 0x451be7a4, 0x9ea74ade,
- 0x3d4483c7, 0x8096ea1f, 0xbea6817a, 0xeab7f796, 0x3e60593b, 0x928dd209,
- 0x0eecf1cb, 0xfa601fb0, 0x2fa8b3f6, 0xff430a6c, 0x86fc6d46, 0x5ca2cf57,
- 0x6c50a1db, 0xde0e1baa, 0xfa070481, 0x07d5383e, 0xc65d77ef, 0x78dbcdd8,
- 0x43b5f6fd, 0x132f7956, 0x1bdc06e3, 0x2e2b3432, 0x502847ca, 0xa199597c,
- 0x4d3dbef0, 0x1eb913ca, 0x0214f746, 0xaf9867de, 0x77ef2977, 0xd1674de7,
- 0x4ba6adfb, 0x5186ff98, 0xac6fefe1, 0xe4c6fd10, 0xfe7d84c7, 0xe1ca9a70,
- 0x3406fc2f, 0x694d343f, 0xff9f7f0e, 0x1d2fc109, 0x78213f18, 0xfe855a4e,
- 0x84a30eed, 0xc2157bb7, 0x41632c7b, 0x9f98347e, 0xbad71bea, 0x7cf41a4b,
- 0xbc0d2ebb, 0x77bf687e, 0x03db05a5, 0x5f069bf4, 0xf7801fa6, 0x77de61dd,
- 0xfadfbf04, 0x125af843, 0xdba0bef2, 0x5d8e9259, 0xa31ef441, 0xc67d1377,
- 0xb4362bc4, 0x91dc6ba3, 0xe0bcf12b, 0x12d2cfe7, 0x138c9b82, 0xb4dff084,
- 0xc8033192, 0x6fcf124e, 0xebfc855b, 0xe7f775d6, 0x1ae928fc, 0x5c0136af,
- 0xef07f5c9, 0xeb769cbf, 0x024ff42a, 0xaa73d05d, 0xadd603eb, 0x4d66c7e5,
- 0x7f86947d, 0xfc91ff05, 0x70007d40, 0x16f941ca, 0x08e8c87f, 0x044f93a0,
- 0x7c894e9c, 0xdcd18ef2, 0xe2c8ec9e, 0x3c6977e5, 0xa099f52e, 0x9e9253cb,
- 0x90e002a2, 0xf43f4416, 0xf60221f7, 0x29ba704d, 0xbdf3f25e, 0xbfc0c525,
- 0xfcfe761d, 0x66fed67f, 0x5fcd3795, 0xcff78ed7, 0x2096f7b5, 0xf7c7ba5c,
- 0x768fda2e, 0x6e024dc4, 0x67bdaddf, 0xc08077bf, 0xbbffa273, 0xf41f40ab,
- 0xa46a9e3d, 0x3e94250f, 0x2fa50d5e, 0xfa7de6af, 0xbd3d043b, 0x7950b7ff,
- 0xcfbcd2e0, 0x17107bf0, 0x5bc0beff, 0xe3af06b1, 0xa31d7835, 0xbaca97a9,
- 0x4afe482f, 0xe3be5cb9, 0xe312fbe1, 0xd0a9afd1, 0x3d3f2475, 0x3cf4483f,
- 0x77e926d7, 0x4fd5c115, 0x47ed0fcf, 0x2cdc9dfa, 0xf491b5e9, 0xc39424cf,
- 0x29f5fc23, 0xfea88728, 0xc5c60970, 0x91eafafc, 0x6fb72855, 0x1ffd9068,
- 0x2f37fea5, 0xa6ade393, 0x8c8f12e7, 0xeefa463d, 0xafa97ca6, 0x3fb9271e,
- 0xefbf85a7, 0xfeddff4c, 0x4687e41f, 0x6ad65c0d, 0xff4d5eea, 0x6fd017d5,
- 0x2fef34cb, 0xa95f3cd2, 0xd4d1afa9, 0x2fdf821b, 0x1f10a19b, 0x38f01596,
- 0xb52f96a5, 0xef4ae1f4, 0xf5ba73b6, 0xfb2662b9, 0x26af882e, 0xb5faf3d4,
- 0x1a4b1be9, 0x75f501cc, 0x5f384a9b, 0xf7016cc0, 0x7ae6419f, 0x4307a055,
- 0x20b3720f, 0xd9b907bd, 0xf9efab4f, 0xafe07ff3, 0x0a371429, 0xab764bb2,
- 0x5536f5c1, 0x75bbdbac, 0x810182ff, 0x4be7d5f1, 0x63dd5cf0, 0x5cdbc71c,
- 0x2ed02632, 0xae25cd62, 0xfeca7d80, 0x5efbbee3, 0xeb3cf9c5, 0xd1b25cfe,
- 0x114f718c, 0x12f3b4df, 0xfa8aa5f4, 0x6ff856ba, 0x2f1e61c6, 0x5aa293c6,
- 0x080fe673, 0xee6b27d8, 0xbb6cfb83, 0xb7dfe55b, 0x00fc07ab, 0x8398ca9c,
- 0x33a750f2, 0xb6a5e517, 0xfc2cc4c5, 0xf76f782f, 0xd442eadd, 0xc61707d3,
- 0x2bd35e51, 0xb17cfce9, 0xe277a63b, 0x45b7a84c, 0xb6f246df, 0x54b1feed,
- 0xcb5d52ee, 0x2d8c6cbb, 0x894af75e, 0x1e7c72f9, 0x3bba7043, 0x2592a5fd,
- 0xa6f7ef40, 0x8f7de83b, 0x03b896dd, 0x0c07eded, 0x0e043714, 0xe7e89278,
- 0x3c60d341, 0xe92d976f, 0xf2b1714f, 0xc38d26c7, 0xf7e5a47b, 0xd25e2819,
- 0x9f91b3a9, 0x58c0f3c8, 0xb19a93ca, 0xabde944c, 0xdfe57ca9, 0x7bc7ac7f,
- 0x14eb5da6, 0x55cd4efa, 0x6dff375f, 0x1b2bd410, 0xc67ee9b2, 0x9474e7cb,
- 0x9ae9fe17, 0x53cc3ebe, 0x47591c3e, 0x9fd0a5f9, 0x9e56bbee, 0xc92fec77,
- 0x0daafec7, 0x428e3853, 0x47cfda3b, 0xe14fa857, 0xab32ce3d, 0xbbf30a25,
- 0xc8c77e8f, 0x3ef473af, 0xe6ef7d13, 0xd6e7b353, 0x8f74f135, 0x767d2d09,
- 0x15587154, 0xf4dac380, 0x29c925fd, 0xf8126ed8, 0x032d6fdc, 0x75d32f3e,
- 0x93e90fd3, 0x5f5e588a, 0x92cdeebb, 0xa38fb04d, 0x7ca5f44f, 0xd70c84b1,
- 0xd78074ba, 0x93d70ce1, 0xd9fb8a54, 0xc79bfcaf, 0x593a5f25, 0xb5f4889e,
- 0xcbf5d059, 0x0313cae7, 0xc5416eff, 0xc1d13d29, 0x02c7462b, 0x5469eefa,
- 0x6ce782ba, 0xf43883a0, 0xe710cf9b, 0x85e7a016, 0x57d8c35b, 0x3db9d34b,
- 0x84dbf6d6, 0xba208e51, 0x29e498fe, 0xfea0379d, 0xa447fa62, 0x16988ee7,
- 0x77752e62, 0x1ddb243e, 0x34327049, 0x3322fa45, 0x1aea7fd1, 0x4ff3a2e7,
- 0x96c99f73, 0xfd143e00, 0xfcf2f13f, 0x3fa04fdf, 0xf13efb9e, 0x604b3ffe,
- 0xaff6433c, 0x68bc5ab4, 0x58b31c5c, 0x838f88f4, 0x3147c5fa, 0x9d62ad3f,
- 0xc540f481, 0x5d4584ba, 0x6e807f28, 0xdda6bfd0, 0xc0c32997, 0xc63d18fd,
- 0x192f3f57, 0x5e781693, 0xbbf24139, 0x377cbe27, 0xc6c944fd, 0x65f33d01,
- 0xa7cb8da5, 0xf7f8eb71, 0x58872b75, 0x7bbf74f4, 0xe89babdd, 0xc8bcf01e,
- 0x5157bc5c, 0xf844ceb9, 0xbc58b4ea, 0xbef12bb6, 0x0b1f4581, 0xae74a1e5,
- 0x0e19f54f, 0xca9147c2, 0x54b911f4, 0x190a3dd2, 0xf0b13c3f, 0x1d31c7d1,
- 0xbbe673f3, 0xefc0187e, 0xe9d38b8c, 0xf92a6d99, 0xc4f1b51f, 0x1ab7d7ea,
- 0x888fd90b, 0x4710f627, 0xe29d62ac, 0xe3e1e2bb, 0xe238da89, 0xbdc5d2bf,
- 0xa238eeb3, 0x5187be81, 0x4588e229, 0x7f5a156b, 0xbdfe42e5, 0xee38a292,
- 0x18b23e3f, 0xed2fa0e8, 0xc5cb6bdf, 0x7fae693c, 0x8be2992a, 0x5eaf5f80,
- 0x99fc8f3b, 0x5e1ce975, 0xb03be265, 0x199d2387, 0x58afde78, 0xae704917,
- 0x07f6727c, 0x7de3e44f, 0x81f189c0, 0xa09b6be7, 0xe19f180f, 0xce39683d,
- 0xd0f725b1, 0xd68b6777, 0xf31939c3, 0x73d963b4, 0xde226537, 0xde303252,
- 0x2e55fb29, 0xef0fbbbd, 0xd86ce707, 0x950ad977, 0xbb6b0f7f, 0xf021f489,
- 0xb80b327c, 0xd9633e2f, 0xd72346eb, 0xa8792c67, 0xfe1db3b0, 0xc29f6cfb,
- 0x4104fc73, 0x26b7b8ad, 0x5bef27e1, 0xf7f8e995, 0x0a754b36, 0x8902c3dd,
- 0x7e97f746, 0xa3fdc191, 0xcedc1979, 0xd270cb2d, 0x55ea9e3d, 0x4cd2e726,
- 0xbdf74e3e, 0x55e73875, 0x282a3aeb, 0x1317f9ae, 0x976a57f9, 0xad22ba45,
- 0xf9e31f3c, 0xad779401, 0xaa3c0237, 0xfc6e5c1c, 0xad63d042, 0x94d5d263,
- 0x0767ae9f, 0x5607eff0, 0xac9cb85b, 0x9bb8058e, 0x86e90139, 0x1939f7e2,
- 0xb8e253e5, 0xe6028329, 0x8c3b4457, 0xc7fab0e3, 0x0d63abdd, 0xfec1a7e8,
- 0x38420f97, 0x237f5dc6, 0x9559efae, 0xb4801af8, 0xbfed00aa, 0x3c3cd567,
- 0xd8ad9777, 0xf0e50e3d, 0xf1832ddc, 0x7b2b0546, 0x23d25dee, 0x3bfe0573,
- 0x395a3e45, 0x81bbf1d4, 0xb9706437, 0xdfb951e9, 0x2f0106e8, 0x79f20749,
- 0xf7030af5, 0x47bc83e3, 0xe4bd5301, 0xdab71a43, 0x728891d2, 0x81bb7ab8,
- 0x8cdf87ee, 0x973806eb, 0x751f492f, 0xdcaae800, 0x51df4310, 0x344ef2ad,
- 0x51aabd62, 0xbdffbaa1, 0xd3a40c84, 0x60bd962f, 0x1c39fa45, 0xfa839ad9,
- 0xa45e6ba9, 0xda29ee93, 0x8beedfb8, 0xaf743965, 0xc3ad8669, 0x9965df82,
- 0x8edb20b1, 0xada0fc72, 0x0fcf88d5, 0xb0ea6736, 0x46d9b2ee, 0xbfa5dd61,
- 0x885fea92, 0x41fa177c, 0x9e3852ba, 0x1b769aab, 0xdd2feff1, 0xd4e2e82b,
- 0x41f6efb3, 0x4675503f, 0x2fd41f47, 0x527d0740, 0x28cfce11, 0x9a6b9fa4,
- 0xa4bc1e78, 0x03a41a83, 0x8239bbe0, 0xbdb66a0e, 0x43f21770, 0x373fe20d,
- 0x9e808e94, 0xf9fda9db, 0x7f18e30d, 0x44e91dbf, 0x2fa833ea, 0xce3fa033,
- 0x058bebc8, 0x0c7da1fe, 0xe00ef76f, 0xf9dd75f9, 0xd07c4108, 0x66d77e13,
- 0x4c3e04e8, 0x8ad77724, 0xd8ae76fd, 0x9dfc456e, 0x1ef7767a, 0x3f54b78c,
- 0x11dada0f, 0x3c041f86, 0x56ff716a, 0x19d93f5a, 0xab5fb8b5, 0x74ddff7f,
- 0x96b0f82f, 0xfdc9fdf1, 0xfec5ead6, 0xfef173e5, 0x2acfb13a, 0xadb5e026,
- 0xc13be72f, 0xf4c7c867, 0x3fb121dd, 0xbbf83d8f, 0xfec5bbba, 0x6cc9449a,
- 0x0f8456cd, 0x0fe517e2, 0x28cb8fd0, 0x397409e5, 0xe50365b5, 0xc97c4bf7,
- 0xd7efbff5, 0x93fc2e2b, 0x9d8f1254, 0xb6f77c3d, 0xa1c9f045, 0x763292fb,
- 0xd8befc00, 0x240d9f4c, 0xcc07d57a, 0xce46e927, 0x49a7f457, 0x61bee2d7,
- 0x5f1c56fc, 0x1bd07bce, 0x0e71c7ad, 0x721fce32, 0x8b2f92fd, 0x3b5da7ea,
- 0x4efd8ad8, 0x5ed1b259, 0x3f7c7811, 0xbbef46c3, 0xf7806ed0, 0xf2143b5d,
- 0xe7121f5f, 0xbdf743fd, 0xe01f2d60, 0xc577f7a7, 0xe19d25ba, 0xe4c9fa0f,
- 0xb9daf77a, 0x6ebb583f, 0xbae48729, 0xe1bfee8b, 0xeb976c5a, 0x2051f8c7,
- 0x4a384a9d, 0xfaf7957a, 0x344b74b4, 0xdaaaf527, 0xa6d77d33, 0x9dfca21d,
- 0x78ed7690, 0x2c3a3ec2, 0x9ebbcdf2, 0xe77df956, 0x41edc965, 0x33188ef7,
- 0x259fea07, 0x15b6aef3, 0x49b73c62, 0x521e7ba1, 0x4acfc0af, 0x923e807d,
- 0xdef0c1f6, 0x36a57f3c, 0xf10275de, 0x63bc7559, 0x44927e1c, 0x7d57a5da,
- 0xa0c6aadd, 0xcf1b6aff, 0x53749383, 0xf89db275, 0x93dc5aa1, 0xeef15b2a,
- 0xf74861c4, 0xce281b4f, 0x58df7653, 0xb3fbda23, 0x8a1f4d37, 0x127bc0e0,
- 0x6c4fdf28, 0xe047921c, 0xebc4b661, 0xc6c4bef1, 0xbfc7af47, 0x87633a5f,
- 0xc1a1df4a, 0xcb8f9071, 0x7f23c8ee, 0xcec8e1eb, 0xaed02389, 0x436ab5ff,
- 0xa6e47ebb, 0xffb08b21, 0x58ef4b48, 0xc6d078fa, 0xf4bbaa38, 0xa427a431,
- 0x03f32c1d, 0x90bdc5eb, 0xaf9cdeec, 0x8a97757a, 0xefac0bc8, 0xaef9f183,
- 0xddf4910d, 0xa3a352a8, 0xdbe715b9, 0x8ee74499, 0x2526385a, 0x2b9ee9db,
- 0x8cb369d9, 0x9725222c, 0x453023da, 0xc74375f9, 0x5e4fa81d, 0x1832c3bf,
- 0xd4cb8bbf, 0xb9cb43f3, 0x90e3cd7d, 0x61dfc171, 0xeac8eb92, 0x3b57e9cd,
- 0x2f7f7c9e, 0x74ec2b9e, 0x3fde919f, 0xebe9ecb1, 0xd89e1f51, 0x7dc7639c,
- 0x4919db1f, 0x198e0bf7, 0x78e7bf82, 0xb4c2f7a9, 0x3de4a9f7, 0xc1defcd7,
- 0xfba49cf6, 0xb9cbde0b, 0x98f2d2ec, 0x74662e4e, 0x065cfc4f, 0xe8e4fee3,
- 0x7e62b6ef, 0xc9a34561, 0x8e64f786, 0x7c63fd20, 0xdf4abb6b, 0xdfbba201,
- 0xdb23af80, 0x703ee8f3, 0x5ebcc7c5, 0xf0f8acd1, 0xc3fb72fe, 0xc2fe53f7,
- 0xf137b04c, 0x3db7796a, 0xebe1fabd, 0x38ce0d91, 0x3bf1cb3d, 0xf3366700,
- 0xef908b7c, 0x75bebc3a, 0xffe38a4f, 0xfd2cfdbf, 0x077bd313, 0xf09347df,
- 0x57aae796, 0x720a2e80, 0x72fbf0fd, 0x9fb22cf1, 0x50f62e4f, 0xb4395a79,
- 0xb3d2246a, 0x15ee8625, 0x51e3b4bc, 0x5373bf15, 0x79f06dbd, 0x308f1f3c,
- 0x9ff7d0c7, 0x90bc5cbe, 0x17279a82, 0x8a3f89d7, 0x15f854b6, 0xb55e547c,
- 0xfbde8dad, 0x0b5e4772, 0xd97debde, 0x9ac5c31c, 0x3be820ab, 0x3dbf1299,
- 0xfb9657dd, 0x1cd7fcfa, 0x9fdd72cf, 0xc56e9e6f, 0x21fdb57d, 0xe23865ae,
- 0x86c63a37, 0xc8a56076, 0x86ba392f, 0x23c7d9db, 0x18abfe62, 0xed7c7cf0,
- 0x0efc4494, 0xa9b6ccd1, 0xc57b63d7, 0xe786c54e, 0x49cee703, 0x7dcf3c56,
- 0xe2b4efa6, 0xd6cfaa3d, 0x3e57fbc8, 0x237bbbfa, 0xd829b3c6, 0x9eab447f,
- 0x84293239, 0xfc32f44c, 0xcbee9ea4, 0xe4ed017e, 0x451f393f, 0x89f813fe,
- 0x99927cc3, 0x4bbf722f, 0x9c57f9f7, 0x7d8a46ff, 0xcbb6f7b7, 0x17b506f8,
- 0x63f6f015, 0xd096b76b, 0x68de4fdf, 0xbfbae1b0, 0xf1a068dc, 0x4db9c0e7,
- 0xbe3deb07, 0xb0839f99, 0x13e8e78e, 0x6779f99b, 0xef3f334e, 0xb833cf54,
- 0x60d2fdf8, 0xe80a2cba, 0xeb2ddf47, 0x7323bbe1, 0x737f7c5c, 0x7ee27f40,
- 0xb1bf442f, 0x9fee99ac, 0xa93e6a5d, 0xaaff7e96, 0x74dd1791, 0x08bd13db,
- 0x23ff22b8, 0x98ba4add, 0xf5c786b3, 0x0b83cded, 0x9d24fe91, 0x5803bf68,
- 0x3bfc646f, 0xfa28bab2, 0xb324f7ee, 0x62fa80c3, 0x18e77c4a, 0x075f4f04,
- 0x916aaf97, 0xe4f785ce, 0xec29bd58, 0x15dec477, 0xbc1a78f2, 0x7abbf74b,
- 0xcff7f8db, 0x59317dc4, 0x474abe82, 0xda293e7d, 0xba038dfe, 0xf2ffae74,
- 0xf5d03d3a, 0x3bf691a4, 0xd7809db2, 0x7af35ff5, 0x167b7d9e, 0x7def3fd4,
- 0xec3d3af6, 0xf695d26f, 0x2fa80621, 0x75cdf259, 0xc9b9e063, 0x3bd83ae1,
- 0x9b139e60, 0x7f71d292, 0xdb087ed0, 0x8e7a27a7, 0xee2b5960, 0xba569d8f,
- 0xf4cc2d06, 0xa77b7ff7, 0x0e4be0e5, 0xc0d3bd7f, 0xa73bbbe8, 0x87f5cb7b,
- 0xd70e9d2f, 0xa109dea9, 0xdd8ce5ed, 0x5e7cf947, 0x7e076f7e, 0x788911dc,
- 0x5dd38aaf, 0xd3c7bcb9, 0xe3c7b9a0, 0xe4d5f7e4, 0x7f792afd, 0xfdff72ea,
- 0x7297b5b3, 0x00ffecff, 0x4fb2f369, 0x00008000, 0x00088b1f, 0x00000000,
- 0x7de5ff00, 0x5554740b, 0x55b9e896, 0x2a493eb7, 0x54842549, 0xaa84a925,
- 0xc0902b7c, 0x310c7c25, 0x0403e548, 0x6a285888, 0xa205a0d4, 0x01148280,
- 0x55f4741d, 0x9a7c3061, 0x179f8ee9, 0x102d4622, 0xdd787195, 0x866db1d1,
- 0x3220538f, 0xb40e9afc, 0xd38ceb63, 0x68d1d01d, 0x38311b63, 0x6f360cf4,
- 0x4dce7def, 0x802a56ea, 0x6f7be7be, 0xf65e97ad, 0xee739f61, 0xffb3ecf9,
- 0x5f5bdf67, 0xa5eb2c19, 0x0f24c664, 0xa153ab63, 0x63595a74, 0xff7b9419,
- 0x68d2cfe9, 0xac839ac6, 0xc18f2e3b, 0xd3f5a35f, 0x7ccf5051, 0x2e504bba,
- 0xcd1a484b, 0x139960c6, 0x996fd062, 0x12aeb189, 0x6d3b26e8, 0x12d8c2cc,
- 0x019dfe09, 0x19caf9ff, 0x956c674b, 0x44edfe15, 0xf08742b8, 0x7f466683,
- 0x3f98059f, 0x1fd8c0df, 0x3925744f, 0x9d9d8cf5, 0xbb0c2e1d, 0xbc65fb18,
- 0x009ce6cf, 0x8ecd1ded, 0xbec634a6, 0xd4a4c37c, 0xd09eff43, 0x60f927df,
- 0xe67aa5fc, 0x6d9284ef, 0x24d8ce1f, 0xc3ea2f28, 0x61dfa053, 0x8db6f157,
- 0xa9cbbcf0, 0x3f9e0c63, 0xfce70aeb, 0x82c678f5, 0x32f2932e, 0x32777ea3,
- 0x9f1eeb80, 0x9dfb1c3e, 0xfe5d7d7d, 0x936eb03d, 0xec1258cc, 0x598c067b,
- 0x0030780e, 0x07c32fac, 0xb40e3442, 0xbea09307, 0x0e74f5c5, 0x2f307c23,
- 0x0dd8c89b, 0x06ebef8c, 0xf11fbc39, 0x196494c5, 0x6f9b37f7, 0x3283db0f,
- 0x3333a38c, 0x98bbae03, 0x4d3f5875, 0x8eb19800, 0x7f60c34b, 0x3263c066,
- 0x24c78096, 0xa1eaa6c6, 0xe9afac13, 0x0990fa97, 0x7884d7d6, 0x85d07014,
- 0xd72c7a23, 0xc58bf8c3, 0xbc8ecbbc, 0x58c1c46b, 0xeb07fe10, 0x5de62259,
- 0x3d7771dc, 0x9092cb9e, 0xc0b74ce1, 0x7ffa25f5, 0x72c79d0f, 0xf2feefd1,
- 0xe29c46ad, 0x7eda1dfe, 0xafa6d9cb, 0x52fcf0f5, 0xfdc46dd6, 0x6a8ceb2e,
- 0x0cf9a8ef, 0x977cbbd7, 0x99debeb6, 0xc2748698, 0x97b1b2c6, 0xe74d54f4,
- 0x99f448bd, 0xca746faa, 0x66e19fb8, 0x371304c5, 0xf4479f3d, 0xb00cdec2,
- 0x63ac7ec8, 0x8b4fd118, 0xb1e74f4b, 0x9cc49764, 0x7ebb18e3, 0x42731657,
- 0x61aefd53, 0xc85d2654, 0x5fcffaa0, 0x57de3639, 0xd75e7032, 0xc6ab6abf,
- 0x6aff5df5, 0x3aea9511, 0x4e1d049a, 0x867497d5, 0x2ce71d61, 0x9b800eb0,
- 0x8162c08e, 0xd66e9a7e, 0x99e11887, 0x57bfb199, 0x596bc72c, 0x424fa5df,
- 0xfd8a0e58, 0x3a24974a, 0x1f3c6484, 0x433d61ef, 0x031e627a, 0x79993bb5,
- 0xa05ca5cd, 0x398ebb1b, 0x24dfe063, 0x19ce2fce, 0x81ee9ccf, 0xfc583976,
- 0x87584ab3, 0x0941ae61, 0x5c737b41, 0x33e436d2, 0xe574f416, 0xe20d73c3,
- 0x4e38aeb9, 0x54ee0937, 0x1d2af3cd, 0xadfa2adc, 0x18769a4b, 0xb3c1b2e9,
- 0x5121e888, 0xc7acd4c9, 0x406a5fa4, 0x67d5b9fa, 0x623ba4f8, 0x585fa21c,
- 0x295e0dc7, 0x7378fc84, 0x50ddb683, 0x44ad75f9, 0x8e47d425, 0xfbf6d5e7,
- 0xd003d229, 0x717e103b, 0x9c0c3d24, 0xc3a3c583, 0x1224f073, 0xb9cccbbd,
- 0x5be012b9, 0xee181b0e, 0xf7cf14df, 0x97310e79, 0x0f80fd86, 0x8875e260,
- 0x2e4fa817, 0xc537e2d7, 0xb0e7c5a3, 0x867e2d3a, 0xb7fbb57b, 0xda6ae435,
- 0x35237c33, 0xcb8b59ed, 0xbfb67034, 0xc47fd342, 0xec0d6aea, 0xf4d4ce0a,
- 0xa37f5bcf, 0xbd682e06, 0xa8bfd35d, 0xbda6817d, 0xa69f7438, 0x268ed47d,
- 0xf9da5c0d, 0x98ffa688, 0xda6b8f5d, 0x6a3786c7, 0x7e1dc7da, 0xe84f034a,
- 0x7fe9a2da, 0x34db07cd, 0x5fba93ed, 0xdb5fb4d3, 0x9e0686f3, 0xd35bbbdc,
- 0x0385ca7f, 0x1d8ab81a, 0x31aff4d3, 0x4f0356ff, 0xa6abfeb5, 0xc7fb74ff,
- 0xce19f69a, 0x67da6a3f, 0xd2d1bfb9, 0x93973c6b, 0xa5ff2bd7, 0x9359ee79,
- 0x5f50dfef, 0xdd954b34, 0xebf48641, 0x35ba3e24, 0x0184a74d, 0x972a83fe,
- 0x1c9e1d04, 0xc1392561, 0xb78e59f2, 0x32b0e914, 0x6e7c7c8c, 0xe0f24497,
- 0x28bd28ab, 0x91ebd1ff, 0x4afd9da0, 0x22765e52, 0xc45d41dd, 0x331e29fc,
- 0x39d62393, 0x81aaceac, 0x9aed7b87, 0xa706b6fe, 0xe7c33da6, 0x2d67b4d6,
- 0xb6703456, 0x7fd35cbf, 0x068f6ac4, 0x34eb0576, 0x7bd6f3fd, 0x6b417035,
- 0x517fa683, 0x5ed34fbb, 0x69ac5a1c, 0xafc3b51f, 0x573b4b81, 0xd98ffa6b,
- 0x8fb4d415, 0xb4d7af0d, 0xaadc3b8f, 0xb5742781, 0xf35ffa6b, 0x3ed34841,
- 0xa6877ba9, 0x4e9edafd, 0x77b93c0d, 0x94ffa697, 0x5c0d610b, 0xfa688ec5,
- 0x6a4f98d7, 0x0fd6a9e0, 0xdba7fd35, 0x67da6b4f, 0xb4d73f38, 0x2cd076ab,
- 0x7adad7f7, 0xaf5d1761, 0x7cf359fc, 0x90c3dab0, 0x486f823e, 0xd613b34a,
- 0x3fe102eb, 0x777ce49c, 0x28ed1e9c, 0x8c14182f, 0x41ad2901, 0x420c92fd,
- 0x480ae90c, 0xe2a6358c, 0x20ac4028, 0xc15549b7, 0x4f68c9f3, 0x73aa9000,
- 0x1fa0fcb9, 0xeb5ad813, 0x1bb266a7, 0xdf40971c, 0x29bfc25d, 0xa0944b83,
- 0x85ebaa9f, 0x4ea166f9, 0x726f3a02, 0x582e3cf9, 0xaf9d7dcf, 0x6862cb4f,
- 0x705b0427, 0x9307a01d, 0x9e6f41bb, 0x79776388, 0x378f064b, 0x89780cc3,
- 0x5c48ef98, 0x32cca3ab, 0xcc33fcf4, 0xff7fa967, 0xae3e06b8, 0x8a6bfb04,
- 0xa0a7ff18, 0xfbb065ee, 0x37c0035a, 0x153d8c05, 0x4cfc12b0, 0x5b704ec0,
- 0x7b6549c0, 0x96e54dc0, 0x1ded4280, 0x5f827281, 0x0e087808, 0xdca8ea05,
- 0xfd52f016, 0xc10340f6, 0x547c04af, 0xa62c08ee, 0x9f80b5f2, 0x560677da,
- 0x40f3fc13, 0xc0ceca90, 0x237faa7a, 0x9bf04ad0, 0xdf827681, 0xdca8840a,
- 0xe541d815, 0xb52740ee, 0x22ec0def, 0x9840edf8, 0x30e070e0, 0x5d0207c1,
- 0x7c0c1f04, 0x40a1f040, 0x03879537, 0x0d1e543d, 0xf1fb52f4, 0x078205c0,
- 0x576b4bce, 0x2e576133, 0xd2c07412, 0x2f793db8, 0x85f6523f, 0xbb462d8e,
- 0x43a09177, 0x87983543, 0xcc867740, 0x2ecd6dc2, 0x13a06a3c, 0x71c9d137,
- 0x96af2fb4, 0x4070d04e, 0x33560d7a, 0x5a9bd237, 0xc9e954b6, 0x90ae0cfe,
- 0xbb732f42, 0x336feb88, 0xb537f553, 0x85283e37, 0x079b9ed0, 0xf811b276,
- 0x9f6123e6, 0xe5c7147c, 0xc1bf39f5, 0xa35f768d, 0x66c0af14, 0xb277fe01,
- 0x8c2fe03b, 0x5ed77fa4, 0x9fb456d3, 0xc235f5d4, 0xe7183a38, 0x5a2eecbf,
- 0x6c3b2357, 0xbd40f5c0, 0x72fc0f47, 0xe2dbe6c6, 0x2fbeba02, 0x2234175b,
- 0x85425913, 0x8cc644de, 0x48f3df76, 0xf7fce7e7, 0x17c1c21c, 0x9c429559,
- 0x78537ae7, 0x7f30adf8, 0xb2bd11ef, 0x5e3439cd, 0x00ceb796, 0xf6997de1,
- 0xa0773fb7, 0x17f75f1d, 0xe1cf0fbd, 0x21b12184, 0xe9dd7404, 0x3acf8892,
- 0xd94d3a5d, 0x02fdf766, 0xa26df9d7, 0xa01d4eff, 0x56ebdbf2, 0xb612b2bc,
- 0x20a2b8d4, 0xf055ed19, 0x5f680c0f, 0x034e61cf, 0x989b87ca, 0xea1c5de7,
- 0xf823e666, 0x52a41656, 0xe176f9b6, 0x21636ebe, 0xa6157d82, 0x8afb589c,
- 0x383bd75e, 0x752c70d8, 0xdd90f29a, 0xfbc70077, 0xeb43d136, 0x7a69313a,
- 0x5d4bee21, 0x49c33997, 0xdeb366fd, 0xdf7d7017, 0x53fafbee, 0x9d306f29,
- 0x61f488fc, 0x3fb06981, 0x3779c233, 0x38e0ef03, 0x214b0fcd, 0x3e9573a4,
- 0xa74cc11a, 0x7a851ff1, 0x805e9229, 0x4b7fd04e, 0x9399cdeb, 0x4dd2f448,
- 0x3e926ff2, 0xfa7b0468, 0x28542e84, 0x8c4e89e9, 0x5173ac12, 0xee49d01a,
- 0xc0f0f4d0, 0xe423287a, 0x17d9d020, 0xfe9fde38, 0xe2371ae1, 0x93dbf96d,
- 0x66b7889c, 0xd0079c1d, 0x9ea8f073, 0x442604ec, 0xd2dafebb, 0xb207f910,
- 0xc529cca2, 0x7172e373, 0x7ead9ebf, 0x4ddc863d, 0x74f4e5c8, 0x0ba86ec2,
- 0xef9cb8d1, 0x2e7d76d5, 0x2e7d473f, 0x15a6df3f, 0xc3d52d9f, 0xf81c4ffa,
- 0x8d2ce8dc, 0x3fd58fb1, 0x823f2879, 0x3aef97ae, 0x7e83cd3d, 0xb92eeb17,
- 0xbea07131, 0xd2abcc50, 0xe898de91, 0x5c896adb, 0xc75d3f57, 0xa75d22e7,
- 0x11e75d00, 0x768a7f5d, 0x33936cf9, 0xbb2856f9, 0x28613501, 0xc95a2f7d,
- 0x14c05c7f, 0x2f32172a, 0x0c808b95, 0xc1bd8e90, 0xf7888d27, 0xd52758fb,
- 0x777e503f, 0xbdf5d23a, 0xd9f91f3a, 0xd517594b, 0xb5bd672f, 0xaf37b478,
- 0x1daef35f, 0xeb537d56, 0x44915393, 0x7fcd0c6f, 0xdb39cb17, 0x4e834fa5,
- 0x5cec93e2, 0xe4b747c0, 0xe613227f, 0xd5677dbf, 0x3f505913, 0x85cfcf5b,
- 0xa2e7e31d, 0x7de891ca, 0xe0145f03, 0x78a6dff3, 0xa4fa5f68, 0x345f0d3a,
- 0x123cce3e, 0xcfbbd38c, 0x8ae6da14, 0xfbf293e0, 0xf28590ff, 0x13bdee4c,
- 0xdbf97bcf, 0xef3c54a6, 0xfbd718fb, 0xaf8d4792, 0x119efaa8, 0xe2cfdfbd,
- 0x85f7ec15, 0xb22fefa0, 0x17f7d119, 0x658a3812, 0xcb287603, 0xf2cbd9f1,
- 0x72f6f406, 0x88d1cedd, 0x95ebd027, 0x7df70e78, 0xd632d9dc, 0x9420fa85,
- 0xe1887683, 0xecd29335, 0x7617e8d2, 0x8d850129, 0x7f3e219f, 0x198ee38a,
- 0xfca92ebc, 0xeed19fd0, 0xd0591930, 0xce7c465c, 0x677f2226, 0x9abf891a,
- 0x3879b511, 0xb7e9d78f, 0x84ff1e0c, 0xefe1ef58, 0x841d3fb8, 0xc52cbcfd,
- 0x2feb879a, 0x3cc0fc53, 0x19d24724, 0x2e4773cd, 0x3e7563e7, 0xa076e966,
- 0xc1347e9d, 0x8f9e3b77, 0xfd52471a, 0xcfaec3c8, 0x667cf1f2, 0xe5506c57,
- 0xaec78d33, 0x84e972e3, 0xf84419c1, 0x014b9544, 0x6f2fd609, 0xa7ff286f,
- 0x371d700f, 0x8c612d98, 0xc34ab077, 0x35ff7ed9, 0xdf1f4077, 0xcae3eaa0,
- 0x1a8ef8a8, 0x018a61f7, 0x3ecc57d7, 0xf141f152, 0x0e4e551d, 0xb1b0bfd2,
- 0x889fc42e, 0x79c7f597, 0xce3fb9fe, 0x8d79c3a3, 0xe6328e42, 0xf8237b77,
- 0xae00d6d4, 0x307fc68b, 0xff8d7cb3, 0xd9c0d560, 0xffa6bb7e, 0x4d4ed588,
- 0xd6e82bbb, 0x57ade7b4, 0x6b417034, 0x517fa6b9, 0x170347bb, 0xfd34ea87,
- 0x6af0ed47, 0x06ced2e0, 0xbb31ff4d, 0xb1f69a7c, 0xf69ac5e1, 0x1afd8771,
- 0x6ad74278, 0x3e6bff4d, 0x27da6a08, 0xb4d7af75, 0xaad3db5f, 0xb6f72781,
- 0xb94ffa6b, 0x57b4d210, 0xb4d5bfb1, 0xd75f98d7, 0xf1c62fc0, 0xeb54e778,
- 0x9b9e683f, 0x81afdf6e, 0xf1e7885f, 0x23ce199a, 0xf9c5e7da, 0x278b6f06,
- 0x3ee5987e, 0xffdf69a9, 0x8dcdc8c8, 0x47a12fc0, 0xb67fe474, 0xc53d71d5,
- 0x1d39aa1a, 0xd7bef1c4, 0xe66a1f2b, 0xbaf7d1cb, 0x7a9d8e90, 0x75f2266b,
- 0x039cf97e, 0xd0ac7640, 0xc51aa2b3, 0xc9eebb9d, 0xf48e5803, 0x6c8d5ebe,
- 0xf270d253, 0x7da39600, 0x1903575f, 0xca716533, 0xf996583b, 0x70d8d6c7,
- 0xbce7ab89, 0xe0c4e583, 0xb3941a8a, 0xb17e2660, 0x3e7dd608, 0x846d9238,
- 0xaa652575, 0xd10f5e71, 0x1d0caaed, 0xdbaaf9f1, 0x4b37142c, 0xa322d6bf,
- 0xe81cdf3e, 0xec10791a, 0xd3a7f4dc, 0x69dc62d7, 0xa1f9d83d, 0xc017b022,
- 0x59f2c7ee, 0xe043ebb0, 0xbb046d1c, 0x604dcb1f, 0x73fd63f7, 0x9c23f760,
- 0x7ad974fe, 0xc9d56e7c, 0x6172ea7b, 0xca0f11fc, 0xd7533456, 0x0e2b7539,
- 0x0fec7ac0, 0x4b28ad1a, 0xe267ae8c, 0xec8eb09c, 0x0a8eb065, 0x51078b42,
- 0x3052ecfd, 0x1c39321c, 0x2becfd07, 0x1db81a74, 0x2eb045ff, 0x97cf8b58,
- 0xaedee93e, 0xb6f9f630, 0x8a1937b3, 0x5d799f61, 0x014d6db7, 0xe6799738,
- 0xf1d3be02, 0x93fc602c, 0x09cd8f79, 0x97860c96, 0x48ff7a14, 0x2cee4972,
- 0x16b53e46, 0xeb81947d, 0x85fd50da, 0x3ce2b3a2, 0x0839cc8f, 0x787e7eeb,
- 0x23e758bc, 0xaff2aa73, 0x79cf5c66, 0xef50ef94, 0x47218cef, 0xffe853fe,
- 0x2f5c7993, 0xe5159fc3, 0x08938d93, 0x949d81b3, 0xf376f20c, 0xd1a7d5a4,
- 0x41ad90a7, 0xbb23e509, 0x144fcf08, 0x7823c828, 0x1e44fc4a, 0x537947b2,
- 0xe733e454, 0x1f3678c1, 0xf1914ed8, 0x785ac5b1, 0x19e79c33, 0x0c7ac0c3,
- 0x3282e977, 0x92f0c1ec, 0x9e605157, 0x5fb0798e, 0xb51b3780, 0x8e9165fe,
- 0x9f717d91, 0x378beeba, 0xdda76829, 0xf0f5c797, 0xecbb8096, 0x7ffa0313,
- 0x4027d94a, 0xfd8b9947, 0xabe40f92, 0x04abd1ad, 0xd2e19fa1, 0x2237efdf,
- 0x3ac5df61, 0xfd639f51, 0x27bb089a, 0xb86b8c47, 0x19bc27a4, 0x6a4abd9a,
- 0xbd06fcc4, 0x618cefe4, 0xc9e69577, 0x4666b9fc, 0x1e7786ed, 0x5f309c96,
- 0xba486366, 0x4bf1c687, 0xc2dbd40a, 0xb1ab3ebc, 0xf0085a53, 0x3a4e791d,
- 0xac9c7507, 0x08feb0c5, 0xa0e5d93c, 0xede78564, 0xc425068f, 0x55eadd83,
- 0xdae22258, 0x52bdfc4b, 0xf57db4f0, 0x864fb2c4, 0x8abdbae1, 0xa59be717,
- 0xcadf9bf2, 0x2b1f3d70, 0xbd42a9bf, 0xc33d1bf6, 0x9c6f8cfb, 0x8c5a724a,
- 0xa9a5a87d, 0x973ccf5b, 0x729267f1, 0x9e93f11e, 0x2ce2ddbc, 0xf17dfe00,
- 0xd6c34b36, 0x47767418, 0x75b3a71e, 0xe2deaa99, 0x73d749eb, 0xeafaa93c,
- 0x1fa72b1b, 0x4cad6e55, 0xce3ab0e1, 0x24f1ecca, 0x8f5a8637, 0xf98e3dad,
- 0x19bbd622, 0xfde2b1e6, 0xac2b77cc, 0x21f03788, 0xcc9f34bc, 0xe7d6e9e5,
- 0x4637e4dd, 0x3e20cfd3, 0x5c9bd100, 0x1dd97486, 0x31ba066a, 0x682a33cd,
- 0x5bfeda2f, 0x2472848b, 0x284ab593, 0xb5d96d47, 0xe7d46587, 0x1bd8afb2,
- 0x12be8ec3, 0xa9e7a83f, 0xcf8325da, 0x2abbf9cf, 0x0c0d43b2, 0xe5c9b6e9,
- 0xa9f9e1b4, 0x6474da74, 0xe22faa78, 0x3a0bd73c, 0x7aad672e, 0xd60ad3e4,
- 0xeb256549, 0xd63af2a2, 0x5987a54b, 0x99ab2c65, 0xf32d6542, 0xf98d3952,
- 0xad63aca9, 0x9d64ce54, 0x2eb3d654, 0xa3bfc12a, 0xe4accbd2, 0x7608de87,
- 0x9973960c, 0x985bca97, 0x482dca9f, 0xfef013df, 0xdbe095a1, 0xbb952759,
- 0x459dff80, 0x53311f18, 0x39e417b9, 0x3c836f96, 0xf20c32c7, 0xf8c269dc,
- 0x83ca9b88, 0x43ca8501, 0x0f2a7281, 0xefd43c07, 0xca8ea068, 0x952f01e3,
- 0x540d0227, 0xd47c0576, 0x316054ef, 0xfc05ef95, 0x581fbe54, 0x8107e54d,
- 0x12ff9520, 0x91654f58, 0xe20ae411, 0x163ddddb, 0x8f9c9afd, 0xe8271e8d,
- 0x84f34fa3, 0xfdd1ebff, 0x78f1e2f7, 0x83ba3a8e, 0x94aed5b1, 0x6e6d1e90,
- 0xe504279d, 0x74937746, 0x4eeb9437, 0x64d63b70, 0xd93b244b, 0x724abed6,
- 0xa4bbb8c2, 0x9c027d6e, 0x0fbcdeed, 0x4f4198e7, 0x6b65ed54, 0x7f841413,
- 0xe819a4ff, 0xdfc1ab70, 0x7fda15bb, 0xcebfd2f5, 0xaece8331, 0xc7f53c48,
- 0xed4c38d7, 0x66b20e93, 0xf207c02b, 0x26993f4e, 0x10a075f1, 0xc7d2fe3e,
- 0xe3e4d673, 0x791cb93a, 0x368e05d5, 0x5b0f19fa, 0xdb18cfc8, 0x0eac667e,
- 0x2dab49e0, 0x471fe865, 0xf5b8a3f4, 0xdba2fd50, 0x563ae7d5, 0x1e2337a4,
- 0xee0a7dd6, 0x65e51389, 0xcf8c58f7, 0xbbab8e45, 0xfe99f9ba, 0xf647f332,
- 0x5e332e93, 0x54e79151, 0x7403ffde, 0x86df4d1e, 0xaebf1f4b, 0x3598f8a3,
- 0xf6faf515, 0x7a772e14, 0xdb43e3b3, 0x53dbdd60, 0x9f21bc05, 0x610eb5f6,
- 0x8dbad7d8, 0x6fc764b9, 0x1d36f38f, 0xd1bff6c1, 0x69a0db29, 0x636fbc7b,
- 0x14f9c131, 0xea9759af, 0xca5ea377, 0xb1fae049, 0x9c48f1cd, 0xffff8233,
- 0xe6eee422, 0xadaebeca, 0xd7ad4fe8, 0x6e7802b1, 0x1b05fadb, 0x4bce30dc,
- 0x022ec09b, 0xc2f27938, 0x9ed0050d, 0x513741bb, 0xae1e642f, 0x0b4da5e3,
- 0xe128d95f, 0x0b5323b8, 0x213cdc61, 0x7586733f, 0x74095d8f, 0xc6d466de,
- 0x63f5fa15, 0x8597e73e, 0xd019cc30, 0x5e2239bf, 0x1ac0d5b1, 0x4166f522,
- 0x35b7973e, 0x4c34bc45, 0xd33cf1e1, 0xe3d0f888, 0x11aaffb0, 0xbe93fe3c,
- 0x9ff14cd6, 0x9e7fc774, 0xe9ed0e9c, 0x4d053a1d, 0x74b6d95a, 0x9efd018d,
- 0xf22758db, 0x3f8956e3, 0x76e6cc1f, 0xb92c1832, 0x1efc2273, 0xf2ca37b0,
- 0xf6a7e223, 0x7184a086, 0x4b11aa8c, 0xa7378c25, 0x3093ee3b, 0xe0de91fe,
- 0xe8897c93, 0xfda9f927, 0x2b9f22ae, 0x6f67d61f, 0xf735e784, 0x3c0cbeb1,
- 0x81b79857, 0x566409f2, 0xecdf3916, 0x27878e22, 0xb43d2064, 0x691ad7df,
- 0x9af8d15d, 0x991eaee5, 0x1ce07c3f, 0xccbfdba3, 0xfdb8e7e7, 0xf9029871,
- 0x1f8f1260, 0x9339789c, 0x6dd9e5ec, 0x23f41ef1, 0xb2393c4f, 0x2aa7e096,
- 0x9eff683c, 0x1a62b978, 0x9a9fb396, 0x484efd49, 0x05c630ec, 0xbb753f66,
- 0x7939f58c, 0xef1c2eb0, 0x71e891b4, 0x89f753f6, 0xc5a91fe7, 0xf05d72f6,
- 0x2e9fd0ef, 0x012d7e6f, 0x891505e3, 0x7961646f, 0x6abdd742, 0x837fcc3f,
- 0xd51afd95, 0xda4377b5, 0x3fc1d4df, 0x6b993ce6, 0x378c78c0, 0xf3ce973f,
- 0xf031e626, 0xf92f077c, 0xba3b95b7, 0x8e8b01de, 0x5bd9d75d, 0x9ebad783,
- 0x5883d65d, 0x34158756, 0x61ed59a3, 0xa72c41ef, 0xd16bf975, 0xa0fba9bc,
- 0xdd9620f5, 0xc45e7d23, 0xedffabff, 0x055e3796, 0xab63c478, 0xe68768ac,
- 0x633dde3f, 0x0d2fc51b, 0x60a1f90c, 0x8f55bf92, 0xa6f66c04, 0xf6787ca2,
- 0xfa875919, 0x8d6ae4dd, 0x4f74d843, 0x4ce30eae, 0x175a53db, 0xe20bbed3,
- 0x28f1e219, 0x8ba421e8, 0x2a6f1482, 0x25f013f2, 0x829dc39d, 0x7e294d3e,
- 0x8bd60a6b, 0x7efe8e7b, 0xd85e1cf1, 0x4e35afdf, 0xa55279a2, 0x08cdc4a0,
- 0x42ec23b4, 0x7f33b79f, 0x96c3e206, 0xf18adfe8, 0xce2a82ad, 0xd3912cff,
- 0x996f7605, 0x10508f48, 0x3fd5bd3e, 0xe6c1cf0b, 0xe01bcfeb, 0xb06a0774,
- 0x25c93edf, 0x367fee98, 0x39061c4b, 0x3fc2eb43, 0x5ba4e78c, 0x51cec9d2,
- 0x806b6fd7, 0x0eec22f1, 0x968dc7dc, 0xba54e781, 0xf5f620db, 0xe60b2884,
- 0xf5d4f1f3, 0x7ae7cc6e, 0xd4edd61d, 0x62f47ae1, 0x5d0fb03d, 0xfa227ac4,
- 0xd7fafed1, 0xd23b14cd, 0x897e6305, 0x4729cc97, 0xcea17fee, 0xd71a3713,
- 0x615fd6ff, 0xe509295c, 0x67f3ac08, 0x919927fb, 0xf1f7a34f, 0xfb88be42,
- 0xfe8cbd80, 0xe71f4cc3, 0xbf46e6df, 0x0d94fe84, 0x43bfdc61, 0xd45c60f6,
- 0x0c926966, 0x39b5fb11, 0xd99e9eb8, 0x728fdfe8, 0x82796665, 0x8b3df482,
- 0x72b5ae75, 0x0f983efe, 0xfd622beb, 0x7136167f, 0xfe7c4ec0, 0xe24ef799,
- 0x67583a91, 0xcfea3155, 0xbca5eec7, 0xcc8fcd89, 0xcfeb8c3c, 0x3fb93f74,
- 0x7ef40615, 0x3e7dce2d, 0x77656f8f, 0x87bc9ac8, 0xc9acbf2c, 0xac58c88b,
- 0xd07a076d, 0xd95d2ac0, 0xa829d5a5, 0x2509746f, 0x9da06733, 0xf923a492,
- 0x9a4a720e, 0x8d1ed31b, 0x7f812ba2, 0xcdde7cf5, 0xb373a7cf, 0xb039f859,
- 0x737cc55e, 0x7aacd8a2, 0x336f304e, 0xd03279b0, 0x3ead737d, 0x8f1fa124,
- 0xb232f1fd, 0x26f70d97, 0xada73e0e, 0x19cce706, 0xd41fa728, 0x2aaf2959,
- 0x67a0af0a, 0x94fc99fe, 0x1da8d307, 0xc53365ea, 0xabd6233d, 0xcf6b058f,
- 0x3bfd4ae3, 0x0cce7858, 0x4cfae0d7, 0xa899dcc7, 0x32973367, 0xf6e8ff3a,
- 0xe7b547a2, 0x78ef7e4a, 0x50b1a38c, 0xe79c07f8, 0x5af6c74c, 0xcfa3efb4,
- 0x9cbba555, 0xc2f5b161, 0x2f5c0523, 0xa72824e4, 0x90aec8ea, 0x66f04ec9,
- 0xfd23c3cb, 0xb9f2efa4, 0x57e5301f, 0xec43d216, 0x1f087363, 0x3f5dc255,
- 0xa8738629, 0xfba1d3ee, 0x7e3ddec2, 0x5099e9e0, 0x7bf5fd4e, 0xdfa0c74d,
- 0xe32cefd4, 0xeb377eb0, 0xa081819e, 0xf90361e0, 0x7cbe6b8d, 0x440315a5,
- 0x414c8dcd, 0xe3ceabbd, 0x6efae028, 0x25e4218f, 0xb32c5af9, 0xd907246d,
- 0xf4114bc5, 0xe89e9002, 0x6f31b787, 0x91f92b63, 0x8612fcb6, 0xaccd3c79,
- 0x97d20ef3, 0xe30c8d67, 0x383637af, 0xffb0550d, 0x869d1b1b, 0x219f78f9,
- 0x23b43ab3, 0xa8506a6e, 0x8546a6fe, 0xfd799bd7, 0xd50f06dc, 0xf0f46dcf,
- 0x5bd40b7a, 0xd4290f30, 0x6f718015, 0x753aa4b6, 0xe8d01f78, 0x4c17ede5,
- 0xfc1c7ed4, 0x8c27cc01, 0xbc910b8f, 0xf1d1ed6f, 0x7c7f237b, 0x9ab58248,
- 0x3b0ecb12, 0xe8f77f11, 0xcc175014, 0x2b332d94, 0x7836cfc0, 0x7f687a43,
- 0x1032858e, 0x3617bb91, 0x7ea10b79, 0xe2b643b7, 0x7b8376fe, 0xc147f98b,
- 0x9de132a5, 0xbdde00fe, 0x3af741d8, 0x3efc455b, 0xf14cc4b8, 0x2875b066,
- 0x6496ccde, 0xb7863b0a, 0x86b02c77, 0x39031ed0, 0x7bd8e748, 0x00764c8f,
- 0x6ea3c3c8, 0xe6768bb4, 0xca9f182b, 0xb7e5aa5c, 0xaa0b85f4, 0x6646bc68,
- 0xbebd3f45, 0xbcd78f91, 0xa1d21753, 0x71b9655d, 0xf341d226, 0x3fa0a60e,
- 0x53c2ecfb, 0xee10c3b5, 0x3f2e1450, 0x79a172b4, 0x9c4753e1, 0xcebf1fb8,
- 0x79fd405a, 0x5fd9af1c, 0x0d2f5dbd, 0xb79d33f0, 0x0c7e4073, 0x61ca3796,
- 0x5e63271c, 0xc1f182b5, 0xc627d874, 0xbc8c3bf3, 0x4ca7783e, 0x0be92df7,
- 0xf475b0e5, 0x0b677f00, 0x1f23a614, 0xd17c9126, 0x9dc8d7f0, 0xc179f85a,
- 0x32efc0a7, 0x9db82bee, 0xd1673e51, 0x903f6d76, 0x57943a93, 0xfadaedd9,
- 0xe3fa8b5b, 0x443c1a02, 0xe7e6d9e3, 0x9f7a2f72, 0xc70afb03, 0x74bed115,
- 0xd7285f7c, 0x72d8bea0, 0x1b8bea80, 0x44e9c565, 0x0a79d779, 0x53b45ff7,
- 0xeb806166, 0x44cd9617, 0xa4dfaa7a, 0xf5c8f0a3, 0xc2e48bac, 0xc6adf695,
- 0x19f7a23b, 0xf2efbfbd, 0xa88e5cf5, 0x6613e0f6, 0x1edb07b2, 0x9ef0e8b6,
- 0x78adec15, 0x55dc2b8c, 0x402c1d54, 0x1dc8407b, 0x973d25c3, 0xa216cd27,
- 0x51ed8353, 0xa1fc6477, 0x13f55465, 0xd86fd405, 0x6e3c758d, 0x968efb41,
- 0xe3173f63, 0xe6d6ea4d, 0x13db5de3, 0xa63f951f, 0x729bc7cd, 0x7f2a3321,
- 0x7f2a2f2c, 0x7da6946c, 0x545c75aa, 0x51b5d8fe, 0x1a3563f9, 0x967b9678,
- 0xcaf5ffa6, 0x86f81a4d, 0xfd343bf2, 0xd6ee78e7, 0x7754dfb4, 0x66fda6bf,
- 0x7c0d4aef, 0x6b5fc36b, 0x6be6dffa, 0xb1dfb4d6, 0xf69a27f8, 0x2d49ff68,
- 0xf9701577, 0x3feed9be, 0xd06c0149, 0x3df791f7, 0x28de996a, 0x7b9e46f7,
- 0xa7b70252, 0xb9fdfba2, 0x8bfbd380, 0x058f9f12, 0x1c789aed, 0xf135d8f3,
- 0x854570ba, 0x71c61bb9, 0x21df951a, 0x0ee262dd, 0x6bb6bfbf, 0x3ff6d9ee,
- 0xbc60fd0e, 0x1b5cc4a6, 0xff8c3bea, 0x37b940fa, 0x5f747487, 0x4abf5b61,
- 0x69b83d22, 0x5cab8f15, 0xaf3c5230, 0x8eac1d7f, 0xdc46fbc6, 0xeceba96b,
- 0x2da47110, 0x85424718, 0xfd4199e4, 0x7ef1c656, 0xe3574d3f, 0x371a3e8e,
- 0xa418dca3, 0x7e85bcfc, 0xcdc0e309, 0x7bc5663e, 0x02cb7493, 0x069bff9a,
- 0x265d9925, 0xb016da2e, 0x67cc0f33, 0x635c155f, 0x527dbd84, 0xb3d4f7d0,
- 0xcfddbcf0, 0x5bd31e28, 0x1853edf0, 0xe143e98f, 0x219d7408, 0xf73f3c73,
- 0x5fcfeec0, 0xe6daf981, 0x5ca197a5, 0x5acf6e8d, 0x43ebf7be, 0x6e7f6997,
- 0xde1f5bd9, 0x1fe7b97b, 0x25ba75d8, 0x38c87da2, 0xf3264369, 0xe001d044,
- 0x80fde62c, 0xf9f5c85c, 0xc5230af2, 0xfc01aeb1, 0xbfbe0215, 0x89cff8aa,
- 0x3fb9e03b, 0x44eefb39, 0x65ea1d3b, 0x847e87ac, 0x9fe53fd7, 0x9c92fca3,
- 0xa3df6051, 0x9b171dfd, 0x7215c76e, 0x8d6d95ba, 0x6a0718bd, 0xd13a344a,
- 0xbb32cfcf, 0x07193eca, 0x7ee06d74, 0xfeffb475, 0xddbfbf5c, 0x9177fe49,
- 0x944b2718, 0x9e7f2b7a, 0x4e4c8399, 0x3114f717, 0x6a4fa1f0, 0x655e5097,
- 0x8d3f942e, 0x89fd8be4, 0xa4238a46, 0xc1dc445b, 0x171358b2, 0xfb06e350,
- 0x9095ef13, 0x4bc3d3ee, 0xf18f9e28, 0x5c6f1c02, 0x11d693a1, 0x32c82f1d,
- 0x86bad3e7, 0xd7f59f1e, 0xe6314827, 0x1a875a89, 0x279e889f, 0xea04f684,
- 0x64f16599, 0x7bef444f, 0x79556149, 0xd7e470e1, 0x51d393de, 0x90329f7e,
- 0x5e8a1c3e, 0x143ccf30, 0x42b4fe31, 0xfe63dfee, 0xe63d6478, 0xe3228787,
- 0xef41ccd9, 0x2a0c102b, 0x7bb18466, 0x77ce1143, 0xc277cf94, 0x1ec9d8a0,
- 0xfe87b504, 0xd7217d5f, 0x9730d303, 0x19f2386d, 0x81dbcbf7, 0x12dcb1af,
- 0xd67d92b0, 0xf087fc01, 0x9527010b, 0xca9b80a1, 0xda85016d, 0x09ca07b7,
- 0x21e0257e, 0x8ea04778, 0x5e02d7ca, 0x6819dfaa, 0x01e7f820, 0x819d951f,
- 0x46ff54c5, 0x9bf04fc0, 0xbf04d581, 0x0160f685, 0x53d7a9c6, 0x4ad03bb9,
- 0xed037bed, 0x081dbf04, 0xec0e1951, 0x8103faa0, 0x060f824e, 0x287c1176,
- 0x70f82610, 0x479530e0, 0x7f545d03, 0xc101f03c, 0xc93ee337, 0x15df58ae,
- 0x4f8578c0, 0xf77da276, 0x15a16fac, 0x7cfb6bed, 0x79c5159d, 0x04b74fbb,
- 0x8d1d1ce3, 0xef131efc, 0x040e3123, 0xed086876, 0x67fcfb39, 0xf6a54c8e,
- 0x19bed4b9, 0xc4a70487, 0xafdb82b6, 0xfa91996c, 0x0a2130d9, 0xda58eb9e,
- 0xdc03d218, 0xed4f185a, 0xf93215b6, 0xfce9cf6d, 0xeedc8529, 0x336df922,
- 0xa541fb70, 0x5c853583, 0xe429aede, 0x161f5cf7, 0xca4a73f1, 0x9e47ad8b,
- 0xa6ed542d, 0x7f7187f5, 0x3fb079f6, 0x55bcef5c, 0xb597bc66, 0x72b3a3dd,
- 0xcbdf937b, 0xb8bf603a, 0xc50f587e, 0x6e7cd985, 0xade6db2f, 0xe63975a5,
- 0xd6ed48df, 0xd05347a2, 0xaff76f4f, 0x6deb439a, 0xd9ae2994, 0x82903bee,
- 0x89cf6cf7, 0xf0db277c, 0x64a86f78, 0x8d9e7af0, 0xe3077f5a, 0xec03837e,
- 0xec63d7a5, 0x496ed303, 0xc71130cf, 0x331cd47b, 0xe829d78e, 0xfe7121d7,
- 0xc4ca0f63, 0xb0302f7f, 0x2db9e0fb, 0x5105df60, 0xb6bf540e, 0xbd55e302,
- 0x289dbe85, 0x8e779f37, 0x9c03203f, 0xcfc7c75a, 0x82bf255d, 0xf79c3476,
- 0xbf502de7, 0x5e4dbee1, 0x4db0ea5c, 0x6ec835ce, 0xcadff3ed, 0x4397f8c0,
- 0x5a8f9ed2, 0xf9f62dbe, 0xb78f8782, 0xf14c9565, 0x378adb64, 0xbcf9075d,
- 0x33ff91bb, 0xc38a3e8b, 0x5cde4c30, 0x86450ea7, 0x35fc9e63, 0xff7717ce,
- 0xcd423aa6, 0xdf3a8f26, 0x6aef9c58, 0xb1e234f1, 0xc62f9b5a, 0x279286ba,
- 0xbf239257, 0x81ef1429, 0x942cb164, 0xb819cf23, 0xe49e0adc, 0x052524f6,
- 0x8f3c1a4d, 0xe6a02f9a, 0x0be7440f, 0x29e05efc, 0xde62c4cb, 0x4fd67ee7,
- 0xd73a41bf, 0xbd1b3c17, 0x92e36794, 0x35874931, 0x7a12d819, 0xde2aa7ef,
- 0x3d0626ba, 0x2853e5a8, 0x1fc100c7, 0x2e713216, 0x38f8ea9b, 0x64ce3e3a,
- 0x2ae082b6, 0x4318358b, 0x33e5ab5e, 0x2b7f9c62, 0x759b74e4, 0x870dcedf,
- 0x1a8f7cf0, 0x8b6a3728, 0xfd01b42e, 0x3d09e084, 0x7d4cd7c9, 0x2257d6e7,
- 0x4117f5d6, 0xa18c5b9f, 0x6e87bc62, 0x504af187, 0x94bf22ae, 0xd582fc50,
- 0x7d5de047, 0x1d843fee, 0x75c5fcf0, 0x3cff7429, 0xda1c6d6f, 0x4ccced47,
- 0xf83360f4, 0xd046c653, 0x32e90d83, 0x8b387d06, 0xfc0e348c, 0xe226aa59,
- 0xcbd70cbd, 0x60972835, 0x2616fbdd, 0x99ac3e91, 0x31acdeff, 0xc9f65700,
- 0x65b2cf94, 0x9cd3cf29, 0xca3aeff4, 0x31fb7913, 0xf75df7ee, 0x9fbd2d50,
- 0x67ca2194, 0xb283cfd1, 0xc2533d72, 0x0b952e70, 0x99bf142d, 0x133e417c,
- 0x4851a7e9, 0x17fa3c7a, 0x6d735e3c, 0x0f79acaf, 0xb68f526d, 0x8f54603f,
- 0xb7464676, 0xef3edeb6, 0x6153d3cd, 0xfca5f69e, 0xbf8bf6e1, 0x7070823b,
- 0x24e28e95, 0x5064fbea, 0x4df14193, 0xf9819839, 0x8ccc1b9b, 0xbe6db9e4,
- 0x60f28ad9, 0xc93c7a7f, 0x2cdbf6bf, 0xcbae8764, 0x3b438a6f, 0x556f34d9,
- 0x8c23960a, 0x3b18adc1, 0x2dd5e6bd, 0x9127d937, 0x55f355bc, 0xec1cac82,
- 0xdc8dbb81, 0xf73e09c8, 0xe51954f4, 0x9967cf25, 0xa796eaf4, 0xadd8f983,
- 0xd4e1d695, 0xf9be4cb1, 0xba76f87e, 0xbeb896b7, 0x1da11a70, 0x7ca379e0,
- 0x7b4115d7, 0x3f705a46, 0x9cb66d36, 0x6b7ca469, 0xf982324c, 0xccfd0b0d,
- 0x658df588, 0x573e48e6, 0xf2fbe4f9, 0x87c63c7f, 0x0e595f87, 0x2bdc695c,
- 0x3e417bf1, 0xfca19be7, 0xf210d369, 0x1f8ff554, 0x8bfce029, 0xf57a4b6c,
- 0x7da6318c, 0xce22abf5, 0x9e46e567, 0x61bb32a7, 0xc3dd1d5a, 0xcc65bb04,
- 0xdc517be7, 0x27a7b50b, 0x7b713dd2, 0xbde30b2e, 0x8e817041, 0xf8d27bc7,
- 0xb98ded7f, 0x936a1f14, 0x3cc129fc, 0xb9f097cc, 0x01dafcf0, 0x7f308947,
- 0xeb970e5b, 0xe2b7e42c, 0xb38c357d, 0xe0fd7238, 0x74b505ef, 0xa61ec8cc,
- 0x789e85f8, 0x888c69cf, 0x1c5c63b7, 0xcfd117e3, 0xc2d5ee4d, 0xe1682e53,
- 0xc0d0662a, 0x3c2d6635, 0x3785aad5, 0x7d3b7115, 0x3f5b5dba, 0x1d85a9c3,
- 0x74e307d9, 0xc2dfcf3c, 0xf049e222, 0x447e5ef7, 0x68dd681c, 0x765f3efe,
- 0xcb8a52f7, 0x78f0e5b6, 0x3695638f, 0x42c591ce, 0x732fe212, 0xd63c5469,
- 0x863f66bd, 0xa30bed97, 0x5efc41e3, 0xa9a1f002, 0x7c8a3147, 0x9e68eaf0,
- 0xd23dde87, 0xce08bdb7, 0x5f0c60b3, 0x4d1dabc7, 0xdac53f74, 0x5caf88d3,
- 0xb0695f22, 0xe88a7cbe, 0xf90f1d40, 0x7fb11822, 0xf84887e4, 0x37be51a1,
- 0x8be41e70, 0x2cfea460, 0xfbd036e9, 0xa1be62ab, 0xcfe543f8, 0xa28f7172,
- 0x89f4dde6, 0x9bbcd4b1, 0x705f2255, 0x4ebe5457, 0x1936a39f, 0xaebee7fd,
- 0xf8b1f195, 0x682ef910, 0xe5887ce2, 0x837f9106, 0xe22c187c, 0xa6f5887c,
- 0x637856ef, 0x050b8bd9, 0x63bfdc5d, 0x224f86c9, 0x73acb7f6, 0xbe5e0685,
- 0x910de1eb, 0xe3f751f9, 0xbf08303f, 0xed7e519e, 0x2f9db152, 0x2bcbff4f,
- 0x48cd0ccd, 0xcf2807df, 0x319c1596, 0x5ea77bf2, 0x42eb9e23, 0xcc99ff61,
- 0x2fd1f2b3, 0xf2e211db, 0x37889d6c, 0x33155b34, 0x3b9981c6, 0x50c71355,
- 0x4b4d11e3, 0xd854b65c, 0x7c7f7247, 0x7ca56bc1, 0x14f9e1dc, 0xf1aa48df,
- 0x32c6cedc, 0x9c6453d6, 0xdd62066b, 0xc056c18e, 0xd957f9e9, 0xc0c74133,
- 0xf8174dbe, 0xb0fdc97b, 0x8f2467ec, 0xea68e42e, 0xa39c44db, 0x0b2dbf9f,
- 0x8b3e70aa, 0xb0fb49db, 0x1e1aff31, 0xd1c6d417, 0xe73de24c, 0x343be3c1,
- 0x7e226df5, 0xb5f1e572, 0x85d33971, 0x2fc621bc, 0x1adf1a8c, 0x7181fc73,
- 0xabbdf1b5, 0x714f37b8, 0x2e3a184f, 0xe9cdd236, 0xdfdd9d69, 0x83317185,
- 0x06667fe0, 0xf288993a, 0xe3e9ff80, 0xc1ae7e79, 0x198f8da1, 0xfa0baf30,
- 0x98b8973f, 0x3efa8ae4, 0x0599fdb5, 0x27cbca12, 0xa4e8fdf0, 0x5a24f2c1,
- 0xde44d88e, 0x811cb47c, 0xd37034f9, 0x36cf5cc3, 0xe50f8a4b, 0xfe7ef473,
- 0x7587f17f, 0xc8bbe389, 0x778eceae, 0x0f632e9a, 0x638205b7, 0xf175b9f1,
- 0x0778c5fe, 0x918ef77e, 0xe3944f69, 0xf88ceb7b, 0x298ec56c, 0xb60f3173,
- 0x13ebf04d, 0xa7a23a69, 0xb7d3dbf4, 0x29e823ea, 0xa173e8dd, 0x13ae9c3d,
- 0x87f85ff9, 0xd227b471, 0x318c6db7, 0xa9d76abf, 0xd601adb0, 0x6d8e7111,
- 0x9fee9ec2, 0xadcc8eef, 0xd27188fa, 0x34b1be4d, 0xa3fe7f5f, 0x87e5af98,
- 0xc459b7cd, 0x6cf9d4c3, 0x67f76a77, 0x7f69fa33, 0xd487a136, 0xfee336f9,
- 0x15d5f062, 0x75fe13e7, 0x69777cff, 0xf93367f7, 0xf5367f69, 0x7cea1b3b,
- 0x3e6c3f2d, 0x76cfe0bb, 0x3d97def1, 0x64377f9e, 0x9a23f7a8, 0xbdccf3fb,
- 0xed2f9466, 0xe850bc6a, 0xfa5173bb, 0x3be849ae, 0x6973f9f5, 0x3dff14b9,
- 0x7326f877, 0xfa873d2c, 0x933b098d, 0x09933b09, 0xd9e1133b, 0x0d57a735,
- 0xbbfc7a07, 0x95cf371d, 0xbecfaf41, 0x1cf8f4ff, 0xf1d9df80, 0xe832b98b,
- 0x9eafd9a9, 0x1e0b337e, 0xfbdbfe29, 0xe9f4fc38, 0x5bb4073e, 0xadcccf82,
- 0x18693805, 0xbaeacfd7, 0xb97779d5, 0x7cb95db9, 0x6f8f1e75, 0x3d459c82,
- 0x67e39cd6, 0xcafc7f18, 0x1f349bd1, 0xeecf2ffa, 0x03105f4d, 0x7cb96a2f,
- 0x1e7f2175, 0x9889c0ad, 0xaf48c6cf, 0xc7bea8dd, 0x8f7da752, 0x23692925,
- 0x2c7187d9, 0x96394564, 0xfbd0f660, 0x7316d797, 0x533370bd, 0xe15fb81a,
- 0x1c79102c, 0x4dcbdef4, 0x6b5c62e5, 0xfdb12fe5, 0x09e79d66, 0x79ff77f4,
- 0x68e1bcd2, 0x7379ab3e, 0x3bde06a3, 0xde7dfedd, 0xb1fd9e4f, 0x64e56dc4,
- 0x36998f1e, 0x7bdfc9ca, 0xa22b888b, 0x3e143c5d, 0xff73753f, 0x1e9f007e,
- 0x0f54cf8e, 0x37b5a7c7, 0xff9c0d69, 0xf4de94f2, 0xc34f89c7, 0x0835f131,
- 0xfbf851af, 0x7ea99b93, 0x88ed1ea0, 0xc55be12b, 0xd85f9aab, 0xc80b392c,
- 0x9a19675e, 0x14c660fc, 0x1e7bac67, 0x3d48cfa4, 0x197bb46b, 0x3486a3da,
- 0xeff98dc5, 0xc7d17fcd, 0x1573dbcc, 0x525ef88b, 0x367de53c, 0x1fc91927,
- 0x93349fdf, 0xb659fc61, 0xd4bc234c, 0x3e51d526, 0xeb187ea6, 0x3a357f44,
- 0x7c9f5d0f, 0xe909e53b, 0x4061e86d, 0xecc79fd9, 0xe95f2331, 0x5d92563f,
- 0x99570048, 0x295f2e87, 0x26753d0f, 0x324f43cf, 0xd85e313a, 0x998c689e,
- 0x9badfe87, 0x71fb5dfb, 0x1fb8697f, 0x886eff23, 0xe56c97d8, 0x9f58eef7,
- 0xab46f500, 0x5dfc518f, 0x615bffec, 0x4dfda907, 0xf9433f56, 0x4e2a37a0,
- 0xa9f45fb4, 0x77e07f5c, 0xe231dc5b, 0x0dbb5f28, 0xa3fdfcf4, 0x99cf5cac,
- 0xfb3eddb9, 0x27dfb137, 0x44962b28, 0xdd3624bc, 0x3737450d, 0x9dde78ea,
- 0x2d7397ca, 0x9c07ca46, 0xf7d3b16b, 0x2102e66c, 0xf446a4f6, 0xfe56eebf,
- 0xd8acb53e, 0x7b7e9e91, 0xfe5f94ed, 0x00b5e196, 0x66a7db97, 0xc2bde818,
- 0x28f31253, 0x5df383a9, 0xa994debd, 0xea1bccb8, 0x7645af4f, 0xc8c3182c,
- 0xc1757a4e, 0x4e95ad43, 0x8aea8ff2, 0x737d9e91, 0x770fa2a6, 0x77d8c363,
- 0x04f2ffb1, 0xfbd5cf2e, 0xfc09e5a5, 0x646bde7d, 0xffec6cdf, 0x4917c555,
- 0x725f48d5, 0x2bae0e6f, 0x33dd3fa2, 0x5b9ff445, 0xcffa813c, 0xf5ddafe6,
- 0xce3977d2, 0xc8f99da5, 0xec7ce8c7, 0xf429ecb1, 0xbde458b7, 0xe81f9086,
- 0xa3d64577, 0xce22c5bf, 0x6e505a53, 0x4f218eeb, 0x3ec332d4, 0xa042e46e,
- 0xb18c8cd7, 0xcff5aa54, 0xa175f47a, 0x90fce718, 0xec1fe738, 0x6ac3b4f7,
- 0x3df80e27, 0x843cefe0, 0xe172f8f8, 0xb0f1f4a9, 0xbf6a07a5, 0x6ec7bf26,
- 0x16ceffec, 0xb00371f9, 0x2db3bef6, 0xdf8c5bd0, 0x87886019, 0x8bf91189,
- 0x25f6bf62, 0xefd0526b, 0x20f4b381, 0xbc0476dd, 0x030d6b23, 0xe42cbfcf,
- 0xd2160b08, 0x2eb106f3, 0x0ceb5a47, 0x2c38bb8c, 0x6073ca3a, 0x6791aacc,
- 0xa6536a73, 0xbb0ba1a6, 0x2716299c, 0xe45df7fa, 0xfbe06417, 0x7c9f180d,
- 0xf78636e7, 0x9d69595f, 0xfd75da12, 0xc6f788f3, 0xe17b8957, 0xed2d5f3e,
- 0xefd6c327, 0x5ff23197, 0x507382d3, 0xc33df56e, 0x92f19457, 0xc691ddb6,
- 0x5dee1ff3, 0x02721f18, 0xed2d67bd, 0xb04f4fc2, 0x30f8700c, 0xa9719fa6,
- 0x7282d5d9, 0x140957e6, 0xe7b7d677, 0xfbc820b3, 0x1bfbecb0, 0xf9c877c2,
- 0x6c4f2e90, 0xb65b9e16, 0xb9ec9e28, 0x0675a976, 0xe8f731f9, 0xd178ac58,
- 0xa7ee62ce, 0x2b628ffc, 0x4fb5cfbf, 0x1f63f4e4, 0xb8e1723f, 0x77ffe7ab,
- 0x49f99e84, 0x7fde93b2, 0x4da3ed52, 0x8e47f843, 0x5f213a91, 0xf8115e84,
- 0x199f3333, 0x792f45f9, 0xc774b079, 0x96bdaf6b, 0x5b53e8ce, 0x841b4748,
- 0x138ea3fd, 0xc6a21f9f, 0xf243c578, 0xb87ab252, 0xedfab0be, 0x75af3841,
- 0xce63e9df, 0xfba085ff, 0xd90fd935, 0xd3ad7fb1, 0xfa32c6e7, 0x9c96983d,
- 0x3e78dd7b, 0xe9fb332d, 0x96baec95, 0x61dfce33, 0x28f0563e, 0x9fd837cf,
- 0xefc3685d, 0x1b0e5941, 0xe265d364, 0x27d046f1, 0x19d8cbce, 0xdeaf9146,
- 0x99e78072, 0xbaca58b6, 0xc5a1af30, 0x6a465692, 0x56f3c2d8, 0xcb16fed5,
- 0x1607d221, 0x5adfdf85, 0xb8487f50, 0x0947c04c, 0xe110ebae, 0x5077ed43,
- 0x128b5dc8, 0x5a5ef866, 0xfd4ad29c, 0x83d7b2cc, 0x34e6ca79, 0x6644f746,
- 0xe217f02b, 0xccc75f35, 0xbec9f431, 0x5a4fae62, 0xaabaca4a, 0x7e327d69,
- 0xd8b58eb9, 0xe7a4eccb, 0x7cf0ef11, 0xcf968673, 0x577813bf, 0xf91669c7,
- 0x4eae77b4, 0xc38ed609, 0x1bf79338, 0xcb16e7a4, 0xf5107302, 0xcbf8e25a,
- 0x391a678e, 0xd7fb78d4, 0x43bef18a, 0x2c4fae79, 0x0678e2b3, 0x8d15f7c6,
- 0x9c62e1f7, 0x7bfbd5ff, 0x47bd2cc0, 0x581d304d, 0x7deb1ed8, 0x13f3feac,
- 0xcfd1f2f7, 0xbc7960fd, 0x02871b56, 0x091eec79, 0x72565afb, 0xd53550e6,
- 0x0bd3f8f0, 0xfd519fc2, 0xf6317492, 0xbf7f120b, 0x2bc5283f, 0xe10c5df4,
- 0x69c7a4a7, 0x3ef030f1, 0x6f873f2d, 0xde1e781a, 0x7c4c6937, 0xd7cb4e87,
- 0x38ff1cdf, 0x059f9345, 0x7131ec7c, 0xde23c63f, 0x3888f5a7, 0x6f18c21d,
- 0x049d17a0, 0x89c7f7ff, 0xc33f9c1b, 0x4099acef, 0x5c60eb8e, 0xfa27e1c9,
- 0x5dba3936, 0xe239efc8, 0x78d10ffb, 0x2e48c9fe, 0x7a47a5af, 0x51af785c,
- 0x119bd69b, 0x7c5fdf8b, 0xee0642f8, 0xfd19c5e7, 0xdf743b79, 0xe769e2f4,
- 0xcbc402e6, 0x6ffbed11, 0x474df8ea, 0x22f8a7ce, 0xe5c0a5fd, 0x3a4b2fdf,
- 0xd92571b3, 0xddb80b8d, 0x7dfa7ff8, 0x73e617b3, 0xa497753b, 0x084fcf95,
- 0xfc141cb9, 0xd320bb60, 0x2aee43f7, 0x3a40adf2, 0x0bac936a, 0x3ce72e50,
- 0x9af2f3cc, 0xb3d55b1e, 0xb47f0628, 0xd5fdf28c, 0xbabf08c8, 0xe491bd7c,
- 0x3725b35e, 0x28f772f9, 0x9fbb9f43, 0xde7a94e4, 0x79988f55, 0xdb8f8abe,
- 0x7bbee9d1, 0x3a260f29, 0x7ea8653f, 0x449d23d5, 0x137f6a0f, 0xc5f28f8e,
- 0x15c52372, 0x818fa1ce, 0xb72b3b71, 0x59d7a233, 0xc098c889, 0x7d0663f1,
- 0x49607499, 0x106b8e1c, 0x6a0bdcfe, 0xcca1a45c, 0xde7889ec, 0xb9995b70,
- 0x9a4e823a, 0x8e0fcf19, 0x2337ff1e, 0xac22fb55, 0xa80c63b1, 0x94c6317f,
- 0x97ed9f26, 0xf278643b, 0x0395b4fc, 0x0ee3cc7c, 0xc5a3e743, 0x232dbe71,
- 0xf30dac7b, 0x5e0ec0d2, 0x0460b0d3, 0xf64db7e2, 0x3c0368bd, 0xa2687ef4,
- 0x0bdfea3c, 0x7c16ed3e, 0x05f9d43a, 0xd23001d9, 0x1d9c7b18, 0x80e297c4,
- 0x2327c64e, 0xebe7345f, 0xf63142ea, 0xf3ec5c79, 0x7878f632, 0xc7b3f7e4,
- 0x1afe890f, 0x6ded1c7b, 0x46fdd44f, 0xb6768c24, 0x7e3d8627, 0x05f11242,
- 0xc2742791, 0x0dd5dce0, 0x39313978, 0x5697be99, 0x5defc090, 0x527fa04e,
- 0xabc87e06, 0x7ed1f3a7, 0x7ff52d99, 0x43f84917, 0x47f0608c, 0x62f905fe,
- 0xb6bff30e, 0xe23e2248, 0x1b7835f7, 0x73e57e9f, 0xd19236de, 0xf9c036ba,
- 0xdb46fde1, 0x6233f9c5, 0x8ea467dd, 0x5037fd52, 0x13ef1165, 0x889d3c33,
- 0xf741acfb, 0x72f75174, 0x9fd36d78, 0xdc1fac77, 0xed7286ff, 0x8b86d9bd,
- 0x5a622e3c, 0x4b8f42c2, 0x6daa9798, 0x10cea9bf, 0xb9a82f7d, 0x6bd9bb4f,
- 0x7d0f5e88, 0xd621d417, 0x1f71dd9b, 0xcd4fbfdd, 0xa644f9e6, 0x04e28c65,
- 0xa66df7d1, 0x3d1e5db1, 0x8c0dcfda, 0x38e8fdec, 0x52a9e639, 0xb181b0fc,
- 0x79e5122d, 0x7973df6b, 0xfe73fea6, 0x3ad6947d, 0x760ec79c, 0xd1d3da10,
- 0xf7a849ef, 0x51f7ea5a, 0xdfab3c55, 0x4dbf4a8f, 0xc163eb47, 0xc6aed429,
- 0xb87bf1fd, 0xf911e955, 0x7f582be4, 0x2b7f7a7f, 0x9bdf7b18, 0xcf29eadb,
- 0x7fbc7320, 0xb29a186c, 0xc8784614, 0x6fc8c85a, 0xf2683c00, 0xdbd94c09,
- 0x16e79f69, 0x224c3633, 0x33da683c, 0xfdd3d340, 0x7e6d42c0, 0x0353ec01,
- 0x57abfb47, 0xd94d53f2, 0xf39597f9, 0x9a638a57, 0x94e4bdc4, 0x97fbe251,
- 0xed14aca5, 0x9edbf4a2, 0x0738f3a1, 0x5f7181e7, 0xdd629c74, 0xf1826e3b,
- 0x77fc8fd1, 0xe2cd2fc1, 0xaefd5b6e, 0xf4c8f30f, 0xce549b1e, 0xf30d2cd7,
- 0x12e97bf2, 0x8a6e7c46, 0x8fcc6667, 0x1e4c2a97, 0x04c9bbc4, 0x32596c30,
- 0xd4067332, 0x7b3c45eb, 0x8464c48d, 0xf3315599, 0x5dfaa3f8, 0x577d10a1,
- 0xd479a868, 0x20b22bef, 0x679d4e7f, 0x2ef577d0, 0x7bf93cf3, 0x6be7e8d6,
- 0xcea99e29, 0x87fe143f, 0xbfde8efa, 0x8137bcd5, 0x441d53c7, 0xf4038daf,
- 0x417cf04a, 0xca3cf88f, 0x66d5bb85, 0xfa29577b, 0x4c1e670f, 0x114a3650,
- 0xd2ce08fb, 0x7e03eff3, 0xd7faf1bf, 0xbc927761, 0x1897dfef, 0x8beefbef,
- 0xdae422fe, 0x09597c3a, 0x191a5ef3, 0xfba431e5, 0x29973faa, 0xa0f600ff,
- 0xdf111391, 0xd6f18049, 0x1fefce3d, 0x3e77e336, 0xc0e7fef0, 0x13edfe40,
- 0xd1f76c16, 0xbcd43677, 0x5da3cc5c, 0x3ef0325b, 0x837df472, 0x89c611bd,
- 0x99b26f98, 0x9f401305, 0x0531f4a4, 0xb9ecf3bf, 0x03a65468, 0xa07d6c7e,
- 0x93bf0150, 0xa5ee8999, 0x337ead2a, 0x0ae10eff, 0xef676916, 0xefa56e97,
- 0x64bf86bd, 0x3e14bf94, 0xc3ee88cf, 0x70c4c39f, 0xddf8ff2e, 0xcbe47afb,
- 0x7a5d7da9, 0xfb93f3d4, 0xf67fa4fc, 0xed3d4fd9, 0x9962e7e2, 0x751f9111,
- 0xcf545b1d, 0x979f8ae9, 0x5f27ea2c, 0xfdf86a30, 0x46a35eda, 0x5b17c81e,
- 0x65fd46be, 0x67aa89ea, 0xe3e33fd3, 0x2dde28f9, 0x53edb5db, 0xdbd03e44,
- 0xc35fdf2a, 0xbc73a1ae, 0xcf331698, 0xd5dce06e, 0x71e26e73, 0x79c7c674,
- 0x9ff7946e, 0xff404d46, 0xe87147c0, 0xd82fc464, 0xdd952917, 0x58798ecc,
- 0x8e994ce5, 0xf3b2bd72, 0xe487a41d, 0x3c0f3cb3, 0x5ffdfc7a, 0x7e8595be,
- 0x9dc99ec3, 0xed2f6859, 0x3d15f2f9, 0xd3d5578e, 0x193eb0cb, 0x58a41e4a,
- 0x10aa784c, 0xcf5033b4, 0xf59144e3, 0xe74a9eb4, 0xcfe4e565, 0x871a4e50,
- 0xd933df6e, 0x87e4c2c2, 0x3da02675, 0xfe7c7cc7, 0x57cab364, 0x70972fec,
- 0x9f81845e, 0x52ea54f6, 0x7b9f616a, 0x6c15919e, 0x46f3d3dc, 0xe7bfce02,
- 0x7acd39ec, 0xf322fe98, 0x5afbac45, 0x2b53ee71, 0x7efec97b, 0xf92bac4a,
- 0xa1c52f6c, 0x30de9553, 0xc3d4dbea, 0x37fefa7a, 0xc8bcf0b3, 0x38a14c61,
- 0xfc5eba9a, 0xd73e2a6e, 0x88de7282, 0x5632b3fb, 0x287eec28, 0x55832a33,
- 0xcc9a8f98, 0x6b484a76, 0x417d2b53, 0xa5e4e7a4, 0xad939e9c, 0xbf18c3fe,
- 0xa3af5e4f, 0x59f24538, 0xcbf406df, 0x0565cb92, 0xf84ae4c9, 0xb7f310ec,
- 0xf9d4f98a, 0xa88cf5d5, 0x7ee84bf4, 0x62e3b324, 0x132aaefe, 0xcc5abf8e,
- 0x59eb14ef, 0x2c3be8c5, 0x55ad67c9, 0x83272fe8, 0x33df419d, 0x9a79e2b8,
- 0xb745f335, 0x99d94f27, 0xb1e0e745, 0xe30ad91e, 0x365b64a5, 0xceb94154,
- 0x32ff23a1, 0x3c5187a9, 0x9d27a93d, 0x4f527de4, 0x8a2728a1, 0x0f4269fa,
- 0xd4933970, 0xaf2d50f3, 0xa4d2a16f, 0x3f8717ca, 0xda98fb8c, 0x717cb8cb,
- 0x98be4978, 0x517cb8d2, 0x9556b8e9, 0x325acc2b, 0x0ff7ea1f, 0x06b5acff,
- 0x73f77b9e, 0xbbe5fefe, 0xbcbe5cd9, 0xfbf725fb, 0xd3bf694d, 0x42aa5ca3,
- 0x4dfa05c3, 0x33edc0bb, 0xbaab1f95, 0x48113625, 0xe7839a63, 0xc8ba1f87,
- 0xd9843b9f, 0x63af00d5, 0x9529c912, 0xdd056b6e, 0x83eab3ae, 0x81fed849,
- 0x3477de89, 0x98e5ce39, 0x83fe1ec1, 0x7c41f7ae, 0x33f7f8ef, 0x415def7a,
- 0x437e82f5, 0x9a2e494b, 0xe4396c84, 0x3e416058, 0xae487984, 0xe918721e,
- 0x3a080ebc, 0xe9af673e, 0x5b3fc308, 0x69fcdfcc, 0xc1ffd43e, 0x9340eb9e,
- 0x1e973d4c, 0x885f2f41, 0xe82b3bf3, 0x7bdd29a6, 0xfc04c953, 0xcd1e62fe,
- 0x0dfece7b, 0xdcb27fbd, 0xc69f1ff6, 0x3aa72efa, 0x4f7cfa8e, 0xcf19a568,
- 0x1f9bd0f5, 0xabb4409f, 0x7b235fe3, 0x7ee3e4ef, 0xd0f89a33, 0x78cf3b12,
- 0xadf3fc90, 0x8b7f9688, 0xc80f864a, 0x91667abf, 0xe7121e79, 0x1e4259eb,
- 0xddb8554d, 0xe7e56f5e, 0x5fb25f55, 0x8b8afea6, 0xd2af55f6, 0x5ae4f28d,
- 0xf29b7fe4, 0x7d4bd124, 0xbd97a73e, 0x0a0f9e82, 0x63ecce7c, 0xed97e89d,
- 0x196e982f, 0xe23ef80b, 0x1cff13d2, 0x52c6bb19, 0x291dcf06, 0x024bf095,
- 0xd6c97a6c, 0xcf00a9b5, 0xa7a22452, 0x1dad92fa, 0x967fd04d, 0x03a94135,
- 0xb3f9e8f8, 0x0eaf3416, 0x9784bc05, 0xe3f872e9, 0x1b9e1754, 0xcaccd3c7,
- 0x4ffe8cb0, 0x8cbf63bd, 0xc46df6fa, 0x2ee311ab, 0x7567c11e, 0x6a571f04,
- 0x238fb82c, 0x2f2126a5, 0x5e51e33d, 0xd537cace, 0x7c75a18f, 0x1a4c6c7d,
- 0x506f77be, 0xa73c95f8, 0x7e82c9ff, 0x6b27ccea, 0xccdfa053, 0xd7196692,
- 0x912cd47b, 0xd7da3cfc, 0xc793f944, 0xd2ac393b, 0x74073d2d, 0x5b6fbc79,
- 0x6fb61273, 0xd53e4897, 0xe21d2f2b, 0xa7d38fea, 0x0b0f7e3a, 0x3077e1b6,
- 0xb7af29cf, 0xb1f53e60, 0xcccf05be, 0x7f1c3c01, 0xb8c43f53, 0xc0ad1f4d,
- 0x244cdf71, 0x356d9e3a, 0x8cb1f469, 0x5e84694e, 0xd36f1ead, 0x0d4af5c0,
- 0x29cfd3c5, 0x7806e592, 0xad5df3de, 0xd191397b, 0xbd77da52, 0xe14c7be8,
- 0xa1cc797c, 0xd63cbe23, 0x8a9cb8f7, 0xfb4659f0, 0xde769991, 0xb21a92fa,
- 0x5b29ceaf, 0x826744f5, 0x54f1d50f, 0x4f8a3e3a, 0x9ced1e23, 0x6df68fff,
- 0x1dbd20b3, 0xe323bf75, 0x89675d39, 0xa1d0571a, 0x5df0dfaa, 0xda9fa471,
- 0x7ef3fb51, 0x3851eb86, 0xd7946bfe, 0xd57d238f, 0xa7a2643b, 0x9afecaa3,
- 0x27bf8614, 0xffecd262, 0x9ac91dfe, 0xda7597f1, 0x5db47034, 0xdbfdcfb5,
- 0x0ec4cba7, 0xd847a466, 0x3ec2f947, 0xe0771fd2, 0x1768aefa, 0xf7e8eb29,
- 0x45d90649, 0x8a2edcf9, 0xbe5487b6, 0xb889d66e, 0x65c78eac, 0x58dff9a3,
- 0xf1eec3fa, 0x8e492ad8, 0xc65bbfbc, 0xbe9073ba, 0x8612a3e6, 0x75d01ef1,
- 0x15fc23a7, 0x078b6deb, 0xccad7c39, 0xbfbf6976, 0x3f37bfca, 0x1e77d4be,
- 0x367495e5, 0x6561df4c, 0x07e518ab, 0x9e786be8, 0xad665e11, 0x258bd488,
- 0x8d2df1fd, 0xd84ba45d, 0xac78859b, 0xf1e46b38, 0x38e89e8f, 0x851fea97,
- 0xae2bf225, 0x93ea2e52, 0xe67bb8f9, 0x7f6778f0, 0x938f3379, 0xe5fc0915,
- 0x5d691a94, 0xdcf7808a, 0xb59efb16, 0x41fa54a4, 0x07dfa2f9, 0xef8029df,
- 0x61e690af, 0xb1553d9e, 0xca5291d7, 0x9777b5e3, 0xa14af278, 0x15f12a2c,
- 0x3d5e2296, 0x77d35f60, 0x9f643dc5, 0x33bce3a9, 0xfd16e3ca, 0xbcc96146,
- 0xee8f0edc, 0x6863ed07, 0xe3d8869c, 0x7a38fb40, 0xf6ec6e7e, 0xfc39b843,
- 0x30fb237d, 0x0c0643d2, 0x52f2a664, 0x16e5e32a, 0x8846aefa, 0x1e0de7fb,
- 0x9146f3d2, 0x103788af, 0xf4c7fc91, 0x1cbe34f5, 0x58f7419e, 0x8325d82d,
- 0x7691f9fb, 0x6a84ac94, 0x6aade322, 0xdf94a8b9, 0x1528ce73, 0x604cf3ef,
- 0xa8afc813, 0xea3cfcbc, 0xc3cd43f3, 0xc85765ec, 0x63f35e7b, 0xc3fa49d3,
- 0xa55f27e4, 0x4a0e607d, 0x1f39df91, 0x4a89bf6a, 0xc28a1c94, 0x5cf350d3,
- 0x22dc79ed, 0xd76af6e8, 0xcc7a655d, 0x2ed1f94e, 0x056b07ce, 0xd5dbf57d,
- 0xbe3f1e3f, 0xfe861f3f, 0xfefbd9ca, 0x4a7ef893, 0x37a56842, 0x145e7ec6,
- 0xe531f9fb, 0xce70d3c1, 0xfba18c1b, 0x0799ded1, 0xd20263a7, 0xf1f2c77b,
- 0x9eb005f0, 0x2dfcc537, 0x6feb9596, 0x1c5b4785, 0x1ef998ba, 0x7a78fe6f,
- 0xd93f0da2, 0xdce395ff, 0xa7206dfb, 0xd07d1e5b, 0x7581b0ac, 0x15d96b30,
- 0xe347f9ce, 0x6d5d7513, 0x625e305b, 0xf7aabd4d, 0xaf5c5a14, 0x697de024,
- 0x764de7ed, 0xb4460efc, 0x43c6a03d, 0xc6a5bd54, 0xa4bb544d, 0x1fe3c1c2,
- 0x7bfced33, 0x69f6153d, 0xb20fdfa4, 0xaff21aae, 0x226d3b30, 0x19fd0ab1,
- 0xe4fb5add, 0x4657bf3a, 0x9eda2a3b, 0x45334da8, 0x7abd07d9, 0x1715df94,
- 0xfe54fdb5, 0xc454acf8, 0x17795451, 0xcf9a4765, 0xbb40499f, 0x670bedeb,
- 0x8d142e28, 0x5c2eeb9f, 0xaebd9fde, 0x83f74f5d, 0xb7ad71d1, 0xfdd2e42f,
- 0x1baf11a0, 0xcccad4c1, 0x808cc43f, 0x6b7b4c97, 0x18ec1195, 0xf82983a3,
- 0x58fc36dd, 0x8d0dc633, 0x7c9c7817, 0xd7fb9d84, 0x2fc18b89, 0x95335185,
- 0x78c17da0, 0xef48ce7c, 0xef5d99d3, 0x09bdbd91, 0x9e2186ea, 0xa6199ec7,
- 0x5307118b, 0xc08a63cf, 0xb7e66eef, 0x4d81efe3, 0xf7f78bb1, 0x59db462e,
- 0xa22d3cfc, 0x2d62e27f, 0x3ce22d3e, 0x435b8327, 0xd3ec79df, 0xe7116f3e,
- 0xfb1a596b, 0x63a3ec1c, 0x7aa2439f, 0xc5b6130e, 0x596c39e1, 0x758a4dbc,
- 0x2f8b990e, 0x845b0e7f, 0xefd2a95f, 0xb204acb4, 0xe8a6f7af, 0x7b41fbe8,
- 0xcb59c313, 0xb2fdfe31, 0xaccfde99, 0xb4367110, 0x6b9f10a4, 0x84bf97b7,
- 0xa371db71, 0x7b8e74f5, 0xbcffd125, 0x60feff8e, 0x1fea78cf, 0xf883b327,
- 0xfb0780bb, 0x1bbbe9aa, 0xd3bbfcd1, 0x33dd007a, 0x3db9ceb6, 0x839c016b,
- 0x9980fb25, 0xcf839bfb, 0x4a3fef07, 0x7b4bbe05, 0xa654b9f8, 0xbda3d49d,
- 0xec66f25e, 0x8fe351f3, 0x3a9da47b, 0xaa362b20, 0x98fc8baf, 0xeca33b91,
- 0x642db3e9, 0x47c65b67, 0x39c0f7d5, 0x9f8ebdd1, 0xa1d916ec, 0x87ebaf3d,
- 0x114aa738, 0x7d7f3087, 0xfb65e28a, 0x7e714efa, 0x25c6d1e9, 0xfe5df727,
- 0xe7135e9c, 0xa1c453b4, 0x00e22e7e, 0x7e7fc29d, 0xf2a4e023, 0xea9b80cd,
- 0x04280adf, 0x0b940aef, 0x77cfc161, 0x47cb3e56, 0x84aaaf17, 0xcf47edfb,
- 0x7f7dbf73, 0xbe83a862, 0x42de74ef, 0xbda417df, 0xdbbf144d, 0x2c727c07,
- 0x1fbf49ce, 0x4cdb3478, 0xa5a3b3f1, 0x8a7bfbbf, 0xd16b7cbb, 0xc70bb8f1,
- 0xeb4e7ab3, 0x6f184e70, 0xee8b855d, 0x47e036a5, 0xeeffa114, 0xffbe8932,
- 0x040e1b34, 0x3a5445ef, 0xf169f537, 0x65ff0f59, 0x1ddff195, 0x617f42c8,
- 0xa4e88b8c, 0x2273ef42, 0x4fba373e, 0x571e5f91, 0x871beed8, 0x7af18071,
- 0x14fbbe26, 0x3bac4534, 0xe0426f0e, 0x44895379, 0x0a1a73df, 0x86e5d1bf,
- 0xa32fae54, 0x3475ffeb, 0xdd1b83d7, 0x773ce8db, 0x50b2ba3b, 0xf913efaf,
- 0x2964b310, 0x285fbf48, 0x1cb1dd77, 0x65915bf6, 0xc011bb1f, 0x6853a32d,
- 0x4f4afbfd, 0xd8f81c53, 0x21c65785, 0x76b31dfd, 0x79ce305a, 0x53dfc9cc,
- 0xf9951387, 0xf29db77b, 0xa4f2312d, 0xe958f2d5, 0x95fdfa22, 0xef7cfc7f,
- 0xdf7cd1da, 0x0af5524c, 0xfb815b8c, 0xdef9424d, 0xc3f902b2, 0xfc83b998,
- 0xaa73e8cc, 0x88b35f70, 0xfc64f3de, 0xb6bf4f7b, 0x3f782c86, 0x0267ed1c,
- 0xff426c7d, 0x3d04f778, 0x5eadaf7d, 0xe782e7e6, 0x1fa4e788, 0x997e13a3,
- 0x65667c76, 0xc0fa633e, 0xde1d6079, 0xf13ad0d7, 0xadefaa3e, 0xf86292bf,
- 0xc9d683b5, 0xf9589efc, 0xcdcfd1b0, 0x9a20b276, 0x1615b386, 0x709fa176,
- 0x99c69f30, 0xcce3e56d, 0x9b7c7112, 0x1759ffa0, 0x81df9475, 0xcf80d6f4,
- 0x7927cc95, 0x50f5cc72, 0xb7ac1d57, 0x41c6ce1e, 0x569346fe, 0x5b39090e,
- 0xf745525f, 0x07dcc7bd, 0xbfbea3c6, 0xe1e574be, 0x9fc22732, 0xb74154d4,
- 0xd52fa529, 0x945a5d20, 0x4c2f8a2e, 0x5d74cb76, 0x36b97d78, 0x17afd14e,
- 0xb44bf4a3, 0x928fa8be, 0x54f7113b, 0xf9c4434e, 0x4b9f72cf, 0xab83a488,
- 0x2f1ccbc7, 0xd67baf7e, 0x1da71e05, 0xe9f7978c, 0x7803a573, 0xd8ef3e89,
- 0xa73e8978, 0xeaad3c01, 0x8c1dcbaa, 0xbf6f155f, 0x1ac9792e, 0x8d1be799,
- 0xaf8a1fb7, 0x4156d1e6, 0x579e5caf, 0x3b7dbe08, 0xb771443b, 0xeff94bb7,
- 0xff731d4b, 0xb3bfe296, 0x0d63a858, 0xcbe045ca, 0xe8f94239, 0x344ce423,
- 0xf4f8061e, 0xbf2e283f, 0x28b7f3fb, 0x21caaa2e, 0xea25f213, 0xea1fc113,
- 0x1f9099b9, 0xf02f6f91, 0x2d5e87ef, 0x457ca87f, 0x383f21eb, 0x1fcc8fa4,
- 0x5fc5dffc, 0x18cd717b, 0x52167617, 0xd1cab83c, 0xac7bc7a5, 0x3dd250c9,
- 0xd27f8892, 0xf5919c3a, 0x591c6e3c, 0xa12f714f, 0xa6b8d4f7, 0xbb3ebeda,
- 0x5518412b, 0xc24d47a4, 0xc4fd51de, 0x64538e7d, 0x54faaa7f, 0x57fef716,
- 0x08f74fab, 0xfb543a7d, 0x79bb73de, 0x43cbe7dc, 0x1c800f3d, 0xa46f9c9f,
- 0x0e4cb8ef, 0xa3dfcc3f, 0xfe498470, 0x9137746f, 0x69fb0c1f, 0xe604a346,
- 0xf3e64475, 0xf3bee5a9, 0xefd60a6d, 0x0af4bb95, 0x16ff07f5, 0x568603e0,
- 0x0d869dfa, 0x137d738f, 0xd651f457, 0xba83dee5, 0xe6994fb3, 0x61efc24a,
- 0xdf835f6f, 0x8c2a0b01, 0xb598ebbe, 0xee458bb4, 0xdc31a5f7, 0xff9c9daf,
- 0x68071dd6, 0x69ea1fdd, 0x035768dd, 0x9d964f9d, 0xcfe7050e, 0xfd23dbf6,
- 0xf7d97dec, 0xb7009dda, 0x086c697c, 0xa3c37ad1, 0xecf3017e, 0x078d5733,
- 0x73ed8d4b, 0x0bf5181f, 0x7346c7c0, 0xa3cc04d5, 0xdbdfb6ac, 0xdae51f3e,
- 0x60265bd9, 0xbcec1bfc, 0x83b7d456, 0xcdb7dff6, 0x5d3bbf8c, 0xd69f78ed,
- 0xabafa231, 0x5abfbf06, 0x48f7e0ed, 0x87bed1be, 0x7b404cc6, 0x73c1ec39,
- 0x0c9f344f, 0x164cdd22, 0x2f1765eb, 0xbd41fc51, 0x9e73d997, 0xf8ff47d3,
- 0x8fb80f40, 0x455cbf6b, 0x7db922f7, 0xa026f9ba, 0x3c7d75d5, 0xf1db0c7f,
- 0xbf58e07b, 0x001d8628, 0x4be75cf3, 0xcaebf3c6, 0xde8637ba, 0x36fbedcd,
- 0xf6e783e0, 0x44bc7064, 0xc31e6fb7, 0xafb882ed, 0x9272cfde, 0x7ff5c03d,
- 0xf312c0b0, 0xa9b7553b, 0x3b1ad2fa, 0x552d450f, 0x23daa579, 0x21b35f80,
- 0xc69bbf02, 0x7cfdff70, 0xf103a7a9, 0x7fe77df7, 0xe50b710a, 0xf6cf52f1,
- 0xae91c331, 0x97ad5f04, 0xee01c663, 0xc5a3f31c, 0x13416e2b, 0x73beabef,
- 0x86e96fe9, 0x7441dba6, 0xf8e95065, 0xffe3a221, 0xe27bf48b, 0x20efd0fb,
- 0x14fe7479, 0xd347c3dd, 0x03bf4ab1, 0x9be6c874, 0x79e80692, 0x7ec1ba28,
- 0xe806928b, 0xfb7ea87a, 0x931fbfce, 0xe9c607de, 0xabdd2cbd, 0xd059b7f4,
- 0x540f7a91, 0xfd79ce29, 0xfeec8cd7, 0xf5463da3, 0x7e35c677, 0xca3fe811,
- 0x72b0aea3, 0x37b47fb9, 0x3d19fb97, 0x9a9fba66, 0xbe295ee9, 0x235d0dbb,
- 0xf211adc6, 0x75dba67e, 0xeaaf3959, 0xfddf550b, 0x43777c4f, 0x00800098,
- 0x00000000, 0x00088b1f, 0x00000000, 0x7dedff00, 0xc754740b, 0xeebd6095,
- 0x91a93fd7, 0x16883e9e, 0xc085a092, 0xeb404616, 0x7d3d05ff, 0xa71f3210,
- 0x900b18c1, 0x04e08b4c, 0x60dd491b, 0x9e3d90e2, 0x123231a1, 0x1f62cd9f,
- 0xbd6678cc, 0x8c030d39, 0xc077b19d, 0x0b611c56, 0x3837e2dc, 0x71c6ec4b,
- 0x3c4c9c18, 0x6c0843c2, 0xc718d36c, 0xbc4ab243, 0x0f7adef7, 0x0b756bf5,
- 0xcceb43db, 0x40e75764, 0xaabd5ea9, 0xfeeb75ba, 0x7ad3d56f, 0xcecc6333,
- 0x53f817d8, 0x08acbc3d, 0x65cb191a, 0xfc05f3f4, 0xebf2e2db, 0xdf632b25,
- 0xb4cbeeb9, 0x2c93f943, 0x1e670adf, 0x355faf63, 0xb188acca, 0xf6e3a9ae,
- 0xfa87b3ea, 0x8c08758f, 0x127b4315, 0xef768674, 0x0077c154, 0x27d1311e,
- 0x7d1e9d2e, 0xfd4c9fde, 0x8795cdae, 0x9d32fab3, 0xaf7a6863, 0x59b18d3e,
- 0xca0e9bf8, 0x332d9320, 0x5aedcca0, 0xcca012c7, 0x9636f92c, 0xb12e624c,
- 0xf2933184, 0x6bcae99e, 0x39f015df, 0xfe831993, 0x56f595eb, 0x0d5d7f30,
- 0xcccf31c0, 0x3889f1bc, 0xc9c9d37a, 0x97cd5ed0, 0xd769e30a, 0xfe9fde1d,
- 0xe4f329a5, 0x572e3630, 0xfbcdf5a0, 0xf04cf98c, 0xeb6e756b, 0xe7033602,
- 0xd1cc7187, 0x1e60a9c7, 0x1d01e999, 0x2adb5ee5, 0xaa6b3fe8, 0xafe5e1c9,
- 0x6cdef84b, 0x3e1bdfc6, 0x4ac67ace, 0xe2d9c686, 0x54fac809, 0x4c498ec6,
- 0x6f5b2fe9, 0xf8e767c1, 0xb2857ac3, 0xcd6e533e, 0xebcbc455, 0x07e1c792,
- 0x860f6778, 0xcb1d6f8d, 0x55633a58, 0x8b9f699f, 0x387d6d0e, 0x58fb305c,
- 0x5f4fe5fa, 0x33467cc0, 0x5d398fc7, 0x5d79f847, 0x3e90fa32, 0xeed4ae2a,
- 0x4b189362, 0xfc87ec96, 0x992adcf0, 0x36053e1d, 0x7e53f633, 0x0402b8af,
- 0xc29fed04, 0x738014bf, 0x5aef6e9b, 0x16ddd027, 0xa21f4c6c, 0x4c0b2597,
- 0x2e96381a, 0x9bd4d449, 0xea69c79a, 0xd44f57cb, 0xdb736fec, 0x43fa9add,
- 0xea6a661b, 0x354a27ae, 0xd59d55f5, 0xef56f19a, 0xff69ab9c, 0x686feed6,
- 0x7f9e6bea, 0x747f5350, 0xe911caff, 0x885f1a22, 0x89e991b6, 0xd23b0579,
- 0x3a4d88bf, 0x9cc5cffc, 0xf7f7a3d3, 0x7ec27f21, 0xb7af6ebd, 0xd5fe82ad,
- 0xe78d3f57, 0xc5f4b4fd, 0xf90072bc, 0x17d956a3, 0x402bc798, 0xad528d7b,
- 0xf445be95, 0x4536635e, 0xee951be4, 0x11560ab6, 0xda66afc7, 0x6db0aafd,
- 0x7e47aebd, 0xebd01a2e, 0x3759be61, 0x2157fcb5, 0xc706fc35, 0xc7afcc67,
- 0xa5cb84f5, 0x9318eeff, 0x609e397a, 0xfc017fb5, 0x8cc9acc4, 0xa3c74795,
- 0x5db7a74a, 0x6f5f6337, 0xeafccadd, 0xa0582b7a, 0xda3ee90e, 0xfc3b67ba,
- 0x516b927a, 0x5ebe1dd6, 0x6feffe15, 0xef815f86, 0xf323ff5f, 0xdb1013ed,
- 0xe6fc88b2, 0xcfeb5ddf, 0x02402bb5, 0x7a1527bc, 0x278291a8, 0x0e908b12,
- 0x83650398, 0x51f99071, 0xc133d40e, 0x9652846f, 0x38af041c, 0x3034ef68,
- 0x7e71eb2a, 0xa6861071, 0x35eb45d2, 0x288dca0e, 0xa92c456f, 0xebc79cee,
- 0x875e068e, 0xd52ea8f7, 0x3ee7baf2, 0xea92875e, 0x43af275e, 0x5aefaa3f,
- 0xb8cef58c, 0x014489c0, 0x5825beeb, 0x5a1e0e37, 0x05ff4bca, 0xe4994a81,
- 0x5de83896, 0xec7ea7a7, 0x3707af22, 0xf2d0fa71, 0xf65e3183, 0x5f19b3f9,
- 0x813f3c3d, 0xfdc24dbf, 0x006fd75c, 0x828ad43c, 0xd5ef0ec3, 0x3f87320c,
- 0xe4d5f715, 0xe097d990, 0x3133a033, 0x8ccf886d, 0x4ab1982c, 0x0f793e20,
- 0xedcaadf1, 0xe968e3e1, 0x5af51eab, 0xfc3bec01, 0xe51931be, 0xfaa7a730,
- 0x2c12e0b2, 0xd013f9f1, 0xfc327b3e, 0x1f4e1e4b, 0x32a25c2a, 0x5fb5a55f,
- 0x32b6aa8f, 0x59b72618, 0x8559d117, 0xd92c9d79, 0x716dcb87, 0x2e2fe550,
- 0x56e1c22f, 0x1f39ade5, 0xfb1a6cf8, 0xb9967c3a, 0xaeb36a0a, 0xd9f0abbe,
- 0x933e30ba, 0x8496f7ad, 0x321019f0, 0xb7d9f0d3, 0x276e9e89, 0x83094e5f,
- 0x4f182675, 0x23e80cf6, 0xc05499ca, 0x3e244243, 0xb21bfb9b, 0x97c8a21f,
- 0x5817f037, 0x987e6e67, 0x5cf4519c, 0xfee7aab5, 0xdda2b3a8, 0x117a67e2,
- 0xc87c70cb, 0xd2313b2c, 0xbe3cb573, 0x6bba0609, 0x90f80ea8, 0x63be810f,
- 0x06dc151f, 0xcec99bcf, 0x805ab862, 0x31e2aac7, 0x71ad6e8c, 0xec909d2f,
- 0xecf505bc, 0x4e6e8518, 0xdf11a766, 0xc7bd76c3, 0xb59de442, 0x01f1cbe8,
- 0xc70937cd, 0xbf226673, 0xc6db40d5, 0x33fde182, 0x99f00fb1, 0x3e7cc835,
- 0x08eacf78, 0x46af5808, 0xf48d9efd, 0xfe51eacf, 0xe8095f57, 0xe5a5ea25,
- 0x00745b97, 0xa4013fbf, 0x517c8b57, 0x642e9c30, 0x7ee3d3e6, 0xaffdd57c,
- 0x90341fb6, 0xffdb0ebc, 0xcfb0419a, 0xd5394e00, 0x1afa20e4, 0xc8cf6464,
- 0x48c7c14e, 0xbc21acbe, 0xade3f6a8, 0x176826f6, 0x622e91e8, 0xf640b37b,
- 0xb36ca7d0, 0xf305f381, 0x867a55fa, 0x30afcbe7, 0x34ed1139, 0x7ed4f3cb,
- 0xfa23136c, 0x3c97b6a8, 0xb8476755, 0x44cc7606, 0x0abd24b8, 0xc7153f97,
- 0x70e260bd, 0x7f08daf9, 0xe4fe0ad5, 0x43331d41, 0x694fbfbb, 0xff5fa30f,
- 0xfcd3b720, 0x702bc44f, 0x43f0c8dd, 0x4ed007bf, 0xe5689306, 0x43ff03dd,
- 0x69dcf784, 0x01df3f77, 0xf028ae5d, 0x6e856ffc, 0x5124da3f, 0xf5cd3eb9,
- 0xc7a07cfd, 0x7c6e917f, 0x0fc866e2, 0xd5f74439, 0x9027cff6, 0xd9f1521e,
- 0x10f48637, 0xe8f84dc2, 0xdb8344b3, 0x387510aa, 0xe5bf0abc, 0x2a57c88c,
- 0x577c2a9c, 0x0bab7a7c, 0xe7b37bc6, 0xf61c135f, 0x454bf821, 0xfb847ee7,
- 0x674fc1cd, 0x92ef88b8, 0x34667635, 0xfc5266f9, 0x964ca488, 0x3ea2a62b,
- 0x85cc7388, 0x74cb8c75, 0x364cbad1, 0x02fd1da3, 0xf393274b, 0x82734860,
- 0xf7f08ccf, 0xd2ec37a7, 0xa9d6fb43, 0xfbc13322, 0x36bcb35b, 0x09fea75a,
- 0x9399eb86, 0xd60361e9, 0x6577f3e8, 0x9c11293e, 0xbf475083, 0xa2f1c6c7,
- 0xf8f4689f, 0xc75fb9e0, 0xc10a1cd6, 0xee02c1cb, 0xe7cd9d8f, 0x1e17dae5,
- 0x842976d9, 0xad0fda3f, 0xbdddd4d3, 0x2976dbdf, 0x106ede54, 0x3697dc44,
- 0xd9db8e85, 0x5ff404c0, 0xe13b455a, 0x903743a1, 0x2c6bf586, 0xae0a7e78,
- 0x008fe70f, 0x5be73fff, 0xd2ff3d62, 0x8ff650a9, 0xff69fee1, 0x0df91f24,
- 0x2b1ffcea, 0x5f38ffeb, 0xa6387317, 0xb3ba1e01, 0x9f70c5bf, 0x3f7c6c6a,
- 0x7a72504d, 0xf54372e6, 0x739e8677, 0x3ea55179, 0x73de3a0f, 0x1ce91d12,
- 0x994c76a8, 0x7de911f6, 0x71bbd227, 0x48b5bea8, 0xf08be8d7, 0x220573fe,
- 0x16c07bc1, 0xa15b3db9, 0x5c139d97, 0xdfe8d9dd, 0x6c73ab3f, 0xcbce2586,
- 0xe51e3adc, 0x7adb56fd, 0xe2e653a2, 0x38c058eb, 0xf4cbcebf, 0xd1e81978,
- 0x82b7fe79, 0x8dd62a42, 0xf886c2cc, 0xe70bf557, 0x01d13e89, 0xd7f2bbfd,
- 0x5ddd7199, 0x1c66f5b6, 0x82d684cf, 0xea3c386e, 0xe6ec0fba, 0xf4cbcef3,
- 0xb2e9e089, 0xc57bb9b9, 0xa33fa801, 0xee5e7bac, 0x6aaf6a3a, 0x09149ff4,
- 0x4367e07f, 0xf568e8dd, 0x87263ce2, 0x3ac246bd, 0xb2c66c73, 0xec89784f,
- 0x0dbf5513, 0x9dad4fe9, 0x91bafa72, 0x15d763e4, 0xf244cf9d, 0x07305e53,
- 0xbcaf9fde, 0x7b720d6d, 0xa3bf6bbe, 0x1cf7c45f, 0xf3e3f902, 0x764dbf73,
- 0x2e77359d, 0x56b3e7b7, 0x98549179, 0x3fa1f04f, 0x5bc54f31, 0x195f569e,
- 0xa0b7171d, 0xd4de30fa, 0x2c59d1ea, 0x2abe90c0, 0x70f3fcc2, 0x40bb3b7d,
- 0x5972d7b1, 0x2d2bda31, 0xc863df26, 0x420d960f, 0xe2a1c23a, 0x8e814575,
- 0x7c8620e8, 0x942667b0, 0xb465d95d, 0xd3ccfaaf, 0x5f980417, 0x4366df17,
- 0x8cdbd0be, 0xb31bc47e, 0xa80d1f8a, 0xffc24df3, 0xb7b22533, 0x2f848eb0,
- 0xfc79397d, 0x86c7966e, 0xb032eceb, 0xa7f248a6, 0xfdf380b6, 0x927e5907,
- 0x8ddad10f, 0x7bd2041b, 0x66377caa, 0xc237d932, 0x5c8cdb78, 0xd25f80ce,
- 0x3ebf402b, 0xffa017a4, 0xf7b35a17, 0x7ba205e9, 0xadf1e519, 0x12efb152,
- 0xfe1fb90b, 0xca898166, 0x547eca93, 0xc336717f, 0xaffc8235, 0x8430f2aa,
- 0xc7ca94df, 0xf1f26533, 0x732f224c, 0x2e3f7195, 0x5cdc7ce2, 0x5fb97b14,
- 0xfb7a339a, 0x8d7a4b1c, 0x90285287, 0x98dd79e8, 0xc52e4987, 0xa3396f12,
- 0x5da33788, 0xa7073fa1, 0x73fdb513, 0xaaffde45, 0xb274e620, 0xe51472b4,
- 0xa4e92c41, 0xde83cfe2, 0x79ff97cf, 0x68da47ec, 0x99a3e3ae, 0xe042d4f2,
- 0x2e9cbf93, 0x96e583df, 0xbbdb23d7, 0x772f63f3, 0x72801ed3, 0x37df04f1,
- 0x79b3e923, 0xfb97b185, 0x48bed14b, 0xf845f6e1, 0x3d15cbfc, 0xd2794b14,
- 0x11b36504, 0xff011fd6, 0xfcc28e8f, 0xef5073cb, 0xe5817b72, 0x0fa4b99e,
- 0x67e1dfb1, 0x3ec42eb2, 0x1c147b06, 0x0543b441, 0xeddd9239, 0x2d7efd8d,
- 0x643f487d, 0x5c61ccaf, 0x2ff3a81f, 0x7af8e12c, 0xb0e0147a, 0x25eade00,
- 0xc29baf6e, 0xbfcb50e0, 0x7dbd8c2d, 0xe043e92a, 0x1fbb0548, 0x444205cb,
- 0xd0a7af7b, 0x1a7e7318, 0xe2d9780b, 0xe4918d60, 0xefd8c9c9, 0xca0d9a4b,
- 0xd3cee9aa, 0x4b96d728, 0x5c60e787, 0x1ff8fbf9, 0x79d21be6, 0xc1a561be,
- 0xaeff45e5, 0xbc3bb24b, 0xac478c76, 0x7ccafaa2, 0x52af43bb, 0x94dfb63e,
- 0x6c05db7c, 0xe57d51d7, 0xda1f24ef, 0xd617dfc3, 0xf1c6c99d, 0xfb1deb36,
- 0x959dc618, 0xe78d837c, 0x9e36ac1b, 0x3c6d109f, 0x29b560ef, 0xcf1b0779,
- 0x4a6d583b, 0xf3c6c1de, 0x929b560e, 0xbcf1b077, 0xe4a6d583, 0xef3c6c1d,
- 0x7929b560, 0x3bcf1b07, 0xde6a6d58, 0x1538d3c1, 0x9e3691e3, 0x78dab077,
- 0xe36ac1de, 0x97980779, 0x3c6c1de7, 0xf17000ef, 0x722d83bc, 0x943ac1de,
- 0xf1ff2077, 0x67a49618, 0x547f5fb8, 0x7b06cfa9, 0x786334dd, 0xe76ab55e,
- 0x17182dfe, 0xfc7209ab, 0x5e783955, 0x77b414d9, 0x457e300a, 0x04dfb066,
- 0x114565fe, 0xb0d5f0c3, 0x13f3c3f9, 0xbde7d9dd, 0xb0a3fa02, 0x03bc2997,
- 0x18ea29e5, 0xfe7c3f7f, 0x3b577161, 0x65d6e78f, 0x3bf08b7e, 0x78482cbb,
- 0x8718e7c3, 0x13f253ad, 0xffad56b5, 0xb2e67f2a, 0x995f71f9, 0xf80899d4,
- 0xbb6d4590, 0x8c2ff988, 0xd7f15ebb, 0x6ab7dc61, 0x15771b50, 0xb8d41b33,
- 0x844bcf3c, 0xe1df39c1, 0x6b045f1c, 0x2a7f016a, 0xc1cfe601, 0x53ae15a4,
- 0xa45e7c12, 0x881bddb7, 0x453bbcbe, 0xaa6814c7, 0x9e606404, 0x6a2012a5,
- 0xc654d286, 0xf24cd2ac, 0xfe9d389b, 0xf1bdd4d7, 0x030f28d9, 0x6c04467a,
- 0xf9a83c23, 0xbf580485, 0x2bfc8334, 0x332aeff2, 0x3a47bffd, 0xcc82eb50,
- 0x5d8238f3, 0x2f80cd62, 0x5c266ef9, 0xceab9336, 0x45eb6f90, 0x73c335e6,
- 0x9b37e941, 0x2d4e2905, 0x76f28933, 0x6541ccb5, 0x695774d1, 0xcea3f011,
- 0x7ae175ec, 0x0966ecaa, 0xee3d85f9, 0xf148db3c, 0x32e97ae4, 0xf60049e0,
- 0x48da62f8, 0x2ec88a76, 0x451cf8ab, 0x2f1f48bb, 0xadc92278, 0x61ebd88a,
- 0x0a2a83b2, 0x2dc7c919, 0x7cfcb22c, 0xcfcb84ac, 0xdf5d4b37, 0xfbd73c44,
- 0x878fd516, 0xd0be8f2e, 0x6d9b794a, 0x5bc2f161, 0x6f628afd, 0x90e72447,
- 0xe901b93d, 0x8f04f17a, 0x1688e10d, 0xf62a78c7, 0x1b49ecf0, 0x767baec1,
- 0xd45aec14, 0x2aecb743, 0xfa9399fb, 0xb1ca3b50, 0xd0ba4f0e, 0x9092baef,
- 0xc3ddaa38, 0x8d67143c, 0x6979af1c, 0x9920e214, 0xfe13bfa8, 0xea639d0b,
- 0x87fb023a, 0x54bcfbe5, 0x428ef8e1, 0x634be387, 0xa0d317cf, 0x613e27d4,
- 0x771b8d0b, 0x970e0459, 0xf5c0d041, 0xfdff62b9, 0x78f624a0, 0x7d72812a,
- 0xf22efca9, 0xf974d561, 0x7caa5867, 0xbae1df3b, 0x2d29fc40, 0xf9024faa,
- 0x767f66bc, 0x0689ea01, 0x2ae35edb, 0x52d5f780, 0x3a22fdf9, 0x5b1ff46f,
- 0x1eb16e1e, 0xf7bc478f, 0xa3cb22b9, 0xc50911bb, 0x28de4fe4, 0x67e4fee2,
- 0xfe288784, 0x638bafe4, 0xea7b2fbe, 0xc9fc8673, 0x5b1ffe70, 0x639de3cc,
- 0x068e0147, 0xf1465728, 0xc28ca6eb, 0xbf5a8dfc, 0x443ebf6e, 0x854110ce,
- 0xdc6c69e6, 0xfc1163af, 0xba35973d, 0x147b37bf, 0xf310b1d6, 0xf591b571,
- 0x31c57dda, 0x7e96aff7, 0x3307ca03, 0xd0a9fd5b, 0x6f30bcc4, 0xb2be50da,
- 0x0fafd5c9, 0x383f738d, 0xe537e0e1, 0xb37e7008, 0xf272e638, 0xf91e9c7c,
- 0xe119bd75, 0x7e066ae9, 0x255fb08d, 0x9bffdc25, 0x9611e13b, 0x3a637688,
- 0xe411f743, 0x3637cf09, 0x83637e29, 0x2a767fa2, 0x2c53d472, 0xe52ce952,
- 0xe851b9f5, 0xfbcad7fa, 0xb69c4a8d, 0x2bf26251, 0xec7ff491, 0x38fce6e9,
- 0x71c6e728, 0xf30def38, 0xf453dfcf, 0xd3d89ec4, 0xa00c9ecf, 0x91b5b993,
- 0xf63075f1, 0x0274b174, 0xf6d4bfcc, 0xf8901cf4, 0x0d9b2fb0, 0x2e9c5f94,
- 0xef0c58bf, 0x57c28b6d, 0x69782b9f, 0xb03a3e86, 0x34a05bfb, 0x9010f942,
- 0xfa405481, 0x6fec55ed, 0x14308e01, 0xf1de0420, 0x3af4e7e8, 0xce7c1ee4,
- 0x7fe46d5c, 0xca1bc884, 0x3999569f, 0x83a774df, 0x0efe88ff, 0x9339d333,
- 0x9fcf20ae, 0xfcf26576, 0x6a3c3868, 0x456ff4fe, 0x71cf198f, 0xc4166577,
- 0xebddf285, 0x6f26989b, 0xe78575dc, 0x8a4fc185, 0xc949fad3, 0x8f8527e4,
- 0x74b9909f, 0x247fa85e, 0x021fdcc8, 0x5c50267f, 0xa917e43c, 0x7c98cffe,
- 0x7ff15cf9, 0xcd1dfd02, 0x4947119f, 0x07308b0f, 0x74ddff3e, 0x8f10dff9,
- 0x1fd3d023, 0x9fd3d0a3, 0x347fdfb0, 0x11cc627a, 0xf0cc68b7, 0x1eb2bb71,
- 0x708eb5b7, 0xd321203f, 0x54ce9119, 0x9b7a02fa, 0xe8763250, 0xb4f3217d,
- 0x4b136f4a, 0xbd3ebf8a, 0xf0bec05d, 0xafe1acef, 0xd149d6ec, 0x7ab59dfb,
- 0xfa0f28cc, 0xf19bd732, 0x99d4c49e, 0x6bce072e, 0x006c9911, 0x3e5cbb38,
- 0x007b9e33, 0x64bf2f3b, 0xcdc1bc84, 0x7e3f2e22, 0xdcffecad, 0x2aff9c09,
- 0x7dbe0b96, 0x6c4d7fc0, 0x4df70197, 0x4958ff7d, 0x7b88fdec, 0x35ee4872,
- 0xf7208c96, 0xe2f8565e, 0x2b5f0573, 0xe729d6da, 0xfb445899, 0xfdec5567,
- 0x942b348c, 0x45d75273, 0x745be3a4, 0x2285e787, 0xe21467ac, 0xe4c7e7e0,
- 0xcd4c3ce0, 0xfc406d2b, 0xf1c4563e, 0x7dfecd17, 0xb3fc146a, 0x4ad70089,
- 0xfd0199f6, 0xc7ff5969, 0x11e748ac, 0x39d9158e, 0x0390069c, 0x5995df80,
- 0xfc31b7ba, 0xb5a495d9, 0x67f7d487, 0x67aefe2b, 0x5267e414, 0xf1c3ce0e,
- 0x4c0e7e1a, 0x3ec5cf2d, 0x88b4b01e, 0x80ffb9fb, 0x73fc0efc, 0xe358a4a8,
- 0x64a8fee7, 0xb5f73f64, 0x1adc8f7f, 0xcba8e850, 0x7d9ed911, 0x40fbf621,
- 0xdd60e0e7, 0xb9157cef, 0xaa6f6e11, 0xdbd89ee5, 0x7a59697b, 0xd1c3879c,
- 0x24e7b4c8, 0x28dff16e, 0x8f5d2de0, 0x77e968f3, 0x5ffae325, 0x1a58c385,
- 0xe78c73f3, 0xff31e867, 0x7d79f9c3, 0x94be2ba9, 0x198f36ec, 0x07d837f5,
- 0xb95f6714, 0xcd1f2539, 0x6b67d82a, 0x93be96a9, 0xefa6474a, 0x403bd2e4,
- 0xfc9caadf, 0x534c6117, 0xa899bca8, 0xa5699bc8, 0x29afdfb1, 0x8e48af18,
- 0x632baf2c, 0x73dbe71f, 0x79744e77, 0xfb379eac, 0x4f1b4147, 0x8f25bd6e,
- 0x69474e63, 0x7921faa9, 0x7087eaac, 0x5e868d3c, 0x0bd10c48, 0xcce4510d,
- 0x3631c068, 0x9db51f04, 0x98afe502, 0xc0a6f98d, 0x39d89daf, 0x96c7dee4,
- 0x4f138851, 0xc87f8d0f, 0xa047af60, 0xb1a5dde3, 0xbca37fcf, 0xdb7c37e9,
- 0x47f9d236, 0x28c5d5a3, 0x3173623f, 0xb19fec3b, 0xf6f295b3, 0x94ad05c6,
- 0x029d6687, 0xf498fde5, 0xab79f92e, 0x30a7de6c, 0x9e5c367d, 0x5e398d6c,
- 0x97d8728b, 0x28c537f6, 0xf8ebcd7e, 0x37f93a4a, 0x153ca3af, 0xb347ee24,
- 0xf74ecc0d, 0xb97cafa4, 0x202f58fb, 0x9a2f3012, 0x763ee8c4, 0x7ac7e1a7,
- 0x051b47cc, 0xe65cfe3e, 0x974ff9e7, 0xf0a3fc77, 0x31a0e59e, 0x7f543b7f,
- 0xfb4d9a51, 0xcfe569de, 0xf39bf6bb, 0x9ea5849f, 0xc19e15b6, 0xa753e1f3,
- 0xec1c5327, 0x6b8f03c5, 0xbc5aecec, 0xdf7e80c7, 0x7e861e0b, 0x35d329a2,
- 0x9c3b2471, 0xae927f31, 0xa89a508c, 0xfa5e7a94, 0xf86d9d05, 0xf7f73bfb,
- 0xbd67e848, 0x724cc078, 0x9e81ea7f, 0xe32155f8, 0x3e719627, 0xfe3d06b6,
- 0x35af0019, 0x093273d3, 0xf92b1def, 0xce799173, 0x632bc2d4, 0x0f9d00a4,
- 0x6b5791df, 0x2ebba50e, 0x794cd6a9, 0x639d5ce4, 0xfceb1d8a, 0xf4fc431c,
- 0x4082fdda, 0x112edd7e, 0xe19aef50, 0xec7a86ba, 0xc3eb890d, 0x496b53a9,
- 0xe72a75ab, 0x2de4761e, 0xc7ef8039, 0xf1a7af4f, 0x7b64593b, 0xe5c658cc,
- 0x99068bf0, 0x80f17de6, 0x55c8e748, 0x7c69debc, 0x0f30cbd6, 0xa6dea3e0,
- 0xa7d87ba7, 0xe5114e8d, 0x8134e9d0, 0xdf2d82ed, 0xee8578e9, 0x0288ec99,
- 0xb94a2fef, 0xa96cca9f, 0xf7a77f44, 0x748f2e77, 0xbfa55f7e, 0xa96433c9,
- 0x49c0ed0c, 0xff94269e, 0x36e4b6cc, 0xdf24f17c, 0xe68743bf, 0xcffc03cf,
- 0x227fc17d, 0x6b21f70e, 0xf75c78cf, 0x6a76a507, 0x1f0f45bf, 0xb0f442bd,
- 0xe3efc8c5, 0x8e717578, 0x858eb003, 0xcf1101d6, 0x8352184f, 0x2a6de794,
- 0x8b74f2e3, 0x81630fca, 0x7d3de01f, 0xc507bf06, 0xcea9da26, 0x84270726,
- 0x559ea3de, 0x0597aca6, 0xc2f9f132, 0xd83777f8, 0xf6158c1d, 0xb08b37a6,
- 0xef8985fe, 0x7a86c86e, 0x5e78ef49, 0xb3f778cf, 0xb2b1896c, 0x9f5e0cde,
- 0x9367eef6, 0x9bd15ea0, 0x3095ebc9, 0x9bb078d4, 0xbd9ab37a, 0xa4be3879,
- 0x0f15bf19, 0xfd7f1472, 0x45d26f52, 0x28355f8c, 0x031f87eb, 0xfbece3ce,
- 0xe2ef3a47, 0x78fd5b34, 0x86c9832e, 0x3419f8b0, 0x6077dc85, 0xff2409a9,
- 0xf30bfbd4, 0xf63f7f45, 0x84fd99d7, 0x2759bbdf, 0xad8073d9, 0x7e145fdd,
- 0x5c2e7b79, 0xc8d96dee, 0x8c4e785c, 0xef0966b1, 0xd64c0833, 0x78e3c17d,
- 0x855f9c4e, 0x187f9d47, 0xc8b5b05c, 0x3d70439f, 0x70a8e34f, 0x7937cb5d,
- 0x6b7ade51, 0x0427ae59, 0xe084d74f, 0x456c180f, 0x0f1dacda, 0x1b5d3ee1,
- 0x40209c13, 0x1b7cb93d, 0xb1b5dbed, 0x8e081fc1, 0xef81ba90, 0x096f5b63,
- 0x289763ed, 0x9b9d21b7, 0x216ed68c, 0xb8eeb191, 0xe60c1984, 0xe5d631d9,
- 0x406e7bc5, 0xd8ef7477, 0x1c74659b, 0x8beb3800, 0xc7a1f9b4, 0x83fb3a15,
- 0x98e60f22, 0x01ce7976, 0x047bba5d, 0xdc70f786, 0xa88c3783, 0x5df616ff,
- 0xa4be6234, 0x11a2e7a3, 0xeb03bee9, 0xf3c74e30, 0x6ff988c6, 0xbd5eb963,
- 0x91e74a2f, 0x4f1c4bd6, 0x7aeeb2a7, 0xe9e0152a, 0xbdd02d58, 0x9247f5bc,
- 0x7d7d60e6, 0xb192ae2b, 0xe78755e5, 0x38b4b920, 0x72c29ffb, 0x95381b25,
- 0xccb4240a, 0x10f667a5, 0x4d62b93d, 0xe887b33c, 0x8a4f16c9, 0x1772bea6,
- 0x280cceec, 0xfa686637, 0xeafee531, 0x83f43632, 0x949a98e7, 0xdb665c7d,
- 0xeeb8c019, 0x3bc19732, 0xc2de3210, 0x3262cdbe, 0x1c11edc2, 0xcbd28d74,
- 0xe0ad3fcf, 0x1c52842b, 0xf7cf1f7a, 0xefc40af4, 0xba7a6d1d, 0xcbdcd3c7,
- 0x35e6aeed, 0x202ecf82, 0x0dbdf81e, 0xed40af77, 0xeba7deee, 0x1bef1193,
- 0x47000f00, 0x7d84f8da, 0x9347112c, 0x4ad5dce9, 0xdf775605, 0xb38e0339,
- 0xdd023be2, 0x9bbf5567, 0xb2bfc2a0, 0x91ce91a9, 0xa45e7c01, 0x4d9d754f,
- 0x5ce947f6, 0x9789cad5, 0x6187a80d, 0x3406977d, 0x5bd6d4f8, 0xcafada3c,
- 0xd1fada8d, 0xf29ee532, 0xf90d3634, 0x209978e9, 0xb52439e2, 0xf9d651fe,
- 0xe75ef1b7, 0x0a79d16e, 0xe8cdb3e9, 0x7efa819f, 0xbaa2e8b7, 0x3bcf0eda,
- 0xf89df118, 0xb1f100e1, 0xc5b9fc40, 0xf8354b20, 0x8176973d, 0xb60576e8,
- 0x0f67f3ab, 0xfd754fbd, 0x554dfaf0, 0x1b9c5aee, 0x4730bbe8, 0x90ed0905,
- 0x9316517a, 0x222f88f3, 0xbdf9779e, 0xa36f07c2, 0xeeb803f6, 0x7d852242,
- 0x44feebba, 0xbde6179f, 0x3a56cdeb, 0x215776e7, 0xf91d81d0, 0xd6e7475e,
- 0x2f79e3e6, 0x7903ef28, 0x2ef28168, 0x9226a1fd, 0x128d33ef, 0x9ce577c9,
- 0xe42f2a97, 0xb6e7b517, 0x219e67e6, 0xe7beeb3b, 0xb579d276, 0xc17239c9,
- 0x2d0fa8d3, 0x471dfe7c, 0x1866df04, 0x34ef23a7, 0x3271cfc9, 0x61d72f3f,
- 0x2f68e453, 0xfd7551c8, 0x50e3bfb0, 0x692261ae, 0x9bcd1e41, 0x5da9566e,
- 0xf8283926, 0xffe8756b, 0xdde6538e, 0x75a76d5f, 0x487a3da7, 0x316775c0,
- 0xe60166ef, 0x7c3a82bb, 0x55f950ec, 0xf8b5d7ed, 0x565de80b, 0x5b63b5af,
- 0x65e3d745, 0x6dbb5f30, 0x2dcfc8c0, 0x1fa88d06, 0xdef8e0ee, 0xf2c3e348,
- 0x50ee7475, 0x4730fbe2, 0x7680983f, 0x39a3face, 0xf608d8e3, 0xd3acde8d,
- 0x148311c8, 0xf33791a7, 0x67f2b573, 0x772b43a1, 0xa39651be, 0x4f245f69,
- 0xdfdfb4d3, 0x3fa9a858, 0xbcd4ace0, 0x9d5360ff, 0xcdb26ea6, 0xb16fbcd3,
- 0xe3d4d62f, 0xde6b9773, 0xa558e31f, 0xf5bd3769, 0xed0126a3, 0xc077b371,
- 0x06e87805, 0x1e4b28de, 0x12d323d2, 0xe32865e0, 0x2c562d0a, 0x744df3bb,
- 0x3c7bb64e, 0xec4497e8, 0x773a1dff, 0xc97c4e69, 0xdfe5035c, 0xfc5abe0b,
- 0xc9a00e7e, 0x3ddbf685, 0xf0e3684f, 0x5f38abc7, 0xb94365a9, 0x280ac1eb,
- 0x67b05ef3, 0x81e97bf0, 0xcc7f707b, 0x45db8c71, 0x2ec999af, 0x5376899a,
- 0xb071bf88, 0x397e66ba, 0xbe427bf6, 0xf582f917, 0xbc0a6932, 0x3b08cfb7,
- 0x35ee7a8b, 0xebcbf523, 0xe685d01d, 0xce597ff7, 0xceb3c1e5, 0xfc717bda,
- 0xb2d03e8e, 0x7dcf4fc8, 0xaebf4468, 0x013fafa3, 0xe70cd9f5, 0xcfb44687,
- 0x7a2b9e87, 0x61c60c4e, 0x0e1fe39c, 0x63de23e9, 0xa46e5edd, 0x722b4ff1,
- 0x7a3be5bd, 0xf78b4f3a, 0x864a6b37, 0xe1cb73e4, 0x63ef1fa0, 0x7bf3b4b7,
- 0x368e789b, 0xf9069c92, 0xbca861fc, 0xf41d367d, 0x4f1a4cc1, 0x97960fa3,
- 0x3edee9f9, 0xfbcd6fc8, 0x5c34972b, 0xc8257cc5, 0x4cf3ed71, 0xb1961e7e,
- 0x73fa0301, 0xed35d720, 0xfb95cb1b, 0xde58d39c, 0x2e072017, 0xdbdf2338,
- 0x9f8abb7d, 0xaae0b833, 0x16787da3, 0x47e789f0, 0x3f861d9f, 0xe1dbd78e,
- 0x81a465ed, 0x661bfd90, 0xff7157ff, 0x7228e5ea, 0xbfb2a2fb, 0x014084fb,
- 0x33b7550a, 0x8577e88e, 0xa5ed75b9, 0xcf97dc42, 0xdebfa2b7, 0xee8123f2,
- 0xffb2ffc1, 0xac3b34ad, 0x54f878df, 0x7697de1a, 0x86953a1f, 0x0cdb89f7,
- 0xcf68cdf6, 0x3dd07d03, 0x3f5e8ee4, 0xa87edfc1, 0xb79f1fb0, 0x140e0af7,
- 0xeb9eeec9, 0x364eb40d, 0xa3fd15b0, 0x3ae0517d, 0xabedce58, 0x2a76e5c3,
- 0x13982edd, 0xc98f7ef5, 0x1e9013e7, 0x5c213e40, 0x820fc047, 0xe66dbefa,
- 0xd4f648a6, 0x27df0565, 0xde1919b7, 0xfe836dfb, 0x59d38eb8, 0xbdf88da7,
- 0xb0c39685, 0x71d7012e, 0x1c7881ee, 0xa181837f, 0xf7c2a19d, 0x03e05671,
- 0xabfbff5c, 0x2dcf0c0d, 0x27ad596f, 0x81bfbf7c, 0x87fa27db, 0x0335bd7f,
- 0x2e80cbea, 0x270f3bf1, 0x9d38df56, 0xbfbee301, 0x7569db86, 0x2f881b0f,
- 0x6e13ad97, 0xa50e4bdd, 0xff6e956b, 0x70d3a146, 0xdba70dfa, 0xe6baf461,
- 0xdbebd1eb, 0x90ecb7e5, 0x61f7a819, 0x438ce7ff, 0x731efa3a, 0x1a7e401c,
- 0x0718613c, 0x2ada4f70, 0x63c3c039, 0xa7bef5d3, 0x9c6e50d2, 0x6df9ede0,
- 0x7f2e057b, 0x7d71cbec, 0x8646dd19, 0x2a2e08f7, 0xd95176fb, 0x0a0b3b37,
- 0x62cfc8f1, 0xa9ca1260, 0xf9ce256c, 0x1d35818a, 0xe452ff46, 0x8ffa67f2,
- 0x87b77b6d, 0x2b7fefd0, 0xcaab74e7, 0xe64669c5, 0xf7c56c3a, 0x045f8d0f,
- 0xe06cbce3, 0x282735e7, 0x09c94de3, 0x52822c36, 0xe4e5c157, 0xd438156f,
- 0x3e149ffa, 0x437f2760, 0xe6685cf8, 0xd1b79ff8, 0xf8eb8cdf, 0xbd75c999,
- 0x305fde1c, 0xea1b7ce1, 0x3f49eebd, 0xb9dfe3c9, 0xee1d9073, 0xa27ff056,
- 0x63cf37fc, 0x3532cfee, 0x72d7f68e, 0xe90969f4, 0x68a532d9, 0x30f148dc,
- 0xe66ccdf7, 0xbaba014a, 0x5aa54399, 0xaafc745f, 0x82fc8071, 0x2fc40deb,
- 0xb9e9d337, 0x0553efda, 0x8e46e9ee, 0x4a97d607, 0xb6e41b7a, 0x67921658,
- 0xfaa0f150, 0xfb2996e3, 0x5b1ebf69, 0xcfee28c7, 0x8f84d3d8, 0x84e76d15,
- 0x41e71856, 0x02b06c83, 0x3627cfbe, 0x8534bdad, 0x626c9fb1, 0x3a0b05cc,
- 0x5cccc9bb, 0x6c7d0377, 0xd81ea892, 0xbbfaa364, 0x5e54ec9b, 0xa2c738b6,
- 0xa4e079fe, 0xede20a67, 0x671ed644, 0xed73cc2d, 0x41f081ac, 0xfdf4063b,
- 0x376b4298, 0x0cccf4fa, 0x947a37ed, 0xf028adef, 0x8333743f, 0xfcbe40b9,
- 0x8e83e702, 0x467f1bed, 0xe6b1cf72, 0x49fa037a, 0xb7176df9, 0xf1bddd62,
- 0x5f680d77, 0x04fa007c, 0x7eec0ce9, 0x9e501366, 0x81da339c, 0xa66f9a0e,
- 0xeb2f38d0, 0x77c80d82, 0x430263de, 0xf7a0fcfa, 0xd71c71ba, 0xe12d7457,
- 0xe6744df9, 0x48ad2d9f, 0xfbb8fc22, 0x7f7c75f2, 0xf7a1748b, 0xf9effa29,
- 0x87ee0243, 0x9dfda379, 0x467fec4a, 0xb7af90cf, 0xa41306d7, 0x02f87208,
- 0xa3978b8c, 0x86581d70, 0xbd1937b7, 0x413cf053, 0xe1c74293, 0xfa158bed,
- 0x941b4c04, 0x213229ef, 0xc1239f8d, 0x5258169f, 0x41f93262, 0xe7e038fe,
- 0xabbabe96, 0x811e9622, 0xcff5a87d, 0x762187d8, 0xfc3a347c, 0x437e25b2,
- 0x4329c3e6, 0x4323f475, 0x1fa07fef, 0x82fbe919, 0xe3c9aca6, 0xb9778a46,
- 0x83bec007, 0xc300eafd, 0xf4829ec9, 0x4fca8d3e, 0x91d9748e, 0xc171cc7f,
- 0xd82fa83d, 0x3bf23a5d, 0x7b5abdda, 0x87e141a8, 0x7a1ec506, 0x3feb42b4,
- 0x5ed49f81, 0x84a4fdc1, 0x3fe34ce3, 0xff273f85, 0x057c8cc9, 0xfd834ef3,
- 0xfed22579, 0xc7aed554, 0x2ed8137e, 0x740673a2, 0xbe15f48e, 0xe0a8bfce,
- 0xebe5547a, 0x48d7c865, 0xaf9cbdbf, 0xa96dc7c3, 0x1c0bf9aa, 0x7b945b77,
- 0xf157f710, 0x139fdaf1, 0xe7ee5f08, 0x1f4f0852, 0xb59ce7b3, 0x031f9740,
- 0x2e80fb1c, 0xf19d39af, 0x9affd049, 0x6c9f19cb, 0x1d03921d, 0xbf53c5cf,
- 0xa717936d, 0x19fd42b4, 0xef187ba4, 0x8a3ade93, 0x1cf43e6f, 0xbe753fe7,
- 0x52cbfc0f, 0x0d8a9f90, 0xda03a341, 0x262efb33, 0x7c581da0, 0xe414fee4,
- 0x6e7068c3, 0xdce054ad, 0x4bdad8f8, 0xf01e9c29, 0x455aee1c, 0xf49fb9bf,
- 0x6bff111b, 0xdfa23237, 0xd97ed7fe, 0x49fc7c81, 0xfa84bc5f, 0xf1a3e3f0,
- 0xd7ff945e, 0x3a7c998e, 0x6f10fc81, 0x8fb5c822, 0x91f7030d, 0x2f4479bd,
- 0x3ee8ffa1, 0xc83ddbf6, 0x4cd7ee8f, 0xb5b1f968, 0x51d75d7d, 0xddf5b5ef,
- 0xf2c4557b, 0x87e5d1a5, 0xd38f8df6, 0x6ffad57a, 0x28ed9937, 0xb9113f47,
- 0xfbdb589c, 0x6bbfd92a, 0xc0cc6f07, 0x47c7ea38, 0xca094fff, 0x379bf735,
- 0xb99e504a, 0x8251b8df, 0xc71feeed, 0x2dd9227b, 0x84f4b371, 0x665f5557,
- 0xe26a27bf, 0xfa141b45, 0xd5dceec8, 0x57f00938, 0x03896e7c, 0x7b6ab6f3,
- 0x139d14f1, 0x79fe2fdf, 0x21c3ca7d, 0x22033afd, 0xce8a8de5, 0x0eafc7d5,
- 0xaa47cba1, 0x94ef9a0c, 0x0f38d57d, 0x2df7e2b6, 0x0275c56f, 0xec0efa3a,
- 0xbfc880b6, 0xf046bb2f, 0xf601bfa1, 0xb23f784b, 0x923daea7, 0x9910f742,
- 0xbd3f9ce8, 0x1c53eb08, 0x5628eddb, 0x8726a7cc, 0x02cb4bd8, 0xe809bf3a,
- 0xe4cfa9a8, 0x203f7b27, 0x62baa45e, 0xfd743d47, 0x85b26337, 0xdc9a7108,
- 0x1ca0e777, 0xc0d78f0f, 0x5b8418cf, 0xa7ceafb5, 0xa651f8f3, 0x042dd247,
- 0x993b9ab8, 0x161d7884, 0x4b9a69d7, 0xf8f0a2f1, 0xdbb03d8e, 0x93ecfd8c,
- 0xabc21275, 0x8fb8a5da, 0xc0e4113d, 0x7972565f, 0x6757f2a6, 0x3af5e780,
- 0xc9e6b503, 0x69577e50, 0xcc7491ef, 0xece9780b, 0xc600ffbb, 0x1f073a17,
- 0xf30e9f8c, 0x98b7be74, 0x9ae79e61, 0x829e6b54, 0x9ae706fd, 0xa21bc81f,
- 0x1af9f09b, 0x345db92a, 0xd6442f64, 0x5f0aa57f, 0x2c1e968f, 0xd0f3fcac,
- 0x579fe083, 0x7cff7254, 0x709f8f05, 0x7f5c3cff, 0x1d5972a8, 0xef82ad77,
- 0x2af972e1, 0x47e039d9, 0x5b5e5c93, 0x32f7a769, 0x90a516fb, 0x92defd7f,
- 0xf875bb14, 0x43a239f8, 0x7c379c97, 0x739ed57c, 0x6653e57b, 0x3d647bf4,
- 0x897ba7f1, 0x97fcc3e0, 0x9db9ff6a, 0xeee5d902, 0xdf0f8366, 0x2c0ae153,
- 0x07d8f1e1, 0x22cf8364, 0xaa89d90c, 0xddab791d, 0x6abb5021, 0x63f6aa57,
- 0xbbe3c9c0, 0xfb788a4b, 0x556b8b65, 0xa75c58e5, 0x97179e85, 0x398ce7f3,
- 0xf55fa14f, 0x918152e0, 0x4aec501c, 0xedc34e95, 0x72fb4fce, 0x438a4712,
- 0xa7d0af3d, 0xc21367ca, 0x63d543f8, 0x7e7054cf, 0x5e3d40eb, 0xf7fc7a88,
- 0x7c3cfb1a, 0x8fe3d05c, 0x463f5257, 0x2e7e8bfd, 0xebf9233a, 0xadfc6566,
- 0x4e05feea, 0x59bc20d6, 0xfd19b02c, 0xf5fa68cc, 0x139d8a7b, 0xc519fdad,
- 0x3f1dbc7e, 0xc8e4561f, 0xcbf3f168, 0x70f2b795, 0xf9f9a9ff, 0x8c687d96,
- 0xd557bf62, 0x1ffb86bc, 0x6a04f7a7, 0x97feff90, 0xe1eee281, 0xfaa53def,
- 0xecd3229f, 0xa32ca9cf, 0xf723fd0e, 0xe83f1e1a, 0x43bfb24e, 0x7a5bcc7e,
- 0x05ace386, 0xb8ed107f, 0x51266f47, 0x32f686d9, 0x385e2b94, 0x1588f947,
- 0x77f0b46b, 0xfb174c63, 0xf3a6cbaf, 0x9eac2d03, 0x7d2d4ce2, 0x5016d249,
- 0x0f63dd0a, 0xf2be469d, 0x0fdbd01e, 0x88ffbe84, 0xc799befa, 0xfba636e1,
- 0x661af728, 0xc278b40f, 0x7dad7eed, 0x8dc4a950, 0x6f09d9a2, 0x5cb85bf4,
- 0x3e27a339, 0xd378c0ef, 0x7d8cc64c, 0x37b82e5c, 0x474938f2, 0xfc04e6df,
- 0xd2bb6a08, 0x500c457e, 0x6bf28a1c, 0xbed5eaf0, 0x3ee8d3a7, 0x27cdac97,
- 0x39ffefb5, 0xbcf0cbc9, 0x970f7252, 0xbd36ee4b, 0xa12a4792, 0x83b797ef,
- 0xed0e393f, 0x1bfdbb4b, 0x8b807788, 0xc8de48f9, 0x3ecbb89a, 0xe9f4af25,
- 0x3f257f99, 0x2891c574, 0x79eea479, 0x7ca48f3c, 0x1e787727, 0x0ca9f4e9,
- 0x48f3edfb, 0xe8e7bd1d, 0xf2edcb1e, 0x5baaa649, 0x3b8a1c71, 0x7c79144e,
- 0x209f6277, 0x626abc61, 0xe9775f1f, 0xafb238b6, 0x3ab57e1b, 0xba81e505,
- 0xe22c9f4f, 0xfde4fdf8, 0x5fb06acf, 0xc944afc9, 0xe37aeb02, 0xcb3f0dfd,
- 0xd9bb75e0, 0xc436fd8e, 0xc9f33fa3, 0x23c7d6af, 0x1280df7e, 0x17c7e7f7,
- 0xc9f1c8c4, 0x667578a1, 0x959dc515, 0xfa08b578, 0x5c50cc70, 0x8bdfbc27,
- 0x746e5c55, 0x7142bb83, 0xef3c23cd, 0x9aee9ddf, 0x9ebff547, 0x29dff8c4,
- 0xfd1e2990, 0xc635d02a, 0x62c1a68d, 0x3ffd2f31, 0xe62758e3, 0xbac7ba91,
- 0xf47c427e, 0xf23b3ad7, 0xfca7f411, 0x9db0f793, 0x6a8623f4, 0x0333cf31,
- 0xf7405b3f, 0xdf295231, 0x8fb28f73, 0xa2cfe0e9, 0x2cd0095f, 0x34f43cc1,
- 0x2333df25, 0xfe38898e, 0xfb6b830e, 0xf52fcc54, 0xc4cd8e8b, 0x3c148e04,
- 0x779e196f, 0x2f9e6412, 0x6a4ff357, 0xfe5a3cde, 0x9479c049, 0xf3f056ef,
- 0xf980b296, 0xc4cf3574, 0xf9293a96, 0x43cca5b5, 0x807c42f7, 0xde73afb3,
- 0xcffc846b, 0xf21115c3, 0x982beb3f, 0xf3699f62, 0x5f73db8d, 0xaafd3a21,
- 0x78c3c679, 0xe68ee2ae, 0x0fdcecc5, 0x28bb830d, 0xf234d3e9, 0x6ff8febb,
- 0xfb67a409, 0xbc3e906e, 0x00b13416, 0x78071fc5, 0x40cfbddc, 0xe06b8671,
- 0xd10d8dbf, 0xf054efb3, 0xd5f78f71, 0x04f7fd11, 0xfdf1e3ea, 0xb7cf2bdf,
- 0x7ee4b7bd, 0x01bdf7b4, 0xc60b1dfe, 0x13d63fb8, 0x6ed18b10, 0xf2fb83b9,
- 0xf5846564, 0x7eb8e399, 0x52a3e70a, 0xab4aff85, 0xdeeb9e38, 0x7b987ee7,
- 0x48afcf03, 0x7b705c8a, 0x74fd5e78, 0xc6a09a56, 0xca45c7d3, 0x92091cbf,
- 0x447968de, 0x7cc9efa0, 0x33cbfcd9, 0xf1f391e9, 0x462f7a2b, 0x3634bfce,
- 0x168cfc90, 0x638f0ecf, 0xff2a9dff, 0x7638f3a6, 0xeb44957a, 0xd7e4f64e,
- 0xeb43638c, 0xb0fb83bf, 0x7d845674, 0x6fe383b8, 0xf1db1d9c, 0x3b63caeb,
- 0x1e3113fe, 0xc78c7b7f, 0x6c7961ff, 0x8abaebf3, 0xdbffc2d8, 0x1fdfc318,
- 0x57fffe09, 0xe78407cf, 0x0bcfc3ff, 0x30039f84, 0x17b0f5bd, 0x9d84badb,
- 0xefc33f41, 0x24d6bf8d, 0x93141c80, 0xce7dd3f6, 0x1f3ce505, 0xecdeadb6,
- 0x9d0c2bdb, 0xf5f1501b, 0x23f73568, 0x5eebbca5, 0x04d45efc, 0x829f5039,
- 0xfe72b2f1, 0x59ae4377, 0x898c5e78, 0xc9e63a52, 0xc5f4cde0, 0x5b749760,
- 0x5359e722, 0xe9a2f195, 0x35d58391, 0xe315678b, 0x7c8a3d1f, 0x4a87ff0e,
- 0xe5ea521c, 0x6899e9f7, 0xf34c5f4f, 0x1ed1d3da, 0x9fc93c4a, 0xbc99f827,
- 0x67b2e097, 0xdc7fe62f, 0x7be11b20, 0x26ffc946, 0x1b2bbbcf, 0x60c9d2e3,
- 0xf05e6217, 0x920fcc69, 0x7bd12b3f, 0x66977cea, 0x19ea3f71, 0x9f301303,
- 0xcfca9deb, 0xdf9eb139, 0xe6a6fd91, 0xfc1acef5, 0xca055a97, 0xfc23b5eb,
- 0xf3f9d53f, 0x442b65f6, 0xebcdf80c, 0xf8df2891, 0xe5dff976, 0x9e1e68ba,
- 0xe24c7be7, 0xbf409caf, 0xec78e019, 0xefd12168, 0xffe5e38a, 0xc7fe8cd4,
- 0x3cf09164, 0xe254afcc, 0x78941705, 0x502f1a87, 0xacff5bf3, 0xc03e49c4,
- 0x03940bf8, 0x72810f18, 0x9ddd2cfb, 0x67db4fd6, 0x67ffe045, 0xf21f1e04,
- 0x87c794fe, 0x9c8e8d0a, 0x7f94f3d0, 0x63ef8b35, 0x2fe79fab, 0x0fcf78d4,
- 0x7e14f595, 0xefc7ef4f, 0x9d6e15a1, 0x5f3d446e, 0x0e2b35b8, 0x4fb219cb,
- 0x5ef80697, 0x4eb0fab6, 0x7448bfdd, 0xe75957b4, 0xca4133b6, 0x961abdef,
- 0x8b34bff7, 0xa968679f, 0x097bfbe6, 0xd418a0ff, 0x6e229b1f, 0xb131f341,
- 0xca47d24a, 0x5f80b52b, 0x76b6708c, 0x7c55b873, 0x75edf8b7, 0x53c5cf38,
- 0x1658adc1, 0xa3aec9ac, 0xf5db7bb8, 0xb9bf68ad, 0xae9c6f07, 0xd19978a6,
- 0x43c4fe8e, 0xdbb8da03, 0xfe3cd513, 0xe01a2afe, 0x457bb788, 0xef0b8ff2,
- 0x927e8915, 0xbfe42c5b, 0xde981b26, 0xdc45f58a, 0x4c682a2e, 0x5b3ac3ad,
- 0x60f64492, 0xd27b19c1, 0x714379c3, 0x377be4b0, 0xdd3c458c, 0xc6aacf2e,
- 0xe45b2ed3, 0x5f9fb8e3, 0x7edacf35, 0x6f3a3df6, 0xc54baeaa, 0x8d2f9a8b,
- 0xcf556796, 0x3f1bb83d, 0x70d39cd3, 0x8c06c18b, 0xbd1e163f, 0x5fce5468,
- 0xa1ecb64a, 0x91e16c3c, 0x557f94eb, 0xcf6eadf3, 0xa1abb275, 0xf7fb119f,
- 0xd7f9e26c, 0x2c0e00d7, 0xb2dd617b, 0xdba1ef09, 0x7583eca8, 0x9b63f3e3,
- 0x6ff4a972, 0x7c795072, 0xf8951953, 0x64f4c999, 0xa75edbcc, 0x59d5edbc,
- 0x524dcec3, 0xe8b76f5e, 0x15f108fc, 0x7aa3f792, 0x1f4fde78, 0xee35e3c7,
- 0xbefa0633, 0x99acfba1, 0x5f4d77e4, 0x0ff444eb, 0x9ae73f3e, 0x60777088,
- 0x2e842dde, 0x961d913e, 0x9d83f424, 0xdbe60b0e, 0x1aaffb0a, 0x874c3cf1,
- 0x3c623018, 0xc0cf200c, 0x9eee8b23, 0x6fdcfceb, 0xf1058d5f, 0x58fcdb7b,
- 0x8fcfffef, 0x58fc957d, 0x273f3577, 0x0291caad, 0x8bde4ea7, 0xf2d919e5,
- 0xd89c8870, 0x94f9e4e6, 0x1127936d, 0x4e76ee1f, 0x321d5a4a, 0x8f23675e,
- 0x3bdf245f, 0xfcfa12d9, 0x1f3cd597, 0x67302c96, 0x33567924, 0xc7ddebfb,
- 0xd76e411b, 0x3ffbb244, 0xe5d029f9, 0x238ba04c, 0x90389edf, 0x343f19c7,
- 0x71adb71e, 0x2831c4f5, 0xd8982e9f, 0xd3af1e14, 0x5858e3c4, 0x5c5a2e5e,
- 0x4ea3456e, 0xa179d0b6, 0xdbd404e0, 0x2633f1e1, 0xbd702706, 0xbde0672e,
- 0x83319814, 0xf961b195, 0xdf305fb9, 0xfbc327cf, 0xa4751acb, 0x5d3cd42f,
- 0x36c8a341, 0x6c167d01, 0x3f28f834, 0xa45e28d7, 0xec6fc576, 0x90320a94,
- 0x6b383fce, 0x16ae3094, 0x34731f1e, 0x049eefee, 0x026d1dfd, 0x5dba0bdd,
- 0x073d4f1e, 0x3fdbd2de, 0xf96de307, 0x3d77576c, 0xac9fcff4, 0xf4fcf093,
- 0x3f1e7cf0, 0xd8a05dff, 0x3671e139, 0x4bb433a4, 0x1bf0e3c2, 0x38d83a15,
- 0xd7adeb9b, 0x1d537f14, 0x4e17ed47, 0xcd9d3f3f, 0xb8b46df5, 0xbc6551fe,
- 0x6d82c6ce, 0x177f7193, 0x93ea18e8, 0xf35ac6ce, 0x76f190e4, 0xdbe7e6cc,
- 0x60fcc19e, 0x907e686a, 0xf3c301e7, 0x2b9caa82, 0x037dffec, 0xc8377be2,
- 0x6e63d46b, 0x141fcf1b, 0x8339ab1e, 0x4e086372, 0xc377b7ae, 0xbec8079e,
- 0xbe312473, 0x457bfa2e, 0xc502a3de, 0x7c160ae7, 0xea1e3c76, 0xf74499cc,
- 0x2767dcb0, 0x3ca2b16f, 0xe316205a, 0x53b71ebd, 0xc26795b8, 0xeffdd27c,
- 0xbcefa1ac, 0x7f903e16, 0x81e5f70e, 0x0f9d0366, 0xfddef554, 0xf03534ef,
- 0xc64fbdf6, 0xe0ae7b61, 0x79f1d8b0, 0x763f7d23, 0xfa7e4bfe, 0x110b58bb,
- 0x81e7903a, 0xe44497b3, 0xd716cb9b, 0x151f23df, 0x4cf2522e, 0x3f4f5ccd,
- 0xc0c8b7d0, 0xead3cec8, 0x9f5913cd, 0x46e83c53, 0x8fe5215f, 0x43fcf052,
- 0x29be90de, 0x6c7a0a3c, 0x63d39dcc, 0x112d9d3f, 0x1e82673e, 0x17927843,
- 0xf87d3cf1, 0xf650effe, 0x8a5e9a3f, 0xff518726, 0xde63fbf0, 0x5c36c10c,
- 0xee31db8f, 0x3af68a53, 0xd3a2e394, 0x71661fdf, 0x628a4eba, 0xa149fdcf,
- 0xa7e7a8df, 0xcf27477a, 0x7fd987f4, 0xd563791e, 0x4df352ff, 0xa0f6bfaa,
- 0x65ab0f62, 0xbbfe5293, 0x72a037ef, 0x21bf788d, 0x86fdf239, 0x68090e87,
- 0x1bf7a8a7, 0xcd59194e, 0x4f3c54e1, 0xf277dcab, 0xe93b222d, 0x62fcdfd4,
- 0x78a98beb, 0x509049df, 0x3a1fb51d, 0x1dbe7a47, 0xa7b223ec, 0xe3cb7efa,
- 0xf9f887ad, 0xc8def6f0, 0xaddbdddf, 0x32f5dfd8, 0xaed1b923, 0xe86db79c,
- 0xb744f251, 0x8b8f96e2, 0xb74b71a1, 0x3d56303d, 0x7f23ebc7, 0x8cc71b4f,
- 0x60ecb716, 0xdbb73a41, 0xa07046b1, 0x39ddb87e, 0x79ca5ddc, 0x4c71a8dd,
- 0xb973e479, 0xb94c71e1, 0xb9c79b5a, 0xf7c4966f, 0x680825db, 0xce759aef,
- 0x7fb73a36, 0x9d22904c, 0xcd5a5ef3, 0x257cf4e5, 0xc8d0720f, 0x8560e3e9,
- 0xf06ff740, 0xfbee1e1e, 0x1efc63c1, 0x7e02c1ce, 0x7bcd470f, 0xa66bf7a8,
- 0xe83de50e, 0x2f7a9dfe, 0xc367bd47, 0x5ae08f98, 0x7d1c5cf4, 0xe1876bc7,
- 0xbddb8d90, 0x1a3f7e5e, 0x43e61065, 0x8913cc4d, 0x6bab6b5f, 0xef7a6ac4,
- 0x8f4fdb8e, 0x7bf04754, 0xe91f8a7d, 0x2fea8eae, 0xfad6afac, 0xf70f540b,
- 0xde54ee17, 0xecd98be3, 0x87fbfb4e, 0x9e8f1c63, 0xf1b63b32, 0xfb48ecc4,
- 0xfda2c5b6, 0x5e5aaf1e, 0xb0dfe456, 0xbb0dc5ef, 0x7af5c92f, 0xa7ff60b7,
- 0x1ba7ca4a, 0xcaae9f28, 0x7c31eed5, 0xe3291ffc, 0x294065a9, 0xd77ca3bb,
- 0xbc4a97f9, 0x7b944bac, 0x2f69ee10, 0xd397e368, 0x97e3690f, 0x35937b33,
- 0x78ff7cf5, 0x6785fbcd, 0x8bda6926, 0xda68f703, 0x68142f4b, 0x1503e5ea,
- 0xaf2bf79a, 0xb3ea6ad4, 0x65f8da82, 0x61c5cdf5, 0xb47d38f7, 0xdca01abe,
- 0xe6757ed0, 0xcbde6a6f, 0x575da358, 0xaebb4796, 0xebb51b89, 0xf6cdc752,
- 0xd397d76a, 0x325f5dad, 0xbef2e3e6, 0x7f2e3e7e, 0xc7c95db6, 0xdfb058e5,
- 0x2d938d33, 0xf7a9b768, 0xfef7d5a9, 0x5866372f, 0x007f40df, 0x00000000
-};
-
-static const u32 csem_int_table_data_e1h[] = {
- 0x00088b1f, 0x00000000, 0xe4b3ff00, 0x51f86066, 0xb97bc10f, 0x726e1818,
- 0x0143f821, 0xd08667cf, 0x0c0c2c6a, 0xc6cc401a, 0xcec0c0c4, 0x717ebc44,
- 0x1d7b044e, 0x4cc30307, 0x31c8de20, 0x481afef0, 0x7e879d7c, 0x42f3a976,
- 0x81c15968, 0x570837f7, 0xb430310a, 0xc430330a, 0x0cf84088, 0x55f2a8a2,
- 0xa9b60842, 0x39766524, 0x0003f502, 0x3471cc24, 0x00000380
-};
-
-static const u32 csem_pram_data_e1h[] = {
- 0x00088b1f, 0x00000000, 0x7de5ff00, 0xd554780b, 0x733ef0b5, 0x7993331e,
- 0x0f20f264, 0x0084f102, 0x021842a2, 0x27088784, 0x01a8c421, 0x54bc8083,
- 0x48433c26, 0xbfa81132, 0x2677bd6d, 0xb5ad1104, 0xbc1b6951, 0x14101de8,
- 0xd1a07515, 0x40e81c06, 0xdbdab114, 0xd5a8f8a8, 0x40445076, 0x8bcbc248,
- 0xaf7fd52d, 0xe649cfb5, 0xd4484c9c, 0xffffbff6, 0xdbf3f1fd, 0xd9cfb3ec,
- 0xdaf5ed7b, 0xf6bdad6b, 0x9a32c11e, 0xe4224e24, 0x65a3f85b, 0xf4842784,
- 0x0adb2ce9, 0x64918fda, 0x8b2ffc42, 0x108e36f2, 0xf08e77ee, 0xbc8451a4,
- 0x980b99b5, 0xcfbc3d69, 0x9fad0846, 0x603d34de, 0xff6422ce, 0xef02b308,
- 0x8f9ade9f, 0xbd2fc9f5, 0x7b6814e7, 0x00bc4bd5, 0x13bed375, 0x109d88ce,
- 0xa1e5b9af, 0xf6f3e96b, 0x85b27897, 0x93fc450e, 0x9085244d, 0xfec21663,
- 0x52fab22e, 0x6d56ab2b, 0xf4077fde, 0x2664de5b, 0xd54fda56, 0xaed365ee,
- 0x8765f5a5, 0x54af0244, 0xfa95ab6d, 0x00f2fad2, 0x9afa8417, 0x71c67f7d,
- 0x79480ada, 0x27212870, 0x5f22167d, 0x96ceeb49, 0x79f45994, 0x11676045,
- 0x83b15fbc, 0xfbe89b73, 0x7ff69b15, 0x3457fd0c, 0xda79038a, 0x36ed8aff,
- 0x63610f22, 0x1e604b7f, 0xbc01a64b, 0xc4886f55, 0x97e5eb4c, 0x70044956,
- 0xa54bd424, 0x61ff180e, 0x38c07649, 0x0d1c7087, 0xd0cf559f, 0xd577e871,
- 0x986e702f, 0x7889b55a, 0xddd69e00, 0xda4f39d6, 0xd2b55e61, 0xf77ef860,
- 0xb7bc127d, 0xb2f6502c, 0x36f80655, 0xe700454b, 0xd2d2cda6, 0xadfd9da1,
- 0x0ea6fed8, 0xd90d63ae, 0x76a89ea9, 0x47d27963, 0xacee6c88, 0x04a21057,
- 0x407700ed, 0xf3ac3e9a, 0x812e79f9, 0x3fd0d190, 0x674b644f, 0xc83094ff,
- 0xe8f7fe07, 0xf60f813f, 0xb2db023a, 0xd2b5e93a, 0x772dff45, 0x4bacebd2,
- 0x9ed09fa5, 0x56bfdd17, 0xa074043e, 0x5cfbd4f0, 0x4be23e58, 0x4f8372c3,
- 0xcafdbc46, 0x06cb0437, 0x3f9f1b9f, 0xe58b1be6, 0xe5829f26, 0x2c62be13,
- 0x7c52be03, 0x0e6f8b6f, 0x1e7d5b96, 0xaf94fe7c, 0xbeedcb1c, 0xacfe7c1a,
- 0x772c6eef, 0xfcf8fcf8, 0x2c7adf05, 0x2c7abe83, 0xb01af977, 0xf005f46c,
- 0xdb7d97bd, 0x05f26cb1, 0x5f1ef9f1, 0x5f219613, 0x407dcb18, 0x7d865a5f,
- 0xf01e582d, 0xabe5887d, 0x777e08be, 0xcb1c77d0, 0x3bbc5507, 0x817c9027,
- 0x10a9cde2, 0x92171517, 0x8be4a258, 0xca589eb4, 0xf9b729ea, 0x4f5a25f3,
- 0xc53ad0f1, 0x70cadf63, 0xfbd699be, 0xccf6b0d6, 0x8581487b, 0xacfd33d6,
- 0x4a83c07d, 0x07d69581, 0xc1f6b3d4, 0x7105fc9b, 0xc0383eb4, 0x11deafda,
- 0xfad1b02e, 0x9ed641d5, 0xed932213, 0x66139eb4, 0x94b7dcf5, 0xcf5a0ec9,
- 0xbcf5616d, 0x9d93fd8f, 0x61179eb4, 0x153f8fdf, 0xeb4f1c9e, 0xfb59dbe3,
- 0xd0a44bc4, 0x0913eb45, 0x7b02f587, 0xad02617e, 0xbd58b817, 0x20995fa8,
- 0x7dbfe0c7, 0xa1116462, 0x7fddb0bc, 0xd3a422b3, 0x455914ba, 0xfe9f14dc,
- 0x8b0f5839, 0xbfb43164, 0x84532fe5, 0x912eb471, 0x17fed0d5, 0x1fb6057f,
- 0x6f6c6510, 0xf6c2aff7, 0xed8c9203, 0xb07bdaa6, 0x60a8aafe, 0xbded727b,
- 0x92abfef8, 0x6b83ed82, 0x41fac21f, 0x63ed83d1, 0xef8d7f6b, 0xd83c941f,
- 0x80dac8fe, 0xffeb4852, 0x00b679c1, 0x9e71d75f, 0xfc0d9272, 0xa52b4c1a,
- 0x238ebafc, 0x1e3e4073, 0xf2a6a600, 0x525d2eb0, 0xfbedbf40, 0x3c93de47,
- 0x3276f2a7, 0xf53e97d4, 0xf3f61640, 0xd223f61c, 0xfb9ef87e, 0x58cdf899,
- 0xbf133f5d, 0x69fad729, 0xaceaf784, 0xdf67ebbd, 0xf0f5e337, 0xf5a1537c,
- 0x71fb17b3, 0x135e6ef4, 0x87a09dbf, 0xad4adbe7, 0x4fd8839f, 0x09e0ef42,
- 0xfd74638b, 0x5a65c584, 0x7ec47f3f, 0x1e0ef4fa, 0xeba71a45, 0x6b969147,
- 0xfd887cfd, 0x9faef7a4, 0x1ead74b0, 0xd685691e, 0xa7ec11cf, 0xa5e6ef7f,
- 0xc3d3af98, 0xfad2ae63, 0xcf748939, 0x073f5dea, 0x1cfc7a1c, 0xe7e07470,
- 0xa833c21c, 0x702af377, 0xe053f1ea, 0x25cfc0ec, 0xdeaae7ec, 0xa9c073f5,
- 0x6701cfc7, 0x0e447e07, 0x77ac35e6, 0xd7882af3, 0xbe20a7e3, 0x0e4e3f03,
- 0x3bd119e0, 0xa3ed5e78, 0x7dabcfc7, 0x8a93f03a, 0x1dee8cf0, 0x7a29853c,
- 0x74a614fc, 0x784647e0, 0x9faef5c6, 0xf8f45357, 0x03a53579, 0x3f61573f,
- 0x5e6ef5d7, 0xfc7aa985, 0xe076a614, 0xc9fb1727, 0x78476cf7, 0xd1c7ed08,
- 0xfb073f7d, 0xb073f1eb, 0xae7e077f, 0xd0a67ec5, 0xee7bb27e, 0x8f5328a7,
- 0x0ecca29f, 0x9e2214fc, 0x3f5de86f, 0xf8f53307, 0x81d99839, 0xcfd8a99f,
- 0xabcdded4, 0x7e3d0ae8, 0xf860ae8a, 0xc08c2499, 0x9dae8675, 0x727e9e6e,
- 0x857cbf7d, 0x2ef3efa3, 0x6e5de756, 0xaf0f7602, 0x45a433d9, 0xf6fbe9e1,
- 0x0fb9091d, 0xa6bb6890, 0x8fc076e0, 0xd1795a83, 0xd8fc4d76, 0x951d9d38,
- 0xeaea24a0, 0x757dc549, 0x1d29f7ef, 0x9d4f6ba0, 0x3daeb573, 0xabab93dd,
- 0xd78f9467, 0xa6bfdfbd, 0xe2bf5740, 0xef751bee, 0xd16ff967, 0xcfd7b3d5,
- 0xa83fbdd3, 0xfdaea17e, 0x5d0a86ca, 0x958155fb, 0xdb35faba, 0x7f7ba27f,
- 0xae8d7058, 0x03d3787d, 0xe111f6ba, 0x91f57447, 0xbdd31e87, 0x8b65ba3f,
- 0x87cc7dae, 0xc7daeacf, 0xeae97645, 0xa3df1ed7, 0xf6baff7b, 0xa4faba03,
- 0xbdd7bf8b, 0xd5de4f9f, 0x3f96dbdb, 0xe29fdeeb, 0x7ed74cfa, 0x0697da7d,
- 0x3aeed53b, 0xe75a8d76, 0xfa08ae41, 0x0974fe25, 0x43b0d5ed, 0xd7d4bac2,
- 0xc714fcce, 0xe528f952, 0x22c0e91f, 0x92f91939, 0x21bb51fe, 0x95f96fbf,
- 0xaefcfa11, 0x5d2b9ef1, 0x925df9f4, 0x862bb867, 0x8df9437d, 0xca506923,
- 0xed8d2826, 0xdf8c89f7, 0x0d2e320b, 0xf7bf423e, 0x2707da9a, 0x3e70fad0,
- 0xf1f2083f, 0xbb426732, 0x35278e3a, 0x999c5ef0, 0x5f74dfa0, 0xfe93de56,
- 0xdf587928, 0x5c19da9f, 0x45ebbf67, 0x51da1a4f, 0xc1f89fb5, 0x9fd759ce,
- 0x212fabce, 0x849f70af, 0xf7e903fd, 0x79a697fd, 0x89667f68, 0xe3d119fa,
- 0xa1fc744b, 0xe4187e38, 0x3f8c20e1, 0xe6f8ebba, 0xe3756301, 0x75cb325b,
- 0x3a245be3, 0xdf908bbe, 0x8eae7ed7, 0x9e30894f, 0xcfb93790, 0x66737c71,
- 0x9f7f8e39, 0xe8aefd44, 0xc63ae3f1, 0xbff9816f, 0x07fcdddf, 0x3fcfd78c,
- 0x7f3f42b3, 0xa3ffcd89, 0xf8ead3da, 0x3fff3871, 0xfcd9a773, 0xfcd82b33,
- 0x8edfaccd, 0xf81d9df1, 0xc7f8c08f, 0x9cdf19ba, 0xff3f413d, 0xf3f52a2b,
- 0x4ff1b337, 0xc7505ed6, 0x5ff8e3b7, 0xfcd81772, 0xf1c4a8af, 0x0dc7b325,
- 0xc46523fc, 0xf8e804d5, 0x7c551fa4, 0x742ec0a9, 0x2487281c, 0x6421a152,
- 0xe100371b, 0xf18e2bb8, 0x947157db, 0xf99f50df, 0x500939f1, 0xfd4799cd,
- 0x57d79546, 0x4097c8ec, 0xf53b61ef, 0xa0a896b7, 0x91e26e2e, 0x7306c9bf,
- 0xb7851060, 0x43f789b5, 0xeb86f17d, 0xbd5fa002, 0x14a0b266, 0x184813be,
- 0xcbfe7f9e, 0xe7a25b61, 0xd4972c65, 0x5122c78b, 0x07e1d6f2, 0x35219016,
- 0xc029be14, 0xeb3ba304, 0x0e3f529f, 0xa98fe31c, 0xffa843df, 0xc6c899f3,
- 0xf7f50bfb, 0xfea11ea0, 0xa4ce3a1e, 0xdda3bf16, 0xf0a34e1d, 0x7ff0aa97,
- 0xfd79302a, 0xe02e36f4, 0xd5f9f2a7, 0x7fa0478f, 0x1b6ee0bd, 0x1c277a45,
- 0x5d24fe65, 0x0fcbce9d, 0x6ddf6a74, 0x10e16c0e, 0x6be032bf, 0x21268a3a,
- 0x5067d68b, 0xffbe775c, 0x07a9bec1, 0xb7212739, 0xa54c4512, 0x9abedfef,
- 0xe3a2efeb, 0xb9cc52ce, 0x32521d6c, 0x24d7284c, 0x909634c3, 0x88471a5b,
- 0x5d3207b4, 0x448d3f11, 0x58a33b8d, 0xaa559f5f, 0x8a8ab7af, 0xb570a268,
- 0x66c8e74e, 0x39ecefda, 0x9c6d76fa, 0x4644a681, 0x52bc7567, 0xfa74938a,
- 0xb9943668, 0x7485d8f0, 0xdf62e39e, 0x1778f06a, 0x7e8c2489, 0x010fbfbc,
- 0xf193faba, 0x362e79bd, 0x73bc236f, 0x4a528b44, 0x8ca739e7, 0x1b7f000f,
- 0x9f68ffe1, 0xd0e5314d, 0x22a3fd72, 0x8f9d7531, 0x1eb9fe11, 0x22827ce3,
- 0xcbf3f3ac, 0xdf19cf8d, 0xffe9531c, 0x2d07f029, 0xa0fe00bf, 0xf9551ff0,
- 0x32af53a3, 0x3d9af0fe, 0xda1f80d3, 0xe904f237, 0xadcbf2aa, 0xa92cbf2a,
- 0x2105f3d7, 0xb8e8111e, 0xfb970e6c, 0xcc1f1440, 0x5960f956, 0x6e6f9e83,
- 0x3d317e27, 0x6ee65d06, 0x9a6f7cd8, 0x9f7e75dc, 0xff337ca8, 0x9f88fdc1,
- 0x7464f3ae, 0xd3a543ba, 0xf6fa35ed, 0xdeba555b, 0x4e75d2ae, 0x51afc3c3,
- 0xc9e641f5, 0xdde41101, 0x9e8d53fb, 0x3d3d1d11, 0x708d3d2a, 0xf3d2a1de,
- 0x7a331f8e, 0xa88de11a, 0x9c348cf4, 0xae80c913, 0x5be11af0, 0xf9977770,
- 0xccab5c60, 0xde9e9b1f, 0x4755fca6, 0x05579ea3, 0x3475586f, 0x6c56ce4a,
- 0x2fabae9f, 0xbdd5cc0f, 0x4ca1acbf, 0x7ea4bed7, 0xd17daeb9, 0xf5753bfa,
- 0x758fff32, 0xbbb82fef, 0x7b7ed756, 0xf6bafdcd, 0xeb0fe5f9, 0x1b3d73ea,
- 0x9ecfef75, 0x3ed759b3, 0x5d19f4ab, 0x9de28cfb, 0x6574faba, 0xd37deeb7,
- 0x066eabbe, 0x3deb7cfe, 0x713d809e, 0xc605fdc1, 0x45b82f33, 0xd473bc37,
- 0x1f5f2327, 0xf2c10df3, 0x7c8dcfb8, 0x1637d27f, 0x66a6d6cb, 0x7413ac3b,
- 0x124a5c5d, 0xcd326908, 0x0fdf5dab, 0xeb8269d6, 0xa9e4c869, 0xb37ad3f5,
- 0x28794649, 0x78489069, 0x2c1c2124, 0xed1c2a36, 0xa47b547c, 0xaa06f687,
- 0xb6ba93f8, 0x3f624497, 0x76523ef7, 0x7765f005, 0xcbfd03eb, 0x4da7bb00,
- 0xed8debb5, 0x8d291c95, 0x1ac84e7e, 0x626ac384, 0xc4a54776, 0x5772bfe4,
- 0x4271017a, 0xe6b83c3d, 0x2905e372, 0x9f02a793, 0x78e4eb64, 0x3a35c359,
- 0x9a204efd, 0x6df807dc, 0x8af9eea4, 0xbeead7ed, 0x479fb53b, 0x5741abb8,
- 0xb165838c, 0xe81333f4, 0xe67370e1, 0xa700618c, 0x4deb1472, 0x3c2ed07d,
- 0x755e2be5, 0xafd01074, 0xbee3cb14, 0xf31e5839, 0xea3cb079, 0x53f2c72b,
- 0x11960d5f, 0xfe58dddf, 0xf2c7e7c5, 0x2c7adf63, 0x63d5f23f, 0x01afa1f9,
- 0x017df7cb, 0xb6fb0f2c, 0x2f8ef963, 0xaf8b6588, 0x9f56cb09, 0x7726a582,
- 0x71ddf13d, 0x093e1d75, 0xcf8317fc, 0xed7f3aa4, 0x7c9d09fa, 0x87dfc716,
- 0xb9e1a67c, 0xf2acc1a4, 0x1f8e8a43, 0x4397c012, 0xbda1eb3e, 0xb0f95441,
- 0xb763efbb, 0x6bfcb77b, 0x0fea6df8, 0x7e4eb7e4, 0x53f030ca, 0x34fc4f76,
- 0xa7e28f8c, 0xb31726a9, 0xfa7e547b, 0x8623cc19, 0x8c0f315f, 0xf6513e90,
- 0x826f929a, 0xbaa56c75, 0x601f420f, 0x763aacfd, 0x05b77d1d, 0x10c0baed,
- 0x0ece3758, 0xd860dbf9, 0x21fb3847, 0xf7567e25, 0x49f233f3, 0xd05778f7,
- 0xa01a59bf, 0xdaea4f44, 0x78638d6e, 0x31489ab5, 0xfddaeba1, 0x2f729e83,
- 0x81758a1c, 0x01f7c224, 0x8c4774f7, 0x0cada97e, 0xaf09edf8, 0x72af8e75,
- 0x8f63bfa0, 0xca0f3dfd, 0x2f6626a9, 0x68f335c7, 0x93f093b7, 0xc76fc06e,
- 0xa77664ba, 0x7dbdbc41, 0xbb42e490, 0xe3d64c81, 0xc3a05fcf, 0x4714fddf,
- 0xd6eeb82d, 0x06fe21a7, 0xbd38357e, 0x49ebf817, 0xcf84f7bf, 0x7fef3085,
- 0xd27b3e01, 0x45e93d8f, 0x342fc8a2, 0xedb7f9d1, 0x80fb961b, 0x1e5ee30c,
- 0x9afedadf, 0x7e5f9e11, 0x4b6e3e47, 0x25b8f8d1, 0xa41f8093, 0xc6d9cbca,
- 0x2e81e32b, 0x40f4fd1d, 0x9f77ee4f, 0x7eff8264, 0xdfdf4b3a, 0xe2fec67d,
- 0xa70dc225, 0x89243ae3, 0x92bf53c4, 0xd1e36cf7, 0xfc47719f, 0x67d83710,
- 0xe0c9d773, 0xaffc9e37, 0xeb3d0cb0, 0x0f670be3, 0x3f04e93d, 0x5f1a656c,
- 0x1d063473, 0x982ab2b5, 0xf68f6355, 0xf5f49a57, 0x715cf5b3, 0xc725e710,
- 0xeb061ccf, 0xa8e6ab8c, 0x0d3f7a08, 0x959fad4b, 0x738e1269, 0x3c767b72,
- 0x4c02fff3, 0x4048ec9f, 0x67d2737d, 0xd9fff7c1, 0x8fd774f0, 0x8d210f06,
- 0xe80b33e4, 0xdd96da73, 0xcaddfbfd, 0x825e7083, 0x890929f8, 0x69bf815f,
- 0x3b7fffa5, 0xb015fa00, 0x0debf5ae, 0x70f37ef2, 0x3743be78, 0xd64efd1e,
- 0xbbe18cf6, 0x204ee5ee, 0xa7be34c7, 0xbaabfbf4, 0xbb62bdbf, 0xc76f46b5,
- 0xdeeae826, 0x9d1af4a6, 0x00ff0b5d, 0x1386bdc3, 0x54ab0960, 0xb04db0d9,
- 0xda7e07af, 0xf049c103, 0x5ef6385f, 0x79222595, 0x16971464, 0x6385d3e0,
- 0x6797fe35, 0xa9ea99ff, 0x048a6f13, 0xa25c8ce4, 0xaea9e550, 0x899bf220,
- 0x917d6898, 0xc6c2f6fa, 0x6eb02515, 0x26f9789d, 0x931643f4, 0xc7f82752,
- 0x0f3ea4e5, 0x89a7b5e2, 0xf3e418a9, 0x39c77934, 0xad1e4a32, 0x2e9d8482,
- 0xe3b7af5a, 0xabaf7fa9, 0xd7d06b2f, 0x525b9297, 0xe97edf60, 0x38049106,
- 0xf6fa1bd0, 0xf61779af, 0xacd48cbb, 0x57fb7583, 0xa16bbc6a, 0x5419088b,
- 0xf6fd556f, 0x0a32bcf1, 0x71604b87, 0x7a1f6d16, 0x7ff32279, 0x2406662a,
- 0x59ffbe85, 0xa95bc8e3, 0x07dfa206, 0xbc7ec1d6, 0x6f713a7f, 0xf7e95d23,
- 0xc237e15d, 0x5d03d9ab, 0x3855c3f6, 0x4a73b792, 0xcb1e8620, 0x67263814,
- 0x102bbeaa, 0xc7b8ba5c, 0x7bfd4334, 0xd13253f3, 0xde1ed3f1, 0x668c2793,
- 0xc0fc03fc, 0xe792ed09, 0xd69ff487, 0x7f82ffa5, 0x68fdff6a, 0xfeba79ff,
- 0xfb53fda7, 0xfe05d81f, 0xaffab179, 0x2ff3edfa, 0xa93ea9fb, 0x4e97f178,
- 0x13c9d742, 0x9b8a7d42, 0xdb72b5d2, 0x96854ebd, 0x13bf05c7, 0x8044f5f8,
- 0xef458e2f, 0x154e0587, 0x41cec542, 0x49fe0ddc, 0xecf93ffb, 0xf11e30cf,
- 0x85d136d5, 0xe6edb4f1, 0x61ca99b0, 0x12f2e375, 0x23f58392, 0x19df7eac,
- 0x7a4dea1e, 0xfa78abfe, 0x1f902997, 0x8d453942, 0x9ed1852f, 0x79fadead,
- 0x5e4fac11, 0x64728ed2, 0xbfc60efc, 0xaffa8898, 0xdaef8a4c, 0x38c4e14e,
- 0x8bc55781, 0x6767f1b7, 0x37942778, 0x82fa017c, 0xc7e8227a, 0x572bbf1b,
- 0x8235d4ed, 0x8d98f923, 0x190c7bfe, 0xc3517a03, 0x3b30090b, 0x5f99eeb5,
- 0x70a11ce7, 0xa6fada9d, 0x97c28b93, 0x83674f26, 0x8f73fe8b, 0x6ddebddc,
- 0x6d16f2a5, 0x96eb690f, 0xd7daa4dc, 0x8a565621, 0xf3e42761, 0xdd166e54,
- 0xf2b8fa7e, 0x7a9f27f9, 0x7f99e20c, 0xd107bb32, 0xff3c57fe, 0x69e3fbda,
- 0x314ec57d, 0xc34d84b9, 0xd274a5ce, 0xbfff4bc7, 0x1e93b73b, 0x4d79bf15,
- 0xe7b18792, 0xd27624f7, 0xfdcec565, 0x15d61912, 0xfb83931f, 0x063d88a5,
- 0x8a47827d, 0x7ba85ec0, 0x8825bfe6, 0xc7d89e33, 0x18acd491, 0x1afcdcf8,
- 0x278003da, 0xaf4aa7a4, 0xde19fa0e, 0x80ba52f7, 0xcbd236de, 0x5522def8,
- 0x90bf40bf, 0xfbf94167, 0x07903d85, 0x0dd991f5, 0x617f2878, 0xf3e61395,
- 0xf8c35085, 0x537bf1fc, 0x8b959f40, 0x961714fe, 0x13f979ec, 0x5bfe423f,
- 0x528ff6f6, 0x9444be30, 0x45ae1374, 0xe79874fe, 0x70b34d92, 0x07894b9e,
- 0x71fcb0f9, 0xcb4034be, 0xde29020b, 0xac4e4319, 0x2f939322, 0xd2e250b6,
- 0xfe017e79, 0x77efe824, 0xb436c0f8, 0xa024efbb, 0xcea47e0f, 0x580c5a85,
- 0xf46160bf, 0x2ebaabfa, 0xa33eb010, 0x7deb77f9, 0x93a7402d, 0xd85495fd,
- 0xc112f177, 0xfb83e2af, 0xbfcbf696, 0xd195253d, 0x9e42c889, 0xf75d1dfa,
- 0x7873c327, 0xac5445bf, 0xc8efba39, 0xe39d59b7, 0xa7c756af, 0x7ac1c770,
- 0x6c4a45fd, 0xd44f35fb, 0xb75d00f4, 0x4a2cf8a3, 0x6ffc99de, 0xbbc99fbd,
- 0xe981ff26, 0xfe7ed0c5, 0xf8239330, 0x0a43d60e, 0xdb153857, 0x50e4cff7,
- 0x3a3f503f, 0x156f9c96, 0xb7d73955, 0xc285f91b, 0x51f1702f, 0x24e3037c,
- 0xdf701d22, 0xfa06e965, 0xf51e947d, 0xaeceb8c2, 0xbc726afd, 0x455bf34c,
- 0x9b203ebd, 0xdee81e98, 0x9e23f2ae, 0x53c0c52a, 0x52fc818f, 0x20cbf579,
- 0xcc2962bf, 0xcd1dcbff, 0x72a3cfef, 0x935065fb, 0x355ebd5b, 0xaef96dca,
- 0xdc9624d1, 0x9377697e, 0x9f4b7298, 0x7b5b94c7, 0xfff9f904, 0xf0d55eb4,
- 0x78f00c38, 0x351e274d, 0x3e1dd93d, 0x90f27a8d, 0x8d5e2320, 0x1dfe927a,
- 0xf95d7926, 0x8d43e351, 0xab9e2aff, 0x51f402ba, 0x7c6a9f07, 0x1aa7c1d8,
- 0xef89761f, 0x6c68f0ea, 0x34fb00bf, 0x96f9ae14, 0x528ed38d, 0x4ed2d257,
- 0x7f26df70, 0xaafb8f26, 0xe584f396, 0xf3111081, 0xd5303f43, 0x753b068c,
- 0x3d6fdfe2, 0x7785177d, 0x2c8bf13e, 0x445ea02f, 0xb7d2ffbb, 0xf3a04edf,
- 0x79e2a799, 0x54fb453e, 0x0239974a, 0x70a38e17, 0x3f8815ce, 0xb7e2113e,
- 0x0a34f91c, 0xfae5f98e, 0x4c4dfc3a, 0xa7a7ece9, 0x9fabbff7, 0xb7cb7df2,
- 0x1d3f5d29, 0x50bbc844, 0x10bc388a, 0x81d357cd, 0xde7f14ae, 0x00605c51,
- 0x5088a9ea, 0xbd77e51a, 0xf3fb3220, 0xa319c2ed, 0x9fc9bec1, 0xf19e2c3d,
- 0x7bf460fb, 0x3b3eaabd, 0x41e397eb, 0x650d9fcf, 0xf67b7fa3, 0xac4722d0,
- 0x9566d0bc, 0xf7535ecf, 0x5cecda5d, 0xbff94f3d, 0x7da3b48d, 0xc95bb7e3,
- 0x82fb18b6, 0x5f4d451b, 0x7fa374ab, 0x8e7f1d3c, 0xe97387ca, 0x51b9f1a7,
- 0xf5399b74, 0x4aece084, 0x34fbf807, 0xa1fb0fca, 0x06be05f8, 0x235b36fc,
- 0xe231a545, 0xf8a9cf79, 0x6be0789b, 0x0c2dcfcc, 0xc7800be5, 0x8baf3b42,
- 0xc9a93c83, 0x143250eb, 0x60789ae0, 0x5347cff9, 0xe21ef63e, 0xeb8f90dd,
- 0x3c919d0d, 0x2361fc31, 0xb07fa13e, 0x2e888f33, 0x627bd7c4, 0xebc012e9,
- 0x04acb37b, 0x95df35f6, 0xe0b155dc, 0xf5b3332c, 0xbd292e66, 0xbffb8a30,
- 0xb3f9bf0f, 0x8a02e11d, 0x9676b38f, 0xff95ddf7, 0x759fdc98, 0x0e5a8171,
- 0xf56790f8, 0x71002e64, 0xf9e222b4, 0xe50e04f9, 0xfeb4527d, 0xfe9a6fca,
- 0x54f5e43c, 0x89973887, 0x110b57b6, 0xdbd8f809, 0xfb0053b4, 0x289926fd,
- 0x6a108f18, 0x8a8ccd86, 0x05111d7b, 0xdff357c4, 0xf80edde6, 0x4cd3373d,
- 0xe8652e00, 0x01d82719, 0x512342e7, 0xd79bba0e, 0x5a647c1f, 0x10fd08a1,
- 0x3f230fe4, 0x74db2514, 0x499359f5, 0xdba5373f, 0x95647344, 0xeafd063b,
- 0xbd67c624, 0xff966fd6, 0x04dbf4cf, 0x5f18dfaa, 0xedfad18b, 0x7df18926,
- 0x8d4b7e94, 0xd656dfa5, 0x17c0a9ca, 0xa766f72b, 0xbe575d02, 0x2c4fa4b1,
- 0x8fbaf6fd, 0xa86ff7f9, 0xfd6de679, 0x1e1b7ea8, 0x51fadfa5, 0xae2316fd,
- 0xa5ea5a3b, 0xcffcb37e, 0xa0ebdfc8, 0xb7cc62df, 0xa69fc558, 0xb5438adf,
- 0xfb8adfa8, 0xa97cbaf1, 0xfa4f5249, 0xb397ecad, 0x6dba2eb0, 0xf007f831,
- 0x0dedfa0a, 0x38c1cf55, 0x5dfee7a0, 0x3d277d72, 0x3d5bd557, 0x67843ff7,
- 0xecadcf4d, 0xd33da1cf, 0x9e990f95, 0xf4c5995b, 0x4cbdcadc, 0xc41cadcf,
- 0xbf519cf4, 0x6fd17415, 0x21eafbec, 0x23f47bf4, 0x1b7d7eb3, 0x23992f6d,
- 0xfdf42dba, 0x9f3b5912, 0x5a3a3351, 0xd4bbfddf, 0x9f73be8d, 0x3d2e73c1,
- 0x823fbbe9, 0x7a841bbe, 0x91c8206c, 0x1c6e3f91, 0xd3e5d368, 0xb1f5f7bd,
- 0x79045ee7, 0xfe8f03f9, 0xe62607f7, 0xfafbed27, 0xbb9048d8, 0xaa1e00f7,
- 0xf9519eef, 0x47e7d5af, 0x28793a7d, 0x7baaf793, 0x1baafe18, 0xd5df3639,
- 0x7586407c, 0xd1176fe0, 0x2f7dbe8f, 0x9b67a3f3, 0xc0d8fa5b, 0x225fffcf,
- 0x2bfd25ca, 0x87e28b29, 0x302b1739, 0xce2bb9ee, 0xccfc05b9, 0x80b10239,
- 0xd0e0bb1d, 0x7ec0278d, 0x3d71705d, 0xe00bbdda, 0xc8e92279, 0xa1db98fb,
- 0xebf6f527, 0xaf9da6bc, 0xf9ae9065, 0x8ccd1310, 0x7157132e, 0x5d59cd81,
- 0x5da823f3, 0xc62df986, 0xabdf893c, 0x47cd9ff3, 0x8fe45f10, 0xe5f8cc7c,
- 0x72788def, 0xad54bc33, 0xaa78f5e6, 0x89e262e1, 0x2ca4ba52, 0xb72f13a5,
- 0x975914bf, 0x39cbed01, 0xf38cb7f4, 0x9c66f0d6, 0xcd4786bf, 0xbea059bf,
- 0x24743f3f, 0x86889e66, 0x22f92ee7, 0xf80dde1a, 0x33d34b78, 0x206190d7,
- 0x3c981f97, 0x7c0d1f3f, 0x178a7bf4, 0x73ade70a, 0x2bcee907, 0x65e2a3bd,
- 0x6148e7aa, 0x24780c8a, 0xf367c035, 0x96be77ca, 0xb377e742, 0x210b9592,
- 0x937f59f5, 0x688f103c, 0x1ea3b6b6, 0x9470f8d4, 0x5985c999, 0xf5ef2bd5,
- 0x7ae21575, 0x50b9c4d3, 0x3d06c2be, 0x7999e6a7, 0xd7f261ef, 0xbaf7fdcc,
- 0x6788518c, 0x91482e58, 0x5d015eb6, 0x86a73fe8, 0x40bc5d18, 0x69cffa17,
- 0xe020fda4, 0x73993abd, 0xa6fde187, 0x3f02f79d, 0xcfa56ebd, 0x39cbce41,
- 0x0e8616f7, 0x65ca7a7b, 0xc58858f9, 0xc8cc18c7, 0x173f5ceb, 0x5d897bc0,
- 0xe68993ed, 0x8e00f796, 0x366e5489, 0xfd00c3c3, 0x46fe8a2f, 0x07c8c5fb,
- 0x7503e53d, 0x2ab9efc7, 0x3a075f9e, 0xeddf01d8, 0xde43d812, 0xb47f5b30,
- 0x1720af76, 0x3a34de22, 0x65f5d134, 0x2983a314, 0xd66f9e06, 0xbd9e2aea,
- 0xef844e21, 0x6ffea06f, 0x7ebb0712, 0xf8d41f5f, 0xc5772cdd, 0xd215c413,
- 0xe806f4a3, 0xcd7de8b7, 0x11b7a6ea, 0xa6ae375f, 0x15dc99e6, 0x407d1a5f,
- 0x6f0e5e0f, 0xd34fcadd, 0x61799891, 0x467e55df, 0xb3f2f599, 0xe6fedacf,
- 0xedab8870, 0x90af50e5, 0x7e2e8250, 0xecccd330, 0x07ab6696, 0x0dfdbcf9,
- 0xee45bdd1, 0xa8a67faa, 0x257f0174, 0x64e27cfa, 0x4de80898, 0xa67c9597,
- 0xe3007bcd, 0xe0f5e8b7, 0xe907ab75, 0x33ff5dd7, 0xedfecccc, 0xead31ece,
- 0x9078c3d7, 0xc7eb28d7, 0x5a47ae33, 0x79a7a95c, 0xbf71d479, 0x71b4bcef,
- 0x84beebbe, 0x3c60b497, 0xd89fdb4e, 0x89767bc3, 0x73163f60, 0x0a107dba,
- 0xfe6fee39, 0x59bffa13, 0xff7ddd1a, 0x530a3f10, 0xe3bebf3c, 0x9f638c49,
- 0x34c8f67e, 0xaf159f90, 0xe542c4f0, 0x838775af, 0x889e5984, 0x45427604,
- 0x5dfb8f23, 0xcde70844, 0xa833b288, 0xf1f0c327, 0x5ef503a2, 0xa0732ec5,
- 0xf15f7e84, 0x103967f2, 0xc35d6b7f, 0x59ff9ff4, 0x91fd1f95, 0xa807c81c,
- 0xbcfed810, 0xa88f3e91, 0xb76a79fc, 0x6fe80e6d, 0xc73b3b6e, 0x2d9c115b,
- 0x0ce2a39a, 0x16459bf7, 0xc36ddee7, 0x33ff6c3c, 0x0331e61c, 0x48e7d17c,
- 0x53d0fcb5, 0x5cbd30cf, 0xe00624d1, 0xf6c5703c, 0x1710ad9d, 0x8cd0f8af,
- 0xfb77abf6, 0xd3ec0919, 0xcf3e2fc9, 0xf1371bce, 0x976878ba, 0x3e76e438,
- 0x59f8866c, 0x01ea1563, 0xf5823f5a, 0xfbf71aa0, 0x15b2718e, 0xd97dee2f,
- 0xe2b8514f, 0xf13b4f37, 0x36cb705c, 0xb723c627, 0xa05f7e5f, 0x7831dc4b,
- 0x9c771abf, 0xf5c105fa, 0xf1dc7621, 0x2127caa4, 0x48ee3eb3, 0x3c6127b6,
- 0x5799c292, 0xd17e231a, 0x5fcfffc1, 0xcdf60278, 0xf2faed4e, 0xf2bb8009,
- 0xfc4f739b, 0x2a93e294, 0xdbac1720, 0xc3df67de, 0xbbed067d, 0xd27d55f7,
- 0xb8d3ccfa, 0x27fad34f, 0x6b7a1bb3, 0x2645fbdd, 0xeeb453ec, 0xe2053afc,
- 0x4ef7abb8, 0xf56d7f41, 0x4953934b, 0x2b407f0c, 0xbd037f81, 0xd23227d8,
- 0x39b1b9ad, 0x4d3ce013, 0x77eb0ee9, 0x58393c47, 0xb2f4e2c5, 0x11de471a,
- 0x4bae5f7b, 0xc671b8dc, 0x3ac1d7cd, 0x65ba28de, 0xf1f88bd2, 0xe1cf4a61,
- 0x0ee3a0bd, 0x38209f75, 0xe3f1b2f5, 0x4a0ff0d3, 0x7d660baf, 0xe342fe1c,
- 0x0c7cff92, 0xba931ada, 0x7b871f8d, 0xdf6d3f81, 0x2159bf6f, 0x5da39016,
- 0x1edc61cf, 0x0f7d47e8, 0x85fcfad2, 0x1cbb884c, 0xc0efdb17, 0x66bdebfc,
- 0xd8aaa3cc, 0x53ca01fb, 0xe36f4beb, 0xf7511abe, 0xc7495adf, 0x5127db1f,
- 0x56ef3a7d, 0xab3b8b07, 0xb4b88074, 0x6ae7c4ec, 0xb5f199fc, 0x2eee9716,
- 0x32d63d1e, 0xabbfed80, 0x5b5591fb, 0x43112cff, 0x0b1b9fbc, 0xd9723af4,
- 0x0f0d547d, 0x98c3c02e, 0xd896fde8, 0xfd8efff1, 0x80dd0316, 0x758f609e,
- 0x7ad0a7ec, 0x07ab883f, 0xed620fb8, 0x666eb5df, 0x62ad34fb, 0x6d45bcec,
- 0xc5a465ff, 0xe812efb0, 0x4f4bbedd, 0x78aef8f3, 0xb42706ca, 0x9d7c574f,
- 0xbf5b14ba, 0xd4e16adf, 0xe7be037b, 0x67257ebe, 0x05981bcc, 0x78d5597c,
- 0x4a7b0244, 0xf9b4df5b, 0xe7fd529e, 0x78f2cf53, 0xb05d644e, 0x51bbf519,
- 0x1613547f, 0x5bc23437, 0xab7a616e, 0x1fad89ba, 0x023976a7, 0xa9cfda76,
- 0xaf91bb03, 0x911c77ab, 0x83bfb4fc, 0x45237fdb, 0x60acf2a2, 0xc14408fd,
- 0xfd85ea7f, 0x897f6c39, 0x71c6ebf9, 0xf1c752ee, 0xe38f6286, 0x374671dd,
- 0x507f0ace, 0x2e7ee762, 0x4149de3f, 0x9dfd0d99, 0xe409116c, 0xf20c9ffd,
- 0xe31881c5, 0x5a88b5cf, 0x0fc98b3c, 0x1cf1475d, 0xbb9d83a6, 0xb42513e4,
- 0xc63b57bb, 0xfde7087e, 0x19243b57, 0xb57517e2, 0xeeb52f2c, 0x4069dcdf,
- 0x5b84baef, 0xdeae3f71, 0xb2575f80, 0x1ae21f2b, 0x51d2c2ae, 0xbd46aec0,
- 0x7498a093, 0x1476aff0, 0xce14caa7, 0x469daab5, 0x0b9e3704, 0xc5459de5,
- 0x65de1f30, 0xec635972, 0x52f6883b, 0x8bc289e2, 0xe39fd2e8, 0xca469724,
- 0x7f968fd0, 0x39696953, 0x052e8d88, 0xb1858c53, 0x99fcddec, 0xdb779c2e,
- 0x507fd50e, 0x614aec88, 0x8eb06479, 0xcf8aedb4, 0x43f1c321, 0xb47e50a2,
- 0x1db9cfa9, 0x5e83f7fb, 0x4a24eb5a, 0x8a339502, 0x86cf9955, 0x2c23acf3,
- 0xe9f0365c, 0x857aff99, 0xf10d6bf6, 0xd9b457e8, 0x25527cb5, 0x46cce401,
- 0x41bcc41e, 0x936fe560, 0xe634e13a, 0x25781213, 0x298d3eea, 0xa1fd1dbc,
- 0x7c589bb3, 0xff572780, 0xcf8b027a, 0xafc00091, 0x542b2fae, 0xe212cf38,
- 0x3bfbc30e, 0x580f181c, 0x9422c6f6, 0x3443a01f, 0xf58a92be, 0xac1b5ee7,
- 0x67f01cce, 0xbf555a36, 0xc0b4afc6, 0x6ac59b39, 0xf54a7dbf, 0x18a962e7,
- 0x975e41fa, 0xe5faf2a8, 0x9843a2d2, 0x8a59e68f, 0x8e753e7a, 0x31acdcfe,
- 0xaa204b4f, 0xfe601c9f, 0xadbc9773, 0xb708cda6, 0xc56b7f31, 0xc35d7d76,
- 0x37a027e3, 0x127402af, 0x80e2e5fb, 0x3237e23d, 0x4613d7f8, 0x9afc777d,
- 0xb5f8f4d3, 0xdafc7aca, 0xa7f11886, 0xadc3afc7, 0x97e697c7, 0xf8df8776,
- 0xc873fab1, 0x8afc2aff, 0x548ed556, 0x4aff4af1, 0x654c292d, 0x42e9dc03,
- 0x28c2e518, 0x30f61947, 0x9feeff07, 0xafc5a81c, 0xefd40edc, 0xb87cea69,
- 0x1b3becc5, 0xf00f11c8, 0xe08e51ba, 0x79e82024, 0x75d8f91b, 0xfac40acc,
- 0x4d7b7c8c, 0x355cf4f7, 0x0b23d5fd, 0x809b57e9, 0x3f23877f, 0x89bef3b0,
- 0x05975024, 0xc176ea72, 0x59f41dfa, 0xb8458bdc, 0x17b885f7, 0x5efb820f,
- 0xa43e585c, 0x0db4abe6, 0x306e7825, 0x029f3e18, 0xdca1e9fa, 0xf7a069f6,
- 0x29484bbc, 0xc51fc1e8, 0x0f03f3b1, 0x38bf65a8, 0x85abd549, 0xef824c6f,
- 0xe70ca243, 0xdb42dbd5, 0xdea0832c, 0x2a3de3c4, 0xabfa84de, 0x90d6fc60,
- 0x6c5ae0c8, 0x38820c09, 0xc7115ec8, 0x013c5d6d, 0xd87665cf, 0x2f96216f,
- 0xccfa16d2, 0x9f4a28b9, 0x7a3d36b7, 0x36b9c415, 0xa9e7629a, 0x7e9ab14c,
- 0xec1bfad8, 0xc537b792, 0x1eeaf2ca, 0x51e7401a, 0xbaacfd3d, 0xb879330e,
- 0xabf3a556, 0xf9c46f4b, 0x64e214bf, 0xf88a4758, 0x461a6edb, 0x3dee39e7,
- 0xb0cf8d41, 0xf528e578, 0x2fd03afd, 0xdfeb5fa8, 0xddc3e9e5, 0xe7020547,
- 0x2cd546f4, 0x15eb7f45, 0xca73e527, 0x704a7917, 0x616df024, 0xcdef0128,
- 0xd04c7ef1, 0x22dd1fae, 0x4f33f8e8, 0x22f3575d, 0x44c05e54, 0x3edf50f9,
- 0xec18222e, 0xfc0f00a0, 0xfd71ed75, 0xd55685f3, 0x149272ee, 0x970df3e0,
- 0x5ef20e78, 0x7b89dadb, 0x6dbb850d, 0xa3845f71, 0xfa9c3ced, 0xbe25e5a5,
- 0xbf7ad638, 0x62f8caca, 0x9d7044a3, 0x8ed4b8d9, 0x4b03c5f1, 0xf580ae2b,
- 0x7b1fabfd, 0x8e7588ae, 0xc53f4a3c, 0x252c7cb3, 0xfe058c9e, 0xbc31cb5e,
- 0xc697a7d7, 0xf8d7f7c8, 0x8d6fe359, 0xc697d3af, 0x35e56279, 0x7665637e,
- 0x21d2c15a, 0xdafe049c, 0xe38f0e3e, 0x5cf1aeb6, 0x9f8ddcf6, 0x7800c9ac,
- 0xd0e74dbc, 0x3884b6ee, 0xb1bd9625, 0xfec4eac1, 0x6bf8d75b, 0x2eef8f21,
- 0x27a09c2a, 0x0377efbb, 0x813f5bfa, 0x677c2ca0, 0x73c32fa9, 0x642c4bbc,
- 0x85bbc40b, 0x1c5bf4fa, 0x6f93ffc0, 0x7de14758, 0xeb84d4ac, 0x8dd146eb,
- 0x08abfbf0, 0x1a83f8c1, 0x8d37edcf, 0xcf3e8e6c, 0xff3584fc, 0xc7a52b59,
- 0xd21756b3, 0xe6e3c925, 0x3ff77e00, 0xf209dbfe, 0x8cd176dd, 0xfaabec74,
- 0xaf77da43, 0x36dd9959, 0x0ad94e33, 0x64f293df, 0xc7b71394, 0x123c5ee9,
- 0x78a04dee, 0x4ea3928f, 0xbbf1f9fa, 0x71d50f44, 0xbb449e14, 0x2892f909,
- 0xc5d2ec8a, 0x4728fd29, 0x30905c9a, 0x4d8f2047, 0x885cabd3, 0x1ca83b0b,
- 0xc3d8b28c, 0x962477e0, 0x105379d3, 0xfac2c4ac, 0x754e116b, 0x3cc6cb9c,
- 0x7997ca79, 0x608a3870, 0x71121448, 0xdf4d7fa1, 0x6d7a7a6a, 0x59794102,
- 0xa810b0fb, 0x126cfda3, 0xaf885622, 0x01722964, 0x0a251aa4, 0xef179e32,
- 0xff304e5f, 0x78fd96d2, 0x42dd8742, 0x37c47e85, 0xfb5882fc, 0x869c7c79,
- 0x401fcf2f, 0x6a1ce46f, 0x8de6003f, 0x73c6cadb, 0x3b4166bf, 0xa87f43bd,
- 0xba9158f6, 0xdcfb8efe, 0xc209f2c4, 0x3ef44893, 0x687f519b, 0xebb8c9f6,
- 0xbf606b22, 0x189fdc68, 0x055bb49c, 0xcead1bbe, 0xa89f43b7, 0x1098e6eb,
- 0xfbf427d5, 0x66c8d0dc, 0xf72b07b8, 0x5dc72663, 0x3f21c9f4, 0xbefbf80f,
- 0x17c8c47a, 0x2e54c223, 0x32a1f6d5, 0xeae1a7dc, 0x19fb43a8, 0x9e49de83,
- 0xed152a3e, 0x8cdf955c, 0xdbe4d678, 0x77b9f728, 0x0ccd9bbb, 0x1a47e62e,
- 0x275c21c6, 0xe836721c, 0xb078b7a5, 0x29f2dbf7, 0xbf5c281e, 0xf4a3f902,
- 0xb81f384b, 0x21bfe601, 0xe415bd74, 0x27af7be4, 0x5475fa21, 0xbcd3f69e,
- 0x0fae3e27, 0xc04e27ae, 0xf3fef543, 0x46127aea, 0x01c8777a, 0x78e89395,
- 0x46ffc689, 0xa3e4d77e, 0xc8de2e0e, 0x047673c6, 0x0b9e59d6, 0x663065e2,
- 0x7805c9b2, 0x3aedecd5, 0xfe25bed5, 0x56ffb474, 0x1eba88bb, 0x4f800665,
- 0x5ecd44ea, 0xe3a25b77, 0x051dab53, 0x4fdb97a9, 0xfea45ef5, 0x1b39d459,
- 0xfe8ed0af, 0x84a2f79b, 0xfe37bfcf, 0x7914289a, 0x898dfc3a, 0x7f15dd1b,
- 0xb08471f1, 0x9ffec2fb, 0x230cdf9f, 0x7727dc7f, 0xe25aefce, 0xf5422579,
- 0xf5aade47, 0x3f193675, 0xd7969c39, 0xfb682f55, 0x01e9dd9f, 0xa98fbca3,
- 0x70c66fac, 0xeef20f4b, 0x25f8a9b5, 0xb3e80664, 0xb76a61f1, 0xff957564,
- 0xbcfceaf3, 0xb6f78636, 0x8307db5b, 0x3de7ad3c, 0x2f687135, 0xb0264c46,
- 0xb413762b, 0x8ffbdc7b, 0x5bde513a, 0x4feb7492, 0x8fc0bb57, 0xa7d9a9f2,
- 0x7c6a5d9a, 0x552ec3af, 0x3e2d1bd7, 0x795135ca, 0x193cf0a1, 0x1c03f1e7,
- 0xbfe62e75, 0xd05701c2, 0x3d584f63, 0x365fc075, 0xd838973a, 0xce12192f,
- 0xb096f162, 0x93bb3e4e, 0xde419b47, 0xb03f1482, 0xfae9f9fe, 0x1eeb8837,
- 0x0971b19e, 0x85aa5e0c, 0x97e9bec0, 0xd560d847, 0xfbc7f304, 0x6bfbb0e5,
- 0xfbb2e5fa, 0x31c73732, 0xa18b65fb, 0xfa5ac27c, 0x144bcc18, 0xfb9cdf88,
- 0x5006e1fd, 0x64f90e21, 0xf800d29c, 0x382b14b0, 0x71170c8f, 0x19de9473,
- 0xbc839042, 0x994ac489, 0xd247f163, 0xfb48ffb9, 0xf39128cb, 0x06323f81,
- 0x9a373c12, 0x9e7d8a11, 0xf13895e8, 0x8debd987, 0xbcc1ce5d, 0x41cfcaa2,
- 0xa937ce89, 0xe9839d84, 0xf050fb88, 0x8eadbf01, 0xadb2715b, 0xab3ed817,
- 0xd9e1813f, 0xc101bfe8, 0x5a825e60, 0x822257bf, 0x2bfd1135, 0xdfaff03f,
- 0xcaff4164, 0xd041dc46, 0xb1c41fb7, 0xce3aecf8, 0xbfea1ebd, 0x702dda10,
- 0x37268fcf, 0x343c3c9a, 0x40b716e1, 0x775e755f, 0x5f7c0736, 0x00ef172a,
- 0x9c275b9e, 0x6716bc1d, 0xf8c99ed2, 0xf975e14e, 0x9afd1af6, 0xacfd1d44,
- 0xd6895710, 0x08690ef3, 0x5cadedd8, 0xaafc1b9c, 0x831ce5d6, 0xe7a8b397,
- 0x3cdd0411, 0x1fcc6cd3, 0xe7f55369, 0x90b2e3df, 0x84f5b51f, 0xc63d6fce,
- 0xe8f30d7f, 0x386a8738, 0x973c6a77, 0x11dd969a, 0x7cc74a07, 0xee0952dc,
- 0x43672332, 0xbce3b436, 0x124cf6ce, 0x6a64bce3, 0x5cf31ac4, 0xbc5256b0,
- 0x4b780d7d, 0x40126b15, 0xcedea71f, 0x1f1188f4, 0xc47f9c6d, 0xfb47d841,
- 0x1f331883, 0xd2d32b45, 0x4c584a6e, 0xbf4b4fdf, 0x79ed4b01, 0x19885d83,
- 0x71b24c57, 0x64dcec3f, 0xfc18b103, 0xed1b24cd, 0x9de7b4ef, 0xff1415f7,
- 0x37bb8c76, 0x37bc31c4, 0x37bc31c4, 0xe53e1a48, 0xcebf9078, 0xf7975bba,
- 0x4967c1b4, 0xd10f2010, 0xb9f28e9f, 0xdde8c5c4, 0xf682bef9, 0xdca21f4d,
- 0xc4941730, 0x436c9106, 0x21f167be, 0xafc87ced, 0x4024cf7c, 0x96db94be,
- 0x16fbfaa5, 0x0604df46, 0x530df9f7, 0xb214dc70, 0xe2983ac6, 0xc9a96979,
- 0xd3a3710d, 0xc2723f0b, 0x1985e991, 0x0f20792d, 0x1b8f79a7, 0x984a97e6,
- 0x8e6cb85f, 0x2ed30814, 0xc2a81b93, 0x01ba7462, 0x4ccb3bf7, 0xc7aef331,
- 0xe9fed8d8, 0x961738d1, 0x90d36969, 0x92f59d4e, 0x8c4bf004, 0x35e54a7c,
- 0xcea8f17d, 0x6f94cc73, 0x790c5b14, 0x1349136b, 0x185beb87, 0xb1ac95f1,
- 0x35b7aab3, 0x39064979, 0x52ab7d84, 0x3a3547aa, 0x4d5f500f, 0xfe28b8c1,
- 0xbfd7a171, 0x79c37f06, 0x45ea015d, 0x10c88e4d, 0xa0ade7a6, 0xa1e2a377,
- 0xe83f7b6d, 0x9f8efb0f, 0x0739dbfc, 0x55fd3bec, 0x9e807768, 0xbbcf7e3d,
- 0xf4c7380e, 0x2e2c3dc9, 0x4fc18772, 0xfa63645b, 0x74ad8f7f, 0x8177e01e,
- 0x9e791fef, 0x77257e28, 0xe593fdb3, 0xde70de01, 0x506f7f5d, 0x70c93055,
- 0xf11f908f, 0x1fdea55a, 0x46c7a466, 0x2cb78b1b, 0xca59b1df, 0x4f7c6190,
- 0x5c7f8c25, 0x378f43f5, 0xd1dfd03a, 0x63b859e0, 0xd9be0484, 0x7566435f,
- 0x6445bdff, 0x025287eb, 0x151e67fb, 0x0f38466f, 0x2f6af7aa, 0x8fcb25f9,
- 0xdd63a329, 0x27b0f54d, 0x187bcec4, 0x13e4b1e7, 0x129528b9, 0x1cfdc96f,
- 0xfef0ddfa, 0xf33d743d, 0xee1f7ddc, 0xd9e719b3, 0x051f5ef5, 0x2a65b3fd,
- 0x6de78dbd, 0x3ed88597, 0x41e262f8, 0xfe72ecf9, 0xc5cec436, 0x678de318,
- 0x00db650f, 0xa9b0bf0f, 0x323cc4f1, 0xef6dce06, 0xfe20343a, 0x9f56f772,
- 0x828bef52, 0x3ca1bb7b, 0xce5823ee, 0xe2902303, 0xf6f432bf, 0xd55fc05d,
- 0x3ef13b4d, 0x8faaefb4, 0x9c2742aa, 0xe9ad4b90, 0x6e876a25, 0x8b20f903,
- 0x5a7d9d39, 0xc49cf9bd, 0x9f866afc, 0xb7e7d4db, 0x20723e01, 0xd9fe5bdf,
- 0xdbef4a04, 0xd1f3c52a, 0x6f3da5f7, 0xf7c3efd3, 0x3cdabe76, 0xed6b79b5,
- 0x9c5e6a55, 0x0e4d3bf1, 0xb97ca9e6, 0x75f4eb78, 0xea8a297d, 0xa9917ef6,
- 0xf3c3939e, 0x2d9f1a42, 0xe945a523, 0xa349f831, 0x84df076f, 0xd3f38e7e,
- 0xbeb8bae8, 0x3f83c54c, 0x4c4fdd4d, 0x91957883, 0xa73a7a8f, 0x32462add,
- 0x7c8dbe41, 0x7dcddad2, 0xb31b2128, 0x4acb162f, 0xa8cfc411, 0x15cebbf9,
- 0x171cdf65, 0x14fe0fea, 0xe32b6ee4, 0x083d0efb, 0xf7e8314e, 0xff5c4537,
- 0x54e43a57, 0xe90fffb8, 0x13f20e37, 0x4f787ecc, 0x4f98e71b, 0x808a0651,
- 0x4f2c62ff, 0x81954afa, 0x97db3a83, 0x04e73c6e, 0x086dbbfb, 0xab51b8c1,
- 0x69ef0f43, 0xe37a8f1b, 0x4a8f4ab4, 0x3f9be5a1, 0xdadace83, 0xc7177982,
- 0xa771a92e, 0x58e2e64b, 0x39d8b4f3, 0x7778bddd, 0xf9a872a8, 0xfa540b35,
- 0xfb6211dd, 0x4d370be9, 0xc9747a47, 0xa1fd54bf, 0xf50e46b2, 0x4b3e918e,
- 0x3e63d7dc, 0x3e40a2da, 0x5e85df7e, 0xbba441e4, 0xda776e91, 0x371eec5d,
- 0x7b8246a1, 0xe26d8eed, 0x8571ff70, 0x73ff7b10, 0x9df4ddb6, 0x479c63e6,
- 0xdb2ff077, 0x2faebb5a, 0x908ff981, 0xf08dcf80, 0x425ff36b, 0xd4a7a614,
- 0x2f3776f9, 0xf2c49912, 0x7f75d434, 0x815b9c39, 0x790793fd, 0xc6adcf96,
- 0x35f834a1, 0x28f38d78, 0x7cf12be2, 0x8f035b41, 0xf877d979, 0x77067dd9,
- 0xfd70e5ac, 0x9a4e8eed, 0x7bf6d15e, 0x30ca9baa, 0xfb43cb6f, 0xedb63b01,
- 0x29f6c263, 0xcf7873d6, 0x0cf4b75b, 0xeea57cb1, 0xa30849e8, 0x374a62db,
- 0xdebb3ff0, 0x0caf909f, 0x0a27de8c, 0x8c0ca0dc, 0x4122b7de, 0xb15957cc,
- 0xf9eeba7d, 0x8e35768d, 0x9ef5eaa5, 0x24d48fe4, 0x696f8cc4, 0x9fc9a6f2,
- 0xda778f26, 0x0a6f6665, 0x59198381, 0xd1879f80, 0x7afaa1bf, 0x3df99d9e,
- 0xafa5cd0f, 0xa73c6f54, 0x3258a7db, 0x096bd196, 0xbbcf825f, 0x0f14bbd3,
- 0xd8bdbd3a, 0xf8c3cfae, 0xe6d1f3a1, 0x3689c9a1, 0x189ca33f, 0x43d237bf,
- 0xe919ef8b, 0xc007a462, 0x5bc67e09, 0x35816d7b, 0x38b7ce14, 0xaf7fb706,
- 0x4d6ee115, 0xb9c0ac9b, 0xf9f4eacd, 0x3e01fe58, 0xb0fb074c, 0x768eceb4,
- 0x8d87eaae, 0x225c5dea, 0x5ecbb557, 0xd9232ab2, 0x24d2a7b9, 0xb58993e0,
- 0x17161991, 0x583e8f21, 0xa4f7045c, 0x32387c4e, 0xbff14203, 0x5646f07e,
- 0xd77916df, 0xfcfb86e2, 0x690a8a4b, 0x2fd071fd, 0x95fa2fa0, 0x6efbe0db,
- 0xf973c4eb, 0x1fdeccba, 0xcf9d2e65, 0xed66b1ee, 0xb33ed495, 0xf2aef5d0,
- 0x4157e0f0, 0x4a5b2b7e, 0x542e218a, 0xba781109, 0xce2955de, 0xad872db0,
- 0x7be9e3fb, 0x4f3cd91f, 0x76ee21d9, 0x8bae2d71, 0x94058795, 0xc588722f,
- 0x573a0567, 0x6d6efd2b, 0x2993b51c, 0x5a26d77e, 0xd317e0dc, 0x97f07ee7,
- 0xb22a26d7, 0xa35cf51c, 0x70077c1f, 0x7cb135ac, 0x0c6ffd82, 0xc65c8f8e,
- 0x23efd82e, 0x34cfe025, 0x1a582cfb, 0x1725f3db, 0x37c8e79e, 0xb18978b1,
- 0x76bb6dfd, 0x00ce49ae, 0x243ea8be, 0x1fc3fca1, 0x84559fd8, 0x613549c9,
- 0x0eaad0e7, 0x53773c13, 0xcf08a87d, 0x84b9c0dd, 0xe07a754f, 0xa6ee735f,
- 0x9e18af93, 0x44ae040a, 0xd0b71a88, 0xf9d5d56e, 0x9810d354, 0xdce3e10f,
- 0xcf846e70, 0xa2ecd530, 0xa0afe7f3, 0xe087c525, 0xff70f372, 0x89e4fd4c,
- 0x559fdc3c, 0x2d79aa27, 0xbe3e7626, 0xe557dd63, 0x39fb2b4f, 0xdd028f4a,
- 0x25b1ff4f, 0x53f6c6cf, 0xe6d6afd0, 0x8429c75b, 0x35ed7f03, 0x7219e782,
- 0xe0334993, 0xb19446cf, 0x47166fc0, 0x6c97caa2, 0x65fe9b3f, 0x9b36e940,
- 0x7e53237e, 0x9fc1faff, 0xe3bbff22, 0x68b2ab4d, 0x59f8739d, 0x2e74c33d,
- 0xef30b28b, 0x6bf565a8, 0xd006e74c, 0xc358e8ba, 0x4af1b0ea, 0xa3b33e17,
- 0x143fc8ae, 0xba3171c6, 0x01ae50ee, 0x2c4c16fe, 0x901c23ae, 0xcb477f70,
- 0x794b5957, 0xe2d1f806, 0x5d747a60, 0x3673c16d, 0x28179b88, 0xec4fe432,
- 0xfa41ef51, 0x5786a5be, 0x7f4266de, 0x7fc04a09, 0x53e183bc, 0x54690ba3,
- 0x5e3c81ae, 0x81ae5412, 0x0e7ebd7d, 0x7e83a24f, 0x96adf23f, 0x1bcfa089,
- 0x0f28f9df, 0x82bf73ef, 0x2df233f5, 0xf9d243ea, 0xff6dfe9c, 0xe4fbd1cd,
- 0xa9f91cfd, 0x03251df0, 0xec9cafda, 0x3951b722, 0x2c8853ac, 0xad9f8365,
- 0x091b2ff7, 0xd91f4fd3, 0xa070af4a, 0x3ae3093e, 0x67bb13f6, 0x7e8dd400,
- 0xec496933, 0x949dde55, 0x17ee3773, 0x7d346625, 0x85d610b8, 0x4a7bb1df,
- 0xfcc14538, 0xee3b17d3, 0x9718236b, 0x373891b2, 0x9b898dc4, 0xd453f9a4,
- 0xf1899af8, 0x945e7255, 0x87ceacff, 0x79085f7e, 0x2c6c63f2, 0x8fce81bf,
- 0x823afd07, 0x848e58c8, 0xa0365938, 0x3df583d8, 0x4fe1f824, 0x588fff69,
- 0xb2ca7c59, 0x8fe2e89d, 0xb7160e65, 0xe2c9c079, 0xf062718f, 0xe22bb016,
- 0xd81710ce, 0x4b8f6d15, 0x0bcfee02, 0xdf709bae, 0xe309a0ee, 0x018b4fdc,
- 0xb517ec17, 0x5bc8255f, 0x6d7f6748, 0x7eb5dd2a, 0xabe4911f, 0x0bf7ee20,
- 0x9d3fab27, 0xbb8f304f, 0x801484fd, 0x0cc4dcfd, 0x3c744d4b, 0xd9839e69,
- 0x1573bfbb, 0x043a04cc, 0x1811c6f7, 0x38becc7f, 0xb9e3d013, 0xe056b91d,
- 0x4701178b, 0x66890f14, 0x5fd09dd7, 0x6f0a39e2, 0xf9673e16, 0x223c82fd,
- 0xfc77e29e, 0xee19768d, 0x1ada7ad1, 0x56f311bc, 0xbde1379f, 0xee4a2fc1,
- 0xf17f50e9, 0x3d02ecac, 0x305674fc, 0xbcf3e70e, 0x62f3e709, 0x2bd5fb5c,
- 0x0c0fdc29, 0xc12ef161, 0xcfd00446, 0xbe40a982, 0xae5f3e93, 0xafa6b1f2,
- 0xd11c8137, 0xc11c23fa, 0x29820c73, 0xcb48e51f, 0x276b6a27, 0x67d44f98,
- 0xfc0f0b66, 0x8b5d371c, 0x8879853b, 0x25284d17, 0xb04dcf68, 0x05ce788b,
- 0xbce199f2, 0x0f5544db, 0xb2b8fc4f, 0xbb07e584, 0x9ebdc30f, 0x7ef576cd,
- 0x739f7530, 0xd727ca92, 0xa16f0891, 0x245dd7ed, 0x4d27886e, 0x8fdfa8ba,
- 0x5f60ca78, 0x57fb514e, 0xf0855dda, 0xede89f38, 0x450d29bf, 0x2c3d6027,
- 0xfc008a20, 0x6862d934, 0x09d23b07, 0x9febf0f7, 0xfd50e03f, 0x19924cdc,
- 0xeda6c79d, 0x87e14d43, 0xb5fb446e, 0x5e037258, 0x0f95da76, 0xdfc03f20,
- 0xc5cb9222, 0xb1f29f25, 0x1721c37c, 0x2cae046d, 0x8769fca4, 0x50dd5cf9,
- 0x79d2b7ef, 0xf5903d5f, 0xfa087c0f, 0x93c7cae5, 0x8fdf4093, 0x95a5e957,
- 0x7f01df1a, 0x7f19e2b9, 0xca7786b9, 0x2c8ed3e7, 0x37934f7c, 0x67e83be0,
- 0x1f98188f, 0xf29bbf6b, 0xafc0d3eb, 0xfd04ed3b, 0x5ba59fb8, 0x699c80e6,
- 0x7caab4e3, 0xd5d72c6d, 0xbf0fdcae, 0xc0bfe4fb, 0xbec28dd6, 0x75e8a517,
- 0x83024fbf, 0xcfd72bd7, 0x68790698, 0xccf31176, 0x5173d452, 0x1e92fcd1,
- 0xdf1a2fc5, 0x9c59dab5, 0xc99fe348, 0xe1e5dfbb, 0x840be4f9, 0x8748b2b8,
- 0xcff1a2bf, 0xf7187f44, 0xe6122781, 0x67f9189f, 0x93fc3757, 0x40165e7f,
- 0x9fe1756f, 0xba7f8c43, 0x0fda1be3, 0xe4616bde, 0xaa1c779f, 0xff2dddbd,
- 0xfcf3c25c, 0xbe4ef7ab, 0x93657eec, 0x6a9f9f4f, 0x62d9c72c, 0x54bb2fe7,
- 0xea96efbc, 0xbd01ca6f, 0xcec4d531, 0xe32409ef, 0xeeef0a16, 0xfcdeed38,
- 0xefa86c9a, 0xda1e9814, 0x1ea33219, 0x78f769da, 0x669b7e30, 0x0557e761,
- 0x23ce044f, 0xe45afbb8, 0xd921dfe8, 0x587660ee, 0xfb8f0c50, 0x37f1ec02,
- 0x9d8c1f18, 0x2a43cb9a, 0xa662fa5a, 0x971d4617, 0xcfefc336, 0xbfccea3d,
- 0x06967c00, 0x79dfa3b8, 0xcf784a8f, 0x0951f552, 0xf76fe3f5, 0xf1045dbd,
- 0x79cf2d51, 0x8bb79ecc, 0xfbc2860a, 0xa1fedf8c, 0x8ead65ea, 0x5f404047,
- 0x1fddee1a, 0x4b1e71d7, 0x297e6f7f, 0x674e51f0, 0xf93ef1bb, 0x3fdb1c60,
- 0x11644d99, 0xb4719886, 0xdeb85ef4, 0xfcaf78cc, 0xff3b30ec, 0x27b0f336,
- 0x2af2bdf6, 0x37ef187f, 0x8a77f6a7, 0xe7efec13, 0x6841f65a, 0xdd6cc884,
- 0xd7354a0e, 0x26a33f30, 0x9349f81b, 0xf8a3fc43, 0x44327e0b, 0xb706bbf7,
- 0xd07ff3a9, 0xa9fbb02d, 0x4bee1222, 0xefb09cfe, 0x363aea82, 0x40f4c9ca,
- 0xe617eaee, 0xcb239c03, 0xcba06695, 0xd7162eb3, 0x5b0905f9, 0x6e6e81b2,
- 0x8253f701, 0x0bf93ca2, 0x223df3ae, 0xd3b10e94, 0xe27d3df6, 0x8ff8c246,
- 0x9c633d3a, 0xed099f60, 0x31bbe087, 0x824773e3, 0x588fb71e, 0x9aeb049b,
- 0xab853706, 0x13bd9b78, 0x81ae0d0e, 0x8f97c64e, 0x00c6cb3a, 0xd03be01e,
- 0x8e474d4b, 0x8d7be059, 0xcf7b821f, 0xe2883e30, 0x4e83e00b, 0x4016e0be,
- 0xa283c35d, 0xfdb7cfae, 0x8b42f88b, 0x17c8f46b, 0x7e70b710, 0x0b98be2f,
- 0x05f23578, 0xce3ce7b5, 0x505cf618, 0xc6937962, 0xe7b13fe7, 0x896efbe3,
- 0xd913de60, 0x6bbf6567, 0x7dc049e8, 0xd8b8df90, 0x1c9f6d98, 0x28a6a4cb,
- 0x7664e559, 0x2a3bf6bd, 0xda1cc223, 0xa2a8239f, 0xbced0db8, 0xdd27a87e,
- 0x31c8fe2a, 0x46fb838e, 0x033e3731, 0x1270d0ec, 0xcc236bd8, 0x87906125,
- 0x1989b65d, 0xdae63f60, 0xfcc36426, 0xfab21e5f, 0x5ed2584d, 0x9bfe5bee,
- 0x7da772ad, 0x66c3e9a5, 0xfc6d5bb5, 0x4c7f2a35, 0x2779d852, 0x70d1ebd0,
- 0x9003eb68, 0xc2e0e009, 0xdadf4db3, 0x22878f50, 0x00936f14, 0xfd878a0e,
- 0xfb04691c, 0x24a4df21, 0xb03b064f, 0x3800584f, 0x9d7ef6db, 0x92cef109,
- 0x021febd0, 0x3eda25bc, 0x0fbf5bac, 0x35b7b46b, 0x3ff3487b, 0xf3497b34,
- 0x82aec17b, 0x1fd532f6, 0xbed99971, 0x05b084d8, 0xe47853ef, 0xa2957cac,
- 0xe740b3ca, 0xe3cea251, 0x7082d266, 0x4ca9f769, 0x7251f153, 0x39abfa88,
- 0x54c84ed0, 0xeaa9878a, 0xdd532a7d, 0x6e22fbfa, 0x3efb1b1e, 0x9e9fad15,
- 0x1bae37de, 0xa3f61ad7, 0xc879687e, 0xf94feec7, 0xc3f98fbd, 0x7ef007e3,
- 0xb1dfd601, 0x09feec59, 0x21c3c317, 0x763af629, 0xf1eb9551, 0x9c0e48a6,
- 0xe7bfdec7, 0x1e6eb8ef, 0xf7f7f3d8, 0x771c8ec1, 0x97902042, 0xa8c4fe3b,
- 0x180fcc78, 0x695f232b, 0x6f3f7afd, 0x9cf611b4, 0xccdeff44, 0x728f0a41,
- 0xc167c025, 0x8f5395f2, 0x9dfd0a3c, 0xe8902a3f, 0x92451f73, 0xe5c42067,
- 0x1ddfd48b, 0x7b31c83a, 0xa83f6b4a, 0x11c7defe, 0x21216f95, 0xc567bb43,
- 0xa9e7451c, 0x1a1ffdea, 0x2f1ea75d, 0x74e4213e, 0xfdfc0fa5, 0x5a10e233,
- 0xaa9ce1fc, 0x9ff3490f, 0xf5ce7cf0, 0x600a778d, 0xc69f5fbf, 0xdc6a2513,
- 0xe9ea641b, 0x4e21c547, 0x6ff60d3a, 0x4e8f33f3, 0xb7e5bc83, 0xf4814c78,
- 0x99be7686, 0x3e16f503, 0x462abde6, 0xba1f5efc, 0x9955ef13, 0xa3e4cf1b,
- 0x5fc7f1a7, 0x3864f94d, 0x3b309dd0, 0x4bdd8f30, 0x81efa462, 0x1491ff3d,
- 0xbe3dda19, 0x91bfa3bb, 0x2a9caf0e, 0x9eac7dc0, 0xdbe461e8, 0xfe9e747d,
- 0x5377e12b, 0xbf8fc42d, 0xfac2ea9b, 0xac34beab, 0xddeafc6b, 0xd81264fb,
- 0x4e9f1a79, 0x337c6249, 0x49a3f76e, 0x2f49dec4, 0xc586de23, 0xc97ffe33,
- 0xee25d9aa, 0xa3e8d359, 0x843ae977, 0x2d27ea8c, 0xaa563790, 0x70fb3ac2,
- 0x4425da11, 0x5c62cf3d, 0xb7f366df, 0xda75014d, 0xccfeefd3, 0xf57acb94,
- 0x6fc5397b, 0x0eb00bd5, 0x99ab0916, 0x78ed531e, 0x9d231e0d, 0x08a523df,
- 0x19f0777f, 0xe708c0f9, 0xf6f91d4b, 0xda07ae0a, 0x18cce0ef, 0xc6262598,
- 0x8f868798, 0xc7ae8499, 0x8f5fbdd9, 0xdbd78c6f, 0x178ee7ef, 0xdad011fe,
- 0xe919bc9d, 0xf4d76c66, 0x900c1147, 0xec1fa58b, 0x29f1b993, 0x8ba2df48,
- 0x3ae85112, 0xcfe9fa43, 0x2b9293f5, 0xb956df91, 0xf8eeb3d8, 0x4f22faad,
- 0xf93b233c, 0xd87a6c4e, 0x5bb77c06, 0x1c41b7a8, 0xf4fb3af5, 0xc2bd103a,
- 0xe2660bcf, 0x131115fd, 0x94dad081, 0x886699d0, 0x243f7833, 0x20a1be78,
- 0xf0327fb4, 0x0b42abe9, 0xe76d59fa, 0xca387909, 0xd50a6cee, 0x6b2ec0d9,
- 0xf3e7664a, 0x5d10260e, 0x78c5f4fe, 0x8e9a6fa6, 0xc4a69d9e, 0x62ee1f68,
- 0x28833cfc, 0x9ff439c9, 0x7f29eb45, 0xc7dc1f80, 0x42927181, 0x325afdce,
- 0x39732dd8, 0xf6da23f4, 0x992d798e, 0x7b7adbc2, 0x2999ea02, 0xe9ddf74d,
- 0xfe0884a2, 0x6f3e2463, 0xfef0e32f, 0x8dd92284, 0x59c5a2f5, 0xf98df7e8,
- 0x09466926, 0xa220dfb4, 0xaf8533df, 0x755e2014, 0xabc38d3a, 0x9170f63c,
- 0x623df85a, 0x0e3674ff, 0xc093f971, 0x24a77d3f, 0x66fe0469, 0x07d5ac9e,
- 0x122eefe1, 0xfab6ff83, 0xcdfb73a5, 0x2cf57db4, 0xa78df30e, 0x3f8832b8,
- 0xd68a7f8a, 0x5baf384b, 0xc005a942, 0xf58a33d7, 0xaac31ee7, 0xd4ab393d,
- 0xfbd10246, 0xe50b9cf9, 0xfdb5a0fa, 0xf11dfc56, 0xf98122bd, 0xa37e7269,
- 0xe7b3efca, 0x9bbee924, 0xa9dd984e, 0xaa4dd5c9, 0xfaa8ddfc, 0x193d73cd,
- 0x094bf1ba, 0x40fe87b3, 0x64810bda, 0x7ed8b8f7, 0x96f6b4f4, 0x25e1f3c3,
- 0xd068dfbe, 0x310d3787, 0xe57cbdf8, 0x348be45a, 0xc8f78dce, 0x37edf5b8,
- 0x8dd7d58f, 0x0fd46bf2, 0x8f8b20b3, 0x3427aaef, 0x2f6b307d, 0x683c422f,
- 0x33c53e56, 0xbb7ac5da, 0x317d27bd, 0x7d1bae26, 0x17be4971, 0xfa2e90cc,
- 0xefe2e3e2, 0x4b7e2fa5, 0x54b196dd, 0x578edfae, 0xa4fecdd7, 0x298de83f,
- 0x81478efc, 0xbae8da8f, 0x47c18e69, 0xc5a3bada, 0x1ca88a01, 0xc5381ea3,
- 0x5ca663f6, 0x91fe274c, 0x0e80719d, 0x12ed423d, 0xa97b0c77, 0xe51b3e35,
- 0x68ef43ab, 0x2eca257a, 0xb87ef311, 0xda85325d, 0xc02e52a3, 0x2be10fd7,
- 0x2925ef52, 0x882b8482, 0xc19da61f, 0xa0579038, 0xcfae2cfe, 0x817f224f,
- 0x3b12e3fe, 0xaf68c3e5, 0x0ae87168, 0xe94f6697, 0x1e23a5f9, 0xc3dd9339,
- 0x1149f1aa, 0x7fe413e0, 0x04fb8f26, 0x7fd41bff, 0x8000b303, 0x00008000,
- 0x00088b1f, 0x00000000, 0x7cc5ff00, 0x55547809, 0xf579f096, 0x55492d5e,
- 0x146caa92, 0xb612f08b, 0x84582484, 0x5916ec80, 0xa014a358, 0x8168cb80,
- 0x9a126b0b, 0x69ee9c71, 0x0242a6ff, 0x83b74343, 0xe8cedad2, 0x3ad857f4,
- 0x08b41a83, 0xd09d0301, 0x584c5015, 0xf82e0834, 0x1a6d1ad9, 0x84490ed1,
- 0xbfbb46d6, 0x739cffcf, 0x2aaa4bef, 0xffff4d85, 0xb49fdf3f, 0xdeefb97d,
- 0x67b9ef77, 0x979ee73f, 0xb3559bdb, 0xf6e008ad, 0x14078a99, 0x77f1d000,
- 0xe042c022, 0xadacc37f, 0xc78ef016, 0x69fcd7be, 0xe7f80d87, 0x9c2ffc3b,
- 0xa42cfc90, 0x900bcf50, 0xce54b009, 0x7d57fc5f, 0x3c5e3d33, 0x52fe7a27,
- 0x92b9fd98, 0xf81b700c, 0xf71c30cd, 0xff8ec5f3, 0xc7154bec, 0x7e8b2e97,
- 0x4a4ce99e, 0xff8790bf, 0xbe230118, 0xd4ca0153, 0x22b79dba, 0x5527ddbc,
- 0x334c558f, 0x390ffed1, 0x350ffec5, 0x7c800c97, 0x1674df80, 0x3db1f8a7,
- 0x1c2111b6, 0xd1bad00d, 0xec71edc6, 0xefc5f107, 0x3c66e7e8, 0x21fc059f,
- 0x52b4b607, 0x4801b721, 0xc4ed1805, 0xff602745, 0x0d3f9e2a, 0xc02486c7,
- 0x5c2473ef, 0xe7af00d9, 0xf104e798, 0x5eb8c3bd, 0xe30dd700, 0xf75c01fa,
- 0xf72746c8, 0xe6e8db5f, 0x442e0dfe, 0xfac0066a, 0x37af2714, 0x8776f72f,
- 0xe35bdf1f, 0x50e7e6cc, 0xf844da3e, 0x7b2fe036, 0x1004086a, 0xac77afdf,
- 0x807494d0, 0xb1e2b72a, 0x01d1f566, 0x8e58e34f, 0xbe25cbf3, 0x102ab72b,
- 0x1fe5e7c4, 0xc4072bae, 0xee33575f, 0x2aa9f887, 0xfc368355, 0x63d34967,
- 0x0cdf453b, 0xdefa08d6, 0x33028e22, 0x86cd16b5, 0x28f02cfb, 0xf1f7151e,
- 0x6ff78936, 0x1d412af5, 0x278b79ff, 0x63871e9a, 0xae58bee8, 0x0ffe80b3,
- 0x47305bdf, 0x9d718609, 0xdfa29305, 0xd6757c5b, 0xfed8cae7, 0xe898b47c,
- 0xbf3fb63e, 0xb7744edc, 0xc1863fe3, 0x95fba230, 0x8d6fa58b, 0xc5ba3e85,
- 0x8e74b1b6, 0x9d16bf1d, 0x673a27ef, 0xc6ce9b5c, 0xce8b7ffb, 0x904e110f,
- 0x1bf470be, 0x30049e8b, 0x1a07e9bb, 0x55f679d1, 0x32add78d, 0x861433ce,
- 0xd4fe79d2, 0xb76ff859, 0xaafcdf42, 0xfe2d2fa6, 0xe79f6b82, 0x22bfed6f,
- 0x69fde745, 0xdf7e6f5f, 0x8034f3be, 0xf9ce78c1, 0x8df8d139, 0xbd2c6ba5,
- 0x62f7ca16, 0xe635b5e9, 0xa05d061c, 0x60689913, 0x555fbc1c, 0xd16b8e19,
- 0x162e97ad, 0x7e4f08ff, 0x5f8071ff, 0xd6fab9f3, 0x76827493, 0xe02057cd,
- 0x03c84732, 0x052075f2, 0x0ade5be9, 0xec0444e1, 0x3e738b96, 0x28f5a8d7,
- 0x39c469f0, 0x8353e7e8, 0xfadf6ace, 0xb2075765, 0x96fb19fc, 0x14b01069,
- 0x972173d2, 0xd255f0b1, 0x7e69233a, 0xcb68f980, 0x10b2d32e, 0x05ba639e,
- 0x9b51f5ee, 0x4e03da28, 0x9d1a07a1, 0xfbb75e26, 0x6e099b2b, 0xf0e57804,
- 0xf8406280, 0x09e7563c, 0x33218bd2, 0xc945fbf0, 0x9cdfcf3a, 0xdc048104,
- 0x9b76b41f, 0x2b44f90f, 0x3941c806, 0x825906d3, 0x9d64b835, 0xdc91f616,
- 0xa8347b75, 0xd1bbb946, 0xe048ef78, 0x6c92402e, 0xa99474fe, 0x0fde8b26,
- 0x79c54fb7, 0xfa6a1537, 0xb2e29c36, 0xf1a03fea, 0x93ab66ae, 0xf83eebde,
- 0x5a662dcf, 0x20f02be7, 0x335af084, 0x4f8e1532, 0xa4839873, 0x93d4ca37,
- 0xddfb09aa, 0x8fa4003d, 0x970b2da6, 0x3a6dd200, 0xb138d7b4, 0x8c74429d,
- 0xd0bfbf43, 0xde843379, 0x9a74881a, 0x4334e921, 0x6692abea, 0x4946f595,
- 0xac841a92, 0xb374fd3e, 0x408cd1a4, 0xee0a497b, 0x3c53433b, 0xf9bfd835,
- 0x8fc516aa, 0xba3e932f, 0x951f4fb1, 0x3dbdf5c9, 0x019686ba, 0xff344266,
- 0x3d0a3596, 0x798b6254, 0xbe0fed2e, 0xe6557cc9, 0x429f1e8c, 0xfbf913ba,
- 0x825e8c62, 0xa653fb18, 0x03b935fe, 0xc58af0f1, 0x8454defb, 0x19b4e7bb,
- 0x70e7c59e, 0xfb933b11, 0xc631a7e3, 0x07b6c51e, 0x757a270f, 0xe215eb84,
- 0x07f50723, 0xd7b1f579, 0xa3b63f9e, 0xf18e98a7, 0x0d22dd4a, 0xcf2d99b2,
- 0xf0fb33ba, 0x92bf0fde, 0xad78b10e, 0xdbde7a97, 0x8e8d7ce8, 0xad8f0fdb,
- 0x979ed8b3, 0x48d749fc, 0xf8bdfdf5, 0x95e5e434, 0xe25e890b, 0x0c2e57ff,
- 0x4afdb2f2, 0x0c3e59d2, 0xfe783879, 0x51e92272, 0x21aea7c4, 0x6cb8f49f,
- 0x9f99679e, 0x06841c85, 0xc36e0a8e, 0xc12485fe, 0xf624aff7, 0xbc23cf1c,
- 0x3cf262df, 0xeb68db4b, 0x202a2f87, 0xd175b9fd, 0xe2803379, 0x2887e505,
- 0x84ee8a4f, 0xa00db1bb, 0x05ff113c, 0x72482152, 0xd0108662, 0x785e35be,
- 0xd319da9a, 0xfbc214f4, 0xddb35c65, 0x29e9eb41, 0xd7f0f7c4, 0x057d132b,
- 0xbf5ff1fd, 0xee1a7c11, 0xe6d1b0d3, 0x1c2f50d3, 0x7ee4e8d8, 0xb73746e3,
- 0xdcea3687, 0xfc913a6e, 0xaa1ff243, 0x169f16bc, 0xa5e657a1, 0x0330fd0a,
- 0x35c10ef5, 0x00609403, 0x23c66f79, 0x7288f2c0, 0x10a4dc10, 0x393cf0bd,
- 0xfa44f87d, 0xe888ee5c, 0x3b56ad43, 0x82a59321, 0x67985ee9, 0x09d1f7e2,
- 0xd59f3e08, 0x3fd23e7f, 0xfc8cbfaa, 0xefc38416, 0xb0b996f2, 0xb37c390d,
- 0x7adf885d, 0x4bd1e965, 0xf4b9fdc0, 0x43b95ebc, 0x8feb84bd, 0x1f84cf0f,
- 0x8f18bda2, 0xe2bc2184, 0xff045dcf, 0x100b1684, 0xb2b0d07d, 0xf21a167c,
- 0x5ca2b5f9, 0x8448fdc0, 0xef8c1bbe, 0xc4c3cb59, 0x23a0074f, 0x645ef08d,
- 0x7e49d9f3, 0x3f04b997, 0xdddb9578, 0x3937ae0a, 0x0951e2c3, 0xf1bee1b4,
- 0x2d7f11eb, 0xce21b819, 0x1cc9696f, 0xb713e7a2, 0xc93ef1bd, 0x08e6d7b7,
- 0xcc986cab, 0xf6111d9d, 0xe33c007c, 0x74927f24, 0x476877dc, 0x0e4e3f5a,
- 0x6abb9eb0, 0x6b5e9209, 0x09ec933d, 0xafd33ee3, 0xacbfd8da, 0x6e5117a7,
- 0x8f26270f, 0x42133d65, 0x63f2430d, 0x6d98e713, 0x3c23aff6, 0x7a3e65a0,
- 0xedfbdf0a, 0xafd20065, 0x543fdbd3, 0x312d0fc9, 0x40e80ee5, 0xa32a8f38,
- 0xf27da853, 0x6e7ce4e3, 0x127724cf, 0xec9b0e7e, 0xcefbed3a, 0x0474bd46,
- 0x8da9cbf2, 0xf9023a50, 0x368dade3, 0x746f4f6e, 0xa3667b72, 0x38a3db9b,
- 0x3af3fc4e, 0xbd7f138e, 0x6fd4e381, 0x4f6a71c3, 0x7ad43ae0, 0x489872df,
- 0xe5bc677e, 0xbc2da8d0, 0xfadb892b, 0x742c933b, 0x50c9c38e, 0xfc4fec67,
- 0x370c4e14, 0x2ee50780, 0xdb53fdd5, 0xad3f680d, 0x1c6502fe, 0xf754d4c0,
- 0xae3a6d5c, 0x8fe87b3b, 0x19f1c2be, 0x6a7ec3a9, 0x3b6f7843, 0x893868e3,
- 0xf5c7d2fd, 0x3f883a9f, 0x92a2244f, 0xb7643c24, 0xc7365179, 0xc418d293,
- 0x24c2f768, 0xfd19a7ec, 0xc8a5b9ec, 0x3a4a4b0a, 0xb005c0f2, 0x73e1b1f3,
- 0x25df886d, 0x513fff76, 0x9b6812df, 0xfa9d3a53, 0xbc927010, 0xcb20e62b,
- 0x7d2510fb, 0xadbfda11, 0xd4c4ff0e, 0xabc07f08, 0xf48d21fc, 0x02af44c3,
- 0x77a674ae, 0xf7d2a470, 0xd5b798dc, 0x15313651, 0x2ed7f7f9, 0xc269f995,
- 0x741bc534, 0x635f97f4, 0x7af384de, 0xe26fd129, 0xd4b559c5, 0xe2e3982e,
- 0x4dd96fbe, 0xa5ff24b5, 0x5fbd6b8e, 0x5c87efc2, 0xc547d666, 0x638bce1a,
- 0xe3798d17, 0xffc8c991, 0x0c4b69ee, 0x7d7c4b3e, 0x84935fd6, 0x567732fd,
- 0x34752aa1, 0x6bb5df70, 0xbf978d1b, 0xe572c0de, 0x59e8f249, 0xa5ed8ac4,
- 0xd33e3869, 0x24c278a1, 0xa93ed32a, 0xa771a34d, 0xcdb421e2, 0x553b8ef4,
- 0x5423e344, 0x9fea4ccf, 0xd89d4bd5, 0x4e66d19e, 0x66f384bd, 0x6c78a4e7,
- 0xf099def8, 0xff10ae8f, 0xf249ccce, 0x1714e824, 0xa4ae9f04, 0xa9eac7be,
- 0x8f1ce9d6, 0x3ad32495, 0x6131e2ba, 0x92d6febf, 0x147045ff, 0x514052fb,
- 0x91a5dc68, 0xc7e4fb1f, 0xe275293f, 0x0aa1fabc, 0x099cfc90, 0xe091fee7,
- 0xa89ad99f, 0xf5402027, 0xbd296716, 0x9c516f57, 0x2befb89b, 0x5181f9a1,
- 0x5c79fd66, 0x769de4f7, 0x6fe507b6, 0x745fb3e5, 0x6cc1cef2, 0x9adfca17,
- 0x49ed4c56, 0xcbdbf093, 0xc39f2adf, 0x930826e3, 0xebbf2952, 0x1dbdf81f,
- 0x4d293f25, 0x0c4ff5c7, 0x416eb173, 0x116a953a, 0x83e70d7a, 0x55dfef42,
- 0x945bf3c1, 0x8f3a3f01, 0xa7e4aff3, 0xff715a14, 0x5109e959, 0x710ce7f3,
- 0x8de7f545, 0x45aa5818, 0xfcf2b1d8, 0xadfea8ac, 0x88e94a45, 0x69bef988,
- 0x45fd5109, 0x11d2aea5, 0x2b6ff311, 0x5fd5181f, 0x54565b72, 0xb269a67f,
- 0xa11b1fd0, 0x7dc854bc, 0x9bfb2979, 0x0a87acd9, 0x1bee629d, 0x8e3f7e38,
- 0x7f09b617, 0xfe85d0aa, 0xb4a5d214, 0x80fd88ad, 0xc68f7c36, 0x8f52ec8b,
- 0x3ddda005, 0xfc80ec91, 0xf62fe209, 0x6677658b, 0x6305fa3b, 0xb768ae89,
- 0x752fe023, 0xfaf1db44, 0xcc27e144, 0x0bad7f01, 0x75a739e9, 0x6557fb1e,
- 0x6145d5e9, 0xbf467c20, 0x2a370b1f, 0x13779dd1, 0xac6ab61f, 0x890ce737,
- 0xd4adafd9, 0xf48c7ec1, 0xb7c8e6ef, 0xe0217d8c, 0xc6ff276d, 0xffb18738,
- 0x4733ceb5, 0xe17ec69d, 0xacd73adf, 0xa9a5859c, 0xcf5fe171, 0x9c2c4cfe,
- 0x4f32a979, 0xfb15fe81, 0xeca9ad85, 0x7a4fe22f, 0xf0c13f3c, 0xcf51ca7f,
- 0x9cf522b9, 0x585f4943, 0x673d6d70, 0xff2d7bac, 0x78d758cf, 0xde54ea1f,
- 0xf7bf78df, 0x7fc4aeb9, 0x27e71bbe, 0xae9e79c5, 0x5cfe7fc4, 0x9a19fcf4,
- 0x7940e35e, 0x278a0e3e, 0xa93f7840, 0x959ae46b, 0x52d6f59c, 0x5fa1f65b,
- 0xf670b723, 0xb52d40f9, 0x27833b53, 0xb9fe78ad, 0xf94eaa39, 0xea2fc307,
- 0x0aa56717, 0x55f8e47f, 0x78e3aedd, 0x53b7407d, 0x88ff51dc, 0xaaf8a76e,
- 0x28730bde, 0x370ef48e, 0xc438a7ad, 0xf62d879b, 0xfa154def, 0xdf7c857e,
- 0xfea90f68, 0x857f0415, 0xf251e3ad, 0xe9bf2a49, 0x1c5f10b1, 0xbf2f7f27,
- 0x5c77bfbf, 0xbf683a0b, 0x1d048fe3, 0x7e7d08be, 0x22defebd, 0xe20ca662,
- 0xa917c553, 0x096c4a4f, 0x51fd48be, 0xf54574e6, 0x83ff93c7, 0x0f76cdef,
- 0xae6f7a8c, 0x0f35159e, 0xf21cd8fd, 0x902c6a87, 0x353e91a3, 0xcfc87a25,
- 0xefe3c58b, 0x0e3df9e5, 0x6051b927, 0x7f55d74e, 0x29e4fb1c, 0x7d8bf811,
- 0xc4553dbe, 0x9ec88373, 0x52273ae3, 0x556b6e75, 0xd0e6709f, 0x887e267f,
- 0xbaac63ed, 0x95c3d68c, 0x1e77c4dd, 0xba0ae9bb, 0xe3d2f0e0, 0x686ae7f3,
- 0x4de33dff, 0xfb1c38ff, 0x215f1ec7, 0xfb4d53fa, 0xfadfb1b2, 0x7fa27df8,
- 0xd9bc69a3, 0xb819ec83, 0xf9296e7f, 0x7fbee90b, 0x9b75fa27, 0xd4155b1d,
- 0xa8099503, 0x4cd5b0d7, 0xf7a6140a, 0x2ddbc7c5, 0xc0915f68, 0x175e6cc8,
- 0x156ec0d2, 0xc26574f1, 0x13d920d0, 0x13abd8ad, 0x0c132ade, 0x9264777a,
- 0x09da879d, 0xdd9d7151, 0xfbe1ef82, 0x0bff3665, 0xc9c26f82, 0x87a5ea3a,
- 0x200d960e, 0xea9138fe, 0xede43c64, 0x7d26ea9b, 0x21edf085, 0x01fffefb,
- 0xf078e6be, 0x3ec59ad5, 0x2579f59d, 0x86f38f64, 0x3e947921, 0x3a6a15fa,
- 0x21084f3f, 0xeebfb0ef, 0xb1fe5375, 0x90f165c6, 0xf2427eff, 0x35b6f5ef,
- 0xdaa18f32, 0x29a91f05, 0xb17bf5a6, 0xcff25b7d, 0x6a2fb919, 0x910ba1be,
- 0x33d9c03f, 0xe8815174, 0x6aa87f13, 0xe1f90f61, 0xe490ff3d, 0x83f9f8a1,
- 0x183f8144, 0x8fd1f3d2, 0x777bf9f1, 0xf1b679e7, 0x65a78173, 0x0e9aecc1,
- 0x06b38fec, 0xe3fca06f, 0x47ff34a1, 0xad3b8784, 0x87cc91c1, 0xb449f58e,
- 0x8e05e28f, 0x0ff88c53, 0x32dd3ce3, 0x705c7fbc, 0x46c1fded, 0x64e96576,
- 0x7f637781, 0xe88db1c4, 0xf124def1, 0xe10780dc, 0x303e26e8, 0x0785eae0,
- 0x8f393886, 0xd4ff2200, 0x2e8be325, 0xa7ec8f5a, 0xdc7910ea, 0xff38983f,
- 0x8ac24552, 0x4de77dfd, 0x791a0f1c, 0xfb1022e0, 0x49b53e4d, 0xf803e065,
- 0x96a2513b, 0xcb5c695e, 0x7684fdf1, 0xf6fe262d, 0x2a9d3db2, 0xd5a9bfdf,
- 0xf6a0e25f, 0x5379e8fd, 0x093f1c65, 0x59c52a82, 0xc6623b93, 0x467fe71b,
- 0x18bd4feb, 0x4d293cfd, 0x3041c3da, 0xb24f3228, 0x974dc641, 0x10a7664f,
- 0x2b186b1f, 0x8b0f48a8, 0xd4a4c2ae, 0xb0d33d3e, 0x4959e711, 0x2db8da9f,
- 0x8fbceb38, 0x88dc69f6, 0xf49a3f0c, 0x435b9baa, 0xc2ce0aee, 0x623630bd,
- 0x0617291f, 0x2fbf2f1e, 0x23ae38f0, 0xb0d397e7, 0x43356e6f, 0xc3c3ef50,
- 0x0c2f8914, 0xbbfe38df, 0xf6c1d17c, 0x62db626b, 0xead9e495, 0x711e8136,
- 0x113d04be, 0x6c7d08f4, 0xea8d49cf, 0xb59e92ab, 0xca7ed109, 0x924fb978,
- 0x7ad45067, 0xd062bf86, 0xacea50f3, 0xc663f256, 0xdfb1563b, 0x3b293292,
- 0x93a2fd6a, 0x042ef8d1, 0x63e2979f, 0x71c087cf, 0x5ab3c4c0, 0x29327f94,
- 0x63f983bc, 0xcfd187d2, 0xee8f9ca7, 0xfcc9a697, 0xaedd2c56, 0xfb184f85,
- 0x7fe969cb, 0xde5d3e3f, 0xcda67c68, 0x8667c689, 0x039f1a2f, 0x6be34596,
- 0x1f1a3fa0, 0x898d5783, 0x7d61bf1a, 0xd87f5461, 0xcd44a70f, 0x198342cf,
- 0x75be1fd9, 0x91fcd45e, 0xf545163b, 0x67753f47, 0xe0dcfcd4, 0xbcf1a88a,
- 0x2efe6bdd, 0x368417fa, 0x3427cd44, 0xfe87be9b, 0x4bfe3637, 0x53ff7ed4,
- 0xa18daff4, 0xe7d256ff, 0xa85e8813, 0xecfb0b37, 0xa425b919, 0x6f4d5d3f,
- 0x9b79437c, 0xf9ceb140, 0xf03cc0aa, 0x2fd8e0d4, 0x7a429f54, 0xdaa2306e,
- 0xd6a37a84, 0x1ec8ab04, 0x71326a3c, 0xf390d31e, 0x3f84d317, 0xc89a62f2,
- 0x7b78197b, 0xd922ed1a, 0x43ea5541, 0x248efa73, 0x935453f2, 0xfec9da09,
- 0x1fd61e69, 0xbe101ce7, 0x0755f719, 0x4ac5e701, 0x982d2792, 0xbea8fc88,
- 0xa9fc72a9, 0x8e83f20e, 0xe4e6a4f6, 0x9f8453ce, 0x4e9b0e2b, 0xfaa6ab7c,
- 0xf0df6403, 0xa1e7a19c, 0xdef39fbf, 0xf1495846, 0xe65fd196, 0xe699bf5e,
- 0xe67bbb2f, 0x7b7f8a48, 0xacaddf9f, 0x43a08d2c, 0xc51739e4, 0xa40eabf1,
- 0xd867fb09, 0xdc7ec313, 0x3a6bd569, 0x538c3ed0, 0xbb407d85, 0xf10d2071,
- 0x88f281f1, 0x0169f859, 0xed1f9d33, 0xb827df2a, 0x80df21d0, 0xdd9f4a6d,
- 0xe643bfd2, 0xda3b20fa, 0x00937d6f, 0xf74438a3, 0x1de33df0, 0x955dfe86,
- 0xecbbf8cb, 0xf97f9e88, 0xaf02e1fe, 0x90f620f7, 0xe22ce70e, 0x17fc063c,
- 0x42fe5ea5, 0xd24ee5ea, 0xebf911c7, 0x2676bb55, 0x2e7e7f91, 0xc6e3f847,
- 0x3a49dff3, 0xe54d2eff, 0x9dddaff3, 0x047cfeb0, 0xae422ade, 0xe3557ea8,
- 0xf991f5c0, 0x39c6b5cd, 0xe7468afc, 0xa8e52758, 0xf923b5a2, 0xfe7f604e,
- 0xb75c34c3, 0x7b95d772, 0xa6fbd00e, 0x67614c9e, 0xd2dfbc03, 0x82e380f7,
- 0x28817fa5, 0xcd33b6bf, 0xa67570cc, 0x09613889, 0xbbe44e36, 0xb803e825,
- 0xda637da8, 0x7c3fe22c, 0xbfb0561e, 0xa4cc15a0, 0xb3fa16bf, 0x33f484cc,
- 0x3f6779e0, 0x69cfed1e, 0x1b73ef3a, 0xfb7ef8f8, 0x922a7bdd, 0xd6340fd8,
- 0x8141d633, 0x3def3f20, 0x499cc057, 0x0ae99f79, 0x9fb817fb, 0xeefb9e8d,
- 0xec99bc1e, 0xbbdfb1b0, 0x0ef4bfe0, 0xf022c5f2, 0xfac6baf0, 0x1134d740,
- 0x4b03f97a, 0x44ed1d2f, 0xa66d0dbf, 0x3685e530, 0xbdf2b39c, 0xcf75774a,
- 0x9c230fcb, 0x66f9af1f, 0x7f7082bf, 0xbfe7bcb4, 0x3bf3e00e, 0x4741529e,
- 0xfdd9f1be, 0xbc447a5a, 0x2bfeee79, 0xafe86f2c, 0x2924ff3c, 0x1eac97a5,
- 0xd60f9392, 0x322a34bc, 0x9f4e485e, 0xf74e9099, 0xe2d2933b, 0xc6c46fde,
- 0x3f886ba5, 0xf6757e59, 0x5cbe0d4b, 0xc372f92d, 0xf9f9a30b, 0xf3dea486,
- 0x2af3fe88, 0x3436c1e7, 0xf1af395b, 0x3587e48e, 0x31337e2f, 0x8f57cf32,
- 0xa95ba97c, 0xf74b7275, 0x09fa3aa6, 0x1e5e4efb, 0xeaab71cb, 0x1e73a2cf,
- 0x3269fabe, 0xf9bdf7f5, 0x6e079f2d, 0xbbf83ebb, 0x78917911, 0x79ea4aae,
- 0xc54af497, 0xf94eab01, 0xb90392ac, 0x7ec2ee27, 0x1fea2b65, 0x5725cf3c,
- 0x29a7b1c7, 0x00a83b1e, 0xc32ab7e5, 0x2be4ac09, 0xe520941b, 0xee34d6c3,
- 0xfb1832dd, 0x1616ea6b, 0x2556bb11, 0x59567d0b, 0xb13704af, 0xe9873e1f,
- 0xa365285c, 0xbfee50e4, 0xd778e3e5, 0x85feb171, 0x3cf6b08d, 0xf55d9dc6,
- 0x38205e5f, 0x395a35d6, 0x292176cf, 0xfb8c196c, 0x84abda5e, 0xdc9f7a02,
- 0x1c44b976, 0x98f42ab6, 0xf55df685, 0xe411c2dc, 0x381d7636, 0xb93d216d,
- 0x59b1e1aa, 0x309179f2, 0x8b9641ff, 0x9595bbb5, 0xd5ac7991, 0xa4420ee8,
- 0x02f842fd, 0x40eafb13, 0x2cfa494e, 0x435e1a6b, 0xb6bd42e8, 0xf58569ff,
- 0x97e242da, 0xf9ef0e0f, 0xdf686fdd, 0x923dff83, 0x7d745ff1, 0x805f07f8,
- 0xb94a8bfd, 0x633c5c0f, 0xb2b2f8a1, 0xde217cc1, 0xba6352fd, 0xcfda1eef,
- 0xfd638f88, 0x10162dee, 0x10d93ff7, 0x82ec9e7c, 0x1cb2c7a3, 0x9a57f9e1,
- 0xce68f080, 0xec0acb1e, 0xc8ce48db, 0xc11db85e, 0xbef57178, 0x84f18d74,
- 0x78fac4b1, 0x3921a0f7, 0x5877a25e, 0xe9579f28, 0x5f10b4ee, 0x173f8657,
- 0x9f8dfff7, 0x89c6f2cc, 0x574e5fb7, 0x273f33a4, 0x6437de4a, 0x30b9618c,
- 0x90c3daf7, 0x60bee58d, 0xa9d90f45, 0x3b91070d, 0xbfb7a214, 0xec0ee087,
- 0x7ce9d04f, 0x9f3e0725, 0xcfb111d8, 0xf47ca51f, 0xf95e59e6, 0xf099ac64,
- 0xb193efb9, 0xf3df525a, 0xaac74cae, 0x97997978, 0xdedd85d3, 0x18bbec9c,
- 0x9e09072d, 0xd95bee30, 0x147be764, 0x2d2d8394, 0x70729145, 0xc45daac7,
- 0x07aa4de7, 0x1b2a0225, 0x79abfa17, 0xc887927e, 0x0cce36a3, 0x4eaa1f10,
- 0xfc27f31f, 0x289bf715, 0xff508fef, 0x3ff74dcd, 0x39ac724d, 0x1e646560,
- 0xf1d4d2a4, 0xe88516ab, 0xa26bc3ba, 0xf5810afd, 0x75f0ea96, 0x94553c70,
- 0x7bdc53a9, 0x202cc274, 0x59bcfd3c, 0x39afbce1, 0x8b35bfd5, 0xe55c7be8,
- 0x2fa396f9, 0xbbbee804, 0x11141c6b, 0x7f79c67c, 0x2a0c2e3e, 0x00b9b3ae,
- 0xe6edb43c, 0xc4dd4b83, 0x38cf3fbf, 0xdd867ec6, 0x2bd20213, 0x503fe835,
- 0x1f9ec7de, 0x093ad30a, 0x1522be39, 0xee9359f7, 0x58b4be64, 0x27ca1efd,
- 0x36fb203d, 0xe08ab7cd, 0x8a27a5b3, 0xf5c87bf8, 0x48baf9e5, 0x6f74cdfe,
- 0x4b7ce7e9, 0x69f05997, 0x71c00dd7, 0x4cfcd722, 0xe32f21a6, 0xb83b06cd,
- 0xd6546aad, 0x44d7da46, 0x79e8dbf0, 0x1ea5c1d7, 0x2d9cdf82, 0x91333df9,
- 0xff7622e5, 0xa99f888f, 0x62b4c0ce, 0x99e1166b, 0x9ff7a6f0, 0x7be4a1f7,
- 0xf9a01f03, 0x115e4873, 0xfcbc6f54, 0xf8bd3f92, 0x7375d12c, 0x95c72faa,
- 0xc2bcd7cf, 0xc6277629, 0xff9c3b95, 0x31b7c901, 0x89f095bf, 0xfcd16fef,
- 0x51efc236, 0xc5b35f89, 0xfe593ba7, 0xdfed3a7d, 0xf141be7b, 0x3c8df7cf,
- 0xff6c96df, 0xcfef9ef7, 0x54e67cf2, 0x2f5a37e4, 0x399e7819, 0xfe201839,
- 0xa2824dab, 0xa8f8f84d, 0xe49b966b, 0x6f91cb6e, 0xd7bc9286, 0x40dc7926,
- 0x9be6b179, 0x6fdc9196, 0xf3277419, 0xe42f17ff, 0xfb11aa16, 0x7f17a724,
- 0x2f3be745, 0xe2c5b1cf, 0x7c73a1cf, 0x9c69cf2c, 0xefba496b, 0xf911e92f,
- 0x7e921826, 0xd7b3fc5d, 0x7194d7b8, 0x61da479e, 0x8f38f38c, 0x5c890dd8,
- 0xfa57b67f, 0x98f81ae1, 0xd1e0ec7f, 0xc2f2bfd9, 0xa92baa09, 0x36c13169,
- 0xb5253e3a, 0x92fdee22, 0x0c119355, 0xb2186d7a, 0x8d3435d3, 0x39d89f3b,
- 0x3ee116af, 0xf8550108, 0x0cf9d169, 0x255e2af0, 0xdaf18f32, 0x63cbeb9a,
- 0x21afe33c, 0x63cd8d79, 0x969b0e4c, 0x23ae525b, 0xbcdaf19f, 0xd18cf9b8,
- 0x5171d119, 0xc0ce8641, 0x7f047285, 0x4189c286, 0xf1833791, 0x26eabf94,
- 0x9fe3855b, 0xde5999e3, 0x9b1cae95, 0x13e0ed0b, 0x3adb6c4f, 0x8470e50d,
- 0x6bf503c0, 0xf1f5c988, 0xe6f507ec, 0xc31b5e47, 0x3dea387e, 0x4e02f395,
- 0x1d0c4b09, 0x137fd08b, 0x2a27a541, 0xf63e12bf, 0xdbf1326d, 0x2dfd549e,
- 0x9b4f4fa2, 0x092875ca, 0x15c5875a, 0x900eee47, 0xe076807f, 0x3e84339f,
- 0x7b8a35b7, 0x65ee9b4b, 0x74e0993d, 0xe423538e, 0xd8432f24, 0x1f638aa5,
- 0x2ca0b50c, 0x4c9d325d, 0xd7f5c7ce, 0xa64fafed, 0x73dade34, 0xd2a44f6f,
- 0xff353b9f, 0xff3c0c82, 0x51aeca17, 0xb75bf236, 0x7e12c706, 0xae6ce142,
- 0x20d02ccf, 0x87f615a0, 0x3c29eaa7, 0x9504ba14, 0x5b71a54f, 0x257e34d0,
- 0x58555ff1, 0x30f6979c, 0x6dcc8aa3, 0xd5dfb854, 0xeb4a9358, 0x79b6fd55,
- 0x97afe910, 0xf39f8432, 0xf13cd5e2, 0xcd1a06be, 0x25c33fbf, 0xa2682a99,
- 0x324be37c, 0x5575d94f, 0xfec679e5, 0x55338d05, 0x7e41f227, 0xce11c778,
- 0x7559368b, 0x73c26fa2, 0x94d1f020, 0xd5687ec0, 0x7640d9e1, 0x1e05dbaf,
- 0x24f7c705, 0xfc447cf6, 0xd56cd3d0, 0xc1373ee9, 0x715203b9, 0x1eb23dfd,
- 0xb619172c, 0xdcf7d1de, 0xe9a12fd8, 0x24439497, 0xe57be0be, 0x4c076d04,
- 0x3be24477, 0xc959d849, 0xc3127b57, 0xe967ba26, 0x59efdf41, 0x4e0fab86,
- 0x380d7de0, 0xec0ed973, 0xf85cf4b1, 0xe5c33a71, 0x05c83e24, 0x8d342701,
- 0x26af49d6, 0x8e7cbd38, 0x09eea704, 0xaae1fd28, 0x208e8b1a, 0xa4a3cfca,
- 0xc9f4a6cc, 0x5f625577, 0xb0b286df, 0xfca41b27, 0x9d2b6eb7, 0x8547d916,
- 0xfdefa933, 0xf8e278ff, 0x9c21dab6, 0x48a80bff, 0x49b63b7f, 0x214e74a9,
- 0xde122857, 0x0781428c, 0xdbb40baa, 0x7cef78c4, 0x2a4773e4, 0xe79e7fdb,
- 0xf3ed3ab1, 0xf85e58b2, 0x0ff0d17e, 0x17c2521f, 0x76d2466e, 0xc94be63e,
- 0x9a7573c7, 0x875c3c8e, 0x3cb8fef3, 0xfd8c2a34, 0x79ffab16, 0x3f57cfe2,
- 0x80bdb15c, 0xb31eeff4, 0x4ff49ca7, 0xf671e047, 0xeb345fcb, 0x0ee422db,
- 0x7264f486, 0x3ecc61dd, 0x3d36e755, 0xf776fe4e, 0xfba74541, 0xcf736ef4,
- 0xb2bb8250, 0xd50b33e9, 0xfc1d5663, 0xb2ebfd84, 0xda5eff4c, 0x3563526f,
- 0x864dcfd7, 0x873f5c1d, 0x20eb917a, 0x4fa16e43, 0xf51d9f2e, 0x1d04756c,
- 0x06f293de, 0x6461fe3d, 0xbca49abf, 0x1c38a61e, 0x772dfddb, 0xf7f5a70c,
- 0xf180fccb, 0x191c355e, 0x04df784c, 0x3c6b71af, 0x5c6a0eef, 0xaef79cbf,
- 0xc46f5eed, 0xfe52d3fc, 0x56fdcb4f, 0x20dffa67, 0x7cb6d778, 0xc2d2a16a,
- 0xa1a1f3aa, 0x3486f714, 0x4719c53d, 0x6fcaf48f, 0x69553c8c, 0x5f9a163d,
- 0xaf581175, 0x536bbd20, 0xbd2f7e90, 0x396638db, 0x348fdf3e, 0x8b3b2149,
- 0xe3d92af1, 0x60df66fd, 0xcefd5f9a, 0x8f4963d7, 0xf020241d, 0xaf375f7f,
- 0x4378461d, 0x244e2ffe, 0xac0bdb39, 0xcd390cd7, 0xf7c53927, 0x8bff89f3,
- 0x7acb9fa8, 0x7da58f79, 0x7ee5cb1e, 0x225c9013, 0x203cb3ff, 0xdfc93bff,
- 0x3ca277cb, 0x59a3fbf0, 0x9bb4f7ce, 0xa337ecbc, 0xfdc91f5e, 0xdfd4c5e2,
- 0x64ea5ed3, 0x843aa739, 0xf0d79242, 0x04275e11, 0x45f2d740, 0xfd22f80a,
- 0x648cbcff, 0xf1242f71, 0xd8433939, 0x9e8e5083, 0xd873c1e2, 0x3e4a3555,
- 0xe8a5ec22, 0x9a5a7b4b, 0xbf6d97f9, 0x26524698, 0xda5384ed, 0xd678ff49,
- 0x6d28fffb, 0xa7d786d5, 0x73c3a82c, 0xf2a5386e, 0xf71961f8, 0x1c9f2585,
- 0xb21af991, 0xa89a7a1b, 0x13e7a61f, 0xe5f54fb0, 0x4bebbd13, 0x9a4d1cc8,
- 0xb528e3b3, 0x12a17c53, 0x2da6db72, 0x166a3f6b, 0xfc479e62, 0x6e6470d7,
- 0x75e3d51e, 0x0242b39f, 0x6df0d882, 0x222f47cf, 0x8c3e1e1e, 0xa7638ff8,
- 0x7fcc38f0, 0x060d7fe8, 0x6ef1ff77, 0x4091ea45, 0xc7e27cd2, 0xafb506ae,
- 0x2254808f, 0xfddf5363, 0x007b7da4, 0x677fe76d, 0x2267077a, 0xf00ccf3e,
- 0xfa7ddb73, 0xcb124cdb, 0x5c4567fd, 0xe6f160df, 0x6434ff11, 0x871ba863,
- 0x2299ad4f, 0x856453ef, 0x379cf7b8, 0x18390a67, 0x0aed8015, 0x33aaebc7,
- 0xd4bf7488, 0xe82d7aa0, 0xc0845d2f, 0x6fa4a992, 0x01f75e64, 0xfc5e3f90,
- 0x283cfa45, 0xdf900fbb, 0xc1eb7682, 0x11d6c45f, 0x316fd978, 0xeb0058f2,
- 0x2b5fc44d, 0x0ff8149b, 0x67aac5c4, 0x93afe7a2, 0x1babf275, 0x8c1b1d77,
- 0x5cf35d74, 0x2df938cc, 0xe211ff7c, 0x5d5d7878, 0xcdc33d74, 0x197fd299,
- 0x7fc9c30e, 0x3d62be41, 0xd5a0d661, 0x60ac7c8a, 0xf6224314, 0x0be04992,
- 0x67cb2bf5, 0x8a6f61be, 0xb9ea5e85, 0x94a75c04, 0x94743bbb, 0xd4a681ea,
- 0x8f687965, 0x9f189a29, 0xee2598d2, 0x4aa0b4a7, 0xbc930c75, 0x77ec59fe,
- 0xa967bc07, 0xd49564de, 0x5fc4e097, 0xd3c8fbca, 0xddfe92af, 0x9d6c60ea,
- 0x9553c0a8, 0x8219379c, 0xcaefb435, 0x69dea2dd, 0xf9e8eeed, 0xeb4f1255,
- 0x8c7d3a9e, 0xa11585d2, 0xa7897198, 0x3a98c6aa, 0xdc38b116, 0xadef299f,
- 0xf5fb7d64, 0xd1fc5bd6, 0xb78dfc2c, 0xf8c63ae5, 0xef914ce6, 0x1063c4ad,
- 0xd505d77d, 0x2ad1f120, 0xfc2c9025, 0xc72c705e, 0xb067ddf3, 0xbdcfac81,
- 0x3ee9e25f, 0x7e7c3fc9, 0x19e2bf7c, 0xfdc57df1, 0x7f868b3f, 0x832cf80d,
- 0x0d83f9f1, 0xde50b03f, 0x56df9631, 0xffd999d5, 0x13f1235d, 0x7a6b5fcb,
- 0x858767f9, 0x7f2c5bec, 0x6b95fa55, 0xf6fc2fc0, 0x0df85bff, 0xcb91af3a,
- 0x398735ac, 0x09f6b53f, 0xabf943c5, 0xf7ab9d6b, 0x7f7cf23c, 0x359eb9ef,
- 0xc6063de8, 0xa4e7bd30, 0x63bd175d, 0x96bdedfd, 0xe726bcde, 0x66b72f05,
- 0xe0dd3e73, 0x6675d8ab, 0xf67dbf49, 0xbd7fc253, 0x276258d4, 0xa4fba3ff,
- 0x9ce8a3c9, 0xc02e4caf, 0x566f8ff3, 0xee91fc81, 0x033be1a6, 0xa9e17f08,
- 0x5f94c5a2, 0xed0a7f1f, 0xb707dd13, 0x0257fd16, 0xd27b07df, 0xb667ee48,
- 0xbdfb488d, 0xfb61d783, 0x70b27dde, 0xe9e93967, 0xb445a593, 0x54e382ae,
- 0xed285e9f, 0x42c9f554, 0xbd84477b, 0xc8a64dfa, 0xff716fcf, 0x1b8a51fe,
- 0x7b35ef66, 0xdb323f28, 0x8094dfec, 0xa0be15f0, 0xb771aa78, 0x9e47ed63,
- 0xbe5175a4, 0xf38acd28, 0x8d08e9fe, 0x1851ed57, 0xb2a6eefe, 0x3f643dbc,
- 0xec87aa83, 0x7dd37762, 0x1f920ff9, 0xfe40fcd4, 0x1dfb117d, 0x66fa154b,
- 0xb6660df6, 0xec839dd7, 0xbca72ebf, 0xeb1777a8, 0xfb43ce8d, 0xc37dfc2c,
- 0x78dc051e, 0xa3ee63ce, 0x3a1e132f, 0x50b69c0f, 0x70d25010, 0x4711d2da,
- 0x25ed1b7f, 0x9cc2f30c, 0xe161ddaf, 0x7ffdcdfa, 0x2b79d0b0, 0xf384fb9f,
- 0x7e286b6f, 0x19ef3f20, 0x3a77bf95, 0x85aafcda, 0x4eb2b4f8, 0x91590181,
- 0x271e66e3, 0xcffd4afd, 0x369e84ea, 0xe4857ff8, 0x0dff8377, 0xf5215879,
- 0xe35b7dda, 0xffa1bbf3, 0x96ef041e, 0x03deb841, 0x0eb9cb75, 0xd11d81e9,
- 0xfc5257f3, 0x17cd237a, 0xa9f2befc, 0xbdd5f1e8, 0xfd601e7c, 0xbf47ab1f,
- 0x2e7ff433, 0x5f9f12e4, 0xa618af0e, 0x7d7d3ca1, 0x8830cd7a, 0x293caf93,
- 0x6ec306fd, 0xd89b0e5f, 0xebcec5ee, 0x2162bc6a, 0xfad55b7b, 0x6773f627,
- 0xf4c97d35, 0xe16a6f28, 0xc05bd97a, 0x79f08ebf, 0x49dd934b, 0x877976fe,
- 0x9af303d0, 0x42a0c1ac, 0xa25aa1e3, 0xf161f773, 0x90e049bd, 0x4816fec2,
- 0xe252b8bd, 0xe3c4a575, 0x06e38c4a, 0x8c8c7d53, 0xbc652be3, 0xd7673abe,
- 0xa9292a31, 0xdaebe394, 0x1e13268e, 0x5bd081e7, 0x6f4242f3, 0xec31f419,
- 0x40beac50, 0xd10f770b, 0x95936d77, 0x4be7d6eb, 0x0ebf35bd, 0x5fd296f5,
- 0xb40f747f, 0x7ef1fc8b, 0xddac7701, 0x9e886cad, 0xfc201aef, 0x7fd2cbd8,
- 0x4d101c2e, 0xb01ef297, 0x9e7f7e53, 0xfc827aae, 0xfb46bb56, 0x7e458eb7,
- 0xf55bf5c3, 0xeaf129e1, 0x4df68cb5, 0xfd181719, 0xc3679405, 0x0ee721f9,
- 0xffdc6447, 0x7f84039f, 0x1ab266b3, 0xc446ba20, 0xb30a0b49, 0x7349c6e2,
- 0x8637f409, 0x71b3f7dc, 0x77927191, 0xb7e228d7, 0x2cef757f, 0xaaa6baa7,
- 0xba2fa13a, 0x29bae126, 0xfb03f7e4, 0xd67555d1, 0x5533c520, 0x7dfc2fc8,
- 0xc909f11a, 0x46529363, 0xc671827c, 0x42feb4ed, 0xf3c4eaba, 0xfb446c2f,
- 0x2c2fcb8e, 0xabf992fd, 0x3738846d, 0xbc406de2, 0x3788c3b8, 0xbf0aaf2a,
- 0xa95e78d6, 0x51dc7075, 0xb5ea24d9, 0x337c8e1b, 0xede10b34, 0xd0f3fe6e,
- 0x3e4f54e5, 0x86e7d840, 0x54a7d0e2, 0x1ef5f457, 0xb5ff5c5b, 0xb52bff57,
- 0xe6950f2c, 0x6bf64c8e, 0x791fa20e, 0xb7d1ea13, 0xa538e0bf, 0xbdc248e1,
- 0x5aebcf94, 0xfce61fe1, 0xc3553a1e, 0x3783f626, 0xe4b72d41, 0x8dfa451f,
- 0x4ea29d2b, 0x3f6941e9, 0xf208ffa9, 0x773883f5, 0xf97f6146, 0x8549f437,
- 0x10d5ebe4, 0xe95532d8, 0xa42a07af, 0xdfcdacdc, 0xf6ae85b5, 0x0aa7fd63,
- 0x31184f29, 0xf58cbdb0, 0x8633fe9f, 0x4edfab5f, 0xbbb8ff64, 0xae52754d,
- 0xabfbf119, 0x6227e441, 0x271d905c, 0xe485fb5f, 0x7f794ecf, 0x65493d5c,
- 0x8b2699fc, 0x10b7f0d6, 0xdd37cc8f, 0xb2411da7, 0x5f87796f, 0x583e26cd,
- 0xdefa3bbb, 0xe7e8e889, 0xc8a0b1ba, 0x7cec8547, 0xbf8c6517, 0x31d5f874,
- 0x6fd56fc4, 0xd04a3b7f, 0x96519be7, 0x67c36bf4, 0xe2228ab4, 0xe2229366,
- 0x475afde6, 0x556bc532, 0x645f5507, 0xef41bf3d, 0x813ca597, 0xf503f226,
- 0xabe67403, 0x1fc126ea, 0xe2d166a6, 0x7e444bfe, 0xf7fbccc8, 0xbe462f1b,
- 0x067b1ef9, 0x40f21704, 0xf987b995, 0x89b4eaaf, 0x7d3aa9e1, 0xc4447339,
- 0x775a017b, 0xeabfe900, 0xc4fac075, 0xe76fa40e, 0xd891f7d3, 0x027eff9f,
- 0xb2c761fb, 0xfb8b97b7, 0x7f48152f, 0x9af6fc3d, 0x60f691d1, 0x2f7100e2,
- 0x3707840e, 0x10bf3f79, 0x6c3c4bfb, 0xbf324582, 0xaf79f820, 0x2e309b8c,
- 0x9c40737b, 0xfd7aecb8, 0x2b66eae9, 0xeab9fe50, 0x19d72b03, 0x92bcdd7d,
- 0xeab87ee3, 0x06fa4d1f, 0xa91bfefc, 0xcaf0f87d, 0xebfcf4d1, 0xe46fabc3,
- 0x29b2a97c, 0x2011cf2a, 0x5fe4dcf8, 0x5deef57d, 0x69b67fe4, 0xb6149fec,
- 0xee977d9d, 0x9835f769, 0x8ad1dbf9, 0x067613fd, 0x2aafc892, 0x11df4f9c,
- 0x98d56bed, 0x32dfd0ff, 0xfb3b2bfc, 0xd88aa757, 0x7c75c2bf, 0x4e8b6e41,
- 0xa7bad394, 0x9a688b8a, 0xd7a4e382, 0xe57f93c6, 0x0cb7f3e8, 0xf8933e78,
- 0xe0496eec, 0xd9028bbc, 0x4460cf82, 0x1aebdc14, 0x64ff675e, 0xf51aa35d,
- 0xdef188fd, 0x7b88065f, 0x3e0cef87, 0xe65e290b, 0x5da2bda3, 0x932fb8d7,
- 0x1cd6b1ce, 0xaea93e62, 0x3df18b33, 0x07ba05a2, 0x411fc09a, 0x273f0807,
- 0x65d211e8, 0x8438259e, 0xbdbdfafc, 0x2eb28af7, 0xf5ea2824, 0xf3875a6a,
- 0x5b974c77, 0xb3c7a5f7, 0x9ee92743, 0xe3be1d07, 0x8a1e2259, 0x5badff1d,
- 0x9f8e41e4, 0xf8f1fa13, 0x78e8ff4c, 0xc500327a, 0x48a05f22, 0xef8835fa,
- 0x35e763ed, 0xfe787ad3, 0xc70243dd, 0xf73b8f89, 0x63fdf026, 0x75e44ba7,
- 0x34dbea4b, 0x6d661efa, 0xec813283, 0xf79cdad2, 0xec197bf8, 0x5c9e9117,
- 0xf7c83e1b, 0x7b7515af, 0xc0e37967, 0xec2dddb1, 0x9df93cc3, 0x7cf8ec72,
- 0x3c067e39, 0xdb9e3219, 0x673ef74e, 0xf14ecdca, 0xeef89bdd, 0xd7a956f5,
- 0x6662df17, 0xe80cea3f, 0x227f7166, 0x481639ee, 0xd4ab7dbd, 0x63dfb1f7,
- 0x76fdfa1b, 0x67c2ceb9, 0x73dd3360, 0x7d9f7ec4, 0x170ff4bb, 0xefd79781,
- 0xf4bd5df7, 0xfeff6313, 0xea5b8942, 0x8a3051f5, 0xed087af4, 0xe45bd70d,
- 0x9eddb026, 0xae32305f, 0xa9df41df, 0x753bf2eb, 0x57fa0d6a, 0xf6c7bee7,
- 0xb9cb78f0, 0x86fbf997, 0x1d900973, 0xcfad9ee7, 0xe47a1a1e, 0x79774136,
- 0x14e151ef, 0xbff41a30, 0x645a1184, 0xffa0cd7e, 0xff0e9283, 0x394575c7,
- 0x63e645a0, 0x07fea90e, 0x475419dd, 0x38fec9dd, 0x21ed809e, 0x3ca86b9e,
- 0x846b50d7, 0x7034bb9d, 0xee3e5c79, 0x406a5f3b, 0x6e7493d8, 0xecad725b,
- 0x14845637, 0x22b0eefe, 0x9b7fa20c, 0x51caf9c2, 0xbf5c2d0d, 0xf154a4b1,
- 0x3c5a8578, 0x7c19f04b, 0xfc26c0b4, 0x7a5f8303, 0x88f925f1, 0xb85fc9c6,
- 0xb10bfa55, 0xda1a34fc, 0x4bfad167, 0xea878abd, 0x47c9af36, 0x4ebdc6cb,
- 0xf8b5eee1, 0x4cd2f908, 0x9f96317a, 0x1ffb735e, 0xb775fc25, 0x80525e5f,
- 0xfdcf1feb, 0x4ff1286c, 0xe8e435e5, 0xddbf4945, 0xae71c08e, 0x7597a963,
- 0x8db9f504, 0x5697841d, 0xd3b2bf60, 0x8365bed5, 0x8eb9b7f0, 0xc09c22a0,
- 0x4d15a5f5, 0xad6efe76, 0x713aa1a2, 0x26e3e90c, 0x7ec763d5, 0x52e9c379,
- 0x179423ca, 0x2a973a89, 0xabbba8bb, 0xbbba8bb2, 0xe7fee9b9, 0xeff813dc,
- 0x11d74403, 0xb57e3a58, 0x3ef87be1, 0x9335df56, 0x7fe1370f, 0x4b350f4e,
- 0xbb686cfc, 0xc4863992, 0x887fdd34, 0xfd7eeee0, 0x8a7d0b45, 0xd5e63551,
- 0xd6825459, 0xde79d6ec, 0xb52d752d, 0xe0365694, 0x429dbbb7, 0x43839aef,
- 0xbfcd6fd8, 0xea9b7abe, 0x258c6f83, 0xd2d5d5fb, 0xc112d636, 0x501b9def,
- 0x63a1c76f, 0x1ca49835, 0xca2adc6b, 0x0947e922, 0x3e6aaeb9, 0x8bf9499a,
- 0x7ce7924e, 0x1e49df6a, 0xdec56d81, 0xc923c933, 0x5a2fd99f, 0x4d4bbd63,
- 0xad03cf2a, 0x49edd463, 0x666df091, 0xc4852ef8, 0xa31d003a, 0x8e441716,
- 0xd6bffcd7, 0x3e851707, 0x07d288e8, 0x16429e06, 0x0388efe0, 0xa75cad87,
- 0xae3ff740, 0x036a3a08, 0xc26efd05, 0x76e48a60, 0x0f7bdd32, 0x834f9727,
- 0x41afa9bb, 0xfba54b58, 0xea2ab0e9, 0xbeb086fb, 0xbff7289f, 0x50b70b49,
- 0x7964f03d, 0xda053a5f, 0xe80cde79, 0x4e0bd6e1, 0x18ba0efe, 0xb24edc3c,
- 0x5a53e785, 0xe2143f2b, 0x99c27bef, 0x4d82eb9a, 0x871d6f7e, 0xd3787dde,
- 0x0f8182e0, 0x44d5ee95, 0x79ec533d, 0xfc28186e, 0x42bc9bbc, 0x414b910f,
- 0x2093c0f5, 0xd3f9f7c4, 0xebde43a1, 0x54c73fc7, 0x1ee90b8c, 0x31ba3a6f,
- 0x208f8c89, 0xe7469fcf, 0xa79de351, 0x19933e63, 0x9f049d4a, 0x7d34ce12,
- 0x73febf67, 0x28355374, 0xebefbde5, 0x5171fae9, 0xa7a83ddf, 0xa9f78de9,
- 0x7af1597e, 0x43d9beb9, 0xfc538ddf, 0x7b3ad613, 0xc03659f5, 0x722b8198,
- 0xfbe84e06, 0xc0fc046e, 0xdfb12475, 0x277c448d, 0xfe231702, 0xe751922e,
- 0x48b83bbb, 0x65913aa7, 0x7bfe36e9, 0xa1b6853b, 0x79af29e3, 0x339fcd28,
- 0x7dea8330, 0x62f246eb, 0xfb02705a, 0xecbf92b9, 0x78449de0, 0xf7c6bc50,
- 0xd37cc02b, 0x320bf7fb, 0xb227bf81, 0x3e77c55b, 0x6d1f91db, 0x3a052ca9,
- 0x23c35fd5, 0xd3f93a25, 0x0f365c5b, 0x256c477d, 0x707dedb8, 0x1d44d8d7,
- 0xd44d8d48, 0x72f51879, 0xb1ad78b1, 0x8f4e7919, 0x98f1fd8d, 0x627e90ef,
- 0x7842dff3, 0x98a9287a, 0xde530eff, 0x5cdcd32b, 0x7fbf250f, 0xf03049aa,
- 0xe7e36c50, 0xec94f57d, 0x3d52d667, 0x9359c237, 0x78ff5c01, 0x0a546934,
- 0xb4d565d5, 0x9aee7a89, 0x77eb008c, 0x7d615fb3, 0xd25ad35f, 0xb2e2f64a,
- 0xdd75c2c3, 0x07512e35, 0xd080c81f, 0x3d347af3, 0xa0f419a6, 0x8f594bfb,
- 0x5c65d064, 0x7bd0943a, 0x91dec2e3, 0xdb429384, 0xfdd037cb, 0x7470b8c2,
- 0x69d2197d, 0xf7a9d135, 0xc9cd1e3b, 0x7d061ef5, 0xd9987190, 0x06ee929d,
- 0xdfc69cbd, 0x516af023, 0xa0c08f7f, 0x4144f6e3, 0xe055332f, 0x7577e70c,
- 0x4e96375a, 0x14e11bff, 0x4bb0bac1, 0x00004bb0
-};
-
-static const u32 xsem_int_table_data_e1h[] = {
- 0x00088b1f, 0x00000000, 0x93cbff00, 0x51f86065, 0xd2f9c08f, 0xbcde0c0c,
- 0xc4b462a8, 0x0c0c5c0c, 0x0e5c4041, 0x7b401ac4, 0xdbe9016f, 0xcdce1c40,
- 0xc40110c0, 0x1ff881fb, 0x6207ff10, 0x04d6200d, 0x79405fe2, 0x5b1ba845,
- 0xda181898, 0x8803b880, 0x875880bb, 0x97418191, 0x93fb7891, 0xde181984,
- 0x7af82389, 0xcd0c0c12, 0xfff3f452, 0x5631c360, 0x29efb5f4, 0x174e3ed0,
- 0x19c73f04, 0x505c2498, 0xe0bb70d5, 0x4d078337, 0xcf8d179e, 0x9e7f4787,
- 0x5cbf2a21, 0x4d3f950b, 0x23e18187, 0x2d0a9a92, 0xc7416efc, 0x0c0c468a,
- 0xabc4464a, 0xca8c60df, 0xd081300f, 0xb1adf900, 0x0003a809, 0x00000000
-};
-
-static const u32 xsem_pram_data_e1h[] = {
- 0x00088b1f, 0x00000000, 0x7de5ff00, 0xd5547c09, 0xf37df0f5, 0x66499996,
- 0x0d909326, 0x8a027108, 0x081380a8, 0xc3b44069, 0x8e22a222, 0x260dc55b,
- 0x1037d902, 0xfdaff0fd, 0x2d361032, 0x4682d0d6, 0xa0c0748b, 0x0d0482c1,
- 0x00e02418, 0xd52ff82e, 0x5b7bbfd8, 0x16c34a0c, 0x7e1b8092, 0xce7fe5b5,
- 0xef25f7b9, 0x6d80264d, 0xfdfb5fbf, 0xe6f169bf, 0x5ddf7bbc, 0x3dcf76ce,
- 0x14fbdcf7, 0xf89628db, 0xe0cec663, 0x2cabd20f, 0xdd51b18c, 0xa0db6ce9,
- 0x74fa87a1, 0x6559ffe7, 0x4287ead3, 0x6ed0ab1e, 0x9cfdd61e, 0x7e5485b1,
- 0x386b8c40, 0xf4d31cbb, 0x8f0e1b10, 0x9616c96d, 0x6e7dd8cc, 0x0901bf46,
- 0x912974be, 0x5615ceb1, 0x780cbaf7, 0x085c7aae, 0x20c8f7df, 0xf7c25077,
- 0x1652c93c, 0xbaa23fe4, 0xbecc01d7, 0x6cb3233f, 0xe0da66c6, 0xc3fc0f6f,
- 0x7cbea07a, 0x63106cac, 0x059969ea, 0x77ea7fa0, 0x3403a512, 0x00efc336,
- 0x6c2df978, 0xfeeed0dc, 0xecfd13b9, 0xe8b78073, 0x5b7e89f9, 0x097f90c0,
- 0xecffa893, 0x5faa4afb, 0xf2f16f6f, 0x073e6c5b, 0x8d941636, 0x7f963bcf,
- 0x8be7c2e9, 0x7aa16e9b, 0xd82c5318, 0xc8b772cf, 0x635cfc4f, 0xbbfaee96,
- 0x76a214f1, 0x04d3d874, 0x3188feed, 0xb6bfb5e9, 0xd543c032, 0x0563d556,
- 0xaad84cbc, 0x700c7f86, 0x84295ae9, 0x9d9765b0, 0xfb4bc031, 0x9d870e6a,
- 0x9fcb0f9a, 0xaa1805a8, 0xff36b728, 0xdbc40afc, 0xa3ad9956, 0xf784fc66,
- 0x3bc71b56, 0x9e70e46b, 0x6ba5839d, 0xf7c74f77, 0xccad1a0b, 0x6e95f50e,
- 0x057a9fcf, 0x54fbf9c0, 0xd74a4586, 0x01fad02f, 0xea92185c, 0xcf18ee11,
- 0x36e0fda8, 0xca1eb439, 0x939ab877, 0x1ff1836f, 0xcf31b4ab, 0x676fd0c5,
- 0x80adf273, 0x96d0a287, 0xd15ef849, 0x2c8bc946, 0xfaeb6134, 0x7bc2fb37,
- 0xaf241ba5, 0x5c52f015, 0x433ccb17, 0xc0ce1f78, 0x3906d6bf, 0x9fc863fc,
- 0x1964e30b, 0x49ead748, 0x57f6c64c, 0x37671e68, 0x0e558e9e, 0x376b1e61,
- 0xda01ba5c, 0x5ecaf781, 0xc438bc94, 0x200b0b32, 0xb76fb65f, 0x81d67b4f,
- 0x0da05be3, 0x96b8e276, 0x3f2e586a, 0xeecdbb94, 0xd5adfec1, 0xd17e4126,
- 0x9e5ab5ea, 0x75c5fe81, 0xcd8f3197, 0x419aafd0, 0x88fe4629, 0x9b018f4c,
- 0xc453fb18, 0xf91eaf98, 0x37690944, 0x9fa2e419, 0x2ef44f14, 0xf54d93ae,
- 0x4b192603, 0x007eff82, 0x82be027f, 0x57849d3b, 0x05736e9d, 0x3b74e91f,
- 0x2987e425, 0xfac6d99d, 0x48e7f4f5, 0x754fe807, 0x5fd29ba5, 0xba52a654,
- 0xf443d2b2, 0x07f10279, 0x6fd172e9, 0x4e3658d7, 0x3e5d7681, 0x4b3146e6,
- 0xa71be298, 0xe7e09c02, 0x01ddf270, 0xf19673c9, 0x6a13e9eb, 0x72651720,
- 0x53e49b15, 0x05ac02fa, 0xb42f30e6, 0x9be8dcaf, 0xd7b19a38, 0x1c71e059,
- 0x502922ff, 0xac657480, 0x740fd53e, 0x1e812259, 0xc52d7c01, 0x961e0241,
- 0x067e05f4, 0x4f713d3a, 0x24b2b3f6, 0x70889ac6, 0x668f73de, 0x54353d50,
- 0x0cf50a8f, 0xc93d773a, 0xf54c73d3, 0xd02f4f24, 0xf54b59eb, 0x9eafcfd8,
- 0x318fa627, 0x917a67f7, 0x580bcf5e, 0xfcf3916e, 0x633c95c6, 0x333fb9ac,
- 0x949ea84a, 0x002cbdbb, 0x7f9d65e5, 0x864fb358, 0xf8c6c77c, 0x23328f11,
- 0x86df8d1f, 0xaa4139f5, 0x97147c8c, 0xa7926313, 0x09825f78, 0x9fb933ee,
- 0x7f927ca9, 0x653f29a0, 0x7c11a5da, 0x5eb770e9, 0x54e86026, 0xfea35e38,
- 0xd234fd6a, 0xd9fae33b, 0x9fa07e08, 0x03cef483, 0xbb170263, 0x57d8dfa1,
- 0x407ff406, 0x8eee5c0b, 0x2e3037c2, 0xea62a589, 0x991a60b1, 0x78e554bf,
- 0xfbffdf1b, 0xfde107c1, 0xe0e74b72, 0x4bff8078, 0x8e21f71a, 0x4ca88ffb,
- 0x32316e81, 0x3172c0ab, 0x6ffa4656, 0xb3f64669, 0x975e0341, 0x917e000d,
- 0xa65debba, 0x1f915206, 0x4115825b, 0x3025cfe4, 0x8c7eb041, 0xe115641d,
- 0xfef085e0, 0xc454bc80, 0xfd0355bf, 0x835f6c61, 0xfb5287f6, 0x5b7ed8ad,
- 0x075bed2f, 0xb7da98e6, 0xda9817a5, 0x3ed069b7, 0x706747c8, 0xf01f68be,
- 0x93b18637, 0x6fb53e6c, 0xf6a02f4d, 0xc0ac6a97, 0x574c7ed4, 0xbb60dffb,
- 0x49fb63df, 0x703fc651, 0xe9fc798c, 0x6bf1e645, 0x416cfc7c, 0x431fb450,
- 0x20a497e3, 0x117a7f1f, 0xd795bf1f, 0xabbed5db, 0x049aff0b, 0x5ea43aed,
- 0xac683fc6, 0x9417fc79, 0xe56fc798, 0x2c17bbed, 0xa83bed13, 0x196978fd,
- 0x2505ff1f, 0x0d66bed4, 0xb48f9178, 0x211531fe, 0x40d34f9c, 0x07485280,
- 0x921d28fa, 0x93e818c0, 0x8481b2df, 0x38620787, 0x03137ddf, 0xd6e8e6fc,
- 0x12042ca7, 0x9970f308, 0x8de2e9e0, 0x3f3e34e3, 0x9f5e6907, 0xab3aba2d,
- 0x8cf9a651, 0x2e86b4ad, 0x37b6fe82, 0x678441ca, 0x2432bcad, 0x760a7cd3,
- 0xb0a7be02, 0xf3ff3e30, 0x8ceb61aa, 0x674c8ae3, 0x2dadab57, 0xd2eab906,
- 0x90d9e3ef, 0x055fe80a, 0x6ea8c132, 0x34f415b8, 0x39fc3d03, 0x0f4c63e8,
- 0x392edcab, 0x5d9c7a04, 0x059b946c, 0xb2092edc, 0xff4087f7, 0xdffa227f,
- 0xb77c70aa, 0x884293ce, 0xcc566fff, 0xcd9f50d1, 0xa4058eae, 0x5ca3f777,
- 0xbb73a9d0, 0x14f64435, 0x9f88074c, 0x191449db, 0x1baed3fb, 0x305c94de,
- 0x31f4d63f, 0x0b3777bd, 0xf2854e2d, 0x0ee79733, 0xeff8c370, 0x211d669a,
- 0x7cc4fce3, 0xc7242dfd, 0x8725bfac, 0x2b0d69b1, 0xdc00fed4, 0xbcfd4d3e,
- 0x1dfef0c5, 0x577c3301, 0x576e1981, 0xed05aa43, 0xad894299, 0xa9ef7a85,
- 0xedebe730, 0x3812964c, 0x3f7b455d, 0x85f41c01, 0x3a60f747, 0x89bb1f02,
- 0xfcddd3ae, 0xe53fbd5d, 0x4c2ca90f, 0x124b71f3, 0x5127fa23, 0x8f9b80b9,
- 0xd3bfb23b, 0x0fcf9b55, 0xa0fe99fd, 0xec8cf84c, 0x58aecb7f, 0xd9ec059f,
- 0x552f9a96, 0xf1c8c164, 0xc67ff644, 0xb8f1c8fc, 0x721f9c35, 0x39cf9183,
- 0x53f2449f, 0x5fb8e379, 0x53f0321e, 0xbfb5fd69, 0xaf78643c, 0x1326eeb8,
- 0xc3ba185c, 0x26bf3e54, 0xbb3f94d7, 0x3f94d0ba, 0x131cd973, 0xd07c1b9c,
- 0xfcc67e54, 0x7bfca605, 0xe5311e2a, 0xc2b055df, 0x7811df04, 0xf6fe54ca,
- 0xf94d6b69, 0xdc975d96, 0xf5547288, 0xde70cc81, 0xfad1daf8, 0xbf37b473,
- 0xa45e2876, 0x57b011c7, 0x818e0e50, 0x7b6982bd, 0xabb248e3, 0x31e60f41,
- 0x4b56a130, 0xc6cb83fb, 0xa4665ea2, 0xefd2433f, 0xc634c183, 0x843c979e,
- 0x6346b93f, 0x89616061, 0x71f17425, 0x6fc86ca7, 0x0d7e4739, 0x9ec8fa08,
- 0xf44b72f9, 0x72ebe5e7, 0xd3bd402f, 0x5f803e9b, 0x3ae79c7f, 0x99103d84,
- 0xbf31225f, 0x7ebeb9f1, 0x7ae25cba, 0x53ac44be, 0x3bea5e4a, 0x0e10b99e,
- 0x5b38ae0f, 0x5480f57b, 0xfd6893d4, 0x803f2127, 0x51e81814, 0x24c8375c,
- 0x65bb6ddf, 0xea1957ea, 0x99abd004, 0x4257bfcc, 0x9bde133d, 0xec30cb7e,
- 0xd475ef87, 0x8931acfb, 0xcab6f5e6, 0xa43cbfc9, 0x94fc7d22, 0x469ca91e,
- 0x80656b69, 0x059543d2, 0x595e7e94, 0xe54b6941, 0x540f4a7c, 0x63fd2906,
- 0x3f4a32e5, 0xf4a6acad, 0x4a1acae3, 0x510cac3f, 0xa3e95eda, 0x2e879754,
- 0xf617ebfd, 0xc0b758b0, 0x8b0f6e03, 0xb2822cb1, 0xdee724cd, 0x53f39454,
- 0xa3066f8e, 0x63ea7fbd, 0xde8ca260, 0x6fc915f1, 0x47d1d3bd, 0x085e4af6,
- 0xcf4fa798, 0xa70c7b7c, 0x26c2dd93, 0x8f47d033, 0xf79cf45c, 0x2b04a1de,
- 0xa9c032c8, 0x519c9bde, 0x0fb985ea, 0x3a2e75e9, 0x70e75f31, 0x3eb7675c,
- 0xac6c97fd, 0x206972f7, 0xcaf7879f, 0xf0b30f34, 0xd7a45eb3, 0xf49bc50f,
- 0x5bd29fda, 0x2e89dca0, 0x33a735fc, 0x6e48e748, 0x8354ffaa, 0xc7882995,
- 0x2e10d8a6, 0x7bed4f8d, 0x38f285d6, 0x3eae5537, 0x7c77b234, 0x5467d695,
- 0x29e30c3b, 0xa7c3346f, 0xdcc9a5aa, 0xbb89e07f, 0x95f21875, 0x45d0fabb,
- 0x4a54ff48, 0xa6e67af5, 0xb1ced46a, 0x7c7141ba, 0x79f10efe, 0xf13c6370,
- 0xa93bac2b, 0x3fbb3ffc, 0xf7a3d5bd, 0xf606b187, 0x01f50d85, 0xf73a0de4,
- 0xdd07a8fa, 0x3f34af95, 0xfd49abd2, 0x1885ed06, 0x206677f8, 0xac10abd6,
- 0x2f5e5bd7, 0xe397ad07, 0xf6a68df3, 0xbe8f3de0, 0x7af7c7a6, 0xb5855be7,
- 0x7d04e3ea, 0xe7f0a575, 0xabd9d714, 0xbc4af3cd, 0xec6f2e09, 0x679a6d5b,
- 0x7f900ff0, 0xc8292156, 0x9b82253f, 0x32c7143a, 0x6a97fa09, 0xe5bd50f1,
- 0x6a572f12, 0x1e02d16b, 0xb466c762, 0xd1cdee33, 0xfb73cff9, 0x1fdf401f,
- 0x1ecaadfd, 0xd7e17cc5, 0xb06cf551, 0xe7e82d6f, 0x0dbdd011, 0xef5053c5,
- 0x3d76dd1d, 0x98b327d9, 0xa48b85df, 0x8d9d5602, 0x6609ce76, 0x7ffc8c99,
- 0x35defd82, 0xc8deb0d2, 0x9fd468b7, 0xf3cccb99, 0x2667d82c, 0x0cc6bf38,
- 0x939bb1e7, 0x56f3df91, 0xa0b730aa, 0x6ff39a5c, 0xbe49b8b7, 0x12c559f2,
- 0x3f08ef5a, 0xa66ebdd8, 0x2fb907f4, 0xec99e57d, 0x87e8a1dc, 0xf75bfae0,
- 0x7026140f, 0x3128a53b, 0xff20f9a4, 0x3f91868b, 0xfbe182b9, 0xa7a825a1,
- 0x5de64e82, 0xefd27acf, 0xb23ff687, 0xd27cfabf, 0xfa214fc3, 0xbe49d721,
- 0xda759450, 0xd84916c3, 0x974a415b, 0x76eb718b, 0x5c044d6b, 0x47d7011b,
- 0xbf377fc0, 0xbf133225, 0x35025aa5, 0x5fce2496, 0x4a482f68, 0x47f816a7,
- 0x51ed4fea, 0xb53fed7f, 0x3fa834fe, 0xfd7f54db, 0x0bfeb53f, 0x29bff47b,
- 0xafa5fd5a, 0x0416da6c, 0x79b4537d, 0x3cc18b95, 0x4ea95474, 0x4bdd02f6,
- 0x21762fd6, 0x82511c1f, 0xa3e7e42e, 0x34727921, 0x22d2f87e, 0xe7cca2fc,
- 0x855d7090, 0xc7fd427f, 0x54d9f85e, 0x59bee7b4, 0xbd69baaf, 0x5b0d6754,
- 0x1acb4e41, 0xdfa0a70a, 0x1c83e017, 0x46527a5e, 0x9fccd1b8, 0x4aafcf45,
- 0x701eff46, 0x844f588a, 0xab2a5ec9, 0x9c24f3fd, 0x2759ca87, 0x7be459c9,
- 0x04e9fed8, 0x5ab98fd2, 0xe87ccf5c, 0xe5f9d927, 0x0de52f02, 0xbb293b3f,
- 0x30f6bd30, 0x4cb013ea, 0x7c8f0ec8, 0x41d840af, 0xc4ce2c87, 0xb1425856,
- 0xa11fb11f, 0xc2f1d4de, 0xaa0edc42, 0xf0e4f0da, 0xb6afd083, 0x24badfda,
- 0x7974be03, 0xf33f553b, 0x7a7aafd7, 0x62edcbd7, 0x5efdd7bd, 0x34f3de88,
- 0x89adfb0a, 0xd86a25a7, 0xc971f685, 0x841bd55a, 0x9e9b25c7, 0x5c69ee7d,
- 0xf5627eaf, 0x17f5045e, 0xc3e37a6f, 0x6f170031, 0xf0a71351, 0xe4a43861,
- 0xc394fa6e, 0xba23f9bf, 0xf2f451e9, 0x18679a1b, 0x4270fe7f, 0xb78a5d37,
- 0xb0d8d6ec, 0x5098f89e, 0x716b5bbf, 0xfd4fa144, 0x676849c1, 0x56f86d55,
- 0xd1e575c3, 0xc94b125d, 0xb5cf8288, 0x80bd906f, 0x0a7a2278, 0x2fd1757a,
- 0xd0397ca2, 0x247af505, 0xbdcb22bd, 0x1465fa81, 0x93fd17af, 0xbe2fdfc0,
- 0x4fec7e8a, 0x43c45ead, 0xb9f78bc1, 0x95873c70, 0xcfe7ce0a, 0x3f464e2c,
- 0x4c1a817d, 0x9fca5376, 0x9fb9ac17, 0xbdff2ff8, 0xfaf993fb, 0x42d7d7d0,
- 0x2fb05573, 0xeaf6738e, 0x799c68db, 0x587c402c, 0x0fec8cf0, 0xc2b5fa41,
- 0x22896f26, 0x9732c527, 0x80ebc393, 0xc3ce30b8, 0xbf414eb8, 0xd0e5efee,
- 0xcc8ff27a, 0xee0fa861, 0x6cf1fdd7, 0x8b5fc12e, 0xb27184fd, 0xdae75f45,
- 0xbb5bfc4c, 0x74e919b4, 0x052f806c, 0x4ce56afd, 0x487c0a09, 0xf95ea067,
- 0x3853abbd, 0xc947989d, 0x5d81ef16, 0x639f0130, 0x67d566f9, 0x8f7a6e1f,
- 0x6ee8c99d, 0x1f689e7f, 0xf3831dfa, 0x5664e1f9, 0x7c651f50, 0x07aeb235,
- 0xf0375e60, 0xb9de4199, 0xc23ed7fc, 0xff975de5, 0x31934dd0, 0xb9f7abff,
- 0x387be11c, 0xc2bf425f, 0xfbbf9429, 0x83f48956, 0xc9a38595, 0xe42aad79,
- 0xc91f9cdc, 0x457fd02f, 0x0df0338a, 0x744093b6, 0xde5abf20, 0xa8df784a,
- 0x575db157, 0x39757acf, 0x20fa17ce, 0x707d064f, 0x603eb759, 0xe81eb9ab,
- 0xf20aeedd, 0x7a1a9bf5, 0x5f9469ee, 0x07a0d790, 0xe3f557e5, 0xdc6f8ff8,
- 0x209de1fb, 0x75ebc7b7, 0xd5eb35b9, 0x782db948, 0x7c84bd69, 0xc7b7291a,
- 0x894ac00b, 0x3cf0b726, 0xb416dcaa, 0xaaf44bfc, 0x65c7c78e, 0xf5d55eb3,
- 0x8cf86f64, 0xca9793d4, 0x127aa89e, 0xecb3ef7e, 0xf3a8fc9e, 0x457fcea1,
- 0x80bd29bf, 0x9f3a09fc, 0xc5d87cea, 0xf61f3aa7, 0xf098cff0, 0x3b7f9918,
- 0xc10c04f2, 0x7f255dbf, 0xdf134962, 0xdef7838f, 0xf8459fec, 0xc734d1f2,
- 0x9fecdfaa, 0x11438468, 0x79447d70, 0x8fec045f, 0x80881f28, 0x4be54c2b,
- 0x8c6af71a, 0x2a6c20f8, 0x2bff9d67, 0x759445f6, 0x950f3eac, 0x82d49c37,
- 0x9d691fc8, 0x753fea1a, 0xe8a89821, 0x9329dc3f, 0x7003b0ff, 0xe9da04bc,
- 0x0a1198d8, 0x6c591e82, 0x0ecdebe7, 0x012ba777, 0x1cf1c5d2, 0x96d24cee,
- 0x9fd41ea0, 0x1fb9da77, 0xe9dfc3a4, 0x31f8378a, 0xa4c9360e, 0x6c425bc7,
- 0x093f3472, 0xdf8434cc, 0x3e5bd616, 0xe0768ff7, 0x87b633be, 0xcf40cefb,
- 0xfa4765ab, 0x56bf5c7c, 0x23605ecb, 0xedc16b36, 0x77a1742e, 0x71ba0d34,
- 0xfd9f3c1a, 0x6db7ccb6, 0xafa53e82, 0x8471dd61, 0x2b189f05, 0x72de7ee1,
- 0x4d999fe2, 0x6b321d7c, 0x28797479, 0x39e5ef12, 0x77a869e6, 0xb9f0fd61,
- 0xd7ac0fd1, 0xe23ab053, 0x42f5d379, 0x3d69aa6e, 0xce6b5458, 0xd4f5880f,
- 0xc9feba37, 0x7fa49964, 0xeb84a170, 0xafb7a17a, 0x38de8796, 0xb1d3e80d,
- 0xbfb8664f, 0x2649aa7a, 0xc8da9cfa, 0x305953f7, 0x8cafe489, 0xd4be9275,
- 0xf286d6f1, 0x7aef7175, 0x9feb6dac, 0x3e421fb2, 0xe187f6da, 0x6db482bf,
- 0x778327db, 0x47cafc20, 0x3d607fe9, 0x65d84fcb, 0xc7733f27, 0x7ff1272e,
- 0xa5dfcec7, 0x76f0843f, 0x3af7f92b, 0x1c3b7d76, 0xa163b1f9, 0x60f500b5,
- 0x7ebe00c7, 0xedf9daaa, 0x7f9a16f0, 0x331d1117, 0x7de88d14, 0x072fe9aa,
- 0x54e02e30, 0xed0a8c13, 0x24b15d8b, 0xdaafe55f, 0xb1d11fc9, 0x80ecdbf3,
- 0x9e379fe3, 0xd3ffb132, 0x5ed364e1, 0x73c5fec2, 0x8ef979bf, 0xc02ecfd1,
- 0xdd86f1fd, 0x9fc84cda, 0x875fdaf0, 0x78ed7ea3, 0xed4ddb89, 0xdc1acb6a,
- 0x48ba18df, 0xbd02a85e, 0xfac851da, 0xd16fb631, 0x4728f1c4, 0x57f2f13d,
- 0x2f9cb3f2, 0x7c28263e, 0x8feffb3d, 0xf5c7c90f, 0x9364339f, 0x1ddfdc70,
- 0x89ea03f8, 0x4be2565d, 0xebc7bc7d, 0x733d9017, 0xfbdf71ae, 0x52dc6e3f,
- 0xbdc67cf8, 0xff9cdfe0, 0xfa878aad, 0x3d072917, 0x03e77cf9, 0x7a726f04,
- 0xc9e7bfa9, 0xa7ff6bef, 0xa025fdd1, 0xe3dcebbb, 0x4b3fff0e, 0x0ba7b7f7,
- 0x3e31bbba, 0xbfb5fca0, 0xa87eff52, 0x37f96b9e, 0xe36f7ba7, 0xb7fdedd7,
- 0x33f79e2c, 0x5664fca1, 0xe2c340ed, 0x6f3dd2da, 0x5bee4267, 0xb1e37b69,
- 0xf623e3bf, 0x5e34f47b, 0xf1b6fee5, 0xed03efb8, 0xac4978b2, 0xab3af917,
- 0xfa2fb0bf, 0x3b23cbcf, 0x63da7fa4, 0xc5304f64, 0x2bf712b3, 0xe99f4adf,
- 0x360bd8a5, 0xc200e3e2, 0x6c052bee, 0x4afe6f5e, 0x4adc3e62, 0xfd7e9fed,
- 0xd373b43e, 0x83b264d2, 0xf7fb2521, 0xfe64d775, 0xad3344bc, 0x98f5ae87,
- 0x129347d7, 0x89a8ebcd, 0x19abbea2, 0x0ed5ffef, 0x3c021429, 0xcbf01f8c,
- 0x8ea7f444, 0x126548bf, 0x605893c0, 0x263bae11, 0x9df5cc3a, 0x836c7dc0,
- 0xc5eff1bf, 0x3c2ec4e3, 0x47e95c0e, 0xc9900e3c, 0x3c4e7aaf, 0x6f09bf62,
- 0xc78c2199, 0xe3978a61, 0x4bd4a131, 0x1eb16a7e, 0x1da26ec7, 0x349638a3,
- 0xb82b3ca3, 0xa078e69e, 0x9ebeb875, 0x4cdf0dee, 0xd115cfac, 0x257f8ea4,
- 0xcdd9f64d, 0x5cfad1f5, 0xa50fcba7, 0x7fc74e87, 0xaac92e94, 0x8e692e99,
- 0xebca0a39, 0x8c3f5c64, 0x1c99f2c4, 0xb42a0b4e, 0x0fd624df, 0x28e678d7,
- 0xa4278041, 0xaf482a65, 0x22f6db7c, 0x79b51f8c, 0xc5c7ea25, 0x1f9a166d,
- 0xe112596c, 0x428d487d, 0xf7167bf0, 0xd4f7a428, 0xfe395e2b, 0xbb3f4320,
- 0xba06e34f, 0x7c97ef9f, 0xd8cce67f, 0xf0c7f46c, 0xde7fc61f, 0x59b7eb00,
- 0x063859ab, 0x615b34f0, 0xf404b8c1, 0x73ec4b93, 0x0cdc9f93, 0xe4aaefe3,
- 0x55ce7aee, 0xf2bd37be, 0x015f4ecf, 0x45f9f63d, 0x94c76d8c, 0xc6288a6f,
- 0x9a8ff6f5, 0xf7cabe38, 0x1e40d0b3, 0x0dfb2187, 0xab2f8afb, 0xcaf33fdc,
- 0x891a5f1f, 0x1d71dceb, 0x7eb8e343, 0xa971e2cd, 0x8a70bd62, 0x0ebce279,
- 0xe283afd4, 0x9f74bf68, 0xe7168cec, 0xbfac41b8, 0x28f1837f, 0xb2d47690,
- 0x57d7196a, 0xbefc93ac, 0x5b1b5ac1, 0x661e251f, 0x7e116a8d, 0xf8374122,
- 0x7fb8d9c9, 0x0d9fdbc3, 0xe919c6af, 0xa8e536d6, 0x1d27bc32, 0x61b9f7f0,
- 0xc51feaff, 0x11f7ae2f, 0x2dec1bbf, 0xf451fc93, 0xdfc0bd47, 0x91df3dd4,
- 0xa6d2f49f, 0xdfe416b5, 0xa62d6b4b, 0x553ad8fd, 0xb04631f8, 0xa9afd811,
- 0x2cceec7b, 0xd93ecba4, 0xe5a5f18b, 0x40b5274d, 0x48c47d94, 0xe8fd627c,
- 0xdd556f7f, 0xb50eeed4, 0x75e2267e, 0xc31b09c7, 0xad76f575, 0x3f5a38ba,
- 0x7ef2b4ef, 0xf7f566ce, 0xf7f8cf0d, 0x0eb8efc3, 0xae3c7847, 0xf0996b3f,
- 0x1ff24483, 0x553e3e23, 0xbf3842c7, 0xf5157ae2, 0x8c8da9c2, 0xc94077e6,
- 0x06feb863, 0xd1b1ff79, 0xe37173af, 0x5da0df96, 0xb924d650, 0x4ca24b71,
- 0x8fd0d169, 0x2f18de5b, 0xe99c3ce3, 0xdd6fe3d1, 0xc8356ec3, 0x10aaab45,
- 0xd98ef6be, 0xfbb61771, 0xd0c69b65, 0xdebdf14e, 0xfc79c2e9, 0x2491a6cb,
- 0xeb8dbd07, 0x34564ae5, 0x841ce311, 0x87e48c3e, 0x4c631ba1, 0xa07215f0,
- 0x54d7ca1f, 0x6f3ccb6b, 0xd32dfa14, 0x788fb124, 0xf42dfa9e, 0x7b7e99ff,
- 0x016fd75f, 0x23906fd9, 0x419bc0bf, 0xd344a5bf, 0x4f25736f, 0xee7de20a,
- 0x482941ce, 0xab6fb9d7, 0xdbf4d149, 0x2fbe4aa6, 0x4dc459ba, 0x7e803477,
- 0xdfa0dcbb, 0x45bf401a, 0xa3191f89, 0x73fa7ee9, 0xbfd0b7e8, 0xa136fe46,
- 0xde328b7e, 0x74fe041b, 0xe9bc36fd, 0xe1b7e920, 0x3c53160d, 0xf84d44f0,
- 0x6fd57a9b, 0x68add252, 0xbd53ef1f, 0x67f851b1, 0x37c7b093, 0x6c46388b,
- 0x955cf507, 0x5cf4c3f6, 0xb9eaf9de, 0x759e117f, 0x2b77373d, 0x9ee8b8a3,
- 0xdcf5c87c, 0xe7a0eddc, 0xae47e424, 0x64eee6e7, 0xa1172fdc, 0xd0f486df,
- 0x97ca8c6f, 0xe5fbf985, 0xde4f198d, 0xf08df50d, 0x941b5ea9, 0xefadd11f,
- 0x5df51946, 0xbe8bd695, 0xfa7e77db, 0x77d0ab6e, 0xa206c7a0, 0x0fe48d7e,
- 0xde39936f, 0xc3e8c77c, 0x79465f1b, 0xfb4c9df9, 0xf859ef92, 0xa33bd1fe,
- 0x7f21670f, 0xf3fa2a7d, 0xd9e9c6a2, 0xfaa4195e, 0xc7cebc27, 0xfb91ba57,
- 0xb81acbcb, 0x2b56587d, 0xe7f03c87, 0x69df31a4, 0x9dc2ffd8, 0xf8014b12,
- 0x13f56b26, 0x9febb40e, 0x1f589957, 0xf034c94d, 0x629cacc3, 0xd957fbf2,
- 0xfcf0eb5d, 0xd9852cd1, 0xec5fbfd0, 0xed147498, 0xbe1ce2e0, 0x9e2c501f,
- 0xb3b071eb, 0x4464f4bb, 0x9ce3483c, 0x9eb3fb37, 0xad531671, 0x9f53ae9c,
- 0xb7184295, 0x22ee33da, 0xb8a17a44, 0x5dfcfcce, 0xc9377f21, 0xf62f842d,
- 0xee351cae, 0x85d72f43, 0x4f027da7, 0x9e131780, 0x3c545242, 0x64a7a501,
- 0xa5e37726, 0x4b2d77f0, 0xf0a05f70, 0x91f68929, 0xe3adc723, 0x3afc722d,
- 0x773f751e, 0xbf2fa8b1, 0x6b9d2d69, 0x8e8bc48a, 0x747c48e7, 0x1f023de1,
- 0x7e5d69ef, 0xd71891ed, 0x7a437c04, 0x809ff826, 0x3fc76817, 0xf9d322ee,
- 0x5e048f9b, 0x9b8f5646, 0x370fe180, 0xe43a1c61, 0x79ccd5e7, 0x63e02fb3,
- 0x119ec7d4, 0x315e9d38, 0x7dc01ac6, 0x4ef60dda, 0x1f3a83d2, 0x3e72b30e,
- 0xd95e84d4, 0x3c62afd1, 0x251bf3ad, 0xe521da37, 0xb5e13b61, 0x0ed1f81c,
- 0xf53ef645, 0x578124cd, 0x6aaf9d37, 0xd013f314, 0x26eb82c1, 0xf10abe7d,
- 0x9b2358f3, 0xebca5d38, 0x58b25d38, 0x94c7ed27, 0x30de48d5, 0x71aca78c,
- 0xeba1c50e, 0x0e7e19fa, 0xf0a29d23, 0x7f8d12af, 0xb39b3a19, 0x62cde7bb,
- 0xb5aa6e51, 0xe7dc43fa, 0xb1f20a99, 0xfe10d896, 0x3efe1677, 0x2450fc57,
- 0x78132ebd, 0x757884db, 0xb93afe20, 0x8efe15fd, 0xf9e969ce, 0x84d04ae5,
- 0x9f4f09d7, 0x53b7fce6, 0xac5fa0f2, 0xcfc86f0b, 0xc74a3f90, 0xd233f21b,
- 0x465729a1, 0xddf00f38, 0x78e7a327, 0x28d4bf71, 0x0f3b85f7, 0x919ffaf2,
- 0xb8ca2cbc, 0xff7f307f, 0x4deebe40, 0xc55987de, 0xf7f0413a, 0xfdcef63b,
- 0x77bf9123, 0xd12fdc4a, 0xf7da6f14, 0xd7cac1bc, 0x20ec1b4d, 0xf6dfb807,
- 0xe75deab6, 0xae1fa9e9, 0xc03972b2, 0xe0c784f5, 0x0704baf7, 0x75a6f182,
- 0xa4178a36, 0xf6e40c7e, 0x6f5f51aa, 0x5ba4b3b2, 0x7faf7ab3, 0x37bfa88a,
- 0x8787497b, 0x47a87b61, 0x21bda11b, 0xce45eddd, 0xe0ffba17, 0x43bae35c,
- 0x3bdfcff0, 0x1f9dbd2e, 0xce554f1a, 0x4c5f699a, 0xb54aab8f, 0xd1515e04,
- 0x992c9bbe, 0xe7a0b7e2, 0x70ffce82, 0xa36ab7fd, 0xa59faf7a, 0xd08ec8da,
- 0x2d5ecbcf, 0xf010583b, 0x254fa5f6, 0xe32b0179, 0x7f38b183, 0x1079878c,
- 0x8ff72f92, 0xca6ee8fa, 0xed9f6997, 0x29f5dfc6, 0x1bc87dc5, 0xbb1d0c79,
- 0xfb225331, 0x6cac3de1, 0x36e1da34, 0x268ab3e6, 0x47e73f21, 0xd73c21f1,
- 0x3d5b5992, 0x3d7203c1, 0x85542ea2, 0x674277a9, 0xaf483be2, 0x7a433271,
- 0x4cfafb35, 0xbef7f9c0, 0xdc4cb33f, 0x813b078a, 0x8fb119ea, 0x31b96125,
- 0xce5a24be, 0xeaf86e8c, 0x7fa05bfd, 0x5ecbf7a3, 0x69bb940f, 0x7281c3af,
- 0x85b56436, 0x19780c05, 0xe85542c3, 0xc87d1a77, 0x83ea0b77, 0xf07bb002,
- 0xd341497c, 0xc2172ada, 0xbf7a25ab, 0xe498183c, 0xaa6dfe82, 0x32e93939,
- 0x0e500bd4, 0xcd5f29ab, 0x4ad795cb, 0xce710c5e, 0xf1415a6b, 0x12b57948,
- 0x14dc601d, 0xd78d9892, 0xbd41b21b, 0xfbc3569b, 0xc17f3859, 0xed6f58fb,
- 0x416ff7c9, 0xa4fd03bd, 0xfdf237f7, 0xcf783cfa, 0x3b527283, 0x5c2bea87,
- 0x073c312d, 0xcf91b053, 0x55fb054f, 0xec37e62f, 0xdde68a7e, 0xdf5ed029,
- 0xc28f9c0c, 0xd13ce913, 0xe74dc948, 0x80be1f38, 0x0e0756f4, 0xa5f05f18,
- 0x4909f8f3, 0xa3fd62c0, 0x6dfd7fdb, 0x9e535cfc, 0x07d68177, 0x272779ea,
- 0xfc42c329, 0xe1f69274, 0x03be010f, 0x79379c56, 0x9cdecb3c, 0xc316b42f,
- 0x1b6398fc, 0x6acfef44, 0x1e71471c, 0xe29f99b3, 0xe68ea63c, 0x57bbe776,
- 0xefe843da, 0x4ced577b, 0xaf7be7c3, 0xf6f3d2b4, 0xb70f5c4d, 0xbf21680f,
- 0x2ad5e1fb, 0x552ff3c3, 0x13d265ab, 0x74aa3787, 0x867e576e, 0x6fe437c7,
- 0xdec876e2, 0xc3debcd5, 0x9b7edc75, 0x936dccf0, 0x5fce1e70, 0xeaf9cfcf,
- 0x875f5a7a, 0x4d0bb9e6, 0x9ea45bf3, 0xa970f5d5, 0xf54147c0, 0x8f4fda5a,
- 0xfaa5bbd4, 0x6fa1187c, 0x9d9f714b, 0x24c3d3f6, 0x91f5a547, 0x6e8e65f1,
- 0x43f20dbf, 0xefe23bf8, 0xc32afdb2, 0x45f48d75, 0x678a24db, 0xf21ef9c3,
- 0x3e493747, 0xf8287b8c, 0x7b221ad8, 0xe33b943c, 0xf99e703f, 0xc8f0cac4,
- 0x03d22b48, 0xb58e54f4, 0x84bf8ff3, 0xb3df47e7, 0xf010e461, 0x5fe12e4f,
- 0x5ace138f, 0xb27ee3cf, 0x8c995bde, 0xc4d98de7, 0xf207a43e, 0x3016646b,
- 0x92a38de8, 0xf93b96ef, 0xfb46e0fc, 0x966ba747, 0xf3879d56, 0xeb8d916c,
- 0x957acf47, 0x0bc78bce, 0x3e0bd618, 0xb70a77b4, 0x0cd648af, 0x73b850fc,
- 0x0ea83245, 0xcbc444b6, 0xe6738954, 0xa76f1a85, 0x5ee49770, 0xaf47686b,
- 0xdb57af47, 0xb41bdfce, 0xd5bda793, 0xa3fd885f, 0xa1ad7e71, 0xa7ac88de,
- 0xd4bdfb47, 0xa75ff393, 0xffb9e257, 0x0a65779c, 0x62f9cf76, 0x3ee320ca,
- 0x7eea9e8f, 0x2df7ece6, 0x5fc067cc, 0x98631fce, 0x86ef40cf, 0x40ff2051,
- 0xb91a1bbf, 0xea30d73d, 0x459a56a0, 0xbde51bb0, 0xcba3f84c, 0xbbfdf226,
- 0x16f7cc86, 0xc94ffca1, 0x581f8892, 0x49f5a030, 0x83cdfb24, 0x59f900fb,
- 0x787cfdfc, 0xe1b2e51f, 0xa0a72e29, 0x4fa83c2f, 0x98af56ca, 0x256be544,
- 0xb0dfd60f, 0x92ec9736, 0x51991c82, 0xb241ed7e, 0x0f1a0a93, 0x833ca226,
- 0x7b44aefc, 0xd5ac6ca0, 0x9758ea8d, 0xdfce5d4b, 0x9961e715, 0xcf0c3dcd,
- 0x5876e077, 0xf796f934, 0x72b76133, 0xe1cf2cb9, 0x26eefb72, 0x3e784715,
- 0xad724e72, 0x728cb1cb, 0xc72dd59c, 0xd04f577b, 0x870fae50, 0x99a38a24,
- 0xe5007a80, 0xe71e837c, 0xf3d8edc4, 0xd973f395, 0xbf20a599, 0x72ea5f38,
- 0xa475cbae, 0x2d2b373c, 0xee3f62b7, 0xe34ed2bb, 0xf6d57098, 0xfae3ef0f,
- 0x275ff68a, 0x3260f55c, 0x853cc7ea, 0x78e979c7, 0x5c78552d, 0xad7e8f60,
- 0x3df5a05c, 0xf445fe9f, 0xd9ab33e3, 0x8b6f125f, 0xb5eff1e7, 0xb9fdf12a,
- 0x9c87b3e4, 0x9e7ca79d, 0x4b9d5caf, 0xe5f6f53e, 0xe27ae69d, 0xf5476991,
- 0xf01dafac, 0xbe3c0618, 0x8a59f1b5, 0xc2f13e0f, 0x8748a9c1, 0xe7c01de2,
- 0x9d1b5fc8, 0x4e7a8c2c, 0x55bcd109, 0x39d320d4, 0xb384a603, 0xe51a716f,
- 0x1575c798, 0x3f12766f, 0x0d64bd15, 0x8bf7814d, 0x9c317db6, 0x76166ae2,
- 0xf05adc52, 0x072f6105, 0x1ca3865b, 0xbe3c2914, 0xfc2f522c, 0xe7edc6da,
- 0xd3b1edb6, 0x1d527c70, 0xa8a2dfbf, 0x6ad576fe, 0xc278a7d8, 0x50bb52a6,
- 0x8379f68e, 0xfe78c7c0, 0xbc67fb17, 0x2b8f4157, 0xf5c0db6b, 0x1aa35144,
- 0x8a327bc2, 0x63b4b6ea, 0xc7d171bc, 0x957ff256, 0xf3a49dd7, 0x6f361314,
- 0x794aff22, 0x8fda6ca3, 0x59df11eb, 0x18ad8727, 0x4a50d897, 0x1f0090fb,
- 0xc45eb824, 0xf8c0fe53, 0x3bcde2c3, 0x850105b7, 0xef3f2fc5, 0xac72fd42,
- 0xf10bbd79, 0x359ef50e, 0x3c47bade, 0x2223fd67, 0xc386f39e, 0x2f4b6f74,
- 0xf0c79cf1, 0xb794100f, 0x4e78e66d, 0xec87d756, 0xb667e84b, 0x47feca3f,
- 0xe9be7d97, 0x1e7835eb, 0xe3a5eda1, 0x25dfb06b, 0x0d72fb7f, 0x5db189c6,
- 0xcaf79a76, 0xe280f85f, 0xbef7f5b7, 0xc87f09b0, 0xfe29e786, 0xab13fdbd,
- 0xdf6b6b17, 0xe31d3879, 0x7cb7db06, 0xccfe8c97, 0x26af3b79, 0xadbcef7f,
- 0x94585213, 0x914f4379, 0x11e7437f, 0xb7491f7f, 0x297b0dbd, 0xd004ed9e,
- 0x5760f51d, 0x297d6e9c, 0x88f67f8f, 0xd18ddcf8, 0x88f43bcf, 0xe5b86dc7,
- 0xff512bc6, 0xf27b7037, 0xefb9719c, 0x3f2f3d03, 0x146e3a0f, 0x37f5d7f1,
- 0xf72e359c, 0xf401fe04, 0x74cdd8b2, 0xf661bafc, 0xbcc69faf, 0xc6bd3e86,
- 0x03cf86e5, 0x8a79fa7f, 0xa73e6c77, 0xa5e8e51d, 0x343c50df, 0x078a6fd2,
- 0xd3afa3e7, 0x11ca3cf1, 0xce3a73b5, 0xad3b9d3f, 0xd14fdf74, 0x9e488fce,
- 0x47beb7da, 0xce266a7e, 0xaeb3b435, 0x973f8f1f, 0x9d6b78c4, 0xd0579e3c,
- 0x3d7de301, 0xe22e7a2e, 0xeb5f397a, 0xc5bdbef1, 0x1f6d5ef9, 0xe00fee28,
- 0xb5a5c8f1, 0xb3f1107f, 0x729374dd, 0xcf075e90, 0xe3ad471a, 0x72f42dc1,
- 0xf47b1c77, 0x38aeeab8, 0x21ba08f6, 0xea9e713d, 0x3807538a, 0x49f9046d,
- 0x2e768a3e, 0x8f2d7da2, 0xa3576f7f, 0xe3d6379f, 0xf4117dda, 0x9bdbc7d6,
- 0x7b72e8bc, 0x50fc71ae, 0x4266d13c, 0xb57c4f52, 0xbf5d1f7d, 0x7f4bb4cf,
- 0xebbefad7, 0x148954bc, 0x5eeb1e79, 0x86d2cbe4, 0xeccfe483, 0xf18b7f5a,
- 0x7bff09b6, 0xc5320bdb, 0xdfa92f39, 0x523dfa4b, 0xbde1947f, 0x7bfa512d,
- 0xe7d85dbf, 0x68fe7c8d, 0x7219c97b, 0x7b6d3d40, 0x975f13b6, 0x71483c6d,
- 0xbdd66fd6, 0xe218b5ac, 0x08fe7027, 0xf6d3c619, 0xa4e1eee2, 0x30fdc5cf,
- 0x8954ed91, 0xa29bca76, 0xf1be53b7, 0xe29da9a4, 0x76e6bd60, 0x63bdbb9c,
- 0xac76ef8a, 0xb73358ef, 0xcbd58f13, 0xf6d14393, 0xd8aaec69, 0x29e6afef,
- 0x0f74a3cc, 0x7bdf938e, 0xc862bb23, 0xcce79cee, 0x112e38f9, 0xbae28d53,
- 0x388816aa, 0x537a9fb0, 0xce55de91, 0x77f618eb, 0xe0d7e231, 0x01dd51ff,
- 0x9aaaf686, 0xbf42cfea, 0xd1dea443, 0x642c2d12, 0xe73cf7a0, 0xed0f14e4,
- 0x90d3848b, 0x0f32079e, 0x67ef58ab, 0xcfabfe11, 0x0524b614, 0x5059f7fa,
- 0x7a802ef2, 0x0c5ff7d9, 0xd67b3bf0, 0x8781c3af, 0xa9bf9365, 0x0079dacc,
- 0x35ec2bd7, 0xc847ed3b, 0x7f74ecbb, 0x3b1af948, 0xd6ef778d, 0xbb239f6f,
- 0xc656d7fb, 0x556087f7, 0xb996f783, 0xb7871d79, 0x2fe6bb7e, 0xcbe35768,
- 0x1afe7ed0, 0x89eb8f28, 0xfeb4b18d, 0x8e2978e9, 0x7fcb51ee, 0xa8a9a1ed,
- 0x3963f2d7, 0x4e6fe63f, 0x7d415509, 0x6e1c49ad, 0xee8034dd, 0xc97e28ab,
- 0x7ba5f149, 0x8652beb7, 0xe6fb54f3, 0x60330c58, 0xfb09afed, 0x7ee237ff,
- 0x63d51aad, 0x642f338c, 0x2e78c78a, 0x536118a8, 0x3f23135c, 0xfa11c906,
- 0xe8e31ab1, 0xc5d8113c, 0xa84e7aa6, 0x89780f9d, 0xf8fd838f, 0x3f70aa39,
- 0xe59107d4, 0x76fdfcf4, 0xdcbd8e7e, 0xba39ef0a, 0x9d6b97cb, 0xeeae1c79,
- 0x3349f5c7, 0xe744ff95, 0xb3df9173, 0x23fe5e5e, 0x7aaedebd, 0xe265fbf8,
- 0xfe489b7e, 0x57a5d43d, 0xb4717afe, 0xf4a25bfb, 0x2fdf9e9f, 0xed05a0b1,
- 0xba762d96, 0x67ef0bb7, 0xdb0e73c0, 0x55ffbe34, 0xfdd30ae2, 0x3e843b39,
- 0xd309892e, 0x44fd3e7d, 0xc23cb03f, 0x22f2d6cc, 0x15f4bdd2, 0xb3f8c33b,
- 0x3e9cd7d2, 0xb6e977a4, 0x957f5b6f, 0xb18bf185, 0x7d2bd7e4, 0x1f117fed,
- 0xd3c35a94, 0x2dbcfee9, 0xe7f78656, 0x3b796db5, 0x4db5e51d, 0x9c27a70d,
- 0xb4af5f65, 0x57be6ade, 0x58e38c40, 0xbc42e865, 0xaf3f4177, 0x9e8bd45b,
- 0xdb8632b9, 0x9f18f6d7, 0xdde48635, 0xb1d44f7c, 0xc641c6ca, 0x9e45fbf3,
- 0x7fb17ae8, 0x9d8ffa8c, 0x38f47743, 0x263fdaf7, 0xe699ed09, 0x96407464,
- 0xfa8e3d81, 0xe4eea8bc, 0x7c1ff430, 0x6fdfa316, 0xf9c389e0, 0x73ad33e8,
- 0xcfbfea18, 0x19873bd2, 0x952399e7, 0x92a28f36, 0x8ed2875f, 0xfaf327b5,
- 0xb5378c31, 0xf7a68b4f, 0x12c4c586, 0x8e1e8afe, 0x7a8e7a8d, 0xabe70c4c,
- 0x531f6834, 0x43e5176c, 0x2d33f76f, 0x31b87a44, 0x49ebc3c6, 0xbcf8690b,
- 0x20fb6eb8, 0x950a1e23, 0x72072a6a, 0x642cf84a, 0x832cb52b, 0x2bdbd6fe,
- 0x28f0ef9c, 0xf2813f74, 0x55f8ba7f, 0x3933a6ee, 0x518e3191, 0x5c6614e0,
- 0xe7eb33f2, 0x6fd51448, 0xcc69fc43, 0x646456ef, 0xf7926a95, 0xa94f5618,
- 0x7e2ecc03, 0x43055a5f, 0xec44793d, 0x0c1fa3ef, 0x3d7c6ed3, 0x383f7a48,
- 0x328bb180, 0x4f09bef0, 0xbf80db9c, 0x3eb3706b, 0x2b69e309, 0x5317d718,
- 0x488f68dc, 0x033b00c1, 0x595554fc, 0x03df8837, 0x2f187cc2, 0xf42c2649,
- 0xf37ee5cf, 0x8083b43b, 0x843df94f, 0x1550b157, 0x86c318a0, 0x9e3aefc9,
- 0x96f3fbf1, 0xeb8ccc7d, 0x0741128f, 0x7a4ddc16, 0x7fbac1cc, 0x5aa3009d,
- 0x155b7d45, 0xdc1cb7f7, 0x59b8739f, 0x58c3ed18, 0x42c70b07, 0x1caa18ea,
- 0x2bbfca33, 0x5f03600b, 0xf39ac7ba, 0xf402e523, 0xf5bb445a, 0xf7a83f84,
- 0xbf7a88ab, 0xabf7a88a, 0xe3abd5b3, 0x2836dca9, 0x58a5ef03, 0xf02f595c,
- 0x09b3dbc6, 0x867e01bc, 0xcc7f3de0, 0xfb15e312, 0x18d784e6, 0x962b1bd1,
- 0x8e6eba07, 0xfa7e20c9, 0x4eaedc59, 0x8966978c, 0xc3d70c35, 0xbefd248b,
- 0x1877e64f, 0x1b6e63de, 0xa223e1c9, 0xcf02cd9d, 0x55bf748b, 0xb276f28f,
- 0xb7947abf, 0x263e56f7, 0x6f67797a, 0xde8d89fd, 0xea5ef89f, 0x5a8d8eaf,
- 0x4bd50fff, 0xf47daf65, 0x76efee0e, 0xa03727e9, 0x6e676cde, 0x1a54fd46,
- 0x16b7fadf, 0x4cedc27a, 0xde999a5b, 0xc0dfc831, 0x9a17316f, 0x49f2479f,
- 0xf472fe14, 0x29df7185, 0x61f3bbda, 0xe74d5e7e, 0xbe315c83, 0x9f7cb589,
- 0xdc79eefc, 0x46cb078f, 0x25dfe61f, 0xe1aeeff2, 0x0fdde3f6, 0x1b0fb4cb,
- 0xc1d5bf79, 0xe1efcdff, 0x801f07b8, 0x5f33ddbc, 0xa233850a, 0xdec5dbdd,
- 0x677a878f, 0xde44fe88, 0x9e601583, 0x68e4cb49, 0xbe7160f7, 0x45e1cfd4,
- 0x8b317fde, 0x66fdc5f9, 0x3159e7e6, 0x22dbe26e, 0x0f55a425, 0x82c79eed,
- 0xd1ef802b, 0xc7be04fe, 0x9c592ffd, 0x8d2e66c7, 0x9adfd7c7, 0xa77fd260,
- 0x70e27886, 0x8baf66be, 0x7cfe6ee7, 0x149eb5ee, 0xe54579e3, 0x1ee9ac74,
- 0x9bd52aa0, 0x3d4dfa8a, 0x80f33d7d, 0x07a76a1c, 0x1f218f31, 0x7e1256ec,
- 0x3d24f30c, 0x5084aa95, 0x3f6e2a2f, 0x37bb47cb, 0x5eb3f39e, 0xaebc4b5e,
- 0x177ccecb, 0xe6461dfe, 0x5dcfff81, 0xdff3a1e1, 0xa7fc3957, 0x5d95fe71,
- 0xa380de39, 0xb7043f27, 0xe15771f2, 0x8e7a33b8, 0x7af34aa7, 0x0ebedec9,
- 0xcbf245e6, 0x3fd86d79, 0xc8f9ead9, 0xadfbfd83, 0xd30fd0ca, 0x23de3f13,
- 0x153fc821, 0x24714fea, 0xc194dc72, 0xbc72fed8, 0xfff4146f, 0xe8bf7132,
- 0x52264aa2, 0x08b7571e, 0x34a51fef, 0x913ea447, 0x8f72a64e, 0x974a7b8a,
- 0xd5297a54, 0x56f36bf1, 0xb6983dd3, 0x36fb790b, 0xa25fa0b7, 0x402fe045,
- 0xb52d97f6, 0xed1df682, 0xde308aee, 0x60d2c727, 0xb961c1de, 0x9abf09ab,
- 0x13bf919b, 0x456c13ca, 0x66b55218, 0x7149d10a, 0xc87cb057, 0x0604754f,
- 0xe37da2c7, 0x6987df31, 0xd6711d4d, 0xf8bb31fb, 0xfbc7136d, 0xb7e71263,
- 0xa63fbc48, 0x6e307b36, 0xbb6bb1e2, 0xbc22abee, 0xe38872c3, 0x7ef93ffd,
- 0x37bc4f6e, 0xc0699b4f, 0x327b33bb, 0x0267fde1, 0x5e054bcf, 0xaa1d04ab,
- 0xc4ab3e04, 0xe255afbd, 0x812adcde, 0x3bc42adf, 0x7b888fa4, 0x3d23177b,
- 0xef119520, 0xa1d6bfc0, 0x7b432641, 0x520f0f7c, 0xb75be087, 0xb2a4fc85,
- 0xbc43efc3, 0xde39135b, 0x03a00e6d, 0xdde035f1, 0xc241f983, 0x78e0eaa7,
- 0x0e9cf286, 0xef105602, 0xa739f123, 0xb6fde007, 0x87d424d8, 0xd6784c63,
- 0x1f9fc712, 0xf3e29fde, 0xf390a25b, 0x6a25fc48, 0x5d605efc, 0x8fb37bbe,
- 0x765c44ab, 0x1197b895, 0xcd7688f1, 0xbb0fc02a, 0x3f93e398, 0x2f60221f,
- 0xfd6249a5, 0xc074055f, 0x3d8527fc, 0x74c67884, 0xb75f2c67, 0x762a5a24,
- 0x04a788ab, 0x4b0fdf7e, 0x6cb4b20c, 0xf18043c6, 0x1e641a97, 0xe1fdf584,
- 0xc8ff335a, 0x7ee2256e, 0xc893cf49, 0x56df2b6f, 0x2f7fb82d, 0x74debfcf,
- 0xe69e5bae, 0xfe10f78b, 0x35da1203, 0xb872133f, 0xfa85f902, 0x8bc3d7c7,
- 0x152c1f05, 0x223f06f7, 0x2131e7f8, 0x7ceb8fcf, 0x48e7e5e3, 0xf1e64f96,
- 0x1699898e, 0x9862bf71, 0xe309e319, 0x834cf5bd, 0xc6dad17e, 0x9fdd2943,
- 0x92b63ec9, 0x650ec6dc, 0xf8afe43e, 0xf10c1f8f, 0x7dfd70fb, 0xd78a1ed1,
- 0xeb1dbfef, 0x0e83bf89, 0x711587b4, 0xd7cca4cf, 0xef51cd93, 0xeef2ad8d,
- 0xa65fa1b1, 0xbeecfeb7, 0x73f11b69, 0x1e44e5de, 0xbfe027d6, 0x8ec03663,
- 0xc157581a, 0xc81bd62f, 0xa57a8c5e, 0xebfc49c6, 0x1be7fe80, 0x17b624f8,
- 0xc754ceff, 0x5f501bb5, 0x240680bc, 0x6a2fc130, 0xcf11d906, 0xfa0f7380,
- 0xfea956b1, 0xac2f38be, 0x8fee8f8a, 0xdf2c65fb, 0x5d657087, 0xac2bf26a,
- 0x9ef47d54, 0xef59ac7b, 0x44f314ac, 0x9cb344c2, 0x4fc2f3e8, 0x39577d45,
- 0x5fb13b02, 0x9227a7c9, 0x1cde273c, 0x52a89e7d, 0xf7de20b5, 0xcb8c3551,
- 0xe3573077, 0xfd0f73f6, 0xe49f68c3, 0x3af06054, 0x7ce983f4, 0x7d7190b6,
- 0xaedc6417, 0x04f5c7d4, 0xe715bdf2, 0x3ff13703, 0xcb0807ce, 0xdefc6a17,
- 0xe42dbbe3, 0xe4225887, 0x672151ed, 0xfd7c85cb, 0x8acbe51c, 0x297b58f7,
- 0xab9085fd, 0x90872895, 0xe0cd8f1e, 0x3fb8bcf6, 0x2ccf5f4c, 0xfba08db4,
- 0x161cab22, 0x4f94179a, 0xae0721ac, 0x768f760f, 0x7647ffa3, 0x4e03b966,
- 0xfba2cfcc, 0x2cf8a5dc, 0x37cdf237, 0x9c1759ce, 0xc8161e2d, 0x2b16f74f,
- 0xea4bf72a, 0x8957c8f7, 0xf75408fb, 0xd5fd0ccd, 0xd856264f, 0x9635de29,
- 0xe4d787c7, 0x81de1366, 0x86b19d1e, 0xb4ca5a75, 0xfbeebaeb, 0xec9feac3,
- 0xef835ee5, 0x6960de85, 0x41a17641, 0xfb88d44f, 0xc8279924, 0xc5f41886,
- 0x31268bc2, 0xd4f5eff4, 0xcde8bd13, 0x2e6f5cf5, 0x4deba292, 0xf5d78edd,
- 0xd17ea466, 0x4c17c5d3, 0x54bf9d36, 0x7a465e1d, 0x3f744867, 0xcac3b242,
- 0xd7992ff7, 0x33d19b9b, 0xec95f01f, 0xed0be030, 0xf16fdd21, 0x89e328e3,
- 0xae4793d4, 0x00d7e7d3, 0x4b343bdd, 0xfa393cf3, 0x79abf3ac, 0xfd3fc85e,
- 0x794fcd08, 0x5a5347a7, 0xd92d7350, 0x69770c23, 0x1c2ef70c, 0x0bbf7d5e,
- 0x46b87be9, 0x56af9fec, 0x74bf1843, 0x197166b8, 0x0f76d5c1, 0x2fdc66d2,
- 0x3de1741e, 0xc8d6b34d, 0x247e9fb4, 0xf14e9826, 0x399fdf9f, 0x63e21b23,
- 0x9a5dac16, 0x6b38fc8e, 0x373a52dd, 0x37c1247b, 0x59806fd8, 0x79ec8796,
- 0xbcc6fefd, 0xb43ead67, 0xdbe37bc7, 0xfdd1d5bc, 0x27f8553d, 0x779853ae,
- 0x7643ae08, 0x8674c6bb, 0xfc8e0f81, 0x82eb4ec2, 0x8ed82d2a, 0x58bad570,
- 0x2555837c, 0x280fbf44, 0x0fb571d5, 0xf0ea94e9, 0x73298b5d, 0x4f7ed024,
- 0xdbc472ef, 0x3b5f456f, 0x2d3ebbee, 0x3a0245ee, 0xbbf264dd, 0x56d77df2,
- 0xb66347e1, 0x4463f25e, 0x7c97a7be, 0x62938f17, 0x7ff9e8ee, 0x6869ffb7,
- 0x315df58f, 0xe6b4bf8e, 0xe84f92f8, 0xa11f7989, 0xcf8bc450, 0x5a1b175a,
- 0x66ce4518, 0xb3c7f389, 0xce997ec4, 0xebd7858f, 0xe3e739f6, 0x5e973e48,
- 0xfaf884b8, 0xcd2a7dcb, 0xafc0656b, 0x17a5efa8, 0x507dd346, 0xe2bb9e0b,
- 0xfc0b8c71, 0xf94cbf6a, 0xc4eda725, 0xf53fedc7, 0xce8239d2, 0x9df4a9df,
- 0x5df6af88, 0xda01e74d, 0x3ad3fdc5, 0x7c9cd29e, 0xc1c6bc3d, 0xe9de50f5,
- 0x743c919f, 0xa66857f1, 0x779e7286, 0x5e5ce7fa, 0x31d9fef0, 0xdeebec38,
- 0xe0a1771c, 0x2cd7806b, 0x2169349f, 0xc2df9bd4, 0x0d8d8ae5, 0x7b1f9196,
- 0xd1f7ac6d, 0xc6bff228, 0xb5f931e1, 0xf3965bf2, 0x4caec0f7, 0xcc99c434,
- 0xc87bdf12, 0x65efcd3f, 0x4b20ee65, 0xfa127945, 0xcc0e5bb0, 0xdbb772f7,
- 0xd3d4e3cd, 0x79c6bb17, 0xd6fd6985, 0xe8abce3d, 0x53b09e79, 0x13465bf2,
- 0x5f9e3ddc, 0x9e368e8d, 0xa58c71ee, 0xd1f1edf2, 0xb450ffdb, 0xc0595adf,
- 0x8b4fa07c, 0x79bffdc4, 0xd434fba1, 0xe3ab791f, 0xb8c32413, 0x4d8a6bf2,
- 0x35794ff1, 0x5fdf74b9, 0xed5ceafe, 0x76bc835e, 0x36cd85d1, 0x4743e5d1,
- 0xd532e880, 0x40e3dfe1, 0xf0a17cb9, 0x583d4f81, 0x11d3a72f, 0x82bc382d,
- 0xae7ddbf4, 0x3ea7e768, 0x8d53a48c, 0x1fac13a0, 0xd2740cb2, 0x7ef913e9,
- 0x07d2faeb, 0x8bfdc53e, 0x45edf8a7, 0x08b1ebbd, 0xc74465fa, 0xf383a75f,
- 0x0fd82b6b, 0xdfc2f381, 0xbbf8a665, 0xe827f15e, 0x7f47e3ad, 0x51dff60e,
- 0x944efb94, 0x1f5851cd, 0xe17387e7, 0xc17dbbad, 0x8fa2e30f, 0xf3a9c527,
- 0x04167cc3, 0x71aa3e01, 0xf1f7a3fb, 0xb682c43c, 0x974f18f3, 0xf1362e9c,
- 0x01738a43, 0xff415397, 0x211e7ba3, 0xc50d6ebe, 0xbd19ed7a, 0xd9c5278f,
- 0x185c1f0b, 0xffb34364, 0x11c1f1ef, 0x29f837cd, 0xde60479c, 0xf0bd79c2,
- 0x1b86c7f9, 0x18ea97fb, 0x79c4e697, 0xf6fcfaa9, 0xbde383b7, 0x1e1d70fb,
- 0xab7b275c, 0x8e254860, 0xbf7c60c0, 0x37f4f5c8, 0x145bdfe8, 0x68f0c4ff,
- 0xcae2f476, 0xdfa0977d, 0x99b2aab5, 0xc5d85552, 0x0ce2ed0c, 0x688f5dfe,
- 0x2b5e05f7, 0xcae2ebf4, 0xb5b7ee66, 0x70db9905, 0x323b00cf, 0x8ad8fc92,
- 0x69cf6cc3, 0x7e8dc06a, 0x83dc4d73, 0x65812aa0, 0x67e85919, 0x1a0ccc4a,
- 0x938d77f0, 0x4f2cd7ef, 0xfdc6e3dd, 0x8fd42b10, 0x9a5b33fe, 0x77e4ca72,
- 0xe5995d7b, 0xd7e93a04, 0x7ab0c744, 0x1f91249f, 0x1f8454f2, 0x1fa994f2,
- 0x7da6cd89, 0x28bc488a, 0x281dfe36, 0xb3ff0985, 0x9dfe87c0, 0x3e6ea5a4,
- 0x8fc295ce, 0xaffa04fd, 0x363b022f, 0xc1f645d6, 0x7c2c0b92, 0xe853687d,
- 0x3c6b2bbe, 0xf1c5af2f, 0xff71d871, 0x59f5c643, 0xde276098, 0xd5d32610,
- 0xf1c2128b, 0xdc2123cc, 0xe0978587, 0x5c63a37a, 0xfcf803cb, 0xd7207b79,
- 0xa6ff7809, 0x0901f36f, 0x81fc33f7, 0x35cf118b, 0x03d33072, 0x772d7fe5,
- 0x74b96266, 0x8138fac8, 0x39e017a7, 0x9f618087, 0xdcd7df94, 0xe13dcb0d,
- 0x112be60b, 0x5fdf86fb, 0xc7b7cc6c, 0x1fb02af8, 0x4acbdf91, 0xfda242fc,
- 0x074cfc41, 0x34fdff9f, 0xf2772f3f, 0x07f10575, 0x6e3ed7f2, 0x3c529d3f,
- 0x87efb871, 0xb4dd79d8, 0xbcf363ef, 0x114f686e, 0xe05ad8eb, 0x4fb0c557,
- 0xb3e2a177, 0x545d3f20, 0xfd8dcf0d, 0x2727628d, 0xf2dae838, 0xdebd76a8,
- 0x83af3c3f, 0x4f4f94f8, 0x92247b22, 0xed74762f, 0x69eff027, 0x9e33a1dd,
- 0xf1c388b2, 0xfad1ff4c, 0xfdae93ee, 0x761c45aa, 0x5efca873, 0x69fe3fbe,
- 0x99c51c6e, 0xa3b8fdea, 0xda336969, 0xf53477c5, 0xbcf8899d, 0x3fe2e0a3,
- 0xed43ba63, 0xfa8b13dc, 0xce897ee2, 0xc1f9dd97, 0xa42c6aeb, 0x475fdf5b,
- 0x2f3c1ff7, 0xeb3a71e4, 0x01fe9154, 0x7ef8d6e6, 0x47df8857, 0xae02bcc5,
- 0x715fd157, 0xdb743877, 0xf3dd000d, 0x3406e87a, 0xebefa6f7, 0x43d5037c,
- 0x893ddea0, 0xf7f494f6, 0x726fbd1a, 0x35d7bf98, 0xe346c57e, 0x8daf1ee9,
- 0x0f8fafc6, 0xeff84a7a, 0xf7b88fc2, 0xf8c7c74d, 0x77dc7c99, 0x1d75dec4,
- 0xe03acb3b, 0x3fb0db0e, 0xde3e78f3, 0x7cfc489f, 0x1fa05985, 0xf6fa745f,
- 0xe33e3f20, 0xbd45edf4, 0x4ceb6257, 0xbc920657, 0xcbcf85bc, 0xc905c0e4,
- 0x0313fe30, 0x21271702, 0x9cfb9a1e, 0xc07fbd97, 0x80fbf9ce, 0x81f7f39d,
- 0x5baf9d0c, 0xecd7c89c, 0x538d2274, 0x92c77eff, 0xbb1f87ad, 0xc4fbf81d,
- 0xbb3efcdb, 0xe373d952, 0x9697e443, 0x3e2e3199, 0xfc203d32, 0x7c5d2d0c,
- 0x433bc7c8, 0xa332f2e1, 0xc1ce9621, 0x9b2f98cc, 0x319fdbee, 0x32c3c79f,
- 0xadf879a5, 0x22c275a6, 0xb3d2d711, 0x4483f41e, 0xfe7333d6, 0x6517ba04,
- 0x471ee69b, 0x9c35917e, 0xf44e66cf, 0x608e78c9, 0xf68932cc, 0xf21f47f1,
- 0x3c00b634, 0x45ef1433, 0xe0f99d31, 0xfbc20d7f, 0xb99aca51, 0x9e45347f,
- 0x4853f993, 0xf3e1ed57, 0x1073c23d, 0xc79f0e4f, 0x69cfd861, 0xdd322b53,
- 0x38f7cfc8, 0x7207a87c, 0x827df56f, 0x22fdd574, 0x0afbbcfa, 0x171801d8,
- 0x27b77b9a, 0xb724f907, 0xd4a3eede, 0xd01894c6, 0xb30ab96b, 0x31a29a61,
- 0x0c698ec5, 0xbd9a61b3, 0x354fd850, 0xe0fd9137, 0x786bf1ca, 0xd36e79bf,
- 0xfbe35ff3, 0xb7784af9, 0xdcabe064, 0xa3bfb7a6, 0x3f3e7673, 0x565efdc5,
- 0x9fd699a7, 0xf0cdeac3, 0x38668de5, 0x47866c33, 0x65c333ee, 0x67683638,
- 0xe11ef88c, 0x86569d9e, 0xde2b2bdf, 0xf438a56c, 0xa0ae7157, 0xe93b5e78,
- 0x4dc509c5, 0x712718d9, 0x05a745fc, 0xfe3b95fd, 0x59c532fa, 0x658a6e74,
- 0xfbb86718, 0x831618d3, 0xed9bbc71, 0x18bf30eb, 0xd9fe8768, 0xb5f6cde2,
- 0xedb34f18, 0x434f9d52, 0xedb50f14, 0x4bbcfc6f, 0x086b06e7, 0x6f8e2ee3,
- 0xe445fe7f, 0xfa1be3cb, 0xaaf61d93, 0x7d60b414, 0xdd3847c1, 0x3ebd08c7,
- 0xe2f5e846, 0xfc7af33a, 0xaf6e970a, 0xfce287ba, 0xe3812637, 0x2fb2b5bb,
- 0x2577c587, 0x8c6dfb74, 0x40dd67b0, 0x7687cfcf, 0xd08879d3, 0x7ba179cf,
- 0x6bd11f35, 0xb3ea0c41, 0x0cdaa38e, 0x5c5d4bf4, 0xcea3c663, 0xcc8497df,
- 0x24ba27cf, 0xcf0ce7a1, 0xc55e3033, 0xe71524b3, 0xdfa367ef, 0x8bbd7f53,
- 0xbc2f19db, 0x20f2e9a0, 0x992a7b8b, 0xb2ae70c7, 0x79b3ef5b, 0x089e31fd,
- 0xfbb035e9, 0xbb447179, 0x2eb1fd7a, 0xda5fcf2e, 0x448279f8, 0x95638781,
- 0xe86ec8fb, 0x7d2b23b2, 0x380689be, 0x727766af, 0x776fe12e, 0xf986bdf2,
- 0xdec364aa, 0x4e30bbe4, 0x8f414eae, 0xe3b92b36, 0x498f9fb9, 0x3d72e7c4,
- 0x11f313f6, 0x6ff56d8f, 0xd81cb0b8, 0x718c23d9, 0x5735f967, 0xe41a26fb,
- 0xb9f287fe, 0x3712b74e, 0x392bcbc7, 0x9cf093cd, 0xff0d7148, 0xfd71618e,
- 0x80dcb76d, 0xcfcd5ef8, 0xcc2b67d3, 0x963c832f, 0xfaedb96c, 0xfcbcf061,
- 0xec5e5199, 0x2b71540f, 0x78de2f3e, 0x3d15ce92, 0x8efee16a, 0x0504fa48,
- 0x13fa3d9f, 0x39ea0147, 0xcb75efa8, 0x9f7f7a08, 0x239bec05, 0xdcef83e3,
- 0xb38e4505, 0xa06f0ffa, 0x9f3330f8, 0x684bdf02, 0x97ae75bf, 0xe9e8ebbc,
- 0xd19becc2, 0xa02df754, 0x8fc87978, 0x0c297eba, 0xa489eb99, 0x6fc16aef,
- 0xe4621bf0, 0x7d72c893, 0xdce904a6, 0xd07cc974, 0x43ff0693, 0x8a6aa1c9,
- 0x076c8dcf, 0xc1985823, 0xe181da0e, 0x9b972875, 0xfc169c78, 0x88b65b24,
- 0x61c41f63, 0xf915c7ba, 0xf8a21811, 0x83d13652, 0x4d99bd78, 0x37d754c6,
- 0xc3f5396d, 0x52abfcb1, 0xa2bf73cb, 0xe7249cfa, 0xdd30ee6d, 0x9c4fdb6f,
- 0x70f36f47, 0x8feeff58, 0x64d45f9e, 0xe3aeb8a7, 0xbfd23427, 0x149e300b,
- 0xb768bfcb, 0x3d68c058, 0x78cfa5be, 0xab6e538c, 0x1fa5f7e7, 0x5e33efab,
- 0x1ebccb3b, 0xf9f44fa9, 0x39fe20f6, 0xbe7afd1a, 0x83ce1726, 0x771c788b,
- 0xcc8a9f4a, 0xd274288a, 0x2ad44bdf, 0xd3fd7132, 0xbbc38f79, 0xcc14f08e,
- 0x9fefc850, 0x5c9a7e4a, 0xe83a9fe1, 0x02edbff6, 0xaca139ba, 0xec25fe0f,
- 0xdfa1d793, 0xfbd90bf3, 0xf7507b80, 0xacacfd3e, 0xa3df9d00, 0xfb40f40f,
- 0x56161533, 0xe266df80, 0x609c353c, 0xae674f9a, 0x719e2896, 0x78f372d6,
- 0xe36ebef0, 0xaac05afc, 0x5622dea9, 0x6ac1694f, 0xce217e73, 0x273c2e47,
- 0x8a76e3c7, 0xfaf9cd6a, 0x8d92c0b9, 0xf0d1ac67, 0xc7d335f6, 0xc702fbe7,
- 0xa36cdd27, 0x2ab7ddfe, 0x6977ef8e, 0x6df387cc, 0xe9272ae7, 0xa01662ff,
- 0xe629dc7e, 0x7307f2fd, 0xed20b37d, 0x37d33f98, 0xcb9e0fab, 0x2d23f3e7,
- 0xf99e4919, 0x13c57ebd, 0xbf007859, 0xbce187cf, 0x8524dc5b, 0x036c2187,
- 0x9fc11d51, 0x3826bde3, 0x3679e37e, 0x6e38fc7d, 0x3ef673e3, 0x9e116fa7,
- 0xb8f9929b, 0x0225f98d, 0xad1b251f, 0x98d17f26, 0x79d0528d, 0x6aaf9e39,
- 0x1acaf7a1, 0xf54d58ce, 0xec1d798a, 0xba076601, 0x2b58298d, 0x755660e3,
- 0x25d0f1e9, 0x1ccbced1, 0x755c7810, 0x945f5e5b, 0xbed1c7db, 0x9d1027ed,
- 0xf84a7a43, 0x06d858a3, 0xec8cc5ed, 0x7b3f2982, 0xe2938721, 0x26a6bd1e,
- 0x4a2fbe8d, 0x653361fb, 0x0e789bff, 0xd03e32b3, 0x99da0e37, 0x2ba5f169,
- 0xc1bebf24, 0xcf9d6fe4, 0x88e4f0f1, 0xa22a4b8a, 0x3a75b7ac, 0x53d5213f,
- 0x3b70409c, 0x599da79f, 0x20c06a9d, 0xb8e32317, 0xd3db8cbe, 0x3a8fe742,
- 0xfe744ab7, 0x20e929f9, 0xea7e0f9d, 0xff430f5a, 0x09d02a40, 0x1253eff5,
- 0xfcf7845b, 0x34dc3565, 0x73a40de7, 0xe3178c56, 0xc61b4a0f, 0x1a0ea5c9,
- 0x62e3b73f, 0xbee2d62b, 0x218cca54, 0x61df9023, 0x866e33dc, 0x3ce3a3e7,
- 0x075f5ed4, 0xc2ea7ba7, 0xa1dcc660, 0xce7fbed8, 0xc38f281b, 0x7cbce862,
- 0x99cae00a, 0xb432e940, 0xe65952cf, 0x5333b46e, 0xa04678a7, 0x8b957f4f,
- 0x916493b7, 0xf684dc67, 0x270e08e3, 0xd6f92c3c, 0xa0ae1311, 0xfcac5276,
- 0x1cf87a93, 0xfa1b4ded, 0x7a061c27, 0xc4c8a84f, 0xf10653f6, 0xa70b5134,
- 0xbbfb3d61, 0x75a01ee8, 0x23373cc8, 0xbbef3d7d, 0x2eb82971, 0x0b06dfdc,
- 0xfb63d075, 0x97fdf4d3, 0xfbec83b0, 0xf2f0870b, 0x97994ec2, 0x24e7cfc9,
- 0x15f6eba6, 0xfaa1d72f, 0x5ea246e2, 0xf9db8f7d, 0xe47c395f, 0xeb42bf07,
- 0xa0803ce5, 0x58e9f953, 0x0bcc3216, 0x21bed3e5, 0xb4fcb3fb, 0xef1cf4cb,
- 0x95c63509, 0xa1ec1497, 0xb2943bef, 0x509f3a66, 0x838e6f5a, 0x7a2622bf,
- 0x979f3abb, 0x3059969f, 0xf99abb6a, 0x2c3ee9f9, 0xb95a27d8, 0xd3f3f364,
- 0xfc6e1992, 0xe8539456, 0x7169cb39, 0x0a71fae1, 0x67a847ce, 0x7aa09642,
- 0xda5d3779, 0x79a62ddd, 0xe1c3dda8, 0xa8bfb10e, 0xb7f5ebe7, 0x39c4d34f,
- 0x7bcf7e3d, 0x2028255b, 0x4dd3cfc0, 0x5ddfc927, 0xf09bb4d2, 0x3745f3a3,
- 0xd059629e, 0xaae4a780, 0x6405b0e6, 0xfdb8e9fc, 0x025e874b, 0xcba3d924,
- 0x7d4b3bf3, 0xbf1f2163, 0xef90616b, 0x792eacd8, 0xcab37527, 0xf0fd0b22,
- 0x6389d2ee, 0xdf2f308d, 0x35f37efd, 0xb2b78113, 0x6bf7f286, 0x6b7bdee3,
- 0x7506c621, 0x4ce83b9d, 0xb00d3bf7, 0x0076022d, 0xbb00cc3c, 0x2313f223,
- 0xe02353f2, 0xcdbe5e34, 0xbd974e19, 0xfb8e6d8c, 0xf4051ae0, 0xf0df5b5c,
- 0x6479bc74, 0xd747d579, 0x4b8ef7e0, 0x7bf7f8b3, 0xaf249acb, 0x65a3d064,
- 0xe4871e5e, 0x3368b6f0, 0x9a1e2287, 0x507d44df, 0x71b928d2, 0xdfa8e0fd,
- 0xdf182612, 0xabde18f3, 0x92d3ebe7, 0x5ef483fa, 0xb10ff5af, 0x3a3f3c90,
- 0x772a73e4, 0x362dfdae, 0xaf3dfcc5, 0x52bbf6de, 0x50b7812d, 0xc7f7eadf,
- 0xa8f96db7, 0x20aa7bfb, 0x7d7ded7f, 0x469f497b, 0xc72f5b9c, 0xe5ba38fe,
- 0xa3e3c745, 0x7dd14e43, 0xd6187fba, 0x079a01f4, 0x2e4ef3b1, 0xff75abc0,
- 0x14787a54, 0x27785fda, 0x4f1fce2f, 0x704ba148, 0x059a7a5e, 0xef76c12e,
- 0x3768bd29, 0x80ec6a7e, 0x5e0cf2da, 0xdc4df309, 0x01ffc249, 0xfc00cb67,
- 0xc7e9dd64, 0xa4e1c2ff, 0xe609ba16, 0xafefeab6, 0x1ecdefc1, 0xc032d018,
- 0x8ff7f002, 0xe0993a5f, 0x419e5a8b, 0x04eac2f8, 0x196b0f0e, 0x03fbc320,
- 0x23267d83, 0xa3bbd6fd, 0xa109fdcb, 0xc5fef46f, 0x891df60e, 0x84aafb43,
- 0x2c783bcf, 0x757a06da, 0x4db1d17a, 0xd31f01eb, 0xf49623ff, 0x5bb6fadd,
- 0x1e13f3ae, 0x6f8c7e82, 0xa6f8114c, 0xbbf49179, 0x5f44f642, 0xba130b9c,
- 0x6f4ce9e9, 0x71bcd0f6, 0xb416c569, 0x0fa6521f, 0x55e379a5, 0xee27ef97,
- 0xe9e9dd51, 0xc4fe9e24, 0x5bfae6ed, 0x46bc50b6, 0xb24e43ca, 0xdf171eab,
- 0x23b90067, 0xba0870fe, 0x3c7cdc58, 0x73190dff, 0x80001bd1, 0x00008000,
- 0x00088b1f, 0x00000000, 0x7dddff00, 0xc594780d, 0xbbbcf0b5, 0x26effeef,
- 0xb24d9bbb, 0xf379f909, 0x71100843, 0xa9189313, 0x18884dd6, 0x220bb531,
- 0x49716b62, 0xc93049f8, 0x2d16ad46, 0x544859bd, 0x46d10882, 0xe1b80a04,
- 0xfd2b6202, 0x1a8c4582, 0xf68882e8, 0x72dfbd2b, 0xbd3fadeb, 0x8a7e1bd7,
- 0xb4564288, 0xeb6dea5c, 0x2666739d, 0x5c9377d9, 0x9f7b7aa8, 0xbe8f8be7,
- 0xcef33bce, 0xfe73399c, 0xb3339ce6, 0x9086bb1a, 0x258e4264, 0x4ec730dc,
- 0xe631df9f, 0x224c9d15, 0xf433bba4, 0xc84b499c, 0xc421127b, 0x09f04845,
- 0x0e7b6853, 0xd1eddf21, 0xb4897504, 0x734de1de, 0x44b2d116, 0xc345bd7c,
- 0xd57fbde5, 0xd2b3e5de, 0xd32d3172, 0x9912abe7, 0xbd33f58b, 0xfe5a0e69,
- 0xa7aefe02, 0x9f561ee5, 0x58ad25ae, 0x7fbec39f, 0xe5dc84aa, 0x76d4cfa3,
- 0x7d296e3a, 0xb5b89dae, 0x916b8e9d, 0x0bca1789, 0x5d6c16e7, 0x736a614e,
- 0x897842cc, 0x51269bd7, 0xe6364ef8, 0x76d1566a, 0xc8afa7bf, 0x515c8435,
- 0x97b0cde0, 0xff40f9d1, 0x44d21c74, 0x3c369527, 0x37981cfe, 0x7ad7afad,
- 0xcf3a64b3, 0x39fe1ddb, 0xe35ebed0, 0xd0b4429d, 0x77c0b789, 0x8823b405,
- 0x3b76399f, 0x40f227b6, 0xffffbc19, 0xb8954f08, 0x4f115fee, 0x7534ef77,
- 0xf8242c9d, 0xb7fd05f7, 0x2aa1d7b9, 0xbad2fa07, 0xcb871a4e, 0xd0ffc377,
- 0x24ad4871, 0x2c3a1493, 0x8d6e22ab, 0xbfe836ff, 0x7a07e979, 0x513781a2,
- 0xf02ff43d, 0xeb0a4ebe, 0x82fce952, 0x76cf24fb, 0xb2cfdec2, 0xe8959211,
- 0x21e613bf, 0xc3f79ee0, 0xd17f34e6, 0x3f2c7cf0, 0x76e6a978, 0xe4b79c5a,
- 0xf4edaecc, 0x590f79fb, 0xdb865108, 0x9d711ed3, 0xb8cf7fdf, 0x9f495c9a,
- 0x86b0defa, 0x19fdebe2, 0xb69cb3c4, 0x1a435f7b, 0x73efff00, 0x743f95bc,
- 0x853211f7, 0xb5d95f90, 0xd0499df8, 0xbddbe483, 0x53b7e288, 0xd0234122,
- 0xd23d4cc3, 0xd28860c3, 0xfeb095c3, 0xfbe87d74, 0x872707ee, 0x067d7482,
- 0x619100c9, 0xbec4153e, 0xbfa9895d, 0x29eb00a3, 0xb44d049d, 0x0e68da3e,
- 0x5e80956d, 0x50a5a028, 0x5feb093f, 0x07fad895, 0x38e230ef, 0x1d1933dd,
- 0x433a3776, 0xb6001cc1, 0xc6de008b, 0x7694a3a2, 0x64487482, 0xb69994ef,
- 0xe36c7c61, 0x3a52c172, 0xb4dbe2f0, 0xe4dab23f, 0xb01f4f19, 0xbb943284,
- 0x4a3c7152, 0xbe91ab0f, 0xfd470664, 0xab42d38f, 0x394f5c70, 0x36a3cbac,
- 0x1f787cef, 0x014591fd, 0xc89937f8, 0x7d09634a, 0xe8c3a44a, 0xe1c0eba4,
- 0x134f5d21, 0x60bb83a0, 0x7f878a00, 0x536f386f, 0xd89ffbe8, 0x08038425,
- 0x484e58cd, 0x75f5611d, 0xa4c72ccb, 0x86d4f029, 0xddf4090d, 0x03bc1bca,
- 0xd972be82, 0xf3fb48d3, 0xeb22ba73, 0x83c036a3, 0x804bbe1f, 0x29b73ffe,
- 0xefd32856, 0xd3f2c0a7, 0xbf870bef, 0x7df039ff, 0xe0c7c019, 0x65166d27,
- 0x1f8c34bb, 0x80b9fcf1, 0x6f69ebaf, 0x9c6278ec, 0xd98f7ef8, 0xf007ffbd,
- 0x4e1ef145, 0xb45e0174, 0xf0f1aeb8, 0xc7d1f4ba, 0x5e7eb44d, 0x82b1d69b,
- 0xd4bfd3e2, 0xf015f386, 0xbd1a95de, 0xec8de48e, 0xa5a594ff, 0x2ff9865c,
- 0x964f7465, 0x62e22819, 0x2e1fa2f1, 0x485dd253, 0xd23587a2, 0xf0e5ef28,
- 0x517f8001, 0xf8f7cc77, 0x621abfdf, 0x21be09db, 0x3f07148a, 0x92452ba7,
- 0x12fcdd61, 0x3ebd375b, 0x56e40f3a, 0x9f02dc3c, 0x8de5c6ff, 0x7c788ba0,
- 0xbbe01bff, 0x5dbe246c, 0xe84c81fa, 0x80d4bfd7, 0x9fef8a78, 0xf77e1090,
- 0x4d0489b4, 0x4bd6ee94, 0xfaebd212, 0x90212d07, 0xe8a6839b, 0x6067c00e,
- 0xdeb88dff, 0x0acd1ae7, 0xea364da1, 0xf705bf19, 0x171f18db, 0xdc80140c,
- 0xece7473b, 0xee73eba5, 0xa09eda37, 0xa4208024, 0xadda37e5, 0x5ee7ff40,
- 0xb3f589be, 0x5a10a2dc, 0xe986000e, 0x7fe081b8, 0x578a11b6, 0xc7119669,
- 0x0a57b13f, 0x8fbea019, 0x5e4639e2, 0x36ffa39e, 0x8dcb0c94, 0x3800a841,
- 0x07082cfa, 0x8e23699d, 0x59335df7, 0xd16fc745, 0xad0d5e48, 0x2932596f,
- 0x31e0ced4, 0x16d5cac6, 0x4a90e90d, 0x45bff986, 0xdee0bc73, 0xa7275622,
- 0x60ac7970, 0x851243bb, 0x3ab8c9b3, 0x05fa05a2, 0x4bf5a03e, 0x78673ea0,
- 0x7f565a1d, 0x479817f4, 0x7d351ecb, 0x9eaf3d34, 0xba5892c7, 0x17a619ca,
- 0x17c3294b, 0xf1a126a9, 0x2f1b5e14, 0x21226a5b, 0xe963e02d, 0x2ae27234,
- 0xc0e0f4da, 0x5b23a074, 0xa3a92f69, 0xe269c0cf, 0xa7b8510d, 0x36bcf7f6,
- 0xd477b68e, 0xba613244, 0xcdbfa581, 0xf0d5793b, 0xaf380b3a, 0x4b427fe9,
- 0x7e1e38ac, 0xeef14147, 0x89bb62b6, 0x7d88aeb3, 0x92efc0ae, 0x0f2f4c35,
- 0x1fe02bc0, 0x75cde999, 0xfd526f5c, 0x617af28a, 0xafd404d7, 0xce40ffa0,
- 0xf4c0953d, 0x903dc831, 0xaa8e6d33, 0x9e741cab, 0x14d56766, 0xc71b1947,
- 0x27e98367, 0xa788fea1, 0x243d78eb, 0xb5e6a5da, 0xc75ab716, 0x0cf9476e,
- 0x90dd16f1, 0x2c88e4c8, 0xafd17961, 0x10ab3d75, 0xe6e9193e, 0x2839cdde,
- 0x97481b97, 0x89bce81e, 0xc740f484, 0x98248c8b, 0x1162e940, 0x9f48966d,
- 0x881bd78b, 0x8d7eb312, 0x89b97521, 0x49bb85cb, 0x7bbfbf04, 0x357d5c67,
- 0x7d68db09, 0x2d8491b3, 0xd755ecf0, 0x8e7d50b7, 0x85f7d67c, 0xe93df621,
- 0x8bbdf366, 0x3aa3bbaf, 0x79d3f5a4, 0xdd51306f, 0x5d0684ea, 0x97533eb8,
- 0x267ae0f5, 0x1e737896, 0x2bf2bd06, 0xa52b679d, 0xf5f03fdb, 0x6639f812,
- 0xbc00aaaf, 0x9c1ab59b, 0x0361f47f, 0xd524e7e2, 0x6bfeb0e3, 0x59aee41d,
- 0x8dae79f4, 0x8075f378, 0x2cd67b26, 0xaf3c48db, 0x00d4c57a, 0xd0e0d374,
- 0xd32ae8a8, 0xf972c3a1, 0xe5ff8e0a, 0x1cd6d096, 0xa014f744, 0xa15cf2a7,
- 0x9994c957, 0x474c7cb4, 0x6e5a3bed, 0x43f56399, 0x4f60037f, 0x0efdf2d0,
- 0x77eecf26, 0x453d71e8, 0xd057cc59, 0xfb071d0d, 0x9ec55f33, 0x43658e02,
- 0x74609fed, 0xbaf0f5c3, 0xea0f7346, 0xd5fa21e1, 0x22dfa410, 0xfaf01e9f,
- 0xf32b970a, 0xf8a16ec1, 0x1412857e, 0x2f608fc8, 0x3e295fb3, 0xe5a52de6,
- 0x79174e57, 0x67402956, 0xf2bcde4c, 0x4159331d, 0xccf37c87, 0x27fb1f4f,
- 0xb4fe7f5a, 0xfad0315e, 0x746b4005, 0x88915efd, 0xbe0bc617, 0xbe810868,
- 0x7b33d26c, 0x15ff69b4, 0xfbd19ecc, 0x71842c37, 0xc079d05e, 0x084a69de,
- 0xeb03b73d, 0xa2943cd3, 0x863aabc9, 0x16cbe0cf, 0xf439bdf6, 0xa0bb9fb3,
- 0xa413d53e, 0xff26a3ed, 0x74d75846, 0x7a889283, 0xc25ad7f9, 0x381709c6,
- 0x878f5e28, 0x9c7ddd98, 0x4e304956, 0xa1fb0dbf, 0x90b69a7c, 0x33a273f6,
- 0xe543e715, 0x2759da2f, 0x82b618d6, 0x34375dfc, 0x8fed84ae, 0xd3d37ceb,
- 0x8bf8f968, 0xb4e59ec5, 0x0fa7d06a, 0x073d29eb, 0xafbb32d6, 0x016ca35e,
- 0x16fd90fc, 0x8f58879c, 0xb5c59ac0, 0xb2581f50, 0x8f9016ec, 0xc839f163,
- 0x3723127b, 0x166891cf, 0x86c6d3f0, 0xe830dedc, 0x1e89fe95, 0x4dc4af54,
- 0x8dd29f17, 0xa93db59d, 0xee8df863, 0x5f3d21d3, 0x1740ff6e, 0x43d33972,
- 0xca804e30, 0x0ff82265, 0x594c72e5, 0xad995a3b, 0x5495e063, 0xeba9df6e,
- 0x47fc1253, 0x9e5a5d60, 0x97f78ffc, 0x145e302a, 0x4ae922e5, 0xa93cbe46,
- 0xba03cef3, 0xf5875475, 0xfd7a3175, 0x99d983a4, 0x076e06f5, 0x963eb092,
- 0x797d450f, 0xc5ee9a95, 0x7fa704f3, 0xf7c9845b, 0xce1af591, 0x401ab71f,
- 0x20654d8f, 0x23d06c93, 0xc15fe856, 0x0e9ea7fe, 0x3969ebeb, 0xbf58597b,
- 0x4f813f88, 0x46c3be28, 0x1b93ef3a, 0x29bf8c6c, 0x459fa01a, 0xe5f50415,
- 0x63b52d22, 0xd2bde04b, 0xe5d74037, 0xa40e8bd4, 0x8a67f22f, 0xf9ef8a15,
- 0x5033b784, 0xb1ca97bb, 0x30a43a97, 0xafe60bec, 0xe5356c37, 0xb57b5f00,
- 0xcdcf5836, 0xf9b1ca12, 0x1b05951d, 0x9ec97968, 0x13fd702b, 0x2e5d182a,
- 0x2f503909, 0xb1f2e54e, 0xa3d210de, 0x8933fe1d, 0xf6883ec0, 0x1374f68f,
- 0x64ad28fd, 0xae401e24, 0x1421e8ab, 0xd1f6a653, 0x57265ed4, 0x24e79509,
- 0xf2126ec6, 0x8938e41e, 0xf4d503b3, 0x88fa1411, 0xa4a23dc9, 0x7213dc82,
- 0x97dd98f9, 0x3e3b44e8, 0x97d6153f, 0x9b9327ae, 0x6bc425bb, 0x7d456933,
- 0xd0c0f422, 0x9c8f5cb8, 0xbe9906d2, 0xcf813c32, 0xae0e677c, 0x8bd212eb,
- 0x95e844fa, 0xdf20e8b1, 0x88fbe1a9, 0xbc60e9d1, 0x9afec153, 0x75f0934e,
- 0x65a6bc74, 0x853cdc24, 0x50536d3d, 0x693d323f, 0x9e127a64, 0x97d0cbe6,
- 0x5e31faf1, 0xc1ebc61f, 0x77d33d54, 0x3d859d62, 0xd98d3a93, 0x85975301,
- 0xfc08a4b4, 0x94eade35, 0x26bb61e4, 0xa8d18ef0, 0x1f65095c, 0x9ef905c9,
- 0x2a62f950, 0xc4c80fad, 0xa1657c0b, 0xefa1e978, 0xb7fb7337, 0xd7cd9527,
- 0xabf467ad, 0xd8a47d93, 0xc112eb0a, 0x99346f7d, 0x051e81d8, 0xe8db373e,
- 0x1df02577, 0xefa1b7e3, 0x1cc3a48d, 0x2bd57df3, 0x173fd426, 0x0c85b65e,
- 0xb3f88f68, 0x94bfb41d, 0x4ed01bdf, 0x0d8af73d, 0xae39e9f5, 0xefc25d0f,
- 0x7e611e40, 0x41aebe16, 0x008e3552, 0xc6334bed, 0xf6140881, 0xd983b359,
- 0x21ed2359, 0x99139f5e, 0x80cae8c3, 0x8e0bcdfb, 0x4ca00781, 0x7fa021e1,
- 0x7e32716e, 0x5699ec0f, 0x3efa43fc, 0x187ab3e0, 0x40c5fdf6, 0xf7ed06af,
- 0x7d2918e7, 0x8b2ed74d, 0xd1e7483e, 0x83b5699c, 0xfeceab7e, 0x41dddfd7,
- 0xd1ee1fcb, 0xf3ac0311, 0x497369f6, 0xb7f2d8ee, 0x3e3ba431, 0x772fc310,
- 0x9b9754ef, 0x40e5d57b, 0xbf7cba9f, 0x653ae6d3, 0xf9e1d941, 0xc1b5d282,
- 0x87ba7ad0, 0xd5786bc2, 0x8668fa80, 0x1390ffd3, 0x7a26ade4, 0xc86cf018,
- 0xf80a78fe, 0x9ffd023b, 0x5034f048, 0x31148a36, 0x5f5f03fc, 0xb6cfcd30,
- 0x61b7828f, 0x06a311fc, 0x75ad4cf1, 0x173944f6, 0xd2e27ce0, 0x403c3f7b,
- 0x9668bfe7, 0xdf02bed9, 0xcacb6b78, 0x18449ec1, 0x4dfd6049, 0x8bbec21f,
- 0xf5846bb6, 0xb693353f, 0xac3570a3, 0x89b0fa67, 0x6f801244, 0x69dda85b,
- 0x1bfc4ba4, 0x77776fce, 0xf4c3cb44, 0x359dbb7f, 0xf94e0113, 0xe80fb22f,
- 0x37e851e3, 0xeade4ec6, 0x4bfc7264, 0x463299fb, 0x02b699f8, 0x038d9afe,
- 0xfe3a4afa, 0x0cf97ff5, 0xa5e2fde5, 0x823ee1af, 0xb33e63ce, 0xc80a4dab,
- 0x1e0fc5a7, 0xdf62f7c0, 0x8ad32a76, 0x36d7b6fb, 0xa1d95818, 0xcda9f2c7,
- 0xb8b95f6c, 0x3cc10a57, 0x931759a5, 0x6c2f412a, 0x640dd9d6, 0xf1e35e24,
- 0xb7a6c1f8, 0xf5efc013, 0xf61d2201, 0xc7b3127b, 0xb809adec, 0x135a507f,
- 0xac0cbec0, 0x9043f1bf, 0x6b378b8f, 0x902f603d, 0xcff4367d, 0xc37cde2c,
- 0xe85685c4, 0x4aa4d3e7, 0xb96d13f0, 0xd0543c01, 0x7e6217ce, 0xf4f5c89e,
- 0x6ae5bcbd, 0xd0f1f805, 0x62ff0666, 0xd0077187, 0xd17ff5d1, 0x1ac97f22,
- 0xb93b07e2, 0x089def5b, 0xda6cad7c, 0xe7d61d3e, 0x1ae99983, 0x224bbf6c,
- 0x638bc076, 0x5fbc0a69, 0xe03ec92c, 0x8df586e3, 0x4f76b1b5, 0xc7f9939d,
- 0xa597b32a, 0xaf91580c, 0x6d3e80e6, 0x08f94cde, 0xdef59fc6, 0x0d70eeef,
- 0x6b3495f3, 0xa1532dfd, 0x1e7567ff, 0x7b21bbe0, 0x90b7d366, 0x4c2fe0be,
- 0xe398b5f1, 0x99f2abeb, 0xa4f822c1, 0xeae400b5, 0x05ad15e2, 0x8cec51f6,
- 0x44d93e21, 0x213272f9, 0xf9129da7, 0x1993fc02, 0x63bed54e, 0xb59a7dac,
- 0xc67a8350, 0xedde21e8, 0xb74a99f4, 0xb71fb0c9, 0x6f58c925, 0x7d23d24b,
- 0x77ba7fcb, 0xbce86fe7, 0x2ffa749e, 0x26ccbe80, 0x6bd062de, 0x455ed44a,
- 0x35405acd, 0x136461da, 0xcc89afb3, 0x92eb85fc, 0x31558ec9, 0x125373fb,
- 0x39504fdb, 0x73f405f1, 0x1f3fddee, 0x64b6fc06, 0xec053c7d, 0xcfc5c085,
- 0xb35fe0f4, 0xdf284bf6, 0x011f1ead, 0xcff409ba, 0x868a0b24, 0xc3c072e5,
- 0xbcfc46f4, 0x98e924e6, 0xb145751c, 0x82974a9f, 0x41076ee5, 0xd4b8da7a,
- 0x46fef68c, 0x4004c857, 0x2b7cadff, 0xee46a7ec, 0xfb6e340f, 0x740b5785,
- 0xe3ec921e, 0xfe30aca1, 0x17986c18, 0x3e71d2d2, 0xc41796dc, 0x8f4a9ef2,
- 0xb759d696, 0x5ccae3fd, 0xce9f53c0, 0x9eaded03, 0xdf980481, 0x244edb87,
- 0xb61afcc0, 0x0dde7169, 0x16524fa1, 0x09cb0d16, 0x905fc69d, 0x5d827604,
- 0x42f8c2b2, 0xedb87c5e, 0x560594d3, 0x7d403fe6, 0x5e3a3a5a, 0xc9cacecc,
- 0xdff5fdf0, 0x6672eb64, 0x72042197, 0xf9898cf0, 0x33bb45f7, 0xaeffa636,
- 0xcdfe124b, 0x3fd02cde, 0xa2796543, 0xf7c4e406, 0x2efe6ced, 0xf0166f7d,
- 0xba9247e5, 0xb52b259f, 0x52e54424, 0x84894ae3, 0xf33fcca8, 0x20594bdc,
- 0xfedc3fff, 0x0c5d5652, 0x89dff17c, 0x6a498de7, 0xaff09a7f, 0x0ce1f4ba,
- 0x63ceaf18, 0x31cc7e60, 0x3da2abfc, 0xa4fccf59, 0x816b9483, 0x8377c50e,
- 0xd82f660d, 0x18c483bb, 0x5d5b9702, 0x7ad7f73f, 0xd82ef9bd, 0xe3ef88d7,
- 0xf40d5ffa, 0xfa92e144, 0x5827f424, 0x9f28a28a, 0x77fcb4bf, 0x918cf5d1,
- 0x8d9d74ff, 0xc6cd4eb0, 0x0471e1e8, 0x780f43e9, 0x6558b7d3, 0xed2957d0,
- 0xf7a2be8c, 0xe59f706b, 0x814c2732, 0xdd3e80b8, 0x76f9056d, 0x43b9817d,
- 0xb3ef0893, 0x333ed042, 0x0bbf10bd, 0x0ffa3156, 0xc3f410a6, 0x095691a5,
- 0xe676b4fd, 0x20a8cfe7, 0xf8b455f6, 0xe76624b3, 0xbd6789b8, 0x5fd70f36,
- 0xc70738c2, 0x400fd08b, 0xcf2f2047, 0x23a44648, 0x7b425fa9, 0x9de9ab54,
- 0x458efd07, 0xbac52b57, 0x2a1aba72, 0xeae89dff, 0x23c74149, 0x20afcae8,
- 0xa38db95d, 0x23f715d3, 0x127b765f, 0x36bec1f4, 0x2d6be395, 0xfbd13f97,
- 0x2eafc28d, 0x796b7cc1, 0x6b46b57f, 0xc47a0b58, 0xd638fba3, 0xda89bc3f,
- 0x4a7fb0c5, 0x3e9ebb03, 0x57cfb5c7, 0x6448dd70, 0x9608fc00, 0x2fa88dab,
- 0x7244d31f, 0x9d498ec0, 0xe81bbad4, 0x515e7523, 0x0bee3b49, 0x17a01c33,
- 0x7fa2d740, 0x1f34e974, 0x1c5823b3, 0x8f533a28, 0x07969cfb, 0x8ecc7d2b,
- 0x097e41a8, 0xa02936ac, 0xe7f5a707, 0x08fa072c, 0x031e232c, 0x86cf71d2,
- 0x9dcfe045, 0xac80f56d, 0xb225f113, 0x9d77cd52, 0x05538b12, 0x0f18226f,
- 0x0d338e1b, 0xe821f96c, 0xf422c6ff, 0x8ff90c9b, 0x8dea09f3, 0xd6438dcb,
- 0xde2136ad, 0xd068fc45, 0x25741146, 0x3fb191fa, 0x7a646892, 0x407ca468,
- 0x054a73ff, 0xa1eb7f11, 0x3cca2e9c, 0x5ff823de, 0x456ca3c4, 0xc8b01cbe,
- 0xbf399c2b, 0x049c4332, 0xb36c77fc, 0x00fd8416, 0x19598dfa, 0x694fcadd,
- 0x50e92028, 0xaaab9ffb, 0xbcca6233, 0xc1f7d0fd, 0x5f573755, 0xa877fa8b,
- 0x7aa6c01e, 0x26bd9459, 0xc355e205, 0x83b532f5, 0xff127d8d, 0x1be6e2be,
- 0x941eaaa8, 0xee7ff8c4, 0xd11f82f4, 0xc5e35444, 0xf5c727c2, 0x5bfda3af,
- 0x383ede15, 0xefe826ee, 0x7e5112a9, 0xe14bd3a0, 0xf753ab5b, 0xdfe52887,
- 0xf78c4143, 0xaf0e537f, 0x6c319d5a, 0xe17b501f, 0xc36549cf, 0xefa3c276,
- 0x495d76d5, 0x3fd8b2c7, 0x15fe83d5, 0x92e03efa, 0xe7890ed0, 0xf49704d7,
- 0x656becd5, 0xe09d7d84, 0x74f5f662, 0x2fba4960, 0x581df941, 0xdf6023e6,
- 0xe9c4bb52, 0xe3e4bb42, 0xfd680753, 0x1f9f59b9, 0xbb40a71e, 0xbec1e67b,
- 0x4651702a, 0xa9d81bf9, 0x6f94490d, 0xaeccfd8c, 0xe31cfaa6, 0x967e8205,
- 0x38fd39d8, 0x97fc0482, 0x32a4fbdd, 0x5a4277a0, 0x6ba36eb3, 0x379703f9,
- 0xfbe1c713, 0x9779f8cd, 0x205f98bb, 0xa1b55850, 0x26dffa00, 0x5617b011,
- 0x594f1e15, 0x63a9fb80, 0xbe630b29, 0x0a7bec6b, 0x53b8d9f1, 0xcb1e2a37,
- 0x7804e1a9, 0x45f9796c, 0x338dc82f, 0x42650921, 0x04ea1c83, 0x41a1b6bb,
- 0x29211603, 0x03bfcd0d, 0xf5731fe3, 0x5f9d3c64, 0x8177d706, 0xa706b79d,
- 0xbfe9bcc2, 0xd1b57d12, 0xc4e508b7, 0x2b46b9c6, 0xeb0a91c6, 0xd83c41ee,
- 0xc3c05ecd, 0x34aac2aa, 0xe665a718, 0x4dc63b74, 0xf5073da8, 0x077e0f2d,
- 0xae0245fd, 0x1aba7d55, 0xa9ca77b0, 0x0a0bf75d, 0x4673a677, 0xf5f2878d,
- 0xe2eeed38, 0x5710acfb, 0xff8e5d1f, 0x174664bf, 0xf82f921d, 0x8ff452ed,
- 0x677f5892, 0x1fb31f76, 0x55e9716f, 0x5c5bf1fe, 0x36bdaecc, 0xe4069918,
- 0xb01e5fb2, 0x201d5d80, 0x5f604fde, 0x65567c4d, 0x9313ae3b, 0x0536ae5f,
- 0xd74666fd, 0x425763c0, 0xee32b5fd, 0xf03c8867, 0x5cf71863, 0x8ab3cba7,
- 0xf2803e70, 0x90214583, 0xfe5cfbc7, 0x96fac2ef, 0xdcf57b73, 0x831637cb,
- 0x5fd8517f, 0x5099cf9d, 0x94da766f, 0x4e406b27, 0x796649fe, 0x6468c603,
- 0xc967ed1a, 0xb71c4ee7, 0x84ea14d3, 0x68f60ff5, 0xeac9beb1, 0xdbf4045f,
- 0x29faeb78, 0x0d608798, 0xd51e8015, 0xd008bab9, 0xa0bedd31, 0xf2e8c51f,
- 0x4f238811, 0x2597ce0b, 0x767117d0, 0x63f034cd, 0x85c5bee1, 0xce2adc7e,
- 0x1529e31f, 0x2b22329c, 0x34917c74, 0xf9bce76c, 0xbc32d9e7, 0xc68ff614,
- 0xd5eefcc8, 0xec04cd73, 0xf448f25c, 0xf8fc06b0, 0x7b0108ae, 0xc257b9b8,
- 0x5f0b9a71, 0xe5fec3d0, 0xdfc65eee, 0xa0dfbe01, 0x78c2cbf8, 0xedc2cb94,
- 0x67460e81, 0x1c787224, 0xb679f9e0, 0x76d0849e, 0x42577e31, 0xc0793396,
- 0xf8f3d3fb, 0x37f73343, 0xf5884d51, 0xcec25cab, 0x5b8ea158, 0x49d771f0,
- 0xf016af11, 0xfca92c5a, 0xc3d9e2e4, 0x183c4fbf, 0x1bfc0e9d, 0x7e05f4a5,
- 0xccd20da7, 0xf3a75e7b, 0xc93650db, 0x9b98a603, 0x04b69392, 0xa45253de,
- 0xfbed2f78, 0x0dd03a64, 0xafc821a9, 0xda957d2d, 0xeb8b2b66, 0xa58c5e60,
- 0x53ea07b5, 0xfd442aef, 0x5993710c, 0x1cb3fdf4, 0xe65669af, 0x08af73c7,
- 0x341c40e6, 0x9eefa0f1, 0x2bfc61d7, 0x4d87e8cc, 0x73636ba5, 0xf82573e2,
- 0xf8a4dcbd, 0x7767e800, 0xf00252ad, 0xc41f7888, 0x0d16670b, 0x3d38e3e7,
- 0x7d2b7792, 0x983976ee, 0x6a40cedf, 0x1f785e00, 0x1fc70eb2, 0xe3079fc1,
- 0x9e0f7c42, 0x32c2d2e7, 0xc0c9fffa, 0xefb8ed96, 0x483e6037, 0xef1bdf6d,
- 0x35adfe80, 0xc78c5e92, 0xd52dd9c6, 0x02beb789, 0xf6783ff4, 0x018796d4,
- 0x7af6dbae, 0xce25ef30, 0x9bfeb91e, 0xb33f3ecc, 0x3f00a2dd, 0x9b72e56c,
- 0x27c88fda, 0x3f6c5dc1, 0x1e476f19, 0x27f7a975, 0xddafe543, 0x05f0648e,
- 0x0b3cb3b7, 0x7c03df21, 0x70d837ff, 0xd7f4013e, 0x13c10dbf, 0x8db20f97,
- 0x8ff483e7, 0xe6f20f9e, 0xe8104b0e, 0x09af7ce1, 0x8f3292fd, 0x45b59d7b,
- 0x7524001f, 0x01cf0e38, 0x72581b79, 0xca1ae7e6, 0x8b0f727f, 0x8f12c923,
- 0x4b4afd33, 0x6bcc5c58, 0x808bff07, 0x774c765b, 0x3e408b7b, 0x713779b2,
- 0xb39351bf, 0x2ef16140, 0x94d43796, 0x7b002e10, 0x87ae55ea, 0x5e2ccc9a,
- 0x87d0e835, 0x26a35d61, 0x185d1fff, 0x93df62d7, 0xd8941a5f, 0x884f56b8,
- 0x0a2775d9, 0x9d3d6135, 0x07d80f67, 0x59ab6eb0, 0x9f286b7c, 0xc7a1bf60,
- 0x5890e2c0, 0x41ccec1e, 0xe22f5939, 0x3f99fb56, 0xa7c79eae, 0xf45acc4e,
- 0x745ca40c, 0xb035ad48, 0xaab0bb3f, 0x0fe8fd12, 0x9f13393c, 0x7ed52bf5,
- 0xa7e045ff, 0x238e1bcf, 0x1d7bff0b, 0xbd99e8f1, 0xd447ec3c, 0x5945fb50,
- 0xbe815729, 0x15f621cc, 0x2095ff20, 0xe9abad5d, 0xc13e83b3, 0x12349768,
- 0xa77239d8, 0xfbeb0ccb, 0xf5068768, 0xe343b054, 0xcf027685, 0x8fccc939,
- 0xf3324d74, 0x0aa5d06b, 0xd9e238c1, 0x0771e3a1, 0xa272dfde, 0x2353c309,
- 0x76a3e7b1, 0x7d3466b9, 0xd0ebfa2d, 0x4fc11ab5, 0xa0d4cd17, 0xde82fbdf,
- 0xc6fd173b, 0x790202ce, 0xd6b61d54, 0x1eac3595, 0x2982e779, 0xebfac3ea,
- 0x12486664, 0x33f209c5, 0xcbe79935, 0x616de1c5, 0x1f1cba97, 0xa90c698f,
- 0x3fc4f5cb, 0xc58f2d21, 0xa7df620f, 0x75f93326, 0xc70e5561, 0xe3f307b7,
- 0x6797fcc4, 0x3269bc30, 0xb33733d9, 0x41d02e6a, 0x7397c42e, 0xabfcc9e0,
- 0x7f082674, 0x092e75ee, 0xde0e23e9, 0x80da300e, 0x3c593abe, 0x3f856ff4,
- 0xe4caee1e, 0xbc1fae14, 0xd65f886e, 0x8f18f5db, 0x493f5cbe, 0x0e5ab25d,
- 0xfebf950d, 0x27db2cfd, 0x9e796a75, 0x50e51d8d, 0xe5cd9d9d, 0x11d9e484,
- 0x6a56f786, 0x3a3cc02f, 0x3f6025b5, 0x8ad5bb4c, 0x6fd968f3, 0xf3703f42,
- 0xd02c81b2, 0x03552de3, 0x89b71005, 0x2b402fc7, 0xdb45f90b, 0xfee8b9d5,
- 0xd884ec03, 0x4b5f3c76, 0xfef5671d, 0xf60cb920, 0xbf762739, 0xb76afa01,
- 0xfd15f509, 0xf00603bf, 0x839cbcb3, 0x3687ccf6, 0x0bb41b7f, 0x9e5bc398,
- 0x85bb01cd, 0xfdcd4dd9, 0x0bb01e86, 0xe2623aeb, 0xd59ff07c, 0x591fb8ea,
- 0x3bff44e9, 0x6fbf56e9, 0xddf714d8, 0x17603888, 0xbd3af3ae, 0x3bf0227f,
- 0x7f983bd5, 0x8351b670, 0x5af50379, 0xb79022cf, 0x7b6a4d67, 0xad8dacfc,
- 0x75a196d7, 0xb5bda0f6, 0xf675cc65, 0xd73ac014, 0xb63f886b, 0x6758669f,
- 0x7c4dbeba, 0x78becf9d, 0xf3ac0175, 0x2eafbbc7, 0xa75e7580, 0xdf02f2eb,
- 0x5bfc39b4, 0x27bf6993, 0x3da1f06f, 0x2f58f225, 0x24f71e97, 0x432bfdab,
- 0xff21ffe5, 0x30fa58ca, 0x5a87043c, 0x4af4ba1f, 0x3a83c806, 0xd5bfe1a3,
- 0xaa37f08b, 0x4068cf1f, 0x3ffec77f, 0x0766ba7f, 0x2d7e81c8, 0xbfa223da,
- 0x0f3fb2fd, 0xeffda1ec, 0x69413db8, 0xb91bfeec, 0x246dd85f, 0x88abf041,
- 0x27b0807d, 0x5bf1e5e3, 0x251f3e7c, 0xbb006f7b, 0x8dfacbe6, 0x633bf81b,
- 0xe76653e8, 0xcc3c936e, 0x7e8bdc6f, 0xf37d96e4, 0x3e27e0ed, 0xe37e621d,
- 0xd18b1796, 0x7a18dc6f, 0x65790c2d, 0xc3b25fa4, 0xdb71a3fe, 0x07c804b1,
- 0xba5cfb10, 0x8c933daf, 0xefdea8f4, 0x489d0e9e, 0x0a01fc80, 0x71e82577,
- 0x07aa2b8b, 0xfba16fba, 0x6c23c83d, 0xa187a391, 0xc11716df, 0x3ed2973c,
- 0xc44ffef5, 0xf4fa3779, 0xf6377728, 0xd790214b, 0x4e7f7a29, 0x9235e806,
- 0x2078c761, 0xc98bb3e7, 0xb294d4de, 0xf7baf8d8, 0xe1e4e4f3, 0x0d81b07c,
- 0x03bf4889, 0x83b5e23a, 0xe360db3c, 0xf9464cf2, 0x8e4dc7f6, 0x630fcb10,
- 0x6218ffed, 0xc8a34276, 0x5849930b, 0x68b5eba6, 0x585df7b6, 0xe1f7906f,
- 0x5b1f7938, 0xbbfb7116, 0x52f51849, 0xca181933, 0x176d8b0f, 0x7887d71f,
- 0xd21faab8, 0x1f80ac5a, 0x07ab4eec, 0xb8f881e0, 0xf6c83eba, 0x961f94eb,
- 0xafd30c96, 0xbf410758, 0x1d0fdc2d, 0x08fe3868, 0x4fa06fd0, 0xd77d83b2,
- 0xdbf461e4, 0x905bf744, 0xefcf1b47, 0x379d57a4, 0x97ff163a, 0xd1f5a8a6,
- 0xe4eff950, 0x453ebd5f, 0xfe62f7cd, 0x343f6fc2, 0xf1897ecf, 0xe2bcdc65,
- 0xfef1a9f7, 0xbccfb176, 0x2738795c, 0x870f2d45, 0x79677fca, 0x2eb43758,
- 0x1d50f2f1, 0xe59bf8cf, 0x5c70ffe1, 0x4f7d99e3, 0x03bec027, 0xa7d878ec,
- 0x63aedcac, 0xfd1413f9, 0xe9e2f1b1, 0xc597ab5a, 0x5ea2b54f, 0xa657871d,
- 0xe33c38f3, 0xd45ed7eb, 0xf335bae2, 0x3ef37138, 0x369a079b, 0x9c631758,
- 0xba7e3e36, 0x0e9eec84, 0x471f154b, 0xce7e026d, 0x753c74bb, 0x8f8b0a75,
- 0x20725852, 0x6f3e216f, 0xa7eb35eb, 0x7598fe49, 0x1ba22aaf, 0xdbe85182,
- 0x33890728, 0x14c6fde6, 0xc6bf5766, 0x7adc8bf3, 0xad608e76, 0xfd85e2cd,
- 0xf2c35b8c, 0xf2e07e9f, 0x18bc826d, 0xfaf1c2a3, 0x4344edf2, 0xae8cf2f1,
- 0xb04ae517, 0x3901ead9, 0xa237fc28, 0x5e3c8bff, 0x3ffad971, 0xfcf7de8e,
- 0x3c7bd30f, 0xf18bf67e, 0xddb8d6fe, 0x151b8a7a, 0xe3a4105f, 0xca5f1023,
- 0x63bf4919, 0x1d1633f5, 0x1df14d1f, 0xfe766149, 0xb857cc14, 0x2963394c,
- 0x3f009e8d, 0x5063d911, 0x7e09afc0, 0xab8fd412, 0xaa3e78d3, 0xe6267ca7,
- 0x2573b369, 0x58ce1ce2, 0x4307e476, 0xc8ecc3eb, 0x9f5cc60f, 0xde47672f,
- 0x087df0ee, 0xd2b27674, 0x1e01e78b, 0x61f851b5, 0x619cf87f, 0xe22e73d4,
- 0xfca5c63c, 0xd1c45f2d, 0x4bff17d5, 0x3a92d472, 0x75f95d96, 0x13cfd1cb,
- 0x763a7fc0, 0xff9e413f, 0x45bc4119, 0x6b6449f7, 0x103b5f8c, 0x197bf961,
- 0xfef15e1c, 0xa1bef83f, 0x999bd521, 0xfc7fdf4a, 0x1248d1ae, 0x652e9ea9,
- 0x67c5b172, 0x4b42b8c5, 0x697fcb2f, 0x8e504659, 0xb4df80b7, 0x3389fc08,
- 0x4f7dd809, 0x2013fd3a, 0xf7d1ee8f, 0x226d41ac, 0x1167dbf8, 0xbdcef3b0,
- 0x9c33cacb, 0x6798c9fe, 0xe3006cb7, 0xceb3dd18, 0xf9561e60, 0x523e9f17,
- 0xa2f08a53, 0x5065c13f, 0x3db9679f, 0xf133c995, 0x6f843d9c, 0x3ff3fa2f,
- 0xdaf9606e, 0x0ed79701, 0x0fe1097e, 0x8c1128b7, 0x61ecb42b, 0x96b95bc6,
- 0x30fc87cd, 0x65a9e903, 0xf831654f, 0xa9af494d, 0xf2ddec18, 0xdf715bdf,
- 0xe983f1f7, 0xcfb12798, 0x7c02afe5, 0x37434ad8, 0x4d9a7d81, 0xf7bb01c7,
- 0x1537dde3, 0xfe42dc03, 0x39bf03ad, 0x4d9d7f1d, 0x462717ed, 0x64bf7796,
- 0x33ee2647, 0x5afeac9b, 0x133aff98, 0x9db82383, 0xfee14f9f, 0x17fe78f2,
- 0xd5aa9f7c, 0xdfa938e0, 0x5c9c6235, 0x3a7585c8, 0x0de637e2, 0x9e1293cb,
- 0xf1701867, 0x99fec73c, 0x97854f2c, 0xbe752ead, 0x37c947e7, 0x253c0094,
- 0x8b57f2a9, 0xe4475967, 0xb2a4940b, 0x5cea9678, 0xb322e5a2, 0x7aed73a7,
- 0x4adb27a4, 0x9454e2c2, 0xbfaec09e, 0x06991a36, 0xbbf2bce7, 0x9e02d7c3,
- 0x20d45ff7, 0x4799e49e, 0x28933e30, 0x56f2f1b1, 0x004e740f, 0xadfd8c7f,
- 0x954960eb, 0xa0157b2e, 0xefa749f4, 0xffe45481, 0xaf1842d6, 0x2c745fea,
- 0x059f72bf, 0x59dd0cfd, 0x795f984d, 0xea833dee, 0x33fc4e7c, 0x3e605648,
- 0x6fdf6e65, 0xdb604e31, 0x279a8d23, 0x15aa44fb, 0x1825a6f9, 0x36398e99,
- 0xce50bad7, 0x1f7efbca, 0xee43bb04, 0x91024194, 0x03579d0e, 0xcb82d3e7,
- 0xc7f5fa09, 0xb035db77, 0x3f3cd95e, 0x7fff7066, 0xbee3f14e, 0x4205c445,
- 0x3749bf2c, 0xc7ec08f0, 0xdf03e5e4, 0xec7ac20c, 0xc05a6871, 0xb68baa7f,
- 0x9f65dfa0, 0xd9acfd05, 0xbe2b797d, 0x2bd9cb41, 0x1b0718ed, 0x6ceee57c,
- 0xf3a7e7cc, 0x3cca3f1c, 0xf9654a1f, 0x8b3ef248, 0xfc99f406, 0xa8c0f104,
- 0x0aa523b2, 0x968a7ee1, 0x07df3f69, 0x8e1e4a7a, 0x0a3f829b, 0xaa4354f4,
- 0xcdd0077f, 0xa5a4b9d0, 0x892e7666, 0x2db5a79f, 0x9c176f7d, 0xfdc2d9f7,
- 0x4ff707b9, 0xbffe859e, 0x7582594e, 0xf960e0b8, 0xb2a42f95, 0xfc48e278,
- 0xd63cc41f, 0x65bf7ddc, 0x02e28d7a, 0x8c7597fa, 0x574ee45e, 0x5f19f80f,
- 0xde63f049, 0x611d75ee, 0xeccdc62d, 0xa35c7f27, 0xd677ecc4, 0xb2d33d33,
- 0xe3cfed93, 0xd29737f7, 0x921ebf2f, 0x0cbf4c33, 0x764eff95, 0xf4e2efcb,
- 0x6fbcdfca, 0x5efdea21, 0xbf12fdbc, 0x8f611bbf, 0x9637f5c7, 0x50f2231d,
- 0x61c786aa, 0xd84db4f6, 0x9e554149, 0x9f95954e, 0xfbaa343b, 0x47649f5f,
- 0x3b79107a, 0x72caed29, 0xfe8fdbc8, 0x4edfa088, 0x3c8915e4, 0xcb1560a2,
- 0x8c6a09f7, 0x4dd12e78, 0x687f30ba, 0x124b091c, 0xf006d7fa, 0xe42a6dfc,
- 0x4fefd111, 0xff62e6a4, 0xa567899b, 0x22a6e516, 0xc826fc01, 0xb802493f,
- 0xc1161b43, 0x7159b778, 0x9fe4133c, 0x033ee124, 0xc7dd39f9, 0x35a756f2,
- 0x3a43b8b0, 0x4e50cfd5, 0x697467cf, 0x3cc7ab9a, 0x22579156, 0x5e044ff2,
- 0xd3be38aa, 0x01ca2c27, 0x54f228b9, 0xe53d99d6, 0x39c11760, 0x1cbf3868,
- 0xbcf3346c, 0x776fd613, 0x4f9e2e63, 0x2979b2fe, 0xf91678f1, 0x7f44fa29,
- 0xe46cbba6, 0x37416739, 0x7089eb31, 0xecc7dc6d, 0x3d06aafc, 0x71b063ce,
- 0x072bfa06, 0xec04351b, 0x037eaa81, 0xf1b8c3a3, 0x93d5ce36, 0x872bf430,
- 0x054f204c, 0xceca5c3d, 0x085beba5, 0x6d83e0fe, 0x524ef33b, 0xd6d43fde,
- 0x9341cf8b, 0x12bdabd4, 0x03cea1cf, 0xb48df5c9, 0x1af95afc, 0x6689b7c8,
- 0x6a849449, 0x96faafd3, 0x60c4f54c, 0x287df472, 0x7be1bedf, 0xba3e3cac,
- 0x9beda245, 0xed623ed3, 0xa9d33681, 0xefff5ed8, 0x57eb41b5, 0x267f7fd0,
- 0x351cf9f1, 0xebf464ee, 0x3f41123c, 0xa57fd712, 0xadba3e4c, 0xdc9fb47a,
- 0x54951f3c, 0x8854fcf3, 0x7b72d0f8, 0xefc6315a, 0x13d944ad, 0x030cfa81,
- 0xe30827b3, 0xccf1f687, 0xf4e46d6f, 0xbf843240, 0xf208206a, 0x81c73dae,
- 0xe7c90fdf, 0xf310863d, 0x8ff1b19b, 0x9e0578be, 0xb679124b, 0x6733d884,
- 0x36f9815f, 0x135af2aa, 0xdb29a73f, 0x7bbce133, 0xc1db8ebb, 0xe6c47cbc,
- 0x04f3885f, 0xddf6a4be, 0x507e1bd1, 0x7674fc04, 0x41f30fef, 0xa7a8ddce,
- 0xe45184fb, 0x933a595a, 0xeb3a836b, 0xb77e894a, 0x016fe6c6, 0x32c77c3a,
- 0x311c3a6f, 0xa9549b9a, 0xc1bc0077, 0xd780b4e7, 0x8e274e64, 0x473e0cd9,
- 0x3c824fb5, 0x1b1376d4, 0x7b2fd937, 0x7f845cf1, 0x851b74b6, 0xf2625dbb,
- 0x88947e9b, 0x92a15576, 0x26c87108, 0xcb55ee7e, 0xcb9688e5, 0x1b6e7f91,
- 0xc1a997c8, 0x13ef4d78, 0x7fe7b05a, 0xbfe2e3cb, 0x9f9f51cd, 0xb83371e8,
- 0x569673c5, 0xcfe802b9, 0x523aaba5, 0xab40e94b, 0xfd7084f7, 0x37186d32,
- 0x772b603e, 0xa5efd00f, 0x42951fec, 0xe67b773e, 0xbe214a8f, 0xdc7a75a7,
- 0x8f7298be, 0x357fd19b, 0x59bc03b4, 0xefc14b5a, 0x97f5b5fb, 0xfa2bfb48,
- 0xf50dfdf2, 0x35706063, 0x7059a319, 0x9359fe6e, 0xffac3b7f, 0x30c7f985,
- 0xfa40fc2e, 0xaf21f7d1, 0xd18ea8e3, 0xe793305d, 0x69ee93e3, 0x0eee9409,
- 0x5e7839e7, 0x1ebf8b0a, 0xa8fcc09e, 0xfcb17e54, 0xe61289d1, 0x66ced621,
- 0x266f62e7, 0xfa018e91, 0x68593a3d, 0x1c8af4fd, 0x796b7fb4, 0x77f4c89e,
- 0x7eb0097c, 0xca8f6fd3, 0xb2c7f720, 0xeb746ee7, 0x3e188194, 0x975149be,
- 0x97542e6f, 0x9751e5bf, 0x97f15dbf, 0x9365b109, 0xe8107bd9, 0xdf8955f5,
- 0xb10d7147, 0xa4ba7f23, 0x935dd820, 0xe7e74a5f, 0xe53e5989, 0xf17ef94f,
- 0xf5820aa5, 0x5b8d3b29, 0xfdcf508d, 0x3f5e5aef, 0xd98c4dd9, 0xea07c44e,
- 0xf3c4a8e9, 0x93185d32, 0xb1ef7b22, 0x6d343f33, 0xecfda7ab, 0x1f20af9d,
- 0xe43558a7, 0xa5ebc09b, 0x04c3b446, 0x289bb45f, 0x963c537d, 0x819a338f,
- 0x9e75dbde, 0xd697d0f5, 0x2f40506c, 0xb1182657, 0xc83fed6f, 0x9b96d7a8,
- 0xcb4ec40c, 0x63371f07, 0x3e265cb9, 0x20a123c8, 0x51e786ce, 0x0ad4279d,
- 0x60eda5f3, 0x160eedbe, 0xeb96d757, 0x7ed3cf51, 0x3e0f5d71, 0x9c1109a1,
- 0xf98ca57f, 0xc1661ca6, 0x7c247477, 0x6be734ff, 0x518486ad, 0x7bb3a58e,
- 0xfed10e39, 0xbf83dfa1, 0xdfa0f6d2, 0x13b950ae, 0x271bcfea, 0xc0a8b9e0,
- 0x592f79d0, 0xda15c003, 0x67e87cb9, 0x3f04f2e7, 0xa542f90b, 0x852bbe5e,
- 0xf8149030, 0x053b050f, 0xfb8204ec, 0xccb71100, 0xd97a8a34, 0x0da6fd0b,
- 0xe0e767b5, 0x56997852, 0x60253585, 0x27f1bca7, 0xe4b1c3a0, 0x931a6145,
- 0xe429e213, 0xc3c316c5, 0x19af6a47, 0xef0a3d6d, 0x9b0cf2c7, 0x84c7ad9d,
- 0xad4be00c, 0xe8064279, 0x478776cd, 0x0b94c5f1, 0xae6a76e9, 0xdb45f013,
- 0x35adf2d1, 0x18e5f2c7, 0xc69854fd, 0xe76d00e4, 0x1ef0a24d, 0xc0192934,
- 0xdb8ca3bf, 0x7cb5c6cc, 0xe9bae3bd, 0x41ddb4b8, 0xb6971d1b, 0x843266db,
- 0x8da30935, 0x78c0a15f, 0xa2971a97, 0x833f911d, 0x93a503af, 0x0ec1f820,
- 0xaf4834da, 0xf2be7833, 0x1e3664c1, 0xffe75429, 0xf8e99be4, 0xcea65f98,
- 0xda51b9f7, 0xfa7a01d4, 0x56c25369, 0x3837cfa0, 0xfcb61cdd, 0x69d83e43,
- 0x382bcc6f, 0x71267284, 0xd1100779, 0xf28bd20c, 0x0bc1c846, 0xa1cac769,
- 0x8de1e54c, 0x0fde7469, 0x079d1ee4, 0x3c721f9d, 0xa5f68f9d, 0x56935bd9,
- 0x417e1236, 0xa06e029f, 0x218be053, 0x5f838d3a, 0xa5b91bd0, 0x37251317,
- 0x9e173b53, 0xa425eec2, 0x2bc5e595, 0xa3f3c399, 0xd8dd3e44, 0x18d1e6ca,
- 0x746fb844, 0xe6f318fc, 0xffe718ee, 0xf735c01e, 0xbef04fca, 0x3f9ee222,
- 0x1477f601, 0xc96979de, 0xc6f07bff, 0xca97f9db, 0xcf8b1f0f, 0xacd2f8c5,
- 0x1e1f989d, 0x7cc56d98, 0x3f3c69f1, 0xc1a0d036, 0x0fdd00b8, 0x85a23ee2,
- 0x97204318, 0x5cbb72a7, 0x45785b9c, 0xe0d6fe62, 0x7d45068b, 0xde7c513f,
- 0xe97982b8, 0xbf2c65c1, 0x6f7ec87c, 0x1f7ed056, 0x9cfc73d2, 0x93317744,
- 0x28f7dded, 0xe689fbea, 0x44fdf513, 0x7121ed0b, 0x32948a6f, 0xdddfbf9c,
- 0xbbfe9043, 0x9f58b96d, 0x88fa65ae, 0xa3e9837c, 0xbaa21cee, 0x399d691f,
- 0x94f31134, 0x46c15e78, 0x6ff83fc8, 0x9a7e0bf2, 0xc2fca926, 0xc9afe543,
- 0xf0e6dc2f, 0x5b3a02ef, 0x92cde458, 0xebd63d28, 0xd2987f99, 0x8108a6eb,
- 0x7cd6c574, 0x44dda7d8, 0xe4d2df5a, 0x02febd21, 0xa53275e9, 0xbd153cd7,
- 0x15fc61ee, 0xe82d2144, 0x0b4e8875, 0xfd00f3e3, 0x6edc60fb, 0x5befef47,
- 0x6fd35f60, 0x683cb0f0, 0x2f19d796, 0x59e0621f, 0x5c783320, 0xf3c22d5a,
- 0x0f13f43a, 0x4df0e5cf, 0x12a69d2c, 0xf1631fc6, 0x824caa4f, 0xfe6192b6,
- 0x6ffd9931, 0x7c43b8c1, 0xcfdb08f4, 0x6b5b808e, 0x739a7a45, 0x057f8b07,
- 0x802ecc2c, 0x1d1a5838, 0x7f1f267f, 0xdaafa74e, 0x4a66ed01, 0xfa610f0c,
- 0x167f850f, 0xdfd99faf, 0xb370798c, 0x63c775aa, 0x2f2120ed, 0x2e6ddc45,
- 0x7dab7f6f, 0xd2f20ea6, 0xe3aad767, 0x64ef735d, 0xb339b6f1, 0x25ccfdd5,
- 0x7cc13fab, 0x03aad24d, 0x53bdcdfc, 0xda49ff5d, 0x026c9c50, 0x710ec9c4,
- 0xe520d03f, 0xd7a3e013, 0xba6f1793, 0x84f9d287, 0x8095149f, 0x9486f0df,
- 0xd04f6dc6, 0x271bb3f2, 0x94f5f961, 0x28ff7eef, 0xc0296fd4, 0xe25c3572,
- 0xe056fb04, 0x8567ca43, 0xc8cc77e5, 0xc27d0049, 0xe087bdfb, 0xdfd9b77b,
- 0x73b6933d, 0x3cc5c94f, 0xd7ee6ad6, 0xeac85c18, 0xc6d2be6f, 0x52ef9552,
- 0x6d5c5fd0, 0x1d35f766, 0x04bb7e29, 0x73abaaf2, 0xd5e4b979, 0x243e5049,
- 0x3af7827d, 0xcab66b9c, 0xe92d0eb0, 0xb93ecfcc, 0xa44f0a50, 0xf276a978,
- 0xbcd99bfe, 0x4b9e4b6a, 0xaea93e07, 0xf64cfd62, 0x41e67654, 0x2c79cd3b,
- 0x26374b9f, 0xbf05fe00, 0x957df0e5, 0x5c7d2bb0, 0x3de4bc4e, 0xfb4491d6,
- 0x0f2519f5, 0x7157d14c, 0x664bdd8c, 0x7d03e765, 0xfc191cde, 0x2cbcd3f4,
- 0xb8cab6ef, 0xe1e40d3c, 0x297d7e2d, 0x2c6ecbcc, 0x1179043e, 0x65951589,
- 0x246da798, 0xfe94beb8, 0x3f03a7c7, 0x5649fd5e, 0x559d1002, 0xbe82ff4d,
- 0xafba0a66, 0xe537d356, 0x65e9c9db, 0x2adf9697, 0xd30d36fa, 0x3ef658f7,
- 0xe102ab85, 0xaf5a86fa, 0xc7138d0d, 0x8ee3f19f, 0x2fe03725, 0xc30efe56,
- 0x987c8bd7, 0x798dd901, 0x825da7c0, 0x38026f4f, 0x9e089af4, 0x3c96ca8f,
- 0x7180f093, 0x7a88c785, 0x055f8e2f, 0x33e0997c, 0x1709192f, 0x6ec2c9fc,
- 0xf02e7f65, 0x7af064a3, 0x38979dfa, 0xe3a2e187, 0x7a0e91df, 0xcf0611fc,
- 0xc995a966, 0x112fbe19, 0x311697fe, 0x7cf53edf, 0x1eeccdcb, 0xcbf83703,
- 0x3c527630, 0xcbee0869, 0x9f310758, 0xb3e7d700, 0x9e9b3e8d, 0x4736edce,
- 0xedcfd23d, 0x283aa354, 0xff74c25e, 0xd41e700f, 0x7ec1f704, 0x62f1216f,
- 0x47d29cfc, 0x6ec63d20, 0xe3c6f012, 0xb90c65a9, 0x789f3f1a, 0x5baf0cfc,
- 0xe29e0d24, 0x7cd0e1fc, 0xf19e732f, 0xaf4b8af6, 0xb93a5bf6, 0xec39d17f,
- 0xfa842c4f, 0xa0f6625a, 0xe123d13f, 0xda96af7e, 0x6ba7201f, 0x71fa086b,
- 0xfa0d569c, 0x321d8513, 0xff5c9f16, 0x6ab7264d, 0xf73f089b, 0x4f17e6c8,
- 0xe064a98f, 0x1ad6787e, 0x800b0d95, 0xaaac1bff, 0x56b50673, 0x4e788f16,
- 0x503211c8, 0xa87f7a06, 0x0fef423c, 0x8a1c8194, 0xf6ae7fbf, 0x3b7bf322,
- 0xacf84a2f, 0x032b1cf5, 0x19acefbd, 0x56790328, 0x14fef767, 0x13dc55d6,
- 0xbdcf8f19, 0x97911ae9, 0x1a745972, 0x0e2247cf, 0x7fe6c47a, 0x3fc63d39,
- 0xa08a5675, 0xcbb5759f, 0xcfe7e7ce, 0xd8767a01, 0xefc12a73, 0xaad8db34,
- 0xd138bb03, 0xce903fe1, 0x5f17e8e4, 0x9a17e6c0, 0x56de3c52, 0x66587fed,
- 0xee1e22c3, 0x81b878e5, 0x9bf3d9eb, 0xa90fb8b4, 0x0d3e16f6, 0x9bc0a0a1,
- 0xbe42123a, 0x147ea2f5, 0xa79dc6f7, 0xda185506, 0x7d436f3f, 0xf557f8bb,
- 0xb09b2718, 0x1894435e, 0x6431397d, 0x327fdd56, 0x5553a779, 0x5d37a2be,
- 0xbecafed5, 0x717d555c, 0xfeaa9278, 0x544b37aa, 0x54c8b2e5, 0xdfabfb55,
- 0xaf9552a9, 0x6aa19819, 0x3df403bf, 0x4e37e3c5, 0xffbd52cf, 0x37f4e368,
- 0x5d7d3e21, 0xefaa3bf4, 0x043f704f, 0x85237a09, 0x040bfe74, 0x53ac5ebd,
- 0x9acd4f7d, 0x7c0e54b0, 0xac16fecf, 0x95f77966, 0xfdb16314, 0xdb52ec2d,
- 0x28903713, 0x84b62be6, 0x40984f24, 0xbeba9dde, 0xf107afb1, 0x0f7ba97a,
- 0x6be6b826, 0xc69754c0, 0x22633b51, 0x0d5c8220, 0xf1638379, 0xbfd6a5fb,
- 0xa97fe480, 0xd5b837f5, 0xd4526feb, 0x54296feb, 0xa3cdbfaf, 0x0ac4ff5e,
- 0xbc3bfaf5, 0xaa4ff5ea, 0xb27faf51, 0xa9febd4f, 0x9febd573, 0xffaf57e6,
- 0xbaf506b8, 0xd7aab667, 0x7aa97b3b, 0x4b82735d, 0x74f1f554, 0xc849e820,
- 0xf795bccf, 0xa453dbaa, 0x42e86268, 0x5f02d9d0, 0x8d63e603, 0x35487aef,
- 0x4c7d3c5e, 0xf48f3c20, 0xe547d22b, 0x727f6e38, 0xa97aa0ba, 0xda325c6a,
- 0x73c03719, 0xff6e04ee, 0x33b746fb, 0xd58afc84, 0xe6fbb1eb, 0x08dae4b1,
- 0xd1f7c57d, 0xd82bea63, 0xf7869b47, 0xefd163d1, 0x59ea84bb, 0xfa357c1c,
- 0x33b0eecf, 0x999a0e38, 0x5429ff3d, 0xd1db435d, 0xde141536, 0x743e6177,
- 0x93cd77fc, 0x1df20fd1, 0x7183abd2, 0x7c1124ef, 0x7c5123ce, 0x755d89fa,
- 0xf10165be, 0x645fb9e9, 0xc9a9e009, 0x7a0e5038, 0xa3fcc2ff, 0xee7c63ef,
- 0xdd65d248, 0xe21c7207, 0x3909e33f, 0x49c9e5c5, 0xf2d10388, 0x27f71339,
- 0x8ae2897a, 0xee29e7c1, 0x269bacf7, 0x5a259ea1, 0xd5b1e633, 0x9ea12edd,
- 0xf60ffbaa, 0xf4c71f6c, 0x52e788da, 0x6e2ed781, 0xa13b301f, 0x62dd0e0f,
- 0xadf890fc, 0xec7a8f68, 0x3fb4deb0, 0x47f98716, 0xdbb17f76, 0x25dce0c8,
- 0x89c2b911, 0xdd7106c7, 0x7ce1fd61, 0xe0c8db9b, 0x4623a5db, 0xe264efc0,
- 0x31f0fb47, 0xd63c8a99, 0xefd624ef, 0x17f05cd1, 0x7e6fcd9a, 0xa9f21aab,
- 0xf472e0e5, 0xa88f7fdf, 0xb677f7fd, 0xffbfe84a, 0x3e9b851e, 0x1b8f76dd,
- 0xef28b2f4, 0x1e21b802, 0xfbe8ed00, 0x04ab7754, 0xadafb1fe, 0xb10f3dbf,
- 0xbfb25f76, 0x83087ea1, 0x3d60fe0b, 0x83367f85, 0xd281b7d2, 0x11efb6cb,
- 0xfbc61ff4, 0xa969c76b, 0x31fc8106, 0xc097bb1d, 0xfb67af2d, 0xbb1ec3e2,
- 0x7444cfca, 0x7a47380e, 0xf20e7b29, 0x9cc54fbe, 0x3474a0d3, 0x1fbafdea,
- 0xa47cb065, 0x44c8b6f6, 0xe39e4b88, 0xdb87d771, 0x56b2c4de, 0x265e60f6,
- 0x88e5f7dc, 0xa2ef5ef9, 0x4e7231f5, 0x63efd2b6, 0xeace8de4, 0x1e6bcbc3,
- 0xbc608b69, 0x25efc753, 0x5ee3347b, 0xbfc63fbb, 0x6e38cef2, 0xc4831927,
- 0x69b8429e, 0x4f4eff4f, 0x5a3f4023, 0x762a7cbb, 0xd3706d1f, 0xeef562fe,
- 0x1b1264c5, 0xf6d9587f, 0x88fdc20e, 0x6743f7e4, 0x36127c86, 0x641b7c51,
- 0xdf4d0338, 0xe511b86f, 0xaf76deaf, 0x1aa1ee07, 0xb7067f0b, 0x7e56217c,
- 0xdf8e387e, 0xe7e5c5cf, 0xc9d16d93, 0xdb7b4e30, 0x5d6f0a80, 0xfdf1176d,
- 0xc452369a, 0x7ac43fea, 0xf4828d4d, 0x42d28a73, 0xf27ae204, 0xc1fc0d80,
- 0x10263ed4, 0x8db9d67f, 0xb8306f18, 0xa2e4bc18, 0x9e63e90b, 0x3e00c19d,
- 0x1f4a4cbf, 0xe1d52fef, 0x4cbefafe, 0xefbfb62b, 0x780fe337, 0x29eadf29,
- 0xc37fb41a, 0x61c7867c, 0xf973d3f8, 0x0fbf6449, 0x5d3e80e4, 0xd38538c6,
- 0xf1d70b3e, 0xf027f6cb, 0xcc1137ae, 0x5a67d647, 0x5cb85ed1, 0x6a19dd38,
- 0x8fe3c7bc, 0xffe509e2, 0xf74f1c7d, 0x93fcc83d, 0xf04fddf7, 0x4a59053c,
- 0xe987caff, 0x87971c6a, 0xcf87c4a9, 0x082a36ae, 0x465eb00d, 0xed409fe2,
- 0x8d8bd211, 0x82a7e04d, 0x80ecaf7a, 0xaa2788d4, 0xb9e1335d, 0x1764e2a6,
- 0x6bdae3b0, 0x13c08b3f, 0x2f801162, 0xc109037b, 0xab98bc55, 0xf877c740,
- 0xd7813959, 0xe565cc27, 0xe89f5e44, 0xef4ce563, 0x0035520b, 0xcb471716,
- 0xca6f3ab4, 0x80bc7907, 0xfcf173b0, 0x84cd8d7e, 0x73ab0bcb, 0xdfd43566,
- 0x8b9cf049, 0xaf09ffa8, 0x5f1eb34f, 0x637a0799, 0x0dfbfab6, 0xd6a90f1c,
- 0x30d35de3, 0x17aea6de, 0xeb64acf1, 0xfbf137f9, 0x53fd7522, 0xf59b7bfc,
- 0xd41a647c, 0x7fc7abe7, 0xac5bd79c, 0xca8def2c, 0x037f767e, 0xd78c7faf,
- 0xc61ea09b, 0x5af5642f, 0x6f09df71, 0x18e1c9d7, 0x431e33e2, 0xbf33260c,
- 0x233696f2, 0xbf3c57f2, 0x73f707dd, 0x9bf30d95, 0xf519297d, 0x32fb86de,
- 0x5049dc98, 0x51debc06, 0x77a8a2e4, 0x37cc65da, 0x48f5bad0, 0x972b064f,
- 0x871f9c27, 0xead489e4, 0xe4c64461, 0x9ae8a5f4, 0x9005f012, 0x0ff4596b,
- 0xfbe33ae8, 0x918fe21a, 0x664de973, 0x83e58fe2, 0x39554e05, 0x5574cee5,
- 0x5cecd77b, 0x74b5bd55, 0xcc9eaa92, 0xdc9f2276, 0x5ccbe9cb, 0x17aaa254,
- 0x6d9065f7, 0xb59d9dbc, 0x554fe5d3, 0xa85f3bb5, 0x9f8d0224, 0xa3dc49ba,
- 0x7c8231e6, 0x9b96d7b8, 0xb3f8fc0a, 0x4e440ab6, 0xf627e53b, 0x1ba7f3b4,
- 0x54fa598c, 0xcb10b978, 0x7e583bcf, 0x879b7b7f, 0x9e087395, 0xab6f6faf,
- 0xca2ef2c1, 0x69fcf09f, 0xdb9f179b, 0xcfa15969, 0x2bfda47f, 0xda1e9fb4,
- 0xa19f943f, 0x3373c3fd, 0x3f9e1fed, 0xfd43fda1, 0x942fda06, 0xc170a69f,
- 0x0fda29f3, 0xed31ffbc, 0xb44fca1f, 0xb6bcb0f9, 0x1f962e6d, 0xf3e3f36f,
- 0x7c06b6b1, 0x8ad6d9df, 0x96db47e5, 0xb6e1f3e2, 0xdbdb3e20, 0x2f7d6256,
- 0xf9e793a7, 0x2be7dc79, 0xf103bdd9, 0x219fdfeb, 0x9d2fbb61, 0x154a539f,
- 0x3f2ab97a, 0xfc033fa7, 0xfceabd02, 0xc21fc054, 0x8c786261, 0x0668e387,
- 0x91270fb3, 0x930b72c8, 0x1f07181f, 0x6159e7df, 0xae35fca1, 0x4e7bad95,
- 0xd3eb145f, 0x4778d81a, 0x5c409db9, 0xce519241, 0xbb439b8e, 0xcaa45273,
- 0xd007f2c2, 0x14dc430f, 0x5aece53f, 0x21e64672, 0x5725c00d, 0x331e3d50,
- 0x397c21af, 0xb7809c18, 0x57fde0d1, 0x696d378e, 0xa75fbb2f, 0x6c0c2ba6,
- 0xe2b6f666, 0xb9c63afd, 0x9f4cb0be, 0xef1f9f2e, 0x717498e9, 0x5224dd3a,
- 0x9bf176f9, 0x8ccfa144, 0x31fef526, 0x6a89417d, 0xb8bda67f, 0xd1572886,
- 0xfbd48b7e, 0xbdfe733c, 0x198cefaa, 0xf5eaa90f, 0xfaaa15ae, 0x1e73bbba,
- 0xc73e0371, 0x6199b8b1, 0x210272f5, 0x4fdd85e4, 0x4ce65c20, 0x92738f36,
- 0x605f7ec2, 0x133dff37, 0xd6797fbe, 0x5c65f1ce, 0x09f2fb8d, 0x482c560e,
- 0x0f406a0c, 0x448bfc7d, 0x9e3b7f94, 0x7e9fa0d1, 0x40690922, 0xe6a64cde,
- 0x015fd424, 0xf16b8c37, 0x942d26aa, 0x85a2898b, 0x8a2455f2, 0x7fba3afb,
- 0xf5d264d1, 0x9ffad16b, 0xd9f2d131, 0x2cb3ff4c, 0xfa8c30bf, 0x67af80ba,
- 0xadc7d881, 0xdcfde397, 0x37f44cc6, 0x7625cfa9, 0x874bfdf0, 0x970df989,
- 0xc153ca3b, 0x46373e0e, 0x7dcda83e, 0x14058de2, 0x63b4b6df, 0x1fef14f8,
- 0x9ea33457, 0x94cbcfd7, 0xf75e5abe, 0xd44faa7a, 0xf307937c, 0xcffa5b38,
- 0x6b76e029, 0xff228fc9, 0x3c84e874, 0x9e411253, 0x893d970d, 0x3b1e6624,
- 0x93c9deda, 0x11fa2151, 0x375f326f, 0x4ff95f31, 0x4bc87695, 0xd179e02c,
- 0xf2d2db3b, 0x7f0fc7ab, 0x5074e3ef, 0xa0352248, 0x348b0b5f, 0xe074109e,
- 0x31fcabb5, 0x2f2efca6, 0x4b930b9c, 0xf2c40788, 0xf785d244, 0xd57fc829,
- 0x7c9a3e62, 0x50a21af0, 0x0d6ad8fb, 0x70d16093, 0x75fb84bf, 0xfce5cfbe,
- 0x9c6af667, 0xec259c8f, 0x74e80f26, 0x1bd599f2, 0xacfc3a01, 0xf4d8c75b,
- 0xd0bebab8, 0x2def504a, 0x1487d42a, 0xebcbee09, 0xe5407bc2, 0xc8c9122d,
- 0xb9a63801, 0xdf7e83e7, 0xb63a416b, 0x9fc72fac, 0xe359b830, 0x0efc40af,
- 0xc2357b28, 0x9aeda2e8, 0x82b7eb27, 0xf001393d, 0xdcbc0562, 0x430d5f91,
- 0x1e2217e7, 0x2f79f217, 0xc8e8f3e4, 0x833fc21c, 0x70bc7887, 0x6ea2ef1f,
- 0xcf528fe1, 0xaad2f1f3, 0xa2bdc36f, 0xff2d11ed, 0x3e352299, 0x5322e957,
- 0x70c34be6, 0x66173851, 0xb8015c82, 0xd80f82af, 0x919f9afc, 0x941cc6c8,
- 0x6f68356b, 0x3f306994, 0x9b2e9e3d, 0x8fdc9932, 0x7dd9d866, 0x4853eee4,
- 0x93e5c3cf, 0xc2482c6f, 0x5bb333fd, 0x8fcb326c, 0x58957af4, 0xe12af7bf,
- 0x72e7e45f, 0xa1de1ccc, 0xb127057b, 0x892b60bc, 0xb4bc87ac, 0x2bf5f6b1,
- 0xdc707332, 0xb53d7de1, 0xa53f205d, 0x5c7988cc, 0xa13e44ab, 0x69995472,
- 0x57384f63, 0xfa74e465, 0xf79d4da8, 0x6fcf9db3, 0x418f5f3b, 0xf27bc8a3,
- 0x9cc716d6, 0xafeba9f3, 0x88963f47, 0xff720ed3, 0x1d59a0e1, 0xa4e46d57,
- 0xaba87830, 0xc1f10cca, 0x6da7162e, 0xdd99bfec, 0xc7efbb6b, 0xed302f78,
- 0xa87a0153, 0x57ebede7, 0x0b58fdaa, 0x5f91fa77, 0xf164fde3, 0x398a317e,
- 0xe31fec3d, 0x335b62fd, 0x43c27ef9, 0x7b0e218a, 0x61de7562, 0x7277ec33,
- 0x1f0f9e08, 0xef017da7, 0x81bba6dd, 0x974fb82e, 0xa3fbb114, 0x54ffd7e2,
- 0xb2f11e96, 0x78efac53, 0x3ba5da2b, 0xd81c98cb, 0x2dfce079, 0x2b3f69e3,
- 0xf9d52473, 0x3a399580, 0x29b7a75c, 0x97b3c37d, 0xeed19a90, 0xa9943b29,
- 0xef726af9, 0xc1eff55a, 0x1c0df734, 0xff2828b6, 0x1fba035e, 0x4279fc3c,
- 0x1451f211, 0x08c4bf7f, 0xc19930c2, 0x76b3e70d, 0x17993e6b, 0x676f0f4b,
- 0x36ff58ca, 0x0d2f8ba7, 0xae1ab557, 0xa56f45b3, 0xfd7060cc, 0xb9c6e40e,
- 0x5894f1f0, 0xb5c0467b, 0x03df8c7b, 0xe2abf0f8, 0x1fad42a9, 0xfa74f062,
- 0x6ed3c3bf, 0xfcf0e8fd, 0xb3f3c395, 0x9b5eec43, 0x515ff591, 0xe1ab759f,
- 0x647cfb3d, 0x0a49880e, 0x7f115fef, 0x182993ee, 0xebf8e613, 0x59f4e52d,
- 0x078fe5ca, 0xc09150cf, 0x1e63577b, 0x924caf66, 0xbe5ca16a, 0x2cf77f00,
- 0x02c3ef93, 0xbf9e3f1a, 0x4619ef21, 0x64d77f95, 0x9ddeba31, 0x878a8d2e,
- 0xcf362cd2, 0xb2ea4703, 0xd8f1bfc1, 0x7202063b, 0xbfcf3361, 0x6bdecc7e,
- 0x05a67b84, 0x425ca11e, 0x747d019f, 0xc7bc2811, 0x2a333f4a, 0xd3c651d6,
- 0xc6d6f1b0, 0x4f188df1, 0x1564a73c, 0x3a557b32, 0x387385a4, 0x0b7daf6b,
- 0x5b5b7fef, 0xfdeca32a, 0x1d5ff8d2, 0x5bf0ff87, 0x7a30b729, 0xe1459299,
- 0x90fe916b, 0x0e5a6df0, 0x2a7fd148, 0xbd157c82, 0xa01fe71f, 0x4691ec78,
- 0x8b2d29e2, 0x9f988bd1, 0xdb80bf98, 0x5c02dd76, 0x06bbdb07, 0xbca51a54,
- 0x8917d0bd, 0xbcc1ff5c, 0x2b5c5f50, 0x654c73b3, 0xdafbae32, 0xee78a98a,
- 0x17cf376c, 0x1fa35e89, 0x164477f5, 0x763dbc70, 0xb811b7f1, 0xafd83947,
- 0x883ffbc3, 0x15bee03c, 0x8dff1fee, 0x5389f78c, 0xfb81aa7a, 0x391c0ecc,
- 0xebfb009f, 0x843a59f9, 0x87973e7b, 0x35b51783, 0xc8ec1e78, 0x0c6d7667,
- 0x6965d7fd, 0x89127db9, 0x35af39c0, 0x8b3cec25, 0xd603db95, 0x001b2723,
- 0x43c56b7e, 0xde862906, 0x10196566, 0x68fa35ae, 0x17f6878a, 0x6125a7d8,
- 0x7bc1ffe6, 0xb3ce8719, 0x5a1e19da, 0xbda10f4a, 0xf767ed60, 0x2965d635,
- 0xb852ce4c, 0xe6f7d2f7, 0xebd99f89, 0x8e94bea1, 0xbdfc3b97, 0xd3d6cecd,
- 0x4beec65c, 0x3fed7b5a, 0xc83ee1db, 0x471df844, 0x79f2efd8, 0x7292b80a,
- 0x48dfbc35, 0xcbf81724, 0x5abe632e, 0x235ef86f, 0x2f82fbd8, 0x62d5cade,
- 0x39f947bb, 0x0a2b4789, 0x4cae7043, 0xdb35b1d0, 0xcc1640ff, 0x0bc5f28f,
- 0x7c44ef68, 0xf63f10d3, 0xdf33c862, 0x32e64525, 0x076f443b, 0x68d42c3e,
- 0xe116fc54, 0x549e26eb, 0x73dda3fd, 0xa15da73f, 0x46423bf7, 0xfe4de702,
- 0x22667178, 0x191da7d8, 0x1af3dec2, 0x259b5f01, 0x8c0e625f, 0xdc5a2ff3,
- 0x72381e0f, 0xedc601bc, 0xbe02f3a0, 0xeb75f87d, 0x3a53b06a, 0x53e45eec,
- 0xd0ecbe31, 0xfd60f5f0, 0xdf7bd6ec, 0xd4647f04, 0x3acdeec1, 0x11867538,
- 0xd3b69fe8, 0xa07f3f40, 0x04f0f7fb, 0xfd1aa7e8, 0xb3ed0b06, 0x8b77c3a6,
- 0xb011ff88, 0x6b75176f, 0x3c83f755, 0xc2c5e5cc, 0x8e12168b, 0xac2c2fd9,
- 0x6b9e0ef8, 0x0131785c, 0xcfcc3fdf, 0xcfcc3fed, 0xf8e2edbd, 0x5f3f9978,
- 0xfb8afacf, 0xde9c4f6f, 0x8eddfe4a, 0xbbf7c63a, 0xa59afd60, 0xfaa83d01,
- 0xc73f5335, 0x687f64f3, 0x21b093b3, 0xf311de06, 0x98e1224c, 0x5dda8771,
- 0x52709134, 0xfa32faec, 0x15af9e1d, 0xe6d0ffeb, 0xdd8e3c46, 0x1e1538ff,
- 0x53cffb87, 0xff6471e1, 0x87ff5805, 0xf44bc2b6, 0xfe8c793f, 0x0ff05473,
- 0xef8f9bf7, 0x7bac1dcf, 0xeef942d6, 0xc6c57860, 0xf7e14b57, 0x7bf80cf7,
- 0xb7afa41b, 0xeeb9cf0a, 0xfa0e7822, 0x7a7ffd81, 0x1f52f21f, 0xc0f7aad8,
- 0xe8ba8318, 0x7a28cf3c, 0x83d89f26, 0x789cef70, 0x87959dbe, 0xdeff1ceb,
- 0x90ec8728, 0x942c6a67, 0x8ce7a0ed, 0x015a1224, 0x235297fb, 0xdf1139ca,
- 0x34fefd12, 0xb392ab53, 0x46a17ee2, 0x68d87f9e, 0x26aff161, 0xac2cffab,
- 0x574d12ff, 0x1f68cfcb, 0xf03b0ba7, 0x0e213b7c, 0xf888d7dc, 0xc5fb0c8e,
- 0xf97d703c, 0x9b0b9c08, 0x0cffe397, 0x1509fbbe, 0xd1ef102b, 0x0fcdb15e,
- 0xf38f79f9, 0xa6e3e47d, 0x2313db88, 0xf2d9753f, 0xba1bb357, 0xecb7e8e2,
- 0x8cc23b77, 0x40d9757e, 0xcfc8dabf, 0x7ce072eb, 0x67bf00e3, 0xb9fb414e,
- 0xde30f4ae, 0xfc275947, 0xf3e32b02, 0x601d3256, 0x54788b4f, 0x8217e402,
- 0xdd86b0dd, 0x616f5853, 0x275ffae5, 0xba17b9e1, 0x8114b21f, 0x65add99c,
- 0x071e22fe, 0x1e61cf3b, 0x02c9ae34, 0x7cf20efc, 0xaffdf397, 0xe7a7bde1,
- 0x3a478e57, 0x0fe5ffa8, 0xddc31ef1, 0x0d9f4ce9, 0x3ea1b3ee, 0xca7837ab,
- 0xddcd5ee1, 0xed6ecce9, 0x2509c395, 0xa3ec7631, 0xa7285519, 0x5e395e3f,
- 0x19271b13, 0xc2e4cd9f, 0x94ffe11a, 0xce70e5a3, 0xf0d2e3ea, 0xdd3ea13f,
- 0x1bdfc263, 0x53fdecd3, 0xbabfde39, 0x63bf80c6, 0xe277b551, 0x5fb0b9ba,
- 0x1f31904c, 0x84d7b35b, 0xc55d9cf0, 0x6f8383ee, 0xef35ee41, 0xcbbf871d,
- 0x62896f56, 0xd9b364de, 0xc1f10053, 0xc0dfbdfa, 0xf1389fdf, 0x902cc4f8,
- 0x9593c783, 0xfb6fbc2c, 0x3a748a71, 0x26fbe3ef, 0x579df7b0, 0xdb6ab9e3,
- 0x347f8b1a, 0x4bd088ff, 0xedcdf7f1, 0xd73e2f4e, 0x6899f807, 0x5e1c80f4,
- 0x675fe438, 0xbfd607f7, 0x24a59d19, 0xf306ffa0, 0xa7edea17, 0xeb1df7f0,
- 0x49dd8fbb, 0x42e93e30, 0xbcfb15fa, 0xb9eb71f8, 0x8faddafe, 0xd5df83ce,
- 0xf8e76fab, 0x59f7bfe7, 0xdfc629af, 0x5ff8149b, 0xf31f7713, 0xc5de38f7,
- 0x1911c4f3, 0x76a4b9ef, 0x8ff81645, 0x2037ec4e, 0x33589dbd, 0x1057ef19,
- 0x5853d5e3, 0x8f4f94ed, 0xb029953b, 0x15b20bbe, 0xb676ff36, 0xe9ef157f,
- 0xbdf811f8, 0x2d3ee8ce, 0xda0dafe6, 0xc71009a3, 0x7944ed14, 0x5afd3e68,
- 0xddb7bdfc, 0xdd7007e9, 0xa0115d29, 0x6d2fcd2b, 0xbf03b7a8, 0xf71bbf64,
- 0x82092971, 0x8ef8349f, 0x077bf701, 0x9ed019f7, 0x9d977c1e, 0xef711fcf,
- 0x059a359f, 0x7e06dcf7, 0xdae50e39, 0x0b746660, 0x784dbfb3, 0x51e0e4cf,
- 0x5e3cd5ff, 0xd1efc69c, 0xefbb034c, 0x18533ec2, 0xc4fa3eec, 0x6c9043b8,
- 0xfdfbe373, 0x882fbc16, 0x24f9d93e, 0x7f1b8f69, 0xf1748937, 0xfdf823ef,
- 0x09fe5903, 0xc81fa720, 0xe15bfda3, 0xeed893fb, 0x4affae42, 0x9211598c,
- 0xc74bc7ce, 0xd9fd337e, 0xccfa5ef8, 0xa80f7ecd, 0xb047d78b, 0x5ff0603e,
- 0xbdf8f38d, 0x328cf48d, 0x5f7aafcf, 0x49d97de4, 0x6e028eb8, 0x23b4678a,
- 0x93d2f1e3, 0x029ff6cd, 0x3974f11f, 0x80979eb4, 0x7832fc0f, 0xb63ec0bf,
- 0xea26ab8e, 0x9ce93c99, 0xde32f68c, 0x27931430, 0x07df7cb8, 0x3a839748,
- 0x97a0d3ef, 0x847547a2, 0x2d48c3dd, 0x5457f003, 0x16f576b0, 0x7010f7e6,
- 0x07e7e19c, 0x1e593b1a, 0x3ee799e1, 0x856f9ac8, 0xa81ff3f3, 0xff0bcfce,
- 0x0124ed21, 0x8f5633fc, 0xc4575092, 0xc22d76ec, 0x6e8f7ec5, 0xdfc14e8d,
- 0x3e3e0c0b, 0x2712fd11, 0xbc5ecaf4, 0x15d57607, 0x6d27a01a, 0x4fbf9731,
- 0xfb8979b0, 0x6d3dc4dd, 0xc81978f1, 0x99b2fa66, 0x3b82eff8, 0x25138ffe,
- 0xf14b0785, 0xff744abe, 0x52cd206b, 0x7e589af6, 0x9185ea3b, 0x2fea42f7,
- 0xff827bf1, 0xb053e5bb, 0x9f806556, 0xd5e80260, 0x7fb676d3, 0x40225bc4,
- 0x3cd96bbf, 0xb803e03b, 0x4ef882ff, 0x5bc45e83, 0x4e1e2825, 0x9e3eac4b,
- 0x1bbc84ce, 0x1ba00f81, 0xa3be17e8, 0x9d9e7f7d, 0x763ff6c3, 0x17943f27,
- 0xaa5e43d4, 0x490157d0, 0x0f7dc0d6, 0x024a1d27, 0xcec9e5d0, 0xc872e73c,
- 0x292871f7, 0xe076f6b1, 0x0e7bc110, 0x73ff3814, 0x922efc35, 0x05f80a77,
- 0xfed53b77, 0x5e787888, 0xfd401ca8, 0xc6985ff3, 0x800053c5, 0x00008000,
- 0x00088b1f, 0x00000000, 0x7db5ff00, 0xd554780b, 0x733effb5, 0x79332666,
- 0x84841e4e, 0xe4249840, 0x09308401, 0x0741410f, 0x68151048, 0x85280978,
- 0x42100793, 0x17b6881e, 0x240cdb5b, 0x41b45a20, 0x768bd151, 0xb4544140,
- 0x03414141, 0x58a50077, 0x56d56351, 0x880dcb6d, 0xa8311fbc, 0xadadff97,
- 0xfb5bf5ff, 0x90ce649c, 0xf9b7b5a8, 0x67d9d83e, 0x7b5ad7bf, 0x3bdaf5ed,
- 0x3f437cdf, 0x756109d7, 0x10f4422b, 0x116dce22, 0xa3109862, 0x42f382dc,
- 0x11c885d8, 0xca8df3fc, 0x5c3adb89, 0x8df88588, 0xdaf9aaaa, 0x2bc89eb5,
- 0x1f5c2deb, 0x774675e3, 0xb5f1bdf1, 0x28f250ff, 0x084c21b3, 0x5d3dfe87,
- 0x1e310f88, 0x90ea7b8d, 0x460ca7dd, 0x09ac2e1a, 0x75ac5442, 0xa51a45fa,
- 0xbacc4e6f, 0x4285e653, 0x5c6cc775, 0xe34ad899, 0xca2a6f96, 0x1fb29112,
- 0xd0aaf341, 0xea9e55e7, 0x0a8752cd, 0xeaa8f6d1, 0xebf684da, 0x6a8f9e55,
- 0xa51eb8f3, 0xdbd627ef, 0x9fa9cb5c, 0x273e7d0a, 0xc5d16b7a, 0x0ab794b9,
- 0xf280bdab, 0x45da2d56, 0x54782efd, 0xb177f62d, 0x81f7aa3e, 0xf80ba0b5,
- 0x3a894d60, 0x1ea8327c, 0xe39469d6, 0x7268f7bf, 0xc2fcd157, 0x63cc77e9,
- 0xd1f7e2a3, 0xd1f44efc, 0xfc7f21e6, 0x82b13093, 0x45daeaf2, 0x2bdceed1,
- 0xa1c27de1, 0x01727b45, 0x031eb95f, 0x18e3a19e, 0x7808bcf0, 0xbabd20c6,
- 0xabf04bc5, 0xa0f5487d, 0x4be6bece, 0x98df80d1, 0xae47453d, 0x61b5f7c1,
- 0xecda8c22, 0xcefe9c3b, 0x8b8f34ad, 0x3d4155ab, 0x193dd28a, 0xdbc68289,
- 0x2e7cf96e, 0x4dbf73e0, 0xd2917acc, 0x47a7b7d2, 0x46fd285b, 0x21f51fa7,
- 0x6a352709, 0xae7cf47a, 0x98b93edf, 0x9edcc7f2, 0x7aa082c4, 0x2ce9d63f,
- 0x6b655f14, 0x718d16ff, 0x78d8d62a, 0x2e5e065d, 0xbf341d62, 0xfe118d8b,
- 0x8b9704e5, 0x5fb8dbbd, 0xbf464f03, 0x4183c015, 0xa56f544f, 0xe38fa5db,
- 0x114717b3, 0xe1b4d35c, 0x64e82e9e, 0x2e3483c1, 0xe51f5bfa, 0xff7de675,
- 0x74780d71, 0x25969be0, 0x8f03fc0c, 0xbc6da44f, 0xde59be97, 0x93d10ab3,
- 0x7d78d0e0, 0xcbdf42aa, 0x850984f6, 0xda532bfa, 0x9c09288e, 0xac14fb77,
- 0x8c3cf17f, 0x9871f657, 0xf7c7e505, 0xf3c79f5e, 0xc12bc10a, 0x7a2bb529,
- 0x96d698bf, 0xdf8209ea, 0x736d3ec5, 0x7d06be79, 0xe79bce74, 0x4c37bb51,
- 0x4c5c5f60, 0x76467c23, 0xf801b1c2, 0x5df5c619, 0x6f6fa676, 0xb1d7e01e,
- 0x7ac8575e, 0xfdeb215d, 0x0b7d3040, 0x29ebc68f, 0x82cc24ff, 0xdc0fe5ef,
- 0x71a6e594, 0x8e3bdf8e, 0xce3c75d7, 0x75fd879b, 0xcffa953c, 0xe59d72bd,
- 0x733ad00f, 0x8d9d65bf, 0xb757c64e, 0x7b6b8ceb, 0x3acb7c42, 0x6874ea37,
- 0xab33f3ac, 0xfa8ada3b, 0x9cb155bb, 0x14fb6a8a, 0xcc8d7fdf, 0xd0d056bb,
- 0x0f4936b5, 0xfc236bb5, 0x6685d7c0, 0x6e7f04db, 0xf1a01f27, 0x583d27be,
- 0x8f8e1e98, 0x023c3f75, 0xea9f75f4, 0x9701b9f2, 0x9cfcef21, 0x57efc7f2,
- 0xb1f9025d, 0x0fc47e12, 0xc70fe02c, 0x73278b53, 0xb5cdaf7e, 0xb4f9fa82,
- 0x047f6ff1, 0x63dc5cfd, 0x5f5e0b73, 0x90b3fdf1, 0xaf82f9a7, 0xc66e5c11,
- 0xebce54b8, 0xd619ef57, 0xe8f17288, 0xbba65760, 0x0214be0b, 0xaa358ae7,
- 0x01d21f69, 0x3ed5e3c1, 0x3ac6e290, 0x8713380c, 0xa83a9acd, 0x4fc81e13,
- 0x9709d41f, 0x9ad0bb5a, 0xfb12fbf8, 0xfc414194, 0x9749f00c, 0x3cff44e7,
- 0xa2d2c7c9, 0xc873f683, 0x8ff942fc, 0xff48cf51, 0xcc57c1d2, 0x705178fc,
- 0x1950e09e, 0xc21c5f92, 0xca42aa37, 0xb7ca43ab, 0xf99cfaf7, 0x13f8e840,
- 0x31bee4d3, 0xc3aa58e0, 0x9145bb71, 0xc53ee47c, 0x2d7e89bf, 0x4bedbab5,
- 0x8e2ffe69, 0x4fd0f311, 0xae385599, 0xced97de5, 0x8ab52beb, 0x7c02b086,
- 0x633d70f7, 0x435fd4dd, 0xc3e51c58, 0x7aa21f25, 0x7941ec13, 0xf31adf83,
- 0xf0934f09, 0x5bc5d230, 0x83c78d9d, 0x14dd59e0, 0xeed7ca6b, 0xee3c6e4a,
- 0xc62fcce8, 0x6ff4c92f, 0x387ead22, 0xaa7f22e0, 0x8c1eb577, 0xb249fa20,
- 0xd469e168, 0xff83c5ef, 0x66eb9d1b, 0x262f0fbc, 0xe9a816fc, 0xf5a61d2f,
- 0x3d56edb6, 0x9befd09b, 0x5f11fcf2, 0x1b787a5f, 0x9bd9af7d, 0x7775ee21,
- 0x8e143fae, 0x8a5d7717, 0x17efa74e, 0x05c3d90e, 0xcda501c9, 0xd1ed63f1,
- 0x84babfbc, 0xbb875bbb, 0x6b05dfd1, 0xa094f46b, 0x6b44177a, 0x7a20d4f4,
- 0xa975dd29, 0xfc31e44e, 0x1f106a7c, 0x8f4f74ff, 0x2f7d2ab1, 0xb1af7dfb,
- 0xe9c74a16, 0x736fdefd, 0xe686b93f, 0xe61ddbed, 0xbef34cd9, 0x0b2ce6f0,
- 0xb3785fda, 0xea7f4f42, 0x4fb3f4e5, 0xb47e3153, 0x31bbd245, 0x293488de,
- 0x8e59954f, 0xfc1a313e, 0x699ed5ff, 0xe59d69f5, 0x2c829665, 0xad9b79e0,
- 0x7de62e6d, 0x95ab1141, 0xc67eceb8, 0xfdb9b9a5, 0x9f27dd85, 0xf4f87f00,
- 0x60053efe, 0x6776172b, 0x7e5469e7, 0x5921e3e1, 0xc2f7ea2e, 0xf01ec97d,
- 0xf7366d85, 0xeede994f, 0xdf60f289, 0x9c61fd1b, 0xf77fe825, 0xa435c355,
- 0x52f9cb1f, 0xe3934f3e, 0xce8294fa, 0xee2da37e, 0xbf028f5f, 0xb3cdbdf8,
- 0x7fa59299, 0x8b19e8cf, 0x773f61f5, 0xd5b28781, 0xe26dc261, 0xde782b27,
- 0x8763c2a5, 0x1172841c, 0xa2e55f44, 0x6fcfbf98, 0x405f7f34, 0x17df027c,
- 0xaf7f37ae, 0x5aab8d10, 0xfc8b7d69, 0xf682efe6, 0xfb112f8f, 0xe269f8da,
- 0xd6f17ef3, 0xf54cc26f, 0x0fe74f84, 0xa5c7e60e, 0xc7c11fb4, 0xfefc579e,
- 0x3defc015, 0x7433af2a, 0xd2d3cb5d, 0x448f1d78, 0x6b6e63fc, 0x598f5e10,
- 0xe0893584, 0x1c2ecc7a, 0x47e80549, 0xbeab9c08, 0xcdfe613d, 0xfedee4d4,
- 0x3296a55f, 0xe0b24bdf, 0x05c3b83c, 0xedfd47ce, 0x7e8a9939, 0x791756ca,
- 0x77e6835b, 0xe47de6d1, 0xdf5790e7, 0xf54d7e23, 0x37f58d8e, 0x1d79a58e,
- 0xd4f557bb, 0x33af3177, 0xe3f99f24, 0x28f71e09, 0xf2843fc2, 0x2d4c9ccf,
- 0xde9cbc21, 0x34b5327c, 0x3115fe98, 0x8d1e982b, 0xebc26694, 0xf489ac82,
- 0xeb05ad18, 0x89d7ad09, 0x0d7cf35f, 0xb1553a78, 0x787a2ba4, 0xe54f7414,
- 0x05577f39, 0x3d9acee4, 0x33e83f36, 0x0f17e362, 0x6cfa0adf, 0xda40b9b6,
- 0x2f547bb5, 0x5bb174e4, 0x1ce267f5, 0xba34fe6b, 0xeecce712, 0x68352e21,
- 0x170e74dd, 0xb0b97ce3, 0x596252eb, 0x1933f72e, 0x9eed44f2, 0xdfa03c87,
- 0x5cf1e6ee, 0xf1affa9a, 0xff99af3f, 0x20fdcf9d, 0x81f046f1, 0xc0b2278d,
- 0x3e0f89df, 0x99fd8697, 0x791db5f5, 0x83875a21, 0x2c7e68fe, 0x7ff45fbf,
- 0xa3a5f2c5, 0xbbfaf559, 0x349ec3c0, 0x07ffc33b, 0xcd026fcf, 0x7e73b12f,
- 0xe03d9f34, 0x6b3cc6cb, 0x9c766ddc, 0xd11d17b3, 0xb4a7e079, 0x89f989a0,
- 0x06e4f9fa, 0x289b2d7f, 0xee16bf88, 0x09c2bf9e, 0xf151a179, 0x6f280f40,
- 0x2adcf956, 0xddfaa0df, 0xecc29784, 0x213bf6bb, 0xe5443e9f, 0x3abc74d4,
- 0xa8b38fea, 0x54fbc2fe, 0xf8dbefce, 0x80da3fd3, 0x11360dd7, 0x47863c06,
- 0x63c7d26f, 0x3e916879, 0x37cd45bf, 0xb7a0569d, 0x013d51ec, 0xe51cdfb4,
- 0xd5b2f7c1, 0xf4c163b6, 0x50bf1f35, 0x2bd7f37f, 0xf81a40fe, 0xc2fcb984,
- 0x27ce9f30, 0x5c6fbe42, 0x027f7535, 0x89eb2b52, 0x72e6fc8b, 0xf34ebcfc,
- 0xdf7fd5a9, 0x17a6b99a, 0x29c173f0, 0xabf60b44, 0x1df595b5, 0x054d6811,
- 0xfeec2f1e, 0xac78152c, 0xb27ff57d, 0xebf81744, 0x51cd4fd8, 0x84222709,
- 0xc6e74b38, 0xfdd01637, 0x5dc5c9a5, 0x8fb17fd4, 0x6d5b17dc, 0x63905ebe,
- 0x8afc6ff7, 0xddc5c1f9, 0x17575f5a, 0x5fb9dbf4, 0x124b4e7d, 0x205380d2,
- 0xbae4da1d, 0xe484fb11, 0x1f417bc1, 0xdd4d569b, 0x6be77944, 0xa2424836,
- 0xf7923336, 0xfcc7f60b, 0x7d8d7ea0, 0x017c15b2, 0x1fc930e9, 0xe523ec5e,
- 0xbcbad584, 0xfb5f75b0, 0xe62ecce4, 0x26cd3ab3, 0x923edbcf, 0x2b4dcc6f,
- 0x9c874f0e, 0x0345f161, 0x75ebc21f, 0x157c1027, 0x97eabe9d, 0x3df0512d,
- 0xda723449, 0xfa71345b, 0x245a10b8, 0x0b8f0bea, 0x4fa21670, 0x7bc7e02c,
- 0x31abd79e, 0x8e89fcdf, 0xd06af03b, 0xf8562f83, 0xeaefd337, 0x63508746,
- 0x46b5ed20, 0xa0d451a3, 0x4beebfa9, 0x540fb6e5, 0x3ed49fd6, 0x2e3d3e06,
- 0xc8cbf7e0, 0xf8c131e9, 0x74b877db, 0xdeabfa02, 0xf52e7a49, 0x4d877e77,
- 0x5957a004, 0x3d50bf69, 0xec2fe1db, 0xcd4be069, 0x5cbc808a, 0xaf7c794b,
- 0xfaf5651d, 0x9792e083, 0x9a91e12e, 0xbb70c59e, 0x9a1ef560, 0x9573fa08,
- 0xdbd721da, 0xc04f970e, 0x2a4b6379, 0xfb5e63b5, 0xa4758f0e, 0x56b16f01,
- 0x9aab37de, 0x5c5b718d, 0xf62a3fe8, 0x436f9e03, 0x534445ae, 0xc28f86bb,
- 0x874c0cfc, 0x3a6143e4, 0x85021b5c, 0xf817770e, 0x14c289f1, 0x77ca8231,
- 0xcf3013a8, 0x1752aef3, 0x958b68f0, 0x9b308776, 0x9af40277, 0x78b8d8b0,
- 0x7a446fcc, 0xabffcf06, 0xcf3c25d8, 0x80bc48af, 0x7af241be, 0xbd0df231,
- 0x7d8655cb, 0x8fbc23e3, 0xfc721d6b, 0x9f2a3da8, 0x43a462a3, 0xbbefdbe7,
- 0xc89c68ca, 0xd024dabe, 0xa89f3183, 0xf00baf61, 0x858b1ce7, 0x3786b3e5,
- 0x2f80d722, 0xd21be518, 0xf819bd25, 0x75609958, 0xae3c7f81, 0x574ae7d3,
- 0x6820a3b1, 0x161eb69d, 0x0ec55e1d, 0xbdbb69c0, 0x97ce10f8, 0x738efa36,
- 0x54d27f73, 0xaa14f407, 0x273f4167, 0x64c768ea, 0x0ee9dfa3, 0x424177c0,
- 0x5bb478e9, 0xc2ac9628, 0x5eb85b7e, 0x3e208115, 0xe39360db, 0x211cb82e,
- 0xc73a4302, 0xe3f8405d, 0xf9a397f6, 0xd653837b, 0xfb92886b, 0x2974cee7,
- 0xd3d4ae2f, 0x83fa7ab5, 0xccc7baf9, 0xfec7cfd7, 0xe4a94b71, 0x53a677ef,
- 0xeb4ad5bf, 0xab268878, 0x80edda92, 0x973a89ef, 0xcd058408, 0xc6758af7,
- 0x38d0408b, 0x3b2ef51d, 0x183e2045, 0x47c66e1f, 0xb9237ae7, 0x125d44fc,
- 0xfefbb1c3, 0xec704c53, 0x04c33fec, 0xcffb2bc7, 0x7513f2e2, 0xffd1df39,
- 0x4ffa6ec0, 0xb3bfcd33, 0x5dfc7edf, 0x82fad2ec, 0x22eddcbf, 0xd374c07e,
- 0xbb2676ca, 0x7606fda2, 0x9871f12f, 0x95f7dc80, 0x0576cb53, 0xe96d87f1,
- 0x0b9f5d76, 0xfadf80a4, 0x6b4e3ac4, 0x8f1a5d77, 0x0be26ef6, 0xcbd0e76c,
- 0xcf1b456b, 0xfa02fc13, 0xbab648bd, 0x96977cb9, 0x98c8bd0d, 0xf33c8747,
- 0x859f48bb, 0x9ed841f6, 0x1f6347bf, 0x4db6b45f, 0xa9aa7d86, 0xde088d59,
- 0xfdf407d7, 0x17cfb631, 0xd5d7b079, 0xbdf11da0, 0x9c5dee9f, 0x84d4f80d,
- 0xde61e5b5, 0x2dfef7fb, 0x57c8e70f, 0xc5f7167b, 0x9f54adf7, 0x5b7b0fe5,
- 0xbfa8278d, 0xb3e5c27e, 0x9b4ce88c, 0xfa1ffdf5, 0xfa9eeb7d, 0x36c7985e,
- 0x2e483dd8, 0x56f659ac, 0xb078f1c6, 0xe083ef21, 0x16f8825f, 0xa6bb43eb,
- 0xdf2063a9, 0x0f32487d, 0x96ee87e5, 0x3ed0c372, 0x83496fc2, 0x2ce1f7dc,
- 0x619a89f0, 0xe1541776, 0x9616fcf0, 0x3987c3b7, 0x9f9f385c, 0xebc193ed,
- 0xc84e43b3, 0x78a2fb40, 0xf154e7a6, 0x1b6dc9af, 0xe04ff642, 0x68db350f,
- 0x75b56abc, 0xf78538f2, 0xa9f031d1, 0x78bfb79a, 0x135fd747, 0x8c5a9f81,
- 0x5f78f04b, 0x432127ef, 0x9a7a893b, 0xabbef5a3, 0x9b8cdea8, 0x1fa15393,
- 0x99fae895, 0x19404898, 0x1068fcc9, 0x9e5720ec, 0x7a0f52db, 0xc034d14d,
- 0xf0ec97c1, 0x981afce0, 0x71c91c22, 0xf4fed0d8, 0x57ee34a9, 0x2ec316e9,
- 0x03de60aa, 0xdf3d35f4, 0x2c6969ab, 0x38fe939f, 0x3b3fd1d0, 0xfbc325ae,
- 0xa30bf077, 0xdf3787fe, 0xe2eb483a, 0x79e430dc, 0x36e9bdbb, 0x3aadb3cc,
- 0x9847f592, 0xc9eed0f3, 0x2a02c39b, 0xc8b68aec, 0x3fe7d0d5, 0xb07b988a,
- 0xfee3c107, 0x83222d28, 0x799acdda, 0x95e42ac3, 0xa5eef1fd, 0xd692e7a0,
- 0xd3e7c0e1, 0x83262d4e, 0x6872afc6, 0x8969137d, 0xd968fbe7, 0x8be43831,
- 0x449e25e3, 0x2ee2a0f1, 0x53e72fe7, 0x0ff1c8a2, 0xe8496947, 0xf2d2c4a7,
- 0x66f855c4, 0x837b616d, 0xfdc1e868, 0x8a8bb79c, 0x037fe4ae, 0x75f4470e,
- 0x6b89b3ec, 0xf6fad32f, 0xd4f18dad, 0xefcb7cd3, 0x96b7399e, 0xe296f9d2,
- 0x9a6a80c4, 0x55d61677, 0x9be92be5, 0x79f3f24c, 0x65ef3e9b, 0xaeadbcfa,
- 0x1cfde0ac, 0x437aef3a, 0xc285f95e, 0x50ff20fa, 0xac5f15fe, 0x87ca5eff,
- 0x307d2e59, 0x20d6e70d, 0x3649db0b, 0x22ded384, 0x7d5df23d, 0xbed38f9e,
- 0xf7f1c473, 0x149ef8d5, 0x3d27db2e, 0x7ec3f16c, 0x37763c50, 0x9ac9e9e3,
- 0x634a8f48, 0x1db70895, 0xf0995072, 0xd9fc5cb2, 0x69edbf98, 0x661575a5,
- 0x6e01d768, 0xa92de2f3, 0xb43cf01f, 0x6f205381, 0xf8eb928d, 0xd7937916,
- 0x6cb85531, 0x3589942c, 0x531fc816, 0xf329725d, 0xa63fb915, 0xe30daf09,
- 0xd71a10cd, 0xbb05c3fb, 0x1fc4fa17, 0xbcd10ba0, 0x7f1f0a67, 0x9cfc2d9f,
- 0x5c022ade, 0x95a2c7ff, 0x89bc06f3, 0x4825e63e, 0x53d938fa, 0x8cdff865,
- 0x9117bf79, 0xad1967d7, 0xd923ca63, 0xad5fac99, 0xf9f0c1eb, 0xd14f79ae,
- 0xeff686e9, 0xfd83f43c, 0x99fa9d3b, 0xfec8637a, 0xe061ec2d, 0xf6dbf333,
- 0x877b4b51, 0x4c9914fe, 0x475eb870, 0xc5def5e9, 0xa9153fac, 0xf847c291,
- 0xfaa51969, 0xf21c7721, 0xa0484f2e, 0xb9fddc75, 0xb77e83d4, 0x843dbc5c,
- 0x46377fca, 0xd2dcf515, 0x64d4f2cb, 0xbdc99fbd, 0x9d869fc7, 0x2408b0f5,
- 0x17b9a50c, 0x8f5ed65d, 0x564ef9a1, 0x52c435b7, 0xa4de84f9, 0xbdf104c7,
- 0x5bb6121b, 0xd8ddd7a1, 0x627dfccf, 0x99df59ae, 0xc0551a24, 0x7350f279,
- 0x0f807dbd, 0x0c8ad7d1, 0x0188b7a9, 0xdfee8062, 0x47fbb963, 0x13ed1f7a,
- 0x0be1d92f, 0xbb433579, 0x3a5c7007, 0x18b9b3e9, 0x7ae55857, 0xd951de6a,
- 0xe48c779e, 0xdf9d2f2c, 0x84e8ebc7, 0x08f29308, 0x6e48a7f2, 0xcea67a40,
- 0x3a39e9cd, 0xa17e51e7, 0x67eb23ce, 0x7d803f14, 0x73faba56, 0xb772f02b,
- 0x6ef9c253, 0x761faa67, 0xc6a68ab5, 0xe55e98f1, 0x4883a6ae, 0xe3c503cb,
- 0x382ecc71, 0xd220e4b4, 0xb6f43bbb, 0x5fdd0e25, 0x5dbff3c0, 0xa3dbd3a7,
- 0x8834dea6, 0xbbedc9f6, 0xf6ebdfa5, 0x4da7533e, 0xc6de404a, 0xa8e8eef9,
- 0x3422b2fa, 0xa8cf22da, 0xb758cdde, 0xc1e8f9b7, 0xc9e75a78, 0xff3c11ba,
- 0x9e5e75d3, 0x09bad9ff, 0xa33dfe9c, 0x27ae39f8, 0x6c79673e, 0xd6a3b6d2,
- 0xe45faa14, 0xb7d8bc55, 0xdf857ad7, 0xdeb1744f, 0xf581cf26, 0x1f600e74,
- 0xf534198f, 0xe9303bf1, 0xda276d0c, 0x5171b8cf, 0x3c99c611, 0x7a8ac4a2,
- 0x2216bf33, 0x4e93ad3e, 0x8dee8bbf, 0xf8eb57f5, 0x6157bf3b, 0x1bdf9e78,
- 0xdf6cf0cb, 0xf9031191, 0x8ccee960, 0x6f89489e, 0x046f7a1a, 0xd97ed7e0,
- 0xff6df3c2, 0x8ef59c28, 0x774bf6d0, 0x7a89ec0e, 0x22df0967, 0xfb3cde48,
- 0x83b87ffc, 0x71b395cf, 0x027d5f4d, 0xc957f6fe, 0x39b248df, 0x5ff818f1,
- 0xd01e5eef, 0xce5d05cb, 0x02233d26, 0x6e8d5fe7, 0x427f0457, 0xb4b86cfe,
- 0x217f0ee7, 0x17f62c65, 0x9c1373c6, 0xb9191cc3, 0x7f7087c7, 0xfd467e47,
- 0x9258fcbd, 0xb9effa07, 0x63a48fed, 0xf34dadfc, 0xfa8c793f, 0xf8b7ef3f,
- 0x9d4e746c, 0x5e174d17, 0x2a7e1f05, 0x777bed92, 0x9c2f342b, 0x717b9bc3,
- 0x86b1473e, 0x2c39c32f, 0x73cfbcfc, 0x1975b714, 0x17aaadf1, 0x7a05ed1f,
- 0x30608d7c, 0x5b5fca82, 0xed7cc11b, 0xc8e79b35, 0xb8f7f432, 0xfbe9efec,
- 0x0bb63cef, 0x05d57de9, 0x65fa5277, 0xa1e31c46, 0x034581fc, 0x79ce9ed0,
- 0x3dfd4c91, 0x80128af7, 0xbad06abf, 0x69922b7d, 0x1c37837e, 0x34a2f4b5,
- 0x1ea529ff, 0x373ec0e9, 0xf9fd821e, 0x6e6e196f, 0x9a5b643f, 0xd96d829a,
- 0x75c9ffcb, 0xe59bfe7c, 0x9f75dc82, 0x3a25ed8f, 0x4a0183e5, 0x348d99d3,
- 0x8fd4b0d7, 0x6386340a, 0x6767af23, 0x6ff7b4f3, 0x49075815, 0x8977981e,
- 0x91e7c3dd, 0x7811b7df, 0x78e5af3e, 0xf62bbf58, 0x3b7a0a3d, 0x707a25af,
- 0xfa4cbd3e, 0x7fa8d5fc, 0x7cebcac4, 0xa2f7a9e5, 0x2189df61, 0xbf09d93d,
- 0x78db7e48, 0xe3eeafd4, 0xf587f016, 0x09fae279, 0x8ecd4f80, 0x9fb91b9d,
- 0xfa07872a, 0x4bcec56e, 0x2f205307, 0x993c20de, 0x321dc7a0, 0xaf42abff,
- 0x56a5a547, 0xe652fc6c, 0x5f398a5b, 0xfc27da4c, 0xca4ca2fe, 0xe7e88f49,
- 0x63cd68a0, 0xee24c97e, 0xafd7c4ee, 0xebe518a8, 0xb0c97cd3, 0x27e71b6f,
- 0xfda97b9d, 0xb2ec65b0, 0x5bd8cb7d, 0xdf2e1953, 0xebb3dcf3, 0xdfd197e7,
- 0x73e17623, 0xdf303f52, 0xc1be9ec0, 0xcdebcf2e, 0x2f4067c4, 0xe34d79c6,
- 0xed93f5e6, 0x29afe0bd, 0x076832aa, 0x91e523c1, 0xbe379630, 0x62b3f8ef,
- 0x6df9f156, 0xe1b55bc8, 0xd9f22f92, 0x885fed41, 0x37eeaed2, 0x7932760c,
- 0x3bab0bde, 0xe3c8f217, 0xfa6bde51, 0x96d5bcb1, 0x39d0358a, 0x53e61d7a,
- 0x95fc92a3, 0x457f2411, 0x73af7dc6, 0x2de51034, 0x5beffb42, 0xcd046c7f,
- 0xf1290633, 0x86a468ef, 0x485a45de, 0x7d39fc97, 0xf3f8135b, 0x3f926143,
- 0xbe610d47, 0x7433ac64, 0x07c4039c, 0x6f3be234, 0x11a339d0, 0x3917183f,
- 0x7d78cac6, 0x907552dd, 0x3922e5ce, 0x1ada0802, 0xdef3943b, 0x61638228,
- 0x2eb42aa4, 0x252347f4, 0x3ac7cdbc, 0x984427c7, 0xc42515ea, 0xbf8fb1da,
- 0x1d55faef, 0x3cb86a4f, 0x8370d4bf, 0x7413babf, 0xfac87dbf, 0x776f7ae5,
- 0x3abe630e, 0xeb7ef956, 0xdf15b22e, 0x8b8f3c09, 0x7373e7cc, 0x2394e45c,
- 0xd475e09a, 0x7ae87be1, 0x1c25d743, 0xfa8e4fdf, 0x167c2289, 0xedcfc7ec,
- 0xb66b776c, 0x5bf73eb5, 0xa2f36b82, 0x47ec0f5c, 0x70bed32f, 0xb54e9f80,
- 0x0cdb9cb3, 0x37ea92d9, 0x5c7170f7, 0xdfba6bcb, 0x6f5b37dc, 0xbe7c14ef,
- 0xc5eecf34, 0x615ef47e, 0x48ec1034, 0xd26fd2f5, 0x16f25b6c, 0xe5e6ebce,
- 0x819a1e29, 0x69efc150, 0x97e47ccb, 0x095ebcfc, 0x817f0adc, 0x40bb73ef,
- 0xfeb175fd, 0x5f94e9d5, 0x9e0c8bf7, 0x8fa447cf, 0x8abc62d7, 0xeb5be670,
- 0x71c9bd62, 0xff979f04, 0xb279f2a1, 0x91f29f2e, 0x6ed9d7cc, 0x378a6eb3,
- 0x04ad7e19, 0x36eb6f1d, 0xb05ef2ca, 0x4d3ca293, 0x26c4ffb6, 0xd7a2dfeb,
- 0xfbf1d04b, 0xb4a74eb1, 0x683f99ff, 0xa0ac2393, 0x95fb77ff, 0x46fbc6be,
- 0x17e19378, 0x933cac7a, 0x54b7986d, 0xf4c690de, 0x66d3dba4, 0xe8dcfce9,
- 0xc31c0d17, 0x9fcb4f21, 0x3b37f3d9, 0x2a33df41, 0x1f21479c, 0x22b1547e,
- 0x0cfd7a0a, 0x21840d4d, 0x917a6abe, 0x4bae43b7, 0x395eb8c7, 0x1fa1b96e,
- 0xdf00ca06, 0x710e6dbd, 0x81ffcb2f, 0x96fac121, 0xfa45e781, 0x1a6eadbe,
- 0x7eb060f3, 0xfe27695d, 0x72e51833, 0x6a45573f, 0xd57d07bf, 0xc1e1baee,
- 0x54fbbd60, 0xbc7943f9, 0xcf71f4ef, 0x3c1f79d5, 0x6f39099e, 0xb8c6a702,
- 0xf011fa1d, 0xd5f6fd49, 0xefda73cf, 0xdaf00b3e, 0x92fa94e4, 0x85e3edb5,
- 0x359faf94, 0x09f7cf80, 0xd85e1d53, 0x8477daf5, 0xadaf61f9, 0xc5ee159f,
- 0x44f7eaba, 0x985761f9, 0xd21befa7, 0x6fbbf8bb, 0xade98dbb, 0x667feeca,
- 0xdaabb50b, 0x89e99fdb, 0x96ed95e8, 0x642357be, 0x6d37b479, 0xf79450e5,
- 0xcd79fa64, 0xf2e18ee6, 0xe5c15537, 0x58ac81e1, 0x539b7782, 0x1375853b,
- 0xbcfdb2bd, 0x1fbf2eca, 0xfa12e9d1, 0x96fffb25, 0x42bf7144, 0x8f3f6bcf,
- 0x584f7d67, 0x91f3c9bd, 0xaf207dbf, 0x7dcbb347, 0x7ea46a21, 0xe725c478,
- 0x9e39a475, 0x8f4185f7, 0xdcb78a47, 0xef809593, 0xfcd2af24, 0x0d4cb2fe,
- 0xd06e1fc0, 0xf2f5329f, 0xbb5157db, 0xd9555f4f, 0xd7e7336f, 0xfbe30f8b,
- 0x13d5159c, 0x6bbcad39, 0xf5cb9f32, 0xa2eea71f, 0xa7a984f2, 0xe06bbfd8,
- 0xd9954e7f, 0xe9989a9f, 0xdf813355, 0x83f6ff5c, 0xd9e1ff54, 0xaf001627,
- 0x473533c3, 0xf6dbec26, 0x4cff97d6, 0xfe5d6df3, 0x36a3d627, 0xf8116b44,
- 0xd3f7717e, 0xc566bf5c, 0x7e802cff, 0x74956350, 0xb5db9c63, 0x91bf3d3a,
- 0xffa2c5a9, 0x63bf15aa, 0xb722223d, 0xbbd7f245, 0x3a3f1fbe, 0x65f9bcd3,
- 0x0e2dda85, 0x54e1ae6f, 0xfce1e7f5, 0xa204ab3b, 0x2b860c95, 0x5ce775eb,
- 0x47e06881, 0x72b948b3, 0xbf10d6f1, 0xa8925031, 0xdda88bdb, 0x8f5526e2,
- 0x41dbf457, 0x17f5a1a7, 0xf7ae77d3, 0x8a57bf3d, 0x763df926, 0x30d7fdd7,
- 0xfe0045fe, 0x64cf33ae, 0x9c3d026e, 0x5674f9df, 0x34369fb0, 0x72be72bd,
- 0x7df4e7fd, 0xd8be8e73, 0xc2db6f40, 0x0b85f64c, 0xf4eb24cf, 0xc2e9e926,
- 0xe7ecd933, 0xd9f7d44e, 0x0967c505, 0x085fbb47, 0x9e10678e, 0x971ce1a6,
- 0x0e1a6de3, 0x9d61fbb9, 0x6f8e2e2f, 0x67be0e1a, 0x68e141f0, 0x41bb63ca,
- 0xf41e534e, 0x8a7f8364, 0xfe2ca6e2, 0x42ca6fee, 0xd22d97fb, 0xa7cc630e,
- 0x8e14b9f2, 0xf0b5a27b, 0x6e7caefe, 0x8de426e0, 0x929dea71, 0x253bd4ef,
- 0xb75d097f, 0xe8ed2996, 0x1ee64fa4, 0x3ea4b3a7, 0x5c82f496, 0xde6593e4,
- 0x370bf329, 0x6fe1d894, 0xbcc9b7af, 0xfd897d6b, 0x35f7e618, 0x4fc4b569,
- 0xc5bf7473, 0xdda97f02, 0xfafefb0b, 0x3f1ce1e5, 0xcf3edb31, 0x7af6694b,
- 0x035c69bb, 0xe6fdfd0f, 0xf9f48bef, 0x0b890df4, 0xedb6a5ea, 0xf639c30b,
- 0x8e32e8a4, 0xb4a64960, 0x6cfb3ee3, 0xaa917c47, 0x6dc2dbeb, 0xea5ba1d0,
- 0x8fd78c1b, 0xef5e06ed, 0x2ff9a148, 0x5ea5e3d6, 0xfab848b6, 0xe37c959e,
- 0x052d7ce5, 0xc005d5e7, 0x2c5ce71d, 0xef2173e5, 0x7919bc49, 0xf0a15f03,
- 0x82f1f15d, 0x9f95d814, 0xc7f694c8, 0x8f2c7c50, 0xe513de1a, 0xd4cf5c06,
- 0xdc00ca1d, 0xa89d758d, 0x68e97af3, 0xafbfb4eb, 0x8c6877cf, 0x30477c8f,
- 0x1d6fd77d, 0x20248872, 0xd5ab6e71, 0x5bd42583, 0xbf43f7fd, 0x36aadfa8,
- 0x3b9704f9, 0x91739780, 0xbd41dffb, 0x0b621b05, 0xed180fa0, 0xf22fae17,
- 0x7bcd0796, 0x5e3356e0, 0xf3993506, 0xf3997783, 0x0d06715b, 0xe5dbe7fb,
- 0x21f9cc07, 0x93f9cc87, 0x3371dc1f, 0xea854fb6, 0xbe855fbb, 0x87f1e7f3,
- 0x7a9f4e70, 0xe06895f9, 0xcb5ff16b, 0x0b8f420f, 0x3c21678d, 0x7c73d02a,
- 0x97a6145b, 0xba5ae22b, 0xdc4643e1, 0x30627606, 0xdf0a54dd, 0x95da6b41,
- 0xa68acff5, 0x6744bd9b, 0xcfdecb55, 0xecb37a90, 0x5ad36477, 0xd0cee43b,
- 0x3be3bb71, 0xc6ebd222, 0x21192a3e, 0x365eaecb, 0xfe8a7c4e, 0x1e37bcd6,
- 0x2bf258f4, 0x929be585, 0x075fb297, 0x3edd1cdd, 0x6f4e46fb, 0x30f0c09e,
- 0x9b0b92ec, 0xe5144095, 0x8744235d, 0x8bf11f7f, 0xf1210da5, 0xe2426b9f,
- 0xcb99d682, 0xf7daee17, 0x504710ff, 0xe38278e1, 0x678cae2e, 0x5e801ff4,
- 0xb04a7959, 0xa9f2aa7d, 0xe5ccc26f, 0xf5f35d72, 0xade71875, 0xf1cb5df0,
- 0xaacb577b, 0xa3fe38e2, 0x4ba8e973, 0xcb73853b, 0x5eaf7738, 0xf29551b7,
- 0xfd84b7bc, 0x8e7691c8, 0x8876ac8b, 0xebcd0440, 0xebdd9450, 0xd697efa2,
- 0xb2bcb147, 0xe85ce480, 0x270a948b, 0x627a4dc7, 0xbbc4eed0, 0xd565cb2a,
- 0xd51e12f1, 0xb0fc51bf, 0xa8c3571d, 0x3293fc8b, 0xd80448bf, 0xf23a0bbe,
- 0xf803ad06, 0x468bce0b, 0x59565fa9, 0x9f00fb03, 0xab68738e, 0xad155eb9,
- 0xc838a3f3, 0x35efe67b, 0xd7eb8da2, 0x2c26faee, 0x3d907bf8, 0x435ef59a,
- 0xf97556ff, 0x3865fd4a, 0x20744fcb, 0x6cbe8384, 0xeebedcba, 0x6823921e,
- 0x7b0d2b7d, 0xdf295af8, 0x9e51b2dd, 0x679f2463, 0xd6eb4e01, 0xef867c92,
- 0xce206fa4, 0xf70a2c44, 0xea5ef94b, 0x6766eb0b, 0xf2f45cb1, 0x400a5e3f,
- 0x487b691f, 0xbad2bb61, 0xbaefd97c, 0x3973fafb, 0xe3c2a7f8, 0xb61cb184,
- 0xde518a6e, 0x4f91f587, 0xc864c530, 0x1ef7faf9, 0x4bdefe6e, 0x3ec166f5,
- 0xb2741e22, 0xfb90c47d, 0x2ebeb90d, 0x1ac9cb56, 0x75feb9d6, 0xf0516db1,
- 0xf88a89bb, 0x6cb725ef, 0x36e3cfd6, 0x2afc3f5a, 0x83e295c7, 0xf5198ff0,
- 0x73c96646, 0xef58f865, 0xfb1ebd16, 0x532dca5f, 0xe68ebdee, 0xc5b0fe55,
- 0x81a17b0d, 0x66f5e107, 0xd7c70e01, 0x77400b3a, 0x7b4ac7c3, 0xc97c91d9,
- 0x0a8623e7, 0xab1a378e, 0xcb363bf7, 0xc2934a62, 0x9fa91e89, 0x1ed245e3,
- 0x98b4351f, 0x132496ed, 0x6e2aebcc, 0x4fdeb265, 0xf7f8bdb9, 0x4f98bba3,
- 0x4a145d07, 0x6df8fbf9, 0x2f3926ca, 0x0e27c72f, 0xf6c9c07f, 0x39fae3ce,
- 0xe881e064, 0x0fcb1868, 0x0d8b5f3a, 0xa0b59ba1, 0xa0879c07, 0x6e982507,
- 0x8b7174cc, 0xee2be122, 0xbaf7f293, 0xe8379958, 0xf13ff482, 0x229ba0bf,
- 0x4eeff0c1, 0xdbd6340f, 0x74481e9d, 0xd07408b0, 0x0e842f51, 0xec3fd584,
- 0xa542fcdb, 0x2b876223, 0xab148284, 0x17ce21f0, 0xebc32467, 0xb11fab1e,
- 0x12adbd78, 0x92ac7d2f, 0xf1b4e8ee, 0x7d701785, 0x4b7de22f, 0xbc285c1e,
- 0x4524bf54, 0x1fad7db0, 0x713155c9, 0x45204edc, 0xbb0a8e58, 0x5f68a513,
- 0x2ffb2312, 0xfe3829e9, 0x5af3fa8e, 0x36e674e2, 0xc15efe1c, 0x47da0572,
- 0x9f7b7017, 0x5af7b645, 0x219bed29, 0x95b698f5, 0x5cbf1c61, 0x2c9f7772,
- 0xb1c4c9d0, 0xa2393a66, 0xde9195d3, 0x8dd6ed94, 0xf0249e75, 0xfe86d283,
- 0x3bd4de7c, 0x0277ab8b, 0xb047cf9f, 0xeab259fc, 0x618693c7, 0x7bc42f7e,
- 0x9333c280, 0xa83fb3b7, 0x4ff661b4, 0xb4be843e, 0x2bdfc9d5, 0x59f5b5b2,
- 0xf715ff90, 0xa1f49ddc, 0x7b01c674, 0x9ad7b512, 0x957f9ca6, 0xedbec9d2,
- 0x989e40ef, 0xfae55d3c, 0x3fb48593, 0xce39068b, 0xd6e7df24, 0xd26fea24,
- 0x69c38528, 0xa95d3a8e, 0xa40fc1b7, 0x2ca9f7e3, 0xe47f9745, 0x5592813d,
- 0x73d2b27d, 0xc9213fe1, 0xfd390bb8, 0x18fea353, 0xa5e40365, 0xfee4e3be,
- 0x1fa3ea33, 0x4b3a7bf4, 0xd5bf55ca, 0x09e462e1, 0x7cbf5cae, 0x8cd1d875,
- 0x40b6a468, 0xfde9a536, 0x7584bec7, 0xe3174c4e, 0x77ee03d7, 0xcf1362aa,
- 0xf984adb7, 0x1b965442, 0x4291abf4, 0xd6e3facd, 0x8f27ccfe, 0x7a3b698e,
- 0x6e8fe537, 0x8ced241e, 0xbe617dba, 0x707cb04b, 0x77e48c74, 0x98239b6d,
- 0x5ab88ffe, 0x3da4e2a3, 0x650ded8e, 0xb67c493a, 0xf87efeac, 0xed22cbb9,
- 0xcc7e7147, 0xc71a687f, 0x53ad9427, 0x07b86103, 0x6f6cc6b0, 0xded48aa8,
- 0xb5087ec6, 0x07da841b, 0xa3a03340, 0xd30a35f2, 0x8f7adf61, 0x9cf9993d,
- 0xcafac2e2, 0x70fd0323, 0x9c7b940e, 0x857cc71c, 0x826be0fd, 0xb9d870fb,
- 0xb5890705, 0x009f6b67, 0x258d254e, 0xe446efac, 0xfabfcc6a, 0xec1704aa,
- 0x26fe9d17, 0xbe0b5ec2, 0xc64d6d64, 0x9655fcbe, 0x3130f19b, 0xc91595ce,
- 0xfbe4ed74, 0xae6d4711, 0xe8efcc2c, 0xe705a74a, 0x9215c4ed, 0x39df69be,
- 0x1ee18f70, 0x3f0708ab, 0xfb259ad8, 0x5adcb39d, 0xbdf291bc, 0x151ac21d,
- 0x67cafa07, 0x5fd85efa, 0x75e62dfb, 0x046217ea, 0x35b289e4, 0x74bce56b,
- 0x96d610fa, 0xe71a78c2, 0x02b4e6d4, 0xe43a01d8, 0xdb63fedb, 0xba0ec701,
- 0x3d30ae98, 0xc10e7ad0, 0xbfde9c8e, 0xe9ca32f4, 0xcbd03efb, 0x7e81b1c4,
- 0x522f1b88, 0xb6be38ab, 0xe078493d, 0x6d176c77, 0x93b48f2c, 0x67be023c,
- 0x9f8319f0, 0x1d785d10, 0xdef89d89, 0x22534752, 0xceb5be1e, 0x75b3e9c1,
- 0x7825612e, 0xe24bab6f, 0x6e750bfc, 0x72ea9b4e, 0x758bfbf8, 0xa8efce1e,
- 0x97f9c11b, 0xdf9cbceb, 0xd3813755, 0xf98aeb57, 0x08afc1bb, 0x0c97cc3e,
- 0x27f063be, 0x3e0cbe83, 0x73574a15, 0x17b8cc09, 0x1f99ed41, 0xeb107c1a,
- 0xf27414b0, 0xe9cf0327, 0x092fb14a, 0xceb450fc, 0x5228ff06, 0x0aff3dcf,
- 0x22bdb9c1, 0xed22ed92, 0xf6c1f242, 0x079f0748, 0x0bb176a9, 0xdbfa89f6,
- 0x0eef47bc, 0xad6f873a, 0x88358450, 0xc4bc7f8e, 0x7441ac29, 0xac596f3c,
- 0xd9673f29, 0xa4f46ffb, 0xd18ddffe, 0x1e69b372, 0xe2797e47, 0xf7d13ff1,
- 0x4cf2fcdf, 0xf34ef026, 0xc608fcd8, 0x9f5c2bbd, 0xd79cab47, 0x9796765d,
- 0x7d4b86fa, 0xd4b86fab, 0x08e3f0b7, 0xa23dcf8a, 0x2d24a3e3, 0xb1a79df6,
- 0x9baeec6f, 0xb44ec047, 0xfa4bde52, 0xf2a3f804, 0x2bf8e8bf, 0x70c6165d,
- 0xf7d06d2d, 0x36dc706b, 0xb258f5c1, 0xf14ef960, 0xd7bf010a, 0x03d85fb1,
- 0x63882b24, 0x40afc38a, 0x6be52276, 0xab98ec0e, 0xe411f77f, 0x21d96576,
- 0x4735b396, 0x087fc724, 0x705ced9f, 0x83b2ca7f, 0xf6b44f5c, 0x466fc536,
- 0xc1daf50c, 0xc0e34037, 0x7ece695f, 0x1a2efd81, 0x547fbe15, 0x5f2f7fae,
- 0xa2e298ad, 0xf2ab6467, 0x959645de, 0x4259f3d3, 0xf91a473f, 0x96e832bc,
- 0x61c708f8, 0xdbe63796, 0xe7f10417, 0x597bfd52, 0xa714c58e, 0x9d967f79,
- 0xcd8f39fb, 0x9bdefd49, 0xcf9f6e93, 0xf7cdcffc, 0xa5fa8c05, 0x60ab0539,
- 0x835d2e4f, 0xf6cea6a8, 0x7d56d925, 0x7fa92e5e, 0x8b5ce839, 0x2ea2f8e2,
- 0x9e4307cf, 0x475f8539, 0xef59fcf2, 0xb9e17caa, 0xac8fe017, 0x9f1873fc,
- 0x75839553, 0x7fae4afb, 0x839634f8, 0x70ca91bc, 0xff12e1c8, 0x35fdcf85,
- 0xb55d711a, 0x97bda43b, 0x6b48a3d2, 0xfb60aaee, 0xf6878d5e, 0xadf81c49,
- 0xfaffefd9, 0xfbe67f0b, 0x14b0849d, 0xe3986e3d, 0x7c766ad4, 0xb06bf1ca,
- 0x7286f9c7, 0x1d5f1c7f, 0xf956beda, 0x4257f59a, 0x68a7c32c, 0x3c8997cf,
- 0x12979d8f, 0xf76ae7c1, 0x2aec4cac, 0x1fb4288e, 0xb0c1ce23, 0x358ad5ff,
- 0x59cfc39f, 0x9384d77b, 0xff65de92, 0xca1eeda1, 0x31384a8e, 0xeec62050,
- 0x5893b62a, 0x2c9fc705, 0xed38edd5, 0x63f64cfe, 0xd7772777, 0x2446a7ae,
- 0xdd5bb592, 0xbeb38831, 0xae84c428, 0xb649f4f0, 0xd3cf6b94, 0xfb3f823e,
- 0xe59536e2, 0xa2db8bca, 0x6cd43f78, 0xe220f07b, 0x529e6b04, 0x2d58edd8,
- 0xafce385a, 0x1c2a8766, 0xec1de7c1, 0xcfddf147, 0x5173f173, 0x8b5f404b,
- 0x66ef149a, 0xc3bb4f9a, 0x6823d31d, 0xbe80af3e, 0x87173488, 0xff470d35,
- 0x079ffcc6, 0x37920b7f, 0xc83f3517, 0xef548df1, 0xbf37cb02, 0x3c38e08d,
- 0xc1dec93a, 0x186ecf3c, 0x45f5fec2, 0x586bdb4d, 0xe116d83e, 0x8ce9e982,
- 0xe40f7ccd, 0x81de3e80, 0x9c8f296c, 0x67d1edd5, 0x1f59505f, 0x2fb13cc2,
- 0xb871e8cf, 0xf537bb46, 0x4b666f76, 0x5b26a7f9, 0xfea34fee, 0xd1a3a6a2,
- 0xc9fea8fd, 0xfa73ef5d, 0x166fbebe, 0x3df0b645, 0x223b93cf, 0xc9e09ef8,
- 0xfe39ff1d, 0x7cef1a35, 0x250498d3, 0xc89d9f68, 0xba7dc9b6, 0xf32fc641,
- 0x13d61725, 0x673d789a, 0xac28a5b7, 0xa4de032f, 0xf1dd9fb7, 0x3643df12,
- 0x1c1febe4, 0xb14b382f, 0x18a3dc86, 0x8d72083d, 0xd7bf55a3, 0x63dba6d1,
- 0xaffa6ffe, 0x5927bfd1, 0xe471f58a, 0xc4610d3d, 0xbd64b86f, 0x27b73522,
- 0xc1dcddf6, 0x97b899ee, 0x7192de2d, 0xfbb7a297, 0x737fe811, 0xbb678ddf,
- 0xbb9b52cb, 0x1c5ff227, 0xe0958526, 0xce887733, 0x875a9257, 0x347bbec1,
- 0x8615ca4d, 0xb56aad3f, 0xbf04bc22, 0x3e67445e, 0xd0aafde4, 0x49c90760,
- 0x563d7c67, 0x71daf161, 0x99d9463e, 0x3bb7ea2e, 0x75bec137, 0xec7c2ec4,
- 0x29d17b7f, 0xd2e927fd, 0xf618f20f, 0x7155a517, 0xd878ac81, 0xa9ba74f1,
- 0x41e54a9a, 0x29b553f4, 0xea390590, 0x2bda5d9c, 0xa9fd0e51, 0xabd640aa,
- 0xfd14e9d7, 0x69a3dfa0, 0xf72c0bae, 0xd0128cee, 0x44af2c3f, 0xd3c8bb49,
- 0x41614dce, 0x1cbf76e7, 0xc777118b, 0x66df4afe, 0x0bcc3f39, 0x7b22fcf2,
- 0x5de41663, 0xd145dc63, 0x39de3c8d, 0xe145bc7d, 0xa39fd28d, 0x3ea211c7,
- 0xec136f8f, 0x267b6a77, 0x33e6e58c, 0x632f2dc9, 0xf4cf33fc, 0xa387b8e7,
- 0xbb05fe69, 0xf1c9dcb8, 0x86cf95e7, 0xd3df3005, 0x5db144cf, 0xca4db4eb,
- 0x12a3a763, 0x143b1e59, 0xf65cf0f5, 0xa6153d63, 0x5a742ec0, 0x9424f611,
- 0xf2c1ee6f, 0x32dd6540, 0x1254d7f6, 0x4b1c817b, 0x7fc804e9, 0x67f737a7,
- 0x37e9d17b, 0x415cbe55, 0xbbe357df, 0xfee71a26, 0x0880f58c, 0x799e23b6,
- 0x83fd6ff6, 0x5a463bfd, 0x3ed994f9, 0x5183d066, 0xf923945e, 0xd48e5160,
- 0x71c86e53, 0xd11d5a71, 0xaf3a70f4, 0xc207f253, 0xb481eb03, 0xef61131f,
- 0x3bb4d23f, 0x9c048951, 0x09ff5903, 0xfa956fb8, 0x366d99d6, 0xbe5553f6,
- 0x3b65e512, 0xf157ddd4, 0x554ac97d, 0x1695e3ca, 0xf950df79, 0xffb656e5,
- 0x2ce33fd4, 0xde083c00, 0x6db6dfac, 0xe471f556, 0xf4b7a7b8, 0x808d7e91,
- 0x10797f9e, 0xc62635fb, 0x59537989, 0x0790dace, 0x2a7a5eb8, 0x69fee382,
- 0x34e22f1c, 0x73f67f2a, 0x0598cf09, 0xc1d027ac, 0x4ab9fccd, 0x77d5cfe7,
- 0xfb83135f, 0x955de210, 0xb25c79ef, 0xff827a7a, 0xd1ebc286, 0xbf3f9d1c,
- 0x83f338fc, 0x8837da2a, 0xfbac5b0f, 0xab3f1713, 0x78b95cb0, 0x308f35d5,
- 0x26c77ddf, 0x5ce22f70, 0xc1d183d3, 0x91cfa8f8, 0xbdb3b0fc, 0xa62feb87,
- 0x5bfdb2fa, 0x2d9f3cd1, 0x13e60f36, 0xddefc78a, 0x07f559fe, 0x226cafb5,
- 0xcaef22f9, 0x2d125ff5, 0xf833fd3f, 0xfb2736cb, 0x7cff0951, 0xadacea3f,
- 0x7fd60169, 0xf32779f6, 0x72b1b39d, 0xf3a49fdf, 0xf98d97d4, 0xb66bf4dc,
- 0x9a61fccb, 0xd0b439c0, 0x98d7ee6f, 0x9b1cead7, 0x4f78b7a4, 0xd544fc0c,
- 0x47f40f84, 0xb711f9f8, 0xedc6d77f, 0x40b3d743, 0x4fb1d76f, 0xba608f9f,
- 0xe24bdb39, 0xd90bece5, 0x8d1d9e7c, 0xf34ef495, 0x74de854d, 0xb8a65636,
- 0xa87a0499, 0x387a4974, 0xed89a63b, 0xed956702, 0xc9b51e9c, 0x74cb9576,
- 0x5fd13b7f, 0xb407e812, 0x63aabb09, 0x19e40b4d, 0x05e1f9f0, 0x4f70069c,
- 0x0e8ed43b, 0xd7f9c53a, 0xe333efaa, 0xebf6051d, 0x89a79fb0, 0xba6bc7ef,
- 0xedb5593c, 0xf0257f4a, 0x614db554, 0x197fda7f, 0x13acd0fd, 0xd1003f0c,
- 0xc29f62e5, 0x6e2c22ef, 0xfb116db8, 0xbdac96d3, 0xac6f7415, 0x6f7eb163,
- 0xf2a2a8fc, 0x693a8e7c, 0x395ee4ff, 0xdfdf019e, 0xef1d28e7, 0xf38cb875,
- 0x3bf0f68b, 0x47496e39, 0xfea46d94, 0x35941aaa, 0xa19a4e3c, 0x19f5cae7,
- 0xd8b02270, 0xaa3be761, 0x6eeed3df, 0xbb3cc6fd, 0x9d31cf9b, 0x934d1fcf,
- 0xdedb93f7, 0x2fccf796, 0x26243d7d, 0x41e45bee, 0xf46061be, 0xd063b887,
- 0x22f3f520, 0x35f61cf8, 0xcf34ebec, 0xff660dac, 0xe653ce4e, 0xe66d80fc,
- 0x730eee7c, 0x9cd9af3e, 0xce6ebcf7, 0x0ee309ff, 0xf41384eb, 0x4235c46f,
- 0xfd0a46ff, 0xf5261ddb, 0x7fa1e46f, 0x8dfe8523, 0x91bfd0ef, 0x3c8dfe87,
- 0xa1e46ff4, 0xfe85237f, 0xfeeeef8d, 0xba554e12, 0xe07814bd, 0x4eddbab8,
- 0xf7813e23, 0x89d9c5cb, 0x558547cc, 0x1717a5ea, 0xd8fcb2e5, 0xe29a6f61,
- 0x11937b60, 0xad18958f, 0x1c47f785, 0x2ab2d280, 0xb1db8d39, 0x55f6ba1d,
- 0x2aae8769, 0xeec24670, 0xe4f2bad1, 0xa73d882a, 0xd418b4a3, 0xdb6b68bf,
- 0x2f56f802, 0xff7ec03b, 0x77eee5d6, 0x63f893af, 0x5f118bfb, 0x0f738255,
- 0x25f49b7c, 0x71ee7719, 0x4353db04, 0x4d6244fd, 0xd3f21e50, 0xd43e733a,
- 0xe6be042f, 0x46abed35, 0x6c6fc7c6, 0xfbe0c96d, 0xe0e33e9f, 0xea9f69fb,
- 0xcb313bf6, 0x2198f2c1, 0x1bce23cf, 0x9f7a5970, 0x95e709b2, 0xc31c7fa7,
- 0xa650fdb9, 0x68617ee9, 0x0879765a, 0xf91df3cb, 0xe3058a35, 0xbe5f4f94,
- 0xcd6af58f, 0xedd2e7cb, 0xd7b2dcfa, 0xeef34c5b, 0xd1370fef, 0x71adc0d1,
- 0x5ecc7e21, 0x7783faa6, 0x31f887a5, 0xa7ca9807, 0xbe6219ad, 0x8d7c5f8f,
- 0xbdafbca9, 0xa1cf6e23, 0xfce4b9fb, 0xa5ceb25c, 0x6f5e02c8, 0xc554bd68,
- 0x6ff827f3, 0x4e52dbc5, 0xc673a72d, 0x2bbfaf1b, 0x9d7f0052, 0x57e03277,
- 0x858b64d1, 0x3f4cad9d, 0xf332c487, 0x15615cb9, 0x275c573e, 0x84b855dd,
- 0x7fda9637, 0x654c6d69, 0x47e5fa3a, 0x68812aa9, 0x3c9680d1, 0xde8bafe8,
- 0xe838727c, 0x98dd5aaa, 0x3bf2a2ea, 0x5c16ff14, 0xb7133f20, 0x9524713d,
- 0x495aa927, 0xaa73c27f, 0x1cff702d, 0x19edd1f0, 0x7ca4ef02, 0x68a5e6ba,
- 0xe9d41f8e, 0x0ae6d52f, 0x6c6c1d07, 0x814f5954, 0xeb8d4cfd, 0x8d8d93a4,
- 0x6bd0e1f1, 0x3e28c62a, 0x5c06c7c8, 0x8508fa0c, 0xefefc907, 0x97bd69ba,
- 0x94f7c90e, 0x752e758c, 0x41a2fac9, 0x07de37b6, 0x5c7bd630, 0x372b47d7,
- 0x3b7591be, 0x7fd73f7d, 0x7e1b9e5f, 0xbc5dbadc, 0x6d96c5ee, 0x7d29925b,
- 0x85dda1c7, 0x7b7d2d75, 0x1a48f45c, 0x326dcbe7, 0x75b3fdae, 0x23e13e4f,
- 0xe3777a7e, 0x0fe87693, 0x9dbf5695, 0x6679cddf, 0x0ef11da3, 0xc8dd6fd7,
- 0x2d6c17df, 0x71807b61, 0x1194eecf, 0xa3c3ad97, 0xc1b2ac62, 0xde3ceb45,
- 0x1585ca5f, 0xa69e32e1, 0x46f51d48, 0x55b5d602, 0xa6bac7c9, 0xe3b76f17,
- 0xf58781fd, 0xa6fba17a, 0xa2b4f0fd, 0x357bb01e, 0xd4c71c29, 0x11993edd,
- 0x64fbcfe8, 0x4e34e5da, 0xc81e5fa7, 0x8287de7a, 0x239c2fb4, 0xf5fa21b9,
- 0xf5da5561, 0x4ef9559a, 0x30903d98, 0x5ec2f20d, 0x35456ef9, 0x0a23439d,
- 0x710bb7a0, 0xa3c5a535, 0xd79d1354, 0xf6984616, 0xb4112cc1, 0x9bbfa8af,
- 0x7e532f45, 0x530cc4fa, 0x07d399fd, 0x0b9df886, 0x7f99cb3a, 0x8f7a6d5d,
- 0x9f2efaa6, 0xc7bb615b, 0x4a77f358, 0xbb3a62ef, 0x5abed4d1, 0x47bd354c,
- 0xf7a9e8b3, 0x6544ce18, 0x54e1c476, 0x6658f7e8, 0xeff54769, 0x169dfcad,
- 0x7dafda62, 0x927f3c33, 0xfd8da5e7, 0x7d3d8609, 0xe8cf7e16, 0xef0cbcea,
- 0xbc451788, 0x9a1dec30, 0x61691e59, 0x3c48553d, 0xa8f6eb54, 0x8f691cea,
- 0xd8b5f6aa, 0xf04d8f11, 0x4810a6b7, 0xb6151a6a, 0x477d5237, 0xdfcc9c4a,
- 0xcc3bef85, 0xdfd052f7, 0x521f9465, 0x42bfb04b, 0xdd686bcb, 0x606a1738,
- 0xd83b6247, 0x076c9384, 0xfbd1c633, 0xf434e837, 0xfed0807d, 0x33700c06,
- 0xbadcaeff, 0x822f60fd, 0xd8e2b4cf, 0x549dc47c, 0x95b14d35, 0x6f495ee4,
- 0x54bfff06, 0x7ebf6161, 0x4c03e1fb, 0x5615ed6f, 0xff4b8c8f, 0x78d4b876,
- 0x9435dda5, 0xbad0170e, 0xb05e7fe0, 0x15f8f143, 0xe9f00f59, 0x810cfa05,
- 0x70174a4f, 0x01d5e033, 0x7e7290bf, 0xe2286e3f, 0x0a7be426, 0x1425c057,
- 0x0b38fa9e, 0x2772c134, 0xf3cfcbcd, 0x8c57e90a, 0xb78fdd08, 0xb0ffd9db,
- 0xe52e1141, 0xf531e1bb, 0xf4e3b4b2, 0x6feb90cd, 0x2078a9db, 0xd97cdfbf,
- 0xe83bdfca, 0xd9c658a7, 0xde689f4f, 0x54f41daf, 0x8ccff72c, 0xeedc89cb,
- 0xdf5745df, 0xff864f45, 0xdd7e3426, 0x19e647b8, 0x1ab7dfa0, 0xf37d8626,
- 0x1be1c44f, 0xe77741f2, 0xe439c74c, 0x9910f1d2, 0xbcf9e5de, 0xe39d2a34,
- 0xbedd35cc, 0x4d69f068, 0x5b8c676f, 0x78eee542, 0x7e6f7bd0, 0x43557167,
- 0xfbf06a46, 0x1b4ada37, 0x975381db, 0xe25a73c7, 0x7ce22574, 0xdf2e5929,
- 0x4f763969, 0x29f844cf, 0x25b4ad8e, 0x8faea44c, 0x553439dd, 0x88be420c,
- 0x86d74fd3, 0xcfb60acd, 0x032de91b, 0x9c61f9e4, 0x872de7ef, 0x3f6167a3,
- 0x8d19dcae, 0x2bc5a190, 0xa2ee1fdf, 0xa2d9be73, 0xfec01a2d, 0xce7c8de0,
- 0x3cc6d70b, 0x390ce7d8, 0x516572a3, 0x5503c84f, 0x04f038ff, 0xb96d01e4,
- 0xea72112d, 0x81540fe9, 0xe73a1ad0, 0xe538be58, 0x6a7df494, 0xabdf0473,
- 0x2f08a53c, 0xd48b51fc, 0xd32e797d, 0x337cdb79, 0xd2708fc6, 0xae30c3b8,
- 0xc6124b7f, 0x30b2f8f5, 0x333cb6ae, 0x8bbb2ba6, 0x97aed691, 0xc316dff2,
- 0xa3dbacce, 0x0f23f721, 0x4b645fb9, 0xc8791fb9, 0xf72148fd, 0xfc0d7be3,
- 0xd71bf00e, 0x6f43e5b7, 0x39158df5, 0x8e1cf84b, 0x7f5c81cc, 0x01ee12bb,
- 0xf15dba3f, 0x8478e4f4, 0xf9e5620f, 0x2357821b, 0xb246dd1d, 0x5edd1059,
- 0x8221d977, 0x88c6ebe3, 0x9c7e5358, 0xbf54d923, 0x2a6695c8, 0xbfa93ebf,
- 0x7706fca9, 0x537f29be, 0xfd5348ce, 0xa6319e49, 0xdc468ffc, 0xc53faa60,
- 0x9f94c53b, 0xa9b66136, 0x12e28cfe, 0x59ccf953, 0xb3e54c8b, 0xf94cdbb5,
- 0x34ee2b5b, 0x92f1ffd5, 0xaf72a6e5, 0x0e715970, 0x231f4336, 0x3e85efb8,
- 0x6fede946, 0xe3064667, 0x4b38d475, 0x350cef97, 0x56fa900d, 0x39f7ae74,
- 0x09ee25d0, 0xe8fb0e81, 0xe2f680f7, 0xbf06199c, 0xfa914065, 0xe8324b70,
- 0x685eb426, 0xfd88a8f9, 0x6594f097, 0x9f6ee7ff, 0x313e2561, 0x43fdaaea,
- 0xfb7f2832, 0x5619e6c0, 0xaea32fe2, 0x3a52ffd9, 0xdcf17fe5, 0xf07cb237,
- 0x57284bfe, 0xfccbf772, 0xc822c134, 0x9bce06af, 0x8c75a3e1, 0x8d7d2eba,
- 0xba53da47, 0x5235538c, 0x9d7101c0, 0x00d20380, 0xfdd227d1, 0x5f489f44,
- 0xb72cfa27, 0xe8907109, 0xd221e913, 0xf7fdf14b, 0x3d2297a4, 0xd2297a4c,
- 0x452f4877, 0x297a42da, 0xcdd43fd2, 0x3a83f4e2, 0xb1fddb8d, 0x8fd382ae,
- 0xf7f096ea, 0x7196ea4f, 0x1f3a97fa, 0x8064ff7f, 0xb0087f61, 0x8bf0c69d,
- 0x091fc0d5, 0xdb2ede7b, 0xb1bf60b9, 0xabe795e2, 0x5facc7e1, 0xb0235a22,
- 0xb1ad5b4f, 0xfe9d1c27, 0xadf9eec9, 0x92089c55, 0xdee19ccb, 0xf8f006cf,
- 0xb7cc5dbd, 0xc447eff5, 0x84053eb4, 0xb5d34fa7, 0x3307e0b3, 0xc656ca0a,
- 0xeeb8ff10, 0xd03625eb, 0xae9687cb, 0xd5efa3ef, 0x03a7dbf9, 0xf7e86dbd,
- 0xa65dfd5a, 0xae321d6b, 0x6b6b5af0, 0xbfdbdb3d, 0x03c46e14, 0xefec33ed,
- 0x5ef958f7, 0xe085f2b1, 0xc104e8fc, 0x5b2ffaf9, 0x5af10e38, 0xcf892797,
- 0xc46f3d1d, 0xfdf87505, 0xef1413f1, 0x8de1f863, 0x2fc29f78, 0x3e41c75a,
- 0x77691d18, 0x0dc48587, 0x2fbedfc0, 0xbcc68fea, 0xc3277df8, 0x97d4ffbf,
- 0xf2f78022, 0xb5fe3f0c, 0xd834968e, 0xe1df4615, 0x33c704f0, 0xe57afe19,
- 0xe715168b, 0x64af1189, 0x3bcc638c, 0x15fd4aca, 0x5d23c674, 0x47ca6aeb,
- 0x757d465c, 0x93d7f724, 0x49dde3be, 0xb955e7aa, 0xec3e535d, 0x22aba3c7,
- 0x8ce9423d, 0xafa9e813, 0x3af1ea9b, 0x38d0bf0b, 0x1eb4624e, 0x1c7e8127,
- 0x01df9cb2, 0x48109d1c, 0xa39ffbc6, 0xb03dd897, 0xb88e3e83, 0xb8ce82b3,
- 0x088fd405, 0x2798c7da, 0x15f7edfa, 0xbcf217cd, 0x9df0cbd7, 0x29f492e6,
- 0x3f5e7adc, 0x2c6385af, 0x3d54bbdb, 0xfae61c5f, 0xcfd1046b, 0x90da19ad,
- 0x1fdfd481, 0x7ccc8c94, 0x7a3217d6, 0x094fdf60, 0x0bb04779, 0xf03119fa,
- 0xbc7e84ff, 0x05bdef12, 0xe942e1db, 0x418fc0c8, 0xbbde064f, 0x0e832ba3,
- 0x18d0e282, 0x65711def, 0xf4a17f7a, 0x199d1dd6, 0x43ad75f4, 0x8cf001d2,
- 0xae8320f8, 0xf89a2f94, 0x2ae8eb1e, 0x067f9f07, 0xc5d2855d, 0xe9257495,
- 0x0e27feb4, 0xba4aefee, 0xc007a4ea, 0x55d387e5, 0xf8f38a8b, 0x79a7a59f,
- 0xee3c626d, 0x765c97ff, 0xabda441f, 0x7c17b69f, 0x198c4ca6, 0x2e13120f,
- 0x62ec1075, 0xba32f5a1, 0x7f4a17a9, 0xbc6eeda1, 0x6e465da2, 0xfa71bb70,
- 0xaeda1959, 0x047aed12, 0x71c21bb7, 0xf7e96f11, 0x1a72de08, 0x999e2d71,
- 0xb90e5390, 0x2f2df26d, 0x27cc8de1, 0xf3a719ba, 0x096243f3, 0xee24bf9f,
- 0x854f40fa, 0xcb8d693a, 0x42fac85d, 0x71d76461, 0xedea3705, 0xdc5d7fc7,
- 0x4f8cf980, 0x37e3cb30, 0xf5fcf2ea, 0x0ffb91a2, 0xc6e1477d, 0xce59ebdc,
- 0x61477d0f, 0xfe3d40f3, 0x81a44d20, 0x8f0a17df, 0x0efe9f9c, 0xcfbea146,
- 0xc9ba7453, 0x5462ab70, 0x6a9bee1c, 0xaf321c56, 0x3c8c1de3, 0xfce3eb8c,
- 0xe9cfc20d, 0xfdc0224d, 0x0903a24c, 0x61091ff9, 0xc85fef93, 0xaa35bafb,
- 0x5790dff6, 0x4fba7b8d, 0xb057b53b, 0x4c440fcf, 0x4a77839e, 0xd7190dc6,
- 0x17f6ed0f, 0xdbce59ba, 0xb960c8ef, 0x16d2fb13, 0xde27c9d7, 0x5fba73a4,
- 0x8a37bca6, 0xfb46f714, 0x3bfa79d1, 0xbf91c73a, 0x40b96731, 0x736ef1bb,
- 0xfed193bc, 0x6138d726, 0x9cfeef8d, 0x776f29bc, 0xdecdd86f, 0x042ecd8a,
- 0xbd8adf7e, 0x7deb10aa, 0x45a7b62b, 0x4e7661a6, 0xafd2bd07, 0xa0d8b92b,
- 0xc6f1cefb, 0xc1dd78f3, 0x286b80d9, 0xff380d3c, 0xeb1eb800, 0x077bae0a,
- 0xaa379608, 0xad5f2423, 0xd57c908e, 0x58757380, 0x9ede7dc7, 0x5527f1c1,
- 0xf9af090e, 0x5611aecb, 0x2c17f512, 0x93f9c91b, 0xb807c275, 0x6092c69f,
- 0xf7b10f10, 0xce58eb09, 0x9ddcef1b, 0xe8bdf87d, 0x2ae79a24, 0x983ff687,
- 0x4082b9df, 0xe2844916, 0xbe7a86e4, 0x05fe7f8f, 0xaa7d03d5, 0xf1af754a,
- 0x96facec0, 0x7d71ef2a, 0xfe15207d, 0xd754f158, 0x0e55e8f1, 0x9f435f09,
- 0x98afd21c, 0x5fb1adf7, 0x80befe85, 0x0ef55f21, 0xc6dcf193, 0xe4eef73d,
- 0x92e1e1ad, 0x930ef5af, 0xebc0d8fc, 0xee7ebb06, 0x89c33f53, 0x3afca68f,
- 0x98abf59c, 0x2e4c6f58, 0xc28583ec, 0x76189fa4, 0xf904f695, 0x22b5ca7e,
- 0x945191eb, 0x13643c2f, 0x076f2a7e, 0x92794043, 0x1fb6b3f4, 0xef82a4ba,
- 0xfc294517, 0x6f71863e, 0x22c92c29, 0xc27c41dc, 0xbd370ee9, 0x13911b4b,
- 0x7947ca67, 0xc7ea997a, 0x9537488c, 0x98077ac7, 0x1427e3ca, 0x8a3df298,
- 0xefd536af, 0x29ac6b39, 0x68ddac9f, 0x31529faa, 0xf83794d5, 0x24fc8a58,
- 0xc5b92cfa, 0xb2efbed4, 0x34fd5352, 0x5ca9a55f, 0x319b3bc5, 0x25b7ab40,
- 0x81cf1127, 0xd58c9fca, 0xe3de434b, 0xf0cb08b5, 0xfe6b77ce, 0xcbde5a33,
- 0x80b91099, 0x6cc9afdf, 0x9e7999fc, 0x2c6b44ea, 0xcdbbd7a5, 0xc928b17c,
- 0xb9171f9c, 0x71df814f, 0x9fcccb9c, 0x4d589653, 0xeae51ff9, 0xe44f34fc,
- 0xbbe3ddb1, 0x300daff0, 0x4e143fe1, 0x7efc0f44, 0xef9d3b68, 0xcdebcc3e,
- 0x070f7e32, 0x75e0937e, 0x0c126fc0, 0x824df807, 0x049bf0f3, 0x24df87d7,
- 0x937e1cb8, 0x8721f2e0, 0xd61ff8cc, 0x55ffc662, 0x6ff1991f, 0x787765d0,
- 0xa66ad91a, 0x9a29a10f, 0xba782df2, 0x7e9994e6, 0x1d73f142, 0x09b15b89,
- 0x09e287c0, 0xbe387fb8, 0x8be08656, 0x37f7c3f0, 0x31477bdb, 0xd7d71e7e,
- 0x20606d75, 0x3a2e97df, 0x6fbc0aa4, 0x71f3ac63, 0x6d3fdcfd, 0x89731ad5,
- 0x5dcefccf, 0x763e0490, 0xdb7e909d, 0x4f1655f1, 0x207fba80, 0x29d99c64,
- 0x3fa843da, 0xaa52fe2b, 0x26d2741d, 0x0e7bcfdf, 0x95d8797c, 0x123eb2f1,
- 0xf6bef84f, 0x27be54cf, 0xa8d2c475, 0x8eaf8f80, 0x9f009ed1, 0x84bb9799,
- 0x8a249bd7, 0x5c402fb7, 0xa5857fd4, 0xafb73f22, 0x722dc7bc, 0x89fef95b,
- 0xa6e727c1, 0x2da9b8c8, 0xdd7fbab9, 0x9ea7e323, 0x777295c9, 0x71c5c794,
- 0xf2b925de, 0x8ba90d7e, 0x010773a9, 0xd99d873e, 0x0b3acf80, 0x8ad9ebbf,
- 0x9d09e47b, 0x0f21c8f7, 0xa3ecfe43, 0x1ff57fcb, 0x47581c3b, 0x3a617af6,
- 0x1dfbfb7f, 0x81e2f8a6, 0xfca65d5b, 0x5324a6a0, 0xdcbbc1fd, 0x40fcf2a6,
- 0xc87ca98e, 0x3f298f21, 0xa98465ac, 0x791f55fe, 0xad91f94d, 0xaff54c13,
- 0xca6c5539, 0x47b688a7, 0x8abedf01, 0xcd1c53b4, 0xb9fa974d, 0xe4dc705b,
- 0xe563bbdc, 0x7edd5f7d, 0x46fbc861, 0xd3a6b9dc, 0xd11ea875, 0xe8e52ed7,
- 0xf52164fa, 0x1fae8746, 0xa13eb30a, 0x1de371e9, 0xe67c58f7, 0x5e2371b8,
- 0xd0ef43bc, 0x7b8e8af5, 0xe2f19d34, 0xf178e0de, 0xbf5d61b9, 0xc75e7d1f,
- 0xfbfce87b, 0xeb7ae5da, 0xbdf3b4ef, 0xa15e631d, 0x96c949f3, 0xd1d9bb74,
- 0xbfaabdf5, 0x46fb4ae5, 0x25f1666b, 0x3613fdd0, 0x6dff2b4f, 0xf3c62b84,
- 0xb75e22b4, 0xcf7617fe, 0x8f77f70a, 0x5cb07737, 0x658b1ccf, 0x8e5c94de,
- 0xddfd338b, 0x92418884, 0x7583adbe, 0x78c84afb, 0xdc646373, 0xbbb1889a,
- 0x8fddbf41, 0x6499e127, 0xff7c0d17, 0xf167bf4b, 0xf74d35e3, 0xbd8e68a1,
- 0xce3f7f51, 0x4c3d036f, 0x59f24b1c, 0x5b13c93e, 0x49143d7a, 0xcb13ca72,
- 0x8a9f6cac, 0x3757fe7b, 0xde75fafb, 0xfa4be99f, 0xb203e810, 0x07a8f40e,
- 0xb72954f2, 0x2beb920c, 0xb8eebf52, 0x187ec0e7, 0x43f3f421, 0x174fbc70,
- 0x6874c6f4, 0x75dfe3ac, 0xdba0c1ef, 0xfa193850, 0x3be3d0af, 0xd2f319fb,
- 0x07ec67e1, 0xfdc67e03, 0xb66df713, 0x7c914de2, 0xe202658f, 0x6dfc0ce5,
- 0xc6f693c9, 0x503c3a05, 0xbba8fc0f, 0x6e6a457a, 0xedf7ec0c, 0xeae31dc2,
- 0xea3b0e82, 0x62e09bff, 0xfbbbbfc0, 0x87f1dd6d, 0x5df0be50, 0xa3baddf7,
- 0xdef67fd3, 0x909f105b, 0x5e3a4bf1, 0x1c2cfdfc, 0x94777017, 0x323f3f79,
- 0x9189f248, 0xe4fdf483, 0xccd6fd23, 0x0ffc042f, 0x9d552bf5, 0xbcf3c85f,
- 0xdf98ad24, 0x8af9c6af, 0x37f35bfb, 0x8847f94a, 0x87abc4e2, 0xe36f8fc2,
- 0x7ad7cbfb, 0x4a6ddb05, 0xde3f151b, 0x75f12af9, 0x9fb30fad, 0x0772cdcf,
- 0x26c83bbf, 0x46576a4d, 0x5da5f6ed, 0xc2facef9, 0x60eef948, 0x0beafbe8,
- 0x2f19bd75, 0x86df606a, 0x19eebdfa, 0x3cb1fba4, 0x9cf2c3c2, 0xa150d71e,
- 0x37a0e9d7, 0x0e6f7cbf, 0x31f6327a, 0x671f1ca5, 0x3f0dcdc0, 0x3cce90d4,
- 0x073c37c1, 0xb58379e7, 0x0ec5f8cb, 0xab7e1af8, 0x61ecbf0e, 0x1a1127be,
- 0x77ce718f, 0x3f2bba20, 0xeb1fe198, 0xf8741f6e, 0xefc3bec7, 0x89691ed6,
- 0x69f1dfc6, 0x67df26df, 0xfd5bf4e9, 0x22cbb865, 0xf03153fd, 0xfdeab53f,
- 0x5d798d89, 0x96d50fdd, 0x9438e4ef, 0x73b68b66, 0x2cefad10, 0x7b778ff5,
- 0x9fdc89ef, 0x26117788, 0x2ba976ea, 0xcdcbadd7, 0x8fe914a3, 0xef8ca9f6,
- 0x5ef209f6, 0x8151f13d, 0x4fc4677d, 0x0481114c, 0x4a1f86a4, 0xe1923d5b,
- 0x4aa1f86f, 0x9e792302, 0xda17ea33, 0xeb68e4f0, 0x851577a3, 0x53fd3bbb,
- 0x5c647dad, 0xaa7e7754, 0xb9f39769, 0xaf97e9bf, 0x183e7ee1, 0xb6e52694,
- 0x036efb86, 0x2ad80d9d, 0xab67586c, 0x4bbfebad, 0xabf3a851, 0x3d9ae812,
- 0x96ade282, 0x962fbc2b, 0xbf88c22a, 0xe46f3e62, 0xf9ea352f, 0x7dbf9922,
- 0xeab7cca5, 0xd16da7ef, 0x275820ed, 0x7a07ac52, 0x4edd36f9, 0xc5207582,
- 0x7c1df03a, 0x35f0790d, 0x90d7c1e4, 0x0a435f07, 0xa5ef86be, 0x761538a2,
- 0x0ad3f85b, 0xfc07f683, 0x72418569, 0xc169fc13, 0x82d3f879, 0x169fc3eb,
- 0x5a7f0e5c, 0x69fc3970, 0xd3f879c1, 0x9fc3eb82, 0x3f879c16, 0xfc3eb82d,
- 0xf879c169, 0xc3eb82d3, 0x0e5c169f, 0x39705a7f, 0x79c169fc, 0xeb82d3f8,
- 0x5c169fc3, 0x62996f3e, 0xd3cdb7f2, 0x5b287fdf, 0x51f4cf1f, 0x9b1c5198,
- 0xeffdf847, 0xc4fc7f88, 0x373c0e96, 0x2edd022f, 0x48f70ead, 0x904e373c,
- 0x8908b778, 0x8cd9b6e7, 0x32ecbbe7, 0xb4e3245f, 0x7e07e943, 0xf49b42ab,
- 0xdf85215b, 0x56fc290a, 0x2ab7e148, 0x2b7e94cc, 0xe15bf0a4, 0x4856fc3b,
- 0x0a42b7e1, 0xf85215bf, 0x6fc290ad, 0x2b7e1485, 0x0adf83b4, 0xf856fc29,
- 0x5215bf0e, 0xfdf0adf8, 0xfe03cd08, 0x905e632b, 0xf499fbf3, 0x9343a252,
- 0xe532ea5e, 0xd707e721, 0x5c1f9c87, 0xb83f390e, 0x707e721c, 0x707e721e,
- 0xc1f9c87d, 0xdc8349f9, 0xef20bfbc, 0xbc83b707, 0xd41f9c1f, 0xb6037be8,
- 0x2e1b49ab, 0x35b48ebc, 0x7142794a, 0x2f353109, 0x8bfc2673, 0x35254ead,
- 0x6d8423d6, 0x859980f9, 0x38f4d794, 0x66d13cc7, 0xae39be01, 0x05a6f080,
- 0x0f65c704, 0x5cc97ffa, 0xf9b3f86e, 0xbf9ef087, 0x50deb043, 0x35afdfa3,
- 0x4b847bda, 0xefd46a45, 0x2f5d77cc, 0x1ea37c74, 0x79a0cbf3, 0x8f996290,
- 0xbbfc9378, 0xaf700b22, 0x91458b60, 0x642bb8f1, 0x5d28743c, 0xe793caad,
- 0xf6cb16fb, 0x5388e1fd, 0xb83c5129, 0x156591ef, 0x80056c87, 0x7e0292d3,
- 0x562f2af7, 0xab92d75f, 0xcc658711, 0x124bb0db, 0x867be09f, 0xbd84daa3,
- 0xfd19c69c, 0xefe3b085, 0x4bb44c73, 0xa0ed0279, 0x29f40e6f, 0xf4414dde,
- 0x9e4f2cbd, 0xb6ef9a7b, 0xfbe9cbab, 0xae4b6dc0, 0x89c5fdc6, 0xd3ddb2e1,
- 0x386689bf, 0xf8506e4e, 0x6da8ce9e, 0xc9fb8bc2, 0x3a7e75cb, 0x5ecb9b70,
- 0xf8bae3ce, 0x6fd1a3de, 0xbe5486c9, 0x5a2259a7, 0x450f710b, 0x3df8550c,
- 0xb46e037c, 0xbeb1d7be, 0xb02ada2c, 0xfbe0ff2f, 0x7e2e244f, 0xa34ffb9f,
- 0x2116c687, 0xcb3450ae, 0x0d2742f7, 0x83d9592d, 0xe5f9a5e6, 0xbfa3a17b,
- 0xe706f258, 0x85ef929f, 0xe70cf932, 0x9879f1f9, 0xfdf853ed, 0x8dbec995,
- 0xee370496, 0x25b72f65, 0xe136fea2, 0xf5055881, 0x8d8af708, 0x168ae575,
- 0xbd01538b, 0x41f10388, 0x639c47f4, 0xdf25e83a, 0x59f7e363, 0x365d58a6,
- 0xce431bf0, 0xb1c97129, 0x3151d48b, 0x4432cf7c, 0xd0d4b84e, 0x7e62e383,
- 0x3b4e9c78, 0xba5fe7df, 0x8572c9d3, 0xbe615eb6, 0xc65f3e79, 0xf6dd56bb,
- 0x7a4b70e1, 0x99ce9c67, 0xbc16ff34, 0x7d267b03, 0x8508f47b, 0x79bd88e5,
- 0x0da3d704, 0xcd3df12f, 0xf06f9592, 0x0d4b943d, 0xfbcb450f, 0x52ffd26c,
- 0xb0cdc3ae, 0xf1d7cd3b, 0x69c7ae53, 0xef93ab1d, 0xcc39d33a, 0x6a5caeef,
- 0x119cb1b0, 0xf21a2f2a, 0x911ef1a7, 0xc23fae10, 0x60d153ce, 0x0bc03be3,
- 0x4aac94d7, 0x91ca5712, 0x7339758b, 0xe38d8351, 0xf7f42ab2, 0x6eff42d4,
- 0x1e3d62cf, 0xf356f16b, 0x42f24ff7, 0x7de350e4, 0xc94ebd4d, 0xb06607db,
- 0xc6c4b43c, 0xcb39cdfe, 0xb6247ca5, 0x93ee3f0b, 0xce3e59cc, 0xe666f782,
- 0x947af4ec, 0x5de9e82e, 0xd89a4e5d, 0x4ff864ea, 0xfcc11cb9, 0x14e5ea65,
- 0x397cdfce, 0x9799d399, 0xee2adc65, 0x5a6e8122, 0x073be48b, 0xd1f2ebef,
- 0xc6b1c40c, 0xc9eb3e43, 0xf44eddf4, 0x74f2218d, 0xabc035be, 0x3878801c,
- 0x44ada6d1, 0x8dc4ec9c, 0x4d83ef82, 0xfd451c82, 0x155c82cf, 0x5691fd8f,
- 0x2661f7c4, 0x68bf7c28, 0xf4dbd0d2, 0x43fb40fd, 0xf7e13f30, 0x29c2b457,
- 0xcd39c47e, 0x805bb6d2, 0x685f637f, 0x5efca3be, 0x629f24ea, 0x9a923bc7,
- 0x741b6fae, 0xe63fba1f, 0xb276e846, 0xeb076948, 0xb26193b0, 0xb7c92478,
- 0xd3e4266a, 0xf66db0b9, 0x608109d2, 0xcb2b951f, 0xd576fc7b, 0x419206cc,
- 0xc9fb55ff, 0xde458b1c, 0x9039233f, 0x96880bef, 0x2f949ea2, 0x4014a359,
- 0xf5e6323f, 0x38ec67b9, 0x3a4eb0cf, 0xa7d8ed28, 0xd2e51ef2, 0x7e4d327b,
- 0x8cb4d09b, 0x34fd8de0, 0x27f5a637, 0xf468610a, 0x1c2de160, 0x85ebf781,
- 0x005428f1, 0xe8b798f1, 0x3bc1eaf9, 0x973ab4ff, 0x91f9c4e1, 0xa6687dbf,
- 0xe05ff22f, 0x6b147393, 0x4b1bdff0, 0x0d5768b2, 0x50f7c1ee, 0x11bf03c7,
- 0x9efc6db9, 0x4bb64a44, 0x7b10a9f2, 0x8960f54b, 0x4bbb50c7, 0x773cae59,
- 0x7a9dd584, 0xdc7bfb2b, 0xd634b76c, 0xbe66cc7d, 0xf3a46be7, 0x5ee7eb07,
- 0x9ee7bfdd, 0xf916c3ca, 0x1663c2fe, 0x8857f6ff, 0xd0f72f5e, 0x8fbd6066,
- 0xe79ede21, 0x8cfaf1af, 0xf83a4cf7, 0x896efe95, 0x96474f41, 0x2afbe25d,
- 0xb77691a5, 0xe54f441a, 0x0fbe15ab, 0xde4cba5a, 0x2b56d6e7, 0x6e845df2,
- 0x99bf58ec, 0x9efcbffd, 0x117e6fd5, 0xf8750b2e, 0xbcb7b1ce, 0x84eb48e1,
- 0x3d979fef, 0x347c9360, 0x0efb8990, 0xf2712cb7, 0x7b9fd9bb, 0x0f06ab8a,
- 0x8c3c9e03, 0xe5d871a7, 0x8c4b7f54, 0x4bdf04b6, 0xd841cb45, 0xfcfee71d,
- 0xba8f7e6d, 0xdf8d39b6, 0x95d92cbf, 0xd9ef0abf, 0x7ed1ee7c, 0x4497f582,
- 0x4790ecba, 0xf9621a6a, 0x53db9e7c, 0xda2bcfbf, 0xe067cfd8, 0xd2c1bee7,
- 0xbe7d778e, 0xaea2e71c, 0x6cc01157, 0xbaf54c53, 0xf61a63b6, 0x4b3b50d1,
- 0xdf21bbe8, 0xdd815765, 0x5f641ec4, 0xb632ec35, 0x71b3639c, 0x77b1cfae,
- 0xf73ef7fd, 0xf437f3ea, 0x7a1df9da, 0x9ef8ee6d, 0x2ad7ff90, 0xcbd3d82e,
- 0x7133dd23, 0x90fca1bf, 0x62b4910d, 0xbe5b9c62, 0x7c8f731f, 0x8ef4a63f,
- 0x7367e67c, 0xdc029380, 0xfbc9193b, 0xe94fdcb8, 0xfbf7ee90, 0xf40f6bad,
- 0x0a7a0dd9, 0x821df978, 0x1ed79772, 0x951f2417, 0x77a62a35, 0x0bfc8c24,
- 0x97c95583, 0xfc00dd48, 0x9a477c7e, 0xcd396f1d, 0xf1093121, 0x27417b99,
- 0x51db3ac3, 0x993f8e3d, 0xca0e2e98, 0xfe14bdf7, 0x2f18e4bb, 0x955beff8,
- 0xc3df9a36, 0xdf2cfaa0, 0xc51c1aed, 0x68b895fd, 0x5d4869dd, 0x03824f3c,
- 0x3eeda9c3, 0xe3cdcfea, 0x02ca3e30, 0x07ec1b7e, 0x42f71073, 0x7e5c5bf9,
- 0x6d29c61b, 0x68d4ef90, 0xda38e46b, 0x20fb58ea, 0x975607c8, 0x5cb04fbd,
- 0xbddf20d8, 0xddffa39e, 0xcb2f9a11, 0x2704e5cd, 0xee33b3dc, 0xabc286dd,
- 0x797f9f44, 0xce59ac63, 0x036b5c6c, 0xf377667e, 0xdd39c65e, 0x30da5d0e,
- 0x2837df86, 0xbef97ab7, 0xfbc3a68a, 0xf79cdbb3, 0x7b325fc2, 0x34b623dd,
- 0xb065ffca, 0xe9cf6b94, 0x7a6271fb, 0xa9df9320, 0xf1c9cdba, 0x8d5db3e3,
- 0x780edebc, 0x81f0443f, 0x662ae2dc, 0xf89d1378, 0xbe596f10, 0xcedf8cce,
- 0x77c944f9, 0xf095d7fc, 0xd5605bfe, 0x79e0aeec, 0x8997dfac, 0xb4d9a1f8,
- 0x970ef1ef, 0x1f7c6970, 0x3343c4f6, 0x6205eff9, 0xe6cf1c9e, 0xbbc78f71,
- 0x4c374453, 0x18fd06d7, 0xf18df3f7, 0xb4ab1eac, 0x6bbe7bc7, 0x420f7a9f,
- 0x7c27af4e, 0xddd00fc3, 0x02e3b53e, 0x2f6b8d26, 0x5e07ef97, 0xfdf4efc1,
- 0x6a01fffa, 0x00d36c91, 0x0000d36c, 0x00088b1f, 0x00000000, 0x7dedff00,
- 0x65555c7b, 0xd6bbf0ba, 0xc0d857da, 0x51b08b66, 0x62020dc0, 0x10106d11,
- 0x80a17515, 0x636a735b, 0x8dc42937, 0x910150b7, 0xf39fa8ac, 0xa96f0db1,
- 0x962a2695, 0x8da6b675, 0xb2707595, 0xd99b1b46, 0x0f5d9a6a, 0x39d38d3a,
- 0x32cdb653, 0x34d209bb, 0x99d37d9f, 0xde79e7be, 0x0daf60b5, 0xf9a675a4,
- 0x3efcefce, 0x7df5e3fc, 0xee7d7bd7, 0x765ef3cf, 0xb24c6322, 0x98783631,
- 0x8c81b183, 0x45931819, 0x77a13fc8, 0x6302edfb, 0xbeac7195, 0x28d2132d,
- 0x0b6bff56, 0xdfe3df63, 0xa2749ef8, 0x8f2c6453, 0x5da0dbb1, 0x10af7c1b,
- 0xd6ccabd9, 0x29d33df3, 0x148af7d0, 0xd41dd336, 0x7ec3fb1e, 0x13efd5e3,
- 0x338b69fc, 0xe3abea7b, 0xbb78d856, 0x9a7ed0a9, 0xdd8beb05, 0x3ea81bf5,
- 0x26e9b3cf, 0x3632c591, 0x87ff04db, 0x7b4894a5, 0x32e6c456, 0x7057b0d6,
- 0xe1a8a11a, 0x18b18941, 0xf08b9f48, 0xbec664b1, 0x3bcdf868, 0xcfbd40b7,
- 0x64af6f37, 0x35e67b43, 0x398a3fc7, 0xf7363046, 0x0c733652, 0xa5de6c60,
- 0x0f569431, 0x073864f3, 0xf8dbd506, 0xc607ba5c, 0xfb2dadbf, 0x5a307fdc,
- 0x30e59fb7, 0xbf73fef6, 0xe868e3fd, 0xde9f3197, 0x3acf4d7d, 0xa13ead66,
- 0xc304b2af, 0x4d73af8d, 0x83155746, 0x18b6ce79, 0xcd14121c, 0x64c55e5e,
- 0x33b32798, 0x13e41a67, 0x422bcc75, 0x16fd6cbf, 0xf3eb04d9, 0xb557fe30,
- 0x5da9905c, 0x824fbe63, 0x7a860cf5, 0xdf5e1cc6, 0x58f7f00c, 0x4884670c,
- 0x912de8a8, 0xcdbc1903, 0xf83d29f7, 0x21a90c1d, 0x89ef07a7, 0x5ecc1d0a,
- 0x7b6d0657, 0x330305f0, 0x65eb0f96, 0xfceaae1c, 0xbff32aa7, 0xfb20bd6d,
- 0x4e0ddea0, 0x2757cf07, 0x7ac1e61b, 0x364face7, 0x5cb486cc, 0x473e5ef9,
- 0x2f8dde5b, 0xccaf8aab, 0x4f587695, 0x57c71b57, 0xe13d3ad7, 0x2f4f6bab,
- 0xa931257c, 0x00771e67, 0x8e00d8be, 0xe382362f, 0x57c0530b, 0xa82f33a5,
- 0x7c70bfde, 0xedfe7ecd, 0xd0765e0f, 0xceffa8fa, 0x1dbe8d07, 0xb6acffd0,
- 0xf2b784bd, 0xc9f41bd5, 0x99ceaf50, 0xdb51704c, 0xb6cfaecf, 0x6de78032,
- 0x1debb7ab, 0xe8eefc16, 0x9521ee93, 0x413f1059, 0x6790099e, 0x3bbc40ba,
- 0x397ea7a2, 0x9c617ba4, 0x91cd8b25, 0x719edaec, 0x3cf428b6, 0x86da3ebb,
- 0xafaecde3, 0x7165887a, 0xf66777fa, 0x66b3bfe6, 0xa0b317ce, 0xb3cfe43f,
- 0xe424ce45, 0xd4510a8b, 0x3e9a9bf0, 0x79837884, 0x35e0063d, 0xdbc3fe23,
- 0x0c38469e, 0x6ce6145e, 0xa9a94f86, 0x8b8e1f01, 0x36f38cf4, 0x826bcc88,
- 0x8135a97a, 0x548f388b, 0x876c28a0, 0x528d8469, 0x1df90b16, 0x4802ed30,
- 0xee9e2453, 0xc3f5f273, 0x677e3f77, 0x5c3c8131, 0xa1cf4abf, 0x3b606af4,
- 0xfe00a757, 0xd9cc310d, 0x4fa1e8ec, 0x3e951fb5, 0x6fedfa55, 0x70ae7b7d,
- 0x3993677d, 0x3386593c, 0x56e304c9, 0xb9cc3c3e, 0x3afcb846, 0x0f00610d,
- 0x817ad05b, 0x7d6c0b58, 0x903537ac, 0x0fcd7657, 0x24a5b7ad, 0x86f58fb6,
- 0x93e553ae, 0xc3ce2639, 0x015127c0, 0xfad77798, 0xf10c51a1, 0xca2f302d,
- 0xcd8bb39d, 0x5d5bce22, 0x504d53d7, 0xbbb6e619, 0x33e944c9, 0xc368c04d,
- 0x764d3e00, 0xe4df3fca, 0x29adb4a0, 0x9ddbca83, 0x633f9a36, 0x80698881,
- 0x85308aa7, 0x8f3cfdff, 0xe54165e5, 0xee9c6d53, 0x11035b4e, 0xfd3920b6,
- 0x0d79bd71, 0x0efa428b, 0xccfc883c, 0x5db2871a, 0xf5e9d430, 0x4cc42367,
- 0x3f3e4fa4, 0x7d12ddb5, 0x163fc927, 0x02ec7061, 0x24b19b96, 0xe02374fb,
- 0x38f2ee6b, 0x9b129027, 0xe49438d1, 0xb46f0cc2, 0x7eb00093, 0xcc78430b,
- 0xf860e453, 0x2395aa92, 0xb26b7eb1, 0xe6718055, 0x5ae79c5a, 0xced5fda9,
- 0x4b7bf1fb, 0x93252199, 0x9026caaa, 0x72287763, 0xdcc0896c, 0xa07787c8,
- 0x12fe449c, 0xfea2a50f, 0xf055a3b9, 0x56cf291b, 0x21d7000b, 0x78d2eecd,
- 0x055b1394, 0xbb6c0ee7, 0xa9a9e40d, 0x0d769e47, 0x7a817f73, 0xf4077d81,
- 0x255ffd07, 0x5d710220, 0xca1c726d, 0x75824df1, 0xb979c317, 0x059f080d,
- 0x6e9c9d0a, 0xc8e11339, 0xa3eb81ec, 0x65105ff8, 0x288b23cc, 0x3d141b6f,
- 0x79406486, 0x310e4e45, 0xca41f870, 0x3a3f11da, 0xa0e909a7, 0xc51292cb,
- 0x886f28fa, 0xf2c38948, 0xc912ad39, 0xaa9e9545, 0x7c2835d5, 0xa3be1733,
- 0x2a912bde, 0x56be9162, 0x74a44cb6, 0xcd9286ce, 0x24e3a05e, 0x9a3b1dc1,
- 0xb867a3d6, 0x13b5399e, 0x55bc47af, 0x9b4630ef, 0x7a496f00, 0x104e526f,
- 0xbed9ce3f, 0xbb41892a, 0xd3cddbf3, 0x5b17fe51, 0xa1a38acb, 0xd16dbcfd,
- 0x1e9052d9, 0x7ae6ca49, 0xc4ed4164, 0xacf00638, 0x2791fbf9, 0xe78fa0ac,
- 0xe8569f42, 0x70b69bff, 0xd4aa179f, 0x924fed34, 0x31dee780, 0xa1b3e279,
- 0xe0f142fe, 0x1f50a32d, 0xe2a7be08, 0xce23bea9, 0x503c87da, 0x0fc073d3,
- 0xdaf7a00e, 0xf51ef5ff, 0xd6f895f3, 0xbed0e5f5, 0x4885f6a6, 0xa6e167ec,
- 0xa15be43f, 0x49e8a0fc, 0x173ffec3, 0x45653fb6, 0x9edd6c02, 0xee7c7a85,
- 0x4f28b4a6, 0xf917f21f, 0xbd51f100, 0xaaf8205f, 0x517c33e5, 0xc7bb8406,
- 0x4c560ccf, 0xf480ccad, 0x8d625e7e, 0xf6a80f68, 0x323e5a8c, 0xf3adcb9b,
- 0x9093eb51, 0xb53fe64e, 0xccf50925, 0x5f3c1167, 0x6fadd4f1, 0xecf2dca0,
- 0xaf101a34, 0x38331d1e, 0x41799f51, 0x551d22bf, 0x93c6ff03, 0x04ae61dd,
- 0x0eca2a7a, 0x6fceb827, 0x51fa411d, 0xa77c179d, 0xcc74cff2, 0xd218f385,
- 0x748d99dc, 0x7f9d67ff, 0xcff3e22e, 0x17a766f5, 0xf5cd99f1, 0x4bd79fdb,
- 0x05ee58a5, 0xe47b4186, 0x885febcf, 0xd8f4b548, 0xfbd2256f, 0xd67684b2,
- 0xea36428b, 0x76ccf0ed, 0x1bb22cc3, 0x9d59d118, 0xc359d395, 0xe0e97d01,
- 0xa8c7b218, 0x37e746a4, 0x05af85f4, 0x9dd5bd8a, 0x80d7b7df, 0x87b3527c,
- 0x6d3511db, 0x508ec867, 0x855a92ed, 0x39ee179f, 0xda857643, 0x871f6eae,
- 0x5e3eed4b, 0xc801955e, 0x80e5cd1d, 0x0aba4200, 0xfbf905f1, 0xd3d7f5fe,
- 0x7bf26997, 0xb5df7f29, 0x750f2e56, 0xb7a8499d, 0x349c64d6, 0x976bfe20,
- 0x5e42fd2b, 0xbcde341f, 0x2f0481ec, 0xc16a1f2c, 0xfd8d0ef6, 0x9a2fbb50,
- 0xc36bfbda, 0x97bea356, 0x4c3a2ceb, 0xa42d6b9b, 0x8bd5b7ff, 0x9c5cba19,
- 0xed13985c, 0x916183b9, 0x07472859, 0x06653c2e, 0x3b6a0248, 0xbe50888f,
- 0x2f73ca3a, 0x031c67d2, 0x6f5090ae, 0x5fb405f8, 0x74f95e3b, 0x6e3ff604,
- 0x066be048, 0xbf0bd753, 0xf20c5e9b, 0x2d981964, 0x06671f64, 0x0f1d2046,
- 0xf4e5cc3c, 0x1ab67ae3, 0xb9d31bf5, 0xf72834d9, 0x3c42dca3, 0xdbe61b7f,
- 0x1dffff05, 0xfec60ff3, 0x143fc999, 0xd0a4bddb, 0x5968dba7, 0xaebcc02d,
- 0x8359ea1e, 0xd041be4b, 0x222d935f, 0xac34a40e, 0xdfe7a1d7, 0x82757b55,
- 0xc113a722, 0xc8bb408e, 0x416ec830, 0xf3366e3a, 0xccb209f5, 0xf71e611a,
- 0x8d9e1e67, 0xa76d7a7a, 0x9d611989, 0xc8c33a13, 0xe41c4d7e, 0x6cd6bd22,
- 0x2dbd224e, 0x02ac7438, 0x14ce1fea, 0xcfac3afd, 0xcfac3e4c, 0xae76214c,
- 0x9db19668, 0x6fb5f070, 0x960fbe51, 0x82891e2e, 0x47be0df5, 0x9d433670,
- 0x8f73aeca, 0x15a41bff, 0xfdec7697, 0x2dff4857, 0xe113b3ca, 0x75cbba7a,
- 0x5ba803c6, 0x39336d6b, 0xc160deb9, 0xef7838eb, 0xb13691b7, 0x9d1d7e67,
- 0x4dfb9ee7, 0x2726a62e, 0x07099fea, 0xa11fd225, 0xc3b0e9f3, 0x3d5287c4,
- 0x2c3b3bb2, 0xb0104fa2, 0x70f791fe, 0x164c137e, 0xbaf684bf, 0xb065ff49,
- 0xc48e617e, 0x935773e8, 0x83df614a, 0x938b1091, 0xd1747d47, 0x38eb7642,
- 0x3a550f85, 0xdb9b2b7b, 0x72296f8f, 0x73338555, 0xaf582cc8, 0xd66cf0a8,
- 0x1022faab, 0xf87b32ce, 0xaf529176, 0x932892eb, 0xf46d5e1d, 0xebb2e831,
- 0x33d250e0, 0x3fd9c9fd, 0xf601dda0, 0xf954f2c4, 0x5327845d, 0x0acdeef4,
- 0x2dfd54fc, 0xacdffd29, 0x8b66f1c0, 0x5376e1c8, 0x7a14dc08, 0x1e97a50a,
- 0xdb41887a, 0xeccf1ba3, 0x51a3f6be, 0x6bdf84c9, 0xfd78e61e, 0xedfaf090,
- 0xcd802b5b, 0x4dbfa0d8, 0x0cfefc21, 0xb98bf578, 0xedaf023f, 0x9bf578a1,
- 0x375e2187, 0xa0676489, 0xa376881d, 0xb5c0e507, 0x60f6e794, 0x51392306,
- 0xd7398529, 0x8077e324, 0x87c91643, 0x541f28a3, 0x15c430b9, 0xe9f506b8,
- 0x71e57069, 0x8db1017e, 0x10a4e797, 0xf6b0245d, 0xbc072c78, 0x277da1e7,
- 0x7e668699, 0xe5b7720c, 0xdfdedf01, 0x82cd3f40, 0x983ceb6f, 0x8b66f78f,
- 0x0eb7ed13, 0xc3d5e89f, 0x59a25a3c, 0xbf187ed6, 0x4e55fe65, 0x4533962f,
- 0xf8374c6e, 0xef837ed0, 0xbdf88d49, 0xe97f7f9c, 0x441f67ef, 0x8d33acfb,
- 0xb25abad1, 0x67da3a59, 0xf419652d, 0xdcaab7fd, 0xb7823079, 0xddf4e32f,
- 0x43e6df32, 0xa3e8f43d, 0x27a8e1be, 0xf1cbb65a, 0x510d4836, 0x981cf89e,
- 0xd38920ff, 0x94324b7d, 0x7059ba43, 0x7ec5cf97, 0x0df3e1ae, 0xff3d8794,
- 0x4e657414, 0xe506d105, 0x76231ba7, 0xe9555ca0, 0x2e30f074, 0xcf7887df,
- 0x7938456c, 0x7a8cb027, 0x8543ffe8, 0x0bf854de, 0x008d36c0, 0x3c1098f7,
- 0xa31774a8, 0xca585fc0, 0x413bcfe9, 0xf31d4f59, 0x0091bfa1, 0x38070953,
- 0xf0c11a57, 0xd5967483, 0x6ff38d93, 0x91f80c5e, 0x0f1c1d70, 0xdd5645a7,
- 0x155f07d6, 0x0df128e6, 0xa6f90218, 0xa9be7192, 0x7a42ba44, 0x0aba046c,
- 0x481647a2, 0xae173e88, 0x83ca15d0, 0x1cb8f708, 0xc257e758, 0xf0b1f21f,
- 0xa694ffad, 0xa4ce35f7, 0x1e2f2e1c, 0x734a7b33, 0xd04def48, 0x656f9dd8,
- 0xfa889a7f, 0xafbdf4e5, 0x149a6025, 0xeb8c9ee5, 0xd7fe5cd8, 0x1fff1452,
- 0xb8f407c6, 0xffcb0f8e, 0x2dda224f, 0x235f4b95, 0x79d2bffa, 0x37989996,
- 0x5d1ee01c, 0x00fd382c, 0x462d79bf, 0x16f824e7, 0x0c755c57, 0x6ad659e1,
- 0x56911874, 0x1c8d62d8, 0x408f2b3b, 0xda1d8a37, 0x39ba246a, 0xf4894bf7,
- 0x8b92b9c1, 0x0f800eaf, 0x4e0856e7, 0x32d657ac, 0x3790256c, 0xa816292b,
- 0xa057995e, 0x1ca9657a, 0x051a275f, 0xaf4037a2, 0xde283b25, 0x8ef5a731,
- 0x13df88a9, 0xa40f5fc5, 0x187ac1f8, 0x2e63e547, 0x959e48de, 0x56abf529,
- 0xbb47911b, 0xacb597bf, 0x3ffc88ba, 0xde521bd3, 0xec53e93a, 0x5b51acb7,
- 0x8235c6ee, 0x4e57771d, 0x07ca0c44, 0xefce64e6, 0x40f24811, 0x93fa0ff2,
- 0x447140bb, 0xc20f30cf, 0x61bc89c3, 0x99eba47a, 0xdba05708, 0xdd7ca5e8,
- 0xbe0aa733, 0x99ceb049, 0x1f67ce16, 0xfbbe0996, 0x09ff3e05, 0x5065ef40,
- 0x5d740b1e, 0x38e4c316, 0xe626dd48, 0x2e79c70f, 0xca09f61d, 0xd438d3db,
- 0x8fefaa28, 0xf75ca1b4, 0xb78911e8, 0xe5a52f32, 0xede947cc, 0x98f34fcc,
- 0x74933ac1, 0xfc93af90, 0xf38e312b, 0x0367b32b, 0x8b5fd7d2, 0xe0169c4e,
- 0x4239ddfb, 0x439428de, 0xdd68038d, 0x701f5b97, 0x067680a5, 0x5fac41aa,
- 0x147ee396, 0x4ccab3fb, 0x74ab718f, 0x995a8e95, 0x17d61537, 0x9be2debb,
- 0x6b8beb04, 0xf5c21cf5, 0x38374fa6, 0x1383ef10, 0x74133af8, 0x543f9fdc,
- 0x81a3606e, 0x183f360e, 0xdb3a43e6, 0xda2035bc, 0x03e6fea1, 0x97cc0180,
- 0xf0bc14af, 0x4e61eadd, 0x7559d38c, 0xfeed111e, 0x188f56a2, 0xe1a7e208,
- 0x394441b2, 0x23d3be91, 0x5ab49ea2, 0x353d53d7, 0x4fd4ac5f, 0x59067937,
- 0x4e9e4ec9, 0xa15c33f4, 0xd49fb51f, 0x045f8356, 0x4ac917b8, 0x57b7997d,
- 0x9e8a61fb, 0x3961db56, 0xab7a67d2, 0x453f4fa3, 0x3f505b4d, 0x08f88f83,
- 0x7bd205c6, 0xfd711fca, 0x1bf084da, 0x36abf378, 0x2103374c, 0x9d1c71f8,
- 0x79e34cc7, 0x15d33cf8, 0xbfbf87da, 0x63e38f7f, 0xb9bbf01e, 0xe422fef5,
- 0x1f00cda3, 0x9de6f3c4, 0xcfd4ccb3, 0x799a59f4, 0xf286a931, 0x4b070825,
- 0xa23c75ef, 0xd53d2fcc, 0xbd695a23, 0xe76843ce, 0xe28ccba5, 0x763f6a01,
- 0xf405741d, 0x857a776a, 0x90cbcc7a, 0x47b8945a, 0x4eb38e3a, 0x5d31ff40,
- 0x6b4957c1, 0xf63361fd, 0x5f1466e3, 0xfe85cf4c, 0x8426fea1, 0xfc37a183,
- 0x5e99e742, 0x4273e7cc, 0xf18edfa9, 0xe76c7494, 0x74431158, 0xd5d7e7c5,
- 0x1c6214ea, 0xa08fb197, 0x7e90c9fd, 0x0cb8c29e, 0x7e4ecfa4, 0xb42e4a6d,
- 0x97d1fe7f, 0x52d0b476, 0x0fb33e0e, 0xf5ab56e5, 0x9c709be6, 0xff9f2283,
- 0x5de2724d, 0x206ddf98, 0x53be9a3d, 0x7944db4e, 0xebfd1393, 0x55df56ae,
- 0x2cbd422f, 0xeb64c618, 0x0a138f3d, 0x3a15da9c, 0xcb793938, 0x53ef5c20,
- 0x582c7745, 0x5f87ed02, 0x3b8d38f7, 0x94fcca7c, 0x9b0fc816, 0x30abcbf8,
- 0xc78e5f7f, 0xfee4023c, 0xde5fd2b0, 0x6ae50fb6, 0x821cfce9, 0x4576c597,
- 0xda39a9f2, 0xbf02f5e3, 0x52dc4737, 0xd2b8fc8e, 0x24487066, 0xfa19eb77,
- 0x7711cd59, 0xe5bfb1db, 0x4c26f9d1, 0x36bfd60d, 0x6e3c9ca1, 0x1e2131c9,
- 0x18132d9a, 0x0aaceb96, 0x4f1bbe9e, 0x885240fa, 0x73847772, 0xe5744cba,
- 0x0c272864, 0xbcad0609, 0x2f7971d6, 0xb43e1dd9, 0xf5acba9f, 0x60cc8148,
- 0x7ca05409, 0x69795493, 0x3c717a71, 0xa3b4e2c9, 0x9142b4bf, 0xaf862c37,
- 0xf73960d3, 0x5eaf389f, 0x962e63fd, 0x867f4b50, 0x4128ccb0, 0x9bda3ff9,
- 0x573e10c6, 0xbbc58353, 0x6844f518, 0x70205d7f, 0x4fa38e50, 0x7e337647,
- 0x5eddee48, 0x7a341bb2, 0x7a5e5ba4, 0xf221f5a2, 0x7a811e52, 0x792058a2,
- 0x0ceb61e9, 0x0c12feb8, 0x3c5c00b2, 0x545eb8db, 0xce267bf9, 0x49e5f005,
- 0x5e5c9165, 0x2673e4e9, 0xd1efc60e, 0x452a5cae, 0x5e021e22, 0xc3e9e0c9,
- 0x9378f79f, 0x9d1e507f, 0xa987fa41, 0x21fe9007, 0x0fe144fd, 0x6f078a8f,
- 0x50a3f5d9, 0xd912cb5f, 0x85cb9503, 0xe2cfb436, 0x6947d1bc, 0x2edcc5be,
- 0xa6bf9768, 0x892a1cf5, 0x7979444b, 0xc799d962, 0xbebd41ef, 0xfff49c29,
- 0x5667a5a4, 0xa66e3c60, 0x211a7057, 0x7d1a59ef, 0x6f987bf7, 0x905d3a34,
- 0x81f0ab3f, 0x914597fb, 0xb8084f68, 0x759e8cce, 0x83321656, 0xf139e29b,
- 0x3b409633, 0x34e0a34f, 0xa1e07a3b, 0xef527ac7, 0x37bc05f5, 0xfad0fe46,
- 0xa8d7bc3f, 0x3e7759c8, 0xc6a2d9ca, 0x700a87e4, 0x7cf5eb5d, 0xd7d18d9e,
- 0x3927de53, 0x1cb97639, 0x3fd8a99e, 0x57b3a59e, 0xc0f7c3c6, 0xcef9416a,
- 0xc8a956f1, 0x88e340e5, 0x0d3f1863, 0x3e48172c, 0xa252bc79, 0x5ccdda1f,
- 0xef784d9e, 0x5d900a28, 0xb43de5d8, 0x722b5a92, 0x3674a3de, 0x94588c17,
- 0xe3dfa76f, 0xfe479ffe, 0x9bb62563, 0x83b5a2d3, 0x0cfd0226, 0x67bcb840,
- 0xf1e9fd26, 0xefbad196, 0xea6945f0, 0xd38ead55, 0x8bab3de4, 0xe7b0c7ae,
- 0x52277bac, 0x3992af3c, 0x2ade9122, 0xc1376e65, 0xe61b259f, 0x4f9d5733,
- 0x2ac7b731, 0xa27ec18b, 0xb07b5aef, 0x3389fec7, 0x78a8df20, 0x3a13f154,
- 0x19765eef, 0xacd076e3, 0xdd7ada75, 0x16301076, 0xe5caba8a, 0x32bf76da,
- 0xb9459fbb, 0xe038dee8, 0xaeabf21b, 0x80821e50, 0x266e7f21, 0xa61f71e3,
- 0xced543e8, 0x517a592f, 0x6668728f, 0xc23d45e3, 0x1fd2e673, 0x385fa7e6,
- 0xa1bef3b5, 0x3ebb4a20, 0xbf934e39, 0x534435d1, 0x30f766ff, 0xa75bf7cd,
- 0x5ef9ab5f, 0xc9a919ee, 0x5e3d5edf, 0xfd467f53, 0x7a1cad24, 0xa3d69a37,
- 0x79bfa7e2, 0xa3e8a5bf, 0x12ec4277, 0xe23a9d35, 0xbf91133f, 0xe4672c3f,
- 0x33787cbf, 0x1d31f8d3, 0xe7822423, 0xf93d5c80, 0xc8e5f731, 0xbf208fef,
- 0xa1f44fc9, 0x7343f3c2, 0xa228c9a1, 0x7ee4627f, 0x725fa879, 0x5e503990,
- 0xfdb1d4ce, 0xff8f5442, 0x9cf30e67, 0xe3bafec7, 0xf10f4b0d, 0x1f884378,
- 0xff78a573, 0xbfcc0670, 0x4d79790f, 0x16c3f72e, 0xffaf36ee, 0xb58e0838,
- 0xfc9ad16d, 0xebf1fda2, 0x3bd60e3f, 0xae1d044b, 0x93e146b3, 0x7da0165a,
- 0xa0e8bb52, 0x40c4eafe, 0x583be62c, 0xf4b46c67, 0x0a8b838f, 0x258f800f,
- 0xa9de2837, 0x193fc3fa, 0x9e0a453e, 0x1a70b4cb, 0x90e3cb93, 0x29108f5f,
- 0xc7239cdd, 0xf103fee7, 0xf49eb41c, 0xdc7fbcdc, 0xc847227a, 0x9f9f0859,
- 0xe3dfaf1b, 0x17940f1e, 0x7f0b908e, 0xfe4cb826, 0x47a5c247, 0xd82f47cf,
- 0x72a3fa0d, 0xfdfb819d, 0xbf5c33de, 0xfefd7237, 0xf68fb47c, 0x44f39b87,
- 0xed070ee7, 0xe93ed297, 0x618d7ba1, 0x59df1819, 0x797d23a1, 0x8190997a,
- 0x2126c2fe, 0x0f36fc8c, 0xc81648ab, 0xc878b9be, 0xaa31fd48, 0x3afded07,
- 0x0551f390, 0xe4096b3e, 0x71e5ccdf, 0x716e35b1, 0xf5038a4a, 0x1eac1ff6,
- 0xe27a8b8a, 0xfd9e1e11, 0x696be130, 0x2bee45df, 0x6bd0127e, 0x8512d1e5,
- 0xd5cc5f8f, 0x2e1f3ad1, 0x949db914, 0x1f70ba2a, 0xa9d81563, 0x96065768,
- 0x915f28eb, 0x6d59e9c7, 0x9d0a1ff7, 0x14dd94c7, 0xda88f386, 0x7d7dba24,
- 0x2e51fb8c, 0x793d0f8f, 0xb3ed1857, 0x68742a01, 0xd6bddbef, 0xaf582e69,
- 0x8d96d0bd, 0xdcdadfb6, 0x4d6fea68, 0xcf159746, 0x748cfca1, 0xfe8eca78,
- 0x8cfac0a0, 0xe655cd33, 0x580feeab, 0xe936fe94, 0x9da2bf41, 0x1e289822,
- 0xf91af161, 0xb16715f9, 0x1ccade26, 0xa76e0b02, 0xf428f3d6, 0x1b42c76a,
- 0xeff697e1, 0x5db178d5, 0x1ba417a1, 0xcf4abbc4, 0x8983fda1, 0xe9cc8dec,
- 0x15d9f389, 0xe51a8e52, 0x8f748dd9, 0x1f90eafd, 0x07fe5cbc, 0xe3c4be6e,
- 0xded7de2c, 0xe18879c3, 0x0c9c525f, 0x5fce4aec, 0xbee2434b, 0x024f913f,
- 0x40cf0e66, 0xde1e5684, 0xdac9d0ab, 0x48de827f, 0x3ebc02ba, 0x8fe5cb9c,
- 0x36fdc0ab, 0xb2c73bca, 0xfba230d4, 0x96270b1f, 0x51384560, 0x3872ba5e,
- 0x1f3df379, 0xcc749e10, 0x7f71b46f, 0x72e1f90b, 0x31c2a47c, 0xbe9e6449,
- 0x3c88f2f4, 0xca1bfbe5, 0x6ab7ae7e, 0x61d90fc8, 0x02c57728, 0xcc2cd2c6,
- 0xffb6bc61, 0x8f9c0e7a, 0x396ae885, 0x96aee5d2, 0xcfe43094, 0xcabc6564,
- 0x0ca7d29b, 0x205fa077, 0x040ed018, 0xe3c0c3bf, 0x574edc3f, 0xb94f7ae5,
- 0x72c47ed6, 0x1f8f664f, 0xeaf7daa6, 0x82973c77, 0x59f3aae3, 0xfd61d8a5,
- 0x125577cd, 0xdca2724b, 0x808f765a, 0x5ab1dcf0, 0x717df8d4, 0x0325615e,
- 0x7fce3e3f, 0x8efd13af, 0xcdafb038, 0xf575cf28, 0x5e083382, 0x0a817b81,
- 0xf8871338, 0xe24fe02b, 0x8967bd3a, 0x57c7425e, 0x85abb1d7, 0x5de71b8f,
- 0x9f90c82c, 0xa02f5154, 0x9956b7df, 0xa3055c7f, 0x3e9969ef, 0x9e21e301,
- 0xe38a5616, 0x03335655, 0x332af5c6, 0xf2865399, 0x7443c6ca, 0x02b9edde,
- 0x206f2539, 0x4f3a4fc9, 0xa4f02ae3, 0x4d1d01e7, 0x7643284d, 0x864bb867,
- 0x167f89f1, 0x4cb5a71d, 0x59efa2f0, 0x843ce2d4, 0x11d001fe, 0x2d3c7dc5,
- 0xe75f1a01, 0x5d7271e5, 0x80e9c18b, 0x658d4875, 0x5521ffb4, 0xd4329c1b,
- 0xdf29b63b, 0x2d95f18f, 0x0ddfdc8d, 0xd67493f5, 0xd1c3d7fa, 0x9910ab5f,
- 0x790a0144, 0x5659fa2a, 0x30562e85, 0xf7ee3b77, 0x41fe1933, 0xdca548bb,
- 0x2a6f3d5a, 0xd0e52266, 0x13dc8378, 0x6b9e409f, 0xd3b240dc, 0x678daa94,
- 0xc92b2d48, 0x7d6233af, 0x890faca8, 0xa5d54877, 0xac971714, 0xf35bf2da,
- 0x8e086222, 0x9e1e3c59, 0x8d88ef73, 0x727cc2b7, 0x1fe3797f, 0x41873d30,
- 0xfe5b8033, 0x4c073bb2, 0xac16ff87, 0xa62378d8, 0xe8cebc4f, 0xe08fc19c,
- 0xfa8c6657, 0xe8fafcf1, 0xb2abf295, 0xb9d00f26, 0xd27dafc2, 0x094f1abf,
- 0xfe954fe5, 0x4d4ef943, 0x7f865ed7, 0x120526d7, 0xdf4f0f91, 0xdb03323b,
- 0x73de3657, 0x5f88c7c8, 0x77d00705, 0x117177a5, 0x5660ba23, 0x5e5bd097,
- 0x490b3784, 0x1f37bfec, 0x3b7c863f, 0x13e7effe, 0x95df087f, 0x63ce8c09,
- 0xf649fd81, 0xdfaa9a9c, 0x3eb9f223, 0x246790bd, 0xc1464ebc, 0x053bac0a,
- 0xc7bee0da, 0x2b4e6078, 0xf02e7fa4, 0x4de38b3c, 0xef9e3c81, 0x6b40fd7b,
- 0x4b37ae0c, 0xa78ef5c7, 0x76e68edb, 0xa8ffb748, 0x06590fd8, 0xdced0536,
- 0x771126c0, 0xc0e0596e, 0xe2d63be7, 0xc9b96bbe, 0xcaeb0699, 0xf448dfba,
- 0xf68a0484, 0xa7689651, 0xb6aee5b8, 0x9f69cb8f, 0xd2c0d1b1, 0x0ee06e28,
- 0x006473c3, 0x4caab0cf, 0xacc357b2, 0x857aaf68, 0x1e88aaa4, 0x22f5087b,
- 0xef2cb40a, 0x30ac53ad, 0xb042aefe, 0xec7b3347, 0x59fed51c, 0xeb4c9f9c,
- 0x6820ed57, 0x8fddb0c7, 0x0b4aa7e5, 0x19bb224b, 0xf7fb83e6, 0x90264cb6,
- 0xff5213ed, 0xd92332f4, 0x7baf76af, 0x5bb470ca, 0xa0e6a797, 0x9ae30add,
- 0xfd7f6e24, 0xbb67ca3b, 0x285ef94e, 0x49b9e0e6, 0x05801f75, 0x8718d174,
- 0xfeaaed53, 0xe763a5e7, 0x9505b954, 0x9f2f9b7f, 0x59ff7ec1, 0x5563dd72,
- 0x634d4de8, 0xe9e079f8, 0xcf8dbd0a, 0x9e9a5337, 0x73dbb3ff, 0xfe93aea7,
- 0x39feeb79, 0x9ffae1d7, 0xffd88eb1, 0x3cd8f821, 0x3ac67ff9, 0x4ff2996e,
- 0x5f37875c, 0x43cfc86e, 0xe746d679, 0x7e2eec88, 0xdee41c2e, 0xcb9cbc3d,
- 0x09938b39, 0x74ffc764, 0xfd84d779, 0x1c1f419d, 0x7778a1cf, 0x67183a08,
- 0x88a1a588, 0x8ca5cdec, 0x2c8681f6, 0x4340fbe5, 0x224d3a96, 0x6e1603da,
- 0xec520e27, 0xb94b41ce, 0x9ca719dd, 0x118b882b, 0x3fd4327d, 0xca5bcbf6,
- 0xbee0c6e7, 0xd5ec9190, 0xf5dd36e6, 0x5e53d416, 0x868fce89, 0xd2d289da,
- 0x5f9c4a95, 0xf6aa9f6b, 0x79e31a68, 0xb4b3b2fd, 0xa3daa74f, 0xb4f29443,
- 0x603f7ca5, 0x1ed68b48, 0xdad6ab8d, 0x1082fa8f, 0xbaa9733b, 0xeb49d07f,
- 0xe9d72dd3, 0xa87ffcf1, 0xb07b89dd, 0xab0714a8, 0xfeb4067a, 0xf630d74d,
- 0xf9f77e74, 0x1fdc1c8c, 0xd05ab99a, 0xcf5539e8, 0x7e282402, 0x43e3e2a4,
- 0x3f8873d4, 0x5fb534f7, 0xaf3e6fd2, 0x2e4c7048, 0x539cd74f, 0x1e7eec62,
- 0x3a73712f, 0x11eaf5fa, 0xddaed16b, 0x37c6eeb7, 0xfd697d31, 0x04e5e19d,
- 0x6f54f5af, 0xebc6cb7f, 0x7650ff30, 0xe6123dbb, 0x53a8d0f5, 0xefd009ef,
- 0x85e33c6a, 0x2978b3eb, 0x9e14b68f, 0x7ab3d08b, 0xa4dbdfda, 0x0d9f4c0f,
- 0x45122be5, 0x23be911c, 0x1832981d, 0x59309f9d, 0x35f57e78, 0x30e3ee8c,
- 0x1a9f5ce5, 0xd73d0987, 0xc7c3cab7, 0xc4f00559, 0x18963573, 0x39a91f7e,
- 0x08e9fee1, 0x818766ef, 0x8b3939e1, 0x624fee72, 0xa82aaed5, 0xbf7edc77,
- 0x0bee167d, 0x7e881867, 0x26ce98a4, 0x9ed993c4, 0x71c2af63, 0xcd35e242,
- 0x2037e929, 0xfbb5bc59, 0x20679ef0, 0xc9cf7b7d, 0x2a9d312f, 0xf3c3efb5,
- 0x88465225, 0xbf6bd29f, 0x9bdc1198, 0x4a5f3387, 0x6e3b9632, 0x754e9110,
- 0x869a6feb, 0x50c154fe, 0x4f581ae7, 0x1bf30258, 0xbd611e60, 0x7b7989f6,
- 0xce9c20a9, 0xf59648d5, 0xdfd80d15, 0x7fba08a8, 0xae03bec0, 0x4df3a7cb,
- 0x80f657f4, 0x0e3a47a8, 0xc38ff653, 0x1fed7b32, 0x30c2fcd3, 0x3b59e14d,
- 0x7a15c3e4, 0xca7b635f, 0x6b77fb87, 0x106e5213, 0x69f53d1d, 0xbbd6195f,
- 0xab1bbbf2, 0x26fd5f68, 0x9fcf1593, 0xcafb2985, 0xa574e3a1, 0x43675b3a,
- 0x6418b46a, 0x55f11673, 0xe0725f0f, 0xe68353d8, 0x9cfaf501, 0x736e3aa7,
- 0xda83c71e, 0x64c2d19f, 0xf74323b4, 0x81fb43ad, 0x56be7079, 0x31fda9ea,
- 0x01e746d6, 0x518b8874, 0xa7abb57d, 0xe9df0db9, 0xff1fbb24, 0xcc82e382,
- 0x35fc431a, 0xcedb55ac, 0x0dcfca9c, 0x2a18f4a8, 0xfdd0a52f, 0x6f7ad4f7,
- 0x9f81e138, 0x1fad0827, 0xf6d0d70a, 0x5833b7f0, 0xe160ddde, 0xa95d88fb,
- 0xac22b99d, 0x473da57b, 0xbf5a7f33, 0xef7fd3d0, 0xbe6d3cfd, 0xb9f6364c,
- 0x7d07943e, 0x4e8277cd, 0x6c596fbb, 0x8d2fb6d1, 0x25777ed1, 0x4592f368,
- 0x6d91dfe9, 0x8e1af3a2, 0xf79e137c, 0x18d8f953, 0x8af7efe2, 0x2efa2adf,
- 0x1e74c991, 0x79de9375, 0xe3c4e50e, 0x01d24033, 0xf82c3c7d, 0x552e58cc,
- 0x0f515bb4, 0x93d3adc6, 0xaefc7a9c, 0xbde0ce5b, 0x6ba016a0, 0xf771e584,
- 0xc56e9b47, 0xf148b17c, 0x70fb9b0d, 0xcb51d239, 0x37bf5f77, 0x3a9fcf1b,
- 0x5bce740d, 0x75b1b73d, 0x1e2b61ac, 0x94cb8a0b, 0xfdf069cd, 0x84e62aa1,
- 0x6323b099, 0xfc712363, 0x15a2d12d, 0xd1f3ddf5, 0x718efff5, 0x8c19e694,
- 0xe14219f6, 0x46d23757, 0x756e3672, 0x5bf780b6, 0x5f9b1e1e, 0x06317289,
- 0xaf9465fc, 0xc57dca90, 0xf015c717, 0xc5b6c0f8, 0xbe23c52f, 0xdb0f5837,
- 0xa2159e69, 0x52d8a713, 0x8b7fe5c1, 0x7b3ed185, 0x3752a8f3, 0x72ec4d1e,
- 0x8b3c0ec9, 0x41dee2d8, 0x2d595ef3, 0x9cacd972, 0xc8580197, 0xbe9c3951,
- 0xfd4a360c, 0xb3346e98, 0x972839f8, 0x7ce8c292, 0x81f67f50, 0x57cb84b4,
- 0xe9cc6f6c, 0x1143debf, 0x5cc8f18d, 0x9fe467cb, 0x6dfd0b9f, 0xf299b19d,
- 0x538d3caa, 0xdfbbbbe8, 0x1bba3d10, 0x36724bf0, 0x9e2ce41a, 0x878e63c7,
- 0xf575b789, 0x3a20b4c2, 0x7c8055d5, 0x2ee7e2cd, 0xbebb7e30, 0x87e334a8,
- 0x9b9d4cfc, 0x5518e5cb, 0x97ebbb1e, 0xda05dc61, 0x635f51fb, 0x0fce783f,
- 0xb7d71952, 0x443fa851, 0x830cf37e, 0xe839ef03, 0x430e402a, 0x56825d7b,
- 0xc9181c4f, 0xc1fb35f1, 0xa09eccbb, 0x728c4d38, 0xbea3661e, 0xeb069cde,
- 0x744338d9, 0x6c78076e, 0xfa1068a9, 0x55c4c78a, 0x0d997e70, 0xde6fd1f5,
- 0x1b139152, 0x8c669ebc, 0x36d0c073, 0xdb9f0fd7, 0xbfc885d3, 0xbc79235d,
- 0x1cdd67bd, 0x3f4071be, 0x479a87a8, 0x6ae6ae97, 0x557241da, 0x6d6de567,
- 0x073a7aae, 0xcfc155d9, 0xf813ad5d, 0xc4a4827e, 0xf28b9bcb, 0x4bf4bf49,
- 0x492b152f, 0x57e2849f, 0xe5fd3f9d, 0x9911e907, 0x8719f1f1, 0x4cceb49b,
- 0xda07e1fa, 0x4af9743b, 0x1e0bd47d, 0x62ba7f6d, 0xf8e33651, 0xaf105fa8,
- 0x7bc5e63a, 0x95fda193, 0x3a16fc2a, 0xe069c91f, 0x8d9db1ed, 0xdbf48bf8,
- 0xc8e00169, 0xa1ce82e9, 0x3c38f7b3, 0x7c3a24b7, 0x2e01f86b, 0xae7487d2,
- 0x78f7a52b, 0x658e098f, 0x7de8efb8, 0xe7f3a25d, 0xcd4f7aa1, 0xb13070ab,
- 0x3e9ce736, 0x0f9c4dec, 0xa76935ea, 0x01fa89d7, 0xed8a97d0, 0x094b2a36,
- 0x5cd4b3bb, 0xd7ade8e0, 0xab6d5cba, 0x937282af, 0x03f69983, 0x99534c3c,
- 0x79c0307f, 0x1ccf33ed, 0x3df58abf, 0xf803ae0e, 0xe6bce099, 0xe63347f9,
- 0xedf37b40, 0xb9b426ad, 0xf5a1cf43, 0x76a4ef6e, 0x4cab4b71, 0x23c519d3,
- 0x86b95465, 0xafd90b08, 0xe48674cc, 0xc6c934b3, 0x3d10a7ed, 0x50f80eb9,
- 0x644af08f, 0x38d0af04, 0x49f8e871, 0xdefcc905, 0x1a52e2e4, 0xa7454377,
- 0x5d90e31e, 0x4ec57aae, 0xefa9f4f2, 0x7bb1fb42, 0xff23a73a, 0xc81a1577,
- 0xc1f87e1e, 0xcf119349, 0xc51265de, 0x7bd17ad1, 0x233dbac8, 0x1cae9cf5,
- 0x7f28cd8b, 0xdbb9ccb7, 0xfec3e8ec, 0x8b3152d8, 0x965a2fce, 0xa5633b70,
- 0x88e481bd, 0xe7b942a5, 0x71cbd8a2, 0xa213bfaa, 0x3754b727, 0x9bdef198,
- 0x8590de7a, 0xed97dbd8, 0xbbbd1fbf, 0xf18bf961, 0x2aee4fa8, 0x79c78869,
- 0x4ff0896f, 0xd5df1377, 0xf2029f93, 0xf1e51e38, 0x920e91a2, 0x7ff2dd2f,
- 0x0e3b39c6, 0x5569181e, 0x43db8af8, 0x417fe25f, 0x928d66bf, 0x1c6014a7,
- 0x8e44b52d, 0x74bc6076, 0x00d3bbdf, 0x7abd93ae, 0xfb8d63f2, 0x2fc1f481,
- 0x49382632, 0x681e9e5f, 0xac9d22b0, 0x8f46fc6a, 0xe2ce6bf7, 0x7fe855e9,
- 0x87b70255, 0x9818c556, 0xa1f342df, 0x2e754f45, 0xf5a7a866, 0x0137dc09,
- 0x798fc5bf, 0xfd102316, 0xbaf18e4c, 0x298700ad, 0x30bbbe31, 0xd7ac4aad,
- 0x129c60d9, 0xf080b447, 0xc563a29d, 0x9daf1e06, 0x115cf385, 0x59c511f3,
- 0xd1c5a727, 0xc5a3547d, 0xc9fbc3ad, 0xf6d5677a, 0x8bce8cb5, 0xb015bc51,
- 0x8ce1f74a, 0xb6bdc718, 0xeeab8e27, 0xdcf1a913, 0x04290575, 0xffc1212f,
- 0xcb23e22d, 0x1ef1101a, 0x403e987e, 0x46aaf8f6, 0xcd41f913, 0x91da3b09,
- 0xf823323d, 0x3e7aa43e, 0xb163e017, 0x0c567c02, 0x95d69bbc, 0x17bb1494,
- 0x91faa5e7, 0x50af20f1, 0xcc2565de, 0x4beb8a9b, 0x4aedc37a, 0x0cc67a86,
- 0x1660365d, 0xa2b9eae0, 0xc61eac78, 0xd8583c21, 0xabfb4c38, 0x40d122bf,
- 0xf2d0d8f1, 0x5c780b27, 0x21050b32, 0x135eebae, 0x755dcfdd, 0xaaf7bee0,
- 0x301e7e50, 0x6767c939, 0xffbc1481, 0x66771dde, 0xaeddff7c, 0xdbb5bf9c,
- 0xa63f740d, 0xf8ccbe3b, 0xe71abcbe, 0xd9e51a88, 0x31f2788c, 0xc7f421c7,
- 0x8f71abfe, 0x4fca461b, 0x6dac7f60, 0x63c515c0, 0x68eac6c2, 0x8529279f,
- 0x05f78279, 0x5feb2956, 0x7a870dd5, 0x75f5ea3b, 0xf6079428, 0x378d870f,
- 0x2226bbe7, 0x88ec46dd, 0x47dafd47, 0x171343bc, 0x79287bc1, 0xfa6204b1,
- 0x9c79aa93, 0x3cea6624, 0x5348778a, 0x0ad93ef9, 0xdcfc6016, 0x7c84c0a0,
- 0xfd4beaba, 0xe887700d, 0x927accd7, 0x90ae9193, 0x7f2f48b8, 0x6576e645,
- 0xa2dbdc88, 0x02f1f9f0, 0xcf91ee25, 0x93e7bff2, 0x5228bb25, 0x1889f7d9,
- 0xe8724295, 0xd90a5711, 0x411cc443, 0x9d9cbef0, 0x87b15073, 0x8f9ca77e,
- 0x83e72bf7, 0x83e72b0f, 0x0b77c55f, 0x863f90c0, 0x7a4016ef, 0xe549c9bc,
- 0x2815346f, 0x6fe399ca, 0x4892ee45, 0xf9941fb2, 0xadf994e9, 0x25734eb0,
- 0x6d567e03, 0x5879d3d3, 0xe984c263, 0xaf32cf68, 0xdb119edc, 0x7587ef73,
- 0x9efcbfc2, 0x095da773, 0x3a4f3a1e, 0x3ff7cac7, 0x542d7c55, 0xb612afea,
- 0xc75f9019, 0x36e16a76, 0xc6a452fe, 0xda622cae, 0x4d1397f3, 0x4bf53443,
- 0x97f3daac, 0x6ad7ec93, 0x68764fbe, 0x975e5fcf, 0x1f643e31, 0x0b574b56,
- 0x806e8013, 0x7d33aa4e, 0xd685ff34, 0xcd7b8a52, 0x74c7b90d, 0x3f36ab47,
- 0x2fcda7df, 0xabcdaddc, 0x46c3c9e7, 0x9230f429, 0xf131fc8d, 0x0cf7e44e,
- 0x0107ef85, 0x3ab36fbf, 0xcfda1199, 0xf8bf7816, 0x1b00c61e, 0x76c94bed,
- 0xe3ac238c, 0x5cb09ff7, 0xc3bdcbc7, 0xbe7af695, 0xdb8ec56f, 0x4b1dbee8,
- 0x06ede1cc, 0xfc7eeff0, 0x72f1a48e, 0xfe87af0e, 0xa21bd612, 0xe5c353f4,
- 0xfe459c93, 0x33ec9b82, 0x62b36b23, 0xc2fe016a, 0xe9399a34, 0xab0c759e,
- 0x9075bf27, 0x51327f72, 0x43bca4e8, 0x3e6186b0, 0xf0a67045, 0xe48f85f8,
- 0xfe4950c3, 0x89f12a75, 0xf447745b, 0xb90d97e6, 0xaaf007ba, 0x910286f8,
- 0x5623e2e4, 0xd8f9ef46, 0xbf13f75c, 0xb016c36e, 0x8f6b336e, 0xf87a1549,
- 0x61bcede7, 0x5c135fc4, 0xb52fda0e, 0x780b2415, 0x8588a52e, 0x4536cb9c,
- 0xc4a3e869, 0x5f42171a, 0x4be84243, 0x9d7f7f4b, 0x3945d5ec, 0xe842ce90,
- 0xe5be2ff0, 0xaf182c51, 0x05fa8f4c, 0x7689d58f, 0xca9e054a, 0x754f9602,
- 0xee319b46, 0x73d18b67, 0x3fd8a579, 0xab9f4aa5, 0xb2eb59e4, 0x566bfbe1,
- 0xf6e649d1, 0xf13d2dba, 0x96df1825, 0xea35f600, 0x5c919963, 0x1fd3b07d,
- 0x7fd43576, 0x65fffc04, 0xeb1bfd0c, 0x65efbc64, 0x5f10e4b5, 0x7949623f,
- 0xd05ef300, 0x437f58e8, 0xe5aff731, 0xef826ce8, 0x05bea151, 0xc23da5ef,
- 0xc1a94cf2, 0xfebc2df5, 0x5164597a, 0x31189f90, 0x9dbfea82, 0xcef86667,
- 0xd9a07781, 0x63ec7187, 0xdd77acf2, 0x7b7feb41, 0xaf07d714, 0xa8f11c19,
- 0x7c991e0f, 0xd287a677, 0x685f8d74, 0xbee4104b, 0x8f8517ce, 0x92bd7236,
- 0x78c7e25b, 0x34c2747c, 0xfd039957, 0xe90bb71d, 0xf112e742, 0xfd375be8,
- 0xa87e2b44, 0x0fc106e4, 0x5d749b4b, 0xe3ff430e, 0x35770b11, 0xd59e291e,
- 0x02f98652, 0xd903ee91, 0xd9ebe25c, 0x7e02aeb8, 0xe54af0fe, 0x181ea871,
- 0xbfdb9a32, 0x3c448693, 0xdffa1430, 0xb15a2450, 0x2b7a0dff, 0x4ef1e7dd,
- 0x1757e466, 0xe097eabb, 0x11096dbc, 0xda6fcf0c, 0xbe781a0a, 0x00ccc0d8,
- 0xbd6b934f, 0x4a69f117, 0xf6c08cf5, 0x7a7e0747, 0x5b19ed1c, 0x703a4882,
- 0xaef9cba2, 0x2817eb1b, 0x79e29df1, 0x9b3c70eb, 0x599ab445, 0xb9541c0f,
- 0xcbc99ebe, 0x0ab2ffc9, 0x0accdef4, 0xce0d436b, 0x708491e7, 0xae6df3b2,
- 0x79e20b06, 0xd13d40c3, 0x7f224941, 0xc2556a57, 0xffcc0ab4, 0x7191a062,
- 0x1f2ff3be, 0x5381fe8a, 0x5b3575a0, 0xc75e681a, 0xc139892e, 0xa9a896ef,
- 0xcf85a1d9, 0xf8f146ef, 0xf1e30990, 0xf5b79f80, 0x8b9a9ccc, 0x78a7a3f4,
- 0x6b7c7f53, 0x7ece5d73, 0x2afe61fb, 0x110b0394, 0xc29785be, 0xfa404f98,
- 0xfe61293d, 0x5b017f77, 0x60d9d680, 0x6e41fec1, 0xbfa2fabb, 0x7e9cd188,
- 0x4b0e0b9b, 0xd7824ef8, 0x341f1021, 0x444e4e5b, 0xb528783e, 0xa1e8e88c,
- 0x899a8f82, 0xfcf3862f, 0xeef779db, 0x137a3c41, 0xe77f69c3, 0x315982a1,
- 0x60bef4c1, 0x32f44894, 0x4e616fee, 0x9463e23f, 0x91fbf881, 0x8c56c708,
- 0xccf3794f, 0x824bbf64, 0x986cd975, 0x2862c4e7, 0x5da2fe87, 0x7ef13e62,
- 0x0b8f0575, 0xd57e302b, 0xf8843b65, 0x31664a90, 0x3949ae7e, 0xef9114f7,
- 0xb435435c, 0x25956ffa, 0xd8cfde97, 0x4270d092, 0xbb20b6d9, 0x5722906a,
- 0x45889406, 0x5906df28, 0xeb655db8, 0xbb940594, 0xaa47fba1, 0xed29d7be,
- 0xa65b328c, 0x371b54a3, 0x7d7015d2, 0xe97f6a1b, 0xc35edc2c, 0xae8fbc3f,
- 0x893f35cf, 0x317438e7, 0x197f7ada, 0x0a739ca5, 0x3a54afb1, 0x7ae560df,
- 0xd1ae4362, 0xa1bd7487, 0x2a59db98, 0x47f2ab1f, 0x872abf85, 0xef7e7bd4,
- 0x5195f628, 0x87365e4e, 0xe599eae4, 0xd4076f78, 0xc317739e, 0xfa56897b,
- 0x1f72aefe, 0x6f6294f1, 0x5bd8a7bd, 0x5bd8a1ff, 0x05bda3ef, 0x835186f9,
- 0x1437bf08, 0x08dd762e, 0x20901af3, 0x3a14df11, 0xedc44c4e, 0x271971af,
- 0x61d97d2c, 0x0ea2d111, 0x348f05e3, 0x36497bf2, 0x1598ed97, 0x95cf59f1,
- 0x766b2cf9, 0xbcbb3469, 0x7ae3f27c, 0xe43ed1d2, 0xb9557beb, 0x994c794f,
- 0xfe6571ff, 0x57f3286f, 0x43498fed, 0xbca132be, 0xefaf10df, 0xfc297e47,
- 0xb9d0a729, 0xb57950a8, 0xe908c401, 0xfc9122eb, 0xe778209f, 0x25bde46a,
- 0x4ef5f720, 0x1e68c959, 0x7660dbbf, 0x7c462c57, 0xa714664e, 0x27aae89d,
- 0xb90365b9, 0xf8b37245, 0xca6ceb5c, 0xe5833847, 0x6381519e, 0x8e5acbb1,
- 0x4638be72, 0x24571f4f, 0x5bbdd0a4, 0x1d0efed5, 0x0cf072f7, 0x82a66795,
- 0xf3e25ef5, 0x6c7247ff, 0x8ee3e32c, 0xfd3a24bd, 0x4f1cde5c, 0x91237926,
- 0x5fed0a7c, 0x636f3795, 0xbe2064cc, 0xfba1644f, 0x610a7700, 0x9de7844e,
- 0x3f307c28, 0xff7dd197, 0x673d8ac1, 0x01d1de57, 0xceb8a14e, 0xf5b04ed8,
- 0xfc7af883, 0x1bf75325, 0x8f8edc6c, 0x9a27e4d6, 0x4bf535e2, 0xef9ac9ac,
- 0x35c3ec93, 0xb23b27df, 0x32ebf935, 0xffea6946, 0xc9a459c2, 0x593050df,
- 0x2e4cbf53, 0x6665e4d3, 0x0e4877a5, 0xfa1eed36, 0xa7fd1eb1, 0xa71fa1b6,
- 0xa14f86f5, 0x0e0bf0f1, 0xa3b5e160, 0x00e8678b, 0x87518546, 0xfc864cef,
- 0x6793d1c6, 0x0aef80e8, 0x3afe1f07, 0xd3685819, 0x7f3c29e8, 0x53afe1f5,
- 0xf465bf20, 0x863f3c75, 0x3adcf091, 0x2f0cea7a, 0xea1c59e0, 0xd0347479,
- 0xea972df1, 0xf83da28f, 0xe6d365c7, 0x4ef84b26, 0x06625940, 0x8a6ce7cc,
- 0x7efc61e7, 0x15acec73, 0xfc761178, 0x8f74f577, 0x79a337ed, 0xfc8b8ebb,
- 0x8b8e8d5d, 0xbb0d1dfc, 0x53f6f1c8, 0xcbbef553, 0x7e9a0e04, 0x64115fdf,
- 0xfd05af7e, 0xdc6939cd, 0xfd14c4df, 0xdfd14c4d, 0xcdfd14c4, 0x6fee7a39,
- 0x26fe8a62, 0x89bfa396, 0x324d5be9, 0xb934efa5, 0x726f6d28, 0xbf7dda53,
- 0x6ae729e0, 0xef380a3d, 0x985b8b4f, 0x1117e4c3, 0x4ae7a14e, 0x4f9df43a,
- 0x45ca7ed0, 0x12fbc0e6, 0x394e2287, 0x39acbf70, 0x2e7444cf, 0x4c3bb674,
- 0x9730bdf0, 0xb62f9be7, 0x617e503b, 0x8e9eed1e, 0x946cb6e8, 0x16b5cdbf,
- 0x35bbee27, 0x23c7f45f, 0x1bd6149f, 0x714cdee8, 0xe9c52767, 0xbdfc2d0a,
- 0xc7114384, 0x91e3288e, 0x9179da5c, 0x277c45f9, 0xb0eedcc7, 0x3f60e32a,
- 0xea326e30, 0x3025c2b7, 0xbf5bf52e, 0xb0a9fa98, 0x842b8f74, 0xe1fa6b8c,
- 0x410a2771, 0x3176a6e3, 0xc701177a, 0xf5c66875, 0xfc3f744b, 0x78c25fa2,
- 0xd3b44761, 0x354d718e, 0x21df8c5f, 0xdfcf45bf, 0xf8742c5f, 0xb893d425,
- 0x5bef89bd, 0x33d881c6, 0x1f0d79d3, 0x47a865c9, 0x5d9db971, 0x9a179d7c,
- 0x9f900bfc, 0x6077958a, 0x7ba58cc0, 0x730c4c9b, 0x9128bea2, 0x38363bbe,
- 0xe339df17, 0x1b15a0ef, 0x1f90dbdd, 0xde5c74eb, 0xdf3318b0, 0xcfb5d057,
- 0x3bd415ff, 0xec983b19, 0x4bfb0495, 0xb404752c, 0x44cae45f, 0x5b24e3bb,
- 0x7568724f, 0x8f7fe794, 0x31be7a06, 0xf4505eb3, 0x7763f527, 0xfdc0ef85,
- 0x86578e88, 0x4d1f21e5, 0x645e3a33, 0x1f7fbfc0, 0x3cfc44dd, 0x8791ca2d,
- 0x7dfdac2f, 0x8f9e0af4, 0x3788bc73, 0x69acad83, 0x8ffc7146, 0xf1452f3f,
- 0xa79a740d, 0x7fb43a0a, 0x765373d4, 0x063613d1, 0xfaeb54e0, 0xd7e7c0ca,
- 0x7078c069, 0xc10718d4, 0x4661e06f, 0x273a98e7, 0xe076f847, 0xfa80df7e,
- 0xb2878c6c, 0xd61e474c, 0xd277d24b, 0xef8ef27d, 0xa827fdf8, 0x50537919,
- 0x8389fefc, 0xf8afab92, 0xdc31c9e3, 0x6a945d4e, 0xa8788759, 0x944c9bb3,
- 0xe748bc1e, 0x9dce9982, 0xd96bf18a, 0xa97bf7d0, 0xfae18eaf, 0x41a57921,
- 0x74410231, 0x30e594c2, 0x0cbde6a6, 0xbbf6d0f4, 0x8bdfbac3, 0x43327003,
- 0xdef3c89a, 0xeb178f1d, 0xac65f534, 0xe31dfddf, 0xbda7acf4, 0x8fcddbfc,
- 0xc6554f2f, 0xc7997a43, 0xd50c7fc8, 0x6d0b6b3b, 0x843f3bea, 0xcc63c7e7,
- 0x523de9db, 0x14877949, 0x4e315aea, 0x3de8ce11, 0x37bfc1ae, 0x9e819afb,
- 0x3d399a76, 0xdd3c8aa7, 0xeafaf229, 0xbbeaa53b, 0x7a14cf6b, 0xa3cc2b6e,
- 0x8fd4fc7e, 0xdf1b37b4, 0x6d6788cb, 0x6efabe34, 0x012f5a94, 0x2b0920f4,
- 0xe23a675d, 0xfb1250ab, 0x63e7e784, 0x1d20e68a, 0xb2f2685c, 0xcdf4824f,
- 0xc6c8f085, 0x49d5fc60, 0x3f5af7da, 0xda81331f, 0x1b6beda9, 0x284fb227,
- 0x3a27e978, 0x69154daa, 0x624ebd1f, 0x9c3be2af, 0xa80d5920, 0xa776d597,
- 0x78e2de40, 0xc5fc82ef, 0xbb903df4, 0x3ed1e89f, 0xfbe94e9d, 0x3f2a1e4d,
- 0x694d54dd, 0xca9ea9a0, 0xa46a6a3f, 0x23db96f4, 0x167f9172, 0x3ec65fed,
- 0xdaf7a209, 0x1027bd36, 0xd4553f94, 0x51ad1d37, 0x7254e8e8, 0x1c9fdfdf,
- 0x53bbedc3, 0x3b3e38e3, 0xf7c0dec0, 0x972e80da, 0xdbfd6d5b, 0xfdca1d1d,
- 0xf9e3a3ae, 0x3c4765fe, 0xc31f2fcf, 0x64596efd, 0xe8fed8ad, 0x3fef0378,
- 0xefc6ac45, 0x7bf78db2, 0x77a45d2a, 0xb1357ea4, 0x5831fee8, 0x73ca163f,
- 0xaea7f27a, 0x41e5dfe2, 0xaf9d5cbc, 0x42b6fdfa, 0x61eefaab, 0x8fcf7f3a,
- 0x7a02f7df, 0xfad5fea7, 0x3e702663, 0x4befad0c, 0x43c9e7e5, 0xc2a9f1e1,
- 0x6817c4b7, 0x241f2367, 0x9d45f107, 0xa542f883, 0xd6fe9543, 0xa7387216,
- 0x0f3a151a, 0x07eee7e7, 0x76842ea3, 0xb4cce383, 0xbbe3208f, 0x9d9eb0cb,
- 0xa12fc282, 0x63bca0fc, 0xca83f2ac, 0x0fbd2a07, 0xefe33e06, 0x8c75f334,
- 0x5f79fa95, 0x7b475958, 0x845dc46c, 0x2b7a83d7, 0x12b1adea, 0xb0792f7f,
- 0xedcef5d8, 0x11b19fc8, 0x96918c0e, 0xf7d4b2ac, 0xfe3ec725, 0x9543c2af,
- 0xa3c0fd0b, 0xca0c1c17, 0x123fc3bb, 0x50e7240e, 0xf6fd238e, 0x8c4eb721,
- 0x177629b1, 0xc7cfdfa7, 0x1d226fc8, 0x1d660875, 0x30e87bde, 0xf05f7df7,
- 0x0ebe78e8, 0x16d68e95, 0xe3074a32, 0x96f6873a, 0xba7aea7b, 0x230779d7,
- 0xc61a97f9, 0xacf143cb, 0xfe482bae, 0xa979c19d, 0xec0c64f0, 0xa127e81f,
- 0xe8ef42c6, 0xe0e856dd, 0xd3f036fc, 0x67ac0dca, 0x3e8a35fd, 0xf8944c7b,
- 0x7e8ac87b, 0xa4172ee9, 0x1a996599, 0xcf4743bd, 0xefe15f3a, 0x5645fb8a,
- 0xa372ef8d, 0x171c1d6f, 0xe5c8521f, 0x3cf4d743, 0x2f13f711, 0xf742d5fc,
- 0xea0c2f8b, 0xcef68d3c, 0x533d5685, 0x2a9b23de, 0x8b9dde80, 0xf90c133e,
- 0xf39ef2a5, 0x0ef7e8e7, 0x176d7a83, 0xb5bda34f, 0x4cc4ab68, 0xeb609e78,
- 0x3d20b737, 0x4f457bcd, 0x5b9a99e7, 0x7a471e37, 0xc04fb73c, 0x85d96fe7,
- 0x31c5027d, 0x7c225b6b, 0x0a5fa866, 0xc77fac79, 0xe3c7c96e, 0x39c5328b,
- 0x3ef1946a, 0xa0a5d731, 0xeb713e5d, 0xfb843a5d, 0x8b02edbc, 0xb48f7e83,
- 0xdf0c79de, 0x8db7391d, 0x366b2fdf, 0xf9c0ee47, 0x0ceb4a97, 0xc17ca083,
- 0x635e5e76, 0xf70cfd29, 0xcd3c1743, 0x60b3fe28, 0x8ff9046f, 0x1ff45af3,
- 0x8eec8205, 0xfcda6dea, 0x4066d67c, 0x53b0583a, 0x07b35e5b, 0x79f241f2,
- 0x2f12a150, 0x29b2ce19, 0xe3c2d25e, 0xef7caa07, 0xb97b5f6d, 0xde506d1c,
- 0x897f66ff, 0x5f0ff7a5, 0x23e926f9, 0x2997cf0b, 0xff9033f9, 0x178e1bea,
- 0xca752e15, 0xb3c55fd8, 0x675836dc, 0x2b662bdd, 0x0775edfa, 0x79e246ce,
- 0x016145b0, 0xbd99177a, 0xded1592c, 0xe4bf607b, 0x1ed6f845, 0xfb893f34,
- 0x9e2f7598, 0xa13c6030, 0xb262df74, 0xef312657, 0xfd9bf44a, 0x89dac8a2,
- 0x7de9da8e, 0xce77265f, 0x1c2e7ec3, 0x0ed15b30, 0x4ff97ba8, 0xfcc76ec7,
- 0xa7dd028b, 0x201890bc, 0x55b7993a, 0x0c437ba2, 0x1efcb42d, 0xaf10913a,
- 0x28b6e788, 0x0ee58f7a, 0x19e23bc4, 0x4648eef0, 0xd071e13a, 0xf44b4f79,
- 0xd9b5b9bd, 0xf9cff580, 0xbc557e38, 0x094b4a97, 0xe71e0bcf, 0x75b89cf8,
- 0xa430e6d7, 0x64f5a783, 0x76811f8f, 0x0d3b7883, 0xe15e181d, 0xc5d85ffd,
- 0xd37791cf, 0x376301c1, 0x2f3ce01d, 0xa7c00747, 0x472ff7a4, 0x97e89d07,
- 0xddffbf92, 0x0ed0c7b9, 0x87e7e08f, 0xd51b77d5, 0x2e071a77, 0xdf14753c,
- 0xbfb34ae7, 0xa5777640, 0xc5da8bed, 0x9d09de8c, 0x4be78853, 0x02bdbf26,
- 0xca7277d1, 0x2f14d9b3, 0x6d7607d5, 0x744378c1, 0x3ac7c538, 0xe1b6cb92,
- 0xb6c598fb, 0xc2f6936f, 0xcefd163a, 0xeb11e748, 0x3fc67e15, 0xbea2073c,
- 0xa7780c4b, 0x07b519f3, 0xc65cfc8d, 0xe1fbed3e, 0xce9cf11d, 0x959d0c4b,
- 0x804fd18b, 0x411c70fe, 0xc2e9fee6, 0x451dfc83, 0xa85c9fd5, 0x47b8a7be,
- 0x60836efb, 0x3dea9b7d, 0x70429850, 0xd6371a18, 0x73797681, 0x0d84f339,
- 0xc17d21fd, 0x5c7ca9df, 0xb5fa8bf1, 0x1bd1fb27, 0xbcc91daa, 0x15daf721,
- 0x014775b9, 0xbbd1e39f, 0x7be1d0df, 0xf93b1d8c, 0x3fcc3bef, 0x062af208,
- 0x1af519f7, 0xd4a753b9, 0x0fe914ff, 0x5bde76e0, 0x052892ff, 0x8bf797cb,
- 0xa79a9fb1, 0x48753f79, 0xf5571457, 0xfe830e6f, 0x33d36cbe, 0x4c0597e4,
- 0x23fb3791, 0x4bd46e28, 0xbedcecbe, 0x50f7cd12, 0x76085aba, 0x94222e26,
- 0x69f6eabe, 0xa85d3deb, 0x670e7bec, 0xfff9e0d7, 0x90e031af, 0xba5f23d7,
- 0x8e67e9df, 0x6eefe428, 0xef68287d, 0x056b0fef, 0x77ea0f96, 0x63c4cfb8,
- 0xeafe4aa4, 0xfe4d56dd, 0xa6bb369a, 0x1dfbb5fe, 0xfed9ef9a, 0x11f7cd0c,
- 0x7c9a9dc7, 0xa6817b5e, 0x64f7c8fe, 0xc0547e4d, 0xe63fa9a5, 0xef935bbc,
- 0xf4f584ca, 0xe9a8cf61, 0xa9a0bb24, 0xd661d93f, 0x465d7ff4, 0x65df26b4,
- 0xd8a3e051, 0xfbfdaa99, 0xff6e42a7, 0x7c2aa686, 0x8ed4e17f, 0x37edfaa3,
- 0x2bc76814, 0x5de3b593, 0x1df05e29, 0xff9e9d41, 0xf8a6027e, 0x34ba09fb,
- 0x9809fbfe, 0xc04fdfc7, 0x013f7f14, 0xe4dd7fcb, 0xa6befca4, 0x04bfca02,
- 0xf7e6097e, 0xe9829f83, 0x609fe0cb, 0x77e0e5f9, 0xb8f7194c, 0xca1bee32,
- 0x4e153fb8, 0xbdfd296f, 0xef87f4a3, 0x3df4ea0d, 0x795d6bdf, 0xe3e025e2,
- 0x7467db22, 0x086e595f, 0xa31c6294, 0xe18975f3, 0xf1e667f9, 0x418a6f1d,
- 0xa18036fd, 0x9ddcc49d, 0x6e3ee26f, 0xad438bdd, 0x81efae78, 0x07787ffd,
- 0x27d85fbf, 0xf0e6bc51, 0xd891b3fe, 0xc29797cf, 0xe90a30e9, 0x7cd7c845,
- 0x0d7e502b, 0x6773c77c, 0xca4af793, 0xa46d67bb, 0x554cbeef, 0x2fba3ef3,
- 0xd7b3deed, 0xb85a7880, 0x7f7d2f7b, 0xf35af81d, 0xcd76f77d, 0x87d62b77,
- 0x47cea174, 0xf7cf8571, 0xa9f9fdab, 0x7f2a4d7e, 0xddf3fe3f, 0x418e5647,
- 0x6ab7bc7d, 0x17b5c600, 0x3b1139ee, 0xf1f007fb, 0x66cf4bd5, 0x3da81ea2,
- 0xd1e72920, 0x07b57f78, 0xcf9e4af4, 0x6f7526bf, 0x63379e29, 0xb3ba047b,
- 0x4f5b25f4, 0xee16c4e7, 0x728355df, 0xedc66176, 0xb57f8a7a, 0x83bc4a49,
- 0x6350d3bf, 0x6baedc65, 0xdf123afb, 0xdb7dffa9, 0x1ef1ebd1, 0x23c1c774,
- 0xc74c5ef4, 0x4fe3493e, 0xf1cb1d8d, 0xf72c763b, 0x77dcbeff, 0x09cf1224,
- 0x8476d03e, 0xa7e75dc3, 0xf2561ff5, 0xbc12901d, 0xfeb44bf7, 0xcd2497f9,
- 0xc0b801ef, 0xcdfba171, 0x7022d3fb, 0xf8db275d, 0x7d40d378, 0xd56a7950,
- 0x1bff9c1c, 0xe53cf2f6, 0xddfa327d, 0xaeab8ea1, 0x767285db, 0xe7d49b61,
- 0xd839f779, 0x98efcc2f, 0xebe9d39e, 0x1b1c2fa9, 0xc37f7004, 0x749b2035,
- 0x62af5284, 0xa59bd72c, 0xea2a4f06, 0x47169391, 0xfdf9d204, 0x8811a1ae,
- 0x6298c3df, 0xba49ebb9, 0x1f60be07, 0xb5a4ef1f, 0xeee9123d, 0x5211bee4,
- 0x2dcbfef0, 0xe9ba7aca, 0x5e6f1bbb, 0x6fb553c2, 0xe9fb05bb, 0x9f2f1fc1,
- 0xf54ad636, 0xb89df0df, 0x93aced05, 0xc0ddfc19, 0xfe460663, 0xdc3dc2f5,
- 0x70793e4e, 0xfbe0f6f0, 0x7c14d359, 0x7df06474, 0xf5fb0bf5, 0x77dbbd4a,
- 0x3d3d4d31, 0xed4ed7c7, 0x33beac7c, 0x5e7da10f, 0x6744b8f3, 0xf5f46a1d,
- 0x4ffeb8b1, 0x57e769b7, 0x46b9c0ad, 0x0d8d8fef, 0x039ad7a2, 0x21637cdd,
- 0xf2a03f3a, 0xa9f08389, 0x8664f1e2, 0xe765e7c4, 0xecd5f8ef, 0xdd7445dd,
- 0xe3c4be3b, 0x31ef83b7, 0x74be2939, 0xb55a7f19, 0x2dfc821b, 0xbd23bdfa,
- 0x989e686b, 0x9c1f63d2, 0x1a1f942e, 0x987e879b, 0x93ccfc9b, 0x185bec0c,
- 0xebed16b3, 0x891dfa28, 0xd76e38cd, 0xffbd3d3e, 0xb03a6b5f, 0xef23df00,
- 0xc0a30272, 0xe9b6bbbc, 0x4667dfbb, 0x629e773b, 0xf0ea7ee7, 0x7fa04be4,
- 0xb489976b, 0x923d9abe, 0x5c17cf0f, 0x57dee450, 0x406f937b, 0x2e39ad7a,
- 0x9a3fd159, 0x70fc96d8, 0x77e92e61, 0xdaeb57b2, 0xb75c9db8, 0x58bb205b,
- 0x156a43b5, 0x629ce43b, 0xd65b9f07, 0x57e3edc9, 0xea11b604, 0x7b030dad,
- 0x9b5de604, 0xdaec8539, 0xcadd695a, 0x473c0ec1, 0xe61bb5a5, 0xe7443c1f,
- 0xd1cec0ae, 0xaddc2f94, 0x69b6d7cd, 0xf8bfaaf3, 0x3b6ed8d2, 0x37fa3863,
- 0x1c615225, 0x0a9bc3f2, 0xe77f555e, 0xf49c6bce, 0xe80f27fb, 0xb8d4531c,
- 0xd16bc204, 0xc7cc2f2b, 0xc26afb79, 0x347fe49c, 0xdae096fe, 0xd7ee17f1,
- 0x1bdee074, 0xf0e333f2, 0xe9cfd8ed, 0xf05e917c, 0x2cbaefe1, 0xfd815d42,
- 0x32df0499, 0xbb7b3b8c, 0x293e4922, 0xca1a08e7, 0x632affb9, 0xb2a6180c,
- 0xe7f72c7e, 0x38eaf543, 0x0e8a405d, 0xb1811392, 0xd8af5429, 0xde57cfc0,
- 0xfd24e119, 0xedc4c436, 0x6e2621f5, 0x75d10faf, 0x85dfa32e, 0x7df02418,
- 0x6997ac8e, 0x9d6d60ff, 0x0417f685, 0xe10e957b, 0x955076b1, 0x887de90e,
- 0xfd032db5, 0x8c5f353a, 0xec7b33df, 0x478b5283, 0x8cece387, 0x047d9417,
- 0x74cb4fae, 0x90747d56, 0x996c657e, 0x3dea7e66, 0x7e7d9fcc, 0xa59f9856,
- 0xef807e67, 0x75ff6c28, 0x86ec93ae, 0xf7f532a4, 0x2e49ea1a, 0x12fcf0a8,
- 0x7018bde8, 0x816c7745, 0x1db66a6e, 0x96b2ff22, 0x76c4afb5, 0x1d7c91a5,
- 0xbefe06a5, 0x4f186951, 0xfcb8e375, 0xa5d2512a, 0xc3f6b4e3, 0x7eb27191,
- 0xc63ba41c, 0xf2e34db9, 0x79953f68, 0x6f0d7d61, 0x43d677f3, 0x13943ee5,
- 0x7c0d7d3d, 0x466ff288, 0xd54d60bd, 0xbf9c69d8, 0x6ea2f116, 0xf6f85a3e,
- 0xea27c493, 0xc66acc39, 0xa3667bf9, 0xc41a9ad9, 0xdf908b17, 0xc377ded0,
- 0x138c7cbf, 0xeb6abff0, 0xe9973a7c, 0xa2b58c69, 0xfa48b75f, 0x7e25f692,
- 0xda2ddf2c, 0xcdfbcdaf, 0x11fdfe9c, 0x1949f902, 0x0de3bae3, 0x0a498fc4,
- 0x08f76c7f, 0xfb600e74, 0xcf1e3a77, 0x7e2810ab, 0x36143377, 0x624994da,
- 0xba22eabd, 0xe333724f, 0x4b596347, 0x3dbbf28c, 0x796fb431, 0x9beeb293,
- 0x2843fe75, 0xdbea8e57, 0x749e3192, 0xc4e4d6b4, 0x1b59cbfe, 0xc15a6fbd,
- 0x153d29da, 0x9fa2f75e, 0x2226f73a, 0x63c9fddd, 0xf6d678a4, 0x50223be5,
- 0xe5b167be, 0x044fdc56, 0xb7c6ec50, 0x73b3fb01, 0xd51df742, 0x2f60c4fb,
- 0x027d21c7, 0x1d2127fe, 0xd8dc079c, 0x5fb77a73, 0x2317c7f8, 0x2c5f7f11,
- 0x5b96e179, 0x9d5ffa19, 0xa82d47e4, 0x5df7465f, 0x33da9c81, 0xaaf07bf2,
- 0x5bc55de2, 0xf01d5e37, 0xb0af182f, 0x938404a6, 0xc91965f4, 0xe8917fa5,
- 0xf0e5d69c, 0xbc7dc2aa, 0x0e3b9f82, 0x8a15f970, 0xb70f156f, 0x43f5bbe2,
- 0x236eef49, 0xc923313c, 0xb87a50cb, 0x3080be1c, 0xc4d93ce9, 0xa1f92318,
- 0x4f2e4583, 0x2813cf07, 0xf33b795e, 0xf48012bc, 0xf5f7f096, 0xc70adefc,
- 0xb782855b, 0x2fdf313f, 0x71ff9232, 0xce323b78, 0x606e34dd, 0xe50dbe6f,
- 0xb2f8fed3, 0x37fce1c6, 0x18fa5f55, 0x3f0863f2, 0x376bf087, 0x2ff09d1e,
- 0xc69e8f08, 0x6ecbe248, 0xae7b4b7c, 0x7fe22f4c, 0x5fb58e30, 0xaf93f9f0,
- 0x46055f80, 0xf8f8bfbf, 0x54dead38, 0xa5c2a6f1, 0x07e0b8bf, 0x2ad47fcc,
- 0xe70abed0, 0x748dbbe3, 0x57dd43ff, 0x5e4ff751, 0x5cf5c719, 0xd801fa68,
- 0xb55fb84b, 0x40e4e326, 0x194f33b4, 0xfef87ed4, 0xdacb07ef, 0xb6f7f113,
- 0xe9e2283e, 0xddff0329, 0x872f350f, 0x07e4b8f1, 0xc22f46f1, 0xf86103e9,
- 0x1f33d404, 0xcf5cf708, 0x070671f3, 0x26e3cbe6, 0x53903de0, 0x798e357a,
- 0xf07f12bf, 0x8f9479f1, 0x027aa62c, 0x074a5dff, 0xf61ddefe, 0x1736082b,
- 0xbb83e7cc, 0x9d7de8d2, 0x718d7dee, 0x467f0edc, 0xdc774759, 0xe2815eff,
- 0x355133fe, 0x84c4fee1, 0x877c2e2d, 0x437df4f3, 0xb4df69b3, 0xea3e3b41,
- 0x978a64fd, 0xfee7e06e, 0xabea752e, 0xb75bf482, 0xc8be2dbe, 0xd1e4e7cb,
- 0xbfa88cef, 0x86db2747, 0xc7dcff73, 0xd550df7f, 0x5fc75d67, 0x73c704b0,
- 0xdfe2533f, 0x0fe1f9dd, 0xc3eb0526, 0x4dad4f43, 0x0daa7ec7, 0x05a72cde,
- 0xb1ee877f, 0xaa5bb424, 0x4e955a9f, 0x0f53f4f6, 0x545fa3a4, 0xba6d63bd,
- 0x0f70f94c, 0xbe3a65f9, 0x1b9e2fee, 0xd4f3e745, 0xdccaf6fe, 0x1d30a8af,
- 0x3dbd412a, 0x56afeae5, 0xda41fee0, 0xffc5027d, 0x15f6b066, 0x332b67a8,
- 0x16b15f38, 0xe87fc913, 0x776bf68f, 0x4ec5ff0d, 0x646f0794, 0x81cfd59c,
- 0x203e127a, 0xd5df1035, 0xdfa3adf2, 0x7fcd5c09, 0x7ea7b027, 0x56ffed22,
- 0x9de8172d, 0xfcf519ab, 0xf1a4f377, 0x59c704d1, 0xd18fb3ab, 0x794b57fb,
- 0x6fa314f9, 0x67ec49df, 0x3fdf818b, 0xe094ee64, 0xb29270fb, 0x9d73385f,
- 0xca717bf2, 0x639b6938, 0x53af07b7, 0x591df652, 0xdf2477ea, 0x32f0506e,
- 0x5789e975, 0x54efd0ed, 0x1e3cc7bd, 0xe9b3bfbe, 0x619faa5c, 0x3a33f41c,
- 0xf066985f, 0xd71f3a3c, 0x7148c3bd, 0xc1acdcd2, 0xcfd41c52, 0x11b45259,
- 0xab4b99d3, 0x728f9d1e, 0xcd94d5b8, 0xf9f2358f, 0x114e0835, 0xed91f4e5,
- 0xb842ceb7, 0x7fc4df9d, 0xfb61abff, 0xc2ddfa30, 0xafdee4fe, 0xf9eff89a,
- 0xa264dd3d, 0x2b26d9f4, 0xcfeba3c3, 0xff29d935, 0x4a0e4daf, 0xcd3cf4d9,
- 0xd7f4d8ef, 0xe14b88df, 0xb973bbf4, 0x3bf4cdab, 0x39460d88, 0x2bc03c75,
- 0x1ddaaaed, 0xc2eb280f, 0xbdb2b8f7, 0x0e1113c9, 0x3e2c0fb9, 0x75b3a686,
- 0x7d97df3c, 0x24eae4f3, 0x2c77d614, 0x7cc4f33d, 0x78fb7ef8, 0x270835f7,
- 0x148bb6d3, 0x607238a7, 0xbbc22555, 0xaf91877a, 0xf9f7594f, 0x8f71e94d,
- 0x75f97c62, 0x160dc53d, 0xc4f33b6d, 0x877f0c03, 0x9821b177, 0xbd74afde,
- 0x7ee78f57, 0xaaf8e5e6, 0x78fc383f, 0x449b5abe, 0xf803b3fc, 0xf3a7f32a,
- 0xcba5a2d5, 0xc6cb7e08, 0xb8426fbb, 0x937bd79d, 0x0fce9f90, 0xee744b1f,
- 0xf820457d, 0xdbbc72ab, 0xbfe056af, 0x575b6c50, 0xc723dc98, 0x04b961bf,
- 0x3e0743df, 0xf9fb474d, 0x3fe5fd9b, 0x6097eeb8, 0x1861fedd, 0x1a5133af,
- 0xe0df3dfa, 0xf584ea19, 0x3ef3f7e5, 0x6af1d2e8, 0x539f9ffc, 0x36213fc0,
- 0xe6219df8, 0xd4f9828d, 0x61fc141c, 0x26cfa859, 0xbf43a67d, 0x8fcffc11,
- 0x198ce734, 0x9f4828fb, 0xcbdf37f7, 0x4fd875a5, 0x1ddc8f5a, 0xfcc89ef9,
- 0x547b7a41, 0x83f98c20, 0x73f6edc6, 0xfb40cca0, 0x1724f14e, 0x2e786f10,
- 0xe181c2ec, 0x99ff303b, 0xc2f1e381, 0xf14f604b, 0xf2931078, 0xd9f9d67b,
- 0x50b22f81, 0xb7ae86ce, 0x882fe5ee, 0xaff66a3e, 0xfad028ba, 0xcba67f27,
- 0x0ffafcba, 0x1645f53d, 0x7d33efe2, 0xb6fd1f20, 0x1640860b, 0xb7eea1e2,
- 0x58ff9e5c, 0xf3e5a838, 0xf7e81b57, 0x1304eb3a, 0xf48949d1, 0x2e1fc525,
- 0x87f1e71c, 0x924f03ba, 0xc83aed3c, 0xc48b40e3, 0x8c38d2f6, 0x399fd49b,
- 0xcbacf286, 0xe831fdce, 0x2b71de9f, 0x41e8aa1c, 0xde12675d, 0x4dd83bf7,
- 0x3c8fdc0f, 0x0c3bfc8f, 0xf3a369fb, 0x9c2b2cbd, 0x39f15da1, 0x7c30f3f2,
- 0x257930bf, 0xdf79f9d2, 0x8f941cc0, 0xadfd666f, 0xbf86e28e, 0xb27ba70f,
- 0xe6926db5, 0x3af7799c, 0x703f127c, 0x29fb54c4, 0xe7c1c99c, 0x1b8e0cb9,
- 0x837e6ec8, 0xba2e13ef, 0x7b13f38a, 0xed53f399, 0x7b0643f2, 0x15bb7f50,
- 0xc8ed527e, 0x1bffbf29, 0x85b3e5f1, 0xd504fd32, 0xffc7a54f, 0x4cf7fe9f,
- 0x33412fff, 0x800063ec, 0x00008000, 0x00088b1f, 0x00000000, 0x5aa5ff00,
- 0x5554700d, 0xbdef3e96, 0xdd2749fe, 0x12421349, 0x42068408, 0x03621a88,
- 0xc4d67281, 0x206efce9, 0x6b01bb33, 0x8d08c42d, 0x749d2422, 0x6aece882,
- 0x021a6eb9, 0x367564ac, 0x1d47598c, 0x809f9b47, 0x9476ec28, 0x0da0c040,
- 0x6ba2cb0a, 0x3a26aa45, 0xab545b55, 0xa6e00eac, 0xd63b8223, 0x3be7b8e2,
- 0x1dddb5ef, 0xa6ed4fe2, 0xee7dba8a, 0x9ee7b9cf, 0xce77ce7b, 0x52b48f3d,
- 0x6a22ca22, 0x2a08cdcf, 0x6ea6ff0a, 0x6d4445a2, 0xf67f2429, 0xaf1f4541,
- 0x7dbc64d3, 0x58c9a340, 0xce08eb93, 0x4754419e, 0x459a26dd, 0x654284b4,
- 0x29bf71a4, 0x795bcbf2, 0x6d0dfebc, 0x61931741, 0xb06aad1b, 0x7aa6d513,
- 0x1314360b, 0xaacaab0d, 0x0bcc6286, 0x4b9d2e95, 0x7efe0df4, 0x879b744a,
- 0x2a224f99, 0xf60f14d3, 0x169c9d1b, 0x8b5dc9dc, 0x491bfb97, 0xeaa7984d,
- 0x534f98f3, 0xa3827c08, 0xd81d4b25, 0x9964b468, 0x8eef3e23, 0x07d6d237,
- 0xd2a14e31, 0xf9f6123b, 0x68858f4b, 0xcb17d121, 0x650faa17, 0xdaae79f2,
- 0x51337403, 0x29ed768d, 0x70ddf785, 0x05564ccc, 0xafbc67fc, 0x670239f2,
- 0xfa65ea34, 0x790a4752, 0x4bceccac, 0x1bcf0f1e, 0xd8293b99, 0x3f6fe7c5,
- 0xe78990a0, 0xeeb11db5, 0x6d79e14c, 0x9bb648e6, 0xf036ddf7, 0xdb878b3b,
- 0x2cf7fef6, 0xcf58b9ae, 0x9e433647, 0x6dfa1514, 0x174ff277, 0xce607f91,
- 0x3f3177fb, 0xf44d69ff, 0x67bd37ed, 0x05d7d621, 0x5afd8f9e, 0x82d5f60e,
- 0x56f0a56e, 0x3cfdda27, 0x9bbf48af, 0xe5dff86f, 0xb386575c, 0xf8be7e38,
- 0xa7a25d39, 0x69f8bd18, 0xd602f73e, 0x43eb468b, 0x21ee5976, 0x6e717b96,
- 0x252e8ece, 0x5fae9d71, 0x4b69768f, 0x9d15cb0e, 0x6615b8a9, 0x6c0d4d15,
- 0x851f4276, 0x1797b38a, 0xb97f5f47, 0x9bf42d74, 0x9dbd2c12, 0x90b517cf,
- 0xc7e2c7dc, 0x04b49bb6, 0x6a0f1679, 0x3443dc1e, 0xc90e9a95, 0xe2a77bbd,
- 0x61bca83e, 0xd61267a9, 0x2b7bbe8d, 0x7955196f, 0x73fafdbd, 0x7975ca04,
- 0xc9a9b0ce, 0xd7721b7d, 0x80ede5e7, 0xc400d9fe, 0xfaa94e8e, 0xc7951efe,
- 0xbfd7c7e7, 0x46a79c68, 0xf089ce2b, 0x9dc506f8, 0x9f671ce3, 0xf587bb58,
- 0xe9ef86be, 0xb46fbe45, 0x6938752f, 0xfda26ccd, 0xff42b91d, 0xda4b8773,
- 0xf4914750, 0x16e1d2bf, 0x2dc760fa, 0xf0ea1f42, 0x51d03d08, 0x8ed1ed27,
- 0x033fe906, 0x708cff51, 0xe2ec20a0, 0x4a6d7c14, 0x1cce1e9c, 0x9e494f43,
- 0x24a99c3f, 0x991453d3, 0x3cb870ff, 0xa5fa6018, 0x939ca732, 0xc0d1ca8d,
- 0x4b4e68fa, 0xb40ca12f, 0x674a41f9, 0x037bb1bb, 0x3ffbb61e, 0x43daedd4,
- 0x4bcfc533, 0xf33aaf30, 0xdbf846ce, 0x692b2ce5, 0x1ec3cfac, 0xde63dfef,
- 0x0555d3e9, 0x683fd1db, 0xfef95b73, 0x94dc5787, 0xff47bd01, 0xb78d234d,
- 0xb05a28ae, 0x0ae994b9, 0xcc7622bb, 0xc86e6efc, 0x4fc6bb66, 0xb83553e6,
- 0xe3a4d4ba, 0x02b67384, 0xc1aeb7fd, 0xdb261b3e, 0xfc795816, 0x57f7b94a,
- 0xf7a627d8, 0x0aeb29da, 0xd98bd0bc, 0xf3cabef5, 0xa66eff02, 0x80f79e5e,
- 0xdf649dc3, 0x822bcb47, 0x45c59bb0, 0x07cf36b3, 0xd9ba767f, 0xf5bb3fbc,
- 0x51e7c87e, 0xc14e94d3, 0xb8099731, 0xf3fdc410, 0x452e953e, 0x716c30ec,
- 0x7f993299, 0x2b22b538, 0x194eebc0, 0xb8da7df7, 0x7dc633ef, 0xd5fdc3bf,
- 0xfdcbbedc, 0x1fb88768, 0x9e988aed, 0xd085a34d, 0x0da7f05f, 0xef44792f,
- 0xa3a0f9a3, 0x3b0e5a66, 0xd33f025c, 0x81bf27f1, 0x5ca278e5, 0x036ee397,
- 0x6b6e867f, 0x85d7d3e8, 0x858b4f84, 0x4bffb0bc, 0x9f609f58, 0xf5f175b1,
- 0x9bb2ed24, 0xe7c29029, 0x20b6c3a0, 0x895344f4, 0x0ecb5f80, 0x692e08e7,
- 0xcf8ab329, 0x017a644f, 0xabd29e09, 0xae5e7d56, 0x85fc216b, 0x8cfb2475,
- 0x9980d504, 0x399fc4ed, 0xf2c99854, 0x167e188e, 0x49fd0fa3, 0xcff6fc13,
- 0xd7db9a67, 0xfe7cfd14, 0x13854365, 0xa9b15eb0, 0x4eff33b0, 0xe1acfc7d,
- 0x0699fe87, 0x943ce33f, 0x7dc7ca12, 0xce1d8f44, 0x2ee987bf, 0x1ead787b,
- 0x8285c207, 0x5c2e14df, 0x42549c06, 0xd44dc8e7, 0xaf7dfc77, 0x47d027e9,
- 0xfdd079e8, 0xd3af7fc7, 0x6739df4a, 0x50a4f8e2, 0xfd04e2be, 0x3df74181,
- 0x409dc13f, 0x87395f9b, 0xdfa6cb71, 0x3327ab7b, 0xa783586e, 0xff37603b,
- 0xed97dba2, 0xd027ff40, 0x2b11cb5b, 0x0f2b2ec1, 0x0af67ff4, 0x3bf4b78f,
- 0xa8fb80dc, 0xbbe88667, 0xc7dec8f3, 0x7d236f61, 0xae87f166, 0xfe7bbffd,
- 0x25679911, 0x35bd5b98, 0xcfbc4a54, 0xe37fe3d1, 0xb49f6135, 0x6fd015d0,
- 0x39179c57, 0xfeea27ea, 0xa9f1543d, 0xf5bd0037, 0xc46bf81f, 0x0a0cfab5,
- 0x65e96ec0, 0xe46647be, 0x6a86f57b, 0xc771e12a, 0x6ff04ad0, 0x86eac97b,
- 0xcd5efa9d, 0x5f1f84a9, 0xfb14ccf3, 0x3bb1be6b, 0x3d57711f, 0x23cf6fba,
- 0x23679859, 0xc99edbc8, 0xf57ac476, 0xdd163b69, 0x6bb7f72f, 0xf7206fcf,
- 0x64ed1b3e, 0x03cccd3e, 0x99ef35fb, 0xd1f00c1d, 0xe3fafb5e, 0x1a87b895,
- 0xe83db9ed, 0xa6dbb2bf, 0x2d670f42, 0xf8728c9e, 0xbbb359e1, 0xba0ceb17,
- 0x2ea27879, 0x56a45a4f, 0x3bab2fee, 0x37d7711f, 0x80bfe1f1, 0xf75af5dc,
- 0x3ce2cff3, 0x3884ad7b, 0xeb1f6171, 0xe85dd78d, 0x2dc7cf35, 0x9873ec8f,
- 0x94972f60, 0x82cf95ee, 0xef3eaf7f, 0xf45bad92, 0x439de819, 0x11e78fd8,
- 0x6525f2e2, 0x85ff527b, 0x5e25bdde, 0x97c5dfd6, 0x09bd8bea, 0x17f31bf6,
- 0x526b6edf, 0x6024147c, 0xbf19f25c, 0x3b2019c9, 0x8366dfc7, 0x99e878bc,
- 0xbe296791, 0x737ee2fe, 0x6a3ac2d8, 0x94e6d2b6, 0xf83fb8cc, 0x0ebcfef2,
- 0x3ba9f3e7, 0x34c7910a, 0x84ac882f, 0x51b05c5f, 0x7bcf2e4a, 0xbe5f88db,
- 0x2a971b83, 0x4f2ddf25, 0xc7ee854d, 0x38f57357, 0x16c07576, 0x1ddf280c,
- 0x83a3fe8f, 0x8eccef9c, 0x67af77d3, 0x7e71297e, 0xad425b6f, 0x2db7de6e,
- 0x738fc753, 0xe33f7f3c, 0xcfb19558, 0xe79287aa, 0x299152df, 0x8966d3f6,
- 0xc60e2214, 0x88ac2ff8, 0x10a45ae1, 0x5d763578, 0xe013d23d, 0xe08acbc8,
- 0xf2a0ef88, 0xa86e96a1, 0x4fae3b34, 0xa8204a5f, 0x0f7f8c95, 0x7b8c8bb9,
- 0x55ef5e60, 0xdbe57ee8, 0x98d8f36f, 0x246a4b4f, 0xab53791d, 0x7c8e9223,
- 0x46a8e468, 0xcbeb8d3b, 0x199ff18a, 0x4649afdf, 0xf6643fb8, 0x8fdbc6d8,
- 0x6f3c438f, 0x2ab37e1d, 0xfdd0a93e, 0x4d2069a6, 0x646723f6, 0xe9b6ec11,
- 0x3875e4b9, 0x1fc133a7, 0x65760647, 0x8fe2137b, 0xaa3b90cf, 0x137e287c,
- 0x7ca3fafd, 0xa54f81d8, 0xf6ab0acd, 0x65b1af22, 0x3a294d0a, 0xe5b1b01d,
- 0xa7b4befb, 0x5e2e7ec2, 0x1e3f156d, 0x73822251, 0x0aae27b9, 0x23988c3e,
- 0xca8e7382, 0x18fc11ff, 0x09591099, 0xb4adcadc, 0xc9f196af, 0x1e2e9591,
- 0xa0c2b2ff, 0x03e491a7, 0x57322785, 0xb5ea4f03, 0x3711b7d1, 0x09d99769,
- 0x7be9893f, 0xa227a19d, 0x783b86c7, 0xb3410ffc, 0x4e61f10b, 0x0be462a5,
- 0xb5f9f896, 0x908d3fb8, 0xf841c0eb, 0xdddd9367, 0xfdc1a32b, 0xbc1f20af,
- 0xe5dddb33, 0x326f5eba, 0xcacae587, 0xee2d6afc, 0xfbdd3b29, 0xdfcc158f,
- 0x07c91c5f, 0x4bb5b1ce, 0xdd7b6a1c, 0x47fb13ba, 0x807ba3cd, 0xde1c175f,
- 0x9f582b27, 0xada196ad, 0x67d22ca5, 0xa429c8e6, 0x6f604bf8, 0xbdba2382,
- 0xb2ef148d, 0x561e9f08, 0x9767eb2a, 0x4b940f71, 0xa1f603b4, 0xe6fcf7e8,
- 0x00efabc0, 0x6686466f, 0xf58f4e09, 0x4f030ba7, 0x3e3703a6, 0x981ef8e0,
- 0x60ffef13, 0xfa0b5ef5, 0xc3b8bf97, 0x9ef6e112, 0xbbe7c9cd, 0x0cf6ed7c,
- 0x9e3d0bf8, 0xec7b0fd0, 0xeac7a649, 0x4e197605, 0xe88083f2, 0x29c79cfd,
- 0x0e54b7f2, 0xdf0f41b5, 0xd698cbd2, 0x6313e812, 0xa9f331e8, 0xce1fcf41,
- 0xb6f84879, 0x430f0b4e, 0xcc07236f, 0xe4ef7c04, 0x7f7426b8, 0x1aef105a,
- 0xadc700f5, 0x216e3d2c, 0x0fa4b45e, 0x8cbdc4ad, 0x989d5bf4, 0xa7c8e9bf,
- 0xdb3f38f9, 0xdbce3e63, 0x3670e472, 0xf78dcec1, 0x6cc7c704, 0x1ff4b78c,
- 0x24bbc6c9, 0x86aadfd6, 0x4717210a, 0x47e012b8, 0x85afdfac, 0x8b7f210b,
- 0xf1825432, 0xf996e428, 0x49a06b4c, 0xe8aad1ce, 0x34474f7e, 0xf7b1f9c1,
- 0x6e8551f6, 0x250f8caf, 0x1d3707c8, 0xb11254bd, 0xd9a0f1c7, 0xe81395c0,
- 0xdffdd62f, 0x09c0c8b9, 0xbf0cda5e, 0xbc27071f, 0xf5a3dc31, 0xae7dc816,
- 0xb63b9742, 0x3bcaf85e, 0x6de32023, 0x992a5daf, 0xc6758c59, 0xa3c04a3c,
- 0x017c7159, 0x0e0ae40e, 0xf367326c, 0x2bcf7cb9, 0xd25e4eee, 0x96b1b8dc,
- 0xd33ad3a7, 0x6bd0b5fd, 0x5de40513, 0xb1ae5637, 0x8570f476, 0x553ebf7e,
- 0xf6fb8f7f, 0x964ec128, 0x7d6322eb, 0x9bc17cee, 0x5d67f030, 0xf85ac6f5,
- 0x7e597ddf, 0xf9bbe24b, 0xedaff887, 0xadd7a649, 0x339b58df, 0x1efb9f8e,
- 0x126a7eda, 0xd9afcf5d, 0xdb36a3bb, 0x4c7f7d75, 0x98b68bee, 0x49e6959c,
- 0xbe58fa89, 0xb71276b1, 0xaffe52eb, 0x7dcfd0fa, 0x8c3588f1, 0xb5facbb8,
- 0xfdb7fce0, 0x0925bd71, 0x5f807fb7, 0x9d6b0533, 0xfe5bbe33, 0x12ab6db1,
- 0x4f80561e, 0xe2bd5fec, 0x0937ec67, 0x18aa6dfb, 0x5a68a753, 0x37791d3d,
- 0x4f4f5779, 0x6d8c53ac, 0xa8a72819, 0x49bd88b7, 0xd2b87ecb, 0x623e8e3d,
- 0xd4ae6ff3, 0x864d9f64, 0x6fa94f1d, 0x1e15e679, 0x53ccb97f, 0x557ec956,
- 0xbf92a9ae, 0xa6a7dd8f, 0x860fcf52, 0x80bf1db2, 0xcd07453d, 0xb949f84e,
- 0x4f33eba6, 0xfc2efcbd, 0xe1db2f31, 0x357e64ea, 0xbcfa6955, 0xea273663,
- 0x5f63d140, 0xffe07be5, 0xa5cbec5b, 0xd3b262ce, 0x953f1e64, 0x3df0573b,
- 0xc6c7cfb6, 0x7aa5693e, 0x645ed3be, 0x533afefe, 0x3adbe3b1, 0xc51be493,
- 0x12d9c169, 0xf890bf87, 0xe85c1d16, 0xf7c05cd4, 0xd6fe1da0, 0x677ff5fe,
- 0x0c8f0e23, 0x0bba67fe, 0xd3ed75b8, 0xb6cfd874, 0x81d38f81, 0x6606270f,
- 0x1d9ed039, 0x1fd03972, 0x16b8fbad, 0x368cbaf3, 0x8b30675e, 0xf636b19c,
- 0x13d58e7e, 0xcdd1de12, 0x73af6bd0, 0x614b2fdb, 0x82dda9ef, 0xce8efc63,
- 0xd788fc44, 0xe3395c19, 0xeb1265d5, 0xf5b3050c, 0xd45a033a, 0x7acc0a19,
- 0xea34019d, 0x6751680c, 0x0cea3f40, 0x006751a0, 0x68033a8d, 0xa2d019d4,
- 0x2bfe80ce, 0xbcb16ba8, 0x50de4e51, 0xecd1aadf, 0xf40fff82, 0xf5e4416a,
- 0x74d31c0f, 0xc412877a, 0xfbc67f7d, 0xaf460e23, 0x28b075e8, 0xe77953c7,
- 0xfbcb341f, 0x5eda1a20, 0xd6f71337, 0x5571b9af, 0xd9371117, 0xa3cddb14,
- 0x6a1f5127, 0x1bdc53ef, 0x2e5fe85d, 0x5c1b6c72, 0xfa237ef8, 0xd56ecd7b,
- 0x9abafb85, 0x6f97fa8d, 0x58ea57b0, 0x2cc739d5, 0x15fbcf72, 0xeffde1ca,
- 0x64efeab0, 0x2cbdc6bf, 0x9545b0f7, 0x1d6fda3c, 0xcb4bf792, 0x4d738a8b,
- 0x7eda1e42, 0x1382e7cb, 0x9df4b69d, 0xb21fd790, 0xc524d739, 0x7de48f57,
- 0xc174fc7e, 0x45433c8d, 0xa7d5af4c, 0x7b5edf90, 0xf9213801, 0xb47e4248,
- 0x33fcd9d6, 0x5da467e4, 0x7efce133, 0x644ee87e, 0x6d3dd6f9, 0xb09228fe,
- 0x9930737f, 0x6df60df6, 0x99c823cd, 0x034a8fdc, 0x943fb2e2, 0x74aa1fdc,
- 0xc6927d64, 0x3cd1e63f, 0xff755be4, 0x3f40e6b4, 0xdbb977ef, 0x61caf92a,
- 0x2570f78f, 0xb349f1fb, 0x272fea47, 0xbde4d98f, 0x9cfb7e75, 0x8d4bfaa4,
- 0x1fea5536, 0xc48acc1b, 0xdee52bf1, 0xaacbb063, 0xeea57bba, 0x10eb9552,
- 0xe1d98e7f, 0x3786f2d1, 0x5cc377c0, 0x6abfd497, 0xe10bf4ac, 0xf72f7ec7,
- 0x7f16683c, 0x8254e9ab, 0xc9abd32a, 0xad7f816d, 0x3d8a7562, 0x298fed99,
- 0x78ab0fec, 0xbb04c6af, 0x915db0da, 0x100a42bc, 0x8695e7ac, 0xfe0d573e,
- 0xfb0a57eb, 0x3d56bdcb, 0x5553ce0f, 0xc5d79fe1, 0xdfe78b71, 0xfc0b5e47,
- 0x55c71d66, 0x5f671cb4, 0x37bf708f, 0x9ebe6a6d, 0xc072bbe7, 0x3319f57f,
- 0x726a4f01, 0x95e85e7e, 0xfced5f78, 0x095bef84, 0x737da5df, 0xa8e1f7d3,
- 0xc8493e89, 0x759a4f6b, 0xca7d61fc, 0xec14eb7b, 0x5e3eea55, 0x3474cf69,
- 0x8ca2bb49, 0x340bffc4, 0xe02f6f3d, 0x4a6672bb, 0xeb0fa2dd, 0x6eedda6b,
- 0x0b863fe8, 0xbefc8dbb, 0xe796e540, 0x5bd54eed, 0xdc29d30b, 0xca5b8d3f,
- 0xe7617e14, 0x5c71d47c, 0x3d4f3fc2, 0x7d05c0bb, 0x4377697c, 0xc739351f,
- 0x31dde1e6, 0x72ffe0cb, 0xdc55c359, 0x029d2797, 0x438f9fd8, 0x5adfacdd,
- 0x5d41481c, 0x50d1e43d, 0x38a63f27, 0x1ebfaef1, 0x716fb74f, 0x140acfc2,
- 0xb5b7ea27, 0xd935dda9, 0xe676bfcc, 0x9cf0370c, 0x61ea55f1, 0x76eadc23,
- 0x53506b70, 0x6ca6b5f4, 0xff0df3d4, 0x54a6f729, 0xd7f78a4d, 0x1d147e1b,
- 0xf11d22fc, 0x6dd447f5, 0x807f8377, 0x835eee6c, 0xfea0ec3f, 0xbf5269a6,
- 0xe5d1d98d, 0x613b39fd, 0xf4ab5347, 0xef8d8d75, 0x0c4f9199, 0xc1cde6dd,
- 0x7cd72bfe, 0xce5b25be, 0x8cbd7e39, 0xb8033771, 0x165eb63b, 0x463df1c3,
- 0x6b781dfd, 0x26baea32, 0x326baea3, 0xa326baea, 0xea326bae, 0xaea326ba,
- 0xbaea326b, 0x6baea326, 0x26baea32, 0x97ae13a9, 0x878eddf6, 0x08ea1da4,
- 0xbc4278c8, 0xc7eab9b8, 0x55175dd5, 0xa577538d, 0xc4865714, 0xbd7bf65d,
- 0xcea63dde, 0x26aefeca, 0xe0d57bf8, 0x431e7b84, 0x7163d25e, 0x6b5438b3,
- 0x603c16f1, 0x54f07c17, 0x8d6f5b8d, 0xf52ecfe9, 0x9b64cbd9, 0x2ca87b8f,
- 0x5152659a, 0x23ee34c7, 0xdf84ef56, 0x1bf09ce0, 0xd31bf0b4, 0xfcdfb8ec,
- 0x5daec2d6, 0x0109e7aa, 0x726c13c9, 0x32375bbf, 0x2f7d30ae, 0xe5709339,
- 0xc2b831b3, 0x90159a0f, 0x95a0ed63, 0x233f74ba, 0x37254ff8, 0x0e3f9c7f,
- 0x9c7484ce, 0xda1a9699, 0xb5a67d87, 0x1791baa5, 0x9d8bbfb3, 0x88a6dc9d,
- 0x06a1afbf, 0xbc1c77d9, 0x7c9e4749, 0x73d1c4ef, 0xf7f9e1bf, 0xf25d83fe,
- 0xd793ad9d, 0xe0fffa2e, 0xae954d4d, 0x84775f8f, 0x9659e77d, 0x43b654ea,
- 0x16d5578e, 0xb8ba922a, 0x50fe2a9a, 0x58ae3da3, 0xd377a380, 0xb577dc3c,
- 0xc839e1b5, 0x2efec399, 0xa32cdfef, 0x6a1a6bbe, 0xd9f4cbde, 0xbe373cec,
- 0x3dcd4dab, 0x800b0544, 0xb0c57547, 0xadd4b3ab, 0x1963dd60, 0xbf83bfde,
- 0xa7cc5edc, 0x96fee356, 0xb3b69753, 0xce63ee4b, 0x32fbdc42, 0xf51fabab,
- 0x9e6f2d86, 0xb2c27a90, 0xd442d70c, 0x79bcb61b, 0x5c73a75a, 0xe51b8afd,
- 0xabab13b2, 0x66f8827f, 0x109f3eeb, 0x09a1fd67, 0xc0fb7449, 0x09f3eee4,
- 0x3f05ae71, 0x3f4e9df8, 0xafb84c53, 0x5c944356, 0xf21dbbd5, 0x4bfeebf9,
- 0xa8d3cf7f, 0xaaa7c9c4, 0x749c5c74, 0xe7b0c282, 0xf90ec5d1, 0x713397fe,
- 0xdbd43cfd, 0x10b5d8a8, 0xfdaf38f3, 0xe2f8ec35, 0xc1a79ead, 0x7738cf27,
- 0x3579f10e, 0x0d90c758, 0xbbe319ed, 0xdbd529e4, 0x3d36b688, 0x357e9260,
- 0x6df68a58, 0xfa20f435, 0x81762fd9, 0xf393ed4f, 0x3a622c6e, 0xb79a1acf,
- 0xe9788ede, 0x73fe8dd9, 0xec5e9e65, 0xdfeeb637, 0xe58f9c69, 0xc37987d9,
- 0xf9e4aa85, 0x6dea37c3, 0x55c22ecc, 0x371d0e3a, 0x93aca8fc, 0x58e7fbe4,
- 0xa55e39fb, 0xfdf9781a, 0x47bcb372, 0x7e4c4f20, 0x0a5d5eea, 0xa3de5879,
- 0x61afbc86, 0x5299ec9c, 0x7b6ef98f, 0xc77ec80d, 0x7c653b0d, 0xe3af396c,
- 0xa685b4a3, 0x0de404e0, 0x4e0d1e53, 0x7653bc80, 0xdc5cda8c, 0xb51810be,
- 0xe1787f21, 0xe86d476f, 0x7543c17f, 0x56bfe1e3, 0xc2df09ad, 0x3f5951b2,
- 0x921af79d, 0x04e8c277, 0x4d856fe4, 0x6c02596f, 0xe69e73b7, 0xb7f2937d,
- 0xd3638922, 0xfe9a5b0d, 0x87b04aa8, 0xe490cfe9, 0x259d54ab, 0xdd14bee3,
- 0x3e8f7dff, 0x6e8ec2a8, 0x09dcb208, 0x69e737f6, 0x1248e5d3, 0x6bacb0df,
- 0xf3837031, 0x3f71a4b5, 0x3cbe4733, 0xe423c0c5, 0x378c8d8f, 0xbeb7a6fe,
- 0x79c8be42, 0x8c7e18dc, 0x261c314d, 0xebef5dc4, 0x20fc1711, 0xbbdade79,
- 0xf78ad91e, 0x76e3536d, 0xd78fbc8b, 0xdb456df4, 0x259efada, 0x4cbf2487,
- 0x65f9cf9a, 0x9c9d7d12, 0x7b68e463, 0x39adf3f0, 0x76cbece3, 0x0ed12d70,
- 0x37439a4e, 0xd27abde0, 0x480eea9f, 0x0fdd091c, 0x6ad9d97c, 0xecae2377,
- 0xd8a555fa, 0xa9f5b7d7, 0xfaa98f32, 0xa751d947, 0x52540fcc, 0x87e6fc11,
- 0xe1e011da, 0x0c3c24ec, 0x8972dd48, 0xddae7043, 0xadb60778, 0x123e4a71,
- 0x5336ebf3, 0x98adc3e8, 0xef90e597, 0x45b70c11, 0x6f7c2ca6, 0x4aeee29a,
- 0x2ece2bc8, 0x9d041599, 0x830a19dd, 0x15cfe8ef, 0xd5c8ea37, 0xd6ae3a69,
- 0x7bad0da8, 0x352dfc79, 0x5bf1b82c, 0x6ff307e0, 0x7697acb7, 0xb14cd945,
- 0x290a5cdb, 0x7067fffa, 0x3d7bc42d, 0x67ad3df6, 0x20efb05b, 0xf04afbde,
- 0x8fa8b599, 0xe45d95fe, 0x80d50689, 0x33d3cf99, 0x7bb70481, 0xb8942cee,
- 0xc686a513, 0x2cff91fb, 0x6a3a954f, 0x13d704cf, 0xd3da6ef8, 0xf5e4a37c,
- 0xc7a4fe87, 0x29a5d1fb, 0x3d71e46e, 0x711b5cf3, 0x5d479eae, 0xa29afe32,
- 0xa13c0bf3, 0xa967a7eb, 0xe69ece7e, 0xc59f794c, 0x8767a1ee, 0xbfbe3267,
- 0xe5bc3259, 0x1803a8d5, 0x667b1fdf, 0x4fb73f70, 0xde770d29, 0x75733e07,
- 0xb9945779, 0xde4edee4, 0x7b13e379, 0xf1867d74, 0x897dac1d, 0xfefde923,
- 0xcb80febf, 0x002220b3, 0x00000000
-};
-
-#endif /*__BNX2X_INIT_VALUES_H__*/
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index ad5ef25add3..fbf1352e9c1 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -53,12 +53,19 @@
#include "bnx2x.h"
#include "bnx2x_init.h"
+#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
-#define DRV_MODULE_VERSION "1.48.105"
-#define DRV_MODULE_RELDATE "2009/03/02"
+#define DRV_MODULE_VERSION "1.48.105-1"
+#define DRV_MODULE_RELDATE "2009/04/22"
#define BNX2X_BC_VER 0x040200
+#include <linux/firmware.h>
+#include "bnx2x_fw_file_hdr.h"
+/* FW files */
+#define FW_FILE_PREFIX_E1 "bnx2x-e1-"
+#define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
+
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -1539,7 +1546,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
if (bp->panic)
- return -EINVAL;
+ return 0;
#endif
bnx2x_update_sge_prod(fp,
@@ -5232,13 +5239,15 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
}
}
-static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
int n, rc;
/* check gzip header */
- if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
+ if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
+ BNX2X_ERR("Bad gzip header\n");
return -EINVAL;
+ }
n = 10;
@@ -5247,7 +5256,7 @@ static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
if (zbuf[3] & FNAME)
while ((zbuf[n++] != 0) && (n < len));
- bp->strm->next_in = zbuf + n;
+ bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
bp->strm->avail_in = len - n;
bp->strm->next_out = bp->gunzip_buf;
bp->strm->avail_out = FW_BUF_SIZE;
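For reference, the header being skipped here is the RFC 1952 gzip header: a fixed 10-byte prefix (magic 0x1f 0x8b, compression method 8 = deflate, a flags byte, mtime, extra flags, OS), optionally followed by a NUL-terminated original file name when the FNAME flag (bit 3) is set, which is exactly what the while loop above steps over. Like the driver code, the standalone sketch below ignores the other optional gzip fields; it is an illustration, not part of the patch.

/* Standalone sketch of the gzip header skip done above (RFC 1952). */
#include <stddef.h>

#define GZ_FNAME 0x08

/* Returns the offset of the deflate payload, or -1 on a bad header. */
static int gzip_payload_offset(const unsigned char *buf, size_t len)
{
	size_t n = 10;			/* fixed header: magic, CM, FLG, MTIME, XFL, OS */

	if (len < 10 || buf[0] != 0x1f || buf[1] != 0x8b || buf[2] != 8)
		return -1;
	if (buf[3] & GZ_FNAME)		/* optional NUL-terminated original name */
		while (n < len && buf[n++] != 0)
			;
	return (int)n;
}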
@@ -5369,8 +5378,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
msleep(50);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
msleep(50);
- bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
- bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
+ bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
DP(NETIF_MSG_HW, "part2\n");
@@ -5434,8 +5443,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
msleep(50);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
msleep(50);
- bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
- bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
+ bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_ISCSI
/* set NIC mode */
REG_WR(bp, PRS_REG_NIC_MODE, 1);
@@ -5510,7 +5519,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
- bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
+ bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
if (CHIP_IS_E1H(bp))
REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
@@ -5518,14 +5527,14 @@ static int bnx2x_init_common(struct bnx2x *bp)
msleep(30);
REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
- bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
+ bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
if (CHIP_IS_E1(bp)) {
/* enable HW interrupt from PXP on USDM overflow
bit 16 on INT_MASK_0 */
REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
}
- bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
+ bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
bnx2x_init_pxp(bp);
#ifdef __BIG_ENDIAN
@@ -5571,60 +5580,60 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
- bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
+ bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
/* clean the DMAE memory */
bp->dmae_ready = 1;
bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
- bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
- bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
- bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
- bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
+ bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
- bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
+ bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
/* soft reset pulse */
REG_WR(bp, QM_REG_SOFT_RESET, 1);
REG_WR(bp, QM_REG_SOFT_RESET, 0);
#ifdef BCM_ISCSI
- bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
+ bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif
- bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
+ bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
if (!CHIP_REV_IS_SLOW(bp)) {
/* enable hw interrupt from doorbell Q */
REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
}
- bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
- bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
+ bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
/* set NIC mode */
REG_WR(bp, PRS_REG_NIC_MODE, 1);
if (CHIP_IS_E1H(bp))
REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
- bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
- bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
- bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
- bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
+ bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
- bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
- bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
- bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
- bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
+ bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
/* sync semi rtc */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
@@ -5632,16 +5641,16 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
0x80000000);
- bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
- bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
- bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
+ bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
REG_WR(bp, SRC_REG_SOFT_RST, 1);
for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
REG_WR(bp, i, 0xc0cac01a);
/* TODO: replace with something meaningful */
}
- bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
+ bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
REG_WR(bp, SRC_REG_SOFT_RST, 0);
if (sizeof(union cdu_context) != 1024)
@@ -5649,7 +5658,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
printk(KERN_ALERT PFX "please adjust the size of"
" cdu_context(%ld)\n", (long)sizeof(union cdu_context));
- bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
+ bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
val = (4 << 24) + (0 << 12) + 1024;
REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
if (CHIP_IS_E1(bp)) {
@@ -5658,7 +5667,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
}
- bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
+ bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
/* enable context validation interrupt from CFC */
REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
@@ -5666,20 +5675,25 @@ static int bnx2x_init_common(struct bnx2x *bp)
/* set the thresholds to prevent CFC/CDU race */
REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
- bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
- bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
+ bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
/* PXPCS COMMON comes here */
+ bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
/* Reset PCIE errors for debug */
REG_WR(bp, 0x2814, 0xffffffff);
REG_WR(bp, 0x3820, 0xffffffff);
/* EMAC0 COMMON comes here */
+ bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
/* EMAC1 COMMON comes here */
+ bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
/* DBU COMMON comes here */
+ bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
/* DBG COMMON comes here */
+ bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
+ bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
if (CHIP_IS_E1H(bp)) {
REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
@@ -5763,6 +5777,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
static int bnx2x_init_port(struct bnx2x *bp)
{
int port = BP_PORT(bp);
+ int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
u32 low, high;
u32 val;
@@ -5771,7 +5786,9 @@ static int bnx2x_init_port(struct bnx2x *bp)
REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
/* Port PXP comes here */
+ bnx2x_init_block(bp, PXP_BLOCK, init_stage);
/* Port PXP2 comes here */
+ bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
#ifdef BCM_ISCSI
/* Port0 1
* Port1 385 */
@@ -5798,21 +5815,19 @@ static int bnx2x_init_port(struct bnx2x *bp)
REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
/* Port CMs come here */
- bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
- (port ? XCM_PORT1_END : XCM_PORT0_END));
+ bnx2x_init_block(bp, XCM_BLOCK, init_stage);
/* Port QM comes here */
#ifdef BCM_ISCSI
REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
- bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
- func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
+ bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
#endif
/* Port DQ comes here */
+ bnx2x_init_block(bp, DQ_BLOCK, init_stage);
- bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
- (port ? BRB1_PORT1_END : BRB1_PORT0_END));
+ bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
/* no pause for emulation and FPGA */
low = 0;
@@ -5837,25 +5852,27 @@ static int bnx2x_init_port(struct bnx2x *bp)
/* Port PRS comes here */
+ bnx2x_init_block(bp, PRS_BLOCK, init_stage);
/* Port TSDM comes here */
+ bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
/* Port CSDM comes here */
+ bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
/* Port USDM comes here */
+ bnx2x_init_block(bp, USDM_BLOCK, init_stage);
/* Port XSDM comes here */
+ bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
- bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
- port ? TSEM_PORT1_END : TSEM_PORT0_END);
- bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
- port ? USEM_PORT1_END : USEM_PORT0_END);
- bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
- port ? CSEM_PORT1_END : CSEM_PORT0_END);
- bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
- port ? XSEM_PORT1_END : XSEM_PORT0_END);
+ bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
+ bnx2x_init_block(bp, USEM_BLOCK, init_stage);
+ bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
+ bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
/* Port UPB comes here */
+ bnx2x_init_block(bp, UPB_BLOCK, init_stage);
/* Port XPB comes here */
+ bnx2x_init_block(bp, XPB_BLOCK, init_stage);
- bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
- port ? PBF_PORT1_END : PBF_PORT0_END);
+ bnx2x_init_block(bp, PBF_BLOCK, init_stage);
/* configure PBF to work without PAUSE mtu 9000 */
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
@@ -5885,18 +5902,17 @@ static int bnx2x_init_port(struct bnx2x *bp)
/* Port SRCH comes here */
#endif
/* Port CDU comes here */
+ bnx2x_init_block(bp, CDU_BLOCK, init_stage);
/* Port CFC comes here */
+ bnx2x_init_block(bp, CFC_BLOCK, init_stage);
if (CHIP_IS_E1(bp)) {
REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
}
- bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
- port ? HC_PORT1_END : HC_PORT0_END);
+ bnx2x_init_block(bp, HC_BLOCK, init_stage);
- bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
- MISC_AEU_PORT0_START,
- port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
+ bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
/* init aeu_mask_attn_func_0/1:
* - SF mode: bits 3-7 are masked. only bits 0-2 are in use
* - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
@@ -5905,13 +5921,17 @@ static int bnx2x_init_port(struct bnx2x *bp)
(IS_E1HMF(bp) ? 0xF7 : 0x7));
/* Port PXPCS comes here */
+ bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
/* Port EMAC0 comes here */
+ bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
/* Port EMAC1 comes here */
+ bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
/* Port DBU comes here */
+ bnx2x_init_block(bp, DBU_BLOCK, init_stage);
/* Port DBG comes here */
+ bnx2x_init_block(bp, DBG_BLOCK, init_stage);
- bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
- port ? NIG_PORT1_END : NIG_PORT0_END);
+ bnx2x_init_block(bp, NIG_BLOCK, init_stage);
REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
@@ -5931,7 +5951,9 @@ static int bnx2x_init_port(struct bnx2x *bp)
}
/* Port MCP comes here */
+ bnx2x_init_block(bp, MCP_BLOCK, init_stage);
/* Port DMAE comes here */
+ bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
@@ -6036,7 +6058,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
if (CHIP_IS_E1H(bp)) {
for (i = 0; i < 9; i++)
bnx2x_init_block(bp,
- cm_start[func][i], cm_end[func][i]);
+ cm_blocks[i], FUNC0_STAGE + func);
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
@@ -6049,7 +6071,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
}
- bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
+ bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
/* Reset PCIE errors for debug */
REG_WR(bp, 0x2114, 0xffffffff);
@@ -10595,7 +10617,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mmiowb();
fp->tx_bd_prod += nbd;
- dev->trans_start = jiffies;
if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
@@ -11082,6 +11103,190 @@ static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
return val;
}
+static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
+{
+ struct bnx2x_fw_file_hdr *fw_hdr;
+ struct bnx2x_fw_file_section *sections;
+ u16 *ops_offsets;
+ u32 offset, len, num_ops;
+ int i;
+ const struct firmware *firmware = bp->firmware;
+ const u8 *fw_ver;
+
+ if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
+ return -EINVAL;
+
+ fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
+ sections = (struct bnx2x_fw_file_section *)fw_hdr;
+
+ /* Make sure none of the offsets and sizes make us read beyond
+ * the end of the firmware data */
+ for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
+ offset = be32_to_cpu(sections[i].offset);
+ len = be32_to_cpu(sections[i].len);
+ if (offset + len > firmware->size) {
+ printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Likewise for the init_ops offsets */
+ offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
+ ops_offsets = (u16 *)(firmware->data + offset);
+ num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
+
+ for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
+ if (be16_to_cpu(ops_offsets[i]) > num_ops) {
+ printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Check FW version */
+ offset = be32_to_cpu(fw_hdr->fw_version.offset);
+ fw_ver = firmware->data + offset;
+ if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
+ (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
+ (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
+ (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+ printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
+ " Should be %d.%d.%d.%d\n",
+ fw_ver[0], fw_ver[1], fw_ver[2],
+ fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
+ BCM_5710_FW_MINOR_VERSION,
+ BCM_5710_FW_REVISION_VERSION,
+ BCM_5710_FW_ENGINEERING_VERSION);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
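A note on the cast at the top of bnx2x_check_firmware(): sections = (struct bnx2x_fw_file_section *)fw_hdr only makes sense because the file header is nothing but a packed array of {len, offset} descriptors, which is also why the loop bound is sizeof(*fw_hdr) / sizeof(*sections). Below is a sketch of the assumed layout; field names and ordering are illustrative, and the authoritative definition lives in bnx2x_fw_file_hdr.h, which is included above but not shown in this hunk.

/* Assumed shape only -- see bnx2x_fw_file_hdr.h for the real definition.
 * The validation loop relies solely on the header being a packed array
 * of these descriptors. */
struct bnx2x_fw_file_section {
	__be32 len;	/* big-endian section length in bytes */
	__be32 offset;	/* big-endian offset from the start of the file */
};

struct bnx2x_fw_file_hdr {
	struct bnx2x_fw_file_section init_ops;
	struct bnx2x_fw_file_section init_ops_offsets;
	struct bnx2x_fw_file_section init_data;
	struct bnx2x_fw_file_section fw_version;
	/* ...one descriptor per STORM int_table/pram section used below... */
};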
+static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+ u32 i;
+ const __be32 *source = (const __be32 *)_source;
+ u32 *target = (u32 *)_target;
+
+ for (i = 0; i < n/4; i++)
+ target[i] = be32_to_cpu(source[i]);
+}
+
+/*
+ Ops array is stored in the following format:
+ {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
+ */
+static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
+{
+ u32 i, j, tmp;
+ const __be32 *source = (const __be32 *)_source;
+ struct raw_op *target = (struct raw_op *)_target;
+
+ for (i = 0, j = 0; i < n/8; i++, j += 2) {
+ tmp = be32_to_cpu(source[j]);
+ target[i].op = (tmp >> 24) & 0xff;
+ target[i].offset = tmp & 0xffffff;
+ target[i].raw_data = be32_to_cpu(source[j + 1]);
+ }
+}
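The comment above bnx2x_prep_ops() gives the record layout; to make the big-endian packing concrete, here is a tiny standalone program that decodes one 8-byte record the same way, using made-up example bytes.

/* Purely illustrative: decode one {op, offset, data} record (made-up bytes). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t rec[8] = { 0x02, 0x01, 0x23, 0x40,	/* op = 0x02, offset = 0x012340 */
				 0xde, 0xad, 0xbe, 0xef };	/* data = 0xdeadbeef */
	uint32_t w0 = ((uint32_t)rec[0] << 24) | (rec[1] << 16) | (rec[2] << 8) | rec[3];
	uint32_t w1 = ((uint32_t)rec[4] << 24) | (rec[5] << 16) | (rec[6] << 8) | rec[7];

	printf("op=0x%02x offset=0x%06x data=0x%08x\n",
	       (unsigned)((w0 >> 24) & 0xff),	/* top 8 bits: opcode */
	       (unsigned)(w0 & 0xffffff),	/* low 24 bits: register offset */
	       (unsigned)w1);			/* second word: raw data */
	return 0;
}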
+static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+ u32 i;
+ u16 *target = (u16 *)_target;
+ const __be16 *source = (const __be16 *)_source;
+
+ for (i = 0; i < n/2; i++)
+ target[i] = be16_to_cpu(source[i]);
+}
+
+#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
+ do { \
+ u32 len = be32_to_cpu(fw_hdr->arr.len); \
+ bp->arr = kmalloc(len, GFP_KERNEL); \
+ if (!bp->arr) { \
+ printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
+ goto lbl; \
+ } \
+ func(bp->firmware->data + \
+ be32_to_cpu(fw_hdr->arr.offset), \
+ (u8 *)bp->arr, len); \
+ } while (0)
+
+
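Since BNX2X_ALLOC_AND_SET hides a goto, it may help to see roughly what one invocation expands to. This is a hand-expansion of BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n), lightly reformatted and not literal preprocessor output; it assumes, as in bnx2x_init_firmware() below, that bp, fw_hdr and the target label are in scope.

/* Hand-expanded from BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n); illustrative, not literal preprocessor output. */
do {
	u32 len = be32_to_cpu(fw_hdr->init_data.len);

	bp->init_data = kmalloc(len, GFP_KERNEL);
	if (!bp->init_data) {
		printk(KERN_ERR PFX "Failed to allocate %d bytes for init_data\n", len);
		goto request_firmware_exit;	/* unwinds via release_firmware() */
	}
	be32_to_cpu_n(bp->firmware->data + be32_to_cpu(fw_hdr->init_data.offset),
		      (u8 *)bp->init_data, len);
} while (0);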
+static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
+{
+ char fw_file_name[40] = {0};
+ int rc, offset;
+ struct bnx2x_fw_file_hdr *fw_hdr;
+
+ /* Create a FW file name */
+ if (CHIP_IS_E1(bp))
+ offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
+ else
+ offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
+
+ sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
+ BCM_5710_FW_MAJOR_VERSION,
+ BCM_5710_FW_MINOR_VERSION,
+ BCM_5710_FW_REVISION_VERSION,
+ BCM_5710_FW_ENGINEERING_VERSION);
+
+ printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
+
+ rc = request_firmware(&bp->firmware, fw_file_name, dev);
+ if (rc) {
+ printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
+ goto request_firmware_exit;
+ }
+
+ rc = bnx2x_check_firmware(bp);
+ if (rc) {
+ printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
+ goto request_firmware_exit;
+ }
+
+ fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
+
+ /* Initialize the pointers to the init arrays */
+ /* Blob */
+ BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
+
+ /* Opcodes */
+ BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
+
+ /* Offsets */
+ BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
+
+ /* STORMs firmware */
+ bp->tsem_int_table_data = bp->firmware->data +
+ be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
+ bp->tsem_pram_data = bp->firmware->data +
+ be32_to_cpu(fw_hdr->tsem_pram_data.offset);
+ bp->usem_int_table_data = bp->firmware->data +
+ be32_to_cpu(fw_hdr->usem_int_table_data.offset);
+ bp->usem_pram_data = bp->firmware->data +
+ be32_to_cpu(fw_hdr->usem_pram_data.offset);
+ bp->xsem_int_table_data = bp->firmware->data +
+ be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
+ bp->xsem_pram_data = bp->firmware->data +
+ be32_to_cpu(fw_hdr->xsem_pram_data.offset);
+ bp->csem_int_table_data = bp->firmware->data +
+ be32_to_cpu(fw_hdr->csem_int_table_data.offset);
+ bp->csem_pram_data = bp->firmware->data +
+ be32_to_cpu(fw_hdr->csem_pram_data.offset);
+
+ return 0;
+init_offsets_alloc_err:
+ kfree(bp->init_ops);
+init_ops_alloc_err:
+ kfree(bp->init_data);
+request_firmware_exit:
+ release_firmware(bp->firmware);
+
+ return rc;
+}
+
+
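The three labels at the bottom of bnx2x_init_firmware() follow the usual kernel unwind idiom: each label undoes exactly one successful step and falls through to the next, so a failure at any point frees only what was actually allocated, in reverse order, before releasing the firmware. The same shape reduced to a minimal sketch, with illustrative names and buffer sizes:

/* Minimal sketch of the goto-unwind idiom used above (names/sizes illustrative). */
#include <linux/slab.h>

static int setup_three_buffers(u8 **a, u8 **b, u8 **c)
{
	*a = kmalloc(64, GFP_KERNEL);
	if (!*a)
		goto err_a;
	*b = kmalloc(64, GFP_KERNEL);
	if (!*b)
		goto err_b;
	*c = kmalloc(64, GFP_KERNEL);
	if (!*c)
		goto err_c;
	return 0;

err_c:
	kfree(*b);	/* undo step two, then fall through */
err_b:
	kfree(*a);	/* undo step one */
err_a:
	return -ENOMEM;
}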
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -11116,6 +11321,13 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
if (rc)
goto init_one_exit;
+ /* Set init arrays */
+ rc = bnx2x_init_firmware(bp, &pdev->dev);
+ if (rc) {
+ printk(KERN_ERR PFX "Error loading firmware\n");
+ goto init_one_exit;
+ }
+
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
@@ -11163,6 +11375,11 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
+ kfree(bp->init_ops_offsets);
+ kfree(bp->init_ops);
+ kfree(bp->init_data);
+ release_firmware(bp->firmware);
+
if (bp->regview)
iounmap(bp->regview);
@@ -11412,13 +11629,20 @@ static struct pci_driver bnx2x_pci_driver = {
static int __init bnx2x_init(void)
{
+ int ret;
+
bnx2x_wq = create_singlethread_workqueue("bnx2x");
if (bnx2x_wq == NULL) {
printk(KERN_ERR PFX "Cannot create workqueue\n");
return -ENOMEM;
}
- return pci_register_driver(&bnx2x_pci_driver);
+ ret = pci_register_driver(&bnx2x_pci_driver);
+ if (ret) {
+ printk(KERN_ERR PFX "Cannot register driver\n");
+ destroy_workqueue(bnx2x_wq);
+ }
+ return ret;
}
static void __exit bnx2x_cleanup(void)
@@ -11431,3 +11655,4 @@ static void __exit bnx2x_cleanup(void)
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
+
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 8c2e5ab51f0..d4b570886c6 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1465,6 +1465,12 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
return best;
}
+static int agg_device_up(const struct aggregator *agg)
+{
+ return (netif_running(agg->slave->dev) &&
+ netif_carrier_ok(agg->slave->dev));
+}
+
/**
* ad_agg_selection_logic - select an aggregation group for a team
* @aggregator: the aggregator we're looking at
@@ -1496,14 +1502,13 @@ static void ad_agg_selection_logic(struct aggregator *agg)
struct port *port;
origin = agg;
-
active = __get_active_agg(agg);
- best = active;
+ best = (active && agg_device_up(active)) ? active : NULL;
do {
agg->is_active = 0;
- if (agg->num_of_ports)
+ if (agg->num_of_ports && agg_device_up(agg))
best = ad_agg_selection_test(best, agg);
} while ((agg = __get_next_agg(agg)));
@@ -1845,9 +1850,10 @@ static u16 aggregator_identifier;
* Can be called only after the mac address of the bond is set.
*/
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fast)
-{
+{
// check that the bond is not initialized yet
- if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr), &(bond->dev->dev_addr))) {
+ if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
+ bond->dev->dev_addr)) {
aggregator_identifier = 0;
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index a306230381c..2c46a154f2c 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -26,10 +26,10 @@
#include <asm/byteorder.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
// General definitions
-#define BOND_ETH_P_LACPDU 0x8809
-#define PKT_TYPE_LACPDU cpu_to_be16(BOND_ETH_P_LACPDU)
+#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
#define AD_TIMER_INTERVAL 100 /*msec*/
#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
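This hunk replaces the driver-private BOND_ETH_P_LACPDU (0x8809, visible in the removed line) with the standard ETH_P_SLOW constant from <linux/if_ether.h>; both name the IEEE 802.3 Slow Protocols ethertype, so PKT_TYPE_LACPDU is unchanged on the wire. A compile-time check of that equivalence, purely as an illustration and not part of the patch:

#include <linux/kernel.h>
#include <linux/if_ether.h>

/* Illustrative only: ETH_P_SLOW is the same 0x8809 ethertype that the
 * removed BOND_ETH_P_LACPDU macro open-coded. */
static inline void lacpdu_ethertype_check(void)
{
	BUILD_BUG_ON(ETH_P_SLOW != 0x8809);
}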
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 74824028f85..d927f71af8a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -51,10 +51,10 @@
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
+#include <linux/io.h>
#include <asm/system.h>
-#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
@@ -89,19 +89,19 @@ static int max_bonds = BOND_DEFAULT_MAX_BONDS;
static int num_grat_arp = 1;
static int num_unsol_na = 1;
static int miimon = BOND_LINK_MON_INTERV;
-static int updelay = 0;
-static int downdelay = 0;
+static int updelay;
+static int downdelay;
static int use_carrier = 1;
-static char *mode = NULL;
-static char *primary = NULL;
-static char *lacp_rate = NULL;
-static char *ad_select = NULL;
-static char *xmit_hash_policy = NULL;
+static char *mode;
+static char *primary;
+static char *lacp_rate;
+static char *ad_select;
+static char *xmit_hash_policy;
static int arp_interval = BOND_LINK_ARP_INTERV;
-static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, };
-static char *arp_validate = NULL;
-static char *fail_over_mac = NULL;
-struct bond_params bonding_defaults;
+static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
+static char *arp_validate;
+static char *fail_over_mac;
+static struct bond_params bonding_defaults;
module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -151,14 +151,14 @@ static const char * const version =
LIST_HEAD(bond_dev_list);
#ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *bond_proc_dir = NULL;
+static struct proc_dir_entry *bond_proc_dir;
#endif
-static __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ;
-static int arp_ip_count = 0;
+static __be32 arp_target[BOND_MAX_ARP_TARGETS];
+static int arp_ip_count;
static int bond_mode = BOND_MODE_ROUNDROBIN;
-static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2;
-static int lacp_fast = 0;
+static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
+static int lacp_fast;
const struct bond_parm_tbl bond_lacp_tbl[] = {
@@ -210,6 +210,7 @@ struct bond_parm_tbl ad_select_tbl[] = {
/*-------------------------- Forward declarations ---------------------------*/
static void bond_send_gratuitous_arp(struct bonding *bond);
+static int bond_init(struct net_device *bond_dev);
static void bond_deinit(struct net_device *bond_dev);
/*---------------------------- General routines -----------------------------*/
@@ -221,7 +222,7 @@ static const char *bond_mode_name(int mode)
[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
[BOND_MODE_XOR] = "load balancing (xor)",
[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
- [BOND_MODE_8023AD]= "IEEE 802.3ad Dynamic link aggregation",
+ [BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
[BOND_MODE_TLB] = "transmit load balancing",
[BOND_MODE_ALB] = "adaptive load balancing",
};
@@ -246,12 +247,11 @@ static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
struct vlan_entry *vlan;
pr_debug("bond: %s, vlan id %d\n",
- (bond ? bond->dev->name: "None"), vlan_id);
+ (bond ? bond->dev->name : "None"), vlan_id);
vlan = kzalloc(sizeof(struct vlan_entry), GFP_KERNEL);
- if (!vlan) {
+ if (!vlan)
return -ENOMEM;
- }
INIT_LIST_HEAD(&vlan->vlan_list);
vlan->vlan_id = vlan_id;
@@ -351,16 +351,15 @@ static int bond_has_challenged_slaves(struct bonding *bond)
*
* Returns %NULL if list is empty, bond->next_vlan if @curr is %NULL,
* or @curr->next otherwise (even if it is @curr itself again).
- *
+ *
* Caller must hold bond->lock
*/
struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
{
struct vlan_entry *next, *last;
- if (list_empty(&bond->vlan_list)) {
+ if (list_empty(&bond->vlan_list))
return NULL;
- }
if (!curr) {
next = list_entry(bond->vlan_list.next,
@@ -382,11 +381,11 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
/**
* bond_dev_queue_xmit - Prepare skb for xmit.
- *
+ *
* @bond: bond device that got this skb for tx.
* @skb: hw accel VLAN tagged skb to transmit
* @slave_dev: slave that is supposed to xmit this skbuff
- *
+ *
* When the bond gets an skb to transmit that is
* already hardware accelerated VLAN tagged, and it
* needs to relay this skb to a slave that is not
@@ -394,7 +393,8 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
* i.e. strip the hwaccel tag and re-insert it as part
* of the payload.
*/
-int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev)
+int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
+ struct net_device *slave_dev)
{
unsigned short uninitialized_var(vlan_id);
@@ -428,7 +428,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_de
* b. The operation is protected by the RTNL semaphore in the 8021q code,
* c. Holding a lock with BH disabled while directly calling a base driver
* entry point is generally a BAD idea.
- *
+ *
* The design of synchronization/protection for this operation in the 8021q
* module is good for one or more VLAN devices over a single physical device
* and cannot be extended for a teaming solution like bonding, so there is a
@@ -443,7 +443,8 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_de
* @bond_dev: bonding net device that got called
* @grp: vlan group being registered
*/
-static void bond_vlan_rx_register(struct net_device *bond_dev, struct vlan_group *grp)
+static void bond_vlan_rx_register(struct net_device *bond_dev,
+ struct vlan_group *grp)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
@@ -485,7 +486,7 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
res = bond_add_vlan(bond, vid);
if (res) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Error: Failed to add vlan id %d\n",
bond_dev->name, vid);
}
@@ -520,7 +521,7 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
res = bond_del_vlan(bond, vid);
if (res) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Error: Failed to remove vlan id %d\n",
bond_dev->name, vid);
}
@@ -551,7 +552,8 @@ out:
write_unlock_bh(&bond->lock);
}
-static void bond_del_vlans_from_slave(struct bonding *bond, struct net_device *slave_dev)
+static void bond_del_vlans_from_slave(struct bonding *bond,
+ struct net_device *slave_dev)
{
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct vlan_entry *vlan;
@@ -673,7 +675,7 @@ static int bond_update_speed_duplex(struct slave *slave)
* if <dev> supports MII link status reporting, check its link status.
*
* We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
- * depening upon the setting of the use_carrier parameter.
+ * depending upon the setting of the use_carrier parameter.
*
* Return either BMSR_LSTATUS, meaning that the link is up (or we
* can't tell and just pretend it is), or 0, meaning that the link is
@@ -685,16 +687,29 @@ static int bond_update_speed_duplex(struct slave *slave)
* It'd be nice if there was a good way to tell if a driver supports
* netif_carrier, but there really isn't.
*/
-static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_dev, int reporting)
+static int bond_check_dev_link(struct bonding *bond,
+ struct net_device *slave_dev, int reporting)
{
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- static int (* ioctl)(struct net_device *, struct ifreq *, int);
+ static int (*ioctl)(struct net_device *, struct ifreq *, int);
struct ifreq ifr;
struct mii_ioctl_data *mii;
if (bond->params.use_carrier)
return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
+ /* Try to get link status using Ethtool first. */
+ if (slave_dev->ethtool_ops) {
+ if (slave_dev->ethtool_ops->get_link) {
+ u32 link;
+
+ link = slave_dev->ethtool_ops->get_link(slave_dev);
+
+ return link ? BMSR_LSTATUS : 0;
+ }
+ }
+
+ /* Ethtool can't be used, fallback to MII ioctls. */
ioctl = slave_ops->ndo_do_ioctl;
if (ioctl) {
/* TODO: set pointer to correct ioctl on a per team member */
@@ -714,23 +729,8 @@ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_de
mii = if_mii(&ifr);
if (IOCTL(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
mii->reg_num = MII_BMSR;
- if (IOCTL(slave_dev, &ifr, SIOCGMIIREG) == 0) {
- return (mii->val_out & BMSR_LSTATUS);
- }
- }
- }
-
- /*
- * Some drivers cache ETHTOOL_GLINK for a period of time so we only
- * attempt to get link status from it if the above MII ioctls fail.
- */
- if (slave_dev->ethtool_ops) {
- if (slave_dev->ethtool_ops->get_link) {
- u32 link;
-
- link = slave_dev->ethtool_ops->get_link(slave_dev);
-
- return link ? BMSR_LSTATUS : 0;
+ if (IOCTL(slave_dev, &ifr, SIOCGMIIREG) == 0)
+ return mii->val_out & BMSR_LSTATUS;
}
}
@@ -740,7 +740,7 @@ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_de
* cannot report link status). If not reporting, pretend
* we're ok.
*/
- return (reporting ? -1 : BMSR_LSTATUS);
+ return reporting ? -1 : BMSR_LSTATUS;
}
/*----------------------------- Multicast list ------------------------------*/
@@ -748,7 +748,8 @@ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_de
/*
* Returns 0 if dmi1 and dmi2 are the same, non-0 otherwise
*/
-static inline int bond_is_dmi_same(struct dev_mc_list *dmi1, struct dev_mc_list *dmi2)
+static inline int bond_is_dmi_same(const struct dev_mc_list *dmi1,
+ const struct dev_mc_list *dmi2)
{
return memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0 &&
dmi1->dmi_addrlen == dmi2->dmi_addrlen;
@@ -757,14 +758,14 @@ static inline int bond_is_dmi_same(struct dev_mc_list *dmi1, struct dev_mc_list
/*
* returns dmi entry if found, NULL otherwise
*/
-static struct dev_mc_list *bond_mc_list_find_dmi(struct dev_mc_list *dmi, struct dev_mc_list *mc_list)
+static struct dev_mc_list *bond_mc_list_find_dmi(struct dev_mc_list *dmi,
+ struct dev_mc_list *mc_list)
{
struct dev_mc_list *idmi;
for (idmi = mc_list; idmi; idmi = idmi->next) {
- if (bond_is_dmi_same(dmi, idmi)) {
+ if (bond_is_dmi_same(dmi, idmi))
return idmi;
- }
}
return NULL;
@@ -826,15 +827,14 @@ static void bond_mc_add(struct bonding *bond, void *addr, int alen)
{
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
- if (bond->curr_active_slave) {
+ if (bond->curr_active_slave)
dev_mc_add(bond->curr_active_slave->dev, addr, alen, 0);
- }
} else {
struct slave *slave;
int i;
- bond_for_each_slave(bond, slave, i) {
+
+ bond_for_each_slave(bond, slave, i)
dev_mc_add(slave->dev, addr, alen, 0);
- }
}
}
@@ -846,9 +846,9 @@ static void bond_mc_delete(struct bonding *bond, void *addr, int alen)
{
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
- if (bond->curr_active_slave) {
- dev_mc_delete(bond->curr_active_slave->dev, addr, alen, 0);
- }
+ if (bond->curr_active_slave)
+ dev_mc_delete(bond->curr_active_slave->dev, addr,
+ alen, 0);
} else {
struct slave *slave;
int i;
@@ -872,9 +872,8 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
rcu_read_lock();
in_dev = __in_dev_get_rcu(bond->dev);
if (in_dev) {
- for (im = in_dev->mc_list; im; im = im->next) {
+ for (im = in_dev->mc_list; im; im = im->next)
ip_mc_rejoin_group(im);
- }
}
rcu_read_unlock();
@@ -893,7 +892,8 @@ static void bond_mc_list_destroy(struct bonding *bond)
kfree(dmi);
dmi = bond->mc_list;
}
- bond->mc_list = NULL;
+
+ bond->mc_list = NULL;
}
/*
@@ -926,14 +926,14 @@ static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
/*
* flush all members of flush->mc_list from device dev->mc_list
*/
-static void bond_mc_list_flush(struct net_device *bond_dev, struct net_device *slave_dev)
+static void bond_mc_list_flush(struct net_device *bond_dev,
+ struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct dev_mc_list *dmi;
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
+ for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next)
dev_mc_delete(slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
- }
if (bond->params.mode == BOND_MODE_8023AD) {
/* del lacpdu mc addr from mc list */
@@ -950,44 +950,40 @@ static void bond_mc_list_flush(struct net_device *bond_dev, struct net_device *s
* old active slaves (if any) according to the multicast mode, and
* promiscuous flags unconditionally.
*/
-static void bond_mc_swap(struct bonding *bond, struct slave *new_active, struct slave *old_active)
+static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
+ struct slave *old_active)
{
struct dev_mc_list *dmi;
- if (!USES_PRIMARY(bond->params.mode)) {
+ if (!USES_PRIMARY(bond->params.mode))
/* nothing to do - mc list is already up-to-date on
* all slaves
*/
return;
- }
if (old_active) {
- if (bond->dev->flags & IFF_PROMISC) {
+ if (bond->dev->flags & IFF_PROMISC)
dev_set_promiscuity(old_active->dev, -1);
- }
- if (bond->dev->flags & IFF_ALLMULTI) {
+ if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- }
- for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) {
- dev_mc_delete(old_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
- }
+ for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next)
+ dev_mc_delete(old_active->dev, dmi->dmi_addr,
+ dmi->dmi_addrlen, 0);
}
if (new_active) {
/* FIXME: Signal errors upstream. */
- if (bond->dev->flags & IFF_PROMISC) {
+ if (bond->dev->flags & IFF_PROMISC)
dev_set_promiscuity(new_active->dev, 1);
- }
- if (bond->dev->flags & IFF_ALLMULTI) {
+ if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- }
- for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) {
- dev_mc_add(new_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
- }
+ for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next)
+ dev_mc_add(new_active->dev, dmi->dmi_addr,
+ dmi->dmi_addrlen, 0);
bond_resend_igmp_join_requests(bond);
}
}
@@ -1041,7 +1037,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
rv = dev_set_mac_address(new_active->dev, &saddr);
if (rv) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Error %d setting MAC of slave %s\n",
bond->dev->name, -rv, new_active->dev->name);
goto out;
@@ -1055,7 +1051,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
rv = dev_set_mac_address(old_active->dev, &saddr);
if (rv)
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Error %d setting MAC of slave %s\n",
bond->dev->name, -rv, new_active->dev->name);
out:
@@ -1063,7 +1059,7 @@ out:
write_lock_bh(&bond->curr_slave_lock);
break;
default:
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: bond_do_fail_over_mac impossible: bad policy %d\n",
bond->dev->name, bond->params.fail_over_mac);
break;
@@ -1088,17 +1084,17 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
new_active = old_active = bond->curr_active_slave;
if (!new_active) { /* there were no active slaves left */
- if (bond->slave_cnt > 0) { /* found one slave */
+ if (bond->slave_cnt > 0) /* found one slave */
new_active = bond->first_slave;
- } else {
+ else
return NULL; /* still no slave, return NULL */
- }
}
- /* first try the primary link; if arping, a link must tx/rx traffic
- * before it can be considered the curr_active_slave - also, we would skip
- * slaves between the curr_active_slave and primary_slave that may be up
- * and able to arp
+ /*
+ * first try the primary link; if arping, a link must tx/rx
+ * traffic before it can be considered the curr_active_slave.
+ * also, we would skip slaves between the curr_active_slave
+ * and primary_slave that may be up and able to arp
*/
if ((bond->primary_slave) &&
(!bond->params.arp_interval) &&
@@ -1146,16 +1142,15 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
struct slave *old_active = bond->curr_active_slave;
- if (old_active == new_active) {
+ if (old_active == new_active)
return;
- }
if (new_active) {
new_active->jiffies = jiffies;
if (new_active->link == BOND_LINK_BACK) {
if (USES_PRIMARY(bond->params.mode)) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: making interface %s the new "
"active one %d ms earlier.\n",
bond->dev->name, new_active->dev->name,
@@ -1165,15 +1160,14 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
new_active->delay = 0;
new_active->link = BOND_LINK_UP;
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (bond->params.mode == BOND_MODE_8023AD)
bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
- }
if (bond_is_lb(bond))
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
if (USES_PRIMARY(bond->params.mode)) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: making interface %s the new "
"active one.\n",
bond->dev->name, new_active->dev->name);
@@ -1181,9 +1175,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
}
- if (USES_PRIMARY(bond->params.mode)) {
+ if (USES_PRIMARY(bond->params.mode))
bond_mc_swap(bond, new_active, old_active);
- }
if (bond_is_lb(bond)) {
bond_alb_handle_active_change(bond, new_active);
@@ -1196,9 +1189,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
- if (old_active) {
+ if (old_active)
bond_set_slave_inactive_flags(old_active);
- }
if (new_active) {
bond_set_slave_active_flags(new_active);
@@ -1228,7 +1220,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
* bond_select_active_slave - select a new active slave, if needed
* @bond: our bonding struct
*
- * This functions shoud be called when one of the following occurs:
+ * This function should be called when one of the following occurs:
* - The old curr_active_slave has been released or lost its link.
* - The primary_slave has got its link back.
* - A slave has got its link back and there's no old curr_active_slave.
@@ -1248,11 +1240,11 @@ void bond_select_active_slave(struct bonding *bond)
return;
if (netif_carrier_ok(bond->dev)) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: first active interface up!\n",
bond->dev->name);
} else {
- printk(KERN_INFO DRV_NAME ": %s: "
+ pr_info(DRV_NAME ": %s: "
"now running without any active interface !\n",
bond->dev->name);
}
@@ -1294,13 +1286,11 @@ static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
*/
static void bond_detach_slave(struct bonding *bond, struct slave *slave)
{
- if (slave->next) {
+ if (slave->next)
slave->next->prev = slave->prev;
- }
- if (slave->prev) {
+ if (slave->prev)
slave->prev->next = slave->next;
- }
if (bond->first_slave == slave) { /* slave is the first slave */
if (bond->slave_cnt > 1) { /* there are more slave */
@@ -1331,7 +1321,7 @@ static int bond_sethwaddr(struct net_device *bond_dev,
(NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \
NETIF_F_HW_VLAN_FILTER)
-/*
+/*
* Compute the common dev->feature set available to all slaves. Some
* feature bits are managed elsewhere, so preserve those feature bits
* on the master device.
@@ -1399,14 +1389,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
slave_ops->ndo_do_ioctl == NULL) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: Warning: no link monitoring support for %s\n",
bond_dev->name, slave_dev->name);
}
/* bond must be initialized by bond_open() before enslaving */
if (!(bond_dev->flags & IFF_UP)) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
" %s: master_dev is not up in bond_enslave\n",
bond_dev->name);
}
@@ -1422,14 +1412,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
if (!list_empty(&bond->vlan_list)) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Error: cannot enslave VLAN "
"challenged slave %s on VLAN enabled "
"bond %s\n", bond_dev->name, slave_dev->name,
bond_dev->name);
return -EPERM;
} else {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: Warning: enslaved VLAN challenged "
"slave %s. Adding VLANs will be blocked as "
"long as %s is part of bond %s\n",
@@ -1449,12 +1439,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/*
* Old ifenslave binaries are no longer supported. These can
- * be identified with moderate accurary by the state of the slave:
+ * be identified with moderate accuracy by the state of the slave:
* the current ifenslave will set the interface down prior to
* enslaving it; the old ifenslave will not.
*/
if ((slave_dev->flags & IFF_UP)) {
- printk(KERN_ERR DRV_NAME ": %s is up. "
+ pr_err(DRV_NAME ": %s is up. "
"This may be due to an out of date ifenslave.\n",
slave_dev->name);
res = -EPERM;
@@ -1472,7 +1462,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
} else if (bond_dev->type != slave_dev->type) {
- printk(KERN_ERR DRV_NAME ": %s ether type (%d) is different "
+ pr_err(DRV_NAME ": %s ether type (%d) is different "
"from other slaves (%d), can not enslave it.\n",
slave_dev->name,
slave_dev->type, bond_dev->type);
@@ -1482,14 +1472,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_ops->ndo_set_mac_address == NULL) {
if (bond->slave_cnt == 0) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: Warning: The first slave device "
"specified does not support setting the MAC "
"address. Setting fail_over_mac to active.",
bond_dev->name);
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Error: The slave device specified "
"does not support setting the MAC address, "
"but fail_over_mac is not set to active.\n"
@@ -1539,7 +1529,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* open the slave since the application closed it */
res = dev_open(slave_dev);
if (res) {
- pr_debug("Openning slave %s failed\n", slave_dev->name);
+ pr_debug("Opening slave %s failed\n", slave_dev->name);
goto err_unset_master;
}
@@ -1551,9 +1541,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* it might fail and we do not want to have to undo everything
*/
res = bond_alb_init_slave(bond, new_slave);
- if (res) {
+ if (res)
goto err_close;
- }
}
/* If the mode USES_PRIMARY, then the new slave gets the
@@ -1578,9 +1567,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_lock_bh(bond_dev);
/* upload master's mc_list to new slave */
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
- dev_mc_add (slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
- }
+ for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next)
+ dev_mc_add(slave_dev, dmi->dmi_addr,
+ dmi->dmi_addrlen, 0);
netif_addr_unlock_bh(bond_dev);
}
@@ -1621,7 +1610,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* supported); thus, we don't need to change
* the messages for netif_carrier.
*/
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: Warning: MII and ETHTOOL support not "
"available for interface %s, and "
"arp_interval/arp_ip_target module parameters "
@@ -1630,7 +1619,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev->name, slave_dev->name);
} else if (link_reporting == -1) {
/* unable get link status using mii/ethtool */
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: Warning: can't get link status from "
"interface %s; the network driver associated "
"with this interface does not support MII or "
@@ -1662,13 +1651,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (bond_update_speed_duplex(new_slave) &&
(new_slave->link != BOND_LINK_DOWN)) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: Warning: failed to get speed and duplex from %s, "
"assumed to be 100Mb/sec and Full.\n",
bond_dev->name, new_slave->dev->name);
if (bond->params.mode == BOND_MODE_8023AD) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: Warning: Operation of 802.3ad mode requires ETHTOOL "
"support in base driver for proper aggregator "
"selection.\n", bond_dev->name);
@@ -1677,9 +1666,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
/* if there is a primary slave, remember it */
- if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
+ if (strcmp(bond->params.primary, new_slave->dev->name) == 0)
bond->primary_slave = new_slave;
- }
}
write_lock_bh(&bond->curr_slave_lock);
@@ -1726,9 +1714,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* anyway (it holds no special properties of the bond device),
* so we can change it without calling change_active_interface()
*/
- if (!bond->curr_active_slave) {
+ if (!bond->curr_active_slave)
bond->curr_active_slave = new_slave;
- }
+
break;
} /* switch(bond_mode) */
@@ -1742,7 +1730,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (res)
goto err_close;
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: enslaving %s as a%s interface with a%s link.\n",
bond_dev->name, slave_dev->name,
new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup",
@@ -1774,7 +1762,7 @@ err_free:
err_undo_flags:
bond_dev->features = old_features;
-
+
return res;
}
@@ -1799,7 +1787,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
(slave_dev->master != bond_dev)) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Error: cannot release %s.\n",
bond_dev->name, slave_dev->name);
return -EINVAL;
@@ -1810,7 +1798,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
slave = bond_get_slave_by_dev(bond, slave_dev);
if (!slave) {
/* not a slave of this bond */
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: %s not enslaved\n",
bond_dev->name, slave_dev->name);
write_unlock_bh(&bond->lock);
@@ -1821,7 +1809,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
mac_addr_differ = memcmp(bond_dev->dev_addr, slave->perm_hwaddr,
ETH_ALEN);
if (!mac_addr_differ && (bond->slave_cnt > 1))
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: Warning: the permanent HWaddr of %s - "
"%pM - is still in use by %s. "
"Set the HWaddr of %s to a different address "
@@ -1839,7 +1827,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
bond_3ad_unbind_slave(slave);
}
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: releasing %s interface %s\n",
bond_dev->name,
(slave->state == BOND_STATE_ACTIVE)
@@ -1855,13 +1843,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
bond_compute_features(bond);
- if (bond->primary_slave == slave) {
+ if (bond->primary_slave == slave)
bond->primary_slave = NULL;
- }
- if (oldcurrent == slave) {
+ if (oldcurrent == slave)
bond_change_active_slave(bond, NULL);
- }
if (bond_is_lb(bond)) {
/* Must be called only after the slave has been
@@ -1903,18 +1889,18 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
if (list_empty(&bond->vlan_list)) {
bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
} else {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: Warning: clearing HW address of %s while it "
"still has VLANs.\n",
bond_dev->name, bond_dev->name);
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: When re-adding slaves, make sure the bond's "
"HW address matches its VLANs'.\n",
bond_dev->name);
}
} else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
!bond_has_challenged_slaves(bond)) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: last VLAN challenged slave %s "
"left bond %s. VLAN blocking is removed\n",
bond_dev->name, slave_dev->name, bond_dev->name);
@@ -1934,14 +1920,12 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
*/
if (!USES_PRIMARY(bond->params.mode)) {
/* unset promiscuity level from slave */
- if (bond_dev->flags & IFF_PROMISC) {
+ if (bond_dev->flags & IFF_PROMISC)
dev_set_promiscuity(slave_dev, -1);
- }
/* unset allmulti level from slave */
- if (bond_dev->flags & IFF_ALLMULTI) {
+ if (bond_dev->flags & IFF_ALLMULTI)
dev_set_allmulti(slave_dev, -1);
- }
/* flush master's mc_list from slave */
netif_addr_lock_bh(bond_dev);
@@ -1974,41 +1958,36 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
* Destroy a bonding device.
* Must be under rtnl_lock when this function is called.
*/
-void bond_destroy(struct bonding *bond)
-{
- bond_deinit(bond->dev);
- bond_destroy_sysfs_entry(bond);
- unregister_netdevice(bond->dev);
-}
-
-static void bond_destructor(struct net_device *bond_dev)
+static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ bond_deinit(bond_dev);
+ bond_destroy_sysfs_entry(bond);
+
if (bond->wq)
destroy_workqueue(bond->wq);
netif_addr_lock_bh(bond_dev);
bond_mc_list_destroy(bond);
netif_addr_unlock_bh(bond_dev);
-
- free_netdev(bond_dev);
}
/*
-* First release a slave and than destroy the bond if no more slaves iare left.
+* First release a slave and then destroy the bond if no more slaves are left.
* Must be under rtnl_lock when this function is called.
*/
-int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev)
+int bond_release_and_destroy(struct net_device *bond_dev,
+ struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
int ret;
ret = bond_release(bond_dev, slave_dev);
if ((ret == 0) && (bond->slave_cnt == 0)) {
- printk(KERN_INFO DRV_NAME ": %s: destroying bond %s.\n",
+ pr_info(DRV_NAME ": %s: destroying bond %s.\n",
bond_dev->name, bond_dev->name);
- bond_destroy(bond);
+ unregister_netdevice(bond_dev);
}
return ret;
}
@@ -2027,9 +2006,8 @@ static int bond_release_all(struct net_device *bond_dev)
netif_carrier_off(bond_dev);
- if (bond->slave_cnt == 0) {
+ if (bond->slave_cnt == 0)
goto out;
- }
bond->current_arp_slave = NULL;
bond->primary_slave = NULL;
@@ -2039,9 +2017,8 @@ static int bond_release_all(struct net_device *bond_dev)
/* Inform AD package of unbinding of slave
* before slave is detached from the list.
*/
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (bond->params.mode == BOND_MODE_8023AD)
bond_3ad_unbind_slave(slave);
- }
slave_dev = slave->dev;
bond_detach_slave(bond, slave);
@@ -2070,14 +2047,12 @@ static int bond_release_all(struct net_device *bond_dev)
*/
if (!USES_PRIMARY(bond->params.mode)) {
/* unset promiscuity level from slave */
- if (bond_dev->flags & IFF_PROMISC) {
+ if (bond_dev->flags & IFF_PROMISC)
dev_set_promiscuity(slave_dev, -1);
- }
/* unset allmulti level from slave */
- if (bond_dev->flags & IFF_ALLMULTI) {
+ if (bond_dev->flags & IFF_ALLMULTI)
dev_set_allmulti(slave_dev, -1);
- }
/* flush master's mc_list from slave */
netif_addr_lock_bh(bond_dev);
@@ -2112,20 +2087,20 @@ static int bond_release_all(struct net_device *bond_dev)
*/
memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
- if (list_empty(&bond->vlan_list)) {
+ if (list_empty(&bond->vlan_list))
bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
- } else {
- printk(KERN_WARNING DRV_NAME
+ else {
+ pr_warning(DRV_NAME
": %s: Warning: clearing HW address of %s while it "
"still has VLANs.\n",
bond_dev->name, bond_dev->name);
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: When re-adding slaves, make sure the bond's "
"HW address matches its VLANs'.\n",
bond_dev->name);
}
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: released all slaves\n",
bond_dev->name);
@@ -2143,8 +2118,8 @@ out:
* - <slave_dev> is already active.
* - The link state of <slave_dev> is not BOND_LINK_UP.
* - <slave_dev> is not running.
- * In these cases, this fuction does nothing.
- * In the other cases, currnt_slave pointer is changed and 0 is returned.
+ * In these cases, this function does nothing.
+ * In the other cases, current_slave pointer is changed and 0 is returned.
*/
static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev)
{
@@ -2153,15 +2128,12 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
struct slave *new_active = NULL;
int res = 0;
- if (!USES_PRIMARY(bond->params.mode)) {
+ if (!USES_PRIMARY(bond->params.mode))
return -EINVAL;
- }
/* Verify that master_dev is indeed the master of slave_dev */
- if (!(slave_dev->flags & IFF_SLAVE) ||
- (slave_dev->master != bond_dev)) {
+ if (!(slave_dev->flags & IFF_SLAVE) || (slave_dev->master != bond_dev))
return -EINVAL;
- }
read_lock(&bond->lock);
@@ -2186,9 +2158,8 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, new_active);
write_unlock_bh(&bond->curr_slave_lock);
- } else {
+ } else
res = -EINVAL;
- }
read_unlock(&bond->lock);
@@ -2240,6 +2211,9 @@ static int bond_miimon_inspect(struct bonding *bond)
{
struct slave *slave;
int i, link_state, commit = 0;
+ bool ignore_updelay;
+
+ ignore_updelay = !bond->curr_active_slave;
bond_for_each_slave(bond, slave, i) {
slave->new_link = BOND_LINK_NOCHANGE;
@@ -2254,7 +2228,7 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->link = BOND_LINK_FAIL;
slave->delay = bond->params.downdelay;
if (slave->delay) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: link status down for %s"
"interface %s, disabling it in %d ms.\n",
bond->dev->name,
@@ -2273,7 +2247,7 @@ static int bond_miimon_inspect(struct bonding *bond)
*/
slave->link = BOND_LINK_UP;
slave->jiffies = jiffies;
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: link status up again after %d "
"ms for interface %s.\n",
bond->dev->name,
@@ -2300,10 +2274,11 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->delay = bond->params.updelay;
if (slave->delay) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: link status up for "
"interface %s, enabling it in %d ms.\n",
bond->dev->name, slave->dev->name,
+ ignore_updelay ? 0 :
bond->params.updelay *
bond->params.miimon);
}
@@ -2311,7 +2286,7 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_BACK:
if (!link_state) {
slave->link = BOND_LINK_DOWN;
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: link status down again after %d "
"ms for interface %s.\n",
bond->dev->name,
@@ -2322,9 +2297,13 @@ static int bond_miimon_inspect(struct bonding *bond)
continue;
}
+ if (ignore_updelay)
+ slave->delay = 0;
+
if (slave->delay <= 0) {
slave->new_link = BOND_LINK_UP;
commit++;
+ ignore_updelay = false;
continue;
}
@@ -2361,7 +2340,7 @@ static void bond_miimon_commit(struct bonding *bond)
slave->state = BOND_STATE_BACKUP;
}
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: link status definitely "
"up for interface %s.\n",
bond->dev->name, slave->dev->name);
@@ -2390,7 +2369,7 @@ static void bond_miimon_commit(struct bonding *bond)
bond->params.mode == BOND_MODE_8023AD)
bond_set_slave_inactive_flags(slave);
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: link status definitely down for "
"interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
@@ -2399,8 +2378,7 @@ static void bond_miimon_commit(struct bonding *bond)
bond_3ad_handle_link_change(slave,
BOND_LINK_DOWN);
- if (bond->params.mode == BOND_MODE_TLB ||
- bond->params.mode == BOND_MODE_ALB)
+ if (bond_is_lb(bond))
bond_alb_handle_link_change(bond, slave,
BOND_LINK_DOWN);
@@ -2410,7 +2388,7 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
default:
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: invalid new link %d on slave %s\n",
bond->dev->name, slave->new_link,
slave->dev->name);
@@ -2531,18 +2509,18 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op,
slave_dev->name, dest_ip, src_ip, vlan_id);
-
+
skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
NULL, slave_dev->dev_addr, NULL);
if (!skb) {
- printk(KERN_ERR DRV_NAME ": ARP packet allocation failed\n");
+ pr_err(DRV_NAME ": ARP packet allocation failed\n");
return;
}
if (vlan_id) {
skb = vlan_put_tag(skb, vlan_id);
if (!skb) {
- printk(KERN_ERR DRV_NAME ": failed to insert VLAN tag\n");
+ pr_err(DRV_NAME ": failed to insert VLAN tag\n");
return;
}
}
@@ -2582,7 +2560,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
rv = ip_route_output_key(&init_net, &rt, &fl);
if (rv) {
if (net_ratelimit()) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: no route to arp_ip_target %pI4\n",
bond->dev->name, &fl.fl4_dst);
}
@@ -2619,7 +2597,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
}
if (net_ratelimit()) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": %s: no path to arp_ip_target %pI4 via rt.dev %s\n",
bond->dev->name, &fl.fl4_dst,
rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
@@ -2767,13 +2745,11 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
- if (bond->kill_timers) {
+ if (bond->kill_timers)
goto out;
- }
- if (bond->slave_cnt == 0) {
+ if (bond->slave_cnt == 0)
goto re_arm;
- }
read_lock(&bond->curr_slave_lock);
oldcurrent = bond->curr_active_slave;
@@ -2789,7 +2765,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
*/
bond_for_each_slave(bond, slave, i) {
if (slave->link != BOND_LINK_UP) {
- if (time_before_eq(jiffies, slave->dev->trans_start + delta_in_ticks) &&
+ if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) &&
time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) {
slave->link = BOND_LINK_UP;
@@ -2801,14 +2777,14 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* is closed.
*/
if (!oldcurrent) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: link status definitely "
"up for interface %s, ",
bond->dev->name,
slave->dev->name);
do_failover = 1;
} else {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: interface %s is now up\n",
bond->dev->name,
slave->dev->name);
@@ -2821,24 +2797,22 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (time_after_eq(jiffies, slave->dev->trans_start + 2*delta_in_ticks) ||
+ if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) ||
(time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) {
slave->link = BOND_LINK_DOWN;
slave->state = BOND_STATE_BACKUP;
- if (slave->link_failure_count < UINT_MAX) {
+ if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- }
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: interface %s is now down.\n",
bond->dev->name,
slave->dev->name);
- if (slave == oldcurrent) {
+ if (slave == oldcurrent)
do_failover = 1;
- }
}
}
@@ -2849,9 +2823,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* do - all replies will be rx'ed on same link causing slaves
* to be unstable during low/no traffic periods
*/
- if (IS_UP(slave->dev)) {
+ if (IS_UP(slave->dev))
bond_arp_send_all(bond, slave);
- }
}
if (do_failover) {
@@ -2932,7 +2905,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* the bond has an IP address)
*/
if ((slave->state == BOND_STATE_ACTIVE) &&
- (time_after_eq(jiffies, slave->dev->trans_start +
+ (time_after_eq(jiffies, dev_trans_start(slave->dev) +
2 * delta_in_ticks) ||
(time_after_eq(jiffies, slave_last_rx(bond, slave)
+ 2 * delta_in_ticks)))) {
@@ -2976,13 +2949,13 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
write_lock_bh(&bond->curr_slave_lock);
if (!bond->curr_active_slave &&
- time_before_eq(jiffies, slave->dev->trans_start +
+ time_before_eq(jiffies, dev_trans_start(slave->dev) +
delta_in_ticks)) {
slave->link = BOND_LINK_UP;
bond_change_active_slave(bond, slave);
bond->current_arp_slave = NULL;
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: %s is up and now the "
"active interface\n",
bond->dev->name, slave->dev->name);
@@ -2998,7 +2971,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
bond_set_slave_inactive_flags(slave);
bond->current_arp_slave = NULL;
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: backup interface %s is now up\n",
bond->dev->name, slave->dev->name);
}
@@ -3014,7 +2987,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
slave->link = BOND_LINK_DOWN;
if (slave == bond->curr_active_slave) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: link status down for active "
"interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
@@ -3033,7 +3006,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
bond->current_arp_slave = NULL;
} else if (slave->state == BOND_STATE_BACKUP) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: backup interface %s is now down\n",
bond->dev->name, slave->dev->name);
@@ -3042,7 +3015,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
break;
default:
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: impossible: new_link %d on slave %s\n",
bond->dev->name, slave->new_link,
slave->dev->name);
@@ -3076,7 +3049,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
read_lock(&bond->curr_slave_lock);
if (bond->current_arp_slave && bond->curr_active_slave)
- printk("PROBE: c_arp %s && cas %s BAD\n",
+ pr_info(DRV_NAME ": PROBE: c_arp %s && cas %s BAD\n",
bond->current_arp_slave->dev->name,
bond->curr_active_slave->dev->name);
@@ -3126,7 +3099,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(slave);
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: backup interface %s is now down.\n",
bond->dev->name, slave->dev->name);
}
@@ -3176,9 +3149,8 @@ void bond_activebackup_arp_mon(struct work_struct *work)
bond_ab_arp_probe(bond);
re_arm:
- if (bond->params.arp_interval) {
+ if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
- }
out:
read_unlock(&bond->lock);
}
@@ -3200,14 +3172,12 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
read_lock(&dev_base_lock);
read_lock(&bond->lock);
- if (*pos == 0) {
+ if (*pos == 0)
return SEQ_START_TOKEN;
- }
bond_for_each_slave(bond, slave, i) {
- if (++off == *pos) {
+ if (++off == *pos)
return slave;
- }
}
return NULL;
@@ -3219,9 +3189,8 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct slave *slave = v;
++*pos;
- if (v == SEQ_START_TOKEN) {
+ if (v == SEQ_START_TOKEN)
return bond->first_slave;
- }
slave = slave->next;
@@ -3284,14 +3253,14 @@ static void bond_info_show_master(struct seq_file *seq)
/* ARP information */
- if(bond->params.arp_interval > 0) {
- int printed=0;
+ if (bond->params.arp_interval > 0) {
+ int printed = 0;
seq_printf(seq, "ARP Polling Interval (ms): %d\n",
bond->params.arp_interval);
seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
- for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) {
+ for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
if (!bond->params.arp_targets[i])
break;
if (printed)
@@ -3331,7 +3300,8 @@ static void bond_info_show_master(struct seq_file *seq)
}
}
-static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave)
+static void bond_info_show_slave(struct seq_file *seq,
+ const struct slave *slave)
{
struct bonding *bond = seq->private;
@@ -3347,12 +3317,11 @@ static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave
const struct aggregator *agg
= SLAVE_AD_INFO(slave).port.aggregator;
- if (agg) {
+ if (agg)
seq_printf(seq, "Aggregator ID: %d\n",
agg->aggregator_identifier);
- } else {
+ else
seq_puts(seq, "Aggregator ID: N/A\n");
- }
}
}
@@ -3361,9 +3330,8 @@ static int bond_info_seq_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "%s\n", version);
bond_info_show_master(seq);
- } else {
+ } else
bond_info_show_slave(seq, v);
- }
return 0;
}
@@ -3408,13 +3376,12 @@ static int bond_create_proc_entry(struct bonding *bond)
bond->proc_entry = proc_create_data(bond_dev->name,
S_IRUGO, bond_proc_dir,
&bond_info_fops, bond);
- if (bond->proc_entry == NULL) {
- printk(KERN_WARNING DRV_NAME
+ if (bond->proc_entry == NULL)
+ pr_warning(DRV_NAME
": Warning: Cannot create /proc/net/%s/%s\n",
DRV_NAME, bond_dev->name);
- } else {
+ else
memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
- }
}
return 0;
@@ -3437,7 +3404,7 @@ static void bond_create_proc_dir(void)
if (!bond_proc_dir) {
bond_proc_dir = proc_mkdir(DRV_NAME, init_net.proc_net);
if (!bond_proc_dir)
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: cannot create /proc/net/%s\n",
DRV_NAME);
}
@@ -3453,8 +3420,28 @@ static void bond_destroy_proc_dir(void)
bond_proc_dir = NULL;
}
}
+
+#else /* !CONFIG_PROC_FS */
+
+static int bond_create_proc_entry(struct bonding *bond)
+{
+ return 0;
+}
+
+static void bond_remove_proc_entry(struct bonding *bond)
+{
+}
+
+static void bond_create_proc_dir(void)
+{
+}
+
+static void bond_destroy_proc_dir(void)
+{
+}
+
#endif /* CONFIG_PROC_FS */
+
/*-------------------------- netdev event handling --------------------------*/
/*
@@ -3462,18 +3449,17 @@ static void bond_destroy_proc_dir(void)
*/
static int bond_event_changename(struct bonding *bond)
{
-#ifdef CONFIG_PROC_FS
bond_remove_proc_entry(bond);
bond_create_proc_entry(bond);
-#endif
- down_write(&(bonding_rwsem));
- bond_destroy_sysfs_entry(bond);
- bond_create_sysfs_entry(bond);
- up_write(&(bonding_rwsem));
+
+ bond_destroy_sysfs_entry(bond);
+ bond_create_sysfs_entry(bond);
+
return NOTIFY_DONE;
}
-static int bond_master_netdev_event(unsigned long event, struct net_device *bond_dev)
+static int bond_master_netdev_event(unsigned long event,
+ struct net_device *bond_dev)
{
struct bonding *event_bond = netdev_priv(bond_dev);
@@ -3490,7 +3476,8 @@ static int bond_master_netdev_event(unsigned long event, struct net_device *bond
return NOTIFY_DONE;
}
-static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev)
+static int bond_slave_netdev_event(unsigned long event,
+ struct net_device *slave_dev)
{
struct net_device *bond_dev = slave_dev->master;
struct bonding *bond = netdev_priv(bond_dev);
@@ -3568,7 +3555,8 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave
* locks for us to safely manipulate the slave devices (RTNL lock,
* dev_probe_lock).
*/
-static int bond_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
+static int bond_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
{
struct net_device *event_dev = (struct net_device *)ptr;
@@ -3923,9 +3911,9 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
switch (cmd) {
case SIOCGMIIPHY:
mii = if_mii(ifr);
- if (!mii) {
+ if (!mii)
return -EINVAL;
- }
+
mii->phy_id = 0;
/* Fall Through */
case SIOCGMIIREG:
@@ -3934,18 +3922,18 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
* instead of SIOCGMIIPHY.
*/
mii = if_mii(ifr);
- if (!mii) {
+ if (!mii)
return -EINVAL;
- }
+
if (mii->reg_num == 1) {
struct bonding *bond = netdev_priv(bond_dev);
mii->val_out = 0;
read_lock(&bond->lock);
read_lock(&bond->curr_slave_lock);
- if (netif_carrier_ok(bond->dev)) {
+ if (netif_carrier_ok(bond->dev))
mii->val_out = BMSR_LSTATUS;
- }
+
read_unlock(&bond->curr_slave_lock);
read_unlock(&bond->lock);
}
@@ -3955,32 +3943,26 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
case SIOCBONDINFOQUERY:
u_binfo = (struct ifbond __user *)ifr->ifr_data;
- if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond))) {
+ if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
return -EFAULT;
- }
res = bond_info_query(bond_dev, &k_binfo);
- if (res == 0) {
- if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond))) {
- return -EFAULT;
- }
- }
+ if (res == 0 &&
+ copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
+ return -EFAULT;
return res;
case BOND_SLAVE_INFO_QUERY_OLD:
case SIOCBONDSLAVEINFOQUERY:
u_sinfo = (struct ifslave __user *)ifr->ifr_data;
- if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave))) {
+ if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
return -EFAULT;
- }
res = bond_slave_info_query(bond_dev, &k_sinfo);
- if (res == 0) {
- if (copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave))) {
- return -EFAULT;
- }
- }
+ if (res == 0 &&
+ copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
+ return -EFAULT;
return res;
default:
@@ -3988,18 +3970,16 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
break;
}
- if (!capable(CAP_NET_ADMIN)) {
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
- }
- down_write(&(bonding_rwsem));
slave_dev = dev_get_by_name(&init_net, ifr->ifr_slave);
pr_debug("slave_dev=%p: \n", slave_dev);
- if (!slave_dev) {
+ if (!slave_dev)
res = -ENODEV;
- } else {
+ else {
pr_debug("slave_dev->name=%s: \n", slave_dev->name);
switch (cmd) {
case BOND_ENSLAVE_OLD:
@@ -4025,7 +4005,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
dev_put(slave_dev);
}
- up_write(&(bonding_rwsem));
return res;
}
@@ -4037,30 +4016,30 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
/*
* Do promisc before checking multicast_mode
*/
- if ((bond_dev->flags & IFF_PROMISC) && !(bond->flags & IFF_PROMISC)) {
+ if ((bond_dev->flags & IFF_PROMISC) && !(bond->flags & IFF_PROMISC))
/*
* FIXME: Need to handle the error when one of the multi-slaves
* encounters error.
*/
bond_set_promiscuity(bond, 1);
- }
- if (!(bond_dev->flags & IFF_PROMISC) && (bond->flags & IFF_PROMISC)) {
+
+ if (!(bond_dev->flags & IFF_PROMISC) && (bond->flags & IFF_PROMISC))
bond_set_promiscuity(bond, -1);
- }
+
/* set allmulti flag to slaves */
- if ((bond_dev->flags & IFF_ALLMULTI) && !(bond->flags & IFF_ALLMULTI)) {
+ if ((bond_dev->flags & IFF_ALLMULTI) && !(bond->flags & IFF_ALLMULTI))
/*
* FIXME: Need to handle the error when one of the multi-slaves
* encounters error.
*/
bond_set_allmulti(bond, 1);
- }
- if (!(bond_dev->flags & IFF_ALLMULTI) && (bond->flags & IFF_ALLMULTI)) {
+
+ if (!(bond_dev->flags & IFF_ALLMULTI) && (bond->flags & IFF_ALLMULTI))
bond_set_allmulti(bond, -1);
- }
+
read_lock(&bond->lock);
@@ -4068,16 +4047,14 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
/* looking for addresses to add to slaves' mc list */
for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
- if (!bond_mc_list_find_dmi(dmi, bond->mc_list)) {
+ if (!bond_mc_list_find_dmi(dmi, bond->mc_list))
bond_mc_add(bond, dmi->dmi_addr, dmi->dmi_addrlen);
- }
}
/* looking for addresses to delete from slaves' list */
for (dmi = bond->mc_list; dmi; dmi = dmi->next) {
- if (!bond_mc_list_find_dmi(dmi, bond_dev->mc_list)) {
+ if (!bond_mc_list_find_dmi(dmi, bond_dev->mc_list))
bond_mc_delete(bond, dmi->dmi_addr, dmi->dmi_addrlen);
- }
}
/* save master's multicast list */
@@ -4197,9 +4174,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
if (bond->params.fail_over_mac == BOND_FOM_ACTIVE)
return 0;
- if (!is_valid_ether_addr(sa->sa_data)) {
+ if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- }
/* Can't hold bond->lock with bh disabled here since
* some base drivers panic. On the other hand we can't
@@ -4270,9 +4246,8 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
read_lock(&bond->lock);
- if (!BOND_IS_OK(bond)) {
+ if (!BOND_IS_OK(bond))
goto out;
- }
/*
* Concurrent TX may collide on rr_tx_counter; we accept that
@@ -4282,9 +4257,8 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
bond_for_each_slave(bond, slave, i) {
slave_no--;
- if (slave_no < 0) {
+ if (slave_no < 0)
break;
- }
}
start_at = slave;
@@ -4319,9 +4293,8 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
read_lock(&bond->lock);
read_lock(&bond->curr_slave_lock);
- if (!BOND_IS_OK(bond)) {
+ if (!BOND_IS_OK(bond))
goto out;
- }
if (!bond->curr_active_slave)
goto out;
@@ -4329,10 +4302,10 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
out:
- if (res) {
+ if (res)
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
- }
+
read_unlock(&bond->curr_slave_lock);
read_unlock(&bond->lock);
return 0;
@@ -4353,17 +4326,15 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
read_lock(&bond->lock);
- if (!BOND_IS_OK(bond)) {
+ if (!BOND_IS_OK(bond))
goto out;
- }
slave_no = bond->xmit_hash_policy(skb, bond_dev, bond->slave_cnt);
bond_for_each_slave(bond, slave, i) {
slave_no--;
- if (slave_no < 0) {
+ if (slave_no < 0)
break;
- }
}
start_at = slave;
@@ -4399,17 +4370,15 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
read_lock(&bond->lock);
- if (!BOND_IS_OK(bond)) {
+ if (!BOND_IS_OK(bond))
goto out;
- }
read_lock(&bond->curr_slave_lock);
start_at = bond->curr_active_slave;
read_unlock(&bond->curr_slave_lock);
- if (!start_at) {
+ if (!start_at)
goto out;
- }
bond_for_each_slave_from(bond, slave, i, start_at) {
if (IS_UP(slave->dev) &&
@@ -4418,7 +4387,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
if (tx_dev) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Error: bond_xmit_broadcast(): "
"skb_clone() failed\n",
bond_dev->name);
@@ -4435,15 +4404,14 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
}
}
- if (tx_dev) {
+ if (tx_dev)
res = bond_dev_queue_xmit(bond, skb, tx_dev);
- }
out:
- if (res) {
+ if (res)
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
- }
+
/* frame sent to all suitable interfaces */
read_unlock(&bond->lock);
return 0;
@@ -4487,7 +4455,7 @@ static int bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return bond_alb_xmit(skb, dev);
default:
/* Should never happen, mode already checked */
- printk(KERN_ERR DRV_NAME ": %s: Error: Unknown bonding mode %d\n",
+ pr_err(DRV_NAME ": %s: Error: Unknown bonding mode %d\n",
dev->name, bond->params.mode);
WARN_ON_ONCE(1);
dev_kfree_skb(skb);
@@ -4524,7 +4492,7 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
break;
default:
/* Should never happen, mode already checked */
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Error: Unknown bonding mode %d\n",
bond_dev->name,
mode);
@@ -4551,6 +4519,8 @@ static const struct ethtool_ops bond_ethtool_ops = {
};
static const struct net_device_ops bond_netdev_ops = {
+ .ndo_init = bond_init,
+ .ndo_uninit = bond_uninit,
.ndo_open = bond_open,
.ndo_stop = bond_close,
.ndo_start_xmit = bond_start_xmit,
@@ -4565,48 +4535,34 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
};
-/*
- * Does not allocate but creates a /proc entry.
- * Allowed to fail.
- */
-static int bond_init(struct net_device *bond_dev, struct bond_params *params)
+static void bond_setup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- pr_debug("Begin bond_init for %s\n", bond_dev->name);
-
/* initialize rwlocks */
rwlock_init(&bond->lock);
rwlock_init(&bond->curr_slave_lock);
- bond->params = *params; /* copy params struct */
-
- bond->wq = create_singlethread_workqueue(bond_dev->name);
- if (!bond->wq)
- return -ENOMEM;
+ bond->params = bonding_defaults;
/* Initialize pointers */
- bond->first_slave = NULL;
- bond->curr_active_slave = NULL;
- bond->current_arp_slave = NULL;
- bond->primary_slave = NULL;
bond->dev = bond_dev;
- bond->send_grat_arp = 0;
- bond->send_unsol_na = 0;
- bond->setup_by_slave = 0;
INIT_LIST_HEAD(&bond->vlan_list);
/* Initialize the device entry points */
+ ether_setup(bond_dev);
bond_dev->netdev_ops = &bond_netdev_ops;
bond_dev->ethtool_ops = &bond_ethtool_ops;
bond_set_mode_ops(bond, bond->params.mode);
- bond_dev->destructor = bond_destructor;
+ bond_dev->destructor = free_netdev;
/* Initialize the device options */
bond_dev->tx_queue_len = 0;
bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
bond_dev->priv_flags |= IFF_BONDING;
+ bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+
if (bond->params.arp_interval)
bond_dev->priv_flags |= IFF_MASTER_ARPMON;
@@ -4631,12 +4587,6 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER);
-#ifdef CONFIG_PROC_FS
- bond_create_proc_entry(bond);
-#endif
- list_add_tail(&bond->bond_list, &bond_dev_list);
-
- return 0;
}
static void bond_work_cancel_all(struct bonding *bond)
@@ -4671,9 +4621,7 @@ static void bond_deinit(struct net_device *bond_dev)
bond_work_cancel_all(bond);
-#ifdef CONFIG_PROC_FS
bond_remove_proc_entry(bond);
-#endif
}
/* Unregister and free all bond devices.
@@ -4689,12 +4637,10 @@ static void bond_free_all(void)
bond_work_cancel_all(bond);
/* Release the bonded slaves */
bond_release_all(bond_dev);
- bond_destroy(bond);
+ unregister_netdevice(bond_dev);
}
-#ifdef CONFIG_PROC_FS
bond_destroy_proc_dir();
-#endif
}
/*------------------------- Module initialization ---------------------------*/
@@ -4742,7 +4688,7 @@ static int bond_check_params(struct bond_params *params)
if (mode) {
bond_mode = bond_parse_parm(mode, bond_mode_tbl);
if (bond_mode == -1) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": Error: Invalid bonding mode \"%s\"\n",
mode == NULL ? "NULL" : mode);
return -EINVAL;
@@ -4752,16 +4698,16 @@ static int bond_check_params(struct bond_params *params)
if (xmit_hash_policy) {
if ((bond_mode != BOND_MODE_XOR) &&
(bond_mode != BOND_MODE_8023AD)) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": xor_mode param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
xmit_hashtype = bond_parse_parm(xmit_hash_policy,
xmit_hashtype_tbl);
if (xmit_hashtype == -1) {
- printk(KERN_ERR DRV_NAME
- ": Error: Invalid xmit_hash_policy \"%s\"\n",
- xmit_hash_policy == NULL ? "NULL" :
+ pr_err(DRV_NAME
+ ": Error: Invalid xmit_hash_policy \"%s\"\n",
+ xmit_hash_policy == NULL ? "NULL" :
xmit_hash_policy);
return -EINVAL;
}
@@ -4770,13 +4716,13 @@ static int bond_check_params(struct bond_params *params)
if (lacp_rate) {
if (bond_mode != BOND_MODE_8023AD) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": lacp_rate param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
lacp_fast = bond_parse_parm(lacp_rate, bond_lacp_tbl);
if (lacp_fast == -1) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": Error: Invalid lacp rate \"%s\"\n",
lacp_rate == NULL ? "NULL" : lacp_rate);
return -EINVAL;
@@ -4787,14 +4733,14 @@ static int bond_check_params(struct bond_params *params)
if (ad_select) {
params->ad_select = bond_parse_parm(ad_select, ad_select_tbl);
if (params->ad_select == -1) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": Error: Invalid ad_select \"%s\"\n",
ad_select == NULL ? "NULL" : ad_select);
return -EINVAL;
}
if (bond_mode != BOND_MODE_8023AD) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": ad_select param only affects 802.3ad mode\n");
}
} else {
@@ -4802,7 +4748,7 @@ static int bond_check_params(struct bond_params *params)
}
if (max_bonds < 0 || max_bonds > INT_MAX) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: max_bonds (%d) not in range %d-%d, so it "
"was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
@@ -4810,7 +4756,7 @@ static int bond_check_params(struct bond_params *params)
}
if (miimon < 0) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: miimon module parameter (%d), "
"not in range 0-%d, so it was reset to %d\n",
miimon, INT_MAX, BOND_LINK_MON_INTERV);
@@ -4818,7 +4764,7 @@ static int bond_check_params(struct bond_params *params)
}
if (updelay < 0) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: updelay module parameter (%d), "
"not in range 0-%d, so it was reset to 0\n",
updelay, INT_MAX);
@@ -4826,7 +4772,7 @@ static int bond_check_params(struct bond_params *params)
}
if (downdelay < 0) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: downdelay module parameter (%d), "
"not in range 0-%d, so it was reset to 0\n",
downdelay, INT_MAX);
@@ -4834,7 +4780,7 @@ static int bond_check_params(struct bond_params *params)
}
if ((use_carrier != 0) && (use_carrier != 1)) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: use_carrier module parameter (%d), "
"not of valid value (0/1), so it was set to 1\n",
use_carrier);
@@ -4842,14 +4788,14 @@ static int bond_check_params(struct bond_params *params)
}
if (num_grat_arp < 0 || num_grat_arp > 255) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: num_grat_arp (%d) not in range 0-255 so it "
"was reset to 1 \n", num_grat_arp);
num_grat_arp = 1;
}
if (num_unsol_na < 0 || num_unsol_na > 255) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: num_unsol_na (%d) not in range 0-255 so it "
"was reset to 1 \n", num_unsol_na);
num_unsol_na = 1;
@@ -4858,12 +4804,12 @@ static int bond_check_params(struct bond_params *params)
/* reset values for 802.3ad */
if (bond_mode == BOND_MODE_8023AD) {
if (!miimon) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: miimon must be specified, "
"otherwise bonding will not detect link "
"failure, speed and duplex which are "
"essential for 802.3ad operation\n");
- printk(KERN_WARNING "Forcing miimon to 100msec\n");
+ pr_warning("Forcing miimon to 100msec\n");
miimon = 100;
}
}
@@ -4872,12 +4818,12 @@ static int bond_check_params(struct bond_params *params)
if ((bond_mode == BOND_MODE_TLB) ||
(bond_mode == BOND_MODE_ALB)) {
if (!miimon) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: miimon must be specified, "
"otherwise bonding will not detect link "
"failure and link speed which are essential "
"for TLB/ALB load balancing\n");
- printk(KERN_WARNING "Forcing miimon to 100msec\n");
+ pr_warning("Forcing miimon to 100msec\n");
miimon = 100;
}
}
@@ -4897,7 +4843,7 @@ static int bond_check_params(struct bond_params *params)
/* just warn the user the up/down delay will have
* no effect since miimon is zero...
*/
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: miimon module parameter not set "
"and updelay (%d) or downdelay (%d) module "
"parameter is set; updelay and downdelay have "
@@ -4907,7 +4853,7 @@ static int bond_check_params(struct bond_params *params)
} else {
/* don't allow arp monitoring */
if (arp_interval) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: miimon (%d) and arp_interval (%d) "
"can't be used simultaneously, disabling ARP "
"monitoring\n",
@@ -4916,7 +4862,7 @@ static int bond_check_params(struct bond_params *params)
}
if ((updelay % miimon) != 0) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: updelay (%d) is not a multiple "
"of miimon (%d), updelay rounded to %d ms\n",
updelay, miimon, (updelay / miimon) * miimon);
@@ -4925,7 +4871,7 @@ static int bond_check_params(struct bond_params *params)
updelay /= miimon;
if ((downdelay % miimon) != 0) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: downdelay (%d) is not a multiple "
"of miimon (%d), downdelay rounded to %d ms\n",
downdelay, miimon,
@@ -4936,7 +4882,7 @@ static int bond_check_params(struct bond_params *params)
}
if (arp_interval < 0) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: arp_interval module parameter (%d) "
", not in range 0-%d, so it was reset to %d\n",
arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
@@ -4949,7 +4895,7 @@ static int bond_check_params(struct bond_params *params)
/* not complete check, but should be good enough to
catch mistakes */
if (!isdigit(arp_ip_target[arp_ip_count][0])) {
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: bad arp_ip_target module parameter "
"(%s), ARP monitoring will not be performed\n",
arp_ip_target[arp_ip_count]);
@@ -4962,7 +4908,7 @@ static int bond_check_params(struct bond_params *params)
if (arp_interval && !arp_ip_count) {
/* don't allow arping if no arp_ip_target given... */
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: arp_interval module parameter (%d) "
"specified without providing an arp_ip_target "
"parameter, arp_interval was reset to 0\n",
@@ -4972,12 +4918,12 @@ static int bond_check_params(struct bond_params *params)
if (arp_validate) {
if (bond_mode != BOND_MODE_ACTIVEBACKUP) {
- printk(KERN_ERR DRV_NAME
- ": arp_validate only supported in active-backup mode\n");
+ pr_err(DRV_NAME
+ ": arp_validate only supported in active-backup mode\n");
return -EINVAL;
}
if (!arp_interval) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": arp_validate requires arp_interval\n");
return -EINVAL;
}
@@ -4985,7 +4931,7 @@ static int bond_check_params(struct bond_params *params)
arp_validate_value = bond_parse_parm(arp_validate,
arp_validate_tbl);
if (arp_validate_value == -1) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": Error: invalid arp_validate \"%s\"\n",
arp_validate == NULL ? "NULL" : arp_validate);
return -EINVAL;
@@ -4994,20 +4940,20 @@ static int bond_check_params(struct bond_params *params)
arp_validate_value = 0;
if (miimon) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": MII link monitoring set to %d ms\n",
miimon);
} else if (arp_interval) {
int i;
- printk(KERN_INFO DRV_NAME
- ": ARP monitoring set to %d ms, validate %s, with %d target(s):",
+ pr_info(DRV_NAME ": ARP monitoring set to %d ms,"
+ " validate %s, with %d target(s):",
arp_interval,
arp_validate_tbl[arp_validate_value].modename,
arp_ip_count);
for (i = 0; i < arp_ip_count; i++)
- printk (" %s", arp_ip_target[i]);
+ printk(" %s", arp_ip_target[i]);
printk("\n");
@@ -5015,7 +4961,7 @@ static int bond_check_params(struct bond_params *params)
/* miimon and arp_interval not set, we need one so things
* work as expected, see bonding.txt for details
*/
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: either miimon or arp_interval and "
"arp_ip_target module parameters must be specified, "
"otherwise bonding will not detect link failures! see "
@@ -5026,7 +4972,7 @@ static int bond_check_params(struct bond_params *params)
/* currently, using a primary only makes sense
* in active backup, TLB or ALB modes
*/
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: %s primary device specified but has no "
"effect in %s mode\n",
primary, bond_mode_name(bond_mode));
@@ -5037,14 +4983,14 @@ static int bond_check_params(struct bond_params *params)
fail_over_mac_value = bond_parse_parm(fail_over_mac,
fail_over_mac_tbl);
if (fail_over_mac_value == -1) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": Error: invalid fail_over_mac \"%s\"\n",
arp_validate == NULL ? "NULL" : arp_validate);
return -EINVAL;
}
if (bond_mode != BOND_MODE_ACTIVEBACKUP)
- printk(KERN_WARNING DRV_NAME
+ pr_warning(DRV_NAME
": Warning: fail_over_mac only affects "
"active-backup mode.\n");
} else {
@@ -5094,37 +5040,53 @@ static void bond_set_lockdep_class(struct net_device *dev)
netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL);
}
+/*
+ * Called from registration process
+ */
+static int bond_init(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+
+ pr_debug("Begin bond_init for %s\n", bond_dev->name);
+
+ bond->wq = create_singlethread_workqueue(bond_dev->name);
+ if (!bond->wq)
+ return -ENOMEM;
+
+ bond_set_lockdep_class(bond_dev);
+
+ netif_carrier_off(bond_dev);
+
+ bond_create_proc_entry(bond);
+ list_add_tail(&bond->bond_list, &bond_dev_list);
+
+ return 0;
+}
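A note on how the new bond_init() above gets invoked: register_netdevice() calls a device's ndo_init hook, if one is set, before the interface becomes visible, so the workqueue, lockdep class and /proc entry are now created as part of registration instead of inside bond_create(). A minimal sketch of that wiring -- every name other than struct net_device_ops and .ndo_init is hypothetical:

        static int example_init(struct net_device *dev)
        {
                /* per-device setup; runs inside register_netdevice() */
                return 0;               /* a negative return aborts registration */
        }

        static const struct net_device_ops example_netdev_ops = {
                .ndo_init       = example_init,
                /* .ndo_open, .ndo_stop, .ndo_start_xmit, ... */
        };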
+
/* Create a new bond based on the specified name and bonding parameters.
* If name is NULL, obtain a suitable "bond%d" name for us.
* Caller must NOT hold rtnl_lock; we need to release it here before we
* set up our sysfs entries.
*/
-int bond_create(char *name, struct bond_params *params)
+int bond_create(const char *name)
{
struct net_device *bond_dev;
- struct bonding *bond;
int res;
rtnl_lock();
- down_write(&bonding_rwsem);
-
/* Check to see if the bond already exists. */
- if (name) {
- list_for_each_entry(bond, &bond_dev_list, bond_list)
- if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) {
- printk(KERN_ERR DRV_NAME
- ": cannot add bond %s; it already exists\n",
- name);
- res = -EPERM;
- goto out_rtnl;
- }
+ /* FIXME: pass netns from caller */
+ if (name && __dev_get_by_name(&init_net, name)) {
+ pr_err(DRV_NAME ": cannot add bond %s; already exists\n",
+ name);
+ res = -EEXIST;
+ goto out_rtnl;
}
bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
- ether_setup);
+ bond_setup);
if (!bond_dev) {
- printk(KERN_ERR DRV_NAME
- ": %s: eek! can't alloc netdev!\n",
+ pr_err(DRV_NAME ": %s: eek! can't alloc netdev!\n",
name);
res = -ENOMEM;
goto out_rtnl;
@@ -5136,43 +5098,24 @@ int bond_create(char *name, struct bond_params *params)
goto out_netdev;
}
- /* bond_init() must be called after dev_alloc_name() (for the
- * /proc files), but before register_netdevice(), because we
- * need to set function pointers.
- */
-
- res = bond_init(bond_dev, params);
- if (res < 0) {
- goto out_netdev;
- }
-
res = register_netdevice(bond_dev);
- if (res < 0) {
+ if (res < 0)
goto out_bond;
- }
-
- bond_set_lockdep_class(bond_dev);
-
- netif_carrier_off(bond_dev);
- up_write(&bonding_rwsem);
- rtnl_unlock(); /* allows sysfs registration of net device */
res = bond_create_sysfs_entry(netdev_priv(bond_dev));
if (res < 0)
goto out_unreg;
+ rtnl_unlock();
return 0;
out_unreg:
- rtnl_lock();
- down_write(&bonding_rwsem);
unregister_netdevice(bond_dev);
out_bond:
bond_deinit(bond_dev);
out_netdev:
free_netdev(bond_dev);
out_rtnl:
- up_write(&bonding_rwsem);
rtnl_unlock();
return res;
}
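The error unwinding in bond_create() above is the usual kernel goto ladder: each label undoes only what had already succeeded, in reverse order, and every path returns with the RTNL released. The same idiom in isolation (all names hypothetical):

        int create_example(void)
        {
                struct example *ex;
                int err;

                ex = alloc_example();
                if (!ex)
                        return -ENOMEM;

                err = register_example(ex);
                if (err)
                        goto out_free;

                err = expose_example(ex);
                if (err)
                        goto out_unregister;

                return 0;

        out_unregister:
                unregister_example(ex);
        out_free:
                free_example(ex);
                return err;
        }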
@@ -5182,21 +5125,16 @@ static int __init bonding_init(void)
int i;
int res;
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
res = bond_check_params(&bonding_defaults);
- if (res) {
+ if (res)
goto out;
- }
-#ifdef CONFIG_PROC_FS
bond_create_proc_dir();
-#endif
-
- init_rwsem(&bonding_rwsem);
for (i = 0; i < max_bonds; i++) {
- res = bond_create(NULL, &bonding_defaults);
+ res = bond_create(NULL);
if (res)
goto err;
}
@@ -5238,13 +5176,3 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
-MODULE_SUPPORTED_DEVICE("most ethernet devices");
-
-/*
- * Local variables:
- * c-indent-level: 8
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
-
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index d2873153522..55bf34f59bb 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1,4 +1,3 @@
-
/*
* Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
*
@@ -34,33 +33,14 @@
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
#include <net/net_namespace.h>
#include "bonding.h"
-#define to_dev(obj) container_of(obj,struct device,kobj)
+#define to_dev(obj) container_of(obj, struct device, kobj)
#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
-/*---------------------------- Declarations -------------------------------*/
-
-static int expected_refcount = -1;
-/*--------------------------- Data Structures -----------------------------*/
-
-/* Bonding sysfs lock. Why can't we just use the subsystem lock?
- * Because kobject_register tries to acquire the subsystem lock. If
- * we already hold the lock (which we would if the user was creating
- * a new bond through the sysfs interface), we deadlock.
- * This lock is only needed when deleting a bond - we need to make sure
- * that we don't collide with an ongoing ioctl.
- */
-
-struct rw_semaphore bonding_rwsem;
-
-
-
-
-/*------------------------------ Functions --------------------------------*/
-
/*
* "show" function for the bond_masters attribute.
* The class parameter is ignored.
@@ -70,7 +50,7 @@ static ssize_t bonding_show_bonds(struct class *cls, char *buf)
int res = 0;
struct bonding *bond;
- down_read(&(bonding_rwsem));
+ rtnl_lock();
list_for_each_entry(bond, &bond_dev_list, bond_list) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
@@ -84,10 +64,22 @@ static ssize_t bonding_show_bonds(struct class *cls, char *buf)
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
- up_read(&(bonding_rwsem));
+
+ rtnl_unlock();
return res;
}
+static struct net_device *bond_get_by_name(const char *ifname)
+{
+ struct bonding *bond;
+
+ list_for_each_entry(bond, &bond_dev_list, bond_list) {
+ if (strncmp(bond->dev->name, ifname, IFNAMSIZ) == 0)
+ return bond->dev;
+ }
+ return NULL;
+}
+
/*
* "store" function for the bond_masters attribute. This is what
* creates and deletes entire bonds.
@@ -96,12 +88,12 @@ static ssize_t bonding_show_bonds(struct class *cls, char *buf)
*
*/
-static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t count)
+static ssize_t bonding_store_bonds(struct class *cls,
+ const char *buffer, size_t count)
{
char command[IFNAMSIZ + 1] = {0, };
char *ifname;
int rv, res = count;
- struct bonding *bond;
sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
ifname = command + 1;
@@ -110,67 +102,48 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
goto err_no_cmd;
if (command[0] == '+') {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s is being created...\n", ifname);
- rv = bond_create(ifname, &bonding_defaults);
+ rv = bond_create(ifname);
if (rv) {
- printk(KERN_INFO DRV_NAME ": Bond creation failed.\n");
+ pr_info(DRV_NAME ": Bond creation failed.\n");
res = rv;
}
- goto out;
- }
+ } else if (command[0] == '-') {
+ struct net_device *bond_dev;
- if (command[0] == '-') {
rtnl_lock();
- down_write(&bonding_rwsem);
-
- list_for_each_entry(bond, &bond_dev_list, bond_list)
- if (strnicmp(bond->dev->name, ifname, IFNAMSIZ) == 0) {
- /* check the ref count on the bond's kobject.
- * If it's > expected, then there's a file open,
- * and we have to fail.
- */
- if (atomic_read(&bond->dev->dev.kobj.kref.refcount)
- > expected_refcount){
- printk(KERN_INFO DRV_NAME
- ": Unable remove bond %s due to open references.\n",
- ifname);
- res = -EPERM;
- goto out_unlock;
- }
- printk(KERN_INFO DRV_NAME
- ": %s is being deleted...\n",
- bond->dev->name);
- bond_destroy(bond);
- goto out_unlock;
- }
-
- printk(KERN_ERR DRV_NAME
- ": unable to delete non-existent bond %s\n", ifname);
- res = -ENODEV;
- goto out_unlock;
- }
-
-err_no_cmd:
- printk(KERN_ERR DRV_NAME
- ": no command found in bonding_masters. Use +ifname or -ifname.\n");
- return -EPERM;
-
-out_unlock:
- up_write(&bonding_rwsem);
- rtnl_unlock();
+ bond_dev = bond_get_by_name(ifname);
+ if (bond_dev) {
+ pr_info(DRV_NAME ": %s is being deleted...\n",
+ ifname);
+ unregister_netdevice(bond_dev);
+ } else {
+ pr_err(DRV_NAME ": unable to delete non-existent %s\n",
+ ifname);
+ res = -ENODEV;
+ }
+ rtnl_unlock();
+ } else
+ goto err_no_cmd;
/* Always return either count or an error. If you return 0, you'll
* get called forever, which is bad.
*/
-out:
return res;
+
+err_no_cmd:
+ pr_err(DRV_NAME ": no command found in bonding_masters."
+ " Use +ifname or -ifname.\n");
+ return -EPERM;
}
+
/* class attribute for bond_masters file. This ends up in /sys/class/net */
static CLASS_ATTR(bonding_masters, S_IWUSR | S_IRUGO,
bonding_show_bonds, bonding_store_bonds);
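Because bonding_masters is a class attribute it appears as /sys/class/net/bonding_masters; writing "+ifname" creates a bond, "-ifname" tears one down, and the store function deliberately returns count (or a negative errno) so sysfs does not call it again for the same write. A small userspace sketch of driving it -- the path is the driver's standard one, the rest is an illustration with minimal error handling:

        #include <fcntl.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/sys/class/net/bonding_masters", O_WRONLY);

                if (fd < 0)
                        return 1;
                /* "+bond1" creates the device, "-bond1" would delete it */
                if (write(fd, "+bond1", strlen("+bond1")) < 0) {
                        close(fd);
                        return 1;
                }
                close(fd);
                return 0;
        }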
-int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave)
+int bond_create_slave_symlinks(struct net_device *master,
+ struct net_device *slave)
{
char linkname[IFNAMSIZ+7];
int ret = 0;
@@ -181,19 +154,20 @@ int bond_create_slave_symlinks(struct net_device *master, struct net_device *sla
if (ret)
return ret;
/* next, create a link from the master to the slave */
- sprintf(linkname,"slave_%s",slave->name);
+ sprintf(linkname, "slave_%s", slave->name);
ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
linkname);
return ret;
}
-void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave)
+void bond_destroy_slave_symlinks(struct net_device *master,
+ struct net_device *slave)
{
char linkname[IFNAMSIZ+7];
sysfs_remove_link(&(slave->dev.kobj), "master");
- sprintf(linkname,"slave_%s",slave->name);
+ sprintf(linkname, "slave_%s", slave->name);
sysfs_remove_link(&(master->dev.kobj), linkname);
}
@@ -251,8 +225,8 @@ static ssize_t bonding_store_slaves(struct device *d,
/* Note: We can't hold bond->lock here, as bond_create grabs it. */
- rtnl_lock();
- down_write(&(bonding_rwsem));
+ if (!rtnl_trylock())
+ return restart_syscall();
sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
ifname = command + 1;
@@ -264,46 +238,47 @@ static ssize_t bonding_store_slaves(struct device *d,
/* Got a slave name in ifname. Is it already in the list? */
found = 0;
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, i)
- if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
- printk(KERN_ERR DRV_NAME
- ": %s: Interface %s is already enslaved!\n",
- bond->dev->name, ifname);
- ret = -EPERM;
- read_unlock(&bond->lock);
- goto out;
- }
- read_unlock(&bond->lock);
- printk(KERN_INFO DRV_NAME ": %s: Adding slave %s.\n",
- bond->dev->name, ifname);
- dev = dev_get_by_name(&init_net, ifname);
+ /* FIXME: get netns from sysfs object */
+ dev = __dev_get_by_name(&init_net, ifname);
if (!dev) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: Interface %s does not exist!\n",
bond->dev->name, ifname);
- ret = -EPERM;
+ ret = -ENODEV;
goto out;
}
- else
- dev_put(dev);
if (dev->flags & IFF_UP) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Error: Unable to enslave %s "
"because it is already up.\n",
bond->dev->name, dev->name);
ret = -EPERM;
goto out;
}
+
+ read_lock(&bond->lock);
+ bond_for_each_slave(bond, slave, i)
+ if (slave->dev == dev) {
+ pr_err(DRV_NAME
+ ": %s: Interface %s is already enslaved!\n",
+ bond->dev->name, ifname);
+ ret = -EPERM;
+ read_unlock(&bond->lock);
+ goto out;
+ }
+ read_unlock(&bond->lock);
+
+ pr_info(DRV_NAME ": %s: Adding slave %s.\n",
+ bond->dev->name, ifname);
+
/* If this is the first slave, then we need to set
the master's hardware address to be the same as the
slave's. */
- if (!(*((u32 *) & (bond->dev->dev_addr[0])))) {
+ if (is_zero_ether_addr(bond->dev->dev_addr))
memcpy(bond->dev->dev_addr, dev->dev_addr,
dev->addr_len);
- }
/* Set the slave's MTU to match the bond */
original_mtu = dev->mtu;
@@ -317,9 +292,9 @@ static ssize_t bonding_store_slaves(struct device *d,
bond_for_each_slave(bond, slave, i)
if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0)
slave->original_mtu = original_mtu;
- if (res) {
+ if (res)
ret = res;
- }
+
goto out;
}
@@ -333,7 +308,7 @@ static ssize_t bonding_store_slaves(struct device *d,
break;
}
if (dev) {
- printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n",
+ pr_info(DRV_NAME ": %s: Removing slave %s\n",
bond->dev->name, dev->name);
res = bond_release(bond->dev, dev);
if (res) {
@@ -342,9 +317,9 @@ static ssize_t bonding_store_slaves(struct device *d,
}
/* set the slave MTU to the default */
dev_set_mtu(dev, original_mtu);
- }
- else {
- printk(KERN_ERR DRV_NAME ": unable to remove non-existent slave %s for bond %s.\n",
+ } else {
+ pr_err(DRV_NAME ": unable to remove non-existent"
+ " slave %s for bond %s.\n",
ifname, bond->dev->name);
ret = -ENODEV;
}
@@ -352,16 +327,16 @@ static ssize_t bonding_store_slaves(struct device *d,
}
err_no_cmd:
- printk(KERN_ERR DRV_NAME ": no command found in slaves file for bond %s. Use +ifname or -ifname.\n", bond->dev->name);
+ pr_err(DRV_NAME ": no command found in slaves file for bond %s. Use +ifname or -ifname.\n", bond->dev->name);
ret = -EPERM;
out:
- up_write(&(bonding_rwsem));
rtnl_unlock();
return ret;
}
-static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves, bonding_store_slaves);
+static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
+ bonding_store_slaves);
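The rtnl_trylock()/restart_syscall() pair that replaces the old rtnl_lock()/bonding_rwsem dance above is the standard way to keep a sysfs store from deadlocking against a device removal that already holds the RTNL and is waiting for this very sysfs file to be torn down: if the lock is contended, the write returns to userspace and is transparently restarted. The pattern on its own, with a hypothetical attribute:

        static ssize_t example_store(struct device *d,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
        {
                if (!rtnl_trylock())
                        return restart_syscall();

                /* ... update state that must be changed under the RTNL ... */

                rtnl_unlock();
                return count;
        }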
/*
* Show and set the bonding mode. The bond interface must be down to
@@ -385,16 +360,15 @@ static ssize_t bonding_store_mode(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->dev->flags & IFF_UP) {
- printk(KERN_ERR DRV_NAME
- ": unable to update mode of %s because interface is up.\n",
- bond->dev->name);
+ pr_err(DRV_NAME ": unable to update mode of %s"
+ " because interface is up.\n", bond->dev->name);
ret = -EPERM;
goto out;
}
new_value = bond_parse_parm(buf, bond_mode_tbl);
if (new_value < 0) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Ignoring invalid mode value %.*s.\n",
bond->dev->name,
(int)strlen(buf) - 1, buf);
@@ -409,17 +383,19 @@ static ssize_t bonding_store_mode(struct device *d,
bond->params.mode = new_value;
bond_set_mode_ops(bond, bond->params.mode);
- printk(KERN_INFO DRV_NAME ": %s: setting mode to %s (%d).\n",
- bond->dev->name, bond_mode_tbl[new_value].modename, new_value);
+ pr_info(DRV_NAME ": %s: setting mode to %s (%d).\n",
+ bond->dev->name, bond_mode_tbl[new_value].modename,
+ new_value);
}
out:
return ret;
}
-static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, bonding_show_mode, bonding_store_mode);
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+ bonding_show_mode, bonding_store_mode);
/*
- * Show and set the bonding transmit hash method. The bond interface must be down to
- * change the xmit hash policy.
+ * Show and set the bonding transmit hash method.
+ * The bond interface must be down to change the xmit hash policy.
*/
static ssize_t bonding_show_xmit_hash(struct device *d,
struct device_attribute *attr,
@@ -440,7 +416,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->dev->flags & IFF_UP) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
"%s: Interface is up. Unable to update xmit policy.\n",
bond->dev->name);
ret = -EPERM;
@@ -449,7 +425,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
new_value = bond_parse_parm(buf, xmit_hashtype_tbl);
if (new_value < 0) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Ignoring invalid xmit hash policy value %.*s.\n",
bond->dev->name,
(int)strlen(buf) - 1, buf);
@@ -458,13 +434,15 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
} else {
bond->params.xmit_policy = new_value;
bond_set_mode_ops(bond, bond->params.mode);
- printk(KERN_INFO DRV_NAME ": %s: setting xmit hash policy to %s (%d).\n",
- bond->dev->name, xmit_hashtype_tbl[new_value].modename, new_value);
+ pr_info(DRV_NAME ": %s: setting xmit hash policy to %s (%d).\n",
+ bond->dev->name,
+ xmit_hashtype_tbl[new_value].modename, new_value);
}
out:
return ret;
}
-static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR, bonding_show_xmit_hash, bonding_store_xmit_hash);
+static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
+ bonding_show_xmit_hash, bonding_store_xmit_hash);
/*
* Show and set arp_validate.
@@ -489,39 +467,41 @@ static ssize_t bonding_store_arp_validate(struct device *d,
new_value = bond_parse_parm(buf, arp_validate_tbl);
if (new_value < 0) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Ignoring invalid arp_validate value %s\n",
bond->dev->name, buf);
return -EINVAL;
}
if (new_value && (bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: arp_validate only supported in active-backup mode.\n",
bond->dev->name);
return -EINVAL;
}
- printk(KERN_INFO DRV_NAME ": %s: setting arp_validate to %s (%d).\n",
+ pr_info(DRV_NAME ": %s: setting arp_validate to %s (%d).\n",
bond->dev->name, arp_validate_tbl[new_value].modename,
new_value);
- if (!bond->params.arp_validate && new_value) {
+ if (!bond->params.arp_validate && new_value)
bond_register_arp(bond);
- } else if (bond->params.arp_validate && !new_value) {
+ else if (bond->params.arp_validate && !new_value)
bond_unregister_arp(bond);
- }
bond->params.arp_validate = new_value;
return count;
}
-static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate, bonding_store_arp_validate);
+static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
+ bonding_store_arp_validate);
/*
* Show and store fail_over_mac. User only allowed to change the
* value when there are no slaves.
*/
-static ssize_t bonding_show_fail_over_mac(struct device *d, struct device_attribute *attr, char *buf)
+static ssize_t bonding_show_fail_over_mac(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct bonding *bond = to_bond(d);
@@ -530,13 +510,15 @@ static ssize_t bonding_show_fail_over_mac(struct device *d, struct device_attrib
bond->params.fail_over_mac);
}
-static ssize_t bonding_store_fail_over_mac(struct device *d, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t bonding_store_fail_over_mac(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int new_value;
struct bonding *bond = to_bond(d);
if (bond->slave_cnt != 0) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Can't alter fail_over_mac with slaves in bond.\n",
bond->dev->name);
return -EPERM;
@@ -544,21 +526,22 @@ static ssize_t bonding_store_fail_over_mac(struct device *d, struct device_attri
new_value = bond_parse_parm(buf, fail_over_mac_tbl);
if (new_value < 0) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Ignoring invalid fail_over_mac value %s.\n",
bond->dev->name, buf);
return -EINVAL;
}
bond->params.fail_over_mac = new_value;
- printk(KERN_INFO DRV_NAME ": %s: Setting fail_over_mac to %s (%d).\n",
+ pr_info(DRV_NAME ": %s: Setting fail_over_mac to %s (%d).\n",
bond->dev->name, fail_over_mac_tbl[new_value].modename,
new_value);
return count;
}
-static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR, bonding_show_fail_over_mac, bonding_store_fail_over_mac);
+static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
+ bonding_show_fail_over_mac, bonding_store_fail_over_mac);
/*
* Show and set the arp timer interval. There are two tricky bits
@@ -583,28 +566,28 @@ static ssize_t bonding_store_arp_interval(struct device *d,
struct bonding *bond = to_bond(d);
if (sscanf(buf, "%d", &new_value) != 1) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: no arp_interval value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
bond->dev->name, new_value, INT_MAX);
ret = -EINVAL;
goto out;
}
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: Setting ARP monitoring interval to %d.\n",
bond->dev->name, new_value);
bond->params.arp_interval = new_value;
if (bond->params.arp_interval)
bond->dev->priv_flags |= IFF_MASTER_ARPMON;
if (bond->params.miimon) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: ARP monitoring cannot be used with MII monitoring. "
"%s Disabling MII monitoring.\n",
bond->dev->name, bond->dev->name);
@@ -615,7 +598,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
}
}
if (!bond->params.arp_targets[0]) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: ARP monitoring has been set up, "
"but no ARP targets have been specified.\n",
bond->dev->name);
@@ -641,7 +624,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
out:
return ret;
}
-static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR , bonding_show_arp_interval, bonding_store_arp_interval);
+static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
+ bonding_show_arp_interval, bonding_store_arp_interval);
/*
* Show and set the arp targets.
@@ -677,7 +661,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
/* look for adds */
if (buf[0] == '+') {
if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: invalid ARP target %pI4 specified for addition\n",
bond->dev->name, &newtarget);
ret = -EINVAL;
@@ -686,14 +670,14 @@ static ssize_t bonding_store_arp_targets(struct device *d,
/* look for an empty slot to put the target in, and check for dupes */
for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
if (targets[i] == newtarget) { /* duplicate */
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: ARP target %pI4 is already present\n",
bond->dev->name, &newtarget);
ret = -EINVAL;
goto out;
}
if (targets[i] == 0) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: adding ARP target %pI4.\n",
bond->dev->name, &newtarget);
done = 1;
@@ -701,17 +685,16 @@ static ssize_t bonding_store_arp_targets(struct device *d,
}
}
if (!done) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: ARP target table is full!\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
- }
- else if (buf[0] == '-') {
+ } else if (buf[0] == '-') {
if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: invalid ARP target %pI4 specified for removal\n",
bond->dev->name, &newtarget);
ret = -EINVAL;
@@ -721,7 +704,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
if (targets[i] == newtarget) {
int j;
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: removing ARP target %pI4.\n",
bond->dev->name, &newtarget);
for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++)
@@ -732,15 +715,15 @@ static ssize_t bonding_store_arp_targets(struct device *d,
}
}
if (!done) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: unable to remove nonexistent ARP target %pI4.\n",
bond->dev->name, &newtarget);
ret = -EINVAL;
goto out;
}
- }
- else {
- printk(KERN_ERR DRV_NAME ": no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
+ } else {
+ pr_err(DRV_NAME ": no command found in arp_ip_targets file"
+ " for bond %s. Use +<addr> or -<addr>.\n",
bond->dev->name);
ret = -EPERM;
goto out;
@@ -773,7 +756,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
struct bonding *bond = to_bond(d);
if (!(bond->params.miimon)) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Unable to set down delay as MII monitoring is disabled\n",
bond->dev->name);
ret = -EPERM;
@@ -781,14 +764,14 @@ static ssize_t bonding_store_downdelay(struct device *d,
}
if (sscanf(buf, "%d", &new_value) != 1) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: no down delay value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Invalid down delay value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
ret = -EINVAL;
@@ -803,15 +786,17 @@ static ssize_t bonding_store_downdelay(struct device *d,
bond->params.miimon);
}
bond->params.downdelay = new_value / bond->params.miimon;
- printk(KERN_INFO DRV_NAME ": %s: Setting down delay to %d.\n",
- bond->dev->name, bond->params.downdelay * bond->params.miimon);
+ pr_info(DRV_NAME ": %s: Setting down delay to %d.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
}
out:
return ret;
}
-static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR , bonding_show_downdelay, bonding_store_downdelay);
+static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
+ bonding_show_downdelay, bonding_store_downdelay);
static ssize_t bonding_show_updelay(struct device *d,
struct device_attribute *attr,
@@ -831,7 +816,7 @@ static ssize_t bonding_store_updelay(struct device *d,
struct bonding *bond = to_bond(d);
if (!(bond->params.miimon)) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Unable to set up delay as MII monitoring is disabled\n",
bond->dev->name);
ret = -EPERM;
@@ -839,14 +824,14 @@ static ssize_t bonding_store_updelay(struct device *d,
}
if (sscanf(buf, "%d", &new_value) != 1) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: no up delay value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Invalid down delay value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
ret = -EINVAL;
@@ -861,7 +846,7 @@ static ssize_t bonding_store_updelay(struct device *d,
bond->params.miimon);
}
bond->params.updelay = new_value / bond->params.miimon;
- printk(KERN_INFO DRV_NAME ": %s: Setting up delay to %d.\n",
+ pr_info(DRV_NAME ": %s: Setting up delay to %d.\n",
bond->dev->name, bond->params.updelay * bond->params.miimon);
}
@@ -869,7 +854,8 @@ static ssize_t bonding_store_updelay(struct device *d,
out:
return ret;
}
-static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR , bonding_show_updelay, bonding_store_updelay);
+static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
+ bonding_show_updelay, bonding_store_updelay);
/*
* Show and set the LACP interval. Interface must be down, and the mode
@@ -894,7 +880,7 @@ static ssize_t bonding_store_lacp(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->dev->flags & IFF_UP) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Unable to update LACP rate because interface is up.\n",
bond->dev->name);
ret = -EPERM;
@@ -902,7 +888,7 @@ static ssize_t bonding_store_lacp(struct device *d,
}
if (bond->params.mode != BOND_MODE_8023AD) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
bond->dev->name);
ret = -EPERM;
@@ -913,19 +899,20 @@ static ssize_t bonding_store_lacp(struct device *d,
if ((new_value == 1) || (new_value == 0)) {
bond->params.lacp_fast = new_value;
- printk(KERN_INFO DRV_NAME
- ": %s: Setting LACP rate to %s (%d).\n",
- bond->dev->name, bond_lacp_tbl[new_value].modename, new_value);
+ pr_info(DRV_NAME ": %s: Setting LACP rate to %s (%d).\n",
+ bond->dev->name, bond_lacp_tbl[new_value].modename,
+ new_value);
} else {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Ignoring invalid LACP rate value %.*s.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
+ bond->dev->name, (int)strlen(buf) - 1, buf);
ret = -EINVAL;
}
out:
return ret;
}
-static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp);
+static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
+ bonding_show_lacp, bonding_store_lacp);
static ssize_t bonding_show_ad_select(struct device *d,
struct device_attribute *attr,
@@ -947,7 +934,7 @@ static ssize_t bonding_store_ad_select(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->dev->flags & IFF_UP) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Unable to update ad_select because interface "
"is up.\n", bond->dev->name);
ret = -EPERM;
@@ -958,12 +945,12 @@ static ssize_t bonding_store_ad_select(struct device *d,
if (new_value != -1) {
bond->params.ad_select = new_value;
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: Setting ad_select to %s (%d).\n",
bond->dev->name, ad_select_tbl[new_value].modename,
new_value);
} else {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Ignoring invalid ad_select value %.*s.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
ret = -EINVAL;
@@ -971,8 +958,8 @@ static ssize_t bonding_store_ad_select(struct device *d,
out:
return ret;
}
-
-static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR, bonding_show_ad_select, bonding_store_ad_select);
+static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
+ bonding_show_ad_select, bonding_store_ad_select);
/*
* Show and set the number of grat ARP to send after a failover event.
@@ -994,14 +981,14 @@ static ssize_t bonding_store_n_grat_arp(struct device *d,
struct bonding *bond = to_bond(d);
if (sscanf(buf, "%d", &new_value) != 1) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: no num_grat_arp value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0 || new_value > 255) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Invalid num_grat_arp value %d not in range 0-255; rejected.\n",
bond->dev->name, new_value);
ret = -EINVAL;
@@ -1012,10 +999,11 @@ static ssize_t bonding_store_n_grat_arp(struct device *d,
out:
return ret;
}
-static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR, bonding_show_n_grat_arp, bonding_store_n_grat_arp);
+static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
+ bonding_show_n_grat_arp, bonding_store_n_grat_arp);
/*
- * Show and set the number of unsolicted NA's to send after a failover event.
+ * Show and set the number of unsolicited NA's to send after a failover event.
*/
static ssize_t bonding_show_n_unsol_na(struct device *d,
struct device_attribute *attr,
@@ -1034,25 +1022,26 @@ static ssize_t bonding_store_n_unsol_na(struct device *d,
struct bonding *bond = to_bond(d);
if (sscanf(buf, "%d", &new_value) != 1) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: no num_unsol_na value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
+
if (new_value < 0 || new_value > 255) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Invalid num_unsol_na value %d not in range 0-255; rejected.\n",
bond->dev->name, new_value);
ret = -EINVAL;
goto out;
- } else {
+ } else
bond->params.num_unsol_na = new_value;
- }
out:
return ret;
}
-static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR, bonding_show_n_unsol_na, bonding_store_n_unsol_na);
+static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
+ bonding_show_n_unsol_na, bonding_store_n_unsol_na);
/*
* Show and set the MII monitor interval. There are two tricky bits
@@ -1077,37 +1066,37 @@ static ssize_t bonding_store_miimon(struct device *d,
struct bonding *bond = to_bond(d);
if (sscanf(buf, "%d", &new_value) != 1) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: no miimon value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: Invalid miimon value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
ret = -EINVAL;
goto out;
} else {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: Setting MII monitoring interval to %d.\n",
bond->dev->name, new_value);
bond->params.miimon = new_value;
- if(bond->params.updelay)
- printk(KERN_INFO DRV_NAME
+ if (bond->params.updelay)
+ pr_info(DRV_NAME
": %s: Note: Updating updelay (to %d) "
"since it is a multiple of the miimon value.\n",
bond->dev->name,
bond->params.updelay * bond->params.miimon);
- if(bond->params.downdelay)
- printk(KERN_INFO DRV_NAME
+ if (bond->params.downdelay)
+ pr_info(DRV_NAME
": %s: Note: Updating downdelay (to %d) "
"since it is a multiple of the miimon value.\n",
bond->dev->name,
bond->params.downdelay * bond->params.miimon);
if (bond->params.arp_interval) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: MII monitoring cannot be used with "
"ARP monitoring. Disabling ARP monitoring...\n",
bond->dev->name);
@@ -1141,7 +1130,8 @@ static ssize_t bonding_store_miimon(struct device *d,
out:
return ret;
}
-static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, bonding_show_miimon, bonding_store_miimon);
+static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
+ bonding_show_miimon, bonding_store_miimon);
/*
* Show and set the primary slave. The store function is much
@@ -1171,12 +1161,13 @@ static ssize_t bonding_store_primary(struct device *d,
struct slave *slave;
struct bonding *bond = to_bond(d);
- rtnl_lock();
+ if (!rtnl_trylock())
+ return restart_syscall();
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
if (!USES_PRIMARY(bond->params.mode)) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: Unable to set primary slave; %s is in mode %d\n",
bond->dev->name, bond->dev->name, bond->params.mode);
} else {
@@ -1184,7 +1175,7 @@ static ssize_t bonding_store_primary(struct device *d,
if (strnicmp
(slave->dev->name, buf,
strlen(slave->dev->name)) == 0) {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: Setting %s as primary slave.\n",
bond->dev->name, slave->dev->name);
bond->primary_slave = slave;
@@ -1196,13 +1187,13 @@ static ssize_t bonding_store_primary(struct device *d,
/* if we got here, then we didn't match the name of any slave */
if (strlen(buf) == 0 || buf[0] == '\n') {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: Setting primary slave to None.\n",
bond->dev->name);
bond->primary_slave = NULL;
bond_select_active_slave(bond);
} else {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: Unable to set %.*s as primary slave as it is not a slave.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
}
@@ -1214,7 +1205,8 @@ out:
return count;
}
-static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary);
+static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
+ bonding_show_primary, bonding_store_primary);
/*
* Show and set the use_carrier flag.
@@ -1237,7 +1229,7 @@ static ssize_t bonding_store_carrier(struct device *d,
if (sscanf(buf, "%d", &new_value) != 1) {
- printk(KERN_ERR DRV_NAME
+ pr_err(DRV_NAME
": %s: no use_carrier value specified.\n",
bond->dev->name);
ret = -EINVAL;
@@ -1245,17 +1237,18 @@ static ssize_t bonding_store_carrier(struct device *d,
}
if ((new_value == 0) || (new_value == 1)) {
bond->params.use_carrier = new_value;
- printk(KERN_INFO DRV_NAME ": %s: Setting use_carrier to %d.\n",
+ pr_info(DRV_NAME ": %s: Setting use_carrier to %d.\n",
bond->dev->name, new_value);
} else {
- printk(KERN_INFO DRV_NAME
+ pr_info(DRV_NAME
": %s: Ignoring invalid use_carrier value %d.\n",
bond->dev->name, new_value);
}
out:
return count;
}
-static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, bonding_show_carrier, bonding_store_carrier);
+static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
+ bonding_show_carrier, bonding_store_carrier);
/*
@@ -1284,19 +1277,20 @@ static ssize_t bonding_store_active_slave(struct device *d,
{
int i;
struct slave *slave;
- struct slave *old_active = NULL;
- struct slave *new_active = NULL;
+ struct slave *old_active = NULL;
+ struct slave *new_active = NULL;
struct bonding *bond = to_bond(d);
- rtnl_lock();
+ if (!rtnl_trylock())
+ return restart_syscall();
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
- if (!USES_PRIMARY(bond->params.mode)) {
- printk(KERN_INFO DRV_NAME
- ": %s: Unable to change active slave; %s is in mode %d\n",
- bond->dev->name, bond->dev->name, bond->params.mode);
- } else {
+ if (!USES_PRIMARY(bond->params.mode))
+ pr_info(DRV_NAME ": %s: Unable to change active slave;"
+ " %s is in mode %d\n",
+ bond->dev->name, bond->dev->name, bond->params.mode);
+ else {
bond_for_each_slave(bond, slave, i) {
if (strnicmp
(slave->dev->name, buf,
@@ -1335,18 +1329,18 @@ static ssize_t bonding_store_active_slave(struct device *d,
/* if we got here, then we didn't match the name of any slave */
if (strlen(buf) == 0 || buf[0] == '\n') {
- printk(KERN_INFO DRV_NAME
- ": %s: Setting active slave to None.\n",
- bond->dev->name);
+ pr_info(DRV_NAME
+ ": %s: Setting active slave to None.\n",
+ bond->dev->name);
bond->primary_slave = NULL;
- bond_select_active_slave(bond);
+ bond_select_active_slave(bond);
} else {
- printk(KERN_INFO DRV_NAME
- ": %s: Unable to set %.*s as active slave as it is not a slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
+ pr_info(DRV_NAME ": %s: Unable to set %.*s"
+ " as active slave as it is not a slave.\n",
+ bond->dev->name, (int)strlen(buf) - 1, buf);
}
}
-out:
+ out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
rtnl_unlock();
@@ -1354,7 +1348,8 @@ out:
return count;
}
-static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR, bonding_show_active_slave, bonding_store_active_slave);
+static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
+ bonding_show_active_slave, bonding_store_active_slave);
/*
@@ -1371,7 +1366,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
curr = bond->curr_active_slave;
read_unlock(&bond->curr_slave_lock);
- return sprintf(buf, "%s\n", (curr) ? "up" : "down");
+ return sprintf(buf, "%s\n", curr ? "up" : "down");
}
static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
@@ -1388,7 +1383,9 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
if (bond->params.mode == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.aggregator_id);
+ count = sprintf(buf, "%d\n",
+ (bond_3ad_get_active_agg_info(bond, &ad_info))
+ ? 0 : ad_info.aggregator_id);
}
return count;
@@ -1408,7 +1405,9 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
if (bond->params.mode == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0: ad_info.ports);
+ count = sprintf(buf, "%d\n",
+ (bond_3ad_get_active_agg_info(bond, &ad_info))
+ ? 0 : ad_info.ports);
}
return count;
@@ -1428,7 +1427,9 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
if (bond->params.mode == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.actor_key);
+ count = sprintf(buf, "%d\n",
+ (bond_3ad_get_active_agg_info(bond, &ad_info))
+ ? 0 : ad_info.actor_key);
}
return count;
@@ -1448,7 +1449,9 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
if (bond->params.mode == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.partner_key);
+ count = sprintf(buf, "%d\n",
+ (bond_3ad_get_active_agg_info(bond, &ad_info))
+ ? 0 : ad_info.partner_key);
}
return count;
@@ -1468,9 +1471,8 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
if (bond->params.mode == BOND_MODE_8023AD) {
struct ad_info ad_info;
- if (!bond_3ad_get_active_agg_info(bond, &ad_info)) {
+ if (!bond_3ad_get_active_agg_info(bond, &ad_info))
count = sprintf(buf, "%pM\n", ad_info.partner_system);
- }
}
return count;
@@ -1538,6 +1540,7 @@ int bond_create_sysfs(void)
printk(KERN_ERR
"network device named %s already exists in sysfs",
class_attr_bonding_masters.attr.name);
+ ret = 0;
}
return ret;
@@ -1562,12 +1565,8 @@ int bond_create_sysfs_entry(struct bonding *bond)
int err;
err = sysfs_create_group(&(dev->dev.kobj), &bonding_group);
- if (err) {
+ if (err)
printk(KERN_EMERG "eek! didn't create group!\n");
- }
-
- if (expected_refcount < 1)
- expected_refcount = atomic_read(&bond->dev->dev.kobj.kref.refcount);
return err;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index ca849d2adf9..6290a502742 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -286,8 +286,7 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
static inline void bond_set_slave_inactive_flags(struct slave *slave)
{
struct bonding *bond = netdev_priv(slave->dev->master);
- if (bond->params.mode != BOND_MODE_TLB &&
- bond->params.mode != BOND_MODE_ALB)
+ if (!bond_is_lb(bond))
slave->state = BOND_STATE_BACKUP;
slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
if (slave_do_arp_validate(bond, slave))
@@ -322,8 +321,7 @@ static inline void bond_unset_master_alb_flags(struct bonding *bond)
struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
-int bond_create(char *name, struct bond_params *params);
-void bond_destroy(struct bonding *bond);
+int bond_create(const char *name);
int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_create_sysfs(void);
void bond_destroy_sysfs(void);
@@ -350,12 +348,8 @@ extern const struct bond_parm_tbl bond_mode_tbl[];
extern const struct bond_parm_tbl xmit_hashtype_tbl[];
extern const struct bond_parm_tbl arp_validate_tbl[];
extern const struct bond_parm_tbl fail_over_mac_tbl[];
-extern struct bond_params bonding_defaults;
extern struct bond_parm_tbl ad_select_tbl[];
-/* exported from bond_sysfs.c */
-extern struct rw_semaphore bonding_rwsem;
-
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
void bond_send_unsolicited_na(struct bonding *bond);
void bond_register_ipv6_notifier(void);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 57def0d5737..d5e18812bf4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -12,6 +12,68 @@ config CAN_VCAN
This driver can also be built as a module. If so, the module
will be called vcan.
+config CAN_DEV
+ tristate "Platform CAN drivers with Netlink support"
+ depends on CAN
+	default y
+ ---help---
+ Enables the common framework for platform CAN drivers with Netlink
+ support. This is the standard library for CAN drivers.
+ If unsure, say Y.
+
+config CAN_CALC_BITTIMING
+ bool "CAN bit-timing calculation"
+ depends on CAN_DEV
+	default y
+ ---help---
+ If enabled, CAN bit-timing parameters will be calculated for the
+ bit-rate specified via Netlink argument "bitrate" when the device
+ get started. This works fine for the most common CAN controllers
+ with standard bit-rates but may fail for exotic bit-rates or CAN
+ source clock frequencies. Disabling saves some space, but then the
+ bit-timing parameters must be specified directly using the Netlink
+ arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
+ If unsure, say Y.
+
+config CAN_SJA1000
+ depends on CAN_DEV
+ tristate "Philips SJA1000"
+ ---help---
+ Driver for the SJA1000 CAN controllers from Philips or NXP
+
+config CAN_SJA1000_PLATFORM
+ depends on CAN_SJA1000
+ tristate "Generic Platform Bus based SJA1000 driver"
+ ---help---
+ This driver adds support for the SJA1000 chips connected to
+ the "platform bus" (Linux abstraction for directly to the
+ processor attached devices). Which can be found on various
+ boards from Phytec (http://www.phytec.de) like the PCM027,
+ PCM038.
+
+config CAN_SJA1000_OF_PLATFORM
+ depends on CAN_SJA1000 && PPC_OF
+ tristate "Generic OF Platform Bus based SJA1000 driver"
+ ---help---
+ This driver adds support for the SJA1000 chips connected to
+ the OpenFirmware "platform bus" found on embedded systems with
+ OpenFirmware bindings, e.g. if you have a PowerPC based system
+ you may want to enable this option.
+
+config CAN_EMS_PCI
+ tristate "EMS CPC-PCI and CPC-PCIe Card"
+ depends on PCI && CAN_SJA1000
+ ---help---
+ This driver is for the one or two channel CPC-PCI and CPC-PCIe
+ cards from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
+
+config CAN_KVASER_PCI
+ tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
+ depends on PCI && CAN_SJA1000
+ ---help---
+	  This driver is for the PCIcanx and PCIcan cards (1, 2 or
+ 4 channel) from Kvaser (http://www.kvaser.com).
+
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index c4bead705cd..523a941b358 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -3,3 +3,10 @@
#
obj-$(CONFIG_CAN_VCAN) += vcan.o
+
+obj-$(CONFIG_CAN_DEV) += can-dev.o
+can-dev-y := dev.o
+
+obj-$(CONFIG_CAN_SJA1000) += sja1000/
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
new file mode 100644
index 00000000000..574daddc21b
--- /dev/null
+++ b/drivers/net/can/dev.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/netlink.h>
+#include <net/rtnetlink.h>
+
+#define MOD_DESC "CAN device driver interface"
+
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+
+#ifdef CONFIG_CAN_CALC_BITTIMING
+#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+
+/*
+ * Bit-timing calculation derived from:
+ *
+ * Code based on LinCAN sources and H8S2638 project
+ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+ * Copyright 2005 Stanislav Marek
+ * email: pisa@cmp.felk.cvut.cz
+ *
+ * Calculates proper bit-timing parameters for a specified bit-rate
+ * and sample-point, which can then be used to set the bit-timing
+ * registers of the CAN controller. You can find more information
+ * in the header file linux/can/netlink.h.
+ */
+static int can_update_spt(const struct can_bittiming_const *btc,
+ int sampl_pt, int tseg, int *tseg1, int *tseg2)
+{
+ *tseg2 = tseg + 1 - (sampl_pt * (tseg + 1)) / 1000;
+ if (*tseg2 < btc->tseg2_min)
+ *tseg2 = btc->tseg2_min;
+ if (*tseg2 > btc->tseg2_max)
+ *tseg2 = btc->tseg2_max;
+ *tseg1 = tseg - *tseg2;
+ if (*tseg1 > btc->tseg1_max) {
+ *tseg1 = btc->tseg1_max;
+ *tseg2 = tseg - *tseg1;
+ }
+ return 1000 * (tseg + 1 - *tseg2) / (tseg + 1);
+}
+
+static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_bittiming_const *btc = priv->bittiming_const;
+ long rate, best_rate = 0;
+ long best_error = 1000000000, error = 0;
+ int best_tseg = 0, best_brp = 0, brp = 0;
+ int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0;
+ int spt_error = 1000, spt = 0, sampl_pt;
+ u64 v64;
+
+ if (!priv->bittiming_const)
+ return -ENOTSUPP;
+
+ /* Use CIA recommended sample points */
+ if (bt->sample_point) {
+ sampl_pt = bt->sample_point;
+ } else {
+ if (bt->bitrate > 800000)
+ sampl_pt = 750;
+ else if (bt->bitrate > 500000)
+ sampl_pt = 800;
+ else
+ sampl_pt = 875;
+ }
+
+ /* tseg even = round down, odd = round up */
+ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+ tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+ tsegall = 1 + tseg / 2;
+ /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+ /* chose brp step which is possible in system */
+ brp = (brp / btc->brp_inc) * btc->brp_inc;
+ if ((brp < btc->brp_min) || (brp > btc->brp_max))
+ continue;
+ rate = priv->clock.freq / (brp * tsegall);
+ error = bt->bitrate - rate;
+ /* tseg brp biterror */
+ if (error < 0)
+ error = -error;
+ if (error > best_error)
+ continue;
+ best_error = error;
+ if (error == 0) {
+ spt = can_update_spt(btc, sampl_pt, tseg / 2,
+ &tseg1, &tseg2);
+ error = sampl_pt - spt;
+ if (error < 0)
+ error = -error;
+ if (error > spt_error)
+ continue;
+ spt_error = error;
+ }
+ best_tseg = tseg / 2;
+ best_brp = brp;
+ best_rate = rate;
+ if (error == 0)
+ break;
+ }
+
+ if (best_error) {
+ /* Error in one-tenth of a percent */
+ error = (best_error * 1000) / bt->bitrate;
+ if (error > CAN_CALC_MAX_ERROR) {
+ dev_err(dev->dev.parent,
+ "bitrate error %ld.%ld%% too high\n",
+ error / 10, error % 10);
+ return -EDOM;
+ } else {
+ dev_warn(dev->dev.parent, "bitrate error %ld.%ld%%\n",
+ error / 10, error % 10);
+ }
+ }
+
+ /* real sample point */
+ bt->sample_point = can_update_spt(btc, sampl_pt, best_tseg,
+ &tseg1, &tseg2);
+
+ v64 = (u64)best_brp * 1000000000UL;
+ do_div(v64, priv->clock.freq);
+ bt->tq = (u32)v64;
+ bt->prop_seg = tseg1 / 2;
+ bt->phase_seg1 = tseg1 - bt->prop_seg;
+ bt->phase_seg2 = tseg2;
+ bt->sjw = 1;
+ bt->brp = best_brp;
+ /* real bit-rate */
+ bt->bitrate = priv->clock.freq / (bt->brp * (tseg1 + tseg2 + 1));
+
+ return 0;
+}
+#else /* !CONFIG_CAN_CALC_BITTIMING */
+static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
+{
+ dev_err(dev->dev.parent, "bit-timing calculation not available\n");
+ return -EINVAL;
+}
+#endif /* CONFIG_CAN_CALC_BITTIMING */
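To make the arithmetic above concrete, a worked example with assumed numbers that are not taken from the patch (8 MHz CAN clock, 500 kbit/s, 87.5% sample point): with brp = 1 a bit is 8000000 / 500000 = 16 time quanta long, so tseg (everything after the sync quantum) is 15; can_update_spt() then gives tseg2 = 16 - (875 * 16) / 1000 = 2 and tseg1 = 13, i.e. an actual sample point of 14/16 = 87.5% with tq = 125 ns. The same computation as a self-contained sketch, leaving out the clamping against the controller's tseg1/tseg2 limits that the real code performs:

        #include <stdio.h>

        #define CLOCK_HZ        8000000         /* assumed CAN source clock */
        #define BITRATE         500000          /* assumed target bit-rate */
        #define SAMPLE_PT       875             /* per mille, CiA recommendation */

        int main(void)
        {
                int brp = 1;
                int nbt = CLOCK_HZ / (brp * BITRATE);   /* time quanta per bit: 16 */
                int tseg = nbt - 1;                     /* quanta after the sync segment */
                int tseg2 = tseg + 1 - (SAMPLE_PT * (tseg + 1)) / 1000;
                int tseg1 = tseg - tseg2;

                printf("tq=%d ns, tseg1=%d, tseg2=%d, sample point=%d/1000\n",
                       1000000000 / (CLOCK_HZ / brp), tseg1, tseg2,
                       1000 * (tseg + 1 - tseg2) / (tseg + 1));
                return 0;
        }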
+
+/*
+ * Checks the validity of the specified bit-timing parameters prop_seg,
+ * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
+ * prescaler value brp. You can find more information in the header
+ * file linux/can/netlink.h.
+ */
+static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_bittiming_const *btc = priv->bittiming_const;
+ int tseg1, alltseg;
+ u64 brp64;
+
+ if (!priv->bittiming_const)
+ return -ENOTSUPP;
+
+ tseg1 = bt->prop_seg + bt->phase_seg1;
+ if (!bt->sjw)
+ bt->sjw = 1;
+ if (bt->sjw > btc->sjw_max ||
+ tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
+ bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
+ return -ERANGE;
+
+ brp64 = (u64)priv->clock.freq * (u64)bt->tq;
+ if (btc->brp_inc > 1)
+ do_div(brp64, btc->brp_inc);
+ brp64 += 500000000UL - 1;
+ do_div(brp64, 1000000000UL); /* the practicable BRP */
+ if (btc->brp_inc > 1)
+ brp64 *= btc->brp_inc;
+ bt->brp = (u32)brp64;
+
+ if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
+ return -EINVAL;
+
+ alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
+ bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
+ bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
+
+ return 0;
+}
+
+int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* Check if the CAN device has bit-timing parameters */
+ if (priv->bittiming_const) {
+
+ /* Non-expert mode? Check if the bitrate has been pre-defined */
+ if (!bt->tq)
+ /* Determine bit-timing parameters */
+ err = can_calc_bittiming(dev, bt);
+ else
+ /* Check bit-timing params and calculate proper brp */
+ err = can_fixup_bittiming(dev, bt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
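can_get_bittiming() thus supports two configuration styles: either only a bit-rate is given (tq == 0) and the parameters are calculated, or the raw timing is given (tq != 0) and only the prescaler is derived and range-checked; can_changelink() below rejects requests that specify both or neither. A hedged illustration of the two shapes of struct can_bittiming (the numeric values are just the example above, not defaults):

        /* simple: ask for a bit-rate, let the framework calculate the rest */
        struct can_bittiming bt_by_rate = {
                .bitrate        = 500000,
        };

        /* expert: give the raw timing, only brp and bitrate get derived */
        struct can_bittiming bt_raw = {
                .tq             = 125,  /* ns */
                .prop_seg       = 6,
                .phase_seg1     = 7,
                .phase_seg2     = 2,
                .sjw            = 1,
        };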
+
+/*
+ * Local echo of CAN messages
+ *
+ * CAN network devices *should* support a local echo functionality
+ * (see Documentation/networking/can.txt). To test the handling of CAN
+ * interfaces that do not support the local echo both driver types are
+ * implemented. In the case that the driver does not support the echo
+ * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
+ * to perform the echo as a fallback solution.
+ */
+static void can_flush_echo_skb(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ int i;
+
+ for (i = 0; i < CAN_ECHO_SKB_MAX; i++) {
+ if (priv->echo_skb[i]) {
+ kfree_skb(priv->echo_skb[i]);
+ priv->echo_skb[i] = NULL;
+ stats->tx_dropped++;
+ stats->tx_aborted_errors++;
+ }
+ }
+}
+
+/*
+ * Put the skb on the stack to be looped back locally later on
+ *
+ * The function is typically called in the start_xmit function
+ * of the device driver. The driver must protect access to
+ * priv->echo_skb, if necessary.
+ */
+void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, int idx)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* check flag whether this packet has to be looped back */
+ if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) {
+ kfree_skb(skb);
+ return;
+ }
+
+ if (!priv->echo_skb[idx]) {
+ struct sock *srcsk = skb->sk;
+
+ if (atomic_read(&skb->users) != 1) {
+ struct sk_buff *old_skb = skb;
+
+ skb = skb_clone(old_skb, GFP_ATOMIC);
+ kfree_skb(old_skb);
+ if (!skb)
+ return;
+ } else
+ skb_orphan(skb);
+
+ skb->sk = srcsk;
+
+ /* make settings for echo to reduce code in irq context */
+ skb->protocol = htons(ETH_P_CAN);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->dev = dev;
+
+ /* save this skb for tx interrupt echo handling */
+ priv->echo_skb[idx] = skb;
+ } else {
+ /* locking problem with netif_stop_queue() ?? */
+ dev_err(dev->dev.parent, "%s: BUG! echo_skb is occupied!\n",
+ __func__);
+ kfree_skb(skb);
+ }
+}
+EXPORT_SYMBOL_GPL(can_put_echo_skb);
+
+/*
+ * Get the skb from the stack and loop it back locally
+ *
+ * The function is typically called when the TX done interrupt
+ * is handled in the device driver. The driver must protect
+ * access to priv->echo_skb, if necessary.
+ */
+void can_get_echo_skb(struct net_device *dev, int idx)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if ((dev->flags & IFF_ECHO) && priv->echo_skb[idx]) {
+ netif_rx(priv->echo_skb[idx]);
+ priv->echo_skb[idx] = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(can_get_echo_skb);
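The intended use of the two helpers above, sketched for a hypothetical controller with a single TX buffer (register access, locking and flow control are left out; only can_put_echo_skb() and can_get_echo_skb() are real API here): the frame is parked when it is handed to the hardware and echoed back to the stack from the TX-done interrupt.

        static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                /* keep the skb for local echo; index 0, single TX buffer */
                can_put_echo_skb(skb, dev, 0);

                /* ... copy skb->data (a struct can_frame) into the chip's TX buffer ... */

                return 0;
        }

        static void example_tx_done_irq(struct net_device *dev)
        {
                /* the frame made it onto the bus: loop the stored copy back */
                can_get_echo_skb(dev, 0);
                dev->stats.tx_packets++;
        }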
+
+/*
+ * CAN device restart for bus-off recovery
+ */
+void can_restart(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ int err;
+
+ BUG_ON(netif_carrier_ok(dev));
+
+ /*
+ * No synchronization needed because the device is bus-off and
+ * no messages can come in or go out.
+ */
+ can_flush_echo_skb(dev);
+
+ /* send restart message upstream */
+ skb = dev_alloc_skb(sizeof(struct can_frame));
+ if (skb == NULL) {
+ err = -ENOMEM;
+ goto out;
+ }
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_CAN);
+ cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+ memset(cf, 0, sizeof(struct can_frame));
+ cf->can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
+ cf->can_dlc = CAN_ERR_DLC;
+
+ netif_rx(skb);
+
+ dev->last_rx = jiffies;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ dev_dbg(dev->dev.parent, "restarted\n");
+ priv->can_stats.restarts++;
+
+ /* Now restart the device */
+ err = priv->do_set_mode(dev, CAN_MODE_START);
+
+out:
+ netif_carrier_on(dev);
+ if (err)
+ dev_err(dev->dev.parent, "Error %d during restart", err);
+}
+
+int can_restart_now(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /*
+ * A manual restart is only permitted if automatic restart is
+ * disabled and the device is in the bus-off state
+ */
+ if (priv->restart_ms)
+ return -EINVAL;
+ if (priv->state != CAN_STATE_BUS_OFF)
+ return -EBUSY;
+
+ /* Runs as soon as possible in the timer context */
+ mod_timer(&priv->restart_timer, jiffies);
+
+ return 0;
+}
+
+/*
+ * CAN bus-off
+ *
+ * This function should be called when the device goes bus-off to
+ * tell the netif layer that no more packets can be sent or received.
+ * If enabled, a timer is started to trigger bus-off recovery.
+ */
+void can_bus_off(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ dev_dbg(dev->dev.parent, "bus-off\n");
+
+ netif_carrier_off(dev);
+ priv->can_stats.bus_off++;
+
+ if (priv->restart_ms)
+ mod_timer(&priv->restart_timer,
+ jiffies + (priv->restart_ms * HZ) / 1000);
+}
+EXPORT_SYMBOL_GPL(can_bus_off);
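A driver is expected to call can_bus_off() from its error handling when the controller reports bus-off; with restart_ms set, the restart timer re-arms the device automatically, otherwise recovery waits for a manual can_restart_now() triggered through the IFLA_CAN_RESTART netlink attribute. A hedged call-site sketch (the status bit and function names are hypothetical):

        static void example_error_irq(struct net_device *dev, u8 status)
        {
                struct can_priv *priv = netdev_priv(dev);

                if (status & EXAMPLE_SR_BUSOFF) {       /* hypothetical status bit */
                        priv->state = CAN_STATE_BUS_OFF;
                        /* marks the link down and, if restart_ms is set,
                         * schedules can_restart() */
                        can_bus_off(dev);
                }
        }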
+
+static void can_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_CAN;
+ dev->mtu = sizeof(struct can_frame);
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 10;
+
+ /* New-style flags. */
+ dev->flags = IFF_NOARP;
+ dev->features = NETIF_F_NO_CSUM;
+}
+
+/*
+ * Allocate and setup space for the CAN network device
+ */
+struct net_device *alloc_candev(int sizeof_priv)
+{
+ struct net_device *dev;
+ struct can_priv *priv;
+
+ dev = alloc_netdev(sizeof_priv, "can%d", can_setup);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+
+ priv->state = CAN_STATE_STOPPED;
+
+ init_timer(&priv->restart_timer);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_candev);
+
+/*
+ * Free space of the CAN network device
+ */
+void free_candev(struct net_device *dev)
+{
+ free_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(free_candev);
+
+/*
+ * Common open function when the device gets opened.
+ *
+ * This function should be called in the open function of the device
+ * driver.
+ */
+int open_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (!priv->bittiming.tq && !priv->bittiming.bitrate) {
+ dev_err(dev->dev.parent, "bit-timing not yet defined\n");
+ return -EINVAL;
+ }
+
+ setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(open_candev);
+
+/*
+ * Common close function for cleanup before the device gets closed.
+ *
+ * This function should be called in the close function of the device
+ * driver.
+ */
+void close_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (del_timer_sync(&priv->restart_timer))
+ dev_put(dev);
+ can_flush_echo_skb(dev);
+}
+EXPORT_SYMBOL_GPL(close_candev);
+
+/*
+ * CAN netlink interface
+ */
+static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
+ [IFLA_CAN_STATE] = { .type = NLA_U32 },
+ [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
+ [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
+ [IFLA_CAN_RESTART] = { .type = NLA_U32 },
+ [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_BITTIMING_CONST]
+ = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
+};
+
+static int can_changelink(struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* We need synchronization with dev->stop() */
+ ASSERT_RTNL();
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm;
+
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= cm->flags;
+ }
+
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
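+ /* exactly one of bitrate or tq may be set; can_get_bittiming() derives the rest */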
+ if ((!bt.bitrate && !bt.tq) || (bt.bitrate && bt.tq))
+ return -EINVAL;
+ err = can_get_bittiming(dev, &bt);
+ if (err)
+ return err;
+ memcpy(&priv->bittiming, &bt, sizeof(bt));
+
+ if (priv->do_set_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
+ if (data[IFLA_CAN_RESTART_MS]) {
+ /* Do not allow changing restart delay while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+ }
+
+ if (data[IFLA_CAN_RESTART]) {
+ /* Do not allow a restart while not running */
+ if (!(dev->flags & IFF_UP))
+ return -EINVAL;
+ err = can_restart_now(dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_ctrlmode cm = {.flags = priv->ctrlmode};
+ enum can_state state = priv->state;
+
+ if (priv->do_get_state)
+ priv->do_get_state(dev, &state);
+ NLA_PUT_U32(skb, IFLA_CAN_STATE, state);
+ NLA_PUT(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm);
+ NLA_PUT_U32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms);
+ NLA_PUT(skb, IFLA_CAN_BITTIMING,
+ sizeof(priv->bittiming), &priv->bittiming);
+ NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock);
+ if (priv->bittiming_const)
+ NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST,
+ sizeof(*priv->bittiming_const), priv->bittiming_const);
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ NLA_PUT(skb, IFLA_INFO_XSTATS,
+ sizeof(priv->can_stats), &priv->can_stats);
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops can_link_ops __read_mostly = {
+ .kind = "can",
+ .maxtype = IFLA_CAN_MAX,
+ .policy = can_policy,
+ .setup = can_setup,
+ .changelink = can_changelink,
+ .fill_info = can_fill_info,
+ .fill_xstats = can_fill_xstats,
+};
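+
+/*
+ * Usage sketch (assuming an iproute2 build with CAN netlink support):
+ * the "can" link type above is what commands such as
+ *
+ *   ip link set can0 type can bitrate 125000 restart-ms 100
+ *   ip link set can0 up
+ *
+ * talk to via the IFLA_CAN_* attributes handled in can_changelink().
+ */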
+
+/*
+ * Register the CAN network device
+ */
+int register_candev(struct net_device *dev)
+{
+ dev->rtnl_link_ops = &can_link_ops;
+ return register_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(register_candev);
+
+/*
+ * Unregister the CAN network device
+ */
+void unregister_candev(struct net_device *dev)
+{
+ unregister_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_candev);
+
+static __init int can_dev_init(void)
+{
+ int err;
+
+ err = rtnl_link_register(&can_link_ops);
+ if (!err)
+ printk(KERN_INFO MOD_DESC "\n");
+
+ return err;
+}
+module_init(can_dev_init);
+
+static __exit void can_dev_exit(void)
+{
+ rtnl_link_unregister(&can_link_ops);
+}
+module_exit(can_dev_exit);
+
+MODULE_ALIAS_RTNL_LINK("can");
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
new file mode 100644
index 00000000000..9d0c08da273
--- /dev/null
+++ b/drivers/net/can/sja1000/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the SJA1000 CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_SJA1000) += sja1000.o
+obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
+obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
+obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
+obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
new file mode 100644
index 00000000000..121b64101d7
--- /dev/null
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
+ * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/io.h>
+
+#include "sja1000.h"
+
+#define DRV_NAME "ems_pci"
+
+MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe CAN cards");
+MODULE_SUPPORTED_DEVICE("EMS CPC-PCI/PCIe CAN card");
+MODULE_LICENSE("GPL v2");
+
+#define EMS_PCI_MAX_CHAN 2
+
+struct ems_pci_card {
+ int channels;
+
+ struct pci_dev *pci_dev;
+ struct net_device *net_dev[EMS_PCI_MAX_CHAN];
+
+ void __iomem *conf_addr;
+ void __iomem *base_addr;
+};
+
+#define EMS_PCI_CAN_CLOCK (16000000 / 2)
+
+/*
+ * Register definitions and descriptions are from LinCAN 0.3.3.
+ *
+ * PSB4610 PITA-2 bridge control registers
+ */
+#define PITA2_ICR 0x00 /* Interrupt Control Register */
+#define PITA2_ICR_INT0 0x00000002 /* [RC] INT0 Active/Clear */
+#define PITA2_ICR_INT0_EN 0x00020000 /* [RW] Enable INT0 */
+
+#define PITA2_MISC 0x1c /* Miscellaneous Register */
+#define PITA2_MISC_CONFIG 0x04000000 /* Multiplexed parallel interface */
+
+/*
+ * The board configuration is probably as follows:
+ * RX1 is connected to ground.
+ * TX1 is not connected.
+ * CLKO is not connected.
+ * Setting the OCR register to 0xDA is a good idea.
+ * This means normal output mode, push-pull and the correct polarity.
+ */
+#define EMS_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ * You will probably also want to set the clock divider value to 7
+ * (meaning direct oscillator output) because the second SJA1000 chip
+ * is driven by the CLKOUT output of the first one.
+ */
+#define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
+#define EMS_PCI_MEM_SIZE 4096 /* Size of the remapped io-memory */
+#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */
+#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+
+#define EMS_PCI_PORT_BYTES 0x4 /* Each register occupies 4 bytes */
+
+#define EMS_PCI_VENDOR_ID 0x110a /* PCI device and vendor ID */
+#define EMS_PCI_DEVICE_ID 0x2104
+
+static struct pci_device_id ems_pci_tbl[] = {
+ {EMS_PCI_VENDOR_ID, EMS_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, ems_pci_tbl);
+
+/*
+ * Helper to read internal registers from card logic (not CAN)
+ */
+static u8 ems_pci_readb(struct ems_pci_card *card, unsigned int port)
+{
+ return readb(card->base_addr + (port * EMS_PCI_PORT_BYTES));
+}
+
+static u8 ems_pci_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return readb(priv->reg_base + (port * EMS_PCI_PORT_BYTES));
+}
+
+static void ems_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val)
+{
+ writeb(val, priv->reg_base + (port * EMS_PCI_PORT_BYTES));
+}
+
+static void ems_pci_post_irq(const struct sja1000_priv *priv)
+{
+ struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
+
+ /* reset int flag of pita */
+ writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0, card->conf_addr
+ + PITA2_ICR);
+}
+
+/*
+ * Check if a CAN controller is present at the specified location
+ * by trying to switch it into PeliCAN mode
+ */
+static inline int ems_pci_check_chan(const struct sja1000_priv *priv)
+{
+ unsigned char res;
+
+ /* Make sure SJA1000 is in reset mode */
+ ems_pci_write_reg(priv, REG_MOD, 1);
+
+ ems_pci_write_reg(priv, REG_CDR, CDR_PELICAN);
+
+ /* read reset-values */
+ res = ems_pci_read_reg(priv, REG_CDR);
+
+ if (res == CDR_PELICAN)
+ return 1;
+
+ return 0;
+}
+
+static void ems_pci_del_card(struct pci_dev *pdev)
+{
+ struct ems_pci_card *card = pci_get_drvdata(pdev);
+ struct net_device *dev;
+ int i = 0;
+
+ for (i = 0; i < card->channels; i++) {
+ dev = card->net_dev[i];
+
+ if (!dev)
+ continue;
+
+ dev_info(&pdev->dev, "Removing %s.\n", dev->name);
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
+ }
+
+ if (card->base_addr != NULL)
+ pci_iounmap(card->pci_dev, card->base_addr);
+
+ if (card->conf_addr != NULL)
+ pci_iounmap(card->pci_dev, card->conf_addr);
+
+ kfree(card);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void ems_pci_card_reset(struct ems_pci_card *card)
+{
+ /* Request board reset */
+ writeb(0, card->base_addr);
+}
+
+/*
+ * Probe PCI device for EMS CAN signature and register each available
+ * CAN channel to SJA1000 Socket-CAN subsystem.
+ */
+static int __devinit ems_pci_add_card(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct sja1000_priv *priv;
+ struct net_device *dev;
+ struct ems_pci_card *card;
+ int err, i;
+
+ /* Enabling PCI device */
+ if (pci_enable_device(pdev) < 0) {
+ dev_err(&pdev->dev, "Enabling PCI device failed\n");
+ return -ENODEV;
+ }
+
+ /* Allocating card structures to hold addresses, ... */
+ card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL);
+ if (card == NULL) {
+ dev_err(&pdev->dev, "Unable to allocate memory\n");
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, card);
+
+ card->pci_dev = pdev;
+
+ card->channels = 0;
+
+ /* Remap PITA configuration space, and controller memory area */
+ card->conf_addr = pci_iomap(pdev, 0, EMS_PCI_MEM_SIZE);
+ if (card->conf_addr == NULL) {
+ err = -ENOMEM;
+ goto failure_cleanup;
+ }
+
+ card->base_addr = pci_iomap(pdev, 1, EMS_PCI_MEM_SIZE);
+ if (card->base_addr == NULL) {
+ err = -ENOMEM;
+ goto failure_cleanup;
+ }
+
+ /* Configure PITA-2 parallel interface (enable MUX) */
+ writel(PITA2_MISC_CONFIG, card->conf_addr + PITA2_MISC);
+
+ /* Check for unique EMS CAN signature */
+ if (ems_pci_readb(card, 0) != 0x55 ||
+ ems_pci_readb(card, 1) != 0xAA ||
+ ems_pci_readb(card, 2) != 0x01 ||
+ ems_pci_readb(card, 3) != 0xCB ||
+ ems_pci_readb(card, 4) != 0x11) {
+ dev_err(&pdev->dev, "Not EMS Dr. Thomas Wuensche interface\n");
+ err = -ENODEV;
+ goto failure_cleanup;
+ }
+
+ ems_pci_card_reset(card);
+
+ /* Detect available channels */
+ for (i = 0; i < EMS_PCI_MAX_CHAN; i++) {
+ dev = alloc_sja1000dev(0);
+ if (dev == NULL) {
+ err = -ENOMEM;
+ goto failure_cleanup;
+ }
+
+ card->net_dev[i] = dev;
+ priv = netdev_priv(dev);
+ priv->priv = card;
+ priv->irq_flags = IRQF_SHARED;
+
+ dev->irq = pdev->irq;
+ priv->reg_base = card->base_addr + EMS_PCI_CAN_BASE_OFFSET
+ + (i * EMS_PCI_CAN_CTRL_SIZE);
+
+ /* Check if channel is present */
+ if (ems_pci_check_chan(priv)) {
+ priv->read_reg = ems_pci_read_reg;
+ priv->write_reg = ems_pci_write_reg;
+ priv->post_irq = ems_pci_post_irq;
+ priv->can.clock.freq = EMS_PCI_CAN_CLOCK;
+ priv->ocr = EMS_PCI_OCR;
+ priv->cdr = EMS_PCI_CDR;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* Enable interrupts from card */
+ writel(PITA2_ICR_INT0_EN, card->conf_addr + PITA2_ICR);
+
+ /* Register SJA1000 device */
+ err = register_sja1000dev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Registering device failed "
+ "(err=%d)\n", err);
+ free_sja1000dev(dev);
+ goto failure_cleanup;
+ }
+
+ card->channels++;
+
+ dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n",
+ i + 1, priv->reg_base, dev->irq);
+ } else {
+ free_sja1000dev(dev);
+ }
+ }
+
+ return 0;
+
+failure_cleanup:
+ dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
+
+ ems_pci_del_card(pdev);
+
+ return err;
+}
+
+static struct pci_driver ems_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = ems_pci_tbl,
+ .probe = ems_pci_add_card,
+ .remove = ems_pci_del_card,
+};
+
+static int __init ems_pci_init(void)
+{
+ return pci_register_driver(&ems_pci_driver);
+}
+
+static void __exit ems_pci_exit(void)
+{
+ pci_unregister_driver(&ems_pci_driver);
+}
+
+module_init(ems_pci_init);
+module_exit(ems_pci_exit);
+
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
new file mode 100644
index 00000000000..7dd7769b971
--- /dev/null
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2008 Per Dalen <per.dalen@cnw.se>
+ *
+ * Parts of this software are based on (derived) the following:
+ *
+ * - Kvaser linux driver, version 4.72 BETA
+ * Copyright (C) 2002-2007 KVASER AB
+ *
+ * - Lincan driver, version 0.3.3, OCERA project
+ * Copyright (C) 2004 Pavel Pisa
+ * Copyright (C) 2001 Arnaud Westenberg
+ *
+ * - Socketcan SJA1000 drivers
+ * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
+ * Copyright (c) 2003 Matthias Brukner, Trajet Gmbh, Rebenring 33,
+ * 38106 Braunschweig, GERMANY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/io.h>
+
+#include "sja1000.h"
+
+#define DRV_NAME "kvaser_pci"
+
+MODULE_AUTHOR("Per Dalen <per.dalen@cnw.se>");
+MODULE_DESCRIPTION("Socket-CAN driver for KVASER PCAN PCI cards");
+MODULE_SUPPORTED_DEVICE("KVASER PCAN PCI CAN card");
+MODULE_LICENSE("GPL v2");
+
+#define MAX_NO_OF_CHANNELS 4 /* max no of channels on a single card */
+
+struct kvaser_pci {
+ int channel;
+ struct pci_dev *pci_dev;
+ struct net_device *slave_dev[MAX_NO_OF_CHANNELS-1];
+ void __iomem *conf_addr;
+ void __iomem *res_addr;
+ int no_channels;
+ u8 xilinx_ver;
+};
+
+#define KVASER_PCI_CAN_CLOCK (16000000 / 2)
+
+/*
+ * The board configuration is probably as follows:
+ * RX1 is connected to ground.
+ * TX1 is not connected.
+ * CLKO is not connected.
+ * Setting the OCR register to 0xDA is a good idea.
+ * This means normal output mode, push-pull and the correct polarity.
+ */
+#define KVASER_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ * You will probably also want to set the clock divider value to 0
+ * (meaning divide-by-2), the Pelican bit, and the clock-off bit
+ * (you will have no need for CLKOUT anyway).
+ */
+#define KVASER_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
+
+/*
+ * These register values are valid for revision 14 of the Xilinx logic.
+ */
+#define XILINX_VERINT 7 /* Lower nibble simulates interrupts,
+ high nibble holds the version number. */
+
+#define XILINX_PRESUMED_VERSION 14
+
+/*
+ * Important S5920 registers
+ */
+#define S5920_INTCSR 0x38
+#define S5920_PTCR 0x60
+#define INTCSR_ADDON_INTENABLE_M 0x2000
+
+
+#define KVASER_PCI_PORT_BYTES 0x20
+
+#define PCI_CONFIG_PORT_SIZE 0x80 /* size of the config io-memory */
+#define PCI_PORT_SIZE 0x80 /* size of a channel io-memory */
+#define PCI_PORT_XILINX_SIZE 0x08 /* size of a xilinx io-memory */
+
+#define KVASER_PCI_VENDOR_ID1 0x10e8 /* the PCI device and vendor IDs */
+#define KVASER_PCI_DEVICE_ID1 0x8406
+
+#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */
+#define KVASER_PCI_DEVICE_ID2 0x0008
+
+static struct pci_device_id kvaser_pci_tbl[] = {
+ {KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,},
+ {KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,},
+ { 0,}
+};
+
+MODULE_DEVICE_TABLE(pci, kvaser_pci_tbl);
+
+static u8 kvaser_pci_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return ioread8(priv->reg_base + port);
+}
+
+static void kvaser_pci_write_reg(const struct sja1000_priv *priv,
+ int port, u8 val)
+{
+ iowrite8(val, priv->reg_base + port);
+}
+
+static void kvaser_pci_disable_irq(struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct kvaser_pci *board = priv->priv;
+ u32 intcsr;
+
+ /* Disable interrupts from card */
+ intcsr = ioread32(board->conf_addr + S5920_INTCSR);
+ intcsr &= ~INTCSR_ADDON_INTENABLE_M;
+ iowrite32(intcsr, board->conf_addr + S5920_INTCSR);
+}
+
+static void kvaser_pci_enable_irq(struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct kvaser_pci *board = priv->priv;
+ u32 tmp_en_io;
+
+ /* Enable interrupts from card */
+ tmp_en_io = ioread32(board->conf_addr + S5920_INTCSR);
+ tmp_en_io |= INTCSR_ADDON_INTENABLE_M;
+ iowrite32(tmp_en_io, board->conf_addr + S5920_INTCSR);
+}
+
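+/*
+ * Count the SJA1000 controllers on the card: write the reset-mode bit to
+ * each candidate channel and check whether it latches; a slot without a
+ * chip will not read the bit back.
+ */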
+static int number_of_sja1000_chip(void __iomem *base_addr)
+{
+ u8 status;
+ int i;
+
+ for (i = 0; i < MAX_NO_OF_CHANNELS; i++) {
+ /* reset chip */
+ iowrite8(MOD_RM, base_addr +
+ (i * KVASER_PCI_PORT_BYTES) + REG_MOD);
+ status = ioread8(base_addr +
+ (i * KVASER_PCI_PORT_BYTES) + REG_MOD);
+ /* check reset bit */
+ if (!(status & MOD_RM))
+ break;
+ }
+
+ return i;
+}
+
+static void kvaser_pci_del_chan(struct net_device *dev)
+{
+ struct sja1000_priv *priv;
+ struct kvaser_pci *board;
+ int i;
+
+ if (!dev)
+ return;
+ priv = netdev_priv(dev);
+ board = priv->priv;
+ if (!board)
+ return;
+
+ dev_info(&board->pci_dev->dev, "Removing device %s\n",
+ dev->name);
+
+ /* Disable PCI interrupts */
+ kvaser_pci_disable_irq(dev);
+
+ for (i = 0; i < board->no_channels - 1; i++) {
+ if (board->slave_dev[i]) {
+ dev_info(&board->pci_dev->dev, "Removing device %s\n",
+ board->slave_dev[i]->name);
+ unregister_sja1000dev(board->slave_dev[i]);
+ free_sja1000dev(board->slave_dev[i]);
+ }
+ }
+ unregister_sja1000dev(dev);
+
+ pci_iounmap(board->pci_dev, priv->reg_base);
+ pci_iounmap(board->pci_dev, board->conf_addr);
+ pci_iounmap(board->pci_dev, board->res_addr);
+
+ free_sja1000dev(dev);
+}
+
+static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
+ struct net_device **master_dev,
+ void __iomem *conf_addr,
+ void __iomem *res_addr,
+ void __iomem *base_addr)
+{
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ struct kvaser_pci *board;
+ int err, init_step;
+
+ dev = alloc_sja1000dev(sizeof(struct kvaser_pci));
+ if (dev == NULL)
+ return -ENOMEM;
+
+ priv = netdev_priv(dev);
+ board = priv->priv;
+
+ board->pci_dev = pdev;
+ board->channel = channel;
+
+ /* S5920 */
+ board->conf_addr = conf_addr;
+
+ /* XILINX board wide address */
+ board->res_addr = res_addr;
+
+ if (channel == 0) {
+ board->xilinx_ver =
+ ioread8(board->res_addr + XILINX_VERINT) >> 4;
+ init_step = 2;
+
+ /* Assert PTADR# - we're in passive mode so the other bits are
+ not important */
+ iowrite32(0x80808080UL, board->conf_addr + S5920_PTCR);
+
+ /* Enable interrupts from card */
+ kvaser_pci_enable_irq(dev);
+ } else {
+ struct sja1000_priv *master_priv = netdev_priv(*master_dev);
+ struct kvaser_pci *master_board = master_priv->priv;
+ master_board->slave_dev[channel - 1] = dev;
+ master_board->no_channels = channel + 1;
+ board->xilinx_ver = master_board->xilinx_ver;
+ }
+
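+ /* each channel's SJA1000 occupies a KVASER_PCI_PORT_BYTES (0x20) wide window */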
+ priv->reg_base = base_addr + channel * KVASER_PCI_PORT_BYTES;
+
+ priv->read_reg = kvaser_pci_read_reg;
+ priv->write_reg = kvaser_pci_write_reg;
+
+ priv->can.clock.freq = KVASER_PCI_CAN_CLOCK;
+
+ priv->ocr = KVASER_PCI_OCR;
+ priv->cdr = KVASER_PCI_CDR;
+
+ priv->irq_flags = IRQF_SHARED;
+ dev->irq = pdev->irq;
+
+ init_step = 4;
+
+ dev_info(&pdev->dev, "reg_base=%p conf_addr=%p irq=%d\n",
+ priv->reg_base, board->conf_addr, dev->irq);
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* Register SJA1000 device */
+ err = register_sja1000dev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Registering device failed (err=%d)\n",
+ err);
+ goto failure;
+ }
+
+ if (channel == 0)
+ *master_dev = dev;
+
+ return 0;
+
+failure:
+ kvaser_pci_del_chan(dev);
+ return err;
+}
+
+static int __devinit kvaser_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err;
+ struct net_device *master_dev = NULL;
+ struct sja1000_priv *priv;
+ struct kvaser_pci *board;
+ int no_channels;
+ void __iomem *base_addr = NULL;
+ void __iomem *conf_addr = NULL;
+ void __iomem *res_addr = NULL;
+ int i;
+
+ dev_info(&pdev->dev, "initializing device %04x:%04x\n",
+ pdev->vendor, pdev->device);
+
+ err = pci_enable_device(pdev);
+ if (err)
+ goto failure;
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto failure_release_pci;
+
+ /* S5920 */
+ conf_addr = pci_iomap(pdev, 0, PCI_CONFIG_PORT_SIZE);
+ if (conf_addr == NULL) {
+ err = -ENODEV;
+ goto failure_release_regions;
+ }
+
+ /* XILINX board wide address */
+ res_addr = pci_iomap(pdev, 2, PCI_PORT_XILINX_SIZE);
+ if (res_addr == NULL) {
+ err = -ENOMEM;
+ goto failure_iounmap;
+ }
+
+ base_addr = pci_iomap(pdev, 1, PCI_PORT_SIZE);
+ if (base_addr == NULL) {
+ err = -ENOMEM;
+ goto failure_iounmap;
+ }
+
+ no_channels = number_of_sja1000_chip(base_addr);
+ if (no_channels == 0) {
+ err = -ENOMEM;
+ goto failure_iounmap;
+ }
+
+ for (i = 0; i < no_channels; i++) {
+ err = kvaser_pci_add_chan(pdev, i, &master_dev,
+ conf_addr, res_addr,
+ base_addr);
+ if (err)
+ goto failure_cleanup;
+ }
+
+ priv = netdev_priv(master_dev);
+ board = priv->priv;
+
+ dev_info(&pdev->dev, "xilinx version=%d number of channels=%d\n",
+ board->xilinx_ver, board->no_channels);
+
+ pci_set_drvdata(pdev, master_dev);
+ return 0;
+
+failure_cleanup:
+ kvaser_pci_del_chan(master_dev);
+
+failure_iounmap:
+ if (conf_addr != NULL)
+ pci_iounmap(pdev, conf_addr);
+ if (res_addr != NULL)
+ pci_iounmap(pdev, res_addr);
+ if (base_addr != NULL)
+ pci_iounmap(pdev, base_addr);
+
+failure_release_regions:
+ pci_release_regions(pdev);
+
+failure_release_pci:
+ pci_disable_device(pdev);
+
+failure:
+ return err;
+
+}
+
+static void __devexit kvaser_pci_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ kvaser_pci_del_chan(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver kvaser_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = kvaser_pci_tbl,
+ .probe = kvaser_pci_init_one,
+ .remove = __devexit_p(kvaser_pci_remove_one),
+};
+
+static int __init kvaser_pci_init(void)
+{
+ return pci_register_driver(&kvaser_pci_driver);
+}
+
+static void __exit kvaser_pci_exit(void)
+{
+ pci_unregister_driver(&kvaser_pci_driver);
+}
+
+module_init(kvaser_pci_init);
+module_exit(kvaser_pci_exit);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
new file mode 100644
index 00000000000..571f133a8fe
--- /dev/null
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -0,0 +1,637 @@
+/*
+ * sja1000.c - Philips SJA1000 network device driver
+ *
+ * Copyright (c) 2003 Matthias Brukner, Trajet Gmbh, Rebenring 33,
+ * 38106 Braunschweig, GERMANY
+ *
+ * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include "sja1000.h"
+
+#define DRV_NAME "sja1000"
+
+MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
+
+static struct can_bittiming_const sja1000_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
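+/*
+ * Note the inverted return convention: 0 means probing failed, non-zero
+ * means a chip is assumed present (an all-ones read from the mode register
+ * usually indicates that nothing is mapped at that address).
+ */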
+static int sja1000_probe_chip(struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+
+ if (priv->reg_base && (priv->read_reg(priv, 0) == 0xFF)) {
+ printk(KERN_INFO "%s: probing @0x%lX failed\n",
+ DRV_NAME, dev->base_addr);
+ return 0;
+ }
+ return -1;
+}
+
+static void set_reset_mode(struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+ unsigned char status = priv->read_reg(priv, REG_MOD);
+ int i;
+
+ /* disable interrupts */
+ priv->write_reg(priv, REG_IER, IRQ_OFF);
+
+ for (i = 0; i < 100; i++) {
+ /* check reset bit */
+ if (status & MOD_RM) {
+ priv->can.state = CAN_STATE_STOPPED;
+ return;
+ }
+
+ priv->write_reg(priv, REG_MOD, MOD_RM); /* reset chip */
+ udelay(10);
+ status = priv->read_reg(priv, REG_MOD);
+ }
+
+ dev_err(dev->dev.parent, "setting SJA1000 into reset mode failed!\n");
+}
+
+static void set_normal_mode(struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+ unsigned char status = priv->read_reg(priv, REG_MOD);
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ /* check reset bit */
+ if ((status & MOD_RM) == 0) {
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ /* enable all interrupts */
+ priv->write_reg(priv, REG_IER, IRQ_ALL);
+ return;
+ }
+
+ /* set chip to normal mode */
+ priv->write_reg(priv, REG_MOD, 0x00);
+ udelay(10);
+ status = priv->read_reg(priv, REG_MOD);
+ }
+
+ dev_err(dev->dev.parent, "setting SJA1000 into normal mode failed!\n");
+}
+
+static void sja1000_start(struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+
+ /* enter reset mode first, if the chip is not already stopped */
+ if (priv->can.state != CAN_STATE_STOPPED)
+ set_reset_mode(dev);
+
+ /* Clear error counters and error code capture */
+ priv->write_reg(priv, REG_TXERR, 0x0);
+ priv->write_reg(priv, REG_RXERR, 0x0);
+ priv->read_reg(priv, REG_ECC);
+
+ /* leave reset mode */
+ set_normal_mode(dev);
+}
+
+static int sja1000_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+
+ if (!priv->open_time)
+ return -EINVAL;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ sja1000_start(dev);
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int sja1000_set_bittiming(struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u8 btr0, btr1;
+
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
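+
+ /*
+ * Worked example (illustrative values, not from a specific board): at
+ * 500 kbit/s on an 8 MHz CAN clock, can_get_bittiming() typically yields
+ * brp=1, sjw=1, prop_seg+phase_seg1=13 and phase_seg2=2, which encodes
+ * to the classic BTR0=0x00, BTR1=0x1c register pair.
+ */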
+
+ dev_info(dev->dev.parent,
+ "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
+
+ priv->write_reg(priv, REG_BTR0, btr0);
+ priv->write_reg(priv, REG_BTR1, btr1);
+
+ return 0;
+}
+
+/*
+ * initialize SJA1000 chip:
+ * - reset chip
+ * - set output mode
+ * - set baudrate
+ * - enable interrupts
+ * - start operating mode
+ */
+static void chipset_init(struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+
+ /* set clock divider and output control register */
+ priv->write_reg(priv, REG_CDR, priv->cdr | CDR_PELICAN);
+
+ /* set acceptance filter (accept all) */
+ priv->write_reg(priv, REG_ACCC0, 0x00);
+ priv->write_reg(priv, REG_ACCC1, 0x00);
+ priv->write_reg(priv, REG_ACCC2, 0x00);
+ priv->write_reg(priv, REG_ACCC3, 0x00);
+
+ priv->write_reg(priv, REG_ACCM0, 0xFF);
+ priv->write_reg(priv, REG_ACCM1, 0xFF);
+ priv->write_reg(priv, REG_ACCM2, 0xFF);
+ priv->write_reg(priv, REG_ACCM3, 0xFF);
+
+ priv->write_reg(priv, REG_OCR, priv->ocr | OCR_MODE_NORMAL);
+}
+
+/*
+ * transmit a CAN message
+ * message layout in the sk_buff should be like this:
+ * xx xx xx xx ff ll 00 11 22 33 44 55 66 77
+ * [ can-id ] [flags] [len] [can data (up to 8 bytes)]
+ */
+static int sja1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ uint8_t fi;
+ uint8_t dlc;
+ canid_t id;
+ uint8_t dreg;
+ int i;
+
+ netif_stop_queue(dev);
+
+ fi = dlc = cf->can_dlc;
+ id = cf->can_id;
+
+ if (id & CAN_RTR_FLAG)
+ fi |= FI_RTR;
+
+ if (id & CAN_EFF_FLAG) {
+ fi |= FI_FF;
+ dreg = EFF_BUF;
+ priv->write_reg(priv, REG_FI, fi);
+ priv->write_reg(priv, REG_ID1, (id & 0x1fe00000) >> (5 + 16));
+ priv->write_reg(priv, REG_ID2, (id & 0x001fe000) >> (5 + 8));
+ priv->write_reg(priv, REG_ID3, (id & 0x00001fe0) >> 5);
+ priv->write_reg(priv, REG_ID4, (id & 0x0000001f) << 3);
+ } else {
+ dreg = SFF_BUF;
+ priv->write_reg(priv, REG_FI, fi);
+ priv->write_reg(priv, REG_ID1, (id & 0x000007f8) >> 3);
+ priv->write_reg(priv, REG_ID2, (id & 0x00000007) << 5);
+ }
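+
+ /*
+ * Illustrative example: a standard-frame ID of 0x123 is written as
+ * REG_ID1 = (0x123 & 0x7f8) >> 3 = 0x24 and
+ * REG_ID2 = (0x123 & 0x7) << 5 = 0x60, i.e. the 11-bit identifier is
+ * left-aligned across the two ID registers.
+ */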
+
+ for (i = 0; i < dlc; i++)
+ priv->write_reg(priv, dreg++, cf->data[i]);
+
+ stats->tx_bytes += dlc;
+ dev->trans_start = jiffies;
+
+ can_put_echo_skb(skb, dev, 0);
+
+ priv->write_reg(priv, REG_CMR, CMD_TR);
+
+ return 0;
+}
+
+static void sja1000_rx(struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ uint8_t fi;
+ uint8_t dreg;
+ canid_t id;
+ uint8_t dlc;
+ int i;
+
+ skb = dev_alloc_skb(sizeof(struct can_frame));
+ if (skb == NULL)
+ return;
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_CAN);
+
+ fi = priv->read_reg(priv, REG_FI);
+ dlc = fi & 0x0F;
+
+ if (fi & FI_FF) {
+ /* extended frame format (EFF) */
+ dreg = EFF_BUF;
+ id = (priv->read_reg(priv, REG_ID1) << (5 + 16))
+ | (priv->read_reg(priv, REG_ID2) << (5 + 8))
+ | (priv->read_reg(priv, REG_ID3) << 5)
+ | (priv->read_reg(priv, REG_ID4) >> 3);
+ id |= CAN_EFF_FLAG;
+ } else {
+ /* standard frame format (SFF) */
+ dreg = SFF_BUF;
+ id = (priv->read_reg(priv, REG_ID1) << 3)
+ | (priv->read_reg(priv, REG_ID2) >> 5);
+ }
+
+ if (fi & FI_RTR)
+ id |= CAN_RTR_FLAG;
+
+ cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+ memset(cf, 0, sizeof(struct can_frame));
+ cf->can_id = id;
+ cf->can_dlc = dlc;
+ for (i = 0; i < dlc; i++)
+ cf->data[i] = priv->read_reg(priv, dreg++);
+
+ while (i < 8)
+ cf->data[i++] = 0;
+
+ /* release receive buffer */
+ priv->write_reg(priv, REG_CMR, CMD_RRB);
+
+ netif_rx(skb);
+
+ dev->last_rx = jiffies;
+ stats->rx_packets++;
+ stats->rx_bytes += dlc;
+}
+
+static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ enum can_state state = priv->can.state;
+ uint8_t ecc, alc;
+
+ skb = dev_alloc_skb(sizeof(struct can_frame));
+ if (skb == NULL)
+ return -ENOMEM;
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_CAN);
+ cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+ memset(cf, 0, sizeof(struct can_frame));
+ cf->can_id = CAN_ERR_FLAG;
+ cf->can_dlc = CAN_ERR_DLC;
+
+ if (isrc & IRQ_DOI) {
+ /* data overrun interrupt */
+ dev_dbg(dev->dev.parent, "data overrun interrupt\n");
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */
+ }
+
+ if (isrc & IRQ_EI) {
+ /* error warning interrupt */
+ dev_dbg(dev->dev.parent, "error warning interrupt\n");
+
+ if (status & SR_BS) {
+ state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(dev);
+ } else if (status & SR_ES) {
+ state = CAN_STATE_ERROR_WARNING;
+ } else
+ state = CAN_STATE_ERROR_ACTIVE;
+ }
+ if (isrc & IRQ_BEI) {
+ /* bus error interrupt */
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
+ ecc = priv->read_reg(priv, REG_ECC);
+
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (ecc & ECC_MASK) {
+ case ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+ cf->data[3] = ecc & ECC_SEG;
+ break;
+ }
+ /* Error occurred during transmission? */
+ if ((ecc & ECC_DIR) == 0)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+ if (isrc & IRQ_EPI) {
+ /* error passive interrupt */
+ dev_dbg(dev->dev.parent, "error passive interrupt\n");
+ if (status & SR_ES)
+ state = CAN_STATE_ERROR_PASSIVE;
+ else
+ state = CAN_STATE_ERROR_ACTIVE;
+ }
+ if (isrc & IRQ_ALI) {
+ /* arbitration lost interrupt */
+ dev_dbg(dev->dev.parent, "arbitration lost interrupt\n");
+ alc = priv->read_reg(priv, REG_ALC);
+ priv->can.can_stats.arbitration_lost++;
+ stats->rx_errors++;
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = alc & 0x1f;
+ }
+
+ if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
+ state == CAN_STATE_ERROR_PASSIVE)) {
+ uint8_t rxerr = priv->read_reg(priv, REG_RXERR);
+ uint8_t txerr = priv->read_reg(priv, REG_TXERR);
+ cf->can_id |= CAN_ERR_CRTL;
+ if (state == CAN_STATE_ERROR_WARNING) {
+ priv->can.can_stats.error_warning++;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ } else {
+ priv->can.can_stats.error_passive++;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ }
+ }
+
+ priv->can.state = state;
+
+ netif_rx(skb);
+
+ dev->last_rx = jiffies;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ return 0;
+}
+
+irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ uint8_t isrc, status;
+ int n = 0;
+
+ /* Shared interrupts and IRQ off? */
+ if (priv->read_reg(priv, REG_IER) == IRQ_OFF)
+ return IRQ_NONE;
+
+ if (priv->pre_irq)
+ priv->pre_irq(priv);
+
+ while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
+ n++;
+ status = priv->read_reg(priv, REG_SR);
+
+ if (isrc & IRQ_WUI)
+ dev_warn(dev->dev.parent, "wakeup interrupt\n");
+
+ if (isrc & IRQ_TI) {
+ /* transmission complete interrupt */
+ stats->tx_packets++;
+ can_get_echo_skb(dev, 0);
+ netif_wake_queue(dev);
+ }
+ if (isrc & IRQ_RI) {
+ /* receive interrupt */
+ while (status & SR_RBS) {
+ sja1000_rx(dev);
+ status = priv->read_reg(priv, REG_SR);
+ }
+ }
+ if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
+ /* error interrupt */
+ if (sja1000_err(dev, isrc, status))
+ break;
+ }
+ }
+
+ if (priv->post_irq)
+ priv->post_irq(priv);
+
+ if (n >= SJA1000_MAX_IRQ)
+ dev_dbg(dev->dev.parent, "%d messages handled in ISR", n);
+
+ return (n) ? IRQ_HANDLED : IRQ_NONE;
+}
+EXPORT_SYMBOL_GPL(sja1000_interrupt);
+
+static int sja1000_open(struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* set chip into reset mode */
+ set_reset_mode(dev);
+
+ /* common open */
+ err = open_candev(dev);
+ if (err)
+ return err;
+
+ /* register interrupt handler, if not done by the device driver */
+ if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) {
+ err = request_irq(dev->irq, &sja1000_interrupt, priv->irq_flags,
+ dev->name, (void *)dev);
+ if (err) {
+ close_candev(dev);
+ return -EAGAIN;
+ }
+ }
+
+ /* init and start the chip */
+ sja1000_start(dev);
+ priv->open_time = jiffies;
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int sja1000_close(struct net_device *dev)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ set_reset_mode(dev);
+
+ if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER))
+ free_irq(dev->irq, (void *)dev);
+
+ close_candev(dev);
+
+ priv->open_time = 0;
+
+ return 0;
+}
+
+struct net_device *alloc_sja1000dev(int sizeof_priv)
+{
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+
+ dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+
+ priv->dev = dev;
+ priv->can.bittiming_const = &sja1000_bittiming_const;
+ priv->can.do_set_bittiming = sja1000_set_bittiming;
+ priv->can.do_set_mode = sja1000_set_mode;
+
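+ /*
+ * Board-private data, if requested, is laid out directly behind
+ * struct sja1000_priv inside the candev allocation.
+ */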
+ if (sizeof_priv)
+ priv->priv = (void *)priv + sizeof(struct sja1000_priv);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_sja1000dev);
+
+void free_sja1000dev(struct net_device *dev)
+{
+ free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_sja1000dev);
+
+static const struct net_device_ops sja1000_netdev_ops = {
+ .ndo_open = sja1000_open,
+ .ndo_stop = sja1000_close,
+ .ndo_start_xmit = sja1000_start_xmit,
+};
+
+int register_sja1000dev(struct net_device *dev)
+{
+ if (!sja1000_probe_chip(dev))
+ return -ENODEV;
+
+ dev->flags |= IFF_ECHO; /* we support local echo */
+ dev->netdev_ops = &sja1000_netdev_ops;
+
+ set_reset_mode(dev);
+ chipset_init(dev);
+
+ return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_sja1000dev);
+
+void unregister_sja1000dev(struct net_device *dev)
+{
+ set_reset_mode(dev);
+ unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_sja1000dev);
+
+static __init int sja1000_init(void)
+{
+ printk(KERN_INFO "%s CAN netdevice driver\n", DRV_NAME);
+
+ return 0;
+}
+
+module_init(sja1000_init);
+
+static __exit void sja1000_exit(void)
+{
+ printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
+}
+
+module_exit(sja1000_exit);
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
new file mode 100644
index 00000000000..302d2c763ad
--- /dev/null
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -0,0 +1,181 @@
+/*
+ * sja1000.h - Philips SJA1000 network device driver
+ *
+ * Copyright (c) 2003 Matthias Brukner, Trajet Gmbh, Rebenring 33,
+ * 38106 Braunschweig, GERMANY
+ *
+ * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ *
+ */
+
+#ifndef SJA1000_DEV_H
+#define SJA1000_DEV_H
+
+#include <linux/can/dev.h>
+#include <linux/can/platform/sja1000.h>
+
+#define SJA1000_MAX_IRQ 20 /* max. number of interrupts handled in ISR */
+
+/* SJA1000 registers - manual section 6.4 (Pelican Mode) */
+#define REG_MOD 0x00
+#define REG_CMR 0x01
+#define REG_SR 0x02
+#define REG_IR 0x03
+#define REG_IER 0x04
+#define REG_ALC 0x0B
+#define REG_ECC 0x0C
+#define REG_EWL 0x0D
+#define REG_RXERR 0x0E
+#define REG_TXERR 0x0F
+#define REG_ACCC0 0x10
+#define REG_ACCC1 0x11
+#define REG_ACCC2 0x12
+#define REG_ACCC3 0x13
+#define REG_ACCM0 0x14
+#define REG_ACCM1 0x15
+#define REG_ACCM2 0x16
+#define REG_ACCM3 0x17
+#define REG_RMC 0x1D
+#define REG_RBSA 0x1E
+
+/* Common registers - manual section 6.5 */
+#define REG_BTR0 0x06
+#define REG_BTR1 0x07
+#define REG_OCR 0x08
+#define REG_CDR 0x1F
+
+#define REG_FI 0x10
+#define SFF_BUF 0x13
+#define EFF_BUF 0x15
+
+#define FI_FF 0x80
+#define FI_RTR 0x40
+
+#define REG_ID1 0x11
+#define REG_ID2 0x12
+#define REG_ID3 0x13
+#define REG_ID4 0x14
+
+#define CAN_RAM 0x20
+
+/* mode register */
+#define MOD_RM 0x01
+#define MOD_LOM 0x02
+#define MOD_STM 0x04
+#define MOD_AFM 0x08
+#define MOD_SM 0x10
+
+/* commands */
+#define CMD_SRR 0x10
+#define CMD_CDO 0x08
+#define CMD_RRB 0x04
+#define CMD_AT 0x02
+#define CMD_TR 0x01
+
+/* interrupt sources */
+#define IRQ_BEI 0x80
+#define IRQ_ALI 0x40
+#define IRQ_EPI 0x20
+#define IRQ_WUI 0x10
+#define IRQ_DOI 0x08
+#define IRQ_EI 0x04
+#define IRQ_TI 0x02
+#define IRQ_RI 0x01
+#define IRQ_ALL 0xFF
+#define IRQ_OFF 0x00
+
+/* status register content */
+#define SR_BS 0x80
+#define SR_ES 0x40
+#define SR_TS 0x20
+#define SR_RS 0x10
+#define SR_TCS 0x08
+#define SR_TBS 0x04
+#define SR_DOS 0x02
+#define SR_RBS 0x01
+
+#define SR_CRIT (SR_BS|SR_ES)
+
+/* ECC register */
+#define ECC_SEG 0x1F
+#define ECC_DIR 0x20
+#define ECC_ERR 6
+#define ECC_BIT 0x00
+#define ECC_FORM 0x40
+#define ECC_STUFF 0x80
+#define ECC_MASK 0xc0
+
+/*
+ * Flags for sja1000priv.flags
+ */
+#define SJA1000_CUSTOM_IRQ_HANDLER 0x1
+
+/*
+ * SJA1000 private data structure
+ */
+struct sja1000_priv {
+ struct can_priv can; /* must be the first member */
+ int open_time;
+ struct sk_buff *echo_skb;
+
+ /* the lower-layer is responsible for appropriate locking */
+ u8 (*read_reg) (const struct sja1000_priv *priv, int reg);
+ void (*write_reg) (const struct sja1000_priv *priv, int reg, u8 val);
+ void (*pre_irq) (const struct sja1000_priv *priv);
+ void (*post_irq) (const struct sja1000_priv *priv);
+
+ void *priv; /* for board-specific data */
+ struct net_device *dev;
+
+ void __iomem *reg_base; /* ioremap'ed address to registers */
+ unsigned long irq_flags; /* for request_irq() */
+
+ u16 flags; /* custom mode flags */
+ u8 ocr; /* output control register */
+ u8 cdr; /* clock divider register */
+};
+
+struct net_device *alloc_sja1000dev(int sizeof_priv);
+void free_sja1000dev(struct net_device *dev);
+int register_sja1000dev(struct net_device *dev);
+void unregister_sja1000dev(struct net_device *dev);
+
+irqreturn_t sja1000_interrupt(int irq, void *dev_id);
+
+#endif /* SJA1000_DEV_H */
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
new file mode 100644
index 00000000000..3373560405b
--- /dev/null
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -0,0 +1,235 @@
+/*
+ * Driver for SJA1000 CAN controllers on the OpenFirmware platform bus
+ *
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
+ * bus found on embedded PowerPC systems. You need a SJA1000 CAN node
+ * definition in your flattened device tree source (DTS) file similar to:
+ *
+ * can@3,100 {
+ * compatible = "nxp,sja1000";
+ * reg = <3 0x100 0x80>;
+ * interrupts = <2 0>;
+ * interrupt-parent = <&mpic>;
+ * nxp,external-clock-frequency = <16000000>;
+ * };
+ *
+ * See "Documentation/powerpc/dts-bindings/can/sja1000.txt" for further
+ * information.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#include <linux/of_platform.h>
+#include <asm/prom.h>
+
+#include "sja1000.h"
+
+#define DRV_NAME "sja1000_of_platform"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the OF platform bus");
+MODULE_LICENSE("GPL v2");
+
+#define SJA1000_OFP_CAN_CLOCK (16000000 / 2)
+
+#define SJA1000_OFP_OCR OCR_TX0_PULLDOWN
+#define SJA1000_OFP_CDR (CDR_CBP | CDR_CLK_OFF)
+
+static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
+{
+ return in_8(priv->reg_base + reg);
+}
+
+static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
+ int reg, u8 val)
+{
+ out_8(priv->reg_base + reg, val);
+}
+
+static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
+{
+ struct net_device *dev = dev_get_drvdata(&ofdev->dev);
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct device_node *np = ofdev->node;
+ struct resource res;
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
+ iounmap(priv->reg_base);
+ irq_dispose_mapping(dev->irq);
+
+ of_address_to_resource(np, 0, &res);
+ release_mem_region(res.start, resource_size(&res));
+
+ return 0;
+}
+
+static int __devinit sja1000_ofp_probe(struct of_device *ofdev,
+ const struct of_device_id *id)
+{
+ struct device_node *np = ofdev->node;
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ struct resource res;
+ const u32 *prop;
+ int err, irq, res_size, prop_size;
+ void __iomem *base;
+
+ err = of_address_to_resource(np, 0, &res);
+ if (err) {
+ dev_err(&ofdev->dev, "invalid address\n");
+ return err;
+ }
+
+ res_size = resource_size(&res);
+
+ if (!request_mem_region(res.start, res_size, DRV_NAME)) {
+ dev_err(&ofdev->dev, "couldn't request %#llx..%#llx\n",
+ (unsigned long long)res.start,
+ (unsigned long long)res.end);
+ return -EBUSY;
+ }
+
+ base = ioremap_nocache(res.start, res_size);
+ if (!base) {
+ dev_err(&ofdev->dev, "couldn't ioremap %#llx..%#llx\n",
+ (unsigned long long)res.start,
+ (unsigned long long)res.end);
+ err = -ENOMEM;
+ goto exit_release_mem;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq == NO_IRQ) {
+ dev_err(&ofdev->dev, "no irq found\n");
+ err = -ENODEV;
+ goto exit_unmap_mem;
+ }
+
+ dev = alloc_sja1000dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_dispose_irq;
+ }
+
+ priv = netdev_priv(dev);
+
+ priv->read_reg = sja1000_ofp_read_reg;
+ priv->write_reg = sja1000_ofp_write_reg;
+
+ prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ priv->can.clock.freq = *prop / 2;
+ else
+ priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
+
+ prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ priv->ocr |= *prop & OCR_MODE_MASK;
+ else
+ priv->ocr |= OCR_MODE_NORMAL; /* default */
+
+ prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+ else
+ priv->ocr |= OCR_TX0_PULLDOWN; /* default */
+
+ prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
+ if (prop && (prop_size == sizeof(u32)) && *prop) {
+ u32 divider = priv->can.clock.freq * 2 / *prop;
+
+ if (divider > 1)
+ priv->cdr |= divider / 2 - 1;
+ else
+ priv->cdr |= CDR_CLKOUT_MASK;
+ } else {
+ priv->cdr |= CDR_CLK_OFF; /* default */
+ }
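+
+ /*
+ * Worked example (hypothetical DT values): with a 16 MHz oscillator
+ * (can.clock.freq = 8 MHz) and nxp,clock-out-frequency = <8000000>,
+ * divider = 2 and the CDR clock-divider field becomes 0, i.e.
+ * divide-by-2 on CLKOUT.
+ */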
+
+ prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
+ if (!prop)
+ priv->cdr |= CDR_CBP; /* default */
+
+ priv->irq_flags = IRQF_SHARED;
+ priv->reg_base = base;
+
+ dev->irq = irq;
+
+ dev_info(&ofdev->dev,
+ "reg_base=0x%p irq=%d clock=%d ocr=0x%02x cdr=0x%02x\n",
+ priv->reg_base, dev->irq, priv->can.clock.freq,
+ priv->ocr, priv->cdr);
+
+ dev_set_drvdata(&ofdev->dev, dev);
+ SET_NETDEV_DEV(dev, &ofdev->dev);
+
+ err = register_sja1000dev(dev);
+ if (err) {
+ dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
+ DRV_NAME, err);
+ goto exit_free_sja1000;
+ }
+
+ return 0;
+
+exit_free_sja1000:
+ free_sja1000dev(dev);
+exit_dispose_irq:
+ irq_dispose_mapping(irq);
+exit_unmap_mem:
+ iounmap(base);
+exit_release_mem:
+ release_mem_region(res.start, res_size);
+
+ return err;
+}
+
+static struct of_device_id __devinitdata sja1000_ofp_table[] = {
+ {.compatible = "nxp,sja1000"},
+ {},
+};
+
+static struct of_platform_driver sja1000_ofp_driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .probe = sja1000_ofp_probe,
+ .remove = __devexit_p(sja1000_ofp_remove),
+ .match_table = sja1000_ofp_table,
+};
+
+static int __init sja1000_ofp_init(void)
+{
+ return of_register_platform_driver(&sja1000_ofp_driver);
+}
+module_init(sja1000_ofp_init);
+
+static void __exit sja1000_ofp_exit(void)
+{
+ of_unregister_platform_driver(&sja1000_ofp_driver);
+}
+module_exit(sja1000_ofp_exit);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
new file mode 100644
index 00000000000..628374c2a05
--- /dev/null
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2005 Sascha Hauer, Pengutronix
+ * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/sja1000.h>
+#include <linux/io.h>
+
+#include "sja1000.h"
+
+#define DRV_NAME "sja1000_platform"
+
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
+MODULE_LICENSE("GPL v2");
+
+static u8 sp_read_reg(const struct sja1000_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg);
+}
+
+static void sp_write_reg(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ iowrite8(val, priv->reg_base + reg);
+}
+
+static int sp_probe(struct platform_device *pdev)
+{
+ int err;
+ void __iomem *addr;
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ struct resource *res_mem, *res_irq;
+ struct sja1000_platform_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data provided!\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_mem || !res_irq) {
+ err = -ENODEV;
+ goto exit;
+ }
+
+ if (!request_mem_region(res_mem->start, resource_size(res_mem),
+ DRV_NAME)) {
+ err = -EBUSY;
+ goto exit;
+ }
+
+ addr = ioremap_nocache(res_mem->start, resource_size(res_mem));
+ if (!addr) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+
+ dev = alloc_sja1000dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_iounmap;
+ }
+ priv = netdev_priv(dev);
+
+ dev->irq = res_irq->start;
+ priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ priv->reg_base = addr;
+ priv->read_reg = sp_read_reg;
+ priv->write_reg = sp_write_reg;
+ priv->can.clock.freq = pdata->clock;
+ priv->ocr = pdata->ocr;
+ priv->cdr = pdata->cdr;
+
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_sja1000dev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+ DRV_NAME, err);
+ goto exit_free;
+ }
+
+ dev_info(&pdev->dev, "%s device registered (reg_base=%p, irq=%d)\n",
+ DRV_NAME, priv->reg_base, dev->irq);
+ return 0;
+
+ exit_free:
+ free_sja1000dev(dev);
+ exit_iounmap:
+ iounmap(addr);
+ exit_release:
+ release_mem_region(res_mem->start, resource_size(res_mem));
+ exit:
+ return err;
+}
+
+static int sp_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct resource *res;
+
+ unregister_sja1000dev(dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (priv->reg_base)
+ iounmap(priv->reg_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ free_sja1000dev(dev);
+
+ return 0;
+}
+
+static struct platform_driver sp_driver = {
+ .probe = sp_probe,
+ .remove = sp_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sp_init(void)
+{
+ return platform_driver_register(&sp_driver);
+}
+
+static void __exit sp_exit(void)
+{
+ platform_driver_unregister(&sp_driver);
+}
+
+module_init(sp_init);
+module_exit(sp_exit);
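
For context, a board file hooks into this driver by registering a platform device named "sja1000_platform" (matching DRV_NAME above) that carries the memory/IRQ resources and a struct sja1000_platform_data. A minimal sketch, with a hypothetical register window, interrupt line and clock/OCR/CDR values:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/can/platform/sja1000.h>

static struct resource foo_sja1000_res[] = {
        {
                .start = 0x40000000,            /* hypothetical register window */
                .end   = 0x40000000 + 0x80 - 1,
                .flags = IORESOURCE_MEM,
        }, {
                .start = 42,                    /* hypothetical interrupt line */
                .end   = 42,
                /* sp_probe() masks these flags with IRQF_TRIGGER_MASK */
                .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING,
        },
};

static struct sja1000_platform_data foo_sja1000_pdata = {
        .clock = 8000000,       /* hypothetical; ends up in priv->can.clock.freq */
        .ocr   = 0x40,          /* hypothetical; copied to priv->ocr */
        .cdr   = 0x48,          /* hypothetical; copied to priv->cdr */
};

static struct platform_device foo_sja1000_dev = {
        .name           = "sja1000_platform",   /* must match DRV_NAME */
        .id             = -1,
        .dev            = {
                .platform_data = &foo_sja1000_pdata,
        },
        .num_resources  = ARRAY_SIZE(foo_sja1000_res),
        .resource       = foo_sja1000_res,
};

Board init code would then call platform_device_register(&foo_sja1000_dev); sp_probe() above maps the window, picks the trigger flags out of the IRQ resource and hands clock/ocr/cdr to the SJA1000 core.
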
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f5222764061..eb066673c2a 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2934,7 +2934,7 @@ static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
* individual queues.
*/
if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
- return 1;
+ return NETDEV_TX_BUSY;
dev->trans_start = jiffies;
return 0;
}
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 4bd2455b0fe..699d22c5fe0 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -46,7 +46,7 @@
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#include <linux/mii.h>
+#include <linux/mdio.h>
#include <linux/crc32.h>
#include <linux/init.h>
#include <asm/io.h>
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
index 79d855e267e..1f095a9fc73 100644
--- a/drivers/net/chelsio/cphy.h
+++ b/drivers/net/chelsio/cphy.h
@@ -43,10 +43,11 @@
struct mdio_ops {
void (*init)(adapter_t *adapter, const struct board_info *bi);
- int (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int *val);
- int (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int val);
+ int (*read)(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr);
+ int (*write)(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val);
+ unsigned mode_support;
};
/* PHY interrupt types */
@@ -83,11 +84,12 @@ struct cphy_ops {
int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
int *duplex, int *fc);
+
+ u32 mmds;
};
/* A PHY instance */
struct cphy {
- int addr; /* PHY address */
int state; /* Link status state machine */
adapter_t *adapter; /* associated adapter */
@@ -101,56 +103,61 @@ struct cphy {
u32 elmer_gpo;
const struct cphy_ops *ops; /* PHY operations */
- int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int *val);
- int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int val);
+ struct mdio_if_info mdio;
struct cphy_instance *instance;
};
/* Convenience MDIO read/write wrappers */
-static inline int mdio_read(struct cphy *cphy, int mmd, int reg,
- unsigned int *valp)
+static inline int cphy_mdio_read(struct cphy *cphy, int mmd, int reg,
+ unsigned int *valp)
{
- return cphy->mdio_read(cphy->adapter, cphy->addr, mmd, reg, valp);
+ int rc = cphy->mdio.mdio_read(cphy->mdio.dev, cphy->mdio.prtad, mmd,
+ reg);
+ *valp = (rc >= 0) ? rc : -1;
+ return (rc >= 0) ? 0 : rc;
}
-static inline int mdio_write(struct cphy *cphy, int mmd, int reg,
- unsigned int val)
+static inline int cphy_mdio_write(struct cphy *cphy, int mmd, int reg,
+ unsigned int val)
{
- return cphy->mdio_write(cphy->adapter, cphy->addr, mmd, reg, val);
+ return cphy->mdio.mdio_write(cphy->mdio.dev, cphy->mdio.prtad, mmd,
+ reg, val);
}
static inline int simple_mdio_read(struct cphy *cphy, int reg,
unsigned int *valp)
{
- return mdio_read(cphy, 0, reg, valp);
+ return cphy_mdio_read(cphy, MDIO_DEVAD_NONE, reg, valp);
}
static inline int simple_mdio_write(struct cphy *cphy, int reg,
unsigned int val)
{
- return mdio_write(cphy, 0, reg, val);
+ return cphy_mdio_write(cphy, MDIO_DEVAD_NONE, reg, val);
}
/* Convenience initializer */
-static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
+static inline void cphy_init(struct cphy *phy, struct net_device *dev,
int phy_addr, struct cphy_ops *phy_ops,
const struct mdio_ops *mdio_ops)
{
+ struct adapter *adapter = netdev_priv(dev);
phy->adapter = adapter;
- phy->addr = phy_addr;
phy->ops = phy_ops;
if (mdio_ops) {
- phy->mdio_read = mdio_ops->read;
- phy->mdio_write = mdio_ops->write;
+ phy->mdio.prtad = phy_addr;
+ phy->mdio.mmds = phy_ops->mmds;
+ phy->mdio.mode_support = mdio_ops->mode_support;
+ phy->mdio.mdio_read = mdio_ops->read;
+ phy->mdio.mdio_write = mdio_ops->write;
}
+ phy->mdio.dev = dev;
}
/* Operations of the PHY-instance factory */
struct gphy {
/* Construct a PHY instance with the given PHY address */
- struct cphy *(*create)(adapter_t *adapter, int phy_addr,
+ struct cphy *(*create)(struct net_device *dev, int phy_addr,
const struct mdio_ops *mdio_ops);
/*
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index fa06994f973..082cdb28b51 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -589,7 +589,7 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = p->phy->addr;
+ cmd->phy_address = p->phy->mdio.prtad;
cmd->transceiver = XCVR_EXTERNAL;
cmd->autoneg = p->link_config.autoneg;
cmd->maxtxpkt = 0;
@@ -849,39 +849,9 @@ static const struct ethtool_ops t1_ethtool_ops = {
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct adapter *adapter = dev->ml_priv;
- struct mii_ioctl_data *data = if_mii(req);
-
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = adapter->port[dev->if_port].phy->addr;
- /* FALLTHRU */
- case SIOCGMIIREG: {
- struct cphy *phy = adapter->port[dev->if_port].phy;
- u32 val;
-
- if (!phy->mdio_read)
- return -EOPNOTSUPP;
- phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
- &val);
- data->val_out = val;
- break;
- }
- case SIOCSMIIREG: {
- struct cphy *phy = adapter->port[dev->if_port].phy;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (!phy->mdio_write)
- return -EOPNOTSUPP;
- phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
- data->val_in);
- break;
- }
+ struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
+ return mdio_mii_ioctl(mdio, if_mii(req), cmd);
}
static int t1_change_mtu(struct net_device *dev, int new_mtu)
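
The t1_ioctl() rewrite above shows the general pattern for drivers that embed a struct mdio_if_info: the hand-rolled SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG handling collapses into a single mdio_mii_ioctl() call. A minimal sketch with hypothetical foo_priv/foo_ioctl names:

#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/netdevice.h>

struct foo_priv {
        struct mdio_if_info mdio;       /* filled in like cphy_init() above */
        /* ... */
};

static int foo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        struct foo_priv *priv = netdev_priv(dev);

        /* mdio_mii_ioctl() implements the MII ioctls on top of the
         * mdio_read/mdio_write callbacks, honouring mode_support
         * (e.g. MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22). */
        return mdio_mii_ioctl(&priv->mdio, if_mii(req), cmd);
}
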
diff --git a/drivers/net/chelsio/mv88e1xxx.c b/drivers/net/chelsio/mv88e1xxx.c
index 0632be0d649..809047a99e9 100644
--- a/drivers/net/chelsio/mv88e1xxx.c
+++ b/drivers/net/chelsio/mv88e1xxx.c
@@ -353,15 +353,16 @@ static struct cphy_ops mv88e1xxx_ops = {
.get_link_status = mv88e1xxx_get_link_status,
};
-static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
+static struct cphy *mv88e1xxx_phy_create(struct net_device *dev, int phy_addr,
const struct mdio_ops *mdio_ops)
{
+ struct adapter *adapter = netdev_priv(dev);
struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
if (!cphy)
return NULL;
- cphy_init(cphy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops);
+ cphy_init(cphy, dev, phy_addr, &mv88e1xxx_ops, mdio_ops);
/* Configure particular PHYs to run in a different mode. */
if ((board_info(adapter)->caps & SUPPORTED_TP) &&
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c
index cd856041af3..f7136b2fd1e 100644
--- a/drivers/net/chelsio/mv88x201x.c
+++ b/drivers/net/chelsio/mv88x201x.c
@@ -53,7 +53,7 @@ static int led_init(struct cphy *cphy)
* Writing these bits maps control to another
* register. mmd(0x1) addr(0x7)
*/
- mdio_write(cphy, 0x3, 0x8304, 0xdddd);
+ cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8304, 0xdddd);
return 0;
}
@@ -62,14 +62,14 @@ static int led_link(struct cphy *cphy, u32 do_enable)
u32 led = 0;
#define LINK_ENABLE_BIT 0x1
- mdio_read(cphy, 0x1, 0x7, &led);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, &led);
if (do_enable & LINK_ENABLE_BIT) {
led |= LINK_ENABLE_BIT;
- mdio_write(cphy, 0x1, 0x7, led);
+ cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
} else {
led &= ~LINK_ENABLE_BIT;
- mdio_write(cphy, 0x1, 0x7, led);
+ cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
}
return 0;
}
@@ -86,7 +86,8 @@ static int mv88x201x_reset(struct cphy *cphy, int wait)
static int mv88x201x_interrupt_enable(struct cphy *cphy)
{
/* Enable PHY LASI interrupts. */
- mdio_write(cphy, 0x1, 0x9002, 0x1);
+ cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+ MDIO_PMA_LASI_LSALARM);
/* Enable Marvell interrupts through Elmer0. */
if (t1_is_asic(cphy->adapter)) {
@@ -102,7 +103,7 @@ static int mv88x201x_interrupt_enable(struct cphy *cphy)
static int mv88x201x_interrupt_disable(struct cphy *cphy)
{
/* Disable PHY LASI interrupts. */
- mdio_write(cphy, 0x1, 0x9002, 0x0);
+ cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0x0);
/* Disable Marvell interrupts through Elmer0. */
if (t1_is_asic(cphy->adapter)) {
@@ -122,25 +123,25 @@ static int mv88x201x_interrupt_clear(struct cphy *cphy)
#ifdef MV88x2010_LINK_STATUS_BUGS
/* Required to read twice before clear takes effect. */
- mdio_read(cphy, 0x1, 0x9003, &val);
- mdio_read(cphy, 0x1, 0x9004, &val);
- mdio_read(cphy, 0x1, 0x9005, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
/* Read this register after the others above it else
* the register doesn't clear correctly.
*/
- mdio_read(cphy, 0x1, 0x1, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
#endif
/* Clear link status. */
- mdio_read(cphy, 0x1, 0x1, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
/* Clear PHY LASI interrupts. */
- mdio_read(cphy, 0x1, 0x9005, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
#ifdef MV88x2010_LINK_STATUS_BUGS
/* Do it again. */
- mdio_read(cphy, 0x1, 0x9003, &val);
- mdio_read(cphy, 0x1, 0x9004, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
#endif
/* Clear Marvell interrupts through Elmer0. */
@@ -172,13 +173,12 @@ static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
int *speed, int *duplex, int *fc)
{
u32 val = 0;
-#define LINK_STATUS_BIT 0x4
if (link_ok) {
/* Read link status. */
- mdio_read(cphy, 0x1, 0x1, &val);
- val &= LINK_STATUS_BIT;
- *link_ok = (val == LINK_STATUS_BIT);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+ val &= MDIO_STAT1_LSTATUS;
+ *link_ok = (val == MDIO_STAT1_LSTATUS);
/* Turn on/off Link LED */
led_link(cphy, *link_ok);
}
@@ -205,9 +205,11 @@ static struct cphy_ops mv88x201x_ops = {
.interrupt_handler = mv88x201x_interrupt_handler,
.get_link_status = mv88x201x_get_link_status,
.set_loopback = mv88x201x_set_loopback,
+ .mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
+ MDIO_DEVS_PHYXS | MDIO_DEVS_WIS),
};
-static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr,
+static struct cphy *mv88x201x_phy_create(struct net_device *dev, int phy_addr,
const struct mdio_ops *mdio_ops)
{
u32 val;
@@ -216,15 +218,15 @@ static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr,
if (!cphy)
return NULL;
- cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops);
+ cphy_init(cphy, dev, phy_addr, &mv88x201x_ops, mdio_ops);
/* Commands the PHY to enable XFP's clock. */
- mdio_read(cphy, 0x3, 0x8300, &val);
- mdio_write(cphy, 0x3, 0x8300, val | 1);
+ cphy_mdio_read(cphy, MDIO_MMD_PCS, 0x8300, &val);
+ cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8300, val | 1);
/* Clear link status. Required because of a bug in the PHY. */
- mdio_read(cphy, 0x1, 0x8, &val);
- mdio_read(cphy, 0x3, 0x8, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT2, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PCS, MDIO_STAT2, &val);
/* Allows for Link,Ack LED turn on/off */
led_init(cphy);
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index 040acd29995..4c6028512d1 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -43,11 +43,11 @@ static int my3126_interrupt_handler(struct cphy *cphy)
adapter = cphy->adapter;
if (cphy->count == 50) {
- mdio_read(cphy, 0x1, 0x1, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
val16 = (u16) val;
status = cphy->bmsr ^ val16;
- if (status & BMSR_LSTATUS)
+ if (status & MDIO_STAT1_LSTATUS)
t1_link_changed(adapter, 0);
cphy->bmsr = val16;
@@ -114,14 +114,14 @@ static int my3126_get_link_status(struct cphy *cphy,
adapter_t *adapter;
adapter = cphy->adapter;
- mdio_read(cphy, 0x1, 0x1, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
val16 = (u16) val;
/* Populate elmer_gpo with the register value */
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
cphy->elmer_gpo = val;
- *link_ok = (val16 & BMSR_LSTATUS);
+ *link_ok = (val16 & MDIO_STAT1_LSTATUS);
if (*link_ok) {
/* Turn on the LED. */
@@ -163,9 +163,11 @@ static struct cphy_ops my3126_ops = {
.interrupt_handler = my3126_interrupt_handler,
.get_link_status = my3126_get_link_status,
.set_loopback = my3126_set_loopback,
+ .mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
+ MDIO_DEVS_PHYXS),
};
-static struct cphy *my3126_phy_create(adapter_t *adapter,
+static struct cphy *my3126_phy_create(struct net_device *dev,
int phy_addr, const struct mdio_ops *mdio_ops)
{
struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL);
@@ -173,7 +175,7 @@ static struct cphy *my3126_phy_create(adapter_t *adapter,
if (!cphy)
return NULL;
- cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
+ cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops);
INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
cphy->bmsr = 0;
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 58f6fc055f6..3711d64e45e 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1149,8 +1149,8 @@ static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
unsigned int len, unsigned int gen,
unsigned int eop)
{
- if (unlikely(len > SGE_TX_DESC_MAX_PLEN))
- BUG();
+ BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
+
e->addr_lo = (u32)mapping;
e->addr_hi = (u64)mapping >> 32;
e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
@@ -1879,7 +1879,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpl->vlan_valid = 0;
send:
- dev->trans_start = jiffies;
ret = t1_sge_tx(skb, adapter, 0, dev);
/* If transmit busy, and we reallocated skb's due to headroom limit,
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 7adf30230c4..17720c6e5bf 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -284,32 +284,29 @@ static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
/*
* Elmer MI1 MDIO read/write operations.
*/
-static int mi1_mdio_read(adapter_t *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int *valp)
+static int mi1_mdio_read(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr)
{
+ struct adapter *adapter = dev->ml_priv;
u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr);
-
- if (mmd_addr)
- return -EINVAL;
+ unsigned int val;
spin_lock(&adapter->tpi_lock);
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
__t1_tpi_write(adapter,
A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ);
mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
- __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
+ __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val);
spin_unlock(&adapter->tpi_lock);
- return 0;
+ return val;
}
-static int mi1_mdio_write(adapter_t *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int val)
+static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val)
{
+ struct adapter *adapter = dev->ml_priv;
u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr);
- if (mmd_addr)
- return -EINVAL;
-
spin_lock(&adapter->tpi_lock);
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
@@ -324,16 +321,19 @@ static int mi1_mdio_write(adapter_t *adapter, int phy_addr, int mmd_addr,
static const struct mdio_ops mi1_mdio_ops = {
.init = mi1_mdio_init,
.read = mi1_mdio_read,
- .write = mi1_mdio_write
+ .write = mi1_mdio_write,
+ .mode_support = MDIO_SUPPORTS_C22
};
#endif
#endif
-static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int *valp)
+static int mi1_mdio_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr)
{
+ struct adapter *adapter = dev->ml_priv;
u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
+ unsigned int val;
spin_lock(&adapter->tpi_lock);
@@ -350,14 +350,15 @@ static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
/* Read the data. */
- __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
+ __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val);
spin_unlock(&adapter->tpi_lock);
- return 0;
+ return val;
}
-static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int val)
+static int mi1_mdio_ext_write(struct net_device *dev, int phy_addr,
+ int mmd_addr, u16 reg_addr, u16 val)
{
+ struct adapter *adapter = dev->ml_priv;
u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
spin_lock(&adapter->tpi_lock);
@@ -380,7 +381,8 @@ static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
static const struct mdio_ops mi1_mdio_ext_ops = {
.init = mi1_mdio_init,
.read = mi1_mdio_ext_read,
- .write = mi1_mdio_ext_write
+ .write = mi1_mdio_ext_write,
+ .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};
enum {
@@ -1133,8 +1135,8 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
struct cmac *mac;
int phy_addr = bi->mdio_phybaseaddr + i;
- adapter->port[i].phy = bi->gphy->create(adapter, phy_addr,
- bi->mdio_ops);
+ adapter->port[i].phy = bi->gphy->create(adapter->port[i].dev,
+ phy_addr, bi->mdio_ops);
if (!adapter->port[i].phy) {
CH_ERR("%s: PHY %d initialization failed\n",
adapter->name, i);
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
new file mode 100644
index 00000000000..44f77eb1180
--- /dev/null
+++ b/drivers/net/cnic.c
@@ -0,0 +1,2717 @@
+/* cnic.c: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
+ * Modified and maintained by: Michael Chan <mchan@broadcom.com>
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/uio_driver.h>
+#include <linux/in.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define BCM_VLAN 1
+#endif
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <scsi/iscsi_if.h>
+
+#include "cnic_if.h"
+#include "bnx2.h"
+#include "cnic.h"
+#include "cnic_defs.h"
+
+#define DRV_MODULE_NAME "cnic"
+#define PFX DRV_MODULE_NAME ": "
+
+static char version[] __devinitdata =
+ "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
+ "Chen (zongxi@broadcom.com)");
+MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(CNIC_MODULE_VERSION);
+
+static LIST_HEAD(cnic_dev_list);
+static DEFINE_RWLOCK(cnic_dev_lock);
+static DEFINE_MUTEX(cnic_lock);
+
+static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+static int cnic_service_bnx2(void *, void *);
+static int cnic_ctl(void *, struct cnic_ctl_info *);
+
+static struct cnic_ops cnic_bnx2_ops = {
+ .cnic_owner = THIS_MODULE,
+ .cnic_handler = cnic_service_bnx2,
+ .cnic_ctl = cnic_ctl,
+};
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
+static int cnic_cm_set_pg(struct cnic_sock *);
+
+static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+ struct cnic_dev *dev = uinfo->priv;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (cp->uio_dev != -1)
+ return -EBUSY;
+
+ cp->uio_dev = iminor(inode);
+
+ cnic_shutdown_bnx2_rx_ring(dev);
+
+ cnic_init_bnx2_tx_ring(dev);
+ cnic_init_bnx2_rx_ring(dev);
+
+ return 0;
+}
+
+static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+ struct cnic_dev *dev = uinfo->priv;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cp->uio_dev = -1;
+ return 0;
+}
+
+static inline void cnic_hold(struct cnic_dev *dev)
+{
+ atomic_inc(&dev->ref_count);
+}
+
+static inline void cnic_put(struct cnic_dev *dev)
+{
+ atomic_dec(&dev->ref_count);
+}
+
+static inline void csk_hold(struct cnic_sock *csk)
+{
+ atomic_inc(&csk->ref_count);
+}
+
+static inline void csk_put(struct cnic_sock *csk)
+{
+ atomic_dec(&csk->ref_count);
+}
+
+static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
+{
+ struct cnic_dev *cdev;
+
+ read_lock(&cnic_dev_lock);
+ list_for_each_entry(cdev, &cnic_dev_list, list) {
+ if (netdev == cdev->netdev) {
+ cnic_hold(cdev);
+ read_unlock(&cnic_dev_lock);
+ return cdev;
+ }
+ }
+ read_unlock(&cnic_dev_lock);
+ return NULL;
+}
+
+static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+ struct drv_ctl_io *io = &info.data.io;
+
+ info.cmd = DRV_CTL_CTX_WR_CMD;
+ io->cid_addr = cid_addr;
+ io->offset = off;
+ io->data = val;
+ ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+ struct drv_ctl_io *io = &info.data.io;
+
+ info.cmd = DRV_CTL_IO_WR_CMD;
+ io->offset = off;
+ io->data = val;
+ ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+ struct drv_ctl_io *io = &info.data.io;
+
+ info.cmd = DRV_CTL_IO_RD_CMD;
+ io->offset = off;
+ ethdev->drv_ctl(dev->netdev, &info);
+ return io->data;
+}
+
+static int cnic_in_use(struct cnic_sock *csk)
+{
+ return test_bit(SK_F_INUSE, &csk->flags);
+}
+
+static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+
+ info.cmd = DRV_CTL_COMPLETION_CMD;
+ info.data.comp.comp_count = count;
+ ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
+ struct cnic_sock *csk)
+{
+ struct iscsi_path path_req;
+ char *buf = NULL;
+ u16 len = 0;
+ u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+ struct cnic_ulp_ops *ulp_ops;
+
+ if (cp->uio_dev == -1)
+ return -ENODEV;
+
+ if (csk) {
+ len = sizeof(path_req);
+ buf = (char *) &path_req;
+ memset(&path_req, 0, len);
+
+ msg_type = ISCSI_KEVENT_PATH_REQ;
+ path_req.handle = (u64) csk->l5_cid;
+ if (test_bit(SK_F_IPV6, &csk->flags)) {
+ memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
+ sizeof(struct in6_addr));
+ path_req.ip_addr_len = 16;
+ } else {
+ memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
+ sizeof(struct in_addr));
+ path_req.ip_addr_len = 4;
+ }
+ path_req.vlan_id = csk->vlan_id;
+ path_req.pmtu = csk->mtu;
+ }
+
+ rcu_read_lock();
+ ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
+ if (ulp_ops)
+ ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
+ rcu_read_unlock();
+ return 0;
+}
+
+static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
+ char *buf, u16 len)
+{
+ int rc = -EINVAL;
+
+ switch (msg_type) {
+ case ISCSI_UEVENT_PATH_UPDATE: {
+ struct cnic_local *cp;
+ u32 l5_cid;
+ struct cnic_sock *csk;
+ struct iscsi_path *path_resp;
+
+ if (len < sizeof(*path_resp))
+ break;
+
+ path_resp = (struct iscsi_path *) buf;
+ cp = dev->cnic_priv;
+ l5_cid = (u32) path_resp->handle;
+ if (l5_cid >= MAX_CM_SK_TBL_SZ)
+ break;
+
+ csk = &cp->csk_tbl[l5_cid];
+ csk_hold(csk);
+ if (cnic_in_use(csk)) {
+ memcpy(csk->ha, path_resp->mac_addr, 6);
+ if (test_bit(SK_F_IPV6, &csk->flags))
+ memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
+ sizeof(struct in6_addr));
+ else
+ memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
+ sizeof(struct in_addr));
+ if (is_valid_ether_addr(csk->ha))
+ cnic_cm_set_pg(csk);
+ }
+ csk_put(csk);
+ rc = 0;
+ }
+ }
+
+ return rc;
+}
+
+static int cnic_offld_prep(struct cnic_sock *csk)
+{
+ if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+ return 0;
+
+ if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int cnic_close_prep(struct cnic_sock *csk)
+{
+ clear_bit(SK_F_CONNECT_START, &csk->flags);
+ smp_mb__after_clear_bit();
+
+ if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+ while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+ msleep(1);
+
+ return 1;
+ }
+ return 0;
+}
+
+static int cnic_abort_prep(struct cnic_sock *csk)
+{
+ clear_bit(SK_F_CONNECT_START, &csk->flags);
+ smp_mb__after_clear_bit();
+
+ while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+ msleep(1);
+
+ if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+ csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+ return 1;
+ }
+
+ return 0;
+}
+
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
+{
+ struct cnic_dev *dev;
+
+ if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+ printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
+ ulp_type);
+ return -EINVAL;
+ }
+ mutex_lock(&cnic_lock);
+ if (cnic_ulp_tbl[ulp_type]) {
+ printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
+ "been registered\n", ulp_type);
+ mutex_unlock(&cnic_lock);
+ return -EBUSY;
+ }
+
+ read_lock(&cnic_dev_lock);
+ list_for_each_entry(dev, &cnic_dev_list, list) {
+ struct cnic_local *cp = dev->cnic_priv;
+
+ clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
+ }
+ read_unlock(&cnic_dev_lock);
+
+ rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
+ mutex_unlock(&cnic_lock);
+
+ /* Prevent race conditions with netdev_event */
+ rtnl_lock();
+ read_lock(&cnic_dev_lock);
+ list_for_each_entry(dev, &cnic_dev_list, list) {
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+ ulp_ops->cnic_init(dev);
+ }
+ read_unlock(&cnic_dev_lock);
+ rtnl_unlock();
+
+ return 0;
+}
+
+int cnic_unregister_driver(int ulp_type)
+{
+ struct cnic_dev *dev;
+
+ if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+ printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
+ ulp_type);
+ return -EINVAL;
+ }
+ mutex_lock(&cnic_lock);
+ if (!cnic_ulp_tbl[ulp_type]) {
+ printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
+ "been registered\n", ulp_type);
+ goto out_unlock;
+ }
+ read_lock(&cnic_dev_lock);
+ list_for_each_entry(dev, &cnic_dev_list, list) {
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+ printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
+ "still has devices registered\n", ulp_type);
+ read_unlock(&cnic_dev_lock);
+ goto out_unlock;
+ }
+ }
+ read_unlock(&cnic_dev_lock);
+
+ rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+
+ mutex_unlock(&cnic_lock);
+ synchronize_rcu();
+ return 0;
+
+out_unlock:
+ mutex_unlock(&cnic_lock);
+ return -EINVAL;
+}
+
+static int cnic_start_hw(struct cnic_dev *);
+static void cnic_stop_hw(struct cnic_dev *);
+
+static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
+ void *ulp_ctx)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_ulp_ops *ulp_ops;
+
+ if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+ printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
+ ulp_type);
+ return -EINVAL;
+ }
+ mutex_lock(&cnic_lock);
+ if (cnic_ulp_tbl[ulp_type] == NULL) {
+ printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
+ "has not been registered\n", ulp_type);
+ mutex_unlock(&cnic_lock);
+ return -EAGAIN;
+ }
+ if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+ printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
+ "been registered to this device\n", ulp_type);
+ mutex_unlock(&cnic_lock);
+ return -EBUSY;
+ }
+
+ clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
+ cp->ulp_handle[ulp_type] = ulp_ctx;
+ ulp_ops = cnic_ulp_tbl[ulp_type];
+ rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
+ cnic_hold(dev);
+
+ if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
+ ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
+
+ mutex_unlock(&cnic_lock);
+
+ return 0;
+
+}
+EXPORT_SYMBOL(cnic_register_driver);
+
+static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+ printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
+ ulp_type);
+ return -EINVAL;
+ }
+ mutex_lock(&cnic_lock);
+ if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+ rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+ cnic_put(dev);
+ } else {
+ printk(KERN_ERR PFX "cnic_unregister_device: device not "
+ "registered to this ulp type %d\n", ulp_type);
+ mutex_unlock(&cnic_lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&cnic_lock);
+
+ synchronize_rcu();
+
+ return 0;
+}
+EXPORT_SYMBOL(cnic_unregister_driver);
+
+static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
+{
+ id_tbl->start = start_id;
+ id_tbl->max = size;
+ id_tbl->next = 0;
+ spin_lock_init(&id_tbl->lock);
+ id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+ if (!id_tbl->table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
+{
+ kfree(id_tbl->table);
+ id_tbl->table = NULL;
+}
+
+static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+ int ret = -1;
+
+ id -= id_tbl->start;
+ if (id >= id_tbl->max)
+ return ret;
+
+ spin_lock(&id_tbl->lock);
+ if (!test_bit(id, id_tbl->table)) {
+ set_bit(id, id_tbl->table);
+ ret = 0;
+ }
+ spin_unlock(&id_tbl->lock);
+ return ret;
+}
+
+/* Returns -1 if not successful */
+static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
+{
+ u32 id;
+
+ spin_lock(&id_tbl->lock);
+ id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+ if (id >= id_tbl->max) {
+ id = -1;
+ if (id_tbl->next != 0) {
+ id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+ if (id >= id_tbl->next)
+ id = -1;
+ }
+ }
+
+ if (id < id_tbl->max) {
+ set_bit(id, id_tbl->table);
+ id_tbl->next = (id + 1) & (id_tbl->max - 1);
+ id += id_tbl->start;
+ }
+
+ spin_unlock(&id_tbl->lock);
+
+ return id;
+}
+
+static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+ if (id == -1)
+ return;
+
+ id -= id_tbl->start;
+ if (id >= id_tbl->max)
+ return;
+
+ clear_bit(id, id_tbl->table);
+}
+
+static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+ int i;
+
+ if (!dma->pg_arr)
+ return;
+
+ for (i = 0; i < dma->num_pages; i++) {
+ if (dma->pg_arr[i]) {
+ pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
+ dma->pg_arr[i], dma->pg_map_arr[i]);
+ dma->pg_arr[i] = NULL;
+ }
+ }
+ if (dma->pgtbl) {
+ pci_free_consistent(dev->pcidev, dma->pgtbl_size,
+ dma->pgtbl, dma->pgtbl_map);
+ dma->pgtbl = NULL;
+ }
+ kfree(dma->pg_arr);
+ dma->pg_arr = NULL;
+ dma->num_pages = 0;
+}
+
+static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+ int i;
+ u32 *page_table = dma->pgtbl;
+
+ for (i = 0; i < dma->num_pages; i++) {
+ /* Each entry needs to be in big endian format. */
+ *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+ page_table++;
+ *page_table = (u32) dma->pg_map_arr[i];
+ page_table++;
+ }
+}
+
+static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
+ int pages, int use_pg_tbl)
+{
+ int i, size;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ size = pages * (sizeof(void *) + sizeof(dma_addr_t));
+ dma->pg_arr = kzalloc(size, GFP_ATOMIC);
+ if (dma->pg_arr == NULL)
+ return -ENOMEM;
+
+ dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
+ dma->num_pages = pages;
+
+ for (i = 0; i < pages; i++) {
+ dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
+ BCM_PAGE_SIZE,
+ &dma->pg_map_arr[i]);
+ if (dma->pg_arr[i] == NULL)
+ goto error;
+ }
+ if (!use_pg_tbl)
+ return 0;
+
+ dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
+ ~(BCM_PAGE_SIZE - 1);
+ dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
+ &dma->pgtbl_map);
+ if (dma->pgtbl == NULL)
+ goto error;
+
+ cp->setup_pgtbl(dev, dma);
+
+ return 0;
+
+error:
+ cnic_free_dma(dev, dma);
+ return -ENOMEM;
+}
+
+static void cnic_free_resc(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int i = 0;
+
+ if (cp->cnic_uinfo) {
+ cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+ while (cp->uio_dev != -1 && i < 15) {
+ msleep(100);
+ i++;
+ }
+ uio_unregister_device(cp->cnic_uinfo);
+ kfree(cp->cnic_uinfo);
+ cp->cnic_uinfo = NULL;
+ }
+
+ if (cp->l2_buf) {
+ pci_free_consistent(dev->pcidev, cp->l2_buf_size,
+ cp->l2_buf, cp->l2_buf_map);
+ cp->l2_buf = NULL;
+ }
+
+ if (cp->l2_ring) {
+ pci_free_consistent(dev->pcidev, cp->l2_ring_size,
+ cp->l2_ring, cp->l2_ring_map);
+ cp->l2_ring = NULL;
+ }
+
+ for (i = 0; i < cp->ctx_blks; i++) {
+ if (cp->ctx_arr[i].ctx) {
+ pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
+ cp->ctx_arr[i].ctx,
+ cp->ctx_arr[i].mapping);
+ cp->ctx_arr[i].ctx = NULL;
+ }
+ }
+ kfree(cp->ctx_arr);
+ cp->ctx_arr = NULL;
+ cp->ctx_blks = 0;
+
+ cnic_free_dma(dev, &cp->gbl_buf_info);
+ cnic_free_dma(dev, &cp->conn_buf_info);
+ cnic_free_dma(dev, &cp->kwq_info);
+ cnic_free_dma(dev, &cp->kcq_info);
+ kfree(cp->iscsi_tbl);
+ cp->iscsi_tbl = NULL;
+ kfree(cp->ctx_tbl);
+ cp->ctx_tbl = NULL;
+
+ cnic_free_id_tbl(&cp->cid_tbl);
+}
+
+static int cnic_alloc_context(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+ int i, k, arr_size;
+
+ cp->ctx_blk_size = BCM_PAGE_SIZE;
+ cp->cids_per_blk = BCM_PAGE_SIZE / 128;
+ arr_size = BNX2_MAX_CID / cp->cids_per_blk *
+ sizeof(struct cnic_ctx);
+ cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
+ if (cp->ctx_arr == NULL)
+ return -ENOMEM;
+
+ k = 0;
+ for (i = 0; i < 2; i++) {
+ u32 j, reg, off, lo, hi;
+
+ if (i == 0)
+ off = BNX2_PG_CTX_MAP;
+ else
+ off = BNX2_ISCSI_CTX_MAP;
+
+ reg = cnic_reg_rd_ind(dev, off);
+ lo = reg >> 16;
+ hi = reg & 0xffff;
+ for (j = lo; j < hi; j += cp->cids_per_blk, k++)
+ cp->ctx_arr[k].cid = j;
+ }
+
+ cp->ctx_blks = k;
+ if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
+ cp->ctx_blks = 0;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < cp->ctx_blks; i++) {
+ cp->ctx_arr[i].ctx =
+ pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
+ &cp->ctx_arr[i].mapping);
+ if (cp->ctx_arr[i].ctx == NULL)
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct uio_info *uinfo;
+ int ret;
+
+ ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
+ if (ret)
+ goto error;
+ cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
+
+ ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
+ if (ret)
+ goto error;
+ cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
+
+ ret = cnic_alloc_context(dev);
+ if (ret)
+ goto error;
+
+ cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
+ cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
+ &cp->l2_ring_map);
+ if (!cp->l2_ring)
+ goto error;
+
+ cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+ cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
+ cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
+ &cp->l2_buf_map);
+ if (!cp->l2_buf)
+ goto error;
+
+ uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
+ if (!uinfo)
+ goto error;
+
+ uinfo->mem[0].addr = dev->netdev->base_addr;
+ uinfo->mem[0].internal_addr = dev->regview;
+ uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
+ uinfo->mem[0].memtype = UIO_MEM_PHYS;
+
+ uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
+ if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
+ uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
+ else
+ uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
+ uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
+ uinfo->mem[2].size = cp->l2_ring_size;
+ uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
+ uinfo->mem[3].size = cp->l2_buf_size;
+ uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->name = "bnx2_cnic";
+ uinfo->version = CNIC_MODULE_VERSION;
+ uinfo->irq = UIO_IRQ_CUSTOM;
+
+ uinfo->open = cnic_uio_open;
+ uinfo->release = cnic_uio_close;
+
+ uinfo->priv = dev;
+
+ ret = uio_register_device(&dev->pcidev->dev, uinfo);
+ if (ret) {
+ kfree(uinfo);
+ goto error;
+ }
+
+ cp->cnic_uinfo = uinfo;
+
+ return 0;
+
+error:
+ cnic_free_resc(dev);
+ return ret;
+}
+
+static inline u32 cnic_kwq_avail(struct cnic_local *cp)
+{
+ return cp->max_kwq_idx -
+ ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
+}
+
+static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num_wqes)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct kwqe *prod_qe;
+ u16 prod, sw_prod, i;
+
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EAGAIN; /* bnx2 is down */
+
+ spin_lock_bh(&cp->cnic_ulp_lock);
+ if (num_wqes > cnic_kwq_avail(cp) &&
+ !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
+ spin_unlock_bh(&cp->cnic_ulp_lock);
+ return -EAGAIN;
+ }
+
+ cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
+
+ prod = cp->kwq_prod_idx;
+ sw_prod = prod & MAX_KWQ_IDX;
+ for (i = 0; i < num_wqes; i++) {
+ prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
+ memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
+ prod++;
+ sw_prod = prod & MAX_KWQ_IDX;
+ }
+ cp->kwq_prod_idx = prod;
+
+ CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
+
+ spin_unlock_bh(&cp->cnic_ulp_lock);
+ return 0;
+}
+
+static void service_kcqes(struct cnic_dev *dev, int num_cqes)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int i, j;
+
+ i = 0;
+ j = 1;
+ while (num_cqes) {
+ struct cnic_ulp_ops *ulp_ops;
+ int ulp_type;
+ u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
+ u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
+
+ if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
+ cnic_kwq_completion(dev, 1);
+
+ while (j < num_cqes) {
+ u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
+
+ if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
+ break;
+
+ if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
+ cnic_kwq_completion(dev, 1);
+ j++;
+ }
+
+ if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
+ ulp_type = CNIC_ULP_RDMA;
+ else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
+ ulp_type = CNIC_ULP_ISCSI;
+ else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
+ ulp_type = CNIC_ULP_L4;
+ else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
+ goto end;
+ else {
+ printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
+ dev->netdev->name, kcqe_op_flag);
+ goto end;
+ }
+
+ rcu_read_lock();
+ ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+ if (likely(ulp_ops)) {
+ ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
+ cp->completed_kcq + i, j);
+ }
+ rcu_read_unlock();
+end:
+ num_cqes -= j;
+ i += j;
+ j = 1;
+ }
+ return;
+}
+
+static u16 cnic_bnx2_next_idx(u16 idx)
+{
+ return idx + 1;
+}
+
+static u16 cnic_bnx2_hw_idx(u16 idx)
+{
+ return idx;
+}
+
+static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u16 i, ri, last;
+ struct kcqe *kcqe;
+ int kcqe_cnt = 0, last_cnt = 0;
+
+ i = ri = last = *sw_prod;
+ ri &= MAX_KCQ_IDX;
+
+ while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
+ kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
+ cp->completed_kcq[kcqe_cnt++] = kcqe;
+ i = cp->next_idx(i);
+ ri = i & MAX_KCQ_IDX;
+ if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
+ last_cnt = kcqe_cnt;
+ last = i;
+ }
+ }
+
+ *sw_prod = last;
+ return last_cnt;
+}
+
+static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
+{
+ u16 rx_cons = *cp->rx_cons_ptr;
+ u16 tx_cons = *cp->tx_cons_ptr;
+
+ if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
+ cp->tx_cons = tx_cons;
+ cp->rx_cons = rx_cons;
+ uio_event_notify(cp->cnic_uinfo);
+ }
+}
+
+static int cnic_service_bnx2(void *data, void *status_blk)
+{
+ struct cnic_dev *dev = data;
+ struct status_block *sblk = status_blk;
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 status_idx = sblk->status_idx;
+ u16 hw_prod, sw_prod;
+ int kcqe_cnt;
+
+ if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+ return status_idx;
+
+ cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+
+ hw_prod = sblk->status_completion_producer_index;
+ sw_prod = cp->kcq_prod_idx;
+ while (sw_prod != hw_prod) {
+ kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
+ if (kcqe_cnt == 0)
+ goto done;
+
+ service_kcqes(dev, kcqe_cnt);
+
+ /* Tell compiler that status_blk fields can change. */
+ barrier();
+ if (status_idx != sblk->status_idx) {
+ status_idx = sblk->status_idx;
+ cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+ hw_prod = sblk->status_completion_producer_index;
+ } else
+ break;
+ }
+
+done:
+ CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
+
+ cp->kcq_prod_idx = sw_prod;
+
+ cnic_chk_bnx2_pkt_rings(cp);
+ return status_idx;
+}
+
+static void cnic_service_bnx2_msix(unsigned long data)
+{
+ struct cnic_dev *dev = (struct cnic_dev *) data;
+ struct cnic_local *cp = dev->cnic_priv;
+ struct status_block_msix *status_blk = cp->bnx2_status_blk;
+ u32 status_idx = status_blk->status_idx;
+ u16 hw_prod, sw_prod;
+ int kcqe_cnt;
+
+ cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
+
+ hw_prod = status_blk->status_completion_producer_index;
+ sw_prod = cp->kcq_prod_idx;
+ while (sw_prod != hw_prod) {
+ kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
+ if (kcqe_cnt == 0)
+ goto done;
+
+ service_kcqes(dev, kcqe_cnt);
+
+ /* Tell compiler that status_blk fields can change. */
+ barrier();
+ if (status_idx != status_blk->status_idx) {
+ status_idx = status_blk->status_idx;
+ cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
+ hw_prod = status_blk->status_completion_producer_index;
+ } else
+ break;
+ }
+
+done:
+ CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
+ cp->kcq_prod_idx = sw_prod;
+
+ cnic_chk_bnx2_pkt_rings(cp);
+
+ cp->last_status_idx = status_idx;
+ CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static irqreturn_t cnic_irq(int irq, void *dev_instance)
+{
+ struct cnic_dev *dev = dev_instance;
+ struct cnic_local *cp = dev->cnic_priv;
+ u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
+
+ if (cp->ack_int)
+ cp->ack_int(dev);
+
+ prefetch(cp->status_blk);
+ prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+
+ if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+ tasklet_schedule(&cp->cnic_irq_task);
+
+ return IRQ_HANDLED;
+}
+
+static void cnic_ulp_stop(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int if_type;
+
+ rcu_read_lock();
+ for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+ struct cnic_ulp_ops *ulp_ops;
+
+ ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+ if (!ulp_ops)
+ continue;
+
+ if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+ ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+ }
+ rcu_read_unlock();
+}
+
+static void cnic_ulp_start(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int if_type;
+
+ rcu_read_lock();
+ for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+ struct cnic_ulp_ops *ulp_ops;
+
+ ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+ if (!ulp_ops || !ulp_ops->cnic_start)
+ continue;
+
+ if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+ ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+ }
+ rcu_read_unlock();
+}
+
+static int cnic_ctl(void *data, struct cnic_ctl_info *info)
+{
+ struct cnic_dev *dev = data;
+
+ switch (info->cmd) {
+ case CNIC_CTL_STOP_CMD:
+ cnic_hold(dev);
+ mutex_lock(&cnic_lock);
+
+ cnic_ulp_stop(dev);
+ cnic_stop_hw(dev);
+
+ mutex_unlock(&cnic_lock);
+ cnic_put(dev);
+ break;
+ case CNIC_CTL_START_CMD:
+ cnic_hold(dev);
+ mutex_lock(&cnic_lock);
+
+ if (!cnic_start_hw(dev))
+ cnic_ulp_start(dev);
+
+ mutex_unlock(&cnic_lock);
+ cnic_put(dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void cnic_ulp_init(struct cnic_dev *dev)
+{
+ int i;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ rcu_read_lock();
+ for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+ struct cnic_ulp_ops *ulp_ops;
+
+ ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
+ if (!ulp_ops || !ulp_ops->cnic_init)
+ continue;
+
+ if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+ ulp_ops->cnic_init(dev);
+
+ }
+ rcu_read_unlock();
+}
+
+static void cnic_ulp_exit(struct cnic_dev *dev)
+{
+ int i;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ rcu_read_lock();
+ for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+ struct cnic_ulp_ops *ulp_ops;
+
+ ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
+ if (!ulp_ops || !ulp_ops->cnic_exit)
+ continue;
+
+ if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+ ulp_ops->cnic_exit(dev);
+
+ }
+ rcu_read_unlock();
+}
+
+static int cnic_cm_offload_pg(struct cnic_sock *csk)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct l4_kwq_offload_pg *l4kwqe;
+ struct kwqe *wqes[1];
+
+ l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
+ memset(l4kwqe, 0, sizeof(*l4kwqe));
+ wqes[0] = (struct kwqe *) l4kwqe;
+
+ l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
+ l4kwqe->flags =
+ L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
+ l4kwqe->l2hdr_nbytes = ETH_HLEN;
+
+ l4kwqe->da0 = csk->ha[0];
+ l4kwqe->da1 = csk->ha[1];
+ l4kwqe->da2 = csk->ha[2];
+ l4kwqe->da3 = csk->ha[3];
+ l4kwqe->da4 = csk->ha[4];
+ l4kwqe->da5 = csk->ha[5];
+
+ l4kwqe->sa0 = dev->mac_addr[0];
+ l4kwqe->sa1 = dev->mac_addr[1];
+ l4kwqe->sa2 = dev->mac_addr[2];
+ l4kwqe->sa3 = dev->mac_addr[3];
+ l4kwqe->sa4 = dev->mac_addr[4];
+ l4kwqe->sa5 = dev->mac_addr[5];
+
+ l4kwqe->etype = ETH_P_IP;
+ l4kwqe->ipid_count = DEF_IPID_COUNT;
+ l4kwqe->host_opaque = csk->l5_cid;
+
+ if (csk->vlan_id) {
+ l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
+ l4kwqe->vlan_tag = csk->vlan_id;
+ l4kwqe->l2hdr_nbytes += 4;
+ }
+
+ return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_update_pg(struct cnic_sock *csk)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct l4_kwq_update_pg *l4kwqe;
+ struct kwqe *wqes[1];
+
+ l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
+ memset(l4kwqe, 0, sizeof(*l4kwqe));
+ wqes[0] = (struct kwqe *) l4kwqe;
+
+ l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
+ l4kwqe->flags =
+ L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
+ l4kwqe->pg_cid = csk->pg_cid;
+
+ l4kwqe->da0 = csk->ha[0];
+ l4kwqe->da1 = csk->ha[1];
+ l4kwqe->da2 = csk->ha[2];
+ l4kwqe->da3 = csk->ha[3];
+ l4kwqe->da4 = csk->ha[4];
+ l4kwqe->da5 = csk->ha[5];
+
+ l4kwqe->pg_host_opaque = csk->l5_cid;
+ l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
+
+ return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_upload_pg(struct cnic_sock *csk)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct l4_kwq_upload *l4kwqe;
+ struct kwqe *wqes[1];
+
+ l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
+ memset(l4kwqe, 0, sizeof(*l4kwqe));
+ wqes[0] = (struct kwqe *) l4kwqe;
+
+ l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
+ l4kwqe->flags =
+ L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
+ l4kwqe->cid = csk->pg_cid;
+
+ return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_conn_req(struct cnic_sock *csk)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct l4_kwq_connect_req1 *l4kwqe1;
+ struct l4_kwq_connect_req2 *l4kwqe2;
+ struct l4_kwq_connect_req3 *l4kwqe3;
+ struct kwqe *wqes[3];
+ u8 tcp_flags = 0;
+ int num_wqes = 2;
+
+ l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
+ l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
+ l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
+ memset(l4kwqe1, 0, sizeof(*l4kwqe1));
+ memset(l4kwqe2, 0, sizeof(*l4kwqe2));
+ memset(l4kwqe3, 0, sizeof(*l4kwqe3));
+
+ l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
+ l4kwqe3->flags =
+ L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
+ l4kwqe3->ka_timeout = csk->ka_timeout;
+ l4kwqe3->ka_interval = csk->ka_interval;
+ l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
+ l4kwqe3->tos = csk->tos;
+ l4kwqe3->ttl = csk->ttl;
+ l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
+ l4kwqe3->pmtu = csk->mtu;
+ l4kwqe3->rcv_buf = csk->rcv_buf;
+ l4kwqe3->snd_buf = csk->snd_buf;
+ l4kwqe3->seed = csk->seed;
+
+ wqes[0] = (struct kwqe *) l4kwqe1;
+ if (test_bit(SK_F_IPV6, &csk->flags)) {
+ wqes[1] = (struct kwqe *) l4kwqe2;
+ wqes[2] = (struct kwqe *) l4kwqe3;
+ num_wqes = 3;
+
+ l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
+ l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
+ l4kwqe2->flags =
+ L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
+ L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
+ l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
+ l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
+ l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
+ l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
+ l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
+ l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
+ l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
+ sizeof(struct tcphdr);
+ } else {
+ wqes[1] = (struct kwqe *) l4kwqe3;
+ l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
+ sizeof(struct tcphdr);
+ }
+
+ l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
+ l4kwqe1->flags =
+ (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
+ L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
+ l4kwqe1->cid = csk->cid;
+ l4kwqe1->pg_cid = csk->pg_cid;
+ l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
+ l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
+ l4kwqe1->src_port = be16_to_cpu(csk->src_port);
+ l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
+ if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
+ tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
+ if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
+ tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
+ if (csk->tcp_flags & SK_TCP_NAGLE)
+ tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
+ if (csk->tcp_flags & SK_TCP_TIMESTAMP)
+ tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
+ if (csk->tcp_flags & SK_TCP_SACK)
+ tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
+ if (csk->tcp_flags & SK_TCP_SEG_SCALING)
+ tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
+
+ l4kwqe1->tcp_flags = tcp_flags;
+
+ return dev->submit_kwqes(dev, wqes, num_wqes);
+}
+
+static int cnic_cm_close_req(struct cnic_sock *csk)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct l4_kwq_close_req *l4kwqe;
+ struct kwqe *wqes[1];
+
+ l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
+ memset(l4kwqe, 0, sizeof(*l4kwqe));
+ wqes[0] = (struct kwqe *) l4kwqe;
+
+ l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
+ l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
+ l4kwqe->cid = csk->cid;
+
+ return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_abort_req(struct cnic_sock *csk)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct l4_kwq_reset_req *l4kwqe;
+ struct kwqe *wqes[1];
+
+ l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
+ memset(l4kwqe, 0, sizeof(*l4kwqe));
+ wqes[0] = (struct kwqe *) l4kwqe;
+
+ l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
+ l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
+ l4kwqe->cid = csk->cid;
+
+ return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
+ u32 l5_cid, struct cnic_sock **csk, void *context)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_sock *csk1;
+
+ if (l5_cid >= MAX_CM_SK_TBL_SZ)
+ return -EINVAL;
+
+ csk1 = &cp->csk_tbl[l5_cid];
+ if (atomic_read(&csk1->ref_count))
+ return -EAGAIN;
+
+ if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
+ return -EBUSY;
+
+ csk1->dev = dev;
+ csk1->cid = cid;
+ csk1->l5_cid = l5_cid;
+ csk1->ulp_type = ulp_type;
+ csk1->context = context;
+
+ csk1->ka_timeout = DEF_KA_TIMEOUT;
+ csk1->ka_interval = DEF_KA_INTERVAL;
+ csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
+ csk1->tos = DEF_TOS;
+ csk1->ttl = DEF_TTL;
+ csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
+ csk1->rcv_buf = DEF_RCV_BUF;
+ csk1->snd_buf = DEF_SND_BUF;
+ csk1->seed = DEF_SEED;
+
+ *csk = csk1;
+ return 0;
+}
+
+static void cnic_cm_cleanup(struct cnic_sock *csk)
+{
+ if (csk->src_port) {
+ struct cnic_dev *dev = csk->dev;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cnic_free_id(&cp->csk_port_tbl, csk->src_port);
+ csk->src_port = 0;
+ }
+}
+
+static void cnic_close_conn(struct cnic_sock *csk)
+{
+ if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
+ cnic_cm_upload_pg(csk);
+ clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+ }
+ cnic_cm_cleanup(csk);
+}
+
+static int cnic_cm_destroy(struct cnic_sock *csk)
+{
+ if (!cnic_in_use(csk))
+ return -EINVAL;
+
+ csk_hold(csk);
+ clear_bit(SK_F_INUSE, &csk->flags);
+ smp_mb__after_clear_bit();
+ while (atomic_read(&csk->ref_count) != 1)
+ msleep(1);
+ cnic_cm_cleanup(csk);
+
+ csk->flags = 0;
+ csk_put(csk);
+ return 0;
+}
+
+static inline u16 cnic_get_vlan(struct net_device *dev,
+ struct net_device **vlan_dev)
+{
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ *vlan_dev = vlan_dev_real_dev(dev);
+ return vlan_dev_vlan_id(dev);
+ }
+ *vlan_dev = dev;
+ return 0;
+}
+
+static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
+ struct dst_entry **dst)
+{
+#if defined(CONFIG_INET)
+ struct flowi fl;
+ int err;
+ struct rtable *rt;
+
+ memset(&fl, 0, sizeof(fl));
+ fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
+
+ err = ip_route_output_key(&init_net, &rt, &fl);
+ if (!err)
+ *dst = &rt->u.dst;
+ return err;
+#else
+ return -ENETUNREACH;
+#endif
+}
+
+static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
+ struct dst_entry **dst)
+{
+#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
+ struct flowi fl;
+
+ memset(&fl, 0, sizeof(fl));
+ ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
+ if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
+ fl.oif = dst_addr->sin6_scope_id;
+
+ *dst = ip6_route_output(&init_net, NULL, &fl);
+ if (*dst)
+ return 0;
+#endif
+
+ return -ENETUNREACH;
+}
+
+static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
+ int ulp_type)
+{
+ struct cnic_dev *dev = NULL;
+ struct dst_entry *dst;
+ struct net_device *netdev = NULL;
+ int err = -ENETUNREACH;
+
+ if (dst_addr->sin_family == AF_INET)
+ err = cnic_get_v4_route(dst_addr, &dst);
+ else if (dst_addr->sin_family == AF_INET6) {
+ struct sockaddr_in6 *dst_addr6 =
+ (struct sockaddr_in6 *) dst_addr;
+
+ err = cnic_get_v6_route(dst_addr6, &dst);
+ } else
+ return NULL;
+
+ if (err)
+ return NULL;
+
+ if (!dst->dev)
+ goto done;
+
+ cnic_get_vlan(dst->dev, &netdev);
+
+ dev = cnic_from_netdev(netdev);
+
+done:
+ dst_release(dst);
+ if (dev)
+ cnic_put(dev);
+ return dev;
+}
+
+static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
+}
+
+static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct cnic_local *cp = dev->cnic_priv;
+ int is_v6, err, rc = -ENETUNREACH;
+ struct dst_entry *dst;
+ struct net_device *realdev;
+ u32 local_port;
+
+ if (saddr->local.v6.sin6_family == AF_INET6 &&
+ saddr->remote.v6.sin6_family == AF_INET6)
+ is_v6 = 1;
+ else if (saddr->local.v4.sin_family == AF_INET &&
+ saddr->remote.v4.sin_family == AF_INET)
+ is_v6 = 0;
+ else
+ return -EINVAL;
+
+ clear_bit(SK_F_IPV6, &csk->flags);
+
+ if (is_v6) {
+#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
+ set_bit(SK_F_IPV6, &csk->flags);
+ err = cnic_get_v6_route(&saddr->remote.v6, &dst);
+ if (err)
+ return err;
+
+ if (!dst || dst->error || !dst->dev)
+ goto err_out;
+
+ memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
+ sizeof(struct in6_addr));
+ csk->dst_port = saddr->remote.v6.sin6_port;
+ local_port = saddr->local.v6.sin6_port;
+#else
+ return rc;
+#endif
+
+ } else {
+ err = cnic_get_v4_route(&saddr->remote.v4, &dst);
+ if (err)
+ return err;
+
+ if (!dst || dst->error || !dst->dev)
+ goto err_out;
+
+ csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
+ csk->dst_port = saddr->remote.v4.sin_port;
+ local_port = saddr->local.v4.sin_port;
+ }
+
+ csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
+ if (realdev != dev->netdev)
+ goto err_out;
+
+ if (local_port >= CNIC_LOCAL_PORT_MIN &&
+ local_port < CNIC_LOCAL_PORT_MAX) {
+ if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
+ local_port = 0;
+ } else
+ local_port = 0;
+
+ if (!local_port) {
+ local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
+ if (local_port == -1) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ }
+ csk->src_port = local_port;
+
+ csk->mtu = dst_mtu(dst);
+ rc = 0;
+
+err_out:
+ dst_release(dst);
+ return rc;
+}
+
+static void cnic_init_csk_state(struct cnic_sock *csk)
+{
+ csk->state = 0;
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ clear_bit(SK_F_CLOSING, &csk->flags);
+}
+
+static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+ int err = 0;
+
+ if (!cnic_in_use(csk))
+ return -EINVAL;
+
+ if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
+ return -EINVAL;
+
+ cnic_init_csk_state(csk);
+
+ err = cnic_get_route(csk, saddr);
+ if (err)
+ goto err_out;
+
+ err = cnic_resolve_addr(csk, saddr);
+ if (!err)
+ return 0;
+
+err_out:
+ clear_bit(SK_F_CONNECT_START, &csk->flags);
+ return err;
+}
+
+static int cnic_cm_abort(struct cnic_sock *csk)
+{
+ struct cnic_local *cp = csk->dev->cnic_priv;
+ u32 opcode;
+
+ if (!cnic_in_use(csk))
+ return -EINVAL;
+
+ if (cnic_abort_prep(csk))
+ return cnic_cm_abort_req(csk);
+
+ /* Getting here means that we haven't started connect, or
+ * connect was not successful.
+ */
+
+ csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+ if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+ opcode = csk->state;
+ else
+ opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
+ cp->close_conn(csk, opcode);
+
+ return 0;
+}
+
+static int cnic_cm_close(struct cnic_sock *csk)
+{
+ if (!cnic_in_use(csk))
+ return -EINVAL;
+
+ if (cnic_close_prep(csk)) {
+ csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+ return cnic_cm_close_req(csk);
+ }
+ return 0;
+}
+
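+/* Deliver an L4 completion event to the registered ULP callback under RCU. */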
+static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
+ u8 opcode)
+{
+ struct cnic_ulp_ops *ulp_ops;
+ int ulp_type = csk->ulp_type;
+
+ rcu_read_lock();
+ ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+ if (ulp_ops) {
+ if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
+ ulp_ops->cm_connect_complete(csk);
+ else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+ ulp_ops->cm_close_complete(csk);
+ else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
+ ulp_ops->cm_remote_abort(csk);
+ else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
+ ulp_ops->cm_abort_complete(csk);
+ else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
+ ulp_ops->cm_remote_close(csk);
+ }
+ rcu_read_unlock();
+}
+
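+/* Offload the PG entry for this socket, or update it if it is already offloaded. */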
+static int cnic_cm_set_pg(struct cnic_sock *csk)
+{
+ if (cnic_offld_prep(csk)) {
+ if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+ cnic_cm_update_pg(csk);
+ else
+ cnic_cm_offload_pg(csk);
+ }
+ return 0;
+}
+
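+/* Handle a PG offload or update completion; on offload, save the PG CID and send the connect request. */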
+static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 l5_cid = kcqe->pg_host_opaque;
+ u8 opcode = kcqe->op_code;
+ struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
+
+ csk_hold(csk);
+ if (!cnic_in_use(csk))
+ goto done;
+
+ if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ goto done;
+ }
+ csk->pg_cid = kcqe->pg_cid;
+ set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+ cnic_cm_conn_req(csk);
+
+done:
+ csk_put(csk);
+}
+
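+/* Demultiplex an L4 KCQE to its socket and handle connect, close and reset completions. */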
+static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
+ u8 opcode = l4kcqe->op_code;
+ u32 l5_cid;
+ struct cnic_sock *csk;
+
+ if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
+ opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+ cnic_cm_process_offld_pg(dev, l4kcqe);
+ return;
+ }
+
+ l5_cid = l4kcqe->conn_id;
+ if (opcode & 0x80)
+ l5_cid = l4kcqe->cid;
+ if (l5_cid >= MAX_CM_SK_TBL_SZ)
+ return;
+
+ csk = &cp->csk_tbl[l5_cid];
+ csk_hold(csk);
+
+ if (!cnic_in_use(csk)) {
+ csk_put(csk);
+ return;
+ }
+
+ switch (opcode) {
+ case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
+ if (l4kcqe->status == 0)
+ set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
+
+ smp_mb__before_clear_bit();
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ cnic_cm_upcall(cp, csk, opcode);
+ break;
+
+ case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
+ if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
+ csk->state = opcode;
+ /* fall through */
+ case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
+ case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+ cp->close_conn(csk, opcode);
+ break;
+
+ case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
+ cnic_cm_upcall(cp, csk, opcode);
+ break;
+ }
+ csk_put(csk);
+}
+
+static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
+{
+ struct cnic_dev *dev = data;
+ int i;
+
+ for (i = 0; i < num; i++)
+ cnic_cm_process_kcqe(dev, kcqe[i]);
+}
+
+static struct cnic_ulp_ops cm_ulp_ops = {
+ .indicate_kcqes = cnic_cm_indicate_kcqe,
+};
+
+static void cnic_cm_free_mem(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ kfree(cp->csk_tbl);
+ cp->csk_tbl = NULL;
+ cnic_free_id_tbl(&cp->csk_port_tbl);
+}
+
+static int cnic_cm_alloc_mem(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
+ GFP_KERNEL);
+ if (!cp->csk_tbl)
+ return -ENOMEM;
+
+ if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
+ CNIC_LOCAL_PORT_MIN)) {
+ cnic_cm_free_mem(dev);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
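+/* Return 1 the first time the completion opcode indicates this connection can be closed. */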
+static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
+{
+ if ((opcode == csk->state) ||
+ (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
+ csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
+ if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
+ return 1;
+ }
+ return 0;
+}
+
+static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ clear_bit(SK_F_CONNECT_START, &csk->flags);
+ if (cnic_ready_to_close(csk, opcode)) {
+ cnic_close_conn(csk);
+ cnic_cm_upcall(cp, csk, opcode);
+ }
+}
+
+static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
+{
+}
+
+static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
+{
+ u32 seed;
+
+ get_random_bytes(&seed, 4);
+ cnic_ctx_wr(dev, 45, 0, seed);
+ return 0;
+}
+
+static int cnic_cm_open(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int err;
+
+ err = cnic_cm_alloc_mem(dev);
+ if (err)
+ return err;
+
+ err = cp->start_cm(dev);
+
+ if (err)
+ goto err_out;
+
+ dev->cm_create = cnic_cm_create;
+ dev->cm_destroy = cnic_cm_destroy;
+ dev->cm_connect = cnic_cm_connect;
+ dev->cm_abort = cnic_cm_abort;
+ dev->cm_close = cnic_cm_close;
+ dev->cm_select_dev = cnic_cm_select_dev;
+
+ cp->ulp_handle[CNIC_ULP_L4] = dev;
+ rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
+ return 0;
+
+err_out:
+ cnic_cm_free_mem(dev);
+ return err;
+}
+
+static int cnic_cm_shutdown(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int i;
+
+ cp->stop_cm(dev);
+
+ if (!cp->csk_tbl)
+ return 0;
+
+ for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
+ struct cnic_sock *csk = &cp->csk_tbl[i];
+
+ clear_bit(SK_F_INUSE, &csk->flags);
+ cnic_cm_cleanup(csk);
+ }
+ cnic_cm_free_mem(dev);
+
+ return 0;
+}
+
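+/* Zero the on-chip context memory for this CID; skipped on the 5709. */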
+static void cnic_init_context(struct cnic_dev *dev, u32 cid)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 cid_addr;
+ int i;
+
+ if (CHIP_NUM(cp) == CHIP_NUM_5709)
+ return;
+
+ cid_addr = GET_CID_ADDR(cid);
+
+ for (i = 0; i < CTX_SIZE; i += 4)
+ cnic_ctx_wr(dev, cid_addr, i, 0);
+}
+
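+/* On the 5709, program the chip's host page table with the DMA addresses of the context blocks. */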
+static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int ret = 0, i;
+ u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
+
+ if (CHIP_NUM(cp) != CHIP_NUM_5709)
+ return 0;
+
+ for (i = 0; i < cp->ctx_blks; i++) {
+ int j;
+ u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
+ u32 val;
+
+ memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
+
+ CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+ (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
+ CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+ (u64) cp->ctx_arr[i].mapping >> 32);
+ CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
+ BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
+ for (j = 0; j < 10; j++) {
+
+ val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+ if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+ break;
+ udelay(5);
+ }
+ if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+ return ret;
+}
+
+static void cnic_free_irq(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ cp->disable_int_sync(dev);
+ tasklet_disable(&cp->cnic_irq_task);
+ free_irq(ethdev->irq_arr[0].vector, dev);
+ }
+}
+
+static int cnic_init_bnx2_irq(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ int err, i = 0;
+ int sblk_num = cp->status_blk_num;
+ u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+ BNX2_HC_SB_CONFIG_1;
+
+ CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
+
+ CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
+ CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
+ CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
+
+ cp->bnx2_status_blk = cp->status_blk;
+ cp->last_status_idx = cp->bnx2_status_blk->status_idx;
+ tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
+ (unsigned long) dev);
+ err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
+ "cnic", dev);
+ if (err) {
+ tasklet_disable(&cp->cnic_irq_task);
+ return err;
+ }
+ while (cp->bnx2_status_blk->status_completion_producer_index &&
+ i < 10) {
+ CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
+ 1 << (11 + sblk_num));
+ udelay(10);
+ i++;
+ barrier();
+ }
+ if (cp->bnx2_status_blk->status_completion_producer_index) {
+ cnic_free_irq(dev);
+ goto failed;
+ }
+
+ } else {
+ struct status_block *sblk = cp->status_blk;
+ u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
+ int i = 0;
+
+ while (sblk->status_completion_producer_index && i < 10) {
+ CNIC_WR(dev, BNX2_HC_COMMAND,
+ hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+ udelay(10);
+ i++;
+ barrier();
+ }
+ if (sblk->status_completion_producer_index)
+ goto failed;
+
+ }
+ return 0;
+
+failed:
+ printk(KERN_ERR PFX "%s: KCQ index not resetting to 0.\n",
+ dev->netdev->name);
+ return -EBUSY;
+}
+
+static void cnic_enable_bnx2_int(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+
+ if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+ return;
+
+ CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+
+ if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+ return;
+
+ CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+ CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
+ synchronize_irq(ethdev->irq_arr[0].vector);
+}
+
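+/* Program the L2 TX ring context and initialize its buffer descriptors. */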
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ u32 cid_addr, tx_cid, sb_id;
+ u32 val, offset0, offset1, offset2, offset3;
+ int i;
+ struct tx_bd *txbd;
+ dma_addr_t buf_map;
+ struct status_block *s_blk = cp->status_blk;
+
+ sb_id = cp->status_blk_num;
+ tx_cid = 20;
+ cnic_init_context(dev, tx_cid);
+ cnic_init_context(dev, tx_cid + 1);
+ cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ struct status_block_msix *sblk = cp->status_blk;
+
+ tx_cid = TX_TSS_CID + sb_id - 1;
+ cnic_init_context(dev, tx_cid);
+ CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
+ (TX_TSS_CID << 7));
+ cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
+ }
+ cp->tx_cons = *cp->tx_cons_ptr;
+
+ cid_addr = GET_CID_ADDR(tx_cid);
+ if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+ u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
+
+ for (i = 0; i < PHY_CTX_SIZE; i += 4)
+ cnic_ctx_wr(dev, cid_addr2, i, 0);
+
+ offset0 = BNX2_L2CTX_TYPE_XI;
+ offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+ offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+ offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+ } else {
+ offset0 = BNX2_L2CTX_TYPE;
+ offset1 = BNX2_L2CTX_CMD_TYPE;
+ offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+ offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+ }
+ val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+ cnic_ctx_wr(dev, cid_addr, offset0, val);
+
+ val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+ cnic_ctx_wr(dev, cid_addr, offset1, val);
+
+ txbd = (struct tx_bd *) cp->l2_ring;
+
+ buf_map = cp->l2_buf_map;
+ for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
+ txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
+ txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+ }
+ val = (u64) cp->l2_ring_map >> 32;
+ cnic_ctx_wr(dev, cid_addr, offset2, val);
+ txbd->tx_bd_haddr_hi = val;
+
+ val = (u64) cp->l2_ring_map & 0xffffffff;
+ cnic_ctx_wr(dev, cid_addr, offset3, val);
+ txbd->tx_bd_haddr_lo = val;
+}
+
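+/* Program the L2 RX ring context and populate its receive buffer descriptors. */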
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ u32 cid_addr, sb_id, val, coal_reg, coal_val;
+ int i;
+ struct rx_bd *rxbd;
+ struct status_block *s_blk = cp->status_blk;
+
+ sb_id = cp->status_blk_num;
+ cnic_init_context(dev, 2);
+ cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
+ coal_reg = BNX2_HC_COMMAND;
+ coal_val = CNIC_RD(dev, coal_reg);
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ struct status_block_msix *sblk = cp->status_blk;
+
+ cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
+ coal_reg = BNX2_HC_COALESCE_NOW;
+ coal_val = 1 << (11 + sb_id);
+ }
+ i = 0;
+ while (*cp->rx_cons_ptr == 0 && i < 10) {
+ CNIC_WR(dev, coal_reg, coal_val);
+ udelay(10);
+ i++;
+ barrier();
+ }
+ cp->rx_cons = *cp->rx_cons_ptr;
+
+ cid_addr = GET_CID_ADDR(2);
+ val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
+ BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
+ cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
+
+ if (sb_id == 0)
+ val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
+ else
+ val = BNX2_L2CTX_STATUSB_NUM(sb_id);
+ cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
+
+ rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
+ for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
+ dma_addr_t buf_map;
+ int n = (i % cp->l2_rx_ring_size) + 1;
+
+ buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
+ rxbd->rx_bd_len = cp->l2_single_buf_size;
+ rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+ rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
+ rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+ }
+ val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
+ cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
+ rxbd->rx_bd_haddr_hi = val;
+
+ val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+ cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+ rxbd->rx_bd_haddr_lo = val;
+
+ val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
+ cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
+}
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
+{
+ struct kwqe *wqes[1], l2kwqe;
+
+ memset(&l2kwqe, 0, sizeof(l2kwqe));
+ wqes[0] = &l2kwqe;
+ l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
+ (L2_KWQE_OPCODE_VALUE_FLUSH <<
+ KWQE_OPCODE_SHIFT) | 2;
+ dev->submit_kwqes(dev, wqes, 1);
+}
+
+static void cnic_set_bnx2_mac(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 val;
+
+ val = cp->func << 2;
+
+ cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
+
+ val = cnic_reg_rd_ind(dev, cp->shmem_base +
+ BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
+ dev->mac_addr[0] = (u8) (val >> 8);
+ dev->mac_addr[1] = (u8) val;
+
+ CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
+
+ val = cnic_reg_rd_ind(dev, cp->shmem_base +
+ BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
+ dev->mac_addr[2] = (u8) (val >> 24);
+ dev->mac_addr[3] = (u8) (val >> 16);
+ dev->mac_addr[4] = (u8) (val >> 8);
+ dev->mac_addr[5] = (u8) val;
+
+ CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
+
+ val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
+ if (CHIP_NUM(cp) != CHIP_NUM_5709)
+ val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
+
+ CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
+ CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
+ CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
+}
+
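+/* Bring up the bnx2 hardware: MAC, kernel work/completion queue contexts, L2 rings and IRQ. */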
+static int cnic_start_bnx2_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct status_block *sblk = cp->status_blk;
+ u32 val;
+ int err;
+
+ cnic_set_bnx2_mac(dev);
+
+ val = CNIC_RD(dev, BNX2_MQ_CONFIG);
+ val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
+ if (BCM_PAGE_BITS > 12)
+ val |= (12 - 8) << 4;
+ else
+ val |= (BCM_PAGE_BITS - 8) << 4;
+
+ CNIC_WR(dev, BNX2_MQ_CONFIG, val);
+
+ CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
+ CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
+ CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
+
+ err = cnic_setup_5709_context(dev, 1);
+ if (err)
+ return err;
+
+ cnic_init_context(dev, KWQ_CID);
+ cnic_init_context(dev, KCQ_CID);
+
+ cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
+ cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+ cp->max_kwq_idx = MAX_KWQ_IDX;
+ cp->kwq_prod_idx = 0;
+ cp->kwq_con_idx = 0;
+ cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
+
+ if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
+ cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
+ else
+ cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
+
+ /* Initialize the kernel work queue context. */
+ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+ (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
+
+ val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+ val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+ val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+ val = (u32) cp->kwq_info.pgtbl_map;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+ cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
+ cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+ cp->kcq_prod_idx = 0;
+
+ /* Initialize the kernel complete queue context. */
+ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+ (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
+
+ val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+ val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+ val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+ val = (u32) cp->kcq_info.pgtbl_map;
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+ cp->int_num = 0;
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ u32 sb_id = cp->status_blk_num;
+ u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
+
+ cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+ }
+
+ /* Enable Command Scheduler notification when we write to the
+ * host producer index of the kernel contexts. */
+ CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
+
+ /* Enable Command Scheduler notification when we write to either
+ * the Send Queue or Receive Queue producer indexes of the kernel
+ * bypass contexts. */
+ CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
+ CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
+
+ /* Notify COM when the driver posts an application buffer. */
+ CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
+
+ /* Set the CP and COM doorbells. These two processors poll the
+ * doorbell for a non-zero value before running. This must be done
+ * after setting up the kernel queue contexts. */
+ cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
+ cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
+
+ cnic_init_bnx2_tx_ring(dev);
+ cnic_init_bnx2_rx_ring(dev);
+
+ err = cnic_init_bnx2_irq(dev);
+ if (err) {
+ printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
+ dev->netdev->name);
+ cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+ cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+ return err;
+ }
+
+ return 0;
+}
+
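+/* Register with the ethernet driver, allocate resources, and start the hardware and connection manager. */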
+static int cnic_start_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ int err;
+
+ if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EALREADY;
+
+ err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
+ if (err) {
+ printk(KERN_ERR PFX "%s: register_cnic failed\n",
+ dev->netdev->name);
+ goto err2;
+ }
+
+ dev->regview = ethdev->io_base;
+ cp->chip_id = ethdev->chip_id;
+ pci_dev_get(dev->pcidev);
+ cp->func = PCI_FUNC(dev->pcidev->devfn);
+ cp->status_blk = ethdev->irq_arr[0].status_blk;
+ cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
+
+ err = cp->alloc_resc(dev);
+ if (err) {
+ printk(KERN_ERR PFX "%s: allocate resource failure\n",
+ dev->netdev->name);
+ goto err1;
+ }
+
+ err = cp->start_hw(dev);
+ if (err)
+ goto err1;
+
+ err = cnic_cm_open(dev);
+ if (err)
+ goto err1;
+
+ set_bit(CNIC_F_CNIC_UP, &dev->flags);
+
+ cp->enable_int(dev);
+
+ return 0;
+
+err1:
+ ethdev->drv_unregister_cnic(dev->netdev);
+ cp->free_resc(dev);
+ pci_dev_put(dev->pcidev);
+err2:
+ return err;
+}
+
+static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+
+ cnic_disable_bnx2_int_sync(dev);
+
+ cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+ cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+
+ cnic_init_context(dev, KWQ_CID);
+ cnic_init_context(dev, KCQ_CID);
+
+ cnic_setup_5709_context(dev, 0);
+ cnic_free_irq(dev);
+
+ ethdev->drv_unregister_cnic(dev->netdev);
+
+ cnic_free_resc(dev);
+}
+
+static void cnic_stop_hw(struct cnic_dev *dev)
+{
+ if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+ struct cnic_local *cp = dev->cnic_priv;
+
+ clear_bit(CNIC_F_CNIC_UP, &dev->flags);
+ rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+ synchronize_rcu();
+ cnic_cm_shutdown(dev);
+ cp->stop_hw(dev);
+ pci_dev_put(dev->pcidev);
+ }
+}
+
+static void cnic_free_dev(struct cnic_dev *dev)
+{
+ int i = 0;
+
+ while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
+ msleep(100);
+ i++;
+ }
+ if (atomic_read(&dev->ref_count) != 0)
+ printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
+ " to zero.\n", dev->netdev->name);
+
+ printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
+ dev_put(dev->netdev);
+ kfree(dev);
+}
+
+static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
+ struct pci_dev *pdev)
+{
+ struct cnic_dev *cdev;
+ struct cnic_local *cp;
+ int alloc_size;
+
+ alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
+
+ cdev = kzalloc(alloc_size, GFP_KERNEL);
+ if (cdev == NULL) {
+ printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
+ dev->name);
+ return NULL;
+ }
+
+ cdev->netdev = dev;
+ cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
+ cdev->register_device = cnic_register_device;
+ cdev->unregister_device = cnic_unregister_device;
+ cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+
+ cp = cdev->cnic_priv;
+ cp->dev = cdev;
+ cp->uio_dev = -1;
+ cp->l2_single_buf_size = 0x400;
+ cp->l2_rx_ring_size = 3;
+
+ spin_lock_init(&cp->cnic_ulp_lock);
+
+ printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
+
+ return cdev;
+}
+
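+/* Probe the bnx2 driver for its cnic hooks and set up a cnic device on top of it. */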
+static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
+{
+ struct pci_dev *pdev;
+ struct cnic_dev *cdev;
+ struct cnic_local *cp;
+ struct cnic_eth_dev *ethdev = NULL;
+ struct cnic_eth_dev *(*probe)(void *) = NULL;
+
+ probe = __symbol_get("bnx2_cnic_probe");
+ if (probe) {
+ ethdev = (*probe)(dev);
+ symbol_put_addr(probe);
+ }
+ if (!ethdev)
+ return NULL;
+
+ pdev = ethdev->pdev;
+ if (!pdev)
+ return NULL;
+
+ dev_hold(dev);
+ pci_dev_get(pdev);
+ if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
+ pdev->device == PCI_DEVICE_ID_NX2_5709S) {
+ u8 rev;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+ if (rev < 0x10) {
+ pci_dev_put(pdev);
+ goto cnic_err;
+ }
+ }
+ pci_dev_put(pdev);
+
+ cdev = cnic_alloc_dev(dev, pdev);
+ if (cdev == NULL)
+ goto cnic_err;
+
+ set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
+ cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
+
+ cp = cdev->cnic_priv;
+ cp->ethdev = ethdev;
+ cdev->pcidev = pdev;
+
+ cp->cnic_ops = &cnic_bnx2_ops;
+ cp->start_hw = cnic_start_bnx2_hw;
+ cp->stop_hw = cnic_stop_bnx2_hw;
+ cp->setup_pgtbl = cnic_setup_page_tbl;
+ cp->alloc_resc = cnic_alloc_bnx2_resc;
+ cp->free_resc = cnic_free_resc;
+ cp->start_cm = cnic_cm_init_bnx2_hw;
+ cp->stop_cm = cnic_cm_stop_bnx2_hw;
+ cp->enable_int = cnic_enable_bnx2_int;
+ cp->disable_int_sync = cnic_disable_bnx2_int_sync;
+ cp->close_conn = cnic_close_bnx2_conn;
+ cp->next_idx = cnic_bnx2_next_idx;
+ cp->hw_idx = cnic_bnx2_hw_idx;
+ return cdev;
+
+cnic_err:
+ dev_put(dev);
+ return NULL;
+}
+
+static struct cnic_dev *is_cnic_dev(struct net_device *dev)
+{
+ struct ethtool_drvinfo drvinfo;
+ struct cnic_dev *cdev = NULL;
+
+ if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+
+ if (!strcmp(drvinfo.driver, "bnx2"))
+ cdev = init_bnx2_cnic(dev);
+ if (cdev) {
+ write_lock(&cnic_dev_lock);
+ list_add(&cdev->list, &cnic_dev_list);
+ write_unlock(&cnic_dev_lock);
+ }
+ }
+ return cdev;
+}
+
+/* netdev event handler */
+static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *netdev = ptr;
+ struct cnic_dev *dev;
+ int if_type;
+ int new_dev = 0;
+
+ dev = cnic_from_netdev(netdev);
+
+ if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
+ /* Check for the hot-plug device */
+ dev = is_cnic_dev(netdev);
+ if (dev) {
+ new_dev = 1;
+ cnic_hold(dev);
+ }
+ }
+ if (dev) {
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (new_dev)
+ cnic_ulp_init(dev);
+ else if (event == NETDEV_UNREGISTER)
+ cnic_ulp_exit(dev);
+ else if (event == NETDEV_UP) {
+ mutex_lock(&cnic_lock);
+ if (!cnic_start_hw(dev))
+ cnic_ulp_start(dev);
+ mutex_unlock(&cnic_lock);
+ }
+
+ rcu_read_lock();
+ for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+ struct cnic_ulp_ops *ulp_ops;
+ void *ctx;
+
+ ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+ if (!ulp_ops || !ulp_ops->indicate_netevent)
+ continue;
+
+ ctx = cp->ulp_handle[if_type];
+
+ ulp_ops->indicate_netevent(ctx, event);
+ }
+ rcu_read_unlock();
+
+ if (event == NETDEV_GOING_DOWN) {
+ mutex_lock(&cnic_lock);
+ cnic_ulp_stop(dev);
+ cnic_stop_hw(dev);
+ mutex_unlock(&cnic_lock);
+ } else if (event == NETDEV_UNREGISTER) {
+ write_lock(&cnic_dev_lock);
+ list_del_init(&dev->list);
+ write_unlock(&cnic_dev_lock);
+
+ cnic_put(dev);
+ cnic_free_dev(dev);
+ goto done;
+ }
+ cnic_put(dev);
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cnic_netdev_notifier = {
+ .notifier_call = cnic_netdev_event
+};
+
+static void cnic_release(void)
+{
+ struct cnic_dev *dev;
+
+ while (!list_empty(&cnic_dev_list)) {
+ dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
+ if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+ cnic_ulp_stop(dev);
+ cnic_stop_hw(dev);
+ }
+
+ cnic_ulp_exit(dev);
+ list_del_init(&dev->list);
+ cnic_free_dev(dev);
+ }
+}
+
+static int __init cnic_init(void)
+{
+ int rc = 0;
+
+ printk(KERN_INFO "%s", version);
+
+ rc = register_netdevice_notifier(&cnic_netdev_notifier);
+ if (rc) {
+ cnic_release();
+ return rc;
+ }
+
+ return 0;
+}
+
+static void __exit cnic_exit(void)
+{
+ unregister_netdevice_notifier(&cnic_netdev_notifier);
+ cnic_release();
+}
+
+module_init(cnic_init);
+module_exit(cnic_exit);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
new file mode 100644
index 00000000000..5192d4a9df5
--- /dev/null
+++ b/drivers/net/cnic.h
@@ -0,0 +1,299 @@
+/* cnic.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_H
+#define CNIC_H
+
+#define KWQ_PAGE_CNT 4
+#define KCQ_PAGE_CNT 16
+
+#define KWQ_CID 24
+#define KCQ_CID 25
+
+/*
+ * krnlq_context definition
+ */
+#define L5_KRNLQ_FLAGS 0x00000000
+#define L5_KRNLQ_SIZE 0x00000000
+#define L5_KRNLQ_TYPE 0x00000000
+#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
+#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
+#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
+#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
+#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
+#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
+#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
+#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
+#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
+#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
+#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
+#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
+#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
+#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
+#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
+#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
+#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
+#define KRNLQ_TYPE_TYPE (0xf<<28)
+#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
+#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
+
+#define L5_KRNLQ_HOST_QIDX 0x00000004
+#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
+#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
+#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
+#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
+#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
+#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
+#define L5_KRNLQ_NX_PG_QIDX 0x00000018
+#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
+#define L5_KRNLQ_QIDX_INCR 0x0000001c
+#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
+#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
+
+#define BNX2_PG_CTX_MAP 0x1a0034
+#define BNX2_ISCSI_CTX_MAP 0x1a0074
+
+struct cnic_redirect_entry {
+ struct dst_entry *old_dst;
+ struct dst_entry *new_dst;
+};
+
+#define MAX_COMPLETED_KCQE 64
+
+#define MAX_CNIC_L5_CONTEXT 256
+
+#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
+
+#define MAX_ISCSI_TBL_SZ 256
+
+#define CNIC_LOCAL_PORT_MIN 60000
+#define CNIC_LOCAL_PORT_MAX 61000
+#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
+
+#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
+#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
+#define MAX_KWQE_CNT (KWQE_CNT - 1)
+#define MAX_KCQE_CNT (KCQE_CNT - 1)
+
+#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
+#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
+
+#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
+
+#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
+
+#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
+ (MAX_KCQE_CNT - 1)) ? \
+ (x) + 2 : (x) + 1
+
+#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA(cp, x) \
+ &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
+
+#define DEF_IPID_COUNT 0xc001
+
+#define DEF_KA_TIMEOUT 10000
+#define DEF_KA_INTERVAL 300000
+#define DEF_KA_MAX_PROBE_COUNT 3
+#define DEF_TOS 0
+#define DEF_TTL 0xfe
+#define DEF_SND_SEQ_SCALE 0
+#define DEF_RCV_BUF 0xffff
+#define DEF_SND_BUF 0xffff
+#define DEF_SEED 0
+#define DEF_MAX_RT_TIME 500
+#define DEF_MAX_DA_COUNT 2
+#define DEF_SWS_TIMER 1000
+#define DEF_MAX_CWND 0xffff
+
+struct cnic_ctx {
+ u32 cid;
+ void *ctx;
+ dma_addr_t mapping;
+};
+
+#define BNX2_MAX_CID 0x2000
+
+struct cnic_dma {
+ int num_pages;
+ void **pg_arr;
+ dma_addr_t *pg_map_arr;
+ int pgtbl_size;
+ u32 *pgtbl;
+ dma_addr_t pgtbl_map;
+};
+
+struct cnic_id_tbl {
+ spinlock_t lock;
+ u32 start;
+ u32 max;
+ u32 next;
+ unsigned long *table;
+};
+
+#define CNIC_KWQ16_DATA_SIZE 128
+
+struct kwqe_16_data {
+ u8 data[CNIC_KWQ16_DATA_SIZE];
+};
+
+struct cnic_iscsi {
+ struct cnic_dma task_array_info;
+ struct cnic_dma r2tq_info;
+ struct cnic_dma hq_info;
+};
+
+struct cnic_context {
+ u32 cid;
+ struct kwqe_16_data *kwqe_data;
+ dma_addr_t kwqe_data_mapping;
+ wait_queue_head_t waitq;
+ int wait_cond;
+ unsigned long timestamp;
+ u32 ctx_flags;
+#define CTX_FL_OFFLD_START 0x00000001
+ u8 ulp_proto_id;
+ union {
+ struct cnic_iscsi *iscsi;
+ } proto;
+};
+
+struct cnic_local {
+
+ spinlock_t cnic_ulp_lock;
+ void *ulp_handle[MAX_CNIC_ULP_TYPE];
+ unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
+#define ULP_F_INIT 0
+#define ULP_F_START 1
+ struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
+
+ /* protected by ulp_lock */
+ u32 cnic_local_flags;
+#define CNIC_LCL_FL_KWQ_INIT 0x00000001
+
+ struct cnic_dev *dev;
+
+ struct cnic_eth_dev *ethdev;
+
+ void *l2_ring;
+ dma_addr_t l2_ring_map;
+ int l2_ring_size;
+ int l2_rx_ring_size;
+
+ void *l2_buf;
+ dma_addr_t l2_buf_map;
+ int l2_buf_size;
+ int l2_single_buf_size;
+
+ u16 *rx_cons_ptr;
+ u16 *tx_cons_ptr;
+ u16 rx_cons;
+ u16 tx_cons;
+
+ u32 kwq_cid_addr;
+ u32 kcq_cid_addr;
+
+ struct cnic_dma kwq_info;
+ struct kwqe **kwq;
+
+ struct cnic_dma kwq_16_data_info;
+
+ u16 max_kwq_idx;
+
+ u16 kwq_prod_idx;
+ u32 kwq_io_addr;
+
+ u16 *kwq_con_idx_ptr;
+ u16 kwq_con_idx;
+
+ struct cnic_dma kcq_info;
+ struct kcqe **kcq;
+
+ u16 kcq_prod_idx;
+ u32 kcq_io_addr;
+
+ void *status_blk;
+ struct status_block_msix *bnx2_status_blk;
+ struct host_status_block *bnx2x_status_blk;
+
+ u32 status_blk_num;
+ u32 int_num;
+ u32 last_status_idx;
+ struct tasklet_struct cnic_irq_task;
+
+ struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
+
+ struct cnic_sock *csk_tbl;
+ struct cnic_id_tbl csk_port_tbl;
+
+ struct cnic_dma conn_buf_info;
+ struct cnic_dma gbl_buf_info;
+
+ struct cnic_iscsi *iscsi_tbl;
+ struct cnic_context *ctx_tbl;
+ struct cnic_id_tbl cid_tbl;
+ int max_iscsi_conn;
+ atomic_t iscsi_conn;
+
+ /* per connection parameters */
+ int num_iscsi_tasks;
+ int num_ccells;
+ int task_array_size;
+ int r2tq_size;
+ int hq_size;
+ int num_cqs;
+
+ struct cnic_ctx *ctx_arr;
+ int ctx_blks;
+ int ctx_blk_size;
+ int cids_per_blk;
+
+ u32 chip_id;
+ int func;
+ u32 shmem_base;
+
+ u32 uio_dev;
+ struct uio_info *cnic_uinfo;
+
+ struct cnic_ops *cnic_ops;
+ int (*start_hw)(struct cnic_dev *);
+ void (*stop_hw)(struct cnic_dev *);
+ void (*setup_pgtbl)(struct cnic_dev *,
+ struct cnic_dma *);
+ int (*alloc_resc)(struct cnic_dev *);
+ void (*free_resc)(struct cnic_dev *);
+ int (*start_cm)(struct cnic_dev *);
+ void (*stop_cm)(struct cnic_dev *);
+ void (*enable_int)(struct cnic_dev *);
+ void (*disable_int_sync)(struct cnic_dev *);
+ void (*ack_int)(struct cnic_dev *);
+ void (*close_conn)(struct cnic_sock *, u32 opcode);
+ u16 (*next_idx)(u16);
+ u16 (*hw_idx)(u16);
+};
+
+struct bnx2x_bd_chain_next {
+ u32 addr_lo;
+ u32 addr_hi;
+ u8 reserved[8];
+};
+
+#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
+#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+#endif
+
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
new file mode 100644
index 00000000000..cee80f69445
--- /dev/null
+++ b/drivers/net/cnic_defs.h
@@ -0,0 +1,580 @@
+
+/* cnic_defs.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef CNIC_DEFS_H
+#define CNIC_DEFS_H
+
+/* KWQ (kernel work queue) request op codes */
+#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
+
+#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
+#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
+#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
+#define L4_KWQE_OPCODE_VALUE_RESET (53)
+#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
+#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)
+
+#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
+#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)
+
+#define L5CM_RAMROD_CMD_ID_BASE (0x80)
+#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
+#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
+#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
+#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
+#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
+
+/* KCQ (kernel completion queue) response op codes */
+#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
+#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
+#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
+#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
+#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
+#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
+#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)
+
+#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
+#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
+#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
+
+/* KCQ (kernel completion queue) completion status */
+#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
+#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
+
+#define L4_LAYER_CODE (4)
+#define L2_LAYER_CODE (2)
+
+/*
+ * L4 KCQ CQE
+ */
+struct l4_kcq {
+ u32 cid;
+ u32 pg_cid;
+ u32 conn_id;
+ u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+ u16 status;
+ u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved1;
+ u16 status;
+#endif
+ u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define L4_KCQ_RESERVED3 (0xF<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * L4 KCQ CQE PG upload
+ */
+struct l4_kcq_upload_pg {
+ u32 pg_cid;
+#if defined(__BIG_ENDIAN)
+ u16 pg_status;
+ u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pg_ipid_count;
+ u16 pg_status;
+#endif
+ u32 reserved1[5];
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * Request to gracefully close the connection
+ */
+struct l4_kwq_close_req {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 cid;
+ u32 reserved2[6];
+};
+
+
+/*
+ * The first request to be passed in order to establish connection in option2
+ */
+struct l4_kwq_connect_req1 {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u8 reserved0;
+ u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+ u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+ u8 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 cid;
+ u32 pg_cid;
+ u32 src_ip;
+ u32 dst_ip;
+#if defined(__BIG_ENDIAN)
+ u16 dst_port;
+ u16 src_port;
+#elif defined(__LITTLE_ENDIAN)
+ u16 src_port;
+ u16 dst_port;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 rsrv1[3];
+ u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+ u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+ u8 rsrv1[3];
+#endif
+ u32 rsrv2;
+};
+
+
+/*
+ * The second (optional) request to be passed in order to establish
+ * connection in option2 - for IPv6 only
+ */
+struct l4_kwq_connect_req2 {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u8 reserved0;
+ u8 rsrv;
+#elif defined(__LITTLE_ENDIAN)
+ u8 rsrv;
+ u8 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 reserved2;
+ u32 src_ip_v6_2;
+ u32 src_ip_v6_3;
+ u32 src_ip_v6_4;
+ u32 dst_ip_v6_2;
+ u32 dst_ip_v6_3;
+ u32 dst_ip_v6_4;
+};
+
+
+/*
+ * The third (and last) request to be passed in order to establish
+ * connection in option2
+ */
+struct l4_kwq_connect_req3 {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 ka_timeout;
+ u32 ka_interval;
+#if defined(__BIG_ENDIAN)
+ u8 snd_seq_scale;
+ u8 ttl;
+ u8 tos;
+ u8 ka_max_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+ u8 ka_max_probe_count;
+ u8 tos;
+ u8 ttl;
+ u8 snd_seq_scale;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 pmtu;
+ u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+ u16 mss;
+ u16 pmtu;
+#endif
+ u32 rcv_buf;
+ u32 snd_buf;
+ u32 seed;
+};
+
+
+/*
+ * a KWQE request to offload a PG connection
+ */
+struct l4_kwq_offload_pg {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 l2hdr_nbytes;
+ u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+ u8 da0;
+ u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 da1;
+ u8 da0;
+ u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+ u8 l2hdr_nbytes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 da2;
+ u8 da3;
+ u8 da4;
+ u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 da5;
+ u8 da4;
+ u8 da3;
+ u8 da2;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 sa0;
+ u8 sa1;
+ u8 sa2;
+ u8 sa3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 sa3;
+ u8 sa2;
+ u8 sa1;
+ u8 sa0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 sa4;
+ u8 sa5;
+ u16 etype;
+#elif defined(__LITTLE_ENDIAN)
+ u16 etype;
+ u8 sa5;
+ u8 sa4;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 vlan_tag;
+ u16 ipid_start;
+#elif defined(__LITTLE_ENDIAN)
+ u16 ipid_start;
+ u16 vlan_tag;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 ipid_count;
+ u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved3;
+ u16 ipid_count;
+#endif
+ u32 host_opaque;
+};
+
+
+/*
+ * Request to abortively close (reset) the connection
+ */
+struct l4_kwq_reset_req {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 cid;
+ u32 reserved2[6];
+};
+
+
+/*
+ * a KWQE request to update a PG connection
+ */
+struct l4_kwq_update_pg {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+ u8 opcode;
+ u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+ u16 oper16;
+ u8 opcode;
+ u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 pg_cid;
+ u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+ u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+ u8 pg_unused_a;
+ u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pg_ipid_count;
+ u8 pg_unused_a;
+ u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserverd3;
+ u8 da0;
+ u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 da1;
+ u8 da0;
+ u16 reserverd3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 da2;
+ u8 da3;
+ u8 da4;
+ u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 da5;
+ u8 da4;
+ u8 da3;
+ u8 da2;
+#endif
+ u32 reserved4;
+ u32 reserved5;
+};
+
+
+/*
+ * a KWQE request to upload a PG or L4 context
+ */
+struct l4_kwq_upload {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+ u8 opcode;
+ u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+ u16 oper16;
+ u8 opcode;
+ u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 cid;
+ u32 reserved2[6];
+};
+
+#endif /* CNIC_DEFS_H */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
new file mode 100644
index 00000000000..06380963a34
--- /dev/null
+++ b/drivers/net/cnic_if.h
@@ -0,0 +1,299 @@
+/* cnic_if.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_IF_H
+#define CNIC_IF_H
+
+#define CNIC_MODULE_VERSION "2.0.0"
+#define CNIC_MODULE_RELDATE "May 21, 2009"
+
+#define CNIC_ULP_RDMA 0
+#define CNIC_ULP_ISCSI 1
+#define CNIC_ULP_L4 2
+#define MAX_CNIC_ULP_TYPE_EXT 2
+#define MAX_CNIC_ULP_TYPE 3
+
+struct kwqe {
+ u32 kwqe_op_flag;
+
+#define KWQE_OPCODE_MASK 0x00ff0000
+#define KWQE_OPCODE_SHIFT 16
+#define KWQE_FLAGS_LAYER_SHIFT 28
+#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
+
+ u32 kwqe_info0;
+ u32 kwqe_info1;
+ u32 kwqe_info2;
+ u32 kwqe_info3;
+ u32 kwqe_info4;
+ u32 kwqe_info5;
+ u32 kwqe_info6;
+};
+
+struct kwqe_16 {
+ u32 kwqe_info0;
+ u32 kwqe_info1;
+ u32 kwqe_info2;
+ u32 kwqe_info3;
+};
+
+struct kcqe {
+ u32 kcqe_info0;
+ u32 kcqe_info1;
+ u32 kcqe_info2;
+ u32 kcqe_info3;
+ u32 kcqe_info4;
+ u32 kcqe_info5;
+ u32 kcqe_info6;
+ u32 kcqe_op_flag;
+ #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
+ #define KCQE_FLAGS_LAYER_MASK (0x7<<28)
+ #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
+ #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
+ #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
+ #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
+ #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
+ #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
+ #define KCQE_FLAGS_NEXT (1<<31)
+ #define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
+ #define KCQE_FLAGS_OPCODE_SHIFT (16)
+ #define KCQE_OPCODE(op) \
+ (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
+};
+
+#define MAX_CNIC_CTL_DATA 64
+#define MAX_DRV_CTL_DATA 64
+
+#define CNIC_CTL_STOP_CMD 1
+#define CNIC_CTL_START_CMD 2
+#define CNIC_CTL_COMPLETION_CMD 3
+
+#define DRV_CTL_IO_WR_CMD 0x101
+#define DRV_CTL_IO_RD_CMD 0x102
+#define DRV_CTL_CTX_WR_CMD 0x103
+#define DRV_CTL_CTXTBL_WR_CMD 0x104
+#define DRV_CTL_COMPLETION_CMD 0x105
+
+struct cnic_ctl_completion {
+ u32 cid;
+};
+
+struct drv_ctl_completion {
+ u32 comp_count;
+};
+
+struct cnic_ctl_info {
+ int cmd;
+ union {
+ struct cnic_ctl_completion comp;
+ char bytes[MAX_CNIC_CTL_DATA];
+ } data;
+};
+
+struct drv_ctl_io {
+ u32 cid_addr;
+ u32 offset;
+ u32 data;
+ dma_addr_t dma_addr;
+};
+
+struct drv_ctl_info {
+ int cmd;
+ union {
+ struct drv_ctl_completion comp;
+ struct drv_ctl_io io;
+ char bytes[MAX_DRV_CTL_DATA];
+ } data;
+};
+
+struct cnic_ops {
+ struct module *cnic_owner;
+ /* Calls to these functions are protected by RCU. When
+ * unregistering, we wait for any calls to complete before
+ * continuing.
+ */
+ int (*cnic_handler)(void *, void *);
+ int (*cnic_ctl)(void *, struct cnic_ctl_info *);
+};
+
+#define MAX_CNIC_VEC 8
+
+struct cnic_irq {
+ unsigned int vector;
+ void *status_blk;
+ u32 status_blk_num;
+ u32 irq_flags;
+#define CNIC_IRQ_FL_MSIX 0x00000001
+};
+
+struct cnic_eth_dev {
+ struct module *drv_owner;
+ u32 drv_state;
+#define CNIC_DRV_STATE_REGD 0x00000001
+#define CNIC_DRV_STATE_USING_MSIX 0x00000002
+ u32 chip_id;
+ u32 max_kwqe_pending;
+ struct pci_dev *pdev;
+ void __iomem *io_base;
+
+ u32 ctx_tbl_offset;
+ u32 ctx_tbl_len;
+ int ctx_blk_size;
+ u32 starting_cid;
+ u32 max_iscsi_conn;
+ u32 max_fcoe_conn;
+ u32 max_rdma_conn;
+ u32 reserved0[2];
+
+ int num_irq;
+ struct cnic_irq irq_arr[MAX_CNIC_VEC];
+ int (*drv_register_cnic)(struct net_device *,
+ struct cnic_ops *, void *);
+ int (*drv_unregister_cnic)(struct net_device *);
+ int (*drv_submit_kwqes_32)(struct net_device *,
+ struct kwqe *[], u32);
+ int (*drv_submit_kwqes_16)(struct net_device *,
+ struct kwqe_16 *[], u32);
+ int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+ unsigned long reserved1[2];
+};
+
+struct cnic_sockaddr {
+ union {
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } local;
+ union {
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } remote;
+};
+
+struct cnic_sock {
+ struct cnic_dev *dev;
+ void *context;
+ u32 src_ip[4];
+ u32 dst_ip[4];
+ u16 src_port;
+ u16 dst_port;
+ u16 vlan_id;
+ unsigned char old_ha[6];
+ unsigned char ha[6];
+ u32 mtu;
+ u32 cid;
+ u32 l5_cid;
+ u32 pg_cid;
+ int ulp_type;
+
+ u32 ka_timeout;
+ u32 ka_interval;
+ u8 ka_max_probe_count;
+ u8 tos;
+ u8 ttl;
+ u8 snd_seq_scale;
+ u32 rcv_buf;
+ u32 snd_buf;
+ u32 seed;
+
+ unsigned long tcp_flags;
+#define SK_TCP_NO_DELAY_ACK 0x1
+#define SK_TCP_KEEP_ALIVE 0x2
+#define SK_TCP_NAGLE 0x4
+#define SK_TCP_TIMESTAMP 0x8
+#define SK_TCP_SACK 0x10
+#define SK_TCP_SEG_SCALING 0x20
+ unsigned long flags;
+#define SK_F_INUSE 0
+#define SK_F_OFFLD_COMPLETE 1
+#define SK_F_OFFLD_SCHED 2
+#define SK_F_PG_OFFLD_COMPLETE 3
+#define SK_F_CONNECT_START 4
+#define SK_F_IPV6 5
+#define SK_F_CLOSING 7
+
+ atomic_t ref_count;
+ u32 state;
+ struct kwqe kwqe1;
+ struct kwqe kwqe2;
+ struct kwqe kwqe3;
+};
+
+struct cnic_dev {
+ struct net_device *netdev;
+ struct pci_dev *pcidev;
+ void __iomem *regview;
+ struct list_head list;
+
+ int (*register_device)(struct cnic_dev *dev, int ulp_type,
+ void *ulp_ctx);
+ int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
+ int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num_wqes);
+ int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
+ u32 num_wqes);
+
+ int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
+ void *);
+ int (*cm_destroy)(struct cnic_sock *);
+ int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
+ int (*cm_abort)(struct cnic_sock *);
+ int (*cm_close)(struct cnic_sock *);
+ struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
+ int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
+ char *data, u16 data_size);
+ unsigned long flags;
+#define CNIC_F_CNIC_UP 1
+#define CNIC_F_BNX2_CLASS 3
+#define CNIC_F_BNX2X_CLASS 4
+ atomic_t ref_count;
+ u8 mac_addr[6];
+
+ int max_iscsi_conn;
+ int max_fcoe_conn;
+ int max_rdma_conn;
+
+ void *cnic_priv;
+};
+
+#define CNIC_WR(dev, off, val) writel(val, dev->regview + off)
+#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off)
+#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off)
+#define CNIC_RD(dev, off) readl(dev->regview + off)
+#define CNIC_RD16(dev, off) readw(dev->regview + off)
+
+struct cnic_ulp_ops {
+ /* Calls to these functions are protected by RCU. When
+ * unregistering, we wait for any calls to complete before
+ * continuing.
+ */
+
+ void (*cnic_init)(struct cnic_dev *dev);
+ void (*cnic_exit)(struct cnic_dev *dev);
+ void (*cnic_start)(void *ulp_ctx);
+ void (*cnic_stop)(void *ulp_ctx);
+ void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
+ u32 num_cqes);
+ void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
+ void (*cm_connect_complete)(struct cnic_sock *);
+ void (*cm_close_complete)(struct cnic_sock *);
+ void (*cm_abort_complete)(struct cnic_sock *);
+ void (*cm_remote_close)(struct cnic_sock *);
+ void (*cm_remote_abort)(struct cnic_sock *);
+ void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
+ char *data, u16 data_size);
+ struct module *owner;
+};
+
+extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+
+extern int cnic_unregister_driver(int ulp_type);
+
+#endif
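For context, the header above is what upper-layer protocol (ULP) drivers such as the iSCSI offload code build against. A minimal sketch of how a ULP might hook into it, assuming only the cnic_register_driver()/cnic_unregister_driver() entry points and struct cnic_ulp_ops declared above; the my_ulp_* names and the two trivial callbacks are hypothetical:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/in6.h>
#include "cnic_if.h"

/* Hypothetical ULP callbacks; a real driver would set up per-device state. */
static void my_ulp_cnic_init(struct cnic_dev *dev)
{
	pr_info("cnic device %s added\n", dev->netdev->name);
}

static void my_ulp_cnic_exit(struct cnic_dev *dev)
{
	pr_info("cnic device %s removed\n", dev->netdev->name);
}

static struct cnic_ulp_ops my_ulp_ops = {
	.cnic_init	= my_ulp_cnic_init,
	.cnic_exit	= my_ulp_cnic_exit,
	.owner		= THIS_MODULE,
};

static int __init my_ulp_init(void)
{
	/* Register as the iSCSI ULP; callbacks run under RCU protection. */
	return cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
}

static void __exit my_ulp_exit(void)
{
	cnic_unregister_driver(CNIC_ULP_ISCSI);
}

module_init(my_ulp_init);
module_exit(my_ulp_exit);
MODULE_LICENSE("GPL");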
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 3f476c7c073..58afafbd3b9 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -202,7 +202,7 @@ struct cpmac_priv {
void __iomem *regs;
struct mii_bus *mii_bus;
struct phy_device *phy;
- char phy_name[BUS_ID_SIZE];
+ char phy_name[MII_BUS_ID_SIZE + 3];
int oldlink, oldspeed, oldduplex;
u32 msg_enable;
struct net_device *dev;
@@ -615,13 +615,13 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
dev_kfree_skb_irq(desc->skb);
desc->skb = NULL;
- if (netif_subqueue_stopped(dev, queue))
+ if (__netif_subqueue_stopped(dev, queue))
netif_wake_subqueue(dev, queue);
} else {
if (netif_msg_tx_err(priv) && net_ratelimit())
printk(KERN_WARNING
"%s: end_xmit: spurious interrupt\n", dev->name);
- if (netif_subqueue_stopped(dev, queue))
+ if (__netif_subqueue_stopped(dev, queue))
netif_wake_subqueue(dev, queue);
}
}
@@ -731,7 +731,6 @@ static void cpmac_clear_tx(struct net_device *dev)
static void cpmac_hw_error(struct work_struct *work)
{
- int i;
struct cpmac_priv *priv =
container_of(work, struct cpmac_priv, reset_work);
@@ -818,7 +817,6 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
static void cpmac_tx_timeout(struct net_device *dev)
{
- int i;
struct cpmac_priv *priv = netdev_priv(dev);
spin_lock(&priv->lock);
@@ -1093,11 +1091,24 @@ static int cpmac_stop(struct net_device *dev)
return 0;
}
+static const struct net_device_ops cpmac_netdev_ops = {
+ .ndo_open = cpmac_open,
+ .ndo_stop = cpmac_stop,
+ .ndo_start_xmit = cpmac_start_xmit,
+ .ndo_tx_timeout = cpmac_tx_timeout,
+ .ndo_set_multicast_list = cpmac_set_multicast_list,
+ .ndo_do_ioctl = cpmac_ioctl,
+ .ndo_set_config = cpmac_config,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
static int external_switch;
static int __devinit cpmac_probe(struct platform_device *pdev)
{
- int rc, phy_id, i;
+ int rc, phy_id;
char *mdio_bus_id = "0";
struct resource *mem;
struct cpmac_priv *priv;
@@ -1143,14 +1154,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
dev->irq = platform_get_irq_byname(pdev, "irq");
- dev->open = cpmac_open;
- dev->stop = cpmac_stop;
- dev->set_config = cpmac_config;
- dev->hard_start_xmit = cpmac_start_xmit;
- dev->do_ioctl = cpmac_ioctl;
- dev->set_multicast_list = cpmac_set_multicast_list;
- dev->tx_timeout = cpmac_tx_timeout;
- dev->ethtool_ops = &cpmac_ethtool_ops;
+ dev->netdev_ops = &cpmac_netdev_ops;
+ dev->ethtool_ops = &cpmac_ethtool_ops;
netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
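The hunks above replace cpmac's old per-field callback assignments with a single const struct net_device_ops table. A minimal sketch of the same conversion pattern for a hypothetical driver (the foo_* names are illustrative, not from this patch):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Hypothetical handlers; a real driver supplies its own implementations. */
static int foo_open(struct net_device *dev) { return 0; }
static int foo_stop(struct net_device *dev) { return 0; }
static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* One const ops table replaces the old dev->open/stop/... assignments. */
static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_stop,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &foo_netdev_ops;	/* instead of dev->open = ... */
}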
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 7433b88eed7..3eee666a9cd 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1551,7 +1551,7 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq(&lp->lock);
if (net_debug) printk("cs89x0: Tx buffer not free!\n");
- return 1;
+ return NETDEV_TX_BUSY;
}
/* Write the contents of the packet */
writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
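The cs89x0 hunk above swaps a bare return 1 for the named NETDEV_TX_BUSY code. A short sketch of how that return convention is typically used in a transmit handler (the bar_* names are hypothetical):

#include <linux/netdevice.h>

/* Hypothetical ring-full test; real drivers check their own TX ring state. */
static bool bar_tx_ring_full(struct net_device *dev)
{
	return false;
}

static int bar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (bar_tx_ring_full(dev)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* core keeps the skb and retries */
	}

	/* ... hand the packet to hardware here ... */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}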
diff --git a/drivers/net/cxgb3/Makefile b/drivers/net/cxgb3/Makefile
index 34346798532..29aff78c782 100644
--- a/drivers/net/cxgb3/Makefile
+++ b/drivers/net/cxgb3/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
- xgmac.o sge.o l2t.o cxgb3_offload.o
+ xgmac.o sge.o l2t.o cxgb3_offload.o aq100x.o
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 714df2b675e..1694fad3872 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -85,8 +85,8 @@ struct fl_pg_chunk {
struct page *page;
void *va;
unsigned int offset;
- u64 *p_cnt;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ unsigned long *p_cnt;
+ dma_addr_t mapping;
};
struct rx_desc;
@@ -195,7 +195,7 @@ struct sge_qset { /* an SGE queue set */
struct sge_rspq rspq;
struct sge_fl fl[SGE_RXQ_PER_SET];
struct sge_txq txq[SGE_TXQ_PER_SET];
- struct napi_gro_fraginfo lro_frag_tbl;
+ int nomem;
int lro_enabled;
void *lro_va;
struct net_device *netdev;
@@ -253,6 +253,8 @@ struct adapter {
struct mutex mdio_lock;
spinlock_t stats_lock;
spinlock_t work_lock;
+
+ struct sk_buff *nofail_skb;
};
static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
index e1b22490ff5..9fe008ec9ba 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -33,14 +33,6 @@
#include "regs.h"
enum {
- PMD_RSD = 10, /* PMA/PMD receive signal detect register */
- PCS_STAT1_X = 24, /* 10GBASE-X PCS status 1 register */
- PCS_STAT1_R = 32, /* 10GBASE-R PCS status 1 register */
- XS_LN_STAT = 24 /* XS lane status register */
-};
-
-enum {
- AEL100X_TX_DISABLE = 9,
AEL100X_TX_CONFIG1 = 0xc002,
AEL1002_PWR_DOWN_HI = 0xc011,
AEL1002_PWR_DOWN_LO = 0xc012,
@@ -52,12 +44,33 @@ enum {
AEL_I2C_STAT = 0xc30c,
AEL2005_GPIO_CTRL = 0xc214,
AEL2005_GPIO_STAT = 0xc215,
+
+ AEL2020_GPIO_INTR = 0xc103, /* Latch High (LH) */
+ AEL2020_GPIO_CTRL = 0xc108, /* Store Clear (SC) */
+ AEL2020_GPIO_STAT = 0xc10c, /* Read Only (RO) */
+ AEL2020_GPIO_CFG = 0xc110, /* Read Write (RW) */
+
+ AEL2020_GPIO_SDA = 0, /* IN: i2c serial data */
+ AEL2020_GPIO_MODDET = 1, /* IN: Module Detect */
+ AEL2020_GPIO_0 = 3, /* IN: unassigned */
+ AEL2020_GPIO_1 = 2, /* OUT: unassigned */
+ AEL2020_GPIO_LSTAT = AEL2020_GPIO_1, /* wired to link status LED */
};
enum { edc_none, edc_sr, edc_twinax };
/* PHY module I2C device address */
-#define MODULE_DEV_ADDR 0xa0
+enum {
+ MODULE_DEV_ADDR = 0xa0,
+ SFF_DEV_ADDR = 0xa2,
+};
+
+/* PHY transceiver type */
+enum {
+ phy_transtype_unknown = 0,
+ phy_transtype_sfp = 3,
+ phy_transtype_xfp = 6,
+};
#define AEL2005_MODDET_IRQ 4
@@ -74,8 +87,8 @@ static int set_phy_regs(struct cphy *phy, const struct reg_val *rv)
for (err = 0; rv->mmd_addr && !err; rv++) {
if (rv->clear_bits == 0xffff)
- err = mdio_write(phy, rv->mmd_addr, rv->reg_addr,
- rv->set_bits);
+ err = t3_mdio_write(phy, rv->mmd_addr, rv->reg_addr,
+ rv->set_bits);
else
err = t3_mdio_change_bits(phy, rv->mmd_addr,
rv->reg_addr, rv->clear_bits,
@@ -86,21 +99,54 @@ static int set_phy_regs(struct cphy *phy, const struct reg_val *rv)
static void ael100x_txon(struct cphy *phy)
{
- int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
+ int tx_on_gpio =
+ phy->mdio.prtad == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
msleep(100);
t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
msleep(30);
}
+/*
+ * Read an 8-bit word from a device attached to the PHY's i2c bus.
+ */
+static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr)
+{
+ int i, err;
+ unsigned int stat, data;
+
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL,
+ (dev_addr << 8) | (1 << 8) | word_addr);
+ if (err)
+ return err;
+
+ for (i = 0; i < 200; i++) {
+ msleep(1);
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat);
+ if (err)
+ return err;
+ if ((stat & 3) == 1) {
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA,
+ &data);
+ if (err)
+ return err;
+ return data >> 8;
+ }
+ }
+ CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %#x.%#x timed out\n",
+ phy->mdio.prtad, dev_addr, word_addr);
+ return -ETIMEDOUT;
+}
+
static int ael1002_power_down(struct cphy *phy, int enable)
{
int err;
- err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_DISABLE, !!enable);
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, !!enable);
if (!err)
- err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
- BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
+ err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER, enable);
return err;
}
@@ -109,11 +155,11 @@ static int ael1002_reset(struct cphy *phy, int wait)
int err;
if ((err = ael1002_power_down(phy, 0)) ||
- (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_CONFIG1, 1)) ||
- (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_HI, 0)) ||
- (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_LO, 0)) ||
- (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_XFI_EQL, 0x18)) ||
- (err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL1002_LB_EN,
+ (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL100X_TX_CONFIG1, 1)) ||
+ (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_HI, 0)) ||
+ (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_LO, 0)) ||
+ (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_XFI_EQL, 0x18)) ||
+ (err = t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL1002_LB_EN,
0, 1 << 5)))
return err;
return 0;
@@ -132,12 +178,15 @@ static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed,
{
if (link_ok) {
unsigned int stat0, stat1, stat2;
- int err = mdio_read(phy, MDIO_DEV_PMA_PMD, PMD_RSD, &stat0);
+ int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
+ MDIO_PMA_RXDET, &stat0);
if (!err)
- err = mdio_read(phy, MDIO_DEV_PCS, PCS_STAT1_R, &stat1);
+ err = t3_mdio_read(phy, MDIO_MMD_PCS,
+ MDIO_PCS_10GBRT_STAT1, &stat1);
if (!err)
- err = mdio_read(phy, MDIO_DEV_XGXS, XS_LN_STAT, &stat2);
+ err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_LNSTAT, &stat2);
if (err)
return err;
*link_ok = (stat0 & stat1 & (stat2 >> 12)) & 1;
@@ -157,6 +206,7 @@ static struct cphy_ops ael1002_ops = {
.intr_handler = ael1002_intr_noop,
.get_link_status = get_link_status_r,
.power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
};
int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
@@ -171,13 +221,13 @@ int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
static int ael1006_reset(struct cphy *phy, int wait)
{
- return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
+ return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait);
}
static int ael1006_power_down(struct cphy *phy, int enable)
{
- return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
- BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
+ return mdio_set_flag(&phy->mdio, phy->mdio.prtad, MDIO_MMD_PMAPMD,
+ MDIO_CTRL1, MDIO_CTRL1_LPOWER, enable);
}
static struct cphy_ops ael1006_ops = {
@@ -188,6 +238,7 @@ static struct cphy_ops ael1006_ops = {
.intr_handler = t3_phy_lasi_intr_handler,
.get_link_status = get_link_status_r,
.power_down = ael1006_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
};
int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
@@ -200,12 +251,57 @@ int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
return 0;
}
+/*
+ * Decode our module type.
+ */
+static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms)
+{
+ int v;
+
+ if (delay_ms)
+ msleep(delay_ms);
+
+ /* see SFF-8472 for below */
+ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3);
+ if (v < 0)
+ return v;
+
+ if (v == 0x10)
+ return phy_modtype_sr;
+ if (v == 0x20)
+ return phy_modtype_lr;
+ if (v == 0x40)
+ return phy_modtype_lrm;
+
+ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6);
+ if (v < 0)
+ return v;
+ if (v != 4)
+ goto unknown;
+
+ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10);
+ if (v < 0)
+ return v;
+
+ if (v & 0x80) {
+ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12);
+ if (v < 0)
+ return v;
+ return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax;
+ }
+unknown:
+ return phy_modtype_unknown;
+}
+
+/*
+ * Code to support the Aeluros/NetLogic 2005 10Gb PHY.
+ */
static int ael2005_setup_sr_edc(struct cphy *phy)
{
static struct reg_val regs[] = {
- { MDIO_DEV_PMA_PMD, 0xc003, 0xffff, 0x181 },
- { MDIO_DEV_PMA_PMD, 0xc010, 0xffff, 0x448a },
- { MDIO_DEV_PMA_PMD, 0xc04a, 0xffff, 0x5200 },
+ { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 },
+ { MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a },
+ { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 },
{ 0, 0, 0, 0 }
};
static u16 sr_edc[] = {
@@ -490,8 +586,8 @@ static int ael2005_setup_sr_edc(struct cphy *phy)
msleep(50);
for (i = 0; i < ARRAY_SIZE(sr_edc) && !err; i += 2)
- err = mdio_write(phy, MDIO_DEV_PMA_PMD, sr_edc[i],
- sr_edc[i + 1]);
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, sr_edc[i],
+ sr_edc[i + 1]);
if (!err)
phy->priv = edc_sr;
return err;
@@ -500,12 +596,12 @@ static int ael2005_setup_sr_edc(struct cphy *phy)
static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
{
static struct reg_val regs[] = {
- { MDIO_DEV_PMA_PMD, 0xc04a, 0xffff, 0x5a00 },
+ { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 },
{ 0, 0, 0, 0 }
};
static struct reg_val preemphasis[] = {
- { MDIO_DEV_PMA_PMD, 0xc014, 0xffff, 0xfe16 },
- { MDIO_DEV_PMA_PMD, 0xc015, 0xffff, 0xa000 },
+ { MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 },
+ { MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 },
{ 0, 0, 0, 0 }
};
static u16 twinax_edc[] = {
@@ -887,132 +983,73 @@ static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
msleep(50);
for (i = 0; i < ARRAY_SIZE(twinax_edc) && !err; i += 2)
- err = mdio_write(phy, MDIO_DEV_PMA_PMD, twinax_edc[i],
- twinax_edc[i + 1]);
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, twinax_edc[i],
+ twinax_edc[i + 1]);
if (!err)
phy->priv = edc_twinax;
return err;
}
-static int ael2005_i2c_rd(struct cphy *phy, int dev_addr, int word_addr)
-{
- int i, err;
- unsigned int stat, data;
-
- err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL_I2C_CTRL,
- (dev_addr << 8) | (1 << 8) | word_addr);
- if (err)
- return err;
-
- for (i = 0; i < 5; i++) {
- msleep(1);
- err = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL_I2C_STAT, &stat);
- if (err)
- return err;
- if ((stat & 3) == 1) {
- err = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL_I2C_DATA,
- &data);
- if (err)
- return err;
- return data >> 8;
- }
- }
- CH_WARN(phy->adapter, "PHY %u I2C read of addr %u timed out\n",
- phy->addr, word_addr);
- return -ETIMEDOUT;
-}
-
-static int get_module_type(struct cphy *phy, int delay_ms)
+static int ael2005_get_module_type(struct cphy *phy, int delay_ms)
{
int v;
unsigned int stat;
- v = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, &stat);
+ v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, &stat);
if (v)
return v;
if (stat & (1 << 8)) /* module absent */
return phy_modtype_none;
- if (delay_ms)
- msleep(delay_ms);
-
- /* see SFF-8472 for below */
- v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 3);
- if (v < 0)
- return v;
-
- if (v == 0x10)
- return phy_modtype_sr;
- if (v == 0x20)
- return phy_modtype_lr;
- if (v == 0x40)
- return phy_modtype_lrm;
-
- v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 6);
- if (v < 0)
- return v;
- if (v != 4)
- goto unknown;
-
- v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 10);
- if (v < 0)
- return v;
-
- if (v & 0x80) {
- v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 0x12);
- if (v < 0)
- return v;
- return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax;
- }
-unknown:
- return phy_modtype_unknown;
+ return ael2xxx_get_module_type(phy, delay_ms);
}
static int ael2005_intr_enable(struct cphy *phy)
{
- int err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, 0x200);
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x200);
return err ? err : t3_phy_lasi_intr_enable(phy);
}
static int ael2005_intr_disable(struct cphy *phy)
{
- int err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, 0x100);
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x100);
return err ? err : t3_phy_lasi_intr_disable(phy);
}
static int ael2005_intr_clear(struct cphy *phy)
{
- int err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, 0xd00);
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0xd00);
return err ? err : t3_phy_lasi_intr_clear(phy);
}
static int ael2005_reset(struct cphy *phy, int wait)
{
static struct reg_val regs0[] = {
- { MDIO_DEV_PMA_PMD, 0xc001, 0, 1 << 5 },
- { MDIO_DEV_PMA_PMD, 0xc017, 0, 1 << 5 },
- { MDIO_DEV_PMA_PMD, 0xc013, 0xffff, 0xf341 },
- { MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0x8000 },
- { MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0x8100 },
- { MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0x8000 },
- { MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0 },
+ { MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 },
+ { MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 },
+ { MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 },
+ { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 },
+ { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8100 },
+ { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 },
+ { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 },
{ 0, 0, 0, 0 }
};
static struct reg_val regs1[] = {
- { MDIO_DEV_PMA_PMD, 0xca00, 0xffff, 0x0080 },
- { MDIO_DEV_PMA_PMD, 0xca12, 0xffff, 0 },
+ { MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 },
+ { MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 },
{ 0, 0, 0, 0 }
};
int err;
unsigned int lasi_ctrl;
- err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, &lasi_ctrl);
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+ &lasi_ctrl);
if (err)
return err;
- err = t3_phy_reset(phy, MDIO_DEV_PMA_PMD, 0);
+ err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 0);
if (err)
return err;
@@ -1024,7 +1061,7 @@ static int ael2005_reset(struct cphy *phy, int wait)
msleep(50);
- err = get_module_type(phy, 0);
+ err = ael2005_get_module_type(phy, 0);
if (err < 0)
return err;
phy->modtype = err;
@@ -1051,18 +1088,18 @@ static int ael2005_intr_handler(struct cphy *phy)
unsigned int stat;
int ret, edc_needed, cause = 0;
- ret = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_STAT, &stat);
+ ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_STAT, &stat);
if (ret)
return ret;
if (stat & AEL2005_MODDET_IRQ) {
- ret = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL,
- 0xd00);
+ ret = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL,
+ 0xd00);
if (ret)
return ret;
/* modules have max 300 ms init time after hot plug */
- ret = get_module_type(phy, 300);
+ ret = ael2005_get_module_type(phy, 300);
if (ret < 0)
return ret;
@@ -1098,6 +1135,7 @@ static struct cphy_ops ael2005_ops = {
.intr_handler = ael2005_intr_handler,
.get_link_status = get_link_status_r,
.power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
};
int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
@@ -1107,11 +1145,667 @@ int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_IRQ, "10GBASE-R");
msleep(125);
- return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL_OPT_SETTINGS, 0,
+ return t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS, 0,
1 << 5);
}
/*
+ * Setup EDC and other parameters for operation with an optical module.
+ */
+static int ael2020_setup_sr_edc(struct cphy *phy)
+{
+ static struct reg_val regs[] = {
+ /* set CDR offset to 10 */
+ { MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a },
+
+ /* adjust 10G RX bias current */
+ { MDIO_MMD_PMAPMD, 0xcb1b, 0xffff, 0x0200 },
+ { MDIO_MMD_PMAPMD, 0xcb1c, 0xffff, 0x00f0 },
+ { MDIO_MMD_PMAPMD, 0xcc06, 0xffff, 0x00e0 },
+
+ /* end */
+ { 0, 0, 0, 0 }
+ };
+ int err;
+
+ err = set_phy_regs(phy, regs);
+ msleep(50);
+ if (err)
+ return err;
+
+ phy->priv = edc_sr;
+ return 0;
+}
+
+/*
+ * Setup EDC and other parameters for operation with a TWINAX module.
+ */
+static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype)
+{
+ /* set uC to 40MHz */
+ static struct reg_val uCclock40MHz[] = {
+ { MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 },
+ { MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 },
+ { 0, 0, 0, 0 }
+ };
+
+ /* activate uC clock */
+ static struct reg_val uCclockActivate[] = {
+ { MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 },
+ { 0, 0, 0, 0 }
+ };
+
+ /* set PC to start of SRAM and activate uC */
+ static struct reg_val uCactivate[] = {
+ { MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 },
+ { MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 },
+ { 0, 0, 0, 0 }
+ };
+
+ /* TWINAX EDC firmware */
+ static u16 twinax_edc[] = {
+ 0xd800, 0x4009,
+ 0xd801, 0x2fff,
+ 0xd802, 0x300f,
+ 0xd803, 0x40aa,
+ 0xd804, 0x401c,
+ 0xd805, 0x401e,
+ 0xd806, 0x2ff4,
+ 0xd807, 0x3dc4,
+ 0xd808, 0x2035,
+ 0xd809, 0x3035,
+ 0xd80a, 0x6524,
+ 0xd80b, 0x2cb2,
+ 0xd80c, 0x3012,
+ 0xd80d, 0x1002,
+ 0xd80e, 0x26e2,
+ 0xd80f, 0x3022,
+ 0xd810, 0x1002,
+ 0xd811, 0x27d2,
+ 0xd812, 0x3022,
+ 0xd813, 0x1002,
+ 0xd814, 0x2822,
+ 0xd815, 0x3012,
+ 0xd816, 0x1002,
+ 0xd817, 0x2492,
+ 0xd818, 0x3022,
+ 0xd819, 0x1002,
+ 0xd81a, 0x2772,
+ 0xd81b, 0x3012,
+ 0xd81c, 0x1002,
+ 0xd81d, 0x23d2,
+ 0xd81e, 0x3022,
+ 0xd81f, 0x1002,
+ 0xd820, 0x22cd,
+ 0xd821, 0x301d,
+ 0xd822, 0x27f2,
+ 0xd823, 0x3022,
+ 0xd824, 0x1002,
+ 0xd825, 0x5553,
+ 0xd826, 0x0307,
+ 0xd827, 0x2522,
+ 0xd828, 0x3022,
+ 0xd829, 0x1002,
+ 0xd82a, 0x2142,
+ 0xd82b, 0x3012,
+ 0xd82c, 0x1002,
+ 0xd82d, 0x4016,
+ 0xd82e, 0x5e63,
+ 0xd82f, 0x0344,
+ 0xd830, 0x2142,
+ 0xd831, 0x3012,
+ 0xd832, 0x1002,
+ 0xd833, 0x400e,
+ 0xd834, 0x2522,
+ 0xd835, 0x3022,
+ 0xd836, 0x1002,
+ 0xd837, 0x2b52,
+ 0xd838, 0x3012,
+ 0xd839, 0x1002,
+ 0xd83a, 0x2742,
+ 0xd83b, 0x3022,
+ 0xd83c, 0x1002,
+ 0xd83d, 0x25e2,
+ 0xd83e, 0x3022,
+ 0xd83f, 0x1002,
+ 0xd840, 0x2fa4,
+ 0xd841, 0x3dc4,
+ 0xd842, 0x6624,
+ 0xd843, 0x414b,
+ 0xd844, 0x56b3,
+ 0xd845, 0x03c6,
+ 0xd846, 0x866b,
+ 0xd847, 0x400c,
+ 0xd848, 0x2712,
+ 0xd849, 0x3012,
+ 0xd84a, 0x1002,
+ 0xd84b, 0x2c4b,
+ 0xd84c, 0x309b,
+ 0xd84d, 0x56b3,
+ 0xd84e, 0x03c3,
+ 0xd84f, 0x866b,
+ 0xd850, 0x400c,
+ 0xd851, 0x2272,
+ 0xd852, 0x3022,
+ 0xd853, 0x1002,
+ 0xd854, 0x2742,
+ 0xd855, 0x3022,
+ 0xd856, 0x1002,
+ 0xd857, 0x25e2,
+ 0xd858, 0x3022,
+ 0xd859, 0x1002,
+ 0xd85a, 0x2fb4,
+ 0xd85b, 0x3dc4,
+ 0xd85c, 0x6624,
+ 0xd85d, 0x56b3,
+ 0xd85e, 0x03c3,
+ 0xd85f, 0x866b,
+ 0xd860, 0x401c,
+ 0xd861, 0x2c45,
+ 0xd862, 0x3095,
+ 0xd863, 0x5b53,
+ 0xd864, 0x2372,
+ 0xd865, 0x3012,
+ 0xd866, 0x13c2,
+ 0xd867, 0x5cc3,
+ 0xd868, 0x2712,
+ 0xd869, 0x3012,
+ 0xd86a, 0x1312,
+ 0xd86b, 0x2b52,
+ 0xd86c, 0x3012,
+ 0xd86d, 0x1002,
+ 0xd86e, 0x2742,
+ 0xd86f, 0x3022,
+ 0xd870, 0x1002,
+ 0xd871, 0x2582,
+ 0xd872, 0x3022,
+ 0xd873, 0x1002,
+ 0xd874, 0x2142,
+ 0xd875, 0x3012,
+ 0xd876, 0x1002,
+ 0xd877, 0x628f,
+ 0xd878, 0x2985,
+ 0xd879, 0x33a5,
+ 0xd87a, 0x25e2,
+ 0xd87b, 0x3022,
+ 0xd87c, 0x1002,
+ 0xd87d, 0x5653,
+ 0xd87e, 0x03d2,
+ 0xd87f, 0x401e,
+ 0xd880, 0x6f72,
+ 0xd881, 0x1002,
+ 0xd882, 0x628f,
+ 0xd883, 0x2304,
+ 0xd884, 0x3c84,
+ 0xd885, 0x6436,
+ 0xd886, 0xdff4,
+ 0xd887, 0x6436,
+ 0xd888, 0x2ff5,
+ 0xd889, 0x3005,
+ 0xd88a, 0x8656,
+ 0xd88b, 0xdfba,
+ 0xd88c, 0x56a3,
+ 0xd88d, 0xd05a,
+ 0xd88e, 0x2972,
+ 0xd88f, 0x3012,
+ 0xd890, 0x1392,
+ 0xd891, 0xd05a,
+ 0xd892, 0x56a3,
+ 0xd893, 0xdfba,
+ 0xd894, 0x0383,
+ 0xd895, 0x6f72,
+ 0xd896, 0x1002,
+ 0xd897, 0x2b45,
+ 0xd898, 0x3005,
+ 0xd899, 0x4178,
+ 0xd89a, 0x5653,
+ 0xd89b, 0x0384,
+ 0xd89c, 0x2a62,
+ 0xd89d, 0x3012,
+ 0xd89e, 0x1002,
+ 0xd89f, 0x2f05,
+ 0xd8a0, 0x3005,
+ 0xd8a1, 0x41c8,
+ 0xd8a2, 0x5653,
+ 0xd8a3, 0x0382,
+ 0xd8a4, 0x0002,
+ 0xd8a5, 0x4218,
+ 0xd8a6, 0x2474,
+ 0xd8a7, 0x3c84,
+ 0xd8a8, 0x6437,
+ 0xd8a9, 0xdff4,
+ 0xd8aa, 0x6437,
+ 0xd8ab, 0x2ff5,
+ 0xd8ac, 0x3c05,
+ 0xd8ad, 0x8757,
+ 0xd8ae, 0xb888,
+ 0xd8af, 0x9787,
+ 0xd8b0, 0xdff4,
+ 0xd8b1, 0x6724,
+ 0xd8b2, 0x866a,
+ 0xd8b3, 0x6f72,
+ 0xd8b4, 0x1002,
+ 0xd8b5, 0x2641,
+ 0xd8b6, 0x3021,
+ 0xd8b7, 0x1001,
+ 0xd8b8, 0xc620,
+ 0xd8b9, 0x0000,
+ 0xd8ba, 0xc621,
+ 0xd8bb, 0x0000,
+ 0xd8bc, 0xc622,
+ 0xd8bd, 0x00ce,
+ 0xd8be, 0xc623,
+ 0xd8bf, 0x007f,
+ 0xd8c0, 0xc624,
+ 0xd8c1, 0x0032,
+ 0xd8c2, 0xc625,
+ 0xd8c3, 0x0000,
+ 0xd8c4, 0xc627,
+ 0xd8c5, 0x0000,
+ 0xd8c6, 0xc628,
+ 0xd8c7, 0x0000,
+ 0xd8c8, 0xc62c,
+ 0xd8c9, 0x0000,
+ 0xd8ca, 0x0000,
+ 0xd8cb, 0x2641,
+ 0xd8cc, 0x3021,
+ 0xd8cd, 0x1001,
+ 0xd8ce, 0xc502,
+ 0xd8cf, 0x53ac,
+ 0xd8d0, 0xc503,
+ 0xd8d1, 0x2cd3,
+ 0xd8d2, 0xc600,
+ 0xd8d3, 0x2a6e,
+ 0xd8d4, 0xc601,
+ 0xd8d5, 0x2a2c,
+ 0xd8d6, 0xc605,
+ 0xd8d7, 0x5557,
+ 0xd8d8, 0xc60c,
+ 0xd8d9, 0x5400,
+ 0xd8da, 0xc710,
+ 0xd8db, 0x0700,
+ 0xd8dc, 0xc711,
+ 0xd8dd, 0x0f06,
+ 0xd8de, 0xc718,
+ 0xd8df, 0x0700,
+ 0xd8e0, 0xc719,
+ 0xd8e1, 0x0f06,
+ 0xd8e2, 0xc720,
+ 0xd8e3, 0x4700,
+ 0xd8e4, 0xc721,
+ 0xd8e5, 0x0f06,
+ 0xd8e6, 0xc728,
+ 0xd8e7, 0x0700,
+ 0xd8e8, 0xc729,
+ 0xd8e9, 0x1207,
+ 0xd8ea, 0xc801,
+ 0xd8eb, 0x7f50,
+ 0xd8ec, 0xc802,
+ 0xd8ed, 0x7760,
+ 0xd8ee, 0xc803,
+ 0xd8ef, 0x7fce,
+ 0xd8f0, 0xc804,
+ 0xd8f1, 0x520e,
+ 0xd8f2, 0xc805,
+ 0xd8f3, 0x5c11,
+ 0xd8f4, 0xc806,
+ 0xd8f5, 0x3c51,
+ 0xd8f6, 0xc807,
+ 0xd8f7, 0x4061,
+ 0xd8f8, 0xc808,
+ 0xd8f9, 0x49c1,
+ 0xd8fa, 0xc809,
+ 0xd8fb, 0x3840,
+ 0xd8fc, 0xc80a,
+ 0xd8fd, 0x0000,
+ 0xd8fe, 0xc821,
+ 0xd8ff, 0x0002,
+ 0xd900, 0xc822,
+ 0xd901, 0x0046,
+ 0xd902, 0xc844,
+ 0xd903, 0x182f,
+ 0xd904, 0xc013,
+ 0xd905, 0xf341,
+ 0xd906, 0xc084,
+ 0xd907, 0x0030,
+ 0xd908, 0xc904,
+ 0xd909, 0x1401,
+ 0xd90a, 0xcb0c,
+ 0xd90b, 0x0004,
+ 0xd90c, 0xcb0e,
+ 0xd90d, 0xa00a,
+ 0xd90e, 0xcb0f,
+ 0xd90f, 0xc0c0,
+ 0xd910, 0xcb10,
+ 0xd911, 0xc0c0,
+ 0xd912, 0xcb11,
+ 0xd913, 0x00a0,
+ 0xd914, 0xcb12,
+ 0xd915, 0x0007,
+ 0xd916, 0xc241,
+ 0xd917, 0xa000,
+ 0xd918, 0xc243,
+ 0xd919, 0x7fe0,
+ 0xd91a, 0xc604,
+ 0xd91b, 0x000e,
+ 0xd91c, 0xc609,
+ 0xd91d, 0x00f5,
+ 0xd91e, 0xc611,
+ 0xd91f, 0x000e,
+ 0xd920, 0xc660,
+ 0xd921, 0x9600,
+ 0xd922, 0xc687,
+ 0xd923, 0x0004,
+ 0xd924, 0xc60a,
+ 0xd925, 0x04f5,
+ 0xd926, 0x0000,
+ 0xd927, 0x2641,
+ 0xd928, 0x3021,
+ 0xd929, 0x1001,
+ 0xd92a, 0xc620,
+ 0xd92b, 0x14e5,
+ 0xd92c, 0xc621,
+ 0xd92d, 0xc53d,
+ 0xd92e, 0xc622,
+ 0xd92f, 0x3cbe,
+ 0xd930, 0xc623,
+ 0xd931, 0x4452,
+ 0xd932, 0xc624,
+ 0xd933, 0xc5c5,
+ 0xd934, 0xc625,
+ 0xd935, 0xe01e,
+ 0xd936, 0xc627,
+ 0xd937, 0x0000,
+ 0xd938, 0xc628,
+ 0xd939, 0x0000,
+ 0xd93a, 0xc62c,
+ 0xd93b, 0x0000,
+ 0xd93c, 0x0000,
+ 0xd93d, 0x2b84,
+ 0xd93e, 0x3c74,
+ 0xd93f, 0x6435,
+ 0xd940, 0xdff4,
+ 0xd941, 0x6435,
+ 0xd942, 0x2806,
+ 0xd943, 0x3006,
+ 0xd944, 0x8565,
+ 0xd945, 0x2b24,
+ 0xd946, 0x3c24,
+ 0xd947, 0x6436,
+ 0xd948, 0x1002,
+ 0xd949, 0x2b24,
+ 0xd94a, 0x3c24,
+ 0xd94b, 0x6436,
+ 0xd94c, 0x4045,
+ 0xd94d, 0x8656,
+ 0xd94e, 0x5663,
+ 0xd94f, 0x0302,
+ 0xd950, 0x401e,
+ 0xd951, 0x1002,
+ 0xd952, 0x2807,
+ 0xd953, 0x31a7,
+ 0xd954, 0x20c4,
+ 0xd955, 0x3c24,
+ 0xd956, 0x6724,
+ 0xd957, 0x1002,
+ 0xd958, 0x2807,
+ 0xd959, 0x3187,
+ 0xd95a, 0x20c4,
+ 0xd95b, 0x3c24,
+ 0xd95c, 0x6724,
+ 0xd95d, 0x1002,
+ 0xd95e, 0x24f4,
+ 0xd95f, 0x3c64,
+ 0xd960, 0x6436,
+ 0xd961, 0xdff4,
+ 0xd962, 0x6436,
+ 0xd963, 0x1002,
+ 0xd964, 0x2006,
+ 0xd965, 0x3d76,
+ 0xd966, 0xc161,
+ 0xd967, 0x6134,
+ 0xd968, 0x6135,
+ 0xd969, 0x5443,
+ 0xd96a, 0x0303,
+ 0xd96b, 0x6524,
+ 0xd96c, 0x00fb,
+ 0xd96d, 0x1002,
+ 0xd96e, 0x20d4,
+ 0xd96f, 0x3c24,
+ 0xd970, 0x2025,
+ 0xd971, 0x3005,
+ 0xd972, 0x6524,
+ 0xd973, 0x1002,
+ 0xd974, 0xd019,
+ 0xd975, 0x2104,
+ 0xd976, 0x3c24,
+ 0xd977, 0x2105,
+ 0xd978, 0x3805,
+ 0xd979, 0x6524,
+ 0xd97a, 0xdff4,
+ 0xd97b, 0x4005,
+ 0xd97c, 0x6524,
+ 0xd97d, 0x2e8d,
+ 0xd97e, 0x303d,
+ 0xd97f, 0x2408,
+ 0xd980, 0x35d8,
+ 0xd981, 0x5dd3,
+ 0xd982, 0x0307,
+ 0xd983, 0x8887,
+ 0xd984, 0x63a7,
+ 0xd985, 0x8887,
+ 0xd986, 0x63a7,
+ 0xd987, 0xdffd,
+ 0xd988, 0x00f9,
+ 0xd989, 0x1002,
+ 0xd98a, 0x0000,
+ };
+ int i, err;
+
+ /* set uC clock and activate it */
+ err = set_phy_regs(phy, uCclock40MHz);
+ msleep(500);
+ if (err)
+ return err;
+ err = set_phy_regs(phy, uCclockActivate);
+ msleep(500);
+ if (err)
+ return err;
+
+ /* write TWINAX EDC firmware into PHY */
+ for (i = 0; i < ARRAY_SIZE(twinax_edc) && !err; i += 2)
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, twinax_edc[i],
+ twinax_edc[i + 1]);
+ /* activate uC */
+ err = set_phy_regs(phy, uCactivate);
+ if (!err)
+ phy->priv = edc_twinax;
+ return err;
+}
+
+/*
+ * Return Module Type.
+ */
+static int ael2020_get_module_type(struct cphy *phy, int delay_ms)
+{
+ int v;
+ unsigned int stat;
+
+ v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_STAT, &stat);
+ if (v)
+ return v;
+
+ if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) {
+ /* module absent */
+ return phy_modtype_none;
+ }
+
+ return ael2xxx_get_module_type(phy, delay_ms);
+}
+
+/*
+ * Enable PHY interrupts. We enable "Module Detection" interrupts (on any
+ * state transition) and then generic Link Alarm Status Interrupt (LASI).
+ */
+static int ael2020_intr_enable(struct cphy *phy)
+{
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+ 0x2 << (AEL2020_GPIO_MODDET*4));
+ return err ? err : t3_phy_lasi_intr_enable(phy);
+}
+
+/*
+ * Disable PHY interrupts. The mirror of the above ...
+ */
+static int ael2020_intr_disable(struct cphy *phy)
+{
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+ 0x1 << (AEL2020_GPIO_MODDET*4));
+ return err ? err : t3_phy_lasi_intr_disable(phy);
+}
+
+/*
+ * Clear PHY interrupt state.
+ */
+static int ael2020_intr_clear(struct cphy *phy)
+{
+ /*
+ * The GPIO Interrupt register on the AEL2020 is a "Latching High"
+ * (LH) register which is cleared to the current state when it's read.
+ * Thus, we simply read the register and discard the result.
+ */
+ unsigned int stat;
+ int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
+ return err ? err : t3_phy_lasi_intr_clear(phy);
+}
+
+/*
+ * Reset the PHY and put it into a canonical operating state.
+ */
+static int ael2020_reset(struct cphy *phy, int wait)
+{
+ static struct reg_val regs0[] = {
+ /* Erratum #2: CDRLOL asserted, causing PMA link down status */
+ { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 },
+
+ /* force XAUI to send LF when RX_LOS is asserted */
+ { MDIO_MMD_PMAPMD, 0xcd40, 0xffff, 0x0001 },
+
+ /* RX_LOS pin is active high */
+ { MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS,
+ 0x0020, 0x0020 },
+
+ /* output Module's Loss Of Signal (LOS) to LED */
+ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT,
+ 0xffff, 0x0004 },
+ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+ 0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) },
+
+ /* end */
+ { 0, 0, 0, 0 }
+ };
+ int err;
+ unsigned int lasi_ctrl;
+
+ /* grab current interrupt state */
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+ &lasi_ctrl);
+ if (err)
+ return err;
+
+ err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 125);
+ if (err)
+ return err;
+ msleep(100);
+
+ /* basic initialization for all module types */
+ phy->priv = edc_none;
+ err = set_phy_regs(phy, regs0);
+ if (err)
+ return err;
+
+ /* determine module type and perform appropriate initialization */
+ err = ael2020_get_module_type(phy, 0);
+ if (err < 0)
+ return err;
+ phy->modtype = (u8)err;
+ if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
+ err = ael2020_setup_twinax_edc(phy, err);
+ else
+ err = ael2020_setup_sr_edc(phy);
+ if (err)
+ return err;
+
+ /* reset wipes out interrupts, reenable them if they were on */
+ if (lasi_ctrl & 1)
+ err = ael2005_intr_enable(phy);
+ return err;
+}
+
+/*
+ * Handle a PHY interrupt.
+ */
+static int ael2020_intr_handler(struct cphy *phy)
+{
+ unsigned int stat;
+ int ret, edc_needed, cause = 0;
+
+ ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
+ if (ret)
+ return ret;
+
+ if (stat & (0x1 << AEL2020_GPIO_MODDET)) {
+ /* modules have max 300 ms init time after hot plug */
+ ret = ael2020_get_module_type(phy, 300);
+ if (ret < 0)
+ return ret;
+
+ phy->modtype = (u8)ret;
+ if (ret == phy_modtype_none)
+ edc_needed = phy->priv; /* on unplug retain EDC */
+ else if (ret == phy_modtype_twinax ||
+ ret == phy_modtype_twinax_long)
+ edc_needed = edc_twinax;
+ else
+ edc_needed = edc_sr;
+
+ if (edc_needed != phy->priv) {
+ ret = ael2020_reset(phy, 0);
+ return ret ? ret : cphy_cause_module_change;
+ }
+ cause = cphy_cause_module_change;
+ }
+
+ ret = t3_phy_lasi_intr_handler(phy);
+ if (ret < 0)
+ return ret;
+
+ ret |= cause;
+ return ret ? ret : cphy_cause_link_change;
+}
+
+static struct cphy_ops ael2020_ops = {
+ .reset = ael2020_reset,
+ .intr_enable = ael2020_intr_enable,
+ .intr_disable = ael2020_intr_disable,
+ .intr_clear = ael2020_intr_clear,
+ .intr_handler = ael2020_intr_handler,
+ .get_link_status = get_link_status_r,
+ .power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
+ SUPPORTED_IRQ, "10GBASE-R");
+ msleep(125);
+ return 0;
+}
+
+/*
* Get link status for a 10GBASE-X device.
*/
static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed,
@@ -1119,12 +1813,15 @@ static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed,
{
if (link_ok) {
unsigned int stat0, stat1, stat2;
- int err = mdio_read(phy, MDIO_DEV_PMA_PMD, PMD_RSD, &stat0);
+ int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
+ MDIO_PMA_RXDET, &stat0);
if (!err)
- err = mdio_read(phy, MDIO_DEV_PCS, PCS_STAT1_X, &stat1);
+ err = t3_mdio_read(phy, MDIO_MMD_PCS,
+ MDIO_PCS_10GBX_STAT1, &stat1);
if (!err)
- err = mdio_read(phy, MDIO_DEV_XGXS, XS_LN_STAT, &stat2);
+ err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_LNSTAT, &stat2);
if (err)
return err;
*link_ok = (stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1;
@@ -1144,6 +1841,7 @@ static struct cphy_ops qt2045_ops = {
.intr_handler = t3_phy_lasi_intr_handler,
.get_link_status = get_link_status_x,
.power_down = ael1006_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
};
int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
@@ -1159,9 +1857,10 @@ int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
* Some cards where the PHY is supposed to be at address 0 actually
* have it at 1.
*/
- if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
+ if (!phy_addr &&
+ !t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat) &&
stat == 0xffff)
- phy->addr = 1;
+ phy->mdio.prtad = 1;
return 0;
}
@@ -1175,15 +1874,16 @@ static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
{
if (link_ok) {
unsigned int status;
+ int prtad = phy->mdio.prtad;
status = t3_read_reg(phy->adapter,
- XGM_REG(A_XGM_SERDES_STAT0, phy->addr)) |
+ XGM_REG(A_XGM_SERDES_STAT0, prtad)) |
t3_read_reg(phy->adapter,
- XGM_REG(A_XGM_SERDES_STAT1, phy->addr)) |
+ XGM_REG(A_XGM_SERDES_STAT1, prtad)) |
t3_read_reg(phy->adapter,
- XGM_REG(A_XGM_SERDES_STAT2, phy->addr)) |
+ XGM_REG(A_XGM_SERDES_STAT2, prtad)) |
t3_read_reg(phy->adapter,
- XGM_REG(A_XGM_SERDES_STAT3, phy->addr));
+ XGM_REG(A_XGM_SERDES_STAT3, prtad));
*link_ok = !(status & F_LOWSIG0);
}
if (speed)
@@ -1211,7 +1911,7 @@ static struct cphy_ops xaui_direct_ops = {
int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
- cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
+ cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
"10GBASE-CX4");
return 0;
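Most of the AEL2005/AEL2020 bring-up above is table-driven: arrays of { mmd, reg, clear_bits, set_bits } entries, terminated by an all-zero sentinel, are walked by set_phy_regs(). A standalone sketch of that pattern, with a printing stub standing in for the driver's MDIO write helpers:

#include <stdio.h>

struct reg_val {
	unsigned short mmd_addr;
	unsigned short reg_addr;
	unsigned short clear_bits;
	unsigned short set_bits;
};

/* Stand-in for t3_mdio_write()/t3_mdio_change_bits(); just prints here. */
static int phy_write(int mmd, int reg, unsigned short clear, unsigned short set)
{
	printf("mmd %d reg 0x%04x: clear 0x%04x, set 0x%04x\n",
	       mmd, reg, clear, set);
	return 0;
}

/* Walk the table until the all-zero terminator, stopping on first error. */
static int apply_regs(const struct reg_val *rv)
{
	int err = 0;

	for (; rv->mmd_addr && !err; rv++)
		err = phy_write(rv->mmd_addr, rv->reg_addr,
				rv->clear_bits, rv->set_bits);
	return err;
}

int main(void)
{
	static const struct reg_val init_regs[] = {
		{ 1, 0xc003, 0xffff, 0x0181 },	/* full-register write */
		{ 1, 0xc010, 0xffff, 0x448a },
		{ 0, 0, 0, 0 }			/* terminator */
	};

	return apply_regs(init_regs);
}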
diff --git a/drivers/net/cxgb3/aq100x.c b/drivers/net/cxgb3/aq100x.c
new file mode 100644
index 00000000000..b1fd5bf836e
--- /dev/null
+++ b/drivers/net/cxgb3/aq100x.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "common.h"
+#include "regs.h"
+
+enum {
+ /* MDIO_DEV_PMA_PMD registers */
+ AQ_LINK_STAT = 0xe800,
+ AQ_IMASK_PMA = 0xf000,
+
+ /* MDIO_DEV_XGXS registers */
+ AQ_XAUI_RX_CFG = 0xc400,
+ AQ_XAUI_TX_CFG = 0xe400,
+
+ /* MDIO_DEV_ANEG registers */
+ AQ_1G_CTRL = 0xc400,
+ AQ_ANEG_STAT = 0xc800,
+
+ /* MDIO_DEV_VEND1 registers */
+ AQ_FW_VERSION = 0x0020,
+ AQ_IFLAG_GLOBAL = 0xfc00,
+ AQ_IMASK_GLOBAL = 0xff00,
+};
+
+enum {
+ IMASK_PMA = 1 << 2,
+ IMASK_GLOBAL = 1 << 15,
+ ADV_1G_FULL = 1 << 15,
+ ADV_1G_HALF = 1 << 14,
+ ADV_10G_FULL = 1 << 12,
+ AQ_RESET = (1 << 14) | (1 << 15),
+ AQ_LOWPOWER = 1 << 12,
+};
+
+static int aq100x_reset(struct cphy *phy, int wait)
+{
+ /*
+ * Ignore the caller-specified wait time; always wait for the reset to
+ * complete. Can take up to 3s.
+ */
+ int err = t3_phy_reset(phy, MDIO_MMD_VEND1, 3000);
+
+ if (err)
+ CH_WARN(phy->adapter, "PHY%d: reset failed (0x%x).\n",
+ phy->mdio.prtad, err);
+
+ return err;
+}
+
+static int aq100x_intr_enable(struct cphy *phy)
+{
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AQ_IMASK_PMA, IMASK_PMA);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, IMASK_GLOBAL);
+ return err;
+}
+
+static int aq100x_intr_disable(struct cphy *phy)
+{
+ return t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, 0);
+}
+
+static int aq100x_intr_clear(struct cphy *phy)
+{
+ unsigned int v;
+
+ t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &v);
+ t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
+
+ return 0;
+}
+
+static int aq100x_intr_handler(struct cphy *phy)
+{
+ int err;
+ unsigned int cause, v;
+
+ err = t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &cause);
+ if (err)
+ return err;
+
+ /* Read (and reset) the latching version of the status */
+ t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
+
+ return cphy_cause_link_change;
+}
+
+static int aq100x_power_down(struct cphy *phy, int off)
+{
+ return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER, off);
+}
+
+static int aq100x_autoneg_enable(struct cphy *phy)
+{
+ int err;
+
+ err = aq100x_power_down(phy, 0);
+ if (!err)
+ err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_AN, MDIO_CTRL1,
+ BMCR_ANENABLE | BMCR_ANRESTART, 1);
+
+ return err;
+}
+
+static int aq100x_autoneg_restart(struct cphy *phy)
+{
+ int err;
+
+ err = aq100x_power_down(phy, 0);
+ if (!err)
+ err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_AN, MDIO_CTRL1,
+ BMCR_ANENABLE | BMCR_ANRESTART, 1);
+
+ return err;
+}
+
+static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map)
+{
+ unsigned int adv;
+ int err;
+
+ /* 10G advertisement */
+ adv = 0;
+ if (advertise_map & ADVERTISED_10000baseT_Full)
+ adv |= ADV_10G_FULL;
+ err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ ADV_10G_FULL, adv);
+ if (err)
+ return err;
+
+ /* 1G advertisement */
+ adv = 0;
+ if (advertise_map & ADVERTISED_1000baseT_Full)
+ adv |= ADV_1G_FULL;
+ if (advertise_map & ADVERTISED_1000baseT_Half)
+ adv |= ADV_1G_HALF;
+ err = t3_mdio_change_bits(phy, MDIO_MMD_AN, AQ_1G_CTRL,
+ ADV_1G_FULL | ADV_1G_HALF, adv);
+ if (err)
+ return err;
+
+ /* 100M, pause advertisement */
+ adv = 0;
+ if (advertise_map & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise_map & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ if (advertise_map & ADVERTISED_Pause)
+ adv |= ADVERTISE_PAUSE_CAP;
+ if (advertise_map & ADVERTISED_Asym_Pause)
+ adv |= ADVERTISE_PAUSE_ASYM;
+ err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
+ 0xfe0, adv);
+
+ return err;
+}
+
+static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable)
+{
+ return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ BMCR_LOOPBACK, enable);
+}
+
+static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ /* no can do */
+ return -1;
+}
+
+static int aq100x_get_link_status(struct cphy *phy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ int err;
+ unsigned int v;
+
+ if (link_ok) {
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AQ_LINK_STAT, &v);
+ if (err)
+ return err;
+
+ *link_ok = v & 1;
+ if (!*link_ok)
+ return 0;
+ }
+
+ err = t3_mdio_read(phy, MDIO_MMD_AN, AQ_ANEG_STAT, &v);
+ if (err)
+ return err;
+
+ if (speed) {
+ switch (v & 0x6) {
+ case 0x6:
+ *speed = SPEED_10000;
+ break;
+ case 0x4:
+ *speed = SPEED_1000;
+ break;
+ case 0x2:
+ *speed = SPEED_100;
+ break;
+ case 0x0:
+ *speed = SPEED_10;
+ break;
+ }
+ }
+
+ if (duplex)
+ *duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF;
+
+ return 0;
+}
+
+static struct cphy_ops aq100x_ops = {
+ .reset = aq100x_reset,
+ .intr_enable = aq100x_intr_enable,
+ .intr_disable = aq100x_intr_disable,
+ .intr_clear = aq100x_intr_clear,
+ .intr_handler = aq100x_intr_handler,
+ .autoneg_enable = aq100x_autoneg_enable,
+ .autoneg_restart = aq100x_autoneg_restart,
+ .advertise = aq100x_advertise,
+ .set_loopback = aq100x_set_loopback,
+ .set_speed_duplex = aq100x_set_speed_duplex,
+ .get_link_status = aq100x_get_link_status,
+ .power_down = aq100x_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ unsigned int v, v2, gpio, wait;
+ int err;
+
+ cphy_init(phy, adapter, phy_addr, &aq100x_ops, mdio_ops,
+ SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_AUI, "1000/10GBASE-T");
+
+ /*
+ * The PHY has been out of reset ever since the system powered up. So
+ * we do a hard reset over here.
+ */
+ gpio = phy_addr ? F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL;
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0);
+ msleep(1);
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio);
+
+ /*
+ * Give it enough time to load the firmware and get ready for mdio.
+ */
+ msleep(1000);
+ wait = 500; /* in 10ms increments */
+ do {
+ err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
+ if (err || v == 0xffff) {
+
+ /* Allow prep_adapter to succeed when ffff is read */
+
+ CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n",
+ phy_addr, err, v);
+ goto done;
+ }
+
+ v &= AQ_RESET;
+ if (v)
+ msleep(10);
+ } while (v && --wait);
+ if (v) {
+ CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n",
+ phy_addr, v);
+
+ goto done; /* let prep_adapter succeed */
+ }
+
+ /* Datasheet says 3s max but this has been observed */
+ wait = (500 - wait) * 10 + 1000;
+ if (wait > 3000)
+ CH_WARN(adapter, "PHY%d: reset took %ums\n", phy_addr, wait);
+
+ /* Firmware version check. */
+ t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_FW_VERSION, &v);
+ if (v != 30) {
+ CH_WARN(adapter, "PHY%d: unsupported firmware %d\n",
+ phy_addr, v);
+ return 0; /* allow t3_prep_adapter to succeed */
+ }
+
+ /*
+ * The PHY should start in really-low-power mode. Prepare it for normal
+ * operations.
+ */
+ err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
+ if (err)
+ return err;
+ if (v & AQ_LOWPOWER) {
+ err = t3_mdio_change_bits(phy, MDIO_MMD_VEND1, MDIO_CTRL1,
+ AQ_LOWPOWER, 0);
+ if (err)
+ return err;
+ msleep(10);
+ } else
+ CH_WARN(adapter, "PHY%d does not start in low power mode.\n",
+ phy_addr);
+
+ /*
+ * Verify XAUI settings, but let prep succeed no matter what.
+ */
+ v = v2 = 0;
+ t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_RX_CFG, &v);
+ t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_TX_CFG, &v2);
+ if (v != 0x1b || v2 != 0x1b)
+ CH_WARN(adapter,
+ "PHY%d: incorrect XAUI settings (0x%x, 0x%x).\n",
+ phy_addr, v, v2);
+
+done:
+ return err;
+}
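t3_aq100x_phy_prep() above polls the PHY's reset bits in 10 ms steps with a 5 s budget instead of trusting the 3 s datasheet maximum. A reduced sketch of that bounded-poll idiom; read_ctrl() is a placeholder, not a driver function:

#include <linux/delay.h>
#include <linux/errno.h>

#define MY_RESET_BITS	((1 << 14) | (1 << 15))

/* Placeholder for an MDIO read of the control register. */
static int read_ctrl(unsigned int *val)
{
	*val = 0;			/* pretend reset has completed */
	return 0;
}

static int wait_for_reset(void)
{
	unsigned int v;
	int err, wait = 500;		/* 500 * 10 ms = 5 s budget */

	do {
		err = read_ctrl(&v);
		if (err)
			return err;
		v &= MY_RESET_BITS;
		if (v)
			msleep(10);	/* still resetting, poll again */
	} while (v && --wait);

	return v ? -ETIMEDOUT : 0;
}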
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index e508dc32f3e..d21b705501a 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -39,7 +39,7 @@
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/mdio.h>
#include "version.h"
#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
@@ -184,10 +184,11 @@ struct cphy;
struct adapter;
struct mdio_ops {
- int (*read)(struct adapter *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int *val);
- int (*write)(struct adapter *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int val);
+ int (*read)(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr);
+ int (*write)(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val);
+ unsigned mode_support;
};
struct adapter_info {
@@ -520,27 +521,6 @@ enum {
MAC_RXFIFO_SIZE = 32768
};
-/* IEEE 802.3 specified MDIO devices */
-enum {
- MDIO_DEV_PMA_PMD = 1,
- MDIO_DEV_WIS = 2,
- MDIO_DEV_PCS = 3,
- MDIO_DEV_XGXS = 4,
- MDIO_DEV_ANEG = 7,
- MDIO_DEV_VEND1 = 30,
- MDIO_DEV_VEND2 = 31
-};
-
-/* LASI control and status registers */
-enum {
- RX_ALARM_CTRL = 0x9000,
- TX_ALARM_CTRL = 0x9001,
- LASI_CTRL = 0x9002,
- RX_ALARM_STAT = 0x9003,
- TX_ALARM_STAT = 0x9004,
- LASI_STAT = 0x9005
-};
-
/* PHY loopback direction */
enum {
PHY_LOOPBACK_TX = 1,
@@ -583,11 +563,12 @@ struct cphy_ops {
int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
int *duplex, int *fc);
int (*power_down)(struct cphy *phy, int enable);
+
+ u32 mmds;
};
/* A PHY instance */
struct cphy {
- u8 addr; /* PHY address */
u8 modtype; /* PHY module type */
short priv; /* scratch pad */
unsigned int caps; /* PHY capabilities */
@@ -595,23 +576,23 @@ struct cphy {
const char *desc; /* PHY description */
unsigned long fifo_errors; /* FIFO over/under-flows */
const struct cphy_ops *ops; /* PHY operations */
- int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int *val);
- int (*mdio_write)(struct adapter *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int val);
+ struct mdio_if_info mdio;
};
/* Convenience MDIO read/write wrappers */
-static inline int mdio_read(struct cphy *phy, int mmd, int reg,
- unsigned int *valp)
+static inline int t3_mdio_read(struct cphy *phy, int mmd, int reg,
+ unsigned int *valp)
{
- return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
+ int rc = phy->mdio.mdio_read(phy->mdio.dev, phy->mdio.prtad, mmd, reg);
+ *valp = (rc >= 0) ? rc : -1;
+ return (rc >= 0) ? 0 : rc;
}
-static inline int mdio_write(struct cphy *phy, int mmd, int reg,
- unsigned int val)
+static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg,
+ unsigned int val)
{
- return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
+ return phy->mdio.mdio_write(phy->mdio.dev, phy->mdio.prtad, mmd,
+ reg, val);
}
/* Convenience initializer */
@@ -620,14 +601,16 @@ static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
const struct mdio_ops *mdio_ops,
unsigned int caps, const char *desc)
{
- phy->addr = phy_addr;
phy->caps = caps;
phy->adapter = adapter;
phy->desc = desc;
phy->ops = phy_ops;
if (mdio_ops) {
- phy->mdio_read = mdio_ops->read;
- phy->mdio_write = mdio_ops->write;
+ phy->mdio.prtad = phy_addr;
+ phy->mdio.mmds = phy_ops->mmds;
+ phy->mdio.mode_support = mdio_ops->mode_support;
+ phy->mdio.mdio_read = mdio_ops->read;
+ phy->mdio.mdio_write = mdio_ops->write;
}
}
@@ -819,8 +802,12 @@ int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
const struct mdio_ops *mdio_ops);
int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
#endif /* __CHELSIO_COMMON_H */
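The common.h changes above retire the driver's private mdio_read/mdio_write pointers in favour of the kernel's struct mdio_if_info. A minimal sketch of how a driver typically fills that structure and forwards MII ioctls to mdio_mii_ioctl(); the foo_* accessors are placeholders, not cxgb3 code:

#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/mdio.h>

/* Placeholder clause-45 accessors; a real driver talks to its MDIO bus. */
static int foo_mdio_read(struct net_device *dev, int prtad, int devad,
			 u16 addr)
{
	return 0;
}

static int foo_mdio_write(struct net_device *dev, int prtad, int devad,
			  u16 addr, u16 val)
{
	return 0;
}

/* Per-port private data, e.g. allocated via alloc_etherdev(sizeof(...)). */
struct foo_port {
	struct mdio_if_info mdio;
};

static void foo_mdio_setup(struct foo_port *p, struct net_device *dev,
			   int phy_addr)
{
	p->mdio.prtad = phy_addr;
	p->mdio.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS;
	p->mdio.mode_support = MDIO_SUPPORTS_C45;
	p->mdio.dev = dev;
	p->mdio.mdio_read = foo_mdio_read;
	p->mdio.mdio_write = foo_mdio_write;
}

/* ndo_do_ioctl can then hand MII ioctls straight to the MDIO core. */
static int foo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct foo_port *p = netdev_priv(dev);

	return mdio_mii_ioctl(&p->mdio, if_mii(req), cmd);
}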
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 7ea48414c6c..538dda4422d 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -37,7 +37,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
-#include <linux/mii.h>
+#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
@@ -91,6 +91,8 @@ static const struct pci_device_id cxgb3_pci_tbl[] = {
CH_DEVICE(0x31, 3), /* T3B20 */
CH_DEVICE(0x32, 1), /* T3B02 */
CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
+ CH_DEVICE(0x36, 3), /* S320E-CR */
+ CH_DEVICE(0x37, 7), /* N320E-G2 */
{0,}
};
@@ -431,40 +433,78 @@ static int init_tp_parity(struct adapter *adap)
for (i = 0; i < 16; i++) {
struct cpl_smt_write_req *req;
- skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
req->iff = i;
t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ await_mgmt_replies(adap, cnt, i + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!adap->nofail_skb)
+ goto alloc_skb_fail;
+ }
}
for (i = 0; i < 2048; i++) {
struct cpl_l2t_write_req *req;
- skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
req->params = htonl(V_L2T_W_IDX(i));
t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ await_mgmt_replies(adap, cnt, 16 + i + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!adap->nofail_skb)
+ goto alloc_skb_fail;
+ }
}
for (i = 0; i < 2048; i++) {
struct cpl_rte_write_req *req;
- skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
req->l2t_idx = htonl(V_L2T_W_IDX(i));
t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!adap->nofail_skb)
+ goto alloc_skb_fail;
+ }
}
- skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
memset(greq, 0, sizeof(*greq));
greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
@@ -473,8 +513,17 @@ static int init_tp_parity(struct adapter *adap)
t3_mgmt_tx(adap, skb);
i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+ if (skb == adap->nofail_skb) {
+ i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ }
+
t3_tp_set_offload_mode(adap, 0);
return i;
+
+alloc_skb_fail:
+ t3_tp_set_offload_mode(adap, 0);
+ return -ENOMEM;
}
/**
@@ -869,7 +918,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
struct mngt_pktsched_wr *req;
int ret;
- skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ return -ENOMEM;
+
req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
@@ -879,6 +933,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
req->max = hi;
req->binding = port;
ret = t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+ GFP_KERNEL);
+ if (!adap->nofail_skb)
+ ret = -ENOMEM;
+ }
return ret;
}
@@ -1593,7 +1653,7 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = p->phy.addr;
+ cmd->phy_address = p->phy.mdio.prtad;
cmd->transceiver = XCVR_EXTERNAL;
cmd->autoneg = p->link_config.autoneg;
cmd->maxtxpkt = 0;
@@ -2308,70 +2368,25 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
struct mii_ioctl_data *data = if_mii(req);
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- int ret, mmd;
switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = pi->phy.addr;
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ /* Convert phy_id from older PRTAD/DEVAD format */
+ if (is_10G(adapter) &&
+ !mdio_phy_id_is_c45(data->phy_id) &&
+ (data->phy_id & 0x1f00) &&
+ !(data->phy_id & 0xe0e0))
+ data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
+ data->phy_id & 0x1f);
/* FALLTHRU */
- case SIOCGMIIREG:{
- u32 val;
- struct cphy *phy = &pi->phy;
-
- if (!phy->mdio_read)
- return -EOPNOTSUPP;
- if (is_10G(adapter)) {
- mmd = data->phy_id >> 8;
- if (!mmd)
- mmd = MDIO_DEV_PCS;
- else if (mmd > MDIO_DEV_VEND2)
- return -EINVAL;
-
- ret =
- phy->mdio_read(adapter, data->phy_id & 0x1f,
- mmd, data->reg_num, &val);
- } else
- ret =
- phy->mdio_read(adapter, data->phy_id & 0x1f,
- 0, data->reg_num & 0x1f,
- &val);
- if (!ret)
- data->val_out = val;
- break;
- }
- case SIOCSMIIREG:{
- struct cphy *phy = &pi->phy;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (!phy->mdio_write)
- return -EOPNOTSUPP;
- if (is_10G(adapter)) {
- mmd = data->phy_id >> 8;
- if (!mmd)
- mmd = MDIO_DEV_PCS;
- else if (mmd > MDIO_DEV_VEND2)
- return -EINVAL;
-
- ret =
- phy->mdio_write(adapter,
- data->phy_id & 0x1f, mmd,
- data->reg_num,
- data->val_in);
- } else
- ret =
- phy->mdio_write(adapter,
- data->phy_id & 0x1f, 0,
- data->reg_num & 0x1f,
- data->val_in);
- break;
- }
+ case SIOCGMIIPHY:
+ return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
case SIOCCHIOCTL:
return cxgb_extension_ioctl(dev, req->ifr_data);
default:
return -EOPNOTSUPP;
}
- return ret;
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
@@ -2496,14 +2511,16 @@ static void check_link_status(struct adapter *adapter)
for_each_port(adapter, i) {
struct net_device *dev = adapter->port[i];
struct port_info *p = netdev_priv(dev);
+ int link_fault;
spin_lock_irq(&adapter->work_lock);
- if (p->link_fault) {
+ link_fault = p->link_fault;
+ spin_unlock_irq(&adapter->work_lock);
+
+ if (link_fault) {
t3_link_fault(adapter, i);
- spin_unlock_irq(&adapter->work_lock);
continue;
}
- spin_unlock_irq(&adapter->work_lock);
if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
t3_xgm_intr_disable(adapter, i);
@@ -3061,6 +3078,14 @@ static int __devinit init_one(struct pci_dev *pdev,
goto out_disable_device;
}
+ adapter->nofail_skb =
+ alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
+ if (!adapter->nofail_skb) {
+ dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+
adapter->regs = ioremap_nocache(mmio_start, mmio_len);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
@@ -3104,7 +3129,6 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len - 1;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
- netdev->features |= NETIF_F_LLTX;
netdev->features |= NETIF_F_GRO;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
@@ -3218,6 +3242,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
free_netdev(adapter->port[i]);
iounmap(adapter->regs);
+ if (adapter->nofail_skb)
+ kfree_skb(adapter->nofail_skb);
kfree(adapter);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 620d80be6aa..f9f54b57b28 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -566,13 +566,31 @@ static void t3_process_tid_release_list(struct work_struct *work)
spin_unlock_bh(&td->tid_release_lock);
skb = alloc_skb(sizeof(struct cpl_tid_release),
- GFP_KERNEL | __GFP_NOFAIL);
+ GFP_KERNEL);
+ if (!skb)
+ skb = td->nofail_skb;
+ if (!skb) {
+ spin_lock_bh(&td->tid_release_lock);
+ p->ctx = (void *)td->tid_release_list;
+ td->tid_release_list = (struct t3c_tid_entry *)p;
+ break;
+ }
mk_tid_release(skb, p - td->tid_maps.tid_tab);
cxgb3_ofld_send(tdev, skb);
p->ctx = NULL;
+ if (skb == td->nofail_skb)
+ td->nofail_skb =
+ alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
spin_lock_bh(&td->tid_release_lock);
}
+ td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
spin_unlock_bh(&td->tid_release_lock);
+
+ if (!td->nofail_skb)
+ td->nofail_skb =
+ alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
}
/* use ctx as a next pointer in the tid release list */
@@ -585,7 +603,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
p->ctx = (void *)td->tid_release_list;
p->client = NULL;
td->tid_release_list = p;
- if (!p->ctx)
+ if (!p->ctx || td->release_list_incomplete)
schedule_work(&td->tid_release_task);
spin_unlock_bh(&td->tid_release_lock);
}
@@ -1274,6 +1292,9 @@ int cxgb3_offload_activate(struct adapter *adapter)
if (list_empty(&adapter_list))
register_netevent_notifier(&nb);
+ t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
+ t->release_list_incomplete = 0;
+
add_adapter(adapter);
return 0;
@@ -1298,6 +1319,8 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
T3C_DATA(tdev) = NULL;
t3_free_l2t(L2DATA(tdev));
L2DATA(tdev) = NULL;
+ if (t->nofail_skb)
+ kfree_skb(t->nofail_skb);
kfree(t);
}
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index a8e8e5fcdf8..55945f422ae 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -191,6 +191,9 @@ struct t3c_data {
struct t3c_tid_entry *tid_release_list;
spinlock_t tid_release_lock;
struct work_struct tid_release_task;
+
+ struct sk_buff *nofail_skb;
+ unsigned int release_list_incomplete;
};
/*
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 26d3587f339..29c79eb43be 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -355,7 +355,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
(*d->pg_chunk.p_cnt)--;
if (!*d->pg_chunk.p_cnt)
pci_unmap_page(pdev,
- pci_unmap_addr(&d->pg_chunk, mapping),
+ d->pg_chunk.mapping,
q->alloc_size, PCI_DMA_FROMDEVICE);
put_page(d->pg_chunk.page);
@@ -454,7 +454,7 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
q->pg_chunk.offset = 0;
mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
0, q->alloc_size, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
+ q->pg_chunk.mapping = mapping;
}
sd->pg_chunk = q->pg_chunk;
@@ -511,8 +511,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
nomem: q->alloc_failed++;
break;
}
- mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
- sd->pg_chunk.offset;
+ mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
pci_unmap_addr_set(sd, dma_addr, mapping);
add_one_rx_chunk(mapping, d, q->gen);
@@ -654,7 +653,8 @@ static void t3_reset_qset(struct sge_qset *q)
q->txq_stopped = 0;
q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
q->rx_reclaim_timer.function = NULL;
- q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
+ q->nomem = 0;
+ napi_free_frags(&q->napi);
}
@@ -881,7 +881,7 @@ recycle:
(*sd->pg_chunk.p_cnt)--;
if (!*sd->pg_chunk.p_cnt)
pci_unmap_page(adap->pdev,
- pci_unmap_addr(&sd->pg_chunk, mapping),
+ sd->pg_chunk.mapping,
fl->alloc_size,
PCI_DMA_FROMDEVICE);
if (!skb) {
@@ -1240,7 +1240,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
q = &qs->txq[TXQ_ETH];
txq = netdev_get_tx_queue(dev, qidx);
- spin_lock(&q->lock);
reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
credits = q->size - q->in_use;
@@ -1251,7 +1250,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
dev_err(&adap->pdev->dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, q->cntxt_id & 7);
- spin_unlock(&q->lock);
return NETDEV_TX_BUSY;
}
@@ -1285,9 +1283,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (vlan_tx_tag_present(skb) && pi->vlan_grp)
qs->port_stats[SGE_PSTAT_VLANINS]++;
- dev->trans_start = jiffies;
- spin_unlock(&q->lock);
-
/*
* We do not use Tx completion interrupts to free DMAd Tx packets.
* This is good for performance but means that we rely on new Tx
@@ -2074,20 +2069,19 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct sge_fl *fl, int len, int complete)
{
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ struct sk_buff *skb = NULL;
struct cpl_rx_pkt *cpl;
- struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
- int nr_frags = qs->lro_frag_tbl.nr_frags;
- int frag_len = qs->lro_frag_tbl.len;
+ struct skb_frag_struct *rx_frag;
+ int nr_frags;
int offset = 0;
- if (!nr_frags) {
- offset = 2 + sizeof(struct cpl_rx_pkt);
- qs->lro_va = cpl = sd->pg_chunk.va + 2;
+ if (!qs->nomem) {
+ skb = napi_get_frags(&qs->napi);
+ qs->nomem = !skb;
}
fl->credits--;
- len -= offset;
pci_dma_sync_single_for_cpu(adap->pdev,
pci_unmap_addr(sd, dma_addr),
fl->buf_size - SGE_PG_RSVD,
@@ -2096,25 +2090,42 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
(*sd->pg_chunk.p_cnt)--;
if (!*sd->pg_chunk.p_cnt)
pci_unmap_page(adap->pdev,
- pci_unmap_addr(&sd->pg_chunk, mapping),
+ sd->pg_chunk.mapping,
fl->alloc_size,
PCI_DMA_FROMDEVICE);
+ if (!skb) {
+ put_page(sd->pg_chunk.page);
+ if (complete)
+ qs->nomem = 0;
+ return;
+ }
+
+ rx_frag = skb_shinfo(skb)->frags;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (!nr_frags) {
+ offset = 2 + sizeof(struct cpl_rx_pkt);
+ qs->lro_va = sd->pg_chunk.va + 2;
+ }
+ len -= offset;
+
prefetch(qs->lro_va);
rx_frag += nr_frags;
rx_frag->page = sd->pg_chunk.page;
rx_frag->page_offset = sd->pg_chunk.offset + offset;
rx_frag->size = len;
- frag_len += len;
- qs->lro_frag_tbl.nr_frags++;
- qs->lro_frag_tbl.len = frag_len;
+ skb->len += len;
+ skb->data_len += len;
+ skb->truesize += len;
+ skb_shinfo(skb)->nr_frags++;
if (!complete)
return;
- qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
cpl = qs->lro_va;
if (unlikely(cpl->vlan_valid)) {
@@ -2123,15 +2134,11 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct vlan_group *grp = pi->vlan_grp;
if (likely(grp != NULL)) {
- vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
- &qs->lro_frag_tbl);
- goto out;
+ vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan));
+ return;
}
}
- napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
-
-out:
- qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
+ napi_gro_frags(&qs->napi);
}
/**
@@ -2300,8 +2307,6 @@ no_mem:
if (fl->use_pages) {
void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
- prefetch(&qs->lro_frag_tbl);
-
prefetch(addr);
#if L1_CACHE_BYTES < 128
prefetch(addr + L1_CACHE_BYTES);
@@ -2847,11 +2852,12 @@ static void sge_timer_tx(unsigned long data)
unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
unsigned long next_period;
- if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
- tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
- TX_RECLAIM_TIMER_CHUNK);
- spin_unlock(&qs->txq[TXQ_ETH].lock);
+ if (__netif_tx_trylock(qs->tx_q)) {
+ tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
+ TX_RECLAIM_TIMER_CHUNK);
+ __netif_tx_unlock(qs->tx_q);
}
+
if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
TX_RECLAIM_TIMER_CHUNK);
@@ -2859,8 +2865,8 @@ static void sge_timer_tx(unsigned long data)
}
next_period = TX_RECLAIM_PERIOD >>
- (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
- TX_RECLAIM_TIMER_CHUNK);
+ (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
+ TX_RECLAIM_TIMER_CHUNK);
mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
}
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 4f68aeb2679..870d44992c7 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -204,35 +204,33 @@ static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
/*
* MI1 read/write operations for clause 22 PHYs.
*/
-static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int *valp)
+static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr)
{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
int ret;
u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
- if (mmd_addr)
- return -EINVAL;
-
mutex_lock(&adapter->mdio_lock);
t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
t3_write_reg(adapter, A_MI1_ADDR, addr);
t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
if (!ret)
- *valp = t3_read_reg(adapter, A_MI1_DATA);
+ ret = t3_read_reg(adapter, A_MI1_DATA);
mutex_unlock(&adapter->mdio_lock);
return ret;
}
-static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int val)
+static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val)
{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
int ret;
u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
- if (mmd_addr)
- return -EINVAL;
-
mutex_lock(&adapter->mdio_lock);
t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
t3_write_reg(adapter, A_MI1_ADDR, addr);
@@ -244,8 +242,9 @@ static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
}
static const struct mdio_ops mi1_mdio_ops = {
- t3_mi1_read,
- t3_mi1_write
+ .read = t3_mi1_read,
+ .write = t3_mi1_write,
+ .mode_support = MDIO_SUPPORTS_C22
};
/*
@@ -268,9 +267,11 @@ static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
/*
* MI1 read/write operations for indirect-addressed PHYs.
*/
-static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int *valp)
+static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr)
{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
int ret;
mutex_lock(&adapter->mdio_lock);
@@ -280,15 +281,17 @@ static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
MDIO_ATTEMPTS, 10);
if (!ret)
- *valp = t3_read_reg(adapter, A_MI1_DATA);
+ ret = t3_read_reg(adapter, A_MI1_DATA);
}
mutex_unlock(&adapter->mdio_lock);
return ret;
}
-static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
- int reg_addr, unsigned int val)
+static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val)
{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
int ret;
mutex_lock(&adapter->mdio_lock);
@@ -304,8 +307,9 @@ static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
}
static const struct mdio_ops mi1_mdio_ext_ops = {
- mi1_ext_read,
- mi1_ext_write
+ .read = mi1_ext_read,
+ .write = mi1_ext_write,
+ .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};
/**
@@ -325,10 +329,10 @@ int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
int ret;
unsigned int val;
- ret = mdio_read(phy, mmd, reg, &val);
+ ret = t3_mdio_read(phy, mmd, reg, &val);
if (!ret) {
val &= ~clear;
- ret = mdio_write(phy, mmd, reg, val | set);
+ ret = t3_mdio_write(phy, mmd, reg, val | set);
}
return ret;
}
@@ -348,15 +352,16 @@ int t3_phy_reset(struct cphy *phy, int mmd, int wait)
int err;
unsigned int ctl;
- err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
+ err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
+ MDIO_CTRL1_RESET);
if (err || !wait)
return err;
do {
- err = mdio_read(phy, mmd, MII_BMCR, &ctl);
+ err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
if (err)
return err;
- ctl &= BMCR_RESET;
+ ctl &= MDIO_CTRL1_RESET;
if (ctl)
msleep(1);
} while (ctl && --wait);
@@ -377,7 +382,7 @@ int t3_phy_advertise(struct cphy *phy, unsigned int advert)
int err;
unsigned int val = 0;
- err = mdio_read(phy, 0, MII_CTRL1000, &val);
+ err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
if (err)
return err;
@@ -387,7 +392,7 @@ int t3_phy_advertise(struct cphy *phy, unsigned int advert)
if (advert & ADVERTISED_1000baseT_Full)
val |= ADVERTISE_1000FULL;
- err = mdio_write(phy, 0, MII_CTRL1000, val);
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
if (err)
return err;
@@ -404,7 +409,7 @@ int t3_phy_advertise(struct cphy *phy, unsigned int advert)
val |= ADVERTISE_PAUSE_CAP;
if (advert & ADVERTISED_Asym_Pause)
val |= ADVERTISE_PAUSE_ASYM;
- return mdio_write(phy, 0, MII_ADVERTISE, val);
+ return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}
/**
@@ -427,7 +432,7 @@ int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
val |= ADVERTISE_1000XPAUSE;
if (advert & ADVERTISED_Asym_Pause)
val |= ADVERTISE_1000XPSE_ASYM;
- return mdio_write(phy, 0, MII_ADVERTISE, val);
+ return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}
/**
@@ -444,7 +449,7 @@ int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
int err;
unsigned int ctl;
- err = mdio_read(phy, 0, MII_BMCR, &ctl);
+ err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
if (err)
return err;
@@ -462,34 +467,36 @@ int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
}
if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
ctl |= BMCR_ANENABLE;
- return mdio_write(phy, 0, MII_BMCR, ctl);
+ return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
}
int t3_phy_lasi_intr_enable(struct cphy *phy)
{
- return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
+ return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+ MDIO_PMA_LASI_LSALARM);
}
int t3_phy_lasi_intr_disable(struct cphy *phy)
{
- return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
+ return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
int t3_phy_lasi_intr_clear(struct cphy *phy)
{
u32 val;
- return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
+ return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}
int t3_phy_lasi_intr_handler(struct cphy *phy)
{
unsigned int status;
- int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
+ int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
+ &status);
if (err)
return err;
- return (status & 1) ? cphy_cause_link_change : 0;
+ return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
}
static const struct adapter_info t3_adap_info[] = {
@@ -519,6 +526,11 @@ static const struct adapter_info t3_adap_info[] = {
F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
{ S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
&mi1_mdio_ext_ops, "Chelsio T310" },
+ {1, 0, 0,
+ F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
+ F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
+ { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};
/*
@@ -545,6 +557,8 @@ static const struct port_type_info port_types[] = {
{ t3_qt2045_phy_prep },
{ t3_ael1006_phy_prep },
{ NULL },
+ { t3_aq100x_phy_prep },
+ { t3_ael2020_phy_prep },
};
#define VPD_ENTRY(name, len) \
@@ -1274,6 +1288,11 @@ void t3_link_fault(struct adapter *adapter, int port_id)
A_XGM_INT_STATUS + mac->offset);
link_fault &= F_LINKFAULTCHANGE;
+ link_ok = lc->link_ok;
+ speed = lc->speed;
+ duplex = lc->duplex;
+ fc = lc->fc;
+
phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
if (link_fault) {
@@ -3859,6 +3878,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
return -EINVAL;
}
+ p->phy.mdio.dev = adapter->port[i];
ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
ai->mdio_ops);
if (ret)
@@ -3918,7 +3938,7 @@ int t3_replay_prep_adapter(struct adapter *adapter)
;
pti = &port_types[adapter->params.vpd.port_type[j]];
- ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+ ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
if (ret)
return ret;
p->phy.ops->power_down(&p->phy, 1);
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 7bf963ec554..9d0bd9dd9ab 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -35,10 +35,10 @@
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
/* Driver version */
-#define DRV_VERSION "1.1.2-ko"
+#define DRV_VERSION "1.1.3-ko"
/* Firmware version */
#define FW_VERSION_MAJOR 7
-#define FW_VERSION_MINOR 1
+#define FW_VERSION_MINOR 4
#define FW_VERSION_MICRO 0
#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb3/vsc8211.c b/drivers/net/cxgb3/vsc8211.c
index d07130971b8..4f9a1c2724f 100644
--- a/drivers/net/cxgb3/vsc8211.c
+++ b/drivers/net/cxgb3/vsc8211.c
@@ -91,17 +91,18 @@ enum {
*/
static int vsc8211_reset(struct cphy *cphy, int wait)
{
- return t3_phy_reset(cphy, 0, 0);
+ return t3_phy_reset(cphy, MDIO_DEVAD_NONE, 0);
}
static int vsc8211_intr_enable(struct cphy *cphy)
{
- return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, INTR_MASK);
+ return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE,
+ INTR_MASK);
}
static int vsc8211_intr_disable(struct cphy *cphy)
{
- return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, 0);
+ return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE, 0);
}
static int vsc8211_intr_clear(struct cphy *cphy)
@@ -109,18 +110,20 @@ static int vsc8211_intr_clear(struct cphy *cphy)
u32 val;
/* Clear PHY interrupts by reading the register. */
- return mdio_read(cphy, 0, VSC8211_INTR_STATUS, &val);
+ return t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &val);
}
static int vsc8211_autoneg_enable(struct cphy *cphy)
{
- return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
+ return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR,
+ BMCR_PDOWN | BMCR_ISOLATE,
BMCR_ANENABLE | BMCR_ANRESTART);
}
static int vsc8211_autoneg_restart(struct cphy *cphy)
{
- return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
+ return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR,
+ BMCR_PDOWN | BMCR_ISOLATE,
BMCR_ANRESTART);
}
@@ -130,9 +133,9 @@ static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
unsigned int bmcr, status, lpa, adv;
int err, sp = -1, dplx = -1, pause = 0;
- err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr);
if (!err)
- err = mdio_read(cphy, 0, MII_BMSR, &status);
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status);
if (err)
return err;
@@ -142,7 +145,8 @@ static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
* once more to get the current link state.
*/
if (!(status & BMSR_LSTATUS))
- err = mdio_read(cphy, 0, MII_BMSR, &status);
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR,
+ &status);
if (err)
return err;
*link_ok = (status & BMSR_LSTATUS) != 0;
@@ -156,7 +160,8 @@ static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
else
sp = SPEED_10;
} else if (status & BMSR_ANEGCOMPLETE) {
- err = mdio_read(cphy, 0, VSC8211_AUX_CTRL_STAT, &status);
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_AUX_CTRL_STAT,
+ &status);
if (err)
return err;
@@ -170,9 +175,11 @@ static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
sp = SPEED_1000;
if (fc && dplx == DUPLEX_FULL) {
- err = mdio_read(cphy, 0, MII_LPA, &lpa);
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA,
+ &lpa);
if (!err)
- err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE,
+ MII_ADVERTISE, &adv);
if (err)
return err;
@@ -202,9 +209,9 @@ static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok,
unsigned int bmcr, status, lpa, adv;
int err, sp = -1, dplx = -1, pause = 0;
- err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr);
if (!err)
- err = mdio_read(cphy, 0, MII_BMSR, &status);
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status);
if (err)
return err;
@@ -214,7 +221,8 @@ static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok,
* once more to get the current link state.
*/
if (!(status & BMSR_LSTATUS))
- err = mdio_read(cphy, 0, MII_BMSR, &status);
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR,
+ &status);
if (err)
return err;
*link_ok = (status & BMSR_LSTATUS) != 0;
@@ -228,9 +236,10 @@ static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok,
else
sp = SPEED_10;
} else if (status & BMSR_ANEGCOMPLETE) {
- err = mdio_read(cphy, 0, MII_LPA, &lpa);
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA, &lpa);
if (!err)
- err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_ADVERTISE,
+ &adv);
if (err)
return err;
@@ -270,23 +279,23 @@ static int vsc8211_set_automdi(struct cphy *phy, int enable)
{
int err;
- err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 0x52b5);
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0x52b5);
if (err)
return err;
- err = mdio_write(phy, 0, 18, 0x12);
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 18, 0x12);
if (err)
return err;
- err = mdio_write(phy, 0, 17, enable ? 0x2803 : 0x3003);
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 17, enable ? 0x2803 : 0x3003);
if (err)
return err;
- err = mdio_write(phy, 0, 16, 0x87fa);
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 16, 0x87fa);
if (err)
return err;
- err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 0);
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0);
if (err)
return err;
@@ -315,7 +324,7 @@ static int vsc8211_intr_handler(struct cphy *cphy)
unsigned int cause;
int err, cphy_cause = 0;
- err = mdio_read(cphy, 0, VSC8211_INTR_STATUS, &cause);
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &cause);
if (err)
return err;
@@ -367,12 +376,13 @@ int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T");
msleep(20); /* PHY needs ~10ms to start responding to MDIO */
- err = mdio_read(phy, 0, VSC8211_EXT_CTRL, &val);
+ err = t3_mdio_read(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL, &val);
if (err)
return err;
if (val & VSC_CTRL_MEDIA_MODE_HI) {
/* copper interface, just need to configure the LEDs */
- return mdio_write(phy, 0, VSC8211_LED_CTRL, 0x100);
+ return t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_LED_CTRL,
+ 0x100);
}
phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
@@ -380,20 +390,20 @@ int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
phy->desc = "1000BASE-X";
phy->ops = &vsc8211_fiber_ops;
- err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 1);
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 1);
if (err)
return err;
- err = mdio_write(phy, 0, VSC8211_SIGDET_CTRL, 1);
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_SIGDET_CTRL, 1);
if (err)
return err;
- err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 0);
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0);
if (err)
return err;
- err = mdio_write(phy, 0, VSC8211_EXT_CTRL,
- val | VSC_CTRL_CLAUSE37_VIEW);
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL,
+ val | VSC_CTRL_CLAUSE37_VIEW);
if (err)
return err;
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
new file mode 100644
index 00000000000..0e9b9f9632c
--- /dev/null
+++ b/drivers/net/davinci_emac.c
@@ -0,0 +1,2830 @@
+/*
+ * DaVinci Ethernet Medium Access Controller
+ *
+ * DaVinci EMAC is based upon CPPI 3.0 TI DMA engine
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ---------------------------------------------------------------------------
+ * History:
+ * 0-5 A number of folks worked on this driver in bits and pieces but the major
+ * contribution came from Suraj Iyer and Anant Gole
+ * 6.0 Anant Gole - rewrote the driver as per Linux conventions
+ * 6.1 Chaithrika U S - added support for Gigabit and RMII features,
+ * PHY layer usage
+ */
+
+/** Pending Items in this driver:
+ * 1. Use Linux cache infrastructure for DMA'ed memory (dma_xxx functions)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/version.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+#include <linux/phy.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+#include <asm/page.h>
+
+#include <mach/emac.h>
+
+static int debug_level;
+module_param(debug_level, int, 0);
+MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
+
+/* Netif debug messages possible */
+#define DAVINCI_EMAC_DEBUG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_INTR | \
+ NETIF_MSG_TX_DONE | \
+ NETIF_MSG_RX_STATUS | \
+ NETIF_MSG_PKTDATA | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_WOL)
+
+/* version info */
+#define EMAC_MAJOR_VERSION 6
+#define EMAC_MINOR_VERSION 1
+#define EMAC_MODULE_VERSION "6.1"
+MODULE_VERSION(EMAC_MODULE_VERSION);
+static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
+
+/* Configuration items */
+#define EMAC_DEF_PASS_CRC (0) /* Do not pass CRC up to frames */
+#define EMAC_DEF_QOS_EN (0) /* EMAC proprietary QoS disabled */
+#define EMAC_DEF_NO_BUFF_CHAIN (0) /* No buffer chain */
+#define EMAC_DEF_MACCTRL_FRAME_EN (0) /* Discard Maccontrol frames */
+#define EMAC_DEF_SHORT_FRAME_EN (0) /* Discard short frames */
+#define EMAC_DEF_ERROR_FRAME_EN (0) /* Discard error frames */
+#define EMAC_DEF_PROM_EN (0) /* Promiscuous disabled */
+#define EMAC_DEF_PROM_CH (0) /* Promiscuous channel is 0 */
+#define EMAC_DEF_BCAST_EN (1) /* Broadcast enabled */
+#define EMAC_DEF_BCAST_CH (0) /* Broadcast channel is 0 */
+#define EMAC_DEF_MCAST_EN (1) /* Multicast enabled */
+#define EMAC_DEF_MCAST_CH (0) /* Multicast channel is 0 */
+
+#define EMAC_DEF_TXPRIO_FIXED (1) /* TX Priority is fixed */
+#define EMAC_DEF_TXPACING_EN (0) /* TX pacing NOT supported*/
+
+#define EMAC_DEF_BUFFER_OFFSET (0) /* Buffer offset to DMA (future) */
+#define EMAC_DEF_MIN_ETHPKTSIZE (60) /* Minimum ethernet pkt size */
+#define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4) /* MTU + hdr + FCS + VLAN */
+#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
+#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
+#define EMAC_DEF_MDIO_TICK_MS (10) /* typically 1 tick = 1 ms */
+#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
+#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
+#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
+
+/* Buffer descriptor parameters */
+#define EMAC_DEF_TX_MAX_SERVICE (32) /* TX max service BD's */
+#define EMAC_DEF_RX_MAX_SERVICE (64) /* should = netdev->weight */
+
+/* EMAC register related defines */
+#define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF)
+#define EMAC_NUM_MULTICAST_BITS (64)
+#define EMAC_TEARDOWN_VALUE (0xFFFFFFFC)
+#define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1)
+#define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1)
+#define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2)
+#define EMAC_RX_UNICAST_CLEAR_ALL (0xFF)
+#define EMAC_INT_MASK_CLEAR (0xFF)
+
+/* RX MBP register bit positions */
+#define EMAC_RXMBP_PASSCRC_MASK BIT(30)
+#define EMAC_RXMBP_QOSEN_MASK BIT(29)
+#define EMAC_RXMBP_NOCHAIN_MASK BIT(28)
+#define EMAC_RXMBP_CMFEN_MASK BIT(24)
+#define EMAC_RXMBP_CSFEN_MASK BIT(23)
+#define EMAC_RXMBP_CEFEN_MASK BIT(22)
+#define EMAC_RXMBP_CAFEN_MASK BIT(21)
+#define EMAC_RXMBP_PROMCH_SHIFT (16)
+#define EMAC_RXMBP_PROMCH_MASK (0x7 << 16)
+#define EMAC_RXMBP_BROADEN_MASK BIT(13)
+#define EMAC_RXMBP_BROADCH_SHIFT (8)
+#define EMAC_RXMBP_BROADCH_MASK (0x7 << 8)
+#define EMAC_RXMBP_MULTIEN_MASK BIT(5)
+#define EMAC_RXMBP_MULTICH_SHIFT (0)
+#define EMAC_RXMBP_MULTICH_MASK (0x7)
+#define EMAC_RXMBP_CHMASK (0x7)
+
+/* EMAC register definitions/bit maps used */
+# define EMAC_MBP_RXPROMISC (0x00200000)
+# define EMAC_MBP_PROMISCCH(ch) (((ch) & 0x7) << 16)
+# define EMAC_MBP_RXBCAST (0x00002000)
+# define EMAC_MBP_BCASTCHAN(ch) (((ch) & 0x7) << 8)
+# define EMAC_MBP_RXMCAST (0x00000020)
+# define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7)
+
+/* EMAC mac_control register */
+#define EMAC_MACCONTROL_TXPTYPE (0x200)
+#define EMAC_MACCONTROL_TXPACEEN (0x40)
+#define EMAC_MACCONTROL_MIIEN (0x20)
+#define EMAC_MACCONTROL_GIGABITEN (0x80)
+#define EMAC_MACCONTROL_GIGABITEN_SHIFT (7)
+#define EMAC_MACCONTROL_FULLDUPLEXEN (0x1)
+#define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15)
+
+/* GIGABIT MODE related bits */
+#define EMAC_DM646X_MACCONTORL_GMIIEN BIT(5)
+#define EMAC_DM646X_MACCONTORL_GIG BIT(7)
+#define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17)
+
+/* EMAC mac_status register */
+#define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000)
+#define EMAC_MACSTATUS_TXERRCODE_SHIFT (20)
+#define EMAC_MACSTATUS_TXERRCH_MASK (0x7)
+#define EMAC_MACSTATUS_TXERRCH_SHIFT (16)
+#define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000)
+#define EMAC_MACSTATUS_RXERRCODE_SHIFT (12)
+#define EMAC_MACSTATUS_RXERRCH_MASK (0x7)
+#define EMAC_MACSTATUS_RXERRCH_SHIFT (8)
+
+/* EMAC RX register masks */
+#define EMAC_RX_MAX_LEN_MASK (0xFFFF)
+#define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF)
+
+/* MAC_IN_VECTOR (0x180) register bit fields */
+#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT (0x20000)
+#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT (0x10000)
+#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC (0x0100)
+#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC (0x01)
+
+/** NOTE:: For DM646x the IN_VECTOR has changed */
+#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC BIT(16 + EMAC_DEF_TX_CH)
+
+/* CPPI bit positions */
+#define EMAC_CPPI_SOP_BIT BIT(31)
+#define EMAC_CPPI_EOP_BIT BIT(30)
+#define EMAC_CPPI_OWNERSHIP_BIT BIT(29)
+#define EMAC_CPPI_EOQ_BIT BIT(28)
+#define EMAC_CPPI_TEARDOWN_COMPLETE_BIT BIT(27)
+#define EMAC_CPPI_PASS_CRC_BIT BIT(26)
+#define EMAC_RX_BD_BUF_SIZE (0xFFFF)
+#define EMAC_BD_LENGTH_FOR_CACHE (16) /* only CPPI bytes */
+#define EMAC_RX_BD_PKT_LENGTH_MASK (0xFFFF)
+
+/* Max hardware defines */
+#define EMAC_MAX_TXRX_CHANNELS (8) /* Max hardware channels */
+#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */
+
+/* EMAC Peripheral Device Register Memory Layout structure */
+#define EMAC_TXIDVER 0x0
+#define EMAC_TXCONTROL 0x4
+#define EMAC_TXTEARDOWN 0x8
+#define EMAC_RXIDVER 0x10
+#define EMAC_RXCONTROL 0x14
+#define EMAC_RXTEARDOWN 0x18
+#define EMAC_TXINTSTATRAW 0x80
+#define EMAC_TXINTSTATMASKED 0x84
+#define EMAC_TXINTMASKSET 0x88
+#define EMAC_TXINTMASKCLEAR 0x8C
+#define EMAC_MACINVECTOR 0x90
+
+#define EMAC_DM646X_MACEOIVECTOR 0x94
+
+#define EMAC_RXINTSTATRAW 0xA0
+#define EMAC_RXINTSTATMASKED 0xA4
+#define EMAC_RXINTMASKSET 0xA8
+#define EMAC_RXINTMASKCLEAR 0xAC
+#define EMAC_MACINTSTATRAW 0xB0
+#define EMAC_MACINTSTATMASKED 0xB4
+#define EMAC_MACINTMASKSET 0xB8
+#define EMAC_MACINTMASKCLEAR 0xBC
+
+#define EMAC_RXMBPENABLE 0x100
+#define EMAC_RXUNICASTSET 0x104
+#define EMAC_RXUNICASTCLEAR 0x108
+#define EMAC_RXMAXLEN 0x10C
+#define EMAC_RXBUFFEROFFSET 0x110
+#define EMAC_RXFILTERLOWTHRESH 0x114
+
+#define EMAC_MACCONTROL 0x160
+#define EMAC_MACSTATUS 0x164
+#define EMAC_EMCONTROL 0x168
+#define EMAC_FIFOCONTROL 0x16C
+#define EMAC_MACCONFIG 0x170
+#define EMAC_SOFTRESET 0x174
+#define EMAC_MACSRCADDRLO 0x1D0
+#define EMAC_MACSRCADDRHI 0x1D4
+#define EMAC_MACHASH1 0x1D8
+#define EMAC_MACHASH2 0x1DC
+#define EMAC_MACADDRLO 0x500
+#define EMAC_MACADDRHI 0x504
+#define EMAC_MACINDEX 0x508
+
+/* EMAC HDP and Completion registers */
+#define EMAC_TXHDP(ch) (0x600 + (ch * 4))
+#define EMAC_RXHDP(ch) (0x620 + (ch * 4))
+#define EMAC_TXCP(ch) (0x640 + (ch * 4))
+#define EMAC_RXCP(ch) (0x660 + (ch * 4))
+
+/* EMAC statistics registers */
+#define EMAC_RXGOODFRAMES 0x200
+#define EMAC_RXBCASTFRAMES 0x204
+#define EMAC_RXMCASTFRAMES 0x208
+#define EMAC_RXPAUSEFRAMES 0x20C
+#define EMAC_RXCRCERRORS 0x210
+#define EMAC_RXALIGNCODEERRORS 0x214
+#define EMAC_RXOVERSIZED 0x218
+#define EMAC_RXJABBER 0x21C
+#define EMAC_RXUNDERSIZED 0x220
+#define EMAC_RXFRAGMENTS 0x224
+#define EMAC_RXFILTERED 0x228
+#define EMAC_RXQOSFILTERED 0x22C
+#define EMAC_RXOCTETS 0x230
+#define EMAC_TXGOODFRAMES 0x234
+#define EMAC_TXBCASTFRAMES 0x238
+#define EMAC_TXMCASTFRAMES 0x23C
+#define EMAC_TXPAUSEFRAMES 0x240
+#define EMAC_TXDEFERRED 0x244
+#define EMAC_TXCOLLISION 0x248
+#define EMAC_TXSINGLECOLL 0x24C
+#define EMAC_TXMULTICOLL 0x250
+#define EMAC_TXEXCESSIVECOLL 0x254
+#define EMAC_TXLATECOLL 0x258
+#define EMAC_TXUNDERRUN 0x25C
+#define EMAC_TXCARRIERSENSE 0x260
+#define EMAC_TXOCTETS 0x264
+#define EMAC_NETOCTETS 0x280
+#define EMAC_RXSOFOVERRUNS 0x284
+#define EMAC_RXMOFOVERRUNS 0x288
+#define EMAC_RXDMAOVERRUNS 0x28C
+
+/* EMAC DM644x control registers */
+#define EMAC_CTRL_EWCTL (0x4)
+#define EMAC_CTRL_EWINTTCNT (0x8)
+
+/* EMAC MDIO related */
+/* Mask & Control defines */
+#define MDIO_CONTROL_CLKDIV (0xFF)
+#define MDIO_CONTROL_ENABLE BIT(30)
+#define MDIO_USERACCESS_GO BIT(31)
+#define MDIO_USERACCESS_WRITE BIT(30)
+#define MDIO_USERACCESS_READ (0)
+#define MDIO_USERACCESS_REGADR (0x1F << 21)
+#define MDIO_USERACCESS_PHYADR (0x1F << 16)
+#define MDIO_USERACCESS_DATA (0xFFFF)
+#define MDIO_USERPHYSEL_LINKSEL BIT(7)
+#define MDIO_VER_MODID (0xFFFF << 16)
+#define MDIO_VER_REVMAJ (0xFF << 8)
+#define MDIO_VER_REVMIN (0xFF)
+
+#define MDIO_USERACCESS(inst) (0x80 + (inst * 8))
+#define MDIO_USERPHYSEL(inst) (0x84 + (inst * 8))
+#define MDIO_CONTROL (0x04)
+
+/* EMAC DM646X control module registers */
+#define EMAC_DM646X_CMRXINTEN (0x14)
+#define EMAC_DM646X_CMTXINTEN (0x18)
+
+/* EMAC EOI codes for C0 */
+#define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01)
+#define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02)
+
+/** net_buf_obj: EMAC network buffer data structure
+ *
+ * EMAC network buffer data structure
+ */
+struct emac_netbufobj {
+ void *buf_token;
+ char *data_ptr;
+ int length;
+};
+
+/** net_pkt_obj: EMAC network packet data structure
+ *
+ * EMAC network packet data structure - supports buffer list (for future)
+ */
+struct emac_netpktobj {
+ void *pkt_token; /* data token may hold tx/rx chan id */
+ struct emac_netbufobj *buf_list; /* array of network buffer objects */
+ int num_bufs;
+ int pkt_length;
+};
+
+/** emac_tx_bd: EMAC TX Buffer descriptor data structure
+ *
+ * EMAC TX Buffer descriptor data structure
+ */
+struct emac_tx_bd {
+ int h_next;
+ int buff_ptr;
+ int off_b_len;
+ int mode; /* SOP, EOP, ownership, EOQ, teardown,Qstarv, length */
+ struct emac_tx_bd __iomem *next;
+ void *buf_token;
+};
+
+/** emac_txch: EMAC TX Channel data structure
+ *
+ * EMAC TX Channel data structure
+ */
+struct emac_txch {
+ /* Config related */
+ u32 num_bd;
+ u32 service_max;
+
+ /* CPPI specific */
+ u32 alloc_size;
+ void __iomem *bd_mem;
+ struct emac_tx_bd __iomem *bd_pool_head;
+ struct emac_tx_bd __iomem *active_queue_head;
+ struct emac_tx_bd __iomem *active_queue_tail;
+ struct emac_tx_bd __iomem *last_hw_bdprocessed;
+ u32 queue_active;
+ u32 teardown_pending;
+ u32 *tx_complete;
+
+ /** statistics */
+ u32 proc_count; /* TX: # of times emac_tx_bdproc is called */
+ u32 mis_queued_packets;
+ u32 queue_reinit;
+ u32 end_of_queue_add;
+ u32 out_of_tx_bd;
+ u32 no_active_pkts; /* IRQ when there were no packets to process */
+ u32 active_queue_count;
+};
+
+/** emac_rx_bd: EMAC RX Buffer descriptor data structure
+ *
+ * EMAC RX Buffer descriptor data structure
+ */
+struct emac_rx_bd {
+ int h_next;
+ int buff_ptr;
+ int off_b_len;
+ int mode;
+ struct emac_rx_bd __iomem *next;
+ void *data_ptr;
+ void *buf_token;
+};
+
+/** emac_rxch: EMAC RX Channel data structure
+ *
+ * EMAC RX Channel data structure
+ */
+struct emac_rxch {
+ /* configuration info */
+ u32 num_bd;
+ u32 service_max;
+ u32 buf_size;
+ char mac_addr[6];
+
+ /** CPPI specific */
+ u32 alloc_size;
+ void __iomem *bd_mem;
+ struct emac_rx_bd __iomem *bd_pool_head;
+ struct emac_rx_bd __iomem *active_queue_head;
+ struct emac_rx_bd __iomem *active_queue_tail;
+ u32 queue_active;
+ u32 teardown_pending;
+
+ /* packet and buffer objects */
+ struct emac_netpktobj pkt_queue;
+ struct emac_netbufobj buf_queue;
+
+ /** statistics */
+ u32 proc_count; /* number of times emac_rx_bdproc is called */
+ u32 processed_bd;
+ u32 recycled_bd;
+ u32 out_of_rx_bd;
+ u32 out_of_rx_buffers;
+ u32 queue_reinit;
+ u32 end_of_queue_add;
+ u32 end_of_queue;
+ u32 mis_queued_packets;
+};
+
+/* emac_priv: EMAC private data structure
+ *
+ * EMAC adapter private data structure
+ */
+struct emac_priv {
+ u32 msg_enable;
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ struct napi_struct napi;
+ char mac_addr[6];
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+ void __iomem *remap_addr;
+ u32 emac_base_phys;
+ void __iomem *emac_base;
+ void __iomem *ctrl_base;
+ void __iomem *emac_ctrl_ram;
+ u32 ctrl_ram_size;
+ struct emac_txch *txch[EMAC_DEF_MAX_TX_CH];
+ struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH];
+ u32 link; /* 1=link on, 0=link off */
+ u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */
+ u32 duplex; /* Link duplex: 0=Half, 1=Full */
+ u32 rx_buf_size;
+ u32 isr_count;
+ u8 rmii_en;
+ u8 version;
+ struct net_device_stats net_dev_stats;
+ u32 mac_hash1;
+ u32 mac_hash2;
+ u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
+ u32 rx_addr_type;
+ /* periodic timer required for MDIO polling */
+ struct timer_list periodic_timer;
+ u32 periodic_ticks;
+ u32 timer_active;
+ u32 phy_mask;
+ /* mii_bus,phy members */
+ struct mii_bus *mii_bus;
+ struct phy_device *phydev;
+ spinlock_t lock;
+};
+
+/* clock frequency for EMAC */
+static struct clk *emac_clk;
+static unsigned long emac_bus_frequency;
+static unsigned long mdio_max_freq;
+
+/* EMAC internal utility function */
+static inline u32 emac_virt_to_phys(void __iomem *addr)
+{
+ return (u32 __force) io_v2p(addr);
+}
+
+/* Cache macros - packet buffers come from the skb pool, which is cached */
+#define EMAC_VIRT_NOCACHE(addr) (addr)
+#define EMAC_CACHE_INVALIDATE(addr, size) \
+ dma_cache_maint((void *)addr, size, DMA_FROM_DEVICE)
+#define EMAC_CACHE_WRITEBACK(addr, size) \
+ dma_cache_maint((void *)addr, size, DMA_TO_DEVICE)
+#define EMAC_CACHE_WRITEBACK_INVALIDATE(addr, size) \
+ dma_cache_maint((void *)addr, size, DMA_BIDIRECTIONAL)
+
+/* DM644x does not have BD's in cached memory - so no cache functions */
+#define BD_CACHE_INVALIDATE(addr, size)
+#define BD_CACHE_WRITEBACK(addr, size)
+#define BD_CACHE_WRITEBACK_INVALIDATE(addr, size)
+
+/* EMAC TX Host Error description strings */
+static char *emac_txhost_errcodes[16] = {
+ "No error", "SOP error", "Ownership bit not set in SOP buffer",
+ "Zero Next Buffer Descriptor Pointer Without EOP",
+ "Zero Buffer Pointer", "Zero Buffer Length", "Packet Length Error",
+ "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved"
+};
+
+/* EMAC RX Host Error description strings */
+static char *emac_rxhost_errcodes[16] = {
+ "No error", "Reserved", "Ownership bit not set in input buffer",
+ "Reserved", "Zero Buffer Pointer", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved"
+};
+
+/* Helper macros */
+#define emac_read(reg) ioread32(priv->emac_base + (reg))
+#define emac_write(reg, val) iowrite32(val, priv->emac_base + (reg))
+
+#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg)))
+#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
+
+#define emac_mdio_read(reg) ioread32(bus->priv + (reg))
+#define emac_mdio_write(reg, val) iowrite32(val, (bus->priv + (reg)))
+
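These accessors close over the local priv (or bus) pointer of the calling function, so each register access is just an ioread32()/iowrite32() at a fixed offset from the remapped base. Written out by hand as an illustration, using the register offsets defined above:

/*
 * emac_write(EMAC_MACHASH1, priv->mac_hash1);
 *         becomes  iowrite32(priv->mac_hash1, priv->emac_base + 0x1D8);
 *
 * val = emac_read(EMAC_MACSTATUS);
 *         becomes  val = ioread32(priv->emac_base + 0x164);
 */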
+/**
+ * emac_dump_regs: Dump important EMAC registers to debug terminal
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Dumps the important EMAC registers and statistics to the kernel log
+ *
+ */
+static void emac_dump_regs(struct emac_priv *priv)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+
+ /* Print important registers in EMAC */
+ dev_info(emac_dev, "EMAC Basic registers\n");
+ dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n",
+ emac_ctrl_read(EMAC_CTRL_EWCTL),
+ emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
+ dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n",
+ emac_read(EMAC_TXIDVER),
+ ((emac_read(EMAC_TXCONTROL)) ? "enabled" : "disabled"),
+ emac_read(EMAC_RXIDVER),
+ ((emac_read(EMAC_RXCONTROL)) ? "enabled" : "disabled"));
+ dev_info(emac_dev, "EMAC: TXIntRaw:%08X, TxIntMasked: %08X, "\
+ "TxIntMasSet: %08X\n", emac_read(EMAC_TXINTSTATRAW),
+ emac_read(EMAC_TXINTSTATMASKED), emac_read(EMAC_TXINTMASKSET));
+ dev_info(emac_dev, "EMAC: RXIntRaw:%08X, RxIntMasked: %08X, "\
+ "RxIntMasSet: %08X\n", emac_read(EMAC_RXINTSTATRAW),
+ emac_read(EMAC_RXINTSTATMASKED), emac_read(EMAC_RXINTMASKSET));
+ dev_info(emac_dev, "EMAC: MacIntRaw:%08X, MacIntMasked: %08X, "\
+ "MacInVector=%08X\n", emac_read(EMAC_MACINTSTATRAW),
+ emac_read(EMAC_MACINTSTATMASKED), emac_read(EMAC_MACINVECTOR));
+ dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n",
+ emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL));
+ dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\
+ "RXMaxLen=%08X\n", emac_read(EMAC_RXMBPENABLE),
+ emac_read(EMAC_RXUNICASTSET), emac_read(EMAC_RXMAXLEN));
+ dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\
+ "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL),
+ emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG));
+ dev_info(emac_dev, "EMAC: TXHDP[0]:%08X, RXHDP[0]: %08X\n",
+ emac_read(EMAC_TXHDP(0)), emac_read(EMAC_RXHDP(0)));
+ dev_info(emac_dev, "EMAC Statistics\n");
+ dev_info(emac_dev, "EMAC: rx_good_frames:%d\n",
+ emac_read(EMAC_RXGOODFRAMES));
+ dev_info(emac_dev, "EMAC: rx_broadcast_frames:%d\n",
+ emac_read(EMAC_RXBCASTFRAMES));
+ dev_info(emac_dev, "EMAC: rx_multicast_frames:%d\n",
+ emac_read(EMAC_RXMCASTFRAMES));
+ dev_info(emac_dev, "EMAC: rx_pause_frames:%d\n",
+ emac_read(EMAC_RXPAUSEFRAMES));
+ dev_info(emac_dev, "EMAC: rx_crcerrors:%d\n",
+ emac_read(EMAC_RXCRCERRORS));
+ dev_info(emac_dev, "EMAC: rx_align_code_errors:%d\n",
+ emac_read(EMAC_RXALIGNCODEERRORS));
+ dev_info(emac_dev, "EMAC: rx_oversized_frames:%d\n",
+ emac_read(EMAC_RXOVERSIZED));
+ dev_info(emac_dev, "EMAC: rx_jabber_frames:%d\n",
+ emac_read(EMAC_RXJABBER));
+ dev_info(emac_dev, "EMAC: rx_undersized_frames:%d\n",
+ emac_read(EMAC_RXUNDERSIZED));
+ dev_info(emac_dev, "EMAC: rx_fragments:%d\n",
+ emac_read(EMAC_RXFRAGMENTS));
+ dev_info(emac_dev, "EMAC: rx_filtered_frames:%d\n",
+ emac_read(EMAC_RXFILTERED));
+ dev_info(emac_dev, "EMAC: rx_qos_filtered_frames:%d\n",
+ emac_read(EMAC_RXQOSFILTERED));
+ dev_info(emac_dev, "EMAC: rx_octets:%d\n",
+ emac_read(EMAC_RXOCTETS));
+ dev_info(emac_dev, "EMAC: tx_goodframes:%d\n",
+ emac_read(EMAC_TXGOODFRAMES));
+ dev_info(emac_dev, "EMAC: tx_bcastframes:%d\n",
+ emac_read(EMAC_TXBCASTFRAMES));
+ dev_info(emac_dev, "EMAC: tx_mcastframes:%d\n",
+ emac_read(EMAC_TXMCASTFRAMES));
+ dev_info(emac_dev, "EMAC: tx_pause_frames:%d\n",
+ emac_read(EMAC_TXPAUSEFRAMES));
+ dev_info(emac_dev, "EMAC: tx_deferred_frames:%d\n",
+ emac_read(EMAC_TXDEFERRED));
+ dev_info(emac_dev, "EMAC: tx_collision_frames:%d\n",
+ emac_read(EMAC_TXCOLLISION));
+ dev_info(emac_dev, "EMAC: tx_single_coll_frames:%d\n",
+ emac_read(EMAC_TXSINGLECOLL));
+ dev_info(emac_dev, "EMAC: tx_mult_coll_frames:%d\n",
+ emac_read(EMAC_TXMULTICOLL));
+ dev_info(emac_dev, "EMAC: tx_excessive_collisions:%d\n",
+ emac_read(EMAC_TXEXCESSIVECOLL));
+ dev_info(emac_dev, "EMAC: tx_late_collisions:%d\n",
+ emac_read(EMAC_TXLATECOLL));
+ dev_info(emac_dev, "EMAC: tx_underrun:%d\n",
+ emac_read(EMAC_TXUNDERRUN));
+ dev_info(emac_dev, "EMAC: tx_carrier_sense_errors:%d\n",
+ emac_read(EMAC_TXCARRIERSENSE));
+ dev_info(emac_dev, "EMAC: tx_octets:%d\n",
+ emac_read(EMAC_TXOCTETS));
+ dev_info(emac_dev, "EMAC: net_octets:%d\n",
+ emac_read(EMAC_NETOCTETS));
+ dev_info(emac_dev, "EMAC: rx_sof_overruns:%d\n",
+ emac_read(EMAC_RXSOFOVERRUNS));
+ dev_info(emac_dev, "EMAC: rx_mof_overruns:%d\n",
+ emac_read(EMAC_RXMOFOVERRUNS));
+ dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n",
+ emac_read(EMAC_RXDMAOVERRUNS));
+}
+
+/*************************************************************************
+ * EMAC MDIO/Phy Functionality
+ *************************************************************************/
+/**
+ * emac_get_drvinfo: Get EMAC driver information
+ * @ndev: The DaVinci EMAC network adapter
+ * @info: ethtool info structure containing name and version
+ *
+ * Returns EMAC driver information (name and version)
+ *
+ */
+static void emac_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, emac_version_string);
+ strcpy(info->version, EMAC_MODULE_VERSION);
+}
+
+/**
+ * emac_get_settings: Get EMAC settings
+ * @ndev: The DaVinci EMAC network adapter
+ * @ecmd: ethtool command
+ *
+ * Executes ethtool get command
+ *
+ */
+static int emac_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ if (priv->phy_mask)
+ return phy_ethtool_gset(priv->phydev, ecmd);
+ else
+ return -EOPNOTSUPP;
+
+}
+
+/**
+ * emac_set_settings: Set EMAC settings
+ * @ndev: The DaVinci EMAC network adapter
+ * @ecmd: ethtool command
+ *
+ * Executes ethtool set command
+ *
+ */
+static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ if (priv->phy_mask)
+ return phy_ethtool_sset(priv->phydev, ecmd);
+ else
+ return -EOPNOTSUPP;
+
+}
+
+/**
+ * ethtool_ops: DaVinci EMAC Ethtool structure
+ *
+ * Ethtool support for EMAC adapter
+ *
+ */
+static const struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = emac_get_drvinfo,
+ .get_settings = emac_get_settings,
+ .set_settings = emac_set_settings,
+ .get_link = ethtool_op_get_link,
+};
+
+/**
+ * emac_update_phystatus: Update Phy status
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Updates phy status and takes action for network queue if required
+ * based upon link status
+ *
+ */
+static void emac_update_phystatus(struct emac_priv *priv)
+{
+ u32 mac_control;
+ u32 new_duplex;
+ u32 cur_duplex;
+ struct net_device *ndev = priv->ndev;
+
+ mac_control = emac_read(EMAC_MACCONTROL);
+ cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ if (priv->phy_mask)
+ new_duplex = priv->phydev->duplex;
+ else
+ new_duplex = DUPLEX_FULL;
+
+ /* We get called only if link has changed (speed/duplex/status) */
+ if ((priv->link) && (new_duplex != cur_duplex)) {
+ priv->duplex = new_duplex;
+ if (DUPLEX_FULL == priv->duplex)
+ mac_control |= (EMAC_MACCONTROL_FULLDUPLEXEN);
+ else
+ mac_control &= ~(EMAC_MACCONTROL_FULLDUPLEXEN);
+ }
+
+ if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) {
+ mac_control = emac_read(EMAC_MACCONTROL);
+ mac_control |= (EMAC_DM646X_MACCONTORL_GMIIEN |
+ EMAC_DM646X_MACCONTORL_GIG |
+ EMAC_DM646X_MACCONTORL_GIGFORCE);
+ } else {
+ /* Clear the GIG bit and GIGFORCE bit */
+ mac_control &= ~(EMAC_DM646X_MACCONTORL_GIGFORCE |
+ EMAC_DM646X_MACCONTORL_GIG);
+
+ if (priv->rmii_en && (priv->speed == SPEED_100))
+ mac_control |= EMAC_MACCONTROL_RMIISPEED_MASK;
+ else
+ mac_control &= ~EMAC_MACCONTROL_RMIISPEED_MASK;
+ }
+
+ /* Update mac_control if changed */
+ emac_write(EMAC_MACCONTROL, mac_control);
+
+ if (priv->link) {
+ /* link ON */
+ if (!netif_carrier_ok(ndev))
+ netif_carrier_on(ndev);
+ /* reactivate the transmit queue if it is stopped */
+ if (netif_running(ndev) && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ } else {
+ /* link OFF */
+ if (netif_carrier_ok(ndev))
+ netif_carrier_off(ndev);
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ }
+}
+
+/**
+ * hash_get: Calculate hash value from mac address
+ * @addr: mac address to hash
+ *
+ * Calculates hash value from mac address
+ *
+ */
+static u32 hash_get(u8 *addr)
+{
+ u32 hash;
+ u8 tmpval;
+ int cnt;
+ hash = 0;
+
+ for (cnt = 0; cnt < 2; cnt++) {
+ tmpval = *addr++;
+ hash ^= (tmpval >> 2) ^ (tmpval << 4);
+ tmpval = *addr++;
+ hash ^= (tmpval >> 4) ^ (tmpval << 2);
+ tmpval = *addr++;
+ hash ^= (tmpval >> 6) ^ (tmpval);
+ }
+
+ return hash & 0x3F;
+}
+
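hash_get() folds the six address bytes, two rounds of three bytes each, into a 6-bit index for the 64-entry MACHASH1/MACHASH2 filter. A standalone harness that mirrors the arithmetic above, handy for checking which hash bit a given multicast address lands on (hypothetical test code, not part of the driver):

#include <stdio.h>
#include <stdint.h>

/* Same folding as the driver's hash_get(): returns a value in 0..63. */
static uint32_t emac_hash(const uint8_t *addr)
{
	uint32_t hash = 0;
	int cnt;

	for (cnt = 0; cnt < 2; cnt++) {
		uint8_t tmp = *addr++;
		hash ^= (tmp >> 2) ^ (tmp << 4);
		tmp = *addr++;
		hash ^= (tmp >> 4) ^ (tmp << 2);
		tmp = *addr++;
		hash ^= (tmp >> 6) ^ tmp;
	}
	return hash & 0x3F;
}

int main(void)
{
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t h = emac_hash(mcast);

	/* Bits 0-31 live in MACHASH1, bits 32-63 in MACHASH2. */
	printf("hash=%u -> %s bit %u\n", h,
	       h < 32 ? "MACHASH1" : "MACHASH2", h < 32 ? h : h - 32);
	return 0;
}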
+/**
+ * hash_add: Hash function to add mac addr to hash table
+ * @priv: The DaVinci EMAC private adapter structure
+ * @mac_addr: mac address to add to hash table
+ *
+ * Adds mac address to the internal hash table
+ *
+ */
+static int hash_add(struct emac_priv *priv, u8 *mac_addr)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ u32 rc = 0;
+ u32 hash_bit;
+ u32 hash_value = hash_get(mac_addr);
+
+ if (hash_value >= EMAC_NUM_MULTICAST_BITS) {
+ if (netif_msg_drv(priv)) {
+ dev_err(emac_dev, "DaVinci EMAC: hash_add(): Invalid "\
+ "Hash %08x, should not be greater than %08x",
+ hash_value, (EMAC_NUM_MULTICAST_BITS - 1));
+ }
+ return -1;
+ }
+
+ /* set the hash bit only if not previously set */
+ if (priv->multicast_hash_cnt[hash_value] == 0) {
+ rc = 1; /* hash value changed */
+ if (hash_value < 32) {
+ hash_bit = BIT(hash_value);
+ priv->mac_hash1 |= hash_bit;
+ } else {
+ hash_bit = BIT((hash_value - 32));
+ priv->mac_hash2 |= hash_bit;
+ }
+ }
+
+ /* incr counter for num of mcast addr's mapped to "this" hash bit */
+ ++priv->multicast_hash_cnt[hash_value];
+
+ return rc;
+}
+
+/**
+ * hash_del: Hash function to delete mac addr from hash table
+ * @priv: The DaVinci EMAC private adapter structure
+ * @mac_addr: mac address to delete from hash table
+ *
+ * Removes mac address from the internal hash table
+ *
+ */
+static int hash_del(struct emac_priv *priv, u8 *mac_addr)
+{
+ u32 hash_value;
+ u32 hash_bit;
+
+ hash_value = hash_get(mac_addr);
+ if (priv->multicast_hash_cnt[hash_value] > 0) {
+ /* dec cntr for num of mcast addr's mapped to this hash bit */
+ --priv->multicast_hash_cnt[hash_value];
+ }
+
+ /* if counter still > 0, at least one multicast address refers
+ * to this hash bit. so return 0 */
+ if (priv->multicast_hash_cnt[hash_value] > 0)
+ return 0;
+
+ if (hash_value < 32) {
+ hash_bit = BIT(hash_value);
+ priv->mac_hash1 &= ~hash_bit;
+ } else {
+ hash_bit = BIT((hash_value - 32));
+ priv->mac_hash2 &= ~hash_bit;
+ }
+
+ /* return 1 to indicate change in mac_hash registers reqd */
+ return 1;
+}
+
+/* EMAC multicast operation */
+#define EMAC_MULTICAST_ADD 0
+#define EMAC_MULTICAST_DEL 1
+#define EMAC_ALL_MULTI_SET 2
+#define EMAC_ALL_MULTI_CLR 3
+
+/**
+ * emac_add_mcast: Set multicast address in the EMAC adapter (Internal)
+ * @priv: The DaVinci EMAC private adapter structure
+ * @action: multicast operation to perform
+ * @mac_addr: mac address to set
+ *
+ * Set multicast addresses in EMAC adapter - internal function
+ *
+ */
+static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ int update = -1;
+
+ switch (action) {
+ case EMAC_MULTICAST_ADD:
+ update = hash_add(priv, mac_addr);
+ break;
+ case EMAC_MULTICAST_DEL:
+ update = hash_del(priv, mac_addr);
+ break;
+ case EMAC_ALL_MULTI_SET:
+ update = 1;
+ priv->mac_hash1 = EMAC_ALL_MULTI_REG_VALUE;
+ priv->mac_hash2 = EMAC_ALL_MULTI_REG_VALUE;
+ break;
+ case EMAC_ALL_MULTI_CLR:
+ update = 1;
+ priv->mac_hash1 = 0;
+ priv->mac_hash2 = 0;
+ memset(&(priv->multicast_hash_cnt[0]), 0,
+ sizeof(priv->multicast_hash_cnt[0]) *
+ EMAC_NUM_MULTICAST_BITS);
+ break;
+ default:
+ if (netif_msg_drv(priv))
+ dev_err(emac_dev, "DaVinci EMAC: add_mcast"\
+ ": bad operation %d", action);
+ break;
+ }
+
+ /* write to the hardware only if the register status changes */
+ if (update > 0) {
+ emac_write(EMAC_MACHASH1, priv->mac_hash1);
+ emac_write(EMAC_MACHASH2, priv->mac_hash2);
+ }
+}
+
+/**
+ * emac_dev_mcast_set: Set multicast address in the EMAC adapter
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Set multicast addresses in EMAC adapter
+ *
+ */
+static void emac_dev_mcast_set(struct net_device *ndev)
+{
+ u32 mbp_enable;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ mbp_enable = emac_read(EMAC_RXMBPENABLE);
+ if (ndev->flags & IFF_PROMISC) {
+ mbp_enable &= (~EMAC_MBP_PROMISCCH(EMAC_DEF_PROM_CH));
+ mbp_enable |= (EMAC_MBP_RXPROMISC);
+ } else {
+ mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC);
+ if ((ndev->flags & IFF_ALLMULTI) ||
+ (ndev->mc_count > EMAC_DEF_MAX_MULTICAST_ADDRESSES)) {
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
+ }
+ if (ndev->mc_count > 0) {
+ struct dev_mc_list *mc_ptr;
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
+ /* program multicast address list into EMAC hardware */
+ for (mc_ptr = ndev->mc_list; mc_ptr;
+ mc_ptr = mc_ptr->next) {
+ emac_add_mcast(priv, EMAC_MULTICAST_ADD,
+ (u8 *)mc_ptr->dmi_addr);
+ }
+ } else {
+ mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
+ }
+ }
+ /* Set mbp config register */
+ emac_write(EMAC_RXMBPENABLE, mbp_enable);
+}
+
+/*************************************************************************
+ * EMAC Hardware manipulation
+ *************************************************************************/
+
+/**
+ * emac_int_disable: Disable EMAC module interrupt (from adapter)
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Disable EMAC interrupt on the adapter
+ *
+ */
+static void emac_int_disable(struct emac_priv *priv)
+{
+ if (priv->version == EMAC_VERSION_2) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* Program C0_Int_En to zero to turn off
+ * interrupts to the CPU */
+ emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0x0);
+ emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0x0);
+ /* NOTE: Rx Threshold and Misc interrupts are not disabled */
+
+ local_irq_restore(flags);
+
+ } else {
+ /* Set DM644x control registers for interrupt control */
+ emac_ctrl_write(EMAC_CTRL_EWCTL, 0x0);
+ }
+}
+
+/**
+ * emac_int_enable: Enable EMAC module interrupt (from adapter)
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Enable EMAC interrupt on the adapter
+ *
+ */
+static void emac_int_enable(struct emac_priv *priv)
+{
+ if (priv->version == EMAC_VERSION_2) {
+ emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0xff);
+ emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0xff);
+
+ /* In addition to turning on interrupt Enable, we need
+ * ack by writing appropriate values to the EOI
+ * register */
+
+ /* NOTE: Rx Threshold and Misc interrupts are not enabled */
+
+ /* ack rxen; only then will a new pulse be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_RXEN);
+
+ /* ack txen; only then will a new pulse be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_TXEN);
+
+ } else {
+ /* Set DM644x control registers for interrupt control */
+ emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
+ }
+}
+
+/**
+ * emac_irq: EMAC interrupt handler
+ * @irq: interrupt number
+ * @dev_id: EMAC network adapter data structure ptr
+ *
+ * EMAC Interrupt handler - we only schedule NAPI and do not process any packets
+ * here. Even the interrupt status (TX/RX/Err) is checked in the NAPI poll function
+ *
+ * Returns interrupt handled condition
+ */
+static irqreturn_t emac_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ ++priv->isr_count;
+ if (likely(netif_running(priv->ndev))) {
+ emac_int_disable(priv);
+ napi_schedule(&priv->napi);
+ } else {
+ /* we are closing down, so dont process anything */
+ }
+ return IRQ_HANDLED;
+}
+
+/** EMAC on-chip buffer descriptor memory
+ *
+ * WARNING: Please note that the on-chip memory is used for both TX and RX
+ * buffer descriptor queues and is equally divided between TX and RX descriptors.
+ * If the number of TX or RX descriptors changes, these memory pointers need
+ * to be adjusted. If external memory is allocated then these pointers can
+ * point to that memory instead
+ *
+ */
+#define EMAC_TX_BD_MEM(priv) ((priv)->emac_ctrl_ram)
+#define EMAC_RX_BD_MEM(priv) ((priv)->emac_ctrl_ram + \
+ (((priv)->ctrl_ram_size) >> 1))
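+
+/* Illustrative sketch (not part of the driver): the two macros above split
+ * the on-chip control RAM in half - TX buffer descriptors in the lower half,
+ * RX buffer descriptors in the upper half. Assuming, for example, an 8 KiB
+ * control RAM and descriptors that round up to 16 bytes (as computed in
+ * emac_init_txch() below), each direction gets 4096 / 16 = 256 descriptors.
+ */
+static inline u32 example_bds_per_direction(u32 ctrl_ram_size, u32 bd_size)
+{
+ /* round the descriptor size up to a 16 byte boundary, then divide
+ * half of the control RAM by it - mirrors emac_init_txch() */
+ bd_size = (bd_size + 0xF) & ~0xF;
+ return (ctrl_ram_size >> 1) / bd_size;
+}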
+
+/**
+ * emac_init_txch: TX channel initialization
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: TX channel number
+ *
+ * Called during device init to setup a TX channel (allocate buffer descriptors,
+ * create a free pool) and keep it ready for transmission
+ *
+ * Returns success(0) or mem alloc failures error code
+ */
+static int emac_init_txch(struct emac_priv *priv, u32 ch)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ u32 cnt, bd_size;
+ void __iomem *mem;
+ struct emac_tx_bd __iomem *curr_bd;
+ struct emac_txch *txch = NULL;
+
+ txch = kzalloc(sizeof(struct emac_txch), GFP_KERNEL);
+ if (NULL == txch) {
+ dev_err(emac_dev, "DaVinci EMAC: TX Ch mem alloc failed");
+ return -ENOMEM;
+ }
+ priv->txch[ch] = txch;
+ txch->service_max = EMAC_DEF_TX_MAX_SERVICE;
+ txch->active_queue_head = NULL;
+ txch->active_queue_tail = NULL;
+ txch->queue_active = 0;
+ txch->teardown_pending = 0;
+
+ /* allocate memory for TX CPPI channel on a 4 byte boundary */
+ txch->tx_complete = kzalloc(txch->service_max * sizeof(u32),
+ GFP_KERNEL);
+ if (NULL == txch->tx_complete) {
+ dev_err(emac_dev, "DaVinci EMAC: Tx service mem alloc failed");
+ kfree(txch);
+ return -ENOMEM;
+ }
+
+ /* allocate buffer descriptor pool; align every BD on a four word
+ * boundary for future requirements */
+ bd_size = (sizeof(struct emac_tx_bd) + 0xF) & ~0xF;
+ txch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
+ txch->alloc_size = (((bd_size * txch->num_bd) + 0xF) & ~0xF);
+
+ /* alloc TX BD memory */
+ txch->bd_mem = EMAC_TX_BD_MEM(priv);
+ __memzero((void __force *)txch->bd_mem, txch->alloc_size);
+
+ /* initialize the BD linked list */
+ mem = (void __force __iomem *)
+ (((u32 __force) txch->bd_mem + 0xF) & ~0xF);
+ txch->bd_pool_head = NULL;
+ for (cnt = 0; cnt < txch->num_bd; cnt++) {
+ curr_bd = mem + (cnt * bd_size);
+ curr_bd->next = txch->bd_pool_head;
+ txch->bd_pool_head = curr_bd;
+ }
+
+ /* reset statistics counters */
+ txch->out_of_tx_bd = 0;
+ txch->no_active_pkts = 0;
+ txch->active_queue_count = 0;
+
+ return 0;
+}
+
+/**
+ * emac_cleanup_txch: Book-keep function to clean TX channel resources
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: TX channel number
+ *
+ * Called to clean up TX channel resources
+ *
+ */
+static void emac_cleanup_txch(struct emac_priv *priv, u32 ch)
+{
+ struct emac_txch *txch = priv->txch[ch];
+
+ if (txch) {
+ if (txch->bd_mem)
+ txch->bd_mem = NULL;
+ kfree(txch->tx_complete);
+ kfree(txch);
+ priv->txch[ch] = NULL;
+ }
+}
+
+/**
+ * emac_net_tx_complete: TX packet completion function
+ * @priv: The DaVinci EMAC private adapter structure
+ * @net_data_tokens: packet token - skb pointer
+ * @num_tokens: number of skb's to free
+ * @ch: TX channel number
+ *
+ * Frees the skb once packet is transmitted
+ *
+ */
+static int emac_net_tx_complete(struct emac_priv *priv,
+ void **net_data_tokens,
+ int num_tokens, u32 ch)
+{
+ u32 cnt;
+
+ if (unlikely(num_tokens && netif_queue_stopped(priv->ndev)))
+ netif_start_queue(priv->ndev);
+ for (cnt = 0; cnt < num_tokens; cnt++) {
+ struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt];
+ if (skb == NULL)
+ continue;
+ priv->net_dev_stats.tx_packets++;
+ priv->net_dev_stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+ }
+ return 0;
+}
+
+/**
+ * emac_txch_teardown: TX channel teardown
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: TX channel number
+ *
+ * Called to teardown TX channel
+ *
+ */
+static void emac_txch_teardown(struct emac_priv *priv, u32 ch)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
+ struct emac_txch *txch = priv->txch[ch];
+ struct emac_tx_bd __iomem *curr_bd;
+
+ while ((emac_read(EMAC_TXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
+ EMAC_TEARDOWN_VALUE) {
+ /* wait till tx teardown complete */
+ cpu_relax(); /* TODO: check if this helps ... */
+ --teardown_cnt;
+ if (0 == teardown_cnt) {
+ dev_err(emac_dev, "EMAC: TX teardown aborted\n");
+ break;
+ }
+ }
+ emac_write(EMAC_TXCP(ch), EMAC_TEARDOWN_VALUE);
+
+ /* process sent packets and return skb's to upper layer */
+ if (1 == txch->queue_active) {
+ curr_bd = txch->active_queue_head;
+ while (curr_bd != NULL) {
+ emac_net_tx_complete(priv, (void __force *)
+ &curr_bd->buf_token, 1, ch);
+ if (curr_bd != txch->active_queue_tail)
+ curr_bd = curr_bd->next;
+ else
+ break;
+ }
+ txch->bd_pool_head = txch->active_queue_head;
+ txch->active_queue_head =
+ txch->active_queue_tail = NULL;
+ }
+}
+
+/**
+ * emac_stop_txch: Stop TX channel operation
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: TX channel number
+ *
+ * Called to stop TX channel operation
+ *
+ */
+static void emac_stop_txch(struct emac_priv *priv, u32 ch)
+{
+ struct emac_txch *txch = priv->txch[ch];
+
+ if (txch) {
+ txch->teardown_pending = 1;
+ emac_write(EMAC_TXTEARDOWN, 0);
+ emac_txch_teardown(priv, ch);
+ txch->teardown_pending = 0;
+ emac_write(EMAC_TXINTMASKCLEAR, BIT(ch));
+ }
+}
+
+/**
+ * emac_tx_bdproc: TX buffer descriptor (packet) processing
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: TX channel number to process buffer descriptors for
+ * @budget: number of packets allowed to process
+ *
+ * Processes TX buffer descriptors after packets are transmitted - checks the
+ * ownership bit on each TX descriptor, requeues it to the free pool and frees
+ * the SKB buffer. Only "budget" number of packets are processed and the count
+ * returned lets the caller know whether packets are still pending
+ *
+ * Returns number of packets processed
+ */
+static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ unsigned long flags;
+ u32 frame_status;
+ u32 pkts_processed = 0;
+ u32 tx_complete_cnt = 0;
+ struct emac_tx_bd __iomem *curr_bd;
+ struct emac_txch *txch = priv->txch[ch];
+ u32 *tx_complete_ptr = txch->tx_complete;
+
+ if (unlikely(1 == txch->teardown_pending)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit()) {
+ dev_err(emac_dev, "DaVinci EMAC:emac_tx_bdproc: "\
+ "teardown pending\n");
+ }
+ return 0; /* dont handle any pkt completions */
+ }
+
+ ++txch->proc_count;
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ curr_bd = txch->active_queue_head;
+ if (NULL == curr_bd) {
+ emac_write(EMAC_TXCP(ch),
+ emac_virt_to_phys(txch->last_hw_bdprocessed));
+ txch->no_active_pkts++;
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return 0;
+ }
+ BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
+ frame_status = curr_bd->mode;
+ while ((curr_bd) &&
+ ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
+ (pkts_processed < budget)) {
+ emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd));
+ txch->active_queue_head = curr_bd->next;
+ if (frame_status & EMAC_CPPI_EOQ_BIT) {
+ if (curr_bd->next) { /* misqueued packet */
+ emac_write(EMAC_TXHDP(ch), curr_bd->h_next);
+ ++txch->mis_queued_packets;
+ } else {
+ txch->queue_active = 0; /* end of queue */
+ }
+ }
+ *tx_complete_ptr = (u32) curr_bd->buf_token;
+ ++tx_complete_ptr;
+ ++tx_complete_cnt;
+ curr_bd->next = txch->bd_pool_head;
+ txch->bd_pool_head = curr_bd;
+ --txch->active_queue_count;
+ pkts_processed++;
+ txch->last_hw_bdprocessed = curr_bd;
+ curr_bd = txch->active_queue_head;
+ if (curr_bd) {
+ BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
+ frame_status = curr_bd->mode;
+ }
+ } /* end of pkt processing loop */
+
+ emac_net_tx_complete(priv,
+ (void *)&txch->tx_complete[0],
+ tx_complete_cnt, ch);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return pkts_processed;
+}
+
+#define EMAC_ERR_TX_OUT_OF_BD -1
+
+/**
+ * emac_send: EMAC Transmit function (internal)
+ * @priv: The DaVinci EMAC private adapter structure
+ * @pkt: packet pointer (contains skb ptr)
+ * @ch: TX channel number
+ *
+ * Called by the transmit function to queue the packet in EMAC hardware queue
+ *
+ * Returns success(0) or error code (typically out of desc's)
+ */
+static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
+{
+ unsigned long flags;
+ struct emac_tx_bd __iomem *curr_bd;
+ struct emac_txch *txch;
+ struct emac_netbufobj *buf_list;
+
+ txch = priv->txch[ch];
+ buf_list = pkt->buf_list; /* get handle to the buffer array */
+
+ /* check packet size and pad if short */
+ if (pkt->pkt_length < EMAC_DEF_MIN_ETHPKTSIZE) {
+ buf_list->length += (EMAC_DEF_MIN_ETHPKTSIZE - pkt->pkt_length);
+ pkt->pkt_length = EMAC_DEF_MIN_ETHPKTSIZE;
+ }
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ curr_bd = txch->bd_pool_head;
+ if (curr_bd == NULL) {
+ txch->out_of_tx_bd++;
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return EMAC_ERR_TX_OUT_OF_BD;
+ }
+
+ txch->bd_pool_head = curr_bd->next;
+ curr_bd->buf_token = buf_list->buf_token;
+ /* FIXME buff_ptr = dma_map_single(... data_ptr ...) */
+ curr_bd->buff_ptr = virt_to_phys(buf_list->data_ptr);
+ curr_bd->off_b_len = buf_list->length;
+ curr_bd->h_next = 0;
+ curr_bd->next = NULL;
+ curr_bd->mode = (EMAC_CPPI_SOP_BIT | EMAC_CPPI_OWNERSHIP_BIT |
+ EMAC_CPPI_EOP_BIT | pkt->pkt_length);
+
+ /* flush the packet from cache if write back cache is present */
+ BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
+
+ /* send the packet */
+ if (txch->active_queue_head == NULL) {
+ txch->active_queue_head = curr_bd;
+ txch->active_queue_tail = curr_bd;
+ if (1 != txch->queue_active) {
+ emac_write(EMAC_TXHDP(ch),
+ emac_virt_to_phys(curr_bd));
+ txch->queue_active = 1;
+ }
+ ++txch->queue_reinit;
+ } else {
+ register struct emac_tx_bd __iomem *tail_bd;
+ register u32 frame_status;
+
+ tail_bd = txch->active_queue_tail;
+ tail_bd->next = curr_bd;
+ txch->active_queue_tail = curr_bd;
+ tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
+ tail_bd->h_next = (int)emac_virt_to_phys(curr_bd);
+ frame_status = tail_bd->mode;
+ if (frame_status & EMAC_CPPI_EOQ_BIT) {
+ emac_write(EMAC_TXHDP(ch), emac_virt_to_phys(curr_bd));
+ frame_status &= ~(EMAC_CPPI_EOQ_BIT);
+ tail_bd->mode = frame_status;
+ ++txch->end_of_queue_add;
+ }
+ }
+ txch->active_queue_count++;
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return 0;
+}
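+
+/* Illustrative sketch (not part of the driver): emac_send() above packs the
+ * CPPI flags and the packet length into the single descriptor "mode" word.
+ * The helper below mirrors that packing for a single-fragment frame; it
+ * assumes, as the OR expression above does, that the flag bits and the
+ * length field occupy disjoint bit positions. The RX path later recovers
+ * the length by masking with EMAC_RX_BD_PKT_LENGTH_MASK (see
+ * emac_rx_bdproc() below).
+ */
+static inline u32 example_tx_mode_word(u32 pkt_length)
+{
+ /* single fragment: this BD is both start and end of the packet and
+ * is handed to the hardware by setting the ownership bit */
+ return EMAC_CPPI_SOP_BIT | EMAC_CPPI_EOP_BIT |
+ EMAC_CPPI_OWNERSHIP_BIT | pkt_length;
+}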
+
+/**
+ * emac_dev_xmit: EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called by the system to transmit a packet - we queue the packet in
+ * EMAC hardware transmit queue
+ *
+ * Returns success(NETDEV_TX_OK) or error code (typically out of desc's)
+ */
+static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct device *emac_dev = &ndev->dev;
+ int ret_code;
+ struct emac_netbufobj tx_buf; /* buffer obj-only single frame support */
+ struct emac_netpktobj tx_packet; /* packet object */
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ /* If no link, return */
+ if (unlikely(!priv->link)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: No link to transmit");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Build the buffer and packet objects - since only a single fragment is
+ * supported, we need not set length and token in both the packet and buffer
+ * objects. Doing so for completeness' sake and to show that this needs to
+ * be done in the multi-fragment case
+ */
+ tx_packet.buf_list = &tx_buf;
+ tx_packet.num_bufs = 1; /* only single fragment supported */
+ tx_packet.pkt_length = skb->len;
+ tx_packet.pkt_token = (void *)skb;
+ tx_buf.length = skb->len;
+ tx_buf.buf_token = (void *)skb;
+ tx_buf.data_ptr = skb->data;
+ EMAC_CACHE_WRITEBACK((unsigned long)skb->data, skb->len);
+ ndev->trans_start = jiffies;
+ ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
+ if (unlikely(ret_code != 0)) {
+ if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: xmit() fatal"\
+ " err. Out of TX BD's");
+ netif_stop_queue(priv->ndev);
+ }
+ priv->net_dev_stats.tx_dropped++;
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * emac_dev_tx_timeout: EMAC Transmit timeout function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when the system detects that a skb transmit timeout has expired,
+ * potentially due to a fault in the adapter preventing it from sending the
+ * packet out on the wire. We tear down the TX channel assuming a hardware
+ * error and re-initialize the TX channel for hardware operation
+ *
+ */
+static void emac_dev_tx_timeout(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+
+ if (netif_msg_tx_err(priv))
+ dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
+
+ priv->net_dev_stats.tx_errors++;
+ emac_int_disable(priv);
+ emac_stop_txch(priv, EMAC_DEF_TX_CH);
+ emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
+ emac_init_txch(priv, EMAC_DEF_TX_CH);
+ emac_write(EMAC_TXHDP(0), 0);
+ emac_write(EMAC_TXINTMASKSET, BIT(EMAC_DEF_TX_CH));
+ emac_int_enable(priv);
+}
+
+/**
+ * emac_net_alloc_rx_buf: Allocate a skb for RX
+ * @priv: The DaVinci EMAC private adapter structure
+ * @buf_size: size of SKB data buffer to allocate
+ * @data_token: data token returned (skb handle for storing in buffer desc)
+ * @ch: RX channel number
+ *
+ * Called during RX channel setup - allocates skb buffer of required size
+ * and provides the skb handle and allocated buffer data pointer to caller
+ *
+ * Returns skb data pointer or NULL on failure to allocate an skb
+ */
+static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
+ void **data_token, u32 ch)
+{
+ struct net_device *ndev = priv->ndev;
+ struct device *emac_dev = &ndev->dev;
+ struct sk_buff *p_skb;
+
+ p_skb = dev_alloc_skb(buf_size);
+ if (unlikely(NULL == p_skb)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: failed to alloc skb");
+ return NULL;
+ }
+
+ /* set device pointer in skb and reserve space for extra bytes */
+ p_skb->dev = ndev;
+ skb_reserve(p_skb, NET_IP_ALIGN);
+ *data_token = (void *) p_skb;
+ EMAC_CACHE_WRITEBACK_INVALIDATE((unsigned long)p_skb->data, buf_size);
+ return p_skb->data;
+}
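+
+/* Illustrative sketch (not part of the driver): the skb_reserve(NET_IP_ALIGN)
+ * above shifts the start of the received frame by NET_IP_ALIGN (typically
+ * two) bytes so that, after the 14-byte Ethernet header, the IP header lands
+ * on a 4-byte boundary: with buffer data starting at a 4-byte aligned
+ * address A, the payload begins at A + 2 + 14 = A + 16. emac_dev_open()
+ * sizes the RX buffers to account for this headroom.
+ */
+static inline unsigned int example_rx_buf_size(unsigned int max_frame_size)
+{
+ /* mirrors the sizing done in emac_dev_open(): max frame + headroom */
+ return max_frame_size + NET_IP_ALIGN;
+}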
+
+/**
+ * emac_init_rxch: RX channel initialization
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @param: mac address for RX channel
+ *
+ * Called during device init to setup a RX channel (allocate buffers and
+ * buffer descriptors, create a queue) and keep it ready for reception
+ *
+ * Returns success(0) or mem alloc failures error code
+ */
+static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ u32 cnt, bd_size;
+ void __iomem *mem;
+ struct emac_rx_bd __iomem *curr_bd;
+ struct emac_rxch *rxch = NULL;
+
+ rxch = kzalloc(sizeof(struct emac_rxch), GFP_KERNEL);
+ if (NULL == rxch) {
+ dev_err(emac_dev, "DaVinci EMAC: RX Ch mem alloc failed");
+ return -ENOMEM;
+ }
+ priv->rxch[ch] = rxch;
+ rxch->buf_size = priv->rx_buf_size;
+ rxch->service_max = EMAC_DEF_RX_MAX_SERVICE;
+ rxch->queue_active = 0;
+ rxch->teardown_pending = 0;
+
+ /* save mac address */
+ for (cnt = 0; cnt < 6; cnt++)
+ rxch->mac_addr[cnt] = param[cnt];
+
+ /* allocate buffer descriptor pool; align every BD on a four word
+ * boundary for future requirements */
+ bd_size = (sizeof(struct emac_rx_bd) + 0xF) & ~0xF;
+ rxch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
+ rxch->alloc_size = (((bd_size * rxch->num_bd) + 0xF) & ~0xF);
+ rxch->bd_mem = EMAC_RX_BD_MEM(priv);
+ __memzero((void __force *)rxch->bd_mem, rxch->alloc_size);
+ rxch->pkt_queue.buf_list = &rxch->buf_queue;
+
+ /* allocate RX buffer and initialize the BD linked list */
+ mem = (void __force __iomem *)
+ (((u32 __force) rxch->bd_mem + 0xF) & ~0xF);
+ rxch->active_queue_head = NULL;
+ rxch->active_queue_tail = mem;
+ for (cnt = 0; cnt < rxch->num_bd; cnt++) {
+ curr_bd = mem + (cnt * bd_size);
+ /* for future use the last parameter contains the BD ptr */
+ curr_bd->data_ptr = emac_net_alloc_rx_buf(priv,
+ rxch->buf_size,
+ (void __force **)&curr_bd->buf_token,
+ EMAC_DEF_RX_CH);
+ if (curr_bd->data_ptr == NULL) {
+ dev_err(emac_dev, "DaVinci EMAC: RX buf mem alloc " \
+ "failed for ch %d\n", ch);
+ kfree(rxch);
+ return -ENOMEM;
+ }
+
+ /* populate the hardware descriptor */
+ curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head);
+ /* FIXME buff_ptr = dma_map_single(... data_ptr ...) */
+ curr_bd->buff_ptr = virt_to_phys(curr_bd->data_ptr);
+ curr_bd->off_b_len = rxch->buf_size;
+ curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
+
+ /* write back to hardware memory */
+ BD_CACHE_WRITEBACK_INVALIDATE((u32) curr_bd,
+ EMAC_BD_LENGTH_FOR_CACHE);
+ curr_bd->next = rxch->active_queue_head;
+ rxch->active_queue_head = curr_bd;
+ }
+
+ /* At this point rxch->active_queue_head points to the first
+ RX BD ready to be given to RX HDP and rxch->active_queue_tail
+ points to the last RX BD
+ */
+ return 0;
+}
+
+/**
+ * emac_rxch_teardown: RX channel teardown
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ *
+ * Called during device stop to teardown RX channel
+ *
+ */
+static void emac_rxch_teardown(struct emac_priv *priv, u32 ch)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
+
+ while ((emac_read(EMAC_RXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
+ EMAC_TEARDOWN_VALUE) {
+ /* wait till rx teardown complete */
+ cpu_relax(); /* TODO: check if this helps ... */
+ --teardown_cnt;
+ if (0 == teardown_cnt) {
+ dev_err(emac_dev, "EMAC: RX teardown aborted\n");
+ break;
+ }
+ }
+ emac_write(EMAC_RXCP(ch), EMAC_TEARDOWN_VALUE);
+}
+
+/**
+ * emac_stop_rxch: Stop RX channel operation
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ *
+ * Called during device stop to stop RX channel operation
+ *
+ */
+static void emac_stop_rxch(struct emac_priv *priv, u32 ch)
+{
+ struct emac_rxch *rxch = priv->rxch[ch];
+
+ if (rxch) {
+ rxch->teardown_pending = 1;
+ emac_write(EMAC_RXTEARDOWN, ch);
+ /* wait for teardown complete */
+ emac_rxch_teardown(priv, ch);
+ rxch->teardown_pending = 0;
+ emac_write(EMAC_RXINTMASKCLEAR, BIT(ch));
+ }
+}
+
+/**
+ * emac_cleanup_rxch: Book-keep function to clean RX channel resources
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ *
+ * Called during device stop to clean up RX channel resources
+ *
+ */
+static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch)
+{
+ struct emac_rxch *rxch = priv->rxch[ch];
+ struct emac_rx_bd __iomem *curr_bd;
+
+ if (rxch) {
+ /* free the receive buffers previously allocated */
+ curr_bd = rxch->active_queue_head;
+ while (curr_bd) {
+ if (curr_bd->buf_token) {
+ dev_kfree_skb_any((struct sk_buff *)\
+ curr_bd->buf_token);
+ }
+ curr_bd = curr_bd->next;
+ }
+ if (rxch->bd_mem)
+ rxch->bd_mem = NULL;
+ kfree(rxch);
+ priv->rxch[ch] = NULL;
+ }
+}
+
+/**
+ * emac_set_type0addr: Set EMAC Type0 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set Type0 mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+ u32 val;
+ val = ((mac_addr[5] << 8) | (mac_addr[4]));
+ emac_write(EMAC_MACSRCADDRLO, val);
+
+ val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]));
+ emac_write(EMAC_MACSRCADDRHI, val);
+ val = emac_read(EMAC_RXUNICASTSET);
+ val |= BIT(ch);
+ emac_write(EMAC_RXUNICASTSET, val);
+ val = emac_read(EMAC_RXUNICASTCLEAR);
+ val &= ~BIT(ch);
+ emac_write(EMAC_RXUNICASTCLEAR, val);
+}
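+
+/* Illustrative sketch (not part of the driver): emac_set_type0addr() above
+ * splits the six MAC address bytes across two registers - bytes 0..3 go into
+ * MACSRCADDRHI (byte 3 in the top bits) and bytes 4..5 into MACSRCADDRLO.
+ * For the example address 00:11:22:33:44:55 this yields HI = 0x33221100 and
+ * LO = 0x5544.
+ */
+static inline void example_pack_mac(const u8 *mac_addr, u32 *hi, u32 *lo)
+{
+ *lo = (mac_addr[5] << 8) | mac_addr[4];
+ *hi = (mac_addr[3] << 24) | (mac_addr[2] << 16) |
+ (mac_addr[1] << 8) | mac_addr[0];
+}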
+
+/**
+ * emac_set_type1addr: Set EMAC Type1 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set Type1 mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+ u32 val;
+ emac_write(EMAC_MACINDEX, ch);
+ val = ((mac_addr[5] << 8) | mac_addr[4]);
+ emac_write(EMAC_MACADDRLO, val);
+ val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]));
+ emac_write(EMAC_MACADDRHI, val);
+ emac_set_type0addr(priv, ch, mac_addr);
+}
+
+/**
+ * emac_set_type2addr: Set EMAC Type2 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ * @index: index into RX address entries
+ * @match: match parameter for RX address matching logic
+ *
+ * Called internally to set Type2 mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_set_type2addr(struct emac_priv *priv, u32 ch,
+ char *mac_addr, int index, int match)
+{
+ u32 val;
+ emac_write(EMAC_MACINDEX, index);
+ val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]));
+ emac_write(EMAC_MACADDRHI, val);
+ val = ((mac_addr[5] << 8) | mac_addr[4] | ((ch & 0x7) << 16) | \
+ (match << 19) | BIT(20));
+ emac_write(EMAC_MACADDRLO, val);
+ emac_set_type0addr(priv, ch, mac_addr);
+}
+
+/**
+ * emac_setmac: Set mac address in the adapter (internal function)
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set the mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+
+ if (priv->rx_addr_type == 0) {
+ emac_set_type0addr(priv, ch, mac_addr);
+ } else if (priv->rx_addr_type == 1) {
+ u32 cnt;
+ for (cnt = 0; cnt < EMAC_MAX_TXRX_CHANNELS; cnt++)
+ emac_set_type1addr(priv, ch, mac_addr);
+ } else if (priv->rx_addr_type == 2) {
+ emac_set_type2addr(priv, ch, mac_addr, ch, 1);
+ emac_set_type0addr(priv, ch, mac_addr);
+ } else {
+ if (netif_msg_drv(priv))
+ dev_err(emac_dev, "DaVinci EMAC: Wrong addressing\n");
+ }
+}
+
+/**
+ * emac_dev_setmac_addr: Set mac address in the adapter
+ * @ndev: The DaVinci EMAC network adapter
+ * @addr: MAC address to set in device
+ *
+ * Called by the system to set the mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct emac_rxch *rxch = priv->rxch[EMAC_DEF_RX_CH];
+ struct device *emac_dev = &priv->ndev->dev;
+ struct sockaddr *sa = addr;
+
+ /* Store mac addr in priv and rx channel and set it in EMAC hw */
+ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
+ memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
+ memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
+ emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
+
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n",
+ priv->mac_addr);
+
+ return 0;
+}
+
+/**
+ * emac_addbd_to_rx_queue: Recycle RX buffer descriptor
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number to process buffer descriptors for
+ * @curr_bd: current buffer descriptor
+ * @buffer: buffer pointer for descriptor
+ * @buf_token: buffer token (stores skb information)
+ *
+ * Prepares the recycled buffer descriptor and adds it to the hardware
+ * receive queue - if the queue is empty this descriptor becomes the head,
+ * else the descriptor is added to the end of the queue
+ *
+ */
+static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
+ struct emac_rx_bd __iomem *curr_bd,
+ char *buffer, void *buf_token)
+{
+ struct emac_rxch *rxch = priv->rxch[ch];
+
+ /* populate the hardware descriptor */
+ curr_bd->h_next = 0;
+ /* FIXME buff_ptr = dma_map_single(... buffer ...) */
+ curr_bd->buff_ptr = virt_to_phys(buffer);
+ curr_bd->off_b_len = rxch->buf_size;
+ curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
+ curr_bd->next = NULL;
+ curr_bd->data_ptr = buffer;
+ curr_bd->buf_token = buf_token;
+
+ /* write back */
+ BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
+ if (rxch->active_queue_head == NULL) {
+ rxch->active_queue_head = curr_bd;
+ rxch->active_queue_tail = curr_bd;
+ if (0 != rxch->queue_active) {
+ emac_write(EMAC_RXHDP(ch),
+ emac_virt_to_phys(rxch->active_queue_head));
+ rxch->queue_active = 1;
+ }
+ } else {
+ struct emac_rx_bd __iomem *tail_bd;
+ u32 frame_status;
+
+ tail_bd = rxch->active_queue_tail;
+ rxch->active_queue_tail = curr_bd;
+ tail_bd->next = curr_bd;
+ tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
+ tail_bd->h_next = emac_virt_to_phys(curr_bd);
+ frame_status = tail_bd->mode;
+ if (frame_status & EMAC_CPPI_EOQ_BIT) {
+ emac_write(EMAC_RXHDP(ch),
+ emac_virt_to_phys(curr_bd));
+ frame_status &= ~(EMAC_CPPI_EOQ_BIT);
+ tail_bd->mode = frame_status;
+ ++rxch->end_of_queue_add;
+ }
+ }
+ ++rxch->recycled_bd;
+}
+
+/**
+ * emac_net_rx_cb: Prepares packet and sends to upper layer
+ * @priv: The DaVinci EMAC private adapter structure
+ * @net_pkt_list: Network packet list (received packets)
+ *
+ * Invalidates packet buffer memory and sends the received packet to upper
+ * layer
+ *
+ * Returns success or appropriate error code (none as of now)
+ */
+static int emac_net_rx_cb(struct emac_priv *priv,
+ struct emac_netpktobj *net_pkt_list)
+{
+ struct sk_buff *p_skb;
+ p_skb = (struct sk_buff *)net_pkt_list->pkt_token;
+ /* set length of packet */
+ skb_put(p_skb, net_pkt_list->pkt_length);
+ EMAC_CACHE_INVALIDATE((unsigned long)p_skb->data, p_skb->len);
+ p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
+ p_skb->dev->last_rx = jiffies;
+ netif_receive_skb(p_skb);
+ priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length;
+ priv->net_dev_stats.rx_packets++;
+ return 0;
+}
+
+/**
+ * emac_rx_bdproc: RX buffer descriptor (packet) processing
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number to process buffer descriptors for
+ * @budget: number of packets allowed to process
+ *
+ * Processes RX buffer descriptors - checks ownership bit on the RX buffer
+ * descriptor, sends the receive packet to upper layer, allocates a new SKB
+ * and recycles the buffer descriptor (requeues it in hardware RX queue).
+ * Only "budget" number of packets are processed and indication of pending
+ * packets provided to the caller.
+ *
+ * Returns number of packets processed (and indication of pending packets)
+ */
+static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
+{
+ unsigned long flags;
+ u32 frame_status;
+ u32 pkts_processed = 0;
+ char *new_buffer;
+ struct emac_rx_bd __iomem *curr_bd;
+ struct emac_rx_bd __iomem *last_bd;
+ struct emac_netpktobj *curr_pkt, pkt_obj;
+ struct emac_netbufobj buf_obj;
+ struct emac_netbufobj *rx_buf_obj;
+ void *new_buf_token;
+ struct emac_rxch *rxch = priv->rxch[ch];
+
+ if (unlikely(1 == rxch->teardown_pending))
+ return 0;
+ ++rxch->proc_count;
+ spin_lock_irqsave(&priv->rx_lock, flags);
+ pkt_obj.buf_list = &buf_obj;
+ curr_pkt = &pkt_obj;
+ curr_bd = rxch->active_queue_head;
+ BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
+ frame_status = curr_bd->mode;
+
+ while ((curr_bd) &&
+ ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
+ (pkts_processed < budget)) {
+
+ new_buffer = emac_net_alloc_rx_buf(priv, rxch->buf_size,
+ &new_buf_token, EMAC_DEF_RX_CH);
+ if (unlikely(NULL == new_buffer)) {
+ ++rxch->out_of_rx_buffers;
+ goto end_emac_rx_bdproc;
+ }
+
+ /* populate received packet data structure */
+ rx_buf_obj = &curr_pkt->buf_list[0];
+ rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
+ rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
+ rx_buf_obj->buf_token = curr_bd->buf_token;
+ curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
+ curr_pkt->num_bufs = 1;
+ curr_pkt->pkt_length =
+ (frame_status & EMAC_RX_BD_PKT_LENGTH_MASK);
+ emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd));
+ ++rxch->processed_bd;
+ last_bd = curr_bd;
+ curr_bd = last_bd->next;
+ rxch->active_queue_head = curr_bd;
+
+ /* check if end of RX queue ? */
+ if (frame_status & EMAC_CPPI_EOQ_BIT) {
+ if (curr_bd) {
+ ++rxch->mis_queued_packets;
+ emac_write(EMAC_RXHDP(ch),
+ emac_virt_to_phys(curr_bd));
+ } else {
+ ++rxch->end_of_queue;
+ rxch->queue_active = 0;
+ }
+ }
+
+ /* recycle BD */
+ emac_addbd_to_rx_queue(priv, ch, last_bd, new_buffer,
+ new_buf_token);
+
+ /* return the packet to the user - BD ptr passed in
+ * last parameter for potential *future* use */
+ spin_unlock_irqrestore(&priv->rx_lock, flags);
+ emac_net_rx_cb(priv, curr_pkt);
+ spin_lock_irqsave(&priv->rx_lock, flags);
+ curr_bd = rxch->active_queue_head;
+ if (curr_bd) {
+ BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
+ frame_status = curr_bd->mode;
+ }
+ ++pkts_processed;
+ }
+
+end_emac_rx_bdproc:
+ spin_unlock_irqrestore(&priv->rx_lock, flags);
+ return pkts_processed;
+}
+
+/**
+ * emac_hw_enable: Enable EMAC hardware for packet transmission/reception
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Enables EMAC hardware for packet processing - resets the module, enables RX
+ * for packet reception and then enables NAPI and device interrupts
+ *
+ * Returns success (0) or appropriate error code (none right now)
+ */
+static int emac_hw_enable(struct emac_priv *priv)
+{
+ u32 ch, val, mbp_enable, mac_control;
+
+ /* Soft reset */
+ emac_write(EMAC_SOFTRESET, 1);
+ while (emac_read(EMAC_SOFTRESET))
+ cpu_relax();
+
+ /* Disable interrupt & Set pacing for more interrupts initially */
+ emac_int_disable(priv);
+
+ /* Full duplex enable bit set when auto negotiation happens */
+ mac_control =
+ (((EMAC_DEF_TXPRIO_FIXED) ? (EMAC_MACCONTROL_TXPTYPE) : 0x0) |
+ ((priv->speed == 1000) ? EMAC_MACCONTROL_GIGABITEN : 0x0) |
+ ((EMAC_DEF_TXPACING_EN) ? (EMAC_MACCONTROL_TXPACEEN) : 0x0) |
+ ((priv->duplex == DUPLEX_FULL) ? 0x1 : 0));
+ emac_write(EMAC_MACCONTROL, mac_control);
+
+ mbp_enable =
+ (((EMAC_DEF_PASS_CRC) ? (EMAC_RXMBP_PASSCRC_MASK) : 0x0) |
+ ((EMAC_DEF_QOS_EN) ? (EMAC_RXMBP_QOSEN_MASK) : 0x0) |
+ ((EMAC_DEF_NO_BUFF_CHAIN) ? (EMAC_RXMBP_NOCHAIN_MASK) : 0x0) |
+ ((EMAC_DEF_MACCTRL_FRAME_EN) ? (EMAC_RXMBP_CMFEN_MASK) : 0x0) |
+ ((EMAC_DEF_SHORT_FRAME_EN) ? (EMAC_RXMBP_CSFEN_MASK) : 0x0) |
+ ((EMAC_DEF_ERROR_FRAME_EN) ? (EMAC_RXMBP_CEFEN_MASK) : 0x0) |
+ ((EMAC_DEF_PROM_EN) ? (EMAC_RXMBP_CAFEN_MASK) : 0x0) |
+ ((EMAC_DEF_PROM_CH & EMAC_RXMBP_CHMASK) << \
+ EMAC_RXMBP_PROMCH_SHIFT) |
+ ((EMAC_DEF_BCAST_EN) ? (EMAC_RXMBP_BROADEN_MASK) : 0x0) |
+ ((EMAC_DEF_BCAST_CH & EMAC_RXMBP_CHMASK) << \
+ EMAC_RXMBP_BROADCH_SHIFT) |
+ ((EMAC_DEF_MCAST_EN) ? (EMAC_RXMBP_MULTIEN_MASK) : 0x0) |
+ ((EMAC_DEF_MCAST_CH & EMAC_RXMBP_CHMASK) << \
+ EMAC_RXMBP_MULTICH_SHIFT));
+ emac_write(EMAC_RXMBPENABLE, mbp_enable);
+ emac_write(EMAC_RXMAXLEN, (EMAC_DEF_MAX_FRAME_SIZE &
+ EMAC_RX_MAX_LEN_MASK));
+ emac_write(EMAC_RXBUFFEROFFSET, (EMAC_DEF_BUFFER_OFFSET &
+ EMAC_RX_BUFFER_OFFSET_MASK));
+ emac_write(EMAC_RXFILTERLOWTHRESH, 0);
+ emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
+ priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
+
+ val = emac_read(EMAC_TXCONTROL);
+ val |= EMAC_TX_CONTROL_TX_ENABLE_VAL;
+ emac_write(EMAC_TXCONTROL, val);
+ val = emac_read(EMAC_RXCONTROL);
+ val |= EMAC_RX_CONTROL_RX_ENABLE_VAL;
+ emac_write(EMAC_RXCONTROL, val);
+ emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
+
+ for (ch = 0; ch < EMAC_DEF_MAX_TX_CH; ch++) {
+ emac_write(EMAC_TXHDP(ch), 0);
+ emac_write(EMAC_TXINTMASKSET, BIT(ch));
+ }
+ for (ch = 0; ch < EMAC_DEF_MAX_RX_CH; ch++) {
+ struct emac_rxch *rxch = priv->rxch[ch];
+ emac_setmac(priv, ch, rxch->mac_addr);
+ emac_write(EMAC_RXINTMASKSET, BIT(ch));
+ rxch->queue_active = 1;
+ emac_write(EMAC_RXHDP(ch),
+ emac_virt_to_phys(rxch->active_queue_head));
+ }
+
+ /* Enable MII */
+ val = emac_read(EMAC_MACCONTROL);
+ val |= (EMAC_MACCONTROL_MIIEN);
+ emac_write(EMAC_MACCONTROL, val);
+
+ /* Enable NAPI and interrupts */
+ napi_enable(&priv->napi);
+ emac_int_enable(priv);
+ return 0;
+
+}
+
+/**
+ * emac_poll: EMAC NAPI Poll function
+ * @napi: NAPI structure embedded in the DaVinci EMAC private adapter structure
+ * @budget: Number of receive packets to process (as told by NAPI layer)
+ *
+ * NAPI Poll function implemented to process packets as per budget. We check
+ * the type of interrupt on the device and accordingly call the TX or RX
+ * packet processing functions. We follow the budget for RX processing and
+ * also put a cap on number of TX pkts processed through config param. The
+ * NAPI schedule function is called if more packets pending.
+ *
+ * Returns number of packets received (in most cases; else TX pkts - rarely)
+ */
+static int emac_poll(struct napi_struct *napi, int budget)
+{
+ unsigned int mask;
+ struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
+ struct net_device *ndev = priv->ndev;
+ struct device *emac_dev = &ndev->dev;
+ u32 status = 0;
+ u32 num_pkts = 0;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ /* Check interrupt vectors and call packet processing */
+ status = emac_read(EMAC_MACINVECTOR);
+
+ mask = EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC;
+
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
+
+ if (status & mask) {
+ num_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
+ EMAC_DEF_TX_MAX_SERVICE);
+ } /* TX processing */
+
+ if (num_pkts)
+ return budget;
+
+ mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
+
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
+
+ if (status & mask) {
+ num_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget);
+ } /* RX processing */
+
+ if (num_pkts < budget) {
+ napi_complete(napi);
+ emac_int_enable(priv);
+ }
+
+ if (unlikely(status & EMAC_DM644X_MAC_IN_VECTOR_HOST_INT)) {
+ u32 ch, cause;
+ dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n");
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+
+ status = emac_read(EMAC_MACSTATUS);
+ cause = ((status & EMAC_MACSTATUS_TXERRCODE_MASK) >>
+ EMAC_MACSTATUS_TXERRCODE_SHIFT);
+ if (cause) {
+ ch = ((status & EMAC_MACSTATUS_TXERRCH_MASK) >>
+ EMAC_MACSTATUS_TXERRCH_SHIFT);
+ if (net_ratelimit()) {
+ dev_err(emac_dev, "TX Host error %s on ch=%d\n",
+ &emac_txhost_errcodes[cause][0], ch);
+ }
+ }
+ cause = ((status & EMAC_MACSTATUS_RXERRCODE_MASK) >>
+ EMAC_MACSTATUS_RXERRCODE_SHIFT);
+ if (cause) {
+ ch = ((status & EMAC_MACSTATUS_RXERRCH_MASK) >>
+ EMAC_MACSTATUS_RXERRCH_SHIFT);
+ if (netif_msg_hw(priv) && net_ratelimit())
+ dev_err(emac_dev, "RX Host error %s on ch=%d\n",
+ &emac_rxhost_errcodes[cause][0], ch);
+ }
+ } /* Host error processing */
+
+ return num_pkts;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * emac_poll_controller: EMAC Poll controller function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Polled functionality used by netconsole and others in non-interrupt mode
+ *
+ */
+void emac_poll_controller(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ emac_int_disable(priv);
+ emac_irq(ndev->irq, priv);
+ emac_int_enable(priv);
+}
+#endif
+
+/* PHY/MII bus related */
+
+/* Wait until mdio is ready for next command */
+#define MDIO_WAIT_FOR_USER_ACCESS\
+ while ((emac_mdio_read((MDIO_USERACCESS(0))) &\
+ MDIO_USERACCESS_GO) != 0)
+
+static int emac_mii_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ unsigned int phy_data = 0;
+ unsigned int phy_control;
+
+ /* Wait until mdio is ready for next command */
+ MDIO_WAIT_FOR_USER_ACCESS;
+
+ phy_control = (MDIO_USERACCESS_GO |
+ MDIO_USERACCESS_READ |
+ ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
+ ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
+ (phy_data & MDIO_USERACCESS_DATA));
+ emac_mdio_write(MDIO_USERACCESS(0), phy_control);
+
+ /* Wait until mdio is ready for next command */
+ MDIO_WAIT_FOR_USER_ACCESS;
+
+ return emac_mdio_read(MDIO_USERACCESS(0)) & MDIO_USERACCESS_DATA;
+
+}
+
+static int emac_mii_write(struct mii_bus *bus, int phy_id,
+ int phy_reg, u16 phy_data)
+{
+
+ unsigned int control;
+
+ /* Wait until mdio is ready for next command */
+ MDIO_WAIT_FOR_USER_ACCESS;
+
+ control = (MDIO_USERACCESS_GO |
+ MDIO_USERACCESS_WRITE |
+ ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
+ ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
+ (phy_data & MDIO_USERACCESS_DATA));
+ emac_mdio_write(MDIO_USERACCESS(0), control);
+
+ return 0;
+}
+
+static int emac_mii_reset(struct mii_bus *bus)
+{
+ unsigned int clk_div;
+ int mdio_bus_freq = emac_bus_frequency;
+
+ if (mdio_max_freq && mdio_bus_freq)
+ clk_div = ((mdio_bus_freq / mdio_max_freq) - 1);
+ else
+ clk_div = 0xFF;
+
+ clk_div &= MDIO_CONTROL_CLKDIV;
+
+ /* Set enable and clock divider in MDIOControl */
+ emac_mdio_write(MDIO_CONTROL, (clk_div | MDIO_CONTROL_ENABLE));
+
+ return 0;
+
+}
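+
+/* Illustrative sketch (not part of the driver): the MDIO clock divider
+ * programmed by emac_mii_reset() above is derived from the peripheral bus
+ * clock and the maximum MDIO frequency supplied via platform data. Assuming,
+ * for example, a 76.5 MHz bus clock and a 2.2 MHz MDIO limit, the divider
+ * works out to 76500000 / 2200000 - 1 = 33.
+ */
+static inline u32 example_mdio_clk_div(u32 bus_freq, u32 max_freq)
+{
+ if (!bus_freq || !max_freq)
+ return 0xFF; /* same fallback as emac_mii_reset() */
+ return ((bus_freq / max_freq) - 1) & MDIO_CONTROL_CLKDIV;
+}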
+
+static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, PHY_POLL };
+
+/* emac_driver: EMAC MII bus structure */
+
+static struct mii_bus *emac_mii;
+
+static void emac_adjust_link(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+ unsigned long flags;
+ int new_state = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (phydev->link) {
+ /* check the mode of operation - full/half duplex */
+ if (phydev->duplex != priv->duplex) {
+ new_state = 1;
+ priv->duplex = phydev->duplex;
+ }
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ priv->speed = phydev->speed;
+ }
+ if (!priv->link) {
+ new_state = 1;
+ priv->link = 1;
+ }
+
+ } else if (priv->link) {
+ new_state = 1;
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = ~0;
+ }
+ if (new_state) {
+ emac_update_phystatus(priv);
+ phy_print_status(priv->phydev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/*************************************************************************
+ * Linux Driver Model
+ *************************************************************************/
+
+/**
+ * emac_devioctl: EMAC adapter ioctl
+ * @ndev: The DaVinci EMAC network adapter
+ * @ifrq: request parameter
+ * @cmd: command parameter
+ *
+ * EMAC driver ioctl function
+ *
+ * Returns success(0) or appropriate error code
+ */
+static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
+{
+ dev_warn(&ndev->dev, "DaVinci EMAC: ioctl not supported\n");
+
+ if (!(netif_running(ndev)))
+ return -EINVAL;
+
+ /* TODO: Add phy read and write and private statistics get feature */
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * emac_dev_open: EMAC device open
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when system wants to start the interface. We init TX/RX channels
+ * and enable the hardware for packet reception/transmission and start the
+ * network queue.
+ *
+ * Returns 0 for a successful open, or appropriate error code
+ */
+static int emac_dev_open(struct net_device *ndev)
+{
+ struct device *emac_dev = &ndev->dev;
+ u32 rc, cnt, ch;
+ int phy_addr;
+ struct resource *res;
+ int q, m;
+ int i = 0;
+ int k = 0;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ netif_carrier_off(ndev);
+ for (cnt = 0; cnt < ETH_ALEN; cnt++)
+ ndev->dev_addr[cnt] = priv->mac_addr[cnt];
+
+ /* Configuration items */
+ priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
+
+ /* Clear basic hardware */
+ for (ch = 0; ch < EMAC_MAX_TXRX_CHANNELS; ch++) {
+ emac_write(EMAC_TXHDP(ch), 0);
+ emac_write(EMAC_RXHDP(ch), 0);
+ emac_write(EMAC_RXHDP(ch), 0);
+ emac_write(EMAC_RXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
+ emac_write(EMAC_TXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
+ }
+ priv->mac_hash1 = 0;
+ priv->mac_hash2 = 0;
+ emac_write(EMAC_MACHASH1, 0);
+ emac_write(EMAC_MACHASH2, 0);
+
+ /* multi ch not supported - open 1 TX, 1RX ch by default */
+ rc = emac_init_txch(priv, EMAC_DEF_TX_CH);
+ if (0 != rc) {
+ dev_err(emac_dev, "DaVinci EMAC: emac_init_txch() failed");
+ return rc;
+ }
+ rc = emac_init_rxch(priv, EMAC_DEF_RX_CH, priv->mac_addr);
+ if (0 != rc) {
+ dev_err(emac_dev, "DaVinci EMAC: emac_init_rxch() failed");
+ return rc;
+ }
+
+ /* Request IRQ */
+
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
+ for (i = res->start; i <= res->end; i++) {
+ if (request_irq(i, emac_irq, IRQF_DISABLED,
+ ndev->name, ndev))
+ goto rollback;
+ }
+ k++;
+ }
+
+ /* Start/Enable EMAC hardware */
+ emac_hw_enable(priv);
+
+ /* find the first phy */
+ priv->phydev = NULL;
+ if (priv->phy_mask) {
+ emac_mii_reset(priv->mii_bus);
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ if (priv->mii_bus->phy_map[phy_addr]) {
+ priv->phydev = priv->mii_bus->phy_map[phy_addr];
+ break;
+ }
+ }
+
+ if (!priv->phydev) {
+ printk(KERN_ERR "%s: no PHY found\n", ndev->name);
+ return -1;
+ }
+
+ priv->phydev = phy_connect(ndev, dev_name(&priv->phydev->dev),
+ &emac_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(priv->phydev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n",
+ ndev->name);
+ return PTR_ERR(priv->phydev);
+ }
+
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = ~0;
+
+ printk(KERN_INFO "%s: attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, id=%x)\n", ndev->name,
+ priv->phydev->drv->name, dev_name(&priv->phydev->dev),
+ priv->phydev->phy_id);
+ } else{
+ /* No PHY , fix the link, speed and duplex settings */
+ priv->link = 1;
+ priv->speed = SPEED_100;
+ priv->duplex = DUPLEX_FULL;
+ emac_update_phystatus(priv);
+ }
+
+ if (!netif_running(ndev)) /* debug only - to avoid compiler warning */
+ emac_dump_regs(priv);
+
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
+
+ if (priv->phy_mask)
+ phy_start(priv->phydev);
+
+ return 0;
+
+rollback:
+
+ dev_err(emac_dev, "DaVinci EMAC: request_irq() failed");
+
+ for (q = k; k >= 0; k--) {
+ for (m = i; m >= res->start; m--)
+ free_irq(m, ndev);
+ res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1);
+ m = res->end;
+ }
+ return -EBUSY;
+}
+
+/**
+ * emac_dev_stop: EMAC device stop
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when system wants to stop or down the interface. We stop the network
+ * queue, disable interrupts and cleanup TX/RX channels.
+ *
+ * Returns success (0)
+ */
+static int emac_dev_stop(struct net_device *ndev)
+{
+ struct resource *res;
+ int i = 0;
+ int irq_num;
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+
+ /* inform the upper layers. */
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+
+ netif_carrier_off(ndev);
+ emac_int_disable(priv);
+ emac_stop_txch(priv, EMAC_DEF_TX_CH);
+ emac_stop_rxch(priv, EMAC_DEF_RX_CH);
+ emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
+ emac_cleanup_rxch(priv, EMAC_DEF_RX_CH);
+ emac_write(EMAC_SOFTRESET, 1);
+
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
+
+ /* Free IRQ */
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++)
+ free_irq(irq_num, priv->ndev);
+ i++;
+ }
+
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
+
+ return 0;
+}
+
+/**
+ * emac_dev_getnetstats: EMAC get statistics function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when system wants to get statistics from the device.
+ *
+ * We return the statistics in net_device_stats structure pulled from emac
+ */
+static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ /* update emac hardware stats and reset the registers*/
+
+ priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
+ emac_write(EMAC_RXMCASTFRAMES, EMAC_ALL_MULTI_REG_VALUE);
+
+ priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) +
+ emac_read(EMAC_TXSINGLECOLL) +
+ emac_read(EMAC_TXMULTICOLL));
+ emac_write(EMAC_TXCOLLISION, EMAC_ALL_MULTI_REG_VALUE);
+ emac_write(EMAC_TXSINGLECOLL, EMAC_ALL_MULTI_REG_VALUE);
+ emac_write(EMAC_TXMULTICOLL, EMAC_ALL_MULTI_REG_VALUE);
+
+ priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
+ emac_read(EMAC_RXJABBER) +
+ emac_read(EMAC_RXUNDERSIZED));
+ emac_write(EMAC_RXOVERSIZED, EMAC_ALL_MULTI_REG_VALUE);
+ emac_write(EMAC_RXJABBER, EMAC_ALL_MULTI_REG_VALUE);
+ emac_write(EMAC_RXUNDERSIZED, EMAC_ALL_MULTI_REG_VALUE);
+
+ priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
+ emac_read(EMAC_RXMOFOVERRUNS));
+ emac_write(EMAC_RXSOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE);
+ emac_write(EMAC_RXMOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE);
+
+ priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
+ emac_write(EMAC_RXDMAOVERRUNS, EMAC_ALL_MULTI_REG_VALUE);
+
+ priv->net_dev_stats.tx_carrier_errors +=
+ emac_read(EMAC_TXCARRIERSENSE);
+ emac_write(EMAC_TXCARRIERSENSE, EMAC_ALL_MULTI_REG_VALUE);
+
+ priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
+ emac_write(EMAC_TXUNDERRUN, EMAC_ALL_MULTI_REG_VALUE);
+
+ return &priv->net_dev_stats;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_dev_open,
+ .ndo_stop = emac_dev_stop,
+ .ndo_start_xmit = emac_dev_xmit,
+ .ndo_set_multicast_list = emac_dev_mcast_set,
+ .ndo_set_mac_address = emac_dev_setmac_addr,
+ .ndo_do_ioctl = emac_devioctl,
+ .ndo_tx_timeout = emac_dev_tx_timeout,
+ .ndo_get_stats = emac_dev_getnetstats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = emac_poll_controller,
+#endif
+};
+
+/**
+ * davinci_emac_probe: EMAC device probe
+ * @pdev: The DaVinci EMAC device that we are probing
+ *
+ * Called when probing for emac devices. We get details of instances and
+ * resource information from platform init and register a network device
+ * and allocate resources necessary for the driver to operate
+ */
+static int __devinit davinci_emac_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct resource *res;
+ struct net_device *ndev;
+ struct emac_priv *priv;
+ unsigned long size;
+ struct emac_platform_data *pdata;
+ struct device *emac_dev;
+
+ /* obtain emac clock from kernel */
+ emac_clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(emac_clk)) {
+ printk(KERN_ERR "DaVinci EMAC: Failed to get EMAC clock\n");
+ return -EBUSY;
+ }
+ emac_bus_frequency = clk_get_rate(emac_clk);
+ /* TODO: Probe PHY here if possible */
+
+ ndev = alloc_etherdev(sizeof(struct emac_priv));
+ if (!ndev) {
+ printk(KERN_ERR "DaVinci EMAC: Error allocating net_device\n");
+ clk_put(emac_clk);
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+ priv = netdev_priv(ndev);
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG);
+
+ spin_lock_init(&priv->tx_lock);
+ spin_lock_init(&priv->rx_lock);
+ spin_lock_init(&priv->lock);
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ printk(KERN_ERR "DaVinci EMAC: No platfrom data\n");
+ return -ENODEV;
+ }
+
+ /* MAC addr and PHY mask , RMII enable info from platform_data */
+ memcpy(priv->mac_addr, pdata->mac_addr, 6);
+ priv->phy_mask = pdata->phy_mask;
+ priv->rmii_en = pdata->rmii_en;
+ priv->version = pdata->version;
+ emac_dev = &ndev->dev;
+ /* Get EMAC platform data */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(emac_dev, "DaVinci EMAC: Error getting res\n");
+ rc = -ENOENT;
+ goto probe_quit;
+ }
+
+ priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
+ size = res->end - res->start + 1;
+ if (!request_mem_region(res->start, size, ndev->name)) {
+ dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() \
+ for regs\n");
+ rc = -ENXIO;
+ goto probe_quit;
+ }
+
+ priv->remap_addr = ioremap(res->start, size);
+ if (!priv->remap_addr) {
+ dev_err(emac_dev, "Unable to map IO\n");
+ rc = -ENOMEM;
+ release_mem_region(res->start, size);
+ goto probe_quit;
+ }
+ priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
+ ndev->base_addr = (unsigned long)priv->remap_addr;
+
+ priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
+ priv->ctrl_ram_size = pdata->ctrl_ram_size;
+ priv->emac_ctrl_ram = priv->remap_addr + pdata->ctrl_ram_offset;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(emac_dev, "DaVinci EMAC: Error getting irq res\n");
+ rc = -ENOENT;
+ goto no_irq_res;
+ }
+ ndev->irq = res->start;
+
+ if (!is_valid_ether_addr(priv->mac_addr)) {
+ /* Use random MAC if none passed */
+ random_ether_addr(priv->mac_addr);
+ printk(KERN_WARNING "%s: using random MAC addr: %pM\n",
+ __func__, priv->mac_addr);
+ }
+
+ ndev->netdev_ops = &emac_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &ethtool_ops);
+ netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ rc = register_netdev(ndev);
+ if (rc) {
+ dev_err(emac_dev, "DaVinci EMAC: Error in register_netdev\n");
+ rc = -ENODEV;
+ goto netdev_reg_err;
+ }
+
+ clk_enable(emac_clk);
+
+ /* MII/PHY initialisation, mdio bus registration */
+ emac_mii = mdiobus_alloc();
+ if (emac_mii == NULL) {
+ dev_err(emac_dev, "DaVinci EMAC: Error allocating mii_bus\n");
+ rc = -ENOMEM;
+ goto mdio_alloc_err;
+ }
+
+ priv->mii_bus = emac_mii;
+ emac_mii->name = "emac-mii";
+ emac_mii->read = emac_mii_read;
+ emac_mii->write = emac_mii_write;
+ emac_mii->reset = emac_mii_reset;
+ emac_mii->irq = mii_irqs;
+ emac_mii->phy_mask = ~(priv->phy_mask);
+ emac_mii->parent = &pdev->dev;
+ emac_mii->priv = priv->remap_addr + pdata->mdio_reg_offset;
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", priv->pdev->id);
+ mdio_max_freq = pdata->mdio_max_freq;
+ emac_mii->reset(emac_mii);
+
+ /* Register the MII bus */
+ rc = mdiobus_register(emac_mii);
+ if (rc)
+ goto mdiobus_quit;
+
+ if (netif_msg_probe(priv)) {
+ dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
+ "(regs: %p, irq: %d)\n",
+ (void *)priv->emac_base_phys, ndev->irq);
+ }
+ return 0;
+
+mdiobus_quit:
+ mdiobus_free(emac_mii);
+
+netdev_reg_err:
+mdio_alloc_err:
+no_irq_res:
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, res->end - res->start + 1);
+ iounmap(priv->remap_addr);
+
+probe_quit:
+ clk_put(emac_clk);
+ free_netdev(ndev);
+ return rc;
+}
+
+/**
+ * davinci_emac_remove: EMAC device remove
+ * @pdev: The DaVinci EMAC device that we are removing
+ *
+ * Called when removing the device driver. We disable clock usage and release
+ * the resources taken up by the driver and unregister network device
+ */
+static int __devexit davinci_emac_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
+
+ clk_disable(emac_clk);
+ platform_set_drvdata(pdev, NULL);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdiobus_unregister(priv->mii_bus);
+ mdiobus_free(priv->mii_bus);
+
+ release_mem_region(res->start, res->end - res->start + 1);
+
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+ iounmap(priv->remap_addr);
+
+ clk_disable(emac_clk);
+ clk_put(emac_clk);
+
+ return 0;
+}
+
+/**
+ * davinci_emac_driver: EMAC platform driver structure
+ *
+ * We implement only probe and remove functions - suspend/resume and
+ * others not supported by this module
+ */
+static struct platform_driver davinci_emac_driver = {
+ .driver = {
+ .name = "davinci_emac",
+ .owner = THIS_MODULE,
+ },
+ .probe = davinci_emac_probe,
+ .remove = __devexit_p(davinci_emac_remove),
+};
+
+/**
+ * davinci_emac_init: EMAC driver module init
+ *
+ * Called when initializing the driver. We register the driver with
+ * the platform.
+ */
+static int __init davinci_emac_init(void)
+{
+ return platform_driver_register(&davinci_emac_driver);
+}
+module_init(davinci_emac_init);
+
+/**
+ * davinci_emac_exit: EMAC driver module exit
+ *
+ * Called when exiting the driver completely. We unregister the driver with
+ * the platform and exit
+ */
+static void __exit davinci_emac_exit(void)
+{
+ platform_driver_unregister(&davinci_emac_driver);
+}
+module_exit(davinci_emac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("DaVinci EMAC Maintainer: Anant Gole <anantgole@ti.com>");
+MODULE_AUTHOR("DaVinci EMAC Maintainer: Chaithrika U S <chaithrika@ti.com>");
+MODULE_DESCRIPTION("DaVinci EMAC Ethernet driver");
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index de63f1d41d3..e1af089064b 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -38,14 +38,6 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj
/* Add more time here if your adapter won't work OK: */
#define DE600_SLOW_DOWN udelay(delay_time)
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifdef DE600_DEBUG
-#define PRINTK(x) if (de600_debug >= 2) printk x
-#else
-#define DE600_DEBUG 0
-#define PRINTK(x) /**/
-#endif
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -67,10 +59,6 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj
#include "de600.h"
-static unsigned int de600_debug = DE600_DEBUG;
-module_param(de600_debug, int, 0);
-MODULE_PARM_DESC(de600_debug, "DE-600 debug level (0-2)");
-
static unsigned int check_lost = 1;
module_param(check_lost, bool, 0);
MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
@@ -180,20 +168,20 @@ static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
tickssofar = jiffies - dev->trans_start;
if (tickssofar < 5)
- return 1;
+ return NETDEV_TX_BUSY;
/* else */
printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
/* Restart the adapter. */
spin_lock_irqsave(&de600_lock, flags);
if (adapter_init(dev)) {
spin_unlock_irqrestore(&de600_lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
spin_unlock_irqrestore(&de600_lock, flags);
}
/* Start real output */
- PRINTK(("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages));
+ pr_debug("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages);
if ((len = skb->len) < RUNT)
len = RUNT;
@@ -211,7 +199,7 @@ static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
if (adapter_init(dev)) {
spin_unlock_irqrestore(&de600_lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
}
}
@@ -259,7 +247,7 @@ static irqreturn_t de600_interrupt(int irq, void *dev_id)
irq_status = de600_read_status(dev);
do {
- PRINTK(("de600_interrupt (%02X)\n", irq_status));
+ pr_debug("de600_interrupt (%02X)\n", irq_status);
if (irq_status & RX_GOOD)
de600_rx_intr(dev);
@@ -407,8 +395,7 @@ static struct net_device * __init de600_probe(void)
printk(KERN_INFO "%s: D-Link DE-600 pocket adapter", dev->name);
/* Alpha testers must have the version number to report bugs. */
- if (de600_debug > 1)
- printk(version);
+ pr_debug("%s", version);
/* probe for adapter */
err = -ENODEV;
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index d52f34cc952..55d2bb67cff 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -48,7 +48,6 @@ static const char version[] =
* Compile-time options: (see below for descriptions)
* -DDE620_IO=0x378 (lpt1)
* -DDE620_IRQ=7 (lpt1)
- * -DDE602_DEBUG=...
* -DSHUTDOWN_WHEN_LOST
* -DCOUNT_LOOPS
* -DLOWSPEED
@@ -98,15 +97,6 @@ static const char version[] =
#define SHUTDOWN_WHEN_LOST
*/
-/*
- * Enable debugging by "-DDE620_DEBUG=3" when compiling,
- * OR by enabling the following #define
- *
- * use 0 for production, 1 for verification, >2 for debug
- *
-#define DE620_DEBUG 3
- */
-
#ifdef LOWSPEED
/*
* Enable this #define if you want to see debugging output that show how long
@@ -160,14 +150,6 @@ typedef unsigned char byte;
#define RUNT 60 /* Too small Ethernet packet */
#define GIANT 1514 /* largest legal size packet, no fcs */
-#ifdef DE620_DEBUG /* Compile-time configurable */
-#define PRINTK(x) if (de620_debug >= 2) printk x
-#else
-#define DE620_DEBUG 0
-#define PRINTK(x) /**/
-#endif
-
-
/*
* Force media with insmod:
* insmod de620.o bnc=1
@@ -186,8 +168,6 @@ static int io = DE620_IO;
static int irq = DE620_IRQ;
static int clone = DE620_CLONE;
-static unsigned int de620_debug = DE620_DEBUG;
-
static spinlock_t de620_lock;
module_param(bnc, int, 0);
@@ -195,13 +175,11 @@ module_param(utp, int, 0);
module_param(io, int, 0);
module_param(irq, int, 0);
module_param(clone, int, 0);
-module_param(de620_debug, int, 0);
MODULE_PARM_DESC(bnc, "DE-620 set BNC medium (0-1)");
MODULE_PARM_DESC(utp, "DE-620 set UTP medium (0-1)");
MODULE_PARM_DESC(io, "DE-620 I/O base address,required");
MODULE_PARM_DESC(irq, "DE-620 IRQ number,required");
MODULE_PARM_DESC(clone, "Check also for non-D-Link DE-620 clones (0-1)");
-MODULE_PARM_DESC(de620_debug, "DE-620 debug level (0-2)");
/***********************************************
* *
@@ -533,9 +511,9 @@ static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Start real output */
- spin_lock_irqsave(&de620_lock, flags)
- PRINTK(("de620_start_xmit: len=%d, bufs 0x%02x\n",
- (int)skb->len, using_txbuf));
+ spin_lock_irqsave(&de620_lock, flags);
+ pr_debug("de620_start_xmit: len=%d, bufs 0x%02x\n",
+ (int)skb->len, using_txbuf);
/* select a free tx buffer. if there is one... */
switch (using_txbuf) {
@@ -553,7 +531,7 @@ static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
case (TXBF0 | TXBF1): /* NONE!!! */
printk(KERN_WARNING "%s: No tx-buffer available!\n", dev->name);
spin_unlock_irqrestore(&de620_lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
de620_write_block(dev, buffer, skb->len, len-skb->len);
@@ -585,12 +563,12 @@ de620_interrupt(int irq_in, void *dev_id)
/* Read the status register (_not_ the status port) */
irq_status = de620_get_register(dev, R_STS);
- PRINTK(("de620_interrupt (%2.2X)\n", irq_status));
+ pr_debug("de620_interrupt (%2.2X)\n", irq_status);
if (irq_status & RXGOOD) {
do {
again = de620_rx_intr(dev);
- PRINTK(("again=%d\n", again));
+ pr_debug("again=%d\n", again);
}
while (again && (++bogus_count < 100));
}
@@ -622,7 +600,7 @@ static int de620_rx_intr(struct net_device *dev)
byte pagelink;
byte curr_page;
- PRINTK(("de620_rx_intr: next_rx_page = %d\n", next_rx_page));
+ pr_debug("de620_rx_intr: next_rx_page = %d\n", next_rx_page);
/* Tell the adapter that we are going to read data, and from where */
de620_send_command(dev, W_CR | RRN);
@@ -631,8 +609,9 @@ static int de620_rx_intr(struct net_device *dev)
/* Deep breath, and away we goooooo */
de620_read_block(dev, (byte *)&header_buf, sizeof(struct header_buf));
- PRINTK(("page status=0x%02x, nextpage=%d, packetsize=%d\n",
- header_buf.status, header_buf.Rx_NextPage, header_buf.Rx_ByteCount));
+ pr_debug("page status=0x%02x, nextpage=%d, packetsize=%d\n",
+ header_buf.status, header_buf.Rx_NextPage,
+ header_buf.Rx_ByteCount);
/* Plausible page header? */
pagelink = header_buf.Rx_NextPage;
@@ -683,7 +662,7 @@ static int de620_rx_intr(struct net_device *dev)
buffer = skb_put(skb,size);
/* copy the packet into the buffer */
de620_read_block(dev, buffer, size);
- PRINTK(("Read %d bytes\n", size));
+ pr_debug("Read %d bytes\n", size);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb); /* deliver it "upstairs" */
/* count all receives */
@@ -696,7 +675,7 @@ static int de620_rx_intr(struct net_device *dev)
/* NOTE! We're _not_ checking the 'EMPTY'-flag! This seems better... */
curr_page = de620_get_register(dev, R_CPR);
de620_set_register(dev, W_NPRF, next_rx_page);
- PRINTK(("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page));
+ pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page);
return (next_rx_page != curr_page); /* That was slightly tricky... */
}
@@ -830,8 +809,7 @@ struct net_device * __init de620_probe(int unit)
netdev_boot_setup_check(dev);
}
- if (de620_debug)
- printk(version);
+ pr_debug("%s", version);
printk(KERN_INFO "D-Link DE-620 pocket adapter");
@@ -878,14 +856,13 @@ struct net_device * __init de620_probe(int unit)
/* base_addr and irq are already set, see above! */
/* dump eeprom */
- if (de620_debug) {
- printk("\nEEPROM contents:\n");
- printk("RAM_Size = 0x%02X\n", nic_data.RAM_Size);
- printk("NodeID = %pM\n", nic_data.NodeID);
- printk("Model = %d\n", nic_data.Model);
- printk("Media = %d\n", nic_data.Media);
- printk("SCR = 0x%02x\n", nic_data.SCR);
- }
+ pr_debug("\nEEPROM contents:\n"
+ "RAM_Size = 0x%02X\n"
+ "NodeID = %pM\n"
+ "Model = %d\n"
+ "Media = %d\n"
+ "SCR = 0x%02x\n", nic_data.RAM_Size, nic_data.NodeID,
+ nic_data.Model, nic_data.Media, nic_data.SCR);
err = register_netdev(dev);
if (err)
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index b62405a6918..2b22e580c4d 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -895,6 +895,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ unsigned long flags;
int entry, len;
len = skb->len;
@@ -907,6 +908,8 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += len;
+ spin_lock_irqsave(&lp->lock, flags);
+
entry = lp->tx_new;
*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
@@ -925,6 +928,8 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Kick the lance: transmit now */
writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
dev->trans_start = jiffies;
dev_kfree_skb(skb);
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 4ec055dc717..102b8d43971 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -3318,7 +3318,7 @@ static int dfx_xmt_queue_pkt(
{
skb_pull(skb,3);
spin_unlock_irqrestore(&bp->lock, flags);
- return(1); /* requeue packet for later */
+ return NETDEV_TX_BUSY; /* requeue packet for later */
}
/*
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 357f565851e..97ea2d6d3fe 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -810,7 +810,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
dev->mem_start = 0;
- device->driver_data = dev;
+ dev_set_drvdata(device, dev);
SET_NETDEV_DEV (dev, device);
status = register_netdev(dev);
@@ -957,7 +957,7 @@ static int depca_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (TX_BUFFS_AVAIL)
netif_start_queue(dev);
} else
- status = -1;
+ status = NETDEV_TX_LOCKED;
out:
return status;
@@ -1614,7 +1614,7 @@ static int __devexit depca_device_remove (struct device *device)
struct depca_private *lp;
int bus;
- dev = device->driver_data;
+ dev = dev_get_drvdata(device);
lp = netdev_priv(dev);
unregister_netdev (dev);
@@ -1839,7 +1839,7 @@ static int load_packet(struct net_device *dev, struct sk_buff *skb)
lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */
} else {
- status = -1;
+ status = NETDEV_TX_LOCKED;
}
return status;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 4a1b554654e..895d72143ee 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -539,7 +539,7 @@ rio_tx_timeout (struct net_device *dev)
dev->name, readl (ioaddr + TxStatus));
rio_free_tx(dev, 0);
dev->if_port = 0;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
/* allocate and initialize Tx and Rx descriptors */
@@ -610,7 +610,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
if (np->link_status == 0) { /* Link Down */
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
ioaddr = dev->base_addr;
entry = np->cur_tx % TX_RING_SIZE;
@@ -665,9 +665,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
writel (0, dev->base_addr + TFDListPtr1);
}
- /* NETDEV WATCHDOG timer */
- dev->trans_start = jiffies;
- return 0;
+ return NETDEV_TX_OK;
}
static irqreturn_t
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index d8350860c0f..dd771dea6ae 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -756,7 +756,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
dm9000_dbg(db, 3, "%s:\n", __func__);
if (db->tx_pkt_cnt > 1)
- return 1;
+ return NETDEV_TX_BUSY;
spin_lock_irqsave(&db->lock, flags);
@@ -1170,6 +1170,21 @@ dm9000_stop(struct net_device *ndev)
return 0;
}
+static const struct net_device_ops dm9000_netdev_ops = {
+ .ndo_open = dm9000_open,
+ .ndo_stop = dm9000_stop,
+ .ndo_start_xmit = dm9000_start_xmit,
+ .ndo_tx_timeout = dm9000_timeout,
+ .ndo_set_multicast_list = dm9000_hash_table,
+ .ndo_do_ioctl = dm9000_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = dm9000_poll_controller,
+#endif
+};
+
#define res_size(_r) (((_r)->end - (_r)->start) + 1)
/*
@@ -1339,18 +1354,9 @@ dm9000_probe(struct platform_device *pdev)
/* driver system function */
ether_setup(ndev);
- ndev->open = &dm9000_open;
- ndev->hard_start_xmit = &dm9000_start_xmit;
- ndev->tx_timeout = &dm9000_timeout;
- ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
- ndev->stop = &dm9000_stop;
- ndev->set_multicast_list = &dm9000_hash_table;
- ndev->ethtool_ops = &dm9000_ethtool_ops;
- ndev->do_ioctl = &dm9000_ioctl;
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
- ndev->poll_controller = &dm9000_poll_controller;
-#endif
+ ndev->netdev_ops = &dm9000_netdev_ops;
+ ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+ ndev->ethtool_ops = &dm9000_ethtool_ops;
db->msg_enable = NETIF_MSG_LINK;
db->mii.phy_id_mask = 0x1f;
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 0f9ee134855..f7929e89eb0 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -143,6 +143,8 @@
* FIXES:
* 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
* - Stratus87247: protect MDI control register manipulations
+ * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
+ * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
*/
#include <linux/module.h>
@@ -372,6 +374,7 @@ enum eeprom_op {
enum eeprom_offsets {
eeprom_cnfg_mdix = 0x03,
+ eeprom_phy_iface = 0x06,
eeprom_id = 0x0A,
eeprom_config_asf = 0x0D,
eeprom_smbus_addr = 0x90,
@@ -381,6 +384,18 @@ enum eeprom_cnfg_mdix {
eeprom_mdix_enabled = 0x0080,
};
+enum eeprom_phy_iface {
+ NoSuchPhy = 0,
+ I82553AB,
+ I82553C,
+ I82503,
+ DP83840,
+ S80C240,
+ S80C24,
+ I82555,
+ DP83840A = 10,
+};
+
enum eeprom_id {
eeprom_id_wol = 0x0020,
};
@@ -545,6 +560,7 @@ struct nic {
u32 msg_enable ____cacheline_aligned;
struct net_device *netdev;
struct pci_dev *pdev;
+ u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
struct rx *rxs ____cacheline_aligned;
struct rx *rx_to_use;
@@ -899,7 +915,21 @@ err_unlock:
return err;
}
-static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
+static int mdio_read(struct net_device *netdev, int addr, int reg)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
+}
+
+static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
+}
+
+/* the standard mdio_ctrl() function for usual MII-compliant hardware */
+static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
u32 data_out = 0;
unsigned int i;
@@ -938,30 +968,83 @@ static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
return (u16)data_out;
}
-static int mdio_read(struct net_device *netdev, int addr, int reg)
-{
- return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
+/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
+static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
+ u32 addr,
+ u32 dir,
+ u32 reg,
+ u16 data)
+{
+ if ((reg == MII_BMCR) && (dir == mdi_write)) {
+ if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
+ u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
+ MII_ADVERTISE);
+
+ /*
+ * Workaround Si issue where sometimes the part will not
+ * autoneg to 100Mbps even when advertised.
+ */
+ if (advert & ADVERTISE_100FULL)
+ data |= BMCR_SPEED100 | BMCR_FULLDPLX;
+ else if (advert & ADVERTISE_100HALF)
+ data |= BMCR_SPEED100;
+ }
+ }
+ return mdio_ctrl_hw(nic, addr, dir, reg, data);
}
-static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
-{
- struct nic *nic = netdev_priv(netdev);
-
- if ((nic->phy == phy_82552_v) && (reg == MII_BMCR) &&
- (data & (BMCR_ANRESTART | BMCR_ANENABLE))) {
- u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
-
- /*
- * Workaround Si issue where sometimes the part will not
- * autoneg to 100Mbps even when advertised.
- */
- if (advert & ADVERTISE_100FULL)
- data |= BMCR_SPEED100 | BMCR_FULLDPLX;
- else if (advert & ADVERTISE_100HALF)
- data |= BMCR_SPEED100;
+/* Fully software-emulated mdio_ctrl() function for cards without
+ * MII-compliant PHYs.
+ * For now, this is mainly geared towards 80c24 support; in case of further
+ * requirements for other types (i82503, ...?) either extend this mechanism
+ * or split it, whichever is cleaner.
+ */
+static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
+ u32 addr,
+ u32 dir,
+ u32 reg,
+ u16 data)
+{
+ /* might need to allocate a netdev_priv'ed register array eventually
+ * to be able to record state changes, but for now
+ * some fully hardcoded register handling ought to be ok I guess. */
+
+ if (dir == mdi_read) {
+ switch (reg) {
+ case MII_BMCR:
+ /* Auto-negotiation, right? */
+ return BMCR_ANENABLE |
+ BMCR_FULLDPLX;
+ case MII_BMSR:
+ return BMSR_LSTATUS /* for mii_link_ok() */ |
+ BMSR_ANEGCAPABLE |
+ BMSR_10FULL;
+ case MII_ADVERTISE:
+ /* 80c24 is a "combo card" PHY, right? */
+ return ADVERTISE_10HALF |
+ ADVERTISE_10FULL;
+ default:
+ DPRINTK(HW, DEBUG,
+ "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+ dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
+ return 0xFFFF;
+ }
+ } else {
+ switch (reg) {
+ default:
+ DPRINTK(HW, DEBUG,
+ "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+ dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
+ return 0xFFFF;
+ }
}
-
- mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
+}
+static inline int e100_phy_supports_mii(struct nic *nic)
+{
+ /* for now, just check it by comparing whether we
+  * are using MII software emulation.
+  */
+ return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
}
static void e100_get_defaults(struct nic *nic)
@@ -1013,7 +1096,8 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
config->tx_underrun_retry = 0x3; /* # of underrun retries */
- config->mii_mode = 0x1; /* 1=MII mode, 0=503 mode */
+ if (e100_phy_supports_mii(nic))
+ config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */
config->pad10 = 0x6;
config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
@@ -1270,6 +1354,42 @@ static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
offsetof(struct mem, dump_buf));
}
+static int e100_phy_check_without_mii(struct nic *nic)
+{
+ u8 phy_type;
+ int without_mii;
+
+ phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
+
+ switch (phy_type) {
+ case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
+ case I82503: /* Non-MII PHY; UNTESTED! */
+ case S80C24: /* Non-MII PHY; tested and working */
+ /* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
+ * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
+ * doesn't have a programming interface of any sort. The
+ * media is sensed automatically based on how the link partner
+ * is configured. This is, in essence, manual configuration.
+ */
+ DPRINTK(PROBE, INFO,
+ "found MII-less i82503 or 80c24 or other PHY\n");
+
+ nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
+ nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
+
+ /* these might be needed for certain MII-less cards...
+ * nic->flags |= ich;
+ * nic->flags |= ich_10h_workaround; */
+
+ without_mii = 1;
+ break;
+ default:
+ without_mii = 0;
+ break;
+ }
+ return without_mii;
+}
+
#define NCONFIG_AUTO_SWITCH 0x0080
#define MII_NSC_CONG MII_RESV1
#define NSC_CONG_ENABLE 0x0100
@@ -1290,9 +1410,21 @@ static int e100_phy_init(struct nic *nic)
if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
break;
}
- DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
- if (addr == 32)
- return -EAGAIN;
+ if (addr == 32) {
+ /* uhoh, no PHY detected: check whether we seem to be some
+ * weird, rare variant which is *known* to not have any MII.
+ * But do this AFTER MII checking only, since this does
+ * lookup of EEPROM values which may easily be unreliable. */
+ if (e100_phy_check_without_mii(nic))
+ return 0; /* simply return and hope for the best */
+ else {
+ /* for unknown cases log a fatal error */
+ DPRINTK(HW, ERR,
+ "Failed to locate any known PHY, aborting.\n");
+ return -EAGAIN;
+ }
+ } else
+ DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
/* Isolate all the PHY ids */
for (addr = 0; addr < 32; addr++)
@@ -1320,6 +1452,9 @@ static int e100_phy_init(struct nic *nic)
if (nic->phy == phy_82552_v) {
u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
+ /* assign special tweaked mdio_ctrl() function */
+ nic->mdio_ctrl = mdio_ctrl_phy_82552_v;
+
/* Workaround Si not advertising flow-control during autoneg */
advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
@@ -1581,7 +1716,7 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* This is a hard error - log it. */
DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
netif_stop_queue(netdev);
- return 1;
+ return NETDEV_TX_BUSY;
}
netdev->trans_start = jiffies;
@@ -2585,6 +2720,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
nic->netdev = netdev;
nic->pdev = pdev;
nic->msg_enable = (1 << debug) - 1;
+ nic->mdio_ctrl = mdio_ctrl_hw;
pci_set_drvdata(pdev, netdev);
if ((err = pci_enable_device(pdev))) {
@@ -2785,7 +2921,7 @@ static int e100_resume(struct pci_dev *pdev)
/* ack any pending wake events, disable PME */
pci_enable_wake(pdev, 0, 0);
- /* disbale reverse auto-negotiation */
+ /* disable reverse auto-negotiation */
if (nic->phy == phy_82552_v) {
u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
E100_82552_SMARTSPEED);
@@ -2822,12 +2958,13 @@ static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
- /* Similar to calling e100_down(), but avoids adapter I/O. */
- e100_close(netdev);
-
- /* Detach; put netif into a state similar to hotplug unplug. */
- napi_enable(&nic->napi);
netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ e100_down(nic);
pci_disable_device(pdev);
/* Request a slot reset. */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b1419e21b46..8d36743c814 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -498,6 +498,8 @@ int e1000_up(struct e1000_adapter *adapter)
e1000_irq_enable(adapter);
+ netif_wake_queue(adapter->netdev);
+
/* fire a link change interrupt to start the watchdog */
ew32(ICS, E1000_ICS_LSC);
return 0;
@@ -1234,15 +1236,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
!e1000_check_mng_mode(hw))
e1000_get_hw_control(adapter);
- /* tell the stack to leave us alone until e1000_open() is called */
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err)
goto err_register;
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
cards_found++;
@@ -1441,6 +1442,8 @@ static int e1000_open(struct net_device *netdev)
if (test_bit(__E1000_TESTING, &adapter->flags))
return -EBUSY;
+ netif_carrier_off(netdev);
+
/* allocate transmit descriptors */
err = e1000_setup_all_tx_resources(adapter);
if (err)
@@ -2327,7 +2330,8 @@ static void e1000_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_addr_list *uc_ptr;
+ struct netdev_hw_addr *ha;
+ bool use_uc = false;
struct dev_addr_list *mc_ptr;
u32 rctl;
u32 hash_value;
@@ -2366,12 +2370,11 @@ static void e1000_set_rx_mode(struct net_device *netdev)
rctl |= E1000_RCTL_VFE;
}
- uc_ptr = NULL;
if (netdev->uc_count > rar_entries - 1) {
rctl |= E1000_RCTL_UPE;
} else if (!(netdev->flags & IFF_PROMISC)) {
rctl &= ~E1000_RCTL_UPE;
- uc_ptr = netdev->uc_list;
+ use_uc = true;
}
ew32(RCTL, rctl);
@@ -2389,13 +2392,20 @@ static void e1000_set_rx_mode(struct net_device *netdev)
* if there are not 14 addresses, go ahead and clear the filters
* -- with 82571 controllers only 0-13 entries are filled here
*/
+ i = 1;
+ if (use_uc)
+ list_for_each_entry(ha, &netdev->uc_list, list) {
+ if (i == rar_entries)
+ break;
+ e1000_rar_set(hw, ha->addr, i++);
+ }
+
+ WARN_ON(i == rar_entries);
+
mc_ptr = netdev->mc_list;
- for (i = 1; i < rar_entries; i++) {
- if (uc_ptr) {
- e1000_rar_set(hw, uc_ptr->da_addr, i);
- uc_ptr = uc_ptr->next;
- } else if (mc_ptr) {
+ for (; i < rar_entries; i++) {
+ if (mc_ptr) {
e1000_rar_set(hw, mc_ptr->da_addr, i);
mc_ptr = mc_ptr->next;
} else {
@@ -2405,7 +2415,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
E1000_WRITE_FLUSH();
}
}
- WARN_ON(uc_ptr != NULL);
/* load any remaining addresses into the hash table */
@@ -2590,7 +2599,6 @@ static void e1000_watchdog(unsigned long data)
ew32(TCTL, tctl);
netif_carrier_on(netdev);
- netif_wake_queue(netdev);
mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
adapter->smartspeed = 0;
} else {
@@ -2607,7 +2615,6 @@ static void e1000_watchdog(unsigned long data)
printk(KERN_INFO "e1000: %s NIC Link is Down\n",
netdev->name);
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
/* 80003ES2LAN workaround--
@@ -2645,6 +2652,8 @@ static void e1000_watchdog(unsigned long data)
* (Do the reset outside of interrupt context). */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
}
}
@@ -2989,7 +2998,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
size -= 4;
buffer_info->length = size;
- buffer_info->dma = map[0] + offset;
+ buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
@@ -3030,7 +3039,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
size -= 4;
buffer_info->length = size;
- buffer_info->dma = map[f + 1] + offset;
+ buffer_info->dma = map[f] + offset;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
@@ -3362,7 +3371,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (count) {
e1000_tx_queue(adapter, tx_ring, tx_flags, count);
- netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
@@ -4027,8 +4035,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
PCI_DMA_FROMDEVICE);
length = le16_to_cpu(rx_desc->length);
-
- if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
+ /* !EOP means multiple descriptors were used to store a single
+ * packet; also make sure the frame isn't just the CRC */
+ if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
/* All receives must fit into a single buffer */
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 6c01a2072c8..b53b40ba88a 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -71,6 +71,7 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
static s32 e1000_led_on_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
/**
* e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -212,6 +213,9 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_mac_operations *func = &mac->ops;
+ u32 swsm = 0;
+ u32 swsm2 = 0;
+ bool force_clear_smbi = false;
/* Set media type */
switch (adapter->pdev->device) {
@@ -276,6 +280,50 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
break;
}
+ /*
+ * Ensure that the inter-port SWSM.SMBI lock bit is clear before
+ * first NVM or PHY access. This should be done for single-port
+ * devices, and for one port only on dual-port devices so that
+ * for those devices we can still use the SMBI lock to synchronize
+ * inter-port accesses to the PHY & NVM.
+ */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ swsm2 = er32(SWSM2);
+
+ if (!(swsm2 & E1000_SWSM2_LOCK)) {
+ /* Only do this for the first interface on this card */
+ ew32(SWSM2,
+ swsm2 | E1000_SWSM2_LOCK);
+ force_clear_smbi = true;
+ } else
+ force_clear_smbi = false;
+ break;
+ default:
+ force_clear_smbi = true;
+ break;
+ }
+
+ if (force_clear_smbi) {
+ /* Make sure SWSM.SMBI is clear */
+ swsm = er32(SWSM);
+ if (swsm & E1000_SWSM_SMBI) {
+ /* This bit should not be set on a first interface, and
+ * indicates that the bootagent or EFI code has
+ * improperly left this bit enabled
+ */
+ hw_dbg(hw, "Please update your 82571 Bootagent\n");
+ }
+ ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
+ }
+
+ /*
+ * Initialize the device-specific counter of SMBI acquisition
+ * timeouts.
+ */
+ hw->dev_spec.e82571.smb_counter = 0;
+
return 0;
}
@@ -341,8 +389,10 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1,
&eeprom_data) < 0)
break;
- if (eeprom_data & NVM_WORD1A_ASPM_MASK)
- adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
+ if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) {
+ adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
+ adapter->max_hw_frame_size = DEFAULT_JUMBO;
+ }
}
break;
default:
@@ -411,11 +461,37 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;
- s32 timeout = hw->nvm.word_size + 1;
+ s32 sw_timeout = hw->nvm.word_size + 1;
+ s32 fw_timeout = hw->nvm.word_size + 1;
s32 i = 0;
+ /*
+ * If we have timed out 3 times trying to acquire
+ * the inter-port SMBI semaphore, there is old code
+ * operating on the other port, and it is not
+ * releasing SMBI. Modify the number of times that
+ * we try for the semaphore to interwork with this
+ * older code.
+ */
+ if (hw->dev_spec.e82571.smb_counter > 2)
+ sw_timeout = 1;
+
+ /* Get the SW semaphore */
+ while (i < sw_timeout) {
+ swsm = er32(SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ udelay(50);
+ i++;
+ }
+
+ if (i == sw_timeout) {
+ hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
+ hw->dev_spec.e82571.smb_counter++;
+ }
/* Get the FW semaphore. */
- for (i = 0; i < timeout; i++) {
+ for (i = 0; i < fw_timeout; i++) {
swsm = er32(SWSM);
ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
@@ -426,9 +502,9 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
udelay(50);
}
- if (i == timeout) {
+ if (i == fw_timeout) {
/* Release semaphores */
- e1000e_put_hw_semaphore(hw);
+ e1000_put_hw_semaphore_82571(hw);
hw_dbg(hw, "Driver can't access the NVM\n");
return -E1000_ERR_NVM;
}
@@ -447,9 +523,7 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
u32 swsm;
swsm = er32(SWSM);
-
- swsm &= ~E1000_SWSM_SWESMBI;
-
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
ew32(SWSM, swsm);
}
@@ -1585,6 +1659,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
static struct e1000_mac_operations e82571_mac_ops = {
/* .check_mng_mode: mac type dependent */
/* .check_for_link: media type dependent */
+ .id_led_init = e1000e_id_led_init,
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
.get_bus_info = e1000e_get_bus_info_pcie,
@@ -1596,6 +1671,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
.init_hw = e1000_init_hw_82571,
.setup_link = e1000_setup_link_82571,
/* .setup_physical_interface: media type dependent */
+ .setup_led = e1000e_setup_led_generic,
};
static struct e1000_phy_operations e82_phy_ops_igp = {
@@ -1672,6 +1748,7 @@ struct e1000_info e1000_82571_info = {
| FLAG_TARC_SPEED_MODE_BIT /* errata */
| FLAG_APME_CHECK_PORT_B,
.pba = 38,
+ .max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_igp,
@@ -1688,6 +1765,7 @@ struct e1000_info e1000_82572_info = {
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_TARC_SPEED_MODE_BIT, /* errata */
.pba = 38,
+ .max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_igp,
@@ -1706,6 +1784,7 @@ struct e1000_info e1000_82573_info = {
| FLAG_HAS_ERT
| FLAG_HAS_SWSM_ON_LOAD,
.pba = 20,
+ .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_m88,
@@ -1724,6 +1803,7 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.pba = 20,
+ .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_bm,
@@ -1740,6 +1820,7 @@ struct e1000_info e1000_82583_info = {
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.pba = 20,
+ .max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_bm,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 243aa499fe9..8890c97e112 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -56,6 +56,7 @@
/* Wake Up Control */
#define E1000_WUC_APME 0x00000001 /* APM Enable */
#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
/* Wake Up Filter Control */
#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
@@ -65,6 +66,13 @@
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+/* Wake Up Status */
+#define E1000_WUS_LNKC E1000_WUFC_LNKC
+#define E1000_WUS_MAG E1000_WUFC_MAG
+#define E1000_WUS_EX E1000_WUFC_EX
+#define E1000_WUS_MC E1000_WUFC_MC
+#define E1000_WUS_BC E1000_WUFC_BC
+
/* Extended Device Control */
#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
@@ -77,6 +85,7 @@
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_PHYPDEN 0x00100000
/* Receive Descriptor bit definitions */
#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
@@ -140,6 +149,7 @@
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
@@ -153,6 +163,7 @@
#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
@@ -255,11 +266,16 @@
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
/* LED Control */
+#define E1000_PHY_LED0_MODE_MASK 0x00000007
+#define E1000_PHY_LED0_IVRT 0x00000008
+#define E1000_PHY_LED0_MASK 0x0000001F
+
#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
#define E1000_LEDCTL_LED0_IVRT 0x00000040
#define E1000_LEDCTL_LED0_BLINK 0x00000080
+#define E1000_LEDCTL_MODE_LINK_UP 0x2
#define E1000_LEDCTL_MODE_LED_ON 0xE
#define E1000_LEDCTL_MODE_LED_OFF 0xF
@@ -360,6 +376,8 @@
#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
+
/* Interrupt Cause Read */
#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
@@ -469,6 +487,8 @@
#define AUTO_READ_DONE_TIMEOUT 10
/* Flow Control */
+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
/* Transmit Configuration Word */
@@ -674,6 +694,8 @@
#define IFE_C_E_PHY_ID 0x02A80310
#define BME1000_E_PHY_ID 0x01410CB0
#define BME1000_E_PHY_ID_R2 0x01410CB1
+#define I82577_E_PHY_ID 0x01540050
+#define I82578_E_PHY_ID 0x004DD040
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -727,6 +749,9 @@
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020
+#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C
+
/* BME1000 PHY Specific Control Register */
#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index f37360aa12a..981936c1fb4 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -62,7 +62,7 @@ struct e1000_info;
e_printk(KERN_NOTICE, adapter, format, ## arg)
-/* Interrupt modes, as used by the IntMode paramter */
+/* Interrupt modes, as used by the IntMode parameter */
#define E1000E_INT_MODE_LEGACY 0
#define E1000E_INT_MODE_MSI 1
#define E1000E_INT_MODE_MSIX 2
@@ -96,6 +96,51 @@ struct e1000_info;
/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+#define DEFAULT_JUMBO 9234
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE 769
+
+#define PHY_UPPER_SHIFT 21
+#define BM_PHY_REG(page, reg) \
+ (((reg) & MAX_PHY_REG_ADDRESS) |\
+ (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+ (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
+
+/* PHY Wakeup Registers and defines */
+#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
+#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
+
+#define HV_SCC_UPPER PHY_REG(778, 16) /* Single Collision Count */
+#define HV_SCC_LOWER PHY_REG(778, 17)
+#define HV_ECOL_UPPER PHY_REG(778, 18) /* Excessive Collision Count */
+#define HV_ECOL_LOWER PHY_REG(778, 19)
+#define HV_MCC_UPPER PHY_REG(778, 20) /* Multiple Collision Count */
+#define HV_MCC_LOWER PHY_REG(778, 21)
+#define HV_LATECOL_UPPER PHY_REG(778, 23) /* Late Collision Count */
+#define HV_LATECOL_LOWER PHY_REG(778, 24)
+#define HV_COLC_UPPER PHY_REG(778, 25) /* Collision Count */
+#define HV_COLC_LOWER PHY_REG(778, 26)
+#define HV_DC_UPPER PHY_REG(778, 27) /* Defer Count */
+#define HV_DC_LOWER PHY_REG(778, 28)
+#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(778, 30)
+
enum e1000_boards {
board_82571,
board_82572,
@@ -106,6 +151,7 @@ enum e1000_boards {
board_ich8lan,
board_ich9lan,
board_ich10lan,
+ board_pchlan,
};
struct e1000_queue_stats {
@@ -293,6 +339,7 @@ struct e1000_adapter {
u32 eeprom_wol;
u32 wol;
u32 pba;
+ u32 max_hw_frame_size;
bool fc_autoneg;
@@ -302,6 +349,7 @@ struct e1000_adapter {
unsigned int flags2;
struct work_struct downshift_task;
struct work_struct update_phy_task;
+ struct work_struct led_blink_task;
};
struct e1000_info {
@@ -309,6 +357,7 @@ struct e1000_info {
unsigned int flags;
unsigned int flags2;
u32 pba;
+ u32 max_hw_frame_size;
s32 (*get_variants)(struct e1000_adapter *);
struct e1000_mac_operations *mac_ops;
struct e1000_phy_operations *phy_ops;
@@ -351,6 +400,7 @@ struct e1000_info {
/* CRC Stripping defines */
#define FLAG2_CRC_STRIPPING (1 << 0)
+#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -404,6 +454,7 @@ extern struct e1000_info e1000_82583_info;
extern struct e1000_info e1000_ich8_info;
extern struct e1000_info e1000_ich9_info;
extern struct e1000_info e1000_ich10_info;
+extern struct e1000_info e1000_pch_info;
extern struct e1000_info e1000_es2_info;
extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
@@ -425,6 +476,7 @@ extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
+extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
@@ -493,6 +545,15 @@ extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000e_check_downshift(struct e1000_hw *hw);
+extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
+extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 8964838c686..ae5d7368935 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1366,6 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
}
static struct e1000_mac_operations es2_mac_ops = {
+ .id_led_init = e1000e_id_led_init,
.check_mng_mode = e1000e_check_mng_mode_generic,
/* check_for_link dependent on media type */
.cleanup_led = e1000e_cleanup_led_generic,
@@ -1379,6 +1380,7 @@ static struct e1000_mac_operations es2_mac_ops = {
.init_hw = e1000_init_hw_80003es2lan,
.setup_link = e1000e_setup_link,
/* setup_physical_interface dependent on media type */
+ .setup_led = e1000e_setup_led_generic,
};
static struct e1000_phy_operations es2_phy_ops = {
@@ -1422,6 +1424,7 @@ struct e1000_info e1000_es2_info = {
| FLAG_DISABLE_FC_PAUSE_TIME /* errata */
| FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
.pba = 38,
+ .max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_80003es2lan,
.mac_ops = &es2_mac_ops,
.phy_ops = &es2_phy_ops,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 4d25ede8836..1bf4d2a5d34 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -167,6 +167,15 @@ static int e1000_get_settings(struct net_device *netdev,
ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ /* MDI-X => 2; MDI =>1; Invalid =>0 */
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ !hw->mac.get_link_status)
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ ETH_TP_MDI;
+ else
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
return 0;
}
@@ -776,6 +785,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
u32 after;
u32 i;
u32 toggle;
+ u32 mask;
/*
* The status register is Read Only, so a write should fail.
@@ -788,17 +798,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_80003es2lan:
toggle = 0x7FFFF3FF;
break;
- case e1000_82573:
- case e1000_82574:
- case e1000_82583:
- case e1000_ich8lan:
- case e1000_ich9lan:
- case e1000_ich10lan:
+ default:
toggle = 0x7FFFF033;
break;
- default:
- toggle = 0xFFFFF833;
- break;
}
before = er32(STATUS);
@@ -844,11 +846,18 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
+ mask = 0x8003FFFF;
+ switch (mac->type) {
+ case e1000_ich10lan:
+ case e1000_pchlan:
+ mask |= (1 << 18);
+ break;
+ default:
+ break;
+ }
for (i = 0; i < mac->rar_entry_count; i++)
REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
- ((mac->type == e1000_ich10lan) ?
- 0x8007FFFF : 0x8003FFFF),
- 0xFFFFFFFF);
+ mask, 0xFFFFFFFF);
for (i = 0; i < mac->mta_reg_count; i++)
REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
@@ -1786,15 +1795,22 @@ static int e1000_set_wol(struct net_device *netdev,
/* bit defines for adapter->led_status */
#define E1000_LED_ON 0
-static void e1000_led_blink_callback(unsigned long data)
+static void e1000e_led_blink_task(struct work_struct *work)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter, led_blink_task);
if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
adapter->hw.mac.ops.led_off(&adapter->hw);
else
adapter->hw.mac.ops.led_on(&adapter->hw);
+}
+
+static void e1000_led_blink_callback(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ schedule_work(&adapter->led_blink_task);
mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
}
@@ -1807,7 +1823,9 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
data = INT_MAX;
if ((hw->phy.type == e1000_phy_ife) ||
+ (hw->mac.type == e1000_pchlan) ||
(hw->mac.type == e1000_82574)) {
+ INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
if (!adapter->blink_timer.function) {
init_timer(&adapter->blink_timer);
adapter->blink_timer.function =
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index d8b82296f41..163c1c0cfee 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -193,7 +193,11 @@ enum e1e_registers {
E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */
E1000_RFCTL = 0x05008, /* Receive Filter Control */
E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
- E1000_RA = 0x05400, /* Receive Address - RW Array */
+ E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
+#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8))
+#define E1000_RA (E1000_RAL(0))
+ E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
+#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8))
E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
E1000_WUC = 0x05800, /* Wakeup Control - RW */
E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
@@ -210,6 +214,7 @@ enum e1e_registers {
E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
E1000_SWSM = 0x05B50, /* SW Semaphore */
E1000_FWSM = 0x05B54, /* FW Semaphore */
+ E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
E1000_HICR = 0x08F00, /* Host Interface Control */
};
@@ -253,7 +258,7 @@ enum e1e_registers {
#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
-#define IGP01E1000_PSSR_MDIX 0x0008
+#define IGP01E1000_PSSR_MDIX 0x0800
#define IGP01E1000_PSSR_SPEED_MASK 0xC000
#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
@@ -368,6 +373,10 @@ enum e1e_registers {
#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
+#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
+#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
+#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
+#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
#define E1000_REVISION_4 4
@@ -383,6 +392,7 @@ enum e1000_mac_type {
e1000_ich8lan,
e1000_ich9lan,
e1000_ich10lan,
+ e1000_pchlan,
};
enum e1000_media_type {
@@ -417,6 +427,8 @@ enum e1000_phy_type {
e1000_phy_igp_3,
e1000_phy_ife,
e1000_phy_bm,
+ e1000_phy_82578,
+ e1000_phy_82577,
};
enum e1000_bus_width {
@@ -720,6 +732,7 @@ struct e1000_host_mng_command_info {
/* Function pointers and static data for the MAC. */
struct e1000_mac_operations {
+ s32 (*id_led_init)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
s32 (*check_for_link)(struct e1000_hw *);
s32 (*cleanup_led)(struct e1000_hw *);
@@ -733,11 +746,13 @@ struct e1000_mac_operations {
s32 (*init_hw)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
+ s32 (*setup_led)(struct e1000_hw *);
};
/* Function pointers for the PHY. */
struct e1000_phy_operations {
s32 (*acquire_phy)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
s32 (*check_reset_block)(struct e1000_hw *);
s32 (*commit_phy)(struct e1000_hw *);
s32 (*force_speed_duplex)(struct e1000_hw *);
@@ -869,6 +884,7 @@ struct e1000_fc_info {
struct e1000_dev_spec_82571 {
bool laa_is_present;
bool alt_mac_addr_is_present;
+ u32 smb_counter;
};
struct e1000_shadow_ram {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 6d1aab6316b..9e23f50fb9c 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -48,6 +48,10 @@
* 82567LF-3 Gigabit Network Connection
* 82567LM-3 Gigabit Network Connection
* 82567LM-4 Gigabit Network Connection
+ * 82577LM Gigabit Network Connection
+ * 82577LC Gigabit Network Connection
+ * 82578DM Gigabit Network Connection
+ * 82578DC Gigabit Network Connection
*/
#include <linux/netdevice.h>
@@ -116,6 +120,8 @@
#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
+#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
+
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@@ -186,6 +192,14 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -213,6 +227,41 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
#define ew32flash(reg,val) __ew32flash(hw, (reg), (val))
/**
+ * e1000_init_phy_params_pchlan - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific PHY parameters and function pointers.
+ **/
+static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+
+ phy->addr = 1;
+ phy->reset_delay_us = 100;
+
+ phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
+ phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
+ phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ phy->id = e1000_phy_unknown;
+ e1000e_get_phy_id(hw);
+ phy->type = e1000e_get_phy_type_from_id(phy->id);
+
+ if (phy->type == e1000_phy_82577) {
+ phy->ops.check_polarity = e1000_check_polarity_82577;
+ phy->ops.force_speed_duplex =
+ e1000_phy_force_speed_duplex_82577;
+ phy->ops.get_cable_length = e1000_get_cable_length_82577;
+ phy->ops.get_phy_info = e1000_get_phy_info_82577;
+ phy->ops.commit_phy = e1000e_phy_sw_reset;
+ }
+
+ return ret_val;
+}
+
+/**
* e1000_init_phy_params_ich8lan - Initialize PHY function pointers
* @hw: pointer to the HW structure
*
@@ -273,6 +322,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
break;
}
+ phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
+
return 0;
}
@@ -358,6 +409,36 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = 1;
+ /* LED operations */
+ switch (mac->type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ /* ID LED init */
+ mac->ops.id_led_init = e1000e_id_led_init;
+ /* setup LED */
+ mac->ops.setup_led = e1000e_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_ich8lan;
+ mac->ops.led_off = e1000_led_off_ich8lan;
+ break;
+ case e1000_pchlan:
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_pchlan;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_pchlan;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_pchlan;
+ mac->ops.led_off = e1000_led_off_pchlan;
+ break;
+ default:
+ break;
+ }
+
/* Enable PCS Lock-loss workaround for ICH8 */
if (mac->type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, 1);
@@ -378,10 +459,18 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
if (rc)
return rc;
- rc = e1000_init_phy_params_ich8lan(hw);
+ if (hw->mac.type == e1000_pchlan)
+ rc = e1000_init_phy_params_pchlan(hw);
+ else
+ rc = e1000_init_phy_params_ich8lan(hw);
if (rc)
return rc;
+ if (adapter->hw.phy.type == e1000_phy_ife) {
+ adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
+ adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
+ }
+
if ((adapter->hw.mac.type == e1000_ich8lan) &&
(adapter->hw.phy.type == e1000_phy_igp_3))
adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
@@ -410,12 +499,15 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
while (timeout) {
extcnf_ctrl = er32(EXTCNF_CTRL);
- extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
- ew32(EXTCNF_CTRL, extcnf_ctrl);
- extcnf_ctrl = er32(EXTCNF_CTRL);
- if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
- break;
+ if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) {
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+ break;
+ }
mdelay(1);
timeout--;
}
@@ -555,6 +647,53 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
+ * done after every PHY reset.
+ **/
+static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ if (hw->mac.type != e1000_pchlan)
+ return ret_val;
+
+ if (((hw->phy.type == e1000_phy_82577) &&
+ ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
+ ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
+ /* Disable generation of early preamble */
+ ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
+ if (ret_val)
+ return ret_val;
+
+ /* Preamble tuning for SSC */
+ ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->phy.type == e1000_phy_82578) {
+ /*
+ * Return registers to default by doing a soft reset then
+ * writing 0x3140 to the control register.
+ */
+ if (hw->phy.revision < 2) {
+ e1000e_phy_sw_reset(hw);
+ ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
+ }
+ }
+
+ /* Select page 0 */
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ return ret_val;
+ hw->phy.addr = 1;
+ e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ hw->phy.ops.release_phy(hw);
+
+ return ret_val;
+}
+
+/**
* e1000_phy_hw_reset_ich8lan - Performs a PHY reset
* @hw: pointer to the HW structure
*
@@ -575,6 +714,12 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
+ if (hw->mac.type == e1000_pchlan) {
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/*
* Initialize the PHY from the NVM on ICH platforms. This
* is needed due to an issue where the NVM configuration is
@@ -701,7 +846,7 @@ static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
phy->polarity_correction = (!(data & IFE_PSC_AUTO_POLARITY_DISABLE));
if (phy->polarity_correction) {
- ret_val = e1000_check_polarity_ife_ich8lan(hw);
+ ret_val = phy->ops.check_polarity(hw);
if (ret_val)
return ret_val;
} else {
@@ -741,6 +886,8 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
break;
case e1000_phy_igp_3:
case e1000_phy_bm:
+ case e1000_phy_82578:
+ case e1000_phy_82577:
return e1000e_get_phy_info_igp(hw);
break;
default:
@@ -1852,6 +1999,79 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
}
/**
+ * e1000_id_led_init_pchlan - store LED configurations
+ * @hw: pointer to the HW structure
+ *
+ * PCH does not control LEDs via the LEDCTL register, rather it uses
+ * the PHY LED configuration register.
+ *
+ * PCH also does not have an "always on" or "always off" mode which
+ * complicates the ID feature. Instead of using the "on" mode in
+ * ledctl_mode2 to indicate which LEDs to use for ID (see
+ * e1000e_id_led_init()), use the "link_up" mode. The LEDs will still ID
+ * on request even without link, based on the logic in
+ * e1000_led_[on|off]_pchlan().
+ **/
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
+ u16 data, i, temp, shift;
+
+ /* Get default ID LED modes */
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ goto out;
+
+ mac->ledctl_default = er32(LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
+ shift = (i * 5);
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_on << shift);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_on << shift);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
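The loop above works because the NVM default word packs one 4-bit mode per LED (hence the i << 2 shift), while the PHY LED configuration value uses one 5-bit slot per LED (hence i * 5). A minimal standalone check of those shift positions, illustrative only and not driver code:

/* Bit positions for the third LED (i = 2), derived purely from the
 * shifts used in e1000_id_led_init_pchlan() above. */
#include <assert.h>

int main(void)
{
	int i = 2;

	assert((i << 2) == 8);	/* NVM mode nibble for LED 2 starts at bit 8 */
	assert((i * 5) == 10);	/* 5-bit PHY LED config slot starts at bit 10 */
	return 0;
}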
+/**
* e1000_get_bus_info_ich8lan - Get/Set the bus type and width
* @hw: pointer to the HW structure
*
@@ -1960,6 +2180,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
kab |= E1000_KABGTXD_BGSQLBIAS;
ew32(KABGTXD, kab);
+ if (hw->mac.type == e1000_pchlan)
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+
return ret_val;
}
@@ -1985,7 +2208,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
e1000_initialize_hw_bits_ich8lan(hw);
/* Initialize identification LED */
- ret_val = e1000e_id_led_init(hw);
+ ret_val = mac->ops.id_led_init(hw);
if (ret_val) {
hw_dbg(hw, "Error initializing identification LED\n");
return ret_val;
@@ -2031,6 +2254,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
ew32(CTRL_EXT, ctrl_ext);
/*
+ * The 82578 Rx buffer will stall if wakeup is enabled in host and
+ * the ME. Reading the BM_WUC register will clear the host wakeup bit.
+ * Reset the phy after disabling host wakeup to reset the Rx buffer.
+ */
+ if (hw->phy.type == e1000_phy_82578) {
+ e1e_rphy(hw, BM_WUC, &i);
+ e1000e_phy_hw_reset_generic(hw);
+ }
+
+ /*
* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
@@ -2054,6 +2287,9 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
/* Extended Device Control */
reg = er32(CTRL_EXT);
reg |= (1 << 22);
+ /* Enable PHY low-power state when MAC is at D3 w/o WoL */
+ if (hw->mac.type >= e1000_pchlan)
+ reg |= E1000_CTRL_EXT_PHYPDEN;
ew32(CTRL_EXT, reg);
/* Transmit Descriptor Control 0 */
@@ -2112,8 +2348,13 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
* the default flow control setting, so we explicitly
* set it to full.
*/
- if (hw->fc.requested_mode == e1000_fc_default)
- hw->fc.requested_mode = e1000_fc_full;
+ if (hw->fc.requested_mode == e1000_fc_default) {
+ /* Workaround h/w hang when Tx flow control enabled */
+ if (hw->mac.type == e1000_pchlan)
+ hw->fc.requested_mode = e1000_fc_rx_pause;
+ else
+ hw->fc.requested_mode = e1000_fc_full;
+ }
/*
* Save off the requested flow control mode for use later. Depending
@@ -2130,6 +2371,14 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
return ret_val;
ew32(FCTTV, hw->fc.pause_time);
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ ret_val = hw->phy.ops.write_phy_reg(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 27),
+ hw->fc.pause_time);
+ if (ret_val)
+ return ret_val;
+ }
return e1000e_set_fc_watermarks(hw);
}
@@ -2169,18 +2418,26 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- if (hw->phy.type == e1000_phy_igp_3) {
+ switch (hw->phy.type) {
+ case e1000_phy_igp_3:
ret_val = e1000e_copper_link_setup_igp(hw);
if (ret_val)
return ret_val;
- } else if (hw->phy.type == e1000_phy_bm) {
+ break;
+ case e1000_phy_bm:
+ case e1000_phy_82578:
ret_val = e1000e_copper_link_setup_m88(hw);
if (ret_val)
return ret_val;
- }
-
- if (hw->phy.type == e1000_phy_ife) {
- ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
+ break;
+ case e1000_phy_82577:
+ ret_val = e1000_copper_link_setup_82577(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_ife:
+ ret_val = hw->phy.ops.read_phy_reg(hw, IFE_PHY_MDIX_CONTROL,
+ &reg_data);
if (ret_val)
return ret_val;
@@ -2198,9 +2455,13 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
reg_data |= IFE_PMC_AUTO_MDIX;
break;
}
- ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
+ ret_val = hw->phy.ops.write_phy_reg(hw, IFE_PHY_MDIX_CONTROL,
+ reg_data);
if (ret_val)
return ret_val;
+ break;
+ default:
+ break;
}
return e1000e_setup_copper_link(hw);
}
@@ -2417,18 +2678,26 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
* 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
* to a lower speed.
*
- * Should only be called for ICH9 and ICH10 devices.
+ * Should only be called for applicable parts.
**/
void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
{
u32 phy_ctrl;
- if ((hw->mac.type == e1000_ich10lan) ||
- (hw->mac.type == e1000_ich9lan)) {
+ switch (hw->mac.type) {
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ case e1000_pchlan:
phy_ctrl = er32(PHY_CTRL);
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
E1000_PHY_CTRL_GBE_DISABLE;
ew32(PHY_CTRL, phy_ctrl);
+
+ /* Workaround SWFLAG unexpectedly set during S0->Sx */
+ if (hw->mac.type == e1000_pchlan)
+ udelay(500);
+ default:
+ break;
}
return;
@@ -2482,13 +2751,99 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_setup_led_pchlan - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use.
+ **/
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
+{
+ return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG,
+ (u16)hw->mac.ledctl_mode1);
+}
+
+/**
+ * e1000_cleanup_led_pchlan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
+{
+ return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG,
+ (u16)hw->mac.ledctl_default);
+}
+
+/**
+ * e1000_led_on_pchlan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
+{
+ u16 data = (u16)hw->mac.ledctl_mode2;
+ u32 i, led;
+
+ /*
+ * If there is no link, turn the LED on by setting the invert bit
+ * for each LED whose mode is "link_up" in ledctl_mode2.
+ */
+ if (!(er32(STATUS) & E1000_STATUS_LU)) {
+ for (i = 0; i < 3; i++) {
+ led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+ if ((led & E1000_PHY_LED0_MODE_MASK) !=
+ E1000_LEDCTL_MODE_LINK_UP)
+ continue;
+ if (led & E1000_PHY_LED0_IVRT)
+ data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+ else
+ data |= (E1000_PHY_LED0_IVRT << (i * 5));
+ }
+ }
+
+ return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_led_off_pchlan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
+{
+ u16 data = (u16)hw->mac.ledctl_mode1;
+ u32 i, led;
+
+ /*
+ * If there is no link, turn the LED off by clearing the invert bit
+ * for each LED whose mode is "link_up" in ledctl_mode1.
+ */
+ if (!(er32(STATUS) & E1000_STATUS_LU)) {
+ for (i = 0; i < 3; i++) {
+ led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+ if ((led & E1000_PHY_LED0_MODE_MASK) !=
+ E1000_LEDCTL_MODE_LINK_UP)
+ continue;
+ if (led & E1000_PHY_LED0_IVRT)
+ data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+ else
+ data |= (E1000_PHY_LED0_IVRT << (i * 5));
+ }
+ }
+
+ return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data);
+}
+
+/**
* e1000_get_cfg_done_ich8lan - Read config done bit
* @hw: pointer to the HW structure
*
* Read the management control register for the config done bit for
* completion status. NOTE: silicon which is EEPROM-less will fail trying
* to read the config done bit, so an error is *ONLY* logged and returns
- * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
+ * 0. If we were to return with error, EEPROM-less silicon
* would not be able to be reset or change link.
**/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
@@ -2498,7 +2853,8 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
e1000e_get_cfg_done(hw);
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
- if (hw->mac.type != e1000_ich10lan) {
+ if ((hw->mac.type != e1000_ich10lan) &&
+ (hw->mac.type != e1000_pchlan)) {
if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
(hw->phy.type == e1000_phy_igp_3)) {
e1000e_phy_init_script_igp3(hw);
@@ -2524,6 +2880,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
u32 temp;
+ u16 phy_data;
e1000e_clear_hw_cntrs_base(hw);
@@ -2541,22 +2898,42 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
temp = er32(IAC);
temp = er32(ICRXOC);
+ /* Clear PHY statistics registers */
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ hw->phy.ops.read_phy_reg(hw, HV_SCC_UPPER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_SCC_LOWER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_ECOL_UPPER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_ECOL_LOWER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_MCC_UPPER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_MCC_LOWER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_LATECOL_UPPER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_LATECOL_LOWER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_COLC_UPPER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_COLC_LOWER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_DC_UPPER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_DC_LOWER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_TNCRS_UPPER, &phy_data);
+ hw->phy.ops.read_phy_reg(hw, HV_TNCRS_LOWER, &phy_data);
+ }
}
static struct e1000_mac_operations ich8_mac_ops = {
+ .id_led_init = e1000e_id_led_init,
.check_mng_mode = e1000_check_mng_mode_ich8lan,
.check_for_link = e1000e_check_for_copper_link,
- .cleanup_led = e1000_cleanup_led_ich8lan,
+ /* cleanup_led dependent on mac type */
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
.get_bus_info = e1000_get_bus_info_ich8lan,
.get_link_up_info = e1000_get_link_up_info_ich8lan,
- .led_on = e1000_led_on_ich8lan,
- .led_off = e1000_led_off_ich8lan,
+ /* led_on dependent on mac type */
+ /* led_off dependent on mac type */
.update_mc_addr_list = e1000e_update_mc_addr_list_generic,
.reset_hw = e1000_reset_hw_ich8lan,
.init_hw = e1000_init_hw_ich8lan,
.setup_link = e1000_setup_link_ich8lan,
.setup_physical_interface= e1000_setup_copper_link_ich8lan,
+ /* id_led_init dependent on mac type */
};
static struct e1000_phy_operations ich8_phy_ops = {
@@ -2595,6 +2972,7 @@ struct e1000_info e1000_ich8_info = {
| FLAG_HAS_FLASH
| FLAG_APME_IN_WUC,
.pba = 8,
+ .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
@@ -2613,6 +2991,7 @@ struct e1000_info e1000_ich9_info = {
| FLAG_HAS_FLASH
| FLAG_APME_IN_WUC,
.pba = 10,
+ .max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
@@ -2631,6 +3010,25 @@ struct e1000_info e1000_ich10_info = {
| FLAG_HAS_FLASH
| FLAG_APME_IN_WUC,
.pba = 10,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
+
+struct e1000_info e1000_pch_info = {
+ .mac = e1000_pchlan,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_RX_CSUM_ENABLED
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .pba = 26,
+ .max_hw_frame_size = 4096,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 18a4f5902f3..be6d9e99037 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -378,6 +378,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
mac->get_link_status = 0;
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = e1000_link_stall_workaround_hv(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/*
* Check if there was DownShift, must be checked
* immediately after link-up
@@ -1406,6 +1412,38 @@ s32 e1000e_id_led_init(struct e1000_hw *hw)
}
/**
+ * e1000e_setup_led_generic - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored.
+ **/
+s32 e1000e_setup_led_generic(struct e1000_hw *hw)
+{
+ u32 ledctl;
+
+ if (hw->mac.ops.setup_led != e1000e_setup_led_generic)
+ return -E1000_ERR_CONFIG;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ ledctl = er32(LEDCTL);
+ hw->mac.ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+ E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ ew32(LEDCTL, ledctl);
+ } else if (hw->phy.media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->mac.ledctl_mode1);
+ }
+
+ return 0;
+}
+
+/**
* e1000e_cleanup_led_generic - Set LED config to default operation
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index ca82f19a7ed..677f60490f6 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -48,7 +48,7 @@
#include "e1000.h"
-#define DRV_VERSION "0.3.3.4-k4"
+#define DRV_VERSION "1.0.2-k2"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -62,6 +62,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_ich8lan] = &e1000_ich8_info,
[board_ich9lan] = &e1000_ich9_info,
[board_ich10lan] = &e1000_ich10_info,
+ [board_pchlan] = &e1000_pch_info,
};
#ifdef DEBUG
@@ -2255,8 +2256,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TARC(1), tarc);
}
- e1000e_config_collision_dist(hw);
-
/* Setup Transmit Descriptor Settings for eop descriptor */
adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
@@ -2269,6 +2268,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TCTL, tctl);
+ e1000e_config_collision_dist(hw);
+
adapter->tx_queue_len = adapter->netdev->tx_queue_len;
}
@@ -2308,6 +2309,23 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
if (adapter->flags2 & FLAG2_CRC_STRIPPING)
rctl |= E1000_RCTL_SECRC;
+ /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
+ if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
+ u16 phy_data;
+
+ e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
+ phy_data &= 0xfff8;
+ phy_data |= (1 << 2);
+ e1e_wphy(hw, PHY_REG(770, 26), phy_data);
+
+ e1e_rphy(hw, 22, &phy_data);
+ phy_data &= 0x0fff;
+ phy_data |= (1 << 14);
+ e1e_wphy(hw, 0x10, 0x2823);
+ e1e_wphy(hw, 0x11, 0x0003);
+ e1e_wphy(hw, 22, phy_data);
+ }
+
/* Setup buffer sizes */
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
@@ -2751,23 +2769,25 @@ void e1000e_reset(struct e1000_adapter *adapter)
/*
* flow control settings
*
- * The high water mark must be low enough to fit one full frame
+ * The high water mark must be low enough to fit two full frames
* (or the size used for early receive) above it in the Rx FIFO.
* Set it to the lower of:
* - 90% of the Rx FIFO size, and
* - the full Rx FIFO size minus the early receive size (for parts
* with ERT support assuming ERT set to E1000_ERT_2048), or
- * - the full Rx FIFO size minus one full frame
+ * - the full Rx FIFO size minus two full frames
*/
- if (adapter->flags & FLAG_HAS_ERT)
+ if ((adapter->flags & FLAG_HAS_ERT) &&
+ (adapter->netdev->mtu > ETH_DATA_LEN))
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - (E1000_ERT_2048 << 3)));
else
hwm = min(((pba << 10) * 9 / 10),
- ((pba << 10) - adapter->max_frame_size));
+ ((pba << 10) - (2 * adapter->max_frame_size)));
- fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
- fc->low_water = fc->high_water - 8;
+ fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+ fc->low_water = (fc->high_water - (2 * adapter->max_frame_size));
+ fc->low_water &= E1000_FCRTL_RTL; /* 8-byte granularity */
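For concreteness, a rough recomputation of the new watermarks under assumed numbers: pba = 26 (the PCH value set later in this patch) and a 1522-byte max_frame_size. This is a standalone illustration, not driver code, and the 0xFFF8 masks merely stand in for E1000_FCRTH_RTH/E1000_FCRTL_RTL:

#include <stdio.h>

int main(void)
{
	unsigned int pba = 26, max_frame = 1522;
	unsigned int fifo = pba << 10;			/* 26624 bytes */
	unsigned int ninety = fifo * 9 / 10;		/* 23961 */
	unsigned int two_frames = fifo - 2 * max_frame;	/* 23580 */
	unsigned int hwm = ninety < two_frames ? ninety : two_frames;
	unsigned int high = hwm & 0xFFF8;		/* 23576 */
	unsigned int low = (high - 2 * max_frame) & 0xFFF8;	/* 20528 */

	printf("high_water=%u low_water=%u\n", high, low);
	return 0;
}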
if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
fc->pause_time = 0xFFFF;
@@ -2787,6 +2807,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000_get_hw_control(adapter);
ew32(WUC, 0);
+ if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)
+ e1e_wphy(&adapter->hw, BM_WUC, 0);
if (mac->ops.init_hw(hw))
e_err("Hardware Error\n");
@@ -2799,7 +2821,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000e_reset_adaptive(hw);
e1000_get_phy_info(hw);
- if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
+ if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
+ !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
u16 phy_data = 0;
/*
* speed up time to link by disabling smart power down, ignore
@@ -2826,6 +2849,8 @@ int e1000e_up(struct e1000_adapter *adapter)
e1000_configure_msix(adapter);
e1000_irq_enable(adapter);
+ netif_wake_queue(adapter->netdev);
+
/* fire a link change interrupt to start the watchdog */
ew32(ICS, E1000_ICS_LSC);
return 0;
@@ -2848,7 +2873,7 @@ void e1000e_down(struct e1000_adapter *adapter)
ew32(RCTL, rctl & ~E1000_RCTL_EN);
/* flush and sleep below */
- netif_tx_stop_all_queues(netdev);
+ netif_stop_queue(netdev);
/* disable transmits in the hardware */
tctl = er32(TCTL);
@@ -3072,6 +3097,8 @@ static int e1000_open(struct net_device *netdev)
if (test_bit(__E1000_TESTING, &adapter->state))
return -EBUSY;
+ netif_carrier_off(netdev);
+
/* allocate transmit descriptors */
err = e1000e_setup_tx_resources(adapter);
if (err)
@@ -3128,7 +3155,7 @@ static int e1000_open(struct net_device *netdev)
e1000_irq_enable(adapter);
- netif_tx_start_all_queues(netdev);
+ netif_start_queue(netdev);
/* fire a link status change interrupt to start the watchdog */
ew32(ICS, E1000_ICS_LSC);
@@ -3262,6 +3289,7 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ u16 phy_data;
/*
* Prevent stats update while adapter is being reset, or if the pci
@@ -3281,11 +3309,34 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.roc += er32(ROC);
adapter->stats.mpc += er32(MPC);
- adapter->stats.scc += er32(SCC);
- adapter->stats.ecol += er32(ECOL);
- adapter->stats.mcc += er32(MCC);
- adapter->stats.latecol += er32(LATECOL);
- adapter->stats.dc += er32(DC);
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
+ adapter->stats.scc += phy_data;
+
+ e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
+ e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
+ adapter->stats.ecol += phy_data;
+
+ e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
+ adapter->stats.mcc += phy_data;
+
+ e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
+ e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
+ adapter->stats.latecol += phy_data;
+
+ e1e_rphy(hw, HV_DC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_DC_LOWER, &phy_data);
+ adapter->stats.dc += phy_data;
+ } else {
+ adapter->stats.scc += er32(SCC);
+ adapter->stats.ecol += er32(ECOL);
+ adapter->stats.mcc += er32(MCC);
+ adapter->stats.latecol += er32(LATECOL);
+ adapter->stats.dc += er32(DC);
+ }
adapter->stats.xonrxc += er32(XONRXC);
adapter->stats.xontxc += er32(XONTXC);
adapter->stats.xoffrxc += er32(XOFFRXC);
@@ -3303,13 +3354,28 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
hw->mac.tx_packet_delta = er32(TPT);
adapter->stats.tpt += hw->mac.tx_packet_delta;
- hw->mac.collision_delta = er32(COLC);
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
+ hw->mac.collision_delta = phy_data;
+ } else {
+ hw->mac.collision_delta = er32(COLC);
+ }
adapter->stats.colc += hw->mac.collision_delta;
adapter->stats.algnerrc += er32(ALGNERRC);
adapter->stats.rxerrc += er32(RXERRC);
- if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583))
- adapter->stats.tncrs += er32(TNCRS);
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
+ e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
+ adapter->stats.tncrs += phy_data;
+ } else {
+ if ((hw->mac.type != e1000_82574) &&
+ (hw->mac.type != e1000_82583))
+ adapter->stats.tncrs += er32(TNCRS);
+ }
adapter->stats.cexterr += er32(CEXTERR);
adapter->stats.tsctc += er32(TSCTC);
adapter->stats.tsctfc += er32(TSCTFC);
@@ -3598,7 +3664,6 @@ static void e1000_watchdog_task(struct work_struct *work)
phy->ops.cfg_on_link_up(hw);
netif_carrier_on(netdev);
- netif_tx_wake_all_queues(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
@@ -3612,7 +3677,6 @@ static void e1000_watchdog_task(struct work_struct *work)
printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
adapter->netdev->name);
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
@@ -3649,6 +3713,8 @@ link_up:
*/
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
}
}
@@ -3850,7 +3916,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = map[0] + offset;
+ buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
count++;
len -= size;
@@ -3881,7 +3947,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = map[f + 1] + offset;
+ buffer_info->dma = map[f] + offset;
len -= size;
offset += size;
@@ -4145,7 +4211,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
if (count) {
e1000_tx_queue(adapter, tx_flags, count);
- netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
@@ -4206,27 +4271,17 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
struct e1000_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
- (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- e_err("Invalid MTU setting\n");
+ /* Jumbo frame support */
+ if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+ e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
- /* Jumbo frame size limits */
- if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
- if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
- e_err("Jumbo Frames not supported.\n");
- return -EINVAL;
- }
- if (adapter->hw.phy.type == e1000_phy_ife) {
- e_err("Jumbo Frames not supported.\n");
- return -EINVAL;
- }
- }
-
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
- if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
- e_err("MTU > 9216 not supported.\n");
+ /* Supported frame sizes */
+ if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
+ (max_frame > adapter->max_hw_frame_size)) {
+ e_err("Unsupported MTU setting\n");
return -EINVAL;
}
@@ -4346,6 +4401,81 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
+static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 i, mac_reg;
+ u16 phy_reg;
+ int retval = 0;
+
+ /* copy MAC RARs to PHY RARs */
+ for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
+ mac_reg = er32(RAL(i));
+ e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
+ e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
+ mac_reg = er32(RAH(i));
+ e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
+ e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF));
+ }
+
+ /* copy MAC MTA to PHY MTA */
+ for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
+ mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
+ e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF));
+ e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF));
+ }
+
+ /* configure PHY Rx Control register */
+ e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg);
+ mac_reg = er32(RCTL);
+ if (mac_reg & E1000_RCTL_UPE)
+ phy_reg |= BM_RCTL_UPE;
+ if (mac_reg & E1000_RCTL_MPE)
+ phy_reg |= BM_RCTL_MPE;
+ phy_reg &= ~(BM_RCTL_MO_MASK);
+ if (mac_reg & E1000_RCTL_MO_3)
+ phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
+ << BM_RCTL_MO_SHIFT);
+ if (mac_reg & E1000_RCTL_BAM)
+ phy_reg |= BM_RCTL_BAM;
+ if (mac_reg & E1000_RCTL_PMCF)
+ phy_reg |= BM_RCTL_PMCF;
+ mac_reg = er32(CTRL);
+ if (mac_reg & E1000_CTRL_RFCE)
+ phy_reg |= BM_RCTL_RFCE;
+ e1e_wphy(&adapter->hw, BM_RCTL, phy_reg);
+
+ /* enable PHY wakeup in MAC register */
+ ew32(WUFC, wufc);
+ ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
+
+ /* configure and enable PHY wakeup in PHY registers */
+ e1e_wphy(&adapter->hw, BM_WUFC, wufc);
+ e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+
+ /* activate PHY wakeup */
+ retval = hw->phy.ops.acquire_phy(hw);
+ if (retval) {
+ e_err("Could not acquire PHY\n");
+ return retval;
+ }
+ e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
+ retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
+ if (retval) {
+ e_err("Could not read PHY page 769\n");
+ goto out;
+ }
+ phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
+ retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+ if (retval)
+ e_err("Could not set PHY Host Wakeup bit\n");
+out:
+ hw->phy.ops.release_phy(hw);
+
+ return retval;
+}
+
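Each 32-bit RAL/RAH value copied above is split into two 16-bit PHY writes; a trivial standalone check of that split with a made-up register value (illustrative only, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t mac_reg = 0x12345678;	/* hypothetical RAL(i) contents */

	assert((uint16_t)(mac_reg & 0xFFFF) == 0x5678);		/* BM_RAR_L half */
	assert((uint16_t)((mac_reg >> 16) & 0xFFFF) == 0x1234);	/* BM_RAR_M half */
	return 0;
}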
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4388,8 +4518,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
#define E1000_CTRL_ADVD3WUC 0x00100000
/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
- ctrl |= E1000_CTRL_ADVD3WUC |
- E1000_CTRL_EN_PHY_PWR_MGMT;
+ ctrl |= E1000_CTRL_ADVD3WUC;
+ if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
+ ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
ew32(CTRL, ctrl);
if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
@@ -4407,8 +4538,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
/* Allow time for pending master requests to run */
e1000e_disable_pcie_master(&adapter->hw);
- ew32(WUC, E1000_WUC_PME_EN);
- ew32(WUFC, wufc);
+ if ((adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) &&
+ !(hw->mac.ops.check_mng_mode(hw))) {
+ /* enable wakeup by the PHY */
+ retval = e1000_init_phy_wakeup(adapter, wufc);
+ if (retval)
+ return retval;
+ } else {
+ /* enable wakeup by the MAC */
+ ew32(WUFC, wufc);
+ ew32(WUC, E1000_WUC_PME_EN);
+ }
} else {
ew32(WUC, 0);
ew32(WUFC, 0);
@@ -4551,8 +4691,37 @@ static int e1000_resume(struct pci_dev *pdev)
}
e1000e_power_up_phy(adapter);
+
+ /* report the system wakeup cause from S3/S4 */
+ if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
+ u16 phy_data;
+
+ e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
+ if (phy_data) {
+ e_info("PHY Wakeup cause - %s\n",
+ phy_data & E1000_WUS_EX ? "Unicast Packet" :
+ phy_data & E1000_WUS_MC ? "Multicast Packet" :
+ phy_data & E1000_WUS_BC ? "Broadcast Packet" :
+ phy_data & E1000_WUS_MAG ? "Magic Packet" :
+ phy_data & E1000_WUS_LNKC ? "Link Status Change" :
+ "other");
+ }
+ e1e_wphy(&adapter->hw, BM_WUS, ~0);
+ } else {
+ u32 wus = er32(WUS);
+ if (wus) {
+ e_info("MAC Wakeup cause - %s\n",
+ wus & E1000_WUS_EX ? "Unicast Packet" :
+ wus & E1000_WUS_MC ? "Multicast Packet" :
+ wus & E1000_WUS_BC ? "Broadcast Packet" :
+ wus & E1000_WUS_MAG ? "Magic Packet" :
+ wus & E1000_WUS_LNKC ? "Link Status Change" :
+ "other");
+ }
+ ew32(WUS, ~0);
+ }
+
e1000e_reset(adapter);
- ew32(WUS, ~0);
e1000_init_manageability(adapter);
@@ -4842,6 +5011,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->flags2 = ei->flags2;
adapter->hw.adapter = adapter;
adapter->hw.mac.type = ei->mac;
+ adapter->max_hw_frame_size = ei->max_hw_frame_size;
adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
mmio_start = pci_resource_start(pdev, 0);
@@ -4997,6 +5167,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* APME bit in EEPROM is mapped to WUC.APME */
eeprom_data = er32(WUC);
eeprom_apme_mask = E1000_WUC_APME;
+ if (eeprom_data & E1000_WUC_PHY_WAKE)
+ adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
(adapter->hw.bus.func == 1))
@@ -5037,15 +5209,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (!(adapter->flags & FLAG_HAS_AMT))
e1000_get_hw_control(adapter);
- /* tell the stack to leave us alone until e1000_open() is called */
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err)
goto err_register;
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
e1000_print_device_info(adapter);
return 0;
@@ -5199,6 +5370,11 @@ static struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
+
{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index e909f96698e..1342e0b1815 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -427,6 +427,8 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
e1000_validate_option(&crc_stripping, &opt, adapter);
if (crc_stripping == OPTION_ENABLED)
adapter->flags2 |= FLAG2_CRC_STRIPPING;
+ } else {
+ adapter->flags2 |= FLAG2_CRC_STRIPPING;
}
}
{ /* Kumeran Lock Loss Workaround */
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index dc4a9cba6a7..e23459cf3d0 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -37,6 +37,9 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw);
static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
u16 *data, bool read);
+static u32 e1000_get_phy_addr_for_hv_page(u32 page);
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] =
@@ -54,6 +57,55 @@ static const u16 e1000_igp_2_cable_length_table[] =
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_igp_2_cable_length_table)
+#define BM_PHY_REG_PAGE(offset) \
+ ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
+#define BM_PHY_REG_NUM(offset) \
+ ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
+ (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
+ ~MAX_PHY_REG_ADDRESS)))
+
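The two macros above split a combined offset back into its page and register number. Assuming the usual e1000e header values PHY_PAGE_SHIFT = 5 and MAX_PHY_REG_ADDRESS = 0x1F (an assumption, not confirmed by this hunk), PHY_REG(769, 25), which this patch writes in the HV PHY workaround, decodes as follows in a standalone sketch:

#include <assert.h>

int main(void)
{
	unsigned int offset = (769 << 5) | 25;	/* PHY_REG(769, 25) = 24633 */

	assert((offset >> 5) == 769);		/* BM_PHY_REG_PAGE() result */
	assert((offset & 0x1F) == 25);		/* low bits of BM_PHY_REG_NUM() */
	return 0;
}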
+#define HV_INTC_FC_PAGE_START 768
+#define I82578_ADDR_REG 29
+#define I82577_ADDR_REG 16
+#define I82577_CFG_REG 22
+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
+#define I82577_CTRL_REG 23
+#define I82577_CTRL_DOWNSHIFT_MASK (7 << 10)
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2 18
+#define I82577_PHY_STATUS_2 26
+#define I82577_PHY_DIAG_STATUS 31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82577_PHY_STATUS2_MDIX 0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400
+#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* BM PHY Copper Specific Control 1 */
+#define BM_CS_CTRL1 16
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS 17
+#define BM_CS_STATUS_LINK_UP 0x0400
+#define BM_CS_STATUS_RESOLVED 0x0800
+#define BM_CS_STATUS_SPEED_MASK 0xC000
+#define BM_CS_STATUS_SPEED_1000 0x8000
+
+#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
+
/**
* e1000e_check_reset_block_generic - Check if PHY reset is blocked
* @hw: pointer to the HW structure
@@ -82,23 +134,48 @@ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
s32 e1000e_get_phy_id(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
+ s32 ret_val = 0;
u16 phy_id;
+ u16 retry_count = 0;
- ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
- if (ret_val)
- return ret_val;
+ if (!(phy->ops.read_phy_reg))
+ goto out;
- phy->id = (u32)(phy_id << 16);
- udelay(20);
- ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
- if (ret_val)
- return ret_val;
+ while (retry_count < 2) {
+ ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ goto out;
- phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
- phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+ phy->id = (u32)(phy_id << 16);
+ udelay(20);
+ ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ goto out;
- return 0;
+ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+ if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
+ goto out;
+
+ /*
+ * If the PHY ID is still unknown, we may have an 82577i
+ * without link. We will try again after setting Slow
+ * MDIC mode. No harm in trying again in this case since
+ * the PHY ID is unknown at this point anyway.
+ */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
+ if (ret_val)
+ goto out;
+
+ retry_count++;
+ }
+out:
+ /* Revert to MDIO fast mode, if applicable */
+ if (retry_count)
+ ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
+
+ return ret_val;
}
/**
@@ -410,6 +487,43 @@ s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
}
/**
+ * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ /* Enable CRS on TX. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_phy_reg(hw, I82577_CFG_REG, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
+
+ /* Enable downshift */
+ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
+
+ ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Set number of link attempts before downshift */
+ ret_val = phy->ops.read_phy_reg(hw, I82577_CTRL_REG, &phy_data);
+ if (ret_val)
+ goto out;
+ phy_data &= ~I82577_CTRL_DOWNSHIFT_MASK;
+ ret_val = phy->ops.write_phy_reg(hw, I82577_CTRL_REG, phy_data);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link
* @hw: pointer to the HW structure
*
@@ -427,8 +541,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* For newer PHYs this bit is downshift enable */
- if (phy->type == e1000_phy_m88)
+ /* For BM PHY this bit is downshift enable */
+ if (phy->type != e1000_phy_bm)
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
/*
@@ -520,10 +634,27 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
/* Commit the changes. */
ret_val = e1000e_commit_phy(hw);
- if (ret_val)
+ if (ret_val) {
hw_dbg(hw, "Error committing the PHY changes\n");
+ return ret_val;
+ }
- return ret_val;
+ if (phy->type == e1000_phy_82578) {
+ ret_val = phy->ops.read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* 82578 PHY - set the downshift count to 1x. */
+ phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
+ phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
+ ret_val = phy->ops.write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return 0;
}
/**
@@ -1251,6 +1382,8 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
switch (phy->type) {
case e1000_phy_m88:
case e1000_phy_gg82563:
+ case e1000_phy_82578:
+ case e1000_phy_82577:
offset = M88E1000_PHY_SPEC_STATUS;
mask = M88E1000_PSSR_DOWNSHIFT;
break;
@@ -1886,6 +2019,12 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
case BME1000_E_PHY_ID_R2:
phy_type = e1000_phy_bm;
break;
+ case I82578_E_PHY_ID:
+ phy_type = e1000_phy_82578;
+ break;
+ case I82577_E_PHY_ID:
+ phy_type = e1000_phy_82577;
+ break;
default:
phy_type = e1000_phy_unknown;
break;
@@ -2181,11 +2320,16 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
u16 *data, bool read)
{
s32 ret_val;
- u16 reg = ((u16)offset) & PHY_REG_MASK;
+ u16 reg = BM_PHY_REG_NUM(offset);
u16 phy_reg = 0;
u8 phy_acquired = 1;
+ /* Gig must be disabled for MDIO accesses to page 800 */
+ if ((hw->mac.type == e1000_pchlan) &&
+ (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+ hw_dbg(hw, "Attempting to access page 800 while gig enabled\n");
+
ret_val = hw->phy.ops.acquire_phy(hw);
if (ret_val) {
phy_acquired = 0;
@@ -2289,3 +2433,524 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
return 0;
}
+
+s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
+{
+ s32 ret_val = 0;
+ u16 data = 0;
+
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
+ hw->phy.addr = 1;
+ ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ hw->phy.ops.release_phy(hw);
+ return ret_val;
+ }
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
+ (0x2180 | (slow << 10)));
+
+ /* dummy read when reverting to fast mode - throw away result */
+ if (!slow)
+ e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
+
+ hw->phy.ops.release_phy(hw);
+
+ return ret_val;
+}
+
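As the comment in e1000_set_mdio_slow_mode_hv() notes, page 769 register 16 takes 0x2580 for slow mode and 0x2180 for fast mode; the (slow << 10) term simply toggles bit 10. A quick standalone check, illustrative only:

#include <assert.h>

int main(void)
{
	unsigned int base = 0x2180;

	assert((base | (1u << 10)) == 0x2580);	/* slow == true  */
	assert((base | (0u << 10)) == 0x2180);	/* slow == false */
	return 0;
}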
+/**
+ * e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires the semaphore, if necessary, then reads the PHY register at
+ * offset and stores the retrieved information in data. Releases any
+ * acquired semaphore before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ bool in_slow_mode = false;
+
+ /* Workaround failure in MDIO access while cable is disconnected */
+ if ((hw->phy.type == e1000_phy_82577) &&
+ !(er32(STATUS) & E1000_STATUS_LU)) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
+ if (ret_val)
+ goto out;
+
+ in_slow_mode = true;
+ }
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
+ data, true);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ data, true);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ goto out;
+
+ hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ if ((hw->phy.type != e1000_phy_82578) ||
+ ((reg != I82578_ADDR_REG) &&
+ (reg != I82578_ADDR_REG + 1))) {
+ u32 phy_addr = hw->phy.addr;
+
+ hw->phy.addr = 1;
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (page << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ hw->phy.ops.release_phy(hw);
+ goto out;
+ }
+ hw->phy.addr = phy_addr;
+ }
+ }
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+ data);
+ hw->phy.ops.release_phy(hw);
+
+out:
+ /* Revert to MDIO fast mode, if applicable */
+ if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
+ ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires the semaphore, if necessary, then writes the data to the PHY
+ * register at the offset. Releases any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ bool in_slow_mode = false;
+
+ /* Workaround failure in MDIO access while cable is disconnected */
+ if ((hw->phy.type == e1000_phy_82577) &&
+ !(er32(STATUS) & E1000_STATUS_LU)) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
+ if (ret_val)
+ goto out;
+
+ in_slow_mode = true;
+ }
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
+ &data, false);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ &data, false);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ goto out;
+
+ hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ /*
+ * Workaround MDIO accesses being disabled after entering IEEE Power
+ * Down (whenever bit 11 of the PHY Control register is set)
+ */
+ if ((hw->phy.type == e1000_phy_82578) &&
+ (hw->phy.revision >= 1) &&
+ (hw->phy.addr == 2) &&
+ ((MAX_PHY_REG_ADDRESS & reg) == 0) &&
+ (data & (1 << 11))) {
+ u16 data2 = 0x7EFF;
+ hw->phy.ops.release_phy(hw);
+ ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
+ &data2, false);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ if ((hw->phy.type != e1000_phy_82578) ||
+ ((reg != I82578_ADDR_REG) &&
+ (reg != I82578_ADDR_REG + 1))) {
+ u32 phy_addr = hw->phy.addr;
+
+ hw->phy.addr = 1;
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (page << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ hw->phy.ops.release_phy(hw);
+ goto out;
+ }
+ hw->phy.addr = phy_addr;
+ }
+ }
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+ data);
+ hw->phy.ops.release_phy(hw);
+
+out:
+ /* Revert to MDIO fast mode, if applicable */
+ if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
+ ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
+ * @page: page to be accessed
+ **/
+static u32 e1000_get_phy_addr_for_hv_page(u32 page)
+{
+ u32 phy_addr = 2;
+
+ if (page >= HV_INTC_FC_PAGE_START)
+ phy_addr = 1;
+
+ return phy_addr;
+}
+
+/**
+ * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to be read or written
+ * @read: determines if the operation is a read or a write
+ *
+ * Acquires the semaphore, if necessary, then reads or writes the PHY
+ * register at offset, storing the retrieved information in data on a read.
+ * Releases any acquired semaphores before exiting. Note that these
+ * registers are accessed indirectly through the address port and data port.
+ **/
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+ u32 addr_reg = 0;
+ u32 data_reg = 0;
+ u8 phy_acquired = 1;
+
+ /* This handles the difference between the desktop and mobile PHYs */
+ addr_reg = (hw->phy.type == e1000_phy_82578) ?
+ I82578_ADDR_REG : I82577_ADDR_REG;
+ data_reg = addr_reg + 1;
+
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val) {
+ hw_dbg(hw, "Could not acquire PHY\n");
+ phy_acquired = 0;
+ goto out;
+ }
+
+ /* All operations in this function are phy address 2 */
+ hw->phy.addr = 2;
+
+ /* masking with 0x3F to remove the page from offset */
+ ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
+ if (ret_val) {
+ hw_dbg(hw, "Could not write PHY the HV address register\n");
+ goto out;
+ }
+
+ /* Read or write the data value next */
+ if (read)
+ ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data);
+ else
+ ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
+
+ if (ret_val) {
+ hw_dbg(hw, "Could not read data value from HV data register\n");
+ goto out;
+ }
+
+out:
+ if (phy_acquired == 1)
+ hw->phy.ops.release_phy(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_link_stall_workaround_hv - Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * This function works around a Si bug where the link partner can get
+ * a link up indication before the PHY does. If small packets are sent
+ * by the link partner they can be placed in the packet buffer without
+ * being properly accounted for by the PHY and will stall preventing
+ * further packets from being received. The workaround is to clear the
+ * packet buffer after the PHY detects link up.
+ **/
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 data;
+
+ if (hw->phy.type != e1000_phy_82578)
+ goto out;
+
+ /* check if link is up and at 1Gbps */
+ ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data);
+ if (ret_val)
+ goto out;
+
+ data &= BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK;
+
+ if (data != (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ goto out;
+
+ mdelay(200);
+
+ /* flush the packets in the fifo buffer */
+ ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL,
+ HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL,
+ HV_MUX_DATA_CTRL_GEN_TO_MAC);
+
+out:
+ return ret_val;
+}
+
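The workaround above only fires when BM_CS_STATUS reports link up, speed/duplex resolved and 1000 Mb/s. Using the constants defined earlier in this patch, a standalone sketch with a made-up status value shows how the mask-and-compare works:

#include <assert.h>

#define BM_CS_STATUS_LINK_UP	0x0400
#define BM_CS_STATUS_RESOLVED	0x0800
#define BM_CS_STATUS_SPEED_MASK	0xC000
#define BM_CS_STATUS_SPEED_1000	0x8000

int main(void)
{
	unsigned int data = 0x8E58;	/* hypothetical BM_CS_STATUS read */

	data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
		BM_CS_STATUS_SPEED_MASK;
	assert(data == (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
			BM_CS_STATUS_SPEED_1000));	/* workaround path taken */
	return 0;
}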
+/**
+ * e1000_check_polarity_82577 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 on success, -E1000_ERR_PHY (-2) on failure.
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Waits for link and returns 0
+ * if link comes up successfully, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ ret_val = phy->ops.read_phy_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_phy_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Clear Auto-Crossover to force MDI manually. 82577 requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX;
+ phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX;
+
+ ret_val = phy->ops.write_phy_reg(hw, I82577_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ goto out;
+
+ hw_dbg(hw, "I82577_PHY_CTRL_2: %X\n", phy_data);
+
+ udelay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ hw_dbg(hw, "Waiting for forced speed/duplex link on 82577 phy\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ hw_dbg(hw, "Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length and the local and remote receiver status.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link) {
+ hw_dbg(hw, "Phy info is only valid if link is up\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = e1000_check_polarity_82577(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data);
+ if (ret_val)
+ goto out;
+
+ phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false;
+
+ if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+ I82577_PHY_STATUS2_SPEED_1000MBPS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_phy_reg(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+ goto out;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy_cable_length field.
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, length;
+
+ ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+ if (ret_val)
+ goto out;
+
+ length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+
+ if (length == E1000_CABLE_LENGTH_UNDEFINED)
+ ret_val = E1000_ERR_PHY;
+
+ phy->cable_length = length;
+
+out:
+ return ret_val;
+}
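e1000_get_cable_length_82577() above simply isolates bits 9:2 of the diagnostic status register. A standalone illustration with a made-up register value; the literal masks mirror I82577_DSTATUS_CABLE_LENGTH and its shift from this patch:

#include <assert.h>

int main(void)
{
	unsigned int phy_data = 0x0190;		/* hypothetical diag status read */
	unsigned int length = (phy_data & 0x03FC) >> 2;

	assert(length == 100);			/* extracted cable length value */
	return 0;
}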
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 16a41389575..78952f8324e 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -268,7 +268,7 @@ struct ehea_qp_init_attr {
};
/*
- * Event Queue attributes, passed as paramter
+ * Event Queue attributes, passed as parameter
*/
struct ehea_eq_attr {
u32 type;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b22dab9153f..147c4b088fb 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -3261,7 +3261,7 @@ static ssize_t ehea_probe_port(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ehea_adapter *adapter = dev->driver_data;
+ struct ehea_adapter *adapter = dev_get_drvdata(dev);
struct ehea_port *port;
struct device_node *eth_dn = NULL;
int i;
@@ -3316,7 +3316,7 @@ static ssize_t ehea_remove_port(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ehea_adapter *adapter = dev->driver_data;
+ struct ehea_adapter *adapter = dev_get_drvdata(dev);
struct ehea_port *port;
int i;
u32 logical_port_id;
@@ -3404,7 +3404,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
adapter->pd = EHEA_PD_ID;
- dev->dev.driver_data = adapter;
+ dev_set_drvdata(&dev->dev, adapter);
/* initialize adapter and ports */
@@ -3468,7 +3468,7 @@ out:
static int __devexit ehea_remove(struct of_device *dev)
{
- struct ehea_adapter *adapter = dev->dev.driver_data;
+ struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
int i;
for (i = 0; i < EHEA_MAX_PORTS; i++)
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 9080f07da8f..8005b602f77 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -661,8 +661,6 @@ static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
netif_stop_queue(netdev);
- netdev->trans_start = jiffies;
-
spin_unlock_irqrestore(&enic->wq_lock[0], flags);
return NETDEV_TX_OK;
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 5210bb1027c..19b7dd98394 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -194,6 +194,7 @@ static void __init eql_setup(struct net_device *dev)
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 5; /* Hands them off fast */
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
static int eql_open(struct net_device *dev)
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 91a9b1a3376..ceb6a9c357a 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -811,7 +811,7 @@ static int ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len > ETHOC_BUFSIZ)) {
priv->stats.tx_errors++;
- return -EMSGSIZE;
+ goto out;
}
entry = priv->cur_tx % priv->num_tx;
@@ -840,9 +840,9 @@ static int ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
dev->trans_start = jiffies;
- dev_kfree_skb(skb);
-
spin_unlock_irq(&priv->lock);
+out:
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 1a685a04d4b..1e972328140 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -873,7 +873,7 @@ static int ewrk3_queue_pkt (struct sk_buff *skb, struct net_device *dev)
err_out:
ENABLE_IRQs;
spin_unlock_irq (&lp->hw_lock);
- return 1;
+ return NETDEV_TX_BUSY;
}
/*
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 682e7f0b558..0f19b743749 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -86,8 +86,7 @@ static unsigned char fec_mac_default[] = {
#endif
#endif /* CONFIG_M5272 */
-/* Forward declarations of some structures to support different PHYs
-*/
+/* Forward declarations of some structures to support different PHYs */
typedef struct {
uint mii_data;
@@ -123,8 +122,7 @@ typedef struct {
#error "FEC: descriptor ring size constants too large"
#endif
-/* Interrupt events/masks.
-*/
+/* Interrupt events/masks. */
#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
@@ -165,7 +163,7 @@ typedef struct {
*/
struct fec_enet_private {
/* Hardware registers of the FEC device */
- volatile fec_t *hwp;
+ void __iomem *hwp;
struct net_device *netdev;
@@ -174,16 +172,20 @@ struct fec_enet_private {
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
unsigned char *tx_bounce[TX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
ushort skb_cur;
ushort skb_dirty;
- /* CPM dual port RAM relative addresses.
- */
+ /* CPM dual port RAM relative addresses */
dma_addr_t bd_dma;
- cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
- cbd_t *tx_bd_base;
- cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
- cbd_t *dirty_tx; /* The ring entries to be free()ed. */
+ /* Address of Rx and Tx buffers */
+ struct bufdesc *rx_bd_base;
+ struct bufdesc *tx_bd_base;
+ /* The next free ring entry */
+ struct bufdesc *cur_rx, *cur_tx;
+ /* The ring entries to be free()ed */
+ struct bufdesc *dirty_tx;
+
uint tx_full;
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
spinlock_t hw_lock;
@@ -209,17 +211,13 @@ struct fec_enet_private {
int full_duplex;
};
-static int fec_enet_open(struct net_device *dev);
-static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
-static void fec_set_mac_address(struct net_device *dev);
/* MII processing. We keep this as simple as possible. Requests are
@@ -241,19 +239,16 @@ static mii_list_t *mii_tail;
static int mii_queue(struct net_device *dev, int request,
void (*func)(uint, struct net_device *));
-/* Make MII read/write commands for the FEC.
-*/
+/* Make MII read/write commands for the FEC */
#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \
(VAL & 0xffff))
#define mk_mii_end 0
-/* Transmitter timeout.
-*/
-#define TX_TIMEOUT (2*HZ)
+/* Transmitter timeout */
+#define TX_TIMEOUT (2 * HZ)
-/* Register definitions for the PHY.
-*/
+/* Register definitions for the PHY */
#define MII_REG_CR 0 /* Control Register */
#define MII_REG_SR 1 /* Status Register */
@@ -288,18 +283,14 @@ static int mii_queue(struct net_device *dev, int request,
static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct fec_enet_private *fep;
- volatile fec_t *fecp;
- volatile cbd_t *bdp;
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct bufdesc *bdp;
unsigned short status;
unsigned long flags;
- fep = netdev_priv(dev);
- fecp = (volatile fec_t*)dev->base_addr;
-
if (!fep->link) {
/* Link is down or autonegotiation is in progress. */
- return 1;
+ return NETDEV_TX_BUSY;
}
spin_lock_irqsave(&fep->hw_lock, flags);
@@ -307,30 +298,27 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
bdp = fep->cur_tx;
status = bdp->cbd_sc;
-#ifndef final_version
+
if (status & BD_ENET_TX_READY) {
/* Ooops. All transmit buffers are full. Bail out.
* This should not happen, since dev->tbusy should be set.
*/
printk("%s: tx queue full!.\n", dev->name);
spin_unlock_irqrestore(&fep->hw_lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
-#endif
- /* Clear all of the status flags.
- */
+ /* Clear all of the status flags */
status &= ~BD_ENET_TX_STATS;
- /* Set buffer length and buffer pointer.
- */
+ /* Set buffer length and buffer pointer */
bdp->cbd_bufaddr = __pa(skb->data);
bdp->cbd_datlen = skb->len;
/*
- * On some FEC implementations data must be aligned on
- * 4-byte boundaries. Use bounce buffers to copy data
- * and get it aligned. Ugh.
+ * On some FEC implementations data must be aligned on
+ * 4-byte boundaries. Use bounce buffers to copy data
+ * and get it aligned. Ugh.
*/
if (bdp->cbd_bufaddr & FEC_ALIGNMENT) {
unsigned int index;
@@ -339,8 +327,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
}
- /* Save skb pointer.
- */
+ /* Save skb pointer */
fep->tx_skbuff[fep->skb_cur] = skb;
dev->stats.tx_bytes += skb->len;
@@ -349,13 +336,12 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Push the data cache so the CPM does not get stale memory
* data.
*/
- dma_sync_single(NULL, bdp->cbd_bufaddr,
- bdp->cbd_datlen, DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+ FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
*/
-
status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
bdp->cbd_sc = status;
@@ -363,22 +349,20 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
/* Trigger transmission start */
- fecp->fec_x_des_active = 0;
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
- /* If this was the last BD in the ring, start at the beginning again.
- */
- if (status & BD_ENET_TX_WRAP) {
+ /* If this was the last BD in the ring, start at the beginning again. */
+ if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
- } else {
+ else
bdp++;
- }
if (bdp == fep->dirty_tx) {
fep->tx_full = 1;
netif_stop_queue(dev);
}
- fep->cur_tx = (cbd_t *)bdp;
+ fep->cur_tx = bdp;
spin_unlock_irqrestore(&fep->hw_lock, flags);
@@ -390,75 +374,33 @@ fec_timeout(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- printk("%s: transmit timed out.\n", dev->name);
dev->stats.tx_errors++;
-#ifndef final_version
- {
- int i;
- cbd_t *bdp;
-
- printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n",
- (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "",
- (unsigned long)fep->dirty_tx,
- (unsigned long)fep->cur_rx);
-
- bdp = fep->tx_bd_base;
- printk(" tx: %u buffers\n", TX_RING_SIZE);
- for (i = 0 ; i < TX_RING_SIZE; i++) {
- printk(" %08x: %04x %04x %08x\n",
- (uint) bdp,
- bdp->cbd_sc,
- bdp->cbd_datlen,
- (int) bdp->cbd_bufaddr);
- bdp++;
- }
- bdp = fep->rx_bd_base;
- printk(" rx: %lu buffers\n", (unsigned long) RX_RING_SIZE);
- for (i = 0 ; i < RX_RING_SIZE; i++) {
- printk(" %08x: %04x %04x %08x\n",
- (uint) bdp,
- bdp->cbd_sc,
- bdp->cbd_datlen,
- (int) bdp->cbd_bufaddr);
- bdp++;
- }
- }
-#endif
fec_restart(dev, fep->full_duplex);
netif_wake_queue(dev);
}
-/* The interrupt handler.
- * This is called from the MPC core interrupt.
- */
static irqreturn_t
fec_enet_interrupt(int irq, void * dev_id)
{
struct net_device *dev = dev_id;
- volatile fec_t *fecp;
+ struct fec_enet_private *fep = netdev_priv(dev);
uint int_events;
irqreturn_t ret = IRQ_NONE;
- fecp = (volatile fec_t*)dev->base_addr;
-
- /* Get the interrupt events that caused us to be here.
- */
do {
- int_events = fecp->fec_ievent;
- fecp->fec_ievent = int_events;
+ int_events = readl(fep->hwp + FEC_IEVENT);
+ writel(int_events, fep->hwp + FEC_IEVENT);
- /* Handle receive event in its own function.
- */
if (int_events & FEC_ENET_RXF) {
ret = IRQ_HANDLED;
fec_enet_rx(dev);
}
/* Transmit OK, or non-fatal error. Update the buffer
- descriptors. FEC handles all errors, we just discover
- them as part of the transmit process.
- */
+ * descriptors. FEC handles all errors, we just discover
+ * them as part of the transmit process.
+ */
if (int_events & FEC_ENET_TXF) {
ret = IRQ_HANDLED;
fec_enet_tx(dev);
@@ -479,7 +421,7 @@ static void
fec_enet_tx(struct net_device *dev)
{
struct fec_enet_private *fep;
- volatile cbd_t *bdp;
+ struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
@@ -488,7 +430,11 @@ fec_enet_tx(struct net_device *dev)
bdp = fep->dirty_tx;
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
- if (bdp == fep->cur_tx && fep->tx_full == 0) break;
+ if (bdp == fep->cur_tx && fep->tx_full == 0)
+ break;
+
+ dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = 0;
skb = fep->tx_skbuff[fep->skb_dirty];
/* Check for errors. */
@@ -510,31 +456,27 @@ fec_enet_tx(struct net_device *dev)
dev->stats.tx_packets++;
}
-#ifndef final_version
if (status & BD_ENET_TX_READY)
printk("HEY! Enet xmit interrupt and TX_READY.\n");
-#endif
+
/* Deferred means some collisions occurred during transmit,
* but we eventually sent the packet OK.
*/
if (status & BD_ENET_TX_DEF)
dev->stats.collisions++;
- /* Free the sk buffer associated with this last transmit.
- */
+ /* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
fep->tx_skbuff[fep->skb_dirty] = NULL;
fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
- /* Update pointer to next buffer descriptor to be transmitted.
- */
+ /* Update pointer to next buffer descriptor to be transmitted */
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
bdp++;
- /* Since we have freed up a buffer, the ring is no longer
- * full.
+ /* Since we have freed up a buffer, the ring is no longer full
*/
if (fep->tx_full) {
fep->tx_full = 0;
@@ -542,7 +484,7 @@ fec_enet_tx(struct net_device *dev)
netif_wake_queue(dev);
}
}
- fep->dirty_tx = (cbd_t *)bdp;
+ fep->dirty_tx = bdp;
spin_unlock_irq(&fep->hw_lock);
}
@@ -555,9 +497,8 @@ fec_enet_tx(struct net_device *dev)
static void
fec_enet_rx(struct net_device *dev)
{
- struct fec_enet_private *fep;
- volatile fec_t *fecp;
- volatile cbd_t *bdp;
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
ushort pkt_len;
@@ -567,9 +508,6 @@ fec_enet_rx(struct net_device *dev)
flush_cache_all();
#endif
- fep = netdev_priv(dev);
- fecp = (volatile fec_t*)dev->base_addr;
-
spin_lock_irq(&fep->hw_lock);
/* First, grab all of the stats for the incoming packet.
@@ -577,143 +515,121 @@ fec_enet_rx(struct net_device *dev)
*/
bdp = fep->cur_rx;
-while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
-#ifndef final_version
- /* Since we have allocated space to hold a complete frame,
- * the last indicator should be set.
- */
- if ((status & BD_ENET_RX_LAST) == 0)
- printk("FEC ENET: rcv is not +last\n");
-#endif
+ /* Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((status & BD_ENET_RX_LAST) == 0)
+ printk("FEC ENET: rcv is not +last\n");
- if (!fep->opened)
- goto rx_processing_done;
+ if (!fep->opened)
+ goto rx_processing_done;
- /* Check for errors. */
- if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+ /* Check for errors. */
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
BD_ENET_RX_CR | BD_ENET_RX_OV)) {
- dev->stats.rx_errors++;
- if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
- /* Frame too long or too short. */
- dev->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+ /* Frame too long or too short. */
+ dev->stats.rx_length_errors++;
+ }
+ if (status & BD_ENET_RX_NO) /* Frame alignment */
+ dev->stats.rx_frame_errors++;
+ if (status & BD_ENET_RX_CR) /* CRC Error */
+ dev->stats.rx_crc_errors++;
+ if (status & BD_ENET_RX_OV) /* FIFO overrun */
+ dev->stats.rx_fifo_errors++;
}
- if (status & BD_ENET_RX_NO) /* Frame alignment */
+
+ /* Report late collisions as a frame error.
+ * On this error, the BD is closed, but we don't know what we
+ * have in the buffer. So, just drop this frame on the floor.
+ */
+ if (status & BD_ENET_RX_CL) {
+ dev->stats.rx_errors++;
dev->stats.rx_frame_errors++;
- if (status & BD_ENET_RX_CR) /* CRC Error */
- dev->stats.rx_crc_errors++;
- if (status & BD_ENET_RX_OV) /* FIFO overrun */
- dev->stats.rx_fifo_errors++;
- }
+ goto rx_processing_done;
+ }
- /* Report late collisions as a frame error.
- * On this error, the BD is closed, but we don't know what we
- * have in the buffer. So, just drop this frame on the floor.
- */
- if (status & BD_ENET_RX_CL) {
- dev->stats.rx_errors++;
- dev->stats.rx_frame_errors++;
- goto rx_processing_done;
- }
+ /* Process the incoming frame. */
+ dev->stats.rx_packets++;
+ pkt_len = bdp->cbd_datlen;
+ dev->stats.rx_bytes += pkt_len;
+ data = (__u8*)__va(bdp->cbd_bufaddr);
- /* Process the incoming frame.
- */
- dev->stats.rx_packets++;
- pkt_len = bdp->cbd_datlen;
- dev->stats.rx_bytes += pkt_len;
- data = (__u8*)__va(bdp->cbd_bufaddr);
-
- dma_sync_single(NULL, (unsigned long)__pa(data),
- pkt_len - 4, DMA_FROM_DEVICE);
-
- /* This does 16 byte alignment, exactly what we need.
- * The packet length includes FCS, but we don't want to
- * include that when passing upstream as it messes up
- * bridging applications.
- */
- skb = dev_alloc_skb(pkt_len-4);
+ dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
+ DMA_FROM_DEVICE);
- if (skb == NULL) {
- printk("%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- } else {
- skb_put(skb,pkt_len-4); /* Make room */
- skb_copy_to_linear_data(skb, data, pkt_len-4);
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- }
- rx_processing_done:
+ /* This does 16 byte alignment, exactly what we need.
+ * The packet length includes FCS, but we don't want to
+ * include that when passing upstream as it messes up
+ * bridging applications.
+ */
+ skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
- /* Clear the status flags for this buffer.
- */
- status &= ~BD_ENET_RX_STATS;
+ if (unlikely(!skb)) {
+ printk("%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ } else {
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, pkt_len - 4); /* Make room */
+ skb_copy_to_linear_data(skb, data, pkt_len - 4);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ }
- /* Mark the buffer empty.
- */
- status |= BD_ENET_RX_EMPTY;
- bdp->cbd_sc = status;
+ bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
+ DMA_FROM_DEVICE);
+rx_processing_done:
+ /* Clear the status flags for this buffer */
+ status &= ~BD_ENET_RX_STATS;
- /* Update BD pointer to next entry.
- */
- if (status & BD_ENET_RX_WRAP)
- bdp = fep->rx_bd_base;
- else
- bdp++;
+ /* Mark the buffer empty */
+ status |= BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = status;
-#if 1
- /* Doing this here will keep the FEC running while we process
- * incoming frames. On a heavily loaded network, we should be
- * able to keep up at the expense of system resources.
- */
- fecp->fec_r_des_active = 0;
-#endif
- } /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */
- fep->cur_rx = (cbd_t *)bdp;
-
-#if 0
- /* Doing this here will allow us to process all frames in the
- * ring before the FEC is allowed to put more there. On a heavily
- * loaded network, some frames may be lost. Unfortunately, this
- * increases the interrupt overhead since we can potentially work
- * our way back to the interrupt return only to come right back
- * here.
- */
- fecp->fec_r_des_active = 0;
-#endif
+ /* Update BD pointer to next entry */
+ if (status & BD_ENET_RX_WRAP)
+ bdp = fep->rx_bd_base;
+ else
+ bdp++;
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+ }
+ fep->cur_rx = bdp;
spin_unlock_irq(&fep->hw_lock);
}
-
/* called from interrupt context */
static void
fec_enet_mii(struct net_device *dev)
{
struct fec_enet_private *fep;
- volatile fec_t *ep;
mii_list_t *mip;
- uint mii_reg;
fep = netdev_priv(dev);
spin_lock_irq(&fep->mii_lock);
- ep = fep->hwp;
- mii_reg = ep->fec_mii_data;
-
if ((mip = mii_head) == NULL) {
printk("MII and no head!\n");
goto unlock;
}
if (mip->mii_func != NULL)
- (*(mip->mii_func))(mii_reg, dev);
+ (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);
mii_head = mip->mii_next;
mip->mii_next = mii_free;
mii_free = mip;
if ((mip = mii_head) != NULL)
- ep->fec_mii_data = mip->mii_regval;
+ writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);
unlock:
spin_unlock_irq(&fep->mii_lock);
@@ -727,8 +643,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
mii_list_t *mip;
int retval;
- /* Add PHY address to register command.
- */
+ /* Add PHY address to register command */
fep = netdev_priv(dev);
spin_lock_irqsave(&fep->mii_lock, flags);
@@ -745,7 +660,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
mii_tail = mip;
} else {
mii_head = mii_tail = mip;
- fep->hwp->fec_mii_data = regval;
+ writel(regval, fep->hwp + FEC_MII_DATA);
}
} else {
retval = 1;
@@ -1246,11 +1161,8 @@ static void __inline__ fec_phy_ack_intr(void)
static void __inline__ fec_get_mac(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- volatile fec_t *fecp;
unsigned char *iap, tmpaddr[ETH_ALEN];
- fecp = fep->hwp;
-
if (FEC_FLASHMAC) {
/*
* Get MAC address from FLASH.
@@ -1264,8 +1176,8 @@ static void __inline__ fec_get_mac(struct net_device *dev)
(iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
iap = fec_mac_default;
} else {
- *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
- *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
+ *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
+ *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
iap = &tmpaddr[0];
}
@@ -1375,11 +1287,6 @@ static void mii_relink(struct work_struct *work)
fec_restart(dev, duplex);
} else
fec_stop(dev);
-
-#if 0
- enable_irq(fep->mii_irq);
-#endif
-
}
/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
@@ -1388,12 +1295,12 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev)
struct fec_enet_private *fep = netdev_priv(dev);
/*
- ** We cannot queue phy_task twice in the workqueue. It
- ** would cause an endless loop in the workqueue.
- ** Fortunately, if the last mii_relink entry has not yet been
- ** executed now, it will do the job for the current interrupt,
- ** which is just what we want.
- */
+ * We cannot queue phy_task twice in the workqueue. It
+ * would cause an endless loop in the workqueue.
+ * Fortunately, if the last mii_relink entry has not yet been
+ * executed now, it will do the job for the current interrupt,
+ * which is just what we want.
+ */
if (fep->mii_phy_task_queued)
return;
@@ -1424,8 +1331,7 @@ phy_cmd_t const phy_cmd_config[] = {
{ mk_mii_end, }
};
-/* Read remainder of PHY ID.
-*/
+/* Read remainder of PHY ID. */
static void
mii_discover_phy3(uint mii_reg, struct net_device *dev)
{
@@ -1457,17 +1363,14 @@ static void
mii_discover_phy(uint mii_reg, struct net_device *dev)
{
struct fec_enet_private *fep;
- volatile fec_t *fecp;
uint phytype;
fep = netdev_priv(dev);
- fecp = fep->hwp;
if (fep->phy_addr < 32) {
if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {
- /* Got first part of ID, now get remainder.
- */
+ /* Got first part of ID, now get remainder */
fep->phy_id = phytype << 16;
mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
mii_discover_phy3);
@@ -1479,15 +1382,15 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
} else {
printk("FEC: No PHY device found.\n");
/* Disable external MII interface */
- fecp->fec_mii_speed = fep->phy_speed = 0;
+ writel(0, fep->hwp + FEC_MII_SPEED);
+ fep->phy_speed = 0;
#ifdef HAVE_mii_link_interrupt
fec_disable_phy_intr();
#endif
}
}
-/* This interrupt occurs when the PHY detects a link change.
-*/
+/* This interrupt occurs when the PHY detects a link change */
#ifdef HAVE_mii_link_interrupt
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id)
@@ -1497,10 +1400,6 @@ mii_link_interrupt(int irq, void * dev_id)
fec_phy_ack_intr();
-#if 0
- disable_irq(fep->mii_irq); /* disable now, enable later */
-#endif
-
mii_do_cmd(dev, fep->phy->ack_int);
mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
@@ -1508,19 +1407,91 @@ mii_link_interrupt(int irq, void * dev_id)
}
#endif
+static void fec_enet_free_buffers(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ int i;
+ struct sk_buff *skb;
+ struct bufdesc *bdp;
+
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ skb = fep->rx_skbuff[i];
+
+ if (bdp->cbd_bufaddr)
+ dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+ if (skb)
+ dev_kfree_skb(skb);
+ bdp++;
+ }
+
+ bdp = fep->tx_bd_base;
+ for (i = 0; i < TX_RING_SIZE; i++)
+ kfree(fep->tx_bounce[i]);
+}
+
+static int fec_enet_alloc_buffers(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ int i;
+ struct sk_buff *skb;
+ struct bufdesc *bdp;
+
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
+ if (!skb) {
+ fec_enet_free_buffers(dev);
+ return -ENOMEM;
+ }
+ fep->rx_skbuff[i] = skb;
+
+ bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap. */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ bdp = fep->tx_bd_base;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap. */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ return 0;
+}
+
static int
fec_enet_open(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ int ret;
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
- fec_set_mac_address(dev);
+
+ ret = fec_enet_alloc_buffers(dev);
+ if (ret)
+ return ret;
fep->sequence_done = 0;
fep->link = 0;
+ fec_restart(dev, 1);
+
if (fep->phy) {
mii_do_cmd(dev, fep->phy->ack_int);
mii_do_cmd(dev, fep->phy->config);
@@ -1537,21 +1508,17 @@ fec_enet_open(struct net_device *dev)
schedule();
mii_do_cmd(dev, fep->phy->startup);
-
- /* Set the initial link state to true. A lot of hardware
- * based on this device does not implement a PHY interrupt,
- * so we are never notified of link change.
- */
- fep->link = 1;
- } else {
- fep->link = 1; /* lets just try it and see */
- /* no phy, go full duplex, it's most likely a hub chip */
- fec_restart(dev, 1);
}
+ /* Set the initial link state to true. A lot of hardware
+ * based on this device does not implement a PHY interrupt,
+ * so we are never notified of link change.
+ */
+ fep->link = 1;
+
netif_start_queue(dev);
fep->opened = 1;
- return 0; /* Success */
+ return 0;
}
static int
@@ -1559,12 +1526,13 @@ fec_enet_close(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- /* Don't know what to do yet.
- */
+ /* Don't know what to do yet. */
fep->opened = 0;
netif_stop_queue(dev);
fec_stop(dev);
+ fec_enet_free_buffers(dev);
+
return 0;
}
@@ -1583,87 +1551,102 @@ fec_enet_close(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct fec_enet_private *fep;
- volatile fec_t *ep;
+ struct fec_enet_private *fep = netdev_priv(dev);
struct dev_mc_list *dmi;
- unsigned int i, j, bit, data, crc;
+ unsigned int i, j, bit, data, crc, tmp;
unsigned char hash;
- fep = netdev_priv(dev);
- ep = fep->hwp;
+ if (dev->flags & IFF_PROMISC) {
+ tmp = readl(fep->hwp + FEC_R_CNTRL);
+ tmp |= 0x8;
+ writel(tmp, fep->hwp + FEC_R_CNTRL);
+ return;
+ }
- if (dev->flags&IFF_PROMISC) {
- ep->fec_r_cntrl |= 0x0008;
- } else {
+ tmp = readl(fep->hwp + FEC_R_CNTRL);
+ tmp &= ~0x8;
+ writel(tmp, fep->hwp + FEC_R_CNTRL);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Catch all multicast addresses, so set the
+ * filter to all 1's
+ */
+ writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- ep->fec_r_cntrl &= ~0x0008;
+ return;
+ }
- if (dev->flags & IFF_ALLMULTI) {
- /* Catch all multicast addresses, so set the
- * filter to all 1's.
- */
- ep->fec_grp_hash_table_high = 0xffffffff;
- ep->fec_grp_hash_table_low = 0xffffffff;
- } else {
- /* Clear filter and add the addresses in hash register.
- */
- ep->fec_grp_hash_table_high = 0;
- ep->fec_grp_hash_table_low = 0;
-
- dmi = dev->mc_list;
-
- for (j = 0; j < dev->mc_count; j++, dmi = dmi->next)
- {
- /* Only support group multicast for now.
- */
- if (!(dmi->dmi_addr[0] & 1))
- continue;
-
- /* calculate crc32 value of mac address
- */
- crc = 0xffffffff;
-
- for (i = 0; i < dmi->dmi_addrlen; i++)
- {
- data = dmi->dmi_addr[i];
- for (bit = 0; bit < 8; bit++, data >>= 1)
- {
- crc = (crc >> 1) ^
- (((crc ^ data) & 1) ? CRC32_POLY : 0);
- }
- }
-
- /* only upper 6 bits (HASH_BITS) are used
- which point to specific bit in he hash registers
- */
- hash = (crc >> (32 - HASH_BITS)) & 0x3f;
-
- if (hash > 31)
- ep->fec_grp_hash_table_high |= 1 << (hash - 32);
- else
- ep->fec_grp_hash_table_low |= 1 << hash;
+ /* Clear filter and add the addresses in hash register
+ */
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+
+ dmi = dev->mc_list;
+
+ for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
+ /* Only support group multicast for now */
+ if (!(dmi->dmi_addr[0] & 1))
+ continue;
+
+ /* calculate crc32 value of mac address */
+ crc = 0xffffffff;
+
+ for (i = 0; i < dmi->dmi_addrlen; i++) {
+ data = dmi->dmi_addr[i];
+ for (bit = 0; bit < 8; bit++, data >>= 1) {
+ crc = (crc >> 1) ^
+ (((crc ^ data) & 1) ? CRC32_POLY : 0);
}
}
+
+ /* only upper 6 bits (HASH_BITS) are used
+ * which point to a specific bit in the hash registers
+ */
+ hash = (crc >> (32 - HASH_BITS)) & 0x3f;
+
+ if (hash > 31) {
+ tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ tmp |= 1 << (hash - 32);
+ writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ } else {
+ tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+ tmp |= 1 << hash;
+ writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+ }
}
}
-/* Set a MAC change in hardware.
- */
-static void
-fec_set_mac_address(struct net_device *dev)
+/* Set a MAC change in hardware. */
+static int
+fec_set_mac_address(struct net_device *dev, void *p)
{
- volatile fec_t *fecp;
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct sockaddr *addr = p;
- fecp = ((struct fec_enet_private *)netdev_priv(dev))->hwp;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
- /* Set station address. */
- fecp->fec_addr_low = dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
- (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24);
- fecp->fec_addr_high = (dev->dev_addr[5] << 16) |
- (dev->dev_addr[4] << 24);
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
+ (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
+ fep->hwp + FEC_ADDR_HIGH);
+ return 0;
}
+static const struct net_device_ops fec_netdev_ops = {
+ .ndo_open = fec_enet_open,
+ .ndo_stop = fec_enet_close,
+ .ndo_start_xmit = fec_enet_start_xmit,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = fec_timeout,
+ .ndo_set_mac_address = fec_set_mac_address,
+};
+
/*
* XXX: We need to clean up on failure exits here.
*
@@ -1672,17 +1655,13 @@ fec_set_mac_address(struct net_device *dev)
int __init fec_enet_init(struct net_device *dev, int index)
{
struct fec_enet_private *fep = netdev_priv(dev);
- unsigned long mem_addr;
- volatile cbd_t *bdp;
- cbd_t *cbd_base;
- volatile fec_t *fecp;
- int i, j;
+ struct bufdesc *cbd_base;
+ int i;
- /* Allocate memory for buffer descriptors.
- */
- mem_addr = (unsigned long)dma_alloc_coherent(NULL, PAGE_SIZE,
- &fep->bd_dma, GFP_KERNEL);
- if (mem_addr == 0) {
+ /* Allocate memory for buffer descriptors. */
+ cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
+ GFP_KERNEL);
+ if (!cbd_base) {
printk("FEC: allocate descriptor memory failed?\n");
return -ENOMEM;
}
@@ -1690,146 +1669,47 @@ int __init fec_enet_init(struct net_device *dev, int index)
spin_lock_init(&fep->hw_lock);
spin_lock_init(&fep->mii_lock);
- /* Create an Ethernet device instance.
- */
- fecp = (volatile fec_t *)dev->base_addr;
-
fep->index = index;
- fep->hwp = fecp;
+ fep->hwp = (void __iomem *)dev->base_addr;
fep->netdev = dev;
- /* Whack a reset. We should wait for this.
- */
- fecp->fec_ecntrl = 1;
- udelay(10);
-
/* Set the Ethernet address */
#ifdef CONFIG_M5272
fec_get_mac(dev);
#else
{
unsigned long l;
- l = fecp->fec_addr_low;
+ l = readl(fep->hwp + FEC_ADDR_LOW);
dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
- l = fecp->fec_addr_high;
+ l = readl(fep->hwp + FEC_ADDR_HIGH);
dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
}
#endif
- cbd_base = (cbd_t *)mem_addr;
-
- /* Set receive and transmit descriptor base.
- */
+ /* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
fep->tx_bd_base = cbd_base + RX_RING_SIZE;
- fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
- fep->cur_rx = fep->rx_bd_base;
-
- fep->skb_cur = fep->skb_dirty = 0;
-
- /* Initialize the receive buffer descriptors.
- */
- bdp = fep->rx_bd_base;
- for (i=0; i<FEC_ENET_RX_PAGES; i++) {
-
- /* Allocate a page.
- */
- mem_addr = __get_free_page(GFP_KERNEL);
- /* XXX: missing check for allocation failure */
-
- /* Initialize the BD for every fragment in the page.
- */
- for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
- bdp->cbd_bufaddr = __pa(mem_addr);
- mem_addr += FEC_ENET_RX_FRSIZE;
- bdp++;
- }
- }
-
- /* Set the last buffer to wrap.
- */
- bdp--;
- bdp->cbd_sc |= BD_SC_WRAP;
-
- /* ...and the same for transmmit.
- */
- bdp = fep->tx_bd_base;
- for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) {
- if (j >= FEC_ENET_TX_FRPPG) {
- mem_addr = __get_free_page(GFP_KERNEL);
- j = 1;
- } else {
- mem_addr += FEC_ENET_TX_FRSIZE;
- j++;
- }
- fep->tx_bounce[i] = (unsigned char *) mem_addr;
-
- /* Initialize the BD for every fragment in the page.
- */
- bdp->cbd_sc = 0;
- bdp->cbd_bufaddr = 0;
- bdp++;
- }
-
- /* Set the last buffer to wrap.
- */
- bdp--;
- bdp->cbd_sc |= BD_SC_WRAP;
-
- /* Set receive and transmit descriptor base.
- */
- fecp->fec_r_des_start = fep->bd_dma;
- fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t)
- * RX_RING_SIZE;
-
#ifdef HAVE_mii_link_interrupt
fec_request_mii_intr(dev);
#endif
-
- fecp->fec_grp_hash_table_high = 0;
- fecp->fec_grp_hash_table_low = 0;
- fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
- fecp->fec_ecntrl = 2;
- fecp->fec_r_des_active = 0;
-#ifndef CONFIG_M5272
- fecp->fec_hash_table_high = 0;
- fecp->fec_hash_table_low = 0;
-#endif
-
- /* The FEC Ethernet specific entries in the device structure. */
- dev->open = fec_enet_open;
- dev->hard_start_xmit = fec_enet_start_xmit;
- dev->tx_timeout = fec_timeout;
+ /* The FEC Ethernet specific entries in the device structure */
dev->watchdog_timeo = TX_TIMEOUT;
- dev->stop = fec_enet_close;
- dev->set_multicast_list = set_multicast_list;
+ dev->netdev_ops = &fec_netdev_ops;
for (i=0; i<NMII-1; i++)
mii_cmds[i].mii_next = &mii_cmds[i+1];
mii_free = mii_cmds;
- /* setup MII interface */
- fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
- fecp->fec_x_cntrl = 0x00;
-
- /*
- * Set MII speed to 2.5 MHz
- */
+ /* Set MII speed to 2.5 MHz */
fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
/ 2500000) / 2) & 0x3F) << 1;
- fecp->fec_mii_speed = fep->phy_speed;
fec_restart(dev, 0);
- /* Clear and enable interrupts */
- fecp->fec_ievent = 0xffc00000;
- fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
-
/* Queue up command to detect the PHY and initialize the
* remainder of the interface.
*/
@@ -1847,145 +1727,118 @@ int __init fec_enet_init(struct net_device *dev, int index)
static void
fec_restart(struct net_device *dev, int duplex)
{
- struct fec_enet_private *fep;
- volatile cbd_t *bdp;
- volatile fec_t *fecp;
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct bufdesc *bdp;
int i;
- fep = netdev_priv(dev);
- fecp = fep->hwp;
-
- /* Whack a reset. We should wait for this.
- */
- fecp->fec_ecntrl = 1;
+ /* Whack a reset. We should wait for this. */
+ writel(1, fep->hwp + FEC_ECNTRL);
udelay(10);
- /* Clear any outstanding interrupt.
- */
- fecp->fec_ievent = 0xffc00000;
+ /* Clear any outstanding interrupt. */
+ writel(0xffc00000, fep->hwp + FEC_IEVENT);
- /* Set station address.
- */
- fec_set_mac_address(dev);
+ /* Reset all multicast. */
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+#ifndef CONFIG_M5272
+ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
- /* Reset all multicast.
- */
- fecp->fec_grp_hash_table_high = 0;
- fecp->fec_grp_hash_table_low = 0;
+ /* Set maximum receive buffer size. */
+ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
- /* Set maximum receive buffer size.
- */
- fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
-
- /* Set receive and transmit descriptor base.
- */
- fecp->fec_r_des_start = fep->bd_dma;
- fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t)
- * RX_RING_SIZE;
+ /* Set receive and transmit descriptor base. */
+ writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
+ fep->hwp + FEC_X_DES_START);
fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
fep->cur_rx = fep->rx_bd_base;
- /* Reset SKB transmit buffers.
- */
+ /* Reset SKB transmit buffers. */
fep->skb_cur = fep->skb_dirty = 0;
- for (i=0; i<=TX_RING_MOD_MASK; i++) {
- if (fep->tx_skbuff[i] != NULL) {
+ for (i = 0; i <= TX_RING_MOD_MASK; i++) {
+ if (fep->tx_skbuff[i]) {
dev_kfree_skb_any(fep->tx_skbuff[i]);
fep->tx_skbuff[i] = NULL;
}
}
- /* Initialize the receive buffer descriptors.
- */
+ /* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
- for (i=0; i<RX_RING_SIZE; i++) {
+ for (i = 0; i < RX_RING_SIZE; i++) {
- /* Initialize the BD for every fragment in the page.
- */
+ /* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = BD_ENET_RX_EMPTY;
bdp++;
}
- /* Set the last buffer to wrap.
- */
+ /* Set the last buffer to wrap */
bdp--;
bdp->cbd_sc |= BD_SC_WRAP;
- /* ...and the same for transmmit.
- */
+ /* ...and the same for transmit */
bdp = fep->tx_bd_base;
- for (i=0; i<TX_RING_SIZE; i++) {
+ for (i = 0; i < TX_RING_SIZE; i++) {
- /* Initialize the BD for every fragment in the page.
- */
+ /* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
bdp++;
}
- /* Set the last buffer to wrap.
- */
+ /* Set the last buffer to wrap */
bdp--;
bdp->cbd_sc |= BD_SC_WRAP;
- /* Enable MII mode.
- */
+ /* Enable MII mode */
if (duplex) {
- fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;/* MII enable */
- fecp->fec_x_cntrl = 0x04; /* FD enable */
+ /* MII enable / FD enable */
+ writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
+ writel(0x04, fep->hwp + FEC_X_CNTRL);
} else {
- /* MII enable|No Rcv on Xmit */
- fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06;
- fecp->fec_x_cntrl = 0x00;
+ /* MII enable / No Rcv on Xmit */
+ writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
+ writel(0x0, fep->hwp + FEC_X_CNTRL);
}
fep->full_duplex = duplex;
- /* Set MII speed.
- */
- fecp->fec_mii_speed = fep->phy_speed;
+ /* Set MII speed */
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
- /* And last, enable the transmit and receive processing.
- */
- fecp->fec_ecntrl = 2;
- fecp->fec_r_des_active = 0;
+ /* And last, enable the transmit and receive processing */
+ writel(2, fep->hwp + FEC_ECNTRL);
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE);
- /* Enable interrupts we wish to service.
- */
- fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
+ /* Enable interrupts we wish to service */
+ writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
+ fep->hwp + FEC_IMASK);
}
static void
fec_stop(struct net_device *dev)
{
- volatile fec_t *fecp;
- struct fec_enet_private *fep;
-
- fep = netdev_priv(dev);
- fecp = fep->hwp;
+ struct fec_enet_private *fep = netdev_priv(dev);
- /*
- ** We cannot expect a graceful transmit stop without link !!!
- */
- if (fep->link)
- {
- fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */
+ /* We cannot expect a graceful transmit stop without link !!! */
+ if (fep->link) {
+ writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
udelay(10);
- if (!(fecp->fec_ievent & FEC_ENET_GRA))
+ if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
printk("fec_stop : Graceful transmit stop did not complete !\n");
- }
+ }
- /* Whack a reset. We should wait for this.
- */
- fecp->fec_ecntrl = 1;
+ /* Whack a reset. We should wait for this. */
+ writel(1, fep->hwp + FEC_ECNTRL);
udelay(10);
- /* Clear outstanding MII command interrupts.
- */
- fecp->fec_ievent = FEC_ENET_MII;
+ /* Clear outstanding MII command interrupts. */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
- fecp->fec_imask = FEC_ENET_MII;
- fecp->fec_mii_speed = fep->phy_speed;
+ writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
}
static int __devinit
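
The fec.c rewrite above replaces every access through a volatile struct fec pointer with readl()/writel() against a void __iomem * base plus the register offsets now defined in fec.h. A minimal sketch of that access pattern, using invented FOO_* offsets rather than the FEC ones:

	#include <linux/io.h>
	#include <linux/types.h>

	#define FOO_CTRL	0x000		/* hypothetical control register */
	#define FOO_EVENT	0x004		/* hypothetical event register */

	struct foo_priv {
		void __iomem *hwp;		/* ioremap()ed register base */
	};

	static void foo_reset(struct foo_priv *p)
	{
		u32 events;

		writel(1, p->hwp + FOO_CTRL);		/* was: regs->ctrl = 1; */
		events = readl(p->hwp + FOO_EVENT);	/* was: ev = regs->event; */
		writel(events, p->hwp + FOO_EVENT);	/* write-1-to-clear pending events */
	}

Unlike dereferencing a volatile struct, readl()/writel() go through the architecture's MMIO accessors, so ordering and byte-order concerns are handled per platform rather than left to whatever the compiler emits for a volatile load or store.
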
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 76c64c92e19..30b7dd67133 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -20,82 +20,55 @@
* registers in the same peripheral device on different models
* of the ColdFire!
*/
-typedef struct fec {
- unsigned long fec_reserved0;
- unsigned long fec_ievent; /* Interrupt event reg */
- unsigned long fec_imask; /* Interrupt mask reg */
- unsigned long fec_reserved1;
- unsigned long fec_r_des_active; /* Receive descriptor reg */
- unsigned long fec_x_des_active; /* Transmit descriptor reg */
- unsigned long fec_reserved2[3];
- unsigned long fec_ecntrl; /* Ethernet control reg */
- unsigned long fec_reserved3[6];
- unsigned long fec_mii_data; /* MII manage frame reg */
- unsigned long fec_mii_speed; /* MII speed control reg */
- unsigned long fec_reserved4[7];
- unsigned long fec_mib_ctrlstat; /* MIB control/status reg */
- unsigned long fec_reserved5[7];
- unsigned long fec_r_cntrl; /* Receive control reg */
- unsigned long fec_reserved6[15];
- unsigned long fec_x_cntrl; /* Transmit Control reg */
- unsigned long fec_reserved7[7];
- unsigned long fec_addr_low; /* Low 32bits MAC address */
- unsigned long fec_addr_high; /* High 16bits MAC address */
- unsigned long fec_opd; /* Opcode + Pause duration */
- unsigned long fec_reserved8[10];
- unsigned long fec_hash_table_high; /* High 32bits hash table */
- unsigned long fec_hash_table_low; /* Low 32bits hash table */
- unsigned long fec_grp_hash_table_high;/* High 32bits hash table */
- unsigned long fec_grp_hash_table_low; /* Low 32bits hash table */
- unsigned long fec_reserved9[7];
- unsigned long fec_x_wmrk; /* FIFO transmit water mark */
- unsigned long fec_reserved10;
- unsigned long fec_r_bound; /* FIFO receive bound reg */
- unsigned long fec_r_fstart; /* FIFO receive start reg */
- unsigned long fec_reserved11[11];
- unsigned long fec_r_des_start; /* Receive descriptor ring */
- unsigned long fec_x_des_start; /* Transmit descriptor ring */
- unsigned long fec_r_buff_size; /* Maximum receive buff size */
-} fec_t;
+#define FEC_IEVENT 0x004 /* Interrupt event reg */
+#define FEC_IMASK 0x008 /* Interrupt mask reg */
+#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
+#define FEC_ECNTRL 0x024 /* Ethernet control reg */
+#define FEC_MII_DATA 0x040 /* MII manage frame reg */
+#define FEC_MII_SPEED 0x044 /* MII speed control reg */
+#define FEC_MIB_CTRLSTAT 0x064 /* MIB control/status reg */
+#define FEC_R_CNTRL 0x084 /* Receive control reg */
+#define FEC_X_CNTRL 0x0c4 /* Transmit Control reg */
+#define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */
+#define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */
+#define FEC_OPD 0x0ec /* Opcode + Pause duration */
+#define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */
+#define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */
+#define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */
+#define FEC_GRP_HASH_TABLE_LOW 0x124 /* Low 32bits hash table */
+#define FEC_X_WMRK 0x144 /* FIFO transmit water mark */
+#define FEC_R_BOUND 0x14c /* FIFO receive bound reg */
+#define FEC_R_FSTART 0x150 /* FIFO receive start reg */
+#define FEC_R_DES_START 0x180 /* Receive descriptor ring */
+#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */
+#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
#else
-/*
- * Define device register set address map.
- */
-typedef struct fec {
- unsigned long fec_ecntrl; /* Ethernet control reg */
- unsigned long fec_ievent; /* Interrupt even reg */
- unsigned long fec_imask; /* Interrupt mask reg */
- unsigned long fec_ivec; /* Interrupt vec status reg */
- unsigned long fec_r_des_active; /* Receive descriptor reg */
- unsigned long fec_x_des_active; /* Transmit descriptor reg */
- unsigned long fec_reserved1[10];
- unsigned long fec_mii_data; /* MII manage frame reg */
- unsigned long fec_mii_speed; /* MII speed control reg */
- unsigned long fec_reserved2[17];
- unsigned long fec_r_bound; /* FIFO receive bound reg */
- unsigned long fec_r_fstart; /* FIFO receive start reg */
- unsigned long fec_reserved3[4];
- unsigned long fec_x_wmrk; /* FIFO transmit water mark */
- unsigned long fec_reserved4;
- unsigned long fec_x_fstart; /* FIFO transmit start reg */
- unsigned long fec_reserved5[21];
- unsigned long fec_r_cntrl; /* Receive control reg */
- unsigned long fec_max_frm_len; /* Maximum frame length reg */
- unsigned long fec_reserved6[14];
- unsigned long fec_x_cntrl; /* Transmit Control reg */
- unsigned long fec_reserved7[158];
- unsigned long fec_addr_low; /* Low 32bits MAC address */
- unsigned long fec_addr_high; /* High 16bits MAC address */
- unsigned long fec_grp_hash_table_high;/* High 32bits hash table */
- unsigned long fec_grp_hash_table_low; /* Low 32bits hash table */
- unsigned long fec_r_des_start; /* Receive descriptor ring */
- unsigned long fec_x_des_start; /* Transmit descriptor ring */
- unsigned long fec_r_buff_size; /* Maximum receive buff size */
- unsigned long reserved8[9];
- unsigned long fec_fifo_ram[112]; /* FIFO RAM buffer */
-} fec_t;
+#define FEC_ECNTRL 0x000 /* Ethernet control reg */
+#define FEC_IEVENT 0x004 /* Interrupt event reg */
+#define FEC_IMASK 0x008 /* Interrupt mask reg */
+#define FEC_IVEC 0x00c /* Interrupt vec status reg */
+#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
+#define FEC_MII_DATA 0x040 /* MII manage frame reg */
+#define FEC_MII_SPEED 0x044 /* MII speed control reg */
+#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */
+#define FEC_R_FSTART 0x090 /* FIFO receive start reg */
+#define FEC_X_WMRK 0x0a4 /* FIFO transmit water mark */
+#define FEC_X_FSTART 0x0ac /* FIFO transmit start reg */
+#define FEC_R_CNTRL 0x104 /* Receive control reg */
+#define FEC_MAX_FRM_LEN 0x108 /* Maximum frame length reg */
+#define FEC_X_CNTRL 0x144 /* Transmit Control reg */
+#define FEC_ADDR_LOW 0x3c0 /* Low 32bits MAC address */
+#define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */
+#define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */
+#define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */
+#define FEC_R_DES_START 0x3d0 /* Receive descriptor ring */
+#define FEC_X_DES_START 0x3d4 /* Transmit descriptor ring */
+#define FEC_R_BUFF_SIZE 0x3d8 /* Maximum receive buff size */
+#define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */
#endif /* CONFIG_M5272 */
@@ -104,17 +77,17 @@ typedef struct fec {
* Define the buffer descriptor structure.
*/
#ifdef CONFIG_ARCH_MXC
-typedef struct bufdesc {
+struct bufdesc {
unsigned short cbd_datlen; /* Data length */
unsigned short cbd_sc; /* Control and status info */
unsigned long cbd_bufaddr; /* Buffer address */
-} cbd_t;
+};
#else
-typedef struct bufdesc {
+struct bufdesc {
unsigned short cbd_sc; /* Control and status info */
unsigned short cbd_datlen; /* Data length */
unsigned long cbd_bufaddr; /* Buffer address */
-} cbd_t;
+};
#endif
/*
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 8bbe7f61799..7d443405bbe 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -25,6 +25,7 @@
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/netdevice.h>
@@ -43,11 +44,9 @@
#define DRIVER_NAME "mpc52xx-fec"
-#define FEC5200_PHYADDR_NONE (-1)
-#define FEC5200_PHYADDR_7WIRE (-2)
-
/* Private driver data structure */
struct mpc52xx_fec_priv {
+ struct net_device *ndev;
int duplex;
int speed;
int r_irq;
@@ -59,10 +58,11 @@ struct mpc52xx_fec_priv {
int msg_enable;
/* MDIO link details */
- int phy_addr;
- unsigned int phy_speed;
+ unsigned int mdio_speed;
+ struct device_node *phy_node;
struct phy_device *phydev;
enum phy_state link;
+ int seven_wire_mode;
};
@@ -211,85 +211,25 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev)
phy_print_status(phydev);
}
-static int mpc52xx_fec_init_phy(struct net_device *dev)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
- struct phy_device *phydev;
- char phy_id[BUS_ID_SIZE];
-
- snprintf(phy_id, sizeof(phy_id), "%x:%02x",
- (unsigned int)dev->base_addr, priv->phy_addr);
-
- priv->link = PHY_DOWN;
- priv->speed = 0;
- priv->duplex = -1;
-
- phydev = phy_connect(dev, phy_id, &mpc52xx_fec_adjust_link, 0, PHY_INTERFACE_MODE_MII);
- if (IS_ERR(phydev)) {
- dev_err(&dev->dev, "phy_connect failed\n");
- return PTR_ERR(phydev);
- }
- dev_info(&dev->dev, "attached phy %i to driver %s\n",
- phydev->addr, phydev->drv->name);
-
- priv->phydev = phydev;
-
- return 0;
-}
-
-static int mpc52xx_fec_phy_start(struct net_device *dev)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
- int err;
-
- if (priv->phy_addr < 0)
- return 0;
-
- err = mpc52xx_fec_init_phy(dev);
- if (err) {
- dev_err(&dev->dev, "mpc52xx_fec_init_phy failed\n");
- return err;
- }
-
- /* reset phy - this also wakes it from PDOWN */
- phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
- phy_start(priv->phydev);
-
- return 0;
-}
-
-static void mpc52xx_fec_phy_stop(struct net_device *dev)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return;
-
- phy_disconnect(priv->phydev);
- /* power down phy */
- phy_stop(priv->phydev);
- phy_write(priv->phydev, MII_BMCR, BMCR_PDOWN);
-}
-
-static void mpc52xx_fec_phy_hw_init(struct mpc52xx_fec_priv *priv)
-{
- struct mpc52xx_fec __iomem *fec = priv->fec;
-
- if (priv->phydev)
- return;
-
- out_be32(&fec->mii_speed, priv->phy_speed);
-}
-
static int mpc52xx_fec_open(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
int err = -EBUSY;
+ if (priv->phy_node) {
+ priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
+ mpc52xx_fec_adjust_link, 0, 0);
+ if (!priv->phydev) {
+ dev_err(&dev->dev, "of_phy_connect failed\n");
+ return -ENODEV;
+ }
+ phy_start(priv->phydev);
+ }
+
if (request_irq(dev->irq, &mpc52xx_fec_interrupt, IRQF_SHARED,
DRIVER_NAME "_ctrl", dev)) {
dev_err(&dev->dev, "ctrl interrupt request failed\n");
- goto out;
+ goto free_phy;
}
if (request_irq(priv->r_irq, &mpc52xx_fec_rx_interrupt, 0,
DRIVER_NAME "_rx", dev)) {
@@ -311,10 +251,6 @@ static int mpc52xx_fec_open(struct net_device *dev)
goto free_irqs;
}
- err = mpc52xx_fec_phy_start(dev);
- if (err)
- goto free_skbs;
-
bcom_enable(priv->rx_dmatsk);
bcom_enable(priv->tx_dmatsk);
@@ -324,16 +260,18 @@ static int mpc52xx_fec_open(struct net_device *dev)
return 0;
- free_skbs:
- mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
-
free_irqs:
free_irq(priv->t_irq, dev);
free_2irqs:
free_irq(priv->r_irq, dev);
free_ctrl_irq:
free_irq(dev->irq, dev);
- out:
+ free_phy:
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
return err;
}
@@ -352,7 +290,12 @@ static int mpc52xx_fec_close(struct net_device *dev)
free_irq(priv->r_irq, dev);
free_irq(priv->t_irq, dev);
- mpc52xx_fec_phy_stop(dev);
+ if (priv->phydev) {
+ /* power down phy */
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
return 0;
}
@@ -696,7 +639,7 @@ static void mpc52xx_fec_hw_init(struct net_device *dev)
/* set phy speed.
* this can't be done in phy driver, since it needs to be called
* before fec stuff (even on resume) */
- mpc52xx_fec_phy_hw_init(priv);
+ out_be32(&fec->mii_speed, priv->mdio_speed);
}
/**
@@ -732,7 +675,7 @@ static void mpc52xx_fec_start(struct net_device *dev)
rcntrl = FEC_RX_BUFFER_SIZE << 16; /* max frame length */
rcntrl |= FEC_RCNTRL_FCE;
- if (priv->phy_addr != FEC5200_PHYADDR_7WIRE)
+ if (!priv->seven_wire_mode)
rcntrl |= FEC_RCNTRL_MII_MODE;
if (priv->duplex == DUPLEX_FULL)
@@ -798,8 +741,6 @@ static void mpc52xx_fec_stop(struct net_device *dev)
/* Stop FEC */
out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN);
-
- return;
}
/* reset fec and bestcomm tasks */
@@ -817,9 +758,11 @@ static void mpc52xx_fec_reset(struct net_device *dev)
mpc52xx_fec_hw_init(dev);
- phy_stop(priv->phydev);
- phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
- phy_start(priv->phydev);
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
+ phy_start(priv->phydev);
+ }
bcom_fec_rx_reset(priv->rx_dmatsk);
bcom_fec_tx_reset(priv->tx_dmatsk);
@@ -919,8 +862,6 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
struct net_device *ndev;
struct mpc52xx_fec_priv *priv = NULL;
struct resource mem;
- struct device_node *phy_node;
- const phandle *phy_handle;
const u32 *prop;
int prop_size;
@@ -933,6 +874,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
return -ENOMEM;
priv = netdev_priv(ndev);
+ priv->ndev = ndev;
/* Reserve FEC control zone */
rv = of_address_to_resource(op->node, 0, &mem);
@@ -956,6 +898,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops;
ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
ndev->base_addr = mem.start;
+ SET_NETDEV_DEV(ndev, &op->dev);
spin_lock_init(&priv->lock);
@@ -1003,14 +946,9 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
*/
/* Start with safe defaults for link connection */
- priv->phy_addr = FEC5200_PHYADDR_NONE;
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->phy_speed = ((mpc52xx_find_ipb_freq(op->node) >> 20) / 5) << 1;
-
- /* the 7-wire property means don't use MII mode */
- if (of_find_property(op->node, "fsl,7-wire-mode", NULL))
- priv->phy_addr = FEC5200_PHYADDR_7WIRE;
+ priv->mdio_speed = ((mpc52xx_find_ipb_freq(op->node) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
prop = of_get_property(op->node, "current-speed", &prop_size);
@@ -1019,43 +957,23 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
}
- /* If there is a phy handle, setup link to that phy */
- phy_handle = of_get_property(op->node, "phy-handle", &prop_size);
- if (phy_handle && (prop_size >= sizeof(phandle))) {
- phy_node = of_find_node_by_phandle(*phy_handle);
- prop = of_get_property(phy_node, "reg", &prop_size);
- if (prop && (prop_size >= sizeof(u32)))
- if ((*prop >= 0) && (*prop < PHY_MAX_ADDR))
- priv->phy_addr = *prop;
- of_node_put(phy_node);
+ /* If there is a phy handle, then get the PHY node */
+ priv->phy_node = of_parse_phandle(op->node, "phy-handle", 0);
+
+ /* the 7-wire property means don't use MII mode */
+ if (of_find_property(op->node, "fsl,7-wire-mode", NULL)) {
+ priv->seven_wire_mode = 1;
+ dev_info(&ndev->dev, "using 7-wire PHY mode\n");
}
/* Hardware init */
mpc52xx_fec_hw_init(ndev);
-
mpc52xx_fec_reset_stats(ndev);
- SET_NETDEV_DEV(ndev, &op->dev);
-
- /* Register the new network device */
rv = register_netdev(ndev);
if (rv < 0)
goto probe_error;
- /* Now report the link setup */
- switch (priv->phy_addr) {
- case FEC5200_PHYADDR_NONE:
- dev_info(&ndev->dev, "Fixed speed MII link: %i%cD\n",
- priv->speed, priv->duplex ? 'F' : 'H');
- break;
- case FEC5200_PHYADDR_7WIRE:
- dev_info(&ndev->dev, "using 7-wire PHY mode\n");
- break;
- default:
- dev_info(&ndev->dev, "Using PHY at MDIO address %i\n",
- priv->phy_addr);
- }
-
/* We're done ! */
dev_set_drvdata(&op->dev, ndev);
@@ -1065,6 +983,10 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
/* Error handling - free everything that might be allocated */
probe_error:
+ if (priv->phy_node)
+ of_node_put(priv->phy_node);
+ priv->phy_node = NULL;
+
irq_dispose_mapping(ndev->irq);
if (priv->rx_dmatsk)
@@ -1093,6 +1015,10 @@ mpc52xx_fec_remove(struct of_device *op)
unregister_netdev(ndev);
+ if (priv->phy_node)
+ of_node_put(priv->phy_node);
+ priv->phy_node = NULL;
+
irq_dispose_mapping(ndev->irq);
bcom_fec_rx_release(priv->rx_dmatsk);
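
The mpc52xx hunks above drop the hand-rolled phy-handle parsing and attach to the PHY through of_phy_connect() when the interface is opened, disconnecting again on close. A minimal sketch of that open/close pairing under assumed foo_* names (the adjust-link callback body is left empty here):

	#include <linux/netdevice.h>
	#include <linux/of_mdio.h>
	#include <linux/phy.h>

	struct foo_priv {
		struct device_node *phy_node;	/* from of_parse_phandle() at probe time */
		struct phy_device *phydev;
	};

	static void foo_adjust_link(struct net_device *dev) { /* react to link changes */ }

	static int foo_open(struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		if (priv->phy_node) {
			priv->phydev = of_phy_connect(dev, priv->phy_node,
						      foo_adjust_link, 0,
						      PHY_INTERFACE_MODE_MII);
			if (!priv->phydev)
				return -ENODEV;
			phy_start(priv->phydev);
		}
		return 0;
	}

	static int foo_close(struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		if (priv->phydev) {
			phy_stop(priv->phydev);
			phy_disconnect(priv->phydev);
			priv->phydev = NULL;
		}
		return 0;
	}

Keeping only the device_node across opens means the PHY is attached only while the interface is actually up.
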
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index dd9bfa42ac3..fec9f245116 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -14,12 +14,14 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
#include "fec_mpc52xx.h"
struct mpc52xx_fec_mdio_priv {
struct mpc52xx_fec __iomem *regs;
+ int mdio_irqs[PHY_MAX_ADDR];
};
static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
@@ -27,7 +29,7 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
{
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
struct mpc52xx_fec __iomem *fec;
- int tries = 100;
+ int tries = 3;
value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
@@ -38,7 +40,7 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
/* wait for it to finish, this takes about 23 us on lite5200b */
while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
- udelay(5);
+ msleep(1);
if (!tries)
return -ETIMEDOUT;
@@ -64,7 +66,6 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
{
struct device *dev = &of->dev;
struct device_node *np = of->node;
- struct device_node *child = NULL;
struct mii_bus *bus;
struct mpc52xx_fec_mdio_priv *priv;
struct resource res = {};
@@ -85,22 +86,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
bus->write = mpc52xx_fec_mdio_write;
/* setup irqs */
- bus->irq = kmalloc(sizeof(bus->irq[0]) * PHY_MAX_ADDR, GFP_KERNEL);
- if (bus->irq == NULL) {
- err = -ENOMEM;
- goto out_free;
- }
- for (i=0; i<PHY_MAX_ADDR; i++)
- bus->irq[i] = PHY_POLL;
-
- while ((child = of_get_next_child(np, child)) != NULL) {
- int irq = irq_of_parse_and_map(child, 0);
- if (irq != NO_IRQ) {
- const u32 *id = of_get_property(child, "reg", NULL);
- if (id)
- bus->irq[*id] = irq;
- }
- }
+ bus->irq = priv->mdio_irqs;
/* setup registers */
err = of_address_to_resource(np, 0, &res);
@@ -122,7 +108,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
out_be32(&priv->regs->mii_speed,
((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1);
- err = mdiobus_register(bus);
+ err = of_mdiobus_register(bus, np);
if (err)
goto out_unmap;
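
The fec_mpc52xx_phy.c hunks above let of_mdiobus_register() walk the device-tree children and fill in the per-PHY interrupt lines, instead of kmalloc()ing and populating bus->irq by hand. A minimal sketch of registering an MDIO bus that way, with foo_mdio_read()/foo_mdio_write() assumed to be implemented elsewhere:

	#include <linux/of_mdio.h>
	#include <linux/phy.h>

	static int foo_mdio_read(struct mii_bus *bus, int phy_id, int reg);		/* assumed accessor */
	static int foo_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val);	/* assumed accessor */

	static int foo_mdio_register(struct device *dev, struct device_node *np,
				     int *irq_storage /* PHY_MAX_ADDR entries */)
	{
		struct mii_bus *bus = mdiobus_alloc();
		int err;

		if (!bus)
			return -ENOMEM;

		bus->name = "foo_mdio";
		bus->read = foo_mdio_read;
		bus->write = foo_mdio_write;
		bus->parent = dev;
		bus->irq = irq_storage;		/* caller-owned array, no separate kmalloc() */
		snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));

		/* Scans np's children, fills bus->irq[] from the DT and registers the bus. */
		err = of_mdiobus_register(bus, np);
		if (err)
			mdiobus_free(bus);
		return err;
	}
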
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index f9a846b1b92..b60a3041b64 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -77,27 +77,31 @@
* Hardware access:
*/
-#define DEV_NEED_TIMERIRQ 0x000001 /* set the timer irq flag in the irq mask */
-#define DEV_NEED_LINKTIMER 0x000002 /* poll link settings. Relies on the timer irq */
-#define DEV_HAS_LARGEDESC 0x000004 /* device supports jumbo frames and needs packet format 2 */
-#define DEV_HAS_HIGH_DMA 0x000008 /* device supports 64bit dma */
-#define DEV_HAS_CHECKSUM 0x000010 /* device supports tx and rx checksum offloads */
-#define DEV_HAS_VLAN 0x000020 /* device supports vlan tagging and stripping */
-#define DEV_HAS_MSI 0x000040 /* device supports MSI */
-#define DEV_HAS_MSI_X 0x000080 /* device supports MSI-X */
-#define DEV_HAS_POWER_CNTRL 0x000100 /* device supports power savings */
-#define DEV_HAS_STATISTICS_V1 0x000200 /* device supports hw statistics version 1 */
-#define DEV_HAS_STATISTICS_V2 0x000600 /* device supports hw statistics version 2 */
-#define DEV_HAS_STATISTICS_V3 0x000e00 /* device supports hw statistics version 3 */
-#define DEV_HAS_TEST_EXTENDED 0x001000 /* device supports extended diagnostic test */
-#define DEV_HAS_MGMT_UNIT 0x002000 /* device supports management unit */
-#define DEV_HAS_CORRECT_MACADDR 0x004000 /* device supports correct mac address order */
-#define DEV_HAS_COLLISION_FIX 0x008000 /* device supports tx collision fix */
-#define DEV_HAS_PAUSEFRAME_TX_V1 0x010000 /* device supports tx pause frames version 1 */
-#define DEV_HAS_PAUSEFRAME_TX_V2 0x020000 /* device supports tx pause frames version 2 */
-#define DEV_HAS_PAUSEFRAME_TX_V3 0x040000 /* device supports tx pause frames version 3 */
-#define DEV_NEED_TX_LIMIT 0x080000 /* device needs to limit tx */
-#define DEV_HAS_GEAR_MODE 0x100000 /* device supports gear mode */
+#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */
+#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */
+#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */
+#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */
+#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */
+#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and stripping */
+#define DEV_HAS_MSI 0x0000040 /* device supports MSI */
+#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
+#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
+#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
+#define DEV_HAS_STATISTICS_V2 0x0000600 /* device supports hw statistics version 2 */
+#define DEV_HAS_STATISTICS_V3 0x0000e00 /* device supports hw statistics version 3 */
+#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
+#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
+#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
+#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */
+#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */
+#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */
+#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */
+#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */
+#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
+#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */
+#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */
+#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */
+#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */
enum {
NvRegIrqStatus = 0x000,
@@ -343,6 +347,7 @@ enum {
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
#define NVREG_POWERSTATE2_PHY_RESET 0x0004
+#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00
};
/* Big endian: should work, but is untested */
@@ -897,6 +902,12 @@ enum {
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
+/*
+ * Power down phy when interface is down (persists through reboot;
+ * older Linux and other OSes may not power it up again)
+ */
+static int phy_power_down = 0;
+
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
return netdev_priv(dev);
@@ -1017,6 +1028,23 @@ static int using_multi_irqs(struct net_device *dev)
return 1;
}
+static void nv_txrx_gate(struct net_device *dev, bool gate)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 powerstate;
+
+ if (!np->mac_in_use &&
+ (np->driver_data & DEV_HAS_POWER_CNTRL)) {
+ powerstate = readl(base + NvRegPowerState2);
+ if (gate)
+ powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
+ else
+ powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
+ writel(powerstate, base + NvRegPowerState2);
+ }
+}
+
static void nv_enable_irq(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
@@ -1247,14 +1275,7 @@ static int phy_init(struct net_device *dev)
}
}
if (np->phy_model == PHY_MODEL_REALTEK_8201) {
- if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
+ if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
phy_reserved |= PHY_REALTEK_INIT7;
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
@@ -1445,14 +1466,7 @@ static int phy_init(struct net_device *dev)
}
}
if (np->phy_model == PHY_MODEL_REALTEK_8201) {
- if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
- np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
+ if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
phy_reserved |= PHY_REALTEK_INIT7;
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
@@ -1485,7 +1499,10 @@ static int phy_init(struct net_device *dev)
/* restart auto negotiation, power down phy */
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN);
+ mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
+ if (phy_power_down) {
+ mii_control |= BMCR_PDOWN;
+ }
if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
return PHY_ERROR;
}
@@ -3394,12 +3411,14 @@ static void nv_linkchange(struct net_device *dev)
if (!netif_carrier_ok(dev)) {
netif_carrier_on(dev);
printk(KERN_INFO "%s: link up.\n", dev->name);
+ nv_txrx_gate(dev, false);
nv_start_rx(dev);
}
} else {
if (netif_carrier_ok(dev)) {
netif_carrier_off(dev);
printk(KERN_INFO "%s: link down.\n", dev->name);
+ nv_txrx_gate(dev, true);
nv_stop_rx(dev);
}
}
@@ -5327,6 +5346,7 @@ static int nv_open(struct net_device *dev)
mii_rw(dev, np->phyaddr, MII_BMCR,
mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
+ nv_txrx_gate(dev, false);
/* erase previous misconfiguration */
if (np->driver_data & DEV_HAS_POWER_CNTRL)
nv_mac_reset(dev);
@@ -5513,13 +5533,15 @@ static int nv_close(struct net_device *dev)
nv_drain_rxtx(dev);
- if (np->wolenabled) {
+ if (np->wolenabled || !phy_power_down) {
+ nv_txrx_gate(dev, false);
writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
nv_start_rx(dev);
} else {
/* power down phy */
mii_rw(dev, np->phyaddr, MII_BMCR,
mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
+ nv_txrx_gate(dev, true);
}
/* FIXME: power down nic */
@@ -5812,8 +5834,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* take phy and nic out of low power mode */
powerstate = readl(base + NvRegPowerState2);
powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
- if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
- id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
+ if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
pci_dev->revision >= 0xA3)
powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
writel(powerstate, base + NvRegPowerState2);
@@ -5869,14 +5890,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* Limit the number of tx's outstanding for hw bug */
if (id->driver_data & DEV_NEED_TX_LIMIT) {
np->tx_limit = 1;
- if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
- id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
- id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
- id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
- id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
- id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
- id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
- id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
+ if ((id->driver_data & DEV_NEED_TX_LIMIT2) &&
pci_dev->revision >= 0xA2)
np->tx_limit = 0;
}
@@ -6126,7 +6140,8 @@ static int nv_resume(struct pci_dev *pdev)
for (i = 0;i <= np->register_size/sizeof(u32); i++)
writel(np->saved_config_space[i], base+i*sizeof(u32));
- pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
+ if (np->driver_data & DEV_NEED_MSI_FIX)
+ pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
/* restore phy state, including autoneg */
phy_init(dev);
@@ -6175,160 +6190,164 @@ static void nv_shutdown(struct pci_dev *pdev)
static struct pci_device_id pci_tbl[] = {
{ /* nForce Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
+ PCI_DEVICE(0x10DE, 0x01C3),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
},
{ /* nForce2 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
+ PCI_DEVICE(0x10DE, 0x0066),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
},
{ /* nForce3 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
+ PCI_DEVICE(0x10DE, 0x00D6),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
},
{ /* nForce3 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
+ PCI_DEVICE(0x10DE, 0x0086),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
},
{ /* nForce3 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
+ PCI_DEVICE(0x10DE, 0x008C),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
},
{ /* nForce3 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
+ PCI_DEVICE(0x10DE, 0x00E6),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
},
{ /* nForce3 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
+ PCI_DEVICE(0x10DE, 0x00DF),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
},
{ /* CK804 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
+ PCI_DEVICE(0x10DE, 0x0056),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
},
{ /* CK804 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
+ PCI_DEVICE(0x10DE, 0x0057),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
},
{ /* MCP04 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
+ PCI_DEVICE(0x10DE, 0x0037),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
},
{ /* MCP04 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
+ PCI_DEVICE(0x10DE, 0x0038),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
},
{ /* MCP51 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
+ PCI_DEVICE(0x10DE, 0x0268),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
},
{ /* MCP51 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
+ PCI_DEVICE(0x10DE, 0x0269),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
},
{ /* MCP55 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
+ PCI_DEVICE(0x10DE, 0x0372),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
},
{ /* MCP55 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
+ PCI_DEVICE(0x10DE, 0x0373),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
},
{ /* MCP61 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ PCI_DEVICE(0x10DE, 0x03E5),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
},
{ /* MCP61 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ PCI_DEVICE(0x10DE, 0x03E6),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
},
{ /* MCP61 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ PCI_DEVICE(0x10DE, 0x03EE),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
},
{ /* MCP61 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
+ PCI_DEVICE(0x10DE, 0x03EF),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
},
{ /* MCP65 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x0450),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP65 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x0451),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP65 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x0452),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP65 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x0453),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP67 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x054C),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP67 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x054D),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP67 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x054E),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP67 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x054F),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP73 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x07DC),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP73 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x07DD),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP73 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x07DE),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP73 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x07DF),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP77 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x0760),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP77 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x0761),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP77 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x0762),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP77 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x0763),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP79 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x0AB0),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP79 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x0AB1),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP79 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x0AB2),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP79 Ethernet Controller */
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
- .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+ PCI_DEVICE(0x10DE, 0x0AB3),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP89 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0D7D),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
},
{0,},
};
@@ -6367,6 +6386,8 @@ module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
+module_param(phy_power_down, int, 0);
+MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index a9cbc3191a2..b892c3ad9a7 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -36,6 +36,8 @@
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
@@ -752,9 +754,10 @@ static int fs_init_phy(struct net_device *dev)
fep->oldlink = 0;
fep->oldspeed = 0;
fep->oldduplex = -1;
- if(fep->fpi->bus_id)
- phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
+ if(fep->fpi->phy_node)
+ phydev = of_phy_connect(dev, fep->fpi->phy_node,
+ &fs_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
else {
printk("No phy bus ID specified in BSP code\n");
return -EINVAL;
@@ -938,81 +941,6 @@ extern void fs_mii_disconnect(struct net_device *dev);
/**************************************************************************************/
-/* handy pointer to the immap */
-void __iomem *fs_enet_immap = NULL;
-
-static int setup_immap(void)
-{
-#ifdef CONFIG_CPM1
- fs_enet_immap = ioremap(IMAP_ADDR, 0x4000);
- WARN_ON(!fs_enet_immap);
-#elif defined(CONFIG_CPM2)
- fs_enet_immap = cpm2_immr;
-#endif
-
- return 0;
-}
-
-static void cleanup_immap(void)
-{
-#if defined(CONFIG_CPM1)
- iounmap(fs_enet_immap);
-#endif
-}
-
-/**************************************************************************************/
-
-static int __devinit find_phy(struct device_node *np,
- struct fs_platform_info *fpi)
-{
- struct device_node *phynode, *mdionode;
- int ret = 0, len, bus_id;
- const u32 *data;
-
- data = of_get_property(np, "fixed-link", NULL);
- if (data) {
- snprintf(fpi->bus_id, 16, "%x:%02x", 0, *data);
- return 0;
- }
-
- data = of_get_property(np, "phy-handle", &len);
- if (!data || len != 4)
- return -EINVAL;
-
- phynode = of_find_node_by_phandle(*data);
- if (!phynode)
- return -EINVAL;
-
- data = of_get_property(phynode, "reg", &len);
- if (!data || len != 4) {
- ret = -EINVAL;
- goto out_put_phy;
- }
-
- mdionode = of_get_parent(phynode);
- if (!mdionode) {
- ret = -EINVAL;
- goto out_put_phy;
- }
-
- bus_id = of_get_gpio(mdionode, 0);
- if (bus_id < 0) {
- struct resource res;
- ret = of_address_to_resource(mdionode, 0, &res);
- if (ret)
- goto out_put_mdio;
- bus_id = res.start;
- }
-
- snprintf(fpi->bus_id, 16, "%x:%02x", bus_id, *data);
-
-out_put_mdio:
- of_node_put(mdionode);
-out_put_phy:
- of_node_put(phynode);
- return ret;
-}
-
#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
@@ -1062,9 +990,9 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
fpi->rx_copybreak = 240;
fpi->use_napi = 1;
fpi->napi_weight = 17;
-
- ret = find_phy(ofdev->node, fpi);
- if (ret)
+ fpi->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0);
+ if ((!fpi->phy_node) && (!of_get_property(ofdev->node, "fixed-link",
+ NULL)))
goto out_free_fpi;
privsize = sizeof(*fep) +
@@ -1136,6 +1064,7 @@ out_cleanup_data:
out_free_dev:
free_netdev(ndev);
dev_set_drvdata(&ofdev->dev, NULL);
+ of_node_put(fpi->phy_node);
out_free_fpi:
kfree(fpi);
return ret;
@@ -1151,7 +1080,7 @@ static int fs_enet_remove(struct of_device *ofdev)
fep->ops->free_bd(ndev);
fep->ops->cleanup_data(ndev);
dev_set_drvdata(fep->dev, NULL);
-
+ of_node_put(fep->fpi->phy_node);
free_netdev(ndev);
return 0;
}
@@ -1191,25 +1120,12 @@ static struct of_platform_driver fs_enet_driver = {
static int __init fs_init(void)
{
- int r = setup_immap();
- if (r != 0)
- return r;
-
- r = of_register_platform_driver(&fs_enet_driver);
- if (r != 0)
- goto out;
-
- return 0;
-
-out:
- cleanup_immap();
- return r;
+ return of_register_platform_driver(&fs_enet_driver);
}
static void __exit fs_cleanup(void)
{
of_unregister_platform_driver(&fs_enet_driver);
- cleanup_immap();
}
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index 85a4bab7f63..ef01e09781a 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -194,9 +194,4 @@ extern const struct fs_ops fs_scc_ops;
/*******************************************************************/
-/* handy pointer to the immap */
-extern void __iomem *fs_enet_immap;
-
-/*******************************************************************/
-
#endif
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 14e575313c8..ca7bcb8ab3a 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -245,10 +245,6 @@ static void set_multicast_list(struct net_device *dev)
static void restart(struct net_device *dev)
{
-#ifdef CONFIG_DUET
- immap_t *immap = fs_enet_immap;
- u32 cptr;
-#endif
struct fs_enet_private *fep = netdev_priv(dev);
fec_t __iomem *fecp = fep->fec.fecp;
const struct fs_platform_info *fpi = fep->fpi;
@@ -315,36 +311,6 @@ static void restart(struct net_device *dev)
FW(fecp, ievent, 0xffc0);
FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);
- /*
- * adjust to speed (only for DUET & RMII)
- */
-#ifdef CONFIG_DUET
- if (fpi->use_rmii) {
- cptr = in_be32(&immap->im_cpm.cp_cptr);
- switch (fs_get_fec_index(fpi->fs_no)) {
- case 0:
- cptr |= 0x100;
- if (fep->speed == 10)
- cptr |= 0x0000010;
- else if (fep->speed == 100)
- cptr &= ~0x0000010;
- break;
- case 1:
- cptr |= 0x80;
- if (fep->speed == 10)
- cptr |= 0x0000008;
- else if (fep->speed == 100)
- cptr &= ~0x0000008;
- break;
- default:
- BUG(); /* should never happen */
- break;
- }
- out_be32(&immap->im_cpm.cp_cptr, cptr);
- }
-#endif
-
-
FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
/*
* adjust to duplex mode
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 49b6645d7e0..93b481b0e3c 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -22,6 +22,7 @@
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
+#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include "fs_enet.h"
@@ -149,31 +150,12 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
return 0;
}
-static void __devinit add_phy(struct mii_bus *bus, struct device_node *np)
-{
- const u32 *data;
- int len, id, irq;
-
- data = of_get_property(np, "reg", &len);
- if (!data || len != 4)
- return;
-
- id = *data;
- bus->phy_mask &= ~(1 << id);
-
- irq = of_irq_to_resource(np, 0, NULL);
- if (irq != NO_IRQ)
- bus->irq[id] = irq;
-}
-
static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = NULL;
struct mii_bus *new_bus;
struct bb_info *bitbang;
int ret = -ENOMEM;
- int i;
bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
if (!bitbang)
@@ -196,17 +178,10 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
if (!new_bus->irq)
goto out_unmap_regs;
- for (i = 0; i < PHY_MAX_ADDR; i++)
- new_bus->irq[i] = -1;
-
- while ((np = of_get_next_child(ofdev->node, np)))
- if (!strcmp(np->type, "ethernet-phy"))
- add_phy(new_bus, np);
-
new_bus->parent = &ofdev->dev;
dev_set_drvdata(&ofdev->dev, new_bus);
- ret = mdiobus_register(new_bus);
+ ret = of_mdiobus_register(new_bus, ofdev->node);
if (ret)
goto out_free_irqs;
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 28077cc1b94..75a09994d66 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -54,8 +54,7 @@ static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
fec_t __iomem *fecp = fec->fecp;
int i, ret = -1;
- if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
- BUG();
+ BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
/* Add PHY address to register command. */
out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
@@ -79,8 +78,7 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location,
int i;
/* this must never happen */
- if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
- BUG();
+ BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
/* Add PHY address to register command. */
out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
@@ -102,23 +100,6 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
return 0;
}
-static void __devinit add_phy(struct mii_bus *bus, struct device_node *np)
-{
- const u32 *data;
- int len, id, irq;
-
- data = of_get_property(np, "reg", &len);
- if (!data || len != 4)
- return;
-
- id = *data;
- bus->phy_mask &= ~(1 << id);
-
- irq = of_irq_to_resource(np, 0, NULL);
- if (irq != NO_IRQ)
- bus->irq[id] = irq;
-}
-
static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
@@ -165,17 +146,10 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
if (!new_bus->irq)
goto out_unmap_regs;
- for (i = 0; i < PHY_MAX_ADDR; i++)
- new_bus->irq[i] = -1;
-
- while ((np = of_get_next_child(ofdev->node, np)))
- if (!strcmp(np->type, "ethernet-phy"))
- add_phy(new_bus, np);
-
new_bus->parent = &ofdev->dev;
dev_set_drvdata(&ofdev->dev, new_bus);
- ret = mdiobus_register(new_bus);
+ ret = of_mdiobus_register(new_bus, ofdev->node);
if (ret)
goto out_free_irqs;
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index aa1eb88c21f..3af581303ca 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -34,6 +34,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <asm/io.h>
@@ -154,44 +155,6 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
return 0;
}
-/* Allocate an array which provides irq #s for each PHY on the given bus */
-static int *create_irq_map(struct device_node *np)
-{
- int *irqs;
- int i;
- struct device_node *child = NULL;
-
- irqs = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
-
- if (!irqs)
- return NULL;
-
- for (i = 0; i < PHY_MAX_ADDR; i++)
- irqs[i] = PHY_POLL;
-
- while ((child = of_get_next_child(np, child)) != NULL) {
- int irq = irq_of_parse_and_map(child, 0);
- const u32 *id;
-
- if (irq == NO_IRQ)
- continue;
-
- id = of_get_property(child, "reg", NULL);
-
- if (!id)
- continue;
-
- if (*id < PHY_MAX_ADDR && *id >= 0)
- irqs[*id] = irq;
- else
- printk(KERN_WARNING "%s: "
- "%d is not a valid PHY address\n",
- np->full_name, *id);
- }
-
- return irqs;
-}
-
void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
{
const u32 *addr;
@@ -315,7 +278,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
new_bus->priv = (void __force *)regs;
- new_bus->irq = create_irq_map(np);
+ new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
if (NULL == new_bus->irq) {
err = -ENOMEM;
@@ -338,13 +301,17 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
of_device_is_compatible(np, "ucc_geth_phy")) {
#ifdef CONFIG_UCC_GETH
u32 id;
+ static u32 mii_mng_master;
tbipa = &regs->utbipar;
if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
goto err_free_irqs;
- ucc_set_qe_mux_mii_mng(id - 1);
+ if (!mii_mng_master) {
+ mii_mng_master = id;
+ ucc_set_qe_mux_mii_mng(id - 1);
+ }
#else
err = -ENODEV;
goto err_free_irqs;
@@ -384,15 +351,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
out_be32(tbipa, tbiaddr);
- /*
- * The TBIPHY-only buses will find PHYs at every address,
- * so we mask them all but the TBI
- */
- if (of_device_is_compatible(np, "fsl,gianfar-tbi"))
- new_bus->phy_mask = ~(1 << tbiaddr);
-
- err = mdiobus_register(new_bus);
-
+ err = of_mdiobus_register(new_bus, np);
if (err) {
printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
new_bus->name);
@@ -460,10 +419,10 @@ int __init fsl_pq_mdio_init(void)
{
return of_register_platform_driver(&fsl_pq_mdio_driver);
}
+module_init(fsl_pq_mdio_init);
void fsl_pq_mdio_exit(void)
{
of_unregister_platform_driver(&fsl_pq_mdio_driver);
}
-subsys_initcall_sync(fsl_pq_mdio_init);
module_exit(fsl_pq_mdio_exit);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b2c49679bba..4ae1d259fce 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -75,6 +75,7 @@
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -168,17 +169,13 @@ static inline int gfar_uses_fcb(struct gfar_private *priv)
static int gfar_of_init(struct net_device *dev)
{
- struct device_node *phy, *mdio;
- const unsigned int *id;
const char *model;
const char *ctype;
const void *mac_addr;
- const phandle *ph;
u64 addr, size;
int err = 0;
struct gfar_private *priv = netdev_priv(dev);
struct device_node *np = priv->node;
- char bus_name[MII_BUS_ID_SIZE];
const u32 *stash;
const u32 *stash_len;
const u32 *stash_idx;
@@ -264,8 +261,8 @@ static int gfar_of_init(struct net_device *dev)
if (of_get_property(np, "fsl,magic-packet", NULL))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
- ph = of_get_property(np, "phy-handle", NULL);
- if (ph == NULL) {
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!priv->phy_node) {
u32 *fixed_link;
fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL);
@@ -273,57 +270,10 @@ static int gfar_of_init(struct net_device *dev)
err = -ENODEV;
goto err_out;
}
-
- snprintf(priv->phy_bus_id, sizeof(priv->phy_bus_id),
- PHY_ID_FMT, "0", fixed_link[0]);
- } else {
- phy = of_find_node_by_phandle(*ph);
-
- if (phy == NULL) {
- err = -ENODEV;
- goto err_out;
- }
-
- mdio = of_get_parent(phy);
-
- id = of_get_property(phy, "reg", NULL);
-
- of_node_put(phy);
-
- fsl_pq_mdio_bus_name(bus_name, mdio);
- of_node_put(mdio);
- snprintf(priv->phy_bus_id, sizeof(priv->phy_bus_id), "%s:%02x",
- bus_name, *id);
}
/* Find the TBI PHY. If it's not there, we don't support SGMII */
- ph = of_get_property(np, "tbi-handle", NULL);
- if (ph) {
- struct device_node *tbi = of_find_node_by_phandle(*ph);
- struct of_device *ofdev;
- struct mii_bus *bus;
-
- if (!tbi)
- return 0;
-
- mdio = of_get_parent(tbi);
- if (!mdio)
- return 0;
-
- ofdev = of_find_device_by_node(mdio);
-
- of_node_put(mdio);
-
- id = of_get_property(tbi, "reg", NULL);
- if (!id)
- return 0;
-
- of_node_put(tbi);
-
- bus = dev_get_drvdata(&ofdev->dev);
-
- priv->tbiphy = bus->phy_map[*id];
- }
+ priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
return 0;
@@ -529,6 +479,10 @@ static int gfar_probe(struct of_device *ofdev,
register_fail:
iounmap(priv->regs);
regs_fail:
+ if (priv->phy_node)
+ of_node_put(priv->phy_node);
+ if (priv->tbi_node)
+ of_node_put(priv->tbi_node);
free_netdev(dev);
return err;
}
@@ -537,6 +491,11 @@ static int gfar_remove(struct of_device *ofdev)
{
struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
+ if (priv->phy_node)
+ of_node_put(priv->phy_node);
+ if (priv->tbi_node)
+ of_node_put(priv->tbi_node);
+
dev_set_drvdata(&ofdev->dev, NULL);
iounmap(priv->regs);
@@ -690,7 +649,6 @@ static int init_phy(struct net_device *dev)
uint gigabit_support =
priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
SUPPORTED_1000baseT_Full : 0;
- struct phy_device *phydev;
phy_interface_t interface;
priv->oldlink = 0;
@@ -699,21 +657,21 @@ static int init_phy(struct net_device *dev)
interface = gfar_get_interface(dev);
- phydev = phy_connect(dev, priv->phy_bus_id, &adjust_link, 0, interface);
+ if (priv->phy_node) {
+ priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link,
+ 0, interface);
+ if (!priv->phydev) {
+ dev_err(&dev->dev, "error: Could not attach to PHY\n");
+ return -ENODEV;
+ }
+ }
if (interface == PHY_INTERFACE_MODE_SGMII)
gfar_configure_serdes(dev);
- if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(phydev);
- }
-
/* Remove any features not supported by the controller */
- phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
- phydev->advertising = phydev->supported;
-
- priv->phydev = phydev;
+ priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
+ priv->phydev->advertising = priv->phydev->supported;
return 0;
}
@@ -730,10 +688,17 @@ static int init_phy(struct net_device *dev)
static void gfar_configure_serdes(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *tbiphy;
- if (!priv->tbiphy) {
- printk(KERN_WARNING "SGMII mode requires that the device "
- "tree specify a tbi-handle\n");
+ if (!priv->tbi_node) {
+ dev_warn(&dev->dev, "error: SGMII mode requires that the "
+ "device tree specify a tbi-handle\n");
+ return;
+ }
+
+ tbiphy = of_phy_find_device(priv->tbi_node);
+ if (!tbiphy) {
+ dev_err(&dev->dev, "error: Could not get TBI device\n");
return;
}
@@ -743,17 +708,17 @@ static void gfar_configure_serdes(struct net_device *dev)
* everything for us? Resetting it takes the link down and requires
* several seconds for it to come back.
*/
- if (phy_read(priv->tbiphy, MII_BMSR) & BMSR_LSTATUS)
+ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
return;
/* Single clk mode, mii mode off(for serdes communication) */
- phy_write(priv->tbiphy, MII_TBICON, TBICON_CLK_SELECT);
+ phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
- phy_write(priv->tbiphy, MII_ADVERTISE,
+ phy_write(tbiphy, MII_ADVERTISE,
ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
ADVERTISE_1000XPSE_ASYM);
- phy_write(priv->tbiphy, MII_BMCR, BMCR_ANENABLE |
+ phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}
@@ -1242,7 +1207,8 @@ static int gfar_enet_open(struct net_device *dev)
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
- cacheable_memzero(fcb, GMAC_FCB_LEN);
+
+ memset(fcb, 0, GMAC_FCB_LEN);
return fcb;
}
@@ -1885,8 +1851,17 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
if (unlikely(!newskb))
newskb = skb;
- else if (skb)
+ else if (skb) {
+ /*
+ * We need to reset ->data to what it
+ * was before gfar_new_skb() re-aligned
+ * it to an RXBUF_ALIGNMENT boundary
+ * before we put the skb back on the
+ * recycle list.
+ */
+ skb->data = skb->head + NET_SKB_PAD;
__skb_queue_head(&priv->rx_recycle, skb);
+ }
} else {
/* Increment the number of packets */
dev->stats.rx_packets++;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 0642d52aef5..2cd94338b5d 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -259,7 +259,7 @@ extern const char gfar_driver_version[];
(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
| IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \
- | IEVENT_MAG)
+ | IEVENT_MAG | IEVENT_BABR)
#define IMASK_INIT_CLEAR 0x00000000
#define IMASK_BABR 0x80000000
@@ -779,7 +779,8 @@ struct gfar_private {
spinlock_t bflock;
phy_interface_t interface;
- char phy_bus_id[BUS_ID_SIZE];
+ struct device_node *phy_node;
+ struct device_node *tbi_node;
u32 device_flags;
unsigned char rx_csum_enable:1,
extended_hash:1,
@@ -793,7 +794,6 @@ struct gfar_private {
/* PHY stuff */
struct phy_device *phydev;
- struct phy_device *tbiphy;
struct mii_bus *mii_bus;
int oldspeed;
int oldduplex;
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 310ee035067..9d5b62cb30f 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1163,7 +1163,7 @@ static void hamachi_tx_timeout(struct net_device *dev)
hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
/* Trigger an immediate transmit demand. */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
hmp->stats.tx_errors++;
/* Restart the chip's Tx/Rx processes . */
@@ -1280,7 +1280,7 @@ static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev)
status=readw(hmp->base + TxStatus);
if( !(status & 0x0001) || (status & 0x0002))
writew(0x0001, hmp->base + TxCmd);
- return 1;
+ return NETDEV_TX_BUSY;
}
/* Caution: the write order is important here, set the field
@@ -1364,7 +1364,6 @@ static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev)
hmp->tx_full = 1;
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
if (hamachi_debug > 4) {
printk(KERN_DEBUG "%s: Hamachi transmit frame #%d queued in slot %d.\n",
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index bb78c11559c..5e4b7afd068 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -777,7 +777,7 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
return 0;
}
if (bc->skb)
- return -1;
+ return NETDEV_TX_LOCKED;
/* strip KISS byte */
if (skb->len >= HDLCDRV_MAXFLEN+1 || skb->len < 3) {
dev_kfree_skb(skb);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index d509b371a56..5105548ad50 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -274,7 +274,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
if ((newskb = skb_realloc_headroom(skb, AX25_BPQ_HEADER_LEN)) == NULL) {
printk(KERN_WARNING "bpqether: out of memory\n");
kfree_skb(skb);
- return -ENOMEM;
+ return NETDEV_TX_OK;
}
if (skb->sk != NULL)
@@ -294,7 +294,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
if ((dev = bpq_get_ether_dev(dev)) == NULL) {
dev->stats.tx_dropped++;
kfree_skb(skb);
- return -ENODEV;
+ return NETDEV_TX_OK;
}
skb->protocol = ax25_type_trans(skb, dev);
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 61de56e45ee..d034f8ca63c 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -409,7 +409,7 @@ static int hdlcdrv_send_packet(struct sk_buff *skb, struct net_device *dev)
return 0;
}
if (sm->skb)
- return -1;
+ return NETDEV_TX_LOCKED;
netif_stop_queue(dev);
sm->skb = skb;
return 0;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 032c0db4c41..fda2fc83e9a 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -531,7 +531,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
if (!netif_running(dev)) {
printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
if (netif_queue_stopped(dev)) {
@@ -541,7 +541,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
/* 20 sec timeout not reached */
- return 1;
+ return NETDEV_TX_BUSY;
}
printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name,
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index de3f49f991a..8feda9fe829 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -2864,7 +2864,7 @@ static int __init hp100_eisa_probe (struct device *gendev)
printk("hp100: %s: EISA adapter found at 0x%x\n", dev->name,
dev->base_addr);
#endif
- gendev->driver_data = dev;
+ dev_set_drvdata(gendev, dev);
return 0;
out1:
free_netdev(dev);
@@ -2873,7 +2873,7 @@ static int __init hp100_eisa_probe (struct device *gendev)
static int __devexit hp100_eisa_remove (struct device *gendev)
{
- struct net_device *dev = gendev->driver_data;
+ struct net_device *dev = dev_get_drvdata(gendev);
cleanup_dev(dev);
return 0;
}
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index 2e802634d36..3e3528ade25 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -71,6 +71,19 @@ static struct dio_driver hplance_driver = {
.remove = __devexit_p(hplance_remove_one),
};
+static const struct net_device_ops hplance_netdev_ops = {
+ .ndo_open = hplance_open,
+ .ndo_stop = hplance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_multicast_list = lance_set_multicast,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = lance_poll,
+#endif
+};
+
/* Find all the HP Lance boards and initialise them... */
static int __devinit hplance_init_one(struct dio_dev *d,
const struct dio_device_id *ent)
@@ -135,13 +148,7 @@ static void __init hplance_init(struct net_device *dev, struct dio_dev *d)
/* Fill the dev fields */
dev->base_addr = va;
- dev->open = &hplance_open;
- dev->stop = &hplance_close;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = lance_poll;
-#endif
- dev->hard_start_xmit = &lance_start_xmit;
- dev->set_multicast_list = &lance_set_multicast;
+ dev->netdev_ops = &hplance_netdev_ops;
dev->dma = 0;
for (i=0; i<6; i++) {
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 806533c831c..beb84213b67 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1484,7 +1484,7 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
stop_queue:
netif_stop_queue(ndev);
DBG2(dev, "stopped TX queue" NL);
- return 1;
+ return NETDEV_TX_BUSY;
}
/* Tx lock BHs */
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index c25bc0bc0b2..448098d3b39 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -815,7 +815,7 @@ static int ibmlana_close(struct net_device *dev)
static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
{
ibmlana_priv *priv = netdev_priv(dev);
- int retval = 0, tmplen, addr;
+ int tmplen, addr;
unsigned long flags;
tda_t tda;
int baddr;
@@ -824,7 +824,6 @@ static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
the upper layer is in deep desperation and we simply ignore the frame. */
if (priv->txusedcnt >= TXBUFCNT) {
- retval = -EIO;
dev->stats.tx_dropped++;
goto tx_done;
}
@@ -874,7 +873,7 @@ static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
tx_done:
dev_kfree_skb(skb);
- return retval;
+ return NETDEV_TX_OK;
}
/* switch receiver mode. */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 5c6315df86b..0995c438f28 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1203,6 +1203,20 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
return ret;
}
+static const struct net_device_ops ibmveth_netdev_ops = {
+ .ndo_open = ibmveth_open,
+ .ndo_stop = ibmveth_close,
+ .ndo_start_xmit = ibmveth_start_xmit,
+ .ndo_set_multicast_list = ibmveth_set_multicast_list,
+ .ndo_do_ioctl = ibmveth_ioctl,
+ .ndo_change_mtu = ibmveth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ibmveth_poll_controller,
+#endif
+};
+
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
int rc, i;
@@ -1241,7 +1255,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
return -ENOMEM;
adapter = netdev_priv(netdev);
- dev->dev.driver_data = netdev;
+ dev_set_drvdata(&dev->dev, netdev);
adapter->vdev = dev;
adapter->netdev = netdev;
@@ -1265,21 +1279,13 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
memcpy(&adapter->mac_addr, mac_addr_p, 6);
netdev->irq = dev->irq;
- netdev->open = ibmveth_open;
- netdev->stop = ibmveth_close;
- netdev->hard_start_xmit = ibmveth_start_xmit;
- netdev->set_multicast_list = ibmveth_set_multicast_list;
- netdev->do_ioctl = ibmveth_ioctl;
- netdev->ethtool_ops = &netdev_ethtool_ops;
- netdev->change_mtu = ibmveth_change_mtu;
+ netdev->netdev_ops = &ibmveth_netdev_ops;
+ netdev->ethtool_ops = &netdev_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- netdev->poll_controller = ibmveth_poll_controller;
-#endif
netdev->features |= NETIF_F_LLTX;
spin_lock_init(&adapter->stats_lock);
- memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
+ memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
for(i = 0; i<IbmVethNumBufferPools; i++) {
struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
@@ -1335,7 +1341,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
static int __devexit ibmveth_remove(struct vio_dev *dev)
{
- struct net_device *netdev = dev->dev.driver_data;
+ struct net_device *netdev = dev_get_drvdata(&dev->dev);
struct ibmveth_adapter *adapter = netdev_priv(netdev);
int i;
@@ -1368,8 +1374,8 @@ static void ibmveth_proc_unregister_driver(void)
static int ibmveth_show(struct seq_file *seq, void *v)
{
struct ibmveth_adapter *adapter = seq->private;
- char *current_mac = ((char*) &adapter->netdev->dev_addr);
- char *firmware_mac = ((char*) &adapter->mac_addr) ;
+ char *current_mac = (char *) adapter->netdev->dev_addr;
+ char *firmware_mac = (char *) &adapter->mac_addr;
seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
@@ -1468,8 +1474,8 @@ const char * buf, size_t count)
struct ibmveth_buff_pool *pool = container_of(kobj,
struct ibmveth_buff_pool,
kobj);
- struct net_device *netdev =
- container_of(kobj->parent, struct device, kobj)->driver_data;
+ struct net_device *netdev = dev_get_drvdata(
+ container_of(kobj->parent, struct device, kobj));
struct ibmveth_adapter *adapter = netdev_priv(netdev);
long value = simple_strtol(buf, NULL, 10);
long rc;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 60a26300193..96713ef0629 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -156,6 +156,7 @@ static void ifb_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
random_ether_addr(dev->dev_addr);
}
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index eaf97705036..0f16abab256 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -130,6 +130,7 @@ struct e1000_adv_tx_context_desc {
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
/* IPSec Encrypt Enable for ESP */
#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index ad2d319d0f8..3bda3db73f1 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -289,8 +289,9 @@
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
/* Receive Checksum Control */
+#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
-#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* Header split receive */
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index 840782fb573..ed9058eca45 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -140,13 +140,13 @@ static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
struct e1000_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
- if (!mbx->ops.check_for_msg)
+ if (!countdown || !mbx->ops.check_for_msg)
goto out;
while (mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
if (!countdown)
break;
- countdown--;
udelay(mbx->usec_delay);
}
out:
@@ -165,13 +165,13 @@ static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
struct e1000_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
- if (!mbx->ops.check_for_ack)
+ if (!countdown || !mbx->ops.check_for_ack)
goto out;
while (mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
if (!countdown)
break;
- countdown--;
udelay(mbx->usec_delay);
}
out:
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 3228a862031..ebe4b616db8 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -80,7 +80,7 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
-#define IGP01E1000_PSSR_MDIX 0x0008
+#define IGP01E1000_PSSR_MDIX 0x0800
#define IGP01E1000_PSSR_SPEED_MASK 0xC000
#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
#define IGP02E1000_PHY_CHANNEL_NUM 4
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 0bd7728fe46..6e5924511e4 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -142,6 +142,7 @@ enum {
#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
/* Split and Replication RX Control - RW */
/*
* Convenience macros
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 4e8464b9df2..b2c98dea9ee 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -137,11 +137,17 @@ struct igb_buffer {
};
};
-struct igb_queue_stats {
+struct igb_tx_queue_stats {
u64 packets;
u64 bytes;
};
+struct igb_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 drops;
+};
+
struct igb_ring {
struct igb_adapter *adapter; /* backlink */
void *desc; /* descriptor ring memory */
@@ -167,12 +173,13 @@ struct igb_ring {
union {
/* TX */
struct {
- struct igb_queue_stats tx_stats;
+ struct igb_tx_queue_stats tx_stats;
bool detect_tx_hung;
};
/* RX */
struct {
- struct igb_queue_stats rx_stats;
+ struct igb_rx_queue_stats rx_stats;
+ u64 rx_queue_drops;
struct napi_struct napi;
int set_itr;
struct igb_ring *buddy;
@@ -238,7 +245,6 @@ struct igb_adapter {
u64 hw_csum_err;
u64 hw_csum_good;
u32 alloc_rx_buff_failed;
- bool rx_csum;
u32 gorc;
u64 gorc_old;
u16 rx_ps_hdr_size;
@@ -286,6 +292,7 @@ struct igb_adapter {
#define IGB_FLAG_DCA_ENABLED (1 << 1)
#define IGB_FLAG_QUAD_PORT_A (1 << 2)
#define IGB_FLAG_NEED_CTX_IDX (1 << 3)
+#define IGB_FLAG_RX_CSUM_DISABLED (1 << 4)
enum e1000_state_t {
__IGB_TESTING,
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 27eae49e79c..9598ac09f4b 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -64,6 +64,7 @@ static const struct igb_stats igb_gstrings_stats[] = {
{ "rx_crc_errors", IGB_STAT(stats.crcerrs) },
{ "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) },
{ "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
+ { "rx_queue_drop_packet_count", IGB_STAT(net_stats.rx_fifo_errors) },
{ "rx_missed_errors", IGB_STAT(stats.mpc) },
{ "tx_aborted_errors", IGB_STAT(stats.ecol) },
{ "tx_carrier_errors", IGB_STAT(stats.tncrs) },
@@ -96,9 +97,10 @@ static const struct igb_stats igb_gstrings_stats[] = {
};
#define IGB_QUEUE_STATS_LEN \
- ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues + \
- ((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
- (sizeof(struct igb_queue_stats) / sizeof(u64)))
+ (((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues)* \
+ (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \
+ ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
+ (sizeof(struct igb_tx_queue_stats) / sizeof(u64))))
#define IGB_GLOBAL_STATS_LEN \
sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
@@ -275,13 +277,17 @@ static int igb_set_pauseparam(struct net_device *netdev,
static u32 igb_get_rx_csum(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- return adapter->rx_csum;
+ return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED);
}
static int igb_set_rx_csum(struct net_device *netdev, u32 data)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- adapter->rx_csum = data;
+
+ if (data)
+ adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED;
+ else
+ adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED;
return 0;
}
@@ -293,10 +299,16 @@ static u32 igb_get_tx_csum(struct net_device *netdev)
static int igb_set_tx_csum(struct net_device *netdev, u32 data)
{
- if (data)
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (data) {
netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- else
- netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ if (adapter->hw.mac.type == e1000_82576)
+ netdev->features |= NETIF_F_SCTP_CSUM;
+ } else {
+ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SCTP_CSUM);
+ }
return 0;
}
@@ -1950,7 +1962,8 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
u64 *queue_stat;
- int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
+ int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64);
+ int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64);
int j;
int i;
@@ -1963,14 +1976,14 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
for (j = 0; j < adapter->num_tx_queues; j++) {
int k;
queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
- for (k = 0; k < stat_count; k++)
+ for (k = 0; k < stat_count_tx; k++)
data[i + k] = queue_stat[k];
i += k;
}
for (j = 0; j < adapter->num_rx_queues; j++) {
int k;
queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
- for (k = 0; k < stat_count; k++)
+ for (k = 0; k < stat_count_rx; k++)
data[i + k] = queue_stat[k];
i += k;
}
@@ -2004,6 +2017,8 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_drops", i);
+ p += ETH_GSTRING_LEN;
}
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index e25343588fc..ea17319624a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -942,6 +942,8 @@ int igb_up(struct igb_adapter *adapter)
rd32(E1000_ICR);
igb_irq_enable(adapter);
+ netif_tx_start_all_queues(adapter->netdev);
+
/* Fire a link change interrupt to start the watchdog. */
wr32(E1000_ICS, E1000_ICS_LSC);
return 0;
@@ -994,6 +996,11 @@ void igb_down(struct igb_adapter *adapter)
igb_reset(adapter);
igb_clean_all_tx_rings(adapter);
igb_clean_all_rx_rings(adapter);
+#ifdef CONFIG_IGB_DCA
+
+ /* since we reset the hardware, DCA settings were cleared */
+ igb_setup_dca(adapter);
+#endif
}
void igb_reinit_locked(struct igb_adapter *adapter)
@@ -1343,6 +1350,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ if (adapter->hw.mac.type == e1000_82576)
+ netdev->features |= NETIF_F_SCTP_CSUM;
+
adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
/* before reading the NVM, reset the controller to put the device in a
@@ -1390,8 +1400,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
igb_validate_mdi_setting(hw);
- adapter->rx_csum = 1;
-
/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
* enable the ACPI Magic Packet filter
*/
@@ -1442,22 +1450,18 @@ static int __devinit igb_probe(struct pci_dev *pdev,
* driver. */
igb_get_hw_control(adapter);
- /* tell the stack to leave us alone until igb_open() is called */
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err)
goto err_register;
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
#ifdef CONFIG_IGB_DCA
if (dca_add_requester(&pdev->dev) == 0) {
adapter->flags |= IGB_FLAG_DCA_ENABLED;
dev_info(&pdev->dev, "DCA enabled\n");
- /* Always use CB2 mode, difference is masked
- * in the CB driver. */
- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
igb_setup_dca(adapter);
}
#endif
@@ -1699,6 +1703,8 @@ static int igb_open(struct net_device *netdev)
if (test_bit(__IGB_TESTING, &adapter->state))
return -EBUSY;
+ netif_carrier_off(netdev);
+
/* allocate transmit descriptors */
err = igb_setup_all_tx_resources(adapter);
if (err)
@@ -2231,29 +2237,24 @@ static void igb_configure_rx(struct igb_adapter *adapter)
mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
-
wr32(E1000_MRQC, mrqc);
-
- /* Multiqueue and raw packet checksumming are mutually
- * exclusive. Note that this not the same as TCP/IP
- * checksumming, which works fine. */
- rxcsum = rd32(E1000_RXCSUM);
- rxcsum |= E1000_RXCSUM_PCSD;
- wr32(E1000_RXCSUM, rxcsum);
- } else {
+ } else if (adapter->vfs_allocated_count) {
/* Enable multi-queue for sr-iov */
- if (adapter->vfs_allocated_count)
- wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
- /* Enable Receive Checksum Offload for TCP and UDP */
- rxcsum = rd32(E1000_RXCSUM);
- if (adapter->rx_csum)
- rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE;
- else
- rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE);
-
- wr32(E1000_RXCSUM, rxcsum);
+ wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
}
+ /* Enable Receive Checksum Offload for TCP and UDP */
+ rxcsum = rd32(E1000_RXCSUM);
+ /* Disable raw packet checksumming */
+ rxcsum |= E1000_RXCSUM_PCSD;
+
+ if (adapter->hw.mac.type == e1000_82576)
+ /* Enable Receive Checksum Offload for SCTP */
+ rxcsum |= E1000_RXCSUM_CRCOFL;
+
+ /* Don't need to set TUOFL or IPOFL, they default to 1 */
+ wr32(E1000_RXCSUM, rxcsum);
+
/* Set the default pool for the PF's first queue */
igb_configure_vt_default_pool(adapter);
@@ -2661,7 +2662,6 @@ static void igb_watchdog_task(struct work_struct *work)
}
netif_carrier_on(netdev);
- netif_tx_wake_all_queues(netdev);
igb_ping_all_vfs(adapter);
@@ -2678,7 +2678,6 @@ static void igb_watchdog_task(struct work_struct *work)
printk(KERN_INFO "igb: %s NIC Link is Down\n",
netdev->name);
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
igb_ping_all_vfs(adapter);
@@ -2712,6 +2711,8 @@ link_up:
* (Do the reset outside of interrupt context). */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
}
}
@@ -2895,13 +2896,13 @@ static void igb_set_itr(struct igb_adapter *adapter)
switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
case lowest_latency:
- new_itr = 70000;
+ new_itr = 56; /* aka 70,000 ints/sec */
break;
case low_latency:
- new_itr = 20000; /* aka hwitr = ~200 */
+ new_itr = 196; /* aka 20,000 ints/sec */
break;
case bulk_latency:
- new_itr = 4000;
+ new_itr = 980; /* aka 4,000 ints/sec */
break;
default:
break;
@@ -2920,7 +2921,8 @@ set_itr_now:
* by adding intermediate steps when interrupt rate is
* increasing */
new_itr = new_itr > adapter->itr ?
- min(adapter->itr + (new_itr >> 2), new_itr) :
+ max((new_itr * adapter->itr) /
+ (new_itr + (adapter->itr >> 2)), new_itr) :
new_itr;
/* Don't write the value here; it resets the adapter's
* internal timer, and causes us to delay far longer than
@@ -2929,7 +2931,7 @@ set_itr_now:
* ends up being correct.
*/
adapter->itr = new_itr;
- adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
+ adapter->rx_ring->itr_val = new_itr;
adapter->rx_ring->set_itr = 1;
}
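For reference, a minimal sketch of the unit change behind these constants: the conversion removed above (1000000000 / (new_itr * 256)) implies the throttle register counts the interrupt interval in 256 ns units, so the new values are simply the precomputed register contents. The helper below is illustrative only, not part of the patch:
/* illustrative only: target interrupt rate -> throttle register value,
 * using the 256 ns granularity implied by the removed conversion
 */
static inline unsigned int ints_per_sec_to_itr(unsigned int ints_per_sec)
{
	/* 70,000 -> ~56, 20,000 -> ~196, 4,000 -> ~980 (the patch rounds these) */
	return 1000000000U / (ints_per_sec * 256U);
}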
@@ -3068,11 +3070,15 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
+ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
case cpu_to_be16(ETH_P_IPV6):
/* XXX what about other V6 headers?? */
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
+ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
default:
if (unlikely(net_ratelimit()))
@@ -3133,8 +3139,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = map[count];
- count++;
+ buffer_info->dma = skb_shinfo(skb)->dma_head;
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;
@@ -3158,7 +3163,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;
- return count;
+ return count + 1;
}
static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
@@ -3338,7 +3343,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
if (count) {
igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
skb->len, hdr_len);
- netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
} else {
@@ -3582,8 +3586,35 @@ void igb_update_stats(struct igb_adapter *adapter)
/* Rx Errors */
+ if (hw->mac.type != e1000_82575) {
+ u32 rqdpc_tmp;
+ u64 rqdpc_total = 0;
+ int i;
+ /* Read out drop stats per RX queue. Note that RQDPC (Receive
+ * Queue Drop Packet Count) is only incremented if the DROP_EN
+ * bit is set (in the SRRCTL register for that queue). If the
+ * DROP_EN bit is NOT set, a roughly equivalent count is stored
+ * in RNBC (not on a per-queue basis). Also note that the drop
+ * count reflects a lack of available descriptors.
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
+ adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
+ rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
+ }
+ adapter->net_stats.rx_fifo_errors = rqdpc_total;
+ }
+
+ /* Note that RNBC (Receive No Buffers Count) is not an exact
+ * drop count, as the hardware FIFO might save the day. That is
+ * one of the reasons for saving it in rx_fifo_errors: it is
+ * potentially not a true drop.
+ */
+ adapter->net_stats.rx_fifo_errors += adapter->stats.rnbc;
+
/* RLEC on some newer hardware can be incorrect so build
- * our own version based on RUC and ROC */
+ * our own version based on RUC and ROC */
adapter->net_stats.rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
@@ -3767,11 +3798,15 @@ static void igb_update_tx_dca(struct igb_ring *tx_ring)
static void igb_setup_dca(struct igb_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
int i;
if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
return;
+ /* Always use CB2 mode, difference is masked in the CB driver. */
+ wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
+
for (i = 0; i < adapter->num_tx_queues; i++) {
adapter->tx_ring[i].cpu = -1;
igb_update_tx_dca(&adapter->tx_ring[i]);
@@ -4434,20 +4469,12 @@ static void igb_receive_skb(struct igb_ring *ring, u8 status,
bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
skb_record_rx_queue(skb, ring->queue_index);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- if (vlan_extracted)
- vlan_gro_receive(&ring->napi, adapter->vlgrp,
- le16_to_cpu(rx_desc->wb.upper.vlan),
- skb);
- else
- napi_gro_receive(&ring->napi, skb);
- } else {
- if (vlan_extracted)
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- le16_to_cpu(rx_desc->wb.upper.vlan));
- else
- netif_receive_skb(skb);
- }
+ if (vlan_extracted)
+ vlan_gro_receive(&ring->napi, adapter->vlgrp,
+ le16_to_cpu(rx_desc->wb.upper.vlan),
+ skb);
+ else
+ napi_gro_receive(&ring->napi, skb);
}
static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
@@ -4456,19 +4483,28 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
skb->ip_summed = CHECKSUM_NONE;
/* Ignore Checksum bit is set or checksum is disabled through ethtool */
- if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
+ if ((status_err & E1000_RXD_STAT_IXSM) ||
+ (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED))
return;
/* TCP/UDP checksum error bit is set */
if (status_err &
(E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
+ /*
+ * work around an erratum with SCTP packets where the TCPE (aka
+ * L4E) bit is set incorrectly on 64-byte packets (60 bytes
+ * without CRC); let the stack check the crc32c instead
+ */
+ if (!((adapter->hw.mac.type == e1000_82576) &&
+ (skb->len == 60)))
+ adapter->hw_csum_err++;
/* let the stack verify checksum errors */
- adapter->hw_csum_err++;
return;
}
/* It must be a TCP or UDP packet with a valid checksum */
if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
adapter->hw_csum_good++;
}
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 1dcaa690531..ee17a097d1c 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -133,6 +133,24 @@ static int igbvf_set_pauseparam(struct net_device *netdev,
return -EOPNOTSUPP;
}
+static u32 igbvf_get_rx_csum(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ return !(adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED);
+}
+
+static int igbvf_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ if (data)
+ adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
+ else
+ adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
+
+ return 0;
+}
+
static u32 igbvf_get_tx_csum(struct net_device *netdev)
{
return ((netdev->features & NETIF_F_IP_CSUM) != 0);
@@ -150,8 +168,6 @@ static int igbvf_set_tx_csum(struct net_device *netdev, u32 data)
static int igbvf_set_tso(struct net_device *netdev, u32 data)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
- int i;
- struct net_device *v_netdev;
if (data) {
netdev->features |= NETIF_F_TSO;
@@ -159,24 +175,10 @@ static int igbvf_set_tso(struct net_device *netdev, u32 data)
} else {
netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
- /* disable TSO on all VLANs if they're present */
- if (!adapter->vlgrp)
- goto tso_out;
- for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- v_netdev = vlan_group_get_device(adapter->vlgrp, i);
- if (!v_netdev)
- continue;
-
- v_netdev->features &= ~NETIF_F_TSO;
- v_netdev->features &= ~NETIF_F_TSO6;
- vlan_group_set_device(adapter->vlgrp, i, v_netdev);
- }
}
-tso_out:
dev_info(&adapter->pdev->dev, "TSO is %s\n",
data ? "Enabled" : "Disabled");
- adapter->flags |= FLAG_TSO_FORCE;
return 0;
}
@@ -517,6 +519,8 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
.set_ringparam = igbvf_set_ringparam,
.get_pauseparam = igbvf_get_pauseparam,
.set_pauseparam = igbvf_set_pauseparam,
+ .get_rx_csum = igbvf_get_rx_csum,
+ .set_rx_csum = igbvf_set_rx_csum,
.get_tx_csum = igbvf_get_tx_csum,
.set_tx_csum = igbvf_set_tx_csum,
.get_sg = ethtool_op_get_sg,
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index 4bff35e4687..8e9b67ebbf8 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -45,7 +45,7 @@ struct igbvf_adapter;
/* Interrupt defines */
#define IGBVF_START_ITR 648 /* ~6000 ints/sec */
-/* Interrupt modes, as used by the IntMode paramter */
+/* Interrupt modes, as used by the IntMode parameter */
#define IGBVF_INT_MODE_LEGACY 0
#define IGBVF_INT_MODE_MSI 1
#define IGBVF_INT_MODE_MSIX 2
@@ -286,11 +286,7 @@ struct igbvf_info {
};
/* hardware capability, feature, and workaround flags */
-#define FLAG_HAS_HW_VLAN_FILTER (1 << 0)
-#define FLAG_HAS_JUMBO_FRAMES (1 << 1)
-#define FLAG_MSI_ENABLED (1 << 2)
-#define FLAG_RX_CSUM_ENABLED (1 << 3)
-#define FLAG_TSO_FORCE (1 << 4)
+#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
#define IGBVF_RX_DESC_ADV(R, i) \
(&((((R).desc))[i].rx_desc))
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index b774666ad3c..22aadb7884f 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -58,8 +58,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
static struct igbvf_info igbvf_vf_info = {
.mac = e1000_vfadapt,
- .flags = FLAG_HAS_JUMBO_FRAMES
- | FLAG_RX_CSUM_ENABLED,
+ .flags = 0,
.pba = 10,
.init_ops = e1000_init_function_pointers_vf,
};
@@ -107,8 +106,10 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
skb->ip_summed = CHECKSUM_NONE;
/* Ignore Checksum bit is set or checksum is disabled through ethtool */
- if ((status_err & E1000_RXD_STAT_IXSM))
+ if ((status_err & E1000_RXD_STAT_IXSM) ||
+ (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
return;
+
/* TCP/UDP checksum error bit is set */
if (status_err &
(E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -116,6 +117,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
adapter->hw_csum_err++;
return;
}
+
/* It must be a TCP or UDP packet with a valid checksum */
if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2117,8 +2119,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = map[count];
- count++;
+ buffer_info->dma = skb_shinfo(skb)->dma_head;
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;
@@ -2142,7 +2143,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;
- return count;
+ return count + 1;
}
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
@@ -2268,7 +2269,6 @@ static int igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
if (count) {
igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
skb->len, hdr_len);
- netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */
igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
} else {
@@ -2351,15 +2351,6 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- /* Jumbo frame size limits */
- if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
- if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
- dev_err(&adapter->pdev->dev,
- "Jumbo Frames not supported.\n");
- return -EINVAL;
- }
- }
-
#define MAX_STD_JUMBO_FRAME_SIZE 9234
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index c5593f4665a..e3cfefab670 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -530,7 +530,7 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
* case where the checksum is right the higher layers will still
* drop the packet as appropriate.
*/
- if (eh->h_proto != ntohs(ETH_P_IP))
+ if (eh->h_proto != htons(ETH_P_IP))
return;
ih = (struct iphdr *) ((char *)eh + ETH_HLEN);
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index dd9318f1949..dfc2541bb55 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -514,7 +514,7 @@ enum ipg_regs {
#define IPG_DMALIST_ALIGN_PAD 0x07
#define IPG_MULTICAST_HASHTABLE_SIZE 0x40
-/* Number of miliseconds to wait after issuing a software reset.
+/* Number of milliseconds to wait after issuing a software reset.
* 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation.
*/
#define IPG_AC_RESETWAIT 0x05
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index e6317557a53..f7638422142 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -17,6 +17,51 @@ config IRTTY_SIR
If unsure, say Y.
+config BFIN_SIR
+ tristate "Blackfin SIR on UART"
+ depends on BLACKFIN && IRDA
+ default n
+ help
+ Say Y here if you want to enable the SIR function on Blackfin
+ UART devices.
+
+ To activate this driver you can start irattach like:
+ "irattach irda0 -s"
+
+ If you say M, the driver will be built as a module named bfin_sir.
+
+ Note that you need to disable the regular serial driver on the
+ UART that you want SIR to use.
+
+config BFIN_SIR0
+ bool "Blackfin SIR on UART0"
+ depends on BFIN_SIR && !SERIAL_BFIN_UART0
+
+config BFIN_SIR1
+ bool "Blackfin SIR on UART1"
+ depends on BFIN_SIR && !SERIAL_BFIN_UART1 && (!BF531 && !BF532 && !BF533 && !BF561)
+
+config BFIN_SIR2
+ bool "Blackfin SIR on UART2"
+ depends on BFIN_SIR && !SERIAL_BFIN_UART2 && (BF54x || BF538 || BF539)
+
+config BFIN_SIR3
+ bool "Blackfin SIR on UART3"
+ depends on BFIN_SIR && !SERIAL_BFIN_UART3 && (BF54x)
+
+choice
+ prompt "SIR Mode"
+ depends on BFIN_SIR
+ default SIR_BFIN_DMA
+
+config SIR_BFIN_DMA
+ bool "DMA mode"
+ depends on !DMA_UNCACHED_NONE
+
+config SIR_BFIN_PIO
+ bool "PIO mode"
+endchoice
+
comment "Dongle support"
config DONGLE
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 5d20fde32a2..d82e1e3bd8c 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_MCS_FIR) += mcs7780.o
obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
# SIR drivers
obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
+obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
# dongle drivers for SIR drivers
obj-$(CONFIG_ESI_DONGLE) += esi-sir.o
obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 941164076a2..c4361d46659 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
@@ -198,6 +199,17 @@ static int au1k_irda_init_iobuf(iobuff_t *io, int size)
return io->head ? 0 : -ENOMEM;
}
+static const struct net_device_ops au1k_irda_netdev_ops = {
+ .ndo_open = au1k_irda_start,
+ .ndo_stop = au1k_irda_stop,
+ .ndo_start_xmit = au1k_irda_hard_xmit,
+ .ndo_tx_timeout = au1k_tx_timeout,
+ .ndo_do_ioctl = au1k_irda_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
static int au1k_irda_net_init(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
@@ -209,11 +221,7 @@ static int au1k_irda_net_init(struct net_device *dev)
if (err)
goto out1;
- dev->open = au1k_irda_start;
- dev->hard_start_xmit = au1k_irda_hard_xmit;
- dev->stop = au1k_irda_stop;
- dev->do_ioctl = au1k_irda_ioctl;
- dev->tx_timeout = au1k_tx_timeout;
+ dev->netdev_ops = &au1k_irda_netdev_ops;
irda_init_max_qos_capabilies(&aup->qos);
@@ -504,13 +512,13 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: tx_full\n", dev->name);
netif_stop_queue(dev);
aup->tx_full = 1;
- return 1;
+ return NETDEV_TX_BUSY;
}
else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
printk(KERN_DEBUG "%s: tx_full\n", dev->name);
netif_stop_queue(dev);
aup->tx_full = 1;
- return 1;
+ return NETDEV_TX_BUSY;
}
pDB = aup->tx_db_inuse[aup->tx_head];
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
new file mode 100644
index 00000000000..f3eed6a8fba
--- /dev/null
+++ b/drivers/net/irda/bfin_sir.c
@@ -0,0 +1,820 @@
+/*
+ * Blackfin Infra-red Driver
+ *
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ */
+#include "bfin_sir.h"
+
+#ifdef CONFIG_SIR_BFIN_DMA
+#define DMA_SIR_RX_XCNT 10
+#define DMA_SIR_RX_YCNT (PAGE_SIZE / DMA_SIR_RX_XCNT)
+#define DMA_SIR_RX_FLUSH_JIFS (HZ * 4 / 250)
+#endif
+
+#if ANOMALY_05000447
+static int max_rate = 57600;
+#else
+static int max_rate = 115200;
+#endif
+
+static void turnaround_delay(unsigned long last_jif, int mtt)
+{
+ long ticks;
+
+ mtt = mtt < 10000 ? 10000 : mtt;
+ ticks = 1 + mtt / (USEC_PER_SEC / HZ);
+ schedule_timeout_uninterruptible(ticks);
+}
+
+static void __devinit bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev)
+{
+ int i;
+ struct resource *res;
+
+ for (i = 0; i < pdev->num_resources; i++) {
+ res = &pdev->resource[i];
+ switch (res->flags) {
+ case IORESOURCE_MEM:
+ sp->membase = (void __iomem *)res->start;
+ break;
+ case IORESOURCE_IRQ:
+ sp->irq = res->start;
+ break;
+ case IORESOURCE_DMA:
+ sp->rx_dma_channel = res->start;
+ sp->tx_dma_channel = res->end;
+ break;
+ default:
+ break;
+ }
+ }
+
+ sp->clk = get_sclk();
+#ifdef CONFIG_SIR_BFIN_DMA
+ sp->tx_done = 1;
+ init_timer(&(sp->rx_dma_timer));
+#endif
+}
+
+static void bfin_sir_stop_tx(struct bfin_sir_port *port)
+{
+#ifdef CONFIG_SIR_BFIN_DMA
+ disable_dma(port->tx_dma_channel);
+#endif
+
+ while (!(SIR_UART_GET_LSR(port) & THRE)) {
+ cpu_relax();
+ continue;
+ }
+
+ SIR_UART_STOP_TX(port);
+}
+
+static void bfin_sir_enable_tx(struct bfin_sir_port *port)
+{
+ SIR_UART_ENABLE_TX(port);
+}
+
+static void bfin_sir_stop_rx(struct bfin_sir_port *port)
+{
+ SIR_UART_STOP_RX(port);
+}
+
+static void bfin_sir_enable_rx(struct bfin_sir_port *port)
+{
+ SIR_UART_ENABLE_RX(port);
+}
+
+static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
+{
+ int ret = -EINVAL;
+ unsigned int quot;
+ unsigned short val, lsr, lcr;
+ static int utime;
+ int count = 10;
+
+ lcr = WLS(8);
+
+ switch (speed) {
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+
+ quot = (port->clk + (8 * speed)) / (16 * speed)\
+ - ANOMALY_05000230;
+
+ do {
+ udelay(utime);
+ lsr = SIR_UART_GET_LSR(port);
+ } while (!(lsr & TEMT) && count--);
+
+ /* Microseconds needed to transmit one bit */
+ utime = 1000000 / speed + 1;
+
+ /* Clear UCEN bit to reset the UART state machine
+ * and control registers
+ */
+ val = SIR_UART_GET_GCTL(port);
+ val &= ~UCEN;
+ SIR_UART_PUT_GCTL(port, val);
+
+ /* Set DLAB in LCR to Access THR RBR IER */
+ SIR_UART_SET_DLAB(port);
+ SSYNC();
+
+ SIR_UART_PUT_DLL(port, quot & 0xFF);
+ SIR_UART_PUT_DLH(port, (quot >> 8) & 0xFF);
+ SSYNC();
+
+ /* Clear DLAB in LCR */
+ SIR_UART_CLEAR_DLAB(port);
+ SSYNC();
+
+ SIR_UART_PUT_LCR(port, lcr);
+
+ val = SIR_UART_GET_GCTL(port);
+ val |= UCEN;
+ SIR_UART_PUT_GCTL(port, val);
+
+ ret = 0;
+ break;
+ default:
+ printk(KERN_WARNING "bfin_sir: Invalid speed %d\n", speed);
+ break;
+ }
+
+ val = SIR_UART_GET_GCTL(port);
+ /* If RPOLC is not set, we can't catch the receive interrupt.
+ * It is related to the HW layout and the IR transceiver.
+ */
+ val |= IREN | RPOLC;
+ SIR_UART_PUT_GCTL(port, val);
+ return ret;
+}
+
+static int bfin_sir_is_receiving(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ if (!(SIR_UART_GET_IER(port) & ERBFI))
+ return 0;
+ return self->rx_buff.state != OUTSIDE_FRAME;
+}
+
+#ifdef CONFIG_SIR_BFIN_PIO
+static void bfin_sir_tx_chars(struct net_device *dev)
+{
+ unsigned int chr;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ if (self->tx_buff.len != 0) {
+ chr = *(self->tx_buff.data);
+ SIR_UART_PUT_CHAR(port, chr);
+ self->tx_buff.data++;
+ self->tx_buff.len--;
+ } else {
+ self->stats.tx_packets++;
+ self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head;
+ if (self->newspeed) {
+ bfin_sir_set_speed(port, self->newspeed);
+ self->speed = self->newspeed;
+ self->newspeed = 0;
+ }
+ bfin_sir_stop_tx(port);
+ bfin_sir_enable_rx(port);
+ /* TX buffer drained; accept more frames */
+ netif_wake_queue(dev);
+ }
+}
+
+static void bfin_sir_rx_chars(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ unsigned char ch;
+
+ SIR_UART_CLEAR_LSR(port);
+ ch = SIR_UART_GET_CHAR(port);
+ async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
+ dev->last_rx = jiffies;
+}
+
+static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ spin_lock(&self->lock);
+ while ((SIR_UART_GET_LSR(port) & DR))
+ bfin_sir_rx_chars(dev);
+ spin_unlock(&self->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ spin_lock(&self->lock);
+ if (SIR_UART_GET_LSR(port) & THRE)
+ bfin_sir_tx_chars(dev);
+ spin_unlock(&self->lock);
+
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_SIR_BFIN_PIO */
+
+#ifdef CONFIG_SIR_BFIN_DMA
+static void bfin_sir_dma_tx_chars(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ if (!port->tx_done)
+ return;
+ port->tx_done = 0;
+
+ if (self->tx_buff.len == 0) {
+ self->stats.tx_packets++;
+ if (self->newspeed) {
+ bfin_sir_set_speed(port, self->newspeed);
+ self->speed = self->newspeed;
+ self->newspeed = 0;
+ }
+ bfin_sir_enable_rx(port);
+ port->tx_done = 1;
+ netif_wake_queue(dev);
+ return;
+ }
+
+ blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data),
+ (unsigned long)(self->tx_buff.data+self->tx_buff.len));
+ set_dma_config(port->tx_dma_channel,
+ set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
+ INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_8,
+ DMA_SYNC_RESTART));
+ set_dma_start_addr(port->tx_dma_channel,
+ (unsigned long)(self->tx_buff.data));
+ set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);
+ set_dma_x_modify(port->tx_dma_channel, 1);
+ enable_dma(port->tx_dma_channel);
+}
+
+static irqreturn_t bfin_sir_dma_tx_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ spin_lock(&self->lock);
+ if (!(get_dma_curr_irqstat(port->tx_dma_channel) & DMA_RUN)) {
+ clear_dma_irqstat(port->tx_dma_channel);
+ bfin_sir_stop_tx(port);
+
+ self->stats.tx_packets++;
+ self->stats.tx_bytes += self->tx_buff.len;
+ self->tx_buff.len = 0;
+ if (self->newspeed) {
+ bfin_sir_set_speed(port, self->newspeed);
+ self->speed = self->newspeed;
+ self->newspeed = 0;
+ }
+ bfin_sir_enable_rx(port);
+ /* TX buffer drained; accept more frames */
+ netif_wake_queue(dev);
+ port->tx_done = 1;
+ }
+ spin_unlock(&self->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void bfin_sir_dma_rx_chars(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ int i;
+
+ SIR_UART_CLEAR_LSR(port);
+
+ for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++)
+ async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]);
+}
+
+void bfin_sir_rx_dma_timeout(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ int x_pos, pos;
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+ x_pos = DMA_SIR_RX_XCNT - get_dma_curr_xcount(port->rx_dma_channel);
+ if (x_pos == DMA_SIR_RX_XCNT)
+ x_pos = 0;
+
+ pos = port->rx_dma_nrows * DMA_SIR_RX_XCNT + x_pos;
+
+ if (pos > port->rx_dma_buf.tail) {
+ port->rx_dma_buf.tail = pos;
+ bfin_sir_dma_rx_chars(dev);
+ port->rx_dma_buf.head = port->rx_dma_buf.tail;
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+static irqreturn_t bfin_sir_dma_rx_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ unsigned short irqstat;
+
+ spin_lock(&self->lock);
+
+ port->rx_dma_nrows++;
+ port->rx_dma_buf.tail = DMA_SIR_RX_XCNT * port->rx_dma_nrows;
+ bfin_sir_dma_rx_chars(dev);
+ if (port->rx_dma_nrows >= DMA_SIR_RX_YCNT) {
+ port->rx_dma_nrows = 0;
+ port->rx_dma_buf.tail = 0;
+ }
+ port->rx_dma_buf.head = port->rx_dma_buf.tail;
+
+ irqstat = get_dma_curr_irqstat(port->rx_dma_channel);
+ clear_dma_irqstat(port->rx_dma_channel);
+ spin_unlock(&self->lock);
+
+ mod_timer(&port->rx_dma_timer, jiffies + DMA_SIR_RX_FLUSH_JIFS);
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_SIR_BFIN_DMA */
+
+static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
+{
+#ifdef CONFIG_SIR_BFIN_DMA
+ dma_addr_t dma_handle;
+#endif /* CONFIG_SIR_BFIN_DMA */
+
+ if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) {
+ dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n");
+ return -EBUSY;
+ }
+
+ if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) {
+ dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n");
+ free_dma(port->rx_dma_channel);
+ return -EBUSY;
+ }
+
+#ifdef CONFIG_SIR_BFIN_DMA
+
+ set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
+ set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);
+
+ port->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
+ port->rx_dma_buf.head = 0;
+ port->rx_dma_buf.tail = 0;
+ port->rx_dma_nrows = 0;
+
+ set_dma_config(port->rx_dma_channel,
+ set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
+ INTR_ON_ROW, DIMENSION_2D,
+ DATA_SIZE_8, DMA_SYNC_RESTART));
+ set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT);
+ set_dma_x_modify(port->rx_dma_channel, 1);
+ set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT);
+ set_dma_y_modify(port->rx_dma_channel, 1);
+ set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
+ enable_dma(port->rx_dma_channel);
+
+ port->rx_dma_timer.data = (unsigned long)(dev);
+ port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;
+
+#else
+
+ if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) {
+ dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
+ return -EBUSY;
+ }
+
+ if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) {
+ dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
+ free_irq(port->irq, dev);
+ return -EBUSY;
+ }
+#endif
+
+ return 0;
+}
+
+static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev)
+{
+ unsigned short val;
+
+ bfin_sir_stop_rx(port);
+ SIR_UART_DISABLE_INTS(port);
+
+ val = SIR_UART_GET_GCTL(port);
+ val &= ~(UCEN | IREN | RPOLC);
+ SIR_UART_PUT_GCTL(port, val);
+
+#ifdef CONFIG_SIR_BFIN_DMA
+ disable_dma(port->tx_dma_channel);
+ disable_dma(port->rx_dma_channel);
+ del_timer(&(port->rx_dma_timer));
+ dma_free_coherent(NULL, PAGE_SIZE, port->rx_dma_buf.buf, 0);
+#else
+ free_irq(port->irq+1, dev);
+ free_irq(port->irq, dev);
+#endif
+ free_dma(port->tx_dma_channel);
+ free_dma(port->rx_dma_channel);
+}
+
+#ifdef CONFIG_PM
+static int bfin_sir_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct bfin_sir_port *sir_port;
+ struct net_device *dev;
+ struct bfin_sir_self *self;
+
+ sir_port = platform_get_drvdata(pdev);
+ if (!sir_port)
+ return 0;
+
+ dev = sir_port->dev;
+ self = netdev_priv(dev);
+ if (self->open) {
+ flush_work(&self->work);
+ bfin_sir_shutdown(self->sir_port, dev);
+ netif_device_detach(dev);
+ }
+
+ return 0;
+}
+static int bfin_sir_resume(struct platform_device *pdev)
+{
+ struct bfin_sir_port *sir_port;
+ struct net_device *dev;
+ struct bfin_sir_self *self;
+ struct bfin_sir_port *port;
+
+ sir_port = platform_get_drvdata(pdev);
+ if (!sir_port)
+ return 0;
+
+ dev = sir_port->dev;
+ self = netdev_priv(dev);
+ port = self->sir_port;
+ if (self->open) {
+ if (self->newspeed) {
+ self->speed = self->newspeed;
+ self->newspeed = 0;
+ }
+ bfin_sir_startup(port, dev);
+ bfin_sir_set_speed(port, 9600);
+ bfin_sir_enable_rx(port);
+ netif_device_attach(dev);
+ }
+ return 0;
+}
+#else
+#define bfin_sir_suspend NULL
+#define bfin_sir_resume NULL
+#endif
+
+static void bfin_sir_send_work(struct work_struct *work)
+{
+ struct bfin_sir_self *self = container_of(work, struct bfin_sir_self, work);
+ struct net_device *dev = self->sir_port->dev;
+ struct bfin_sir_port *port = self->sir_port;
+ unsigned short val;
+ int tx_cnt = 10;
+
+ while (bfin_sir_is_receiving(dev) && --tx_cnt)
+ turnaround_delay(dev->last_rx, self->mtt);
+
+ bfin_sir_stop_rx(port);
+
+ /* To avoid losing the RX interrupt, we reset the IR function
+ * before sending data. Setting the speed would also work, since
+ * it resets the whole UART.
+ */
+ val = SIR_UART_GET_GCTL(port);
+ val &= ~(IREN | RPOLC);
+ SIR_UART_PUT_GCTL(port, val);
+ SSYNC();
+ val |= IREN | RPOLC;
+ SIR_UART_PUT_GCTL(port, val);
+ SSYNC();
+ /* bfin_sir_set_speed(port, self->speed); */
+
+#ifdef CONFIG_SIR_BFIN_DMA
+ bfin_sir_dma_tx_chars(dev);
+#endif
+ bfin_sir_enable_tx(port);
+ dev->trans_start = jiffies;
+}
+
+static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ int speed = irda_get_next_speed(skb);
+
+ netif_stop_queue(dev);
+
+ self->mtt = irda_get_mtt(skb);
+
+ if (speed != self->speed && speed != -1)
+ self->newspeed = speed;
+
+ self->tx_buff.data = self->tx_buff.head;
+ if (skb->len == 0)
+ self->tx_buff.len = 0;
+ else
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize);
+
+ schedule_work(&self->work);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int bfin_sir_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+ struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ if (capable(CAP_NET_ADMIN)) {
+ if (self->open) {
+ ret = bfin_sir_set_speed(port, rq->ifr_baudrate);
+ bfin_sir_enable_rx(port);
+ } else {
+ dev_warn(&dev->dev, "SIOCSBANDWIDTH: !netif_running\n");
+ ret = 0;
+ }
+ }
+ break;
+
+ case SIOCSMEDIABUSY:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ irda_device_set_media_busy(dev, TRUE);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGRECEIVING:
+ rq->ifr_receiving = bfin_sir_is_receiving(dev);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static struct net_device_stats *bfin_sir_stats(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+
+ return &self->stats;
+}
+
+static int bfin_sir_open(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ int err = -ENOMEM;
+
+ self->newspeed = 0;
+ self->speed = 9600;
+
+ spin_lock_init(&self->lock);
+
+ err = bfin_sir_startup(port, dev);
+ if (err)
+ goto err_startup;
+
+ bfin_sir_set_speed(port, 9600);
+
+ self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
+ if (!self->irlap)
+ goto err_irlap;
+
+ INIT_WORK(&self->work, bfin_sir_send_work);
+
+ /*
+ * Now enable the interrupt then start the queue
+ */
+ self->open = 1;
+ bfin_sir_enable_rx(port);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+err_irlap:
+ self->open = 0;
+ bfin_sir_shutdown(port, dev);
+err_startup:
+ return err;
+}
+
+static int bfin_sir_stop(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+
+ flush_work(&self->work);
+ bfin_sir_shutdown(self->sir_port, dev);
+
+ if (self->rxskb) {
+ dev_kfree_skb(self->rxskb);
+ self->rxskb = NULL;
+ }
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(dev);
+ self->open = 0;
+
+ return 0;
+}
+
+static int bfin_sir_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL);
+ if (!io->head)
+ return -ENOMEM;
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+ return 0;
+}
+
+static int __devinit bfin_sir_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct bfin_sir_self *self;
+ unsigned int baudrate_mask;
+ struct bfin_sir_port *sir_port;
+ int err;
+
+ if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(per) && \
+ per[pdev->id][3] == pdev->id) {
+ err = peripheral_request_list(per[pdev->id], DRIVER_NAME);
+ if (err)
+ return err;
+ } else {
+ dev_err(&pdev->dev, "Invalid pdev id, please check board file\n");
+ return -ENODEV;
+ }
+
+ err = -ENOMEM;
+ sir_port = kmalloc(sizeof(*sir_port), GFP_KERNEL);
+ if (!sir_port)
+ goto err_mem_0;
+
+ bfin_sir_init_ports(sir_port, pdev);
+
+ dev = alloc_irdadev(sizeof(*self));
+ if (!dev)
+ goto err_mem_1;
+
+ self = netdev_priv(dev);
+ self->dev = &pdev->dev;
+ self->sir_port = sir_port;
+ sir_port->dev = dev;
+
+ err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU);
+ if (err)
+ goto err_mem_2;
+ err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_3;
+
+ dev->hard_start_xmit = bfin_sir_hard_xmit;
+ dev->open = bfin_sir_open;
+ dev->stop = bfin_sir_stop;
+ dev->do_ioctl = bfin_sir_ioctl;
+ dev->get_stats = bfin_sir_stats;
+ dev->irq = sir_port->irq;
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ baudrate_mask = IR_9600;
+
+ switch (max_rate) {
+ case 115200:
+ baudrate_mask |= IR_115200;
+ case 57600:
+ baudrate_mask |= IR_57600;
+ case 38400:
+ baudrate_mask |= IR_38400;
+ case 19200:
+ baudrate_mask |= IR_19200;
+ case 9600:
+ break;
+ default:
+ dev_warn(&pdev->dev, "Invalid maximum baud rate, using 9600\n");
+ }
+
+ self->qos.baud_rate.bits &= baudrate_mask;
+
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(dev);
+
+ if (err) {
+ kfree(self->tx_buff.head);
+err_mem_3:
+ kfree(self->rx_buff.head);
+err_mem_2:
+ free_netdev(dev);
+err_mem_1:
+ kfree(sir_port);
+err_mem_0:
+ peripheral_free_list(per[pdev->id]);
+ } else
+ platform_set_drvdata(pdev, sir_port);
+
+ return err;
+}
+
+static int __devexit bfin_sir_remove(struct platform_device *pdev)
+{
+ struct bfin_sir_port *sir_port;
+ struct net_device *dev = NULL;
+ struct bfin_sir_self *self;
+
+ sir_port = platform_get_drvdata(pdev);
+ if (!sir_port)
+ return 0;
+ dev = sir_port->dev;
+ self = netdev_priv(dev);
+ unregister_netdev(dev);
+ kfree(self->tx_buff.head);
+ kfree(self->rx_buff.head);
+ free_netdev(dev);
+ kfree(sir_port);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver bfin_ir_driver = {
+ .probe = bfin_sir_probe,
+ .remove = __devexit_p(bfin_sir_remove),
+ .suspend = bfin_sir_suspend,
+ .resume = bfin_sir_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init bfin_sir_init(void)
+{
+ return platform_driver_register(&bfin_ir_driver);
+}
+
+static void __exit bfin_sir_exit(void)
+{
+ platform_driver_unregister(&bfin_ir_driver);
+}
+
+module_init(bfin_sir_init);
+module_exit(bfin_sir_exit);
+
+module_param(max_rate, int, 0);
+MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
+
+MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>");
+MODULE_DESCRIPTION("Blackfin IrDA driver");
+MODULE_LICENSE("GPL");
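As a side note on the divisor math in bfin_sir_set_speed() above: quot rounds SCLK / (16 * baud) to the nearest integer before the anomaly-05000230 adjustment. A small standalone sketch of that arithmetic, assuming a 100 MHz SCLK (an assumed value, not taken from the patch):
#include <stdio.h>

int main(void)
{
	unsigned long clk = 100000000;	/* assumed SCLK of 100 MHz */
	unsigned int speed = 115200;
	/* round to the nearest divisor, as the driver does */
	unsigned long quot = (clk + 8 * speed) / (16 * speed);

	printf("quot = %lu\n", quot);	/* prints 54; anomaly-affected parts subtract 1 */
	return 0;
}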
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h
new file mode 100644
index 00000000000..dac71b1f4f9
--- /dev/null
+++ b/drivers/net/irda/bfin_sir.h
@@ -0,0 +1,148 @@
+/*
+ * Blackfin Infra-red Driver
+ *
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ */
+
+#include <linux/serial.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include <asm/irq.h>
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+
+#ifdef CONFIG_SIR_BFIN_DMA
+struct dma_rx_buf {
+ char *buf;
+ int head;
+ int tail;
+};
+#endif
+
+struct bfin_sir_port {
+ unsigned char __iomem *membase;
+ unsigned int irq;
+ unsigned int lsr;
+ unsigned long clk;
+ struct net_device *dev;
+#ifdef CONFIG_SIR_BFIN_DMA
+ int tx_done;
+ struct dma_rx_buf rx_dma_buf;
+ struct timer_list rx_dma_timer;
+ int rx_dma_nrows;
+#endif
+ unsigned int tx_dma_channel;
+ unsigned int rx_dma_channel;
+};
+
+struct bfin_sir_port_res {
+ unsigned long base_addr;
+ int irq;
+ unsigned int rx_dma_channel;
+ unsigned int tx_dma_channel;
+};
+
+struct bfin_sir_self {
+ struct bfin_sir_port *sir_port;
+ spinlock_t lock;
+ unsigned int open;
+ int speed;
+ int newspeed;
+
+ struct sk_buff *txskb;
+ struct sk_buff *rxskb;
+ struct net_device_stats stats;
+ struct device *dev;
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+
+ struct work_struct work;
+ int mtt;
+};
+
+#define DRIVER_NAME "bfin_sir"
+
+#define SIR_UART_GET_CHAR(port) bfin_read16((port)->membase + OFFSET_RBR)
+#define SIR_UART_GET_DLL(port) bfin_read16((port)->membase + OFFSET_DLL)
+#define SIR_UART_GET_DLH(port) bfin_read16((port)->membase + OFFSET_DLH)
+#define SIR_UART_GET_LCR(port) bfin_read16((port)->membase + OFFSET_LCR)
+#define SIR_UART_GET_GCTL(port) bfin_read16((port)->membase + OFFSET_GCTL)
+
+#define SIR_UART_PUT_CHAR(port, v) bfin_write16(((port)->membase + OFFSET_THR), v)
+#define SIR_UART_PUT_DLL(port, v) bfin_write16(((port)->membase + OFFSET_DLL), v)
+#define SIR_UART_PUT_DLH(port, v) bfin_write16(((port)->membase + OFFSET_DLH), v)
+#define SIR_UART_PUT_LCR(port, v) bfin_write16(((port)->membase + OFFSET_LCR), v)
+#define SIR_UART_PUT_GCTL(port, v) bfin_write16(((port)->membase + OFFSET_GCTL), v)
+
+#ifdef CONFIG_BF54x
+#define SIR_UART_GET_LSR(port) bfin_read16((port)->membase + OFFSET_LSR)
+#define SIR_UART_GET_IER(port) bfin_read16((port)->membase + OFFSET_IER_SET)
+#define SIR_UART_SET_IER(port, v) bfin_write16(((port)->membase + OFFSET_IER_SET), v)
+#define SIR_UART_CLEAR_IER(port, v) bfin_write16(((port)->membase + OFFSET_IER_CLEAR), v)
+#define SIR_UART_PUT_LSR(port, v) bfin_write16(((port)->membase + OFFSET_LSR), v)
+#define SIR_UART_CLEAR_LSR(port) bfin_write16(((port)->membase + OFFSET_LSR), -1)
+
+#define SIR_UART_SET_DLAB(port)
+#define SIR_UART_CLEAR_DLAB(port)
+
+#define SIR_UART_ENABLE_INTS(port, v) SIR_UART_SET_IER(port, v)
+#define SIR_UART_DISABLE_INTS(port) SIR_UART_CLEAR_IER(port, 0xF)
+#define SIR_UART_STOP_TX(port) do { SIR_UART_PUT_LSR(port, TFI); SIR_UART_CLEAR_IER(port, ETBEI); } while (0)
+#define SIR_UART_ENABLE_TX(port) do { SIR_UART_SET_IER(port, ETBEI); } while (0)
+#define SIR_UART_STOP_RX(port) do { SIR_UART_CLEAR_IER(port, ERBFI); } while (0)
+#define SIR_UART_ENABLE_RX(port) do { SIR_UART_SET_IER(port, ERBFI); } while (0)
+#else
+
+#define SIR_UART_GET_IIR(port) bfin_read16((port)->membase + OFFSET_IIR)
+#define SIR_UART_GET_IER(port) bfin_read16((port)->membase + OFFSET_IER)
+#define SIR_UART_PUT_IER(port, v) bfin_write16(((port)->membase + OFFSET_IER), v)
+
+#define SIR_UART_SET_DLAB(port) do { SIR_UART_PUT_LCR(port, SIR_UART_GET_LCR(port) | DLAB); } while (0)
+#define SIR_UART_CLEAR_DLAB(port) do { SIR_UART_PUT_LCR(port, SIR_UART_GET_LCR(port) & ~DLAB); } while (0)
+
+#define SIR_UART_ENABLE_INTS(port, v) SIR_UART_PUT_IER(port, v)
+#define SIR_UART_DISABLE_INTS(port) SIR_UART_PUT_IER(port, 0)
+#define SIR_UART_STOP_TX(port) do { SIR_UART_PUT_IER(port, SIR_UART_GET_IER(port) & ~ETBEI); } while (0)
+#define SIR_UART_ENABLE_TX(port) do { SIR_UART_PUT_IER(port, SIR_UART_GET_IER(port) | ETBEI); } while (0)
+#define SIR_UART_STOP_RX(port) do { SIR_UART_PUT_IER(port, SIR_UART_GET_IER(port) & ~ERBFI); } while (0)
+#define SIR_UART_ENABLE_RX(port) do { SIR_UART_PUT_IER(port, SIR_UART_GET_IER(port) | ERBFI); } while (0)
+
+static inline unsigned int SIR_UART_GET_LSR(struct bfin_sir_port *port)
+{
+ unsigned int lsr = bfin_read16(port->membase + OFFSET_LSR);
+ port->lsr |= (lsr & (BI|FE|PE|OE));
+ return lsr | port->lsr;
+}
+
+static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port)
+{
+ port->lsr = 0;
+ bfin_read16(port->membase + OFFSET_LSR);
+}
+#endif
+
+static const unsigned short per[][4] = {
+ /* rx pin tx pin NULL uart_number */
+ {P_UART0_RX, P_UART0_TX, 0, 0},
+ {P_UART1_RX, P_UART1_TX, 0, 1},
+ {P_UART2_RX, P_UART2_TX, 0, 2},
+ {P_UART3_RX, P_UART3_TX, 0, 3},
+};
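
The per[] table above pairs each Blackfin UART with its RX/TX pin-mux entries, with the 0 slot terminating each row, which is the shape the Blackfin pin-mux helper peripheral_request_list() expects. A minimal sketch of how a probe path might claim the pins for one port; the helper name bfin_sir_request_pins and its error handling are illustrative, not code from this patch:

static int bfin_sir_request_pins(int uart_port)
{
	/* per[uart_port] is a 0-terminated {rx, tx} list for that UART */
	if (uart_port < 0 || uart_port >= ARRAY_SIZE(per))
		return -ENODEV;

	/* Claims P_UARTx_RX/P_UARTx_TX from the pin-mux core; returns 0
	 * on success, a negative errno otherwise. */
	return peripheral_request_list(per[uart_port], DRIVER_NAME);
}
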
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 6b6548b9fda..9a0346e751a 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -994,11 +994,11 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
/* change speed pending, wait for its execution */
if (self->new_speed)
- return -EBUSY;
+ return NETDEV_TX_BUSY;
/* device stopped (apm) wait for restart */
if (self->stopped)
- return -EBUSY;
+ return NETDEV_TX_BUSY;
toshoboe_checkstuck (self);
@@ -1049,7 +1049,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
if (self->txpending)
{
spin_unlock_irqrestore(&self->spinlock, flags);
- return -EBUSY;
+ return NETDEV_TX_BUSY;
}
/* If in SIR mode we need to generate a string of XBOFs */
@@ -1105,7 +1105,7 @@ dumpbufs(skb->data,skb->len,'>');
,skb->len, self->ring->tx[self->txs].control, self->txpending);
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
spin_unlock_irqrestore(&self->spinlock, flags);
- return -EBUSY;
+ return NETDEV_TX_BUSY;
}
if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_SIRON)
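
These hunks, like the similar ones in the drivers below, convert hard_start_xmit return values from raw -EBUSY / integer codes to the NETDEV_TX_* codes the core expects. A minimal sketch of the contract being adopted; the example_* names and the placeholder private state are illustrative, not taken from this patch:

struct example_priv {
	bool tx_busy;	/* placeholder: "hardware can't take a frame now" */
};

static int example_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	if (priv->tx_busy) {
		/* NETDEV_TX_BUSY: the core keeps the skb and retries it
		 * later; the driver must not free it on this path. */
		return NETDEV_TX_BUSY;
	}

	/* ...hand the frame to the hardware, then release the skb... */
	dev_kfree_skb(skb);

	/* NETDEV_TX_OK: the skb has been consumed by the driver, which
	 * matches the dev_kfree_skb() + NETDEV_TX_OK pattern in the
	 * hunks above. */
	return NETDEV_TX_OK;
}
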
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 006ba23110d..0c0831c03f6 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -389,7 +389,6 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
s32 speed;
s16 xbofs;
int res, mtt;
- int err = 1; /* Failed */
IRDA_DEBUG(4, "%s() on %s\n", __func__, netdev->name);
@@ -430,7 +429,6 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
irda_usb_change_speed_xbofs(self);
netdev->trans_start = jiffies;
/* Will netif_wake_queue() in callback */
- err = 0; /* No error */
goto drop;
}
}
@@ -542,7 +540,7 @@ drop:
/* Drop silently the skb and exit */
dev_kfree_skb(skb);
spin_unlock_irqrestore(&self->lock, flags);
- return err; /* Usually 1 */
+ return NETDEV_TX_OK;
}
/*------------------------------------------------------------------*/
@@ -1859,6 +1857,42 @@ static void irda_usb_disconnect(struct usb_interface *intf)
IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __func__);
}
+#ifdef CONFIG_PM
+/* USB suspend, so power off the transmitter/receiver */
+static int irda_usb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct irda_usb_cb *self = usb_get_intfdata(intf);
+ int i;
+
+ netif_device_detach(self->netdev);
+
+ if (self->tx_urb != NULL)
+ usb_kill_urb(self->tx_urb);
+ if (self->speed_urb != NULL)
+ usb_kill_urb(self->speed_urb);
+ for (i = 0; i < self->max_rx_urb; i++) {
+ if (self->rx_urb[i] != NULL)
+ usb_kill_urb(self->rx_urb[i]);
+ }
+ return 0;
+}
+
+/* Coming out of suspend, so reset hardware */
+static int irda_usb_resume(struct usb_interface *intf)
+{
+ struct irda_usb_cb *self = usb_get_intfdata(intf);
+ int i;
+
+ for (i = 0; i < self->max_rx_urb; i++) {
+ if (self->rx_urb[i] != NULL)
+ usb_submit_urb(self->rx_urb[i], GFP_KERNEL);
+ }
+
+ netif_device_attach(self->netdev);
+ return 0;
+}
+#endif
+
/*------------------------------------------------------------------*/
/*
* USB device callbacks
@@ -1868,6 +1902,10 @@ static struct usb_driver irda_driver = {
.probe = irda_usb_probe,
.disconnect = irda_usb_disconnect,
.id_table = dongles,
+#ifdef CONFIG_PM
+ .suspend = irda_usb_suspend,
+ .resume = irda_usb_resume,
+#endif
};
/************************* MODULE CALLBACKS *************************/
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 9d813bc4502..c3e4e2c435b 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -156,9 +156,6 @@ static int kingsun_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
int wraplen;
int ret = 0;
- if (skb == NULL || netdev == NULL)
- return -EINVAL;
-
netif_stop_queue(netdev);
/* the IRDA wrapping routines don't deal with non linear skb */
@@ -197,7 +194,7 @@ static int kingsun_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb(skb);
spin_unlock(&kingsun->lock);
- return ret;
+ return NETDEV_TX_OK;
}
/* Receive callback function */
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index b6ffe9715b6..d73b8b64fcb 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -391,9 +391,6 @@ static int ks959_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int wraplen;
int ret = 0;
- if (skb == NULL || netdev == NULL)
- return -EINVAL;
-
netif_stop_queue(netdev);
/* the IRDA wrapping routines don't deal with non linear skb */
@@ -428,7 +425,7 @@ static int ks959_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb(skb);
spin_unlock(&kingsun->lock);
- return ret;
+ return NETDEV_TX_OK;
}
/* Receive callback function */
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 64df27f2bfd..1ef45ec7442 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -304,9 +304,6 @@ static int ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int wraplen;
int ret = 0;
- if (skb == NULL || netdev == NULL)
- return -EINVAL;
-
netif_stop_queue(netdev);
/* the IRDA wrapping routines don't deal with non linear skb */
@@ -341,7 +338,7 @@ static int ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb(skb);
spin_unlock(&kingsun->lock);
- return ret;
+ return NETDEV_TX_OK;
}
/* Receive callback function */
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index fac504d0cfd..f4df1001983 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -824,10 +824,6 @@ static int mcs_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
int wraplen;
int ret = 0;
-
- if (skb == NULL || ndev == NULL)
- return -EINVAL;
-
netif_stop_queue(ndev);
mcs = netdev_priv(ndev);
@@ -870,7 +866,7 @@ static int mcs_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
dev_kfree_skb(skb);
spin_unlock_irqrestore(&mcs->lock, flags);
- return ret;
+ return NETDEV_TX_OK;
}
static const struct net_device_ops mcs_netdev_ops = {
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index e775338b525..3376a4f39e0 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -14,6 +14,7 @@
*/
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -797,6 +798,16 @@ static int pxa_irda_init_iobuf(iobuff_t *io, int size)
return io->head ? 0 : -ENOMEM;
}
+static const struct net_device_ops pxa_irda_netdev_ops = {
+ .ndo_open = pxa_irda_start,
+ .ndo_stop = pxa_irda_stop,
+ .ndo_start_xmit = pxa_irda_hard_xmit,
+ .ndo_do_ioctl = pxa_irda_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
static int pxa_irda_probe(struct platform_device *pdev)
{
struct net_device *dev;
@@ -845,10 +856,7 @@ static int pxa_irda_probe(struct platform_device *pdev)
if (err)
goto err_startup;
- dev->hard_start_xmit = pxa_irda_hard_xmit;
- dev->open = pxa_irda_start;
- dev->stop = pxa_irda_stop;
- dev->do_ioctl = pxa_irda_ioctl;
+ dev->netdev_ops = &pxa_irda_netdev_ops;
irda_init_max_qos_capabilies(&si->qos);
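
This hunk, and the sa1100_ir and iseries_veth hunks that follow, move the old writable per-field callbacks into a single const struct net_device_ops. A condensed sketch of the conversion pattern with example_* placeholders (the real ops tables and handlers are the ones shown in the hunks):

/* Hypothetical minimal callbacks, for illustration only. */
static int example_open(struct net_device *dev)  { return 0; }
static int example_stop(struct net_device *dev)  { return 0; }
static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* One const ops table replaces the per-device assignments such as
 * dev->open, dev->stop and dev->hard_start_xmit. */
static const struct net_device_ops example_netdev_ops = {
	.ndo_open	= example_open,
	.ndo_stop	= example_stop,
	.ndo_start_xmit	= example_xmit,
};

static void example_setup(struct net_device *dev)
{
	dev->netdev_ops = &example_netdev_ops;	/* single pointer hook-up */
}
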
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 7a2b003954c..2aeb2e6aec1 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
@@ -875,6 +876,16 @@ static int sa1100_irda_init_iobuf(iobuff_t *io, int size)
return io->head ? 0 : -ENOMEM;
}
+static const struct net_device_ops sa1100_irda_netdev_ops = {
+ .ndo_open = sa1100_irda_start,
+ .ndo_stop = sa1100_irda_stop,
+ .ndo_start_xmit = sa1100_irda_hard_xmit,
+ .ndo_do_ioctl = sa1100_irda_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
static int sa1100_irda_probe(struct platform_device *pdev)
{
struct net_device *dev;
@@ -913,11 +924,8 @@ static int sa1100_irda_probe(struct platform_device *pdev)
if (err)
goto err_mem_5;
- dev->hard_start_xmit = sa1100_irda_hard_xmit;
- dev->open = sa1100_irda_start;
- dev->stop = sa1100_irda_stop;
- dev->do_ioctl = sa1100_irda_ioctl;
- dev->irq = IRQ_Ser2ICP;
+ dev->netdev_ops = &sa1100_irda_netdev_ops;
+ dev->irq = IRQ_Ser2ICP;
irda_init_max_qos_capabilies(&si->qos);
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index d940809762e..fd0796c3db3 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -607,7 +607,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
* stopped so the network layer will retry after the
* fsm completes and wakes the queue.
*/
- return 1;
+ return NETDEV_TX_BUSY;
}
else if (unlikely(err)) {
/* other fatal error - forget the speed change and
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 59d79807b4d..d0797adb5f8 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -2124,7 +2124,7 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
while (count-- > 0 && !(inb(iobase + UART_LSR) & UART_LSR_TEMT))
udelay(1);
- if (count == 0)
+ if (count < 0)
IRDA_DEBUG(0, "%s(): stuck transmitter\n", __func__);
}
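
The smsc-ircc2 change above fixes an off-by-one in the timeout check: with a postfix decrement in the loop condition, a transmitter that never drains leaves the counter at -1, so the old (count == 0) test could never fire. A standalone illustration of that behaviour (demo code, not driver code):

static int wait_loop_demo(void)
{
	int count = 3;		/* stand-in for the driver's delay budget */
	int tx_empty = 0;	/* pretend UART_LSR_TEMT is never set */

	while (count-- > 0 && !tx_empty)
		;		/* udelay(1) in the real driver */

	/* The body runs while count-- yields 3, 2, 1; the final test sees
	 * 0, fails, and the post-decrement still runs, leaving count at
	 * -1.  Hence the stuck-transmitter check must be (count < 0). */
	return count;
}
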
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index cb793c2bade..e44215cb188 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1021,6 +1021,16 @@ static const struct ethtool_ops ops = {
.get_link = veth_get_link,
};
+static const struct net_device_ops veth_netdev_ops = {
+ .ndo_open = veth_open,
+ .ndo_stop = veth_close,
+ .ndo_start_xmit = veth_start_xmit,
+ .ndo_change_mtu = veth_change_mtu,
+ .ndo_set_multicast_list = veth_set_multicast_list,
+ .ndo_set_mac_address = NULL,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static struct net_device *veth_probe_one(int vlan,
struct vio_dev *vio_dev)
{
@@ -1067,12 +1077,7 @@ static struct net_device *veth_probe_one(int vlan,
memcpy(&port->mac_addr, mac_addr, ETH_ALEN);
- dev->open = veth_open;
- dev->hard_start_xmit = veth_start_xmit;
- dev->stop = veth_close;
- dev->change_mtu = veth_change_mtu;
- dev->set_mac_address = NULL;
- dev->set_multicast_list = veth_set_multicast_list;
+ dev->netdev_ops = &veth_netdev_ops;
SET_ETHTOOL_OPS(dev, &ops);
SET_NETDEV_DEV(dev, vdev);
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 11dcda0f453..ff67a84e680 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -192,7 +192,7 @@ ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
vendor_name[i] = ixgb_read_phy_reg(hw,
MDIO_PMA_PMD_XPAK_VENDOR_NAME
+ i, IXGB_PHY_ADDRESS,
- MDIO_PMA_PMD_DID);
+ MDIO_MMD_PMAPMD);
}
/* Determine the actual vendor */
@@ -1225,15 +1225,15 @@ ixgb_optics_reset(struct ixgb_hw *hw)
u16 mdio_reg;
ixgb_write_phy_reg(hw,
- MDIO_PMA_PMD_CR1,
- IXGB_PHY_ADDRESS,
- MDIO_PMA_PMD_DID,
- MDIO_PMA_PMD_CR1_RESET);
-
- mdio_reg = ixgb_read_phy_reg( hw,
- MDIO_PMA_PMD_CR1,
- IXGB_PHY_ADDRESS,
- MDIO_PMA_PMD_DID);
+ MDIO_CTRL1,
+ IXGB_PHY_ADDRESS,
+ MDIO_MMD_PMAPMD,
+ MDIO_CTRL1_RESET);
+
+ mdio_reg = ixgb_read_phy_reg(hw,
+ MDIO_CTRL1,
+ IXGB_PHY_ADDRESS,
+ MDIO_MMD_PMAPMD);
}
return;
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index 831fe0c58b2..af6ca3aab5a 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -29,6 +29,8 @@
#ifndef _IXGB_HW_H_
#define _IXGB_HW_H_
+#include <linux/mdio.h>
+
#include "ixgb_osdep.h"
/* Enums */
@@ -507,18 +509,6 @@ typedef enum {
/* Definitions for the optics devices on the MDIO bus. */
#define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */
-/* Standard five-bit Device IDs. See IEEE 802.3ae, clause 45 */
-#define MDIO_PMA_PMD_DID 0x01
-#define MDIO_WIS_DID 0x02
-#define MDIO_PCS_DID 0x03
-#define MDIO_XGXS_DID 0x04
-
-/* Standard PMA/PMD registers and bit definitions. */
-/* Note: This is a very limited set of definitions, */
-/* only implemented features are defined. */
-#define MDIO_PMA_PMD_CR1 0x0000
-#define MDIO_PMA_PMD_CR1_RESET 0x8000
-
#define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices only */
/* Vendor-specific MDIO registers */
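
The ixgb_hw.h hunk above and the ixgb_hw.c hunks before it drop the driver's private clause-45 definitions in favour of the generic <linux/mdio.h> ones, which carry the same values. The correspondence implied by the replacements, summarised as a comment:

/*
 * Removed private define           Generic <linux/mdio.h> equivalent
 * -----------------------------    ----------------------------------------
 * MDIO_PMA_PMD_DID       (0x01) -> MDIO_MMD_PMAPMD  (PMA/PMD MMD, device 1)
 * MDIO_PMA_PMD_CR1     (0x0000) -> MDIO_CTRL1       (control register 0)
 * MDIO_PMA_PMD_CR1_RESET        -> MDIO_CTRL1_RESET (bit 15, 0x8000)
 *                      (0x8000)
 */
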
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 4a0826b8f6f..9c897cf86b9 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -266,6 +266,8 @@ ixgb_up(struct ixgb_adapter *adapter)
napi_enable(&adapter->napi);
ixgb_irq_enable(adapter);
+ netif_wake_queue(netdev);
+
mod_timer(&adapter->watchdog_timer, jiffies);
return 0;
@@ -471,10 +473,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_register;
- /* we're going to reset, so assume we have no link for now */
-
+ /* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
ixgb_check_options(adapter);
@@ -592,6 +592,8 @@ ixgb_open(struct net_device *netdev)
if (err)
goto err_setup_tx;
+ netif_carrier_off(netdev);
+
/* allocate receive descriptors */
err = ixgb_setup_rx_resources(adapter);
@@ -602,6 +604,8 @@ ixgb_open(struct net_device *netdev)
if (err)
goto err_up;
+ netif_start_queue(netdev);
+
return 0;
err_up:
@@ -1116,7 +1120,6 @@ ixgb_watchdog(unsigned long data)
adapter->link_speed = 10000;
adapter->link_duplex = FULL_DUPLEX;
netif_carrier_on(netdev);
- netif_wake_queue(netdev);
}
} else {
if (netif_carrier_ok(netdev)) {
@@ -1125,8 +1128,6 @@ ixgb_watchdog(unsigned long data)
printk(KERN_INFO "ixgb: %s NIC Link is Down\n",
netdev->name);
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
-
}
}
@@ -1139,6 +1140,8 @@ ixgb_watchdog(unsigned long data)
* to get done, so reset controller to flush Tx.
* (Do the reset outside of interrupt context). */
schedule_work(&adapter->tx_timeout_task);
+ /* return immediately since reset is imminent */
+ return;
}
}
@@ -1297,7 +1300,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
buffer_info->length = size;
WARN_ON(buffer_info->dma != 0);
buffer_info->time_stamp = jiffies;
- buffer_info->dma = map[0] + offset;
+ buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
pci_map_single(adapter->pdev,
skb->data + offset,
size,
@@ -1337,7 +1340,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
- buffer_info->dma = map[f + 1] + offset;
+ buffer_info->dma = map[f] + offset;
buffer_info->next_to_watch = 0;
len -= size;
@@ -1485,7 +1488,6 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (count) {
ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
- netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */
ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
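
Taken together, the ixgb_main.c hunks above adopt the convention that the carrier state tracks the link, while the Tx queue is started in open/up and stopped only by ring occupancy. A condensed sketch of the resulting watchdog logic (example_* is a placeholder name, not a function from this patch):

static void example_watchdog_link_event(struct net_device *netdev, bool link_up)
{
	/* Only the carrier is toggled on link changes; netif_stop_queue()
	 * and netif_wake_queue() no longer belong on this path. */
	if (link_up && !netif_carrier_ok(netdev))
		netif_carrier_on(netdev);
	else if (!link_up && netif_carrier_ok(netdev))
		netif_carrier_off(netdev);
}
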
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index d92e72bd627..371a6be4d96 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -40,7 +40,7 @@
#include <linux/sched.h>
#undef ASSERT
-#define ASSERT(x) if (!(x)) BUG()
+#define ASSERT(x) BUG_ON(!(x))
#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
#ifdef DBG
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index b3f8208ec7b..21b41f42b61 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -37,3 +37,5 @@ ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
+
+ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index c26433d1460..cd22323cfd2 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -36,6 +36,10 @@
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#define IXGBE_FCOE
+#include "ixgbe_fcoe.h"
+#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
@@ -71,6 +75,8 @@
#define IXGBE_RXBUFFER_128 128 /* Used for packet split */
#define IXGBE_RXBUFFER_256 256 /* Used for packet split */
#define IXGBE_RXBUFFER_2048 2048
+#define IXGBE_RXBUFFER_4096 4096
+#define IXGBE_RXBUFFER_8192 8192
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
@@ -84,6 +90,8 @@
#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -113,17 +121,18 @@ struct ixgbe_queue_stats {
struct ixgbe_ring {
void *desc; /* descriptor ring memory */
- dma_addr_t dma; /* phys. address of descriptor ring */
- unsigned int size; /* length in bytes */
- unsigned int count; /* amount of descriptors */
- unsigned int next_to_use;
- unsigned int next_to_clean;
-
- int queue_index; /* needed for multiqueue queue management */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
+ u8 atr_sample_rate;
+ u8 atr_count;
+ u16 count; /* amount of descriptors */
+ u16 rx_buf_len;
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u8 queue_index; /* needed for multiqueue queue management */
u16 head;
u16 tail;
@@ -131,22 +140,24 @@ struct ixgbe_ring {
unsigned int total_bytes;
unsigned int total_packets;
- u16 reg_idx; /* holds the special value that gets the hardware register
- * offset associated with this ring, which is different
- * for DCB and RSS modes */
-
#ifdef CONFIG_IXGBE_DCA
/* cpu for tx queue */
int cpu;
#endif
- struct ixgbe_queue_stats stats;
- u64 v_idx; /* maps directly to the index for this ring in the hardware
- * vector array, can also be used for finding the bit in EICR
- * and friends that represents the vector for this ring */
+ u16 work_limit; /* max work per interrupt */
+ u16 reg_idx; /* holds the special value that gets
+ * the hardware register offset
+ * associated with this ring, which is
+ * different for DCB and RSS modes
+ */
- u16 work_limit; /* max work per interrupt */
- u16 rx_buf_len;
+ struct ixgbe_queue_stats stats;
+ unsigned long reinit_state;
+ u64 rsc_count; /* stat for coalesced packets */
+
+ unsigned int size; /* length in bytes */
+ dma_addr_t dma; /* phys. address of descriptor ring */
};
enum ixgbe_ring_f_enum {
@@ -154,6 +165,10 @@ enum ixgbe_ring_f_enum {
RING_F_DCB,
RING_F_VMDQ,
RING_F_RSS,
+ RING_F_FDIR,
+#ifdef IXGBE_FCOE
+ RING_F_FCOE,
+#endif /* IXGBE_FCOE */
RING_F_ARRAY_SIZE /* must be last in enum set */
};
@@ -161,6 +176,10 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_DCB_INDICES 8
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 16
+#define IXGBE_MAX_FDIR_INDICES 64
+#ifdef IXGBE_FCOE
+#define IXGBE_MAX_FCOE_INDICES 8
+#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature {
int indices;
int mask;
@@ -178,6 +197,9 @@ struct ixgbe_ring_feature {
*/
struct ixgbe_q_vector {
struct ixgbe_adapter *adapter;
+ unsigned int v_idx; /* index of q_vector within array, also used for
+ * finding the bit in EICR and friends that
+ * represents the vector for this ring */
struct napi_struct napi;
DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -207,7 +229,15 @@ struct ixgbe_q_vector {
#define IXGBE_TX_CTXTDESC_ADV(R, i) \
(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+#define IXGBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define IXGBE_TX_DESC(R, i) IXGBE_GET_DESC(R, i, ixgbe_legacy_tx_desc)
+#define IXGBE_RX_DESC(R, i) IXGBE_GET_DESC(R, i, ixgbe_legacy_rx_desc)
+
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+#ifdef IXGBE_FCOE
+/* Use 3K as the baby jumbo frame size for FCoE */
+#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
+#endif /* IXGBE_FCOE */
#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)
@@ -229,11 +259,12 @@ struct ixgbe_adapter {
struct vlan_group *vlgrp;
u16 bd_number;
struct work_struct reset_task;
- struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
+ struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
u8 dcb_set_bitmap;
+ enum ixgbe_fc_mode last_lfc_mode;
/* Interrupt Throttle Rate */
u32 itr_setting;
@@ -294,7 +325,13 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24)
#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27)
+#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29)
+ u32 flags2;
+#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
+#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
@@ -303,6 +340,10 @@ struct ixgbe_adapter {
struct pci_dev *pdev;
struct net_device_stats net_stats;
+ u32 test_icr;
+ struct ixgbe_ring test_tx_ring;
+ struct ixgbe_ring test_rx_ring;
+
/* structs defined in ixgbe_hw.h */
struct ixgbe_hw hw;
u16 msg_enable;
@@ -325,6 +366,14 @@ struct ixgbe_adapter {
struct timer_list sfp_timer;
struct work_struct multispeed_fiber_task;
struct work_struct sfp_config_module_task;
+ u32 fdir_pballoc;
+ u32 atr_sample_rate;
+ spinlock_t fdir_perfect_lock;
+ struct work_struct fdir_reinit_task;
+#ifdef IXGBE_FCOE
+ struct ixgbe_fcoe fcoe;
+#endif /* IXGBE_FCOE */
+ u64 rsc_count;
u32 wol;
u16 eeprom_version;
};
@@ -333,6 +382,7 @@ enum ixbge_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
+ __IXGBE_FDIR_INIT_DONE,
__IXGBE_SFP_MODULE_NOT_FOUND
};
@@ -363,10 +413,77 @@ extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
-extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-void ixgbe_napi_add_all(struct ixgbe_adapter *adapter);
-void ixgbe_napi_del_all(struct ixgbe_adapter *adapter);
-extern void ixgbe_write_eitr(struct ixgbe_adapter *, int, u32);
+extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
+extern int ethtool_ioctl(struct ifreq *ifr);
+extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
+extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
+extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ struct ixgbe_atr_input *input,
+ u8 queue);
+extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ struct ixgbe_atr_input *input,
+ u16 soft_id,
+ u8 queue);
+extern u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *input, u32 key);
+extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
+ u16 vlan_id);
+extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
+ u32 src_addr);
+extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
+ u32 dst_addr);
+extern s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
+ u32 src_addr_1, u32 src_addr_2,
+ u32 src_addr_3, u32 src_addr_4);
+extern s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
+ u32 dst_addr_1, u32 dst_addr_2,
+ u32 dst_addr_3, u32 dst_addr_4);
+extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
+ u16 src_port);
+extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
+ u16 dst_port);
+extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
+ u16 flex_byte);
+extern s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
+ u8 vm_pool);
+extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
+ u8 l4type);
+extern s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input,
+ u16 *vlan_id);
+extern s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
+ u32 *src_addr);
+extern s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
+ u32 *dst_addr);
+extern s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
+ u32 *src_addr_1, u32 *src_addr_2,
+ u32 *src_addr_3, u32 *src_addr_4);
+extern s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
+ u32 *dst_addr_1, u32 *dst_addr_2,
+ u32 *dst_addr_3, u32 *dst_addr_4);
+extern s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
+ u16 *src_port);
+extern s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
+ u16 *dst_port);
+extern s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
+ u16 *flex_byte);
+extern s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input,
+ u8 *vm_pool);
+extern s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
+ u8 *l4type);
+#ifdef IXGBE_FCOE
+extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+extern int ixgbe_fso(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, u8 *hdr_len);
+extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
+extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+#endif /* IXGBE_FCOE */
#endif /* _IXGBE_H_ */
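
The new externs above expose the 82599 Flow Director (ATR) helpers. A hedged sketch of how a transmit path might build a hash-based signature filter with them; example_atr_add_filter is a hypothetical helper, struct ixgbe_atr_input is only declared elsewhere (presumably ixgbe_type.h), and the zero-initialisation and byte-order handling here are assumptions, not code from this patch:

static void example_atr_add_filter(struct ixgbe_hw *hw,
				   struct ixgbe_ring *tx_ring,
				   u32 src_ip, u32 dst_ip,
				   u16 src_port, u16 dst_port, u8 l4type)
{
	struct ixgbe_atr_input input;

	memset(&input, 0, sizeof(input));

	/* Describe the flow with the setters declared above; the byte
	 * order these helpers expect is not visible in this header and
	 * is left to the caller here. */
	ixgbe_atr_set_src_ipv4_82599(&input, src_ip);
	ixgbe_atr_set_dst_ipv4_82599(&input, dst_ip);
	ixgbe_atr_set_src_port_82599(&input, src_port);
	ixgbe_atr_set_dst_port_82599(&input, dst_port);
	ixgbe_atr_set_l4type_82599(&input, l4type);

	/* Signature filter: later packets of this flow are steered back
	 * to the queue that transmitted it. */
	ixgbe_fdir_add_signature_filter_82599(hw, &input,
					      tx_ring->queue_index);
}
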
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 4791238c3f6..b9923047ce1 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -75,18 +75,49 @@ static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
+
+ /* Call PHY identify routine to get the phy type */
+ ixgbe_identify_phy_generic(hw);
+
+ mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+ mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during get_invariants because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
s32 ret_val = 0;
u16 list_offset, data_offset;
- /* Set the bus information prior to PHY identification */
- mac->ops.get_bus_info(hw);
+ /* Identify the PHY */
+ phy->ops.identify(hw);
- /* Call PHY identify routine to get the phy type */
- ixgbe_identify_phy_generic(hw);
+ /* Overwrite the link function pointers if copper PHY */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+ mac->ops.setup_link_speed =
+ &ixgbe_setup_copper_link_speed_82598;
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_copper_link_capabilities_82598;
+ }
- /* PHY Init */
- switch (phy->type) {
+ switch (hw->phy.type) {
case ixgbe_phy_tn:
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
@@ -106,8 +137,8 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
/* Check to see if SFP+ module is supported */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
- &list_offset,
- &data_offset);
+ &list_offset,
+ &data_offset);
if (ret_val != 0) {
ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -117,21 +148,6 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
break;
}
- if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
- mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
- mac->ops.setup_link_speed =
- &ixgbe_setup_copper_link_speed_82598;
- mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_82598;
- }
-
- mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
- mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
- mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
- mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
- mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
-
out:
return ret_val;
}
@@ -149,12 +165,19 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
bool *autoneg)
{
s32 status = 0;
+ u32 autoc = 0;
/*
* Determine link capabilities based on the stored value of AUTOC,
- * which represents EEPROM defaults.
+ * which represents EEPROM defaults. If AUTOC value has not been
+ * stored, use the current register value.
*/
- switch (hw->mac.orig_autoc & IXGBE_AUTOC_LMS_MASK) {
+ if (hw->mac.orig_link_settings_stored)
+ autoc = hw->mac.orig_autoc;
+ else
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*autoneg = false;
@@ -173,9 +196,9 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
case IXGBE_AUTOC_LMS_KX4_AN:
case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
- if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP)
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
*autoneg = true;
break;
@@ -206,14 +229,13 @@ static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
*speed = 0;
*autoneg = true;
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
&speed_ability);
if (status == 0) {
- if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
+ if (speed_ability & MDIO_SPEED_10G)
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
+ if (speed_ability & MDIO_PMA_SPEED_1000)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
}
@@ -271,6 +293,17 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 rmcs_reg;
u32 reg;
+#ifdef CONFIG_DCB
+ if (hw->fc.requested_mode == ixgbe_fc_pfc)
+ goto out;
+
+#endif /* CONFIG_DCB */
+ /* Negotiate the fc mode to use */
+ ret_val = ixgbe_fc_autoneg(hw);
+ if (ret_val)
+ goto out;
+
+ /* Disable any previous flow control settings */
fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
@@ -282,14 +315,20 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
* 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause frames,
* but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but
+ * 2: Tx flow control is enabled (we can send pause frames but
* we do not support receiving pause frames).
* 3: Both Rx and Tx flow control (symmetric) are enabled.
* other: Invalid.
+#ifdef CONFIG_DCB
+ * 4: Priority Flow Control is enabled.
+#endif
*/
switch (hw->fc.current_mode) {
case ixgbe_fc_none:
- /* Flow control completely disabled by software override. */
+ /*
+ * Flow control is disabled by software override or autoneg.
+ * The code below will actually disable it in the HW.
+ */
break;
case ixgbe_fc_rx_pause:
/*
@@ -314,6 +353,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
fctrl_reg |= IXGBE_FCTRL_RFCE;
rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
break;
+#ifdef CONFIG_DCB
+ case ixgbe_fc_pfc:
+ goto out;
+ break;
+#endif /* CONFIG_DCB */
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
ret_val = -IXGBE_ERR_CONFIG;
@@ -321,7 +365,8 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
break;
}
- /* Enable 802.3x based flow control settings. */
+ /* Set 802.3x based flow control settings. */
+ fctrl_reg |= IXGBE_FCTRL_DPF;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
@@ -340,7 +385,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
}
/* Configure pause time (2 TCs per register) */
- reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num));
+ reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
if ((packetbuf_num & 1) == 0)
reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
else
@@ -354,77 +399,6 @@ out:
}
/**
- * ixgbe_setup_fc_82598 - Configure flow control settings
- * @hw: pointer to hardware structure
- * @packetbuf_num: packet buffer number (0-7)
- *
- * Configures the flow control settings based on SW configuration. This
- * function is used for 802.3x flow control configuration only.
- **/
-static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
-{
- s32 ret_val = 0;
- ixgbe_link_speed speed;
- bool link_up;
-
- /* Validate the packetbuf configuration */
- if (packetbuf_num < 0 || packetbuf_num > 7) {
- hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
- " 0-7\n", packetbuf_num);
- ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
- goto out;
- }
-
- /*
- * Validate the water mark configuration. Zero water marks are invalid
- * because it causes the controller to just blast out fc packets.
- */
- if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
- ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
- goto out;
- }
-
- /*
- * Validate the requested mode. Strict IEEE mode does not allow
- * ixgbe_fc_rx_pause because it will cause testing anomalies.
- */
- if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
- hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
- goto out;
- }
-
- /*
- * 10gig parts do not have a word in the EEPROM to determine the
- * default flow control setting, so we explicitly set it to full.
- */
- if (hw->fc.requested_mode == ixgbe_fc_default)
- hw->fc.requested_mode = ixgbe_fc_full;
-
- /*
- * Save off the requested flow control mode for use later. Depending
- * on the link partner's capabilities, we may or may not use this mode.
- */
-
- hw->fc.current_mode = hw->fc.requested_mode;
-
- /* Decide whether to use autoneg or not. */
- hw->mac.ops.check_link(hw, &speed, &link_up, false);
- if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber &&
- (speed == IXGBE_LINK_SPEED_1GB_FULL))
- ret_val = ixgbe_fc_autoneg(hw);
-
- if (ret_val)
- goto out;
-
- ret_val = ixgbe_fc_enable_82598(hw, packetbuf_num);
-
-out:
- return ret_val;
-}
-
-/**
* ixgbe_setup_mac_link_82598 - Configures MAC link settings
* @hw: pointer to hardware structure
*
@@ -463,13 +437,6 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
}
}
- /*
- * We want to save off the original Flow Control configuration just in
- * case we get disconnected and then reconnected into a different hub
- * or switch with different Flow Control capabilities.
- */
- ixgbe_setup_fc_82598(hw, 0);
-
/* Add delay to filter out noises during initial link setup */
msleep(50);
@@ -500,9 +467,9 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
* clear indicates active; set indicates inactive.
*/
if (hw->phy.type == ixgbe_phy_nl) {
- hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
- hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
- hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
+ hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
&adapt_comp_reg);
if (link_up_wait_to_complete) {
for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
@@ -515,10 +482,10 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
}
msleep(100);
hw->phy.ops.read_reg(hw, 0xC79F,
- IXGBE_TWINAX_DEV,
+ MDIO_MMD_PMAPMD,
&link_reg);
hw->phy.ops.read_reg(hw, 0xC00C,
- IXGBE_TWINAX_DEV,
+ MDIO_MMD_PMAPMD,
&adapt_comp_reg);
}
} else {
@@ -556,6 +523,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
+ /* if link is down, zero out the current_mode */
+ if (*link_up == false) {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw->fc.fc_was_autonegged = false;
+ }
out:
return 0;
}
@@ -673,6 +645,7 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
s32 status = 0;
+ s32 phy_status = 0;
u32 ctrl;
u32 gheccr;
u32 i;
@@ -716,14 +689,27 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
}
/* Reset PHY */
- if (hw->phy.reset_disable == false)
+ if (hw->phy.reset_disable == false) {
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Init PHY and function pointers, perform SFP setup */
+ phy_status = hw->phy.ops.init(hw);
+ if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+ else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto no_phy_reset;
+
+
hw->phy.ops.reset(hw);
+ }
+no_phy_reset:
/*
* Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests before reset
*/
- if (ixgbe_disable_pcie_master(hw) != 0) {
+ status = ixgbe_disable_pcie_master(hw);
+ if (status != 0) {
status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
}
@@ -767,9 +753,19 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
}
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table
+ */
+ hw->mac.ops.init_rx_addrs(hw);
+
/* Store the permanent mac address */
hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+reset_hw_out:
+ if (phy_status)
+ status = phy_status;
+
return status;
}
@@ -954,14 +950,14 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
hw->phy.ops.write_reg(hw,
IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ MDIO_MMD_PMAPMD,
sfp_addr);
/* Poll status */
for (i = 0; i < 100; i++) {
hw->phy.ops.read_reg(hw,
IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ MDIO_MMD_PMAPMD,
&sfp_stat);
sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
@@ -977,7 +973,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
/* Read data */
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
+ MDIO_MMD_PMAPMD, &sfp_data);
*eeprom_data = (u8)(sfp_data >> 8);
} else {
@@ -998,35 +994,56 @@ out:
static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u16 ext_ability = 0;
+
+ hw->phy.ops.identify(hw);
+
+ /* Copper PHY must be checked before AUTOC LMS to determine correct
+ * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
+ if (hw->phy.type == ixgbe_phy_tn ||
+ hw->phy.type == ixgbe_phy_cu_unknown) {
+ hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
+ &ext_ability);
+ if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ goto out;
+ }
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82598:
- /* Default device ID is mezzanine card KX/KX4 */
- physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
- IXGBE_PHYSICAL_LAYER_1000BASE_KX);
- break;
- case IXGBE_DEV_ID_82598_BX:
- physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
- case IXGBE_DEV_ID_82598EB_CX4:
- case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
- break;
- case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
- physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_AN:
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ else
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
break;
- case IXGBE_DEV_ID_82598AF_DUAL_PORT:
- case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
- case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+ else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ else /* XAUI */
+ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
break;
- case IXGBE_DEV_ID_82598EB_XF_LR:
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ case IXGBE_AUTOC_LMS_KX4_AN:
+ case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
break;
- case IXGBE_DEV_ID_82598AT:
- physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_T |
- IXGBE_PHYSICAL_LAYER_1000BASE_T);
+ default:
break;
- case IXGBE_DEV_ID_82598EB_SFP_LOM:
+ }
+
+ if (hw->phy.type == ixgbe_phy_nl) {
hw->phy.ops.identify_sfp(hw);
switch (hw->phy.sfp_type) {
@@ -1043,13 +1060,25 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
break;
}
- break;
+ }
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ break;
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
default:
- physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
break;
}
+out:
return physical_layer;
}
@@ -1086,7 +1115,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.disable_mc = &ixgbe_disable_mc_generic,
.clear_vfta = &ixgbe_clear_vfta_82598,
.set_vfta = &ixgbe_set_vfta_82598,
- .setup_fc = &ixgbe_setup_fc_82598,
+ .fc_enable = &ixgbe_fc_enable_82598,
};
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
@@ -1099,6 +1128,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
static struct ixgbe_phy_operations phy_ops_82598 = {
.identify = &ixgbe_identify_phy_generic,
.identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .init = &ixgbe_init_phy_ops_82598,
.reset = &ixgbe_reset_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
.write_reg = &ixgbe_write_phy_reg_generic,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 29771fbaa42..1984cab7d48 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -71,10 +71,10 @@ s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_start_hw_rev_0_82599(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
@@ -100,22 +100,36 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
+
+ hw->phy.ops.reset = NULL;
+
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
&data_offset);
if (ret_val != 0)
goto setup_sfp_out;
+ /* PHY config will finish before releasing the semaphore */
+ ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != 0) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto setup_sfp_out;
+ }
+
hw->eeprom.ops.read(hw, ++data_offset, &data_value);
while (data_value != 0xffff) {
IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
IXGBE_WRITE_FLUSH(hw);
hw->eeprom.ops.read(hw, ++data_offset, &data_value);
}
- /* Now restart DSP */
- IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000102);
- IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000b1d);
- IXGBE_WRITE_FLUSH(hw);
+ /* Now restart DSP by setting Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
+ (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
+
+ /* Release the semaphore */
+ ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ /* Delay obtaining semaphore again to allow FW access */
+ msleep(hw->eeprom.semaphore_delay);
}
setup_sfp_out:
@@ -146,51 +160,60 @@ u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
- struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
- /* Set the bus information prior to PHY identification */
- mac->ops.get_bus_info(hw);
+ ixgbe_init_mac_link_ops_82599(hw);
- /* Call PHY identify routine to get the Cu or SFI phy type */
- ret_val = phy->ops.identify(hw);
+ mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
+ mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
- if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
- goto get_invariants_out;
+ return 0;
+}
- ixgbe_init_mac_link_ops_82599(hw);
+/**
+ * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during get_invariants because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
- /* Setup SFP module if there is one present. */
- ret_val = mac->ops.setup_sfp(hw);
+ /* Identify the PHY or SFP module */
+ ret_val = phy->ops.identify(hw);
+
+ /* Setup function pointers based on detected SFP module and speeds */
+ ixgbe_init_mac_link_ops_82599(hw);
/* If copper media, overwrite with copper function pointers */
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
mac->ops.setup_link_speed =
- &ixgbe_setup_copper_link_speed_82599;
+ &ixgbe_setup_copper_link_speed_82599;
mac->ops.get_link_capabilities =
&ixgbe_get_copper_link_capabilities_82599;
}
- /* PHY Init */
+ /* Set necessary function pointers based on phy type */
switch (hw->phy.type) {
case ixgbe_phy_tn:
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ &ixgbe_get_phy_firmware_version_tnx;
break;
default:
break;
}
- mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
- mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
- mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
- mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
- mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
-
-get_invariants_out:
return ret_val;
}
@@ -207,8 +230,19 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
bool *negotiation)
{
s32 status = 0;
+ u32 autoc = 0;
+
+ /*
+ * Determine link capabilities based on the stored value of AUTOC,
+ * which represents EEPROM defaults. If AUTOC value has not been
+ * stored, use the current register value.
+ */
+ if (hw->mac.orig_link_settings_stored)
+ autoc = hw->mac.orig_autoc;
+ else
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- switch (hw->mac.orig_autoc & IXGBE_AUTOC_LMS_MASK) {
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = false;
@@ -232,22 +266,22 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
case IXGBE_AUTOC_LMS_KX4_KX_KR:
case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
- if (hw->mac.orig_autoc & IXGBE_AUTOC_KR_SUPP)
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP)
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = true;
break;
case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.orig_autoc & IXGBE_AUTOC_KR_SUPP)
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP)
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = true;
break;
@@ -291,14 +325,13 @@ static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
*speed = 0;
*autoneg = true;
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
&speed_ability);
if (status == 0) {
- if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
+ if (speed_ability & MDIO_SPEED_10G)
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
+ if (speed_ability & MDIO_PMA_SPEED_1000)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
}
@@ -323,8 +356,8 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
}
switch (hw->device_id) {
- case IXGBE_DEV_ID_82599:
case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_XAUI_LOM:
/* Default device ID is mezzanine card KX/KX4 */
media_type = ixgbe_media_type_backplane;
break;
@@ -380,9 +413,6 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw)
}
}
- /* Set up flow control */
- status = ixgbe_setup_fc_generic(hw, 0);
-
/* Add delay to filter out noises during initial link setup */
msleep(50);
@@ -428,11 +458,31 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
bool link_up = false;
bool negotiation;
+ int i;
/* Mask off requested but non-supported speeds */
hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation);
speed &= phy_link_speed;
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ /*
+ * When the driver changes the link speeds that it can support,
+ * it sets autotry_restart to true to indicate that we need to
+ * initiate a new autotry session with the link partner. To do
+ * so, we set the speed then disable and re-enable the tx laser, to
+ * alert the link partner that it also needs to restart autotry on its
+ * end. This is consistent with true clause 37 autoneg, which also
+ * involves a loss of signal.
+ */
+
/*
* Try each speed one by one, highest priority first. We do this in
* software because 10gb fiber doesn't support speed autonegotiation.
@@ -441,21 +491,52 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
speedcnt++;
highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
- /* Set hardware SDP's */
+ /* If we already have link at this speed, just jump out */
+ hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+
+ if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- ixgbe_setup_mac_link_speed_82599(hw,
- IXGBE_LINK_SPEED_10GB_FULL,
- autoneg,
- autoneg_wait_to_complete);
+ /* Allow module to change analog characteristics (1G->10G) */
+ msleep(40);
- msleep(50);
-
- /* If we have link, just jump out */
- hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
- if (link_up)
+ status = ixgbe_setup_mac_link_speed_82599(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
+ if (status != 0)
goto out;
+
+ /* Flap the tx laser if it has not already been done */
+ if (hw->mac.autotry_restart) {
+ /* Disable tx laser; allow 100us to go dark per spec */
+ esdp_reg |= IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ udelay(100);
+
+ /* Enable tx laser; allow 2ms to light up per spec */
+ esdp_reg &= ~IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ msleep(2);
+
+ hw->mac.autotry_restart = false;
+ }
+
+ /* The controller may take up to 500ms at 10g to acquire link */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msleep(100);
+
+ /* If we have link, just jump out */
+ hw->mac.ops.check_link(hw, &phy_link_speed,
+ &link_up, false);
+ if (link_up)
+ goto out;
+ }
}
if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
@@ -463,16 +544,44 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
- /* Set hardware SDP's */
+ /* If we already have link at this speed, just jump out */
+ hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+
+ if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
esdp_reg &= ~IXGBE_ESDP_SDP5;
esdp_reg |= IXGBE_ESDP_SDP5_DIR;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- ixgbe_setup_mac_link_speed_82599(
- hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
- autoneg_wait_to_complete);
+ /* Allow module to change analog characteristics (10G->1G) */
+ msleep(40);
- msleep(50);
+ status = ixgbe_setup_mac_link_speed_82599(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ goto out;
+
+ /* Flap the tx laser if it has not already been done */
+ if (hw->mac.autotry_restart) {
+ /* Disable tx laser; allow 100us to go dark per spec */
+ esdp_reg |= IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ udelay(100);
+
+ /* Enable tx laser; allow 2ms to light up per spec */
+ esdp_reg &= ~IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ msleep(2);
+
+ hw->mac.autotry_restart = false;
+ }
+
+ /* Wait for the link partner to also set speed */
+ msleep(100);
/* If we have link, just jump out */
hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
@@ -538,6 +647,11 @@ s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
else
*speed = IXGBE_LINK_SPEED_100_FULL;
+ /* if link is down, zero out the current_mode */
+ if (*link_up == false) {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw->fc.fc_was_autonegged = false;
+ }
return 0;
}
@@ -558,6 +672,8 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
s32 status = 0;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ u32 start_autoc = autoc;
+ u32 orig_autoc = 0;
u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
@@ -571,15 +687,25 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
status = IXGBE_ERR_LINK_SETUP;
- } else if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
- link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
- link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ goto out;
+ }
+
+ /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support */
+ if (hw->mac.orig_link_settings_stored)
+ orig_autoc = hw->mac.orig_autoc;
+ else
+ orig_autoc = autoc;
+
+
+ if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
/* Set KX4/KX/KR support according to speed requested */
autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+ if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
autoc |= IXGBE_AUTOC_KX4_SUPP;
- if (hw->mac.orig_autoc & IXGBE_AUTOC_KR_SUPP)
+ if (orig_autoc & IXGBE_AUTOC_KR_SUPP)
autoc |= IXGBE_AUTOC_KR_SUPP;
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
@@ -605,7 +731,7 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
}
}
- if (status == 0) {
+ if (autoc != start_autoc) {
/* Restart link */
autoc |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
@@ -632,13 +758,11 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
}
}
- /* Set up flow control */
- status = ixgbe_setup_fc_generic(hw, 0);
-
/* Add delay to filter out noises during initial link setup */
msleep(50);
}
+out:
return status;
}
@@ -705,14 +829,30 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Init PHY and function pointers, perform SFP setup */
+ status = hw->phy.ops.init(hw);
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+
+ /* Setup SFP module if there is one present. */
+ if (hw->phy.sfp_setup_needed) {
+ status = hw->mac.ops.setup_sfp(hw);
+ hw->phy.sfp_setup_needed = false;
+ }
+
/* Reset PHY */
- hw->phy.ops.reset(hw);
+ if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
+ hw->phy.ops.reset(hw);
/*
 * Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests before reset
*/
- if (ixgbe_disable_pcie_master(hw) != 0) {
+ status = ixgbe_disable_pcie_master(hw);
+ if (status != 0) {
status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
}
@@ -770,9 +910,30 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
}
}
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
/* Store the permanent mac address */
hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+reset_hw_out:
return status;
}
@@ -1004,6 +1165,931 @@ s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+{
+ int i;
+ u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
+
+ /*
+ * Before starting the reinitialization process,
+ * FDIRCMD.CMD must be zero.
+ */
+ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+ if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ IXGBE_FDIRCMD_CMD_MASK))
+ break;
+ udelay(10);
+ }
+ if (i >= IXGBE_FDIRCMD_CMD_POLL) {
+ hw_dbg(hw, "Flow Director previous command isn't complete, "
+ "aborting table re-initialization.\n");
+ return IXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
+ IXGBE_WRITE_FLUSH(hw);
+ /*
+ * The 82599 flow director init flow cannot simply be restarted.
+ * Work around the 82599 silicon errata by performing the following
+ * steps before re-writing the FDIRCTRL control register with the
+ * same value:
+ * - write 1 to bit 8 of the FDIRCMD register, then
+ * - write 0 to bit 8 of the FDIRCMD register
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ /*
+ * Clear FDIR Hash register to clear any leftover hashes
+ * waiting to be programmed.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll init-done after we write FDIRCTRL register */
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ udelay(10);
+ }
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+ hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
+ return IXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ /* Clear FDIR statistics registers (read to clear) */
+ IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+ IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
+ IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+
+ return 0;
+}
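The FDIRCMD poll and the INIT_DONE poll above both follow the same bounded-poll pattern: read a register up to a fixed number of times, break as soon as the expected bit is seen, and treat exhaustion of the loop counter as a timeout. A minimal user-space sketch of that pattern, where fake_read_fdirctrl() and the 0x8 bit are stand-ins rather than the driver's IXGBE_READ_REG()/IXGBE_FDIRCTRL_INIT_DONE:

#include <stdint.h>
#include <stdio.h>

#define POLL_LIMIT 10

/* Stand-in for IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); pretends the
 * "init done" bit (0x8 here, made up) appears on the 4th read. */
static uint32_t fake_read_fdirctrl(void)
{
	static int reads;

	return (++reads >= 4) ? 0x8 : 0x0;
}

int main(void)
{
	int i;

	for (i = 0; i < POLL_LIMIT; i++) {
		if (fake_read_fdirctrl() & 0x8)
			break;
		/* the driver would udelay(10) between reads here */
	}

	if (i >= POLL_LIMIT)
		fprintf(stderr, "init-done poll time exceeded\n");
	else
		printf("init done after %d polls\n", i + 1);

	return 0;
}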
+
+/**
+ * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * @hw: pointer to hardware structure
+ * @pballoc: which mode to allocate filters with
+ **/
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
+{
+ u32 fdirctrl = 0;
+ u32 pbsize;
+ int i;
+
+ /*
+ * Before enabling Flow Director, the Rx Packet Buffer size
+ * must be reduced. The new value is the current size minus
+ * flow director memory usage size.
+ */
+ pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
+ (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
+
+ /*
+ * The hardware defaults for RX PB 1-7 are not zero, so they should be
+ * initialized to zero in non-DCB mode; otherwise the actual total RX PB
+ * would be bigger than programmed and filter space would run into
+ * the PB 0 region.
+ */
+ for (i = 1; i < 8; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+
+ /* Send interrupt when 64 filters are left */
+ fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
+
+ /* Set the maximum length per hash bucket to 0xA filters */
+ fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
+
+ switch (pballoc) {
+ case IXGBE_FDIR_PBALLOC_64K:
+ /* 8k - 1 signature filters */
+ fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+ break;
+ case IXGBE_FDIR_PBALLOC_128K:
+ /* 16k - 1 signature filters */
+ fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+ break;
+ case IXGBE_FDIR_PBALLOC_256K:
+ /* 32k - 1 signature filters */
+ fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+ break;
+ default:
+ /* bad value */
+ return IXGBE_ERR_CONFIG;
+ };
+
+ /* Move the flexible bytes to use the ethertype - shift 6 words */
+ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
+
+ fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
+
+ /* Prime the keys for hashing */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
+ htonl(IXGBE_ATR_BUCKET_HASH_KEY));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
+ htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+
+ /*
+ * Poll init-done after we write the register. Estimated times:
+ * 10G: PBALLOC = 11b, timing is 60us
+ * 1G: PBALLOC = 11b, timing is 600us
+ * 100M: PBALLOC = 11b, timing is 6ms
+ *
+ * Multiply these timings by 4 if under full Rx load
+ *
+ * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+ * 1 msec per poll time. If we're at line rate and drop to 100M, then
+ * this might not finish in our poll time, but we can live with that
+ * for now.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msleep(1);
+ }
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+ hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * @hw: pointer to hardware structure
+ * @pballoc: which mode to allocate filters with
+ **/
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
+{
+ u32 fdirctrl = 0;
+ u32 pbsize;
+ int i;
+
+ /*
+ * Before enabling Flow Director, the Rx Packet Buffer size
+ * must be reduced. The new value is the current size minus
+ * flow director memory usage size.
+ */
+ pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
+ (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
+
+ /*
+ * The hardware defaults for RX PB 1-7 are not zero, so they should be
+ * initialized to zero in non-DCB mode; otherwise the actual total RX PB
+ * would be bigger than programmed and filter space would run into
+ * the PB 0 region.
+ */
+ for (i = 1; i < 8; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+
+ /* Send interrupt when 64 filters are left */
+ fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
+
+ switch (pballoc) {
+ case IXGBE_FDIR_PBALLOC_64K:
+ /* 2k - 1 perfect filters */
+ fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+ break;
+ case IXGBE_FDIR_PBALLOC_128K:
+ /* 4k - 1 perfect filters */
+ fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+ break;
+ case IXGBE_FDIR_PBALLOC_256K:
+ /* 8k - 1 perfect filters */
+ fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+ break;
+ default:
+ /* bad value */
+ return IXGBE_ERR_CONFIG;
+ };
+
+ /* Turn perfect match filtering on */
+ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
+ fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
+
+ /* Move the flexible bytes to use the ethertype - shift 6 words */
+ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
+
+ /* Prime the keys for hashing */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
+ htonl(IXGBE_ATR_BUCKET_HASH_KEY));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
+ htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+
+ /*
+ * Poll init-done after we write the register. Estimated times:
+ * 10G: PBALLOC = 11b, timing is 60us
+ * 1G: PBALLOC = 11b, timing is 600us
+ * 100M: PBALLOC = 11b, timing is 6ms
+ *
+ * Multiply these timings by 4 if under full Rx load
+ *
+ * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+ * 1 msec per poll time. If we're at line rate and drop to 100M, then
+ * this might not finish in our poll time, but we can live with that
+ * for now.
+ */
+
+ /* Set the maximum length per hash bucket to 0xA filters */
+ fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msleep(1);
+ }
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+ hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
+
+ return 0;
+}
+
+
+/**
+ * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
+ * @stream: input bitstream to compute the hash on
+ * @key: 32-bit hash key
+ **/
+u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
+{
+ /*
+ * The algorithm is as follows:
+ * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
+ * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
+ * and A[n] x B[n] is bitwise AND between same length strings
+ *
+ * K[n] is 16 bits, defined as:
+ * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
+ * for n modulo 32 < 15, K[n] =
+ * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
+ *
+ * S[n] is 16 bits, defined as:
+ * for n >= 15, S[n] = S[n:n - 15]
+ * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
+ *
+ * To simplify for programming, the algorithm is implemented
+ * in software this way:
+ *
+ * Key[31:0], Stream[335:0]
+ *
+ * tmp_key[11 * 32 - 1:0] = 11{Key[31:0]}, i.e. key concatenated 11 times
+ * int_key[350:0] = tmp_key[351:1]
+ * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
+ *
+ * hash[15:0] = 0;
+ * for (i = 0; i < 351; i++) {
+ * if (int_key[i])
+ * hash ^= int_stream[(i + 15):i];
+ * }
+ */
+
+ union {
+ u64 fill[6];
+ u32 key[11];
+ u8 key_stream[44];
+ } tmp_key;
+
+ u8 *stream = (u8 *)atr_input;
+ u8 int_key[44]; /* upper-most bit unused */
+ u8 hash_str[46]; /* upper-most 2 bits unused */
+ u16 hash_result = 0;
+ int i, j, k, h;
+
+ /*
+ * Initialize the fill member to prevent warnings
+ * on some compilers
+ */
+ tmp_key.fill[0] = 0;
+
+ /* First load the temporary key stream */
+ for (i = 0; i < 6; i++) {
+ u64 fillkey = ((u64)key << 32) | key;
+ tmp_key.fill[i] = fillkey;
+ }
+
+ /*
+ * Set the interim key for the hashing. Bit 352 is unused, so we must
+ * shift and compensate when building the key.
+ */
+
+ int_key[0] = tmp_key.key_stream[0] >> 1;
+ for (i = 1, j = 0; i < 44; i++) {
+ unsigned int this_key = tmp_key.key_stream[j] << 7;
+ j++;
+ int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
+ }
+
+ /*
+ * Set the interim bit string for the hashing. Bits 368 and 367 are
+ * unused, so shift and compensate when building the string.
+ */
+ hash_str[0] = (stream[40] & 0x7f) >> 1;
+ for (i = 1, j = 40; i < 46; i++) {
+ unsigned int this_str = stream[j] << 7;
+ j++;
+ if (j > 41)
+ j = 0;
+ hash_str[i] = (u8)(this_str | (stream[j] >> 1));
+ }
+
+ /*
+ * Now compute the hash. i is the index into hash_str, j is into our
+ * key stream, k is counting the number of bits, and h iterates within
+ * each byte.
+ */
+ for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
+ for (h = 0; h < 8 && k < 351; h++, k++) {
+ if (int_key[j] & (1 << h)) {
+ /*
+ * Key bit is set, XOR in the current 16-bit
+ * string. Example of processing:
+ * h = 0,
+ * tmp = (hash_str[i - 2] & 0 << 16) |
+ * (hash_str[i - 1] & 0xff << 8) |
+ * (hash_str[i] & 0xff >> 0)
+ * So tmp = hash_str[15 + k:k], since the
+ * i + 2 clause rolls off the 16-bit value
+ * h = 7,
+ * tmp = (hash_str[i - 2] & 0x7f << 9) |
+ * (hash_str[i - 1] & 0xff << 1) |
+ * (hash_str[i] & 0x80 >> 7)
+ */
+ int tmp = (hash_str[i] >> h);
+ tmp |= (hash_str[i - 1] << (8 - h));
+ tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
+ << (16 - h);
+ hash_result ^= (u16)tmp;
+ }
+ }
+ }
+
+ return hash_result;
+}
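The comment block above boils down to: for every set bit i of the interim key, XOR the 16-bit window of the interim bit string starting at bit i into the running 16-bit result. A simplified stand-alone sketch of that idea over small arrays; the bit ordering, stream width, and wrap-around here are illustrative only and do not reproduce the 82599's exact int_key/int_stream layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int get_bit(const uint8_t *buf, size_t bit)
{
	return (buf[bit / 8] >> (bit % 8)) & 1;
}

/* 16-bit window of the stream starting at bit 'start', wrapping around */
static uint16_t window16(const uint8_t *buf, size_t nbits, size_t start)
{
	uint16_t w = 0;
	int b;

	for (b = 0; b < 16; b++)
		w |= (uint16_t)(get_bit(buf, (start + b) % nbits) << b);
	return w;
}

static uint16_t toy_atr_hash(const uint8_t *stream, size_t stream_bits,
			     const uint8_t *key, size_t key_bits)
{
	uint16_t hash = 0;
	size_t i;

	for (i = 0; i < key_bits; i++)
		if (get_bit(key, i))
			hash ^= window16(stream, stream_bits, i);
	return hash;
}

int main(void)
{
	uint8_t stream[8] = { 0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x03, 0x04 };
	uint8_t key[4] = { 0x3d, 0x2c, 0x1b, 0x0a };

	printf("toy hash = 0x%04x\n", toy_atr_hash(stream, 64, key, 32));
	return 0;
}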
+
+/**
+ * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
+ * @input: input stream to modify
+ * @vlan: the VLAN id to load
+ **/
+s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
+{
+ input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
+ input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
+ * @input: input stream to modify
+ * @src_addr: the IP address to load
+ **/
+s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
+{
+ input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
+ input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
+ (src_addr >> 16) & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
+ (src_addr >> 8) & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
+ * @input: input stream to modify
+ * @dst_addr: the IP address to load
+ **/
+s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
+{
+ input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
+ input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
+ (dst_addr >> 16) & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
+ (dst_addr >> 8) & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
+ * @input: input stream to modify
+ * @src_addr_1: the first 4 bytes of the IP address to load
+ * @src_addr_2: the second 4 bytes of the IP address to load
+ * @src_addr_3: the third 4 bytes of the IP address to load
+ * @src_addr_4: the fourth 4 bytes of the IP address to load
+ **/
+s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
+ u32 src_addr_1, u32 src_addr_2,
+ u32 src_addr_3, u32 src_addr_4)
+{
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
+ (src_addr_4 >> 8) & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
+ (src_addr_4 >> 16) & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
+
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
+ (src_addr_3 >> 8) & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
+ (src_addr_3 >> 16) & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
+
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
+ (src_addr_2 >> 8) & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
+ (src_addr_2 >> 16) & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
+
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
+ (src_addr_1 >> 8) & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
+ (src_addr_1 >> 16) & 0xff;
+ input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
+ * @input: input stream to modify
+ * @dst_addr_1: the first 4 bytes of the IP address to load
+ * @dst_addr_2: the second 4 bytes of the IP address to load
+ * @dst_addr_3: the third 4 bytes of the IP address to load
+ * @dst_addr_4: the fourth 4 bytes of the IP address to load
+ **/
+s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
+ u32 dst_addr_1, u32 dst_addr_2,
+ u32 dst_addr_3, u32 dst_addr_4)
+{
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
+ (dst_addr_4 >> 8) & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
+ (dst_addr_4 >> 16) & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
+
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
+ (dst_addr_3 >> 8) & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
+ (dst_addr_3 >> 16) & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
+
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
+ (dst_addr_2 >> 8) & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
+ (dst_addr_2 >> 16) & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
+
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
+ (dst_addr_1 >> 8) & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
+ (dst_addr_1 >> 16) & 0xff;
+ input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_set_src_port_82599 - Sets the source port
+ * @input: input stream to modify
+ * @src_port: the source port to load
+ **/
+s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
+{
+ input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
+ input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_set_dst_port_82599 - Sets the destination port
+ * @input: input stream to modify
+ * @dst_port: the destination port to load
+ **/
+s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
+{
+ input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
+ input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
+ * @input: input stream to modify
+ * @flex_bytes: the flexible bytes to load
+ **/
+s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
+{
+ input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
+ input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
+ * @input: input stream to modify
+ * @vm_pool: the Virtual Machine pool to load
+ **/
+s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
+{
+ input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
+ * @input: input stream to modify
+ * @l4type: the layer 4 type value to load
+ **/
+s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
+{
+ input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
+ * @input: input stream to search
+ * @vlan: the VLAN id to load
+ **/
+s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
+{
+ *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
+ *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
+ * @input: input stream to search
+ * @src_addr: the IP address to load
+ **/
+s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
+{
+ *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
+ *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
+ *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
+ *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
+ * @input: input stream to search
+ * @dst_addr: the IP address to load
+ **/
+s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr)
+{
+ *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
+ *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
+ *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
+ *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
+ * @input: input stream to search
+ * @src_addr_1: the first 4 bytes of the IP address to load
+ * @src_addr_2: the second 4 bytes of the IP address to load
+ * @src_addr_3: the third 4 bytes of the IP address to load
+ * @src_addr_4: the fourth 4 bytes of the IP address to load
+ **/
+s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
+ u32 *src_addr_1, u32 *src_addr_2,
+ u32 *src_addr_3, u32 *src_addr_4)
+{
+ *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
+ *src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
+ *src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
+ *src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
+
+ *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
+ *src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
+ *src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
+ *src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
+
+ *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
+ *src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
+ *src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
+ *src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
+
+ *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
+ *src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
+ *src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
+ *src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
+ * @input: input stream to search
+ * @dst_addr_1: the first 4 bytes of the IP address to load
+ * @dst_addr_2: the second 4 bytes of the IP address to load
+ * @dst_addr_3: the third 4 bytes of the IP address to load
+ * @dst_addr_4: the fourth 4 bytes of the IP address to load
+ **/
+s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
+ u32 *dst_addr_1, u32 *dst_addr_2,
+ u32 *dst_addr_3, u32 *dst_addr_4)
+{
+ *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
+ *dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
+ *dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
+ *dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
+
+ *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
+ *dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
+ *dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
+ *dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
+
+ *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
+ *dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
+ *dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
+ *dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
+
+ *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
+ *dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
+ *dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
+ *dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_get_src_port_82599 - Gets the source port
+ * @input: input stream to modify
+ * @src_port: the source port to load
+ *
+ * Even though the input is given in big-endian, the FDIRPORT registers
+ * expect the ports to be programmed in little-endian. Hence the need to swap
+ * endianness when retrieving the data. This can be confusing since the
+ * internal hash engine expects it to be big-endian.
+ **/
+s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port)
+{
+ *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
+ *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_get_dst_port_82599 - Gets the destination port
+ * @input: input stream to modify
+ * @dst_port: the destination port to load
+ *
+ * Even though the input is given in big-endian, the FDIRPORT registers
+ * expect the ports to be programmed in little-endian. Hence the need to swap
+ * endianness when retrieving the data. This can be confusing since the
+ * internal hash engine expects it to be big-endian.
+ **/
+s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port)
+{
+ *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
+ *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
+
+ return 0;
+}
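As the comments for the port getters note, the setters above store each 16-bit field least-significant byte first, while the getters read it back most-significant byte first; that is exactly the byte swap needed before the value is written to FDIRPORT. A tiny stand-alone round trip showing the effect (TOY_SRC_PORT_OFFSET is a made-up offset, not the driver's IXGBE_ATR_SRC_PORT_OFFSET):

#include <stdint.h>
#include <stdio.h>

#define TOY_SRC_PORT_OFFSET 0	/* hypothetical offset into the stream */

int main(void)
{
	uint8_t byte_stream[2];
	uint16_t src_port = 0x1f90;	/* port 8080 */
	uint16_t readback;

	/* setter layout: low byte first, high byte second */
	byte_stream[TOY_SRC_PORT_OFFSET + 1] = src_port >> 8;
	byte_stream[TOY_SRC_PORT_OFFSET] = src_port & 0xff;

	/* getter swaps the bytes so the result is register-ready */
	readback = byte_stream[TOY_SRC_PORT_OFFSET] << 8;
	readback |= byte_stream[TOY_SRC_PORT_OFFSET + 1];

	printf("stored 0x%04x, register-ready value 0x%04x\n",
	       src_port, readback);
	return 0;
}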
+
+/**
+ * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
+ * @input: input stream to modify
+ * @flex_bytes: the flexible bytes to load
+ **/
+s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte)
+{
+ *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
+ *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
+ * @input: input stream to modify
+ * @vm_pool: the Virtual Machine pool to load
+ **/
+s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool)
+{
+ *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
+ * @input: input stream to modify
+ * @l4type: the layer 4 type value to load
+ **/
+s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
+{
+ *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
+
+ return 0;
+}
+
+/**
+ * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @stream: input bitstream
+ * @queue: queue index to direct traffic to
+ **/
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ struct ixgbe_atr_input *input,
+ u8 queue)
+{
+ u64 fdirhashcmd;
+ u64 fdircmd;
+ u32 fdirhash;
+ u16 bucket_hash, sig_hash;
+ u8 l4type;
+
+ bucket_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY);
+
+ /* bucket_hash is only 15 bits */
+ bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+ sig_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ /* Get the l4type in order to program FDIRCMD properly */
+ /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
+ ixgbe_atr_get_l4type_82599(input, &l4type);
+
+ /*
+ * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+ * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ */
+ fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
+
+ fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
+
+ switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
+ case IXGBE_ATR_L4TYPE_TCP:
+ fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
+ break;
+ case IXGBE_ATR_L4TYPE_UDP:
+ fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
+ break;
+ case IXGBE_ATR_L4TYPE_SCTP:
+ fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+ break;
+ default:
+ hw_dbg(hw, "Error on l4type input\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
+ fdircmd |= IXGBE_FDIRCMD_IPV6;
+
+ fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
+ fdirhashcmd = ((fdircmd << 32) | fdirhash);
+
+ IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+
+ return 0;
+}
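The routine packs the FDIRCMD bits into the upper 32 bits and the FDIRHASH bits into the lower 32 bits of one 64-bit value, so a single 64-bit write to the FDIRHASH address programs both registers. A minimal sketch of just that packing step, using invented bit values instead of the real IXGBE_FDIRCMD_*/IXGBE_FDIRHASH_* definitions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical sig_hash << 16 | bucket_hash */
	uint32_t fdirhash = (0x1234u << 16) | 0x5678u;
	/* hypothetical command bits (add-flow, queue enable, ...) */
	uint64_t fdircmd = 0x8005;
	uint64_t fdirhashcmd = (fdircmd << 32) | fdirhash;

	printf("64-bit FDIRHASH write: 0x%016llx\n",
	       (unsigned long long)fdirhashcmd);
	return 0;
}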
+
+/**
+ * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
+ * @hw: pointer to hardware structure
+ * @input: input bitstream
+ * @queue: queue index to direct traffic to
+ *
+ * Note that the caller to this function must lock before calling, since the
+ * hardware writes must be protected from one another.
+ **/
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ struct ixgbe_atr_input *input,
+ u16 soft_id,
+ u8 queue)
+{
+ u32 fdircmd = 0;
+ u32 fdirhash;
+ u32 src_ipv4, dst_ipv4;
+ u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
+ u16 src_port, dst_port, vlan_id, flex_bytes;
+ u16 bucket_hash;
+ u8 l4type;
+
+ /* Get our input values */
+ ixgbe_atr_get_l4type_82599(input, &l4type);
+
+ /*
+ * Check l4type formatting, and bail out before we touch the hardware
+ * if there's a configuration issue
+ */
+ switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
+ case IXGBE_ATR_L4TYPE_TCP:
+ fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
+ break;
+ case IXGBE_ATR_L4TYPE_UDP:
+ fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
+ break;
+ case IXGBE_ATR_L4TYPE_SCTP:
+ fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+ break;
+ default:
+ hw_dbg(hw, "Error on l4type input\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ bucket_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY);
+
+ /* bucket_hash is only 15 bits */
+ bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+ ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
+ ixgbe_atr_get_src_port_82599(input, &src_port);
+ ixgbe_atr_get_dst_port_82599(input, &dst_port);
+ ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
+
+ fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
+
+ /* Now figure out if we're IPv4 or IPv6 */
+ if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
+ /* IPv6 */
+ ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
+ &src_ipv6_3, &src_ipv6_4);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
+ /* The last 4 bytes go into the same register as the IPv4 address */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
+
+ fdircmd |= IXGBE_FDIRCMD_IPV6;
+ fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
+ } else {
+ /* IPv4 */
+ ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
+
+ }
+
+ ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
+ (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
+ (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+
+ fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
+ fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
+ fdircmd |= IXGBE_FDIRCMD_LAST;
+ fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+
+ return 0;
+}
+/**
* ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
* @hw: pointer to hardware structure
* @reg: analog register to read
@@ -1056,8 +2142,9 @@ s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
u32 q_num;
+ s32 ret_val;
- ixgbe_start_hw_generic(hw);
+ ret_val = ixgbe_start_hw_generic(hw);
/* Clear the rate limiters */
for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) {
@@ -1066,7 +2153,13 @@ s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
}
IXGBE_WRITE_FLUSH(hw);
- return 0;
+ /* We need to run link autotry after the driver loads */
+ hw->mac.autotry_restart = true;
+
+ if (ret_val == 0)
+ ret_val = ixgbe_verify_fw_version_82599(hw);
+
+ return ret_val;
}
/**
@@ -1093,53 +2186,100 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+ u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u16 ext_ability = 0;
u8 comp_codes_10g = 0;
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82599:
- case IXGBE_DEV_ID_82599_KX4:
- /* Default device ID is mezzanine card KX/KX4 */
- physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
- IXGBE_PHYSICAL_LAYER_1000BASE_KX);
+ hw->phy.ops.identify(hw);
+
+ if (hw->phy.type == ixgbe_phy_tn ||
+ hw->phy.type == ixgbe_phy_cu_unknown) {
+ hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
+ &ext_ability);
+ if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ goto out;
+ }
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_AN:
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
+ IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+ goto out;
+ } else
+ /* SFI mode so read SFP module */
+ goto sfp_check;
break;
- case IXGBE_DEV_ID_82599_SFP:
- hw->phy.ops.identify_sfp(hw);
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
+ goto out;
+ break;
+ case IXGBE_AUTOC_LMS_10G_SERIAL:
+ if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ goto out;
+ } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
+ goto sfp_check;
+ break;
+ case IXGBE_AUTOC_LMS_KX4_KX_KR:
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ goto out;
+ break;
+ default:
+ goto out;
+ break;
+ }
- switch (hw->phy.sfp_type) {
- case ixgbe_sfp_type_da_cu:
- case ixgbe_sfp_type_da_cu_core0:
- case ixgbe_sfp_type_da_cu_core1:
- physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
- break;
- case ixgbe_sfp_type_sr:
+sfp_check:
+ /* SFP check must be done last since DA modules are sometimes used to
+ * test KR mode - we need to identify KR mode correctly before checking
+ * the SFP module. Call identify_sfp because the pluggable module may
+ * have changed */
+ hw->phy.ops.identify_sfp(hw);
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ goto out;
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_tw_tyco:
+ case ixgbe_phy_tw_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
+ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
- break;
- case ixgbe_sfp_type_lr:
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
- break;
- case ixgbe_sfp_type_srlr_core0:
- case ixgbe_sfp_type_srlr_core1:
- hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_10GBE_COMP_CODES,
- &comp_codes_10g);
- if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
- physical_layer =
- IXGBE_PHYSICAL_LAYER_10GBASE_SR;
- else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
- physical_layer =
- IXGBE_PHYSICAL_LAYER_10GBASE_LR;
- else
- physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
- default:
- physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
- break;
- }
break;
default:
- physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
break;
}
+out:
return physical_layer;
}
@@ -1187,6 +2327,138 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
return 0;
}
+/**
+ * ixgbe_get_device_caps_82599 - Get additional device capabilities
+ * @hw: pointer to hardware structure
+ * @device_caps: the EEPROM word with the extra device capabilities
+ *
+ * This function will read the EEPROM location for the device capabilities,
+ * and return the word through device_caps.
+ **/
+s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
+{
+ hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599
+ * @hw: pointer to hardware structure
+ * @san_mac_offset: SAN MAC address offset
+ *
+ * This function will read the EEPROM location for the SAN MAC address
+ * pointer, and returns the value at that location. This is used in both
+ * get and set mac_addr routines.
+ **/
+s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
+{
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available.
+ */
+ hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ * set_lan_id() is called by identify_sfp(), but this cannot be relied
+ * upon for non-SFP connections, so we must call it here.
+ **/
+s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available. If they're not, no point in calling set_lan_id() here.
+ */
+ ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
+
+ if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
+ /*
+ * No addresses available in this EEPROM. It's not an
+ * error though, so just wipe the local address and return.
+ */
+ for (i = 0; i < 6; i++)
+ san_mac_addr[i] = 0xFF;
+
+ goto san_mac_addr_out;
+ }
+
+ /* make sure we know which port we need to program */
+ hw->mac.ops.set_lan_id(hw);
+ /* apply the port offset to the address offset */
+ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ for (i = 0; i < 3; i++) {
+ hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+ san_mac_addr[i * 2] = (u8)(san_mac_data);
+ san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+ san_mac_offset++;
+ }
+
+san_mac_addr_out:
+ return 0;
+}
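The loop above rebuilds the 6-byte SAN MAC address from three consecutive little-endian EEPROM words, low byte first. A stand-alone illustration of the same unpacking, with made-up word contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* three little-endian EEPROM words, hypothetical contents */
	uint16_t eeprom_words[3] = { 0x2500, 0x12b4, 0x7856 };
	uint8_t mac[6];
	int i;

	for (i = 0; i < 3; i++) {
		mac[i * 2] = (uint8_t)eeprom_words[i];
		mac[i * 2 + 1] = (uint8_t)(eeprom_words[i] >> 8);
	}

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}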
+
+/**
+ * ixgbe_verify_fw_version_82599 - verify fw version for 82599
+ * @hw: pointer to hardware structure
+ *
+ * Verifies that the installed firmware version is 0.6 or higher
+ * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
+ *
+ * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
+ * if the FW version is not supported.
+ **/
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM_VERSION;
+ u16 fw_offset, fw_ptp_cfg_offset;
+ u16 fw_version = 0;
+
+ /* firmware check is only necessary for SFI devices */
+ if (hw->phy.media_type != ixgbe_media_type_fiber) {
+ status = 0;
+ goto fw_version_out;
+ }
+
+ /* get the offset to the Firmware Module block */
+ hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+ if ((fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto fw_version_out;
+
+ /* get the offset to the Pass Through Patch Configuration block */
+ hw->eeprom.ops.read(hw, (fw_offset +
+ IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
+ &fw_ptp_cfg_offset);
+
+ if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
+ goto fw_version_out;
+
+ /* get the firmware version */
+ hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
+ IXGBE_FW_PATCH_VERSION_4),
+ &fw_version);
+
+ if (fw_version > 0x5)
+ status = 0;
+
+fw_version_out:
+ return status;
+}
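The check walks a short pointer chain in the EEPROM: the firmware-module pointer, then the pass-through-patch configuration pointer inside that block, then the version word, bailing out whenever an offset reads back as 0x0000 or 0xFFFF. A toy model of that walk over a plain word array; the offsets and contents are invented:

#include <stdint.h>
#include <stdio.h>

#define NOT_PRESENT(w) ((w) == 0x0000 || (w) == 0xFFFF)

int main(void)
{
	uint16_t eeprom[16] = { [0] = 4, [4] = 8, [8] = 0x0006 };
	uint16_t fw_offset = eeprom[0];
	uint16_t cfg_offset, version;

	if (NOT_PRESENT(fw_offset))
		return 1;

	cfg_offset = eeprom[fw_offset];
	if (NOT_PRESENT(cfg_offset))
		return 1;

	version = eeprom[cfg_offset];
	printf("fw version word = 0x%04x (%s)\n", version,
	       version > 0x5 ? "supported" : "unsupported");
	return 0;
}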
+
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
@@ -1196,6 +2468,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
.enable_rx_dma = &ixgbe_enable_rx_dma_82599,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
+ .get_san_mac_addr = &ixgbe_get_san_mac_addr_82599,
+ .get_device_caps = &ixgbe_get_device_caps_82599,
.stop_adapter = &ixgbe_stop_adapter_generic,
.get_bus_info = &ixgbe_get_bus_info_generic,
.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
@@ -1220,7 +2494,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.disable_mc = &ixgbe_disable_mc_generic,
.clear_vfta = &ixgbe_clear_vfta_82599,
.set_vfta = &ixgbe_set_vfta_82599,
- .setup_fc = &ixgbe_setup_fc_generic,
+ .fc_enable = &ixgbe_fc_enable_generic,
.init_uta_tables = &ixgbe_init_uta_tables_82599,
.setup_sfp = &ixgbe_setup_sfp_modules_82599,
};
@@ -1236,6 +2510,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
static struct ixgbe_phy_operations phy_ops_82599 = {
.identify = &ixgbe_identify_phy_82599,
.identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .init = &ixgbe_init_phy_ops_82599,
.reset = &ixgbe_reset_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
.write_reg = &ixgbe_write_phy_reg_generic,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 186a65069b3..96a18595377 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -28,6 +28,8 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
@@ -71,12 +73,6 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
/* Identify the PHY */
hw->phy.ops.identify(hw);
- /*
- * Store MAC address from RAR0, clear receive address registers, and
- * clear the multicast table
- */
- hw->mac.ops.init_rx_addrs(hw);
-
/* Clear the VLAN filter table */
hw->mac.ops.clear_vfta(hw);
@@ -89,6 +85,9 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
IXGBE_WRITE_FLUSH(hw);
+ /* Setup flow control */
+ ixgbe_setup_fc(hw, 0);
+
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
@@ -107,13 +106,17 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
**/
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
+ s32 status;
+
/* Reset the hardware */
- hw->mac.ops.reset_hw(hw);
+ status = hw->mac.ops.reset_hw(hw);
- /* Start the HW */
- hw->mac.ops.start_hw(hw);
+ if (status == 0) {
+ /* Start the HW */
+ status = hw->mac.ops.start_hw(hw);
+ }
- return 0;
+ return status;
}
/**
@@ -1362,15 +1365,14 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
* Drivers using secondary unicast addresses must set user_set_promisc when
* manually putting the device into promiscuous mode.
**/
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
- u32 addr_count, ixgbe_mc_addr_itr next)
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
+ struct list_head *uc_list)
{
- u8 *addr;
u32 i;
u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
u32 uc_addr_in_use;
u32 fctrl;
- u32 vmdq;
+ struct netdev_hw_addr *ha;
/*
* Clear accounting of old secondary address list,
@@ -1388,10 +1390,9 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
}
/* Add the new addresses */
- for (i = 0; i < addr_count; i++) {
+ list_for_each_entry(ha, uc_list, list) {
hw_dbg(hw, " Adding the secondary addresses:\n");
- addr = next(hw, &addr_list, &vmdq);
- ixgbe_add_uc_addr(hw, addr, vmdq);
+ ixgbe_add_uc_addr(hw, ha->addr, 0);
}
if (hw->addr_ctrl.overflow_promisc) {
@@ -1583,19 +1584,30 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_fc_enable - Enable flow control
+ * ixgbe_fc_enable_generic - Enable flow control
* @hw: pointer to hardware structure
* @packetbuf_num: packet buffer number (0-7)
*
* Enable flow control according to the current settings.
**/
-s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
{
s32 ret_val = 0;
- u32 mflcn_reg;
- u32 fccfg_reg;
+ u32 mflcn_reg, fccfg_reg;
u32 reg;
+ u32 rx_pba_size;
+#ifdef CONFIG_DCB
+ if (hw->fc.requested_mode == ixgbe_fc_pfc)
+ goto out;
+
+#endif /* CONFIG_DCB */
+ /* Negotiate the fc mode to use */
+ ret_val = ixgbe_fc_autoneg(hw);
+ if (ret_val)
+ goto out;
+
+ /* Disable any previous flow control settings */
mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
@@ -1615,7 +1627,10 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
*/
switch (hw->fc.current_mode) {
case ixgbe_fc_none:
- /* Flow control completely disabled by software override. */
+ /*
+ * Flow control is disabled by software override or autoneg.
+ * The code below will actually disable it in the HW.
+ */
break;
case ixgbe_fc_rx_pause:
/*
@@ -1644,7 +1659,7 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
case ixgbe_fc_pfc:
goto out;
break;
-#endif
+#endif /* CONFIG_DCB */
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
ret_val = -IXGBE_ERR_CONFIG;
@@ -1652,25 +1667,48 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
break;
}
- /* Enable 802.3x based flow control settings. */
+ /* Set 802.3x based flow control settings. */
+ mflcn_reg |= IXGBE_MFLCN_DPF;
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- /* Set up and enable Rx high/low water mark thresholds, enable XON. */
- if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- if (hw->fc.send_xon)
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num),
- (hw->fc.low_water | IXGBE_FCRTL_XONE));
- else
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num),
- hw->fc.low_water);
+ reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
+ /* Thresholds are different for link flow control when in DCB mode */
+ if (reg & IXGBE_MTQC_RT_ENA) {
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+
+ /* Always disable XON for LFC when in DCB mode */
+ reg = (rx_pba_size >> 5) & 0xFFE0;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
+
+ reg = (rx_pba_size >> 2) & 0xFFE0;
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause)
+ reg |= IXGBE_FCRTH_FCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
+ } else {
+ /*
+ * Set up and enable Rx high/low water mark thresholds,
+ * enable XON.
+ */
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+ if (hw->fc.send_xon) {
+ IXGBE_WRITE_REG(hw,
+ IXGBE_FCRTL_82599(packetbuf_num),
+ (hw->fc.low_water |
+ IXGBE_FCRTL_XONE));
+ } else {
+ IXGBE_WRITE_REG(hw,
+ IXGBE_FCRTL_82599(packetbuf_num),
+ hw->fc.low_water);
+ }
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
- (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
+ (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+ }
}
/* Configure pause time (2 TCs per register) */
- reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num));
+ reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
if ((packetbuf_num & 1) == 0)
reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
else
@@ -1687,100 +1725,41 @@ out:
* ixgbe_fc_autoneg - Configure flow control
* @hw: pointer to hardware structure
*
- * Negotiates flow control capabilities with link partner using autoneg and
- * applies the results.
+ * Compares our advertised flow control capabilities to those advertised by
+ * our link partner, and determines the proper flow control mode to use.
**/
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
- u32 i, reg, pcs_anadv_reg, pcs_lpab_reg;
-
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ ixgbe_link_speed speed;
+ u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+ bool link_up;
/*
- * The possible values of fc.current_mode are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but
- * we do not support receiving pause frames).
- * 3: Both Rx and Tx flow control (symmetric) are enabled.
- * 4: Priority Flow Control is enabled.
- * other: Invalid.
+ * AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - we don't have multispeed fiber, or if
+ * - we're not running at 1G, or if
+ * - link is not up, or if
+ * - link is up but AN did not complete, or if
+ * - link is up and AN completed but timed out
+ *
+ * Since we're being called from an LSC, link is already known to be up.
+ * So use link_up_wait_to_complete=false.
*/
- switch (hw->fc.current_mode) {
- case ixgbe_fc_none:
- /* Flow control completely disabled by software override. */
- reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- break;
- case ixgbe_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is
- * disabled by software override. Since there really
- * isn't a way to advertise that we are capable of RX
- * Pause ONLY, we will advertise that we support both
- * symmetric and asymmetric Rx PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
- */
- reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- break;
- case ixgbe_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
- * disabled by software override.
- */
- reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
- reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
- break;
- case ixgbe_fc_full:
- /* Flow control (both Rx and Tx) is enabled by SW override. */
- reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- break;
-#ifdef CONFIG_DCB
- case ixgbe_fc_pfc:
- goto out;
- break;
-#endif
- default:
- hw_dbg(hw, "Flow control param set incorrectly\n");
- ret_val = -IXGBE_ERR_CONFIG;
- goto out;
- break;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
- /* Set PCS register for autoneg */
- /* Enable and restart autoneg */
- reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
-
- /* Disable AN timeout */
- if (hw->fc.strict_ieee)
- reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
-
- hw_dbg(hw, "Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
-
- /* See if autonegotiation has succeeded */
- hw->mac.autoneg_succeeded = 0;
- for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
- msleep(10);
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
- if ((reg & (IXGBE_PCS1GLSTA_LINK_OK |
- IXGBE_PCS1GLSTA_AN_COMPLETE)) ==
- (IXGBE_PCS1GLSTA_LINK_OK |
- IXGBE_PCS1GLSTA_AN_COMPLETE)) {
- if (!(reg & IXGBE_PCS1GLSTA_AN_TIMED_OUT))
- hw->mac.autoneg_succeeded = 1;
- break;
- }
- }
-
- if (!hw->mac.autoneg_succeeded) {
- /* Autoneg failed to achieve a link, so we turn fc off */
- hw->fc.current_mode = ixgbe_fc_none;
- hw_dbg(hw, "Flow Control = NONE.\n");
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+
+ if (hw->fc.disable_fc_autoneg ||
+ !hw->phy.multispeed_fiber ||
+ (speed != IXGBE_LINK_SPEED_1GB_FULL) ||
+ !link_up ||
+ ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ hw_dbg(hw, "Autoneg FC was skipped.\n");
goto out;
}
@@ -1823,21 +1802,23 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
hw_dbg(hw, "Flow Control = NONE.\n");
}
+ /* Record that current_mode is the result of a successful autoneg */
+ hw->fc.fc_was_autonegged = true;
+
out:
return ret_val;
}
/**
- * ixgbe_setup_fc_generic - Set up flow control
+ * ixgbe_setup_fc - Set up flow control
* @hw: pointer to hardware structure
*
- * Sets up flow control.
+ * Called at init time to set up flow control.
**/
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
{
s32 ret_val = 0;
- ixgbe_link_speed speed;
- bool link_up;
+ u32 reg;
#ifdef CONFIG_DCB
if (hw->fc.requested_mode == ixgbe_fc_pfc) {
@@ -1866,7 +1847,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
/*
* Validate the requested mode. Strict IEEE mode does not allow
- * ixgbe_fc_rx_pause because it will cause testing anomalies.
+ * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
*/
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
@@ -1883,21 +1864,77 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
hw->fc.requested_mode = ixgbe_fc_full;
/*
- * Save off the requested flow control mode for use later. Depending
- * on the link partner's capabilities, we may or may not use this mode.
+ * Set up the 1G flow control advertisement registers so the HW will be
+ * able to do fc autoneg once the cable is plugged in. If we end up
+ * using 10g instead, this is harmless.
*/
- hw->fc.current_mode = hw->fc.requested_mode;
-
- /* Decide whether to use autoneg or not. */
- hw->mac.ops.check_link(hw, &speed, &link_up, false);
- if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber &&
- (speed == IXGBE_LINK_SPEED_1GB_FULL))
- ret_val = ixgbe_fc_autoneg(hw);
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- if (ret_val)
+ /*
+ * The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+#ifdef CONFIG_DCB
+ * 4: Priority Flow Control is enabled.
+#endif
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
+ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ break;
+#ifdef CONFIG_DCB
+ case ixgbe_fc_pfc:
+ goto out;
+ break;
+#endif /* CONFIG_DCB */
+ default:
+ hw_dbg(hw, "Flow control param set incorrectly\n");
+ ret_val = -IXGBE_ERR_CONFIG;
goto out;
+ break;
+ }
- ret_val = ixgbe_fc_enable(hw, packetbuf_num);
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+ /* Enable and restart autoneg to inform the link partner */
+ reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
+
+ /* Disable AN timeout */
+ if (hw->fc.strict_ieee)
+ reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+ hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
out:
return ret_val;
@@ -2044,6 +2081,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (!link_up) {
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
msleep(10);
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index dd260890ad0..0d34d4d8244 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -59,13 +59,13 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count,
ixgbe_mc_addr_itr func);
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
- u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
+ struct list_head *uc_list);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
-s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packtetbuf_num);
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 62206273d88..f30263898eb 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -294,6 +294,9 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
u32 reg, rx_pba_size;
u8 i;
+ if (!dcb_config->pfc_mode_enable)
+ goto out;
+
/* Enable Transmit Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
reg &= ~IXGBE_RMCS_TFCE_802_3X;
@@ -341,6 +344,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
+out:
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index f4417fc3b0f..589f62c7062 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -295,7 +295,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
/* If PFC is disabled globally then fall back to LFC. */
if (!dcb_config->pfc_mode_enable) {
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
- hw->mac.ops.setup_fc(hw, i);
+ hw->mac.ops.fc_enable(hw, i);
goto out;
}
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index bd0a0c27695..d56890f5c9d 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -28,15 +28,22 @@
#include "ixgbe.h"
#include <linux/dcbnl.h>
+#include "ixgbe_dcb_82598.h"
+#include "ixgbe_dcb_82599.h"
/* Callbacks for DCB netlink in the kernel */
#define BIT_DCB_MODE 0x01
#define BIT_PFC 0x02
#define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08
-#define BIT_BCN 0x10
+#define BIT_RESETLINK 0x40
#define BIT_LINKSPEED 0x80
+/* Responses for the DCB_C_SET_ALL command */
+#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
+#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */
+#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */
+
int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
{
@@ -124,15 +131,12 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_reset_interrupt_capability(adapter);
- ixgbe_napi_del_all(adapter);
- INIT_LIST_HEAD(&netdev->napi_list);
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
+ ixgbe_clear_interrupt_scheme(adapter);
- adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+ adapter->hw.fc.requested_mode = ixgbe_fc_none;
+ }
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
ixgbe_init_interrupt_scheme(adapter);
@@ -141,17 +145,13 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
} else {
/* Turn off DCB */
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- adapter->hw.fc.requested_mode = ixgbe_fc_default;
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_reset_interrupt_capability(adapter);
- ixgbe_napi_del_all(adapter);
- INIT_LIST_HEAD(&netdev->napi_list);
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
+ ixgbe_clear_interrupt_scheme(adapter);
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+ adapter->temp_dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_cfg.pfc_mode_enable = false;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
ixgbe_init_interrupt_scheme(adapter);
@@ -167,10 +167,15 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
u8 *perm_addr)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int i;
+ int i, j;
for (i = 0; i < netdev->addr_len; i++)
perm_addr[i] = adapter->hw.mac.perm_addr[i];
+
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ for (j = 0; j < netdev->addr_len; j++, i++)
+ perm_addr[i] = adapter->hw.mac.san_addr[j];
+ }
}
static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -197,8 +202,10 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
(adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
- adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
+ adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) {
adapter->dcb_set_bitmap |= BIT_PG_TX;
+ adapter->dcb_set_bitmap |= BIT_RESETLINK;
+ }
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -209,8 +216,10 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
- adapter->dcb_cfg.bw_percentage[0][bwg_id])
+ adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
adapter->dcb_set_bitmap |= BIT_PG_RX;
+ adapter->dcb_set_bitmap |= BIT_RESETLINK;
+ }
}
static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -237,8 +246,10 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
(adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
- adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
+ adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) {
adapter->dcb_set_bitmap |= BIT_PG_RX;
+ adapter->dcb_set_bitmap |= BIT_RESETLINK;
+ }
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -249,8 +260,10 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
- adapter->dcb_cfg.bw_percentage[1][bwg_id])
+ adapter->dcb_cfg.bw_percentage[1][bwg_id]) {
adapter->dcb_set_bitmap |= BIT_PG_RX;
+ adapter->dcb_set_bitmap |= BIT_RESETLINK;
+ }
}
static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -319,28 +332,60 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int ret;
- adapter->dcb_set_bitmap &= ~BIT_BCN; /* no set for BCN */
if (!adapter->dcb_set_bitmap)
- return 1;
+ return DCB_NO_HW_CHG;
- while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
- msleep(1);
+ /*
+ * Only take down the adapter if the configuration change
+ * requires a reset.
+ */
+ if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ msleep(1);
- if (netif_running(netdev))
- ixgbe_down(adapter);
+ if (netif_running(netdev))
+ ixgbe_down(adapter);
+ }
ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
adapter->ring_feature[RING_F_DCB].indices);
if (ret) {
- clear_bit(__IXGBE_RESETTING, &adapter->state);
- return ret;
+ if (adapter->dcb_set_bitmap & BIT_RESETLINK)
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+ return DCB_NO_HW_CHG;
+ }
+
+ if (adapter->dcb_cfg.pfc_mode_enable) {
+ if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
+ (adapter->hw.fc.current_mode != ixgbe_fc_pfc))
+ adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+ adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
+ } else {
+ if (adapter->hw.mac.type != ixgbe_mac_82598EB)
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+ else
+ adapter->hw.fc.requested_mode = ixgbe_fc_none;
}
- if (netif_running(netdev))
- ixgbe_up(adapter);
+ if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
+ if (netif_running(netdev))
+ ixgbe_up(adapter);
+ ret = DCB_HW_CHG_RST;
+ } else if (adapter->dcb_set_bitmap & BIT_PFC) {
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ ixgbe_dcb_config_pfc_82598(&adapter->hw,
+ &adapter->dcb_cfg);
+ else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ ixgbe_dcb_config_pfc_82599(&adapter->hw,
+ &adapter->dcb_cfg);
+ ret = DCB_HW_CHG;
+ }
+ if (adapter->dcb_cfg.pfc_mode_enable)
+ adapter->hw.fc.current_mode = ixgbe_fc_pfc;
+ if (adapter->dcb_set_bitmap & BIT_RESETLINK)
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
adapter->dcb_set_bitmap = 0x00;
- clear_bit(__IXGBE_RESETTING, &adapter->state);
return ret;
}
@@ -416,11 +461,17 @@ static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
+ return adapter->dcb_cfg.pfc_mode_enable;
}
static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ adapter->temp_dcb_cfg.pfc_mode_enable = state;
+ if (adapter->temp_dcb_cfg.pfc_mode_enable !=
+ adapter->dcb_cfg.pfc_mode_enable)
+ adapter->dcb_set_bitmap |= BIT_PFC;
return;
}
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index f0a20facc65..86f4f3e36f2 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -67,6 +67,9 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)},
{"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
{"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
+ {"hw_rsc_count", IXGBE_STAT(rsc_count)},
+ {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
+ {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
{"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
{"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
{"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
@@ -90,6 +93,14 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
+#ifdef IXGBE_FCOE
+ {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
+ {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
+ {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
+ {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
+ {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
+ {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
+#endif /* IXGBE_FCOE */
};
#define IXGBE_QUEUE_STATS_LEN \
@@ -109,6 +120,13 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
IXGBE_PB_STATS_LEN + \
IXGBE_QUEUE_STATS_LEN)
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+
static int ixgbe_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -120,11 +138,12 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->supported = SUPPORTED_10000baseT_Full;
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
- if (hw->phy.media_type == ixgbe_media_type_copper) {
+ if ((hw->phy.media_type == ixgbe_media_type_copper) ||
+ (hw->mac.type == ixgbe_mac_82599EB)) {
ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_TP | SUPPORTED_Autoneg);
+ SUPPORTED_Autoneg);
- ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->advertising = ADVERTISED_Autoneg;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
@@ -139,7 +158,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->advertising |= (ADVERTISED_10000baseT_Full |
ADVERTISED_1000baseT_Full);
- ecmd->port = PORT_TP;
+ if (hw->phy.media_type == ixgbe_media_type_copper) {
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ }
} else if (hw->phy.media_type == ixgbe_media_type_backplane) {
/* Set as FIBRE until SERDES defined in kernel */
switch (hw->device_id) {
@@ -187,16 +214,10 @@ static int ixgbe_set_settings(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
- s32 err;
+ s32 err = 0;
- switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber:
- if ((ecmd->autoneg == AUTONEG_ENABLE) ||
- (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
- return -EINVAL;
- /* in this case we currently only support 10Gb/FULL */
- break;
- case ixgbe_media_type_copper:
+ if ((hw->phy.media_type == ixgbe_media_type_copper) ||
+ (hw->mac.type == ixgbe_mac_82599EB)) {
/* 10000/copper and 1000/copper must autoneg
* this function does not support any duplex forcing, but can
* limit the advertising of the adapter to only 10000 or 1000 */
@@ -212,20 +233,23 @@ static int ixgbe_set_settings(struct net_device *netdev,
advertised |= IXGBE_LINK_SPEED_1GB_FULL;
if (old == advertised)
- break;
+ return err;
/* this sets the link speed and restarts auto-neg */
+ hw->mac.autotry_restart = true;
err = hw->mac.ops.setup_link_speed(hw, advertised, true, true);
if (err) {
DPRINTK(PROBE, INFO,
"setup link failed with code %d\n", err);
hw->mac.ops.setup_link_speed(hw, old, true, true);
}
- break;
- default:
- break;
+ } else {
+ /* in this case we currently only support 10Gb/FULL */
+ if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+ (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ return -EINVAL;
}
- return 0;
+ return err;
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
@@ -245,6 +269,13 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
else
pause->autoneg = 1;
+#ifdef CONFIG_DCB
+ if (hw->fc.current_mode == ixgbe_fc_pfc) {
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ }
+
+#endif
if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
pause->rx_pause = 1;
} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
@@ -260,24 +291,46 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fc_info fc;
+
+#ifdef CONFIG_DCB
+ if (adapter->dcb_cfg.pfc_mode_enable ||
+ ((hw->mac.type == ixgbe_mac_82598EB) &&
+ (adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
+ return -EINVAL;
+
+#endif
+
+ fc = hw->fc;
if (pause->autoneg != AUTONEG_ENABLE)
- hw->fc.disable_fc_autoneg = true;
+ fc.disable_fc_autoneg = true;
else
- hw->fc.disable_fc_autoneg = false;
+ fc.disable_fc_autoneg = false;
if (pause->rx_pause && pause->tx_pause)
- hw->fc.requested_mode = ixgbe_fc_full;
+ fc.requested_mode = ixgbe_fc_full;
else if (pause->rx_pause && !pause->tx_pause)
- hw->fc.requested_mode = ixgbe_fc_rx_pause;
+ fc.requested_mode = ixgbe_fc_rx_pause;
else if (!pause->rx_pause && pause->tx_pause)
- hw->fc.requested_mode = ixgbe_fc_tx_pause;
+ fc.requested_mode = ixgbe_fc_tx_pause;
else if (!pause->rx_pause && !pause->tx_pause)
- hw->fc.requested_mode = ixgbe_fc_none;
+ fc.requested_mode = ixgbe_fc_none;
else
return -EINVAL;
- hw->mac.ops.setup_fc(hw, 0);
+#ifdef CONFIG_DCB
+ adapter->last_lfc_mode = fc.requested_mode;
+#endif
+
+ /* if the fc settings changed, apply them and reinitialize the device */
+ if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
+ hw->fc = fc;
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
return 0;
}
@@ -311,10 +364,17 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev)
static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
{
- if (data)
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (data) {
netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- else
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ netdev->features |= NETIF_F_SCTP_CSUM;
+ } else {
netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ netdev->features &= ~NETIF_F_SCTP_CSUM;
+ }
return 0;
}
@@ -710,6 +770,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
strncpy(drvinfo->fw_version, firmware_version, 32);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_stats = IXGBE_STATS_LEN;
+ drvinfo->testinfo_len = IXGBE_TEST_LEN;
drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}
@@ -781,7 +842,6 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
goto err_setup;
}
- temp_tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
}
need_update = true;
}
@@ -811,7 +871,6 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
goto err_setup;
}
- temp_rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
}
need_update = true;
}
@@ -851,6 +910,8 @@ err_setup:
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
+ case ETH_SS_TEST:
+ return IXGBE_TEST_LEN;
case ETH_SS_STATS:
return IXGBE_STATS_LEN;
default:
@@ -905,6 +966,10 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
int i;
switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *ixgbe_gstrings_test,
+ IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
case ETH_SS_STATS:
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
memcpy(p, ixgbe_gstrings_stats[i].stat_string,
@@ -942,6 +1007,815 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
}
}
+static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link_up;
+ u32 link_speed = 0;
+ *data = 0;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+ if (link_up)
+ return *data;
+ else
+ *data = 1;
+ return *data;
+}
+
+/* ethtool register test data */
+struct ixgbe_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
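+ /*
+ * Example (for illustration): an entry such as
+ * { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }
+ * pattern-tests RDBAL(0)..RDBAL(3) at 0x40-byte strides, while
+ * TABLE32_TEST entries step in 4-byte and TABLE64_TEST_LO/HI entries
+ * in 8-byte increments (see the dispatch in ixgbe_reg_test() below).
+ */
+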
+/* default 82599 register test */
+static struct ixgbe_reg_test reg_test_82599[] = {
+ { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
+ { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+ { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
+ { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* default 82598 register test */
+static struct ixgbe_reg_test reg_test_82598[] = {
+ { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
+ { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* Enable all four RX queues before testing. */
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ /* RDH is read-only for 82598, only test RDT. */
+ { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
+ { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
+ { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
+ { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+#define REG_PATTERN_TEST(R, M, W) \
+{ \
+ u32 pat, val, before; \
+ const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if (val != (_test[pat] & W & M)) { \
+ DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\
+ "0x%08X expected 0x%08X\n", \
+ R, val, (_test[pat] & W & M)); \
+ *data = R; \
+ writel(before, adapter->hw.hw_addr + R); \
+ return 1; \
+ } \
+ writel(before, adapter->hw.hw_addr + R); \
+ } \
+}
+
+#define REG_SET_AND_CHECK(R, M, W) \
+{ \
+ u32 val, before; \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((W & M), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if ((W & M) != (val & M)) { \
+ DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
+ "expected 0x%08X\n", R, (val & M), (W & M)); \
+ *data = R; \
+ writel(before, (adapter->hw.hw_addr + R)); \
+ return 1; \
+ } \
+ writel(before, (adapter->hw.hw_addr + R)); \
+}
+
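+ /*
+ * Note: both macros above expand inside ixgbe_reg_test() below; on a
+ * mismatch they record the failing register offset in *data, restore the
+ * original register value and return 1 from ixgbe_reg_test() itself.
+ */
+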
+static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ struct ixgbe_reg_test *test;
+ u32 value, before, after;
+ u32 i, toggle;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ toggle = 0x7FFFF30F;
+ test = reg_test_82599;
+ } else {
+ toggle = 0x7FFFF3FF;
+ test = reg_test_82598;
+ }
+
+ /*
+ * Because the status register is such a special case,
+ * we handle it separately from the rest of the register
+ * tests. Some bits are read-only, some toggle, and some
+ * are writeable on newer MACs.
+ */
+ before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
+ value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
+ after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
+ if (value != after) {
+ DPRINTK(DRV, ERR, "failed STATUS register test got: "
+ "0x%08X expected: 0x%08X\n", after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
+
+ /*
+ * Perform the remainder of the register test, looping through
+ * the test table until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * 0x40));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return 0;
+}
+
+static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ if (hw->eeprom.ops.validate_checksum(hw, NULL))
+ *data = 1;
+ else
+ *data = 0;
+ return *data;
+}
+
+static irqreturn_t ixgbe_test_intr(int irq, void *data)
+{
+ struct net_device *netdev = (struct net_device *) data;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
+
+ return IRQ_HANDLED;
+}
+
+static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 mask, i = 0, shared_int = true;
+ u32 irq = adapter->pdev->irq;
+
+ *data = 0;
+
+ /* Hook up test interrupt handler just for this test */
+ if (adapter->msix_entries) {
+ /* NOTE: we don't test MSI-X interrupts here, yet */
+ return 0;
+ } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+ shared_int = false;
+ if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
+ netdev)) {
+ *data = 1;
+ return -1;
+ }
+ } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
+ netdev->name, netdev)) {
+ shared_int = false;
+ } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
+ netdev->name, netdev)) {
+ *data = 1;
+ return -1;
+ }
+ DPRINTK(HW, INFO, "testing %s interrupt\n",
+ (shared_int ? "shared" : "unshared"));
+
+ /* Disable all the interrupts */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ msleep(10);
+
+ /* Test each interrupt */
+ for (; i < 10; i++) {
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if (!shared_int) {
+ /*
+ * Disable the interrupts to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+ ~mask & 0x00007FFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+ ~mask & 0x00007FFF);
+ msleep(10);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+ /*
+ * Enable the interrupt to be reported in the cause
+ * register and then force the same interrupt and see
+ * if one gets posted. If an interrupt was not posted
+ * to the bus, the test failed.
+ */
+ adapter->test_icr = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+ msleep(10);
+
+ if (!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if (!shared_int) {
+ /*
+ * Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+ ~mask & 0x00007FFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+ ~mask & 0x00007FFF);
+ msleep(10);
+
+ if (adapter->test_icr) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ msleep(10);
+
+ /* Unhook test interrupt handler */
+ free_irq(irq, netdev);
+
+ return *data;
+}
+
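+ /*
+ * For reference, the *data codes set above are: 1 = request_irq() failed,
+ * 3 = a masked interrupt was still reported, 4 = an expected interrupt was
+ * not reported, 5 = an unrelated (masked) interrupt was reported.
+ */
+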
+static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
+ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 reg_ctl;
+ int i;
+
+ /* shut down the DMA engines now so they can be reinitialized later */
+
+ /* first Rx */
+ reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ reg_ctl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
+ reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0));
+ reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl);
+
+ /* now Tx */
+ reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0));
+ reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl);
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg_ctl &= ~IXGBE_DMATXCTL_TE;
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
+ }
+
+ ixgbe_reset(adapter);
+
+ if (tx_ring->desc && tx_ring->tx_buffer_info) {
+ for (i = 0; i < tx_ring->count; i++) {
+ struct ixgbe_tx_buffer *buf =
+ &(tx_ring->tx_buffer_info[i]);
+ if (buf->dma)
+ pci_unmap_single(pdev, buf->dma, buf->length,
+ PCI_DMA_TODEVICE);
+ if (buf->skb)
+ dev_kfree_skb(buf->skb);
+ }
+ }
+
+ if (rx_ring->desc && rx_ring->rx_buffer_info) {
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbe_rx_buffer *buf =
+ &(rx_ring->rx_buffer_info[i]);
+ if (buf->dma)
+ pci_unmap_single(pdev, buf->dma,
+ IXGBE_RXBUFFER_2048,
+ PCI_DMA_FROMDEVICE);
+ if (buf->skb)
+ dev_kfree_skb(buf->skb);
+ }
+ }
+
+ if (tx_ring->desc) {
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+ if (rx_ring->desc) {
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+
+ kfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ kfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ return;
+}
+
+static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
+ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 rctl, reg_data;
+ int i, ret_val;
+
+ /* Setup Tx descriptor ring and Tx buffers */
+
+ if (!tx_ring->count)
+ tx_ring->count = IXGBE_DEFAULT_TXD;
+
+ tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
+ sizeof(struct ixgbe_tx_buffer),
+ GFP_KERNEL);
+ if (!(tx_ring->tx_buffer_info)) {
+ ret_val = 1;
+ goto err_nomem;
+ }
+
+ tx_ring->size = tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+ &tx_ring->dma))) {
+ ret_val = 2;
+ goto err_nomem;
+ }
+ tx_ring->next_to_use = tx_ring->next_to_clean = 0;
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
+ ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
+ ((u64) tx_ring->dma >> 32));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
+ tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ reg_data |= IXGBE_HLREG0_TXPADEN;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
+ reg_data |= IXGBE_DMATXCTL_TE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
+ }
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
+ reg_data |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
+
+ for (i = 0; i < tx_ring->count; i++) {
+ struct ixgbe_legacy_tx_desc *desc = IXGBE_TX_DESC(*tx_ring, i);
+ struct sk_buff *skb;
+ unsigned int size = 1024;
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 3;
+ goto err_nomem;
+ }
+ skb_put(skb, size);
+ tx_ring->tx_buffer_info[i].skb = skb;
+ tx_ring->tx_buffer_info[i].length = skb->len;
+ tx_ring->tx_buffer_info[i].dma =
+ pci_map_single(pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ desc->buffer_addr = cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
+ desc->lower.data = cpu_to_le32(skb->len);
+ desc->lower.data |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
+ IXGBE_TXD_CMD_IFCS |
+ IXGBE_TXD_CMD_RS);
+ desc->upper.data = 0;
+ }
+
+ /* Setup Rx Descriptor ring and Rx buffers */
+
+ if (!rx_ring->count)
+ rx_ring->count = IXGBE_DEFAULT_RXD;
+
+ rx_ring->rx_buffer_info = kcalloc(rx_ring->count,
+ sizeof(struct ixgbe_rx_buffer),
+ GFP_KERNEL);
+ if (!(rx_ring->rx_buffer_info)) {
+ ret_val = 4;
+ goto err_nomem;
+ }
+
+ rx_ring->size = rx_ring->count * sizeof(struct ixgbe_legacy_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+ &rx_ring->dma))) {
+ ret_val = 5;
+ goto err_nomem;
+ }
+ rx_ring->next_to_use = rx_ring->next_to_clean = 0;
+
+ rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
+ ((u64)rx_ring->dma & 0xFFFFFFFF));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
+ ((u64) rx_ring->dma >> 32));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+ reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ reg_data &= ~IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
+#define IXGBE_RDRXCTL_RDMTS_MASK 0x00000003 /* Receive Descriptor Minimum
+ Threshold Size mask */
+ reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
+#define IXGBE_MCSTCTRL_MO_MASK 0x00000003 /* Multicast Offset mask */
+ reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
+ reg_data |= adapter->hw.mac.mc_filter_type;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
+ reg_data |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ int j = adapter->rx_ring[0].reg_idx;
+ u32 k;
+ for (k = 0; k < 10; k++) {
+ if (IXGBE_READ_REG(&adapter->hw,
+ IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
+ break;
+ else
+ msleep(1);
+ }
+ }
+
+ rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
+
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbe_legacy_rx_desc *rx_desc =
+ IXGBE_RX_DESC(*rx_ring, i);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 6;
+ goto err_nomem;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ rx_ring->rx_buffer_info[i].skb = skb;
+ rx_ring->rx_buffer_info[i].dma =
+ pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
+ PCI_DMA_FROMDEVICE);
+ rx_desc->buffer_addr =
+ cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
+ memset(skb->data, 0x00, skb->len);
+ }
+
+ return 0;
+
+err_nomem:
+ ixgbe_free_desc_rings(adapter);
+ return ret_val;
+}
+
+static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg_data;
+
+ /* right now we only support MAC loopback in the driver */
+
+ /* Setup MAC loopback */
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ reg_data |= IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
+ reg_data &= ~IXGBE_AUTOC_LMS_MASK;
+ reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
+
+ /* Disable Atlas Tx lanes; re-enabled in reset path */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ u8 atlas;
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
+ }
+
+ return 0;
+}
+
+static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
+{
+ u32 reg_data;
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ reg_data &= ~IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+}
+
+static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
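+ /*
+ * Frame layout produced above, assuming frame_size = 1024 as used below:
+ * bytes 0-511 stay 0xFF, bytes 512-1022 are filled with 0xAA, and the
+ * marker bytes 0xBE/0xAF are then written at offsets 522 and 524. The
+ * checker below only verifies byte 3 and the two marker bytes.
+ */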
+static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ frame_size &= ~1;
+ if (*(skb->data + 3) == 0xFF) {
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+ return 0;
+ }
+ }
+ return 13;
+}
+
+static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
+ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int i, j, k, l, lc, good_cnt, ret_val = 0;
+ unsigned long time;
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);
+
+ /*
+ * Calculate the loop count based on the largest descriptor ring
+ * The idea is to wrap the largest ring a number of times using 64
+ * send/receive pairs during each loop
+ */
+
+ if (rx_ring->count <= tx_ring->count)
+ lc = ((tx_ring->count / 64) * 2) + 1;
+ else
+ lc = ((rx_ring->count / 64) * 2) + 1;
+
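+ /*
+ * e.g. with 512-descriptor rings (a value chosen for illustration)
+ * this gives lc = (512 / 64) * 2 + 1 = 17 passes of 64 frames each.
+ */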
+ k = l = 0;
+ for (j = 0; j <= lc; j++) {
+ for (i = 0; i < 64; i++) {
+ ixgbe_create_lbtest_frame(
+ tx_ring->tx_buffer_info[k].skb,
+ 1024);
+ pci_dma_sync_single_for_device(pdev,
+ tx_ring->tx_buffer_info[k].dma,
+ tx_ring->tx_buffer_info[k].length,
+ PCI_DMA_TODEVICE);
+ if (unlikely(++k == tx_ring->count))
+ k = 0;
+ }
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
+ msleep(200);
+ /* set the start time for the receive */
+ time = jiffies;
+ good_cnt = 0;
+ do {
+ /* receive the sent packets */
+ pci_dma_sync_single_for_cpu(pdev,
+ rx_ring->rx_buffer_info[l].dma,
+ IXGBE_RXBUFFER_2048,
+ PCI_DMA_FROMDEVICE);
+ ret_val = ixgbe_check_lbtest_frame(
+ rx_ring->rx_buffer_info[l].skb, 1024);
+ if (!ret_val)
+ good_cnt++;
+ if (++l == rx_ring->count)
+ l = 0;
+ /*
+ * time + 20 msecs (200 msecs on 2.4) is more than
+ * enough time to complete the receives, if it's
+ * exceeded, break and error off
+ */
+ } while (good_cnt < 64 && jiffies < (time + 20));
+ if (good_cnt != 64) {
+ /* ret_val is the same as mis-compare */
+ ret_val = 13;
+ break;
+ }
+ if (jiffies >= (time + 20)) {
+ /* Error code for time out error */
+ ret_val = 14;
+ break;
+ }
+ }
+
+ return ret_val;
+}
+
+static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ *data = ixgbe_setup_desc_rings(adapter);
+ if (*data)
+ goto out;
+ *data = ixgbe_setup_loopback_test(adapter);
+ if (*data)
+ goto err_loopback;
+ *data = ixgbe_run_loopback_test(adapter);
+ ixgbe_loopback_cleanup(adapter);
+
+err_loopback:
+ ixgbe_free_desc_rings(adapter);
+out:
+ return *data;
+}
+
+static void ixgbe_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
+ set_bit(__IXGBE_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ DPRINTK(HW, INFO, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (ixgbe_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbe_reset(adapter);
+
+ DPRINTK(HW, INFO, "register testing starting\n");
+ if (ixgbe_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
+ DPRINTK(HW, INFO, "eeprom testing starting\n");
+ if (ixgbe_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
+ DPRINTK(HW, INFO, "interrupt testing starting\n");
+ if (ixgbe_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
+ DPRINTK(HW, INFO, "loopback testing starting\n");
+ if (ixgbe_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
+
+ clear_bit(__IXGBE_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ DPRINTK(HW, INFO, "online testing starting\n");
+ /* Online tests */
+ if (ixgbe_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+
+ clear_bit(__IXGBE_TESTING, &adapter->state);
+ }
+ msleep_interruptible(4 * 1000);
+}
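+
+/*
+ * The offline tests above correspond to the ixgbe_gstrings_test entries and
+ * are normally driven from user space with something like
+ * "ethtool -t ethX offline"; each data[i] then holds that test's result code.
+ */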
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
struct ethtool_wolinfo *wol)
@@ -1106,20 +1980,40 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
}
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
- struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
if (q_vector->txr_count && !q_vector->rxr_count)
/* tx vector gets half the rate */
q_vector->eitr = (adapter->eitr_param >> 1);
else
/* rx only or mixed */
q_vector->eitr = adapter->eitr_param;
- ixgbe_write_eitr(adapter, i,
- EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+ ixgbe_write_eitr(q_vector);
}
return 0;
}
+static int ixgbe_set_flags(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ ethtool_op_set_flags(netdev, data);
+
+ if (!(adapter->flags & IXGBE_FLAG2_RSC_CAPABLE))
+ return 0;
+
+ /* if state changes we need to update adapter->flags and reset */
+ if ((!!(data & ETH_FLAG_LRO)) !=
+ (!!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED))) {
+ adapter->flags ^= IXGBE_FLAG2_RSC_ENABLED;
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+ return 0;
+
+}
static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
@@ -1147,6 +2041,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_msglevel = ixgbe_set_msglevel,
.get_tso = ethtool_op_get_tso,
.set_tso = ixgbe_set_tso,
+ .self_test = ixgbe_diag_test,
.get_strings = ixgbe_get_strings,
.phys_id = ixgbe_phys_id,
.get_sset_count = ixgbe_get_sset_count,
@@ -1154,7 +2049,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
.get_flags = ethtool_op_get_flags,
- .set_flags = ethtool_op_set_flags,
+ .set_flags = ixgbe_set_flags,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
new file mode 100644
index 00000000000..3c3bf1f07b8
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -0,0 +1,556 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include "ixgbe.h"
+#include <linux/if_ether.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+
+/**
+ * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
+ * @rx_desc: advanced rx descriptor
+ *
+ * Returns : true if it is FCoE pkt
+ */
+static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
+{
+ u16 p;
+
+ p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info);
+ if (p & IXGBE_RXDADV_PKTTYPE_ETQF) {
+ p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK;
+ p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT;
+ return p == IXGBE_ETQF_FILTER_FCOE;
+ }
+ return false;
+}
+
+/**
+ * ixgbe_fcoe_clear_ddp - clear the given ddp context
+ * @ddp: ptr to the ixgbe_fcoe_ddp
+ *
+ * Returns : none
+ *
+ */
+static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
+{
+ ddp->len = 0;
+ ddp->err = 0;
+ ddp->udl = NULL;
+ ddp->udp = 0UL;
+ ddp->sgl = NULL;
+ ddp->sgc = 0;
+}
+
+/**
+ * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
+ * @netdev: the corresponding net_device
+ * @xid: the xid whose corresponding ddp context will be freed
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
+ * and it is expected to be called by ULD, i.e., FCP layer of libfc
+ * to release the corresponding ddp context when the I/O is done.
+ *
+ * Returns : data length already ddp-ed in bytes
+ */
+int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
+{
+ int len = 0;
+ struct ixgbe_fcoe *fcoe;
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_fcoe_ddp *ddp;
+
+ if (!netdev)
+ goto out_ddp_put;
+
+ if (xid >= IXGBE_FCOE_DDP_MAX)
+ goto out_ddp_put;
+
+ adapter = netdev_priv(netdev);
+ fcoe = &adapter->fcoe;
+ ddp = &fcoe->ddp[xid];
+ if (!ddp->udl)
+ goto out_ddp_put;
+
+ len = ddp->len;
+ /* if there was an error, force invalidation of the ddp context */
+ if (ddp->err) {
+ spin_lock_bh(&fcoe->lock);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
+ (xid | IXGBE_FCFLTRW_WE));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+ (xid | IXGBE_FCDMARW_WE));
+ spin_unlock_bh(&fcoe->lock);
+ }
+ if (ddp->sgl)
+ pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
+ DMA_FROM_DEVICE);
+ pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
+ ixgbe_fcoe_clear_ddp(ddp);
+
+out_ddp_put:
+ return len;
+}
+
+/**
+ * ixgbe_fcoe_ddp_get - called to set up ddp context
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
+ * and is expected to be called from the ULD, e.g., the FCP layer of libfc,
+ * to set up ddp for the given xid and scatter-gather list of the
+ * corresponding I/O.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_hw *hw;
+ struct ixgbe_fcoe *fcoe;
+ struct ixgbe_fcoe_ddp *ddp;
+ struct scatterlist *sg;
+ unsigned int i, j, dmacount;
+ unsigned int len;
+ static const unsigned int bufflen = 4096;
+ unsigned int firstoff = 0;
+ unsigned int lastsize;
+ unsigned int thisoff = 0;
+ unsigned int thislen = 0;
+ u32 fcbuff, fcdmarw, fcfltrw;
+ dma_addr_t addr;
+
+ if (!netdev || !sgl)
+ return 0;
+
+ adapter = netdev_priv(netdev);
+ if (xid >= IXGBE_FCOE_DDP_MAX) {
+ DPRINTK(DRV, WARNING, "xid=0x%x out-of-range\n", xid);
+ return 0;
+ }
+
+ fcoe = &adapter->fcoe;
+ if (!fcoe->pool) {
+ DPRINTK(DRV, WARNING, "xid=0x%x no ddp pool for fcoe\n", xid);
+ return 0;
+ }
+
+ ddp = &fcoe->ddp[xid];
+ if (ddp->sgl) {
+ DPRINTK(DRV, ERR, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
+ xid, ddp->sgl, ddp->sgc);
+ return 0;
+ }
+ ixgbe_fcoe_clear_ddp(ddp);
+
+ /* setup dma from scsi command sgl */
+ dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+ if (dmacount == 0) {
+ DPRINTK(DRV, ERR, "xid 0x%x DMA map error\n", xid);
+ return 0;
+ }
+
+ /* alloc the udl from our ddp pool */
+ ddp->udl = pci_pool_alloc(fcoe->pool, GFP_KERNEL, &ddp->udp);
+ if (!ddp->udl) {
+ DPRINTK(DRV, ERR, "failed allocated ddp context\n");
+ goto out_noddp_unmap;
+ }
+ ddp->sgl = sgl;
+ ddp->sgc = sgc;
+
+ j = 0;
+ for_each_sg(sgl, sg, dmacount, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ while (len) {
+ /* get the offset and length of the current buffer */
+ thisoff = addr & ((dma_addr_t)bufflen - 1);
+ thislen = min((bufflen - thisoff), len);
+ /*
+ * all but the 1st buffer (j == 0)
+ * must be aligned on bufflen
+ */
+ if ((j != 0) && (thisoff))
+ goto out_noddp_free;
+ /*
+ * all but the last buffer
+ * ((i == (dmacount - 1)) && (thislen == len))
+ * must end at bufflen
+ */
+ if (((i != (dmacount - 1)) || (thislen != len))
+ && ((thislen + thisoff) != bufflen))
+ goto out_noddp_free;
+
+ ddp->udl[j] = (u64)(addr - thisoff);
+ /* only the first buffer may have a non-zero offset */
+ if (j == 0)
+ firstoff = thisoff;
+ len -= thislen;
+ addr += thislen;
+ j++;
+ /* max number of buffers allowed in one DDP context */
+ if (j > IXGBE_BUFFCNT_MAX) {
+ DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx "
+ "not enough descriptors\n",
+ xid, i, j, dmacount, (u64)addr);
+ goto out_noddp_free;
+ }
+ }
+ }
+ /* only the last buffer may have non-full bufflen */
+ lastsize = thisoff + thislen;
+
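+ /*
+ * Worked example (numbers chosen for illustration): a single 10240-byte
+ * SG element starting 512 bytes into a 4KB-aligned region maps to three
+ * DDP buffers: 3584 bytes (firstoff = 512), one full 4096-byte buffer,
+ * and a final 2560 bytes, so lastsize = 2560.
+ */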
+ fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
+ fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT);
+ fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
+ fcbuff |= (IXGBE_FCBUFF_VALID);
+
+ fcdmarw = xid;
+ fcdmarw |= IXGBE_FCDMARW_WE;
+ fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);
+
+ fcfltrw = xid;
+ fcfltrw |= IXGBE_FCFLTRW_WE;
+
+ /* program DMA context */
+ hw = &adapter->hw;
+ spin_lock_bh(&fcoe->lock);
+ IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_32BIT_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
+ IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
+ /* program filter context */
+ IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
+ IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
+ spin_unlock_bh(&fcoe->lock);
+
+ return 1;
+
+out_noddp_free:
+ pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
+ ixgbe_fcoe_clear_ddp(ddp);
+
+out_noddp_unmap:
+ pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+ return 0;
+}
+
+/**
+ * ixgbe_fcoe_ddp - check ddp status and mark it done
+ * @adapter: ixgbe adapter
+ * @rx_desc: advanced rx descriptor
+ * @skb: the skb holding the received data
+ *
+ * This checks ddp status.
+ *
+ * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
+ * the skb is not passed up to the ULD, > 0 is the length of the data
+ * being ddped.
+ */
+int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u16 xid;
+ u32 sterr, fceofe, fcerr, fcstat;
+ int rc = -EINVAL;
+ struct ixgbe_fcoe *fcoe;
+ struct ixgbe_fcoe_ddp *ddp;
+ struct fc_frame_header *fh;
+
+ if (!ixgbe_rx_is_fcoe(rx_desc))
+ goto ddp_out;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
+ fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
+ if (fcerr == IXGBE_FCERR_BADCRC)
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_reset_network_header(skb);
+ skb_set_transport_header(skb, skb_network_offset(skb) +
+ sizeof(struct fcoe_hdr));
+ fh = (struct fc_frame_header *)skb_transport_header(skb);
+ xid = be16_to_cpu(fh->fh_ox_id);
+ if (xid >= IXGBE_FCOE_DDP_MAX)
+ goto ddp_out;
+
+ fcoe = &adapter->fcoe;
+ ddp = &fcoe->ddp[xid];
+ if (!ddp->udl)
+ goto ddp_out;
+
+ ddp->err = (fcerr | fceofe);
+ if (ddp->err)
+ goto ddp_out;
+
+ fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
+ if (fcstat) {
+ /* update length of DDPed data */
+ ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ /* unmap the sg list when FCP_RSP is received */
+ if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
+ pci_unmap_sg(adapter->pdev, ddp->sgl,
+ ddp->sgc, DMA_FROM_DEVICE);
+ ddp->sgl = NULL;
+ ddp->sgc = 0;
+ }
+ /* return 0 to bypass going to ULD for DDPed data */
+ if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
+ rc = 0;
+ else
+ rc = ddp->len;
+ }
+
+ddp_out:
+ return rc;
+}
+
+/**
+ * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
+ * @adapter: ixgbe adapter
+ * @tx_ring: tx desc ring
+ * @skb: associated skb
+ * @tx_flags: tx flags
+ * @hdr_len: hdr_len to be returned
+ *
+ * This sets up large send offload for FCoE
+ *
+ * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
+ */
+int ixgbe_fso(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, u8 *hdr_len)
+{
+ u8 sof, eof;
+ u32 vlan_macip_lens;
+ u32 fcoe_sof_eof;
+ u32 type_tucmd;
+ u32 mss_l4len_idx;
+ int mss = 0;
+ unsigned int i;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ struct fc_frame_header *fh;
+
+ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
+ DPRINTK(DRV, ERR, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
+ skb_shinfo(skb)->gso_type);
+ return -EINVAL;
+ }
+
+ /* reset the network/transport headers to point at the fcoe/fc headers */
+ skb_set_network_header(skb, skb->mac_len);
+ skb_set_transport_header(skb, skb->mac_len +
+ sizeof(struct fcoe_hdr));
+
+ /* sets up SOF and ORIS */
+ fcoe_sof_eof = 0;
+ sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
+ switch (sof) {
+ case FC_SOF_I2:
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+ break;
+ case FC_SOF_I3:
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+ break;
+ case FC_SOF_N2:
+ break;
+ case FC_SOF_N3:
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
+ break;
+ default:
+ DPRINTK(DRV, WARNING, "unknown sof = 0x%x\n", sof);
+ return -EINVAL;
+ }
+
+ /* the first byte of the last dword is EOF */
+ skb_copy_bits(skb, skb->len - 4, &eof, 1);
+ /* sets up EOF and ORIE */
+ switch (eof) {
+ case FC_EOF_N:
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
+ break;
+ case FC_EOF_T:
+ /* lso needs ORIE */
+ if (skb_is_gso(skb)) {
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
+ } else {
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
+ }
+ break;
+ case FC_EOF_NI:
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
+ break;
+ case FC_EOF_A:
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
+ break;
+ default:
+ DPRINTK(DRV, WARNING, "unknown eof = 0x%x\n", eof);
+ return -EINVAL;
+ }
+
+ /* sets up PARINC indicating data offset */
+ fh = (struct fc_frame_header *)skb_transport_header(skb);
+ if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
+
+ /* hdr_len includes fc_hdr if FCoE lso is enabled */
+ *hdr_len = sizeof(struct fcoe_crc_eof);
+ if (skb_is_gso(skb))
+ *hdr_len += (skb_transport_offset(skb) +
+ sizeof(struct fc_frame_header));
+ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+ vlan_macip_lens = (skb_transport_offset(skb) +
+ sizeof(struct fc_frame_header));
+ vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
+ << IXGBE_ADVTXD_MACLEN_SHIFT);
+ vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+
+ /* type_tucmd and mss: set TUCMD.FCoE to enable offload */
+ type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
+ IXGBE_ADVTXT_TUCMD_FCOE;
+ if (skb_is_gso(skb))
+ mss = skb_shinfo(skb)->gso_size;
+ /* mss_l4len_idx: use 1 for FSO as for TSO, no need for L4LEN */
+ mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
+ (1 << IXGBE_ADVTXD_IDX_SHIFT);
+
+ /* write context desc */
+ i = tx_ring->next_to_use;
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return skb_is_gso(skb);
+}
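For orientation, here is a minimal standalone sketch (not driver code) of how the hdr_len reported by ixgbe_fso() breaks down. The struct sizes follow the usual libfc/libfcoe layouts and a VLAN tag is assumed present, so treat the numbers as illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned int eth_hlen  = 14;	/* Ethernet header               */
	unsigned int vlan_hlen = 4;	/* optional 802.1Q tag (assumed) */
	unsigned int fcoe_hlen = 14;	/* struct fcoe_hdr               */
	unsigned int fc_hlen   = 24;	/* struct fc_frame_header        */
	unsigned int crc_eof   = 8;	/* struct fcoe_crc_eof trailer   */

	/* skb_transport_offset() in the driver points just past the FCoE header */
	unsigned int transport_off = eth_hlen + vlan_hlen + fcoe_hlen;

	unsigned int hdr_len_plain = crc_eof;			/* non-GSO: trailer only */
	unsigned int hdr_len_fso = crc_eof + transport_off + fc_hlen;	/* GSO (FSO) */

	printf("hdr_len: %u (plain), %u (FSO)\n", hdr_len_plain, hdr_len_fso);
	return 0;
}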
+
+/**
+ * ixgbe_configure_fcoe - configures registers for fcoe at start
+ * @adapter: ptr to ixgbe adapter
+ *
+ * This sets up FCoE related registers
+ *
+ * Returns : none
+ */
+void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
+{
+ int i, fcoe_q, fcoe_i;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+ /* create the pool for ddp if not created yet */
+ if (!fcoe->pool) {
+ /* allocate ddp pool */
+ fcoe->pool = pci_pool_create("ixgbe_fcoe_ddp",
+ adapter->pdev, IXGBE_FCPTR_MAX,
+ IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+ if (!fcoe->pool)
+ DPRINTK(DRV, ERR,
+ "failed to allocated FCoE DDP pool\n");
+
+ spin_lock_init(&fcoe->lock);
+ }
+
+ /* Enable L2 eth type filter for FCoE */
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
+ (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
+ if (adapter->ring_feature[RING_F_FCOE].indices) {
+ /* Use multiple rx queues for FCoE via the redirection table */
+ for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
+ fcoe_i = f->mask + i % f->indices;
+ fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+ fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
+ } else {
+ /* Use single rx queue for FCoE */
+ fcoe_i = f->mask;
+ fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
+ IXGBE_ETQS_QUEUE_EN |
+ (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
+ IXGBE_FCRXCTRL_FCOELLI |
+ IXGBE_FCRXCTRL_FCCRCBO |
+ (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
+}
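A small sketch of the FCRETA fill loop above, assuming (illustratively) an 8-entry redirection table, f->mask = 16 and f->indices = 4; it only prints the rx-queue index each table slot would end up pointing at, showing the round-robin spread across the FCoE queues.

#include <stdio.h>

int main(void)
{
	unsigned int fcreta_size = 8;	/* assumed table size       */
	unsigned int mask = 16;		/* first FCoE rx queue      */
	unsigned int indices = 4;	/* number of FCoE rx queues */
	unsigned int i;

	for (i = 0; i < fcreta_size; i++)
		printf("FCRETA[%u] -> rx queue %u\n", i, mask + i % indices);
	return 0;
}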
+
+/**
+ * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
+ * @adapter : ixgbe adapter
+ *
+ * Cleans up outstanding ddp context resources
+ *
+ * Returns : none
+ */
+void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
+{
+ int i;
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+ /* release ddp resource */
+ if (fcoe->pool) {
+ for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
+ ixgbe_fcoe_ddp_put(adapter->netdev, i);
+ pci_pool_destroy(fcoe->pool);
+ fcoe->pool = NULL;
+ }
+}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
new file mode 100644
index 00000000000..c5b50026a89
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -0,0 +1,67 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_FCOE_H
+#define _IXGBE_FCOE_H
+
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fcoe.h>
+
+/* shift bits within STAT for FCSTAT */
+#define IXGBE_RXDADV_FCSTAT_SHIFT 4
+
+/* ddp user buffer */
+#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */
+#define IXGBE_FCPTR_ALIGN 16
+#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t))
+#define IXGBE_FCBUFF_4KB 0x0
+#define IXGBE_FCBUFF_8KB 0x1
+#define IXGBE_FCBUFF_16KB 0x2
+#define IXGBE_FCBUFF_64KB 0x3
+#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */
+#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */
+#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */
+
+/* fcerr */
+#define IXGBE_FCERR_BADCRC 0x00100000
+
+struct ixgbe_fcoe_ddp {
+ int len;
+ u32 err;
+ unsigned int sgc;
+ struct scatterlist *sgl;
+ dma_addr_t udp;
+ u64 *udl;
+};
+
+struct ixgbe_fcoe {
+ spinlock_t lock;
+ struct pci_pool *pool;
+ struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+};
+
+#endif /* _IXGBE_FCOE_H */
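A quick standalone check (not driver code) of how the sizing constants above relate, assuming a 64-bit dma_addr_t: 256 buffer pointers per context give a 2048-byte user descriptor list, and the context array is indexed by a 9-bit exchange id.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BUFFCNT_MAX	256	/* 8-bit buffer count per context */
#define DDP_MAX		512	/* 9-bit xid                      */

int main(void)
{
	size_t fcptr_max = BUFFCNT_MAX * sizeof(uint64_t);	/* dma_addr_t assumed 64-bit */

	assert(BUFFCNT_MAX == (1 << 8));
	assert(DDP_MAX == (1 << 9));
	printf("per-context user descriptor list: %zu bytes\n", fcptr_max);
	return 0;
}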
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 07e778d3e5d..a551a96ce67 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -39,6 +39,7 @@
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <scsi/fc/fc_fcoe.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
@@ -47,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "2.0.8-k2"
+#define DRV_VERSION "2.0.34-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
@@ -89,6 +90,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
+ board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
board_82599 },
@@ -183,6 +186,22 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
}
}
+static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+ } else {
+ mask = (qmask & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
+ mask = (qmask >> 32);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+ }
+}
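A minimal sketch of the 64-bit mask split used above: on 82599 the queue mask is written as two 32-bit halves (EICS_EX(0) and EICS_EX(1)), while 82598 only keeps the low RTX_QUEUE bits. The vector number below is illustrative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t qmask = (uint64_t)1 << 37;		/* e.g. vector 37 */
	uint32_t lo = (uint32_t)(qmask & 0xFFFFFFFF);	/* -> EICS_EX(0)  */
	uint32_t hi = (uint32_t)(qmask >> 32);		/* -> EICS_EX(1)  */

	printf("EICS_EX(0)=0x%08x EICS_EX(1)=0x%08x\n", lo, hi);
	return 0;
}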
+
static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
struct ixgbe_tx_buffer
*tx_buffer_info)
@@ -245,14 +264,13 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
- *
- * returns true if transmit work is done
**/
-static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
+static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring)
{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
struct net_device *netdev = adapter->netdev;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -275,12 +293,24 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
if (cleaned && skb) {
unsigned int segs, bytecount;
+ unsigned int hlen = skb_headlen(skb);
/* gso_segs is currently only valid for tcp */
segs = skb_shinfo(skb)->gso_segs ?: 1;
+#ifdef IXGBE_FCOE
+ /* adjust for FCoE Sequence Offload */
+ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ && (skb->protocol == htons(ETH_P_FCOE)) &&
+ skb_is_gso(skb)) {
+ hlen = skb_transport_offset(skb) +
+ sizeof(struct fc_frame_header) +
+ sizeof(struct fcoe_crc_eof);
+ segs = DIV_ROUND_UP(skb->len - hlen,
+ skb_shinfo(skb)->gso_size);
+ }
+#endif /* IXGBE_FCOE */
/* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
+ bytecount = ((segs - 1) * hlen) + skb->len;
total_packets += segs;
total_bytes += bytecount;
}
@@ -327,7 +357,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
/* re-arm the interrupt */
if (count >= tx_ring->work_limit)
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
+ ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
tx_ring->total_bytes += total_bytes;
tx_ring->total_packets += total_packets;
@@ -398,6 +428,9 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
return;
+ /* always use CB2 mode, difference is masked in the CB driver */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+
for (i = 0; i < adapter->num_tx_queues; i++) {
adapter->tx_ring[i].cpu = -1;
ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
@@ -419,9 +452,6 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
/* if we're already enabled, don't do it again */
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
break;
- /* Always use CB2 mode, difference is masked
- * in the CB driver. */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
if (dca_add_requester(dev) == 0) {
adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
ixgbe_setup_dca(adapter);
@@ -451,6 +481,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
**/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb, u8 status,
+ struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
@@ -458,24 +489,17 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
- skb_record_rx_queue(skb, q_vector - &adapter->q_vector[0]);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ skb_record_rx_queue(skb, ring->queue_index);
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
if (adapter->vlgrp && is_vlan && (tag != 0))
vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
else
napi_gro_receive(napi, skb);
} else {
- if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
- if (adapter->vlgrp && is_vlan && (tag != 0))
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
- else
- netif_receive_skb(skb);
- } else {
- if (adapter->vlgrp && is_vlan && (tag != 0))
- vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
- else
- netif_rx(skb);
- }
+ if (adapter->vlgrp && is_vlan && (tag != 0))
+ vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+ else
+ netif_rx(skb);
}
}
@@ -622,6 +646,40 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}
+static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_RSCCNT_MASK) >>
+ IXGBE_RXDADV_RSCCNT_SHIFT;
+}
+
+/**
+ * ixgbe_transform_rsc_queue - change rsc queue into a full packet
+ * @skb: pointer to the last skb in the rsc queue
+ *
+ * This function changes a queue full of hw rsc buffers into a completed
+ * packet. It uses the ->prev pointers to find the first packet and then
+ * turns it into the frag list owner.
+ **/
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+{
+ unsigned int frag_list_size = 0;
+
+ while (skb->prev) {
+ struct sk_buff *prev = skb->prev;
+ frag_list_size += skb->len;
+ skb->prev = NULL;
+ skb = prev;
+ }
+
+ skb_shinfo(skb)->frag_list = skb->next;
+ skb->next = NULL;
+ skb->len += frag_list_size;
+ skb->data_len += frag_list_size;
+ skb->truesize += frag_list_size;
+ return skb;
+}
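The same pointer walk, reduced to a standalone toy (plain structs instead of sk_buffs, and only the length bookkeeping): given the last buffer of a ->prev-linked chain, it finds the head, hangs the remaining buffers off it as a frag list and folds their lengths into the head.

#include <stddef.h>
#include <stdio.h>

struct buf {
	struct buf *prev, *next, *frag_list;
	unsigned int len;
};

/* mirror of the ->prev walk: b is the *last* buffer of the chain */
static struct buf *transform(struct buf *b)
{
	unsigned int frag_list_size = 0;

	while (b->prev) {
		struct buf *prev = b->prev;
		frag_list_size += b->len;
		b->prev = NULL;
		b = prev;
	}
	b->frag_list = b->next;	/* rest of the chain becomes the frag list */
	b->next = NULL;
	b->len += frag_list_size;
	return b;
}

int main(void)
{
	struct buf a = { .len = 100 }, b = { .len = 200 }, c = { .len = 300 };

	a.next = &b; b.prev = &a;	/* chain a -> b -> c */
	b.next = &c; c.prev = &b;

	struct buf *head = transform(&c);
	printf("head len = %u\n", head->len);	/* 600 */
	return 0;
}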
+
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
int *work_done, int work_to_do)
@@ -631,12 +689,15 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
struct sk_buff *skb;
- unsigned int i;
+ unsigned int i, rsc_count = 0;
u32 len, staterr;
u16 hdr_info;
bool cleaned = false;
int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+#ifdef IXGBE_FCOE
+ int ddp_bytes = 0;
+#endif /* IXGBE_FCOE */
i = rx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
@@ -667,7 +728,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
prefetch(skb->data - NET_IP_ALIGN);
rx_buffer_info->skb = NULL;
- if (len && !skb_shinfo(skb)->nr_frags) {
+ if (rx_buffer_info->dma) {
pci_unmap_single(pdev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
PCI_DMA_FROMDEVICE);
@@ -697,20 +758,38 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
i++;
if (i == rx_ring->count)
i = 0;
- next_buffer = &rx_ring->rx_buffer_info[i];
next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
prefetch(next_rxd);
-
cleaned_count++;
+
+ if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)
+ rsc_count = ixgbe_get_rsc_count(rx_desc);
+
+ if (rsc_count) {
+ u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
+ IXGBE_RXDADV_NEXTP_SHIFT;
+ next_buffer = &rx_ring->rx_buffer_info[nextp];
+ rx_ring->rsc_count += (rsc_count - 1);
+ } else {
+ next_buffer = &rx_ring->rx_buffer_info[i];
+ }
+
if (staterr & IXGBE_RXD_STAT_EOP) {
+ if (skb->prev)
+ skb = ixgbe_transform_rsc_queue(skb);
rx_ring->stats.packets++;
rx_ring->stats.bytes += skb->len;
} else {
- rx_buffer_info->skb = next_buffer->skb;
- rx_buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_buffer_info->skb = next_buffer->skb;
+ rx_buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ } else {
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
+ }
adapter->non_eop_descs++;
goto next_desc;
}
@@ -727,7 +806,15 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
total_rx_packets++;
skb->protocol = eth_type_trans(skb, adapter->netdev);
- ixgbe_receive_skb(q_vector, skb, staterr, rx_desc);
+#ifdef IXGBE_FCOE
+ /* if ddp, do not pass to ULD unless it is FCP_RSP or an error */
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
+ if (!ddp_bytes)
+ goto next_desc;
+ }
+#endif /* IXGBE_FCOE */
+ ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -740,7 +827,7 @@ next_desc:
/* use prefetched values */
rx_desc = next_rxd;
- rx_buffer_info = next_buffer;
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
@@ -751,6 +838,21 @@ next_desc:
if (cleaned_count)
ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+#ifdef IXGBE_FCOE
+ /* include DDPed FCoE data */
+ if (ddp_bytes > 0) {
+ unsigned int mss;
+
+ mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
+ sizeof(struct fc_frame_header) -
+ sizeof(struct fcoe_crc_eof);
+ if (mss > 512)
+ mss &= ~511;
+ total_rx_bytes += ddp_bytes;
+ total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
+ }
+#endif /* IXGBE_FCOE */
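A worked sketch of the DDP byte accounting above: for the rx stats the driver estimates how many frames the hardware placed directly, using an mss derived from the netdev MTU and rounded down to a multiple of 512. The header sizes assume the usual libfc/libfcoe layouts and the MTU is illustrative.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int mtu = 2500;	/* illustrative MTU             */
	unsigned int ddp_bytes = 8192;	/* bytes DDPed for one exchange */
	unsigned int mss;

	mss = mtu - 14 /* fcoe_hdr */ - 24 /* fc_frame_header */ - 8 /* fcoe_crc_eof */;
	if (mss > 512)
		mss &= ~511;		/* 2454 -> 2048 */

	printf("estimated rx packets: %u\n", DIV_ROUND_UP(ddp_bytes, mss));	/* 4 */
	return 0;
}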
+
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
adapter->net_stats.rx_bytes += total_rx_bytes;
@@ -780,7 +882,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
* corresponding register.
*/
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
- q_vector = &adapter->q_vector[v_idx];
+ q_vector = adapter->q_vector[v_idx];
/* XXX for_each_bit(...) */
r_idx = find_first_bit(q_vector->rxr_idx,
adapter->num_rx_queues);
@@ -810,12 +912,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
/* rx only */
q_vector->eitr = adapter->eitr_param;
- /*
- * since this is initial set up don't need to call
- * ixgbe_write_eitr helper
- */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
- EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+ ixgbe_write_eitr(q_vector);
}
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
@@ -900,17 +997,19 @@ update_itr_done:
/**
* ixgbe_write_eitr - write EITR register in hardware specific way
- * @adapter: pointer to adapter struct
- * @v_idx: vector index into q_vector array
- * @itr_reg: new value to be written in *register* format, not ints/s
+ * @q_vector: structure containing interrupt and ring information
*
* This function is made to be called by ethtool and by the driver
* when it needs to update EITR registers at runtime. Hardware
* specific quirks/differences are taken care of here.
*/
-void ixgbe_write_eitr(struct ixgbe_adapter *adapter, int v_idx, u32 itr_reg)
+void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_hw *hw = &adapter->hw;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
+
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
/* must write high and low 16 bits to reset counter */
itr_reg |= (itr_reg << 16);
@@ -929,8 +1028,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
struct ixgbe_adapter *adapter = q_vector->adapter;
u32 new_itr;
u8 current_itr, ret_itr;
- int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
- sizeof(struct ixgbe_q_vector);
+ int i, r_idx;
struct ixgbe_ring *rx_ring, *tx_ring;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
@@ -980,14 +1078,13 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
}
if (new_itr != q_vector->eitr) {
- u32 itr_reg;
+ /* do an exponential smoothing */
+ new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
/* save the algorithm value here, not the smoothed one */
q_vector->eitr = new_itr;
- /* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
- itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
- ixgbe_write_eitr(adapter, v_idx, itr_reg);
+
+ ixgbe_write_eitr(q_vector);
}
return;
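Numerically, the smoothing above is a 90/10 weighted average in interrupts per second; a short standalone loop shows how a rate starting at 8000 ints/s moves toward a measured 20000 ints/s target (values illustrative, integer arithmetic as in the driver).

#include <stdio.h>

int main(void)
{
	unsigned int eitr = 8000, target = 20000, i;

	for (i = 0; i < 5; i++) {
		eitr = (eitr * 90) / 100 + (target * 10) / 100;
		printf("step %u: %u ints/s\n", i + 1, eitr);
	}
	return 0;
}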
@@ -1058,14 +1155,64 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (hw->mac.type == ixgbe_mac_82598EB)
ixgbe_check_fan_failure(adapter, eicr);
- if (hw->mac.type == ixgbe_mac_82599EB)
+ if (hw->mac.type == ixgbe_mac_82599EB) {
ixgbe_check_sfp_event(adapter, eicr);
+
+ /* Handle Flow Director Full threshold interrupt */
+ if (eicr & IXGBE_EICR_FLOW_DIR) {
+ int i;
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
+ /* Disable transmits before FDIR Re-initialization */
+ netif_tx_stop_all_queues(netdev);
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring =
+ &adapter->tx_ring[i];
+ if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
+ &tx_ring->reinit_state))
+ schedule_work(&adapter->fdir_reinit_task);
+ }
+ }
+ }
if (!test_bit(__IXGBE_DOWN, &adapter->state))
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
return IRQ_HANDLED;
}
+static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+ } else {
+ mask = (qmask & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+ mask = (qmask >> 32);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+ }
+ /* skip the flush */
+}
+
+static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+ } else {
+ mask = (qmask & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+ mask = (qmask >> 32);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+ }
+ /* skip the flush */
+}
+
static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
struct ixgbe_q_vector *q_vector = data;
@@ -1079,17 +1226,16 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
tx_ring = &(adapter->tx_ring[r_idx]);
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_tx_dca(adapter, tx_ring);
-#endif
tx_ring->total_bytes = 0;
tx_ring->total_packets = 0;
- ixgbe_clean_tx_irq(adapter, tx_ring);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
r_idx + 1);
}
+ /* disable interrupts on this vector only */
+ ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ napi_schedule(&q_vector->napi);
+
return IRQ_HANDLED;
}
@@ -1121,7 +1267,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
rx_ring = &(adapter->rx_ring[r_idx]);
/* disable interrupts on this vector only */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
+ ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -1129,8 +1275,36 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
- ixgbe_msix_clean_rx(irq, data);
- ixgbe_msix_clean_tx(irq, data);
+ struct ixgbe_q_vector *q_vector = data;
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_ring *ring;
+ int r_idx;
+ int i;
+
+ if (!q_vector->txr_count && !q_vector->rxr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ ring = &(adapter->tx_ring[r_idx]);
+ ring->total_bytes = 0;
+ ring->total_packets = 0;
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ ring = &(adapter->rx_ring[r_idx]);
+ ring->total_bytes = 0;
+ ring->total_packets = 0;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ /* disable interrupts on this vector only */
+ ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -1167,29 +1341,42 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
if (adapter->itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
+ ixgbe_irq_enable_queues(adapter,
+ ((u64)1 << q_vector->v_idx));
}
return work_done;
}
/**
- * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
*
* This function will clean more than one rx queue associated with a
* q_vector.
**/
-static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
+static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *rx_ring = NULL;
+ struct ixgbe_ring *ring = NULL;
int work_done = 0, i;
long r_idx;
- u16 enable_mask = 0;
+ bool tx_clean_complete = true;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ ring = &(adapter->tx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_tx_dca(adapter, ring);
+#endif
+ tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling */
@@ -1197,47 +1384,87 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
budget = max(budget, 1);
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_rx_dca(adapter, rx_ring);
+ ixgbe_update_rx_dca(adapter, ring);
#endif
- ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
- enable_mask |= rx_ring->v_idx;
+ ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
r_idx + 1);
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ ring = &(adapter->rx_ring[r_idx]);
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
if (adapter->itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
+ ixgbe_irq_enable_queues(adapter,
+ ((u64)1 << q_vector->v_idx));
return 0;
}
return work_done;
}
+
+/**
+ * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
+static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
+{
+ struct ixgbe_q_vector *q_vector =
+ container_of(napi, struct ixgbe_q_vector, napi);
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_ring *tx_ring = NULL;
+ int work_done = 0;
+ long r_idx;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ tx_ring = &(adapter->tx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_tx_dca(adapter, tx_ring);
+#endif
+
+ if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
+ work_done = budget;
+
+ /* If all Tx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbe_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ }
+
+ return work_done;
+}
+
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
int r_idx)
{
- a->q_vector[v_idx].adapter = a;
- set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
- a->q_vector[v_idx].rxr_count++;
- a->rx_ring[r_idx].v_idx = 1 << v_idx;
+ struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(r_idx, q_vector->rxr_idx);
+ q_vector->rxr_count++;
}
static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
- int r_idx)
+ int t_idx)
{
- a->q_vector[v_idx].adapter = a;
- set_bit(r_idx, a->q_vector[v_idx].txr_idx);
- a->q_vector[v_idx].txr_count++;
- a->tx_ring[r_idx].v_idx = 1 << v_idx;
+ struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(t_idx, q_vector->txr_idx);
+ q_vector->txr_count++;
}
/**
@@ -1333,7 +1560,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
(!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
&ixgbe_msix_clean_many)
for (vector = 0; vector < q_vectors; vector++) {
- handler = SET_HANDLER(&adapter->q_vector[vector]);
+ handler = SET_HANDLER(adapter->q_vector[vector]);
if(handler == &ixgbe_msix_clean_rx) {
sprintf(adapter->name[vector], "%s-%s-%d",
@@ -1349,7 +1576,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
err = request_irq(adapter->msix_entries[vector].vector,
handler, 0, adapter->name[vector],
- &(adapter->q_vector[vector]));
+ adapter->q_vector[vector]);
if (err) {
DPRINTK(PROBE, ERR,
"request_irq failed for MSIX interrupt "
@@ -1372,7 +1599,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
free_queue_irqs:
for (i = vector - 1; i >= 0; i--)
free_irq(adapter->msix_entries[--vector].vector,
- &(adapter->q_vector[i]));
+ adapter->q_vector[i]);
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
@@ -1383,7 +1610,7 @@ out:
static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
- struct ixgbe_q_vector *q_vector = adapter->q_vector;
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u8 current_itr;
u32 new_itr = q_vector->eitr;
struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
@@ -1416,14 +1643,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
}
if (new_itr != q_vector->eitr) {
- u32 itr_reg;
+ /* do an exponential smoothing */
+ new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
/* save the algorithm value here, not the smoothed one */
q_vector->eitr = new_itr;
- /* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
- itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
- ixgbe_write_eitr(adapter, 0, itr_reg);
+
+ ixgbe_write_eitr(q_vector);
}
return;
@@ -1436,7 +1662,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
u32 mask;
- mask = IXGBE_EIMS_ENABLE_MASK;
+
+ mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
mask |= IXGBE_EIMS_GPI_SDP1;
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -1444,16 +1671,12 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
}
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+ adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ mask |= IXGBE_EIMS_FLOW_DIR;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- /* enable the rest of the queue vectors */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1),
- (IXGBE_EIMS_RTX_QUEUE << 16));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
- ((IXGBE_EIMS_RTX_QUEUE << 16) |
- IXGBE_EIMS_RTX_QUEUE));
- }
+ ixgbe_irq_enable_queues(adapter, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
}
@@ -1467,6 +1690,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
struct net_device *netdev = data;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u32 eicr;
/*
@@ -1494,13 +1718,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
- if (napi_schedule_prep(&adapter->q_vector[0].napi)) {
+ if (napi_schedule_prep(&(q_vector->napi))) {
adapter->tx_ring[0].total_packets = 0;
adapter->tx_ring[0].total_bytes = 0;
adapter->rx_ring[0].total_packets = 0;
adapter->rx_ring[0].total_bytes = 0;
/* would disable interrupts here but EIAM disabled it */
- __napi_schedule(&adapter->q_vector[0].napi);
+ __napi_schedule(&(q_vector->napi));
}
return IRQ_HANDLED;
@@ -1511,7 +1735,7 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (i = 0; i < q_vectors; i++) {
- struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
q_vector->rxr_count = 0;
@@ -1562,7 +1786,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
i--;
for (; i >= 0; i--) {
free_irq(adapter->msix_entries[i].vector,
- &(adapter->q_vector[i]));
+ adapter->q_vector[i]);
}
ixgbe_reset_q_vectors(adapter);
@@ -1577,10 +1801,12 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
**/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+ } else {
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(2), ~0);
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1592,18 +1818,6 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
}
}
-static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter)
-{
- u32 mask = IXGBE_EIMS_RTX_QUEUE;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask << 16);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
- (mask << 16 | mask));
- }
- /* skip the flush */
-}
-
/**
* ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
*
@@ -1673,11 +1887,34 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
u32 srrctl;
int queue0 = 0;
unsigned long mask;
+ struct ixgbe_ring_feature *feature = adapter->ring_feature;
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- queue0 = index;
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ int dcb_i = feature[RING_F_DCB].indices;
+ if (dcb_i == 8)
+ queue0 = index >> 4;
+ else if (dcb_i == 4)
+ queue0 = index >> 5;
+ else
+ dev_err(&adapter->pdev->dev, "Invalid DCB "
+ "configuration\n");
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ struct ixgbe_ring_feature *f;
+
+ rx_ring = &adapter->rx_ring[queue0];
+ f = &adapter->ring_feature[RING_F_FCOE];
+ if ((queue0 == 0) && (index > rx_ring->reg_idx))
+ queue0 = f->mask + index -
+ rx_ring->reg_idx - 1;
+ }
+#endif /* IXGBE_FCOE */
+ } else {
+ queue0 = index;
+ }
} else {
- mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
+ mask = (unsigned long) feature[RING_F_RSS].mask;
queue0 = index & mask;
index = index & mask;
}
@@ -1689,33 +1926,55 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+ srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK;
+
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- u16 bufsz = IXGBE_RXBUFFER_2048;
- /* grow the amount we can receive on large page machines */
- if (bufsz < (PAGE_SIZE / 2))
- bufsz = (PAGE_SIZE / 2);
- /* cap the bufsz at our largest descriptor size */
- bufsz = min((u16)IXGBE_MAX_RXBUFFER, bufsz);
-
- srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
+ srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#else
+ srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#endif
srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- srrctl |= ((IXGBE_RX_HDR_SIZE <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
} else {
+ srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
-
- if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
- srrctl |= IXGBE_RXBUFFER_2048 >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
- srrctl |= rx_ring->rx_buf_len >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
}
IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}
+static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+{
+ u32 mrqc = 0;
+ int mask;
+
+ if (!(adapter->hw.mac.type == ixgbe_mac_82599EB))
+ return mrqc;
+
+ mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+#ifdef CONFIG_IXGBE_DCB
+ | IXGBE_FLAG_DCB_ENABLED
+#endif
+ );
+
+ switch (mask) {
+ case (IXGBE_FLAG_RSS_ENABLED):
+ mrqc = IXGBE_MRQC_RSSEN;
+ break;
+#ifdef CONFIG_IXGBE_DCB
+ case (IXGBE_FLAG_DCB_ENABLED):
+ mrqc = IXGBE_MRQC_RT8TCEN;
+ break;
+#endif /* CONFIG_IXGBE_DCB */
+ default:
+ break;
+ }
+
+ return mrqc;
+}
+
/**
* ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
* @adapter: board private structure
@@ -1736,11 +1995,17 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
u32 fctrl, hlreg0;
u32 reta = 0, mrqc = 0;
u32 rdrxctl;
+ u32 rscctrl;
int rx_buf_len;
/* Decide whether to use packet split mode or not */
adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+#endif /* IXGBE_FCOE */
+
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
rx_buf_len = IXGBE_RX_HDR_SIZE;
@@ -1749,11 +2014,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
+ IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
}
} else {
- if (netdev->mtu <= ETH_DATA_LEN)
+ if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) &&
+ (netdev->mtu <= ETH_DATA_LEN))
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
else
rx_buf_len = ALIGN(max_frame, 1024);
@@ -1770,6 +2037,10 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
else
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+#endif
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
@@ -1777,8 +2048,10 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
- /* Setup the HW Rx Head and Tail Descriptor Pointers and
- * the Base and Length of the Rx Descriptor Ring */
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
for (i = 0; i < adapter->num_rx_queues; i++) {
rdba = adapter->rx_ring[i].dma;
j = adapter->rx_ring[i].reg_idx;
@@ -1791,6 +2064,17 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
adapter->rx_ring[i].tail = IXGBE_RDT(j);
adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ struct ixgbe_ring_feature *f;
+ f = &adapter->ring_feature[RING_F_FCOE];
+ if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ (i >= f->mask) && (i < f->mask + f->indices))
+ adapter->rx_ring[i].rx_buf_len =
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ }
+
+#endif /* IXGBE_FCOE */
ixgbe_configure_srrctl(adapter, j);
}
@@ -1811,23 +2095,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
}
/* Program MRQC for the distribution of queues */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- int mask = adapter->flags & (
- IXGBE_FLAG_RSS_ENABLED
- | IXGBE_FLAG_DCB_ENABLED
- );
+ mrqc = ixgbe_setup_mrqc(adapter);
- switch (mask) {
- case (IXGBE_FLAG_RSS_ENABLED):
- mrqc = IXGBE_MRQC_RSSEN;
- break;
- case (IXGBE_FLAG_DCB_ENABLED):
- mrqc = IXGBE_MRQC_RT8TCEN;
- break;
- default:
- break;
- }
- }
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
/* Fill out redirection table */
for (i = 0, j = 0; i < 128; i++, j++) {
@@ -1875,8 +2144,45 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82599EB) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+ rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
+
+ if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) {
+ /* Enable 82599 HW-RSC */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
+ rscctrl |= IXGBE_RSCCTL_RSCEN;
+ /*
+ * we must limit the number of descriptors so that the
+ * total size of max desc * buf_len is not greater
+ * than 65535
+ */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+#if (MAX_SKB_FRAGS > 16)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+#elif (MAX_SKB_FRAGS > 8)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+#elif (MAX_SKB_FRAGS > 4)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+#else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+#endif
+ } else {
+ if (rx_buf_len < IXGBE_RXBUFFER_4096)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+ else if (rx_buf_len < IXGBE_RXBUFFER_8192)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+ else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
+ }
+ /* Disable RSC for ACK packets */
+ IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+ (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+ }
}
static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -2015,11 +2321,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* reprogram secondary unicast list */
- addr_count = netdev->uc_count;
- if (addr_count)
- addr_list = netdev->uc_list->dmi_addr;
- hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
- ixgbe_addr_list_itr);
+ hw->mac.ops.update_uc_addr_list(hw, &netdev->uc_list);
/* reprogram multicast list */
addr_count = netdev->mc_count;
@@ -2041,13 +2343,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
struct napi_struct *napi;
- q_vector = &adapter->q_vector[q_idx];
- if (!q_vector->rxr_count)
- continue;
+ q_vector = adapter->q_vector[q_idx];
napi = &q_vector->napi;
- if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
- (q_vector->rxr_count > 1))
- napi->poll = &ixgbe_clean_rxonly_many;
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ if (!q_vector->rxr_count || !q_vector->txr_count) {
+ if (q_vector->txr_count == 1)
+ napi->poll = &ixgbe_clean_txonly;
+ else if (q_vector->rxr_count == 1)
+ napi->poll = &ixgbe_clean_rxonly;
+ }
+ }
napi_enable(napi);
}
@@ -2064,9 +2369,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
q_vectors = 1;
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- q_vector = &adapter->q_vector[q_idx];
- if (!q_vector->rxr_count)
- continue;
+ q_vector = adapter->q_vector[q_idx];
napi_disable(&q_vector->napi);
}
}
@@ -2124,6 +2427,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
int i;
ixgbe_set_rx_mode(netdev);
@@ -2140,6 +2444,20 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
netif_set_gso_max_size(netdev, 65536);
#endif
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i].atr_sample_rate =
+ adapter->atr_sample_rate;
+ ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
+ } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+ ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
+ }
+
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2294,6 +2612,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}
+#ifdef IXGBE_FCOE
+ /* adjust max frame to be able to do baby jumbo for FCoE */
+ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
+ max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+
+#endif /* IXGBE_FCOE */
mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -2357,6 +2682,17 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
ixgbe_irq_enable(adapter);
/*
+ * If this adapter has a fan, check to see if we had a failure
+ * before we enabled the interrupt.
+ */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ DPRINTK(DRV, CRIT,
+ "Fan has stopped, replace the adapter\n");
+ }
+
+ /*
* For hot-pluggable SFP+ devices, a new SFP+ module may have
* arrived before interrupts were enabled. We need to kick off
* the SFP+ module setup first, then try to bring up link.
@@ -2378,6 +2714,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
}
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ set_bit(__IXGBE_FDIR_INIT_DONE,
+ &(adapter->tx_ring[i].reinit_state));
+
/* enable transmits */
netif_tx_start_all_queues(netdev);
@@ -2404,20 +2744,37 @@ int ixgbe_up(struct ixgbe_adapter *adapter)
/* hardware has been reset, we need to reload some things */
ixgbe_configure(adapter);
- ixgbe_napi_add_all(adapter);
-
return ixgbe_up_complete(adapter);
}
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- if (hw->mac.ops.init_hw(hw))
- dev_err(&adapter->pdev->dev, "Hardware Error\n");
+ int err;
+
+ err = hw->mac.ops.init_hw(hw);
+ switch (err) {
+ case 0:
+ case IXGBE_ERR_SFP_NOT_PRESENT:
+ break;
+ case IXGBE_ERR_MASTER_REQUESTS_PENDING:
+ dev_err(&adapter->pdev->dev, "master disable timed out\n");
+ break;
+ case IXGBE_ERR_EEPROM_VERSION:
+ /* We are running on a pre-production device, log a warning */
+ dev_warn(&adapter->pdev->dev, "This device is a pre-production "
+ "adapter/LOM. Please be aware there may be issues "
+ "associated with your hardware. If you are "
+ "experiencing problems please contact your Intel or "
+ "hardware representative who provided you with this "
+ "hardware.\n");
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
+ }
/* reprogram the RAR[0] in case user changed it. */
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
}
/**
@@ -2445,8 +2802,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
- dev_kfree_skb(rx_buffer_info->skb);
+ struct sk_buff *skb = rx_buffer_info->skb;
rx_buffer_info->skb = NULL;
+ do {
+ struct sk_buff *this = skb;
+ skb = skb->prev;
+ dev_kfree_skb(this);
+ } while (skb);
}
if (!rx_buffer_info->page)
continue;
@@ -2560,6 +2922,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
del_timer_sync(&adapter->watchdog_timer);
cancel_work_sync(&adapter->watchdog_task);
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+ adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ cancel_work_sync(&adapter->fdir_reinit_task);
+
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
j = adapter->tx_ring[i].reg_idx;
@@ -2575,13 +2941,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
- dca_remove_requester(&adapter->pdev->dev);
- }
-
-#endif
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
ixgbe_clean_all_tx_rings(adapter);
@@ -2589,13 +2948,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCA
/* since we reset the hardware DCA settings were cleared */
- if (dca_add_requester(&adapter->pdev->dev) == 0) {
- adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
- /* always use CB2 mode, difference is masked
- * in the CB driver */
- IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
- ixgbe_setup_dca(adapter);
- }
+ ixgbe_setup_dca(adapter);
#endif
}
@@ -2620,7 +2973,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
}
#endif
- tx_clean_complete = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
+ tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
if (!tx_clean_complete)
@@ -2632,7 +2985,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
if (adapter->itr_setting & 1)
ixgbe_set_itr(adapter);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter);
+ ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
}
return work_done;
}
@@ -2668,17 +3021,15 @@ static void ixgbe_reset_task(struct work_struct *work)
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
bool ret = false;
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- adapter->ring_feature[RING_F_DCB].mask = 0x7 << 3;
- adapter->num_rx_queues =
- adapter->ring_feature[RING_F_DCB].indices;
- adapter->num_tx_queues =
- adapter->ring_feature[RING_F_DCB].indices;
- ret = true;
- } else {
- ret = false;
- }
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return ret;
+
+ f->mask = 0x7 << 3;
+ adapter->num_rx_queues = f->indices;
+ adapter->num_tx_queues = f->indices;
+ ret = true;
return ret;
}
@@ -2695,13 +3046,12 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
bool ret = false;
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- adapter->ring_feature[RING_F_RSS].mask = 0xF;
- adapter->num_rx_queues =
- adapter->ring_feature[RING_F_RSS].indices;
- adapter->num_tx_queues =
- adapter->ring_feature[RING_F_RSS].indices;
+ f->mask = 0xF;
+ adapter->num_rx_queues = f->indices;
+ adapter->num_tx_queues = f->indices;
ret = true;
} else {
ret = false;
@@ -2710,6 +3060,79 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
return ret;
}
+/**
+ * ixgbe_set_fdir_queues - Allocate queues for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Flow Director is an advanced Rx filter, attempting to get Rx flows back
+ * to the original CPU that initiated the Tx session. This runs in addition
+ * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
+ * Rx load across CPUs using RSS.
+ *
+ **/
+static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
+{
+ bool ret = false;
+ struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+
+ f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
+ f_fdir->mask = 0;
+
+ /* Flow Director must have RSS enabled */
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
+ ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
+ adapter->num_tx_queues = f_fdir->indices;
+ adapter->num_rx_queues = f_fdir->indices;
+ ret = true;
+ } else {
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ }
+ return ret;
+}
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_set_fcoe_queues - Allocate queues for Fibre Channel over Ethernet (FCoE)
+ * @adapter: board private structure to initialize
+ *
+ * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
+ * The ring feature mask is not used as a mask for FCoE, as it can take any 8
+ * rx queues out of the max number of rx queues; instead, it is used as the
+ * index of the first rx queue used by FCoE.
+ *
+ **/
+static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
+{
+ bool ret = false;
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+ f->indices = min((int)num_online_cpus(), f->indices);
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+#ifdef CONFIG_IXGBE_DCB
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ DPRINTK(PROBE, INFO, "FCOE enabled with DCB \n");
+ ixgbe_set_dcb_queues(adapter);
+ }
+#endif
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ DPRINTK(PROBE, INFO, "FCOE enabled with RSS \n");
+ ixgbe_set_rss_queues(adapter);
+ }
+ /* adding FCoE rx rings to the end */
+ f->mask = adapter->num_rx_queues;
+ adapter->num_rx_queues += f->indices;
+ if (adapter->num_tx_queues == 0)
+ adapter->num_tx_queues = f->indices;
+
+ ret = true;
+ }
+
+ return ret;
+}
+
+#endif /* IXGBE_FCOE */
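For a concrete picture of the FCoE queue allocation above, a standalone sketch assuming (illustratively) 16 RSS rx queues and 8 FCoE queues: the FCoE rings are appended after the RSS rings and f->mask records the index of the first one.

#include <stdio.h>

int main(void)
{
	unsigned int rss_indices = 16, fcoe_indices = 8;
	unsigned int num_rx = rss_indices;	/* after ixgbe_set_rss_queues() */
	unsigned int fcoe_mask = num_rx;	/* first FCoE rx queue index    */

	num_rx += fcoe_indices;
	printf("rx queues 0-%u: RSS, %u-%u: FCoE (f->mask=%u)\n",
	       rss_indices - 1, fcoe_mask, num_rx - 1, fcoe_mask);
	return 0;
}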
/*
* ixgbe_set_num_queues: Allocate queues for device, feature dependant
* @adapter: board private structure to initialize
@@ -2723,11 +3146,19 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
**/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
+#ifdef IXGBE_FCOE
+ if (ixgbe_set_fcoe_queues(adapter))
+ goto done;
+
+#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_DCB
if (ixgbe_set_dcb_queues(adapter))
goto done;
#endif
+ if (ixgbe_set_fdir_queues(adapter))
+ goto done;
+
if (ixgbe_set_rss_queues(adapter))
goto done;
@@ -2778,9 +3209,6 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- ixgbe_set_num_queues(adapter);
} else {
adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
/*
@@ -2902,6 +3330,64 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
#endif
/**
+ * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for Flow Director to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+{
+ int i;
+ bool ret = false;
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
+ ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i].reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i].reg_idx = i;
+ ret = true;
+ }
+
+ return ret;
+}
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
+ *
+ */
+static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
+{
+ int i, fcoe_i = 0;
+ bool ret = false;
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+#ifdef CONFIG_IXGBE_DCB
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ ixgbe_cache_ring_dcb(adapter);
+ fcoe_i = adapter->rx_ring[0].reg_idx + 1;
+ }
+#endif /* CONFIG_IXGBE_DCB */
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ ixgbe_cache_ring_rss(adapter);
+ fcoe_i = f->mask;
+ }
+ for (i = 0; i < f->indices; i++, fcoe_i++)
+ adapter->rx_ring[f->mask + i].reg_idx = fcoe_i;
+ ret = true;
+ }
+ return ret;
+}
+
+#endif /* IXGBE_FCOE */
+/**
* ixgbe_cache_ring_register - Descriptor ring to register mapping
* @adapter: board private structure to initialize
*
@@ -2918,11 +3404,19 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
adapter->rx_ring[0].reg_idx = 0;
adapter->tx_ring[0].reg_idx = 0;
+#ifdef IXGBE_FCOE
+ if (ixgbe_cache_ring_fcoe(adapter))
+ return;
+
+#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_DCB
if (ixgbe_cache_ring_dcb(adapter))
return;
#endif
+ if (ixgbe_cache_ring_fdir(adapter))
+ return;
+
if (ixgbe_cache_ring_rss(adapter))
return;
}
@@ -3004,31 +3498,23 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
* mean we disable MSI-X capabilities of the adapter. */
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
- if (!adapter->msix_entries) {
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- ixgbe_set_num_queues(adapter);
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- err = ixgbe_alloc_queues(adapter);
- if (err) {
- DPRINTK(PROBE, ERR, "Unable to allocate memory "
- "for queues\n");
- goto out;
- }
+ if (adapter->msix_entries) {
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
- goto try_msi;
- }
+ ixgbe_acquire_msix_vectors(adapter, v_budget);
- for (vector = 0; vector < v_budget; vector++)
- adapter->msix_entries[vector].entry = vector;
-
- ixgbe_acquire_msix_vectors(adapter, v_budget);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ goto out;
+ }
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- goto out;
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ adapter->atr_sample_rate = 0;
+ ixgbe_set_num_queues(adapter);
-try_msi:
err = pci_enable_msi(adapter->pdev);
if (!err) {
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
@@ -3043,6 +3529,79 @@ out:
return err;
}
+/**
+ * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct ixgbe_q_vector *q_vector;
+ int napi_vectors;
+ int (*poll)(struct napi_struct *, int);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+ poll = &ixgbe_clean_rxtx_many;
+ } else {
+ num_q_vectors = 1;
+ napi_vectors = 1;
+ poll = &ixgbe_poll;
+ }
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->eitr = adapter->eitr_param;
+ q_vector->v_idx = q_idx;
+ netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition, if
+ * NAPI is enabled, it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_q_vectors = 1;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
+ adapter->q_vector[q_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -3074,18 +3633,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
/* Number of supported queues */
ixgbe_set_num_queues(adapter);
- err = ixgbe_alloc_queues(adapter);
- if (err) {
- DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
err = ixgbe_set_interrupt_capability(adapter);
if (err) {
DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
goto err_set_interrupt;
}
+ err = ixgbe_alloc_q_vectors(adapter);
+ if (err) {
+ DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
+ "vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = ixgbe_alloc_queues(adapter);
+ if (err) {
+ DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
"Tx Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" :
@@ -3095,11 +3661,30 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
return 0;
+err_alloc_queues:
+ ixgbe_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
+ return err;
+}
+
+/**
+ * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt-specific resources and reset the structure
+ * to pre-load conditions.
+ **/
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
-err_alloc_queues:
- return err;
+ adapter->tx_ring = NULL;
+ adapter->rx_ring = NULL;
+
+ ixgbe_free_q_vectors(adapter);
+ ixgbe_reset_interrupt_capability(adapter);
}
/**
@@ -3185,10 +3770,24 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
- if (hw->mac.type == ixgbe_mac_82598EB)
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (hw->device_id == IXGBE_DEV_ID_82598AT)
+ adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
- else if (hw->mac.type == ixgbe_mac_82599EB)
+ } else if (hw->mac.type == ixgbe_mac_82599EB) {
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+ adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE;
+ adapter->flags |= IXGBE_FLAG2_RSC_ENABLED;
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->ring_feature[RING_F_FDIR].indices =
+ IXGBE_MAX_FDIR_INDICES;
+ adapter->atr_sample_rate = 20;
+ adapter->fdir_pballoc = 0;
+#ifdef IXGBE_FCOE
+ adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
+ adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
+#endif /* IXGBE_FCOE */
+ }
#ifdef CONFIG_IXGBE_DCB
/* Configure DCB traffic classes */
@@ -3203,6 +3802,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
adapter->dcb_cfg.rx_pba_cfg = pba_equal;
+ adapter->dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.round_robin_enable = false;
adapter->dcb_set_bitmap = 0x00;
ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
@@ -3213,6 +3813,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* default flow control settings */
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
+#ifdef CONFIG_DCB
+ adapter->last_lfc_mode = hw->fc.current_mode;
+#endif
hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
@@ -3503,6 +4106,8 @@ static int ixgbe_open(struct net_device *netdev)
if (test_bit(__IXGBE_TESTING, &adapter->state))
return -EBUSY;
+ netif_carrier_off(netdev);
+
/* allocate transmit descriptors */
err = ixgbe_setup_all_tx_resources(adapter);
if (err)
@@ -3515,8 +4120,6 @@ static int ixgbe_open(struct net_device *netdev)
ixgbe_configure(adapter);
- ixgbe_napi_add_all(adapter);
-
err = ixgbe_request_irq(adapter);
if (err)
goto err_req_irq;
@@ -3568,55 +4171,6 @@ static int ixgbe_close(struct net_device *netdev)
return 0;
}
-/**
- * ixgbe_napi_add_all - prep napi structs for use
- * @adapter: private struct
- *
- * helper function to napi_add each possible q_vector->napi
- */
-void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
-{
- int q_idx, q_vectors;
- struct net_device *netdev = adapter->netdev;
- int (*poll)(struct napi_struct *, int);
-
- /* check if we already have our netdev->napi_list populated */
- if (&netdev->napi_list != netdev->napi_list.next)
- return;
-
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- poll = &ixgbe_clean_rxonly;
- /* Only enable as many vectors as we have rx queues. */
- q_vectors = adapter->num_rx_queues;
- } else {
- poll = &ixgbe_poll;
- /* only one q_vector for legacy modes */
- q_vectors = 1;
- }
-
- for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
- netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
- }
-}
-
-void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
-{
- int q_idx;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- /* legacy and MSI only use one vector */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
-
- for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
- if (!q_vector->rxr_count)
- continue;
- netif_napi_del(&q_vector->napi);
- }
-}
-
#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
@@ -3626,7 +4180,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- err = pci_enable_device(pdev);
+
+ err = pci_enable_device_mem(pdev);
if (err) {
printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
"suspend\n");
@@ -3634,8 +4189,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
}
pci_set_master(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
+ pci_wake_from_d3(pdev, false);
err = ixgbe_init_interrupt_scheme(adapter);
if (err) {
@@ -3679,11 +4233,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_free_all_tx_resources(adapter);
ixgbe_free_all_rx_resources(adapter);
}
- ixgbe_reset_interrupt_capability(adapter);
- ixgbe_napi_del_all(adapter);
- INIT_LIST_HEAD(&netdev->napi_list);
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
+ ixgbe_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -3711,13 +4261,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
}
- if (wufc && hw->mac.type == ixgbe_mac_82599EB) {
- pci_enable_wake(pdev, PCI_D3hot, 1);
- pci_enable_wake(pdev, PCI_D3cold, 1);
- } else {
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
- }
+ if (wufc && hw->mac.type == ixgbe_mac_82599EB)
+ pci_wake_from_d3(pdev, true);
+ else
+ pci_wake_from_d3(pdev, false);
*enable_wake = !!wufc;
@@ -3772,9 +4319,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
if (hw->mac.type == ixgbe_mac_82599EB) {
+ u64 rsc_count = 0;
for (i = 0; i < 16; i++)
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rsc_count += adapter->rx_ring[i].rsc_count;
+ adapter->rsc_count = rsc_count;
}
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@ -3821,6 +4372,16 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+#ifdef IXGBE_FCOE
+ adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+ adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+ adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+ adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+ adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+ adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+#endif /* IXGBE_FCOE */
} else {
adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
@@ -3888,64 +4449,43 @@ static void ixgbe_watchdog(unsigned long data)
{
struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
struct ixgbe_hw *hw = &adapter->hw;
+ u64 eics = 0;
+ int i;
- /* Do the watchdog outside of interrupt context due to the lovely
- * delays that some of the newer hardware requires */
- if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
- u64 eics = 0;
- int i;
+ /*
+ * Do the watchdog outside of interrupt context due to the lovely
+ * delays that some of the newer hardware requires
+ */
- for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++)
- eics |= (1 << i);
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ goto watchdog_short_circuit;
- /* Cause software interrupt to ensure rx rings are cleaned */
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)eics);
- } else {
- /*
- * for legacy and MSI interrupts don't set any
- * bits that are enabled for EIAM, because this
- * operation would set *both* EIMS and EICS for
- * any bit in EIAM
- */
- IXGBE_WRITE_REG(hw, IXGBE_EICS,
- (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
- }
- break;
- case ixgbe_mac_82599EB:
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- /*
- * EICS(0..15) first 0-15 q vectors
- * EICS[1] (16..31) q vectors 16-31
- * EICS[2] (0..31) q vectors 32-63
- */
- IXGBE_WRITE_REG(hw, IXGBE_EICS,
- (u32)(eics & 0xFFFF));
- IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1),
- (u32)(eics & 0xFFFF0000));
- IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(2),
- (u32)(eics >> 32));
- } else {
- /*
- * for legacy and MSI interrupts don't set any
- * bits that are enabled for EIAM, because this
- * operation would set *both* EIMS and EICS for
- * any bit in EIAM
- */
- IXGBE_WRITE_REG(hw, IXGBE_EICS,
- (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
- }
- break;
- default:
- break;
- }
- /* Reset the timer */
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 2 * HZ));
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ /*
+ * for legacy and MSI interrupts don't set any bits
+ * that are enabled for EIAM, because this operation
+ * would set *both* EIMS and EICS for any bit in EIAM
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_EICS,
+ (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+ goto watchdog_reschedule;
}
+ /* get one bit for every active tx/rx interrupt vector */
+ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ struct ixgbe_q_vector *qv = adapter->q_vector[i];
+ if (qv->rxr_count || qv->txr_count)
+ eics |= ((u64)1 << i);
+ }
+
+ /* Cause software interrupt to ensure rx rings are cleaned */
+ ixgbe_irq_rearm_queues(adapter, eics);
+
+watchdog_reschedule:
+ /* Reset the timer */
+ mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
+
+watchdog_short_circuit:
schedule_work(&adapter->watchdog_task);
}
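Note: the rewritten watchdog defers the actual register writes to ixgbe_irq_rearm_queues(), which is added earlier in this patch and is not visible in this hunk. A minimal sketch of such a helper, assuming it only re-hosts the per-MAC EICS writes from the switch statement removed above (the parameter name qmask is illustrative):

static void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: a single EICS register covers every queue vector */
		IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)qmask);
	} else if (hw->mac.type == ixgbe_mac_82599EB) {
		/* 82599: EICS -> vectors 0-15, EICS_EX(1) -> 16-31, EICS_EX(2) -> 32-63 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)(qmask & 0xFFFF));
		IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1), (u32)(qmask & 0xFFFF0000));
		IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(2), (u32)(qmask >> 32));
	}
}

As in the removed code, legacy/MSI mode never reaches this helper, since the watchdog writes the TCP timer and "other" cause bits directly to avoid touching EIAM-covered bits.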
@@ -3999,6 +4539,30 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
}
/**
+ * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_fdir_reinit_task(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work,
+ struct ixgbe_adapter,
+ fdir_reinit_task);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ set_bit(__IXGBE_FDIR_INIT_DONE,
+ &(adapter->tx_ring[i].reinit_state));
+ } else {
+ DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
+ "ignored adding FDIR ATR filters \n");
+ }
+ /* Done FDIR Re-initialization, enable transmits */
+ netif_tx_start_all_queues(adapter->netdev);
+}
+
+/**
* ixgbe_watchdog_task - worker thread to bring link up
* @work: pointer to work_struct containing our data
**/
@@ -4011,16 +4575,32 @@ static void ixgbe_watchdog_task(struct work_struct *work)
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed = adapter->link_speed;
bool link_up = adapter->link_up;
+ int i;
+ struct ixgbe_ring *tx_ring;
+ int some_tx_pending = 0;
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up) {
+#ifdef CONFIG_DCB
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ hw->mac.ops.fc_enable(hw, i);
+ } else {
+ hw->mac.ops.fc_enable(hw, 0);
+ }
+#else
+ hw->mac.ops.fc_enable(hw, 0);
+#endif
+ }
+
if (link_up ||
time_after(jiffies, (adapter->link_check_timeout +
IXGBE_TRY_LINK_TIMEOUT))) {
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
}
adapter->link_up = link_up;
adapter->link_speed = link_speed;
@@ -4068,6 +4648,25 @@ static void ixgbe_watchdog_task(struct work_struct *work)
}
}
+ if (!netif_carrier_ok(netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring = &adapter->tx_ring[i];
+ if (tx_ring->next_to_use != tx_ring->next_to_clean) {
+ some_tx_pending = 1;
+ break;
+ }
+ }
+
+ if (some_tx_pending) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ schedule_work(&adapter->reset_task);
+ }
+ }
+
ixgbe_update_stats(adapter);
adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
@@ -4196,12 +4795,18 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
type_tucmd_mlhl |=
IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
case cpu_to_be16(ETH_P_IPV6):
/* XXX what about other V6 headers?? */
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
type_tucmd_mlhl |=
IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
default:
if (unlikely(net_ratelimit())) {
@@ -4234,10 +4839,12 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, unsigned int first)
+ struct sk_buff *skb, u32 tx_flags,
+ unsigned int first)
{
struct ixgbe_tx_buffer *tx_buffer_info;
- unsigned int len = skb_headlen(skb);
+ unsigned int len;
+ unsigned int total = skb->len;
unsigned int offset = 0, size, count = 0, i;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
@@ -4252,16 +4859,22 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
map = skb_shinfo(skb)->dma_maps;
+ if (tx_flags & IXGBE_TX_FLAGS_FCOE)
+ /* excluding fcoe_crc_eof for FCoE */
+ total -= sizeof(struct fcoe_crc_eof);
+
+ len = min(skb_headlen(skb), total);
while (len) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = map[0] + offset;
+ tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
len -= size;
+ total -= size;
offset += size;
count++;
@@ -4276,7 +4889,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
+ len = min((unsigned int)frag->size, total);
offset = 0;
while (len) {
@@ -4288,14 +4901,17 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = map[f + 1] + offset;
+ tx_buffer_info->dma = map[f] + offset;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
len -= size;
+ total -= size;
offset += size;
count++;
}
+ if (total == 0)
+ break;
}
tx_ring->tx_buffer_info[i].skb = skb;
@@ -4337,6 +4953,13 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
IXGBE_ADVTXD_POPTS_SHIFT;
+ if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+ olinfo_status |= IXGBE_ADVTXD_CC;
+ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ if (tx_flags & IXGBE_TX_FLAGS_FSO)
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+ }
+
olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
i = tx_ring->next_to_use;
@@ -4366,6 +4989,58 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
+static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+ int queue, u32 tx_flags)
+{
+ /* Right now, we support IPv4 only */
+ struct ixgbe_atr_input atr_input;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ struct iphdr *iph = ip_hdr(skb);
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ u16 vlan_id, src_port, dst_port, flex_bytes;
+ u32 src_ipv4_addr, dst_ipv4_addr;
+ u8 l4type = 0;
+
+ /* check if we're UDP or TCP */
+ if (iph->protocol == IPPROTO_TCP) {
+ th = tcp_hdr(skb);
+ src_port = th->source;
+ dst_port = th->dest;
+ l4type |= IXGBE_ATR_L4TYPE_TCP;
+ /* l4type IPv4 type is 0, no need to assign */
+ } else if (iph->protocol == IPPROTO_UDP) {
+ uh = udp_hdr(skb);
+ src_port = uh->source;
+ dst_port = uh->dest;
+ l4type |= IXGBE_ATR_L4TYPE_UDP;
+ /* l4type IPv4 type is 0, no need to assign */
+ } else {
+ /* Unsupported L4 header, just bail here */
+ return;
+ }
+
+ memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+
+ vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
+ IXGBE_TX_FLAGS_VLAN_SHIFT;
+ src_ipv4_addr = iph->saddr;
+ dst_ipv4_addr = iph->daddr;
+ flex_bytes = eth->h_proto;
+
+ ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
+ ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
+ ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
+ ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
+ ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+ /* src and dst are inverted: the filter must match the flow as the receiver sees it */
+ ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
+ ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+
+ /* This assumes the Rx queue and Tx queue are bound to the same CPU */
+ ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
+}
+
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
struct ixgbe_ring *tx_ring, int size)
{
@@ -4400,6 +5075,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ return smp_processor_id();
+
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
return 0; /* All traffic should default to class 0 */
@@ -4433,10 +5111,16 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- /* three things can cause us to need a context descriptor */
+
+ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ (skb->protocol == htons(ETH_P_FCOE)))
+ tx_flags |= IXGBE_TX_FLAGS_FCOE;
+
+ /* four things can cause us to need a context descriptor */
if (skb_is_gso(skb) ||
(skb->ip_summed == CHECKSUM_PARTIAL) ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN))
+ (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
+ (tx_flags & IXGBE_TX_FLAGS_FCOE))
count++;
count += TXD_USE_COUNT(skb_headlen(skb));
@@ -4448,27 +5132,49 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- if (skb->protocol == htons(ETH_P_IP))
- tx_flags |= IXGBE_TX_FLAGS_IPV4;
first = tx_ring->next_to_use;
- tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (tso)
- tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
- (skb->ip_summed == CHECKSUM_PARTIAL))
- tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+#ifdef IXGBE_FCOE
+ /* setup tx offload for FCoE */
+ tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_FSO;
+#endif /* IXGBE_FCOE */
+ } else {
+ if (skb->protocol == htons(ETH_P_IP))
+ tx_flags |= IXGBE_TX_FLAGS_IPV4;
+ tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
- count = ixgbe_tx_map(adapter, tx_ring, skb, first);
+ if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_TSO;
+ else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ }
+ count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
if (count) {
+ /* add the ATR filter if ATR is on */
+ if (tx_ring->atr_sample_rate) {
+ ++tx_ring->atr_count;
+ if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
+ test_bit(__IXGBE_FDIR_INIT_DONE,
+ &tx_ring->reinit_state)) {
+ ixgbe_atr(adapter, skb, tx_ring->queue_index,
+ tx_flags);
+ tx_ring->atr_count = 0;
+ }
+ }
ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
hdr_len);
- netdev->trans_start = jiffies;
ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
} else {
@@ -4519,6 +5225,82 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
return 0;
}
+static int
+ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 value;
+ int rc;
+
+ if (prtad != hw->phy.mdio.prtad)
+ return -EINVAL;
+ rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
+ if (!rc)
+ rc = value;
+ return rc;
+}
+
+static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
+ u16 addr, u16 value)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (prtad != hw->phy.mdio.prtad)
+ return -EINVAL;
+ return hw->phy.ops.write_reg(hw, addr, devad, value);
+}
+
+static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
+}
+
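Note: ixgbe_ioctl() hands standard MII ioctls to mdio_mii_ioctl(), so the PHY becomes reachable through the usual SIOCGMIIPHY/SIOCGMIIREG path once the mdio_if_info fields are filled in at probe time. A hypothetical userspace sketch under that assumption (the interface name "eth0" and the BMSR read are illustrative, not part of the patch):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	/* mii-tool style overlay: the kernel reads and writes the MII data in place */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* driver reports its MDIO port address */
		return 1;
	mii->reg_num = MII_BMSR;		/* basic status, via the helper's clause-22 emulation */
	if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
		printf("BMSR = 0x%04x\n", mii->val_out);
	return 0;
}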
+/**
+ * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
+ * netdev->dev_addr_list
+ * @dev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int ixgbe_add_sanmac_netdev(struct net_device *dev)
+{
+ int err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+ if (is_valid_ether_addr(mac->san_addr)) {
+ rtnl_lock();
+ err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+ return err;
+}
+
+/**
+ * ixgbe_del_sanmac_netdev - Remove the SAN MAC address from the corresponding
+ * netdev->dev_addr_list
+ * @dev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int ixgbe_del_sanmac_netdev(struct net_device *dev)
+{
+ int err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+ if (is_valid_ether_addr(mac->san_addr)) {
+ rtnl_lock();
+ err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+ return err;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
@@ -4552,9 +5334,14 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_vlan_rx_register = ixgbe_vlan_rx_register,
.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
+ .ndo_do_ioctl = ixgbe_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
+#ifdef IXGBE_FCOE
+ .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
+ .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
+#endif /* IXGBE_FCOE */
};
/**
@@ -4577,9 +5364,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac;
+#ifdef IXGBE_FCOE
+ u16 device_caps;
+#endif
u32 part_num, eec;
- err = pci_enable_device(pdev);
+ err = pci_enable_device_mem(pdev);
if (err)
return err;
@@ -4599,9 +5389,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
pci_using_dac = 0;
}
- err = pci_request_regions(pdev, ixgbe_driver_name);
+ err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM), ixgbe_driver_name);
if (err) {
- dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed 0x%x\n", err);
goto err_pci_reg;
}
@@ -4665,6 +5457,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* PHY */
memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ /* ixgbe_identify_phy_generic will set prtad and mmds properly */
+ hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
+ hw->phy.mdio.mmds = 0;
+ hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ hw->phy.mdio.dev = netdev;
+ hw->phy.mdio.mdio_read = ixgbe_mdio_read;
+ hw->phy.mdio.mdio_write = ixgbe_mdio_write;
/* set up this timer and work struct before calling get_invariants
* which might start the timer
@@ -4682,29 +5481,42 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->sfp_config_module_task,
ixgbe_sfp_config_module_task);
- err = ii->get_invariants(hw);
- if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
- /* start a kernel thread to watch for a module to arrive */
- set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
- mod_timer(&adapter->sfp_timer,
- round_jiffies(jiffies + (2 * HZ)));
- err = 0;
- } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "failed to load because an "
- "unsupported SFP+ module type was detected.\n");
- goto err_hw_init;
- } else if (err) {
- goto err_hw_init;
- }
+ ii->get_invariants(hw);
/* setup the private structure */
err = ixgbe_sw_init(adapter);
if (err)
goto err_sw_init;
+ /*
+ * If there is a fan on this device and it has failed log the
+ * failure.
+ */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ DPRINTK(PROBE, CRIT,
+ "Fan has stopped, replace the adapter\n");
+ }
+
/* reset_hw fills in the perm_addr as well */
err = hw->mac.ops.reset_hw(hw);
- if (err) {
+ if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
+ hw->mac.type == ixgbe_mac_82598EB) {
+ /*
+ * Start a kernel thread to watch for a module to arrive.
+ * Only do this for 82598, since 82599 will generate
+ * interrupts on module arrival.
+ */
+ set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
+ mod_timer(&adapter->sfp_timer,
+ round_jiffies(jiffies + (2 * HZ)));
+ err = 0;
+ } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ dev_err(&adapter->pdev->dev, "failed to load because an "
+ "unsupported SFP+ module type was detected.\n");
+ goto err_sw_init;
+ } else if (err) {
dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
goto err_sw_init;
}
@@ -4720,6 +5532,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_TSO6;
netdev->features |= NETIF_F_GRO;
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ netdev->features |= NETIF_F_SCTP_CSUM;
+
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_IP_CSUM;
@@ -4732,9 +5547,32 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->dcbnl_ops = &dcbnl_ops;
#endif
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ if (hw->mac.ops.get_device_caps) {
+ hw->mac.ops.get_device_caps(hw, &device_caps);
+ if (!(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)) {
+ netdev->features |= NETIF_F_FCOE_CRC;
+ netdev->features |= NETIF_F_FSO;
+ netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+ DPRINTK(DRV, INFO, "FCoE enabled, "
+ "disabling Flow Director\n");
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags &=
+ ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ adapter->atr_sample_rate = 0;
+ } else {
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+ }
+ }
+ }
+#endif /* IXGBE_FCOE */
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED)
+ netdev->features |= NETIF_F_LRO;
+
/* make sure the EEPROM is good */
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
@@ -4766,6 +5604,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
case IXGBE_DEV_ID_82599_KX4:
adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ /* Enable ACPI wakeup in GRC */
+ IXGBE_WRITE_REG(hw, IXGBE_GRC,
+ (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
break;
default:
adapter->wol = 0;
@@ -4774,6 +5615,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
device_init_wakeup(&adapter->pdev->dev, true);
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ /* pick up the PCI bus settings for reporting later */
+ hw->mac.ops.get_bus_info(hw);
+
/* print bus type/speed/width info */
dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
@@ -4805,24 +5649,37 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
/* reset the hardware with the new settings */
- hw->mac.ops.start_hw(hw);
-
- netif_carrier_off(netdev);
+ err = hw->mac.ops.start_hw(hw);
+ if (err == IXGBE_ERR_EEPROM_VERSION) {
+ /* We are running on a pre-production device, log a warning */
+ dev_warn(&pdev->dev, "This device is a pre-production "
+ "adapter/LOM. Please be aware there may be issues "
+ "associated with your hardware. If you are "
+ "experiencing problems please contact your Intel or "
+ "hardware representative who provided you with this "
+ "hardware.\n");
+ }
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err)
goto err_register;
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+ adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
+
#ifdef CONFIG_IXGBE_DCA
if (dca_add_requester(&pdev->dev) == 0) {
adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
- /* always use CB2 mode, difference is masked
- * in the CB driver */
- IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
ixgbe_setup_dca(adapter);
}
#endif
+ /* add san mac addr to netdev */
+ ixgbe_add_sanmac_netdev(netdev);
dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
cards_found++;
@@ -4830,9 +5687,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
err_register:
ixgbe_release_hw_control(adapter);
-err_hw_init:
+ ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
- ixgbe_reset_interrupt_capability(adapter);
err_eeprom:
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
@@ -4843,7 +5699,8 @@ err_eeprom:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_regions(pdev);
+ pci_release_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM));
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -4877,6 +5734,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->sfp_task);
cancel_work_sync(&adapter->multispeed_fiber_task);
cancel_work_sync(&adapter->sfp_config_module_task);
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+ adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ cancel_work_sync(&adapter->fdir_reinit_task);
flush_scheduled_work();
#ifdef CONFIG_IXGBE_DCA
@@ -4887,19 +5747,27 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
}
#endif
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ ixgbe_cleanup_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+
+ /* remove the added san mac */
+ ixgbe_del_sanmac_netdev(netdev);
+
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- ixgbe_reset_interrupt_capability(adapter);
+ ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
iounmap(adapter->hw.hw_addr);
- pci_release_regions(pdev);
+ pci_release_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM));
DPRINTK(PROBE, INFO, "complete\n");
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
free_netdev(netdev);
@@ -4927,6 +5795,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
ixgbe_down(adapter);
pci_disable_device(pdev);
@@ -4948,7 +5819,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
pci_ers_result_t result;
int err;
- if (pci_enable_device(pdev)) {
+ if (pci_enable_device_mem(pdev)) {
DPRINTK(PROBE, ERR,
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
@@ -4956,8 +5827,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
+ pci_wake_from_d3(pdev, false);
ixgbe_reset(adapter);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 14e9606aa3b..453e966762f 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -44,7 +44,6 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(u32 *i2cctl);
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
-static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
@@ -61,8 +60,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
if (hw->phy.type == ixgbe_phy_unknown) {
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
- if (ixgbe_validate_phy_addr(hw, phy_addr)) {
- hw->phy.addr = phy_addr;
+ if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
ixgbe_get_phy_id(hw);
hw->phy.type =
ixgbe_get_phy_type_from_id(hw->phy.id);
@@ -78,26 +76,6 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_validate_phy_addr - Determines phy address is valid
- * @hw: pointer to hardware structure
- *
- **/
-static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
-{
- u16 phy_id = 0;
- bool valid = false;
-
- hw->phy.addr = phy_addr;
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
-
- if (phy_id != 0xFFFF && phy_id != 0x0)
- valid = true;
-
- return valid;
-}
-
-/**
* ixgbe_get_phy_id - Get the phy type
* @hw: pointer to hardware structure
*
@@ -108,14 +86,12 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
u16 phy_id_high = 0;
u16 phy_id_low = 0;
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
&phy_id_high);
if (status == 0) {
hw->phy.id = (u32)(phy_id_high << 16);
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
&phy_id_low);
hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
@@ -160,9 +136,8 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
* Perform soft PHY reset to the PHY_XS.
* This will cause a soft reset to the PHY
*/
- return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE,
- IXGBE_MDIO_PHY_XS_RESET);
+ return hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
+ MDIO_CTRL1_RESET);
}
/**
@@ -192,7 +167,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
/* Setup and write the address cycle command */
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -223,7 +198,8 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
*/
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (hw->phy.mdio.prtad <<
+ IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -292,7 +268,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
/* Setup and write the address cycle command */
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -323,7 +299,8 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
*/
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (hw->phy.mdio.prtad <<
+ IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -365,7 +342,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
s32 status = IXGBE_NOT_IMPLEMENTED;
u32 time_out;
u32 max_time_out = 10;
- u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+ u16 autoneg_reg;
/*
* Set advertisement settings in PHY based on autoneg_advertised
@@ -373,36 +350,31 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* tnx devices cannot be "forced" to a autoneg 10G and fail. But can
* for a 1G.
*/
- hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);
if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
- autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
+ autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
else
- autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */
+ autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
- hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+ hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);
/* Restart PHY autonegotiation and wait for completion */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+ hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg);
- autoneg_reg |= IXGBE_MII_RESTART;
+ autoneg_reg |= MDIO_AN_CTRL1_RESTART;
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+ hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg);
/* Wait for autonegotiation to finish */
for (time_out = 0; time_out < max_time_out; time_out++) {
udelay(10);
/* Restart PHY autonegotiation and wait for completion */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
&autoneg_reg);
- autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
- if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
+ autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
+ if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
status = 0;
break;
}
@@ -457,23 +429,21 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
s32 ret_val = 0;
u32 i;
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+ hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
/* reset the PHY and poll for completion */
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE,
- (phy_data | IXGBE_MDIO_PHY_XS_RESET));
+ hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
+ (phy_data | MDIO_CTRL1_RESET));
for (i = 0; i < 100; i++) {
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
- if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
+ hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
+ &phy_data);
+ if ((phy_data & MDIO_CTRL1_RESET) == 0)
break;
msleep(10);
}
- if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
+ if ((phy_data & MDIO_CTRL1_RESET) != 0) {
hw_dbg(hw, "PHY reset did not complete.\n");
ret_val = IXGBE_ERR_PHY;
goto out;
@@ -509,7 +479,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
for (i = 0; i < edata; i++) {
hw->eeprom.ops.read(hw, data_offset, &eword);
hw->phy.ops.write_reg(hw, phy_offset,
- IXGBE_TWINAX_DEV, eword);
+ MDIO_MMD_PMAPMD, eword);
hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
phy_offset);
data_offset++;
@@ -552,18 +522,30 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
u32 vendor_oui = 0;
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
u8 identifier = 0;
u8 comp_codes_1g = 0;
u8 comp_codes_10g = 0;
u8 oui_bytes[3] = {0, 0, 0};
- u8 transmission_media = 0;
+ u8 cable_tech = 0;
u16 enforce_sfp = 0;
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
&identifier);
- if (status == IXGBE_ERR_SFP_NOT_PRESENT) {
+ if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) {
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ if (hw->phy.type != ixgbe_phy_nl) {
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+ }
goto out;
}
@@ -572,8 +554,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
&comp_codes_1g);
hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
&comp_codes_10g);
- hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_TRANSMISSION_MEDIA,
- &transmission_media);
+ hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY,
+ &cable_tech);
/* ID Module
* =========
@@ -586,7 +568,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
* 6 SFP_SR/LR_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
- if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE)
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
hw->phy.sfp_type = ixgbe_sfp_type_sr;
@@ -595,7 +577,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
} else if (hw->mac.type == ixgbe_mac_82599EB) {
- if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE)
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core0;
@@ -620,8 +602,19 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = true;
+
+ /* Determine if the SFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = false;
+ if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = true;
+
/* Determine PHY vendor */
- if (hw->phy.type == ixgbe_phy_unknown) {
+ if (hw->phy.type != ixgbe_phy_nl) {
hw->phy.id = identifier;
hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE0,
@@ -640,8 +633,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
switch (vendor_oui) {
case IXGBE_SFF_VENDOR_OUI_TYCO:
- if (transmission_media &
- IXGBE_SFF_TWIN_AX_CAPABLE)
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type = ixgbe_phy_tw_tyco;
break;
case IXGBE_SFF_VENDOR_OUI_FTL:
@@ -654,31 +646,42 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.type = ixgbe_phy_sfp_intel;
break;
default:
- if (transmission_media &
- IXGBE_SFF_TWIN_AX_CAPABLE)
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type = ixgbe_phy_tw_unknown;
else
hw->phy.type = ixgbe_phy_sfp_unknown;
break;
}
}
- if (hw->mac.type == ixgbe_mac_82598EB ||
- (hw->phy.sfp_type != ixgbe_sfp_type_sr &&
- hw->phy.sfp_type != ixgbe_sfp_type_lr &&
- hw->phy.sfp_type != ixgbe_sfp_type_srlr_core0 &&
- hw->phy.sfp_type != ixgbe_sfp_type_srlr_core1)) {
+
+ /* All passive DA cables are supported */
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+ status = 0;
+ goto out;
+ }
+
+ /* 1G SFP modules are not supported */
+ if (comp_codes_10g == 0) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Anything else 82598-based is supported */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
status = 0;
goto out;
}
- hw->eeprom.ops.read(hw, IXGBE_PHY_ENFORCE_INTEL_SFP_OFFSET,
- &enforce_sfp);
- if (!(enforce_sfp & IXGBE_PHY_ALLOW_ANY_SFP)) {
+ /* This is guaranteed to be 82599, no need to check for NULL */
+ hw->mac.ops.get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = 0;
} else {
hw_dbg(hw, "SFP+ module not supported\n");
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
}
} else {
@@ -1279,7 +1282,7 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
udelay(10);
status = hw->phy.ops.read_reg(hw,
IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ MDIO_MMD_VEND1,
&phy_data);
phy_link = phy_data &
IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
@@ -1307,8 +1310,7 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
{
s32 status = 0;
- status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ status = hw->phy.ops.read_reg(hw, TNX_FW_REV, MDIO_MMD_VEND1,
firmware_version);
return status;
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index cc5f1b3287e..9b700f5bf1e 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -39,11 +39,12 @@
#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
-#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9
+#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
/* Bitmasks */
-#define IXGBE_SFF_TWIN_AX_CAPABLE 0x80
+#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 030ff0a9ea6..fa87309dc08 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -29,6 +29,8 @@
#define _IXGBE_TYPE_H_
#include <linux/types.h>
+#include <linux/mdio.h>
+#include <linux/list.h>
/* Vendor ID */
#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -45,9 +47,9 @@
#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
-#define IXGBE_DEV_ID_82599 0x10D8
#define IXGBE_DEV_ID_82599_KX4 0x10F7
#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -229,6 +231,34 @@
#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+/* Flow Director registers */
+#define IXGBE_FDIRCTRL 0x0EE00
+#define IXGBE_FDIRHKEY 0x0EE68
+#define IXGBE_FDIRSKEY 0x0EE6C
+#define IXGBE_FDIRDIP4M 0x0EE3C
+#define IXGBE_FDIRSIP4M 0x0EE40
+#define IXGBE_FDIRTCPM 0x0EE44
+#define IXGBE_FDIRUDPM 0x0EE48
+#define IXGBE_FDIRIP6M 0x0EE74
+#define IXGBE_FDIRM 0x0EE70
+
+/* Flow Director Stats registers */
+#define IXGBE_FDIRFREE 0x0EE38
+#define IXGBE_FDIRLEN 0x0EE4C
+#define IXGBE_FDIRUSTAT 0x0EE50
+#define IXGBE_FDIRFSTAT 0x0EE54
+#define IXGBE_FDIRMATCH 0x0EE58
+#define IXGBE_FDIRMISS 0x0EE5C
+
+/* Flow Director Programming registers */
+#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
+#define IXGBE_FDIRIPSA 0x0EE18
+#define IXGBE_FDIRIPDA 0x0EE1C
+#define IXGBE_FDIRPORT 0x0EE20
+#define IXGBE_FDIRVLAN 0x0EE24
+#define IXGBE_FDIRHASH 0x0EE28
+#define IXGBE_FDIRCMD 0x0EE2C
+
/* Transmit DMA registers */
#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
@@ -443,6 +473,21 @@
#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
+/* HW RSC registers */
+#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+ (0x0D02C + ((_i - 64) * 0x40)))
+#define IXGBE_RSCDBU 0x03028
+#define IXGBE_RSCCTL_RSCEN 0x01
+#define IXGBE_RSCCTL_MAXDESC_1 0x00
+#define IXGBE_RSCCTL_MAXDESC_4 0x04
+#define IXGBE_RSCCTL_MAXDESC_8 0x08
+#define IXGBE_RSCCTL_MAXDESC_16 0x0C
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
+#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000
+
/* DCB registers */
#define IXGBE_RTRPCS 0x02430
#define IXGBE_RTTDCS 0x04900
@@ -462,6 +507,63 @@
#define IXGBE_RTTDTECC_NO_BCN 0x00000100
#define IXGBE_RTTBCNRC 0x04984
+/* FCoE registers */
+#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
+#define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */
+#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
+#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
+#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
+#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
+#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
+#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
+#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
+#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
+#define IXGBE_FCBUFF_OFFSET_SHIFT 16
+#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
+#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
+#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
+#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
+
+/* FCoE SOF/EOF */
+#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */
+#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
+#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
+#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
+/* FCoE Filter Context Registers */
+#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
+#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
+#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
+#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
+#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
+#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
+/* FCoE Receive Control */
+#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
+#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
+#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
+/* FCoE Redirection */
+#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */
+#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */
+#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
+#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
+#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
+#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+
/* Stats registers */
#define IXGBE_CRCERRS 0x04000
#define IXGBE_ILLERRC 0x04004
@@ -533,6 +635,13 @@
#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */
+#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */
+#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */
+#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */
+#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
+#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
+#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
/* Management */
#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -833,13 +942,7 @@
/* Omer bit masks */
#define IXGBE_CORECTL_WRITE_CMD 0x00010000
-/* Device Type definitions for new protocol MDIO commands */
-#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
-#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
-#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
-#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
-#define IXGBE_TWINAX_DEV 1
+/* MDIO definitions */
#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
@@ -850,31 +953,10 @@
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
-#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
-#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
-#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
-#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
-#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
-#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
-#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
-#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
-#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
-#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */
-#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
-#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
-
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
-/* MII clause 22/28 definitions */
-#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
-
-#define IXGBE_MII_SPEED_SELECTION_REG 0x10
-#define IXGBE_MII_RESTART 0x200
-#define IXGBE_MII_AUTONEG_COMPLETE 0x20
-#define IXGBE_MII_AUTONEG_REG 0x0
-
#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
#define IXGBE_MAX_PHY_ADDR 32
@@ -898,8 +980,6 @@
#define IXGBE_CONTROL_NL 0x000F
#define IXGBE_CONTROL_EOL_NL 0x0FFF
#define IXGBE_CONTROL_SOL_NL 0x0000
-#define IXGBE_PHY_ENFORCE_INTEL_SFP_OFFSET 0x002C
-#define IXGBE_PHY_ALLOW_ANY_SFP 0x1
/* General purpose Interrupt Enable */
#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
@@ -958,6 +1038,8 @@
#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */
#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */
#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */
+#define IXGBE_VT_CTL_POOL_SHIFT 7
+#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
/* VMOLR bitmasks */
#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
@@ -1148,6 +1230,7 @@
/* Interrupt Vector Allocation Registers */
#define IXGBE_IVAR_REG_NUM 25
+#define IXGBE_IVAR_REG_NUM_82599 64
#define IXGBE_IVAR_TXRX_ENTRY 96
#define IXGBE_IVAR_RX_ENTRY 64
#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i))
@@ -1163,6 +1246,7 @@
/* ETYPE Queue Filter/Select Bit Masks */
#define IXGBE_MAX_ETQF_FILTERS 8
+#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
@@ -1185,6 +1269,7 @@
*/
#define IXGBE_ETQF_FILTER_EAPOL 0
#define IXGBE_ETQF_FILTER_BCN 1
+#define IXGBE_ETQF_FILTER_FCOE 2
#define IXGBE_ETQF_FILTER_1588 3
/* VLAN Control Bit Masks */
#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
@@ -1208,8 +1293,10 @@
#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
/* ESDP Bit Masks */
-#define IXGBE_ESDP_SDP0 0x00000001
-#define IXGBE_ESDP_SDP1 0x00000002
+#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
@@ -1309,8 +1396,6 @@
#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
-#define FIBER_LINK_UP_LIMIT 50
-
/* PCS1GLSTA Bit Masks */
#define IXGBE_PCS1GLSTA_LINK_OK 1
#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
@@ -1382,6 +1467,8 @@
#define IXGBE_FW_PTR 0x0F
#define IXGBE_PBANUM0_PTR 0x15
#define IXGBE_PBANUM1_PTR 0x16
+#define IXGBE_DEVICE_CAPS 0x2C
+#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -1425,6 +1512,13 @@
#define IXGBE_EERD_ATTEMPTS 100000
#endif
+#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
+#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
+#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
+#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
+#define IXGBE_FW_PATCH_VERSION_4 0x7
+
/* PCI Bus Info */
#define IXGBE_PCI_LINK_STATUS 0xB2
#define IXGBE_PCI_LINK_WIDTH 0x3F0
@@ -1553,7 +1647,8 @@
#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */
-#define IXGBE_MTQC_64VF 0x8 /* 2 TX Queues per pool w/64VF's */
+#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
+#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
/* Receive Descriptor bit definitions */
@@ -1585,6 +1680,11 @@
#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
+#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
+#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
+#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
+#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */
#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
@@ -1604,12 +1704,19 @@
#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
/* PSRTYPE bit definitions */
#define IXGBE_PSRTYPE_TCPHDR 0x00000010
#define IXGBE_PSRTYPE_UDPHDR 0x00000020
#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
/* SRRCTL bit definitions */
#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
@@ -1710,6 +1817,82 @@
#endif
+enum ixgbe_fdir_pballoc_type {
+ IXGBE_FDIR_PBALLOC_64K = 0,
+ IXGBE_FDIR_PBALLOC_128K,
+ IXGBE_FDIR_PBALLOC_256K,
+};
+#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16
+
+/* Flow Director register values */
+#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001
+#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002
+#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003
+#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008
+#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010
+#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020
+#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
+#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
+#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT 16
+#define IXGBE_FDIRM_VLANID 0x00000001
+#define IXGBE_FDIRM_VLANP 0x00000002
+#define IXGBE_FDIRM_POOL 0x00000004
+#define IXGBE_FDIRM_L3P 0x00000008
+#define IXGBE_FDIRM_L4P 0x00000010
+#define IXGBE_FDIRM_FLEX 0x00000020
+#define IXGBE_FDIRM_DIPv6 0x00000040
+
+#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT 0
+#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT 16
+#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0
+#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16
+#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT 0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16
+#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT 0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT 16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
+
+#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH 0x00000007
+#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
+#define IXGBE_FDIRCMD_IPV6 0x00000080
+#define IXGBE_FDIRCMD_CLEARHT 0x00000100
+#define IXGBE_FDIRCMD_DROP 0x00000200
+#define IXGBE_FDIRCMD_INT 0x00000400
+#define IXGBE_FDIRCMD_LAST 0x00000800
+#define IXGBE_FDIRCMD_COLLISION 0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
+#define IXGBE_FDIR_INIT_DONE_POLL 10
+#define IXGBE_FDIRCMD_CMD_POLL 10
+
/* Transmit Descriptor - Legacy */
struct ixgbe_legacy_tx_desc {
u64 buffer_addr; /* Address of the descriptor's data buffer */
@@ -1836,6 +2019,16 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
+#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
+#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */
+#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
@@ -1861,7 +2054,7 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_T 0x0004
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
@@ -1870,6 +2063,47 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
+
+/* Software ATR hash keys */
+#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
+
+/* Software ATR input stream offsets and masks */
+#define IXGBE_ATR_VLAN_OFFSET 0
+#define IXGBE_ATR_SRC_IPV6_OFFSET 2
+#define IXGBE_ATR_SRC_IPV4_OFFSET 14
+#define IXGBE_ATR_DST_IPV6_OFFSET 18
+#define IXGBE_ATR_DST_IPV4_OFFSET 30
+#define IXGBE_ATR_SRC_PORT_OFFSET 34
+#define IXGBE_ATR_DST_PORT_OFFSET 36
+#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
+#define IXGBE_ATR_VM_POOL_OFFSET 40
+#define IXGBE_ATR_L4TYPE_OFFSET 41
+
+#define IXGBE_ATR_L4TYPE_MASK 0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_L4TYPE_UDP 0x1
+#define IXGBE_ATR_L4TYPE_TCP 0x2
+#define IXGBE_ATR_L4TYPE_SCTP 0x3
+#define IXGBE_ATR_HASH_MASK 0x7fff
+
+/* Flow Director ATR input struct. */
+struct ixgbe_atr_input {
+ /* Byte layout in order, all values with MSB first:
+ *
+ * vlan_id - 2 bytes
+ * src_ip - 16 bytes
+ * dst_ip - 16 bytes
+ * src_port - 2 bytes
+ * dst_port - 2 bytes
+ * flex_bytes - 2 bytes
+ * vm_pool - 1 byte
+ * l4type - 1 byte
+ */
+ u8 byte_stream[42];
+};
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
@@ -1897,6 +2131,7 @@ enum ixgbe_phy_type {
ixgbe_phy_sfp_ftl,
ixgbe_phy_sfp_unknown,
ixgbe_phy_sfp_intel,
+ ixgbe_phy_sfp_unsupported,
ixgbe_phy_generic
};
@@ -2005,7 +2240,8 @@ struct ixgbe_fc_info {
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
- bool disable_fc_autoneg; /* Turn off autoneg FC mode */
+ bool disable_fc_autoneg; /* Do not autonegotiate FC */
+ bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
enum ixgbe_fc_mode current_mode; /* FC mode in effect */
enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
};
@@ -2075,6 +2311,12 @@ struct ixgbe_hw_stats {
u64 fdirfstat_fremove;
u64 fdirmatch;
u64 fdirmiss;
+ u64 fccrc;
+ u64 fcoerpdc;
+ u64 fcoeprc;
+ u64 fcoeptc;
+ u64 fcoedwrc;
+ u64 fcoedwtc;
};
/* forward declaration */
@@ -2101,6 +2343,8 @@ struct ixgbe_mac_operations {
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
void (*set_lan_id)(struct ixgbe_hw *);
@@ -2129,8 +2373,7 @@ struct ixgbe_mac_operations {
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr);
+ s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct list_head *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
ixgbe_mc_addr_itr);
s32 (*enable_mc)(struct ixgbe_hw *);
@@ -2140,12 +2383,13 @@ struct ixgbe_mac_operations {
s32 (*init_uta_tables)(struct ixgbe_hw *);
/* Flow Control */
- s32 (*setup_fc)(struct ixgbe_hw *, s32);
+ s32 (*fc_enable)(struct ixgbe_hw *, s32);
};
struct ixgbe_phy_operations {
s32 (*identify)(struct ixgbe_hw *);
s32 (*identify_sfp)(struct ixgbe_hw *);
+ s32 (*init)(struct ixgbe_hw *);
s32 (*reset)(struct ixgbe_hw *);
s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
@@ -2173,6 +2417,7 @@ struct ixgbe_mac_info {
enum ixgbe_mac_type type;
u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
s32 mc_filter_type;
u32 mcft_size;
u32 vft_size;
@@ -2185,14 +2430,16 @@ struct ixgbe_mac_info {
bool orig_link_settings_stored;
bool autoneg;
bool autoneg_succeeded;
+ bool autotry_restart;
};
struct ixgbe_phy_info {
struct ixgbe_phy_operations ops;
+ struct mdio_if_info mdio;
enum ixgbe_phy_type type;
- u32 addr;
u32 id;
enum ixgbe_sfp_type sfp_type;
+ bool sfp_setup_needed;
u32 revision;
enum ixgbe_media_type media_type;
bool reset_disable;
@@ -2249,6 +2496,8 @@ struct ixgbe_info {
#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
#define IXGBE_ERR_SFP_NOT_PRESENT -20
#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define IXGBE_ERR_FDIR_REINIT_FAILED -23
+#define IXGBE_ERR_EEPROM_VERSION -24
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index d3bf2f017cc..2a0174b62e9 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -270,6 +270,18 @@ static int ixpdev_close(struct net_device *dev)
return 0;
}
+static const struct net_device_ops ixpdev_netdev_ops = {
+ .ndo_open = ixpdev_open,
+ .ndo_stop = ixpdev_close,
+ .ndo_start_xmit = ixpdev_xmit,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ixpdev_poll_controller,
+#endif
+};
+
struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
{
struct net_device *dev;
@@ -279,12 +291,7 @@ struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
if (dev == NULL)
return NULL;
- dev->hard_start_xmit = ixpdev_xmit;
- dev->open = ixpdev_open;
- dev->stop = ixpdev_close;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = ixpdev_poll_controller;
-#endif
+ dev->netdev_ops = &ixpdev_netdev_ops;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index 14248cfc3df..d12106b47bf 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -96,6 +96,18 @@ static int jazzsonic_close(struct net_device* dev)
return err;
}
+static const struct net_device_ops sonic_netdev_ops = {
+ .ndo_open = jazzsonic_open,
+ .ndo_stop = jazzsonic_close,
+ .ndo_start_xmit = sonic_send_packet,
+ .ndo_get_stats = sonic_get_stats,
+ .ndo_set_multicast_list = sonic_multicast_list,
+ .ndo_tx_timeout = sonic_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
static int __init sonic_probe1(struct net_device *dev)
{
static unsigned version_printed;
@@ -179,12 +191,7 @@ static int __init sonic_probe1(struct net_device *dev)
lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
* SONIC_BUS_SCALE(lp->dma_bitmode));
- dev->open = jazzsonic_open;
- dev->stop = jazzsonic_close;
- dev->hard_start_xmit = sonic_send_packet;
- dev->get_stats = sonic_get_stats;
- dev->set_multicast_list = &sonic_multicast_list;
- dev->tx_timeout = sonic_tx_timeout;
+ dev->netdev_ops = &sonic_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
/*
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 621a7c0c46b..1e3c63d67b9 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -1939,7 +1939,6 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
TXCS_SELECT_QUEUE0 |
TXCS_QUEUE0S |
TXCS_ENABLE);
- netdev->trans_start = jiffies;
tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
skb_shinfo(skb)->nr_frags + 2,
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 38d6649a29c..b4cf602c32b 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -133,6 +133,7 @@ struct korina_private {
int dma_halt_cnt;
int dma_run_cnt;
struct napi_struct napi;
+ struct timer_list media_check_timer;
struct mii_if_info mii_if;
struct net_device *dev;
int phy_addr;
@@ -664,6 +665,15 @@ static void korina_check_media(struct net_device *dev, unsigned int init_media)
&lp->eth_regs->ethmac2);
}
+static void korina_poll_media(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct korina_private *lp = netdev_priv(dev);
+
+ korina_check_media(dev, 0);
+ mod_timer(&lp->media_check_timer, jiffies + HZ);
+}
+
static void korina_set_carrier(struct mii_if_info *mii)
{
if (mii->force_media) {
@@ -1034,6 +1044,7 @@ static int korina_open(struct net_device *dev)
dev->name, lp->und_irq);
goto err_free_ovr_irq;
}
+ mod_timer(&lp->media_check_timer, jiffies + 1);
out:
return ret;
@@ -1053,6 +1064,8 @@ static int korina_close(struct net_device *dev)
struct korina_private *lp = netdev_priv(dev);
u32 tmp;
+ del_timer(&lp->media_check_timer);
+
/* Disable interrupts */
disable_irq(lp->rx_irq);
disable_irq(lp->tx_irq);
@@ -1081,6 +1094,21 @@ static int korina_close(struct net_device *dev)
return 0;
}
+static const struct net_device_ops korina_netdev_ops = {
+ .ndo_open = korina_open,
+ .ndo_stop = korina_close,
+ .ndo_start_xmit = korina_send_packet,
+ .ndo_set_multicast_list = korina_multicast_list,
+ .ndo_tx_timeout = korina_tx_timeout,
+ .ndo_do_ioctl = korina_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = korina_poll_controller,
+#endif
+};
+
static int korina_probe(struct platform_device *pdev)
{
struct korina_device *bif = platform_get_drvdata(pdev);
@@ -1149,17 +1177,9 @@ static int korina_probe(struct platform_device *pdev)
dev->irq = lp->rx_irq;
lp->dev = dev;
- dev->open = korina_open;
- dev->stop = korina_close;
- dev->hard_start_xmit = korina_send_packet;
- dev->set_multicast_list = &korina_multicast_list;
+ dev->netdev_ops = &korina_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
- dev->tx_timeout = korina_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
- dev->do_ioctl = &korina_ioctl;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = korina_poll_controller;
-#endif
netif_napi_add(dev, &lp->napi, korina_poll, 64);
lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
@@ -1176,6 +1196,7 @@ static int korina_probe(struct platform_device *pdev)
": cannot register net device %d\n", rc);
goto probe_err_register;
}
+ setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);
out:
return rc;
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
new file mode 100644
index 00000000000..39b0aea2aab
--- /dev/null
+++ b/drivers/net/ks8842.c
@@ -0,0 +1,732 @@
+/*
+ * ks8842.c timberdale KS8842 ethernet driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * The Micrel KS8842 behind the timberdale FPGA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#define DRV_NAME "ks8842"
+
+/* Timberdale specific Registers */
+#define REG_TIMB_RST 0x1c
+
+/* KS8842 registers */
+
+#define REG_SELECT_BANK 0x0e
+
+/* bank 0 registers */
+#define REG_QRFCR 0x04
+
+/* bank 2 registers */
+#define REG_MARL 0x00
+#define REG_MARM 0x02
+#define REG_MARH 0x04
+
+/* bank 3 registers */
+#define REG_GRR 0x06
+
+/* bank 16 registers */
+#define REG_TXCR 0x00
+#define REG_TXSR 0x02
+#define REG_RXCR 0x04
+#define REG_TXMIR 0x08
+#define REG_RXMIR 0x0A
+
+/* bank 17 registers */
+#define REG_TXQCR 0x00
+#define REG_RXQCR 0x02
+#define REG_TXFDPR 0x04
+#define REG_RXFDPR 0x06
+#define REG_QMU_DATA_LO 0x08
+#define REG_QMU_DATA_HI 0x0A
+
+/* bank 18 registers */
+#define REG_IER 0x00
+#define IRQ_LINK_CHANGE 0x8000
+#define IRQ_TX 0x4000
+#define IRQ_RX 0x2000
+#define IRQ_RX_OVERRUN 0x0800
+#define IRQ_TX_STOPPED 0x0200
+#define IRQ_RX_STOPPED 0x0100
+#define IRQ_RX_ERROR 0x0080
+#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
+ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
+#define REG_ISR 0x02
+#define REG_RXSR 0x04
+#define RXSR_VALID 0x8000
+#define RXSR_BROADCAST 0x80
+#define RXSR_MULTICAST 0x40
+#define RXSR_UNICAST 0x20
+#define RXSR_FRAMETYPE 0x08
+#define RXSR_TOO_LONG 0x04
+#define RXSR_RUNT 0x02
+#define RXSR_CRC_ERROR 0x01
+#define RXSR_ERROR (RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR)
+
+/* bank 32 registers */
+#define REG_SW_ID_AND_ENABLE 0x00
+#define REG_SGCR1 0x02
+#define REG_SGCR2 0x04
+#define REG_SGCR3 0x06
+
+/* bank 39 registers */
+#define REG_MACAR1 0x00
+#define REG_MACAR2 0x02
+#define REG_MACAR3 0x04
+
+/* bank 45 registers */
+#define REG_P1MBCR 0x00
+#define REG_P1MBSR 0x02
+
+/* bank 46 registers */
+#define REG_P2MBCR 0x00
+#define REG_P2MBSR 0x02
+
+/* bank 48 registers */
+#define REG_P1CR2 0x02
+
+/* bank 49 registers */
+#define REG_P1CR4 0x02
+#define REG_P1SR 0x04
+
+struct ks8842_adapter {
+ void __iomem *hw_addr;
+ int irq;
+ struct tasklet_struct tasklet;
+ spinlock_t lock; /* spinlock to be interrupt safe */
+ struct platform_device *pdev;
+};
+
+static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
+{
+ iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
+}
+
+static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank,
+ u8 value, int offset)
+{
+ ks8842_select_bank(adapter, bank);
+ iowrite8(value, adapter->hw_addr + offset);
+}
+
+static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank,
+ u16 value, int offset)
+{
+ ks8842_select_bank(adapter, bank);
+ iowrite16(value, adapter->hw_addr + offset);
+}
+
+static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank,
+ u16 bits, int offset)
+{
+ u16 reg;
+ ks8842_select_bank(adapter, bank);
+ reg = ioread16(adapter->hw_addr + offset);
+ reg |= bits;
+ iowrite16(reg, adapter->hw_addr + offset);
+}
+
+static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank,
+ u16 bits, int offset)
+{
+ u16 reg;
+ ks8842_select_bank(adapter, bank);
+ reg = ioread16(adapter->hw_addr + offset);
+ reg &= ~bits;
+ iowrite16(reg, adapter->hw_addr + offset);
+}
+
+static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank,
+ u32 value, int offset)
+{
+ ks8842_select_bank(adapter, bank);
+ iowrite32(value, adapter->hw_addr + offset);
+}
+
+static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank,
+ int offset)
+{
+ ks8842_select_bank(adapter, bank);
+ return ioread8(adapter->hw_addr + offset);
+}
+
+static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank,
+ int offset)
+{
+ ks8842_select_bank(adapter, bank);
+ return ioread16(adapter->hw_addr + offset);
+}
+
+static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank,
+ int offset)
+{
+ ks8842_select_bank(adapter, bank);
+ return ioread32(adapter->hw_addr + offset);
+}
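+
+/* Example (illustrative sketch only): all register accesses go through the
+ * bank window at REG_SELECT_BANK, which the helpers above select implicitly;
+ * e.g. reading the port 1 MII status register in bank 45:
+ *
+ *	u16 sr = ks8842_read16(adapter, 45, REG_P1MBSR);
+ */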
+
+static void ks8842_reset(struct ks8842_adapter *adapter)
+{
+ /* The KS8842 goes haywire when doing a software reset;
+ * a workaround in the timberdale IP is implemented to
+ * do a hardware reset instead
+ ks8842_write16(adapter, 3, 1, REG_GRR);
+ msleep(10);
+ iowrite16(0, adapter->hw_addr + REG_GRR);
+ */
+ iowrite16(32, adapter->hw_addr + REG_SELECT_BANK);
+ iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
+ msleep(20);
+}
+
+static void ks8842_update_link_status(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+{
+ /* check the status of the link */
+ if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ } else {
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ }
+}
+
+static void ks8842_enable_tx(struct ks8842_adapter *adapter)
+{
+ ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR);
+}
+
+static void ks8842_disable_tx(struct ks8842_adapter *adapter)
+{
+ ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR);
+}
+
+static void ks8842_enable_rx(struct ks8842_adapter *adapter)
+{
+ ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR);
+}
+
+static void ks8842_disable_rx(struct ks8842_adapter *adapter)
+{
+ ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR);
+}
+
+static void ks8842_reset_hw(struct ks8842_adapter *adapter)
+{
+ /* reset the HW */
+ ks8842_reset(adapter);
+
+ /* Enable QMU Transmit flow control / transmit padding / Transmit CRC */
+ ks8842_write16(adapter, 16, 0x000E, REG_TXCR);
+
+ /* enable the receiver, uni + multi + broadcast + flow ctrl
+ + crc strip */
+ ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
+ REG_RXCR);
+
+ /* TX frame pointer autoincrement */
+ ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);
+
+ /* RX frame pointer autoincrement */
+ ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR);
+
+ /* RX 2 kb high watermark */
+ ks8842_write16(adapter, 0, 0x1000, REG_QRFCR);
+
+ /* aggressive back-off in half duplex */
+ ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1);
+
+ /* enable no excessive collision drop */
+ ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);
+
+ /* Enable port 1 force flow control / back pressure / transmit / recv */
+ ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);
+
+ /* restart port auto-negotiation */
+ ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
+ /* only advertise 10Mbps */
+ ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4);
+
+ /* Enable the transmitter */
+ ks8842_enable_tx(adapter);
+
+ /* Enable the receiver */
+ ks8842_enable_rx(adapter);
+
+ /* clear all interrupts */
+ ks8842_write16(adapter, 18, 0xffff, REG_ISR);
+
+ /* enable interrupts */
+ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
+
+ /* enable the switch */
+ ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
+}
+
+static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
+{
+ int i;
+ u16 mac;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
+
+ /* make sure the switch port uses the same MAC as the QMU */
+ mac = ks8842_read16(adapter, 2, REG_MARL);
+ ks8842_write16(adapter, 39, mac, REG_MACAR1);
+ mac = ks8842_read16(adapter, 2, REG_MARM);
+ ks8842_write16(adapter, 39, mac, REG_MACAR2);
+ mac = ks8842_read16(adapter, 2, REG_MARH);
+ ks8842_write16(adapter, 39, mac, REG_MACAR3);
+}
+
+static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
+{
+ return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
+}
+
+static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ int len = skb->len;
+ u32 *ptr = (u32 *)skb->data;
+ u32 ctrl;
+
+ dev_dbg(&adapter->pdev->dev,
+ "%s: len %u head %p data %p tail %p end %p\n",
+ __func__, skb->len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb));
+
+ /* check FIFO buffer space, we need space for CRC and command bits */
+ if (ks8842_tx_fifo_space(adapter) < len + 8)
+ return NETDEV_TX_BUSY;
+
+ /* the control word, enable IRQ, port 1 and the length */
+ ctrl = 0x8000 | 0x100 | (len << 16);
+ ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);
+
+ netdev->stats.tx_bytes += len;
+
+ /* copy buffer */
+ while (len > 0) {
+ iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
+ len -= sizeof(u32);
+ ptr++;
+ }
+
+ /* enqueue packet */
+ ks8842_write16(adapter, 17, 1, REG_TXQCR);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void ks8842_rx_frame(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+{
+ u32 status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
+ int len = (status >> 16) & 0x7ff;
+
+ status &= 0xffff;
+
+ dev_dbg(&adapter->pdev->dev, "%s - rx_data: status: %x\n",
+ __func__, status);
+
+ /* check the status */
+ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
+ struct sk_buff *skb = netdev_alloc_skb(netdev, len + 2);
+
+ dev_dbg(&adapter->pdev->dev, "%s, got packet, len: %d\n",
+ __func__, len);
+ if (skb) {
+ u32 *data;
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += len;
+ if (status & RXSR_MULTICAST)
+ netdev->stats.multicast++;
+
+ /* Align socket buffer on a 4-byte boundary for
+ better performance. */
+ skb_reserve(skb, 2);
+ data = (u32 *)skb_put(skb, len);
+
+ ks8842_select_bank(adapter, 17);
+ while (len > 0) {
+ *data++ = ioread32(adapter->hw_addr +
+ REG_QMU_DATA_LO);
+ len -= sizeof(u32);
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+ } else
+ netdev->stats.rx_dropped++;
+ } else {
+ dev_dbg(&adapter->pdev->dev, "RX error, status: %x\n", status);
+ netdev->stats.rx_errors++;
+ if (status & RXSR_TOO_LONG)
+ netdev->stats.rx_length_errors++;
+ if (status & RXSR_CRC_ERROR)
+ netdev->stats.rx_crc_errors++;
+ if (status & RXSR_RUNT)
+ netdev->stats.rx_frame_errors++;
+ }
+
+ /* set high watermark to 3K */
+ ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);
+
+ /* release the frame */
+ ks8842_write16(adapter, 17, 0x01, REG_RXQCR);
+
+ /* set high watermark to 2K */
+ ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
+}
+
+void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
+{
+ u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
+ dev_dbg(&adapter->pdev->dev, "%s Entry - rx_data: %d\n",
+ __func__, rx_data);
+ while (rx_data) {
+ ks8842_rx_frame(netdev, adapter);
+ rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
+ }
+}
+
+void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
+{
+ u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
+ dev_dbg(&adapter->pdev->dev, "%s - entry, sr: %x\n", __func__, sr);
+ netdev->stats.tx_packets++;
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+}
+
+void ks8842_handle_rx_overrun(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+{
+ dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+ netdev->stats.rx_errors++;
+ netdev->stats.rx_fifo_errors++;
+}
+
+void ks8842_tasklet(unsigned long arg)
+{
+ struct net_device *netdev = (struct net_device *)arg;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ u16 isr;
+ unsigned long flags;
+ u16 entry_bank;
+
+ /* read current bank to be able to set it back */
+ spin_lock_irqsave(&adapter->lock, flags);
+ entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ isr = ks8842_read16(adapter, 18, REG_ISR);
+ dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
+
+ /* Ack */
+ ks8842_write16(adapter, 18, isr, REG_ISR);
+
+ if (!netif_running(netdev))
+ return;
+
+ if (isr & IRQ_LINK_CHANGE)
+ ks8842_update_link_status(netdev, adapter);
+
+ if (isr & (IRQ_RX | IRQ_RX_ERROR))
+ ks8842_handle_rx(netdev, adapter);
+
+ if (isr & IRQ_TX)
+ ks8842_handle_tx(netdev, adapter);
+
+ if (isr & IRQ_RX_OVERRUN)
+ ks8842_handle_rx_overrun(netdev, adapter);
+
+ if (isr & IRQ_TX_STOPPED) {
+ ks8842_disable_tx(adapter);
+ ks8842_enable_tx(adapter);
+ }
+
+ if (isr & IRQ_RX_STOPPED) {
+ ks8842_disable_rx(adapter);
+ ks8842_enable_rx(adapter);
+ }
+
+ /* re-enable interrupts, put back the bank selection register */
+ spin_lock_irqsave(&adapter->lock, flags);
+ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
+ iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+static irqreturn_t ks8842_irq(int irq, void *devid)
+{
+ struct ks8842_adapter *adapter = devid;
+ u16 isr;
+ u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
+ irqreturn_t ret = IRQ_NONE;
+
+ isr = ks8842_read16(adapter, 18, REG_ISR);
+ dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
+
+ if (isr) {
+ /* disable IRQ */
+ ks8842_write16(adapter, 18, 0x00, REG_IER);
+
+ /* schedule tasklet */
+ tasklet_schedule(&adapter->tasklet);
+
+ ret = IRQ_HANDLED;
+ }
+
+ iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
+
+ return ret;
+}
+
+
+/* Netdevice operations */
+
+static int ks8842_open(struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
+
+ /* reset the HW */
+ ks8842_reset_hw(adapter);
+
+ ks8842_update_link_status(netdev, adapter);
+
+ err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
+ adapter);
+ if (err) {
+ printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
+ adapter->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ks8842_close(struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+
+ dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
+
+ /* free the irq */
+ free_irq(adapter->irq, adapter);
+
+ /* disable the switch */
+ ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);
+
+ return 0;
+}
+
+static int ks8842_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ int ret;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+
+ dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+
+ ret = ks8842_tx_frame(skb, netdev);
+
+ if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
+ netif_stop_queue(netdev);
+
+ return ret;
+}
+
+static int ks8842_set_mac(struct net_device *netdev, void *p)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ struct sockaddr *addr = p;
+ char *mac = (u8 *)addr->sa_data;
+ int i;
+
+ dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, mac, netdev->addr_len);
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ for (i = 0; i < ETH_ALEN; i++) {
+ ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
+ ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
+ REG_MACAR1 + i);
+ }
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ return 0;
+}
+
+static void ks8842_tx_timeout(struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+
+ dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ /* disable interrupts */
+ ks8842_write16(adapter, 18, 0, REG_IER);
+ ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ ks8842_reset_hw(adapter);
+
+ ks8842_update_link_status(netdev, adapter);
+}
+
+static const struct net_device_ops ks8842_netdev_ops = {
+ .ndo_open = ks8842_open,
+ .ndo_stop = ks8842_close,
+ .ndo_start_xmit = ks8842_xmit_frame,
+ .ndo_set_mac_address = ks8842_set_mac,
+ .ndo_tx_timeout = ks8842_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr
+};
+
+static struct ethtool_ops ks8842_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+};
+
+static int __devinit ks8842_probe(struct platform_device *pdev)
+{
+ int err = -ENOMEM;
+ struct resource *iomem;
+ struct net_device *netdev;
+ struct ks8842_adapter *adapter;
+ u16 id;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
+ goto err_mem_region;
+
+ netdev = alloc_etherdev(sizeof(struct ks8842_adapter));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
+ if (!adapter->hw_addr)
+ goto err_ioremap;
+
+ adapter->irq = platform_get_irq(pdev, 0);
+ if (adapter->irq < 0) {
+ err = adapter->irq;
+ goto err_get_irq;
+ }
+
+ adapter->pdev = pdev;
+
+ tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
+ spin_lock_init(&adapter->lock);
+
+ netdev->netdev_ops = &ks8842_netdev_ops;
+ netdev->ethtool_ops = &ks8842_ethtool_ops;
+
+ ks8842_read_mac_addr(adapter, netdev->dev_addr);
+
+ id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ platform_set_drvdata(pdev, netdev);
+
+ printk(KERN_INFO DRV_NAME
+ " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+ (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
+
+ return 0;
+
+err_register:
+err_get_irq:
+ iounmap(adapter->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ release_mem_region(iomem->start, resource_size(iomem));
+err_mem_region:
+ return err;
+}
+
+static int __devexit ks8842_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ unregister_netdev(netdev);
+ tasklet_kill(&adapter->tasklet);
+ iounmap(adapter->hw_addr);
+ free_netdev(netdev);
+ release_mem_region(iomem->start, resource_size(iomem));
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+
+static struct platform_driver ks8842_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ks8842_probe,
+ .remove = ks8842_remove,
+};
+
+static int __init ks8842_init(void)
+{
+ return platform_driver_register(&ks8842_platform_driver);
+}
+
+static void __exit ks8842_exit(void)
+{
+ platform_driver_unregister(&ks8842_platform_driver);
+}
+
+module_init(ks8842_init);
+module_exit(ks8842_exit);
+
+MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ks8842");
+
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index efbae4b8398..a0c578585a5 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -161,12 +161,12 @@ lan_init_chip(struct parisc_device *dev)
if (!dev->irq) {
printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
- __FILE__, dev->hpa.start);
+ __FILE__, (unsigned long)dev->hpa.start);
return -ENODEV;
}
- printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa.start,
- dev->irq);
+ printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n",
+ (unsigned long)dev->hpa.start, dev->irq);
netdevice = alloc_etherdev(sizeof(struct i596_private));
if (!netdevice)
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 7415f517491..070fa450087 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -1036,6 +1036,19 @@ static void print_eth(unsigned char *add, char *str)
printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
add, add + 6, add, add[12], add[13], str);
}
+static const struct net_device_ops i596_netdev_ops = {
+ .ndo_open = i596_open,
+ .ndo_stop = i596_close,
+ .ndo_start_xmit = i596_start_xmit,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_tx_timeout = i596_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = i596_poll_controller,
+#endif
+};
static int __devinit i82596_probe(struct net_device *dev)
{
@@ -1062,16 +1075,8 @@ static int __devinit i82596_probe(struct net_device *dev)
return -ENOMEM;
}
- /* The 82596-specific entries in the device structure. */
- dev->open = i596_open;
- dev->stop = i596_close;
- dev->hard_start_xmit = i596_start_xmit;
- dev->set_multicast_list = set_multicast_list;
- dev->tx_timeout = i596_tx_timeout;
+ dev->netdev_ops = &i596_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = i596_poll_controller;
-#endif
memset(dma, 0, sizeof(struct i596_dma));
lp->dma = dma;
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 789b6cb744b..f28c2334300 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -370,7 +370,7 @@ static int __ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock(&ei_local->page_lock);
enable_irq_lockdep_irqrestore(dev->irq, &flags);
dev->stats.tx_errors++;
- return 1;
+ return NETDEV_TX_BUSY;
}
/*
diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h
new file mode 100644
index 00000000000..1af66a1e691
--- /dev/null
+++ b/drivers/net/ll_temac.h
@@ -0,0 +1,374 @@
+
+#ifndef XILINX_LL_TEMAC_H
+#define XILINX_LL_TEMAC_H
+
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+
+/* packet size info */
+#define XTE_HDR_SIZE 14 /* size of Ethernet header */
+#define XTE_TRL_SIZE 4 /* size of Ethernet trailer (FCS) */
+#define XTE_JUMBO_MTU 9000
+#define XTE_MAX_JUMBO_FRAME_SIZE (XTE_JUMBO_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE)
+
+/* Configuration options */
+
+/* Accept all incoming packets.
+ * This option defaults to disabled (cleared) */
+#define XTE_OPTION_PROMISC (1 << 0)
+/* Jumbo frame support for Tx & Rx.
+ * This option defaults to disabled (cleared) */
+#define XTE_OPTION_JUMBO (1 << 1)
+/* VLAN Rx & Tx frame support.
+ * This option defaults to disabled (cleared) */
+#define XTE_OPTION_VLAN (1 << 2)
+/* Enable recognition of flow control frames on Rx
+ * This option defaults to enabled (set) */
+#define XTE_OPTION_FLOW_CONTROL (1 << 4)
+/* Strip FCS and PAD from incoming frames.
+ * Note: PAD from VLAN frames is not stripped.
+ * This option defaults to disabled (set) */
+#define XTE_OPTION_FCS_STRIP (1 << 5)
+/* Generate FCS field and add PAD automatically for outgoing frames.
+ * This option defaults to enabled (set) */
+#define XTE_OPTION_FCS_INSERT (1 << 6)
+/* Enable Length/Type error checking for incoming frames. When this option is
+set, the MAC will filter frames that have a mismatched type/length field
+and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these
+types of frames are encountered. When this option is cleared, the MAC will
+allow these types of frames to be received.
+This option defaults to enabled (set) */
+#define XTE_OPTION_LENTYPE_ERR (1 << 7)
+/* Enable the transmitter.
+ * This option defaults to enabled (set) */
+#define XTE_OPTION_TXEN (1 << 11)
+/* Enable the receiver
+* This option defaults to enabled (set) */
+#define XTE_OPTION_RXEN (1 << 12)
+
+/* Default options set when device is initialized or reset */
+#define XTE_OPTION_DEFAULTS \
+ (XTE_OPTION_TXEN | \
+ XTE_OPTION_FLOW_CONTROL | \
+ XTE_OPTION_RXEN)
+
+/* XPS_LL_TEMAC SDMA registers definition */
+
+#define TX_NXTDESC_PTR 0x00 /* r */
+#define TX_CURBUF_ADDR 0x01 /* r */
+#define TX_CURBUF_LENGTH 0x02 /* r */
+#define TX_CURDESC_PTR 0x03 /* rw */
+#define TX_TAILDESC_PTR 0x04 /* rw */
+#define TX_CHNL_CTRL 0x05 /* rw */
+/*
+ 0:7 24:31 IRQTimeout
+ 8:15 16:23 IRQCount
+ 16:20 11:15 Reserved
+ 21 10 0
+ 22 9 UseIntOnEnd
+ 23 8 LdIRQCnt
+ 24 7 IRQEn
+ 25:28 3:6 Reserved
+ 29 2 IrqErrEn
+ 30 1 IrqDlyEn
+ 31 0 IrqCoalEn
+*/
+#define CHNL_CTRL_IRQ_IOE (1 << 9)
+#define CHNL_CTRL_IRQ_EN (1 << 7)
+#define CHNL_CTRL_IRQ_ERR_EN (1 << 2)
+#define CHNL_CTRL_IRQ_DLY_EN (1 << 1)
+#define CHNL_CTRL_IRQ_COAL_EN (1 << 0)
+#define TX_IRQ_REG 0x06 /* rw */
+/*
+ 0:7 24:31 DltTmrValue
+ 8:15 16:23 ClscCntrValue
+ 16:17 14:15 Reserved
+ 18:21 10:13 ClscCnt
+ 22:23 8:9 DlyCnt
+ 24:28 3:7 Reserved
+ 29 2 ErrIrq
+ 30 1 DlyIrq
+ 31 0 CoalIrq
+ */
+#define TX_CHNL_STS 0x07 /* r */
+/*
+ 0:9 22:31 Reserved
+ 10 21 TailPErr
+ 11 20 CmpErr
+ 12 19 AddrErr
+ 13 18 NxtPErr
+ 14 17 CurPErr
+ 15 16 BsyWr
+ 16:23 8:15 Reserved
+ 24 7 Error
+ 25 6 IOE
+ 26 5 SOE
+ 27 4 Cmplt
+ 28 3 SOP
+ 29 2 EOP
+ 30 1 EngBusy
+ 31 0 Reserved
+*/
+
+#define RX_NXTDESC_PTR 0x08 /* r */
+#define RX_CURBUF_ADDR 0x09 /* r */
+#define RX_CURBUF_LENGTH 0x0a /* r */
+#define RX_CURDESC_PTR 0x0b /* rw */
+#define RX_TAILDESC_PTR 0x0c /* rw */
+#define RX_CHNL_CTRL 0x0d /* rw */
+/*
+ 0:7 24:31 IRQTimeout
+ 8:15 16:23 IRQCount
+ 16:20 11:15 Reserved
+ 21 10 0
+ 22 9 UseIntOnEnd
+ 23 8 LdIRQCnt
+ 24 7 IRQEn
+ 25:28 3:6 Reserved
+ 29 2 IrqErrEn
+ 30 1 IrqDlyEn
+ 31 0 IrqCoalEn
+ */
+#define RX_IRQ_REG 0x0e /* rw */
+#define IRQ_COAL (1 << 0)
+#define IRQ_DLY (1 << 1)
+#define IRQ_ERR (1 << 2)
+#define IRQ_DMAERR (1 << 7) /* this is not documented ??? */
+/*
+ 0:7 24:31 DltTmrValue
+ 8:15 16:23 ClscCntrValue
+ 16:17 14:15 Reserved
+ 18:21 10:13 ClscCnt
+ 22:23 8:9 DlyCnt
+ 24:28 3:7 Reserved
+*/
+#define RX_CHNL_STS 0x0f /* r */
+#define CHNL_STS_ENGBUSY (1 << 1)
+#define CHNL_STS_EOP (1 << 2)
+#define CHNL_STS_SOP (1 << 3)
+#define CHNL_STS_CMPLT (1 << 4)
+#define CHNL_STS_SOE (1 << 5)
+#define CHNL_STS_IOE (1 << 6)
+#define CHNL_STS_ERR (1 << 7)
+
+#define CHNL_STS_BSYWR (1 << 16)
+#define CHNL_STS_CURPERR (1 << 17)
+#define CHNL_STS_NXTPERR (1 << 18)
+#define CHNL_STS_ADDRERR (1 << 19)
+#define CHNL_STS_CMPERR (1 << 20)
+#define CHNL_STS_TAILERR (1 << 21)
+/*
+ 0:9 22:31 Reserved
+ 10 21 TailPErr
+ 11 20 CmpErr
+ 12 19 AddrErr
+ 13 18 NxtPErr
+ 14 17 CurPErr
+ 15 16 BsyWr
+ 16:23 8:15 Reserved
+ 24 7 Error
+ 25 6 IOE
+ 26 5 SOE
+ 27 4 Cmplt
+ 28 3 SOP
+ 29 2 EOP
+ 30 1 EngBusy
+ 31 0 Reserved
+*/
+
+#define DMA_CONTROL_REG 0x10 /* rw */
+#define DMA_CONTROL_RST (1 << 0)
+#define DMA_TAIL_ENABLE (1 << 2)
+
+/* XPS_LL_TEMAC direct registers definition */
+
+#define XTE_RAF0_OFFSET 0x00
+#define RAF0_RST (1 << 0)
+#define RAF0_MCSTREJ (1 << 1)
+#define RAF0_BCSTREJ (1 << 2)
+#define XTE_TPF0_OFFSET 0x04
+#define XTE_IFGP0_OFFSET 0x08
+#define XTE_ISR0_OFFSET 0x0c
+#define ISR0_HARDACSCMPLT (1 << 0)
+#define ISR0_AUTONEG (1 << 1)
+#define ISR0_RXCMPLT (1 << 2)
+#define ISR0_RXREJ (1 << 3)
+#define ISR0_RXFIFOOVR (1 << 4)
+#define ISR0_TXCMPLT (1 << 5)
+#define ISR0_RXDCMLCK (1 << 6)
+
+#define XTE_IPR0_OFFSET 0x10
+#define XTE_IER0_OFFSET 0x14
+
+#define XTE_MSW0_OFFSET 0x20
+#define XTE_LSW0_OFFSET 0x24
+#define XTE_CTL0_OFFSET 0x28
+#define XTE_RDY0_OFFSET 0x2c
+
+#define XTE_RSE_MIIM_RR_MASK 0x0002
+#define XTE_RSE_MIIM_WR_MASK 0x0004
+#define XTE_RSE_CFG_RR_MASK 0x0020
+#define XTE_RSE_CFG_WR_MASK 0x0040
+#define XTE_RDY0_HARD_ACS_RDY_MASK (0x10000)
+
+/* XPS_LL_TEMAC indirect registers offset definition */
+
+#define XTE_RXC0_OFFSET 0x00000200 /* Rx configuration word 0 */
+#define XTE_RXC1_OFFSET 0x00000240 /* Rx configuration word 1 */
+#define XTE_RXC1_RXRST_MASK (1 << 31) /* Receiver reset */
+#define XTE_RXC1_RXJMBO_MASK (1 << 30) /* Jumbo frame enable */
+#define XTE_RXC1_RXFCS_MASK (1 << 29) /* FCS not stripped */
+#define XTE_RXC1_RXEN_MASK (1 << 28) /* Receiver enable */
+#define XTE_RXC1_RXVLAN_MASK (1 << 27) /* VLAN enable */
+#define XTE_RXC1_RXHD_MASK (1 << 26) /* Half duplex */
+#define XTE_RXC1_RXLT_MASK (1 << 25) /* Length/type check disable */
+
+#define XTE_TXC_OFFSET 0x00000280 /* Tx configuration */
+#define XTE_TXC_TXRST_MASK (1 << 31) /* Transmitter reset */
+#define XTE_TXC_TXJMBO_MASK (1 << 30) /* Jumbo frame enable */
+#define XTE_TXC_TXFCS_MASK (1 << 29) /* Generate FCS */
+#define XTE_TXC_TXEN_MASK (1 << 28) /* Transmitter enable */
+#define XTE_TXC_TXVLAN_MASK (1 << 27) /* VLAN enable */
+#define XTE_TXC_TXHD_MASK (1 << 26) /* Half duplex */
+
+#define XTE_FCC_OFFSET 0x000002C0 /* Flow control config */
+#define XTE_FCC_RXFLO_MASK (1 << 29) /* Rx flow control enable */
+#define XTE_FCC_TXFLO_MASK (1 << 30) /* Tx flow control enable */
+
+#define XTE_EMCFG_OFFSET 0x00000300 /* EMAC configuration */
+#define XTE_EMCFG_LINKSPD_MASK 0xC0000000 /* Link speed */
+#define XTE_EMCFG_HOSTEN_MASK (1 << 26) /* Host interface enable */
+#define XTE_EMCFG_LINKSPD_10 0x00000000 /* 10 Mbit LINKSPD_MASK */
+#define XTE_EMCFG_LINKSPD_100 (1 << 30) /* 100 Mbit LINKSPD_MASK */
+#define XTE_EMCFG_LINKSPD_1000 (1 << 31) /* 1000 Mbit LINKSPD_MASK */
+
+#define XTE_GMIC_OFFSET 0x00000320 /* RGMII/SGMII config */
+#define XTE_MC_OFFSET 0x00000340 /* MDIO configuration */
+#define XTE_UAW0_OFFSET 0x00000380 /* Unicast address word 0 */
+#define XTE_UAW1_OFFSET 0x00000384 /* Unicast address word 1 */
+
+#define XTE_MAW0_OFFSET 0x00000388 /* Multicast addr word 0 */
+#define XTE_MAW1_OFFSET 0x0000038C /* Multicast addr word 1 */
+#define XTE_AFM_OFFSET 0x00000390 /* Promiscuous mode */
+#define XTE_AFM_EPPRM_MASK (1 << 31) /* Promiscuous mode enable */
+
+/* Interrupt Request status */
+#define XTE_TIS_OFFSET 0x000003A0
+#define TIS_FRIS (1 << 0)
+#define TIS_MRIS (1 << 1)
+#define TIS_MWIS (1 << 2)
+#define TIS_ARIS (1 << 3)
+#define TIS_AWIS (1 << 4)
+#define TIS_CRIS (1 << 5)
+#define TIS_CWIS (1 << 6)
+
+#define XTE_TIE_OFFSET 0x000003A4 /* Interrupt enable */
+
+/** MII Management Control register (MGTCR) */
+#define XTE_MGTDR_OFFSET 0x000003B0 /* MII data */
+#define XTE_MIIMAI_OFFSET 0x000003B4 /* MII control */
+
+#define CNTLREG_WRITE_ENABLE_MASK 0x8000
+#define CNTLREG_EMAC1SEL_MASK 0x0400
+#define CNTLREG_ADDRESSCODE_MASK 0x03ff
+
+/* CDMAC descriptor status bit definitions */
+
+#define STS_CTRL_APP0_ERR (1 << 31)
+#define STS_CTRL_APP0_IRQONEND (1 << 30)
+/* undocumented */
+#define STS_CTRL_APP0_STOPONEND (1 << 29)
+#define STS_CTRL_APP0_CMPLT (1 << 28)
+#define STS_CTRL_APP0_SOP (1 << 27)
+#define STS_CTRL_APP0_EOP (1 << 26)
+#define STS_CTRL_APP0_ENGBUSY (1 << 25)
+/* undocumented */
+#define STS_CTRL_APP0_ENGRST (1 << 24)
+
+#define TX_CONTROL_CALC_CSUM_MASK 1
+
+#define XTE_ALIGN 32
+#define BUFFER_ALIGN(adr) ((XTE_ALIGN - ((u32) adr)) % XTE_ALIGN)
+
+#define MULTICAST_CAM_TABLE_NUM 4
+
+/* TX/RX CURDESC_PTR points to first descriptor */
+/* TX/RX TAILDESC_PTR points to last descriptor in linked list */
+
+/**
+ * struct cdmac_bd - LocalLink buffer descriptor format
+ *
+ * app0 bits:
+ * 0 Error
+ * 1 IrqOnEnd generate an interrupt at completion of DMA op
+ * 2 reserved
+ * 3 completed Current descriptor completed
+ * 4 SOP TX marks first desc / RX marks first desc
+ * 5 EOP TX marks last desc / RX marks last desc
+ * 6 EngBusy DMA is processing
+ * 7 reserved
+ * 8:31 application specific
+ */
+struct cdmac_bd {
+ u32 next; /* Physical address of next buffer descriptor */
+ u32 phys;
+ u32 len;
+ u32 app0;
+ u32 app1; /* TX start << 16 | insert */
+ u32 app2; /* TX csum */
+ u32 app3;
+ u32 app4; /* skb for TX length for RX */
+};
+
+struct temac_local {
+ struct net_device *ndev;
+ struct device *dev;
+
+ /* Connection to PHY device */
+ struct phy_device *phy_dev; /* Pointer to PHY device */
+ struct device_node *phy_node;
+
+ /* MDIO bus data */
+ struct mii_bus *mii_bus; /* MII bus reference */
+ int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */
+
+ /* IO registers and IRQs */
+ void __iomem *regs;
+ dcr_host_t sdma_dcrs;
+ int tx_irq;
+ int rx_irq;
+ int emac_num;
+
+ struct sk_buff **rx_skb;
+ spinlock_t rx_lock;
+ struct mutex indirect_mutex;
+ u32 options; /* Current options word */
+ int last_link;
+
+ /* Buffer descriptors */
+ struct cdmac_bd *tx_bd_v;
+ dma_addr_t tx_bd_p;
+ struct cdmac_bd *rx_bd_v;
+ dma_addr_t rx_bd_p;
+ int tx_bd_ci;
+ int tx_bd_next;
+ int tx_bd_tail;
+ int rx_bd_ci;
+};
+
+/* xilinx_temac.c */
+u32 temac_ior(struct temac_local *lp, int offset);
+void temac_iow(struct temac_local *lp, int offset, u32 value);
+int temac_indirect_busywait(struct temac_local *lp);
+u32 temac_indirect_in32(struct temac_local *lp, int reg);
+void temac_indirect_out32(struct temac_local *lp, int reg, u32 value);
+
+
+/* xilinx_temac_mdio.c */
+int temac_mdio_setup(struct temac_local *lp, struct device_node *np);
+void temac_mdio_teardown(struct temac_local *lp);
+
+#endif /* XILINX_LL_TEMAC_H */
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
new file mode 100644
index 00000000000..96e7248876c
--- /dev/null
+++ b/drivers/net/ll_temac_main.c
@@ -0,0 +1,969 @@
+/*
+ * Driver for Xilinx TEMAC Ethernet device
+ *
+ * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
+ * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
+ * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
+ *
+ * This is a driver for the Xilinx ll_temac ipcore which is often used
+ * in the Virtex and Spartan series of chips.
+ *
+ * Notes:
+ * - The ll_temac hardware uses indirect access for many of the TEMAC
+ * registers, including the MDIO bus. However, indirect access to MDIO
+ * registers takes considerably more clock cycles than to TEMAC registers.
+ * MDIO accesses are long, so threads doing them should probably sleep
+ * rather than busywait. However, since only one indirect access can be
+ * in progress at any given time, that means that *all* indirect accesses
+ * could end up sleeping (to wait for an MDIO access to complete).
+ * Fortunately none of the indirect accesses are on the 'hot' path for tx
+ * or rx, so this should be okay.
+ *
+ * TODO:
+ * - Fix driver to work on more than just Virtex5. Right now the driver
+ * assumes that the locallink DMA registers are accessed via DCR
+ * instructions.
+ * - Factor out locallink DMA code into separate driver
+ * - Fix multicast assignment.
+ * - Fix support for hardware checksumming.
+ * - Testing. Lots and lots of testing.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
+#include <linux/udp.h> /* needed for sizeof(udphdr) */
+#include <linux/phy.h>
+#include <linux/in.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+
+#include "ll_temac.h"
+
+#define TX_BD_NUM 64
+#define RX_BD_NUM 128
+
+/* ---------------------------------------------------------------------
+ * Low level register access functions
+ */
+
+u32 temac_ior(struct temac_local *lp, int offset)
+{
+ return in_be32((u32 *)(lp->regs + offset));
+}
+
+void temac_iow(struct temac_local *lp, int offset, u32 value)
+{
+ out_be32((u32 *) (lp->regs + offset), value);
+}
+
+int temac_indirect_busywait(struct temac_local *lp)
+{
+ long end = jiffies + 2;
+
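+ /* Poll the hard access ready bit; give up after roughly two jiffies,
+ * sleeping between reads so other threads can run. */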
+ while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
+ if (end - jiffies <= 0) {
+ WARN_ON(1);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+/**
+ * temac_indirect_in32 - read a TEMAC register through the indirect interface
+ *
+ * lp->indirect_mutex must be held when calling this function
+ */
+u32 temac_indirect_in32(struct temac_local *lp, int reg)
+{
+ u32 val;
+
+ if (temac_indirect_busywait(lp))
+ return -ETIMEDOUT;
+ temac_iow(lp, XTE_CTL0_OFFSET, reg);
+ if (temac_indirect_busywait(lp))
+ return -ETIMEDOUT;
+ val = temac_ior(lp, XTE_LSW0_OFFSET);
+
+ return val;
+}
+
+/**
+ * temac_indirect_out32 - write a TEMAC register through the indirect interface
+ *
+ * lp->indirect_mutex must be held when calling this function
+ */
+void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
+{
+ if (temac_indirect_busywait(lp))
+ return;
+ temac_iow(lp, XTE_LSW0_OFFSET, value);
+ temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
+}
+
+static u32 temac_dma_in32(struct temac_local *lp, int reg)
+{
+ return dcr_read(lp->sdma_dcrs, reg);
+}
+
+static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
+{
+ dcr_write(lp->sdma_dcrs, reg, value);
+}
+
+/**
+ * temac_dma_bd_init - Setup buffer descriptor rings
+ */
+static int temac_dma_bd_init(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int i;
+
+ lp->rx_skb = kzalloc(sizeof(struct sk_buff)*RX_BD_NUM, GFP_KERNEL);
+ /* Allocate the tx and rx ring buffer descriptors. */
+ /* Each allocation returns a virtual address and a physical address. */
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
+
+ memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
+ for (i = 0; i < TX_BD_NUM; i++) {
+ lp->tx_bd_v[i].next = lp->tx_bd_p +
+ sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
+ }
+
+ memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
+ for (i = 0; i < RX_BD_NUM; i++) {
+ lp->rx_bd_v[i].next = lp->rx_bd_p +
+ sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
+
+ skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
+ + XTE_ALIGN, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(&ndev->dev, "alloc_skb error %d\n", i);
+ return -ENOMEM;
+ }
+ lp->rx_skb[i] = skb;
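+ /* BUFFER_ALIGN() (defined in ll_temac.h) is assumed to return the
+ * number of padding bytes needed to align skb->data for the DMA engine. */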
+ skb_reserve(skb, BUFFER_ALIGN(skb->data));
+ /* returns physical address of skb->data */
+ lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+ skb->data,
+ XTE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
+ lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
+ }
+
+ temac_dma_out32(lp, TX_CHNL_CTRL, 0x10220400 |
+ CHNL_CTRL_IRQ_EN |
+ CHNL_CTRL_IRQ_DLY_EN |
+ CHNL_CTRL_IRQ_COAL_EN);
+ /* 0x10220483 */
+ /* 0x00100483 */
+ temac_dma_out32(lp, RX_CHNL_CTRL, 0xff010000 |
+ CHNL_CTRL_IRQ_EN |
+ CHNL_CTRL_IRQ_DLY_EN |
+ CHNL_CTRL_IRQ_COAL_EN |
+ CHNL_CTRL_IRQ_IOE);
+ /* 0xff010283 */
+
+ temac_dma_out32(lp, RX_CURDESC_PTR, lp->rx_bd_p);
+ temac_dma_out32(lp, RX_TAILDESC_PTR,
+ lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ temac_dma_out32(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+
+ return 0;
+}
+
+/* ---------------------------------------------------------------------
+ * net_device_ops
+ */
+
+static int temac_set_mac_address(struct net_device *ndev, void *address)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (address)
+ memcpy(ndev->dev_addr, address, ETH_ALEN);
+
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ random_ether_addr(ndev->dev_addr);
+
+ /* Set up the unicast MAC address filter with the new MAC address */
+ mutex_lock(&lp->indirect_mutex);
+ temac_indirect_out32(lp, XTE_UAW0_OFFSET,
+ (ndev->dev_addr[0]) |
+ (ndev->dev_addr[1] << 8) |
+ (ndev->dev_addr[2] << 16) |
+ (ndev->dev_addr[3] << 24));
+ /* Set MAC bits [47:32] in EUAW1; EUAW1 contains reserved bits,
+ * so leave them untouched */
+ temac_indirect_out32(lp, XTE_UAW1_OFFSET,
+ (ndev->dev_addr[4] & 0x000000ff) |
+ (ndev->dev_addr[5] << 8));
+ mutex_unlock(&lp->indirect_mutex);
+
+ return 0;
+}
+
+static void temac_set_multicast_list(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ u32 multi_addr_msw, multi_addr_lsw, val;
+ int i;
+
+ mutex_lock(&lp->indirect_mutex);
+ if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)
+ || ndev->mc_count > MULTICAST_CAM_TABLE_NUM) {
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. If it was a promisc request the
+ * flag is already set. If not we assert it.
+ */
+ ndev->flags |= IFF_PROMISC;
+ temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
+ dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
+ } else if (ndev->mc_count) {
+ struct dev_mc_list *mclist = ndev->mc_list;
+ for (i = 0; mclist && i < ndev->mc_count; i++) {
+
+ if (i >= MULTICAST_CAM_TABLE_NUM)
+ break;
+ multi_addr_msw = ((mclist->dmi_addr[3] << 24) |
+ (mclist->dmi_addr[2] << 16) |
+ (mclist->dmi_addr[1] << 8) |
+ (mclist->dmi_addr[0]));
+ temac_indirect_out32(lp, XTE_MAW0_OFFSET,
+ multi_addr_msw);
+ multi_addr_lsw = ((mclist->dmi_addr[5] << 8) |
+ (mclist->dmi_addr[4]) | (i << 16));
+ temac_indirect_out32(lp, XTE_MAW1_OFFSET,
+ multi_addr_lsw);
+ mclist = mclist->next;
+ }
+ } else {
+ val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
+ temac_indirect_out32(lp, XTE_AFM_OFFSET,
+ val & ~XTE_AFM_EPPRM_MASK);
+ temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
+ temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
+ dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
+ }
+ mutex_unlock(&lp->indirect_mutex);
+}
+
+struct temac_option {
+ int flg;
+ u32 opt;
+ u32 reg;
+ u32 m_or;
+ u32 m_and;
+} temac_options[] = {
+ /* Turn on jumbo packet support for both Rx and Tx */
+ {
+ .opt = XTE_OPTION_JUMBO,
+ .reg = XTE_TXC_OFFSET,
+ .m_or = XTE_TXC_TXJMBO_MASK,
+ },
+ {
+ .opt = XTE_OPTION_JUMBO,
+ .reg = XTE_RXC1_OFFSET,
+ .m_or = XTE_RXC1_RXJMBO_MASK,
+ },
+ /* Turn on VLAN packet support for both Rx and Tx */
+ {
+ .opt = XTE_OPTION_VLAN,
+ .reg = XTE_TXC_OFFSET,
+ .m_or = XTE_TXC_TXVLAN_MASK,
+ },
+ {
+ .opt = XTE_OPTION_VLAN,
+ .reg = XTE_RXC1_OFFSET,
+ .m_or = XTE_RXC1_RXVLAN_MASK,
+ },
+ /* Turn on FCS stripping on receive packets */
+ {
+ .opt = XTE_OPTION_FCS_STRIP,
+ .reg = XTE_RXC1_OFFSET,
+ .m_or = XTE_RXC1_RXFCS_MASK,
+ },
+ /* Turn on FCS insertion on transmit packets */
+ {
+ .opt = XTE_OPTION_FCS_INSERT,
+ .reg = XTE_TXC_OFFSET,
+ .m_or = XTE_TXC_TXFCS_MASK,
+ },
+ /* Turn on length/type field checking on receive packets */
+ {
+ .opt = XTE_OPTION_LENTYPE_ERR,
+ .reg = XTE_RXC1_OFFSET,
+ .m_or = XTE_RXC1_RXLT_MASK,
+ },
+ /* Turn on flow control */
+ {
+ .opt = XTE_OPTION_FLOW_CONTROL,
+ .reg = XTE_FCC_OFFSET,
+ .m_or = XTE_FCC_RXFLO_MASK,
+ },
+ /* Turn on flow control */
+ {
+ .opt = XTE_OPTION_FLOW_CONTROL,
+ .reg = XTE_FCC_OFFSET,
+ .m_or = XTE_FCC_TXFLO_MASK,
+ },
+ /* Turn on promiscuous frame filtering (all frames are received) */
+ {
+ .opt = XTE_OPTION_PROMISC,
+ .reg = XTE_AFM_OFFSET,
+ .m_or = XTE_AFM_EPPRM_MASK,
+ },
+ /* Enable transmitter if not already enabled */
+ {
+ .opt = XTE_OPTION_TXEN,
+ .reg = XTE_TXC_OFFSET,
+ .m_or = XTE_TXC_TXEN_MASK,
+ },
+ /* Enable receiver if not already enabled */
+ {
+ .opt = XTE_OPTION_RXEN,
+ .reg = XTE_RXC1_OFFSET,
+ .m_or = XTE_RXC1_RXEN_MASK,
+ },
+ {}
+};
+
+/**
+ * temac_setoptions - apply the requested option flags to the TEMAC registers
+ */
+static u32 temac_setoptions(struct net_device *ndev, u32 options)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ struct temac_option *tp = &temac_options[0];
+ int reg;
+
+ mutex_lock(&lp->indirect_mutex);
+ while (tp->opt) {
+ reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
+ if (options & tp->opt)
+ reg |= tp->m_or;
+ temac_indirect_out32(lp, tp->reg, reg);
+ tp++;
+ }
+ lp->options |= options;
+ mutex_unlock(&lp->indirect_mutex);
+
+ return 0;
+}
+
+/* Initialize the temac */
+static void temac_device_reset(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ u32 timeout;
+ u32 val;
+
+ /* Perform a software reset */
+
+ /* 0x300 host enable bit ? */
+ /* reset PHY through control register ?:1 */
+
+ dev_dbg(&ndev->dev, "%s()\n", __func__);
+
+ mutex_lock(&lp->indirect_mutex);
+ /* Reset the receiver and wait for it to finish reset */
+ temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
+ timeout = 1000;
+ while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
+ udelay(1);
+ if (--timeout == 0) {
+ dev_err(&ndev->dev,
+ "temac_device_reset RX reset timeout!!\n");
+ break;
+ }
+ }
+
+ /* Reset the transmitter and wait for it to finish reset */
+ temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
+ timeout = 1000;
+ while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
+ udelay(1);
+ if (--timeout == 0) {
+ dev_err(&ndev->dev,
+ "temac_device_reset TX reset timeout!!\n");
+ break;
+ }
+ }
+
+ /* Disable the receiver */
+ val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
+ temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
+
+ /* Reset Local Link (DMA) */
+ temac_dma_out32(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
+ timeout = 1000;
+ while (temac_dma_in32(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
+ udelay(1);
+ if (--timeout == 0) {
+ dev_err(&ndev->dev,
+ "temac_device_reset DMA reset timeout!!\n");
+ break;
+ }
+ }
+ temac_dma_out32(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
+
+ temac_dma_bd_init(ndev);
+
+ temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
+ temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
+ temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
+ temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
+
+ mutex_unlock(&lp->indirect_mutex);
+
+ /* Sync default options with HW
+ * but leave receiver and transmitter disabled. */
+ temac_setoptions(ndev,
+ lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
+
+ temac_set_mac_address(ndev, NULL);
+
+ /* Set address filter table */
+ temac_set_multicast_list(ndev);
+ if (temac_setoptions(ndev, lp->options))
+ dev_err(&ndev->dev, "Error setting TEMAC options\n");
+
+ /* Initialize driver state */
+ ndev->trans_start = 0;
+}
+
+void temac_adjust_link(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ struct phy_device *phy = lp->phy_dev;
+ u32 mii_speed;
+ int link_state;
+
+ /* hash together the state values to decide if something has changed */
+ link_state = phy->speed | (phy->duplex << 1) | phy->link;
+
+ mutex_lock(&lp->indirect_mutex);
+ if (lp->last_link != link_state) {
+ mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
+ mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
+
+ switch (phy->speed) {
+ case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
+ case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
+ case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
+ }
+
+ /* Write new speed setting out to TEMAC */
+ temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
+ lp->last_link = link_state;
+ phy_print_status(phy);
+ }
+ mutex_unlock(&lp->indirect_mutex);
+}
+
+static void temac_start_xmit_done(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ struct cdmac_bd *cur_p;
+ unsigned int stat = 0;
+
+ cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+ stat = cur_p->app0;
+
+ while (stat & STS_CTRL_APP0_CMPLT) {
+ dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
+ DMA_TO_DEVICE);
+ if (cur_p->app4)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
+ cur_p->app0 = 0;
+
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += cur_p->len;
+
+ lp->tx_bd_ci++;
+ if (lp->tx_bd_ci >= TX_BD_NUM)
+ lp->tx_bd_ci = 0;
+
+ cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+ stat = cur_p->app0;
+ }
+
+ netif_wake_queue(ndev);
+}
+
+static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ struct cdmac_bd *cur_p;
+ dma_addr_t start_p, tail_p;
+ int ii;
+ unsigned long num_frag;
+ skb_frag_t *frag;
+
+ num_frag = skb_shinfo(skb)->nr_frags;
+ frag = &skb_shinfo(skb)->frags[0];
+ start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+
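+ /* If the tail descriptor is still marked complete it has not been
+ * reclaimed by temac_start_xmit_done() yet, i.e. the ring is full:
+ * stop the queue and ask the stack to requeue the packet. */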
+ if (cur_p->app0 & STS_CTRL_APP0_CMPLT) {
+ if (!netif_queue_stopped(ndev)) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ cur_p->app0 = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
+ int length = 0, start = 0, insert = 0;
+
+ switch (ip->protocol) {
+ case IPPROTO_TCP:
+ start = sizeof(struct iphdr) + ETH_HLEN;
+ insert = sizeof(struct iphdr) + ETH_HLEN + 16;
+ length = ip->tot_len - sizeof(struct iphdr);
+ break;
+ case IPPROTO_UDP:
+ start = sizeof(struct iphdr) + ETH_HLEN;
+ insert = sizeof(struct iphdr) + ETH_HLEN + 6;
+ length = ip->tot_len - sizeof(struct iphdr);
+ break;
+ default:
+ break;
+ }
+ cur_p->app1 = ((start << 16) | insert);
+ cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr,
+ length, ip->protocol, 0);
+ skb->data[insert] = 0;
+ skb->data[insert + 1] = 0;
+ }
+ cur_p->app0 |= STS_CTRL_APP0_SOP;
+ cur_p->len = skb_headlen(skb);
+ cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ cur_p->app4 = (unsigned long)skb;
+
+ for (ii = 0; ii < num_frag; ii++) {
+ lp->tx_bd_tail++;
+ if (lp->tx_bd_tail >= TX_BD_NUM)
+ lp->tx_bd_tail = 0;
+
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ cur_p->phys = dma_map_single(ndev->dev.parent,
+ (void *)page_address(frag->page) +
+ frag->page_offset,
+ frag->size, DMA_TO_DEVICE);
+ cur_p->len = frag->size;
+ cur_p->app0 = 0;
+ frag++;
+ }
+ cur_p->app0 |= STS_CTRL_APP0_EOP;
+
+ tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ lp->tx_bd_tail++;
+ if (lp->tx_bd_tail >= TX_BD_NUM)
+ lp->tx_bd_tail = 0;
+
+ /* Kick off the transfer */
+ temac_dma_out32(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+
+ return 0;
+}
+
+
+static void ll_temac_recv(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ struct sk_buff *skb, *new_skb;
+ unsigned int bdstat;
+ struct cdmac_bd *cur_p;
+ dma_addr_t tail_p;
+ int length;
+ unsigned long skb_vaddr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->rx_lock, flags);
+
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+ cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+
+ bdstat = cur_p->app0;
+ while ((bdstat & STS_CTRL_APP0_CMPLT)) {
+
+ skb = lp->rx_skb[lp->rx_bd_ci];
+ length = cur_p->app4;
+
+ skb_vaddr = virt_to_bus(skb->data);
+ dma_unmap_single(ndev->dev.parent, skb_vaddr, length,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+ skb->dev = ndev;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ netif_rx(skb);
+
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += length;
+
+ new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN,
+ GFP_ATOMIC);
+ if (!new_skb) {
+ dev_err(&ndev->dev, "no memory for new sk_buff\n");
+ spin_unlock_irqrestore(&lp->rx_lock, flags);
+ return;
+ }
+
+ skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data));
+
+ cur_p->app0 = STS_CTRL_APP0_IRQONEND;
+ cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ XTE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
+ lp->rx_skb[lp->rx_bd_ci] = new_skb;
+
+ lp->rx_bd_ci++;
+ if (lp->rx_bd_ci >= RX_BD_NUM)
+ lp->rx_bd_ci = 0;
+
+ cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+ bdstat = cur_p->app0;
+ }
+ temac_dma_out32(lp, RX_TAILDESC_PTR, tail_p);
+
+ spin_unlock_irqrestore(&lp->rx_lock, flags);
+}
+
+static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct temac_local *lp = netdev_priv(ndev);
+ unsigned int status;
+
+ status = temac_dma_in32(lp, TX_IRQ_REG);
+ temac_dma_out32(lp, TX_IRQ_REG, status);
+
+ if (status & (IRQ_COAL | IRQ_DLY))
+ temac_start_xmit_done(lp->ndev);
+ if (status & 0x080)
+ dev_err(&ndev->dev, "DMA error 0x%x\n", status);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct temac_local *lp = netdev_priv(ndev);
+ unsigned int status;
+
+ /* Read and clear the status registers */
+ status = temac_dma_in32(lp, RX_IRQ_REG);
+ temac_dma_out32(lp, RX_IRQ_REG, status);
+
+ if (status & (IRQ_COAL | IRQ_DLY))
+ ll_temac_recv(lp->ndev);
+
+ return IRQ_HANDLED;
+}
+
+static int temac_open(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ int rc;
+
+ dev_dbg(&ndev->dev, "temac_open()\n");
+
+ if (lp->phy_node) {
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ temac_adjust_link, 0, 0);
+ if (!lp->phy_dev) {
+ dev_err(lp->dev, "of_phy_connect() failed\n");
+ return -ENODEV;
+ }
+
+ phy_start(lp->phy_dev);
+ }
+
+ rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
+ if (rc)
+ goto err_tx_irq;
+ rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
+ if (rc)
+ goto err_rx_irq;
+
+ temac_device_reset(ndev);
+ return 0;
+
+ err_rx_irq:
+ free_irq(lp->tx_irq, ndev);
+ err_tx_irq:
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+ dev_err(lp->dev, "request_irq() failed\n");
+ return rc;
+}
+
+static int temac_stop(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ dev_dbg(&ndev->dev, "temac_close()\n");
+
+ free_irq(lp->tx_irq, ndev);
+ free_irq(lp->rx_irq, ndev);
+
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+temac_poll_controller(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ disable_irq(lp->tx_irq);
+ disable_irq(lp->rx_irq);
+
+ ll_temac_rx_irq(lp->rx_irq, ndev);
+ ll_temac_tx_irq(lp->tx_irq, ndev);
+
+ enable_irq(lp->tx_irq);
+ enable_irq(lp->rx_irq);
+}
+#endif
+
+static const struct net_device_ops temac_netdev_ops = {
+ .ndo_open = temac_open,
+ .ndo_stop = temac_stop,
+ .ndo_start_xmit = temac_start_xmit,
+ .ndo_set_mac_address = temac_set_mac_address,
+ //.ndo_set_multicast_list = temac_set_multicast_list,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = temac_poll_controller,
+#endif
+};
+
+/* ---------------------------------------------------------------------
+ * SYSFS device attributes
+ */
+static ssize_t temac_show_llink_regs(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct temac_local *lp = netdev_priv(ndev);
+ int i, len = 0;
+
+ for (i = 0; i < 0x11; i++)
+ len += sprintf(buf + len, "%.8x%s", temac_dma_in32(lp, i),
+ (i % 8) == 7 ? "\n" : " ");
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);
+
+static struct attribute *temac_device_attrs[] = {
+ &dev_attr_llink_regs.attr,
+ NULL,
+};
+
+static const struct attribute_group temac_attr_group = {
+ .attrs = temac_device_attrs,
+};
+
+static int __init
+temac_of_probe(struct of_device *op, const struct of_device_id *match)
+{
+ struct device_node *np;
+ struct temac_local *lp;
+ struct net_device *ndev;
+ const void *addr;
+ int size, rc = 0;
+ unsigned int dcrs;
+
+ /* Init network device structure */
+ ndev = alloc_etherdev(sizeof(*lp));
+ if (!ndev) {
+ dev_err(&op->dev, "could not allocate device.\n");
+ return -ENOMEM;
+ }
+ ether_setup(ndev);
+ dev_set_drvdata(&op->dev, ndev);
+ SET_NETDEV_DEV(ndev, &op->dev);
+ ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
+ ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ ndev->netdev_ops = &temac_netdev_ops;
+#if 0
+ ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
+ ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
+ ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
+ ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
+ ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
+ ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
+ ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
+ ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
+ ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
+ ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
+ ndev->features |= NETIF_F_LRO; /* large receive offload */
+#endif
+
+ /* setup temac private info structure */
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+ lp->dev = &op->dev;
+ lp->options = XTE_OPTION_DEFAULTS;
+ spin_lock_init(&lp->rx_lock);
+ mutex_init(&lp->indirect_mutex);
+
+ /* map device registers */
+ lp->regs = of_iomap(op->node, 0);
+ if (!lp->regs) {
+ dev_err(&op->dev, "could not map temac regs.\n");
+ goto nodev;
+ }
+
+ /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+ np = of_parse_phandle(op->node, "llink-connected", 0);
+ if (!np) {
+ dev_err(&op->dev, "could not find DMA node\n");
+ goto nodev;
+ }
+
+ dcrs = dcr_resource_start(np, 0);
+ if (dcrs == 0) {
+ dev_err(&op->dev, "could not get DMA register address\n");
+ goto nodev;
+ }
+ lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
+ dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
+
+ lp->rx_irq = irq_of_parse_and_map(np, 0);
+ lp->tx_irq = irq_of_parse_and_map(np, 1);
+ if (!lp->rx_irq || !lp->tx_irq) {
+ dev_err(&op->dev, "could not determine irqs\n");
+ rc = -ENOMEM;
+ goto nodev;
+ }
+
+ of_node_put(np); /* Finished with the DMA node; drop the reference */
+
+ /* Retrieve the MAC address */
+ addr = of_get_property(op->node, "local-mac-address", &size);
+ if ((!addr) || (size != 6)) {
+ dev_err(&op->dev, "could not find MAC address\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ temac_set_mac_address(ndev, (void *)addr);
+
+ rc = temac_mdio_setup(lp, op->node);
+ if (rc)
+ dev_warn(&op->dev, "error registering MDIO bus\n");
+
+ lp->phy_node = of_parse_phandle(op->node, "phy-handle", 0);
+ if (lp->phy_node)
+ dev_dbg(lp->dev, "using PHY node %s (%p)\n",
+ lp->phy_node->full_name, lp->phy_node);
+
+ /* Add the device attributes */
+ rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
+ if (rc) {
+ dev_err(lp->dev, "Error creating sysfs files\n");
+ goto nodev;
+ }
+
+ rc = register_netdev(lp->ndev);
+ if (rc) {
+ dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
+ goto err_register_ndev;
+ }
+
+ return 0;
+
+ err_register_ndev:
+ sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
+ nodev:
+ free_netdev(ndev);
+ ndev = NULL;
+ return rc;
+}
+
+static int __devexit temac_of_remove(struct of_device *op)
+{
+ struct net_device *ndev = dev_get_drvdata(&op->dev);
+ struct temac_local *lp = netdev_priv(ndev);
+
+ temac_mdio_teardown(lp);
+ unregister_netdev(ndev);
+ sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ lp->phy_node = NULL;
+ dev_set_drvdata(&op->dev, NULL);
+ free_netdev(ndev);
+ return 0;
+}
+
+static struct of_device_id temac_of_match[] __devinitdata = {
+ { .compatible = "xlnx,xps-ll-temac-1.01.b", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, temac_of_match);
+
+static struct of_platform_driver temac_of_driver = {
+ .match_table = temac_of_match,
+ .probe = temac_of_probe,
+ .remove = __devexit_p(temac_of_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "xilinx_temac",
+ },
+};
+
+static int __init temac_init(void)
+{
+ return of_register_platform_driver(&temac_of_driver);
+}
+module_init(temac_init);
+
+static void __exit temac_exit(void)
+{
+ of_unregister_platform_driver(&temac_of_driver);
+}
+module_exit(temac_exit);
+
+MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
+MODULE_AUTHOR("Yoshio Kashiwagi");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c
new file mode 100644
index 00000000000..da0e462308d
--- /dev/null
+++ b/drivers/net/ll_temac_mdio.c
@@ -0,0 +1,120 @@
+/*
+ * MDIO bus driver for the Xilinx TEMAC device
+ *
+ * Copyright (c) 2009 Secret Lab Technologies, Ltd.
+ */
+
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+
+#include "ll_temac.h"
+
+/* ---------------------------------------------------------------------
+ * MDIO Bus functions
+ */
+static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct temac_local *lp = bus->priv;
+ u32 rc;
+
+ /* Write the PHY address to the MIIM Access Initiator register.
+ * When the transfer completes, the PHY register value will appear
+ * in the LSW0 register */
+ mutex_lock(&lp->indirect_mutex);
+ temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
+ rc = temac_indirect_in32(lp, XTE_MIIMAI_OFFSET);
+ mutex_unlock(&lp->indirect_mutex);
+
+ dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n",
+ phy_id, reg, rc);
+
+ return rc;
+}
+
+static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+{
+ struct temac_local *lp = bus->priv;
+
+ dev_dbg(lp->dev, "temac_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+ phy_id, reg, val);
+
+ /* First write the desired value into the write data register
+ * and then write the address into the access initiator register
+ */
+ mutex_lock(&lp->indirect_mutex);
+ temac_indirect_out32(lp, XTE_MGTDR_OFFSET, val);
+ temac_indirect_out32(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg);
+ mutex_unlock(&lp->indirect_mutex);
+
+ return 0;
+}
+
+int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
+{
+ struct mii_bus *bus;
+ const u32 *bus_hz;
+ int clk_div;
+ int rc, size;
+ struct resource res;
+
+ /* Calculate a reasonable divisor for the clock rate */
+ clk_div = 0x3f; /* worst-case default setting */
+ bus_hz = of_get_property(np, "clock-frequency", &size);
+ if (bus_hz && size >= sizeof(*bus_hz)) {
+ clk_div = (*bus_hz) / (2500 * 1000 * 2) - 1;
+ if (clk_div < 1)
+ clk_div = 1;
+ if (clk_div > 0x3f)
+ clk_div = 0x3f;
+ }
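+ /* The divisor is chosen so that bus_hz / (2 * (clk_div + 1)) stays at
+ * or below the 2.5 MHz MDC limit; e.g. a 100 MHz host clock gives
+ * clk_div = 19. */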
+
+ /* Enable the MDIO bus by asserting the enable bit and writing
+ * in the clock config */
+ mutex_lock(&lp->indirect_mutex);
+ temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
+ mutex_unlock(&lp->indirect_mutex);
+
+ bus = mdiobus_alloc();
+ if (!bus)
+ return -ENOMEM;
+
+ of_address_to_resource(np, 0, &res);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ bus->priv = lp;
+ bus->name = "Xilinx TEMAC MDIO";
+ bus->read = temac_mdio_read;
+ bus->write = temac_mdio_write;
+ bus->parent = lp->dev;
+ bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
+
+ lp->mii_bus = bus;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc)
+ goto err_register;
+
+ mutex_lock(&lp->indirect_mutex);
+ dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n",
+ temac_indirect_in32(lp, XTE_MC_OFFSET));
+ mutex_unlock(&lp->indirect_mutex);
+ return 0;
+
+ err_register:
+ mdiobus_free(bus);
+ return rc;
+}
+
+void temac_mdio_teardown(struct temac_local *lp)
+{
+ mdiobus_unregister(lp->mii_bus);
+ /* lp->mii_bus->irq points at lp->mdio_irqs, which is embedded in
+ * struct temac_local, so it must not be kfree()d here */
+ mdiobus_free(lp->mii_bus);
+ lp->mii_bus = NULL;
+}
+
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index b7d438a367f..da472c68748 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -62,6 +62,7 @@
struct pcpu_lstats {
unsigned long packets;
unsigned long bytes;
+ unsigned long drops;
};
/*
@@ -71,18 +72,22 @@ struct pcpu_lstats {
static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct pcpu_lstats *pcpu_lstats, *lb_stats;
+ int len;
skb_orphan(skb);
- skb->protocol = eth_type_trans(skb,dev);
+ skb->protocol = eth_type_trans(skb, dev);
/* it's OK to use per_cpu_ptr() because BHs are off */
pcpu_lstats = dev->ml_priv;
lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id());
- lb_stats->bytes += skb->len;
- lb_stats->packets++;
- netif_rx(skb);
+ len = skb->len;
+ if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
+ lb_stats->bytes += len;
+ lb_stats->packets++;
+ } else
+ lb_stats->drops++;
return 0;
}
@@ -93,6 +98,7 @@ static struct net_device_stats *loopback_get_stats(struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
unsigned long bytes = 0;
unsigned long packets = 0;
+ unsigned long drops = 0;
int i;
pcpu_lstats = dev->ml_priv;
@@ -102,11 +108,14 @@ static struct net_device_stats *loopback_get_stats(struct net_device *dev)
lb_stats = per_cpu_ptr(pcpu_lstats, i);
bytes += lb_stats->bytes;
packets += lb_stats->packets;
+ drops += lb_stats->drops;
}
stats->rx_packets = packets;
stats->tx_packets = packets;
- stats->rx_bytes = bytes;
- stats->tx_bytes = bytes;
+ stats->rx_dropped = drops;
+ stats->rx_errors = drops;
+ stats->rx_bytes = bytes;
+ stats->tx_bytes = bytes;
return stats;
}
@@ -161,6 +170,7 @@ static void loopback_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
| NETIF_F_TSO
| NETIF_F_NO_CSUM
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 8e884869a05..f8fa0c3f0f6 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -304,7 +304,7 @@ struct net_device * __init mac8390_probe(int unit)
if (!MACH_IS_MAC)
return ERR_PTR(-ENODEV);
- dev = alloc_ei_netdev();
+ dev = ____alloc_ei_netdev(0);
if (!dev)
return ERR_PTR(-ENOMEM);
@@ -481,15 +481,15 @@ void cleanup_module(void)
static const struct net_device_ops mac8390_netdev_ops = {
.ndo_open = mac8390_open,
.ndo_stop = mac8390_close,
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_multicast_list = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
+ .ndo_poll_controller = __ei_poll,
#endif
};
@@ -620,19 +620,12 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
/* Good, done, now spit out some messages */
printk(KERN_INFO "%s: %s in slot %X (type %s)\n",
- dev->name, ndev->board->name, ndev->board->slot, cardname[type]);
- printk(KERN_INFO "MAC ");
- {
- int i;
- for (i = 0; i < 6; i++) {
- printk("%2.2x", dev->dev_addr[i]);
- if (i < 5)
- printk(":");
- }
- }
- printk(" IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
- dev->irq, (int)((dev->mem_end - dev->mem_start)/0x1000) * 4,
- dev->mem_start, access_bitmode?32:16);
+ dev->name, ndev->board->name, ndev->board->slot, cardname[type]);
+ printk(KERN_INFO
+ "MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
+ dev->dev_addr, dev->irq,
+ (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
+ dev->mem_start, access_bitmode ? 32 : 16);
return 0;
}
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 384e072de2e..dab45339d3a 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -73,8 +73,6 @@ static char *version =
or override something. */
#include <linux/module.h>
-#define PRINTK(x) printk x
-
/*
Sources:
@@ -402,7 +400,7 @@ net_send_packet(struct sk_buff *skb, struct net_device *dev)
/* Gasp! It hasn't. But that shouldn't happen since
we're waiting for TxOk, so return 1 and requeue this packet. */
local_irq_restore(flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
/* Write the contents of the packet */
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index e82aee41d77..5b5c25368d1 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -599,6 +599,21 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void macb_poll_controller(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ macb_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
@@ -630,7 +645,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
"BUG! Tx Ring full when queue awake!\n");
dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
bp->tx_head, bp->tx_tail);
- return 1;
+ return NETDEV_TX_BUSY;
}
entry = bp->tx_head;
@@ -1094,6 +1109,9 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = macb_poll_controller,
+#endif
};
static int __init macb_probe(struct platform_device *pdev)
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index feebbd92aff..1427755c224 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -94,6 +94,16 @@ static void __mace_set_address(struct net_device *dev, void *addr);
*/
static unsigned char *dummy_buf;
+static const struct net_device_ops mace_netdev_ops = {
+ .ndo_open = mace_open,
+ .ndo_stop = mace_close,
+ .ndo_start_xmit = mace_xmit_start,
+ .ndo_set_multicast_list = mace_set_multicast,
+ .ndo_set_mac_address = mace_set_address,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
struct device_node *mace = macio_get_of_node(mdev);
@@ -207,11 +217,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
}
}
- dev->open = mace_open;
- dev->stop = mace_close;
- dev->hard_start_xmit = mace_xmit_start;
- dev->set_multicast_list = mace_set_multicast;
- dev->set_mac_address = mace_set_address;
+ dev->netdev_ops = &mace_netdev_ops;
/*
* Most of what is below could be moved to mace_open()
@@ -541,7 +547,7 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
mp->tx_fullup = 1;
spin_unlock_irqrestore(&mp->lock, flags);
- return 1; /* can't take it at the moment */
+ return NETDEV_TX_BUSY; /* can't take it at the moment */
}
spin_unlock_irqrestore(&mp->lock, flags);
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 274e99bb63a..44f3c2896f2 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -180,6 +180,17 @@ static void mace_dma_off(struct net_device *dev)
psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}
+static const struct net_device_ops mace_netdev_ops = {
+ .ndo_open = mace_open,
+ .ndo_stop = mace_close,
+ .ndo_start_xmit = mace_xmit_start,
+ .ndo_tx_timeout = mace_tx_timeout,
+ .ndo_set_multicast_list = mace_set_multicast,
+ .ndo_set_mac_address = mace_set_address,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/*
* Not really much of a probe. The hardware table tells us if this
* model of Macintrash has a MACE (AV macintoshes)
@@ -240,13 +251,8 @@ static int __devinit mace_probe(struct platform_device *pdev)
return -ENODEV;
}
- dev->open = mace_open;
- dev->stop = mace_close;
- dev->hard_start_xmit = mace_xmit_start;
- dev->tx_timeout = mace_tx_timeout;
+ dev->netdev_ops = &mace_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- dev->set_multicast_list = mace_set_multicast;
- dev->set_mac_address = mace_set_address;
printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
dev->name, dev->dev_addr);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 214a8cf2b70..99eed9f37c8 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -232,7 +232,7 @@ static int macvlan_open(struct net_device *dev)
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
- err = dev_unicast_add(lowerdev, dev->dev_addr, ETH_ALEN);
+ err = dev_unicast_add(lowerdev, dev->dev_addr);
if (err < 0)
goto out;
if (dev->flags & IFF_ALLMULTI) {
@@ -244,7 +244,7 @@ static int macvlan_open(struct net_device *dev)
return 0;
del_unicast:
- dev_unicast_delete(lowerdev, dev->dev_addr, ETH_ALEN);
+ dev_unicast_delete(lowerdev, dev->dev_addr);
out:
return err;
}
@@ -258,7 +258,7 @@ static int macvlan_stop(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
- dev_unicast_delete(lowerdev, dev->dev_addr, ETH_ALEN);
+ dev_unicast_delete(lowerdev, dev->dev_addr);
macvlan_hash_del(vlan);
return 0;
@@ -282,10 +282,11 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
if (macvlan_addr_busy(vlan->port, addr->sa_data))
return -EBUSY;
- if ((err = dev_unicast_add(lowerdev, addr->sa_data, ETH_ALEN)))
+ err = dev_unicast_add(lowerdev, addr->sa_data);
+ if (err)
return err;
- dev_unicast_delete(lowerdev, dev->dev_addr, ETH_ALEN);
+ dev_unicast_delete(lowerdev, dev->dev_addr);
macvlan_hash_change_addr(vlan, addr->sa_data);
}
@@ -358,6 +359,7 @@ static int macvlan_init(struct net_device *dev)
(lowerdev->state & MACVLAN_STATE_MASK);
dev->features = lowerdev->features & MACVLAN_FEATURES;
dev->iflink = lowerdev->ifindex;
+ dev->hard_header_len = lowerdev->hard_header_len;
macvlan_set_lockdep_class(dev);
@@ -374,36 +376,20 @@ static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev)
{
const struct macvlan_dev *vlan = netdev_priv(dev);
- struct net_device *lowerdev = vlan->lowerdev;
-
- if (lowerdev->ethtool_ops == NULL ||
- lowerdev->ethtool_ops->get_rx_csum == NULL)
- return 0;
- return lowerdev->ethtool_ops->get_rx_csum(lowerdev);
+ return dev_ethtool_get_rx_csum(vlan->lowerdev);
}
static int macvlan_ethtool_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
const struct macvlan_dev *vlan = netdev_priv(dev);
- struct net_device *lowerdev = vlan->lowerdev;
-
- if (!lowerdev->ethtool_ops ||
- !lowerdev->ethtool_ops->get_settings)
- return -EOPNOTSUPP;
-
- return lowerdev->ethtool_ops->get_settings(lowerdev, cmd);
+ return dev_ethtool_get_settings(vlan->lowerdev, cmd);
}
static u32 macvlan_ethtool_get_flags(struct net_device *dev)
{
const struct macvlan_dev *vlan = netdev_priv(dev);
- struct net_device *lowerdev = vlan->lowerdev;
-
- if (!lowerdev->ethtool_ops ||
- !lowerdev->ethtool_ops->get_flags)
- return 0;
- return lowerdev->ethtool_ops->get_flags(lowerdev);
+ return dev_ethtool_get_flags(vlan->lowerdev);
}
static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -430,6 +416,7 @@ static void macvlan_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->netdev_ops = &macvlan_netdev_ops;
dev->destructor = free_netdev;
dev->header_ops = &macvlan_hard_header_ops,
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
new file mode 100644
index 00000000000..dc45e9856c3
--- /dev/null
+++ b/drivers/net/mdio.c
@@ -0,0 +1,431 @@
+/*
+ * mdio.c: Generic support for MDIO-compatible transceivers
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/kernel.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+
+/**
+ * mdio45_probe - probe for an MDIO (clause 45) device
+ * @mdio: MDIO interface
+ * @prtad: Expected PHY address
+ *
+ * This sets @prtad and @mmds in the MDIO interface if successful.
+ * Returns 0 on success, negative on error.
+ */
+int mdio45_probe(struct mdio_if_info *mdio, int prtad)
+{
+ int mmd, stat2, devs1, devs2;
+
+ /* Assume PHY must have at least one of PMA/PMD, WIS, PCS, PHY
+ * XS or DTE XS; give up if none is present. */
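+ /* Clause 45 MMD addresses: 1 = PMA/PMD, 2 = WIS, 3 = PCS,
+ * 4 = PHY XS, 5 = DTE XS; hence the loop over MMDs 1-5 below. */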
+ for (mmd = 1; mmd <= 5; mmd++) {
+ /* Is this MMD present? */
+ stat2 = mdio->mdio_read(mdio->dev, prtad, mmd, MDIO_STAT2);
+ if (stat2 < 0 ||
+ (stat2 & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL)
+ continue;
+
+ /* It should tell us about all the other MMDs */
+ devs1 = mdio->mdio_read(mdio->dev, prtad, mmd, MDIO_DEVS1);
+ devs2 = mdio->mdio_read(mdio->dev, prtad, mmd, MDIO_DEVS2);
+ if (devs1 < 0 || devs2 < 0)
+ continue;
+
+ mdio->prtad = prtad;
+ mdio->mmds = devs1 | (devs2 << 16);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(mdio45_probe);
+
+/**
+ * mdio_set_flag - set or clear flag in an MDIO register
+ * @mdio: MDIO interface
+ * @prtad: PHY address
+ * @devad: MMD address
+ * @addr: Register address
+ * @mask: Mask for flag (single bit set)
+ * @sense: New value of flag
+ *
+ * This debounces changes: it does not write the register if the flag
+ * already has the proper value. Returns 0 on success, negative on error.
+ */
+int mdio_set_flag(const struct mdio_if_info *mdio,
+ int prtad, int devad, u16 addr, int mask,
+ bool sense)
+{
+ int old_val = mdio->mdio_read(mdio->dev, prtad, devad, addr);
+ int new_val;
+
+ if (old_val < 0)
+ return old_val;
+ if (sense)
+ new_val = old_val | mask;
+ else
+ new_val = old_val & ~mask;
+ if (old_val == new_val)
+ return 0;
+ return mdio->mdio_write(mdio->dev, prtad, devad, addr, new_val);
+}
+EXPORT_SYMBOL(mdio_set_flag);
+
+/**
+ * mdio45_links_ok - is link status up/OK
+ * @mdio: MDIO interface
+ * @mmd_mask: Mask for MMDs to check
+ *
+ * Returns 1 if the PHY reports link status up/OK, 0 otherwise.
+ * @mmd_mask is normally @mdio->mmds, but if loopback is enabled
+ * the MMDs being bypassed should be excluded from the mask.
+ */
+int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmd_mask)
+{
+ int devad, reg;
+
+ if (!mmd_mask) {
+ /* Use absence of XGMII faults in lieu of link state */
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad,
+ MDIO_MMD_PHYXS, MDIO_STAT2);
+ return reg >= 0 && !(reg & MDIO_STAT2_RXFAULT);
+ }
+
+ for (devad = 0; mmd_mask; devad++) {
+ if (mmd_mask & (1 << devad)) {
+ mmd_mask &= ~(1 << devad);
+
+ /* Read twice because link state is latched and a
+ * read moves the current state into the register */
+ mdio->mdio_read(mdio->dev, mdio->prtad,
+ devad, MDIO_STAT1);
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad,
+ devad, MDIO_STAT1);
+ if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS))
+ return false;
+ }
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(mdio45_links_ok);
+
+/**
+ * mdio45_nway_restart - restart auto-negotiation for this interface
+ * @mdio: MDIO interface
+ *
+ * Returns 0 on success, negative on error.
+ */
+int mdio45_nway_restart(const struct mdio_if_info *mdio)
+{
+ if (!(mdio->mmds & MDIO_DEVS_AN))
+ return -EOPNOTSUPP;
+
+ mdio_set_flag(mdio, mdio->prtad, MDIO_MMD_AN, MDIO_CTRL1,
+ MDIO_AN_CTRL1_RESTART, true);
+ return 0;
+}
+EXPORT_SYMBOL(mdio45_nway_restart);
+
+static u32 mdio45_get_an(const struct mdio_if_info *mdio, u16 addr)
+{
+ u32 result = 0;
+ int reg;
+
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN, addr);
+ if (reg & ADVERTISE_10HALF)
+ result |= ADVERTISED_10baseT_Half;
+ if (reg & ADVERTISE_10FULL)
+ result |= ADVERTISED_10baseT_Full;
+ if (reg & ADVERTISE_100HALF)
+ result |= ADVERTISED_100baseT_Half;
+ if (reg & ADVERTISE_100FULL)
+ result |= ADVERTISED_100baseT_Full;
+ return result;
+}
+
+/**
+ * mdio45_ethtool_gset_npage - get settings for ETHTOOL_GSET
+ * @mdio: MDIO interface
+ * @ecmd: Ethtool request structure
+ * @npage_adv: Modes currently advertised on next pages
+ * @npage_lpa: Modes advertised by link partner on next pages
+ *
+ * Since the CSRs for auto-negotiation using next pages are not fully
+ * standardised, this function does not attempt to decode them. The
+ * caller must pass them in.
+ */
+void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
+ struct ethtool_cmd *ecmd,
+ u32 npage_adv, u32 npage_lpa)
+{
+ int reg;
+
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->phy_address = mdio->prtad;
+ ecmd->mdio_support =
+ mdio->mode_support & (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22);
+
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_CTRL2);
+ switch (reg & MDIO_PMA_CTRL2_TYPE) {
+ case MDIO_PMA_CTRL2_10GBT:
+ case MDIO_PMA_CTRL2_1000BT:
+ case MDIO_PMA_CTRL2_100BTX:
+ case MDIO_PMA_CTRL2_10BT:
+ ecmd->port = PORT_TP;
+ ecmd->supported = SUPPORTED_TP;
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_SPEED);
+ if (reg & MDIO_SPEED_10G)
+ ecmd->supported |= SUPPORTED_10000baseT_Full;
+ if (reg & MDIO_PMA_SPEED_1000)
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half);
+ if (reg & MDIO_PMA_SPEED_100)
+ ecmd->supported |= (SUPPORTED_100baseT_Full |
+ SUPPORTED_100baseT_Half);
+ if (reg & MDIO_PMA_SPEED_10)
+ ecmd->supported |= (SUPPORTED_10baseT_Full |
+ SUPPORTED_10baseT_Half);
+ ecmd->advertising = ADVERTISED_TP;
+ break;
+
+ case MDIO_PMA_CTRL2_10GBCX4:
+ ecmd->port = PORT_OTHER;
+ ecmd->supported = 0;
+ ecmd->advertising = 0;
+ break;
+
+ case MDIO_PMA_CTRL2_10GBKX4:
+ case MDIO_PMA_CTRL2_10GBKR:
+ case MDIO_PMA_CTRL2_1000BKX:
+ ecmd->port = PORT_OTHER;
+ ecmd->supported = SUPPORTED_Backplane;
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_PMA_EXTABLE);
+ if (reg & MDIO_PMA_EXTABLE_10GBKX4)
+ ecmd->supported |= SUPPORTED_10000baseKX4_Full;
+ if (reg & MDIO_PMA_EXTABLE_10GBKR)
+ ecmd->supported |= SUPPORTED_10000baseKR_Full;
+ if (reg & MDIO_PMA_EXTABLE_1000BKX)
+ ecmd->supported |= SUPPORTED_1000baseKX_Full;
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBR_FECABLE);
+ if (reg & MDIO_PMA_10GBR_FECABLE_ABLE)
+ ecmd->supported |= SUPPORTED_10000baseR_FEC;
+ ecmd->advertising = ADVERTISED_Backplane;
+ break;
+
+ /* All the other defined modes are flavours of optical */
+ default:
+ ecmd->port = PORT_FIBRE;
+ ecmd->supported = SUPPORTED_FIBRE;
+ ecmd->advertising = ADVERTISED_FIBRE;
+ break;
+ }
+
+ if (mdio->mmds & MDIO_DEVS_AN) {
+ ecmd->supported |= SUPPORTED_Autoneg;
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN,
+ MDIO_CTRL1);
+ if (reg & MDIO_AN_CTRL1_ENABLE) {
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->advertising |=
+ ADVERTISED_Autoneg |
+ mdio45_get_an(mdio, MDIO_AN_ADVERTISE) |
+ npage_adv;
+ } else {
+ ecmd->autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ ecmd->autoneg = AUTONEG_DISABLE;
+ }
+
+ if (ecmd->autoneg) {
+ u32 modes = 0;
+ int an_stat = mdio->mdio_read(mdio->dev, mdio->prtad,
+ MDIO_MMD_AN, MDIO_STAT1);
+
+ /* If AN is complete and successful, report best common
+ * mode, otherwise report best advertised mode. */
+ if (an_stat & MDIO_AN_STAT1_COMPLETE) {
+ ecmd->lp_advertising =
+ mdio45_get_an(mdio, MDIO_AN_LPA) | npage_lpa;
+ if (an_stat & MDIO_AN_STAT1_LPABLE)
+ ecmd->lp_advertising |= ADVERTISED_Autoneg;
+ modes = ecmd->advertising & ecmd->lp_advertising;
+ }
+ if ((modes & ~ADVERTISED_Autoneg) == 0)
+ modes = ecmd->advertising;
+
+ if (modes & (ADVERTISED_10000baseT_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_10000baseKR_Full)) {
+ ecmd->speed = SPEED_10000;
+ ecmd->duplex = DUPLEX_FULL;
+ } else if (modes & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseKX_Full)) {
+ ecmd->speed = SPEED_1000;
+ ecmd->duplex = !(modes & ADVERTISED_1000baseT_Half);
+ } else if (modes & (ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half)) {
+ ecmd->speed = SPEED_100;
+ ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full);
+ } else {
+ ecmd->speed = SPEED_10;
+ ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full);
+ }
+ } else {
+ /* Report forced settings */
+ reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_CTRL1);
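+ /* Decode the forced speed from the two speed-select bits: neither
+ * set = 10 Mb/s, SPEED100 only = 100 Mb/s, SPEED1000 only = 1000 Mb/s,
+ * both set = 10000 Mb/s, which is what the product below computes. */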
+ ecmd->speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1) *
+ ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10));
+ ecmd->duplex = (reg & MDIO_CTRL1_FULLDPLX ||
+ ecmd->speed == SPEED_10000);
+ }
+
+ /* 10GBASE-T MDI/MDI-X */
+ if (ecmd->port == PORT_TP && ecmd->speed == SPEED_10000) {
+ switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBT_SWAPPOL)) {
+ case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
+ ecmd->eth_tp_mdix = ETH_TP_MDI;
+ break;
+ case 0:
+ ecmd->eth_tp_mdix = ETH_TP_MDI_X;
+ break;
+ default:
+ /* It's complicated... */
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(mdio45_ethtool_gset_npage);
+
+/**
+ * mdio45_ethtool_spauseparam_an - set auto-negotiated pause parameters
+ * @mdio: MDIO interface
+ * @ecmd: Ethtool request structure
+ *
+ * This function assumes that the PHY has an auto-negotiation MMD. It
+ * will enable and disable advertising of flow control as appropriate.
+ */
+void mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
+ const struct ethtool_pauseparam *ecmd)
+{
+ int adv, old_adv;
+
+ WARN_ON(!(mdio->mmds & MDIO_DEVS_AN));
+
+ old_adv = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN,
+ MDIO_AN_ADVERTISE);
+ adv = old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ if (ecmd->autoneg)
+ adv |= mii_advertise_flowctrl(
+ (ecmd->rx_pause ? FLOW_CTRL_RX : 0) |
+ (ecmd->tx_pause ? FLOW_CTRL_TX : 0));
+ if (adv != old_adv) {
+ mdio->mdio_write(mdio->dev, mdio->prtad, MDIO_MMD_AN,
+ MDIO_AN_ADVERTISE, adv);
+ mdio45_nway_restart(mdio);
+ }
+}
+EXPORT_SYMBOL(mdio45_ethtool_spauseparam_an);
+
+/**
+ * mdio_mii_ioctl - MII ioctl interface for MDIO (clause 22 or 45) PHYs
+ * @mdio: MDIO interface
+ * @mii_data: MII ioctl data structure
+ * @cmd: MII ioctl command
+ *
+ * Returns 0 on success, negative on error.
+ */
+int mdio_mii_ioctl(const struct mdio_if_info *mdio,
+ struct mii_ioctl_data *mii_data, int cmd)
+{
+ int prtad, devad;
+ u16 addr = mii_data->reg_num;
+
+ /* Validate/convert cmd to one of SIOC{G,S}MIIREG */
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ if (mdio->prtad == MDIO_PRTAD_NONE)
+ return -EOPNOTSUPP;
+ mii_data->phy_id = mdio->prtad;
+ cmd = SIOCGMIIREG;
+ break;
+ case SIOCGMIIREG:
+ break;
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Validate/convert phy_id */
+ if ((mdio->mode_support & MDIO_SUPPORTS_C45) &&
+ mdio_phy_id_is_c45(mii_data->phy_id)) {
+ prtad = mdio_phy_id_prtad(mii_data->phy_id);
+ devad = mdio_phy_id_devad(mii_data->phy_id);
+ } else if ((mdio->mode_support & MDIO_SUPPORTS_C22) &&
+ mii_data->phy_id < 0x20) {
+ prtad = mii_data->phy_id;
+ devad = MDIO_DEVAD_NONE;
+ addr &= 0x1f;
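+ /* Clause 22 has only 32 registers, so mask the address to 5 bits */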
+ } else if ((mdio->mode_support & MDIO_EMULATE_C22) &&
+ mdio->prtad != MDIO_PRTAD_NONE &&
+ mii_data->phy_id == mdio->prtad) {
+ /* Remap commonly-used MII registers. */
+ prtad = mdio->prtad;
+ switch (addr) {
+ case MII_BMCR:
+ case MII_BMSR:
+ case MII_PHYSID1:
+ case MII_PHYSID2:
+ devad = __ffs(mdio->mmds);
+ break;
+ case MII_ADVERTISE:
+ case MII_LPA:
+ if (!(mdio->mmds & MDIO_DEVS_AN))
+ return -EINVAL;
+ devad = MDIO_MMD_AN;
+ if (addr == MII_ADVERTISE)
+ addr = MDIO_AN_ADVERTISE;
+ else
+ addr = MDIO_AN_LPA;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ if (cmd == SIOCGMIIREG) {
+ int rc = mdio->mdio_read(mdio->dev, prtad, devad, addr);
+ if (rc < 0)
+ return rc;
+ mii_data->val_out = rc;
+ return 0;
+ } else {
+ return mdio->mdio_write(mdio->dev, prtad, devad, addr,
+ mii_data->val_in);
+ }
+}
+EXPORT_SYMBOL(mdio_mii_ioctl);
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index aa08987f6e8..5d04d94f2a2 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -127,11 +127,11 @@ static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg)
static int mdio_probe(struct meth_private *priv)
{
int i;
- unsigned long p2, p3;
+ unsigned long p2, p3, flags;
/* check if phy is detected already */
if(priv->phy_addr>=0&&priv->phy_addr<32)
return 0;
- spin_lock(&priv->meth_lock);
+ spin_lock_irqsave(&priv->meth_lock, flags);
for (i=0;i<32;++i){
priv->phy_addr=i;
p2=mdio_read(priv,2);
@@ -157,7 +157,7 @@ static int mdio_probe(struct meth_private *priv)
break;
}
}
- spin_unlock(&priv->meth_lock);
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
if(priv->phy_addr<32) {
return 0;
}
@@ -373,14 +373,14 @@ static int meth_release(struct net_device *dev)
static void meth_rx(struct net_device* dev, unsigned long int_status)
{
struct sk_buff *skb;
- unsigned long status;
+ unsigned long status, flags;
struct meth_private *priv = netdev_priv(dev);
unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;
- spin_lock(&priv->meth_lock);
+ spin_lock_irqsave(&priv->meth_lock, flags);
priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
mace->eth.dma_ctrl = priv->dma_ctrl;
- spin_unlock(&priv->meth_lock);
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
if (int_status & METH_INT_RX_UNDERFLOW) {
fifo_rptr = (fifo_rptr - 1) & 0x0f;
@@ -452,12 +452,12 @@ static void meth_rx(struct net_device* dev, unsigned long int_status)
mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
ADVANCE_RX_PTR(priv->rx_write);
}
- spin_lock(&priv->meth_lock);
+ spin_lock_irqsave(&priv->meth_lock, flags);
/* In case there was underflow, and Rx DMA was disabled */
priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
mace->eth.dma_ctrl = priv->dma_ctrl;
mace->eth.int_stat = METH_INT_RX_THRESHOLD;
- spin_unlock(&priv->meth_lock);
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
}
static int meth_tx_full(struct net_device *dev)
@@ -470,11 +470,11 @@ static int meth_tx_full(struct net_device *dev)
static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
{
struct meth_private *priv = netdev_priv(dev);
- unsigned long status;
+ unsigned long status, flags;
struct sk_buff *skb;
unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16;
- spin_lock(&priv->meth_lock);
+ spin_lock_irqsave(&priv->meth_lock, flags);
/* Stop DMA notification */
priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
@@ -527,12 +527,13 @@ static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
}
mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
- spin_unlock(&priv->meth_lock);
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
}
static void meth_error(struct net_device* dev, unsigned status)
{
struct meth_private *priv = netdev_priv(dev);
+ unsigned long flags;
printk(KERN_WARNING "meth: error status: 0x%08x\n",status);
/* check for errors too... */
@@ -547,7 +548,7 @@ static void meth_error(struct net_device* dev, unsigned status)
printk(KERN_WARNING "meth: Rx overflow\n");
if (status & (METH_INT_RX_UNDERFLOW)) {
printk(KERN_WARNING "meth: Rx underflow\n");
- spin_lock(&priv->meth_lock);
+ spin_lock_irqsave(&priv->meth_lock, flags);
mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
/* more underflow interrupts will be delivered,
* effectively throwing us into an infinite loop.
@@ -555,7 +556,7 @@ static void meth_error(struct net_device* dev, unsigned status)
priv->dma_ctrl &= ~METH_DMA_RX_EN;
mace->eth.dma_ctrl = priv->dma_ctrl;
DPRINTK("Disabled meth Rx DMA temporarily\n");
- spin_unlock(&priv->meth_lock);
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
}
mace->eth.int_stat = METH_INT_ERROR;
}
@@ -769,9 +770,17 @@ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
-/*
- * Return statistics to the caller
- */
+static const struct net_device_ops meth_netdev_ops = {
+ .ndo_open = meth_open,
+ .ndo_stop = meth_release,
+ .ndo_start_xmit = meth_tx,
+ .ndo_do_ioctl = meth_ioctl,
+ .ndo_tx_timeout = meth_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
/*
* The init function.
*/
@@ -785,16 +794,10 @@ static int __init meth_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- dev->open = meth_open;
- dev->stop = meth_release;
- dev->hard_start_xmit = meth_tx;
- dev->do_ioctl = meth_ioctl;
-#ifdef HAVE_TX_TIMEOUT
- dev->tx_timeout = meth_tx_timeout;
- dev->watchdog_timeo = timeout;
-#endif
- dev->irq = MACE_ETHERNET_IRQ;
- dev->base_addr = (unsigned long)&mace->eth;
+ dev->netdev_ops = &meth_netdev_ops;
+ dev->watchdog_timeo = timeout;
+ dev->irq = MACE_ETHERNET_IRQ;
+ dev->base_addr = (unsigned long)&mace->eth;
memcpy(dev->dev_addr, o2meth_eaddr, 6);
priv = netdev_priv(dev);
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 92056051f26..d81a5d22a3a 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -31,7 +31,27 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/mdio.h>
+
+static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
+{
+ u32 result = 0;
+ int advert;
+
+ advert = mii->mdio_read(mii->dev, mii->phy_id, addr);
+ if (advert & LPA_LPACK)
+ result |= ADVERTISED_Autoneg;
+ if (advert & ADVERTISE_10HALF)
+ result |= ADVERTISED_10baseT_Half;
+ if (advert & ADVERTISE_10FULL)
+ result |= ADVERTISED_10baseT_Full;
+ if (advert & ADVERTISE_100HALF)
+ result |= ADVERTISED_100baseT_Half;
+ if (advert & ADVERTISE_100FULL)
+ result |= ADVERTISED_100baseT_Full;
+
+ return result;
+}
/**
* mii_ethtool_gset - get settings that are specified in @ecmd
@@ -43,8 +63,8 @@
int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
{
struct net_device *dev = mii->dev;
- u32 advert, bmcr, lpa, nego;
- u32 advert2 = 0, bmcr2 = 0, lpa2 = 0;
+ u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0;
+ u32 nego;
ecmd->supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
@@ -62,50 +82,51 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
/* this isn't fully supported at higher layers */
ecmd->phy_address = mii->phy_id;
+ ecmd->mdio_support = MDIO_SUPPORTS_C22;
ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
- advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
- if (mii->supports_gmii)
- advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
-
- if (advert & ADVERTISE_10HALF)
- ecmd->advertising |= ADVERTISED_10baseT_Half;
- if (advert & ADVERTISE_10FULL)
- ecmd->advertising |= ADVERTISED_10baseT_Full;
- if (advert & ADVERTISE_100HALF)
- ecmd->advertising |= ADVERTISED_100baseT_Half;
- if (advert & ADVERTISE_100FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (advert2 & ADVERTISE_1000HALF)
- ecmd->advertising |= ADVERTISED_1000baseT_Half;
- if (advert2 & ADVERTISE_1000FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
- lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
+ bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR);
if (mii->supports_gmii) {
- bmcr2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
- lpa2 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
+ ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
+ stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
}
if (bmcr & BMCR_ANENABLE) {
ecmd->advertising |= ADVERTISED_Autoneg;
ecmd->autoneg = AUTONEG_ENABLE;
- nego = mii_nway_result(advert & lpa);
- if ((bmcr2 & (ADVERTISE_1000HALF | ADVERTISE_1000FULL)) &
- (lpa2 >> 2))
+ ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
+ if (ctrl1000 & ADVERTISE_1000HALF)
+ ecmd->advertising |= ADVERTISED_1000baseT_Half;
+ if (ctrl1000 & ADVERTISE_1000FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+
+ if (bmsr & BMSR_ANEGCOMPLETE) {
+ ecmd->lp_advertising = mii_get_an(mii, MII_LPA);
+ if (stat1000 & LPA_1000HALF)
+ ecmd->lp_advertising |=
+ ADVERTISED_1000baseT_Half;
+ if (stat1000 & LPA_1000FULL)
+ ecmd->lp_advertising |=
+ ADVERTISED_1000baseT_Full;
+ } else {
+ ecmd->lp_advertising = 0;
+ }
+
+ nego = ecmd->advertising & ecmd->lp_advertising;
+
+ if (nego & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half)) {
ecmd->speed = SPEED_1000;
- else if (nego == LPA_100FULL || nego == LPA_100HALF)
+ ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full);
+ } else if (nego & (ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half)) {
ecmd->speed = SPEED_100;
- else
- ecmd->speed = SPEED_10;
- if ((lpa2 & LPA_1000FULL) || nego == LPA_100FULL ||
- nego == LPA_10FULL) {
- ecmd->duplex = DUPLEX_FULL;
- mii->full_duplex = 1;
+ ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full);
} else {
- ecmd->duplex = DUPLEX_HALF;
- mii->full_duplex = 0;
+ ecmd->speed = SPEED_10;
+ ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full);
}
} else {
ecmd->autoneg = AUTONEG_DISABLE;
@@ -116,6 +137,8 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
}
+ mii->full_duplex = ecmd->duplex;
+
/* ignore maxtxpkt, maxrxpkt for now */
return 0;
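
The mii_ethtool_gset() rework resolves speed and duplex from the intersection of the locally advertised and link-partner masks instead of mii_nway_result(). A simplified, self-contained sketch of that resolution logic (function name and out-parameters are illustrative):

#include <linux/ethtool.h>

/* Intersect the two ADVERTISED_* masks and pick the best common mode. */
static void resolve_link(u32 local, u32 partner, u16 *speed, u8 *duplex)
{
	u32 nego = local & partner;

	if (nego & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half)) {
		*speed = SPEED_1000;
		*duplex = !!(nego & ADVERTISED_1000baseT_Full);
	} else if (nego & (ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half)) {
		*speed = SPEED_100;
		*duplex = !!(nego & ADVERTISED_100baseT_Full);
	} else {
		*speed = SPEED_10;
		*duplex = !!(nego & ADVERTISED_10baseT_Full);
	}
}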
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 664835b822f..b3b9a147d09 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -237,6 +237,16 @@ static void mipsnet_set_mclist(struct net_device *dev)
{
}
+static const struct net_device_ops mipsnet_netdev_ops = {
+ .ndo_open = mipsnet_open,
+ .ndo_stop = mipsnet_close,
+ .ndo_start_xmit = mipsnet_xmit,
+ .ndo_set_multicast_list = mipsnet_set_mclist,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
static int __init mipsnet_probe(struct platform_device *dev)
{
struct net_device *netdev;
@@ -250,10 +260,7 @@ static int __init mipsnet_probe(struct platform_device *dev)
platform_set_drvdata(dev, netdev);
- netdev->open = mipsnet_open;
- netdev->stop = mipsnet_close;
- netdev->hard_start_xmit = mipsnet_xmit;
- netdev->set_multicast_list = mipsnet_set_mclist;
+ netdev->netdev_ops = &mipsnet_netdev_ops;
/*
* TODO: probe for these or load them from PARAM
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 21040a0d81f..1fd068e1d93 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -5,5 +5,5 @@ mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
-mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
+mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
en_resources.o en_netdev.o
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 91f50de84be..21786ad4455 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -89,6 +89,9 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
*cq->mcq.arm_db = 0;
memset(cq->buf, 0, cq->buf_size);
+ if (!cq->is_tx)
+ cq->size = priv->rx_ring[cq->ring].actual_size;
+
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
if (err)
@@ -125,8 +128,10 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
if (cq->is_tx)
del_timer(&cq->timer);
- else
+ else {
napi_disable(&cq->napi);
+ netif_napi_del(&cq->napi);
+ }
mlx4_cq_free(mdev->dev, &cq->mcq);
}
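
The added netif_napi_del() pairs with the netif_napi_add() done when the RX CQ is set up: napi_disable() only stops polling, it does not unlink the NAPI context. A minimal lifecycle sketch with hypothetical names (my_cq, my_poll):

#include <linux/netdevice.h>

struct my_cq {					/* hypothetical completion queue */
	struct napi_struct napi;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	napi_complete(napi);
	return 0;
}

static void my_cq_activate(struct net_device *dev, struct my_cq *cq)
{
	netif_napi_add(dev, &cq->napi, my_poll, 64);
	napi_enable(&cq->napi);
}

static void my_cq_deactivate(struct my_cq *cq)
{
	napi_disable(&cq->napi);	/* stop polling and wait for it to finish */
	netif_napi_del(&cq->napi);	/* then unlink the NAPI context */
}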
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_ethtool.c
index c1bd040b9e0..091f99052c9 100644
--- a/drivers/net/mlx4/en_params.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -38,64 +38,6 @@
#include "mlx4_en.h"
#include "en_port.h"
-#define MLX4_EN_PARM_INT(X, def_val, desc) \
- static unsigned int X = def_val;\
- module_param(X , uint, 0444); \
- MODULE_PARM_DESC(X, desc);
-
-
-/*
- * Device scope module parameters
- */
-
-
-/* Use a XOR rathern than Toeplitz hash function for RSS */
-MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
-
-/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
-MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
-
-/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
-MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
- "Number of LRO sessions per ring or disabled (0)");
-
-/* Priority pausing */
-MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
- " Per priority bit mask");
-MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
- " Per priority bit mask");
-
-int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-{
- struct mlx4_en_profile *params = &mdev->profile;
- int i;
-
- params->rss_xor = (rss_xor != 0);
- params->rss_mask = rss_mask & 0x1f;
- params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
- for (i = 1; i <= MLX4_MAX_PORTS; i++) {
- params->prof[i].rx_pause = 1;
- params->prof[i].rx_ppp = pfcrx;
- params->prof[i].tx_pause = 1;
- params->prof[i].tx_ppp = pfctx;
- params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
- params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
- }
- if (pfcrx || pfctx) {
- params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
- params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
- } else {
- params->prof[1].tx_ring_num = 1;
- params->prof[2].tx_ring_num = 1;
- }
-
- return 0;
-}
-
-
-/*
- * Ethtool support
- */
static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
{
@@ -326,8 +268,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
- MLX4_EN_RX_COAL_TARGET /
- priv->dev->mtu + 1 :
+ MLX4_EN_RX_COAL_TARGET :
coal->rx_max_coalesced_frames;
priv->rx_usecs = (coal->rx_coalesce_usecs ==
MLX4_EN_AUTO_CONF) ?
@@ -371,7 +312,7 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
priv->prof->rx_pause,
priv->prof->rx_ppp);
if (err)
- mlx4_err(mdev, "Failed setting pause params to\n");
+ en_err(priv, "Failed setting pause params\n");
return err;
}
@@ -421,13 +362,13 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
err = mlx4_en_alloc_resources(priv);
if (err) {
- mlx4_err(mdev, "Failed reallocating port resources\n");
+ en_err(priv, "Failed reallocating port resources\n");
goto out;
}
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
- mlx4_err(mdev, "Failed starting port\n");
+ en_err(priv, "Failed starting port\n");
}
out:
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 510633fd57f..9ed4a158f89 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -51,6 +51,55 @@ static const char mlx4_en_version[] =
DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
+#define MLX4_EN_PARM_INT(X, def_val, desc) \
+ static unsigned int X = def_val;\
+ module_param(X , uint, 0444); \
+ MODULE_PARM_DESC(X, desc);
+
+
+/*
+ * Device scope module parameters
+ */
+
+
+/* Use an XOR rather than Toeplitz hash function for RSS */
+MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
+
+/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
+MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
+
+/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
+MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
+ "Number of LRO sessions per ring or disabled (0)");
+
+/* Priority pausing */
+MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
+ " Per priority bit mask");
+MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
+ " Per priority bit mask");
+
+static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
+{
+ struct mlx4_en_profile *params = &mdev->profile;
+ int i;
+
+ params->rss_xor = (rss_xor != 0);
+ params->rss_mask = rss_mask & 0x1f;
+ params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
+ for (i = 1; i <= MLX4_MAX_PORTS; i++) {
+ params->prof[i].rx_pause = 1;
+ params->prof[i].rx_ppp = pfcrx;
+ params->prof[i].tx_pause = 1;
+ params->prof[i].tx_ppp = pfctx;
+ params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+ params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+ params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
+ (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
+ }
+
+ return 0;
+}
+
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
enum mlx4_dev_event event, int port)
{
@@ -194,28 +243,11 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
/* Create a netdev for each port */
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
mlx4_info(mdev, "Activating port:%d\n", i);
- if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
+ if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
mdev->pndev[i] = NULL;
- goto err_free_netdev;
- }
}
return mdev;
-
-err_free_netdev:
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- if (mdev->pndev[i])
- mlx4_en_destroy_netdev(mdev->pndev[i]);
- }
-
- mutex_lock(&mdev->state_lock);
- mdev->device_up = false;
- mutex_unlock(&mdev->state_lock);
- flush_workqueue(mdev->workqueue);
-
- /* Stop event queue before we drop down to release shared SW state */
- destroy_workqueue(mdev->workqueue);
-
err_mr:
mlx4_mr_free(dev, &mdev->mr);
err_uar:
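
The MLX4_EN_PARM_INT() helper moved into en_main.c above is just a wrapper around the standard module_param()/MODULE_PARM_DESC() pair. Expanded by hand for one of the parameters it is roughly:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Roughly what MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask")
 * expands to after the preprocessor runs. */
static unsigned int rss_mask = 0xf;
module_param(rss_mask, uint, 0444);		/* world-readable, not writable */
MODULE_PARM_DESC(rss_mask, "RSS hash type bitmask");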
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 7bcc49de163..e02bafdd368 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -51,14 +51,14 @@ static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *
struct mlx4_en_dev *mdev = priv->mdev;
int err;
- mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
+ en_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
priv->vlgrp = grp;
mutex_lock(&mdev->state_lock);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
if (err)
- mlx4_err(mdev, "Failed configuring VLAN filter\n");
+ en_err(priv, "Failed configuring VLAN filter\n");
}
mutex_unlock(&mdev->state_lock);
}
@@ -72,15 +72,15 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
if (!priv->vlgrp)
return;
- mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
- vid, vlan_group_get_device(priv->vlgrp, vid));
+ en_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
+ vid, vlan_group_get_device(priv->vlgrp, vid));
/* Add VID to port VLAN filter */
mutex_lock(&mdev->state_lock);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
if (err)
- mlx4_err(mdev, "Failed configuring VLAN filter\n");
+ en_err(priv, "Failed configuring VLAN filter\n");
}
mutex_unlock(&mdev->state_lock);
}
@@ -94,9 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
if (!priv->vlgrp)
return;
- mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
- "entry:%p)\n", vid, priv->vlgrp,
- vlan_group_get_device(priv->vlgrp, vid));
+ en_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp entry:%p)\n",
+ vid, priv->vlgrp, vlan_group_get_device(priv->vlgrp, vid));
vlan_group_set_device(priv->vlgrp, vid, NULL);
/* Remove VID from port VLAN filter */
@@ -104,7 +103,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
if (err)
- mlx4_err(mdev, "Failed configuring VLAN filter\n");
+ en_err(priv, "Failed configuring VLAN filter\n");
}
mutex_unlock(&mdev->state_lock);
}
@@ -150,9 +149,10 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
err = mlx4_register_mac(mdev->dev, priv->port,
priv->mac, &priv->mac_index);
if (err)
- mlx4_err(mdev, "Failed changing HW MAC address\n");
+ en_err(priv, "Failed changing HW MAC address\n");
} else
- mlx4_dbg(HW, priv, "Port is down, exiting...\n");
+ en_dbg(HW, priv, "Port is down while "
+ "registering mac, exiting...\n");
mutex_unlock(&mdev->state_lock);
}
@@ -174,7 +174,6 @@ static void mlx4_en_clear_list(struct net_device *dev)
static void mlx4_en_cache_mclist(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
struct dev_mc_list *mclist;
struct dev_mc_list *tmp;
struct dev_mc_list *plist = NULL;
@@ -182,7 +181,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
if (!tmp) {
- mlx4_err(mdev, "failed to allocate multicast list\n");
+ en_err(priv, "failed to allocate multicast list\n");
mlx4_en_clear_list(dev);
return;
}
@@ -219,13 +218,13 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
- mlx4_dbg(HW, priv, "Card is not up, ignoring "
- "multicast change.\n");
+ en_dbg(HW, priv, "Card is not up, "
+ "ignoring multicast change.\n");
goto out;
}
if (!priv->port_up) {
- mlx4_dbg(HW, priv, "Port is down, ignoring "
- "multicast change.\n");
+ en_dbg(HW, priv, "Port is down, "
+ "ignoring multicast change.\n");
goto out;
}
@@ -236,29 +235,27 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
if (dev->flags & IFF_PROMISC) {
if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
if (netif_msg_rx_status(priv))
- mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
- priv->port);
+ en_warn(priv, "Entering promiscuous mode\n");
priv->flags |= MLX4_EN_FLAG_PROMISC;
/* Enable promiscuous mode */
err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
priv->base_qpn, 1);
if (err)
- mlx4_err(mdev, "Failed enabling "
- "promiscous mode\n");
+ en_err(priv, "Failed enabling "
+ "promiscous mode\n");
/* Disable port multicast filter (unconditionally) */
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
- mlx4_err(mdev, "Failed disabling "
- "multicast filter\n");
+ en_err(priv, "Failed disabling "
+ "multicast filter\n");
/* Disable port VLAN filter */
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
if (err)
- mlx4_err(mdev, "Failed disabling "
- "VLAN filter\n");
+ en_err(priv, "Failed disabling VLAN filter\n");
}
goto out;
}
@@ -269,20 +266,19 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
if (priv->flags & MLX4_EN_FLAG_PROMISC) {
if (netif_msg_rx_status(priv))
- mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
- priv->port);
+ en_warn(priv, "Leaving promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
/* Disable promiscuous mode */
err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
priv->base_qpn, 0);
if (err)
- mlx4_err(mdev, "Failed disabling promiscous mode\n");
+ en_err(priv, "Failed disabling promiscous mode\n");
/* Enable port VLAN filter */
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
if (err)
- mlx4_err(mdev, "Failed enabling VLAN filter\n");
+ en_err(priv, "Failed enabling VLAN filter\n");
}
/* Enable/disable the multicast filter according to IFF_ALLMULTI */
@@ -290,12 +286,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
- mlx4_err(mdev, "Failed disabling multicast filter\n");
+ en_err(priv, "Failed disabling multicast filter\n");
} else {
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
- mlx4_err(mdev, "Failed disabling multicast filter\n");
+ en_err(priv, "Failed disabling multicast filter\n");
/* Flush mcast filter and init it with broadcast address */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
@@ -314,7 +310,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_ENABLE);
if (err)
- mlx4_err(mdev, "Failed enabling multicast filter\n");
+ en_err(priv, "Failed enabling multicast filter\n");
mlx4_en_clear_list(dev);
}
@@ -346,10 +342,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
struct mlx4_en_dev *mdev = priv->mdev;
if (netif_msg_timer(priv))
- mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
+ en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
priv->port_stats.tx_timeout++;
- mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
+ en_dbg(DRV, priv, "Scheduling watchdog\n");
queue_work(mdev->workqueue, &priv->watchdog_task);
}
@@ -371,15 +367,15 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
int i;
/* If we haven't received a specific coalescing setting
- * (module param), we set the moderation paramters as follows:
+ * (module param), we set the moderation parameters as follows:
* - moder_cnt is set to the number of mtu sized packets to
* satisfy our coalescing target.
* - moder_time is set to a fixed value.
*/
- priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->mtu + 1;
+ priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
- mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
- "rx_frames:%d rx_usecs:%d\n",
+ en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
+ "rx_frames:%d rx_usecs:%d\n",
priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
/* Setup cq moderation params */
@@ -412,7 +408,6 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
- struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq;
unsigned long packets;
unsigned long rate;
@@ -472,11 +467,11 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
moder_time = priv->rx_usecs;
}
- mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
- tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
+ en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
+ tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
- mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
- "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
+ en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
+ "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
priv->last_moder_time, moder_time, period, packets,
avg_pkt_size, rate);
@@ -487,8 +482,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
cq->moder_time = moder_time;
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
- mlx4_err(mdev, "Failed modifying moderation for cq:%d "
- "on port:%d\n", i, priv->port);
+ en_err(priv, "Failed modifying moderation for cq:%d\n", i);
break;
}
}
@@ -511,8 +505,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
if (err)
- mlx4_dbg(HW, priv, "Could not update stats for "
- "port:%d\n", priv->port);
+ en_dbg(HW, priv, "Could not update stats \n");
mutex_lock(&mdev->state_lock);
if (mdev->device_up) {
@@ -536,12 +529,10 @@ static void mlx4_en_linkstate(struct work_struct *work)
* report to system log */
if (priv->last_link_state != linkstate) {
if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
- if (netif_msg_link(priv))
- mlx4_info(mdev, "Port %d - link down\n", priv->port);
+ en_dbg(LINK, priv, "Link Down\n");
netif_carrier_off(priv->dev);
} else {
- if (netif_msg_link(priv))
- mlx4_info(mdev, "Port %d - link up\n", priv->port);
+ en_dbg(LINK, priv, "Link Up\n");
netif_carrier_on(priv->dev);
}
}
@@ -556,58 +547,53 @@ int mlx4_en_start_port(struct net_device *dev)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq;
struct mlx4_en_tx_ring *tx_ring;
- struct mlx4_en_rx_ring *rx_ring;
int rx_index = 0;
int tx_index = 0;
- u16 stride;
int err = 0;
int i;
int j;
if (priv->port_up) {
- mlx4_dbg(DRV, priv, "start port called while port already up\n");
+ en_dbg(DRV, priv, "start port called while port already up\n");
return 0;
}
/* Calculate Rx buf size */
dev->mtu = min(dev->mtu, priv->max_mtu);
mlx4_en_calc_rx_buf(dev);
- mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
- stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
- DS_SIZE * priv->num_frags);
+ en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
+
/* Configure rx cq's and rings */
+ err = mlx4_en_activate_rx_rings(priv);
+ if (err) {
+ en_err(priv, "Failed to activate RX rings\n");
+ return err;
+ }
for (i = 0; i < priv->rx_ring_num; i++) {
cq = &priv->rx_cq[i];
- rx_ring = &priv->rx_ring[i];
err = mlx4_en_activate_cq(priv, cq);
if (err) {
- mlx4_err(mdev, "Failed activating Rx CQ\n");
+ en_err(priv, "Failed activating Rx CQ\n");
goto cq_err;
}
for (j = 0; j < cq->size; j++)
cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
- mlx4_err(mdev, "Failed setting cq moderation parameters");
+ en_err(priv, "Failed setting cq moderation parameters");
mlx4_en_deactivate_cq(priv, cq);
goto cq_err;
}
mlx4_en_arm_cq(priv, cq);
-
+ priv->rx_ring[i].cqn = cq->mcq.cqn;
++rx_index;
}
- err = mlx4_en_activate_rx_rings(priv);
- if (err) {
- mlx4_err(mdev, "Failed to activate RX rings\n");
- goto cq_err;
- }
-
err = mlx4_en_config_rss_steer(priv);
if (err) {
- mlx4_err(mdev, "Failed configuring rss steering\n");
- goto rx_err;
+ en_err(priv, "Failed configuring rss steering\n");
+ goto cq_err;
}
/* Configure tx cq's and rings */
@@ -616,16 +602,16 @@ int mlx4_en_start_port(struct net_device *dev)
cq = &priv->tx_cq[i];
err = mlx4_en_activate_cq(priv, cq);
if (err) {
- mlx4_err(mdev, "Failed allocating Tx CQ\n");
+ en_err(priv, "Failed allocating Tx CQ\n");
goto tx_err;
}
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
- mlx4_err(mdev, "Failed setting cq moderation parameters");
+ en_err(priv, "Failed setting cq moderation parameters");
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
- mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
+ en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
cq->buf->wqe_index = cpu_to_be16(0xffff);
/* Configure ring */
@@ -633,7 +619,7 @@ int mlx4_en_start_port(struct net_device *dev)
err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
priv->rx_ring[0].srq.srqn);
if (err) {
- mlx4_err(mdev, "Failed allocating Tx ring\n");
+ en_err(priv, "Failed allocating Tx ring\n");
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
@@ -651,30 +637,30 @@ int mlx4_en_start_port(struct net_device *dev)
priv->prof->rx_pause,
priv->prof->rx_ppp);
if (err) {
- mlx4_err(mdev, "Failed setting port general configurations"
- " for port %d, with error %d\n", priv->port, err);
+ en_err(priv, "Failed setting port general configurations "
+ "for port %d, with error %d\n", priv->port, err);
goto tx_err;
}
/* Set default qp number */
err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
if (err) {
- mlx4_err(mdev, "Failed setting default qp numbers\n");
+ en_err(priv, "Failed setting default qp numbers\n");
goto tx_err;
}
/* Set port mac number */
- mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+ en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
err = mlx4_register_mac(mdev->dev, priv->port,
priv->mac, &priv->mac_index);
if (err) {
- mlx4_err(mdev, "Failed setting port mac\n");
+ en_err(priv, "Failed setting port mac\n");
goto tx_err;
}
/* Init port */
- mlx4_dbg(HW, priv, "Initializing port\n");
+ en_dbg(HW, priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
if (err) {
- mlx4_err(mdev, "Failed Initializing port\n");
+ en_err(priv, "Failed Initializing port\n");
goto mac_err;
}
@@ -694,12 +680,11 @@ tx_err:
}
mlx4_en_release_rss_steer(priv);
-rx_err:
- for (i = 0; i < priv->rx_ring_num; i++)
- mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
cq_err:
while (rx_index--)
mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+ for (i = 0; i < priv->rx_ring_num; i++)
+ mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
return err; /* need to close devices */
}
@@ -712,8 +697,7 @@ void mlx4_en_stop_port(struct net_device *dev)
int i;
if (!priv->port_up) {
- mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
- priv->port);
+ en_dbg(DRV, priv, "stop port called while port already down\n");
return;
}
netif_stop_queue(dev);
@@ -758,13 +742,13 @@ static void mlx4_en_restart(struct work_struct *work)
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
- mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+ en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
mlx4_en_stop_port(dev);
if (mlx4_en_start_port(dev))
- mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
+ en_err(priv, "Failed restarting port %d\n", priv->port);
}
mutex_unlock(&mdev->state_lock);
}
@@ -780,14 +764,14 @@ static int mlx4_en_open(struct net_device *dev)
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
- mlx4_err(mdev, "Cannot open - device down/disabled\n");
+ en_err(priv, "Cannot open - device down/disabled\n");
err = -EBUSY;
goto out;
}
/* Reset HW statistics and performance counters */
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
- mlx4_dbg(HW, priv, "Failed dumping statistics\n");
+ en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->stats, 0, sizeof(priv->stats));
memset(&priv->pstats, 0, sizeof(priv->pstats));
@@ -804,7 +788,7 @@ static int mlx4_en_open(struct net_device *dev)
mlx4_en_set_default_moderation(priv);
err = mlx4_en_start_port(dev);
if (err)
- mlx4_err(mdev, "Failed starting port:%d\n", priv->port);
+ en_err(priv, "Failed starting port:%d\n", priv->port);
out:
mutex_unlock(&mdev->state_lock);
@@ -817,8 +801,7 @@ static int mlx4_en_close(struct net_device *dev)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- if (netif_msg_ifdown(priv))
- mlx4_info(mdev, "Close called for port:%d\n", priv->port);
+ en_dbg(IFDOWN, priv, "Close port called\n");
mutex_lock(&mdev->state_lock);
@@ -850,7 +833,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
- struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile *prof = priv->prof;
int i;
@@ -879,7 +861,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
return 0;
err:
- mlx4_err(mdev, "Failed to allocate NIC resources\n");
+ en_err(priv, "Failed to allocate NIC resources\n");
return -ENOMEM;
}
@@ -889,7 +871,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
+ en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
/* Unregister device - this will close the port if it was up */
if (priv->registered)
@@ -918,11 +900,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
- mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
+ en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
dev->mtu, new_mtu);
if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
- mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu);
+ en_err(priv, "Bad MTU size:%d.\n", new_mtu);
return -EPERM;
}
dev->mtu = new_mtu;
@@ -932,13 +914,13 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (!mdev->device_up) {
/* NIC is probably restarting - let watchdog task reset
* the port */
- mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n");
+ en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
mlx4_en_stop_port(dev);
mlx4_en_set_default_moderation(priv);
err = mlx4_en_start_port(dev);
if (err) {
- mlx4_err(mdev, "Failed restarting port:%d\n",
+ en_err(priv, "Failed restarting port:%d\n",
priv->port);
queue_work(mdev->workqueue, &priv->watchdog_task);
}
@@ -952,6 +934,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
+ .ndo_select_queue = mlx4_en_select_queue,
.ndo_get_stats = mlx4_en_get_stats,
.ndo_set_multicast_list = mlx4_en_set_multicast,
.ndo_set_mac_address = mlx4_en_set_mac,
@@ -974,7 +957,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
int i;
int err;
- dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
if (dev == NULL) {
mlx4_err(mdev, "Net device allocation failed\n");
return -ENOMEM;
@@ -1012,7 +995,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
priv->mac = mdev->dev->caps.def_mac[priv->port];
if (ILLEGAL_MAC(priv->mac)) {
- mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
+ en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
priv->port, priv->mac);
err = -EINVAL;
goto out;
@@ -1031,19 +1014,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
if (err) {
- mlx4_err(mdev, "Failed to allocate page for rx qps\n");
+ en_err(priv, "Failed to allocate page for rx qps\n");
goto out;
}
priv->allocated = 1;
- /* Populate Tx priority mappings */
- mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);
-
/*
* Initialize netdev entry points
*/
dev->netdev_ops = &mlx4_netdev_ops;
dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
+ dev->real_num_tx_queues = MLX4_EN_NUM_TX_RINGS;
SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
@@ -1057,7 +1038,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
* Set driver features
*/
dev->features |= NETIF_F_SG;
+ dev->vlan_features |= NETIF_F_SG;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->features |= NETIF_F_HIGHDMA;
dev->features |= NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
@@ -1067,6 +1050,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->LSO_support) {
dev->features |= NETIF_F_TSO;
dev->features |= NETIF_F_TSO6;
+ dev->vlan_features |= NETIF_F_TSO;
+ dev->vlan_features |= NETIF_F_TSO6;
}
mdev->pndev[port] = dev;
@@ -1074,9 +1059,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_carrier_off(dev);
err = register_netdev(dev);
if (err) {
- mlx4_err(mdev, "Netdev registration failed\n");
+ en_err(priv, "Netdev registration failed for port %d\n", port);
goto out;
}
+
+ en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+ en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+
priv->registered = 1;
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
return 0;
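
The switch to alloc_etherdev_mq() is what lets the driver expose one stack-visible TX queue per hardware ring (paired with the .ndo_select_queue hook added in en_tx.c). A minimal sketch with hypothetical private data:

#include <linux/etherdevice.h>

struct my_priv { int dummy; };			/* hypothetical private data */

/* Allocate a netdev with one TX queue per hardware ring. */
static struct net_device *my_alloc_netdev(unsigned int num_tx_rings)
{
	struct net_device *dev;

	dev = alloc_etherdev_mq(sizeof(struct my_priv), num_tx_rings);
	if (!dev)
		return NULL;
	dev->real_num_tx_queues = num_tx_rings;
	return dev;
}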
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 9ee873e872b..5a14899c1e2 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -114,8 +114,8 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
goto out;
page_alloc->offset = priv->frag_info[i].frag_align;
- mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
- i, page_alloc->page);
+ en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
+ i, page_alloc->page);
}
return 0;
@@ -136,8 +136,8 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
for (i = 0; i < priv->num_frags; i++) {
page_alloc = &ring->page_alloc[i];
- mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
- i, page_count(page_alloc->page));
+ en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
+ i, page_count(page_alloc->page));
put_page(page_alloc->page);
page_alloc->page = NULL;
@@ -202,12 +202,34 @@ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
-static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
+static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
+ int index)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct skb_frag_struct *skb_frags;
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
+ dma_addr_t dma;
+ int nr;
+
+ skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ for (nr = 0; nr < priv->num_frags; nr++) {
+ en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+ en_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
+ pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+ PCI_DMA_FROMDEVICE);
+ put_page(skb_frags[nr].page);
+ }
+}
+
+static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
+{
struct mlx4_en_rx_ring *ring;
int ring_ind;
int buf_ind;
+ int new_size;
for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
@@ -216,22 +238,34 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->actual_size)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
- mlx4_err(mdev, "Failed to allocate "
- "enough rx buffers\n");
+ en_err(priv, "Failed to allocate "
+ "enough rx buffers\n");
return -ENOMEM;
} else {
- if (netif_msg_rx_err(priv))
- mlx4_warn(mdev,
- "Only %d buffers allocated\n",
- ring->actual_size);
- goto out;
+ new_size = rounddown_pow_of_two(ring->actual_size);
+ en_warn(priv, "Only %d buffers allocated "
+ "reducing ring size to %d",
+ ring->actual_size, new_size);
+ goto reduce_rings;
}
}
ring->actual_size++;
ring->prod++;
}
}
-out:
+ return 0;
+
+reduce_rings:
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+ ring = &priv->rx_ring[ring_ind];
+ while (ring->actual_size > new_size) {
+ ring->actual_size--;
+ ring->prod--;
+ mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
+ }
+ ring->size_mask = ring->actual_size - 1;
+ }
+
return 0;
}
@@ -247,15 +281,14 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev,
ring->size_mask);
if (err) {
if (netif_msg_rx_err(priv))
- mlx4_warn(priv->mdev,
- "Failed preparing rx descriptor\n");
+ en_warn(priv, "Failed preparing rx descriptor\n");
priv->port_stats.rx_alloc_failed++;
break;
}
++num;
++ring->prod;
}
- if ((u32) (ring->prod - ring->cons) == ring->size)
+ if ((u32) (ring->prod - ring->cons) == ring->actual_size)
ring->full = 1;
return num;
@@ -264,33 +297,17 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev,
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
- struct mlx4_en_dev *mdev = priv->mdev;
- struct skb_frag_struct *skb_frags;
- struct mlx4_en_rx_desc *rx_desc;
- dma_addr_t dma;
int index;
- int nr;
- mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
- ring->cons, ring->prod);
+ en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
+ ring->cons, ring->prod);
/* Unmap and free Rx buffers */
- BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
+ BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
while (ring->cons != ring->prod) {
index = ring->cons & ring->size_mask;
- rx_desc = ring->buf + (index << ring->log_stride);
- skb_frags = ring->rx_info + (index << priv->log_rx_info);
- mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
-
- for (nr = 0; nr < priv->num_frags; nr++) {
- mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- dma = be64_to_cpu(rx_desc->data[nr].addr);
-
- mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
- pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
- PCI_DMA_FROMDEVICE);
- put_page(skb_frags[nr].page);
- }
+ en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+ mlx4_en_free_rx_desc(priv, ring, index);
++ring->cons;
}
}
@@ -354,10 +371,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
sizeof(struct skb_frag_struct));
ring->rx_info = vmalloc(tmp);
if (!ring->rx_info) {
- mlx4_err(mdev, "Failed allocating rx_info ring\n");
+ en_err(priv, "Failed allocating rx_info ring\n");
return -ENOMEM;
}
- mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
+ en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
ring->rx_info, tmp);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
@@ -367,7 +384,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
err = mlx4_en_map_buffer(&ring->wqres.buf);
if (err) {
- mlx4_err(mdev, "Failed to map RX buffer\n");
+ en_err(priv, "Failed to map RX buffer\n");
goto err_hwq;
}
ring->buf = ring->wqres.buf.direct.buf;
@@ -385,7 +402,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
sizeof(struct net_lro_desc),
GFP_KERNEL);
if (!ring->lro.lro_arr) {
- mlx4_err(mdev, "Failed to allocate lro array\n");
+ en_err(priv, "Failed to allocate lro array\n");
goto err_map;
}
ring->lro.get_frag_header = mlx4_en_get_frag_header;
@@ -436,7 +453,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
/* Initialize page allocators */
err = mlx4_en_init_allocator(priv, ring);
if (err) {
- mlx4_err(mdev, "Failed initializing ring allocator\n");
+ en_err(priv, "Failed initializing ring allocator\n");
ring_ind--;
goto err_allocator;
}
@@ -454,7 +471,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
mlx4_en_update_rx_prod_db(ring);
/* Configure SRQ representing the ring */
- ring->srq.max = ring->size;
+ ring->srq.max = ring->actual_size;
ring->srq.max_gs = max_gs;
ring->srq.wqe_shift = ilog2(ring->stride);
@@ -467,7 +484,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
ring->wqres.db.dma, &ring->srq);
if (err){
- mlx4_err(mdev, "Failed to allocate srq\n");
+ en_err(priv, "Failed to allocate srq\n");
ring_ind--;
goto err_srq;
}
@@ -582,7 +599,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
if (!skb) {
- mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
+ en_dbg(RX_ERR, priv, "Failed allocating skb\n");
return NULL;
}
skb->dev = priv->dev;
@@ -661,7 +678,6 @@ static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
struct skb_frag_struct *skb_frags;
@@ -698,14 +714,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
- mlx4_err(mdev, "CQE completed in error - vendor "
+ en_err(priv, "CQE completed in error - vendor "
"syndrom:%d syndrom:%d\n",
((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
((struct mlx4_err_cqe *) cqe)->syndrome);
goto next;
}
if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
- mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
+ en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
goto next;
}
@@ -855,7 +871,7 @@ static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16
u16 res = MLX4_EN_ALLOC_SIZE % stride;
u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
- mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
+ en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
"res:%d offset:%d\n", stride, align, res, offset);
return offset;
}
@@ -900,10 +916,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
priv->rx_skb_size = eff_mtu;
priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
- mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
+ en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
"num_frags:%d):\n", eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
- mlx4_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d "
+ en_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d "
"stride:%d last_offset:%d\n", i,
priv->frag_info[i].frag_size,
priv->frag_info[i].frag_prefix_size,
@@ -923,12 +939,12 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
int i;
rss_map->size = roundup_pow_of_two(num_entries);
- mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
- rss_map->size);
+ en_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
+ rss_map->size);
for (i = 0; i < rss_map->size; i++) {
rss_map->map[i] = i % num_rings;
- mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
+ en_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
}
}
@@ -943,13 +959,13 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
context = kmalloc(sizeof *context , GFP_KERNEL);
if (!context) {
- mlx4_err(mdev, "Failed to allocate qp context\n");
+ en_err(priv, "Failed to allocate qp context\n");
return -ENOMEM;
}
err = mlx4_qp_alloc(mdev->dev, qpn, qp);
if (err) {
- mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
+ en_err(priv, "Failed to allocate qp #%x\n", qpn);
goto out;
}
qp->event = mlx4_en_sqp_event;
@@ -981,12 +997,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
int err = 0;
int good_qps = 0;
- mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
+ en_dbg(DRV, priv, "Configuring rss steering\n");
err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
rss_map->size, &rss_map->base_qpn);
if (err) {
- mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
- rss_map->size, priv->port);
+ en_err(priv, "Failed reserving %d qps\n", rss_map->size);
return err;
}
@@ -1006,13 +1021,13 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
/* Configure RSS indirection qp */
err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
if (err) {
- mlx4_err(mdev, "Failed to reserve range for RSS "
- "indirection qp\n");
+ en_err(priv, "Failed to reserve range for RSS "
+ "indirection qp\n");
goto rss_err;
}
err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
if (err) {
- mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
+ en_err(priv, "Failed to allocate RSS indirection QP\n");
goto reserve_err;
}
rss_map->indir_qp.event = mlx4_en_sqp_event;
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index ac6fc499b28..5dc7466ad03 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -68,15 +68,15 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = vmalloc(tmp);
if (!ring->tx_info) {
- mlx4_err(mdev, "Failed allocating tx_info ring\n");
+ en_err(priv, "Failed allocating tx_info ring\n");
return -ENOMEM;
}
- mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+ en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
ring->tx_info, tmp);
ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
if (!ring->bounce_buf) {
- mlx4_err(mdev, "Failed allocating bounce buffer\n");
+ en_err(priv, "Failed allocating bounce buffer\n");
err = -ENOMEM;
goto err_tx;
}
@@ -85,31 +85,31 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
2 * PAGE_SIZE);
if (err) {
- mlx4_err(mdev, "Failed allocating hwq resources\n");
+ en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
}
err = mlx4_en_map_buffer(&ring->wqres.buf);
if (err) {
- mlx4_err(mdev, "Failed to map TX buffer\n");
+ en_err(priv, "Failed to map TX buffer\n");
goto err_hwq_res;
}
ring->buf = ring->wqres.buf.direct.buf;
- mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
- "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
- ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+ en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
+ "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
+ ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
if (err) {
- mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
+ en_err(priv, "Failed reserving qp for tx ring.\n");
goto err_map;
}
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
if (err) {
- mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
+ en_err(priv, "Failed allocating qp %d\n", ring->qpn);
goto err_reserve;
}
ring->qp.event = mlx4_en_sqp_event;
@@ -135,7 +135,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring)
{
struct mlx4_en_dev *mdev = priv->mdev;
- mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
+ en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
mlx4_qp_remove(mdev->dev, &ring->qp);
mlx4_qp_free(mdev->dev, &ring->qp);
@@ -274,12 +274,12 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
/* Skip last polled descriptor */
ring->cons += ring->last_nr_txbb;
- mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
+ en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
ring->cons, ring->prod);
if ((u32) (ring->prod - ring->cons) > ring->size) {
if (netif_msg_tx_err(priv))
- mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
+ en_warn(priv, "Tx consumer passed producer!\n");
return 0;
}
@@ -292,39 +292,11 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
}
if (cnt)
- mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
+ en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
return cnt;
}
-void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
-{
- int block = 8 / ring_num;
- int extra = 8 - (block * ring_num);
- int num = 0;
- u16 ring = 1;
- int prio;
-
- if (ring_num == 1) {
- for (prio = 0; prio < 8; prio++)
- prio_map[prio] = 0;
- return;
- }
-
- for (prio = 0; prio < 8; prio++) {
- if (extra && (num == block + 1)) {
- ring++;
- num = 0;
- extra--;
- } else if (!extra && (num == block)) {
- ring++;
- num = 0;
- }
- prio_map[prio] = ring;
- mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
- num++;
- }
-}
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
@@ -386,18 +358,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
if (unlikely(ring->blocked)) {
if ((u32) (ring->prod - ring->cons) <=
ring->size - HEADROOM - MAX_DESC_TXBBS) {
-
- /* TODO: support multiqueue netdevs. Currently, we block
- * when *any* ring is full. Note that:
- * - 2 Tx rings can unblock at the same time and call
- * netif_wake_queue(), which is OK since this
- * operation is idempotent.
- * - We might wake the queue just after another ring
- * stopped it. This is no big deal because the next
- * transmission on that ring would stop the queue.
- */
ring->blocked = 0;
- netif_wake_queue(dev);
+ netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
priv->port_stats.wake_queue++;
}
}
@@ -426,7 +388,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
INC_PERF_COUNTER(priv->pstats.tx_poll);
- if (!spin_trylock(&ring->comp_lock)) {
+ if (!spin_trylock_irq(&ring->comp_lock)) {
mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
return;
}
@@ -439,7 +401,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
if (inflight && priv->port_up)
mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
- spin_unlock(&ring->comp_lock);
+ spin_unlock_irq(&ring->comp_lock);
}
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
@@ -482,9 +444,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
- if (spin_trylock(&ring->comp_lock)) {
+ if (spin_trylock_irq(&ring->comp_lock)) {
mlx4_en_process_tx_cq(priv->dev, cq);
- spin_unlock(&ring->comp_lock);
+ spin_unlock_irq(&ring->comp_lock);
}
}
@@ -539,7 +501,6 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
int *lso_header_size)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
int real_size;
if (skb_is_gso(skb)) {
@@ -553,14 +514,14 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
real_size += DS_SIZE;
else {
if (netif_msg_tx_err(priv))
- mlx4_warn(mdev, "Non-linear headers\n");
+ en_warn(priv, "Non-linear headers\n");
dev_kfree_skb_any(skb);
return 0;
}
}
if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
if (netif_msg_tx_err(priv))
- mlx4_warn(mdev, "LSO header size too big\n");
+ en_warn(priv, "LSO header size too big\n");
dev_kfree_skb_any(skb);
return 0;
}
@@ -617,21 +578,20 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
-static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
- u16 *vlan_tag)
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
- int tx_ind;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ u16 vlan_tag = 0;
- /* Obtain VLAN information if present */
- if (priv->vlgrp && vlan_tx_tag_present(skb)) {
- *vlan_tag = vlan_tx_tag_get(skb);
- /* Set the Tx ring to use according to vlan priority */
- tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
- } else {
- *vlan_tag = 0;
- tx_ind = 0;
+ /* If we support per priority flow control and the packet contains
+ * a vlan tag, send the packet to the TX ring assigned to that priority
+ */
+ if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb)) {
+ vlan_tag = vlan_tx_tag_get(skb);
+ return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
}
- return tx_ind;
+
+ return skb_tx_hash(dev, skb);
}
int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -651,7 +611,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t dma;
u32 index;
__be32 op_own;
- u16 vlan_tag;
+ u16 vlan_tag = 0;
int i;
int lso_header_size;
void *fragptr;
@@ -669,20 +629,21 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
nr_txbb = desc_size / TXBB_SIZE;
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
- mlx4_warn(mdev, "Oversized header or SG list\n");
+ en_warn(priv, "Oversized header or SG list\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- tx_ind = get_vlan_info(priv, skb, &vlan_tag);
+ tx_ind = skb->queue_mapping;
ring = &priv->tx_ring[tx_ind];
+ if (priv->vlgrp && vlan_tx_tag_present(skb))
+ vlan_tag = vlan_tx_tag_get(skb);
/* Check available TXBBs And 2K spare for prefetch */
if (unlikely(((int)(ring->prod - ring->cons)) >
ring->size - HEADROOM - MAX_DESC_TXBBS)) {
- /* every full Tx ring stops queue.
- * TODO: implement multi-queue support (per-queue stop) */
- netif_stop_queue(dev);
+ /* every full Tx ring stops queue */
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
ring->blocked = 1;
priv->port_stats.queue_stopped++;
@@ -695,7 +656,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Now that we know what Tx ring to use */
if (unlikely(!priv->port_up)) {
if (netif_msg_tx_err(priv))
- mlx4_warn(mdev, "xmit: port down!\n");
+ en_warn(priv, "xmit: port down!\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -819,7 +780,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Ring doorbell! */
wmb();
writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
- dev->trans_start = jiffies;
/* Poll CQ here */
mlx4_en_xmit_poll(priv, tx_ind);
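
With queue selection moved into .ndo_select_queue, each TX ring can stop and wake only its own stack queue via netif_tx_stop_queue()/netif_tx_wake_queue() rather than stalling the whole device. A minimal sketch of that flow control, with hypothetical ring state and thresholds:

#include <linux/netdevice.h>

struct my_tx_ring {				/* hypothetical ring state */
	u32 prod, cons, size;
	int blocked;
};

static int my_ring_full(struct my_tx_ring *ring)
{
	return (u32)(ring->prod - ring->cons) > ring->size - 16;
}

/* Xmit path: only the queue mapped to the full ring is stopped. */
static int my_xmit_on_ring(struct sk_buff *skb, struct net_device *dev,
			   struct my_tx_ring *ring)
{
	int q = skb->queue_mapping;		/* chosen by .ndo_select_queue */

	if (my_ring_full(ring)) {
		netif_tx_stop_queue(netdev_get_tx_queue(dev, q));
		ring->blocked = 1;
		return NETDEV_TX_BUSY;
	}
	/* ... post descriptors, ring doorbell ... */
	return NETDEV_TX_OK;
}

/* Completion side: wake only this ring's queue once room is back. */
static void my_tx_complete(struct net_device *dev, int q, struct my_tx_ring *ring)
{
	if (ring->blocked && !my_ring_full(ring)) {
		ring->blocked = 0;
		netif_tx_wake_queue(netdev_get_tx_queue(dev, q));
	}
}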
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8830dcb92ec..b9ceddde46c 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -497,8 +497,10 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
if (eq_table->have_irq)
free_irq(dev->pdev->irq, dev);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
- if (eq_table->eq[i].have_irq)
+ if (eq_table->eq[i].have_irq) {
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+ eq_table->eq[i].have_irq = 0;
+ }
kfree(eq_table->irq_names);
}
@@ -623,8 +625,10 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
(dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
&priv->eq_table.eq[i]);
- if (err)
+ if (err) {
+ --i;
goto err_out_unmap;
+ }
}
err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
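
Both eq.c hunks are about not tearing a resource down twice: have_irq is cleared after free_irq(), and the loop index is stepped back before jumping to the unwind label so only EQs that were actually created get destroyed. The general shape of that unwind, sketched with hypothetical helpers:

struct my_dev;					/* hypothetical device type */
static int my_create_one(struct my_dev *dev, int i) { return 0; }
static void my_destroy_one(struct my_dev *dev, int i) { }

/* Step i back before jumping so only created entries are destroyed. */
static int my_create_all(struct my_dev *dev, int n)
{
	int i, err;

	for (i = 0; i < n; ++i) {
		err = my_create_one(dev, i);
		if (err) {
			--i;			/* entry i was never created */
			goto err_unwind;
		}
	}
	return 0;

err_unwind:
	for (; i >= 0; --i)
		my_destroy_one(dev, i);
	return err;
}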
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 30bea968969..018348c0119 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
"(0/1, default 0)");
+static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
dev->caps.reserved_cqs = dev_cap->reserved_cqs;
dev->caps.reserved_eqs = dev_cap->reserved_eqs;
+ dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
- MLX4_MTT_ENTRY_PER_SEG);
+ dev->caps.mtts_per_seg);
dev->caps.reserved_mrws = dev_cap->reserved_mrws;
dev->caps.reserved_uars = dev_cap->reserved_uars;
dev->caps.reserved_pds = dev_cap->reserved_pds;
- dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+ dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
@@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void)
return -1;
}
+ if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+ printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+ return -1;
+ }
+
return 0;
}
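
log_mtts_per_seg is validated once in mlx4_verify_params(), the usual place to reject out-of-range module parameters before any hardware is touched. A sketch of that pattern with a purely hypothetical parameter and module name:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int my_log_param = 3;			/* hypothetical tunable */
module_param(my_log_param, int, 0444);
MODULE_PARM_DESC(my_log_param, "Example log2 tunable (1-5)");

static int __init my_init(void)
{
	/* Reject out-of-range values before touching any hardware. */
	if (my_log_param < 1 || my_log_param > 5) {
		printk(KERN_WARNING "my_mod: bad my_log_param: %d\n",
		       my_log_param);
		return -EINVAL;
	}
	return 0;
}
module_init(my_init);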
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index ef840abbcd3..d43a9e4c2ae 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -49,26 +49,42 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "1.4.0"
-#define DRV_RELDATE "Sep 2008"
+#define DRV_VERSION "1.4.1.1"
+#define DRV_RELDATE "June 2009"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
-#define mlx4_dbg(mlevel, priv, format, arg...) \
- if (NETIF_MSG_##mlevel & priv->msg_enable) \
- printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
- (dev_name(&priv->mdev->pdev->dev)) , ## arg)
+#define en_print(level, priv, format, arg...) \
+ { \
+ if ((priv)->registered) \
+ printk(level "%s: %s: " format, DRV_NAME, \
+ (priv->dev)->name, ## arg); \
+ else \
+ printk(level "%s: %s: Port %d: " format, \
+ DRV_NAME, dev_name(&priv->mdev->pdev->dev), \
+ (priv)->port, ## arg); \
+ }
+
+#define en_dbg(mlevel, priv, format, arg...) \
+ { \
+ if (NETIF_MSG_##mlevel & priv->msg_enable) \
+ en_print(KERN_DEBUG, priv, format, ## arg) \
+ }
+#define en_warn(priv, format, arg...) \
+ en_print(KERN_WARNING, priv, format, ## arg)
+#define en_err(priv, format, arg...) \
+ en_print(KERN_ERR, priv, format, ## arg)
#define mlx4_err(mdev, format, arg...) \
printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
- (dev_name(&mdev->pdev->dev)) , ## arg)
+ dev_name(&mdev->pdev->dev) , ## arg)
#define mlx4_info(mdev, format, arg...) \
printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
- (dev_name(&mdev->pdev->dev)) , ## arg)
+ dev_name(&mdev->pdev->dev) , ## arg)
#define mlx4_warn(mdev, format, arg...) \
printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
- (dev_name(&mdev->pdev->dev)) , ## arg)
+ dev_name(&mdev->pdev->dev) , ## arg)
/*
* Device constants
@@ -123,12 +139,14 @@ enum {
#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
-#define MLX4_EN_TX_RING_NUM 9
-#define MLX4_EN_DEF_TX_RING_SIZE 1024
+#define MLX4_EN_SMALL_PKT_SIZE 64
+#define MLX4_EN_NUM_TX_RINGS 8
+#define MLX4_EN_NUM_PPP_RINGS 8
+#define MLX4_EN_DEF_TX_RING_SIZE 512
#define MLX4_EN_DEF_RX_RING_SIZE 1024
-/* Target number of bytes to coalesce with interrupt moderation */
-#define MLX4_EN_RX_COAL_TARGET 0x20000
+/* Target number of packets to coalesce with interrupt moderation */
+#define MLX4_EN_RX_COAL_TARGET 44
#define MLX4_EN_RX_COAL_TIME 0x10
#define MLX4_EN_TX_COAL_PKTS 5
@@ -462,7 +480,6 @@ struct mlx4_en_priv {
int base_qpn;
struct mlx4_en_rss_map rss_map;
- u16 tx_prio_map[8];
u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
u32 tx_ring_num;
@@ -500,8 +517,6 @@ void mlx4_en_stop_port(struct net_device *dev);
void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
-int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
-
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int entries, int ring, enum cq_type mode);
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -512,6 +527,7 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_poll_tx_cq(unsigned long data);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
@@ -546,7 +562,6 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
struct mlx4_en_rss_map *rss_map,
int num_entries, int num_rings);
-void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
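The mlx4_en.h hunk above introduces en_print() and the en_dbg()/en_warn()/en_err() wrappers, which switch from a PCI-device-based message prefix to a per-netdev prefix once the interface is registered. A userspace analogue of that selection logic (illustration only, with printf standing in for printk):

#include <stdio.h>

struct fake_priv {
        int registered;
        int port;
        const char *ifname;     /* e.g. "eth2" once registered */
        const char *pcidev;     /* e.g. "0000:07:00.0" before that */
};

#define fake_en_print(priv, fmt, ...)                                   \
        do {                                                            \
                if ((priv)->registered)                                 \
                        printf("mlx4_en: %s: " fmt, (priv)->ifname,     \
                               ##__VA_ARGS__);                          \
                else                                                    \
                        printf("mlx4_en: %s: Port %d: " fmt,            \
                               (priv)->pcidev, (priv)->port,            \
                               ##__VA_ARGS__);                          \
        } while (0)

int main(void)
{
        struct fake_priv priv = { 0, 1, "eth2", "0000:07:00.0" };

        fake_en_print(&priv, "link up\n");      /* pre-registration form */
        priv.registered = 1;
        fake_en_print(&priv, "link up\n");      /* per-netdev form */
        return 0;
}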
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 0caf74cae8b..5887e4764d2 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
} else
mtt->page_shift = page_shift;
- for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
+ for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
++mtt->order;
mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
@@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
MLX4_MPT_PD_FLAG_RAE);
mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
- MLX4_MTT_ENTRY_PER_SEG);
+ dev->caps.mtts_per_seg);
} else {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
}
@@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
(start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
return -EINVAL;
- if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1))
+ if (start_index & (dev->caps.mtts_per_seg - 1))
return -EINVAL;
mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
@@ -402,7 +402,8 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
for (i = 0; i < npages; ++i)
mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
- dma_sync_single(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+ npages * sizeof (u64), DMA_TO_DEVICE);
return 0;
}
@@ -549,8 +550,8 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
for (i = 0; i < npages; ++i)
fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
- dma_sync_single(&dev->pdev->dev, fmr->dma_handle,
- npages * sizeof(u64), DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+ npages * sizeof(u64), DMA_TO_DEVICE);
fmr->mpt->key = cpu_to_be32(key);
fmr->mpt->lkey = cpu_to_be32(key);
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index cebdf3243ca..bd22df95adf 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
- profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+ profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
profile[MLX4_RES_QP].num = request->num_qp;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a400d7115f7..b4e18a58cb1 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -55,6 +55,7 @@
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <asm/system.h>
+#include <linux/list.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";
@@ -88,7 +89,24 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define MAC_ADDR_LOW 0x0014
#define MAC_ADDR_HIGH 0x0018
#define SDMA_CONFIG 0x001c
+#define TX_BURST_SIZE_16_64BIT 0x01000000
+#define TX_BURST_SIZE_4_64BIT 0x00800000
+#define BLM_TX_NO_SWAP 0x00000020
+#define BLM_RX_NO_SWAP 0x00000010
+#define RX_BURST_SIZE_16_64BIT 0x00000008
+#define RX_BURST_SIZE_4_64BIT 0x00000004
#define PORT_SERIAL_CONTROL 0x003c
+#define SET_MII_SPEED_TO_100 0x01000000
+#define SET_GMII_SPEED_TO_1000 0x00800000
+#define SET_FULL_DUPLEX_MODE 0x00200000
+#define MAX_RX_PACKET_9700BYTE 0x000a0000
+#define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000
+#define DO_NOT_FORCE_LINK_FAIL 0x00000400
+#define SERIAL_PORT_CONTROL_RESERVED 0x00000200
+#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008
+#define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004
+#define FORCE_LINK_PASS 0x00000002
+#define SERIAL_PORT_ENABLE 0x00000001
#define PORT_STATUS 0x0044
#define TX_FIFO_EMPTY 0x00000400
#define TX_IN_PROGRESS 0x00000080
@@ -106,7 +124,9 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define TX_BW_BURST 0x005c
#define INT_CAUSE 0x0060
#define INT_TX_END 0x07f80000
+#define INT_TX_END_0 0x00080000
#define INT_RX 0x000003fc
+#define INT_RX_0 0x00000004
#define INT_EXT 0x00000002
#define INT_CAUSE_EXT 0x0064
#define INT_EXT_LINK_PHY 0x00110000
@@ -135,15 +155,8 @@ static char mv643xx_eth_driver_version[] = "1.4";
/*
- * SDMA configuration register.
+ * SDMA configuration register default value.
*/
-#define RX_BURST_SIZE_4_64BIT (2 << 1)
-#define RX_BURST_SIZE_16_64BIT (4 << 1)
-#define BLM_RX_NO_SWAP (1 << 4)
-#define BLM_TX_NO_SWAP (1 << 5)
-#define TX_BURST_SIZE_4_64BIT (2 << 22)
-#define TX_BURST_SIZE_16_64BIT (4 << 22)
-
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
(RX_BURST_SIZE_4_64BIT | \
@@ -160,22 +173,11 @@ static char mv643xx_eth_driver_version[] = "1.4";
/*
- * Port serial control register.
+ * Misc definitions.
*/
-#define SET_MII_SPEED_TO_100 (1 << 24)
-#define SET_GMII_SPEED_TO_1000 (1 << 23)
-#define SET_FULL_DUPLEX_MODE (1 << 21)
-#define MAX_RX_PACKET_9700BYTE (5 << 17)
-#define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13)
-#define DO_NOT_FORCE_LINK_FAIL (1 << 10)
-#define SERIAL_PORT_CONTROL_RESERVED (1 << 9)
-#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1 << 3)
-#define DISABLE_AUTO_NEG_FOR_DUPLEX (1 << 2)
-#define FORCE_LINK_PASS (1 << 1)
-#define SERIAL_PORT_ENABLE (1 << 0)
-
-#define DEFAULT_RX_QUEUE_SIZE 128
-#define DEFAULT_TX_QUEUE_SIZE 256
+#define DEFAULT_RX_QUEUE_SIZE 128
+#define DEFAULT_TX_QUEUE_SIZE 256
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
/*
@@ -393,6 +395,7 @@ struct mv643xx_eth_private {
struct work_struct tx_timeout_task;
struct napi_struct napi;
+ u32 int_mask;
u8 oom;
u8 work_link;
u8 work_tx;
@@ -569,7 +572,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
if (rxq->rx_curr_desc == rxq->rx_ring_size)
rxq->rx_curr_desc = 0;
- dma_unmap_single(NULL, rx_desc->buf_ptr,
+ dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
rx_desc->buf_size, DMA_FROM_DEVICE);
rxq->rx_desc_count--;
rx++;
@@ -651,23 +654,20 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
refilled = 0;
while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
struct sk_buff *skb;
- int unaligned;
int rx;
struct rx_desc *rx_desc;
skb = __skb_dequeue(&mp->rx_recycle);
if (skb == NULL)
- skb = dev_alloc_skb(mp->skb_size +
- dma_get_cache_alignment() - 1);
+ skb = dev_alloc_skb(mp->skb_size);
if (skb == NULL) {
mp->oom = 1;
goto oom;
}
- unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
- if (unaligned)
- skb_reserve(skb, dma_get_cache_alignment() - unaligned);
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
refilled++;
rxq->rx_desc_count++;
@@ -678,8 +678,9 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
rx_desc = rxq->rx_desc_area + rx;
- rx_desc->buf_ptr = dma_map_single(NULL, skb->data,
- mp->skb_size, DMA_FROM_DEVICE);
+ rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
+ skb->data, mp->skb_size,
+ DMA_FROM_DEVICE);
rx_desc->buf_size = mp->skb_size;
rxq->rx_skb[rx] = skb;
wmb();
@@ -718,6 +719,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
int nr_frags = skb_shinfo(skb)->nr_frags;
int frag;
@@ -746,10 +748,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
desc->l4i_chk = 0;
desc->byte_cnt = this_frag->size;
- desc->buf_ptr = dma_map_page(NULL, this_frag->page,
- this_frag->page_offset,
- this_frag->size,
- DMA_TO_DEVICE);
+ desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
+ this_frag->page,
+ this_frag->page_offset,
+ this_frag->size, DMA_TO_DEVICE);
}
}
@@ -826,7 +828,8 @@ no_csum:
desc->l4i_chk = l4i_chk;
desc->byte_cnt = length;
- desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+ desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
+ length, DMA_TO_DEVICE);
__skb_queue_tail(&txq->tx_skb, skb);
@@ -956,18 +959,17 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
}
if (cmd_sts & TX_FIRST_DESC) {
- dma_unmap_single(NULL, desc->buf_ptr,
+ dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
desc->byte_cnt, DMA_TO_DEVICE);
} else {
- dma_unmap_page(NULL, desc->buf_ptr,
+ dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
desc->byte_cnt, DMA_TO_DEVICE);
}
if (skb != NULL) {
if (skb_queue_len(&mp->rx_recycle) <
mp->rx_ring_size &&
- skb_recycle_check(skb, mp->skb_size +
- dma_get_cache_alignment() - 1))
+ skb_recycle_check(skb, mp->skb_size))
__skb_queue_head(&mp->rx_recycle, skb);
else
dev_kfree_skb(skb);
@@ -1720,20 +1722,20 @@ static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
static u32 uc_addr_filter_mask(struct net_device *dev)
{
- struct dev_addr_list *uc_ptr;
+ struct netdev_hw_addr *ha;
u32 nibbles;
if (dev->flags & IFF_PROMISC)
return 0;
nibbles = 1 << (dev->dev_addr[5] & 0x0f);
- for (uc_ptr = dev->uc_list; uc_ptr != NULL; uc_ptr = uc_ptr->next) {
- if (memcmp(dev->dev_addr, uc_ptr->da_addr, 5))
+ list_for_each_entry(ha, &dev->uc_list, list) {
+ if (memcmp(dev->dev_addr, ha->addr, 5))
return 0;
- if ((dev->dev_addr[5] ^ uc_ptr->da_addr[5]) & 0xf0)
+ if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
return 0;
- nibbles |= 1 << (uc_ptr->da_addr[5] & 0x0f);
+ nibbles |= 1 << (ha->addr[5] & 0x0f);
}
return nibbles;
@@ -1807,7 +1809,6 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
int port_num;
u32 accept;
- int i;
oom:
port_num = mp->port_num;
@@ -1894,9 +1895,9 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
mp->rx_desc_sram_size);
rxq->rx_desc_dma = mp->rx_desc_sram_addr;
} else {
- rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
- &rxq->rx_desc_dma,
- GFP_KERNEL);
+ rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+ size, &rxq->rx_desc_dma,
+ GFP_KERNEL);
}
if (rxq->rx_desc_area == NULL) {
@@ -1947,7 +1948,7 @@ out_free:
if (index == 0 && size <= mp->rx_desc_sram_size)
iounmap(rxq->rx_desc_area);
else
- dma_free_coherent(NULL, size,
+ dma_free_coherent(mp->dev->dev.parent, size,
rxq->rx_desc_area,
rxq->rx_desc_dma);
@@ -1979,7 +1980,7 @@ static void rxq_deinit(struct rx_queue *rxq)
rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
iounmap(rxq->rx_desc_area);
else
- dma_free_coherent(NULL, rxq->rx_desc_area_size,
+ dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
rxq->rx_desc_area, rxq->rx_desc_dma);
kfree(rxq->rx_skb);
@@ -2007,9 +2008,9 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
mp->tx_desc_sram_size);
txq->tx_desc_dma = mp->tx_desc_sram_addr;
} else {
- txq->tx_desc_area = dma_alloc_coherent(NULL, size,
- &txq->tx_desc_dma,
- GFP_KERNEL);
+ txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+ size, &txq->tx_desc_dma,
+ GFP_KERNEL);
}
if (txq->tx_desc_area == NULL) {
@@ -2053,7 +2054,7 @@ static void txq_deinit(struct tx_queue *txq)
txq->tx_desc_area_size <= mp->tx_desc_sram_size)
iounmap(txq->tx_desc_area);
else
- dma_free_coherent(NULL, txq->tx_desc_area_size,
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma);
}
@@ -2064,15 +2065,16 @@ static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
u32 int_cause;
u32 int_cause_ext;
- int_cause = rdlp(mp, INT_CAUSE) & (INT_TX_END | INT_RX | INT_EXT);
+ int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
if (int_cause == 0)
return 0;
int_cause_ext = 0;
- if (int_cause & INT_EXT)
+ if (int_cause & INT_EXT) {
+ int_cause &= ~INT_EXT;
int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
+ }
- int_cause &= INT_TX_END | INT_RX;
if (int_cause) {
wrlp(mp, INT_CAUSE, ~int_cause);
mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
@@ -2179,6 +2181,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
if (mp->work_link) {
mp->work_link = 0;
handle_link_event(mp);
+ work_done++;
continue;
}
@@ -2217,7 +2220,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
if (mp->oom)
mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
napi_complete(napi);
- wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+ wrlp(mp, INT_MASK, mp->int_mask);
}
return work_done;
@@ -2338,6 +2341,14 @@ static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
* size field are ignored by the hardware.
*/
mp->skb_size = (skb_size + 7) & ~7;
+
+ /*
+ * If NET_SKB_PAD is smaller than a cache line,
+ * netdev_alloc_skb() will cause skb->data to be misaligned
+ * to a cache line boundary. If this is the case, include
+ * some extra space to allow re-aligning the data area.
+ */
+ mp->skb_size += SKB_DMA_REALIGN;
}
static int mv643xx_eth_open(struct net_device *dev)
@@ -2363,6 +2374,8 @@ static int mv643xx_eth_open(struct net_device *dev)
skb_queue_head_init(&mp->rx_recycle);
+ mp->int_mask = INT_EXT;
+
for (i = 0; i < mp->rxq_count; i++) {
err = rxq_init(mp, i);
if (err) {
@@ -2372,6 +2385,7 @@ static int mv643xx_eth_open(struct net_device *dev)
}
rxq_refill(mp->rxq + i, INT_MAX);
+ mp->int_mask |= INT_RX_0 << i;
}
if (mp->oom) {
@@ -2386,12 +2400,13 @@ static int mv643xx_eth_open(struct net_device *dev)
txq_deinit(mp->txq + i);
goto out_free;
}
+ mp->int_mask |= INT_TX_END_0 << i;
}
port_start(mp);
wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
- wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+ wrlp(mp, INT_MASK, mp->int_mask);
return 0;
@@ -2535,7 +2550,7 @@ static void mv643xx_eth_netpoll(struct net_device *dev)
mv643xx_eth_irq(dev->irq, dev);
- wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+ wrlp(mp, INT_MASK, mp->int_mask);
}
#endif
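SKB_DMA_REALIGN, added near the top of the mv643xx_eth.c diff, is the extra headroom reserved so that skb->data, which netdev_alloc_skb() places NET_SKB_PAD bytes into an otherwise cache-aligned buffer, lands back on a cache-line boundary before being DMA-mapped. A quick userspace check of the formula, with assumed (platform-dependent) values for NET_SKB_PAD and SMP_CACHE_BYTES:

#include <stdio.h>

#define PAGE_SIZE       4096
#define NET_SKB_PAD     32      /* assumed */
#define SMP_CACHE_BYTES 64      /* assumed: 64-byte cache lines */

#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

int main(void)
{
        unsigned int data_off = NET_SKB_PAD + SKB_DMA_REALIGN;

        printf("extra reserve = %u, data offset = %u (cache aligned: %s)\n",
               (unsigned int)SKB_DMA_REALIGN, data_off,
               (data_off % SMP_CACHE_BYTES) ? "no" : "yes");
        return 0;
}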
diff --git a/drivers/net/mvme147.c b/drivers/net/mvme147.c
index 435e5a847c4..93c709d63e2 100644
--- a/drivers/net/mvme147.c
+++ b/drivers/net/mvme147.c
@@ -57,6 +57,17 @@ typedef void (*writerap_t)(void *, unsigned short);
typedef void (*writerdp_t)(void *, unsigned short);
typedef unsigned short (*readrdp_t)(void *);
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = m147lance_open,
+ .ndo_stop = m147lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_multicast_list = lance_set_multicast,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
/* Initialise the one and only on-board 7990 */
struct net_device * __init mvme147lance_probe(int unit)
{
@@ -81,11 +92,7 @@ struct net_device * __init mvme147lance_probe(int unit)
/* Fill the dev fields */
dev->base_addr = (unsigned long)MVME147_LANCE_BASE;
- dev->open = &m147lance_open;
- dev->stop = &m147lance_close;
- dev->hard_start_xmit = &lance_start_xmit;
- dev->set_multicast_list = &lance_set_multicast;
- dev->tx_timeout = &lance_tx_timeout;
+ dev->netdev_ops = &lance_netdev_ops;
dev->dma = 0;
addr=(u_long *)ETHERNET_ADDRESS;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index f2c4a665e93..1f6e36ea669 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
-#define MYRI10GE_VERSION_STR "1.4.4-1.401"
+#define MYRI10GE_VERSION_STR "1.5.0-1.418"
MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -255,6 +255,7 @@ struct myri10ge_priv {
u32 read_write_dma;
u32 link_changes;
u32 msg_enable;
+ unsigned int board_number;
};
static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
@@ -266,6 +267,13 @@ static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
+#define MYRI10GE_MAX_BOARDS 8
+static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
+ {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
+module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
+ 0444);
+MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");
+
static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");
@@ -319,10 +327,6 @@ static int myri10ge_debug = -1; /* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
-static int myri10ge_lro = 1;
-module_param(myri10ge_lro, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload");
-
static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_lro_max_pkts,
@@ -361,6 +365,8 @@ static inline void put_be32(__be32 val, __be32 __iomem * p)
__raw_writel((__force __u32) val, (__force void __iomem *)p);
}
+static struct net_device_stats *myri10ge_get_stats(struct net_device *dev);
+
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
struct myri10ge_cmd *data, int atomic)
@@ -1290,7 +1296,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
remainder -= MYRI10GE_ALLOC_SIZE;
}
- if (mgp->csum_flag && myri10ge_lro) {
+ if (dev->features & NETIF_F_LRO) {
rx_frags[0].page_offset += MXGEFW_PAD;
rx_frags[0].size -= MXGEFW_PAD;
len -= MXGEFW_PAD;
@@ -1412,6 +1418,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
{
struct myri10ge_rx_done *rx_done = &ss->rx_done;
struct myri10ge_priv *mgp = ss->mgp;
+ struct net_device *netdev = mgp->dev;
unsigned long rx_bytes = 0;
unsigned long rx_packets = 0;
unsigned long rx_ok;
@@ -1445,7 +1452,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
ss->stats.rx_packets += rx_packets;
ss->stats.rx_bytes += rx_bytes;
- if (myri10ge_lro)
+ if (netdev->features & NETIF_F_LRO)
lro_flush_all(&rx_done->lro_mgr);
/* restock receive rings if needed */
@@ -1686,7 +1693,7 @@ myri10ge_get_ringparam(struct net_device *netdev,
ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
ring->rx_jumbo_max_pending = 0;
- ring->tx_max_pending = mgp->ss[0].rx_small.mask + 1;
+ ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
ring->rx_mini_pending = ring->rx_mini_max_pending;
ring->rx_pending = ring->rx_max_pending;
ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
@@ -1706,12 +1713,17 @@ static u32 myri10ge_get_rx_csum(struct net_device *netdev)
static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
+ int err = 0;
if (csum_enabled)
mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
- else
+ else {
+ u32 flags = ethtool_op_get_flags(netdev);
+ err = ethtool_op_set_flags(netdev, (flags & ~ETH_FLAG_LRO));
mgp->csum_flag = 0;
- return 0;
+
+ }
+ return err;
}
static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
@@ -1803,6 +1815,8 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
int slice;
int i;
+ /* force stats update */
+ (void)myri10ge_get_stats(netdev);
for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
data[i] = ((unsigned long *)&mgp->stats)[i];
@@ -1892,7 +1906,9 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
.get_sset_count = myri10ge_get_sset_count,
.get_ethtool_stats = myri10ge_get_ethtool_stats,
.set_msglevel = myri10ge_set_msglevel,
- .get_msglevel = myri10ge_get_msglevel
+ .get_msglevel = myri10ge_get_msglevel,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = ethtool_op_set_flags
};
static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
@@ -2671,7 +2687,7 @@ again:
/* we are out of transmit resources */
tx->stop_queue++;
netif_tx_stop_queue(netdev_queue);
- return 1;
+ return NETDEV_TX_BUSY;
}
/* Setup checksum offloading, if needed */
@@ -2876,7 +2892,6 @@ again:
tx->stop_queue++;
netif_tx_stop_queue(netdev_queue);
}
- dev->trans_start = jiffies;
return 0;
abort_linearize:
@@ -2969,6 +2984,7 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
struct net_device_stats *stats = &mgp->stats;
int i;
+ spin_lock(&mgp->stats_lock);
memset(stats, 0, sizeof(*stats));
for (i = 0; i < mgp->num_slices; i++) {
slice_stats = &mgp->ss[i].stats;
@@ -2979,6 +2995,7 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
stats->rx_dropped += slice_stats->rx_dropped;
stats->tx_dropped += slice_stats->tx_dropped;
}
+ spin_unlock(&mgp->stats_lock);
return stats;
}
@@ -3253,6 +3270,8 @@ abort:
static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
{
+ int overridden = 0;
+
if (myri10ge_force_firmware == 0) {
int link_width, exp_cap;
u16 lnk;
@@ -3286,10 +3305,18 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
}
}
if (myri10ge_fw_name != NULL) {
- dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
- myri10ge_fw_name);
+ overridden = 1;
mgp->fw_name = myri10ge_fw_name;
}
+ if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
+ myri10ge_fw_names[mgp->board_number] != NULL &&
+ strlen(myri10ge_fw_names[mgp->board_number])) {
+ mgp->fw_name = myri10ge_fw_names[mgp->board_number];
+ overridden = 1;
+ }
+ if (overridden)
+ dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
+ mgp->fw_name);
}
#ifdef CONFIG_PM
@@ -3754,6 +3781,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int status = -ENXIO;
int dac_enabled;
unsigned hdr_offset, ss_offset;
+ static int board_number;
netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
if (netdev == NULL) {
@@ -3770,6 +3798,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mgp->pause = myri10ge_flow_control;
mgp->intr_coal_delay = myri10ge_intr_coal_delay;
mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
+ mgp->board_number = board_number;
init_waitqueue_head(&mgp->down_wq);
if (pci_enable_device(pdev)) {
@@ -3884,6 +3913,13 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (dac_enabled)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_LRO;
+
+ netdev->vlan_features |= mgp->features;
+ if (mgp->fw_ver_tiny < 37)
+ netdev->vlan_features &= ~NETIF_F_TSO6;
+ if (mgp->fw_ver_tiny < 32)
+ netdev->vlan_features &= ~NETIF_F_TSO;
/* make sure we can get an irq, and that MSI can be
* setup (if available). Also ensure netdev->irq
@@ -3902,6 +3938,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
(unsigned long)mgp);
+ spin_lock_init(&mgp->stats_lock);
SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
status = register_netdev(netdev);
@@ -3919,6 +3956,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->irq, mgp->tx_boundary, mgp->fw_name,
(mgp->wc_enabled ? "Enabled" : "Disabled"));
+ board_number++;
return 0;
abort_with_state:
@@ -4008,6 +4046,8 @@ static struct pci_device_id myri10ge_pci_tbl[] = {
{0},
};
+MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);
+
static struct pci_driver myri10ge_driver = {
.name = "myri10ge",
.probe = myri10ge_probe,
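The myri10ge changes above add a per-board firmware override: myri10ge_fw_names[] is indexed by probe order (board_number) and, when non-empty, takes precedence over the single myri10ge_fw_name parameter. A userspace sketch of that precedence (the firmware file name used for board 1 is only an example):

#include <stdio.h>
#include <string.h>

#define MYRI10GE_MAX_BOARDS 8

static const char *pick_fw(const char *def, const char *global,
                           const char *per_board[], unsigned int board)
{
        const char *fw = def;

        if (global)                             /* myri10ge_fw_name */
                fw = global;
        if (board < MYRI10GE_MAX_BOARDS &&      /* myri10ge_fw_names[board] */
            per_board[board] && strlen(per_board[board]))
                fw = per_board[board];
        return fw;
}

int main(void)
{
        const char *per_board[MYRI10GE_MAX_BOARDS] = { NULL };

        per_board[1] = "myri10ge_rss_ethp_z8e.dat";     /* example only */
        printf("board 0: %s\n",
               pick_fw("myri10ge_ethp_z8e.dat", NULL, per_board, 0));
        printf("board 1: %s\n",
               pick_fw("myri10ge_ethp_z8e.dat", NULL, per_board, 1));
        return 0;
}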
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 9a802adba9a..5f0758bda6b 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -640,7 +640,7 @@ static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!TX_BUFFS_AVAIL(head, tail)) {
DTX(("no buffs available, returning 1\n"));
- return 1;
+ return NETDEV_TX_BUSY;
}
spin_lock_irqsave(&mp->irq_lock, flags);
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 7d83896b8c2..3fcebb70151 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -374,7 +374,7 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
dev->ethtool_ops = &ne2k_pci_ethtool_ops;
NS8390_init(dev, 0);
- memcpy(dev->dev_addr, SA_prom, 6);
+ memcpy(dev->dev_addr, SA_prom, dev->addr_len);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
i = register_netdev(dev);
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index 6a843f7350a..a00bbfb9aed 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -104,7 +104,7 @@ static int __init ne3210_eisa_probe (struct device *device)
}
SET_NETDEV_DEV(dev, device);
- device->driver_data = dev;
+ dev_set_drvdata(device, dev);
ioaddr = edev->base_addr;
if (!request_region(ioaddr, NE3210_IO_EXTENT, DRV_NAME)) {
@@ -225,7 +225,7 @@ static int __init ne3210_eisa_probe (struct device *device)
static int __devexit ne3210_eisa_remove (struct device *device)
{
- struct net_device *dev = device->driver_data;
+ struct net_device *dev = dev_get_drvdata(device);
unsigned long ioaddr = to_eisa_device (device)->base_addr;
unregister_netdev (dev);
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 1861d5bbd96..946366dcc99 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -301,6 +301,17 @@ netx_eth_phy_write(struct net_device *ndev, int phy_id, int reg, int value)
while (readl(NETX_MIIMU) & MIIMU_SNRDY);
}
+static const struct net_device_ops netx_eth_netdev_ops = {
+ .ndo_open = netx_eth_open,
+ .ndo_stop = netx_eth_close,
+ .ndo_start_xmit = netx_eth_hard_start_xmit,
+ .ndo_tx_timeout = netx_eth_timeout,
+ .ndo_set_multicast_list = netx_eth_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
static int netx_eth_enable(struct net_device *ndev)
{
struct netx_eth_priv *priv = netdev_priv(ndev);
@@ -309,12 +320,8 @@ static int netx_eth_enable(struct net_device *ndev)
ether_setup(ndev);
- ndev->open = netx_eth_open;
- ndev->stop = netx_eth_close;
- ndev->hard_start_xmit = netx_eth_hard_start_xmit;
- ndev->tx_timeout = netx_eth_timeout;
+ ndev->netdev_ops = &netx_eth_netdev_ops;
ndev->watchdog_timeo = msecs_to_jiffies(5000);
- ndev->set_multicast_list = netx_eth_set_multicast_list;
priv->msg_enable = NETIF_MSG_LINK;
priv->mii.phy_id_mask = 0x1f;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index c40815169f3..ab11c2b3f0f 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -34,10 +34,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/compiler.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -46,21 +42,16 @@
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
+#include <linux/firmware.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
-#include <linux/interrupt.h>
#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
#include <linux/vmalloc.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
#include "netxen_nic_hw.h"
@@ -84,10 +75,10 @@
(sizeof(struct netxen_rx_buffer) * rds_ring->num_desc)
#define STATUS_DESC_RINGSIZE(sds_ring) \
(sizeof(struct status_desc) * (sds_ring)->num_desc)
-#define TX_BUFF_RINGSIZE(adapter) \
- (sizeof(struct netxen_cmd_buffer) * adapter->num_txd)
-#define TX_DESC_RINGSIZE(adapter) \
- (sizeof(struct cmd_desc_type0) * adapter->num_txd)
+#define TX_BUFF_RINGSIZE(tx_ring) \
+ (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring) \
+ (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
@@ -118,6 +109,7 @@
#define NX_P3_A2 0x30
#define NX_P3_B0 0x40
#define NX_P3_B1 0x41
+#define NX_P3_B2 0x42
#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1)
#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0)
@@ -203,18 +195,10 @@
#define MAX_RCV_DESCRIPTORS_10G 4096
#define MAX_JUMBO_RCV_DESCRIPTORS 1024
#define MAX_LRO_RCV_DESCRIPTORS 8
-#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS
-#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS
-#define MAX_RCV_DESC MAX_RCV_DESCRIPTORS
-#define MAX_RCVSTATUS_DESC MAX_RCV_DESCRIPTORS
-#define MAX_EPG_DESCRIPTORS (MAX_CMD_DESCRIPTORS * 8)
-#define NUM_RCV_DESC (MAX_RCV_DESC + MAX_JUMBO_RCV_DESCRIPTORS + \
- MAX_LRO_RCV_DESCRIPTORS)
-#define MIN_TX_COUNT 4096
-#define MIN_RX_COUNT 4096
#define NETXEN_CTX_SIGNATURE 0xdee0
+#define NETXEN_CTX_SIGNATURE_V2 0x0002dee0
+#define NETXEN_CTX_RESET 0xbad0
#define NETXEN_RCV_PRODUCER(ringid) (ringid)
-#define MAX_FRAME_SIZE 0x10000 /* 64K MAX size for LSO */
#define PHAN_PEG_RCV_INITIALIZED 0xff01
#define PHAN_PEG_RCV_START_INITIALIZE 0xff00
@@ -253,12 +237,19 @@ typedef u32 netxen_ctx_msg;
#define netxen_set_msg_opcode(config_word, val) \
((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28)
-struct netxen_rcv_context {
- __le64 rcv_ring_addr;
- __le32 rcv_ring_size;
+struct netxen_rcv_ring {
+ __le64 addr;
+ __le32 size;
__le32 rsrvd;
};
+struct netxen_sts_ring {
+ __le64 addr;
+ __le32 size;
+ __le16 msi_index;
+ __le16 rsvd;
+};
+
struct netxen_ring_ctx {
/* one command ring */
@@ -268,13 +259,18 @@ struct netxen_ring_ctx {
__le32 rsrvd;
/* three receive rings */
- struct netxen_rcv_context rcv_ctx[3];
+ struct netxen_rcv_ring rcv_rings[NUM_RCV_DESC_RINGS];
- /* one status ring */
__le64 sts_ring_addr;
__le32 sts_ring_size;
__le32 ctx_id;
+
+ __le64 rsrvd_2[3];
+ __le32 sts_ring_count;
+ __le32 rsrvd_3;
+ struct netxen_sts_ring sts_rings[NUM_STS_DESC_RINGS];
+
} __attribute__ ((aligned(64)));
/*
@@ -373,6 +369,7 @@ struct rcv_desc {
/* opcode field in status_desc */
#define NETXEN_NIC_RXPKT_DESC 0x04
#define NETXEN_OLD_RXPKT_DESC 0x3f
+#define NETXEN_NIC_RESPONSE_DESC 0x05
/* for status field in status_desc */
#define STATUS_NEED_CKSUM (1)
@@ -382,13 +379,11 @@ struct rcv_desc {
#define STATUS_OWNER_HOST (0x1ULL << 56)
#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
-/* Note: sizeof(status_desc) should always be a mutliple of 2 */
-
-#define netxen_get_sts_desc_lro_cnt(status_desc) \
- ((status_desc)->lro & 0x7F)
-#define netxen_get_sts_desc_lro_last_frag(status_desc) \
- (((status_desc)->lro & 0x80) >> 7)
-
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
#define netxen_get_sts_port(sts_data) \
((sts_data) & 0x0F)
#define netxen_get_sts_status(sts_data) \
@@ -403,41 +398,15 @@ struct rcv_desc {
(((sts_data) >> 44) & 0x0F)
#define netxen_get_sts_pkt_offset(sts_data) \
(((sts_data) >> 48) & 0x1F)
+#define netxen_get_sts_desc_cnt(sts_data) \
+ (((sts_data) >> 53) & 0x7)
#define netxen_get_sts_opcode(sts_data) \
(((sts_data) >> 58) & 0x03F)
struct status_desc {
- /* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
- 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
- 53-55 desc_cnt, 56-57 owner, 58-63 opcode
- */
- __le64 status_desc_data;
- union {
- struct {
- __le32 hash_value;
- u8 hash_type;
- u8 msg_type;
- u8 unused;
- union {
- /* Bit pattern: 0-6 lro_count indicates frag
- * sequence, 7 last_frag indicates last frag
- */
- u8 lro;
-
- /* chained buffers */
- u8 nr_frags;
- };
- };
- struct {
- __le16 frag_handles[4];
- };
- };
+ __le64 status_desc_data[2];
} __attribute__ ((aligned(16)));
-enum {
- NETXEN_RCV_PEG_0 = 0,
- NETXEN_RCV_PEG_1
-};
/* The version of the main data structure */
#define NETXEN_BDINFO_VERSION 1
@@ -447,85 +416,35 @@ enum {
/* Max number of Gig ports on a Phantom board */
#define NETXEN_MAX_PORTS 4
-typedef enum {
- NETXEN_BRDTYPE_P1_BD = 0x0000,
- NETXEN_BRDTYPE_P1_SB = 0x0001,
- NETXEN_BRDTYPE_P1_SMAX = 0x0002,
- NETXEN_BRDTYPE_P1_SOCK = 0x0003,
-
- NETXEN_BRDTYPE_P2_SOCK_31 = 0x0008,
- NETXEN_BRDTYPE_P2_SOCK_35 = 0x0009,
- NETXEN_BRDTYPE_P2_SB35_4G = 0x000a,
- NETXEN_BRDTYPE_P2_SB31_10G = 0x000b,
- NETXEN_BRDTYPE_P2_SB31_2G = 0x000c,
-
- NETXEN_BRDTYPE_P2_SB31_10G_IMEZ = 0x000d,
- NETXEN_BRDTYPE_P2_SB31_10G_HMEZ = 0x000e,
- NETXEN_BRDTYPE_P2_SB31_10G_CX4 = 0x000f,
-
- NETXEN_BRDTYPE_P3_REF_QG = 0x0021,
- NETXEN_BRDTYPE_P3_HMEZ = 0x0022,
- NETXEN_BRDTYPE_P3_10G_CX4_LP = 0x0023,
- NETXEN_BRDTYPE_P3_4_GB = 0x0024,
- NETXEN_BRDTYPE_P3_IMEZ = 0x0025,
- NETXEN_BRDTYPE_P3_10G_SFP_PLUS = 0x0026,
- NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027,
- NETXEN_BRDTYPE_P3_XG_LOM = 0x0028,
- NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029,
- NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a,
- NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b,
- NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031,
- NETXEN_BRDTYPE_P3_10G_XFP = 0x0032,
- NETXEN_BRDTYPE_P3_10G_TP = 0x0080
-
-} netxen_brdtype_t;
-
-typedef enum {
- NETXEN_BRDMFG_INVENTEC = 1
-} netxen_brdmfg;
-
-typedef enum {
- MEM_ORG_128Mbx4 = 0x0, /* DDR1 only */
- MEM_ORG_128Mbx8 = 0x1, /* DDR1 only */
- MEM_ORG_128Mbx16 = 0x2, /* DDR1 only */
- MEM_ORG_256Mbx4 = 0x3,
- MEM_ORG_256Mbx8 = 0x4,
- MEM_ORG_256Mbx16 = 0x5,
- MEM_ORG_512Mbx4 = 0x6,
- MEM_ORG_512Mbx8 = 0x7,
- MEM_ORG_512Mbx16 = 0x8,
- MEM_ORG_1Gbx4 = 0x9,
- MEM_ORG_1Gbx8 = 0xa,
- MEM_ORG_1Gbx16 = 0xb,
- MEM_ORG_2Gbx4 = 0xc,
- MEM_ORG_2Gbx8 = 0xd,
- MEM_ORG_2Gbx16 = 0xe,
- MEM_ORG_128Mbx32 = 0x10002, /* GDDR only */
- MEM_ORG_256Mbx32 = 0x10005 /* GDDR only */
-} netxen_mn_mem_org_t;
-
-typedef enum {
- MEM_ORG_512Kx36 = 0x0,
- MEM_ORG_1Mx36 = 0x1,
- MEM_ORG_2Mx36 = 0x2
-} netxen_sn_mem_org_t;
-
-typedef enum {
- MEM_DEPTH_4MB = 0x1,
- MEM_DEPTH_8MB = 0x2,
- MEM_DEPTH_16MB = 0x3,
- MEM_DEPTH_32MB = 0x4,
- MEM_DEPTH_64MB = 0x5,
- MEM_DEPTH_128MB = 0x6,
- MEM_DEPTH_256MB = 0x7,
- MEM_DEPTH_512MB = 0x8,
- MEM_DEPTH_1GB = 0x9,
- MEM_DEPTH_2GB = 0xa,
- MEM_DEPTH_4GB = 0xb,
- MEM_DEPTH_8GB = 0xc,
- MEM_DEPTH_16GB = 0xd,
- MEM_DEPTH_32GB = 0xe
-} netxen_mem_depth_t;
+#define NETXEN_BRDTYPE_P1_BD 0x0000
+#define NETXEN_BRDTYPE_P1_SB 0x0001
+#define NETXEN_BRDTYPE_P1_SMAX 0x0002
+#define NETXEN_BRDTYPE_P1_SOCK 0x0003
+
+#define NETXEN_BRDTYPE_P2_SOCK_31 0x0008
+#define NETXEN_BRDTYPE_P2_SOCK_35 0x0009
+#define NETXEN_BRDTYPE_P2_SB35_4G 0x000a
+#define NETXEN_BRDTYPE_P2_SB31_10G 0x000b
+#define NETXEN_BRDTYPE_P2_SB31_2G 0x000c
+
+#define NETXEN_BRDTYPE_P2_SB31_10G_IMEZ 0x000d
+#define NETXEN_BRDTYPE_P2_SB31_10G_HMEZ 0x000e
+#define NETXEN_BRDTYPE_P2_SB31_10G_CX4 0x000f
+
+#define NETXEN_BRDTYPE_P3_REF_QG 0x0021
+#define NETXEN_BRDTYPE_P3_HMEZ 0x0022
+#define NETXEN_BRDTYPE_P3_10G_CX4_LP 0x0023
+#define NETXEN_BRDTYPE_P3_4_GB 0x0024
+#define NETXEN_BRDTYPE_P3_IMEZ 0x0025
+#define NETXEN_BRDTYPE_P3_10G_SFP_PLUS 0x0026
+#define NETXEN_BRDTYPE_P3_10000_BASE_T 0x0027
+#define NETXEN_BRDTYPE_P3_XG_LOM 0x0028
+#define NETXEN_BRDTYPE_P3_4_GB_MM 0x0029
+#define NETXEN_BRDTYPE_P3_10G_SFP_CT 0x002a
+#define NETXEN_BRDTYPE_P3_10G_SFP_QT 0x002b
+#define NETXEN_BRDTYPE_P3_10G_CX4 0x0031
+#define NETXEN_BRDTYPE_P3_10G_XFP 0x0032
+#define NETXEN_BRDTYPE_P3_10G_TP 0x0080
struct netxen_board_info {
u32 header_version;
@@ -676,17 +595,15 @@ struct netxen_new_user_info {
#define PRIMARY_IMAGE_BAD 0xffffffff
/* Flash memory map */
-typedef enum {
- NETXEN_CRBINIT_START = 0, /* Crbinit section */
- NETXEN_BRDCFG_START = 0x4000, /* board config */
- NETXEN_INITCODE_START = 0x6000, /* pegtune code */
- NETXEN_BOOTLD_START = 0x10000, /* bootld */
- NETXEN_IMAGE_START = 0x43000, /* compressed image */
- NETXEN_SECONDARY_START = 0x200000, /* backup images */
- NETXEN_PXE_START = 0x3E0000, /* user defined region */
- NETXEN_USER_START = 0x3E8000, /* User defined region for new boards */
- NETXEN_FIXED_START = 0x3F0000 /* backup of crbinit */
-} netxen_flash_map_t;
+#define NETXEN_CRBINIT_START 0 /* crbinit section */
+#define NETXEN_BRDCFG_START 0x4000 /* board config */
+#define NETXEN_INITCODE_START 0x6000 /* pegtune code */
+#define NETXEN_BOOTLD_START 0x10000 /* bootld */
+#define NETXEN_IMAGE_START 0x43000 /* compressed image */
+#define NETXEN_SECONDARY_START 0x200000 /* backup images */
+#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
+#define NETXEN_USER_START 0x3E8000 /* Firmware info */
+#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
#define NX_FW_VERSION_OFFSET (NETXEN_USER_START+0x408)
#define NX_FW_SIZE_OFFSET (NETXEN_USER_START+0x40c)
@@ -708,21 +625,8 @@ typedef enum {
#define NETXEN_FLASH_SECONDARY_SIZE (NETXEN_USER_START-NETXEN_SECONDARY_START)
#define NETXEN_NUM_PRIMARY_SECTORS (0x20)
#define NETXEN_NUM_CONFIG_SECTORS (1)
-#define PFX "NetXen: "
extern char netxen_nic_driver_name[];
-/* Note: Make sure to not call this before adapter->port is valid */
-#if !defined(NETXEN_DEBUG)
-#define DPRINTK(klevel, fmt, args...) do { \
- } while (0)
-#else
-#define DPRINTK(klevel, fmt, args...) do { \
- printk(KERN_##klevel PFX "%s: %s: " fmt, __func__,\
- (adapter != NULL && adapter->netdev != NULL) ? \
- adapter->netdev->name : NULL, \
- ## args); } while(0)
-#endif
-
/* Number of status descriptors to handle per interrupt */
#define MAX_STATUS_HANDLE (64)
@@ -732,7 +636,7 @@ extern char netxen_nic_driver_name[];
*/
struct netxen_skb_frag {
u64 dma;
- ulong length;
+ u64 length;
};
#define _netxen_set_bits(config_word, start, bits, val) {\
@@ -793,34 +697,24 @@ struct netxen_hardware_context {
u8 cut_through;
u8 revision_id;
+ u8 pci_func;
+ u8 linkup;
u16 port_type;
- int board_type;
- u32 linkup;
- /* Address of cmd ring in Phantom */
- struct cmd_desc_type0 *cmd_desc_head;
- dma_addr_t cmd_desc_phys_addr;
- struct netxen_adapter *adapter;
- int pci_func;
+ u16 board_type;
};
#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
#define ETHERNET_FCS_SIZE 4
struct netxen_adapter_stats {
- u64 rcvdbadskb;
u64 xmitcalled;
- u64 xmitedframes;
u64 xmitfinished;
- u64 badskblen;
- u64 nocmddescriptor;
- u64 polled;
u64 rxdropped;
u64 txdropped;
u64 csummed;
u64 no_rcv;
u64 rxbytes;
u64 txbytes;
- u64 ints;
};
/*
@@ -852,14 +746,25 @@ struct nx_host_sds_ring {
struct napi_struct napi;
struct list_head free_list[NUM_RCV_DESC_RINGS];
- u16 clean_tx;
- u16 post_rxd;
int irq;
dma_addr_t phys_addr;
char name[IFNAMSIZ+4];
};
+struct nx_host_tx_ring {
+ u32 producer;
+ __le32 *hw_consumer;
+ u32 sw_consumer;
+ u32 crb_cmd_producer;
+ u32 crb_cmd_consumer;
+ u32 num_desc;
+
+ struct netxen_cmd_buffer *cmd_buf_arr;
+ struct cmd_desc_type0 *desc_head;
+ dma_addr_t phys_addr;
+};
+
/*
* Receive context. There is one such structure per instance of the
* receive processing. Any state information that is relevant to
@@ -871,8 +776,11 @@ struct netxen_recv_context {
u16 context_id;
u16 virt_port;
- struct nx_host_rds_ring rds_rings[NUM_RCV_DESC_RINGS];
- struct nx_host_sds_ring sds_rings[NUM_STS_DESC_RINGS];
+ struct nx_host_rds_ring *rds_rings;
+ struct nx_host_sds_ring *sds_rings;
+
+ struct netxen_ring_ctx *hwctx;
+ dma_addr_t phys_addr;
};
/* New HW context creation */
@@ -1111,8 +1019,8 @@ typedef struct {
#define NETXEN_MAC_DEL 2
typedef struct nx_mac_list_s {
- struct nx_mac_list_s *next;
- uint8_t mac_addr[MAX_ADDR_LEN];
+ struct list_head list;
+ uint8_t mac_addr[ETH_ALEN+2];
} nx_mac_list_t;
/*
@@ -1154,31 +1062,118 @@ typedef struct {
#define NX_MAC_EVENT 0x1
-enum {
- NX_NIC_H2C_OPCODE_START = 0,
- NX_NIC_H2C_OPCODE_CONFIG_RSS,
- NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL,
- NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE,
- NX_NIC_H2C_OPCODE_CONFIG_LED,
- NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS,
- NX_NIC_H2C_OPCODE_CONFIG_L2_MAC,
- NX_NIC_H2C_OPCODE_LRO_REQUEST,
- NX_NIC_H2C_OPCODE_GET_SNMP_STATS,
- NX_NIC_H2C_OPCODE_PROXY_START_REQUEST,
- NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST,
- NX_NIC_H2C_OPCODE_PROXY_SET_MTU,
- NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE,
- NX_H2P_OPCODE_GET_FINGER_PRINT_REQUEST,
- NX_H2P_OPCODE_INSTALL_LICENSE_REQUEST,
- NX_H2P_OPCODE_GET_LICENSE_CAPABILITY_REQUEST,
- NX_NIC_H2C_OPCODE_GET_NET_STATS,
- NX_NIC_H2C_OPCODE_LAST
-};
+/*
+ * Driver --> Firmware
+ */
+#define NX_NIC_H2C_OPCODE_START 0
+#define NX_NIC_H2C_OPCODE_CONFIG_RSS 1
+#define NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL 2
+#define NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
+#define NX_NIC_H2C_OPCODE_CONFIG_LED 4
+#define NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
+#define NX_NIC_H2C_OPCODE_CONFIG_L2_MAC 6
+#define NX_NIC_H2C_OPCODE_LRO_REQUEST 7
+#define NX_NIC_H2C_OPCODE_GET_SNMP_STATS 8
+#define NX_NIC_H2C_OPCODE_PROXY_START_REQUEST 9
+#define NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
+#define NX_NIC_H2C_OPCODE_PROXY_SET_MTU 11
+#define NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
+#define NX_NIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
+#define NX_NIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
+#define NX_NIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
+#define NX_NIC_H2C_OPCODE_GET_NET_STATS 16
+#define NX_NIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
+#define NX_NIC_H2C_OPCODE_CONFIG_IPADDR 18
+#define NX_NIC_H2C_OPCODE_CONFIG_LOOPBACK 19
+#define NX_NIC_H2C_OPCODE_PROXY_STOP_DONE 20
+#define NX_NIC_H2C_OPCODE_GET_LINKEVENT 21
+#define NX_NIC_C2C_OPCODE 22
+#define NX_NIC_H2C_OPCODE_LAST 23
+
+/*
+ * Firmware --> Driver
+ */
+
+#define NX_NIC_C2H_OPCODE_START 128
+#define NX_NIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
+#define NX_NIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
+#define NX_NIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
+#define NX_NIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
+#define NX_NIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
+#define NX_NIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
+#define NX_NIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
+#define NX_NIC_C2H_OPCODE_GET_SNMP_STATS 136
+#define NX_NIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
+#define NX_NIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
+#define NX_NIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
+#define NX_NIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
+#define NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+#define NX_NIC_C2H_OPCODE_LAST 142
#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
+#define NX_FW_CAPABILITY_LINK_NOTIFICATION (1 << 5)
+#define NX_FW_CAPABILITY_SWITCHING (1 << 6)
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT 1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
+#define LINKEVENT_MODULE_OPTICAL_SRLR 3
+#define LINKEVENT_MODULE_OPTICAL_LRM 4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
+#define LINKEVENT_MODULE_TWINAX 8
+
+#define LINKSPEED_10GBPS 10000
+#define LINKSPEED_1GBPS 1000
+#define LINKSPEED_100MBPS 100
+#define LINKSPEED_10MBPS 10
+
+#define LINKSPEED_ENCODED_10MBPS 0
+#define LINKSPEED_ENCODED_100MBPS 1
+#define LINKSPEED_ENCODED_1GBPS 2
+
+#define LINKEVENT_AUTONEG_DISABLED 0
+#define LINKEVENT_AUTONEG_ENABLED 1
+
+#define LINKEVENT_HALF_DUPLEX 0
+#define LINKEVENT_FULL_DUPLEX 1
+
+#define LINKEVENT_LINKSPEED_MBPS 0
+#define LINKEVENT_LINKSPEED_ENCODED 1
+
+/* firmware response header:
+ * 63:58 - message type
+ * 57:56 - owner
+ * 55:53 - desc count
+ * 52:48 - reserved
+ * 47:40 - completion id
+ * 39:32 - opcode
+ * 31:16 - error code
+ * 15:00 - reserved
+ */
+#define netxen_get_nic_msgtype(msg_hdr) \
+ ((msg_hdr >> 58) & 0x3F)
+#define netxen_get_nic_msg_compid(msg_hdr) \
+ ((msg_hdr >> 40) & 0xFF)
+#define netxen_get_nic_msg_opcode(msg_hdr) \
+ ((msg_hdr >> 32) & 0xFF)
+#define netxen_get_nic_msg_errcode(msg_hdr) \
+ ((msg_hdr >> 16) & 0xFFFF)
+
+typedef struct {
+ union {
+ struct {
+ u64 hdr;
+ u64 body[7];
+ };
+ u64 words[8];
+ };
+} nx_fw_msg_t;
+
typedef struct {
__le64 qhdr;
__le64 req_hdr;
@@ -1218,99 +1213,96 @@ struct netxen_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
- int pci_using_dac;
- struct net_device_stats net_stats;
- int mtu;
- int portnum;
- u8 physical_port;
- u16 tx_context_id;
-
- uint8_t mc_enabled;
- uint8_t max_mc_count;
- nx_mac_list_t *mac_list;
-
- struct netxen_legacy_intr_set legacy_intr;
-
- struct work_struct watchdog_task;
- struct timer_list watchdog_timer;
- struct work_struct tx_timeout_task;
+ struct list_head mac_list;
u32 curr_window;
u32 crb_win;
rwlock_t adapter_lock;
- u32 cmd_producer;
- __le32 *cmd_consumer;
- u32 last_cmd_consumer;
- u32 crb_addr_cmd_producer;
- u32 crb_addr_cmd_consumer;
spinlock_t tx_clean_lock;
- u32 num_txd;
- u32 num_rxd;
- u32 num_jumbo_rxd;
- u32 num_lro_rxd;
+ u16 num_txd;
+ u16 num_rxd;
+ u16 num_jumbo_rxd;
+ u16 num_lro_rxd;
+
+ u8 max_rds_rings;
+ u8 max_sds_rings;
+ u8 driver_mismatch;
+ u8 msix_supported;
+ u8 rx_csum;
+ u8 pci_using_dac;
+ u8 portnum;
+ u8 physical_port;
+
+ u8 mc_enabled;
+ u8 max_mc_count;
+ u8 rss_supported;
+ u8 resv2;
+ u32 resv3;
+
+ u8 has_link_events;
+ u8 resv1;
+ u16 tx_context_id;
+ u16 mtu;
+ u16 is_up;
- int max_rds_rings;
- int max_sds_rings;
+ u16 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+ u16 module_type;
+ u32 capabilities;
u32 flags;
u32 irq;
- int driver_mismatch;
u32 temp;
- u32 fw_major;
- u32 fw_version;
-
- int msix_supported;
- struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+ u32 msi_tgt_status;
+ u32 resv4;
struct netxen_adapter_stats stats;
- u16 link_speed;
- u16 link_duplex;
- u16 state;
- u16 link_autoneg;
- int rx_csum;
-
- struct netxen_cmd_buffer *cmd_buf_arr; /* Command buffers for xmit */
-
- /*
- * Receive instances. These can be either one per port,
- * or one per peg, etc.
- */
struct netxen_recv_context recv_ctx;
+ struct nx_host_tx_ring *tx_ring;
- int is_up;
- struct netxen_dummy_dma dummy_dma;
- nx_nic_intr_coalesce_t coal;
-
- /* Context interface shared between card and host */
- struct netxen_ring_ctx *ctx_desc;
- dma_addr_t ctx_desc_phys_addr;
- int intr_scheme;
- int msi_mode;
int (*enable_phy_interrupts) (struct netxen_adapter *);
int (*disable_phy_interrupts) (struct netxen_adapter *);
- int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t);
+ int (*macaddr_set) (struct netxen_adapter *, u8 *);
int (*set_mtu) (struct netxen_adapter *, int);
int (*set_promisc) (struct netxen_adapter *, u32);
+ void (*set_multi) (struct net_device *);
int (*phy_read) (struct netxen_adapter *, long reg, u32 *);
int (*phy_write) (struct netxen_adapter *, long reg, u32 val);
int (*init_port) (struct netxen_adapter *, int);
int (*stop_port) (struct netxen_adapter *);
- int (*hw_read_wx)(struct netxen_adapter *, ulong, void *, int);
- int (*hw_write_wx)(struct netxen_adapter *, ulong, void *, int);
+ u32 (*hw_read_wx)(struct netxen_adapter *, ulong);
+ int (*hw_write_wx)(struct netxen_adapter *, ulong, u32);
int (*pci_mem_read)(struct netxen_adapter *, u64, void *, int);
int (*pci_mem_write)(struct netxen_adapter *, u64, void *, int);
int (*pci_write_immediate)(struct netxen_adapter *, u64, u32);
u32 (*pci_read_immediate)(struct netxen_adapter *, u64);
- void (*pci_write_normalize)(struct netxen_adapter *, u64, u32);
- u32 (*pci_read_normalize)(struct netxen_adapter *, u64);
unsigned long (*pci_set_window)(struct netxen_adapter *,
unsigned long long);
-}; /* netxen_adapter structure */
+
+ struct netxen_legacy_intr_set legacy_intr;
+
+ struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+
+ struct netxen_dummy_dma dummy_dma;
+
+ struct work_struct watchdog_task;
+ struct timer_list watchdog_timer;
+ struct work_struct tx_timeout_task;
+
+ struct net_device_stats net_stats;
+
+ nx_nic_intr_coalesce_t coal;
+
+ u32 fw_major;
+ u32 fw_version;
+ const struct firmware *fw;
+};
/*
* NetXen dma watchdog control structure
@@ -1330,46 +1322,6 @@ struct netxen_adapter {
#define netxen_get_dma_watchdog_disabled(config_word) \
(((config_word) >> 1) & 0x1)
-/* Max number of xmit producer threads that can run simultaneously */
-#define MAX_XMIT_PRODUCERS 16
-
-#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
- ((adapter)->ahw.pci_base0 + (off))
-#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
- ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START)
-#define PCI_OFFSET_THIRD_RANGE(adapter, off) \
- ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START)
-
-static inline void __iomem *pci_base_offset(struct netxen_adapter *adapter,
- unsigned long off)
-{
- if ((off < FIRST_PAGE_GROUP_END) && (off >= FIRST_PAGE_GROUP_START)) {
- return (adapter->ahw.pci_base0 + off);
- } else if ((off < SECOND_PAGE_GROUP_END) &&
- (off >= SECOND_PAGE_GROUP_START)) {
- return (adapter->ahw.pci_base1 + off - SECOND_PAGE_GROUP_START);
- } else if ((off < THIRD_PAGE_GROUP_END) &&
- (off >= THIRD_PAGE_GROUP_START)) {
- return (adapter->ahw.pci_base2 + off - THIRD_PAGE_GROUP_START);
- }
- return NULL;
-}
-
-static inline void __iomem *pci_base(struct netxen_adapter *adapter,
- unsigned long off)
-{
- if ((off < FIRST_PAGE_GROUP_END) && (off >= FIRST_PAGE_GROUP_START)) {
- return adapter->ahw.pci_base0;
- } else if ((off < SECOND_PAGE_GROUP_END) &&
- (off >= SECOND_PAGE_GROUP_START)) {
- return adapter->ahw.pci_base1;
- } else if ((off < THIRD_PAGE_GROUP_END) &&
- (off >= THIRD_PAGE_GROUP_START)) {
- return adapter->ahw.pci_base2;
- }
- return NULL;
-}
-
int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter);
int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter);
int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter);
@@ -1382,21 +1334,22 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter,
/* Functions available from netxen_nic_hw.c */
int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu);
int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu);
-void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val);
-int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off);
-void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value);
-void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 *value);
-void netxen_nic_write_w1(struct netxen_adapter *adapter, u32 index, u32 value);
-void netxen_nic_read_w1(struct netxen_adapter *adapter, u32 index, u32 *value);
+
+int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr);
+int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr);
+
+#define NXRD32(adapter, off) \
+ (adapter->hw_read_wx(adapter, off))
+#define NXWR32(adapter, off, val) \
+ (adapter->hw_write_wx(adapter, off, val))
int netxen_nic_get_board_info(struct netxen_adapter *adapter);
void netxen_nic_get_firmware_info(struct netxen_adapter *adapter);
int netxen_nic_wol_supported(struct netxen_adapter *adapter);
-int netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter,
- ulong off, void *data, int len);
+u32 netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off);
int netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter,
- ulong off, void *data, int len);
+ ulong off, u32 data);
int netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
u64 off, void *data, int size);
int netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
@@ -1412,16 +1365,13 @@ unsigned long netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
void netxen_nic_pci_change_crbwindow_128M(struct netxen_adapter *adapter,
u32 wndw);
-int netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter,
- ulong off, void *data, int len);
+u32 netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off);
int netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter,
- ulong off, void *data, int len);
+ ulong off, u32 data);
int netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
u64 off, void *data, int size);
int netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
u64 off, void *data, int size);
-void netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
- unsigned long off, int data);
int netxen_nic_pci_write_immediate_2M(struct netxen_adapter *adapter,
u64 off, u32 data);
u32 netxen_nic_pci_read_immediate_2M(struct netxen_adapter *adapter, u64 off);
@@ -1435,8 +1385,9 @@ unsigned long netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
void netxen_free_adapter_offload(struct netxen_adapter *adapter);
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter);
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
-int netxen_receive_peg_ready(struct netxen_adapter *adapter);
int netxen_load_firmware(struct netxen_adapter *adapter);
+void netxen_request_firmware(struct netxen_adapter *adapter);
+void netxen_release_firmware(struct netxen_adapter *adapter);
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
@@ -1475,6 +1426,8 @@ void netxen_p3_free_mac_list(struct netxen_adapter *adapter);
int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32);
int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
int netxen_config_rss(struct netxen_adapter *adapter, int enable);
+int netxen_linkevent_request(struct netxen_adapter *adapter, int enable);
+void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
@@ -1483,7 +1436,7 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p);
struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
- uint32_t crb_producer);
+ struct nx_host_tx_ring *tx_ring, uint32_t crb_producer);
/*
* NetXen Board information
@@ -1491,7 +1444,7 @@ void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
#define NETXEN_MAX_SHORT_NAME 32
struct netxen_brdinfo {
- netxen_brdtype_t brdtype; /* type of board */
+ int brdtype; /* type of board */
long ports; /* max no of physical ports */
char short_name[NETXEN_MAX_SHORT_NAME];
};
@@ -1541,17 +1494,15 @@ dma_watchdog_shutdown_request(struct netxen_adapter *adapter)
u32 ctrl;
/* check if already inactive */
- if (adapter->hw_read_wx(adapter,
- NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4))
- printk(KERN_ERR "failed to read dma watchdog status\n");
+ ctrl = adapter->hw_read_wx(adapter,
+ NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL));
if (netxen_get_dma_watchdog_enabled(ctrl) == 0)
return 1;
/* Send the disable request */
netxen_set_dma_watchdog_disable_req(ctrl);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), ctrl);
+ NXWR32(adapter, NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), ctrl);
return 0;
}
@@ -1561,9 +1512,8 @@ dma_watchdog_shutdown_poll_result(struct netxen_adapter *adapter)
{
u32 ctrl;
- if (adapter->hw_read_wx(adapter,
- NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4))
- printk(KERN_ERR "failed to read dma watchdog status\n");
+ ctrl = adapter->hw_read_wx(adapter,
+ NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL));
return (netxen_get_dma_watchdog_enabled(ctrl) == 0);
}
@@ -1573,9 +1523,8 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter)
{
u32 ctrl;
- if (adapter->hw_read_wx(adapter,
- NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4))
- printk(KERN_ERR "failed to read dma watchdog status\n");
+ ctrl = adapter->hw_read_wx(adapter,
+ NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL));
if (netxen_get_dma_watchdog_enabled(ctrl))
return 1;
@@ -1583,8 +1532,7 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter)
/* send the wakeup request */
netxen_set_dma_watchdog_enable_req(ctrl);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), ctrl);
+ NXWR32(adapter, NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), ctrl);
return 0;
}
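
The prototype changes above are the heart of this patch: the old hw_read_wx()/hw_write_wx() pair took a pointer and a length (always 4) and returned a status, while the new netxen_nic_hw_read_wx_2M() returns the 32-bit value directly and the write side takes the value by copy. That is what lets the DMA-watchdog helpers just above drop their failed-to-read error branches. Below is a minimal user-space model of the new accessor shape; the fake_adapter struct, the nx_rd32()/nx_wr32() names and the watchdog offset/bit are illustrative assumptions, not the driver's NXRD32()/NXWR32() definitions.

#include <stdint.h>
#include <stdio.h>

/* stand-in for the adapter's CRB register space */
struct fake_adapter {
	uint32_t regs[64];
};

/* value-returning read, the shape of the new u32 hw_read_wx_2M() */
static uint32_t nx_rd32(struct fake_adapter *a, unsigned long off)
{
	return a->regs[off / 4];
}

/* write takes the value directly instead of a pointer and a length */
static void nx_wr32(struct fake_adapter *a, unsigned long off, uint32_t data)
{
	a->regs[off / 4] = data;
}

#define WATCHDOG_CTRL   0x10	/* illustrative offset */
#define WATCHDOG_ENABLE 0x1	/* illustrative enable-request bit */

int main(void)
{
	struct fake_adapter a = { .regs = { 0 } };

	/* read-modify-write, mirroring dma_watchdog_wakeup() above */
	uint32_t ctrl = nx_rd32(&a, WATCHDOG_CTRL);
	ctrl |= WATCHDOG_ENABLE;
	nx_wr32(&a, WATCHDOG_CTRL, ctrl);

	printf("ctrl = 0x%x\n", (unsigned)nx_rd32(&a, WATCHDOG_CTRL));
	return 0;
}

With a value-returning read, a read-modify-write of a control register collapses to three straight-line statements, which is the pattern every converted caller below follows.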
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 9234473bc08..4754f5cffad 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -41,8 +41,7 @@ netxen_api_lock(struct netxen_adapter *adapter)
for (;;) {
/* Acquire PCIE HW semaphore5 */
- netxen_nic_read_w0(adapter,
- NETXEN_PCIE_REG(PCIE_SEM5_LOCK), &done);
+ done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_LOCK));
if (done == 1)
break;
@@ -56,7 +55,7 @@ netxen_api_lock(struct netxen_adapter *adapter)
}
#if 0
- netxen_nic_write_w1(adapter,
+ NXWR32(adapter,
NETXEN_API_LOCK_ID, NX_OS_API_LOCK_DRIVER);
#endif
return 0;
@@ -65,11 +64,8 @@ netxen_api_lock(struct netxen_adapter *adapter)
static int
netxen_api_unlock(struct netxen_adapter *adapter)
{
- u32 val;
-
/* Release PCIE HW semaphore5 */
- netxen_nic_read_w0(adapter,
- NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK), &val);
+ NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK));
return 0;
}
@@ -86,7 +82,7 @@ netxen_poll_rsp(struct netxen_adapter *adapter)
if (++timeout > NX_OS_CRB_RETRY_COUNT)
return NX_CDRP_RSP_TIMEOUT;
- netxen_nic_read_w1(adapter, NX_CDRP_CRB_OFFSET, &rsp);
+ rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
} while (!NX_CDRP_IS_RSP(rsp));
return rsp;
@@ -106,16 +102,15 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
if (netxen_api_lock(adapter))
return NX_RCODE_TIMEOUT;
- netxen_nic_write_w1(adapter, NX_SIGN_CRB_OFFSET, signature);
+ NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
- netxen_nic_write_w1(adapter, NX_ARG1_CRB_OFFSET, arg1);
+ NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1);
- netxen_nic_write_w1(adapter, NX_ARG2_CRB_OFFSET, arg2);
+ NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2);
- netxen_nic_write_w1(adapter, NX_ARG3_CRB_OFFSET, arg3);
+ NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3);
- netxen_nic_write_w1(adapter, NX_CDRP_CRB_OFFSET,
- NX_CDRP_FORM_CMD(cmd));
+ NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd));
rsp = netxen_poll_rsp(adapter);
@@ -125,7 +120,7 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
rcode = NX_RCODE_TIMEOUT;
} else if (rsp == NX_CDRP_RSP_FAIL) {
- netxen_nic_read_w1(adapter, NX_ARG1_CRB_OFFSET, &rcode);
+ rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
printk(KERN_ERR "%s: failed card response code:0x%x\n",
netxen_nic_driver_name, rcode);
@@ -328,6 +323,8 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
int err = 0;
u64 offset, phys_addr;
dma_addr_t rq_phys_addr, rsp_phys_addr;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
rq_addr = pci_alloc_consistent(adapter->pdev,
@@ -362,15 +359,13 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);
- offset = adapter->ctx_desc_phys_addr+sizeof(struct netxen_ring_ctx);
+ offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
prq->cmd_cons_dma_addr = cpu_to_le64(offset);
prq_cds = &prq->cds_ring;
- prq_cds->host_phys_addr =
- cpu_to_le64(adapter->ahw.cmd_desc_phys_addr);
-
- prq_cds->ring_size = cpu_to_le32(adapter->num_txd);
+ prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+ prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
phys_addr = rq_phys_addr;
err = netxen_issue_cmd(adapter,
@@ -383,8 +378,7 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
if (err == NX_RCODE_SUCCESS) {
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
- adapter->crb_addr_cmd_producer =
- NETXEN_NIC_REG(temp - 0x200);
+ tx_ring->crb_cmd_producer = NETXEN_NIC_REG(temp - 0x200);
#if 0
adapter->tx_state =
le32_to_cpu(prsp->host_ctx_state);
@@ -448,7 +442,19 @@ static struct netxen_recv_crb recv_crb_registers[] = {
NETXEN_NIC_REG(0x120)
},
/* crb_sts_consumer: */
- NETXEN_NIC_REG(0x138),
+ {
+ NETXEN_NIC_REG(0x138),
+ NETXEN_NIC_REG_2(0x000),
+ NETXEN_NIC_REG_2(0x004),
+ NETXEN_NIC_REG_2(0x008),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_0,
+ NETXEN_NIC_REG_2(0x044),
+ NETXEN_NIC_REG_2(0x048),
+ NETXEN_NIC_REG_2(0x04c),
+ },
},
/* Instance 1 */
{
@@ -461,7 +467,19 @@ static struct netxen_recv_crb recv_crb_registers[] = {
NETXEN_NIC_REG(0x164)
},
/* crb_sts_consumer: */
- NETXEN_NIC_REG(0x17c),
+ {
+ NETXEN_NIC_REG(0x17c),
+ NETXEN_NIC_REG_2(0x020),
+ NETXEN_NIC_REG_2(0x024),
+ NETXEN_NIC_REG_2(0x028),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_1,
+ NETXEN_NIC_REG_2(0x064),
+ NETXEN_NIC_REG_2(0x068),
+ NETXEN_NIC_REG_2(0x06c),
+ },
},
/* Instance 2 */
{
@@ -474,7 +492,19 @@ static struct netxen_recv_crb recv_crb_registers[] = {
NETXEN_NIC_REG(0x208)
},
/* crb_sts_consumer: */
- NETXEN_NIC_REG(0x220),
+ {
+ NETXEN_NIC_REG(0x220),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_2,
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
},
/* Instance 3 */
{
@@ -487,7 +517,19 @@ static struct netxen_recv_crb recv_crb_registers[] = {
NETXEN_NIC_REG(0x24c)
},
/* crb_sts_consumer: */
- NETXEN_NIC_REG(0x264),
+ {
+ NETXEN_NIC_REG(0x264),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_3,
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
},
};
@@ -497,84 +539,91 @@ netxen_init_old_ctx(struct netxen_adapter *adapter)
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
int ring;
- int func_id = adapter->portnum;
-
- adapter->ctx_desc->cmd_ring_addr =
- cpu_to_le64(adapter->ahw.cmd_desc_phys_addr);
- adapter->ctx_desc->cmd_ring_size =
- cpu_to_le32(adapter->num_txd);
+ int port = adapter->portnum;
+ struct netxen_ring_ctx *hwctx;
+ u32 signature;
+ tx_ring = adapter->tx_ring;
recv_ctx = &adapter->recv_ctx;
+ hwctx = recv_ctx->hwctx;
+
+ hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
+ hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);
+
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
- adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr =
+ hwctx->rcv_rings[ring].addr =
cpu_to_le64(rds_ring->phys_addr);
- adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
+ hwctx->rcv_rings[ring].size =
cpu_to_le32(rds_ring->num_desc);
}
- sds_ring = &recv_ctx->sds_rings[0];
- adapter->ctx_desc->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
- adapter->ctx_desc->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
-
- adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_LO(func_id),
- lower32(adapter->ctx_desc_phys_addr));
- adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_HI(func_id),
- upper32(adapter->ctx_desc_phys_addr));
- adapter->pci_write_normalize(adapter, CRB_CTX_SIGNATURE_REG(func_id),
- NETXEN_CTX_SIGNATURE | func_id);
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (ring == 0) {
+ hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
+ hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
+ }
+ hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
+ hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
+ hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
+ }
+ hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);
+
+ signature = (adapter->max_sds_rings > 1) ?
+ NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;
+
+ NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
+ lower32(recv_ctx->phys_addr));
+ NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
+ upper32(recv_ctx->phys_addr));
+ NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
+ signature | port);
return 0;
}
-static uint32_t sw_int_mask[4] = {
- CRB_SW_INT_MASK_0, CRB_SW_INT_MASK_1,
- CRB_SW_INT_MASK_2, CRB_SW_INT_MASK_3
-};
-
int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
{
- struct netxen_hardware_context *hw = &adapter->ahw;
- u32 state = 0;
void *addr;
int err = 0;
int ring;
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
+ int port = adapter->portnum;
- err = netxen_receive_peg_ready(adapter);
- if (err) {
- printk(KERN_ERR "Rcv Peg initialization not complete:%x.\n",
- state);
- return err;
- }
+ recv_ctx = &adapter->recv_ctx;
+ tx_ring = adapter->tx_ring;
addr = pci_alloc_consistent(pdev,
sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
- &adapter->ctx_desc_phys_addr);
-
+ &recv_ctx->phys_addr);
if (addr == NULL) {
dev_err(&pdev->dev, "failed to allocate hw context\n");
return -ENOMEM;
}
+
memset(addr, 0, sizeof(struct netxen_ring_ctx));
- adapter->ctx_desc = (struct netxen_ring_ctx *)addr;
- adapter->ctx_desc->ctx_id = cpu_to_le32(adapter->portnum);
- adapter->ctx_desc->cmd_consumer_offset =
- cpu_to_le64(adapter->ctx_desc_phys_addr +
+ recv_ctx->hwctx = (struct netxen_ring_ctx *)addr;
+ recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
+ recv_ctx->hwctx->cmd_consumer_offset =
+ cpu_to_le64(recv_ctx->phys_addr +
sizeof(struct netxen_ring_ctx));
- adapter->cmd_consumer =
+ tx_ring->hw_consumer =
(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
/* cmd desc ring */
- addr = pci_alloc_consistent(pdev,
- TX_DESC_RINGSIZE(adapter),
- &hw->cmd_desc_phys_addr);
+ addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr);
if (addr == NULL) {
dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
@@ -582,9 +631,7 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
return -ENOMEM;
}
- hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
-
- recv_ctx = &adapter->recv_ctx;
+ tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
@@ -602,8 +649,7 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
if (adapter->fw_major < 4)
rds_ring->crb_rcv_producer =
- recv_crb_registers[adapter->portnum].
- crb_rcv_producer[ring];
+ recv_crb_registers[port].crb_rcv_producer[ring];
}
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
@@ -620,13 +666,16 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
goto err_out_free;
}
sds_ring->desc_head = (struct status_desc *)addr;
+
+ sds_ring->crb_sts_consumer =
+ recv_crb_registers[port].crb_sts_consumer[ring];
+
+ sds_ring->crb_intr_mask =
+ recv_crb_registers[port].sw_int_mask[ring];
}
if (adapter->fw_major >= 4) {
- adapter->intr_scheme = INTR_SCHEME_PERPORT;
- adapter->msi_mode = MSI_MODE_MULTIFUNC;
-
err = nx_fw_cmd_create_rx_ctx(adapter);
if (err)
goto err_out_free;
@@ -634,23 +683,11 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
if (err)
goto err_out_free;
} else {
- sds_ring = &recv_ctx->sds_rings[0];
- sds_ring->crb_sts_consumer =
- recv_crb_registers[adapter->portnum].crb_sts_consumer;
-
- adapter->intr_scheme = adapter->pci_read_normalize(adapter,
- CRB_NIC_CAPABILITIES_FW);
- adapter->msi_mode = adapter->pci_read_normalize(adapter,
- CRB_NIC_MSI_MODE_FW);
- recv_ctx->sds_rings[0].crb_intr_mask =
- sw_int_mask[adapter->portnum];
-
err = netxen_init_old_ctx(adapter);
if (err) {
netxen_free_hw_resources(adapter);
return err;
}
-
}
return 0;
@@ -665,32 +702,40 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
int ring;
+ int port = adapter->portnum;
+
if (adapter->fw_major >= 4) {
nx_fw_cmd_destroy_tx_ctx(adapter);
nx_fw_cmd_destroy_rx_ctx(adapter);
+ } else {
+ netxen_api_lock(adapter);
+ NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
+ NETXEN_CTX_RESET | port);
+ netxen_api_unlock(adapter);
}
- if (adapter->ctx_desc != NULL) {
+ recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->hwctx != NULL) {
pci_free_consistent(adapter->pdev,
sizeof(struct netxen_ring_ctx) +
sizeof(uint32_t),
- adapter->ctx_desc,
- adapter->ctx_desc_phys_addr);
- adapter->ctx_desc = NULL;
+ recv_ctx->hwctx,
+ recv_ctx->phys_addr);
+ recv_ctx->hwctx = NULL;
}
- if (adapter->ahw.cmd_desc_head != NULL) {
+ tx_ring = adapter->tx_ring;
+ if (tx_ring->desc_head != NULL) {
pci_free_consistent(adapter->pdev,
- sizeof(struct cmd_desc_type0) *
- adapter->num_txd,
- adapter->ahw.cmd_desc_head,
- adapter->ahw.cmd_desc_phys_addr);
- adapter->ahw.cmd_desc_head = NULL;
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
}
- recv_ctx = &adapter->recv_ctx;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
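
netxen_alloc_hw_resources() now pulls both the status-ring consumer offset and the interrupt-mask offset out of recv_crb_registers, indexed by port and by ring, instead of a single per-port scalar plus the removed global sw_int_mask[] table. A self-contained sketch of that per-port, per-ring lookup follows; the array sizes and numeric offsets are made up for illustration (the real table mixes NETXEN_NIC_REG()/NETXEN_NIC_REG_2() offsets and the CRB_SW_INT_MASK_n constants shown above).

#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS     2		/* illustrative, not the driver's value */
#define MAX_SDS_RINGS 4

struct recv_crb {
	uint32_t crb_sts_consumer[MAX_SDS_RINGS];
	uint32_t sw_int_mask[MAX_SDS_RINGS];
};

static const struct recv_crb recv_crb_table[MAX_PORTS] = {
	{ { 0x138, 0x000, 0x004, 0x008 }, { 0x3d8, 0x044, 0x048, 0x04c } },
	{ { 0x17c, 0x020, 0x024, 0x028 }, { 0x3dc, 0x064, 0x068, 0x06c } },
};

int main(void)
{
	int port = 1, ring = 2;

	/* same indexing as the sds_ring setup loop above */
	printf("sts consumer 0x%x, int mask 0x%x\n",
	       (unsigned)recv_crb_table[port].crb_sts_consumer[ring],
	       (unsigned)recv_crb_table[port].sw_int_mask[ring]);
	return 0;
}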
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index a677ff89518..e16ea46c24b 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -30,7 +30,6 @@
#include <linux/types.h>
#include <linux/delay.h>
-#include <asm/uaccess.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <linux/netdevice.h>
@@ -53,13 +52,9 @@ struct netxen_nic_stats {
#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF
static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = {
- {"rcvd_bad_skb", NETXEN_NIC_STAT(stats.rcvdbadskb)},
{"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)},
- {"xmited_frames", NETXEN_NIC_STAT(stats.xmitedframes)},
{"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)},
- {"bad_skb_len", NETXEN_NIC_STAT(stats.badskblen)},
- {"no_cmd_desc", NETXEN_NIC_STAT(stats.nocmddescriptor)},
- {"polled", NETXEN_NIC_STAT(stats.polled)},
+ {"rx_dropped", NETXEN_NIC_STAT(stats.rxdropped)},
{"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)},
{"csummed", NETXEN_NIC_STAT(stats.csummed)},
{"no_rcv", NETXEN_NIC_STAT(stats.no_rcv)},
@@ -97,12 +92,9 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
write_lock_irqsave(&adapter->adapter_lock, flags);
- fw_major = adapter->pci_read_normalize(adapter,
- NETXEN_FW_VERSION_MAJOR);
- fw_minor = adapter->pci_read_normalize(adapter,
- NETXEN_FW_VERSION_MINOR);
- fw_build = adapter->pci_read_normalize(adapter,
- NETXEN_FW_VERSION_SUB);
+ fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
write_unlock_irqrestore(&adapter->adapter_lock, flags);
sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
@@ -115,6 +107,7 @@ static int
netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct netxen_adapter *adapter = netdev_priv(dev);
+ int check_sfp_module = 0;
/* read which mode */
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
@@ -139,7 +132,7 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
u32 val;
- adapter->hw_read_wx(adapter, NETXEN_PORT_MODE_ADDR, &val, 4);
+ val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
if (val == NETXEN_PORT_MODE_802_3_AP) {
ecmd->supported = SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_1000baseT_Full;
@@ -148,13 +141,19 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->advertising = ADVERTISED_10000baseT_Full;
}
+ if (netif_running(dev) && adapter->has_link_events) {
+ ecmd->speed = adapter->link_speed;
+ ecmd->autoneg = adapter->link_autoneg;
+ ecmd->duplex = adapter->link_duplex;
+ goto skip;
+ }
+
ecmd->port = PORT_TP;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
u16 pcifn = adapter->ahw.pci_func;
- adapter->hw_read_wx(adapter,
- P3_LINK_SPEED_REG(pcifn), &val, 4);
+ val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn));
ecmd->speed = P3_LINK_SPEED_MHZ *
P3_LINK_SPEED_VAL(pcifn, val);
} else
@@ -165,10 +164,11 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
} else
return -EIO;
+skip:
ecmd->phy_address = adapter->physical_port;
ecmd->transceiver = XCVR_EXTERNAL;
- switch ((netxen_brdtype_t)adapter->ahw.board_type) {
+ switch (adapter->ahw.board_type) {
case NETXEN_BRDTYPE_P2_SB35_4G:
case NETXEN_BRDTYPE_P2_SB31_2G:
case NETXEN_BRDTYPE_P3_REF_QG:
@@ -195,7 +195,7 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
case NETXEN_BRDTYPE_P3_HMEZ:
ecmd->supported |= SUPPORTED_MII;
ecmd->advertising |= ADVERTISED_MII;
- ecmd->port = PORT_FIBRE;
+ ecmd->port = PORT_MII;
ecmd->autoneg = AUTONEG_DISABLE;
break;
case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
@@ -203,6 +203,8 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
case NETXEN_BRDTYPE_P3_10G_SFP_QT:
ecmd->advertising |= ADVERTISED_TP;
ecmd->supported |= SUPPORTED_TP;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
case NETXEN_BRDTYPE_P2_SB31_10G:
case NETXEN_BRDTYPE_P3_10G_XFP:
ecmd->supported |= SUPPORTED_FIBRE;
@@ -217,6 +219,8 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->advertising |=
(ADVERTISED_FIBRE | ADVERTISED_TP);
ecmd->port = PORT_FIBRE;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
} else {
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg);
@@ -227,10 +231,28 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
break;
default:
printk(KERN_ERR "netxen-nic: Unsupported board model %d\n",
- (netxen_brdtype_t)adapter->ahw.board_type);
+ adapter->ahw.board_type);
return -EIO;
}
+ if (check_sfp_module) {
+ switch (adapter->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ ecmd->port = PORT_TP;
+ break;
+ default:
+ ecmd->port = -1;
+ }
+ }
+
return 0;
}
@@ -398,12 +420,11 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
(adapter->pdev)->device;
/* which mode */
- adapter->hw_read_wx(adapter, NETXEN_NIU_MODE, &regs_buff[0], 4);
+ regs_buff[0] = NXRD32(adapter, NETXEN_NIU_MODE);
mode = regs_buff[0];
/* Common registers to all the modes */
- adapter->hw_read_wx(adapter,
- NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER, &regs_buff[2], 4);
+ regs_buff[2] = NXRD32(adapter, NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER);
/* GB/XGB Mode */
mode = (mode / 2) - 1;
window = 0;
@@ -414,9 +435,8 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
window = adapter->physical_port *
NETXEN_NIC_PORT_WINDOW;
- adapter->hw_read_wx(adapter,
- niu_registers[mode].reg[i - 3] + window,
- &regs_buff[i], 4);
+ regs_buff[i] = NXRD32(adapter,
+ niu_registers[mode].reg[i - 3] + window);
}
}
@@ -440,7 +460,7 @@ static u32 netxen_nic_test_link(struct net_device *dev)
return !val;
}
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
- val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
+ val = NXRD32(adapter, CRB_XG_STATE);
return (val == XG_LINK_UP) ? 0 : 1;
}
return -EIO;
@@ -504,10 +524,9 @@ netxen_nic_get_pauseparam(struct net_device *dev,
if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
return;
/* get flow control settings */
- netxen_nic_read_w0(adapter,NETXEN_NIU_GB_MAC_CONFIG_0(port),
- &val);
+ val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
pause->rx_pause = netxen_gb_get_rx_flowctl(val);
- netxen_nic_read_w0(adapter, NETXEN_NIU_GB_PAUSE_CTL, &val);
+ val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
switch (port) {
case 0:
pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
@@ -527,7 +546,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
return;
pause->rx_pause = 1;
- netxen_nic_read_w0(adapter, NETXEN_NIU_XG_PAUSE_CTL, &val);
+ val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
if (port == 0)
pause->tx_pause = !(netxen_xg_get_xg0_mask(val));
else
@@ -550,18 +569,17 @@ netxen_nic_set_pauseparam(struct net_device *dev,
if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
return -EIO;
/* set flow control */
- netxen_nic_read_w0(adapter,
- NETXEN_NIU_GB_MAC_CONFIG_0(port), &val);
+ val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
if (pause->rx_pause)
netxen_gb_rx_flowctl(val);
else
netxen_gb_unset_rx_flowctl(val);
- netxen_nic_write_w0(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
val);
/* set autoneg */
- netxen_nic_read_w0(adapter, NETXEN_NIU_GB_PAUSE_CTL, &val);
+ val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
switch (port) {
case 0:
if (pause->tx_pause)
@@ -589,11 +607,11 @@ netxen_nic_set_pauseparam(struct net_device *dev,
netxen_gb_set_gb3_mask(val);
break;
}
- netxen_nic_write_w0(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
+ NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
return -EIO;
- netxen_nic_read_w0(adapter, NETXEN_NIU_XG_PAUSE_CTL, &val);
+ val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
if (port == 0) {
if (pause->tx_pause)
netxen_xg_unset_xg0_mask(val);
@@ -605,7 +623,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
else
netxen_xg_set_xg1_mask(val);
}
- netxen_nic_write_w0(adapter, NETXEN_NIU_XG_PAUSE_CTL, val);
+ NXWR32(adapter, NETXEN_NIU_XG_PAUSE_CTL, val);
} else {
printk(KERN_ERR "%s: Unknown board type: %x\n",
netxen_nic_driver_name,
@@ -619,14 +637,14 @@ static int netxen_nic_reg_test(struct net_device *dev)
struct netxen_adapter *adapter = netdev_priv(dev);
u32 data_read, data_written;
- netxen_nic_read_w0(adapter, NETXEN_PCIX_PH_REG(0), &data_read);
+ data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0));
if ((data_read & 0xffff) != PHAN_VENDOR_ID)
return 1;
data_written = (u32)0xa5a5a5a5;
- netxen_nic_reg_write(adapter, CRB_SCRATCHPAD_TEST, data_written);
- data_read = adapter->pci_read_normalize(adapter, CRB_SCRATCHPAD_TEST);
+ NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
+ data_read = NXRD32(adapter, CRB_SCRATCHPAD_TEST);
if (data_written != data_read)
return 1;
@@ -743,11 +761,11 @@ netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return;
- wol_cfg = netxen_nic_reg_read(adapter, NETXEN_WOL_CONFIG_NV);
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
if (wol_cfg & (1UL << adapter->portnum))
wol->supported |= WAKE_MAGIC;
- wol_cfg = netxen_nic_reg_read(adapter, NETXEN_WOL_CONFIG);
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
if (wol_cfg & (1UL << adapter->portnum))
wol->wolopts |= WAKE_MAGIC;
}
@@ -764,16 +782,16 @@ netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
- wol_cfg = netxen_nic_reg_read(adapter, NETXEN_WOL_CONFIG_NV);
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
if (!(wol_cfg & (1 << adapter->portnum)))
return -EOPNOTSUPP;
- wol_cfg = netxen_nic_reg_read(adapter, NETXEN_WOL_CONFIG);
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
if (wol->wolopts & WAKE_MAGIC)
wol_cfg |= 1UL << adapter->portnum;
else
wol_cfg &= ~(1UL << adapter->portnum);
- netxen_nic_reg_write(adapter, NETXEN_WOL_CONFIG, wol_cfg);
+ NXWR32(adapter, NETXEN_WOL_CONFIG, wol_cfg);
return 0;
}
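
The statistics table near the top of this ethtool hunk pairs a display string with the location of a counter inside the driver's stats structure; the NETXEN_NIC_STAT() macro itself is not shown here, but the usual way such tables work is an offsetof()-based descriptor that the ethtool get_ethtool_stats hook walks when filling `ethtool -S` output. A hedged, self-contained sketch of that pattern, with a hypothetical demo_stats struct and DEMO_STAT() macro standing in for the driver's definitions:

#include <stddef.h>
#include <stdio.h>

struct demo_stats {
	unsigned long xmitcalled;
	unsigned long xmitfinished;
	unsigned long rxdropped;
};

struct demo_stat_desc {
	char name[32];
	size_t offset;
};

#define DEMO_STAT(m) { #m, offsetof(struct demo_stats, m) }

static const struct demo_stat_desc demo_gstrings[] = {
	DEMO_STAT(xmitcalled),
	DEMO_STAT(xmitfinished),
	DEMO_STAT(rxdropped),
};

int main(void)
{
	struct demo_stats s = { 10, 9, 1 };
	size_t i;

	/* walk the table, reading each counter through its recorded offset */
	for (i = 0; i < sizeof(demo_gstrings) / sizeof(demo_gstrings[0]); i++) {
		unsigned long v = *(unsigned long *)
			((char *)&s + demo_gstrings[i].offset);
		printf("%s: %lu\n", demo_gstrings[i].name, v);
	}
	return 0;
}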
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 016c62129c7..7f0ddbfa7b2 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -31,16 +31,8 @@
#ifndef __NETXEN_NIC_HDR_H_
#define __NETXEN_NIC_HDR_H_
-#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <asm/irq.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
#include <linux/types.h>
-#include <asm/uaccess.h>
-#include <asm/string.h> /* for memset */
/*
* The basic unit of access when reading/writing control registers.
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 5026811c04c..42ffb825ebf 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -32,7 +32,6 @@
#include "netxen_nic_hw.h"
#include "netxen_nic_phan_reg.h"
-#include <linux/firmware.h>
#include <net/ip.h>
#define MASK(n) ((1ULL<<(n))-1)
@@ -48,8 +47,49 @@
#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
#define CRB_INDIRECT_2M (0x1e0000UL)
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base0 + (off))
+#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START)
+#define PCI_OFFSET_THIRD_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START)
+
+static void __iomem *pci_base_offset(struct netxen_adapter *adapter,
+ unsigned long off)
+{
+ if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
+ return PCI_OFFSET_FIRST_RANGE(adapter, off);
+
+ if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END))
+ return PCI_OFFSET_SECOND_RANGE(adapter, off);
+
+ if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END))
+ return PCI_OFFSET_THIRD_RANGE(adapter, off);
+
+ return NULL;
+}
+
#define CRB_WIN_LOCK_TIMEOUT 100000000
-static crb_128M_2M_block_map_t crb_128M_2M_map[64] = {
+static crb_128M_2M_block_map_t
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
{{{0, 0, 0, 0} } }, /* 0: PCI */
{{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
{1, 0x0110000, 0x0120000, 0x130000},
@@ -279,39 +319,8 @@ static unsigned crb_hub_agt[64] =
/* PCI Windowing for DDR regions. */
-#define ADDR_IN_RANGE(addr, low, high) \
- (((addr) <= (high)) && ((addr) >= (low)))
-
#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */
-#define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL
-#define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL
-#define NETXEN_NIC_EPG_PAUSE_ADDR1 0x2200010000c28001ULL
-#define NETXEN_NIC_EPG_PAUSE_ADDR2 0x0100088866554433ULL
-
-#define NETXEN_NIC_WINDOW_MARGIN 0x100000
-
-int netxen_nic_set_mac(struct net_device *netdev, void *p)
-{
- struct netxen_adapter *adapter = netdev_priv(netdev);
- struct sockaddr *addr = p;
-
- if (netif_running(netdev))
- return -EBUSY;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- /* For P3, MAC addr is not set in NIU */
- if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
- if (adapter->macaddr_set)
- adapter->macaddr_set(adapter, addr->sa_data);
-
- return 0;
-}
-
#define NETXEN_UNICAST_ADDR(port, index) \
(NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8))
#define NETXEN_MCAST_ADDR(port, index) \
@@ -331,22 +340,20 @@ netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter)
if (adapter->mc_enabled)
return 0;
- adapter->hw_read_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4);
+ val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
val |= (1UL << (28+port));
- adapter->hw_write_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4);
+ NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
/* add broadcast addr to filter */
val = 0xffffff;
- netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_UNICAST_ADDR(port, 0)+4, val);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);
/* add station addr to filter */
val = MAC_HI(addr);
- netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 1), val);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val);
val = MAC_LO(addr);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_UNICAST_ADDR(port, 1)+4, val);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val);
adapter->mc_enabled = 1;
return 0;
@@ -362,18 +369,17 @@ netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter)
if (!adapter->mc_enabled)
return 0;
- adapter->hw_read_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4);
+ val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
val &= ~(1UL << (28+port));
- adapter->hw_write_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4);
+ NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
val = MAC_HI(addr);
- netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
val = MAC_LO(addr);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_UNICAST_ADDR(port, 0)+4, val);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);
- netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 1), 0);
- netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0);
adapter->mc_enabled = 0;
return 0;
@@ -389,10 +395,8 @@ netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
lo = MAC_LO(addr);
hi = MAC_HI(addr);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_MCAST_ADDR(port, index), hi);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_MCAST_ADDR(port, index)+4, lo);
+ NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi);
+ NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo);
return 0;
}
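
netxen_nic_set_mcast_addr() splits a MAC address into two 32-bit halves and writes them to adjacent CRB slots. The MAC_HI()/MAC_LO() macros are not visible in this hunk, so the split below (low 32 bits plus the upper 16 bits of a 48-bit address) is only an assumption chosen to show that the two register writes carry the whole address between them.

#include <stdint.h>
#include <stdio.h>

/* illustrative split: low 32 bits and upper 16 bits of a 48-bit MAC */
#define DEMO_MAC_LO(mac) ((uint32_t)((mac) & 0xffffffffULL))
#define DEMO_MAC_HI(mac) ((uint32_t)(((mac) >> 32) & 0xffffULL))

int main(void)
{
	uint64_t mac = 0x001b21aabbccULL;	/* 00:1b:21:aa:bb:cc */

	uint32_t hi = DEMO_MAC_HI(mac);
	uint32_t lo = DEMO_MAC_LO(mac);

	/* the driver writes hi and lo to two adjacent 32-bit CRB slots */
	printf("hi=0x%04x lo=0x%08x\n", (unsigned)hi, (unsigned)lo);

	/* recombine to confirm nothing is lost */
	uint64_t back = ((uint64_t)hi << 32) | lo;
	printf("match=%d\n", back == mac);
	return 0;
}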
@@ -445,100 +449,58 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
netxen_nic_set_mcast_addr(adapter, index, null_addr);
}
-static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
- u8 *addr, nx_mac_list_t **add_list, nx_mac_list_t **del_list)
-{
- nx_mac_list_t *cur, *prev;
-
- /* if in del_list, move it to adapter->mac_list */
- for (cur = *del_list, prev = NULL; cur;) {
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
- if (prev == NULL)
- *del_list = cur->next;
- else
- prev->next = cur->next;
- cur->next = adapter->mac_list;
- adapter->mac_list = cur;
- return 0;
- }
- prev = cur;
- cur = cur->next;
- }
-
- /* make sure to add each mac address only once */
- for (cur = adapter->mac_list; cur; cur = cur->next) {
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
- return 0;
- }
- /* not in del_list, create new entry and add to add_list */
- cur = kmalloc(sizeof(*cur), in_atomic()? GFP_ATOMIC : GFP_KERNEL);
- if (cur == NULL) {
- printk(KERN_ERR "%s: cannot allocate memory. MAC filtering may"
- "not work properly from now.\n", __func__);
- return -1;
- }
-
- memcpy(cur->mac_addr, addr, ETH_ALEN);
- cur->next = *add_list;
- *add_list = cur;
- return 0;
-}
-
static int
netxen_send_cmd_descs(struct netxen_adapter *adapter,
- struct cmd_desc_type0 *cmd_desc_arr, int nr_elements)
+ struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
- uint32_t i, producer;
+ u32 i, producer, consumer;
struct netxen_cmd_buffer *pbuf;
struct cmd_desc_type0 *cmd_desc;
-
- if (nr_elements > MAX_PENDING_DESC_BLOCK_SIZE || nr_elements == 0) {
- printk(KERN_WARNING "%s: Too many command descriptors in a "
- "request\n", __func__);
- return -EINVAL;
- }
+ struct nx_host_tx_ring *tx_ring;
i = 0;
+ tx_ring = adapter->tx_ring;
netif_tx_lock_bh(adapter->netdev);
- producer = adapter->cmd_producer;
+ producer = tx_ring->producer;
+ consumer = tx_ring->sw_consumer;
+
+ if (nr_desc >= find_diff_among(producer, consumer, tx_ring->num_desc)) {
+ netif_tx_unlock_bh(adapter->netdev);
+ return -EBUSY;
+ }
+
do {
cmd_desc = &cmd_desc_arr[i];
- pbuf = &adapter->cmd_buf_arr[producer];
+ pbuf = &tx_ring->cmd_buf_arr[producer];
pbuf->skb = NULL;
pbuf->frag_count = 0;
- /* adapter->ahw.cmd_desc_head[producer] = *cmd_desc; */
- memcpy(&adapter->ahw.cmd_desc_head[producer],
+ memcpy(&tx_ring->desc_head[producer],
&cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
- producer = get_next_index(producer,
- adapter->num_txd);
+ producer = get_next_index(producer, tx_ring->num_desc);
i++;
- } while (i != nr_elements);
-
- adapter->cmd_producer = producer;
+ } while (i != nr_desc);
- /* write producer index to start the xmit */
+ tx_ring->producer = producer;
- netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);
+ netxen_nic_update_cmd_producer(adapter, tx_ring, producer);
netif_tx_unlock_bh(adapter->netdev);
return 0;
}
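
netxen_send_cmd_descs() now checks that the tx ring has room before copying the request descriptors in, and bails out with -EBUSY instead of capping the request count. find_diff_among() and get_next_index() are not defined in this hunk; the sketch below models the wrap-around arithmetic they presumably perform (ring_space() and next_index() are hypothetical names).

#include <stdint.h>
#include <stdio.h>

/* slots still free between producer and consumer on a ring of num_desc */
static uint32_t ring_space(uint32_t producer, uint32_t consumer,
			   uint32_t num_desc)
{
	if (producer >= consumer)
		return num_desc - (producer - consumer);
	return consumer - producer;
}

static uint32_t next_index(uint32_t index, uint32_t num_desc)
{
	return (index + 1) % num_desc;
}

int main(void)
{
	uint32_t num_desc = 8, producer = 6, consumer = 2;
	uint32_t nr_desc = 3;

	if (nr_desc >= ring_space(producer, consumer, num_desc)) {
		printf("not enough room, would return -EBUSY\n");
		return 0;
	}

	while (nr_desc--)
		producer = next_index(producer, num_desc);

	printf("new producer = %u\n", producer);
	return 0;
}

In this model the >= comparison rejects a request that would exactly fill the ring, so producer == consumer stays unambiguous as "empty".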
-static int nx_p3_sre_macaddr_change(struct net_device *dev,
- u8 *addr, unsigned op)
+static int
+nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
{
- struct netxen_adapter *adapter = netdev_priv(dev);
nx_nic_req_t req;
nx_mac_req_t *mac_req;
u64 word;
- int rv;
memset(&req, 0, sizeof(nx_nic_req_t));
req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);
@@ -550,28 +512,51 @@ static int nx_p3_sre_macaddr_change(struct net_device *dev,
mac_req->op = op;
memcpy(mac_req->mac_addr, addr, 6);
- rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
- if (rv != 0) {
- printk(KERN_ERR "ERROR. Could not send mac update\n");
- return rv;
+ return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
+ u8 *addr, struct list_head *del_list)
+{
+ struct list_head *head;
+ nx_mac_list_t *cur;
+
+ /* look up if already exists */
+ list_for_each(head, del_list) {
+ cur = list_entry(head, nx_mac_list_t, list);
+
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ list_move_tail(head, &adapter->mac_list);
+ return 0;
+ }
}
- return 0;
+ cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC);
+ if (cur == NULL) {
+ printk(KERN_ERR "%s: failed to add mac address filter\n",
+ adapter->netdev->name);
+ return -ENOMEM;
+ }
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+ list_add_tail(&cur->list, &adapter->mac_list);
+ return nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_ADD);
}
void netxen_p3_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- nx_mac_list_t *cur, *next, *del_list, *add_list = NULL;
struct dev_mc_list *mc_ptr;
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
+ LIST_HEAD(del_list);
+ struct list_head *head;
+ nx_mac_list_t *cur;
- del_list = adapter->mac_list;
- adapter->mac_list = NULL;
+ list_splice_tail_init(&adapter->mac_list, &del_list);
- nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list);
- nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list);
+ nx_p3_nic_add_mac(adapter, netdev->dev_addr, &del_list);
+ nx_p3_nic_add_mac(adapter, bcast_addr, &del_list);
if (netdev->flags & IFF_PROMISC) {
mode = VPORT_MISS_MODE_ACCEPT_ALL;
@@ -587,25 +572,20 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
if (netdev->mc_count > 0) {
for (mc_ptr = netdev->mc_list; mc_ptr;
mc_ptr = mc_ptr->next) {
- nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr,
- &add_list, &del_list);
+ nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list);
}
}
send_fw_cmd:
adapter->set_promisc(adapter, mode);
- for (cur = del_list; cur;) {
- nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL);
- next = cur->next;
+ head = &del_list;
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, nx_mac_list_t, list);
+
+ nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_DEL);
+ list_del(&cur->list);
kfree(cur);
- cur = next;
- }
- for (cur = add_list; cur;) {
- nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_ADD);
- next = cur->next;
- cur->next = adapter->mac_list;
- adapter->mac_list = cur;
- cur = next;
}
}
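
The multicast path drops the hand-rolled singly-linked nx_mac_list_t chains in favour of list_head, and reworks the update as: splice the currently programmed addresses onto a delete list, move each still-wanted address back, send NETXEN_MAC_ADD only for genuinely new entries, and finally NETXEN_MAC_DEL whatever is left over. A self-contained user-space sketch of that move-or-add pattern, with a plain singly-linked list standing in for list_head (mac_node, take_mac() and want_mac() are illustrative names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mac_node {
	unsigned char addr[6];
	struct mac_node *next;
};

/* pop a matching node out of *list, or return NULL if not present */
static struct mac_node *take_mac(struct mac_node **list,
				 const unsigned char *addr)
{
	struct mac_node **pp, *cur;

	for (pp = list; (cur = *pp) != NULL; pp = &cur->next) {
		if (memcmp(cur->addr, addr, 6) == 0) {
			*pp = cur->next;
			cur->next = NULL;
			return cur;
		}
	}
	return NULL;
}

static void push_mac(struct mac_node **list, struct mac_node *n)
{
	n->next = *list;
	*list = n;
}

/* keep addr active: reuse it if it was already programmed, otherwise
 * allocate a node and (in the driver) send a NETXEN_MAC_ADD request */
static void want_mac(struct mac_node **active, struct mac_node **stale,
		     const unsigned char *addr)
{
	struct mac_node *n = take_mac(stale, addr);

	if (n == NULL) {
		n = calloc(1, sizeof(*n));
		memcpy(n->addr, addr, 6);
		printf("ADD %02x:..:%02x\n", addr[0], addr[5]);
	}
	push_mac(active, n);
}

int main(void)
{
	struct mac_node *active = NULL, *stale = NULL;
	unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	unsigned char ucast[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	want_mac(&active, &stale, bcast);
	want_mac(&active, &stale, ucast);

	/* anything left on the stale list gets NETXEN_MAC_DEL and is freed */
	while (stale) {
		struct mac_node *n = stale;
		stale = n->next;
		printf("DEL %02x:..:%02x\n", n->addr[0], n->addr[5]);
		free(n);
	}
	while (active) {
		struct mac_node *n = active;
		active = n->next;
		free(n);
	}
	return 0;
}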
@@ -630,17 +610,25 @@ int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
void netxen_p3_free_mac_list(struct netxen_adapter *adapter)
{
- nx_mac_list_t *cur, *next;
-
- cur = adapter->mac_list;
-
- while (cur) {
- next = cur->next;
+ nx_mac_list_t *cur;
+ struct list_head *head = &adapter->mac_list;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, nx_mac_list_t, list);
+ nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_DEL);
+ list_del(&cur->list);
kfree(cur);
- cur = next;
}
}
+int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
+{
+ /* assuming caller has already copied new addr to netdev */
+ netxen_p3_nic_set_multi(adapter->netdev);
+ return 0;
+}
+
#define NETXEN_CONFIG_INTR_COALESCE 3
/*
@@ -717,6 +705,28 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable)
return rv;
}
+int netxen_linkevent_request(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(enable | (enable << 8));
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not configure link notification\n",
+ adapter->netdev->name);
+ }
+
+ return rv;
+}
+
/*
* netxen_nic_change_mtu - Change the Maximum Transfer Unit
* @returns 0 on success, negative on failure
@@ -812,8 +822,8 @@ int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
crbaddr = CRB_MAC_BLOCK_START +
(4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
- adapter->hw_read_wx(adapter, crbaddr, &mac_lo, 4);
- adapter->hw_read_wx(adapter, crbaddr+4, &mac_hi, 4);
+ mac_lo = NXRD32(adapter, crbaddr);
+ mac_hi = NXRD32(adapter, crbaddr+4);
if (pci_func & 1)
*mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
@@ -831,8 +841,7 @@ static int crb_win_lock(struct netxen_adapter *adapter)
while (!done) {
/* acquire semaphore3 from PCI HW block */
- adapter->hw_read_wx(adapter,
- NETXEN_PCIE_REG(PCIE_SEM7_LOCK), &done, 4);
+ done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM7_LOCK));
if (done == 1)
break;
if (timeout >= CRB_WIN_LOCK_TIMEOUT)
@@ -840,8 +849,7 @@ static int crb_win_lock(struct netxen_adapter *adapter)
timeout++;
udelay(1);
}
- netxen_crb_writelit_adapter(adapter,
- NETXEN_CRB_WIN_LOCK_ID, adapter->portnum);
+ NXWR32(adapter, NETXEN_CRB_WIN_LOCK_ID, adapter->portnum);
return 0;
}
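
crb_win_lock() acquires a hardware semaphore by polling the PCIE_SEM7_LOCK register until it reads back 1, giving up after CRB_WIN_LOCK_TIMEOUT attempts (the error return itself sits outside this hunk), and then records the owner's port number in NETXEN_CRB_WIN_LOCK_ID. A user-space model of that acquire loop; the fake semaphore, DEMO_LOCK_TIMEOUT and the grant-after-a-few-polls behaviour are stand-ins.

#include <stdint.h>
#include <stdio.h>

#define DEMO_LOCK_TIMEOUT 1000	/* much smaller than CRB_WIN_LOCK_TIMEOUT */

/* fake hardware semaphore: reading it returns 1 once it is granted */
static int polls_until_grant = 5;

static uint32_t read_sem_lock(void)
{
	if (polls_until_grant > 0) {
		polls_until_grant--;
		return 0;
	}
	return 1;
}

/* poll-until-granted, the same shape as crb_win_lock() above */
static int demo_win_lock(void)
{
	int timeout = 0;
	uint32_t done = 0;

	while (!done) {
		done = read_sem_lock();
		if (done == 1)
			break;
		if (timeout >= DEMO_LOCK_TIMEOUT)
			return -1;	/* the driver errors out here */
		timeout++;
	}
	/* the driver then writes its port number to NETXEN_CRB_WIN_LOCK_ID */
	return 0;
}

int main(void)
{
	printf("lock %s\n", demo_win_lock() == 0 ? "acquired" : "timed out");
	return 0;
}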
@@ -849,8 +857,7 @@ static void crb_win_unlock(struct netxen_adapter *adapter)
{
int val;
- adapter->hw_read_wx(adapter,
- NETXEN_PCIE_REG(PCIE_SEM7_UNLOCK), &val, 4);
+ val = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM7_UNLOCK));
}
/*
@@ -907,17 +914,15 @@ netxen_nic_pci_change_crbwindow_128M(struct netxen_adapter *adapter, u32 wndw)
* In: 'off' is offset from base in 128M pci map
*/
static int
-netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
- ulong *off, int len)
+netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, ulong *off)
{
- unsigned long end = *off + len;
crb_128M_2M_sub_block_map_t *m;
if (*off >= NETXEN_CRB_MAX)
return -1;
- if (*off >= NETXEN_PCI_CAMQM && (end <= NETXEN_PCI_CAMQM_2M_END)) {
+ if (*off >= NETXEN_PCI_CAMQM && (*off < NETXEN_PCI_CAMQM_2M_END)) {
*off = (*off - NETXEN_PCI_CAMQM) + NETXEN_PCI_CAMQM_2M_BASE +
(ulong)adapter->ahw.pci_base0;
return 0;
@@ -927,14 +932,13 @@ netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
return -1;
*off -= NETXEN_PCI_CRBSPACE;
- end = *off + len;
/*
* Try direct map
*/
m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
- if (m->valid && (m->start_128M <= *off) && (m->end_128M >= end)) {
+ if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
*off = *off + m->start_2M - m->start_128M +
(ulong)adapter->ahw.pci_base0;
return 0;
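
The direct-map branch of netxen_nic_pci_get_crb_addr_2M() rebases a legacy 128M-map offset into the 2M BAR by shifting it from the sub-block's 128M window into its 2M window and adding pci_base0; the hunk also narrows the range check to a single 32-bit access (end_128M > *off) now that the length argument is gone. A self-contained sketch of that translation, reusing the 0x0100000..0x0102000 -> 0x120000 PCIE sub-block values visible in the crb_128M_2M_map initializer earlier in this file:

#include <stdint.h>
#include <stdio.h>

/* one sub-block of the 128M -> 2M translation table */
struct crb_map_entry {
	int valid;
	unsigned long start_128M;
	unsigned long end_128M;
	unsigned long start_2M;
};

/* translate a legacy 128M-map offset into the 2M BAR, as in the hunk above */
static int demo_crb_addr_2M(const struct crb_map_entry *m,
			    unsigned long *off, unsigned long pci_base0)
{
	if (!m->valid || *off < m->start_128M || *off >= m->end_128M)
		return -1;	/* would fall back to windowed access */

	*off = *off + m->start_2M - m->start_128M + pci_base0;
	return 0;
}

int main(void)
{
	struct crb_map_entry m = { 1, 0x0100000, 0x0102000, 0x120000 };
	unsigned long off = 0x0100010, base0 = 0;

	if (demo_crb_addr_2M(&m, &off, base0) == 0)
		printf("mapped to 0x%lx\n", off);	/* prints 0x120010 */
	return 0;
}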
@@ -972,214 +976,11 @@ netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong *off)
(ulong)adapter->ahw.pci_base0;
}
-static int
-netxen_do_load_firmware(struct netxen_adapter *adapter, const char *fwname,
- const struct firmware *fw)
-{
- u64 *ptr64;
- u32 i, flashaddr, size;
- struct pci_dev *pdev = adapter->pdev;
-
- if (fw)
- dev_info(&pdev->dev, "loading firmware from file %s\n", fwname);
- else
- dev_info(&pdev->dev, "loading firmware from flash\n");
-
- if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
- adapter->pci_write_normalize(adapter,
- NETXEN_ROMUSB_GLB_CAS_RST, 1);
-
- if (fw) {
- __le64 data;
-
- size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
-
- ptr64 = (u64 *)&fw->data[NETXEN_BOOTLD_START];
- flashaddr = NETXEN_BOOTLD_START;
-
- for (i = 0; i < size; i++) {
- data = cpu_to_le64(ptr64[i]);
- adapter->pci_mem_write(adapter, flashaddr, &data, 8);
- flashaddr += 8;
- }
-
- size = *(u32 *)&fw->data[NX_FW_SIZE_OFFSET];
- size = (__force u32)cpu_to_le32(size) / 8;
-
- ptr64 = (u64 *)&fw->data[NETXEN_IMAGE_START];
- flashaddr = NETXEN_IMAGE_START;
-
- for (i = 0; i < size; i++) {
- data = cpu_to_le64(ptr64[i]);
-
- if (adapter->pci_mem_write(adapter,
- flashaddr, &data, 8))
- return -EIO;
-
- flashaddr += 8;
- }
- } else {
- u32 data;
-
- size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 4;
- flashaddr = NETXEN_BOOTLD_START;
-
- for (i = 0; i < size; i++) {
- if (netxen_rom_fast_read(adapter,
- flashaddr, (int *)&data) != 0)
- return -EIO;
-
- if (adapter->pci_mem_write(adapter,
- flashaddr, &data, 4))
- return -EIO;
-
- flashaddr += 4;
- }
- }
- msleep(1);
-
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- adapter->pci_write_normalize(adapter,
- NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
- else {
- adapter->pci_write_normalize(adapter,
- NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
- adapter->pci_write_normalize(adapter,
- NETXEN_ROMUSB_GLB_CAS_RST, 0);
- }
-
- return 0;
-}
-
-static int
-netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname,
- const struct firmware *fw)
-{
- __le32 val;
- u32 major, minor, build, ver, min_ver, bios;
- struct pci_dev *pdev = adapter->pdev;
-
- if (fw->size < NX_FW_MIN_SIZE)
- return -EINVAL;
-
- val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
- if ((__force u32)val != NETXEN_BDINFO_MAGIC)
- return -EINVAL;
-
- val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
- major = (__force u32)val & 0xff;
- minor = ((__force u32)val >> 8) & 0xff;
- build = (__force u32)val >> 16;
-
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- min_ver = NETXEN_VERSION_CODE(4, 0, 216);
- else
- min_ver = NETXEN_VERSION_CODE(3, 4, 216);
-
- ver = NETXEN_VERSION_CODE(major, minor, build);
-
- if ((major > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
- dev_err(&pdev->dev,
- "%s: firmware version %d.%d.%d unsupported\n",
- fwname, major, minor, build);
- return -EINVAL;
- }
-
- val = cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
- netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
- if ((__force u32)val != bios) {
- dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
- fwname);
- return -EINVAL;
- }
-
- /* check if flashed firmware is newer */
- if (netxen_rom_fast_read(adapter,
- NX_FW_VERSION_OFFSET, (int *)&val))
- return -EIO;
- major = (__force u32)val & 0xff;
- minor = ((__force u32)val >> 8) & 0xff;
- build = (__force u32)val >> 16;
- if (NETXEN_VERSION_CODE(major, minor, build) > ver)
- return -EINVAL;
-
- netxen_nic_reg_write(adapter, NETXEN_CAM_RAM(0x1fc),
- NETXEN_BDINFO_MAGIC);
- return 0;
-}
-
-static char *fw_name[] = { "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin" };
-
-int netxen_load_firmware(struct netxen_adapter *adapter)
-{
- u32 capability, flashed_ver;
- const struct firmware *fw;
- int fw_type;
- struct pci_dev *pdev = adapter->pdev;
- int rc = 0;
-
- if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
- fw_type = NX_P2_MN_ROMIMAGE;
- goto request_fw;
- } else {
- fw_type = NX_P3_CT_ROMIMAGE;
- goto request_fw;
- }
-
-request_mn:
- capability = 0;
-
- netxen_rom_fast_read(adapter,
- NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
- if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
- adapter->hw_read_wx(adapter,
- NX_PEG_TUNE_CAPABILITY, &capability, 4);
- if (capability & NX_PEG_TUNE_MN_PRESENT) {
- fw_type = NX_P3_MN_ROMIMAGE;
- goto request_fw;
- }
- }
-
-request_fw:
- rc = request_firmware(&fw, fw_name[fw_type], &pdev->dev);
- if (rc != 0) {
- if (fw_type == NX_P3_CT_ROMIMAGE) {
- msleep(1);
- goto request_mn;
- }
-
- fw = NULL;
- goto load_fw;
- }
-
- rc = netxen_validate_firmware(adapter, fw_name[fw_type], fw);
- if (rc != 0) {
- release_firmware(fw);
-
- if (fw_type == NX_P3_CT_ROMIMAGE) {
- msleep(1);
- goto request_mn;
- }
-
- fw = NULL;
- }
-
-load_fw:
- rc = netxen_do_load_firmware(adapter, fw_name[fw_type], fw);
-
- if (fw)
- release_firmware(fw);
- return rc;
-}
-
int
-netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter,
- ulong off, void *data, int len)
+netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data)
{
void __iomem *addr;
- BUG_ON(len != 4);
-
if (ADDR_IN_WINDOW1(off)) {
addr = NETXEN_CRB_NORMALIZE(adapter, off);
} else { /* Window 0 */
@@ -1192,7 +993,7 @@ netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter,
return 1;
}
- writel(*(u32 *) data, addr);
+ writel(data, addr);
if (!ADDR_IN_WINDOW1(off))
netxen_nic_pci_change_crbwindow_128M(adapter, 1);
@@ -1200,13 +1001,11 @@ netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter,
return 0;
}
-int
-netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter,
- ulong off, void *data, int len)
+u32
+netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off)
{
void __iomem *addr;
-
- BUG_ON(len != 4);
+ u32 data;
if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
addr = NETXEN_CRB_NORMALIZE(adapter, off);
@@ -1220,24 +1019,21 @@ netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter,
return 1;
}
- *(u32 *)data = readl(addr);
+ data = readl(addr);
if (!ADDR_IN_WINDOW1(off))
netxen_nic_pci_change_crbwindow_128M(adapter, 1);
- return 0;
+ return data;
}
int
-netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter,
- ulong off, void *data, int len)
+netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data)
{
unsigned long flags = 0;
int rv;
- BUG_ON(len != 4);
-
- rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off, len);
+ rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off);
if (rv == -1) {
printk(KERN_ERR "%s: invalid offset: 0x%016lx\n",
@@ -1250,26 +1046,24 @@ netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter,
write_lock_irqsave(&adapter->adapter_lock, flags);
crb_win_lock(adapter);
netxen_nic_pci_set_crbwindow_2M(adapter, &off);
- writel(*(uint32_t *)data, (void __iomem *)off);
+ writel(data, (void __iomem *)off);
crb_win_unlock(adapter);
write_unlock_irqrestore(&adapter->adapter_lock, flags);
} else
- writel(*(uint32_t *)data, (void __iomem *)off);
+ writel(data, (void __iomem *)off);
return 0;
}
-int
-netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter,
- ulong off, void *data, int len)
+u32
+netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off)
{
unsigned long flags = 0;
int rv;
+ u32 data;
- BUG_ON(len != 4);
-
- rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off, len);
+ rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off);
if (rv == -1) {
printk(KERN_ERR "%s: invalid offset: 0x%016lx\n",
@@ -1282,47 +1076,13 @@ netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter,
write_lock_irqsave(&adapter->adapter_lock, flags);
crb_win_lock(adapter);
netxen_nic_pci_set_crbwindow_2M(adapter, &off);
- *(uint32_t *)data = readl((void __iomem *)off);
+ data = readl((void __iomem *)off);
crb_win_unlock(adapter);
write_unlock_irqrestore(&adapter->adapter_lock, flags);
} else
- *(uint32_t *)data = readl((void __iomem *)off);
-
- return 0;
-}
+ data = readl((void __iomem *)off);
-void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val)
-{
- adapter->hw_write_wx(adapter, off, &val, 4);
-}
-
-int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off)
-{
- int val;
- adapter->hw_read_wx(adapter, off, &val, 4);
- return val;
-}
-
-/* Change the window to 0, write and change back to window 1. */
-void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value)
-{
- adapter->hw_write_wx(adapter, index, &value, 4);
-}
-
-/* Change the window to 0, read and change back to window 1. */
-void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 *value)
-{
- adapter->hw_read_wx(adapter, index, value, 4);
-}
-
-void netxen_nic_write_w1(struct netxen_adapter *adapter, u32 index, u32 value)
-{
- adapter->hw_write_wx(adapter, index, &value, 4);
-}
-
-void netxen_nic_read_w1(struct netxen_adapter *adapter, u32 index, u32 *value)
-{
- adapter->hw_read_wx(adapter, index, value, 4);
+ return data;
}
/*
@@ -1425,17 +1185,6 @@ u32 netxen_nic_pci_read_immediate_128M(struct netxen_adapter *adapter, u64 off)
return readl((void __iomem *)(pci_base_offset(adapter, off)));
}
-void netxen_nic_pci_write_normalize_128M(struct netxen_adapter *adapter,
- u64 off, u32 data)
-{
- writel(data, NETXEN_CRB_NORMALIZE(adapter, off));
-}
-
-u32 netxen_nic_pci_read_normalize_128M(struct netxen_adapter *adapter, u64 off)
-{
- return readl(NETXEN_CRB_NORMALIZE(adapter, off));
-}
-
unsigned long
netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
unsigned long long addr)
@@ -1447,12 +1196,10 @@ netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
/* DDR network side */
window = MN_WIN(addr);
adapter->ahw.ddr_mn_window = window;
- adapter->hw_write_wx(adapter,
- adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
- &window, 4);
- adapter->hw_read_wx(adapter,
- adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
- &win_read, 4);
+ NXWR32(adapter, adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
+ window);
+ win_read = NXRD32(adapter,
+ adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE);
if ((win_read << 17) != window) {
printk(KERN_INFO "Written MNwin (0x%x) != "
"Read MNwin (0x%x)\n", window, win_read);
@@ -1467,12 +1214,10 @@ netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
window = OCM_WIN(addr);
adapter->ahw.ddr_mn_window = window;
- adapter->hw_write_wx(adapter,
- adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
- &window, 4);
- adapter->hw_read_wx(adapter,
- adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
- &win_read, 4);
+ NXWR32(adapter, adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE,
+ window);
+ win_read = NXRD32(adapter,
+ adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE);
if ((win_read >> 7) != window) {
printk(KERN_INFO "%s: Written OCMwin (0x%x) != "
"Read OCMwin (0x%x)\n",
@@ -1485,12 +1230,10 @@ netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
/* QDR network side */
window = MS_WIN(addr);
adapter->ahw.qdr_sn_window = window;
- adapter->hw_write_wx(adapter,
- adapter->ahw.ms_win_crb | NETXEN_PCI_CRBSPACE,
- &window, 4);
- adapter->hw_read_wx(adapter,
- adapter->ahw.ms_win_crb | NETXEN_PCI_CRBSPACE,
- &win_read, 4);
+ NXWR32(adapter, adapter->ahw.ms_win_crb | NETXEN_PCI_CRBSPACE,
+ window);
+ win_read = NXRD32(adapter,
+ adapter->ahw.ms_win_crb | NETXEN_PCI_CRBSPACE);
if (win_read != window) {
printk(KERN_INFO "%s: Written MSwin (0x%x) != "
"Read MSwin (0x%x)\n",
@@ -1936,27 +1679,20 @@ netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
for (i = 0; i < loop; i++) {
temp = off8 + (i << 3);
- adapter->hw_write_wx(adapter,
- mem_crb+MIU_TEST_AGT_ADDR_LO, &temp, 4);
+ NXWR32(adapter, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
temp = 0;
- adapter->hw_write_wx(adapter,
- mem_crb+MIU_TEST_AGT_ADDR_HI, &temp, 4);
+ NXWR32(adapter, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
temp = word[i] & 0xffffffff;
- adapter->hw_write_wx(adapter,
- mem_crb+MIU_TEST_AGT_WRDATA_LO, &temp, 4);
+ NXWR32(adapter, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
temp = (word[i] >> 32) & 0xffffffff;
- adapter->hw_write_wx(adapter,
- mem_crb+MIU_TEST_AGT_WRDATA_HI, &temp, 4);
+ NXWR32(adapter, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
- adapter->hw_write_wx(adapter,
- mem_crb+MIU_TEST_AGT_CTRL, &temp, 4);
+ NXWR32(adapter, mem_crb+MIU_TEST_AGT_CTRL, temp);
temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
- adapter->hw_write_wx(adapter,
- mem_crb+MIU_TEST_AGT_CTRL, &temp, 4);
+ NXWR32(adapter, mem_crb+MIU_TEST_AGT_CTRL, temp);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- adapter->hw_read_wx(adapter,
- mem_crb + MIU_TEST_AGT_CTRL, &temp, 4);
+ temp = NXRD32(adapter, mem_crb + MIU_TEST_AGT_CTRL);
if ((temp & MIU_TA_CTL_BUSY) == 0)
break;
}
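
The MIU test-agent writes above follow a fixed sequence: program the address and data registers, set ENABLE|WRITE, then START|ENABLE|WRITE, and poll the control register until the BUSY bit clears, giving up after MAX_CTL_CHECK polls. A minimal model of that bounded busy-wait; the DEMO_TA_CTL_BUSY bit value and the fake register that clears after a few polls are illustrative, not the hardware definitions.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_CTL_CHECK 1000
#define DEMO_TA_CTL_BUSY   0x8	/* illustrative bit position */

/* fake test-agent control register that stays busy for a few polls */
static int busy_polls = 3;

static uint32_t read_agt_ctrl(void)
{
	if (busy_polls > 0) {
		busy_polls--;
		return DEMO_TA_CTL_BUSY;
	}
	return 0;
}

int main(void)
{
	int j;
	uint32_t temp = 0;

	/* after kicking off the transaction, poll until BUSY clears,
	 * giving up after DEMO_MAX_CTL_CHECK attempts as the driver does */
	for (j = 0; j < DEMO_MAX_CTL_CHECK; j++) {
		temp = read_agt_ctrl();
		if ((temp & DEMO_TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= DEMO_MAX_CTL_CHECK)
		printf("memory write timed out\n");
	else
		printf("memory write completed after %d polls\n", j + 1);
	return 0;
}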
@@ -2013,21 +1749,16 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
for (i = 0; i < loop; i++) {
temp = off8 + (i << 3);
- adapter->hw_write_wx(adapter,
- mem_crb + MIU_TEST_AGT_ADDR_LO, &temp, 4);
+ NXWR32(adapter, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
temp = 0;
- adapter->hw_write_wx(adapter,
- mem_crb + MIU_TEST_AGT_ADDR_HI, &temp, 4);
+ NXWR32(adapter, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
temp = MIU_TA_CTL_ENABLE;
- adapter->hw_write_wx(adapter,
- mem_crb + MIU_TEST_AGT_CTRL, &temp, 4);
+ NXWR32(adapter, mem_crb + MIU_TEST_AGT_CTRL, temp);
temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
- adapter->hw_write_wx(adapter,
- mem_crb + MIU_TEST_AGT_CTRL, &temp, 4);
+ NXWR32(adapter, mem_crb + MIU_TEST_AGT_CTRL, temp);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- adapter->hw_read_wx(adapter,
- mem_crb + MIU_TEST_AGT_CTRL, &temp, 4);
+ temp = NXRD32(adapter, mem_crb + MIU_TEST_AGT_CTRL);
if ((temp & MIU_TA_CTL_BUSY) == 0)
break;
}
@@ -2042,8 +1773,8 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
start = off0[i] >> 2;
end = (off0[i] + sz[i] - 1) >> 2;
for (k = start; k <= end; k++) {
- adapter->hw_read_wx(adapter,
- mem_crb + MIU_TEST_AGT_RDDATA(k), &temp, 4);
+ temp = NXRD32(adapter,
+ mem_crb + MIU_TEST_AGT_RDDATA(k));
word[i] |= ((uint64_t)temp << (32 * k));
}
}
@@ -2086,29 +1817,14 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
int netxen_nic_pci_write_immediate_2M(struct netxen_adapter *adapter,
u64 off, u32 data)
{
- adapter->hw_write_wx(adapter, off, &data, 4);
+ NXWR32(adapter, off, data);
return 0;
}
u32 netxen_nic_pci_read_immediate_2M(struct netxen_adapter *adapter, u64 off)
{
- u32 temp;
- adapter->hw_read_wx(adapter, off, &temp, 4);
- return temp;
-}
-
-void netxen_nic_pci_write_normalize_2M(struct netxen_adapter *adapter,
- u64 off, u32 data)
-{
- adapter->hw_write_wx(adapter, off, &data, 4);
-}
-
-u32 netxen_nic_pci_read_normalize_2M(struct netxen_adapter *adapter, u64 off)
-{
- u32 temp;
- adapter->hw_read_wx(adapter, off, &temp, 4);
- return temp;
+ return NXRD32(adapter, off);
}
int netxen_nic_get_board_info(struct netxen_adapter *adapter)
@@ -2142,13 +1858,12 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
adapter->ahw.board_type = board_type;
if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) {
- u32 gpio = netxen_nic_reg_read(adapter,
- NETXEN_ROMUSB_GLB_PAD_GPIO_I);
+ u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I);
if ((gpio & 0x8000) == 0)
board_type = NETXEN_BRDTYPE_P3_10G_TP;
}
- switch ((netxen_brdtype_t)board_type) {
+ switch (board_type) {
case NETXEN_BRDTYPE_P2_SB35_4G:
adapter->ahw.port_type = NETXEN_NIC_GBE;
break;
@@ -2195,8 +1910,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu)
{
new_mtu += MTU_FUDGE_FACTOR;
- netxen_nic_write_w0(adapter,
- NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port),
+ NXWR32(adapter, NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port),
new_mtu);
return 0;
}
@@ -2205,21 +1919,12 @@ int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
{
new_mtu += MTU_FUDGE_FACTOR;
if (adapter->physical_port == 0)
- netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE,
- new_mtu);
+ NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu);
else
- netxen_nic_write_w0(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE,
- new_mtu);
+ NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu);
return 0;
}
-void
-netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
- unsigned long off, int data)
-{
- adapter->hw_write_wx(adapter, off, &data, 4);
-}
-
void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
{
__u32 status;
@@ -2234,8 +1939,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
}
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- adapter->hw_read_wx(adapter,
- NETXEN_PORT_MODE_ADDR, &port_mode, 4);
+ port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
adapter->link_speed = SPEED_1000;
adapter->link_duplex = DUPLEX_FULL;
@@ -2312,9 +2016,9 @@ void netxen_nic_get_firmware_info(struct netxen_adapter *adapter)
addr += sizeof(u32);
}
- adapter->hw_read_wx(adapter, NETXEN_FW_VERSION_MAJOR, &fw_major, 4);
- adapter->hw_read_wx(adapter, NETXEN_FW_VERSION_MINOR, &fw_minor, 4);
- adapter->hw_read_wx(adapter, NETXEN_FW_VERSION_SUB, &fw_build, 4);
+ fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
adapter->fw_major = fw_major;
adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
@@ -2337,8 +2041,7 @@ void netxen_nic_get_firmware_info(struct netxen_adapter *adapter)
fw_major, fw_minor, fw_build);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
- adapter->hw_read_wx(adapter,
- NETXEN_MIU_MN_CONTROL, &i, 4);
+ i = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
adapter->ahw.cut_through = (i & 0x4) ? 1 : 0;
dev_info(&pdev->dev, "firmware running in %s mode\n",
adapter->ahw.cut_through ? "cut-through" : "legacy");
@@ -2353,9 +2056,9 @@ netxen_nic_wol_supported(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
- wol_cfg = netxen_nic_reg_read(adapter, NETXEN_WOL_CONFIG_NV);
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
if (wol_cfg & (1UL << adapter->portnum)) {
- wol_cfg = netxen_nic_reg_read(adapter, NETXEN_WOL_CONFIG);
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
if (wol_cfg & (1 << adapter->portnum))
return 1;
}
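Editorial note on the hunks above: throughout netxen_nic_hw.c the indirect adapter->hw_read_wx()/hw_write_wx() calls (which passed a pointer and a 4-byte length) are folded into the NXRD32()/NXWR32() helpers. Those helpers are defined in netxen_nic.h, which is outside this diffstat; as a hedged sketch only (an assumption for illustration, not text from this patch), they would reduce to thin wrappers over the per-adapter accessors, with hw_read_wx now returning the register value directly:

/* Hedged sketch -- the real definitions live in netxen_nic.h, not shown
 * in this diff; names NXRD32/NXWR32/hw_read_wx/hw_write_wx are taken from
 * the hunks above, the macro bodies are an assumption. */
#define NXRD32(adapter, off) \
	((adapter)->hw_read_wx((adapter), (off)))
#define NXWR32(adapter, off, val) \
	((adapter)->hw_write_wx((adapter), (off), (val)))

This is why each converted call site collapses from a multi-line pointer-based call into a single read or write expression.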
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index 04b47a7993c..d4e83333978 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -36,35 +36,13 @@
/* Hardware memory size of 128 meg */
#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024)
-#ifndef readq
-static inline u64 readq(void __iomem * addr)
-{
- return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(u64 val, void __iomem * addr)
-{
- writel(((u32) (val)), (addr));
- writel(((u32) (val >> 32)), (addr + 4));
-}
-#endif
-
struct netxen_adapter;
#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20)
-struct netxen_port;
void netxen_nic_set_link_parameters(struct netxen_adapter *adapter);
-typedef u8 netxen_ethernet_macaddr_t[6];
-
/* Nibble or Byte mode for phy interface (GbE mode only) */
-typedef enum {
- NETXEN_NIU_10_100_MB = 0,
- NETXEN_NIU_1000_MB
-} netxen_niu_gbe_ifmode_t;
#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1)
@@ -222,30 +200,28 @@ typedef enum {
/*
* PHY-Specific MII control/status registers.
*/
-typedef enum {
- NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL = 0,
- NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS = 1,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 = 2,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 = 3,
- NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG = 4,
- NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART = 5,
- NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE = 6,
- NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT = 7,
- NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE = 8,
- NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL = 9,
- NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS = 10,
- NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS = 15,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL = 16,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS = 17,
- NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE = 18,
- NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS = 19,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE = 20,
- NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT = 21,
- NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL = 24,
- NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE = 25,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET = 26,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE = 27
-} netxen_niu_phy_register_t;
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL 0
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS 1
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 2
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 3
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART 5
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE 6
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT 7
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE 8
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL 9
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS 10
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS 15
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL 16
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE 18
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS 19
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE 20
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT 21
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL 24
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE 25
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET 26
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE 27
/*
* PHY-Specific Status Register (reg 17).
@@ -417,14 +393,6 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
u32 mode);
-/* set the MAC address for a given MAC */
-int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
- netxen_ethernet_macaddr_t addr);
-
-/* XG version */
-int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
- netxen_ethernet_macaddr_t addr);
-
/* Generic enable for GbE ports. Will detect the speed of the link. */
int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port);
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 0759c35f16a..6f77ad58e3b 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -108,42 +108,6 @@ static void crb_addr_transform_setup(void)
crb_addr_transform(I2C0);
}
-int netxen_init_firmware(struct netxen_adapter *adapter)
-{
- u32 state = 0, loops = 0, err = 0;
-
- /* Window 1 call */
- state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);
-
- if (state == PHAN_INITIALIZE_ACK)
- return 0;
-
- while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) {
- msleep(1);
- /* Window 1 call */
- state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);
-
- loops++;
- }
- if (loops >= 2000) {
- printk(KERN_ERR "Cmd Peg initialization not complete:%x.\n",
- state);
- err = -EIO;
- return err;
- }
- /* Window 1 call */
- adapter->pci_write_normalize(adapter,
- CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
- adapter->pci_write_normalize(adapter,
- CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
- adapter->pci_write_normalize(adapter,
- CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
- adapter->pci_write_normalize(adapter,
- CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
-
- return err;
-}
-
void netxen_release_rx_buffers(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx;
@@ -173,9 +137,10 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
struct netxen_cmd_buffer *cmd_buf;
struct netxen_skb_frag *buffrag;
int i, j;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
- cmd_buf = adapter->cmd_buf_arr;
- for (i = 0; i < adapter->num_txd; i++) {
+ cmd_buf = tx_ring->cmd_buf_arr;
+ for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
if (buffrag->dma) {
pci_unmap_single(adapter->pdev, buffrag->dma,
@@ -203,20 +168,27 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
+ struct nx_host_tx_ring *tx_ring;
int ring;
recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->rds_rings == NULL)
+ goto skip_rds;
+
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
- if (rds_ring->rx_buf_arr) {
- vfree(rds_ring->rx_buf_arr);
- rds_ring->rx_buf_arr = NULL;
- }
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
}
+ kfree(recv_ctx->rds_rings);
- if (adapter->cmd_buf_arr)
- vfree(adapter->cmd_buf_arr);
- return;
+skip_rds:
+ if (adapter->tx_ring == NULL)
+ return;
+
+ tx_ring = adapter->tx_ring;
+ vfree(tx_ring->cmd_buf_arr);
}
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
@@ -224,23 +196,45 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
struct netxen_recv_context *recv_ctx;
struct nx_host_rds_ring *rds_ring;
struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
struct netxen_rx_buffer *rx_buf;
- int ring, i, num_rx_bufs;
+ int ring, i, size;
struct netxen_cmd_buffer *cmd_buf_arr;
struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
- cmd_buf_arr =
- (struct netxen_cmd_buffer *)vmalloc(TX_BUFF_RINGSIZE(adapter));
+ size = sizeof(struct nx_host_tx_ring);
+ tx_ring = kzalloc(size, GFP_KERNEL);
+ if (tx_ring == NULL) {
+ dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n",
+ netdev->name);
+ return -ENOMEM;
+ }
+ adapter->tx_ring = tx_ring;
+
+ tx_ring->num_desc = adapter->num_txd;
+
+ cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
if (cmd_buf_arr == NULL) {
- printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n",
+ dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
netdev->name);
return -ENOMEM;
}
- memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(adapter));
- adapter->cmd_buf_arr = cmd_buf_arr;
+ memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
recv_ctx = &adapter->recv_ctx;
+
+ size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring);
+ rds_ring = kzalloc(size, GFP_KERNEL);
+ if (rds_ring == NULL) {
+ dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n",
+ netdev->name);
+ return -ENOMEM;
+ }
+ recv_ctx->rds_rings = rds_ring;
+
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
switch (ring) {
@@ -292,9 +286,8 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
* Now go through all of them, set reference handles
* and put them in the queues.
*/
- num_rx_bufs = rds_ring->num_desc;
rx_buf = rds_ring->rx_buf_arr;
- for (i = 0; i < num_rx_bufs; i++) {
+ for (i = 0; i < rds_ring->num_desc; i++) {
list_add_tail(&rx_buf->list,
&rds_ring->free_list);
rx_buf->ref_handle = i;
@@ -307,8 +300,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
sds_ring->irq = adapter->msix_entries[ring].vector;
- sds_ring->clean_tx = (ring == 0);
- sds_ring->post_rxd = (ring == 0);
sds_ring->adapter = adapter;
sds_ring->num_desc = adapter->num_rxd;
@@ -325,13 +316,15 @@ err_out:
void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
{
+ adapter->macaddr_set = netxen_p2_nic_set_mac_addr;
+ adapter->set_multi = netxen_p2_nic_set_multi;
+
switch (adapter->ahw.port_type) {
case NETXEN_NIC_GBE:
adapter->enable_phy_interrupts =
netxen_niu_gbe_enable_phy_interrupts;
adapter->disable_phy_interrupts =
netxen_niu_gbe_disable_phy_interrupts;
- adapter->macaddr_set = netxen_niu_macaddr_set;
adapter->set_mtu = netxen_nic_set_mtu_gb;
adapter->set_promisc = netxen_niu_set_promiscuous_mode;
adapter->phy_read = netxen_niu_gbe_phy_read;
@@ -345,7 +338,6 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
netxen_niu_xgbe_enable_phy_interrupts;
adapter->disable_phy_interrupts =
netxen_niu_xgbe_disable_phy_interrupts;
- adapter->macaddr_set = netxen_niu_xg_macaddr_set;
adapter->set_mtu = netxen_nic_set_mtu_xgb;
adapter->init_port = netxen_niu_xg_init_port;
adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode;
@@ -359,6 +351,8 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
adapter->set_mtu = nx_fw_cmd_set_mtu;
adapter->set_promisc = netxen_p3_nic_set_promisc;
+ adapter->macaddr_set = netxen_p3_nic_set_mac_addr;
+ adapter->set_multi = netxen_p3_nic_set_multi;
}
}
@@ -400,8 +394,7 @@ static int rom_lock(struct netxen_adapter *adapter)
while (!done) {
/* acquire semaphore2 from PCI HW block */
- netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK),
- &done);
+ done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK));
if (done == 1)
break;
if (timeout >= rom_lock_timeout)
@@ -418,7 +411,7 @@ static int rom_lock(struct netxen_adapter *adapter)
cpu_relax(); /*This a nop instr on i386 */
}
}
- netxen_nic_reg_write(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+ NXWR32(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER);
return 0;
}
@@ -430,7 +423,7 @@ static int netxen_wait_rom_done(struct netxen_adapter *adapter)
cond_resched();
while (done == 0) {
- done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS);
+ done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
done &= 2;
timeout++;
if (timeout >= rom_max_timeout) {
@@ -443,30 +436,28 @@ static int netxen_wait_rom_done(struct netxen_adapter *adapter)
static void netxen_rom_unlock(struct netxen_adapter *adapter)
{
- u32 val;
-
/* release semaphore2 */
- netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK), &val);
+ NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK));
}
static int do_rom_fast_read(struct netxen_adapter *adapter,
int addr, int *valp)
{
- netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
- netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
- netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
- netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
if (netxen_wait_rom_done(adapter)) {
printk("Error waiting for rom done\n");
return -EIO;
}
/* reset abyte_cnt and dummy_byte_cnt */
- netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
udelay(10);
- netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
- *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
+ *valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
return 0;
}
@@ -530,8 +521,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
/* resetall */
rom_lock(adapter);
- netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
- 0xffffffff);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff);
netxen_rom_unlock(adapter);
if (verbose) {
@@ -655,7 +645,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
}
}
- adapter->hw_write_wx(adapter, off, &buf[i].data, 4);
+ NXWR32(adapter, off, buf[i].data);
msleep(init_delay);
}
@@ -665,36 +655,230 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
/* unreset_net_cache */
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
- adapter->hw_read_wx(adapter,
- NETXEN_ROMUSB_GLB_SW_RESET, &val, 4);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
+ val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
}
/* p2dn replyCount */
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
/* disable_peg_cache 0 */
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
/* disable_peg_cache 1 */
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);
/* peg_clr_all */
/* peg_clr 0 */
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
/* peg_clr 1 */
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
/* peg_clr 2 */
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
/* peg_clr 3 */
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
- netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
+ return 0;
+}
+
+int
+netxen_load_firmware(struct netxen_adapter *adapter)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ const struct firmware *fw = adapter->fw;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
+
+ if (fw) {
+ __le64 data;
+
+ size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)&fw->data[NETXEN_BOOTLD_START];
+ flashaddr = NETXEN_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+ adapter->pci_mem_write(adapter, flashaddr, &data, 8);
+ flashaddr += 8;
+ }
+
+ size = *(u32 *)&fw->data[NX_FW_SIZE_OFFSET];
+ size = (__force u32)cpu_to_le32(size) / 8;
+
+ ptr64 = (u64 *)&fw->data[NETXEN_IMAGE_START];
+ flashaddr = NETXEN_IMAGE_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, &data, 8))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ } else {
+ u32 data;
+
+ size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 4;
+ flashaddr = NETXEN_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ if (netxen_rom_fast_read(adapter,
+ flashaddr, (int *)&data) != 0)
+ return -EIO;
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, &data, 4))
+ return -EIO;
+
+ flashaddr += 4;
+ }
+ }
+ msleep(1);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
+ else {
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0);
+ }
+
+ return 0;
+}
+
+static int
+netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
+{
+ __le32 val;
+ u32 major, minor, build, ver, min_ver, bios;
+ struct pci_dev *pdev = adapter->pdev;
+ const struct firmware *fw = adapter->fw;
+
+ if (fw->size < NX_FW_MIN_SIZE)
+ return -EINVAL;
+
+ val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
+ if ((__force u32)val != NETXEN_BDINFO_MAGIC)
+ return -EINVAL;
+
+ val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
+ major = (__force u32)val & 0xff;
+ minor = ((__force u32)val >> 8) & 0xff;
+ build = (__force u32)val >> 16;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ min_ver = NETXEN_VERSION_CODE(4, 0, 216);
+ else
+ min_ver = NETXEN_VERSION_CODE(3, 4, 216);
+
+ ver = NETXEN_VERSION_CODE(major, minor, build);
+
+ if ((major > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
+ dev_err(&pdev->dev,
+ "%s: firmware version %d.%d.%d unsupported\n",
+ fwname, major, minor, build);
+ return -EINVAL;
+ }
+
+ val = cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
+ netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
+ if ((__force u32)val != bios) {
+ dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+ fwname);
+ return -EINVAL;
+ }
+
+ /* check if flashed firmware is newer */
+ if (netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&val))
+ return -EIO;
+ major = (__force u32)val & 0xff;
+ minor = ((__force u32)val >> 8) & 0xff;
+ build = (__force u32)val >> 16;
+ if (NETXEN_VERSION_CODE(major, minor, build) > ver)
+ return -EINVAL;
+
+ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
return 0;
}
+static char *fw_name[] = { "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin" };
+
+void netxen_request_firmware(struct netxen_adapter *adapter)
+{
+ u32 capability, flashed_ver;
+ int fw_type;
+ struct pci_dev *pdev = adapter->pdev;
+ int rc = 0;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ fw_type = NX_P2_MN_ROMIMAGE;
+ goto request_fw;
+ } else {
+ fw_type = NX_P3_CT_ROMIMAGE;
+ goto request_fw;
+ }
+
+request_mn:
+ capability = 0;
+
+ netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
+ capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
+ if (capability & NX_PEG_TUNE_MN_PRESENT) {
+ fw_type = NX_P3_MN_ROMIMAGE;
+ goto request_fw;
+ }
+ }
+
+request_fw:
+ rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev);
+ if (rc != 0) {
+ if (fw_type == NX_P3_CT_ROMIMAGE) {
+ msleep(1);
+ goto request_mn;
+ }
+
+ adapter->fw = NULL;
+ goto done;
+ }
+
+ rc = netxen_validate_firmware(adapter, fw_name[fw_type]);
+ if (rc != 0) {
+ release_firmware(adapter->fw);
+
+ if (fw_type == NX_P3_CT_ROMIMAGE) {
+ msleep(1);
+ goto request_mn;
+ }
+
+ adapter->fw = NULL;
+ goto done;
+ }
+
+done:
+ if (adapter->fw)
+ dev_info(&pdev->dev, "loading firmware from file %s\n",
+ fw_name[fw_type]);
+ else
+ dev_info(&pdev->dev, "loading firmware from flash\n");
+}
+
+
+void
+netxen_release_firmware(struct netxen_adapter *adapter)
+{
+ if (adapter->fw)
+ release_firmware(adapter->fw);
+}
+
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
{
uint64_t addr;
@@ -715,12 +899,12 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
hi = (addr >> 32) & 0xffffffff;
lo = addr & 0xffffffff;
- adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
- adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);
+ NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
+ NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
uint32_t temp = 0;
- adapter->hw_write_wx(adapter, CRB_HOST_DUMMY_BUF, &temp, 4);
+ NXWR32(adapter, CRB_HOST_DUMMY_BUF, temp);
}
return 0;
@@ -762,8 +946,7 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
if (!pegtune_val) {
do {
- val = adapter->pci_read_normalize(adapter,
- CRB_CMDPEG_STATE);
+ val = NXRD32(adapter, CRB_CMDPEG_STATE);
if (val == PHAN_INITIALIZE_COMPLETE ||
val == PHAN_INITIALIZE_ACK)
@@ -774,7 +957,7 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
} while (--retries);
if (!retries) {
- pegtune_val = adapter->pci_read_normalize(adapter,
+ pegtune_val = NXRD32(adapter,
NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
printk(KERN_WARNING "netxen_phantom_init: init failed, "
"pegtune_val=%x\n", pegtune_val);
@@ -785,13 +968,14 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
return 0;
}
-int netxen_receive_peg_ready(struct netxen_adapter *adapter)
+static int
+netxen_receive_peg_ready(struct netxen_adapter *adapter)
{
u32 val = 0;
int retries = 2000;
do {
- val = adapter->pci_read_normalize(adapter, CRB_RCVPEG_STATE);
+ val = NXRD32(adapter, CRB_RCVPEG_STATE);
if (val == PHAN_PEG_RCV_INITIALIZED)
return 0;
@@ -809,6 +993,93 @@ int netxen_receive_peg_ready(struct netxen_adapter *adapter)
return 0;
}
+int netxen_init_firmware(struct netxen_adapter *adapter)
+{
+ int err;
+
+ err = netxen_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
+ NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+ NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
+ NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222)) {
+ adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+ }
+
+ return err;
+}
+
+static void
+netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
+{
+ u32 cable_OUI;
+ u16 cable_len;
+ u16 link_speed;
+ u8 link_status, module, duplex, autoneg;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->has_link_events = 1;
+
+ cable_OUI = msg->body[1] & 0xffffffff;
+ cable_len = (msg->body[1] >> 32) & 0xffff;
+ link_speed = (msg->body[1] >> 48) & 0xffff;
+
+ link_status = msg->body[2] & 0xff;
+ duplex = (msg->body[2] >> 16) & 0xff;
+ autoneg = (msg->body[2] >> 24) & 0xff;
+
+ module = (msg->body[2] >> 8) & 0xff;
+ if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
+ printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
+ netdev->name, cable_OUI, cable_len);
+ } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
+ printk(KERN_INFO "%s: unsupported cable length %d\n",
+ netdev->name, cable_len);
+ }
+
+ netxen_advert_link_change(adapter, link_status);
+
+ /* update link parameters */
+ if (duplex == LINKEVENT_FULL_DUPLEX)
+ adapter->link_duplex = DUPLEX_FULL;
+ else
+ adapter->link_duplex = DUPLEX_HALF;
+ adapter->module_type = module;
+ adapter->link_autoneg = autoneg;
+ adapter->link_speed = link_speed;
+}
+
+static void
+netxen_handle_fw_message(int desc_cnt, int index,
+ struct nx_host_sds_ring *sds_ring)
+{
+ nx_fw_msg_t msg;
+ struct status_desc *desc;
+ int i = 0, opcode;
+
+ while (desc_cnt > 0 && i < 8) {
+ desc = &sds_ring->desc_head[index];
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+ index = get_next_index(index, sds_ring->num_desc);
+ desc_cnt--;
+ }
+
+ opcode = netxen_get_nic_msg_opcode(msg.body[0]);
+ switch (opcode) {
+ case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+ netxen_handle_linkevent(sds_ring->adapter, &msg);
+ break;
+ default:
+ break;
+ }
+}
+
static int
netxen_alloc_rx_skb(struct netxen_adapter *adapter,
struct nx_host_rds_ring *rds_ring,
@@ -874,7 +1145,8 @@ no_skb:
static struct netxen_rx_buffer *
netxen_process_rcv(struct netxen_adapter *adapter,
- int ring, int index, int length, int cksum, int pkt_offset)
+ int ring, int index, int length, int cksum, int pkt_offset,
+ struct nx_host_sds_ring *sds_ring)
{
struct net_device *netdev = adapter->netdev;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -902,7 +1174,7 @@ netxen_process_rcv(struct netxen_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev);
- netif_receive_skb(skb);
+ napi_gro_receive(&sds_ring->napi, skb);
adapter->stats.no_rcv++;
adapter->stats.rxbytes += length;
@@ -927,35 +1199,53 @@ netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
int count = 0;
u64 sts_data;
- int opcode, ring, index, length, cksum, pkt_offset;
+ int opcode, ring, index, length, cksum, pkt_offset, desc_cnt;
while (count < max) {
desc = &sds_ring->desc_head[consumer];
- sts_data = le64_to_cpu(desc->status_desc_data);
+ sts_data = le64_to_cpu(desc->status_desc_data[0]);
if (!(sts_data & STATUS_OWNER_HOST))
break;
+ desc_cnt = netxen_get_sts_desc_cnt(sts_data);
ring = netxen_get_sts_type(sts_data);
+
if (ring > RCV_RING_JUMBO)
- continue;
+ goto skip;
opcode = netxen_get_sts_opcode(sts_data);
+ switch (opcode) {
+ case NETXEN_NIC_RXPKT_DESC:
+ case NETXEN_OLD_RXPKT_DESC:
+ break;
+ case NETXEN_NIC_RESPONSE_DESC:
+ netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
+ default:
+ goto skip;
+ }
+
+ WARN_ON(desc_cnt > 1);
+
index = netxen_get_sts_refhandle(sts_data);
length = netxen_get_sts_totallength(sts_data);
cksum = netxen_get_sts_status(sts_data);
pkt_offset = netxen_get_sts_pkt_offset(sts_data);
rxbuf = netxen_process_rcv(adapter, ring, index,
- length, cksum, pkt_offset);
+ length, cksum, pkt_offset, sds_ring);
if (rxbuf)
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
- desc->status_desc_data = cpu_to_le64(STATUS_OWNER_PHANTOM);
-
- consumer = get_next_index(consumer, sds_ring->num_desc);
+skip:
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] =
+ cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
count++;
}
@@ -980,8 +1270,7 @@ netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
if (count) {
sds_ring->consumer = consumer;
- adapter->pci_write_normalize(adapter,
- sds_ring->crb_sts_consumer, consumer);
+ NXWR32(adapter, sds_ring->crb_sts_consumer, consumer);
}
return count;
@@ -990,23 +1279,24 @@ netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
/* Process Command status ring */
int netxen_process_cmd_ring(struct netxen_adapter *adapter)
{
- u32 last_consumer, consumer;
+ u32 sw_consumer, hw_consumer;
int count = 0, i;
struct netxen_cmd_buffer *buffer;
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct netxen_skb_frag *frag;
int done = 0;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
if (!spin_trylock(&adapter->tx_clean_lock))
return 1;
- last_consumer = adapter->last_cmd_consumer;
- barrier(); /* cmd_consumer can change underneath */
- consumer = le32_to_cpu(*(adapter->cmd_consumer));
+ sw_consumer = tx_ring->sw_consumer;
+ barrier(); /* hw_consumer can change underneath */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
- while (last_consumer != consumer) {
- buffer = &adapter->cmd_buf_arr[last_consumer];
+ while (sw_consumer != hw_consumer) {
+ buffer = &tx_ring->cmd_buf_arr[sw_consumer];
if (buffer->skb) {
frag = &buffer->frag_array[0];
pci_unmap_single(pdev, frag->dma, frag->length,
@@ -1024,16 +1314,16 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
buffer->skb = NULL;
}
- last_consumer = get_next_index(last_consumer,
- adapter->num_txd);
+ sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
if (++count >= MAX_STATUS_HANDLE)
break;
}
- if (count) {
- adapter->last_cmd_consumer = last_consumer;
+ tx_ring->sw_consumer = sw_consumer;
+
+ if (count && netif_running(netdev)) {
smp_mb();
- if (netif_queue_stopped(netdev) && netif_running(netdev)) {
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
netif_tx_lock(netdev);
netif_wake_queue(netdev);
smp_mb();
@@ -1053,9 +1343,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
* There is still a possible race condition and the host could miss an
* interrupt. The card has to take care of this.
*/
- barrier(); /* cmd_consumer can change underneath */
- consumer = le32_to_cpu(*(adapter->cmd_consumer));
- done = (last_consumer == consumer);
+ barrier(); /* hw_consumer can change underneath */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+ done = (sw_consumer == hw_consumer);
spin_unlock(&adapter->tx_clean_lock);
return (done);
@@ -1099,8 +1389,7 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
if (count) {
rds_ring->producer = producer;
- adapter->pci_write_normalize(adapter,
- rds_ring->crb_rcv_producer,
+ NXWR32(adapter, rds_ring->crb_rcv_producer,
(producer-1) & (rds_ring->num_desc-1));
if (adapter->fw_major < 4) {
@@ -1160,10 +1449,8 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
if (count) {
rds_ring->producer = producer;
- adapter->pci_write_normalize(adapter,
- rds_ring->crb_rcv_producer,
+ NXWR32(adapter, rds_ring->crb_rcv_producer,
(producer - 1) & (rds_ring->num_desc - 1));
- wmb();
}
spin_unlock(&rds_ring->lock);
}
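Editorial note on the receive/transmit hunks above: both paths step through their rings with get_next_index(), which is defined in netxen_nic.h rather than in these hunks. A plausible minimal form (an assumption for illustration, not taken from this patch) is a mask-based wraparound, consistent with the (producer - 1) & (rds_ring->num_desc - 1) masking visible in netxen_post_rx_buffers() above:

/* Hedged sketch -- the driver's own helper lives in netxen_nic.h and may
 * differ in detail; ring sizes (num_desc) are powers of two, so the
 * increment can wrap with a mask instead of a modulo. */
static inline u32 get_next_index(u32 index, u32 ring_size)
{
	return (index + 1) & (ring_size - 1);
}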
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index aef77289bd3..98737ef7293 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -29,7 +29,7 @@
*/
#include <linux/vmalloc.h>
-#include <linux/highmem.h>
+#include <linux/interrupt.h>
#include "netxen_nic_hw.h"
#include "netxen_nic.h"
@@ -107,10 +107,9 @@ static uint32_t crb_cmd_producer[4] = {
void
netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
- uint32_t crb_producer)
+ struct nx_host_tx_ring *tx_ring, u32 producer)
{
- adapter->pci_write_normalize(adapter,
- adapter->crb_addr_cmd_producer, crb_producer);
+ NXWR32(adapter, tx_ring->crb_cmd_producer, producer);
}
static uint32_t crb_cmd_consumer[4] = {
@@ -120,10 +119,9 @@ static uint32_t crb_cmd_consumer[4] = {
static inline void
netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
- u32 crb_consumer)
+ struct nx_host_tx_ring *tx_ring, u32 consumer)
{
- adapter->pci_write_normalize(adapter,
- adapter->crb_addr_cmd_consumer, crb_consumer);
+ NXWR32(adapter, tx_ring->crb_cmd_consumer, consumer);
}
static uint32_t msi_tgt_status[8] = {
@@ -139,37 +137,54 @@ static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
{
struct netxen_adapter *adapter = sds_ring->adapter;
- adapter->pci_write_normalize(adapter, sds_ring->crb_intr_mask, 0);
+ NXWR32(adapter, sds_ring->crb_intr_mask, 0);
}
static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
{
struct netxen_adapter *adapter = sds_ring->adapter;
- adapter->pci_write_normalize(adapter, sds_ring->crb_intr_mask, 0x1);
+ NXWR32(adapter, sds_ring->crb_intr_mask, 0x1);
if (!NETXEN_IS_MSI_FAMILY(adapter))
adapter->pci_write_immediate(adapter,
adapter->legacy_intr.tgt_mask_reg, 0xfbff);
}
+static int
+netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
+{
+ int size = sizeof(struct nx_host_sds_ring) * count;
+
+ recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+ return (recv_ctx->sds_rings == NULL);
+}
+
static void
+netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
+{
+ if (recv_ctx->sds_rings != NULL)
+ kfree(recv_ctx->sds_rings);
+}
+
+static int
netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
{
int ring;
struct nx_host_sds_ring *sds_ring;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
- if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
- adapter->max_sds_rings = (num_online_cpus() >= 4) ? 4 : 2;
- else
- adapter->max_sds_rings = 1;
+ if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return 1;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_add(netdev, &sds_ring->napi,
netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
}
+
+ return 0;
}
static void
@@ -195,8 +210,9 @@ netxen_napi_disable(struct netxen_adapter *adapter)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- netxen_nic_disable_int(sds_ring);
napi_disable(&sds_ring->napi);
+ netxen_nic_disable_int(sds_ring);
+ synchronize_irq(sds_ring->irq);
}
}
@@ -240,7 +256,7 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
change = 0;
- shift = netxen_nic_reg_read(adapter, CRB_DMA_SHIFT);
+ shift = NXRD32(adapter, CRB_DMA_SHIFT);
if (shift >= 32)
return 0;
@@ -268,10 +284,21 @@ static void netxen_check_options(struct netxen_adapter *adapter)
else if (adapter->ahw.port_type == NETXEN_NIC_GBE)
adapter->num_rxd = MAX_RCV_DESCRIPTORS_1G;
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ adapter->msix_supported = 0;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
adapter->msix_supported = !!use_msi_x;
- else
- adapter->msix_supported = 0;
+ adapter->rss_supported = !!use_msi_x;
+ } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) {
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+ break;
+ default:
+ break;
+ }
+ }
adapter->num_txd = MAX_CMD_DESCRIPTORS_HOST;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS;
@@ -287,43 +314,34 @@ netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
if (first_boot == 0x55555555) {
/* This is the first boot after power up */
- adapter->pci_write_normalize(adapter,
- NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
+ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
/* PCI bus master workaround */
- adapter->hw_read_wx(adapter,
- NETXEN_PCIE_REG(0x4), &first_boot, 4);
+ first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
if (!(first_boot & 0x4)) {
first_boot |= 0x4;
- adapter->hw_write_wx(adapter,
- NETXEN_PCIE_REG(0x4), &first_boot, 4);
- adapter->hw_read_wx(adapter,
- NETXEN_PCIE_REG(0x4), &first_boot, 4);
+ NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
+ first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
}
/* This is the first boot after power up */
- adapter->hw_read_wx(adapter,
- NETXEN_ROMUSB_GLB_SW_RESET, &first_boot, 4);
+ first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
if (first_boot != 0x80000f) {
/* clear the register for future unloads/loads */
- adapter->pci_write_normalize(adapter,
- NETXEN_CAM_RAM(0x1fc), 0);
+ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
return -EIO;
}
/* Start P2 boot loader */
- val = adapter->pci_read_normalize(adapter,
- NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
- adapter->pci_write_normalize(adapter,
- NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
+ val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
timeout = 0;
do {
msleep(1);
- val = adapter->pci_read_normalize(adapter,
- NETXEN_CAM_RAM(0x1fc));
+ val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
if (++timeout > 5000)
return -EIO;
@@ -342,24 +360,19 @@ static void netxen_set_port_mode(struct netxen_adapter *adapter)
(val == NETXEN_BRDTYPE_P3_XG_LOM)) {
if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
data = NETXEN_PORT_MODE_802_3_AP;
- adapter->hw_write_wx(adapter,
- NETXEN_PORT_MODE_ADDR, &data, 4);
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
} else if (port_mode == NETXEN_PORT_MODE_XG) {
data = NETXEN_PORT_MODE_XG;
- adapter->hw_write_wx(adapter,
- NETXEN_PORT_MODE_ADDR, &data, 4);
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
} else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
data = NETXEN_PORT_MODE_AUTO_NEG_1G;
- adapter->hw_write_wx(adapter,
- NETXEN_PORT_MODE_ADDR, &data, 4);
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
} else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
data = NETXEN_PORT_MODE_AUTO_NEG_XG;
- adapter->hw_write_wx(adapter,
- NETXEN_PORT_MODE_ADDR, &data, 4);
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
} else {
data = NETXEN_PORT_MODE_AUTO_NEG;
- adapter->hw_write_wx(adapter,
- NETXEN_PORT_MODE_ADDR, &data, 4);
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
}
if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
@@ -368,8 +381,7 @@ static void netxen_set_port_mode(struct netxen_adapter *adapter)
(wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
}
- adapter->hw_write_wx(adapter, NETXEN_WOL_PORT_MODE,
- &wol_port_mode, 4);
+ NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
}
}
@@ -389,11 +401,11 @@ static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
}
}
-static void netxen_init_msix_entries(struct netxen_adapter *adapter)
+static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
{
int i;
- for (i = 0; i < MSIX_ENTRIES_PER_ADAPTER; i++)
+ for (i = 0; i < count; i++)
adapter->msix_entries[i].entry = i;
}
@@ -424,20 +436,38 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
if (!is_valid_ether_addr(netdev->perm_addr))
dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
- else
- adapter->macaddr_set(adapter, netdev->dev_addr);
return 0;
}
+int netxen_nic_set_mac(struct net_device *netdev, void *p)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ netxen_napi_disable(adapter);
+ }
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ adapter->macaddr_set(adapter, addr->sa_data);
+
+ if (netif_running(netdev)) {
+ netif_device_attach(netdev);
+ netxen_napi_enable(adapter);
+ }
+ return 0;
+}
+
static void netxen_set_multicast_list(struct net_device *dev)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- netxen_p3_nic_set_multi(dev);
- else
- netxen_p2_nic_set_multi(dev);
+ adapter->set_multi(dev);
}
static const struct net_device_ops netxen_netdev_ops = {
@@ -460,10 +490,17 @@ netxen_setup_intr(struct netxen_adapter *adapter)
{
struct netxen_legacy_intr_set *legacy_intrp;
struct pci_dev *pdev = adapter->pdev;
+ int err, num_msix;
+
+ if (adapter->rss_supported) {
+ num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
+ MSIX_ENTRIES_PER_ADAPTER : 2;
+ } else
+ num_msix = 1;
+
+ adapter->max_sds_rings = 1;
adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
- adapter->intr_scheme = -1;
- adapter->msi_mode = -1;
if (adapter->ahw.revision_id >= NX_P3_B0)
legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
@@ -478,24 +515,36 @@ netxen_setup_intr(struct netxen_adapter *adapter)
if (adapter->msix_supported) {
- netxen_init_msix_entries(adapter);
- if (pci_enable_msix(pdev, adapter->msix_entries,
- MSIX_ENTRIES_PER_ADAPTER))
- goto request_msi;
+ netxen_init_msix_entries(adapter, num_msix);
+ err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
+ if (err == 0) {
+ adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
+ netxen_set_msix_bit(pdev, 1);
- adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
- netxen_set_msix_bit(pdev, 1);
- dev_info(&pdev->dev, "using msi-x interrupts\n");
+ if (adapter->rss_supported)
+ adapter->max_sds_rings = num_msix;
- } else {
-request_msi:
- if (use_msi && !pci_enable_msi(pdev)) {
- adapter->flags |= NETXEN_NIC_MSI_ENABLED;
- dev_info(&pdev->dev, "using msi interrupts\n");
- } else
- dev_info(&pdev->dev, "using legacy interrupts\n");
+ dev_info(&pdev->dev, "using msi-x interrupts\n");
+ return;
+ }
+
+ if (err > 0)
+ pci_disable_msix(pdev);
+
+ /* fall through for msi */
+ }
+
+ if (use_msi && !pci_enable_msi(pdev)) {
+ adapter->flags |= NETXEN_NIC_MSI_ENABLED;
+ adapter->msi_tgt_status =
+ msi_tgt_status[adapter->ahw.pci_func];
+ dev_info(&pdev->dev, "using msi interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
+ return;
}
+
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
}
static void
@@ -552,8 +601,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
adapter->hw_read_wx = netxen_nic_hw_read_wx_128M;
adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M;
adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M;
- adapter->pci_read_normalize = netxen_nic_pci_read_normalize_128M;
- adapter->pci_write_normalize = netxen_nic_pci_write_normalize_128M;
adapter->pci_set_window = netxen_nic_pci_set_window_128M;
adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
@@ -575,9 +622,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M;
adapter->pci_write_immediate =
netxen_nic_pci_write_immediate_2M;
- adapter->pci_read_normalize = netxen_nic_pci_read_normalize_2M;
- adapter->pci_write_normalize =
- netxen_nic_pci_write_normalize_2M;
adapter->pci_set_window = netxen_nic_pci_set_window_2M;
adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
@@ -643,25 +687,22 @@ err_out:
}
static int
-netxen_start_firmware(struct netxen_adapter *adapter)
+netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
{
int val, err, first_boot;
struct pci_dev *pdev = adapter->pdev;
int first_driver = 0;
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
- if (adapter->ahw.pci_func == 0)
- first_driver = 1;
- } else {
- if (adapter->portnum == 0)
- first_driver = 1;
- }
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ first_driver = (adapter->portnum == 0);
+ else
+ first_driver = (adapter->ahw.pci_func == 0);
if (!first_driver)
return 0;
- first_boot = adapter->pci_read_normalize(adapter,
- NETXEN_CAM_RAM(0x1fc));
+ first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
err = netxen_check_hw_init(adapter, first_boot);
if (err) {
@@ -669,14 +710,16 @@ netxen_start_firmware(struct netxen_adapter *adapter)
return err;
}
+ if (request_fw)
+ netxen_request_firmware(adapter);
+
if (first_boot != 0x55555555) {
- adapter->pci_write_normalize(adapter,
- CRB_CMDPEG_STATE, 0);
+ NXWR32(adapter, CRB_CMDPEG_STATE, 0);
netxen_pinit_from_rom(adapter, 0);
msleep(1);
}
- netxen_nic_reg_write(adapter, CRB_DMA_SHIFT, 0x55555555);
+ NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
netxen_set_port_mode(adapter);
@@ -688,8 +731,7 @@ netxen_start_firmware(struct netxen_adapter *adapter)
val = 0x7654;
if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
val |= 0x0f000000;
- netxen_crb_writelit_adapter(adapter,
- NETXEN_MAC_ADDR_CNTL_REG, val);
+ NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
}
@@ -703,7 +745,7 @@ netxen_start_firmware(struct netxen_adapter *adapter)
val = (_NETXEN_NIC_LINUX_MAJOR << 16)
| ((_NETXEN_NIC_LINUX_MINOR << 8))
| (_NETXEN_NIC_LINUX_SUBVERSION);
- adapter->pci_write_normalize(adapter, CRB_DRIVER_VERSION, val);
+ NXWR32(adapter, CRB_DRIVER_VERSION, val);
/* Handshake with the card before we register the devices. */
err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
@@ -726,15 +768,6 @@ netxen_nic_request_irq(struct netxen_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
- if ((adapter->msi_mode != MSI_MODE_MULTIFUNC) ||
- (adapter->intr_scheme != INTR_SCHEME_PERPORT)) {
- printk(KERN_ERR "%s: Firmware interrupt scheme is "
- "incompatible with driver\n",
- netdev->name);
- adapter->driver_mismatch = 1;
- return -EINVAL;
- }
-
if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
handler = netxen_msix_intr;
else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
@@ -747,7 +780,7 @@ netxen_nic_request_irq(struct netxen_adapter *adapter)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- sprintf(sds_ring->name, "%16s[%d]", netdev->name, ring);
+ sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
err = request_irq(sds_ring->irq, handler,
flags, sds_ring->name, sds_ring);
if (err)
@@ -782,22 +815,26 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
netxen_nic_driver_name, adapter->portnum);
return err;
}
- adapter->macaddr_set(adapter, netdev->dev_addr);
-
- netxen_nic_set_link_parameters(adapter);
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ adapter->macaddr_set(adapter, netdev->dev_addr);
- netxen_set_multicast_list(netdev);
- if (adapter->set_mtu)
- adapter->set_mtu(adapter, netdev->mtu);
+ adapter->set_multi(netdev);
+ adapter->set_mtu(adapter, netdev->mtu);
adapter->ahw.linkup = 0;
- mod_timer(&adapter->watchdog_timer, jiffies);
netxen_napi_enable(adapter);
if (adapter->max_sds_rings > 1)
netxen_config_rss(adapter, 1);
+ if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+ netxen_linkevent_request(adapter, 1);
+ else
+ netxen_nic_set_link_parameters(adapter);
+
+ mod_timer(&adapter->watchdog_timer, jiffies);
+
return 0;
}
@@ -806,11 +843,15 @@ netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
{
netif_carrier_off(netdev);
netif_stop_queue(netdev);
- netxen_napi_disable(adapter);
if (adapter->stop_port)
adapter->stop_port(adapter);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_p3_free_mac_list(adapter);
+
+ netxen_napi_disable(adapter);
+
netxen_release_tx_buffers(adapter);
FLUSH_SCHEDULED_WORK();
@@ -825,6 +866,7 @@ netxen_nic_attach(struct netxen_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int err, ring;
struct nx_host_rds_ring *rds_ring;
+ struct nx_host_tx_ring *tx_ring;
err = netxen_init_firmware(adapter);
if (err != 0) {
@@ -854,13 +896,12 @@ netxen_nic_attach(struct netxen_adapter *adapter)
}
if (adapter->fw_major < 4) {
- adapter->crb_addr_cmd_producer =
- crb_cmd_producer[adapter->portnum];
- adapter->crb_addr_cmd_consumer =
- crb_cmd_consumer[adapter->portnum];
+ tx_ring = adapter->tx_ring;
+ tx_ring->crb_cmd_producer = crb_cmd_producer[adapter->portnum];
+ tx_ring->crb_cmd_consumer = crb_cmd_consumer[adapter->portnum];
- netxen_nic_update_cmd_producer(adapter, 0);
- netxen_nic_update_cmd_consumer(adapter, 0);
+ netxen_nic_update_cmd_producer(adapter, tx_ring, 0);
+ netxen_nic_update_cmd_consumer(adapter, tx_ring, 0);
}
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -889,10 +930,9 @@ err_out_free_sw:
static void
netxen_nic_detach(struct netxen_adapter *adapter)
{
- netxen_nic_free_irq(adapter);
-
netxen_release_rx_buffers(adapter);
netxen_free_hw_resources(adapter);
+ netxen_nic_free_irq(adapter);
netxen_free_sw_resources(adapter);
adapter->is_up = 0;
@@ -957,6 +997,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rwlock_init(&adapter->adapter_lock);
spin_lock_init(&adapter->tx_clean_lock);
+ INIT_LIST_HEAD(&adapter->mac_list);
err = netxen_setup_pci_map(adapter);
if (err)
@@ -979,6 +1020,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+ netdev->features |= (NETIF_F_GRO);
netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
if (NX_IS_REVISION_P3(revision_id)) {
@@ -1011,7 +1053,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- err = netxen_start_firmware(adapter);
+ err = netxen_start_firmware(adapter, 1);
if (err)
goto err_out_iounmap;
@@ -1024,8 +1066,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
adapter->physical_port = adapter->portnum;
if (adapter->fw_major < 4) {
- i = adapter->pci_read_normalize(adapter,
- CRB_V2P(adapter->portnum));
+ i = NXRD32(adapter, CRB_V2P(adapter->portnum));
if (i != 0x55555555)
adapter->physical_port = i;
}
@@ -1036,10 +1077,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->irq = adapter->msix_entries[0].vector;
- netxen_napi_add(adapter, netdev);
-
- err = netxen_receive_peg_ready(adapter);
- if (err)
+ if (netxen_napi_add(adapter, netdev))
goto err_out_disable_msi;
init_timer(&adapter->watchdog_timer);
@@ -1113,18 +1151,18 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
netxen_nic_detach(adapter);
-
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- netxen_p3_free_mac_list(adapter);
}
if (adapter->portnum == 0)
netxen_free_adapter_offload(adapter);
netxen_teardown_intr(adapter);
+ netxen_free_sds_rings(&adapter->recv_ctx);
netxen_cleanup_pci_map(adapter);
+ netxen_release_firmware(adapter);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -1176,7 +1214,7 @@ netxen_nic_resume(struct pci_dev *pdev)
adapter->curr_window = 255;
- err = netxen_start_firmware(adapter);
+ err = netxen_start_firmware(adapter, 0);
if (err) {
dev_err(&pdev->dev, "failed to start firmware\n");
return err;
@@ -1315,7 +1353,7 @@ static int
netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct netxen_hardware_context *hw = &adapter->ahw;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
unsigned int first_seg_len = skb->len - skb->data_len;
struct netxen_cmd_buffer *pbuf;
struct netxen_skb_frag *buffrag;
@@ -1326,28 +1364,26 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
u32 producer, consumer;
int frag_count, no_of_desc;
- u32 num_txd = adapter->num_txd;
+ u32 num_txd = tx_ring->num_desc;
bool is_tso = false;
frag_count = skb_shinfo(skb)->nr_frags + 1;
- /* There 4 fragments per descriptor */
+ /* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
- producer = adapter->cmd_producer;
+ producer = tx_ring->producer;
smp_mb();
- consumer = adapter->last_cmd_consumer;
- if ((no_of_desc+2) > find_diff_among(producer, consumer, num_txd)) {
+ consumer = tx_ring->sw_consumer;
+ if ((no_of_desc+2) >= find_diff_among(producer, consumer, num_txd)) {
netif_stop_queue(netdev);
smp_mb();
return NETDEV_TX_BUSY;
}
- /* Copy the descriptors into the hardware */
- hwdesc = &hw->cmd_desc_head[producer];
+ hwdesc = &tx_ring->desc_head[producer];
netxen_clear_cmddesc((u64 *)hwdesc);
- /* Take skb->data itself */
- pbuf = &adapter->cmd_buf_arr[producer];
+ pbuf = &tx_ring->cmd_buf_arr[producer];
is_tso = netxen_tso_check(netdev, hwdesc, skb);
@@ -1376,9 +1412,9 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if ((i & 0x3) == 0) {
k = 0;
producer = get_next_index(producer, num_txd);
- hwdesc = &hw->cmd_desc_head[producer];
+ hwdesc = &tx_ring->desc_head[producer];
netxen_clear_cmddesc((u64 *)hwdesc);
- pbuf = &adapter->cmd_buf_arr[producer];
+ pbuf = &tx_ring->cmd_buf_arr[producer];
pbuf->skb = NULL;
}
frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1430,8 +1466,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
more_hdr = 0;
}
/* copy the MAC/IP/TCP headers to the cmd descriptor list */
- hwdesc = &hw->cmd_desc_head[producer];
- pbuf = &adapter->cmd_buf_arr[producer];
+ hwdesc = &tx_ring->desc_head[producer];
+ pbuf = &tx_ring->cmd_buf_arr[producer];
pbuf->skb = NULL;
/* copy the first 64 bytes */
@@ -1440,8 +1476,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
producer = get_next_index(producer, num_txd);
if (more_hdr) {
- hwdesc = &hw->cmd_desc_head[producer];
- pbuf = &adapter->cmd_buf_arr[producer];
+ hwdesc = &tx_ring->desc_head[producer];
+ pbuf = &tx_ring->cmd_buf_arr[producer];
pbuf->skb = NULL;
/* copy the next 64 bytes - should be enough except
* for pathological case
@@ -1454,13 +1490,12 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
}
- adapter->cmd_producer = producer;
+ tx_ring->producer = producer;
adapter->stats.txbytes += skb->len;
- netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);
+ netxen_nic_update_cmd_producer(adapter, tx_ring, producer);
adapter->stats.xmitcalled++;
- netdev->trans_start = jiffies;
return NETDEV_TX_OK;
@@ -1476,7 +1511,7 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter)
uint32_t temp, temp_state, temp_val;
int rv = 0;
- temp = adapter->pci_read_normalize(adapter, CRB_TEMP_STATE);
+ temp = NXRD32(adapter, CRB_TEMP_STATE);
temp_state = nx_get_temp_state(temp);
temp_val = nx_get_temp_val(temp);
@@ -1510,26 +1545,9 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter)
return rv;
}
-static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
+void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
{
struct net_device *netdev = adapter->netdev;
- u32 val, port, linkup;
-
- port = adapter->physical_port;
-
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
- val = adapter->pci_read_normalize(adapter, CRB_XG_STATE_P3);
- val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
- linkup = (val == XG_LINK_UP_P3);
- } else {
- val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
- if (adapter->ahw.port_type == NETXEN_NIC_GBE)
- linkup = (val >> port) & 1;
- else {
- val = (val >> port*8) & 0xff;
- linkup = (val == XG_LINK_UP);
- }
- }
if (adapter->ahw.linkup && !linkup) {
printk(KERN_INFO "%s: %s NIC Link is down\n",
@@ -1540,7 +1558,9 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
netif_stop_queue(netdev);
}
- netxen_nic_set_link_parameters(adapter);
+ if (!adapter->has_link_events)
+ netxen_nic_set_link_parameters(adapter);
+
} else if (!adapter->ahw.linkup && linkup) {
printk(KERN_INFO "%s: %s NIC Link is up\n",
netxen_nic_driver_name, netdev->name);
@@ -1550,8 +1570,32 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
netif_wake_queue(netdev);
}
- netxen_nic_set_link_parameters(adapter);
+ if (!adapter->has_link_events)
+ netxen_nic_set_link_parameters(adapter);
+ }
+}
+
+static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
+{
+ u32 val, port, linkup;
+
+ port = adapter->physical_port;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ linkup = (val == XG_LINK_UP_P3);
+ } else {
+ val = NXRD32(adapter, CRB_XG_STATE);
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE)
+ linkup = (val >> port) & 1;
+ else {
+ val = (val >> port*8) & 0xff;
+ linkup = (val == XG_LINK_UP);
+ }
}
+
+ netxen_advert_link_change(adapter, linkup);
}
static void netxen_watchdog(unsigned long v)
@@ -1569,7 +1613,8 @@ void netxen_watchdog_task(struct work_struct *work)
if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter))
return;
- netxen_nic_handle_phy_intr(adapter);
+ if (!adapter->has_link_events)
+ netxen_nic_handle_phy_intr(adapter);
if (netif_running(adapter->netdev))
mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
@@ -1598,10 +1643,6 @@ static void netxen_tx_timeout_task(struct work_struct *work)
netif_wake_queue(adapter->netdev);
}
-/*
- * netxen_nic_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- */
struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
@@ -1609,22 +1650,11 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
memset(stats, 0, sizeof(*stats));
- /* total packets received */
stats->rx_packets = adapter->stats.no_rcv;
- /* total packets transmitted */
- stats->tx_packets = adapter->stats.xmitedframes +
- adapter->stats.xmitfinished;
- /* total bytes received */
+ stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes;
- /* total bytes transmitted */
stats->tx_bytes = adapter->stats.txbytes;
- /* bad packets received */
- stats->rx_errors = adapter->stats.rcvdbadskb;
- /* packet transmit problems */
- stats->tx_errors = adapter->stats.nocmddescriptor;
- /* no space in linux buffers */
stats->rx_dropped = adapter->stats.rxdropped;
- /* no space available in linux */
stats->tx_dropped = adapter->stats.txdropped;
return stats;
@@ -1651,15 +1681,14 @@ static irqreturn_t netxen_intr(int irq, void *data)
} else {
unsigned long our_int = 0;
- our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR);
+ our_int = NXRD32(adapter, CRB_INT_VECTOR);
/* not our interrupt */
if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
return IRQ_NONE;
/* claim interrupt */
- adapter->pci_write_normalize(adapter,
- CRB_INT_VECTOR, (our_int & 0xffffffff));
+ NXWR32(adapter, CRB_INT_VECTOR, (our_int & 0xffffffff));
}
/* clear interrupt */
@@ -1685,7 +1714,7 @@ static irqreturn_t netxen_msi_intr(int irq, void *data)
/* clear interrupt */
adapter->pci_write_immediate(adapter,
- msi_tgt_status[adapter->ahw.pci_func], 0xffffffff);
+ adapter->msi_tgt_status, 0xffffffff);
napi_schedule(&sds_ring->napi);
return IRQ_HANDLED;
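The netxen_nic_main.c hunks above replace the per-adapter pci_read_normalize()/pci_write_normalize() callbacks with the NXRD32()/NXWR32() CRB accessors and move the command producer index into the per-ring TX structure. A minimal sketch of the resulting access pattern, illustrative only and not part of the patch (all identifiers are taken from the hunks above and assumed to be declared in the netxen headers):

/* Sketch: read a CRB register and write one back with the new accessors. */
static void example_crb_access(struct netxen_adapter *adapter)
{
        u32 temp = NXRD32(adapter, CRB_TEMP_STATE);     /* was adapter->pci_read_normalize() */
        u32 temp_val = nx_get_temp_val(temp);

        (void)temp_val;                                 /* only the accessors matter here */
        NXWR32(adapter, CRB_INT_VECTOR, 0);             /* was adapter->pci_write_normalize() */
}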
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index d85203203d4..5941c79be72 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -43,8 +43,7 @@ static int phy_lock(struct netxen_adapter *adapter)
int done = 0, timeout = 0;
while (!done) {
- done = netxen_nic_reg_read(adapter,
- NETXEN_PCIE_REG(PCIE_SEM3_LOCK));
+ done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM3_LOCK));
if (done == 1)
break;
if (timeout >= phy_lock_timeout) {
@@ -59,8 +58,7 @@ static int phy_lock(struct netxen_adapter *adapter)
}
}
- netxen_crb_writelit_adapter(adapter,
- NETXEN_PHY_LOCK_ID, PHY_LOCK_DRIVER);
+ NXWR32(adapter, NETXEN_PHY_LOCK_ID, PHY_LOCK_DRIVER);
return 0;
}
@@ -105,9 +103,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
* so it cannot be in reset
*/
- if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
- &mac_cfg0, 4))
- return -EIO;
+ mac_cfg0 = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0));
if (netxen_gb_get_soft_reset(mac_cfg0)) {
__u32 temp;
temp = 0;
@@ -115,9 +111,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
netxen_gb_rx_reset_pb(temp);
netxen_gb_tx_reset_mac(temp);
netxen_gb_rx_reset_mac(temp);
- if (adapter->hw_write_wx(adapter,
- NETXEN_NIU_GB_MAC_CONFIG_0(0),
- &temp, 4))
+ if (NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), temp))
return -EIO;
restore = 1;
}
@@ -125,43 +119,32 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg,
address = 0;
netxen_gb_mii_mgmt_reg_addr(address, reg);
netxen_gb_mii_mgmt_phy_addr(address, phy);
- if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
- &address, 4))
+ if (NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0), address))
return -EIO;
command = 0; /* turn off any prior activity */
- if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
- &command, 4))
+ if (NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), command))
return -EIO;
/* send read command */
netxen_gb_mii_mgmt_set_read_cycle(command);
- if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
- &command, 4))
+ if (NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), command))
return -EIO;
status = 0;
do {
- if (adapter->hw_read_wx(adapter,
- NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
- &status, 4))
- return -EIO;
+ status = NXRD32(adapter, NETXEN_NIU_GB_MII_MGMT_INDICATE(0));
timeout++;
} while ((netxen_get_gb_mii_mgmt_busy(status)
|| netxen_get_gb_mii_mgmt_notvalid(status))
&& (timeout++ < NETXEN_NIU_PHY_WAITMAX));
if (timeout < NETXEN_NIU_PHY_WAITMAX) {
- if (adapter->hw_read_wx(adapter,
- NETXEN_NIU_GB_MII_MGMT_STATUS(0),
- readval, 4))
- return -EIO;
+ *readval = NXRD32(adapter, NETXEN_NIU_GB_MII_MGMT_STATUS(0));
result = 0;
} else
result = -1;
if (restore)
- if (adapter->hw_write_wx(adapter,
- NETXEN_NIU_GB_MAC_CONFIG_0(0),
- &mac_cfg0, 4))
+ if (NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), mac_cfg0))
return -EIO;
phy_unlock(adapter);
return result;
@@ -197,9 +180,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
* cannot be in reset
*/
- if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
- &mac_cfg0, 4))
- return -EIO;
+ mac_cfg0 = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0));
if (netxen_gb_get_soft_reset(mac_cfg0)) {
__u32 temp;
temp = 0;
@@ -208,35 +189,27 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
netxen_gb_tx_reset_mac(temp);
netxen_gb_rx_reset_mac(temp);
- if (adapter->hw_write_wx(adapter,
- NETXEN_NIU_GB_MAC_CONFIG_0(0),
- &temp, 4))
+ if (NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), temp))
return -EIO;
restore = 1;
}
command = 0; /* turn off any prior activity */
- if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
- &command, 4))
+ if (NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), command))
return -EIO;
address = 0;
netxen_gb_mii_mgmt_reg_addr(address, reg);
netxen_gb_mii_mgmt_phy_addr(address, phy);
- if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
- &address, 4))
+ if (NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0), address))
return -EIO;
- if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0),
- &val, 4))
+ if (NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0), val))
return -EIO;
status = 0;
do {
- if (adapter->hw_read_wx(adapter,
- NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
- &status, 4))
- return -EIO;
+ status = NXRD32(adapter, NETXEN_NIU_GB_MII_MGMT_INDICATE(0));
timeout++;
} while ((netxen_get_gb_mii_mgmt_busy(status))
&& (timeout++ < NETXEN_NIU_PHY_WAITMAX));
@@ -248,9 +221,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
/* restore the state of port 0 MAC in case we tampered with it */
if (restore)
- if (adapter->hw_write_wx(adapter,
- NETXEN_NIU_GB_MAC_CONFIG_0(0),
- &mac_cfg0, 4))
+ if (NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), mac_cfg0))
return -EIO;
return result;
@@ -258,7 +229,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg,
int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter)
{
- netxen_crb_writelit_adapter(adapter, NETXEN_NIU_INT_MASK, 0x3f);
+ NXWR32(adapter, NETXEN_NIU_INT_MASK, 0x3f);
return 0;
}
@@ -281,7 +252,7 @@ int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter)
int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter)
{
- netxen_crb_writelit_adapter(adapter, NETXEN_NIU_INT_MASK, 0x7f);
+ NXWR32(adapter, NETXEN_NIU_INT_MASK, 0x7f);
return 0;
}
@@ -315,36 +286,27 @@ static int netxen_niu_gbe_clear_phy_interrupts(struct netxen_adapter *adapter)
static void netxen_niu_gbe_set_mii_mode(struct netxen_adapter *adapter,
int port, long enable)
{
- netxen_crb_writelit_adapter(adapter, NETXEN_NIU_MODE, 0x2);
- netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
- 0x80000000);
- netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
- 0x0000f0025);
- netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port),
- 0xf1ff);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_GB0_GMII_MODE + (port << 3), 0);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_GB0_MII_MODE + (port << 3), 1);
- netxen_crb_writelit_adapter(adapter,
- (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7);
+ NXWR32(adapter, NETXEN_NIU_MODE, 0x2);
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 0x80000000);
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 0x0000f0025);
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port), 0xf1ff);
+ NXWR32(adapter, NETXEN_NIU_GB0_GMII_MODE + (port << 3), 0);
+ NXWR32(adapter, NETXEN_NIU_GB0_MII_MODE + (port << 3), 1);
+ NXWR32(adapter, (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0);
+ NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7);
if (enable) {
/*
* Do NOT enable flow control until a suitable solution for
* shutting down pause frames is found.
*/
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_GB_MAC_CONFIG_0(port),
- 0x5);
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 0x5);
}
if (netxen_niu_gbe_enable_phy_interrupts(adapter))
- printk(KERN_ERR PFX "ERROR enabling PHY interrupts\n");
+ printk(KERN_ERR "ERROR enabling PHY interrupts\n");
if (netxen_niu_gbe_clear_phy_interrupts(adapter))
- printk(KERN_ERR PFX "ERROR clearing PHY interrupts\n");
+ printk(KERN_ERR "ERROR clearing PHY interrupts\n");
}
/*
@@ -353,36 +315,27 @@ static void netxen_niu_gbe_set_mii_mode(struct netxen_adapter *adapter,
static void netxen_niu_gbe_set_gmii_mode(struct netxen_adapter *adapter,
int port, long enable)
{
- netxen_crb_writelit_adapter(adapter, NETXEN_NIU_MODE, 0x2);
- netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
- 0x80000000);
- netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
- 0x0000f0025);
- netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port),
- 0xf2ff);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_GB0_MII_MODE + (port << 3), 0);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_GB0_GMII_MODE + (port << 3), 1);
- netxen_crb_writelit_adapter(adapter,
- (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7);
+ NXWR32(adapter, NETXEN_NIU_MODE, 0x2);
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 0x80000000);
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 0x0000f0025);
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port), 0xf2ff);
+ NXWR32(adapter, NETXEN_NIU_GB0_MII_MODE + (port << 3), 0);
+ NXWR32(adapter, NETXEN_NIU_GB0_GMII_MODE + (port << 3), 1);
+ NXWR32(adapter, (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0);
+ NXWR32(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7);
if (enable) {
/*
* Do NOT enable flow control until a suitable solution for
* shutting down pause frames is found.
*/
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_GB_MAC_CONFIG_0(port),
- 0x5);
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), 0x5);
}
if (netxen_niu_gbe_enable_phy_interrupts(adapter))
- printk(KERN_ERR PFX "ERROR enabling PHY interrupts\n");
+ printk(KERN_ERR "ERROR enabling PHY interrupts\n");
if (netxen_niu_gbe_clear_phy_interrupts(adapter))
- printk(KERN_ERR PFX "ERROR clearing PHY interrupts\n");
+ printk(KERN_ERR "ERROR clearing PHY interrupts\n");
}
int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
@@ -416,25 +369,20 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
* plugged in.
*/
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_GB_MAC_CONFIG_0
- (port),
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
NETXEN_GB_MAC_SOFT_RESET);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_GB_MAC_CONFIG_0
- (port),
- NETXEN_GB_MAC_RESET_PROT_BLK
- | NETXEN_GB_MAC_ENABLE_TX_RX
- |
- NETXEN_GB_MAC_PAUSED_FRMS);
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ NETXEN_GB_MAC_RESET_PROT_BLK |
+ NETXEN_GB_MAC_ENABLE_TX_RX |
+ NETXEN_GB_MAC_PAUSED_FRMS);
if (netxen_niu_gbe_clear_phy_interrupts(adapter))
- printk(KERN_ERR PFX
+ printk(KERN_ERR
"ERROR clearing PHY interrupts\n");
if (netxen_niu_gbe_enable_phy_interrupts(adapter))
- printk(KERN_ERR PFX
+ printk(KERN_ERR
"ERROR enabling PHY interrupts\n");
if (netxen_niu_gbe_clear_phy_interrupts(adapter))
- printk(KERN_ERR PFX
+ printk(KERN_ERR
"ERROR clearing PHY interrupts\n");
result = -1;
}
@@ -447,88 +395,10 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
{
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5);
- }
-
- return 0;
-}
-
-/*
- * Return the current station MAC address.
- * Note that the passed-in value must already be in network byte order.
- */
-static int netxen_niu_macaddr_get(struct netxen_adapter *adapter,
- netxen_ethernet_macaddr_t * addr)
-{
- u32 stationhigh;
- u32 stationlow;
- int phy = adapter->physical_port;
- u8 val[8];
-
- if (addr == NULL)
- return -EINVAL;
- if ((phy < 0) || (phy > 3))
- return -EINVAL;
-
- if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy),
- &stationhigh, 4))
- return -EIO;
- if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy),
- &stationlow, 4))
- return -EIO;
- ((__le32 *)val)[1] = cpu_to_le32(stationhigh);
- ((__le32 *)val)[0] = cpu_to_le32(stationlow);
-
- memcpy(addr, val + 2, 6);
-
- return 0;
-}
-
-/*
- * Set the station MAC address.
- * Note that the passed-in value must already be in network byte order.
- */
-int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
- netxen_ethernet_macaddr_t addr)
-{
- u8 temp[4];
- u32 val;
- int phy = adapter->physical_port;
- unsigned char mac_addr[6];
- int i;
-
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- return 0;
-
- for (i = 0; i < 10; i++) {
- temp[0] = temp[1] = 0;
- memcpy(temp + 2, addr, 2);
- val = le32_to_cpu(*(__le32 *)temp);
- if (adapter->hw_write_wx(adapter,
- NETXEN_NIU_GB_STATION_ADDR_1(phy), &val, 4))
- return -EIO;
-
- memcpy(temp, ((u8 *) addr) + 2, sizeof(__le32));
- val = le32_to_cpu(*(__le32 *)temp);
- if (adapter->hw_write_wx(adapter,
- NETXEN_NIU_GB_STATION_ADDR_0(phy), &val, 4))
- return -2;
-
- netxen_niu_macaddr_get(adapter,
- (netxen_ethernet_macaddr_t *) mac_addr);
- if (memcmp(mac_addr, addr, 6) == 0)
- break;
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447);
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5);
}
- if (i == 10) {
- printk(KERN_ERR "%s: cannot set Mac addr for %s\n",
- netxen_nic_driver_name, adapter->netdev->name);
- printk(KERN_ERR "MAC address set: %pM.\n", addr);
- printk(KERN_ERR "MAC address get: %pM.\n", mac_addr);
- }
return 0;
}
@@ -545,8 +415,7 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter)
return -EINVAL;
mac_cfg0 = 0;
netxen_gb_soft_reset(mac_cfg0);
- if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
- &mac_cfg0, 4))
+ if (NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), mac_cfg0))
return -EIO;
return 0;
}
@@ -564,8 +433,8 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
return -EINVAL;
mac_cfg = 0;
- if (adapter->hw_write_wx(adapter,
- NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), &mac_cfg, 4))
+ if (NXWR32(adapter,
+ NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg))
return -EIO;
return 0;
}
@@ -581,9 +450,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
return -EINVAL;
/* save previous contents */
- if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
- &reg, 4))
- return -EIO;
+ reg = NXRD32(adapter, NETXEN_NIU_GB_DROP_WRONGADDR);
if (mode == NETXEN_NIU_PROMISC_MODE) {
switch (port) {
case 0:
@@ -619,67 +486,11 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
return -EIO;
}
}
- if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
- &reg, 4))
+ if (NXWR32(adapter, NETXEN_NIU_GB_DROP_WRONGADDR, reg))
return -EIO;
return 0;
}
-/*
- * Set the MAC address for an XG port
- * Note that the passed-in value must already be in network byte order.
- */
-int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
- netxen_ethernet_macaddr_t addr)
-{
- int phy = adapter->physical_port;
- u8 temp[4];
- u32 val;
-
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- return 0;
-
- if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS))
- return -EIO;
-
- temp[0] = temp[1] = 0;
- switch (phy) {
- case 0:
- memcpy(temp + 2, addr, 2);
- val = le32_to_cpu(*(__le32 *)temp);
- if (adapter->hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
- &val, 4))
- return -EIO;
-
- memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
- val = le32_to_cpu(*(__le32 *)temp);
- if (adapter->hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
- &val, 4))
- return -EIO;
- break;
-
- case 1:
- memcpy(temp + 2, addr, 2);
- val = le32_to_cpu(*(__le32 *)temp);
- if (adapter->hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_1,
- &val, 4))
- return -EIO;
-
- memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
- val = le32_to_cpu(*(__le32 *)temp);
- if (adapter->hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_HI,
- &val, 4))
- return -EIO;
- break;
-
- default:
- printk(KERN_ERR "Unknown port %d\n", phy);
- break;
- }
-
- return 0;
-}
-
int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
u32 mode)
{
@@ -689,9 +500,7 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
if (port > NETXEN_NIU_MAX_XG_PORTS)
return -EINVAL;
- if (adapter->hw_read_wx(adapter,
- NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), &reg, 4))
- return -EIO;
+ reg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));
if (mode == NETXEN_NIU_PROMISC_MODE)
reg = (reg | 0x2000UL);
else
@@ -702,8 +511,40 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
else
reg = (reg & ~0x1000UL);
- netxen_crb_writelit_adapter(adapter,
- NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
+
+ return 0;
+}
+
+int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
+{
+ u32 mac_hi, mac_lo;
+ u32 reg_hi, reg_lo;
+
+ u8 phy = adapter->physical_port;
+ u8 phy_count = (adapter->ahw.port_type == NETXEN_NIC_XGBE) ?
+ NETXEN_NIU_MAX_XG_PORTS : NETXEN_NIU_MAX_GBE_PORTS;
+
+ if (phy >= phy_count)
+ return -EINVAL;
+
+ mac_lo = ((u32)addr[0] << 16) | ((u32)addr[1] << 24);
+ mac_hi = addr[2] | ((u32)addr[3] << 8) |
+ ((u32)addr[4] << 16) | ((u32)addr[5] << 24);
+
+ if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy);
+ reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy);
+ } else {
+ reg_lo = NETXEN_NIU_GB_STATION_ADDR_1(phy);
+ reg_hi = NETXEN_NIU_GB_STATION_ADDR_0(phy);
+ }
+
+ /* write twice to flush */
+ if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
+ return -EIO;
+ if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
+ return -EIO;
return 0;
}
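netxen_p2_nic_set_mac_addr() above packs the six MAC bytes into the two station-address registers. A worked example of that packing, for illustration only:

/* For addr = 00:11:22:33:44:55 the shifts above give:
 *   mac_lo = (0x00 << 16) | (0x11 << 24)                      = 0x11000000
 *   mac_hi = 0x22 | (0x33 << 8) | (0x44 << 16) | (0x55 << 24) = 0x55443322
 * so bytes 0-1 land in the upper half of the low register and bytes 2-5
 * fill the high register; each pair is written twice to flush the posted
 * write, as in the double NXWR32() calls above.
 */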
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
index 50183335e43..b73a62ca74f 100644
--- a/drivers/net/netxen/netxen_nic_phan_reg.h
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -36,23 +36,25 @@
*/
#define NIC_CRB_BASE NETXEN_CAM_RAM(0x200)
#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X))
+#define NIC_CRB_BASE_2 NETXEN_CAM_RAM(0x700)
+#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X))
#define CRB_PHAN_CNTRL_LO_OFFSET NETXEN_NIC_REG(0x00)
#define CRB_PHAN_CNTRL_HI_OFFSET NETXEN_NIC_REG(0x04)
#define CRB_CMD_PRODUCER_OFFSET NETXEN_NIC_REG(0x08)
#define CRB_CMD_CONSUMER_OFFSET NETXEN_NIC_REG(0x0c)
-#define CRB_PAUSE_ADDR_LO NETXEN_NIC_REG(0x10) /* C0 EPG BUG */
+#define CRB_PAUSE_ADDR_LO NETXEN_NIC_REG(0x10)
#define CRB_PAUSE_ADDR_HI NETXEN_NIC_REG(0x14)
#define NX_CDRP_CRB_OFFSET NETXEN_NIC_REG(0x18)
#define NX_ARG1_CRB_OFFSET NETXEN_NIC_REG(0x1c)
#define NX_ARG2_CRB_OFFSET NETXEN_NIC_REG(0x20)
#define NX_ARG3_CRB_OFFSET NETXEN_NIC_REG(0x24)
#define NX_SIGN_CRB_OFFSET NETXEN_NIC_REG(0x28)
-#define CRB_CMD_INTR_LOOP NETXEN_NIC_REG(0x20) /* 4 regs for perf */
+#define CRB_CMD_INTR_LOOP NETXEN_NIC_REG(0x20)
#define CRB_CMD_DMA_LOOP NETXEN_NIC_REG(0x24)
#define CRB_RCV_INTR_LOOP NETXEN_NIC_REG(0x28)
#define CRB_RCV_DMA_LOOP NETXEN_NIC_REG(0x2c)
-#define CRB_ENABLE_TX_INTR NETXEN_NIC_REG(0x30) /* phantom init status */
+#define CRB_ENABLE_TX_INTR NETXEN_NIC_REG(0x30)
#define CRB_MMAP_ADDR_3 NETXEN_NIC_REG(0x34)
#define CRB_CMDPEG_CMDRING NETXEN_NIC_REG(0x38)
#define CRB_HOST_DUMMY_BUF_ADDR_HI NETXEN_NIC_REG(0x3c)
@@ -65,7 +67,7 @@
#define CRB_MMAP_SIZE_1 NETXEN_NIC_REG(0x58)
#define CRB_MMAP_SIZE_2 NETXEN_NIC_REG(0x5c)
#define CRB_MMAP_SIZE_3 NETXEN_NIC_REG(0x60)
-#define CRB_GLOBAL_INT_COAL NETXEN_NIC_REG(0x64) /* interrupt coalescing */
+#define CRB_GLOBAL_INT_COAL NETXEN_NIC_REG(0x64)
#define CRB_INT_COAL_MODE NETXEN_NIC_REG(0x68)
#define CRB_MAX_RCV_BUFS NETXEN_NIC_REG(0x6c)
#define CRB_TX_INT_THRESHOLD NETXEN_NIC_REG(0x70)
@@ -83,13 +85,13 @@
#define CRB_AGENT_TX_TYPE NETXEN_NIC_REG(0xa0)
#define CRB_AGENT_TX_ADDR NETXEN_NIC_REG(0xa4)
#define CRB_AGENT_TX_MSS NETXEN_NIC_REG(0xa8)
-#define CRB_TX_STATE NETXEN_NIC_REG(0xac) /* Debug -performance */
+#define CRB_TX_STATE NETXEN_NIC_REG(0xac)
#define CRB_TX_COUNT NETXEN_NIC_REG(0xb0)
#define CRB_RX_STATE NETXEN_NIC_REG(0xb4)
#define CRB_RX_PERF_DEBUG_1 NETXEN_NIC_REG(0xb8)
-#define CRB_RX_LRO_CONTROL NETXEN_NIC_REG(0xbc) /* LRO On/OFF */
+#define CRB_RX_LRO_CONTROL NETXEN_NIC_REG(0xbc)
#define CRB_RX_LRO_START_NUM NETXEN_NIC_REG(0xc0)
-#define CRB_MPORT_MODE NETXEN_NIC_REG(0xc4) /* Multiport Mode */
+#define CRB_MPORT_MODE NETXEN_NIC_REG(0xc4)
#define CRB_CMD_RING_SIZE NETXEN_NIC_REG(0xc8)
#define CRB_DMA_SHIFT NETXEN_NIC_REG(0xcc)
#define CRB_INT_VECTOR NETXEN_NIC_REG(0xd4)
@@ -109,8 +111,6 @@
#define CRB_CMD_CONSUMER_OFFSET_1 NETXEN_NIC_REG(0x1b0)
#define CRB_CMD_PRODUCER_OFFSET_2 NETXEN_NIC_REG(0x1b8)
#define CRB_CMD_CONSUMER_OFFSET_2 NETXEN_NIC_REG(0x1bc)
-
-// 1c0 to 1cc used for signature reg
#define CRB_CMD_PRODUCER_OFFSET_3 NETXEN_NIC_REG(0x1d0)
#define CRB_CMD_CONSUMER_OFFSET_3 NETXEN_NIC_REG(0x1d4)
#define CRB_TEMP_STATE NETXEN_NIC_REG(0x1b4)
@@ -120,13 +120,13 @@
#define CRB_V2P_2 NETXEN_NIC_REG(0x298)
#define CRB_V2P_3 NETXEN_NIC_REG(0x29c)
#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
-#define CRB_DRIVER_VERSION NETXEN_NIC_REG(0x2a0)
-/* sw int status/mask registers */
+#define CRB_DRIVER_VERSION NETXEN_NIC_REG(0x2a0)
#define CRB_SW_INT_MASK_0 NETXEN_NIC_REG(0x1d8)
#define CRB_SW_INT_MASK_1 NETXEN_NIC_REG(0x1e0)
#define CRB_SW_INT_MASK_2 NETXEN_NIC_REG(0x1e4)
#define CRB_SW_INT_MASK_3 NETXEN_NIC_REG(0x1e8)
+#define CRB_FW_CAPABILITIES_1 NETXEN_CAM_RAM(0x128)
#define CRB_MAC_BLOCK_START NETXEN_CAM_RAM(0x1c0)
/*
@@ -136,7 +136,7 @@
#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8)
#define CRB_NIC_CAPABILITIES_FW NETXEN_NIC_REG(0x1dc)
#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270)
-#define CRB_NIC_MSI_MODE_FW NETXEN_NIC_REG(0x274)
+#define CRB_NIC_MSI_MODE_FW NETXEN_NIC_REG(0x274)
#define INTR_SCHEME_PERPORT 0x1
#define MSI_MODE_MULTIFUNC 0x1
@@ -162,7 +162,8 @@
struct netxen_recv_crb {
u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
- u32 crb_sts_consumer;
+ u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
+ u32 sw_int_mask[NUM_STS_DESC_RINGS];
};
/*
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 6474f02bf78..1f10ed603e2 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -1165,7 +1165,7 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
if (test_and_set_bit(0, (void*)&p->lock)) {
printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
{
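ni65 above, and several later hunks in this series (ns83820, the PCMCIA drivers, plip), switch their transmit handlers from returning a bare 1 to the NETDEV_TX_* codes. A minimal sketch of the convention, with hypothetical handler names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* NETDEV_TX_BUSY asks the core to requeue the skb (the driver must not
 * have consumed it); NETDEV_TX_OK means the skb has been taken.
 */
static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (netif_queue_stopped(dev))
                return NETDEV_TX_BUSY;          /* was "return 1" */

        /* ... hand the skb to hardware here ... */
        dev_kfree_skb_any(skb);                 /* placeholder for a real TX path */
        return NETDEV_TX_OK;
}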
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2b1745328cf..fa61a12c5e1 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -22,6 +22,7 @@
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
+#include <linux/list.h>
#include <linux/io.h>
@@ -1317,7 +1318,7 @@ static int bcm8704_reset(struct niu *np)
err = mdio_read(np, np->phy_addr,
BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
- if (err < 0)
+ if (err < 0 || err == 0xffff)
return err;
err |= BMCR_RESET;
err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
@@ -2042,7 +2043,7 @@ static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
BCM8704_PMD_RCV_SIGDET);
- if (err < 0)
+ if (err < 0 || err == 0xffff)
goto out;
if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
err = 0;
@@ -2083,8 +2084,6 @@ static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
out:
*link_up_p = link_up;
- if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
- err = 0;
return err;
}
@@ -2220,10 +2219,17 @@ static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
if (phy_present != phy_present_prev) {
/* state change */
if (phy_present) {
+ /* A NEM was just plugged in */
np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
if (np->phy_ops->xcvr_init)
err = np->phy_ops->xcvr_init(np);
if (err) {
+ err = mdio_read(np, np->phy_addr,
+ BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
+ if (err == 0xffff) {
+ /* No mdio, back-to-back XAUI */
+ goto out;
+ }
/* debounce */
np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
}
@@ -2234,13 +2240,21 @@ static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
np->dev->name);
}
}
- if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT)
+out:
+ if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
err = link_status_10g_bcm8706(np, link_up_p);
+ if (err == 0xffff) {
+ /* No mdio, back-to-back XAUI: it is C10NEM */
+ *link_up_p = 1;
+ np->link_config.active_speed = SPEED_10000;
+ np->link_config.active_duplex = DUPLEX_FULL;
+ }
+ }
}
spin_unlock_irqrestore(&np->lock, flags);
- return err;
+ return 0;
}
static int niu_link_status(struct niu *np, int *link_up_p)
@@ -2312,6 +2326,12 @@ static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
.link_status = link_status_10g_hotplug,
};
+static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
+ .serdes_init = serdes_init_niu_10g_fiber,
+ .xcvr_init = xcvr_init_10g_bcm8706,
+ .link_status = link_status_10g_hotplug,
+};
+
static const struct niu_phy_ops phy_ops_10g_copper = {
.serdes_init = serdes_init_10g,
.link_status = link_status_10g, /* XXX */
@@ -2358,6 +2378,11 @@ static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
.phy_addr_base = 8,
};
+static const struct niu_phy_template phy_template_niu_10g_hotplug = {
+ .ops = &phy_ops_niu_10g_hotplug,
+ .phy_addr_base = 8,
+};
+
static const struct niu_phy_template phy_template_10g_copper = {
.ops = &phy_ops_10g_copper,
.phy_addr_base = 10,
@@ -2542,8 +2567,16 @@ static int niu_determine_phy_disposition(struct niu *np)
case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
/* 10G Fiber */
default:
- tp = &phy_template_niu_10g_fiber;
- phy_addr_off += np->port;
+ if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
+ tp = &phy_template_niu_10g_hotplug;
+ if (np->port == 0)
+ phy_addr_off = 8;
+ if (np->port == 1)
+ phy_addr_off = 12;
+ } else {
+ tp = &phy_template_niu_10g_fiber;
+ phy_addr_off += np->port;
+ }
break;
}
} else {
@@ -2630,11 +2663,11 @@ static int niu_init_link(struct niu *np)
msleep(200);
}
err = niu_serdes_init(np);
- if (err)
+ if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
return err;
msleep(200);
err = niu_xcvr_init(np);
- if (!err)
+ if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
niu_link_status(np, &ignore);
return 0;
}
@@ -6330,6 +6363,7 @@ static void niu_set_rx_mode(struct net_device *dev)
struct niu *np = netdev_priv(dev);
int i, alt_cnt, err;
struct dev_addr_list *addr;
+ struct netdev_hw_addr *ha;
unsigned long flags;
u16 hash[16] = { 0, };
@@ -6351,9 +6385,8 @@ static void niu_set_rx_mode(struct net_device *dev)
if (alt_cnt) {
int index = 0;
- for (addr = dev->uc_list; addr; addr = addr->next) {
- err = niu_set_alt_mac(np, index,
- addr->da_addr);
+ list_for_each_entry(ha, &dev->uc_list, list) {
+ err = niu_set_alt_mac(np, index, ha->addr);
if (err)
printk(KERN_WARNING PFX "%s: Error %d "
"adding alt mac %d\n",
@@ -6745,8 +6778,6 @@ static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_wake_queue(txq);
}
- dev->trans_start = jiffies;
-
out:
return NETDEV_TX_OK;
@@ -9346,6 +9377,11 @@ static int __devinit niu_get_of_props(struct niu *np)
if (model)
strcpy(np->vpd.model, model);
+ if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
+ np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
+ NIU_FLAGS_HOTPLUG_PHY);
+ }
+
return 0;
#else
return -EINVAL;
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 8754e44cada..3bd0b5933d5 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -3242,8 +3242,8 @@ struct niu {
struct niu_parent *parent;
u32 flags;
-#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removebale PHY detected*/
-#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removebale PHY */
+#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removeable PHY detected*/
+#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removeable PHY */
#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */
#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */
#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */
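The niu hunks above add hot-swappable PHY support and walk the new unicast address list via struct netdev_hw_addr instead of the old dev_addr_list chain. A small sketch of the list walk, illustrative only (the helper name is hypothetical; the field names come from the hunk above):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/list.h>

/* Walk the secondary unicast addresses the way niu_set_rx_mode() now does. */
static void example_walk_uc_addrs(struct net_device *dev)
{
        struct netdev_hw_addr *ha;

        list_for_each_entry(ha, &dev->uc_list, list)
                pr_info("%s: uc addr %pM\n", dev->name, ha->addr);
}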
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index d531614a90b..1576ac07216 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1097,7 +1097,7 @@ again:
if (unlikely(dev->CFG_cache & CFG_LNKSTS)) {
netif_stop_queue(ndev);
if (unlikely(dev->CFG_cache & CFG_LNKSTS))
- return 1;
+ return NETDEV_TX_BUSY;
netif_start_queue(ndev);
}
@@ -1115,7 +1115,7 @@ again:
netif_start_queue(ndev);
goto again;
}
- return 1;
+ return NETDEV_TX_BUSY;
}
if (free_idx == dev->tx_intr_idx) {
@@ -1204,9 +1204,7 @@ again:
if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
netif_start_queue(ndev);
- /* set the transmit start time to catch transmit timeouts */
- ndev->trans_start = jiffies;
- return 0;
+ return NETDEV_TX_OK;
}
static void ns83820_update_stats(struct ns83820 *dev)
@@ -1626,7 +1624,7 @@ static void ns83820_tx_watch(unsigned long data)
);
#endif
- if (time_after(jiffies, ndev->trans_start + 1*HZ) &&
+ if (time_after(jiffies, dev_trans_start(ndev) + 1*HZ) &&
dev->tx_done_idx != dev->tx_free_idx) {
printk(KERN_DEBUG "%s: ns83820_tx_watch: %u %u %d\n",
ndev->name,
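ns83820 above stops stamping ndev->trans_start in its xmit path and reads the timestamp through dev_trans_start() in its watchdog; the other drivers in this series drop the manual trans_start writes for the same reason. A sketch of the watchdog-side check, illustrative only:

#include <linux/netdevice.h>
#include <linux/jiffies.h>

/* True if more than a second has passed since the last transmit start. */
static bool example_tx_stalled(struct net_device *ndev)
{
        return time_after(jiffies, dev_trans_start(ndev) + 1 * HZ);
}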
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 5eeb5a87b73..c254a7f5b9f 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -24,6 +24,7 @@
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/in.h>
@@ -1086,34 +1087,17 @@ static int pasemi_mac_phy_init(struct net_device *dev)
struct pasemi_mac *mac = netdev_priv(dev);
struct device_node *dn, *phy_dn;
struct phy_device *phydev;
- unsigned int phy_id;
- const phandle *ph;
- const unsigned int *prop;
- struct resource r;
- int ret;
dn = pci_device_to_OF_node(mac->pdev);
- ph = of_get_property(dn, "phy-handle", NULL);
- if (!ph)
- return -ENODEV;
- phy_dn = of_find_node_by_phandle(*ph);
-
- prop = of_get_property(phy_dn, "reg", NULL);
- ret = of_address_to_resource(phy_dn->parent, 0, &r);
- if (ret)
- goto err;
-
- phy_id = *prop;
- snprintf(mac->phy_id, sizeof(mac->phy_id), "%x:%02x",
- (int)r.start, phy_id);
-
+ phy_dn = of_parse_phandle(dn, "phy-handle", 0);
of_node_put(phy_dn);
mac->link = 0;
mac->speed = 0;
mac->duplex = -1;
- phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0, PHY_INTERFACE_MODE_SGMII);
+ phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
+ PHY_INTERFACE_MODE_SGMII);
if (IS_ERR(phydev)) {
printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
@@ -1123,10 +1107,6 @@ static int pasemi_mac_phy_init(struct net_device *dev)
mac->phydev = phydev;
return 0;
-
-err:
- of_node_put(phy_dn);
- return -ENODEV;
}
@@ -1735,12 +1715,25 @@ out:
return ret;
}
+static const struct net_device_ops pasemi_netdev_ops = {
+ .ndo_open = pasemi_mac_open,
+ .ndo_stop = pasemi_mac_close,
+ .ndo_start_xmit = pasemi_mac_start_tx,
+ .ndo_set_multicast_list = pasemi_mac_set_rx_mode,
+ .ndo_set_mac_address = pasemi_mac_set_mac_addr,
+ .ndo_change_mtu = pasemi_mac_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = pasemi_mac_netpoll,
+#endif
+};
+
static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
struct pasemi_mac *mac;
- int err;
+ int err, ret;
err = pci_enable_device(pdev);
if (err)
@@ -1798,12 +1791,13 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
- mac->dma_if = mac_to_intf(mac);
- if (mac->dma_if < 0) {
+ ret = mac_to_intf(mac);
+ if (ret < 0) {
dev_err(&mac->pdev->dev, "Can't map DMA interface\n");
err = -ENODEV;
goto out;
}
+ mac->dma_if = ret;
switch (pdev->device) {
case 0xa005:
@@ -1817,19 +1811,11 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out;
}
- dev->open = pasemi_mac_open;
- dev->stop = pasemi_mac_close;
- dev->hard_start_xmit = pasemi_mac_start_tx;
- dev->set_multicast_list = pasemi_mac_set_rx_mode;
- dev->set_mac_address = pasemi_mac_set_mac_addr;
+ dev->netdev_ops = &pasemi_netdev_ops;
dev->mtu = PE_DEF_MTU;
/* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = pasemi_mac_netpoll;
-#endif
- dev->change_mtu = pasemi_mac_change_mtu;
dev->ethtool_ops = &pasemi_mac_ethtool_ops;
if (err)
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h
index 1a115ec60b5..e2f4efa8ad4 100644
--- a/drivers/net/pasemi_mac.h
+++ b/drivers/net/pasemi_mac.h
@@ -100,7 +100,6 @@ struct pasemi_mac {
int duplex;
unsigned int msg_enable;
- char phy_id[BUS_ID_SIZE];
};
/* Software status descriptor (ring_info) */
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index c95fd72c3bb..8c1f6988f39 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -728,6 +728,17 @@ err_out:
return rc;
}
+static const struct net_device_ops netdrv_netdev_ops = {
+ .ndo_open = netdrv_open,
+ .ndo_stop = netdrv_close,
+ .ndo_start_xmit = netdrv_start_xmit,
+ .ndo_set_multicast_list = netdrv_set_rx_mode,
+ .ndo_do_ioctl = netdrv_ioctl,
+ .ndo_tx_timeout = netdrv_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
static int __devinit netdrv_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -769,13 +780,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
((u16 *) (dev->dev_addr))[i] =
le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
- /* The Rtl8139-specific entries in the device structure. */
- dev->open = netdrv_open;
- dev->hard_start_xmit = netdrv_start_xmit;
- dev->stop = netdrv_close;
- dev->set_multicast_list = netdrv_set_rx_mode;
- dev->do_ioctl = netdrv_ioctl;
- dev->tx_timeout = netdrv_tx_timeout;
+ dev->netdev_ops = &netdrv_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->irq = pdev->irq;
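pasemi_mac and pci-skeleton above move from assigning dev->open, dev->hard_start_xmit and friends individually to a const struct net_device_ops. A minimal sketch of the pattern, with hypothetical handlers:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int example_open(struct net_device *dev)  { return 0; }
static int example_close(struct net_device *dev) { return 0; }

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        dev_kfree_skb_any(skb);                 /* placeholder TX */
        return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
        .ndo_open            = example_open,
        .ndo_stop            = example_close,
        .ndo_start_xmit      = example_xmit,
        .ndo_change_mtu      = eth_change_mtu,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = eth_mac_addr,
};

/* in the probe routine: dev->netdev_ops = &example_netdev_ops; */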
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 8f3872b8985..f35c609ba02 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -1195,7 +1195,7 @@ static int el3_close(struct net_device *dev)
static struct pcmcia_device_id tc574_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0574),
- PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0556, "3CCFEM556.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0556, "cis/3CCFEM556.cis"),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, tc574_ids);
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index cdf661a6092..ec7cf5ac4f0 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -967,8 +967,8 @@ static struct pcmcia_device_id tc589_ids[] = {
PCMCIA_MFC_DEVICE_PROD_ID1(0, "Motorola MARQUIS", 0xf03e4e77),
PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0589),
PCMCIA_DEVICE_PROD_ID12("Farallon", "ENet", 0x58d93fc4, 0x992c2202),
- PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0035, "3CXEM556.cis"),
- PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x003d, "3CXEM556.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0035, "cis/3CXEM556.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x003d, "cis/3CXEM556.cis"),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, tc589_ids);
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 15b8fe61695..0e38d80fd25 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1130,7 +1130,7 @@ static int axnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
outb_p(ENISR_ALL, e8390_base + EN0_IMR);
spin_unlock_irqrestore(&ei_local->page_lock, flags);
dev->stats.tx_errors++;
- return 1;
+ return NETDEV_TX_BUSY;
}
/*
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 81e6660a433..479d5b49437 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -877,7 +877,7 @@ static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (length > ETH_FRAME_LEN) {
printk(KERN_NOTICE "%s: Attempting to send a large packet"
" (%d bytes).\n", dev->name, length);
- return 1;
+ return NETDEV_TX_BUSY;
}
DEBUG(4, "%s: Transmitting a packet of length %lu.\n",
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 48dbb35747d..37e05d3ab89 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1388,7 +1388,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_aborted_errors++;
printk(KERN_DEBUG "%s: Internal error -- sent packet while busy.\n",
dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
smc->saved_skb = skb;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index a3685c0d22f..ef37d22c7e1 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1399,7 +1399,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
DEBUG(2 + (okay ? 2 : 0), "%s: avail. tx space=%u%s\n",
dev->name, freespace, okay ? " (okay)":" (not enough)");
if (!okay) { /* not enough space */
- return 1; /* upper layer may decide to requeue this packet */
+ return NETDEV_TX_BUSY; /* upper layer may decide to requeue this packet */
}
/* send the packet */
PutWord(XIRCREG_EDP, (u_short)pktlen);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 80124fac65f..1c35e1d637a 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1227,7 +1227,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
dev->stats.rx_dropped++;
return;
}
- skb->dev = dev;
if (!rx_in_place) {
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pkt_len); /* Make room */
@@ -1406,7 +1405,7 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
/* Set interrupt enable. */
lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
- mmiowb();
+
spin_unlock_irqrestore(&lp->lock, flags);
}
return work_done;
@@ -2598,7 +2597,7 @@ pcnet32_interrupt(int irq, void *dev_id)
val = lp->a.read_csr(ioaddr, CSR3);
val |= 0x5f00;
lp->a.write_csr(ioaddr, CSR3, val);
- mmiowb();
+
__napi_schedule(&lp->napi);
break;
}
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 7a3ec9d39a9..dd6f54d1b49 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -243,6 +243,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
temp &= ~(MII_M1111_HWCFG_MODE_MASK);
temp |= MII_M1111_HWCFG_MODE_SGMII_NO_CLK;
+ temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
if (err < 0)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index b754020cbe7..bd4e8d72dc0 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -113,7 +113,6 @@ int mdiobus_register(struct mii_bus *bus)
bus->reset(bus);
for (i = 0; i < PHY_MAX_ADDR; i++) {
- bus->phy_map[i] = NULL;
if ((bus->phy_mask & (1 << i)) == 0) {
struct phy_device *phydev;
@@ -150,6 +149,7 @@ void mdiobus_unregister(struct mii_bus *bus)
for (i = 0; i < PHY_MAX_ADDR; i++) {
if (bus->phy_map[i])
device_unregister(&bus->phy_map[i]->dev);
+ bus->phy_map[i] = NULL;
}
}
EXPORT_SYMBOL(mdiobus_unregister);
@@ -188,35 +188,12 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
if (IS_ERR(phydev) || phydev == NULL)
return phydev;
- /* There's a PHY at this address
- * We need to set:
- * 1) IRQ
- * 2) bus_id
- * 3) parent
- * 4) bus
- * 5) mii_bus
- * And, we need to register it */
-
- phydev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
-
- phydev->dev.parent = bus->parent;
- phydev->dev.bus = &mdio_bus_type;
- dev_set_name(&phydev->dev, PHY_ID_FMT, bus->id, addr);
-
- phydev->bus = bus;
-
- /* Run all of the fixups for this PHY */
- phy_scan_fixups(phydev);
-
- err = device_register(&phydev->dev);
+ err = phy_device_register(phydev);
if (err) {
- printk(KERN_ERR "phy %d failed to register\n", addr);
phy_device_free(phydev);
- phydev = NULL;
+ return NULL;
}
- bus->phy_map[addr] = phydev;
-
return phydev;
}
EXPORT_SYMBOL(mdiobus_scan);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0a06e4fd37d..a2ece89622d 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -39,20 +39,21 @@ MODULE_DESCRIPTION("PHY library");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
-static struct phy_driver genphy_driver;
-extern int mdio_bus_init(void);
-extern void mdio_bus_exit(void);
-
void phy_device_free(struct phy_device *phydev)
{
kfree(phydev);
}
+EXPORT_SYMBOL(phy_device_free);
static void phy_device_release(struct device *dev)
{
phy_device_free(to_phy_device(dev));
}
+static struct phy_driver genphy_driver;
+extern int mdio_bus_init(void);
+extern void mdio_bus_exit(void);
+
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
@@ -166,6 +167,10 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
dev->addr = addr;
dev->phy_id = phy_id;
dev->bus = bus;
+ dev->dev.parent = bus->parent;
+ dev->dev.bus = &mdio_bus_type;
+ dev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
+ dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr);
dev->state = PHY_DOWN;
@@ -235,6 +240,38 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
return dev;
}
+EXPORT_SYMBOL(get_phy_device);
+
+/**
+ * phy_device_register - Register the phy device on the MDIO bus
+ * @phy_device: phy_device structure to be added to the MDIO bus
+ */
+int phy_device_register(struct phy_device *phydev)
+{
+ int err;
+
+ /* Don't register a phy if one is already registered at this
+ * address */
+ if (phydev->bus->phy_map[phydev->addr])
+ return -EINVAL;
+ phydev->bus->phy_map[phydev->addr] = phydev;
+
+ /* Run all of the fixups for this PHY */
+ phy_scan_fixups(phydev);
+
+ err = device_register(&phydev->dev);
+ if (err) {
+ pr_err("phy %d failed to register\n", phydev->addr);
+ goto out;
+ }
+
+ return 0;
+
+ out:
+ phydev->bus->phy_map[phydev->addr] = NULL;
+ return err;
+}
+EXPORT_SYMBOL(phy_device_register);
/**
* phy_prepare_link - prepares the PHY layer to monitor link status
@@ -255,6 +292,33 @@ void phy_prepare_link(struct phy_device *phydev,
}
/**
+ * phy_connect_direct - connect an ethernet device to a specific phy_device
+ * @dev: the network device to connect
+ * @phydev: the pointer to the phy device
+ * @handler: callback function for state change notifications
+ * @flags: PHY device's dev_flags
+ * @interface: PHY device's interface
+ */
+int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
+ void (*handler)(struct net_device *), u32 flags,
+ phy_interface_t interface)
+{
+ int rc;
+
+ rc = phy_attach_direct(dev, phydev, flags, interface);
+ if (rc)
+ return rc;
+
+ phy_prepare_link(phydev, handler);
+ phy_start_machine(phydev, NULL);
+ if (phydev->irq > 0)
+ phy_start_interrupts(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_connect_direct);
+
+/**
* phy_connect - connect an ethernet device to a PHY device
* @dev: the network device to connect
* @bus_id: the id string of the PHY device to connect
@@ -275,18 +339,21 @@ struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
phy_interface_t interface)
{
struct phy_device *phydev;
+ struct device *d;
+ int rc;
- phydev = phy_attach(dev, bus_id, flags, interface);
-
- if (IS_ERR(phydev))
- return phydev;
-
- phy_prepare_link(phydev, handler);
-
- phy_start_machine(phydev, NULL);
+ /* Search the list of PHY devices on the mdio bus for the
+ * PHY with the requested name */
+ d = bus_find_device_by_name(&mdio_bus_type, NULL, bus_id);
+ if (!d) {
+ pr_err("PHY %s not found\n", bus_id);
+ return ERR_PTR(-ENODEV);
+ }
+ phydev = to_phy_device(d);
- if (phydev->irq > 0)
- phy_start_interrupts(phydev);
+ rc = phy_connect_direct(dev, phydev, handler, flags, interface);
+ if (rc)
+ return ERR_PTR(rc);
return phydev;
}
@@ -310,9 +377,9 @@ void phy_disconnect(struct phy_device *phydev)
EXPORT_SYMBOL(phy_disconnect);
/**
- * phy_attach - attach a network device to a particular PHY device
+ * phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
- * @bus_id: PHY device to attach
+ * @phydev: Pointer to phy_device to attach
* @flags: PHY device's dev_flags
* @interface: PHY device's interface
*
@@ -323,22 +390,10 @@ EXPORT_SYMBOL(phy_disconnect);
* the attaching device, and given a callback for link status
* change. The phy_device is returned to the attaching driver.
*/
-struct phy_device *phy_attach(struct net_device *dev,
- const char *bus_id, u32 flags, phy_interface_t interface)
+int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface)
{
- struct bus_type *bus = &mdio_bus_type;
- struct phy_device *phydev;
- struct device *d;
-
- /* Search the list of PHY devices on the mdio bus for the
- * PHY with the requested name */
- d = bus_find_device_by_name(bus, NULL, bus_id);
- if (d) {
- phydev = to_phy_device(d);
- } else {
- printk(KERN_ERR "%s not found\n", bus_id);
- return ERR_PTR(-ENODEV);
- }
+ struct device *d = &phydev->dev;
/* Assume that if there is no driver, that it doesn't
* exist, and we should use the genphy driver. */
@@ -351,13 +406,12 @@ struct phy_device *phy_attach(struct net_device *dev,
err = device_bind_driver(d);
if (err)
- return ERR_PTR(err);
+ return err;
}
if (phydev->attached_dev) {
- printk(KERN_ERR "%s: %s already attached\n",
- dev->name, bus_id);
- return ERR_PTR(-EBUSY);
+ dev_err(&dev->dev, "PHY already attached\n");
+ return -EBUSY;
}
phydev->attached_dev = dev;
@@ -375,14 +429,49 @@ struct phy_device *phy_attach(struct net_device *dev,
err = phy_scan_fixups(phydev);
if (err < 0)
- return ERR_PTR(err);
+ return err;
err = phydev->drv->config_init(phydev);
if (err < 0)
- return ERR_PTR(err);
+ return err;
}
+ return 0;
+}
+EXPORT_SYMBOL(phy_attach_direct);
+
+/**
+ * phy_attach - attach a network device to a particular PHY device
+ * @dev: network device to attach
+ * @bus_id: Bus ID of PHY device to attach
+ * @flags: PHY device's dev_flags
+ * @interface: PHY device's interface
+ *
+ * Description: Same as phy_attach_direct() except that a PHY bus_id
+ * string is passed instead of a pointer to a struct phy_device.
+ */
+struct phy_device *phy_attach(struct net_device *dev,
+ const char *bus_id, u32 flags, phy_interface_t interface)
+{
+ struct bus_type *bus = &mdio_bus_type;
+ struct phy_device *phydev;
+ struct device *d;
+ int rc;
+
+ /* Search the list of PHY devices on the mdio bus for the
+ * PHY with the requested name */
+ d = bus_find_device_by_name(bus, NULL, bus_id);
+ if (!d) {
+ pr_err("PHY %s not found\n", bus_id);
+ return ERR_PTR(-ENODEV);
+ }
+ phydev = to_phy_device(d);
+
+ rc = phy_attach_direct(dev, phydev, flags, interface);
+ if (rc)
+ return ERR_PTR(rc);
+
return phydev;
}
EXPORT_SYMBOL(phy_attach);
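The phylib rework above splits the old phy_attach()/phy_connect() into a bus lookup plus phy_attach_direct()/phy_connect_direct(), which take a phy_device pointer; of_phy_connect() in the pasemi hunk builds on the same entry point. A sketch of connecting when the driver already holds the phy_device, with a hypothetical link handler:

#include <linux/netdevice.h>
#include <linux/phy.h>

static void example_adjust_link(struct net_device *dev)
{
        /* react to link/speed/duplex changes reported by phylib */
}

static int example_connect(struct net_device *dev, struct phy_device *phydev)
{
        /* was: phydev = phy_attach(dev, bus_id, flags, interface); ... */
        return phy_connect_direct(dev, phydev, example_adjust_link, 0,
                                  PHY_INTERFACE_MODE_SGMII);
}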
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 0be0f0b164f..7a62f781fef 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -955,12 +955,12 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
struct plip_local *snd = &nl->snd_data;
if (netif_queue_stopped(dev))
- return 1;
+ return NETDEV_TX_BUSY;
/* We may need to grab the bus */
if (!nl->port_owner) {
if (parport_claim(nl->pardev))
- return 1;
+ return NETDEV_TX_BUSY;
nl->port_owner = 1;
}
@@ -969,7 +969,7 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
if (skb->len > dev->mtu + dev->hard_header_len) {
printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
netif_start_queue (dev);
- return 1;
+ return NETDEV_TX_BUSY;
}
if (net_debug > 2)
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 8ee91421db1..639d11bc444 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1054,6 +1054,7 @@ static void ppp_setup(struct net_device *dev)
dev->type = ARPHRD_PPP;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
/*
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 5b07dd8e5c0..e7935d09c89 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -433,8 +433,7 @@ static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct s
* to the inner packet either
*/
secpath_reset(skb);
- dst_release(skb->dst);
- skb->dst = NULL;
+ skb_dst_drop(skb);
nf_reset(skb);
po = pppox_sk(session_sock);
@@ -976,7 +975,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
/* Calculate UDP checksum if configured to do so */
if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
skb->ip_summed = CHECKSUM_NONE;
- else if (!(skb->dst->dev->features & NETIF_F_V4_CSUM)) {
+ else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
skb->ip_summed = CHECKSUM_COMPLETE;
csum = skb_checksum(skb, 0, udp_len, 0);
uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr,
@@ -1172,14 +1171,14 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
nf_reset(skb);
/* Get routing info from the tunnel socket */
- dst_release(skb->dst);
- skb->dst = dst_clone(__sk_dst_get(sk_tun));
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst_clone(__sk_dst_get(sk_tun)));
pppol2tp_skb_set_owner_w(skb, sk_tun);
/* Calculate UDP checksum if configured to do so */
if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
skb->ip_summed = CHECKSUM_NONE;
- else if (!(skb->dst->dev->features & NETIF_F_V4_CSUM)) {
+ else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
skb->ip_summed = CHECKSUM_COMPLETE;
csum = skb_checksum(skb, 0, udp_len, 0);
uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr,
@@ -1238,8 +1237,7 @@ static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
struct pppol2tp_session *session;
struct sock *sk;
- if (tunnel == NULL)
- BUG();
+ BUG_ON(tunnel == NULL);
PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
"%s: closing all sessions...\n", tunnel->name);
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 30900b30d53..2b38f39924a 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -1648,7 +1648,7 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
result = -ENOMEM;
goto fail_alloc_card;
}
- ps3_system_bus_set_driver_data(dev, card);
+ ps3_system_bus_set_drvdata(dev, card);
card->dev = dev;
/* get internal vlan info */
@@ -1749,7 +1749,7 @@ fail_alloc_irq:
bus_id(card),
0, 0);
fail_status_indicator:
- ps3_system_bus_set_driver_data(dev, NULL);
+ ps3_system_bus_set_drvdata(dev, NULL);
kfree(netdev_card(netdev)->unalign);
free_netdev(netdev);
fail_alloc_card:
@@ -1766,7 +1766,7 @@ fail_open:
static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
{
- struct gelic_card *card = ps3_system_bus_get_driver_data(dev);
+ struct gelic_card *card = ps3_system_bus_get_drvdata(dev);
struct net_device *netdev0;
pr_debug("%s: called\n", __func__);
@@ -1803,7 +1803,7 @@ static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
kfree(netdev_card(netdev0)->unalign);
free_netdev(netdev0);
- ps3_system_bus_set_driver_data(dev, NULL);
+ ps3_system_bus_set_drvdata(dev, NULL);
ps3_dma_region_free(dev->d_region);
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index cadc32c94c1..8a823ecc99a 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2617,7 +2617,6 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
&port_regs->CommonRegs.reqQProducerIndex,
qdev->req_producer_index);
- ndev->trans_start = jiffies;
if (netif_msg_tx_queued(qdev))
printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
ndev->name, qdev->req_producer_index, skb->len);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index fcb159e4df5..156e02e8905 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -27,6 +27,8 @@
"%s: " fmt, __func__, ##args); \
} while (0)
+#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
+
#define QLGE_VENDOR_ID 0x1077
#define QLGE_DEVICE_ID_8012 0x8012
#define QLGE_DEVICE_ID_8000 0x8000
@@ -39,7 +41,18 @@
#define NUM_SMALL_BUFFERS 512
#define NUM_LARGE_BUFFERS 512
+#define DB_PAGE_SIZE 4096
+
+/* Calculate the number of (4k) pages required to
+ * contain a buffer queue of the given length.
+ */
+#define MAX_DB_PAGES_PER_BQ(x) \
+ (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
+ (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
+#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
+ MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
+ MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
#define SMALL_BUFFER_SIZE 256
#define LARGE_BUFFER_SIZE PAGE_SIZE
#define MAX_SPLIT_SIZE 1023
@@ -50,7 +63,7 @@
#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
#define UDELAY_COUNT 3
-#define UDELAY_DELAY 10
+#define UDELAY_DELAY 100
#define TX_DESC_PER_IOCB 8
@@ -63,7 +76,16 @@
#define TX_DESC_PER_OAL 0
#endif
-#define DB_PAGE_SIZE 4096
+/* MPI test register definitions. This register
+ * is used for determining alternate NIC function's
+ * PCI->func number.
+ */
+enum {
+ MPI_TEST_FUNC_PORT_CFG = 0x1002,
+ MPI_TEST_NIC1_FUNC_SHIFT = 1,
+ MPI_TEST_NIC2_FUNC_SHIFT = 5,
+ MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+};
/*
* Processor Address Register (PROC_ADDR) bit definitions.
@@ -1430,7 +1452,10 @@ struct ql_adapter {
/* Hardware information */
u32 chip_rev_id;
+ u32 fw_rev_id;
u32 func; /* PCI function for this adapter */
+ u32 alt_func; /* PCI function for alternate adapter */
+ u32 port; /* Port number this adapter */
spinlock_t adapter_lock;
spinlock_t hw_lock;
@@ -1580,6 +1605,8 @@ void ql_mpi_idc_work(struct work_struct *work);
void ql_mpi_port_cfg_work(struct work_struct *work);
int ql_mb_get_fw_state(struct ql_adapter *qdev);
int ql_cam_route_initialize(struct ql_adapter *qdev);
+int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_mb_about_fw(struct ql_adapter *qdev);
#if 1
#define QL_ALL_DUMP
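The qlge.h hunk above sizes the per-completion-queue shadow area from the buffer-queue lengths instead of a fixed four u64s. Working the new macros through with the defaults, as a check only:

/* With DB_PAGE_SIZE = 4096 and NUM_SMALL_BUFFERS = NUM_LARGE_BUFFERS = 512:
 *   512 * sizeof(u64) = 4096 bytes  ->  4096 / 4096 = 1 page, remainder 0
 *   MAX_DB_PAGES_PER_BQ(512)        = 1 + 0 = 1
 *   RX_RING_SHADOW_SPACE            = 8 + 1*8 + 1*8 = 24 bytes per CQ
 * which is what ql_start_rx_ring() now uses in place of sizeof(u64) * 4.
 */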
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 913b2a5fafc..37c99fe7977 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -293,7 +293,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ql_adapter *qdev = netdev_priv(ndev);
strncpy(drvinfo->driver, qlge_driver_name, 32);
strncpy(drvinfo->version, qlge_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
+ snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
+ (qdev->fw_rev_id & 0x00ff0000) >> 16,
+ (qdev->fw_rev_id & 0x0000ff00) >> 8,
+ (qdev->fw_rev_id & 0x000000ff));
strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
@@ -401,6 +404,7 @@ const struct ethtool_ops qlge_ethtool_ops = {
.get_rx_csum = ql_get_rx_csum,
.set_rx_csum = ql_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
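ql_get_drvinfo() above now reports the MPI firmware revision instead of "N/A". A worked example of the decoding, for illustration:

/* fw_rev_id = 0x00010203 decodes as:
 *   major = (0x00010203 & 0x00ff0000) >> 16 = 1
 *   minor = (0x00010203 & 0x0000ff00) >> 8  = 2
 *   patch =  0x00010203 & 0x000000ff        = 3
 * so drvinfo->fw_version is formatted as "v1.2.3".
 */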
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index c92ced24794..90d1f76c0e8 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -675,11 +675,12 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
int status;
__le32 *p = (__le32 *)&qdev->flash;
u32 offset;
+ u8 mac_addr[6];
/* Get flash offset for function and adjust
* for dword access.
*/
- if (!qdev->func)
+ if (!qdev->port)
offset = FUNC0_FLASH_OFFSET / sizeof(u32);
else
offset = FUNC1_FLASH_OFFSET / sizeof(u32);
@@ -705,14 +706,26 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
goto exit;
}
- if (!is_valid_ether_addr(qdev->flash.flash_params_8000.mac_addr)) {
+ /* Extract either manufacturer or BOFM modified
+ * MAC address.
+ */
+ if (qdev->flash.flash_params_8000.data_type1 == 2)
+ memcpy(mac_addr,
+ qdev->flash.flash_params_8000.mac_addr1,
+ qdev->ndev->addr_len);
+ else
+ memcpy(mac_addr,
+ qdev->flash.flash_params_8000.mac_addr,
+ qdev->ndev->addr_len);
+
+ if (!is_valid_ether_addr(mac_addr)) {
QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
status = -EINVAL;
goto exit;
}
memcpy(qdev->ndev->dev_addr,
- qdev->flash.flash_params_8000.mac_addr,
+ mac_addr,
qdev->ndev->addr_len);
exit:
@@ -731,7 +744,7 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
/* Second function's parameters follow the first
* function's.
*/
- if (qdev->func)
+ if (qdev->port)
offset = size;
if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
@@ -837,6 +850,13 @@ exit:
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
int status;
+ /*
+ * Get MPI firmware version for driver banner
+ * and ethtool info.
+ */
+ status = ql_mb_about_fw(qdev);
+ if (status)
+ goto exit;
status = ql_mb_get_fw_state(qdev);
if (status)
goto exit;
@@ -1518,6 +1538,22 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
return;
}
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
+ ib_mac_rsp->flags2);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
prefetch(skb->data);
skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
@@ -1540,7 +1576,6 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
* csum or frame errors.
*/
if (qdev->rx_csum &&
- !(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -2108,7 +2143,6 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
wmb();
ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
- ndev->trans_start = jiffies;
QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
tx_ring->prod_idx, skb->len);
@@ -2203,7 +2237,7 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
&tx_ring->wq_base_dma);
if ((tx_ring->wq_base == NULL)
- || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
+ || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
return -ENOMEM;
}
@@ -2518,14 +2552,16 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
struct cqicb *cqicb = &rx_ring->cqicb;
void *shadow_reg = qdev->rx_ring_shadow_reg_area +
- (rx_ring->cq_id * sizeof(u64) * 4);
+ (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
- (rx_ring->cq_id * sizeof(u64) * 4);
+ (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
void __iomem *doorbell_area =
qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
int err = 0;
u16 bq_len;
u64 tmp;
+ __le64 *base_indirect_ptr;
+ int page_entries;
/* Set up the shadow registers for this ring. */
rx_ring->prod_idx_sh_reg = shadow_reg;
@@ -2534,8 +2570,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
shadow_reg_dma += sizeof(u64);
rx_ring->lbq_base_indirect = shadow_reg;
rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
- shadow_reg += sizeof(u64);
- shadow_reg_dma += sizeof(u64);
+ shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+ shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
rx_ring->sbq_base_indirect = shadow_reg;
rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
@@ -2572,7 +2608,14 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
if (rx_ring->lbq_len) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
tmp = (u64)rx_ring->lbq_base_dma;;
- *((__le64 *) rx_ring->lbq_base_indirect) = cpu_to_le64(tmp);
+ base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
+ page_entries = 0;
+ do {
+ *base_indirect_ptr = cpu_to_le64(tmp);
+ tmp += DB_PAGE_SIZE;
+ base_indirect_ptr++;
+ page_entries++;
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
cqicb->lbq_addr =
cpu_to_le64(rx_ring->lbq_base_indirect_dma);
bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
@@ -2589,7 +2632,14 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
if (rx_ring->sbq_len) {
cqicb->flags |= FLAGS_LS; /* Load sbq values */
tmp = (u64)rx_ring->sbq_base_dma;;
- *((__le64 *) rx_ring->sbq_base_indirect) = cpu_to_le64(tmp);
+ base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
+ page_entries = 0;
+ do {
+ *base_indirect_ptr = cpu_to_le64(tmp);
+ tmp += DB_PAGE_SIZE;
+ base_indirect_ptr++;
+ page_entries++;
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
cqicb->sbq_addr =
cpu_to_le64(rx_ring->sbq_base_indirect_dma);
cqicb->sbq_buf_size =
@@ -3174,7 +3224,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
if (value & RST_FO_FR) {
QPRINTK(qdev, IFDOWN, ERR,
- "ETIMEOUT!!! errored out of resetting the chip!\n");
+ "ETIMEDOUT!!! errored out of resetting the chip!\n");
status = -ETIMEDOUT;
}
@@ -3186,9 +3236,10 @@ static void ql_display_dev_info(struct net_device *ndev)
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
QPRINTK(qdev, PROBE, INFO,
- "Function #%d, NIC Roll %d, NIC Rev = %d, "
+ "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
"XG Roll = %d, XG Rev = %d.\n",
qdev->func,
+ qdev->port,
qdev->chip_rev_id & 0x0000000f,
qdev->chip_rev_id >> 4 & 0x0000000f,
qdev->chip_rev_id >> 8 & 0x0000000f,
@@ -3264,7 +3315,6 @@ static int ql_adapter_up(struct ql_adapter *qdev)
err = ql_adapter_initialize(qdev);
if (err) {
QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
- spin_unlock(&qdev->hw_lock);
goto err_init;
}
set_bit(QL_ADAPTER_UP, &qdev->flags);
@@ -3361,7 +3411,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
* completion handler rx_rings.
*/
qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
- netif_set_gso_max_size(qdev->ndev, 65536);
for (i = 0; i < qdev->tx_ring_count; i++) {
tx_ring = &qdev->tx_ring[i];
@@ -3644,12 +3693,53 @@ static struct nic_operations qla8000_nic_ops = {
.port_initialize = ql_8000_port_initialize,
};
+/* Find the pcie function number for the other NIC
+ * on this chip. Since both NIC functions share a
+ * common firmware we have the lowest enabled function
+ * do any common work. Examples would be resetting
+ * after a fatal firmware error, or doing a firmware
+ * coredump.
+ */
+static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
+{
+ int status = 0;
+ u32 temp;
+ u32 nic_func1, nic_func2;
+
+ status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
+ &temp);
+ if (status)
+ return status;
+
+ nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
+ MPI_TEST_NIC_FUNC_MASK);
+ nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
+ MPI_TEST_NIC_FUNC_MASK);
+
+ if (qdev->func == nic_func1)
+ qdev->alt_func = nic_func2;
+ else if (qdev->func == nic_func2)
+ qdev->alt_func = nic_func1;
+ else
+ status = -EIO;
+
+ return status;
+}
-static void ql_get_board_info(struct ql_adapter *qdev)
+static int ql_get_board_info(struct ql_adapter *qdev)
{
+ int status;
qdev->func =
(ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
- if (qdev->func) {
+ if (qdev->func > 3)
+ return -EIO;
+
+ status = ql_get_alt_pcie_func(qdev);
+ if (status)
+ return status;
+
+ qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
+ if (qdev->port) {
qdev->xg_sem_mask = SEM_XGMAC1_MASK;
qdev->port_link_up = STS_PL1;
qdev->port_init = STS_PI1;
@@ -3668,6 +3758,7 @@ static void ql_get_board_info(struct ql_adapter *qdev)
qdev->nic_ops = &qla8012_nic_ops;
else if (qdev->device_id == QLGE_DEVICE_ID_8000)
qdev->nic_ops = &qla8000_nic_ops;
+ return status;
}
static void ql_release_all(struct pci_dev *pdev)
@@ -3762,7 +3853,12 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
qdev->ndev = ndev;
qdev->pdev = pdev;
- ql_get_board_info(qdev);
+ err = ql_get_board_info(qdev);
+ if (err) {
+ dev_err(&pdev->dev, "Register access failed.\n");
+ err = -EIO;
+ goto err_out;
+ }
qdev->msg_enable = netif_msg_init(debug, default_msg);
spin_lock_init(&qdev->hw_lock);
spin_lock_init(&qdev->stats_lock);
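[Editor's note: a stand-alone sketch, not part of the patch.] The loops added to ql_start_rx_ring() earlier in this file write one 64-bit page address per doorbell page of a buffer queue into the shadow area, stepping the DMA address by DB_PAGE_SIZE each time. A user-space sketch of that fill pattern (base address and page count are invented; the driver additionally converts each entry with cpu_to_le64()):

#include <stdio.h>
#include <stdint.h>

#define DB_PAGE_SIZE 4096

int main(void)
{
	uint64_t base_indirect[8];	/* stands in for the shadow area */
	uint64_t tmp = 0x100000;	/* hypothetical buffer-queue DMA base */
	int pages = 3;			/* would be MAX_DB_PAGES_PER_BQ(len) */
	int page_entries = 0;

	do {
		base_indirect[page_entries] = tmp;
		tmp += DB_PAGE_SIZE;
		page_entries++;
	} while (page_entries < pages);

	for (page_entries = 0; page_entries < pages; page_entries++)
		printf("entry %d -> 0x%llx\n", page_entries,
		       (unsigned long long)base_indirect[page_entries]);
	return 0;
}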
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 9f81b797f10..71afbf8b9c5 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -90,14 +90,14 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
*/
static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
{
- int count = 50; /* TODO: arbitrary for now. */
+ int count = 100;
u32 value;
do {
value = ql_read32(qdev, STS);
if (value & STS_PI)
return 0;
- udelay(UDELAY_DELAY); /* 10us */
+ mdelay(UDELAY_DELAY); /* 100ms */
} while (--count);
return -ETIMEDOUT;
}
@@ -141,7 +141,7 @@ end:
/* We are being asked by firmware to accept
* a change to the port. This is only
* a change to max frame sizes (Tx/Rx), pause
- * paramters, or loopback mode. We wake up a worker
+ * parameters, or loopback mode. We wake up a worker
* to handler processing this since a mailbox command
* will need to be sent to ACK the request.
*/
@@ -371,7 +371,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
/* We are being asked by firmware to accept
* a change to the port. This is only
* a change to max frame sizes (Tx/Rx), pause
- * paramters, or loopback mode.
+ * parameters, or loopback mode.
*/
case AEN_IDC_REQ:
status = ql_idc_req_aen(qdev);
@@ -380,7 +380,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
/* Process and inbound IDC event.
* This will happen when we're trying to
* change tx/rx max frame size, change pause
- * paramters or loopback mode.
+ * parameters or loopback mode.
*/
case AEN_IDC_CMPLT:
case AEN_IDC_EXT:
@@ -453,6 +453,13 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
}
end:
ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+ /* Restore the original mailbox count to
+ * what the caller asked for. This can get
+ * changed when a mailbox command is waiting
+ * for a response and an AEN arrives and
+ * is handled.
+ */
+ mbcp->out_count = orig_count;
return status;
}
@@ -540,6 +547,40 @@ end:
return status;
}
+
+/* Get MPI firmware version. This will be used for
+ * driver banner and for ethtool info.
+ * Returns zero on success.
+ */
+int ql_mb_about_fw(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 3;
+
+ mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed about firmware command\n");
+ status = -EIO;
+ }
+
+ /* Store the firmware version */
+ qdev->fw_rev_id = mbcp->mbox_out[1];
+
+ return status;
+}
+
/* Get functional state for MPI firmware.
* Returns zero on success.
*/
@@ -754,7 +795,6 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
{
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
- struct net_device *ndev = qdev->ndev;
int status;
status = ql_mb_get_port_cfg(qdev);
@@ -764,9 +804,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
goto err;
}
- if (ndev->mtu <= 2500)
- goto end;
- else if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
+ if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
qdev->max_frame_size ==
CFG_DEFAULT_MAX_FRAME_SIZE)
goto end;
@@ -831,13 +869,19 @@ void ql_mpi_work(struct work_struct *work)
container_of(work, struct ql_adapter, mpi_work.work);
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
+ int err = 0;
mutex_lock(&qdev->mpi_mutex);
while (ql_read32(qdev, STS) & STS_PI) {
memset(mbcp, 0, sizeof(struct mbox_params));
mbcp->out_count = 1;
- ql_mpi_handler(qdev, mbcp);
+ /* Don't continue if an async event
+ * did not complete properly.
+ */
+ err = ql_mpi_handler(qdev, mbcp);
+ if (err)
+ break;
}
mutex_unlock(&qdev->mpi_mutex);
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 6f97b47d74a..ed63d23a645 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
-#define DRV_VERSION "0.22"
-#define DRV_RELDATE "25Mar2009"
+#define DRV_VERSION "0.23"
+#define DRV_RELDATE "05May2009"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@@ -401,6 +401,9 @@ static void r6040_init_mac_regs(struct net_device *dev)
* we may got called by r6040_tx_timeout which has left
* some unsent tx buffers */
iowrite16(0x01, ioaddr + MTPR);
+
+ /* Check media */
+ mii_check_media(&lp->mii_if, 1, 1);
}
static void r6040_tx_timeout(struct net_device *dev)
@@ -528,6 +531,8 @@ static int r6040_phy_mode_chk(struct net_device *dev)
phy_dat = 0x0000;
}
+ mii_check_media(&lp->mii_if, 0, 1);
+
return phy_dat;
};
@@ -742,6 +747,14 @@ static int r6040_up(struct net_device *dev)
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
int ret;
+ u16 val;
+
+ /* Check presence of a second PHY */
+ val = r6040_phy_read(ioaddr, lp->phy_addr, 2);
+ if (val == 0xFFFF) {
+ printk(KERN_ERR DRV_NAME " no second PHY attached\n");
+ return -EIO;
+ }
/* Initialise and alloc RX/TX buffers */
r6040_init_txbufs(dev);
@@ -802,7 +815,6 @@ static void r6040_timer(unsigned long data)
lp->phy_mode = phy_mode;
lp->mcr0 = (lp->mcr0 & 0x7fff) | phy_mode;
iowrite16(lp->mcr0, ioaddr);
- printk(KERN_INFO "Link Change %x \n", ioread16(ioaddr));
}
/* Timer active again */
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0b6e8c89683..35196faa084 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -66,7 +66,6 @@ static const int multicast_filter_limit = 32;
#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
-#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -94,6 +93,7 @@ static const int multicast_filter_limit = 32;
#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
enum mac_version {
+ RTL_GIGA_MAC_NONE = 0x00,
RTL_GIGA_MAC_VER_01 = 0x01, // 8169
RTL_GIGA_MAC_VER_02 = 0x02, // 8169S
RTL_GIGA_MAC_VER_03 = 0x03, // 8110S
@@ -479,7 +479,6 @@ struct rtl8169_private {
u16 intr_event;
u16 napi_event;
u16 intr_mask;
- int phy_auto_nego_reg;
int phy_1000_ctrl_reg;
#ifdef CONFIG_R8169_VLAN
struct vlan_group *vlgrp;
@@ -844,76 +843,81 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- int auto_nego, giga_ctrl;
-
- auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
- auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
- giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
- giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+ int giga_ctrl, bmcr;
if (autoneg == AUTONEG_ENABLE) {
+ int auto_nego;
+
+ auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL);
- giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
- } else {
- if (speed == SPEED_10)
- auto_nego |= ADVERTISE_10HALF | ADVERTISE_10FULL;
- else if (speed == SPEED_100)
- auto_nego |= ADVERTISE_100HALF | ADVERTISE_100FULL;
- else if (speed == SPEED_1000)
- giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
-
- if (duplex == DUPLEX_HALF)
- auto_nego &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);
+ auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
- if (duplex == DUPLEX_FULL)
- auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);
-
- /* This tweak comes straight from Realtek's driver. */
- if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
- ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_16))) {
- auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
- }
- }
+ giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
+ giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- /* The 8100e/8101e/8102e do Fast Ethernet only. */
- if ((tp->mac_version == RTL_GIGA_MAC_VER_07) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_08) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_09) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_10) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
- if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
- netif_msg_link(tp)) {
+ /* The 8100e/8101e/8102e do Fast Ethernet only. */
+ if ((tp->mac_version != RTL_GIGA_MAC_VER_07) &&
+ (tp->mac_version != RTL_GIGA_MAC_VER_08) &&
+ (tp->mac_version != RTL_GIGA_MAC_VER_09) &&
+ (tp->mac_version != RTL_GIGA_MAC_VER_10) &&
+ (tp->mac_version != RTL_GIGA_MAC_VER_13) &&
+ (tp->mac_version != RTL_GIGA_MAC_VER_14) &&
+ (tp->mac_version != RTL_GIGA_MAC_VER_15) &&
+ (tp->mac_version != RTL_GIGA_MAC_VER_16)) {
+ giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
+ } else if (netif_msg_link(tp)) {
printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
dev->name);
}
- giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- }
- auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
+
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
+ (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
+ /*
+ * Wake up the PHY.
+ * Vendor specific (0x1f) and reserved (0x0e) MII
+ * registers.
+ */
+ mdio_write(ioaddr, 0x1f, 0x0000);
+ mdio_write(ioaddr, 0x0e, 0x0000);
+ }
+
+ mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
+ mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
+ } else {
+ giga_ctrl = 0;
+
+ if (speed == SPEED_10)
+ bmcr = 0;
+ else if (speed == SPEED_100)
+ bmcr = BMCR_SPEED100;
+ else
+ return -EINVAL;
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
- if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
- (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
- /*
- * Wake up the PHY.
- * Vendor specific (0x1f) and reserved (0x0e) MII registers.
- */
mdio_write(ioaddr, 0x1f, 0x0000);
- mdio_write(ioaddr, 0x0e, 0x0000);
}
- tp->phy_auto_nego_reg = auto_nego;
tp->phy_1000_ctrl_reg = giga_ctrl;
- mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
- mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
- mdio_write(ioaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
+ mdio_write(ioaddr, MII_BMCR, bmcr);
+
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
+ if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
+ mdio_write(ioaddr, 0x17, 0x2138);
+ mdio_write(ioaddr, 0x0e, 0x0260);
+ } else {
+ mdio_write(ioaddr, 0x17, 0x2108);
+ mdio_write(ioaddr, 0x0e, 0x0000);
+ }
+ }
+
return 0;
}
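[Editor's note: a stand-alone sketch, not part of the patch.] In the reworked rtl8169_set_speed_xmii() above, a forced (non-autoneg) setting is now expressed purely through BMCR instead of through advertisement bits. A user-space sketch of that computation; the BMCR bit values are the standard MII ones, reproduced here only so the sketch compiles on its own, and 1000 Mb/s is rejected because it requires autonegotiation:

#include <stdio.h>

#define BMCR_SPEED100	0x2000
#define BMCR_FULLDPLX	0x0100

enum { SPEED_10 = 10, SPEED_100 = 100 };
enum { DUPLEX_HALF = 0, DUPLEX_FULL = 1 };

static int forced_bmcr(int speed, int duplex)
{
	int bmcr;

	if (speed == SPEED_10)
		bmcr = 0;
	else if (speed == SPEED_100)
		bmcr = BMCR_SPEED100;
	else
		return -1;	/* the driver returns -EINVAL here */

	if (duplex == DUPLEX_FULL)
		bmcr |= BMCR_FULLDPLX;
	return bmcr;
}

int main(void)
{
	printf("100/full -> 0x%04x\n", forced_bmcr(SPEED_100, DUPLEX_FULL));
	printf("10/half  -> 0x%04x\n", forced_bmcr(SPEED_10, DUPLEX_HALF));
	return 0;
}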
@@ -1296,7 +1300,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
{ 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
{ 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
- { 0x00000000, 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */
+ /* Catch-all */
+ { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
}, *p = mac_info;
u32 reg;
@@ -1304,12 +1309,6 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
while ((reg & p->mask) != p->val)
p++;
tp->mac_version = p->mac_version;
-
- if (p->mask == 0x00000000) {
- struct pci_dev *pdev = tp->pci_dev;
-
- dev_info(&pdev->dev, "unknown MAC (%08x)\n", reg);
- }
}
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
@@ -1885,6 +1884,7 @@ static const struct rtl_cfg_info {
u16 intr_event;
u16 napi_event;
unsigned features;
+ u8 default_ver;
} rtl_cfg_infos [] = {
[RTL_CFG_0] = {
.hw_start = rtl_hw_start_8169,
@@ -1893,7 +1893,8 @@ static const struct rtl_cfg_info {
.intr_event = SYSErr | LinkChg | RxOverflow |
RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
.napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
- .features = RTL_FEATURE_GMII
+ .features = RTL_FEATURE_GMII,
+ .default_ver = RTL_GIGA_MAC_VER_01,
},
[RTL_CFG_1] = {
.hw_start = rtl_hw_start_8168,
@@ -1902,7 +1903,8 @@ static const struct rtl_cfg_info {
.intr_event = SYSErr | LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
- .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI
+ .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
+ .default_ver = RTL_GIGA_MAC_VER_11,
},
[RTL_CFG_2] = {
.hw_start = rtl_hw_start_8101,
@@ -1911,7 +1913,8 @@ static const struct rtl_cfg_info {
.intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
.napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
- .features = RTL_FEATURE_MSI
+ .features = RTL_FEATURE_MSI,
+ .default_ver = RTL_GIGA_MAC_VER_13,
}
};
@@ -2092,6 +2095,15 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
rtl8169_get_mac_version(tp, ioaddr);
+ /* Use appropriate default if unknown */
+ if (tp->mac_version == RTL_GIGA_MAC_NONE) {
+ if (netif_msg_probe(tp)) {
+ dev_notice(&pdev->dev,
+ "unknown MAC, using family default\n");
+ }
+ tp->mac_version = cfg->default_ver;
+ }
+
rtl8169_print_mac_version(tp);
for (i = 0; i < ARRAY_SIZE(rtl_chip_info); i++) {
@@ -2099,13 +2111,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
if (i == ARRAY_SIZE(rtl_chip_info)) {
- /* Unknown chip: assume array element #0, original RTL-8169 */
- if (netif_msg_probe(tp)) {
- dev_printk(KERN_DEBUG, &pdev->dev,
- "unknown chip version, assuming %s\n",
- rtl_chip_info[0].name);
- }
- i = 0;
+ dev_err(&pdev->dev,
+ "driver bug, MAC version not found in rtl_chip_info\n");
+ goto err_out_msi_5;
}
tp->chipset = i;
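[Editor's note: a stand-alone sketch, not part of the patch.] rtl8169_get_mac_version() walks a mask/value table against the chip's TxConfig register, and with the hunks above an unrecognised value now falls through to the explicit RTL_GIGA_MAC_NONE catch-all (later replaced by the per-family default_ver) instead of silently assuming the oldest chip. A user-space sketch of that scan hitting the catch-all (the register value and version numbers are illustrative):

#include <stdio.h>
#include <stdint.h>

enum { MAC_NONE = 0, MAC_VER_01 = 1, MAC_VER_02 = 2 };

int main(void)
{
	const struct {
		uint32_t mask;
		uint32_t val;
		int mac_version;
	} mac_info[] = {
		{ 0xfc800000, 0x00800000, MAC_VER_02 },
		{ 0xfc800000, 0x00000000, MAC_VER_01 },
		{ 0x00000000, 0x00000000, MAC_NONE },	/* catch-all */
	}, *p = mac_info;
	uint32_t reg = 0x38000000;	/* hypothetical, unrecognised value */

	while ((reg & p->mask) != p->val)
		p++;
	printf("mac_version=%d\n", p->mac_version);	/* prints 0 (none) */
	return 0;
}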
@@ -2357,10 +2365,10 @@ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
return cmd;
}
-static void rtl_set_rx_max_size(void __iomem *ioaddr)
+static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
/* Low hurts. Let's disable the filtering. */
- RTL_W16(RxMaxSize, 16383);
+ RTL_W16(RxMaxSize, rx_buf_sz);
}
static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
@@ -2407,7 +2415,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
RTL_W8(EarlyTxThres, EarlyTxThld);
- rtl_set_rx_max_size(ioaddr);
+ rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
(tp->mac_version == RTL_GIGA_MAC_VER_02) ||
@@ -2668,7 +2676,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_W8(EarlyTxThres, EarlyTxThld);
- rtl_set_rx_max_size(ioaddr);
+ rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
@@ -2846,7 +2854,7 @@ static void rtl_hw_start_8101(struct net_device *dev)
RTL_W8(EarlyTxThres, EarlyTxThld);
- rtl_set_rx_max_size(ioaddr);
+ rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
@@ -3270,8 +3278,6 @@ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
txd->opts1 = cpu_to_le32(status);
- dev->trans_start = jiffies;
-
tp->cur_tx += frags + 1;
smp_wmb();
@@ -3372,7 +3378,7 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);
if (status & LastFrag) {
- dev_kfree_skb_irq(tx_skb->skb);
+ dev_kfree_skb(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
@@ -3554,54 +3560,64 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
int handled = 0;
int status;
+ /* loop handling interrupts until we have no new ones or
+ * we hit an invalid/hotplug case.
+ */
status = RTL_R16(IntrStatus);
+ while (status && status != 0xffff) {
+ handled = 1;
- /* hotplug/major error/no more work/shared irq */
- if ((status == 0xffff) || !status)
- goto out;
-
- handled = 1;
+ /* Handle all of the error cases first. These will reset
+ * the chip, so just exit the loop.
+ */
+ if (unlikely(!netif_running(dev))) {
+ rtl8169_asic_down(ioaddr);
+ break;
+ }
- if (unlikely(!netif_running(dev))) {
- rtl8169_asic_down(ioaddr);
- goto out;
- }
+ /* Work around for rx fifo overflow */
+ if (unlikely(status & RxFIFOOver) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+ netif_stop_queue(dev);
+ rtl8169_tx_timeout(dev);
+ break;
+ }
- status &= tp->intr_mask;
- RTL_W16(IntrStatus,
- (status & RxFIFOOver) ? (status | RxOverflow) : status);
+ if (unlikely(status & SYSErr)) {
+ rtl8169_pcierr_interrupt(dev);
+ break;
+ }
- if (!(status & tp->intr_event))
- goto out;
+ if (status & LinkChg)
+ rtl8169_check_link_status(dev, tp, ioaddr);
- /* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
- netif_stop_queue(dev);
- rtl8169_tx_timeout(dev);
- goto out;
- }
+ /* We need to see the latest version of tp->intr_mask to
+ * avoid ignoring an MSI interrupt and having to wait for
+ * another event which may never come.
+ */
+ smp_rmb();
+ if (status & tp->intr_mask & tp->napi_event) {
+ RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
+ tp->intr_mask = ~tp->napi_event;
+
+ if (likely(napi_schedule_prep(&tp->napi)))
+ __napi_schedule(&tp->napi);
+ else if (netif_msg_intr(tp)) {
+ printk(KERN_INFO "%s: interrupt %04x in poll\n",
+ dev->name, status);
+ }
+ }
- if (unlikely(status & SYSErr)) {
- rtl8169_pcierr_interrupt(dev);
- goto out;
+ /* We only get a new MSI interrupt when all active irq
+ * sources on the chip have been acknowledged. So, ack
+ * everything we've seen and check if new sources have become
+ * active to avoid blocking all interrupts from the chip.
+ */
+ RTL_W16(IntrStatus,
+ (status & RxFIFOOver) ? (status | RxOverflow) : status);
+ status = RTL_R16(IntrStatus);
}
- if (status & LinkChg)
- rtl8169_check_link_status(dev, tp, ioaddr);
-
- if (status & tp->napi_event) {
- RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
- tp->intr_mask = ~tp->napi_event;
-
- if (likely(napi_schedule_prep(&tp->napi)))
- __napi_schedule(&tp->napi);
- else if (netif_msg_intr(tp)) {
- printk(KERN_INFO "%s: interrupt %04x in poll\n",
- dev->name, status);
- }
- }
-out:
return IRQ_RETVAL(handled);
}
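[Editor's note: a stand-alone sketch, not part of the patch.] The hunk above turns the interrupt handler into a loop: keep servicing and acknowledging status bits until the status register reads back zero (nothing pending) or 0xffff (device gone). A user-space sketch of that control flow, with the register mocked by a short sequence of values:

#include <stdio.h>
#include <stdint.h>

static const uint16_t fake_status[] = { 0x0041, 0x0004, 0x0000 };
static unsigned int reads;

static uint16_t read_intr_status(void)
{
	return fake_status[reads++];
}

int main(void)
{
	int handled = 0;
	uint16_t status = read_intr_status();

	while (status && status != 0xffff) {
		handled = 1;
		printf("servicing status 0x%04x\n", status);
		/* ack what we saw, then re-read to catch new sources */
		status = read_intr_status();
	}
	printf("handled=%d\n", handled);
	return 0;
}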
@@ -3617,13 +3633,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(napi);
- tp->intr_mask = 0xffff;
- /*
- * 20040426: the barrier is not strictly required but the
- * behavior of the irq handler could be less predictable
- * without it. Btw, the lack of flush for the posted pci
- * write is safe - FR
+
+ /* We need to force the visibility of tp->intr_mask
+ * for other CPUs, as we can lose an MSI interrupt
+ * and potentially wait for a retransmit timeout if we don't.
+ * The posted write to IntrMask is safe, as it will
+ * eventually make it to the chip and we won't lose anything
+ * until it does.
*/
+ tp->intr_mask = 0xffff;
smp_wmb();
RTL_W16(IntrMask, tp->intr_event);
}
@@ -3791,16 +3809,13 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
return &dev->stats;
}
-#ifdef CONFIG_PM
-
-static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
+static void rtl8169_net_suspend(struct net_device *dev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
if (!netif_running(dev))
- goto out_pci_suspend;
+ return;
netif_device_detach(dev);
netif_stop_queue(dev);
@@ -3812,24 +3827,25 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
rtl8169_rx_missed(dev, ioaddr);
spin_unlock_irq(&tp->lock);
+}
-out_pci_suspend:
- pci_save_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, state),
- (tp->features & RTL_FEATURE_WOL) ? 1 : 0);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+#ifdef CONFIG_PM
+
+static int rtl8169_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ rtl8169_net_suspend(dev);
return 0;
}
-static int rtl8169_resume(struct pci_dev *pdev)
+static int rtl8169_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D0, 0);
-
if (!netif_running(dev))
goto out;
@@ -3840,23 +3856,42 @@ out:
return 0;
}
+static struct dev_pm_ops rtl8169_pm_ops = {
+ .suspend = rtl8169_suspend,
+ .resume = rtl8169_resume,
+ .freeze = rtl8169_suspend,
+ .thaw = rtl8169_resume,
+ .poweroff = rtl8169_suspend,
+ .restore = rtl8169_resume,
+};
+
+#define RTL8169_PM_OPS (&rtl8169_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define RTL8169_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
static void rtl_shutdown(struct pci_dev *pdev)
{
- rtl8169_suspend(pdev, PMSG_SUSPEND);
-}
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ rtl8169_net_suspend(dev);
-#endif /* CONFIG_PM */
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
static struct pci_driver rtl8169_pci_driver = {
.name = MODULENAME,
.id_table = rtl8169_pci_tbl,
.probe = rtl8169_init_one,
.remove = __devexit_p(rtl8169_remove_one),
-#ifdef CONFIG_PM
- .suspend = rtl8169_suspend,
- .resume = rtl8169_resume,
.shutdown = rtl_shutdown,
-#endif
+ .driver.pm = RTL8169_PM_OPS,
};
static int __init rtl8169_init_module(void)
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index ec59e29807a..8702e7acdee 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -428,6 +428,15 @@ static const struct ethtool_ops rionet_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
+static const struct net_device_ops rionet_netdev_ops = {
+ .ndo_open = rionet_open,
+ .ndo_stop = rionet_close,
+ .ndo_start_xmit = rionet_start_xmit,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
static int rionet_setup_netdev(struct rio_mport *mport)
{
int rc = 0;
@@ -466,10 +475,7 @@ static int rionet_setup_netdev(struct rio_mport *mport)
ndev->dev_addr[4] = device_id >> 8;
ndev->dev_addr[5] = device_id & 0xff;
- /* Fill in the driver function table */
- ndev->open = &rionet_open;
- ndev->hard_start_xmit = &rionet_start_xmit;
- ndev->stop = &rionet_close;
+ ndev->netdev_ops = &rionet_netdev_ops;
ndev->mtu = RIO_MAX_MSG_SIZE - 14;
ndev->features = NETIF_F_LLTX;
SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index d890829a9ac..81dbcbb910f 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1425,7 +1425,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!(new_skb = dev_alloc_skb(len + 8))) {
dev_kfree_skb(skb);
netif_wake_queue(dev);
- return -EBUSY;
+ return NETDEV_TX_OK;
}
skb_reserve(new_skb, 8);
skb_put(new_skb, len);
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index f8274f8941e..416669fd68c 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -271,11 +271,6 @@ struct XENA_dev_config {
u64 mdio_control;
#define MDIO_MMD_INDX_ADDR(val) vBIT(val, 0, 16)
#define MDIO_MMD_DEV_ADDR(val) vBIT(val, 19, 5)
-#define MDIO_MMD_PMA_DEV_ADDR 0x1
-#define MDIO_MMD_PMD_DEV_ADDR 0x1
-#define MDIO_MMD_WIS_DEV_ADDR 0x2
-#define MDIO_MMD_PCS_DEV_ADDR 0x3
-#define MDIO_MMD_PHYXS_DEV_ADDR 0x4
#define MDIO_MMS_PRT_ADDR(val) vBIT(val, 27, 5)
#define MDIO_CTRL_START_TRANS(val) vBIT(val, 56, 4)
#define MDIO_OP(val) vBIT(val, 60, 2)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 1a4979f27fb..458daa06ed4 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -63,6 +63,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/mdio.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -1763,7 +1764,7 @@ static int init_nic(struct s2io_nic *nic)
* by then we return error.
*/
time = 0;
- while (TRUE) {
+ while (true) {
val64 = readq(&bar0->rti_command_mem);
if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
break;
@@ -2136,7 +2137,7 @@ static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
herc = (sp->device_type == XFRAME_II_DEVICE);
- if (flag == FALSE) {
+ if (flag == false) {
if ((!herc && (sp->pdev->revision >= 4)) || herc) {
if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
ret = 1;
@@ -3328,9 +3329,9 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
struct stat_block *stat_info = sp->mac_control.stats_info;
/* Check the communication with the MDIO slave */
- addr = 0x0000;
+ addr = MDIO_CTRL1;
val64 = 0x0;
- val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+ val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
if((val64 == 0xFFFF) || (val64 == 0x0000))
{
DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
@@ -3338,24 +3339,24 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
return;
}
- /* Check for the expecte value of 2040 at PMA address 0x0000 */
- if(val64 != 0x2040)
+ /* Check for the expected value of control reg 1 */
+ if(val64 != MDIO_CTRL1_SPEED10G)
{
DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
- DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
- (unsigned long long)val64);
+ DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x%x\n",
+ (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
return;
}
/* Loading the DOM register to MDIO register */
addr = 0xA100;
- s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
- val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+ s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
+ val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
/* Reading the Alarm flags */
addr = 0xA070;
val64 = 0x0;
- val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+ val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
flag = CHECKBIT(val64, 0x7);
type = 1;
@@ -3387,7 +3388,7 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
/* Reading the Warning flags */
addr = 0xA074;
val64 = 0x0;
- val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+ val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
if(CHECKBIT(val64, 0x7))
stat_info->xpak_stat.warn_transceiver_temp_high++;
@@ -3586,7 +3587,7 @@ static void s2io_reset(struct s2io_nic * sp)
writeq(val64, &bar0->pcc_err_reg);
}
- sp->device_enabled_once = FALSE;
+ sp->device_enabled_once = false;
}
/**
@@ -4298,7 +4299,6 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
s2io_stop_tx_queue(sp, fifo->fifo_no);
}
mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&fifo->tx_lock, flags);
if (sp->config.intr_type == MSI_X)
@@ -5572,10 +5572,10 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
val64 = readq(&bar0->rmac_pause_cfg);
if (val64 & RMAC_PAUSE_GEN_ENABLE)
- ep->tx_pause = TRUE;
+ ep->tx_pause = true;
if (val64 & RMAC_PAUSE_RX_ENABLE)
- ep->rx_pause = TRUE;
- ep->autoneg = FALSE;
+ ep->rx_pause = true;
+ ep->autoneg = false;
}
/**
@@ -6806,7 +6806,7 @@ static void s2io_set_link(struct work_struct *work)
val64 |= ADAPTER_LED_ON;
writeq(val64, &bar0->adapter_control);
}
- nic->device_enabled_once = TRUE;
+ nic->device_enabled_once = true;
} else {
DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
@@ -7754,7 +7754,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
struct s2io_nic *sp;
struct net_device *dev;
int i, j, ret;
- int dma_flag = FALSE;
+ int dma_flag = false;
u32 mac_up, mac_down;
u64 val64 = 0, tmp64 = 0;
struct XENA_dev_config __iomem *bar0 = NULL;
@@ -7777,7 +7777,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
- dma_flag = TRUE;
+ dma_flag = true;
if (pci_set_consistent_dma_mask
(pdev, DMA_BIT_MASK(64))) {
DBG_PRINT(ERR_DBG,
@@ -7818,7 +7818,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
sp->dev = dev;
sp->pdev = pdev;
sp->high_dma_flag = dma_flag;
- sp->device_enabled_once = FALSE;
+ sp->device_enabled_once = false;
if (rx_ring_mode == 1)
sp->rxd_mode = RXD_MODE_1;
if (rx_ring_mode == 2)
@@ -7964,7 +7964,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- if (sp->high_dma_flag == TRUE)
+ if (sp->high_dma_flag == true)
dev->features |= NETIF_F_HIGHDMA;
dev->features |= NETIF_F_TSO;
dev->features |= NETIF_F_TSO6;
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 55cb943f23f..d5c5be6c07b 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -18,15 +18,6 @@
#define vBIT(val, loc, sz) (((u64)val) << (64-loc-sz))
#define INV(d) ((d&0xff)<<24) | (((d>>8)&0xff)<<16) | (((d>>16)&0xff)<<8)| ((d>>24)&0xff)
-#ifndef BOOL
-#define BOOL int
-#endif
-
-#ifndef TRUE
-#define TRUE 1
-#define FALSE 0
-#endif
-
#undef SUCCESS
#define SUCCESS 0
#define FAILURE -1
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index ce7551e17ba..d8c9cf1b901 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2084,7 +2084,7 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
spin_unlock_irqrestore(&sc->sbm_lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
dev->trans_start = jiffies;
@@ -2271,6 +2271,21 @@ static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
return 0;
}
+static const struct net_device_ops sbmac_netdev_ops = {
+ .ndo_open = sbmac_open,
+ .ndo_stop = sbmac_close,
+ .ndo_start_xmit = sbmac_start_tx,
+ .ndo_set_multicast_list = sbmac_set_rx_mode,
+ .ndo_tx_timeout = sbmac_tx_timeout,
+ .ndo_do_ioctl = sbmac_mii_ioctl,
+ .ndo_change_mtu = sb1250_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sbmac_netpoll,
+#endif
+};
+
/**********************************************************************
* SBMAC_INIT(dev)
*
@@ -2285,7 +2300,7 @@ static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
static int sbmac_init(struct platform_device *pldev, long long base)
{
- struct net_device *dev = pldev->dev.driver_data;
+ struct net_device *dev = dev_get_drvdata(&pldev->dev);
int idx = pldev->id;
struct sbmac_softc *sc = netdev_priv(dev);
unsigned char *eaddr;
@@ -2327,21 +2342,11 @@ static int sbmac_init(struct platform_device *pldev, long long base)
spin_lock_init(&(sc->sbm_lock));
- dev->open = sbmac_open;
- dev->hard_start_xmit = sbmac_start_tx;
- dev->stop = sbmac_close;
- dev->set_multicast_list = sbmac_set_rx_mode;
- dev->do_ioctl = sbmac_mii_ioctl;
- dev->tx_timeout = sbmac_tx_timeout;
- dev->watchdog_timeo = TX_TIMEOUT;
+ dev->netdev_ops = &sbmac_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
netif_napi_add(dev, &sc->napi, sbmac_poll, 16);
- dev->change_mtu = sb1250_change_mtu;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = sbmac_netpoll;
-#endif
-
dev->irq = UNIT_INT(idx);
/* This is needed for PASS2 for Rx H/W checksum feature */
@@ -2726,7 +2731,7 @@ static int __init sbmac_probe(struct platform_device *pldev)
goto out_unmap;
}
- pldev->dev.driver_data = dev;
+ dev_set_drvdata(&pldev->dev, dev);
SET_NETDEV_DEV(dev, &pldev->dev);
sc = netdev_priv(dev);
@@ -2751,7 +2756,7 @@ out_out:
static int __exit sbmac_remove(struct platform_device *pldev)
{
- struct net_device *dev = pldev->dev.driver_data;
+ struct net_device *dev = dev_get_drvdata(&pldev->dev);
struct sbmac_softc *sc = netdev_priv(dev);
unregister_netdev(dev);
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index 12a82966b57..260aafaac23 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -1,7 +1,7 @@
config SFC
tristate "Solarflare Solarstorm SFC4000 support"
depends on PCI && INET
- select MII
+ select MDIO
select CRC32
select I2C
select I2C_ALGOBIT
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index 5182ac5a103..4a4c74c891b 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -172,7 +172,6 @@ static const u8 sfe4002_lm87_regs[] = {
static struct i2c_board_info sfe4002_hwmon_info = {
I2C_BOARD_INFO("lm87", 0x2e),
.platform_data = &sfe4002_lm87_channel,
- .irq = -1,
};
/****************************************************************************/
@@ -247,7 +246,6 @@ static const u8 sfn4112f_lm87_regs[] = {
static struct i2c_board_info sfn4112f_hwmon_info = {
I2C_BOARD_INFO("lm87", 0x2e),
.platform_data = &sfn4112f_lm87_channel,
- .irq = -1,
};
#define SFN4112F_ACT_LED 0
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 7269a426051..343e8da1fa3 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -50,16 +50,6 @@ static struct workqueue_struct *reset_workqueue;
*************************************************************************/
/*
- * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
- *
- * This sets the default for new devices. It can be controlled later
- * using ethtool.
- */
-static int lro = true;
-module_param(lro, int, 0644);
-MODULE_PARM_DESC(lro, "Large receive offload acceleration");
-
-/*
* Use separate channels for TX and RX events
*
* Set this to 1 to use separate channels for TX and RX. It allows us
@@ -894,9 +884,9 @@ static int efx_wanted_rx_queues(void)
int count;
int cpu;
- if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) {
+ if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
printk(KERN_WARNING
- "efx.c: allocation failure, irq balancing hobbled\n");
+ "sfc: RSS disabled due to allocation failure\n");
return 1;
}
@@ -1300,10 +1290,16 @@ out_requeue:
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
EFX_ASSERT_RESET_SERIALISED(efx);
- return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
+ /* Convert phy_id from older PRTAD/DEVAD format */
+ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
+ (data->phy_id & 0xfc00) == 0x0400)
+ data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
+
+ return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
/**************************************************************************
@@ -1945,7 +1941,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
mutex_init(&efx->mac_lock);
efx->mac_op = &efx_dummy_mac_operations;
efx->phy_op = &efx_dummy_phy_operations;
- efx->mii.dev = net_dev;
+ efx->mdio.dev = net_dev;
INIT_WORK(&efx->phy_work, efx_phy_work);
INIT_WORK(&efx->mac_work, efx_mac_work);
atomic_set(&efx->netif_stop_count, 1);
@@ -2161,9 +2157,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
if (!net_dev)
return -ENOMEM;
net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_TSO);
- if (lro)
- net_dev->features |= NETIF_F_GRO;
+ NETIF_F_HIGHDMA | NETIF_F_TSO |
+ NETIF_F_GRO);
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 64309f4e8b1..997ea2a3d53 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
+#include <linux/mdio.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
#include "workarounds.h"
@@ -345,8 +346,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
unsigned int n = 0, i;
enum efx_loopback_mode mode;
- efx_fill_test(n++, strings, data, &tests->mii,
- "core", 0, "mii", NULL);
+ efx_fill_test(n++, strings, data, &tests->mdio,
+ "core", 0, "mdio", NULL);
efx_fill_test(n++, strings, data, &tests->nvram,
"core", 0, "nvram", NULL);
efx_fill_test(n++, strings, data, &tests->interrupt,
@@ -529,14 +530,7 @@ static int efx_ethtool_nway_reset(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- if (efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_AN)) {
- mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_AN,
- MDIO_MMDREG_CTRL1,
- __ffs(BMCR_ANRESTART), true);
- return 0;
- }
-
- return -EOPNOTSUPP;
+ return mdio45_nway_restart(&efx->mdio);
}
static u32 efx_ethtool_get_link(struct net_device *net_dev)
@@ -689,7 +683,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
return -EINVAL;
}
- if (!(efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_AN)) &&
+ if (!(efx->phy_op->mmds & MDIO_DEVS_AN) &&
(wanted_fc & EFX_FC_AUTO)) {
EFX_LOG(efx, "PHY does not support flow control "
"autonegotiation\n");
@@ -717,7 +711,8 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
mutex_lock(&efx->mac_lock);
efx->wanted_fc = wanted_fc;
- mdio_clause45_set_pause(efx);
+ if (efx->phy_op->mmds & MDIO_DEVS_AN)
+ mdio45_ethtool_spauseparam_an(&efx->mdio, pause);
__efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 466a8abb005..c049364aec4 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -2063,26 +2063,6 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
**************************************************************************
*/
-/* Use the top bit of the MII PHY id to indicate the PHY type
- * (1G/10G), with the remaining bits as the actual PHY id.
- *
- * This allows us to avoid leaking information from the mii_if_info
- * structure into other data structures.
- */
-#define FALCON_PHY_ID_ID_WIDTH EFX_WIDTH(MD_PRT_DEV_ADR)
-#define FALCON_PHY_ID_ID_MASK ((1 << FALCON_PHY_ID_ID_WIDTH) - 1)
-#define FALCON_PHY_ID_WIDTH (FALCON_PHY_ID_ID_WIDTH + 1)
-#define FALCON_PHY_ID_MASK ((1 << FALCON_PHY_ID_WIDTH) - 1)
-#define FALCON_PHY_ID_10G (1 << (FALCON_PHY_ID_WIDTH - 1))
-
-
-/* Packing the clause 45 port and device fields into a single value */
-#define MD_PRT_ADR_COMP_LBN (MD_PRT_ADR_LBN - MD_DEV_ADR_LBN)
-#define MD_PRT_ADR_COMP_WIDTH MD_PRT_ADR_WIDTH
-#define MD_DEV_ADR_COMP_LBN 0
-#define MD_DEV_ADR_COMP_WIDTH MD_DEV_ADR_WIDTH
-
-
/* Wait for GMII access to complete */
static int falcon_gmii_wait(struct efx_nic *efx)
{
@@ -2108,49 +2088,29 @@ static int falcon_gmii_wait(struct efx_nic *efx)
return -ETIMEDOUT;
}
-/* Writes a GMII register of a PHY connected to Falcon using MDIO. */
-static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
- int addr, int value)
+/* Write an MDIO register of a PHY connected to Falcon. */
+static int falcon_mdio_write(struct net_device *net_dev,
+ int prtad, int devad, u16 addr, u16 value)
{
struct efx_nic *efx = netdev_priv(net_dev);
- unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
efx_oword_t reg;
+ int rc;
- /* The 'generic' prt/dev packing in mdio_10g.h is conveniently
- * chosen so that the only current user, Falcon, can take the
- * packed value and use them directly.
- * Fail to build if this assumption is broken.
- */
- BUILD_BUG_ON(FALCON_PHY_ID_10G != MDIO45_XPRT_ID_IS10G);
- BUILD_BUG_ON(FALCON_PHY_ID_ID_WIDTH != MDIO45_PRT_DEV_WIDTH);
- BUILD_BUG_ON(MD_PRT_ADR_COMP_LBN != MDIO45_PRT_ID_COMP_LBN);
- BUILD_BUG_ON(MD_DEV_ADR_COMP_LBN != MDIO45_DEV_ID_COMP_LBN);
-
- if (phy_id2 == PHY_ADDR_INVALID)
- return;
-
- /* See falcon_mdio_read for an explanation. */
- if (!(phy_id & FALCON_PHY_ID_10G)) {
- int mmd = ffs(efx->phy_op->mmds) - 1;
- EFX_TRACE(efx, "Fixing erroneous clause22 write\n");
- phy_id2 = mdio_clause45_pack(phy_id2, mmd)
- & FALCON_PHY_ID_ID_MASK;
- }
-
- EFX_REGDUMP(efx, "writing GMII %d register %02x with %04x\n", phy_id,
- addr, value);
+ EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
+ prtad, devad, addr, value);
spin_lock_bh(&efx->phy_lock);
- /* Check MII not currently being accessed */
- if (falcon_gmii_wait(efx) != 0)
+ /* Check MDIO not currently being accessed */
+ rc = falcon_gmii_wait(efx);
+ if (rc)
goto out;
/* Write the address/ID register */
EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
- EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_id2);
+ EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad);
falcon_write(efx, &reg, MD_ID_REG_KER);
/* Write data */
@@ -2163,7 +2123,8 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
falcon_write(efx, &reg, MD_CS_REG_KER);
/* Wait for data to be written */
- if (falcon_gmii_wait(efx) != 0) {
+ rc = falcon_gmii_wait(efx);
+ if (rc) {
/* Abort the write operation */
EFX_POPULATE_OWORD_2(reg,
MD_WRC, 0,
@@ -2174,45 +2135,28 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
out:
spin_unlock_bh(&efx->phy_lock);
+ return rc;
}
-/* Reads a GMII register from a PHY connected to Falcon. If no value
- * could be read, -1 will be returned. */
-static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
+/* Read an MDIO register of a PHY connected to Falcon. */
+static int falcon_mdio_read(struct net_device *net_dev,
+ int prtad, int devad, u16 addr)
{
struct efx_nic *efx = netdev_priv(net_dev);
- unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
efx_oword_t reg;
- int value = -1;
-
- if (phy_addr == PHY_ADDR_INVALID)
- return -1;
-
- /* Our PHY code knows whether it needs to talk clause 22(1G) or 45(10G)
- * but the generic Linux code does not make any distinction or have
- * any state for this.
- * We spot the case where someone tried to talk 22 to a 45 PHY and
- * redirect the request to the lowest numbered MMD as a clause45
- * request. This is enough to allow simple queries like id and link
- * state to succeed. TODO: We may need to do more in future.
- */
- if (!(phy_id & FALCON_PHY_ID_10G)) {
- int mmd = ffs(efx->phy_op->mmds) - 1;
- EFX_TRACE(efx, "Fixing erroneous clause22 read\n");
- phy_addr = mdio_clause45_pack(phy_addr, mmd)
- & FALCON_PHY_ID_ID_MASK;
- }
+ int rc;
spin_lock_bh(&efx->phy_lock);
- /* Check MII not currently being accessed */
- if (falcon_gmii_wait(efx) != 0)
+ /* Check MDIO not currently being accessed */
+ rc = falcon_gmii_wait(efx);
+ if (rc)
goto out;
EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
- EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_addr);
+ EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad);
falcon_write(efx, &reg, MD_ID_REG_KER);
/* Request data to be read */
@@ -2220,12 +2164,12 @@ static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
falcon_write(efx, &reg, MD_CS_REG_KER);
/* Wait for data to become available */
- value = falcon_gmii_wait(efx);
- if (value == 0) {
+ rc = falcon_gmii_wait(efx);
+ if (rc == 0) {
falcon_read(efx, &reg, MD_RXD_REG_KER);
- value = EFX_OWORD_FIELD(reg, MD_RXD);
- EFX_REGDUMP(efx, "read from GMII %d register %02x, got %04x\n",
- phy_id, addr, value);
+ rc = EFX_OWORD_FIELD(reg, MD_RXD);
+ EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
+ prtad, devad, addr, rc);
} else {
/* Abort the read operation */
EFX_POPULATE_OWORD_2(reg,
@@ -2233,22 +2177,13 @@ static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
MD_GC, 1);
falcon_write(efx, &reg, MD_CS_REG_KER);
- EFX_LOG(efx, "read from GMII 0x%x register %02x, got "
- "error %d\n", phy_id, addr, value);
+ EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
+ prtad, devad, addr, rc);
}
out:
spin_unlock_bh(&efx->phy_lock);
-
- return value;
-}
-
-static void falcon_init_mdio(struct mii_if_info *gmii)
-{
- gmii->mdio_read = falcon_mdio_read;
- gmii->mdio_write = falcon_mdio_write;
- gmii->phy_id_mask = FALCON_PHY_ID_MASK;
- gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_PHY_ADR)) - 1);
+ return rc;
}
static int falcon_probe_phy(struct efx_nic *efx)
@@ -2342,9 +2277,11 @@ int falcon_probe_port(struct efx_nic *efx)
if (rc)
return rc;
- /* Set up GMII structure for PHY */
- efx->mii.supports_gmii = true;
- falcon_init_mdio(&efx->mii);
+ /* Set up MDIO structure for PHY */
+ efx->mdio.mmds = efx->phy_op->mmds;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ efx->mdio.mdio_read = falcon_mdio_read;
+ efx->mdio.mdio_write = falcon_mdio_write;
/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
if (falcon_rev(efx) >= FALCON_REV_B0)
@@ -2761,7 +2698,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
if (rc == -EINVAL) {
EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
efx->phy_type = PHY_TYPE_NONE;
- efx->mii.phy_id = PHY_ADDR_INVALID;
+ efx->mdio.prtad = MDIO_PRTAD_NONE;
board_rev = 0;
rc = 0;
} else if (rc) {
@@ -2771,7 +2708,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
efx->phy_type = v2->port0_phy_type;
- efx->mii.phy_id = v2->port0_phy_addr;
+ efx->mdio.prtad = v2->port0_phy_addr;
board_rev = le16_to_cpu(v2->board_revision);
if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
@@ -2793,7 +2730,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
/* Read the MAC addresses */
memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
- EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id);
+ EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
efx_set_board_info(efx, board_rev);
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index bda8d5bb72e..375e2a5961e 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -456,9 +456,6 @@
#define MD_PRT_ADR_WIDTH 5
#define MD_DEV_ADR_LBN 6
#define MD_DEV_ADR_WIDTH 5
-/* Used for writing both at once */
-#define MD_PRT_DEV_ADR_LBN 6
-#define MD_PRT_DEV_ADR_WIDTH 10
/* PHY management status & mask register (DWORD read only) */
#define MD_STAT_REG_KER 0xc50
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 5a03713685a..2b3269c0326 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -133,7 +133,7 @@ bool falcon_xaui_link_ok(struct efx_nic *efx)
/* If the link is up, then check the phy side of the xaui link */
if (efx->link_up && link_ok)
if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS))
- link_ok = mdio_clause45_phyxgxs_lane_sync(efx);
+ link_ok = efx_mdio_phyxgxs_lane_sync(efx);
return link_ok;
}
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 9f5ec3eb341..6c33459f9ea 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -17,7 +17,7 @@
#include "boards.h"
#include "workarounds.h"
-unsigned mdio_id_oui(u32 id)
+unsigned efx_mdio_id_oui(u32 id)
{
unsigned oui = 0;
int i;
@@ -32,52 +32,45 @@ unsigned mdio_id_oui(u32 id)
return oui;
}
-int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd,
+int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
int spins, int spintime)
{
u32 ctrl;
- int phy_id = port->mii.phy_id;
/* Catch callers passing values in the wrong units (or just silly) */
EFX_BUG_ON_PARANOID(spins * spintime >= 5000);
- mdio_clause45_write(port, phy_id, mmd, MDIO_MMDREG_CTRL1,
- (1 << MDIO_MMDREG_CTRL1_RESET_LBN));
+ efx_mdio_write(port, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET);
/* Wait for the reset bit to clear. */
do {
msleep(spintime);
- ctrl = mdio_clause45_read(port, phy_id, mmd, MDIO_MMDREG_CTRL1);
+ ctrl = efx_mdio_read(port, mmd, MDIO_CTRL1);
spins--;
- } while (spins && (ctrl & (1 << MDIO_MMDREG_CTRL1_RESET_LBN)));
+ } while (spins && (ctrl & MDIO_CTRL1_RESET));
return spins ? spins : -ETIMEDOUT;
}
-static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd,
- int fault_fatal)
+static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
{
int status;
- int phy_id = efx->mii.phy_id;
if (LOOPBACK_INTERNAL(efx))
return 0;
if (mmd != MDIO_MMD_AN) {
/* Read MMD STATUS2 to check it is responding. */
- status = mdio_clause45_read(efx, phy_id, mmd,
- MDIO_MMDREG_STAT2);
- if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) &
- ((1 << MDIO_MMDREG_STAT2_PRESENT_WIDTH) - 1)) !=
- MDIO_MMDREG_STAT2_PRESENT_VAL) {
+ status = efx_mdio_read(efx, mmd, MDIO_STAT2);
+ if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) {
EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd);
return -EIO;
}
}
/* Read MMD STATUS 1 to check for fault. */
- status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT1);
- if ((status & (1 << MDIO_MMDREG_STAT1_FAULT_LBN)) != 0) {
+ status = efx_mdio_read(efx, mmd, MDIO_STAT1);
+ if (status & MDIO_STAT1_FAULT) {
if (fault_fatal) {
EFX_ERR(efx, "PHY MMD %d reporting fatal"
" fault: status %x\n", mmd, status);
@@ -94,8 +87,7 @@ static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd,
#define MDIO45_RESET_TIME 1000 /* ms */
#define MDIO45_RESET_ITERS 100
-int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
- unsigned int mmd_mask)
+int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
{
const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
int tries = MDIO45_RESET_ITERS;
@@ -109,16 +101,13 @@ int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
in_reset = 0;
while (mask) {
if (mask & 1) {
- stat = mdio_clause45_read(efx,
- efx->mii.phy_id,
- mmd,
- MDIO_MMDREG_CTRL1);
+ stat = efx_mdio_read(efx, mmd, MDIO_CTRL1);
if (stat < 0) {
EFX_ERR(efx, "failed to read status of"
" MMD %d\n", mmd);
return -EIO;
}
- if (stat & (1 << MDIO_MMDREG_CTRL1_RESET_LBN))
+ if (stat & MDIO_CTRL1_RESET)
in_reset |= (1 << mmd);
}
mask = mask >> 1;
@@ -137,28 +126,26 @@ int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
return rc;
}
-int mdio_clause45_check_mmds(struct efx_nic *efx,
- unsigned int mmd_mask, unsigned int fatal_mask)
+int efx_mdio_check_mmds(struct efx_nic *efx,
+ unsigned int mmd_mask, unsigned int fatal_mask)
{
- int mmd = 0, probe_mmd, devs0, devs1;
+ int mmd = 0, probe_mmd, devs1, devs2;
u32 devices;
/* Historically we have probed the PHYXS to find out what devices are
* present, but that doesn't work so well if the PHYXS isn't expected
* to exist, if so just find the first item in the list supplied. */
- probe_mmd = (mmd_mask & MDIO_MMDREG_DEVS_PHYXS) ? MDIO_MMD_PHYXS :
+ probe_mmd = (mmd_mask & MDIO_DEVS_PHYXS) ? MDIO_MMD_PHYXS :
__ffs(mmd_mask);
/* Check all the expected MMDs are present */
- devs0 = mdio_clause45_read(efx, efx->mii.phy_id,
- probe_mmd, MDIO_MMDREG_DEVS0);
- devs1 = mdio_clause45_read(efx, efx->mii.phy_id,
- probe_mmd, MDIO_MMDREG_DEVS1);
- if (devs0 < 0 || devs1 < 0) {
+ devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1);
+ devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2);
+ if (devs1 < 0 || devs2 < 0) {
EFX_ERR(efx, "failed to read devices present\n");
return -EIO;
}
- devices = devs0 | (devs1 << 16);
+ devices = devs1 | (devs2 << 16);
if ((devices & mmd_mask) != mmd_mask) {
EFX_ERR(efx, "required MMDs not present: got %x, "
"wanted %x\n", devices, mmd_mask);
@@ -170,7 +157,7 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
while (mmd_mask) {
if (mmd_mask & 1) {
int fault_fatal = fatal_mask & 1;
- if (mdio_clause45_check_mmd(efx, mmd, fault_fatal))
+ if (efx_mdio_check_mmd(efx, mmd, fault_fatal))
return -EIO;
}
mmd_mask = mmd_mask >> 1;
@@ -181,13 +168,8 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
return 0;
}
-bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
+bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
{
- int phy_id = efx->mii.phy_id;
- u32 reg;
- bool ok = true;
- int mmd = 0;
-
/* If the port is in loopback, then we should only consider a subset
* of mmd's */
if (LOOPBACK_INTERNAL(efx))
@@ -197,241 +179,75 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
else if (efx_phy_mode_disabled(efx->phy_mode))
return false;
else if (efx->loopback_mode == LOOPBACK_PHYXS)
- mmd_mask &= ~(MDIO_MMDREG_DEVS_PHYXS |
- MDIO_MMDREG_DEVS_PCS |
- MDIO_MMDREG_DEVS_PMAPMD |
- MDIO_MMDREG_DEVS_AN);
+ mmd_mask &= ~(MDIO_DEVS_PHYXS |
+ MDIO_DEVS_PCS |
+ MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_AN);
else if (efx->loopback_mode == LOOPBACK_PCS)
- mmd_mask &= ~(MDIO_MMDREG_DEVS_PCS |
- MDIO_MMDREG_DEVS_PMAPMD |
- MDIO_MMDREG_DEVS_AN);
+ mmd_mask &= ~(MDIO_DEVS_PCS |
+ MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_AN);
else if (efx->loopback_mode == LOOPBACK_PMAPMD)
- mmd_mask &= ~(MDIO_MMDREG_DEVS_PMAPMD |
- MDIO_MMDREG_DEVS_AN);
-
- if (!mmd_mask) {
- /* Use presence of XGMII faults in leui of link state */
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
- MDIO_PHYXS_STATUS2);
- return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN));
- }
+ mmd_mask &= ~(MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_AN);
- while (mmd_mask) {
- if (mmd_mask & 1) {
- /* Double reads because link state is latched, and a
- * read moves the current state into the register */
- reg = mdio_clause45_read(efx, phy_id,
- mmd, MDIO_MMDREG_STAT1);
- reg = mdio_clause45_read(efx, phy_id,
- mmd, MDIO_MMDREG_STAT1);
- ok = ok && (reg & (1 << MDIO_MMDREG_STAT1_LINK_LBN));
- }
- mmd_mask = (mmd_mask >> 1);
- mmd++;
- }
- return ok;
+ return mdio45_links_ok(&efx->mdio, mmd_mask);
}
-void mdio_clause45_transmit_disable(struct efx_nic *efx)
+void efx_mdio_transmit_disable(struct efx_nic *efx)
{
- mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
- MDIO_MMDREG_TXDIS, MDIO_MMDREG_TXDIS_GLOBAL_LBN,
- efx->phy_mode & PHY_MODE_TX_DISABLED);
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
+ MDIO_PMA_TXDIS, MDIO_PMD_TXDIS_GLOBAL,
+ efx->phy_mode & PHY_MODE_TX_DISABLED);
}
-void mdio_clause45_phy_reconfigure(struct efx_nic *efx)
+void efx_mdio_phy_reconfigure(struct efx_nic *efx)
{
- int phy_id = efx->mii.phy_id;
-
- mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_PMAPMD,
- MDIO_MMDREG_CTRL1, MDIO_PMAPMD_CTRL1_LBACK_LBN,
- efx->loopback_mode == LOOPBACK_PMAPMD);
- mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_PCS,
- MDIO_MMDREG_CTRL1, MDIO_MMDREG_CTRL1_LBACK_LBN,
- efx->loopback_mode == LOOPBACK_PCS);
- mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_PHYXS,
- MDIO_MMDREG_CTRL1, MDIO_MMDREG_CTRL1_LBACK_LBN,
- efx->loopback_mode == LOOPBACK_NETWORK);
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
+ MDIO_CTRL1, MDIO_PMA_CTRL1_LOOPBACK,
+ efx->loopback_mode == LOOPBACK_PMAPMD);
+ efx_mdio_set_flag(efx, MDIO_MMD_PCS,
+ MDIO_CTRL1, MDIO_PCS_CTRL1_LOOPBACK,
+ efx->loopback_mode == LOOPBACK_PCS);
+ efx_mdio_set_flag(efx, MDIO_MMD_PHYXS,
+ MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK,
+ efx->loopback_mode == LOOPBACK_NETWORK);
}
-static void mdio_clause45_set_mmd_lpower(struct efx_nic *efx,
- int lpower, int mmd)
+static void efx_mdio_set_mmd_lpower(struct efx_nic *efx,
+ int lpower, int mmd)
{
- int phy = efx->mii.phy_id;
- int stat = mdio_clause45_read(efx, phy, mmd, MDIO_MMDREG_STAT1);
+ int stat = efx_mdio_read(efx, mmd, MDIO_STAT1);
EFX_TRACE(efx, "Setting low power mode for MMD %d to %d\n",
mmd, lpower);
- if (stat & (1 << MDIO_MMDREG_STAT1_LPABLE_LBN)) {
- mdio_clause45_set_flag(efx, phy, mmd, MDIO_MMDREG_CTRL1,
- MDIO_MMDREG_CTRL1_LPOWER_LBN, lpower);
+ if (stat & MDIO_STAT1_LPOWERABLE) {
+ efx_mdio_set_flag(efx, mmd, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER, lpower);
}
}
-void mdio_clause45_set_mmds_lpower(struct efx_nic *efx,
- int low_power, unsigned int mmd_mask)
+void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
+ int low_power, unsigned int mmd_mask)
{
int mmd = 0;
- mmd_mask &= ~MDIO_MMDREG_DEVS_AN;
+ mmd_mask &= ~MDIO_DEVS_AN;
while (mmd_mask) {
if (mmd_mask & 1)
- mdio_clause45_set_mmd_lpower(efx, low_power, mmd);
+ efx_mdio_set_mmd_lpower(efx, low_power, mmd);
mmd_mask = (mmd_mask >> 1);
mmd++;
}
}
-static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr)
-{
- int phy_id = efx->mii.phy_id;
- u32 result = 0;
- int reg;
-
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, addr);
- if (reg & ADVERTISE_10HALF)
- result |= ADVERTISED_10baseT_Half;
- if (reg & ADVERTISE_10FULL)
- result |= ADVERTISED_10baseT_Full;
- if (reg & ADVERTISE_100HALF)
- result |= ADVERTISED_100baseT_Half;
- if (reg & ADVERTISE_100FULL)
- result |= ADVERTISED_100baseT_Full;
- return result;
-}
-
-/**
- * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO.
- * @efx: Efx NIC
- * @ecmd: Buffer for settings
- *
- * On return the 'port', 'speed', 'supported' and 'advertising' fields of
- * ecmd have been filled out.
- */
-void mdio_clause45_get_settings(struct efx_nic *efx,
- struct ethtool_cmd *ecmd)
-{
- mdio_clause45_get_settings_ext(efx, ecmd, 0, 0);
-}
-
-/**
- * mdio_clause45_get_settings_ext - Read (some of) the PHY settings over MDIO.
- * @efx: Efx NIC
- * @ecmd: Buffer for settings
- * @xnp: Advertised Extended Next Page state
- * @xnp_lpa: Link Partner's advertised XNP state
- *
- * On return the 'port', 'speed', 'supported' and 'advertising' fields of
- * ecmd have been filled out.
- */
-void mdio_clause45_get_settings_ext(struct efx_nic *efx,
- struct ethtool_cmd *ecmd,
- u32 npage_adv, u32 npage_lpa)
-{
- int phy_id = efx->mii.phy_id;
- int reg;
-
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->phy_address = phy_id;
-
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
- MDIO_MMDREG_CTRL2);
- switch (reg & MDIO_PMAPMD_CTRL2_TYPE_MASK) {
- case MDIO_PMAPMD_CTRL2_10G_BT:
- case MDIO_PMAPMD_CTRL2_1G_BT:
- case MDIO_PMAPMD_CTRL2_100_BT:
- case MDIO_PMAPMD_CTRL2_10_BT:
- ecmd->port = PORT_TP;
- ecmd->supported = SUPPORTED_TP;
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
- MDIO_MMDREG_SPEED);
- if (reg & (1 << MDIO_MMDREG_SPEED_10G_LBN))
- ecmd->supported |= SUPPORTED_10000baseT_Full;
- if (reg & (1 << MDIO_MMDREG_SPEED_1000M_LBN))
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_1000baseT_Half);
- if (reg & (1 << MDIO_MMDREG_SPEED_100M_LBN))
- ecmd->supported |= (SUPPORTED_100baseT_Full |
- SUPPORTED_100baseT_Half);
- if (reg & (1 << MDIO_MMDREG_SPEED_10M_LBN))
- ecmd->supported |= (SUPPORTED_10baseT_Full |
- SUPPORTED_10baseT_Half);
- ecmd->advertising = ADVERTISED_TP;
- break;
-
- /* We represent CX4 as fibre in the absence of anything better */
- case MDIO_PMAPMD_CTRL2_10G_CX4:
- /* All the other defined modes are flavours of optical */
- default:
- ecmd->port = PORT_FIBRE;
- ecmd->supported = SUPPORTED_FIBRE;
- ecmd->advertising = ADVERTISED_FIBRE;
- break;
- }
-
- if (efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_AN)) {
- ecmd->supported |= SUPPORTED_Autoneg;
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
- MDIO_MMDREG_CTRL1);
- if (reg & BMCR_ANENABLE) {
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->advertising |=
- ADVERTISED_Autoneg |
- mdio_clause45_get_an(efx, MDIO_AN_ADVERTISE) |
- npage_adv;
- } else
- ecmd->autoneg = AUTONEG_DISABLE;
- } else
- ecmd->autoneg = AUTONEG_DISABLE;
-
- if (ecmd->autoneg) {
- /* If AN is complete, report best common mode,
- * otherwise report best advertised mode. */
- u32 modes = 0;
- if (mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
- MDIO_MMDREG_STAT1) &
- (1 << MDIO_AN_STATUS_AN_DONE_LBN))
- modes = (ecmd->advertising &
- (mdio_clause45_get_an(efx, MDIO_AN_LPA) |
- npage_lpa));
- if (modes == 0)
- modes = ecmd->advertising;
-
- if (modes & ADVERTISED_10000baseT_Full) {
- ecmd->speed = SPEED_10000;
- ecmd->duplex = DUPLEX_FULL;
- } else if (modes & (ADVERTISED_1000baseT_Full |
- ADVERTISED_1000baseT_Half)) {
- ecmd->speed = SPEED_1000;
- ecmd->duplex = !!(modes & ADVERTISED_1000baseT_Full);
- } else if (modes & (ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half)) {
- ecmd->speed = SPEED_100;
- ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full);
- } else {
- ecmd->speed = SPEED_10;
- ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full);
- }
- } else {
- /* Report forced settings */
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
- MDIO_MMDREG_CTRL1);
- ecmd->speed = (((reg & BMCR_SPEED1000) ? 100 : 1) *
- ((reg & BMCR_SPEED100) ? 100 : 10));
- ecmd->duplex = (reg & BMCR_FULLDPLX ||
- ecmd->speed == SPEED_10000);
- }
-}
-
/**
- * mdio_clause45_set_settings - Set (some of) the PHY settings over MDIO.
+ * efx_mdio_set_settings - Set (some of) the PHY settings over MDIO.
* @efx: Efx NIC
* @ecmd: New settings
*/
-int mdio_clause45_set_settings(struct efx_nic *efx,
- struct ethtool_cmd *ecmd)
+int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- int phy_id = efx->mii.phy_id;
struct ethtool_cmd prev;
u32 required;
int reg;
@@ -488,95 +304,48 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full))
reg |= ADVERTISE_NPAGE;
- reg |= efx_fc_advertise(efx->wanted_fc);
- mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
- MDIO_AN_ADVERTISE, reg);
+ reg |= mii_advertise_flowctrl(efx->wanted_fc);
+ efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
/* Set up the (extended) next page if necessary */
if (efx->phy_op->set_npage_adv)
efx->phy_op->set_npage_adv(efx, ecmd->advertising);
/* Enable and restart AN */
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
- MDIO_MMDREG_CTRL1);
- reg |= BMCR_ANENABLE;
+ reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
+ reg |= MDIO_AN_CTRL1_ENABLE;
if (!(EFX_WORKAROUND_15195(efx) &&
LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
- reg |= BMCR_ANRESTART;
+ reg |= MDIO_AN_CTRL1_RESTART;
if (xnp)
- reg |= 1 << MDIO_AN_CTRL_XNP_LBN;
+ reg |= MDIO_AN_CTRL1_XNP;
else
- reg &= ~(1 << MDIO_AN_CTRL_XNP_LBN);
- mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
- MDIO_MMDREG_CTRL1, reg);
+ reg &= ~MDIO_AN_CTRL1_XNP;
+ efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
} else {
/* Disable AN */
- mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
- MDIO_MMDREG_CTRL1,
- __ffs(BMCR_ANENABLE), false);
+ efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_CTRL1,
+ MDIO_AN_CTRL1_ENABLE, false);
/* Set the basic control bits */
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
- MDIO_MMDREG_CTRL1);
- reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX |
- 0x003c);
+ reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1);
+ reg &= ~(MDIO_CTRL1_SPEEDSEL | MDIO_CTRL1_FULLDPLX);
if (ecmd->speed == SPEED_100)
- reg |= BMCR_SPEED100;
+ reg |= MDIO_PMA_CTRL1_SPEED100;
if (ecmd->duplex)
- reg |= BMCR_FULLDPLX;
- mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
- MDIO_MMDREG_CTRL1, reg);
+ reg |= MDIO_CTRL1_FULLDPLX;
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, reg);
}
return 0;
}
-void mdio_clause45_set_pause(struct efx_nic *efx)
-{
- int phy_id = efx->mii.phy_id;
- int reg;
-
- if (efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_AN)) {
- /* Set pause capability advertising */
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
- MDIO_AN_ADVERTISE);
- reg &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- reg |= efx_fc_advertise(efx->wanted_fc);
- mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
- MDIO_AN_ADVERTISE, reg);
-
- /* Restart auto-negotiation */
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
- MDIO_MMDREG_CTRL1);
- if (reg & BMCR_ANENABLE) {
- reg |= BMCR_ANRESTART;
- mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
- MDIO_MMDREG_CTRL1, reg);
- }
- }
-}
-
-enum efx_fc_type mdio_clause45_get_pause(struct efx_nic *efx)
+enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
{
- int phy_id = efx->mii.phy_id;
int lpa;
- if (!(efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_AN)))
+ if (!(efx->phy_op->mmds & MDIO_DEVS_AN))
return efx->wanted_fc;
- lpa = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, MDIO_AN_LPA);
+ lpa = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA);
return efx_fc_resolve(efx->wanted_fc, lpa);
}
-
-void mdio_clause45_set_flag(struct efx_nic *efx, u8 prt, u8 dev,
- u16 addr, int bit, bool sense)
-{
- int old_val = mdio_clause45_read(efx, prt, dev, addr);
- int new_val;
-
- if (sense)
- new_val = old_val | (1 << bit);
- else
- new_val = old_val & ~(1 << bit);
- if (old_val != new_val)
- mdio_clause45_write(efx, prt, dev, addr, new_val);
-}
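
[Editor's note] The mdio_10g.c hunks above follow one mechanical pattern: every test of the form (1 << SOMETHING_LBN) against a driver-local bit number becomes a test against a named mask from <linux/mdio.h>, and the explicit phy_id argument disappears because the port address is now carried in efx->mdio.prtad. Below is a minimal standalone sketch (not part of the patch) of the bit-mask equivalence; the constants are copied from the definitions this patch removes (reset is bit 15 of CTRL1), not quoted from <linux/mdio.h> itself.

    /*
     * Old style: driver-local bit number, shifted at each call site.
     * New style: a single-bit mask with the same value, provided by
     * <linux/mdio.h> in the real driver (defined locally here).
     */
    #include <assert.h>
    #include <stdio.h>

    #define MDIO_MMDREG_CTRL1_RESET_LBN  15           /* old: bit number */
    #define MDIO_CTRL1_RESET             (1 << 15)    /* new: bit mask   */

    int main(void)
    {
        unsigned int ctrl = 0x8000;   /* pretend value read from MMD CTRL1 */

        /* Old test, as removed from mdio_clause45_reset_mmd() */
        int in_reset_old = !!(ctrl & (1 << MDIO_MMDREG_CTRL1_RESET_LBN));
        /* New test, as added to efx_mdio_reset_mmd() */
        int in_reset_new = !!(ctrl & MDIO_CTRL1_RESET);

        assert(in_reset_old == in_reset_new);
        printf("reset bit set: %d\n", in_reset_new);
        return 0;
    }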
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 7014d2279c2..6b14421a744 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -10,247 +10,53 @@
#ifndef EFX_MDIO_10G_H
#define EFX_MDIO_10G_H
+#include <linux/mdio.h>
+
/*
- * Definitions needed for doing 10G MDIO as specified in clause 45
- * MDIO, which do not appear in Linux yet. Also some helper functions.
+ * Helper functions for doing 10G MDIO as specified in IEEE 802.3 clause 45.
*/
#include "efx.h"
#include "boards.h"
-/* Numbering of the MDIO Manageable Devices (MMDs) */
-/* Physical Medium Attachment/ Physical Medium Dependent sublayer */
-#define MDIO_MMD_PMAPMD (1)
-/* WAN Interface Sublayer */
-#define MDIO_MMD_WIS (2)
-/* Physical Coding Sublayer */
-#define MDIO_MMD_PCS (3)
-/* PHY Extender Sublayer */
-#define MDIO_MMD_PHYXS (4)
-/* Extender Sublayer */
-#define MDIO_MMD_DTEXS (5)
-/* Transmission convergence */
-#define MDIO_MMD_TC (6)
-/* Auto negotiation */
-#define MDIO_MMD_AN (7)
-/* Clause 22 extension */
-#define MDIO_MMD_C22EXT 29
-
-/* Generic register locations */
-#define MDIO_MMDREG_CTRL1 (0)
-#define MDIO_MMDREG_STAT1 (1)
-#define MDIO_MMDREG_IDHI (2)
-#define MDIO_MMDREG_IDLOW (3)
-#define MDIO_MMDREG_SPEED (4)
-#define MDIO_MMDREG_DEVS0 (5)
-#define MDIO_MMDREG_DEVS1 (6)
-#define MDIO_MMDREG_CTRL2 (7)
-#define MDIO_MMDREG_STAT2 (8)
-#define MDIO_MMDREG_TXDIS (9)
-
-/* Bits in MMDREG_CTRL1 */
-/* Reset */
-#define MDIO_MMDREG_CTRL1_RESET_LBN (15)
-#define MDIO_MMDREG_CTRL1_RESET_WIDTH (1)
-/* Loopback */
-/* Loopback bit for WIS, PCS, PHYSX and DTEXS */
-#define MDIO_MMDREG_CTRL1_LBACK_LBN (14)
-#define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1)
-/* Low power */
-#define MDIO_MMDREG_CTRL1_LPOWER_LBN (11)
-#define MDIO_MMDREG_CTRL1_LPOWER_WIDTH (1)
-
-/* Bits in MMDREG_STAT1 */
-#define MDIO_MMDREG_STAT1_FAULT_LBN (7)
-#define MDIO_MMDREG_STAT1_FAULT_WIDTH (1)
-/* Link state */
-#define MDIO_MMDREG_STAT1_LINK_LBN (2)
-#define MDIO_MMDREG_STAT1_LINK_WIDTH (1)
-/* Low power ability */
-#define MDIO_MMDREG_STAT1_LPABLE_LBN (1)
-#define MDIO_MMDREG_STAT1_LPABLE_WIDTH (1)
-
-/* Bits in combined ID regs */
-static inline unsigned mdio_id_rev(u32 id) { return id & 0xf; }
-static inline unsigned mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
-extern unsigned mdio_id_oui(u32 id);
-
-/* Bits in MMDREG_DEVS0/1. Someone thoughtfully layed things out
- * so the 'bit present' bit number of an MMD is the number of
- * that MMD */
-#define DEV_PRESENT_BIT(_b) (1 << _b)
-
-#define MDIO_MMDREG_DEVS_PHYXS DEV_PRESENT_BIT(MDIO_MMD_PHYXS)
-#define MDIO_MMDREG_DEVS_PCS DEV_PRESENT_BIT(MDIO_MMD_PCS)
-#define MDIO_MMDREG_DEVS_PMAPMD DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)
-#define MDIO_MMDREG_DEVS_AN DEV_PRESENT_BIT(MDIO_MMD_AN)
-#define MDIO_MMDREG_DEVS_C22EXT DEV_PRESENT_BIT(MDIO_MMD_C22EXT)
-
-/* Bits in MMDREG_SPEED */
-#define MDIO_MMDREG_SPEED_10G_LBN 0
-#define MDIO_MMDREG_SPEED_10G_WIDTH 1
-#define MDIO_MMDREG_SPEED_1000M_LBN 4
-#define MDIO_MMDREG_SPEED_1000M_WIDTH 1
-#define MDIO_MMDREG_SPEED_100M_LBN 5
-#define MDIO_MMDREG_SPEED_100M_WIDTH 1
-#define MDIO_MMDREG_SPEED_10M_LBN 6
-#define MDIO_MMDREG_SPEED_10M_WIDTH 1
-
-/* Bits in MMDREG_STAT2 */
-#define MDIO_MMDREG_STAT2_PRESENT_VAL (2)
-#define MDIO_MMDREG_STAT2_PRESENT_LBN (14)
-#define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2)
-
-/* Bits in MMDREG_TXDIS */
-#define MDIO_MMDREG_TXDIS_GLOBAL_LBN (0)
-#define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH (1)
-
-/* MMD-specific bits, ordered by MMD, then register */
-#define MDIO_PMAPMD_CTRL1_LBACK_LBN (0)
-#define MDIO_PMAPMD_CTRL1_LBACK_WIDTH (1)
-
-/* PMA type (4 bits) */
-#define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0)
-#define MDIO_PMAPMD_CTRL2_10G_EW (0x1)
-#define MDIO_PMAPMD_CTRL2_10G_LW (0x2)
-#define MDIO_PMAPMD_CTRL2_10G_SW (0x3)
-#define MDIO_PMAPMD_CTRL2_10G_LX4 (0x4)
-#define MDIO_PMAPMD_CTRL2_10G_ER (0x5)
-#define MDIO_PMAPMD_CTRL2_10G_LR (0x6)
-#define MDIO_PMAPMD_CTRL2_10G_SR (0x7)
-/* Reserved */
-#define MDIO_PMAPMD_CTRL2_10G_BT (0x9)
-/* Reserved */
-/* Reserved */
-#define MDIO_PMAPMD_CTRL2_1G_BT (0xc)
-/* Reserved */
-#define MDIO_PMAPMD_CTRL2_100_BT (0xe)
-#define MDIO_PMAPMD_CTRL2_10_BT (0xf)
-#define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf)
-
-/* PMA 10GBT registers */
-#define MDIO_PMAPMD_10GBT_TXPWR (131)
-#define MDIO_PMAPMD_10GBT_TXPWR_SHORT_LBN (0)
-#define MDIO_PMAPMD_10GBT_TXPWR_SHORT_WIDTH (1)
-
-/* PHY XGXS Status 2 */
-#define MDIO_PHYXS_STATUS2 (8)
-#define MDIO_PHYXS_STATUS2_RX_FAULT_LBN 10
-
-/* PHY XGXS lane state */
-#define MDIO_PHYXS_LANE_STATE (0x18)
-#define MDIO_PHYXS_LANE_ALIGNED_LBN (12)
-
-/* AN registers */
-#define MDIO_AN_CTRL_XNP_LBN 13
-#define MDIO_AN_STATUS (1)
-#define MDIO_AN_STATUS_XNP_LBN (7)
-#define MDIO_AN_STATUS_PAGE_LBN (6)
-#define MDIO_AN_STATUS_AN_DONE_LBN (5)
-#define MDIO_AN_STATUS_LP_AN_CAP_LBN (0)
-
-#define MDIO_AN_ADVERTISE 16
-#define MDIO_AN_ADVERTISE_XNP_LBN 12
-#define MDIO_AN_LPA 19
-#define MDIO_AN_XNP 22
-#define MDIO_AN_LPA_XNP 25
-
-#define MDIO_AN_10GBT_CTRL 32
-#define MDIO_AN_10GBT_CTRL_ADV_10G_LBN 12
-#define MDIO_AN_10GBT_STATUS (33)
-#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */
-#define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */
-#define MDIO_AN_10GBT_STATUS_LOC_OK_LBN (13) /* Local OK */
-#define MDIO_AN_10GBT_STATUS_REM_OK_LBN (12) /* Remote OK */
-#define MDIO_AN_10GBT_STATUS_LP_10G_LBN (11) /* Link partner is 10GBT capable */
-#define MDIO_AN_10GBT_STATUS_LP_LTA_LBN (10) /* LP loop timing ability */
-#define MDIO_AN_10GBT_STATUS_LP_TRR_LBN (9) /* LP Training Reset Request */
-
+static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
+static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
+extern unsigned efx_mdio_id_oui(u32 id);
-/* Packing of the prt and dev arguments of clause 45 style MDIO into a
- * single int so they can be passed into the mdio_read/write functions
- * that currently exist. Note that as Falcon is the only current user,
- * the packed form is chosen to match what Falcon needs to write into
- * a register. This is checked at compile-time so do not change it. If
- * your target chip needs things layed out differently you will need
- * to unpack the arguments in your chip-specific mdio functions.
- */
- /* These are defined by the standard. */
-#define MDIO45_PRT_ID_WIDTH (5)
-#define MDIO45_DEV_ID_WIDTH (5)
-
-/* The prt ID is just packed in immediately to the left of the dev ID */
-#define MDIO45_PRT_DEV_WIDTH (MDIO45_PRT_ID_WIDTH + MDIO45_DEV_ID_WIDTH)
-
-#define MDIO45_PRT_ID_MASK ((1 << MDIO45_PRT_DEV_WIDTH) - 1)
-/* This is the prt + dev extended by 1 bit to hold the 'is clause 45' flag. */
-#define MDIO45_XPRT_ID_WIDTH (MDIO45_PRT_DEV_WIDTH + 1)
-#define MDIO45_XPRT_ID_MASK ((1 << MDIO45_XPRT_ID_WIDTH) - 1)
-#define MDIO45_XPRT_ID_IS10G (1 << (MDIO45_XPRT_ID_WIDTH - 1))
-
-
-#define MDIO45_PRT_ID_COMP_LBN MDIO45_DEV_ID_WIDTH
-#define MDIO45_PRT_ID_COMP_WIDTH MDIO45_PRT_ID_WIDTH
-#define MDIO45_DEV_ID_COMP_LBN 0
-#define MDIO45_DEV_ID_COMP_WIDTH MDIO45_DEV_ID_WIDTH
-
-/* Compose port and device into a phy_id */
-static inline int mdio_clause45_pack(u8 prt, u8 dev)
-{
- efx_dword_t phy_id;
- EFX_POPULATE_DWORD_2(phy_id, MDIO45_PRT_ID_COMP, prt,
- MDIO45_DEV_ID_COMP, dev);
- return MDIO45_XPRT_ID_IS10G | EFX_DWORD_VAL(phy_id);
-}
-
-static inline void mdio_clause45_unpack(u32 val, u8 *prt, u8 *dev)
+static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
{
- efx_dword_t phy_id;
- EFX_POPULATE_DWORD_1(phy_id, EFX_DWORD_0, val);
- *prt = EFX_DWORD_FIELD(phy_id, MDIO45_PRT_ID_COMP);
- *dev = EFX_DWORD_FIELD(phy_id, MDIO45_DEV_ID_COMP);
+ return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad, devad, addr);
}
-static inline int mdio_clause45_read(struct efx_nic *efx,
- u8 prt, u8 dev, u16 addr)
+static inline void
+efx_mdio_write(struct efx_nic *efx, int devad, int addr, int value)
{
- return efx->mii.mdio_read(efx->net_dev,
- mdio_clause45_pack(prt, dev), addr);
+ efx->mdio.mdio_write(efx->net_dev, efx->mdio.prtad, devad, addr, value);
}
-static inline void mdio_clause45_write(struct efx_nic *efx,
- u8 prt, u8 dev, u16 addr, int value)
+static inline u32 efx_mdio_read_id(struct efx_nic *efx, int mmd)
{
- efx->mii.mdio_write(efx->net_dev,
- mdio_clause45_pack(prt, dev), addr, value);
-}
-
-
-static inline u32 mdio_clause45_read_id(struct efx_nic *efx, int mmd)
-{
- int phy_id = efx->mii.phy_id;
- u16 id_low = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDLOW);
- u16 id_hi = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDHI);
+ u16 id_low = efx_mdio_read(efx, mmd, MDIO_DEVID2);
+ u16 id_hi = efx_mdio_read(efx, mmd, MDIO_DEVID1);
return (id_hi << 16) | (id_low);
}
-static inline bool mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx)
+static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
{
int i, lane_status;
bool sync;
for (i = 0; i < 2; ++i)
- lane_status = mdio_clause45_read(efx, efx->mii.phy_id,
- MDIO_MMD_PHYXS,
- MDIO_PHYXS_LANE_STATE);
+ lane_status = efx_mdio_read(efx, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_LNSTAT);
- sync = !!(lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN));
+ sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
if (!sync)
EFX_LOG(efx, "XGXS lane status: %x\n", lane_status);
return sync;
}
-extern const char *mdio_clause45_mmd_name(int mmd);
+extern const char *efx_mdio_mmd_name(int mmd);
/*
* Reset a specific MMD and wait for reset to clear.
@@ -258,54 +64,44 @@ extern const char *mdio_clause45_mmd_name(int mmd);
*
* This function will sleep
*/
-extern int mdio_clause45_reset_mmd(struct efx_nic *efx, int mmd,
- int spins, int spintime);
+extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
+ int spins, int spintime);
-/* As mdio_clause45_check_mmd but for multiple MMDs */
-int mdio_clause45_check_mmds(struct efx_nic *efx,
- unsigned int mmd_mask, unsigned int fatal_mask);
+/* As efx_mdio_check_mmd but for multiple MMDs */
+int efx_mdio_check_mmds(struct efx_nic *efx,
+ unsigned int mmd_mask, unsigned int fatal_mask);
/* Check the link status of specified mmds in bit mask */
-extern bool mdio_clause45_links_ok(struct efx_nic *efx,
- unsigned int mmd_mask);
+extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
/* Generic transmit disable support through PMAPMD */
-extern void mdio_clause45_transmit_disable(struct efx_nic *efx);
+extern void efx_mdio_transmit_disable(struct efx_nic *efx);
/* Generic part of reconfigure: set/clear loopback bits */
-extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx);
+extern void efx_mdio_phy_reconfigure(struct efx_nic *efx);
/* Set the power state of the specified MMDs */
-extern void mdio_clause45_set_mmds_lpower(struct efx_nic *efx,
- int low_power, unsigned int mmd_mask);
-
-/* Read (some of) the PHY settings over MDIO */
-extern void mdio_clause45_get_settings(struct efx_nic *efx,
- struct ethtool_cmd *ecmd);
-
-/* Read (some of) the PHY settings over MDIO */
-extern void
-mdio_clause45_get_settings_ext(struct efx_nic *efx, struct ethtool_cmd *ecmd,
- u32 xnp, u32 xnp_lpa);
+extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
+ int low_power, unsigned int mmd_mask);
/* Set (some of) the PHY settings over MDIO */
-extern int mdio_clause45_set_settings(struct efx_nic *efx,
- struct ethtool_cmd *ecmd);
-
-/* Set pause parameters to be advertised through AN (if available) */
-extern void mdio_clause45_set_pause(struct efx_nic *efx);
+extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
/* Get pause parameters from AN if available (otherwise return
* requested pause parameters)
*/
-enum efx_fc_type mdio_clause45_get_pause(struct efx_nic *efx);
+enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx);
/* Wait for specified MMDs to exit reset within a timeout */
-extern int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
- unsigned int mmd_mask);
+extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
+ unsigned int mmd_mask);
/* Set or clear flag, debouncing */
-extern void mdio_clause45_set_flag(struct efx_nic *efx, u8 prt, u8 dev,
- u16 addr, int bit, bool sense);
+static inline void
+efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
+ int mask, bool state)
+{
+ mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
+}
#endif /* EFX_MDIO_10G_H */
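
[Editor's note] The replacement header no longer packs (prt, dev) into a fake clause-22 phy_id; the new efx_mdio_read()/efx_mdio_write() inlines simply forward to the callbacks held in efx->mdio together with the stored port address. A minimal userspace sketch of that delegation follows, using a simplified stand-in for struct mdio_if_info — field names follow the hunk above, but the real structure in <linux/mdio.h> carries additional fields (e.g. mmds, mode_support).

    #include <stdio.h>

    struct fake_net_device { const char *name; };

    /* Simplified stand-in for struct mdio_if_info */
    struct fake_mdio_if {
        int prtad;                                 /* MDIO port (PHY) address */
        int (*mdio_read)(struct fake_net_device *dev, int prtad,
                         int devad, int addr);
        void (*mdio_write)(struct fake_net_device *dev, int prtad,
                           int devad, int addr, int value);
    };

    struct fake_nic {
        struct fake_net_device *net_dev;
        struct fake_mdio_if mdio;
    };

    /* Dummy backend standing in for the hardware MDIO access routines */
    static int dummy_read(struct fake_net_device *dev, int prtad,
                          int devad, int addr)
    {
        printf("%s: read  prtad=%d devad=%d addr=%d\n",
               dev->name, prtad, devad, addr);
        return 0x8000;
    }

    static void dummy_write(struct fake_net_device *dev, int prtad,
                            int devad, int addr, int value)
    {
        printf("%s: write prtad=%d devad=%d addr=%d val=%#x\n",
               dev->name, prtad, devad, addr, (unsigned int)value);
    }

    /* Same shape as the new efx_mdio_read()/efx_mdio_write() inlines */
    static int efx_mdio_read(struct fake_nic *efx, int devad, int addr)
    {
        return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad, devad, addr);
    }

    static void efx_mdio_write(struct fake_nic *efx, int devad, int addr, int value)
    {
        efx->mdio.mdio_write(efx->net_dev, efx->mdio.prtad, devad, addr, value);
    }

    int main(void)
    {
        struct fake_net_device dev = { "eth0" };
        struct fake_nic efx = {
            .net_dev = &dev,
            .mdio = { .prtad = 1,
                      .mdio_read = dummy_read,
                      .mdio_write = dummy_write },
        };

        efx_mdio_write(&efx, 1 /* PMA/PMD */, 0 /* CTRL1 */, 0x8000);
        (void)efx_mdio_read(&efx, 1, 0);
        return 0;
    }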
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index e169e5dcd1e..5eabede9ac1 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -19,7 +19,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
-#include <linux/mii.h>
+#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
@@ -458,8 +458,6 @@ enum phy_type {
PHY_TYPE_MAX /* Insert any new items before this */
};
-#define PHY_ADDR_INVALID 0xff
-
#define EFX_IS10G(efx) ((efx)->link_speed == 10000)
enum nic_state {
@@ -497,8 +495,8 @@ struct efx_nic;
/* Pseudo bit-mask flow control field */
enum efx_fc_type {
- EFX_FC_RX = 1,
- EFX_FC_TX = 2,
+ EFX_FC_RX = FLOW_CTRL_RX,
+ EFX_FC_TX = FLOW_CTRL_TX,
EFX_FC_AUTO = 4,
};
@@ -508,33 +506,15 @@ enum efx_mac_type {
EFX_XMAC = 2,
};
-static inline unsigned int efx_fc_advertise(enum efx_fc_type wanted_fc)
-{
- unsigned int adv = 0;
- if (wanted_fc & EFX_FC_RX)
- adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
- if (wanted_fc & EFX_FC_TX)
- adv ^= ADVERTISE_PAUSE_ASYM;
- return adv;
-}
-
static inline enum efx_fc_type efx_fc_resolve(enum efx_fc_type wanted_fc,
unsigned int lpa)
{
- unsigned int adv = efx_fc_advertise(wanted_fc);
+ BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
if (!(wanted_fc & EFX_FC_AUTO))
return wanted_fc;
- if (adv & lpa & ADVERTISE_PAUSE_CAP)
- return EFX_FC_RX | EFX_FC_TX;
- if (adv & lpa & ADVERTISE_PAUSE_ASYM) {
- if (adv & ADVERTISE_PAUSE_CAP)
- return EFX_FC_RX;
- if (lpa & ADVERTISE_PAUSE_CAP)
- return EFX_FC_TX;
- }
- return 0;
+ return mii_resolve_flowctrl_fdx(mii_advertise_flowctrl(wanted_fc), lpa);
}
/**
@@ -758,7 +738,7 @@ union efx_multicast_hash {
* @phy_lock: PHY access lock
* @phy_op: PHY interface
* @phy_data: PHY private data (including PHY-specific stats)
- * @mii: PHY interface
+ * @mdio: PHY MDIO interface
* @phy_mode: PHY operating mode. Serialised by @mac_lock.
* @mac_up: MAC link state
* @link_up: Link status
@@ -845,7 +825,7 @@ struct efx_nic {
struct work_struct phy_work;
struct efx_phy_operations *phy_op;
void *phy_data;
- struct mii_if_info mii;
+ struct mdio_if_info mdio;
enum efx_phy_mode phy_mode;
bool mac_up;
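
[Editor's note] The flow-control helpers also move from driver-local code to the generic MII helpers: mii_advertise_flowctrl() replaces efx_fc_advertise(), and mii_resolve_flowctrl_fdx() replaces the open-coded resolution in efx_fc_resolve(). For reference, here is a standalone sketch of the logic the hunk above deletes, which is what the driver now expects the generic helpers to compute. Flag values match the pre-patch enum (EFX_FC_RX = 1, EFX_FC_TX = 2); the advertisement bits are the standard MII pause-capability bits.

    #include <stdio.h>

    #define EFX_FC_RX              1
    #define EFX_FC_TX              2
    #define ADVERTISE_PAUSE_CAP    0x0400   /* symmetric pause  */
    #define ADVERTISE_PAUSE_ASYM   0x0800   /* asymmetric pause */

    /* What mii_advertise_flowctrl() is expected to return */
    static unsigned int fc_advertise(unsigned int wanted_fc)
    {
        unsigned int adv = 0;

        if (wanted_fc & EFX_FC_RX)
            adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        if (wanted_fc & EFX_FC_TX)
            adv ^= ADVERTISE_PAUSE_ASYM;
        return adv;
    }

    /* What mii_resolve_flowctrl_fdx() is expected to resolve from the
     * local advertisement and the link partner advertisement (lpa) */
    static unsigned int fc_resolve(unsigned int wanted_fc, unsigned int lpa)
    {
        unsigned int adv = fc_advertise(wanted_fc);

        if (adv & lpa & ADVERTISE_PAUSE_CAP)
            return EFX_FC_RX | EFX_FC_TX;
        if (adv & lpa & ADVERTISE_PAUSE_ASYM) {
            if (adv & ADVERTISE_PAUSE_CAP)
                return EFX_FC_RX;
            if (lpa & ADVERTISE_PAUSE_CAP)
                return EFX_FC_TX;
        }
        return 0;
    }

    int main(void)
    {
        /* We want both directions; link partner advertises symmetric pause */
        unsigned int lpa = ADVERTISE_PAUSE_CAP;

        printf("resolved fc = %#x\n", fc_resolve(EFX_FC_RX | EFX_FC_TX, lpa));
        return 0;
    }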
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 66d7fe3db3e..01f9432c31e 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -450,17 +450,27 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
/* Pass the skb/page into the LRO engine */
if (rx_buf->page) {
- struct napi_gro_fraginfo info;
+ struct sk_buff *skb = napi_get_frags(napi);
- info.frags[0].page = rx_buf->page;
- info.frags[0].page_offset = efx_rx_buf_offset(rx_buf);
- info.frags[0].size = rx_buf->len;
- info.nr_frags = 1;
- info.ip_summed = CHECKSUM_UNNECESSARY;
- info.len = rx_buf->len;
+ if (!skb) {
+ put_page(rx_buf->page);
+ goto out;
+ }
+
+ skb_shinfo(skb)->frags[0].page = rx_buf->page;
+ skb_shinfo(skb)->frags[0].page_offset =
+ efx_rx_buf_offset(rx_buf);
+ skb_shinfo(skb)->frags[0].size = rx_buf->len;
+ skb_shinfo(skb)->nr_frags = 1;
+
+ skb->len = rx_buf->len;
+ skb->data_len = rx_buf->len;
+ skb->truesize += rx_buf->len;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_frags(napi, &info);
+ napi_gro_frags(napi);
+out:
EFX_BUG_ON_PARANOID(rx_buf->skb);
rx_buf->page = NULL;
} else {
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0a598084c51..b67ccca3fc1 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -80,39 +80,38 @@ struct efx_loopback_state {
*
**************************************************************************/
-static int efx_test_mii(struct efx_nic *efx, struct efx_self_tests *tests)
+static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
+ int devad = __ffs(efx->mdio.mmds);
u16 physid1, physid2;
- struct mii_if_info *mii = &efx->mii;
- struct net_device *net_dev = efx->net_dev;
if (efx->phy_type == PHY_TYPE_NONE)
return 0;
mutex_lock(&efx->mac_lock);
- tests->mii = -1;
+ tests->mdio = -1;
- physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
- physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);
+ physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
+ physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
(physid2 == 0x0000) || (physid2 == 0xffff)) {
- EFX_ERR(efx, "no MII PHY present with ID %d\n",
- mii->phy_id);
+ EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
+ efx->mdio.prtad);
rc = -EINVAL;
goto out;
}
if (EFX_IS10G(efx)) {
- rc = mdio_clause45_check_mmds(efx, efx->phy_op->mmds, 0);
+ rc = efx_mdio_check_mmds(efx, efx->phy_op->mmds, 0);
if (rc)
goto out;
}
out:
mutex_unlock(&efx->mac_lock);
- tests->mii = rc ? -1 : 1;
+ tests->mdio = rc ? -1 : 1;
return rc;
}
@@ -439,6 +438,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
kfree_skb(skb);
return -EPIPE;
}
+ efx->net_dev->trans_start = jiffies;
}
return 0;
@@ -673,7 +673,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
/* Online (i.e. non-disruptive) testing
* This checks interrupt generation, event delivery and PHY presence. */
- rc = efx_test_mii(efx, tests);
+ rc = efx_test_mdio(efx, tests);
if (rc && !rc_test)
rc_test = rc;
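
[Editor's note] The renamed efx_test_mdio() keeps the same presence test as before, just reading the clause-45 device ID registers (DEVID1/DEVID2) instead of the clause-22 PHYSID registers: an absent or unresponsive PHY reads back all zeros or all ones. A standalone sketch of that check (the "present" ID values below are arbitrary non-zero, non-0xffff examples):

    #include <stdio.h>

    static int phy_present(unsigned int physid1, unsigned int physid2)
    {
        if (physid1 == 0x0000 || physid1 == 0xffff ||
            physid2 == 0x0000 || physid2 == 0xffff)
            return 0;       /* bus reads back as empty or stuck */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", phy_present(0xffff, 0xffff));  /* absent  -> 0 */
        printf("%d\n", phy_present(0x002b, 0x0943));  /* present -> 1 */
        return 0;
    }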
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index 39451cf938c..f6feee04c96 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -32,7 +32,7 @@ struct efx_loopback_self_tests {
*/
struct efx_self_tests {
/* online tests */
- int mii;
+ int mdio;
int nvram;
int interrupt;
int eventq_dma[EFX_MAX_CHANNELS];
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 4eac5da81e5..cee00ad49b5 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -296,7 +296,6 @@ static int sfe4001_check_hw(struct efx_nic *efx)
static struct i2c_board_info sfe4001_hwmon_info = {
I2C_BOARD_INFO("max6647", 0x4e),
- .irq = -1,
};
/* This board uses an I2C expander to provide power to the PHY, which needs to
@@ -389,12 +388,10 @@ static void sfn4111t_fini(struct efx_nic *efx)
static struct i2c_board_info sfn4111t_a0_hwmon_info = {
I2C_BOARD_INFO("max6647", 0x4e),
- .irq = -1,
};
static struct i2c_board_info sfn4111t_r5_hwmon_info = {
I2C_BOARD_INFO("max6646", 0x4d),
- .irq = -1,
};
int sfn4111t_init(struct efx_nic *efx)
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index e61dc4d4741..f4d509015f7 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -23,10 +23,10 @@
* clause 22 extension MMD, but since it doesn't have all the generic
* MMD registers it is pointless to include it here.
*/
-#define TENXPRESS_REQUIRED_DEVS (MDIO_MMDREG_DEVS_PMAPMD | \
- MDIO_MMDREG_DEVS_PCS | \
- MDIO_MMDREG_DEVS_PHYXS | \
- MDIO_MMDREG_DEVS_AN)
+#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
+ MDIO_DEVS_PCS | \
+ MDIO_DEVS_PHYXS | \
+ MDIO_DEVS_AN)
#define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
(1 << LOOPBACK_PCS) | \
@@ -44,18 +44,6 @@
*/
#define MAX_BAD_LP_TRIES (5)
-/* LASI Control */
-#define PMA_PMD_LASI_CTRL 36866
-#define PMA_PMD_LASI_STATUS 36869
-#define PMA_PMD_LS_ALARM_LBN 0
-#define PMA_PMD_LS_ALARM_WIDTH 1
-#define PMA_PMD_TX_ALARM_LBN 1
-#define PMA_PMD_TX_ALARM_WIDTH 1
-#define PMA_PMD_RX_ALARM_LBN 2
-#define PMA_PMD_RX_ALARM_WIDTH 1
-#define PMA_PMD_AN_ALARM_LBN 3
-#define PMA_PMD_AN_ALARM_WIDTH 1
-
/* Extended control register */
#define PMA_PMD_XCONTROL_REG 49152
#define PMA_PMD_EXT_GMII_EN_LBN 1
@@ -75,6 +63,7 @@
/* extended status register */
#define PMA_PMD_XSTATUS_REG 49153
+#define PMA_PMD_XSTAT_MDIX_LBN 14
#define PMA_PMD_XSTAT_FLP_LBN (12)
/* LED control register */
@@ -153,10 +142,6 @@
#define LOOPBACK_NEAR_LBN (8)
#define LOOPBACK_NEAR_WIDTH (1)
-#define PCS_10GBASET_STAT1 32
-#define PCS_10GBASET_BLKLK_LBN 0
-#define PCS_10GBASET_BLKLK_WIDTH 1
-
/* Boot status register */
#define PCS_BOOT_STATUS_REG 53248
#define PCS_BOOT_FATAL_ERROR_LBN 0
@@ -206,10 +191,8 @@ static ssize_t show_phy_short_reach(struct device *dev,
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
int reg;
- reg = mdio_clause45_read(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
- MDIO_PMAPMD_10GBT_TXPWR);
- return sprintf(buf, "%d\n",
- !!(reg & (1 << MDIO_PMAPMD_10GBT_TXPWR_SHORT_LBN)));
+ reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR);
+ return sprintf(buf, "%d\n", !!(reg & MDIO_PMA_10GBT_TXPWR_SHORT));
}
static ssize_t set_phy_short_reach(struct device *dev,
@@ -219,10 +202,9 @@ static ssize_t set_phy_short_reach(struct device *dev,
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
rtnl_lock();
- mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
- MDIO_PMAPMD_10GBT_TXPWR,
- MDIO_PMAPMD_10GBT_TXPWR_SHORT_LBN,
- count != 0 && *buf != '0');
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
+ MDIO_PMA_10GBT_TXPWR_SHORT,
+ count != 0 && *buf != '0');
efx_reconfigure_port(efx);
rtnl_unlock();
@@ -238,9 +220,8 @@ int sft9001_wait_boot(struct efx_nic *efx)
int boot_stat;
for (;;) {
- boot_stat = mdio_clause45_read(efx, efx->mii.phy_id,
- MDIO_MMD_PCS,
- PCS_BOOT_STATUS_REG);
+ boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS,
+ PCS_BOOT_STATUS_REG);
if (boot_stat >= 0) {
EFX_LOG(efx, "PHY boot status = %#x\n", boot_stat);
switch (boot_stat &
@@ -286,38 +267,32 @@ int sft9001_wait_boot(struct efx_nic *efx)
static int tenxpress_init(struct efx_nic *efx)
{
- int phy_id = efx->mii.phy_id;
int reg;
if (efx->phy_type == PHY_TYPE_SFX7101) {
/* Enable 312.5 MHz clock */
- mdio_clause45_write(efx, phy_id,
- MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
- 1 << CLK312_EN_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
+ 1 << CLK312_EN_LBN);
} else {
/* Enable 312.5 MHz clock and GMII */
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_XCONTROL_REG);
+ reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) |
(1 << PMA_PMD_EXT_CLK_OUT_LBN) |
(1 << PMA_PMD_EXT_CLK312_LBN) |
(1 << PMA_PMD_EXT_ROBUST_LBN));
- mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_XCONTROL_REG, reg);
- mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
- GPHY_XCONTROL_REG, GPHY_ISOLATE_LBN,
- false);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
+ efx_mdio_set_flag(efx, MDIO_MMD_C22EXT,
+ GPHY_XCONTROL_REG, 1 << GPHY_ISOLATE_LBN,
+ false);
}
/* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
if (efx->phy_type == PHY_TYPE_SFX7101) {
- mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_LED_CTRL_REG,
- PMA_PMA_LED_ACTIVITY_LBN,
- true);
- mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_LED_OVERR_REG, PMA_PMD_LED_DEFAULT);
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
+ 1 << PMA_PMA_LED_ACTIVITY_LBN, true);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
+ PMA_PMD_LED_DEFAULT);
}
return 0;
@@ -337,22 +312,19 @@ static int tenxpress_phy_init(struct efx_nic *efx)
if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
if (efx->phy_type == PHY_TYPE_SFT9001A) {
int reg;
- reg = mdio_clause45_read(efx, efx->mii.phy_id,
- MDIO_MMD_PMAPMD,
- PMA_PMD_XCONTROL_REG);
+ reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
+ PMA_PMD_XCONTROL_REG);
reg |= (1 << PMA_PMD_EXT_SSR_LBN);
- mdio_clause45_write(efx, efx->mii.phy_id,
- MDIO_MMD_PMAPMD,
- PMA_PMD_XCONTROL_REG, reg);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ PMA_PMD_XCONTROL_REG, reg);
mdelay(200);
}
- rc = mdio_clause45_wait_reset_mmds(efx,
- TENXPRESS_REQUIRED_DEVS);
+ rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
goto fail;
- rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
+ rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
if (rc < 0)
goto fail;
}
@@ -360,7 +332,6 @@ static int tenxpress_phy_init(struct efx_nic *efx)
rc = tenxpress_init(efx);
if (rc < 0)
goto fail;
- mdio_clause45_set_pause(efx);
if (efx->phy_type == PHY_TYPE_SFT9001B) {
rc = device_create_file(&efx->pci_dev->dev,
@@ -395,17 +366,14 @@ static int tenxpress_special_reset(struct efx_nic *efx)
efx_stats_disable(efx);
/* Initiate reset */
- reg = mdio_clause45_read(efx, efx->mii.phy_id,
- MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
+ reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
reg |= (1 << PMA_PMD_EXT_SSR_LBN);
- mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_XCONTROL_REG, reg);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
mdelay(200);
/* Wait for the blocks to come out of reset */
- rc = mdio_clause45_wait_reset_mmds(efx,
- TENXPRESS_REQUIRED_DEVS);
+ rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
goto out;
@@ -424,7 +392,6 @@ out:
static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
{
struct tenxpress_phy_data *pd = efx->phy_data;
- int phy_id = efx->mii.phy_id;
bool bad_lp;
int reg;
@@ -432,11 +399,10 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
bad_lp = false;
} else {
/* Check that AN has started but not completed. */
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
- MDIO_AN_STATUS);
- if (!(reg & (1 << MDIO_AN_STATUS_LP_AN_CAP_LBN)))
+ reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_STAT1);
+ if (!(reg & MDIO_AN_STAT1_LPABLE))
return; /* LP status is unknown */
- bad_lp = !(reg & (1 << MDIO_AN_STATUS_AN_DONE_LBN));
+ bad_lp = !(reg & MDIO_AN_STAT1_COMPLETE);
if (bad_lp)
pd->bad_lp_tries++;
}
@@ -448,8 +414,8 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
/* Use the RX (red) LED as an error indicator once we've seen AN
* failure several times in a row, and also log a message. */
if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_LED_OVERR_REG);
+ reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
+ PMA_PMD_LED_OVERR_REG);
reg &= ~(PMA_PMD_LED_MASK << PMA_PMD_LED_RX_LBN);
if (!bad_lp) {
reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
@@ -460,23 +426,22 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
" supports 10GBASE-T ONLY, so no link can"
" be established\n");
}
- mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_LED_OVERR_REG, reg);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ PMA_PMD_LED_OVERR_REG, reg);
pd->bad_lp_tries = bad_lp;
}
}
static bool sfx7101_link_ok(struct efx_nic *efx)
{
- return mdio_clause45_links_ok(efx,
- MDIO_MMDREG_DEVS_PMAPMD |
- MDIO_MMDREG_DEVS_PCS |
- MDIO_MMDREG_DEVS_PHYXS);
+ return efx_mdio_links_ok(efx,
+ MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_PCS |
+ MDIO_DEVS_PHYXS);
}
static bool sft9001_link_ok(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- int phy_id = efx->mii.phy_id;
u32 reg;
if (efx_phy_mode_disabled(efx->phy_mode))
@@ -484,50 +449,43 @@ static bool sft9001_link_ok(struct efx_nic *efx, struct ethtool_cmd *ecmd)
else if (efx->loopback_mode == LOOPBACK_GPHY)
return true;
else if (efx->loopback_mode)
- return mdio_clause45_links_ok(efx,
- MDIO_MMDREG_DEVS_PMAPMD |
- MDIO_MMDREG_DEVS_PHYXS);
+ return efx_mdio_links_ok(efx,
+ MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_PHYXS);
/* We must use the same definition of link state as LASI,
* otherwise we can miss a link state transition
*/
if (ecmd->speed == 10000) {
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS,
- PCS_10GBASET_STAT1);
- return reg & (1 << PCS_10GBASET_BLKLK_LBN);
+ reg = efx_mdio_read(efx, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT1);
+ return reg & MDIO_PCS_10GBRT_STAT1_BLKLK;
} else {
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
- C22EXT_STATUS_REG);
+ reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_STATUS_REG);
return reg & (1 << C22EXT_STATUS_LINK_LBN);
}
}
static void tenxpress_ext_loopback(struct efx_nic *efx)
{
- int phy_id = efx->mii.phy_id;
-
- mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_PHYXS,
- PHYXS_TEST1, LOOPBACK_NEAR_LBN,
- efx->loopback_mode == LOOPBACK_PHYXS);
+ efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
+ 1 << LOOPBACK_NEAR_LBN,
+ efx->loopback_mode == LOOPBACK_PHYXS);
if (efx->phy_type != PHY_TYPE_SFX7101)
- mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
- GPHY_XCONTROL_REG,
- GPHY_LOOPBACK_NEAR_LBN,
- efx->loopback_mode == LOOPBACK_GPHY);
+ efx_mdio_set_flag(efx, MDIO_MMD_C22EXT, GPHY_XCONTROL_REG,
+ 1 << GPHY_LOOPBACK_NEAR_LBN,
+ efx->loopback_mode == LOOPBACK_GPHY);
}
static void tenxpress_low_power(struct efx_nic *efx)
{
- int phy_id = efx->mii.phy_id;
-
if (efx->phy_type == PHY_TYPE_SFX7101)
- mdio_clause45_set_mmds_lpower(
+ efx_mdio_set_mmds_lpower(
efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
TENXPRESS_REQUIRED_DEVS);
else
- mdio_clause45_set_flag(
- efx, phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_XCONTROL_REG, PMA_PMD_EXT_LPOWER_LBN,
+ efx_mdio_set_flag(
+ efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG,
+ 1 << PMA_PMD_EXT_LPOWER_LBN,
!!(efx->phy_mode & PHY_MODE_LOW_POWER));
}
@@ -568,8 +526,8 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
WARN_ON(rc);
}
- mdio_clause45_transmit_disable(efx);
- mdio_clause45_phy_reconfigure(efx);
+ efx_mdio_transmit_disable(efx);
+ efx_mdio_phy_reconfigure(efx);
tenxpress_ext_loopback(efx);
phy_data->loopback_mode = efx->loopback_mode;
@@ -585,7 +543,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
efx->link_fd = ecmd.duplex == DUPLEX_FULL;
efx->link_up = sft9001_link_ok(efx, &ecmd);
}
- efx->link_fc = mdio_clause45_get_pause(efx);
+ efx->link_fc = efx_mdio_get_pause(efx);
}
/* Poll PHY for interrupt */
@@ -599,7 +557,7 @@ static void tenxpress_phy_poll(struct efx_nic *efx)
if (link_ok != efx->link_up) {
change = true;
} else {
- unsigned int link_fc = mdio_clause45_get_pause(efx);
+ unsigned int link_fc = efx_mdio_get_pause(efx);
if (link_fc != efx->link_fc)
change = true;
}
@@ -609,10 +567,9 @@ static void tenxpress_phy_poll(struct efx_nic *efx)
if (link_ok != efx->link_up)
change = true;
} else {
- u32 status = mdio_clause45_read(efx, efx->mii.phy_id,
- MDIO_MMD_PMAPMD,
- PMA_PMD_LASI_STATUS);
- if (status & (1 << PMA_PMD_LS_ALARM_LBN))
+ int status = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
+ MDIO_PMA_LASI_STAT);
+ if (status & MDIO_PMA_LASI_LSALARM)
change = true;
}
@@ -634,8 +591,7 @@ static void tenxpress_phy_fini(struct efx_nic *efx)
if (efx->phy_type == PHY_TYPE_SFX7101) {
/* Power down the LNPGA */
reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
- mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_XCONTROL_REG, reg);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
/* Waiting here ensures that the board fini, which can turn
* off the power to the PHY, won't get run until the LNPGA
@@ -661,8 +617,7 @@ void tenxpress_phy_blink(struct efx_nic *efx, bool blink)
else
reg = PMA_PMD_LED_DEFAULT;
- mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_LED_OVERR_REG, reg);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
}
static const char *const sfx7101_test_names[] = {
@@ -698,7 +653,6 @@ static const char *const sft9001_test_names[] = {
static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
{
struct ethtool_cmd ecmd;
- int phy_id = efx->mii.phy_id;
int rc = 0, rc2, i, ctrl_reg, res_reg;
if (flags & ETH_TEST_FL_OFFLINE)
@@ -717,11 +671,10 @@ static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
* must reset the PHY to resume normal service. */
ctrl_reg |= (1 << CDIAG_CTRL_BRK_LINK_LBN);
}
- mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_CDIAG_CTRL_REG, ctrl_reg);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_CTRL_REG,
+ ctrl_reg);
i = 0;
- while (mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_CDIAG_CTRL_REG) &
+ while (efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_CTRL_REG) &
(1 << CDIAG_CTRL_IN_PROG_LBN)) {
if (++i == 50) {
rc = -ETIMEDOUT;
@@ -729,15 +682,13 @@ static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
}
msleep(100);
}
- res_reg = mdio_clause45_read(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_CDIAG_RES_REG);
+ res_reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_RES_REG);
for (i = 0; i < 4; i++) {
int pair_res =
(res_reg >> (CDIAG_RES_A_LBN - i * CDIAG_RES_WIDTH))
& ((1 << CDIAG_RES_WIDTH) - 1);
- int len_reg = mdio_clause45_read(efx, efx->mii.phy_id,
- MDIO_MMD_PMAPMD,
- PMA_PMD_CDIAG_LEN_REG + i);
+ int len_reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
+ PMA_PMD_CDIAG_LEN_REG + i);
if (pair_res == CDIAG_RES_OK)
results[1 + i] = 1;
else if (pair_res == CDIAG_RES_INVALID)
@@ -769,36 +720,39 @@ out:
static void
tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- int phy_id = efx->mii.phy_id;
u32 adv = 0, lpa = 0;
int reg;
if (efx->phy_type != PHY_TYPE_SFX7101) {
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
- C22EXT_MSTSLV_CTRL);
+ reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_CTRL);
if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN))
adv |= ADVERTISED_1000baseT_Full;
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
- C22EXT_MSTSLV_STATUS);
+ reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_STATUS);
if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN))
lpa |= ADVERTISED_1000baseT_Half;
if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN))
lpa |= ADVERTISED_1000baseT_Full;
}
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
- MDIO_AN_10GBT_CTRL);
- if (reg & (1 << MDIO_AN_10GBT_CTRL_ADV_10G_LBN))
+ reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
+ if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
adv |= ADVERTISED_10000baseT_Full;
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
- MDIO_AN_10GBT_STATUS);
- if (reg & (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN))
+ reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+ if (reg & MDIO_AN_10GBT_STAT_LP10G)
lpa |= ADVERTISED_10000baseT_Full;
- mdio_clause45_get_settings_ext(efx, ecmd, adv, lpa);
+ mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
- if (efx->phy_type != PHY_TYPE_SFX7101)
+ if (efx->phy_type != PHY_TYPE_SFX7101) {
ecmd->supported |= (SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full);
+ if (ecmd->speed != SPEED_10000) {
+ ecmd->eth_tp_mdix =
+ (efx_mdio_read(efx, MDIO_MMD_PMAPMD,
+ PMA_PMD_XSTATUS_REG) &
+ (1 << PMA_PMD_XSTAT_MDIX_LBN))
+ ? ETH_TP_MDI_X : ETH_TP_MDI;
+ }
+ }
/* In loopback, the PHY automatically brings up the correct interface,
* but doesn't advertise the correct speed. So override it */
@@ -813,29 +767,24 @@ static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
if (!ecmd->autoneg)
return -EINVAL;
- return mdio_clause45_set_settings(efx, ecmd);
+ return efx_mdio_set_settings(efx, ecmd);
}
static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
{
- mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_AN,
- MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
- advertising & ADVERTISED_10000baseT_Full);
+ efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV10G,
+ advertising & ADVERTISED_10000baseT_Full);
}
static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
{
- int phy_id = efx->mii.phy_id;
-
- mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
- C22EXT_MSTSLV_CTRL,
- C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN,
- advertising & ADVERTISED_1000baseT_Full);
- mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
- MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
- advertising & ADVERTISED_10000baseT_Full);
+ efx_mdio_set_flag(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_CTRL,
+ 1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN,
+ advertising & ADVERTISED_1000baseT_Full);
+ efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV10G,
+ advertising & ADVERTISED_10000baseT_Full);
}
struct efx_phy_operations falcon_sfx7101_phy_ops = {
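
[Editor's note] Most of the tenxpress.c churn is the same rename plus one signature change: efx_mdio_set_flag() takes a bit mask where mdio_clause45_set_flag() took a bit number, so call sites now pass "1 << FOO_LBN" or a named MDIO_* mask. Both versions perform the same debounced read-modify-write shown in the body removed from mdio_10g.c above. A standalone sketch of that behaviour follows; register access is faked with an array, and the value of GPHY_ISOLATE_LBN here is illustrative only.

    #include <assert.h>
    #include <stdio.h>

    #define GPHY_ISOLATE_LBN 10            /* example bit number */

    static int regs[32];                   /* fake MMD register file */
    static int writes;                     /* counts actual writes   */

    static int  fake_read(int addr)             { return regs[addr]; }
    static void fake_write(int addr, int value) { regs[addr] = value; writes++; }

    /* New-style helper: mask in, write only when the value changes */
    static void set_flag(int addr, int mask, int state)
    {
        int old_val = fake_read(addr);
        int new_val = state ? (old_val | mask) : (old_val & ~mask);

        if (old_val != new_val)
            fake_write(addr, new_val);
    }

    int main(void)
    {
        set_flag(0, 1 << GPHY_ISOLATE_LBN, 1);  /* sets the bit: one write  */
        set_flag(0, 1 << GPHY_ISOLATE_LBN, 1);  /* already set: no write    */
        set_flag(0, 1 << GPHY_ISOLATE_LBN, 0);  /* clears it: second write  */

        assert(writes == 2);
        printf("final reg 0 = %#x, writes = %d\n", regs[0], writes);
        return 0;
    }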
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index d6681edb701..14a14788566 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -360,13 +360,6 @@ inline int efx_xmit(struct efx_nic *efx,
/* Map fragments for DMA and add to TX queue */
rc = efx_enqueue_skb(tx_queue, skb);
- if (unlikely(rc != NETDEV_TX_OK))
- goto out;
-
- /* Update last TX timer */
- efx->net_dev->trans_start = jiffies;
-
- out:
return rc;
}
diff --git a/drivers/net/sfc/xenpack.h b/drivers/net/sfc/xenpack.h
deleted file mode 100644
index b0d1f225b70..00000000000
--- a/drivers/net/sfc/xenpack.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_XENPACK_H
-#define EFX_XENPACK_H
-
-/* Exported functions from Xenpack standard PHY control */
-
-#include "mdio_10g.h"
-
-/****************************************************************************/
-/* XENPACK MDIO register extensions */
-#define MDIO_XP_LASI_RX_CTRL (0x9000)
-#define MDIO_XP_LASI_TX_CTRL (0x9001)
-#define MDIO_XP_LASI_CTRL (0x9002)
-#define MDIO_XP_LASI_RX_STAT (0x9003)
-#define MDIO_XP_LASI_TX_STAT (0x9004)
-#define MDIO_XP_LASI_STAT (0x9005)
-
-/* Control/Status bits */
-#define XP_LASI_LS_ALARM (1 << 0)
-#define XP_LASI_TX_ALARM (1 << 1)
-#define XP_LASI_RX_ALARM (1 << 2)
-/* These two are Quake vendor extensions to the standard XENPACK defines */
-#define XP_LASI_LS_INTB (1 << 3)
-#define XP_LASI_TEST (1 << 7)
-
-/* Enable LASI interrupts for PHY */
-static inline void xenpack_enable_lasi_irqs(struct efx_nic *efx)
-{
- int reg;
- int phy_id = efx->mii.phy_id;
- /* Read to clear LASI status register */
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
- MDIO_XP_LASI_STAT);
-
- mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
- MDIO_XP_LASI_CTRL, XP_LASI_LS_ALARM);
-}
-
-/* Read the LASI interrupt status to clear the interrupt. */
-static inline int xenpack_clear_lasi_irqs(struct efx_nic *efx)
-{
- /* Read to clear link status alarm */
- return mdio_clause45_read(efx, efx->mii.phy_id,
- MDIO_MMD_PMAPMD, MDIO_XP_LASI_STAT);
-}
-
-/* Turn off LASI interrupts */
-static inline void xenpack_disable_lasi_irqs(struct efx_nic *efx)
-{
- mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
- MDIO_XP_LASI_CTRL, 0);
-}
-
-#endif /* EFX_XENPACK_H */
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index bb1ef77d5f5..bb2e6afd082 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -15,13 +15,12 @@
#include <linux/delay.h>
#include "efx.h"
#include "mdio_10g.h"
-#include "xenpack.h"
#include "phy.h"
#include "falcon.h"
-#define XFP_REQUIRED_DEVS (MDIO_MMDREG_DEVS_PCS | \
- MDIO_MMDREG_DEVS_PMAPMD | \
- MDIO_MMDREG_DEVS_PHYXS)
+#define XFP_REQUIRED_DEVS (MDIO_DEVS_PCS | \
+ MDIO_DEVS_PMAPMD | \
+ MDIO_DEVS_PHYXS)
#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \
(1 << LOOPBACK_PMAPMD) | \
@@ -49,8 +48,7 @@
void xfp_set_led(struct efx_nic *p, int led, int mode)
{
int addr = MDIO_QUAKE_LED0_REG + led;
- mdio_clause45_write(p, p->mii.phy_id, MDIO_MMD_PMAPMD, addr,
- mode);
+ efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
}
struct xfp_phy_data {
@@ -63,14 +61,12 @@ struct xfp_phy_data {
static int qt2025c_wait_reset(struct efx_nic *efx)
{
unsigned long timeout = jiffies + 10 * HZ;
- int phy_id = efx->mii.phy_id;
int reg, old_counter = 0;
/* Wait for firmware heartbeat to start */
for (;;) {
int counter;
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS,
- PCS_FW_HEARTBEAT_REG);
+ reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG);
if (reg < 0)
return reg;
counter = ((reg >> PCS_FW_HEARTB_LBN) &
@@ -86,8 +82,7 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
/* Wait for firmware status to look good */
for (;;) {
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS,
- PCS_UC8051_STATUS_REG);
+ reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
if (reg < 0)
return reg;
if ((reg &
@@ -109,9 +104,9 @@ static int xfp_reset_phy(struct efx_nic *efx)
{
int rc;
- rc = mdio_clause45_reset_mmd(efx, MDIO_MMD_PHYXS,
- XFP_MAX_RESET_TIME / XFP_RESET_WAIT,
- XFP_RESET_WAIT);
+ rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
+ XFP_MAX_RESET_TIME / XFP_RESET_WAIT,
+ XFP_RESET_WAIT);
if (rc < 0)
goto fail;
@@ -126,8 +121,7 @@ static int xfp_reset_phy(struct efx_nic *efx)
/* Check that all the MMDs we expect are present and responding. We
* expect faults on some if the link is down, but not on the PHY XS */
- rc = mdio_clause45_check_mmds(efx, XFP_REQUIRED_DEVS,
- MDIO_MMDREG_DEVS_PHYXS);
+ rc = efx_mdio_check_mmds(efx, XFP_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
if (rc < 0)
goto fail;
@@ -143,7 +137,7 @@ static int xfp_reset_phy(struct efx_nic *efx)
static int xfp_phy_init(struct efx_nic *efx)
{
struct xfp_phy_data *phy_data;
- u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS);
+ u32 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
int rc;
phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
@@ -152,8 +146,8 @@ static int xfp_phy_init(struct efx_nic *efx)
efx->phy_data = phy_data;
EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
- devid, mdio_id_oui(devid), mdio_id_model(devid),
- mdio_id_rev(devid));
+ devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
+ efx_mdio_id_rev(devid));
phy_data->phy_mode = efx->phy_mode;
@@ -174,12 +168,13 @@ static int xfp_phy_init(struct efx_nic *efx)
static void xfp_phy_clear_interrupt(struct efx_nic *efx)
{
- xenpack_clear_lasi_irqs(efx);
+ /* Read to clear link status alarm */
+ efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT);
}
static int xfp_link_ok(struct efx_nic *efx)
{
- return mdio_clause45_links_ok(efx, XFP_REQUIRED_DEVS);
+ return efx_mdio_links_ok(efx, XFP_REQUIRED_DEVS);
}
static void xfp_phy_poll(struct efx_nic *efx)
@@ -200,9 +195,9 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
* or optical transceivers, varying somewhat between
* firmware versions. Only 'static mode' appears to
* cover everything. */
- mdio_clause45_set_flag(
- efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_FTX_CTRL2_REG, PMA_PMD_FTX_STATIC_LBN,
+ mdio_set_flag(
+ &efx->mdio, efx->mdio.prtad, MDIO_MMD_PMAPMD,
+ PMA_PMD_FTX_CTRL2_REG, 1 << PMA_PMD_FTX_STATIC_LBN,
efx->phy_mode & PHY_MODE_TX_DISABLED ||
efx->phy_mode & PHY_MODE_LOW_POWER ||
efx->loopback_mode == LOOPBACK_PCS ||
@@ -213,10 +208,10 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
(phy_data->phy_mode & PHY_MODE_TX_DISABLED))
xfp_reset_phy(efx);
- mdio_clause45_transmit_disable(efx);
+ efx_mdio_transmit_disable(efx);
}
- mdio_clause45_phy_reconfigure(efx);
+ efx_mdio_phy_reconfigure(efx);
phy_data->phy_mode = efx->phy_mode;
efx->link_up = xfp_link_ok(efx);
@@ -225,6 +220,10 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
efx->link_fc = efx->wanted_fc;
}
+static void xfp_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ mdio45_ethtool_gset(&efx->mdio, ecmd);
+}
static void xfp_phy_fini(struct efx_nic *efx)
{
@@ -243,8 +242,8 @@ struct efx_phy_operations falcon_xfp_phy_ops = {
.poll = xfp_phy_poll,
.fini = xfp_phy_fini,
.clear_interrupt = xfp_phy_clear_interrupt,
- .get_settings = mdio_clause45_get_settings,
- .set_settings = mdio_clause45_set_settings,
+ .get_settings = xfp_phy_get_settings,
+ .set_settings = efx_mdio_set_settings,
.mmds = XFP_REQUIRED_DEVS,
.loopbacks = XFP_LOOPBACKS,
};
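
The xfp_phy.c hunks above switch the driver from its private mdio_clause45_*() helpers to the shared efx_mdio_*/mdio45 layer, which is why the explicit phy_id argument disappears: the PHY address now travels in efx->mdio, a struct mdio_if_info from <linux/mdio.h> (the patch itself reads efx->mdio.prtad). A plausible shape for such a wrapper, written as an assumption about mdio_10g.h rather than a quotation from it, assuming the sfc struct efx_nic:

	/* sketch only: the prtad (PHY address) comes from the mdio_if_info,
	 * so callers no longer pass efx->mii.phy_id themselves */
	static inline int efx_mdio_read_sketch(struct efx_nic *efx, int devad, int addr)
	{
		return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad, devad, addr);
	}

The new xfp_phy_get_settings() follows the same idea, delegating straight to the generic mdio45_ethtool_gset() helper.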
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 97d68560067..5fb88ca6dd7 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -709,6 +709,17 @@ static inline void setup_rx_ring(struct net_device *dev,
dma_sync_desc_dev(dev, &buf[i]);
}
+static const struct net_device_ops sgiseeq_netdev_ops = {
+ .ndo_open = sgiseeq_open,
+ .ndo_stop = sgiseeq_close,
+ .ndo_start_xmit = sgiseeq_start_xmit,
+ .ndo_tx_timeout = timeout,
+ .ndo_set_multicast_list = sgiseeq_set_multicast,
+ .ndo_set_mac_address = sgiseeq_set_mac_address,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int __init sgiseeq_probe(struct platform_device *pdev)
{
struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
@@ -775,13 +786,8 @@ static int __init sgiseeq_probe(struct platform_device *pdev)
SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
SEEQ_CTRL_ENCARR;
- dev->open = sgiseeq_open;
- dev->stop = sgiseeq_close;
- dev->hard_start_xmit = sgiseeq_start_xmit;
- dev->tx_timeout = timeout;
+ dev->netdev_ops = &sgiseeq_netdev_ops;
dev->watchdog_timeo = (200 * HZ) / 1000;
- dev->set_multicast_list = sgiseeq_set_multicast;
- dev->set_mac_address = sgiseeq_set_mac_address;
dev->irq = irq;
if (register_netdev(dev)) {
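
The sgiseeq hunks above are the standard net_device_ops conversion: the old per-netdev function pointers (dev->open, dev->stop, dev->hard_start_xmit, ...) move into one static const ops table that probe installs through dev->netdev_ops, with eth_change_mtu and eth_validate_addr as the stock Ethernet helpers for hooks the driver does not specialize. A condensed sketch of the pattern, with example_* as placeholder names:

	static const struct net_device_ops example_netdev_ops = {
		.ndo_open          = example_open,       /* was dev->open */
		.ndo_stop          = example_close,      /* was dev->stop */
		.ndo_start_xmit    = example_start_xmit, /* was dev->hard_start_xmit */
		.ndo_change_mtu    = eth_change_mtu,     /* generic Ethernet helpers */
		.ndo_validate_addr = eth_validate_addr,
	};

	/* in probe(): one assignment replaces the individual ones */
	dev->netdev_ops = &example_netdev_ops;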
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 3ab28bb00c1..341882f959f 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -2,7 +2,7 @@
* SuperH Ethernet device driver
*
* Copyright (C) 2006-2008 Nobuhiro Iwamatsu
- * Copyright (C) 2008 Renesas Solutions Corp.
+ * Copyright (C) 2008-2009 Renesas Solutions Corp.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -33,6 +33,226 @@
#include "sh_eth.h"
+/* There is CPU dependent code */
+#if defined(CONFIG_CPU_SUBTYPE_SH7724)
+#define SH_ETH_RESET_DEFAULT 1
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u32 ioaddr = ndev->base_addr;
+
+ if (mdp->duplex) /* Full */
+ ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+ else /* Half */
+ ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u32 ioaddr = ndev->base_addr;
+
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
+ break;
+ case 100:/* 100BASE */
+ ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
+ break;
+ default:
+ break;
+ }
+}
+
+/* SH7724 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate,
+
+ .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
+ .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+ EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+};
+
+#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
+#define SH_ETH_HAS_TSU 1
+static void sh_eth_chip_reset(struct net_device *ndev)
+{
+ /* reset device */
+ ctrl_outl(ARSTR_ARSTR, ARSTR);
+ mdelay(1);
+}
+
+static void sh_eth_reset(struct net_device *ndev)
+{
+ u32 ioaddr = ndev->base_addr;
+ int cnt = 100;
+
+ ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
+ ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+ while (cnt > 0) {
+ if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
+ break;
+ mdelay(1);
+ cnt--;
+ }
+ if (cnt < 0)
+ printk(KERN_ERR "Device reset fail\n");
+
+ /* Table Init */
+ ctrl_outl(0x0, ioaddr + TDLAR);
+ ctrl_outl(0x0, ioaddr + TDFAR);
+ ctrl_outl(0x0, ioaddr + TDFXR);
+ ctrl_outl(0x0, ioaddr + TDFFR);
+ ctrl_outl(0x0, ioaddr + RDLAR);
+ ctrl_outl(0x0, ioaddr + RDFAR);
+ ctrl_outl(0x0, ioaddr + RDFXR);
+ ctrl_outl(0x0, ioaddr + RDFFR);
+}
+
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u32 ioaddr = ndev->base_addr;
+
+ if (mdp->duplex) /* Full */
+ ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+ else /* Half */
+ ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u32 ioaddr = ndev->base_addr;
+
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ ctrl_outl(GECMR_10, ioaddr + GECMR);
+ break;
+ case 100:/* 100BASE */
+ ctrl_outl(GECMR_100, ioaddr + GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ ctrl_outl(GECMR_1000, ioaddr + GECMR);
+ break;
+ default:
+ break;
+ }
+}
+
+/* sh7763 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .chip_reset = sh_eth_chip_reset,
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate,
+
+ .ecsr_value = ECSR_ICD | ECSR_MPD,
+ .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ EESR_ECI,
+ .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+ EESR_TFE,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .bculr = 1,
+ .hw_swap = 1,
+ .rpadir = 1,
+ .no_trimd = 1,
+ .no_ade = 1,
+};
+
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define SH_ETH_RESET_DEFAULT 1
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+};
+#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
+#define SH_ETH_RESET_DEFAULT 1
+#define SH_ETH_HAS_TSU 1
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+};
+#endif
+
+static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
+{
+ if (!cd->ecsr_value)
+ cd->ecsr_value = DEFAULT_ECSR_INIT;
+
+ if (!cd->ecsipr_value)
+ cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
+
+ if (!cd->fcftr_value)
+ cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
+ DEFAULT_FIFO_F_D_RFD;
+
+ if (!cd->fdr_value)
+ cd->fdr_value = DEFAULT_FDR_INIT;
+
+ if (!cd->rmcr_value)
+ cd->rmcr_value = DEFAULT_RMCR_VALUE;
+
+ if (!cd->tx_check)
+ cd->tx_check = DEFAULT_TX_CHECK;
+
+ if (!cd->eesr_err_check)
+ cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
+
+ if (!cd->tx_error_check)
+ cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
+}
+
+#if defined(SH_ETH_RESET_DEFAULT)
+/* Chip Reset */
+static void sh_eth_reset(struct net_device *ndev)
+{
+ u32 ioaddr = ndev->base_addr;
+
+ ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+ mdelay(3);
+ ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
+}
+#endif
+
+#if defined(CONFIG_CPU_SH4)
+static void sh_eth_set_receive_align(struct sk_buff *skb)
+{
+ int reserve;
+
+ reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
+ if (reserve)
+ skb_reserve(skb, reserve);
+}
+#else
+static void sh_eth_set_receive_align(struct sk_buff *skb)
+{
+ skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
+}
+#endif
+
+
/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
@@ -165,41 +385,6 @@ static struct mdiobb_ops bb_ops = {
.get_mdio_data = sh_get_mdio,
};
-/* Chip Reset */
-static void sh_eth_reset(struct net_device *ndev)
-{
- u32 ioaddr = ndev->base_addr;
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- int cnt = 100;
-
- ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
- ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
- while (cnt > 0) {
- if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt < 0)
- printk(KERN_ERR "Device reset fail\n");
-
- /* Table Init */
- ctrl_outl(0x0, ioaddr + TDLAR);
- ctrl_outl(0x0, ioaddr + TDFAR);
- ctrl_outl(0x0, ioaddr + TDFXR);
- ctrl_outl(0x0, ioaddr + TDFFR);
- ctrl_outl(0x0, ioaddr + RDLAR);
- ctrl_outl(0x0, ioaddr + RDFAR);
- ctrl_outl(0x0, ioaddr + RDFXR);
- ctrl_outl(0x0, ioaddr + RDFFR);
-#else
- ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
- mdelay(3);
- ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
-#endif
-}
-
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
@@ -228,7 +413,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
- u32 ioaddr = ndev->base_addr, reserve = 0;
+ u32 ioaddr = ndev->base_addr;
struct sh_eth_private *mdp = netdev_priv(ndev);
int i;
struct sk_buff *skb;
@@ -250,37 +435,27 @@ static void sh_eth_ring_format(struct net_device *ndev)
mdp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
+ dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb->dev = ndev; /* Mark as being used by this device. */
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- reserve = SH7763_SKB_ALIGN
- - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
- if (reserve)
- skb_reserve(skb, reserve);
-#else
- skb_reserve(skb, RX_OFFSET);
-#endif
+ sh_eth_set_receive_align(skb);
+
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
- rxdesc->addr = (u32)skb->data & ~0x3UL;
+ rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* The size of the buffer is 16 byte boundary. */
- rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
+ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
/* Rx descriptor address set */
if (i == 0) {
- ctrl_outl((u32)rxdesc, ioaddr + RDLAR);
+ ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl((u32)rxdesc, ioaddr + RDFAR);
+ ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR);
#endif
}
}
- /* Rx descriptor address set */
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl((u32)rxdesc, ioaddr + RDFXR);
- ctrl_outl(0x1, ioaddr + RDFFR);
-#endif
-
mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
/* Mark the last entry as wrapping the ring. */
@@ -296,19 +471,13 @@ static void sh_eth_ring_format(struct net_device *ndev)
txdesc->buffer_length = 0;
if (i == 0) {
/* Tx descriptor address set */
- ctrl_outl((u32)txdesc, ioaddr + TDLAR);
+ ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl((u32)txdesc, ioaddr + TDFAR);
+ ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR);
#endif
}
}
- /* Tx descriptor address set */
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl((u32)txdesc, ioaddr + TDFXR);
- ctrl_outl(0x1, ioaddr + TDFFR);
-#endif
-
txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
@@ -331,7 +500,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
GFP_KERNEL);
if (!mdp->rx_skbuff) {
- printk(KERN_ERR "%s: Cannot allocate Rx skb\n", ndev->name);
+ dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
ret = -ENOMEM;
return ret;
}
@@ -339,7 +508,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
GFP_KERNEL);
if (!mdp->tx_skbuff) {
- printk(KERN_ERR "%s: Cannot allocate Tx skb\n", ndev->name);
+ dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
ret = -ENOMEM;
goto skb_ring_free;
}
@@ -350,8 +519,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
GFP_KERNEL);
if (!mdp->rx_ring) {
- printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n",
- ndev->name, rx_ringsize);
+ dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
+ rx_ringsize);
ret = -ENOMEM;
goto desc_ring_free;
}
@@ -363,8 +532,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
GFP_KERNEL);
if (!mdp->tx_ring) {
- printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
- ndev->name, tx_ringsize);
+ dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
+ tx_ringsize);
ret = -ENOMEM;
goto desc_ring_free;
}
@@ -394,44 +563,43 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Descriptor format */
sh_eth_ring_format(ndev);
- ctrl_outl(RPADIR_INIT, ioaddr + RPADIR);
+ if (mdp->cd->rpadir)
+ ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR);
/* all sh_eth int mask */
ctrl_outl(0, ioaddr + EESIPR);
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl(EDMR_EL, ioaddr + EDMR);
-#else
- ctrl_outl(0, ioaddr + EDMR); /* Endian change */
+#if defined(__LITTLE_ENDIAN__)
+ if (mdp->cd->hw_swap)
+ ctrl_outl(EDMR_EL, ioaddr + EDMR);
+ else
#endif
+ ctrl_outl(0, ioaddr + EDMR);
/* FIFO size set */
- ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR);
+ ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR);
ctrl_outl(0, ioaddr + TFTR);
/* Frame recv control */
- ctrl_outl(0, ioaddr + RMCR);
+ ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR);
rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- /* Burst sycle set */
- ctrl_outl(0x800, ioaddr + BCULR);
-#endif
+ if (mdp->cd->bculr)
+ ctrl_outl(0x800, ioaddr + BCULR); /* Burst cycle set */
REMOVED_LINE_PLACEHOLDER
- ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR);
+ ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR);
-#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl(0, ioaddr + TRIMD);
-#endif
+ if (!mdp->cd->no_trimd)
+ ctrl_outl(0, ioaddr + TRIMD);
/* Recv frame limit set register */
ctrl_outl(RFLR_VALUE, ioaddr + RFLR);
ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
- ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR);
+ ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR);
/* PAUSE Prohibition */
val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
@@ -439,24 +607,25 @@ static int sh_eth_dev_init(struct net_device *ndev)
ctrl_outl(val, ioaddr + ECMR);
+ if (mdp->cd->set_rate)
+ mdp->cd->set_rate(ndev);
+
/* E-MAC Status Register clear */
- ctrl_outl(ECSR_INIT, ioaddr + ECSR);
+ ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR);
/* E-MAC Interrupt Enable register */
- ctrl_outl(ECSIPR_INIT, ioaddr + ECSIPR);
+ ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
/* mask reset */
-#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl(APR_AP, ioaddr + APR);
- ctrl_outl(MPR_MP, ioaddr + MPR);
- ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7710)
- ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR);
-#endif
+ if (mdp->cd->apr)
+ ctrl_outl(APR_AP, ioaddr + APR);
+ if (mdp->cd->mpr)
+ ctrl_outl(MPR_MP, ioaddr + MPR);
+ if (mdp->cd->tpauser)
+ ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
/* Setting the Rx mode will start the Rx process. */
ctrl_outl(EDRRR_R, ioaddr + EDRRR);
@@ -505,7 +674,7 @@ static int sh_eth_rx(struct net_device *ndev)
int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
struct sk_buff *skb;
u16 pkt_len = 0;
- u32 desc_status, reserve = 0;
+ u32 desc_status;
rxdesc = &mdp->rx_ring[entry];
while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
@@ -534,7 +703,10 @@ static int sh_eth_rx(struct net_device *ndev)
if (desc_status & RD_RFS10)
mdp->stats.rx_over_errors++;
} else {
- swaps((char *)(rxdesc->addr & ~0x3), pkt_len + 2);
+ if (!mdp->cd->hw_swap)
+ sh_eth_soft_swap(
+ phys_to_virt(ALIGN(rxdesc->addr, 4)),
+ pkt_len + 2);
skb = mdp->rx_skbuff[entry];
mdp->rx_skbuff[entry] = NULL;
skb_put(skb, pkt_len);
@@ -545,6 +717,7 @@ static int sh_eth_rx(struct net_device *ndev)
}
rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
entry = (++mdp->cur_rx) % RX_RING_SIZE;
+ rxdesc = &mdp->rx_ring[entry];
}
/* Refill the Rx ring buffers. */
@@ -552,24 +725,20 @@ static int sh_eth_rx(struct net_device *ndev)
entry = mdp->dirty_rx % RX_RING_SIZE;
rxdesc = &mdp->rx_ring[entry];
/* The size of the buffer is 16 byte boundary. */
- rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
+ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
if (mdp->rx_skbuff[entry] == NULL) {
skb = dev_alloc_skb(mdp->rx_buf_sz);
mdp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
+ dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb->dev = ndev;
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- reserve = SH7763_SKB_ALIGN
- - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
- if (reserve)
- skb_reserve(skb, reserve);
-#else
- skb_reserve(skb, RX_OFFSET);
-#endif
+ sh_eth_set_receive_align(skb);
+
skb->ip_summed = CHECKSUM_NONE;
- rxdesc->addr = (u32)skb->data & ~0x3UL;
+ rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
}
if (entry >= RX_RING_SIZE - 1)
rxdesc->status |=
@@ -593,6 +762,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
struct sh_eth_private *mdp = netdev_priv(ndev);
u32 ioaddr = ndev->base_addr;
u32 felic_stat;
+ u32 link_stat;
+ u32 mask;
if (intr_status & EESR_ECI) {
felic_stat = ctrl_inl(ioaddr + ECSR);
@@ -601,7 +772,14 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
mdp->stats.tx_carrier_errors++;
if (felic_stat & ECSR_LCHNG) {
/* Link Changed */
- u32 link_stat = (ctrl_inl(ioaddr + PSR));
+ if (mdp->cd->no_psr) {
+ if (mdp->link == PHY_DOWN)
+ link_stat = 0;
+ else
+ link_stat = PHY_ST_LINK;
+ } else {
+ link_stat = (ctrl_inl(ioaddr + PSR));
+ }
if (!(link_stat & PHY_ST_LINK)) {
/* Link Down : disable tx and rx */
ctrl_outl(ctrl_inl(ioaddr + ECMR) &
@@ -633,17 +811,15 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (intr_status & EESR_RFRMER) {
/* Receive Frame Overflow int */
mdp->stats.rx_frame_errors++;
- printk(KERN_ERR "Receive Frame Overflow\n");
+ dev_err(&ndev->dev, "Receive Frame Overflow\n");
}
}
-#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
- if (intr_status & EESR_ADE) {
- if (intr_status & EESR_TDE) {
- if (intr_status & EESR_TFE)
- mdp->stats.tx_fifo_errors++;
- }
+
+ if (!mdp->cd->no_ade) {
+ if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
+ intr_status & EESR_TFE)
+ mdp->stats.tx_fifo_errors++;
}
-#endif
if (intr_status & EESR_RDE) {
/* Receive Descriptor Empty int */
@@ -651,24 +827,24 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
ctrl_outl(EDRRR_R, ioaddr + EDRRR);
- printk(KERN_ERR "Receive Descriptor Empty\n");
+ dev_err(&ndev->dev, "Receive Descriptor Empty\n");
}
if (intr_status & EESR_RFE) {
/* Receive FIFO Overflow int */
mdp->stats.rx_fifo_errors++;
- printk(KERN_ERR "Receive FIFO Overflow\n");
+ dev_err(&ndev->dev, "Receive FIFO Overflow\n");
}
- if (intr_status & (EESR_TWB | EESR_TABT |
-#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
- EESR_ADE |
-#endif
- EESR_TDE | EESR_TFE)) {
+
+ mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
+ if (mdp->cd->no_ade)
+ mask &= ~EESR_ADE;
+ if (intr_status & mask) {
/* Tx error */
u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
/* dmesg */
- printk(KERN_ERR "%s:TX error. status=%8.8x cur_tx=%8.8x ",
- ndev->name, intr_status, mdp->cur_tx);
- printk(KERN_ERR "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
+ dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
+ intr_status, mdp->cur_tx);
+ dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
mdp->dirty_tx, (u32) ndev->state, edtrr);
/* dirty buffer free */
sh_eth_txfree(ndev);
@@ -687,6 +863,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
struct net_device *ndev = netdev;
struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
u32 ioaddr, boguscnt = RX_RING_SIZE;
u32 intr_status = 0;
@@ -699,7 +876,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
/* Clear interrupt */
if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
- TX_CHECK | EESR_ERR_CHECK)) {
+ cd->tx_check | cd->eesr_err_check)) {
ctrl_outl(intr_status, ioaddr + EESR);
ret = IRQ_HANDLED;
} else
@@ -716,12 +893,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
}
/* Tx Check */
- if (intr_status & TX_CHECK) {
+ if (intr_status & cd->tx_check) {
sh_eth_txfree(ndev);
netif_wake_queue(ndev);
}
- if (intr_status & EESR_ERR_CHECK)
+ if (intr_status & cd->eesr_err_check)
sh_eth_error(ndev, intr_status);
if (--boguscnt < 0) {
@@ -756,32 +933,15 @@ static void sh_eth_adjust_link(struct net_device *ndev)
if (phydev->duplex != mdp->duplex) {
new_state = 1;
mdp->duplex = phydev->duplex;
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- if (mdp->duplex) { /* FULL */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM,
- ioaddr + ECMR);
- } else { /* Half */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM,
- ioaddr + ECMR);
- }
-#endif
+ if (mdp->cd->set_duplex)
+ mdp->cd->set_duplex(ndev);
}
if (phydev->speed != mdp->speed) {
new_state = 1;
mdp->speed = phydev->speed;
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- switch (mdp->speed) {
- case 10: /* 10BASE */
- ctrl_outl(GECMR_10, ioaddr + GECMR); break;
- case 100:/* 100BASE */
- ctrl_outl(GECMR_100, ioaddr + GECMR); break;
- case 1000: /* 1000BASE */
- ctrl_outl(GECMR_1000, ioaddr + GECMR); break;
- default:
- break;
- }
-#endif
+ if (mdp->cd->set_rate)
+ mdp->cd->set_rate(ndev);
}
if (mdp->link == PHY_DOWN) {
ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
@@ -804,7 +964,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
static int sh_eth_phy_init(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- char phy_id[BUS_ID_SIZE];
+ char phy_id[MII_BUS_ID_SIZE + 3];
struct phy_device *phydev = NULL;
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
@@ -821,8 +981,9 @@ static int sh_eth_phy_init(struct net_device *ndev)
dev_err(&ndev->dev, "phy_connect failed\n");
return PTR_ERR(phydev);
}
+
dev_info(&ndev->dev, "attached phy %i to driver %s\n",
- phydev->addr, phydev->drv->name);
+ phydev->addr, phydev->drv->name);
mdp->phydev = phydev;
@@ -860,7 +1021,7 @@ static int sh_eth_open(struct net_device *ndev)
#endif
ndev->name, ndev);
if (ret) {
- printk(KERN_ERR "Can not assign IRQ number to %s\n", CARDNAME);
+ dev_err(&ndev->dev, "Can not assign IRQ number\n");
return ret;
}
@@ -947,7 +1108,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (!sh_eth_txfree(ndev)) {
netif_stop_queue(ndev);
spin_unlock_irqrestore(&mdp->lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
}
spin_unlock_irqrestore(&mdp->lock, flags);
@@ -955,9 +1116,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
entry = mdp->cur_tx % TX_RING_SIZE;
mdp->tx_skbuff[entry] = skb;
txdesc = &mdp->tx_ring[entry];
- txdesc->addr = (u32)(skb->data);
+ txdesc->addr = virt_to_phys(skb->data);
/* soft swap. */
- swaps((char *)(txdesc->addr & ~0x3), skb->len + 2);
+ if (!mdp->cd->hw_swap)
+ sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
+ skb->len + 2);
/* write back */
__flush_purge_region(skb->data, skb->len);
if (skb->len < ETHERSMALL)
@@ -1059,7 +1222,7 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}
-
+#if defined(SH_ETH_HAS_TSU)
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
@@ -1104,6 +1267,7 @@ static void sh_eth_tsu_init(u32 ioaddr)
ctrl_outl(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */
ctrl_outl(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */
}
+#endif /* SH_ETH_HAS_TSU */
/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
@@ -1193,7 +1357,9 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_stop = sh_eth_close,
.ndo_start_xmit = sh_eth_start_xmit,
.ndo_get_stats = sh_eth_get_stats,
+#if defined(SH_ETH_HAS_TSU)
.ndo_set_multicast_list = sh_eth_set_multicast_list,
+#endif
.ndo_tx_timeout = sh_eth_tx_timeout,
.ndo_do_ioctl = sh_eth_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
@@ -1219,7 +1385,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof(struct sh_eth_private));
if (!ndev) {
- printk(KERN_ERR "%s: could not allocate device.\n", CARDNAME);
+ dev_err(&pdev->dev, "Could not allocate device.\n");
ret = -ENOMEM;
goto out;
}
@@ -1252,6 +1418,10 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* EDMAC endian */
mdp->edmac_endian = pd->edmac_endian;
+ /* set cpu data */
+ mdp->cd = &sh_eth_my_cpu_data;
+ sh_eth_set_default_cpu_data(mdp->cd);
+
/* set function */
ndev->netdev_ops = &sh_eth_netdev_ops;
ndev->watchdog_timeo = TX_TIMEOUT;
@@ -1264,13 +1434,10 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* First device only init */
if (!devno) {
-#if defined(ARSTR)
- /* reset device */
- ctrl_outl(ARSTR_ARSTR, ARSTR);
- mdelay(1);
-#endif
+ if (mdp->cd->chip_reset)
+ mdp->cd->chip_reset(ndev);
-#if defined(SH_TSU_ADDR)
+#if defined(SH_ETH_HAS_TSU)
/* TSU init (Init only)*/
sh_eth_tsu_init(SH_TSU_ADDR);
#endif
@@ -1287,8 +1454,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
goto out_unregister;
/* pritnt device infomation */
- printk(KERN_INFO "%s: %s at 0x%x, ",
- ndev->name, CARDNAME, (u32) ndev->base_addr);
+ pr_info("Base address at 0x%x, ",
+ (u32)ndev->base_addr);
for (i = 0; i < 5; i++)
printk("%02X:", ndev->dev_addr[i]);
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index 1537e13e623..9afe5b4c855 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -2,7 +2,7 @@
* SuperH Ethernet device driver
*
* Copyright (C) 2006-2008 Nobuhiro Iwamatsu
- * Copyright (C) 2008 Renesas Solutions Corp.
+ * Copyright (C) 2008-2009 Renesas Solutions Corp.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -39,12 +39,12 @@
#define ETHERSMALL 60
#define PKT_BUF_SZ 1538
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
+#if defined(CONFIG_CPU_SUBTYPE_SH7763)
+/* The register map of this CPU is very different from other SH4 CPUs */
-#define SH7763_SKB_ALIGN 32
/* Chip Base Address */
# define SH_TSU_ADDR 0xFEE01800
-# define ARSTR SH_TSU_ADDR
+# define ARSTR SH_TSU_ADDR
/* Chip Registers */
/* E-DMAC */
@@ -143,8 +143,60 @@
# define FWNLCR1 0xB0
# define FWALCR1 0x40
-#else /* CONFIG_CPU_SUBTYPE_SH7763 */
-# define RX_OFFSET 2 /* skb offset */
+#elif defined(CONFIG_CPU_SH4) /* #if defined(CONFIG_CPU_SUBTYPE_SH7763) */
+/* EtherC */
+#define ECMR 0x100
+#define RFLR 0x108
+#define ECSR 0x110
+#define ECSIPR 0x118
+#define PIR 0x120
+#define PSR 0x128
+#define RDMLR 0x140
+#define IPGR 0x150
+#define APR 0x154
+#define MPR 0x158
+#define TPAUSER 0x164
+#define RFCF 0x160
+#define TPAUSECR 0x168
+#define BCFRR 0x16c
+#define MAHR 0x1c0
+#define MALR 0x1c8
+#define TROCR 0x1d0
+#define CDCR 0x1d4
+#define LCCR 0x1d8
+#define CNDCR 0x1dc
+#define CEFCR 0x1e4
+#define FRECR 0x1e8
+#define TSFRCR 0x1ec
+#define TLFRCR 0x1f0
+#define RFCR 0x1f4
+#define MAFCR 0x1f8
+#define RTRATE 0x1fc
+
+/* E-DMAC */
+#define EDMR 0x000
+#define EDTRR 0x008
+#define EDRRR 0x010
+#define TDLAR 0x018
+#define RDLAR 0x020
+#define EESR 0x028
+#define EESIPR 0x030
+#define TRSCER 0x038
+#define RMFCR 0x040
+#define TFTR 0x048
+#define FDR 0x050
+#define RMCR 0x058
+#define TFUCR 0x064
+#define RFOCR 0x068
+#define FCFTR 0x070
+#define RPADIR 0x078
+#define TRIMD 0x07c
+#define RBWAR 0x0c8
+#define RDFAR 0x0cc
+#define TBRAR 0x0d4
+#define TDFAR 0x0d8
+#else /* #elif defined(CONFIG_CPU_SH4) */
+/* This section is for SH3 or SH2 */
#ifndef CONFIG_CPU_SUBTYPE_SH7619
/* Chip base address */
# define SH_TSU_ADDR 0xA7000804
@@ -243,6 +295,30 @@
#endif /* CONFIG_CPU_SUBTYPE_SH7763 */
+/* These definitions exist only to avoid compile errors */
+#if !defined(BCULR)
+#define BCULR 0x0fc
+#endif
+#if !defined(TRIMD)
+#define TRIMD 0x0fc
+#endif
+#if !defined(APR)
+#define APR 0x0fc
+#endif
+#if !defined(MPR)
+#define MPR 0x0fc
+#endif
+#if !defined(TPAUSER)
+#define TPAUSER 0x0fc
+#endif
+
+/* Driver's parameters */
+#if defined(CONFIG_CPU_SH4)
+#define SH4_SKB_RX_ALIGN 32
+#else
+#define SH2_SH3_SKB_RX_ALIGN 2
+#endif
+
/*
* Register's bits
*/
@@ -261,11 +337,10 @@ enum GECMR_BIT {
/* EDMR */
enum DMAC_M_BIT {
+ EDMR_EL = 0x40, /* Little endian */
EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
#ifdef CONFIG_CPU_SUBTYPE_SH7763
- EDMR_SRST = 0x03,
- EMDR_DESC_R = 0x30, /* Descriptor reserve size */
- EDMR_EL = 0x40, /* Litte endian */
+ EDMR_SRST = 0x03,
#else /* CONFIG_CPU_SUBTYPE_SH7763 */
EDMR_SRST = 0x01,
#endif
@@ -307,47 +382,43 @@ enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, };
/* EESR */
enum EESR_BIT {
-#ifndef CONFIG_CPU_SUBTYPE_SH7763
- EESR_TWB = 0x40000000,
-#else
- EESR_TWB = 0xC0000000,
- EESR_TC1 = 0x20000000,
- EESR_TUC = 0x10000000,
- EESR_ROC = 0x80000000,
-#endif
- EESR_TABT = 0x04000000,
- EESR_RABT = 0x02000000, EESR_RFRMER = 0x01000000,
-#ifndef CONFIG_CPU_SUBTYPE_SH7763
- EESR_ADE = 0x00800000,
-#endif
- EESR_ECI = 0x00400000,
- EESR_FTC = 0x00200000, EESR_TDE = 0x00100000,
- EESR_TFE = 0x00080000, EESR_FRC = 0x00040000,
- EESR_RDE = 0x00020000, EESR_RFE = 0x00010000,
-#ifndef CONFIG_CPU_SUBTYPE_SH7763
- EESR_CND = 0x00000800,
-#endif
- EESR_DLC = 0x00000400,
- EESR_CD = 0x00000200, EESR_RTO = 0x00000100,
- EESR_RMAF = 0x00000080, EESR_CEEF = 0x00000040,
- EESR_CELF = 0x00000020, EESR_RRF = 0x00000010,
- EESR_RTLF = 0x00000008, EESR_RTSF = 0x00000004,
- EESR_PRE = 0x00000002, EESR_CERF = 0x00000001,
-};
-
-
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
-# define TX_CHECK (EESR_TC1 | EESR_FTC)
-# define EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \
- | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI)
-# define TX_ERROR_CEHCK (EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE)
-
-#else
-# define TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO)
-# define EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \
- | EESR_RFRMER | EESR_ADE | EESR_TFE | EESR_TDE | EESR_ECI)
-# define TX_ERROR_CEHCK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE)
-#endif
+ EESR_TWB1 = 0x80000000,
+ EESR_TWB = 0x40000000, /* same as TWB0 */
+ EESR_TC1 = 0x20000000,
+ EESR_TUC = 0x10000000,
+ EESR_ROC = 0x08000000,
+ EESR_TABT = 0x04000000,
+ EESR_RABT = 0x02000000,
+ EESR_RFRMER = 0x01000000, /* same as RFCOF */
+ EESR_ADE = 0x00800000,
+ EESR_ECI = 0x00400000,
+ EESR_FTC = 0x00200000, /* same as TC or TC0 */
+ EESR_TDE = 0x00100000,
+ EESR_TFE = 0x00080000, /* same as TFUF */
+ EESR_FRC = 0x00040000, /* same as FR */
+ EESR_RDE = 0x00020000,
+ EESR_RFE = 0x00010000,
+ EESR_CND = 0x00000800,
+ EESR_DLC = 0x00000400,
+ EESR_CD = 0x00000200,
+ EESR_RTO = 0x00000100,
+ EESR_RMAF = 0x00000080,
+ EESR_CEEF = 0x00000040,
+ EESR_CELF = 0x00000020,
+ EESR_RRF = 0x00000010,
+ EESR_RTLF = 0x00000008,
+ EESR_RTSF = 0x00000004,
+ EESR_PRE = 0x00000002,
+ EESR_CERF = 0x00000001,
+};
+
+#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
+ EESR_RTO)
+#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \
+ EESR_RDE | EESR_RFRMER | EESR_ADE | \
+ EESR_TFE | EESR_TDE | EESR_ECI)
+#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
+ EESR_TFE)
/* EESIPR */
enum DMAC_IM_BIT {
@@ -386,12 +457,8 @@ enum FCFTR_BIT {
FCFTR_RFF0 = 0x00010000, FCFTR_RFD2 = 0x00000004,
FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001,
};
-#define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0)
-#ifndef CONFIG_CPU_SUBTYPE_SH7619
-#define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0)
-#else
-#define FIFO_F_D_RFD (FCFTR_RFD0)
-#endif
+#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
+#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
/* Transfer descriptor bit */
enum TD_STS_BIT {
@@ -404,60 +471,38 @@ enum TD_STS_BIT {
#define TD_TFP (TD_TFP1|TD_TFP0)
/* RMCR */
-enum RECV_RST_BIT { RMCR_RST = 0x01, };
+#define DEFAULT_RMCR_VALUE 0x00000000
+
/* ECMR */
enum FELIC_MODE_BIT {
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000,
ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
-#endif
ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
- ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004, ECMR_DM = 0x00000002,
- ECMR_PRM = 0x00000001,
+ ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004,
+ ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001,
};
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
-#define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_ZPF |\
- ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT)
-#elif CONFIG_CPU_SUBTYPE_SH7619
-#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF)
-#else
-#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT)
-#endif
-
/* ECSR */
enum ECSR_STATUS_BIT {
-#ifndef CONFIG_CPU_SUBTYPE_SH7763
ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10,
-#endif
ECSR_LCHNG = 0x04,
ECSR_MPD = 0x02, ECSR_ICD = 0x01,
};
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
-# define ECSR_INIT (ECSR_ICD | ECSIPR_MPDIP)
-#else
-# define ECSR_INIT (ECSR_BRCRX | ECSR_PSRTO | \
- ECSR_LCHNG | ECSR_ICD | ECSIPR_MPDIP)
-#endif
+#define DEFAULT_ECSR_INIT (ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | \
+ ECSR_ICD | ECSIPR_MPDIP)
/* ECSIPR */
enum ECSIPR_STATUS_MASK_BIT {
-#ifndef CONFIG_CPU_SUBTYPE_SH7763
ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10,
-#endif
ECSIPR_LCHNGIP = 0x04,
ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01,
};
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
-# define ECSIPR_INIT (ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP)
-#else
-# define ECSIPR_INIT (ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | \
- ECSIPR_ICDIP | ECSIPR_MPDIP)
-#endif
+#define DEFAULT_ECSIPR_INIT (ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | \
+ ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP)
/* APR */
enum APR_BIT {
@@ -483,23 +528,12 @@ enum RPADIR_BIT {
RPADIR_PADR = 0x0003f,
};
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-# define RPADIR_INIT (0x00)
-#else
-# define RPADIR_INIT (RPADIR_PADS1)
-#endif
-
/* RFLR */
#define RFLR_VALUE 0x1000
/* FDR */
-enum FIFO_SIZE_BIT {
-#ifndef CONFIG_CPU_SUBTYPE_SH7619
- FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007,
-#else
- FIFO_SIZE_T = 0x00000100, FIFO_SIZE_R = 0x00000001,
-#endif
-};
+#define DEFAULT_FDR_INIT 0x00000707
+
enum phy_offsets {
PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
PHY_ANA = 4, PHY_ANL = 5, PHY_ANE = 6,
@@ -633,7 +667,43 @@ struct sh_eth_rxdesc {
u32 pad0; /* padding data */
} __attribute__((aligned(2), packed));
+/* This structure is used for the CPU-dependent handling. */
+struct sh_eth_cpu_data {
+ /* optional functions */
+ void (*chip_reset)(struct net_device *ndev);
+ void (*set_duplex)(struct net_device *ndev);
+ void (*set_rate)(struct net_device *ndev);
+
+ /* mandatory initialize value */
+ unsigned long eesipr_value;
+
+ /* optional initialize value */
+ unsigned long ecsr_value;
+ unsigned long ecsipr_value;
+ unsigned long fdr_value;
+ unsigned long fcftr_value;
+ unsigned long rpadir_value;
+ unsigned long rmcr_value;
+
+ /* interrupt checking mask */
+ unsigned long tx_check;
+ unsigned long eesr_err_check;
+ unsigned long tx_error_check;
+
+ /* hardware features */
+ unsigned no_psr:1; /* EtherC DO NOT have PSR */
+ unsigned apr:1; /* EtherC have APR */
+ unsigned mpr:1; /* EtherC have MPR */
+ unsigned tpauser:1; /* EtherC have TPAUSER */
+ unsigned bculr:1; /* EtherC have BCULR */
+ unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
+ unsigned rpadir:1; /* E-DMAC have RPADIR */
+ unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
+ unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
+};
+
struct sh_eth_private {
+ struct sh_eth_cpu_data *cd;
dma_addr_t rx_desc_dma;
dma_addr_t tx_desc_dma;
struct sh_eth_rxdesc *rx_ring;
@@ -661,11 +731,7 @@ struct sh_eth_private {
struct net_device_stats tsu_stats; /* TSU forward status */
};
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
-/* SH7763 has endian control register */
-#define swaps(x, y)
-#else
-static void swaps(char *src, int len)
+static inline void sh_eth_soft_swap(char *src, int len)
{
#ifdef __LITTLE_ENDIAN__
u32 *p = (u32 *)src;
@@ -676,5 +742,5 @@ static void swaps(char *src, int len)
*p = swab32(*p);
#endif
}
-#endif /* CONFIG_CPU_SUBTYPE_SH7763 */
-#endif
+
+#endif /* #ifndef __SH_ETH_H__ */
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 55ccd51d247..e2247669a49 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -47,7 +47,7 @@
#define PHY_ID_ANY 0x1f
#define MII_REG_ANY 0x1f
-#define DRV_VERSION "1.2"
+#define DRV_VERSION "1.3"
#define DRV_NAME "sis190"
#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX DRV_NAME ": "
@@ -317,6 +317,7 @@ static struct mii_chip_info {
unsigned int type;
u32 feature;
} mii_chip_table[] = {
+ { "Atheros PHY", { 0x004d, 0xd010 }, LAN, 0 },
{ "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
{ "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
@@ -347,7 +348,7 @@ static struct {
u32 msg_enable;
} debug = { -1 };
-MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
+MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
@@ -539,8 +540,8 @@ static bool sis190_try_rx_copy(struct sis190_private *tp,
if (!skb)
goto out;
- pci_dma_sync_single_for_device(tp->pci_dev, addr, pkt_size,
- PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
skb_reserve(skb, 2);
skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
*sk_buff = skb;
@@ -942,9 +943,9 @@ static void sis190_phy_task(struct work_struct *work)
u32 ctl;
const char *msg;
} reg31[] = {
- { LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
+ { LPA_1000FULL, 0x07000c00 | 0x00001000,
"1000 Mbps Full Duplex" },
- { LPA_1000XHALF | LPA_SLCT, 0x07000c00,
+ { LPA_1000HALF, 0x07000c00,
"1000 Mbps Half Duplex" },
{ LPA_100FULL, 0x04000800 | 0x00001000,
"100 Mbps Full Duplex" },
@@ -955,22 +956,35 @@ static void sis190_phy_task(struct work_struct *work)
{ LPA_10HALF, 0x04000400,
"10 Mbps Half Duplex" },
{ 0, 0x04000400, "unknown" }
- }, *p;
- u16 adv;
+ }, *p = NULL;
+ u16 adv, autoexp, gigadv, gigrec;
val = mdio_read(ioaddr, phy_id, 0x1f);
net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
val = mdio_read(ioaddr, phy_id, MII_LPA);
adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
- net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
- dev->name, val, adv);
-
- val &= adv;
+ autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
+ net_link(tp, KERN_INFO "%s: mii lpa=%04x adv=%04x exp=%04x.\n",
+ dev->name, val, adv, autoexp);
+
+ if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
+ /* check for gigabit speed */
+ gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
+ gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
+ val = (gigadv & (gigrec >> 2));
+ if (val & ADVERTISE_1000FULL)
+ p = reg31;
+ else if (val & ADVERTISE_1000HALF)
+ p = reg31 + 1;
+ }
+ if (!p) {
+ val &= adv;
- for (p = reg31; p->val; p++) {
- if ((val & p->val) == p->val)
- break;
+ for (p = reg31; p->val; p++) {
+ if ((val & p->val) == p->val)
+ break;
+ }
}
p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
@@ -1204,8 +1218,6 @@ static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
- dev->trans_start = jiffies;
-
dirty_tx = tp->dirty_tx;
if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
netif_stop_queue(dev);
@@ -1315,12 +1327,15 @@ static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
LAN : HOME) : p->type;
tp->features |= p->feature;
- } else
+ net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
+ pci_name(tp->pci_dev), p->name, phy_id);
+ } else {
phy->type = UNKNOWN;
-
- net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
- pci_name(tp->pci_dev),
- (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
+ net_probe(tp, KERN_INFO
+ "%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
+ pci_name(tp->pci_dev),
+ phy->id[0], (phy->id[1] & 0xfff0), phy_id);
+ }
}
static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
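
In the sis190 link-resolution hunk above, the gigabit case is decided by gigadv & (gigrec >> 2): the link-partner 1000BASE-T ability bits in MII_STAT1000 sit two bit positions above the matching advertisement bits in MII_CTRL1000, so shifting the status word right by two lets both be masked with ADVERTISE_1000FULL/ADVERTISE_1000HALF. A small sketch of that resolution step; the helper name is illustrative:

	#include <linux/mii.h>

	/* sketch: resolve the negotiated gigabit mode from the two MII registers */
	static u16 resolve_gige_sketch(u16 gigadv /* MII_CTRL1000 */, u16 gigrec /* MII_STAT1000 */)
	{
		u16 common = gigadv & (gigrec >> 2);	/* align LPA_1000* with ADVERTISE_1000* */

		if (common & ADVERTISE_1000FULL)
			return ADVERTISE_1000FULL;	/* 1000 Mbps full duplex */
		if (common & ADVERTISE_1000HALF)
			return ADVERTISE_1000HALF;	/* 1000 Mbps half duplex */
		return 0;				/* fall back to 10/100 resolution */
	}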
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 2d4617b3e20..a9a897bb42d 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1584,7 +1584,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
/* Don't transmit data before the complete of auto-negotiation */
if(!sis_priv->autong_complete){
netif_stop_queue(net_dev);
- return 1;
+ return NETDEV_TX_BUSY;
}
spin_lock_irqsave(&sis_priv->lock, flags);
diff --git a/drivers/net/skfp/h/smt.h b/drivers/net/skfp/h/smt.h
index 1ff589988d1..2976757a36f 100644
--- a/drivers/net/skfp/h/smt.h
+++ b/drivers/net/skfp/h/smt.h
@@ -413,7 +413,7 @@ struct smt_p_reason {
#define SMT_RDF_SUCCESS 0x00000003 /* success (PMF) */
#define SMT_RDF_BADSET 0x00000004 /* bad set count (PMF) */
#define SMT_RDF_ILLEGAL 0x00000005 /* read only (PMF) */
-#define SMT_RDF_NOPARAM 0x6 /* paramter not supported (PMF) */
+#define SMT_RDF_NOPARAM 0x6 /* parameter not supported (PMF) */
#define SMT_RDF_RANGE 0x8 /* out of range */
#define SMT_RDF_AUTHOR 0x9 /* not autohorized */
#define SMT_RDF_LENGTH 0x0a /* length error */
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index e14aec0a733..088fe26484e 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -159,12 +159,6 @@ MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
static int num_boards; /* total number of adapters configured */
-#ifdef DRIVERDEBUG
-#define PRINTK(s, args...) printk(s, ## args)
-#else
-#define PRINTK(s, args...)
-#endif // DRIVERDEBUG
-
static const struct net_device_ops skfp_netdev_ops = {
.ndo_open = skfp_open,
.ndo_stop = skfp_close,
@@ -213,7 +207,7 @@ static int skfp_init_one(struct pci_dev *pdev,
void __iomem *mem;
int err;
- PRINTK(KERN_INFO "entering skfp_init_one\n");
+ pr_debug(KERN_INFO "entering skfp_init_one\n");
if (num_boards == 0)
printk("%s\n", boot_msg);
@@ -389,7 +383,7 @@ static int skfp_driver_init(struct net_device *dev)
skfddi_priv *bp = &smc->os;
int err = -EIO;
- PRINTK(KERN_INFO "entering skfp_driver_init\n");
+ pr_debug(KERN_INFO "entering skfp_driver_init\n");
// set the io address in private structures
bp->base_addr = dev->base_addr;
@@ -409,7 +403,7 @@ static int skfp_driver_init(struct net_device *dev)
// Determine the required size of the 'shared' memory area.
bp->SharedMemSize = mac_drv_check_space();
- PRINTK(KERN_INFO "Memory for HWM: %ld\n", bp->SharedMemSize);
+ pr_debug(KERN_INFO "Memory for HWM: %ld\n", bp->SharedMemSize);
if (bp->SharedMemSize > 0) {
bp->SharedMemSize += 16; // for descriptor alignment
@@ -433,13 +427,13 @@ static int skfp_driver_init(struct net_device *dev)
card_stop(smc); // Reset adapter.
- PRINTK(KERN_INFO "mac_drv_init()..\n");
+ pr_debug(KERN_INFO "mac_drv_init()..\n");
if (mac_drv_init(smc) != 0) {
- PRINTK(KERN_INFO "mac_drv_init() failed.\n");
+ pr_debug(KERN_INFO "mac_drv_init() failed.\n");
goto fail;
}
read_address(smc, NULL);
- PRINTK(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n",
+ pr_debug(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n",
smc->hw.fddi_canon_addr.a[0],
smc->hw.fddi_canon_addr.a[1],
smc->hw.fddi_canon_addr.a[2],
@@ -495,7 +489,7 @@ static int skfp_open(struct net_device *dev)
struct s_smc *smc = netdev_priv(dev);
int err;
- PRINTK(KERN_INFO "entering skfp_open\n");
+ pr_debug(KERN_INFO "entering skfp_open\n");
/* Register IRQ - support shared interrupts by passing device ptr */
err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
dev->name, dev);
@@ -868,12 +862,12 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
/* Enable promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC) {
mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
- PRINTK(KERN_INFO "PROMISCUOUS MODE ENABLED\n");
+ pr_debug(KERN_INFO "PROMISCUOUS MODE ENABLED\n");
}
/* Else, update multicast address table */
else {
mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
- PRINTK(KERN_INFO "PROMISCUOUS MODE DISABLED\n");
+ pr_debug(KERN_INFO "PROMISCUOUS MODE DISABLED\n");
// Reset all MC addresses
mac_clear_multicast(smc);
@@ -881,7 +875,7 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
- PRINTK(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
+ pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
} else if (dev->mc_count > 0) {
if (dev->mc_count <= FPMAX_MULTICAST) {
/* use exact filtering */
@@ -894,12 +888,12 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
(struct fddi_addr *)dmi->dmi_addr,
1);
- PRINTK(KERN_INFO "ENABLE MC ADDRESS:");
- PRINTK(" %02x %02x %02x ",
+ pr_debug(KERN_INFO "ENABLE MC ADDRESS:");
+ pr_debug(" %02x %02x %02x ",
dmi->dmi_addr[0],
dmi->dmi_addr[1],
dmi->dmi_addr[2]);
- PRINTK("%02x %02x %02x\n",
+ pr_debug("%02x %02x %02x\n",
dmi->dmi_addr[3],
dmi->dmi_addr[4],
dmi->dmi_addr[5]);
@@ -909,11 +903,11 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
} else { // more MC addresses than HW supports
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
- PRINTK(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
+ pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
}
} else { // no MC addresses
- PRINTK(KERN_INFO "DISABLE ALL MC ADDRESSES\n");
+ pr_debug(KERN_INFO "DISABLE ALL MC ADDRESSES\n");
}
/* Update adapter filters */
@@ -1067,7 +1061,7 @@ static int skfp_send_pkt(struct sk_buff *skb, struct net_device *dev)
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *bp = &smc->os;
- PRINTK(KERN_INFO "skfp_send_pkt\n");
+ pr_debug(KERN_INFO "skfp_send_pkt\n");
/*
* Verify that incoming transmit request is OK
@@ -1088,7 +1082,7 @@ static int skfp_send_pkt(struct sk_buff *skb, struct net_device *dev)
if (bp->QueueSkb == 0) { // return with tbusy set: queue full
netif_stop_queue(dev);
- return 1;
+ return NETDEV_TX_BUSY;
}
bp->QueueSkb--;
skb_queue_tail(&bp->SendSkbQueue, skb);
@@ -1137,13 +1131,13 @@ static void send_queued_packets(struct s_smc *smc)
int frame_status; // HWM tx frame status.
- PRINTK(KERN_INFO "send queued packets\n");
+ pr_debug(KERN_INFO "send queued packets\n");
for (;;) {
// send first buffer from queue
skb = skb_dequeue(&bp->SendSkbQueue);
if (!skb) {
- PRINTK(KERN_INFO "queue empty\n");
+ pr_debug(KERN_INFO "queue empty\n");
return;
} // queue empty !
@@ -1174,11 +1168,11 @@ static void send_queued_packets(struct s_smc *smc)
if ((frame_status & RING_DOWN) != 0) {
// Ring is down.
- PRINTK("Tx attempt while ring down.\n");
+ pr_debug("Tx attempt while ring down.\n");
} else if ((frame_status & OUT_OF_TXD) != 0) {
- PRINTK("%s: out of TXDs.\n", bp->dev->name);
+ pr_debug("%s: out of TXDs.\n", bp->dev->name);
} else {
- PRINTK("%s: out of transmit resources",
+ pr_debug("%s: out of transmit resources",
bp->dev->name);
}
@@ -1255,7 +1249,7 @@ static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
static void ResetAdapter(struct s_smc *smc)
{
- PRINTK(KERN_INFO "[fddi: ResetAdapter]\n");
+ pr_debug(KERN_INFO "[fddi: ResetAdapter]\n");
// Stop the adapter.
@@ -1301,7 +1295,7 @@ void llc_restart_tx(struct s_smc *smc)
{
skfddi_priv *bp = &smc->os;
- PRINTK(KERN_INFO "[llc_restart_tx]\n");
+ pr_debug(KERN_INFO "[llc_restart_tx]\n");
// Try to send queued packets
spin_unlock(&bp->DriverLock);
@@ -1331,7 +1325,7 @@ void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
{
void *virt;
- PRINTK(KERN_INFO "mac_drv_get_space (%d bytes), ", size);
+ pr_debug(KERN_INFO "mac_drv_get_space (%d bytes), ", size);
virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
@@ -1340,9 +1334,9 @@ void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
}
smc->os.SharedMemHeap += size; // Move heap pointer.
- PRINTK(KERN_INFO "mac_drv_get_space end\n");
- PRINTK(KERN_INFO "virt addr: %lx\n", (ulong) virt);
- PRINTK(KERN_INFO "bus addr: %lx\n", (ulong)
+ pr_debug(KERN_INFO "mac_drv_get_space end\n");
+ pr_debug(KERN_INFO "virt addr: %lx\n", (ulong) virt);
+ pr_debug(KERN_INFO "bus addr: %lx\n", (ulong)
(smc->os.SharedMemDMA +
((char *) virt - (char *)smc->os.SharedMemAddr)));
return (virt);
@@ -1372,7 +1366,7 @@ void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
char *virt;
- PRINTK(KERN_INFO "mac_drv_get_desc_mem\n");
+ pr_debug(KERN_INFO "mac_drv_get_desc_mem\n");
// Descriptor memory must be aligned on 16-byte boundary.
@@ -1381,8 +1375,8 @@ void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
size = (u_int) (16 - (((unsigned long) virt) & 15UL));
size = size % 16;
- PRINTK("Allocate %u bytes alignment gap ", size);
- PRINTK("for descriptor memory.\n");
+ pr_debug("Allocate %u bytes alignment gap ", size);
+ pr_debug("for descriptor memory.\n");
if (!mac_drv_get_space(smc, size)) {
printk("fddi: Unable to align descriptor memory.\n");
@@ -1516,11 +1510,11 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
{
struct sk_buff *skb;
- PRINTK(KERN_INFO "entering mac_drv_tx_complete\n");
+ pr_debug(KERN_INFO "entering mac_drv_tx_complete\n");
// Check if this TxD points to a skb
if (!(skb = txd->txd_os.skb)) {
- PRINTK("TXD with no skb assigned.\n");
+ pr_debug("TXD with no skb assigned.\n");
return;
}
txd->txd_os.skb = NULL;
@@ -1536,7 +1530,7 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
// free the skb
dev_kfree_skb_irq(skb);
- PRINTK(KERN_INFO "leaving mac_drv_tx_complete\n");
+ pr_debug(KERN_INFO "leaving mac_drv_tx_complete\n");
} // mac_drv_tx_complete
@@ -1603,7 +1597,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
unsigned short ri;
u_int RifLength;
- PRINTK(KERN_INFO "entering mac_drv_rx_complete (len=%d)\n", len);
+ pr_debug(KERN_INFO "entering mac_drv_rx_complete (len=%d)\n", len);
if (frag_count != 1) { // This is not allowed to happen.
printk("fddi: Multi-fragment receive!\n");
@@ -1612,7 +1606,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
}
skb = rxd->rxd_os.skb;
if (!skb) {
- PRINTK(KERN_INFO "No skb in rxd\n");
+ pr_debug(KERN_INFO "No skb in rxd\n");
smc->os.MacStat.gen.rx_errors++;
goto RequeueRxd;
}
@@ -1642,7 +1636,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
else {
int n;
// goos: RIF removal has still to be tested
- PRINTK(KERN_INFO "RIF found\n");
+ pr_debug(KERN_INFO "RIF found\n");
// Get RIF length from Routing Control (RC) field.
cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
@@ -1687,7 +1681,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
return;
RequeueRxd:
- PRINTK(KERN_INFO "Rx: re-queue RXD.\n");
+ pr_debug(KERN_INFO "Rx: re-queue RXD.\n");
mac_drv_requeue_rxd(smc, rxd, frag_count);
smc->os.MacStat.gen.rx_errors++; // Count receive packets
// not indicated.
@@ -1736,7 +1730,7 @@ void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
skb = src_rxd->rxd_os.skb;
if (skb == NULL) { // this should not happen
- PRINTK("Requeue with no skb in rxd!\n");
+ pr_debug("Requeue with no skb in rxd!\n");
skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
if (skb) {
// we got a skb
@@ -1751,7 +1745,7 @@ void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
rxd->rxd_os.dma_addr = b_addr;
} else {
// no skb available, use local buffer
- PRINTK("Queueing invalid buffer!\n");
+ pr_debug("Queueing invalid buffer!\n");
rxd->rxd_os.skb = NULL;
v_addr = smc->os.LocalRxBuffer;
b_addr = smc->os.LocalRxBufferDMA;
@@ -1798,7 +1792,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
struct sk_buff *skb;
volatile struct s_smt_fp_rxd *rxd;
- PRINTK(KERN_INFO "entering mac_drv_fill_rxd\n");
+ pr_debug(KERN_INFO "entering mac_drv_fill_rxd\n");
// Walk through the list of free receive buffers, passing receive
// buffers to the HWM as long as RXDs are available.
@@ -1806,7 +1800,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
MaxFrameSize = smc->os.MaxFrameSize;
// Check if there is any RXD left.
while (HWM_GET_RX_FREE(smc) > 0) {
- PRINTK(KERN_INFO ".\n");
+ pr_debug(KERN_INFO ".\n");
rxd = HWM_GET_CURR_RXD(smc);
skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
@@ -1826,7 +1820,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
// keep the receiver running in hope of better times.
// Multiple descriptors may point to this local buffer,
// so data in it must be considered invalid.
- PRINTK("Queueing invalid buffer!\n");
+ pr_debug("Queueing invalid buffer!\n");
v_addr = smc->os.LocalRxBuffer;
b_addr = smc->os.LocalRxBufferDMA;
}
@@ -1837,7 +1831,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
FIRST_FRAG | LAST_FRAG);
}
- PRINTK(KERN_INFO "leaving mac_drv_fill_rxd\n");
+ pr_debug(KERN_INFO "leaving mac_drv_fill_rxd\n");
} // mac_drv_fill_rxd
@@ -1863,7 +1857,7 @@ void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
struct sk_buff *skb;
- PRINTK("entering mac_drv_clear_rxd\n");
+ pr_debug("entering mac_drv_clear_rxd\n");
if (frag_count != 1) // This is not allowed to happen.
@@ -1919,19 +1913,19 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
{
struct sk_buff *skb;
- PRINTK("entering mac_drv_rx_init(len=%d)\n", len);
+ pr_debug("entering mac_drv_rx_init(len=%d)\n", len);
// "Received" a SMT or NSA frame of the local SMT.
if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
- PRINTK("fddi: Discard invalid local SMT frame\n");
- PRINTK(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
+ pr_debug("fddi: Discard invalid local SMT frame\n");
+ pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
len, la_len, (unsigned long) look_ahead);
return (0);
}
skb = alloc_skb(len + 3, GFP_ATOMIC);
if (!skb) {
- PRINTK("fddi: Local SMT: skb memory exhausted.\n");
+ pr_debug("fddi: Local SMT: skb memory exhausted.\n");
return (0);
}
skb_reserve(skb, 3);
@@ -1981,40 +1975,40 @@ void smt_timer_poll(struct s_smc *smc)
************************/
void ring_status_indication(struct s_smc *smc, u_long status)
{
- PRINTK("ring_status_indication( ");
+ pr_debug("ring_status_indication( ");
if (status & RS_RES15)
- PRINTK("RS_RES15 ");
+ pr_debug("RS_RES15 ");
if (status & RS_HARDERROR)
- PRINTK("RS_HARDERROR ");
+ pr_debug("RS_HARDERROR ");
if (status & RS_SOFTERROR)
- PRINTK("RS_SOFTERROR ");
+ pr_debug("RS_SOFTERROR ");
if (status & RS_BEACON)
- PRINTK("RS_BEACON ");
+ pr_debug("RS_BEACON ");
if (status & RS_PATHTEST)
- PRINTK("RS_PATHTEST ");
+ pr_debug("RS_PATHTEST ");
if (status & RS_SELFTEST)
- PRINTK("RS_SELFTEST ");
+ pr_debug("RS_SELFTEST ");
if (status & RS_RES9)
- PRINTK("RS_RES9 ");
+ pr_debug("RS_RES9 ");
if (status & RS_DISCONNECT)
- PRINTK("RS_DISCONNECT ");
+ pr_debug("RS_DISCONNECT ");
if (status & RS_RES7)
- PRINTK("RS_RES7 ");
+ pr_debug("RS_RES7 ");
if (status & RS_DUPADDR)
- PRINTK("RS_DUPADDR ");
+ pr_debug("RS_DUPADDR ");
if (status & RS_NORINGOP)
- PRINTK("RS_NORINGOP ");
+ pr_debug("RS_NORINGOP ");
if (status & RS_VERSION)
- PRINTK("RS_VERSION ");
+ pr_debug("RS_VERSION ");
if (status & RS_STUCKBYPASSS)
- PRINTK("RS_STUCKBYPASSS ");
+ pr_debug("RS_STUCKBYPASSS ");
if (status & RS_EVENT)
- PRINTK("RS_EVENT ");
+ pr_debug("RS_EVENT ");
if (status & RS_RINGOPCHANGE)
- PRINTK("RS_RINGOPCHANGE ");
+ pr_debug("RS_RINGOPCHANGE ");
if (status & RS_RES0)
- PRINTK("RS_RES0 ");
- PRINTK("]\n");
+ pr_debug("RS_RES0 ");
+ pr_debug("]\n");
} // ring_status_indication
@@ -2057,17 +2051,17 @@ void smt_stat_counter(struct s_smc *smc, int stat)
{
// BOOLEAN RingIsUp ;
- PRINTK(KERN_INFO "smt_stat_counter\n");
+ pr_debug(KERN_INFO "smt_stat_counter\n");
switch (stat) {
case 0:
- PRINTK(KERN_INFO "Ring operational change.\n");
+ pr_debug(KERN_INFO "Ring operational change.\n");
break;
case 1:
- PRINTK(KERN_INFO "Receive fifo overflow.\n");
+ pr_debug(KERN_INFO "Receive fifo overflow.\n");
smc->os.MacStat.gen.rx_errors++;
break;
default:
- PRINTK(KERN_INFO "Unknown status (%d).\n", stat);
+ pr_debug(KERN_INFO "Unknown status (%d).\n", stat);
break;
}
} // smt_stat_counter
@@ -2123,10 +2117,10 @@ void cfm_state_change(struct s_smc *smc, int c_state)
s = "SC11_C_WRAP_S";
break;
default:
- PRINTK(KERN_INFO "cfm_state_change: unknown %d\n", c_state);
+ pr_debug(KERN_INFO "cfm_state_change: unknown %d\n", c_state);
return;
}
- PRINTK(KERN_INFO "cfm_state_change: %s\n", s);
+ pr_debug(KERN_INFO "cfm_state_change: %s\n", s);
#endif // DRIVERDEBUG
} // cfm_state_change
@@ -2181,7 +2175,7 @@ void ecm_state_change(struct s_smc *smc, int e_state)
s = "unknown";
break;
}
- PRINTK(KERN_INFO "ecm_state_change: %s\n", s);
+ pr_debug(KERN_INFO "ecm_state_change: %s\n", s);
#endif //DRIVERDEBUG
} // ecm_state_change
@@ -2236,7 +2230,7 @@ void rmt_state_change(struct s_smc *smc, int r_state)
s = "unknown";
break;
}
- PRINTK(KERN_INFO "[rmt_state_change: %s]\n", s);
+ pr_debug(KERN_INFO "[rmt_state_change: %s]\n", s);
#endif // DRIVERDEBUG
} // rmt_state_change
@@ -2256,7 +2250,7 @@ void rmt_state_change(struct s_smc *smc, int r_state)
************************/
void drv_reset_indication(struct s_smc *smc)
{
- PRINTK(KERN_INFO "entering drv_reset_indication\n");
+ pr_debug(KERN_INFO "entering drv_reset_indication\n");
smc->os.ResetRequested = TRUE; // Set flag.
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index c11cdd08ec5..60d502eef4f 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2837,8 +2837,6 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index a2ff9cb1e7a..6b5946fe8ae 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1690,7 +1690,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
mapping_unwind:
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index 8d36d40649e..c791ef76c1d 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -370,7 +370,7 @@ static int __init ultramca_probe(struct device *gen_dev)
outb(reg4, ioaddr + 4);
- gen_dev->driver_data = dev;
+ dev_set_drvdata(gen_dev, dev);
/* The 8390 isn't at the base address, so fake the offset
*/
@@ -531,7 +531,7 @@ static int ultramca_close_card(struct net_device *dev)
static int ultramca_remove(struct device *gen_dev)
{
struct mca_device *mca_dev = to_mca_device(gen_dev);
- struct net_device *dev = (struct net_device *)gen_dev->driver_data;
+ struct net_device *dev = dev_get_drvdata(gen_dev);
if (dev) {
/* NB: ultra_close_card() does free_irq */
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 293610334a7..bc4976ac871 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1774,6 +1774,20 @@ static int __devinit smc911x_findirq(struct net_device *dev)
return probe_irq_off(cookie);
}
+static const struct net_device_ops smc911x_netdev_ops = {
+ .ndo_open = smc911x_open,
+ .ndo_stop = smc911x_close,
+ .ndo_start_xmit = smc911x_hard_start_xmit,
+ .ndo_tx_timeout = smc911x_timeout,
+ .ndo_set_multicast_list = smc911x_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = smc911x_poll_controller,
+#endif
+};
+
/*
* Function: smc911x_probe(unsigned long ioaddr)
*
@@ -1940,16 +1954,9 @@ static int __devinit smc911x_probe(struct net_device *dev)
/* Fill in the fields of the device structure with ethernet values. */
ether_setup(dev);
- dev->open = smc911x_open;
- dev->stop = smc911x_close;
- dev->hard_start_xmit = smc911x_hard_start_xmit;
- dev->tx_timeout = smc911x_timeout;
+ dev->netdev_ops = &smc911x_netdev_ops;
dev->watchdog_timeo = msecs_to_jiffies(watchdog);
- dev->set_multicast_list = smc911x_set_multicast_list;
dev->ethtool_ops = &smc911x_ethtool_ops;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = smc911x_poll_controller;
-#endif
INIT_WORK(&lp->phy_configure, smc911x_phy_configure);
lp->mii.phy_id_mask = 0x1f;
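
The smc911x hunks above (like the sun3lance and ucc_geth ones further down) move the individual dev->open/dev->stop/... hooks into a single const net_device_ops table. A minimal, self-contained sketch of that pattern follows; the demo_* names and the trivial hook bodies are invented for illustration and are not taken from any of these drivers.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int demo_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int demo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static int demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Placeholder transmit path for the sketch: just drop the packet. */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_open		= demo_open,
	.ndo_stop		= demo_stop,
	.ndo_start_xmit		= demo_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static void demo_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* One assignment replaces all of the old per-field hooks. */
	dev->netdev_ops = &demo_netdev_ops;
}

The table is shared, const data, so every instance of the driver points at the same read-only ops structure instead of copying function pointers into each net_device.
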
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 9a7973a5411..e02471b2f2b 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -503,7 +503,7 @@ static int smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * de
/* THIS SHOULD NEVER HAPPEN. */
dev->stats.tx_aborted_errors++;
printk(CARDNAME": Bad Craziness - sent packet while busy.\n" );
- return 1;
+ return NETDEV_TX_BUSY;
}
lp->saved_skb = skb;
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 329f890e290..f1f773b17fe 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -45,7 +45,8 @@
defined(CONFIG_MACH_ZYLONITE) ||\
defined(CONFIG_MACH_LITTLETON) ||\
defined(CONFIG_MACH_ZYLONITE2) ||\
- defined(CONFIG_ARCH_VIPER)
+ defined(CONFIG_ARCH_VIPER) ||\
+ defined(CONFIG_MACH_STARGATE2)
#include <asm/mach-types.h>
@@ -73,7 +74,7 @@
/* We actually can't write halfwords properly if not word aligned */
static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
{
- if (machine_is_mainstone() && reg & 2) {
+ if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
unsigned int v = val << 16;
v |= readl(ioaddr + (reg & ~2)) & 0xffff;
writel(v, ioaddr + (reg & ~2));
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index eb7db032a78..b60639bd181 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -47,6 +47,7 @@
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/swab.h>
#include <linux/phy.h>
#include <linux/smsc911x.h>
#include "smsc911x.h"
@@ -175,6 +176,12 @@ static inline void
smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf,
unsigned int wordcount)
{
+ if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
+ while (wordcount--)
+ smsc911x_reg_write(pdata, TX_DATA_FIFO, swab32(*buf++));
+ return;
+ }
+
if (pdata->config.flags & SMSC911X_USE_32BIT) {
writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount);
return;
@@ -194,6 +201,12 @@ static inline void
smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf,
unsigned int wordcount)
{
+ if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
+ while (wordcount--)
+ *buf++ = swab32(smsc911x_reg_read(pdata, RX_DATA_FIFO));
+ return;
+ }
+
if (pdata->config.flags & SMSC911X_USE_32BIT) {
readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount);
return;
@@ -1963,7 +1976,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
retval = -ENODEV;
goto out_0;
}
- res_size = res->end - res->start;
+ res_size = res->end - res->start + 1;
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res) {
@@ -2096,12 +2109,58 @@ out_0:
return retval;
}
+#ifdef CONFIG_PM
+/* This implementation assumes the devices remains powered on its VDDVARIO
+ * pins during suspend. */
+
+static int smsc911x_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ /* enable wake on LAN, energy detection and the external PME
+ * signal. */
+ smsc911x_reg_write(pdata, PMT_CTRL,
+ PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ |
+ PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_);
+
+ return 0;
+}
+
+static int smsc911x_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int to = 100;
+
+ /* Note 3.11 from the datasheet:
+ * "When the LAN9220 is in a power saving state, a write of any
+ * data to the BYTE_TEST register will wake-up the device."
+ */
+ smsc911x_reg_write(pdata, BYTE_TEST, 0);
+
+ /* poll the READY bit in PMT_CTRL. Any other access to the device is
+ * forbidden while this bit isn't set. Try for 100ms and return -EIO
+ * if it failed. */
+ while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+ udelay(1000);
+
+ return (to == 0) ? -EIO : 0;
+}
+
+#else
+#define smsc911x_suspend NULL
+#define smsc911x_resume NULL
+#endif
+
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
- .remove = smsc911x_drv_remove,
+ .remove = __devexit_p(smsc911x_drv_remove),
.driver = {
.name = SMSC_CHIPNAME,
},
+ .suspend = smsc911x_suspend,
+ .resume = smsc911x_resume,
};
/* Entry point for loading the module */
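
The resume handler added above wakes the chip with a dummy BYTE_TEST write and then busy-waits for the READY bit in PMT_CTRL, giving up after roughly 100 ms. A stand-alone sketch of that wake-then-poll idiom, assuming a memory-mapped register window; the ex_* offsets and the helper name are placeholders, not the driver's real definitions.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Placeholder register layout; the real offsets live in smsc911x.h. */
#define EX_BYTE_TEST	0x64
#define EX_PMT_CTRL	0x84
#define EX_PMT_READY	0x00000001

/* Wake a sleeping chip and wait up to ~100 ms for it to become ready.
 * Returns 0 on success, -EIO if the READY bit never came up.
 */
static int ex_wake_and_wait(void __iomem *ioaddr)
{
	unsigned int timeout = 100;	/* 100 x 1 ms */

	/* Any write to BYTE_TEST wakes the device from a power-save state. */
	writel(0, ioaddr + EX_BYTE_TEST);

	/* No other register may be touched until READY is set. */
	while (!(readl(ioaddr + EX_PMT_CTRL) & EX_PMT_READY) && --timeout)
		udelay(1000);

	return timeout ? 0 : -EIO;
}

The udelay() busy-wait mirrors what the driver does; a caller that is allowed to sleep could just as well use msleep(1) per iteration.
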
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 211e805c122..e4255d82938 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -223,7 +223,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
if (!laddr) {
printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb(skb);
- return 1;
+		return NETDEV_TX_BUSY;
}
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index fcb943fca4f..838cce8b8ff 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1236,7 +1236,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
*/
if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
netif_stop_queue(dev);
- return 1;
+ return NETDEV_TX_BUSY;
}
#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
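
A large share of the hunks in this merge replace a bare `return 1;` in hard_start_xmit handlers with NETDEV_TX_BUSY. A minimal sketch of the return-value contract these conversions rely on; ex_priv and its tx_free counter are invented for the example.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct ex_priv {
	unsigned int tx_free;	/* free slots in the TX ring (illustrative) */
};

/* NETDEV_TX_OK:   the skb was consumed (queued to hardware or freed).
 * NETDEV_TX_BUSY: the skb was NOT consumed; the core will requeue it,
 *                 so the queue should be stopped first to avoid looping.
 */
static int ex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ex_priv *priv = netdev_priv(dev);

	if (priv->tx_free == 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;		/* skb left untouched */
	}

	priv->tx_free--;
	/* A real driver would map the buffer and kick the DMA here. */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

Returning the named constant instead of 1 keeps the handler in step with the qdisc layer, which tests specifically for NETDEV_TX_OK and NETDEV_TX_BUSY.
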
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index a39c0b9ba8b..7bb27426dbd 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -1023,7 +1023,7 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
#if(NUM_XMIT_BUFFS > 1)
if(test_and_set_bit(0,(void *) &p->lock)) {
printk("%s: Queue was locked\n",dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
else
#endif
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index e5beb299cbd..534dfe3eef6 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -294,6 +294,16 @@ out:
return ERR_PTR(err);
}
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_set_mac_address = NULL,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int __init lance_probe( struct net_device *dev)
{
unsigned long ioaddr;
@@ -397,12 +407,7 @@ static int __init lance_probe( struct net_device *dev)
if (did_version++ == 0)
printk( version );
- /* The LANCE-specific entries in the device structure. */
- dev->open = &lance_open;
- dev->hard_start_xmit = &lance_start_xmit;
- dev->stop = &lance_close;
- dev->set_multicast_list = &set_multicast_list;
- dev->set_mac_address = NULL;
+ dev->netdev_ops = &lance_netdev_ops;
// KLUDGE -- REMOVE ME
set_bit(__LINK_STATE_PRESENT, &dev->state);
@@ -521,7 +526,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
if (netif_queue_stopped(dev)) {
int tickssofar = jiffies - dev->trans_start;
if (tickssofar < 20)
- return( 1 );
+ return NETDEV_TX_BUSY;
DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
dev->name, DREG ));
@@ -572,7 +577,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) {
printk( "%s: tx queue lock!.\n", dev->name);
/* don't clear dev->tbusy flag. */
- return 1;
+ return NETDEV_TX_BUSY;
}
AREG = CSR0;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index c399b1955c1..545f81b34ad 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -369,7 +369,6 @@ struct netdev_private {
struct sk_buff* tx_skbuff[TX_RING_SIZE];
dma_addr_t tx_ring_dma;
dma_addr_t rx_ring_dma;
- struct net_device_stats stats;
struct timer_list timer; /* Media monitoring timer. */
/* Frequently used values: keep some adjacent for cache effect. */
spinlock_t lock;
@@ -975,7 +974,7 @@ static void tx_timeout(struct net_device *dev)
dev->if_port = 0;
dev->trans_start = jiffies;
- np->stats.tx_errors++;
+ dev->stats.tx_errors++;
if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
netif_wake_queue(dev);
}
@@ -1123,7 +1122,7 @@ reset_tx (struct net_device *dev)
else
dev_kfree_skb (skb);
np->tx_skbuff[i] = NULL;
- np->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
}
}
np->cur_tx = np->dirty_tx = 0;
@@ -1181,15 +1180,15 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
if (netif_msg_tx_err(np))
printk("%s: Transmit error status %4.4x.\n",
dev->name, tx_status);
- np->stats.tx_errors++;
+ dev->stats.tx_errors++;
if (tx_status & 0x10)
- np->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
if (tx_status & 0x08)
- np->stats.collisions++;
+ dev->stats.collisions++;
if (tx_status & 0x04)
- np->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
if (tx_status & 0x02)
- np->stats.tx_window_errors++;
+ dev->stats.tx_window_errors++;
/*
** This reset has been verified on
@@ -1313,11 +1312,15 @@ static void rx_poll(unsigned long data)
if (netif_msg_rx_err(np))
printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
frame_status);
- np->stats.rx_errors++;
- if (frame_status & 0x00100000) np->stats.rx_length_errors++;
- if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
- if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
- if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
+ dev->stats.rx_errors++;
+ if (frame_status & 0x00100000)
+ dev->stats.rx_length_errors++;
+ if (frame_status & 0x00010000)
+ dev->stats.rx_fifo_errors++;
+ if (frame_status & 0x00060000)
+ dev->stats.rx_frame_errors++;
+ if (frame_status & 0x00080000)
+ dev->stats.rx_crc_errors++;
if (frame_status & 0x00100000) {
printk(KERN_WARNING "%s: Oversized Ethernet frame,"
" status %8.8x.\n",
@@ -1485,22 +1488,22 @@ static struct net_device_stats *get_stats(struct net_device *dev)
the vulnerability window is very small and statistics are
non-critical. */
/* The chip only need report frame silently dropped. */
- np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
- np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
- np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
- np->stats.collisions += ioread8(ioaddr + StatsLateColl);
- np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
- np->stats.collisions += ioread8(ioaddr + StatsOneColl);
- np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
+ dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
+ dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
+ dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
+ dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
+ dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
+ dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
+ dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
ioread8(ioaddr + StatsTxDefer);
for (i = StatsTxDefer; i <= StatsMcastRx; i++)
ioread8(ioaddr + i);
- np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
- np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
- np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
- np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
+ dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
+ dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
+ dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
+ dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
- return &np->stats;
+ return &dev->stats;
}
static void set_rx_mode(struct net_device *dev)
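
The sundance changes above retire the driver-private struct net_device_stats copy in favour of the counters embedded in struct net_device itself. A small sketch of the pattern; the function name and the crc_bad flag are made up for illustration.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>

/* With counters kept in dev->stats there is no private copy to keep in
 * sync; the core falls back to dev->stats when no get_stats hook is
 * provided, so the driver can simply bump the shared counters.
 */
static void ex_count_rx(struct net_device *dev, struct sk_buff *skb,
			bool crc_bad)
{
	if (crc_bad) {
		dev->stats.rx_errors++;
		dev->stats.rx_crc_errors++;
		return;
	}

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
}
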
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 4e9bd380a5c..4ef729198e1 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2275,7 +2275,7 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq(&hp->happy_lock);
printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
entry = hp->tx_new;
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 0ce2db6ce2b..d737f6b8f87 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -688,14 +688,11 @@ static void tc_handle_link_change(struct net_device *dev)
if (status_change && netif_msg_link(lp)) {
phy_print_status(phydev);
-#ifdef DEBUG
- printk(KERN_DEBUG
- "%s: MII BMCR %04x BMSR %04x LPA %04x\n",
- dev->name,
- phy_read(phydev, MII_BMCR),
- phy_read(phydev, MII_BMSR),
- phy_read(phydev, MII_LPA));
-#endif
+ pr_debug("%s: MII BMCR %04x BMSR %04x LPA %04x\n",
+ dev->name,
+ phy_read(phydev, MII_BMCR),
+ phy_read(phydev, MII_BMSR),
+ phy_read(phydev, MII_LPA));
}
}
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 7f4a9683ba1..3c2679cd196 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -948,8 +948,7 @@ static void print_rxfd(struct rxf_desc *rxfd);
static void bdx_rxdb_destroy(struct rxdb *db)
{
- if (db)
- vfree(db);
+ vfree(db);
}
static struct rxdb *bdx_rxdb_create(int nelem)
@@ -1482,10 +1481,8 @@ static void bdx_tx_db_close(struct txdb *d)
{
BDX_ASSERT(d == NULL);
- if (d->start) {
- vfree(d->start);
- d->start = NULL;
- }
+ vfree(d->start);
+ d->start = NULL;
}
/*************************************************************************
@@ -1718,8 +1715,9 @@ static int bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev)
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
#endif
- ndev->trans_start = jiffies;
-
+#ifdef BDX_LLTX
+ ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+#endif
priv->net_stats.tx_packets++;
priv->net_stats.tx_bytes += skb->len;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 201be425643..46a3f86125b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.98"
-#define DRV_MODULE_RELDATE "February 25, 2009"
+#define DRV_MODULE_VERSION "3.99"
+#define DRV_MODULE_RELDATE "April 20, 2009"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -1950,7 +1950,8 @@ static void tg3_frob_aux_power(struct tg3 *tp)
GRC_LCLCTRL_GPIO_OUTPUT0 |
GRC_LCLCTRL_GPIO_OUTPUT1),
100);
- } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
+ } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
GRC_LCLCTRL_GPIO_OE1 |
@@ -2455,8 +2456,6 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
}
}
- __tg3_set_mac_addr(tp, 0);
-
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
u32 val;
@@ -4656,6 +4655,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
* so we must read it before checking for more work.
*/
tp->last_tag = sblk->status_tag;
+ tp->last_irq_tag = tp->last_tag;
rmb();
} else
sblk->status &= ~SD_STATUS_UPDATED;
@@ -4811,7 +4811,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
* Reading the PCI State register will confirm whether the
* interrupt is ours and will flush the status block.
*/
- if (unlikely(sblk->status_tag == tp->last_tag)) {
+ if (unlikely(sblk->status_tag == tp->last_irq_tag)) {
if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
handled = 0;
@@ -4831,18 +4831,22 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
* excessive spurious interrupts can be worse in some cases.
*/
tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+
+ /*
+ * In a shared interrupt configuration, sometimes other devices'
+ * interrupts will scream. We record the current status tag here
+ * so that the above check can report that the screaming interrupts
+ * are unhandled. Eventually they will be silenced.
+ */
+ tp->last_irq_tag = sblk->status_tag;
+
if (tg3_irq_sync(tp))
goto out;
- if (napi_schedule_prep(&tp->napi)) {
- prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
- /* Update last_tag to mark that this status has been
- * seen. Because interrupt may be shared, we may be
- * racing with tg3_poll(), so only update last_tag
- * if tg3_poll() is not scheduled.
- */
- tp->last_tag = sblk->status_tag;
- __napi_schedule(&tp->napi);
- }
+
+ prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+
+ napi_schedule(&tp->napi);
+
out:
return IRQ_RETVAL(handled);
}
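
The last_irq_tag bookkeeping introduced above is what lets the tagged-status handler distinguish a fresh status block from a screaming shared interrupt line. A reduced sketch of the idea; the ex_* structures and field names are illustrative, not tg3's.

#include <linux/types.h>

/* Simplified view of tag-based interrupt deduplication.  The hardware
 * bumps status->tag every time it posts a new status block; the driver
 * remembers the last tag it acknowledged from hard-irq context.
 */
struct ex_status {
	u32 tag;		/* written by the NIC */
};

struct ex_nic {
	struct ex_status *status;
	u32 last_irq_tag;	/* last tag seen in the irq handler */
	u32 last_tag;		/* last tag fully processed by NAPI poll */
};

/* Returns true if this interrupt carries work we have not yet seen. */
static bool ex_irq_has_new_work(struct ex_nic *nic)
{
	u32 tag = nic->status->tag;

	if (tag == nic->last_irq_tag)
		return false;	/* stale or shared-line interrupt: not ours */

	/* Record the tag in hard-irq context so a line that keeps firing
	 * without new work is reported as unhandled; NAPI does the rest. */
	nic->last_irq_tag = tag;
	return true;
}

Because the tag is recorded in the interrupt handler rather than in the NAPI poll, the kernel's spurious-interrupt logic can eventually silence a screaming shared line.
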
@@ -5017,7 +5021,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
/* New SKB is guaranteed to be linear. */
entry = *start;
ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
- new_addr = skb_shinfo(new_skb)->dma_maps[0];
+ new_addr = skb_shinfo(new_skb)->dma_head;
/* Make sure new skb does not cross any 4G boundaries.
* Drop the packet if it does.
@@ -5151,7 +5155,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
sp = skb_shinfo(skb);
- mapping = sp->dma_maps[0];
+ mapping = sp->dma_head;
tp->tx_buffers[entry].skb = skb;
@@ -5169,7 +5173,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = frag->size;
- mapping = sp->dma_maps[i + 1];
+ mapping = sp->dma_maps[i];
tp->tx_buffers[entry].skb = NULL;
tg3_set_txd(tp, entry, mapping, len,
@@ -5190,9 +5194,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
out_unlock:
- mmiowb();
-
- dev->trans_start = jiffies;
+ mmiowb();
return NETDEV_TX_OK;
}
@@ -5329,7 +5331,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
sp = skb_shinfo(skb);
- mapping = sp->dma_maps[0];
+ mapping = sp->dma_head;
tp->tx_buffers[entry].skb = skb;
@@ -5354,7 +5356,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = frag->size;
- mapping = sp->dma_maps[i + 1];
+ mapping = sp->dma_maps[i];
tp->tx_buffers[entry].skb = NULL;
@@ -5403,9 +5405,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
}
out_unlock:
- mmiowb();
-
- dev->trans_start = jiffies;
+ mmiowb();
return NETDEV_TX_OK;
}
@@ -6156,6 +6156,7 @@ static int tg3_chip_reset(struct tg3 *tp)
tp->hw_status->status_tag = 0;
}
tp->last_tag = 0;
+ tp->last_irq_tag = 0;
smp_mb();
synchronize_irq(tp->pdev->irq);
@@ -6350,6 +6351,8 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
tg3_abort_hw(tp, silent);
err = tg3_chip_reset(tp);
+ __tg3_set_mac_addr(tp, 0);
+
tg3_write_sig_legacy(tp, kind);
tg3_write_sig_post_reset(tp, kind);
@@ -6711,6 +6714,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3_CPMU_HST_ACC, val);
}
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
+ val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
+ PCIE_PWR_MGMT_L1_THRESH_4MS;
+ tw32(PCIE_PWR_MGMT_THRESH, val);
+ }
+
/* This works around an issue with Athlon chipsets on
* B3 tigon3 silicon. This bit has no effect on any
* other revision. But do not set this on PCI Express
@@ -7138,7 +7148,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(100);
tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
- tp->last_tag = 0;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -8539,6 +8548,9 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u32 i, offset, len, b_offset, b_count;
__be32 val;
+ if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
+ return -EINVAL;
+
if (tp->link_config.phy_is_low_power)
return -EAGAIN;
@@ -8604,7 +8616,8 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
if (tp->link_config.phy_is_low_power)
return -EAGAIN;
- if (eeprom->magic != TG3_EEPROM_MAGIC)
+ if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
+ eeprom->magic != TG3_EEPROM_MAGIC)
return -EINVAL;
offset = eeprom->offset;
@@ -9201,6 +9214,9 @@ static int tg3_test_nvram(struct tg3 *tp)
__be32 *buf;
int i, j, k, err = 0, size;
+ if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
+ return 0;
+
if (tg3_nvram_read(tp, 0, &magic) != 0)
return -EIO;
@@ -10183,7 +10199,8 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
u32 val;
- if (tg3_nvram_read(tp, 0, &val) != 0)
+ if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
+ tg3_nvram_read(tp, 0, &val) != 0)
return;
/* Selfboot format */
@@ -10565,6 +10582,7 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
}
break;
default:
+ tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
return;
}
@@ -11365,7 +11383,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
unsigned int i;
u32 magic;
- if (tg3_nvram_read(tp, 0x0, &magic))
+ if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
+ tg3_nvram_read(tp, 0x0, &magic))
goto out_not_found;
if (magic == TG3_EEPROM_MAGIC) {
@@ -11457,6 +11476,15 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
out_not_found:
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
strcpy(tp->board_part_number, "BCM95906");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
+ strcpy(tp->board_part_number, "BCM57780");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
+ strcpy(tp->board_part_number, "BCM57760");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
+ strcpy(tp->board_part_number, "BCM57790");
else
strcpy(tp->board_part_number, "none");
}
@@ -11667,6 +11695,14 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
u32 val;
+ if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
+ tp->fw_ver[0] = 's';
+ tp->fw_ver[1] = 'b';
+ tp->fw_ver[2] = '\0';
+
+ return;
+ }
+
if (tg3_nvram_read(tp, 0, &val))
return;
@@ -11952,7 +11988,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
}
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
@@ -12144,7 +12181,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
- if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
+ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
/* Turn off the debug UART. */
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
@@ -12454,7 +12492,8 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
}
if (!addr_ok) {
/* Next, try NVRAM. */
- if (!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
+ if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
+ !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
!tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index cb4c62abdd2..b3347c41a1a 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -95,6 +95,8 @@
#define CHIPREV_ID_5752_A1 0x6001
#define CHIPREV_ID_5714_A2 0x9002
#define CHIPREV_ID_5906_A1 0xc001
+#define CHIPREV_ID_57780_A0 0x57780000
+#define CHIPREV_ID_57780_A1 0x57780001
#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
@@ -1697,6 +1699,8 @@
#define PCIE_PWR_MGMT_THRESH 0x00007d28
#define PCIE_PWR_MGMT_L1_THRESH_MSK 0x0000ff00
+#define PCIE_PWR_MGMT_L1_THRESH_4MS 0x0000ff00
+#define PCIE_PWR_MGMT_EXT_ASPM_TMR_EN 0x01000000
/* OTP bit definitions */
@@ -2501,6 +2505,7 @@ struct tg3 {
struct tg3_hw_status *hw_status;
dma_addr_t status_mapping;
u32 last_tag;
+ u32 last_irq_tag;
u32 msg_enable;
@@ -2635,6 +2640,7 @@ struct tg3 {
#define TG3_FLG3_CLKREQ_BUG 0x00000800
#define TG3_FLG3_PHY_ENABLE_APD 0x00001000
#define TG3_FLG3_5755_PLUS 0x00002000
+#define TG3_FLG3_NO_NVRAM 0x00004000
struct timer_list timer;
u16 timer_counter;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index aa6964922d5..384cb5e2839 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1111,7 +1111,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
dev->name, priv->txHead, priv->txTail );
netif_stop_queue(dev);
priv->txBusyCount++;
- return 1;
+ return NETDEV_TX_BUSY;
}
tail_list->forward = 0;
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 534c0f38483..b40b6de2d08 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -79,7 +79,7 @@ MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
MODULE_FIRMWARE(FW_NAME);
-/* Module paramters */
+/* Module parameters */
/* Ring Speed 0,4,16
* 0 = Autosense
@@ -1243,7 +1243,7 @@ static int xl_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
} else {
spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ;
- return 1;
+ return NETDEV_TX_BUSY;
}
}
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 2e70ee8f145..b3715efdce5 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -169,7 +169,7 @@ static char *open_min_error[] = {
"Monitor Contention failer for RPL", "FDX Protocol Error"
};
-/* Module paramters */
+/* Module parameters */
/* Ring Speed 0,4,16
* 0 = Autosense
@@ -1187,7 +1187,7 @@ static int streamer_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
netif_stop_queue(dev);
spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
}
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index d068a9d3688..451b54136ed 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -132,7 +132,7 @@ static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost",
"Reserved", "Reserved", "No Monitor Detected for RPL",
"Monitor Contention failer for RPL", "FDX Protocol Error"};
-/* Module paramters */
+/* Module parameters */
MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
@@ -1055,7 +1055,7 @@ static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
} else {
spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
}
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index a91d9c55d78..54ad4ed0337 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -4601,7 +4601,7 @@ static int smctr_send_packet(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
if(tp->QueueSkb == 0)
- return (1); /* Return with tbusy set: queue full */
+ return NETDEV_TX_BUSY; /* Return with tbusy set: queue full */
tp->QueueSkb--;
skb_queue_tail(&tp->SendSkbQueue, skb);
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index b11bb72dc7a..a2eab72b507 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -633,7 +633,7 @@ static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device
if (tms380tr_debug > 0)
printk(KERN_DEBUG "%s: No free TPL\n", dev->name);
spin_unlock_irqrestore(&tp->lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
dmabuf = 0;
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index d913405bc39..1cc8cf4425d 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -27,6 +27,18 @@ config DE2104X
To compile this driver as a module, choose M here. The module will
be called de2104x.
+config DE2104X_DSL
+ int "Descriptor Skip Length in 32 bit longwords"
+ depends on DE2104X
+ range 0 31
+ default 0
+ help
+ Setting this value allows to align ring buffer descriptors into their
+ own cache lines. Value of 4 corresponds to the typical 32 byte line
+ (the descriptor is 16 bytes). This is necessary on systems that lack
+ cache coherence, an example is PowerMac 5500. Otherwise 0 is safe.
+ Default is 0, and range is 0 to 31.
+
config TULIP
tristate "DECchip Tulip (dc2114x) PCI support"
depends on PCI
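
The DE2104X_DSL option added above pads every ring descriptor with extra 32-bit words so that, on non-coherent systems, a descriptor never shares a cache line with its neighbour; the same value, shifted into the skip-length field, is programmed into the bus-mode register (CSR0), as the de2104x.c hunks below show. A self-contained illustration of the arithmetic; the ex_* names and the EX_DSL value are assumptions made for the example.

#include <linux/kernel.h>
#include <linux/types.h>

#define EX_DSL 4	/* descriptor skip length in 32-bit words */

/* Four mandatory 32-bit words (16 bytes) plus EX_DSL padding words:
 * with EX_DSL == 4 each descriptor occupies a full 32-byte cache line.
 */
struct ex_desc {
	__le32 opts1;
	__le32 opts2;
	__le32 addr1;
	__le32 addr2;
#if EX_DSL
	__le32 skip[EX_DSL];
#endif
};

static inline u32 ex_bus_mode_skip_field(void)
{
	/* Sanity-check the size and place the value in CSR0 bits 6:2. */
	BUILD_BUG_ON(sizeof(struct ex_desc) != 16 + 4 * EX_DSL);
	return EX_DSL << 2;
}

A value of 0 keeps the historic 16-byte layout, which is why it is the safe default.
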
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d4c5ecc51f7..81f054dbb88 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -82,6 +82,13 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
+/* Descriptor skip length in 32 bit longwords. */
+#ifndef CONFIG_DE2104X_DSL
+#define DSL 0
+#else
+#define DSL CONFIG_DE2104X_DSL
+#endif
+
#define DE_RX_RING_SIZE 64
#define DE_TX_RING_SIZE 64
#define DE_RING_BYTES \
@@ -153,6 +160,7 @@ enum {
CmdReset = (1 << 0),
CacheAlign16 = 0x00008000,
BurstLen4 = 0x00000400,
+ DescSkipLen = (DSL << 2),
/* Rx/TxPoll bits */
NormalTxPoll = (1 << 0),
@@ -246,7 +254,7 @@ static const u32 de_intr_mask =
* Set the programmable burst length to 4 longwords for all:
* DMA errors result without these values. Cache align 16 long.
*/
-static const u32 de_bus_mode = CacheAlign16 | BurstLen4;
+static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
struct de_srom_media_block {
u8 opts;
@@ -266,6 +274,9 @@ struct de_desc {
__le32 opts2;
__le32 addr1;
__le32 addr2;
+#if DSL
+ __le32 skip[DSL];
+#endif
};
struct media_info {
@@ -601,7 +612,7 @@ static int de_start_xmit (struct sk_buff *skb, struct net_device *dev)
if (tx_free == 0) {
netif_stop_queue(dev);
spin_unlock_irq(&de->lock);
- return 1;
+ return NETDEV_TX_BUSY;
}
tx_free--;
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index f9491bd787d..eb72d2e9ab3 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1099,7 +1099,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
struct pci_dev *pdev = NULL;
int i, status=0;
- gendev->driver_data = dev;
+ dev_set_drvdata(gendev, dev);
/* Ensure we're not sleeping */
if (lp->bus == EISA) {
@@ -1461,12 +1461,12 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
- int status = 0;
+ int status = NETDEV_TX_OK;
u_long flags = 0;
netif_stop_queue(dev);
if (!lp->tx_enable) { /* Cannot send for now */
- return -1;
+ return NETDEV_TX_LOCKED;
}
/*
@@ -1480,7 +1480,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
/* Test if cache is already locked - requeue skb if so */
if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
- return -1;
+ return NETDEV_TX_LOCKED;
/* Transmit descriptor ring full or stale skb */
if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
@@ -2094,7 +2094,7 @@ static int __devexit de4x5_eisa_remove (struct device *device)
struct net_device *dev;
u_long iobase;
- dev = device->driver_data;
+ dev = dev_get_drvdata(device);
iobase = dev->base_addr;
unregister_netdev (dev);
@@ -2338,7 +2338,7 @@ static void __devexit de4x5_pci_remove (struct pci_dev *pdev)
struct net_device *dev;
u_long iobase;
- dev = pdev->dev.driver_data;
+ dev = dev_get_drvdata(&pdev->dev);
iobase = dev->base_addr;
unregister_netdev (dev);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index f2e669974c7..8e78f003f08 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -686,7 +686,7 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
spin_unlock_irqrestore(&db->lock, flags);
printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
db->tx_queue_cnt);
- return 1;
+ return NETDEV_TX_BUSY;
}
/* Disable NIC interrupt */
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 8761a5a5bd7..9277ce8febe 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -591,7 +591,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
spin_unlock_irqrestore(&db->lock, flags);
printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt);
- return 1;
+ return NETDEV_TX_BUSY;
}
/* Disable NIC interrupt */
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 264e61404f3..842b1a2c40d 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1601,8 +1601,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
/* no more hardware accesses behind this line. */
- BUG_ON(np->csr6);
- if (ioread32(ioaddr + IntrEnable)) BUG();
+ BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));
/* pci_power_off(pdev, -1); */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 735bf41c654..11a0ba47b67 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -540,31 +540,38 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
/* Get packet from user space buffer */
static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
- struct iovec *iv, size_t count,
+ const struct iovec *iv, size_t count,
int noblock)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
size_t len = count, align = 0;
struct virtio_net_hdr gso = { 0 };
+ int offset = 0;
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) > count)
return -EINVAL;
- if(memcpy_fromiovec((void *)&pi, iv, sizeof(pi)))
+ if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
return -EFAULT;
+ offset += sizeof(pi);
}
if (tun->flags & TUN_VNET_HDR) {
if ((len -= sizeof(gso)) > count)
return -EINVAL;
- if (memcpy_fromiovec((void *)&gso, iv, sizeof(gso)))
+ if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
return -EFAULT;
+ if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+ gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
+ gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
+
if (gso.hdr_len > len)
return -EINVAL;
+ offset += sizeof(gso);
}
if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
@@ -581,7 +588,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
return PTR_ERR(skb);
}
- if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
+ if (skb_copy_datagram_from_iovec(skb, 0, iv, offset, len)) {
tun->dev->stats.rx_dropped++;
kfree_skb(skb);
return -EFAULT;
@@ -673,7 +680,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
- result = tun_get_user(tun, (struct iovec *)iv, iov_length(iv, count),
+ result = tun_get_user(tun, iv, iov_length(iv, count),
file->f_flags & O_NONBLOCK);
tun_put(tun);
@@ -683,7 +690,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
/* Put packet to the user space buffer */
static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
struct sk_buff *skb,
- struct iovec *iv, int len)
+ const struct iovec *iv, int len)
{
struct tun_pi pi = { 0, skb->protocol };
ssize_t total = 0;
@@ -697,7 +704,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
pi.flags |= TUN_PKT_STRIP;
}
- if (memcpy_toiovec(iv, (void *) &pi, sizeof(pi)))
+ if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
return -EFAULT;
total += sizeof(pi);
}
@@ -730,14 +737,15 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
gso.csum_offset = skb->csum_offset;
} /* else everything is zero */
- if (unlikely(memcpy_toiovec(iv, (void *)&gso, sizeof(gso))))
+ if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
+ sizeof(gso))))
return -EFAULT;
total += sizeof(gso);
}
len = min_t(int, skb->len, len);
- skb_copy_datagram_iovec(skb, 0, iv, len);
+ skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
total += len;
tun->dev->stats.tx_packets++;
@@ -792,7 +800,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
}
netif_wake_queue(tun->dev);
- ret = tun_put_user(tun, skb, (struct iovec *) iv, len);
+ ret = tun_put_user(tun, skb, iv, len);
kfree_skb(skb);
break;
}
@@ -840,12 +848,12 @@ static void tun_sock_write_space(struct sock *sk)
if (!sock_writeable(sk))
return;
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync(sk->sk_sleep);
-
if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_sync(sk->sk_sleep);
+
tun = container_of(sk, struct tun_sock, sk)->tun;
kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
}
@@ -861,6 +869,52 @@ static struct proto tun_proto = {
.obj_size = sizeof(struct tun_sock),
};
+static int tun_flags(struct tun_struct *tun)
+{
+ int flags = 0;
+
+ if (tun->flags & TUN_TUN_DEV)
+ flags |= IFF_TUN;
+ else
+ flags |= IFF_TAP;
+
+ if (tun->flags & TUN_NO_PI)
+ flags |= IFF_NO_PI;
+
+ if (tun->flags & TUN_ONE_QUEUE)
+ flags |= IFF_ONE_QUEUE;
+
+ if (tun->flags & TUN_VNET_HDR)
+ flags |= IFF_VNET_HDR;
+
+ return flags;
+}
+
+static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tun_struct *tun = netdev_priv(to_net_dev(dev));
+ return sprintf(buf, "0x%x\n", tun_flags(tun));
+}
+
+static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tun_struct *tun = netdev_priv(to_net_dev(dev));
+ return sprintf(buf, "%d\n", tun->owner);
+}
+
+static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tun_struct *tun = netdev_priv(to_net_dev(dev));
+ return sprintf(buf, "%d\n", tun->group);
+}
+
+static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
+static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
+static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
+
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
struct sock *sk;
@@ -870,6 +924,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
dev = __dev_get_by_name(net, ifr->ifr_name);
if (dev) {
+ if (ifr->ifr_flags & IFF_TUN_EXCL)
+ return -EBUSY;
if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
tun = netdev_priv(dev);
else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
@@ -944,6 +1000,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (err < 0)
goto err_free_sk;
+ if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
+ device_create_file(&tun->dev->dev, &dev_attr_owner) ||
+ device_create_file(&tun->dev->dev, &dev_attr_group))
+ printk(KERN_ERR "Failed to create tun sysfs files\n");
+
sk->sk_destruct = tun_sock_destruct;
err = tun_attach(tun, file);
@@ -996,21 +1057,7 @@ static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
strcpy(ifr->ifr_name, tun->dev->name);
- ifr->ifr_flags = 0;
-
- if (ifr->ifr_flags & TUN_TUN_DEV)
- ifr->ifr_flags |= IFF_TUN;
- else
- ifr->ifr_flags |= IFF_TAP;
-
- if (tun->flags & TUN_NO_PI)
- ifr->ifr_flags |= IFF_NO_PI;
-
- if (tun->flags & TUN_ONE_QUEUE)
- ifr->ifr_flags |= IFF_ONE_QUEUE;
-
- if (tun->flags & TUN_VNET_HDR)
- ifr->ifr_flags |= IFF_VNET_HDR;
+ ifr->ifr_flags = tun_flags(tun);
tun_put(tun);
return 0;
@@ -1275,21 +1322,22 @@ static int tun_chr_open(struct inode *inode, struct file * file)
static int tun_chr_close(struct inode *inode, struct file *file)
{
struct tun_file *tfile = file->private_data;
- struct tun_struct *tun = __tun_get(tfile);
+ struct tun_struct *tun;
+ rtnl_lock();
+ tun = __tun_get(tfile);
if (tun) {
DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
- rtnl_lock();
__tun_detach(tun);
/* If desireable, unregister the netdevice. */
if (!(tun->flags & TUN_PERSIST))
unregister_netdevice(tun->dev);
- rtnl_unlock();
}
+ rtnl_unlock();
tun = tfile->tun;
if (tun)
@@ -1318,6 +1366,7 @@ static const struct file_operations tun_fops = {
static struct miscdevice tun_miscdev = {
.minor = TUN_MINOR,
.name = "tun",
+ .devnode = "net/tun",
.fops = &tun_fops,
};
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 44f8392da11..e2f2e91cfdd 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2007 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006-2009 Freescale Semicondutor, Inc. All rights reserved.
*
* Author: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
@@ -27,6 +27,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
+#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <asm/uaccess.h>
@@ -64,6 +65,8 @@
static DEFINE_SPINLOCK(ugeth_lock);
+static void uec_configure_serdes(struct net_device *dev);
+
static struct {
u32 msg_enable;
} debug = { -1 };
@@ -270,7 +273,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
u8 num_entries,
u32 thread_size,
u32 thread_alignment,
- enum qe_risc_allocation risc,
+ unsigned int risc,
int skip_page_for_first_entry)
{
u32 init_enet_offset;
@@ -307,7 +310,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
static int return_init_enet_entries(struct ucc_geth_private *ugeth,
u32 *p_start,
u8 num_entries,
- enum qe_risc_allocation risc,
+ unsigned int risc,
int skip_page_for_first_entry)
{
u32 init_enet_offset;
@@ -342,7 +345,7 @@ static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
u32 __iomem *p_start,
u8 num_entries,
u32 thread_size,
- enum qe_risc_allocation risc,
+ unsigned int risc,
int skip_page_for_first_entry)
{
u32 init_enet_offset;
@@ -1409,6 +1412,9 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UCC_GETH_UPSMR_TBIM;
}
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII))
+ upsmr |= UCC_GETH_UPSMR_SGMM;
+
out_be32(&uf_regs->upsmr, upsmr);
/* Disable autonegotiation in tbi mode, because by default it
@@ -1543,14 +1549,19 @@ static int init_phy(struct net_device *dev)
priv->oldspeed = 0;
priv->oldduplex = -1;
- phydev = phy_connect(dev, ug_info->phy_bus_id, &adjust_link, 0,
- priv->phy_interface);
+ if (!ug_info->phy_node)
+ return 0;
- if (IS_ERR(phydev)) {
+ phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
+ priv->phy_interface);
+ if (!phydev) {
printk("%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(phydev);
+ return -ENODEV;
}
+ if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ uec_configure_serdes(dev);
+
phydev->supported &= (ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
@@ -1566,7 +1577,41 @@ static int init_phy(struct net_device *dev)
return 0;
}
+/* Initialize TBI PHY interface for communicating with the
+ * SERDES lynx PHY on the chip. We communicate with this PHY
+ * through the MDIO bus on each controller, treating it as a
+ * "normal" PHY at the address found in the UTBIPA register. We assume
+ * that the UTBIPA register is valid. Either the MDIO bus code will set
+ * it to a value that doesn't conflict with other PHYs on the bus, or the
+ * value doesn't matter, as there are no other PHYs on the bus.
+ */
+static void uec_configure_serdes(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ if (!ugeth->tbiphy) {
+ printk(KERN_WARNING "SGMII mode requires that the device "
+ "tree specify a tbi-handle\n");
+ return;
+ }
+
+ /*
+ * If the link is already up, we must already be ok, and don't need to
+ * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
+ * everything for us? Resetting it takes the link down and requires
+ * several seconds for it to come back.
+ */
+ if (phy_read(ugeth->tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
+ return;
+
+ /* Single clk mode, mii mode off(for serdes communication) */
+ phy_write(ugeth->tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
+
+ phy_write(ugeth->tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
+
+ phy_write(ugeth->tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
+
+}
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
{
@@ -2135,6 +2180,14 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
+ /* read the number of risc engines, update the riscTx and riscRx
+ * if there are 4 riscs in QE
+ */
+ if (qe_get_num_of_risc() == 4) {
+ ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS;
+ ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS;
+ }
+
ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
if (!ugeth->ug_regs) {
if (netif_msg_probe(ugeth))
@@ -3217,7 +3270,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
dev->stats.tx_packets++;
/* Free the sk buffer associated with this TxBD */
- dev_kfree_skb_irq(ugeth->
+ dev_kfree_skb(ugeth->
tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
ugeth->skb_dirtytx[txQ] =
@@ -3251,9 +3304,15 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
for (i = 0; i < ug_info->numQueuesRx; i++)
howmany += ucc_geth_rx(ugeth, i, budget - howmany);
+ /* Tx event processing */
+ spin_lock(&ugeth->lock);
+ for (i = 0; i < ug_info->numQueuesTx; i++)
+ ucc_geth_tx(ugeth->ndev, i);
+ spin_unlock(&ugeth->lock);
+
if (howmany < budget) {
napi_complete(napi);
- setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS);
+ setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
}
return howmany;
@@ -3267,8 +3326,6 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
struct ucc_geth_info *ug_info;
register u32 ucce;
register u32 uccm;
- register u32 tx_mask;
- u8 i;
ugeth_vdbg("%s: IN", __func__);
@@ -3282,27 +3339,14 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
out_be32(uccf->p_ucce, ucce);
/* check for receive events that require processing */
- if (ucce & UCCE_RX_EVENTS) {
+ if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
if (napi_schedule_prep(&ugeth->napi)) {
- uccm &= ~UCCE_RX_EVENTS;
+ uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
out_be32(uccf->p_uccm, uccm);
__napi_schedule(&ugeth->napi);
}
}
- /* Tx event processing */
- if (ucce & UCCE_TX_EVENTS) {
- spin_lock(&ugeth->lock);
- tx_mask = UCC_GETH_UCCE_TXB0;
- for (i = 0; i < ug_info->numQueuesTx; i++) {
- if (ucce & tx_mask)
- ucc_geth_tx(dev, i);
- ucce &= ~tx_mask;
- tx_mask <<= 1;
- }
- spin_unlock(&ugeth->lock);
- }
-
/* Errors and other events */
if (ucce & UCCE_OTHER) {
if (ucce & UCC_GETH_UCCE_BSY)
@@ -3331,6 +3375,37 @@ static void ucc_netpoll(struct net_device *dev)
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
+static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ /*
+ * If device is not running, we will set mac addr register
+ * when opening the device.
+ */
+ if (!netif_running(dev))
+ return 0;
+
+ spin_lock_irq(&ugeth->lock);
+ init_mac_station_addr_regs(dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5],
+ &ugeth->ug_regs->macstnaddr1,
+ &ugeth->ug_regs->macstnaddr2);
+ spin_unlock_irq(&ugeth->lock);
+
+ return 0;
+}
+
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
@@ -3498,6 +3573,8 @@ static phy_interface_t to_phy_interface(const char *phy_connection_type)
return PHY_INTERFACE_MODE_RGMII_RXID;
if (strcasecmp(phy_connection_type, "rtbi") == 0)
return PHY_INTERFACE_MODE_RTBI;
+ if (strcasecmp(phy_connection_type, "sgmii") == 0)
+ return PHY_INTERFACE_MODE_SGMII;
return PHY_INTERFACE_MODE_MII;
}
@@ -3507,7 +3584,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_stop = ucc_geth_close,
.ndo_start_xmit = ucc_geth_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_multicast_list = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
@@ -3520,14 +3597,12 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
{
struct device *device = &ofdev->dev;
struct device_node *np = ofdev->node;
- struct device_node *mdio;
struct net_device *dev = NULL;
struct ucc_geth_private *ugeth = NULL;
struct ucc_geth_info *ug_info;
struct resource res;
struct device_node *phy;
int err, ucc_num, max_speed = 0;
- const phandle *ph;
const u32 *fixed_link;
const unsigned int *prop;
const char *sprop;
@@ -3544,6 +3619,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
+ PHY_INTERFACE_MODE_SGMII,
};
ugeth_vdbg("%s: IN", __func__);
@@ -3627,40 +3703,13 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
fixed_link = of_get_property(np, "fixed-link", NULL);
if (fixed_link) {
- snprintf(ug_info->phy_bus_id, sizeof(ug_info->phy_bus_id),
- PHY_ID_FMT, "0", fixed_link[0]);
phy = NULL;
} else {
- char bus_name[MII_BUS_ID_SIZE];
-
- ph = of_get_property(np, "phy-handle", NULL);
- phy = of_find_node_by_phandle(*ph);
-
+ phy = of_parse_phandle(np, "phy-handle", 0);
if (phy == NULL)
return -ENODEV;
-
- /* set the PHY address */
- prop = of_get_property(phy, "reg", NULL);
- if (prop == NULL)
- return -1;
-
- /* Set the bus id */
- mdio = of_get_parent(phy);
-
- if (mdio == NULL)
- return -ENODEV;
-
- err = of_address_to_resource(mdio, 0, &res);
-
- if (err) {
- of_node_put(mdio);
- return err;
- }
- fsl_pq_mdio_bus_name(bus_name, mdio);
- of_node_put(mdio);
- snprintf(ug_info->phy_bus_id, sizeof(ug_info->phy_bus_id),
- "%s:%02x", bus_name, *prop);
}
+ ug_info->phy_node = phy;
/* get the phy interface type, or default to MII */
prop = of_get_property(np, "phy-connection-type", NULL);
@@ -3686,6 +3735,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_TBI:
case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_SGMII:
max_speed = SPEED_1000;
break;
default:
@@ -3702,7 +3752,15 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
- ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
+
+ /* If QE's snum number is 46 which means we need to support
+ * 4 UECs at 1000Base-T simultaneously, we need to allocate
+ * more Threads to Rx.
+ */
+ if (qe_get_num_of_snums() == 46)
+ ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
+ else
+ ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
}
if (netif_msg_probe(&debug))
@@ -3735,7 +3793,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
dev->netdev_ops = &ucc_geth_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
- netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
+ netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
dev->mtu = 1500;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
@@ -3760,6 +3818,37 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
ugeth->ndev = dev;
ugeth->node = np;
+ /* Find the TBI PHY. If it's not there, we don't support SGMII */
+ ph = of_get_property(np, "tbi-handle", NULL);
+ if (ph) {
+ struct device_node *tbi = of_find_node_by_phandle(*ph);
+ struct of_device *ofdev;
+ struct mii_bus *bus;
+ const unsigned int *id;
+
+ if (!tbi)
+ return 0;
+
+ mdio = of_get_parent(tbi);
+ if (!mdio)
+ return 0;
+
+ ofdev = of_find_device_by_node(mdio);
+
+ of_node_put(mdio);
+
+ id = of_get_property(tbi, "reg", NULL);
+ if (!id)
+ return 0;
+ of_node_put(tbi);
+
+ bus = dev_get_drvdata(&ofdev->dev);
+ if (!bus)
+ return 0;
+
+ ugeth->tbiphy = bus->phy_map[*id];
+ }
+
return 0;
}
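
A minimal sketch, not part of the patch, of the of_parse_phandle() lookup the probe code above switches to; the example_* helper is hypothetical and error handling is trimmed. One call replaces the of_get_property("phy-handle") plus of_find_node_by_phandle() pair and hands back the node with its refcount held, so the caller eventually drops it with of_node_put().

#include <linux/of.h>

/* Hypothetical helper: resolve the PHY node referenced by "phy-handle". */
static struct device_node *example_get_phy_node(struct device_node *np)
{
	/* Returns the referenced node with a reference held, or NULL. */
	struct device_node *phy = of_parse_phandle(np, "phy-handle", 0);

	if (!phy)
		return NULL;	/* no PHY described in the device tree */

	/* Caller stores the node and later releases it with of_node_put(). */
	return phy;
}
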
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 2f8ee7c87ef..5beba4c1453 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
+ * Copyright (C) Freescale Semicondutor, Inc. 2006-2009. All rights reserved.
*
* Author: Shlomi Gridish <gridish@freescale.com>
*
@@ -193,6 +193,31 @@ struct ucc_geth {
#define ENET_TBI_MII_JD 0x10 /* Jitter diagnostics */
#define ENET_TBI_MII_TBICON 0x11 /* TBI control */
+/* TBI MDIO register bit fields*/
+#define TBISR_LSTATUS 0x0004
+#define TBICON_CLK_SELECT 0x0020
+#define TBIANA_ASYMMETRIC_PAUSE 0x0100
+#define TBIANA_SYMMETRIC_PAUSE 0x0080
+#define TBIANA_HALF_DUPLEX 0x0040
+#define TBIANA_FULL_DUPLEX 0x0020
+#define TBICR_PHY_RESET 0x8000
+#define TBICR_ANEG_ENABLE 0x1000
+#define TBICR_RESTART_ANEG 0x0200
+#define TBICR_FULL_DUPLEX 0x0100
+#define TBICR_SPEED1_SET 0x0040
+
+#define TBIANA_SETTINGS ( \
+ TBIANA_ASYMMETRIC_PAUSE \
+ | TBIANA_SYMMETRIC_PAUSE \
+ | TBIANA_FULL_DUPLEX \
+ )
+#define TBICR_SETTINGS ( \
+ TBICR_PHY_RESET \
+ | TBICR_ANEG_ENABLE \
+ | TBICR_FULL_DUPLEX \
+ | TBICR_SPEED1_SET \
+ )
+
/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
Rx */
@@ -852,7 +877,6 @@ struct ucc_geth_hardware_statistics {
/* Driver definitions */
#define TX_BD_RING_LEN 0x10
#define RX_BD_RING_LEN 0x10
-#define UCC_GETH_DEV_WEIGHT TX_BD_RING_LEN
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
@@ -1100,7 +1124,7 @@ struct ucc_geth_info {
u32 eventRegMask;
u16 pausePeriod;
u16 extensionField;
- char phy_bus_id[BUS_ID_SIZE];
+ struct device_node *phy_node;
u8 weightfactor[NUM_TX_QUEUES];
u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
@@ -1120,8 +1144,8 @@ struct ucc_geth_info {
enum ucc_geth_maccfg2_pad_and_crc_mode padAndCrc;
enum ucc_geth_num_of_threads numThreadsTx;
enum ucc_geth_num_of_threads numThreadsRx;
- enum qe_risc_allocation riscTx;
- enum qe_risc_allocation riscRx;
+ unsigned int riscTx;
+ unsigned int riscRx;
};
/* structure representing UCC GETH */
@@ -1189,6 +1213,7 @@ struct ucc_geth_private {
struct ugeth_mii_info *mii_info;
struct phy_device *phydev;
+ struct phy_device *tbiphy;
phy_interface_t phy_interface;
int max_speed;
uint32_t msg_enable;
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 6fcb500257b..61fe80dda3e 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -7,7 +7,7 @@
*
* Limitation:
 * Can only get/set settings of the first queue.
- * Need to re-open the interface manually after changing some paramters.
+ * Need to re-open the interface manually after changing some parameters.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index dfc6cf765fb..3717569828b 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -359,4 +359,12 @@ config USB_HSO
To compile this driver as a module, choose M here: the
module will be called hso.
+config USB_NET_INT51X1
+ tristate "Intellon PLC based usb adapter"
+ depends on USB_USBNET
+ help
+ Choose this option if you're using a 14Mb USB-based PLC
+ (Powerline Communications) solution with an Intellon
+ INT51x1/INT5200 chip, like the "devolo dLan duo".
+
endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index c8aef62cf2b..b870b0b1cbe 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -20,4 +20,5 @@ obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o
obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
obj-$(CONFIG_USB_USBNET) += usbnet.o
+obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 55e8ecc3a9e..01fd528306e 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -25,7 +25,6 @@
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
@@ -389,36 +388,6 @@ static void cdc_status(struct usbnet *dev, struct urb *urb)
}
}
-static u8 nibble(unsigned char c)
-{
- if (likely(isdigit(c)))
- return c - '0';
- c = toupper(c);
- if (likely(isxdigit(c)))
- return 10 + c - 'A';
- return 0;
-}
-
-static inline int
-get_ethernet_addr(struct usbnet *dev, struct usb_cdc_ether_desc *e)
-{
- int tmp, i;
- unsigned char buf [13];
-
- tmp = usb_string(dev->udev, e->iMACAddress, buf, sizeof buf);
- if (tmp != 12) {
- dev_dbg(&dev->udev->dev,
- "bad MAC string %d fetch, %d\n", e->iMACAddress, tmp);
- if (tmp >= 0)
- tmp = -EINVAL;
- return tmp;
- }
- for (i = tmp = 0; i < 6; i++, tmp += 2)
- dev->net->dev_addr [i] =
- (nibble(buf [tmp]) << 4) + nibble(buf [tmp + 1]);
- return 0;
-}
-
static int cdc_bind(struct usbnet *dev, struct usb_interface *intf)
{
int status;
@@ -428,7 +397,7 @@ static int cdc_bind(struct usbnet *dev, struct usb_interface *intf)
if (status < 0)
return status;
- status = get_ethernet_addr(dev, info->ether);
+ status = usbnet_get_ethernet_addr(dev, info->ether->iMACAddress);
if (status < 0) {
usb_set_intfdata(info->data, NULL);
usb_driver_release_interface(driver_of(intf), info->data);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 6fc4f82b0be..7ae82446b93 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -497,10 +497,10 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
int len;
/* format:
- b0: rx status
- b1: packet length (incl crc) low
- b2: packet length (incl crc) high
- b3..n-4: packet data
+ b1: rx status
+ b2: packet length (incl crc) low
+ b3: packet length (incl crc) high
+ b4..n-4: packet data
bn-3..bn: ethernet crc
*/
@@ -533,8 +533,8 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
int len;
/* format:
- b0: packet length low
- b1: packet length high
+ b1: packet length low
+ b2: packet length high
b3..n: packet data
*/
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f84b78d94c4..f8c6d7ea726 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -482,7 +482,7 @@ static ssize_t hso_sysfs_show_porttype(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct hso_device *hso_dev = dev->driver_data;
+ struct hso_device *hso_dev = dev_get_drvdata(dev);
char *port_name;
if (!hso_dev)
@@ -816,7 +816,7 @@ static int hso_net_start_xmit(struct sk_buff *skb, struct net_device *net)
}
dev_kfree_skb(skb);
/* we're done */
- return result;
+ return NETDEV_TX_OK;
}
static void hso_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
@@ -899,15 +899,14 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
continue;
}
/* Allocate an sk_buff */
- odev->skb_rx_buf = dev_alloc_skb(frame_len);
+ odev->skb_rx_buf = netdev_alloc_skb(odev->net,
+ frame_len);
if (!odev->skb_rx_buf) {
/* We got no receive buffer. */
D1("could not allocate memory");
odev->rx_parse_state = WAIT_SYNC;
return;
}
- /* Here's where it came from */
- odev->skb_rx_buf->dev = odev->net;
/* Copy what we got so far. make room for iphdr
* after tail. */
@@ -2313,7 +2312,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
serial->parent->dev = tty_register_device(tty_drv, minor,
&serial->parent->interface->dev);
dev = serial->parent->dev;
- dev->driver_data = serial->parent;
+ dev_set_drvdata(dev, serial->parent);
i = device_create_file(dev, &dev_attr_hsotype);
/* fill in specific data for later use */
@@ -2481,10 +2480,10 @@ static int add_net_device(struct hso_device *hso_dev)
return 0;
}
-static int hso_radio_toggle(void *data, enum rfkill_state state)
+static int hso_rfkill_set_block(void *data, bool blocked)
{
struct hso_device *hso_dev = data;
- int enabled = (state == RFKILL_STATE_ON);
+ int enabled = !blocked;
int rv;
mutex_lock(&hso_dev->mutex);
@@ -2498,6 +2497,10 @@ static int hso_radio_toggle(void *data, enum rfkill_state state)
return rv;
}
+static const struct rfkill_ops hso_rfkill_ops = {
+ .set_block = hso_rfkill_set_block,
+};
+
/* Creates and sets up everything for rfkill */
static void hso_create_rfkill(struct hso_device *hso_dev,
struct usb_interface *interface)
@@ -2506,29 +2509,25 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
struct device *dev = &hso_net->net->dev;
char *rfkn;
- hso_net->rfkill = rfkill_allocate(&interface_to_usbdev(interface)->dev,
- RFKILL_TYPE_WWAN);
- if (!hso_net->rfkill) {
- dev_err(dev, "%s - Out of memory\n", __func__);
- return;
- }
rfkn = kzalloc(20, GFP_KERNEL);
- if (!rfkn) {
- rfkill_free(hso_net->rfkill);
- hso_net->rfkill = NULL;
+ if (!rfkn)
dev_err(dev, "%s - Out of memory\n", __func__);
- return;
- }
+
snprintf(rfkn, 20, "hso-%d",
interface->altsetting->desc.bInterfaceNumber);
- hso_net->rfkill->name = rfkn;
- hso_net->rfkill->state = RFKILL_STATE_ON;
- hso_net->rfkill->data = hso_dev;
- hso_net->rfkill->toggle_radio = hso_radio_toggle;
+
+ hso_net->rfkill = rfkill_alloc(rfkn,
+ &interface_to_usbdev(interface)->dev,
+ RFKILL_TYPE_WWAN,
+ &hso_rfkill_ops, hso_dev);
+ if (!hso_net->rfkill) {
+ dev_err(dev, "%s - Out of memory\n", __func__);
+ kfree(rfkn);
+ return;
+ }
if (rfkill_register(hso_net->rfkill) < 0) {
+ rfkill_destroy(hso_net->rfkill);
kfree(rfkn);
- hso_net->rfkill->name = NULL;
- rfkill_free(hso_net->rfkill);
hso_net->rfkill = NULL;
dev_err(dev, "%s - Failed to register rfkill\n", __func__);
return;
@@ -3165,8 +3164,10 @@ static void hso_free_interface(struct usb_interface *interface)
hso_stop_net_device(network_table[i]);
cancel_work_sync(&network_table[i]->async_put_intf);
cancel_work_sync(&network_table[i]->async_get_intf);
- if (rfk)
+ if (rfk) {
rfkill_unregister(rfk);
+ rfkill_destroy(rfk);
+ }
hso_free_net_device(network_table[i]);
}
}
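
For reference, a minimal sketch, not taken from the patch, of the rfkill API lifecycle the hso conversion above follows: allocate with an ops table, register, and on teardown unregister then destroy. The example_* names are hypothetical; only the rfkill_*() calls and struct rfkill_ops are real API.

#include <linux/device.h>
#include <linux/rfkill.h>

/* Hypothetical block handler: power the radio up or down as requested. */
static int example_set_block(void *data, bool blocked)
{
	/* struct example_radio *radio = data; ... */
	return 0;			/* 0 on success */
}

static const struct rfkill_ops example_rfkill_ops = {
	.set_block = example_set_block,
};

static struct rfkill *example_rfkill_create(struct device *parent, void *priv)
{
	struct rfkill *rfk;

	rfk = rfkill_alloc("example-wwan", parent, RFKILL_TYPE_WWAN,
			   &example_rfkill_ops, priv);
	if (!rfk)
		return NULL;

	if (rfkill_register(rfk)) {
		rfkill_destroy(rfk);	/* registration failed: free it */
		return NULL;
	}
	return rfk;
}

static void example_rfkill_remove(struct rfkill *rfk)
{
	rfkill_unregister(rfk);
	rfkill_destroy(rfk);		/* always destroy after unregister */
}
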
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
new file mode 100644
index 00000000000..55cf7081de1
--- /dev/null
+++ b/drivers/net/usb/int51x1.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2009 Peter Holik
+ *
+ * Intellon USB PLC (Powerline Communications) network driver
+ *
+ * http://www.tandel.be/downloads/INT51X1_Datasheet.pdf
+ *
+ * Based on the work of Jan 'RedBully' Seiffert
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/usbnet.h>
+
+#define INT51X1_VENDOR_ID 0x09e1
+#define INT51X1_PRODUCT_ID 0x5121
+
+#define INT51X1_HEADER_SIZE 2 /* 2 byte header */
+
+#define PACKET_TYPE_PROMISCUOUS (1 << 0)
+#define PACKET_TYPE_ALL_MULTICAST (1 << 1) /* no filter */
+#define PACKET_TYPE_DIRECTED (1 << 2)
+#define PACKET_TYPE_BROADCAST (1 << 3)
+#define PACKET_TYPE_MULTICAST (1 << 4) /* filtered */
+
+#define SET_ETHERNET_PACKET_FILTER 0x43
+
+static int int51x1_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int len;
+
+ if (!(pskb_may_pull(skb, INT51X1_HEADER_SIZE))) {
+ deverr(dev, "unexpected tiny rx frame");
+ return 0;
+ }
+
+ len = le16_to_cpu(*(__le16 *)&skb->data[skb->len - 2]);
+
+ skb_trim(skb, len);
+
+ return 1;
+}
+
+static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
+ struct sk_buff *skb, gfp_t flags)
+{
+ int pack_len = skb->len;
+ int pack_with_header_len = pack_len + INT51X1_HEADER_SIZE;
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ int need_tail = 0;
+ __le16 *len;
+
+	/* if the packet plus our header is smaller than 64, pad to 64 (+ ZLP) */
+ if ((pack_with_header_len) < dev->maxpacket)
+ need_tail = dev->maxpacket - pack_with_header_len + 1;
+ /*
+	 * usbnet would send a ZLP if packet length mod urb size == 0 for us,
+	 * but we need to know that ourselves, because it would add to the
+	 * length we send down to the device...
+ */
+ else if (!(pack_with_header_len % dev->maxpacket))
+ need_tail = 1;
+
+ if (!skb_cloned(skb) &&
+ (headroom + tailroom >= need_tail + INT51X1_HEADER_SIZE)) {
+ if (headroom < INT51X1_HEADER_SIZE || tailroom < need_tail) {
+ skb->data = memmove(skb->head + INT51X1_HEADER_SIZE,
+ skb->data, skb->len);
+ skb_set_tail_pointer(skb, skb->len);
+ }
+ } else {
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(skb,
+ INT51X1_HEADER_SIZE,
+ need_tail,
+ flags);
+ dev_kfree_skb_any(skb);
+ if (!skb2)
+ return NULL;
+ skb = skb2;
+ }
+
+ pack_len += need_tail;
+ pack_len &= 0x07ff;
+
+ len = (__le16 *) __skb_push(skb, INT51X1_HEADER_SIZE);
+ *len = cpu_to_le16(pack_len);
+
+	if (need_tail)
+ memset(__skb_put(skb, need_tail), 0, need_tail);
+
+ return skb;
+}
+
+static void int51x1_async_cmd_callback(struct urb *urb)
+{
+ struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
+ int status = urb->status;
+
+ if (status < 0)
+ dev_warn(&urb->dev->dev, "async callback failed with %d\n", status);
+
+ kfree(req);
+ usb_free_urb(urb);
+}
+
+static void int51x1_set_multicast(struct net_device *netdev)
+{
+ struct usb_ctrlrequest *req;
+ int status;
+ struct urb *urb;
+ struct usbnet *dev = netdev_priv(netdev);
+ u16 filter = PACKET_TYPE_DIRECTED | PACKET_TYPE_BROADCAST;
+
+ if (netdev->flags & IFF_PROMISC) {
+ /* do not expect to see traffic of other PLCs */
+ filter |= PACKET_TYPE_PROMISCUOUS;
+ devinfo(dev, "promiscuous mode enabled");
+ } else if (netdev->mc_count ||
+ (netdev->flags & IFF_ALLMULTI)) {
+ filter |= PACKET_TYPE_ALL_MULTICAST;
+ devdbg(dev, "receive all multicast enabled");
+ } else {
+ /* ~PROMISCUOUS, ~MULTICAST */
+ devdbg(dev, "receive own packets only");
+ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ devwarn(dev, "Error allocating URB");
+ return;
+ }
+
+ req = kmalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req) {
+ devwarn(dev, "Error allocating control msg");
+ goto out;
+ }
+
+ req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ req->bRequest = SET_ETHERNET_PACKET_FILTER;
+ req->wValue = cpu_to_le16(filter);
+ req->wIndex = 0;
+ req->wLength = 0;
+
+ usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ (void *)req, NULL, 0,
+ int51x1_async_cmd_callback,
+ (void *)req);
+
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status < 0) {
+ devwarn(dev, "Error submitting control msg, sts=%d", status);
+ goto out1;
+ }
+ return;
+out1:
+ kfree(req);
+out:
+ usb_free_urb(urb);
+}
+
+static const struct net_device_ops int51x1_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = int51x1_set_multicast,
+};
+
+static int int51x1_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status = usbnet_get_ethernet_addr(dev, 3);
+
+ if (status)
+ return status;
+
+ dev->net->hard_header_len += INT51X1_HEADER_SIZE;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+ dev->net->netdev_ops = &int51x1_netdev_ops;
+
+ return usbnet_get_endpoints(dev, intf);
+}
+
+static const struct driver_info int51x1_info = {
+ .description = "Intellon usb powerline adapter",
+ .bind = int51x1_bind,
+ .rx_fixup = int51x1_rx_fixup,
+ .tx_fixup = int51x1_tx_fixup,
+ .in = 1,
+ .out = 2,
+ .flags = FLAG_ETHER,
+};
+
+static const struct usb_device_id products[] = {
+ {
+ USB_DEVICE(INT51X1_VENDOR_ID, INT51X1_PRODUCT_ID),
+ .driver_info = (unsigned long) &int51x1_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver int51x1_driver = {
+ .name = "int51x1",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+};
+
+static int __init int51x1_init(void)
+{
+ return usb_register(&int51x1_driver);
+}
+module_init(int51x1_init);
+
+static void __exit int51x1_exit(void)
+{
+ usb_deregister(&int51x1_driver);
+}
+module_exit(int51x1_exit);
+
+MODULE_AUTHOR("Peter Holik");
+MODULE_DESCRIPTION("Intellon usb powerline adapter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 3d0d0b0b37c..e0131478971 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -31,7 +31,6 @@
****************************************************************/
/* TODO:
- * Fix in_interrupt() problem
* Develop test procedures for USB net interfaces
* Run test procedures
* Fix bugs from previous two steps
@@ -606,14 +605,30 @@ static void kaweth_usb_receive(struct urb *urb)
struct sk_buff *skb;
- if(unlikely(status == -ECONNRESET || status == -ESHUTDOWN))
- /* we are killed - set a flag and wake the disconnect handler */
- {
+ if (unlikely(status == -EPIPE)) {
+ kaweth->stats.rx_errors++;
kaweth->end = 1;
wake_up(&kaweth->term_wait);
+ dbg("Status was -EPIPE.");
return;
}
-
+ if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) {
+ /* we are killed - set a flag and wake the disconnect handler */
+ kaweth->end = 1;
+ wake_up(&kaweth->term_wait);
+ dbg("Status was -ECONNRESET or -ESHUTDOWN.");
+ return;
+ }
+ if (unlikely(status == -EPROTO || status == -ETIME ||
+ status == -EILSEQ)) {
+ kaweth->stats.rx_errors++;
+ dbg("Status was -EPROTO, -ETIME, or -EILSEQ.");
+ return;
+ }
+ if (unlikely(status == -EOVERFLOW)) {
+ kaweth->stats.rx_errors++;
+ dbg("Status was -EOVERFLOW.");
+ }
spin_lock(&kaweth->device_lock);
if (IS_BLOCKED(kaweth->status)) {
spin_unlock(&kaweth->device_lock);
@@ -883,13 +898,16 @@ static void kaweth_set_rx_mode(struct net_device *net)
****************************************************************/
static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
{
+ int result;
__u16 packet_filter_bitmap = kaweth->packet_filter_bitmap;
+
kaweth->packet_filter_bitmap = 0;
if (packet_filter_bitmap == 0)
return;
- {
- int result;
+ if (in_interrupt())
+ return;
+
result = kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_SET_PACKET_FILTER,
@@ -906,7 +924,6 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
else {
dbg("Set Rx mode to %d", packet_filter_bitmap);
}
- }
}
/****************************************************************
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index f9fb454ffa8..fcc6fa0905d 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -221,7 +221,8 @@ static void ctrl_callback(struct urb *urb)
case -ENOENT:
break;
default:
- dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status);
+ if (printk_ratelimit())
+ dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status);
}
dev = urb->context;
clear_bit(RX_REG_SET, &dev->flags);
@@ -442,10 +443,12 @@ static void read_bulk_callback(struct urb *urb)
case -ENOENT:
return; /* the urb is in unlink state */
case -ETIME:
- dev_warn(&urb->dev->dev, "may be reset is needed?..\n");
+ if (printk_ratelimit())
+ dev_warn(&urb->dev->dev, "may be reset is needed?..\n");
goto goon;
default:
- dev_warn(&urb->dev->dev, "Rx status %d\n", status);
+ if (printk_ratelimit())
+ dev_warn(&urb->dev->dev, "Rx status %d\n", status);
goto goon;
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 5a7283372b5..89a91f8c22d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1134,7 +1134,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (skb->len == size) {
if (pdata->use_rx_csum)
smsc95xx_rx_csum_offload(skb);
-
+ skb_trim(skb, skb->len - 4); /* remove fcs */
skb->truesize = size + sizeof(struct sk_buff);
return 1;
@@ -1152,7 +1152,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (pdata->use_rx_csum)
smsc95xx_rx_csum_offload(ax_skb);
-
+ skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
ax_skb->truesize = size + sizeof(struct sk_buff);
usbnet_skb_return(dev, ax_skb);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index f3a2fce6166..22c0585a031 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -37,6 +37,7 @@
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
@@ -156,6 +157,36 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
+static u8 nibble(unsigned char c)
+{
+ if (likely(isdigit(c)))
+ return c - '0';
+ c = toupper(c);
+ if (likely(isxdigit(c)))
+ return 10 + c - 'A';
+ return 0;
+}
+
+int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
+{
+ int tmp, i;
+ unsigned char buf [13];
+
+ tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
+ if (tmp != 12) {
+ dev_dbg(&dev->udev->dev,
+ "bad MAC string %d fetch, %d\n", iMACAddress, tmp);
+ if (tmp >= 0)
+ tmp = -EINVAL;
+ return tmp;
+ }
+ for (i = tmp = 0; i < 6; i++, tmp += 2)
+ dev->net->dev_addr [i] =
+ (nibble(buf [tmp]) << 4) + nibble(buf [tmp + 1]);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
+
static void intr_complete (struct urb *urb);
static int init_status (struct usbnet *dev, struct usb_interface *intf)
@@ -398,7 +429,7 @@ static void rx_complete (struct urb *urb)
/* stalls need manual reset. this is rare ... except that
* when going through USB 2.0 TTs, unplug appears this way.
- * we avoid the highspeed version of the ETIMEOUT/EILSEQ
+ * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
* storm, recovering as needed.
*/
case -EPIPE:
@@ -1185,12 +1216,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
#endif
net->netdev_ops = &usbnet_netdev_ops;
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
- net->hard_start_xmit = usbnet_start_xmit;
- net->open = usbnet_open;
- net->stop = usbnet_stop;
- net->tx_timeout = usbnet_tx_timeout;
-#endif
net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
net->ethtool_ops = &usbnet_ethtool_ops;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8e56fcf0a0e..87197dd9c78 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -176,8 +176,6 @@ static int veth_xmit(struct sk_buff *skb, struct net_device *dev)
if (dev->features & NETIF_F_NO_CSUM)
skb->ip_summed = rcv_priv->ip_summed;
- dst_release(skb->dst);
- skb->dst = NULL;
skb->mark = 0;
secpath_reset(skb);
nf_reset(skb);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 45daba726b6..d3489a3c4c0 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -388,7 +388,6 @@ struct rhine_private {
long pioaddr;
struct net_device *dev;
struct napi_struct napi;
- struct net_device_stats stats;
spinlock_t lock;
/* Frequently used values: keep some adjacent for cache effect. */
@@ -1209,7 +1208,7 @@ static void rhine_tx_timeout(struct net_device *dev)
enable_irq(rp->pdev->irq);
dev->trans_start = jiffies;
- rp->stats.tx_errors++;
+ dev->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -1237,7 +1236,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
/* packet too long, drop it */
dev_kfree_skb(skb);
rp->tx_skbuff[entry] = NULL;
- rp->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
return 0;
}
@@ -1378,29 +1377,33 @@ static void rhine_tx(struct net_device *dev)
printk(KERN_DEBUG "%s: Transmit error, "
"Tx status %8.8x.\n",
dev->name, txstatus);
- rp->stats.tx_errors++;
- if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
- if (txstatus & 0x0200) rp->stats.tx_window_errors++;
- if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
- if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
+ dev->stats.tx_errors++;
+ if (txstatus & 0x0400)
+ dev->stats.tx_carrier_errors++;
+ if (txstatus & 0x0200)
+ dev->stats.tx_window_errors++;
+ if (txstatus & 0x0100)
+ dev->stats.tx_aborted_errors++;
+ if (txstatus & 0x0080)
+ dev->stats.tx_heartbeat_errors++;
if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
(txstatus & 0x0800) || (txstatus & 0x1000)) {
- rp->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
break; /* Keep the skb - we try again */
}
/* Transmitter restarted in 'abnormal' handler. */
} else {
if (rp->quirks & rqRhineI)
- rp->stats.collisions += (txstatus >> 3) & 0x0F;
+ dev->stats.collisions += (txstatus >> 3) & 0x0F;
else
- rp->stats.collisions += txstatus & 0x0F;
+ dev->stats.collisions += txstatus & 0x0F;
if (debug > 6)
printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
(txstatus >> 3) & 0xF,
txstatus & 0xF);
- rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
- rp->stats.tx_packets++;
+ dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
+ dev->stats.tx_packets++;
}
/* Free the original skb. */
if (rp->tx_skbuff_dma[entry]) {
@@ -1455,21 +1458,24 @@ static int rhine_rx(struct net_device *dev, int limit)
printk(KERN_WARNING "%s: Oversized Ethernet "
"frame %p vs %p.\n", dev->name,
rp->rx_head_desc, &rp->rx_ring[entry]);
- rp->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
} else if (desc_status & RxErr) {
/* There was a error. */
if (debug > 2)
printk(KERN_DEBUG "rhine_rx() Rx "
"error was %8.8x.\n",
desc_status);
- rp->stats.rx_errors++;
- if (desc_status & 0x0030) rp->stats.rx_length_errors++;
- if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
- if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
+ dev->stats.rx_errors++;
+ if (desc_status & 0x0030)
+ dev->stats.rx_length_errors++;
+ if (desc_status & 0x0048)
+ dev->stats.rx_fifo_errors++;
+ if (desc_status & 0x0004)
+ dev->stats.rx_frame_errors++;
if (desc_status & 0x0002) {
/* this can also be updated outside the interrupt handler */
spin_lock(&rp->lock);
- rp->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
spin_unlock(&rp->lock);
}
}
@@ -1513,8 +1519,8 @@ static int rhine_rx(struct net_device *dev, int limit)
}
skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb(skb);
- rp->stats.rx_bytes += pkt_len;
- rp->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
}
entry = (++rp->cur_rx) % RX_RING_SIZE;
rp->rx_head_desc = &rp->rx_ring[entry];
@@ -1599,8 +1605,8 @@ static void rhine_error(struct net_device *dev, int intr_status)
if (intr_status & IntrLinkChange)
rhine_check_media(dev, 0);
if (intr_status & IntrStatsMax) {
- rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
- rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
+ dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
+ dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
clear_tally_counters(ioaddr);
}
if (intr_status & IntrTxAborted) {
@@ -1654,12 +1660,12 @@ static struct net_device_stats *rhine_get_stats(struct net_device *dev)
unsigned long flags;
spin_lock_irqsave(&rp->lock, flags);
- rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
- rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
+ dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
+ dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
clear_tally_counters(ioaddr);
spin_unlock_irqrestore(&rp->lock, flags);
- return &rp->stats;
+ return &dev->stats;
}
static void rhine_set_rx_mode(struct net_device *dev)
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 754a4b182c1..e2a7725e567 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1385,7 +1385,7 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
static int velocity_rx_srv(struct velocity_info *vptr, int status)
{
- struct net_device_stats *stats = &vptr->stats;
+ struct net_device_stats *stats = &vptr->dev->stats;
int rd_curr = vptr->rx.curr;
int works = 0;
@@ -1519,7 +1519,7 @@ static inline void velocity_iph_realign(struct velocity_info *vptr,
static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
- struct net_device_stats *stats = &vptr->stats;
+ struct net_device_stats *stats = &vptr->dev->stats;
struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
struct rx_desc *rd = &(vptr->rx.ring[idx]);
int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
@@ -1532,7 +1532,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
}
if (rd->rdesc0.RSR & RSR_MAR)
- vptr->stats.multicast++;
+ stats->multicast++;
skb = rd_info->skb;
@@ -1634,7 +1634,7 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
int idx;
int works = 0;
struct velocity_td_info *tdinfo;
- struct net_device_stats *stats = &vptr->stats;
+ struct net_device_stats *stats = &vptr->dev->stats;
for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
@@ -2324,22 +2324,22 @@ static struct net_device_stats *velocity_get_stats(struct net_device *dev)
/* If the hardware is down, don't touch MII */
if(!netif_running(dev))
- return &vptr->stats;
+ return &dev->stats;
spin_lock_irq(&vptr->lock);
velocity_update_hw_mibs(vptr);
spin_unlock_irq(&vptr->lock);
- vptr->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
- vptr->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
- vptr->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
+ dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
+ dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
+ dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
// unsigned long rx_dropped; /* no space in linux buffers */
- vptr->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
+ dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
/* detailed rx_errors: */
// unsigned long rx_length_errors;
// unsigned long rx_over_errors; /* receiver ring buff overflow */
- vptr->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
+ dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
// unsigned long rx_frame_errors; /* recv'd frame alignment error */
// unsigned long rx_fifo_errors; /* recv'r fifo overrun */
// unsigned long rx_missed_errors; /* receiver missed packet */
@@ -2347,7 +2347,7 @@ static struct net_device_stats *velocity_get_stats(struct net_device *dev)
/* detailed tx_errors */
// unsigned long tx_fifo_errors;
- return &vptr->stats;
+ return &dev->stats;
}
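
A minimal sketch, not from the patch, of counting into the core-provided dev->stats, as the via-rhine and via-velocity hunks above now do instead of keeping a private net_device_stats copy; the example_* function is hypothetical. When a driver has no ndo_get_stats, the core hands out &dev->stats itself.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical TX-completion accounting using the core's counters. */
static void example_count_tx(struct net_device *dev, struct sk_buff *skb,
			     bool ok)
{
	if (ok) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
	} else {
		dev->stats.tx_errors++;
	}
}
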
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index ea43e1832af..4cd3f6c9737 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1503,7 +1503,6 @@ struct velocity_info {
struct pci_dev *pdev;
struct net_device *dev;
- struct net_device_stats stats;
struct vlan_group *vlgrp;
u8 ip_addr[4];
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4d1d47953fc..52198f6797a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -283,10 +283,11 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
for (;;) {
struct virtio_net_hdr *hdr;
- skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
+ skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
if (unlikely(!skb))
break;
+ skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, MAX_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
@@ -470,7 +471,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
}
if (skb_is_gso(skb)) {
- hdr->hdr_len = skb_transport_header(skb) - skb->data;
+ hdr->hdr_len = skb_headlen(skb);
hdr->gso_size = skb_shinfo(skb)->gso_size;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
@@ -622,12 +623,9 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
unsigned int tmp;
int i;
- if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
- BUG(); /* Caller should know better */
- return false;
- }
-
- BUG_ON(out + in > VIRTNET_SEND_COMMAND_SG_MAX);
+ /* Caller should know better */
+ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
+ (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
out++; /* Add header */
in++; /* Add return status */
@@ -642,8 +640,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
- if (vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) != 0)
- BUG();
+ BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi));
vi->cvq->vq_ops->kick(vi->cvq);
@@ -684,6 +681,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
u8 promisc, allmulti;
struct virtio_net_ctrl_mac *mac_data;
struct dev_addr_list *addr;
+ struct netdev_hw_addr *ha;
void *buf;
int i;
@@ -722,9 +720,9 @@ static void virtnet_set_rx_mode(struct net_device *dev)
/* Store the unicast list and count in the front of the buffer */
mac_data->entries = dev->uc_count;
- addr = dev->uc_list;
- for (i = 0; i < dev->uc_count; i++, addr = addr->next)
- memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);
+ i = 0;
+ list_for_each_entry(ha, &dev->uc_list, list)
+ memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
sg_set_buf(&sg[0], mac_data,
sizeof(mac_data->entries) + (dev->uc_count * ETH_ALEN));
@@ -845,6 +843,10 @@ static int virtnet_probe(struct virtio_device *vdev)
int err;
struct net_device *dev;
struct virtnet_info *vi;
+ struct virtqueue *vqs[3];
+ vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
+ const char *names[] = { "input", "output", "control" };
+ int nvqs;
/* Allocate ourselves a network device with room for our info */
dev = alloc_etherdev(sizeof(struct virtnet_info));
@@ -905,25 +907,19 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
- /* We expect two virtqueues, receive then send. */
- vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
- if (IS_ERR(vi->rvq)) {
- err = PTR_ERR(vi->rvq);
+ /* We expect two virtqueues, receive then send,
+ * and optionally control. */
+ nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
+
+ err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
+ if (err)
goto free;
- }
- vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done);
- if (IS_ERR(vi->svq)) {
- err = PTR_ERR(vi->svq);
- goto free_recv;
- }
+ vi->rvq = vqs[0];
+ vi->svq = vqs[1];
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
- vi->cvq = vdev->config->find_vq(vdev, 2, NULL);
- if (IS_ERR(vi->cvq)) {
- err = PTR_ERR(vi->svq);
- goto free_send;
- }
+ vi->cvq = vqs[2];
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
dev->features |= NETIF_F_HW_VLAN_FILTER;
@@ -941,7 +937,7 @@ static int virtnet_probe(struct virtio_device *vdev)
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
- goto free_ctrl;
+ goto free_vqs;
}
/* Last of all, set up some receive buffers. */
@@ -962,13 +958,8 @@ static int virtnet_probe(struct virtio_device *vdev)
unregister:
unregister_netdev(dev);
-free_ctrl:
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
- vdev->config->del_vq(vi->cvq);
-free_send:
- vdev->config->del_vq(vi->svq);
-free_recv:
- vdev->config->del_vq(vi->rvq);
+free_vqs:
+ vdev->config->del_vqs(vdev);
free:
free_netdev(dev);
return err;
@@ -994,12 +985,10 @@ static void virtnet_remove(struct virtio_device *vdev)
BUG_ON(vi->num != 0);
- vdev->config->del_vq(vi->svq);
- vdev->config->del_vq(vi->rvq);
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
- vdev->config->del_vq(vi->cvq);
unregister_netdev(vi->dev);
+ vdev->config->del_vqs(vi->vdev);
+
while (vi->pages)
__free_pages(get_a_page(vi, GFP_KERNEL), 0);
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 6b41c884a33..26cde573af4 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -1884,17 +1884,13 @@ void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
mempool->memblock_size, dma_object);
}
- if (mempool->items_arr)
- vfree(mempool->items_arr);
+ vfree(mempool->items_arr);
- if (mempool->memblocks_dma_arr)
- vfree(mempool->memblocks_dma_arr);
+ vfree(mempool->memblocks_dma_arr);
- if (mempool->memblocks_priv_arr)
- vfree(mempool->memblocks_priv_arr);
+ vfree(mempool->memblocks_priv_arr);
- if (mempool->memblocks_arr)
- vfree(mempool->memblocks_arr);
+ vfree(mempool->memblocks_arr);
vfree(mempool);
}
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index b7f08f3e524..6c838b3e063 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -677,7 +677,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
return VXGE_HW_OK;
}
-/* select a vpath to trasmit the packet */
+/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
int *do_lock)
{
@@ -992,7 +992,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
vxge_hw_fifo_txdl_post(fifo_hw, dtr);
- dev->trans_start = jiffies;
+#ifdef NETIF_F_LLTX
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+#endif
spin_unlock_irqrestore(&fifo->tx_lock, flags);
VXGE_COMPLETE_VPATH_TX(fifo);
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 7be0ae10d69..370f55cbbad 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -115,7 +115,7 @@ enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR), 0, 32),
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
&vp_reg->kdfcctl_errors_mask);
__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
@@ -1923,7 +1923,7 @@ enum vxge_hw_status __vxge_hw_vpath_alarm_process(
if (vpath == NULL) {
alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
alarm_event);
- goto out;
+ goto out2;
}
hldev = vpath->hldev;
@@ -2161,7 +2161,7 @@ enum vxge_hw_status __vxge_hw_vpath_alarm_process(
}
out:
hldev->stats.sw_dev_err_stats.vpath_alarms++;
-
+out2:
if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
(alarm_event == VXGE_HW_EVENT_UNKNOWN))
return VXGE_HW_OK;
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index 35dea3bea95..f525f9fe74d 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -615,7 +615,7 @@ static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
case WAN_DISCONNECTED:
if (cycx_x25_chan_connect(dev)) {
netif_stop_queue(dev);
- return -EBUSY;
+ return NETDEV_TX_BUSY;
}
/* fall thru */
case WAN_CONNECTED:
@@ -624,7 +624,7 @@ static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
netif_stop_queue(dev);
if (cycx_x25_chan_send(dev, skb))
- return -EBUSY;
+ return NETDEV_TX_BUSY;
break;
default:
@@ -656,7 +656,7 @@ static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
if (cycx_x25_chan_send(dev, skb)) {
/* prepare for future retransmissions */
skb_push(skb, 1);
- return -EBUSY;
+ return NETDEV_TX_BUSY;
}
}
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index e8d155c3e59..2fa275a58f9 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -205,15 +205,15 @@ static int dlci_transmit(struct sk_buff *skb, struct net_device *dev)
{
case DLCI_RET_OK:
dev->stats.tx_packets++;
- ret = 0;
+ ret = NETDEV_TX_OK;
break;
case DLCI_RET_ERR:
dev->stats.tx_errors++;
- ret = 0;
+ ret = NETDEV_TX_OK;
break;
case DLCI_RET_DROP:
dev->stats.tx_dropped++;
- ret = 1;
+ ret = NETDEV_TX_BUSY;
break;
}
/* Alan Cox recommends always returning 0, and always freeing the packet */
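
A minimal sketch, not from the patch, of the NETDEV_TX_OK/NETDEV_TX_BUSY return convention the WAN transmit paths above are moved to; everything named example_* is a hypothetical stub so the sketch stands alone.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical hardware hooks, stubbed for self-containment. */
static bool example_hw_queue_full(struct net_device *dev) { return false; }
static void example_hw_send(struct net_device *dev, struct sk_buff *skb) { }

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (example_hw_queue_full(dev)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* keep the skb, the core requeues it */
	}

	example_hw_send(dev, skb);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);		/* consumed: always free on NETDEV_TX_OK */
	return NETDEV_TX_OK;
}
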
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 80053010109..bfa0161a02d 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1054,6 +1054,7 @@ static void pvc_setup(struct net_device *dev)
dev->flags = IFF_POINTOPOINT;
dev->hard_header_len = 10;
dev->addr_len = 2;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
static const struct net_device_ops pvc_ops = {
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 765a7f5d6aa..bb719b6114c 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -579,7 +579,8 @@ static inline void queue_put_desc(unsigned int queue, u32 phys,
debug_desc(phys, desc);
BUG_ON(phys & 0x1F);
qmgr_put_entry(queue, phys);
- BUG_ON(qmgr_stat_overflow(queue));
+	/* Don't check for queue overflow here, we've allocated a sufficient
+	   queue length, and queues >= 32 don't support this check anyway. */
}
@@ -731,8 +732,8 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
dma_unmap_single(&dev->dev, desc->data,
RX_SIZE, DMA_FROM_DEVICE);
#else
- dma_sync_single(&dev->dev, desc->data,
- RX_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&dev->dev, desc->data,
+ RX_SIZE, DMA_FROM_DEVICE);
memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
ALIGN(desc->pkt_len, 4) / 4);
#endif
@@ -789,10 +790,10 @@ static void hss_hdlc_txdone_irq(void *pdev)
free_buffer_irq(port->tx_buff_tab[n_desc]);
port->tx_buff_tab[n_desc] = NULL;
- start = qmgr_stat_empty(port->plat->txreadyq);
+ start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
queue_put_desc(port->plat->txreadyq,
tx_desc_phys(port, n_desc), desc);
- if (start) {
+ if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
" ready\n", dev->name);
@@ -867,13 +868,13 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
dev->trans_start = jiffies;
- if (qmgr_stat_empty(txreadyq)) {
+ if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
#endif
netif_stop_queue(dev);
/* we could miss TX ready interrupt */
- if (!qmgr_stat_empty(txreadyq)) {
+ if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
dev->name);
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index c23fde0c034..79dabc557bd 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -225,6 +225,7 @@ static char rcsid[] =
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/spinlock.h>
#include <linux/if.h>
#include <net/arp.h>
@@ -3246,6 +3247,16 @@ static inline void show_version(void)
rcsvers, rcsdate, __DATE__, __TIME__);
} /* show_version */
+static const struct net_device_ops cpc_netdev_ops = {
+ .ndo_open = cpc_open,
+ .ndo_stop = cpc_close,
+ .ndo_tx_timeout = cpc_tx_timeout,
+ .ndo_set_mac_address = NULL,
+ .ndo_change_mtu = cpc_change_mtu,
+ .ndo_do_ioctl = cpc_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static void cpc_init_card(pc300_t * card)
{
int i, devcount = 0;
@@ -3357,18 +3368,11 @@ static void cpc_init_card(pc300_t * card)
dev->mem_start = card->hw.ramphys;
dev->mem_end = card->hw.ramphys + card->hw.ramsize - 1;
dev->irq = card->hw.irq;
- dev->init = NULL;
dev->tx_queue_len = PC300_TX_QUEUE_LEN;
dev->mtu = PC300_DEF_MTU;
- dev->open = cpc_open;
- dev->stop = cpc_close;
- dev->tx_timeout = cpc_tx_timeout;
+ dev->netdev_ops = &cpc_netdev_ops;
dev->watchdog_timeo = PC300_TX_TIMEOUT;
- dev->set_multicast_list = NULL;
- dev->set_mac_address = NULL;
- dev->change_mtu = cpc_change_mtu;
- dev->do_ioctl = cpc_ioctl;
if (register_hdlc_device(dev) == 0) {
printk("%s: Cyclades-PC300/", dev->name);
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index f4211fe0f44..3fb9dbc88a1 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -469,7 +469,7 @@ sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
}
}
- return 1;
+ return NETDEV_TX_BUSY;
}
#else /* CONFIG_SBNI_MULTILINE */
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 8130b79a8a9..e4ad7b6b52e 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -283,7 +283,7 @@ static int wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
netif_stop_queue(dev);
spin_unlock_irq(&port->lock);
- return 1; /* request packet to be queued */
+ return NETDEV_TX_BUSY; /* request packet to be queued */
}
#ifdef DEBUG_PKT
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index b3cadb626fe..07308686dbc 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -292,8 +292,6 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
d_fnstart(3, dev, "(i2400m %p ss %p [%u])\n", i2400m, ss, i2400m_state);
- if (unlikely(i2400m->ready == 0)) /* act if up */
- goto out;
if (i2400m->state != i2400m_state) {
i2400m->state = i2400m_state;
wake_up_all(&i2400m->state_wq);
@@ -341,7 +339,6 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
i2400m->bus_reset(i2400m, I2400M_RT_WARM);
break;
};
-out:
d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
i2400m, ss, i2400m_state);
}
@@ -372,8 +369,6 @@ void i2400m_report_tlv_media_status(struct i2400m *i2400m,
d_fnstart(3, dev, "(i2400m %p ms %p [%u])\n", i2400m, ms, status);
- if (unlikely(i2400m->ready == 0)) /* act if up */
- goto out;
switch (status) {
case I2400M_MEDIA_STATUS_LINK_UP:
netif_carrier_on(net_dev);
@@ -393,14 +388,59 @@ void i2400m_report_tlv_media_status(struct i2400m *i2400m,
dev_err(dev, "HW BUG? unknown media status %u\n",
status);
};
-out:
d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n",
i2400m, ms, status);
}
/*
- * Parse a 'state report' and extract carrier on/off information
+ * Process a TLV from a 'state report'
+ *
+ * @i2400m: device descriptor
+ * @tlv: pointer to the TLV header; it has been already validated for
+ * consistent size.
+ * @tag: for error messages
+ *
+ * Act on the TLVs from a 'state report'.
+ */
+static
+void i2400m_report_state_parse_tlv(struct i2400m *i2400m,
+ const struct i2400m_tlv_hdr *tlv,
+ const char *tag)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_tlv_media_status *ms;
+ const struct i2400m_tlv_system_state *ss;
+ const struct i2400m_tlv_rf_switches_status *rfss;
+
+ if (0 == i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE, sizeof(*ss))) {
+ ss = container_of(tlv, typeof(*ss), hdr);
+ d_printf(2, dev, "%s: system state TLV "
+ "found (0x%04x), state 0x%08x\n",
+ tag, I2400M_TLV_SYSTEM_STATE,
+ le32_to_cpu(ss->state));
+ i2400m_report_tlv_system_state(i2400m, ss);
+ }
+ if (0 == i2400m_tlv_match(tlv, I2400M_TLV_RF_STATUS, sizeof(*rfss))) {
+ rfss = container_of(tlv, typeof(*rfss), hdr);
+ d_printf(2, dev, "%s: RF status TLV "
+ "found (0x%04x), sw 0x%02x hw 0x%02x\n",
+ tag, I2400M_TLV_RF_STATUS,
+ le32_to_cpu(rfss->sw_rf_switch),
+ le32_to_cpu(rfss->hw_rf_switch));
+ i2400m_report_tlv_rf_switches_status(i2400m, rfss);
+ }
+ if (0 == i2400m_tlv_match(tlv, I2400M_TLV_MEDIA_STATUS, sizeof(*ms))) {
+ ms = container_of(tlv, typeof(*ms), hdr);
+ d_printf(2, dev, "%s: Media Status TLV: %u\n",
+ tag, le32_to_cpu(ms->media_status));
+ i2400m_report_tlv_media_status(i2400m, ms);
+ }
+}
+
+
+/*
+ * Parse a 'state report' and extract information
*
* @i2400m: device descriptor
* @l3l4_hdr: pointer to message; it has been already validated for
@@ -409,13 +449,7 @@ out:
* declaration is assumed to be congruent with @size (as in
* sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
*
- * Extract from the report state the system state TLV and infer from
- * there if we have a carrier or not. Update our local state and tell
- * netdev.
- *
- * When setting the carrier, it's fine to set OFF twice (for example),
- * as netif_carrier_off() will not generate two OFF events (just on
- * the transitions).
+ * Walk over the TLVs in a report state and act on them.
*/
static
void i2400m_report_state_hook(struct i2400m *i2400m,
@@ -424,9 +458,6 @@ void i2400m_report_state_hook(struct i2400m *i2400m,
{
struct device *dev = i2400m_dev(i2400m);
const struct i2400m_tlv_hdr *tlv;
- const struct i2400m_tlv_system_state *ss;
- const struct i2400m_tlv_rf_switches_status *rfss;
- const struct i2400m_tlv_media_status *ms;
size_t tlv_size = le16_to_cpu(l3l4_hdr->length);
d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n",
@@ -434,34 +465,8 @@ void i2400m_report_state_hook(struct i2400m *i2400m,
tlv = NULL;
while ((tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl,
- tlv_size, tlv))) {
- if (0 == i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE,
- sizeof(*ss))) {
- ss = container_of(tlv, typeof(*ss), hdr);
- d_printf(2, dev, "%s: system state TLV "
- "found (0x%04x), state 0x%08x\n",
- tag, I2400M_TLV_SYSTEM_STATE,
- le32_to_cpu(ss->state));
- i2400m_report_tlv_system_state(i2400m, ss);
- }
- if (0 == i2400m_tlv_match(tlv, I2400M_TLV_RF_STATUS,
- sizeof(*rfss))) {
- rfss = container_of(tlv, typeof(*rfss), hdr);
- d_printf(2, dev, "%s: RF status TLV "
- "found (0x%04x), sw 0x%02x hw 0x%02x\n",
- tag, I2400M_TLV_RF_STATUS,
- le32_to_cpu(rfss->sw_rf_switch),
- le32_to_cpu(rfss->hw_rf_switch));
- i2400m_report_tlv_rf_switches_status(i2400m, rfss);
- }
- if (0 == i2400m_tlv_match(tlv, I2400M_TLV_MEDIA_STATUS,
- sizeof(*ms))) {
- ms = container_of(tlv, typeof(*ms), hdr);
- d_printf(2, dev, "%s: Media Status TLV: %u\n",
- tag, le32_to_cpu(ms->media_status));
- i2400m_report_tlv_media_status(i2400m, ms);
- }
- }
+ tlv_size, tlv)))
+ i2400m_report_state_parse_tlv(i2400m, tlv, tag);
d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n",
i2400m, l3l4_hdr, size, tag);
}
@@ -500,8 +505,15 @@ void i2400m_report_hook(struct i2400m *i2400m,
* it. */
case I2400M_MT_REPORT_POWERSAVE_READY: /* zzzzz */
if (l3l4_hdr->status == cpu_to_le16(I2400M_MS_DONE_OK)) {
- d_printf(1, dev, "ready for powersave, requesting\n");
- i2400m_cmd_enter_powersave(i2400m);
+ if (i2400m_power_save_disabled)
+ d_printf(1, dev, "ready for powersave, "
+ "not requesting (disabled by module "
+ "parameter)\n");
+ else {
+ d_printf(1, dev, "ready for powersave, "
+ "requesting\n");
+ i2400m_cmd_enter_powersave(i2400m);
+ }
}
break;
};
@@ -683,8 +695,9 @@ struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
d_fnstart(3, dev, "(i2400m %p buf %p len %zu)\n",
i2400m, buf, buf_len);
+	rmb();	/* Make sure we see what i2400m_dev_reset_handle() set */
if (i2400m->boot_mode)
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-EL3RST);
msg_l3l4_hdr = buf;
/* Check msg & payload consistency */
@@ -721,6 +734,8 @@ struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
ack_timeout = HZ;
};
+ if (unlikely(i2400m->trace_msg_from_user))
+ wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL);
/* The RX path in rx.c will put any response for this message
* in i2400m->ack_skb and wake us up. If we cancel the wait,
* we need to change the value of i2400m->ack_skb to something
@@ -755,6 +770,9 @@ struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
ack_l3l4_hdr = wimax_msg_data_len(ack_skb, &ack_len);
/* Check the ack and deliver it if it is ok */
+ if (unlikely(i2400m->trace_msg_from_user))
+ wimax_msg(&i2400m->wimax_dev, "echo",
+ ack_l3l4_hdr, ack_len, GFP_KERNEL);
result = i2400m_msg_size_check(i2400m, ack_l3l4_hdr, ack_len);
if (result < 0) {
dev_err(dev, "HW BUG? reply to message 0x%04x: %d\n",
@@ -1379,16 +1397,16 @@ error:
*
* @i2400m: device descriptor
*
- * Gracefully stops the device, moving it to the lowest power
- * consumption state possible.
+ * Release resources acquired during the running of the device; in
+ * theory, should also tell the device to go to sleep, switch off the
+ * radio, all that, but at this point, in most cases (driver
+ * disconnection, reset handling) we can't even talk to the device.
*/
void i2400m_dev_shutdown(struct i2400m *i2400m)
{
- int result = -ENODEV;
struct device *dev = i2400m_dev(i2400m);
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
- result = i2400m->bus_reset(i2400m, I2400M_RT_WARM);
- d_fnend(3, dev, "(i2400m %p) = void [%d]\n", i2400m, result);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
return;
}
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 07a54bad237..304f0443ca4 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -62,6 +62,7 @@
* unregister_netdev()
*/
#include "i2400m.h"
+#include <linux/etherdevice.h>
#include <linux/wimax/i2400m.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -81,6 +82,14 @@ module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
MODULE_PARM_DESC(rx_reorder_disabled,
"If true, RX reordering will be disabled.");
+int i2400m_power_save_disabled; /* 0 (power saving enabled) by default */
+module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
+MODULE_PARM_DESC(power_save_disabled,
+ "If true, the driver will not tell the device to enter "
+ "power saving mode when it reports it is ready for it. "
+ "False by default (so the device is told to do power "
+ "saving).");
+
/**
* i2400m_queue_work - schedule work on a i2400m's queue
*
@@ -171,7 +180,6 @@ int i2400m_schedule_work(struct i2400m *i2400m,
int result;
struct i2400m_work *iw;
- BUG_ON(i2400m->work_queue == NULL);
result = -ENOMEM;
iw = kzalloc(sizeof(*iw), gfp_flags);
if (iw == NULL)
@@ -234,9 +242,6 @@ int i2400m_op_msg_from_user(struct wimax_dev *wimax_dev,
result = PTR_ERR(ack_skb);
if (IS_ERR(ack_skb))
goto error_msg_to_dev;
- if (unlikely(i2400m->trace_msg_from_user))
- wimax_msg(&i2400m->wimax_dev, "trace",
- msg_buf, msg_len, GFP_KERNEL);
result = wimax_msg_send(&i2400m->wimax_dev, ack_skb);
error_msg_to_dev:
d_fnend(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p msg_len %zu "
@@ -379,6 +384,11 @@ error:
* Uploads firmware and brings up all the resources needed to be able
* to communicate with the device.
*
+ * The workqueue has to be set up early, at least before RX handling
+ * (its only real user for now), so it can process reports as they
+ * arrive. We also want to destroy it if we retry, to make sure it is
+ * flushed; doing it this way is easier.
+ *
* TX needs to be setup before the bus-specific code (otherwise on
* shutdown, the bus-tx code could try to access it).
*/
@@ -389,7 +399,7 @@ int __i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri flags)
struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
struct net_device *net_dev = wimax_dev->net_dev;
struct device *dev = i2400m_dev(i2400m);
- int times = 3;
+ int times = i2400m->bus_bm_retries;
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
retry:
@@ -404,15 +414,15 @@ retry:
result = i2400m_rx_setup(i2400m);
if (result < 0)
goto error_rx_setup;
- result = i2400m->bus_dev_start(i2400m);
- if (result < 0)
- goto error_bus_dev_start;
i2400m->work_queue = create_singlethread_workqueue(wimax_dev->name);
if (i2400m->work_queue == NULL) {
result = -ENOMEM;
dev_err(dev, "cannot create workqueue\n");
goto error_create_workqueue;
}
+ result = i2400m->bus_dev_start(i2400m);
+ if (result < 0)
+ goto error_bus_dev_start;
result = i2400m_firmware_check(i2400m); /* fw versions ok? */
if (result < 0)
goto error_fw_check;
@@ -434,17 +444,17 @@ retry:
error_dev_initialize:
error_check_mac_addr:
error_fw_check:
- destroy_workqueue(i2400m->work_queue);
-error_create_workqueue:
i2400m->bus_dev_stop(i2400m);
error_bus_dev_start:
+ destroy_workqueue(i2400m->work_queue);
+error_create_workqueue:
i2400m_rx_release(i2400m);
error_rx_setup:
i2400m_tx_release(i2400m);
error_tx_setup:
error_bootstrap:
- if (result == -ERESTARTSYS && times-- > 0) {
- flags = I2400M_BRI_SOFT;
+ if (result == -EL3RST && times-- > 0) {
+ flags = I2400M_BRI_SOFT|I2400M_BRI_MAC_REINIT;
goto retry;
}
d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
@@ -473,7 +483,9 @@ int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
*
* Returns: 0 if ok, < 0 errno code on error.
*
- * Releases all the resources allocated to communicate with the device.
+ * Releases all the resources allocated to communicate with the
+ * device. Note we cannot destroy the workqueue earlier because, until
+ * RX is fully released, it could still try to schedule jobs.
*/
static
void __i2400m_dev_stop(struct i2400m *i2400m)
@@ -485,8 +497,8 @@ void __i2400m_dev_stop(struct i2400m *i2400m)
wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
i2400m_dev_shutdown(i2400m);
i2400m->ready = 0;
- destroy_workqueue(i2400m->work_queue);
i2400m->bus_dev_stop(i2400m);
+ destroy_workqueue(i2400m->work_queue);
i2400m_rx_release(i2400m);
i2400m_tx_release(i2400m);
wimax_state_change(wimax_dev, WIMAX_ST_DOWN);
@@ -548,7 +560,7 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
* i2400m_dev_stop() [we are shutting down anyway, so
* ignore it] or we are resetting somewhere else. */
dev_err(dev, "device rebooted\n");
- i2400m_msg_to_dev_cancel_wait(i2400m, -ERESTARTSYS);
+ i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST);
complete(&i2400m->msg_completion);
goto out;
}
@@ -598,6 +610,8 @@ out:
*/
int i2400m_dev_reset_handle(struct i2400m *i2400m)
{
+ i2400m->boot_mode = 1;
+ wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
GFP_ATOMIC);
}
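The wmb() here pairs with the rmb() added to i2400ms_rx() further down in this series: the reset handler publishes boot_mode before the work it queues (or the next interrupt) can act on it, and the RX path orders its load of boot_mode before deciding how to treat the incoming payload. Condensed from the two hunks in this patch:

	/* Writer side, i2400m_dev_reset_handle(): publish the flag first */
	i2400m->boot_mode = 1;
	wmb();				/* make the store visible */
	i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle, GFP_ATOMIC);

	/* Reader side, i2400ms_rx(): order the load before using the data */
	rmb();				/* see the writer's store */
	if (i2400m->boot_mode == 1)
		/* payload is a boot-mode ack, hand it to the BM waiter */;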
@@ -650,6 +664,7 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
result = i2400m_read_mac_addr(i2400m);
if (result < 0)
goto error_read_mac_addr;
+ random_ether_addr(i2400m->src_mac_addr);
result = register_netdev(net_dev); /* Okey dokey, bring it up */
if (result < 0) {
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 675c6ce810c..e81750e5445 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -397,7 +397,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
unsigned int direct, unsigned int do_csum)
{
int ret;
- size_t chunk_len = ALIGN(__chunk_len, I2400M_PL_PAD);
+ size_t chunk_len = ALIGN(__chunk_len, I2400M_PL_ALIGN);
struct device *dev = i2400m_dev(i2400m);
struct {
struct i2400m_bootrom_header cmd;
@@ -532,14 +532,14 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
cmd = (void *) bcf + offset;
if (i2400m->sboot == 0) {
struct i2400m_bootrom_header jump_ack;
- d_printf(3, dev, "unsecure boot, jumping to 0x%08x\n",
+ d_printf(1, dev, "unsecure boot, jumping to 0x%08x\n",
le32_to_cpu(cmd->target_addr));
i2400m_brh_set_opcode(cmd, I2400M_BRH_JUMP);
cmd->data_size = 0;
ret = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
&jump_ack, sizeof(jump_ack), 0);
} else {
- d_printf(3, dev, "secure boot, jumping to 0x%08x\n",
+ d_printf(1, dev, "secure boot, jumping to 0x%08x\n",
le32_to_cpu(cmd->target_addr));
cmd_buf = i2400m->bm_cmd_buf;
memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
@@ -696,8 +696,7 @@ error_dev_gone:
return result;
error_timeout:
- dev_err(dev, "Timed out waiting for reboot ack, resetting\n");
- i2400m->bus_reset(i2400m, I2400M_RT_BUS);
+ dev_err(dev, "Timed out waiting for reboot ack\n");
result = -ETIMEDOUT;
goto exit_timeout;
}
@@ -770,40 +769,21 @@ error_read_mac:
static
int i2400m_dnload_init_nonsigned(struct i2400m *i2400m)
{
-#define POKE(a, d) { \
- .address = cpu_to_le32(a), \
- .data = cpu_to_le32(d) \
-}
- static const struct {
- __le32 address;
- __le32 data;
- } i2400m_pokes[] = {
- POKE(0x081A58, 0xA7810230),
- POKE(0x080040, 0x00000000),
- POKE(0x080048, 0x00000082),
- POKE(0x08004C, 0x0000081F),
- POKE(0x080054, 0x00000085),
- POKE(0x080058, 0x00000180),
- POKE(0x08005C, 0x00000018),
- POKE(0x080060, 0x00000010),
- POKE(0x080574, 0x00000001),
- POKE(0x080550, 0x00000005),
- POKE(0xAE0000, 0x00000000),
- };
-#undef POKE
- unsigned i;
- int ret;
+ unsigned i = 0;
+ int ret = 0;
struct device *dev = i2400m_dev(i2400m);
-
- dev_warn(dev, "WARNING!!! non-signed boot UNTESTED PATH!\n");
-
d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
- for (i = 0; i < ARRAY_SIZE(i2400m_pokes); i++) {
- ret = i2400m_download_chunk(i2400m, &i2400m_pokes[i].data,
- sizeof(i2400m_pokes[i].data),
- i2400m_pokes[i].address, 1, 1);
- if (ret < 0)
- break;
+ if (i2400m->bus_bm_pokes_table) {
+ while (i2400m->bus_bm_pokes_table[i].address) {
+ ret = i2400m_download_chunk(
+ i2400m,
+ &i2400m->bus_bm_pokes_table[i].data,
+ sizeof(i2400m->bus_bm_pokes_table[i].data),
+ i2400m->bus_bm_pokes_table[i].address, 1, 1);
+ if (ret < 0)
+ break;
+ i++;
+ }
}
d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
return ret;
@@ -980,11 +960,12 @@ int i2400m_fw_dnload(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf,
{
int ret = 0;
struct device *dev = i2400m_dev(i2400m);
- int count = I2400M_BOOT_RETRIES;
+ int count = i2400m->bus_bm_retries;
d_fnstart(5, dev, "(i2400m %p bcf %p size %zu)\n",
i2400m, bcf, bcf_size);
i2400m->boot_mode = 1;
+ wmb(); /* Make sure other readers see it */
hw_reboot:
if (count-- == 0) {
ret = -ERESTARTSYS;
@@ -1033,6 +1014,7 @@ hw_reboot:
d_printf(2, dev, "fw %s successfully uploaded\n",
i2400m->fw_name);
i2400m->boot_mode = 0;
+ wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
error_dnload_finalize:
error_dnload_bcf:
error_dnload_init:
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
index 08c2fb73923..9c4e3189f7b 100644
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -78,6 +78,8 @@ enum {
/* The number of ticks to wait for the device to signal that
* it is ready */
I2400MS_INIT_SLEEP_INTERVAL = 10,
+ /* How long to wait for the device to settle after reset */
+ I2400MS_SETTLE_TIME = 40,
};
@@ -105,6 +107,10 @@ struct i2400ms {
char tx_wq_name[32];
struct dentry *debugfs_dentry;
+
+ wait_queue_head_t bm_wfa_wq;
+ int bm_wait_result;
+ size_t bm_ack_size;
};
@@ -129,4 +135,7 @@ extern ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *,
extern ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *,
struct i2400m_bootrom_header *,
size_t);
+extern void i2400ms_bus_bm_release(struct i2400m *);
+extern int i2400ms_bus_bm_setup(struct i2400m *);
+
#endif /* #ifndef __I2400M_SDIO_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 3ae2df38b59..60330f313f2 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -150,11 +150,33 @@
enum {
/* Firmware uploading */
I2400M_BOOT_RETRIES = 3,
+ I3200_BOOT_RETRIES = 3,
/* Size of the Boot Mode Command buffer */
I2400M_BM_CMD_BUF_SIZE = 16 * 1024,
I2400M_BM_ACK_BUF_SIZE = 256,
};
+/**
+ * struct i2400m_poke_table - Hardware poke table for the Intel 2400m
+ *
+ * This structure will be used to create a device specific poke table
+ * to put the device in a consistant state at boot time.
+ *
+ * @address: The device address to poke
+ *
+ * @data: The data value to poke to the device address
+ *
+ */
+struct i2400m_poke_table {
+ __le32 address;
+ __le32 data;
+};
+
+#define I2400M_FW_POKE(a, d) { \
+ .address = cpu_to_le32(a), \
+ .data = cpu_to_le32(d) \
+}
+
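A bus driver builds a zero-terminated table with this macro and hands it over via the new bus_bm_pokes_table hook; condensed from the sdio.c hunk later in this patch:

	static const struct i2400m_poke_table i2400ms_pokes[] = {
		I2400M_FW_POKE(0x6BE260, 0x00000088),
		I2400M_FW_POKE(0x080550, 0x00000005),
		I2400M_FW_POKE(0xAE0000, 0x00000000),
		I2400M_FW_POKE(0x000000, 0x00000000),	/* terminator, required */
	};

	/* in the probe path */
	i2400m->bus_bm_pokes_table = &i2400ms_pokes[0];

i2400m_dnload_init_nonsigned() walks the table until it hits the zero address, which is why the terminator is mandatory.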
/**
* i2400m_reset_type - methods to reset a device
@@ -224,6 +246,17 @@ struct i2400m_roq;
* process, so it cannot rely on common infrastructure being laid
* out.
*
+ * @bus_bm_retries: [fill] How many times shall a firmware upload /
+ * device initialization be retried? Different models of the same
+ * device might need different values, hence it is set by the
+ * bus-specific driver. Note this value is used in two places,
+ * i2400m_fw_dnload() and __i2400m_dev_start(); they won't become
+ * multiplicative (__i2400m_dev_start() calling i2400m_fw_dnload()
+ * N times, with each of those trying N times to download the
+ * firmware), because __i2400m_dev_start() only retries if the
+ * firmware crashed while initializing the device (not in the
+ * general case).
+ *
* @bus_bm_cmd_send: [fill] Function called to send a boot-mode
* command. Flags are defined in 'enum i2400m_bm_cmd_flags'. This
* is synchronous and has to return 0 if ok or < 0 errno code in
@@ -252,6 +285,12 @@ struct i2400m_roq;
* address provided in boot mode is kind of broken and needs to
* be re-read later on.
*
+ * @bus_bm_pokes_table: [fill/optional] A table of device addresses
+ * and values that will be poked at device init time to move the
+ * device to the correct state for the type of boot/firmware being
+ * used. This table MUST be terminated with (0x000000,
+ * 0x00000000) or bad things will happen.
+ *
*
* @wimax_dev: WiMAX generic device for linkage into the kernel WiMAX
* stack. Due to the way a net_device is allocated, we need to
@@ -323,6 +362,10 @@ struct i2400m_roq;
* delivered. Then the driver can release them to the host. See
* drivers/net/i2400m/rx.c for details.
*
+ * @src_mac_addr: MAC address used as the source address of the fake
+ * ethernet headers generated for received packets. This is created
+ * at i2400m_setup() time and used during the life cycle of the
+ * instance. See i2400m_rx_fake_eth_header().
+ *
* @init_mutex: Mutex used for serializing the device bringup
* sequence; this way if the device reboots in the middle, we
* don't try to do a bringup again while we are tearing down the
@@ -389,12 +432,14 @@ struct i2400m {
unsigned ready:1; /* all probing steps done */
unsigned rx_reorder:1; /* RX reorder is enabled */
u8 trace_msg_from_user; /* echo rx msgs to 'trace' pipe */
- /* typed u8 so debugfs/u8 can tweak */
+ /* typed u8 so /sys/kernel/debug/u8 can tweak */
enum i2400m_system_state state;
wait_queue_head_t state_wq; /* Woken up when on state updates */
size_t bus_tx_block_size;
size_t bus_pl_size_max;
+ unsigned bus_bm_retries;
+
int (*bus_dev_start)(struct i2400m *);
void (*bus_dev_stop)(struct i2400m *);
void (*bus_tx_kick)(struct i2400m *);
@@ -406,6 +451,7 @@ struct i2400m {
struct i2400m_bootrom_header *, size_t);
const char **bus_fw_names;
unsigned bus_bm_mac_addr_impaired:1;
+ const struct i2400m_poke_table *bus_bm_pokes_table;
spinlock_t tx_lock; /* protect TX state */
void *tx_buf;
@@ -421,6 +467,7 @@ struct i2400m {
unsigned rx_pl_num, rx_pl_max, rx_pl_min,
rx_num, rx_size_acc, rx_size_min, rx_size_max;
struct i2400m_roq *rx_roq; /* not under rx_lock! */
+ u8 src_mac_addr[ETH_ALEN];
struct mutex msg_mutex; /* serialize command execution */
struct completion msg_completion;
@@ -704,6 +751,7 @@ static const __le32 i2400m_SBOOT_BARKER[4] = {
cpu_to_le32(I2400M_SBOOT_BARKER)
};
+extern int i2400m_power_save_disabled;
/*
* Utility functions
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 6b1fe7a81f2..9653f478b38 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -404,10 +404,12 @@ static
void i2400m_rx_fake_eth_header(struct net_device *net_dev,
void *_eth_hdr, __be16 protocol)
{
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
struct ethhdr *eth_hdr = _eth_hdr;
memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
- memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_dest));
+ memcpy(eth_hdr->h_source, i2400m->src_mac_addr,
+ sizeof(eth_hdr->h_source));
eth_hdr->h_proto = protocol;
}
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index 487ec58cea4..43927b5d7ad 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -54,8 +54,10 @@ int i2400m_radio_is(struct i2400m *i2400m, enum wimax_rf_state state)
/* state == WIMAX_RF_ON */
return i2400m->state != I2400M_SS_RF_OFF
&& i2400m->state != I2400M_SS_RF_SHUTDOWN;
- else
+ else {
BUG();
+ return -EINVAL; /* shut gcc warnings on certain arches */
+ }
}
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 02419bfd64b..07c32e68909 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -177,7 +177,8 @@ void i2400m_report_hook_work(struct work_struct *ws)
struct i2400m_work *iw =
container_of(ws, struct i2400m_work, ws);
struct i2400m_report_hook_args *args = (void *) iw->pl;
- i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size);
+ if (iw->i2400m->ready)
+ i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size);
kfree_skb(args->skb_rx);
i2400m_put(iw->i2400m);
kfree(iw);
@@ -309,6 +310,9 @@ void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
skb_get(skb_rx);
i2400m_queue_work(i2400m, i2400m_report_hook_work,
GFP_KERNEL, &args, sizeof(args));
+ if (unlikely(i2400m->trace_msg_from_user))
+ wimax_msg(&i2400m->wimax_dev, "echo",
+ l3l4_hdr, size, GFP_KERNEL);
result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
GFP_KERNEL);
if (result < 0)
@@ -819,10 +823,9 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
roq_data = (struct i2400m_roq_data *) &skb->cb;
i2400m_net_erx(i2400m, skb, roq_data->cs);
}
- else {
+ else
__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
- __i2400m_roq_update_ws(i2400m, roq, sn + 1);
- }
+ __i2400m_roq_update_ws(i2400m, roq, sn + 1);
i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
old_ws, len, sn, nsn, roq->ws);
}
@@ -1145,7 +1148,7 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
num_pls = le16_to_cpu(msg_hdr->num_pls);
pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */
num_pls * sizeof(msg_hdr->pld[0]);
- pl_itr = ALIGN(pl_itr, I2400M_PL_PAD);
+ pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
if (pl_itr > skb->len) { /* got all the payload descriptors? */
dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
"%u payload descriptors (%zu each, total %zu)\n",
@@ -1163,7 +1166,7 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
single_last = num_pls == 1 || i == num_pls - 1;
i2400m_rx_payload(i2400m, skb, single_last, &msg_hdr->pld[i],
skb->data + pl_itr);
- pl_itr += ALIGN(pl_size, I2400M_PL_PAD);
+ pl_itr += ALIGN(pl_size, I2400M_PL_ALIGN);
cond_resched(); /* Don't monopolize */
}
kfree_skb(skb);
diff --git a/drivers/net/wimax/i2400m/sdio-fw.c b/drivers/net/wimax/i2400m/sdio-fw.c
index 3487205d8f5..7d6ec0f475f 100644
--- a/drivers/net/wimax/i2400m/sdio-fw.c
+++ b/drivers/net/wimax/i2400m/sdio-fw.c
@@ -46,17 +46,24 @@
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* - SDIO rehash for changes in the bus-driver model
*
+ * Dirk Brandewie <dirk.j.brandewie@intel.com>
+ * - Make it IRQ based, not polling
+ *
* THE PROCEDURE
*
* See fw.c for the generic description of this procedure.
*
* This file implements only the SDIO specifics. It boils down to how
* to send a command and waiting for an acknowledgement from the
- * device. We do polled reads.
+ * device.
+ *
+ * All this code is sequential -- all i2400ms_bus_bm_*() functions are
+ * executed in the same thread, except i2400ms_bm_irq() [on its own by
+ * the SDIO driver]. This makes it possible to avoid locking.
*
* COMMAND EXECUTION
*
- * THe generic firmware upload code will call i2400m_bus_bm_cmd_send()
+ * The generic firmware upload code will call i2400m_bus_bm_cmd_send()
* to send commands.
*
* The SDIO device expects things in 256-byte blocks, so it will pad
@@ -64,12 +71,15 @@
*
* ACK RECEPTION
*
- * This works in polling mode -- the fw loader says when to wait for
- * data and for that it calls i2400ms_bus_bm_wait_for_ack().
+ * This works in IRQ mode -- the fw loader says when to wait for data
+ * and for that it calls i2400ms_bus_bm_wait_for_ack().
*
- * This will poll the device for data until it is received. We need to
- * receive at least as much bytes as where asked for (although it'll
- * always be a multiple of 256 bytes).
+ * This checks if there is any data available (RX size > 0); if not,
+ * waits for the IRQ handler to notify about it. Once there is data,
+ * it is read and passed to the caller. Doing it this way we don't
+ * need much coordination/locking, and it makes it much harder for an
+ * interrupt to be lost and for the wait_for_ack() function to get
+ * stuck even when data is pending.
*/
#include <linux/mmc/sdio_func.h>
#include "i2400m-sdio.h"
@@ -78,6 +88,7 @@
#define D_SUBMODULE fw
#include "sdio-debug-levels.h"
+
/*
* Send a boot-mode command to the SDIO function
*
@@ -139,7 +150,7 @@ error_too_big:
/*
- * Read an ack from the device's boot-mode (polling)
+ * Read an ack from the device's boot-mode
*
* @i2400m:
* @_ack: pointer to where to store the read data
@@ -150,75 +161,49 @@ error_too_big:
* The ACK for a BM command is always at least sizeof(*ack) bytes, so
* check for that. We don't need to check for device reboots
*
- * NOTE: We do an artificial timeout of 1 sec over the SDIO timeout;
- * this way we have control over it...there is no way that I know
- * of setting an SDIO transaction timeout.
*/
ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
struct i2400m_bootrom_header *ack,
size_t ack_size)
{
- int result;
- ssize_t rx_size;
- u64 timeout;
+ ssize_t result;
struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
struct sdio_func *func = i2400ms->func;
struct device *dev = &func->dev;
+ int size;
BUG_ON(sizeof(*ack) > ack_size);
d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n",
i2400m, ack, ack_size);
- timeout = get_jiffies_64() + 2 * HZ;
- sdio_claim_host(func);
- while (1) {
- if (time_after64(get_jiffies_64(), timeout)) {
- rx_size = -ETIMEDOUT;
- dev_err(dev, "timeout waiting for ack data\n");
- goto error_timedout;
- }
+ spin_lock(&i2400m->rx_lock);
+ i2400ms->bm_ack_size = -EINPROGRESS;
+ spin_unlock(&i2400m->rx_lock);
- /* Find the RX size, check if it fits or not -- it if
- * doesn't fit, fail, as we have no way to dispose of
- * the extra data. */
- rx_size = __i2400ms_rx_get_size(i2400ms);
- if (rx_size < 0)
- goto error_rx_get_size;
- result = -ENOSPC; /* Check it fits */
- if (rx_size < sizeof(*ack)) {
- rx_size = -EIO;
- dev_err(dev, "HW BUG? received is too small (%zu vs "
- "%zu needed)\n", sizeof(*ack), rx_size);
- goto error_too_small;
- }
- if (rx_size > I2400M_BM_ACK_BUF_SIZE) {
- dev_err(dev, "SW BUG? BM_ACK_BUF is too small (%u vs "
- "%zu needed)\n", I2400M_BM_ACK_BUF_SIZE,
- rx_size);
- goto error_too_small;
- }
+ result = wait_event_timeout(i2400ms->bm_wfa_wq,
+ i2400ms->bm_ack_size != -EINPROGRESS,
+ 2 * HZ);
+ if (result == 0) {
+ result = -ETIMEDOUT;
+ dev_err(dev, "BM: error waiting for an ack\n");
+ goto error_timeout;
+ }
- /* Read it */
- result = sdio_memcpy_fromio(func, i2400m->bm_ack_buf,
- I2400MS_DATA_ADDR, rx_size);
- if (result == -ETIMEDOUT || result == -ETIME)
- continue;
- if (result < 0) {
- dev_err(dev, "BM SDIO receive (%zu B) failed: %d\n",
- rx_size, result);
- goto error_read;
- } else
- break;
+ spin_lock(&i2400m->rx_lock);
+ result = i2400ms->bm_ack_size;
+ BUG_ON(result == -EINPROGRESS);
+ if (result < 0) /* so we exit when rx_release() is called */
+ dev_err(dev, "BM: %s failed: %zd\n", __func__, result);
+ else {
+ size = min(ack_size, i2400ms->bm_ack_size);
+ memcpy(ack, i2400m->bm_ack_buf, size);
}
- rx_size = min((ssize_t)ack_size, rx_size);
- memcpy(ack, i2400m->bm_ack_buf, rx_size);
-error_read:
-error_too_small:
-error_rx_get_size:
-error_timedout:
- sdio_release_host(func);
- d_fnend(5, dev, "(i2400m %p ack %p size %zu) = %ld\n",
- i2400m, ack, ack_size, (long) rx_size);
- return rx_size;
+ i2400ms->bm_ack_size = -EINPROGRESS;
+ spin_unlock(&i2400m->rx_lock);
+
+error_timeout:
+ d_fnend(5, dev, "(i2400m %p ack %p size %zu) = %zd\n",
+ i2400m, ack, ack_size, result);
+ return result;
}
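The rewritten ack path is a small producer/consumer handshake around bm_ack_size, protected by rx_lock: the waiter arms the handshake and sleeps, and the IRQ-driven RX path (sdio-rx.c below) publishes the ack and wakes it up. Condensed from the two sides as they appear in this patch:

	/* consumer: i2400ms_bus_bm_wait_for_ack() */
	spin_lock(&i2400m->rx_lock);
	i2400ms->bm_ack_size = -EINPROGRESS;		/* arm the wait */
	spin_unlock(&i2400m->rx_lock);
	if (!wait_event_timeout(i2400ms->bm_wfa_wq,
				i2400ms->bm_ack_size != -EINPROGRESS,
				2 * HZ))
		return -ETIMEDOUT;

	/* producer: i2400ms_rx(), while in boot mode */
	spin_lock(&i2400m->rx_lock);
	i2400ms->bm_ack_size = rx_size;			/* publish the ack */
	spin_unlock(&i2400m->rx_lock);
	memcpy(i2400m->bm_ack_buf, skb->data, rx_size);
	wake_up(&i2400ms->bm_wfa_wq);

i2400ms_rx_release() reuses the same mechanism, setting bm_ack_size to -EINTR and waking all waiters so a pending firmware load aborts cleanly.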
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
index a3008b904f7..321beadf6e4 100644
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -69,6 +69,13 @@
#define D_SUBMODULE rx
#include "sdio-debug-levels.h"
+static const __le32 i2400m_ACK_BARKER[4] = {
+ __constant_cpu_to_le32(I2400M_ACK_BARKER),
+ __constant_cpu_to_le32(I2400M_ACK_BARKER),
+ __constant_cpu_to_le32(I2400M_ACK_BARKER),
+ __constant_cpu_to_le32(I2400M_ACK_BARKER)
+};
+
/*
* Read and return the amount of bytes available for RX
@@ -131,25 +138,35 @@ void i2400ms_rx(struct i2400ms *i2400ms)
ret = rx_size;
goto error_get_size;
}
+
ret = -ENOMEM;
skb = alloc_skb(rx_size, GFP_ATOMIC);
if (NULL == skb) {
dev_err(dev, "RX: unable to alloc skb\n");
goto error_alloc_skb;
}
-
ret = sdio_memcpy_fromio(func, skb->data,
I2400MS_DATA_ADDR, rx_size);
if (ret < 0) {
dev_err(dev, "RX: SDIO data read failed: %d\n", ret);
goto error_memcpy_fromio;
}
- /* Check if device has reset */
- if (!memcmp(skb->data, i2400m_NBOOT_BARKER,
- sizeof(i2400m_NBOOT_BARKER))
- || !memcmp(skb->data, i2400m_SBOOT_BARKER,
- sizeof(i2400m_SBOOT_BARKER))) {
+
+ rmb(); /* make sure we get boot_mode from dev_reset_handle */
+ if (i2400m->boot_mode == 1) {
+ spin_lock(&i2400m->rx_lock);
+ i2400ms->bm_ack_size = rx_size;
+ spin_unlock(&i2400m->rx_lock);
+ memcpy(i2400m->bm_ack_buf, skb->data, rx_size);
+ wake_up(&i2400ms->bm_wfa_wq);
+ dev_err(dev, "RX: SDIO boot mode message\n");
+ kfree_skb(skb);
+ } else if (unlikely(!memcmp(skb->data, i2400m_NBOOT_BARKER,
+ sizeof(i2400m_NBOOT_BARKER))
+ || !memcmp(skb->data, i2400m_SBOOT_BARKER,
+ sizeof(i2400m_SBOOT_BARKER)))) {
ret = i2400m_dev_reset_handle(i2400m);
+ dev_err(dev, "RX: SDIO reboot barker\n");
kfree_skb(skb);
} else {
skb_put(skb, rx_size);
@@ -179,7 +196,6 @@ void i2400ms_irq(struct sdio_func *func)
{
int ret;
struct i2400ms *i2400ms = sdio_get_drvdata(func);
- struct i2400m *i2400m = &i2400ms->i2400m;
struct device *dev = &func->dev;
int val;
@@ -194,10 +210,7 @@ void i2400ms_irq(struct sdio_func *func)
goto error_no_irq;
}
sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
- if (WARN_ON(i2400m->boot_mode != 0))
- dev_err(dev, "RX: SW BUG? boot mode and IRQ is up?\n");
- else
- i2400ms_rx(i2400ms);
+ i2400ms_rx(i2400ms);
error_no_irq:
d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
return;
@@ -214,8 +227,15 @@ int i2400ms_rx_setup(struct i2400ms *i2400ms)
int result;
struct sdio_func *func = i2400ms->func;
struct device *dev = &func->dev;
+ struct i2400m *i2400m = &i2400ms->i2400m;
d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
+
+ init_waitqueue_head(&i2400ms->bm_wfa_wq);
+ spin_lock(&i2400m->rx_lock);
+ i2400ms->bm_wait_result = -EINPROGRESS;
+ spin_unlock(&i2400m->rx_lock);
+
sdio_claim_host(func);
result = sdio_claim_irq(func, i2400ms_irq);
if (result < 0) {
@@ -245,8 +265,13 @@ void i2400ms_rx_release(struct i2400ms *i2400ms)
int result;
struct sdio_func *func = i2400ms->func;
struct device *dev = &func->dev;
+ struct i2400m *i2400m = &i2400ms->i2400m;
d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
+ spin_lock(&i2400m->rx_lock);
+ i2400ms->bm_ack_size = -EINTR;
+ spin_unlock(&i2400m->rx_lock);
+ wake_up_all(&i2400ms->bm_wfa_wq);
sdio_claim_host(func);
sdio_writeb(func, 0, I2400MS_INTR_ENABLE_ADDR, &result);
sdio_release_irq(func);
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 5ac5e76701c..2538825d1c6 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -78,6 +78,14 @@ static const char *i2400ms_bus_fw_names[] = {
};
+static const struct i2400m_poke_table i2400ms_pokes[] = {
+ I2400M_FW_POKE(0x6BE260, 0x00000088),
+ I2400M_FW_POKE(0x080550, 0x00000005),
+ I2400M_FW_POKE(0xAE0000, 0x00000000),
+ I2400M_FW_POKE(0x000000, 0x00000000), /* MUST be 0 terminated or bad
+ * things will happen */
+};
+
/*
* Enable the SDIO function
*
@@ -148,19 +156,14 @@ int i2400ms_bus_dev_start(struct i2400m *i2400m)
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
msleep(200);
- result = i2400ms_rx_setup(i2400ms);
- if (result < 0)
- goto error_rx_setup;
result = i2400ms_tx_setup(i2400ms);
if (result < 0)
goto error_tx_setup;
d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
return result;
- i2400ms_tx_release(i2400ms);
error_tx_setup:
- i2400ms_rx_release(i2400ms);
-error_rx_setup:
+ i2400ms_tx_release(i2400ms);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
return result;
}
@@ -174,7 +177,6 @@ void i2400ms_bus_dev_stop(struct i2400m *i2400m)
struct device *dev = &func->dev;
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
- i2400ms_rx_release(i2400ms);
i2400ms_tx_release(i2400ms);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
@@ -255,7 +257,7 @@ error_kzalloc:
static
int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
{
- int result;
+ int result = 0;
struct i2400ms *i2400ms =
container_of(i2400m, struct i2400ms, i2400m);
struct device *dev = i2400m_dev(i2400m);
@@ -280,8 +282,25 @@ int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
sizeof(i2400m_COLD_BOOT_BARKER));
else if (rt == I2400M_RT_BUS) {
do_bus_reset:
- dev_err(dev, "FIXME: SDIO bus reset not implemented\n");
- result = rt == I2400M_RT_WARM ? -ENODEV : -ENOSYS;
+ /* call netif_tx_disable() before sending IOE disable,
+ * so that all the tx from network layer are stopped
+ * while IOE is being reset. Make sure it is called
+ * only after register_netdev() was issued.
+ */
+ if (i2400m->wimax_dev.net_dev->reg_state == NETREG_REGISTERED)
+ netif_tx_disable(i2400m->wimax_dev.net_dev);
+
+ i2400ms_rx_release(i2400ms);
+ sdio_claim_host(i2400ms->func);
+ sdio_disable_func(i2400ms->func);
+ sdio_release_host(i2400ms->func);
+
+ /* Wait for the device to settle */
+ msleep(I2400MS_SETTLE_TIME);
+
+ result = i2400ms_enable_function(i2400ms->func);
+ if (result >= 0)
+ i2400ms_rx_setup(i2400ms);
} else
BUG();
if (result < 0 && rt != I2400M_RT_BUS) {
@@ -404,24 +423,32 @@ int i2400ms_probe(struct sdio_func *func,
i2400m->bus_dev_stop = i2400ms_bus_dev_stop;
i2400m->bus_tx_kick = i2400ms_bus_tx_kick;
i2400m->bus_reset = i2400ms_bus_reset;
+ /* The iwmc3200-wimax sometimes requires the driver to try
+ * hard when we paint it into a corner. */
+ i2400m->bus_bm_retries = I3200_BOOT_RETRIES;
i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send;
i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack;
i2400m->bus_fw_names = i2400ms_bus_fw_names;
i2400m->bus_bm_mac_addr_impaired = 1;
-
- result = i2400ms_enable_function(i2400ms->func);
- if (result < 0) {
- dev_err(dev, "Cannot enable SDIO function: %d\n", result);
- goto error_func_enable;
- }
+ i2400m->bus_bm_pokes_table = &i2400ms_pokes[0];
sdio_claim_host(func);
result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
+ sdio_release_host(func);
if (result < 0) {
dev_err(dev, "Failed to set block size: %d\n", result);
goto error_set_blk_size;
}
- sdio_release_host(func);
+
+ result = i2400ms_enable_function(i2400ms->func);
+ if (result < 0) {
+ dev_err(dev, "Cannot enable SDIO function: %d\n", result);
+ goto error_func_enable;
+ }
+
+ result = i2400ms_rx_setup(i2400ms);
+ if (result < 0)
+ goto error_rx_setup;
result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT);
if (result < 0) {
@@ -440,12 +467,14 @@ int i2400ms_probe(struct sdio_func *func,
error_debugfs_add:
i2400m_release(i2400m);
error_setup:
- sdio_set_drvdata(func, NULL);
+ i2400ms_rx_release(i2400ms);
+error_rx_setup:
sdio_claim_host(func);
-error_set_blk_size:
sdio_disable_func(func);
sdio_release_host(func);
error_func_enable:
+error_set_blk_size:
+ sdio_set_drvdata(func, NULL);
free_netdev(net_dev);
error_alloc_netdev:
return result;
@@ -462,6 +491,7 @@ void i2400ms_remove(struct sdio_func *func)
d_fnstart(3, dev, "SDIO func %p\n", func);
debugfs_remove_recursive(i2400ms->debugfs_dentry);
+ i2400ms_rx_release(i2400ms);
i2400m_release(i2400m);
sdio_set_drvdata(func, NULL);
sdio_claim_host(func);
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index 613a88ffd65..fa16ccf8e26 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -278,6 +278,48 @@ enum {
#define TAIL_FULL ((void *)~(unsigned long)NULL)
/*
+ * Calculate how much tail room is available
+ *
+ * Note the trick here. This path is ONLY called for Case A (see
+ * i2400m_tx_fifo_push() below), where we have:
+ *
+ * Case A
+ * N ___________
+ * | tail room |
+ * | |
+ * |<- IN ->|
+ * | |
+ * | data |
+ * | |
+ * |<- OUT ->|
+ * | |
+ * | head room |
+ * 0 -----------
+ *
+ * When calculating the tail_room, tx_in (i2400m->tx_in modulo the
+ * buffer size) can be zero if i2400m->tx_in is right at the end of
+ * the buffer (really full buffer, no head room). In that case
+ * tail_room would come out as I2400M_TX_BUF_SIZE although it is
+ * actually zero; hence the final mod (%) operation. However, that
+ * modulo breaks the case where i2400m->tx_in itself is zero (nothing
+ * written yet, full tail room), so we treat it as a special case.
+ */
+static inline
+size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
+{
+ size_t tail_room;
+ size_t tx_in;
+
+ if (unlikely(i2400m->tx_in == 0))
+ return I2400M_TX_BUF_SIZE;
+ tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
+ tail_room = I2400M_TX_BUF_SIZE - tx_in;
+ tail_room %= I2400M_TX_BUF_SIZE;
+ return tail_room;
+}
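A worked example of the arithmetic above, using an illustrative buffer size of 1024 (the real constant is I2400M_TX_BUF_SIZE; only the math matters):

	tx_in = 0:    special case, return 1024 (nothing written, all tail room)
	tx_in = 300:  1024 - (300 % 1024) = 724;   724 % 1024 = 724
	tx_in = 2048: 1024 - (2048 % 1024) = 1024; 1024 % 1024 = 0
	              (wrapped and really full: the final mod turns the bogus
	               1024 into the correct 0)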
+
+
+/*
* Allocate @size bytes in the TX fifo, return a pointer to it
*
* @i2400m: device descriptor
@@ -338,7 +380,7 @@ void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
return NULL;
}
/* Is there space at the tail? */
- tail_room = I2400M_TX_BUF_SIZE - i2400m->tx_in % I2400M_TX_BUF_SIZE;
+ tail_room = __i2400m_tx_tail_room(i2400m);
if (tail_room < needed_size) {
if (i2400m->tx_out % I2400M_TX_BUF_SIZE
< i2400m->tx_in % I2400M_TX_BUF_SIZE) {
@@ -367,17 +409,29 @@ void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
* (I2400M_PL_PAD for the payloads, I2400M_TX_PLD_SIZE for the
* header).
*
+ * Tail room can get to be zero if a message was opened when there was
+ * space only for a header. _tx_close() will mark it as to-skip (as it
+ * will have no payloads) and there will be no more space to flush, so
+ * nothing has to be done here. This is probably cheaper than ensuring
+ * in _tx_new() that there is some space for payloads, since we could
+ * still hit the same problem if the payload did not fit.
+ *
* Note:
*
* Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ *
+ * This path is only taken for Case A FIFO situations [see
+ * i2400m_tx_fifo_push()]
*/
static
void i2400m_tx_skip_tail(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
size_t tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
- size_t tail_room = I2400M_TX_BUF_SIZE - tx_in;
+ size_t tail_room = __i2400m_tx_tail_room(i2400m);
struct i2400m_msg_hdr *msg = i2400m->tx_buf + tx_in;
+ if (unlikely(tail_room == 0))
+ return;
BUG_ON(tail_room < sizeof(*msg));
msg->size = tail_room | I2400M_TX_SKIP;
d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n",
@@ -474,10 +528,18 @@ void i2400m_tx_close(struct i2400m *i2400m)
struct i2400m_msg_hdr *tx_msg_moved;
size_t aligned_size, padding, hdr_size;
void *pad_buf;
+ unsigned num_pls;
if (tx_msg->size & I2400M_TX_SKIP) /* a skipper? nothing to do */
goto out;
-
+ num_pls = le16_to_cpu(tx_msg->num_pls);
+ /* We can get this situation when a new message was started
+ * and there was no space to add payloads before hitting the
+ * tail (and taking padding into consideration). */
+ if (num_pls == 0) {
+ tx_msg->size |= I2400M_TX_SKIP;
+ goto out;
+ }
/* Relocate the message header
*
* Find the current header size, align it to 16 and if we need
@@ -491,7 +553,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
*/
hdr_size = sizeof(*tx_msg)
+ le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]);
- hdr_size = ALIGN(hdr_size, I2400M_PL_PAD);
+ hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN);
tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
tx_msg_moved = (void *) tx_msg + tx_msg->offset;
memmove(tx_msg_moved, tx_msg, hdr_size);
@@ -574,7 +636,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
d_fnstart(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u)\n",
i2400m, buf, buf_len, pl_type);
- padded_len = ALIGN(buf_len, I2400M_PL_PAD);
+ padded_len = ALIGN(buf_len, I2400M_PL_ALIGN);
d_printf(5, dev, "padded_len %zd buf_len %zd\n", padded_len, buf_len);
/* If there is no current TX message, create one; if the
* current one is out of payload slots or we have a singleton,
@@ -591,6 +653,8 @@ try_new:
i2400m_tx_close(i2400m);
i2400m_tx_new(i2400m);
}
+ if (i2400m->tx_msg == NULL)
+ goto error_tx_new;
if (i2400m->tx_msg->size + padded_len > I2400M_TX_BUF_SIZE / 2) {
d_printf(2, dev, "TX: message too big, going new\n");
i2400m_tx_close(i2400m);
@@ -773,7 +837,6 @@ void i2400m_tx_msg_sent(struct i2400m *i2400m)
n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
i2400m->tx_out %= I2400M_TX_BUF_SIZE;
i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
- netif_start_queue(i2400m->wimax_dev.net_dev);
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index ca4151a9e22..cfdaf69da9d 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -254,8 +254,10 @@ do_bus_reset:
dev_err(dev, "USB reset failed (%d), giving up!\n",
result);
}
- } else
+ } else {
+ result = -EINVAL; /* shut gcc up in certain arches */
BUG();
+ }
if (result < 0
&& result != -EINVAL /* device is gone */
&& rt != I2400M_RT_BUS) {
@@ -399,6 +401,7 @@ int i2400mu_probe(struct usb_interface *iface,
i2400m->bus_dev_stop = i2400mu_bus_dev_stop;
i2400m->bus_tx_kick = i2400mu_bus_tx_kick;
i2400m->bus_reset = i2400mu_bus_reset;
+ i2400m->bus_bm_retries = I2400M_BOOT_RETRIES;
i2400m->bus_bm_cmd_send = i2400mu_bus_bm_cmd_send;
i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
i2400m->bus_fw_names = i2400mu_bus_fw_names;
@@ -505,27 +508,52 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
#ifdef CONFIG_PM
struct usb_device *usb_dev = i2400mu->usb_dev;
#endif
+ unsigned is_autosuspend = 0;
struct i2400m *i2400m = &i2400mu->i2400m;
+#ifdef CONFIG_PM
+ if (usb_dev->auto_pm > 0)
+ is_autosuspend = 1;
+#endif
+
d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event);
if (i2400m->updown == 0)
goto no_firmware;
- d_printf(1, dev, "fw up, requesting standby\n");
+ if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) {
+ /* ugh -- the device is connected and this suspend
+ * request is an autosuspend one (not a system standby
+ * / hibernate).
+ *
+ * The only way the device can go to standby is if the
+ * link with the base station is in IDLE mode; were that
+ * the case, we would be in state
+ * I2400M_SS_CONNECTED_IDLE. But we are not.
+ *
+ * If we *tell* it to enter power save now, it will reset
+ * as a precautionary measure, so if this is an
+ * autosuspend request, say no; it will come back
+ * later, when the link is IDLE.
+ */
+ result = -EBADF;
+ d_printf(1, dev, "fw up, link up, not-idle, autosuspend: "
+ "not entering powersave\n");
+ goto error_not_now;
+ }
+ d_printf(1, dev, "fw up: entering powersave\n");
atomic_dec(&i2400mu->do_autopm);
result = i2400m_cmd_enter_powersave(i2400m);
atomic_inc(&i2400mu->do_autopm);
-#ifdef CONFIG_PM
- if (result < 0 && usb_dev->auto_pm == 0) {
+ if (result < 0 && !is_autosuspend) {
/* System suspend, can't fail */
dev_err(dev, "failed to suspend, will reset on resume\n");
result = 0;
}
-#endif
if (result < 0)
goto error_enter_powersave;
i2400mu_notification_release(i2400mu);
- d_printf(1, dev, "fw up, got standby\n");
+ d_printf(1, dev, "powersave requested\n");
error_enter_powersave:
+error_not_now:
no_firmware:
d_fnend(3, dev, "(iface %p pm_msg %u) = %d\n",
iface, pm_msg.event, result);
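The suspend policy after this change reduces to three outcomes; a condensed sketch of the decision (auto_pm is the usb_device field this kernel uses to tell autosuspend apart from a system sleep, mirrored here by is_autosuspend):

	if (i2400m->updown == 0)
		goto no_firmware;		/* nothing to ask the device */
	if (is_autosuspend
	    && i2400m->state == I2400M_SS_DATA_PATH_CONNECTED) {
		result = -EBADF;		/* link busy: refuse, autosuspend retries later */
		goto error_not_now;
	}
	result = i2400m_cmd_enter_powersave(i2400m);
	if (result < 0 && !is_autosuspend)
		result = 0;			/* system suspend must not fail; reset on resume */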
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 8a0823588c5..5bc00db21b2 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -146,14 +146,14 @@ config LIBERTAS_CS
A driver for Marvell Libertas 8385 CompactFlash devices.
config LIBERTAS_SDIO
- tristate "Marvell Libertas 8385 and 8686 SDIO 802.11b/g cards"
+ tristate "Marvell Libertas 8385/8686/8688 SDIO 802.11b/g cards"
depends on LIBERTAS && MMC
---help---
- A driver for Marvell Libertas 8385 and 8686 SDIO devices.
+ A driver for Marvell Libertas 8385/8686/8688 SDIO devices.
config LIBERTAS_SPI
tristate "Marvell Libertas 8686 SPI 802.11b/g cards"
- depends on LIBERTAS && SPI && GENERIC_GPIO
+ depends on LIBERTAS && SPI
---help---
A driver for Marvell Libertas 8686 SPI devices.
@@ -310,7 +310,7 @@ config PRISM54
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>.
- The module will be called prism54.ko.
+ The module will be called prism54.
config USB_ZD1201
tristate "USB ZD1201 based Wireless device support"
@@ -333,6 +333,7 @@ config USB_ZD1201
config USB_NET_RNDIS_WLAN
tristate "Wireless RNDIS USB support"
depends on USB && WLAN_80211 && EXPERIMENTAL
+ depends on CFG80211
select USB_USBNET
select USB_NET_CDCETHER
select USB_NET_RNDIS_HOST
@@ -430,9 +431,17 @@ config RTL8187
ASUS P5B Deluxe
Toshiba Satellite Pro series of laptops
Asus Wireless Link
+ Linksys WUSB54GC-EU
Thanks to Realtek for their support!
+# If possible, automatically enable LEDs for RTL8187.
+
+config RTL8187_LEDS
+ bool
+ depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
+ default y
+
config ADM8211
tristate "ADMtek ADM8211 support"
depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL
@@ -483,9 +492,7 @@ config MWL8K
will be called mwl8k. If unsure, say N.
source "drivers/net/wireless/p54/Kconfig"
-source "drivers/net/wireless/ath5k/Kconfig"
-source "drivers/net/wireless/ath9k/Kconfig"
-source "drivers/net/wireless/ar9170/Kconfig"
+source "drivers/net/wireless/ath/Kconfig"
source "drivers/net/wireless/ipw2x00/Kconfig"
source "drivers/net/wireless/iwlwifi/Kconfig"
source "drivers/net/wireless/hostap/Kconfig"
@@ -494,5 +501,7 @@ source "drivers/net/wireless/b43legacy/Kconfig"
source "drivers/net/wireless/zd1211rw/Kconfig"
source "drivers/net/wireless/rt2x00/Kconfig"
source "drivers/net/wireless/orinoco/Kconfig"
+source "drivers/net/wireless/wl12xx/Kconfig"
+source "drivers/net/wireless/iwmc3200wifi/Kconfig"
endmenu
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 50e7fba7f0e..7a4647e78fd 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -55,8 +55,10 @@ obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
-obj-$(CONFIG_ATH5K) += ath5k/
-obj-$(CONFIG_ATH9K) += ath9k/
-obj-$(CONFIG_AR9170_USB) += ar9170/
+obj-$(CONFIG_ATH_COMMON) += ath/
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
+
+obj-$(CONFIG_WL12XX) += wl12xx/
+
+obj-$(CONFIG_IWM) += iwmc3200wifi/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f7182179501..2b9e379994a 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1311,18 +1311,20 @@ static int adm8211_config(struct ieee80211_hw *dev, u32 changed)
return 0;
}
-static int adm8211_config_interface(struct ieee80211_hw *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
+static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
+ u32 changes)
{
struct adm8211_priv *priv = dev->priv;
+ if (!(changes & BSS_CHANGED_BSSID))
+ return;
+
if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) {
adm8211_set_bssid(dev, conf->bssid);
memcpy(priv->bssid, conf->bssid, ETH_ALEN);
}
-
- return 0;
}
static void adm8211_configure_filter(struct ieee80211_hw *dev,
@@ -1753,7 +1755,7 @@ static const struct ieee80211_ops adm8211_ops = {
.add_interface = adm8211_add_interface,
.remove_interface = adm8211_remove_interface,
.config = adm8211_config,
- .config_interface = adm8211_config_interface,
+ .bss_info_changed = adm8211_bss_info_changed,
.configure_filter = adm8211_configure_filter,
.get_stats = adm8211_get_stats,
.get_tx_stats = adm8211_get_tx_stats,
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index d7347573912..c70604f0329 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1935,7 +1935,7 @@ static int mpi_start_xmit(struct sk_buff *skb, struct net_device *dev) {
netif_stop_queue (dev);
if (npacks > MAXTXQ) {
dev->stats.tx_fifo_errors++;
- return 1;
+ return NETDEV_TX_BUSY;
}
skb_queue_tail (&ai->txq, skb);
return 0;
@@ -2139,7 +2139,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
if (i == MAX_FIDS / 2) {
dev->stats.tx_fifo_errors++;
- return 1;
+ return NETDEV_TX_BUSY;
}
}
/* check min length*/
@@ -2193,7 +2193,8 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
if (test_bit(FLAG_MPI, &priv->flags)) {
/* Not implemented yet for MPI350 */
netif_stop_queue(dev);
- return -ENETDOWN;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
if ( skb == NULL ) {
@@ -2210,7 +2211,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
if (i == MAX_FIDS) {
dev->stats.tx_fifo_errors++;
- return 1;
+ return NETDEV_TX_BUSY;
}
}
/* check min length*/
@@ -6467,6 +6468,7 @@ static int airo_get_encode(struct net_device *dev,
{
struct airo_info *local = dev->ml_priv;
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ int wep_key_len;
u8 buf[16];
if (!local->wep_capable)
@@ -6500,11 +6502,13 @@ static int airo_get_encode(struct net_device *dev,
dwrq->flags |= index + 1;
/* Copy the key to the user buffer */
- dwrq->length = get_wep_key(local, index, &buf[0], sizeof(buf));
- if (dwrq->length != -1)
- memcpy(extra, buf, dwrq->length);
- else
+ wep_key_len = get_wep_key(local, index, &buf[0], sizeof(buf));
+ if (wep_key_len < 0) {
dwrq->length = 0;
+ } else {
+ dwrq->length = wep_key_len;
+ memcpy(extra, buf, dwrq->length);
+ }
return 0;
}
@@ -6617,7 +6621,7 @@ static int airo_get_encodeext(struct net_device *dev,
struct airo_info *local = dev->ml_priv;
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int idx, max_key_len;
+ int idx, max_key_len, wep_key_len;
u8 buf[16];
if (!local->wep_capable)
@@ -6661,11 +6665,13 @@ static int airo_get_encodeext(struct net_device *dev,
memset(extra, 0, 16);
/* Copy the key to the user buffer */
- ext->key_len = get_wep_key(local, idx, &buf[0], sizeof(buf));
- if (ext->key_len != -1)
- memcpy(extra, buf, ext->key_len);
- else
+ wep_key_len = get_wep_key(local, idx, &buf[0], sizeof(buf));
+ if (wep_key_len < 0) {
ext->key_len = 0;
+ } else {
+ ext->key_len = wep_key_len;
+ memcpy(extra, buf, ext->key_len);
+ }
return 0;
}
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
index a54a67c425c..d84caf198a2 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/net/wireless/arlan-main.c
@@ -1199,7 +1199,7 @@ bad_end:
arlan_process_interrupt(dev);
netif_stop_queue (dev);
ARLAN_DEBUG_EXIT("arlan_tx");
- return 1;
+ return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 744f4f4dd3d..4efbdbe6d6b 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1873,18 +1873,18 @@ static void at76_dwork_hw_scan(struct work_struct *work)
if (ret != CMD_STATUS_COMPLETE) {
queue_delayed_work(priv->hw->workqueue, &priv->dwork_hw_scan,
SCAN_POLL_INTERVAL);
- goto exit;
+ mutex_unlock(&priv->mtx);
+ return;
}
- ieee80211_scan_completed(priv->hw, false);
-
if (is_valid_ether_addr(priv->bssid))
at76_join(priv);
- ieee80211_wake_queues(priv->hw);
-
-exit:
mutex_unlock(&priv->mtx);
+
+ ieee80211_scan_completed(priv->hw, false);
+
+ ieee80211_wake_queues(priv->hw);
}
static int at76_hw_scan(struct ieee80211_hw *hw,
@@ -1965,13 +1965,18 @@ static int at76_config(struct ieee80211_hw *hw, u32 changed)
return 0;
}
-static int at76_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
+static void at76_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
+ u32 changed)
{
struct at76_priv *priv = hw->priv;
at76_dbg(DBG_MAC80211, "%s():", __func__);
+
+ if (!(changed & BSS_CHANGED_BSSID))
+ return;
+
at76_dbg_dump(DBG_MAC80211, conf->bssid, ETH_ALEN, "bssid:");
mutex_lock(&priv->mtx);
@@ -1983,8 +1988,6 @@ static int at76_config_interface(struct ieee80211_hw *hw,
at76_join(priv);
mutex_unlock(&priv->mtx);
-
- return 0;
}
/* must be atomic */
@@ -2076,7 +2079,7 @@ static const struct ieee80211_ops at76_ops = {
.add_interface = at76_add_interface,
.remove_interface = at76_remove_interface,
.config = at76_config,
- .config_interface = at76_config_interface,
+ .bss_info_changed = at76_bss_info_changed,
.configure_filter = at76_configure_filter,
.start = at76_mac80211_start,
.stop = at76_mac80211_stop,
@@ -2250,6 +2253,7 @@ static int at76_init_new_device(struct at76_priv *priv,
/* mac80211 initialisation */
priv->hw->wiphy->max_scan_ssids = 1;
+ priv->hw->wiphy->max_scan_ie_len = 0;
priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band;
priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
@@ -2311,8 +2315,7 @@ static void at76_delete_device(struct at76_priv *priv)
del_timer_sync(&ledtrig_tx_timer);
- if (priv->rx_skb)
- kfree_skb(priv->rx_skb);
+ kfree_skb(priv->rx_skb);
usb_put_dev(priv->udev);
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
new file mode 100644
index 00000000000..d26e7b48531
--- /dev/null
+++ b/drivers/net/wireless/ath/Kconfig
@@ -0,0 +1,8 @@
+config ATH_COMMON
+ tristate "Atheros Wireless Cards"
+ depends on ATH5K || ATH9K || AR9170_USB
+
+source "drivers/net/wireless/ath/ath5k/Kconfig"
+source "drivers/net/wireless/ath/ath9k/Kconfig"
+source "drivers/net/wireless/ath/ar9170/Kconfig"
+
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
new file mode 100644
index 00000000000..4bb0132ada3
--- /dev/null
+++ b/drivers/net/wireless/ath/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_ATH5K) += ath5k/
+obj-$(CONFIG_ATH9K) += ath9k/
+obj-$(CONFIG_AR9170_USB) += ar9170/
+
+obj-$(CONFIG_ATH_COMMON) += ath.o
+ath-objs := main.o regd.o
diff --git a/drivers/net/wireless/ar9170/Kconfig b/drivers/net/wireless/ath/ar9170/Kconfig
index de4281fda12..b99e3263ee6 100644
--- a/drivers/net/wireless/ar9170/Kconfig
+++ b/drivers/net/wireless/ath/ar9170/Kconfig
@@ -2,6 +2,7 @@ config AR9170_USB
tristate "Atheros AR9170 802.11n USB support"
depends on USB && MAC80211 && WLAN_80211 && EXPERIMENTAL
select FW_LOADER
+ select ATH_COMMON
help
This is a driver for the Atheros "otus" 802.11n USB devices.
diff --git a/drivers/net/wireless/ar9170/Makefile b/drivers/net/wireless/ath/ar9170/Makefile
index 8d91c7ee321..8d91c7ee321 100644
--- a/drivers/net/wireless/ar9170/Makefile
+++ b/drivers/net/wireless/ath/ar9170/Makefile
diff --git a/drivers/net/wireless/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index f4fb2e94aea..bb97981fb24 100644
--- a/drivers/net/wireless/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -40,7 +40,7 @@
#include <linux/completion.h>
#include <linux/spinlock.h>
-#include <net/wireless.h>
+#include <net/cfg80211.h>
#include <net/mac80211.h>
#ifdef CONFIG_AR9170_LEDS
#include <linux/leds.h>
@@ -48,6 +48,8 @@
#include "eeprom.h"
#include "hw.h"
+#include "../regd.h"
+
#define PAYLOAD_MAX (AR9170_MAX_CMD_LEN/4 - 1)
enum ar9170_bw {
@@ -58,6 +60,21 @@ enum ar9170_bw {
__AR9170_NUM_BW,
};
+static inline enum ar9170_bw nl80211_to_ar9170(enum nl80211_channel_type type)
+{
+ switch (type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ return AR9170_BW_20;
+ case NL80211_CHAN_HT40MINUS:
+ return AR9170_BW_40_BELOW;
+ case NL80211_CHAN_HT40PLUS:
+ return AR9170_BW_40_ABOVE;
+ default:
+ BUG();
+ }
+}
+
enum ar9170_rf_init_mode {
AR9170_RFI_NONE,
AR9170_RFI_WARM,
@@ -74,6 +91,7 @@ struct ar9170_led {
struct led_classdev l;
char name[32];
unsigned int toggled;
+ bool last_state;
bool registered;
};
@@ -84,20 +102,31 @@ enum ar9170_device_state {
AR9170_STOPPED,
AR9170_IDLE,
AR9170_STARTED,
- AR9170_ASSOCIATED,
};
+struct ar9170_rxstream_mpdu_merge {
+ struct ar9170_rx_head plcp;
+ bool has_plcp;
+};
+
+#define AR9170_QUEUE_TIMEOUT 64
+#define AR9170_TX_TIMEOUT 8
+#define AR9170_JANITOR_DELAY 128
+#define AR9170_TX_INVALID_RATE 0xffffffff
+
struct ar9170 {
struct ieee80211_hw *hw;
struct mutex mutex;
enum ar9170_device_state state;
+ unsigned long bad_hw_nagger;
int (*open)(struct ar9170 *);
void (*stop)(struct ar9170 *);
- int (*tx)(struct ar9170 *, struct sk_buff *, bool, unsigned int);
+ int (*tx)(struct ar9170 *, struct sk_buff *);
int (*exec_cmd)(struct ar9170 *, enum ar9170_cmd, u32 ,
void *, u32 , void *);
void (*callback_cmd)(struct ar9170 *, u32 , void *);
+ int (*flush)(struct ar9170 *);
/* interface mode settings */
struct ieee80211_vif *vif;
@@ -117,7 +146,8 @@ struct ar9170 {
struct work_struct filter_config_work;
u64 cur_mc_hash, want_mc_hash;
u32 cur_filter, want_filter;
- unsigned int filter_changed;
+ unsigned long filter_changed;
+ unsigned int filter_state;
bool sniffer_enabled;
/* PHY */
@@ -151,21 +181,35 @@ struct ar9170 {
/* EEPROM */
struct ar9170_eeprom eeprom;
+ struct ath_regulatory regulatory;
- /* global tx status for unregistered Stations. */
- struct sk_buff_head global_tx_status;
- struct sk_buff_head global_tx_status_waste;
- struct delayed_work tx_status_janitor;
+ /* tx queues - as seen by hw - */
+ struct sk_buff_head tx_pending[__AR9170_NUM_TXQ];
+ struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
+ struct delayed_work tx_janitor;
+
+ /* rxstream mpdu merge */
+ struct ar9170_rxstream_mpdu_merge rx_mpdu;
+ struct sk_buff *rx_failover;
+ int rx_failover_missing;
};
struct ar9170_sta_info {
- struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
};
-#define IS_STARTED(a) (a->state >= AR9170_STARTED)
-#define IS_ACCEPTING_CMD(a) (a->state >= AR9170_IDLE)
+#define AR9170_TX_FLAG_WAIT_FOR_ACK BIT(0)
+#define AR9170_TX_FLAG_NO_ACK BIT(1)
+#define AR9170_TX_FLAG_BLOCK_ACK BIT(2)
+
+struct ar9170_tx_info {
+ unsigned long timeout;
+ unsigned int flags;
+};
+
+#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED)
+#define IS_ACCEPTING_CMD(a) (((struct ar9170 *)a)->state >= AR9170_IDLE)
-#define AR9170_FILTER_CHANGED_PROMISC BIT(0)
+#define AR9170_FILTER_CHANGED_MODE BIT(0)
#define AR9170_FILTER_CHANGED_MULTICAST BIT(1)
#define AR9170_FILTER_CHANGED_FRAMEFILTER BIT(2)
@@ -174,8 +218,9 @@ void *ar9170_alloc(size_t priv_size);
int ar9170_register(struct ar9170 *ar, struct device *pdev);
void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb);
void ar9170_unregister(struct ar9170 *ar);
-void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
- bool update_statistics, u16 tx_status);
+void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb);
+void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
+int ar9170_nag_limiter(struct ar9170 *ar);
/* MAC */
int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -185,6 +230,9 @@ int ar9170_update_multicast(struct ar9170 *ar);
int ar9170_update_frame_filter(struct ar9170 *ar);
int ar9170_set_operating_mode(struct ar9170 *ar);
int ar9170_set_beacon_timers(struct ar9170 *ar);
+int ar9170_set_dyn_sifs_ack(struct ar9170 *ar);
+int ar9170_set_slot_time(struct ar9170 *ar);
+int ar9170_set_basic_rates(struct ar9170 *ar);
int ar9170_set_hwretry_limit(struct ar9170 *ar, u32 max_retry);
int ar9170_update_beacon(struct ar9170 *ar);
void ar9170_new_beacon(struct work_struct *work);
diff --git a/drivers/net/wireless/ar9170/cmd.c b/drivers/net/wireless/ath/ar9170/cmd.c
index f57a6200167..f57a6200167 100644
--- a/drivers/net/wireless/ar9170/cmd.c
+++ b/drivers/net/wireless/ath/ar9170/cmd.c
diff --git a/drivers/net/wireless/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h
index a4f0e50e52b..a4f0e50e52b 100644
--- a/drivers/net/wireless/ar9170/cmd.h
+++ b/drivers/net/wireless/ath/ar9170/cmd.h
diff --git a/drivers/net/wireless/ar9170/eeprom.h b/drivers/net/wireless/ath/ar9170/eeprom.h
index d2c8cc83f1d..d2c8cc83f1d 100644
--- a/drivers/net/wireless/ar9170/eeprom.h
+++ b/drivers/net/wireless/ath/ar9170/eeprom.h
diff --git a/drivers/net/wireless/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 53e250a4278..6cbfb2f8339 100644
--- a/drivers/net/wireless/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -207,6 +207,9 @@ enum ar9170_cmd {
#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xB44)
#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xB48)
+#define AR9170_MAC_REG_AMPDU_FACTOR (AR9170_MAC_REG_BASE + 0xB9C)
+#define AR9170_MAC_REG_AMPDU_DENSITY (AR9170_MAC_REG_BASE + 0xBA0)
+
#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xC00)
#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xC50)
@@ -312,7 +315,7 @@ struct ar9170_rx_head {
u8 plcp[12];
} __packed;
-struct ar9170_rx_tail {
+struct ar9170_rx_phystatus {
union {
struct {
u8 rssi_ant0, rssi_ant1, rssi_ant2,
@@ -324,6 +327,9 @@ struct ar9170_rx_tail {
u8 evm_stream0[6], evm_stream1[6];
u8 phy_err;
+} __packed;
+
+struct ar9170_rx_macstatus {
u8 SAidx, DAidx;
u8 error;
u8 status;
@@ -339,7 +345,7 @@ struct ar9170_rx_tail {
#define AR9170_RX_ENC_SOFTWARE 0x8
-static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_tail *t)
+static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_macstatus *t)
{
return (t->SAidx & 0xc0) >> 4 |
(t->DAidx & 0xc0) >> 6;
@@ -357,10 +363,9 @@ static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_tail *t)
#define AR9170_RX_STATUS_MPDU_MASK 0x30
#define AR9170_RX_STATUS_MPDU_SINGLE 0x00
-#define AR9170_RX_STATUS_MPDU_FIRST 0x10
-#define AR9170_RX_STATUS_MPDU_MIDDLE 0x20
-#define AR9170_RX_STATUS_MPDU_LAST 0x30
-
+#define AR9170_RX_STATUS_MPDU_FIRST 0x20
+#define AR9170_RX_STATUS_MPDU_MIDDLE 0x30
+#define AR9170_RX_STATUS_MPDU_LAST 0x10
#define AR9170_RX_ERROR_RXTO 0x01
#define AR9170_RX_ERROR_OVERRUN 0x02
@@ -369,9 +374,9 @@ static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_tail *t)
#define AR9170_RX_ERROR_WRONG_RA 0x10
#define AR9170_RX_ERROR_PLCP 0x20
#define AR9170_RX_ERROR_MMIC 0x40
+#define AR9170_RX_ERROR_FATAL 0x80
struct ar9170_cmd_tx_status {
- __le16 unkn;
u8 dst[ETH_ALEN];
__le32 rate;
__le16 status;
@@ -389,6 +394,7 @@ struct ar9170_cmd_ba_failed_count {
struct ar9170_cmd_response {
u8 flag;
u8 type;
+ __le16 padding;
union {
struct ar9170_cmd_tx_status tx_status;
@@ -414,4 +420,7 @@ enum ar9170_txq {
__AR9170_NUM_TXQ,
};
+#define AR9170_TXQ_DEPTH 32
+#define AR9170_TX_MAX_PENDING 128
+
#endif /* __AR9170_HW_H */
diff --git a/drivers/net/wireless/ar9170/led.c b/drivers/net/wireless/ath/ar9170/led.c
index 341cead7f60..63fda6cd210 100644
--- a/drivers/net/wireless/ar9170/led.c
+++ b/drivers/net/wireless/ath/ar9170/led.c
@@ -74,7 +74,7 @@ static void ar9170_update_leds(struct work_struct *work)
mutex_lock(&ar->mutex);
for (i = 0; i < AR9170_NUM_LEDS; i++)
- if (ar->leds[i].toggled) {
+ if (ar->leds[i].registered && ar->leds[i].toggled) {
led_val |= 1 << i;
tmp = 70 + 200 / (ar->leds[i].toggled);
@@ -101,9 +101,15 @@ static void ar9170_led_brightness_set(struct led_classdev *led,
struct ar9170_led *arl = container_of(led, struct ar9170_led, l);
struct ar9170 *ar = arl->ar;
- arl->toggled++;
+ if (unlikely(!arl->registered))
+ return;
+
+ if (arl->last_state != !!brightness) {
+ arl->toggled++;
+ arl->last_state = !!brightness;
+ }
- if (likely(IS_ACCEPTING_CMD(ar) && brightness))
+ if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled))
queue_delayed_work(ar->hw->workqueue, &ar->led_work, HZ/10);
}
@@ -136,13 +142,14 @@ void ar9170_unregister_leds(struct ar9170 *ar)
{
int i;
- cancel_delayed_work_sync(&ar->led_work);
-
for (i = 0; i < AR9170_NUM_LEDS; i++)
if (ar->leds[i].registered) {
led_classdev_unregister(&ar->leds[i].l);
ar->leds[i].registered = false;
+ ar->leds[i].toggled = 0;
}
+
+ cancel_delayed_work_sync(&ar->led_work);
}
int ar9170_register_leds(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
index c8fa3073169..d9f1f46de18 100644
--- a/drivers/net/wireless/ar9170/mac.c
+++ b/drivers/net/wireless/ath/ar9170/mac.c
@@ -38,6 +38,55 @@
#include "ar9170.h"
#include "cmd.h"
+int ar9170_set_dyn_sifs_ack(struct ar9170 *ar)
+{
+ u32 val;
+
+ if (conf_is_ht40(&ar->hw->conf))
+ val = 0x010a;
+ else {
+ if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ val = 0x105;
+ else
+ val = 0x104;
+ }
+
+ return ar9170_write_reg(ar, AR9170_MAC_REG_DYNAMIC_SIFS_ACK, val);
+}
+
+int ar9170_set_slot_time(struct ar9170 *ar)
+{
+ u32 slottime = 20;
+
+ if (!ar->vif)
+ return 0;
+
+ if ((ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ) ||
+ ar->vif->bss_conf.use_short_slot)
+ slottime = 9;
+
+ return ar9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME, slottime << 10);
+}
+
+int ar9170_set_basic_rates(struct ar9170 *ar)
+{
+ u8 cck, ofdm;
+
+ if (!ar->vif)
+ return 0;
+
+ ofdm = ar->vif->bss_conf.basic_rates >> 4;
+
+ /* FIXME: is this still necessary? */
+ if (ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
+ cck = 0;
+ else
+ cck = ar->vif->bss_conf.basic_rates & 0xf;
+
+ return ar9170_write_reg(ar, AR9170_MAC_REG_BASIC_RATE,
+ ofdm << 8 | cck);
+}
+
int ar9170_set_qos(struct ar9170 *ar)
{
ar9170_regwrite_begin(ar);
@@ -72,6 +121,24 @@ int ar9170_set_qos(struct ar9170 *ar)
return ar9170_regwrite_result();
}
+static int ar9170_set_ampdu_density(struct ar9170 *ar, u8 mpdudensity)
+{
+ u32 val;
+
+ /* don't allow AMPDU density > 8us */
+ if (mpdudensity > 6)
+ return -EINVAL;
+
+ /* Watch out! Otus uses slightly different density values. */
+ val = 0x140a00 | (mpdudensity ? (mpdudensity + 1) : 0);
+
+ ar9170_regwrite_begin(ar);
+ ar9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, val);
+ ar9170_regwrite_finish();
+
+ return ar9170_regwrite_result();
+}
+
int ar9170_init_mac(struct ar9170 *ar)
{
ar9170_regwrite_begin(ar);
@@ -265,9 +332,9 @@ int ar9170_set_operating_mode(struct ar9170 *ar)
case NL80211_IFTYPE_ADHOC:
pm_mode |= AR9170_MAC_REG_POWERMGT_IBSS;
break;
-/* case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP:
pm_mode |= AR9170_MAC_REG_POWERMGT_AP;
- break;*/
+ break;
case NL80211_IFTYPE_WDS:
pm_mode |= AR9170_MAC_REG_POWERMGT_AP_WDS;
break;
@@ -296,6 +363,11 @@ int ar9170_set_operating_mode(struct ar9170 *ar)
if (err)
return err;
+ /* set AMPDU density to 8us. */
+ err = ar9170_set_ampdu_density(ar, 6);
+ if (err)
+ return err;
+
ar9170_regwrite_begin(ar);
ar9170_regwrite(AR9170_MAC_REG_POWERMANAGEMENT, pm_mode);
@@ -316,9 +388,9 @@ int ar9170_set_beacon_timers(struct ar9170 *ar)
u32 v = 0;
u32 pretbtt = 0;
- v |= ar->hw->conf.beacon_int;
-
if (ar->vif) {
+ v |= ar->vif->bss_conf.beacon_int;
+
switch (ar->vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
@@ -326,7 +398,7 @@ int ar9170_set_beacon_timers(struct ar9170 *ar)
break;
case NL80211_IFTYPE_AP:
v |= BIT(24);
- pretbtt = (ar->hw->conf.beacon_int - 6) << 16;
+ pretbtt = (ar->vif->bss_conf.beacon_int - 6) << 16;
break;
default:
break;
@@ -375,10 +447,10 @@ int ar9170_update_beacon(struct ar9170 *ar)
/* XXX: use skb->cb info */
if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ)
ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
- ((skb->len + 4) << (3+16)) + 0x0400);
+ ((skb->len + 4) << (3 + 16)) + 0x0400);
else
ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
- ((skb->len + 4) << (3+16)) + 0x0400);
+ ((skb->len + 4) << 16) + 0x001b);
ar9170_regwrite(AR9170_MAC_REG_BCN_LENGTH, skb->len + 4);
ar9170_regwrite(AR9170_MAC_REG_BCN_ADDR, AR9170_BEACON_BUFFER_ADDRESS);
diff --git a/drivers/net/wireless/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index 5996ff9f7f4..9d38cf60a0d 100644
--- a/drivers/net/wireless/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -142,73 +142,153 @@ static struct ieee80211_channel ar9170_5ghz_chantable[] = {
};
#undef CHAN
+#define AR9170_HT_CAP \
+{ \
+ .ht_supported = true, \
+ .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
+ IEEE80211_HT_CAP_SGI_40 | \
+ IEEE80211_HT_CAP_DSSSCCK40 | \
+ IEEE80211_HT_CAP_SM_PS, \
+ .ampdu_factor = 3, \
+ .ampdu_density = 6, \
+ .mcs = { \
+ .rx_mask = { 0xFF, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, }, \
+ }, \
+}
+
static struct ieee80211_supported_band ar9170_band_2GHz = {
.channels = ar9170_2ghz_chantable,
.n_channels = ARRAY_SIZE(ar9170_2ghz_chantable),
.bitrates = ar9170_g_ratetable,
.n_bitrates = ar9170_g_ratetable_size,
+ .ht_cap = AR9170_HT_CAP,
};
-#ifdef AR9170_QUEUE_DEBUG
-/*
- * In case some wants works with AR9170's crazy tx_status queueing techniques.
- * He might need this rather useful probing function.
- *
- * NOTE: caller must hold the queue's spinlock!
- */
+static struct ieee80211_supported_band ar9170_band_5GHz = {
+ .channels = ar9170_5ghz_chantable,
+ .n_channels = ARRAY_SIZE(ar9170_5ghz_chantable),
+ .bitrates = ar9170_a_ratetable,
+ .n_bitrates = ar9170_a_ratetable_size,
+ .ht_cap = AR9170_HT_CAP,
+};
+static void ar9170_tx(struct ar9170 *ar);
+
+#ifdef AR9170_QUEUE_DEBUG
static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
{
struct ar9170_tx_control *txc = (void *) skb->data;
- struct ieee80211_hdr *hdr = (void *)txc->frame_data;
+ struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
+ struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
+ struct ieee80211_hdr *hdr = (void *) txc->frame_data;
- printk(KERN_DEBUG "%s: => FRAME [skb:%p, queue:%d, DA:[%pM] "
- "mac_control:%04x, phy_control:%08x]\n",
+ printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x "
+ "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
- ieee80211_get_DA(hdr), le16_to_cpu(txc->mac_control),
- le32_to_cpu(txc->phy_control));
+ ieee80211_get_DA(hdr), arinfo->flags,
+ le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
+ jiffies_to_msecs(arinfo->timeout - jiffies));
}
-static void ar9170_dump_station_tx_status_queue(struct ar9170 *ar,
- struct sk_buff_head *queue)
+static void __ar9170_dump_txqueue(struct ar9170 *ar,
+ struct sk_buff_head *queue)
{
struct sk_buff *skb;
int i = 0;
printk(KERN_DEBUG "---[ cut here ]---\n");
- printk(KERN_DEBUG "%s: %d entries in tx_status queue.\n",
+ printk(KERN_DEBUG "%s: %d entries in queue.\n",
wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
skb_queue_walk(queue, skb) {
- struct ar9170_tx_control *txc = (void *) skb->data;
- struct ieee80211_hdr *hdr = (void *)txc->frame_data;
-
- printk(KERN_DEBUG "index:%d => \n", i);
+ printk(KERN_DEBUG "index:%d => \n", i++);
ar9170_print_txheader(ar, skb);
}
+ if (i != skb_queue_len(queue))
+ printk(KERN_DEBUG "WARNING: queue frame counter "
+ "mismatch %d != %d\n", skb_queue_len(queue), i);
printk(KERN_DEBUG "---[ end ]---\n");
}
-#endif /* AR9170_QUEUE_DEBUG */
-static struct ieee80211_supported_band ar9170_band_5GHz = {
- .channels = ar9170_5ghz_chantable,
- .n_channels = ARRAY_SIZE(ar9170_5ghz_chantable),
- .bitrates = ar9170_a_ratetable,
- .n_bitrates = ar9170_a_ratetable_size,
-};
+static void ar9170_dump_txqueue(struct ar9170 *ar,
+ struct sk_buff_head *queue)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ __ar9170_dump_txqueue(ar, queue);
+ spin_unlock_irqrestore(&queue->lock, flags);
+}
-void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
- bool valid_status, u16 tx_status)
+static void __ar9170_dump_txstats(struct ar9170 *ar)
+{
+ int i;
+
+ printk(KERN_DEBUG "%s: QoS queue stats\n",
+ wiphy_name(ar->hw->wiphy));
+
+ for (i = 0; i < __AR9170_NUM_TXQ; i++)
+ printk(KERN_DEBUG "%s: queue:%d limit:%d len:%d waitack:%d\n",
+ wiphy_name(ar->hw->wiphy), i, ar->tx_stats[i].limit,
+ ar->tx_stats[i].len, skb_queue_len(&ar->tx_status[i]));
+}
+
+static void ar9170_dump_txstats(struct ar9170 *ar)
{
- struct ieee80211_tx_info *txinfo;
- unsigned int retries = 0, queue = skb_get_queue_mapping(skb);
unsigned long flags;
spin_lock_irqsave(&ar->tx_stats_lock, flags);
- ar->tx_stats[queue].len--;
- if (ieee80211_queue_stopped(ar->hw, queue))
- ieee80211_wake_queue(ar->hw, queue);
+ __ar9170_dump_txstats(ar);
spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+}
+#endif /* AR9170_QUEUE_DEBUG */
+
+/* caller must guarantee exclusive access to the _bin_ queue. */
+static void ar9170_recycle_expired(struct ar9170 *ar,
+ struct sk_buff_head *queue,
+ struct sk_buff_head *bin)
+{
+ struct sk_buff *skb, *old = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ while ((skb = skb_peek(queue))) {
+ struct ieee80211_tx_info *txinfo;
+ struct ar9170_tx_info *arinfo;
+
+ txinfo = IEEE80211_SKB_CB(skb);
+ arinfo = (void *) txinfo->rate_driver_data;
+
+ if (time_is_before_jiffies(arinfo->timeout)) {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
+ "recycle \n", wiphy_name(ar->hw->wiphy),
+ jiffies, arinfo->timeout);
+ ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+ __skb_unlink(skb, queue);
+ __skb_queue_tail(bin, skb);
+ } else {
+ break;
+ }
+
+ if (unlikely(old == skb)) {
+ /* bail out - queue is shot. */
+
+ WARN_ON(1);
+ break;
+ }
+ old = skb;
+ }
+ spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
+ u16 tx_status)
+{
+ struct ieee80211_tx_info *txinfo;
+ unsigned int retries = 0;
txinfo = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(txinfo);
@@ -230,45 +310,61 @@ void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
break;
}
- if (valid_status)
- txinfo->status.rates[0].count = retries + 1;
-
+ txinfo->status.rates[0].count = retries + 1;
skb_pull(skb, sizeof(struct ar9170_tx_control));
ieee80211_tx_status_irqsafe(ar->hw, skb);
}
-static struct sk_buff *ar9170_find_skb_in_queue(struct ar9170 *ar,
- const u8 *mac,
- const u32 queue,
- struct sk_buff_head *q)
+void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;
+ unsigned int queue = skb_get_queue_mapping(skb);
unsigned long flags;
- struct sk_buff *skb;
- spin_lock_irqsave(&q->lock, flags);
- skb_queue_walk(q, skb) {
- struct ar9170_tx_control *txc = (void *) skb->data;
- struct ieee80211_hdr *hdr = (void *) txc->frame_data;
- u32 txc_queue = (le32_to_cpu(txc->phy_control) &
- AR9170_TX_PHY_QOS_MASK) >>
- AR9170_TX_PHY_QOS_SHIFT;
+ spin_lock_irqsave(&ar->tx_stats_lock, flags);
+ ar->tx_stats[queue].len--;
- if ((queue != txc_queue) ||
- (compare_ether_addr(ieee80211_get_DA(hdr), mac)))
- continue;
+ if (skb_queue_empty(&ar->tx_pending[queue])) {
+#ifdef AR9170_QUEUE_STOP_DEBUG
+ printk(KERN_DEBUG "%s: wake queue %d\n",
+ wiphy_name(ar->hw->wiphy), queue);
+ __ar9170_dump_txstats(ar);
+#endif /* AR9170_QUEUE_STOP_DEBUG */
+ ieee80211_wake_queue(ar->hw, queue);
+ }
+ spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
- __skb_unlink(skb, q);
- spin_unlock_irqrestore(&q->lock, flags);
- return skb;
+ if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) {
+ dev_kfree_skb_any(skb);
+ } else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
+ arinfo->timeout = jiffies +
+ msecs_to_jiffies(AR9170_TX_TIMEOUT);
+
+ skb_queue_tail(&ar->tx_status[queue], skb);
+ } else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
+ ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
+ } else {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: unsupported frame flags!\n",
+ wiphy_name(ar->hw->wiphy));
+ ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+ dev_kfree_skb_any(skb);
+ }
+
+ if (!ar->tx_stats[queue].len &&
+ !skb_queue_empty(&ar->tx_pending[queue])) {
+ ar9170_tx(ar);
}
- spin_unlock_irqrestore(&q->lock, flags);
- return NULL;
}
-static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
- const u32 queue)
+static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
+ const u8 *mac,
+ struct sk_buff_head *queue,
+ const u32 rate)
{
- struct ieee80211_sta *sta;
+ unsigned long flags;
struct sk_buff *skb;
/*
@@ -279,85 +375,94 @@ static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
* the firmware provided (-> destination MAC, and phy_control) -
* and hope that we picked the right one...
*/
- rcu_read_lock();
- sta = ieee80211_find_sta(ar->hw, mac);
-
- if (likely(sta)) {
- struct ar9170_sta_info *sta_priv = (void *) sta->drv_priv;
- skb = skb_dequeue(&sta_priv->tx_status[queue]);
- rcu_read_unlock();
- if (likely(skb))
- return skb;
- } else
- rcu_read_unlock();
-
- /* scan the waste queue for candidates */
- skb = ar9170_find_skb_in_queue(ar, mac, queue,
- &ar->global_tx_status_waste);
- if (!skb) {
- /* so it still _must_ be in the global list. */
- skb = ar9170_find_skb_in_queue(ar, mac, queue,
- &ar->global_tx_status);
- }
+ spin_lock_irqsave(&queue->lock, flags);
+ skb_queue_walk(queue, skb) {
+ struct ar9170_tx_control *txc = (void *) skb->data;
+ struct ieee80211_hdr *hdr = (void *) txc->frame_data;
+ u32 r;
+
+ if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: skip frame => DA %pM != %pM\n",
+ wiphy_name(ar->hw->wiphy), mac,
+ ieee80211_get_DA(hdr));
+ ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+ continue;
+ }
+
+ r = (le32_to_cpu(txc->phy_control) & AR9170_TX_PHY_MCS_MASK) >>
+ AR9170_TX_PHY_MCS_SHIFT;
+
+ if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) {
#ifdef AR9170_QUEUE_DEBUG
- if (unlikely((!skb) && net_ratelimit())) {
- printk(KERN_ERR "%s: ESS:[%pM] does not have any "
- "outstanding frames in this queue (%d).\n",
- wiphy_name(ar->hw->wiphy), mac, queue);
+ printk(KERN_DEBUG "%s: skip frame => rate %d != %d\n",
+ wiphy_name(ar->hw->wiphy), rate, r);
+ ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+ continue;
+ }
+
+ __skb_unlink(skb, queue);
+ spin_unlock_irqrestore(&queue->lock, flags);
+ return skb;
}
+
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_ERR "%s: ESS:[%pM] does not have any "
+ "outstanding frames in queue.\n",
+ wiphy_name(ar->hw->wiphy), mac);
+ __ar9170_dump_txqueue(ar, queue);
#endif /* AR9170_QUEUE_DEBUG */
- return skb;
+ spin_unlock_irqrestore(&queue->lock, flags);
+
+ return NULL;
}
/*
- * This worker tries to keep the global tx_status queue empty.
- * So we can guarantee that incoming tx_status reports for
- * unregistered stations are always synced with the actual
- * frame - which we think - belongs to.
+ * This worker tries to keep and maintain the tx_status queues.
+ * So we can guarantee that incoming tx_status reports are
+ * actually for a pending frame.
*/
-static void ar9170_tx_status_janitor(struct work_struct *work)
+static void ar9170_tx_janitor(struct work_struct *work)
{
struct ar9170 *ar = container_of(work, struct ar9170,
- tx_status_janitor.work);
- struct sk_buff *skb;
+ tx_janitor.work);
+ struct sk_buff_head waste;
+ unsigned int i;
+ bool resched = false;
if (unlikely(!IS_STARTED(ar)))
return ;
- mutex_lock(&ar->mutex);
- /* recycle the garbage back to mac80211... one by one. */
- while ((skb = skb_dequeue(&ar->global_tx_status_waste))) {
+ skb_queue_head_init(&waste);
+
+ for (i = 0; i < __AR9170_NUM_TXQ; i++) {
#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: dispose queued frame =>\n",
- wiphy_name(ar->hw->wiphy));
- ar9170_print_txheader(ar, skb);
+ printk(KERN_DEBUG "%s: garbage collector scans queue:%d\n",
+ wiphy_name(ar->hw->wiphy), i);
+ ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
+ ar9170_dump_txqueue(ar, &ar->tx_status[i]);
#endif /* AR9170_QUEUE_DEBUG */
- ar9170_handle_tx_status(ar, skb, false,
- AR9170_TX_STATUS_FAILED);
- }
- while ((skb = skb_dequeue(&ar->global_tx_status))) {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: moving frame into waste queue =>\n",
- wiphy_name(ar->hw->wiphy));
+ ar9170_recycle_expired(ar, &ar->tx_status[i], &waste);
+ ar9170_recycle_expired(ar, &ar->tx_pending[i], &waste);
+ skb_queue_purge(&waste);
- ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
- skb_queue_tail(&ar->global_tx_status_waste, skb);
+ if (!skb_queue_empty(&ar->tx_status[i]) ||
+ !skb_queue_empty(&ar->tx_pending[i]))
+ resched = true;
}
- /* recall the janitor in 100ms - if there's garbage in the can. */
- if (skb_queue_len(&ar->global_tx_status_waste) > 0)
- queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
- msecs_to_jiffies(100));
-
- mutex_unlock(&ar->mutex);
+ if (resched)
+ queue_delayed_work(ar->hw->workqueue,
+ &ar->tx_janitor,
+ msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
-static void ar9170_handle_command_response(struct ar9170 *ar,
- void *buf, u32 len)
+void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
{
struct ar9170_cmd_response *cmd = (void *) buf;
@@ -381,15 +486,21 @@ static void ar9170_handle_command_response(struct ar9170 *ar,
*/
struct sk_buff *skb;
- u32 queue = (le32_to_cpu(cmd->tx_status.rate) &
- AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT;
+ u32 phy = le32_to_cpu(cmd->tx_status.rate);
+ u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >>
+ AR9170_TX_PHY_QOS_SHIFT;
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: recv tx_status for %pM, p:%08x, q:%d\n",
+ wiphy_name(ar->hw->wiphy), cmd->tx_status.dst, phy, q);
+#endif /* AR9170_QUEUE_DEBUG */
- skb = ar9170_find_queued_skb(ar, cmd->tx_status.dst, queue);
+ skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst,
+ &ar->tx_status[q],
+ AR9170_TX_INVALID_RATE);
if (unlikely(!skb))
return ;
- ar9170_handle_tx_status(ar, skb, true,
- le16_to_cpu(cmd->tx_status.status));
+ ar9170_tx_status(ar, skb, le16_to_cpu(cmd->tx_status.status));
break;
}
@@ -429,6 +540,38 @@ static void ar9170_handle_command_response(struct ar9170 *ar,
/* retransmission issue / SIFS/EIFS collision ?! */
break;
+ /* firmware debug */
+ case 0xca:
+ printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4);
+ break;
+ case 0xcb:
+ len -= 4;
+
+ switch (len) {
+ case 1:
+ printk(KERN_DEBUG "ar9170 FW: u8: %#.2x\n",
+ *((char *)buf + 4));
+ break;
+ case 2:
+ printk(KERN_DEBUG "ar9170 FW: u8: %#.4x\n",
+ le16_to_cpup((__le16 *)((char *)buf + 4)));
+ break;
+ case 4:
+ printk(KERN_DEBUG "ar9170 FW: u8: %#.8x\n",
+ le32_to_cpup((__le32 *)((char *)buf + 4)));
+ break;
+ case 8:
+ printk(KERN_DEBUG "ar9170 FW: u8: %#.16lx\n",
+ (unsigned long)le64_to_cpup(
+ (__le64 *)((char *)buf + 4)));
+ break;
+ }
+ break;
+ case 0xcc:
+ print_hex_dump_bytes("ar9170 FW:", DUMP_PREFIX_NONE,
+ (char *)buf + 4, len - 4);
+ break;
+
default:
printk(KERN_INFO "received unhandled event %x\n", cmd->type);
print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
@@ -436,214 +579,430 @@ static void ar9170_handle_command_response(struct ar9170 *ar,
}
}
-/*
- * If the frame alignment is right (or the kernel has
- * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
- * is only a single MPDU in the USB frame, then we can
- * submit to mac80211 the SKB directly. However, since
- * there may be multiple packets in one SKB in stream
- * mode, and we need to observe the proper ordering,
- * this is non-trivial.
- */
-static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
+static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar)
{
- struct sk_buff *skb;
- struct ar9170_rx_head *head = (void *)buf;
- struct ar9170_rx_tail *tail;
- struct ieee80211_rx_status status;
- int mpdu_len, i;
- u8 error, antennas = 0, decrypt;
- __le16 fc;
- int reserved;
+ memset(&ar->rx_mpdu.plcp, 0, sizeof(struct ar9170_rx_head));
+ ar->rx_mpdu.has_plcp = false;
+}
- if (unlikely(!IS_STARTED(ar)))
- return ;
+int ar9170_nag_limiter(struct ar9170 *ar)
+{
+ bool print_message;
+
+ /*
+ * we expect all sorts of errors in promiscuous mode.
+ * don't bother with it, it's OK!
+ */
+ if (ar->sniffer_enabled)
+ return false;
+
+ /*
+ * only go for frequent errors! The hardware tends to
+ * do some stupid thing once in a while under load, in
+ * noisy environments or just for fun!
+ */
+ if (time_before(jiffies, ar->bad_hw_nagger) && net_ratelimit())
+ print_message = true;
+ else
+ print_message = false;
+
+ /* reset threshold for "once in a while" */
+ ar->bad_hw_nagger = jiffies + HZ / 4;
+ return print_message;
+}
+
+static int ar9170_rx_mac_status(struct ar9170 *ar,
+ struct ar9170_rx_head *head,
+ struct ar9170_rx_macstatus *mac,
+ struct ieee80211_rx_status *status)
+{
+ u8 error, decrypt;
- /* Received MPDU */
- mpdu_len = len;
- mpdu_len -= sizeof(struct ar9170_rx_head);
- mpdu_len -= sizeof(struct ar9170_rx_tail);
BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
- BUILD_BUG_ON(sizeof(struct ar9170_rx_tail) != 24);
+ BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4);
- if (mpdu_len <= FCS_LEN)
- return;
+ error = mac->error;
+ if (error & AR9170_RX_ERROR_MMIC) {
+ status->flag |= RX_FLAG_MMIC_ERROR;
+ error &= ~AR9170_RX_ERROR_MMIC;
+ }
- tail = (void *)(buf + sizeof(struct ar9170_rx_head) + mpdu_len);
+ if (error & AR9170_RX_ERROR_PLCP) {
+ status->flag |= RX_FLAG_FAILED_PLCP_CRC;
+ error &= ~AR9170_RX_ERROR_PLCP;
- for (i = 0; i < 3; i++)
- if (tail->rssi[i] != 0x80)
- antennas |= BIT(i);
+ if (!(ar->filter_state & FIF_PLCPFAIL))
+ return -EINVAL;
+ }
- /* post-process RSSI */
- for (i = 0; i < 7; i++)
- if (tail->rssi[i] & 0x80)
- tail->rssi[i] = ((tail->rssi[i] & 0x7f) + 1) & 0x7f;
+ if (error & AR9170_RX_ERROR_FCS) {
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ error &= ~AR9170_RX_ERROR_FCS;
- memset(&status, 0, sizeof(status));
+ if (!(ar->filter_state & FIF_FCSFAIL))
+ return -EINVAL;
+ }
+
+ decrypt = ar9170_get_decrypt_type(mac);
+ if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
+ decrypt != AR9170_ENC_ALG_NONE)
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ /* ignore wrong RA errors */
+ error &= ~AR9170_RX_ERROR_WRONG_RA;
+
+ if (error & AR9170_RX_ERROR_DECRYPT) {
+ error &= ~AR9170_RX_ERROR_DECRYPT;
+ /*
+ * Rx decryption is done in place,
+ * the original data is lost anyway.
+ */
+
+ return -EINVAL;
+ }
+
+ /* drop any other error frames */
+ if (unlikely(error)) {
+ /* TODO: update netdevice's RX dropped/errors statistics */
+
+ if (ar9170_nag_limiter(ar))
+ printk(KERN_DEBUG "%s: received frame with "
+ "suspicious error code (%#x).\n",
+ wiphy_name(ar->hw->wiphy), error);
+
+ return -EINVAL;
+ }
- status.band = ar->channel->band;
- status.freq = ar->channel->center_freq;
- status.signal = ar->noise[0] + tail->rssi_combined;
- status.noise = ar->noise[0];
- status.antenna = antennas;
+ status->band = ar->channel->band;
+ status->freq = ar->channel->center_freq;
- switch (tail->status & AR9170_RX_STATUS_MODULATION_MASK) {
+ switch (mac->status & AR9170_RX_STATUS_MODULATION_MASK) {
case AR9170_RX_STATUS_MODULATION_CCK:
- if (tail->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
- status.flag |= RX_FLAG_SHORTPRE;
+ if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
+ status->flag |= RX_FLAG_SHORTPRE;
switch (head->plcp[0]) {
case 0x0a:
- status.rate_idx = 0;
+ status->rate_idx = 0;
break;
case 0x14:
- status.rate_idx = 1;
+ status->rate_idx = 1;
break;
case 0x37:
- status.rate_idx = 2;
+ status->rate_idx = 2;
break;
case 0x6e:
- status.rate_idx = 3;
+ status->rate_idx = 3;
break;
default:
- if ((!ar->sniffer_enabled) && (net_ratelimit()))
+ if (ar9170_nag_limiter(ar))
printk(KERN_ERR "%s: invalid plcp cck rate "
"(%x).\n", wiphy_name(ar->hw->wiphy),
head->plcp[0]);
- return;
+ return -EINVAL;
}
break;
+
case AR9170_RX_STATUS_MODULATION_OFDM:
- switch (head->plcp[0] & 0xF) {
- case 0xB:
- status.rate_idx = 0;
+ switch (head->plcp[0] & 0xf) {
+ case 0xb:
+ status->rate_idx = 0;
break;
- case 0xF:
- status.rate_idx = 1;
+ case 0xf:
+ status->rate_idx = 1;
break;
- case 0xA:
- status.rate_idx = 2;
+ case 0xa:
+ status->rate_idx = 2;
break;
- case 0xE:
- status.rate_idx = 3;
+ case 0xe:
+ status->rate_idx = 3;
break;
case 0x9:
- status.rate_idx = 4;
+ status->rate_idx = 4;
break;
- case 0xD:
- status.rate_idx = 5;
+ case 0xd:
+ status->rate_idx = 5;
break;
case 0x8:
- status.rate_idx = 6;
+ status->rate_idx = 6;
break;
- case 0xC:
- status.rate_idx = 7;
+ case 0xc:
+ status->rate_idx = 7;
break;
default:
- if ((!ar->sniffer_enabled) && (net_ratelimit()))
+ if (ar9170_nag_limiter(ar))
printk(KERN_ERR "%s: invalid plcp ofdm rate "
"(%x).\n", wiphy_name(ar->hw->wiphy),
head->plcp[0]);
- return;
+ return -EINVAL;
}
- if (status.band == IEEE80211_BAND_2GHZ)
- status.rate_idx += 4;
+ if (status->band == IEEE80211_BAND_2GHZ)
+ status->rate_idx += 4;
break;
+
case AR9170_RX_STATUS_MODULATION_HT:
+ if (head->plcp[3] & 0x80)
+ status->flag |= RX_FLAG_40MHZ;
+ if (head->plcp[6] & 0x80)
+ status->flag |= RX_FLAG_SHORT_GI;
+
+ status->rate_idx = clamp(head->plcp[6] & 0x7f, 0, 75);
+ status->flag |= RX_FLAG_HT;
+ break;
+
case AR9170_RX_STATUS_MODULATION_DUPOFDM:
/* XXX */
-
- if (net_ratelimit())
+ if (ar9170_nag_limiter(ar))
printk(KERN_ERR "%s: invalid modulation\n",
wiphy_name(ar->hw->wiphy));
- return;
+ return -EINVAL;
}
- error = tail->error;
+ return 0;
+}
+
+static void ar9170_rx_phy_status(struct ar9170 *ar,
+ struct ar9170_rx_phystatus *phy,
+ struct ieee80211_rx_status *status)
+{
+ int i;
- if (error & AR9170_RX_ERROR_MMIC) {
- status.flag |= RX_FLAG_MMIC_ERROR;
- error &= ~AR9170_RX_ERROR_MMIC;
- }
+ BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20);
- if (error & AR9170_RX_ERROR_PLCP) {
- status.flag |= RX_FLAG_FAILED_PLCP_CRC;
- error &= ~AR9170_RX_ERROR_PLCP;
+ for (i = 0; i < 3; i++)
+ if (phy->rssi[i] != 0x80)
+ status->antenna |= BIT(i);
+
+ /* post-process RSSI */
+ for (i = 0; i < 7; i++)
+ if (phy->rssi[i] & 0x80)
+ phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f;
+
+ /* TODO: we could do something with phy_errors */
+ status->signal = ar->noise[0] + phy->rssi_combined;
+ status->noise = ar->noise[0];
+}
+
+static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
+{
+ struct sk_buff *skb;
+ int reserved = 0;
+ struct ieee80211_hdr *hdr = (void *) buf;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ reserved += NET_IP_ALIGN;
+
+ if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
+ reserved += NET_IP_ALIGN;
}
- if (error & AR9170_RX_ERROR_FCS) {
- status.flag |= RX_FLAG_FAILED_FCS_CRC;
- error &= ~AR9170_RX_ERROR_FCS;
+ if (ieee80211_has_a4(hdr->frame_control))
+ reserved += NET_IP_ALIGN;
+
+ reserved = 32 + (reserved & NET_IP_ALIGN);
+
+ skb = dev_alloc_skb(len + reserved);
+ if (likely(skb)) {
+ skb_reserve(skb, reserved);
+ memcpy(skb_put(skb, len), buf, len);
}
- decrypt = ar9170_get_decrypt_type(tail);
- if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
- decrypt != AR9170_ENC_ALG_NONE)
- status.flag |= RX_FLAG_DECRYPTED;
+ return skb;
+}
- /* ignore wrong RA errors */
- error &= ~AR9170_RX_ERROR_WRONG_RA;
+/*
+ * If the frame alignment is right (or the kernel has
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
+ * is only a single MPDU in the USB frame, then we could
+ * submit to mac80211 the SKB directly. However, since
+ * there may be multiple packets in one SKB in stream
+ * mode, and we need to observe the proper ordering,
+ * this is non-trivial.
+ */
- if (error & AR9170_RX_ERROR_DECRYPT) {
- error &= ~AR9170_RX_ERROR_DECRYPT;
+static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
+{
+ struct ar9170_rx_head *head;
+ struct ar9170_rx_macstatus *mac;
+ struct ar9170_rx_phystatus *phy = NULL;
+ struct ieee80211_rx_status status;
+ struct sk_buff *skb;
+ int mpdu_len;
+
+ if (unlikely(!IS_STARTED(ar) || len < (sizeof(*mac))))
+ return ;
+
+ /* Received MPDU */
+ mpdu_len = len - sizeof(*mac);
+
+ mac = (void *)(buf + mpdu_len);
+ if (unlikely(mac->error & AR9170_RX_ERROR_FATAL)) {
+ /* this frame is too damaged and can't be used - drop it */
- /*
- * Rx decryption is done in place,
- * the original data is lost anyway.
- */
return ;
}
- /* drop any other error frames */
- if ((error) && (net_ratelimit())) {
- printk(KERN_DEBUG "%s: errors: %#x\n",
- wiphy_name(ar->hw->wiphy), error);
- return;
+ switch (mac->status & AR9170_RX_STATUS_MPDU_MASK) {
+ case AR9170_RX_STATUS_MPDU_FIRST:
+ /* first mpdu packet has the plcp header */
+ if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
+ head = (void *) buf;
+ memcpy(&ar->rx_mpdu.plcp, (void *) buf,
+ sizeof(struct ar9170_rx_head));
+
+ mpdu_len -= sizeof(struct ar9170_rx_head);
+ buf += sizeof(struct ar9170_rx_head);
+ ar->rx_mpdu.has_plcp = true;
+ } else {
+ if (ar9170_nag_limiter(ar))
+ printk(KERN_ERR "%s: plcp info is clipped.\n",
+ wiphy_name(ar->hw->wiphy));
+ return ;
+ }
+ break;
+
+ case AR9170_RX_STATUS_MPDU_LAST:
+ /* last mpdu has an extra tail with phy status information */
+
+ if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
+ mpdu_len -= sizeof(struct ar9170_rx_phystatus);
+ phy = (void *)(buf + mpdu_len);
+ } else {
+ if (ar9170_nag_limiter(ar))
+ printk(KERN_ERR "%s: frame tail is clipped.\n",
+ wiphy_name(ar->hw->wiphy));
+ return ;
+ }
+
+ case AR9170_RX_STATUS_MPDU_MIDDLE:
+ /* middle mpdus are just data */
+ if (unlikely(!ar->rx_mpdu.has_plcp)) {
+ if (!ar9170_nag_limiter(ar))
+ return ;
+
+ printk(KERN_ERR "%s: rx stream did not start "
+ "with a first_mpdu frame tag.\n",
+ wiphy_name(ar->hw->wiphy));
+
+ return ;
+ }
+
+ head = &ar->rx_mpdu.plcp;
+ break;
+
+ case AR9170_RX_STATUS_MPDU_SINGLE:
+ /* single mpdu - has plcp (head) and phy status (tail) */
+ head = (void *) buf;
+
+ mpdu_len -= sizeof(struct ar9170_rx_head);
+ mpdu_len -= sizeof(struct ar9170_rx_phystatus);
+
+ buf += sizeof(struct ar9170_rx_head);
+ phy = (void *)(buf + mpdu_len);
+ break;
+
+ default:
+ BUG_ON(1);
+ break;
}
- buf += sizeof(struct ar9170_rx_head);
- fc = *(__le16 *)buf;
+ if (unlikely(mpdu_len < FCS_LEN))
+ return ;
- if (ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc))
- reserved = 32 + 2;
- else
- reserved = 32;
+ memset(&status, 0, sizeof(status));
+ if (unlikely(ar9170_rx_mac_status(ar, head, mac, &status)))
+ return ;
- skb = dev_alloc_skb(mpdu_len + reserved);
- if (!skb)
- return;
+ if (phy)
+ ar9170_rx_phy_status(ar, phy, &status);
- skb_reserve(skb, reserved);
- memcpy(skb_put(skb, mpdu_len), buf, mpdu_len);
- ieee80211_rx_irqsafe(ar->hw, skb, &status);
+ skb = ar9170_rx_copy_data(buf, mpdu_len);
+ if (likely(skb))
+ ieee80211_rx_irqsafe(ar->hw, skb, &status);
}
void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
{
- unsigned int i, tlen, resplen;
+ unsigned int i, tlen, resplen, wlen = 0, clen = 0;
u8 *tbuf, *respbuf;
tbuf = skb->data;
tlen = skb->len;
while (tlen >= 4) {
- int clen = tbuf[1] << 8 | tbuf[0];
- int wlen = (clen + 3) & ~3;
+ clen = tbuf[1] << 8 | tbuf[0];
+ wlen = ALIGN(clen, 4);
- /*
- * parse stream (if any)
- */
+ /* check if this stream has a valid tag. */
if (tbuf[2] != 0 || tbuf[3] != 0x4e) {
- printk(KERN_ERR "%s: missing tag!\n",
- wiphy_name(ar->hw->wiphy));
+ /*
+ * TODO: handle the highly unlikely event that the
+ * corrupted stream has the TAG at the right position.
+ */
+
+ /* check if the frame can be repaired. */
+ if (!ar->rx_failover_missing) {
+ /* this is no "short read". */
+ if (ar9170_nag_limiter(ar)) {
+ printk(KERN_ERR "%s: missing tag!\n",
+ wiphy_name(ar->hw->wiphy));
+ goto err_telluser;
+ } else
+ goto err_silent;
+ }
+
+ if (ar->rx_failover_missing > tlen) {
+ if (ar9170_nag_limiter(ar)) {
+ printk(KERN_ERR "%s: possible multi "
+ "stream corruption!\n",
+ wiphy_name(ar->hw->wiphy));
+ goto err_telluser;
+ } else
+ goto err_silent;
+ }
+
+ memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
+ ar->rx_failover_missing -= tlen;
+
+ if (ar->rx_failover_missing <= 0) {
+ /*
+ * nested ar9170_rx call!
+ * termination is guaranteed, even when the
+ * combined frame also has an element with
+ * a bad tag.
+ */
+
+ ar->rx_failover_missing = 0;
+ ar9170_rx(ar, ar->rx_failover);
+
+ skb_reset_tail_pointer(ar->rx_failover);
+ skb_trim(ar->rx_failover, 0);
+ }
+
return ;
}
+
+ /* check if stream is clipped */
if (wlen > tlen - 4) {
- printk(KERN_ERR "%s: invalid RX (%d, %d, %d)\n",
- wiphy_name(ar->hw->wiphy), clen, wlen, tlen);
- print_hex_dump(KERN_DEBUG, "data: ",
- DUMP_PREFIX_OFFSET,
- 16, 1, tbuf, tlen, true);
+ if (ar->rx_failover_missing) {
+ /* TODO: handle double stream corruption. */
+ if (ar9170_nag_limiter(ar)) {
+ printk(KERN_ERR "%s: double rx stream "
+ "corruption!\n",
+ wiphy_name(ar->hw->wiphy));
+ goto err_telluser;
+ } else
+ goto err_silent;
+ }
+
+ /*
+ * save incomplete data set.
+ * the firmware will resend the missing bits when
+ * the rx descriptor comes round again.
+ */
+
+ memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
+ ar->rx_failover_missing = clen - tlen;
return ;
}
resplen = clen;
@@ -668,12 +1027,44 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
if (i == 12)
ar9170_handle_command_response(ar, respbuf, resplen);
else
- ar9170_handle_mpdu(ar, respbuf, resplen);
+ ar9170_handle_mpdu(ar, respbuf, clen);
}
- if (tlen)
- printk(KERN_ERR "%s: buffer remains!\n",
- wiphy_name(ar->hw->wiphy));
+ if (tlen) {
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: %d bytes of unprocessed "
+ "data left in rx stream!\n",
+ wiphy_name(ar->hw->wiphy), tlen);
+
+ goto err_telluser;
+ }
+
+ return ;
+
+err_telluser:
+ printk(KERN_ERR "%s: damaged RX stream data [want:%d, "
+ "data:%d, rx:%d, pending:%d ]\n",
+ wiphy_name(ar->hw->wiphy), clen, wlen, tlen,
+ ar->rx_failover_missing);
+
+ if (ar->rx_failover_missing)
+ print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
+ ar->rx_failover->data,
+ ar->rx_failover->len);
+
+ print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
+ skb->data, skb->len);
+
+ printk(KERN_ERR "%s: please check your hardware and cables, if "
+ "you see this message frequently.\n",
+ wiphy_name(ar->hw->wiphy));
+
+err_silent:
+ if (ar->rx_failover_missing) {
+ skb_reset_tail_pointer(ar->rx_failover);
+ skb_trim(ar->rx_failover, 0);
+ ar->rx_failover_missing = 0;
+ }
}
#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
@@ -691,10 +1082,12 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
mutex_lock(&ar->mutex);
+ ar->filter_changed = 0;
+
/* reinitialize queues statistics */
memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
- for (i = 0; i < ARRAY_SIZE(ar->tx_stats); i++)
- ar->tx_stats[i].limit = 8;
+ for (i = 0; i < __AR9170_NUM_TXQ; i++)
+ ar->tx_stats[i].limit = AR9170_TXQ_DEPTH;
/* reset QoS defaults */
AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/
@@ -703,6 +1096,8 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
AR9170_FILL_QUEUE(ar->edcf[3], 2, 3, 7, 47); /* VOICE */
AR9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */
+ ar->bad_hw_nagger = jiffies;
+
err = ar->open(ar);
if (err)
goto out;
@@ -738,17 +1133,17 @@ out:
static void ar9170_op_stop(struct ieee80211_hw *hw)
{
struct ar9170 *ar = hw->priv;
+ unsigned int i;
if (IS_STARTED(ar))
ar->state = AR9170_IDLE;
- mutex_lock(&ar->mutex);
+ flush_workqueue(ar->hw->workqueue);
- cancel_delayed_work_sync(&ar->tx_status_janitor);
+ cancel_delayed_work_sync(&ar->tx_janitor);
cancel_work_sync(&ar->filter_config_work);
cancel_work_sync(&ar->beacon_work);
- skb_queue_purge(&ar->global_tx_status_waste);
- skb_queue_purge(&ar->global_tx_status);
+ mutex_lock(&ar->mutex);
if (IS_ACCEPTING_CMD(ar)) {
ar9170_set_leds_state(ar, 0);
@@ -758,51 +1153,32 @@ static void ar9170_op_stop(struct ieee80211_hw *hw)
ar->stop(ar);
}
+ for (i = 0; i < __AR9170_NUM_TXQ; i++) {
+ skb_queue_purge(&ar->tx_pending[i]);
+ skb_queue_purge(&ar->tx_status[i]);
+ }
mutex_unlock(&ar->mutex);
}
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
{
- struct ar9170 *ar = hw->priv;
struct ieee80211_hdr *hdr;
struct ar9170_tx_control *txc;
struct ieee80211_tx_info *info;
- struct ieee80211_rate *rate = NULL;
struct ieee80211_tx_rate *txrate;
+ struct ar9170_tx_info *arinfo;
unsigned int queue = skb_get_queue_mapping(skb);
- unsigned long flags = 0;
- struct ar9170_sta_info *sta_info = NULL;
- u32 power, chains;
u16 keytype = 0;
u16 len, icv = 0;
- int err;
- bool tx_status;
- if (unlikely(!IS_STARTED(ar)))
- goto err_free;
+ BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
hdr = (void *)skb->data;
info = IEEE80211_SKB_CB(skb);
len = skb->len;
- spin_lock_irqsave(&ar->tx_stats_lock, flags);
- if (ar->tx_stats[queue].limit < ar->tx_stats[queue].len) {
- spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
- return NETDEV_TX_OK;
- }
-
- ar->tx_stats[queue].len++;
- ar->tx_stats[queue].count++;
- if (ar->tx_stats[queue].limit == ar->tx_stats[queue].len)
- ieee80211_stop_queue(hw, queue);
-
- spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-
txc = (void *)skb_push(skb, sizeof(*txc));
- tx_status = (((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) != 0) ||
- ((info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) != 0));
-
if (info->control.hw_key) {
icv = info->control.hw_key->icv_len;
@@ -818,7 +1194,7 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
break;
default:
WARN_ON(1);
- goto err_dequeue;
+ goto err_out;
}
}
@@ -835,16 +1211,65 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
-
txrate = &info->control.rates[0];
-
if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
+ arinfo = (void *)info->rate_driver_data;
+ arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_QUEUE_TIMEOUT);
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+ (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ if (unlikely(!info->control.sta))
+ goto err_out;
+
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+ arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
+ goto out;
+ }
+
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
+ /*
+ * WARNING:
+ * Putting the QoS queue bits into an unexplored territory is
+ * certainly not elegant.
+ *
+ * In my defense: This idea provides a reasonable way to
+ * smuggle valuable information to the tx_status callback.
+ * Also, the idea behind this bit-abuse came straight from
+ * the original driver code.
+ */
+
+ txc->phy_control |=
+ cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
+ arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
+ } else {
+ arinfo->flags = AR9170_TX_FLAG_NO_ACK;
+ }
+
+out:
+ return 0;
+
+err_out:
+ skb_pull(skb, sizeof(*txc));
+ return -EINVAL;
+}
+
+static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct ar9170_tx_control *txc;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_rate *rate = NULL;
+ struct ieee80211_tx_rate *txrate;
+ u32 power, chains;
+
+ txc = (void *) skb->data;
+ info = IEEE80211_SKB_CB(skb);
+ txrate = &info->control.rates[0];
+
if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
@@ -864,9 +1289,12 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
u32 r = txrate->idx;
u8 *txpower;
+ /* heavy clip control */
+ txc->phy_control |= cpu_to_le32((r & 0x7) << 7);
+
r <<= AR9170_TX_PHY_MCS_SHIFT;
- if (WARN_ON(r & ~AR9170_TX_PHY_MCS_MASK))
- goto err_dequeue;
+ BUG_ON(r & ~AR9170_TX_PHY_MCS_MASK);
+
txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
@@ -928,53 +1356,154 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
chains = AR9170_TX_PHY_TXCHAIN_1;
}
txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
+}
- if (tx_status) {
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
- /*
- * WARNING:
- * Putting the QoS queue bits into an unexplored territory is
- * certainly not elegant.
- *
- * In my defense: This idea provides a reasonable way to
- * smuggle valuable information to the tx_status callback.
- * Also, the idea behind this bit-abuse came straight from
- * the original driver code.
- */
+static void ar9170_tx(struct ar9170 *ar)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ struct ieee80211_tx_info *info;
+ struct ar9170_tx_info *arinfo;
+ unsigned int i, frames, frames_failed, remaining_space;
+ int err;
+ bool schedule_garbagecollector = false;
- txc->phy_control |=
- cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
+ BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
- if (info->control.sta) {
- sta_info = (void *) info->control.sta->drv_priv;
- skb_queue_tail(&sta_info->tx_status[queue], skb);
- } else {
- skb_queue_tail(&ar->global_tx_status, skb);
+ if (unlikely(!IS_STARTED(ar)))
+ return ;
+
+ remaining_space = AR9170_TX_MAX_PENDING;
+
+ for (i = 0; i < __AR9170_NUM_TXQ; i++) {
+ spin_lock_irqsave(&ar->tx_stats_lock, flags);
+ if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: queue %d full\n",
+ wiphy_name(ar->hw->wiphy), i);
+
+ __ar9170_dump_txstats(ar);
+ printk(KERN_DEBUG "stuck frames: ===> \n");
+ ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
+ ar9170_dump_txqueue(ar, &ar->tx_status[i]);
+#endif /* AR9170_QUEUE_DEBUG */
+ ieee80211_stop_queue(ar->hw, i);
+ spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+ continue;
+ }
+
+ frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
+ skb_queue_len(&ar->tx_pending[i]));
+
+ if (remaining_space < frames) {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
+ "remaining slots:%d, needed:%d\n",
+ wiphy_name(ar->hw->wiphy), i, remaining_space,
+ frames);
+
+ ar9170_dump_txstats(ar);
+#endif /* AR9170_QUEUE_DEBUG */
+ frames = remaining_space;
+ }
+
+ ar->tx_stats[i].len += frames;
+ ar->tx_stats[i].count += frames;
+ spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+
+ if (!frames)
+ continue;
+
+ frames_failed = 0;
+ while (frames) {
+ skb = skb_dequeue(&ar->tx_pending[i]);
+ if (unlikely(!skb)) {
+ frames_failed += frames;
+ frames = 0;
+ break;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+ arinfo = (void *) info->rate_driver_data;
+
+ /* TODO: cancel stuck frames */
+ arinfo->timeout = jiffies +
+ msecs_to_jiffies(AR9170_TX_TIMEOUT);
+
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: send frame q:%d =>\n",
+ wiphy_name(ar->hw->wiphy), i);
+ ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+
+ err = ar->tx(ar, skb);
+ if (unlikely(err)) {
+ frames_failed++;
+ dev_kfree_skb_any(skb);
+ } else {
+ remaining_space--;
+ schedule_garbagecollector = true;
+ }
+
+ frames--;
+ }
+
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: ar9170_tx report for queue %d\n",
+ wiphy_name(ar->hw->wiphy), i);
- queue_delayed_work(ar->hw->workqueue,
- &ar->tx_status_janitor,
- msecs_to_jiffies(100));
+ printk(KERN_DEBUG "%s: unprocessed pending frames left:\n",
+ wiphy_name(ar->hw->wiphy));
+ ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
+#endif /* AR9170_QUEUE_DEBUG */
+
+ if (unlikely(frames_failed)) {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: frames failed =>\n",
+ wiphy_name(ar->hw->wiphy), frames_failed);
+#endif /* AR9170_QUEUE_DEBUG */
+
+ spin_lock_irqsave(&ar->tx_stats_lock, flags);
+ ar->tx_stats[i].len -= frames_failed;
+ ar->tx_stats[i].count -= frames_failed;
+ ieee80211_wake_queue(ar->hw, i);
+ spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
}
}
- err = ar->tx(ar, skb, tx_status, 0);
- if (unlikely(tx_status && err)) {
- if (info->control.sta)
- skb_unlink(skb, &sta_info->tx_status[queue]);
- else
- skb_unlink(skb, &ar->global_tx_status);
+ if (schedule_garbagecollector)
+ queue_delayed_work(ar->hw->workqueue,
+ &ar->tx_janitor,
+ msecs_to_jiffies(AR9170_JANITOR_DELAY));
+}
+
+int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ar9170 *ar = hw->priv;
+ struct ieee80211_tx_info *info;
+
+ if (unlikely(!IS_STARTED(ar)))
+ goto err_free;
+
+ if (unlikely(ar9170_tx_prepare(ar, skb)))
+ goto err_free;
+
+ info = IEEE80211_SKB_CB(skb);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ /* drop frame, we do not allow TX A-MPDU aggregation yet. */
+ goto err_free;
+ } else {
+ unsigned int queue = skb_get_queue_mapping(skb);
+
+ ar9170_tx_prepare_phy(ar, skb);
+ skb_queue_tail(&ar->tx_pending[queue], skb);
}
+ ar9170_tx(ar);
return NETDEV_TX_OK;
-err_dequeue:
- spin_lock_irqsave(&ar->tx_stats_lock, flags);
- ar->tx_stats[queue].len--;
- ar->tx_stats[queue].count--;
- spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-
err_free:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1037,11 +1566,6 @@ static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&ar->mutex);
- if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
- /* TODO */
- err = 0;
- }
-
if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
/* TODO */
err = 0;
@@ -1068,48 +1592,28 @@ static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
goto out;
}
- if (changed & IEEE80211_CONF_CHANGE_BEACON_INTERVAL) {
+ if (changed & BSS_CHANGED_BEACON_INT) {
err = ar9170_set_beacon_timers(ar);
if (err)
goto out;
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- err = ar9170_set_channel(ar, hw->conf.channel,
- AR9170_RFI_NONE, AR9170_BW_20);
+
+ /* adjust slot time for 5 GHz */
+ err = ar9170_set_slot_time(ar);
if (err)
goto out;
- /* adjust slot time for 5 GHz */
- if (hw->conf.channel->band == IEEE80211_BAND_5GHZ)
- err = ar9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME,
- 9 << 10);
- }
-
-out:
- mutex_unlock(&ar->mutex);
- return err;
-}
-
-static int ar9170_op_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct ar9170 *ar = hw->priv;
- int err = 0;
-
- mutex_lock(&ar->mutex);
- if (conf->changed & IEEE80211_IFCC_BSSID) {
- memcpy(ar->bssid, conf->bssid, ETH_ALEN);
- err = ar9170_set_operating_mode(ar);
- }
-
- if (conf->changed & IEEE80211_IFCC_BEACON) {
- err = ar9170_update_beacon(ar);
+ err = ar9170_set_dyn_sifs_ack(ar);
+ if (err)
+ goto out;
+ err = ar9170_set_channel(ar, hw->conf.channel,
+ AR9170_RFI_NONE,
+ nl80211_to_ar9170(hw->conf.channel_type));
if (err)
goto out;
- err = ar9170_set_beacon_timers(ar);
}
out:
@@ -1123,24 +1627,30 @@ static void ar9170_set_filters(struct work_struct *work)
filter_config_work);
int err;
- mutex_lock(&ar->mutex);
if (unlikely(!IS_STARTED(ar)))
- goto unlock;
+ return ;
- if (ar->filter_changed & AR9170_FILTER_CHANGED_PROMISC) {
+ mutex_lock(&ar->mutex);
+ if (test_and_clear_bit(AR9170_FILTER_CHANGED_MODE,
+ &ar->filter_changed)) {
err = ar9170_set_operating_mode(ar);
if (err)
goto unlock;
}
- if (ar->filter_changed & AR9170_FILTER_CHANGED_MULTICAST) {
+ if (test_and_clear_bit(AR9170_FILTER_CHANGED_MULTICAST,
+ &ar->filter_changed)) {
err = ar9170_update_multicast(ar);
if (err)
goto unlock;
}
- if (ar->filter_changed & AR9170_FILTER_CHANGED_FRAMEFILTER)
+ if (test_and_clear_bit(AR9170_FILTER_CHANGED_FRAMEFILTER,
+ &ar->filter_changed)) {
err = ar9170_update_frame_filter(ar);
+ if (err)
+ goto unlock;
+ }
unlock:
mutex_unlock(&ar->mutex);
@@ -1155,8 +1665,8 @@ static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
/* mask supported flags */
*new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC |
- FIF_PROMISC_IN_BSS;
-
+ FIF_PROMISC_IN_BSS | FIF_FCSFAIL | FIF_PLCPFAIL;
+ ar->filter_state = *new_flags;
/*
* We can support more by setting the sniffer bit and
* then checking the error flags, later.
@@ -1170,7 +1680,7 @@ static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
int i;
/* always get broadcast frames */
- mchash = 1ULL << (0xff>>2);
+ mchash = 1ULL << (0xff >> 2);
for (i = 0; i < mc_count; i++) {
if (WARN_ON(!mclist))
@@ -1180,7 +1690,7 @@ static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
}
ar->want_mc_hash = mchash;
}
- ar->filter_changed |= AR9170_FILTER_CHANGED_MULTICAST;
+ set_bit(AR9170_FILTER_CHANGED_MULTICAST, &ar->filter_changed);
}
if (changed_flags & FIF_CONTROL) {
@@ -1196,12 +1706,14 @@ static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
else
ar->want_filter = ar->cur_filter & ~filter;
- ar->filter_changed |= AR9170_FILTER_CHANGED_FRAMEFILTER;
+ set_bit(AR9170_FILTER_CHANGED_FRAMEFILTER,
+ &ar->filter_changed);
}
if (changed_flags & FIF_PROMISC_IN_BSS) {
ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0;
- ar->filter_changed |= AR9170_FILTER_CHANGED_PROMISC;
+ set_bit(AR9170_FILTER_CHANGED_MODE,
+ &ar->filter_changed);
}
if (likely(IS_STARTED(ar)))
@@ -1218,48 +1730,54 @@ static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&ar->mutex);
- ar9170_regwrite_begin(ar);
+ if (changed & BSS_CHANGED_BSSID) {
+ memcpy(ar->bssid, bss_conf->bssid, ETH_ALEN);
+ err = ar9170_set_operating_mode(ar);
+ if (err)
+ goto out;
+ }
- if (changed & BSS_CHANGED_ASSOC) {
- ar->state = bss_conf->assoc ? AR9170_ASSOCIATED : ar->state;
+ if (changed & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED)) {
+ err = ar9170_update_beacon(ar);
+ if (err)
+ goto out;
+ err = ar9170_set_beacon_timers(ar);
+ if (err)
+ goto out;
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
#ifndef CONFIG_AR9170_LEDS
/* enable assoc LED. */
err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0);
#endif /* CONFIG_AR9170_LEDS */
}
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ err = ar9170_set_beacon_timers(ar);
+ if (err)
+ goto out;
+ }
+
if (changed & BSS_CHANGED_HT) {
/* TODO */
err = 0;
}
if (changed & BSS_CHANGED_ERP_SLOT) {
- u32 slottime = 20;
-
- if (bss_conf->use_short_slot)
- slottime = 9;
-
- ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, slottime << 10);
+ err = ar9170_set_slot_time(ar);
+ if (err)
+ goto out;
}
if (changed & BSS_CHANGED_BASIC_RATES) {
- u32 cck, ofdm;
-
- if (hw->conf.channel->band == IEEE80211_BAND_5GHZ) {
- ofdm = bss_conf->basic_rates;
- cck = 0;
- } else {
- /* four cck rates */
- cck = bss_conf->basic_rates & 0xf;
- ofdm = bss_conf->basic_rates >> 4;
- }
- ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE,
- ofdm << 8 | cck);
+ err = ar9170_set_basic_rates(ar);
+ if (err)
+ goto out;
}
- ar9170_regwrite_finish();
- err = ar9170_regwrite_result();
+out:
mutex_unlock(&ar->mutex);
}
@@ -1298,7 +1816,7 @@ static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (key->alg) {
case ALG_WEP:
- if (key->keylen == LEN_WEP40)
+ if (key->keylen == WLAN_KEY_LEN_WEP40)
ktype = AR9170_ENC_ALG_WEP64;
else
ktype = AR9170_ENC_ALG_WEP128;
@@ -1411,43 +1929,6 @@ static void ar9170_sta_notify(struct ieee80211_hw *hw,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta)
{
- struct ar9170 *ar = hw->priv;
- struct ar9170_sta_info *info = (void *) sta->drv_priv;
- struct sk_buff *skb;
- unsigned int i;
-
- switch (cmd) {
- case STA_NOTIFY_ADD:
- for (i = 0; i < ar->hw->queues; i++)
- skb_queue_head_init(&info->tx_status[i]);
- break;
-
- case STA_NOTIFY_REMOVE:
-
- /*
- * transfer all outstanding frames that need a tx_status
- * reports to the global tx_status queue
- */
-
- for (i = 0; i < ar->hw->queues; i++) {
- while ((skb = skb_dequeue(&info->tx_status[i]))) {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: queueing frame in "
- "global tx_status queue =>\n",
- wiphy_name(ar->hw->wiphy));
-
- ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
- skb_queue_tail(&ar->global_tx_status, skb);
- }
- }
- queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
- msecs_to_jiffies(100));
- break;
-
- default:
- break;
- }
}
static int ar9170_get_stats(struct ieee80211_hw *hw,
@@ -1486,7 +1967,7 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
int ret;
mutex_lock(&ar->mutex);
- if ((param) && !(queue > ar->hw->queues)) {
+ if ((param) && !(queue > __AR9170_NUM_TXQ)) {
memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
param, sizeof(*param));
@@ -1498,6 +1979,24 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
return ret;
}
+static int ar9170_ampdu_action(struct ieee80211_hw *hw,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ /*
+ * Something goes wrong -- RX locks up
+ * after a while of receiving aggregated
+ * frames -- not enabling for now.
+ */
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct ieee80211_ops ar9170_ops = {
.start = ar9170_op_start,
.stop = ar9170_op_stop,
@@ -1505,7 +2004,6 @@ static const struct ieee80211_ops ar9170_ops = {
.add_interface = ar9170_op_add_interface,
.remove_interface = ar9170_op_remove_interface,
.config = ar9170_op_config,
- .config_interface = ar9170_op_config_interface,
.configure_filter = ar9170_op_configure_filter,
.conf_tx = ar9170_conf_tx,
.bss_info_changed = ar9170_op_bss_info_changed,
@@ -1514,29 +2012,45 @@ static const struct ieee80211_ops ar9170_ops = {
.sta_notify = ar9170_sta_notify,
.get_stats = ar9170_get_stats,
.get_tx_stats = ar9170_get_tx_stats,
+ .ampdu_action = ar9170_ampdu_action,
};
void *ar9170_alloc(size_t priv_size)
{
struct ieee80211_hw *hw;
struct ar9170 *ar;
+ struct sk_buff *skb;
int i;
+ /*
+ * this buffer is used for rx stream reconstruction.
+ * Under heavy load this device (or the transport layer?)
+ * tends to split the streams into separate rx descriptors.
+ */
+
+ skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
+ if (!skb)
+ goto err_nomem;
+
hw = ieee80211_alloc_hw(priv_size, &ar9170_ops);
if (!hw)
- return ERR_PTR(-ENOMEM);
+ goto err_nomem;
ar = hw->priv;
ar->hw = hw;
+ ar->rx_failover = skb;
mutex_init(&ar->mutex);
spin_lock_init(&ar->cmdlock);
spin_lock_init(&ar->tx_stats_lock);
- skb_queue_head_init(&ar->global_tx_status);
- skb_queue_head_init(&ar->global_tx_status_waste);
+ for (i = 0; i < __AR9170_NUM_TXQ; i++) {
+ skb_queue_head_init(&ar->tx_status[i]);
+ skb_queue_head_init(&ar->tx_pending[i]);
+ }
+ ar9170_rx_reset_rx_mpdu(ar);
INIT_WORK(&ar->filter_config_work, ar9170_set_filters);
INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
- INIT_DELAYED_WORK(&ar->tx_status_janitor, ar9170_tx_status_janitor);
+ INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
/* all hw supports 2.4 GHz, so set channel to 1 by default */
ar->channel = &ar9170_2ghz_chantable[0];
@@ -1561,6 +2075,10 @@ void *ar9170_alloc(size_t priv_size)
ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
return ar;
+
+err_nomem:
+ kfree_skb(skb);
+ return ERR_PTR(-ENOMEM);
}
static int ar9170_read_eeprom(struct ar9170 *ar)
@@ -1619,12 +2137,24 @@ static int ar9170_read_eeprom(struct ar9170 *ar)
else
ar->hw->channel_change_time = 80 * 1000;
+ ar->regulatory.current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
+ ar->regulatory.current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
+
/* second part of wiphy init */
SET_IEEE80211_PERM_ADDR(ar->hw, addr);
return bands ? 0 : -EINVAL;
}
+static int ar9170_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ar9170 *ar = hw->priv;
+
+ return ath_reg_notifier_apply(wiphy, request, &ar->regulatory);
+}
+
int ar9170_register(struct ar9170 *ar, struct device *pdev)
{
int err;
@@ -1634,10 +2164,18 @@ int ar9170_register(struct ar9170 *ar, struct device *pdev)
if (err)
goto err_out;
+ err = ath_regd_init(&ar->regulatory, ar->hw->wiphy,
+ ar9170_reg_notifier);
+ if (err)
+ goto err_out;
+
err = ieee80211_register_hw(ar->hw);
if (err)
goto err_out;
+ if (!ath_is_world_regd(&ar->regulatory))
+ regulatory_hint(ar->hw->wiphy, ar->regulatory.alpha2);
+
err = ar9170_init_leds(ar);
if (err)
goto err_unreg;
@@ -1666,6 +2204,7 @@ void ar9170_unregister(struct ar9170 *ar)
ar9170_unregister_leds(ar);
#endif /* CONFIG_AR9170_LEDS */
+ kfree_skb(ar->rx_failover);
ieee80211_unregister_hw(ar->hw);
mutex_destroy(&ar->mutex);
}
diff --git a/drivers/net/wireless/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
index 6ce20754b8e..df86f70cd81 100644
--- a/drivers/net/wireless/ar9170/phy.c
+++ b/drivers/net/wireless/ath/ar9170/phy.c
@@ -401,7 +401,7 @@ int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
int i, err;
u32 val;
bool is_2ghz = band == IEEE80211_BAND_2GHZ;
- bool is_40mhz = false; /* XXX: for now */
+ bool is_40mhz = conf_is_ht40(&ar->hw->conf);
ar9170_regwrite_begin(ar);
@@ -1200,7 +1200,7 @@ int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
return -ENOSYS;
}
- if (0 /* 2 streams capable */)
+ if (ar->eeprom.tx_mask != 1)
tmp |= 0x100;
err = ar9170_write_reg(ar, 0x1c5804, tmp);
@@ -1214,7 +1214,7 @@ int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
freqpar = ar9170_get_hw_dyn_params(channel, bw);
vals[0] = cpu_to_le32(channel->center_freq * 1000);
- vals[1] = cpu_to_le32(bw == AR9170_BW_20 ? 0 : 1);
+ vals[1] = cpu_to_le32(conf_is_ht40(&ar->hw->conf));
vals[2] = cpu_to_le32(offs << 2 | 1);
vals[3] = cpu_to_le32(freqpar->coeff_exp);
vals[4] = cpu_to_le32(freqpar->coeff_man);
diff --git a/drivers/net/wireless/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index fddda477095..754b1f8d8da 100644
--- a/drivers/net/wireless/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -51,9 +51,14 @@ MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless");
+MODULE_FIRMWARE("ar9170.fw");
MODULE_FIRMWARE("ar9170-1.fw");
MODULE_FIRMWARE("ar9170-2.fw");
+enum ar9170_requirements {
+ AR9170_REQ_FW1_ONLY = 1,
+};
+
static struct usb_device_id ar9170_usb_ids[] = {
/* Atheros 9170 */
{ USB_DEVICE(0x0cf3, 0x9170) },
@@ -81,25 +86,74 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x2019, 0x5304) },
/* IO-Data WNGDNUS2 */
{ USB_DEVICE(0x04bb, 0x093f) },
+ /* AVM FRITZ!WLAN USB Stick N */
+ { USB_DEVICE(0x057C, 0x8401) },
+ /* AVM FRITZ!WLAN USB Stick N 2.4 */
+ { USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY },
/* terminate */
{}
};
MODULE_DEVICE_TABLE(usb, ar9170_usb_ids);
-static void ar9170_usb_tx_urb_complete_free(struct urb *urb)
+static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
+{
+ struct urb *urb;
+ unsigned long flags;
+ int err;
+
+ if (unlikely(!IS_STARTED(&aru->common)))
+ return ;
+
+ spin_lock_irqsave(&aru->tx_urb_lock, flags);
+ if (aru->tx_submitted_urbs >= AR9170_NUM_TX_URBS) {
+ spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
+ return ;
+ }
+ aru->tx_submitted_urbs++;
+
+ urb = usb_get_from_anchor(&aru->tx_pending);
+ if (!urb) {
+ aru->tx_submitted_urbs--;
+ spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
+
+ return ;
+ }
+ spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
+
+ aru->tx_pending_urbs--;
+ usb_anchor_urb(urb, &aru->tx_submitted);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err)) {
+ if (ar9170_nag_limiter(&aru->common))
+ dev_err(&aru->udev->dev, "submit_urb failed (%d).\n",
+ err);
+
+ usb_unanchor_urb(urb);
+ aru->tx_submitted_urbs--;
+ ar9170_tx_callback(&aru->common, urb->context);
+ }
+
+ usb_free_urb(urb);
+}
+
+static void ar9170_usb_tx_urb_complete_frame(struct urb *urb)
{
struct sk_buff *skb = urb->context;
struct ar9170_usb *aru = (struct ar9170_usb *)
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
- if (!aru) {
+ if (unlikely(!aru)) {
dev_kfree_skb_irq(skb);
return ;
}
- ar9170_handle_tx_status(&aru->common, skb, false,
- AR9170_TX_STATUS_COMPLETE);
+ aru->tx_submitted_urbs--;
+
+ ar9170_tx_callback(&aru->common, skb);
+
+ ar9170_usb_submit_urb(aru);
}
static void ar9170_usb_tx_urb_complete(struct urb *urb)
@@ -126,8 +180,8 @@ static void ar9170_usb_irq_completed(struct urb *urb)
goto resubmit;
}
- print_hex_dump_bytes("ar9170 irq: ", DUMP_PREFIX_OFFSET,
- urb->transfer_buffer, urb->actual_length);
+ ar9170_handle_command_response(&aru->common, urb->transfer_buffer,
+ urb->actual_length);
resubmit:
usb_anchor_urb(urb, &aru->rx_submitted);
@@ -177,16 +231,15 @@ resubmit:
usb_anchor_urb(urb, &aru->rx_submitted);
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err) {
+ if (unlikely(err)) {
usb_unanchor_urb(urb);
- dev_kfree_skb_irq(skb);
+ goto free;
}
return ;
free:
dev_kfree_skb_irq(skb);
- return;
}
static int ar9170_usb_prep_rx_urb(struct ar9170_usb *aru,
@@ -282,21 +335,47 @@ err_out:
return err;
}
-static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru)
+static int ar9170_usb_flush(struct ar9170 *ar)
{
- int ret;
+ struct ar9170_usb *aru = (void *) ar;
+ struct urb *urb;
+ int ret, err = 0;
- aru->common.state = AR9170_UNKNOWN_STATE;
+ if (IS_STARTED(ar))
+ aru->common.state = AR9170_IDLE;
- usb_unlink_anchored_urbs(&aru->tx_submitted);
+ usb_wait_anchor_empty_timeout(&aru->tx_pending,
+ msecs_to_jiffies(800));
+ while ((urb = usb_get_from_anchor(&aru->tx_pending))) {
+ ar9170_tx_callback(&aru->common, (void *) urb->context);
+ usb_free_urb(urb);
+ }
- /* give the LED OFF command and the deauth frame a chance to air. */
+ /* let's wait a while until the tx queues have drained */

ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
msecs_to_jiffies(100));
if (ret == 0)
- dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
- usb_poison_anchored_urbs(&aru->tx_submitted);
+ err = -ETIMEDOUT;
+
+ usb_kill_anchored_urbs(&aru->tx_submitted);
+ if (IS_ACCEPTING_CMD(ar))
+ aru->common.state = AR9170_STARTED;
+
+ return err;
+}
+
+static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru)
+{
+ int err;
+
+ aru->common.state = AR9170_UNKNOWN_STATE;
+
+ err = ar9170_usb_flush(&aru->common);
+ if (err)
+ dev_err(&aru->udev->dev, "stuck tx urbs!\n");
+
+ usb_poison_anchored_urbs(&aru->tx_submitted);
usb_poison_anchored_urbs(&aru->rx_submitted);
}
@@ -337,7 +416,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
usb_anchor_urb(urb, &aru->tx_submitted);
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err) {
+ if (unlikely(err)) {
usb_unanchor_urb(urb);
usb_free_urb(urb);
goto err_unbuf;
@@ -350,7 +429,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
goto err_unbuf;
}
- if (outlen >= 0 && aru->readlen != outlen) {
+ if (aru->readlen != outlen) {
err = -EMSGSIZE;
goto err_unbuf;
}
@@ -380,12 +459,10 @@ err_free:
return err;
}
-static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb,
- bool txstatus_needed, unsigned int extra_len)
+static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb)
{
struct ar9170_usb *aru = (struct ar9170_usb *) ar;
struct urb *urb;
- int err;
if (unlikely(!IS_STARTED(ar))) {
/* Seriously, what were you drink... err... thinking!? */
@@ -398,18 +475,17 @@ static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb,
usb_fill_bulk_urb(urb, aru->udev,
usb_sndbulkpipe(aru->udev, AR9170_EP_TX),
- skb->data, skb->len + extra_len, (txstatus_needed ?
- ar9170_usb_tx_urb_complete :
- ar9170_usb_tx_urb_complete_free), skb);
+ skb->data, skb->len,
+ ar9170_usb_tx_urb_complete_frame, skb);
urb->transfer_flags |= URB_ZERO_PACKET;
- usb_anchor_urb(urb, &aru->tx_submitted);
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(err))
- usb_unanchor_urb(urb);
+ usb_anchor_urb(urb, &aru->tx_pending);
+ aru->tx_pending_urbs++;
usb_free_urb(urb);
- return err;
+
+ ar9170_usb_submit_urb(aru);
+ return 0;
}
static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len , void *buffer)
@@ -418,7 +494,7 @@ static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len , void *buffer)
unsigned long flags;
u32 in, out;
- if (!buffer)
+ if (unlikely(!buffer))
return ;
in = le32_to_cpup((__le32 *)buffer);
@@ -504,17 +580,29 @@ static int ar9170_usb_request_firmware(struct ar9170_usb *aru)
{
int err = 0;
- err = request_firmware(&aru->init_values, "ar9170-1.fw",
+ err = request_firmware(&aru->firmware, "ar9170.fw",
&aru->udev->dev);
- if (err) {
- dev_err(&aru->udev->dev, "file with init values not found.\n");
- return err;
+ if (!err) {
+ aru->init_values = NULL;
+ return 0;
}
+ if (aru->req_one_stage_fw) {
+ dev_err(&aru->udev->dev, "ar9170.fw firmware file "
+ "not found and is required for this device\n");
+ return -EINVAL;
+ }
+
+ dev_err(&aru->udev->dev, "ar9170.fw firmware file "
+ "not found, trying old firmware...\n");
+
+ err = request_firmware(&aru->init_values, "ar9170-1.fw",
+ &aru->udev->dev);
+
err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev);
if (err) {
release_firmware(aru->init_values);
- dev_err(&aru->udev->dev, "firmware file not found.\n");
+ dev_err(&aru->udev->dev, "file with init values not found.\n");
return err;
}
@@ -548,6 +636,9 @@ static int ar9170_usb_upload_firmware(struct ar9170_usb *aru)
{
int err;
+ if (!aru->init_values)
+ goto upload_fw_start;
+
/* First, upload initial values to device RAM */
err = ar9170_usb_upload(aru, aru->init_values->data,
aru->init_values->size, 0x102800, false);
@@ -557,6 +648,8 @@ static int ar9170_usb_upload_firmware(struct ar9170_usb *aru)
return err;
}
+upload_fw_start:
+
/* Then, upload the firmware itself and start it */
return ar9170_usb_upload(aru, aru->firmware->data, aru->firmware->size,
0x200000, true);
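
Taken together, the firmware hunks make the single-image ar9170.fw the preferred choice, fall back to the old ar9170-1.fw/ar9170-2.fw pair when it is missing, refuse the fallback on devices flagged AR9170_REQ_FW1_ONLY, and skip the init-values upload stage when only the combined image was loaded. A compact stand-alone model of that selection (file availability is simulated with booleans instead of request_firmware()):

#include <stdio.h>
#include <stdbool.h>

/* Model of the firmware choice made by the patched request/upload
 * paths; returns 0 when a usable combination exists. */
static int pick_firmware(bool have_combined, bool have_old_pair,
			 bool req_one_stage)
{
	if (have_combined) {
		printf("upload ar9170.fw only (init-values stage skipped)\n");
		return 0;
	}
	if (req_one_stage) {
		printf("ar9170.fw is required for this device -> fail\n");
		return -1;
	}
	if (have_old_pair) {
		printf("upload ar9170-1.fw (init values), then ar9170-2.fw\n");
		return 0;
	}
	printf("no usable firmware found\n");
	return -1;
}

int main(void)
{
	pick_firmware(true, false, false);	/* new combined image present */
	pick_firmware(false, true, false);	/* fall back to the old pair  */
	pick_firmware(false, true, true);	/* FW1-only device, must fail */
	return 0;
}
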
@@ -592,10 +685,8 @@ static void ar9170_usb_stop(struct ar9170 *ar)
if (IS_ACCEPTING_CMD(ar))
aru->common.state = AR9170_STOPPED;
- /* lets wait a while until the tx - queues are dried out */
- ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
- msecs_to_jiffies(1000));
- if (ret == 0)
+ ret = ar9170_usb_flush(ar);
+ if (ret)
dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
usb_poison_anchored_urbs(&aru->tx_submitted);
@@ -656,6 +747,15 @@ err_out:
return err;
}
+static bool ar9170_requires_one_stage(const struct usb_device_id *id)
+{
+ if (!id->driver_info)
+ return false;
+ if (id->driver_info == AR9170_REQ_FW1_ONLY)
+ return true;
+ return false;
+}
+
static int ar9170_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -676,19 +776,30 @@ static int ar9170_usb_probe(struct usb_interface *intf,
aru->intf = intf;
ar = &aru->common;
+ aru->req_one_stage_fw = ar9170_requires_one_stage(id);
+
usb_set_intfdata(intf, aru);
SET_IEEE80211_DEV(ar->hw, &udev->dev);
init_usb_anchor(&aru->rx_submitted);
+ init_usb_anchor(&aru->tx_pending);
init_usb_anchor(&aru->tx_submitted);
init_completion(&aru->cmd_wait);
+ spin_lock_init(&aru->tx_urb_lock);
+
+ aru->tx_pending_urbs = 0;
+ aru->tx_submitted_urbs = 0;
aru->common.stop = ar9170_usb_stop;
+ aru->common.flush = ar9170_usb_flush;
aru->common.open = ar9170_usb_open;
aru->common.tx = ar9170_usb_tx;
aru->common.exec_cmd = ar9170_usb_exec_cmd;
aru->common.callback_cmd = ar9170_usb_callback_cmd;
+#ifdef CONFIG_PM
+ udev->reset_resume = 1;
+#endif /* CONFIG_PM */
err = ar9170_usb_reset(aru);
if (err)
goto err_freehw;
@@ -773,11 +884,6 @@ static int ar9170_resume(struct usb_interface *intf)
usb_unpoison_anchored_urbs(&aru->rx_submitted);
usb_unpoison_anchored_urbs(&aru->tx_submitted);
- /*
- * FIXME: firmware upload will fail on resume.
- * but this is better than a hang!
- */
-
err = ar9170_usb_init_device(aru);
if (err)
goto err_unrx;
@@ -805,6 +911,7 @@ static struct usb_driver ar9170_driver = {
#ifdef CONFIG_PM
.suspend = ar9170_suspend,
.resume = ar9170_resume,
+ .reset_resume = ar9170_resume,
#endif /* CONFIG_PM */
};
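
The reworked TX path no longer submits URBs directly from ar9170_usb_tx(): frames are anchored on tx_pending, and ar9170_usb_submit_urb() keeps at most AR9170_NUM_TX_URBS of them in flight, counting them in tx_submitted_urbs under tx_urb_lock, while every completion drops the count and pulls the next pending frame. A single-threaded sketch of the same bounded-window bookkeeping, with plain arrays standing in for the USB anchors and no locking:

#include <stdio.h>

#define NUM_TX_URBS 8			/* mirrors AR9170_NUM_TX_URBS */

static int pending[64];			/* stands in for the tx_pending anchor */
static int pending_head, pending_tail;
static int submitted_urbs;		/* mirrors tx_submitted_urbs */

static void submit_one(void)		/* ~ ar9170_usb_submit_urb() */
{
	if (submitted_urbs >= NUM_TX_URBS || pending_head == pending_tail)
		return;			/* window full or nothing queued */
	submitted_urbs++;
	printf("submit frame %d (in flight: %d)\n",
	       pending[pending_head++], submitted_urbs);
}

static void tx_queue(int frame)		/* ~ ar9170_usb_tx() */
{
	pending[pending_tail++] = frame;
	submit_one();
}

static void tx_complete(int frame)	/* ~ ar9170_usb_tx_urb_complete_frame() */
{
	submitted_urbs--;
	printf("complete frame %d (in flight: %d)\n", frame, submitted_urbs);
	submit_one();			/* refill the window */
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		tx_queue(i);		/* only the first 8 are submitted */
	tx_complete(0);			/* completion pulls frame 8 in */
	tx_complete(1);			/* ... and frame 9 */
	return 0;
}
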
diff --git a/drivers/net/wireless/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index f5852924cd6..d098f4d5d2f 100644
--- a/drivers/net/wireless/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -43,7 +43,7 @@
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/leds.h>
-#include <net/wireless.h>
+#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <linux/firmware.h>
#include "eeprom.h"
@@ -51,6 +51,7 @@
#include "ar9170.h"
#define AR9170_NUM_RX_URBS 16
+#define AR9170_NUM_TX_URBS 8
struct firmware;
@@ -60,9 +61,15 @@ struct ar9170_usb {
struct usb_interface *intf;
struct usb_anchor rx_submitted;
+ struct usb_anchor tx_pending;
struct usb_anchor tx_submitted;
- spinlock_t cmdlock;
+ bool req_one_stage_fw;
+
+ spinlock_t tx_urb_lock;
+ unsigned int tx_submitted_urbs;
+ unsigned int tx_pending_urbs;
+
struct completion cmd_wait;
int readlen;
u8 *readbuf;
diff --git a/drivers/net/wireless/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 75383a5df99..daf0c83527d 100644
--- a/drivers/net/wireless/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -1,6 +1,7 @@
config ATH5K
tristate "Atheros 5xxx wireless cards support"
depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
+ select ATH_COMMON
select MAC80211_LEDS
select LEDS_CLASS
select NEW_LEDS
@@ -27,11 +28,10 @@ config ATH5K_DEBUG
Say Y, if and you will get debug options for ath5k.
To use this, you need to mount debugfs:
- mkdir /debug/
- mount -t debugfs debug /debug/
+ mount -t debugfs debug /sys/kernel/debug
You will get access to files under:
- /debug/ath5k/phy0/
+ /sys/kernel/debug/ath5k/phy0/
To enable debug, pass the debug level to the debug module
parameter. For example:
diff --git a/drivers/net/wireless/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 84a74c5248e..090dc6d268a 100644
--- a/drivers/net/wireless/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -11,5 +11,6 @@ ath5k-y += reset.o
ath5k-y += attach.o
ath5k-y += base.o
ath5k-y += led.o
+ath5k-y += rfkill.o
ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 0b616e72fe0..6358233bac9 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -27,6 +27,8 @@
#include <linux/types.h>
#include <net/mac80211.h>
+#include "../regd.h"
+
/* RX/TX descriptor hw structs
* TODO: Driver part should only see sw structs */
#include "desc.h"
@@ -207,7 +209,6 @@
#define AR5K_TUNE_MAX_TXPOWER 63
#define AR5K_TUNE_DEFAULT_TXPOWER 25
#define AR5K_TUNE_TPC_TXPOWER false
-#define AR5K_TUNE_ANT_DIVERSITY true
#define AR5K_TUNE_HWTXTRIES 4
#define AR5K_INIT_CARR_SENSE_EN 1
@@ -418,6 +419,17 @@ enum ath5k_driver_mode {
AR5K_MODE_MAX = 5
};
+enum ath5k_ant_mode {
+ AR5K_ANTMODE_DEFAULT = 0, /* default antenna setup */
+ AR5K_ANTMODE_FIXED_A = 1, /* only antenna A is present */
+ AR5K_ANTMODE_FIXED_B = 2, /* only antenna B is present */
+ AR5K_ANTMODE_SINGLE_AP = 3, /* sta locked on a single ap */
+ AR5K_ANTMODE_SECTOR_AP = 4, /* AP with tx antenna set on tx desc */
+ AR5K_ANTMODE_SECTOR_STA = 5, /* STA with tx antenna set on tx desc */
+ AR5K_ANTMODE_DEBUG = 6, /* Debug mode -A -> Rx, B-> Tx- */
+ AR5K_ANTMODE_MAX,
+};
+
/****************\
TX DEFINITIONS
@@ -1039,8 +1051,6 @@ struct ath5k_hw {
bool ah_5ghz;
bool ah_2ghz;
-#define ah_regdomain ah_capabilities.cap_regdomain.reg_current
-#define ah_regdomain_hw ah_capabilities.cap_regdomain.reg_hw
#define ah_modes ah_capabilities.cap_mode
#define ah_ee_version ah_capabilities.cap_eeprom.ee_version
@@ -1051,8 +1061,11 @@ struct ath5k_hw {
bool ah_software_retry;
u32 ah_limit_tx_retries;
- u32 ah_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
- bool ah_ant_diversity;
+ /* Antenna Control */
+ u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
+ u8 ah_ant_mode;
+ u8 ah_tx_ant;
+ u8 ah_def_ant;
u8 ah_sta_id[ETH_ALEN];
@@ -1065,6 +1078,7 @@ struct ath5k_hw {
u32 ah_gpio[AR5K_MAX_GPIO];
int ah_gpio_npins;
+ struct ath_regulatory ah_regulatory;
struct ath5k_capabilities ah_capabilities;
struct ath5k_txq_info ah_txq[AR5K_NUM_TX_QUEUES];
@@ -1099,11 +1113,12 @@ struct ath5k_hw {
/* Values in 0.25dB units */
s16 txp_min_pwr;
s16 txp_max_pwr;
+ /* Values in 0.5dB units */
s16 txp_offset;
s16 txp_ofdm;
- /* Values in dB units */
- s16 txp_cck_ofdm_pwr_delta;
s16 txp_cck_ofdm_gainf_delta;
+ /* Value in dB units */
+ s16 txp_cck_ofdm_pwr_delta;
} ah_txpower;
struct {
@@ -1241,6 +1256,10 @@ extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level);
+/* rfkill Functions */
+extern void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
+extern void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
+
/* Misc functions */
int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result);
@@ -1263,14 +1282,21 @@ extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *chann
/* PHY calibration */
extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
+/* Spur mitigation */
+bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel);
+void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel);
/* Misc PHY functions */
extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
-extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, unsigned int ant);
-extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
extern int ath5k_hw_phy_disable(struct ath5k_hw *ah);
+/* Antenna control */
+extern void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
+extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant);
+extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
/* TX power setup */
extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower);
-extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 ee_mode, u8 txpower);
+extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
/*
* Functions used internaly
diff --git a/drivers/net/wireless/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 70d376c63aa..c41ef58393e 100644
--- a/drivers/net/wireless/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -133,7 +133,6 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
ah->ah_cw_min = AR5K_TUNE_CWMIN;
ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
ah->ah_software_retry = false;
- ah->ah_ant_diversity = AR5K_TUNE_ANT_DIVERSITY;
/*
* Set the mac version based on the pci id
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 32df27a9c7a..55f7de09d13 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -61,9 +61,13 @@
static int ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */
static int modparam_nohwcrypt;
-module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+static int modparam_all_channels;
+module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
+MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
+
/******************\
* Internal defines *
@@ -223,9 +227,6 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
static void ath5k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_if_init_conf *conf);
static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
-static int ath5k_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf);
static void ath5k_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
@@ -241,8 +242,8 @@ static int ath5k_get_tx_stats(struct ieee80211_hw *hw,
static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
static void ath5k_reset_tsf(struct ieee80211_hw *hw);
-static int ath5k_beacon_update(struct ath5k_softc *sc,
- struct sk_buff *skb);
+static int ath5k_beacon_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -255,7 +256,6 @@ static const struct ieee80211_ops ath5k_hw_ops = {
.add_interface = ath5k_add_interface,
.remove_interface = ath5k_remove_interface,
.config = ath5k_config,
- .config_interface = ath5k_config_interface,
.configure_filter = ath5k_configure_filter,
.set_key = ath5k_set_key,
.get_stats = ath5k_get_stats,
@@ -516,6 +516,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
IEEE80211_HW_NOISE_DBM;
hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
@@ -705,6 +706,15 @@ err_no_irq:
* Driver Initialization *
\***********************/
+static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath5k_softc *sc = hw->priv;
+ struct ath_regulatory *reg = &sc->ah->ah_regulatory;
+
+ return ath_reg_notifier_apply(wiphy, request, reg);
+}
+
static int
ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
{
@@ -793,12 +803,23 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
memset(sc->bssidmask, 0xff, ETH_ALEN);
ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
+ ah->ah_regulatory.current_rd =
+ ah->ah_capabilities.cap_eeprom.ee_regdomain;
+ ret = ath_regd_init(&ah->ah_regulatory, hw->wiphy, ath5k_reg_notifier);
+ if (ret) {
+ ATH5K_ERR(sc, "can't initialize regulatory system\n");
+ goto err_queues;
+ }
+
ret = ieee80211_register_hw(hw);
if (ret) {
ATH5K_ERR(sc, "can't register ieee80211 hw\n");
goto err_queues;
}
+ if (!ath_is_world_regd(&sc->ah->ah_regulatory))
+ regulatory_hint(hw->wiphy, sc->ah->ah_regulatory.alpha2);
+
ath5k_init_leds(sc);
return 0;
@@ -862,6 +883,20 @@ ath5k_ieee2mhz(short chan)
return 2212 + chan * 20;
}
+/*
+ * Returns true for the channel numbers used without all_channels modparam.
+ */
+static bool ath5k_is_standard_channel(short chan)
+{
+ return ((chan <= 14) ||
+ /* UNII 1,2 */
+ ((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
+ /* midband */
+ ((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
+ /* UNII-3 */
+ ((chan & 3) == 1 && chan >= 149 && chan <= 165));
+}
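
The new ath5k_is_standard_channel() filter relies on the usual channels being spaced four numbers apart, so (chan & 3) == 0 matches 36, 40, ..., 64 and 100, 104, ..., 140, while (chan & 3) == 1 matches 149, 153, ..., 165; everything else is hidden unless the all_channels module parameter is set. The predicate can be exercised on its own:

#include <stdio.h>
#include <stdbool.h>

/* Same predicate as ath5k_is_standard_channel() above. */
static bool is_standard_channel(short chan)
{
	return (chan <= 14) ||
	       ((chan & 3) == 0 && chan >= 36 && chan <= 64) ||	/* UNII 1,2 */
	       ((chan & 3) == 0 && chan >= 100 && chan <= 140) ||	/* midband  */
	       ((chan & 3) == 1 && chan >= 149 && chan <= 165);		/* UNII-3   */
}

int main(void)
{
	short probe[] = { 1, 14, 34, 36, 38, 64, 100, 144, 149, 151, 165 };
	unsigned int i;

	for (i = 0; i < sizeof(probe) / sizeof(probe[0]); i++)
		printf("chan %3d -> %s\n", probe[i],
		       is_standard_channel(probe[i]) ? "exposed" : "filtered");
	return 0;
}
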
+
static unsigned int
ath5k_copy_channels(struct ath5k_hw *ah,
struct ieee80211_channel *channels,
@@ -899,6 +934,9 @@ ath5k_copy_channels(struct ath5k_hw *ah,
if (!ath5k_channel_ok(ah, freq, chfreq))
continue;
+ if (!modparam_all_channels && !ath5k_is_standard_channel(ch))
+ continue;
+
/* Write channel info and increment counter */
channels[count].center_freq = freq;
channels[count].band = (chfreq == CHANNEL_2GHZ) ?
@@ -1238,7 +1276,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
(sc->power_level * 2),
hw_rate,
- info->control.rates[0].count, keyidx, 0, flags,
+ info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
cts_rate, duration);
if (ret)
goto err_unmap;
@@ -1574,9 +1612,8 @@ ath5k_rx_start(struct ath5k_softc *sc)
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n",
sc->cachelsz, sc->rxbufsize);
- sc->rxlink = NULL;
-
spin_lock_bh(&sc->rxbuflock);
+ sc->rxlink = NULL;
list_for_each_entry(bf, &sc->rxbuf, list) {
ret = ath5k_rxbuf_setup(sc, bf);
if (ret != 0) {
@@ -1585,9 +1622,9 @@ ath5k_rx_start(struct ath5k_softc *sc)
}
}
bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
+ ath5k_hw_set_rxdp(ah, bf->daddr);
spin_unlock_bh(&sc->rxbuflock);
- ath5k_hw_set_rxdp(ah, bf->daddr);
ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */
ath5k_mode_setup(sc); /* set filters, etc. */
ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
@@ -1699,35 +1736,6 @@ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
}
}
-static void ath5k_tasklet_beacon(unsigned long data)
-{
- struct ath5k_softc *sc = (struct ath5k_softc *) data;
-
- /*
- * Software beacon alert--time to send a beacon.
- *
- * In IBSS mode we use this interrupt just to
- * keep track of the next TBTT (target beacon
- * transmission time) in order to detect wether
- * automatic TSF updates happened.
- */
- if (sc->opmode == NL80211_IFTYPE_ADHOC) {
- /* XXX: only if VEOL suppported */
- u64 tsf = ath5k_hw_get_tsf64(sc->ah);
- sc->nexttbtt += sc->bintval;
- ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
- "SWBA nexttbtt: %x hw_tu: %x "
- "TSF: %llx\n",
- sc->nexttbtt,
- TSF_TO_TU(tsf),
- (unsigned long long) tsf);
- } else {
- spin_lock(&sc->block);
- ath5k_beacon_send(sc);
- spin_unlock(&sc->block);
- }
-}
-
static void
ath5k_tasklet_rx(unsigned long data)
{
@@ -1736,7 +1744,7 @@ ath5k_tasklet_rx(unsigned long data)
struct sk_buff *skb, *next_skb;
dma_addr_t next_skb_addr;
struct ath5k_softc *sc = (void *)data;
- struct ath5k_buf *bf, *bf_last;
+ struct ath5k_buf *bf;
struct ath5k_desc *ds;
int ret;
int hdrlen;
@@ -1747,7 +1755,6 @@ ath5k_tasklet_rx(unsigned long data)
ATH5K_WARN(sc, "empty rx buf pool\n");
goto unlock;
}
- bf_last = list_entry(sc->rxbuf.prev, struct ath5k_buf, list);
do {
rxs.flag = 0;
@@ -1756,24 +1763,9 @@ ath5k_tasklet_rx(unsigned long data)
skb = bf->skb;
ds = bf->desc;
- /*
- * last buffer must not be freed to ensure proper hardware
- * function. When the hardware finishes also a packet next to
- * it, we are sure, it doesn't use it anymore and we can go on.
- */
- if (bf_last == bf)
- bf->flags |= 1;
- if (bf->flags) {
- struct ath5k_buf *bf_next = list_entry(bf->list.next,
- struct ath5k_buf, list);
- ret = sc->ah->ah_proc_rx_desc(sc->ah, bf_next->desc,
- &rs);
- if (ret)
- break;
- bf->flags &= ~1;
- /* skip the overwritten one (even status is martian) */
- goto next;
- }
+ /* bail if HW is still using self-linked descriptor */
+ if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr)
+ break;
ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
if (unlikely(ret == -EINPROGRESS))
@@ -2014,7 +2006,8 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath5k_hw *ah = sc->ah;
struct ath5k_desc *ds;
- int ret, antenna = 0;
+ int ret = 0;
+ u8 antenna;
u32 flags;
bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
@@ -2028,23 +2021,35 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
}
ds = bf->desc;
+ antenna = ah->ah_tx_ant;
flags = AR5K_TXDESC_NOACK;
if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
ds->ds_link = bf->daddr; /* self-linked */
flags |= AR5K_TXDESC_VEOL;
- /*
- * Let hardware handle antenna switching if txantenna is not set
- */
- } else {
+ } else
ds->ds_link = 0;
- /*
- * Switch antenna every 4 beacons if txantenna is not set
- * XXX assumes two antennas
- */
- if (antenna == 0)
- antenna = sc->bsent & 4 ? 2 : 1;
- }
+
+ /*
+ * If we use multiple antennas on AP and use
+ * the Sectored AP scenario, switch antenna every
+ * 4 beacons to make sure everybody hears our AP.
+ * When a client tries to associate, hw will keep
+ * track of the tx antenna to be used for this client
+ * automatically, based on ACKed packets.
+ *
+ * Note: AP still listens and transmits RTS on the
+ * default antenna which is supposed to be an omni.
+ *
+ * Note2: On sectored scenarios it's possible to have
+ * multiple antennas (1 omni -the default- and 14 sectors)
+ * so if we choose to actually support this mode we need
+ * to allow user to set how many antennas we have and tweak
+ * the code below to send beacons on all of them.
+ */
+ if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
+ antenna = sc->bsent & 4 ? 2 : 1;
+
/* FIXME: If we are in g mode and rate is a CCK rate
* subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
@@ -2065,6 +2070,13 @@ err_unmap:
return ret;
}
+static void ath5k_beacon_disable(struct ath5k_softc *sc)
+{
+ sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
+ ath5k_hw_set_imr(sc->ah, sc->imask);
+ ath5k_hw_stop_tx_dma(sc->ah, sc->bhalq);
+}
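
For the sectored-AP case described in the comment above, ath5k_beacon_setup() picks the beacon antenna with sc->bsent & 4 ? 2 : 1, which alternates between antennas 1 and 2 in runs of four beacons rather than flipping on every beacon. The resulting pattern:

#include <stdio.h>

/* Print the antenna chosen by the "switch every 4 beacons" expression
 * for a run of consecutive beacon counters. */
int main(void)
{
	unsigned int bsent;

	for (bsent = 0; bsent < 16; bsent++)
		printf("bsent=%2u -> antenna %d\n", bsent,
		       (bsent & 4) ? 2 : 1);
	/* output: 1,1,1,1, 2,2,2,2, 1,1,1,1, 2,2,2,2 */
	return 0;
}
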
+
/*
* Transmit a beacon frame at SWBA. Dynamic updates to the
* frame contents are done as needed and the slot time is
@@ -2097,7 +2109,7 @@ ath5k_beacon_send(struct ath5k_softc *sc)
sc->bmisscount++;
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
"missed %u consecutive beacons\n", sc->bmisscount);
- if (sc->bmisscount > 3) { /* NB: 3 is a guess */
+ if (sc->bmisscount > 10) { /* NB: 10 is a guess */
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
"stuck beacon time (%u missed)\n",
sc->bmisscount);
@@ -2118,10 +2130,14 @@ ath5k_beacon_send(struct ath5k_softc *sc)
* are still pending on the queue.
*/
if (unlikely(ath5k_hw_stop_tx_dma(ah, sc->bhalq))) {
- ATH5K_WARN(sc, "beacon queue %u didn't stop?\n", sc->bhalq);
+ ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq);
/* NB: hw still stops DMA, so proceed */
}
+ /* refresh the beacon for AP mode */
+ if (sc->opmode == NL80211_IFTYPE_AP)
+ ath5k_beacon_update(sc->hw, sc->vif);
+
ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
ath5k_hw_start_tx_dma(ah, sc->bhalq);
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
@@ -2278,6 +2294,35 @@ ath5k_beacon_config(struct ath5k_softc *sc)
ath5k_hw_set_imr(ah, sc->imask);
}
+static void ath5k_tasklet_beacon(unsigned long data)
+{
+ struct ath5k_softc *sc = (struct ath5k_softc *) data;
+
+ /*
+ * Software beacon alert--time to send a beacon.
+ *
+ * In IBSS mode we use this interrupt just to
+ * keep track of the next TBTT (target beacon
+ * transmission time) in order to detect whether
+ * automatic TSF updates happened.
+ */
+ if (sc->opmode == NL80211_IFTYPE_ADHOC) {
+ /* XXX: only if VEOL supported */
+ u64 tsf = ath5k_hw_get_tsf64(sc->ah);
+ sc->nexttbtt += sc->bintval;
+ ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
+ "SWBA nexttbtt: %x hw_tu: %x "
+ "TSF: %llx\n",
+ sc->nexttbtt,
+ TSF_TO_TU(tsf),
+ (unsigned long long) tsf);
+ } else {
+ spin_lock(&sc->block);
+ ath5k_beacon_send(sc);
+ spin_unlock(&sc->block);
+ }
+}
+
/********************\
* Interrupt handling *
@@ -2315,6 +2360,8 @@ ath5k_init(struct ath5k_softc *sc)
if (ret)
goto done;
+ ath5k_rfkill_hw_start(ah);
+
/*
* Reset the key cache since some parts do not reset the
* contents on initial power up or resume from suspend.
@@ -2423,6 +2470,8 @@ ath5k_stop_hw(struct ath5k_softc *sc)
tasklet_kill(&sc->restq);
tasklet_kill(&sc->beacontq);
+ ath5k_rfkill_hw_stop(sc->ah);
+
return ret;
}
@@ -2452,7 +2501,7 @@ ath5k_intr(int irq, void *dev_id)
tasklet_schedule(&sc->restq);
} else {
if (status & AR5K_INT_SWBA) {
- tasklet_schedule(&sc->beacontq);
+ tasklet_hi_schedule(&sc->beacontq);
}
if (status & AR5K_INT_RXEOL) {
/*
@@ -2481,8 +2530,11 @@ ath5k_intr(int irq, void *dev_id)
*/
ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
}
+ if (status & AR5K_INT_GPIO)
+ tasklet_schedule(&sc->rf_kill.toggleq);
+
}
- } while (ath5k_hw_is_intr_pending(ah) && counter-- > 0);
+ } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
if (unlikely(!counter))
ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
@@ -2719,6 +2771,7 @@ ath5k_remove_interface(struct ieee80211_hw *hw,
goto end;
ath5k_hw_set_lladdr(sc->ah, mac);
+ ath5k_beacon_disable(sc);
sc->vif = NULL;
end:
mutex_unlock(&sc->lock);
@@ -2731,53 +2784,43 @@ static int
ath5k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = sc->ah;
struct ieee80211_conf *conf = &hw->conf;
- int ret;
+ int ret = 0;
mutex_lock(&sc->lock);
- sc->bintval = conf->beacon_int;
- sc->power_level = conf->power_level;
-
ret = ath5k_chan_set(sc, conf->channel);
+ if (ret < 0)
+ goto unlock;
- mutex_unlock(&sc->lock);
- return ret;
-}
-
-static int
-ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
- int ret = 0;
+ if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
+ (sc->power_level != conf->power_level)) {
+ sc->power_level = conf->power_level;
- mutex_lock(&sc->lock);
- if (sc->vif != vif) {
- ret = -EIO;
- goto unlock;
- }
- if (conf->changed & IEEE80211_IFCC_BSSID && conf->bssid) {
- /* Cache for later use during resets */
- memcpy(ah->ah_bssid, conf->bssid, ETH_ALEN);
- /* XXX: assoc id is set to 0 for now, mac80211 doesn't have
- * a clean way of letting us retrieve this yet. */
- ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
- mmiowb();
- }
- if (conf->changed & IEEE80211_IFCC_BEACON &&
- (vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MESH_POINT ||
- vif->type == NL80211_IFTYPE_AP)) {
- struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
- if (!beacon) {
- ret = -ENOMEM;
- goto unlock;
- }
- ath5k_beacon_update(sc, beacon);
+ /* Half dB steps */
+ ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
}
+ /* TODO:
+ * 1) Move this to config_interface and handle each case
+ * separately eg. when we have only one STA vif, use
+ * AR5K_ANTMODE_SINGLE_AP
+ *
+ * 2) Allow the user to change antenna mode eg. when only
+ * one antenna is present
+ *
+ * 3) Allow the user to set default/tx antenna when possible
+ *
+ * 4) Default mode should handle 90% of the cases, together
+ * with fixed a/b and single AP modes we should be able to
+ * handle 99%. Sectored modes are extreme cases and I still
+ * haven't found a usage for them. If we decide to support them,
+ * then we must allow the user to set how many tx antennas we
+ * have available
+ */
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
+
unlock:
mutex_unlock(&sc->lock);
return ret;
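
ath5k_config() now reprograms the TX power only when IEEE80211_CONF_CHANGE_POWER is set and the level actually changed, and it passes conf->power_level * 2 because, per the "Half dB steps" comment, ath5k_hw_set_txpower_limit() takes its limit in 0.5 dB units while mac80211 hands the driver whole dBm. The conversion is simply:

#include <stdio.h>

/* dBm from mac80211 -> half-dB steps for the ath5k txpower limit. */
int main(void)
{
	int dbm;

	for (dbm = 5; dbm <= 20; dbm += 5)
		printf("%2d dBm -> %d half-dB steps\n", dbm, dbm * 2);
	return 0;
}
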
@@ -3020,28 +3063,62 @@ ath5k_reset_tsf(struct ieee80211_hw *hw)
ath5k_hw_reset_tsf(sc->ah);
}
+/*
+ * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
+ * this is called only once at config_bss time, for AP we do it every
+ * SWBA interrupt so that the TIM will reflect buffered frames.
+ *
+ * Called with the beacon lock.
+ */
static int
-ath5k_beacon_update(struct ath5k_softc *sc, struct sk_buff *skb)
+ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- unsigned long flags;
int ret;
+ struct ath5k_softc *sc = hw->priv;
+ struct sk_buff *skb;
+
+ if (WARN_ON(!vif)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ skb = ieee80211_beacon_get(hw, vif);
+
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
ath5k_debug_dump_skb(sc, skb, "BC ", 1);
- spin_lock_irqsave(&sc->block, flags);
ath5k_txbuf_free(sc, sc->bbuf);
sc->bbuf->skb = skb;
ret = ath5k_beacon_setup(sc, sc->bbuf);
if (ret)
sc->bbuf->skb = NULL;
+out:
+ return ret;
+}
+
+/*
+ * Update the beacon and reconfigure the beacon queues.
+ */
+static void
+ath5k_beacon_reconfig(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ int ret;
+ unsigned long flags;
+ struct ath5k_softc *sc = hw->priv;
+
+ spin_lock_irqsave(&sc->block, flags);
+ ret = ath5k_beacon_update(hw, vif);
spin_unlock_irqrestore(&sc->block, flags);
- if (!ret) {
+ if (ret == 0) {
ath5k_beacon_config(sc);
mmiowb();
}
-
- return ret;
}
+
static void
set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
@@ -3063,11 +3140,37 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
u32 changes)
{
struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = sc->ah;
+
+ mutex_lock(&sc->lock);
+ if (WARN_ON(sc->vif != vif))
+ goto unlock;
+
+ if (changes & BSS_CHANGED_BSSID) {
+ /* Cache for later use during resets */
+ memcpy(ah->ah_bssid, bss_conf->bssid, ETH_ALEN);
+ /* XXX: assoc id is set to 0 for now, mac80211 doesn't have
+ * a clean way of letting us retrieve this yet. */
+ ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
+ mmiowb();
+ }
+
+ if (changes & BSS_CHANGED_BEACON_INT)
+ sc->bintval = bss_conf->beacon_int;
+
if (changes & BSS_CHANGED_ASSOC) {
- mutex_lock(&sc->lock);
sc->assoc = bss_conf->assoc;
if (sc->opmode == NL80211_IFTYPE_STATION)
set_beacon_filter(hw, sc->assoc);
- mutex_unlock(&sc->lock);
}
+
+ if (changes & BSS_CHANGED_BEACON &&
+ (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_AP)) {
+ ath5k_beacon_reconfig(hw, vif);
+ }
+
+ unlock:
+ mutex_unlock(&sc->lock);
}
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 822956114cd..f9b7f2f819b 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -46,6 +46,7 @@
#include <linux/wireless.h>
#include <linux/if_ether.h>
#include <linux/leds.h>
+#include <linux/rfkill.h>
#include "ath5k.h"
#include "debug.h"
@@ -56,7 +57,6 @@
struct ath5k_buf {
struct list_head list;
- unsigned int flags; /* rx descriptor flags */
struct ath5k_desc *desc; /* virtual addr of desc */
dma_addr_t daddr; /* physical addr of desc */
struct sk_buff *skb; /* skbuff for buf */
@@ -92,6 +92,15 @@ struct ath5k_led
struct led_classdev led_dev; /* led classdev */
};
+/* Rfkill */
+struct ath5k_rfkill {
+ /* GPIO PIN for rfkill */
+ u16 gpio;
+ /* polarity of rfkill GPIO PIN */
+ bool polarity;
+ /* RFKILL toggle tasklet */
+ struct tasklet_struct toggleq;
+};
#if CHAN_DEBUG
#define ATH_CHAN_MAX (26+26+26+200+200)
@@ -168,6 +177,8 @@ struct ath5k_softc {
struct tasklet_struct txtq; /* tx intr tasklet */
struct ath5k_led tx_led; /* tx led */
+ struct ath5k_rfkill rf_kill;
+
spinlock_t block; /* protects beacon */
struct tasklet_struct beacontq; /* beacon intr tasklet */
struct ath5k_buf *bbuf; /* beacon buffer */
diff --git a/drivers/net/wireless/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 367a6c7d3cc..367a6c7d3cc 100644
--- a/drivers/net/wireless/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 4904a07e4b5..4904a07e4b5 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
diff --git a/drivers/net/wireless/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 66f69f04e55..66f69f04e55 100644
--- a/drivers/net/wireless/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index dc30a2b70a6..dc30a2b70a6 100644
--- a/drivers/net/wireless/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
diff --git a/drivers/net/wireless/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index 56158c804e3..56158c804e3 100644
--- a/drivers/net/wireless/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
diff --git a/drivers/net/wireless/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index b65b4feb2d2..941b51130a6 100644
--- a/drivers/net/wireless/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -80,8 +80,6 @@ int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
* ath5k_hw_get_rxdp - Get RX Descriptor's address
*
* @ah: The &struct ath5k_hw
- *
- * XXX: Is RXDP read and clear ?
*/
u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
diff --git a/drivers/net/wireless/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index c0fb3b09ba4..c56b494d417 100644
--- a/drivers/net/wireless/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -156,6 +156,17 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7;
}
+ AR5K_EEPROM_READ(AR5K_EEPROM_IS_HB63, val);
+
+ if ((ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4)) && val)
+ ee->ee_is_hb63 = true;
+ else
+ ee->ee_is_hb63 = false;
+
+ AR5K_EEPROM_READ(AR5K_EEPROM_RFKILL, val);
+ ee->ee_rfkill_pin = (u8) AR5K_REG_MS(val, AR5K_EEPROM_RFKILL_GPIO_SEL);
+ ee->ee_rfkill_pol = val & AR5K_EEPROM_RFKILL_POLARITY ? true : false;
+
return 0;
}
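
ath5k_eeprom_init_header() now reads the rfkill configuration straight from EEPROM word 0x0f using the masks moved into eeprom.h: bits 4:2 (AR5K_EEPROM_RFKILL_GPIO_SEL) select the GPIO pin and bit 1 (AR5K_EEPROM_RFKILL_POLARITY) gives the polarity. A small stand-alone illustration of that extraction, assuming AR5K_REG_MS() expands to the usual ((val) & mask) >> shift pattern:

#include <stdio.h>
#include <stdint.h>

#define RFKILL_GPIO_SEL		0x0000001c	/* bits 4:2 */
#define RFKILL_GPIO_SEL_S	2
#define RFKILL_POLARITY		0x00000002	/* bit 1 */

/* mask-and-shift field extraction, as AR5K_REG_MS() is assumed to do */
static uint8_t rfkill_pin(uint16_t val)
{
	return (uint8_t)((val & RFKILL_GPIO_SEL) >> RFKILL_GPIO_SEL_S);
}

int main(void)
{
	uint16_t val = 0x001a;	/* hypothetical EEPROM word at offset 0x0f */

	printf("rfkill gpio pin : %u\n", rfkill_pin(val));	/* (0x1a & 0x1c) >> 2 = 6 */
	printf("rfkill polarity : %u\n", !!(val & RFKILL_POLARITY));
	return 0;
}
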
@@ -197,16 +208,16 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f;
ee->ee_ant_control[mode][i++] = val & 0x3f;
- /* Get antenna modes */
- ah->ah_antenna[mode][0] =
+ /* Get antenna switch tables */
+ ah->ah_ant_ctl[mode][AR5K_ANT_CTL] =
(ee->ee_ant_control[mode][0] << 4);
- ah->ah_antenna[mode][AR5K_ANT_FIXED_A] =
+ ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_A] =
ee->ee_ant_control[mode][1] |
(ee->ee_ant_control[mode][2] << 6) |
(ee->ee_ant_control[mode][3] << 12) |
(ee->ee_ant_control[mode][4] << 18) |
(ee->ee_ant_control[mode][5] << 24);
- ah->ah_antenna[mode][AR5K_ANT_FIXED_B] =
+ ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_B] =
ee->ee_ant_control[mode][6] |
(ee->ee_ant_control[mode][7] << 6) |
(ee->ee_ant_control[mode][8] << 12) |
@@ -640,9 +651,9 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
static inline void
ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
{
- const static u16 intercepts3[] =
+ static const u16 intercepts3[] =
{ 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 };
- const static u16 intercepts3_2[] =
+ static const u16 intercepts3_2[] =
{ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
const u16 *ip;
int i;
@@ -1694,9 +1705,40 @@ ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
return 0;
}
+static int
+ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u32 offset;
+ u16 val;
+ int ret = 0, i;
+
+ offset = AR5K_EEPROM_CTL(ee->ee_version) +
+ AR5K_EEPROM_N_CTLS(ee->ee_version);
+
+ if (ee->ee_version < AR5K_EEPROM_VERSION_5_3) {
+ /* No spur info for 5GHz */
+ ee->ee_spur_chans[0][0] = AR5K_EEPROM_NO_SPUR;
+ /* 2 channels for 2GHz (2464/2420) */
+ ee->ee_spur_chans[0][1] = AR5K_EEPROM_5413_SPUR_CHAN_1;
+ ee->ee_spur_chans[1][1] = AR5K_EEPROM_5413_SPUR_CHAN_2;
+ ee->ee_spur_chans[2][1] = AR5K_EEPROM_NO_SPUR;
+ } else if (ee->ee_version >= AR5K_EEPROM_VERSION_5_3) {
+ for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) {
+ AR5K_EEPROM_READ(offset, val);
+ ee->ee_spur_chans[i][0] = val;
+ AR5K_EEPROM_READ(offset + AR5K_EEPROM_N_SPUR_CHANS,
+ val);
+ ee->ee_spur_chans[i][1] = val;
+ offset++;
+ }
+ }
+
+ return ret;
+}
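
The spur tables store channels as fbin values scaled up by 2, which for the 2 GHz band works out to (freq_MHz - 2300) * 10 and for 5 GHz to (freq_MHz - 4900) * 10; that matches the eeprom.h constants, where 2464 MHz encodes to 1640 (AR5K_EEPROM_5413_SPUR_CHAN_1) and 2420 MHz to 1200 (AR5K_EEPROM_5413_SPUR_CHAN_2). A quick check of the encoding:

#include <stdio.h>

/* fbin-x2 encoding used by the spur tables: 100 kHz steps above a
 * per-band base frequency. */
static int freq_to_fbin_x2(int freq_mhz, int is_2ghz)
{
	return (freq_mhz - (is_2ghz ? 2300 : 4900)) * 10;
}

int main(void)
{
	printf("2464 MHz -> %d (expect 1640)\n", freq_to_fbin_x2(2464, 1));
	printf("2420 MHz -> %d (expect 1200)\n", freq_to_fbin_x2(2420, 1));
	printf("5180 MHz -> %d\n", freq_to_fbin_x2(5180, 0));
	return 0;
}
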
/*
- * Initialize eeprom power tables
+ * Initialize eeprom data structure
*/
int
ath5k_eeprom_init(struct ath5k_hw *ah)
@@ -1719,6 +1761,10 @@ ath5k_eeprom_init(struct ath5k_hw *ah)
if (err < 0)
return err;
+ err = ath5k_eeprom_read_spur_chans(ah);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -1754,16 +1800,3 @@ int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
return 0;
}
-
-bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah)
-{
- u16 data;
-
- ath5k_hw_eeprom_read(ah, AR5K_EEPROM_IS_HB63, &data);
-
- if ((ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4)) && data)
- return true;
- else
- return false;
-}
-
diff --git a/drivers/net/wireless/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index b0c0606dea0..64be73a5eda 100644
--- a/drivers/net/wireless/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -26,6 +26,13 @@
#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */
#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */
+
+#define AR5K_EEPROM_RFKILL 0x0f
+#define AR5K_EEPROM_RFKILL_GPIO_SEL 0x0000001c
+#define AR5K_EEPROM_RFKILL_GPIO_SEL_S 2
+#define AR5K_EEPROM_RFKILL_POLARITY 0x00000002
+#define AR5K_EEPROM_RFKILL_POLARITY_S 1
+
#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
@@ -66,11 +73,6 @@
#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz */
-#define AR5K_EEPROM_RFKILL_GPIO_SEL 0x0000001c
-#define AR5K_EEPROM_RFKILL_GPIO_SEL_S 2
-#define AR5K_EEPROM_RFKILL_POLARITY 0x00000002
-#define AR5K_EEPROM_RFKILL_POLARITY_S 1
-
/* Newer EEPROMs are using a different offset */
#define AR5K_EEPROM_OFF(_v, _v3_0, _v3_3) \
(((_v) >= AR5K_EEPROM_VERSION_3_3) ? _v3_3 : _v3_0)
@@ -211,6 +213,23 @@
#define AR5K_EEPROM_I_GAIN 10
#define AR5K_EEPROM_CCK_OFDM_DELTA 15
#define AR5K_EEPROM_N_IQ_CAL 2
+/* 5GHz/2GHz */
+enum ath5k_eeprom_freq_bands{
+ AR5K_EEPROM_BAND_5GHZ = 0,
+ AR5K_EEPROM_BAND_2GHZ = 1,
+ AR5K_EEPROM_N_FREQ_BANDS,
+};
+/* Spur chans per freq band */
+#define AR5K_EEPROM_N_SPUR_CHANS 5
+/* fbin value for chan 2464 x2 */
+#define AR5K_EEPROM_5413_SPUR_CHAN_1 1640
+/* fbin value for chan 2420 x2 */
+#define AR5K_EEPROM_5413_SPUR_CHAN_2 1200
+#define AR5K_EEPROM_SPUR_CHAN_MASK 0x3FFF
+#define AR5K_EEPROM_NO_SPUR 0x8000
+#define AR5K_SPUR_CHAN_WIDTH 87
+#define AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz 3125
+#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250
#define AR5K_EEPROM_READ(_o, _v) do { \
ret = ath5k_hw_eeprom_read(ah, (_o), &(_v)); \
@@ -221,11 +240,11 @@
#define AR5K_EEPROM_READ_HDR(_o, _v) \
AR5K_EEPROM_READ(_o, ah->ah_capabilities.cap_eeprom._v); \
-enum ath5k_ant_setting {
- AR5K_ANT_VARIABLE = 0, /* variable by programming */
- AR5K_ANT_FIXED_A = 1, /* fixed to 11a frequencies */
- AR5K_ANT_FIXED_B = 2, /* fixed to 11b frequencies */
- AR5K_ANT_MAX = 3,
+enum ath5k_ant_table {
+ AR5K_ANT_CTL = 0, /* Idle switch table settings */
+ AR5K_ANT_SWTABLE_A = 1, /* Switch table for antenna A */
+ AR5K_ANT_SWTABLE_B = 2, /* Switch table for antenna B */
+ AR5K_ANT_MAX,
};
enum ath5k_ctl_mode {
@@ -369,6 +388,9 @@ struct ath5k_eeprom_info {
u16 ee_version;
u16 ee_header;
u16 ee_ant_gain;
+ u8 ee_rfkill_pin;
+ bool ee_rfkill_pol;
+ bool ee_is_hb63;
u16 ee_misc0;
u16 ee_misc1;
u16 ee_misc2;
@@ -436,6 +458,10 @@ struct ath5k_eeprom_info {
s8 ee_pga_desired_size_turbo[AR5K_EEPROM_N_MODES];
s8 ee_pd_gain_overlap;
+ /* Spur mitigation data (fbin values for spur channels) */
+ u16 ee_spur_chans[AR5K_EEPROM_N_SPUR_CHANS][AR5K_EEPROM_N_FREQ_BANDS];
+
+ /* Antenna raw switch tables */
u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
};
diff --git a/drivers/net/wireless/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c
index 64a27e73d02..64a27e73d02 100644
--- a/drivers/net/wireless/ath5k/gpio.c
+++ b/drivers/net/wireless/ath/ath5k/gpio.c
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 61fb621ed20..18eb5190ce4 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -537,8 +537,6 @@ static const struct ath5k_ini ar5212_ini_common_start[] = {
{ AR5K_DCU_TX_FILTER_1(15), 0x00000000 },
{ AR5K_DCU_TX_FILTER_CLR, 0x00000000 },
{ AR5K_DCU_TX_FILTER_SET, 0x00000000 },
- { AR5K_DCU_TX_FILTER_CLR, 0x00000000 },
- { AR5K_DCU_TX_FILTER_SET, 0x00000000 },
{ AR5K_STA_ID1, 0x00000000 },
{ AR5K_BSS_ID0, 0x00000000 },
{ AR5K_BSS_ID1, 0x00000000 },
@@ -669,7 +667,7 @@ static const struct ath5k_ini ar5212_ini_common_start[] = {
/*{ AR5K_PHY(650), 0x000001b5 },*/
{ AR5K_PHY(651), 0x00000000 },
{ AR5K_PHY_TXPOWER_RATE3, 0x20202020 },
- { AR5K_PHY_TXPOWER_RATE2, 0x20202020 },
+ { AR5K_PHY_TXPOWER_RATE4, 0x20202020 },
/*{ AR5K_PHY(655), 0x13c889af },*/
{ AR5K_PHY(656), 0x38490a20 },
{ AR5K_PHY(657), 0x00007bb6 },
@@ -718,7 +716,7 @@ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
{ AR5K_PHY_SETTLING,
{ 0x1372161c, 0x13721c25, 0x13721722, 0x137216a2, 0x13721c25 } },
{ AR5K_PHY_AGCCTL,
- { 0x00009d10, 0x00009d10, 0x00009d18, 0x00009d18, 0x00009d18 } },
+ { 0x00009d10, 0x00009d10, 0x00009d18, 0x00009d18, 0x00009d10 } },
{ AR5K_PHY_NF,
{ 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
{ AR5K_PHY_WEAK_OFDM_HIGH_THR,
@@ -799,7 +797,7 @@ static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
{ AR5K_PHY_DESIRED_SIZE,
{ 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
{ AR5K_PHY_SIG,
- { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e, 0x7ee80d2e } },
+ { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e, 0x7e800d2e } },
{ AR5K_PHY_AGCCOARSE,
{ 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e } },
{ AR5K_PHY_WEAK_OFDM_LOW_THR,
diff --git a/drivers/net/wireless/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 19555fb79c9..876725f08b6 100644
--- a/drivers/net/wireless/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -53,8 +53,6 @@
/* Devices we match on for LED config info (typically laptops) */
static const struct pci_device_id ath5k_led_devices[] = {
- /* IBM-specific AR5212 */
- { PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) },
/* AR5211 */
{ PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5211), ATH_LED(0, 0) },
/* HP Compaq nc6xx, nc4000, nx6000 */
@@ -67,6 +65,12 @@ static const struct pci_device_id ath5k_led_devices[] = {
{ ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) },
/* Acer Extensa 5620z (nekoreeve@gmail.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) },
+ /* Fukato Datacask Jupiter 1014a (mrb74@gmx.at) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_AZWAVE, 0x1026), ATH_LED(3, 0) },
+ /* IBM ThinkPad AR5BXB6 (legovini@spiro.fisica.unipd.it) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_IBM, 0x058a), ATH_LED(1, 0) },
+ /* IBM-specific AR5212 (all others) */
+ { PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) },
{ }
};
@@ -78,7 +82,7 @@ void ath5k_led_enable(struct ath5k_softc *sc)
}
}
-void ath5k_led_on(struct ath5k_softc *sc)
+static void ath5k_led_on(struct ath5k_softc *sc)
{
if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
return;
diff --git a/drivers/net/wireless/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 55122f1e198..ec35503f6a4 100644
--- a/drivers/net/wireless/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -736,8 +736,8 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
/* When in AP mode zero timer0 to start TSF */
if (ah->ah_op_mode == NL80211_IFTYPE_AP)
ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
- else
- ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
+
+ ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);
@@ -1003,7 +1003,7 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
* Note2: Windows driver (ndiswrapper) sets this to
* 0x00000714 instead of 0x00000007
*/
- if (ah->ah_version > AR5K_AR5211) {
+ if (ah->ah_version >= AR5K_AR5211) {
ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
AR5K_KEYTABLE_TYPE(entry));
@@ -1038,9 +1038,9 @@ int ath5k_keycache_type(const struct ieee80211_key_conf *key)
case ALG_CCMP:
return AR5K_KEYTABLE_TYPE_CCM;
case ALG_WEP:
- if (key->keylen == LEN_WEP40)
+ if (key->keylen == WLAN_KEY_LEN_WEP40)
return AR5K_KEYTABLE_TYPE_40;
- else if (key->keylen == LEN_WEP104)
+ else if (key->keylen == WLAN_KEY_LEN_WEP104)
return AR5K_KEYTABLE_TYPE_104;
return -EINVAL;
default:
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 9e2faae5ae9..a876ca8d69e 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -168,9 +168,6 @@ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
* tx power and a Peak to Average Power Detector (PAPD) will try
* to measure the gain.
*
- * TODO: Use propper tx power setting for the probe packet so
- * that we don't observe a serious power drop on the receiver
- *
* XXX: How about forcing a tx packet (bypassing PCU arbitrator etc)
* just after we enable the probe so that we don't mess with
* standard traffic ? Maybe it's time to use sw interrupts and
@@ -186,7 +183,7 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
/* Send the packet with 2dB below max power as
* patent doc suggest */
- ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max_pwr - 4,
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_ofdm - 4,
AR5K_PHY_PAPD_PROBE_TXPOWER) |
AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE);
@@ -1356,6 +1353,257 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
return ret;
}
+/***************************\
+* Spur mitigation functions *
+\***************************/
+
+bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ u8 refclk_freq;
+
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
+ refclk_freq = 40;
+ else
+ refclk_freq = 32;
+
+ if ((channel->center_freq % refclk_freq != 0) &&
+ ((channel->center_freq % refclk_freq < 10) ||
+ (channel->center_freq % refclk_freq > 22)))
+ return true;
+ else
+ return false;
+}
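
ath5k_hw_chan_has_spur_noise() flags a channel when its centre frequency is not a multiple of the reference clock (40 MHz for RF5112, RF5413 and AR2417, 32 MHz otherwise) and the remainder lies outside the 10-22 MHz window, i.e. the clock harmonic lands close enough to the carrier to matter. A few sample channels against a 40 MHz refclk:

#include <stdio.h>
#include <stdbool.h>

/* Same test as ath5k_hw_chan_has_spur_noise(), with the reference
 * clock frequency passed in directly. */
static bool has_spur_noise(int freq_mhz, int refclk_mhz)
{
	int rem = freq_mhz % refclk_mhz;

	return rem != 0 && (rem < 10 || rem > 22);
}

int main(void)
{
	int chans[] = { 2412, 2437, 2440, 2467, 5180 };
	unsigned int i;

	for (i = 0; i < sizeof(chans) / sizeof(chans[0]); i++)
		printf("%d MHz %% 40 = %2d -> %s\n", chans[i], chans[i] % 40,
		       has_spur_noise(chans[i], 40) ? "spur-prone" : "clean");
	return 0;
}
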
+
+void
+ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u32 mag_mask[4] = {0, 0, 0, 0};
+ u32 pilot_mask[2] = {0, 0};
+ /* Note: fbin values are scaled up by 2 */
+ u16 spur_chan_fbin, chan_fbin, symbol_width, spur_detection_window;
+ s32 spur_delta_phase, spur_freq_sigma_delta;
+ s32 spur_offset, num_symbols_x16;
+ u8 num_symbol_offsets, i, freq_band;
+
+ /* Convert current frequency to fbin value (the same way channels
+ * are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale
+ * up by 2 so we can compare it later */
+ if (channel->hw_value & CHANNEL_2GHZ) {
+ chan_fbin = (channel->center_freq - 2300) * 10;
+ freq_band = AR5K_EEPROM_BAND_2GHZ;
+ } else {
+ chan_fbin = (channel->center_freq - 4900) * 10;
+ freq_band = AR5K_EEPROM_BAND_5GHZ;
+ }
+
+ /* Check if any spur_chan_fbin from EEPROM is
+ * within our current channel's spur detection range */
+ spur_chan_fbin = AR5K_EEPROM_NO_SPUR;
+ spur_detection_window = AR5K_SPUR_CHAN_WIDTH;
+ /* XXX: Half/Quarter channels ?*/
+ if (channel->hw_value & CHANNEL_TURBO)
+ spur_detection_window *= 2;
+
+ for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) {
+ spur_chan_fbin = ee->ee_spur_chans[i][freq_band];
+
+ /* Note: mask cleans AR5K_EEPROM_NO_SPUR flag
+ * so it's zero if we got nothing from EEPROM */
+ if (spur_chan_fbin == AR5K_EEPROM_NO_SPUR) {
+ spur_chan_fbin &= AR5K_EEPROM_SPUR_CHAN_MASK;
+ break;
+ }
+
+ if ((chan_fbin - spur_detection_window <=
+ (spur_chan_fbin & AR5K_EEPROM_SPUR_CHAN_MASK)) &&
+ (chan_fbin + spur_detection_window >=
+ (spur_chan_fbin & AR5K_EEPROM_SPUR_CHAN_MASK))) {
+ spur_chan_fbin &= AR5K_EEPROM_SPUR_CHAN_MASK;
+ break;
+ }
+ }
+
+ /* We need to enable spur filter for this channel */
+ if (spur_chan_fbin) {
+ spur_offset = spur_chan_fbin - chan_fbin;
+ /*
+ * Calculate deltas:
+ * spur_freq_sigma_delta -> spur_offset / sample_freq << 21
+ * spur_delta_phase -> spur_offset / chip_freq << 11
+ * Note: Both values have 100KHz resolution
+ */
+ /* XXX: Half/Quarter rate channels ? */
+ switch (channel->hw_value) {
+ case CHANNEL_A:
+ /* Both sample_freq and chip_freq are 40MHz */
+ spur_delta_phase = (spur_offset << 17) / 25;
+ spur_freq_sigma_delta = (spur_delta_phase >> 10);
+ symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
+ break;
+ case CHANNEL_G:
+ /* sample_freq -> 40MHz chip_freq -> 44MHz
+ * (for b compatibility) */
+ spur_freq_sigma_delta = (spur_offset << 8) / 55;
+ spur_delta_phase = (spur_offset << 17) / 25;
+ symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
+ break;
+ case CHANNEL_T:
+ case CHANNEL_TG:
+ /* Both sample_freq and chip_freq are 80MHz */
+ spur_delta_phase = (spur_offset << 16) / 25;
+ spur_freq_sigma_delta = (spur_delta_phase >> 10);
+ symbol_width = AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz;
+ break;
+ default:
+ return;
+ }
+
+ /* Calculate pilot and magnitude masks */
+
+ /* Scale up spur_offset by 1000 to switch to 100HZ resolution
+ * and divide by symbol_width to find how many symbols we have
+ * Note: number of symbols is scaled up by 16 */
+ num_symbols_x16 = ((spur_offset * 1000) << 4) / symbol_width;
+
+ /* Spur is on a symbol if num_symbols_x16 % 16 is zero */
+ if (!(num_symbols_x16 & 0xF))
+ /* _X_ */
+ num_symbol_offsets = 3;
+ else
+ /* _xx_ */
+ num_symbol_offsets = 4;
+
+ for (i = 0; i < num_symbol_offsets; i++) {
+
+ /* Calculate pilot mask */
+ s32 curr_sym_off =
+ (num_symbols_x16 / 16) + i + 25;
+
+ /* Pilot magnitude mask seems to be a way to
+ * declare the boundaries for our detection
+ * window or something, it's 2 for the middle
+ * value(s) where the symbol is expected to be
+ * and 1 on the boundary values */
+ u8 plt_mag_map =
+ (i == 0 || i == (num_symbol_offsets - 1))
+ ? 1 : 2;
+
+ if (curr_sym_off >= 0 && curr_sym_off <= 32) {
+ if (curr_sym_off <= 25)
+ pilot_mask[0] |= 1 << curr_sym_off;
+ else if (curr_sym_off >= 27)
+ pilot_mask[0] |= 1 << (curr_sym_off - 1);
+ } else if (curr_sym_off >= 33 && curr_sym_off <= 52)
+ pilot_mask[1] |= 1 << (curr_sym_off - 33);
+
+ /* Calculate magnitude mask (for viterbi decoder) */
+ if (curr_sym_off >= -1 && curr_sym_off <= 14)
+ mag_mask[0] |=
+ plt_mag_map << (curr_sym_off + 1) * 2;
+ else if (curr_sym_off >= 15 && curr_sym_off <= 30)
+ mag_mask[1] |=
+ plt_mag_map << (curr_sym_off - 15) * 2;
+ else if (curr_sym_off >= 31 && curr_sym_off <= 46)
+ mag_mask[2] |=
+ plt_mag_map << (curr_sym_off - 31) * 2;
+ else if (curr_sym_off >= 46 && curr_sym_off <= 53)
+ mag_mask[3] |=
+ plt_mag_map << (curr_sym_off - 47) * 2;
+
+ }
+
+ /* Write settings on hw to enable spur filter */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
+ AR5K_PHY_BIN_MASK_CTL_RATE, 0xff);
+ /* XXX: Self correlator also ? */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
+ AR5K_PHY_IQ_PILOT_MASK_EN |
+ AR5K_PHY_IQ_CHAN_MASK_EN |
+ AR5K_PHY_IQ_SPUR_FILT_EN);
+
+ /* Set delta phase and freq sigma delta */
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(spur_delta_phase,
+ AR5K_PHY_TIMING_11_SPUR_DELTA_PHASE) |
+ AR5K_REG_SM(spur_freq_sigma_delta,
+ AR5K_PHY_TIMING_11_SPUR_FREQ_SD) |
+ AR5K_PHY_TIMING_11_USE_SPUR_IN_AGC,
+ AR5K_PHY_TIMING_11);
+
+ /* Write pilot masks */
+ ath5k_hw_reg_write(ah, pilot_mask[0], AR5K_PHY_TIMING_7);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_8,
+ AR5K_PHY_TIMING_8_PILOT_MASK_2,
+ pilot_mask[1]);
+
+ ath5k_hw_reg_write(ah, pilot_mask[0], AR5K_PHY_TIMING_9);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_10,
+ AR5K_PHY_TIMING_10_PILOT_MASK_2,
+ pilot_mask[1]);
+
+ /* Write magnitude masks */
+ ath5k_hw_reg_write(ah, mag_mask[0], AR5K_PHY_BIN_MASK_1);
+ ath5k_hw_reg_write(ah, mag_mask[1], AR5K_PHY_BIN_MASK_2);
+ ath5k_hw_reg_write(ah, mag_mask[2], AR5K_PHY_BIN_MASK_3);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
+ AR5K_PHY_BIN_MASK_CTL_MASK_4,
+ mag_mask[3]);
+
+ ath5k_hw_reg_write(ah, mag_mask[0], AR5K_PHY_BIN_MASK2_1);
+ ath5k_hw_reg_write(ah, mag_mask[1], AR5K_PHY_BIN_MASK2_2);
+ ath5k_hw_reg_write(ah, mag_mask[2], AR5K_PHY_BIN_MASK2_3);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK2_4,
+ AR5K_PHY_BIN_MASK2_4_MASK_4,
+ mag_mask[3]);
+
+ } else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) &
+ AR5K_PHY_IQ_SPUR_FILT_EN) {
+ /* Clean up spur mitigation settings and disable the filter */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
+ AR5K_PHY_BIN_MASK_CTL_RATE, 0);
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_IQ,
+ AR5K_PHY_IQ_PILOT_MASK_EN |
+ AR5K_PHY_IQ_CHAN_MASK_EN |
+ AR5K_PHY_IQ_SPUR_FILT_EN);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_TIMING_11);
+
+ /* Clear pilot masks */
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_TIMING_7);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_8,
+ AR5K_PHY_TIMING_8_PILOT_MASK_2,
+ 0);
+
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_TIMING_9);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_10,
+ AR5K_PHY_TIMING_10_PILOT_MASK_2,
+ 0);
+
+ /* Clear magnitude masks */
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK_1);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK_2);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK_3);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
+ AR5K_PHY_BIN_MASK_CTL_MASK_4,
+ 0);
+
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK2_1);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK2_2);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK2_3);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK2_4,
+ AR5K_PHY_BIN_MASK2_4_MASK_4,
+ 0);
+ }
+}
+
+/********************\
+ Misc PHY functions
+\********************/
+
int ath5k_hw_phy_disable(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
@@ -1365,10 +1613,6 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
return 0;
}
-/********************\
- Misc PHY functions
-\********************/
-
/*
* Get the PHY Chip revision
*/
@@ -1417,25 +1661,189 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
return ret;
}
+/*****************\
+* Antenna control *
+\*****************/
+
void /*TODO:Boundary check*/
-ath5k_hw_set_def_antenna(struct ath5k_hw *ah, unsigned int ant)
+ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
{
ATH5K_TRACE(ah->ah_sc);
- /*Just a try M.F.*/
+
if (ah->ah_version != AR5K_AR5210)
- ath5k_hw_reg_write(ah, ant, AR5K_DEFAULT_ANTENNA);
+ ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
}
unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
- /*Just a try M.F.*/
+
if (ah->ah_version != AR5K_AR5210)
- return ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
+ return ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA) & 0x7;
return false; /*XXX: What do we return for 5210 ?*/
}
+/*
+ * Enable/disable fast rx antenna diversity
+ */
+static void
+ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
+{
+ switch (ee_mode) {
+ case AR5K_EEPROM_MODE_11G:
+ /* XXX: This is set to
+ * disabled on initvals !!! */
+ case AR5K_EEPROM_MODE_11A:
+ if (enable)
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_OFDM_DIV_DIS);
+ else
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_OFDM_DIV_DIS);
+ break;
+ case AR5K_EEPROM_MODE_11B:
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_OFDM_DIV_DIS);
+ break;
+ default:
+ return;
+ }
+
+ if (enable) {
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
+ AR5K_PHY_RESTART_DIV_GC, 0xc);
+
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
+ AR5K_PHY_FAST_ANT_DIV_EN);
+ } else {
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
+ AR5K_PHY_RESTART_DIV_GC, 0x8);
+
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
+ AR5K_PHY_FAST_ANT_DIV_EN);
+ }
+}
+
+/*
+ * Set antenna operating mode
+ */
+void
+ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
+{
+ struct ieee80211_channel *channel = &ah->ah_current_channel;
+ bool use_def_for_tx, update_def_on_tx, use_def_for_rts, fast_div;
+ bool use_def_for_sg;
+ u8 def_ant, tx_ant, ee_mode;
+ u32 sta_id1 = 0;
+
+ def_ant = ah->ah_def_ant;
+
+ ATH5K_TRACE(ah->ah_sc);
+
+ switch (channel->hw_value & CHANNEL_MODES) {
+ case CHANNEL_A:
+ case CHANNEL_T:
+ case CHANNEL_XR:
+ ee_mode = AR5K_EEPROM_MODE_11A;
+ break;
+ case CHANNEL_G:
+ case CHANNEL_TG:
+ ee_mode = AR5K_EEPROM_MODE_11G;
+ break;
+ case CHANNEL_B:
+ ee_mode = AR5K_EEPROM_MODE_11B;
+ break;
+ default:
+ ATH5K_ERR(ah->ah_sc,
+ "invalid channel: %d\n", channel->center_freq);
+ return;
+ }
+
+ switch (ant_mode) {
+ case AR5K_ANTMODE_DEFAULT:
+ tx_ant = 0;
+ use_def_for_tx = false;
+ update_def_on_tx = false;
+ use_def_for_rts = false;
+ use_def_for_sg = false;
+ fast_div = true;
+ break;
+ case AR5K_ANTMODE_FIXED_A:
+ def_ant = 1;
+ tx_ant = 0;
+ use_def_for_tx = true;
+ update_def_on_tx = false;
+ use_def_for_rts = true;
+ use_def_for_sg = true;
+ fast_div = false;
+ break;
+ case AR5K_ANTMODE_FIXED_B:
+ def_ant = 2;
+ tx_ant = 0;
+ use_def_for_tx = true;
+ update_def_on_tx = false;
+ use_def_for_rts = true;
+ use_def_for_sg = true;
+ fast_div = false;
+ break;
+ case AR5K_ANTMODE_SINGLE_AP:
+ def_ant = 1; /* updated on tx */
+ tx_ant = 0;
+ use_def_for_tx = true;
+ update_def_on_tx = true;
+ use_def_for_rts = true;
+ use_def_for_sg = true;
+ fast_div = true;
+ break;
+ case AR5K_ANTMODE_SECTOR_AP:
+ tx_ant = 1; /* variable */
+ use_def_for_tx = false;
+ update_def_on_tx = false;
+ use_def_for_rts = true;
+ use_def_for_sg = false;
+ fast_div = false;
+ break;
+ case AR5K_ANTMODE_SECTOR_STA:
+ tx_ant = 1; /* variable */
+ use_def_for_tx = true;
+ update_def_on_tx = false;
+ use_def_for_rts = true;
+ use_def_for_sg = false;
+ fast_div = true;
+ break;
+ case AR5K_ANTMODE_DEBUG:
+ def_ant = 1;
+ tx_ant = 2;
+ use_def_for_tx = false;
+ update_def_on_tx = false;
+ use_def_for_rts = false;
+ use_def_for_sg = false;
+ fast_div = false;
+ break;
+ default:
+ return;
+ }
+
+ ah->ah_tx_ant = tx_ant;
+ ah->ah_ant_mode = ant_mode;
+
+ sta_id1 |= use_def_for_tx ? AR5K_STA_ID1_DEFAULT_ANTENNA : 0;
+ sta_id1 |= update_def_on_tx ? AR5K_STA_ID1_DESC_ANTENNA : 0;
+ sta_id1 |= use_def_for_rts ? AR5K_STA_ID1_RTS_DEF_ANTENNA : 0;
+ sta_id1 |= use_def_for_sg ? AR5K_STA_ID1_SELFGEN_DEF_ANT : 0;
+
+ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_ANTENNA_SETTINGS);
+
+ if (sta_id1)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, sta_id1);
+
+ /* Note: set diversity before default antenna
+ * because it won't work correctly */
+ ath5k_hw_set_fast_div(ah, ee_mode, fast_div);
+ ath5k_hw_set_def_antenna(ah, def_ant);
+}
+
/****************\
* TX power setup *
@@ -1487,28 +1895,38 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
{
s8 tmp;
s16 min_pwrL, min_pwrR;
- s16 pwr_i = pwrL[0];
-
- do {
- pwr_i--;
- tmp = (s8) ath5k_get_interpolated_value(pwr_i,
- pwrL[0], pwrL[1],
- stepL[0], stepL[1]);
-
- } while (tmp > 1);
+ s16 pwr_i;
- min_pwrL = pwr_i;
-
- pwr_i = pwrR[0];
- do {
- pwr_i--;
- tmp = (s8) ath5k_get_interpolated_value(pwr_i,
- pwrR[0], pwrR[1],
- stepR[0], stepR[1]);
+ if (WARN_ON(stepL[0] == stepL[1] || stepR[0] == stepR[1]))
+ return 0;
- } while (tmp > 1);
+ if (pwrL[0] == pwrL[1])
+ min_pwrL = pwrL[0];
+ else {
+ pwr_i = pwrL[0];
+ do {
+ pwr_i--;
+ tmp = (s8) ath5k_get_interpolated_value(pwr_i,
+ pwrL[0], pwrL[1],
+ stepL[0], stepL[1]);
+ } while (tmp > 1);
+
+ min_pwrL = pwr_i;
+ }
- min_pwrR = pwr_i;
+ if (pwrR[0] == pwrR[1])
+ min_pwrR = pwrR[0];
+ else {
+ pwr_i = pwrR[0];
+ do {
+ pwr_i--;
+ tmp = (s8) ath5k_get_interpolated_value(pwr_i,
+ pwrR[0], pwrR[1],
+ stepR[0], stepR[1]);
+ } while (tmp > 1);
+
+ min_pwrR = pwr_i;
+ }
/* Keep the right boundary so that it works for both curves */
return max(min_pwrL, min_pwrR);
@@ -1743,8 +2161,6 @@ done:
* Get the max edge power for this channel if
* we have such data from EEPROM's Conformance Test
* Limits (CTL), and limit max power if needed.
- *
- * FIXME: Only works for world regulatory domains
*/
static void
ath5k_get_max_ctl_power(struct ath5k_hw *ah,
@@ -1760,26 +2176,23 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah,
u8 ctl_idx = 0xFF;
u32 target = channel->center_freq;
- /* Find out a CTL for our mode that's not mapped
- * on a specific reg domain.
- *
- * TODO: Map our current reg domain to one of the 3 available
- * reg domain ids so that we can support more CTLs. */
+ ctl_mode = ath_regd_get_band_ctl(&ah->ah_regulatory, channel->band);
+
switch (channel->hw_value & CHANNEL_MODES) {
case CHANNEL_A:
- ctl_mode = AR5K_CTL_11A | AR5K_CTL_NO_REGDOMAIN;
+ ctl_mode |= AR5K_CTL_11A;
break;
case CHANNEL_G:
- ctl_mode = AR5K_CTL_11G | AR5K_CTL_NO_REGDOMAIN;
+ ctl_mode |= AR5K_CTL_11G;
break;
case CHANNEL_B:
- ctl_mode = AR5K_CTL_11B | AR5K_CTL_NO_REGDOMAIN;
+ ctl_mode |= AR5K_CTL_11B;
break;
case CHANNEL_T:
- ctl_mode = AR5K_CTL_TURBO | AR5K_CTL_NO_REGDOMAIN;
+ ctl_mode |= AR5K_CTL_TURBO;
break;
case CHANNEL_TG:
- ctl_mode = AR5K_CTL_TURBOG | AR5K_CTL_NO_REGDOMAIN;
+ ctl_mode |= AR5K_CTL_TURBOG;
break;
case CHANNEL_XR:
/* Fall through */
@@ -2475,8 +2888,19 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
for (i = 8; i <= 15; i++)
rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
- ah->ah_txpower.txp_min_pwr = rates[7];
- ah->ah_txpower.txp_max_pwr = rates[0];
+ /* Now that we have all rates setup use table offset to
+ * match the power range set by user with the power indices
+ * on PCDAC/PDADC table */
+ for (i = 0; i < 16; i++) {
+ rates[i] += ah->ah_txpower.txp_offset;
+ /* Don't get out of bounds */
+ if (rates[i] > 63)
+ rates[i] = 63;
+ }
+
+ /* Min/max in 0.25dB units */
+ ah->ah_txpower.txp_min_pwr = 2 * rates[7];
+ ah->ah_txpower.txp_max_pwr = 2 * rates[0];
ah->ah_txpower.txp_ofdm = rates[7];
}
@@ -2584,16 +3008,37 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
return 0;
}
-int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 mode, u8 txpower)
+int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
{
/*Just a try M.F.*/
struct ieee80211_channel *channel = &ah->ah_current_channel;
+ u8 ee_mode;
ATH5K_TRACE(ah->ah_sc);
+
+ switch (channel->hw_value & CHANNEL_MODES) {
+ case CHANNEL_A:
+ case CHANNEL_T:
+ case CHANNEL_XR:
+ ee_mode = AR5K_EEPROM_MODE_11A;
+ break;
+ case CHANNEL_G:
+ case CHANNEL_TG:
+ ee_mode = AR5K_EEPROM_MODE_11G;
+ break;
+ case CHANNEL_B:
+ ee_mode = AR5K_EEPROM_MODE_11B;
+ break;
+ default:
+ ATH5K_ERR(ah->ah_sc,
+ "invalid channel: %d\n", channel->center_freq);
+ return -EINVAL;
+ }
+
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER,
"changing txpower to %d\n", txpower);
- return ath5k_hw_txpower(ah, channel, mode, txpower);
+ return ath5k_hw_txpower(ah, channel, ee_mode, txpower);
}
#undef _ATH5K_PHY
diff --git a/drivers/net/wireless/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 5094c394a4b..73407b3f53e 100644
--- a/drivers/net/wireless/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -160,7 +160,8 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
if (ah->ah_version == AR5K_AR5210)
return false;
- pending = (AR5K_QUEUE_STATUS(queue) & AR5K_QCU_STS_FRMPENDCNT);
+ pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
+ pending &= AR5K_QCU_STS_FRMPENDCNT;
/* It's possible to have no frames pending even if TXE
* is set. To indicate that q has not stopped return
@@ -401,14 +402,16 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
AR5K_DCU_MISC_ARBLOCK_CTL_S) |
+ AR5K_DCU_MISC_ARBLOCK_IGNORE |
AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
AR5K_DCU_MISC_BCN_ENABLE);
break;
case AR5K_TX_QUEUE_CAB:
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_FRSHED_DBA_GT |
+ AR5K_QCU_MISC_FRSHED_BCN_SENT_GT |
AR5K_QCU_MISC_CBREXP_DIS |
+ AR5K_QCU_MISC_RDY_VEOL_POLICY |
AR5K_QCU_MISC_CBREXP_BCN_DIS);
ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 7070d1543cd..6809b54a2ad 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -1148,6 +1148,11 @@
#define AR5K_STA_ID1_CBCIV_ENDIAN 0x40000000 /* ??? */
#define AR5K_STA_ID1_KEYSRCH_MCAST 0x80000000 /* Do key cache search for mcast frames */
+#define AR5K_STA_ID1_ANTENNA_SETTINGS (AR5K_STA_ID1_DEFAULT_ANTENNA | \
+ AR5K_STA_ID1_DESC_ANTENNA | \
+ AR5K_STA_ID1_RTS_DEF_ANTENNA | \
+ AR5K_STA_ID1_SELFGEN_DEF_ANT)
+
/*
* First BSSID register (MAC address, lower 32bits)
*/
@@ -2028,7 +2033,9 @@
#define AR5K_PHY_AGCCTL 0x9860 /* Register address */
#define AR5K_PHY_AGCCTL_CAL 0x00000001 /* Enable PHY calibration */
#define AR5K_PHY_AGCCTL_NF 0x00000002 /* Enable Noise Floor calibration */
+#define AR5K_PHY_AGCCTL_OFDM_DIV_DIS 0x00000008 /* Disable antenna diversity on OFDM modes */
#define AR5K_PHY_AGCCTL_NF_EN 0x00008000 /* Enable nf calibration to happen (?) */
+#define AR5K_PHY_AGCTL_FLTR_CAL 0x00010000 /* Allow filter calibration (?) */
#define AR5K_PHY_AGCCTL_NF_NOUPDATE 0x00020000 /* Don't update nf automaticaly */
/*
@@ -2528,7 +2535,7 @@
* PHY CCK Cross-correlator Barker RSSI threshold register [5212+]
*/
#define AR5K_PHY_CCK_CROSSCORR 0xa208
-#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR 0x0000000f
+#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR 0x0000003f
#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR_S 0
/* Same address is used for antenna diversity activation */
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 7a17d31b2fd..bd0a97a38d3 100644
--- a/drivers/net/wireless/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -26,7 +26,7 @@
\*****************************/
#include <linux/pci.h> /* To determine if a card is pci-e */
-#include <linux/bitops.h> /* For get_bitmask_order */
+#include <linux/log2.h>
#include "ath5k.h"
#include "reg.h"
#include "base.h"
@@ -54,9 +54,8 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
u32 coef_scaled, coef_exp, coef_man,
ds_coef_exp, ds_coef_man, clock;
- if (!(ah->ah_version == AR5K_AR5212) ||
- !(channel->hw_value & CHANNEL_OFDM))
- BUG();
+ BUG_ON(!(ah->ah_version == AR5K_AR5212) ||
+ !(channel->hw_value & CHANNEL_OFDM));
/* Get coefficient
* ALGO: coef = (5 * clock * carrier_freq) / 2)
@@ -69,10 +68,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
/* Get exponent
* ALGO: coef_exp = 14 - highest set bit position */
- coef_exp = get_bitmask_order(coef_scaled);
+ coef_exp = ilog2(coef_scaled);
/* Doesn't make sense if it's zero*/
- if (!coef_exp)
+ if (!coef_scaled || !coef_exp)
return -EINVAL;
/* Note: we've shifted coef_scaled by 24 */
@@ -359,7 +358,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
mode |= AR5K_PHY_MODE_FREQ_5GHZ;
if (ah->ah_radio == AR5K_RF5413)
- clock |= AR5K_PHY_PLL_40MHZ_5413;
+ clock = AR5K_PHY_PLL_40MHZ_5413;
else
clock |= AR5K_PHY_PLL_40MHZ;
@@ -508,7 +507,7 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))
scal = AR5K_PHY_SCAL_32MHZ_2417;
- else if (ath5k_eeprom_is_hb63(ah))
+ else if (ee->ee_is_hb63)
scal = AR5K_PHY_SCAL_32MHZ_HB63;
else
scal = AR5K_PHY_SCAL_32MHZ;
@@ -537,26 +536,6 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
return;
}
-static bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
- struct ieee80211_channel *channel)
-{
- u8 refclk_freq;
-
- if ((ah->ah_radio == AR5K_RF5112) ||
- (ah->ah_radio == AR5K_RF5413) ||
- (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
- refclk_freq = 40;
- else
- refclk_freq = 32;
-
- if ((channel->center_freq % refclk_freq != 0) &&
- ((channel->center_freq % refclk_freq < 10) ||
- (channel->center_freq % refclk_freq > 22)))
- return true;
- else
- return false;
-}
-
/* TODO: Half/Quarter rate */
static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
@@ -599,9 +578,10 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
/* Set DAC/ADC delays */
if (ah->ah_version == AR5K_AR5212) {
u32 scal;
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))
scal = AR5K_PHY_SCAL_32MHZ_2417;
- else if (ath5k_eeprom_is_hb63(ah))
+ else if (ee->ee_is_hb63)
scal = AR5K_PHY_SCAL_32MHZ_HB63;
else
scal = AR5K_PHY_SCAL_32MHZ;
@@ -698,13 +678,13 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
/* Set antenna idle switch table */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL,
AR5K_PHY_ANT_CTL_SWTABLE_IDLE,
- (ah->ah_antenna[ee_mode][0] |
+ (ah->ah_ant_ctl[ee_mode][0] |
AR5K_PHY_ANT_CTL_TXRX_EN));
- /* Set antenna switch table */
- ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[0]],
+ /* Set antenna switch tables */
+ ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant[0]],
AR5K_PHY_ANT_SWITCH_TABLE_0);
- ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[1]],
+ ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant[1]],
AR5K_PHY_ANT_SWITCH_TABLE_1);
/* Noise floor threshold */
@@ -998,10 +978,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ath5k_hw_tweak_initval_settings(ah, channel);
/*
- * Set TX power (FIXME)
+ * Set TX power
*/
ret = ath5k_hw_txpower(ah, channel, ee_mode,
- AR5K_TUNE_DEFAULT_TXPOWER);
+ ah->ah_txpower.txp_max_pwr / 2);
if (ret)
return ret;
@@ -1024,9 +1004,22 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/* Write OFDM timings on 5212*/
if (ah->ah_version == AR5K_AR5212 &&
channel->hw_value & CHANNEL_OFDM) {
+ struct ath5k_eeprom_info *ee =
+ &ah->ah_capabilities.cap_eeprom;
+
ret = ath5k_hw_write_ofdm_timings(ah, channel);
if (ret)
return ret;
+
+ /* Note: According to docs we can have a newer
+ * EEPROM on old hardware, so we need to verify
+ * that our hardware is new enough to have spur
+ * mitigation registers (delta phase etc) */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5424 ||
+ (ah->ah_mac_srev >= AR5K_SREV_AR5424 &&
+ ee->ee_version >= AR5K_EEPROM_VERSION_5_3))
+ ath5k_hw_set_spur_mitigation_filter(ah,
+ channel);
}
/*Enable/disable 802.11b mode on 5111
@@ -1042,17 +1035,15 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/*
* In case a fixed antenna was set as default
- * write the same settings on both AR5K_PHY_ANT_SWITCH_TABLE
- * registers.
+ * use the same switch table twice.
*/
- if (s_ant != 0) {
- if (s_ant == AR5K_ANT_FIXED_A) /* 1 - Main */
- ant[0] = ant[1] = AR5K_ANT_FIXED_A;
- else /* 2 - Aux */
- ant[0] = ant[1] = AR5K_ANT_FIXED_B;
- } else {
- ant[0] = AR5K_ANT_FIXED_A;
- ant[1] = AR5K_ANT_FIXED_B;
+ if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_A)
+ ant[0] = ant[1] = AR5K_ANT_SWTABLE_A;
+ else if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_B)
+ ant[0] = ant[1] = AR5K_ANT_SWTABLE_B;
+ else {
+ ant[0] = AR5K_ANT_SWTABLE_A;
+ ant[1] = AR5K_ANT_SWTABLE_B;
}
/* Commit values from EEPROM */
@@ -1260,6 +1251,8 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
*/
ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
+ /* Restore antenna mode */
+ ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
/*
* Configure QCUs/DCUs
@@ -1311,23 +1304,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
if (ah->ah_version != AR5K_AR5210)
ath5k_hw_set_imr(ah, ah->ah_imr);
- /*
- * Setup RFKill interrupt if rfkill flag is set on eeprom.
- * TODO: Use gpio pin and polarity infos from eeprom
- * TODO: Handle this in ath5k_intr because it'll result
- * a nasty interrupt storm.
- */
-#if 0
- if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) {
- ath5k_hw_set_gpio_input(ah, 0);
- ah->ah_gpio[0] = ath5k_hw_get_gpio(ah, 0);
- if (ah->ah_gpio[0] == 0)
- ath5k_hw_set_gpio_intr(ah, 0, 1);
- else
- ath5k_hw_set_gpio_intr(ah, 0, 0);
- }
-#endif
-
/* Enable 32KHz clock function for AR5212+ chips
* Set clocks to 32KHz operation and use an
* external 32KHz crystal when sleeping if one
diff --git a/drivers/net/wireless/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index e50baff6617..e50baff6617 100644
--- a/drivers/net/wireless/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
diff --git a/drivers/net/wireless/ath5k/rfgain.h b/drivers/net/wireless/ath/ath5k/rfgain.h
index 1354d8c392c..1354d8c392c 100644
--- a/drivers/net/wireless/ath5k/rfgain.h
+++ b/drivers/net/wireless/ath/ath5k/rfgain.h
diff --git a/drivers/net/wireless/ath/ath5k/rfkill.c b/drivers/net/wireless/ath/ath5k/rfkill.c
new file mode 100644
index 00000000000..41a877b73fc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/rfkill.c
@@ -0,0 +1,121 @@
+/*
+ * RFKILL support for ath5k
+ *
+ * Copyright (c) 2009 Tobias Doerffel <tobias.doerffel@gmail.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include "base.h"
+
+
+static inline void ath5k_rfkill_disable(struct ath5k_softc *sc)
+{
+ ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n",
+ sc->rf_kill.gpio, sc->rf_kill.polarity);
+ ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio);
+ ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, !sc->rf_kill.polarity);
+}
+
+
+static inline void ath5k_rfkill_enable(struct ath5k_softc *sc)
+{
+ ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n",
+ sc->rf_kill.gpio, sc->rf_kill.polarity);
+ ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio);
+ ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, sc->rf_kill.polarity);
+}
+
+static inline void ath5k_rfkill_set_intr(struct ath5k_softc *sc, bool enable)
+{
+ struct ath5k_hw *ah = sc->ah;
+ u32 curval;
+
+ ath5k_hw_set_gpio_input(ah, sc->rf_kill.gpio);
+ curval = ath5k_hw_get_gpio(ah, sc->rf_kill.gpio);
+ ath5k_hw_set_gpio_intr(ah, sc->rf_kill.gpio, enable ?
+ !!curval : !curval);
+}
+
+static bool
+ath5k_is_rfkill_set(struct ath5k_softc *sc)
+{
+ /* configuring GPIO for input for some reason disables rfkill */
+ /*ath5k_hw_set_gpio_input(sc->ah, sc->rf_kill.gpio);*/
+ return ath5k_hw_get_gpio(sc->ah, sc->rf_kill.gpio) ==
+ sc->rf_kill.polarity;
+}
+
+static void
+ath5k_tasklet_rfkill_toggle(unsigned long data)
+{
+ struct ath5k_softc *sc = (void *)data;
+ bool blocked;
+
+ blocked = ath5k_is_rfkill_set(sc);
+ wiphy_rfkill_set_hw_state(sc->hw->wiphy, blocked);
+}
+
+
+void
+ath5k_rfkill_hw_start(struct ath5k_hw *ah)
+{
+ struct ath5k_softc *sc = ah->ah_sc;
+
+ /* read rfkill GPIO configuration from EEPROM header */
+ sc->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
+ sc->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;
+
+ tasklet_init(&sc->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
+ (unsigned long)sc);
+
+ ath5k_rfkill_disable(sc);
+
+ /* enable interrupt for rfkill switch */
+ if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
+ ath5k_rfkill_set_intr(sc, true);
+}
+
+
+void
+ath5k_rfkill_hw_stop(struct ath5k_hw *ah)
+{
+ struct ath5k_softc *sc = ah->ah_sc;
+
+ /* disable interrupt for rfkill switch */
+ if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
+ ath5k_rfkill_set_intr(sc, false);
+
+ tasklet_kill(&sc->rf_kill.toggleq);
+
+ /* enable RFKILL when stopping HW so Wifi LED is turned off */
+ ath5k_rfkill_enable(sc);
+}
+
diff --git a/drivers/net/wireless/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 90a8dd87378..0ed1ac312aa 100644
--- a/drivers/net/wireless/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -2,6 +2,7 @@ config ATH9K
tristate "Atheros 802.11n wireless cards support"
depends on PCI && MAC80211 && WLAN_80211
depends on RFKILL || RFKILL=n
+ select ATH_COMMON
select MAC80211_LEDS
select LEDS_CLASS
select NEW_LEDS
diff --git a/drivers/net/wireless/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 1a4d4eab6fe..783bc39eb2f 100644
--- a/drivers/net/wireless/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -4,7 +4,6 @@ ath9k-y += hw.o \
calib.o \
ani.o \
phy.o \
- regd.o \
beacon.o \
main.o \
recv.o \
diff --git a/drivers/net/wireless/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 0e65c51ba17..0e65c51ba17 100644
--- a/drivers/net/wireless/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
diff --git a/drivers/net/wireless/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 6c5e887d50d..1aeafb511dd 100644
--- a/drivers/net/wireless/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -569,8 +569,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
DPRINTF(ah->ah_sc, ATH_DBG_ANI,
"phyCnt1 0x%x, resetting "
"counter value to 0x%x\n",
- phyCnt1,
- aniState->ofdmPhyErrBase);
+ phyCnt1, aniState->ofdmPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_1,
aniState->ofdmPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_MASK_1,
@@ -580,8 +579,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
DPRINTF(ah->ah_sc, ATH_DBG_ANI,
"phyCnt2 0x%x, resetting "
"counter value to 0x%x\n",
- phyCnt2,
- aniState->cckPhyErrBase);
+ phyCnt2, aniState->cckPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_2,
aniState->cckPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_MASK_2,
@@ -667,7 +665,7 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
u32 cc = REG_READ(ah, AR_CCCNT);
if (cycles == 0 || cycles > cc) {
- DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ DPRINTF(ah->ah_sc, ATH_DBG_ANI,
"cycle counter wrap. ExtBusy = 0\n");
good = 0;
} else {
diff --git a/drivers/net/wireless/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 08b4e7ed5ff..08b4e7ed5ff 100644
--- a/drivers/net/wireless/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 2689a08a284..515880aa211 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -53,11 +53,7 @@ struct ath_node;
#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
-#define ASSERT(exp) do { \
- if (unlikely(!(exp))) { \
- BUG(); \
- } \
- } while (0)
+#define ASSERT(exp) BUG_ON(!(exp))
#define TSF_TO_TU(_h,_l) \
((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
@@ -70,7 +66,6 @@ struct ath_config {
u32 ath_aggr_prot;
u16 txpowlimit;
u8 cabqReadytime;
- u8 swBeaconProcess;
};
/*************************/
@@ -78,13 +73,17 @@ struct ath_config {
/*************************/
#define ATH_TXBUF_RESET(_bf) do { \
- (_bf)->bf_status = 0; \
+ (_bf)->bf_stale = false; \
(_bf)->bf_lastbf = NULL; \
(_bf)->bf_next = NULL; \
memset(&((_bf)->bf_state), 0, \
sizeof(struct ath_buf_state)); \
} while (0)
+#define ATH_RXBUF_RESET(_bf) do { \
+ (_bf)->bf_stale = false; \
+ } while (0)
+
/**
* enum buffer_type - Buffer type flags
*
@@ -110,7 +109,7 @@ struct ath_buf_state {
int bfs_seqno;
int bfs_tidno;
int bfs_retries;
- u32 bf_type;
+ u8 bf_type;
u32 bfs_keyix;
enum ath9k_key_type bfs_keytype;
};
@@ -134,26 +133,21 @@ struct ath_buf {
struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
an aggregate) */
struct ath_buf *bf_next; /* next subframe in the aggregate */
- void *bf_mpdu; /* enclosing frame structure */
+ struct sk_buff *bf_mpdu; /* enclosing frame structure */
struct ath_desc *bf_desc; /* virtual addr of desc */
dma_addr_t bf_daddr; /* physical addr of desc */
dma_addr_t bf_buf_addr; /* physical addr of data buffer */
- u32 bf_status;
+ bool bf_stale;
u16 bf_flags;
struct ath_buf_state bf_state;
dma_addr_t bf_dmacontext;
};
-#define ATH_RXBUF_RESET(_bf) ((_bf)->bf_status = 0)
-#define ATH_BUFSTATUS_STALE 0x00000002
-
struct ath_descdma {
- const char *dd_name;
struct ath_desc *dd_desc;
dma_addr_t dd_desc_paddr;
u32 dd_desc_len;
struct ath_buf *dd_bufptr;
- dma_addr_t dd_dmacontext;
};
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
@@ -299,26 +293,6 @@ struct ath_tx_control {
#define ATH_TX_XRETRY 0x02
#define ATH_TX_BAR 0x04
-/* All RSSI values are noise floor adjusted */
-struct ath_tx_stat {
- int rssi;
- int rssictl[ATH_MAX_ANTENNA];
- int rssiextn[ATH_MAX_ANTENNA];
- int rateieee;
- int rateKbps;
- int ratecode;
- int flags;
- u32 airtime; /* time on air per final tx rate */
-};
-
-struct aggr_rifs_param {
- int param_max_frames;
- int param_max_len;
- int param_rl;
- int param_al;
- struct ath_rc_series *param_rcs;
-};
-
struct ath_node {
struct ath_softc *an_sc;
struct ath_atx_tid tid[WME_NUM_TID];
@@ -366,7 +340,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_init(struct ath_softc *sc, int nbufs);
-int ath_tx_cleanup(struct ath_softc *sc);
+void ath_tx_cleanup(struct ath_softc *sc);
struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb);
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q);
@@ -486,12 +460,9 @@ struct ath_led {
bool registered;
};
-/* Rfkill */
-#define ATH_RFKILL_POLL_INTERVAL 2000 /* msecs */
-
struct ath_rfkill {
struct rfkill *rfkill;
- struct delayed_work rfkill_poll;
+ struct rfkill_ops ops;
char rfkill_name[32];
};
@@ -529,19 +500,20 @@ struct ath_rfkill {
#define SC_OP_BEACONS BIT(1)
#define SC_OP_RXAGGR BIT(2)
#define SC_OP_TXAGGR BIT(3)
-#define SC_OP_CHAINMASK_UPDATE BIT(4)
-#define SC_OP_FULL_RESET BIT(5)
-#define SC_OP_PREAMBLE_SHORT BIT(6)
-#define SC_OP_PROTECT_ENABLE BIT(7)
-#define SC_OP_RXFLUSH BIT(8)
-#define SC_OP_LED_ASSOCIATED BIT(9)
-#define SC_OP_RFKILL_REGISTERED BIT(10)
-#define SC_OP_RFKILL_SW_BLOCKED BIT(11)
-#define SC_OP_RFKILL_HW_BLOCKED BIT(12)
-#define SC_OP_WAIT_FOR_BEACON BIT(13)
-#define SC_OP_LED_ON BIT(14)
-#define SC_OP_SCANNING BIT(15)
-#define SC_OP_TSF_RESET BIT(16)
+#define SC_OP_FULL_RESET BIT(4)
+#define SC_OP_PREAMBLE_SHORT BIT(5)
+#define SC_OP_PROTECT_ENABLE BIT(6)
+#define SC_OP_RXFLUSH BIT(7)
+#define SC_OP_LED_ASSOCIATED BIT(8)
+#define SC_OP_RFKILL_REGISTERED BIT(9)
+#define SC_OP_WAIT_FOR_BEACON BIT(12)
+#define SC_OP_LED_ON BIT(13)
+#define SC_OP_SCANNING BIT(14)
+#define SC_OP_TSF_RESET BIT(15)
+#define SC_OP_WAIT_FOR_CAB BIT(16)
+#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17)
+#define SC_OP_WAIT_FOR_TX_ACK BIT(18)
+#define SC_OP_BEACON_SYNC BIT(19)
struct ath_bus_ops {
void (*read_cachesize)(struct ath_softc *sc, int *csz);
@@ -603,8 +575,8 @@ struct ath_softc {
struct ath_tx tx;
struct ath_beacon beacon;
struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
- struct ath_rate_table *hw_rate_table[ATH9K_MODE_MAX];
- struct ath_rate_table *cur_rate_table;
+ const struct ath_rate_table *hw_rate_table[ATH9K_MODE_MAX];
+ const struct ath_rate_table *cur_rate_table;
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
struct ath_led radio_led;
@@ -617,6 +589,8 @@ struct ath_softc {
int led_on_cnt;
int led_off_cnt;
+ int beacon_interval;
+
struct ath_rfkill rf_kill;
struct ath_ani ani;
struct ath9k_node_stats nodestats;
@@ -624,6 +598,7 @@ struct ath_softc {
struct ath9k_debug debug;
#endif
struct ath_bus_ops *bus_ops;
+ struct ath_beacon_config cur_beacon_conf;
};
struct ath_wiphy {
@@ -701,7 +676,9 @@ static inline void ath9k_ps_restore(struct ath_softc *sc)
{
if (atomic_dec_and_test(&sc->ps_usecount))
if ((sc->hw->conf.flags & IEEE80211_CONF_PS) &&
- !(sc->sc_flags & SC_OP_WAIT_FOR_BEACON))
+ !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
+ SC_OP_WAIT_FOR_PSPOLL_DATA |
+ SC_OP_WAIT_FOR_TX_ACK)))
ath9k_hw_setpower(sc->sc_ah,
sc->sc_ah->restore_mode);
}
@@ -722,36 +699,7 @@ void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
bool ath9k_wiphy_scanning(struct ath_softc *sc);
void ath9k_wiphy_work(struct work_struct *work);
-/*
- * Read and write, they both share the same lock. We do this to serialize
- * reads and writes on Atheros 802.11n PCI devices only. This is required
- * as the FIFO on these devices can only accept sanely 2 requests. After
- * that the device goes bananas. Serializing the reads/writes prevents this
- * from happening.
- */
-
-static inline void ath9k_iowrite32(struct ath_hw *ah, u32 reg_offset, u32 val)
-{
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
- unsigned long flags;
- spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
- iowrite32(val, ah->ah_sc->mem + reg_offset);
- spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
- } else
- iowrite32(val, ah->ah_sc->mem + reg_offset);
-}
-
-static inline unsigned int ath9k_ioread32(struct ath_hw *ah, u32 reg_offset)
-{
- u32 val;
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
- unsigned long flags;
- spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
- val = ioread32(ah->ah_sc->mem + reg_offset);
- spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
- } else
- val = ioread32(ah->ah_sc->mem + reg_offset);
- return val;
-}
+void ath9k_iowrite32(struct ath_hw *ah, u32 reg_offset, u32 val);
+unsigned int ath9k_ioread32(struct ath_hw *ah, u32 reg_offset);
#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index ec995730632..3639a2e6987 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -43,7 +43,7 @@ static int ath_beaconq_config(struct ath_softc *sc)
if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) {
DPRINTF(sc, ATH_DBG_FATAL,
- "unable to update h/w beacon queue parameters\n");
+ "Unable to update h/w beacon queue parameters\n");
return 0;
} else {
ath9k_hw_resettxqueue(ah, sc->beacon.beaconq);
@@ -59,11 +59,11 @@ static int ath_beaconq_config(struct ath_softc *sc)
static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
struct ath_buf *bf)
{
- struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
+ struct sk_buff *skb = bf->bf_mpdu;
struct ath_hw *ah = sc->sc_ah;
struct ath_desc *ds;
struct ath9k_11n_rate_series series[4];
- struct ath_rate_table *rt;
+ const struct ath_rate_table *rt;
int flags, antenna, ctsrate = 0, ctsduration = 0;
u8 rate;
@@ -132,16 +132,13 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
avp = (void *)vif->drv_priv;
cabq = sc->beacon.cabq;
- if (avp->av_bcbuf == NULL) {
- DPRINTF(sc, ATH_DBG_BEACON, "avp=%p av_bcbuf=%p\n",
- avp, avp->av_bcbuf);
+ if (avp->av_bcbuf == NULL)
return NULL;
- }
/* Release the old beacon first */
bf = avp->av_bcbuf;
- skb = (struct sk_buff *)bf->bf_mpdu;
+ skb = bf->bf_mpdu;
if (skb) {
dma_unmap_single(sc->dev, bf->bf_dmacontext,
skb->len, DMA_TO_DEVICE);
@@ -229,7 +226,7 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc,
return;
bf = avp->av_bcbuf;
- skb = (struct sk_buff *) bf->bf_mpdu;
+ skb = bf->bf_mpdu;
ath_beacon_setup(sc, avp, bf);
@@ -302,7 +299,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
/* release the previous beacon frame, if it already exists. */
bf = avp->av_bcbuf;
if (bf->bf_mpdu != NULL) {
- skb = (struct sk_buff *)bf->bf_mpdu;
+ skb = bf->bf_mpdu;
dma_unmap_single(sc->dev, bf->bf_dmacontext,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
@@ -323,8 +320,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
u64 tsfadjust;
int intval;
- intval = sc->hw->conf.beacon_int ?
- sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
+ intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL;
/*
* Calculate the TSF offset for this beacon slot, i.e., the
@@ -374,7 +370,7 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
bf = avp->av_bcbuf;
if (bf->bf_mpdu != NULL) {
- struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
+ struct sk_buff *skb = bf->bf_mpdu;
dma_unmap_single(sc->dev, bf->bf_dmacontext,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
@@ -415,6 +411,7 @@ void ath_beacon_tasklet(unsigned long data)
} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
DPRINTF(sc, ATH_DBG_BEACON,
"beacon is officially stuck\n");
+ sc->sc_flags |= SC_OP_TSF_RESET;
ath_reset(sc, false);
}
@@ -434,8 +431,7 @@ void ath_beacon_tasklet(unsigned long data)
* on the tsf to safeguard against missing an swba.
*/
- intval = sc->hw->conf.beacon_int ?
- sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
+ intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL;
tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU(tsf>>32, tsf);
@@ -512,8 +508,7 @@ void ath_beacon_tasklet(unsigned long data)
* slot. Slots that are not occupied will generate nothing.
*/
static void ath_beacon_config_ap(struct ath_softc *sc,
- struct ath_beacon_config *conf,
- struct ath_vif *avp)
+ struct ath_beacon_config *conf)
{
u32 nexttbtt, intval;
@@ -558,14 +553,14 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
* we've associated with.
*/
static void ath_beacon_config_sta(struct ath_softc *sc,
- struct ath_beacon_config *conf,
- struct ath_vif *avp)
+ struct ath_beacon_config *conf)
{
struct ath9k_beacon_state bs;
int dtimperiod, dtimcount, sleepduration;
int cfpperiod, cfpcount;
u32 nexttbtt = 0, intval, tsftu;
u64 tsf;
+ int num_beacons, offset, dtim_dec_count, cfp_dec_count;
memset(&bs, 0, sizeof(bs));
intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
@@ -593,14 +588,27 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
*/
tsf = ath9k_hw_gettsf64(sc->sc_ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
- do {
+
+ num_beacons = tsftu / intval + 1;
+ offset = tsftu % intval;
+ nexttbtt = tsftu - offset;
+ if (offset)
nexttbtt += intval;
- if (--dtimcount < 0) {
- dtimcount = dtimperiod - 1;
- if (--cfpcount < 0)
- cfpcount = cfpperiod - 1;
- }
- } while (nexttbtt < tsftu);
+
+ /* DTIM Beacon every dtimperiod Beacon */
+ dtim_dec_count = num_beacons % dtimperiod;
+ /* CFP every cfpperiod DTIM Beacon */
+ cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
+ if (dtim_dec_count)
+ cfp_dec_count++;
+
+ dtimcount -= dtim_dec_count;
+ if (dtimcount < 0)
+ dtimcount += dtimperiod;
+
+ cfpcount -= cfp_dec_count;
+ if (cfpcount < 0)
+ cfpcount += cfpperiod;
bs.bs_intval = intval;
bs.bs_nexttbtt = nexttbtt;
@@ -659,7 +667,6 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
static void ath_beacon_config_adhoc(struct ath_softc *sc,
struct ath_beacon_config *conf,
- struct ath_vif *avp,
struct ieee80211_vif *vif)
{
u64 tsf;
@@ -667,6 +674,14 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ /*
+ * It looks like mac80211 may end up using beacon interval of zero in
+ * some cases (at least for mesh point). Avoid getting into an
+ * infinite loop by using a bit safer value instead..
+ */
+ if (intval == 0)
+ intval = 100;
+
/* Pull nexttbtt forward to reflect the current TSF */
nexttbtt = TSF_TO_TU(sc->beacon.bc_tstamp >> 32, sc->beacon.bc_tstamp);
@@ -703,44 +718,50 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
sc->beacon.bmisscnt = 0;
ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
+ /* FIXME: Handle properly when vif is NULL */
+ if (vif && sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
ath_beacon_start_adhoc(sc, vif);
}
void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- struct ath_beacon_config conf;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+ enum nl80211_iftype iftype;
/* Setup the beacon configuration parameters */
- memset(&conf, 0, sizeof(struct ath_beacon_config));
- conf.beacon_interval = sc->hw->conf.beacon_int ?
- sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
- conf.listen_interval = 1;
- conf.dtim_period = conf.beacon_interval;
- conf.dtim_count = 1;
- conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval;
-
if (vif) {
- struct ath_vif *avp = (struct ath_vif *)vif->drv_priv;
-
- switch(avp->av_opmode) {
- case NL80211_IFTYPE_AP:
- ath_beacon_config_ap(sc, &conf, avp);
- break;
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_MESH_POINT:
- ath_beacon_config_adhoc(sc, &conf, avp, vif);
- break;
- case NL80211_IFTYPE_STATION:
- ath_beacon_config_sta(sc, &conf, avp);
- break;
- default:
- DPRINTF(sc, ATH_DBG_CONFIG,
- "Unsupported beaconing mode\n");
- return;
- }
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ iftype = vif->type;
+
+ cur_conf->beacon_interval = bss_conf->beacon_int;
+ cur_conf->dtim_period = bss_conf->dtim_period;
+ cur_conf->listen_interval = 1;
+ cur_conf->dtim_count = 1;
+ cur_conf->bmiss_timeout =
+ ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+ } else {
+ iftype = sc->sc_ah->opmode;
+ }
+
- sc->sc_flags |= SC_OP_BEACONS;
+ switch (iftype) {
+ case NL80211_IFTYPE_AP:
+ ath_beacon_config_ap(sc, cur_conf);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ ath_beacon_config_adhoc(sc, cur_conf, vif);
+ break;
+ case NL80211_IFTYPE_STATION:
+ ath_beacon_config_sta(sc, cur_conf);
+ break;
+ default:
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "Unsupported beaconing mode\n");
+ return;
}
+
+ sc->sc_flags |= SC_OP_BEACONS;
}
diff --git a/drivers/net/wireless/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index e2d62e97131..a32d7e7fecb 100644
--- a/drivers/net/wireless/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -186,7 +186,7 @@ static bool getNoiseFloorThresh(struct ath_hw *ah,
}
static void ath9k_hw_setup_calibration(struct ath_hw *ah,
- struct hal_cal_list *currCal)
+ struct ath9k_cal_list *currCal)
{
REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
@@ -220,7 +220,7 @@ static void ath9k_hw_setup_calibration(struct ath_hw *ah,
}
static void ath9k_hw_reset_calibration(struct ath_hw *ah,
- struct hal_cal_list *currCal)
+ struct ath9k_cal_list *currCal)
{
int i;
@@ -238,13 +238,12 @@ static void ath9k_hw_reset_calibration(struct ath_hw *ah,
ah->cal_samples = 0;
}
-static void ath9k_hw_per_calibration(struct ath_hw *ah,
+static bool ath9k_hw_per_calibration(struct ath_hw *ah,
struct ath9k_channel *ichan,
u8 rxchainmask,
- struct hal_cal_list *currCal,
- bool *isCalDone)
+ struct ath9k_cal_list *currCal)
{
- *isCalDone = false;
+ bool iscaldone = false;
if (currCal->calState == CAL_RUNNING) {
if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
@@ -263,7 +262,7 @@ static void ath9k_hw_per_calibration(struct ath_hw *ah,
currCal->calData->calPostProc(ah, numChains);
ichan->CalValid |= currCal->calData->calType;
currCal->calState = CAL_DONE;
- *isCalDone = true;
+ iscaldone = true;
} else {
ath9k_hw_setup_calibration(ah, currCal);
}
@@ -271,11 +270,13 @@ static void ath9k_hw_per_calibration(struct ath_hw *ah,
} else if (!(ichan->CalValid & currCal->calData->calType)) {
ath9k_hw_reset_calibration(ah, currCal);
}
+
+ return iscaldone;
}
/* Assumes you are talking about the currently configured channel */
static bool ath9k_hw_iscal_supported(struct ath_hw *ah,
- enum hal_cal_types calType)
+ enum ath9k_cal_types calType)
{
struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
@@ -284,8 +285,8 @@ static bool ath9k_hw_iscal_supported(struct ath_hw *ah,
return true;
case ADC_GAIN_CAL:
case ADC_DC_CAL:
- if (conf->channel->band == IEEE80211_BAND_5GHZ &&
- conf_is_ht20(conf))
+ if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
+ conf_is_ht20(conf)))
return true;
break;
}
@@ -498,7 +499,7 @@ static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
{
u32 iOddMeasOffset, iEvenMeasOffset, val, i;
int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
- const struct hal_percal_data *calData =
+ const struct ath9k_percal_data *calData =
ah->cal_list_curr->calData;
u32 numSamples =
(1 << (calData->calCountMax + 5)) * calData->calNumSamples;
@@ -555,7 +556,7 @@ static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ah->ah_sc->hw->conf;
- struct hal_cal_list *currCal = ah->cal_list_curr;
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
if (!ah->curchan)
return true;
@@ -841,30 +842,28 @@ static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah)
}
bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
- u8 rxchainmask, bool longcal,
- bool *isCalDone)
+ u8 rxchainmask, bool longcal)
{
- struct hal_cal_list *currCal = ah->cal_list_curr;
-
- *isCalDone = true;
+ bool iscaldone = true;
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
if (currCal &&
(currCal->calState == CAL_RUNNING ||
currCal->calState == CAL_WAITING)) {
- ath9k_hw_per_calibration(ah, chan, rxchainmask, currCal,
- isCalDone);
- if (*isCalDone) {
+ iscaldone = ath9k_hw_per_calibration(ah, chan,
+ rxchainmask, currCal);
+ if (iscaldone) {
ah->cal_list_curr = currCal = currCal->calNext;
if (currCal->calState == CAL_WAITING) {
- *isCalDone = false;
+ iscaldone = false;
ath9k_hw_reset_calibration(ah, currCal);
}
}
}
if (longcal) {
- if (AR_SREV_9285(ah) && AR_SREV_9285_11_OR_LATER(ah))
+ if (AR_SREV_9285_11_OR_LATER(ah))
ath9k_hw_9285_pa_cal(ah);
if (OLC_FOR_AR9280_20_LATER)
@@ -872,18 +871,15 @@ bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_getnf(ah, chan);
ath9k_hw_loadnf(ah, ah->curchan);
ath9k_hw_start_nfcal(ah);
-
- if (chan->channelFlags & CHANNEL_CW_INT)
- chan->channelFlags &= ~CHANNEL_CW_INT;
}
- return true;
+ return iscaldone;
}
static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
{
REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- if (chan->channelFlags & CHANNEL_HT20) {
+ if (IS_CHAN_HT20(chan)) {
REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
@@ -919,83 +915,66 @@ static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
return true;
}
-bool ath9k_hw_init_cal(struct ath_hw *ah,
- struct ath9k_channel *chan)
+bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
{
- if (AR_SREV_9285(ah) && AR_SREV_9285_12_OR_LATER(ah)) {
+ if (AR_SREV_9285_12_OR_LATER(ah)) {
if (!ar9285_clc(ah, chan))
return false;
- } else if (AR_SREV_9280_10_OR_LATER(ah)) {
- REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ } else {
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+ }
- /* Kick off the cal */
+ /* Calibrate the AGC */
REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
- AR_PHY_AGC_CONTROL_CAL);
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_CAL, 0,
- AH_WAIT_TIMEOUT)) {
+ /* Poll for offset calibration complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
"offset calibration failed to complete in 1ms; "
"noisy environment?\n");
return false;
}
- REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
- REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- }
-
- /* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
- AR_PHY_AGC_CONTROL_CAL);
-
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT)) {
- DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
- "offset calibration failed to complete in 1ms; "
- "noisy environment?\n");
- return false;
- }
-
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+ }
}
/* Do PA Calibration */
- if (AR_SREV_9285(ah) && AR_SREV_9285_11_OR_LATER(ah))
+ if (AR_SREV_9285_11_OR_LATER(ah))
ath9k_hw_9285_pa_cal(ah);
- /* Do NF Calibration */
+ /* Do NF Calibration after DC offset and other calibrations */
REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
- AR_PHY_AGC_CONTROL_NF);
+ REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+ /* Enable IQ, ADC Gain and ADC DC offset CALs */
if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
INIT_CAL(&ah->adcgain_caldata);
INSERT_CAL(ah, &ah->adcgain_caldata);
DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
- "enabling ADC Gain Calibration.\n");
+ "enabling ADC Gain Calibration.\n");
}
if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) {
INIT_CAL(&ah->adcdc_caldata);
INSERT_CAL(ah, &ah->adcdc_caldata);
DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
- "enabling ADC DC Calibration.\n");
+ "enabling ADC DC Calibration.\n");
}
if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
+ "enabling IQ Calibration.\n");
}
ah->cal_list_curr = ah->cal_list;
@@ -1009,49 +988,49 @@ bool ath9k_hw_init_cal(struct ath_hw *ah,
return true;
}
-const struct hal_percal_data iq_cal_multi_sample = {
+const struct ath9k_percal_data iq_cal_multi_sample = {
IQ_MISMATCH_CAL,
MAX_CAL_SAMPLES,
PER_MIN_LOG_COUNT,
ath9k_hw_iqcal_collect,
ath9k_hw_iqcalibrate
};
-const struct hal_percal_data iq_cal_single_sample = {
+const struct ath9k_percal_data iq_cal_single_sample = {
IQ_MISMATCH_CAL,
MIN_CAL_SAMPLES,
PER_MAX_LOG_COUNT,
ath9k_hw_iqcal_collect,
ath9k_hw_iqcalibrate
};
-const struct hal_percal_data adc_gain_cal_multi_sample = {
+const struct ath9k_percal_data adc_gain_cal_multi_sample = {
ADC_GAIN_CAL,
MAX_CAL_SAMPLES,
PER_MIN_LOG_COUNT,
ath9k_hw_adc_gaincal_collect,
ath9k_hw_adc_gaincal_calibrate
};
-const struct hal_percal_data adc_gain_cal_single_sample = {
+const struct ath9k_percal_data adc_gain_cal_single_sample = {
ADC_GAIN_CAL,
MIN_CAL_SAMPLES,
PER_MAX_LOG_COUNT,
ath9k_hw_adc_gaincal_collect,
ath9k_hw_adc_gaincal_calibrate
};
-const struct hal_percal_data adc_dc_cal_multi_sample = {
+const struct ath9k_percal_data adc_dc_cal_multi_sample = {
ADC_DC_CAL,
MAX_CAL_SAMPLES,
PER_MIN_LOG_COUNT,
ath9k_hw_adc_dccal_collect,
ath9k_hw_adc_dccal_calibrate
};
-const struct hal_percal_data adc_dc_cal_single_sample = {
+const struct ath9k_percal_data adc_dc_cal_single_sample = {
ADC_DC_CAL,
MIN_CAL_SAMPLES,
PER_MAX_LOG_COUNT,
ath9k_hw_adc_dccal_collect,
ath9k_hw_adc_dccal_calibrate
};
-const struct hal_percal_data adc_init_dc_cal = {
+const struct ath9k_percal_data adc_init_dc_cal = {
ADC_DC_INIT_CAL,
MIN_CAL_SAMPLES,
INIT_LOG_COUNT,
diff --git a/drivers/net/wireless/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 1c74bd50700..fe5367f1414 100644
--- a/drivers/net/wireless/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -17,13 +17,13 @@
#ifndef CALIB_H
#define CALIB_H
-extern const struct hal_percal_data iq_cal_multi_sample;
-extern const struct hal_percal_data iq_cal_single_sample;
-extern const struct hal_percal_data adc_gain_cal_multi_sample;
-extern const struct hal_percal_data adc_gain_cal_single_sample;
-extern const struct hal_percal_data adc_dc_cal_multi_sample;
-extern const struct hal_percal_data adc_dc_cal_single_sample;
-extern const struct hal_percal_data adc_init_dc_cal;
+extern const struct ath9k_percal_data iq_cal_multi_sample;
+extern const struct ath9k_percal_data iq_cal_single_sample;
+extern const struct ath9k_percal_data adc_gain_cal_multi_sample;
+extern const struct ath9k_percal_data adc_gain_cal_single_sample;
+extern const struct ath9k_percal_data adc_dc_cal_multi_sample;
+extern const struct ath9k_percal_data adc_dc_cal_single_sample;
+extern const struct ath9k_percal_data adc_init_dc_cal;
#define AR_PHY_CCA_MAX_GOOD_VALUE -85
#define AR_PHY_CCA_MAX_HIGH_VALUE -62
@@ -67,14 +67,14 @@ struct ar5416IniArray {
} \
} while (0)
-enum hal_cal_types {
+enum ath9k_cal_types {
ADC_DC_INIT_CAL = 0x1,
ADC_GAIN_CAL = 0x2,
ADC_DC_CAL = 0x4,
IQ_MISMATCH_CAL = 0x8
};
-enum hal_cal_state {
+enum ath9k_cal_state {
CAL_INACTIVE,
CAL_WAITING,
CAL_RUNNING,
@@ -87,18 +87,18 @@ enum hal_cal_state {
#define PER_MIN_LOG_COUNT 2
#define PER_MAX_LOG_COUNT 10
-struct hal_percal_data {
- enum hal_cal_types calType;
+struct ath9k_percal_data {
+ enum ath9k_cal_types calType;
u32 calNumSamples;
u32 calCountMax;
void (*calCollect) (struct ath_hw *);
void (*calPostProc) (struct ath_hw *, u8);
};
-struct hal_cal_list {
- const struct hal_percal_data *calData;
- enum hal_cal_state calState;
- struct hal_cal_list *calNext;
+struct ath9k_cal_list {
+ const struct ath9k_percal_data *calData;
+ enum ath9k_cal_state calState;
+ struct ath9k_cal_list *calNext;
};
struct ath9k_nfcal_hist {
@@ -116,8 +116,7 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah);
s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
- u8 rxchainmask, bool longcal,
- bool *isCalDone);
+ u8 rxchainmask, bool longcal);
bool ath9k_hw_init_cal(struct ath_hw *ah,
struct ath9k_channel *chan);
diff --git a/drivers/net/wireless/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index fdf9528fa49..6d20725d645 100644
--- a/drivers/net/wireless/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -44,6 +44,44 @@ static int ath9k_debugfs_open(struct inode *inode, struct file *file)
return 0;
}
+static ssize_t read_file_debug(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.debug_mask);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long mask;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EINVAL;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &mask))
+ return -EINVAL;
+
+ sc->debug.debug_mask = mask;
+ return count;
+}
+
+static const struct file_operations fops_debug = {
+ .read = read_file_debug,
+ .write = write_file_debug,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
static ssize_t read_file_dma(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -224,111 +262,66 @@ static const struct file_operations fops_interrupt = {
.owner = THIS_MODULE
};
-static void ath_debug_stat_11n_rc(struct ath_softc *sc, struct sk_buff *skb)
-{
- struct ath_tx_info_priv *tx_info_priv = NULL;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rates = tx_info->status.rates;
- int final_ts_idx, idx;
-
- tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
- final_ts_idx = tx_info_priv->tx.ts_rateindex;
- idx = sc->cur_rate_table->info[rates[final_ts_idx].idx].dot11rate;
-
- sc->debug.stats.n_rcstats[idx].success++;
-}
-
-static void ath_debug_stat_legacy_rc(struct ath_softc *sc, struct sk_buff *skb)
+void ath_debug_stat_rc(struct ath_softc *sc, struct sk_buff *skb)
{
struct ath_tx_info_priv *tx_info_priv = NULL;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rates = tx_info->status.rates;
int final_ts_idx, idx;
+ struct ath_rc_stats *stats;
tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
final_ts_idx = tx_info_priv->tx.ts_rateindex;
idx = rates[final_ts_idx].idx;
-
- sc->debug.stats.legacy_rcstats[idx].success++;
+ stats = &sc->debug.stats.rcstats[idx];
+ stats->success++;
}
-void ath_debug_stat_rc(struct ath_softc *sc, struct sk_buff *skb)
-{
- if (conf_is_ht(&sc->hw->conf))
- ath_debug_stat_11n_rc(sc, skb);
- else
- ath_debug_stat_legacy_rc(sc, skb);
-}
-
-/* FIXME: legacy rates, later on .. */
void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per)
{
- if (conf_is_ht(&sc->hw->conf)) {
- int idx = sc->cur_rate_table->info[rix].dot11rate;
+ struct ath_rc_stats *stats = &sc->debug.stats.rcstats[rix];
- sc->debug.stats.n_rcstats[idx].xretries += xretries;
- sc->debug.stats.n_rcstats[idx].retries += retries;
- sc->debug.stats.n_rcstats[idx].per = per;
- }
+ stats->xretries += xretries;
+ stats->retries += retries;
+ stats->per = per;
}
-static ssize_t ath_read_file_stat_11n_rc(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
- char buf[1024];
- unsigned int len = 0;
+ char *buf;
+ unsigned int len = 0, max;
int i = 0;
+ ssize_t retval;
- len += sprintf(buf, "%7s %13s %8s %8s %6s\n\n", "Rate", "Success",
- "Retries", "XRetries", "PER");
-
- for (i = 0; i <= 15; i++) {
- len += snprintf(buf + len, sizeof(buf) - len,
- "%5s%3d: %8u %8u %8u %8u\n", "MCS", i,
- sc->debug.stats.n_rcstats[i].success,
- sc->debug.stats.n_rcstats[i].retries,
- sc->debug.stats.n_rcstats[i].xretries,
- sc->debug.stats.n_rcstats[i].per);
- }
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
+ if (sc->cur_rate_table == NULL)
+ return 0;
-static ssize_t ath_read_file_stat_legacy_rc(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[512];
- unsigned int len = 0;
- int i = 0;
+ max = 80 + sc->cur_rate_table->rate_cnt * 64;
+ buf = kmalloc(max + 1, GFP_KERNEL);
+ if (buf == NULL)
+ return 0;
+ buf[max] = 0;
- len += sprintf(buf, "%7s %13s\n\n", "Rate", "Success");
+ len += sprintf(buf, "%5s %15s %8s %9s %3s\n\n", "Rate", "Success",
+ "Retries", "XRetries", "PER");
for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) {
- len += snprintf(buf + len, sizeof(buf) - len, "%5u: %12u\n",
- sc->cur_rate_table->info[i].ratekbps / 1000,
- sc->debug.stats.legacy_rcstats[i].success);
+ u32 ratekbps = sc->cur_rate_table->info[i].ratekbps;
+ struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i];
+
+ len += snprintf(buf + len, max - len,
+ "%3u.%d: %8u %8u %8u %8u\n", ratekbps / 1000,
+ (ratekbps % 1000) / 100, stats->success,
+ stats->retries, stats->xretries,
+ stats->per);
}
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
-
- if (sc->cur_rate_table == NULL)
- return 0;
-
- if (conf_is_ht(&sc->hw->conf))
- return ath_read_file_stat_11n_rc(file, user_buf, count, ppos);
- else
- return ath_read_file_stat_legacy_rc(file, user_buf, count ,ppos);
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return retval;
}
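/*
 * Minimal sketch of the pattern read_file_rcstat() now follows: size the
 * buffer from the data being dumped, format into it, hand it to
 * simple_read_from_buffer() and free it. "my_dev", its fields and the
 * -ENOMEM choice are illustrative placeholders, not ath9k code.
 */
struct my_dev {
	unsigned int nr_entries;
	u32 entries[32];
};

static ssize_t my_read(struct file *file, char __user *user_buf,
		       size_t count, loff_t *ppos)
{
	struct my_dev *dev = file->private_data;
	unsigned int i, len = 0, max = 80 + dev->nr_entries * 16;
	ssize_t ret;
	char *buf;

	buf = kmalloc(max + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < dev->nr_entries; i++)
		len += scnprintf(buf + len, max - len, "%u: %u\n",
				 i, dev->entries[i]);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}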
static const struct file_operations fops_rcstat = {
@@ -498,11 +491,19 @@ int ath9k_init_debug(struct ath_softc *sc)
{
sc->debug.debug_mask = ath9k_debug;
+ if (!ath9k_debugfs_root)
+ return -ENOENT;
+
sc->debug.debugfs_phy = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
ath9k_debugfs_root);
if (!sc->debug.debugfs_phy)
goto err;
+ sc->debug.debugfs_debug = debugfs_create_file("debug",
+ S_IRUGO | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_debug);
+ if (!sc->debug.debugfs_debug)
+ goto err;
+
sc->debug.debugfs_dma = debugfs_create_file("dma", S_IRUGO,
sc->debug.debugfs_phy, sc, &fops_dma);
if (!sc->debug.debugfs_dma)
@@ -540,6 +541,7 @@ void ath9k_exit_debug(struct ath_softc *sc)
debugfs_remove(sc->debug.debugfs_rcstat);
debugfs_remove(sc->debug.debugfs_interrupt);
debugfs_remove(sc->debug.debugfs_dma);
+ debugfs_remove(sc->debug.debugfs_debug);
debugfs_remove(sc->debug.debugfs_phy);
}
diff --git a/drivers/net/wireless/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 7b0e5419d2b..edda15bf2c1 100644
--- a/drivers/net/wireless/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -19,20 +19,17 @@
enum ATH_DEBUG {
ATH_DBG_RESET = 0x00000001,
- ATH_DBG_REG_IO = 0x00000002,
- ATH_DBG_QUEUE = 0x00000004,
- ATH_DBG_EEPROM = 0x00000008,
- ATH_DBG_CALIBRATE = 0x00000010,
- ATH_DBG_CHANNEL = 0x00000020,
- ATH_DBG_INTERRUPT = 0x00000040,
- ATH_DBG_REGULATORY = 0x00000080,
- ATH_DBG_ANI = 0x00000100,
- ATH_DBG_POWER_MGMT = 0x00000200,
- ATH_DBG_XMIT = 0x00000400,
- ATH_DBG_BEACON = 0x00001000,
- ATH_DBG_CONFIG = 0x00002000,
- ATH_DBG_KEYCACHE = 0x00004000,
- ATH_DBG_FATAL = 0x00008000,
+ ATH_DBG_QUEUE = 0x00000002,
+ ATH_DBG_EEPROM = 0x00000004,
+ ATH_DBG_CALIBRATE = 0x00000008,
+ ATH_DBG_INTERRUPT = 0x00000010,
+ ATH_DBG_REGULATORY = 0x00000020,
+ ATH_DBG_ANI = 0x00000040,
+ ATH_DBG_XMIT = 0x00000080,
+ ATH_DBG_BEACON = 0x00000100,
+ ATH_DBG_CONFIG = 0x00000200,
+ ATH_DBG_FATAL = 0x00000400,
+ ATH_DBG_PS = 0x00000800,
ATH_DBG_ANY = 0xffffffff
};
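/*
 * Hedged usage sketch for the renumbered mask: DPRINTF() emits a message
 * only when the matching bit is set in sc->debug.debug_mask, which the
 * "debug" debugfs file added earlier in this patch makes writable at
 * runtime. my_report_ps() is a placeholder, not an ath9k function.
 */
static void my_report_ps(struct ath_softc *sc, bool awake)
{
	DPRINTF(sc, ATH_DBG_PS, "power save: chip is %s\n",
		awake ? "awake" : "asleep");
}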
@@ -83,11 +80,7 @@ struct ath_interrupt_stats {
u32 dtim;
};
-struct ath_legacy_rc_stats {
- u32 success;
-};
-
-struct ath_11n_rc_stats {
+struct ath_rc_stats {
u32 success;
u32 retries;
u32 xretries;
@@ -96,13 +89,13 @@ struct ath_11n_rc_stats {
struct ath_stats {
struct ath_interrupt_stats istats;
- struct ath_legacy_rc_stats legacy_rcstats[12]; /* max(11a,11b,11g) */
- struct ath_11n_rc_stats n_rcstats[16]; /* 0..15 MCS rates */
+ struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
};
struct ath9k_debug {
int debug_mask;
struct dentry *debugfs_phy;
+ struct dentry *debugfs_debug;
struct dentry *debugfs_dma;
struct dentry *debugfs_interrupt;
struct dentry *debugfs_rcstat;
diff --git a/drivers/net/wireless/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index ffc36b0361c..a2fda702b62 100644
--- a/drivers/net/wireless/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -694,7 +694,7 @@ static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah,
#undef TMP_VAL_VPD_TABLE
}
-static bool ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
+static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
struct ath9k_channel *chan,
int16_t *pTxPowerIndexOffset)
{
@@ -783,11 +783,11 @@ static bool ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
((pdadcValues[4 * j + 3] & 0xFF) << 24);
REG_WRITE(ah, regOffset, reg32);
- DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
"PDADC (%d,%4x): %4.4x %8.8x\n",
i, regChainOffset, regOffset,
reg32);
- DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
"PDADC: Chain %d | "
"PDADC %3d Value %3d | "
"PDADC %3d Value %3d | "
@@ -805,11 +805,9 @@ static bool ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
}
*pTxPowerIndexOffset = 0;
-
- return true;
}
-static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
+static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
struct ath9k_channel *chan,
int16_t *ratesArray,
u16 cfgCtl,
@@ -910,7 +908,7 @@ static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
ah->eep_ops->get_eeprom_rev(ah) <= 2)
twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
- DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
"LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
"EXT_ADDITIVE %d\n",
ctlMode, numCtlModes, isHt40CtlMode,
@@ -918,7 +916,7 @@ static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
for (i = 0; (i < AR5416_NUM_CTLS) &&
pEepData->ctlIndex[i]; i++) {
- DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
" LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
"pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
"chan %d\n",
@@ -941,7 +939,7 @@ static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
IS_CHAN_2GHZ(chan),
AR5416_EEP4K_NUM_BAND_EDGES);
- DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
" MATCH-EE_IDX %d: ch %d is2 %d "
"2xMinEdge %d chainmask %d chains %d\n",
i, freq, IS_CHAN_2GHZ(chan),
@@ -961,7 +959,7 @@ static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
- DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
" SEL-Min ctlMode %d pCtlMode %d "
"2xMaxEdge %d sP %d minCtlPwr %d\n",
ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
@@ -1041,10 +1039,9 @@ static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0];
}
- return true;
}
-static int ath9k_hw_4k_set_txpower(struct ath_hw *ah,
+static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
struct ath9k_channel *chan,
u16 cfgCtl,
u8 twiceAntennaReduction,
@@ -1065,22 +1062,13 @@ static int ath9k_hw_4k_set_txpower(struct ath_hw *ah,
ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
}
- if (!ath9k_hw_set_4k_power_per_rate_table(ah, chan,
+ ath9k_hw_set_4k_power_per_rate_table(ah, chan,
&ratesArray[0], cfgCtl,
twiceAntennaReduction,
twiceMaxRegulatoryPower,
- powerLimit)) {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "ath9k_hw_set_txpower: unable to set "
- "tx power per rate table\n");
- return -EIO;
- }
+ powerLimit);
- if (!ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset)) {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "ath9k_hw_set_txpower: unable to set power table\n");
- return -EIO;
- }
+ ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset);
for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
@@ -1168,7 +1156,6 @@ static int ath9k_hw_4k_set_txpower(struct ath_hw *ah,
else
ah->regulatory.max_power_level = ratesArray[i];
- return 0;
}
static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
@@ -2103,7 +2090,7 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
return;
}
-static bool ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
+static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
struct ath9k_channel *chan,
int16_t *pTxPowerIndexOffset)
{
@@ -2234,11 +2221,11 @@ static bool ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
((pdadcValues[4 * j + 3] & 0xFF) << 24);
REG_WRITE(ah, regOffset, reg32);
- DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
"PDADC (%d,%4x): %4.4x %8.8x\n",
i, regChainOffset, regOffset,
reg32);
- DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
"PDADC: Chain %d | PDADC %3d "
"Value %3d | PDADC %3d Value %3d | "
"PDADC %3d Value %3d | PDADC %3d "
@@ -2255,13 +2242,11 @@ static bool ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
}
*pTxPowerIndexOffset = 0;
-
- return true;
#undef SM_PD_GAIN
#undef SM_PDGAIN_B
}
-static bool ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
+static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
struct ath9k_channel *chan,
int16_t *ratesArray,
u16 cfgCtl,
@@ -2415,14 +2400,14 @@ static bool ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
ah->eep_ops->get_eeprom_rev(ah) <= 2)
twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
- DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
"LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
"EXT_ADDITIVE %d\n",
ctlMode, numCtlModes, isHt40CtlMode,
(pCtlMode[ctlMode] & EXT_ADDITIVE));
for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
- DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
" LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
"pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
"chan %d\n",
@@ -2441,7 +2426,7 @@ static bool ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
rep->ctlEdges[ar5416_get_ntxchains(tx_chainmask) - 1],
IS_CHAN_2GHZ(chan), AR5416_NUM_BAND_EDGES);
- DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
" MATCH-EE_IDX %d: ch %d is2 %d "
"2xMinEdge %d chainmask %d chains %d\n",
i, freq, IS_CHAN_2GHZ(chan),
@@ -2460,7 +2445,7 @@ static bool ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
minCtlPower = min(twiceMaxEdgePower, scaledPower);
- DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
" SEL-Min ctlMode %d pCtlMode %d "
"2xMaxEdge %d sP %d minCtlPwr %d\n",
ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
@@ -2549,10 +2534,9 @@ static bool ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
targetPowerCckExt.tPow2x[0];
}
}
- return true;
}
-static int ath9k_hw_def_set_txpower(struct ath_hw *ah,
+static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
struct ath9k_channel *chan,
u16 cfgCtl,
u8 twiceAntennaReduction,
@@ -2575,22 +2559,13 @@ static int ath9k_hw_def_set_txpower(struct ath_hw *ah,
ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
}
- if (!ath9k_hw_set_def_power_per_rate_table(ah, chan,
+ ath9k_hw_set_def_power_per_rate_table(ah, chan,
&ratesArray[0], cfgCtl,
twiceAntennaReduction,
twiceMaxRegulatoryPower,
- powerLimit)) {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "ath9k_hw_set_txpower: unable to set "
- "tx power per rate table\n");
- return -EIO;
- }
+ powerLimit);
- if (!ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset)) {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "ath9k_hw_set_txpower: unable to set power table\n");
- return -EIO;
- }
+ ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset);
for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
@@ -2717,8 +2692,6 @@ static int ath9k_hw_def_set_txpower(struct ath_hw *ah,
"Invalid chainmask configuration\n");
break;
}
-
- return 0;
}
static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 25b68c881ff..67b8bd12941 100644
--- a/drivers/net/wireless/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -17,6 +17,8 @@
#ifndef EEPROM_H
#define EEPROM_H
+#include <net/cfg80211.h>
+
#define AH_USE_EEPROM 0x1
#ifdef __BIG_ENDIAN
@@ -492,7 +494,7 @@ struct eeprom_ops {
struct ath9k_channel *chan);
void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
- int (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
+ void (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
u16 cfgCtl, u8 twiceAntennaReduction,
u8 twiceMaxRegulatoryPower, u8 powerLimit);
u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index b15eaf8417f..1579c9407ed 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -84,6 +84,38 @@ static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
return ath9k_hw_mac_clks(ah, usecs);
}
+/*
+ * Register reads and writes share the same lock. We do this to serialize
+ * reads and writes on Atheros 802.11n PCI devices only. This is required
+ * as the FIFO on these devices can sanely accept only two outstanding
+ * requests; beyond that the device misbehaves. Serializing the reads and
+ * writes prevents this from happening.
+ */
+
+void ath9k_iowrite32(struct ath_hw *ah, u32 reg_offset, u32 val)
+{
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
+ iowrite32(val, ah->ah_sc->mem + reg_offset);
+ spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
+ } else
+ iowrite32(val, ah->ah_sc->mem + reg_offset);
+}
+
+unsigned int ath9k_ioread32(struct ath_hw *ah, u32 reg_offset)
+{
+ u32 val;
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
+ val = ioread32(ah->ah_sc->mem + reg_offset);
+ spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
+ } else
+ val = ioread32(ah->ah_sc->mem + reg_offset);
+ return val;
+}
+
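/*
 * Hedged note, not part of this hunk: the sc_serial_rw lock taken above
 * must be initialized once before the first serialized register access,
 * typically on the bus probe path; REG_READ()/REG_WRITE() are then
 * expected to route through ath9k_ioread32()/ath9k_iowrite32(), a wiring
 * assumed to live in hw.h and not shown here. my_init_serial_lock() is a
 * placeholder name.
 */
static void my_init_serial_lock(struct ath_softc *sc)
{
	spin_lock_init(&sc->sc_serial_rw);
}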
bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
{
int i;
@@ -97,7 +129,7 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
udelay(AH_TIME_QUANTUM);
}
- DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ DPRINTF(ah->ah_sc, ATH_DBG_ANY,
"timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
timeout, reg, REG_READ(ah, reg), mask, val);
@@ -136,7 +168,7 @@ bool ath9k_get_channel_edges(struct ath_hw *ah,
}
u16 ath9k_hw_computetxtime(struct ath_hw *ah,
- struct ath_rate_table *rates,
+ const struct ath_rate_table *rates,
u32 frameLen, u16 rateix,
bool shortPreamble)
{
@@ -181,7 +213,7 @@ u16 ath9k_hw_computetxtime(struct ath_hw *ah,
}
break;
default:
- DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"Unknown phy %u (rate ix %u)\n",
rates->info[rateix].phy, rateix);
txTime = 0;
@@ -306,7 +338,7 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
REG_WRITE(ah, addr, wrData);
rdData = REG_READ(ah, addr);
if (rdData != wrData) {
- DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"address test failed "
"addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
addr, wrData, rdData);
@@ -318,7 +350,7 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
REG_WRITE(ah, addr, wrData);
rdData = REG_READ(ah, addr);
if (wrData != rdData) {
- DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"address test failed "
"addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
addr, wrData, rdData);
@@ -363,10 +395,7 @@ static void ath9k_hw_set_defaults(struct ath_hw *ah)
ah->config.ack_6mb = 0x0;
ah->config.cwm_ignore_extcca = 0;
ah->config.pcie_powersave_enable = 0;
- ah->config.pcie_l1skp_enable = 0;
ah->config.pcie_clock_req = 0;
- ah->config.pcie_power_reset = 0x100;
- ah->config.pcie_restore = 0;
ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
ah->config.ht_enable = 1;
@@ -375,13 +404,6 @@ static void ath9k_hw_set_defaults(struct ath_hw *ah)
ah->config.cck_trig_high = 200;
ah->config.cck_trig_low = 100;
ah->config.enable_ani = 1;
- ah->config.noise_immunity_level = 4;
- ah->config.ofdm_weaksignal_det = 1;
- ah->config.cck_weaksignal_thr = 0;
- ah->config.spur_immunity_level = 2;
- ah->config.firstep_level = 0;
- ah->config.rssi_thr_high = 40;
- ah->config.rssi_thr_low = 7;
ah->config.diversity_control = 0;
ah->config.antenna_switch_swap = 0;
@@ -390,7 +412,7 @@ static void ath9k_hw_set_defaults(struct ath_hw *ah)
ah->config.spurchans[i][1] = AR_NO_SPUR;
}
- ah->config.intr_mitigation = 1;
+ ah->config.intr_mitigation = true;
/*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -463,8 +485,8 @@ static int ath9k_hw_rfattach(struct ath_hw *ah)
rfStatus = ath9k_hw_init_rf(ah, &ecode);
if (!rfStatus) {
- DPRINTF(ah->ah_sc, ATH_DBG_RESET,
- "RF setup failed, status %u\n", ecode);
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "RF setup failed, status: %u\n", ecode);
return ecode;
}
@@ -488,10 +510,9 @@ static int ath9k_hw_rf_claim(struct ath_hw *ah)
case AR_RAD2122_SREV_MAJOR:
break;
default:
- DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
- "5G Radio Chip Rev 0x%02X is not "
- "supported by this driver\n",
- ah->hw_version.analog5GhzRev);
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "Radio Chip Rev 0x%02X not supported\n",
+ val & AR_RADIO_SREV_MAJOR);
return -EOPNOTSUPP;
}
@@ -513,12 +534,8 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
ah->macaddr[2 * i] = eeval >> 8;
ah->macaddr[2 * i + 1] = eeval & 0xff;
}
- if (sum == 0 || sum == 0xffff * 3) {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "mac address read failed: %pM\n",
- ah->macaddr);
+ if (sum == 0 || sum == 0xffff * 3)
return -EADDRNOTAVAIL;
- }
return 0;
}
@@ -575,11 +592,8 @@ static int ath9k_hw_post_attach(struct ath_hw *ah)
{
int ecode;
- if (!ath9k_hw_chip_test(ah)) {
- DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
- "hardware self-test failed\n");
+ if (!ath9k_hw_chip_test(ah))
return -ENODEV;
- }
ecode = ath9k_hw_rf_claim(ah);
if (ecode != 0)
@@ -617,17 +631,14 @@ static struct ath_hw *ath9k_hw_do_attach(u16 devid, struct ath_softc *sc,
ath9k_hw_set_defaults(ah);
- if (ah->config.intr_mitigation != 0)
- ah->intr_mitigation = true;
-
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
- DPRINTF(sc, ATH_DBG_RESET, "Couldn't reset chip\n");
+ DPRINTF(sc, ATH_DBG_FATAL, "Couldn't reset chip\n");
ecode = -EIO;
goto bad;
}
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
- DPRINTF(sc, ATH_DBG_RESET, "Couldn't wakeup chip\n");
+ DPRINTF(sc, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
ecode = -EIO;
goto bad;
}
@@ -650,7 +661,7 @@ static struct ath_hw *ath9k_hw_do_attach(u16 devid, struct ath_softc *sc,
(ah->hw_version.macVersion != AR_SREV_VERSION_5416_PCIE) &&
(ah->hw_version.macVersion != AR_SREV_VERSION_9160) &&
(!AR_SREV_9100(ah)) && (!AR_SREV_9280(ah)) && (!AR_SREV_9285(ah))) {
- DPRINTF(sc, ATH_DBG_RESET,
+ DPRINTF(sc, ATH_DBG_FATAL,
"Mac Chip Rev 0x%02x.%x is not supported by "
"this driver\n", ah->hw_version.macVersion,
ah->hw_version.macRev);
@@ -690,10 +701,6 @@ static struct ath_hw *ath9k_hw_do_attach(u16 devid, struct ath_softc *sc,
if (AR_SREV_9280_10_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
- DPRINTF(sc, ATH_DBG_RESET,
- "This Mac Chip Rev 0x%02x.%x is \n",
- ah->hw_version.macVersion, ah->hw_version.macRev);
-
if (AR_SREV_9285_12_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
@@ -859,11 +866,7 @@ static struct ath_hw *ath9k_hw_do_attach(u16 devid, struct ath_softc *sc,
if (AR_SREV_9280_20(ah))
ath9k_hw_init_txgain_ini(ah);
- if (!ath9k_hw_fill_cap_info(ah)) {
- DPRINTF(sc, ATH_DBG_RESET, "failed ath9k_hw_fill_cap_info\n");
- ecode = -EINVAL;
- goto bad;
- }
+ ath9k_hw_fill_cap_info(ah);
if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes)) {
@@ -885,8 +888,8 @@ static struct ath_hw *ath9k_hw_do_attach(u16 devid, struct ath_softc *sc,
ecode = ath9k_hw_init_macaddr(ah);
if (ecode != 0) {
- DPRINTF(sc, ATH_DBG_RESET,
- "failed initializing mac address\n");
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "Failed to initialize MAC address\n");
goto bad;
}
@@ -1054,7 +1057,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (ah->intr_mitigation)
+ if (ah->config.intr_mitigation)
ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
else
ah->mask_reg |= AR_IMR_RXOK;
@@ -1203,23 +1206,23 @@ static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
switch (ah->hw_version.devid) {
case AR9280_DEVID_PCI:
if (reg == 0x7894) {
- DPRINTF(ah->ah_sc, ATH_DBG_ANY,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
"ini VAL: %x EEPROM: %x\n", value,
(pBase->version & 0xff));
if ((pBase->version & 0xff) > 0x0a) {
- DPRINTF(ah->ah_sc, ATH_DBG_ANY,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
"PWDCLKIND: %d\n",
pBase->pwdclkind);
value &= ~AR_AN_TOP2_PWDCLKIND;
value |= AR_AN_TOP2_PWDCLKIND &
(pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
} else {
- DPRINTF(ah->ah_sc, ATH_DBG_ANY,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
"PWDCLKIND Earlier Rev\n");
}
- DPRINTF(ah->ah_sc, ATH_DBG_ANY,
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
"final ini VAL: %x\n", value);
}
break;
@@ -1249,6 +1252,21 @@ static void ath9k_olc_init(struct ath_hw *ah)
ah->PDADCdelta = 0;
}
+static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
+ struct ath9k_channel *chan)
+{
+ u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
+
+ if (IS_CHAN_B(chan))
+ ctl |= CTL_11B;
+ else if (IS_CHAN_G(chan))
+ ctl |= CTL_11G;
+ else
+ ctl |= CTL_11A;
+
+ return ctl;
+}
+
static int ath9k_hw_process_ini(struct ath_hw *ah,
struct ath9k_channel *chan,
enum ath9k_ht_macmode macmode)
@@ -1256,7 +1274,6 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
int i, regWrites = 0;
struct ieee80211_channel *channel = chan->chan;
u32 modesIndex, freqIndex;
- int status;
switch (chan->chanmode) {
case CHANNEL_A:
@@ -1327,8 +1344,7 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
if (AR_SREV_9280(ah))
REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
- if (AR_SREV_9280(ah) || (AR_SREV_9285(ah) &&
- AR_SREV_9285_12_OR_LATER(ah)))
+ if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah))
REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
for (i = 0; i < ah->iniCommon.ia_rows; i++) {
@@ -1359,20 +1375,15 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
if (OLC_FOR_AR9280_20_LATER)
ath9k_olc_init(ah);
- status = ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(ah, chan),
- channel->max_antenna_gain * 2,
- channel->max_power * 2,
- min((u32) MAX_RATE_POWER,
- (u32) ah->regulatory.power_limit));
- if (status != 0) {
- DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
- "error init'ing transmit power\n");
- return -EIO;
- }
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(&ah->regulatory, chan),
+ channel->max_antenna_gain * 2,
+ channel->max_power * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) ah->regulatory.power_limit));
if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
- DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"ar5416SetRfRegs failed\n");
return -EIO;
}
@@ -1600,11 +1611,9 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
switch (type) {
case ATH9K_RESET_POWER_ON:
return ath9k_hw_set_reset_power_on(ah);
- break;
case ATH9K_RESET_WARM:
case ATH9K_RESET_COLD:
return ath9k_hw_set_reset(ah, type);
- break;
default:
return false;
}
@@ -1678,7 +1687,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) {
- DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"Could not kill baseband RX\n");
return false;
}
@@ -1686,29 +1695,21 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
ath9k_hw_set_regs(ah, chan, macmode);
if (AR_SREV_9280_10_OR_LATER(ah)) {
- if (!(ath9k_hw_ar9280_set_channel(ah, chan))) {
- DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
- "failed to set channel\n");
- return false;
- }
+ ath9k_hw_ar9280_set_channel(ah, chan);
} else {
if (!(ath9k_hw_set_channel(ah, chan))) {
- DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
- "failed to set channel\n");
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "Failed to set channel\n");
return false;
}
}
- if (ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(ah, chan),
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(&ah->regulatory, chan),
channel->max_antenna_gain * 2,
channel->max_power * 2,
min((u32) MAX_RATE_POWER,
- (u32) ah->regulatory.power_limit)) != 0) {
- DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
- "error init'ing transmit power\n");
- return false;
- }
+ (u32) ah->regulatory.power_limit));
synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
if (IS_CHAN_B(chan))
@@ -2199,14 +2200,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->txchainmask = sc->tx_chainmask;
ah->rxchainmask = sc->rx_chainmask;
- if (AR_SREV_9285(ah)) {
- ah->txchainmask &= 0x1;
- ah->rxchainmask &= 0x1;
- } else if (AR_SREV_9280(ah)) {
- ah->txchainmask &= 0x3;
- ah->rxchainmask &= 0x3;
- }
-
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
@@ -2242,7 +2235,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_mark_phy_inactive(ah);
if (!ath9k_hw_chip_reset(ah, chan)) {
- DPRINTF(ah->ah_sc, ATH_DBG_RESET, "chip reset failed\n");
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Chip reset failed\n");
return -EINVAL;
}
@@ -2304,13 +2297,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- if (!(ath9k_hw_ar9280_set_channel(ah, chan)))
- return -EIO;
- } else {
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ ath9k_hw_ar9280_set_channel(ah, chan);
+ else
if (!(ath9k_hw_set_channel(ah, chan)))
return -EIO;
- }
for (i = 0; i < AR_NUM_DCU; i++)
REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
@@ -2335,8 +2326,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_WRITE(ah, AR_OBS, 8);
- if (ah->intr_mitigation) {
-
+ if (ah->config.intr_mitigation) {
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
}
@@ -2385,8 +2375,8 @@ bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
u32 keyType;
if (entry >= ah->caps.keycache_size) {
- DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
- "entry %u out of range\n", entry);
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "keychache entry %u out of range\n", entry);
return false;
}
@@ -2422,8 +2412,8 @@ bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
u32 macHi, macLo;
if (entry >= ah->caps.keycache_size) {
- DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
- "entry %u out of range\n", entry);
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "keychache entry %u out of range\n", entry);
return false;
}
@@ -2454,8 +2444,8 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
u32 keyType;
if (entry >= pCap->keycache_size) {
- DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
- "entry %u out of range\n", entry);
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "keycache entry %u out of range\n", entry);
return false;
}
@@ -2465,7 +2455,7 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
break;
case ATH9K_CIPHER_AES_CCM:
if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
- DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
+ DPRINTF(ah->ah_sc, ATH_DBG_ANY,
"AES-CCM not supported by mac rev 0x%x\n",
ah->hw_version.macRev);
return false;
@@ -2476,20 +2466,20 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
keyType = AR_KEYTABLE_TYPE_TKIP;
if (ATH9K_IS_MIC_ENABLED(ah)
&& entry + 64 >= pCap->keycache_size) {
- DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
+ DPRINTF(ah->ah_sc, ATH_DBG_ANY,
"entry %u inappropriate for TKIP\n", entry);
return false;
}
break;
case ATH9K_CIPHER_WEP:
- if (k->kv_len < LEN_WEP40) {
- DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
+ if (k->kv_len < WLAN_KEY_LEN_WEP40) {
+ DPRINTF(ah->ah_sc, ATH_DBG_ANY,
"WEP key length %u too small\n", k->kv_len);
return false;
}
- if (k->kv_len <= LEN_WEP40)
+ if (k->kv_len <= WLAN_KEY_LEN_WEP40)
keyType = AR_KEYTABLE_TYPE_40;
- else if (k->kv_len <= LEN_WEP104)
+ else if (k->kv_len <= WLAN_KEY_LEN_WEP104)
keyType = AR_KEYTABLE_TYPE_104;
else
keyType = AR_KEYTABLE_TYPE_128;
@@ -2498,7 +2488,7 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
keyType = AR_KEYTABLE_TYPE_CLR;
break;
default:
- DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"cipher %u not supported\n", k->kv_type);
return false;
}
@@ -2508,7 +2498,7 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
key2 = get_unaligned_le32(k->kv_val + 6);
key3 = get_unaligned_le16(k->kv_val + 10);
key4 = get_unaligned_le32(k->kv_val + 12);
- if (k->kv_len <= LEN_WEP104)
+ if (k->kv_len <= WLAN_KEY_LEN_WEP104)
key4 &= 0xff;
/*
@@ -2716,7 +2706,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
AR_RTC_FORCE_WAKE_EN);
}
if (i == 0) {
- DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"Failed to wakeup in %uus\n", POWER_UP_TIME / 20);
return false;
}
@@ -2737,9 +2727,8 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
"UNDEFINED"
};
- DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, "%s -> %s (%s)\n",
- modes[ah->power_mode], modes[mode],
- setChip ? "set chip " : "");
+ DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s -> %s\n",
+ modes[ah->power_mode], modes[mode]);
switch (mode) {
case ATH9K_PM_AWAKE:
@@ -2753,7 +2742,7 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
ath9k_set_power_network_sleep(ah, setChip);
break;
default:
- DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"Unknown power mode %u\n", mode);
return false;
}
@@ -2943,7 +2932,7 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
*masked = isr & ATH9K_INT_COMMON;
- if (ah->intr_mitigation) {
+ if (ah->config.intr_mitigation) {
if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
*masked |= ATH9K_INT_RX;
}
@@ -3000,6 +2989,7 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
DPRINTF(ah->ah_sc, ATH_DBG_ANY,
"received PCI PERR interrupt\n");
}
+ *masked |= ATH9K_INT_FATAL;
}
if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
@@ -3061,7 +3051,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
}
if (ints & ATH9K_INT_RX) {
mask |= AR_IMR_RXERR;
- if (ah->intr_mitigation)
+ if (ah->config.intr_mitigation)
mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
else
mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
@@ -3259,7 +3249,7 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
/* HW Capabilities */
/*******************/
-bool ath9k_hw_fill_cap_info(struct ath_hw *ah)
+void ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
u16 capField = 0, eeval;
@@ -3343,8 +3333,6 @@ bool ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP;
pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM;
- pCap->hw_caps |= ATH9K_HW_CAP_CHAN_SPREAD;
-
if (ah->config.ht_enable)
pCap->hw_caps |= ATH9K_HW_CAP_HT;
else
@@ -3368,7 +3356,6 @@ bool ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->keycache_size = AR_KEYTABLE_SIZE;
pCap->hw_caps |= ATH9K_HW_CAP_FASTCC;
- pCap->num_mr_retries = 4;
pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
if (AR_SREV_9285_10_OR_LATER(ah))
@@ -3378,14 +3365,6 @@ bool ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->num_gpio_pins = AR_NUM_GPIO;
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- pCap->hw_caps |= ATH9K_HW_CAP_WOW;
- pCap->hw_caps |= ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
- } else {
- pCap->hw_caps &= ~ATH9K_HW_CAP_WOW;
- pCap->hw_caps &= ~ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
- }
-
if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
pCap->hw_caps |= ATH9K_HW_CAP_CST;
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
@@ -3411,7 +3390,8 @@ bool ath9k_hw_fill_cap_info(struct ath_hw *ah)
(ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE) ||
(ah->hw_version.macVersion == AR_SREV_VERSION_9160) ||
(ah->hw_version.macVersion == AR_SREV_VERSION_9100) ||
- (ah->hw_version.macVersion == AR_SREV_VERSION_9280))
+ (ah->hw_version.macVersion == AR_SREV_VERSION_9280) ||
+ (ah->hw_version.macVersion == AR_SREV_VERSION_9285))
pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
else
pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
@@ -3445,8 +3425,6 @@ bool ath9k_hw_fill_cap_info(struct ath_hw *ah)
ah->btactive_gpio = 6;
ah->wlanactive_gpio = 5;
}
-
- return true;
}
bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
@@ -3754,22 +3732,19 @@ bool ath9k_hw_disable(struct ath_hw *ah)
return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD);
}
-bool ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
+void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
{
struct ath9k_channel *chan = ah->curchan;
struct ieee80211_channel *channel = chan->chan;
ah->regulatory.power_limit = min(limit, (u32) MAX_RATE_POWER);
- if (ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(ah, chan),
- channel->max_antenna_gain * 2,
- channel->max_power * 2,
- min((u32) MAX_RATE_POWER,
- (u32) ah->regulatory.power_limit)) != 0)
- return false;
-
- return true;
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(&ah->regulatory, chan),
+ channel->max_antenna_gain * 2,
+ channel->max_power * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) ah->regulatory.power_limit));
}
void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac)
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 0b594e0ee26..dd8508ef6e0 100644
--- a/drivers/net/wireless/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -25,10 +25,11 @@
#include "ani.h"
#include "eeprom.h"
#include "calib.h"
-#include "regd.h"
#include "reg.h"
#include "phy.h"
+#include "../regd.h"
+
#define ATHEROS_VENDOR_ID 0x168c
#define AR5416_DEVID_PCI 0x0023
#define AR5416_DEVID_PCIE 0x0024
@@ -124,29 +125,24 @@ enum wireless_mode {
};
enum ath9k_hw_caps {
- ATH9K_HW_CAP_CHAN_SPREAD = BIT(0),
- ATH9K_HW_CAP_MIC_AESCCM = BIT(1),
- ATH9K_HW_CAP_MIC_CKIP = BIT(2),
- ATH9K_HW_CAP_MIC_TKIP = BIT(3),
- ATH9K_HW_CAP_CIPHER_AESCCM = BIT(4),
- ATH9K_HW_CAP_CIPHER_CKIP = BIT(5),
- ATH9K_HW_CAP_CIPHER_TKIP = BIT(6),
- ATH9K_HW_CAP_VEOL = BIT(7),
- ATH9K_HW_CAP_BSSIDMASK = BIT(8),
- ATH9K_HW_CAP_MCAST_KEYSEARCH = BIT(9),
- ATH9K_HW_CAP_CHAN_HALFRATE = BIT(10),
- ATH9K_HW_CAP_CHAN_QUARTERRATE = BIT(11),
- ATH9K_HW_CAP_HT = BIT(12),
- ATH9K_HW_CAP_GTT = BIT(13),
- ATH9K_HW_CAP_FASTCC = BIT(14),
- ATH9K_HW_CAP_RFSILENT = BIT(15),
- ATH9K_HW_CAP_WOW = BIT(16),
- ATH9K_HW_CAP_CST = BIT(17),
- ATH9K_HW_CAP_ENHANCEDPM = BIT(18),
- ATH9K_HW_CAP_AUTOSLEEP = BIT(19),
- ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(20),
- ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT = BIT(21),
- ATH9K_HW_CAP_BT_COEX = BIT(22)
+ ATH9K_HW_CAP_MIC_AESCCM = BIT(0),
+ ATH9K_HW_CAP_MIC_CKIP = BIT(1),
+ ATH9K_HW_CAP_MIC_TKIP = BIT(2),
+ ATH9K_HW_CAP_CIPHER_AESCCM = BIT(3),
+ ATH9K_HW_CAP_CIPHER_CKIP = BIT(4),
+ ATH9K_HW_CAP_CIPHER_TKIP = BIT(5),
+ ATH9K_HW_CAP_VEOL = BIT(6),
+ ATH9K_HW_CAP_BSSIDMASK = BIT(7),
+ ATH9K_HW_CAP_MCAST_KEYSEARCH = BIT(8),
+ ATH9K_HW_CAP_HT = BIT(9),
+ ATH9K_HW_CAP_GTT = BIT(10),
+ ATH9K_HW_CAP_FASTCC = BIT(11),
+ ATH9K_HW_CAP_RFSILENT = BIT(12),
+ ATH9K_HW_CAP_CST = BIT(13),
+ ATH9K_HW_CAP_ENHANCEDPM = BIT(14),
+ ATH9K_HW_CAP_AUTOSLEEP = BIT(15),
+ ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(16),
+ ATH9K_HW_CAP_BT_COEX = BIT(17)
};
enum ath9k_capability_type {
@@ -166,7 +162,6 @@ struct ath9k_hw_capabilities {
u16 keycache_size;
u16 low_5ghz_chan, high_5ghz_chan;
u16 low_2ghz_chan, high_2ghz_chan;
- u16 num_mr_retries;
u16 rts_aggr_limit;
u8 tx_chainmask;
u8 rx_chainmask;
@@ -184,11 +179,8 @@ struct ath9k_ops_config {
int ack_6mb;
int cwm_ignore_extcca;
u8 pcie_powersave_enable;
- u8 pcie_l1skp_enable;
u8 pcie_clock_req;
u32 pcie_waen;
- int pcie_power_reset;
- u8 pcie_restore;
u8 analog_shiftreg;
u8 ht_enable;
u32 ofdm_trig_low;
@@ -196,17 +188,10 @@ struct ath9k_ops_config {
u32 cck_trig_high;
u32 cck_trig_low;
u32 enable_ani;
- u8 noise_immunity_level;
- u32 ofdm_weaksignal_det;
- u32 cck_weaksignal_thr;
- u8 spur_immunity_level;
- u8 firstep_level;
- int8_t rssi_thr_high;
- int8_t rssi_thr_low;
u16 diversity_control;
u16 antenna_switch_swap;
int serialize_regmode;
- int intr_mitigation;
+ bool intr_mitigation;
#define SPUR_DISABLE 0
#define SPUR_ENABLE_IOCTL 1
#define SPUR_ENABLE_EEPROM 2
@@ -281,13 +266,6 @@ enum ath9k_int {
#define CHANNEL_HT40PLUS 0x20000
#define CHANNEL_HT40MINUS 0x40000
-#define CHANNEL_INTERFERENCE 0x01
-#define CHANNEL_DFS 0x02
-#define CHANNEL_4MS_LIMIT 0x04
-#define CHANNEL_DFS_CLEAR 0x08
-#define CHANNEL_DISALLOW_ADHOC 0x10
-#define CHANNEL_PER_11D_ADHOC 0x20
-
#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
@@ -318,10 +296,6 @@ struct ath9k_channel {
int16_t rawNoiseFloor;
};
-#define IS_CHAN_A(_c) ((((_c)->channelFlags & CHANNEL_A) == CHANNEL_A) || \
- (((_c)->channelFlags & CHANNEL_A_HT20) == CHANNEL_A_HT20) || \
- (((_c)->channelFlags & CHANNEL_A_HT40PLUS) == CHANNEL_A_HT40PLUS) || \
- (((_c)->channelFlags & CHANNEL_A_HT40MINUS) == CHANNEL_A_HT40MINUS))
#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
(((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
(((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
@@ -329,7 +303,6 @@ struct ath9k_channel {
#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
-#define IS_CHAN_PASSIVE(_c) (((_c)->channelFlags & CHANNEL_PASSIVE) != 0)
#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
#define IS_CHAN_A_5MHZ_SPACED(_c) \
@@ -420,7 +393,7 @@ struct ath_hw {
struct ath9k_hw_version hw_version;
struct ath9k_ops_config config;
struct ath9k_hw_capabilities caps;
- struct ath9k_regulatory regulatory;
+ struct ath_regulatory regulatory;
struct ath9k_channel channels[38];
struct ath9k_channel *curchan;
@@ -463,14 +436,14 @@ struct ath_hw {
enum ath9k_ant_setting diversity_control;
/* Calibration */
- enum hal_cal_types supp_cals;
- struct hal_cal_list iq_caldata;
- struct hal_cal_list adcgain_caldata;
- struct hal_cal_list adcdc_calinitdata;
- struct hal_cal_list adcdc_caldata;
- struct hal_cal_list *cal_list;
- struct hal_cal_list *cal_list_last;
- struct hal_cal_list *cal_list_curr;
+ enum ath9k_cal_types supp_cals;
+ struct ath9k_cal_list iq_caldata;
+ struct ath9k_cal_list adcgain_caldata;
+ struct ath9k_cal_list adcdc_calinitdata;
+ struct ath9k_cal_list adcdc_caldata;
+ struct ath9k_cal_list *cal_list;
+ struct ath9k_cal_list *cal_list_last;
+ struct ath9k_cal_list *cal_list_curr;
#define totalPowerMeasI meas0.unsign
#define totalPowerMeasQ meas1.unsign
#define totalIqCorrMeas meas2.sign
@@ -540,7 +513,6 @@ struct ath_hw {
enum ath9k_ani_cmd ani_function;
u32 intr_txqs;
- bool intr_mitigation;
enum ath9k_ht_extprotspacing extprotspacing;
u8 txchainmask;
u8 rxchainmask;
@@ -573,7 +545,7 @@ struct ath_hw *ath9k_hw_attach(u16 devid, struct ath_softc *sc, int *error);
void ath9k_hw_rfdetach(struct ath_hw *ah);
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange);
-bool ath9k_hw_fill_cap_info(struct ath_hw *ah);
+void ath9k_hw_fill_cap_info(struct ath_hw *ah);
bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 *result);
bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
@@ -608,7 +580,8 @@ bool ath9k_hw_setantennaswitch(struct ath_hw *ah,
bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
u32 ath9k_hw_reverse_bits(u32 val, u32 n);
bool ath9k_get_channel_edges(struct ath_hw *ah, u16 flags, u16 *low, u16 *high);
-u16 ath9k_hw_computetxtime(struct ath_hw *ah, struct ath_rate_table *rates,
+u16 ath9k_hw_computetxtime(struct ath_hw *ah,
+ const struct ath_rate_table *rates,
u32 frameLen, u16 rateix, bool shortPreamble);
void ath9k_hw_get_channel_centers(struct ath_hw *ah,
struct ath9k_channel *chan,
@@ -617,7 +590,7 @@ u32 ath9k_hw_getrxfilter(struct ath_hw *ah);
void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits);
bool ath9k_hw_phy_disable(struct ath_hw *ah);
bool ath9k_hw_disable(struct ath_hw *ah);
-bool ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit);
+void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit);
void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac);
void ath9k_hw_setopmode(struct ath_hw *ah);
void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
diff --git a/drivers/net/wireless/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/initvals.h
index e2f0a34b79a..e2f0a34b79a 100644
--- a/drivers/net/wireless/ath9k/initvals.h
+++ b/drivers/net/wireless/ath/ath9k/initvals.h
diff --git a/drivers/net/wireless/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index e0a6dee4583..8ae4ec21667 100644
--- a/drivers/net/wireless/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -49,7 +49,7 @@ bool ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
bool ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %u\n", q);
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Enable TXE on queue: %u\n", q);
REG_WRITE(ah, AR_Q_TXE, 1 << q);
@@ -110,13 +110,15 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
if (q >= pCap->total_queues) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Stopping TX DMA, "
+ "invalid queue: %u\n", q);
return false;
}
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue\n");
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Stopping TX DMA, "
+ "inactive queue: %u\n", q);
return false;
}
@@ -146,7 +148,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
break;
DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
- "TSF have moved while trying to set "
+ "TSF has moved while trying to set "
"quiet time TSF: 0x%08x\n", tsfLow);
}
@@ -158,8 +160,8 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
wait = wait_time;
while (ath9k_hw_numtxpending(ah, q)) {
if ((--wait) == 0) {
- DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
- "Failed to stop Tx DMA in 100 "
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
+ "Failed to stop TX DMA in 100 "
"msec after killing last frame\n");
break;
}
@@ -454,17 +456,19 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
struct ath9k_tx_queue_info *qi;
if (q >= pCap->total_queues) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set TXQ properties, "
+ "invalid queue: %u\n", q);
return false;
}
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue\n");
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set TXQ properties, "
+ "inactive queue: %u\n", q);
return false;
}
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %p\n", qi);
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
qi->tqi_ver = qinfo->tqi_ver;
qi->tqi_subtype = qinfo->tqi_subtype;
@@ -521,13 +525,15 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
struct ath9k_tx_queue_info *qi;
if (q >= pCap->total_queues) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Get TXQ properties, "
+ "invalid queue: %u\n", q);
return false;
}
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue\n");
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Get TXQ properties, "
+ "inactive queue: %u\n", q);
return false;
}
@@ -575,22 +581,23 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
ATH9K_TX_QUEUE_INACTIVE)
break;
if (q == pCap->total_queues) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
- "no available tx queue\n");
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "No available TX queue\n");
return -1;
}
break;
default:
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "bad tx queue type %u\n", type);
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Invalid TX queue type: %u\n",
+ type);
return -1;
}
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %u\n", q);
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
qi = &ah->txq[q];
if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
- "tx queue %u already active\n", q);
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "TX queue: %u already active\n", q);
return -1;
}
memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -620,16 +627,18 @@ bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
struct ath9k_tx_queue_info *qi;
if (q >= pCap->total_queues) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TXQ, "
+ "invalid queue: %u\n", q);
return false;
}
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue %u\n", q);
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TXQ, "
+ "inactive queue: %u\n", q);
return false;
}
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "release queue %u\n", q);
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
ah->txok_interrupt_mask &= ~(1 << q);
@@ -650,17 +659,19 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
u32 cwMin, chanCwMin, value;
if (q >= pCap->total_queues) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TXQ, "
+ "invalid queue: %u\n", q);
return false;
}
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue %u\n", q);
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TXQ, "
+ "inactive queue: %u\n", q);
return true;
}
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "reset queue %u\n", q);
+ DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
if (chan && IS_CHAN_B(chan))
@@ -894,7 +905,7 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
reg = REG_READ(ah, AR_OBS_BUS_1);
DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
- "rx failed to go idle in 10 ms RXSM=0x%x\n", reg);
+ "RX failed to go idle in 10 ms RXSM=0x%x\n", reg);
return false;
}
@@ -949,8 +960,8 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
}
if (i == 0) {
- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
- "dma failed to stop in %d ms "
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "DMA failed to stop in %d ms "
"AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
AH_RX_STOP_DMA_TIMEOUT / 1000,
REG_READ(ah, AR_CR),
diff --git a/drivers/net/wireless/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 1176bce8b76..1176bce8b76 100644
--- a/drivers/net/wireless/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 13d4e6756c9..f7baa406918 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -35,14 +35,14 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
#define CHAN2G(_freq, _idx) { \
.center_freq = (_freq), \
.hw_value = (_idx), \
- .max_power = 30, \
+ .max_power = 20, \
}
#define CHAN5G(_freq, _idx) { \
.band = IEEE80211_BAND_5GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
- .max_power = 30, \
+ .max_power = 20, \
}
/* Some 2 GHz radios are actually tunable on 2312-2732
@@ -189,7 +189,7 @@ static u8 parse_mpdudensity(u8 mpdudensity)
static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
{
- struct ath_rate_table *rate_table = NULL;
+ const struct ath_rate_table *rate_table = NULL;
struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
int i, maxrates;
@@ -280,14 +280,13 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
if (r) {
DPRINTF(sc, ATH_DBG_FATAL,
"Unable to reset channel (%u Mhz) "
- "reset status %u\n",
+ "reset status %d\n",
channel->center_freq, r);
spin_unlock_bh(&sc->sc_resetlock);
return r;
}
spin_unlock_bh(&sc->sc_resetlock);
- sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
sc->sc_flags &= ~SC_OP_FULL_RESET;
if (ath_startrecv(sc) != 0) {
@@ -330,6 +329,12 @@ static void ath_ani_calibrate(unsigned long data)
if (sc->sc_flags & SC_OP_SCANNING)
goto set_timer;
+ /* Only calibrate if awake */
+ if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
+ goto set_timer;
+
+ ath9k_ps_wakeup(sc);
+
/* Long calibration runs independently of short calibration. */
if ((timestamp - sc->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
longcal = true;
@@ -368,31 +373,21 @@ static void ath_ani_calibrate(unsigned long data)
/* Perform calibration if necessary */
if (longcal || shortcal) {
- bool iscaldone = false;
-
- if (ath9k_hw_calibrate(ah, ah->curchan,
- sc->rx_chainmask, longcal,
- &iscaldone)) {
- if (longcal)
- sc->ani.noise_floor =
- ath9k_hw_getchan_noise(ah,
- ah->curchan);
-
- DPRINTF(sc, ATH_DBG_ANI,
- "calibrate chan %u/%x nf: %d\n",
- ah->curchan->channel,
- ah->curchan->channelFlags,
- sc->ani.noise_floor);
- } else {
- DPRINTF(sc, ATH_DBG_ANY,
- "calibrate chan %u/%x failed\n",
- ah->curchan->channel,
- ah->curchan->channelFlags);
- }
- sc->ani.caldone = iscaldone;
+ sc->ani.caldone = ath9k_hw_calibrate(ah, ah->curchan,
+ sc->rx_chainmask, longcal);
+
+ if (longcal)
+ sc->ani.noise_floor = ath9k_hw_getchan_noise(ah,
+ ah->curchan);
+
+ DPRINTF(sc, ATH_DBG_ANI," calibrate chan %u/%x nf: %d\n",
+ ah->curchan->channel, ah->curchan->channelFlags,
+ sc->ani.noise_floor);
}
}
+ ath9k_ps_restore(sc);
+
set_timer:
/*
* Set timer interval based on previous results.
@@ -408,6 +403,18 @@ set_timer:
mod_timer(&sc->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
}
+static void ath_start_ani(struct ath_softc *sc)
+{
+ unsigned long timestamp = jiffies_to_msecs(jiffies);
+
+ sc->ani.longcal_timer = timestamp;
+ sc->ani.shortcal_timer = timestamp;
+ sc->ani.checkani_timer = timestamp;
+
+ mod_timer(&sc->ani.timer,
+ jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
+}
+
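/*
 * Hedged sketch of the wakeup/restore bracket ath_ani_calibrate() and
 * ath9k_tasklet() now use: hold the chip awake for the duration of the
 * hardware access, then drop the reference so power save can kick back
 * in. The body is a placeholder.
 */
static void my_do_hw_work(struct ath_softc *sc)
{
	ath9k_ps_wakeup(sc);

	/* ... register access, calibration, TX/RX processing ... */

	ath9k_ps_restore(sc);
}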
/*
* Update tx/rx chainmask. For legacy association,
* hard code chainmask to 1x1, for 11n association, use
@@ -416,7 +423,6 @@ set_timer:
*/
void ath_update_chainmask(struct ath_softc *sc, int is_ht)
{
- sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
if (is_ht ||
(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)) {
sc->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
@@ -436,12 +442,12 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
an = (struct ath_node *)sta->drv_priv;
- if (sc->sc_flags & SC_OP_TXAGGR)
+ if (sc->sc_flags & SC_OP_TXAGGR) {
ath_tx_node_init(sc, an);
-
- an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
- sta->ht_cap.ampdu_factor);
- an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
+ an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
+ sta->ht_cap.ampdu_factor);
+ an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
+ }
}
static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
@@ -457,133 +463,130 @@ static void ath9k_tasklet(unsigned long data)
struct ath_softc *sc = (struct ath_softc *)data;
u32 status = sc->intrstatus;
+ ath9k_ps_wakeup(sc);
+
if (status & ATH9K_INT_FATAL) {
- /* need a chip reset */
ath_reset(sc, false);
+ ath9k_ps_restore(sc);
return;
- } else {
+ }
- if (status &
- (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
- spin_lock_bh(&sc->rx.rxflushlock);
- ath_rx_tasklet(sc, 0);
- spin_unlock_bh(&sc->rx.rxflushlock);
- }
- /* XXX: optimize this */
- if (status & ATH9K_INT_TX)
- ath_tx_tasklet(sc);
+ if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
+ spin_lock_bh(&sc->rx.rxflushlock);
+ ath_rx_tasklet(sc, 0);
+ spin_unlock_bh(&sc->rx.rxflushlock);
+ }
+
+ if (status & ATH9K_INT_TX)
+ ath_tx_tasklet(sc);
+
+ if ((status & ATH9K_INT_TSFOOR) &&
+ (sc->hw->conf.flags & IEEE80211_CONF_PS)) {
+ /*
+ * TSF sync does not look correct; remain awake to sync with
+ * the next Beacon.
+ */
+ DPRINTF(sc, ATH_DBG_PS, "TSFOOR - Sync with next Beacon\n");
+ sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC;
}
/* re-enable hardware interrupt */
ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_ps_restore(sc);
}
irqreturn_t ath_isr(int irq, void *dev)
{
+#define SCHED_INTR ( \
+ ATH9K_INT_FATAL | \
+ ATH9K_INT_RXORN | \
+ ATH9K_INT_RXEOL | \
+ ATH9K_INT_RX | \
+ ATH9K_INT_TX | \
+ ATH9K_INT_BMISS | \
+ ATH9K_INT_CST | \
+ ATH9K_INT_TSFOOR)
+
struct ath_softc *sc = dev;
struct ath_hw *ah = sc->sc_ah;
enum ath9k_int status;
bool sched = false;
- do {
- if (sc->sc_flags & SC_OP_INVALID) {
- /*
- * The hardware is not ready/present, don't
- * touch anything. Note this can happen early
- * on if the IRQ is shared.
- */
- return IRQ_NONE;
- }
- if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
- return IRQ_NONE;
- }
+ /*
+ * The hardware is not ready/present, don't
+ * touch anything. Note this can happen early
+ * on if the IRQ is shared.
+ */
+ if (sc->sc_flags & SC_OP_INVALID)
+ return IRQ_NONE;
- /*
- * Figure out the reason(s) for the interrupt. Note
- * that the hal returns a pseudo-ISR that may include
- * bits we haven't explicitly enabled so we mask the
- * value to insure we only process bits we requested.
- */
- ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
- status &= sc->imask; /* discard unasked-for bits */
+ /* shared irq, not for us */
- /*
- * If there are no status bits set, then this interrupt was not
- * for me (should have been caught above).
- */
- if (!status)
- return IRQ_NONE;
+ if (!ath9k_hw_intrpend(ah))
+ return IRQ_NONE;
- sc->intrstatus = status;
- ath9k_ps_wakeup(sc);
+ /*
+ * Figure out the reason(s) for the interrupt. Note
+ * that the hal returns a pseudo-ISR that may include
+ * bits we haven't explicitly enabled so we mask the
+ * value to insure we only process bits we requested.
+ */
+ ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
+ status &= sc->imask; /* discard unasked-for bits */
- if (status & ATH9K_INT_FATAL) {
- /* need a chip reset */
- sched = true;
- } else if (status & ATH9K_INT_RXORN) {
- /* need a chip reset */
- sched = true;
- } else {
- if (status & ATH9K_INT_SWBA) {
- /* schedule a tasklet for beacon handling */
- tasklet_schedule(&sc->bcon_tasklet);
- }
- if (status & ATH9K_INT_RXEOL) {
- /*
- * NB: the hardware should re-read the link when
- * RXE bit is written, but it doesn't work
- * at least on older hardware revs.
- */
- sched = true;
- }
+ /*
+ * If there are no status bits set, then this interrupt was not
+ * for me (should have been caught above).
+ */
+ if (!status)
+ return IRQ_NONE;
- if (status & ATH9K_INT_TXURN)
- /* bump tx trigger level */
- ath9k_hw_updatetxtriglevel(ah, true);
- /* XXX: optimize this */
- if (status & ATH9K_INT_RX)
- sched = true;
- if (status & ATH9K_INT_TX)
- sched = true;
- if (status & ATH9K_INT_BMISS)
- sched = true;
- /* carrier sense timeout */
- if (status & ATH9K_INT_CST)
- sched = true;
- if (status & ATH9K_INT_MIB) {
- /*
- * Disable interrupts until we service the MIB
- * interrupt; otherwise it will continue to
- * fire.
- */
- ath9k_hw_set_interrupts(ah, 0);
- /*
- * Let the hal handle the event. We assume
- * it will clear whatever condition caused
- * the interrupt.
- */
- ath9k_hw_procmibevent(ah, &sc->nodestats);
- ath9k_hw_set_interrupts(ah, sc->imask);
- }
- if (status & ATH9K_INT_TIM_TIMER) {
- if (!(ah->caps.hw_caps &
- ATH9K_HW_CAP_AUTOSLEEP)) {
- /* Clear RxAbort bit so that we can
- * receive frames */
- ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
- ath9k_hw_setrxabort(ah, 0);
- sched = true;
- sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
- }
- }
- if (status & ATH9K_INT_TSFOOR) {
- /* FIXME: Handle this interrupt for power save */
- sched = true;
- }
+ /* Cache the status */
+ sc->intrstatus = status;
+
+ if (status & SCHED_INTR)
+ sched = true;
+
+ /*
+ * If a FATAL or RXORN interrupt is received, we have to reset the
+ * chip immediately.
+ */
+ if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN))
+ goto chip_reset;
+
+ if (status & ATH9K_INT_SWBA)
+ tasklet_schedule(&sc->bcon_tasklet);
+
+ if (status & ATH9K_INT_TXURN)
+ ath9k_hw_updatetxtriglevel(ah, true);
+
+ if (status & ATH9K_INT_MIB) {
+ /*
+ * Disable interrupts until we service the MIB
+ * interrupt; otherwise it will continue to
+ * fire.
+ */
+ ath9k_hw_set_interrupts(ah, 0);
+ /*
+ * Let the hal handle the event. We assume
+ * it will clear whatever condition caused
+ * the interrupt.
+ */
+ ath9k_hw_procmibevent(ah, &sc->nodestats);
+ ath9k_hw_set_interrupts(ah, sc->imask);
+ }
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ if (status & ATH9K_INT_TIM_TIMER) {
+ /* Clear RxAbort bit so that we can
+ * receive frames */
+ ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
+ ath9k_hw_setrxabort(sc->sc_ah, 0);
+ sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
}
- ath9k_ps_restore(sc);
- } while (0);
+
+chip_reset:
ath_debug_stat_interrupt(sc, status);
@@ -594,6 +597,8 @@ irqreturn_t ath_isr(int irq, void *dev)
}
return IRQ_HANDLED;
+
+#undef SCHED_INTR
}
static u32 ath_get_extchanmode(struct ath_softc *sc,
@@ -676,7 +681,7 @@ static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key,
memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
if (!ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, NULL)) {
/* TX MIC entry failed. No need to proceed further */
- DPRINTF(sc, ATH_DBG_KEYCACHE,
+ DPRINTF(sc, ATH_DBG_FATAL,
"Setting TX MIC Key Failed\n");
return 0;
}
@@ -909,6 +914,13 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
if (avp->av_opmode == NL80211_IFTYPE_STATION) {
sc->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc);
+
+ /*
+ * Request a re-configuration of Beacon related timers
+ * on the receipt of the first Beacon frame (i.e.,
+ * after time sync with the AP).
+ */
+ sc->sc_flags |= SC_OP_BEACON_SYNC;
}
/* Configure the beacon */
@@ -920,11 +932,9 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
sc->nodestats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
sc->nodestats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
- /* Start ANI */
- mod_timer(&sc->ani.timer,
- jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
+ ath_start_ani(sc);
} else {
- DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISSOC\n");
+ DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
sc->curaid = 0;
}
}
@@ -1098,14 +1108,14 @@ void ath_radio_enable(struct ath_softc *sc)
int r;
ath9k_ps_wakeup(sc);
- spin_lock_bh(&sc->sc_resetlock);
+ ath9k_hw_configpcipowersave(ah, 0);
+ spin_lock_bh(&sc->sc_resetlock);
r = ath9k_hw_reset(ah, ah->curchan, false);
-
if (r) {
DPRINTF(sc, ATH_DBG_FATAL,
"Unable to reset channel %u (%uMhz) ",
- "reset status %u\n",
+ "reset status %d\n",
channel->center_freq, r);
}
spin_unlock_bh(&sc->sc_resetlock);
@@ -1157,12 +1167,13 @@ void ath_radio_disable(struct ath_softc *sc)
if (r) {
DPRINTF(sc, ATH_DBG_FATAL,
"Unable to reset channel %u (%uMhz) "
- "reset status %u\n",
+ "reset status %d\n",
channel->center_freq, r);
}
spin_unlock_bh(&sc->sc_resetlock);
ath9k_hw_phy_disable(ah);
+ ath9k_hw_configpcipowersave(ah, 1);
ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
ath9k_ps_restore(sc);
}
@@ -1181,121 +1192,69 @@ static bool ath_is_rfkill_set(struct ath_softc *sc)
ah->rfkill_polarity;
}
-/* h/w rfkill poll function */
-static void ath_rfkill_poll(struct work_struct *work)
+/* s/w rfkill handlers */
+static int ath_rfkill_set_block(void *data, bool blocked)
{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- rf_kill.rfkill_poll.work);
- bool radio_on;
-
- if (sc->sc_flags & SC_OP_INVALID)
- return;
-
- radio_on = !ath_is_rfkill_set(sc);
-
- /*
- * enable/disable radio only when there is a
- * state change in RF switch
- */
- if (radio_on == !!(sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED)) {
- enum rfkill_state state;
-
- if (sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED) {
- state = radio_on ? RFKILL_STATE_SOFT_BLOCKED
- : RFKILL_STATE_HARD_BLOCKED;
- } else if (radio_on) {
- ath_radio_enable(sc);
- state = RFKILL_STATE_UNBLOCKED;
- } else {
- ath_radio_disable(sc);
- state = RFKILL_STATE_HARD_BLOCKED;
- }
-
- if (state == RFKILL_STATE_HARD_BLOCKED)
- sc->sc_flags |= SC_OP_RFKILL_HW_BLOCKED;
- else
- sc->sc_flags &= ~SC_OP_RFKILL_HW_BLOCKED;
+ struct ath_softc *sc = data;
- rfkill_force_state(sc->rf_kill.rfkill, state);
- }
+ if (blocked)
+ ath_radio_disable(sc);
+ else
+ ath_radio_enable(sc);
- queue_delayed_work(sc->hw->workqueue, &sc->rf_kill.rfkill_poll,
- msecs_to_jiffies(ATH_RFKILL_POLL_INTERVAL));
+ return 0;
}
-/* s/w rfkill handler */
-static int ath_sw_toggle_radio(void *data, enum rfkill_state state)
+static void ath_rfkill_poll_state(struct rfkill *rfkill, void *data)
{
struct ath_softc *sc = data;
+ bool blocked = !!ath_is_rfkill_set(sc);
- switch (state) {
- case RFKILL_STATE_SOFT_BLOCKED:
- if (!(sc->sc_flags & (SC_OP_RFKILL_HW_BLOCKED |
- SC_OP_RFKILL_SW_BLOCKED)))
- ath_radio_disable(sc);
- sc->sc_flags |= SC_OP_RFKILL_SW_BLOCKED;
- return 0;
- case RFKILL_STATE_UNBLOCKED:
- if ((sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED)) {
- sc->sc_flags &= ~SC_OP_RFKILL_SW_BLOCKED;
- if (sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED) {
- DPRINTF(sc, ATH_DBG_FATAL, "Can't turn on the"
- "radio as it is disabled by h/w\n");
- return -EPERM;
- }
- ath_radio_enable(sc);
- }
- return 0;
- default:
- return -EINVAL;
- }
+ if (rfkill_set_hw_state(rfkill, blocked))
+ ath_radio_disable(sc);
+ else
+ ath_radio_enable(sc);
}
/* Init s/w rfkill */
static int ath_init_sw_rfkill(struct ath_softc *sc)
{
- sc->rf_kill.rfkill = rfkill_allocate(wiphy_dev(sc->hw->wiphy),
- RFKILL_TYPE_WLAN);
+ sc->rf_kill.ops.set_block = ath_rfkill_set_block;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ sc->rf_kill.ops.poll = ath_rfkill_poll_state;
+
+ snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
+ "ath9k-%s::rfkill", wiphy_name(sc->hw->wiphy));
+
+ sc->rf_kill.rfkill = rfkill_alloc(sc->rf_kill.rfkill_name,
+ wiphy_dev(sc->hw->wiphy),
+ RFKILL_TYPE_WLAN,
+ &sc->rf_kill.ops, sc);
if (!sc->rf_kill.rfkill) {
DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n");
return -ENOMEM;
}
- snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
- "ath9k-%s::rfkill", wiphy_name(sc->hw->wiphy));
- sc->rf_kill.rfkill->name = sc->rf_kill.rfkill_name;
- sc->rf_kill.rfkill->data = sc;
- sc->rf_kill.rfkill->toggle_radio = ath_sw_toggle_radio;
- sc->rf_kill.rfkill->state = RFKILL_STATE_UNBLOCKED;
- sc->rf_kill.rfkill->user_claim_unsupported = 1;
-
return 0;
}
/* Deinitialize rfkill */
static void ath_deinit_rfkill(struct ath_softc *sc)
{
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
-
if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) {
rfkill_unregister(sc->rf_kill.rfkill);
+ rfkill_destroy(sc->rf_kill.rfkill);
sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED;
- sc->rf_kill.rfkill = NULL;
}
}
static int ath_start_rfkill_poll(struct ath_softc *sc)
{
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- queue_delayed_work(sc->hw->workqueue,
- &sc->rf_kill.rfkill_poll, 0);
-
if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) {
if (rfkill_register(sc->rf_kill.rfkill)) {
DPRINTF(sc, ATH_DBG_FATAL,
"Unable to register rfkill\n");
- rfkill_free(sc->rf_kill.rfkill);
+ rfkill_destroy(sc->rf_kill.rfkill);
/* Deinitialize the device */
ath_cleanup(sc);
@@ -1362,6 +1321,17 @@ void ath_detach(struct ath_softc *sc)
ath9k_ps_restore(sc);
}
+static int ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_regulatory *reg = &sc->sc_ah->regulatory;
+
+ return ath_reg_notifier_apply(wiphy, request, reg);
+}
+
static int ath_init(u16 devid, struct ath_softc *sc)
{
struct ath_hw *ah = NULL;
@@ -1403,7 +1373,7 @@ static int ath_init(u16 devid, struct ath_softc *sc)
/* Get the hardware key cache size. */
sc->keymax = ah->caps.keycache_size;
if (sc->keymax > ATH_KEYMAX) {
- DPRINTF(sc, ATH_DBG_KEYCACHE,
+ DPRINTF(sc, ATH_DBG_ANY,
"Warning, using only %u entries in %u key cache\n",
ATH_KEYMAX, sc->keymax);
sc->keymax = ATH_KEYMAX;
@@ -1416,7 +1386,7 @@ static int ath_init(u16 devid, struct ath_softc *sc)
for (i = 0; i < sc->keymax; i++)
ath9k_hw_keyreset(ah, (u16) i);
- if (ath9k_regd_init(sc->sc_ah))
+ if (error)
goto bad;
/* default to MONITOR mode */
@@ -1545,9 +1515,6 @@ static int ath_init(u16 devid, struct ath_softc *sc)
sc->beacon.bslot_aphy[i] = NULL;
}
- /* save MISC configurations */
- sc->config.swBeaconProcess = 1;
-
/* setup channels and rates */
sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
@@ -1602,9 +1569,6 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
- hw->wiphy->reg_notifier = ath9k_reg_notifier;
- hw->wiphy->strict_regulatory = true;
-
hw->queues = 4;
hw->max_rates = 4;
hw->channel_change_time = 5000;
@@ -1625,8 +1589,8 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
int ath_attach(u16 devid, struct ath_softc *sc)
{
struct ieee80211_hw *hw = sc->hw;
- const struct ieee80211_regdomain *regd;
int error = 0, i;
+ struct ath_regulatory *reg;
DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n");
@@ -1640,6 +1604,13 @@ int ath_attach(u16 devid, struct ath_softc *sc)
ath_set_hw_capab(sc, hw);
+ error = ath_regd_init(&sc->sc_ah->regulatory, sc->hw->wiphy,
+ ath9k_reg_notifier);
+ if (error)
+ return error;
+
+ reg = &sc->sc_ah->regulatory;
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
@@ -1656,41 +1627,20 @@ int ath_attach(u16 devid, struct ath_softc *sc)
goto error_attach;
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
- /* Initialze h/w Rfkill */
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll);
-
/* Initialize s/w rfkill */
error = ath_init_sw_rfkill(sc);
if (error)
goto error_attach;
#endif
- if (ath9k_is_world_regd(sc->sc_ah)) {
- /* Anything applied here (prior to wiphy registration) gets
- * saved on the wiphy orig_* parameters */
- regd = ath9k_world_regdomain(sc->sc_ah);
- hw->wiphy->custom_regulatory = true;
- hw->wiphy->strict_regulatory = false;
- } else {
- /* This gets applied in the case of the absense of CRDA,
- * it's our own custom world regulatory domain, similar to
- * cfg80211's but we enable passive scanning */
- regd = ath9k_default_world_regdomain();
- }
- wiphy_apply_custom_regulatory(hw->wiphy, regd);
- ath9k_reg_apply_radar_flags(hw->wiphy);
- ath9k_reg_apply_world_flags(hw->wiphy, NL80211_REGDOM_SET_BY_DRIVER);
-
INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
sc->wiphy_scheduler_int = msecs_to_jiffies(500);
error = ieee80211_register_hw(hw);
- if (!ath9k_is_world_regd(sc->sc_ah)) {
- error = regulatory_hint(hw->wiphy,
- sc->sc_ah->regulatory.alpha2);
+ if (!ath_is_world_regd(reg)) {
+ error = regulatory_hint(hw->wiphy, reg->alpha2);
if (error)
goto error_attach;
}
@@ -1728,7 +1678,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
if (r)
DPRINTF(sc, ATH_DBG_FATAL,
- "Unable to reset hardware; reset status %u\n", r);
+ "Unable to reset hardware; reset status %d\n", r);
spin_unlock_bh(&sc->sc_resetlock);
if (ath_startrecv(sc) != 0)
@@ -1792,7 +1742,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
goto fail;
}
- dd->dd_name = name;
dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
/*
@@ -1822,7 +1771,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
}
ds = dd->dd_desc;
DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
- dd->dd_name, ds, (u32) dd->dd_desc_len,
+ name, ds, (u32) dd->dd_desc_len,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
/* allocate buffers */
@@ -2021,7 +1970,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
r = ath9k_hw_reset(sc->sc_ah, init_channel, false);
if (r) {
DPRINTF(sc, ATH_DBG_FATAL,
- "Unable to reset hardware; reset status %u "
+ "Unable to reset hardware; reset status %d "
"(freq %u MHz)\n", r,
curchan->center_freq);
spin_unlock_bh(&sc->sc_resetlock);
@@ -2043,8 +1992,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
* here except setup the interrupt mask.
*/
if (ath_startrecv(sc) != 0) {
- DPRINTF(sc, ATH_DBG_FATAL,
- "Unable to start recv logic\n");
+ DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n");
r = -EIO;
goto mutex_unlock;
}
@@ -2095,6 +2043,46 @@ static int ath9k_tx(struct ieee80211_hw *hw,
goto exit;
}
+ if (sc->hw->conf.flags & IEEE80211_CONF_PS) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ /*
+ * mac80211 does not set PM field for normal data frames, so we
+ * need to update that based on the current PS mode.
+ */
+ if (ieee80211_is_data(hdr->frame_control) &&
+ !ieee80211_is_nullfunc(hdr->frame_control) &&
+ !ieee80211_has_pm(hdr->frame_control)) {
+ DPRINTF(sc, ATH_DBG_PS, "Add PM=1 for a TX frame "
+ "while in PS mode\n");
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+ }
+ }
+
+ if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
+ /*
+ * We are using PS-Poll and mac80211 can request TX while in
+ * power save mode. Need to wake up hardware for the TX to be
+ * completed and if needed, also for RX of buffered frames.
+ */
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_setrxabort(sc->sc_ah, 0);
+ if (ieee80211_is_pspoll(hdr->frame_control)) {
+ DPRINTF(sc, ATH_DBG_PS, "Sending PS-Poll to pick a "
+ "buffered frame\n");
+ sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA;
+ } else {
+ DPRINTF(sc, ATH_DBG_PS, "Wake up to complete TX\n");
+ sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK;
+ }
+ /*
+ * The actual restore operation will happen only after
+ * the sc_flags bit is cleared. We are just dropping
+ * the ps_usecount here.
+ */
+ ath9k_ps_restore(sc);
+ }
+
memset(&txctl, 0, sizeof(struct ath_tx_control));
/*
@@ -2171,10 +2159,8 @@ static void ath9k_stop(struct ieee80211_hw *hw)
} else
sc->rx.rxlink = NULL;
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
-#endif
+ rfkill_pause_polling(sc->rf_kill.rfkill);
+
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(sc->sc_ah);
ath9k_hw_configpcipowersave(sc->sc_ah, 1);
@@ -2257,25 +2243,10 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
sc->imask |= ATH9K_INT_TSFOOR;
}
- /*
- * Some hardware processes the TIM IE and fires an
- * interrupt when the TIM bit is set. For hardware
- * that does, if not overridden by configuration,
- * enable the TIM interrupt when operating as station.
- */
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
- (conf->type == NL80211_IFTYPE_STATION) &&
- !sc->config.swBeaconProcess)
- sc->imask |= ATH9K_INT_TIM;
-
ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
- if (conf->type == NL80211_IFTYPE_AP) {
- /* TODO: is this a suitable place to start ANI for AP mode? */
- /* Start ANI */
- mod_timer(&sc->ani.timer,
- jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
- }
+ if (conf->type == NL80211_IFTYPE_AP)
+ ath_start_ani(sc);
out:
mutex_unlock(&sc->mutex);
@@ -2326,26 +2297,36 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ieee80211_conf *conf = &hw->conf;
+ struct ath_hw *ah = sc->sc_ah;
mutex_lock(&sc->mutex);
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (conf->flags & IEEE80211_CONF_PS) {
- if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
- sc->imask |= ATH9K_INT_TIM_TIMER;
- ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
+ if (!(ah->caps.hw_caps &
+ ATH9K_HW_CAP_AUTOSLEEP)) {
+ if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
+ sc->imask |= ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(sc->sc_ah,
+ sc->imask);
+ }
+ ath9k_hw_setrxabort(sc->sc_ah, 1);
}
- ath9k_hw_setrxabort(sc->sc_ah, 1);
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
} else {
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
- ath9k_hw_setrxabort(sc->sc_ah, 0);
- sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
- if (sc->imask & ATH9K_INT_TIM_TIMER) {
- sc->imask &= ~ATH9K_INT_TIM_TIMER;
- ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
+ if (!(ah->caps.hw_caps &
+ ATH9K_HW_CAP_AUTOSLEEP)) {
+ ath9k_hw_setrxabort(sc->sc_ah, 0);
+ sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON |
+ SC_OP_WAIT_FOR_CAB |
+ SC_OP_WAIT_FOR_PSPOLL_DATA |
+ SC_OP_WAIT_FOR_TX_ACK);
+ if (sc->imask & ATH9K_INT_TIM_TIMER) {
+ sc->imask &= ~ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(sc->sc_ah,
+ sc->imask);
+ }
}
}
}
@@ -2387,114 +2368,6 @@ skip_chan_change:
if (changed & IEEE80211_CONF_CHANGE_POWER)
sc->config.txpowlimit = 2 * conf->power_level;
- /*
- * The HW TSF has to be reset when the beacon interval changes.
- * We set the flag here, and ath_beacon_config_ap() would take this
- * into account when it gets called through the subsequent
- * config_interface() call - with IFCC_BEACON in the changed field.
- */
-
- if (changed & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
- sc->sc_flags |= SC_OP_TSF_RESET;
-
- mutex_unlock(&sc->mutex);
-
- return 0;
-}
-
-static int ath9k_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_vif *avp = (void *)vif->drv_priv;
- u32 rfilt = 0;
- int error, i;
-
- mutex_lock(&sc->mutex);
-
- /* TODO: Need to decide which hw opmode to use for multi-interface
- * cases */
- if (vif->type == NL80211_IFTYPE_AP &&
- ah->opmode != NL80211_IFTYPE_AP) {
- ah->opmode = NL80211_IFTYPE_STATION;
- ath9k_hw_setopmode(ah);
- memcpy(sc->curbssid, sc->sc_ah->macaddr, ETH_ALEN);
- sc->curaid = 0;
- ath9k_hw_write_associd(sc);
- /* Request full reset to get hw opmode changed properly */
- sc->sc_flags |= SC_OP_FULL_RESET;
- }
-
- if ((conf->changed & IEEE80211_IFCC_BSSID) &&
- !is_zero_ether_addr(conf->bssid)) {
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_MESH_POINT:
- /* Set BSSID */
- memcpy(sc->curbssid, conf->bssid, ETH_ALEN);
- memcpy(avp->bssid, conf->bssid, ETH_ALEN);
- sc->curaid = 0;
- ath9k_hw_write_associd(sc);
-
- /* Set aggregation protection mode parameters */
- sc->config.ath_aggr_prot = 0;
-
- DPRINTF(sc, ATH_DBG_CONFIG,
- "RX filter 0x%x bssid %pM aid 0x%x\n",
- rfilt, sc->curbssid, sc->curaid);
-
- /* need to reconfigure the beacon */
- sc->sc_flags &= ~SC_OP_BEACONS ;
-
- break;
- default:
- break;
- }
- }
-
- if ((vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) {
- if ((conf->changed & IEEE80211_IFCC_BEACON) ||
- (conf->changed & IEEE80211_IFCC_BEACON_ENABLED &&
- conf->enable_beacon)) {
- /*
- * Allocate and setup the beacon frame.
- *
- * Stop any previous beacon DMA. This may be
- * necessary, for example, when an ibss merge
- * causes reconfiguration; we may be called
- * with beacon transmission active.
- */
- ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
-
- error = ath_beacon_alloc(aphy, vif);
- if (error != 0) {
- mutex_unlock(&sc->mutex);
- return error;
- }
-
- ath_beacon_config(sc, vif);
- }
- }
-
- /* Check for WLAN_CAPABILITY_PRIVACY ? */
- if ((avp->av_opmode != NL80211_IFTYPE_STATION)) {
- for (i = 0; i < IEEE80211_WEP_NKID; i++)
- if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
- ath9k_hw_keysetmac(sc->sc_ah,
- (u16)i,
- sc->curbssid);
- }
-
- /* Only legacy IBSS for now */
- if (vif->type == NL80211_IFTYPE_ADHOC)
- ath_update_chainmask(sc, 0);
-
mutex_unlock(&sc->mutex);
return 0;
@@ -2523,8 +2396,10 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
sc->rx.rxfilter = *total_flags;
+ ath9k_ps_wakeup(sc);
rfilt = ath_calcrxfilter(sc);
ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
+ ath9k_ps_restore(sc);
DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter);
}
@@ -2562,6 +2437,8 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
mutex_lock(&sc->mutex);
+ memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
+
qi.tqi_aifs = params->aifs;
qi.tqi_cwmin = params->cw_min;
qi.tqi_cwmax = params->cw_max;
@@ -2598,7 +2475,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- DPRINTF(sc, ATH_DBG_KEYCACHE, "Set HW Key\n");
+ DPRINTF(sc, ATH_DBG_CONFIG, "Set HW Key\n");
switch (cmd) {
case SET_KEY:
@@ -2634,9 +2511,92 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ u32 rfilt = 0;
+ int error, i;
mutex_lock(&sc->mutex);
+ /*
+ * TODO: Need to decide which hw opmode to use for
+ * multi-interface cases
+ * XXX: This belongs in add_interface!
+ */
+ if (vif->type == NL80211_IFTYPE_AP &&
+ ah->opmode != NL80211_IFTYPE_AP) {
+ ah->opmode = NL80211_IFTYPE_STATION;
+ ath9k_hw_setopmode(ah);
+ memcpy(sc->curbssid, sc->sc_ah->macaddr, ETH_ALEN);
+ sc->curaid = 0;
+ ath9k_hw_write_associd(sc);
+ /* Request full reset to get hw opmode changed properly */
+ sc->sc_flags |= SC_OP_FULL_RESET;
+ }
+
+ if ((changed & BSS_CHANGED_BSSID) &&
+ !is_zero_ether_addr(bss_conf->bssid)) {
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ /* Set BSSID */
+ memcpy(sc->curbssid, bss_conf->bssid, ETH_ALEN);
+ memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
+ sc->curaid = 0;
+ ath9k_hw_write_associd(sc);
+
+ /* Set aggregation protection mode parameters */
+ sc->config.ath_aggr_prot = 0;
+
+ DPRINTF(sc, ATH_DBG_CONFIG,
+ "RX filter 0x%x bssid %pM aid 0x%x\n",
+ rfilt, sc->curbssid, sc->curaid);
+
+ /* need to reconfigure the beacon */
+ sc->sc_flags &= ~SC_OP_BEACONS ;
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ if ((vif->type == NL80211_IFTYPE_ADHOC) ||
+ (vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ if ((changed & BSS_CHANGED_BEACON) ||
+ (changed & BSS_CHANGED_BEACON_ENABLED &&
+ bss_conf->enable_beacon)) {
+ /*
+ * Allocate and setup the beacon frame.
+ *
+ * Stop any previous beacon DMA. This may be
+ * necessary, for example, when an ibss merge
+ * causes reconfiguration; we may be called
+ * with beacon transmission active.
+ */
+ ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+
+ error = ath_beacon_alloc(aphy, vif);
+ if (!error)
+ ath_beacon_config(sc, vif);
+ }
+ }
+
+ /* Check for WLAN_CAPABILITY_PRIVACY ? */
+ if ((avp->av_opmode != NL80211_IFTYPE_STATION)) {
+ for (i = 0; i < IEEE80211_WEP_NKID; i++)
+ if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
+ ath9k_hw_keysetmac(sc->sc_ah,
+ (u16)i,
+ sc->curbssid);
+ }
+
+ /* Only legacy IBSS for now */
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ ath_update_chainmask(sc, 0);
+
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
bss_conf->use_short_preamble);
@@ -2662,6 +2622,18 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
ath9k_bss_assoc_info(sc, vif, bss_conf);
}
+ /*
+ * The HW TSF has to be reset when the beacon interval changes.
+ * We set the flag here, and ath_beacon_config_ap() would take this
+ * into account when it gets called through the subsequent
+ * config_interface() call - with IFCC_BEACON in the changed field.
+ */
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ sc->sc_flags |= SC_OP_TSF_RESET;
+ sc->beacon_interval = bss_conf->beacon_int;
+ }
+
mutex_unlock(&sc->mutex);
}
@@ -2771,6 +2743,7 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
mutex_lock(&sc->mutex);
aphy->state = ATH_WIPHY_ACTIVE;
sc->sc_flags &= ~SC_OP_SCANNING;
+ sc->sc_flags |= SC_OP_FULL_RESET;
mutex_unlock(&sc->mutex);
}
@@ -2781,7 +2754,6 @@ struct ieee80211_ops ath9k_ops = {
.add_interface = ath9k_add_interface,
.remove_interface = ath9k_remove_interface,
.config = ath9k_config,
- .config_interface = ath9k_config_interface,
.configure_filter = ath9k_configure_filter,
.sta_notify = ath9k_sta_notify,
.conf_tx = ath9k_conf_tx,
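For reference, the rfkill core API that this conversion targets can be wired up roughly as below in a generic driver. This is a minimal sketch, not the ath9k code: the mydrv_* names and the radio/switch helpers are hypothetical stand-ins.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/rfkill.h>

struct mydrv {
	struct rfkill *rfkill;
	struct rfkill_ops rfkill_ops;
	/* ... radio state ... */
};

/* Hypothetical helpers assumed to exist elsewhere in the driver. */
static void mydrv_radio_on(struct mydrv *drv);
static void mydrv_radio_off(struct mydrv *drv);
static bool mydrv_hw_switch_blocked(struct mydrv *drv);

/* Soft-block request coming from the rfkill core (user space, etc.). */
static int mydrv_rfkill_set_block(void *data, bool blocked)
{
	struct mydrv *drv = data;

	if (blocked)
		mydrv_radio_off(drv);
	else
		mydrv_radio_on(drv);
	return 0;
}

/* Periodic poll of the hardware switch; the core tracks the combined state. */
static void mydrv_rfkill_poll(struct rfkill *rfkill, void *data)
{
	struct mydrv *drv = data;

	if (rfkill_set_hw_state(rfkill, mydrv_hw_switch_blocked(drv)))
		mydrv_radio_off(drv);
	else
		mydrv_radio_on(drv);
}

static int mydrv_rfkill_init(struct mydrv *drv, struct device *dev)
{
	drv->rfkill_ops.set_block = mydrv_rfkill_set_block;
	drv->rfkill_ops.poll = mydrv_rfkill_poll;	/* only if a h/w switch exists */

	drv->rfkill = rfkill_alloc("mydrv-rfkill", dev, RFKILL_TYPE_WLAN,
				   &drv->rfkill_ops, drv);
	if (!drv->rfkill)
		return -ENOMEM;

	/* On teardown: rfkill_unregister() followed by rfkill_destroy(). */
	return rfkill_register(drv->rfkill);
}

rfkill_pause_polling() can then be used to quiesce the ->poll callback when the device is stopped, matching the ath9k_stop() change above.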
diff --git a/drivers/net/wireless/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 168411d322a..ccdf20a2e9b 100644
--- a/drivers/net/wireless/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -227,11 +227,6 @@ static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
-#endif
-
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
@@ -256,16 +251,6 @@ static int ath_pci_resume(struct pci_dev *pdev)
AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
- /*
- * check the h/w rfkill state on resume
- * and start the rfkill poll timer
- */
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- queue_delayed_work(sc->hw->workqueue,
- &sc->rf_kill.rfkill_poll, 0);
-#endif
-
return 0;
}
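The driver-side poll work disappears here because the rfkill core now drives the ->poll hook itself; a PCI driver that wants polling quiesced across suspend can lean on the core's helpers instead. A hedged sketch with hypothetical mypci_* names (the ath9k change above simply drops its private work and does not add these calls):

#include <linux/pci.h>
#include <linux/rfkill.h>

struct mypci {
	struct rfkill *rfkill;
	/* ... */
};

static int mypci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct mypci *drv = pci_get_drvdata(pdev);

	rfkill_pause_polling(drv->rfkill);	/* core stops calling ->poll */

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int mypci_resume(struct pci_dev *pdev)
{
	struct mypci *drv = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err)
		return err;

	rfkill_resume_polling(drv->rfkill);	/* re-check the h/w switch state */
	return 0;
}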
diff --git a/drivers/net/wireless/ath9k/phy.c b/drivers/net/wireless/ath/ath9k/phy.c
index 8bcba906929..aaa941561c3 100644
--- a/drivers/net/wireless/ath9k/phy.c
+++ b/drivers/net/wireless/ath/ath9k/phy.c
@@ -46,7 +46,7 @@ ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
channelSel = ((freq - 704) * 2 - 3040) / 10;
bModeSynth = 1;
} else {
- DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"Invalid channel %u MHz\n", freq);
return false;
}
@@ -79,7 +79,7 @@ ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
aModeRefSel = ath9k_hw_reverse_bits(1, 2);
} else {
- DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
+ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
"Invalid channel %u MHz\n", freq);
return false;
}
@@ -96,9 +96,8 @@ ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
return true;
}
-bool
-ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
- struct ath9k_channel *chan)
+void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
+ struct ath9k_channel *chan)
{
u16 bMode, fracMode, aModeRefSel = 0;
u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
@@ -169,8 +168,6 @@ ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
ah->curchan = chan;
ah->curchan_rad_index = -1;
-
- return true;
}
static void
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 0f7f8e0c9c9..c70f530642f 100644
--- a/drivers/net/wireless/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -17,7 +17,7 @@
#ifndef PHY_H
#define PHY_H
-bool ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
+void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
struct ath9k_channel
*chan);
bool ath9k_hw_set_channel(struct ath_hw *ah,
@@ -556,9 +556,6 @@ bool ath9k_hw_init_rf(struct ath_hw *ah,
int r; \
for (r = 0; r < ((iniarray)->ia_rows); r++) { \
REG_WRITE(ah, INI_RA((iniarray), r, 0), (regData)[r]); \
- DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, \
- "RF 0x%x V 0x%x\n", \
- INI_RA((iniarray), r, 0), (regData)[r]); \
DO_DELAY(regWr); \
} \
} while (0)
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 824ccbb8b7b..ba06e78b2f5 100644
--- a/drivers/net/wireless/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -17,7 +17,7 @@
#include "ath9k.h"
-static struct ath_rate_table ar5416_11na_ratetable = {
+static const struct ath_rate_table ar5416_11na_ratetable = {
42,
{
{ VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
@@ -155,7 +155,7 @@ static struct ath_rate_table ar5416_11na_ratetable = {
/* 4ms frame limit not used for NG mode. The values filled
* for HT are the 64K max aggregate limit */
-static struct ath_rate_table ar5416_11ng_ratetable = {
+static const struct ath_rate_table ar5416_11ng_ratetable = {
46,
{
{ VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
@@ -302,7 +302,7 @@ static struct ath_rate_table ar5416_11ng_ratetable = {
WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
};
-static struct ath_rate_table ar5416_11a_ratetable = {
+static const struct ath_rate_table ar5416_11a_ratetable = {
8,
{
{ VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
@@ -335,7 +335,7 @@ static struct ath_rate_table ar5416_11a_ratetable = {
0, /* Phy rates allowed initially */
};
-static struct ath_rate_table ar5416_11g_ratetable = {
+static const struct ath_rate_table ar5416_11g_ratetable = {
12,
{
{ VALID, VALID, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
@@ -380,7 +380,7 @@ static struct ath_rate_table ar5416_11g_ratetable = {
0, /* Phy rates allowed initially */
};
-static struct ath_rate_table ar5416_11b_ratetable = {
+static const struct ath_rate_table ar5416_11b_ratetable = {
4,
{
{ VALID, VALID, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
@@ -420,7 +420,7 @@ static inline int8_t median(int8_t a, int8_t b, int8_t c)
}
}
-static void ath_rc_sort_validrates(struct ath_rate_table *rate_table,
+static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
struct ath_rate_priv *ath_rc_priv)
{
u8 i, j, idx, idx_next;
@@ -461,10 +461,11 @@ static inline int ath_rc_isvalid_txmask(struct ath_rate_priv *ath_rc_priv,
return ath_rc_priv->valid_rate_index[index];
}
-static inline int ath_rc_get_nextvalid_txrate(struct ath_rate_table *rate_table,
- struct ath_rate_priv *ath_rc_priv,
- u8 cur_valid_txrate,
- u8 *next_idx)
+static inline
+int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
+ struct ath_rate_priv *ath_rc_priv,
+ u8 cur_valid_txrate,
+ u8 *next_idx)
{
u8 i;
@@ -500,7 +501,7 @@ static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
}
static inline int
-ath_rc_get_nextlowervalid_txrate(struct ath_rate_table *rate_table,
+ath_rc_get_nextlowervalid_txrate(const struct ath_rate_table *rate_table,
struct ath_rate_priv *ath_rc_priv,
u8 cur_valid_txrate, u8 *next_idx)
{
@@ -517,14 +518,14 @@ ath_rc_get_nextlowervalid_txrate(struct ath_rate_table *rate_table,
}
static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
- struct ath_rate_table *rate_table,
+ const struct ath_rate_table *rate_table,
u32 capflag)
{
u8 i, hi = 0;
u32 valid;
for (i = 0; i < rate_table->rate_cnt; i++) {
- valid = (ath_rc_priv->single_stream ?
+ valid = (!(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ?
rate_table->info[i].valid_single_stream :
rate_table->info[i].valid);
if (valid == 1) {
@@ -547,7 +548,7 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
}
static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
- struct ath_rate_table *rate_table,
+ const struct ath_rate_table *rate_table,
struct ath_rateset *rateset,
u32 capflag)
{
@@ -557,9 +558,9 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
for (i = 0; i < rateset->rs_nrates; i++) {
for (j = 0; j < rate_table->rate_cnt; j++) {
u32 phy = rate_table->info[j].phy;
- u32 valid = (ath_rc_priv->single_stream ?
- rate_table->info[j].valid_single_stream :
- rate_table->info[j].valid);
+ u32 valid = (!(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ?
+ rate_table->info[j].valid_single_stream :
+ rate_table->info[j].valid);
u8 rate = rateset->rs_rates[i];
u8 dot11rate = rate_table->info[j].dot11rate;
@@ -592,7 +593,7 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
}
static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
- struct ath_rate_table *rate_table,
+ const struct ath_rate_table *rate_table,
u8 *mcs_set, u32 capflag)
{
struct ath_rateset *rateset = (struct ath_rateset *)mcs_set;
@@ -603,7 +604,7 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
for (i = 0; i < rateset->rs_nrates; i++) {
for (j = 0; j < rate_table->rate_cnt; j++) {
u32 phy = rate_table->info[j].phy;
- u32 valid = (ath_rc_priv->single_stream ?
+ u32 valid = (!(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ?
rate_table->info[j].valid_single_stream :
rate_table->info[j].valid);
u8 rate = rateset->rs_rates[i];
@@ -630,7 +631,7 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
struct ath_rate_priv *ath_rc_priv,
- struct ath_rate_table *rate_table,
+ const struct ath_rate_table *rate_table,
int *is_probing)
{
u32 dt, best_thruput, this_thruput, now_msec;
@@ -740,14 +741,15 @@ static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
if (rate > (ath_rc_priv->rate_table_size - 1))
rate = ath_rc_priv->rate_table_size - 1;
- ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) ||
+ ASSERT((rate_table->info[rate].valid &&
+ (ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG)) ||
(rate_table->info[rate].valid_single_stream &&
- ath_rc_priv->single_stream));
+ !(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG)));
return rate;
}
-static void ath_rc_rate_set_series(struct ath_rate_table *rate_table,
+static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
struct ieee80211_tx_rate *rate,
struct ieee80211_tx_rate_control *txrc,
u8 tries, u8 rix, int rtsctsenable)
@@ -768,7 +770,7 @@ static void ath_rc_rate_set_series(struct ath_rate_table *rate_table,
}
static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
- struct ath_rate_table *rate_table,
+ const struct ath_rate_table *rate_table,
struct ieee80211_tx_info *tx_info)
{
struct ieee80211_tx_rate *rates = tx_info->control.rates;
@@ -806,12 +808,12 @@ static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
static u8 ath_rc_rate_getidx(struct ath_softc *sc,
struct ath_rate_priv *ath_rc_priv,
- struct ath_rate_table *rate_table,
+ const struct ath_rate_table *rate_table,
u8 rix, u16 stepdown,
u16 min_rate)
{
u32 j;
- u8 nextindex;
+ u8 nextindex = 0;
if (min_rate) {
for (j = RATE_TABLE_SIZE; j > 0; j--) {
@@ -837,7 +839,7 @@ static void ath_rc_ratefind(struct ath_softc *sc,
struct ath_rate_priv *ath_rc_priv,
struct ieee80211_tx_rate_control *txrc)
{
- struct ath_rate_table *rate_table;
+ const struct ath_rate_table *rate_table;
struct sk_buff *skb = txrc->skb;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rates = tx_info->control.rates;
@@ -936,7 +938,7 @@ static void ath_rc_ratefind(struct ath_softc *sc,
}
static bool ath_rc_update_per(struct ath_softc *sc,
- struct ath_rate_table *rate_table,
+ const struct ath_rate_table *rate_table,
struct ath_rate_priv *ath_rc_priv,
struct ath_tx_info_priv *tx_info_priv,
int tx_rate, int xretries, int retries,
@@ -1141,7 +1143,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
int rate;
u8 last_per;
bool state_change = false;
- struct ath_rate_table *rate_table = sc->cur_rate_table;
+ const struct ath_rate_table *rate_table = sc->cur_rate_table;
int size = ath_rc_priv->rate_table_size;
if ((tx_rate < 0) || (tx_rate > rate_table->rate_cnt))
@@ -1275,7 +1277,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
#undef CHK_RSSI
}
-static int ath_rc_get_rateindex(struct ath_rate_table *rate_table,
+static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
struct ieee80211_tx_rate *rate)
{
int rix;
@@ -1299,7 +1301,7 @@ static void ath_rc_tx_status(struct ath_softc *sc,
int final_ts_idx, int xretries, int long_retry)
{
struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
- struct ath_rate_table *rate_table;
+ const struct ath_rate_table *rate_table;
struct ieee80211_tx_rate *rates = tx_info->status.rates;
u8 flags;
u32 i = 0, rix;
@@ -1320,7 +1322,7 @@ static void ath_rc_tx_status(struct ath_softc *sc,
* 40 to 20 => don't update */
if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
- (ath_rc_priv->rc_phy_mode != WLAN_RC_40_FLAG))
+ !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
return;
rix = ath_rc_get_rateindex(rate_table, &rates[i]);
@@ -1345,18 +1347,19 @@ static void ath_rc_tx_status(struct ath_softc *sc,
/* If HT40 and we have switched mode from 40 to 20 => don't update */
if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
- (ath_rc_priv->rc_phy_mode != WLAN_RC_40_FLAG)) {
+ !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
return;
- }
rix = ath_rc_get_rateindex(rate_table, &rates[i]);
ath_rc_update_ht(sc, ath_rc_priv, tx_info_priv, rix,
xretries, long_retry);
}
-static struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
- enum ieee80211_band band,
- bool is_ht, bool is_cw_40)
+static const
+struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
+ enum ieee80211_band band,
+ bool is_ht,
+ bool is_cw_40)
{
int mode = 0;
@@ -1390,7 +1393,7 @@ static void ath_rc_init(struct ath_softc *sc,
struct ath_rate_priv *ath_rc_priv,
struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta,
- struct ath_rate_table *rate_table)
+ const struct ath_rate_table *rate_table)
{
struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
u8 *ht_mcs = (u8 *)&ath_rc_priv->neg_ht_rates;
@@ -1420,10 +1423,6 @@ static void ath_rc_init(struct ath_softc *sc,
ath_rc_priv->valid_phy_rateidx[i][j] = 0;
ath_rc_priv->valid_phy_ratecnt[i] = 0;
}
- ath_rc_priv->rc_phy_mode = ath_rc_priv->ht_cap & WLAN_RC_40_FLAG;
-
- /* Set stream capability */
- ath_rc_priv->single_stream = (ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ? 0 : 1;
if (!rateset->rs_nrates) {
/* No working rate, just initialize valid rates */
@@ -1572,12 +1571,13 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
struct ath_rate_priv *ath_rc_priv = priv_sta;
__le16 fc = hdr->frame_control;
- /* lowest rate for management and multicast/broadcast frames */
- if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) ||
- !sta) {
+ /* lowest rate for management and NO_ACK frames */
+ if (!ieee80211_is_data(fc) ||
+ tx_info->flags & IEEE80211_TX_CTL_NO_ACK || !sta) {
tx_info->control.rates[0].idx = rate_lowest_index(sband, sta);
tx_info->control.rates[0].count =
- is_multicast_ether_addr(hdr->addr1) ? 1 : ATH_MGT_TXMAXTRY;
+ (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) ?
+ 1 : ATH_MGT_TXMAXTRY;
return;
}
@@ -1590,7 +1590,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
{
struct ath_softc *sc = priv;
struct ath_rate_priv *ath_rc_priv = priv_sta;
- struct ath_rate_table *rate_table = NULL;
+ const struct ath_rate_table *rate_table = NULL;
bool is_cw40, is_sgi40;
int i, j = 0;
@@ -1639,7 +1639,7 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
{
struct ath_softc *sc = priv;
struct ath_rate_priv *ath_rc_priv = priv_sta;
- struct ath_rate_table *rate_table = NULL;
+ const struct ath_rate_table *rate_table = NULL;
bool oper_cw40 = false, oper_sgi40;
bool local_cw40 = (ath_rc_priv->ht_cap & WLAN_RC_40_FLAG) ?
true : false;
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 199a3ce57d6..e3abd76103f 100644
--- a/drivers/net/wireless/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -24,7 +24,6 @@ struct ath_softc;
#define ATH_RATE_MAX 30
#define RATE_TABLE_SIZE 64
#define MAX_TX_RATE_PHY 48
-#define WLAN_CTRL_FRAME_SIZE (2+2+6+4)
/* VALID_ALL - valid for 20/40/Legacy,
* VALID - Legacy only,
@@ -158,7 +157,6 @@ struct ath_rateset {
* @probe_interval: interval for ratectrl to probe for other rates
* @prev_data_rix: rate idx of last data frame
* @ht_cap: HT capabilities
- * @single_stream: When TRUE, only single TX stream possible
 * @neg_rates: Negotiated rates
* @neg_ht_rates: Negotiated HT rates
*/
@@ -176,10 +174,8 @@ struct ath_rate_priv {
u8 max_valid_rate;
u8 valid_rate_index[RATE_TABLE_SIZE];
u8 ht_cap;
- u8 single_stream;
u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX];
u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][RATE_TABLE_SIZE];
- u8 rc_phy_mode;
u8 rate_max_phy;
u32 rssi_time;
u32 rssi_down_time;
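The removed single_stream and rc_phy_mode fields were caches of information already carried in ht_cap; the rate control paths above now derive both on the fly. A minimal sketch of that idiom; the flag values below are illustrative only, the real WLAN_RC_*_FLAG definitions live elsewhere in rc.h:

#include <linux/types.h>

/* Illustrative bit values, not the real rc.h definitions. */
#define EX_RC_DS_FLAG	0x02	/* dual spatial streams negotiated */
#define EX_RC_40_FLAG	0x04	/* 40 MHz operation negotiated */

struct ex_rc_state {
	u8 ht_cap;		/* capability bitmap kept per station */
};

/* Formerly cached as ath_rc_priv->single_stream. */
static inline bool ex_rc_single_stream(const struct ex_rc_state *rc)
{
	return !(rc->ht_cap & EX_RC_DS_FLAG);
}

/* Formerly cached as ath_rc_priv->rc_phy_mode. */
static inline bool ex_rc_ht40(const struct ex_rc_state *rc)
{
	return !!(rc->ht_cap & EX_RC_40_FLAG);
}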
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index dd1f3015674..5014a19b0f7 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -283,54 +283,51 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
struct ath_buf *bf;
int error = 0;
- do {
- spin_lock_init(&sc->rx.rxflushlock);
- sc->sc_flags &= ~SC_OP_RXFLUSH;
- spin_lock_init(&sc->rx.rxbuflock);
-
- sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
- min(sc->cachelsz,
- (u16)64));
+ spin_lock_init(&sc->rx.rxflushlock);
+ sc->sc_flags &= ~SC_OP_RXFLUSH;
+ spin_lock_init(&sc->rx.rxbuflock);
- DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
- sc->cachelsz, sc->rx.bufsize);
+ sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+ min(sc->cachelsz, (u16)64));
- /* Initialize rx descriptors */
+ DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+ sc->cachelsz, sc->rx.bufsize);
- error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
- "rx", nbufs, 1);
- if (error != 0) {
- DPRINTF(sc, ATH_DBG_FATAL,
- "failed to allocate rx descriptors: %d\n", error);
- break;
- }
+ /* Initialize rx descriptors */
- list_for_each_entry(bf, &sc->rx.rxbuf, list) {
- skb = ath_rxbuf_alloc(sc, sc->rx.bufsize, GFP_KERNEL);
- if (skb == NULL) {
- error = -ENOMEM;
- break;
- }
+ error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
+ "rx", nbufs, 1);
+ if (error != 0) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "failed to allocate rx descriptors: %d\n", error);
+ goto err;
+ }
- bf->bf_mpdu = skb;
- bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
- sc->rx.bufsize,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(sc->dev,
- bf->bf_buf_addr))) {
- dev_kfree_skb_any(skb);
- bf->bf_mpdu = NULL;
- DPRINTF(sc, ATH_DBG_CONFIG,
- "dma_mapping_error() on RX init\n");
- error = -ENOMEM;
- break;
- }
- bf->bf_dmacontext = bf->bf_buf_addr;
+ list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ skb = ath_rxbuf_alloc(sc, sc->rx.bufsize, GFP_KERNEL);
+ if (skb == NULL) {
+ error = -ENOMEM;
+ goto err;
}
- sc->rx.rxlink = NULL;
- } while (0);
+ bf->bf_mpdu = skb;
+ bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+ sc->rx.bufsize,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(sc->dev,
+ bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "dma_mapping_error() on RX init\n");
+ error = -ENOMEM;
+ goto err;
+ }
+ bf->bf_dmacontext = bf->bf_buf_addr;
+ }
+ sc->rx.rxlink = NULL;
+err:
if (error)
ath_rx_cleanup(sc);
@@ -345,10 +342,8 @@ void ath_rx_cleanup(struct ath_softc *sc)
list_for_each_entry(bf, &sc->rx.rxbuf, list) {
skb = bf->bf_mpdu;
if (skb) {
- dma_unmap_single(sc->dev,
- bf->bf_buf_addr,
- sc->rx.bufsize,
- DMA_FROM_DEVICE);
+ dma_unmap_single(sc->dev, bf->bf_buf_addr,
+ sc->rx.bufsize, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
}
@@ -478,6 +473,159 @@ void ath_flushrecv(struct ath_softc *sc)
spin_unlock_bh(&sc->rx.rxflushlock);
}
+static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
+{
+ /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
+ struct ieee80211_mgmt *mgmt;
+ u8 *pos, *end, id, elen;
+ struct ieee80211_tim_ie *tim;
+
+ mgmt = (struct ieee80211_mgmt *)skb->data;
+ pos = mgmt->u.beacon.variable;
+ end = skb->data + skb->len;
+
+ while (pos + 2 < end) {
+ id = *pos++;
+ elen = *pos++;
+ if (pos + elen > end)
+ break;
+
+ if (id == WLAN_EID_TIM) {
+ if (elen < sizeof(*tim))
+ break;
+ tim = (struct ieee80211_tim_ie *) pos;
+ if (tim->dtim_count != 0)
+ break;
+ return tim->bitmap_ctrl & 0x01;
+ }
+
+ pos += elen;
+ }
+
+ return false;
+}
+
+static void ath_rx_ps_back_to_sleep(struct ath_softc *sc)
+{
+ sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON | SC_OP_WAIT_FOR_CAB);
+}
+
+static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt;
+
+ if (skb->len < 24 + 8 + 2 + 2)
+ return;
+
+ mgmt = (struct ieee80211_mgmt *)skb->data;
+ if (memcmp(sc->curbssid, mgmt->bssid, ETH_ALEN) != 0)
+ return; /* not from our current AP */
+
+ if (sc->sc_flags & SC_OP_BEACON_SYNC) {
+ sc->sc_flags &= ~SC_OP_BEACON_SYNC;
+ DPRINTF(sc, ATH_DBG_PS, "Reconfigure Beacon timers based on "
+ "timestamp from the AP\n");
+ ath_beacon_config(sc, NULL);
+ }
+
+ if (!(sc->hw->conf.flags & IEEE80211_CONF_PS)) {
+ /* We are not in PS mode anymore; remain awake */
+ DPRINTF(sc, ATH_DBG_PS, "Not in PS mode anymore, remain "
+ "awake\n");
+ sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON | SC_OP_WAIT_FOR_CAB);
+ return;
+ }
+
+ if (ath_beacon_dtim_pending_cab(skb)) {
+ /*
+ * Remain awake waiting for buffered broadcast/multicast
+ * frames.
+ */
+ DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating "
+ "buffered broadcast/multicast frame(s)\n");
+ sc->sc_flags |= SC_OP_WAIT_FOR_CAB;
+ return;
+ }
+
+ if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) {
+ /*
+ * This can happen if a broadcast frame is dropped or the AP
+ * fails to send a frame indicating that all CAB frames have
+ * been delivered.
+ */
+ DPRINTF(sc, ATH_DBG_PS, "PS wait for CAB frames timed out\n");
+ }
+
+ /* No more broadcast/multicast frames to be received at this point. */
+ ath_rx_ps_back_to_sleep(sc);
+}
+
+static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ /* Process Beacon and CAB receive in PS state */
+ if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) &&
+ ieee80211_is_beacon(hdr->frame_control))
+ ath_rx_ps_beacon(sc, skb);
+ else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) &&
+ (ieee80211_is_data(hdr->frame_control) ||
+ ieee80211_is_action(hdr->frame_control)) &&
+ is_multicast_ether_addr(hdr->addr1) &&
+ !ieee80211_has_moredata(hdr->frame_control)) {
+ DPRINTF(sc, ATH_DBG_PS, "All PS CAB frames received, back to "
+ "sleep\n");
+ /*
+ * No more broadcast/multicast frames to be received at this
+ * point.
+ */
+ ath_rx_ps_back_to_sleep(sc);
+ } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ !ieee80211_has_morefrags(hdr->frame_control)) {
+ sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
+ DPRINTF(sc, ATH_DBG_PS, "Going back to sleep after having "
+ "received PS-Poll data (0x%x)\n",
+ sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
+ SC_OP_WAIT_FOR_CAB |
+ SC_OP_WAIT_FOR_PSPOLL_DATA |
+ SC_OP_WAIT_FOR_TX_ACK));
+ }
+}
+
+static void ath_rx_send_to_mac80211(struct ath_softc *sc, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ /* Send the frame to mac80211 */
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ int i;
+ /*
+ * Deliver broadcast/multicast frames to all suitable
+ * virtual wiphys.
+ */
+ /* TODO: filter based on channel configuration */
+ for (i = 0; i < sc->num_sec_wiphy; i++) {
+ struct ath_wiphy *aphy = sc->sec_wiphy[i];
+ struct sk_buff *nskb;
+ if (aphy == NULL)
+ continue;
+ nskb = skb_copy(skb, GFP_ATOMIC);
+ if (nskb)
+ __ieee80211_rx(aphy->hw, nskb, rx_status);
+ }
+ __ieee80211_rx(sc->hw, skb, rx_status);
+ } else {
+ /* Deliver unicast frames based on receiver address */
+ __ieee80211_rx(ath_get_virt_hw(sc, hdr), skb, rx_status);
+ }
+}
+
int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa) \
@@ -627,7 +775,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
rx_status.flag |= RX_FLAG_DECRYPTED;
- } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
+ } else if (ieee80211_has_protected(fc)
&& !decrypt_error && skb->len >= hdrlen + 4) {
keyix = skb->data[hdrlen + 3] >> 6;
@@ -636,36 +784,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
}
if (ah->sw_mgmt_crypto &&
(rx_status.flag & RX_FLAG_DECRYPTED) &&
- ieee80211_is_mgmt(hdr->frame_control)) {
+ ieee80211_is_mgmt(fc)) {
/* Use software decrypt for management frames. */
rx_status.flag &= ~RX_FLAG_DECRYPTED;
}
- /* Send the frame to mac80211 */
- if (hdr->addr1[5] & 0x01) {
- int i;
- /*
- * Deliver broadcast/multicast frames to all suitable
- * virtual wiphys.
- */
- /* TODO: filter based on channel configuration */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- struct sk_buff *nskb;
- if (aphy == NULL)
- continue;
- nskb = skb_copy(skb, GFP_ATOMIC);
- if (nskb)
- __ieee80211_rx(aphy->hw, nskb,
- &rx_status);
- }
- __ieee80211_rx(sc->hw, skb, &rx_status);
- } else {
- /* Deliver unicast frames based on receiver address */
- __ieee80211_rx(ath_get_virt_hw(sc, hdr), skb,
- &rx_status);
- }
-
/* We will now give hardware our shiny new allocated skb */
bf->bf_mpdu = requeue_skb;
bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
@@ -675,8 +798,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
bf->bf_buf_addr))) {
dev_kfree_skb_any(requeue_skb);
bf->bf_mpdu = NULL;
- DPRINTF(sc, ATH_DBG_CONFIG,
+ DPRINTF(sc, ATH_DBG_FATAL,
"dma_mapping_error() on RX\n");
+ ath_rx_send_to_mac80211(sc, skb, &rx_status);
break;
}
bf->bf_dmacontext = bf->bf_buf_addr;
@@ -692,11 +816,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
sc->rx.rxotherant = 0;
}
- if (ieee80211_is_beacon(fc) &&
- (sc->sc_flags & SC_OP_WAIT_FOR_BEACON)) {
- sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
- ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
- }
+ if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
+ SC_OP_WAIT_FOR_PSPOLL_DATA)))
+ ath_rx_ps(sc, skb);
+
+ ath_rx_send_to_mac80211(sc, skb, &rx_status);
+
requeue:
list_move_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_buf_link(sc, bf);
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 52605246679..52605246679 100644
--- a/drivers/net/wireless/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
diff --git a/drivers/net/wireless/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 1ff429b027d..1ff429b027d 100644
--- a/drivers/net/wireless/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 689bdbf7880..b61a071788a 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -283,7 +283,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
bool rc_update = true;
- skb = (struct sk_buff *)bf->bf_mpdu;
+ skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
rcu_read_lock();
@@ -380,8 +380,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
} else {
/* retry the un-acked ones */
- if (bf->bf_next == NULL &&
- bf_last->bf_status & ATH_BUFSTATUS_STALE) {
+ if (bf->bf_next == NULL && bf_last->bf_stale) {
struct ath_buf *tbf;
tbf = ath_clone_txbuf(sc, bf_last);
@@ -435,7 +434,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ath_atx_tid *tid)
{
- struct ath_rate_table *rate_table = sc->cur_rate_table;
+ const struct ath_rate_table *rate_table = sc->cur_rate_table;
struct sk_buff *skb;
struct ieee80211_tx_info *tx_info;
struct ieee80211_tx_rate *rates;
@@ -444,7 +443,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
u16 aggr_limit, legacy = 0, maxampdu;
int i;
- skb = (struct sk_buff *)bf->bf_mpdu;
+ skb = bf->bf_mpdu;
tx_info = IEEE80211_SKB_CB(skb);
rates = tx_info->control.rates;
tx_info_priv = (struct ath_tx_info_priv *)tx_info->rate_driver_data[0];
@@ -498,7 +497,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
struct ath_buf *bf, u16 frmlen)
{
- struct ath_rate_table *rt = sc->cur_rate_table;
+ const struct ath_rate_table *rt = sc->cur_rate_table;
struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
u32 nsymbits, nsymbols, mpdudensity;
@@ -712,6 +711,7 @@ int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
return 0;
if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
+ txtid->state &= ~AGGR_ADDBA_PROGRESS;
txtid->addba_exchangeattempts = 0;
return 0;
}
@@ -972,7 +972,7 @@ int ath_cabq_update(struct ath_softc *sc)
else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
- qi.tqi_readyTime = (sc->hw->conf.beacon_int *
+ qi.tqi_readyTime = (sc->beacon_interval *
sc->config.cabqReadytime) / 100;
ath_txq_update(sc, qnum, &qi);
@@ -1004,7 +1004,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
- if (bf->bf_status & ATH_BUFSTATUS_STALE) {
+ if (bf->bf_stale) {
list_del(&bf->list);
spin_unlock_bh(&txq->axq_lock);
@@ -1071,7 +1071,7 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
if (r)
DPRINTF(sc, ATH_DBG_FATAL,
- "Unable to reset hardware; reset status %u\n",
+ "Unable to reset hardware; reset status %d\n",
r);
spin_unlock_bh(&sc->sc_resetlock);
}
@@ -1408,7 +1408,7 @@ static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
int width, int half_gi, bool shortPreamble)
{
- struct ath_rate_table *rate_table = sc->cur_rate_table;
+ const struct ath_rate_table *rate_table = sc->cur_rate_table;
u32 nbits, nsymbits, duration, nsymbols;
u8 rc;
int streams, pktlen;
@@ -1440,7 +1440,7 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
- struct ath_rate_table *rt = sc->cur_rate_table;
+ const struct ath_rate_table *rt = sc->cur_rate_table;
struct ath9k_11n_rate_series series[4];
struct sk_buff *skb;
struct ieee80211_tx_info *tx_info;
@@ -1452,7 +1452,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
- skb = (struct sk_buff *)bf->bf_mpdu;
+ skb = bf->bf_mpdu;
tx_info = IEEE80211_SKB_CB(skb);
rates = tx_info->control.rates;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -1573,8 +1573,9 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
skb->len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
bf->bf_mpdu = NULL;
- DPRINTF(sc, ATH_DBG_CONFIG,
- "dma_mapping_error() on TX\n");
+ kfree(tx_info_priv);
+ tx_info->rate_driver_data[0] = NULL;
+ DPRINTF(sc, ATH_DBG_FATAL, "dma_mapping_error() on TX\n");
return -ENOMEM;
}
@@ -1586,7 +1587,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_control *txctl)
{
- struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
+ struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath_node *an = NULL;
@@ -1790,6 +1791,16 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb_pull(skb, padsize);
}
+ if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) {
+ sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK;
+ DPRINTF(sc, ATH_DBG_PS, "Going back to sleep after having "
+ "received TX status (0x%x)\n",
+ sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
+ SC_OP_WAIT_FOR_CAB |
+ SC_OP_WAIT_FOR_PSPOLL_DATA |
+ SC_OP_WAIT_FOR_TX_ACK));
+ }
+
if (frame_type == ATH9K_NOT_INTERNAL)
ieee80211_tx_status(hw, skb);
else
@@ -1860,7 +1871,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
int nbad, int txok, bool update_rc)
{
- struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
+ struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
@@ -1941,7 +1952,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* it with the STALE flag.
*/
bf_held = NULL;
- if (bf->bf_status & ATH_BUFSTATUS_STALE) {
+ if (bf->bf_stale) {
bf_held = bf;
if (list_is_last(&bf_held->list, &txq->axq_q)) {
txq->axq_link = NULL;
@@ -1982,7 +1993,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* however leave the last descriptor back as the holding
* descriptor for hw.
*/
- lastbf->bf_status |= ATH_BUFSTATUS_STALE;
+ lastbf->bf_stale = true;
INIT_LIST_HEAD(&bf_head);
if (!list_is_singular(&lastbf->list))
list_cut_position(&bf_head,
@@ -2048,44 +2059,38 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
{
int error = 0;
- do {
- spin_lock_init(&sc->tx.txbuflock);
-
- error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
- "tx", nbufs, 1);
- if (error != 0) {
- DPRINTF(sc, ATH_DBG_FATAL,
- "Failed to allocate tx descriptors: %d\n",
- error);
- break;
- }
+ spin_lock_init(&sc->tx.txbuflock);
- error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
- "beacon", ATH_BCBUF, 1);
- if (error != 0) {
- DPRINTF(sc, ATH_DBG_FATAL,
- "Failed to allocate beacon descriptors: %d\n",
- error);
- break;
- }
+ error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
+ "tx", nbufs, 1);
+ if (error != 0) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "Failed to allocate tx descriptors: %d\n", error);
+ goto err;
+ }
- } while (0);
+ error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
+ "beacon", ATH_BCBUF, 1);
+ if (error != 0) {
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "Failed to allocate beacon descriptors: %d\n", error);
+ goto err;
+ }
+err:
if (error != 0)
ath_tx_cleanup(sc);
return error;
}
-int ath_tx_cleanup(struct ath_softc *sc)
+void ath_tx_cleanup(struct ath_softc *sc)
{
if (sc->beacon.bdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
if (sc->tx.txdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
-
- return 0;
}
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
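As an illustration (not part of this commit), the ath_tx_init() hunk above drops the do { ... } while (0) construct in favour of straight-line allocation with a single error label. A minimal, self-contained C sketch of that unwinding pattern; the helper names below are hypothetical:

#include <stdio.h>

/* Hypothetical allocation helpers: return 0 on success, negative on error. */
static int alloc_tx_descriptors(void)     { return 0; }
static int alloc_beacon_descriptors(void) { return 0; }
static void release_all(void)             { puts("releasing descriptors"); }

static int tx_init_sketch(void)
{
	int error;

	error = alloc_tx_descriptors();
	if (error != 0)
		goto err;

	error = alloc_beacon_descriptors();
	if (error != 0)
		goto err;

err:
	/* Single unwind point: cleanup runs only if something failed. */
	if (error != 0)
		release_all();
	return error;
}

int main(void)
{
	return tx_init_sketch();
}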
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
new file mode 100644
index 00000000000..9949b11cb15
--- /dev/null
+++ b/drivers/net/wireless/ath/main.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Shared library for Atheros wireless LAN cards.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath9k/regd.c b/drivers/net/wireless/ath/regd.c
index 4ca62510229..eef370bd121 100644
--- a/drivers/net/wireless/ath9k/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -16,7 +16,9 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include "ath9k.h"
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+#include "regd.h"
#include "regd_common.h"
/*
@@ -55,7 +57,7 @@
/* Can be used for:
* 0x60, 0x61, 0x62 */
-static const struct ieee80211_regdomain ath9k_world_regdom_60_61_62 = {
+static const struct ieee80211_regdomain ath_world_regdom_60_61_62 = {
.n_reg_rules = 5,
.alpha2 = "99",
.reg_rules = {
@@ -65,7 +67,7 @@ static const struct ieee80211_regdomain ath9k_world_regdom_60_61_62 = {
};
/* Can be used by 0x63 and 0x65 */
-static const struct ieee80211_regdomain ath9k_world_regdom_63_65 = {
+static const struct ieee80211_regdomain ath_world_regdom_63_65 = {
.n_reg_rules = 4,
.alpha2 = "99",
.reg_rules = {
@@ -76,7 +78,7 @@ static const struct ieee80211_regdomain ath9k_world_regdom_63_65 = {
};
/* Can be used by 0x64 only */
-static const struct ieee80211_regdomain ath9k_world_regdom_64 = {
+static const struct ieee80211_regdomain ath_world_regdom_64 = {
.n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
@@ -86,7 +88,7 @@ static const struct ieee80211_regdomain ath9k_world_regdom_64 = {
};
/* Can be used by 0x66 and 0x69 */
-static const struct ieee80211_regdomain ath9k_world_regdom_66_69 = {
+static const struct ieee80211_regdomain ath_world_regdom_66_69 = {
.n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
@@ -96,7 +98,7 @@ static const struct ieee80211_regdomain ath9k_world_regdom_66_69 = {
};
/* Can be used by 0x67, 0x6A and 0x68 */
-static const struct ieee80211_regdomain ath9k_world_regdom_67_68_6A = {
+static const struct ieee80211_regdomain ath_world_regdom_67_68_6A = {
.n_reg_rules = 4,
.alpha2 = "99",
.reg_rules = {
@@ -112,49 +114,51 @@ static inline bool is_wwr_sku(u16 regd)
(regd == WORLD);
}
-static u16 ath9k_regd_get_eepromRD(struct ath_hw *ah)
+static u16 ath_regd_get_eepromRD(struct ath_regulatory *reg)
{
- return ah->regulatory.current_rd & ~WORLDWIDE_ROAMING_FLAG;
+ return reg->current_rd & ~WORLDWIDE_ROAMING_FLAG;
}
-bool ath9k_is_world_regd(struct ath_hw *ah)
+bool ath_is_world_regd(struct ath_regulatory *reg)
{
- return is_wwr_sku(ath9k_regd_get_eepromRD(ah));
+ return is_wwr_sku(ath_regd_get_eepromRD(reg));
}
+EXPORT_SYMBOL(ath_is_world_regd);
-const struct ieee80211_regdomain *ath9k_default_world_regdomain(void)
+static const struct ieee80211_regdomain *ath_default_world_regdomain(void)
{
/* this is the most restrictive */
- return &ath9k_world_regdom_64;
+ return &ath_world_regdom_64;
}
-const struct ieee80211_regdomain *ath9k_world_regdomain(struct ath_hw *ah)
+static const struct
+ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
{
- switch (ah->regulatory.regpair->regDmnEnum) {
+ switch (reg->regpair->regDmnEnum) {
case 0x60:
case 0x61:
case 0x62:
- return &ath9k_world_regdom_60_61_62;
+ return &ath_world_regdom_60_61_62;
case 0x63:
case 0x65:
- return &ath9k_world_regdom_63_65;
+ return &ath_world_regdom_63_65;
case 0x64:
- return &ath9k_world_regdom_64;
+ return &ath_world_regdom_64;
case 0x66:
case 0x69:
- return &ath9k_world_regdom_66_69;
+ return &ath_world_regdom_66_69;
case 0x67:
case 0x68:
case 0x6A:
- return &ath9k_world_regdom_67_68_6A;
+ return &ath_world_regdom_67_68_6A;
default:
WARN_ON(1);
- return ath9k_default_world_regdomain();
+ return ath_default_world_regdomain();
}
}
/* Frequency is one where radar detection is required */
-static bool ath9k_is_radar_freq(u16 center_freq)
+static bool ath_is_radar_freq(u16 center_freq)
{
return (center_freq >= 5260 && center_freq <= 5700);
}
@@ -168,9 +172,9 @@ static bool ath9k_is_radar_freq(u16 center_freq)
* received a beacon on a channel we can enable active scan and
* adhoc (or beaconing).
*/
-static void ath9k_reg_apply_beaconing_flags(
- struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
+static void
+ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
{
enum ieee80211_band band;
struct ieee80211_supported_band *sband;
@@ -191,13 +195,15 @@ static void ath9k_reg_apply_beaconing_flags(
ch = &sband->channels[i];
- if (ath9k_is_radar_freq(ch->center_freq) ||
+ if (ath_is_radar_freq(ch->center_freq) ||
(ch->flags & IEEE80211_CHAN_RADAR))
continue;
if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- r = freq_reg_info(wiphy, ch->center_freq,
- &bandwidth, &reg_rule);
+ r = freq_reg_info(wiphy,
+ ch->center_freq,
+ bandwidth,
+ &reg_rule);
if (r)
continue;
/*
@@ -227,9 +233,9 @@ static void ath9k_reg_apply_beaconing_flags(
}
/* Allows active scan on Ch 12 and 13 */
-static void ath9k_reg_apply_active_scan_flags(
- struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
+static void
+ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
{
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
@@ -261,7 +267,7 @@ static void ath9k_reg_apply_active_scan_flags(
*/
ch = &sband->channels[11]; /* CH 12 */
- r = freq_reg_info(wiphy, ch->center_freq, &bandwidth, &reg_rule);
+ r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
if (!r) {
if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
@@ -269,7 +275,7 @@ static void ath9k_reg_apply_active_scan_flags(
}
ch = &sband->channels[12]; /* CH 13 */
- r = freq_reg_info(wiphy, ch->center_freq, &bandwidth, &reg_rule);
+ r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
if (!r) {
if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
@@ -278,7 +284,7 @@ static void ath9k_reg_apply_active_scan_flags(
}
/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */
-void ath9k_reg_apply_radar_flags(struct wiphy *wiphy)
+static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
{
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
@@ -291,7 +297,7 @@ void ath9k_reg_apply_radar_flags(struct wiphy *wiphy)
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
- if (!ath9k_is_radar_freq(ch->center_freq))
+ if (!ath_is_radar_freq(ch->center_freq))
continue;
/* We always enable radar detection/DFS on this
* frequency range. Additionally we also apply on
@@ -310,37 +316,31 @@ void ath9k_reg_apply_radar_flags(struct wiphy *wiphy)
}
}
-void ath9k_reg_apply_world_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
+static void ath_reg_apply_world_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator,
+ struct ath_regulatory *reg)
{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_hw *ah = sc->sc_ah;
-
- switch (ah->regulatory.regpair->regDmnEnum) {
+ switch (reg->regpair->regDmnEnum) {
case 0x60:
case 0x63:
case 0x66:
case 0x67:
- ath9k_reg_apply_beaconing_flags(wiphy, initiator);
+ ath_reg_apply_beaconing_flags(wiphy, initiator);
break;
case 0x68:
- ath9k_reg_apply_beaconing_flags(wiphy, initiator);
- ath9k_reg_apply_active_scan_flags(wiphy, initiator);
+ ath_reg_apply_beaconing_flags(wiphy, initiator);
+ ath_reg_apply_active_scan_flags(wiphy, initiator);
break;
}
return;
}
-int ath9k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+int ath_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct ath_regulatory *reg)
{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
-
/* We always apply this */
- ath9k_reg_apply_radar_flags(wiphy);
+ ath_reg_apply_radar_flags(wiphy);
switch (request->initiator) {
case NL80211_REGDOM_SET_BY_DRIVER:
@@ -348,39 +348,47 @@ int ath9k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
case NL80211_REGDOM_SET_BY_USER:
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
- if (ath9k_is_world_regd(sc->sc_ah))
- ath9k_reg_apply_world_flags(wiphy, request->initiator);
+ if (ath_is_world_regd(reg))
+ ath_reg_apply_world_flags(wiphy, request->initiator,
+ reg);
break;
}
return 0;
}
+EXPORT_SYMBOL(ath_reg_notifier_apply);
-bool ath9k_regd_is_eeprom_valid(struct ath_hw *ah)
+static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
{
- u16 rd = ath9k_regd_get_eepromRD(ah);
+ u16 rd = ath_regd_get_eepromRD(reg);
int i;
if (rd & COUNTRY_ERD_FLAG) {
/* EEPROM value is a country code */
u16 cc = rd & ~COUNTRY_ERD_FLAG;
+ printk(KERN_DEBUG
+ "ath: EEPROM indicates we should expect "
+ "a country code\n");
for (i = 0; i < ARRAY_SIZE(allCountries); i++)
if (allCountries[i].countryCode == cc)
return true;
} else {
/* EEPROM value is a regpair value */
+ if (rd != CTRY_DEFAULT)
+ printk(KERN_DEBUG "ath: EEPROM indicates we "
+ "should expect a direct regpair map\n");
for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
if (regDomainPairs[i].regDmnEnum == rd)
return true;
}
- DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
- "invalid regulatory domain/country code 0x%x\n", rd);
+ printk(KERN_DEBUG
+ "ath: invalid regulatory domain/country code 0x%x\n", rd);
return false;
}
/* EEPROM country code to regpair mapping */
static struct country_code_to_enum_rd*
-ath9k_regd_find_country(u16 countryCode)
+ath_regd_find_country(u16 countryCode)
{
int i;
@@ -393,7 +401,7 @@ ath9k_regd_find_country(u16 countryCode)
/* EEPROM rd code to regpair mapping */
static struct country_code_to_enum_rd*
-ath9k_regd_find_country_by_rd(int regdmn)
+ath_regd_find_country_by_rd(int regdmn)
{
int i;
@@ -405,13 +413,13 @@ ath9k_regd_find_country_by_rd(int regdmn)
}
/* Returns the map of the EEPROM set RD to a country code */
-static u16 ath9k_regd_get_default_country(u16 rd)
+static u16 ath_regd_get_default_country(u16 rd)
{
if (rd & COUNTRY_ERD_FLAG) {
struct country_code_to_enum_rd *country = NULL;
u16 cc = rd & ~COUNTRY_ERD_FLAG;
- country = ath9k_regd_find_country(cc);
+ country = ath_regd_find_country(cc);
if (country != NULL)
return cc;
}
@@ -420,7 +428,7 @@ static u16 ath9k_regd_get_default_country(u16 rd)
}
static struct reg_dmn_pair_mapping*
-ath9k_get_regpair(int regdmn)
+ath_get_regpair(int regdmn)
{
int i;
@@ -433,87 +441,135 @@ ath9k_get_regpair(int regdmn)
return NULL;
}
-int ath9k_regd_init(struct ath_hw *ah)
+static int
+ath_regd_init_wiphy(struct ath_regulatory *reg,
+ struct wiphy *wiphy,
+ int (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ const struct ieee80211_regdomain *regd;
+
+ wiphy->reg_notifier = reg_notifier;
+ wiphy->strict_regulatory = true;
+
+ if (ath_is_world_regd(reg)) {
+ /*
+ * Anything applied here (prior to wiphy registration) gets
+ * saved on the wiphy orig_* parameters
+ */
+ regd = ath_world_regdomain(reg);
+ wiphy->custom_regulatory = true;
+ wiphy->strict_regulatory = false;
+ } else {
+ /*
+ * This gets applied in the case of the absence of CRDA,
+ * it's our own custom world regulatory domain, similar to
+ * cfg80211's but we enable passive scanning.
+ */
+ regd = ath_default_world_regdomain();
+ }
+ wiphy_apply_custom_regulatory(wiphy, regd);
+ ath_reg_apply_radar_flags(wiphy);
+ ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
+ return 0;
+}
+
+int
+ath_regd_init(struct ath_regulatory *reg,
+ struct wiphy *wiphy,
+ int (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
{
struct country_code_to_enum_rd *country = NULL;
u16 regdmn;
- if (!ath9k_regd_is_eeprom_valid(ah)) {
- DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
- "Invalid EEPROM contents\n");
+ if (!reg)
+ return -EINVAL;
+
+ printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd);
+
+ if (!ath_regd_is_eeprom_valid(reg)) {
+ printk(KERN_ERR "ath: Invalid EEPROM contents\n");
return -EINVAL;
}
- regdmn = ath9k_regd_get_eepromRD(ah);
- ah->regulatory.country_code = ath9k_regd_get_default_country(regdmn);
+ regdmn = ath_regd_get_eepromRD(reg);
+ reg->country_code = ath_regd_get_default_country(regdmn);
- if (ah->regulatory.country_code == CTRY_DEFAULT &&
- regdmn == CTRY_DEFAULT)
- ah->regulatory.country_code = CTRY_UNITED_STATES;
+ if (reg->country_code == CTRY_DEFAULT &&
+ regdmn == CTRY_DEFAULT) {
+ printk(KERN_DEBUG "ath: EEPROM indicates default "
+ "country code should be used\n");
+ reg->country_code = CTRY_UNITED_STATES;
+ }
- if (ah->regulatory.country_code == CTRY_DEFAULT) {
+ if (reg->country_code == CTRY_DEFAULT) {
country = NULL;
} else {
- country = ath9k_regd_find_country(ah->regulatory.country_code);
+ printk(KERN_DEBUG "ath: doing EEPROM country->regdmn "
+ "map search\n");
+ country = ath_regd_find_country(reg->country_code);
if (country == NULL) {
- DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
- "Country is NULL!!!!, cc= %d\n",
- ah->regulatory.country_code);
+ printk(KERN_DEBUG
+ "ath: no valid country maps found for "
+ "country code: 0x%0x\n",
+ reg->country_code);
return -EINVAL;
- } else
+ } else {
regdmn = country->regDmnEnum;
+ printk(KERN_DEBUG "ath: country maps to "
+ "regdmn code: 0x%0x\n",
+ regdmn);
+ }
}
- ah->regulatory.regpair = ath9k_get_regpair(regdmn);
+ reg->regpair = ath_get_regpair(regdmn);
- if (!ah->regulatory.regpair) {
- DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ if (!reg->regpair) {
+ printk(KERN_DEBUG "ath: "
"No regulatory domain pair found, cannot continue\n");
return -EINVAL;
}
if (!country)
- country = ath9k_regd_find_country_by_rd(regdmn);
+ country = ath_regd_find_country_by_rd(regdmn);
if (country) {
- ah->regulatory.alpha2[0] = country->isoName[0];
- ah->regulatory.alpha2[1] = country->isoName[1];
+ reg->alpha2[0] = country->isoName[0];
+ reg->alpha2[1] = country->isoName[1];
} else {
- ah->regulatory.alpha2[0] = '0';
- ah->regulatory.alpha2[1] = '0';
+ reg->alpha2[0] = '0';
+ reg->alpha2[1] = '0';
}
- DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
- "Country alpha2 being used: %c%c\n"
- "Regulatory.Regpair detected: 0x%0x\n",
- ah->regulatory.alpha2[0], ah->regulatory.alpha2[1],
- ah->regulatory.regpair->regDmnEnum);
+ printk(KERN_DEBUG "ath: Country alpha2 being used: %c%c\n",
+ reg->alpha2[0], reg->alpha2[1]);
+ printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
+ reg->regpair->regDmnEnum);
+ ath_regd_init_wiphy(reg, wiphy, reg_notifier);
return 0;
}
+EXPORT_SYMBOL(ath_regd_init);
-u32 ath9k_regd_get_ctl(struct ath_hw *ah, struct ath9k_channel *chan)
+u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
+ enum ieee80211_band band)
{
- u32 ctl = NO_CTL;
-
- if (!ah->regulatory.regpair ||
- (ah->regulatory.country_code == CTRY_DEFAULT &&
- is_wwr_sku(ath9k_regd_get_eepromRD(ah)))) {
- if (IS_CHAN_B(chan))
- ctl = SD_NO_CTL | CTL_11B;
- else if (IS_CHAN_G(chan))
- ctl = SD_NO_CTL | CTL_11G;
- else
- ctl = SD_NO_CTL | CTL_11A;
- return ctl;
+ if (!reg->regpair ||
+ (reg->country_code == CTRY_DEFAULT &&
+ is_wwr_sku(ath_regd_get_eepromRD(reg)))) {
+ return SD_NO_CTL;
}
- if (IS_CHAN_B(chan))
- ctl = ah->regulatory.regpair->reg_2ghz_ctl | CTL_11B;
- else if (IS_CHAN_G(chan))
- ctl = ah->regulatory.regpair->reg_2ghz_ctl | CTL_11G;
- else
- ctl = ah->regulatory.regpair->reg_5ghz_ctl | CTL_11A;
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ return reg->regpair->reg_2ghz_ctl;
+ case IEEE80211_BAND_5GHZ:
+ return reg->regpair->reg_5ghz_ctl;
+ default:
+ return NO_CTL;
+ }
- return ctl;
+ return NO_CTL;
}
+EXPORT_SYMBOL(ath_regd_get_band_ctl);
diff --git a/drivers/net/wireless/ath9k/regd.h b/drivers/net/wireless/ath/regd.h
index 9f5fbd4eea7..07291ccb23f 100644
--- a/drivers/net/wireless/ath9k/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -17,6 +17,25 @@
#ifndef REGD_H
#define REGD_H
+#include <linux/nl80211.h>
+
+#include <net/cfg80211.h>
+
+#define NO_CTL 0xff
+#define SD_NO_CTL 0xE0
+#define NO_CTL 0xff
+#define CTL_MODE_M 7
+#define CTL_11A 0
+#define CTL_11B 1
+#define CTL_11G 2
+#define CTL_2GHT20 5
+#define CTL_5GHT20 6
+#define CTL_2GHT40 7
+#define CTL_5GHT40 8
+
+#define CTRY_DEBUG 0x1ff
+#define CTRY_DEFAULT 0
+
#define COUNTRY_ERD_FLAG 0x8000
#define WORLDWIDE_ROAMING_FLAG 0x4000
@@ -40,7 +59,7 @@ struct country_code_to_enum_rd {
const char *isoName;
};
-struct ath9k_regulatory {
+struct ath_regulatory {
char alpha2[2];
u16 country_code;
u16 max_power_level;
@@ -233,15 +252,14 @@ enum CountryCode {
CTRY_BELGIUM2 = 5002
};
-bool ath9k_is_world_regd(struct ath_hw *ah);
-const struct ieee80211_regdomain *ath9k_world_regdomain(struct ath_hw *ah);
-const struct ieee80211_regdomain *ath9k_default_world_regdomain(void);
-void ath9k_reg_apply_world_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator);
-void ath9k_reg_apply_radar_flags(struct wiphy *wiphy);
-int ath9k_regd_init(struct ath_hw *ah);
-bool ath9k_regd_is_eeprom_valid(struct ath_hw *ah);
-u32 ath9k_regd_get_ctl(struct ath_hw *ah, struct ath9k_channel *chan);
-int ath9k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+bool ath_is_world_regd(struct ath_regulatory *reg);
+int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
+ int (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request));
+u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
+ enum ieee80211_band band);
+int ath_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct ath_regulatory *reg);
#endif
diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 4d0e298cd1c..4d0e298cd1c 100644
--- a/drivers/net/wireless/ath9k/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
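As an illustration (not from this commit), the shared regd code above keys everything off the raw EEPROM regulatory word: the worldwide-roaming flag is masked off first, and bit 15 then decides whether the remainder is a country code or a direct regpair index. A self-contained C sketch using the constants defined in regd.h in this diff; the sample EEPROM value is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define COUNTRY_ERD_FLAG	0x8000	/* bit 15: remainder is a country code */
#define WORLDWIDE_ROAMING_FLAG	0x4000	/* bit 14: world-roaming SKU marker    */

/* Mirrors ath_regd_get_eepromRD(): strip the roaming flag first. */
static uint16_t eeprom_rd(uint16_t current_rd)
{
	return current_rd & ~WORLDWIDE_ROAMING_FLAG;
}

int main(void)
{
	uint16_t rd = eeprom_rd(0x8348);	/* hypothetical EEPROM value */

	if (rd & COUNTRY_ERD_FLAG)
		printf("EEPROM carries a country code: 0x%x\n",
		       (unsigned int)(rd & ~COUNTRY_ERD_FLAG));
	else
		printf("EEPROM carries a direct regpair: 0x%x\n",
		       (unsigned int)rd);
	return 0;
}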
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 27eef8fb710..291a94bd46f 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -818,7 +818,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&priv->irqlock, flags);
spin_unlock_bh(&priv->timerlock);
netif_stop_queue(dev);
- return 1;
+ return NETDEV_TX_BUSY;
}
frame_ctl = IEEE80211_FTYPE_DATA;
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 77406245dc7..ddaa859c349 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -279,7 +279,7 @@ static int atmel_config(struct pcmcia_device *link)
struct pcmcia_device_id *did;
dev = link->priv;
- did = handle_to_dev(link).driver_data;
+ did = dev_get_drvdata(&handle_to_dev(link));
DEBUG(0, "atmel_config(0x%p)\n", link);
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index aab71a70ba7..67f564e3722 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -3,7 +3,6 @@ config B43
depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 && HAS_DMA
select SSB
select FW_LOADER
- select HW_RANDOM
---help---
b43 is a driver for the Broadcom 43xx series wireless devices.
@@ -99,11 +98,11 @@ config B43_LEDS
depends on B43 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = B43)
default y
-# This config option automatically enables b43 RFKILL support,
-# if it's possible.
-config B43_RFKILL
+# This config option automatically enables b43 HW-RNG support,
+# if the HW-RNG core is enabled.
+config B43_HWRNG
bool
- depends on B43 && (RFKILL = y || RFKILL = B43) && RFKILL_INPUT && (INPUT_POLLDEV = y || INPUT_POLLDEV = B43)
+ depends on B43 && (HW_RANDOM = y || HW_RANDOM = B43)
default y
config B43_DEBUG
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 281ef831035..da379f4b0c3 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -13,7 +13,7 @@ b43-y += lo.o
b43-y += wa.o
b43-y += dma.o
b43-$(CONFIG_B43_PIO) += pio.o
-b43-$(CONFIG_B43_RFKILL) += rfkill.o
+b43-y += rfkill.o
b43-$(CONFIG_B43_LEDS) += leds.o
b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
b43-$(CONFIG_B43_DEBUG) += debugfs.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index beaf18d6e8a..f580c2812d9 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -163,6 +163,7 @@ enum {
#define B43_SHM_SH_WLCOREREV 0x0016 /* 802.11 core revision */
#define B43_SHM_SH_PCTLWDPOS 0x0008
#define B43_SHM_SH_RXPADOFF 0x0034 /* RX Padding data offset (PIO only) */
+#define B43_SHM_SH_FWCAPA 0x0042 /* Firmware capabilities (Opensource firmware only) */
#define B43_SHM_SH_PHYVER 0x0050 /* PHY version */
#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */
#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */
@@ -297,6 +298,10 @@ enum {
#define B43_HF_MLADVW 0x001000000000ULL /* N PHY ML ADV workaround (rev >= 13 only) */
#define B43_HF_PR45960W 0x080000000000ULL /* PR 45960 workaround (rev >= 13 only) */
+/* Firmware capabilities field in SHM (Opensource firmware only) */
+#define B43_FWCAPA_HWCRYPTO 0x0001
+#define B43_FWCAPA_QOS 0x0002
+
/* MacFilter offsets. */
#define B43_MACFILTER_SELF 0x0000
#define B43_MACFILTER_BSSID 0x0003
@@ -596,6 +601,13 @@ struct b43_wl {
/* Pointer to the ieee80211 hardware data structure */
struct ieee80211_hw *hw;
+ /* The number of queues that were registered with the mac80211 subsystem
+ * initially. This is a backup copy of hw->queues in case hw->queues has
+ * to be dynamically lowered at runtime (Firmware does not support QoS).
+ * hw->queues has to be restored to the original value before unregistering
+ * from the mac80211 subsystem. */
+ u16 mac80211_initially_registered_queues;
+
struct mutex mutex;
spinlock_t irq_lock;
/* R/W lock for data transmission.
@@ -625,12 +637,11 @@ struct b43_wl {
/* Stats about the wireless interface */
struct ieee80211_low_level_stats ieee_stats;
+#ifdef CONFIG_B43_HWRNG
struct hwrng rng;
- u8 rng_initialized;
+ bool rng_initialized;
char rng_name[30 + 1];
-
- /* The RF-kill button */
- struct b43_rfkill rfkill;
+#endif /* CONFIG_B43_HWRNG */
/* List of all wireless devices on this chip */
struct list_head devlist;
@@ -750,6 +761,8 @@ struct b43_wldev {
bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */
bool radio_hw_enable; /* saved state of radio hardware enabled state */
bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */
+ bool qos_enabled; /* TRUE, if QoS is used. */
+ bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
/* PHY/Radio device. */
struct b43_phy phy;
@@ -776,8 +789,8 @@ struct b43_wldev {
/* Reason code of the last interrupt. */
u32 irq_reason;
u32 dma_reason[6];
- /* saved irq enable/disable state bitfield. */
- u32 irq_savedstate;
+ /* The currently active generic-interrupt mask. */
+ u32 irq_mask;
/* Link Quality calculation context. */
struct b43_noise_calculation noisecalc;
/* if > 0 MAC is suspended. if == 0 MAC is enabled. */
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index eae680b5305..7964cc32b25 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1285,7 +1285,7 @@ static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
{
struct b43_dmaring *ring;
- if (b43_modparam_qos) {
+ if (dev->qos_enabled) {
/* 0 = highest priority */
switch (queue_prio) {
default:
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index 76f4c7bad8b..c8b317094c3 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -28,6 +28,7 @@
#include "b43.h"
#include "leds.h"
+#include "rfkill.h"
static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index,
@@ -87,7 +88,7 @@ static void b43_led_brightness_set(struct led_classdev *led_dev,
}
static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
- const char *name, char *default_trigger,
+ const char *name, const char *default_trigger,
u8 led_index, bool activelow)
{
int err;
@@ -164,10 +165,10 @@ static void b43_map_led(struct b43_wldev *dev,
snprintf(name, sizeof(name),
"b43-%s::radio", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->led_radio, name,
- b43_rfkill_led_name(dev),
+ ieee80211_get_radio_led_name(hw),
led_index, activelow);
- /* Sync the RF-kill LED state with the switch state. */
- if (dev->radio_hw_enable)
+ /* Sync the RF-kill LED state with radio and switch states. */
+ if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev))
b43_led_turn_on(dev, led_index, activelow);
break;
case B43_LED_WEIRD:
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 79b685e300c..6456afebdba 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -80,8 +80,8 @@ static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
-int b43_modparam_qos = 1;
-module_param_named(qos, b43_modparam_qos, int, 0444);
+static int modparam_qos = 1;
+module_param_named(qos, modparam_qos, int, 0444);
MODULE_PARM_DESC(qos, "Enable QOS support (default on)");
static int modparam_btcoex = 1;
@@ -538,6 +538,13 @@ void b43_hf_write(struct b43_wldev *dev, u64 value)
b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi);
}
+/* Read the firmware capabilities bitmask (Opensource firmware only) */
+static u16 b43_fwcapa_read(struct b43_wldev *dev)
+{
+ B43_WARN_ON(!dev->fw.opensource);
+ return b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_FWCAPA);
+}
+
void b43_tsf_read(struct b43_wldev *dev, u64 *tsf)
{
u32 low, high;
@@ -673,32 +680,6 @@ static void b43_short_slot_timing_disable(struct b43_wldev *dev)
b43_set_slot_time(dev, 20);
}
-/* Enable a Generic IRQ. "mask" is the mask of which IRQs to enable.
- * Returns the _previously_ enabled IRQ mask.
- */
-static inline u32 b43_interrupt_enable(struct b43_wldev *dev, u32 mask)
-{
- u32 old_mask;
-
- old_mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK);
- b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, old_mask | mask);
-
- return old_mask;
-}
-
-/* Disable a Generic IRQ. "mask" is the mask of which IRQs to disable.
- * Returns the _previously_ enabled IRQ mask.
- */
-static inline u32 b43_interrupt_disable(struct b43_wldev *dev, u32 mask)
-{
- u32 old_mask;
-
- old_mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK);
- b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, old_mask & ~mask);
-
- return old_mask;
-}
-
/* Synchronize IRQ top- and bottom-half.
* IRQs must be masked before calling this.
* This must not be called with the irq_lock held.
@@ -1593,7 +1574,7 @@ static void handle_irq_beacon(struct b43_wldev *dev)
/* This is the bottom half of the asynchronous beacon update. */
/* Ignore interrupt in the future. */
- dev->irq_savedstate &= ~B43_IRQ_BEACON;
+ dev->irq_mask &= ~B43_IRQ_BEACON;
cmd = b43_read32(dev, B43_MMIO_MACCMD);
beacon0_valid = (cmd & B43_MACCMD_BEACON0_VALID);
@@ -1602,7 +1583,7 @@ static void handle_irq_beacon(struct b43_wldev *dev)
/* Schedule interrupt manually, if busy. */
if (beacon0_valid && beacon1_valid) {
b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_BEACON);
- dev->irq_savedstate |= B43_IRQ_BEACON;
+ dev->irq_mask |= B43_IRQ_BEACON;
return;
}
@@ -1641,11 +1622,9 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) {
spin_lock_irq(&wl->irq_lock);
/* update beacon right away or defer to irq */
- dev->irq_savedstate = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK);
handle_irq_beacon(dev);
/* The handler might have updated the IRQ mask. */
- b43_write32(dev, B43_MMIO_GEN_IRQ_MASK,
- dev->irq_savedstate);
+ b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask);
mmiowb();
spin_unlock_irq(&wl->irq_lock);
}
@@ -1879,7 +1858,7 @@ static void b43_interrupt_tasklet(struct b43_wldev *dev)
if (reason & B43_IRQ_TX_OK)
handle_irq_transmit_status(dev);
- b43_interrupt_enable(dev, dev->irq_savedstate);
+ b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask);
mmiowb();
spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
}
@@ -1893,7 +1872,9 @@ static void b43_interrupt_ack(struct b43_wldev *dev, u32 reason)
b43_write32(dev, B43_MMIO_DMA2_REASON, dev->dma_reason[2]);
b43_write32(dev, B43_MMIO_DMA3_REASON, dev->dma_reason[3]);
b43_write32(dev, B43_MMIO_DMA4_REASON, dev->dma_reason[4]);
+/* Unused ring
b43_write32(dev, B43_MMIO_DMA5_REASON, dev->dma_reason[5]);
+*/
}
/* Interrupt handler top-half */
@@ -1903,18 +1884,19 @@ static irqreturn_t b43_interrupt_handler(int irq, void *dev_id)
struct b43_wldev *dev = dev_id;
u32 reason;
- if (!dev)
- return IRQ_NONE;
+ B43_WARN_ON(!dev);
spin_lock(&dev->wl->irq_lock);
- if (b43_status(dev) < B43_STAT_STARTED)
+ if (unlikely(b43_status(dev) < B43_STAT_STARTED)) {
+ /* This can only happen on shared IRQ lines. */
goto out;
+ }
reason = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
if (reason == 0xffffffff) /* shared IRQ */
goto out;
ret = IRQ_HANDLED;
- reason &= b43_read32(dev, B43_MMIO_GEN_IRQ_MASK);
+ reason &= dev->irq_mask;
if (!reason)
goto out;
@@ -1928,16 +1910,18 @@ static irqreturn_t b43_interrupt_handler(int irq, void *dev_id)
& 0x0001DC00;
dev->dma_reason[4] = b43_read32(dev, B43_MMIO_DMA4_REASON)
& 0x0000DC00;
+/* Unused ring
dev->dma_reason[5] = b43_read32(dev, B43_MMIO_DMA5_REASON)
& 0x0000DC00;
+*/
b43_interrupt_ack(dev, reason);
/* disable all IRQs. They are enabled again in the bottom half. */
- dev->irq_savedstate = b43_interrupt_disable(dev, B43_IRQ_ALL);
+ b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0);
/* save the reason code and call our bottom half. */
dev->irq_reason = reason;
tasklet_schedule(&dev->isr_tasklet);
- out:
+out:
mmiowb();
spin_unlock(&dev->wl->irq_lock);
@@ -2330,12 +2314,34 @@ static int b43_upload_microcode(struct b43_wldev *dev)
dev->fw.patch = fwpatch;
dev->fw.opensource = (fwdate == 0xFFFF);
+ /* Default to use-all-queues. */
+ dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues;
+ dev->qos_enabled = !!modparam_qos;
+ /* Default to firmware/hardware crypto acceleration. */
+ dev->hwcrypto_enabled = 1;
+
if (dev->fw.opensource) {
+ u16 fwcapa;
+
/* Patchlevel info is encoded in the "time" field. */
dev->fw.patch = fwtime;
- b43info(dev->wl, "Loading OpenSource firmware version %u.%u%s\n",
- dev->fw.rev, dev->fw.patch,
- dev->fw.pcm_request_failed ? " (Hardware crypto not supported)" : "");
+ b43info(dev->wl, "Loading OpenSource firmware version %u.%u\n",
+ dev->fw.rev, dev->fw.patch);
+
+ fwcapa = b43_fwcapa_read(dev);
+ if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) {
+ b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n");
+ /* Disable hardware crypto and fall back to software crypto. */
+ dev->hwcrypto_enabled = 0;
+ }
+ if (!(fwcapa & B43_FWCAPA_QOS)) {
+ b43info(dev->wl, "QoS not supported by firmware\n");
+ /* Disable QoS. Tweak hw->queues to 1. It will be restored before
+ * ieee80211_unregister to make sure the networking core can
+ * properly free possible resources. */
+ dev->wl->hw->queues = 1;
+ dev->qos_enabled = 0;
+ }
} else {
b43info(dev->wl, "Loading firmware version %u.%u "
"(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n",
@@ -2980,6 +2986,7 @@ static void b43_security_init(struct b43_wldev *dev)
b43_clear_keys(dev);
}
+#ifdef CONFIG_B43_HWRNG
static int b43_rng_read(struct hwrng *rng, u32 *data)
{
struct b43_wl *wl = (struct b43_wl *)rng->priv;
@@ -2995,17 +3002,21 @@ static int b43_rng_read(struct hwrng *rng, u32 *data)
return (sizeof(u16));
}
+#endif /* CONFIG_B43_HWRNG */
static void b43_rng_exit(struct b43_wl *wl)
{
+#ifdef CONFIG_B43_HWRNG
if (wl->rng_initialized)
hwrng_unregister(&wl->rng);
+#endif /* CONFIG_B43_HWRNG */
}
static int b43_rng_init(struct b43_wl *wl)
{
- int err;
+ int err = 0;
+#ifdef CONFIG_B43_HWRNG
snprintf(wl->rng_name, ARRAY_SIZE(wl->rng_name),
"%s_%s", KBUILD_MODNAME, wiphy_name(wl->hw->wiphy));
wl->rng.name = wl->rng_name;
@@ -3018,6 +3029,7 @@ static int b43_rng_init(struct b43_wl *wl)
b43err(wl, "Failed to register the random "
"number generator (%d)\n", err);
}
+#endif /* CONFIG_B43_HWRNG */
return err;
}
@@ -3485,14 +3497,9 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
if (phy->ops->set_rx_antenna)
phy->ops->set_rx_antenna(dev, antenna);
- /* Update templates for AP/mesh mode. */
- if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
- b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
- b43_set_beacon_int(dev, conf->beacon_int);
-
if (!!conf->radio_enabled != phy->radio_on) {
if (conf->radio_enabled) {
- b43_software_rfkill(dev, RFKILL_STATE_UNBLOCKED);
+ b43_software_rfkill(dev, false);
b43info(dev->wl, "Radio turned on by software\n");
if (!dev->radio_hw_enable) {
b43info(dev->wl, "The hardware RF-kill button "
@@ -3500,7 +3507,7 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
"Press the button to turn it on.\n");
}
} else {
- b43_software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED);
+ b43_software_rfkill(dev, true);
b43info(dev->wl, "Radio turned off by software\n");
}
}
@@ -3565,14 +3572,45 @@ static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
+ unsigned long flags;
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (!dev || b43_status(dev) < B43_STAT_STARTED)
goto out_unlock_mutex;
+
+ B43_WARN_ON(wl->vif != vif);
+
+ spin_lock_irqsave(&wl->irq_lock, flags);
+ if (changed & BSS_CHANGED_BSSID) {
+ if (conf->bssid)
+ memcpy(wl->bssid, conf->bssid, ETH_ALEN);
+ else
+ memset(wl->bssid, 0, ETH_ALEN);
+ }
+
+ if (b43_status(dev) >= B43_STAT_INITIALIZED) {
+ if (changed & BSS_CHANGED_BEACON &&
+ (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
+ b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) ||
+ b43_is_mode(wl, NL80211_IFTYPE_ADHOC)))
+ b43_update_templates(wl);
+
+ if (changed & BSS_CHANGED_BSSID)
+ b43_write_mac_bssid_templates(dev);
+ }
+ spin_unlock_irqrestore(&wl->irq_lock, flags);
+
b43_mac_suspend(dev);
+ /* Update templates for AP/mesh mode. */
+ if (changed & BSS_CHANGED_BEACON_INT &&
+ (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
+ b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) ||
+ b43_is_mode(wl, NL80211_IFTYPE_ADHOC)))
+ b43_set_beacon_int(dev, conf->beacon_int);
+
if (changed & BSS_CHANGED_BASIC_RATES)
b43_update_basic_rates(dev, conf->basic_rates);
@@ -3586,8 +3624,6 @@ static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
b43_mac_enable(dev);
out_unlock_mutex:
mutex_unlock(&wl->mutex);
-
- return;
}
static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -3620,7 +3656,7 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
goto out_unlock;
- if (dev->fw.pcm_request_failed) {
+ if (dev->fw.pcm_request_failed || !dev->hwcrypto_enabled) {
/* We don't have firmware for the crypto engine.
* Must use software-crypto. */
err = -EOPNOTSUPP;
@@ -3630,7 +3666,7 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
err = -EINVAL;
switch (key->alg) {
case ALG_WEP:
- if (key->keylen == LEN_WEP40)
+ if (key->keylen == WLAN_KEY_LEN_WEP40)
algorithm = B43_SEC_ALGO_WEP40;
else
algorithm = B43_SEC_ALGO_WEP104;
@@ -3745,41 +3781,6 @@ static void b43_op_configure_filter(struct ieee80211_hw *hw,
spin_unlock_irqrestore(&wl->irq_lock, flags);
}
-static int b43_op_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct b43_wl *wl = hw_to_b43_wl(hw);
- struct b43_wldev *dev = wl->current_dev;
- unsigned long flags;
-
- if (!dev)
- return -ENODEV;
- mutex_lock(&wl->mutex);
- spin_lock_irqsave(&wl->irq_lock, flags);
- B43_WARN_ON(wl->vif != vif);
- if (conf->bssid)
- memcpy(wl->bssid, conf->bssid, ETH_ALEN);
- else
- memset(wl->bssid, 0, ETH_ALEN);
- if (b43_status(dev) >= B43_STAT_INITIALIZED) {
- if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
- b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) {
- B43_WARN_ON(vif->type != wl->if_type);
- if (conf->changed & IEEE80211_IFCC_BEACON)
- b43_update_templates(wl);
- } else if (b43_is_mode(wl, NL80211_IFTYPE_ADHOC)) {
- if (conf->changed & IEEE80211_IFCC_BEACON)
- b43_update_templates(wl);
- }
- b43_write_mac_bssid_templates(dev);
- }
- spin_unlock_irqrestore(&wl->irq_lock, flags);
- mutex_unlock(&wl->mutex);
-
- return 0;
-}
-
/* Locking: wl->mutex */
static void b43_wireless_core_stop(struct b43_wldev *dev)
{
@@ -3793,7 +3794,7 @@ static void b43_wireless_core_stop(struct b43_wldev *dev)
* setting the status to INITIALIZED, as the interrupt handler
* won't care about IRQs then. */
spin_lock_irqsave(&wl->irq_lock, flags);
- dev->irq_savedstate = b43_interrupt_disable(dev, B43_IRQ_ALL);
+ b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0);
b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* flush */
spin_unlock_irqrestore(&wl->irq_lock, flags);
b43_synchronize_irq(dev);
@@ -3834,7 +3835,7 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
/* Start data flow (TX/RX). */
b43_mac_enable(dev);
- b43_interrupt_enable(dev, dev->irq_savedstate);
+ b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask);
/* Start maintenance work */
b43_periodic_tasks_setup(dev);
@@ -3997,9 +3998,9 @@ static void setup_struct_wldev_for_init(struct b43_wldev *dev)
/* IRQ related flags */
dev->irq_reason = 0;
memset(dev->dma_reason, 0, sizeof(dev->dma_reason));
- dev->irq_savedstate = B43_IRQ_MASKTEMPLATE;
+ dev->irq_mask = B43_IRQ_MASKTEMPLATE;
if (b43_modparam_verbose < B43_VERBOSITY_DEBUG)
- dev->irq_savedstate &= ~B43_IRQ_PHY_TXERR;
+ dev->irq_mask &= ~B43_IRQ_PHY_TXERR;
dev->mac_suspended = 1;
@@ -4326,7 +4327,6 @@ static int b43_op_start(struct ieee80211_hw *hw)
struct b43_wldev *dev = wl->current_dev;
int did_init = 0;
int err = 0;
- bool do_rfkill_exit = 0;
/* Kill all old instance specific information to make sure
* the card won't use it in the short timeframe between start
@@ -4340,18 +4340,12 @@ static int b43_op_start(struct ieee80211_hw *hw)
wl->beacon1_uploaded = 0;
wl->beacon_templates_virgin = 1;
- /* First register RFkill.
- * LEDs that are registered later depend on it. */
- b43_rfkill_init(dev);
-
mutex_lock(&wl->mutex);
if (b43_status(dev) < B43_STAT_INITIALIZED) {
err = b43_wireless_core_init(dev);
- if (err) {
- do_rfkill_exit = 1;
+ if (err)
goto out_mutex_unlock;
- }
did_init = 1;
}
@@ -4360,17 +4354,16 @@ static int b43_op_start(struct ieee80211_hw *hw)
if (err) {
if (did_init)
b43_wireless_core_exit(dev);
- do_rfkill_exit = 1;
goto out_mutex_unlock;
}
}
+ /* XXX: only do if device doesn't support rfkill irq */
+ wiphy_rfkill_start_polling(hw->wiphy);
+
out_mutex_unlock:
mutex_unlock(&wl->mutex);
- if (do_rfkill_exit)
- b43_rfkill_exit(dev);
-
return err;
}
@@ -4379,7 +4372,6 @@ static void b43_op_stop(struct ieee80211_hw *hw)
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
- b43_rfkill_exit(dev);
cancel_work_sync(&(wl->beacon_update_trigger));
mutex_lock(&wl->mutex);
@@ -4449,7 +4441,6 @@ static const struct ieee80211_ops b43_hw_ops = {
.remove_interface = b43_op_remove_interface,
.config = b43_op_config,
.bss_info_changed = b43_op_bss_info_changed,
- .config_interface = b43_op_config_interface,
.configure_filter = b43_op_configure_filter,
.set_key = b43_op_set_key,
.get_stats = b43_op_get_stats,
@@ -4462,6 +4453,7 @@ static const struct ieee80211_ops b43_hw_ops = {
.sta_notify = b43_op_sta_notify,
.sw_scan_start = b43_op_sw_scan_start_notifier,
.sw_scan_complete = b43_op_sw_scan_complete_notifier,
+ .rfkill_poll = b43_rfkill_poll,
};
/* Hard-reset the chip. Do not call this directly.
@@ -4764,6 +4756,7 @@ static int b43_wireless_init(struct ssb_device *dev)
b43err(NULL, "Could not allocate ieee80211 device\n");
goto out;
}
+ wl = hw_to_b43_wl(hw);
/* fill hw info */
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
@@ -4777,7 +4770,8 @@ static int b43_wireless_init(struct ssb_device *dev)
BIT(NL80211_IFTYPE_WDS) |
BIT(NL80211_IFTYPE_ADHOC);
- hw->queues = b43_modparam_qos ? 4 : 1;
+ hw->queues = modparam_qos ? 4 : 1;
+ wl->mac80211_initially_registered_queues = hw->queues;
hw->max_rates = 2;
SET_IEEE80211_DEV(hw, dev->dev);
if (is_valid_ether_addr(sprom->et1mac))
@@ -4785,9 +4779,7 @@ static int b43_wireless_init(struct ssb_device *dev)
else
SET_IEEE80211_PERM_ADDR(hw, sprom->il0mac);
- /* Get and initialize struct b43_wl */
- wl = hw_to_b43_wl(hw);
- memset(wl, 0, sizeof(*wl));
+ /* Initialize struct b43_wl */
wl->hw = hw;
spin_lock_init(&wl->irq_lock);
rwlock_init(&wl->tx_lock);
@@ -4853,8 +4845,13 @@ static void b43_remove(struct ssb_device *dev)
cancel_work_sync(&wldev->restart_work);
B43_WARN_ON(!wl);
- if (wl->current_dev == wldev)
+ if (wl->current_dev == wldev) {
+ /* Restore the queues count before unregistering, because firmware detection
+ * might have modified it. Restoring is important, so the networking
+ * stack can properly free resources. */
+ wl->hw->queues = wl->mac80211_initially_registered_queues;
ieee80211_unregister_hw(wl->hw);
+ }
b43_one_core_detach(dev);
@@ -4949,7 +4946,7 @@ static struct ssb_driver b43_ssb_driver = {
static void b43_print_driverinfo(void)
{
const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "",
- *feat_leds = "", *feat_rfkill = "";
+ *feat_leds = "";
#ifdef CONFIG_B43_PCI_AUTOSELECT
feat_pci = "P";
@@ -4963,14 +4960,11 @@ static void b43_print_driverinfo(void)
#ifdef CONFIG_B43_LEDS
feat_leds = "L";
#endif
-#ifdef CONFIG_B43_RFKILL
- feat_rfkill = "R";
-#endif
printk(KERN_INFO "Broadcom 43xx driver loaded "
- "[ Features: %s%s%s%s%s, Firmware-ID: "
+ "[ Features: %s%s%s%s, Firmware-ID: "
B43_SUPPORTED_FIRMWARE_ID " ]\n",
feat_pci, feat_pcmcia, feat_nphy,
- feat_leds, feat_rfkill);
+ feat_leds);
}
static int __init b43_init(void)
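As an illustration (not from this commit), the firmware-capability handling added above boils down to testing two SHM bits and shrinking the queue count when QoS is absent. A self-contained C sketch using the B43_FWCAPA_* bits defined in b43.h in this diff; the device structure below is a stand-in:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define B43_FWCAPA_HWCRYPTO	0x0001
#define B43_FWCAPA_QOS		0x0002

struct fake_dev {
	bool hwcrypto_enabled;
	bool qos_enabled;
	int queues;	/* mac80211 queue count: 4 with QoS, 1 without */
};

static void apply_fwcapa(struct fake_dev *dev, uint16_t fwcapa)
{
	dev->hwcrypto_enabled = fwcapa & B43_FWCAPA_HWCRYPTO;
	dev->qos_enabled = fwcapa & B43_FWCAPA_QOS;
	/* Without QoS support the driver falls back to a single queue; the
	 * original count has to be restored before unregistering from
	 * mac80211, as the hunks above note. */
	dev->queues = dev->qos_enabled ? 4 : 1;
}

int main(void)
{
	struct fake_dev dev;

	apply_fwcapa(&dev, B43_FWCAPA_HWCRYPTO);	/* QoS bit not set */
	printf("hwcrypto=%d qos=%d queues=%d\n",
	       dev.hwcrypto_enabled, dev.qos_enabled, dev.queues);
	return 0;
}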
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index 40abcf5d1b4..950fb1b0546 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -39,7 +39,6 @@
#define PAD_BYTES(nr_bytes) P4D_BYTES( __LINE__ , (nr_bytes))
-extern int b43_modparam_qos;
extern int b43_modparam_verbose;
/* Logmessage verbosity levels. Update the b43_modparam_verbose helptext, if
diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c
index c836c077d51..816e028a262 100644
--- a/drivers/net/wireless/b43/phy_a.c
+++ b/drivers/net/wireless/b43/phy_a.c
@@ -480,11 +480,11 @@ static bool b43_aphy_op_supports_hwpctl(struct b43_wldev *dev)
}
static void b43_aphy_op_software_rfkill(struct b43_wldev *dev,
- enum rfkill_state state)
+ bool blocked)
{
struct b43_phy *phy = &dev->phy;
- if (state == RFKILL_STATE_UNBLOCKED) {
+ if (!blocked) {
if (phy->radio_on)
return;
b43_radio_write16(dev, 0x0004, 0x00C0);
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index e176b6e0d9c..6d241622210 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -84,7 +84,7 @@ int b43_phy_init(struct b43_wldev *dev)
phy->channel = ops->get_default_chan(dev);
- ops->software_rfkill(dev, RFKILL_STATE_UNBLOCKED);
+ ops->software_rfkill(dev, false);
err = ops->init(dev);
if (err) {
b43err(dev->wl, "PHY init failed\n");
@@ -104,7 +104,7 @@ err_phy_exit:
if (ops->exit)
ops->exit(dev);
err_block_rf:
- ops->software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED);
+ ops->software_rfkill(dev, true);
return err;
}
@@ -113,7 +113,7 @@ void b43_phy_exit(struct b43_wldev *dev)
{
const struct b43_phy_operations *ops = dev->phy.ops;
- ops->software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED);
+ ops->software_rfkill(dev, true);
if (ops->exit)
ops->exit(dev);
}
@@ -295,18 +295,13 @@ err_restore_cookie:
return err;
}
-void b43_software_rfkill(struct b43_wldev *dev, enum rfkill_state state)
+void b43_software_rfkill(struct b43_wldev *dev, bool blocked)
{
struct b43_phy *phy = &dev->phy;
- if (state == RFKILL_STATE_HARD_BLOCKED) {
- /* We cannot hardware-block the device */
- state = RFKILL_STATE_SOFT_BLOCKED;
- }
-
b43_mac_suspend(dev);
- phy->ops->software_rfkill(dev, state);
- phy->radio_on = (state == RFKILL_STATE_UNBLOCKED);
+ phy->ops->software_rfkill(dev, blocked);
+ phy->radio_on = !blocked;
b43_mac_enable(dev);
}
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index b2d99101947..44cc918e4fc 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -1,7 +1,7 @@
#ifndef LINUX_B43_PHY_COMMON_H_
#define LINUX_B43_PHY_COMMON_H_
-#include <linux/rfkill.h>
+#include <linux/types.h>
struct b43_wldev;
@@ -159,7 +159,7 @@ struct b43_phy_operations {
/* Radio */
bool (*supports_hwpctl)(struct b43_wldev *dev);
- void (*software_rfkill)(struct b43_wldev *dev, enum rfkill_state state);
+ void (*software_rfkill)(struct b43_wldev *dev, bool blocked);
void (*switch_analog)(struct b43_wldev *dev, bool on);
int (*switch_channel)(struct b43_wldev *dev, unsigned int new_channel);
unsigned int (*get_default_chan)(struct b43_wldev *dev);
@@ -364,7 +364,7 @@ int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel);
/**
* b43_software_rfkill - Turn the radio ON or OFF in software.
*/
-void b43_software_rfkill(struct b43_wldev *dev, enum rfkill_state state);
+void b43_software_rfkill(struct b43_wldev *dev, bool blocked);
/**
* b43_phy_txpower_check - Check TX power output.
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index e7b98f013b0..5300232449f 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -2592,7 +2592,7 @@ static bool b43_gphy_op_supports_hwpctl(struct b43_wldev *dev)
}
static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
- enum rfkill_state state)
+ bool blocked)
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_g *gphy = phy->g;
@@ -2600,7 +2600,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
might_sleep();
- if (state == RFKILL_STATE_UNBLOCKED) {
+ if (!blocked) {
/* Turn radio ON */
if (phy->radio_on)
return;
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 58e319d6b1e..ea0d3a3a6a6 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -488,7 +488,7 @@ static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
}
static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev,
- enum rfkill_state state)
+ bool blocked)
{
//TODO
}
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 8bcfda5f3f0..be7b5604947 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -579,7 +579,7 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
}
static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
- enum rfkill_state state)
+ bool blocked)
{//TODO
}
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 8cd9776752e..69138e8c1db 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -313,7 +313,7 @@ static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev,
{
struct b43_pio_txqueue *q;
- if (b43_modparam_qos) {
+ if (dev->qos_enabled) {
/* 0 = highest priority */
switch (queue_prio) {
default:
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index afad4235869..31e55999893 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -22,15 +22,11 @@
*/
-#include "rfkill.h"
#include "b43.h"
-#include "phy_common.h"
-
-#include <linux/kmod.h>
/* Returns TRUE, if the radio is enabled in hardware. */
-static bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
+bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
{
if (dev->phy.rev >= 3) {
if (!(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI)
@@ -45,166 +41,39 @@ static bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
}
/* The poll callback for the hardware button. */
-static void b43_rfkill_poll(struct input_polled_dev *poll_dev)
+void b43_rfkill_poll(struct ieee80211_hw *hw)
{
- struct b43_wldev *dev = poll_dev->private;
- struct b43_wl *wl = dev->wl;
+ struct b43_wl *wl = hw_to_b43_wl(hw);
+ struct b43_wldev *dev = wl->current_dev;
+ struct ssb_bus *bus = dev->dev->bus;
bool enabled;
- bool report_change = 0;
+ bool brought_up = false;
mutex_lock(&wl->mutex);
if (unlikely(b43_status(dev) < B43_STAT_INITIALIZED)) {
- mutex_unlock(&wl->mutex);
- return;
+ if (ssb_bus_powerup(bus, 0)) {
+ mutex_unlock(&wl->mutex);
+ return;
+ }
+ ssb_device_enable(dev->dev, 0);
+ brought_up = true;
}
+
enabled = b43_is_hw_radio_enabled(dev);
+
if (unlikely(enabled != dev->radio_hw_enable)) {
dev->radio_hw_enable = enabled;
- report_change = 1;
b43info(wl, "Radio hardware status changed to %s\n",
enabled ? "ENABLED" : "DISABLED");
+ wiphy_rfkill_set_hw_state(hw->wiphy, !enabled);
+ if (enabled != dev->phy.radio_on)
+ b43_software_rfkill(dev, !enabled);
}
- mutex_unlock(&wl->mutex);
- /* send the radio switch event to the system - note both a key press
- * and a release are required */
- if (unlikely(report_change)) {
- input_report_key(poll_dev->input, KEY_WLAN, 1);
- input_report_key(poll_dev->input, KEY_WLAN, 0);
+ if (brought_up) {
+ ssb_device_disable(dev->dev, 0);
+ ssb_bus_may_powerdown(bus);
}
-}
-
-/* Called when the RFKILL toggled in software. */
-static int b43_rfkill_soft_toggle(void *data, enum rfkill_state state)
-{
- struct b43_wldev *dev = data;
- struct b43_wl *wl = dev->wl;
- int err = -EBUSY;
- if (!wl->rfkill.registered)
- return 0;
-
- mutex_lock(&wl->mutex);
- if (b43_status(dev) < B43_STAT_INITIALIZED)
- goto out_unlock;
- err = 0;
- switch (state) {
- case RFKILL_STATE_UNBLOCKED:
- if (!dev->radio_hw_enable) {
- /* No luck. We can't toggle the hardware RF-kill
- * button from software. */
- err = -EBUSY;
- goto out_unlock;
- }
- if (!dev->phy.radio_on)
- b43_software_rfkill(dev, state);
- break;
- case RFKILL_STATE_SOFT_BLOCKED:
- if (dev->phy.radio_on)
- b43_software_rfkill(dev, state);
- break;
- default:
- b43warn(wl, "Received unexpected rfkill state %d.\n", state);
- break;
- }
-out_unlock:
mutex_unlock(&wl->mutex);
-
- return err;
-}
-
-char *b43_rfkill_led_name(struct b43_wldev *dev)
-{
- struct b43_rfkill *rfk = &(dev->wl->rfkill);
-
- if (!rfk->registered)
- return NULL;
- return rfkill_get_led_name(rfk->rfkill);
-}
-
-void b43_rfkill_init(struct b43_wldev *dev)
-{
- struct b43_wl *wl = dev->wl;
- struct b43_rfkill *rfk = &(wl->rfkill);
- int err;
-
- rfk->registered = 0;
-
- rfk->rfkill = rfkill_allocate(dev->dev->dev, RFKILL_TYPE_WLAN);
- if (!rfk->rfkill)
- goto out_error;
- snprintf(rfk->name, sizeof(rfk->name),
- "b43-%s", wiphy_name(wl->hw->wiphy));
- rfk->rfkill->name = rfk->name;
- rfk->rfkill->state = RFKILL_STATE_UNBLOCKED;
- rfk->rfkill->data = dev;
- rfk->rfkill->toggle_radio = b43_rfkill_soft_toggle;
- rfk->rfkill->user_claim_unsupported = 1;
-
- rfk->poll_dev = input_allocate_polled_device();
- if (!rfk->poll_dev) {
- rfkill_free(rfk->rfkill);
- goto err_freed_rfk;
- }
-
- rfk->poll_dev->private = dev;
- rfk->poll_dev->poll = b43_rfkill_poll;
- rfk->poll_dev->poll_interval = 1000; /* msecs */
-
- rfk->poll_dev->input->name = rfk->name;
- rfk->poll_dev->input->id.bustype = BUS_HOST;
- rfk->poll_dev->input->id.vendor = dev->dev->bus->boardinfo.vendor;
- rfk->poll_dev->input->evbit[0] = BIT(EV_KEY);
- set_bit(KEY_WLAN, rfk->poll_dev->input->keybit);
-
- err = rfkill_register(rfk->rfkill);
- if (err)
- goto err_free_polldev;
-
-#ifdef CONFIG_RFKILL_INPUT_MODULE
- /* B43 RF-kill isn't useful without the rfkill-input subsystem.
- * Try to load the module. */
- err = request_module("rfkill-input");
- if (err)
- b43warn(wl, "Failed to load the rfkill-input module. "
- "The built-in radio LED will not work.\n");
-#endif /* CONFIG_RFKILL_INPUT */
-
-#if !defined(CONFIG_RFKILL_INPUT) && !defined(CONFIG_RFKILL_INPUT_MODULE)
- b43warn(wl, "The rfkill-input subsystem is not available. "
- "The built-in radio LED will not work.\n");
-#endif
-
- err = input_register_polled_device(rfk->poll_dev);
- if (err)
- goto err_unreg_rfk;
-
- rfk->registered = 1;
-
- return;
-err_unreg_rfk:
- rfkill_unregister(rfk->rfkill);
-err_free_polldev:
- input_free_polled_device(rfk->poll_dev);
- rfk->poll_dev = NULL;
-err_freed_rfk:
- rfk->rfkill = NULL;
-out_error:
- rfk->registered = 0;
- b43warn(wl, "RF-kill button init failed\n");
-}
-
-void b43_rfkill_exit(struct b43_wldev *dev)
-{
- struct b43_rfkill *rfk = &(dev->wl->rfkill);
-
- if (!rfk->registered)
- return;
- rfk->registered = 0;
-
- input_unregister_polled_device(rfk->poll_dev);
- rfkill_unregister(rfk->rfkill);
- input_free_polled_device(rfk->poll_dev);
- rfk->poll_dev = NULL;
- rfk->rfkill = NULL;
}
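As an illustration (not from this commit), the reworked poll callback above reduces to: read the hardware switch, report a change, and bring the software radio state back in line. A self-contained C sketch with stand-in global state; in the driver the two commented calls are wiphy_rfkill_set_hw_state() and b43_software_rfkill():

#include <stdbool.h>
#include <stdio.h>

static bool hw_switch_enabled;	/* state of the physical RF-kill switch  */
static bool radio_hw_enable;	/* last switch state the driver observed */
static bool radio_on;		/* state the PHY code last programmed    */

static void rfkill_poll_sketch(void)
{
	bool enabled = hw_switch_enabled;

	if (enabled != radio_hw_enable) {
		radio_hw_enable = enabled;
		printf("Radio hardware status changed to %s\n",
		       enabled ? "ENABLED" : "DISABLED");
		/* wiphy_rfkill_set_hw_state(wiphy, !enabled); in the driver */
		if (enabled != radio_on)
			radio_on = enabled;	/* b43_software_rfkill(dev, !enabled); */
	}
}

int main(void)
{
	hw_switch_enabled = true;
	rfkill_poll_sketch();	/* reports ENABLED, turns the radio on */
	hw_switch_enabled = false;
	rfkill_poll_sketch();	/* reports DISABLED, blocks the radio  */
	return 0;
}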
diff --git a/drivers/net/wireless/b43/rfkill.h b/drivers/net/wireless/b43/rfkill.h
index adacf936d81..f046c3ca051 100644
--- a/drivers/net/wireless/b43/rfkill.h
+++ b/drivers/net/wireless/b43/rfkill.h
@@ -1,52 +1,11 @@
#ifndef B43_RFKILL_H_
#define B43_RFKILL_H_
+struct ieee80211_hw;
struct b43_wldev;
+void b43_rfkill_poll(struct ieee80211_hw *hw);
-#ifdef CONFIG_B43_RFKILL
-
-#include <linux/rfkill.h>
-#include <linux/input-polldev.h>
-
-
-struct b43_rfkill {
- /* The RFKILL subsystem data structure */
- struct rfkill *rfkill;
- /* The poll device for the RFKILL input button */
- struct input_polled_dev *poll_dev;
- /* Did initialization succeed? Used for freeing. */
- bool registered;
- /* The unique name of this rfkill switch */
- char name[sizeof("b43-phy4294967295")];
-};
-
-/* The init function returns void, because we are not interested
- * in failing the b43 init process when rfkill init failed. */
-void b43_rfkill_init(struct b43_wldev *dev);
-void b43_rfkill_exit(struct b43_wldev *dev);
-
-char * b43_rfkill_led_name(struct b43_wldev *dev);
-
-
-#else /* CONFIG_B43_RFKILL */
-/* No RFKILL support. */
-
-struct b43_rfkill {
- /* empty */
-};
-
-static inline void b43_rfkill_init(struct b43_wldev *dev)
-{
-}
-static inline void b43_rfkill_exit(struct b43_wldev *dev)
-{
-}
-static inline char * b43_rfkill_led_name(struct b43_wldev *dev)
-{
- return NULL;
-}
-
-#endif /* CONFIG_B43_RFKILL */
+bool b43_is_hw_radio_enabled(struct b43_wldev *dev);
#endif /* B43_RFKILL_H_ */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index a63d88841df..55f36a7254d 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -118,7 +118,6 @@ u8 b43_plcp_get_ratecode_ofdm(const u8 bitrate)
void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
const u16 octets, const u8 bitrate)
{
- __le32 *data = &(plcp->data);
__u8 *raw = plcp->raw;
if (b43_is_ofdm_rate(bitrate)) {
@@ -127,7 +126,7 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
d = b43_plcp_get_ratecode_ofdm(bitrate);
B43_WARN_ON(octets & 0xF000);
d |= (octets << 5);
- *data = cpu_to_le32(d);
+ plcp->data = cpu_to_le32(d);
} else {
u32 plen;
@@ -141,7 +140,7 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
raw[1] = 0x04;
} else
raw[1] = 0x04;
- *data |= cpu_to_le32(plen << 16);
+ plcp->data |= cpu_to_le32(plen << 16);
raw[0] = b43_plcp_get_ratecode_cck(bitrate);
}
}
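Note: the xmit.c hunk drops the local __le32 *data alias and stores through plcp->data directly; since the CCK path sets raw[0]/raw[1] and then ORs the length into data, the two fields clearly alias the same four bytes. A minimal reconstruction of the assumed layout (the driver's real b43_plcp_hdr4 definition is the authority):

#include <linux/types.h>

/* Assumed layout only, for illustration. */
union plcp_hdr4_sketch {
	__le32 data;	/* whole PLCP word, little-endian */
	__u8 raw[4];	/* byte-wise view used by the CCK path */
};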
diff --git a/drivers/net/wireless/b43legacy/Kconfig b/drivers/net/wireless/b43legacy/Kconfig
index aef2298d37a..94a46347805 100644
--- a/drivers/net/wireless/b43legacy/Kconfig
+++ b/drivers/net/wireless/b43legacy/Kconfig
@@ -3,7 +3,6 @@ config B43LEGACY
depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 && HAS_DMA
select SSB
select FW_LOADER
- select HW_RANDOM
---help---
b43legacy is a driver for 802.11b devices from Broadcom (BCM4301 and
BCM4303) and early model 802.11g chips (BCM4306 Ver. 2) used in the
@@ -43,12 +42,11 @@ config B43LEGACY_LEDS
depends on B43LEGACY && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = B43LEGACY)
default y
-# RFKILL support
-# This config option automatically enables b43legacy RFKILL support,
-# if it's possible.
-config B43LEGACY_RFKILL
+# This config option automatically enables b43 HW-RNG support,
+# if the HW-RNG core is enabled.
+config B43LEGACY_HWRNG
bool
- depends on B43LEGACY && (RFKILL = y || RFKILL = B43LEGACY) && RFKILL_INPUT && (INPUT_POLLDEV = y || INPUT_POLLDEV = B43LEGACY)
+ depends on B43LEGACY && (HW_RANDOM = y || HW_RANDOM = B43LEGACY)
default y
config B43LEGACY_DEBUG
diff --git a/drivers/net/wireless/b43legacy/Makefile b/drivers/net/wireless/b43legacy/Makefile
index 80cdb73bd14..227a77e8436 100644
--- a/drivers/net/wireless/b43legacy/Makefile
+++ b/drivers/net/wireless/b43legacy/Makefile
@@ -6,7 +6,7 @@ b43legacy-y += radio.o
b43legacy-y += sysfs.o
b43legacy-y += xmit.o
# b43 RFKILL button support
-b43legacy-$(CONFIG_B43LEGACY_RFKILL) += rfkill.o
+b43legacy-y += rfkill.o
# b43legacy LED support
b43legacy-$(CONFIG_B43LEGACY_LEDS) += leds.o
# b43legacy debugging
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 97b0e06dfe2..77fda148ac4 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -59,7 +59,8 @@
#define B43legacy_MMIO_XMITSTAT_1 0x174
#define B43legacy_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */
#define B43legacy_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */
-
+#define B43legacy_MMIO_TSF_CFP_REP 0x188
+#define B43legacy_MMIO_TSF_CFP_START 0x18C
/* 32-bit DMA */
#define B43legacy_MMIO_DMA32_BASE0 0x200
#define B43legacy_MMIO_DMA32_BASE1 0x220
@@ -258,7 +259,6 @@
#define B43legacy_IRQ_ALL 0xFFFFFFFF
#define B43legacy_IRQ_MASKTEMPLATE (B43legacy_IRQ_MAC_SUSPENDED | \
- B43legacy_IRQ_BEACON | \
B43legacy_IRQ_TBTT_INDI | \
B43legacy_IRQ_ATIM_END | \
B43legacy_IRQ_PMQ | \
@@ -596,12 +596,11 @@ struct b43legacy_wl {
/* Stats about the wireless interface */
struct ieee80211_low_level_stats ieee_stats;
+#ifdef CONFIG_B43LEGACY_HWRNG
struct hwrng rng;
u8 rng_initialized;
char rng_name[30 + 1];
-
- /* The RF-kill button */
- struct b43legacy_rfkill rfkill;
+#endif
/* List of all wireless devices on this chip */
struct list_head devlist;
@@ -614,6 +613,8 @@ struct b43legacy_wl {
struct sk_buff *current_beacon;
bool beacon0_uploaded;
bool beacon1_uploaded;
+ bool beacon_templates_virgin; /* Never wrote the templates? */
+ struct work_struct beacon_update_trigger;
};
/* Pointers to the firmware data and meta information about it. */
@@ -690,8 +691,8 @@ struct b43legacy_wldev {
/* Reason code of the last interrupt. */
u32 irq_reason;
u32 dma_reason[6];
- /* saved irq enable/disable state bitfield. */
- u32 irq_savedstate;
+ /* The currently active generic-interrupt mask. */
+ u32 irq_mask;
/* Link Quality calculation context. */
struct b43legacy_noise_calculation noisecalc;
/* if > 0 MAC is suspended. if == 0 MAC is enabled. */
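Note: irq_savedstate becomes irq_mask because the patch stops read-modify-writing the hardware register to remember its previous value; the software copy is now authoritative. The later main.c hunks reduce every enable/disable site to one of the two writes below (helper names here are illustrative, not from the patch):

/* Mask everything, e.g. at the top of the hard-IRQ handler. */
static void sketch_mask_all_irqs(struct b43legacy_wldev *dev)
{
	b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0);
}

/* Restore the driver's authoritative mask, e.g. at the end of the tasklet. */
static void sketch_restore_irqs(struct b43legacy_wldev *dev)
{
	b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
}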
diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c
index 3ea55b18c70..37e9be89356 100644
--- a/drivers/net/wireless/b43legacy/leds.c
+++ b/drivers/net/wireless/b43legacy/leds.c
@@ -28,6 +28,7 @@
#include "b43legacy.h"
#include "leds.h"
+#include "rfkill.h"
static void b43legacy_led_turn_on(struct b43legacy_wldev *dev, u8 led_index,
@@ -86,7 +87,8 @@ static void b43legacy_led_brightness_set(struct led_classdev *led_dev,
static int b43legacy_register_led(struct b43legacy_wldev *dev,
struct b43legacy_led *led,
- const char *name, char *default_trigger,
+ const char *name,
+ const char *default_trigger,
u8 led_index, bool activelow)
{
int err;
@@ -163,10 +165,10 @@ static void b43legacy_map_led(struct b43legacy_wldev *dev,
snprintf(name, sizeof(name),
"b43legacy-%s::radio", wiphy_name(hw->wiphy));
b43legacy_register_led(dev, &dev->led_radio, name,
- b43legacy_rfkill_led_name(dev),
+ ieee80211_get_radio_led_name(hw),
led_index, activelow);
- /* Sync the RF-kill LED state with the switch state. */
- if (dev->radio_hw_enable)
+ /* Sync the RF-kill LED state with radio and switch states. */
+ if (dev->phy.radio_on && b43legacy_is_hw_radio_enabled(dev))
b43legacy_led_turn_on(dev, led_index, activelow);
break;
case B43legacy_LED_WEIRD:
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 879edc78671..e5136fb65dd 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -583,35 +583,6 @@ static void b43legacy_short_slot_timing_disable(struct b43legacy_wldev *dev)
b43legacy_set_slot_time(dev, 20);
}
-/* Enable a Generic IRQ. "mask" is the mask of which IRQs to enable.
- * Returns the _previously_ enabled IRQ mask.
- */
-static inline u32 b43legacy_interrupt_enable(struct b43legacy_wldev *dev,
- u32 mask)
-{
- u32 old_mask;
-
- old_mask = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK);
- b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, old_mask |
- mask);
-
- return old_mask;
-}
-
-/* Disable a Generic IRQ. "mask" is the mask of which IRQs to disable.
- * Returns the _previously_ enabled IRQ mask.
- */
-static inline u32 b43legacy_interrupt_disable(struct b43legacy_wldev *dev,
- u32 mask)
-{
- u32 old_mask;
-
- old_mask = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK);
- b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, old_mask & ~mask);
-
- return old_mask;
-}
-
/* Synchronize IRQ top- and bottom-half.
* IRQs must be masked before calling this.
* This must not be called with the irq_lock held.
@@ -955,23 +926,54 @@ static void b43legacy_write_template_common(struct b43legacy_wldev *dev,
size + sizeof(struct b43legacy_plcp_hdr6));
}
+/* Convert a b43legacy antenna number value to the PHY TX control value. */
+static u16 b43legacy_antenna_to_phyctl(int antenna)
+{
+ switch (antenna) {
+ case B43legacy_ANTENNA0:
+ return B43legacy_TX4_PHY_ANT0;
+ case B43legacy_ANTENNA1:
+ return B43legacy_TX4_PHY_ANT1;
+ }
+ return B43legacy_TX4_PHY_ANTLAST;
+}
+
static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
u16 ram_offset,
- u16 shm_size_offset, u8 rate)
+ u16 shm_size_offset)
{
unsigned int i, len, variable_len;
const struct ieee80211_mgmt *bcn;
const u8 *ie;
bool tim_found = 0;
+ unsigned int rate;
+ u16 ctl;
+ int antenna;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
len = min((size_t)dev->wl->current_beacon->len,
0x200 - sizeof(struct b43legacy_plcp_hdr6));
+ rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
b43legacy_write_template_common(dev, (const u8 *)bcn, len, ram_offset,
shm_size_offset, rate);
+ /* Write the PHY TX control parameters. */
+ antenna = B43legacy_ANTENNA_DEFAULT;
+ antenna = b43legacy_antenna_to_phyctl(antenna);
+ ctl = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
+ B43legacy_SHM_SH_BEACPHYCTL);
+ /* We can't send beacons with short preamble. Would get PHY errors. */
+ ctl &= ~B43legacy_TX4_PHY_SHORTPRMBL;
+ ctl &= ~B43legacy_TX4_PHY_ANT;
+ ctl &= ~B43legacy_TX4_PHY_ENC;
+ ctl |= antenna;
+ ctl |= B43legacy_TX4_PHY_ENC_CCK;
+ b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
+ B43legacy_SHM_SH_BEACPHYCTL, ctl);
+
/* Find the position of the TIM and the DTIM_period value
* and write them to SHM. */
ie = bcn->u.beacon.variable;
@@ -1013,7 +1015,8 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
b43legacywarn(dev->wl, "Did not find a valid TIM IE in the "
"beacon template packet. AP or IBSS operation "
"may be broken.\n");
- }
+ } else
+ b43legacydbg(dev->wl, "Updated beacon template\n");
}
static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev,
@@ -1025,7 +1028,7 @@ static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev,
__le16 dur;
plcp.data = 0;
- b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->bitrate);
+ b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value);
dur = ieee80211_generic_frame_duration(dev->wl->hw,
dev->wl->vif,
size,
@@ -1129,10 +1132,103 @@ static void b43legacy_write_probe_resp_template(struct b43legacy_wldev *dev,
0x200 - sizeof(struct b43legacy_plcp_hdr6));
b43legacy_write_template_common(dev, probe_resp_data,
size, ram_offset,
- shm_size_offset, rate->bitrate);
+ shm_size_offset, rate->hw_value);
kfree(probe_resp_data);
}
+static void b43legacy_upload_beacon0(struct b43legacy_wldev *dev)
+{
+ struct b43legacy_wl *wl = dev->wl;
+
+ if (wl->beacon0_uploaded)
+ return;
+ b43legacy_write_beacon_template(dev, 0x68, 0x18);
+ /* FIXME: Probe resp upload doesn't really belong here,
+ * but we don't use that feature anyway. */
+ b43legacy_write_probe_resp_template(dev, 0x268, 0x4A,
+ &__b43legacy_ratetable[3]);
+ wl->beacon0_uploaded = 1;
+}
+
+static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
+{
+ struct b43legacy_wl *wl = dev->wl;
+
+ if (wl->beacon1_uploaded)
+ return;
+ b43legacy_write_beacon_template(dev, 0x468, 0x1A);
+ wl->beacon1_uploaded = 1;
+}
+
+static void handle_irq_beacon(struct b43legacy_wldev *dev)
+{
+ struct b43legacy_wl *wl = dev->wl;
+ u32 cmd, beacon0_valid, beacon1_valid;
+
+ if (!b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
+ return;
+
+ /* This is the bottom half of the asynchronous beacon update. */
+
+ /* Ignore interrupt in the future. */
+ dev->irq_mask &= ~B43legacy_IRQ_BEACON;
+
+ cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
+ beacon0_valid = (cmd & B43legacy_MACCMD_BEACON0_VALID);
+ beacon1_valid = (cmd & B43legacy_MACCMD_BEACON1_VALID);
+
+ /* Schedule interrupt manually, if busy. */
+ if (beacon0_valid && beacon1_valid) {
+ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, B43legacy_IRQ_BEACON);
+ dev->irq_mask |= B43legacy_IRQ_BEACON;
+ return;
+ }
+
+ if (unlikely(wl->beacon_templates_virgin)) {
+ /* We never uploaded a beacon before.
+ * Upload both templates now, but only mark one valid. */
+ wl->beacon_templates_virgin = 0;
+ b43legacy_upload_beacon0(dev);
+ b43legacy_upload_beacon1(dev);
+ cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
+ cmd |= B43legacy_MACCMD_BEACON0_VALID;
+ b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd);
+ } else {
+ if (!beacon0_valid) {
+ b43legacy_upload_beacon0(dev);
+ cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
+ cmd |= B43legacy_MACCMD_BEACON0_VALID;
+ b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd);
+ } else if (!beacon1_valid) {
+ b43legacy_upload_beacon1(dev);
+ cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
+ cmd |= B43legacy_MACCMD_BEACON1_VALID;
+ b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd);
+ }
+ }
+}
+
+static void b43legacy_beacon_update_trigger_work(struct work_struct *work)
+{
+ struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl,
+ beacon_update_trigger);
+ struct b43legacy_wldev *dev;
+
+ mutex_lock(&wl->mutex);
+ dev = wl->current_dev;
+ if (likely(dev && (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED))) {
+ spin_lock_irq(&wl->irq_lock);
+ /* Update beacon right away or defer to IRQ. */
+ handle_irq_beacon(dev);
+ /* The handler might have updated the IRQ mask. */
+ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK,
+ dev->irq_mask);
+ mmiowb();
+ spin_unlock_irq(&wl->irq_lock);
+ }
+ mutex_unlock(&wl->mutex);
+}
+
/* Asynchronously update the packet templates in template RAM.
* Locking: Requires wl->irq_lock to be locked. */
static void b43legacy_update_templates(struct b43legacy_wl *wl)
@@ -1156,54 +1252,24 @@ static void b43legacy_update_templates(struct b43legacy_wl *wl)
wl->current_beacon = beacon;
wl->beacon0_uploaded = 0;
wl->beacon1_uploaded = 0;
+ queue_work(wl->hw->workqueue, &wl->beacon_update_trigger);
}
static void b43legacy_set_beacon_int(struct b43legacy_wldev *dev,
u16 beacon_int)
{
b43legacy_time_lock(dev);
- if (dev->dev->id.revision >= 3)
- b43legacy_write32(dev, 0x188, (beacon_int << 16));
- else {
+ if (dev->dev->id.revision >= 3) {
+ b43legacy_write32(dev, B43legacy_MMIO_TSF_CFP_REP,
+ (beacon_int << 16));
+ b43legacy_write32(dev, B43legacy_MMIO_TSF_CFP_START,
+ (beacon_int << 10));
+ } else {
b43legacy_write16(dev, 0x606, (beacon_int >> 6));
b43legacy_write16(dev, 0x610, beacon_int);
}
b43legacy_time_unlock(dev);
-}
-
-static void handle_irq_beacon(struct b43legacy_wldev *dev)
-{
- struct b43legacy_wl *wl = dev->wl;
- u32 cmd;
-
- if (!b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
- return;
-
- /* This is the bottom half of the asynchronous beacon update. */
-
- cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
- if (!(cmd & B43legacy_MACCMD_BEACON0_VALID)) {
- if (!wl->beacon0_uploaded) {
- b43legacy_write_beacon_template(dev, 0x68,
- B43legacy_SHM_SH_BTL0,
- B43legacy_CCK_RATE_1MB);
- b43legacy_write_probe_resp_template(dev, 0x268,
- B43legacy_SHM_SH_PRTLEN,
- &__b43legacy_ratetable[3]);
- wl->beacon0_uploaded = 1;
- }
- cmd |= B43legacy_MACCMD_BEACON0_VALID;
- }
- if (!(cmd & B43legacy_MACCMD_BEACON1_VALID)) {
- if (!wl->beacon1_uploaded) {
- b43legacy_write_beacon_template(dev, 0x468,
- B43legacy_SHM_SH_BTL1,
- B43legacy_CCK_RATE_1MB);
- wl->beacon1_uploaded = 1;
- }
- cmd |= B43legacy_MACCMD_BEACON1_VALID;
- }
- b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd);
+ b43legacydbg(dev->wl, "Set beacon interval to %u\n", beacon_int);
}
static void handle_irq_ucode_debug(struct b43legacy_wldev *dev)
@@ -1302,7 +1368,7 @@ static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
if (reason & B43legacy_IRQ_TX_OK)
handle_irq_transmit_status(dev);
- b43legacy_interrupt_enable(dev, dev->irq_savedstate);
+ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
mmiowb();
spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
}
@@ -1354,18 +1420,18 @@ static irqreturn_t b43legacy_interrupt_handler(int irq, void *dev_id)
struct b43legacy_wldev *dev = dev_id;
u32 reason;
- if (!dev)
- return IRQ_NONE;
+ B43legacy_WARN_ON(!dev);
spin_lock(&dev->wl->irq_lock);
- if (b43legacy_status(dev) < B43legacy_STAT_STARTED)
+ if (unlikely(b43legacy_status(dev) < B43legacy_STAT_STARTED))
+ /* This can only happen on shared IRQ lines. */
goto out;
reason = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON);
if (reason == 0xffffffff) /* shared IRQ */
goto out;
ret = IRQ_HANDLED;
- reason &= b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK);
+ reason &= dev->irq_mask;
if (!reason)
goto out;
@@ -1389,10 +1455,9 @@ static irqreturn_t b43legacy_interrupt_handler(int irq, void *dev_id)
& 0x0000DC00;
b43legacy_interrupt_ack(dev, reason);
- /* disable all IRQs. They are enabled again in the bottom half. */
- dev->irq_savedstate = b43legacy_interrupt_disable(dev,
- B43legacy_IRQ_ALL);
- /* save the reason code and call our bottom half. */
+ /* Disable all IRQs. They are enabled again in the bottom half. */
+ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0);
+ /* Save the reason code and call our bottom half. */
dev->irq_reason = reason;
tasklet_schedule(&dev->isr_tasklet);
out:
@@ -1852,7 +1917,8 @@ void b43legacy_mac_enable(struct b43legacy_wldev *dev)
/* Re-enable IRQs. */
spin_lock_irq(&dev->wl->irq_lock);
- b43legacy_interrupt_enable(dev, dev->irq_savedstate);
+ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK,
+ dev->irq_mask);
spin_unlock_irq(&dev->wl->irq_lock);
}
}
@@ -1871,10 +1937,9 @@ void b43legacy_mac_suspend(struct b43legacy_wldev *dev)
/* Mask IRQs before suspending MAC. Otherwise
* the MAC stays busy and won't suspend. */
spin_lock_irq(&dev->wl->irq_lock);
- tmp = b43legacy_interrupt_disable(dev, B43legacy_IRQ_ALL);
+ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0);
spin_unlock_irq(&dev->wl->irq_lock);
b43legacy_synchronize_irq(dev);
- dev->irq_savedstate = tmp;
b43legacy_power_saving_ctl_bits(dev, -1, 1);
b43legacy_write32(dev, B43legacy_MMIO_MACCTL,
@@ -2297,6 +2362,7 @@ static void b43legacy_security_init(struct b43legacy_wldev *dev)
dev->max_nr_keys - 8);
}
+#ifdef CONFIG_B43LEGACY_HWRNG
static int b43legacy_rng_read(struct hwrng *rng, u32 *data)
{
struct b43legacy_wl *wl = (struct b43legacy_wl *)rng->priv;
@@ -2312,17 +2378,21 @@ static int b43legacy_rng_read(struct hwrng *rng, u32 *data)
return (sizeof(u16));
}
+#endif
static void b43legacy_rng_exit(struct b43legacy_wl *wl)
{
+#ifdef CONFIG_B43LEGACY_HWRNG
if (wl->rng_initialized)
hwrng_unregister(&wl->rng);
+#endif
}
static int b43legacy_rng_init(struct b43legacy_wl *wl)
{
- int err;
+ int err = 0;
+#ifdef CONFIG_B43LEGACY_HWRNG
snprintf(wl->rng_name, ARRAY_SIZE(wl->rng_name),
"%s_%s", KBUILD_MODNAME, wiphy_name(wl->hw->wiphy));
wl->rng.name = wl->rng_name;
@@ -2336,6 +2406,7 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl)
"number generator (%d)\n", err);
}
+#endif
return err;
}
@@ -2557,7 +2628,6 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
int antenna_tx;
int antenna_rx;
int err = 0;
- u32 savedirqs;
antenna_tx = B43legacy_ANTENNA_DEFAULT;
antenna_rx = B43legacy_ANTENNA_DEFAULT;
@@ -2597,7 +2667,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
spin_unlock_irqrestore(&wl->irq_lock, flags);
goto out_unlock_mutex;
}
- savedirqs = b43legacy_interrupt_disable(dev, B43legacy_IRQ_ALL);
+ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0);
spin_unlock_irqrestore(&wl->irq_lock, flags);
b43legacy_synchronize_irq(dev);
@@ -2619,11 +2689,6 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
/* Antennas for RX and management frame TX. */
b43legacy_mgmtframe_txantenna(dev, antenna_tx);
- /* Update templates for AP mode. */
- if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
- b43legacy_set_beacon_int(dev, conf->beacon_int);
-
-
if (!!conf->radio_enabled != phy->radio_on) {
if (conf->radio_enabled) {
b43legacy_radio_turn_on(dev);
@@ -2641,7 +2706,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
}
spin_lock_irqsave(&wl->irq_lock, flags);
- b43legacy_interrupt_enable(dev, savedirqs);
+ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
mmiowb();
spin_unlock_irqrestore(&wl->irq_lock, flags);
out_unlock_mutex:
@@ -2704,9 +2769,9 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
struct b43legacy_wldev *dev;
struct b43legacy_phy *phy;
unsigned long flags;
- u32 savedirqs;
mutex_lock(&wl->mutex);
+ B43legacy_WARN_ON(wl->vif != vif);
dev = wl->current_dev;
phy = &dev->phy;
@@ -2719,12 +2784,35 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
spin_unlock_irqrestore(&wl->irq_lock, flags);
goto out_unlock_mutex;
}
- savedirqs = b43legacy_interrupt_disable(dev, B43legacy_IRQ_ALL);
+ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0);
+
+ if (changed & BSS_CHANGED_BSSID) {
+ b43legacy_synchronize_irq(dev);
+
+ if (conf->bssid)
+ memcpy(wl->bssid, conf->bssid, ETH_ALEN);
+ else
+ memset(wl->bssid, 0, ETH_ALEN);
+ }
+
+ if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) {
+ if (changed & BSS_CHANGED_BEACON &&
+ (b43legacy_is_mode(wl, NL80211_IFTYPE_AP) ||
+ b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC)))
+ b43legacy_update_templates(wl);
+
+ if (changed & BSS_CHANGED_BSSID)
+ b43legacy_write_mac_bssid_templates(dev);
+ }
spin_unlock_irqrestore(&wl->irq_lock, flags);
- b43legacy_synchronize_irq(dev);
b43legacy_mac_suspend(dev);
+ if (changed & BSS_CHANGED_BEACON_INT &&
+ (b43legacy_is_mode(wl, NL80211_IFTYPE_AP) ||
+ b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC)))
+ b43legacy_set_beacon_int(dev, conf->beacon_int);
+
if (changed & BSS_CHANGED_BASIC_RATES)
b43legacy_update_basic_rates(dev, conf->basic_rates);
@@ -2738,14 +2826,12 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
b43legacy_mac_enable(dev);
spin_lock_irqsave(&wl->irq_lock, flags);
- b43legacy_interrupt_enable(dev, savedirqs);
+ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
/* XXX: why? */
mmiowb();
spin_unlock_irqrestore(&wl->irq_lock, flags);
out_unlock_mutex:
mutex_unlock(&wl->mutex);
-
- return;
}
static void b43legacy_op_configure_filter(struct ieee80211_hw *hw,
@@ -2787,40 +2873,6 @@ static void b43legacy_op_configure_filter(struct ieee80211_hw *hw,
spin_unlock_irqrestore(&wl->irq_lock, flags);
}
-static int b43legacy_op_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
- struct b43legacy_wldev *dev = wl->current_dev;
- unsigned long flags;
-
- if (!dev)
- return -ENODEV;
- mutex_lock(&wl->mutex);
- spin_lock_irqsave(&wl->irq_lock, flags);
- B43legacy_WARN_ON(wl->vif != vif);
- if (conf->bssid)
- memcpy(wl->bssid, conf->bssid, ETH_ALEN);
- else
- memset(wl->bssid, 0, ETH_ALEN);
- if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) {
- if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP)) {
- B43legacy_WARN_ON(vif->type != NL80211_IFTYPE_AP);
- if (conf->changed & IEEE80211_IFCC_BEACON)
- b43legacy_update_templates(wl);
- } else if (b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC)) {
- if (conf->changed & IEEE80211_IFCC_BEACON)
- b43legacy_update_templates(wl);
- }
- b43legacy_write_mac_bssid_templates(dev);
- }
- spin_unlock_irqrestore(&wl->irq_lock, flags);
- mutex_unlock(&wl->mutex);
-
- return 0;
-}
-
/* Locking: wl->mutex */
static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
{
@@ -2834,8 +2886,7 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
* setting the status to INITIALIZED, as the interrupt handler
* won't care about IRQs then. */
spin_lock_irqsave(&wl->irq_lock, flags);
- dev->irq_savedstate = b43legacy_interrupt_disable(dev,
- B43legacy_IRQ_ALL);
+ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0);
b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK); /* flush */
spin_unlock_irqrestore(&wl->irq_lock, flags);
b43legacy_synchronize_irq(dev);
@@ -2875,7 +2926,7 @@ static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev)
/* Start data flow (TX/RX) */
b43legacy_mac_enable(dev);
- b43legacy_interrupt_enable(dev, dev->irq_savedstate);
+ b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
/* Start maintenance work */
b43legacy_periodic_tasks_setup(dev);
@@ -3038,7 +3089,7 @@ static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev)
/* IRQ related flags */
dev->irq_reason = 0;
memset(dev->dma_reason, 0, sizeof(dev->dma_reason));
- dev->irq_savedstate = B43legacy_IRQ_MASKTEMPLATE;
+ dev->irq_mask = B43legacy_IRQ_MASKTEMPLATE;
dev->mac_suspended = 1;
@@ -3380,11 +3431,6 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
struct b43legacy_wldev *dev = wl->current_dev;
int did_init = 0;
int err = 0;
- bool do_rfkill_exit = 0;
-
- /* First register RFkill.
- * LEDs that are registered later depend on it. */
- b43legacy_rfkill_init(dev);
/* Kill all old instance specific information to make sure
* the card won't use it in the short timeframe between start
@@ -3392,15 +3438,16 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
memset(wl->bssid, 0, ETH_ALEN);
memset(wl->mac_addr, 0, ETH_ALEN);
wl->filter_flags = 0;
+ wl->beacon0_uploaded = 0;
+ wl->beacon1_uploaded = 0;
+ wl->beacon_templates_virgin = 1;
mutex_lock(&wl->mutex);
if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) {
err = b43legacy_wireless_core_init(dev);
- if (err) {
- do_rfkill_exit = 1;
+ if (err)
goto out_mutex_unlock;
- }
did_init = 1;
}
@@ -3409,17 +3456,15 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
if (err) {
if (did_init)
b43legacy_wireless_core_exit(dev);
- do_rfkill_exit = 1;
goto out_mutex_unlock;
}
}
+ wiphy_rfkill_start_polling(hw->wiphy);
+
out_mutex_unlock:
mutex_unlock(&wl->mutex);
- if (do_rfkill_exit)
- b43legacy_rfkill_exit(dev);
-
return err;
}
@@ -3428,7 +3473,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
- b43legacy_rfkill_exit(dev);
+ cancel_work_sync(&(wl->beacon_update_trigger));
mutex_lock(&wl->mutex);
if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
@@ -3457,13 +3502,13 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
.remove_interface = b43legacy_op_remove_interface,
.config = b43legacy_op_dev_config,
.bss_info_changed = b43legacy_op_bss_info_changed,
- .config_interface = b43legacy_op_config_interface,
.configure_filter = b43legacy_op_configure_filter,
.get_stats = b43legacy_op_get_stats,
.get_tx_stats = b43legacy_op_get_tx_stats,
.start = b43legacy_op_start,
.stop = b43legacy_op_stop,
.set_tim = b43legacy_op_beacon_set_tim,
+ .rfkill_poll = b43legacy_rfkill_poll,
};
/* Hard-reset the chip. Do not call this directly.
@@ -3760,6 +3805,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
spin_lock_init(&wl->leds_lock);
mutex_init(&wl->mutex);
INIT_LIST_HEAD(&wl->devlist);
+ INIT_WORK(&wl->beacon_update_trigger, b43legacy_beacon_update_trigger_work);
ssb_set_devtypedata(dev, wl);
b43legacyinfo(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id);
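Note: for orientation, the beacon rework above boils down to double-buffered template RAM: handle_irq_beacon() uploads into whichever of the two slots the MAC does not currently mark valid, and defers by re-raising B43legacy_IRQ_BEACON when both are busy. The sketch below is a condensed, illustrative reading of that slot selection (the patch re-reads MACCMD after each upload; this sketch reuses the first read):

static void sketch_refresh_free_beacon_slot(struct b43legacy_wldev *dev)
{
	u32 cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);

	if (!(cmd & B43legacy_MACCMD_BEACON0_VALID)) {
		b43legacy_upload_beacon0(dev);		/* slot 0 is free */
		cmd |= B43legacy_MACCMD_BEACON0_VALID;
	} else if (!(cmd & B43legacy_MACCMD_BEACON1_VALID)) {
		b43legacy_upload_beacon1(dev);		/* slot 1 is free */
		cmd |= B43legacy_MACCMD_BEACON1_VALID;
	}
	b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd);
}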
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index 746d5361bba..51866c9a276 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -443,7 +443,7 @@ int b43legacy_pio_init(struct b43legacy_wldev *dev)
pio->queue3 = queue;
if (dev->dev->id.revision < 3)
- dev->irq_savedstate |= B43legacy_IRQ_PIO_WORKAROUND;
+ dev->irq_mask |= B43legacy_IRQ_PIO_WORKAROUND;
b43legacydbg(dev->wl, "PIO initialized\n");
err = 0;
diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c
index b32bf6a94f1..8783022db11 100644
--- a/drivers/net/wireless/b43legacy/rfkill.c
+++ b/drivers/net/wireless/b43legacy/rfkill.c
@@ -22,15 +22,12 @@
*/
-#include "rfkill.h"
#include "radio.h"
#include "b43legacy.h"
-#include <linux/kmod.h>
-
/* Returns TRUE, if the radio is enabled in hardware. */
-static bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
+bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
{
if (dev->phy.rev >= 3) {
if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI)
@@ -45,165 +42,43 @@ static bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
}
/* The poll callback for the hardware button. */
-static void b43legacy_rfkill_poll(struct input_polled_dev *poll_dev)
+void b43legacy_rfkill_poll(struct ieee80211_hw *hw)
{
- struct b43legacy_wldev *dev = poll_dev->private;
- struct b43legacy_wl *wl = dev->wl;
+ struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
+ struct b43legacy_wldev *dev = wl->current_dev;
+ struct ssb_bus *bus = dev->dev->bus;
bool enabled;
- bool report_change = 0;
+ bool brought_up = false;
mutex_lock(&wl->mutex);
if (unlikely(b43legacy_status(dev) < B43legacy_STAT_INITIALIZED)) {
- mutex_unlock(&wl->mutex);
- return;
+ if (ssb_bus_powerup(bus, 0)) {
+ mutex_unlock(&wl->mutex);
+ return;
+ }
+ ssb_device_enable(dev->dev, 0);
+ brought_up = true;
}
+
enabled = b43legacy_is_hw_radio_enabled(dev);
+
if (unlikely(enabled != dev->radio_hw_enable)) {
dev->radio_hw_enable = enabled;
- report_change = 1;
b43legacyinfo(wl, "Radio hardware status changed to %s\n",
enabled ? "ENABLED" : "DISABLED");
- }
- mutex_unlock(&wl->mutex);
-
- /* send the radio switch event to the system - note both a key press
- * and a release are required */
- if (unlikely(report_change)) {
- input_report_key(poll_dev->input, KEY_WLAN, 1);
- input_report_key(poll_dev->input, KEY_WLAN, 0);
- }
-}
-
-/* Called when the RFKILL toggled in software.
- * This is called without locking. */
-static int b43legacy_rfkill_soft_toggle(void *data, enum rfkill_state state)
-{
- struct b43legacy_wldev *dev = data;
- struct b43legacy_wl *wl = dev->wl;
- int err = -EBUSY;
-
- if (!wl->rfkill.registered)
- return 0;
-
- mutex_lock(&wl->mutex);
- if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED)
- goto out_unlock;
- err = 0;
- switch (state) {
- case RFKILL_STATE_UNBLOCKED:
- if (!dev->radio_hw_enable) {
- /* No luck. We can't toggle the hardware RF-kill
- * button from software. */
- err = -EBUSY;
- goto out_unlock;
+ wiphy_rfkill_set_hw_state(hw->wiphy, !enabled);
+ if (enabled != dev->phy.radio_on) {
+ if (enabled)
+ b43legacy_radio_turn_on(dev);
+ else
+ b43legacy_radio_turn_off(dev, 0);
}
- if (!dev->phy.radio_on)
- b43legacy_radio_turn_on(dev);
- break;
- case RFKILL_STATE_SOFT_BLOCKED:
- if (dev->phy.radio_on)
- b43legacy_radio_turn_off(dev, 0);
- break;
- default:
- b43legacywarn(wl, "Received unexpected rfkill state %d.\n",
- state);
- break;
}
-out_unlock:
- mutex_unlock(&wl->mutex);
-
- return err;
-}
-
-char *b43legacy_rfkill_led_name(struct b43legacy_wldev *dev)
-{
- struct b43legacy_rfkill *rfk = &(dev->wl->rfkill);
-
- if (!rfk->registered)
- return NULL;
- return rfkill_get_led_name(rfk->rfkill);
-}
-
-void b43legacy_rfkill_init(struct b43legacy_wldev *dev)
-{
- struct b43legacy_wl *wl = dev->wl;
- struct b43legacy_rfkill *rfk = &(wl->rfkill);
- int err;
-
- rfk->registered = 0;
-
- rfk->rfkill = rfkill_allocate(dev->dev->dev, RFKILL_TYPE_WLAN);
- if (!rfk->rfkill)
- goto out_error;
- snprintf(rfk->name, sizeof(rfk->name),
- "b43legacy-%s", wiphy_name(wl->hw->wiphy));
- rfk->rfkill->name = rfk->name;
- rfk->rfkill->state = RFKILL_STATE_UNBLOCKED;
- rfk->rfkill->data = dev;
- rfk->rfkill->toggle_radio = b43legacy_rfkill_soft_toggle;
- rfk->rfkill->user_claim_unsupported = 1;
-
- rfk->poll_dev = input_allocate_polled_device();
- if (!rfk->poll_dev) {
- rfkill_free(rfk->rfkill);
- goto err_freed_rfk;
+ if (brought_up) {
+ ssb_device_disable(dev->dev, 0);
+ ssb_bus_may_powerdown(bus);
}
- rfk->poll_dev->private = dev;
- rfk->poll_dev->poll = b43legacy_rfkill_poll;
- rfk->poll_dev->poll_interval = 1000; /* msecs */
-
- rfk->poll_dev->input->name = rfk->name;
- rfk->poll_dev->input->id.bustype = BUS_HOST;
- rfk->poll_dev->input->id.vendor = dev->dev->bus->boardinfo.vendor;
- rfk->poll_dev->input->evbit[0] = BIT(EV_KEY);
- set_bit(KEY_WLAN, rfk->poll_dev->input->keybit);
-
- err = rfkill_register(rfk->rfkill);
- if (err)
- goto err_free_polldev;
-
-#ifdef CONFIG_RFKILL_INPUT_MODULE
- /* B43legacy RF-kill isn't useful without the rfkill-input subsystem.
- * Try to load the module. */
- err = request_module("rfkill-input");
- if (err)
- b43legacywarn(wl, "Failed to load the rfkill-input module."
- "The built-in radio LED will not work.\n");
-#endif /* CONFIG_RFKILL_INPUT */
-
- err = input_register_polled_device(rfk->poll_dev);
- if (err)
- goto err_unreg_rfk;
-
- rfk->registered = 1;
-
- return;
-err_unreg_rfk:
- rfkill_unregister(rfk->rfkill);
-err_free_polldev:
- input_free_polled_device(rfk->poll_dev);
- rfk->poll_dev = NULL;
-err_freed_rfk:
- rfk->rfkill = NULL;
-out_error:
- rfk->registered = 0;
- b43legacywarn(wl, "RF-kill button init failed\n");
-}
-
-void b43legacy_rfkill_exit(struct b43legacy_wldev *dev)
-{
- struct b43legacy_rfkill *rfk = &(dev->wl->rfkill);
-
- if (!rfk->registered)
- return;
- rfk->registered = 0;
-
- input_unregister_polled_device(rfk->poll_dev);
- rfkill_unregister(rfk->rfkill);
- input_free_polled_device(rfk->poll_dev);
- rfk->poll_dev = NULL;
- rfk->rfkill = NULL;
+ mutex_unlock(&wl->mutex);
}
-
diff --git a/drivers/net/wireless/b43legacy/rfkill.h b/drivers/net/wireless/b43legacy/rfkill.h
index 11150a8032f..75585571c54 100644
--- a/drivers/net/wireless/b43legacy/rfkill.h
+++ b/drivers/net/wireless/b43legacy/rfkill.h
@@ -1,59 +1,11 @@
#ifndef B43legacy_RFKILL_H_
#define B43legacy_RFKILL_H_
+struct ieee80211_hw;
struct b43legacy_wldev;
-#ifdef CONFIG_B43LEGACY_RFKILL
+void b43legacy_rfkill_poll(struct ieee80211_hw *hw);
-#include <linux/rfkill.h>
-#include <linux/workqueue.h>
-#include <linux/input-polldev.h>
-
-
-
-struct b43legacy_rfkill {
- /* The RFKILL subsystem data structure */
- struct rfkill *rfkill;
- /* The poll device for the RFKILL input button */
- struct input_polled_dev *poll_dev;
- /* Did initialization succeed? Used for freeing. */
- bool registered;
- /* The unique name of this rfkill switch */
- char name[sizeof("b43legacy-phy4294967295")];
-};
-
-/* The init function returns void, because we are not interested
- * in failing the b43 init process when rfkill init failed. */
-void b43legacy_rfkill_init(struct b43legacy_wldev *dev);
-void b43legacy_rfkill_exit(struct b43legacy_wldev *dev);
-
-char *b43legacy_rfkill_led_name(struct b43legacy_wldev *dev);
-
-
-#else /* CONFIG_B43LEGACY_RFKILL */
-/* No RFKILL support. */
-
-struct b43legacy_rfkill {
- /* empty */
-};
-
-static inline void b43legacy_rfkill_alloc(struct b43legacy_wldev *dev)
-{
-}
-static inline void b43legacy_rfkill_free(struct b43legacy_wldev *dev)
-{
-}
-static inline void b43legacy_rfkill_init(struct b43legacy_wldev *dev)
-{
-}
-static inline void b43legacy_rfkill_exit(struct b43legacy_wldev *dev)
-{
-}
-static inline char *b43legacy_rfkill_led_name(struct b43legacy_wldev *dev)
-{
- return NULL;
-}
-
-#endif /* CONFIG_B43LEGACY_RFKILL */
+bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev);
#endif /* B43legacy_RFKILL_H_ */
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 12fca99f757..b8e39dd06e9 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -274,7 +274,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
/* PHY TX Control word */
if (rate_ofdm)
- phy_ctl |= B43legacy_TX4_PHY_OFDM;
+ phy_ctl |= B43legacy_TX4_PHY_ENC_OFDM;
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL;
switch (info->antenna_sel_tx) {
diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h
index 62e09d02788..91633087a20 100644
--- a/drivers/net/wireless/b43legacy/xmit.h
+++ b/drivers/net/wireless/b43legacy/xmit.h
@@ -67,7 +67,9 @@ struct b43legacy_txhdr_fw3 {
#define B43legacy_TX4_EFT_RTSFBOFDM 0x0010 /* RTS/CTS fallback rate type */
/* PHY TX control word */
-#define B43legacy_TX4_PHY_OFDM 0x0001 /* Data frame rate type */
+#define B43legacy_TX4_PHY_ENC 0x0003 /* Data frame encoding */
+#define B43legacy_TX4_PHY_ENC_CCK 0x0000 /* CCK */
+#define B43legacy_TX4_PHY_ENC_OFDM 0x0001 /* Data frame rate type */
#define B43legacy_TX4_PHY_SHORTPRMBL 0x0010 /* Use short preamble */
#define B43legacy_TX4_PHY_ANT 0x03C0 /* Antenna selection */
#define B43legacy_TX4_PHY_ANT0 0x0000 /* Use antenna 0 */
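Note: the renamed define turns the single OFDM flag into a two-bit encoding field (B43legacy_TX4_PHY_ENC, mask 0x0003) with explicit CCK (0x0000) and OFDM (0x0001) values; callers are expected to clear the field before ORing in a value, as the beacon-template hunk in main.c does. An illustrative helper, not part of the patch:

static u16 phyctl_set_encoding(u16 ctl, bool ofdm)
{
	ctl &= ~B43legacy_TX4_PHY_ENC;			/* clear bits 0-1 */
	ctl |= ofdm ? B43legacy_TX4_PHY_ENC_OFDM	/* 0x0001 */
		    : B43legacy_TX4_PHY_ENC_CCK;	/* 0x0000 */
	return ctl;
}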
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index 932d207bce2..c15db229351 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -29,7 +29,7 @@ config HOSTAP
PLX/PCI/CS version of the driver to actually use the driver.
The driver can be compiled as a module and it will be called
- "hostap.ko".
+ hostap.
config HOSTAP_FIRMWARE
bool "Support downloading firmware images with Host AP driver"
@@ -68,7 +68,7 @@ config HOSTAP_PLX
driver.
The driver can be compiled as a module and will be named
- "hostap_plx.ko".
+ hostap_plx.
config HOSTAP_PCI
tristate "Host AP driver for Prism2.5 PCI adaptors"
@@ -81,7 +81,7 @@ config HOSTAP_PCI
driver.
The driver can be compiled as a module and will be named
- "hostap_pci.ko".
+ hostap_pci.
config HOSTAP_CS
tristate "Host AP driver for Prism2/2.5/3 PC Cards"
@@ -94,4 +94,4 @@ config HOSTAP_CS
driver.
The driver can be compiled as a module and will be named
- "hostap_cs.ko".
+ hostap_cs.
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 6693423f63f..d313b005114 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -377,7 +377,7 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct hostap_interface *iface;
local_info_t *local;
- int ret = 1;
+ int ret = NETDEV_TX_BUSY;
u16 fc;
struct hostap_tx_data tx;
ap_tx_ret tx_ret;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 3dad1cf8f24..ff9b5c88218 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1423,7 +1423,7 @@ static int prism2_hw_init2(struct net_device *dev, int initial)
prism2_check_sta_fw_version(local);
if (hfa384x_get_rid(dev, HFA384X_RID_CNFOWNMACADDR,
- &dev->dev_addr, 6, 1) < 0) {
+ dev->dev_addr, 6, 1) < 0) {
printk("%s: could not get own MAC address\n",
dev->name);
}
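Note: the one-character change above is purely about types: dev->dev_addr decays to unsigned char *, whereas &dev->dev_addr has pointer-to-array type, which points at the same address but does not match a byte-buffer parameter cleanly. A standalone C illustration (not driver code):

#include <stdio.h>
#include <stddef.h>

static void consume_bytes(const unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		printf("%02x%c", buf[i], i + 1 == len ? '\n' : ':');
}

int main(void)
{
	unsigned char dev_addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	consume_bytes(dev_addr, sizeof(dev_addr));	/* decays to unsigned char * */
	/* consume_bytes(&dev_addr, ...) would pass unsigned char (*)[6]:
	 * same address, wrong type. */
	return 0;
}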
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index cbf15d70320..0e5d51086a4 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -435,7 +435,7 @@ static int prism2_plx_probe(struct pci_dev *pdev,
unsigned long pccard_attr_mem;
unsigned int pccard_attr_len;
void __iomem *attr_mem = NULL;
- unsigned int cor_offset, cor_index;
+ unsigned int cor_offset = 0, cor_index = 0;
u32 reg;
local_info_t *local = NULL;
struct net_device *dev = NULL;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 97e5647ff05..742432388ca 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -3488,7 +3488,7 @@ static DEVICE_ATTR(pci, S_IRUGO, show_pci, NULL);
static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
char *buf)
{
- struct ipw2100_priv *p = d->driver_data;
+ struct ipw2100_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->config);
}
@@ -3497,7 +3497,7 @@ static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
static ssize_t show_status(struct device *d, struct device_attribute *attr,
char *buf)
{
- struct ipw2100_priv *p = d->driver_data;
+ struct ipw2100_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->status);
}
@@ -3506,7 +3506,7 @@ static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
static ssize_t show_capability(struct device *d, struct device_attribute *attr,
char *buf)
{
- struct ipw2100_priv *p = d->driver_data;
+ struct ipw2100_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->capability);
}
@@ -4224,7 +4224,7 @@ static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1 - SW based RF kill active (sysfs)
2 - HW based RF kill active
3 - Both HW and SW baed RF kill active */
- struct ipw2100_priv *priv = (struct ipw2100_priv *)d->driver_data;
+ struct ipw2100_priv *priv = dev_get_drvdata(d);
int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
(rf_kill_active(priv) ? 0x2 : 0x0);
return sprintf(buf, "%i\n", val);
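Note: all of the ipw2x00 sysfs hunks replace direct d->driver_data pokes with the generic accessor. The other half of the pairing, storing the pointer at probe time, is not visible in this diff; a minimal sketch of both sides follows, with names that are illustrative only:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct example_priv {
	u32 status;
};

/* Probe path: typically done via pci_set_drvdata(), which wraps this. */
static void example_bind(struct device *dev, struct example_priv *priv)
{
	dev_set_drvdata(dev, priv);
}

/* Sysfs handler: fetch the private data back through the accessor. */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	return sprintf(buf, "0x%08x\n", priv->status);
}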
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index bd4dbcfe1bb..44c29b3f672 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -1527,7 +1527,7 @@ static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
static ssize_t show_status(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct ipw_priv *p = d->driver_data;
+ struct ipw_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->status);
}
@@ -1536,7 +1536,7 @@ static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
char *buf)
{
- struct ipw_priv *p = d->driver_data;
+ struct ipw_priv *p = dev_get_drvdata(d);
return sprintf(buf, "0x%08x\n", (int)p->config);
}
@@ -1545,7 +1545,7 @@ static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
static ssize_t show_nic_type(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct ipw_priv *priv = d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "TYPE: %d\n", priv->nic_type);
}
@@ -1555,7 +1555,7 @@ static ssize_t show_ucode_version(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 len = sizeof(u32), tmp = 0;
- struct ipw_priv *p = d->driver_data;
+ struct ipw_priv *p = dev_get_drvdata(d);
if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
return 0;
@@ -1569,7 +1569,7 @@ static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
char *buf)
{
u32 len = sizeof(u32), tmp = 0;
- struct ipw_priv *p = d->driver_data;
+ struct ipw_priv *p = dev_get_drvdata(d);
if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
return 0;
@@ -1586,14 +1586,15 @@ static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
static ssize_t show_eeprom_delay(struct device *d,
struct device_attribute *attr, char *buf)
{
- int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
+ struct ipw_priv *p = dev_get_drvdata(d);
+ int n = p->eeprom_delay;
return sprintf(buf, "%i\n", n);
}
static ssize_t store_eeprom_delay(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ipw_priv *p = d->driver_data;
+ struct ipw_priv *p = dev_get_drvdata(d);
sscanf(buf, "%i", &p->eeprom_delay);
return strnlen(buf, count);
}
@@ -1605,7 +1606,7 @@ static ssize_t show_command_event_reg(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 reg = 0;
- struct ipw_priv *p = d->driver_data;
+ struct ipw_priv *p = dev_get_drvdata(d);
reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
return sprintf(buf, "0x%08x\n", reg);
@@ -1615,7 +1616,7 @@ static ssize_t store_command_event_reg(struct device *d,
const char *buf, size_t count)
{
u32 reg;
- struct ipw_priv *p = d->driver_data;
+ struct ipw_priv *p = dev_get_drvdata(d);
sscanf(buf, "%x", &reg);
ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
@@ -1629,7 +1630,7 @@ static ssize_t show_mem_gpio_reg(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 reg = 0;
- struct ipw_priv *p = d->driver_data;
+ struct ipw_priv *p = dev_get_drvdata(d);
reg = ipw_read_reg32(p, 0x301100);
return sprintf(buf, "0x%08x\n", reg);
@@ -1639,7 +1640,7 @@ static ssize_t store_mem_gpio_reg(struct device *d,
const char *buf, size_t count)
{
u32 reg;
- struct ipw_priv *p = d->driver_data;
+ struct ipw_priv *p = dev_get_drvdata(d);
sscanf(buf, "%x", &reg);
ipw_write_reg32(p, 0x301100, reg);
@@ -1653,7 +1654,7 @@ static ssize_t show_indirect_dword(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 reg = 0;
- struct ipw_priv *priv = d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
if (priv->status & STATUS_INDIRECT_DWORD)
reg = ipw_read_reg32(priv, priv->indirect_dword);
@@ -1666,7 +1667,7 @@ static ssize_t store_indirect_dword(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ipw_priv *priv = d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
sscanf(buf, "%x", &priv->indirect_dword);
priv->status |= STATUS_INDIRECT_DWORD;
@@ -1680,7 +1681,7 @@ static ssize_t show_indirect_byte(struct device *d,
struct device_attribute *attr, char *buf)
{
u8 reg = 0;
- struct ipw_priv *priv = d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
if (priv->status & STATUS_INDIRECT_BYTE)
reg = ipw_read_reg8(priv, priv->indirect_byte);
@@ -1693,7 +1694,7 @@ static ssize_t store_indirect_byte(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ipw_priv *priv = d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
sscanf(buf, "%x", &priv->indirect_byte);
priv->status |= STATUS_INDIRECT_BYTE;
@@ -1707,7 +1708,7 @@ static ssize_t show_direct_dword(struct device *d,
struct device_attribute *attr, char *buf)
{
u32 reg = 0;
- struct ipw_priv *priv = d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
if (priv->status & STATUS_DIRECT_DWORD)
reg = ipw_read32(priv, priv->direct_dword);
@@ -1720,7 +1721,7 @@ static ssize_t store_direct_dword(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ipw_priv *priv = d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
sscanf(buf, "%x", &priv->direct_dword);
priv->status |= STATUS_DIRECT_DWORD;
@@ -1747,7 +1748,7 @@ static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1 - SW based RF kill active (sysfs)
2 - HW based RF kill active
3 - Both HW and SW baed RF kill active */
- struct ipw_priv *priv = d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
(rf_kill_active(priv) ? 0x2 : 0x0);
return sprintf(buf, "%i\n", val);
@@ -1791,7 +1792,7 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ipw_priv *priv = d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
ipw_radio_kill_sw(priv, buf[0] == '1');
@@ -1803,7 +1804,7 @@ static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
char *buf)
{
- struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
int pos = 0, len = 0;
if (priv->config & CFG_SPEED_SCAN) {
while (priv->speed_scan[pos] != 0)
@@ -1818,7 +1819,7 @@ static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
int channel, pos = 0;
const char *p = buf;
@@ -1857,14 +1858,14 @@ static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
char *buf)
{
- struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
}
static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
+ struct ipw_priv *priv = dev_get_drvdata(d);
if (buf[0] == '1')
priv->config |= CFG_NET_STATS;
else
@@ -3176,11 +3177,8 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
/* Start the Dma */
rc = ipw_fw_dma_enable(priv);
- if (priv->sram_desc.last_cb_index > 0) {
- /* the DMA is already ready this would be a bug. */
- BUG();
- goto out;
- }
+ /* the DMA is already ready this would be a bug. */
+ BUG_ON(priv->sram_desc.last_cb_index > 0);
do {
chunk = (struct fw_chunk *)(data + offset);
@@ -11526,7 +11524,8 @@ static int ipw_prom_stop(struct net_device *dev)
static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
IPW_DEBUG_INFO("prom dev->xmit\n");
- return -EOPNOTSUPP;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
static const struct net_device_ops ipw_prom_netdev_ops = {
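Note: the ipw_prom_hard_start_xmit() hunk above, like the hostap and libipw ones, moves to the net-stack return convention: a start_xmit handler either consumes the skb (queuing or freeing it) and returns NETDEV_TX_OK, or leaves it untouched and returns NETDEV_TX_BUSY so the core requeues it; a bare 1 or a negative errno is not meaningful here. A generic sketch follows, where example_ring_full() is a hypothetical helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool example_ring_full(struct net_device *dev)
{
	return false;	/* placeholder for a real TX-ring check */
}

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (!netif_running(dev)) {
		dev_kfree_skb(skb);	/* we own it, so drop it cleanly */
		return NETDEV_TX_OK;
	}
	if (example_ring_full(dev))
		return NETDEV_TX_BUSY;	/* skb untouched; core will retry */

	/* ... hand the frame to hardware here ... */
	dev_kfree_skb(skb);		/* placeholder: pretend it was sent */
	return NETDEV_TX_OK;
}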
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 92a26922e79..8ce6e961c5d 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -154,10 +154,6 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
goto failed;
}
ieee = netdev_priv(dev);
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
- dev->hard_start_xmit = ieee80211_xmit;
- dev->change_mtu = ieee80211_change_mtu;
-#endif
ieee->dev = dev;
diff --git a/drivers/net/wireless/ipw2x00/libipw_tx.c b/drivers/net/wireless/ipw2x00/libipw_tx.c
index 65a8195b3d9..da2ad5437ce 100644
--- a/drivers/net/wireless/ipw2x00/libipw_tx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_tx.c
@@ -539,7 +539,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&ieee->lock, flags);
netif_stop_queue(dev);
dev->stats.tx_errors++;
- return 1;
+ return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL(ieee80211_xmit);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 8304f6406a1..e092af09d6b 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -5,16 +5,11 @@ config IWLWIFI
select FW_LOADER
select MAC80211_LEDS if IWLWIFI_LEDS
select LEDS_CLASS if IWLWIFI_LEDS
- select RFKILL if IWLWIFI_RFKILL
config IWLWIFI_LEDS
bool "Enable LED support in iwlagn and iwl3945 drivers"
depends on IWLWIFI
-config IWLWIFI_RFKILL
- bool "Enable RF kill support in iwlagn and iwl3945 drivers"
- depends on IWLWIFI
-
config IWLWIFI_SPECTRUM_MEASUREMENT
bool "Enable Spectrum Measurement in iwlagn driver"
depends on IWLWIFI
@@ -75,7 +70,7 @@ config IWLAGN
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>. The
- module will be called iwlagn.ko.
+ module will be called iwlagn.
config IWL4965
@@ -113,7 +108,7 @@ config IWL3945
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>. The
- module will be called iwl3945.ko.
+ module will be called iwl3945.
config IWL3945_SPECTRUM_MEASUREMENT
bool "Enable Spectrum Measurement in iwl3945 driver"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index d79d97ad61a..1d4e0a226fd 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -4,7 +4,6 @@ iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
iwlcore-objs += iwl-scan.o
iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
-iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o
iwlcore-$(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) += iwl-spectrum.o
obj-$(CONFIG_IWLAGN) += iwlagn.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index ac22f59be9e..225e5f88934 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -44,6 +44,15 @@
#include "iwl-core.h"
#include "iwl-dev.h"
+#ifdef CONFIG_IWLWIFI_DEBUG
+static const char *led_type_str[] = {
+ __stringify(IWL_LED_TRG_TX),
+ __stringify(IWL_LED_TRG_RX),
+ __stringify(IWL_LED_TRG_ASSOC),
+ __stringify(IWL_LED_TRG_RADIO),
+ NULL
+};
+#endif /* CONFIG_IWLWIFI_DEBUG */
static const struct {
u16 brightness;
@@ -61,7 +70,7 @@ static const struct {
{10, 110, 110},
{5, 130, 130},
{0, 167, 167},
- /*SOLID_ON*/
+ /* SOLID_ON */
{-1, IWL_LED_SOLID, 0}
};
@@ -143,6 +152,26 @@ static int iwl3945_led_off(struct iwl_priv *priv, int led_id)
}
/*
+ * Set led on in case of association
+ * */
+static int iwl3945_led_associate(struct iwl_priv *priv, int led_id)
+{
+ IWL_DEBUG_LED(priv, "Associated\n");
+
+ priv->allow_blinking = 1;
+ return iwl3945_led_on(priv, led_id);
+}
+/* Set Led off in case of disassociation */
+static int iwl3945_led_disassociate(struct iwl_priv *priv, int led_id)
+{
+ IWL_DEBUG_LED(priv, "Disassociated\n");
+
+ priv->allow_blinking = 0;
+
+ return 0;
+}
+
+/*
* brightness call back function for Tx/Rx LED
*/
static int iwl3945_led_associated(struct iwl_priv *priv, int led_id)
@@ -165,26 +194,21 @@ static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct iwl_led *led = container_of(led_cdev,
- struct iwl_led, led_dev);
+ struct iwl_led, led_dev);
struct iwl_priv *priv = led->priv;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
+ IWL_DEBUG_LED(priv, "Led type = %s brightness = %d\n",
+ led_type_str[led->type], brightness);
+
switch (brightness) {
case LED_FULL:
- if (led->type == IWL_LED_TRG_ASSOC) {
- priv->allow_blinking = 1;
- IWL_DEBUG_LED(priv, "MAC is associated\n");
- }
if (led->led_on)
led->led_on(priv, IWL_LED_LINK);
break;
case LED_OFF:
- if (led->type == IWL_LED_TRG_ASSOC) {
- priv->allow_blinking = 0;
- IWL_DEBUG_LED(priv, "MAC is disassociated\n");
- }
if (led->led_off)
led->led_off(priv, IWL_LED_LINK);
break;
@@ -197,8 +221,6 @@ static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
}
}
-
-
/*
* Register led class with the system
*/
@@ -237,12 +259,12 @@ static int iwl3945_led_register_led(struct iwl_priv *priv,
static inline u8 get_blink_rate(struct iwl_priv *priv)
{
int index;
- u64 current_tpt = priv->rxtxpackets;
- s64 tpt = current_tpt - priv->led_tpt;
+ s64 tpt = priv->rxtxpackets;
if (tpt < 0)
tpt = -tpt;
- priv->led_tpt = current_tpt;
+
+ IWL_DEBUG_LED(priv, "tpt %lld \n", (long long)tpt);
if (!priv->allow_blinking)
index = IWL_MAX_BLINK_TBL;
@@ -250,13 +272,9 @@ static inline u8 get_blink_rate(struct iwl_priv *priv)
for (index = 0; index < IWL_MAX_BLINK_TBL; index++)
if (tpt > (blink_tbl[index].brightness * IWL_1MB_RATE))
break;
- return index;
-}
-static inline int is_rf_kill(struct iwl_priv *priv)
-{
- return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
- test_bit(STATUS_RF_KILL_SW, &priv->status);
+ IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", index);
+ return index;
}
/*
@@ -272,7 +290,7 @@ void iwl3945_led_background(struct iwl_priv *priv)
priv->last_blink_time = 0;
return;
}
- if (is_rf_kill(priv)) {
+ if (iwl_is_rfkill(priv)) {
priv->last_blink_time = 0;
return;
}
@@ -341,8 +359,8 @@ int iwl3945_led_register(struct iwl_priv *priv)
IWL_LED_TRG_ASSOC, 0, trigger);
/* for assoc always turn led on */
- priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_on;
- priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_on;
+ priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_associate;
+ priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_disassociate;
priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
if (ret)
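Note: the debug table added at the top of iwl-3945-led.c relies on __stringify() (from <linux/stringify.h>), which expands an identifier to its string literal, and on the IWL_LED_TRG_* enumerators starting at 0 in that order so led->type can index the array. A tiny self-contained illustration with hypothetical names:

#include <linux/stringify.h>

enum example_led_type { EX_LED_TX, EX_LED_RX, EX_LED_ASSOC };

static const char *example_led_names[] = {
	__stringify(EX_LED_TX),		/* expands to "EX_LED_TX" */
	__stringify(EX_LED_RX),
	__stringify(EX_LED_ASSOC),
	NULL,
};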
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index af6b9d44477..5eb538d18a8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -38,6 +38,7 @@
#include "iwl-commands.h"
#include "iwl-3945.h"
+#include "iwl-sta.h"
#define RS_NAME "iwl-3945-rs"
@@ -683,11 +684,10 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
if (sta)
rate_mask = sta->supp_rates[sband->band];
- /* Send management frames and broadcast/multicast data using lowest
- * rate. */
+ /* Send management frames and NO_ACK data using lowest rate. */
fc = le16_to_cpu(hdr->frame_control);
if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
- is_multicast_ether_addr(hdr->addr1) ||
+ info->flags & IEEE80211_TX_CTL_NO_ACK ||
!sta || !priv_sta) {
IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
if (!rate_mask)
@@ -696,6 +696,8 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
else
info->control.rates[0].idx =
rate_lowest_index(sband, sta);
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ info->control.rates[0].count = 1;
return;
}
@@ -713,13 +715,13 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
!rs_sta->ibss_sta_added) {
- u8 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
+ u8 sta_id = iwl_find_station(priv, hdr->addr1);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_RATE(priv, "LQ: ADD station %pm\n",
hdr->addr1);
- sta_id = iwl3945_add_station(priv,
- hdr->addr1, 0, CMD_ASYNC);
+ sta_id = iwl_add_station(priv, hdr->addr1, false,
+ CMD_ASYNC, NULL);
}
if (sta_id != IWL_INVALID_STATION)
rs_sta->ibss_sta_added = 1;
@@ -974,7 +976,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
rcu_read_lock();
- sta = ieee80211_find_sta(hw, priv->stations_39[sta_id].sta.sta.addr);
+ sta = ieee80211_find_sta(hw, priv->stations[sta_id].sta.sta.addr);
if (!sta) {
rcu_read_unlock();
return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 527525cc091..46288e72488 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -98,7 +98,6 @@ const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
* ... and set IWL_EVT_DISABLE to 1. */
void iwl3945_disable_events(struct iwl_priv *priv)
{
- int ret;
int i;
u32 base; /* SRAM address of event log header */
u32 disable_ptr; /* SRAM address of event-disable bitmap array */
@@ -159,26 +158,17 @@ void iwl3945_disable_events(struct iwl_priv *priv)
return;
}
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- IWL_WARN(priv, "Can not read from adapter at this time.\n");
- return;
- }
-
disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32)));
array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32)));
- iwl_release_nic_access(priv);
if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
disable_ptr);
- ret = iwl_grab_nic_access(priv);
for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
iwl_write_targ_mem(priv,
disable_ptr + (i * sizeof(u32)),
evt_disable[i]);
- iwl_release_nic_access(priv);
} else {
IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n");
@@ -779,35 +769,6 @@ void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
return ;
}
-u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *addr)
-{
- int i, start = IWL_AP_ID;
- int ret = IWL_INVALID_STATION;
- unsigned long flags;
-
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
- (priv->iw_mode == NL80211_IFTYPE_AP))
- start = IWL_STA_ID;
-
- if (is_broadcast_ether_addr(addr))
- return priv->hw_params.bcast_sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = start; i < priv->hw_params.max_stations; i++)
- if ((priv->stations_39[i].used) &&
- (!compare_ether_addr
- (priv->stations_39[i].sta.sta.addr, addr))) {
- ret = i;
- goto out;
- }
-
- IWL_DEBUG_INFO(priv, "can not find STA %pM (total %d)\n",
- addr, priv->num_stations);
- out:
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-}
-
/**
* iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
*
@@ -885,13 +846,13 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, struct iwl_cmd *cmd,
u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
{
unsigned long flags_spin;
- struct iwl3945_station_entry *station;
+ struct iwl_station_entry *station;
if (sta_id == IWL_INVALID_STATION)
return IWL_INVALID_STATION;
spin_lock_irqsave(&priv->sta_lock, flags_spin);
- station = &priv->stations_39[sta_id];
+ station = &priv->stations[sta_id];
station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
station->sta.rate_n_flags = cpu_to_le16(tx_rate);
@@ -899,8 +860,7 @@ u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- iwl_send_add_sta(priv,
- (struct iwl_addsta_cmd *)&station->sta, flags);
+ iwl_send_add_sta(priv, &station->sta, flags);
IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
sta_id, tx_rate);
return sta_id;
@@ -908,55 +868,30 @@ u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
static int iwl3945_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
-
if (src == IWL_PWR_SRC_VAUX) {
if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
~APMG_PS_CTRL_MSK_PWR_SRC);
- iwl_release_nic_access(priv);
iwl_poll_bit(priv, CSR_GPIO_IN,
CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
CSR_GPIO_IN_BIT_AUX_POWER, 5000);
- } else {
- iwl_release_nic_access(priv);
}
} else {
iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
~APMG_PS_CTRL_MSK_PWR_SRC);
- iwl_release_nic_access(priv);
iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */
}
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
+ return 0;
}
static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
- int rc;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- rc = iwl_grab_nic_access(priv);
- if (rc) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return rc;
- }
-
iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->dma_addr);
iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
@@ -973,23 +908,11 @@ static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
/* fake read to flush all prev I/O */
iwl_read_direct32(priv, FH39_RSSR_CTRL);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
return 0;
}
static int iwl3945_tx_reset(struct iwl_priv *priv)
{
- int rc;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- rc = iwl_grab_nic_access(priv);
- if (rc) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return rc;
- }
/* bypass mode */
iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
@@ -1017,8 +940,6 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
@@ -1061,7 +982,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
static int iwl3945_apm_init(struct iwl_priv *priv)
{
- int ret = 0;
+ int ret;
iwl_power_initialize(priv);
@@ -1083,10 +1004,6 @@ static int iwl3945_apm_init(struct iwl_priv *priv)
goto out;
}
- ret = iwl_grab_nic_access(priv);
- if (ret)
- goto out;
-
/* enable DMA */
iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
APMG_CLK_VAL_BSM_CLK_RQT);
@@ -1097,7 +1014,6 @@ static int iwl3945_apm_init(struct iwl_priv *priv)
iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- iwl_release_nic_access(priv);
out:
return ret;
}
@@ -1110,6 +1026,11 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
+ /* Determine HW type */
+ pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
+
+ IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
+
if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
IWL_DEBUG_INFO(priv, "RTP type \n");
else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
@@ -1163,7 +1084,6 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
int iwl3945_hw_nic_init(struct iwl_priv *priv)
{
- u8 rev_id;
int rc;
unsigned long flags;
struct iwl_rx_queue *rxq = &priv->rxq;
@@ -1172,12 +1092,6 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
priv->cfg->ops->lib->apm_ops.init(priv);
spin_unlock_irqrestore(&priv->lock, flags);
- /* Determine HW type */
- rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
- if (rc)
- return rc;
- IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
-
rc = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
if (rc)
return rc;
@@ -1198,22 +1112,13 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
iwl3945_rx_init(priv, rxq);
- spin_lock_irqsave(&priv->lock, flags);
/* Look at using this instead:
rxq->need_update = 1;
iwl_rx_queue_update_write_ptr(priv, rxq);
*/
- rc = iwl_grab_nic_access(priv);
- if (rc) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return rc;
- }
iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
- iwl_release_nic_access(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
rc = iwl3945_txq_ctx_reset(priv);
if (rc)
@@ -1245,14 +1150,6 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
{
int txq_id;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_grab_nic_access(priv)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl3945_hw_txq_ctx_free(priv);
- return;
- }
/* stop SCD */
iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
@@ -1265,9 +1162,6 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1000);
}
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
iwl3945_hw_txq_ctx_free(priv);
}
@@ -1312,12 +1206,8 @@ static void iwl3945_apm_stop(struct iwl_priv *priv)
static int iwl3945_apm_reset(struct iwl_priv *priv)
{
- int rc;
- unsigned long flags;
-
iwl3945_apm_stop_master(priv);
- spin_lock_irqsave(&priv->lock, flags);
iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(10);
@@ -1327,36 +1217,31 @@ static int iwl3945_apm_reset(struct iwl_priv *priv)
iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
- rc = iwl_grab_nic_access(priv);
- if (!rc) {
- iwl_write_prph(priv, APMG_CLK_CTRL_REG,
- APMG_CLK_VAL_BSM_CLK_RQT);
+ iwl_write_prph(priv, APMG_CLK_CTRL_REG,
+ APMG_CLK_VAL_BSM_CLK_RQT);
- iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
- iwl_write_prph(priv, APMG_RTC_INT_STT_REG,
+ iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
+ iwl_write_prph(priv, APMG_RTC_INT_STT_REG,
0xFFFFFFFF);
- /* enable DMA */
- iwl_write_prph(priv, APMG_CLK_EN_REG,
- APMG_CLK_VAL_DMA_CLK_RQT |
- APMG_CLK_VAL_BSM_CLK_RQT);
- udelay(10);
+ /* enable DMA */
+ iwl_write_prph(priv, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT |
+ APMG_CLK_VAL_BSM_CLK_RQT);
+ udelay(10);
- iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
+ iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
- udelay(5);
- iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
+ udelay(5);
+ iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
- iwl_release_nic_access(priv);
- }
/* Clear the 'host command active' bit... */
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
- spin_unlock_irqrestore(&priv->lock, flags);
- return rc;
+ return 0;
}
/**
@@ -1964,6 +1849,193 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
return 0;
}
+static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
+{
+ int rc = 0;
+ struct iwl_rx_packet *res = NULL;
+ struct iwl3945_rxon_assoc_cmd rxon_assoc;
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_RXON_ASSOC,
+ .len = sizeof(rxon_assoc),
+ .meta.flags = CMD_WANT_SKB,
+ .data = &rxon_assoc,
+ };
+ const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
+ const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
+
+ if ((rxon1->flags == rxon2->flags) &&
+ (rxon1->filter_flags == rxon2->filter_flags) &&
+ (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
+ (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
+ IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
+ return 0;
+ }
+
+ rxon_assoc.flags = priv->staging_rxon.flags;
+ rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
+ rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
+ rxon_assoc.reserved = 0;
+
+ rc = iwl_send_cmd_sync(priv, &cmd);
+ if (rc)
+ return rc;
+
+ res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
+ if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
+ rc = -EIO;
+ }
+
+ priv->alloc_rxb_skb--;
+ dev_kfree_skb_any(cmd.meta.u.skb);
+
+ return rc;
+}
+
+/**
+ * iwl3945_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data. This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
+ */
+static int iwl3945_commit_rxon(struct iwl_priv *priv)
+{
+ /* cast away the const for active_rxon in this function */
+ struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
+ struct iwl3945_rxon_cmd *staging_rxon = (void *)&priv->staging_rxon;
+ int rc = 0;
+ bool new_assoc =
+ !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK);
+
+ if (!iwl_is_alive(priv))
+ return -1;
+
+ /* always get timestamp with Rx frame */
+ staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
+
+ /* select antenna */
+ staging_rxon->flags &=
+ ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
+ staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
+
+ rc = iwl_check_rxon_cmd(priv);
+ if (rc) {
+ IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
+ return -EINVAL;
+ }
+
+ /* If we don't need to send a full RXON, we can use
+ * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
+ * and other flags for the current radio configuration. */
+ if (!iwl_full_rxon_required(priv)) {
+ rc = iwl_send_rxon_assoc(priv);
+ if (rc) {
+ IWL_ERR(priv, "Error setting RXON_ASSOC "
+ "configuration (%d).\n", rc);
+ return rc;
+ }
+
+ memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+
+ return 0;
+ }
+
+ /* If we are currently associated and the new config requires
+ * an RXON_ASSOC and the new config wants the associated mask enabled,
+ * we must clear the associated from the active configuration
+ * before we apply the new config */
+ if (iwl_is_associated(priv) && new_assoc) {
+ IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+ /*
+ * reserved4 and 5 could have been filled by the iwlcore code.
+ * Let's clear them before pushing to the 3945.
+ */
+ active_rxon->reserved4 = 0;
+ active_rxon->reserved5 = 0;
+ rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
+ sizeof(struct iwl3945_rxon_cmd),
+ &priv->active_rxon);
+
+ /* If the mask clearing failed then we set
+ * active_rxon back to what it was previously */
+ if (rc) {
+ active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+ IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
+ "configuration (%d).\n", rc);
+ return rc;
+ }
+ }
+
+ IWL_DEBUG_INFO(priv, "Sending RXON\n"
+ "* with%s RXON_FILTER_ASSOC_MSK\n"
+ "* channel = %d\n"
+ "* bssid = %pM\n",
+ (new_assoc ? "" : "out"),
+ le16_to_cpu(staging_rxon->channel),
+ staging_rxon->bssid_addr);
+
+ /*
+ * reserved4 and 5 could have been filled by the iwlcore code.
+ * Let's clear them before pushing to the 3945.
+ */
+ staging_rxon->reserved4 = 0;
+ staging_rxon->reserved5 = 0;
+
+ iwl_set_rxon_hwcrypto(priv, !priv->hw_params.sw_crypto);
+
+ /* Apply the new configuration */
+ rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
+ sizeof(struct iwl3945_rxon_cmd),
+ staging_rxon);
+ if (rc) {
+ IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
+ return rc;
+ }
+
+ memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+
+ iwl_clear_stations_table(priv);
+
+ /* If we issue a new RXON command which required a tune then we must
+ * send a new TXPOWER command or we won't be able to Tx any frames */
+ rc = priv->cfg->ops->lib->send_tx_power(priv);
+ if (rc) {
+ IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
+ return rc;
+ }
+
+ /* Add the broadcast address so we can send broadcast frames */
+ if (iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL) ==
+ IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
+ return -EIO;
+ }
+
+ /* If we have set the ASSOC_MSK and we are in BSS mode then
+ * add the IWL_AP_ID to the station rate table */
+ if (iwl_is_associated(priv) &&
+ (priv->iw_mode == NL80211_IFTYPE_STATION))
+ if (iwl_add_station(priv, priv->active_rxon.bssid_addr,
+ true, CMD_SYNC, NULL) == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Error adding AP address for transmit\n");
+ return -EIO;
+ }
+
+ /* Init the hardware's rate fallback order based on the band */
+ rc = iwl3945_init_hw_rate_table(priv);
+ if (rc) {
+ IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/* will add 3945 channel switch cmd handling later */
int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel)
{
@@ -2314,14 +2386,6 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
{
int rc;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- rc = iwl_grab_nic_access(priv);
- if (rc) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return rc;
- }
iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
@@ -2329,28 +2393,17 @@ int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
if (rc < 0)
IWL_ERR(priv, "Can't stop Rx DMA.\n");
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
return 0;
}
int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
- int rc;
- unsigned long flags;
int txq_id = txq->q.id;
struct iwl3945_shared *shared_data = priv->shared_virt;
shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
- spin_lock_irqsave(&priv->lock, flags);
- rc = iwl_grab_nic_access(priv);
- if (rc) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return rc;
- }
iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
@@ -2360,11 +2413,9 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
- iwl_release_nic_access(priv);
/* fake read to flush all prev. writes */
iwl_read32(priv, FH39_TSSR_CBB_BASE);
- spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
@@ -2384,13 +2435,25 @@ static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
}
}
+
static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
{
- u16 size = (u16)sizeof(struct iwl3945_addsta_cmd);
- memcpy(data, cmd, size);
- return size;
+ struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
+ addsta->mode = cmd->mode;
+ memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+ memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
+ addsta->station_flags = cmd->station_flags;
+ addsta->station_flags_msk = cmd->station_flags_msk;
+ addsta->tid_disable_tx = cpu_to_le16(0);
+ addsta->rate_n_flags = cmd->rate_n_flags;
+ addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+ addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+ addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+
+ return (u16)sizeof(struct iwl3945_addsta_cmd);
}
+
/**
* iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
*/
@@ -2672,10 +2735,6 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
inst_len = priv->ucode_init.len;
data_len = priv->ucode_init_data.len;
- rc = iwl_grab_nic_access(priv);
- if (rc)
- return rc;
-
iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
@@ -2689,10 +2748,8 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
le32_to_cpu(*image));
rc = iwl3945_verify_bsm(priv);
- if (rc) {
- iwl_release_nic_access(priv);
+ if (rc)
return rc;
- }
/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
@@ -2724,11 +2781,14 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
iwl_write_prph(priv, BSM_WR_CTRL_REG,
BSM_WR_CTRL_REG_BIT_START_EN);
- iwl_release_nic_access(priv);
-
return 0;
}
+static struct iwl_hcmd_ops iwl3945_hcmd = {
+ .rxon_assoc = iwl3945_send_rxon_assoc,
+ .commit_rxon = iwl3945_commit_rxon,
+};
+
static struct iwl_lib_ops iwl3945_lib = {
.txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl3945_hw_txq_free_tfd,
@@ -2758,6 +2818,9 @@ static struct iwl_lib_ops iwl3945_lib = {
},
.send_tx_power = iwl3945_send_tx_power,
.is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
+ .post_associate = iwl3945_post_associate,
+ .isr = iwl_isr_legacy,
+ .config_ap = iwl3945_config_ap,
};
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
@@ -2767,6 +2830,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
static struct iwl_ops iwl3945_ops = {
.lib = &iwl3945_lib,
+ .hcmd = &iwl3945_hcmd,
.utils = &iwl3945_hcmd_utils,
};
@@ -2779,7 +2843,8 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.eeprom_size = IWL3945_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
.ops = &iwl3945_ops,
- .mod_params = &iwl3945_mod_params
+ .mod_params = &iwl3945_mod_params,
+ .use_isr_legacy = true
};
static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2791,7 +2856,8 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.eeprom_size = IWL3945_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
.ops = &iwl3945_ops,
- .mod_params = &iwl3945_mod_params
+ .mod_params = &iwl3945_mod_params,
+ .use_isr_legacy = true
};
struct pci_device_id iwl3945_hw_card_ids[] = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 55188844657..fbb3a573463 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -36,10 +36,6 @@
#include <linux/kernel.h>
#include <net/ieee80211_radiotap.h>
-/*used for rfkill*/
-#include <linux/rfkill.h>
-#include <linux/input.h>
-
/* Hardware specific file defines the PCI IDs table for that hardware module */
extern struct pci_device_id iwl3945_hw_card_ids[];
@@ -155,14 +151,12 @@ struct iwl3945_frame {
#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
#define STATUS_INT_ENABLED 2
#define STATUS_RF_KILL_HW 3
-#define STATUS_RF_KILL_SW 4
#define STATUS_INIT 5
#define STATUS_ALIVE 6
#define STATUS_READY 7
#define STATUS_TEMPERATURE 8
#define STATUS_GEO_CONFIGURED 9
#define STATUS_EXIT_PENDING 10
-#define STATUS_IN_SUSPEND 11
#define STATUS_STATISTICS 12
#define STATUS_SCANNING 13
#define STATUS_SCAN_ABORTING 14
@@ -203,11 +197,6 @@ struct iwl3945_ibss_seq {
* for use by iwl-*.c
*
*****************************************************************************/
-struct iwl3945_addsta_cmd;
-extern int iwl3945_send_add_station(struct iwl_priv *priv,
- struct iwl3945_addsta_cmd *sta, u8 flags);
-extern u8 iwl3945_add_station(struct iwl_priv *priv, const u8 *bssid,
- int is_ap, u8 flags);
extern int iwl3945_power_init_handle(struct iwl_priv *priv);
extern int iwl3945_eeprom_init(struct iwl_priv *priv);
extern int iwl3945_calc_db_from_ratio(int sig_ratio);
@@ -278,6 +267,8 @@ extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
extern void iwl3945_disable_events(struct iwl_priv *priv);
extern int iwl4965_get_temperature(const struct iwl_priv *priv);
+extern void iwl3945_post_associate(struct iwl_priv *priv);
+extern void iwl3945_config_ap(struct iwl_priv *priv);
/**
* iwl3945_hw_find_station - Find station id for a given BSSID
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 847a6220c5e..8f3d4bc6a03 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -163,10 +163,6 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
inst_len = priv->ucode_init.len;
data_len = priv->ucode_init_data.len;
- ret = iwl_grab_nic_access(priv);
- if (ret)
- return ret;
-
iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
@@ -179,10 +175,8 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
_iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));
ret = iwl4965_verify_bsm(priv);
- if (ret) {
- iwl_release_nic_access(priv);
+ if (ret)
return ret;
- }
/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
@@ -211,7 +205,6 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
* (e.g. when powering back up after power-save shutdown) */
iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
- iwl_release_nic_access(priv);
return 0;
}
@@ -229,20 +222,12 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
{
dma_addr_t pinst;
dma_addr_t pdata;
- unsigned long flags;
int ret = 0;
/* bits 35:4 for 4965 */
pinst = priv->ucode_code.p_addr >> 4;
pdata = priv->ucode_data_backup.p_addr >> 4;
- spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
-
/* Tell bootstrap uCode where to find image to load */
iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
@@ -253,10 +238,6 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
* that all new ptr/size info is in place */
iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
priv->ucode_code.len | BSM_DRAM_INST_LOAD);
- iwl_release_nic_access(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
return ret;
@@ -312,10 +293,12 @@ restart:
queue_work(priv->workqueue, &priv->restart);
}
-static int is_fat_channel(__le32 rxon_flags)
+static bool is_fat_channel(__le32 rxon_flags)
{
- return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
- (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
+ int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
+ >> RXON_FLG_CHANNEL_MODE_POS;
+ return ((chan_mod == CHANNEL_MODE_PURE_40) ||
+ (chan_mod == CHANNEL_MODE_MIXED));
}
/*
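[Editor's note, not part of the patch] The reworked is_fat_channel() above stops testing two separate flag bits and instead decodes the two-bit channel-mode field from the RXON flags. A stand-alone sketch of that decode follows; the mask, shift and mode values are assumptions for the example (the real definitions live in iwl-commands.h, which this diff does not show), and plain uint32_t is used in place of the driver's __le32 handling.

/* illustrative sketch, buildable with any C compiler */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHANNEL_MODE_MSK     0x06000000u  /* assumed RXON_FLG_CHANNEL_MODE_MSK */
#define CHANNEL_MODE_POS     25           /* assumed RXON_FLG_CHANNEL_MODE_POS */
#define CHANNEL_MODE_LEGACY  0
#define CHANNEL_MODE_PURE_40 1
#define CHANNEL_MODE_MIXED   2

static bool is_fat_channel(uint32_t rxon_flags)
{
	/* pull the two-bit channel mode out of the flags word */
	uint32_t chan_mod = (rxon_flags & CHANNEL_MODE_MSK) >> CHANNEL_MODE_POS;

	/* "fat" (40 MHz) means pure-40 or mixed mode, exactly as in the hunk above */
	return chan_mod == CHANNEL_MODE_PURE_40 || chan_mod == CHANNEL_MODE_MIXED;
}

int main(void)
{
	uint32_t flags = (uint32_t)CHANNEL_MODE_MIXED << CHANNEL_MODE_POS;

	printf("fat=%d\n", is_fat_channel(flags));  /* prints fat=1 */
	return 0;
}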
@@ -358,10 +341,6 @@ static int iwl4965_apm_init(struct iwl_priv *priv)
goto out;
}
- ret = iwl_grab_nic_access(priv);
- if (ret)
- goto out;
-
/* enable DMA */
iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
APMG_CLK_VAL_BSM_CLK_RQT);
@@ -372,7 +351,6 @@ static int iwl4965_apm_init(struct iwl_priv *priv)
iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- iwl_release_nic_access(priv);
out:
return ret;
}
@@ -454,11 +432,9 @@ static void iwl4965_apm_stop(struct iwl_priv *priv)
static int iwl4965_apm_reset(struct iwl_priv *priv)
{
int ret = 0;
- unsigned long flags;
iwl4965_apm_stop_master(priv);
- spin_lock_irqsave(&priv->lock, flags);
iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
@@ -475,9 +451,6 @@ static int iwl4965_apm_reset(struct iwl_priv *priv)
udelay(10);
- ret = iwl_grab_nic_access(priv);
- if (ret)
- goto out;
/* Enable DMA and BSM Clock */
iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
APMG_CLK_VAL_BSM_CLK_RQT);
@@ -488,14 +461,10 @@ static int iwl4965_apm_reset(struct iwl_priv *priv)
iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- iwl_release_nic_access(priv);
-
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
out:
- spin_unlock_irqrestore(&priv->lock, flags);
-
return ret;
}
@@ -681,18 +650,11 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
{
u32 a;
unsigned long flags;
- int ret;
int i, chan;
u32 reg_val;
spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
-
/* Clear 4965's internal Tx Scheduler data base */
priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
@@ -759,10 +721,9 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
}
- iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
+ return 0;
}
static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
@@ -788,6 +749,12 @@ static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
.nrg_th_ofdm = 100,
};
+static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
+{
+ /* want Kelvin */
+ priv->hw_params.ct_kill_threshold = CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);
+}
+
/**
* iwl4965_hw_set_hw_params
*
@@ -822,7 +789,8 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.rx_chains_num = 2;
priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
- priv->hw_params.ct_kill_threshold = CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);
+ if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+ priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
priv->hw_params.sens = &iwl4965_sensitivity;
@@ -1524,7 +1492,7 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
struct iwl4965_txpowertable_cmd cmd = { 0 };
int ret;
u8 band = 0;
- u8 is_fat = 0;
+ bool is_fat = false;
u8 ctrl_chan_high = 0;
if (test_bit(STATUS_SCANNING, &priv->status)) {
@@ -1602,7 +1570,7 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
{
int rc;
u8 band = 0;
- u8 is_fat = 0;
+ bool is_fat = false;
u8 ctrl_chan_high = 0;
struct iwl4965_channel_switch_cmd cmd = { 0 };
const struct iwl_channel_info *ch_info;
@@ -1833,8 +1801,6 @@ static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
u16 ssn_idx, u8 tx_fifo)
{
- int ret = 0;
-
if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
(IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
IWL_WARN(priv,
@@ -1844,10 +1810,6 @@ static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
return -EINVAL;
}
- ret = iwl_grab_nic_access(priv);
- if (ret)
- return ret;
-
iwl4965_tx_queue_stop_scheduler(priv, txq_id);
iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
@@ -1861,8 +1823,6 @@ static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
iwl_txq_ctx_deactivate(priv, txq_id);
iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
- iwl_release_nic_access(priv);
-
return 0;
}
@@ -1904,7 +1864,6 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
unsigned long flags;
- int ret;
u16 ra_tid;
if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
@@ -1922,11 +1881,6 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
/* Stop this Tx queue before configuring it */
iwl4965_tx_queue_stop_scheduler(priv, txq_id);
@@ -1959,7 +1913,6 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
- iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
@@ -2268,9 +2221,10 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->txpower_work);
}
-
static struct iwl_hcmd_ops iwl4965_hcmd = {
.rxon_assoc = iwl4965_send_rxon_assoc,
+ .commit_rxon = iwl_commit_rxon,
+ .set_rxon_chain = iwl_set_rxon_chain,
};
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
@@ -2323,7 +2277,13 @@ static struct iwl_lib_ops iwl4965_lib = {
},
.send_tx_power = iwl4965_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
- .temperature = iwl4965_temperature_calib,
+ .post_associate = iwl_post_associate,
+ .config_ap = iwl_config_ap,
+ .isr = iwl_isr_legacy,
+ .temp_ops = {
+ .temperature = iwl4965_temperature_calib,
+ .set_ct_kill = iwl4965_set_ct_threshold,
+ },
};
static struct iwl_ops iwl4965_ops = {
@@ -2343,6 +2303,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
.ops = &iwl4965_ops,
.mod_params = &iwl4965_mod_params,
+ .use_isr_legacy = true
};
/* Module firmware */
@@ -2350,8 +2311,6 @@ MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
-MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(debug, iwl4965_mod_params.debug, uint, 0444);
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 15cac70e36e..4ef6804a455 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -87,6 +87,18 @@
#define IWL50_NUM_AMPDU_QUEUES 10
#define IWL50_FIRST_AMPDU_QUEUE 10
+/* 5150 only */
+#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
+
+static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+{
+ u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
+ EEPROM_5000_TEMPERATURE);
+ /* offset = temperature - voltage / coef */
+ s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
+ return offset;
+}
+
/* Fixed (non-configurable) rx data from phy */
/**
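[Editor's note, not part of the patch] The iwl-5000-hw.h hunk above centralizes the 5150 voltage-to-temperature coefficient and the calibration-offset helper so that the CT-kill setup (iwl5150_set_ct_threshold) and the temperature read-back (iwl5150_temperature), both added to iwl-5000.c further down, share one formula. A minimal worked example of that arithmetic follows; the EEPROM calibration words, the 110 C CT_KILL_THRESHOLD and the Celsius/Kelvin macros are assumptions for the sketch, not values taken from this diff.

/* illustrative sketch, buildable with any C compiler */
#include <stdio.h>

#define VOLT2TEMP_COEFF (-5)          /* IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF */
#define CT_KILL_C       110           /* assumed CT_KILL_THRESHOLD, in Celsius */
#define C_TO_K(c)       ((c) + 273)   /* assumed CELSIUS_TO_KELVIN() */
#define K_TO_C(k)       ((k) - 273)   /* assumed KELVIN_TO_CELSIUS() */

int main(void)
{
	/* pretend EEPROM calibration words: [0] temperature, [1] voltage */
	int temp_calib[2] = { 300, 25 };

	/* offset = temperature - voltage / coef, as in iwl_temp_calib_to_offset() */
	int offset = temp_calib[0] - temp_calib[1] / VOLT2TEMP_COEFF;

	/* raw CT-kill threshold programmed into hw_params (iwl5150_set_ct_threshold) */
	int ct_kill_raw = (C_TO_K(CT_KILL_C) - offset) * VOLT2TEMP_COEFF;

	/* converting a raw statistics reading back to Celsius (iwl5150_temperature) */
	int raw = ct_kill_raw;                       /* e.g. a reading right at the limit */
	int kelvin = raw / VOLT2TEMP_COEFF + offset; /* inverse of the threshold formula */

	printf("offset=%d raw_threshold=%d temp=%d C\n",
	       offset, ct_kill_raw, K_TO_C(kelvin));  /* temp comes back as 110 C */
	return 0;
}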
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index e5ca2511a81..b3c648ce8c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -46,7 +46,7 @@
#include "iwl-6000-hw.h"
/* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 1
+#define IWL5000_UCODE_API_MAX 2
#define IWL5150_UCODE_API_MAX 2
/* Lowest firmware API version supported */
@@ -124,10 +124,6 @@ static int iwl5000_apm_init(struct iwl_priv *priv)
return ret;
}
- ret = iwl_grab_nic_access(priv);
- if (ret)
- return ret;
-
/* enable DMA */
iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
@@ -137,8 +133,6 @@ static int iwl5000_apm_init(struct iwl_priv *priv)
iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- iwl_release_nic_access(priv);
-
return ret;
}
@@ -165,12 +159,9 @@ static void iwl5000_apm_stop(struct iwl_priv *priv)
static int iwl5000_apm_reset(struct iwl_priv *priv)
{
int ret = 0;
- unsigned long flags;
iwl5000_apm_stop_master(priv);
- spin_lock_irqsave(&priv->lock, flags);
-
iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(10);
@@ -193,10 +184,6 @@ static int iwl5000_apm_reset(struct iwl_priv *priv)
goto out;
}
- ret = iwl_grab_nic_access(priv);
- if (ret)
- goto out;
-
/* enable DMA */
iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
@@ -205,11 +192,7 @@ static int iwl5000_apm_reset(struct iwl_priv *priv)
/* disable L1-Active */
iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
- iwl_release_nic_access(priv);
-
out:
- spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
@@ -252,11 +235,9 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
* (PCIe power is lost before PERST# is asserted),
* causing ME FW to lose ownership and not being able to obtain it back.
*/
- iwl_grab_nic_access(priv);
iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
- iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -434,15 +415,19 @@ static const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
return &priv->eeprom[address];
}
-static s32 iwl5150_get_ct_threshold(struct iwl_priv *priv)
+static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
- const s32 volt2temp_coef = -5;
- u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
- EEPROM_5000_TEMPERATURE);
- /* offset = temperate - voltage / coef */
- s32 offset = temp_calib[0] - temp_calib[1] / volt2temp_coef;
- s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD) - offset;
- return threshold * volt2temp_coef;
+ const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
+ s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD) -
+ iwl_temp_calib_to_offset(priv);
+
+ priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
+}
+
+static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
+{
+ /* want Celsius */
+ priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
}
/*
@@ -533,19 +518,9 @@ static int iwl5000_load_section(struct iwl_priv *priv,
struct fw_desc *image,
u32 dst_addr)
{
- int ret = 0;
- unsigned long flags;
-
dma_addr_t phy_addr = image->p_addr;
u32 byte_cnt = image->len;
- spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
-
iwl_write_direct32(priv,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
@@ -574,8 +549,6 @@ static int iwl5000_load_section(struct iwl_priv *priv,
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
@@ -736,18 +709,11 @@ static int iwl5000_alive_notify(struct iwl_priv *priv)
{
u32 a;
unsigned long flags;
- int ret;
int i, chan;
u32 reg_val;
spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
-
priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
@@ -815,7 +781,6 @@ static int iwl5000_alive_notify(struct iwl_priv *priv)
iwl_txq_ctx_activate(priv, 8);
iwl_txq_ctx_activate(priv, 9);
- iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -868,17 +833,8 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
- switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
- case CSR_HW_REV_TYPE_5150:
- /* 5150 wants in Kelvin */
- priv->hw_params.ct_kill_threshold =
- iwl5150_get_ct_threshold(priv);
- break;
- default:
- /* all others want Celsius */
- priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
- break;
- }
+ if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+ priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
/* Set initial calibration set */
switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
@@ -900,7 +856,6 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
break;
}
-
return 0;
}
@@ -1006,7 +961,6 @@ static int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
unsigned long flags;
- int ret;
u16 ra_tid;
if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
@@ -1024,11 +978,6 @@ static int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
/* Stop this Tx queue before configuring it */
iwl5000_tx_queue_stop_scheduler(priv, txq_id);
@@ -1064,7 +1013,6 @@ static int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
- iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
@@ -1073,8 +1021,6 @@ static int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
static int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
u16 ssn_idx, u8 tx_fifo)
{
- int ret;
-
if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
(IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
IWL_ERR(priv,
@@ -1084,10 +1030,6 @@ static int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
return -EINVAL;
}
- ret = iwl_grab_nic_access(priv);
- if (ret)
- return ret;
-
iwl5000_tx_queue_stop_scheduler(priv, txq_id);
iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));
@@ -1101,15 +1043,16 @@ static int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
iwl_txq_ctx_deactivate(priv, txq_id);
iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
- iwl_release_nic_access(priv);
-
return 0;
}
u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
{
u16 size = (u16)sizeof(struct iwl_addsta_cmd);
- memcpy(data, cmd, size);
+ struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
+ memcpy(addsta, cmd, size);
+ /* reserved in 5000 */
+ addsta->rate_n_flags = cpu_to_le16(0);
return size;
}
@@ -1434,6 +1377,17 @@ static void iwl5000_temperature(struct iwl_priv *priv)
priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
}
+static void iwl5150_temperature(struct iwl_priv *priv)
+{
+ u32 vt = 0;
+ s32 offset = iwl_temp_calib_to_offset(priv);
+
+ vt = le32_to_cpu(priv->statistics.general.temperature);
+ vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
+ /* now vt holds the temperature in Kelvin */
+ priv->temperature = KELVIN_TO_CELSIUS(vt);
+}
+
/* Calc max signal level (dBm) among 3 possible receivers */
int iwl5000_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp)
@@ -1474,6 +1428,8 @@ int iwl5000_calc_rssi(struct iwl_priv *priv,
struct iwl_hcmd_ops iwl5000_hcmd = {
.rxon_assoc = iwl5000_send_rxon_assoc,
+ .commit_rxon = iwl_commit_rxon,
+ .set_rxon_chain = iwl_set_rxon_chain,
};
struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
@@ -1502,7 +1458,6 @@ struct iwl_lib_ops iwl5000_lib = {
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
- .temperature = iwl5000_temperature,
.update_chain_flags = iwl_update_chain_flags,
.apm_ops = {
.init = iwl5000_apm_init,
@@ -1527,6 +1482,63 @@ struct iwl_lib_ops iwl5000_lib = {
.calib_version = iwl5000_eeprom_calib_version,
.query_addr = iwl5000_eeprom_query_addr,
},
+ .post_associate = iwl_post_associate,
+ .isr = iwl_isr_ict,
+ .config_ap = iwl_config_ap,
+ .temp_ops = {
+ .temperature = iwl5000_temperature,
+ .set_ct_kill = iwl5000_set_ct_threshold,
+ },
+};
+
+static struct iwl_lib_ops iwl5150_lib = {
+ .set_hw_params = iwl5000_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwl5000_txq_set_sched,
+ .txq_agg_enable = iwl5000_txq_agg_enable,
+ .txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = iwl_hw_txq_free_tfd,
+ .txq_init = iwl_hw_tx_queue_init,
+ .rx_handler_setup = iwl5000_rx_handler_setup,
+ .setup_deferred_work = iwl5000_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+ .load_ucode = iwl5000_load_ucode,
+ .init_alive_start = iwl5000_init_alive_start,
+ .alive_notify = iwl5000_alive_notify,
+ .send_tx_power = iwl5000_send_tx_power,
+ .update_chain_flags = iwl_update_chain_flags,
+ .apm_ops = {
+ .init = iwl5000_apm_init,
+ .reset = iwl5000_apm_reset,
+ .stop = iwl5000_apm_stop,
+ .config = iwl5000_nic_config,
+ .set_pwr_src = iwl_set_pwr_src,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_5000_REG_BAND_1_CHANNELS,
+ EEPROM_5000_REG_BAND_2_CHANNELS,
+ EEPROM_5000_REG_BAND_3_CHANNELS,
+ EEPROM_5000_REG_BAND_4_CHANNELS,
+ EEPROM_5000_REG_BAND_5_CHANNELS,
+ EEPROM_5000_REG_BAND_24_FAT_CHANNELS,
+ EEPROM_5000_REG_BAND_52_FAT_CHANNELS
+ },
+ .verify_signature = iwlcore_eeprom_verify_signature,
+ .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
+ .release_semaphore = iwlcore_eeprom_release_semaphore,
+ .calib_version = iwl5000_eeprom_calib_version,
+ .query_addr = iwl5000_eeprom_query_addr,
+ },
+ .post_associate = iwl_post_associate,
+ .isr = iwl_isr_ict,
+ .config_ap = iwl_config_ap,
+ .temp_ops = {
+ .temperature = iwl5150_temperature,
+ .set_ct_kill = iwl5150_set_ct_threshold,
+ },
};
struct iwl_ops iwl5000_ops = {
@@ -1535,6 +1547,12 @@ struct iwl_ops iwl5000_ops = {
.utils = &iwl5000_hcmd_utils,
};
+static struct iwl_ops iwl5150_ops = {
+ .lib = &iwl5150_lib,
+ .hcmd = &iwl5000_hcmd,
+ .utils = &iwl5000_hcmd_utils,
+};
+
struct iwl_mod_params iwl50_mod_params = {
.num_of_queues = IWL50_NUM_QUEUES,
.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
@@ -1630,7 +1648,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
.ucode_api_max = IWL5150_UCODE_API_MAX,
.ucode_api_min = IWL5150_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .ops = &iwl5000_ops,
+ .ops = &iwl5150_ops,
.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
@@ -1643,9 +1661,6 @@ struct iwl_cfg iwl5150_agn_cfg = {
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
-module_param_named(disable50, iwl50_mod_params.disable, int, 0444);
-MODULE_PARM_DESC(disable50,
- "manually disable the 50XX radio (default 0 [radio on])");
module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, 0444);
MODULE_PARM_DESC(swcrypto50,
"using software crypto engine (default 0 [hardware])\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index cab7842a73a..ff20e5048a5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -52,7 +52,7 @@
/* max allowed rate miss before sync LQ cmd */
#define IWL_MISSED_RATE_MAX 15
/* max time to accum history 2 seconds */
-#define IWL_RATE_SCALE_FLUSH_INTVL (2*HZ)
+#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
static u8 rs_ht_to_legacy[] = {
IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
@@ -100,6 +100,7 @@ struct iwl_scale_tbl_info {
u8 is_fat; /* 1 = 40 MHz channel width */
u8 is_dup; /* 1 = duplicated data streams */
u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
+ u8 max_search; /* maximum number of tables we can search */
s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
u32 current_rate; /* rate_n_flags, uCode API format */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
@@ -135,7 +136,7 @@ struct iwl_lq_sta {
u32 table_count;
u32 total_failed; /* total failed frames, any/all rates */
u32 total_success; /* total successful frames, any/all rates */
- u32 flush_timer; /* time staying in mode before new search */
+ u64 flush_timer; /* time staying in mode before new search */
u8 action_counter; /* # mode-switch actions tried */
u8 is_green;
@@ -160,6 +161,7 @@ struct iwl_lq_sta {
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;
struct dentry *rs_sta_dbgfs_stats_table_file;
+ struct dentry *rs_sta_dbgfs_rate_scale_data_file;
struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
u32 dbg_fixed_rate;
#endif
@@ -167,10 +169,12 @@ struct iwl_lq_sta {
/* used to be in sta_info */
int last_txrate_idx;
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
};
static void rs_rate_scale_perform(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
+ struct sk_buff *skb,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta);
static void rs_fill_link_cmd(const struct iwl_priv *priv,
@@ -191,7 +195,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
* 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
* "G" is the only table that supports CCK (the first 4 rates).
*/
-/*FIXME:RS:need to separate tables for MIMO2/MIMO3*/
+
static s32 expected_tpt_A[IWL_RATE_COUNT] = {
0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186
};
@@ -208,11 +212,11 @@ static s32 expected_tpt_siso20MHzSGI[IWL_RATE_COUNT] = {
0, 0, 0, 0, 46, 46, 82, 110, 132, 168, 192, 202, 211
};
-static s32 expected_tpt_mimo20MHz[IWL_RATE_COUNT] = {
+static s32 expected_tpt_mimo2_20MHz[IWL_RATE_COUNT] = {
0, 0, 0, 0, 74, 74, 123, 155, 179, 214, 236, 244, 251
};
-static s32 expected_tpt_mimo20MHzSGI[IWL_RATE_COUNT] = {
+static s32 expected_tpt_mimo2_20MHzSGI[IWL_RATE_COUNT] = {
0, 0, 0, 0, 81, 81, 131, 164, 188, 222, 243, 251, 257
};
@@ -224,14 +228,50 @@ static s32 expected_tpt_siso40MHzSGI[IWL_RATE_COUNT] = {
0, 0, 0, 0, 83, 83, 135, 169, 193, 229, 250, 257, 264
};
-static s32 expected_tpt_mimo40MHz[IWL_RATE_COUNT] = {
+static s32 expected_tpt_mimo2_40MHz[IWL_RATE_COUNT] = {
0, 0, 0, 0, 123, 123, 182, 214, 235, 264, 279, 285, 289
};
-static s32 expected_tpt_mimo40MHzSGI[IWL_RATE_COUNT] = {
+static s32 expected_tpt_mimo2_40MHzSGI[IWL_RATE_COUNT] = {
0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293
};
+/* Expected throughput metric MIMO3 */
+static s32 expected_tpt_mimo3_20MHz[IWL_RATE_COUNT] = {
+ 0, 0, 0, 0, 99, 99, 153, 186, 208, 239, 256, 263, 268
+};
+
+static s32 expected_tpt_mimo3_20MHzSGI[IWL_RATE_COUNT] = {
+ 0, 0, 0, 0, 106, 106, 162, 194, 215, 246, 262, 268, 273
+};
+
+static s32 expected_tpt_mimo3_40MHz[IWL_RATE_COUNT] = {
+ 0, 0, 0, 0, 152, 152, 211, 239, 255, 279, 290, 294, 297
+};
+
+static s32 expected_tpt_mimo3_40MHzSGI[IWL_RATE_COUNT] = {
+ 0, 0, 0, 0, 160, 160, 219, 245, 261, 284, 294, 297, 300
+};
+
+/* mbps, mcs */
+const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+ {"1", ""},
+ {"2", ""},
+ {"5.5", ""},
+ {"11", ""},
+ {"6", "BPSK 1/2"},
+ {"9", "BPSK 1/2"},
+ {"12", "QPSK 1/2"},
+ {"18", "QPSK 3/4"},
+ {"24", "16QAM 1/2"},
+ {"36", "16QAM 3/4"},
+ {"48", "64QAM 2/3"},
+ {"54", "64QAM 3/4"},
+ {"60", "64QAM 5/6"}
+};
+
+#define MCS_INDEX_PER_STREAM (8)
+
static inline u8 rs_extract_rate(u32 rate_n_flags)
{
return (u8)(rate_n_flags & 0xFF);
@@ -543,6 +583,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
tbl->is_dup = 0;
tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
tbl->lq_type = LQ_NONE;
+ tbl->max_search = IWL_MAX_SEARCH;
/* legacy rate format */
if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
@@ -576,8 +617,10 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
tbl->lq_type = LQ_MIMO2;
/* MIMO3 */
} else {
- if (num_of_ant == 3)
+ if (num_of_ant == 3) {
+ tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
tbl->lq_type = LQ_MIMO3;
+ }
}
}
return 0;
@@ -611,19 +654,19 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
return 1;
}
-/* FIXME:RS: in 4965 we don't use greenfield at all */
-/* FIXME:RS: don't use greenfield for now in TX */
-#if 0
-static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
+/* in 4965 we don't use greenfield at all */
+static inline u8 rs_use_green(struct iwl_priv *priv,
+ struct ieee80211_conf *conf)
{
- return (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
- priv->current_ht_config.is_green_field &&
- !priv->current_ht_config.non_GF_STA_present;
-}
-#endif
-static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
-{
- return 0;
+ u8 is_green;
+
+ if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
+ is_green = 0;
+ else
+ is_green = (conf_is_ht(conf) &&
+ priv->current_ht_config.is_green_field &&
+ !priv->current_ht_config.non_GF_STA_present);
+ return is_green;
}
/**
@@ -735,6 +778,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
tbl->is_fat = 0;
tbl->is_SGI = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
}
rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
@@ -793,7 +837,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
if (!ieee80211_is_data(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1))
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
/* This packet was aggregated but doesn't carry rate scale info */
@@ -902,6 +946,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
* else look up the rate that was, finally, successful.
*/
tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags);
+ lq_sta->last_rate_n_flags = tx_rate;
rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
/* Update frame history window with "success" if Tx got ACKed ... */
@@ -958,7 +1003,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
/* See if there's a better rate or modulation mode to try. */
if (sta && sta->supp_rates[sband->band])
- rs_rate_scale_perform(priv, hdr, sta, lq_sta);
+ rs_rate_scale_perform(priv, skb, sta, lq_sta);
out:
return;
}
@@ -988,6 +1033,8 @@ static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
lq_sta->table_count = 0;
lq_sta->total_failed = 0;
lq_sta->total_success = 0;
+ lq_sta->flush_timer = jiffies;
+ lq_sta->action_counter = 0;
}
/*
@@ -1011,17 +1058,26 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
tbl->expected_tpt = expected_tpt_siso20MHzSGI;
else
tbl->expected_tpt = expected_tpt_siso20MHz;
-
- } else if (is_mimo(tbl->lq_type)) { /* FIXME:need to separate mimo2/3 */
+ } else if (is_mimo2(tbl->lq_type)) {
if (tbl->is_fat && !lq_sta->is_dup)
if (tbl->is_SGI)
- tbl->expected_tpt = expected_tpt_mimo40MHzSGI;
+ tbl->expected_tpt = expected_tpt_mimo2_40MHzSGI;
else
- tbl->expected_tpt = expected_tpt_mimo40MHz;
+ tbl->expected_tpt = expected_tpt_mimo2_40MHz;
else if (tbl->is_SGI)
- tbl->expected_tpt = expected_tpt_mimo20MHzSGI;
+ tbl->expected_tpt = expected_tpt_mimo2_20MHzSGI;
else
- tbl->expected_tpt = expected_tpt_mimo20MHz;
+ tbl->expected_tpt = expected_tpt_mimo2_20MHz;
+ } else if (is_mimo3(tbl->lq_type)) {
+ if (tbl->is_fat && !lq_sta->is_dup)
+ if (tbl->is_SGI)
+ tbl->expected_tpt = expected_tpt_mimo3_40MHzSGI;
+ else
+ tbl->expected_tpt = expected_tpt_mimo3_40MHz;
+ else if (tbl->is_SGI)
+ tbl->expected_tpt = expected_tpt_mimo3_20MHzSGI;
+ else
+ tbl->expected_tpt = expected_tpt_mimo3_20MHz;
} else
tbl->expected_tpt = expected_tpt_G;
}
@@ -1130,7 +1186,7 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
}
/*
- * Set up search table for MIMO
+ * Set up search table for MIMO2
*/
static int rs_switch_to_mimo2(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
@@ -1158,10 +1214,10 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
tbl->lq_type = LQ_MIMO2;
tbl->is_dup = lq_sta->is_dup;
tbl->action = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_mimo2_rate;
- if (priv->current_ht_config.supported_chan_width
- == IWL_CHANNEL_WIDTH_40MHZ)
+ if (iwl_is_fat_tx_allowed(priv, &sta->ht_cap))
tbl->is_fat = 1;
else
tbl->is_fat = 0;
@@ -1183,7 +1239,73 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
+ if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+ IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
+ rate, rate_mask);
+ return -1;
+ }
+ tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
+
+ IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
+ tbl->current_rate, is_green);
+ return 0;
+}
+
+/*
+ * Set up search table for MIMO3
+ */
+static int rs_switch_to_mimo3(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl, int index)
+{
+ u16 rate_mask;
+ s32 rate;
+ s8 is_green = lq_sta->is_green;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
+ == WLAN_HT_CAP_SM_PS_STATIC)
+ return -1;
+
+ /* Need both Tx chains/antennas to support MIMO */
+ if (priv->hw_params.tx_chains_num < 3)
+ return -1;
+
+ IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
+
+ tbl->lq_type = LQ_MIMO3;
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->action = 0;
+ tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
+ rate_mask = lq_sta->active_mimo3_rate;
+
+ if (iwl_is_fat_tx_allowed(priv, &sta->ht_cap))
+ tbl->is_fat = 1;
+ else
+ tbl->is_fat = 0;
+ /* FIXME: - don't toggle SGI here
+ if (tbl->is_fat) {
+ if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY)
+ tbl->is_SGI = 1;
+ else
+ tbl->is_SGI = 0;
+ } else if (priv->current_ht_config.sgf & HT_SHORT_GI_20MHZ_ONLY)
+ tbl->is_SGI = 1;
+ else
+ tbl->is_SGI = 0;
+ */
+
+ rs_set_expected_tpt_table(lq_sta, tbl);
+
+ rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
+
+ IWL_DEBUG_RATE(priv, "LQ: MIMO3 best rate %d mask %X\n",
+ rate, rate_mask);
if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
rate, rate_mask);
@@ -1217,10 +1339,10 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
tbl->is_dup = lq_sta->is_dup;
tbl->lq_type = LQ_SISO;
tbl->action = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_siso_rate;
- if (priv->current_ht_config.supported_chan_width
- == IWL_CHANNEL_WIDTH_40MHZ)
+ if (iwl_is_fat_tx_allowed(priv, &sta->ht_cap))
tbl->is_fat = 1;
else
tbl->is_fat = 0;
@@ -1274,15 +1396,15 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
int ret = 0;
+ u8 update_search_tbl_counter = 0;
for (; ;) {
+ lq_sta->action_counter++;
switch (tbl->action) {
case IWL_LEGACY_SWITCH_ANTENNA1:
case IWL_LEGACY_SWITCH_ANTENNA2:
IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
- lq_sta->action_counter++;
-
if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
tx_chains_num <= 1) ||
(tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
@@ -1298,6 +1420,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
if (rs_toggle_antenna(valid_tx_ant,
&search_tbl->current_rate, search_tbl)) {
+ update_search_tbl_counter = 1;
rs_set_expected_tpt_table(lq_sta, search_tbl);
goto out;
}
@@ -1342,9 +1465,29 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
goto out;
}
break;
+
+ case IWL_LEGACY_SWITCH_MIMO3_ABC:
+ IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO3\n");
+
+ /* Set up search table to try MIMO3 */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ search_tbl->ant_type = ANT_ABC;
+
+ if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
+ search_tbl, index);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+ break;
}
tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
if (tbl->action == start_action)
@@ -1357,8 +1500,10 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
return 0;
}
@@ -1381,6 +1526,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
u8 start_action = tbl->action;
u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
int ret;
for (;;) {
@@ -1401,8 +1547,10 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
memcpy(search_tbl, tbl, sz);
if (rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl))
+ &search_tbl->current_rate, search_tbl)) {
+ update_search_tbl_counter = 1;
goto out;
+ }
break;
case IWL_SISO_SWITCH_MIMO2_AB:
case IWL_SISO_SWITCH_MIMO2_AC:
@@ -1456,10 +1604,25 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
search_tbl->current_rate =
rate_n_flags_from_tbl(priv, search_tbl,
index, is_green);
+ update_search_tbl_counter = 1;
goto out;
+ case IWL_SISO_SWITCH_MIMO3_ABC:
+ IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO3\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ search_tbl->ant_type = ANT_ABC;
+
+ if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+ break;
}
tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
+ if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (tbl->action == start_action)
@@ -1471,15 +1634,18 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
+ if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
return 0;
}
/*
- * Try to switch to new modulation mode from MIMO
+ * Try to switch to new modulation mode from MIMO2
*/
-static int rs_move_mimo_to_other(struct iwl_priv *priv,
+static int rs_move_mimo2_to_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta, int index)
@@ -1494,6 +1660,7 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
u8 start_action = tbl->action;
u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
int ret;
for (;;) {
@@ -1501,7 +1668,7 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
switch (tbl->action) {
case IWL_MIMO2_SWITCH_ANTENNA1:
case IWL_MIMO2_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: MIMO toggle Antennas\n");
+ IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
if (tx_chains_num <= 2)
break;
@@ -1511,8 +1678,10 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
memcpy(search_tbl, tbl, sz);
if (rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl))
+ &search_tbl->current_rate, search_tbl)) {
+ update_search_tbl_counter = 1;
goto out;
+ }
break;
case IWL_MIMO2_SWITCH_SISO_A:
case IWL_MIMO2_SWITCH_SISO_B:
@@ -1549,9 +1718,9 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
HT_SHORT_GI_40MHZ))
break;
- IWL_DEBUG_RATE(priv, "LQ: MIMO toggle SGI/NGI\n");
+ IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
- /* Set up new search table for MIMO */
+ /* Set up new search table for MIMO2 */
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = !tbl->is_SGI;
rs_set_expected_tpt_table(lq_sta, search_tbl);
@@ -1569,11 +1738,27 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
search_tbl->current_rate =
rate_n_flags_from_tbl(priv, search_tbl,
index, is_green);
+ update_search_tbl_counter = 1;
goto out;
+ case IWL_MIMO2_SWITCH_MIMO3_ABC:
+ IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to MIMO3\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ search_tbl->ant_type = ANT_ABC;
+
+ if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
}
tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
+ if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
if (tbl->action == start_action)
@@ -1584,8 +1769,153 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
+ if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO3
+ */
+static int rs_move_mimo3_to_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int index)
+{
+ s8 is_green = lq_sta->is_green;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action = tbl->action;
+ u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ int ret;
+ u8 update_search_tbl_counter = 0;
+
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_MIMO3_SWITCH_ANTENNA1:
+ case IWL_MIMO3_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle Antennas\n");
+
+ if (tx_chains_num <= 3)
+ break;
+
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate, search_tbl))
+ goto out;
+ break;
+ case IWL_MIMO3_SWITCH_SISO_A:
+ case IWL_MIMO3_SWITCH_SISO_B:
+ case IWL_MIMO3_SWITCH_SISO_C:
+ IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to SISO\n");
+
+ /* Set up new search table for SISO */
+ memcpy(search_tbl, tbl, sz);
+
+ if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
+ search_tbl->ant_type = ANT_A;
+ else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
+ search_tbl->ant_type = ANT_B;
+ else
+ search_tbl->ant_type = ANT_C;
+
+ if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IWL_MIMO3_SWITCH_MIMO2_AB:
+ case IWL_MIMO3_SWITCH_MIMO2_AC:
+ case IWL_MIMO3_SWITCH_MIMO2_BC:
+ IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to MIMO2\n");
+
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IWL_MIMO3_SWITCH_GI:
+ if (!tbl->is_fat &&
+ !(priv->current_ht_config.sgf &
+ HT_SHORT_GI_20MHZ))
+ break;
+ if (tbl->is_fat &&
+ !(priv->current_ht_config.sgf &
+ HT_SHORT_GI_40MHZ))
+ break;
+
+ IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle SGI/NGI\n");
+
+ /* Set up new search table for MIMO3 */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = !tbl->is_SGI;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
+ /*
+ * If active table already uses the fastest possible
+ * modulation (three spatial streams with short guard interval),
+ * and it's working well, there's no need to look
+ * for a better type of modulation!
+ */
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[index])
+ break;
+ }
+ search_tbl->current_rate =
+ rate_n_flags_from_tbl(priv, search_tbl,
+ index, is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_MIMO3_SWITCH_GI)
+ tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+ out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_MIMO3_SWITCH_GI)
+ tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
return 0;
}
@@ -1616,8 +1946,8 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
/* Elapsed time using current modulation mode */
if (lq_sta->flush_timer)
flush_interval_passed =
- time_after(jiffies,
- (unsigned long)(lq_sta->flush_timer +
+ time_after(jiffies,
+ (unsigned long)(lq_sta->flush_timer +
IWL_RATE_SCALE_FLUSH_INTVL));
/*
@@ -1676,12 +2006,14 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
* Do rate scaling and search for new modulation mode.
*/
static void rs_rate_scale_perform(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
+ struct sk_buff *skb,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta)
{
struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int low = IWL_RATE_INVALID;
int high = IWL_RATE_INVALID;
int index;
@@ -1707,11 +2039,10 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
- /* Send management frames and broadcast/multicast data using
- * lowest rate. */
+ /* Send management frames and NO_ACK data using lowest rate. */
/* TODO: this could probably be improved.. */
if (!ieee80211_is_data(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1))
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
if (!sta || !lq_sta)
@@ -1732,6 +2063,10 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
active_tbl = 1 - lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
+ if (is_legacy(tbl->lq_type))
+ lq_sta->is_green = 0;
+ else
+ lq_sta->is_green = rs_use_green(priv, conf);
is_green = lq_sta->is_green;
/* current tx rate */
@@ -1951,6 +2286,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
update_lq = 1;
index = low;
}
+
break;
case 1:
/* Increase starting rate, update uCode's rate table */
@@ -1997,8 +2333,10 @@ lq_update:
rs_move_legacy_other(priv, lq_sta, conf, sta, index);
else if (is_siso(tbl->lq_type))
rs_move_siso_to_other(priv, lq_sta, conf, sta, index);
+ else if (is_mimo2(tbl->lq_type))
+ rs_move_mimo2_to_other(priv, lq_sta, conf, sta, index);
else
- rs_move_mimo_to_other(priv, lq_sta, conf, sta, index);
+ rs_move_mimo3_to_other(priv, lq_sta, conf, sta, index);
/* If new "search" mode was selected, set up in uCode table */
if (lq_sta->search_better_tbl) {
@@ -2014,8 +2352,11 @@ lq_update:
tbl->current_rate, index);
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
- }
+ } else
+ done_search = 1;
+ }
+ if (done_search && !lq_sta->stay_in_tbl) {
/* If the "active" (non-search) mode was legacy,
* and we've tried switching antennas,
* but we haven't been able to try HT modes (not available),
@@ -2023,8 +2364,7 @@ lq_update:
* before next round of mode comparisons. */
tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
- lq_sta->action_counter >= 1) {
- lq_sta->action_counter = 0;
+ lq_sta->action_counter > tbl1->max_search) {
IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
rs_set_stay_in_table(priv, 1, lq_sta);
}
@@ -2033,7 +2373,7 @@ lq_update:
* have been tried and compared, stay in this best modulation
* mode for a while before next round of mode comparisons. */
if (lq_sta->enable_counter &&
- (lq_sta->action_counter >= IWL_ACTION_LIMIT)) {
+ (lq_sta->action_counter >= tbl1->max_search)) {
if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
(tid != MAX_TID_COUNT)) {
@@ -2047,20 +2387,8 @@ lq_update:
lq_sta, sta);
}
}
- lq_sta->action_counter = 0;
rs_set_stay_in_table(priv, 0, lq_sta);
}
-
- /*
- * Else, don't search for a new modulation mode.
- * Put new timestamp in stay-in-modulation-mode flush timer if:
- * 1) Not changing rates right now
- * 2) Not just finishing up a search
- * 3) flush timer is empty
- */
- } else {
- if ((!update_lq) && (!done_search) && (!lq_sta->flush_timer))
- lq_sta->flush_timer = jiffies;
}
out:
@@ -2156,16 +2484,17 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
if (sta)
mask_bit = sta->supp_rates[sband->band];
- /* Send management frames and broadcast/multicast data using lowest
- * rate. */
+ /* Send management frames and NO_ACK data using lowest rate. */
if (!ieee80211_is_data(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1) || !sta || !lq_sta) {
+ info->flags & IEEE80211_TX_CTL_NO_ACK || !sta || !lq_sta) {
if (!mask_bit)
info->control.rates[0].idx =
rate_lowest_index(sband, NULL);
else
info->control.rates[0].idx =
rate_lowest_index(sband, sta);
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ info->control.rates[0].count = 1;
return;
}
@@ -2178,8 +2507,8 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
hdr->addr1);
- sta_id = iwl_add_station_flags(priv, hdr->addr1,
- 0, CMD_ASYNC, NULL);
+ sta_id = iwl_add_station(priv, hdr->addr1,
+ false, CMD_ASYNC, NULL);
}
if ((sta_id != IWL_INVALID_STATION)) {
lq_sta->lq.sta_id = sta_id;
@@ -2189,12 +2518,33 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
}
}
- if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT)
- rate_idx = rate_lowest_index(sband, sta);
- else if (sband->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
rate_idx -= IWL_FIRST_OFDM_RATE;
-
+ /* 6M and 9M shared same MCS index */
+ rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+ if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ IWL_RATE_MIMO3_6M_PLCP)
+ rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
+ else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ IWL_RATE_MIMO2_6M_PLCP)
+ rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
+ info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+ info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+ info->control.rates[0].flags |= IEEE80211_TX_RC_DUP_DATA;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_FAT_MSK)
+ info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+ info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ } else {
+ if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT)
+ rate_idx = rate_lowest_index(sband, sta);
+ else if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IWL_FIRST_OFDM_RATE;
+ }
info->control.rates[0].idx = rate_idx;
+
}
static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
@@ -2246,15 +2596,16 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
lq_sta->ibss_sta_added = 0;
if (priv->iw_mode == NL80211_IFTYPE_AP) {
- u8 sta_id = iwl_find_station(priv, sta->addr);
+ u8 sta_id = iwl_find_station(priv,
+ sta->addr);
/* for IBSS the call are from tasklet */
IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
- sta_id = iwl_add_station_flags(priv, sta->addr,
- 0, CMD_ASYNC, NULL);
+ sta_id = iwl_add_station(priv, sta->addr, false,
+ CMD_ASYNC, NULL);
}
if ((sta_id != IWL_INVALID_STATION)) {
lq_sta->lq.sta_id = sta_id;
@@ -2436,9 +2787,10 @@ static void rs_fill_link_cmd(const struct iwl_priv *priv,
repeat_rate--;
}
- lq_cmd->agg_params.agg_frame_cnt_limit = 64;
- lq_cmd->agg_params.agg_dis_start_th = 3;
- lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000);
+ lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_MAX;
+ lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ lq_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2539,6 +2891,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
char *buff;
int desc = 0;
int i = 0;
+ int index = 0;
ssize_t ret;
struct iwl_lq_sta *lq_sta = file->private_data;
@@ -2568,8 +2921,11 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
desc += sprintf(buff+desc, " %s",
(tbl->is_fat) ? "40MHz" : "20MHz");
- desc += sprintf(buff+desc, " %s\n", (tbl->is_SGI) ? "SGI" : "");
+ desc += sprintf(buff+desc, " %s %s\n", (tbl->is_SGI) ? "SGI" : "",
+ (lq_sta->is_green) ? "GF enabled" : "");
}
+ desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+ lq_sta->last_rate_n_flags);
desc += sprintf(buff+desc, "general:"
"flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
lq_sta->lq.general_params.flags,
@@ -2590,10 +2946,19 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
lq_sta->lq.general_params.start_rate_index[2],
lq_sta->lq.general_params.start_rate_index[3]);
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
- desc += sprintf(buff+desc, " rate[%d] 0x%X\n",
- i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ index = iwl_hwrate_to_plcp_idx(
+ le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
+ if (is_legacy(tbl->lq_type)) {
+ desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
+ i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
+ iwl_rate_mcs[index].mbps);
+ } else {
+ desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps (%s)\n",
+ i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
+ iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
+ }
+ }
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
kfree(buff);
@@ -2620,13 +2985,14 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
return -ENOMEM;
for (i = 0; i < LQ_SIZE; i++) {
- desc += sprintf(buff+desc, "%s type=%d SGI=%d FAT=%d DUP=%d\n"
+ desc += sprintf(buff+desc, "%s type=%d SGI=%d FAT=%d DUP=%d GF=%d\n"
"rate=0x%X\n",
lq_sta->active_tbl == i ? "*" : "x",
lq_sta->lq_info[i].lq_type,
lq_sta->lq_info[i].is_SGI,
lq_sta->lq_info[i].is_fat,
lq_sta->lq_info[i].is_dup,
+ lq_sta->is_green,
lq_sta->lq_info[i].current_rate);
for (j = 0; j < IWL_RATE_COUNT; j++) {
desc += sprintf(buff+desc,
@@ -2646,6 +3012,43 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
.open = open_file_generic,
};
+static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buff[120];
+ int desc = 0;
+ ssize_t ret;
+
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_priv *priv;
+ struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+
+ priv = lq_sta->drv;
+
+ if (is_Ht(tbl->lq_type))
+ desc += sprintf(buff+desc,
+ "Bit Rate= %d Mb/s\n",
+ tbl->expected_tpt[lq_sta->last_txrate_idx]);
+ else
+ desc += sprintf(buff+desc,
+ "Bit Rate= %d Mb/s\n",
+ iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
+ desc += sprintf(buff+desc,
+ "Signal Level= %d dBm\tNoise Level= %d dBm\n",
+ priv->last_rx_rssi, priv->last_rx_noise);
+ desc += sprintf(buff+desc,
+ "Tsf= 0x%llx\tBeacon time= 0x%08X\n",
+ priv->last_tsf, priv->last_beacon_time);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+ .read = rs_sta_dbgfs_rate_scale_data_read,
+ .open = open_file_generic,
+};
+
static void rs_add_debugfs(void *priv, void *priv_sta,
struct dentry *dir)
{
@@ -2656,6 +3059,9 @@ static void rs_add_debugfs(void *priv, void *priv_sta,
lq_sta->rs_sta_dbgfs_stats_table_file =
debugfs_create_file("rate_stats_table", 0600, dir,
lq_sta, &rs_sta_dbgfs_stats_table_ops);
+ lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+ debugfs_create_file("rate_scale_data", 0600, dir,
+ lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
&lq_sta->tx_agg_tid_en);
@@ -2667,6 +3073,7 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
struct iwl_lq_sta *lq_sta = priv_sta;
debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
#endif
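
To summarize the control flow that the MIMO3 additions above keep extending: each rs_move_*_to_other() variant walks its action list, tries one action per loop iteration, wraps past the new last action (the *_SWITCH_MIMO3_ABC value) back to *_SWITCH_ANTENNA1, and gives up once it is back at the action it started from. The standalone sketch below only illustrates that wrap-and-terminate pattern; the demo_* names and enum values are hypothetical and are not driver symbols.

#include <stdio.h>

/* Illustrative action IDs in the style of the IWL_*_SWITCH_* defines;
 * the values are hypothetical and only demonstrate the search loop. */
enum demo_action {
	DEMO_SWITCH_ANTENNA1 = 0,
	DEMO_SWITCH_ANTENNA2,
	DEMO_SWITCH_MIMO2,
	DEMO_SWITCH_GI,
	DEMO_SWITCH_MIMO3,		/* new last action, like *_SWITCH_MIMO3_ABC */
	DEMO_ACTION_LAST = DEMO_SWITCH_MIMO3,
};

/* Stand-in for the per-action switch body: return 0 when a usable search
 * table was set up, non-zero when this action cannot be tried. */
static int demo_try_action(int action)
{
	return (action == DEMO_SWITCH_MIMO3) ? 0 : -1;
}

/* Roughly the control flow of rs_move_*_to_other(): cycle through the
 * actions until one succeeds or we are back where we started. */
static int demo_move_to_other(int *action)
{
	int start_action = *action;

	for (;;) {
		if (!demo_try_action(*action))
			break;			/* "goto out" in the driver */
		(*action)++;
		if (*action > DEMO_ACTION_LAST)
			*action = DEMO_SWITCH_ANTENNA1;
		if (*action == start_action)
			return -1;		/* every action failed */
	}
	/* on success the driver also advances (and wraps) the action so the
	 * next search round starts from the following candidate */
	(*action)++;
	if (*action > DEMO_ACTION_LAST)
		*action = DEMO_SWITCH_ANTENNA1;
	return 0;
}

int main(void)
{
	int action = DEMO_SWITCH_GI;

	printf("result=%d, next action=%d\n",
	       demo_move_to_other(&action), action);
	return 0;
}

The update_search_tbl_counter handling added in this patch piggybacks on the same exit path, copying the freshly wrapped action into the search table only when an antenna toggle actually changed it.
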
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index ab59acc405d..25050bf315a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -241,6 +241,7 @@ enum {
#define IWL_LEGACY_SWITCH_MIMO2_AB 3
#define IWL_LEGACY_SWITCH_MIMO2_AC 4
#define IWL_LEGACY_SWITCH_MIMO2_BC 5
+#define IWL_LEGACY_SWITCH_MIMO3_ABC 6
/* possible actions when in siso mode */
#define IWL_SISO_SWITCH_ANTENNA1 0
@@ -249,6 +250,8 @@ enum {
#define IWL_SISO_SWITCH_MIMO2_AC 3
#define IWL_SISO_SWITCH_MIMO2_BC 4
#define IWL_SISO_SWITCH_GI 5
+#define IWL_SISO_SWITCH_MIMO3_ABC 6
+
/* possible actions when in mimo mode */
#define IWL_MIMO2_SWITCH_ANTENNA1 0
@@ -257,6 +260,23 @@ enum {
#define IWL_MIMO2_SWITCH_SISO_B 3
#define IWL_MIMO2_SWITCH_SISO_C 4
#define IWL_MIMO2_SWITCH_GI 5
+#define IWL_MIMO2_SWITCH_MIMO3_ABC 6
+
+
+/* possible actions when in mimo3 mode */
+#define IWL_MIMO3_SWITCH_ANTENNA1 0
+#define IWL_MIMO3_SWITCH_ANTENNA2 1
+#define IWL_MIMO3_SWITCH_SISO_A 2
+#define IWL_MIMO3_SWITCH_SISO_B 3
+#define IWL_MIMO3_SWITCH_SISO_C 4
+#define IWL_MIMO3_SWITCH_MIMO2_AB 5
+#define IWL_MIMO3_SWITCH_MIMO2_AC 6
+#define IWL_MIMO3_SWITCH_MIMO2_BC 7
+#define IWL_MIMO3_SWITCH_GI 8
+
+
+#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC
/*FIXME:RS:add possible actions for MIMO3*/
@@ -307,6 +327,13 @@ enum iwl_table_type {
#define ANT_BC (ANT_B | ANT_C)
#define ANT_ABC (ANT_AB | ANT_C)
+#define IWL_MAX_MCS_DISPLAY_SIZE 12
+
+struct iwl_rate_mcs_info {
+ char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+ char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
static inline u8 num_of_ant(u8 mask)
{
return !!((mask) & ANT_A) +
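
The header changes above only add action IDs, the ANT_ABC mask, and the search-depth caps; the antenna bookkeeping itself stays the simple bit-mask arithmetic visible in num_of_ant(). A minimal standalone sketch of that arithmetic follows; the DEMO_ANT_* bit values and demo_is_valid_ant() are assumptions made for the sake of a compilable example, mirroring (not quoting) the driver's ANT_* masks and rs_is_valid_ant().

#include <stdio.h>

/* Assumed per-chain bit values; only the derived masks below mirror the
 * ANT_AB/ANT_BC/ANT_ABC definitions in iwl-agn-rs.h. */
#define DEMO_ANT_A	0x1
#define DEMO_ANT_B	0x2
#define DEMO_ANT_C	0x4
#define DEMO_ANT_AB	(DEMO_ANT_A | DEMO_ANT_B)
#define DEMO_ANT_BC	(DEMO_ANT_B | DEMO_ANT_C)
#define DEMO_ANT_ABC	(DEMO_ANT_AB | DEMO_ANT_C)

/* Same shape as the header's num_of_ant(): count the set chain bits. */
static unsigned demo_num_of_ant(unsigned mask)
{
	return !!(mask & DEMO_ANT_A) +
	       !!(mask & DEMO_ANT_B) +
	       !!(mask & DEMO_ANT_C);
}

/* A requested antenna configuration is usable only when every requested
 * chain is present in the hardware's valid antenna mask. */
static int demo_is_valid_ant(unsigned valid_tx_ant, unsigned ant_type)
{
	return (ant_type & valid_tx_ant) == ant_type;
}

int main(void)
{
	unsigned valid_tx_ant = DEMO_ANT_AB;	/* a two-chain device */

	printf("ANT_ABC uses %u chains; valid on a 2-chain device: %d\n",
	       demo_num_of_ant(DEMO_ANT_ABC),
	       demo_is_valid_ant(valid_tx_ant, DEMO_ANT_ABC));
	return 0;
}

This is why the new MIMO3 cases bail out through the antenna-validity check on anything with fewer than three usable chains before touching the search table.
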
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 3bb28db4a40..a5637c4aa85 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -102,7 +102,7 @@ MODULE_ALIAS("iwl4965");
* function correctly transitions out of the RXON_ASSOC_MSK state if
* a HW tune is required based on the RXON structure changes.
*/
-static int iwl_commit_rxon(struct iwl_priv *priv)
+int iwl_commit_rxon(struct iwl_priv *priv)
{
/* cast away the const for active_rxon in this function */
struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
@@ -190,8 +190,7 @@ static int iwl_commit_rxon(struct iwl_priv *priv)
iwl_clear_stations_table(priv);
- if (!priv->error_recovering)
- priv->start_calib = 0;
+ priv->start_calib = 0;
/* Add the broadcast address so we can send broadcast frames */
if (iwl_rxon_add_station(priv, iwl_bcast_addr, 0) ==
@@ -246,8 +245,9 @@ static int iwl_commit_rxon(struct iwl_priv *priv)
void iwl_update_chain_flags(struct iwl_priv *priv)
{
- iwl_set_rxon_chain(priv);
- iwl_commit_rxon(priv);
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+ iwlcore_commit_rxon(priv);
}
static void iwl_clear_free_frames(struct iwl_priv *priv)
@@ -503,24 +503,12 @@ int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq)
{
- int ret;
- unsigned long flags;
int txq_id = txq->q.id;
- spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
-
/* Circular buffer (TFD queue in DRAM) physical base address */
iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
txq->q.dma_addr >> 8);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
return 0;
}
@@ -531,76 +519,6 @@ int iwl_hw_tx_queue_init(struct iwl_priv *priv,
*
******************************************************************************/
-static void iwl_ht_conf(struct iwl_priv *priv,
- struct ieee80211_bss_conf *bss_conf)
-{
- struct ieee80211_sta_ht_cap *ht_conf;
- struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
- struct ieee80211_sta *sta;
-
- IWL_DEBUG_MAC80211(priv, "enter: \n");
-
- if (!iwl_conf->is_ht)
- return;
-
-
- /*
- * It is totally wrong to base global information on something
- * that is valid only when associated, alas, this driver works
- * that way and I don't know how to fix it.
- */
-
- rcu_read_lock();
- sta = ieee80211_find_sta(priv->hw, priv->bssid);
- if (!sta) {
- rcu_read_unlock();
- return;
- }
- ht_conf = &sta->ht_cap;
-
- if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
- iwl_conf->sgf |= HT_SHORT_GI_20MHZ;
- if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
- iwl_conf->sgf |= HT_SHORT_GI_40MHZ;
-
- iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
- iwl_conf->max_amsdu_size =
- !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
-
- iwl_conf->supported_chan_width =
- !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
-
- /*
- * XXX: The HT configuration needs to be moved into iwl_mac_config()
- * to be done there correctly.
- */
-
- iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
- if (conf_is_ht40_minus(&priv->hw->conf))
- iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- else if (conf_is_ht40_plus(&priv->hw->conf))
- iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-
- /* If no above or below channel supplied disable FAT channel */
- if (iwl_conf->extension_chan_offset != IEEE80211_HT_PARAM_CHA_SEC_ABOVE &&
- iwl_conf->extension_chan_offset != IEEE80211_HT_PARAM_CHA_SEC_BELOW)
- iwl_conf->supported_chan_width = 0;
-
- iwl_conf->sm_ps = (u8)((ht_conf->cap & IEEE80211_HT_CAP_SM_PS) >> 2);
-
- memcpy(&iwl_conf->mcs, &ht_conf->mcs, 16);
-
- iwl_conf->tx_chan_width = iwl_conf->supported_chan_width != 0;
- iwl_conf->ht_protection =
- bss_conf->ht.operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
- iwl_conf->non_GF_STA_present =
- !!(bss_conf->ht.operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
- rcu_read_unlock();
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
#define MAX_UCODE_BEACON_INTERVAL 4096
static u16 iwl_adjust_beacon_interval(u16 beacon_val)
@@ -636,7 +554,8 @@ static void iwl_setup_rxon_timing(struct iwl_priv *priv)
beacon_int = iwl_adjust_beacon_interval(priv->beacon_int);
priv->rxon_timing.atim_window = 0;
} else {
- beacon_int = iwl_adjust_beacon_interval(conf->beacon_int);
+ beacon_int = iwl_adjust_beacon_interval(
+ priv->vif->bss_conf.beacon_int);
/* TODO: we need to get atim_window from upper stack
* for now we set to 0 */
@@ -657,30 +576,6 @@ static void iwl_setup_rxon_timing(struct iwl_priv *priv)
le16_to_cpu(priv->rxon_timing.atim_window));
}
-static int iwl_set_mode(struct iwl_priv *priv, int mode)
-{
- iwl_connection_init_rx_config(priv, mode);
- iwl_set_rxon_chain(priv);
- memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
-
- iwl_clear_stations_table(priv);
-
- /* dont commit rxon if rf-kill is on*/
- if (!iwl_is_ready_rf(priv))
- return -EAGAIN;
-
- cancel_delayed_work(&priv->scan_check);
- if (iwl_scan_cancel_timeout(priv, 100)) {
- IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
- IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
- return -EAGAIN;
- }
-
- iwl_commit_rxon(priv);
-
- return 0;
-}
-
/******************************************************************************
*
* Generic RX handler implementations
@@ -802,6 +697,7 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
unsigned long status = priv->status;
+ unsigned long reg_flags;
IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s\n",
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
@@ -813,32 +709,25 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
- if (!iwl_grab_nic_access(priv)) {
- iwl_write_direct32(
- priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-
- iwl_release_nic_access(priv);
- }
+ iwl_write_direct32(priv, HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
if (!(flags & RXON_CARD_DISABLED)) {
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
- if (!iwl_grab_nic_access(priv)) {
- iwl_write_direct32(
- priv, HBUS_TARG_MBX_C,
+ iwl_write_direct32(priv, HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
- iwl_release_nic_access(priv);
- }
}
if (flags & RF_CARD_DISABLED) {
iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
iwl_read32(priv, CSR_UCODE_DRV_GP1);
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
if (!iwl_grab_nic_access(priv))
iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
}
@@ -848,33 +737,19 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
clear_bit(STATUS_RF_KILL_HW, &priv->status);
- if (flags & SW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_SW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_SW, &priv->status);
-
if (!(flags & RXON_CARD_DISABLED))
iwl_scan_cancel(priv);
if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
- (test_bit(STATUS_RF_KILL_SW, &status) !=
- test_bit(STATUS_RF_KILL_SW, &priv->status)))
- queue_work(priv->workqueue, &priv->rf_kill);
+ test_bit(STATUS_RF_KILL_HW, &priv->status)))
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
else
wake_up_interruptible(&priv->wait_command_queue);
}
int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (ret)
- goto err;
-
if (src == IWL_PWR_SRC_VAUX) {
if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
@@ -886,10 +761,7 @@ int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
~APMG_PS_CTRL_MSK_PWR_SRC);
}
- iwl_release_nic_access(priv);
-err:
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
+ return 0;
}
/**
@@ -953,6 +825,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
unsigned long flags;
u8 fill_rx = 0;
u32 count = 8;
+ int total_empty;
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
@@ -963,7 +836,12 @@ void iwl_rx_handle(struct iwl_priv *priv)
if (i == r)
IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
- if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
+ /* calculate total frames that need to be restocked after handling RX */
+ total_empty = r - priv->rxq.write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
fill_rx = 1;
while (i != r) {
@@ -1002,6 +880,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
+ priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
} else {
/* No handling needed */
IWL_DEBUG_RX(priv,
@@ -1039,7 +918,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
count++;
if (count >= 8) {
priv->rxq.read = i;
- iwl_rx_queue_restock(priv);
+ iwl_rx_replenish_now(priv);
count = 0;
}
}
@@ -1047,7 +926,10 @@ void iwl_rx_handle(struct iwl_priv *priv)
/* Backtrack one entry */
priv->rxq.read = i;
- iwl_rx_queue_restock(priv);
+ if (fill_rx)
+ iwl_rx_replenish_now(priv);
+ else
+ iwl_rx_queue_restock(priv);
}
/* call this function to flush any scheduled tasklet */
@@ -1058,24 +940,7 @@ static inline void iwl_synchronize_irq(struct iwl_priv *priv)
tasklet_kill(&priv->irq_tasklet);
}
-static void iwl_error_recovery(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- memcpy(&priv->staging_rxon, &priv->recovery_rxon,
- sizeof(priv->staging_rxon));
- priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_commit_rxon(priv);
-
- iwl_rxon_add_station(priv, priv->bssid, 1);
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
- priv->error_recovering = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void iwl_irq_tasklet(struct iwl_priv *priv)
+static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
{
u32 inta, handled = 0;
u32 inta_fh;
@@ -1123,6 +988,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
/* Tell the device to stop sending interrupts */
iwl_disable_interrupts(priv);
+ priv->isr_stats.hw++;
iwl_irq_handle_error(priv);
handled |= CSR_INT_BIT_HW_ERR;
@@ -1135,13 +1001,17 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
#ifdef CONFIG_IWLWIFI_DEBUG
if (priv->debug_level & (IWL_DL_ISR)) {
/* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD)
+ if (inta & CSR_INT_BIT_SCD) {
IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
"the frame/frames.\n");
+ priv->isr_stats.sch++;
+ }
/* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE)
+ if (inta & CSR_INT_BIT_ALIVE) {
IWL_DEBUG_ISR(priv, "Alive interrupt\n");
+ priv->isr_stats.alive++;
+ }
}
#endif
/* Safely ignore these bits for debug checks below */
@@ -1157,6 +1027,8 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
hw_rf_kill ? "disable radio" : "enable radio");
+ priv->isr_stats.rfkill++;
+
/* driver only loads ucode once setting the interface up.
* the driver allows loading the ucode even if the radio
* is killed. Hence update the killswitch state here. The
@@ -1167,7 +1039,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
set_bit(STATUS_RF_KILL_HW, &priv->status);
else
clear_bit(STATUS_RF_KILL_HW, &priv->status);
- queue_work(priv->workqueue, &priv->rf_kill);
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
}
handled |= CSR_INT_BIT_RF_KILL;
@@ -1176,6 +1048,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
/* Chip got too hot and stopped itself */
if (inta & CSR_INT_BIT_CT_KILL) {
IWL_ERR(priv, "Microcode CT kill error detected.\n");
+ priv->isr_stats.ctkill++;
handled |= CSR_INT_BIT_CT_KILL;
}
@@ -1183,6 +1056,8 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
if (inta & CSR_INT_BIT_SW_ERR) {
IWL_ERR(priv, "Microcode SW error detected. "
" Restarting 0x%X.\n", inta);
+ priv->isr_stats.sw++;
+ priv->isr_stats.sw_err = inta;
iwl_irq_handle_error(priv);
handled |= CSR_INT_BIT_SW_ERR;
}
@@ -1198,6 +1073,8 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
iwl_txq_update_write_ptr(priv, &priv->txq[4]);
iwl_txq_update_write_ptr(priv, &priv->txq[5]);
+ priv->isr_stats.wakeup++;
+
handled |= CSR_INT_BIT_WAKEUP;
}
@@ -1206,23 +1083,27 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
* notifications from uCode come through here*/
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
iwl_rx_handle(priv);
+ priv->isr_stats.rx++;
handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
}
if (inta & CSR_INT_BIT_FH_TX) {
IWL_DEBUG_ISR(priv, "Tx interrupt\n");
+ priv->isr_stats.tx++;
handled |= CSR_INT_BIT_FH_TX;
/* FH finished to write, send event */
priv->ucode_write_complete = 1;
wake_up_interruptible(&priv->wait_command_queue);
}
- if (inta & ~handled)
+ if (inta & ~handled) {
IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ priv->isr_stats.unhandled++;
+ }
- if (inta & ~CSR_INI_SET_MASK) {
+ if (inta & ~(priv->inta_mask)) {
IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~CSR_INI_SET_MASK);
+ inta & ~priv->inta_mask);
IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
}
@@ -1243,6 +1124,200 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
}
+/* tasklet for iwlagn interrupt */
+static void iwl_irq_tasklet(struct iwl_priv *priv)
+{
+ u32 inta = 0;
+ u32 handled = 0;
+ unsigned long flags;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 inta_mask;
+#endif
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ */
+ iwl_write32(priv, CSR_INT, priv->inta);
+
+ inta = priv->inta;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (priv->debug_level & IWL_DL_ISR) {
+ /* just for debug */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK);
+ IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ",
+ inta, inta_mask);
+ }
+#endif
+ /* interrupt cause is saved in the local inta variable; now we can reset priv->inta */
+ priv->inta = 0;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IWL_ERR(priv, "Microcode HW error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ iwl_disable_interrupts(priv);
+
+ priv->isr_stats.hw++;
+ iwl_irq_handle_error(priv);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (priv->debug_level & (IWL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
+ "the frame/frames.\n");
+ priv->isr_stats.sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ IWL_DEBUG_ISR(priv, "Alive interrupt\n");
+ priv->isr_stats.alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+ int hw_rf_kill = 0;
+ if (!(iwl_read32(priv, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rf_kill = 1;
+
+ IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
+ hw_rf_kill ? "disable radio" : "enable radio");
+
+ priv->isr_stats.rfkill++;
+
+ /* driver only loads ucode once setting the interface up.
+ * the driver allows loading the ucode even if the radio
+ * is killed. Hence update the killswitch state here. The
+ * rfkill handler will care about restarting if needed.
+ */
+ if (!test_bit(STATUS_ALIVE, &priv->status)) {
+ if (hw_rf_kill)
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
+ }
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta & CSR_INT_BIT_CT_KILL) {
+ IWL_ERR(priv, "Microcode CT kill error detected.\n");
+ priv->isr_stats.ctkill++;
+ handled |= CSR_INT_BIT_CT_KILL;
+ }
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IWL_ERR(priv, "Microcode SW error detected. "
+ " Restarting 0x%X.\n", inta);
+ priv->isr_stats.sw++;
+ priv->isr_stats.sw_err = inta;
+ iwl_irq_handle_error(priv);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /* uCode wakes up after power-down sleep */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
+ iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
+ iwl_txq_update_write_ptr(priv, &priv->txq[0]);
+ iwl_txq_update_write_ptr(priv, &priv->txq[1]);
+ iwl_txq_update_write_ptr(priv, &priv->txq[2]);
+ iwl_txq_update_write_ptr(priv, &priv->txq[3]);
+ iwl_txq_update_write_ptr(priv, &priv->txq[4]);
+ iwl_txq_update_write_ptr(priv, &priv->txq[5]);
+
+ priv->isr_stats.wakeup++;
+
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+ * notifications from uCode come through here*/
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
+ CSR_INT_BIT_RX_PERIODIC)) {
+ IWL_DEBUG_ISR(priv, "Rx interrupt\n");
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ iwl_write32(priv, CSR_FH_INT_STATUS,
+ CSR49_FH_INT_RX_MASK);
+ }
+ if (inta & CSR_INT_BIT_RX_PERIODIC) {
+ handled |= CSR_INT_BIT_RX_PERIODIC;
+ iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
+ }
+ /* Sending an RX interrupt requires several steps to be done in the
+ * device:
+ * 1- write interrupt to current index in ICT table.
+ * 2- dma RX frame.
+ * 3- update RX shared data to indicate last write index.
+ * 4- send interrupt.
+ * This can lead to an RX race: the driver may receive an RX interrupt
+ * before the shared data reflects the latest write index.
+ * The RX periodic interrupt resolves this race.
+ */
+ iwl_write32(priv, CSR_INT_PERIODIC_REG,
+ CSR_INT_PERIODIC_DIS);
+ iwl_rx_handle(priv);
+ /* Only set RX periodic if real RX is received. */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
+ iwl_write32(priv, CSR_INT_PERIODIC_REG,
+ CSR_INT_PERIODIC_ENA);
+
+ priv->isr_stats.rx++;
+ }
+
+ if (inta & CSR_INT_BIT_FH_TX) {
+ iwl_write32(priv, CSR_FH_INT_STATUS, CSR49_FH_INT_TX_MASK);
+ IWL_DEBUG_ISR(priv, "Tx interrupt\n");
+ priv->isr_stats.tx++;
+ handled |= CSR_INT_BIT_FH_TX;
+ /* FH finished to write, send event */
+ priv->ucode_write_complete = 1;
+ wake_up_interruptible(&priv->wait_command_queue);
+ }
+
+ if (inta & ~handled) {
+ IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ priv->isr_stats.unhandled++;
+ }
+
+ if (inta & ~(priv->inta_mask)) {
+ IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
+ inta & ~priv->inta_mask);
+ }
+
+
+ /* Re-enable all interrupts */
+ /* only re-enable if disabled by irq */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status))
+ iwl_enable_interrupts(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+}
+
+
/******************************************************************************
*
* uCode download functions
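
The new iwl_irq_tasklet() added in the hunk above assumes the hard-IRQ half (not shown here) has already latched the interrupt cause into priv->inta, so the tasklet acks the latched value, clears it, and then services the bits one by one while tracking what it handled. A compressed sketch of that latch/ack/service split follows; every demo_* name and bit value is a hypothetical stand-in, not a driver symbol.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical cause bits standing in for CSR_INT_BIT_*. */
#define DEMO_INT_RX		(1u << 0)
#define DEMO_INT_TX		(1u << 1)
#define DEMO_INT_HW_ERR		(1u << 2)

static uint32_t demo_latched_inta;	/* plays the role of priv->inta */

/* Hard-IRQ half: read the cause once and accumulate it, so the deferred
 * half never has to re-read the hardware register. */
static void demo_isr(uint32_t cause_reg)
{
	demo_latched_inta |= cause_reg;
}

/* Deferred half (the tasklet): consume the latched causes, clear the
 * latch, then service each bit while tracking what was handled. */
static void demo_tasklet(void)
{
	uint32_t inta = demo_latched_inta;
	uint32_t handled = 0;

	demo_latched_inta = 0;	/* the "priv->inta = 0" step */
	/* a real driver would also write inta back to ack the hardware */

	if (inta & DEMO_INT_HW_ERR) {
		printf("fatal error: stop servicing\n");
		return;
	}
	if (inta & DEMO_INT_RX) {
		printf("service rx\n");
		handled |= DEMO_INT_RX;
	}
	if (inta & DEMO_INT_TX) {
		printf("service tx\n");
		handled |= DEMO_INT_TX;
	}
	if (inta & ~handled)
		printf("unhandled bits 0x%x\n", (unsigned)(inta & ~handled));
}

int main(void)
{
	demo_isr(DEMO_INT_RX | DEMO_INT_TX | (1u << 5));
	demo_tasklet();
	return 0;
}
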
@@ -1508,10 +1583,6 @@ static int iwl_read_ucode(struct iwl_priv *priv)
return ret;
}
-/* temporary */
-static int iwl_mac_beacon_update(struct ieee80211_hw *hw,
- struct sk_buff *skb);
-
/**
* iwl_alive_start - called after REPLY_ALIVE notification received
* from protocol/runtime uCode (initialization uCode's
@@ -1568,7 +1639,10 @@ static void iwl_alive_start(struct iwl_priv *priv)
} else {
/* Initialize our rx_config data */
iwl_connection_init_rx_config(priv, priv->iw_mode);
- iwl_set_rxon_chain(priv);
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+
memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
}
@@ -1578,7 +1652,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
iwl_reset_run_time_calib(priv);
/* Configure the adapter for unassociated operation */
- iwl_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
/* At this point, the NIC is initialized and operational */
iwl_rf_kill_ct_config(priv);
@@ -1589,9 +1663,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
set_bit(STATUS_READY, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
- if (priv->error_recovering)
- iwl_error_recovery(priv);
-
iwl_power_update_mode(priv, 1);
/* reassociate for ADHOC mode */
@@ -1649,36 +1720,30 @@ static void __iwl_down(struct iwl_priv *priv)
ieee80211_stop_queues(priv->hw);
/* If we have not previously called iwl_init() then
- * clear all bits but the RF Kill and SUSPEND bits and return */
+ * clear all bits but the RF Kill bit and return */
if (!iwl_is_init(priv)) {
priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
STATUS_RF_KILL_HW |
- test_bit(STATUS_RF_KILL_SW, &priv->status) <<
- STATUS_RF_KILL_SW |
test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
STATUS_GEO_CONFIGURED |
- test_bit(STATUS_IN_SUSPEND, &priv->status) <<
- STATUS_IN_SUSPEND |
test_bit(STATUS_EXIT_PENDING, &priv->status) <<
STATUS_EXIT_PENDING;
goto exit;
}
- /* ...otherwise clear out all the status bits but the RF Kill and
- * SUSPEND bits and continue taking the NIC down. */
+ /* ...otherwise clear out all the status bits but the RF Kill
+ * bit and continue taking the NIC down. */
priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
STATUS_RF_KILL_HW |
- test_bit(STATUS_RF_KILL_SW, &priv->status) <<
- STATUS_RF_KILL_SW |
test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
STATUS_GEO_CONFIGURED |
- test_bit(STATUS_IN_SUSPEND, &priv->status) <<
- STATUS_IN_SUSPEND |
test_bit(STATUS_FW_ERROR, &priv->status) <<
STATUS_FW_ERROR |
test_bit(STATUS_EXIT_PENDING, &priv->status) <<
STATUS_EXIT_PENDING;
+ /* device going down, Stop using ICT table */
+ iwl_disable_ict(priv);
spin_lock_irqsave(&priv->lock, flags);
iwl_clear_bit(priv, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1687,18 +1752,13 @@ static void __iwl_down(struct iwl_priv *priv)
iwl_txq_ctx_stop(priv);
iwl_rxq_stop(priv);
- spin_lock_irqsave(&priv->lock, flags);
- if (!iwl_grab_nic_access(priv)) {
- iwl_write_prph(priv, APMG_CLK_DIS_REG,
- APMG_CLK_VAL_DMA_CLK_RQT);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
+ iwl_write_prph(priv, APMG_CLK_DIS_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
/* FIXME: apm_ops.suspend(priv) */
- if (exit_pending || test_bit(STATUS_IN_SUSPEND, &priv->status))
+ if (exit_pending)
priv->cfg->ops->lib->apm_ops.stop(priv);
else
priv->cfg->ops->lib->apm_ops.reset(priv);
@@ -1722,6 +1782,49 @@ static void iwl_down(struct iwl_priv *priv)
iwl_cancel_deferred_work(priv);
}
+#define HW_READY_TIMEOUT (50)
+
+static int iwl_set_hw_ready(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ HW_READY_TIMEOUT);
+ if (ret != -ETIMEDOUT)
+ priv->hw_ready = true;
+ else
+ priv->hw_ready = false;
+
+ IWL_DEBUG_INFO(priv, "hardware %s\n",
+ (priv->hw_ready == 1) ? "ready" : "not ready");
+ return ret;
+}
+
+static int iwl_prepare_card_hw(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter \n");
+
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+ if (ret != -ETIMEDOUT)
+ iwl_set_hw_ready(priv);
+
+ return ret;
+}
+
#define MAX_HW_RESTARTS 5
static int __iwl_up(struct iwl_priv *priv)
@@ -1739,6 +1842,13 @@ static int __iwl_up(struct iwl_priv *priv)
return -EIO;
}
+ iwl_prepare_card_hw(priv);
+
+ if (!priv->hw_ready) {
+ IWL_WARN(priv, "Exit HW not ready\n");
+ return -EIO;
+ }
+
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
clear_bit(STATUS_RF_KILL_HW, &priv->status);
@@ -1746,9 +1856,10 @@ static int __iwl_up(struct iwl_priv *priv)
set_bit(STATUS_RF_KILL_HW, &priv->status);
if (iwl_is_rfkill(priv)) {
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
+
iwl_enable_interrupts(priv);
- IWL_WARN(priv, "Radio disabled by %s RF Kill switch\n",
- test_bit(STATUS_RF_KILL_HW, &priv->status) ? "HW" : "SW");
+ IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
return 0;
}
@@ -1794,9 +1905,6 @@ static int __iwl_up(struct iwl_priv *priv)
continue;
}
- /* Clear out the uCode error bit if it is set */
- clear_bit(STATUS_FW_ERROR, &priv->status);
-
/* start card; "initialize" will load runtime ucode */
iwl_nic_start(priv);
@@ -1843,6 +1951,9 @@ static void iwl_bg_alive_start(struct work_struct *data)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
+ /* enable dram interrupt */
+ iwl_reset_ict(priv);
+
mutex_lock(&priv->mutex);
iwl_alive_start(priv);
mutex_unlock(&priv->mutex);
@@ -1881,7 +1992,6 @@ static void iwl_bg_up(struct work_struct *data)
mutex_lock(&priv->mutex);
__iwl_up(priv);
mutex_unlock(&priv->mutex);
- iwl_rfkill_set_hw_state(priv);
}
static void iwl_bg_restart(struct work_struct *data)
@@ -1891,8 +2001,17 @@ static void iwl_bg_restart(struct work_struct *data)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- iwl_down(priv);
- queue_work(priv->workqueue, &priv->up);
+ if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
+ mutex_lock(&priv->mutex);
+ priv->vif = NULL;
+ priv->is_open = 0;
+ mutex_unlock(&priv->mutex);
+ iwl_down(priv);
+ ieee80211_restart_hw(priv->hw);
+ } else {
+ iwl_down(priv);
+ queue_work(priv->workqueue, &priv->up);
+ }
}
static void iwl_bg_rx_replenish(struct work_struct *data)
@@ -1910,7 +2029,7 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
#define IWL_DELAY_NEXT_SCAN (HZ*2)
-static void iwl_post_associate(struct iwl_priv *priv)
+void iwl_post_associate(struct iwl_priv *priv)
{
struct ieee80211_conf *conf = NULL;
int ret = 0;
@@ -1932,13 +2051,12 @@ static void iwl_post_associate(struct iwl_priv *priv)
if (!priv->vif || !priv->is_open)
return;
- iwl_power_cancel_timeout(priv);
iwl_scan_cancel_timeout(priv, 200);
conf = ieee80211_get_hw_conf(priv->hw);
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
iwl_setup_rxon_timing(priv);
ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
@@ -1951,7 +2069,9 @@ static void iwl_post_associate(struct iwl_priv *priv)
iwl_set_rxon_ht(priv, &priv->current_ht_config);
- iwl_set_rxon_chain(priv);
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+
priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
@@ -1973,7 +2093,7 @@ static void iwl_post_associate(struct iwl_priv *priv)
}
- iwl_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
switch (priv->iw_mode) {
case NL80211_IFTYPE_STATION:
@@ -2006,7 +2126,7 @@ static void iwl_post_associate(struct iwl_priv *priv)
* If chain noise has already been run, then we need to enable
* power management here */
if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
- iwl_power_enable_management(priv);
+ iwl_power_update_mode(priv, 0);
/* Enable Rx differential gain and sensitivity calibrations */
iwl_chain_noise_reset(priv);
@@ -2049,8 +2169,6 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
mutex_unlock(&priv->mutex);
- iwl_rfkill_set_hw_state(priv);
-
if (ret)
return ret;
@@ -2059,9 +2177,6 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
IWL_DEBUG_INFO(priv, "Start UP work done.\n");
- if (test_bit(STATUS_IN_SUSPEND, &priv->status))
- return 0;
-
/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
* mac80211 will not be run successfully. */
ret = wait_event_interruptible_timeout(priv->wait_command_queue,
@@ -2087,10 +2202,8 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(priv, "enter\n");
- if (!priv->is_open) {
- IWL_DEBUG_MAC80211(priv, "leave - skip\n");
+ if (!priv->is_open)
return;
- }
priv->is_open = 0;
@@ -2130,175 +2243,7 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-static int iwl_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
-
- IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type);
-
- if (priv->vif) {
- IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
- return -EOPNOTSUPP;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->vif = conf->vif;
- priv->iw_mode = conf->type;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- mutex_lock(&priv->mutex);
-
- if (conf->mac_addr) {
- IWL_DEBUG_MAC80211(priv, "Set %pM\n", conf->mac_addr);
- memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
- }
-
- if (iwl_set_mode(priv, conf->type) == -EAGAIN)
- /* we are not ready, will run again when ready */
- set_bit(STATUS_MODE_PENDING, &priv->status);
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-/**
- * iwl_mac_config - mac80211 config callback
- *
- * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
- * be set inappropriately and the driver currently sets the hardware up to
- * use it whenever needed.
- */
-static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- unsigned long flags = 0;
- int ret = 0;
- u16 ch;
- int scan_active = 0;
-
- mutex_lock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
- conf->channel->hw_value, changed);
-
- if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
- test_bit(STATUS_SCANNING, &priv->status))) {
- scan_active = 1;
- IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
- }
-
-
- /* during scanning mac80211 will delay channel setting until
- * scan finish with changed = 0
- */
- if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
- if (scan_active)
- goto set_ch_out;
-
- ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
- ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
- ret = -EINVAL;
- goto set_ch_out;
- }
-
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- !is_channel_ibss(ch_info)) {
- IWL_ERR(priv, "channel %d in band %d not "
- "IBSS channel\n",
- conf->channel->hw_value, conf->channel->band);
- ret = -EINVAL;
- goto set_ch_out;
- }
-
- priv->current_ht_config.is_ht = conf_is_ht(conf);
-
- spin_lock_irqsave(&priv->lock, flags);
-
-
- /* if we are switching from ht to 2.4 clear flags
- * from any ht related info since 2.4 does not
- * support ht */
- if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
- priv->staging_rxon.flags = 0;
-
- iwl_set_rxon_channel(priv, conf->channel);
-
- iwl_set_flags_for_band(priv, conf->channel->band);
- spin_unlock_irqrestore(&priv->lock, flags);
- set_ch_out:
- /* The list of supported rates and rate mask can be different
- * for each band; since the band may have changed, reset
- * the rate mask to what mac80211 lists */
- iwl_set_rate(priv);
- }
-
- if (changed & IEEE80211_CONF_CHANGE_PS) {
- if (conf->flags & IEEE80211_CONF_PS)
- ret = iwl_power_set_user_mode(priv, IWL_POWER_INDEX_3);
- else
- ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM);
- if (ret)
- IWL_DEBUG_MAC80211(priv, "Error setting power level\n");
-
- }
-
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
- priv->tx_power_user_lmt, conf->power_level);
-
- iwl_set_tx_power(priv, conf->power_level, false);
- }
-
- /* call to ensure that 4965 rx_chain is set properly in monitor mode */
- iwl_set_rxon_chain(priv);
-
- if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
- if (conf->radio_enabled &&
- iwl_radio_kill_sw_enable_radio(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - "
- "waiting for uCode\n");
- goto out;
- }
-
- if (!conf->radio_enabled)
- iwl_radio_kill_sw_disable_radio(priv);
- }
-
- if (!conf->radio_enabled) {
- IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n");
- goto out;
- }
-
- if (!iwl_is_ready(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- goto out;
- }
-
- if (scan_active)
- goto out;
-
- if (memcmp(&priv->active_rxon,
- &priv->staging_rxon, sizeof(priv->staging_rxon)))
- iwl_commit_rxon(priv);
- else
- IWL_DEBUG_INFO(priv, "No re-sending same RXON configuration.\n");
-
-
-out:
- IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->mutex);
- return ret;
-}
-
-static void iwl_config_ap(struct iwl_priv *priv)
+void iwl_config_ap(struct iwl_priv *priv)
{
int ret = 0;
unsigned long flags;
@@ -2311,7 +2256,7 @@ static void iwl_config_ap(struct iwl_priv *priv)
/* RXON - unassoc (to set timing command) */
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
/* RXON Timing */
iwl_setup_rxon_timing(priv);
@@ -2321,7 +2266,8 @@ static void iwl_config_ap(struct iwl_priv *priv)
IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
"Attempting to continue.\n");
- iwl_set_rxon_chain(priv);
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
/* FIXME: what should be the assoc_id for AP? */
priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
@@ -2347,7 +2293,7 @@ static void iwl_config_ap(struct iwl_priv *priv)
}
/* restore RXON assoc */
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwl_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
spin_lock_irqsave(&priv->lock, flags);
iwl_activate_qos(priv, 1);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2360,194 +2306,6 @@ static void iwl_config_ap(struct iwl_priv *priv)
* clear sta table, add BCAST sta... */
}
-
-static int iwl_mac_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct iwl_priv *priv = hw->priv;
- int rc;
-
- if (conf == NULL)
- return -EIO;
-
- if (priv->vif != vif) {
- IWL_DEBUG_MAC80211(priv, "leave - priv->vif != vif\n");
- return 0;
- }
-
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- conf->changed & IEEE80211_IFCC_BEACON) {
- struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
- if (!beacon)
- return -ENOMEM;
- mutex_lock(&priv->mutex);
- rc = iwl_mac_beacon_update(hw, beacon);
- mutex_unlock(&priv->mutex);
- if (rc)
- return rc;
- }
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- mutex_lock(&priv->mutex);
-
- if (conf->bssid)
- IWL_DEBUG_MAC80211(priv, "bssid: %pM\n", conf->bssid);
-
-/*
- * very dubious code was here; the probe filtering flag is never set:
- *
- if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
- !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
- */
-
- if (priv->iw_mode == NL80211_IFTYPE_AP) {
- if (!conf->bssid) {
- conf->bssid = priv->mac_addr;
- memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
- IWL_DEBUG_MAC80211(priv, "bssid was set to: %pM\n",
- conf->bssid);
- }
- if (priv->ibss_beacon)
- dev_kfree_skb(priv->ibss_beacon);
-
- priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
- }
-
- if (iwl_is_rfkill(priv))
- goto done;
-
- if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
- !is_multicast_ether_addr(conf->bssid)) {
- /* If there is currently a HW scan going on in the background
- * then we need to cancel it else the RXON below will fail. */
- if (iwl_scan_cancel_timeout(priv, 100)) {
- IWL_WARN(priv, "Aborted scan still in progress "
- "after 100ms\n");
- IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
- mutex_unlock(&priv->mutex);
- return -EAGAIN;
- }
- memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);
-
- /* TODO: Audit driver for usage of these members and see
- * if mac80211 deprecates them (priv->bssid looks like it
- * shouldn't be there, but I haven't scanned the IBSS code
- * to verify) - jpk */
- memcpy(priv->bssid, conf->bssid, ETH_ALEN);
-
- if (priv->iw_mode == NL80211_IFTYPE_AP)
- iwl_config_ap(priv);
- else {
- rc = iwl_commit_rxon(priv);
- if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
- iwl_rxon_add_station(
- priv, priv->active_rxon.bssid_addr, 1);
- }
-
- } else {
- iwl_scan_cancel_timeout(priv, 100);
- priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_commit_rxon(priv);
- }
-
- done:
- IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->mutex);
-
- return 0;
-}
-
-static void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->mutex);
-
- if (iwl_is_ready_rf(priv)) {
- iwl_scan_cancel_timeout(priv, 100);
- priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_commit_rxon(priv);
- }
- if (priv->vif == conf->vif) {
- priv->vif = NULL;
- memset(priv->bssid, 0, ETH_ALEN);
- }
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
-
-#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
-static void iwl_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
- if (changes & BSS_CHANGED_ERP_PREAMBLE) {
- IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
- bss_conf->use_short_preamble);
- if (bss_conf->use_short_preamble)
- priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- }
-
- if (changes & BSS_CHANGED_ERP_CTS_PROT) {
- IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
- priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
- else
- priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
- }
-
- if (changes & BSS_CHANGED_HT) {
- iwl_ht_conf(priv, bss_conf);
- iwl_set_rxon_chain(priv);
- }
-
- if (changes & BSS_CHANGED_ASSOC) {
- IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
- /* This should never happen as this function should
- * never be called from interrupt context. */
- if (WARN_ON_ONCE(in_interrupt()))
- return;
- if (bss_conf->assoc) {
- priv->assoc_id = bss_conf->aid;
- priv->beacon_int = bss_conf->beacon_int;
- priv->power_data.dtim_period = bss_conf->dtim_period;
- priv->timestamp = bss_conf->timestamp;
- priv->assoc_capability = bss_conf->assoc_capability;
-
- /* we have just associated, don't start scan too early
- * leave time for EAPOL exchange to complete
- */
- priv->next_scan_jiffies = jiffies +
- IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
- mutex_lock(&priv->mutex);
- iwl_post_associate(priv);
- mutex_unlock(&priv->mutex);
- } else {
- priv->assoc_id = 0;
- IWL_DEBUG_MAC80211(priv, "DISASSOC %d\n", bss_conf->assoc);
- }
- } else if (changes && iwl_is_associated(priv) && priv->assoc_id) {
- IWL_DEBUG_MAC80211(priv, "Associated Changes %d\n", changes);
- iwl_send_rxon_assoc(priv);
- }
-
-}
-
static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
struct ieee80211_key_conf *keyconf, const u8 *addr,
u32 iv32, u16 *phase1key)
@@ -2630,49 +2388,6 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
-static int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- int q;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- if (queue >= AC_NUM) {
- IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
- return 0;
- }
-
- q = AC_NUM - 1 - queue;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
- priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
- priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
- priv->qos_data.def_qos_parm.ac[q].edca_txop =
- cpu_to_le16((params->txop * 32));
-
- priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- priv->qos_data.qos_active = 1;
-
- if (priv->iw_mode == NL80211_IFTYPE_AP)
- iwl_activate_qos(priv, 1);
- else if (priv->assoc_id && iwl_is_associated(priv))
- iwl_activate_qos(priv, 0);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
@@ -2715,41 +2430,6 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
-static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct iwl_priv *priv = hw->priv;
- int i, avail;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- unsigned long flags;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for (i = 0; i < AC_NUM; i++) {
- txq = &priv->txq[i];
- q = &txq->q;
- avail = iwl_queue_space(q);
-
- stats[i].len = q->n_window - avail;
- stats[i].limit = q->n_window - q->high_mark;
- stats[i].count = q->n_window;
-
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-
static int iwl_mac_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -2762,120 +2442,6 @@ static int iwl_mac_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
-
- mutex_lock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- spin_lock_irqsave(&priv->lock, flags);
- memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info));
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl_reset_qos(priv);
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->assoc_id = 0;
- priv->assoc_capability = 0;
- priv->assoc_station_added = 0;
-
- /* new association get rid of ibss beacon skb */
- if (priv->ibss_beacon)
- dev_kfree_skb(priv->ibss_beacon);
-
- priv->ibss_beacon = NULL;
-
- priv->beacon_int = priv->hw->conf.beacon_int;
- priv->timestamp = 0;
- if ((priv->iw_mode == NL80211_IFTYPE_STATION))
- priv->beacon_int = 0;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- /* we are restarting association process
- * clear RXON_FILTER_ASSOC_MSK bit
- */
- if (priv->iw_mode != NL80211_IFTYPE_AP) {
- iwl_scan_cancel_timeout(priv, 100);
- priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_commit_rxon(priv);
- }
-
- iwl_power_update_mode(priv, 0);
-
- /* Per mac80211.h: This is only used in IBSS mode... */
- if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
-
- /* switch to CAM during association period.
- * the ucode will block any association/authentication
- * frome during assiciation period if it can not hear
- * the AP because of PM. the timer enable PM back is
- * association do not complete
- */
- if (priv->hw->conf.channel->flags & (IEEE80211_CHAN_PASSIVE_SCAN |
- IEEE80211_CHAN_RADAR))
- iwl_power_disable_management(priv, 3000);
-
- IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- iwl_set_rate(priv);
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- __le64 timestamp;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
- IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->ibss_beacon)
- dev_kfree_skb(priv->ibss_beacon);
-
- priv->ibss_beacon = skb;
-
- priv->assoc_id = 0;
- timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
- priv->timestamp = le64_to_cpu(timestamp);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl_reset_qos(priv);
-
- iwl_post_associate(priv);
-
-
- return 0;
-}
-
/*****************************************************************************
*
* sysfs attributes
@@ -2895,7 +2461,7 @@ static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
static ssize_t show_debug_level(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct iwl_priv *priv = d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "0x%08X\n", priv->debug_level);
}
@@ -2903,7 +2469,7 @@ static ssize_t store_debug_level(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct iwl_priv *priv = d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
unsigned long val;
int ret;
@@ -2926,7 +2492,7 @@ static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
static ssize_t show_version(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct iwl_priv *priv = d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
struct iwl_alive_resp *palive = &priv->card_alive;
ssize_t pos = 0;
u16 eeprom_ver;
@@ -2943,8 +2509,10 @@ static ssize_t show_version(struct device *d,
if (priv->eeprom) {
eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- pos += sprintf(buf + pos, "EEPROM version: 0x%x\n",
- eeprom_ver);
+ pos += sprintf(buf + pos, "NVM Type: %s, version: 0x%x\n",
+ (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ ? "OTP" : "EEPROM", eeprom_ver);
+
} else {
pos += sprintf(buf + pos, "EEPROM not initialized\n");
}
@@ -2957,7 +2525,7 @@ static DEVICE_ATTR(version, S_IWUSR | S_IRUGO, show_version, NULL);
static ssize_t show_temperature(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
if (!iwl_is_alive(priv))
return -EAGAIN;
@@ -2970,7 +2538,7 @@ static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
static ssize_t show_tx_power(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
if (!iwl_is_ready_rf(priv))
return sprintf(buf, "off\n");
@@ -2982,7 +2550,7 @@ static ssize_t store_tx_power(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
unsigned long val;
int ret;
@@ -3000,7 +2568,7 @@ static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
static ssize_t show_flags(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
}
@@ -3009,7 +2577,7 @@ static ssize_t store_flags(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
unsigned long val;
u32 flags;
int ret = strict_strtoul(buf, 0, &val);
@@ -3025,7 +2593,7 @@ static ssize_t store_flags(struct device *d,
else {
IWL_DEBUG_INFO(priv, "Commit rxon.flags = 0x%04X\n", flags);
priv->staging_rxon.flags = cpu_to_le32(flags);
- iwl_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
}
}
mutex_unlock(&priv->mutex);
@@ -3038,7 +2606,7 @@ static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
static ssize_t show_filter_flags(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "0x%04X\n",
le32_to_cpu(priv->active_rxon.filter_flags));
@@ -3048,7 +2616,7 @@ static ssize_t store_filter_flags(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
unsigned long val;
u32 filter_flags;
int ret = strict_strtoul(buf, 0, &val);
@@ -3066,7 +2634,7 @@ static ssize_t store_filter_flags(struct device *d,
"0x%04X\n", filter_flags);
priv->staging_rxon.filter_flags =
cpu_to_le32(filter_flags);
- iwl_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
}
}
mutex_unlock(&priv->mutex);
@@ -3109,32 +2677,37 @@ static ssize_t show_power_level(struct device *d,
{
struct iwl_priv *priv = dev_get_drvdata(d);
int mode = priv->power_data.user_power_setting;
- int system = priv->power_data.system_power_setting;
int level = priv->power_data.power_mode;
char *p = buf;
- switch (system) {
- case IWL_POWER_SYS_AUTO:
- p += sprintf(p, "SYSTEM:auto");
- break;
- case IWL_POWER_SYS_AC:
- p += sprintf(p, "SYSTEM:ac");
- break;
- case IWL_POWER_SYS_BATTERY:
- p += sprintf(p, "SYSTEM:battery");
- break;
- }
-
- p += sprintf(p, "\tMODE:%s", (mode < IWL_POWER_AUTO) ?
- "fixed" : "auto");
- p += sprintf(p, "\tINDEX:%d", level);
- p += sprintf(p, "\n");
+ p += sprintf(p, "INDEX:%d\t", level);
+ p += sprintf(p, "USER:%d\n", mode);
return p - buf + 1;
}
static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
store_power_level);
+static ssize_t show_qos(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct iwl_priv *priv = dev_get_drvdata(d);
+ char *p = buf;
+ int q;
+
+ for (q = 0; q < AC_NUM; q++) {
+ p += sprintf(p, "\tcw_min\tcw_max\taifsn\ttxop\n");
+ p += sprintf(p, "AC[%d]\t%u\t%u\t%u\t%u\n", q,
+ priv->qos_data.def_qos_parm.ac[q].cw_min,
+ priv->qos_data.def_qos_parm.ac[q].cw_max,
+ priv->qos_data.def_qos_parm.ac[q].aifsn,
+ priv->qos_data.def_qos_parm.ac[q].edca_txop);
+ }
+
+ return p - buf + 1;
+}
+
+static DEVICE_ATTR(qos, S_IRUGO, show_qos, NULL);
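/*
 * Illustrative sample of what reading the new "qos" attribute could return
 * (values shown are plausible defaults and depend on the current QoS setup;
 * note that the header line is emitted once per AC by the loop above):
 *
 *	cw_min	cw_max	aifsn	txop
 * AC[0]	15	1023	3	0
 *	cw_min	cw_max	aifsn	txop
 * AC[1]	15	1023	7	0
 *	...
 */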
static ssize_t show_statistics(struct device *d,
struct device_attribute *attr, char *buf)
@@ -3190,14 +2763,12 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->up, iwl_bg_up);
INIT_WORK(&priv->restart, iwl_bg_restart);
INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
- INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
iwl_setup_scan_deferred_work(priv);
- iwl_setup_power_deferred_work(priv);
if (priv->cfg->ops->lib->setup_deferred_work)
priv->cfg->ops->lib->setup_deferred_work(priv);
@@ -3206,8 +2777,12 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
priv->statistics_periodic.data = (unsigned long)priv;
priv->statistics_periodic.function = iwl_bg_statistics_periodic;
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
- iwl_irq_tasklet, (unsigned long)priv);
+ if (!priv->cfg->use_isr_legacy)
+ tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ iwl_irq_tasklet, (unsigned long)priv);
+ else
+ tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ iwl_irq_tasklet_legacy, (unsigned long)priv);
}
static void iwl_cancel_deferred_work(struct iwl_priv *priv)
@@ -3217,7 +2792,6 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work_sync(&priv->init_alive_start);
cancel_delayed_work(&priv->scan_check);
- cancel_delayed_work_sync(&priv->set_power_save);
cancel_delayed_work(&priv->alive_start);
cancel_work_sync(&priv->beacon_update);
del_timer_sync(&priv->statistics_periodic);
@@ -3234,7 +2808,7 @@ static struct attribute *iwl_sysfs_entries[] = {
&dev_attr_debug_level.attr,
#endif
&dev_attr_version.attr,
-
+ &dev_attr_qos.attr,
NULL
};
@@ -3250,7 +2824,6 @@ static struct ieee80211_ops iwl_hw_ops = {
.add_interface = iwl_mac_add_interface,
.remove_interface = iwl_mac_remove_interface,
.config = iwl_mac_config,
- .config_interface = iwl_mac_config_interface,
.configure_filter = iwl_configure_filter,
.set_key = iwl_mac_set_key,
.update_tkip_key = iwl_mac_update_tkip_key,
@@ -3298,6 +2871,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
priv->cfg = cfg;
priv->pci_dev = pdev;
+ priv->inta_mask = CSR_INI_SET_MASK;
#ifdef CONFIG_IWLWIFI_DEBUG
priv->debug_level = priv->cfg->mod_params->debug;
@@ -3348,6 +2922,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(unsigned long long) pci_resource_len(pdev, 0));
IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
+	/* This spin lock is used in apm_ops.init and in EEPROM access,
+	 * so initialize it now.
+	 */
+ spin_lock_init(&priv->reg_lock);
iwl_hw_detect(priv);
IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
priv->cfg->name, priv->hw_rev);
@@ -3356,6 +2934,12 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+ iwl_prepare_card_hw(priv);
+ if (!priv->hw_ready) {
+ IWL_WARN(priv, "Failed, HW not ready\n");
+ goto out_iounmap;
+ }
+
/* amp init */
err = priv->cfg->ops->lib->apm_ops.init(priv);
if (err < 0) {
@@ -3397,18 +2981,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_eeprom;
/* At this point both hw and priv are initialized. */
- /**********************************
- * 7. Initialize module parameters
- **********************************/
-
- /* Disable radio (SW RF KILL) via parameter when loading driver */
- if (priv->cfg->mod_params->disable) {
- set_bit(STATUS_RF_KILL_SW, &priv->status);
- IWL_DEBUG_INFO(priv, "Radio disabled.\n");
- }
-
/********************
- * 8. Setup services
+ * 7. Setup services
********************/
spin_lock_irqsave(&priv->lock, flags);
iwl_disable_interrupts(priv);
@@ -3416,8 +2990,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_enable_msi(priv->pci_dev);
- err = request_irq(priv->pci_dev->irq, iwl_isr, IRQF_SHARED,
- DRV_NAME, priv);
+ iwl_alloc_isr_ict(priv);
+ err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr,
+ IRQF_SHARED, DRV_NAME, priv);
if (err) {
IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
goto out_disable_msi;
@@ -3432,7 +3007,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_setup_rx_handlers(priv);
/**********************************
- * 9. Setup and register mac80211
+ * 8. Setup and register mac80211
**********************************/
/* enable interrupts if needed: hw bug w/a */
@@ -3450,7 +3025,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = iwl_dbgfs_register(priv, DRV_NAME);
if (err)
- IWL_ERR(priv, "failed to create debugfs files\n");
+ IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
@@ -3458,12 +3033,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else
set_bit(STATUS_RF_KILL_HW, &priv->status);
- err = iwl_rfkill_init(priv);
- if (err)
- IWL_ERR(priv, "Unable to initialize RFKILL system. "
- "Ignoring error: %d\n", err);
- else
- iwl_rfkill_set_hw_state(priv);
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
iwl_power_initialize(priv);
return 0;
@@ -3474,6 +3045,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
out_free_irq:
free_irq(priv->pci_dev->irq, priv);
+ iwl_free_isr_ict(priv);
out_disable_msi:
pci_disable_msi(priv->pci_dev);
iwl_uninit_drv(priv);
@@ -3526,7 +3098,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
iwl_synchronize_irq(priv);
- iwl_rfkill_unregister(priv);
iwl_dealloc_ucode_pci(priv);
if (priv->rxq.bd)
@@ -3555,51 +3126,14 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
iwl_uninit_drv(priv);
+ iwl_free_isr_ict(priv);
+
if (priv->ibss_beacon)
dev_kfree_skb(priv->ibss_beacon);
ieee80211_free_hw(priv->hw);
}
-#ifdef CONFIG_PM
-
-static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
-
- if (priv->is_open) {
- set_bit(STATUS_IN_SUSPEND, &priv->status);
- iwl_mac_stop(priv->hw);
- priv->is_open = 1;
- }
-
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
- return 0;
-}
-
-static int iwl_pci_resume(struct pci_dev *pdev)
-{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- int ret;
-
- pci_set_power_state(pdev, PCI_D0);
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
- pci_restore_state(pdev);
- iwl_enable_interrupts(priv);
-
- if (priv->is_open)
- iwl_mac_start(priv->hw);
-
- clear_bit(STATUS_IN_SUSPEND, &priv->status);
- return 0;
-}
-
-#endif /* CONFIG_PM */
/*****************************************************************************
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 735f3f19928..a5d63672ad3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -857,7 +857,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
priv->cfg->ops->lib->update_chain_flags(priv);
data->state = IWL_CHAIN_NOISE_DONE;
- iwl_power_enable_management(priv);
+ iwl_power_update_mode(priv, 0);
}
EXPORT_SYMBOL(iwl_chain_noise_calibration);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 29d40746da6..c87033bf3ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -614,8 +614,18 @@ enum {
#define RXON_FLG_CHANNEL_MODE_POS (25)
#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25)
-#define RXON_FLG_CHANNEL_MODE_PURE_40_MSK cpu_to_le32(0x1 << 25)
-#define RXON_FLG_CHANNEL_MODE_MIXED_MSK cpu_to_le32(0x2 << 25)
+
+/* channel mode */
+enum {
+ CHANNEL_MODE_LEGACY = 0,
+ CHANNEL_MODE_PURE_40 = 1,
+ CHANNEL_MODE_MIXED = 2,
+ CHANNEL_MODE_RESERVED = 3,
+};
+#define RXON_FLG_CHANNEL_MODE_LEGACY cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_PURE_40 cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_MIXED cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
+
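/*
 * A minimal sketch (not part of the patch) of how the two-bit channel-mode
 * field encoded at RXON_FLG_CHANNEL_MODE_POS could be read back out of the
 * RXON flags; the helper name is illustrative only.
 */
static inline u32 rxon_channel_mode(__le32 flags)
{
	/* extract bits 25..26, i.e. the CHANNEL_MODE_* value */
	return (le32_to_cpu(flags) >> RXON_FLG_CHANNEL_MODE_POS) & 0x3;
}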
/* CTS to self (if spec allows) flag */
#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30)
@@ -1057,7 +1067,7 @@ struct iwl_addsta_cmd {
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
- __le16 reserved1;
+ __le16 rate_n_flags; /* 3945 only */
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1903,6 +1913,18 @@ struct iwl_link_qual_general_params {
u8 start_rate_index[LINK_QUAL_AC_NUM];
} __attribute__ ((packed));
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_MAX (65535)
+#define LINK_QUAL_AGG_TIME_LIMIT_MIN (0)
+
+#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
+#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
+#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (31)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (64)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
+
/**
* struct iwl_link_qual_agg_params
*
@@ -2469,11 +2491,12 @@ struct iwl_ssid_ie {
u8 ssid[32];
} __attribute__ ((packed));
-#define PROBE_OPTION_MAX_API1 0x4
-#define PROBE_OPTION_MAX 0x14
+#define PROBE_OPTION_MAX_3945 4
+#define PROBE_OPTION_MAX 20
#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
#define IWL_GOOD_CRC_TH cpu_to_le16(1)
#define IWL_MAX_SCAN_SIZE 1024
+#define IWL_MAX_PROBE_REQUEST 200
/*
* REPLY_SCAN_CMD = 0x80 (command)
@@ -2552,7 +2575,7 @@ struct iwl3945_scan_cmd {
struct iwl3945_tx_cmd tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
- struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_API1];
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
/*
* Probe request frame, followed by channel list.
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index c54fb93e9d7..f9d16ca5b3d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -36,9 +36,9 @@
#include "iwl-debug.h"
#include "iwl-core.h"
#include "iwl-io.h"
-#include "iwl-rfkill.h"
#include "iwl-power.h"
#include "iwl-sta.h"
+#include "iwl-helpers.h"
MODULE_DESCRIPTION("iwl core");
@@ -59,6 +59,8 @@ MODULE_LICENSE("GPL");
IWL_RATE_##pp##M_INDEX, \
IWL_RATE_##np##M_INDEX }
+static irqreturn_t iwl_isr(int irq, void *data);
+
/*
* Parameter order:
* rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
@@ -273,6 +275,14 @@ void iwl_activate_qos(struct iwl_priv *priv, u8 force)
}
EXPORT_SYMBOL(iwl_activate_qos);
+/*
+ * AC CWmin CW max AIFSN TXOP Limit TXOP Limit
+ * (802.11b) (802.11a/g)
+ * AC_BK 15 1023 7 0 0
+ * AC_BE 15 1023 3 0 0
+ * AC_VI 7 15 2 6.016ms 3.008ms
+ * AC_VO 3 7 2 3.264ms 1.504ms
+ */
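/*
 * Worked example (illustrative): with the default cw_min = 15 used in
 * iwl_reset_qos() below, AC_VI gets cw_min = (15 + 1) / 2 - 1 = 7 and
 * cw_max = 15, and AC_VO gets cw_min = (15 + 1) / 4 - 1 = 3 and
 * cw_max = (15 + 1) / 2 - 1 = 7, matching the table above.
 */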
void iwl_reset_qos(struct iwl_priv *priv)
{
u16 cw_min = 15;
@@ -304,6 +314,7 @@ void iwl_reset_qos(struct iwl_priv *priv)
if (priv->qos_data.qos_active)
aifs = 3;
+ /* AC_BE */
priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
@@ -311,6 +322,7 @@ void iwl_reset_qos(struct iwl_priv *priv)
priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
if (priv->qos_data.qos_active) {
+ /* AC_BK */
i = 1;
priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
@@ -318,11 +330,12 @@ void iwl_reset_qos(struct iwl_priv *priv)
priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
+ /* AC_VI */
i = 2;
priv->qos_data.def_qos_parm.ac[i].cw_min =
cpu_to_le16((cw_min + 1) / 2 - 1);
priv->qos_data.def_qos_parm.ac[i].cw_max =
- cpu_to_le16(cw_max);
+ cpu_to_le16(cw_min);
priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
if (is_legacy)
priv->qos_data.def_qos_parm.ac[i].edca_txop =
@@ -332,11 +345,12 @@ void iwl_reset_qos(struct iwl_priv *priv)
cpu_to_le16(3008);
priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
+ /* AC_VO */
i = 3;
priv->qos_data.def_qos_parm.ac[i].cw_min =
cpu_to_le16((cw_min + 1) / 4 - 1);
priv->qos_data.def_qos_parm.ac[i].cw_max =
- cpu_to_le16((cw_max + 1) / 2 - 1);
+ cpu_to_le16((cw_min + 1) / 2 - 1);
priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
if (is_legacy)
@@ -591,10 +605,10 @@ static u8 iwl_is_channel_extension(struct iwl_priv *priv,
if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
return !(ch_info->fat_extension_channel &
- IEEE80211_CHAN_NO_FAT_ABOVE);
+ IEEE80211_CHAN_NO_HT40PLUS);
else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
return !(ch_info->fat_extension_channel &
- IEEE80211_CHAN_NO_FAT_BELOW);
+ IEEE80211_CHAN_NO_HT40MINUS);
return 0;
}
@@ -605,19 +619,23 @@ u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
if ((!iwl_ht_conf->is_ht) ||
- (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
- (iwl_ht_conf->extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_NONE))
+ (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ))
return 0;
+	/* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
+	 * because that bit is not set in the pure 40 MHz case.
+	 */
if (sta_ht_inf) {
- if ((!sta_ht_inf->ht_supported) ||
- (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)))
+ if (!sta_ht_inf->ht_supported)
return 0;
}
- return iwl_is_channel_extension(priv, priv->band,
- le16_to_cpu(priv->staging_rxon.channel),
- iwl_ht_conf->extension_chan_offset);
+ if (iwl_ht_conf->ht_protection & IEEE80211_HT_OP_MODE_PROTECTION_20MHZ)
+ return 1;
+ else
+ return iwl_is_channel_extension(priv, priv->band,
+ le16_to_cpu(priv->staging_rxon.channel),
+ iwl_ht_conf->extension_chan_offset);
}
EXPORT_SYMBOL(iwl_is_fat_tx_allowed);
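/* Illustrative summary of iwl_is_fat_tx_allowed() above: it returns 0 when
 * HT is off or the configuration is not 40 MHz capable, and 0 when a station
 * is given but does not support HT; otherwise it returns 1 if 20 MHz HT
 * protection is in force, and falls back to iwl_is_channel_extension() for
 * the per-channel HT40 capability check of the extension channel. */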
@@ -735,6 +753,8 @@ int iwl_full_rxon_required(struct iwl_priv *priv)
priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
(priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
+ (priv->staging_rxon.ofdm_ht_triple_stream_basic_rates !=
+ priv->active_rxon.ofdm_ht_triple_stream_basic_rates) ||
(priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
return 1;
@@ -785,43 +805,53 @@ EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
{
struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
- u32 val;
if (!ht_info->is_ht) {
- rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
- RXON_FLG_CHANNEL_MODE_PURE_40_MSK |
+ rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
RXON_FLG_FAT_PROT_MSK |
RXON_FLG_HT_PROT_MSK);
return;
}
- /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
- if (iwl_is_fat_tx_allowed(priv, NULL))
- rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
- else
- rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
- RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
-
- /* Note: control channel is opposite of extension channel */
- switch (ht_info->extension_chan_offset) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_NONE:
- default:
- rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
- break;
+	/* FIXME: if the definition of ht_protection changes, a "translation"
+	 * step will be needed here for rxon->flags.
+	 */
+ rxon->flags |= cpu_to_le32(ht_info->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);
+
+ /* Set up channel bandwidth:
+ * 20 MHz only, 20/40 mixed or pure 40 if fat ok */
+	/* clear the HT channel mode before setting the new mode */
+ rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ if (iwl_is_fat_tx_allowed(priv, NULL)) {
+ /* pure 40 fat */
+ if (rxon->flags & RXON_FLG_FAT_PROT_MSK)
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+ else {
+ /* Note: control channel is opposite of extension channel */
+ switch (ht_info->extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+ default:
+ /* channel location only valid if in Mixed mode */
+ IWL_ERR(priv, "invalid extension channel offset\n");
+ break;
+ }
+ }
+ } else {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
}
- val = ht_info->ht_protection;
-
- rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
-
- iwl_set_rxon_chain(priv);
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
IWL_DEBUG_ASSOC(priv, "supported HT rate 0x%X 0x%X 0x%X "
"rxon flags 0x%X operation mode :0x%X "
@@ -901,10 +931,11 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
* never called for monitor mode. The only way mac80211 informs us about
* monitor mode is through configuring filters (call to configure_filter).
*/
-static bool iwl_is_monitor_mode(struct iwl_priv *priv)
+bool iwl_is_monitor_mode(struct iwl_priv *priv)
{
return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK);
}
+EXPORT_SYMBOL(iwl_is_monitor_mode);
/**
* iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
@@ -956,10 +987,10 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
if (iwl_is_monitor_mode(priv) &&
!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) {
- rx_chain = 0x07 << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= 0x06 << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= 0x07 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= 0x01 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
}
priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
@@ -1068,11 +1099,6 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
RXON_FILTER_ACCEPT_GRP_MSK;
break;
- case NL80211_IFTYPE_MONITOR:
- priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
- priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
- RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
- break;
default:
IWL_ERR(priv, "Unsupported interface type %d\n", mode);
break;
@@ -1111,16 +1137,18 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
priv->staging_rxon.cck_basic_rates =
(IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
- priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
- RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
+ /* clear both MIX and PURE40 mode flag */
+ priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
+ RXON_FLG_CHANNEL_MODE_PURE_40);
memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
+ priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(iwl_connection_init_rx_config);
-void iwl_set_rate(struct iwl_priv *priv)
+static void iwl_set_rate(struct iwl_priv *priv)
{
const struct ieee80211_supported_band *hw = NULL;
struct ieee80211_rate *rate;
@@ -1166,7 +1194,6 @@ void iwl_set_rate(struct iwl_priv *priv)
priv->staging_rxon.ofdm_basic_rates =
(IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
}
-EXPORT_SYMBOL(iwl_set_rate);
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
@@ -1230,11 +1257,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
"Restarting adapter due to uCode error.\n");
- if (iwl_is_associated(priv)) {
- memcpy(&priv->recovery_rxon, &priv->active_rxon,
- sizeof(priv->recovery_rxon));
- priv->error_recovering = 1;
- }
if (priv->cfg->mod_params->restart_fw)
queue_work(priv->workqueue, &priv->restart);
}
@@ -1298,19 +1320,20 @@ int iwl_setup_mac(struct iwl_priv *priv)
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_SUPPORTS_PS;
+ IEEE80211_HW_SPECTRUM_MGMT;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->custom_regulatory = true;
- hw->wiphy->max_scan_ssids = 1;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
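	/* Illustrative arithmetic: with IWL_MAX_PROBE_REQUEST = 200 (defined in
	 * iwl-commands.h above), 24 bytes of 802.11 header and a 2-byte empty
	 * SSID element leave 200 - 24 - 2 = 174 bytes for user-supplied IEs. */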
/* Default value; 4 EDCA QOS priorities */
hw->queues = 4;
- hw->conf.beacon_int = 100;
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
@@ -1357,7 +1380,6 @@ int iwl_init_drv(struct iwl_priv *priv)
priv->ibss_beacon = NULL;
spin_lock_init(&priv->lock);
- spin_lock_init(&priv->power_data.lock);
spin_lock_init(&priv->sta_lock);
spin_lock_init(&priv->hcmd_lock);
@@ -1378,7 +1400,9 @@ int iwl_init_drv(struct iwl_priv *priv)
priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;
/* Choose which receivers/antennas to use */
- iwl_set_rxon_chain(priv);
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+
iwl_init_scan_params(priv);
iwl_reset_qos(priv);
@@ -1475,11 +1499,273 @@ void iwl_enable_interrupts(struct iwl_priv *priv)
{
IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
set_bit(STATUS_INT_ENABLED, &priv->status);
- iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
+ iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
}
EXPORT_SYMBOL(iwl_enable_interrupts);
-irqreturn_t iwl_isr(int irq, void *data)
+
+#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
+
+/* Free dram table */
+void iwl_free_isr_ict(struct iwl_priv *priv)
+{
+ if (priv->ict_tbl_vir) {
+ pci_free_consistent(priv->pci_dev, (sizeof(u32) * ICT_COUNT) +
+ PAGE_SIZE, priv->ict_tbl_vir,
+ priv->ict_tbl_dma);
+ priv->ict_tbl_vir = NULL;
+ }
+}
+EXPORT_SYMBOL(iwl_free_isr_ict);
+
+
+/* Allocate the DRAM-shared ICT table (PAGE_SIZE aligned) and
+ * reset all data related to ICT table interrupts.
+ */
+int iwl_alloc_isr_ict(struct iwl_priv *priv)
+{
+
+ if (priv->cfg->use_isr_legacy)
+ return 0;
+	/* allocate shared data table */
+ priv->ict_tbl_vir = pci_alloc_consistent(priv->pci_dev, (sizeof(u32) *
+ ICT_COUNT) + PAGE_SIZE,
+ &priv->ict_tbl_dma);
+ if (!priv->ict_tbl_vir)
+ return -ENOMEM;
+
+	/* align table to PAGE_SIZE boundary */
+ priv->aligned_ict_tbl_dma = ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
+
+ IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
+ (unsigned long long)priv->ict_tbl_dma,
+ (unsigned long long)priv->aligned_ict_tbl_dma,
+ (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
+
+ priv->ict_tbl = priv->ict_tbl_vir +
+ (priv->aligned_ict_tbl_dma - priv->ict_tbl_dma);
+
+ IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
+ priv->ict_tbl, priv->ict_tbl_vir,
+ (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
+
+ /* reset table and index to all 0 */
+	memset(priv->ict_tbl_vir, 0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
+ priv->ict_index = 0;
+
+ /* add periodic RX interrupt */
+ priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
+ return 0;
+}
+EXPORT_SYMBOL(iwl_alloc_isr_ict);
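/*
 * Illustrative sizing note (assuming a typical 4 KiB PAGE_SIZE): the table
 * holds ICT_COUNT = 4096/4 = 1024 u32 entries, so the allocation above is
 * 4096 + 4096 = 8192 bytes.  Rounding ict_tbl_dma up to the next page
 * boundary moves it forward by at most PAGE_SIZE - 1 bytes, so the aligned
 * 4096-byte window always fits inside the buffer, and the virtual pointer
 * is advanced by the same offset as the DMA address.
 */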
+
+/* The device is going up: inform it that it should use the ICT interrupt
+ * table, and tell the driver code to start using ICT interrupts.
+ */
+int iwl_reset_ict(struct iwl_priv *priv)
+{
+ u32 val;
+ unsigned long flags;
+
+ if (!priv->ict_tbl_vir)
+ return 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_disable_interrupts(priv);
+
+	memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
+
+ val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
+
+ val |= CSR_DRAM_INT_TBL_ENABLE;
+ val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+
+ IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
+ "aligned dma address %Lx\n",
+ val, (unsigned long long)priv->aligned_ict_tbl_dma);
+
+ iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
+ priv->use_ict = true;
+ priv->ict_index = 0;
+ iwl_write32(priv, CSR_INT, priv->inta_mask);
+ iwl_enable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_reset_ict);
+
+/* The device is going down, disable ICT interrupt usage */
+void iwl_disable_ict(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->use_ict = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+EXPORT_SYMBOL(iwl_disable_ict);
+
+/* Interrupt handler using the ICT table.  With this handler the driver no
+ * longer reads the INTA register to discover the device's interrupts, since
+ * reading that register is expensive.  Instead, the device writes interrupt
+ * causes into the ICT DRAM table and increments its index before firing an
+ * interrupt.  The driver ORs all ICT table entries from the current index up
+ * to the first entry with a 0 value; the result is the set of interrupts to
+ * service.  The driver then clears those entries and updates the index.
+ */
+irqreturn_t iwl_isr_ict(int irq, void *data)
+{
+ struct iwl_priv *priv = data;
+ u32 inta, inta_mask;
+ u32 val = 0;
+
+ if (!priv)
+ return IRQ_NONE;
+
+	/* DRAM interrupt table not set up yet,
+	 * use the legacy interrupt path.
+	 */
+ if (!priv->use_ict)
+ return iwl_isr(irq, data);
+
+ spin_lock(&priv->lock);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here.
+ */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!priv->ict_tbl[priv->ict_index]) {
+ IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+	/* read all non-zero entries, starting at ict_index */
+ while (priv->ict_tbl[priv->ict_index]) {
+
+ val |= priv->ict_tbl[priv->ict_index];
+ IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
+ priv->ict_index,
+ priv->ict_tbl[priv->ict_index]);
+ priv->ict_tbl[priv->ict_index] = 0;
+ priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
+ ICT_COUNT);
+
+ }
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+ inta = (0xff & val) | ((0xff00 & val) << 16);
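	/* Illustrative note: the expression above expands the packed ICT value
	 * so that bits 0..7 of 'val' become INTA bits 0..7 and bits 8..15
	 * become INTA bits 24..31; e.g. a stored 0x0101 yields 0x01000001. */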
+ IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
+ inta, inta_mask, val);
+
+ inta &= priv->inta_mask;
+ priv->inta |= inta;
+
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&priv->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) {
+		/* Re-enable interrupts only if they were disabled by this
+		 * handler and no tasklet was scheduled; if a tasklet was
+		 * scheduled, it will re-enable them itself.
+		 */
+ iwl_enable_interrupts(priv);
+ }
+
+ spin_unlock(&priv->lock);
+ return IRQ_HANDLED;
+
+ none:
+	/* Re-enable interrupts here since we have nothing to service;
+	 * only re-enable if they were disabled by this IRQ handler.
+	 */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
+ iwl_enable_interrupts(priv);
+
+ spin_unlock(&priv->lock);
+ return IRQ_NONE;
+}
+EXPORT_SYMBOL(iwl_isr_ict);
+
+
+static irqreturn_t iwl_isr(int irq, void *data)
+{
+ struct iwl_priv *priv = data;
+ u32 inta, inta_mask;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 inta_fh;
+#endif
+ if (!priv)
+ return IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(priv, CSR_INT);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta) {
+ IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+ if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (priv->debug_level & (IWL_DL_ISR)) {
+ inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+ IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
+ "fh 0x%08x\n", inta, inta_mask, inta_fh);
+ }
+#endif
+
+ priv->inta |= inta;
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&priv->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
+ iwl_enable_interrupts(priv);
+
+ unplugged:
+ spin_unlock(&priv->lock);
+ return IRQ_HANDLED;
+
+ none:
+	/* Re-enable interrupts here since we have nothing to service;
+	 * only re-enable if disabled by this IRQ and no tasklet was scheduled. */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
+ iwl_enable_interrupts(priv);
+
+ spin_unlock(&priv->lock);
+ return IRQ_NONE;
+}
+
+irqreturn_t iwl_isr_legacy(int irq, void *data)
{
struct iwl_priv *priv = data;
u32 inta, inta_mask;
@@ -1536,7 +1822,7 @@ irqreturn_t iwl_isr(int irq, void *data)
spin_unlock(&priv->lock);
return IRQ_NONE;
}
-EXPORT_SYMBOL(iwl_isr);
+EXPORT_SYMBOL(iwl_isr_legacy);
int iwl_send_bt_config(struct iwl_priv *priv)
{
@@ -1580,10 +1866,6 @@ static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
- ret = iwl_grab_nic_access(priv);
- if (ret)
- return ret;
-
for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
@@ -1599,8 +1881,6 @@ static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
}
}
- iwl_release_nic_access(priv);
-
return ret;
}
@@ -1618,10 +1898,6 @@ static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
- ret = iwl_grab_nic_access(priv);
- if (ret)
- return ret;
-
iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
IWL49_RTC_INST_LOWER_BOUND);
@@ -1642,8 +1918,6 @@ static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
}
}
- iwl_release_nic_access(priv);
-
if (!errcnt)
IWL_DEBUG_INFO(priv,
"ucode image in INSTRUCTION memory is good\n");
@@ -1752,7 +2026,6 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
u32 data2, line;
u32 desc, time, count, base, data1;
u32 blink1, blink2, ilink1, ilink2;
- int ret;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
@@ -1764,12 +2037,6 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
return;
}
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- IWL_WARN(priv, "Can not read from adapter at this time.\n");
- return;
- }
-
count = iwl_read_targ_mem(priv, base);
if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
@@ -1796,7 +2063,6 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
ilink1, ilink2);
- iwl_release_nic_access(priv);
}
EXPORT_SYMBOL(iwl_dump_nic_error_log);
@@ -1805,7 +2071,6 @@ EXPORT_SYMBOL(iwl_dump_nic_error_log);
/**
* iwl_print_event_log - Dump error event log to syslog
*
- * NOTE: Must be called with iwl_grab_nic_access() already obtained!
*/
static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
u32 num_events, u32 mode)
@@ -1851,7 +2116,6 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
void iwl_dump_nic_event_log(struct iwl_priv *priv)
{
- int ret;
u32 base; /* SRAM byte address of event log header */
u32 capacity; /* event log capacity in # entries */
u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
@@ -1869,12 +2133,6 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv)
return;
}
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- IWL_WARN(priv, "Can not read from adapter at this time.\n");
- return;
- }
-
/* event log header */
capacity = iwl_read_targ_mem(priv, base);
mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
@@ -1886,7 +2144,6 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv)
/* bail out if nothing in log */
if (size == 0) {
IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- iwl_release_nic_access(priv);
return;
}
@@ -1901,7 +2158,6 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv)
/* (then/else) start at top of log */
iwl_print_event_log(priv, 0, next_entry, mode);
- iwl_release_nic_access(priv);
}
EXPORT_SYMBOL(iwl_dump_nic_event_log);
@@ -1954,161 +2210,680 @@ int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
}
EXPORT_SYMBOL(iwl_send_card_state);
-void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv)
+void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
{
- unsigned long flags;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+ struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
+ IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
+ sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
- if (test_bit(STATUS_RF_KILL_SW, &priv->status))
- return;
+void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+ IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
+ "notification for %s:\n",
+ le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
+ iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
+}
+EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
- IWL_DEBUG_RF_KILL(priv, "Manual SW RF KILL set to: RADIO OFF\n");
+void iwl_rx_reply_error(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
- iwl_scan_cancel(priv);
- /* FIXME: This is a workaround for AP */
- if (priv->iw_mode != NL80211_IFTYPE_AP) {
- spin_lock_irqsave(&priv->lock, flags);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_SW_BIT_RFKILL);
- spin_unlock_irqrestore(&priv->lock, flags);
- /* call the host command only if no hw rf-kill set */
- if (!test_bit(STATUS_RF_KILL_HW, &priv->status) &&
- iwl_is_ready(priv))
- iwl_send_card_state(priv,
- CARD_STATE_CMD_DISABLE, 0);
- set_bit(STATUS_RF_KILL_SW, &priv->status);
- /* make sure mac80211 stop sending Tx frame */
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
- }
+ IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
+ "seq 0x%04X ser 0x%08X\n",
+ le32_to_cpu(pkt->u.err_resp.error_type),
+ get_cmd_string(pkt->u.err_resp.cmd_id),
+ pkt->u.err_resp.cmd_id,
+ le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+ le32_to_cpu(pkt->u.err_resp.error_info));
}
-EXPORT_SYMBOL(iwl_radio_kill_sw_disable_radio);
+EXPORT_SYMBOL(iwl_rx_reply_error);
+
+void iwl_clear_isr_stats(struct iwl_priv *priv)
+{
+ memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
+}
+EXPORT_SYMBOL(iwl_clear_isr_stats);
-int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv)
+int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
+ struct iwl_priv *priv = hw->priv;
unsigned long flags;
+ int q;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!iwl_is_ready_rf(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+ return -EIO;
+ }
- if (!test_bit(STATUS_RF_KILL_SW, &priv->status))
+ if (queue >= AC_NUM) {
+ IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
return 0;
+ }
- IWL_DEBUG_RF_KILL(priv, "Manual SW RF KILL set to: RADIO ON\n");
+ q = AC_NUM - 1 - queue;
spin_lock_irqsave(&priv->lock, flags);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- /* If the driver is up it will receive CARD_STATE_NOTIFICATION
- * notification where it will clear SW rfkill status.
- * Setting it here would break the handler. Only if the
- * interface is down we can set here since we don't
- * receive any further notification.
- */
- if (!priv->is_open)
- clear_bit(STATUS_RF_KILL_SW, &priv->status);
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
+ priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
+ priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+ priv->qos_data.def_qos_parm.ac[q].edca_txop =
+ cpu_to_le16((params->txop * 32));
- /* wake up ucode */
- msleep(10);
+ priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+ priv->qos_data.qos_active = 1;
+
+ if (priv->iw_mode == NL80211_IFTYPE_AP)
+ iwl_activate_qos(priv, 1);
+ else if (priv->assoc_id && iwl_is_associated(priv))
+ iwl_activate_qos(priv, 0);
- spin_lock_irqsave(&priv->lock, flags);
- iwl_read32(priv, CSR_UCODE_DRV_GP1);
- if (!iwl_grab_nic_access(priv))
- iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->lock, flags);
- if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
- IWL_DEBUG_RF_KILL(priv, "Can not turn radio back on - "
- "disabled by HW switch\n");
- return 0;
- }
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return 0;
+}
+EXPORT_SYMBOL(iwl_mac_conf_tx);
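/* Illustrative note on iwl_mac_conf_tx() above: mac80211 supplies txop in
 * units of 32 usec, so the (params->txop * 32) conversion yields microseconds
 * for edca_txop; the queue index is reversed (q = AC_NUM - 1 - queue),
 * presumably to match the firmware's AC ordering. */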
+
+static void iwl_ht_conf(struct iwl_priv *priv,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ieee80211_sta_ht_cap *ht_conf;
+ struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
+ struct ieee80211_sta *sta;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!iwl_conf->is_ht)
+ return;
+
- /* when driver is up while rfkill is on, it wont receive
- * any CARD_STATE_NOTIFICATION notifications so we have to
- * restart it in here
+ /*
+ * It is totally wrong to base global information on something
+ * that is valid only when associated, alas, this driver works
+ * that way and I don't know how to fix it.
*/
- if (priv->is_open && !test_bit(STATUS_ALIVE, &priv->status)) {
- clear_bit(STATUS_RF_KILL_SW, &priv->status);
- if (!iwl_is_rfkill(priv))
- queue_work(priv->workqueue, &priv->up);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->hw, priv->bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ return;
}
+ ht_conf = &sta->ht_cap;
+
+ if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
+ iwl_conf->sgf |= HT_SHORT_GI_20MHZ;
+ if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
+ iwl_conf->sgf |= HT_SHORT_GI_40MHZ;
- /* If the driver is already loaded, it will receive
- * CARD_STATE_NOTIFICATION notifications and the handler will
- * call restart to reload the driver.
+ iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
+ iwl_conf->max_amsdu_size =
+ !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
+
+ iwl_conf->supported_chan_width =
+ !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+
+ /*
+ * XXX: The HT configuration needs to be moved into iwl_mac_config()
+ * to be done there correctly.
*/
- return 1;
+
+ iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ if (conf_is_ht40_minus(&priv->hw->conf))
+ iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ else if (conf_is_ht40_plus(&priv->hw->conf))
+ iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+
+ /* If no above or below channel is supplied, disable the FAT channel */
+ if (iwl_conf->extension_chan_offset != IEEE80211_HT_PARAM_CHA_SEC_ABOVE &&
+ iwl_conf->extension_chan_offset != IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+ iwl_conf->supported_chan_width = 0;
+
+ iwl_conf->sm_ps = (u8)((ht_conf->cap & IEEE80211_HT_CAP_SM_PS) >> 2);
+
+ memcpy(&iwl_conf->mcs, &ht_conf->mcs, 16);
+
+ iwl_conf->tx_chan_width = iwl_conf->supported_chan_width != 0;
+ iwl_conf->ht_protection =
+ bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ iwl_conf->non_GF_STA_present =
+ !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+ rcu_read_unlock();
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
}
-EXPORT_SYMBOL(iwl_radio_kill_sw_enable_radio);
-void iwl_bg_rf_kill(struct work_struct *work)
+#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
+void iwl_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
{
- struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
+ struct iwl_priv *priv = hw->priv;
+ int ret;
- wake_up_interruptible(&priv->wait_command_queue);
+ IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ if (!iwl_is_alive(priv))
return;
mutex_lock(&priv->mutex);
- if (!iwl_is_rfkill(priv)) {
- IWL_DEBUG_RF_KILL(priv,
- "HW and/or SW RF Kill no longer active, restarting "
- "device\n");
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status) &&
- test_bit(STATUS_ALIVE, &priv->status))
- queue_work(priv->workqueue, &priv->restart);
- } else {
- /* make sure mac80211 stop sending Tx frame */
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
+ if (changes & BSS_CHANGED_BEACON &&
+ priv->iw_mode == NL80211_IFTYPE_AP) {
+ dev_kfree_skb(priv->ibss_beacon);
+ priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
+ }
+
+ if ((changes & BSS_CHANGED_BSSID) && !iwl_is_rfkill(priv)) {
+ /* If there is currently a HW scan going on in the background
+ * then we need to cancel it, or else the RXON below will fail. */
+ if (iwl_scan_cancel_timeout(priv, 100)) {
+ IWL_WARN(priv, "Aborted scan still in progress "
+ "after 100ms\n");
+ IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+ memcpy(priv->staging_rxon.bssid_addr,
+ bss_conf->bssid, ETH_ALEN);
+
+ /* TODO: Audit driver for usage of these members and see
+ * if mac80211 deprecates them (priv->bssid looks like it
+ * shouldn't be there, but I haven't scanned the IBSS code
+ * to verify) - jpk */
+ memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+
+ if (priv->iw_mode == NL80211_IFTYPE_AP)
+ iwlcore_config_ap(priv);
+ else {
+ int rc = iwlcore_commit_rxon(priv);
+ if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
+ iwl_rxon_add_station(
+ priv, priv->active_rxon.bssid_addr, 1);
+ }
+ } else if (!iwl_is_rfkill(priv)) {
+ iwl_scan_cancel_timeout(priv, 100);
+ priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwlcore_commit_rxon(priv);
+ }
+
+ if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
+ changes & BSS_CHANGED_BEACON) {
+ struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+
+ if (beacon)
+ iwl_mac_beacon_update(hw, beacon);
+ }
- if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
- IWL_DEBUG_RF_KILL(priv, "Can not turn radio back on - "
- "disabled by SW switch\n");
+ mutex_unlock(&priv->mutex);
+
+ if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+ IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
+ bss_conf->use_short_preamble);
+ if (bss_conf->use_short_preamble)
+ priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
- IWL_WARN(priv, "Radio Frequency Kill Switch is On:\n"
- "Kill switch must be turned off for "
- "wireless networking to work.\n");
+ priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ }
+
+ if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+ IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
+ if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
+ priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
+ else
+ priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ }
+
+ if (changes & BSS_CHANGED_HT) {
+ iwl_ht_conf(priv, bss_conf);
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+ }
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
+ /* This should never happen as this function should
+ * never be called from interrupt context. */
+ if (WARN_ON_ONCE(in_interrupt()))
+ return;
+ if (bss_conf->assoc) {
+ priv->assoc_id = bss_conf->aid;
+ priv->beacon_int = bss_conf->beacon_int;
+ priv->power_data.dtim_period = bss_conf->dtim_period;
+ priv->timestamp = bss_conf->timestamp;
+ priv->assoc_capability = bss_conf->assoc_capability;
+
+ /* we have just associated; don't start a scan too early,
+ * leave time for the EAPOL exchange to complete
+ */
+ priv->next_scan_jiffies = jiffies +
+ IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
+ mutex_lock(&priv->mutex);
+ priv->cfg->ops->lib->post_associate(priv);
+ mutex_unlock(&priv->mutex);
+ } else {
+ priv->assoc_id = 0;
+ IWL_DEBUG_MAC80211(priv, "DISASSOC %d\n", bss_conf->assoc);
+ }
+ } else if (changes && iwl_is_associated(priv) && priv->assoc_id) {
+ IWL_DEBUG_MAC80211(priv, "Associated Changes %d\n", changes);
+ ret = iwl_send_rxon_assoc(priv);
+ if (!ret)
+ /* Sync active_rxon with latest change. */
+ memcpy((void *)&priv->active_rxon,
+ &priv->staging_rxon,
+ sizeof(struct iwl_rxon_cmd));
+ }
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+EXPORT_SYMBOL(iwl_bss_info_changed);
+
+int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct iwl_priv *priv = hw->priv;
+ unsigned long flags;
+ __le64 timestamp;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!iwl_is_ready_rf(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+ return -EIO;
+ }
+
+ if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
+ IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (priv->ibss_beacon)
+ dev_kfree_skb(priv->ibss_beacon);
+
+ priv->ibss_beacon = skb;
+
+ priv->assoc_id = 0;
+ timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+ priv->timestamp = le64_to_cpu(timestamp);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl_reset_qos(priv);
+
+ priv->cfg->ops->lib->post_associate(priv);
+
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_mac_beacon_update);
+
+int iwl_set_mode(struct iwl_priv *priv, int mode)
+{
+ if (mode == NL80211_IFTYPE_ADHOC) {
+ const struct iwl_channel_info *ch_info;
+
+ ch_info = iwl_get_channel_info(priv,
+ priv->band,
+ le16_to_cpu(priv->staging_rxon.channel));
+
+ if (!ch_info || !is_channel_ibss(ch_info)) {
+ IWL_ERR(priv, "channel %d not IBSS channel\n",
+ le16_to_cpu(priv->staging_rxon.channel));
+ return -EINVAL;
+ }
+ }
+
+ iwl_connection_init_rx_config(priv, mode);
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+
+ memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
+
+ iwl_clear_stations_table(priv);
+
+ /* don't commit rxon if rf-kill is on */
+ if (!iwl_is_ready_rf(priv))
+ return -EAGAIN;
+
+ iwlcore_commit_rxon(priv);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_set_mode);
+
+int iwl_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_if_init_conf *conf)
+{
+ struct iwl_priv *priv = hw->priv;
+ unsigned long flags;
+
+ IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type);
+
+ if (priv->vif) {
+ IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
+ return -EOPNOTSUPP;
}
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->vif = conf->vif;
+ priv->iw_mode = conf->type;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ mutex_lock(&priv->mutex);
+
+ if (conf->mac_addr) {
+ IWL_DEBUG_MAC80211(priv, "Set %pM\n", conf->mac_addr);
+ memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
+ }
+
+ if (iwl_set_mode(priv, conf->type) == -EAGAIN)
+ /* we are not ready, will run again when ready */
+ set_bit(STATUS_MODE_PENDING, &priv->status);
+
mutex_unlock(&priv->mutex);
- iwl_rfkill_set_hw_state(priv);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return 0;
}
-EXPORT_SYMBOL(iwl_bg_rf_kill);
+EXPORT_SYMBOL(iwl_mac_add_interface);
-void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+void iwl_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_if_init_conf *conf)
{
-#ifdef CONFIG_IWLWIFI_DEBUG
- struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
- struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
- IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
- sleep->pm_sleep_mode, sleep->pm_wakeup_src);
-#endif
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ mutex_lock(&priv->mutex);
+
+ if (iwl_is_ready_rf(priv)) {
+ iwl_scan_cancel_timeout(priv, 100);
+ priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwlcore_commit_rxon(priv);
+ }
+ if (priv->vif == conf->vif) {
+ priv->vif = NULL;
+ memset(priv->bssid, 0, ETH_ALEN);
+ }
+ mutex_unlock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
}
-EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
+EXPORT_SYMBOL(iwl_mac_remove_interface);
-void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+/**
+ * iwl_mac_config - mac80211 config callback
+ *
+ * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
+ * be set inappropriately and the driver currently sets the hardware up to
+ * use it whenever needed.
+ */
+int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
{
- struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
- IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
- "notification for %s:\n",
- le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
- iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
+ struct iwl_priv *priv = hw->priv;
+ const struct iwl_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ unsigned long flags = 0;
+ int ret = 0;
+ u16 ch;
+ int scan_active = 0;
+
+ mutex_lock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
+ conf->channel->hw_value, changed);
+
+ if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
+ test_bit(STATUS_SCANNING, &priv->status))) {
+ scan_active = 1;
+ IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+ }
+
+
+ /* during scanning, mac80211 will delay channel setting until
+ * the scan finishes, with changed = 0
+ */
+ if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+ if (scan_active)
+ goto set_ch_out;
+
+ ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
+ ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
+ if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
+ !is_channel_ibss(ch_info)) {
+ IWL_ERR(priv, "channel %d in band %d not "
+ "IBSS channel\n",
+ conf->channel->hw_value, conf->channel->band);
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
+ priv->current_ht_config.is_ht = conf_is_ht(conf);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+
+ /* if we are switching from HT to 2.4 GHz, clear flags
+ * from any HT-related info, since 2.4 GHz does not
+ * support HT */
+ if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
+ priv->staging_rxon.flags = 0;
+
+ iwl_set_rxon_channel(priv, conf->channel);
+
+ iwl_set_flags_for_band(priv, conf->channel->band);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ set_ch_out:
+ /* The list of supported rates and rate mask can be different
+ * for each band; since the band may have changed, reset
+ * the rate mask to what mac80211 lists */
+ iwl_set_rate(priv);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_PS &&
+ priv->iw_mode == NL80211_IFTYPE_STATION) {
+ priv->power_data.power_disabled =
+ !(conf->flags & IEEE80211_CONF_PS);
+ ret = iwl_power_update_mode(priv, 0);
+ if (ret)
+ IWL_DEBUG_MAC80211(priv, "Error setting power level\n");
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
+ priv->tx_power_user_lmt, conf->power_level);
+
+ iwl_set_tx_power(priv, conf->power_level, false);
+ }
+
+ /* call to ensure that 4965 rx_chain is set properly in monitor mode */
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+ goto out;
+ }
+
+ if (scan_active)
+ goto out;
+
+ if (memcmp(&priv->active_rxon,
+ &priv->staging_rxon, sizeof(priv->staging_rxon)))
+ iwlcore_commit_rxon(priv);
+ else
+ IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration.\n");
+
+
+out:
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ mutex_unlock(&priv->mutex);
+ return ret;
}
-EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
+EXPORT_SYMBOL(iwl_mac_config);
-void iwl_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
+ struct ieee80211_tx_queue_stats *stats)
{
- struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+ struct iwl_priv *priv = hw->priv;
+ int i, avail;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ unsigned long flags;
- IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
- "seq 0x%04X ser 0x%08X\n",
- le32_to_cpu(pkt->u.err_resp.error_type),
- get_cmd_string(pkt->u.err_resp.cmd_id),
- pkt->u.err_resp.cmd_id,
- le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
- le32_to_cpu(pkt->u.err_resp.error_info));
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!iwl_is_ready_rf(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ for (i = 0; i < AC_NUM; i++) {
+ txq = &priv->txq[i];
+ q = &txq->q;
+ avail = iwl_queue_space(q);
+
+ stats[i].len = q->n_window - avail;
+ stats[i].limit = q->n_window - q->high_mark;
+ stats[i].count = q->n_window;
+
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return 0;
}
-EXPORT_SYMBOL(iwl_rx_reply_error);
+EXPORT_SYMBOL(iwl_mac_get_tx_stats);
+
+void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ unsigned long flags;
+
+ mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ spin_lock_irqsave(&priv->lock, flags);
+ memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info));
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl_reset_qos(priv);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->assoc_id = 0;
+ priv->assoc_capability = 0;
+ priv->assoc_station_added = 0;
+
+ /* new association; get rid of the ibss beacon skb */
+ if (priv->ibss_beacon)
+ dev_kfree_skb(priv->ibss_beacon);
+
+ priv->ibss_beacon = NULL;
+
+ priv->beacon_int = priv->vif->bss_conf.beacon_int;
+ priv->timestamp = 0;
+ if ((priv->iw_mode == NL80211_IFTYPE_STATION))
+ priv->beacon_int = 0;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!iwl_is_ready_rf(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ /* we are restarting the association process;
+ * clear the RXON_FILTER_ASSOC_MSK bit
+ */
+ if (priv->iw_mode != NL80211_IFTYPE_AP) {
+ iwl_scan_cancel_timeout(priv, 100);
+ priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwlcore_commit_rxon(priv);
+ }
+
+ if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
+ IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n");
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ iwl_set_rate(priv);
+
+ mutex_unlock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+EXPORT_SYMBOL(iwl_mac_reset_tsf);
+
+#ifdef CONFIG_PM
+
+int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+
+ /*
+ * This function is called when the system goes into suspend state.
+ * mac80211 will call iwl_mac_stop() from its suspend function first,
+ * but since iwl_mac_stop() has no knowledge of who the caller is,
+ * it will not call apm_ops.stop() to stop the DMA operation.
+ * Call apm_ops.stop() here to make sure the DMA is stopped.
+ */
+ priv->cfg->ops->lib->apm_ops.stop(priv);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_pci_suspend);
+
+int iwl_pci_resume(struct pci_dev *pdev)
+{
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_restore_state(pdev);
+ iwl_enable_interrupts(priv);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_pci_resume);
+#endif /* CONFIG_PM */
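Editor's note (not part of the patch): the block above turns a set of formerly driver-local mac80211 handlers into exported core helpers (iwl_mac_conf_tx, iwl_bss_info_changed, iwl_mac_add_interface, iwl_mac_remove_interface, iwl_mac_config, iwl_mac_get_tx_stats, iwl_mac_reset_tsf). The sketch below shows how a hardware front end might wire them into its ieee80211_ops table; the iwlsub_mac_ops name and the omission of the remaining callbacks are assumptions for illustration only.

#include <net/mac80211.h>
#include "iwl-core.h"

/*
 * Minimal illustration only: .tx/.start/.stop and the scan/key callbacks
 * remain hardware specific and are omitted from this sketch.
 */
static const struct ieee80211_ops iwlsub_mac_ops = {
	.add_interface		= iwl_mac_add_interface,
	.remove_interface	= iwl_mac_remove_interface,
	.config			= iwl_mac_config,
	.conf_tx		= iwl_mac_conf_tx,
	.bss_info_changed	= iwl_bss_info_changed,
	.get_tx_stats		= iwl_mac_get_tx_stats,
	.reset_tsf		= iwl_mac_reset_tsf,
};

/* The table would then be handed to mac80211 at allocation time, e.g.:
 *	hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlsub_mac_ops);
 */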
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index a8eac8c3c1f..dabf663e36e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -85,7 +85,10 @@ struct iwl_cmd;
struct iwl_hcmd_ops {
int (*rxon_assoc)(struct iwl_priv *priv);
+ int (*commit_rxon)(struct iwl_priv *priv);
+ void (*set_rxon_chain)(struct iwl_priv *priv);
};
+
struct iwl_hcmd_utils_ops {
u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
u16 (*build_addsta_hcmd)(const struct iwl_addsta_cmd *cmd, u8 *data);
@@ -100,6 +103,19 @@ struct iwl_hcmd_utils_ops {
struct iwl_rx_phy_res *rx_resp);
};
+struct iwl_apm_ops {
+ int (*init)(struct iwl_priv *priv);
+ int (*reset)(struct iwl_priv *priv);
+ void (*stop)(struct iwl_priv *priv);
+ void (*config)(struct iwl_priv *priv);
+ int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
+};
+
+struct iwl_temp_ops {
+ void (*temperature)(struct iwl_priv *priv);
+ void (*set_ct_kill)(struct iwl_priv *priv);
+};
+
struct iwl_lib_ops {
/* set hw dependent parameters */
int (*set_hw_params)(struct iwl_priv *priv);
@@ -137,20 +153,21 @@ struct iwl_lib_ops {
int (*is_valid_rtc_data_addr)(u32 addr);
/* 1st ucode load */
int (*load_ucode)(struct iwl_priv *priv);
- /* power management */
- struct {
- int (*init)(struct iwl_priv *priv);
- int (*reset)(struct iwl_priv *priv);
- void (*stop)(struct iwl_priv *priv);
- void (*config)(struct iwl_priv *priv);
- int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
- } apm_ops;
+ /* power management */
+ struct iwl_apm_ops apm_ops;
+
/* power */
int (*send_tx_power) (struct iwl_priv *priv);
void (*update_chain_flags)(struct iwl_priv *priv);
- void (*temperature) (struct iwl_priv *priv);
+ void (*post_associate) (struct iwl_priv *priv);
+ void (*config_ap) (struct iwl_priv *priv);
+ irqreturn_t (*isr) (int irq, void *data);
+
/* eeprom operations (as defined in iwl-eeprom.h) */
struct iwl_eeprom_ops eeprom_ops;
+
+ /* temperature */
+ struct iwl_temp_ops temp_ops;
};
struct iwl_ops {
@@ -160,13 +177,12 @@ struct iwl_ops {
};
struct iwl_mod_params {
- int disable; /* def: 0 = enable radio */
int sw_crypto; /* def: 0 = using hardware encryption */
u32 debug; /* def: 0 = minimal debug log messages */
int disable_hw_scan; /* def: 0 = use h/w scan */
int num_of_queues; /* def: HW dependent */
int num_of_ampdu_queues;/* def: HW dependent */
- int disable_11n; /* def: 0 = disable 11n capabilities */
+ int disable_11n; /* def: 0 = 11n capabilities enabled */
int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
int antenna; /* def: 0 = both antennas (use diversity) */
int restart_fw; /* def: 1 = restart firmware */
@@ -214,6 +230,7 @@ struct iwl_cfg {
u8 valid_tx_ant;
u8 valid_rx_ant;
bool need_pll_cfg;
+ bool use_isr_legacy;
};
/***************************
@@ -225,6 +242,8 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
void iwl_hw_detect(struct iwl_priv *priv);
void iwl_reset_qos(struct iwl_priv *priv);
void iwl_activate_qos(struct iwl_priv *priv, u8 force);
+int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt);
int iwl_check_rxon_cmd(struct iwl_priv *priv);
int iwl_full_rxon_required(struct iwl_priv *priv);
@@ -249,6 +268,24 @@ int iwl_setup_mac(struct iwl_priv *priv);
int iwl_set_hw_params(struct iwl_priv *priv);
int iwl_init_drv(struct iwl_priv *priv);
void iwl_uninit_drv(struct iwl_priv *priv);
+bool iwl_is_monitor_mode(struct iwl_priv *priv);
+void iwl_post_associate(struct iwl_priv *priv);
+void iwl_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes);
+int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
+int iwl_commit_rxon(struct iwl_priv *priv);
+int iwl_set_mode(struct iwl_priv *priv, int mode);
+int iwl_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_if_init_conf *conf);
+void iwl_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_if_init_conf *conf);
+int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
+void iwl_config_ap(struct iwl_priv *priv);
+int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
+ struct ieee80211_tx_queue_stats *stats);
+void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
/*****************************************************
* RX handlers.
@@ -271,10 +308,11 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_rx_replenish(struct iwl_priv *priv);
+void iwl_rx_replenish_now(struct iwl_priv *priv);
int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
int iwl_rx_queue_restock(struct iwl_priv *priv);
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
-void iwl_rx_allocate(struct iwl_priv *priv);
+void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
/* Handlers */
@@ -310,14 +348,6 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
****************************************************/
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
-/*****************************************************
- * RF -Kill - here and not in iwl-rfkill.h to be available when
- * RF-kill subsystem is not compiled.
- ****************************************************/
-void iwl_bg_rf_kill(struct work_struct *work);
-void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv);
-int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv);
-
/*******************************************************************************
* Rate
******************************************************************************/
@@ -328,8 +358,6 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
-void iwl_set_rate(struct iwl_priv *priv);
-
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx);
static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
@@ -358,8 +386,8 @@ int iwl_scan_cancel(struct iwl_priv *priv);
int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
int iwl_scan_initiate(struct iwl_priv *priv);
int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
-u16 iwl_fill_probe_req(struct iwl_priv *priv, enum ieee80211_band band,
- struct ieee80211_mgmt *frame, int left);
+u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
+ const u8 *ie, int ie_len, int left);
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
@@ -423,7 +451,13 @@ int iwl_send_card_state(struct iwl_priv *priv, u32 flags,
*****************************************************/
void iwl_disable_interrupts(struct iwl_priv *priv);
void iwl_enable_interrupts(struct iwl_priv *priv);
-irqreturn_t iwl_isr(int irq, void *data);
+irqreturn_t iwl_isr_legacy(int irq, void *data);
+int iwl_reset_ict(struct iwl_priv *priv);
+void iwl_disable_ict(struct iwl_priv *priv);
+int iwl_alloc_isr_ict(struct iwl_priv *priv);
+void iwl_free_isr_ict(struct iwl_priv *priv);
+irqreturn_t iwl_isr_ict(int irq, void *data);
+
static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
{
int pos;
@@ -432,12 +466,17 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
return pci_lnk_ctl;
}
+#ifdef CONFIG_PM
+int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
+int iwl_pci_resume(struct pci_dev *pdev);
+#endif /* CONFIG_PM */
/*****************************************************
* Error Handling Debugging
******************************************************/
void iwl_dump_nic_error_log(struct iwl_priv *priv);
void iwl_dump_nic_event_log(struct iwl_priv *priv);
+void iwl_clear_isr_stats(struct iwl_priv *priv);
/*****************************************************
* GEOS
@@ -451,14 +490,12 @@ void iwlcore_free_geos(struct iwl_priv *priv);
#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
#define STATUS_INT_ENABLED 2
#define STATUS_RF_KILL_HW 3
-#define STATUS_RF_KILL_SW 4
#define STATUS_INIT 5
#define STATUS_ALIVE 6
#define STATUS_READY 7
#define STATUS_TEMPERATURE 8
#define STATUS_GEO_CONFIGURED 9
#define STATUS_EXIT_PENDING 10
-#define STATUS_IN_SUSPEND 11
#define STATUS_STATISTICS 12
#define STATUS_SCANNING 13
#define STATUS_SCAN_ABORTING 14
@@ -487,11 +524,6 @@ static inline int iwl_is_init(struct iwl_priv *priv)
return test_bit(STATUS_INIT, &priv->status);
}
-static inline int iwl_is_rfkill_sw(struct iwl_priv *priv)
-{
- return test_bit(STATUS_RF_KILL_SW, &priv->status);
-}
-
static inline int iwl_is_rfkill_hw(struct iwl_priv *priv)
{
return test_bit(STATUS_RF_KILL_HW, &priv->status);
@@ -499,7 +531,7 @@ static inline int iwl_is_rfkill_hw(struct iwl_priv *priv)
static inline int iwl_is_rfkill(struct iwl_priv *priv)
{
- return iwl_is_rfkill_hw(priv) || iwl_is_rfkill_sw(priv);
+ return iwl_is_rfkill_hw(priv);
}
static inline int iwl_is_ready_rf(struct iwl_priv *priv)
@@ -528,7 +560,14 @@ static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
{
return priv->cfg->ops->hcmd->rxon_assoc(priv);
}
-
+static inline int iwlcore_commit_rxon(struct iwl_priv *priv)
+{
+ return priv->cfg->ops->hcmd->commit_rxon(priv);
+}
+static inline void iwlcore_config_ap(struct iwl_priv *priv)
+{
+ priv->cfg->ops->lib->config_ap(priv);
+}
static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
struct iwl_priv *priv, enum ieee80211_band band)
{
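Editor's note (not part of the patch): iwl-core.h now routes RXON commits and AP configuration through per-hardware ops tables via the new iwlcore_commit_rxon()/iwlcore_config_ap() inlines. A minimal sketch of that indirection follows; the iwlsub_* handler names are placeholders, not real symbols.

#include "iwl-core.h"
#include "iwl-dev.h"

/* Placeholder per-hardware handlers; names are illustrative only. */
static int  iwlsub_rxon_assoc(struct iwl_priv *priv)     { return 0; }
static int  iwlsub_commit_rxon(struct iwl_priv *priv)    { return 0; }
static void iwlsub_set_rxon_chain(struct iwl_priv *priv) { }

static struct iwl_hcmd_ops iwlsub_hcmd = {
	.rxon_assoc	= iwlsub_rxon_assoc,
	.commit_rxon	= iwlsub_commit_rxon,
	.set_rxon_chain	= iwlsub_set_rxon_chain,
};

/* Core code stays hardware agnostic: iwlcore_commit_rxon(priv) expands to
 * priv->cfg->ops->hcmd->commit_rxon(priv), so each device generation only
 * fills in its own iwl_hcmd_ops (and, likewise, the iwl_apm_ops and
 * iwl_temp_ops members inside struct iwl_lib_ops).
 */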
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 6e983149b83..f03dae1b2f3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -89,6 +89,7 @@
/* EEPROM reads */
#define CSR_EEPROM_REG (CSR_BASE+0x02c)
#define CSR_EEPROM_GP (CSR_BASE+0x030)
+#define CSR_OTP_GP_REG (CSR_BASE+0x034)
#define CSR_GIO_REG (CSR_BASE+0x03C)
#define CSR_GP_UCODE (CSR_BASE+0x044)
#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
@@ -96,8 +97,10 @@
#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
#define CSR_LED_REG (CSR_BASE+0x094)
+#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
+#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
/* Analog phase-lock-loop configuration */
#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
/*
@@ -123,16 +126,18 @@
#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
-#define CSR_HW_IF_CONFIG_REG_BIT_PCI_OWN_SEM (0x00400000)
-#define CSR_HW_IF_CONFIG_REG_BIT_ME_OWN (0x02000000)
-#define CSR_HW_IF_CONFIG_REG_BIT_WAKE_ME (0x08000000)
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000)
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000)
+#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000)
+#define CSR_INT_PERIODIC_DIS (0x00)
+#define CSR_INT_PERIODIC_ENA (0xFF)
/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
* acknowledged (reset) by host writing "1" to flagged bits. */
#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
-#define CSR_INT_BIT_DNLD (1 << 28) /* uCode Download */
+#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
@@ -226,6 +231,10 @@
#define CSR_EEPROM_GP_VALID_MSK (0x00000007)
#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
+#define CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */
+#define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */
+#define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */
+#define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */
/* CSR GIO */
#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
@@ -251,6 +260,11 @@
/* HPET MEM debug */
#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
+
+/* DRAM INT TABLE */
+#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
+#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
+
/*=== HBUS (Host-side Bus) ===*/
#define HBUS_BASE (0x400)
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 65d1a7f2db9..2cf014f523b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -68,13 +68,14 @@ struct iwl_debugfs {
struct dentry *dir_rf;
struct dir_data_files {
struct dentry *file_sram;
- struct dentry *file_eeprom;
+ struct dentry *file_nvm;
struct dentry *file_stations;
struct dentry *file_rx_statistics;
struct dentry *file_tx_statistics;
struct dentry *file_log_event;
struct dentry *file_channels;
struct dentry *file_status;
+ struct dentry *file_interrupt;
} dbgfs_data_files;
struct dir_rf_files {
struct dentry *file_disable_sensitivity;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 64eb585f157..11e08c06891 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -172,7 +172,6 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
const size_t bufsz = sizeof(buf);
- iwl_grab_nic_access(priv);
for (i = priv->dbgfs->sram_len; i > 0; i -= 4) {
val = iwl_read_targ_mem(priv, priv->dbgfs->sram_offset + \
priv->dbgfs->sram_len - i);
@@ -192,7 +191,6 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
}
pos += scnprintf(buf + pos, bufsz - pos, "\n");
- iwl_release_nic_access(priv);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
return ret;
@@ -292,7 +290,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
return ret;
}
-static ssize_t iwl_dbgfs_eeprom_read(struct file *file,
+static ssize_t iwl_dbgfs_nvm_read(struct file *file,
char __user *user_buf,
size_t count,
loff_t *ppos)
@@ -306,7 +304,7 @@ static ssize_t iwl_dbgfs_eeprom_read(struct file *file,
buf_size = 4 * eeprom_len + 256;
if (eeprom_len % 16) {
- IWL_ERR(priv, "EEPROM size is not multiple of 16.\n");
+ IWL_ERR(priv, "NVM size is not a multiple of 16.\n");
return -ENODATA;
}
@@ -318,6 +316,13 @@ static ssize_t iwl_dbgfs_eeprom_read(struct file *file,
}
ptr = priv->eeprom;
+ if (!ptr) {
+ IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
+ return -ENOMEM;
+ }
+ pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n",
+ (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ ? "OTP" : "EEPROM");
for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
@@ -375,51 +380,53 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
}
supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
- channels = supp_band->channels;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Displaying %d channels in 2.4GHz band 802.11bg):\n",
- supp_band->n_channels);
+ if (supp_band) {
+ channels = supp_band->channels;
- for (i = 0; i < supp_band->n_channels; i++)
pos += scnprintf(buf + pos, bufsz - pos,
- "%d: %ddBm: BSS%s%s, %s.\n",
- ieee80211_frequency_to_channel(
- channels[i].center_freq),
- channels[i].max_power,
- channels[i].flags & IEEE80211_CHAN_RADAR ?
- " (IEEE 802.11h required)" : "",
- (!(channels[i].flags & IEEE80211_CHAN_NO_IBSS)
- || (channels[i].flags &
- IEEE80211_CHAN_RADAR)) ? "" :
- ", IBSS",
- channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
- "passive only" : "active/passive");
+ "Displaying %d channels in 2.4GHz band (802.11bg):\n",
+ supp_band->n_channels);
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ ieee80211_frequency_to_channel(
+ channels[i].center_freq),
+ channels[i].max_power,
+ channels[i].flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+ || (channels[i].flags &
+ IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].flags &
+ IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
- channels = supp_band->channels;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Displaying %d channels in 5.2GHz band (802.11a)\n",
- supp_band->n_channels);
+ if (supp_band) {
+ channels = supp_band->channels;
- for (i = 0; i < supp_band->n_channels; i++)
pos += scnprintf(buf + pos, bufsz - pos,
- "%d: %ddBm: BSS%s%s, %s.\n",
- ieee80211_frequency_to_channel(
- channels[i].center_freq),
- channels[i].max_power,
- channels[i].flags & IEEE80211_CHAN_RADAR ?
- " (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
- || (channels[i].flags &
- IEEE80211_CHAN_RADAR)) ? "" :
- ", IBSS",
- channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
- "passive only" : "active/passive");
+ "Displaying %d channels in 5.2GHz band (802.11a)\n",
+ supp_band->n_channels);
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ ieee80211_frequency_to_channel(
+ channels[i].center_freq),
+ channels[i].max_power,
+ channels[i].flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+ || (channels[i].flags &
+ IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].flags &
+ IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -442,8 +449,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
test_bit(STATUS_INT_ENABLED, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
test_bit(STATUS_RF_KILL_HW, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_SW:\t %d\n",
- test_bit(STATUS_RF_KILL_SW, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
test_bit(STATUS_INIT, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
@@ -456,8 +461,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
test_bit(STATUS_GEO_CONFIGURED, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
test_bit(STATUS_EXIT_PENDING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_IN_SUSPEND:\t %d\n",
- test_bit(STATUS_IN_SUSPEND, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
test_bit(STATUS_STATISTICS, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
@@ -475,14 +478,104 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = 24 * 64; /* 24 items * 64 char per item */
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Cannot allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Interrupt Statistics Report:\n");
+
+ pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+ priv->isr_stats.hw);
+ pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+ priv->isr_stats.sw);
+ if (priv->isr_stats.sw > 0) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tLast Restarting Code: 0x%X\n",
+ priv->isr_stats.sw_err);
+ }
+#ifdef CONFIG_IWLWIFI_DEBUG
+ pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+ priv->isr_stats.sch);
+ pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+ priv->isr_stats.alive);
+#endif
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "HW RF KILL switch toggled:\t %u\n",
+ priv->isr_stats.rfkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+ priv->isr_stats.ctkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+ priv->isr_stats.wakeup);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Rx command responses:\t\t %u\n",
+ priv->isr_stats.rx);
+ for (cnt = 0; cnt < REPLY_MAX; cnt++) {
+ if (priv->isr_stats.rx_handlers[cnt] > 0)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tRx handler[%36s]:\t\t %u\n",
+ get_cmd_string(cnt),
+ priv->isr_stats.rx_handlers[cnt]);
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+ priv->isr_stats.tx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+ priv->isr_stats.unhandled);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &reset_flag) != 1)
+ return -EFAULT;
+ if (reset_flag == 0)
+ iwl_clear_isr_stats(priv);
+
+ return count;
+}
+
+
DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_WRITE_FILE_OPS(log_event);
-DEBUGFS_READ_FILE_OPS(eeprom);
+DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
/*
* Create the debugfs files and directories
@@ -510,7 +603,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_DIR(data, dbgfs->dir_drv);
DEBUGFS_ADD_DIR(rf, dbgfs->dir_drv);
- DEBUGFS_ADD_FILE(eeprom, data);
+ DEBUGFS_ADD_FILE(nvm, data);
DEBUGFS_ADD_FILE(sram, data);
DEBUGFS_ADD_FILE(log_event, data);
DEBUGFS_ADD_FILE(stations, data);
@@ -518,6 +611,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(tx_statistics, data);
DEBUGFS_ADD_FILE(channels, data);
DEBUGFS_ADD_FILE(status, data);
+ DEBUGFS_ADD_FILE(interrupt, data);
DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
&priv->disable_chain_noise_cal);
@@ -540,7 +634,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
if (!priv->dbgfs)
return;
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_eeprom);
+ DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_nvm);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_rx_statistics);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_tx_statistics);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram);
@@ -548,6 +642,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status);
+ DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_interrupt);
DEBUGFS_REMOVE(priv->dbgfs->dir_data);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
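Editor's note (not part of the patch): the new data/interrupt entry is created by iwl_dbgfs_register() alongside the other files, and writing 0 to it clears the counters via iwl_clear_isr_stats(). A sketch of the expected attach/detach points, assuming they live in the driver's probe/remove path:

#include "iwl-dev.h"
#include "iwl-debug.h"

/* Illustrative only; the real call sites live in the hardware front end. */
static int iwl_sketch_attach_debugfs(struct iwl_priv *priv, const char *name)
{
	/* creates dir_data/dir_rf and, after this patch, data/nvm and
	 * data/interrupt in place of the old data/eeprom file */
	return iwl_dbgfs_register(priv, name);
}

static void iwl_sketch_detach_debugfs(struct iwl_priv *priv)
{
	iwl_dbgfs_unregister(priv);
}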
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index cf7f0db58fc..e2d620f0b6e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -41,7 +41,6 @@
#include "iwl-prph.h"
#include "iwl-fh.h"
#include "iwl-debug.h"
-#include "iwl-rfkill.h"
#include "iwl-4965-hw.h"
#include "iwl-3945-hw.h"
#include "iwl-3945-led.h"
@@ -289,11 +288,11 @@ struct iwl_frame {
#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
enum {
- /* CMD_SIZE_NORMAL = 0, */
+ CMD_SYNC = 0,
+ CMD_SIZE_NORMAL = 0,
+ CMD_NO_SKB = 0,
CMD_SIZE_HUGE = (1 << 0),
- /* CMD_SYNC = 0, */
CMD_ASYNC = (1 << 1),
- /* CMD_NO_SKB = 0, */
CMD_WANT_SKB = (1 << 2),
};
@@ -381,6 +380,7 @@ struct iwl_rx_queue {
u32 read;
u32 write;
u32 free_count;
+ u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
int need_update;
@@ -498,22 +498,13 @@ struct iwl_qos_info {
#define STA_PS_STATUS_WAKE 0
#define STA_PS_STATUS_SLEEP 1
-struct iwl3945_tid_data {
- u16 seq_number;
-};
-
-struct iwl3945_hw_key {
- enum ieee80211_key_alg alg;
- int keylen;
- u8 key[32];
-};
struct iwl3945_station_entry {
struct iwl3945_addsta_cmd sta;
- struct iwl3945_tid_data tid[MAX_TID_COUNT];
+ struct iwl_tid_data tid[MAX_TID_COUNT];
u8 used;
u8 ps_status;
- struct iwl3945_hw_key keyinfo;
+ struct iwl_hw_key keyinfo;
};
struct iwl_station_entry {
@@ -822,6 +813,26 @@ enum {
MEASUREMENT_ACTIVE = (1 << 1),
};
+enum iwl_nvm_type {
+ NVM_DEVICE_TYPE_EEPROM = 0,
+ NVM_DEVICE_TYPE_OTP,
+};
+
+/* interrupt statistics */
+struct isr_statistics {
+ u32 hw;
+ u32 sw;
+ u32 sw_err;
+ u32 sch;
+ u32 alive;
+ u32 rfkill;
+ u32 ctkill;
+ u32 wakeup;
+ u32 rx;
+ u32 rx_handlers[REPLY_MAX];
+ u32 tx;
+ u32 unhandled;
+};
#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */
@@ -877,15 +888,14 @@ struct iwl_priv {
unsigned long scan_start_tsf;
void *scan;
int scan_bands;
- int one_direct_scan;
- u8 direct_ssid_len;
- u8 direct_ssid[IW_ESSID_MAX_SIZE];
+ struct cfg80211_scan_request *scan_request;
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
u8 mgmt_tx_ant;
/* spinlock */
spinlock_t lock; /* protect general shared data */
spinlock_t hcmd_lock; /* protect hcmd */
+ spinlock_t reg_lock; /* protect hw register access */
struct mutex mutex;
/* basic pci-network driver stuff */
@@ -919,16 +929,12 @@ struct iwl_priv {
const struct iwl_rxon_cmd active_rxon;
struct iwl_rxon_cmd staging_rxon;
- int error_recovering;
struct iwl_rxon_cmd recovery_rxon;
/* 1st responses from initialize and runtime uCode images.
* 4965's initialize alive response contains some calibration data. */
struct iwl_init_alive_resp card_alive_init;
struct iwl_alive_resp card_alive;
-#if defined(CONFIG_IWLWIFI_RFKILL)
- struct rfkill *rfkill;
-#endif
#ifdef CONFIG_IWLWIFI_LEDS
unsigned long last_blink_time;
@@ -978,6 +984,9 @@ struct iwl_priv {
u64 bytes;
} tx_stats[3], rx_stats[3];
+ /* counts interrupts */
+ struct isr_statistics isr_stats;
+
struct iwl_power_mgr power_data;
struct iwl_notif_statistics statistics;
@@ -1017,6 +1026,7 @@ struct iwl_priv {
/* eeprom */
u8 *eeprom;
+ int nvm_device_type;
struct iwl_eeprom_calib_info *calib_info;
enum nl80211_iftype iw_mode;
@@ -1034,7 +1044,16 @@ struct iwl_priv {
/*End*/
struct iwl_hw_params hw_params;
+ /* INT ICT Table */
+ u32 *ict_tbl;
+ dma_addr_t ict_tbl_dma;
+ dma_addr_t aligned_ict_tbl_dma;
+ int ict_index;
+ void *ict_tbl_vir;
+ u32 inta;
+ bool use_ict;
+ u32 inta_mask;
/* Current association information needed to configure the
* hardware */
u16 assoc_id;
@@ -1049,7 +1068,6 @@ struct iwl_priv {
struct work_struct calibrated_work;
struct work_struct scan_completed;
struct work_struct rx_replenish;
- struct work_struct rf_kill;
struct work_struct abort_scan;
struct work_struct update_link_led;
struct work_struct auth_work;
@@ -1059,7 +1077,6 @@ struct iwl_priv {
struct tasklet_struct irq_tasklet;
- struct delayed_work set_power_save;
struct delayed_work init_alive_start;
struct delayed_work alive_start;
struct delayed_work scan_check;
@@ -1090,14 +1107,12 @@ struct iwl_priv {
u32 disable_tx_power_cal;
struct work_struct run_time_calib_work;
struct timer_list statistics_periodic;
-
+ bool hw_ready;
/*For 3945*/
#define IWL_DEFAULT_TX_POWER 0x0F
struct iwl3945_notif_statistics statistics_39;
- struct iwl3945_station_entry stations_39[IWL_STATION_COUNT];
-
u32 sta_supp_rates;
}; /*iwl_priv */
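Editor's note (not part of the patch): struct isr_statistics is the backing store for the new data/interrupt debugfs file. A simplified sketch of how the interrupt path is expected to feed it, using only the CSR_INT_* bits visible in this diff; the helper name and the rx_cmd argument (the REPLY_* opcode taken from the received packet header) are assumptions.

#include "iwl-dev.h"
#include "iwl-csr.h"

static void iwl_sketch_count_irq(struct iwl_priv *priv, u32 inta, u8 rx_cmd)
{
	if (inta & CSR_INT_BIT_HW_ERR)
		priv->isr_stats.hw++;
	if (inta & CSR_INT_BIT_SW_ERR)
		priv->isr_stats.sw++;
	if (inta & CSR_INT_BIT_FH_RX) {
		priv->isr_stats.rx++;
		/* one bucket per REPLY_* opcode, bounded by REPLY_MAX */
		if (rx_cmd < REPLY_MAX)
			priv->isr_stats.rx_handlers[rx_cmd]++;
	}
	if (inta & CSR_INT_BIT_FH_TX)
		priv->isr_stats.tx++;
}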
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 75517d05df0..7d7554a2f34 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -152,6 +152,32 @@ int iwlcore_eeprom_verify_signature(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwlcore_eeprom_verify_signature);
+static int iwlcore_get_nvm_type(struct iwl_priv *priv)
+{
+ u32 otpgp;
+ int nvm_type;
+
+ /* OTP only valid for CP/PP and after */
+ switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
+ case CSR_HW_REV_TYPE_3945:
+ case CSR_HW_REV_TYPE_4965:
+ case CSR_HW_REV_TYPE_5300:
+ case CSR_HW_REV_TYPE_5350:
+ case CSR_HW_REV_TYPE_5100:
+ case CSR_HW_REV_TYPE_5150:
+ nvm_type = NVM_DEVICE_TYPE_EEPROM;
+ break;
+ default:
+ otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
+ if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
+ nvm_type = NVM_DEVICE_TYPE_OTP;
+ else
+ nvm_type = NVM_DEVICE_TYPE_EEPROM;
+ break;
+ }
+ return nvm_type;
+}
+
/*
* The device's EEPROM semaphore prevents conflicts between driver and uCode
* when accessing the EEPROM; each access is a series of pulses to/from the
@@ -198,6 +224,31 @@ const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
}
EXPORT_SYMBOL(iwlcore_eeprom_query_addr);
+static int iwl_init_otp_access(struct iwl_priv *priv)
+{
+ int ret;
+
+ /* Enable 40MHz radio clock */
+ _iwl_write32(priv, CSR_GP_CNTRL,
+ _iwl_read32(priv, CSR_GP_CNTRL) |
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /* wait for clock to be ready */
+ ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (ret < 0)
+ IWL_ERR(priv, "Timeout accessing OTP\n");
+ else {
+ iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
+ udelay(5);
+ iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
+ }
+ return ret;
+}
+
/**
* iwl_eeprom_init - read EEPROM contents
*
@@ -209,11 +260,18 @@ int iwl_eeprom_init(struct iwl_priv *priv)
{
u16 *e;
u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
- int sz = priv->cfg->eeprom_size;
+ int sz;
int ret;
u16 addr;
+ u32 otpgp;
+
+ priv->nvm_device_type = iwlcore_get_nvm_type(priv);
/* allocate eeprom */
+ if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ priv->cfg->eeprom_size =
+ OTP_BLOCK_SIZE * OTP_LOWER_BLOCKS_TOTAL;
+ sz = priv->cfg->eeprom_size;
priv->eeprom = kzalloc(sz, GFP_KERNEL);
if (!priv->eeprom) {
ret = -ENOMEM;
@@ -235,30 +293,77 @@ int iwl_eeprom_init(struct iwl_priv *priv)
ret = -ENOENT;
goto err;
}
-
- /* eeprom is an array of 16bit values */
- for (addr = 0; addr < sz; addr += sizeof(u16)) {
- u32 r;
-
- _iwl_write32(priv, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
- ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
- goto done;
+ if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
+ ret = iwl_init_otp_access(priv);
+ if (ret) {
+ IWL_ERR(priv, "Failed to initialize OTP access.\n");
+ ret = -ENOENT;
+ goto err;
+ }
+ _iwl_write32(priv, CSR_EEPROM_GP,
+ iwl_read32(priv, CSR_EEPROM_GP) &
+ ~CSR_EEPROM_GP_IF_OWNER_MSK);
+ /* clear */
+ _iwl_write32(priv, CSR_OTP_GP_REG,
+ iwl_read32(priv, CSR_OTP_GP_REG) |
+ CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
+ CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+
+ for (addr = 0; addr < sz; addr += sizeof(u16)) {
+ u32 r;
+
+ _iwl_write32(priv, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IWL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
+ goto done;
+ }
+ r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
+ /* check for ECC errors: */
+ otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
+ if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
+ /* stop in this case */
+ IWL_ERR(priv, "Uncorrectable OTP ECC error, Abort OTP read\n");
+ goto done;
+ }
+ if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
+ /* continue in this case */
+ _iwl_write32(priv, CSR_OTP_GP_REG,
+ iwl_read32(priv, CSR_OTP_GP_REG) |
+ CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
+ IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
+ }
+ e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
+ }
+ } else {
+ /* eeprom is an array of 16bit values */
+ for (addr = 0; addr < sz; addr += sizeof(u16)) {
+ u32 r;
+
+ _iwl_write32(priv, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IWL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
+ goto done;
+ }
+ r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
+ e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
}
- r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
- e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
}
ret = 0;
done:
priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
err:
if (ret)
- kfree(priv->eeprom);
+ iwl_eeprom_free(priv);
alloc_err:
return ret;
}
@@ -285,7 +390,7 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
return 0;
err:
- IWL_ERR(priv, "Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
+ IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
eeprom_ver, priv->cfg->eeprom_ver,
calib_ver, priv->cfg->eeprom_calib_ver);
return -EINVAL;
@@ -301,6 +406,8 @@ EXPORT_SYMBOL(iwl_eeprom_query_addr);
u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
{
+ if (!priv->eeprom)
+ return 0;
return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
}
EXPORT_SYMBOL(iwl_eeprom_query16);
@@ -481,8 +588,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
/* First write that fat is not enabled, and then enable
* one by one */
ch_info->fat_extension_channel =
- (IEEE80211_CHAN_NO_FAT_ABOVE |
- IEEE80211_CHAN_NO_FAT_BELOW);
+ (IEEE80211_CHAN_NO_HT40PLUS |
+ IEEE80211_CHAN_NO_HT40MINUS);
if (!(is_channel_valid(ch_info))) {
IWL_DEBUG_INFO(priv, "Ch. %d Flags %x [%sGHz] - "
@@ -561,7 +668,7 @@ int iwl_init_channel_map(struct iwl_priv *priv)
fat_extension_chan = 0;
else
fat_extension_chan =
- IEEE80211_CHAN_NO_FAT_BELOW;
+ IEEE80211_CHAN_NO_HT40MINUS;
/* Set up driver's info for lower half */
iwl_set_fat_chan_info(priv, ieeeband,
@@ -573,7 +680,7 @@ int iwl_init_channel_map(struct iwl_priv *priv)
iwl_set_fat_chan_info(priv, ieeeband,
(eeprom_ch_index[ch] + 4),
&(eeprom_ch_info[ch]),
- IEEE80211_CHAN_NO_FAT_ABOVE);
+ IEEE80211_CHAN_NO_HT40PLUS);
}
}
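Editor's note (not part of the patch): both read loops above pull one 16-bit NVM word per CSR_EEPROM_REG access, taken from the upper half of the 32-bit register, and the OTP image read here is capped at OTP_BLOCK_SIZE * OTP_LOWER_BLOCKS_TOTAL = 0x400 * 3 = 0xC00 bytes. A small sketch of the word extraction under those assumptions; the helper name is illustrative.

#include <linux/types.h>
#include <asm/byteorder.h>

/* CSR_EEPROM_REG returns the addressed 16-bit word in bits 31:16; it is
 * stored into priv->eeprom as a little-endian stream, hence the
 * le16_to_cpu() on the shifted value, exactly as in the loops above. */
static inline u16 iwl_sketch_nvm_word(u32 r)
{
	return le16_to_cpu((__force __le16)(r >> 16));
}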
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 3479153d96c..195b4ef12c2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -179,6 +179,10 @@ struct iwl_eeprom_channel {
#define EEPROM_5050_TX_POWER_VERSION (4)
#define EEPROM_5050_EEPROM_VERSION (0x21E)
+/* OTP */
+#define OTP_LOWER_BLOCKS_TOTAL (3)
+#define OTP_BLOCK_SIZE (0x400)
+
/* 2.4 GHz */
extern const u8 iwl_eeprom_band_1[14];
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 083ea1ffbe8..d30cb0275d1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -131,9 +131,23 @@ static inline void __iwl_set_bit(const char *f, u32 l,
IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
_iwl_write32(priv, reg, val);
}
-#define iwl_set_bit(p, r, m) __iwl_set_bit(__FILE__, __LINE__, p, r, m)
+static inline void iwl_set_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ __iwl_set_bit(__FILE__, __LINE__, p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
#else
-#define iwl_set_bit(p, r, m) _iwl_set_bit(p, r, m)
+static inline void iwl_set_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _iwl_set_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
#endif
static inline void _iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
@@ -148,19 +162,30 @@ static inline void __iwl_clear_bit(const char *f, u32 l,
IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
_iwl_write32(priv, reg, val);
}
-#define iwl_clear_bit(p, r, m) __iwl_clear_bit(__FILE__, __LINE__, p, r, m)
+static inline void iwl_clear_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ __iwl_clear_bit(__FILE__, __LINE__, p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
#else
-#define iwl_clear_bit(p, r, m) _iwl_clear_bit(p, r, m)
+static inline void iwl_clear_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _iwl_clear_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
#endif
static inline int _iwl_grab_nic_access(struct iwl_priv *priv)
{
int ret;
u32 val;
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (atomic_read(&priv->restrict_refcnt))
- return 0;
-#endif
+
/* this bit wakes up the NIC */
_iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = _iwl_poll_bit(priv, CSR_GP_CNTRL,
@@ -170,12 +195,10 @@ static inline int _iwl_grab_nic_access(struct iwl_priv *priv)
if (ret < 0) {
val = _iwl_read32(priv, CSR_GP_CNTRL);
IWL_ERR(priv, "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
+ _iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
return -EIO;
}
-#ifdef CONFIG_IWLWIFI_DEBUG
- atomic_inc(&priv->restrict_refcnt);
-#endif
return 0;
}
@@ -183,9 +206,6 @@ static inline int _iwl_grab_nic_access(struct iwl_priv *priv)
static inline int __iwl_grab_nic_access(const char *f, u32 l,
struct iwl_priv *priv)
{
- if (atomic_read(&priv->restrict_refcnt))
- IWL_ERR(priv, "Grabbing access while already held %s %d.\n", f, l);
-
IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
return _iwl_grab_nic_access(priv);
}
@@ -198,18 +218,13 @@ static inline int __iwl_grab_nic_access(const char *f, u32 l,
static inline void _iwl_release_nic_access(struct iwl_priv *priv)
{
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (atomic_dec_and_test(&priv->restrict_refcnt))
-#endif
- _iwl_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ _iwl_clear_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
#ifdef CONFIG_IWLWIFI_DEBUG
static inline void __iwl_release_nic_access(const char *f, u32 l,
struct iwl_priv *priv)
{
- if (atomic_read(&priv->restrict_refcnt) <= 0)
- IWL_ERR(priv, "Release unheld nic access at line %s %d.\n", f, l);
IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
_iwl_release_nic_access(priv);
@@ -230,16 +245,37 @@ static inline u32 __iwl_read_direct32(const char *f, u32 l,
struct iwl_priv *priv, u32 reg)
{
u32 value = _iwl_read_direct32(priv, reg);
- if (!atomic_read(&priv->restrict_refcnt))
- IWL_ERR(priv, "Nic access not held from %s %d\n", f, l);
IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value,
f, l);
return value;
}
-#define iwl_read_direct32(priv, reg) \
- __iwl_read_direct32(__FILE__, __LINE__, priv, reg)
+static inline u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+ u32 value;
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ value = __iwl_read_direct32(__FILE__, __LINE__, priv, reg);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return value;
+}
+
#else
-#define iwl_read_direct32 _iwl_read_direct32
+static inline u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+ u32 value;
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ value = _iwl_read_direct32(priv, reg);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return value;
+
+}
#endif
static inline void _iwl_write_direct32(struct iwl_priv *priv,
@@ -247,19 +283,17 @@ static inline void _iwl_write_direct32(struct iwl_priv *priv,
{
_iwl_write32(priv, reg, value);
}
-#ifdef CONFIG_IWLWIFI_DEBUG
-static void __iwl_write_direct32(const char *f , u32 line,
- struct iwl_priv *priv, u32 reg, u32 value)
+static inline void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
{
- if (!atomic_read(&priv->restrict_refcnt))
- IWL_ERR(priv, "Nic access not held from %s line %d\n", f, line);
- _iwl_write_direct32(priv, reg, value);
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (!iwl_grab_nic_access(priv)) {
+ _iwl_write_direct32(priv, reg, value);
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
-#define iwl_write_direct32(priv, reg, value) \
- __iwl_write_direct32(__func__, __LINE__, priv, reg, value)
-#else
-#define iwl_write_direct32 _iwl_write_direct32
-#endif
static inline void iwl_write_reg_buf(struct iwl_priv *priv,
u32 reg, u32 len, u32 *values)
@@ -268,14 +302,23 @@ static inline void iwl_write_reg_buf(struct iwl_priv *priv,
if ((priv != NULL) && (values != NULL)) {
for (; 0 < len; len -= count, reg += count, values++)
- _iwl_write_direct32(priv, reg, *values);
+ iwl_write_direct32(priv, reg, *values);
}
}
static inline int _iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr,
u32 mask, int timeout)
{
- return _iwl_poll_bit(priv, addr, mask, mask, timeout);
+ int t = 0;
+
+ do {
+ if ((iwl_read_direct32(priv, addr) & mask) == mask)
+ return t;
+ udelay(IWL_POLL_INTERVAL);
+ t += IWL_POLL_INTERVAL;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -305,20 +348,18 @@ static inline u32 _iwl_read_prph(struct iwl_priv *priv, u32 reg)
rmb();
return _iwl_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
}
-#ifdef CONFIG_IWLWIFI_DEBUG
-static inline u32 __iwl_read_prph(const char *f, u32 line,
- struct iwl_priv *priv, u32 reg)
+static inline u32 iwl_read_prph(struct iwl_priv *priv, u32 reg)
{
- if (!atomic_read(&priv->restrict_refcnt))
- IWL_ERR(priv, "Nic access not held from %s line %d\n", f, line);
- return _iwl_read_prph(priv, reg);
-}
+ unsigned long reg_flags;
+ u32 val;
-#define iwl_read_prph(priv, reg) \
- __iwl_read_prph(__func__, __LINE__, priv, reg)
-#else
-#define iwl_read_prph _iwl_read_prph
-#endif
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ val = _iwl_read_prph(priv, reg);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return val;
+}
static inline void _iwl_write_prph(struct iwl_priv *priv,
u32 addr, u32 val)
@@ -328,83 +369,107 @@ static inline void _iwl_write_prph(struct iwl_priv *priv,
wmb();
_iwl_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
}
-#ifdef CONFIG_IWLWIFI_DEBUG
-static inline void __iwl_write_prph(const char *f, u32 line,
- struct iwl_priv *priv, u32 addr, u32 val)
+
+static inline void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
{
- if (!atomic_read(&priv->restrict_refcnt))
- IWL_ERR(priv, "Nic access not held from %s line %d\n", f, line);
- _iwl_write_prph(priv, addr, val);
-}
+ unsigned long reg_flags;
-#define iwl_write_prph(priv, addr, val) \
- __iwl_write_prph(__func__, __LINE__, priv, addr, val);
-#else
-#define iwl_write_prph _iwl_write_prph
-#endif
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (!iwl_grab_nic_access(priv)) {
+ _iwl_write_prph(priv, addr, val);
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
#define _iwl_set_bits_prph(priv, reg, mask) \
_iwl_write_prph(priv, reg, (_iwl_read_prph(priv, reg) | mask))
-#ifdef CONFIG_IWLWIFI_DEBUG
-static inline void __iwl_set_bits_prph(const char *f, u32 line,
- struct iwl_priv *priv,
- u32 reg, u32 mask)
+
+static inline void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
{
- if (!atomic_read(&priv->restrict_refcnt))
- IWL_ERR(priv, "Nic access not held from %s line %d\n", f, line);
+ unsigned long reg_flags;
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
_iwl_set_bits_prph(priv, reg, mask);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
-#define iwl_set_bits_prph(priv, reg, mask) \
- __iwl_set_bits_prph(__func__, __LINE__, priv, reg, mask)
-#else
-#define iwl_set_bits_prph _iwl_set_bits_prph
-#endif
#define _iwl_set_bits_mask_prph(priv, reg, bits, mask) \
_iwl_write_prph(priv, reg, ((_iwl_read_prph(priv, reg) & mask) | bits))
-#ifdef CONFIG_IWLWIFI_DEBUG
-static inline void __iwl_set_bits_mask_prph(const char *f, u32 line,
- struct iwl_priv *priv, u32 reg, u32 bits, u32 mask)
+static inline void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
+ u32 bits, u32 mask)
{
- if (!atomic_read(&priv->restrict_refcnt))
- IWL_ERR(priv, "Nic access not held from %s line %d\n", f, line);
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
_iwl_set_bits_mask_prph(priv, reg, bits, mask);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
-#define iwl_set_bits_mask_prph(priv, reg, bits, mask) \
- __iwl_set_bits_mask_prph(__func__, __LINE__, priv, reg, bits, mask)
-#else
-#define iwl_set_bits_mask_prph _iwl_set_bits_mask_prph
-#endif
static inline void iwl_clear_bits_prph(struct iwl_priv
*priv, u32 reg, u32 mask)
{
- u32 val = _iwl_read_prph(priv, reg);
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ val = _iwl_read_prph(priv, reg);
_iwl_write_prph(priv, reg, (val & ~mask));
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
static inline u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr)
{
- iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
+ unsigned long reg_flags;
+ u32 value;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+
+ _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
rmb();
- return iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ value = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return value;
}
static inline void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
{
- iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
- wmb();
- iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (!iwl_grab_nic_access(priv)) {
+ _iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
+ wmb();
+ _iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
static inline void iwl_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
u32 len, u32 *values)
{
- iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
- wmb();
- for (; 0 < len; len -= sizeof(u32), values++)
- iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, *values);
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (!iwl_grab_nic_access(priv)) {
+ _iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
+ wmb();
+ for (; 0 < len; len -= sizeof(u32), values++)
+ _iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, *values);
+
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
#endif
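
The iwl-io.h hunks above drop the debug-only restrict_refcnt bookkeeping and make every "direct" accessor self-contained: take priv->reg_lock with spin_lock_irqsave(), wake the NIC via iwl_grab_nic_access(), perform the raw _iwl_* access, then release the NIC and the lock. A minimal sketch of that shape, using only names visible in this diff (the function name iwl_read_direct32_sketch is hypothetical, and the snippet assumes the driver's own headers such as iwl-dev.h and iwl-io.h):

#include <linux/spinlock.h>
#include <linux/types.h>

/* Sketch of the accessor pattern introduced above: one lock and one
 * grab/release pair per access, so callers no longer bracket their own
 * register I/O with iwl_grab_nic_access()/iwl_release_nic_access(). */
static inline u32 iwl_read_direct32_sketch(struct iwl_priv *priv, u32 reg)
{
	unsigned long reg_flags;
	u32 value = 0;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);	/* serialize register I/O */
	if (!iwl_grab_nic_access(priv)) {		/* wake NIC (MAC_ACCESS_REQ) */
		value = _iwl_read_direct32(priv, reg);
		iwl_release_nic_access(priv);		/* let the NIC sleep again */
	}
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return value;
}

This is also why the later hunks in iwl-rx.c, iwl-tx.c and iwl3945-base.c can simply delete their explicit grab/release calls around iwl_write_direct32() and friends.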
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 19680f72087..5e64252f80f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -176,10 +176,6 @@ static int iwl_led_associate(struct iwl_priv *priv, int led_id)
static int iwl_led_disassociate(struct iwl_priv *priv, int led_id)
{
priv->allow_blinking = 0;
- if (iwl_is_rfkill(priv))
- iwl4965_led_off_reg(priv, led_id);
- else
- iwl4965_led_on_reg(priv, led_id);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 47c894530eb..f2ea3f05f6e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -41,38 +41,33 @@
#include "iwl-power.h"
/*
- * Setting power level allow the card to go to sleep when not busy
- * there are three factor that decide the power level to go to, they
- * are list here with its priority
- * 1- critical_power_setting this will be set according to card temperature.
- * 2- system_power_setting this will be set by system PM manager.
- * 3- user_power_setting this will be set by user either by writing to sys or
- * mac80211
+ * Setting power level allows the card to go to sleep when not busy.
*
- * if system_power_setting and user_power_setting is set to auto
- * the power level will be decided according to association status and battery
- * status.
+ * The power level is set to INDEX_1 (the least deep state) by
+ * default, and will, in the future, be the deepest state unless
+ * otherwise required by pm_qos network latency requirements.
*
+ * Using INDEX_1 without pm_qos is ok because mac80211 will disable
+ * PS when even checking every beacon for the TIM bit would exceed
+ * the required latency.
*/
-#define MSEC_TO_USEC 1024
#define IWL_POWER_RANGE_0_MAX (2)
#define IWL_POWER_RANGE_1_MAX (10)
-
-#define IWL_POWER_ON_BATTERY IWL_POWER_INDEX_5
-#define IWL_POWER_ON_AC_DISASSOC IWL_POWER_MODE_CAM
-#define IWL_POWER_ON_AC_ASSOC IWL_POWER_MODE_CAM
-
-
-#define IWL_CT_KILL_TEMPERATURE 110
-#define IWL_MIN_POWER_TEMPERATURE 100
-#define IWL_REDUCED_POWER_TEMPERATURE 95
-
+#define NOSLP cpu_to_le16(0), 0, 0
+#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
+#define TU_TO_USEC 1024
+#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
+#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
+ cpu_to_le32(X1), \
+ cpu_to_le32(X2), \
+ cpu_to_le32(X3), \
+ cpu_to_le32(X4)}
/* default power management (not Tx power) table values */
-/* for TIM 0-10 */
-static struct iwl_power_vec_entry range_0[IWL_POWER_MAX] = {
+/* for DTIM period 0 through IWL_POWER_RANGE_0_MAX */
+static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
{{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
@@ -82,8 +77,8 @@ static struct iwl_power_vec_entry range_0[IWL_POWER_MAX] = {
};
-/* for TIM = 3-10 */
-static struct iwl_power_vec_entry range_1[IWL_POWER_MAX] = {
+/* for DTIM period IWL_POWER_RANGE_0_MAX + 1 through IWL_POWER_RANGE_1_MAX */
+static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
{{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
@@ -92,8 +87,8 @@ static struct iwl_power_vec_entry range_1[IWL_POWER_MAX] = {
{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2}
};
-/* for TIM > 11 */
-static struct iwl_power_vec_entry range_2[IWL_POWER_MAX] = {
+/* for DTIM period > IWL_POWER_RANGE_1_MAX */
+static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
{{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
@@ -106,39 +101,15 @@ static struct iwl_power_vec_entry range_2[IWL_POWER_MAX] = {
/* set card power command */
static int iwl_set_power(struct iwl_priv *priv, void *cmd)
{
- return iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
- sizeof(struct iwl_powertable_cmd),
- cmd, NULL);
-}
-/* decide the right power level according to association status
- * and battery status
- */
-static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
-{
- u16 mode;
-
- switch (priv->power_data.user_power_setting) {
- case IWL_POWER_AUTO:
- /* if running on battery */
- if (priv->power_data.is_battery_active)
- mode = IWL_POWER_ON_BATTERY;
- else if (iwl_is_associated(priv))
- mode = IWL_POWER_ON_AC_ASSOC;
- else
- mode = IWL_POWER_ON_AC_DISASSOC;
- break;
- default:
- mode = priv->power_data.user_power_setting;
- break;
- }
- return mode;
+ return iwl_send_cmd_pdu(priv, POWER_TABLE_CMD,
+ sizeof(struct iwl_powertable_cmd), cmd);
}
/* initialize to default */
static void iwl_power_init_handle(struct iwl_priv *priv)
{
struct iwl_power_mgr *pow_data;
- int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX;
+ int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_NUM;
struct iwl_powertable_cmd *cmd;
int i;
u16 lctl;
@@ -157,7 +128,7 @@ static void iwl_power_init_handle(struct iwl_priv *priv)
IWL_DEBUG_POWER(priv, "adjust power command flags\n");
- for (i = 0; i < IWL_POWER_MAX; i++) {
+ for (i = 0; i < IWL_POWER_NUM; i++) {
cmd = &pow_data->pwr_range_0[i].cmd;
if (lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN)
@@ -247,33 +218,12 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
- /* If on battery, set to 3,
- * if plugged into AC power, set to CAM ("continuously aware mode"),
- * else user level */
-
- switch (setting->system_power_setting) {
- case IWL_POWER_SYS_AUTO:
- final_mode = iwl_get_auto_power_mode(priv);
- break;
- case IWL_POWER_SYS_BATTERY:
- final_mode = IWL_POWER_INDEX_3;
- break;
- case IWL_POWER_SYS_AC:
- final_mode = IWL_POWER_MODE_CAM;
- break;
- default:
- final_mode = IWL_POWER_INDEX_3;
- WARN_ON(1);
- }
-
- if (setting->critical_power_setting > final_mode)
- final_mode = setting->critical_power_setting;
+ final_mode = priv->power_data.user_power_setting;
- /* driver only support CAM for non STA network */
- if (priv->iw_mode != NL80211_IFTYPE_STATION)
+ if (setting->power_disabled)
final_mode = IWL_POWER_MODE_CAM;
- if (iwl_is_ready_rf(priv) && !setting->power_disabled &&
+ if (iwl_is_ready_rf(priv) &&
((setting->power_mode != final_mode) || force)) {
struct iwl_powertable_cmd cmd;
@@ -290,8 +240,6 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
if (final_mode == IWL_POWER_MODE_CAM)
clear_bit(STATUS_POWER_PMI, &priv->status);
- else
- set_bit(STATUS_POWER_PMI, &priv->status);
if (priv->cfg->ops->lib->update_chain_flags && update_chains)
priv->cfg->ops->lib->update_chain_flags(priv);
@@ -307,51 +255,10 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
}
EXPORT_SYMBOL(iwl_power_update_mode);
-/* Allow other iwl code to disable/enable power management active
- * this will be useful for rate scale to disable PM during heavy
- * Tx/Rx activities
- */
-int iwl_power_disable_management(struct iwl_priv *priv, u32 ms)
-{
- u16 prev_mode;
- int ret = 0;
-
- if (priv->power_data.power_disabled)
- return -EBUSY;
-
- prev_mode = priv->power_data.user_power_setting;
- priv->power_data.user_power_setting = IWL_POWER_MODE_CAM;
- ret = iwl_power_update_mode(priv, 0);
- priv->power_data.power_disabled = 1;
- priv->power_data.user_power_setting = prev_mode;
- cancel_delayed_work(&priv->set_power_save);
- if (ms)
- queue_delayed_work(priv->workqueue, &priv->set_power_save,
- msecs_to_jiffies(ms));
-
-
- return ret;
-}
-EXPORT_SYMBOL(iwl_power_disable_management);
-
-/* Allow other iwl code to disable/enable power management active
- * this will be useful for rate scale to disable PM during high
- * volume activities
- */
-int iwl_power_enable_management(struct iwl_priv *priv)
-{
- int ret = 0;
-
- priv->power_data.power_disabled = 0;
- ret = iwl_power_update_mode(priv, 0);
- return ret;
-}
-EXPORT_SYMBOL(iwl_power_enable_management);
-
/* set user_power_setting */
int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode)
{
- if (mode > IWL_POWER_MAX)
+ if (mode >= IWL_POWER_NUM)
return -EINVAL;
priv->power_data.user_power_setting = mode;
@@ -360,86 +267,12 @@ int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode)
}
EXPORT_SYMBOL(iwl_power_set_user_mode);
-/* set system_power_setting. This should be set by over all
- * PM application.
- */
-int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode)
-{
- if (mode < IWL_POWER_SYS_MAX)
- priv->power_data.system_power_setting = mode;
- else
- return -EINVAL;
- return iwl_power_update_mode(priv, 0);
-}
-EXPORT_SYMBOL(iwl_power_set_system_mode);
-
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
iwl_power_init_handle(priv);
- priv->power_data.user_power_setting = IWL_POWER_AUTO;
- priv->power_data.system_power_setting = IWL_POWER_SYS_AUTO;
- priv->power_data.power_disabled = 0;
- priv->power_data.is_battery_active = 0;
- priv->power_data.critical_power_setting = 0;
+ priv->power_data.user_power_setting = IWL_POWER_INDEX_1;
+ /* default to disabled until mac80211 says otherwise */
+ priv->power_data.power_disabled = 1;
}
EXPORT_SYMBOL(iwl_power_initialize);
-
-/* set critical_power_setting according to temperature value */
-int iwl_power_temperature_change(struct iwl_priv *priv)
-{
- int ret = 0;
- s32 temperature = KELVIN_TO_CELSIUS(priv->last_temperature);
- u16 new_critical = priv->power_data.critical_power_setting;
-
- if (temperature > IWL_CT_KILL_TEMPERATURE)
- return 0;
- else if (temperature > IWL_MIN_POWER_TEMPERATURE)
- new_critical = IWL_POWER_INDEX_5;
- else if (temperature > IWL_REDUCED_POWER_TEMPERATURE)
- new_critical = IWL_POWER_INDEX_3;
- else
- new_critical = IWL_POWER_MODE_CAM;
-
- if (new_critical != priv->power_data.critical_power_setting)
- priv->power_data.critical_power_setting = new_critical;
-
- if (priv->power_data.critical_power_setting >
- priv->power_data.power_mode)
- ret = iwl_power_update_mode(priv, 0);
-
- return ret;
-}
-EXPORT_SYMBOL(iwl_power_temperature_change);
-
-static void iwl_bg_set_power_save(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work,
- struct iwl_priv, set_power_save.work);
- IWL_DEBUG_POWER(priv, "update power\n");
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
-
- /* on starting association we disable power management
- * until association, if association failed then this
- * timer will expire and enable PM again.
- */
- if (!iwl_is_associated(priv))
- iwl_power_enable_management(priv);
-
- mutex_unlock(&priv->mutex);
-}
-void iwl_setup_power_deferred_work(struct iwl_priv *priv)
-{
- INIT_DELAYED_WORK(&priv->set_power_save, iwl_bg_set_power_save);
-}
-EXPORT_SYMBOL(iwl_setup_power_deferred_work);
-
-void iwl_power_cancel_timeout(struct iwl_priv *priv)
-{
- cancel_delayed_work(&priv->set_power_save);
-}
-EXPORT_SYMBOL(iwl_power_cancel_timeout);
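
With the battery, AC and temperature inputs removed, the power level chosen above collapses to the user's sysfs setting unless mac80211 has power save disabled, in which case CAM ("continuously aware mode") is forced. A compact restatement of that decision, kept hypothetical (the helper name is made up; the fields are those of struct iwl_power_mgr as shown in the iwl-power.h hunk below):

/* Sketch of the simplified power-level selection after this patch. */
static u16 iwl_select_power_index(const struct iwl_power_mgr *setting)
{
	if (setting->power_disabled)		/* mac80211 CONF_PS not set */
		return IWL_POWER_MODE_CAM;	/* no sleeping at all */
	return setting->user_power_setting;	/* sysfs index, < IWL_POWER_NUM */
}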
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 18963392121..37ba3bb7a25 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -40,56 +40,29 @@ enum {
IWL_POWER_INDEX_3,
IWL_POWER_INDEX_4,
IWL_POWER_INDEX_5,
- IWL_POWER_AUTO,
- IWL_POWER_MAX = IWL_POWER_AUTO,
+ IWL_POWER_NUM
};
-enum {
- IWL_POWER_SYS_AUTO,
- IWL_POWER_SYS_AC,
- IWL_POWER_SYS_BATTERY,
- IWL_POWER_SYS_MAX,
-};
-
-
/* Power management (not Tx power) structures */
-#define NOSLP cpu_to_le16(0), 0, 0
-#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
-#define SLP_TOUT(T) cpu_to_le32((T) * MSEC_TO_USEC)
-#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
- cpu_to_le32(X1), \
- cpu_to_le32(X2), \
- cpu_to_le32(X3), \
- cpu_to_le32(X4)}
struct iwl_power_vec_entry {
struct iwl_powertable_cmd cmd;
u8 no_dtim;
};
struct iwl_power_mgr {
- spinlock_t lock;
- struct iwl_power_vec_entry pwr_range_0[IWL_POWER_MAX];
- struct iwl_power_vec_entry pwr_range_1[IWL_POWER_MAX];
- struct iwl_power_vec_entry pwr_range_2[IWL_POWER_MAX];
+ struct iwl_power_vec_entry pwr_range_0[IWL_POWER_NUM];
+ struct iwl_power_vec_entry pwr_range_1[IWL_POWER_NUM];
+ struct iwl_power_vec_entry pwr_range_2[IWL_POWER_NUM];
u32 dtim_period;
/* final power level that used to calculate final power command */
u8 power_mode;
- u8 user_power_setting; /* set by user through mac80211 or sysfs */
- u8 system_power_setting; /* set by kernel system tools */
- u8 critical_power_setting; /* set if driver over heated */
- u8 is_battery_active; /* DC/AC power */
- u8 power_disabled; /* flag to disable using power saving level */
+ u8 user_power_setting; /* set by user through sysfs */
+ u8 power_disabled; /* set by mac80211's CONF_PS */
};
-void iwl_setup_power_deferred_work(struct iwl_priv *priv);
-void iwl_power_cancel_timeout(struct iwl_priv *priv);
int iwl_power_update_mode(struct iwl_priv *priv, bool force);
-int iwl_power_disable_management(struct iwl_priv *priv, u32 ms);
-int iwl_power_enable_management(struct iwl_priv *priv);
int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode);
-int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode);
void iwl_power_initialize(struct iwl_priv *priv);
-int iwl_power_temperature_change(struct iwl_priv *priv);
#endif /* __iwl_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
deleted file mode 100644
index 2ad9faf1508..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-
-/* software rf-kill from user */
-static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
-{
- struct iwl_priv *priv = data;
- int err = 0;
-
- if (!priv->rfkill)
- return 0;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return 0;
-
- IWL_DEBUG_RF_KILL(priv, "we received soft RFKILL set to state %d\n", state);
- mutex_lock(&priv->mutex);
-
- switch (state) {
- case RFKILL_STATE_UNBLOCKED:
- if (iwl_is_rfkill_hw(priv)) {
- err = -EBUSY;
- goto out_unlock;
- }
- iwl_radio_kill_sw_enable_radio(priv);
- break;
- case RFKILL_STATE_SOFT_BLOCKED:
- iwl_radio_kill_sw_disable_radio(priv);
- break;
- default:
- IWL_WARN(priv, "we received unexpected RFKILL state %d\n",
- state);
- break;
- }
-out_unlock:
- mutex_unlock(&priv->mutex);
-
- return err;
-}
-
-int iwl_rfkill_init(struct iwl_priv *priv)
-{
- struct device *device = wiphy_dev(priv->hw->wiphy);
- int ret = 0;
-
- BUG_ON(device == NULL);
-
- IWL_DEBUG_RF_KILL(priv, "Initializing RFKILL.\n");
- priv->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN);
- if (!priv->rfkill) {
- IWL_ERR(priv, "Unable to allocate RFKILL device.\n");
- ret = -ENOMEM;
- goto error;
- }
-
- priv->rfkill->name = priv->cfg->name;
- priv->rfkill->data = priv;
- priv->rfkill->state = RFKILL_STATE_UNBLOCKED;
- priv->rfkill->toggle_radio = iwl_rfkill_soft_rf_kill;
- priv->rfkill->user_claim_unsupported = 1;
-
- priv->rfkill->dev.class->suspend = NULL;
- priv->rfkill->dev.class->resume = NULL;
-
- ret = rfkill_register(priv->rfkill);
- if (ret) {
- IWL_ERR(priv, "Unable to register RFKILL: %d\n", ret);
- goto free_rfkill;
- }
-
- IWL_DEBUG_RF_KILL(priv, "RFKILL initialization complete.\n");
- return ret;
-
-free_rfkill:
- if (priv->rfkill != NULL)
- rfkill_free(priv->rfkill);
- priv->rfkill = NULL;
-
-error:
- IWL_DEBUG_RF_KILL(priv, "RFKILL initialization complete.\n");
- return ret;
-}
-EXPORT_SYMBOL(iwl_rfkill_init);
-
-void iwl_rfkill_unregister(struct iwl_priv *priv)
-{
-
- if (priv->rfkill)
- rfkill_unregister(priv->rfkill);
-
- priv->rfkill = NULL;
-}
-EXPORT_SYMBOL(iwl_rfkill_unregister);
-
-/* set RFKILL to the right state. */
-void iwl_rfkill_set_hw_state(struct iwl_priv *priv)
-{
- if (!priv->rfkill)
- return;
-
- if (iwl_is_rfkill_hw(priv)) {
- rfkill_force_state(priv->rfkill, RFKILL_STATE_HARD_BLOCKED);
- return;
- }
-
- if (!iwl_is_rfkill_sw(priv))
- rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
- else
- rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
-}
-EXPORT_SYMBOL(iwl_rfkill_set_hw_state);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.h b/drivers/net/wireless/iwlwifi/iwl-rfkill.h
deleted file mode 100644
index 633dafb4bf1..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#ifndef __iwl_rf_kill_h__
-#define __iwl_rf_kill_h__
-
-struct iwl_priv;
-
-#include <linux/rfkill.h>
-
-#ifdef CONFIG_IWLWIFI_RFKILL
-
-void iwl_rfkill_set_hw_state(struct iwl_priv *priv);
-void iwl_rfkill_unregister(struct iwl_priv *priv);
-int iwl_rfkill_init(struct iwl_priv *priv);
-#else
-static inline void iwl_rfkill_set_hw_state(struct iwl_priv *priv) {}
-static inline void iwl_rfkill_unregister(struct iwl_priv *priv) {}
-static inline int iwl_rfkill_init(struct iwl_priv *priv) { return 0; }
-#endif
-
-
-
-#endif /* __iwl_rf_kill_h__ */
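
Both rfkill files are deleted outright: the driver stops registering its own rfkill device and instead relies on the wireless stack's rfkill handling introduced in this kernel cycle. What the replacement call site presumably looks like (this is an assumption: wiphy_rfkill_set_hw_state() is the cfg80211 helper from that rework and is not shown anywhere in this diff):

#include <net/cfg80211.h>
#include <net/mac80211.h>

/* Assumed successor to iwl_rfkill_set_hw_state(): report the hardware
 * kill-switch state and let cfg80211 own the rfkill device. */
static void iwl_report_hw_rfkill_sketch(struct ieee80211_hw *hw, bool blocked)
{
	wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
}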
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 8f65908f66f..2b8d40b37a1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -145,18 +145,14 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
goto exit_unlock;
}
- ret = iwl_grab_nic_access(priv);
- if (ret)
- goto exit_unlock;
-
- /* Device expects a multiple of 8 */
- iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write & ~0x7);
- iwl_release_nic_access(priv);
+ q->write_actual = (q->write & ~0x7);
+ iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
/* Else device is assumed to be awake */
} else {
/* Device expects a multiple of 8 */
- iwl_write32(priv, rx_wrt_ptr_reg, q->write & ~0x7);
+ q->write_actual = (q->write & ~0x7);
+ iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
}
q->need_update = 0;
@@ -218,7 +214,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
- if (write != (rxq->write & ~0x7)) {
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
@@ -238,7 +234,7 @@ EXPORT_SYMBOL(iwl_rx_queue_restock);
* Also restock the Rx queue via iwl_rx_queue_restock.
* This is called as a scheduled work item (except for during initialization)
*/
-void iwl_rx_allocate(struct iwl_priv *priv)
+void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
@@ -260,7 +256,8 @@ void iwl_rx_allocate(struct iwl_priv *priv)
/* Alloc a new receive buffer */
rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
- GFP_KERNEL);
+ priority);
+
if (!rxb->skb) {
IWL_CRIT(priv, "Can not allocate SKB buffers\n");
/* We don't reschedule replenish work here -- we will
@@ -295,7 +292,7 @@ void iwl_rx_replenish(struct iwl_priv *priv)
{
unsigned long flags;
- iwl_rx_allocate(priv);
+ iwl_rx_allocate(priv, GFP_KERNEL);
spin_lock_irqsave(&priv->lock, flags);
iwl_rx_queue_restock(priv);
@@ -303,6 +300,14 @@ void iwl_rx_replenish(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_rx_replenish);
+void iwl_rx_replenish_now(struct iwl_priv *priv)
+{
+ iwl_rx_allocate(priv, GFP_ATOMIC);
+
+ iwl_rx_queue_restock(priv);
+}
+EXPORT_SYMBOL(iwl_rx_replenish_now);
+
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
* If an SKB has been detached, the POOL needs to have its SKB set to NULL
@@ -358,6 +363,7 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
/* Set us so that we have processed and used all buffers, but have
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
rxq->free_count = 0;
rxq->need_update = 0;
return 0;
@@ -396,6 +402,7 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
/* Set us so that we have processed and used all buffers, but have
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
rxq->free_count = 0;
spin_unlock_irqrestore(&rxq->lock, flags);
}
@@ -403,18 +410,12 @@ EXPORT_SYMBOL(iwl_rx_queue_reset);
int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
- int ret;
- unsigned long flags;
u32 rb_size;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- const u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT why this stalls RX */
+ u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
- spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
+ if (!priv->cfg->use_isr_legacy)
+ rb_timeout = RX_RB_TIMEOUT;
if (priv->cfg->mod_params->amsdu_size_8K)
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
@@ -452,35 +453,19 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
(rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
- iwl_release_nic_access(priv);
-
iwl_write32(priv, CSR_INT_COALESCING, 0x40);
- spin_unlock_irqrestore(&priv->lock, flags);
-
return 0;
}
int iwl_rxq_stop(struct iwl_priv *priv)
{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (unlikely(ret)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
/* stop Rx DMA */
iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
return 0;
}
EXPORT_SYMBOL(iwl_rxq_stop);
@@ -582,8 +567,8 @@ void iwl_rx_statistics(struct iwl_priv *priv,
iwl_leds_background(priv);
- if (priv->cfg->ops->lib->temperature && change)
- priv->cfg->ops->lib->temperature(priv);
+ if (priv->cfg->ops->lib->temp_ops.temperature && change)
+ priv->cfg->ops->lib->temp_ops.temperature(priv);
}
EXPORT_SYMBOL(iwl_rx_statistics);
@@ -1102,13 +1087,6 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
if (rx_start->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
rx_status.flag |= RX_FLAG_SHORTPRE;
- /* Take shortcut when only in monitor mode */
- if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
- iwl_pass_packet_to_mac80211(priv, include_phy,
- rxb, &rx_status);
- return;
- }
-
network_packet = iwl_is_network_packet(priv, header);
if (network_packet) {
priv->last_rx_rssi = rx_status.signal;
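
iwl_rx_allocate() now takes a gfp_t, so one refill path serves both process context (iwl_rx_replenish(), GFP_KERNEL, may sleep) and the Rx interrupt path (the new iwl_rx_replenish_now(), GFP_ATOMIC, must not sleep). A small hedged sketch of the allocation step, assuming the same 256-byte headroom used in iwl_rx_allocate() (the helper name is hypothetical):

#include <linux/gfp.h>
#include <linux/skbuff.h>

/* Allocate one Rx buffer with the caller's context-appropriate priority. */
static struct sk_buff *iwl_alloc_rx_skb_sketch(unsigned int rx_buf_size,
					       gfp_t priority)
{
	/* 256 bytes of extra headroom, as in iwl_rx_allocate() above */
	return alloc_skb(rx_buf_size + 256, priority);
}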
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index e7c65c4f741..e26875dbe85 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -227,9 +227,6 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
/* The HW is no longer scanning */
clear_bit(STATUS_SCAN_HW, &priv->status);
- /* The scan completion notification came in, so kill that timer... */
- cancel_delayed_work(&priv->scan_check);
-
IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n",
(priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
"2.4" : "5.2",
@@ -448,13 +445,6 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
unsigned long flags;
struct iwl_priv *priv = hw->priv;
int ret;
- u8 *ssid = NULL;
- size_t ssid_len = 0;
-
- if (req->n_ssids) {
- ssid = req->ssids[0].ssid;
- ssid_len = req->ssids[0].ssid_len;
- }
IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -488,13 +478,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
- if (ssid_len) {
- priv->one_direct_scan = 1;
- priv->direct_ssid_len = ssid_len;
- memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
- } else {
- priv->one_direct_scan = 0;
- }
+ priv->scan_request = req;
ret = iwl_scan_initiate(priv);
@@ -533,73 +517,14 @@ void iwl_bg_scan_check(struct work_struct *data)
EXPORT_SYMBOL(iwl_bg_scan_check);
/**
- * iwl_supported_rate_to_ie - fill in the supported rate in IE field
- *
- * return : set the bit for each supported rate insert in ie
- */
-static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate,
- u16 basic_rate, int *left)
-{
- u16 ret_rates = 0, bit;
- int i;
- u8 *cnt = ie;
- u8 *rates = ie + 1;
-
- for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
- if (bit & supported_rate) {
- ret_rates |= bit;
- rates[*cnt] = iwl_rates[i].ieee |
- ((bit & basic_rate) ? 0x80 : 0x00);
- (*cnt)++;
- (*left)--;
- if ((*left <= 0) ||
- (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
- break;
- }
- }
-
- return ret_rates;
-}
-
-
-static void iwl_ht_cap_to_ie(const struct ieee80211_supported_band *sband,
- u8 *pos, int *left)
-{
- struct ieee80211_ht_cap *ht_cap;
-
- if (!sband || !sband->ht_cap.ht_supported)
- return;
-
- if (*left < sizeof(struct ieee80211_ht_cap))
- return;
-
- *pos++ = sizeof(struct ieee80211_ht_cap);
- ht_cap = (struct ieee80211_ht_cap *) pos;
-
- ht_cap->cap_info = cpu_to_le16(sband->ht_cap.cap);
- memcpy(&ht_cap->mcs, &sband->ht_cap.mcs, 16);
- ht_cap->ampdu_params_info =
- (sband->ht_cap.ampdu_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) |
- ((sband->ht_cap.ampdu_density << 2) &
- IEEE80211_HT_AMPDU_PARM_DENSITY);
- *left -= sizeof(struct ieee80211_ht_cap);
-}
-
-/**
* iwl_fill_probe_req - fill in all required fields and IE for probe request
*/
-u16 iwl_fill_probe_req(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_mgmt *frame,
- int left)
+u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
+ const u8 *ies, int ie_len, int left)
{
int len = 0;
u8 *pos = NULL;
- u16 active_rates, ret_rates, cck_rates, active_rate_basic;
- const struct ieee80211_supported_band *sband =
- iwl_get_hw_mode(priv, band);
-
/* Make sure there is enough space for the probe request,
* two mandatory IEs and the data */
@@ -627,62 +552,12 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv,
len += 2;
- /* fill in supported rate */
- left -= 2;
- if (left < 0)
- return 0;
+ if (WARN_ON(left < ie_len))
+ return len;
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos = 0;
-
- /* exclude 60M rate */
- active_rates = priv->rates_mask;
- active_rates &= ~IWL_RATE_60M_MASK;
-
- active_rate_basic = active_rates & IWL_BASIC_RATES_MASK;
-
- cck_rates = IWL_CCK_RATES_MASK & active_rates;
- ret_rates = iwl_supported_rate_to_ie(pos, cck_rates,
- active_rate_basic, &left);
- active_rates &= ~ret_rates;
-
- ret_rates = iwl_supported_rate_to_ie(pos, active_rates,
- active_rate_basic, &left);
- active_rates &= ~ret_rates;
-
- len += 2 + *pos;
- pos += (*pos) + 1;
-
- if (active_rates == 0)
- goto fill_end;
-
- /* fill in supported extended rate */
- /* ...next IE... */
- left -= 2;
- if (left < 0)
- return 0;
- /* ... fill it in... */
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos = 0;
- iwl_supported_rate_to_ie(pos, active_rates, active_rate_basic, &left);
- if (*pos > 0) {
- len += 2 + *pos;
- pos += (*pos) + 1;
- } else {
- pos--;
- }
-
- fill_end:
-
- left -= 2;
- if (left < 0)
- return 0;
-
- *pos++ = WLAN_EID_HT_CAPABILITY;
- *pos = 0;
- iwl_ht_cap_to_ie(sband, pos, &left);
- if (*pos > 0)
- len += 2 + *pos;
+ memcpy(pos, ies, ie_len);
+ len += ie_len;
+ left -= ie_len;
return (u16)len;
}
@@ -702,16 +577,20 @@ static void iwl_bg_request_scan(struct work_struct *data)
int ret = 0;
u32 rate_flags = 0;
u16 cmd_len;
+ u16 rx_chain = 0;
enum ieee80211_band band;
- u8 n_probes = 2;
- u8 rx_chain = priv->hw_params.valid_rx_ant;
+ u8 n_probes = 0;
+ u8 rx_ant = priv->hw_params.valid_rx_ant;
u8 rate;
- DECLARE_SSID_BUF(ssid);
+ bool is_active = false;
+ int chan_mod;
conf = ieee80211_get_hw_conf(priv->hw);
mutex_lock(&priv->mutex);
+ cancel_delayed_work(&priv->scan_check);
+
if (!iwl_is_ready(priv)) {
IWL_WARN(priv, "request scan called when driver not ready.\n");
goto done;
@@ -796,19 +675,25 @@ static void iwl_bg_request_scan(struct work_struct *data)
scan_suspend_time, interval);
}
- /* We should add the ability for user to lock to PASSIVE ONLY */
- if (priv->one_direct_scan) {
- IWL_DEBUG_SCAN(priv, "Start direct scan for '%s'\n",
- print_ssid(ssid, priv->direct_ssid,
- priv->direct_ssid_len));
- scan->direct_scan[0].id = WLAN_EID_SSID;
- scan->direct_scan[0].len = priv->direct_ssid_len;
- memcpy(scan->direct_scan[0].ssid,
- priv->direct_ssid, priv->direct_ssid_len);
- n_probes++;
- } else {
- IWL_DEBUG_SCAN(priv, "Start indirect scan.\n");
- }
+ if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
@@ -818,7 +703,9 @@ static void iwl_bg_request_scan(struct work_struct *data)
if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
band = IEEE80211_BAND_2GHZ;
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- if (priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) {
+ chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
+ >> RXON_FLG_CHANNEL_MODE_POS;
+ if (chan_mod == CHANNEL_MODE_PURE_40) {
rate = IWL_RATE_6M_PLCP;
} else {
rate = IWL_RATE_1M_PLCP;
@@ -828,13 +715,18 @@ static void iwl_bg_request_scan(struct work_struct *data)
} else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
band = IEEE80211_BAND_5GHZ;
rate = IWL_RATE_6M_PLCP;
- scan->good_CRC_th = IWL_GOOD_CRC_TH;
+ /*
+ * If active scanning is requested but a certain channel
+ * is marked passive, we can do active scanning if we
+ * detect transmissions.
+ */
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0;
/* Force use of chains B and C (0x6) for scan Rx for 4965
* Avoid A (0x1) because of its off-channel reception on A-band.
*/
if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
- rx_chain = 0x6;
+ rx_ant = ANT_BC;
} else {
IWL_WARN(priv, "Invalid scan band count\n");
goto done;
@@ -846,26 +738,27 @@ static void iwl_bg_request_scan(struct work_struct *data)
scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
/* MIMO is not used here, but value is required */
- scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
- cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
- (rx_chain << RXON_RX_CHAIN_FORCE_SEL_POS) |
- (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
-
- cmd_len = iwl_fill_probe_req(priv, band,
- (struct ieee80211_mgmt *)scan->data,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ rx_chain |= ANT_ABC << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
scan->tx_cmd.len = cpu_to_le16(cmd_len);
- if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
+ if (iwl_is_monitor_mode(priv))
scan->filter_flags = RXON_FILTER_PROMISC_MSK;
scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
RXON_FILTER_BCON_AWARE_MSK);
scan->channel_count =
- iwl_get_channels_for_scan(priv, band, 1, /* active */
- n_probes,
+ iwl_get_channels_for_scan(priv, band, is_active, n_probes,
(void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
if (scan->channel_count == 0) {
@@ -925,6 +818,8 @@ void iwl_bg_scan_completed(struct work_struct *work)
IWL_DEBUG_SCAN(priv, "SCAN complete scan\n");
+ cancel_delayed_work(&priv->scan_check);
+
ieee80211_scan_completed(priv->hw, false);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
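
The scan code now takes its SSIDs and extra IEs straight from the cfg80211_scan_request saved in priv->scan_request instead of synthesizing supported-rates and HT-capability IEs itself; iwl_fill_probe_req() simply appends req->ie verbatim. A sketch of the SSID-copy step from the hunk above, with a hypothetical helper name (struct iwl_ssid_ie is the firmware SSID element behind scan->direct_scan[]):

#include <linux/ieee80211.h>
#include <linux/string.h>
#include <net/cfg80211.h>

/* Copy the requested SSIDs into the scan command; the return value is the
 * number of directed probes to send (n_probes in the hunk above). */
static int iwl_copy_scan_ssids(struct iwl_ssid_ie *dst, int max,
			       const struct cfg80211_scan_request *req)
{
	int i, p = 0;

	for (i = 0; i < req->n_ssids && p < max; i++) {
		if (!req->ssids[i].ssid_len)
			continue;	/* wildcard scan happens anyway */
		dst[p].id = WLAN_EID_SSID;
		dst[p].len = req->ssids[i].ssid_len;
		memcpy(dst[p].ssid, req->ssids[i].ssid, req->ssids[i].ssid_len);
		p++;
	}
	return p;
}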
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 44ab03a12e4..2addf735b19 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -86,8 +86,7 @@ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
spin_lock_irqsave(&priv->sta_lock, flags);
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- !(priv->stations_39[sta_id].used & IWL_STA_DRIVER_ACTIVE))
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
IWL_ERR(priv, "ACTIVATE a non DRIVER active station %d\n",
sta_id);
@@ -228,15 +227,16 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
}
/**
- * iwl_add_station_flags - Add station to tables in driver and device
+ * iwl_add_station - Add station to tables in driver and device
*/
-u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
- u8 flags, struct ieee80211_sta_ht_cap *ht_info)
+u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
+ struct ieee80211_sta_ht_cap *ht_info)
{
- int i;
- int sta_id = IWL_INVALID_STATION;
struct iwl_station_entry *station;
unsigned long flags_spin;
+ int i;
+ int sta_id = IWL_INVALID_STATION;
+ u16 rate;
spin_lock_irqsave(&priv->sta_lock, flags_spin);
if (is_ap)
@@ -288,6 +288,12 @@ u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
priv->iw_mode != NL80211_IFTYPE_ADHOC)
iwl_set_ht_add_station(priv, sta_id, ht_info);
+ /* 3945 only */
+ rate = (priv->band == IEEE80211_BAND_5GHZ) ?
+ IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
+ /* Turn on both antennas for the station... */
+ station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
/* Add station to device's station table */
@@ -295,7 +301,7 @@ u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
return sta_id;
}
-EXPORT_SYMBOL(iwl_add_station_flags);
+EXPORT_SYMBOL(iwl_add_station);
static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
{
@@ -408,7 +414,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
/**
* iwl_remove_station - Remove driver's knowledge of station.
*/
-int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
+int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
{
int sta_id = IWL_INVALID_STATION;
int i, ret = -EINVAL;
@@ -490,7 +496,7 @@ void iwl_clear_stations_table(struct iwl_priv *priv)
/* keep track of static keys */
for (i = 0; i < WEP_KEYS_MAX ; i++) {
if (priv->wep_keys[i].key_size)
- test_and_set_bit(i, &priv->ucode_key_table);
+ set_bit(i, &priv->ucode_key_table);
}
spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -946,7 +952,7 @@ EXPORT_SYMBOL(iwl_send_lq_cmd);
* calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
* which requires station table entry to exist).
*/
-static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, int is_ap)
+static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap)
{
int i, r;
struct iwl_link_quality_cmd link_cmd = {
@@ -979,8 +985,9 @@ static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, int is_ap)
link_cmd.general_params.single_stream_ant_msk =
first_antenna(priv->hw_params.valid_tx_ant);
link_cmd.general_params.dual_stream_ant_msk = 3;
- link_cmd.agg_params.agg_dis_start_th = 3;
- link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
+ link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd.agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
/* Update the rate scaling for control frame Tx to AP */
link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
@@ -995,7 +1002,7 @@ static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, int is_ap)
* there is only one AP station with id= IWL_AP_ID
* NOTE: mutex must be held before calling this function
*/
-int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
+int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
{
struct ieee80211_sta *sta;
struct ieee80211_sta_ht_cap ht_config;
@@ -1020,8 +1027,7 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
rcu_read_unlock();
}
- sta_id = iwl_add_station_flags(priv, addr, is_ap,
- 0, cur_ht_config);
+ sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config);
/* Set up default rate scaling table in device's station table */
iwl_sta_init_lq(priv, addr, is_ap);
@@ -1067,8 +1073,8 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
return sta_id;
/* Create new station table entry */
- sta_id = iwl_add_station_flags(priv, hdr->addr1,
- 0, CMD_ASYNC, NULL);
+ sta_id = iwl_add_station(priv, hdr->addr1, false,
+ CMD_ASYNC, NULL);
if (sta_id != IWL_INVALID_STATION)
return sta_id;
@@ -1079,11 +1085,6 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
return priv->hw_params.bcast_sta_id;
- /* If we are in monitor mode, use BCAST. This is required for
- * packet injection. */
- case NL80211_IFTYPE_MONITOR:
- return priv->hw_params.bcast_sta_id;
-
default:
IWL_WARN(priv, "Unknown mode of operation: %d\n",
priv->iw_mode);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 59a586b6b56..6deebade636 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -51,16 +51,15 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
const u8 *addr, u32 iv32, u16 *phase1key);
-int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap);
-int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap);
+int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
+int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
void iwl_clear_stations_table(struct iwl_priv *priv);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
int iwl_send_add_sta(struct iwl_priv *priv,
struct iwl_addsta_cmd *sta, u8 flags);
-u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr,
- int is_ap, u8 flags,
+u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
struct ieee80211_sta_ht_cap *ht_info);
void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
int iwl_sta_rx_agg_start(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 71d5b8a1a73..85ae7a62109 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -102,13 +102,8 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
return ret;
}
- /* restore this queue's parameters in nic hardware. */
- ret = iwl_grab_nic_access(priv);
- if (ret)
- return ret;
iwl_write_direct32(priv, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
- iwl_release_nic_access(priv);
/* else not in power-save mode, uCode will never sleep when we're
* trying to tx (during RFKILL, we're not trying to tx). */
@@ -429,11 +424,6 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
goto error_kw;
}
spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (unlikely(ret)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- goto error_reset;
- }
/* Turn off all Tx DMA fifos */
priv->cfg->ops->lib->txq_set_sched(priv, 0);
@@ -441,7 +431,6 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
/* Tell NIC where to find the "keep warm" buffer */
iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
- iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->lock, flags);
/* Alloc and init all Tx queues, including the command queue (#4) */
@@ -460,7 +449,6 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
error:
iwl_hw_txq_ctx_free(priv);
- error_reset:
iwl_free_dma_ptr(priv, &priv->kw);
error_kw:
iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
@@ -478,10 +466,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
/* Turn off all Tx DMA fifos */
spin_lock_irqsave(&priv->lock, flags);
- if (iwl_grab_nic_access(priv)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return;
- }
priv->cfg->ops->lib->txq_set_sched(priv, 0);
@@ -492,7 +476,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
1000);
}
- iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->lock, flags);
/* Deallocate memory for all Tx queues */
@@ -728,7 +711,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* drop all data frame if we are not associated */
if (ieee80211_is_data(fc) &&
- (priv->iw_mode != NL80211_IFTYPE_MONITOR ||
+ (!iwl_is_monitor_mode(priv) ||
!(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
(!iwl_is_associated(priv) ||
((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
@@ -1183,8 +1166,10 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
__func__, ra, tid);
sta_id = iwl_find_station(priv, ra);
- if (sta_id == IWL_INVALID_STATION)
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Start AGG on invalid station\n");
return -ENXIO;
+ }
if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
@@ -1192,8 +1177,10 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
}
txq_id = iwl_txq_ctx_activate_free(priv);
- if (txq_id == -1)
+ if (txq_id == -1) {
+ IWL_ERR(priv, "No free aggregation queue available\n");
return -ENXIO;
+ }
spin_lock_irqsave(&priv->sta_lock, flags);
tid_data = &priv->stations[sta_id].tid[tid];
@@ -1207,7 +1194,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
return ret;
if (tid_data->tfds_in_queue == 0) {
- IWL_ERR(priv, "HW queue is empty\n");
+ IWL_DEBUG_HT(priv, "HW queue is empty\n");
tid_data->agg.state = IWL_AGG_ON;
ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
} else {
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 4cce6613350..83d31606dd0 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -95,188 +95,6 @@ struct iwl_mod_params iwl3945_mod_params = {
/* the rest are 0 by default */
};
-/*************** STATION TABLE MANAGEMENT ****
- * mac80211 should be examined to determine if sta_info is duplicating
- * the functionality provided here
- */
-
-/**************************************************************/
-#if 0 /* temporary disable till we add real remove station */
-/**
- * iwl3945_remove_station - Remove driver's knowledge of station.
- *
- * NOTE: This does not remove station from device's station table.
- */
-static u8 iwl3945_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
-{
- int index = IWL_INVALID_STATION;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- if (is_ap)
- index = IWL_AP_ID;
- else if (is_broadcast_ether_addr(addr))
- index = priv->hw_params.bcast_sta_id;
- else
- for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
- if (priv->stations_39[i].used &&
- !compare_ether_addr(priv->stations_39[i].sta.sta.addr,
- addr)) {
- index = i;
- break;
- }
-
- if (unlikely(index == IWL_INVALID_STATION))
- goto out;
-
- if (priv->stations_39[index].used) {
- priv->stations_39[index].used = 0;
- priv->num_stations--;
- }
-
- BUG_ON(priv->num_stations < 0);
-
-out:
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
-}
-#endif
-
-/**
- * iwl3945_clear_stations_table - Clear the driver's station table
- *
- * NOTE: This does not clear or otherwise alter the device's station table.
- */
-static void iwl3945_clear_stations_table(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->num_stations = 0;
- memset(priv->stations_39, 0, sizeof(priv->stations_39));
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-/**
- * iwl3945_add_station - Add station to station tables in driver and device
- */
-u8 iwl3945_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
-{
- int i;
- int index = IWL_INVALID_STATION;
- struct iwl3945_station_entry *station;
- unsigned long flags_spin;
- u8 rate;
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- if (is_ap)
- index = IWL_AP_ID;
- else if (is_broadcast_ether_addr(addr))
- index = priv->hw_params.bcast_sta_id;
- else
- for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
- if (!compare_ether_addr(priv->stations_39[i].sta.sta.addr,
- addr)) {
- index = i;
- break;
- }
-
- if (!priv->stations_39[i].used &&
- index == IWL_INVALID_STATION)
- index = i;
- }
-
- /* These two conditions has the same outcome but keep them separate
- since they have different meaning */
- if (unlikely(index == IWL_INVALID_STATION)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return index;
- }
-
- if (priv->stations_39[index].used &&
- !compare_ether_addr(priv->stations_39[index].sta.sta.addr, addr)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return index;
- }
-
- IWL_DEBUG_ASSOC(priv, "Add STA ID %d: %pM\n", index, addr);
- station = &priv->stations_39[index];
- station->used = 1;
- priv->num_stations++;
-
- /* Set up the REPLY_ADD_STA command to send to device */
- memset(&station->sta, 0, sizeof(struct iwl3945_addsta_cmd));
- memcpy(station->sta.sta.addr, addr, ETH_ALEN);
- station->sta.mode = 0;
- station->sta.sta.sta_id = index;
- station->sta.station_flags = 0;
-
- if (priv->band == IEEE80211_BAND_5GHZ)
- rate = IWL_RATE_6M_PLCP;
- else
- rate = IWL_RATE_1M_PLCP;
-
- /* Turn on both antennas for the station... */
- station->sta.rate_n_flags =
- iwl3945_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- /* Add station to device's station table */
- iwl_send_add_sta(priv,
- (struct iwl_addsta_cmd *)&station->sta, flags);
- return index;
-
-}
-
-static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
-{
- int rc = 0;
- struct iwl_rx_packet *res = NULL;
- struct iwl3945_rxon_assoc_cmd rxon_assoc;
- struct iwl_host_cmd cmd = {
- .id = REPLY_RXON_ASSOC,
- .len = sizeof(rxon_assoc),
- .meta.flags = CMD_WANT_SKB,
- .data = &rxon_assoc,
- };
- const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
- const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
-
- if ((rxon1->flags == rxon2->flags) &&
- (rxon1->filter_flags == rxon2->filter_flags) &&
- (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
- (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
- IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
- return 0;
- }
-
- rxon_assoc.flags = priv->staging_rxon.flags;
- rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
- rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
- rxon_assoc.reserved = 0;
-
- rc = iwl_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
- if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
- rc = -EIO;
- }
-
- priv->alloc_rxb_skb--;
- dev_kfree_skb_any(cmd.meta.u.skb);
-
- return rc;
-}
-
/**
* iwl3945_get_antenna_flags - Get antenna flags for RXON command
* @priv: eeprom and antenna fields are used to determine antenna flags
@@ -314,150 +132,6 @@ __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
return 0; /* "diversity" is default if error */
}
-/**
- * iwl3945_commit_rxon - commit staging_rxon to hardware
- *
- * The RXON command in staging_rxon is committed to the hardware and
- * the active_rxon structure is updated with the new data. This
- * function correctly transitions out of the RXON_ASSOC_MSK state if
- * a HW tune is required based on the RXON structure changes.
- */
-static int iwl3945_commit_rxon(struct iwl_priv *priv)
-{
- /* cast away the const for active_rxon in this function */
- struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
- struct iwl3945_rxon_cmd *staging_rxon = (void *)&priv->staging_rxon;
- int rc = 0;
- bool new_assoc =
- !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK);
-
- if (!iwl_is_alive(priv))
- return -1;
-
- /* always get timestamp with Rx frame */
- staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
-
- /* select antenna */
- staging_rxon->flags &=
- ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
- staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
-
- rc = iwl_check_rxon_cmd(priv);
- if (rc) {
- IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
- return -EINVAL;
- }
-
- /* If we don't need to send a full RXON, we can use
- * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
- * and other flags for the current radio configuration. */
- if (!iwl_full_rxon_required(priv)) {
- rc = iwl3945_send_rxon_assoc(priv);
- if (rc) {
- IWL_ERR(priv, "Error setting RXON_ASSOC "
- "configuration (%d).\n", rc);
- return rc;
- }
-
- memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
- return 0;
- }
-
- /* If we are currently associated and the new config requires
- * an RXON_ASSOC and the new config wants the association mask enabled,
- * we must clear the association from the active configuration
- * before we apply the new config */
- if (iwl_is_associated(priv) && new_assoc) {
- IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
- /*
- * reserved4 and 5 could have been filled by the iwlcore code.
- * Let's clear them before pushing to the 3945.
- */
- active_rxon->reserved4 = 0;
- active_rxon->reserved5 = 0;
- rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
- sizeof(struct iwl3945_rxon_cmd),
- &priv->active_rxon);
-
- /* If the mask clearing failed then we set
- * active_rxon back to what it was previously */
- if (rc) {
- active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
- IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
- "configuration (%d).\n", rc);
- return rc;
- }
- }
-
- IWL_DEBUG_INFO(priv, "Sending RXON\n"
- "* with%s RXON_FILTER_ASSOC_MSK\n"
- "* channel = %d\n"
- "* bssid = %pM\n",
- (new_assoc ? "" : "out"),
- le16_to_cpu(staging_rxon->channel),
- staging_rxon->bssid_addr);
-
- /*
- * reserved4 and 5 could have been filled by the iwlcore code.
- * Let's clear them before pushing to the 3945.
- */
- staging_rxon->reserved4 = 0;
- staging_rxon->reserved5 = 0;
-
- iwl_set_rxon_hwcrypto(priv, !priv->hw_params.sw_crypto);
-
- /* Apply the new configuration */
- rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
- sizeof(struct iwl3945_rxon_cmd),
- staging_rxon);
- if (rc) {
- IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
- return rc;
- }
-
- memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
- iwl3945_clear_stations_table(priv);
-
- /* If we issue a new RXON command which required a tune then we must
- * send a new TXPOWER command or we won't be able to Tx any frames */
- rc = priv->cfg->ops->lib->send_tx_power(priv);
- if (rc) {
- IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
- return rc;
- }
-
- /* Add the broadcast address so we can send broadcast frames */
- if (iwl3945_add_station(priv, iwl_bcast_addr, 0, 0) ==
- IWL_INVALID_STATION) {
- IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
- return -EIO;
- }
-
- /* If we have set the ASSOC_MSK and we are in BSS mode then
- * add the IWL_AP_ID to the station rate table */
- if (iwl_is_associated(priv) &&
- (priv->iw_mode == NL80211_IFTYPE_STATION))
- if (iwl3945_add_station(priv, priv->active_rxon.bssid_addr,
- 1, 0)
- == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Error adding AP address for transmit\n");
- return -EIO;
- }
-
- /* Init the hardware's rate fallback order based on the band */
- rc = iwl3945_init_hw_rate_table(priv);
- if (rc) {
- IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
- return -EIO;
- }
-
- return 0;
-}
-
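Since the whole function is dropped in favour of the shared iwlcore_commit_rxon(), here is a condensed sketch of the flow it implemented, kept for orientation; the assumption that the shared helper follows the same outline is not stated in this diff. The shape is: a lightweight RXON_ASSOC when no retune is needed, otherwise a full RXON followed by TX power, broadcast-station and rate-table setup.

static int commit_rxon_outline(struct iwl_priv *priv)	/* illustrative name */
{
	struct iwl3945_rxon_cmd *staging = (void *)&priv->staging_rxon;

	/* filter/rate flags only: no retune, no station table rebuild */
	if (!iwl_full_rxon_required(priv))
		return iwl3945_send_rxon_assoc(priv);

	/* full commit: the tune invalidates device-side state */
	if (iwl_send_cmd_pdu(priv, REPLY_RXON,
			     sizeof(struct iwl3945_rxon_cmd), staging))
		return -EIO;

	priv->cfg->ops->lib->send_tx_power(priv);	/* must follow a tune */
	iwl3945_add_station(priv, iwl_bcast_addr, 0, 0);
	return iwl3945_init_hw_rate_table(priv);
}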
static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
@@ -477,32 +151,31 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
key_flags &= ~STA_KEY_FLG_INVALID;
spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations_39[sta_id].keyinfo.alg = keyconf->alg;
- priv->stations_39[sta_id].keyinfo.keylen = keyconf->keylen;
- memcpy(priv->stations_39[sta_id].keyinfo.key, keyconf->key,
+ priv->stations[sta_id].keyinfo.alg = keyconf->alg;
+ priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+ memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
keyconf->keylen);
- memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key,
+ memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
keyconf->keylen);
- if ((priv->stations_39[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+ if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
== STA_KEY_FLG_NO_ENC)
- priv->stations_39[sta_id].sta.key.key_offset =
+ priv->stations[sta_id].sta.key.key_offset =
iwl_get_free_ucode_key_index(priv);
/* else, we are overriding an existing key => no need to allocate room
* in uCode. */
- WARN(priv->stations_39[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
"no space for a new key");
- priv->stations_39[sta_id].sta.key.key_flags = key_flags;
- priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ priv->stations[sta_id].sta.key.key_flags = key_flags;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
- ret = iwl_send_add_sta(priv,
- (struct iwl_addsta_cmd *)&priv->stations_39[sta_id].sta, CMD_ASYNC);
+ ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -528,17 +201,16 @@ static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
unsigned long flags;
spin_lock_irqsave(&priv->sta_lock, flags);
- memset(&priv->stations_39[sta_id].keyinfo, 0, sizeof(struct iwl3945_hw_key));
- memset(&priv->stations_39[sta_id].sta.key, 0,
+ memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
+ memset(&priv->stations[sta_id].sta.key, 0,
sizeof(struct iwl4965_keyinfo));
- priv->stations_39[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
- priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
spin_unlock_irqrestore(&priv->sta_lock, flags);
IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
- iwl_send_add_sta(priv,
- (struct iwl_addsta_cmd *)&priv->stations_39[sta_id].sta, 0);
+ iwl_send_add_sta(priv, &priv->stations[sta_id].sta, 0);
return 0;
}
@@ -739,7 +411,8 @@ static void iwl3945_setup_rxon_timing(struct iwl_priv *priv)
priv->rxon_timing.atim_window = 0;
} else {
priv->rxon_timing.beacon_interval =
- iwl3945_adjust_beacon_interval(conf->beacon_int);
+ iwl3945_adjust_beacon_interval(
+ priv->vif->bss_conf.beacon_int);
/* TODO: we need to get atim_window from upper stack
* for now we set to 0 */
priv->rxon_timing.atim_window = 0;
@@ -758,42 +431,6 @@ static void iwl3945_setup_rxon_timing(struct iwl_priv *priv)
le16_to_cpu(priv->rxon_timing.atim_window));
}
-static int iwl3945_set_mode(struct iwl_priv *priv, int mode)
-{
- if (mode == NL80211_IFTYPE_ADHOC) {
- const struct iwl_channel_info *ch_info;
-
- ch_info = iwl_get_channel_info(priv,
- priv->band,
- le16_to_cpu(priv->staging_rxon.channel));
-
- if (!ch_info || !is_channel_ibss(ch_info)) {
- IWL_ERR(priv, "channel %d not IBSS channel\n",
- le16_to_cpu(priv->staging_rxon.channel));
- return -EINVAL;
- }
- }
-
- iwl_connection_init_rx_config(priv, mode);
-
- iwl3945_clear_stations_table(priv);
-
- /* don't commit rxon if rf-kill is on */
- if (!iwl_is_ready_rf(priv))
- return -EAGAIN;
-
- cancel_delayed_work(&priv->scan_check);
- if (iwl_scan_cancel_timeout(priv, 100)) {
- IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
- IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
- return -EAGAIN;
- }
-
- iwl3945_commit_rxon(priv);
-
- return 0;
-}
-
static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
struct ieee80211_tx_info *info,
struct iwl_cmd *cmd,
@@ -801,8 +438,7 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
int sta_id)
{
struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
- struct iwl3945_hw_key *keyinfo =
- &priv->stations_39[sta_id].keyinfo;
+ struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
switch (keyinfo->alg) {
case ALG_CCMP:
@@ -900,64 +536,6 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
tx->next_frame_len = 0;
}
-/**
- * iwl3945_get_sta_id - Find station's index within station table
- */
-static int iwl3945_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
-{
- int sta_id;
- u16 fc = le16_to_cpu(hdr->frame_control);
-
- /* If this frame is broadcast or management, use broadcast station id */
- if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
- is_multicast_ether_addr(hdr->addr1))
- return priv->hw_params.bcast_sta_id;
-
- switch (priv->iw_mode) {
-
- /* If we are a client station in a BSS network, use the special
- * AP station entry (that's the only station we communicate with) */
- case NL80211_IFTYPE_STATION:
- return IWL_AP_ID;
-
- /* If we are an AP, then find the station, or use BCAST */
- case NL80211_IFTYPE_AP:
- sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
- if (sta_id != IWL_INVALID_STATION)
- return sta_id;
- return priv->hw_params.bcast_sta_id;
-
- /* If this frame is going out to an IBSS network, find the station,
- * or create a new station table entry */
- case NL80211_IFTYPE_ADHOC: {
- /* Create new station table entry */
- sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
- if (sta_id != IWL_INVALID_STATION)
- return sta_id;
-
- sta_id = iwl3945_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
-
- if (sta_id != IWL_INVALID_STATION)
- return sta_id;
-
- IWL_DEBUG_DROP(priv, "Station %pM not in station map. "
- "Defaulting to broadcast...\n",
- hdr->addr1);
- iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
- return priv->hw_params.bcast_sta_id;
- }
- /* If we are in monitor mode, use BCAST. This is required for
- * packet injection. */
- case NL80211_IFTYPE_MONITOR:
- return priv->hw_params.bcast_sta_id;
-
- default:
- IWL_WARN(priv, "Unknown mode of operation: %d\n",
- priv->iw_mode);
- return priv->hw_params.bcast_sta_id;
- }
-}
-
/*
* start REPLY_TX command process
*/
@@ -1011,7 +589,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* drop all data frame if we are not associated */
if (ieee80211_is_data(fc) &&
- (priv->iw_mode != NL80211_IFTYPE_MONITOR) && /* packet injection */
+ (!iwl_is_monitor_mode(priv)) && /* packet injection */
(!iwl_is_associated(priv) ||
((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
@@ -1023,7 +601,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr_len = ieee80211_hdrlen(fc);
/* Find (or create) index into station table for destination station */
- sta_id = iwl3945_get_sta_id(priv, hdr);
+ sta_id = iwl_get_sta_id(priv, hdr);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
@@ -1035,7 +613,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (ieee80211_is_data_qos(fc)) {
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- seq_number = priv->stations_39[sta_id].tid[tid].seq_number &
+ seq_number = priv->stations[sta_id].tid[tid].seq_number &
IEEE80211_SCTL_SEQ;
hdr->seq_ctrl = cpu_to_le16(seq_number) |
(hdr->seq_ctrl &
@@ -1095,7 +673,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
if (qc)
- priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
+ priv->stations[sta_id].tid[tid].seq_number = seq_number;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
@@ -1431,18 +1009,12 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
clear_bit(STATUS_RF_KILL_HW, &priv->status);
- if (flags & SW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_SW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_SW, &priv->status);
-
iwl_scan_cancel(priv);
if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
- (test_bit(STATUS_RF_KILL_SW, &status) !=
- test_bit(STATUS_RF_KILL_SW, &priv->status)))
- queue_work(priv->workqueue, &priv->rf_kill);
+ test_bit(STATUS_RF_KILL_HW, &priv->status)))
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
else
wake_up_interruptible(&priv->wait_command_queue);
}
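The hunk above replaces the driver-private STATUS_RF_KILL_SW bit and the rf_kill work item with a direct report to the rfkill core. A minimal sketch of the reporting pattern the driver switches to (the helper name is illustrative):

static void report_hw_rfkill(struct iwl_priv *priv, bool killed)
{
	if (killed)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	/* "true" means the radio is blocked by the hardware switch;
	 * user-visible state is handled by the rfkill core from here on */
	wiphy_rfkill_set_hw_state(priv->hw->wiphy, killed);
}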
@@ -1598,7 +1170,7 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
- if ((write != (rxq->write & ~0x7))
+ if ((rxq->write_actual != (rxq->write & ~0x7))
|| (abs(rxq->write - rxq->read) > 7)) {
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
@@ -1619,21 +1191,30 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
* Also restock the Rx queue via iwl3945_rx_queue_restock.
* This is called as a scheduled work item (except during initialization)
*/
-static void iwl3945_rx_allocate(struct iwl_priv *priv)
+static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
- spin_lock_irqsave(&rxq->lock, flags);
- while (!list_empty(&rxq->rx_used)) {
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+
element = rxq->rx_used.next;
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+ spin_unlock_irqrestore(&rxq->lock, flags);
/* Alloc a new receive buffer */
rxb->skb =
alloc_skb(priv->hw_params.rx_buf_size,
- __GFP_NOWARN | GFP_ATOMIC);
+ priority);
if (!rxb->skb) {
if (net_ratelimit())
IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
@@ -1651,18 +1232,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv)
*/
skb_reserve(rxb->skb, 4);
- priv->alloc_rxb_skb++;
- list_del(element);
-
/* Get physical address of RB/SKB */
rxb->real_dma_addr = pci_map_single(priv->pci_dev,
rxb->skb->data,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
+
+ spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &rxq->rx_free);
+ priv->alloc_rxb_skb++;
rxq->free_count++;
+ spin_unlock_irqrestore(&rxq->lock, flags);
}
- spin_unlock_irqrestore(&rxq->lock, flags);
}
void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
@@ -1692,33 +1273,30 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
rxq->free_count = 0;
+ rxq->write_actual = 0;
spin_unlock_irqrestore(&rxq->lock, flags);
}
-/*
- * this should be called while priv->lock is locked
- */
-static void __iwl3945_rx_replenish(void *data)
-{
- struct iwl_priv *priv = data;
-
- iwl3945_rx_allocate(priv);
- iwl3945_rx_queue_restock(priv);
-}
-
-
void iwl3945_rx_replenish(void *data)
{
struct iwl_priv *priv = data;
unsigned long flags;
- iwl3945_rx_allocate(priv);
+ iwl3945_rx_allocate(priv, GFP_KERNEL);
spin_lock_irqsave(&priv->lock, flags);
iwl3945_rx_queue_restock(priv);
spin_unlock_irqrestore(&priv->lock, flags);
}
+static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
+{
+ iwl3945_rx_allocate(priv, GFP_ATOMIC);
+
+ iwl3945_rx_queue_restock(priv);
+}
+
+
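The allocator above now takes a gfp_t so one implementation can serve both contexts, and it drops rxq->lock around alloc_skb()/pci_map_single() so a sleeping allocation never runs under the spinlock. A sketch of how the two entry points are meant to be used (the wrapper is illustrative only):

static void replenish_usage(struct iwl_priv *priv)
{
	/* process context (the rx_replenish work item): may sleep */
	iwl3945_rx_replenish(priv);	/* -> iwl3945_rx_allocate(priv, GFP_KERNEL) */

	/* RX tasklet path while handling interrupts: must not sleep */
	iwl3945_rx_replenish_now(priv);	/* -> iwl3945_rx_allocate(priv, GFP_ATOMIC) */
}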
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
* If an SKB has been detached, the POOL needs to have its SKB set to NULL
* This free routine walks the list of POOL entries and if SKB is set to
@@ -1841,13 +1419,19 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
unsigned long flags;
u8 fill_rx = 0;
u32 count = 8;
+ int total_empty = 0;
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
i = rxq->read;
- if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
+ /* calculate how many frames need to be restocked after handling RX */
+ total_empty = r - priv->rxq.write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
fill_rx = 1;
/* Rx interrupt, but nothing sent from uCode */
if (i == r)
@@ -1886,6 +1470,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
"r = %d, i = %d, %s, 0x%02x\n", r, i,
get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
+ priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
} else {
/* No handling needed */
IWL_DEBUG(priv, IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
@@ -1923,7 +1508,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
count++;
if (count >= 8) {
priv->rxq.read = i;
- __iwl3945_rx_replenish(priv);
+ iwl3945_rx_replenish_now(priv);
count = 0;
}
}
@@ -1931,7 +1516,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
/* Backtrack one entry */
priv->rxq.read = i;
- iwl3945_rx_queue_restock(priv);
+ if (fill_rx)
+ iwl3945_rx_replenish_now(priv);
+ else
+ iwl3945_rx_queue_restock(priv);
}
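The restock decision above measures how many slots the driver owes the device relative to the last committed write pointer, wrapping modulo the ring size. A worked micro-example, assuming RX_QUEUE_SIZE is 256 as in the iwlwifi RX ring (helper name illustrative):

static int rx_slots_to_restock(int r, int write_actual)
{
	int total_empty = r - write_actual;

	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;	/* ring wrap-around */

	/* e.g. r = 10, write_actual = 250: 10 - 250 + 256 = 16 empty slots,
	 * below RX_QUEUE_SIZE / 2, so no immediate GFP_ATOMIC replenish */
	return total_empty;
}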
/* call this function to flush any scheduled tasklet */
@@ -1970,7 +1558,6 @@ static void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
u32 i;
u32 desc, time, count, base, data1;
u32 blink1, blink2, ilink1, ilink2;
- int rc;
base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
@@ -1979,11 +1566,6 @@ static void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
return;
}
- rc = iwl_grab_nic_access(priv);
- if (rc) {
- IWL_WARN(priv, "Can not read from adapter at this time.\n");
- return;
- }
count = iwl_read_targ_mem(priv, base);
@@ -2018,8 +1600,6 @@ static void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
ilink1, ilink2, data1);
}
- iwl_release_nic_access(priv);
-
}
#define EVENT_START_OFFSET (6 * sizeof(u32))
@@ -2027,7 +1607,6 @@ static void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
/**
* iwl3945_print_event_log - Dump error event log to syslog
*
- * NOTE: Must be called with iwl_grab_nic_access() already obtained!
*/
static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
u32 num_events, u32 mode)
@@ -2070,7 +1649,6 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
{
- int rc;
u32 base; /* SRAM byte address of event log header */
u32 capacity; /* event log capacity in # entries */
u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
@@ -2084,12 +1662,6 @@ static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
return;
}
- rc = iwl_grab_nic_access(priv);
- if (rc) {
- IWL_WARN(priv, "Can not read from adapter at this time.\n");
- return;
- }
-
/* event log header */
capacity = iwl_read_targ_mem(priv, base);
mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
@@ -2101,7 +1673,6 @@ static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
/* bail out if nothing in log */
if (size == 0) {
IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- iwl_release_nic_access(priv);
return;
}
@@ -2117,24 +1688,6 @@ static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
/* (then/else) start at top of log */
iwl3945_print_event_log(priv, 0, next_entry, mode);
- iwl_release_nic_access(priv);
-}
-
-static void iwl3945_error_recovery(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- memcpy(&priv->staging_rxon, &priv->recovery_rxon,
- sizeof(priv->staging_rxon));
- priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv);
-
- iwl3945_add_station(priv, priv->bssid, 1, 0);
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
- priv->error_recovering = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
}
static void iwl3945_irq_tasklet(struct iwl_priv *priv)
@@ -2185,6 +1738,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
/* Tell the device to stop sending interrupts */
iwl_disable_interrupts(priv);
+ priv->isr_stats.hw++;
iwl_irq_handle_error(priv);
handled |= CSR_INT_BIT_HW_ERR;
@@ -2197,13 +1751,17 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
#ifdef CONFIG_IWLWIFI_DEBUG
if (priv->debug_level & (IWL_DL_ISR)) {
/* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD)
+ if (inta & CSR_INT_BIT_SCD) {
IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
"the frame/frames.\n");
+ priv->isr_stats.sch++;
+ }
/* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE)
+ if (inta & CSR_INT_BIT_ALIVE) {
IWL_DEBUG_ISR(priv, "Alive interrupt\n");
+ priv->isr_stats.alive++;
+ }
}
#endif
/* Safely ignore these bits for debug checks below */
@@ -2213,6 +1771,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
if (inta & CSR_INT_BIT_SW_ERR) {
IWL_ERR(priv, "Microcode SW error detected. "
"Restarting 0x%X.\n", inta);
+ priv->isr_stats.sw++;
+ priv->isr_stats.sw_err = inta;
iwl_irq_handle_error(priv);
handled |= CSR_INT_BIT_SW_ERR;
}
@@ -2228,6 +1788,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
iwl_txq_update_write_ptr(priv, &priv->txq[4]);
iwl_txq_update_write_ptr(priv, &priv->txq[5]);
+ priv->isr_stats.wakeup++;
handled |= CSR_INT_BIT_WAKEUP;
}
@@ -2236,27 +1797,28 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
* notifications from uCode come through here */
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
iwl3945_rx_handle(priv);
+ priv->isr_stats.rx++;
handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
}
if (inta & CSR_INT_BIT_FH_TX) {
IWL_DEBUG_ISR(priv, "Tx interrupt\n");
+ priv->isr_stats.tx++;
iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
- if (!iwl_grab_nic_access(priv)) {
- iwl_write_direct32(priv, FH39_TCSR_CREDIT
- (FH39_SRVC_CHNL), 0x0);
- iwl_release_nic_access(priv);
- }
+ iwl_write_direct32(priv, FH39_TCSR_CREDIT
+ (FH39_SRVC_CHNL), 0x0);
handled |= CSR_INT_BIT_FH_TX;
}
- if (inta & ~handled)
+ if (inta & ~handled) {
IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ priv->isr_stats.unhandled++;
+ }
- if (inta & ~CSR_INI_SET_MASK) {
+ if (inta & ~priv->inta_mask) {
IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~CSR_INI_SET_MASK);
+ inta & ~priv->inta_mask);
IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
}
@@ -2420,10 +1982,6 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
- rc = iwl_grab_nic_access(priv);
- if (rc)
- return rc;
-
iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
IWL39_RTC_INST_LOWER_BOUND);
@@ -2444,7 +2002,6 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
}
}
- iwl_release_nic_access(priv);
if (!errcnt)
IWL_DEBUG_INFO(priv,
@@ -2468,10 +2025,6 @@ static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
- rc = iwl_grab_nic_access(priv);
- if (rc)
- return rc;
-
for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
@@ -2492,8 +2045,6 @@ static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
}
}
- iwl_release_nic_access(priv);
-
return rc;
}
@@ -2817,20 +2368,11 @@ static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
{
dma_addr_t pinst;
dma_addr_t pdata;
- int rc = 0;
- unsigned long flags;
/* bits 31:0 for 3945 */
pinst = priv->ucode_code.p_addr;
pdata = priv->ucode_data_backup.p_addr;
- spin_lock_irqsave(&priv->lock, flags);
- rc = iwl_grab_nic_access(priv);
- if (rc) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return rc;
- }
-
/* Tell bootstrap uCode where to find image to load */
iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
@@ -2842,13 +2384,9 @@ static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
priv->ucode_code.len | BSM_DRAM_INST_LOAD);
- iwl_release_nic_access(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
- return rc;
+ return 0;
}
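Several hunks above drop the iwl_grab_nic_access()/iwl_release_nic_access() bracketing (and the spinlock that guarded it) around target-memory reads and PRPH/direct register writes. A sketch of the resulting call shape, under the assumption, not stated in this diff, that the shared register helpers now handle NIC wake-up internally (function name illustrative):

static void point_bsm_at_runtime_ucode(struct iwl_priv *priv)
{
	/* tell bootstrap uCode where the runtime image lives; no explicit
	 * grab/release or spinlock at the call site any more */
	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, priv->ucode_code.p_addr);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG,
		       priv->ucode_data_backup.p_addr);
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
		       priv->ucode_code.len | BSM_DRAM_INST_LOAD);
}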
/**
@@ -2894,11 +2432,6 @@ static void iwl3945_init_alive_start(struct iwl_priv *priv)
queue_work(priv->workqueue, &priv->restart);
}
-
-/* temporary */
-static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw,
- struct sk_buff *skb);
-
/**
* iwl3945_alive_start - called after REPLY_ALIVE notification received
* from protocol/runtime uCode (initialization uCode's
@@ -2906,7 +2439,6 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw,
*/
static void iwl3945_alive_start(struct iwl_priv *priv)
{
- int rc = 0;
int thermal_spin = 0;
u32 rfkill;
@@ -2929,17 +2461,10 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
goto restart;
}
- iwl3945_clear_stations_table(priv);
-
- rc = iwl_grab_nic_access(priv);
- if (rc) {
- IWL_WARN(priv, "Can not read RFKILL status from adapter\n");
- return;
- }
+ iwl_clear_stations_table(priv);
rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
- iwl_release_nic_access(priv);
if (rfkill & 0x1) {
clear_bit(STATUS_RF_KILL_HW, &priv->status);
@@ -2959,9 +2484,6 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
/* After the ALIVE response, we can send commands to 3945 uCode */
set_bit(STATUS_ALIVE, &priv->status);
- /* Clear out the uCode error bit if it is set */
- clear_bit(STATUS_FW_ERROR, &priv->status);
-
if (iwl_is_rfkill(priv))
return;
@@ -2988,7 +2510,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
iwl_send_bt_config(priv);
/* Configure the adapter for unassociated operation */
- iwl3945_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
iwl3945_reg_txpower_periodic(priv);
@@ -2998,17 +2520,17 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
set_bit(STATUS_READY, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
- if (priv->error_recovering)
- iwl3945_error_recovery(priv);
-
/* reassociate for ADHOC mode */
if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
priv->vif);
if (beacon)
- iwl3945_mac_beacon_update(priv->hw, beacon);
+ iwl_mac_beacon_update(priv->hw, beacon);
}
+ if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
+ iwl_set_mode(priv, priv->iw_mode);
+
return;
restart:
@@ -3031,7 +2553,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
set_bit(STATUS_EXIT_PENDING, &priv->status);
iwl3945_led_unregister(priv);
- iwl3945_clear_stations_table(priv);
+ iwl_clear_stations_table(priv);
/* Unblock any waiting calls */
wake_up_interruptible_all(&priv->wait_command_queue);
@@ -3054,31 +2576,23 @@ static void __iwl3945_down(struct iwl_priv *priv)
ieee80211_stop_queues(priv->hw);
/* If we have not previously called iwl3945_init() then
- * clear all bits but the RF Kill and SUSPEND bits and return */
+ * clear all bits but the RF Kill bits and return */
if (!iwl_is_init(priv)) {
priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
STATUS_RF_KILL_HW |
- test_bit(STATUS_RF_KILL_SW, &priv->status) <<
- STATUS_RF_KILL_SW |
test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
STATUS_GEO_CONFIGURED |
- test_bit(STATUS_IN_SUSPEND, &priv->status) <<
- STATUS_IN_SUSPEND |
test_bit(STATUS_EXIT_PENDING, &priv->status) <<
STATUS_EXIT_PENDING;
goto exit;
}
- /* ...otherwise clear out all the status bits but the RF Kill and
- * SUSPEND bits and continue taking the NIC down. */
+ /* ...otherwise clear out all the status bits but the RF Kill
+ * bit and continue taking the NIC down. */
priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
STATUS_RF_KILL_HW |
- test_bit(STATUS_RF_KILL_SW, &priv->status) <<
- STATUS_RF_KILL_SW |
test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
STATUS_GEO_CONFIGURED |
- test_bit(STATUS_IN_SUSPEND, &priv->status) <<
- STATUS_IN_SUSPEND |
test_bit(STATUS_FW_ERROR, &priv->status) <<
STATUS_FW_ERROR |
test_bit(STATUS_EXIT_PENDING, &priv->status) <<
@@ -3092,17 +2606,12 @@ static void __iwl3945_down(struct iwl_priv *priv)
iwl3945_hw_txq_ctx_stop(priv);
iwl3945_hw_rxq_stop(priv);
- spin_lock_irqsave(&priv->lock, flags);
- if (!iwl_grab_nic_access(priv)) {
- iwl_write_prph(priv, APMG_CLK_DIS_REG,
- APMG_CLK_VAL_DMA_CLK_RQT);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
+ iwl_write_prph(priv, APMG_CLK_DIS_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
- if (exit_pending || test_bit(STATUS_IN_SUSPEND, &priv->status))
+ if (exit_pending)
priv->cfg->ops->lib->apm_ops.stop(priv);
else
priv->cfg->ops->lib->apm_ops.reset(priv);
@@ -3138,12 +2647,6 @@ static int __iwl3945_up(struct iwl_priv *priv)
return -EIO;
}
- if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
- IWL_WARN(priv, "Radio disabled by SW RF kill (module "
- "parameter)\n");
- return -ENODEV;
- }
-
if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
IWL_ERR(priv, "ucode not available for device bring up\n");
return -EIO;
@@ -3155,10 +2658,8 @@ static int __iwl3945_up(struct iwl_priv *priv)
clear_bit(STATUS_RF_KILL_HW, &priv->status);
else {
set_bit(STATUS_RF_KILL_HW, &priv->status);
- if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
- IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
- return -ENODEV;
- }
+ IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
+ return -ENODEV;
}
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
@@ -3194,7 +2695,7 @@ static int __iwl3945_up(struct iwl_priv *priv)
for (i = 0; i < MAX_HW_RESTARTS; i++) {
- iwl3945_clear_stations_table(priv);
+ iwl_clear_stations_table(priv);
/* load bootstrap state machine,
* load bootstrap program into processor's memory,
@@ -3262,15 +2763,14 @@ static void iwl3945_rfkill_poll(struct work_struct *data)
{
struct iwl_priv *priv =
container_of(data, struct iwl_priv, rfkill_poll.work);
- unsigned long status = priv->status;
if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
clear_bit(STATUS_RF_KILL_HW, &priv->status);
else
set_bit(STATUS_RF_KILL_HW, &priv->status);
- if (test_bit(STATUS_RF_KILL_HW, &status) != test_bit(STATUS_RF_KILL_HW, &priv->status))
- queue_work(priv->workqueue, &priv->rf_kill);
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
round_jiffies_relative(2 * HZ));
@@ -3290,14 +2790,16 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
int rc = 0;
struct iwl3945_scan_cmd *scan;
struct ieee80211_conf *conf = NULL;
- u8 n_probes = 2;
+ u8 n_probes = 0;
enum ieee80211_band band;
- DECLARE_SSID_BUF(ssid);
+ bool is_active = false;
conf = ieee80211_get_hw_conf(priv->hw);
mutex_lock(&priv->mutex);
+ cancel_delayed_work(&priv->scan_check);
+
if (!iwl_is_ready(priv)) {
IWL_WARN(priv, "request scan called when driver not ready.\n");
goto done;
@@ -3391,18 +2893,25 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
scan_suspend_time, interval);
}
- /* We should add the ability for user to lock to PASSIVE ONLY */
- if (priv->one_direct_scan) {
- IWL_DEBUG_SCAN(priv, "Kicking off one direct scan for '%s'\n",
- print_ssid(ssid, priv->direct_ssid,
- priv->direct_ssid_len));
- scan->direct_scan[0].id = WLAN_EID_SSID;
- scan->direct_scan[0].len = priv->direct_ssid_len;
- memcpy(scan->direct_scan[0].ssid,
- priv->direct_ssid, priv->direct_ssid_len);
- n_probes++;
+ if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
} else
- IWL_DEBUG_SCAN(priv, "Kicking off one indirect scan.\n");
+ IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
/* We don't build a direct scan probe request; the uCode will do
* that based on the direct_mask added to each channel entry */
@@ -3419,7 +2928,12 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
band = IEEE80211_BAND_2GHZ;
} else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
- scan->good_CRC_th = IWL_GOOD_CRC_TH;
+ /*
+ * If active scanning is requested but a certain channel
+ * is marked passive, we can do active scanning if we
+ * detect transmissions.
+ */
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0;
band = IEEE80211_BAND_5GHZ;
} else {
IWL_WARN(priv, "Invalid scan band count\n");
@@ -3427,19 +2941,20 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
}
scan->tx_cmd.len = cpu_to_le16(
- iwl_fill_probe_req(priv, band,
- (struct ieee80211_mgmt *)scan->data,
- IWL_MAX_SCAN_SIZE - sizeof(*scan)));
+ iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan)));
/* select Rx antennas */
scan->flags |= iwl3945_get_antenna_flags(priv);
- if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
+ if (iwl_is_monitor_mode(priv))
scan->filter_flags = RXON_FILTER_PROMISC_MSK;
scan->channel_count =
- iwl3945_get_channels_for_scan(priv, band, 1, /* active */
- n_probes,
+ iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
(void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
if (scan->channel_count == 0) {
@@ -3487,7 +3002,6 @@ static void iwl3945_bg_up(struct work_struct *data)
mutex_lock(&priv->mutex);
__iwl3945_up(priv);
mutex_unlock(&priv->mutex);
- iwl_rfkill_set_hw_state(priv);
}
static void iwl3945_bg_restart(struct work_struct *data)
@@ -3497,8 +3011,17 @@ static void iwl3945_bg_restart(struct work_struct *data)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- iwl3945_down(priv);
- queue_work(priv->workqueue, &priv->up);
+ if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
+ mutex_lock(&priv->mutex);
+ priv->vif = NULL;
+ priv->is_open = 0;
+ mutex_unlock(&priv->mutex);
+ iwl3945_down(priv);
+ ieee80211_restart_hw(priv->hw);
+ } else {
+ iwl3945_down(priv);
+ queue_work(priv->workqueue, &priv->up);
+ }
}
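A condensed sketch of the firmware-error branch added above: after ieee80211_restart_hw() the recovery is driven from mac80211, which is expected to call the driver's start and re-add the interfaces itself; that is why priv->vif and priv->is_open are cleared here rather than re-queueing the driver's own up work (helper name illustrative):

static void handle_fw_error(struct iwl_priv *priv)
{
	if (!test_and_clear_bit(STATUS_FW_ERROR, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	priv->vif = NULL;		/* mac80211 will re-add it */
	priv->is_open = 0;
	mutex_unlock(&priv->mutex);

	iwl3945_down(priv);
	ieee80211_restart_hw(priv->hw);	/* mac80211 drives the recovery */
}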
static void iwl3945_bg_rx_replenish(struct work_struct *data)
@@ -3516,7 +3039,7 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
#define IWL_DELAY_NEXT_SCAN (HZ*2)
-static void iwl3945_post_associate(struct iwl_priv *priv)
+void iwl3945_post_associate(struct iwl_priv *priv)
{
int rc = 0;
struct ieee80211_conf *conf = NULL;
@@ -3541,7 +3064,7 @@ static void iwl3945_post_associate(struct iwl_priv *priv)
conf = ieee80211_get_hw_conf(priv->hw);
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
iwl3945_setup_rxon_timing(priv);
@@ -3574,7 +3097,7 @@ static void iwl3945_post_associate(struct iwl_priv *priv)
}
- iwl3945_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
switch (priv->iw_mode) {
case NL80211_IFTYPE_STATION:
@@ -3584,7 +3107,7 @@ static void iwl3945_post_associate(struct iwl_priv *priv)
case NL80211_IFTYPE_ADHOC:
priv->assoc_id = 1;
- iwl3945_add_station(priv, priv->bssid, 0, 0);
+ iwl_add_station(priv, priv->bssid, 0, CMD_SYNC, NULL);
iwl3945_sync_sta(priv, IWL_STA_ID,
(priv->band == IEEE80211_BAND_5GHZ) ?
IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
@@ -3606,8 +3129,6 @@ static void iwl3945_post_associate(struct iwl_priv *priv)
priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
}
-static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed);
-
/*****************************************************************************
*
* mac80211 entry point functions
@@ -3643,16 +3164,11 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
mutex_unlock(&priv->mutex);
- iwl_rfkill_set_hw_state(priv);
-
if (ret)
goto out_release_irq;
IWL_DEBUG_INFO(priv, "Start UP work.\n");
- if (test_bit(STATUS_IN_SUSPEND, &priv->status))
- return 0;
-
/* Wait for START_ALIVE from ucode. Otherwise callbacks from
* mac80211 will not be run successfully. */
ret = wait_event_interruptible_timeout(priv->wait_command_queue,
@@ -3731,144 +3247,7 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
-
- IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type);
-
- if (priv->vif) {
- IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
- return -EOPNOTSUPP;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->vif = conf->vif;
- priv->iw_mode = conf->type;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- mutex_lock(&priv->mutex);
-
- if (conf->mac_addr) {
- IWL_DEBUG_MAC80211(priv, "Set: %pM\n", conf->mac_addr);
- memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
- }
-
- if (iwl_is_ready(priv))
- iwl3945_set_mode(priv, conf->type);
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-/**
- * iwl3945_mac_config - mac80211 config callback
- *
- * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
- * be set inappropriately and the driver currently sets the hardware up to
- * use it whenever needed.
- */
-static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- unsigned long flags;
- int ret = 0;
-
- mutex_lock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "enter to channel %d\n",
- conf->channel->hw_value);
-
- if (!iwl_is_ready(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- ret = -EIO;
- goto out;
- }
-
- if (unlikely(!iwl3945_mod_params.disable_hw_scan &&
- test_bit(STATUS_SCANNING, &priv->status))) {
- IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
- set_bit(STATUS_CONF_PENDING, &priv->status);
- mutex_unlock(&priv->mutex);
- return 0;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- ch_info = iwl_get_channel_info(priv, conf->channel->band,
- conf->channel->hw_value);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv,
- "Channel %d [%d] is INVALID for this band.\n",
- conf->channel->hw_value, conf->channel->band);
- IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
- spin_unlock_irqrestore(&priv->lock, flags);
- ret = -EINVAL;
- goto out;
- }
-
- iwl_set_rxon_channel(priv, conf->channel);
-
- iwl_set_flags_for_band(priv, conf->channel->band);
-
- /* The list of supported rates and rate mask can be different
- * for each phymode; since the phymode may have changed, reset
- * the rate mask to what mac80211 lists */
- iwl_set_rate(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
-#ifdef IEEE80211_CONF_CHANNEL_SWITCH
- if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
- iwl3945_hw_channel_switch(priv, conf->channel);
- goto out;
- }
-#endif
-
- if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
- if (conf->radio_enabled &&
- iwl_radio_kill_sw_enable_radio(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF-KILL - "
- "waiting for uCode\n");
- goto out;
- }
-
- if (!conf->radio_enabled) {
- iwl_radio_kill_sw_disable_radio(priv);
- IWL_DEBUG_MAC80211(priv, "leave - radio disabled\n");
- goto out;
- }
- }
-
- if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF kill\n");
- ret = -EIO;
- goto out;
- }
-
- iwl_set_rate(priv);
-
- if (memcmp(&priv->active_rxon,
- &priv->staging_rxon, sizeof(priv->staging_rxon)))
- iwl3945_commit_rxon(priv);
- else
- IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration\n");
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-out:
- clear_bit(STATUS_CONF_PENDING, &priv->status);
- mutex_unlock(&priv->mutex);
- return ret;
-}
-
-static void iwl3945_config_ap(struct iwl_priv *priv)
+void iwl3945_config_ap(struct iwl_priv *priv)
{
int rc = 0;
@@ -3880,7 +3259,7 @@ static void iwl3945_config_ap(struct iwl_priv *priv)
/* RXON - unassoc (to set timing command) */
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
/* RXON Timing */
memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
@@ -3916,8 +3295,8 @@ static void iwl3945_config_ap(struct iwl_priv *priv)
}
/* restore RXON assoc */
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv);
- iwl3945_add_station(priv, iwl_bcast_addr, 0, 0);
+ iwlcore_commit_rxon(priv);
+ iwl_add_station(priv, iwl_bcast_addr, 0, CMD_SYNC, NULL);
}
iwl3945_send_beacon_cmd(priv);
@@ -3926,189 +3305,6 @@ static void iwl3945_config_ap(struct iwl_priv *priv)
* clear sta table, add BCAST sta... */
}
-static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct iwl_priv *priv = hw->priv;
- int rc;
-
- if (conf == NULL)
- return -EIO;
-
- if (priv->vif != vif) {
- IWL_DEBUG_MAC80211(priv, "leave - priv->vif != vif\n");
- return 0;
- }
-
- /* handle this temporarily here */
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- conf->changed & IEEE80211_IFCC_BEACON) {
- struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
- if (!beacon)
- return -ENOMEM;
- mutex_lock(&priv->mutex);
- rc = iwl3945_mac_beacon_update(hw, beacon);
- mutex_unlock(&priv->mutex);
- if (rc)
- return rc;
- }
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- mutex_lock(&priv->mutex);
-
- if (conf->bssid)
- IWL_DEBUG_MAC80211(priv, "bssid: %pM\n", conf->bssid);
-
-/*
- * very dubious code was here; the probe filtering flag is never set:
- *
- if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
- !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
- */
-
- if (priv->iw_mode == NL80211_IFTYPE_AP) {
- if (!conf->bssid) {
- conf->bssid = priv->mac_addr;
- memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
- IWL_DEBUG_MAC80211(priv, "bssid was set to: %pM\n",
- conf->bssid);
- }
- if (priv->ibss_beacon)
- dev_kfree_skb(priv->ibss_beacon);
-
- priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
- }
-
- if (iwl_is_rfkill(priv))
- goto done;
-
- if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
- !is_multicast_ether_addr(conf->bssid)) {
- /* If there is currently a HW scan going on in the background
- * then we need to cancel it else the RXON below will fail. */
- if (iwl_scan_cancel_timeout(priv, 100)) {
- IWL_WARN(priv, "Aborted scan still in progress "
- "after 100ms\n");
- IWL_DEBUG_MAC80211(priv, "leaving:scan abort failed\n");
- mutex_unlock(&priv->mutex);
- return -EAGAIN;
- }
- memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);
-
- /* TODO: Audit driver for usage of these members and see
- * if mac80211 deprecates them (priv->bssid looks like it
- * shouldn't be there, but I haven't scanned the IBSS code
- * to verify) - jpk */
- memcpy(priv->bssid, conf->bssid, ETH_ALEN);
-
- if (priv->iw_mode == NL80211_IFTYPE_AP)
- iwl3945_config_ap(priv);
- else {
- rc = iwl3945_commit_rxon(priv);
- if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
- iwl3945_add_station(priv,
- priv->active_rxon.bssid_addr, 1, 0);
- }
-
- } else {
- iwl_scan_cancel_timeout(priv, 100);
- priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv);
- }
-
- done:
- IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->mutex);
-
- return 0;
-}
-
-static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->mutex);
-
- if (iwl_is_ready_rf(priv)) {
- iwl_scan_cancel_timeout(priv, 100);
- priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv);
- }
- if (priv->vif == conf->vif) {
- priv->vif = NULL;
- memset(priv->bssid, 0, ETH_ALEN);
- }
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
-
-static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
- if (changes & BSS_CHANGED_ERP_PREAMBLE) {
- IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
- bss_conf->use_short_preamble);
- if (bss_conf->use_short_preamble)
- priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- priv->staging_rxon.flags &=
- ~RXON_FLG_SHORT_PREAMBLE_MSK;
- }
-
- if (changes & BSS_CHANGED_ERP_CTS_PROT) {
- IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n",
- bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
- priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
- else
- priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
- }
-
- if (changes & BSS_CHANGED_ASSOC) {
- IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
- /* This should never happen as this function should
- * never be called from interrupt context. */
- if (WARN_ON_ONCE(in_interrupt()))
- return;
- if (bss_conf->assoc) {
- priv->assoc_id = bss_conf->aid;
- priv->beacon_int = bss_conf->beacon_int;
- priv->timestamp = bss_conf->timestamp;
- priv->assoc_capability = bss_conf->assoc_capability;
- priv->power_data.dtim_period = bss_conf->dtim_period;
- priv->next_scan_jiffies = jiffies +
- IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
- mutex_lock(&priv->mutex);
- iwl3945_post_associate(priv);
- mutex_unlock(&priv->mutex);
- } else {
- priv->assoc_id = 0;
- IWL_DEBUG_MAC80211(priv,
- "DISASSOC %d\n", bss_conf->assoc);
- }
- } else if (changes && iwl_is_associated(priv) && priv->assoc_id) {
- IWL_DEBUG_MAC80211(priv,
- "Associated Changes %d\n", changes);
- iwl3945_send_rxon_assoc(priv);
- }
-
-}
-
static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -4131,7 +3327,7 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static_key = !iwl_is_associated(priv);
if (!static_key) {
- sta_id = iwl3945_hw_find_station(priv, addr);
+ sta_id = iwl_find_station(priv, addr);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
addr);
@@ -4167,185 +3363,6 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
-static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- int q;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- if (queue >= AC_NUM) {
- IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
- return 0;
- }
-
- q = AC_NUM - 1 - queue;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
- priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
- priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
- priv->qos_data.def_qos_parm.ac[q].edca_txop =
- cpu_to_le16((params->txop * 32));
-
- priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- priv->qos_data.qos_active = 1;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- mutex_lock(&priv->mutex);
- if (priv->iw_mode == NL80211_IFTYPE_AP)
- iwl_activate_qos(priv, 1);
- else if (priv->assoc_id && iwl_is_associated(priv))
- iwl_activate_qos(priv, 0);
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct iwl_priv *priv = hw->priv;
- int i, avail;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- unsigned long flags;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for (i = 0; i < AC_NUM; i++) {
- txq = &priv->txq[i];
- q = &txq->q;
- avail = iwl_queue_space(q);
-
- stats[i].len = q->n_window - avail;
- stats[i].limit = q->n_window - q->high_mark;
- stats[i].count = q->n_window;
-
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-
-static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
-
- mutex_lock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- iwl_reset_qos(priv);
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->assoc_id = 0;
- priv->assoc_capability = 0;
-
- /* new association get rid of ibss beacon skb */
- if (priv->ibss_beacon)
- dev_kfree_skb(priv->ibss_beacon);
-
- priv->ibss_beacon = NULL;
-
- priv->beacon_int = priv->hw->conf.beacon_int;
- priv->timestamp = 0;
- if ((priv->iw_mode == NL80211_IFTYPE_STATION))
- priv->beacon_int = 0;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- /* we are restarting association process
- * clear RXON_FILTER_ASSOC_MSK bit
- */
- if (priv->iw_mode != NL80211_IFTYPE_AP) {
- iwl_scan_cancel_timeout(priv, 100);
- priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv);
- }
-
- /* Per mac80211.h: This is only used in IBSS mode... */
- if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
-
- IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- iwl_set_rate(priv);
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
-
-static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- __le64 timestamp;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
- IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->ibss_beacon)
- dev_kfree_skb(priv->ibss_beacon);
-
- priv->ibss_beacon = skb;
-
- priv->assoc_id = 0;
- timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
- priv->timestamp = le64_to_cpu(timestamp);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl_reset_qos(priv);
-
- iwl3945_post_associate(priv);
-
-
- return 0;
-}
-
/*****************************************************************************
*
* sysfs attributes
@@ -4364,7 +3381,7 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
static ssize_t show_debug_level(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct iwl_priv *priv = d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "0x%08X\n", priv->debug_level);
}
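The sysfs handlers in these hunks stop reaching into d->driver_data and use the dev_get_drvdata() accessor instead. A minimal sketch of the pairing, assuming the probe path stored the private struct with pci_set_drvdata() as the rest of the driver does (the attribute shown is illustrative):

static ssize_t show_status_example(struct device *d,
				   struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);	/* set at probe time */

	return sprintf(buf, "0x%08x\n", (int)priv->status);
}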
@@ -4372,7 +3389,7 @@ static ssize_t store_debug_level(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct iwl_priv *priv = d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
unsigned long val;
int ret;
@@ -4393,7 +3410,7 @@ static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
static ssize_t show_temperature(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
if (!iwl_is_alive(priv))
return -EAGAIN;
@@ -4406,7 +3423,7 @@ static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
static ssize_t show_tx_power(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
}
@@ -4414,7 +3431,7 @@ static ssize_t store_tx_power(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
char *p = (char *)buf;
u32 val;
@@ -4432,7 +3449,7 @@ static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
static ssize_t show_flags(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
}
@@ -4441,7 +3458,7 @@ static ssize_t store_flags(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
u32 flags = simple_strtoul(buf, NULL, 0);
mutex_lock(&priv->mutex);
@@ -4453,7 +3470,7 @@ static ssize_t store_flags(struct device *d,
IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
flags);
priv->staging_rxon.flags = cpu_to_le32(flags);
- iwl3945_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
}
}
mutex_unlock(&priv->mutex);
@@ -4466,7 +3483,7 @@ static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
static ssize_t show_filter_flags(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "0x%04X\n",
le32_to_cpu(priv->active_rxon.filter_flags));
@@ -4476,7 +3493,7 @@ static ssize_t store_filter_flags(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
u32 filter_flags = simple_strtoul(buf, NULL, 0);
mutex_lock(&priv->mutex);
@@ -4489,7 +3506,7 @@ static ssize_t store_filter_flags(struct device *d,
"0x%04X\n", filter_flags);
priv->staging_rxon.filter_flags =
cpu_to_le32(filter_flags);
- iwl3945_commit_rxon(priv);
+ iwlcore_commit_rxon(priv);
}
}
mutex_unlock(&priv->mutex);
@@ -4629,26 +3646,11 @@ static ssize_t show_power_level(struct device *d,
{
struct iwl_priv *priv = dev_get_drvdata(d);
int mode = priv->power_data.user_power_setting;
- int system = priv->power_data.system_power_setting;
int level = priv->power_data.power_mode;
char *p = buf;
- switch (system) {
- case IWL_POWER_SYS_AUTO:
- p += sprintf(p, "SYSTEM:auto");
- break;
- case IWL_POWER_SYS_AC:
- p += sprintf(p, "SYSTEM:ac");
- break;
- case IWL_POWER_SYS_BATTERY:
- p += sprintf(p, "SYSTEM:battery");
- break;
- }
-
- p += sprintf(p, "\tMODE:%s", (mode < IWL_POWER_AUTO) ?
- "fixed" : "auto");
- p += sprintf(p, "\tINDEX:%d", level);
- p += sprintf(p, "\n");
+ p += sprintf(p, "INDEX:%d\t", level);
+ p += sprintf(p, "USER:%d\n", mode);
return p - buf + 1;
}
@@ -4761,7 +3763,7 @@ static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
static ssize_t show_status(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
+ struct iwl_priv *priv = dev_get_drvdata(d);
if (!iwl_is_alive(priv))
return -EAGAIN;
return sprintf(buf, "0x%08x\n", (int)priv->status);
@@ -4773,10 +3775,11 @@ static ssize_t dump_error_log(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct iwl_priv *priv = dev_get_drvdata(d);
char *p = (char *)buf;
if (p[0] == '1')
- iwl3945_dump_nic_error_log((struct iwl_priv *)d->driver_data);
+ iwl3945_dump_nic_error_log(priv);
return strnlen(buf, count);
}
@@ -4787,10 +3790,11 @@ static ssize_t dump_event_log(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct iwl_priv *priv = dev_get_drvdata(d);
char *p = (char *)buf;
if (p[0] == '1')
- iwl3945_dump_nic_event_log((struct iwl_priv *)d->driver_data);
+ iwl3945_dump_nic_event_log(priv);
return strnlen(buf, count);
}
@@ -4812,7 +3816,6 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->up, iwl3945_bg_up);
INIT_WORK(&priv->restart, iwl3945_bg_restart);
INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
- INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
@@ -4869,16 +3872,15 @@ static struct ieee80211_ops iwl3945_hw_ops = {
.tx = iwl3945_mac_tx,
.start = iwl3945_mac_start,
.stop = iwl3945_mac_stop,
- .add_interface = iwl3945_mac_add_interface,
- .remove_interface = iwl3945_mac_remove_interface,
- .config = iwl3945_mac_config,
- .config_interface = iwl3945_mac_config_interface,
+ .add_interface = iwl_mac_add_interface,
+ .remove_interface = iwl_mac_remove_interface,
+ .config = iwl_mac_config,
.configure_filter = iwl_configure_filter,
.set_key = iwl3945_mac_set_key,
- .get_tx_stats = iwl3945_mac_get_tx_stats,
- .conf_tx = iwl3945_mac_conf_tx,
- .reset_tsf = iwl3945_mac_reset_tsf,
- .bss_info_changed = iwl3945_bss_info_changed,
+ .get_tx_stats = iwl_mac_get_tx_stats,
+ .conf_tx = iwl_mac_conf_tx,
+ .reset_tsf = iwl_mac_reset_tsf,
+ .bss_info_changed = iwl_bss_info_changed,
.hw_scan = iwl_mac_hw_scan
};
@@ -4891,7 +3893,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->ibss_beacon = NULL;
spin_lock_init(&priv->lock);
- spin_lock_init(&priv->power_data.lock);
spin_lock_init(&priv->sta_lock);
spin_lock_init(&priv->hcmd_lock);
@@ -4900,7 +3901,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
mutex_init(&priv->mutex);
/* Clear the driver's (not device's) station table */
- iwl3945_clear_stations_table(priv);
+ iwl_clear_stations_table(priv);
priv->data_retry_limit = -1;
priv->ieee_channels = NULL;
@@ -4971,13 +3972,13 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
hw->wiphy->custom_regulatory = true;
- hw->wiphy->max_scan_ssids = 1; /* WILL FIX */
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
/* Default value; 4 EDCA QOS priorities */
hw->queues = 4;
- hw->conf.beacon_int = 100;
-
if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&priv->bands[IEEE80211_BAND_2GHZ];
@@ -5042,6 +4043,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
priv->cfg = cfg;
priv->pci_dev = pdev;
+ priv->inta_mask = CSR_INI_SET_MASK;
#ifdef CONFIG_IWLWIFI_DEBUG
priv->debug_level = iwl3945_mod_params.debug;
@@ -5088,6 +4090,11 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, 0x41, 0x00);
+ /* This spin lock is used in apm_ops.init and EEPROM access,
+ * so it must be initialized now.
+ */
+ spin_lock_init(&priv->reg_lock);
+
+ /* apm init */
err = priv->cfg->ops->lib->apm_ops.init(priv);
if (err < 0) {
@@ -5133,20 +4140,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
priv->cfg->name);
- /***********************************
- * 7. Initialize Module Parameters
- * **********************************/
-
- /* Initialize module parameter values here */
- /* Disable radio (SW RF KILL) via parameter when loading driver */
- if (iwl3945_mod_params.disable) {
- set_bit(STATUS_RF_KILL_SW, &priv->status);
- IWL_DEBUG_INFO(priv, "Radio disabled.\n");
- }
-
-
/***********************
- * 8. Setup Services
+ * 7. Setup Services
* ********************/
spin_lock_irqsave(&priv->lock, flags);
@@ -5155,8 +4150,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
pci_enable_msi(priv->pci_dev);
- err = request_irq(priv->pci_dev->irq, iwl_isr, IRQF_SHARED,
- DRV_NAME, priv);
+ err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr,
+ IRQF_SHARED, DRV_NAME, priv);
if (err) {
IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
goto out_disable_msi;
@@ -5174,7 +4169,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
iwl3945_setup_rx_handlers(priv);
/*********************************
- * 9. Setup and Register mac80211
+ * 8. Setup and Register mac80211
* *******************************/
iwl_enable_interrupts(priv);
@@ -5183,12 +4178,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
if (err)
goto out_remove_sysfs;
- err = iwl_rfkill_init(priv);
+ err = iwl_dbgfs_register(priv, DRV_NAME);
if (err)
- IWL_ERR(priv, "Unable to initialize RFKILL system. "
- "Ignoring error: %d\n", err);
- else
- iwl_rfkill_set_hw_state(priv);
+ IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
/* Start monitoring the killswitch */
queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
@@ -5233,6 +4225,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
+ iwl_dbgfs_unregister(priv);
+
set_bit(STATUS_EXIT_PENDING, &priv->status);
if (priv->mac80211_registered) {
@@ -5253,7 +4247,6 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- iwl_rfkill_unregister(priv);
cancel_delayed_work_sync(&priv->rfkill_poll);
iwl3945_dealloc_ucode_pci(priv);
@@ -5263,7 +4256,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
iwl3945_hw_txq_ctx_free(priv);
iwl3945_unset_hw_params(priv);
- iwl3945_clear_stations_table(priv);
+ iwl_clear_stations_table(priv);
/*netif_stop_queue(dev); */
flush_workqueue(priv->workqueue);
@@ -5291,43 +4284,6 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
ieee80211_free_hw(priv->hw);
}
-#ifdef CONFIG_PM
-
-static int iwl3945_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
-
- if (priv->is_open) {
- set_bit(STATUS_IN_SUSPEND, &priv->status);
- iwl3945_mac_stop(priv->hw);
- priv->is_open = 1;
- }
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
- return 0;
-}
-
-static int iwl3945_pci_resume(struct pci_dev *pdev)
-{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- int ret;
-
- pci_set_power_state(pdev, PCI_D0);
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
- pci_restore_state(pdev);
-
- if (priv->is_open)
- iwl3945_mac_start(priv->hw);
-
- clear_bit(STATUS_IN_SUSPEND, &priv->status);
- return 0;
-}
-
-#endif /* CONFIG_PM */
/*****************************************************************************
*
@@ -5341,8 +4297,8 @@ static struct pci_driver iwl3945_driver = {
.probe = iwl3945_pci_probe,
.remove = __devexit_p(iwl3945_pci_remove),
#ifdef CONFIG_PM
- .suspend = iwl3945_pci_suspend,
- .resume = iwl3945_pci_resume,
+ .suspend = iwl_pci_suspend,
+ .resume = iwl_pci_resume,
#endif
};
@@ -5383,8 +4339,6 @@ MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
module_param_named(antenna, iwl3945_mod_params.antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(disable, iwl3945_mod_params.disable, int, 0444);
-MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, 0444);
MODULE_PARM_DESC(swcrypto,
"using software crypto (default 1 [software])\n");
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
new file mode 100644
index 00000000000..1eccb6df46d
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -0,0 +1,23 @@
+config IWM
+ tristate "Intel Wireless Multicomm 3200 WiFi driver"
+ depends on MMC && WLAN_80211 && EXPERIMENTAL
+ depends on CFG80211
+ select WIRELESS_EXT
+ select FW_LOADER
+
+config IWM_DEBUG
+ bool "Enable full debugging output in iwmc3200wifi"
+ depends on IWM && DEBUG_FS
+ ---help---
+ This option will enable debug tracing and settings for iwm
+
+ You can set the debug level and module through debugfs. By
+ default all modules are set to the IWM_DL_ERR level.
+ To see the list of debug modules and levels, see iwm/debug.h
+
+ For example, if you want the full MLME debug output:
+ echo 0xff > /debug/iwm/phyN/debug/mlme
+
+ Or, if you want the full debug, for all modules:
+ echo 0xff > /debug/iwm/phyN/debug/level
+ echo 0xff > /debug/iwm/phyN/debug/modules
diff --git a/drivers/net/wireless/iwmc3200wifi/Makefile b/drivers/net/wireless/iwmc3200wifi/Makefile
new file mode 100644
index 00000000000..927f022545c
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_IWM) := iwmc3200wifi.o
+iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o
+iwmc3200wifi-objs += commands.o wext.o cfg80211.o eeprom.o
+
+iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o
diff --git a/drivers/net/wireless/iwmc3200wifi/bus.h b/drivers/net/wireless/iwmc3200wifi/bus.h
new file mode 100644
index 00000000000..836663eec25
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/bus.h
@@ -0,0 +1,57 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __IWM_BUS_H__
+#define __IWM_BUS_H__
+
+#include "iwm.h"
+
+struct iwm_if_ops {
+ int (*enable)(struct iwm_priv *iwm);
+ int (*disable)(struct iwm_priv *iwm);
+ int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count);
+
+ int (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
+ void (*debugfs_exit)(struct iwm_priv *iwm);
+
+ const char *umac_name;
+ const char *calib_lmac_name;
+ const char *lmac_name;
+};
+
+static inline int iwm_bus_send_chunk(struct iwm_priv *iwm, u8 *buf, int count)
+{
+ return iwm->bus_ops->send_chunk(iwm, buf, count);
+}
+
+static inline int iwm_bus_enable(struct iwm_priv *iwm)
+{
+ return iwm->bus_ops->enable(iwm);
+}
+
+static inline int iwm_bus_disable(struct iwm_priv *iwm)
+{
+ return iwm->bus_ops->disable(iwm);
+}
+
+#endif
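
[Editor's note] bus.h above decouples the core driver from the underlying transport: the bus layer (SDIO here) fills in a struct iwm_if_ops at probe time, and the core only ever calls through the inline iwm_bus_*() wrappers. Below is a minimal, self-contained userspace C sketch of that ops-table indirection; the types and names (fake_priv, fake_if_ops, sdio_*) are illustrative stand-ins, not the real driver structures.

#include <stdio.h>

/* Illustrative stand-ins for iwm_priv / iwm_if_ops. */
struct fake_priv;

struct fake_if_ops {
	int (*enable)(struct fake_priv *p);
	int (*disable)(struct fake_priv *p);
	int (*send_chunk)(struct fake_priv *p, const unsigned char *buf, int count);
};

struct fake_priv {
	const struct fake_if_ops *bus_ops;	/* set by the bus layer at probe time */
};

/* Core-side wrappers: the core never knows which bus is underneath. */
static int bus_enable(struct fake_priv *p)
{
	return p->bus_ops->enable(p);
}

static int bus_send_chunk(struct fake_priv *p, const unsigned char *buf, int count)
{
	return p->bus_ops->send_chunk(p, buf, count);
}

/* A pretend "SDIO" implementation of the ops. */
static int sdio_enable(struct fake_priv *p)
{
	printf("sdio: function enabled\n");
	return 0;
}

static int sdio_send_chunk(struct fake_priv *p, const unsigned char *buf, int count)
{
	printf("sdio: sending %d bytes\n", count);
	return 0;
}

static const struct fake_if_ops sdio_ops = {
	.enable = sdio_enable,
	.send_chunk = sdio_send_chunk,
};

int main(void)
{
	struct fake_priv priv = { .bus_ops = &sdio_ops };
	unsigned char chunk[16] = { 0 };

	bus_enable(&priv);
	return bus_send_chunk(&priv, chunk, sizeof(chunk));
}

The payoff of this pattern is that adding another transport later only means providing a new ops table; none of the core call sites change.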
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
new file mode 100644
index 00000000000..96f714e6e12
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -0,0 +1,409 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
+
+#include "iwm.h"
+#include "commands.h"
+#include "cfg80211.h"
+#include "debug.h"
+
+#define RATETAB_ENT(_rate, _rateid, _flags) \
+ { \
+ .bitrate = (_rate), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+ }
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static struct ieee80211_rate iwm_rates[] = {
+ RATETAB_ENT(10, 0x1, 0),
+ RATETAB_ENT(20, 0x2, 0),
+ RATETAB_ENT(55, 0x4, 0),
+ RATETAB_ENT(110, 0x8, 0),
+ RATETAB_ENT(60, 0x10, 0),
+ RATETAB_ENT(90, 0x20, 0),
+ RATETAB_ENT(120, 0x40, 0),
+ RATETAB_ENT(180, 0x80, 0),
+ RATETAB_ENT(240, 0x100, 0),
+ RATETAB_ENT(360, 0x200, 0),
+ RATETAB_ENT(480, 0x400, 0),
+ RATETAB_ENT(540, 0x800, 0),
+};
+
+#define iwm_a_rates (iwm_rates + 4)
+#define iwm_a_rates_size 8
+#define iwm_g_rates (iwm_rates + 0)
+#define iwm_g_rates_size 12
+
+static struct ieee80211_channel iwm_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static struct ieee80211_channel iwm_5ghz_a_channels[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0),
+ CHAN5G(184, 0), CHAN5G(188, 0),
+ CHAN5G(192, 0), CHAN5G(196, 0),
+ CHAN5G(200, 0), CHAN5G(204, 0),
+ CHAN5G(208, 0), CHAN5G(212, 0),
+ CHAN5G(216, 0),
+};
+
+static struct ieee80211_supported_band iwm_band_2ghz = {
+ .channels = iwm_2ghz_channels,
+ .n_channels = ARRAY_SIZE(iwm_2ghz_channels),
+ .bitrates = iwm_g_rates,
+ .n_bitrates = iwm_g_rates_size,
+};
+
+static struct ieee80211_supported_band iwm_band_5ghz = {
+ .channels = iwm_5ghz_a_channels,
+ .n_channels = ARRAY_SIZE(iwm_5ghz_a_channels),
+ .bitrates = iwm_a_rates,
+ .n_bitrates = iwm_a_rates_size,
+};
+
+int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
+{
+ struct wiphy *wiphy = iwm_to_wiphy(iwm);
+ struct iwm_bss_info *bss, *next;
+ struct iwm_umac_notif_bss_info *umac_bss;
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_channel *channel;
+ struct ieee80211_supported_band *band;
+ s32 signal;
+ int freq;
+
+ list_for_each_entry_safe(bss, next, &iwm->bss_list, node) {
+ umac_bss = bss->bss;
+ mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
+
+ if (umac_bss->band == UMAC_BAND_2GHZ)
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else if (umac_bss->band == UMAC_BAND_5GHZ)
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ else {
+ IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band);
+ return -EINVAL;
+ }
+
+ freq = ieee80211_channel_to_frequency(umac_bss->channel);
+ channel = ieee80211_get_channel(wiphy, freq);
+ signal = umac_bss->rssi * 100;
+
+ if (!cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+ le16_to_cpu(umac_bss->frame_len),
+ signal, GFP_KERNEL))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iwm_cfg80211_change_iface(struct wiphy *wiphy, int ifindex,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ struct iwm_priv *iwm;
+ u32 old_mode;
+
+ /* we're under RTNL */
+ ndev = __dev_get_by_index(&init_net, ifindex);
+ if (!ndev)
+ return -ENODEV;
+
+ wdev = ndev->ieee80211_ptr;
+ iwm = ndev_to_iwm(ndev);
+ old_mode = iwm->conf.mode;
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ iwm->conf.mode = UMAC_MODE_BSS;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ iwm->conf.mode = UMAC_MODE_IBSS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ wdev->iftype = type;
+
+ if ((old_mode == iwm->conf.mode) || !iwm->umac_profile)
+ return 0;
+
+ iwm->umac_profile->mode = cpu_to_le32(iwm->conf.mode);
+
+ if (iwm->umac_profile_active) {
+ int ret = iwm_invalidate_mlme_profile(iwm);
+ if (ret < 0)
+ IWM_ERR(iwm, "Couldn't invalidate profile\n");
+ }
+
+ return 0;
+}
+
+static int iwm_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(ndev);
+ int ret;
+
+ if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
+ IWM_ERR(iwm, "Scan while device is not ready\n");
+ return -EIO;
+ }
+
+ if (test_bit(IWM_STATUS_SCANNING, &iwm->status)) {
+ IWM_ERR(iwm, "Scanning already\n");
+ return -EAGAIN;
+ }
+
+ if (test_bit(IWM_STATUS_SCAN_ABORTING, &iwm->status)) {
+ IWM_ERR(iwm, "Scanning being aborted\n");
+ return -EAGAIN;
+ }
+
+ set_bit(IWM_STATUS_SCANNING, &iwm->status);
+
+ ret = iwm_scan_ssids(iwm, request->ssids, request->n_ssids);
+ if (ret) {
+ clear_bit(IWM_STATUS_SCANNING, &iwm->status);
+ return ret;
+ }
+
+ iwm->scan_request = request;
+ return 0;
+}
+
+static int iwm_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+ (iwm->conf.rts_threshold != wiphy->rts_threshold)) {
+ int ret;
+
+ iwm->conf.rts_threshold = wiphy->rts_threshold;
+
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_RTS_THRESHOLD,
+ iwm->conf.rts_threshold);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+ (iwm->conf.frag_threshold != wiphy->frag_threshold)) {
+ int ret;
+
+ iwm->conf.frag_threshold = wiphy->frag_threshold;
+
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
+ CFG_FRAG_THRESHOLD,
+ iwm->conf.frag_threshold);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iwm_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params)
+{
+ struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
+ struct ieee80211_channel *chan = params->channel;
+ struct cfg80211_bss *bss;
+
+ if (!test_bit(IWM_STATUS_READY, &iwm->status))
+ return -EIO;
+
+ /* UMAC doesn't support creating an IBSS network with a specified
+ * BSSID. This check should be removed once join-only mode is
+ * supported. */
+ if (params->bssid)
+ return -EOPNOTSUPP;
+
+ bss = cfg80211_get_ibss(iwm_to_wiphy(iwm), NULL,
+ params->ssid, params->ssid_len);
+ if (!bss) {
+ iwm_scan_one_ssid(iwm, params->ssid, params->ssid_len);
+ schedule_timeout_interruptible(2 * HZ);
+ bss = cfg80211_get_ibss(iwm_to_wiphy(iwm), NULL,
+ params->ssid, params->ssid_len);
+ }
+ /* IBSS join only mode is not supported by UMAC ATM */
+ if (bss) {
+ cfg80211_put_bss(bss);
+ return -EOPNOTSUPP;
+ }
+
+ iwm->channel = ieee80211_frequency_to_channel(chan->center_freq);
+ iwm->umac_profile->ibss.band = chan->band;
+ iwm->umac_profile->ibss.channel = iwm->channel;
+ iwm->umac_profile->ssid.ssid_len = params->ssid_len;
+ memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len);
+
+ if (params->bssid)
+ memcpy(&iwm->umac_profile->bssid[0], params->bssid, ETH_ALEN);
+
+ return iwm_send_mlme_profile(iwm);
+}
+
+static int iwm_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
+
+ if (iwm->umac_profile_active)
+ return iwm_invalidate_mlme_profile(iwm);
+
+ return 0;
+}
+
+static struct cfg80211_ops iwm_cfg80211_ops = {
+ .change_virtual_intf = iwm_cfg80211_change_iface,
+ .scan = iwm_cfg80211_scan,
+ .set_wiphy_params = iwm_cfg80211_set_wiphy_params,
+ .join_ibss = iwm_cfg80211_join_ibss,
+ .leave_ibss = iwm_cfg80211_leave_ibss,
+};
+
+struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev)
+{
+ int ret = 0;
+ struct wireless_dev *wdev;
+
+ /*
+ * We're trying to have the following memory
+ * layout:
+ *
+ * +-------------------------+
+ * | struct wiphy |
+ * +-------------------------+
+ * | struct iwm_priv |
+ * +-------------------------+
+ * | bus private data |
+ * | (e.g. iwm_priv_sdio) |
+ * +-------------------------+
+ *
+ */
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev) {
+ dev_err(dev, "Couldn't allocate wireless device\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ wdev->wiphy = wiphy_new(&iwm_cfg80211_ops,
+ sizeof(struct iwm_priv) + sizeof_bus);
+ if (!wdev->wiphy) {
+ dev_err(dev, "Couldn't allocate wiphy device\n");
+ ret = -ENOMEM;
+ goto out_err_new;
+ }
+
+ set_wiphy_dev(wdev->wiphy, dev);
+ wdev->wiphy->max_scan_ssids = UMAC_WIFI_IF_PROBE_OPTION_MAX;
+ wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &iwm_band_2ghz;
+ wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &iwm_band_5ghz;
+ wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ ret = wiphy_register(wdev->wiphy);
+ if (ret < 0) {
+ dev_err(dev, "Couldn't register wiphy device\n");
+ goto out_err_register;
+ }
+
+ return wdev;
+
+ out_err_register:
+ wiphy_free(wdev->wiphy);
+
+ out_err_new:
+ kfree(wdev);
+
+ return ERR_PTR(ret);
+}
+
+void iwm_wdev_free(struct iwm_priv *iwm)
+{
+ struct wireless_dev *wdev = iwm_to_wdev(iwm);
+
+ if (!wdev)
+ return;
+
+ wiphy_unregister(wdev->wiphy);
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+}
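
[Editor's note] The comment block in iwm_wdev_alloc() above describes the single-allocation layout: wiphy_new() is asked for a private area big enough to hold struct iwm_priv followed by the bus-specific private data, so one allocation backs all three objects. The following is a minimal, self-contained userspace sketch of that layout idea only; fake_wiphy, fake_iwm_priv and fake_iwm_sdio are illustrative stand-ins, not the kernel structures, and alignment subtleties are glossed over.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins for struct wiphy, struct iwm_priv and the
 * bus-specific (e.g. SDIO) private data. */
struct fake_wiphy { char name[16]; /* private area follows */ };
struct fake_iwm_priv { int status; /* bus private data follows */ };
struct fake_iwm_sdio { int func_id; };

int main(void)
{
	size_t sizeof_bus = sizeof(struct fake_iwm_sdio);

	/* One allocation: wiphy, then iwm_priv, then bus private data. */
	struct fake_wiphy *wiphy = calloc(1, sizeof(*wiphy) +
					     sizeof(struct fake_iwm_priv) +
					     sizeof_bus);
	if (!wiphy)
		return 1;

	/* The "priv" pointers are just offsets into the same block. */
	struct fake_iwm_priv *iwm = (struct fake_iwm_priv *)(wiphy + 1);
	struct fake_iwm_sdio *sdio = (struct fake_iwm_sdio *)(iwm + 1);

	strcpy(wiphy->name, "phy0");
	iwm->status = 0;
	sdio->func_id = 1;

	printf("wiphy=%p iwm=%p sdio=%p\n",
	       (void *)wiphy, (void *)iwm, (void *)sdio);

	free(wiphy);	/* freeing the wiphy block releases everything */
	return 0;
}

This is why iwm_wdev_free() only has to unregister and free the wiphy: the embedded iwm_priv and bus data go away with it.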
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.h b/drivers/net/wireless/iwmc3200wifi/cfg80211.h
new file mode 100644
index 00000000000..56a34145acb
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.h
@@ -0,0 +1,31 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __IWM_CFG80211_H__
+#define __IWM_CFG80211_H__
+
+int iwm_cfg80211_inform_bss(struct iwm_priv *iwm);
+struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev);
+void iwm_wdev_free(struct iwm_priv *iwm);
+
+#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
new file mode 100644
index 00000000000..834a7f544e5
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -0,0 +1,920 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <linux/ieee80211.h>
+
+#include "iwm.h"
+#include "bus.h"
+#include "hal.h"
+#include "umac.h"
+#include "commands.h"
+#include "debug.h"
+
+static int iwm_send_lmac_ptrough_cmd(struct iwm_priv *iwm,
+ u8 lmac_cmd_id,
+ const void *lmac_payload,
+ u16 lmac_payload_size,
+ u8 resp)
+{
+ struct iwm_udma_wifi_cmd udma_cmd = UDMA_LMAC_INIT;
+ struct iwm_umac_cmd umac_cmd;
+ struct iwm_lmac_cmd lmac_cmd;
+
+ lmac_cmd.id = lmac_cmd_id;
+
+ umac_cmd.id = UMAC_CMD_OPCODE_WIFI_PASS_THROUGH;
+ umac_cmd.resp = resp;
+
+ return iwm_hal_send_host_cmd(iwm, &udma_cmd, &umac_cmd, &lmac_cmd,
+ lmac_payload, lmac_payload_size);
+}
+
+int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
+ bool resp)
+{
+ struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
+ struct iwm_umac_cmd umac_cmd;
+
+ umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER;
+ umac_cmd.resp = resp;
+
+ return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd,
+ payload, payload_size);
+}
+
+static struct coex_event iwm_sta_xor_prio_tbl[COEX_EVENTS_NUM] =
+{
+ {4, 3, 0, COEX_UNASSOC_IDLE_FLAGS},
+ {4, 3, 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
+ {4, 3, 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
+ {4, 3, 0, COEX_CALIBRATION_FLAGS},
+ {4, 3, 0, COEX_PERIODIC_CALIBRATION_FLAGS},
+ {4, 3, 0, COEX_CONNECTION_ESTAB_FLAGS},
+ {4, 3, 0, COEX_ASSOCIATED_IDLE_FLAGS},
+ {4, 3, 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
+ {4, 3, 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
+ {4, 3, 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
+ {6, 3, 0, COEX_XOR_RF_ON_FLAGS},
+ {4, 3, 0, COEX_RF_OFF_FLAGS},
+ {6, 6, 0, COEX_STAND_ALONE_DEBUG_FLAGS},
+ {4, 3, 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
+ {4, 3, 0, COEX_RSRVD1_FLAGS},
+ {4, 3, 0, COEX_RSRVD2_FLAGS}
+};
+
+static struct coex_event iwm_sta_cm_prio_tbl[COEX_EVENTS_NUM] =
+{
+ {1, 1, 0, COEX_UNASSOC_IDLE_FLAGS},
+ {4, 3, 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
+ {3, 3, 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
+ {5, 5, 0, COEX_CALIBRATION_FLAGS},
+ {4, 4, 0, COEX_PERIODIC_CALIBRATION_FLAGS},
+ {5, 4, 0, COEX_CONNECTION_ESTAB_FLAGS},
+ {4, 4, 0, COEX_ASSOCIATED_IDLE_FLAGS},
+ {4, 4, 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
+ {4, 4, 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
+ {4, 4, 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
+ {1, 1, 0, COEX_RF_ON_FLAGS},
+ {1, 1, 0, COEX_RF_OFF_FLAGS},
+ {6, 6, 0, COEX_STAND_ALONE_DEBUG_FLAGS},
+ {5, 4, 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
+ {1, 1, 0, COEX_RSRVD1_FLAGS},
+ {1, 1, 0, COEX_RSRVD2_FLAGS}
+};
+
+int iwm_send_prio_table(struct iwm_priv *iwm)
+{
+ struct iwm_coex_prio_table_cmd coex_table_cmd;
+ u32 coex_enabled, mode_enabled;
+
+ memset(&coex_table_cmd, 0, sizeof(struct iwm_coex_prio_table_cmd));
+
+ coex_table_cmd.flags = COEX_FLAGS_STA_TABLE_VALID_MSK;
+
+ switch (iwm->conf.coexist_mode) {
+ case COEX_MODE_XOR:
+ case COEX_MODE_CM:
+ coex_enabled = 1;
+ break;
+ default:
+ coex_enabled = 0;
+ break;
+ }
+
+ switch (iwm->conf.mode) {
+ case UMAC_MODE_BSS:
+ case UMAC_MODE_IBSS:
+ mode_enabled = 1;
+ break;
+ default:
+ mode_enabled = 0;
+ break;
+ }
+
+ if (coex_enabled && mode_enabled) {
+ coex_table_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK |
+ COEX_FLAGS_ASSOC_WAKEUP_UMASK_MSK |
+ COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK;
+
+ switch (iwm->conf.coexist_mode) {
+ case COEX_MODE_XOR:
+ memcpy(coex_table_cmd.sta_prio, iwm_sta_xor_prio_tbl,
+ sizeof(iwm_sta_xor_prio_tbl));
+ break;
+ case COEX_MODE_CM:
+ memcpy(coex_table_cmd.sta_prio, iwm_sta_cm_prio_tbl,
+ sizeof(iwm_sta_cm_prio_tbl));
+ break;
+ default:
+ IWM_ERR(iwm, "Invalid coex_mode 0x%x\n",
+ iwm->conf.coexist_mode);
+ break;
+ }
+ } else
+ IWM_WARN(iwm, "coexistence disabled\n");
+
+ return iwm_send_lmac_ptrough_cmd(iwm, COEX_PRIORITY_TABLE_CMD,
+ &coex_table_cmd,
+ sizeof(struct iwm_coex_prio_table_cmd), 1);
+}
+
+int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested)
+{
+ struct iwm_lmac_cal_cfg_cmd cal_cfg_cmd;
+
+ memset(&cal_cfg_cmd, 0, sizeof(struct iwm_lmac_cal_cfg_cmd));
+
+ cal_cfg_cmd.ucode_cfg.init.enable = cpu_to_le32(calib_requested);
+ cal_cfg_cmd.ucode_cfg.init.start = cpu_to_le32(calib_requested);
+ cal_cfg_cmd.ucode_cfg.init.send_res = cpu_to_le32(calib_requested);
+ cal_cfg_cmd.ucode_cfg.flags =
+ cpu_to_le32(CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK);
+
+ return iwm_send_lmac_ptrough_cmd(iwm, CALIBRATION_CFG_CMD, &cal_cfg_cmd,
+ sizeof(struct iwm_lmac_cal_cfg_cmd), 1);
+}
+
+int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested)
+{
+ struct iwm_lmac_cal_cfg_cmd cal_cfg_cmd;
+
+ memset(&cal_cfg_cmd, 0, sizeof(struct iwm_lmac_cal_cfg_cmd));
+
+ cal_cfg_cmd.ucode_cfg.periodic.enable = cpu_to_le32(calib_requested);
+ cal_cfg_cmd.ucode_cfg.periodic.start = cpu_to_le32(calib_requested);
+
+ return iwm_send_lmac_ptrough_cmd(iwm, CALIBRATION_CFG_CMD, &cal_cfg_cmd,
+ sizeof(struct iwm_lmac_cal_cfg_cmd), 0);
+}
+
+int iwm_store_rxiq_calib_result(struct iwm_priv *iwm)
+{
+ struct iwm_calib_rxiq *rxiq;
+ u8 *eeprom_rxiq;
+ int grplen = sizeof(struct iwm_calib_rxiq_group);
+
+ rxiq = kzalloc(sizeof(struct iwm_calib_rxiq), GFP_KERNEL);
+ if (!rxiq) {
+ IWM_ERR(iwm, "Couldn't alloc memory for RX IQ\n");
+ return -ENOMEM;
+ }
+
+ eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ);
+ if (IS_ERR(eeprom_rxiq)) {
+ IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n");
+ kfree(rxiq);
+ return PTR_ERR(eeprom_rxiq);
+ }
+
+ iwm->calib_res[SHILOH_PHY_CALIBRATE_RX_IQ_CMD].buf = (u8 *)rxiq;
+ iwm->calib_res[SHILOH_PHY_CALIBRATE_RX_IQ_CMD].size = sizeof(*rxiq);
+
+ rxiq->hdr.opcode = SHILOH_PHY_CALIBRATE_RX_IQ_CMD;
+ rxiq->hdr.first_grp = 0;
+ rxiq->hdr.grp_num = 1;
+ rxiq->hdr.all_data_valid = 1;
+
+ memcpy(&rxiq->group[0], eeprom_rxiq, 4 * grplen);
+ memcpy(&rxiq->group[4], eeprom_rxiq + 6 * grplen, grplen);
+
+ return 0;
+}
+
+int iwm_send_calib_results(struct iwm_priv *iwm)
+{
+ int i, ret = 0;
+
+ for (i = PHY_CALIBRATE_OPCODES_NUM; i < CALIBRATION_CMD_NUM; i++) {
+ if (test_bit(i - PHY_CALIBRATE_OPCODES_NUM,
+ &iwm->calib_done_map)) {
+ IWM_DBG_CMD(iwm, DBG,
+ "Send calibration %d result\n", i);
+ ret |= iwm_send_lmac_ptrough_cmd(iwm,
+ REPLY_PHY_CALIBRATION_CMD,
+ iwm->calib_res[i].buf,
+ iwm->calib_res[i].size, 0);
+
+ kfree(iwm->calib_res[i].buf);
+ iwm->calib_res[i].buf = NULL;
+ iwm->calib_res[i].size = 0;
+ }
+ }
+
+ return ret;
+}
+
+int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp)
+{
+ struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
+ struct iwm_umac_cmd umac_cmd;
+ struct iwm_umac_cmd_reset reset;
+
+ reset.flags = reset_flags;
+
+ umac_cmd.id = UMAC_CMD_OPCODE_RESET;
+ umac_cmd.resp = resp;
+
+ return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &reset,
+ sizeof(struct iwm_umac_cmd_reset));
+}
+
+int iwm_umac_set_config_fix(struct iwm_priv *iwm, u16 tbl, u16 key, u32 value)
+{
+ struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
+ struct iwm_umac_cmd umac_cmd;
+ struct iwm_umac_cmd_set_param_fix param;
+
+ if ((tbl != UMAC_PARAM_TBL_CFG_FIX) &&
+ (tbl != UMAC_PARAM_TBL_FA_CFG_FIX))
+ return -EINVAL;
+
+ umac_cmd.id = UMAC_CMD_OPCODE_SET_PARAM_FIX;
+ umac_cmd.resp = 0;
+
+ param.tbl = cpu_to_le16(tbl);
+ param.key = cpu_to_le16(key);
+ param.value = cpu_to_le32(value);
+
+ return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &param,
+ sizeof(struct iwm_umac_cmd_set_param_fix));
+}
+
+int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
+ void *payload, u16 payload_size)
+{
+ struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
+ struct iwm_umac_cmd umac_cmd;
+ struct iwm_umac_cmd_set_param_var *param_hdr;
+ u8 *param;
+ int ret;
+
+ param = kzalloc(payload_size +
+ sizeof(struct iwm_umac_cmd_set_param_var), GFP_KERNEL);
+ if (!param) {
+ IWM_ERR(iwm, "Couldn't allocate param\n");
+ return -ENOMEM;
+ }
+
+ param_hdr = (struct iwm_umac_cmd_set_param_var *)param;
+
+ umac_cmd.id = UMAC_CMD_OPCODE_SET_PARAM_VAR;
+ umac_cmd.resp = 0;
+
+ param_hdr->tbl = cpu_to_le16(UMAC_PARAM_TBL_CFG_VAR);
+ param_hdr->key = cpu_to_le16(key);
+ param_hdr->len = cpu_to_le16(payload_size);
+ memcpy(param + sizeof(struct iwm_umac_cmd_set_param_var),
+ payload, payload_size);
+
+ ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, param,
+ sizeof(struct iwm_umac_cmd_set_param_var) +
+ payload_size);
+ kfree(param);
+
+ return ret;
+}
+
+int iwm_send_umac_config(struct iwm_priv *iwm,
+ __le32 reset_flags)
+{
+ int ret;
+
+ /* Use UMAC default values */
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_POWER_INDEX, iwm->conf.power_index);
+ if (ret < 0)
+ return ret;
+
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
+ CFG_FRAG_THRESHOLD,
+ iwm->conf.frag_threshold);
+ if (ret < 0)
+ return ret;
+
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_RTS_THRESHOLD,
+ iwm->conf.rts_threshold);
+ if (ret < 0)
+ return ret;
+
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_CTS_TO_SELF, iwm->conf.cts_to_self);
+ if (ret < 0)
+ return ret;
+
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_COEX_MODE, iwm->conf.coexist_mode);
+ if (ret < 0)
+ return ret;
+
+ /*
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_ASSOCIATION_TIMEOUT,
+ iwm->conf.assoc_timeout);
+ if (ret < 0)
+ return ret;
+
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_ROAM_TIMEOUT,
+ iwm->conf.roam_timeout);
+ if (ret < 0)
+ return ret;
+
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_WIRELESS_MODE,
+ WIRELESS_MODE_11A | WIRELESS_MODE_11G);
+ if (ret < 0)
+ return ret;
+ */
+
+ ret = iwm_umac_set_config_var(iwm, CFG_NET_ADDR,
+ iwm_to_ndev(iwm)->dev_addr, ETH_ALEN);
+ if (ret < 0)
+ return ret;
+
+ /* UMAC PM static configurations */
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_PM_LEGACY_RX_TIMEOUT, 0x12C);
+ if (ret < 0)
+ return ret;
+
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_PM_LEGACY_TX_TIMEOUT, 0x15E);
+ if (ret < 0)
+ return ret;
+
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_PM_CTRL_FLAGS, 0x30001);
+ if (ret < 0)
+ return ret;
+
+ ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_PM_KEEP_ALIVE_IN_BEACONS, 0x80);
+ if (ret < 0)
+ return ret;
+
+ /* reset UMAC */
+ ret = iwm_send_umac_reset(iwm, reset_flags, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC,
+ WAIT_NOTIF_TIMEOUT);
+ if (ret) {
+ IWM_ERR(iwm, "Wait for UMAC RESET timeout\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id)
+{
+ struct iwm_udma_wifi_cmd udma_cmd;
+ struct iwm_umac_cmd umac_cmd;
+ struct iwm_tx_info *tx_info = skb_to_tx_info(skb);
+
+ udma_cmd.eop = 1; /* always set eop for non-concatenated Tx */
+ udma_cmd.credit_group = pool_id;
+ udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid;
+ udma_cmd.lmac_offset = 0;
+
+ umac_cmd.id = REPLY_TX;
+ umac_cmd.color = tx_info->color;
+ umac_cmd.resp = 0;
+
+ return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd,
+ skb->data, skb->len);
+}
+
+static int iwm_target_read(struct iwm_priv *iwm, __le32 address,
+ u8 *response, u32 resp_size)
+{
+ struct iwm_udma_nonwifi_cmd target_cmd;
+ struct iwm_nonwifi_cmd *cmd;
+ u16 seq_num;
+ int ret = 0;
+
+ target_cmd.opcode = UMAC_HDI_OUT_OPCODE_READ;
+ target_cmd.addr = address;
+ target_cmd.op1_sz = cpu_to_le32(resp_size);
+ target_cmd.op2 = 0;
+ target_cmd.handle_by_hw = 0;
+ target_cmd.resp = 1;
+ target_cmd.eop = 1;
+
+ ret = iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't send READ command\n");
+ return ret;
+ }
+
+ /* When succeeding, the send_target routine returns the seq number */
+ seq_num = ret;
+
+ ret = wait_event_interruptible_timeout(iwm->nonwifi_queue,
+ (cmd = iwm_get_pending_nonwifi_cmd(iwm, seq_num,
+ UMAC_HDI_OUT_OPCODE_READ)) != NULL,
+ 2 * HZ);
+
+ if (!ret) {
+ IWM_ERR(iwm, "Didn't receive a target READ answer\n");
+ return ret;
+ }
+
+ memcpy(response, cmd->buf.hdr + sizeof(struct iwm_udma_in_hdr),
+ resp_size);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+int iwm_read_mac(struct iwm_priv *iwm, u8 *mac)
+{
+ int ret;
+ u8 mac_align[ALIGN(ETH_ALEN, 8)];
+
+ ret = iwm_target_read(iwm, cpu_to_le32(WICO_MAC_ADDRESS_ADDR),
+ mac_align, sizeof(mac_align));
+ if (ret < 0)
+ return ret;
+
+ if (is_valid_ether_addr(mac_align))
+ memcpy(mac, mac_align, ETH_ALEN);
+ else {
+ IWM_ERR(iwm, "Invalid EEPROM MAC\n");
+ memcpy(mac, iwm->conf.mac_addr, ETH_ALEN);
+ get_random_bytes(&mac[3], 3);
+ }
+
+ return 0;
+}
+
+int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx)
+{
+ struct iwm_umac_tx_key_id tx_key_id;
+
+ if (!iwm->default_key || !iwm->default_key->in_use)
+ return -EINVAL;
+
+ tx_key_id.hdr.oid = UMAC_WIFI_IF_CMD_GLOBAL_TX_KEY_ID;
+ tx_key_id.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_tx_key_id) -
+ sizeof(struct iwm_umac_wifi_if));
+
+ tx_key_id.key_idx = key_idx;
+
+ return iwm_send_wifi_if_cmd(iwm, &tx_key_id, sizeof(tx_key_id), 1);
+}
+
+static int iwm_check_profile(struct iwm_priv *iwm)
+{
+ if (!iwm->umac_profile_active)
+ return -EAGAIN;
+
+ if (iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_40 &&
+ iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_104 &&
+ iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_TKIP &&
+ iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_CCMP) {
+ IWM_ERR(iwm, "Wrong unicast cipher: 0x%x\n",
+ iwm->umac_profile->sec.ucast_cipher);
+ return -EAGAIN;
+ }
+
+ if (iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_WEP_40 &&
+ iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_WEP_104 &&
+ iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_TKIP &&
+ iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_CCMP) {
+ IWM_ERR(iwm, "Wrong multicast cipher: 0x%x\n",
+ iwm->umac_profile->sec.mcast_cipher);
+ return -EAGAIN;
+ }
+
+ if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
+ iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
+ (iwm->umac_profile->sec.ucast_cipher !=
+ iwm->umac_profile->sec.mcast_cipher)) {
+ IWM_ERR(iwm, "Unicast and multicast ciphers differ for WEP\n");
+ }
+
+ return 0;
+}
+
+int iwm_set_key(struct iwm_priv *iwm, bool remove, bool set_tx_key,
+ struct iwm_key *key)
+{
+ int ret;
+ u8 cmd[64], *sta_addr, *key_data, key_len;
+ s8 key_idx;
+ u16 cmd_size = 0;
+ struct iwm_umac_key_hdr *key_hdr = &key->hdr;
+ struct iwm_umac_key_wep40 *wep40 = (struct iwm_umac_key_wep40 *)cmd;
+ struct iwm_umac_key_wep104 *wep104 = (struct iwm_umac_key_wep104 *)cmd;
+ struct iwm_umac_key_tkip *tkip = (struct iwm_umac_key_tkip *)cmd;
+ struct iwm_umac_key_ccmp *ccmp = (struct iwm_umac_key_ccmp *)cmd;
+
+ if (set_tx_key)
+ iwm->default_key = key;
+
+ /*
+ * We check if our current profile is valid.
+ * If not, we don't push the key; we just cache it,
+ * so that the keys are actually pushed with the
+ * next siwessid call.
+ */
+ if (!remove) {
+ ret = iwm_check_profile(iwm);
+ if (ret < 0)
+ return ret;
+ }
+
+ sta_addr = key->hdr.mac;
+ key_data = key->key;
+ key_len = key->key_len;
+ key_idx = key->hdr.key_idx;
+
+ if (!remove) {
+ IWM_DBG_WEXT(iwm, DBG, "key_idx:%d set tx key:%d\n",
+ key_idx, set_tx_key);
+ IWM_DBG_WEXT(iwm, DBG, "key_len:%d\n", key_len);
+ IWM_DBG_WEXT(iwm, DBG, "MAC:%pM, idx:%d, multicast:%d\n",
+ key_hdr->mac, key_hdr->key_idx, key_hdr->multicast);
+
+ IWM_DBG_WEXT(iwm, DBG, "profile: mcast:0x%x, ucast:0x%x\n",
+ iwm->umac_profile->sec.mcast_cipher,
+ iwm->umac_profile->sec.ucast_cipher);
+ IWM_DBG_WEXT(iwm, DBG, "profile: auth_type:0x%x, flags:0x%x\n",
+ iwm->umac_profile->sec.auth_type,
+ iwm->umac_profile->sec.flags);
+
+ switch (key->alg) {
+ case UMAC_CIPHER_TYPE_WEP_40:
+ wep40->hdr.oid = UMAC_WIFI_IF_CMD_ADD_WEP40_KEY;
+ wep40->hdr.buf_size =
+ cpu_to_le16(sizeof(struct iwm_umac_key_wep40) -
+ sizeof(struct iwm_umac_wifi_if));
+
+ memcpy(&wep40->key_hdr, key_hdr,
+ sizeof(struct iwm_umac_key_hdr));
+ memcpy(wep40->key, key_data, key_len);
+ wep40->static_key = 1;
+
+ cmd_size = sizeof(struct iwm_umac_key_wep40);
+ break;
+
+ case UMAC_CIPHER_TYPE_WEP_104:
+ wep104->hdr.oid = UMAC_WIFI_IF_CMD_ADD_WEP104_KEY;
+ wep104->hdr.buf_size =
+ cpu_to_le16(sizeof(struct iwm_umac_key_wep104) -
+ sizeof(struct iwm_umac_wifi_if));
+
+ memcpy(&wep104->key_hdr, key_hdr,
+ sizeof(struct iwm_umac_key_hdr));
+ memcpy(wep104->key, key_data, key_len);
+ wep104->static_key = 1;
+
+ cmd_size = sizeof(struct iwm_umac_key_wep104);
+ break;
+
+ case UMAC_CIPHER_TYPE_CCMP:
+ key_hdr->key_idx++;
+ ccmp->hdr.oid = UMAC_WIFI_IF_CMD_ADD_CCMP_KEY;
+ ccmp->hdr.buf_size =
+ cpu_to_le16(sizeof(struct iwm_umac_key_ccmp) -
+ sizeof(struct iwm_umac_wifi_if));
+
+ memcpy(&ccmp->key_hdr, key_hdr,
+ sizeof(struct iwm_umac_key_hdr));
+
+ memcpy(ccmp->key, key_data, key_len);
+
+ if (key->flags & IW_ENCODE_EXT_RX_SEQ_VALID)
+ memcpy(ccmp->iv_count, key->rx_seq, 6);
+
+ cmd_size = sizeof(struct iwm_umac_key_ccmp);
+ break;
+
+ case UMAC_CIPHER_TYPE_TKIP:
+ key_hdr->key_idx++;
+ tkip->hdr.oid = UMAC_WIFI_IF_CMD_ADD_TKIP_KEY;
+ tkip->hdr.buf_size =
+ cpu_to_le16(sizeof(struct iwm_umac_key_tkip) -
+ sizeof(struct iwm_umac_wifi_if));
+
+ memcpy(&tkip->key_hdr, key_hdr,
+ sizeof(struct iwm_umac_key_hdr));
+
+ memcpy(tkip->tkip_key, key_data, IWM_TKIP_KEY_SIZE);
+ memcpy(tkip->mic_tx_key, key_data + IWM_TKIP_KEY_SIZE,
+ IWM_TKIP_MIC_SIZE);
+ memcpy(tkip->mic_rx_key,
+ key_data + IWM_TKIP_KEY_SIZE + IWM_TKIP_MIC_SIZE,
+ IWM_TKIP_MIC_SIZE);
+
+ if (key->flags & IW_ENCODE_EXT_RX_SEQ_VALID)
+ memcpy(tkip->iv_count, key->rx_seq, 6);
+
+ cmd_size = sizeof(struct iwm_umac_key_tkip);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ if ((key->alg == UMAC_CIPHER_TYPE_CCMP) ||
+ (key->alg == UMAC_CIPHER_TYPE_TKIP))
+ /*
+ * UGLY_UGLY_UGLY
+ * Copied HACK from the MWG driver.
+ * Without it, the key is set before the second
+ * EAPOL frame is sent, and the latter is thus
+ * encrypted.
+ */
+ schedule_timeout_interruptible(usecs_to_jiffies(300));
+
+ ret = iwm_send_wifi_if_cmd(iwm, cmd, cmd_size, 1);
+ if (ret < 0)
+ goto err;
+
+ /*
+ * We need a default key only if it is set and
+ * if we're doing WEP.
+ */
+ if (iwm->default_key == key &&
+ ((key->alg == UMAC_CIPHER_TYPE_WEP_40) ||
+ (key->alg == UMAC_CIPHER_TYPE_WEP_104))) {
+ ret = iwm_set_tx_key(iwm, key_idx);
+ if (ret < 0)
+ goto err;
+ }
+ } else {
+ struct iwm_umac_key_remove key_remove;
+
+ key_remove.hdr.oid = UMAC_WIFI_IF_CMD_REMOVE_KEY;
+ key_remove.hdr.buf_size =
+ cpu_to_le16(sizeof(struct iwm_umac_key_remove) -
+ sizeof(struct iwm_umac_wifi_if));
+ memcpy(&key_remove.key_hdr, key_hdr,
+ sizeof(struct iwm_umac_key_hdr));
+
+ ret = iwm_send_wifi_if_cmd(iwm, &key_remove,
+ sizeof(struct iwm_umac_key_remove),
+ 1);
+ if (ret < 0)
+ return ret;
+
+ iwm->keys[key_idx].in_use = 0;
+ }
+
+ return 0;
+
+ err:
+ kfree(key);
+ return ret;
+}
+
+
+int iwm_send_mlme_profile(struct iwm_priv *iwm)
+{
+ int ret, i;
+ struct iwm_umac_profile profile;
+
+ memcpy(&profile, iwm->umac_profile, sizeof(profile));
+
+ profile.hdr.oid = UMAC_WIFI_IF_CMD_SET_PROFILE;
+ profile.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_profile) -
+ sizeof(struct iwm_umac_wifi_if));
+
+ ret = iwm_send_wifi_if_cmd(iwm, &profile, sizeof(profile), 1);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Send profile command failed\n");
+ return ret;
+ }
+
+ /* Wait for the profile to be active */
+ ret = wait_event_interruptible_timeout(iwm->mlme_queue,
+ iwm->umac_profile_active == 1,
+ 3 * HZ);
+ if (!ret)
+ return -EBUSY;
+
+
+ for (i = 0; i < IWM_NUM_KEYS; i++)
+ if (iwm->keys[i].in_use) {
+ int default_key = 0;
+ struct iwm_key *key = &iwm->keys[i];
+
+ if (key == iwm->default_key)
+ default_key = 1;
+
+ /* Wait for the profile before sending the keys */
+ wait_event_interruptible_timeout(iwm->mlme_queue,
+ (test_bit(IWM_STATUS_ASSOCIATING, &iwm->status) ||
+ test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)),
+ 3 * HZ);
+
+ ret = iwm_set_key(iwm, 0, default_key, key);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
+{
+ int ret;
+ struct iwm_umac_invalidate_profile invalid;
+
+ invalid.hdr.oid = UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE;
+ invalid.hdr.buf_size =
+ cpu_to_le16(sizeof(struct iwm_umac_invalidate_profile) -
+ sizeof(struct iwm_umac_wifi_if));
+
+ invalid.reason = WLAN_REASON_UNSPECIFIED;
+
+ ret = iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_event_interruptible_timeout(iwm->mlme_queue,
+ (iwm->umac_profile_active == 0),
+ 2 * HZ);
+ if (!ret)
+ return -EBUSY;
+
+ return 0;
+}
+
+int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags)
+{
+ struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
+ struct iwm_umac_cmd umac_cmd;
+ struct iwm_umac_cmd_stats_req stats_req;
+
+ stats_req.flags = cpu_to_le32(flags);
+
+ umac_cmd.id = UMAC_CMD_OPCODE_STATISTIC_REQUEST;
+ umac_cmd.resp = 0;
+
+ return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &stats_req,
+ sizeof(struct iwm_umac_cmd_stats_req));
+}
+
+int iwm_send_umac_channel_list(struct iwm_priv *iwm)
+{
+ struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
+ struct iwm_umac_cmd umac_cmd;
+ struct iwm_umac_cmd_get_channel_list *ch_list;
+ int size = sizeof(struct iwm_umac_cmd_get_channel_list) +
+ sizeof(struct iwm_umac_channel_info) * 4;
+ int ret;
+
+ ch_list = kzalloc(size, GFP_KERNEL);
+ if (!ch_list) {
+ IWM_ERR(iwm, "Couldn't allocate channel list cmd\n");
+ return -ENOMEM;
+ }
+
+ ch_list->ch[0].band = UMAC_BAND_2GHZ;
+ ch_list->ch[0].type = UMAC_CHANNEL_WIDTH_20MHZ;
+ ch_list->ch[0].flags = UMAC_CHANNEL_FLAG_VALID;
+
+ ch_list->ch[1].band = UMAC_BAND_5GHZ;
+ ch_list->ch[1].type = UMAC_CHANNEL_WIDTH_20MHZ;
+ ch_list->ch[1].flags = UMAC_CHANNEL_FLAG_VALID;
+
+ ch_list->ch[2].band = UMAC_BAND_2GHZ;
+ ch_list->ch[2].type = UMAC_CHANNEL_WIDTH_20MHZ;
+ ch_list->ch[2].flags = UMAC_CHANNEL_FLAG_VALID | UMAC_CHANNEL_FLAG_IBSS;
+
+ ch_list->ch[3].band = UMAC_BAND_5GHZ;
+ ch_list->ch[3].type = UMAC_CHANNEL_WIDTH_20MHZ;
+ ch_list->ch[3].flags = UMAC_CHANNEL_FLAG_VALID | UMAC_CHANNEL_FLAG_IBSS;
+
+ ch_list->count = cpu_to_le16(4);
+
+ umac_cmd.id = UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST;
+ umac_cmd.resp = 1;
+
+ ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, ch_list, size);
+
+ kfree(ch_list);
+
+ return ret;
+}
+
+int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
+ int ssid_num)
+{
+ struct iwm_umac_cmd_scan_request req;
+ int i, ret;
+
+ memset(&req, 0, sizeof(struct iwm_umac_cmd_scan_request));
+
+ req.hdr.oid = UMAC_WIFI_IF_CMD_SCAN_REQUEST;
+ req.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_cmd_scan_request)
+ - sizeof(struct iwm_umac_wifi_if));
+ req.type = UMAC_WIFI_IF_SCAN_TYPE_USER;
+ req.timeout = 2;
+ req.seq_num = iwm->scan_id;
+ req.ssid_num = min(ssid_num, UMAC_WIFI_IF_PROBE_OPTION_MAX);
+
+ for (i = 0; i < req.ssid_num; i++) {
+ memcpy(req.ssids[i].ssid, ssids[i].ssid, ssids[i].ssid_len);
+ req.ssids[i].ssid_len = ssids[i].ssid_len;
+ }
+
+ ret = iwm_send_wifi_if_cmd(iwm, &req, sizeof(req), 0);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't send scan request\n");
+ return ret;
+ }
+
+ iwm->scan_id = (iwm->scan_id + 1) % IWM_SCAN_ID_MAX;
+
+ return 0;
+}
+
+int iwm_scan_one_ssid(struct iwm_priv *iwm, u8 *ssid, int ssid_len)
+{
+ struct cfg80211_ssid one_ssid;
+
+ if (test_and_set_bit(IWM_STATUS_SCANNING, &iwm->status))
+ return 0;
+
+ one_ssid.ssid_len = min(ssid_len, IEEE80211_MAX_SSID_LEN);
+ memcpy(&one_ssid.ssid, ssid, one_ssid.ssid_len);
+
+ return iwm_scan_ssids(iwm, &one_ssid, 1);
+}
+
+int iwm_target_reset(struct iwm_priv *iwm)
+{
+ struct iwm_udma_nonwifi_cmd target_cmd;
+
+ target_cmd.opcode = UMAC_HDI_OUT_OPCODE_REBOOT;
+ target_cmd.addr = 0;
+ target_cmd.op1_sz = 0;
+ target_cmd.op2 = 0;
+ target_cmd.handle_by_hw = 0;
+ target_cmd.resp = 0;
+ target_cmd.eop = 1;
+
+ return iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
+}
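
[Editor's note] Much of the UMAC configuration in commands.c above goes through iwm_umac_set_config_fix(), which packs a (table, key, value) triple with explicit little-endian conversion before handing it to the HAL. Here is a small, self-contained userspace sketch of just that packing step, using the glibc <endian.h> helpers htole16()/htole32() in place of the kernel's cpu_to_le16()/cpu_to_le32(); the struct mirrors the shape of iwm_umac_cmd_set_param_fix from commands.h but is otherwise illustrative.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>	/* htole16()/htole32(), glibc */

/* Same shape as iwm_umac_cmd_set_param_fix: every field is
 * little-endian on the wire, whatever the host byte order is. */
struct set_param_fix {
	uint16_t tbl;
	uint16_t key;
	uint32_t value;
} __attribute__ ((packed));

static void pack_param_fix(uint8_t *out, uint16_t tbl, uint16_t key, uint32_t value)
{
	struct set_param_fix p;

	p.tbl = htole16(tbl);
	p.key = htole16(key);
	p.value = htole32(value);
	memcpy(out, &p, sizeof(p));
}

int main(void)
{
	uint8_t buf[sizeof(struct set_param_fix)];
	size_t i;

	/* Table 0x2 (UMAC_PARAM_TBL_CFG_FIX), key 6 (CFG_RTS_THRESHOLD
	 * in the fixed table from commands.h), value 2347. */
	pack_param_fix(buf, 0x2, 6, 2347);

	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}

Keeping the wire format fixed-endian like this is what lets the same firmware command stream work from both little- and big-endian hosts.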
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
new file mode 100644
index 00000000000..36b13a13059
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -0,0 +1,419 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#ifndef __IWM_COMMANDS_H__
+#define __IWM_COMMANDS_H__
+
+#include <linux/ieee80211.h>
+
+#define IWM_BARKER_REBOOT_NOTIFICATION 0xF
+#define IWM_ACK_BARKER_NOTIFICATION 0x10
+
+/* UMAC commands */
+#define UMAC_RST_CTRL_FLG_LARC_CLK_EN 0x0001
+#define UMAC_RST_CTRL_FLG_LARC_RESET 0x0002
+#define UMAC_RST_CTRL_FLG_FUNC_RESET 0x0004
+#define UMAC_RST_CTRL_FLG_DEV_RESET 0x0008
+#define UMAC_RST_CTRL_FLG_WIFI_CORE_EN 0x0010
+#define UMAC_RST_CTRL_FLG_WIFI_LINK_EN 0x0040
+#define UMAC_RST_CTRL_FLG_WIFI_MLME_EN 0x0080
+#define UMAC_RST_CTRL_FLG_NVM_RELOAD 0x0100
+
+struct iwm_umac_cmd_reset {
+ __le32 flags;
+} __attribute__ ((packed));
+
+#define UMAC_PARAM_TBL_ORD_FIX 0x0
+#define UMAC_PARAM_TBL_ORD_VAR 0x1
+#define UMAC_PARAM_TBL_CFG_FIX 0x2
+#define UMAC_PARAM_TBL_CFG_VAR 0x3
+#define UMAC_PARAM_TBL_BSS_TRK 0x4
+#define UMAC_PARAM_TBL_FA_CFG_FIX 0x5
+#define UMAC_PARAM_TBL_STA 0x6
+#define UMAC_PARAM_TBL_CHN 0x7
+#define UMAC_PARAM_TBL_STATISTICS 0x8
+
+/* fast access table */
+enum {
+ CFG_FRAG_THRESHOLD = 0,
+ CFG_FRAME_RETRY_LIMIT,
+ CFG_OS_QUEUE_UTIL_TH,
+ CFG_RX_FILTER,
+ /* <-- LAST --> */
+ FAST_ACCESS_CFG_TBL_FIX_LAST
+};
+
+/* fixed size table */
+enum {
+ CFG_POWER_INDEX = 0,
+ CFG_PM_LEGACY_RX_TIMEOUT,
+ CFG_PM_LEGACY_TX_TIMEOUT,
+ CFG_PM_CTRL_FLAGS,
+ CFG_PM_KEEP_ALIVE_IN_BEACONS,
+ CFG_BT_ON_THRESHOLD,
+ CFG_RTS_THRESHOLD,
+ CFG_CTS_TO_SELF,
+ CFG_COEX_MODE,
+ CFG_WIRELESS_MODE,
+ CFG_ASSOCIATION_TIMEOUT,
+ CFG_ROAM_TIMEOUT,
+ CFG_CAPABILITY_SUPPORTED_RATES,
+ CFG_SCAN_ALLOWED_UNASSOC_FLAGS,
+ CFG_SCAN_ALLOWED_MAIN_ASSOC_FLAGS,
+ CFG_SCAN_ALLOWED_PAN_ASSOC_FLAGS,
+ CFG_SCAN_INTERNAL_PERIODIC_ENABLED,
+ CFG_SCAN_IMM_INTERNAL_PERIODIC_SCAN_ON_INIT,
+ CFG_SCAN_DEFAULT_PERIODIC_FREQ_SEC,
+ CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN,
+ CFG_TLC_SUPPORTED_TX_HT_RATES,
+ CFG_TLC_SUPPORTED_TX_RATES,
+ CFG_TLC_VALID_ANTENNA,
+ CFG_TLC_SPATIAL_STREAM_SUPPORTED,
+ CFG_TLC_RETRY_PER_RATE,
+ CFG_TLC_RETRY_PER_HT_RATE,
+ CFG_TLC_FIXED_RATE,
+ CFG_TLC_FIXED_RATE_FLAGS,
+ CFG_TLC_CONTROL_FLAGS,
+ CFG_TLC_SR_MIN_FAIL,
+ CFG_TLC_SR_MIN_PASS,
+ CFG_TLC_HT_STAY_IN_COL_PASS_THRESH,
+ CFG_TLC_HT_STAY_IN_COL_FAIL_THRESH,
+ CFG_TLC_LEGACY_STAY_IN_COL_PASS_THRESH,
+ CFG_TLC_LEGACY_STAY_IN_COL_FAIL_THRESH,
+ CFG_TLC_HT_FLUSH_STATS_PACKETS,
+ CFG_TLC_LEGACY_FLUSH_STATS_PACKETS,
+ CFG_TLC_LEGACY_FLUSH_STATS_MS,
+ CFG_TLC_HT_FLUSH_STATS_MS,
+ CFG_TLC_STAY_IN_COL_TIME_OUT,
+ CFG_TLC_AGG_SHORT_LIM,
+ CFG_TLC_AGG_LONG_LIM,
+ CFG_TLC_HT_SR_NO_DECREASE,
+ CFG_TLC_LEGACY_SR_NO_DECREASE,
+ CFG_TLC_SR_FORCE_DECREASE,
+ CFG_TLC_SR_ALLOW_INCREASE,
+ CFG_TLC_AGG_SET_LONG,
+ CFG_TLC_AUTO_AGGREGATION,
+ CFG_TLC_AGG_THRESHOLD,
+ CFG_TLC_TID_LOAD_THRESHOLD,
+ CFG_TLC_BLOCK_ACK_TIMEOUT,
+ CFG_TLC_NO_BA_COUNTED_AS_ONE,
+ CFG_TLC_NUM_BA_STREAMS_ALLOWED,
+ CFG_TLC_NUM_BA_STREAMS_PRESENT,
+ CFG_TLC_RENEW_ADDBA_DELAY,
+ CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD,
+ CFG_TLC_IS_STABLE_IN_HT,
+ CFG_RLC_CHAIN_CTRL,
+ CFG_TRK_TABLE_OP_MODE,
+ CFG_TRK_TABLE_RSSI_THRESHOLD,
+ CFG_TX_PWR_TARGET, /* Used By xVT */
+ CFG_TX_PWR_LIMIT_USR,
+ CFG_TX_PWR_LIMIT_BSS, /* 11d limit */
+ CFG_TX_PWR_LIMIT_BSS_CONSTRAINT, /* 11h constraint */
+ CFG_TX_PWR_MODE,
+ CFG_MLME_DBG_NOTIF_BLOCK,
+ CFG_BT_OFF_BECONS_INTERVALS,
+ CFG_BT_FRAG_DURATION,
+
+ /* <-- LAST --> */
+ CFG_TBL_FIX_LAST
+};
+
+/* variable size table */
+enum {
+ CFG_NET_ADDR = 0,
+ CFG_PROFILE,
+ /* <-- LAST --> */
+ CFG_TBL_VAR_LAST
+};
+
+struct iwm_umac_cmd_set_param_fix {
+ __le16 tbl;
+ __le16 key;
+ __le32 value;
+} __attribute__ ((packed));
+
+struct iwm_umac_cmd_set_param_var {
+ __le16 tbl;
+ __le16 key;
+ __le16 len;
+ __le16 reserved;
+} __attribute__ ((packed));
+
+struct iwm_umac_cmd_get_param {
+ __le16 tbl;
+ __le16 key;
+} __attribute__ ((packed));
+
+struct iwm_umac_cmd_get_param_resp {
+ __le16 tbl;
+ __le16 key;
+ __le16 len;
+ __le16 reserved;
+} __attribute__ ((packed));
+
+struct iwm_umac_cmd_eeprom_proxy_hdr {
+ __le32 type;
+ __le32 offset;
+ __le32 len;
+} __attribute__ ((packed));
+
+struct iwm_umac_cmd_eeprom_proxy {
+ struct iwm_umac_cmd_eeprom_proxy_hdr hdr;
+ u8 buf[0];
+} __attribute__ ((packed));
+
+#define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1
+#define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2
+
+#define UMAC_CHANNEL_FLAG_VALID BIT(0)
+#define UMAC_CHANNEL_FLAG_IBSS BIT(1)
+#define UMAC_CHANNEL_FLAG_ACTIVE BIT(3)
+#define UMAC_CHANNEL_FLAG_RADAR BIT(4)
+#define UMAC_CHANNEL_FLAG_DFS BIT(7)
+
+struct iwm_umac_channel_info {
+ u8 band;
+ u8 type;
+ u8 reserved;
+ u8 flags;
+ __le32 channels_mask;
+} __attribute__ ((packed));
+
+struct iwm_umac_cmd_get_channel_list {
+ __le16 count;
+ __le16 reserved;
+ struct iwm_umac_channel_info ch[0];
+} __attribute__ ((packed));
+
+
+/* UMAC WiFi interface commands */
+
+/* Coexistence mode */
+#define COEX_MODE_SA 0x1
+#define COEX_MODE_XOR 0x2
+#define COEX_MODE_CM 0x3
+#define COEX_MODE_MAX 0x4
+
+/* Wireless mode */
+#define WIRELESS_MODE_11A 0x1
+#define WIRELESS_MODE_11G 0x2
+
+#define UMAC_PROFILE_EX_IE_REQUIRED 0x1
+#define UMAC_PROFILE_QOS_ALLOWED 0x2
+
+/* Scanning */
+#define UMAC_WIFI_IF_PROBE_OPTION_MAX 10
+
+#define UMAC_WIFI_IF_SCAN_TYPE_USER 0x0
+#define UMAC_WIFI_IF_SCAN_TYPE_UMAC_RESERVED 0x1
+#define UMAC_WIFI_IF_SCAN_TYPE_HOST_PERIODIC 0x2
+#define UMAC_WIFI_IF_SCAN_TYPE_MAX 0x3
+
+struct iwm_umac_ssid {
+ u8 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 reserved[3];
+} __attribute__ ((packed));
+
+struct iwm_umac_cmd_scan_request {
+ struct iwm_umac_wifi_if hdr;
+ __le32 type; /* UMAC_WIFI_IF_SCAN_TYPE_* */
+ u8 ssid_num;
+ u8 seq_num;
+ u8 timeout; /* In seconds */
+ u8 reserved;
+ struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX];
+} __attribute__ ((packed));
+
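An editor's sketch (not part of the patch) of how the scan structures above fit together: ssids[] holds up to UMAC_WIFI_IF_PROBE_OPTION_MAX entries, and the iwm_scan_ssids() helper declared later in this header presumably fills them from cfg80211_ssid descriptors along these lines.

/* Editor's illustration only, not part of this patch. */
static void iwm_example_fill_scan_ssids(struct iwm_umac_cmd_scan_request *req,
					struct cfg80211_ssid *ssids,
					int ssid_num)
{
	int i;

	if (ssid_num > UMAC_WIFI_IF_PROBE_OPTION_MAX)
		ssid_num = UMAC_WIFI_IF_PROBE_OPTION_MAX;

	req->type = cpu_to_le32(UMAC_WIFI_IF_SCAN_TYPE_USER);
	req->ssid_num = ssid_num;

	for (i = 0; i < ssid_num; i++) {
		req->ssids[i].ssid_len = ssids[i].ssid_len;
		memcpy(req->ssids[i].ssid, ssids[i].ssid, ssids[i].ssid_len);
	}
}
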
+#define UMAC_CIPHER_TYPE_NONE 0xFF
+#define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00
+#define UMAC_CIPHER_TYPE_WEP_40 0x01
+#define UMAC_CIPHER_TYPE_WEP_104 0x02
+#define UMAC_CIPHER_TYPE_TKIP 0x04
+#define UMAC_CIPHER_TYPE_CCMP 0x08
+
+/* Supported authentication types - bitmap */
+#define UMAC_AUTH_TYPE_OPEN 0x00
+#define UMAC_AUTH_TYPE_LEGACY_PSK 0x01
+#define UMAC_AUTH_TYPE_8021X 0x02
+#define UMAC_AUTH_TYPE_RSNA_PSK 0x04
+
+/* iwm_umac_security.flags is WPA supported -- bits [0:0] */
+#define UMAC_SEC_FLG_WPA_ON_POS 0
+#define UMAC_SEC_FLG_WPA_ON_SEED 1
+#define UMAC_SEC_FLG_WPA_ON_MSK (UMAC_SEC_FLG_WPA_ON_SEED << \
+ UMAC_SEC_FLG_WPA_ON_POS)
+
+/* iwm_umac_security.flags is WPA2 supported -- bits [1:1] */
+#define UMAC_SEC_FLG_RSNA_ON_POS 1
+#define UMAC_SEC_FLG_RSNA_ON_SEED 1
+#define UMAC_SEC_FLG_RSNA_ON_MSK (UMAC_SEC_FLG_RSNA_ON_SEED << \
+ UMAC_SEC_FLG_RSNA_ON_POS)
+
+/* iwm_umac_security.flags is WSC mode on -- bits [2:2] */
+#define UMAC_SEC_FLG_WSC_ON_POS 2
+#define UMAC_SEC_FLG_WSC_ON_SEED 1
+
+/* Legacy profile can use only WEP40 and WEP104 for encryption and
+ * OPEN or PSK for authentication */
+#define UMAC_SEC_FLG_LEGACY_PROFILE 0
+
+struct iwm_umac_security {
+ u8 auth_type;
+ u8 ucast_cipher;
+ u8 mcast_cipher;
+ u8 flags;
+} __attribute__ ((packed));
+
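Purely as an illustration of how the cipher, authentication and flag constants above combine (an editor's sketch, not part of the patch), a WPA2-PSK/CCMP profile and a legacy WEP profile could be described as follows.

/* Editor's illustration only, not part of this patch. */
static const struct iwm_umac_security iwm_example_sec_wpa2_psk = {
	.auth_type = UMAC_AUTH_TYPE_RSNA_PSK,
	.ucast_cipher = UMAC_CIPHER_TYPE_CCMP,
	.mcast_cipher = UMAC_CIPHER_TYPE_CCMP,
	.flags = UMAC_SEC_FLG_RSNA_ON_MSK, /* bit 1: WPA2/RSNA supported */
};

static const struct iwm_umac_security iwm_example_sec_legacy_wep = {
	.auth_type = UMAC_AUTH_TYPE_OPEN,
	.ucast_cipher = UMAC_CIPHER_TYPE_WEP_104,
	.mcast_cipher = UMAC_CIPHER_TYPE_WEP_104,
	.flags = UMAC_SEC_FLG_LEGACY_PROFILE, /* neither WPA nor WPA2 */
};
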
+struct iwm_umac_ibss {
+ u8 beacon_interval; /* in milliseconds */
+ u8 atim; /* in milliseconds */
+ s8 join_only;
+ u8 band;
+ u8 channel;
+ u8 reserved[3];
+} __attribute__ ((packed));
+
+#define UMAC_MODE_BSS 0
+#define UMAC_MODE_IBSS 1
+
+#define UMAC_BSSID_MAX 4
+
+struct iwm_umac_profile {
+ struct iwm_umac_wifi_if hdr;
+ __le32 mode;
+ struct iwm_umac_ssid ssid;
+ u8 bssid[UMAC_BSSID_MAX][ETH_ALEN];
+ struct iwm_umac_security sec;
+ struct iwm_umac_ibss ibss;
+ __le32 channel_2ghz;
+ __le32 channel_5ghz;
+ __le16 flags;
+ u8 wireless_mode;
+ u8 bss_num;
+} __attribute__ ((packed));
+
+struct iwm_umac_invalidate_profile {
+ struct iwm_umac_wifi_if hdr;
+ u8 reason;
+ u8 reserved[3];
+} __attribute__ ((packed));
+
+/* Encryption key commands */
+struct iwm_umac_key_wep40 {
+ struct iwm_umac_wifi_if hdr;
+ struct iwm_umac_key_hdr key_hdr;
+ u8 key[WLAN_KEY_LEN_WEP40];
+ u8 static_key;
+ u8 reserved[2];
+} __attribute__ ((packed));
+
+struct iwm_umac_key_wep104 {
+ struct iwm_umac_wifi_if hdr;
+ struct iwm_umac_key_hdr key_hdr;
+ u8 key[WLAN_KEY_LEN_WEP104];
+ u8 static_key;
+ u8 reserved[2];
+} __attribute__ ((packed));
+
+#define IWM_TKIP_KEY_SIZE 16
+#define IWM_TKIP_MIC_SIZE 8
+struct iwm_umac_key_tkip {
+ struct iwm_umac_wifi_if hdr;
+ struct iwm_umac_key_hdr key_hdr;
+ u8 iv_count[6];
+ u8 reserved[2];
+ u8 tkip_key[IWM_TKIP_KEY_SIZE];
+ u8 mic_rx_key[IWM_TKIP_MIC_SIZE];
+ u8 mic_tx_key[IWM_TKIP_MIC_SIZE];
+} __attribute__ ((packed));
+
+struct iwm_umac_key_ccmp {
+ struct iwm_umac_wifi_if hdr;
+ struct iwm_umac_key_hdr key_hdr;
+ u8 iv_count[6];
+ u8 reserved[2];
+ u8 key[WLAN_KEY_LEN_CCMP];
+} __attribute__ ((packed));
+
+struct iwm_umac_key_remove {
+ struct iwm_umac_wifi_if hdr;
+ struct iwm_umac_key_hdr key_hdr;
+} __attribute__ ((packed));
+
+struct iwm_umac_tx_key_id {
+ struct iwm_umac_wifi_if hdr;
+ u8 key_idx;
+ u8 reserved[3];
+} __attribute__ ((packed));
+
+struct iwm_umac_cmd_stats_req {
+ __le32 flags;
+} __attribute__ ((packed));
+
+/* LMAC commands */
+int iwm_read_mac(struct iwm_priv *iwm, u8 *mac);
+int iwm_send_prio_table(struct iwm_priv *iwm);
+int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
+int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
+int iwm_send_calib_results(struct iwm_priv *iwm);
+int iwm_store_rxiq_calib_result(struct iwm_priv *iwm);
+
+/* UMAC commands */
+int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
+ bool resp);
+int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp);
+int iwm_umac_set_config_fix(struct iwm_priv *iwm, u16 tbl, u16 key, u32 value);
+int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
+ void *payload, u16 payload_size);
+int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags);
+int iwm_send_mlme_profile(struct iwm_priv *iwm);
+int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
+int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
+int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
+int iwm_set_key(struct iwm_priv *iwm, bool remove, bool set_tx_key,
+ struct iwm_key *key);
+int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags);
+int iwm_send_umac_channel_list(struct iwm_priv *iwm);
+int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
+ int ssid_num);
+int iwm_scan_one_ssid(struct iwm_priv *iwm, u8 *ssid, int ssid_len);
+
+/* UDMA commands */
+int iwm_target_reset(struct iwm_priv *iwm);
+#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/debug.h b/drivers/net/wireless/iwmc3200wifi/debug.h
new file mode 100644
index 00000000000..8fbb42d9c21
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/debug.h
@@ -0,0 +1,124 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __IWM_DEBUG_H__
+#define __IWM_DEBUG_H__
+
+#define IWM_ERR(p, f, a...) dev_err(iwm_to_dev(p), f, ## a)
+#define IWM_WARN(p, f, a...) dev_warn(iwm_to_dev(p), f, ## a)
+#define IWM_INFO(p, f, a...) dev_info(iwm_to_dev(p), f, ## a)
+#define IWM_CRIT(p, f, a...) dev_crit(iwm_to_dev(p), f, ## a)
+
+#ifdef CONFIG_IWM_DEBUG
+
+#define IWM_DEBUG_MODULE(i, level, module, f, a...) \
+do { \
+ if (unlikely(i->dbg.dbg_module[IWM_DM_##module] >= (IWM_DL_##level)))\
+ dev_printk(KERN_INFO, (iwm_to_dev(i)), \
+ "%s " f, __func__ , ## a); \
+} while (0)
+
+#define IWM_HEXDUMP(i, level, module, pref, buf, len) \
+do { \
+ if (unlikely(i->dbg.dbg_module[IWM_DM_##module] >= (IWM_DL_##level)))\
+ print_hex_dump(KERN_INFO, pref, DUMP_PREFIX_OFFSET, \
+ 16, 1, buf, len, 1); \
+} while (0)
+
+#else
+
+#define IWM_DEBUG_MODULE(i, level, module, f, a...)
+#define IWM_HEXDUMP(i, level, module, pref, buf, len)
+
+#endif /* CONFIG_IWM_DEBUG */
+
+/* Debug modules */
+enum iwm_debug_module_id {
+ IWM_DM_BOOT = 0,
+ IWM_DM_FW,
+ IWM_DM_SDIO,
+ IWM_DM_NTF,
+ IWM_DM_RX,
+ IWM_DM_TX,
+ IWM_DM_MLME,
+ IWM_DM_CMD,
+ IWM_DM_WEXT,
+ __IWM_DM_NR,
+};
+#define IWM_DM_DEFAULT 0
+
+#define IWM_DBG_BOOT(i, l, f, a...) IWM_DEBUG_MODULE(i, l, BOOT, f, ## a)
+#define IWM_DBG_FW(i, l, f, a...) IWM_DEBUG_MODULE(i, l, FW, f, ## a)
+#define IWM_DBG_SDIO(i, l, f, a...) IWM_DEBUG_MODULE(i, l, SDIO, f, ## a)
+#define IWM_DBG_NTF(i, l, f, a...) IWM_DEBUG_MODULE(i, l, NTF, f, ## a)
+#define IWM_DBG_RX(i, l, f, a...) IWM_DEBUG_MODULE(i, l, RX, f, ## a)
+#define IWM_DBG_TX(i, l, f, a...) IWM_DEBUG_MODULE(i, l, TX, f, ## a)
+#define IWM_DBG_MLME(i, l, f, a...) IWM_DEBUG_MODULE(i, l, MLME, f, ## a)
+#define IWM_DBG_CMD(i, l, f, a...) IWM_DEBUG_MODULE(i, l, CMD, f, ## a)
+#define IWM_DBG_WEXT(i, l, f, a...) IWM_DEBUG_MODULE(i, l, WEXT, f, ## a)
+
+/* Debug levels */
+enum iwm_debug_level {
+ IWM_DL_NONE = 0,
+ IWM_DL_ERR,
+ IWM_DL_WARN,
+ IWM_DL_INFO,
+ IWM_DL_DBG,
+};
+#define IWM_DL_DEFAULT IWM_DL_ERR
+
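A short usage sketch for the macros above (an editor's illustration, not part of the patch): IWM_ERR/IWM_WARN/IWM_INFO always print through the dev_* helpers, while the per-module IWM_DBG_* and IWM_HEXDUMP macros only emit when CONFIG_IWM_DEBUG is set and the level stored in dbg.dbg_module[] for that module is at least the level passed in.

/* Editor's illustration only, not part of this patch. */
static void iwm_example_debug_usage(struct iwm_priv *iwm, int ret,
				    const void *buf, size_t len)
{
	/* Always printed, via dev_err() */
	IWM_ERR(iwm, "firmware request failed: %d\n", ret);

	/* Printed only if dbg_module[IWM_DM_TX] >= IWM_DL_INFO */
	IWM_DBG_TX(iwm, INFO, "sending %zu bytes\n", len);

	/* Hex dump gated on the RX module reaching IWM_DL_DBG */
	IWM_HEXDUMP(iwm, DBG, RX, "frame: ", buf, len);
}
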
+struct iwm_debugfs {
+ struct iwm_priv *iwm;
+ struct dentry *rootdir;
+ struct dentry *devdir;
+ struct dentry *dbgdir;
+ struct dentry *txdir;
+ struct dentry *rxdir;
+ struct dentry *busdir;
+
+ u32 dbg_level;
+ struct dentry *dbg_level_dentry;
+
+ unsigned long dbg_modules;
+ struct dentry *dbg_modules_dentry;
+
+ u8 dbg_module[__IWM_DM_NR];
+ struct dentry *dbg_module_dentries[__IWM_DM_NR];
+
+ struct dentry *txq_dentry;
+ struct dentry *tx_credit_dentry;
+ struct dentry *rx_ticket_dentry;
+};
+
+#ifdef CONFIG_IWM_DEBUG
+int iwm_debugfs_init(struct iwm_priv *iwm);
+void iwm_debugfs_exit(struct iwm_priv *iwm);
+#else
+static inline int iwm_debugfs_init(struct iwm_priv *iwm)
+{
+ return 0;
+}
+static inline void iwm_debugfs_exit(struct iwm_priv *iwm) {}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
new file mode 100644
index 00000000000..0fa7b9150d5
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -0,0 +1,453 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+
+#include "iwm.h"
+#include "bus.h"
+#include "rx.h"
+#include "debug.h"
+
+static struct {
+ u8 id;
+ char *name;
+} iwm_debug_module[__IWM_DM_NR] = {
+ {IWM_DM_BOOT, "boot"},
+ {IWM_DM_FW, "fw"},
+ {IWM_DM_SDIO, "sdio"},
+ {IWM_DM_NTF, "ntf"},
+ {IWM_DM_RX, "rx"},
+ {IWM_DM_TX, "tx"},
+ {IWM_DM_MLME, "mlme"},
+ {IWM_DM_CMD, "cmd"},
+ {IWM_DM_WEXT, "wext"},
+};
+
+#define add_dbg_module(dbg, name, id, initlevel) \
+do { \
+ struct dentry *d; \
+ dbg.dbg_module[id] = (initlevel); \
+ d = debugfs_create_x8(name, 0600, dbg.dbgdir, \
+ &(dbg.dbg_module[id])); \
+ if (!IS_ERR(d)) \
+ dbg.dbg_module_dentries[id] = d; \
+} while (0)
+
+static int iwm_debugfs_u32_read(void *data, u64 *val)
+{
+ struct iwm_priv *iwm = data;
+
+ *val = iwm->dbg.dbg_level;
+ return 0;
+}
+
+static int iwm_debugfs_dbg_level_write(void *data, u64 val)
+{
+ struct iwm_priv *iwm = data;
+ int i;
+
+ iwm->dbg.dbg_level = val;
+
+ for (i = 0; i < __IWM_DM_NR; i++)
+ iwm->dbg.dbg_module[i] = val;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_iwm_dbg_level,
+ iwm_debugfs_u32_read, iwm_debugfs_dbg_level_write,
+ "%llu\n");
+
+static int iwm_debugfs_dbg_modules_write(void *data, u64 val)
+{
+ struct iwm_priv *iwm = data;
+ int i, bit;
+
+ iwm->dbg.dbg_modules = val;
+
+ for (i = 0; i < __IWM_DM_NR; i++)
+ iwm->dbg.dbg_module[i] = 0;
+
+ for_each_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
+ iwm->dbg.dbg_module[bit] = iwm->dbg.dbg_level;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_iwm_dbg_modules,
+ iwm_debugfs_u32_read, iwm_debugfs_dbg_modules_write,
+ "%llu\n");
+
+static int iwm_txrx_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+
+static ssize_t iwm_debugfs_txq_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct iwm_priv *iwm = filp->private_data;
+ char *buf;
+ int i, buf_len = 4096;
+ size_t len = 0;
+ ssize_t ret;
+
+ if (*ppos != 0)
+ return 0;
+ if (count < sizeof(buf))
+ return -ENOSPC;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < IWM_TX_QUEUES; i++) {
+ struct iwm_tx_queue *txq = &iwm->txq[i];
+ struct sk_buff *skb;
+ int j;
+ unsigned long flags;
+
+ spin_lock_irqsave(&txq->queue.lock, flags);
+
+ skb = (struct sk_buff *)&txq->queue;
+
+ len += snprintf(buf + len, buf_len - len, "TXQ #%d\n", i);
+ len += snprintf(buf + len, buf_len - len, "\tStopped: %d\n",
+ __netif_subqueue_stopped(iwm_to_ndev(iwm),
+ txq->id));
+ len += snprintf(buf + len, buf_len - len, "\tConcat count:%d\n",
+ txq->concat_count);
+ len += snprintf(buf + len, buf_len - len, "\tQueue len: %d\n",
+ skb_queue_len(&txq->queue));
+ for (j = 0; j < skb_queue_len(&txq->queue); j++) {
+ struct iwm_tx_info *tx_info;
+
+ skb = skb->next;
+ tx_info = skb_to_tx_info(skb);
+
+ len += snprintf(buf + len, buf_len - len,
+ "\tSKB #%d\n", j);
+ len += snprintf(buf + len, buf_len - len,
+ "\t\tsta: %d\n", tx_info->sta);
+ len += snprintf(buf + len, buf_len - len,
+ "\t\tcolor: %d\n", tx_info->color);
+ len += snprintf(buf + len, buf_len - len,
+ "\t\ttid: %d\n", tx_info->tid);
+ }
+
+ spin_unlock_irqrestore(&txq->queue.lock, flags);
+ }
+
+ ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t iwm_debugfs_tx_credit_read(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct iwm_priv *iwm = filp->private_data;
+ struct iwm_tx_credit *credit = &iwm->tx_credit;
+ char *buf;
+ int i, buf_len = 4096;
+ size_t len = 0;
+ ssize_t ret;
+
+ if (*ppos != 0)
+ return 0;
+ if (count < sizeof(buf))
+ return -ENOSPC;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += snprintf(buf + len, buf_len - len,
+ "NR pools: %d\n", credit->pool_nr);
+ len += snprintf(buf + len, buf_len - len,
+ "pools map: 0x%lx\n", credit->full_pools_map);
+
+ len += snprintf(buf + len, buf_len - len, "\n### POOLS ###\n");
+ for (i = 0; i < IWM_MACS_OUT_GROUPS; i++) {
+ len += snprintf(buf + len, buf_len - len,
+ "pools entry #%d\n", i);
+ len += snprintf(buf + len, buf_len - len,
+ "\tid: %d\n",
+ credit->pools[i].id);
+ len += snprintf(buf + len, buf_len - len,
+ "\tsid: %d\n",
+ credit->pools[i].sid);
+ len += snprintf(buf + len, buf_len - len,
+ "\tmin_pages: %d\n",
+ credit->pools[i].min_pages);
+ len += snprintf(buf + len, buf_len - len,
+ "\tmax_pages: %d\n",
+ credit->pools[i].max_pages);
+ len += snprintf(buf + len, buf_len - len,
+ "\talloc_pages: %d\n",
+ credit->pools[i].alloc_pages);
+ len += snprintf(buf + len, buf_len - len,
+ "\tfreed_pages: %d\n",
+ credit->pools[i].total_freed_pages);
+ }
+
+ len += snprintf(buf + len, buf_len - len, "\n### SPOOLS ###\n");
+ for (i = 0; i < IWM_MACS_OUT_SGROUPS; i++) {
+ len += snprintf(buf + len, buf_len - len,
+ "spools entry #%d\n", i);
+ len += snprintf(buf + len, buf_len - len,
+ "\tid: %d\n",
+ credit->spools[i].id);
+ len += snprintf(buf + len, buf_len - len,
+ "\tmax_pages: %d\n",
+ credit->spools[i].max_pages);
+ len += snprintf(buf + len, buf_len - len,
+ "\talloc_pages: %d\n",
+ credit->spools[i].alloc_pages);
+
+ }
+
+ ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct iwm_priv *iwm = filp->private_data;
+ struct iwm_rx_ticket_node *ticket, *next;
+ char *buf;
+ int buf_len = 4096, i;
+ size_t len = 0;
+ ssize_t ret;
+
+ if (*ppos != 0)
+ return 0;
+ if (count < sizeof(buf))
+ return -ENOSPC;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
+ len += snprintf(buf + len, buf_len - len, "Ticket #%d\n",
+ ticket->ticket->id);
+ len += snprintf(buf + len, buf_len - len, "\taction: 0x%x\n",
+ ticket->ticket->action);
+ len += snprintf(buf + len, buf_len - len, "\tflags: 0x%x\n",
+ ticket->ticket->flags);
+ }
+
+ for (i = 0; i < IWM_RX_ID_HASH; i++) {
+ struct iwm_rx_packet *packet, *nxt;
+ struct list_head *pkt_list = &iwm->rx_packets[i];
+ if (!list_empty(pkt_list)) {
+ len += snprintf(buf + len, buf_len - len,
+ "Packet hash #%d\n", i);
+ list_for_each_entry_safe(packet, nxt, pkt_list, node) {
+ len += snprintf(buf + len, buf_len - len,
+ "\tPacket id: %d\n",
+ packet->id);
+ len += snprintf(buf + len, buf_len - len,
+ "\tPacket length: %lu\n",
+ packet->pkt_size);
+ }
+ }
+ }
+
+ ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
+ kfree(buf);
+
+ return ret;
+}
+
+
+static const struct file_operations iwm_debugfs_txq_fops = {
+ .owner = THIS_MODULE,
+ .open = iwm_txrx_open,
+ .read = iwm_debugfs_txq_read,
+};
+
+static const struct file_operations iwm_debugfs_tx_credit_fops = {
+ .owner = THIS_MODULE,
+ .open = iwm_txrx_open,
+ .read = iwm_debugfs_tx_credit_read,
+};
+
+static const struct file_operations iwm_debugfs_rx_ticket_fops = {
+ .owner = THIS_MODULE,
+ .open = iwm_txrx_open,
+ .read = iwm_debugfs_rx_ticket_read,
+};
+
+int iwm_debugfs_init(struct iwm_priv *iwm)
+{
+ int i, result;
+ char devdir[16];
+
+ iwm->dbg.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ result = PTR_ERR(iwm->dbg.rootdir);
+ if (!result || IS_ERR(iwm->dbg.rootdir)) {
+ if (result == -ENODEV) {
+ IWM_ERR(iwm, "DebugFS (CONFIG_DEBUG_FS) not "
+ "enabled in kernel config\n");
+ result = 0; /* No debugfs support */
+ }
+ IWM_ERR(iwm, "Couldn't create rootdir: %d\n", result);
+ goto error;
+ }
+
+ snprintf(devdir, sizeof(devdir), "%s", wiphy_name(iwm_to_wiphy(iwm)));
+
+ iwm->dbg.devdir = debugfs_create_dir(devdir, iwm->dbg.rootdir);
+ result = PTR_ERR(iwm->dbg.devdir);
+ if (IS_ERR(iwm->dbg.devdir) && (result != -ENODEV)) {
+ IWM_ERR(iwm, "Couldn't create devdir: %d\n", result);
+ goto error;
+ }
+
+ iwm->dbg.dbgdir = debugfs_create_dir("debug", iwm->dbg.devdir);
+ result = PTR_ERR(iwm->dbg.dbgdir);
+ if (IS_ERR(iwm->dbg.dbgdir) && (result != -ENODEV)) {
+ IWM_ERR(iwm, "Couldn't create dbgdir: %d\n", result);
+ goto error;
+ }
+
+ iwm->dbg.rxdir = debugfs_create_dir("rx", iwm->dbg.devdir);
+ result = PTR_ERR(iwm->dbg.rxdir);
+ if (IS_ERR(iwm->dbg.rxdir) && (result != -ENODEV)) {
+ IWM_ERR(iwm, "Couldn't create rx dir: %d\n", result);
+ goto error;
+ }
+
+ iwm->dbg.txdir = debugfs_create_dir("tx", iwm->dbg.devdir);
+ result = PTR_ERR(iwm->dbg.txdir);
+ if (IS_ERR(iwm->dbg.txdir) && (result != -ENODEV)) {
+ IWM_ERR(iwm, "Couldn't create tx dir: %d\n", result);
+ goto error;
+ }
+
+ iwm->dbg.busdir = debugfs_create_dir("bus", iwm->dbg.devdir);
+ result = PTR_ERR(iwm->dbg.busdir);
+ if (IS_ERR(iwm->dbg.busdir) && (result != -ENODEV)) {
+ IWM_ERR(iwm, "Couldn't create bus dir: %d\n", result);
+ goto error;
+ }
+
+ if (iwm->bus_ops->debugfs_init) {
+ result = iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
+ if (result < 0) {
+ IWM_ERR(iwm, "Couldn't create bus entry: %d\n", result);
+ goto error;
+ }
+ }
+
+
+ iwm->dbg.dbg_level = IWM_DL_NONE;
+ iwm->dbg.dbg_level_dentry =
+ debugfs_create_file("level", 0200, iwm->dbg.dbgdir, iwm,
+ &fops_iwm_dbg_level);
+ result = PTR_ERR(iwm->dbg.dbg_level_dentry);
+ if (IS_ERR(iwm->dbg.dbg_level_dentry) && (result != -ENODEV)) {
+ IWM_ERR(iwm, "Couldn't create dbg_level: %d\n", result);
+ goto error;
+ }
+
+
+ iwm->dbg.dbg_modules = IWM_DM_DEFAULT;
+ iwm->dbg.dbg_modules_dentry =
+ debugfs_create_file("modules", 0200, iwm->dbg.dbgdir, iwm,
+ &fops_iwm_dbg_modules);
+ result = PTR_ERR(iwm->dbg.dbg_modules_dentry);
+ if (IS_ERR(iwm->dbg.dbg_modules_dentry) && (result != -ENODEV)) {
+ IWM_ERR(iwm, "Couldn't create dbg_modules: %d\n", result);
+ goto error;
+ }
+
+ for (i = 0; i < __IWM_DM_NR; i++)
+ add_dbg_module(iwm->dbg, iwm_debug_module[i].name,
+ iwm_debug_module[i].id, IWM_DL_DEFAULT);
+
+ iwm->dbg.txq_dentry = debugfs_create_file("queues", 0200,
+ iwm->dbg.txdir, iwm,
+ &iwm_debugfs_txq_fops);
+ result = PTR_ERR(iwm->dbg.txq_dentry);
+ if (IS_ERR(iwm->dbg.txq_dentry) && (result != -ENODEV)) {
+ IWM_ERR(iwm, "Couldn't create tx queue: %d\n", result);
+ goto error;
+ }
+
+ iwm->dbg.tx_credit_dentry = debugfs_create_file("credits", 0200,
+ iwm->dbg.txdir, iwm,
+ &iwm_debugfs_tx_credit_fops);
+ result = PTR_ERR(iwm->dbg.tx_credit_dentry);
+ if (IS_ERR(iwm->dbg.tx_credit_dentry) && (result != -ENODEV)) {
+ IWM_ERR(iwm, "Couldn't create tx credit: %d\n", result);
+ goto error;
+ }
+
+ iwm->dbg.rx_ticket_dentry = debugfs_create_file("tickets", 0200,
+ iwm->dbg.rxdir, iwm,
+ &iwm_debugfs_rx_ticket_fops);
+ result = PTR_ERR(iwm->dbg.rx_ticket_dentry);
+ if (IS_ERR(iwm->dbg.rx_ticket_dentry) && (result != -ENODEV)) {
+ IWM_ERR(iwm, "Couldn't create rx ticket: %d\n", result);
+ goto error;
+ }
+
+ return 0;
+
+ error:
+ return result;
+}
+
+void iwm_debugfs_exit(struct iwm_priv *iwm)
+{
+ int i;
+
+ for (i = 0; i < __IWM_DM_NR; i++)
+ debugfs_remove(iwm->dbg.dbg_module_dentries[i]);
+
+ debugfs_remove(iwm->dbg.dbg_modules_dentry);
+ debugfs_remove(iwm->dbg.dbg_level_dentry);
+ debugfs_remove(iwm->dbg.txq_dentry);
+ debugfs_remove(iwm->dbg.tx_credit_dentry);
+ debugfs_remove(iwm->dbg.rx_ticket_dentry);
+ if (iwm->bus_ops->debugfs_exit)
+ iwm->bus_ops->debugfs_exit(iwm);
+
+ debugfs_remove(iwm->dbg.busdir);
+ debugfs_remove(iwm->dbg.dbgdir);
+ debugfs_remove(iwm->dbg.txdir);
+ debugfs_remove(iwm->dbg.rxdir);
+ debugfs_remove(iwm->dbg.devdir);
+ debugfs_remove(iwm->dbg.rootdir);
+}
diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.c b/drivers/net/wireless/iwmc3200wifi/eeprom.c
new file mode 100644
index 00000000000..0f34b84fd2e
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/eeprom.c
@@ -0,0 +1,187 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include "iwm.h"
+#include "umac.h"
+#include "commands.h"
+#include "eeprom.h"
+
+static struct iwm_eeprom_entry eeprom_map[] = {
+ [IWM_EEPROM_SIG] =
+ {"Signature", IWM_EEPROM_SIG_OFF, IWM_EEPROM_SIG_LEN},
+
+ [IWM_EEPROM_VERSION] =
+ {"Version", IWM_EEPROM_VERSION_OFF, IWM_EEPROM_VERSION_LEN},
+
+ [IWM_EEPROM_OEM_HW_VERSION] =
+ {"OEM HW version", IWM_EEPROM_OEM_HW_VERSION_OFF,
+ IWM_EEPROM_OEM_HW_VERSION_LEN},
+
+ [IWM_EEPROM_MAC_VERSION] =
+ {"MAC version", IWM_EEPROM_MAC_VERSION_OFF, IWM_EEPROM_MAC_VERSION_LEN},
+
+ [IWM_EEPROM_CARD_ID] =
+ {"Card ID", IWM_EEPROM_CARD_ID_OFF, IWM_EEPROM_CARD_ID_LEN},
+
+ [IWM_EEPROM_RADIO_CONF] =
+ {"Radio config", IWM_EEPROM_RADIO_CONF_OFF, IWM_EEPROM_RADIO_CONF_LEN},
+
+ [IWM_EEPROM_SKU_CAP] =
+ {"SKU capabilities", IWM_EEPROM_SKU_CAP_OFF, IWM_EEPROM_SKU_CAP_LEN},
+
+ [IWM_EEPROM_CALIB_RXIQ_OFFSET] =
+ {"RX IQ offset", IWM_EEPROM_CALIB_RXIQ_OFF, IWM_EEPROM_INDIRECT_LEN},
+
+ [IWM_EEPROM_CALIB_RXIQ] =
+ {"Calib RX IQ", 0, IWM_EEPROM_CALIB_RXIQ_LEN},
+};
+
+
+static int iwm_eeprom_read(struct iwm_priv *iwm, u8 eeprom_id)
+{
+ int ret;
+ u32 entry_size, chunk_size, data_offset = 0, addr_offset = 0;
+ u32 addr;
+ struct iwm_udma_wifi_cmd udma_cmd;
+ struct iwm_umac_cmd umac_cmd;
+ struct iwm_umac_cmd_eeprom_proxy eeprom_cmd;
+
+ if (eeprom_id > (IWM_EEPROM_LAST - 1))
+ return -EINVAL;
+
+ entry_size = eeprom_map[eeprom_id].length;
+
+ if (eeprom_id >= IWM_EEPROM_INDIRECT_DATA) {
+ /* indirect data */
+ u32 off_id = eeprom_id - IWM_EEPROM_INDIRECT_DATA +
+ IWM_EEPROM_INDIRECT_OFFSET;
+
+ eeprom_map[eeprom_id].offset =
+ *(u16 *)(iwm->eeprom + eeprom_map[off_id].offset) << 1;
+ }
+
+ addr = eeprom_map[eeprom_id].offset;
+
+ udma_cmd.eop = 1;
+ udma_cmd.credit_group = 0x4;
+ udma_cmd.ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD;
+ udma_cmd.lmac_offset = 0;
+
+ umac_cmd.id = UMAC_CMD_OPCODE_EEPROM_PROXY;
+ umac_cmd.resp = 1;
+
+ while (entry_size > 0) {
+ chunk_size = min_t(u32, entry_size, IWM_MAX_EEPROM_DATA_LEN);
+
+ eeprom_cmd.hdr.type =
+ cpu_to_le32(IWM_UMAC_CMD_EEPROM_TYPE_READ);
+ eeprom_cmd.hdr.offset = cpu_to_le32(addr + addr_offset);
+ eeprom_cmd.hdr.len = cpu_to_le32(chunk_size);
+
+ ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd,
+ &umac_cmd, &eeprom_cmd,
+ sizeof(struct iwm_umac_cmd_eeprom_proxy));
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't read eeprom\n");
+ return ret;
+ }
+
+ ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_EEPROM_PROXY,
+ IWM_SRC_UMAC, 2*HZ);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Did not get any eeprom answer\n");
+ return ret;
+ }
+
+ data_offset += chunk_size;
+ addr_offset += chunk_size;
+ entry_size -= chunk_size;
+ }
+
+ return 0;
+}
+
+u8 *iwm_eeprom_access(struct iwm_priv *iwm, u8 eeprom_id)
+{
+ if (!iwm->eeprom)
+ return ERR_PTR(-ENODEV);
+
+ return iwm->eeprom + eeprom_map[eeprom_id].offset;
+}
+
+int iwm_eeprom_init(struct iwm_priv *iwm)
+{
+ int i, ret = 0;
+ char name[32];
+
+ iwm->eeprom = kzalloc(IWM_EEPROM_LEN, GFP_KERNEL);
+ if (!iwm->eeprom)
+ return -ENOMEM;
+
+ for (i = IWM_EEPROM_FIRST; i < IWM_EEPROM_LAST; i++) {
+#ifdef CONFIG_IWM_B0_HW_SUPPORT
+ if (iwm->conf.hw_b0 && (i >= IWM_EEPROM_INDIRECT_OFFSET))
+ break;
+#endif
+ ret = iwm_eeprom_read(iwm, i);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't read eeprom entry #%d: %s\n",
+ i, eeprom_map[i].name);
+ break;
+ }
+ }
+
+ IWM_DBG_BOOT(iwm, DBG, "EEPROM dump:\n");
+ for (i = IWM_EEPROM_FIRST; i < IWM_EEPROM_LAST; i++) {
+ memset(name, 0, 32);
+ sprintf(name, "%s: ", eeprom_map[i].name);
+
+ IWM_HEXDUMP(iwm, DBG, BOOT, name,
+ iwm->eeprom + eeprom_map[i].offset,
+ eeprom_map[i].length);
+ }
+
+ return ret;
+}
+
+void iwm_eeprom_exit(struct iwm_priv *iwm)
+{
+ kfree(iwm->eeprom);
+}
diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.h b/drivers/net/wireless/iwmc3200wifi/eeprom.h
new file mode 100644
index 00000000000..cdb31a6a1f5
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/eeprom.h
@@ -0,0 +1,114 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#ifndef __IWM_EEPROM_H__
+#define __IWM_EEPROM_H__
+
+enum {
+ IWM_EEPROM_SIG = 0,
+ IWM_EEPROM_FIRST = IWM_EEPROM_SIG,
+ IWM_EEPROM_VERSION,
+ IWM_EEPROM_OEM_HW_VERSION,
+ IWM_EEPROM_MAC_VERSION,
+ IWM_EEPROM_CARD_ID,
+ IWM_EEPROM_RADIO_CONF,
+ IWM_EEPROM_SKU_CAP,
+
+ IWM_EEPROM_INDIRECT_OFFSET,
+ IWM_EEPROM_CALIB_RXIQ_OFFSET = IWM_EEPROM_INDIRECT_OFFSET,
+
+ IWM_EEPROM_INDIRECT_DATA,
+ IWM_EEPROM_CALIB_RXIQ = IWM_EEPROM_INDIRECT_DATA,
+
+ IWM_EEPROM_LAST,
+};
+
+#define IWM_EEPROM_SIG_OFF 0x00
+#define IWM_EEPROM_VERSION_OFF (0x54 << 1)
+#define IWM_EEPROM_OEM_HW_VERSION_OFF (0x56 << 1)
+#define IWM_EEPROM_MAC_VERSION_OFF (0x30 << 1)
+#define IWM_EEPROM_CARD_ID_OFF (0x5d << 1)
+#define IWM_EEPROM_RADIO_CONF_OFF (0x58 << 1)
+#define IWM_EEPROM_SKU_CAP_OFF (0x55 << 1)
+#define IWM_EEPROM_CALIB_CONFIG_OFF (0x7c << 1)
+
+#define IWM_EEPROM_SIG_LEN 4
+#define IWM_EEPROM_VERSION_LEN 2
+#define IWM_EEPROM_OEM_HW_VERSION_LEN 2
+#define IWM_EEPROM_MAC_VERSION_LEN 1
+#define IWM_EEPROM_CARD_ID_LEN 2
+#define IWM_EEPROM_RADIO_CONF_LEN 2
+#define IWM_EEPROM_SKU_CAP_LEN 2
+#define IWM_EEPROM_INDIRECT_LEN 2
+
+#define IWM_MAX_EEPROM_DATA_LEN 240
+#define IWM_EEPROM_LEN 0x800
+
+#define IWM_EEPROM_MIN_ALLOWED_VERSION 0x0610
+#define IWM_EEPROM_MAX_ALLOWED_VERSION 0x0700
+#define IWM_EEPROM_CURRENT_VERSION 0x0612
+
+#define IWM_EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
+#define IWM_EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
+#define IWM_EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
+
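The *_OFF constants above are 16-bit word offsets shifted into byte offsets (hence the '<< 1'), and individual entries are reached through iwm_eeprom_access(), declared at the end of this header. A minimal consumer sketch follows; it is an editor's illustration, not part of the patch, and the little-endian interpretation of the two-byte SKU word is an assumption.

/* Editor's illustration only, not part of this patch. */
static bool iwm_example_sku_has_5ghz(struct iwm_priv *iwm)
{
	u8 *cap = iwm_eeprom_access(iwm, IWM_EEPROM_SKU_CAP);

	if (IS_ERR(cap))
		return false;

	/* Assumes the SKU capability word is stored little-endian */
	return le16_to_cpup((__le16 *)cap) & IWM_EEPROM_SKU_CAP_BAND_52GHZ;
}
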
+enum {
+ IWM_EEPROM_CALIB_CAL_HDR,
+ IWM_EEPROM_CALIB_TX_POWER,
+ IWM_EEPROM_CALIB_XTAL,
+ IWM_EEPROM_CALIB_TEMPERATURE,
+ IWM_EEPROM_CALIB_RX_BB_FILTER,
+ IWM_EEPROM_CALIB_RX_IQ,
+ IWM_EEPROM_CALIB_MAX,
+};
+
+#define IWM_EEPROM_CALIB_RXIQ_OFF (IWM_EEPROM_CALIB_CONFIG_OFF + \
+ (IWM_EEPROM_CALIB_RX_IQ << 1))
+#define IWM_EEPROM_CALIB_RXIQ_LEN sizeof(struct iwm_lmac_calib_rxiq)
+
+struct iwm_eeprom_entry {
+ char *name;
+ u32 offset;
+ u32 length;
+};
+
+int iwm_eeprom_init(struct iwm_priv *iwm);
+void iwm_eeprom_exit(struct iwm_priv *iwm);
+u8 *iwm_eeprom_access(struct iwm_priv *iwm, u8 eeprom_id);
+
+#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c
new file mode 100644
index 00000000000..ec1a15a5a0e
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/fw.c
@@ -0,0 +1,388 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+
+#include "iwm.h"
+#include "bus.h"
+#include "hal.h"
+#include "umac.h"
+#include "debug.h"
+#include "fw.h"
+#include "commands.h"
+
+static const char fw_barker[] = "*WESTOPFORNOONE*";
+
+/*
+ * @op_code: Op code we're looking for.
+ * @index: There can be several instances of the same opcode within
+ * the firmware. Index specifies which one we're looking for.
+ */
+static int iwm_fw_op_offset(struct iwm_priv *iwm, const struct firmware *fw,
+ u16 op_code, u32 index)
+{
+ int offset = -EINVAL, fw_offset;
+ u32 op_index = 0;
+ const u8 *fw_ptr;
+ struct iwm_fw_hdr_rec *rec;
+
+ fw_offset = 0;
+ fw_ptr = fw->data;
+
+ /* We first need to look for the firmware barker */
+ if (memcmp(fw_ptr, fw_barker, IWM_HDR_BARKER_LEN)) {
+ IWM_ERR(iwm, "No barker string in this FW\n");
+ return -EINVAL;
+ }
+
+ if (fw->size < IWM_HDR_LEN) {
+ IWM_ERR(iwm, "FW is too small (%zu)\n", fw->size);
+ return -EINVAL;
+ }
+
+ fw_offset += IWM_HDR_BARKER_LEN;
+
+ while (fw_offset < fw->size) {
+ rec = (struct iwm_fw_hdr_rec *)(fw_ptr + fw_offset);
+
+ IWM_DBG_FW(iwm, DBG, "FW: op_code: 0x%x, len: %d @ 0x%x\n",
+ rec->op_code, rec->len, fw_offset);
+
+ if (rec->op_code == IWM_HDR_REC_OP_INVALID) {
+ IWM_DBG_FW(iwm, DBG, "Reached INVALID op code\n");
+ break;
+ }
+
+ if (rec->op_code == op_code) {
+ if (op_index == index) {
+ fw_offset += sizeof(struct iwm_fw_hdr_rec);
+ offset = fw_offset;
+ goto out;
+ }
+ op_index++;
+ }
+
+ fw_offset += sizeof(struct iwm_fw_hdr_rec) + rec->len;
+ }
+
+ out:
+ return offset;
+}
+
+static int iwm_load_firmware_chunk(struct iwm_priv *iwm,
+ const struct firmware *fw,
+ struct iwm_fw_img_desc *img_desc)
+{
+ struct iwm_udma_nonwifi_cmd target_cmd;
+ u32 chunk_size;
+ const u8 *chunk_ptr;
+ int ret = 0;
+
+ IWM_DBG_FW(iwm, INFO, "Loading FW chunk: %d bytes @ 0x%x\n",
+ img_desc->length, img_desc->address);
+
+ target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
+ target_cmd.handle_by_hw = 1;
+ target_cmd.op2 = 0;
+ target_cmd.resp = 0;
+ target_cmd.eop = 1;
+
+ chunk_size = img_desc->length;
+ chunk_ptr = fw->data + img_desc->offset;
+
+ while (chunk_size > 0) {
+ u32 tmp_chunk_size;
+
+ tmp_chunk_size = min_t(u32, chunk_size,
+ IWM_MAX_NONWIFI_CMD_BUFF_SIZE);
+
+ target_cmd.addr = cpu_to_le32(img_desc->address +
+ (chunk_ptr - fw->data - img_desc->offset));
+ target_cmd.op1_sz = cpu_to_le32(tmp_chunk_size);
+
+ IWM_DBG_FW(iwm, DBG, "\t%d bytes @ 0x%x\n",
+ tmp_chunk_size, target_cmd.addr);
+
+ ret = iwm_hal_send_target_cmd(iwm, &target_cmd, chunk_ptr);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't load FW chunk\n");
+ break;
+ }
+
+ chunk_size -= tmp_chunk_size;
+ chunk_ptr += tmp_chunk_size;
+ }
+
+ return ret;
+}
+/*
+ * To load a fw image to the target, we basically go through the
+ * fw, looking for OP_MEM_DESC records. Once we find one, we
+ * pass it to iwm_load_firmware_chunk().
+ * The OP_MEM_DESC records contain the actual memory chunk to be
+ * sent, as well as the destination address.
+ */
+static int iwm_load_img(struct iwm_priv *iwm, const char *img_name)
+{
+ const struct firmware *fw;
+ struct iwm_fw_img_desc *img_desc;
+ struct iwm_fw_img_ver *ver;
+ int ret = 0, fw_offset;
+ u32 opcode_idx = 0, build_date;
+ char *build_tag;
+
+ ret = request_firmware(&fw, img_name, iwm_to_dev(iwm));
+ if (ret) {
+ IWM_ERR(iwm, "Request firmware failed");
+ return ret;
+ }
+
+ IWM_DBG_FW(iwm, INFO, "Start to load FW %s\n", img_name);
+
+ while (1) {
+ fw_offset = iwm_fw_op_offset(iwm, fw,
+ IWM_HDR_REC_OP_MEM_DESC,
+ opcode_idx);
+ if (fw_offset < 0)
+ break;
+
+ img_desc = (struct iwm_fw_img_desc *)(fw->data + fw_offset);
+ ret = iwm_load_firmware_chunk(iwm, fw, img_desc);
+ if (ret < 0)
+ goto err_release_fw;
+ opcode_idx++;
+ }
+
+ /* Read firmware version */
+ fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_SW_VER, 0);
+ if (fw_offset < 0)
+ goto err_release_fw;
+
+ ver = (struct iwm_fw_img_ver *)(fw->data + fw_offset);
+
+ /* Read build tag */
+ fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_BUILD_TAG, 0);
+ if (fw_offset < 0)
+ goto err_release_fw;
+
+ build_tag = (char *)(fw->data + fw_offset);
+
+ /* Read build date */
+ fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_BUILD_DATE, 0);
+ if (fw_offset < 0)
+ goto err_release_fw;
+
+ build_date = *(u32 *)(fw->data + fw_offset);
+
+ IWM_INFO(iwm, "%s:\n", img_name);
+ IWM_INFO(iwm, "\tVersion: %02X.%02X\n", ver->major, ver->minor);
+ IWM_INFO(iwm, "\tBuild tag: %s\n", build_tag);
+ IWM_INFO(iwm, "\tBuild date: %x-%x-%x\n",
+ IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date),
+ IWM_BUILD_DAY(build_date));
+
+
+ err_release_fw:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int iwm_load_umac(struct iwm_priv *iwm)
+{
+ struct iwm_udma_nonwifi_cmd target_cmd;
+ int ret;
+
+ ret = iwm_load_img(iwm, iwm->bus_ops->umac_name);
+ if (ret < 0)
+ return ret;
+
+ /* We've loaded the UMAC, we can tell the target to jump there */
+ target_cmd.opcode = UMAC_HDI_OUT_OPCODE_JUMP;
+ target_cmd.addr = cpu_to_le32(UMAC_MU_FW_INST_DATA_12_ADDR);
+ target_cmd.op1_sz = 0;
+ target_cmd.op2 = 0;
+ target_cmd.handle_by_hw = 0;
+ target_cmd.resp = 1;
+ target_cmd.eop = 1;
+
+ ret = iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
+ if (ret < 0)
+ IWM_ERR(iwm, "Couldn't send JMP command\n");
+
+ return ret;
+}
+
+static int iwm_load_lmac(struct iwm_priv *iwm, const char *img_name)
+{
+ int ret;
+
+ ret = iwm_load_img(iwm, img_name);
+ if (ret < 0)
+ return ret;
+
+ return iwm_send_umac_reset(iwm,
+ cpu_to_le32(UMAC_RST_CTRL_FLG_LARC_CLK_EN), 0);
+}
+
+/*
+ * We currently have to load 3 FWs:
+ * 1) The UMAC (Upper MAC).
+ * 2) The calibration LMAC (Lower MAC).
+ * We then send the calibration init command, so that the device can
+ * run a first calibration round.
+ * 3) The operational LMAC, which replaces the calibration one when it's
+ * done with the first calibration round.
+ *
+ * Once those 3 FWs have been loaded, we send the periodic calibration
+ * command, and then the device is available for regular 802.11 operations.
+ */
+int iwm_load_fw(struct iwm_priv *iwm)
+{
+ int ret;
+
+ /* We first start downloading the UMAC */
+ ret = iwm_load_umac(iwm);
+ if (ret < 0) {
+ IWM_ERR(iwm, "UMAC loading failed\n");
+ return ret;
+ }
+
+ /* Handle UMAC_ALIVE notification */
+ ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_ALIVE, IWM_SRC_UMAC,
+ WAIT_NOTIF_TIMEOUT);
+ if (ret) {
+ IWM_ERR(iwm, "Handle UMAC_ALIVE failed: %d\n", ret);
+ return ret;
+ }
+
+ /* UMAC is alive, we can download the calibration LMAC */
+ ret = iwm_load_lmac(iwm, iwm->bus_ops->calib_lmac_name);
+ if (ret) {
+ IWM_ERR(iwm, "Calibration LMAC loading failed\n");
+ return ret;
+ }
+
+ /* Handle UMAC_INIT_COMPLETE notification */
+ ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_INIT_COMPLETE,
+ IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
+ if (ret) {
+ IWM_ERR(iwm, "Handle INIT_COMPLETE failed for calibration "
+ "LMAC: %d\n", ret);
+ return ret;
+ }
+
+ /* Read EEPROM data */
+ ret = iwm_eeprom_init(iwm);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't init eeprom array\n");
+ return ret;
+ }
+
+#ifdef CONFIG_IWM_B0_HW_SUPPORT
+ if (iwm->conf.hw_b0) {
+ clear_bit(PHY_CALIBRATE_RX_IQ_CMD, &iwm->conf.init_calib_map);
+ clear_bit(PHY_CALIBRATE_RX_IQ_CMD,
+ &iwm->conf.periodic_calib_map);
+ }
+#endif
+ /* Read RX IQ calibration result from EEPROM */
+ if (test_bit(PHY_CALIBRATE_RX_IQ_CMD, &iwm->conf.init_calib_map)) {
+ iwm_store_rxiq_calib_result(iwm);
+ set_bit(PHY_CALIBRATE_RX_IQ_CMD, &iwm->calib_done_map);
+ }
+
+ iwm_send_prio_table(iwm);
+ iwm_send_init_calib_cfg(iwm, iwm->conf.init_calib_map);
+
+ while (iwm->calib_done_map != iwm->conf.init_calib_map) {
+ ret = iwm_notif_handle(iwm, CALIBRATION_RES_NOTIFICATION,
+ IWM_SRC_LMAC, WAIT_NOTIF_TIMEOUT);
+ if (ret) {
+ IWM_ERR(iwm, "Wait for calibration result timeout\n");
+ goto out;
+ }
+ IWM_DBG_FW(iwm, DBG, "Got calibration result. calib_done_map: "
+ "0x%lx, requested calibrations: 0x%lx\n",
+ iwm->calib_done_map, iwm->conf.init_calib_map);
+ }
+
+ /* Handle LMAC CALIBRATION_COMPLETE notification */
+ ret = iwm_notif_handle(iwm, CALIBRATION_COMPLETE_NOTIFICATION,
+ IWM_SRC_LMAC, WAIT_NOTIF_TIMEOUT);
+ if (ret) {
+ IWM_ERR(iwm, "Wait for CALIBRATION_COMPLETE timeout\n");
+ goto out;
+ }
+
+ IWM_INFO(iwm, "LMAC calibration done: 0x%lx\n", iwm->calib_done_map);
+
+ iwm_send_umac_reset(iwm, cpu_to_le32(UMAC_RST_CTRL_FLG_LARC_RESET), 1);
+
+ ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC,
+ WAIT_NOTIF_TIMEOUT);
+ if (ret) {
+ IWM_ERR(iwm, "Wait for UMAC RESET timeout\n");
+ goto out;
+ }
+
+ /* Download the operational LMAC */
+ ret = iwm_load_lmac(iwm, iwm->bus_ops->lmac_name);
+ if (ret) {
+ IWM_ERR(iwm, "LMAC loading failed\n");
+ goto out;
+ }
+
+ ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_INIT_COMPLETE,
+ IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
+ if (ret) {
+ IWM_ERR(iwm, "Handle INIT_COMPLETE failed for LMAC: %d\n", ret);
+ goto out;
+ }
+
+ iwm_send_prio_table(iwm);
+ iwm_send_calib_results(iwm);
+ iwm_send_periodic_calib_cfg(iwm, iwm->conf.periodic_calib_map);
+
+ return 0;
+
+ out:
+ iwm_eeprom_exit(iwm);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.h b/drivers/net/wireless/iwmc3200wifi/fw.h
new file mode 100644
index 00000000000..c70a3b40dad
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/fw.h
@@ -0,0 +1,100 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#ifndef __IWM_FW_H__
+#define __IWM_FW_H__
+
+/**
+ * struct iwm_fw_hdr_rec - An iwm firmware image is a
+ * concatenation of various records. Each of them is
+ * defined by an ID (aka op code), a length, and the
+ * actual data.
+ * @op_code: The record ID, see IWM_HDR_REC_OP_*
+ *
+ * @len: The record payload length
+ *
+ * @buf: The record payload
+ */
+struct iwm_fw_hdr_rec {
+ u16 op_code;
+ u16 len;
+ u8 buf[0];
+};
+
+/* Header's definitions */
+#define IWM_HDR_LEN (512)
+#define IWM_HDR_BARKER_LEN (16)
+
+/* Header's opcodes */
+#define IWM_HDR_REC_OP_INVALID (0x00)
+#define IWM_HDR_REC_OP_BUILD_DATE (0x01)
+#define IWM_HDR_REC_OP_BUILD_TAG (0x02)
+#define IWM_HDR_REC_OP_SW_VER (0x03)
+#define IWM_HDR_REC_OP_HW_SKU (0x04)
+#define IWM_HDR_REC_OP_BUILD_OPT (0x05)
+#define IWM_HDR_REC_OP_MEM_DESC (0x06)
+#define IWM_HDR_REC_USERDEFS (0x07)
+
+/* Header's records length (in bytes) */
+#define IWM_HDR_REC_LEN_BUILD_DATE (4)
+#define IWM_HDR_REC_LEN_BUILD_TAG (64)
+#define IWM_HDR_REC_LEN_SW_VER (4)
+#define IWM_HDR_REC_LEN_HW_SKU (4)
+#define IWM_HDR_REC_LEN_BUILD_OPT (4)
+#define IWM_HDR_REC_LEN_MEM_DESC (12)
+#define IWM_HDR_REC_LEN_USERDEF (64)
+
+#define IWM_BUILD_YEAR(date) ((date >> 16) & 0xffff)
+#define IWM_BUILD_MONTH(date) ((date >> 8) & 0xff)
+#define IWM_BUILD_DAY(date) (date & 0xff)
+
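The build-date macros above unpack a single 32-bit word. iwm_load_img() in fw.c prints the three fields with %x, which suggests the firmware encodes the date in BCD; that encoding, and the sample value below, are assumptions made for this editor's illustration.

/* Editor's illustration only, not part of this patch. */
static void iwm_example_print_build_date(void)
{
	u32 date = 0x20090617; /* hypothetical BCD-encoded build date */

	/* Prints "built 2009-6-17" */
	pr_info("built %x-%x-%x\n", IWM_BUILD_YEAR(date),
		IWM_BUILD_MONTH(date), IWM_BUILD_DAY(date));
}
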
+struct iwm_fw_img_desc {
+ u32 offset;
+ u32 address;
+ u32 length;
+};
+
+struct iwm_fw_img_ver {
+ u8 minor;
+ u8 major;
+ u16 reserved;
+};
+
+int iwm_load_fw(struct iwm_priv *iwm);
+
+#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
new file mode 100644
index 00000000000..ee127fe4f43
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/hal.c
@@ -0,0 +1,464 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+/*
+ * Hardware Abstraction Layer for iwm.
+ *
+ * This file mostly defines an abstraction API for
+ * sending various commands to the target.
+ *
+ * We have 2 types of commands: wifi and non-wifi ones.
+ *
+ * - wifi commands:
+ * They are used for sending LMAC and UMAC commands,
+ * and thus are the most commonly used ones.
+ * There are 2 different wifi command types, the regular
+ * one and the LMAC one. The former is used to send
+ * UMAC commands (see UMAC_CMD_OPCODE_* from umac.h)
+ * while the latter is used for sending commands to the
+ * LMAC. If you look at LMAC commands you'll see that they
+ * are actually regular iwlwifi target commands encapsulated
+ * into a special UMAC command called UMAC passthrough.
+ * This is due to the fact that the host talks exclusively
+ * to the UMAC and so there needs to be a special UMAC
+ * command for talking to the LMAC.
+ * This is how a wifi command is laid out:
+ * ------------------------
+ * | iwm_udma_out_wifi_hdr |
+ * ------------------------
+ * | SW meta_data (32 bits) |
+ * ------------------------
+ * | iwm_dev_cmd_hdr |
+ * ------------------------
+ * | payload |
+ * | .... |
+ *
+ * - non-wifi, or general commands:
+ * Those commands are handled by the device's bootrom,
+ * and are typically sent when the UMAC and the LMAC
+ * are not yet available.
+ * This is how a non-wifi command is laid out:
+ * ---------------------------
+ * | iwm_udma_out_nonwifi_hdr |
+ * ---------------------------
+ * | payload |
+ * | .... |
+ *
+ *
+ * All the commands start with a UDMA header, which is
+ * basically a 32 bits field. The 4 LSB there define
+ * an opcode that allows the target to differentiate
+ * between wifi (opcode is 0xf) and non-wifi commands
+ * (opcode is [0..0xe]).
+ *
+ * When a command (wifi or non-wifi) is supposed to receive
+ * an answer, we queue the command buffer. When we do receive
+ * a command response from the UMAC, we go through the list
+ * of pending commands, and pass both the command and the answer
+ * to the rx handler. Each command is sent with a unique
+ * sequence id, and the answer is sent with the same one. This
+ * is how we're supposed to match an answer with its command.
+ * See rx.c:iwm_rx_handle_[non]wifi() and iwm_get_pending_[non]wifi()
+ * for the implementation details.
+ */
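To make the opcode rule described above concrete (an editor's sketch, not part of the patch): the receive path only needs the 4 LSB of the first UDMA word to tell wifi from non-wifi traffic. The 0xf value comes straight from the comment; the driver itself goes through the UMAC_HDI_OUT_CMD_OPCODE accessors from umac.h.

/* Editor's illustration only, not part of this patch. */
static inline bool iwm_example_udma_hdr_is_wifi(u32 first_word)
{
	/* wifi commands use opcode 0xf, non-wifi ones 0x0..0xe */
	return (first_word & 0xf) == 0xf;
}
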
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include "iwm.h"
+#include "bus.h"
+#include "hal.h"
+#include "umac.h"
+#include "debug.h"
+
+static void iwm_nonwifi_cmd_init(struct iwm_priv *iwm,
+ struct iwm_nonwifi_cmd *cmd,
+ struct iwm_udma_nonwifi_cmd *udma_cmd)
+{
+ INIT_LIST_HEAD(&cmd->pending);
+
+ spin_lock(&iwm->cmd_lock);
+
+ cmd->resp_received = 0;
+
+ cmd->seq_num = iwm->nonwifi_seq_num;
+ udma_cmd->seq_num = cpu_to_le16(cmd->seq_num);
+
+ cmd->seq_num = iwm->nonwifi_seq_num++;
+ iwm->nonwifi_seq_num %= UMAC_NONWIFI_SEQ_NUM_MAX;
+
+ if (udma_cmd->resp)
+ list_add_tail(&cmd->pending, &iwm->nonwifi_pending_cmd);
+
+ spin_unlock(&iwm->cmd_lock);
+
+ cmd->buf.start = cmd->buf.payload;
+ cmd->buf.len = 0;
+
+ memcpy(&cmd->udma_cmd, udma_cmd, sizeof(*udma_cmd));
+}
+
+u16 iwm_alloc_wifi_cmd_seq(struct iwm_priv *iwm)
+{
+ u16 seq_num = iwm->wifi_seq_num;
+
+ iwm->wifi_seq_num++;
+ iwm->wifi_seq_num %= UMAC_WIFI_SEQ_NUM_MAX;
+
+ return seq_num;
+}
+
+static void iwm_wifi_cmd_init(struct iwm_priv *iwm,
+ struct iwm_wifi_cmd *cmd,
+ struct iwm_udma_wifi_cmd *udma_cmd,
+ struct iwm_umac_cmd *umac_cmd,
+ struct iwm_lmac_cmd *lmac_cmd,
+ u16 payload_size)
+{
+ INIT_LIST_HEAD(&cmd->pending);
+
+ spin_lock(&iwm->cmd_lock);
+
+ cmd->seq_num = iwm_alloc_wifi_cmd_seq(iwm);
+ umac_cmd->seq_num = cpu_to_le16(cmd->seq_num);
+
+ if (umac_cmd->resp)
+ list_add_tail(&cmd->pending, &iwm->wifi_pending_cmd);
+
+ spin_unlock(&iwm->cmd_lock);
+
+ cmd->buf.start = cmd->buf.payload;
+ cmd->buf.len = 0;
+
+ if (lmac_cmd) {
+ cmd->buf.start -= sizeof(struct iwm_lmac_hdr);
+
+ lmac_cmd->seq_num = cpu_to_le16(cmd->seq_num);
+ lmac_cmd->count = cpu_to_le16(payload_size);
+
+ memcpy(&cmd->lmac_cmd, lmac_cmd, sizeof(*lmac_cmd));
+
+ umac_cmd->count = cpu_to_le16(sizeof(struct iwm_lmac_hdr));
+ } else
+ umac_cmd->count = 0;
+
+ umac_cmd->count = cpu_to_le16(payload_size +
+ le16_to_cpu(umac_cmd->count));
+ udma_cmd->count = cpu_to_le16(sizeof(struct iwm_umac_fw_cmd_hdr) +
+ le16_to_cpu(umac_cmd->count));
+
+ memcpy(&cmd->udma_cmd, udma_cmd, sizeof(*udma_cmd));
+ memcpy(&cmd->umac_cmd, umac_cmd, sizeof(*umac_cmd));
+}
+
+void iwm_cmd_flush(struct iwm_priv *iwm)
+{
+ struct iwm_wifi_cmd *wcmd, *wnext;
+ struct iwm_nonwifi_cmd *nwcmd, *nwnext;
+
+ list_for_each_entry_safe(wcmd, wnext, &iwm->wifi_pending_cmd, pending) {
+ list_del(&wcmd->pending);
+ kfree(wcmd);
+ }
+
+ list_for_each_entry_safe(nwcmd, nwnext, &iwm->nonwifi_pending_cmd,
+ pending) {
+ list_del(&nwcmd->pending);
+ kfree(nwcmd);
+ }
+}
+
+struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
+{
+ struct iwm_wifi_cmd *cmd, *next;
+
+ list_for_each_entry_safe(cmd, next, &iwm->wifi_pending_cmd, pending)
+ if (cmd->seq_num == seq_num) {
+ list_del(&cmd->pending);
+ return cmd;
+ }
+
+ return NULL;
+}
+
+struct iwm_nonwifi_cmd *
+iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm, u8 seq_num, u8 cmd_opcode)
+{
+ struct iwm_nonwifi_cmd *cmd, *next;
+
+ list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending)
+ if ((cmd->seq_num == seq_num) &&
+ (cmd->udma_cmd.opcode == cmd_opcode) &&
+ (cmd->resp_received)) {
+ list_del(&cmd->pending);
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static void iwm_build_udma_nonwifi_hdr(struct iwm_priv *iwm,
+ struct iwm_udma_out_nonwifi_hdr *hdr,
+ struct iwm_udma_nonwifi_cmd *cmd)
+{
+ memset(hdr, 0, sizeof(*hdr));
+
+ SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE, cmd->opcode);
+ SET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP, cmd->resp);
+ SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, 1);
+ SET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW,
+ cmd->handle_by_hw);
+ SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_SIGNATURE, UMAC_HDI_OUT_SIGNATURE);
+ SET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM,
+ le16_to_cpu(cmd->seq_num));
+
+ hdr->addr = cmd->addr;
+ hdr->op1_sz = cmd->op1_sz;
+ hdr->op2 = cmd->op2;
+}
+
+static int iwm_send_udma_nonwifi_cmd(struct iwm_priv *iwm,
+ struct iwm_nonwifi_cmd *cmd)
+{
+ struct iwm_udma_out_nonwifi_hdr *udma_hdr;
+ struct iwm_nonwifi_cmd_buff *buf;
+ struct iwm_udma_nonwifi_cmd *udma_cmd = &cmd->udma_cmd;
+
+ buf = &cmd->buf;
+
+ buf->start -= sizeof(struct iwm_umac_nonwifi_out_hdr);
+ buf->len += sizeof(struct iwm_umac_nonwifi_out_hdr);
+
+ udma_hdr = (struct iwm_udma_out_nonwifi_hdr *)(buf->start);
+
+ iwm_build_udma_nonwifi_hdr(iwm, udma_hdr, udma_cmd);
+
+ IWM_DBG_CMD(iwm, DBG,
+ "Send UDMA nonwifi cmd: opcode = 0x%x, resp = 0x%x, "
+ "hw = 0x%x, seqnum = %d, addr = 0x%x, op1_sz = 0x%x, "
+ "op2 = 0x%x\n", udma_cmd->opcode, udma_cmd->resp,
+ udma_cmd->handle_by_hw, cmd->seq_num, udma_cmd->addr,
+ udma_cmd->op1_sz, udma_cmd->op2);
+
+ return iwm_bus_send_chunk(iwm, buf->start, buf->len);
+}
+
+void iwm_udma_wifi_hdr_set_eop(struct iwm_priv *iwm, u8 *buf, u8 eop)
+{
+ struct iwm_udma_out_wifi_hdr *hdr = (struct iwm_udma_out_wifi_hdr *)buf;
+
+ SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, eop);
+}
+
+void iwm_build_udma_wifi_hdr(struct iwm_priv *iwm,
+ struct iwm_udma_out_wifi_hdr *hdr,
+ struct iwm_udma_wifi_cmd *cmd)
+{
+ memset(hdr, 0, sizeof(*hdr));
+
+ SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE, UMAC_HDI_OUT_OPCODE_WIFI);
+ SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, cmd->eop);
+ SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_SIGNATURE, UMAC_HDI_OUT_SIGNATURE);
+
+ SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_BYTE_COUNT,
+ le16_to_cpu(cmd->count));
+ SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_CREDIT_GRP, cmd->credit_group);
+ SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_RATID, cmd->ra_tid);
+ SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_LMAC_OFFSET, cmd->lmac_offset);
+}
+
+void iwm_build_umac_hdr(struct iwm_priv *iwm,
+ struct iwm_umac_fw_cmd_hdr *hdr,
+ struct iwm_umac_cmd *cmd)
+{
+ memset(hdr, 0, sizeof(*hdr));
+
+ SET_VAL32(hdr->meta_data, UMAC_FW_CMD_BYTE_COUNT,
+ le16_to_cpu(cmd->count));
+ SET_VAL32(hdr->meta_data, UMAC_FW_CMD_TX_STA_COLOR, cmd->color);
+ SET_VAL8(hdr->cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ, cmd->resp);
+
+ hdr->cmd.cmd = cmd->id;
+ hdr->cmd.seq_num = cmd->seq_num;
+}
+
+static int iwm_send_udma_wifi_cmd(struct iwm_priv *iwm,
+ struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_wifi_out_hdr *umac_hdr;
+ struct iwm_wifi_cmd_buff *buf;
+ struct iwm_udma_wifi_cmd *udma_cmd = &cmd->udma_cmd;
+ struct iwm_umac_cmd *umac_cmd = &cmd->umac_cmd;
+ int ret;
+
+ buf = &cmd->buf;
+
+ buf->start -= sizeof(struct iwm_umac_wifi_out_hdr);
+ buf->len += sizeof(struct iwm_umac_wifi_out_hdr);
+
+ umac_hdr = (struct iwm_umac_wifi_out_hdr *)(buf->start);
+
+ iwm_build_udma_wifi_hdr(iwm, &umac_hdr->hw_hdr, udma_cmd);
+ iwm_build_umac_hdr(iwm, &umac_hdr->sw_hdr, umac_cmd);
+
+ IWM_DBG_CMD(iwm, DBG,
+ "Send UDMA wifi cmd: opcode = 0x%x, UMAC opcode = 0x%x, "
+ "eop = 0x%x, count = 0x%x, credit_group = 0x%x, "
+ "ra_tid = 0x%x, lmac_offset = 0x%x, seqnum = %d\n",
+ UMAC_HDI_OUT_OPCODE_WIFI, umac_cmd->id,
+ udma_cmd->eop, udma_cmd->count, udma_cmd->credit_group,
+ udma_cmd->ra_tid, udma_cmd->lmac_offset, cmd->seq_num);
+
+ if (umac_cmd->id == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH)
+ IWM_DBG_CMD(iwm, DBG, "\tLMAC opcode: 0x%x\n",
+ cmd->lmac_cmd.id);
+
+ ret = iwm_tx_credit_alloc(iwm, udma_cmd->credit_group, buf->len);
+
+ /* We keep sending UMAC reset regardless of the command credits.
+ * The UMAC is supposed to be reset anyway and the Tx credits are
+ * reinitialized afterwards. If we are lucky, the reset could
+ * still be done even though we have run out of credits for the
+ * command pool at this moment. */
+ if (ret && (umac_cmd->id != UMAC_CMD_OPCODE_RESET)) {
+ IWM_DBG_TX(iwm, DBG, "Failed to alloc tx credit for cmd %d\n",
+ umac_cmd->id);
+ return ret;
+ }
+
+ return iwm_bus_send_chunk(iwm, buf->start, buf->len);
+}
+
+/* A target_cmd (a.k.a. udma_nonwifi_cmd) can be sent even when the
+ * UMAC is not available. */
+int iwm_hal_send_target_cmd(struct iwm_priv *iwm,
+ struct iwm_udma_nonwifi_cmd *udma_cmd,
+ const void *payload)
+{
+ struct iwm_nonwifi_cmd *cmd;
+ int ret;
+
+ cmd = kzalloc(sizeof(struct iwm_nonwifi_cmd), GFP_KERNEL);
+ if (!cmd) {
+ IWM_ERR(iwm, "Couldn't alloc memory for hal cmd\n");
+ return -ENOMEM;
+ }
+
+ iwm_nonwifi_cmd_init(iwm, cmd, udma_cmd);
+
+ if (cmd->udma_cmd.opcode == UMAC_HDI_OUT_OPCODE_WRITE ||
+ cmd->udma_cmd.opcode == UMAC_HDI_OUT_OPCODE_WRITE_PERSISTENT) {
+ cmd->buf.len = le32_to_cpu(cmd->udma_cmd.op1_sz);
+ memcpy(&cmd->buf.payload, payload, cmd->buf.len);
+ }
+
+ ret = iwm_send_udma_nonwifi_cmd(iwm, cmd);
+ if (ret < 0) {
+ if (!udma_cmd->resp)
+ kfree(cmd);
+ return ret;
+ }
+
+ /* Read the sequence number before potentially freeing the command:
+ * returning cmd->seq_num after kfree(cmd) would be a use-after-free. */
+ ret = cmd->seq_num;
+
+ if (!udma_cmd->resp)
+ kfree(cmd);
+
+ return ret;
+}
+
+static void iwm_build_lmac_hdr(struct iwm_priv *iwm, struct iwm_lmac_hdr *hdr,
+ struct iwm_lmac_cmd *cmd)
+{
+ memset(hdr, 0, sizeof(*hdr));
+
+ hdr->id = cmd->id;
+ hdr->flags = 0; /* Is this ever used? */
+ hdr->seq_num = cmd->seq_num;
+}
+
+/*
+ * iwm_hal_send_host_cmd(): sends commands to the UMAC or the LMAC.
+ * Sending a command to the LMAC is equivalent to sending a
+ * regular UMAC command with the LMAC passthrough or the LMAC
+ * wrapper UMAC command IDs.
+ */
+int iwm_hal_send_host_cmd(struct iwm_priv *iwm,
+ struct iwm_udma_wifi_cmd *udma_cmd,
+ struct iwm_umac_cmd *umac_cmd,
+ struct iwm_lmac_cmd *lmac_cmd,
+ const void *payload, u16 payload_size)
+{
+ struct iwm_wifi_cmd *cmd;
+ struct iwm_lmac_hdr *hdr;
+ int lmac_hdr_len = 0;
+ int ret;
+
+ cmd = kzalloc(sizeof(struct iwm_wifi_cmd), GFP_KERNEL);
+ if (!cmd) {
+ IWM_ERR(iwm, "Couldn't alloc memory for wifi hal cmd\n");
+ return -ENOMEM;
+ }
+
+ iwm_wifi_cmd_init(iwm, cmd, udma_cmd, umac_cmd, lmac_cmd, payload_size);
+
+ if (lmac_cmd) {
+ hdr = (struct iwm_lmac_hdr *)(cmd->buf.start);
+
+ iwm_build_lmac_hdr(iwm, hdr, &cmd->lmac_cmd);
+ lmac_hdr_len = sizeof(struct iwm_lmac_hdr);
+ }
+
+ memcpy(cmd->buf.payload, payload, payload_size);
+ cmd->buf.len = le16_to_cpu(umac_cmd->count);
+
+ ret = iwm_send_udma_wifi_cmd(iwm, cmd);
+
+ /* We free the cmd if we're not expecting any response */
+ if (!umac_cmd->resp)
+ kfree(cmd);
+ return ret;
+}
+
+/*
+ * iwm_hal_send_umac_cmd(): This is a special case for
+ * iwm_hal_send_host_cmd(), used to send a direct UMAC command
+ * (without the LMAC being involved).
+ */
+int iwm_hal_send_umac_cmd(struct iwm_priv *iwm,
+ struct iwm_udma_wifi_cmd *udma_cmd,
+ struct iwm_umac_cmd *umac_cmd,
+ const void *payload, u16 payload_size)
+{
+ return iwm_hal_send_host_cmd(iwm, udma_cmd, umac_cmd, NULL,
+ payload, payload_size);
+}
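+
+/*
+ * Illustrative sketch (not part of the driver) of how a caller is
+ * expected to use the helper above for a UMAC-only command; the
+ * payload, its size and the opcode are placeholders:
+ *
+ *	struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
+ *	struct iwm_umac_cmd umac_cmd = { .resp = 1 };
+ *
+ *	umac_cmd.id = <a UMAC_CMD_OPCODE_* value>;
+ *	ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd,
+ *				    payload, payload_size);
+ */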
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.h b/drivers/net/wireless/iwmc3200wifi/hal.h
new file mode 100644
index 00000000000..0adfdc85765
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/hal.h
@@ -0,0 +1,236 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#ifndef _IWM_HAL_H_
+#define _IWM_HAL_H_
+
+#include "umac.h"
+
+#define GET_VAL8(s, name) ((s >> name##_POS) & name##_SEED)
+#define GET_VAL16(s, name) ((le16_to_cpu(s) >> name##_POS) & name##_SEED)
+#define GET_VAL32(s, name) ((le32_to_cpu(s) >> name##_POS) & name##_SEED)
+
+#define SET_VAL8(s, name, val) \
+do { \
+ s = (s & ~(name##_SEED << name##_POS)) | \
+ ((val & name##_SEED) << name##_POS); \
+} while (0)
+
+#define SET_VAL16(s, name, val) \
+do { \
+ s = cpu_to_le16((le16_to_cpu(s) & ~(name##_SEED << name##_POS)) | \
+ ((val & name##_SEED) << name##_POS)); \
+} while (0)
+
+#define SET_VAL32(s, name, val) \
+do { \
+ s = cpu_to_le32((le32_to_cpu(s) & ~(name##_SEED << name##_POS)) | \
+ ((val & name##_SEED) << name##_POS)); \
+} while (0)
+
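+/*
+ * Illustrative use of the accessors above (not driver code). Each field
+ * "name" is described by a name##_POS bit offset and a name##_SEED mask.
+ * For instance, with the non-wifi sequence number defined below
+ * (UDMA_HDI_OUT_NW_CMD_SEQ_NUM, bits [12:15]):
+ *
+ *	__le32 word = 0;
+ *	SET_VAL32(word, UDMA_HDI_OUT_NW_CMD_SEQ_NUM, 5);
+ *	seq = GET_VAL32(word, UDMA_HDI_OUT_NW_CMD_SEQ_NUM);	(seq == 5)
+ */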
+
+#define UDMA_UMAC_INIT { .eop = 1, \
+ .credit_group = 0x4, \
+ .ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD, \
+ .lmac_offset = 0 }
+#define UDMA_LMAC_INIT { .eop = 1, \
+ .credit_group = 0x4, \
+ .ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD, \
+ .lmac_offset = 4 }
+
+
+/* UDMA IN OP CODE -- cmd bits [3:0] */
+#define UDMA_IN_OPCODE_MASK 0xF
+
+#define UDMA_IN_OPCODE_GENERAL_RESP 0x0
+#define UDMA_IN_OPCODE_READ_RESP 0x1
+#define UDMA_IN_OPCODE_WRITE_RESP 0x2
+#define UDMA_IN_OPCODE_PERS_WRITE_RESP 0x5
+#define UDMA_IN_OPCODE_PERS_READ_RESP 0x6
+#define UDMA_IN_OPCODE_RD_MDFY_WR_RESP 0x7
+#define UDMA_IN_OPCODE_EP_MNGMT_MSG 0x8
+#define UDMA_IN_OPCODE_CRDT_CHNG_MSG 0x9
+#define UDMA_IN_OPCODE_CNTRL_DATABASE_MSG 0xA
+#define UDMA_IN_OPCODE_SW_MSG 0xB
+#define UDMA_IN_OPCODE_WIFI 0xF
+#define UDMA_IN_OPCODE_WIFI_LMAC 0x1F
+#define UDMA_IN_OPCODE_WIFI_UMAC 0x2F
+
+/* HW API: udma_hdi_nonwifi API (OUT and IN) */
+
+/* iwm_udma_nonwifi_cmd request response -- bits [9:9] */
+#define UDMA_HDI_OUT_NW_CMD_RESP_POS 9
+#define UDMA_HDI_OUT_NW_CMD_RESP_SEED 0x1
+
+/* iwm_udma_nonwifi_cmd handle by HW -- bits [11:11] */
+#define UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW_POS 11
+#define UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW_SEED 0x1
+
+/* iwm_udma_nonwifi_cmd sequence-number -- bits [12:15] */
+#define UDMA_HDI_OUT_NW_CMD_SEQ_NUM_POS 12
+#define UDMA_HDI_OUT_NW_CMD_SEQ_NUM_SEED 0xF
+
+/* UDMA IN Non-WIFI HW sequence number -- bits [12:15] */
+#define UDMA_IN_NW_HW_SEQ_NUM_POS 12
+#define UDMA_IN_NW_HW_SEQ_NUM_SEED 0xF
+
+/* UDMA IN Non-WIFI HW signature -- bits [16:31] */
+#define UDMA_IN_NW_HW_SIG_POS 16
+#define UDMA_IN_NW_HW_SIG_SEED 0xFFFF
+
+/* fixed signature */
+#define UDMA_IN_NW_HW_SIG 0xCBBC
+
+/* UDMA IN Non-WIFI HW block length -- bits [32:35] */
+#define UDMA_IN_NW_HW_LENGTH_SEED 0xF
+#define UDMA_IN_NW_HW_LENGTH_POS 32
+
+/* End of HW API: udma_hdi_nonwifi API (OUT and IN) */
+
+#define IWM_SDIO_FW_MAX_CHUNK_SIZE 2032
+#define IWM_MAX_WIFI_HEADERS_SIZE 32
+#define IWM_MAX_NONWIFI_HEADERS_SIZE 16
+#define IWM_MAX_NONWIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
+ IWM_MAX_NONWIFI_HEADERS_SIZE)
+#define IWM_MAX_WIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
+ IWM_MAX_WIFI_HEADERS_SIZE)
+
+#define IWM_HAL_CONCATENATE_BUF_SIZE 8192
+
+struct iwm_wifi_cmd_buff {
+ u16 len;
+ u8 *start;
+ u8 hdr[IWM_MAX_WIFI_HEADERS_SIZE];
+ u8 payload[IWM_MAX_WIFI_CMD_BUFF_SIZE];
+};
+
+struct iwm_nonwifi_cmd_buff {
+ u16 len;
+ u8 *start;
+ u8 hdr[IWM_MAX_NONWIFI_HEADERS_SIZE];
+ u8 payload[IWM_MAX_NONWIFI_CMD_BUFF_SIZE];
+};
+
+struct iwm_udma_nonwifi_cmd {
+ u8 opcode;
+ u8 eop;
+ u8 resp;
+ u8 handle_by_hw;
+ __le32 addr;
+ __le32 op1_sz;
+ __le32 op2;
+ __le16 seq_num;
+};
+
+struct iwm_udma_wifi_cmd {
+ __le16 count;
+ u8 eop;
+ u8 credit_group;
+ u8 ra_tid;
+ u8 lmac_offset;
+};
+
+struct iwm_umac_cmd {
+ u8 id;
+ __le16 count;
+ u8 resp;
+ __le16 seq_num;
+ u8 color;
+};
+
+struct iwm_lmac_cmd {
+ u8 id;
+ __le16 count;
+ u8 resp;
+ __le16 seq_num;
+};
+
+struct iwm_nonwifi_cmd {
+ u16 seq_num;
+ bool resp_received;
+ struct list_head pending;
+ struct iwm_udma_nonwifi_cmd udma_cmd;
+ struct iwm_umac_cmd umac_cmd;
+ struct iwm_lmac_cmd lmac_cmd;
+ struct iwm_nonwifi_cmd_buff buf;
+ u32 flags;
+};
+
+struct iwm_wifi_cmd {
+ u16 seq_num;
+ struct list_head pending;
+ struct iwm_udma_wifi_cmd udma_cmd;
+ struct iwm_umac_cmd umac_cmd;
+ struct iwm_lmac_cmd lmac_cmd;
+ struct iwm_wifi_cmd_buff buf;
+ u32 flags;
+};
+
+void iwm_cmd_flush(struct iwm_priv *iwm);
+
+struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm,
+ u16 seq_num);
+struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm,
+ u8 seq_num, u8 cmd_opcode);
+
+
+int iwm_hal_send_target_cmd(struct iwm_priv *iwm,
+ struct iwm_udma_nonwifi_cmd *ucmd,
+ const void *payload);
+
+int iwm_hal_send_host_cmd(struct iwm_priv *iwm,
+ struct iwm_udma_wifi_cmd *udma_cmd,
+ struct iwm_umac_cmd *umac_cmd,
+ struct iwm_lmac_cmd *lmac_cmd,
+ const void *payload, u16 payload_size);
+
+int iwm_hal_send_umac_cmd(struct iwm_priv *iwm,
+ struct iwm_udma_wifi_cmd *udma_cmd,
+ struct iwm_umac_cmd *umac_cmd,
+ const void *payload, u16 payload_size);
+
+u16 iwm_alloc_wifi_cmd_seq(struct iwm_priv *iwm);
+
+void iwm_udma_wifi_hdr_set_eop(struct iwm_priv *iwm, u8 *buf, u8 eop);
+void iwm_build_udma_wifi_hdr(struct iwm_priv *iwm,
+ struct iwm_udma_out_wifi_hdr *hdr,
+ struct iwm_udma_wifi_cmd *cmd);
+void iwm_build_umac_hdr(struct iwm_priv *iwm,
+ struct iwm_umac_fw_cmd_hdr *hdr,
+ struct iwm_umac_cmd *cmd);
+#endif /* _IWM_HAL_H_ */
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
new file mode 100644
index 00000000000..635c16ee618
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -0,0 +1,346 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#ifndef __IWM_H__
+#define __IWM_H__
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+
+#include "debug.h"
+#include "hal.h"
+#include "umac.h"
+#include "lmac.h"
+#include "eeprom.h"
+
+#define IWM_COPYRIGHT "Copyright(c) 2009 Intel Corporation"
+#define IWM_AUTHOR "<ilw@linux.intel.com>"
+
+#define CONFIG_IWM_B0_HW_SUPPORT 1
+
+#define IWM_SRC_LMAC UMAC_HDI_IN_SOURCE_FHRX
+#define IWM_SRC_UDMA UMAC_HDI_IN_SOURCE_UDMA
+#define IWM_SRC_UMAC UMAC_HDI_IN_SOURCE_FW
+#define IWM_SRC_NUM 3
+
+#define IWM_POWER_INDEX_MIN 0
+#define IWM_POWER_INDEX_MAX 5
+#define IWM_POWER_INDEX_DEFAULT 3
+
+struct iwm_conf {
+ u32 sdio_ior_timeout;
+ unsigned long init_calib_map;
+ unsigned long periodic_calib_map;
+ bool reset_on_fatal_err;
+ bool auto_connect;
+ bool wimax_not_present;
+ bool enable_qos;
+ u32 mode;
+
+ u32 power_index;
+ u32 frag_threshold;
+ u32 rts_threshold;
+ bool cts_to_self;
+
+ u32 assoc_timeout;
+ u32 roam_timeout;
+ u32 wireless_mode;
+ u32 coexist_mode;
+
+ u8 ibss_band;
+ u8 ibss_channel;
+
+ u8 mac_addr[ETH_ALEN];
+#ifdef CONFIG_IWM_B0_HW_SUPPORT
+ bool hw_b0;
+#endif
+};
+
+enum {
+ COEX_MODE_SA = 1,
+ COEX_MODE_XOR,
+ COEX_MODE_CM,
+ COEX_MODE_MAX,
+};
+
+struct iwm_if_ops;
+struct iwm_wifi_cmd;
+
+struct pool_entry {
+ int id; /* group id */
+ int sid; /* super group id */
+ int min_pages; /* min capacity in pages */
+ int max_pages; /* max capacity in pages */
+ int alloc_pages; /* allocated # of pages. increased by driver */
+ int total_freed_pages; /* total freed # of pages. increased by UMAC */
+};
+
+struct spool_entry {
+ int id;
+ int max_pages;
+ int alloc_pages;
+};
+
+struct iwm_tx_credit {
+ spinlock_t lock;
+ int pool_nr;
+ unsigned long full_pools_map; /* bitmap for # of filled tx pools */
+ struct pool_entry pools[IWM_MACS_OUT_GROUPS];
+ struct spool_entry spools[IWM_MACS_OUT_SGROUPS];
+};
+
+struct iwm_notif {
+ struct list_head pending;
+ u32 cmd_id;
+ void *cmd;
+ u8 src;
+ void *buf;
+ unsigned long buf_size;
+};
+
+struct iwm_sta_info {
+ u8 addr[ETH_ALEN];
+ bool valid;
+ bool qos;
+ u8 color;
+};
+
+struct iwm_tx_info {
+ u8 sta;
+ u8 color;
+ u8 tid;
+};
+
+struct iwm_rx_info {
+ unsigned long rx_size;
+ unsigned long rx_buf_size;
+};
+
+#define IWM_NUM_KEYS 4
+
+struct iwm_umac_key_hdr {
+ u8 mac[ETH_ALEN];
+ u8 key_idx;
+ u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */
+} __attribute__ ((packed));
+
+struct iwm_key {
+ struct iwm_umac_key_hdr hdr;
+ u8 in_use;
+ u8 alg;
+ u32 flags;
+ u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE];
+ u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE];
+ u8 key_len;
+ u8 key[32];
+};
+
+#define IWM_RX_ID_HASH 0xff
+#define IWM_RX_ID_GET_HASH(id) ((id) % IWM_RX_ID_HASH)
+
+#define IWM_STA_TABLE_NUM 16
+#define IWM_TX_LIST_SIZE 64
+#define IWM_RX_LIST_SIZE 256
+
+#define IWM_SCAN_ID_MAX 0xff
+
+#define IWM_STATUS_READY 0
+#define IWM_STATUS_SCANNING 1
+#define IWM_STATUS_SCAN_ABORTING 2
+#define IWM_STATUS_ASSOCIATING 3
+#define IWM_STATUS_ASSOCIATED 4
+
+#define IWM_RADIO_RFKILL_OFF 0
+#define IWM_RADIO_RFKILL_HW 1
+#define IWM_RADIO_RFKILL_SW 2
+
+struct iwm_tx_queue {
+ int id;
+ struct sk_buff_head queue;
+ struct workqueue_struct *wq;
+ struct work_struct worker;
+ u8 concat_buf[IWM_HAL_CONCATENATE_BUF_SIZE];
+ int concat_count;
+ u8 *concat_ptr;
+};
+
+/* Queues 0 ~ 3 for AC data, 5 for iPAN */
+#define IWM_TX_QUEUES 5
+#define IWM_TX_DATA_QUEUES 4
+#define IWM_TX_CMD_QUEUE 4
+
+struct iwm_bss_info {
+ struct list_head node;
+ struct cfg80211_bss *cfg_bss;
+ struct iwm_umac_notif_bss_info *bss;
+};
+
+typedef int (*iwm_handler)(struct iwm_priv *priv, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd);
+
+#define IWM_WATCHDOG_PERIOD (6 * HZ)
+
+struct iwm_priv {
+ struct wireless_dev *wdev;
+ struct iwm_if_ops *bus_ops;
+
+ struct iwm_conf conf;
+
+ unsigned long status;
+ unsigned long radio;
+
+ struct list_head pending_notif;
+ wait_queue_head_t notif_queue;
+
+ wait_queue_head_t nonwifi_queue;
+
+ unsigned long calib_done_map;
+ struct {
+ u8 *buf;
+ u32 size;
+ } calib_res[CALIBRATION_CMD_NUM];
+
+ struct iwm_umac_profile *umac_profile;
+ bool umac_profile_active;
+
+ u8 bssid[ETH_ALEN];
+ u8 channel;
+ u16 rate;
+
+ struct iwm_sta_info sta_table[IWM_STA_TABLE_NUM];
+ struct list_head bss_list;
+
+ void (*nonwifi_rx_handlers[UMAC_HDI_IN_OPCODE_NONWIFI_MAX])
+ (struct iwm_priv *priv, u8 *buf, unsigned long buf_size);
+
+ const iwm_handler *umac_handlers;
+ const iwm_handler *lmac_handlers;
+ DECLARE_BITMAP(lmac_handler_map, LMAC_COMMAND_ID_NUM);
+ DECLARE_BITMAP(umac_handler_map, LMAC_COMMAND_ID_NUM);
+ DECLARE_BITMAP(udma_handler_map, LMAC_COMMAND_ID_NUM);
+
+ struct list_head wifi_pending_cmd;
+ struct list_head nonwifi_pending_cmd;
+ u16 wifi_seq_num;
+ u8 nonwifi_seq_num;
+ spinlock_t cmd_lock;
+
+ u32 core_enabled;
+
+ u8 scan_id;
+ struct cfg80211_scan_request *scan_request;
+
+ struct sk_buff_head rx_list;
+ struct list_head rx_tickets;
+ struct list_head rx_packets[IWM_RX_ID_HASH];
+ struct workqueue_struct *rx_wq;
+ struct work_struct rx_worker;
+
+ struct iwm_tx_credit tx_credit;
+ struct iwm_tx_queue txq[IWM_TX_QUEUES];
+
+ struct iwm_key keys[IWM_NUM_KEYS];
+ struct iwm_key *default_key;
+
+ wait_queue_head_t mlme_queue;
+
+ struct iw_statistics wstats;
+ struct delayed_work stats_request;
+
+ struct iwm_debugfs dbg;
+
+ u8 *eeprom;
+ struct timer_list watchdog;
+ struct work_struct reset_worker;
+ struct rfkill *rfkill;
+
+ char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
+};
+
+static inline void *iwm_private(struct iwm_priv *iwm)
+{
+ BUG_ON(!iwm);
+ return &iwm->private;
+}
+
+#define hw_to_iwm(h) (h->iwm)
+#define iwm_to_dev(i) (wiphy_dev(i->wdev->wiphy))
+#define iwm_to_wiphy(i) (i->wdev->wiphy)
+#define wiphy_to_iwm(w) (struct iwm_priv *)(wiphy_priv(w))
+#define iwm_to_wdev(i) (i->wdev)
+#define wdev_to_iwm(w) (struct iwm_priv *)(wdev_priv(w))
+#define iwm_to_ndev(i) (i->wdev->netdev)
+#define ndev_to_iwm(n) (wdev_to_iwm(n->ieee80211_ptr))
+#define skb_to_rx_info(s) ((struct iwm_rx_info *)(s->cb))
+#define skb_to_tx_info(s) ((struct iwm_tx_info *)s->cb)
+
+extern const struct iw_handler_def iwm_iw_handler_def;
+
+void *iwm_if_alloc(int sizeof_bus, struct device *dev,
+ struct iwm_if_ops *if_ops);
+void iwm_if_free(struct iwm_priv *iwm);
+int iwm_mode_to_nl80211_iftype(int mode);
+int iwm_priv_init(struct iwm_priv *iwm);
+void iwm_reset(struct iwm_priv *iwm);
+void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
+ struct iwm_umac_notif_alive *alive);
+int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb);
+int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
+ u8 cmd_id, u8 source, u8 *buf, unsigned long buf_size);
+int iwm_notif_handle(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout);
+void iwm_init_default_profile(struct iwm_priv *iwm,
+ struct iwm_umac_profile *profile);
+void iwm_link_on(struct iwm_priv *iwm);
+void iwm_link_off(struct iwm_priv *iwm);
+int iwm_up(struct iwm_priv *iwm);
+int iwm_down(struct iwm_priv *iwm);
+
+/* TX API */
+void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages);
+void iwm_tx_worker(struct work_struct *work);
+int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+
+/* RX API */
+void iwm_rx_setup_handlers(struct iwm_priv *iwm);
+int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size);
+int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd);
+void iwm_rx_free(struct iwm_priv *iwm);
+
+#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
new file mode 100644
index 00000000000..db2e5eea189
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/lmac.h
@@ -0,0 +1,457 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#ifndef __IWM_LMAC_H__
+#define __IWM_LMAC_H__
+
+struct iwm_lmac_hdr {
+ u8 id;
+ u8 flags;
+ __le16 seq_num;
+} __attribute__ ((packed));
+
+/* LMAC commands */
+#define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1
+
+struct iwm_lmac_cal_cfg_elt {
+ __le32 enable; /* 1 means LMAC needs to do something */
+ __le32 start; /* 1 to start calibration, 0 to stop */
+ __le32 send_res; /* 1 for sending back results */
+ __le32 apply_res; /* 1 for applying calibration results to HW */
+ __le32 reserved;
+} __attribute__ ((packed));
+
+struct iwm_lmac_cal_cfg_status {
+ struct iwm_lmac_cal_cfg_elt init;
+ struct iwm_lmac_cal_cfg_elt periodic;
+ __le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */
+} __attribute__ ((packed));
+
+struct iwm_lmac_cal_cfg_cmd {
+ struct iwm_lmac_cal_cfg_status ucode_cfg;
+ struct iwm_lmac_cal_cfg_status driver_cfg;
+ __le32 reserved;
+} __attribute__ ((packed));
+
+struct iwm_lmac_cal_cfg_resp {
+ __le32 status;
+} __attribute__ ((packed));
+
+#define IWM_CARD_STATE_SW_HW_ENABLED 0x00
+#define IWM_CARD_STATE_HW_DISABLED 0x01
+#define IWM_CARD_STATE_SW_DISABLED 0x02
+#define IWM_CARD_STATE_CTKILL_DISABLED 0x04
+#define IWM_CARD_STATE_IS_RXON 0x10
+
+struct iwm_lmac_card_state {
+ __le32 flags;
+} __attribute__ ((packed));
+
+/**
+ * COEX_PRIORITY_TABLE_CMD
+ *
+ * One priority entry for each state.
+ * Two tables are kept, one for STA and one for WiPAN.
+ */
+enum {
+ /* UN-ASSOCIATION PART */
+ COEX_UNASSOC_IDLE = 0,
+ COEX_UNASSOC_MANUAL_SCAN,
+ COEX_UNASSOC_AUTO_SCAN,
+
+ /* CALIBRATION */
+ COEX_CALIBRATION,
+ COEX_PERIODIC_CALIBRATION,
+
+ /* CONNECTION */
+ COEX_CONNECTION_ESTAB,
+
+ /* ASSOCIATION PART */
+ COEX_ASSOCIATED_IDLE,
+ COEX_ASSOC_MANUAL_SCAN,
+ COEX_ASSOC_AUTO_SCAN,
+ COEX_ASSOC_ACTIVE_LEVEL,
+
+ /* RF ON/OFF */
+ COEX_RF_ON,
+ COEX_RF_OFF,
+ COEX_STAND_ALONE_DEBUG,
+
+ /* iPAN */
+ COEX_IPAN_ASSOC_LEVEL,
+
+ /* RESERVED */
+ COEX_RSRVD1,
+ COEX_RSRVD2,
+
+ COEX_EVENTS_NUM
+};
+
+#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK 0x1
+#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK 0x2
+#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK 0x4
+
+struct coex_event {
+ u8 req_prio;
+ u8 win_med_prio;
+ u8 reserved;
+ u8 flags;
+} __attribute__ ((packed));
+
+#define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1
+#define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4
+#define COEX_FLAGS_ASSOC_WAKEUP_UMASK_MSK 0x8
+#define COEX_FLAGS_COEX_ENABLE_MSK 0x80
+
+struct iwm_coex_prio_table_cmd {
+ u8 flags;
+ u8 reserved[3];
+ struct coex_event sta_prio[COEX_EVENTS_NUM];
+} __attribute__ ((packed));
+
+/* Coexistence definitions
+ *
+ * Constants used to fill in the priority tables:
+ * RP - Requested Priority
+ * WP - Win Medium Priority: priority assigned when the contention has been won
+ * FLAGS - Combination of COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK and
+ * COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK
+ */
+
+#define COEX_UNASSOC_IDLE_FLAGS 0
+#define COEX_UNASSOC_MANUAL_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
+ COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
+#define COEX_UNASSOC_AUTO_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
+ COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
+#define COEX_CALIBRATION_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
+ COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
+#define COEX_PERIODIC_CALIBRATION_FLAGS 0
+/* COEX_CONNECTION_ESTAB: we need DELAY_MEDIUM_FREE_NTFY to let WiMAX
+ * disconnect from network. */
+#define COEX_CONNECTION_ESTAB_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
+ COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
+ COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
+#define COEX_ASSOCIATED_IDLE_FLAGS 0
+#define COEX_ASSOC_MANUAL_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
+ COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
+#define COEX_ASSOC_AUTO_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
+ COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
+#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS 0
+#define COEX_RF_ON_FLAGS 0
+#define COEX_RF_OFF_FLAGS 0
+#define COEX_STAND_ALONE_DEBUG_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
+ COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
+#define COEX_IPAN_ASSOC_LEVEL_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
+ COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
+ COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
+#define COEX_RSRVD1_FLAGS 0
+#define COEX_RSRVD2_FLAGS 0
+/* XOR_RF_ON is the event wrapping all radio ownership. We need
+ * DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network. */
+#define COEX_XOR_RF_ON_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
+ COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
+ COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
+
+/* LMAC OP CODES */
+#define REPLY_PAD 0x0
+#define REPLY_ALIVE 0x1
+#define REPLY_ERROR 0x2
+#define REPLY_ECHO 0x3
+#define REPLY_HALT 0x6
+
+/* RXON state commands */
+#define REPLY_RX_ON 0x10
+#define REPLY_RX_ON_ASSOC 0x11
+#define REPLY_RX_OFF 0x12
+#define REPLY_QOS_PARAM 0x13
+#define REPLY_RX_ON_TIMING 0x14
+#define REPLY_INTERNAL_QOS_PARAM 0x15
+#define REPLY_RX_INT_TIMEOUT_CNFG 0x16
+#define REPLY_NULL 0x17
+
+/* Multi-Station support */
+#define REPLY_ADD_STA 0x18
+#define REPLY_REMOVE_STA 0x19
+#define REPLY_RESET_ALL_STA 0x1a
+
+/* RX, TX */
+#define REPLY_ALM_RX 0x1b
+#define REPLY_TX 0x1c
+#define REPLY_TXFIFO_FLUSH 0x1e
+
+/* MISC commands */
+#define REPLY_MGMT_MCAST_KEY 0x1f
+#define REPLY_WEPKEY 0x20
+#define REPLY_INIT_IV 0x21
+#define REPLY_WRITE_MIB 0x22
+#define REPLY_READ_MIB 0x23
+#define REPLY_RADIO_FE 0x24
+#define REPLY_TXFIFO_CFG 0x25
+#define REPLY_WRITE_READ 0x26
+#define REPLY_INSTALL_SEC_KEY 0x27
+
+
+#define REPLY_RATE_SCALE 0x47
+#define REPLY_LEDS_CMD 0x48
+#define REPLY_TX_LINK_QUALITY_CMD 0x4e
+#define REPLY_ANA_MIB_OVERRIDE_CMD 0x4f
+#define REPLY_WRITE2REG_CMD 0x50
+
+/* winfi-wifi coexistence */
+#define COEX_PRIORITY_TABLE_CMD 0x5a
+#define COEX_MEDIUM_NOTIFICATION 0x5b
+#define COEX_EVENT_CMD 0x5c
+
+/* more Protocol and Protocol-test commands */
+#define REPLY_MAX_SLEEP_TIME_CMD 0x61
+#define CALIBRATION_CFG_CMD 0x65
+#define CALIBRATION_RES_NOTIFICATION 0x66
+#define CALIBRATION_COMPLETE_NOTIFICATION 0x67
+
+/* Measurements */
+#define REPLY_QUIET_CMD 0x71
+#define REPLY_CHANNEL_SWITCH 0x72
+#define CHANNEL_SWITCH_NOTIFICATION 0x73
+
+#define REPLY_SPECTRUM_MEASUREMENT_CMD 0x74
+#define SPECTRUM_MEASURE_NOTIFICATION 0x75
+#define REPLY_MEASUREMENT_ABORT_CMD 0x76
+
+/* Power Management */
+#define POWER_TABLE_CMD 0x77
+#define SAVE_RESTORE_ADRESS_CMD 0x78
+#define REPLY_WATERMARK_CMD 0x79
+#define PM_DEBUG_STATISTIC_NOTIFIC 0x7B
+#define PD_FLUSH_N_NOTIFICATION 0x7C
+
+/* Scan commands and notifications */
+#define REPLY_SCAN_REQUEST_CMD 0x80
+#define REPLY_SCAN_ABORT_CMD 0x81
+#define SCAN_START_NOTIFICATION 0x82
+#define SCAN_RESULTS_NOTIFICATION 0x83
+#define SCAN_COMPLETE_NOTIFICATION 0x84
+
+/* Continuous TX commands */
+#define REPLY_CONT_TX_CMD 0x85
+#define END_OF_CONT_TX_NOTIFICATION 0x86
+
+/* Timer/Eeprom commands */
+#define TIMER_CMD 0x87
+#define EEPROM_WRITE_CMD 0x88
+
+/* PAPD commands */
+#define FEEDBACK_REQUEST_NOTIFICATION 0x8b
+#define REPLY_CW_CMD 0x8c
+
+/* IBSS/AP commands Continue */
+#define BEACON_NOTIFICATION 0x90
+#define REPLY_TX_BEACON 0x91
+#define REPLY_REQUEST_ATIM 0x93
+#define WHO_IS_AWAKE_NOTIFICATION 0x94
+#define TX_PWR_DBM_LIMIT_CMD 0x95
+#define QUIET_NOTIFICATION 0x96
+#define TX_PWR_TABLE_CMD 0x97
+#define TX_ANT_CONFIGURATION_CMD 0x98
+#define MEASURE_ABORT_NOTIFICATION 0x99
+#define REPLY_CALIBRATION_TUNE 0x9a
+
+/* bt config command */
+#define REPLY_BT_CONFIG 0x9b
+#define REPLY_STATISTICS_CMD 0x9c
+#define STATISTICS_NOTIFICATION 0x9d
+
+/* RF-KILL commands and notifications */
+#define REPLY_CARD_STATE_CMD 0xa0
+#define CARD_STATE_NOTIFICATION 0xa1
+
+/* Missed beacons notification */
+#define MISSED_BEACONS_NOTIFICATION 0xa2
+#define MISSED_BEACONS_NOTIFICATION_TH_CMD 0xa3
+
+#define REPLY_CT_KILL_CONFIG_CMD 0xa4
+
+/* HD commands and notifications */
+#define REPLY_HD_PARAMS_CMD 0xa6
+#define HD_PARAMS_NOTIFICATION 0xa7
+#define SENSITIVITY_CMD 0xa8
+#define U_APSD_PARAMS_CMD 0xa9
+#define NOISY_PLATFORM_CMD 0xaa
+#define ILLEGAL_CMD 0xac
+#define REPLY_PHY_CALIBRATION_CMD 0xb0
+#define REPLAY_RX_GAIN_CALIB_CMD 0xb1
+
+/* WiPAN commands */
+#define REPLY_WIPAN_PARAMS_CMD 0xb2
+#define REPLY_WIPAN_RX_ON_CMD 0xb3
+#define REPLY_WIPAN_RX_ON_TIMING 0xb4
+#define REPLY_WIPAN_TX_PWR_TABLE_CMD 0xb5
+#define REPLY_WIPAN_RXON_ASSOC_CMD 0xb6
+#define REPLY_WIPAN_QOS_PARAM 0xb7
+#define WIPAN_REPLY_WEPKEY 0xb8
+
+/* BeamForming commands */
+#define BEAMFORMER_CFG_CMD 0xba
+#define BEAMFORMEE_NOTIFICATION 0xbb
+
+/* TGn new Commands */
+#define REPLY_RX_PHY_CMD 0xc0
+#define REPLY_RX_MPDU_CMD 0xc1
+#define REPLY_MULTICAST_HASH 0xc2
+#define REPLY_KDR_RX 0xc3
+#define REPLY_RX_DSP_EXT_INFO 0xc4
+#define REPLY_COMPRESSED_BA 0xc5
+
+/* PNC commands */
+#define PNC_CONFIG_CMD 0xc8
+#define PNC_UPDATE_TABLE_CMD 0xc9
+#define XVT_GENERAL_CTRL_CMD 0xca
+#define REPLY_LEGACY_RADIO_FE 0xdd
+
+/* WoWLAN commands */
+#define WOWLAN_PATTERNS 0xe0
+#define WOWLAN_WAKEUP_FILTER 0xe1
+#define WOWLAN_TSC_RSC_PARAM 0xe2
+#define WOWLAN_TKIP_PARAM 0xe3
+#define WOWLAN_KEK_KCK_MATERIAL 0xe4
+#define WOWLAN_GET_STATUSES 0xe5
+#define WOWLAN_TX_POWER_PER_DB 0xe6
+#define REPLY_WOWLAN_GET_STATUSES WOWLAN_GET_STATUSES
+
+#define REPLY_DEBUG_CMD 0xf0
+#define REPLY_DSP_DEBUG_CMD 0xf1
+#define REPLY_DEBUG_MONITOR_CMD 0xf2
+#define REPLY_DEBUG_XVT_CMD 0xf3
+#define REPLY_DEBUG_DC_CALIB 0xf4
+#define REPLY_DYNAMIC_BP 0xf5
+
+/* General purpose Commands */
+#define REPLY_GP1_CMD 0xfa
+#define REPLY_GP2_CMD 0xfb
+#define REPLY_GP3_CMD 0xfc
+#define REPLY_GP4_CMD 0xfd
+#define REPLY_REPLAY_WRAPPER 0xfe
+#define REPLY_FRAME_DURATION_CALC_CMD 0xff
+
+#define LMAC_COMMAND_ID_MAX 0xff
+#define LMAC_COMMAND_ID_NUM (LMAC_COMMAND_ID_MAX + 1)
+
+
+/* Calibration */
+
+enum {
+ PHY_CALIBRATE_DC_CMD = 0,
+ PHY_CALIBRATE_LO_CMD = 1,
+ PHY_CALIBRATE_RX_BB_CMD = 2,
+ PHY_CALIBRATE_TX_IQ_CMD = 3,
+ PHY_CALIBRATE_RX_IQ_CMD = 4,
+ PHY_CALIBRATION_NOISE_CMD = 5,
+ PHY_CALIBRATE_AGC_TABLE_CMD = 6,
+ PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 7,
+ PHY_CALIBRATE_OPCODES_NUM,
+ SHILOH_PHY_CALIBRATE_DC_CMD = 8,
+ SHILOH_PHY_CALIBRATE_LO_CMD = 9,
+ SHILOH_PHY_CALIBRATE_RX_BB_CMD = 10,
+ SHILOH_PHY_CALIBRATE_TX_IQ_CMD = 11,
+ SHILOH_PHY_CALIBRATE_RX_IQ_CMD = 12,
+ SHILOH_PHY_CALIBRATION_NOISE_CMD = 13,
+ SHILOH_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
+ SHILOH_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
+ SHILOH_PHY_CALIBRATE_BASE_BAND_CMD = 16,
+ SHILOH_PHY_CALIBRATE_TXIQ_PERIODIC_CMD = 17,
+ CALIBRATION_CMD_NUM,
+};
+
+struct iwm_lmac_calib_hdr {
+ u8 opcode;
+ u8 first_grp;
+ u8 grp_num;
+ u8 all_data_valid;
+} __attribute__ ((packed));
+
+#define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7
+#define IWM_CALIB_FREQ_GROUPS_NR 5
+#define IWM_CALIB_DC_MODES_NR 12
+
+struct iwm_calib_rxiq_entry {
+ u16 ptam_postdist_ars;
+ u16 ptam_postdist_arc;
+} __attribute__ ((packed));
+
+struct iwm_calib_rxiq_group {
+ struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR];
+} __attribute__ ((packed));
+
+struct iwm_lmac_calib_rxiq {
+ struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR];
+} __attribute__ ((packed));
+
+struct iwm_calib_rxiq {
+ struct iwm_lmac_calib_hdr hdr;
+ struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR];
+} __attribute__ ((packed));
+
+#define LMAC_STA_ID_SEED 0x0f
+#define LMAC_STA_ID_POS 0
+
+#define LMAC_STA_COLOR_SEED 0x7
+#define LMAC_STA_COLOR_POS 4
+
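+/*
+ * Illustrative example (not driver code): both fields can be extracted
+ * from a station byte with the GET_VAL8() accessor from hal.h:
+ *
+ *	sta_id = GET_VAL8(sta_byte, LMAC_STA_ID);	(low 4 bits)
+ *	color = GET_VAL8(sta_byte, LMAC_STA_COLOR);	(bits 4 to 6)
+ */
+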
+struct iwm_lmac_power_report {
+ u8 pa_status;
+ u8 pa_integ_res_A[3];
+ u8 pa_integ_res_B[3];
+ u8 pa_integ_res_C[3];
+} __attribute__ ((packed));
+
+struct iwm_lmac_tx_resp {
+ u8 frame_cnt; /* 1 - no aggregation, greater than 1 - aggregation */
+ u8 bt_kill_cnt;
+ __le16 retry_cnt;
+ __le32 initial_tx_rate;
+ __le16 wireless_media_time;
+ struct iwm_lmac_power_report power_report;
+ __le32 tfd_info;
+ __le16 seq_ctl;
+ __le16 byte_cnt;
+ u8 tlc_rate_info;
+ u8 ra_tid;
+ __le16 frame_ctl;
+ __le32 status;
+} __attribute__ ((packed));
+
+#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
new file mode 100644
index 00000000000..6a2640f16b6
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -0,0 +1,680 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/ieee80211.h>
+#include <linux/wireless.h>
+
+#include "iwm.h"
+#include "debug.h"
+#include "bus.h"
+#include "umac.h"
+#include "commands.h"
+#include "hal.h"
+#include "fw.h"
+#include "rx.h"
+
+static struct iwm_conf def_iwm_conf = {
+
+ .sdio_ior_timeout = 5000,
+ .init_calib_map = BIT(PHY_CALIBRATE_DC_CMD) |
+ BIT(PHY_CALIBRATE_LO_CMD) |
+ BIT(PHY_CALIBRATE_TX_IQ_CMD) |
+ BIT(PHY_CALIBRATE_RX_IQ_CMD),
+ .periodic_calib_map = BIT(PHY_CALIBRATE_DC_CMD) |
+ BIT(PHY_CALIBRATE_LO_CMD) |
+ BIT(PHY_CALIBRATE_TX_IQ_CMD) |
+ BIT(PHY_CALIBRATE_RX_IQ_CMD) |
+ BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
+ .reset_on_fatal_err = 1,
+ .auto_connect = 1,
+ .wimax_not_present = 0,
+ .enable_qos = 1,
+ .mode = UMAC_MODE_BSS,
+
+ /* UMAC configuration */
+ .power_index = 0,
+ .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
+ .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
+ .cts_to_self = 0,
+
+ .assoc_timeout = 2,
+ .roam_timeout = 10,
+ .wireless_mode = WIRELESS_MODE_11A | WIRELESS_MODE_11G,
+ .coexist_mode = COEX_MODE_CM,
+
+ /* IBSS */
+ .ibss_band = UMAC_BAND_2GHZ,
+ .ibss_channel = 1,
+
+ .mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03},
+};
+
+static int modparam_reset;
+module_param_named(reset, modparam_reset, bool, 0644);
+MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])");
+
+int iwm_mode_to_nl80211_iftype(int mode)
+{
+ switch (mode) {
+ case UMAC_MODE_BSS:
+ return NL80211_IFTYPE_STATION;
+ case UMAC_MODE_IBSS:
+ return NL80211_IFTYPE_ADHOC;
+ default:
+ return NL80211_IFTYPE_UNSPECIFIED;
+ }
+
+ return 0;
+}
+
+static void iwm_statistics_request(struct work_struct *work)
+{
+ struct iwm_priv *iwm =
+ container_of(work, struct iwm_priv, stats_request.work);
+
+ iwm_send_umac_stats_req(iwm, 0);
+}
+
+static void iwm_reset_worker(struct work_struct *work)
+{
+ struct iwm_priv *iwm;
+ struct iwm_umac_profile *profile = NULL;
+ int uninitialized_var(ret), retry = 0;
+
+ iwm = container_of(work, struct iwm_priv, reset_worker);
+
+ if (iwm->umac_profile_active) {
+ profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL);
+ if (profile)
+ memcpy(profile, iwm->umac_profile, sizeof(*profile));
+ else
+ IWM_ERR(iwm, "Couldn't alloc memory for profile\n");
+ }
+
+ iwm_down(iwm);
+
+ while (retry++ < 3) {
+ ret = iwm_up(iwm);
+ if (!ret)
+ break;
+
+ schedule_timeout_uninterruptible(10 * HZ);
+ }
+
+ if (ret) {
+ IWM_WARN(iwm, "iwm_up() failed: %d\n", ret);
+
+ kfree(profile);
+ return;
+ }
+
+ if (profile) {
+ IWM_DBG_MLME(iwm, DBG, "Resend UMAC profile\n");
+ memcpy(iwm->umac_profile, profile, sizeof(*profile));
+ iwm_send_mlme_profile(iwm);
+ kfree(profile);
+ }
+}
+
+static void iwm_watchdog(unsigned long data)
+{
+ struct iwm_priv *iwm = (struct iwm_priv *)data;
+
+ IWM_WARN(iwm, "Watchdog expired: UMAC stalls!\n");
+
+ if (modparam_reset)
+ schedule_work(&iwm->reset_worker);
+}
+
+int iwm_priv_init(struct iwm_priv *iwm)
+{
+ int i;
+ char name[32];
+
+ iwm->status = 0;
+ INIT_LIST_HEAD(&iwm->pending_notif);
+ init_waitqueue_head(&iwm->notif_queue);
+ init_waitqueue_head(&iwm->nonwifi_queue);
+ init_waitqueue_head(&iwm->mlme_queue);
+ memcpy(&iwm->conf, &def_iwm_conf, sizeof(struct iwm_conf));
+ spin_lock_init(&iwm->tx_credit.lock);
+ INIT_LIST_HEAD(&iwm->wifi_pending_cmd);
+ INIT_LIST_HEAD(&iwm->nonwifi_pending_cmd);
+ iwm->wifi_seq_num = UMAC_WIFI_SEQ_NUM_BASE;
+ iwm->nonwifi_seq_num = UMAC_NONWIFI_SEQ_NUM_BASE;
+ spin_lock_init(&iwm->cmd_lock);
+ iwm->scan_id = 1;
+ INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request);
+ INIT_WORK(&iwm->reset_worker, iwm_reset_worker);
+ INIT_LIST_HEAD(&iwm->bss_list);
+
+ skb_queue_head_init(&iwm->rx_list);
+ INIT_LIST_HEAD(&iwm->rx_tickets);
+ for (i = 0; i < IWM_RX_ID_HASH; i++)
+ INIT_LIST_HEAD(&iwm->rx_packets[i]);
+
+ INIT_WORK(&iwm->rx_worker, iwm_rx_worker);
+
+ iwm->rx_wq = create_singlethread_workqueue(KBUILD_MODNAME "_rx");
+ if (!iwm->rx_wq)
+ return -EAGAIN;
+
+ for (i = 0; i < IWM_TX_QUEUES; i++) {
+ INIT_WORK(&iwm->txq[i].worker, iwm_tx_worker);
+ snprintf(name, 32, KBUILD_MODNAME "_tx_%d", i);
+ iwm->txq[i].id = i;
+ iwm->txq[i].wq = create_singlethread_workqueue(name);
+ if (!iwm->txq[i].wq)
+ return -EAGAIN;
+
+ skb_queue_head_init(&iwm->txq[i].queue);
+ }
+
+ for (i = 0; i < IWM_NUM_KEYS; i++)
+ memset(&iwm->keys[i], 0, sizeof(struct iwm_key));
+
+ iwm->default_key = NULL;
+
+ init_timer(&iwm->watchdog);
+ iwm->watchdog.function = iwm_watchdog;
+ iwm->watchdog.data = (unsigned long)iwm;
+
+ return 0;
+}
+
+/*
+ * We reset all the structures, and we reset the UMAC.
+ * After calling this routine, you're expected to reload
+ * the firmware.
+ */
+void iwm_reset(struct iwm_priv *iwm)
+{
+ struct iwm_notif *notif, *next;
+
+ if (test_bit(IWM_STATUS_READY, &iwm->status))
+ iwm_target_reset(iwm);
+
+ iwm->status = 0;
+ iwm->scan_id = 1;
+
+ list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) {
+ list_del(&notif->pending);
+ kfree(notif->buf);
+ kfree(notif);
+ }
+
+ iwm_cmd_flush(iwm);
+
+ flush_workqueue(iwm->rx_wq);
+
+ iwm_link_off(iwm);
+}
+
+/*
+ * Notification code:
+ *
+ * We're faced with the following issue: any host command may or
+ * may not have an answer, and when an answer is expected it can
+ * be handled either synchronously or asynchronously.
+ * To handle the synchronous case, we implemented our own
+ * notification mechanism.
+ * When a code path needs to wait synchronously for a command
+ * response, it calls iwm_notif_handle(), which waits for the
+ * right notification to show up and then processes it. Before
+ * starting to wait, it registers as a waiter for this specific
+ * answer (by setting a bit in one of the handler maps), so that
+ * the rx code knows it needs to send a notification to the
+ * waiting processes. The rx code does so by calling
+ * iwm_notif_send(), which adds the notification to the pending
+ * notifications list and then wakes the waiting processes up.
+ */
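+
+/*
+ * Illustrative sketch of the synchronous flow described above (not
+ * driver code; the channel list command is just one example of it,
+ * see iwm_channels_init()):
+ *
+ *	ret = iwm_send_umac_channel_list(iwm);
+ *	ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST,
+ *			       IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
+ *
+ * The second call blocks until the rx path queues the matching answer
+ * through iwm_notif_send() below, or until the timeout expires.
+ */
+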
+int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
+ u8 cmd_id, u8 source, u8 *buf, unsigned long buf_size)
+{
+ struct iwm_notif *notif;
+
+ notif = kzalloc(sizeof(struct iwm_notif), GFP_KERNEL);
+ if (!notif) {
+ IWM_ERR(iwm, "Couldn't alloc memory for notification\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&notif->pending);
+ notif->cmd = cmd;
+ notif->cmd_id = cmd_id;
+ notif->src = source;
+ notif->buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!notif->buf) {
+ IWM_ERR(iwm, "Couldn't alloc notification buffer\n");
+ kfree(notif);
+ return -ENOMEM;
+ }
+ notif->buf_size = buf_size;
+ memcpy(notif->buf, buf, buf_size);
+ list_add_tail(&notif->pending, &iwm->pending_notif);
+
+ wake_up_interruptible(&iwm->notif_queue);
+
+ return 0;
+}
+
+static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd,
+ u8 source)
+{
+ struct iwm_notif *notif, *next;
+
+ list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) {
+ if ((notif->cmd_id == cmd) && (notif->src == source)) {
+ list_del(&notif->pending);
+ return notif;
+ }
+ }
+
+ return NULL;
+}
+
+static struct iwm_notif *iwm_notif_wait(struct iwm_priv *iwm, u32 cmd,
+ u8 source, long timeout)
+{
+ int ret;
+ struct iwm_notif *notif;
+ unsigned long *map = NULL;
+
+ switch (source) {
+ case IWM_SRC_LMAC:
+ map = &iwm->lmac_handler_map[0];
+ break;
+ case IWM_SRC_UMAC:
+ map = &iwm->umac_handler_map[0];
+ break;
+ case IWM_SRC_UDMA:
+ map = &iwm->udma_handler_map[0];
+ break;
+ }
+
+ set_bit(cmd, map);
+
+ ret = wait_event_interruptible_timeout(iwm->notif_queue,
+ ((notif = iwm_notif_find(iwm, cmd, source)) != NULL),
+ timeout);
+ clear_bit(cmd, map);
+
+ if (!ret)
+ return NULL;
+
+ return notif;
+}
+
+int iwm_notif_handle(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout)
+{
+ int ret;
+ struct iwm_notif *notif;
+
+ notif = iwm_notif_wait(iwm, cmd, source, timeout);
+ if (!notif)
+ return -ETIME;
+
+ ret = iwm_rx_handle_resp(iwm, notif->buf, notif->buf_size, notif->cmd);
+ kfree(notif->buf);
+ kfree(notif);
+
+ return ret;
+}
+
+static int iwm_config_boot_params(struct iwm_priv *iwm)
+{
+ struct iwm_udma_nonwifi_cmd target_cmd;
+ int ret;
+
+ /* check that WiMAX is off and configure the debug monitor */
+ if (iwm->conf.wimax_not_present) {
+ u32 data1 = 0x1f;
+ u32 addr1 = 0x606BE258;
+
+ u32 data2_set = 0x0;
+ u32 data2_clr = 0x1;
+ u32 addr2 = 0x606BE100;
+
+ u32 data3 = 0x1;
+ u32 addr3 = 0x606BEC00;
+
+ target_cmd.resp = 0;
+ target_cmd.handle_by_hw = 0;
+ target_cmd.eop = 1;
+
+ target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
+ target_cmd.addr = cpu_to_le32(addr1);
+ target_cmd.op1_sz = cpu_to_le32(sizeof(u32));
+ target_cmd.op2 = 0;
+
+ ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1);
+ if (ret < 0) {
+ IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
+ return ret;
+ }
+
+ target_cmd.opcode = UMAC_HDI_OUT_OPCODE_READ_MODIFY_WRITE;
+ target_cmd.addr = cpu_to_le32(addr2);
+ target_cmd.op1_sz = cpu_to_le32(data2_set);
+ target_cmd.op2 = cpu_to_le32(data2_clr);
+
+ ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1);
+ if (ret < 0) {
+ IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
+ return ret;
+ }
+
+ target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
+ target_cmd.addr = cpu_to_le32(addr3);
+ target_cmd.op1_sz = cpu_to_le32(sizeof(u32));
+ target_cmd.op2 = 0;
+
+ ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data3);
+ if (ret < 0) {
+ IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void iwm_init_default_profile(struct iwm_priv *iwm,
+ struct iwm_umac_profile *profile)
+{
+ memset(profile, 0, sizeof(struct iwm_umac_profile));
+
+ profile->sec.auth_type = UMAC_AUTH_TYPE_OPEN;
+ profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE;
+ profile->sec.ucast_cipher = UMAC_CIPHER_TYPE_NONE;
+ profile->sec.mcast_cipher = UMAC_CIPHER_TYPE_NONE;
+
+ if (iwm->conf.enable_qos)
+ profile->flags |= cpu_to_le16(UMAC_PROFILE_QOS_ALLOWED);
+
+ profile->wireless_mode = iwm->conf.wireless_mode;
+ profile->mode = cpu_to_le32(iwm->conf.mode);
+
+ profile->ibss.atim = 0;
+ profile->ibss.beacon_interval = 100;
+ profile->ibss.join_only = 0;
+ profile->ibss.band = iwm->conf.ibss_band;
+ profile->ibss.channel = iwm->conf.ibss_channel;
+}
+
+void iwm_link_on(struct iwm_priv *iwm)
+{
+ netif_carrier_on(iwm_to_ndev(iwm));
+ netif_tx_wake_all_queues(iwm_to_ndev(iwm));
+
+ iwm_send_umac_stats_req(iwm, 0);
+}
+
+void iwm_link_off(struct iwm_priv *iwm)
+{
+ struct iw_statistics *wstats = &iwm->wstats;
+ int i;
+
+ netif_tx_stop_all_queues(iwm_to_ndev(iwm));
+ netif_carrier_off(iwm_to_ndev(iwm));
+
+ for (i = 0; i < IWM_TX_QUEUES; i++) {
+ skb_queue_purge(&iwm->txq[i].queue);
+
+ iwm->txq[i].concat_count = 0;
+ iwm->txq[i].concat_ptr = iwm->txq[i].concat_buf;
+
+ flush_workqueue(iwm->txq[i].wq);
+ }
+
+ iwm_rx_free(iwm);
+
+ cancel_delayed_work(&iwm->stats_request);
+ memset(wstats, 0, sizeof(struct iw_statistics));
+ wstats->qual.updated = IW_QUAL_ALL_INVALID;
+
+ del_timer_sync(&iwm->watchdog);
+}
+
+static void iwm_bss_list_clean(struct iwm_priv *iwm)
+{
+ struct iwm_bss_info *bss, *next;
+
+ list_for_each_entry_safe(bss, next, &iwm->bss_list, node) {
+ list_del(&bss->node);
+ kfree(bss->bss);
+ kfree(bss);
+ }
+}
+
+static int iwm_channels_init(struct iwm_priv *iwm)
+{
+ int ret;
+
+#ifdef CONFIG_IWM_B0_HW_SUPPORT
+ if (iwm->conf.hw_b0) {
+ IWM_INFO(iwm, "Workaround EEPROM channels for B0 hardware\n");
+ return 0;
+ }
+#endif
+
+ ret = iwm_send_umac_channel_list(iwm);
+ if (ret) {
+ IWM_ERR(iwm, "Send channel list failed\n");
+ return ret;
+ }
+
+ ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST,
+ IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
+ if (ret) {
+ IWM_ERR(iwm, "Didn't get a channel list notification\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int iwm_up(struct iwm_priv *iwm)
+{
+ int ret;
+ struct iwm_notif *notif_reboot, *notif_ack = NULL;
+
+ ret = iwm_bus_enable(iwm);
+ if (ret) {
+ IWM_ERR(iwm, "Couldn't enable function\n");
+ return ret;
+ }
+
+ iwm_rx_setup_handlers(iwm);
+
+ /* Wait for initial BARKER_REBOOT from hardware */
+ notif_reboot = iwm_notif_wait(iwm, IWM_BARKER_REBOOT_NOTIFICATION,
+ IWM_SRC_UDMA, 2 * HZ);
+ if (!notif_reboot) {
+ IWM_ERR(iwm, "Wait for REBOOT_BARKER timeout\n");
+ goto err_disable;
+ }
+
+ /* We send the barker back */
+ ret = iwm_bus_send_chunk(iwm, notif_reboot->buf, 16);
+ if (ret) {
+ IWM_ERR(iwm, "REBOOT barker response failed\n");
+ kfree(notif_reboot->buf);
+ kfree(notif_reboot);
+ goto err_disable;
+ }
+
+ kfree(notif_reboot->buf);
+ kfree(notif_reboot);
+
+ /* Wait for ACK_BARKER from hardware */
+ notif_ack = iwm_notif_wait(iwm, IWM_ACK_BARKER_NOTIFICATION,
+ IWM_SRC_UDMA, 2 * HZ);
+ if (!notif_ack) {
+ IWM_ERR(iwm, "Wait for ACK_BARKER timeout\n");
+ goto err_disable;
+ }
+
+ kfree(notif_ack->buf);
+ kfree(notif_ack);
+
+ /* We start to config static boot parameters */
+ ret = iwm_config_boot_params(iwm);
+ if (ret) {
+ IWM_ERR(iwm, "Config boot parameters failed\n");
+ goto err_disable;
+ }
+
+ ret = iwm_read_mac(iwm, iwm_to_ndev(iwm)->dev_addr);
+ if (ret) {
+ IWM_ERR(iwm, "MAC reading failed\n");
+ goto err_disable;
+ }
+
+ /* We can load the FWs */
+ ret = iwm_load_fw(iwm);
+ if (ret) {
+ IWM_ERR(iwm, "FW loading failed\n");
+ goto err_disable;
+ }
+
+ /* We configure the UMAC and enable the wifi module */
+ ret = iwm_send_umac_config(iwm,
+ cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) |
+ cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_LINK_EN) |
+ cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_MLME_EN));
+ if (ret) {
+ IWM_ERR(iwm, "UMAC config failed\n");
+ goto err_fw;
+ }
+
+ ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS,
+ IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
+ if (ret) {
+ IWM_ERR(iwm, "Didn't get a wifi core status notification\n");
+ goto err_fw;
+ }
+
+ if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN |
+ UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) {
+ IWM_DBG_BOOT(iwm, DBG, "Not all cores enabled:0x%x\n",
+ iwm->core_enabled);
+ ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS,
+ IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
+ if (ret) {
+ IWM_ERR(iwm, "Didn't get a core status notification\n");
+ goto err_fw;
+ }
+
+ if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN |
+ UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) {
+ IWM_ERR(iwm, "Not all cores enabled: 0x%x\n",
+ iwm->core_enabled);
+ goto err_fw;
+ } else {
+ IWM_INFO(iwm, "All cores enabled\n");
+ }
+ }
+
+ iwm->umac_profile = kmalloc(sizeof(struct iwm_umac_profile),
+ GFP_KERNEL);
+ if (!iwm->umac_profile) {
+ IWM_ERR(iwm, "Couldn't alloc memory for profile\n");
+ goto err_fw;
+ }
+
+ iwm_init_default_profile(iwm, iwm->umac_profile);
+
+ ret = iwm_channels_init(iwm);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't init channels\n");
+ goto err_profile;
+ }
+
+ /* Set the READY bit to indicate interface is brought up successfully */
+ set_bit(IWM_STATUS_READY, &iwm->status);
+
+ return 0;
+
+ err_profile:
+ kfree(iwm->umac_profile);
+ iwm->umac_profile = NULL;
+
+ err_fw:
+ iwm_eeprom_exit(iwm);
+
+ err_disable:
+ ret = iwm_bus_disable(iwm);
+ if (ret < 0)
+ IWM_ERR(iwm, "Couldn't disable function\n");
+
+ return -EIO;
+}
+
+int iwm_down(struct iwm_priv *iwm)
+{
+ int ret;
+
+ /* The interface is already down */
+ if (!test_bit(IWM_STATUS_READY, &iwm->status))
+ return 0;
+
+ if (iwm->scan_request) {
+ cfg80211_scan_done(iwm->scan_request, true);
+ iwm->scan_request = NULL;
+ }
+
+ clear_bit(IWM_STATUS_READY, &iwm->status);
+
+ iwm_eeprom_exit(iwm);
+ kfree(iwm->umac_profile);
+ iwm->umac_profile = NULL;
+ iwm_bss_list_clean(iwm);
+
+ iwm->default_key = NULL;
+ iwm->core_enabled = 0;
+
+ ret = iwm_bus_disable(iwm);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't disable function\n");
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
new file mode 100644
index 00000000000..68e2c3b6c7a
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -0,0 +1,162 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+/*
+ * These are the netdev-related hooks for iwm.
+ *
+ * Some interesting code paths:
+ *
+ * iwm_open() (Called at netdev interface bringup time)
+ * -> iwm_up() (main.c)
+ * -> iwm_bus_enable()
+ * -> if_sdio_enable() (In case of an SDIO bus)
+ * -> sdio_enable_func()
+ * -> iwm_notif_wait(BARKER_REBOOT) (wait for reboot barker)
+ * -> iwm_notif_wait(ACK_BARKER) (wait for ACK barker)
+ * -> iwm_load_fw() (fw.c)
+ * -> iwm_load_umac()
+ * -> iwm_load_lmac() (Calibration LMAC)
+ * -> iwm_load_lmac() (Operational LMAC)
+ * -> iwm_send_umac_config()
+ *
+ * iwm_stop() (Called at netdev interface bringdown time)
+ * -> iwm_down()
+ * -> iwm_bus_disable()
+ * -> if_sdio_disable() (In case of an SDIO bus)
+ * -> sdio_disable_func()
+ */
+#include <linux/netdevice.h>
+
+#include "iwm.h"
+#include "cfg80211.h"
+#include "debug.h"
+
+static int iwm_open(struct net_device *ndev)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(ndev);
+ int ret = 0;
+
+ if (!test_bit(IWM_RADIO_RFKILL_SW, &iwm->radio))
+ ret = iwm_up(iwm);
+
+ return ret;
+}
+
+static int iwm_stop(struct net_device *ndev)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(ndev);
+ int ret = 0;
+
+ if (!test_bit(IWM_RADIO_RFKILL_SW, &iwm->radio))
+ ret = iwm_down(iwm);
+
+ return ret;
+}
+
+/*
+ * iwm AC to queue mapping
+ *
+ * AC_VO -> queue 3
+ * AC_VI -> queue 2
+ * AC_BE -> queue 1
+ * AC_BK -> queue 0
+ */
+static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+
+static u16 iwm_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ skb->priority = cfg80211_classify8021d(skb);
+
+ return iwm_1d_to_queue[skb->priority];
+}
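+
+/*
+ * Example (illustrative only): a frame classified by
+ * cfg80211_classify8021d() as 802.1d priority 6 (voice) is mapped by
+ * iwm_1d_to_queue[6] to queue 3 (AC_VO), while priority 1 (background)
+ * maps to queue 0 (AC_BK).
+ */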
+
+static const struct net_device_ops iwm_netdev_ops = {
+ .ndo_open = iwm_open,
+ .ndo_stop = iwm_stop,
+ .ndo_start_xmit = iwm_xmit_frame,
+ .ndo_select_queue = iwm_select_queue,
+};
+
+void *iwm_if_alloc(int sizeof_bus, struct device *dev,
+ struct iwm_if_ops *if_ops)
+{
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ struct iwm_priv *iwm;
+ int ret = 0;
+
+ wdev = iwm_wdev_alloc(sizeof_bus, dev);
+ if (!wdev) {
+ dev_err(dev, "no memory for wireless device instance\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ iwm = wdev_to_iwm(wdev);
+ iwm->bus_ops = if_ops;
+ iwm->wdev = wdev;
+ iwm_priv_init(iwm);
+ wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode);
+
+ ndev = alloc_netdev_mq(0, "wlan%d", ether_setup,
+ IWM_TX_QUEUES);
+ if (!ndev) {
+ dev_err(dev, "no memory for network device instance\n");
+ /* Make sure we return a real error pointer, not ERR_PTR(0) */
+ ret = -ENOMEM;
+ goto out_wdev;
+ }
+
+ ndev->netdev_ops = &iwm_netdev_ops;
+ ndev->wireless_handlers = &iwm_iw_handler_def;
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ ret = register_netdev(ndev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register netdev: %d\n", ret);
+ goto out_ndev;
+ }
+
+ wdev->netdev = ndev;
+
+ return iwm;
+
+ out_ndev:
+ free_netdev(ndev);
+
+ out_wdev:
+ iwm_wdev_free(iwm);
+ return ERR_PTR(ret);
+}
+
+void iwm_if_free(struct iwm_priv *iwm)
+{
+ int i;
+
+ if (!iwm_to_ndev(iwm))
+ return;
+
+ unregister_netdev(iwm_to_ndev(iwm));
+ free_netdev(iwm_to_ndev(iwm));
+ iwm_wdev_free(iwm);
+ destroy_workqueue(iwm->rx_wq);
+ for (i = 0; i < IWM_TX_QUEUES; i++)
+ destroy_workqueue(iwm->txq[i].wq);
+}
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
new file mode 100644
index 00000000000..d73cf96c6dc
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -0,0 +1,1431 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/if_arp.h>
+#include <linux/list.h>
+#include <net/iw_handler.h>
+
+#include "iwm.h"
+#include "debug.h"
+#include "hal.h"
+#include "umac.h"
+#include "lmac.h"
+#include "commands.h"
+#include "rx.h"
+#include "cfg80211.h"
+#include "eeprom.h"
+
+static int iwm_rx_check_udma_hdr(struct iwm_udma_in_hdr *hdr)
+{
+ if ((le32_to_cpu(hdr->cmd) == UMAC_PAD_TERMINAL) ||
+ (le32_to_cpu(hdr->size) == UMAC_PAD_TERMINAL))
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int iwm_rx_resp_size(struct iwm_udma_in_hdr *hdr)
+{
+ return ALIGN(le32_to_cpu(hdr->size) + sizeof(struct iwm_udma_in_hdr),
+ 16);
+}
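+
+/*
+ * A minimal sketch of the arithmetic behind iwm_rx_resp_size(): the
+ * UDMA payload size plus the UDMA header is rounded up to the next
+ * multiple of 16, which is exactly what ALIGN(x, 16) does. For
+ * instance, a 50 byte payload with a 16 byte header gives 66, rounded
+ * up to 80. The 16 byte header size is an assumption used only for
+ * this worked example.
+ */
+static inline u32 iwm_example_round_up_16(u32 len)
+{
+ return (len + 15) & ~15U; /* equivalent to ALIGN(len, 16) */
+}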
+
+/*
+ * Notification handlers:
+ *
+ * For every possible notification we can receive from the
+ * target, we have a handler.
+ * When we get a target notification, and there is no one
+ * waiting for it, it's just processed through the rx code
+ * path:
+ *
+ * iwm_rx_handle()
+ * -> iwm_rx_handle_umac()
+ * -> iwm_rx_handle_wifi()
+ * -> iwm_rx_handle_resp()
+ * -> iwm_ntf_*()
+ *
+ * OR
+ *
+ * -> iwm_rx_handle_non_wifi()
+ *
+ * If there are processes waiting for this notification, then
+ * iwm_rx_handle_wifi() just wakes those processes up and they
+ * grab the pending notification.
+ */
+static int iwm_ntf_error(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_error *error;
+ struct iwm_fw_error_hdr *fw_err;
+
+ error = (struct iwm_umac_notif_error *)buf;
+ fw_err = &error->err;
+
+
+ IWM_ERR(iwm, "%cMAC FW ERROR:\n",
+ (le32_to_cpu(fw_err->category) == UMAC_SYS_ERR_CAT_LMAC) ? 'L' : 'U');
+ IWM_ERR(iwm, "\tCategory: %d\n", le32_to_cpu(fw_err->category));
+ IWM_ERR(iwm, "\tStatus: 0x%x\n", le32_to_cpu(fw_err->status));
+ IWM_ERR(iwm, "\tPC: 0x%x\n", le32_to_cpu(fw_err->pc));
+ IWM_ERR(iwm, "\tblink1: %d\n", le32_to_cpu(fw_err->blink1));
+ IWM_ERR(iwm, "\tblink2: %d\n", le32_to_cpu(fw_err->blink2));
+ IWM_ERR(iwm, "\tilink1: %d\n", le32_to_cpu(fw_err->ilink1));
+ IWM_ERR(iwm, "\tilink2: %d\n", le32_to_cpu(fw_err->ilink2));
+ IWM_ERR(iwm, "\tData1: 0x%x\n", le32_to_cpu(fw_err->data1));
+ IWM_ERR(iwm, "\tData2: 0x%x\n", le32_to_cpu(fw_err->data2));
+ IWM_ERR(iwm, "\tLine number: %d\n", le32_to_cpu(fw_err->line_num));
+ IWM_ERR(iwm, "\tUMAC status: 0x%x\n", le32_to_cpu(fw_err->umac_status));
+ IWM_ERR(iwm, "\tLMAC status: 0x%x\n", le32_to_cpu(fw_err->lmac_status));
+ IWM_ERR(iwm, "\tSDIO status: 0x%x\n", le32_to_cpu(fw_err->sdio_status));
+
+ return 0;
+}
+
+static int iwm_ntf_umac_alive(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_alive *alive_resp =
+ (struct iwm_umac_notif_alive *)(buf);
+ u16 status = le16_to_cpu(alive_resp->status);
+
+ if (status == UMAC_NTFY_ALIVE_STATUS_ERR) {
+ IWM_ERR(iwm, "Receive error UMAC_ALIVE\n");
+ return -EIO;
+ }
+
+ iwm_tx_credit_init_pools(iwm, alive_resp);
+
+ return 0;
+}
+
+static int iwm_ntf_init_complete(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_init_complete *init_complete =
+ (struct iwm_umac_notif_init_complete *)(buf);
+ u16 status = le16_to_cpu(init_complete->status);
+
+ if (status == UMAC_NTFY_INIT_COMPLETE_STATUS_ERR) {
+ IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is on (radio off)\n");
+ set_bit(IWM_RADIO_RFKILL_HW, &iwm->radio);
+ } else {
+ IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is off (radio on)\n");
+ clear_bit(IWM_RADIO_RFKILL_HW, &iwm->radio);
+ }
+
+ return 0;
+}
+
+static int iwm_ntf_tx_credit_update(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ int pool_nr, total_freed_pages;
+ unsigned long pool_map;
+ int i, id;
+ struct iwm_umac_notif_page_dealloc *dealloc =
+ (struct iwm_umac_notif_page_dealloc *)buf;
+
+ pool_nr = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_CNT);
+ pool_map = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_MSK);
+
+ IWM_DBG_TX(iwm, DBG, "UMAC dealloc notification: pool nr %d, "
+ "update map 0x%lx\n", pool_nr, pool_map);
+
+ spin_lock(&iwm->tx_credit.lock);
+
+ for (i = 0; i < pool_nr; i++) {
+ id = GET_VAL32(dealloc->grp_info[i],
+ UMAC_DEALLOC_NTFY_GROUP_NUM);
+ if (test_bit(id, &pool_map)) {
+ total_freed_pages = GET_VAL32(dealloc->grp_info[i],
+ UMAC_DEALLOC_NTFY_PAGE_CNT);
+ iwm_tx_credit_inc(iwm, id, total_freed_pages);
+ }
+ }
+
+ spin_unlock(&iwm->tx_credit.lock);
+
+ return 0;
+}
+
+static int iwm_ntf_umac_reset(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ IWM_DBG_NTF(iwm, DBG, "UMAC RESET done\n");
+
+ return 0;
+}
+
+static int iwm_ntf_lmac_version(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ IWM_DBG_NTF(iwm, INFO, "LMAC Version: %x.%x\n", buf[9], buf[8]);
+
+ return 0;
+}
+
+static int iwm_ntf_tx(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_lmac_tx_resp *tx_resp;
+ struct iwm_umac_wifi_in_hdr *hdr;
+
+ tx_resp = (struct iwm_lmac_tx_resp *)
+ (buf + sizeof(struct iwm_umac_wifi_in_hdr));
+ hdr = (struct iwm_umac_wifi_in_hdr *)buf;
+
+ IWM_DBG_NTF(iwm, DBG, "REPLY_TX, buf size: %lu\n", buf_size);
+
+ IWM_DBG_NTF(iwm, DBG, "Seqnum: %d\n",
+ le16_to_cpu(hdr->sw_hdr.cmd.seq_num));
+ IWM_DBG_NTF(iwm, DBG, "\tFrame cnt: %d\n", tx_resp->frame_cnt);
+ IWM_DBG_NTF(iwm, DBG, "\tRetry cnt: %d\n",
+ le16_to_cpu(tx_resp->retry_cnt));
+ IWM_DBG_NTF(iwm, DBG, "\tSeq ctl: %d\n", le16_to_cpu(tx_resp->seq_ctl));
+ IWM_DBG_NTF(iwm, DBG, "\tByte cnt: %d\n",
+ le16_to_cpu(tx_resp->byte_cnt));
+ IWM_DBG_NTF(iwm, DBG, "\tStatus: 0x%x\n", le32_to_cpu(tx_resp->status));
+
+ return 0;
+}
+
+
+static int iwm_ntf_calib_res(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ u8 opcode;
+ u8 *calib_buf;
+ struct iwm_lmac_calib_hdr *hdr = (struct iwm_lmac_calib_hdr *)
+ (buf + sizeof(struct iwm_umac_wifi_in_hdr));
+
+ opcode = hdr->opcode;
+
+ BUG_ON(opcode >= CALIBRATION_CMD_NUM ||
+ opcode < PHY_CALIBRATE_OPCODES_NUM);
+
+ IWM_DBG_NTF(iwm, DBG, "Store calibration result for opcode: %d\n",
+ opcode);
+
+ buf_size -= sizeof(struct iwm_umac_wifi_in_hdr);
+ calib_buf = iwm->calib_res[opcode].buf;
+
+ if (!calib_buf || (iwm->calib_res[opcode].size < buf_size)) {
+ kfree(calib_buf);
+ calib_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!calib_buf) {
+ IWM_ERR(iwm, "Memory allocation failed: calib_res\n");
+ return -ENOMEM;
+ }
+ iwm->calib_res[opcode].buf = calib_buf;
+ iwm->calib_res[opcode].size = buf_size;
+ }
+
+ memcpy(calib_buf, hdr, buf_size);
+ set_bit(opcode - PHY_CALIBRATE_OPCODES_NUM, &iwm->calib_done_map);
+
+ return 0;
+}
+
+static int iwm_ntf_calib_complete(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ IWM_DBG_NTF(iwm, DBG, "Calibration completed\n");
+
+ return 0;
+}
+
+static int iwm_ntf_calib_cfg(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_lmac_cal_cfg_resp *cal_resp;
+
+ cal_resp = (struct iwm_lmac_cal_cfg_resp *)
+ (buf + sizeof(struct iwm_umac_wifi_in_hdr));
+
+ IWM_DBG_NTF(iwm, DBG, "Calibration CFG command status: %d\n",
+ le32_to_cpu(cal_resp->status));
+
+ return 0;
+}
+
+static int iwm_ntf_wifi_status(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_wifi_status *status =
+ (struct iwm_umac_notif_wifi_status *)buf;
+
+ iwm->core_enabled |= le16_to_cpu(status->status);
+
+ return 0;
+}
+
+static struct iwm_rx_ticket_node *
+iwm_rx_ticket_node_alloc(struct iwm_priv *iwm, struct iwm_rx_ticket *ticket)
+{
+ struct iwm_rx_ticket_node *ticket_node;
+
+ ticket_node = kzalloc(sizeof(struct iwm_rx_ticket_node), GFP_KERNEL);
+ if (!ticket_node) {
+ IWM_ERR(iwm, "Couldn't allocate ticket node\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ticket_node->ticket = kzalloc(sizeof(struct iwm_rx_ticket), GFP_KERNEL);
+ if (!ticket_node->ticket) {
+ IWM_ERR(iwm, "Couldn't allocate RX ticket\n");
+ kfree(ticket_node);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memcpy(ticket_node->ticket, ticket, sizeof(struct iwm_rx_ticket));
+ INIT_LIST_HEAD(&ticket_node->node);
+
+ return ticket_node;
+}
+
+static void iwm_rx_ticket_node_free(struct iwm_rx_ticket_node *ticket_node)
+{
+ kfree(ticket_node->ticket);
+ kfree(ticket_node);
+}
+
+static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id)
+{
+ u8 id_hash = IWM_RX_ID_GET_HASH(id);
+ struct list_head *packet_list;
+ struct iwm_rx_packet *packet, *next;
+
+ packet_list = &iwm->rx_packets[id_hash];
+
+ list_for_each_entry_safe(packet, next, packet_list, node)
+ if (packet->id == id)
+ return packet;
+
+ return NULL;
+}
+
+static struct iwm_rx_packet *iwm_rx_packet_alloc(struct iwm_priv *iwm, u8 *buf,
+ u32 size, u16 id)
+{
+ struct iwm_rx_packet *packet;
+
+ packet = kzalloc(sizeof(struct iwm_rx_packet), GFP_KERNEL);
+ if (!packet) {
+ IWM_ERR(iwm, "Couldn't allocate packet\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ packet->skb = dev_alloc_skb(size);
+ if (!packet->skb) {
+ IWM_ERR(iwm, "Couldn't allocate packet SKB\n");
+ kfree(packet);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ packet->pkt_size = size;
+
+ skb_put(packet->skb, size);
+ memcpy(packet->skb->data, buf, size);
+ INIT_LIST_HEAD(&packet->node);
+ packet->id = id;
+
+ return packet;
+}
+
+void iwm_rx_free(struct iwm_priv *iwm)
+{
+ struct iwm_rx_ticket_node *ticket, *nt;
+ struct iwm_rx_packet *packet, *np;
+ int i;
+
+ list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
+ list_del(&ticket->node);
+ iwm_rx_ticket_node_free(ticket);
+ }
+
+ for (i = 0; i < IWM_RX_ID_HASH; i++) {
+ list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
+ node) {
+ list_del(&packet->node);
+ kfree_skb(packet->skb);
+ kfree(packet);
+ }
+ }
+}
+
+static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_rx_ticket *ntf_rx_ticket =
+ (struct iwm_umac_notif_rx_ticket *)buf;
+ struct iwm_rx_ticket *ticket =
+ (struct iwm_rx_ticket *)ntf_rx_ticket->tickets;
+ int i, schedule_rx = 0;
+
+ for (i = 0; i < ntf_rx_ticket->num_tickets; i++) {
+ struct iwm_rx_ticket_node *ticket_node;
+
+ switch (le16_to_cpu(ticket->action)) {
+ case IWM_RX_TICKET_RELEASE:
+ case IWM_RX_TICKET_DROP:
+ /* We can push the packet to the stack */
+ ticket_node = iwm_rx_ticket_node_alloc(iwm, ticket);
+ if (IS_ERR(ticket_node))
+ return PTR_ERR(ticket_node);
+
+ IWM_DBG_NTF(iwm, DBG, "TICKET RELEASE(%d)\n",
+ ticket->id);
+ list_add_tail(&ticket_node->node, &iwm->rx_tickets);
+
+ /*
+ * We received an Rx ticket; most likely there's a packet
+ * pending for it, so it's not worth going through the
+ * packet hash list to double check.
+ * Let's just fire the rx worker.
+ */
+ schedule_rx = 1;
+
+ break;
+
+ default:
+ IWM_ERR(iwm, "Invalid RX ticket action: 0x%x\n",
+ ticket->action);
+ }
+
+ ticket++;
+ }
+
+ if (schedule_rx)
+ queue_work(iwm->rx_wq, &iwm->rx_worker);
+
+ return 0;
+}
+
+static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_wifi_in_hdr *wifi_hdr;
+ struct iwm_rx_packet *packet;
+ u16 id, buf_offset;
+ u32 packet_size;
+
+ IWM_DBG_NTF(iwm, DBG, "\n");
+
+ wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
+ id = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
+ buf_offset = sizeof(struct iwm_umac_wifi_in_hdr);
+ packet_size = buf_size - sizeof(struct iwm_umac_wifi_in_hdr);
+
+ IWM_DBG_NTF(iwm, DBG, "CMD:0x%x, seqnum: %d, packet size: %d\n",
+ wifi_hdr->sw_hdr.cmd.cmd, id, packet_size);
+ IWM_DBG_RX(iwm, DBG, "Packet id: %d\n", id);
+ IWM_HEXDUMP(iwm, DBG, RX, "PACKET: ", buf + buf_offset, packet_size);
+
+ packet = iwm_rx_packet_alloc(iwm, buf + buf_offset, packet_size, id);
+ if (IS_ERR(packet))
+ return PTR_ERR(packet);
+
+ list_add_tail(&packet->node, &iwm->rx_packets[IWM_RX_ID_GET_HASH(id)]);
+
+ /* We might (unlikely) have received the packet _after_ the ticket */
+ queue_work(iwm->rx_wq, &iwm->rx_worker);
+
+ return 0;
+}
+
+/* MLME handlers */
+static int iwm_mlme_assoc_start(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_assoc_start *start;
+
+ start = (struct iwm_umac_notif_assoc_start *)buf;
+
+ set_bit(IWM_STATUS_ASSOCIATING, &iwm->status);
+
+ IWM_DBG_MLME(iwm, INFO, "Association with %pM Started, reason: %d\n",
+ start->bssid, le32_to_cpu(start->roam_reason));
+
+ wake_up_interruptible(&iwm->mlme_queue);
+
+ return 0;
+}
+
+static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_assoc_complete *complete =
+ (struct iwm_umac_notif_assoc_complete *)buf;
+ union iwreq_data wrqu;
+
+ IWM_DBG_MLME(iwm, INFO, "Association with %pM completed, status: %d\n",
+ complete->bssid, complete->status);
+
+ memset(&wrqu, 0, sizeof(wrqu));
+
+ clear_bit(IWM_STATUS_ASSOCIATING, &iwm->status);
+
+ switch (le32_to_cpu(complete->status)) {
+ case UMAC_ASSOC_COMPLETE_SUCCESS:
+ set_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
+ memcpy(iwm->bssid, complete->bssid, ETH_ALEN);
+ iwm->channel = complete->channel;
+
+ iwm_link_on(iwm);
+
+ memcpy(wrqu.ap_addr.sa_data, complete->bssid, ETH_ALEN);
+ break;
+ case UMAC_ASSOC_COMPLETE_FAILURE:
+ clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
+ memset(iwm->bssid, 0, ETH_ALEN);
+ iwm->channel = 0;
+
+ iwm_link_off(iwm);
+ default:
+ break;
+ }
+
+ if (iwm->conf.mode == UMAC_MODE_IBSS) {
+ cfg80211_ibss_joined(iwm_to_ndev(iwm), iwm->bssid, GFP_KERNEL);
+ return 0;
+ }
+
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ wireless_send_event(iwm_to_ndev(iwm), SIOCGIWAP, &wrqu, NULL);
+
+ return 0;
+}
+
+static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_profile_invalidate *invalid;
+
+ invalid = (struct iwm_umac_notif_profile_invalidate *)buf;
+
+ IWM_DBG_MLME(iwm, INFO, "Profile Invalidated. Reason: %d\n",
+ le32_to_cpu(invalid->reason));
+
+ clear_bit(IWM_STATUS_ASSOCIATING, &iwm->status);
+ clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
+
+ iwm->umac_profile_active = 0;
+ memset(iwm->bssid, 0, ETH_ALEN);
+ iwm->channel = 0;
+
+ iwm_link_off(iwm);
+
+ wake_up_interruptible(&iwm->mlme_queue);
+
+ return 0;
+}
+
+static int iwm_mlme_scan_complete(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ int ret;
+ struct iwm_umac_notif_scan_complete *scan_complete =
+ (struct iwm_umac_notif_scan_complete *)buf;
+ u32 result = le32_to_cpu(scan_complete->result);
+
+ IWM_DBG_MLME(iwm, INFO, "type:0x%x result:0x%x seq:%d\n",
+ le32_to_cpu(scan_complete->type),
+ le32_to_cpu(scan_complete->result),
+ scan_complete->seq_num);
+
+ if (!test_and_clear_bit(IWM_STATUS_SCANNING, &iwm->status)) {
+ IWM_ERR(iwm, "Scan complete while device not scanning\n");
+ return -EIO;
+ }
+ if (!iwm->scan_request)
+ return 0;
+
+ ret = iwm_cfg80211_inform_bss(iwm);
+
+ cfg80211_scan_done(iwm->scan_request,
+ (result & UMAC_SCAN_RESULT_ABORTED) ? 1 : !!ret);
+ iwm->scan_request = NULL;
+
+ return ret;
+}
+
+static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_sta_info *umac_sta =
+ (struct iwm_umac_notif_sta_info *)buf;
+ struct iwm_sta_info *sta;
+ int i;
+
+ switch (le32_to_cpu(umac_sta->opcode)) {
+ case UMAC_OPCODE_ADD_MODIFY:
+ sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
+
+ IWM_DBG_MLME(iwm, INFO, "%s STA: ID = %d, Color = %d, "
+ "addr = %pM, qos = %d\n",
+ sta->valid ? "Modify" : "Add",
+ GET_VAL8(umac_sta->sta_id, LMAC_STA_ID),
+ GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR),
+ umac_sta->mac_addr,
+ umac_sta->flags & UMAC_STA_FLAG_QOS);
+
+ sta->valid = 1;
+ sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
+ sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
+ memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
+ break;
+ case UMAC_OPCODE_REMOVE:
+ IWM_DBG_MLME(iwm, INFO, "Remove STA: ID = %d, Color = %d, "
+ "addr = %pM\n",
+ GET_VAL8(umac_sta->sta_id, LMAC_STA_ID),
+ GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR),
+ umac_sta->mac_addr);
+
+ sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
+
+ if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
+ sta->valid = 0;
+
+ break;
+ case UMAC_OPCODE_CLEAR_ALL:
+ for (i = 0; i < IWM_STA_TABLE_NUM; i++)
+ iwm->sta_table[i].valid = 0;
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ struct wiphy *wiphy = iwm_to_wiphy(iwm);
+ struct ieee80211_mgmt *mgmt;
+ struct iwm_umac_notif_bss_info *umac_bss =
+ (struct iwm_umac_notif_bss_info *)buf;
+ struct ieee80211_channel *channel;
+ struct ieee80211_supported_band *band;
+ struct iwm_bss_info *bss, *next;
+ s32 signal;
+ int freq;
+ u16 frame_len = le16_to_cpu(umac_bss->frame_len);
+ size_t bss_len = sizeof(struct iwm_umac_notif_bss_info) + frame_len;
+
+ mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
+
+ IWM_DBG_MLME(iwm, DBG, "New BSS info entry: %pM\n", mgmt->bssid);
+ IWM_DBG_MLME(iwm, DBG, "\tType: 0x%x\n", le32_to_cpu(umac_bss->type));
+ IWM_DBG_MLME(iwm, DBG, "\tTimestamp: %d\n",
+ le32_to_cpu(umac_bss->timestamp));
+ IWM_DBG_MLME(iwm, DBG, "\tTable Index: %d\n",
+ le16_to_cpu(umac_bss->table_idx));
+ IWM_DBG_MLME(iwm, DBG, "\tBand: %d\n", umac_bss->band);
+ IWM_DBG_MLME(iwm, DBG, "\tChannel: %d\n", umac_bss->channel);
+ IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi);
+ IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len);
+
+ list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
+ if (bss->bss->table_idx == umac_bss->table_idx)
+ break;
+
+ if (&bss->node != &iwm->bss_list) {
+ /* Remove the old BSS entry, we will add it back later. */
+ list_del(&bss->node);
+ kfree(bss->bss);
+ } else {
+ /* New BSS entry */
+
+ bss = kzalloc(sizeof(struct iwm_bss_info), GFP_KERNEL);
+ if (!bss) {
+ IWM_ERR(iwm, "Couldn't allocate bss_info\n");
+ return -ENOMEM;
+ }
+ }
+
+ bss->bss = kzalloc(bss_len, GFP_KERNEL);
+ if (!bss->bss) {
+ kfree(bss);
+ IWM_ERR(iwm, "Couldn't allocate bss\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&bss->node);
+ memcpy(bss->bss, umac_bss, bss_len);
+
+ if (umac_bss->band == UMAC_BAND_2GHZ)
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else if (umac_bss->band == UMAC_BAND_5GHZ)
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ else {
+ IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band);
+ goto err;
+ }
+
+ freq = ieee80211_channel_to_frequency(umac_bss->channel);
+ channel = ieee80211_get_channel(wiphy, freq);
+ signal = umac_bss->rssi * 100;
+
+ bss->cfg_bss = cfg80211_inform_bss_frame(wiphy, channel,
+ mgmt, frame_len,
+ signal, GFP_KERNEL);
+ if (!bss->cfg_bss)
+ goto err;
+
+ list_add_tail(&bss->node, &iwm->bss_list);
+
+ return 0;
+ err:
+ kfree(bss->bss);
+ kfree(bss);
+
+ return -EINVAL;
+}
+
+static int iwm_mlme_remove_bss(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_bss_removed *bss_rm =
+ (struct iwm_umac_notif_bss_removed *)buf;
+ struct iwm_bss_info *bss, *next;
+ u16 table_idx;
+ int i;
+
+ for (i = 0; i < le32_to_cpu(bss_rm->count); i++) {
+ table_idx = (le16_to_cpu(bss_rm->entries[i])
+ & IWM_BSS_REMOVE_INDEX_MSK);
+ list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
+ if (bss->bss->table_idx == cpu_to_le16(table_idx)) {
+ struct ieee80211_mgmt *mgmt;
+
+ mgmt = (struct ieee80211_mgmt *)
+ (bss->bss->frame_buf);
+ IWM_DBG_MLME(iwm, ERR,
+ "BSS removed: %pM\n",
+ mgmt->bssid);
+ list_del(&bss->node);
+ kfree(bss->bss);
+ kfree(bss);
+ }
+ }
+
+ return 0;
+}
+
+static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_mgt_frame *mgt_frame =
+ (struct iwm_umac_notif_mgt_frame *)buf;
+ struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;
+ u8 *ie;
+ unsigned int event;
+ union iwreq_data wrqu;
+
+ IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
+ le16_to_cpu(mgt_frame->len));
+
+ if (ieee80211_is_assoc_req(mgt->frame_control)) {
+ ie = mgt->u.assoc_req.variable;
+ event = IWEVASSOCREQIE;
+ } else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
+ ie = mgt->u.reassoc_req.variable;
+ event = IWEVASSOCREQIE;
+ } else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
+ ie = mgt->u.assoc_resp.variable;
+ event = IWEVASSOCRESPIE;
+ } else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
+ ie = mgt->u.reassoc_resp.variable;
+ event = IWEVASSOCRESPIE;
+ } else {
+ IWM_ERR(iwm, "Unsupported management frame");
+ return 0;
+ }
+
+ wrqu.data.length = le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+
+ IWM_HEXDUMP(iwm, DBG, MLME, "EVT: ", ie, wrqu.data.length);
+ wireless_send_event(iwm_to_ndev(iwm), event, &wrqu, ie);
+
+ return 0;
+}
+
+static int iwm_ntf_mlme(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_wifi_if *notif =
+ (struct iwm_umac_notif_wifi_if *)buf;
+
+ switch (notif->status) {
+ case WIFI_IF_NTFY_ASSOC_START:
+ return iwm_mlme_assoc_start(iwm, buf, buf_size, cmd);
+ case WIFI_IF_NTFY_ASSOC_COMPLETE:
+ return iwm_mlme_assoc_complete(iwm, buf, buf_size, cmd);
+ case WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE:
+ return iwm_mlme_profile_invalidate(iwm, buf, buf_size, cmd);
+ case WIFI_IF_NTFY_CONNECTION_TERMINATED:
+ IWM_DBG_MLME(iwm, DBG, "Connection terminated\n");
+ break;
+ case WIFI_IF_NTFY_SCAN_COMPLETE:
+ return iwm_mlme_scan_complete(iwm, buf, buf_size, cmd);
+ case WIFI_IF_NTFY_STA_TABLE_CHANGE:
+ return iwm_mlme_update_sta_table(iwm, buf, buf_size, cmd);
+ case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED:
+ IWM_DBG_MLME(iwm, DBG, "Extended IE required\n");
+ break;
+ case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED:
+ return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd);
+ case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED:
+ return iwm_mlme_remove_bss(iwm, buf, buf_size, cmd);
+ break;
+ case WIFI_IF_NTFY_MGMT_FRAME:
+ return iwm_mlme_mgt_frame(iwm, buf, buf_size, cmd);
+ case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_START:
+ case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_COMPLETE:
+ case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_START:
+ case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_RESULT:
+ case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_START:
+ case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_COMPLETE:
+ case WIFI_DBG_IF_NTFY_CNCT_ATC_START:
+ case WIFI_DBG_IF_NTFY_COEX_NOTIFICATION:
+ case WIFI_DBG_IF_NTFY_COEX_HANDLE_ENVELOP:
+ case WIFI_DBG_IF_NTFY_COEX_HANDLE_RELEASE_ENVELOP:
+ IWM_DBG_MLME(iwm, DBG, "MLME debug notification: 0x%x\n",
+ notif->status);
+ break;
+ default:
+ IWM_ERR(iwm, "Unhandled notification: 0x%x\n", notif->status);
+ break;
+ }
+
+ return 0;
+}
+
+#define IWM_STATS_UPDATE_INTERVAL (2 * HZ)
+
+static int iwm_ntf_statistics(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_notif_stats *stats = (struct iwm_umac_notif_stats *)buf;
+ struct iw_statistics *wstats = &iwm->wstats;
+ u16 max_rate = 0;
+ int i;
+
+ IWM_DBG_MLME(iwm, DBG, "Statistics notification received\n");
+
+ if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
+ for (i = 0; i < UMAC_NTF_RATE_SAMPLE_NR; i++) {
+ max_rate = max_t(u16, max_rate,
+ max(le16_to_cpu(stats->tx_rate[i]),
+ le16_to_cpu(stats->rx_rate[i])));
+ }
+ /* UMAC passes rate info multiplied by 2 */
+ iwm->rate = max_rate >> 1;
+ }
+
+ wstats->status = 0;
+
+ wstats->discard.nwid = le32_to_cpu(stats->rx_drop_other_bssid);
+ wstats->discard.code = le32_to_cpu(stats->rx_drop_decode);
+ wstats->discard.fragment = le32_to_cpu(stats->rx_drop_reassembly);
+ wstats->discard.retries = le32_to_cpu(stats->tx_drop_max_retry);
+
+ wstats->miss.beacon = le32_to_cpu(stats->missed_beacons);
+
+ /* according to cfg80211 */
+ if (stats->rssi_dbm < -110)
+ wstats->qual.qual = 0;
+ else if (stats->rssi_dbm > -40)
+ wstats->qual.qual = 70;
+ else
+ wstats->qual.qual = stats->rssi_dbm + 110;
+
+ wstats->qual.level = stats->rssi_dbm;
+ wstats->qual.noise = stats->noise_dbm;
+ wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+
+ schedule_delayed_work(&iwm->stats_request, IWM_STATS_UPDATE_INTERVAL);
+
+ mod_timer(&iwm->watchdog, round_jiffies(jiffies + IWM_WATCHDOG_PERIOD));
+
+ return 0;
+}
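+
+/*
+ * A minimal sketch of the RSSI to link-quality mapping used above:
+ * anything at or below -110 dBm reports quality 0, anything at or
+ * above -40 dBm reports 70, and values in between map linearly
+ * (e.g. -75 dBm gives 35). The helper name is illustrative only.
+ */
+static inline u8 iwm_example_rssi_to_qual(s8 rssi_dbm)
+{
+ if (rssi_dbm < -110)
+ return 0;
+ if (rssi_dbm > -40)
+ return 70;
+ return rssi_dbm + 110;
+}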
+
+static int iwm_ntf_eeprom_proxy(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_cmd_eeprom_proxy *eeprom_proxy =
+ (struct iwm_umac_cmd_eeprom_proxy *)
+ (buf + sizeof(struct iwm_umac_wifi_in_hdr));
+ struct iwm_umac_cmd_eeprom_proxy_hdr *hdr = &eeprom_proxy->hdr;
+ u32 hdr_offset = le32_to_cpu(hdr->offset);
+ u32 hdr_len = le32_to_cpu(hdr->len);
+ u32 hdr_type = le32_to_cpu(hdr->type);
+
+ IWM_DBG_NTF(iwm, DBG, "type: 0x%x, len: %d, offset: 0x%x\n",
+ hdr_type, hdr_len, hdr_offset);
+
+ if ((hdr_offset + hdr_len) > IWM_EEPROM_LEN)
+ return -EINVAL;
+
+#ifdef CONFIG_IWM_B0_HW_SUPPORT
+ if (hdr_offset == IWM_EEPROM_SKU_CAP_OFF) {
+ if (eeprom_proxy->buf[0] == 0xff)
+ iwm->conf.hw_b0 = 1;
+ }
+#endif
+
+ switch (hdr_type) {
+ case IWM_UMAC_CMD_EEPROM_TYPE_READ:
+ memcpy(iwm->eeprom + hdr_offset, eeprom_proxy->buf, hdr_len);
+ break;
+ case IWM_UMAC_CMD_EEPROM_TYPE_WRITE:
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int iwm_ntf_channel_info_list(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_cmd_get_channel_list *ch_list =
+ (struct iwm_umac_cmd_get_channel_list *)
+ (buf + sizeof(struct iwm_umac_wifi_in_hdr));
+ struct wiphy *wiphy = iwm_to_wiphy(iwm);
+ struct ieee80211_supported_band *band;
+ int i;
+
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+
+ for (i = 0; i < band->n_channels; i++) {
+ unsigned long ch_mask_0 =
+ le32_to_cpu(ch_list->ch[0].channels_mask);
+ unsigned long ch_mask_2 =
+ le32_to_cpu(ch_list->ch[2].channels_mask);
+
+ if (!test_bit(i, &ch_mask_0))
+ band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+
+ if (!test_bit(i, &ch_mask_2))
+ band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS;
+ }
+
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ for (i = 0; i < min(band->n_channels, 32); i++) {
+ unsigned long ch_mask_1 =
+ le32_to_cpu(ch_list->ch[1].channels_mask);
+ unsigned long ch_mask_3 =
+ le32_to_cpu(ch_list->ch[3].channels_mask);
+
+ if (!test_bit(i, &ch_mask_1))
+ band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+
+ if (!test_bit(i, &ch_mask_3))
+ band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS;
+ }
+
+ return 0;
+}
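+
+/*
+ * A minimal sketch of the channel-mask decoding above: each channel
+ * list entry carries a 32 bit mask where a set bit i means channel
+ * index i is usable. A cleared bit in mask 0/1 marks the channel
+ * disabled, a cleared bit in mask 2/3 forbids IBSS on it. The helper
+ * below only shows the bit test (idx must be below 32) and its name
+ * is illustrative only.
+ */
+static inline bool iwm_example_channel_allowed(u32 channels_mask, int idx)
+{
+ return !!(channels_mask & BIT(idx));
+}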
+
+static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_umac_wifi_if *hdr =
+ (struct iwm_umac_wifi_if *)cmd->buf.payload;
+
+ IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
+ "oid is %d\n", hdr->oid);
+
+ switch (hdr->oid) {
+ case UMAC_WIFI_IF_CMD_SET_PROFILE:
+ iwm->umac_profile_active = 1;
+ wake_up_interruptible(&iwm->mlme_queue);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size, struct iwm_wifi_cmd *cmd)
+{
+ struct iwm_lmac_card_state *state = (struct iwm_lmac_card_state *)
+ (buf + sizeof(struct iwm_umac_wifi_in_hdr));
+ u32 flags = le32_to_cpu(state->flags);
+
+ IWM_INFO(iwm, "HW RF Kill %s, CT Kill %s\n",
+ flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF",
+ flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF");
+
+ if (flags & IWM_CARD_STATE_HW_DISABLED)
+ set_bit(IWM_RADIO_RFKILL_HW, &iwm->radio);
+ else
+ clear_bit(IWM_RADIO_RFKILL_HW, &iwm->radio);
+
+ return 0;
+}
+
+static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size)
+{
+ struct iwm_umac_wifi_in_hdr *wifi_hdr;
+ struct iwm_wifi_cmd *cmd;
+ u8 source, cmd_id;
+ u16 seq_num;
+ u32 count;
+ u8 resp;
+
+ wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
+ cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
+
+ source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
+ if (source >= IWM_SRC_NUM) {
+ IWM_CRIT(iwm, "invalid source %d\n", source);
+ return -EINVAL;
+ }
+
+ count = (GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT));
+ count += sizeof(struct iwm_umac_wifi_in_hdr) -
+ sizeof(struct iwm_dev_cmd_hdr);
+ if (count > buf_size) {
+ IWM_CRIT(iwm, "count %d, buf size:%ld\n", count, buf_size);
+ return -EINVAL;
+ }
+
+ resp = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_STATUS);
+
+ seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
+
+ IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n",
+ cmd_id, source, seq_num);
+
+ /*
+ * If this is a response to a previously sent command, there must
+ * be a pending command for this sequence number.
+ */
+ cmd = iwm_get_pending_wifi_cmd(iwm, seq_num);
+
+ /* Notify the caller only for sync commands. */
+ switch (source) {
+ case UMAC_HDI_IN_SOURCE_FHRX:
+ if (iwm->lmac_handlers[cmd_id] &&
+ test_bit(cmd_id, &iwm->lmac_handler_map[0]))
+ return iwm_notif_send(iwm, cmd, cmd_id, source,
+ buf, count);
+ break;
+ case UMAC_HDI_IN_SOURCE_FW:
+ if (iwm->umac_handlers[cmd_id] &&
+ test_bit(cmd_id, &iwm->umac_handler_map[0]))
+ return iwm_notif_send(iwm, cmd, cmd_id, source,
+ buf, count);
+ break;
+ case UMAC_HDI_IN_SOURCE_UDMA:
+ break;
+ }
+
+ return iwm_rx_handle_resp(iwm, buf, count, cmd);
+}
+
+int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size,
+ struct iwm_wifi_cmd *cmd)
+{
+ u8 source, cmd_id;
+ struct iwm_umac_wifi_in_hdr *wifi_hdr;
+ int ret = 0;
+
+ wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
+ cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
+
+ source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
+
+ IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x\n", cmd_id, source);
+
+ switch (source) {
+ case UMAC_HDI_IN_SOURCE_FHRX:
+ if (iwm->lmac_handlers[cmd_id])
+ ret = iwm->lmac_handlers[cmd_id]
+ (iwm, buf, buf_size, cmd);
+ break;
+ case UMAC_HDI_IN_SOURCE_FW:
+ if (iwm->umac_handlers[cmd_id])
+ ret = iwm->umac_handlers[cmd_id]
+ (iwm, buf, buf_size, cmd);
+ break;
+ case UMAC_HDI_IN_SOURCE_UDMA:
+ ret = -EINVAL;
+ break;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size)
+{
+ u8 seq_num;
+ struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf;
+ struct iwm_nonwifi_cmd *cmd, *next;
+
+ seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
+
+ /*
+ * We received a non-wifi answer.
+ * Let's check if there's a pending command for it, and if so
+ * replace the command payload with the buffer, and then wake the
+ * callers up.
+ * That means we only support synchronised non-wifi command
+ * response schemes.
+ */
+ list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending)
+ if (cmd->seq_num == seq_num) {
+ cmd->resp_received = 1;
+ cmd->buf.len = buf_size;
+ memcpy(cmd->buf.hdr, buf, buf_size);
+ wake_up_interruptible(&iwm->nonwifi_queue);
+ }
+
+ return 0;
+}
+
+static int iwm_rx_handle_umac(struct iwm_priv *iwm, u8 *buf,
+ unsigned long buf_size)
+{
+ int ret = 0;
+ u8 op_code;
+ unsigned long buf_offset = 0;
+ struct iwm_udma_in_hdr *hdr;
+
+ /*
+ * To allow for a more efficient bus usage, UMAC
+ * messages are encapsulated into UDMA ones. This
+ * way we can have several UMAC messages in one bus
+ * transfer.
+ * A UDMA frame size is always aligned to 16 bytes,
+ * and a UDMA frame must not start with a UMAC_PAD_TERMINAL
+ * word. This is how we parse a bus frame into several
+ * UDMA ones.
+ */
+ while (buf_offset < buf_size) {
+
+ hdr = (struct iwm_udma_in_hdr *)(buf + buf_offset);
+
+ if (iwm_rx_check_udma_hdr(hdr) < 0) {
+ IWM_DBG_RX(iwm, DBG, "End of frame\n");
+ break;
+ }
+
+ op_code = GET_VAL32(hdr->cmd, UMAC_HDI_IN_CMD_OPCODE);
+
+ IWM_DBG_RX(iwm, DBG, "Op code: 0x%x\n", op_code);
+
+ if (op_code == UMAC_HDI_IN_OPCODE_WIFI) {
+ ret |= iwm_rx_handle_wifi(iwm, buf + buf_offset,
+ buf_size - buf_offset);
+ } else if (op_code < UMAC_HDI_IN_OPCODE_NONWIFI_MAX) {
+ if (GET_VAL32(hdr->cmd,
+ UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) !=
+ UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) {
+ IWM_ERR(iwm, "Incorrect hw signature\n");
+ return -EINVAL;
+ }
+ ret |= iwm_rx_handle_nonwifi(iwm, buf + buf_offset,
+ buf_size - buf_offset);
+ } else {
+ IWM_ERR(iwm, "Invalid RX opcode: 0x%x\n", op_code);
+ ret |= -EINVAL;
+ }
+
+ buf_offset += iwm_rx_resp_size(hdr);
+ }
+
+ return ret;
+}
+
+int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size)
+{
+ struct iwm_udma_in_hdr *hdr;
+
+ hdr = (struct iwm_udma_in_hdr *)buf;
+
+ switch (le32_to_cpu(hdr->cmd)) {
+ case UMAC_REBOOT_BARKER:
+ return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION,
+ IWM_SRC_UDMA, buf, buf_size);
+ case UMAC_ACK_BARKER:
+ return iwm_notif_send(iwm, NULL, IWM_ACK_BARKER_NOTIFICATION,
+ IWM_SRC_UDMA, NULL, 0);
+ default:
+ IWM_DBG_RX(iwm, DBG, "Received cmd: 0x%x\n", hdr->cmd);
+ return iwm_rx_handle_umac(iwm, buf, buf_size);
+ }
+
+ return 0;
+}
+
+static const iwm_handler iwm_umac_handlers[] =
+{
+ [UMAC_NOTIFY_OPCODE_ERROR] = iwm_ntf_error,
+ [UMAC_NOTIFY_OPCODE_ALIVE] = iwm_ntf_umac_alive,
+ [UMAC_NOTIFY_OPCODE_INIT_COMPLETE] = iwm_ntf_init_complete,
+ [UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS] = iwm_ntf_wifi_status,
+ [UMAC_NOTIFY_OPCODE_WIFI_IF_WRAPPER] = iwm_ntf_mlme,
+ [UMAC_NOTIFY_OPCODE_PAGE_DEALLOC] = iwm_ntf_tx_credit_update,
+ [UMAC_NOTIFY_OPCODE_RX_TICKET] = iwm_ntf_rx_ticket,
+ [UMAC_CMD_OPCODE_RESET] = iwm_ntf_umac_reset,
+ [UMAC_NOTIFY_OPCODE_STATS] = iwm_ntf_statistics,
+ [UMAC_CMD_OPCODE_EEPROM_PROXY] = iwm_ntf_eeprom_proxy,
+ [UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST] = iwm_ntf_channel_info_list,
+ [REPLY_RX_MPDU_CMD] = iwm_ntf_rx_packet,
+ [UMAC_CMD_OPCODE_WIFI_IF_WRAPPER] = iwm_ntf_wifi_if_wrapper,
+};
+
+static const iwm_handler iwm_lmac_handlers[] =
+{
+ [REPLY_TX] = iwm_ntf_tx,
+ [REPLY_ALIVE] = iwm_ntf_lmac_version,
+ [CALIBRATION_RES_NOTIFICATION] = iwm_ntf_calib_res,
+ [CALIBRATION_COMPLETE_NOTIFICATION] = iwm_ntf_calib_complete,
+ [CALIBRATION_CFG_CMD] = iwm_ntf_calib_cfg,
+ [REPLY_RX_MPDU_CMD] = iwm_ntf_rx_packet,
+ [CARD_STATE_NOTIFICATION] = iwm_ntf_card_state,
+};
+
+void iwm_rx_setup_handlers(struct iwm_priv *iwm)
+{
+ iwm->umac_handlers = (iwm_handler *) iwm_umac_handlers;
+ iwm->lmac_handlers = (iwm_handler *) iwm_lmac_handlers;
+}
+
+static void iwm_remove_iv(struct sk_buff *skb, u32 hdr_total_len)
+{
+ struct ieee80211_hdr *hdr;
+ unsigned int hdr_len;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return;
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ if (hdr_total_len <= hdr_len)
+ return;
+
+ memmove(skb->data + (hdr_total_len - hdr_len), skb->data, hdr_len);
+ skb_pull(skb, (hdr_total_len - hdr_len));
+}
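+
+/*
+ * A minimal sketch of what iwm_remove_iv() does to the buffer,
+ * assuming the ticket's payload offset covers the 802.11 header plus
+ * the crypto IV:
+ *
+ * before: [ 802.11 hdr ][ IV ][ payload ]
+ * after: [ 802.11 hdr ][ payload ]
+ *
+ * i.e. the header is slid forward over the IV and the now dead
+ * leading bytes are pulled off the skb. The helper below shows the
+ * same move on a plain buffer; its name is illustrative only.
+ */
+static inline void iwm_example_strip_iv(u8 *buf, unsigned int hdr_len,
+ unsigned int iv_len)
+{
+ /* slide the 802.11 header forward so it overwrites the IV */
+ memmove(buf + iv_len, buf, hdr_len);
+ /* the caller then drops the first iv_len bytes, as skb_pull() does */
+}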
+
+static void iwm_rx_adjust_packet(struct iwm_priv *iwm,
+ struct iwm_rx_packet *packet,
+ struct iwm_rx_ticket_node *ticket_node)
+{
+ u32 payload_offset = 0, payload_len;
+ struct iwm_rx_ticket *ticket = ticket_node->ticket;
+ struct iwm_rx_mpdu_hdr *mpdu_hdr;
+ struct ieee80211_hdr *hdr;
+
+ mpdu_hdr = (struct iwm_rx_mpdu_hdr *)packet->skb->data;
+ payload_offset += sizeof(struct iwm_rx_mpdu_hdr);
+ /* Padding is 0 or 2 bytes */
+ payload_len = le16_to_cpu(mpdu_hdr->len) +
+ (le16_to_cpu(ticket->flags) & IWM_RX_TICKET_PAD_SIZE_MSK);
+ payload_len -= ticket->tail_len;
+
+ IWM_DBG_RX(iwm, DBG, "Packet adjusted, len:%d, offset:%d, "
+ "ticket offset:%d ticket tail len:%d\n",
+ payload_len, payload_offset, ticket->payload_offset,
+ ticket->tail_len);
+
+ IWM_HEXDUMP(iwm, DBG, RX, "RAW: ", packet->skb->data, packet->skb->len);
+
+ skb_pull(packet->skb, payload_offset);
+ skb_trim(packet->skb, payload_len);
+
+ iwm_remove_iv(packet->skb, ticket->payload_offset);
+
+ hdr = (struct ieee80211_hdr *) packet->skb->data;
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ /* The UMAC hands us QOS_DATA frames with 2 padding bytes appended
+ * to the qos_ctl field of the IEEE 802.11 header. */
+ memmove(packet->skb->data + IEEE80211_QOS_CTL_LEN + 2,
+ packet->skb->data,
+ ieee80211_hdrlen(hdr->frame_control) -
+ IEEE80211_QOS_CTL_LEN);
+ hdr = (struct ieee80211_hdr *) skb_pull(packet->skb,
+ IEEE80211_QOS_CTL_LEN + 2);
+ hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+ }
+
+ IWM_HEXDUMP(iwm, DBG, RX, "ADJUSTED: ",
+ packet->skb->data, packet->skb->len);
+}
+
+static void classify8023(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ /* frame has qos control */
+ skb->priority = *qc & IEEE80211_QOS_CTL_TID_MASK;
+ } else {
+ skb->priority = 0;
+ }
+}
+
+static void iwm_rx_process_packet(struct iwm_priv *iwm,
+ struct iwm_rx_packet *packet,
+ struct iwm_rx_ticket_node *ticket_node)
+{
+ int ret;
+ struct sk_buff *skb = packet->skb;
+ struct wireless_dev *wdev = iwm_to_wdev(iwm);
+ struct net_device *ndev = iwm_to_ndev(iwm);
+
+ IWM_DBG_RX(iwm, DBG, "Processing packet ID %d\n", packet->id);
+
+ switch (le16_to_cpu(ticket_node->ticket->action)) {
+ case IWM_RX_TICKET_RELEASE:
+ IWM_DBG_RX(iwm, DBG, "RELEASE packet\n");
+ classify8023(skb);
+ iwm_rx_adjust_packet(iwm, packet, ticket_node);
+ ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype);
+ if (ret < 0) {
+ IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - "
+ "%d\n", ret);
+ break;
+ }
+
+ IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len);
+
+ skb->dev = iwm_to_ndev(iwm);
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ memset(skb->cb, 0, sizeof(skb->cb));
+
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+
+ if (netif_rx(skb) == NET_RX_DROP) {
+ IWM_ERR(iwm, "Packet dropped\n");
+ ndev->stats.rx_dropped++;
+ }
+ break;
+ case IWM_RX_TICKET_DROP:
+ IWM_DBG_RX(iwm, DBG, "DROP packet\n");
+ kfree_skb(packet->skb);
+ break;
+ default:
+ IWM_ERR(iwm, "Unknown ticket action: %d\n",
+ le16_to_cpu(ticket_node->ticket->action));
+ kfree_skb(packet->skb);
+ }
+
+ kfree(packet);
+ iwm_rx_ticket_node_free(ticket_node);
+}
+
+/*
+ * Rx data processing:
+ *
+ * We're receiving Rx packet from the LMAC, and Rx ticket from
+ * the UMAC.
+ * To forward a target data packet upstream (i.e. to the
+ * kernel network stack), we must have received an Rx ticket
+ * that tells us we're allowed to release this packet (ticket
+ * action is IWM_RX_TICKET_RELEASE). The Rx ticket also indicates,
+ * among other things, where valid data actually starts in the Rx
+ * packet.
+ */
+void iwm_rx_worker(struct work_struct *work)
+{
+ struct iwm_priv *iwm;
+ struct iwm_rx_ticket_node *ticket, *next;
+
+ iwm = container_of(work, struct iwm_priv, rx_worker);
+
+ /*
+ * We go through the tickets list and if there is a pending
+ * packet for it, we push it upstream.
+ * We stop whenever a ticket is missing its packet, as we're
+ * supposed to send the packets in order.
+ */
+ list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
+ struct iwm_rx_packet *packet =
+ iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));
+
+ if (!packet) {
+ IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d "
+ "to be handled first\n",
+ le16_to_cpu(ticket->ticket->id));
+ return;
+ }
+
+ list_del(&ticket->node);
+ list_del(&packet->node);
+ iwm_rx_process_packet(iwm, packet, ticket);
+ }
+}
+
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.h b/drivers/net/wireless/iwmc3200wifi/rx.h
new file mode 100644
index 00000000000..da0db91cee5
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/rx.h
@@ -0,0 +1,60 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#ifndef __IWM_RX_H__
+#define __IWM_RX_H__
+
+#include <linux/skbuff.h>
+
+#include "umac.h"
+
+struct iwm_rx_ticket_node {
+ struct list_head node;
+ struct iwm_rx_ticket *ticket;
+};
+
+struct iwm_rx_packet {
+ struct list_head node;
+ u16 id;
+ struct sk_buff *skb;
+ unsigned long pkt_size;
+};
+
+void iwm_rx_worker(struct work_struct *work);
+
+#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
new file mode 100644
index 00000000000..b54da677b37
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -0,0 +1,516 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+/*
+ * These are the SDIO bus-specific hooks for iwm.
+ * This file is also the module's entry point.
+ *
+ * Interesting code paths:
+ * iwm_sdio_probe() (Called by an SDIO bus scan)
+ * -> iwm_if_alloc() (netdev.c)
+ * -> iwm_wdev_alloc() (cfg80211.c, allocates and register our wiphy)
+ * -> wiphy_new()
+ * -> wiphy_register()
+ * -> alloc_netdev_mq()
+ * -> register_netdev()
+ *
+ * iwm_sdio_remove()
+ * -> iwm_if_free() (netdev.c)
+ * -> unregister_netdev()
+ * -> iwm_wdev_free() (cfg80211.c)
+ * -> wiphy_unregister()
+ * -> wiphy_free()
+ *
+ * iwm_sdio_isr() (called in process context from the SDIO core code)
+ * -> queue_work(.., isr_worker)
+ * -- [async] --> iwm_sdio_isr_worker()
+ * -> iwm_rx_handle()
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+
+#include "iwm.h"
+#include "debug.h"
+#include "bus.h"
+#include "sdio.h"
+
+static void iwm_sdio_isr_worker(struct work_struct *work)
+{
+ struct iwm_sdio_priv *hw;
+ struct iwm_priv *iwm;
+ struct iwm_rx_info *rx_info;
+ struct sk_buff *skb;
+ u8 *rx_buf;
+ unsigned long rx_size;
+
+ hw = container_of(work, struct iwm_sdio_priv, isr_worker);
+ iwm = hw_to_iwm(hw);
+
+ while (!skb_queue_empty(&iwm->rx_list)) {
+ skb = skb_dequeue(&iwm->rx_list);
+ rx_info = skb_to_rx_info(skb);
+ rx_size = rx_info->rx_size;
+ rx_buf = skb->data;
+
+ IWM_HEXDUMP(iwm, DBG, SDIO, "RX: ", rx_buf, rx_size);
+ if (iwm_rx_handle(iwm, rx_buf, rx_size) < 0)
+ IWM_WARN(iwm, "RX error\n");
+
+ kfree_skb(skb);
+ }
+}
+
+static void iwm_sdio_isr(struct sdio_func *func)
+{
+ struct iwm_priv *iwm;
+ struct iwm_sdio_priv *hw;
+ struct iwm_rx_info *rx_info;
+ struct sk_buff *skb;
+ unsigned long buf_size, read_size;
+ int ret;
+ u8 val;
+
+ hw = sdio_get_drvdata(func);
+ iwm = hw_to_iwm(hw);
+
+ buf_size = hw->blk_size;
+
+ /* We're checking the status */
+ val = sdio_readb(func, IWM_SDIO_INTR_STATUS_ADDR, &ret);
+ if (val == 0 || ret < 0) {
+ IWM_ERR(iwm, "Wrong INTR_STATUS\n");
+ return;
+ }
+
+ /* See if we have free buffers */
+ if (skb_queue_len(&iwm->rx_list) > IWM_RX_LIST_SIZE) {
+ IWM_ERR(iwm, "No buffer for more Rx frames\n");
+ return;
+ }
+
+ /* We first read the transaction size */
+ read_size = sdio_readb(func, IWM_SDIO_INTR_GET_SIZE_ADDR + 1, &ret);
+ read_size = read_size << 8;
+
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't read the xfer size\n");
+ return;
+ }
+
+ /* We need to clear the INT register */
+ sdio_writeb(func, 1, IWM_SDIO_INTR_CLEAR_ADDR, &ret);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't clear the INT register\n");
+ return;
+ }
+
+ while (buf_size < read_size)
+ buf_size <<= 1;
+
+ skb = dev_alloc_skb(buf_size);
+ if (!skb) {
+ IWM_ERR(iwm, "Couldn't alloc RX skb\n");
+ return;
+ }
+ rx_info = skb_to_rx_info(skb);
+ rx_info->rx_size = read_size;
+ rx_info->rx_buf_size = buf_size;
+
+ /* Now we can read the actual buffer */
+ ret = sdio_memcpy_fromio(func, skb_put(skb, read_size),
+ IWM_SDIO_DATA_ADDR, read_size);
+
+ /* The skb is put on a driver's specific Rx SKB list */
+ skb_queue_tail(&iwm->rx_list, skb);
+
+ /* We can now schedule the actual worker */
+ queue_work(hw->isr_wq, &hw->isr_worker);
+}
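+
+/*
+ * A minimal sketch of the buffer sizing done in iwm_sdio_isr(): the
+ * receive buffer starts at one SDIO block and is doubled until it
+ * covers the announced transfer size, e.g. with the 256 byte block
+ * size used here a 600 byte transfer is read into a 1024 byte skb.
+ * The helper name is illustrative only.
+ */
+static inline unsigned long iwm_example_rx_buf_size(unsigned long blk_size,
+ unsigned long read_size)
+{
+ unsigned long buf_size = blk_size;
+
+ while (buf_size < read_size)
+ buf_size <<= 1;
+
+ return buf_size;
+}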
+
+static void iwm_sdio_rx_free(struct iwm_sdio_priv *hw)
+{
+ struct iwm_priv *iwm = hw_to_iwm(hw);
+
+ flush_workqueue(hw->isr_wq);
+
+ skb_queue_purge(&iwm->rx_list);
+}
+
+/* Bus ops */
+static int if_sdio_enable(struct iwm_priv *iwm)
+{
+ struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
+ int ret;
+
+ sdio_claim_host(hw->func);
+
+ ret = sdio_enable_func(hw->func);
+ if (ret) {
+ IWM_ERR(iwm, "Couldn't enable the device: is TOP driver "
+ "loaded and functional?\n");
+ goto release_host;
+ }
+
+ iwm_reset(iwm);
+
+ ret = sdio_claim_irq(hw->func, iwm_sdio_isr);
+ if (ret) {
+ IWM_ERR(iwm, "Failed to claim irq: %d\n", ret);
+ goto release_host;
+ }
+
+ sdio_writeb(hw->func, 1, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't enable INTR: %d\n", ret);
+ goto release_irq;
+ }
+
+ sdio_release_host(hw->func);
+
+ IWM_DBG_SDIO(iwm, INFO, "IWM SDIO enable\n");
+
+ return 0;
+
+ release_irq:
+ sdio_release_irq(hw->func);
+ release_host:
+ sdio_release_host(hw->func);
+
+ return ret;
+}
+
+static int if_sdio_disable(struct iwm_priv *iwm)
+{
+ struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
+ int ret;
+
+ iwm_reset(iwm);
+
+ sdio_claim_host(hw->func);
+ sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
+ if (ret < 0)
+ IWM_WARN(iwm, "Couldn't disable INTR: %d\n", ret);
+
+ sdio_release_irq(hw->func);
+ sdio_disable_func(hw->func);
+ sdio_release_host(hw->func);
+
+ iwm_sdio_rx_free(hw);
+
+ IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n");
+
+ return 0;
+}
+
+static int if_sdio_send_chunk(struct iwm_priv *iwm, u8 *buf, int count)
+{
+ struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
+ int aligned_count = ALIGN(count, hw->blk_size);
+ int ret;
+
+ if ((unsigned long)buf & 0x3) {
+ IWM_ERR(iwm, "buf <%p> is not dword aligned\n", buf);
+ /* TODO: Is this a hardware limitation? use get_unaligned */
+ return -EINVAL;
+ }
+
+ sdio_claim_host(hw->func);
+ ret = sdio_memcpy_toio(hw->func, IWM_SDIO_DATA_ADDR, buf,
+ aligned_count);
+ sdio_release_host(hw->func);
+
+ return ret;
+}
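+
+/*
+ * A minimal sketch of the padding rule above: TX chunks are padded up
+ * to a whole number of SDIO blocks before being written, e.g. with
+ * the 256 byte block size a 300 byte chunk goes out as 512 bytes.
+ * The helper name is illustrative only.
+ */
+static inline int iwm_example_padded_tx_len(int count, int blk_size)
+{
+ return ALIGN(count, blk_size);
+}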
+
+/* debugfs hooks */
+static int iwm_debugfs_sdio_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t iwm_debugfs_sdio_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct iwm_priv *iwm = filp->private_data;
+ struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
+ char *buf;
+ u8 cccr;
+ int buf_len = 4096, ret;
+ size_t len = 0;
+
+ if (*ppos != 0)
+ return 0;
+ if (count < buf_len)
+ return -ENOSPC;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ sdio_claim_host(hw->func);
+
+ cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IOEx, &ret);
+ if (ret) {
+ IWM_ERR(iwm, "Could not read SDIO_CCCR_IOEx\n");
+ goto err;
+ }
+ len += snprintf(buf + len, buf_len - len, "CCCR_IOEx: 0x%x\n", cccr);
+
+ cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IORx, &ret);
+ if (ret) {
+ IWM_ERR(iwm, "Could not read SDIO_CCCR_IORx\n");
+ goto err;
+ }
+ len += snprintf(buf + len, buf_len - len, "CCCR_IORx: 0x%x\n", cccr);
+
+
+ cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IENx, &ret);
+ if (ret) {
+ IWM_ERR(iwm, "Could not read SDIO_CCCR_IENx\n");
+ goto err;
+ }
+ len += snprintf(buf + len, buf_len - len, "CCCR_IENx: 0x%x\n", cccr);
+
+
+ cccr = sdio_f0_readb(hw->func, SDIO_CCCR_INTx, &ret);
+ if (ret) {
+ IWM_ERR(iwm, "Could not read SDIO_CCCR_INTx\n");
+ goto err;
+ }
+ len += snprintf(buf + len, buf_len - len, "CCCR_INTx: 0x%x\n", cccr);
+
+
+ cccr = sdio_f0_readb(hw->func, SDIO_CCCR_ABORT, &ret);
+ if (ret) {
+ IWM_ERR(iwm, "Could not read SDIO_CCCR_ABORT\n");
+ goto err;
+ }
+ len += snprintf(buf + len, buf_len - len, "CCCR_ABORT: 0x%x\n", cccr);
+
+ cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IF, &ret);
+ if (ret) {
+ IWM_ERR(iwm, "Could not read SDIO_CCCR_IF\n");
+ goto err;
+ }
+ len += snprintf(buf + len, buf_len - len, "CCCR_IF: 0x%x\n", cccr);
+
+
+ cccr = sdio_f0_readb(hw->func, SDIO_CCCR_CAPS, &ret);
+ if (ret) {
+ IWM_ERR(iwm, "Could not read SDIO_CCCR_CAPS\n");
+ goto err;
+ }
+ len += snprintf(buf + len, buf_len - len, "CCCR_CAPS: 0x%x\n", cccr);
+
+ cccr = sdio_f0_readb(hw->func, SDIO_CCCR_CIS, &ret);
+ if (ret) {
+ IWM_ERR(iwm, "Could not read SDIO_CCCR_CIS\n");
+ goto err;
+ }
+ len += snprintf(buf + len, buf_len - len, "CCCR_CIS: 0x%x\n", cccr);
+
+ ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
+err:
+ sdio_release_host(hw->func);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations iwm_debugfs_sdio_fops = {
+ .owner = THIS_MODULE,
+ .open = iwm_debugfs_sdio_open,
+ .read = iwm_debugfs_sdio_read,
+};
+
+static int if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
+{
+ int result;
+ struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
+
+ hw->cccr_dentry = debugfs_create_file("cccr", 0200,
+ parent_dir, iwm,
+ &iwm_debugfs_sdio_fops);
+ result = PTR_ERR(hw->cccr_dentry);
+ if (IS_ERR(hw->cccr_dentry) && (result != -ENODEV)) {
+ IWM_ERR(iwm, "Couldn't create CCCR entry: %d\n", result);
+ return result;
+ }
+
+ return 0;
+}
+
+static void if_sdio_debugfs_exit(struct iwm_priv *iwm)
+{
+ struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
+
+ debugfs_remove(hw->cccr_dentry);
+}
+
+static struct iwm_if_ops if_sdio_ops = {
+ .enable = if_sdio_enable,
+ .disable = if_sdio_disable,
+ .send_chunk = if_sdio_send_chunk,
+ .debugfs_init = if_sdio_debugfs_init,
+ .debugfs_exit = if_sdio_debugfs_exit,
+ .umac_name = "iwmc3200wifi-umac-sdio.bin",
+ .calib_lmac_name = "iwmc3200wifi-calib-sdio.bin",
+ .lmac_name = "iwmc3200wifi-lmac-sdio.bin",
+};
+
+static int iwm_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct iwm_priv *iwm;
+ struct iwm_sdio_priv *hw;
+ struct device *dev = &func->dev;
+ int ret;
+
+ /* check if TOP has already initialized the card */
+ sdio_claim_host(func);
+ ret = sdio_enable_func(func);
+ if (ret) {
+ dev_err(dev, "wait for TOP to enable the device\n");
+ sdio_release_host(func);
+ return ret;
+ }
+
+ ret = sdio_set_block_size(func, IWM_SDIO_BLK_SIZE);
+
+ sdio_disable_func(func);
+ sdio_release_host(func);
+
+ if (ret < 0) {
+ dev_err(dev, "Failed to set block size: %d\n", ret);
+ return ret;
+ }
+
+ iwm = iwm_if_alloc(sizeof(struct iwm_sdio_priv), dev, &if_sdio_ops);
+ if (IS_ERR(iwm)) {
+ dev_err(dev, "allocate SDIO interface failed\n");
+ return PTR_ERR(iwm);
+ }
+
+ hw = iwm_private(iwm);
+ hw->iwm = iwm;
+
+ ret = iwm_debugfs_init(iwm);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Debugfs registration failed\n");
+ goto if_free;
+ }
+
+ sdio_set_drvdata(func, hw);
+
+ hw->func = func;
+ hw->blk_size = IWM_SDIO_BLK_SIZE;
+
+ hw->isr_wq = create_singlethread_workqueue(KBUILD_MODNAME "_sdio");
+ if (!hw->isr_wq) {
+ ret = -ENOMEM;
+ goto debugfs_exit;
+ }
+
+ INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker);
+
+ dev_info(dev, "IWM SDIO probe\n");
+
+ return 0;
+
+ debugfs_exit:
+ iwm_debugfs_exit(iwm);
+ if_free:
+ iwm_if_free(iwm);
+ return ret;
+}
+
+static void iwm_sdio_remove(struct sdio_func *func)
+{
+ struct iwm_sdio_priv *hw = sdio_get_drvdata(func);
+ struct iwm_priv *iwm = hw_to_iwm(hw);
+ struct device *dev = &func->dev;
+
+ iwm_debugfs_exit(iwm);
+ iwm_if_free(iwm);
+ destroy_workqueue(hw->isr_wq);
+
+ sdio_set_drvdata(func, NULL);
+
+ dev_info(dev, "IWM SDIO remove\n");
+
+ return;
+}
+
+static const struct sdio_device_id iwm_sdio_ids[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, SDIO_DEVICE_ID_IWM) },
+ { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids);
+
+static struct sdio_driver iwm_sdio_driver = {
+ .name = "iwm_sdio",
+ .id_table = iwm_sdio_ids,
+ .probe = iwm_sdio_probe,
+ .remove = iwm_sdio_remove,
+};
+
+static int __init iwm_sdio_init_module(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&iwm_sdio_driver);
+
+ return ret;
+}
+
+static void __exit iwm_sdio_exit_module(void)
+{
+ sdio_unregister_driver(&iwm_sdio_driver);
+}
+
+module_init(iwm_sdio_init_module);
+module_exit(iwm_sdio_exit_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(IWM_COPYRIGHT " " IWM_AUTHOR);
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.h b/drivers/net/wireless/iwmc3200wifi/sdio.h
new file mode 100644
index 00000000000..b3c156b08dd
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.h
@@ -0,0 +1,67 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#ifndef __IWM_SDIO_H__
+#define __IWM_SDIO_H__
+
+#define SDIO_VENDOR_ID_INTEL 0x89
+#define SDIO_DEVICE_ID_IWM 0x1403
+
+#define IWM_SDIO_DATA_ADDR 0x0
+#define IWM_SDIO_INTR_ENABLE_ADDR 0x14
+#define IWM_SDIO_INTR_STATUS_ADDR 0x13
+#define IWM_SDIO_INTR_CLEAR_ADDR 0x13
+#define IWM_SDIO_INTR_GET_SIZE_ADDR 0x2C
+
+#define IWM_SDIO_BLK_SIZE 256
+
+#define iwm_to_if_sdio(i) ((struct iwm_sdio_priv *)((i)->private))
+
+struct iwm_sdio_priv {
+ struct sdio_func *func;
+ struct iwm_priv *iwm;
+
+ struct workqueue_struct *isr_wq;
+ struct work_struct isr_worker;
+
+ struct dentry *cccr_dentry;
+
+ unsigned int blk_size;
+};
+
+#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
new file mode 100644
index 00000000000..e3b4f7902da
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -0,0 +1,492 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+/*
+ * iwm Tx theory of operation:
+ *
+ * 1) We receive an 802.3 frame from the stack
+ * 2) We convert it to an 802.11 frame [iwm_xmit_frame]
+ * 3) We queue it to its corresponding tx queue [iwm_xmit_frame]
+ * 4) We schedule the tx worker. There is one worker per tx
+ * queue. [iwm_xmit_frame]
+ * 5) The tx worker is scheduled
+ * 6) We go through every queued skb on the tx queue, and for each
+ * and every one of them: [iwm_tx_worker]
+ * a) We check if we have enough Tx credits (see below for a Tx
+ * credits description) for the frame length. [iwm_tx_worker]
+ * b) If we do, we aggregate the Tx frame into a UDMA one, by
+ * concatenating one REPLY_TX command per Tx frame. [iwm_tx_worker]
+ * c) When we run out of credits, or when we reach the maximum
+ * concatenation size, we actually send the concatenated UDMA
+ * frame. [iwm_tx_worker]
+ *
+ * When we run out of Tx credits, the skbs are filling the tx queue,
+ * and eventually we will stop the netdev queue. [iwm_tx_worker]
+ * The tx queue is emptied as we're getting new tx credits, by
+ * scheduling the tx_worker. [iwm_tx_credit_inc]
+ * The netdev queue is started again when we have enough tx credits,
+ * and when our tx queue has some reasonable amount of space available
+ * (i.e. half of the max size). [iwm_tx_worker]
+ */
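+
+/*
+ * Illustrative sketch of the concatenation step (6b/6c) above, based
+ * on iwm_tx_worker() below: each queued skb becomes one 16-byte
+ * aligned REPLY_TX entry in txq->concat_buf, and only the last entry
+ * gets its EOP bit set before the whole buffer is pushed to the bus
+ * as a single chunk:
+ *
+ *   concat_buf
+ *   +-----------+---------+-----+-----------+---------+-----+-----+
+ *   | UDMA/UMAC | frame 0 | pad | UDMA/UMAC | frame 1 | pad | ... |
+ *   | hdr 0     |         |     | hdr 1     |         |     |     |
+ *   +-----------+---------+-----+-----------+---------+-----+-----+
+ *                                            (last header: EOP = 1)
+ */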
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ieee80211.h>
+
+#include "iwm.h"
+#include "debug.h"
+#include "commands.h"
+#include "hal.h"
+#include "umac.h"
+#include "bus.h"
+
+#define IWM_UMAC_PAGE_ALLOC_WRAP 0xffff
+
+#define BYTES_TO_PAGES(n) (1 + ((n) >> ilog2(IWM_UMAC_PAGE_SIZE)) - \
+ (((n) & (IWM_UMAC_PAGE_SIZE - 1)) == 0))
+
+#define pool_id_to_queue(id) (((id) < IWM_TX_CMD_QUEUE) ? (id) : (id) - 1)
+#define queue_to_pool_id(q) (((q) < IWM_TX_CMD_QUEUE) ? (q) : (q) + 1)
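+
+/*
+ * BYTES_TO_PAGES() computes how many UMAC pages a frame of n bytes
+ * needs, i.e. a ceiling division by IWM_UMAC_PAGE_SIZE (0x200 bytes,
+ * see umac.h). A few worked examples:
+ *   BYTES_TO_PAGES(1)    = 1 + 0 - 0 = 1
+ *   BYTES_TO_PAGES(512)  = 1 + 1 - 1 = 1
+ *   BYTES_TO_PAGES(1000) = 1 + 1 - 0 = 2
+ * The two helpers above simply skip the slot reserved for
+ * IWM_TX_CMD_QUEUE when translating between tx queue indexes and
+ * credit pool ids.
+ */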
+
+/* requires holding the tx_credit lock */
+static int iwm_tx_credit_get(struct iwm_tx_credit *tx_credit, int id)
+{
+ struct pool_entry *pool = &tx_credit->pools[id];
+ struct spool_entry *spool = &tx_credit->spools[pool->sid];
+ int spool_pages;
+
+ /* number of pages that can be taken from the spool by this pool */
+ spool_pages = spool->max_pages - spool->alloc_pages +
+ max(pool->min_pages - pool->alloc_pages, 0);
+
+ return min(pool->max_pages - pool->alloc_pages, spool_pages);
+}
+
+static bool iwm_tx_credit_ok(struct iwm_priv *iwm, int id, int nb)
+{
+ u32 npages = BYTES_TO_PAGES(nb);
+
+ if (npages <= iwm_tx_credit_get(&iwm->tx_credit, id))
+ return 1;
+
+ set_bit(id, &iwm->tx_credit.full_pools_map);
+
+ IWM_DBG_TX(iwm, DBG, "LINK: stop txq[%d], available credit: %d\n",
+ pool_id_to_queue(id),
+ iwm_tx_credit_get(&iwm->tx_credit, id));
+
+ return 0;
+}
+
+void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages)
+{
+ struct pool_entry *pool;
+ struct spool_entry *spool;
+ int freed_pages;
+ int queue;
+
+ BUG_ON(id >= IWM_MACS_OUT_GROUPS);
+
+ pool = &iwm->tx_credit.pools[id];
+ spool = &iwm->tx_credit.spools[pool->sid];
+
+ freed_pages = total_freed_pages - pool->total_freed_pages;
+ IWM_DBG_TX(iwm, DBG, "Free %d pages for pool[%d]\n", freed_pages, id);
+
+ if (!freed_pages) {
+ IWM_DBG_TX(iwm, DBG, "No pages are freed by UMAC\n");
+ return;
+ } else if (freed_pages < 0)
+ freed_pages += IWM_UMAC_PAGE_ALLOC_WRAP + 1;
+
+ if (pool->alloc_pages > pool->min_pages) {
+ int spool_pages = pool->alloc_pages - pool->min_pages;
+ spool_pages = min(spool_pages, freed_pages);
+ spool->alloc_pages -= spool_pages;
+ }
+
+ pool->alloc_pages -= freed_pages;
+ pool->total_freed_pages = total_freed_pages;
+
+ IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
+ "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
+ pool->total_freed_pages, pool->sid, spool->alloc_pages);
+
+ if (test_bit(id, &iwm->tx_credit.full_pools_map) &&
+ (pool->alloc_pages < pool->max_pages / 2)) {
+ clear_bit(id, &iwm->tx_credit.full_pools_map);
+
+ queue = pool_id_to_queue(id);
+
+ IWM_DBG_TX(iwm, DBG, "LINK: start txq[%d], available "
+ "credit: %d\n", queue,
+ iwm_tx_credit_get(&iwm->tx_credit, id));
+ queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
+ }
+}
+
+static void iwm_tx_credit_dec(struct iwm_priv *iwm, int id, int alloc_pages)
+{
+ struct pool_entry *pool;
+ struct spool_entry *spool;
+ int spool_pages;
+
+ IWM_DBG_TX(iwm, DBG, "Allocate %d pages for pool[%d]\n",
+ alloc_pages, id);
+
+ BUG_ON(id >= IWM_MACS_OUT_GROUPS);
+
+ pool = &iwm->tx_credit.pools[id];
+ spool = &iwm->tx_credit.spools[pool->sid];
+
+ spool_pages = pool->alloc_pages + alloc_pages - pool->min_pages;
+
+ if (pool->alloc_pages >= pool->min_pages)
+ spool->alloc_pages += alloc_pages;
+ else if (spool_pages > 0)
+ spool->alloc_pages += spool_pages;
+
+ pool->alloc_pages += alloc_pages;
+
+ IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
+ "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
+ pool->total_freed_pages, pool->sid, spool->alloc_pages);
+}
+
+int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb)
+{
+ u32 npages = BYTES_TO_PAGES(nb);
+ int ret = 0;
+
+ spin_lock(&iwm->tx_credit.lock);
+
+ if (!iwm_tx_credit_ok(iwm, id, nb)) {
+ IWM_DBG_TX(iwm, DBG, "No credit avaliable for pool[%d]\n", id);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ iwm_tx_credit_dec(iwm, id, npages);
+
+ out:
+ spin_unlock(&iwm->tx_credit.lock);
+ return ret;
+}
+
+/*
+ * Since we're on an SDIO or USB bus, we are not sharing memory
+ * for storing the frames to be transmitted. The host needs to push
+ * them upstream. As a consequence there needs to be a way for
+ * the target to let us know if it can actually take more TX frames
+ * or not. This is what Tx credits are for.
+ *
+ * For each Tx HW queue, we have a Tx pool, and then we have one
+ * unique super pool (spool), which is actually a global pool of
+ * all the UMAC pages.
+ * For each Tx pool we have min_pages, max_pages and alloc_pages
+ * fields. alloc_pages tracks the number of pages currently
+ * allocated from the tx pool.
+ * Here are the rules to check whether we have enough tx credits
+ * for a given tx frame:
+ * 1) We translate the frame length into a number of UMAC pages.
+ * Let's call them n_pages.
+ * 2) For the corresponding tx pool, we check if n_pages +
+ * pool->alloc_pages is higher than pool->min_pages. min_pages
+ *    represents a set of pre-allocated pages on the tx pool. If
+ * that's the case, then we need to allocate those pages from
+ * the spool. We can do so until we reach spool->max_pages.
+ * 3) Each tx pool is not allowed to allocate more than pool->max_pages
+ * from the spool, so once we're over min_pages, we can allocate
+ * pages from the spool, but not more than max_pages.
+ *
+ * When the tx code path needs to send a tx frame, it checks first
+ * if it has enough tx credits, following those rules. [iwm_tx_credit_get]
+ * If it does, it then updates the pool and spool counters and
+ * then sends the frame. [iwm_tx_credit_alloc and iwm_tx_credit_dec]
+ * On the other side, when the UMAC is done transmitting frames, it
+ * will send a credit update notification to the host. This is when
+ * the pool and spool counters get decreased. [iwm_tx_credit_inc,
+ * called from rx.c:iwm_ntf_tx_credit_update]
+ *
+ */
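+/*
+ * A small worked example of the rules above, with hypothetical sizes:
+ * for pool->min_pages = 4, pool->max_pages = 10, pool->alloc_pages = 6,
+ * spool->max_pages = 16 and spool->alloc_pages = 12,
+ * iwm_tx_credit_get() returns
+ * min(10 - 6, (16 - 12) + max(4 - 6, 0)) = min(4, 4) = 4, i.e. this
+ * pool may still take 4 more pages before hitting either its own limit
+ * or the spool limit.
+ */
+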
+void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
+ struct iwm_umac_notif_alive *alive)
+{
+ int i, sid, pool_pages;
+
+ spin_lock(&iwm->tx_credit.lock);
+
+ iwm->tx_credit.pool_nr = le16_to_cpu(alive->page_grp_count);
+ iwm->tx_credit.full_pools_map = 0;
+ memset(&iwm->tx_credit.spools[0], 0, sizeof(struct spool_entry));
+
+ IWM_DBG_TX(iwm, DBG, "Pools number is %d\n", iwm->tx_credit.pool_nr);
+
+ for (i = 0; i < iwm->tx_credit.pool_nr; i++) {
+ __le32 page_grp_state = alive->page_grp_state[i];
+
+ iwm->tx_credit.pools[i].id = GET_VAL32(page_grp_state,
+ UMAC_ALIVE_PAGE_STS_GRP_NUM);
+ iwm->tx_credit.pools[i].sid = GET_VAL32(page_grp_state,
+ UMAC_ALIVE_PAGE_STS_SGRP_NUM);
+ iwm->tx_credit.pools[i].min_pages = GET_VAL32(page_grp_state,
+ UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE);
+ iwm->tx_credit.pools[i].max_pages = GET_VAL32(page_grp_state,
+ UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE);
+ iwm->tx_credit.pools[i].alloc_pages = 0;
+ iwm->tx_credit.pools[i].total_freed_pages = 0;
+
+ sid = iwm->tx_credit.pools[i].sid;
+ pool_pages = iwm->tx_credit.pools[i].min_pages;
+
+ if (iwm->tx_credit.spools[sid].max_pages == 0) {
+ iwm->tx_credit.spools[sid].id = sid;
+ iwm->tx_credit.spools[sid].max_pages =
+ GET_VAL32(page_grp_state,
+ UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE);
+ iwm->tx_credit.spools[sid].alloc_pages = 0;
+ }
+
+ iwm->tx_credit.spools[sid].alloc_pages += pool_pages;
+
+ IWM_DBG_TX(iwm, DBG, "Pool idx: %d, id: %d, sid: %d, capacity "
+ "min: %d, max: %d, pool alloc: %d, total_free: %d, "
+ "super poll alloc: %d\n",
+ i, iwm->tx_credit.pools[i].id,
+ iwm->tx_credit.pools[i].sid,
+ iwm->tx_credit.pools[i].min_pages,
+ iwm->tx_credit.pools[i].max_pages,
+ iwm->tx_credit.pools[i].alloc_pages,
+ iwm->tx_credit.pools[i].total_freed_pages,
+ iwm->tx_credit.spools[sid].alloc_pages);
+ }
+
+ spin_unlock(&iwm->tx_credit.lock);
+}
+
+#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr)
+
+static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
+ int pool_id, u8 *buf)
+{
+ struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
+ struct iwm_udma_wifi_cmd udma_cmd;
+ struct iwm_umac_cmd umac_cmd;
+ struct iwm_tx_info *tx_info = skb_to_tx_info(skb);
+
+ udma_cmd.count = cpu_to_le16(skb->len +
+ sizeof(struct iwm_umac_fw_cmd_hdr));
+ /* set EOP to 0 here. iwm_udma_wifi_hdr_set_eop() will be
+ * called later to set EOP for the last packet. */
+ udma_cmd.eop = 0;
+ udma_cmd.credit_group = pool_id;
+ udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid;
+ udma_cmd.lmac_offset = 0;
+
+ umac_cmd.id = REPLY_TX;
+ umac_cmd.count = cpu_to_le16(skb->len);
+ umac_cmd.color = tx_info->color;
+ umac_cmd.resp = 0;
+ umac_cmd.seq_num = cpu_to_le16(iwm_alloc_wifi_cmd_seq(iwm));
+
+ iwm_build_udma_wifi_hdr(iwm, &hdr->hw_hdr, &udma_cmd);
+ iwm_build_umac_hdr(iwm, &hdr->sw_hdr, &umac_cmd);
+
+ memcpy(buf + sizeof(*hdr), skb->data, skb->len);
+
+ return 0;
+}
+
+static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
+ struct iwm_tx_queue *txq)
+{
+ int ret;
+
+ if (!txq->concat_count)
+ return 0;
+
+ IWM_DBG_TX(iwm, DBG, "Send concatenated Tx: queue %d, %d bytes\n",
+ txq->id, txq->concat_count);
+
+ /* mark EOP for the last packet */
+ iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);
+
+ ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);
+
+ txq->concat_count = 0;
+ txq->concat_ptr = txq->concat_buf;
+
+ return ret;
+}
+
+#define CONFIG_IWM_TX_CONCATENATED 1
+
+void iwm_tx_worker(struct work_struct *work)
+{
+ struct iwm_priv *iwm;
+ struct iwm_tx_info *tx_info = NULL;
+ struct sk_buff *skb;
+ int cmdlen, ret;
+ struct iwm_tx_queue *txq;
+ int pool_id;
+
+ txq = container_of(work, struct iwm_tx_queue, worker);
+ iwm = container_of(txq, struct iwm_priv, txq[txq->id]);
+
+ pool_id = queue_to_pool_id(txq->id);
+
+ while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
+ !skb_queue_empty(&txq->queue)) {
+
+ skb = skb_dequeue(&txq->queue);
+ tx_info = skb_to_tx_info(skb);
+ cmdlen = IWM_UDMA_HDR_LEN + skb->len;
+
+ IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
+ "%d, color: %d\n", txq->id, skb, tx_info->sta,
+ tx_info->color);
+
+#if !CONFIG_IWM_TX_CONCATENATED
+ /* temporarily kept for comparing performance */
+ ret = iwm_send_packet(iwm, skb, pool_id);
+#else
+
+ if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
+ iwm_tx_send_concat_packets(iwm, txq);
+
+ ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
+ if (ret) {
+ IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
+ "%d, Tx worker stopped\n", txq->id);
+ skb_queue_head(&txq->queue, skb);
+ break;
+ }
+
+ txq->concat_ptr = txq->concat_buf + txq->concat_count;
+ iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
+ txq->concat_count += ALIGN(cmdlen, 16);
+#endif
+ kfree_skb(skb);
+ }
+
+ iwm_tx_send_concat_packets(iwm, txq);
+
+ if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
+ !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
+ (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
+ IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id);
+ netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
+ }
+}
+
+int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(netdev);
+ struct net_device *ndev = iwm_to_ndev(iwm);
+ struct wireless_dev *wdev = iwm_to_wdev(iwm);
+ u8 *dst_addr;
+ struct iwm_tx_info *tx_info;
+ struct iwm_tx_queue *txq;
+ struct iwm_sta_info *sta_info;
+ u8 sta_id;
+ u16 queue;
+ int ret;
+
+ if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
+ IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
+ "not associated\n");
+ netif_tx_stop_all_queues(netdev);
+ goto drop;
+ }
+
+ queue = skb_get_queue_mapping(skb);
+ BUG_ON(queue >= IWM_TX_DATA_QUEUES); /* no iPAN yet */
+
+ txq = &iwm->txq[queue];
+
+ /* No free space for Tx, tx_worker is too slow */
+ if (skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) {
+ IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
+ netif_stop_subqueue(netdev, queue);
+ return NETDEV_TX_BUSY;
+ }
+
+ ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype,
+ iwm->bssid, 0);
+ if (ret) {
+ IWM_ERR(iwm, "build wifi header failed\n");
+ goto drop;
+ }
+
+ dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1;
+
+ for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) {
+ sta_info = &iwm->sta_table[sta_id];
+ if (sta_info->valid &&
+ !memcmp(dst_addr, sta_info->addr, ETH_ALEN))
+ break;
+ }
+
+ if (sta_id == IWM_STA_TABLE_NUM) {
+ IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n",
+ dst_addr);
+ goto drop;
+ }
+
+ tx_info = skb_to_tx_info(skb);
+ tx_info->sta = sta_id;
+ tx_info->color = sta_info->color;
+ /* UMAC uses TID 8 (vs. 0) for non-QoS packets */
+ if (sta_info->qos)
+ tx_info->tid = skb->priority;
+ else
+ tx_info->tid = IWM_UMAC_MGMT_TID;
+
+ skb_queue_tail(&iwm->txq[queue].queue, skb);
+
+ queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
+
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ return NETDEV_TX_OK;
+
+ drop:
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
new file mode 100644
index 00000000000..4a95cce1f0a
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -0,0 +1,744 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ */
+
+#ifndef __IWM_UMAC_H__
+#define __IWM_UMAC_H__
+
+struct iwm_udma_in_hdr {
+ __le32 cmd;
+ __le32 size;
+} __attribute__ ((packed));
+
+struct iwm_udma_out_nonwifi_hdr {
+ __le32 cmd;
+ __le32 addr;
+ __le32 op1_sz;
+ __le32 op2;
+} __attribute__ ((packed));
+
+struct iwm_udma_out_wifi_hdr {
+ __le32 cmd;
+ __le32 meta_data;
+} __attribute__ ((packed));
+
+/* Sequence numbering */
+#define UMAC_WIFI_SEQ_NUM_BASE 1
+#define UMAC_WIFI_SEQ_NUM_MAX 0x4000
+#define UMAC_NONWIFI_SEQ_NUM_BASE 1
+#define UMAC_NONWIFI_SEQ_NUM_MAX 0x10
+
+/* MAC address */
+#define WICO_MAC_ADDRESS_ADDR 0x604008F8
+
+/* RA / TID */
+#define UMAC_HDI_ACT_TBL_IDX_TID_POS 0
+#define UMAC_HDI_ACT_TBL_IDX_TID_SEED 0xF
+
+#define UMAC_HDI_ACT_TBL_IDX_RA_POS 4
+#define UMAC_HDI_ACT_TBL_IDX_RA_SEED 0xF
+
+#define UMAC_HDI_ACT_TBL_IDX_RA_UMAC 0xF
+#define UMAC_HDI_ACT_TBL_IDX_TID_UMAC 0x9
+#define UMAC_HDI_ACT_TBL_IDX_TID_LMAC 0xA
+
+#define UMAC_HDI_ACT_TBL_IDX_HOST_CMD \
+ ((UMAC_HDI_ACT_TBL_IDX_RA_UMAC << UMAC_HDI_ACT_TBL_IDX_RA_POS) |\
+ (UMAC_HDI_ACT_TBL_IDX_TID_UMAC << UMAC_HDI_ACT_TBL_IDX_TID_POS))
+#define UMAC_HDI_ACT_TBL_IDX_UMAC_CMD \
+ ((UMAC_HDI_ACT_TBL_IDX_RA_UMAC << UMAC_HDI_ACT_TBL_IDX_RA_POS) |\
+ (UMAC_HDI_ACT_TBL_IDX_TID_LMAC << UMAC_HDI_ACT_TBL_IDX_TID_POS))
+
+/* iwm_umac_notif_alive.page_grp_state Group number -- bits [3:0] */
+#define UMAC_ALIVE_PAGE_STS_GRP_NUM_POS 0
+#define UMAC_ALIVE_PAGE_STS_GRP_NUM_SEED 0xF
+
+/* iwm_umac_notif_alive.page_grp_state Super group number -- bits [7:4] */
+#define UMAC_ALIVE_PAGE_STS_SGRP_NUM_POS 4
+#define UMAC_ALIVE_PAGE_STS_SGRP_NUM_SEED 0xF
+
+/* iwm_umac_notif_alive.page_grp_state Group min size -- bits [15:8] */
+#define UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE_POS 8
+#define UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE_SEED 0xFF
+
+/* iwm_umac_notif_alive.page_grp_state Group max size -- bits [23:16] */
+#define UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE_POS 16
+#define UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE_SEED 0xFF
+
+/* iwm_umac_notif_alive.page_grp_state Super group max size -- bits [31:24] */
+#define UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE_POS 24
+#define UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE_SEED 0xFF
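+
+/*
+ * Example decode of a hypothetical page_grp_state word 0x20100443 with
+ * the fields above: group number 3 (bits [3:0]), super group 4 (bits
+ * [7:4]), group min size 4 pages (bits [15:8]), group max size 16
+ * pages (bits [23:16]) and super group max size 32 pages (bits
+ * [31:24]). This is the layout tx.c:iwm_tx_credit_init_pools() parses
+ * with GET_VAL32().
+ */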
+
+/* Barkers */
+#define UMAC_REBOOT_BARKER 0xdeadbeef
+#define UMAC_ACK_BARKER 0xfeedbabe
+#define UMAC_PAD_TERMINAL 0xadadadad
+
+/* UMAC JMP address */
+#define UMAC_MU_FW_INST_DATA_12_ADDR 0xBF0000
+
+/* iwm_umac_hdi_out_hdr.cmd OP code -- bits [3:0] */
+#define UMAC_HDI_OUT_CMD_OPCODE_POS 0
+#define UMAC_HDI_OUT_CMD_OPCODE_SEED 0xF
+
+/* iwm_umac_hdi_out_hdr.cmd End-Of-Transfer -- bits [10:10] */
+#define UMAC_HDI_OUT_CMD_EOT_POS 10
+#define UMAC_HDI_OUT_CMD_EOT_SEED 0x1
+
+/* iwm_umac_hdi_out_hdr.cmd UTFD only usage -- bits [11:11] */
+#define UMAC_HDI_OUT_CMD_UTFD_ONLY_POS 11
+#define UMAC_HDI_OUT_CMD_UTFD_ONLY_SEED 0x1
+
+/* iwm_umac_hdi_out_hdr.cmd Non-WiFi HW sequence number -- bits [15:12] */
+#define UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM_POS 12
+#define UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM_SEED 0xF
+
+/* iwm_umac_hdi_out_hdr.cmd Signature -- bits [31:16] */
+#define UMAC_HDI_OUT_CMD_SIGNATURE_POS 16
+#define UMAC_HDI_OUT_CMD_SIGNATURE_SEED 0xFFFF
+
+/* iwm_umac_hdi_out_hdr.meta_data Byte count -- bits [11:0] */
+#define UMAC_HDI_OUT_BYTE_COUNT_POS 0
+#define UMAC_HDI_OUT_BYTE_COUNT_SEED 0xFFF
+
+/* iwm_umac_hdi_out_hdr.meta_data Credit group -- bits [15:12] */
+#define UMAC_HDI_OUT_CREDIT_GRP_POS 12
+#define UMAC_HDI_OUT_CREDIT_GRP_SEED 0xF
+
+/* iwm_umac_hdi_out_hdr.meta_data RA/TID -- bits [23:16] */
+#define UMAC_HDI_OUT_RATID_POS 16
+#define UMAC_HDI_OUT_RATID_SEED 0xFF
+
+/* iwm_umac_hdi_out_hdr.meta_data LMAC offset -- bits [31:24] */
+#define UMAC_HDI_OUT_LMAC_OFFSET_POS 24
+#define UMAC_HDI_OUT_LMAC_OFFSET_SEED 0xFF
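+
+/*
+ * Example meta_data word for a hypothetical Tx frame of 0x120 bytes on
+ * credit group 1, with RA/TID 0x1A and LMAC offset 0, using the fields
+ * above: (0x00 << 24) | (0x1A << 16) | (0x1 << 12) | 0x120 = 0x001a1120.
+ */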
+
+/* Signature */
+#define UMAC_HDI_OUT_SIGNATURE 0xCBBC
+
+/* buffer alignment */
+#define UMAC_HDI_BUF_ALIGN_MSK 0xF
+
+/* iwm_umac_hdi_in_hdr.cmd OP code -- bits [3:0] */
+#define UMAC_HDI_IN_CMD_OPCODE_POS 0
+#define UMAC_HDI_IN_CMD_OPCODE_SEED 0xF
+
+/* iwm_umac_hdi_in_hdr.cmd Non-WiFi API response -- bits [6:4] */
+#define UMAC_HDI_IN_CMD_NON_WIFI_RESP_POS 4
+#define UMAC_HDI_IN_CMD_NON_WIFI_RESP_SEED 0x7
+
+/* iwm_umac_hdi_in_hdr.cmd WiFi API source -- bits [5:4] */
+#define UMAC_HDI_IN_CMD_SOURCE_POS 4
+#define UMAC_HDI_IN_CMD_SOURCE_SEED 0x3
+
+/* iwm_umac_hdi_in_hdr.cmd WiFi API EOT -- bits [6:6] */
+#define UMAC_HDI_IN_CMD_EOT_POS 6
+#define UMAC_HDI_IN_CMD_EOT_SEED 0x1
+
+/* iwm_umac_hdi_in_hdr.cmd timestamp present -- bits [7:7] */
+#define UMAC_HDI_IN_CMD_TIME_STAMP_PRESENT_POS 7
+#define UMAC_HDI_IN_CMD_TIME_STAMP_PRESENT_SEED 0x1
+
+/* iwm_umac_hdi_in_hdr.cmd WiFi Non-last AMSDU -- bits [8:8] */
+#define UMAC_HDI_IN_CMD_NON_LAST_AMSDU_POS 8
+#define UMAC_HDI_IN_CMD_NON_LAST_AMSDU_SEED 0x1
+
+/* iwm_umac_hdi_in_hdr.cmd WiFi HW sequence number -- bits [31:9] */
+#define UMAC_HDI_IN_CMD_HW_SEQ_NUM_POS 9
+#define UMAC_HDI_IN_CMD_HW_SEQ_NUM_SEED 0x7FFFFF
+
+/* iwm_umac_hdi_in_hdr.cmd Non-WiFi HW sequence number -- bits [15:12] */
+#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM_POS 12
+#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM_SEED 0xF
+
+/* iwm_umac_hdi_in_hdr.cmd Non-WiFi HW signature -- bits [31:16] */
+#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG_POS 16
+#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG_SEED 0xFFFF
+
+/* Fixed Non-WiFi signature */
+#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG 0xCBBC
+
+/* IN NTFY op-codes */
+#define UMAC_NOTIFY_OPCODE_ALIVE 0xA1
+#define UMAC_NOTIFY_OPCODE_INIT_COMPLETE 0xA2
+#define UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS 0xA3
+#define UMAC_NOTIFY_OPCODE_ERROR 0xA4
+#define UMAC_NOTIFY_OPCODE_DEBUG 0xA5
+#define UMAC_NOTIFY_OPCODE_WIFI_IF_WRAPPER 0xB0
+#define UMAC_NOTIFY_OPCODE_STATS 0xB1
+#define UMAC_NOTIFY_OPCODE_PAGE_DEALLOC 0xB3
+#define UMAC_NOTIFY_OPCODE_RX_TICKET 0xB4
+#define UMAC_NOTIFY_OPCODE_MAX (UMAC_NOTIFY_OPCODE_RX_TICKET -\
+ UMAC_NOTIFY_OPCODE_ALIVE + 1)
+#define UMAC_NOTIFY_OPCODE_FIRST (UMAC_NOTIFY_OPCODE_ALIVE)
+
+/* HDI OUT OP CODE */
+#define UMAC_HDI_OUT_OPCODE_PING 0x0
+#define UMAC_HDI_OUT_OPCODE_READ 0x1
+#define UMAC_HDI_OUT_OPCODE_WRITE 0x2
+#define UMAC_HDI_OUT_OPCODE_JUMP 0x3
+#define UMAC_HDI_OUT_OPCODE_REBOOT 0x4
+#define UMAC_HDI_OUT_OPCODE_WRITE_PERSISTENT 0x5
+#define UMAC_HDI_OUT_OPCODE_READ_PERSISTENT 0x6
+#define UMAC_HDI_OUT_OPCODE_READ_MODIFY_WRITE 0x7
+/* #define UMAC_HDI_OUT_OPCODE_RESERVED 0x8..0xA */
+#define UMAC_HDI_OUT_OPCODE_WRITE_AUX_REG 0xB
+#define UMAC_HDI_OUT_OPCODE_WIFI 0xF
+
+/* HDI IN OP CODE -- Non WiFi */
+#define UMAC_HDI_IN_OPCODE_PING 0x0
+#define UMAC_HDI_IN_OPCODE_READ 0x1
+#define UMAC_HDI_IN_OPCODE_WRITE 0x2
+#define UMAC_HDI_IN_OPCODE_WRITE_PERSISTENT 0x5
+#define UMAC_HDI_IN_OPCODE_READ_PERSISTENT 0x6
+#define UMAC_HDI_IN_OPCODE_READ_MODIFY_WRITE 0x7
+#define UMAC_HDI_IN_OPCODE_EP_MGMT 0x8
+#define UMAC_HDI_IN_OPCODE_CREDIT_CHANGE 0x9
+#define UMAC_HDI_IN_OPCODE_CTRL_DATABASE 0xA
+#define UMAC_HDI_IN_OPCODE_WRITE_AUX_REG 0xB
+#define UMAC_HDI_IN_OPCODE_NONWIFI_MAX \
+ (UMAC_HDI_IN_OPCODE_WRITE_AUX_REG + 1)
+#define UMAC_HDI_IN_OPCODE_WIFI 0xF
+
+/* HDI IN SOURCE */
+#define UMAC_HDI_IN_SOURCE_FHRX 0x0
+#define UMAC_HDI_IN_SOURCE_UDMA 0x1
+#define UMAC_HDI_IN_SOURCE_FW 0x2
+#define UMAC_HDI_IN_SOURCE_RESERVED 0x3
+
+/* OUT CMD op-codes */
+#define UMAC_CMD_OPCODE_ECHO 0x01
+#define UMAC_CMD_OPCODE_HALT 0x02
+#define UMAC_CMD_OPCODE_RESET 0x03
+#define UMAC_CMD_OPCODE_BULK_EP_INACT_TIMEOUT 0x09
+#define UMAC_CMD_OPCODE_URB_CANCEL_ACK 0x0A
+#define UMAC_CMD_OPCODE_DCACHE_FLUSH 0x0B
+#define UMAC_CMD_OPCODE_EEPROM_PROXY 0x0C
+#define UMAC_CMD_OPCODE_TX_ECHO 0x0D
+#define UMAC_CMD_OPCODE_DBG_MON 0x0E
+#define UMAC_CMD_OPCODE_INTERNAL_TX 0x0F
+#define UMAC_CMD_OPCODE_SET_PARAM_FIX 0x10
+#define UMAC_CMD_OPCODE_SET_PARAM_VAR 0x11
+#define UMAC_CMD_OPCODE_GET_PARAM 0x12
+#define UMAC_CMD_OPCODE_DBG_EVENT_WRAPPER 0x13
+#define UMAC_CMD_OPCODE_TARGET 0x14
+#define UMAC_CMD_OPCODE_STATISTIC_REQUEST 0x15
+#define UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST 0x16
+#define UMAC_CMD_OPCODE_SET_PARAM_LIST 0x17
+#define UMAC_CMD_OPCODE_GET_PARAM_LIST 0x18
+#define UMAC_CMD_OPCODE_BASE_WRAPPER 0xFA
+#define UMAC_CMD_OPCODE_LMAC_WRAPPER 0xFB
+#define UMAC_CMD_OPCODE_HW_TEST_WRAPPER 0xFC
+#define UMAC_CMD_OPCODE_WIFI_IF_WRAPPER 0xFD
+#define UMAC_CMD_OPCODE_WIFI_WRAPPER 0xFE
+#define UMAC_CMD_OPCODE_WIFI_PASS_THROUGH 0xFF
+
+/* UMAC WiFi interface op-codes */
+#define UMAC_WIFI_IF_CMD_SET_PROFILE 0x11
+#define UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE 0x12
+#define UMAC_WIFI_IF_CMD_SET_EXCLUDE_LIST 0x13
+#define UMAC_WIFI_IF_CMD_SCAN_REQUEST 0x14
+#define UMAC_WIFI_IF_CMD_SCAN_CONFIG 0x15
+#define UMAC_WIFI_IF_CMD_ADD_WEP40_KEY 0x16
+#define UMAC_WIFI_IF_CMD_ADD_WEP104_KEY 0x17
+#define UMAC_WIFI_IF_CMD_ADD_TKIP_KEY 0x18
+#define UMAC_WIFI_IF_CMD_ADD_CCMP_KEY 0x19
+#define UMAC_WIFI_IF_CMD_REMOVE_KEY 0x1A
+#define UMAC_WIFI_IF_CMD_GLOBAL_TX_KEY_ID 0x1B
+#define UMAC_WIFI_IF_CMD_SET_HOST_EXTENDED_IE 0x1C
+#define UMAC_WIFI_IF_CMD_GET_SUPPORTED_CHANNELS 0x1E
+#define UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER 0x20
+
+/* UMAC WiFi interface ports */
+#define UMAC_WIFI_IF_FLG_PORT_DEF 0x00
+#define UMAC_WIFI_IF_FLG_PORT_PAN 0x01
+#define UMAC_WIFI_IF_FLG_PORT_PAN_INVALID UMAC_WIFI_IF_FLG_PORT_DEF
+
+/* UMAC WiFi interface actions */
+#define UMAC_WIFI_IF_FLG_ACT_GET 0x10
+#define UMAC_WIFI_IF_FLG_ACT_SET 0x20
+
+/* iwm_umac_fw_cmd_hdr.meta_data byte count -- bits [11:0] */
+#define UMAC_FW_CMD_BYTE_COUNT_POS 0
+#define UMAC_FW_CMD_BYTE_COUNT_SEED 0xFFF
+
+/* iwm_umac_fw_cmd_hdr.meta_data status -- bits [15:12] */
+#define UMAC_FW_CMD_STATUS_POS 12
+#define UMAC_FW_CMD_STATUS_SEED 0xF
+
+/* iwm_umac_fw_cmd_hdr.meta_data full TX command by Driver -- bits [16:16] */
+#define UMAC_FW_CMD_TX_DRV_FULL_CMD_POS 16
+#define UMAC_FW_CMD_TX_DRV_FULL_CMD_SEED 0x1
+
+/* iwm_umac_fw_cmd_hdr.meta_data TX command by FW -- bits [17:17] */
+#define UMAC_FW_CMD_TX_FW_CMD_POS 17
+#define UMAC_FW_CMD_TX_FW_CMD_SEED 0x1
+
+/* iwm_umac_fw_cmd_hdr.meta_data TX plaintext mode -- bits [18:18] */
+#define UMAC_FW_CMD_TX_PLAINTEXT_POS 18
+#define UMAC_FW_CMD_TX_PLAINTEXT_SEED 0x1
+
+/* iwm_umac_fw_cmd_hdr.meta_data STA color -- bits [22:20] */
+#define UMAC_FW_CMD_TX_STA_COLOR_POS 20
+#define UMAC_FW_CMD_TX_STA_COLOR_SEED 0x7
+
+/* iwm_umac_fw_cmd_hdr.meta_data TX life time (TU) -- bits [31:24] */
+#define UMAC_FW_CMD_TX_LIFETIME_TU_POS 24
+#define UMAC_FW_CMD_TX_LIFETIME_TU_SEED 0xFF
+
+/* iwm_dev_cmd_hdr.flags Response required -- bits [5:5] */
+#define UMAC_DEV_CMD_FLAGS_RESP_REQ_POS 5
+#define UMAC_DEV_CMD_FLAGS_RESP_REQ_SEED 0x1
+
+/* iwm_dev_cmd_hdr.flags Aborted command -- bits [6:6] */
+#define UMAC_DEV_CMD_FLAGS_ABORT_POS 6
+#define UMAC_DEV_CMD_FLAGS_ABORT_SEED 0x1
+
+/* iwm_dev_cmd_hdr.flags Internal command -- bits [7:7] */
+#define DEV_CMD_FLAGS_FLD_INTERNAL_POS 7
+#define DEV_CMD_FLAGS_FLD_INTERNAL_SEED 0x1
+
+/* Rx */
+/* Rx actions */
+#define IWM_RX_TICKET_DROP 0x0
+#define IWM_RX_TICKET_RELEASE 0x1
+#define IWM_RX_TICKET_SNIFFER 0x2
+#define IWM_RX_TICKET_ENQUEUE 0x3
+
+/* Rx flags */
+#define IWM_RX_TICKET_PAD_SIZE_MSK 0x2
+#define IWM_RX_TICKET_SPECIAL_SNAP_MSK 0x4
+#define IWM_RX_TICKET_AMSDU_MSK 0x8
+#define IWM_RX_TICKET_DROP_REASON_POS 4
+#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << IWM_RX_TICKET_DROP_REASON_POS)
+
+#define IWM_RX_DROP_NO_DROP 0x0
+#define IWM_RX_DROP_BAD_CRC 0x1
+/* L2P no address match */
+#define IWM_RX_DROP_LMAC_ADDR_FILTER 0x2
+/* Multicast address not in list */
+#define IWM_RX_DROP_MCAST_ADDR_FILTER 0x3
+/* Control frames are not sent to the driver */
+#define IWM_RX_DROP_CTL_FRAME 0x4
+/* Our frame is back */
+#define IWM_RX_DROP_OUR_TX 0x5
+/* Association class filtering */
+#define IWM_RX_DROP_CLASS_FILTER 0x6
+/* Duplicated frame */
+#define IWM_RX_DROP_DUPLICATE_FILTER 0x7
+/* Decryption error */
+#define IWM_RX_DROP_SEC_ERR 0x8
+/* Unencrypted frame while encryption is on */
+#define IWM_RX_DROP_SEC_NO_ENCRYPTION 0x9
+/* Replay check failure */
+#define IWM_RX_DROP_SEC_REPLAY_ERR 0xa
+/* uCode and FW key color mismatch, check before replay */
+#define IWM_RX_DROP_SEC_KEY_COLOR_MISMATCH 0xb
+#define IWM_RX_DROP_SEC_TKIP_COUNTER_MEASURE 0xc
+/* No fragmentation Db is found */
+#define IWM_RX_DROP_FRAG_NO_RESOURCE 0xd
+/* Fragmentation Db has a seqCtl mismatch vs. the non-1st frag */
+#define IWM_RX_DROP_FRAG_ERR 0xe
+#define IWM_RX_DROP_FRAG_LOST 0xf
+#define IWM_RX_DROP_FRAG_COMPLETE 0x10
+/* Should be handled by UMAC */
+#define IWM_RX_DROP_MANAGEMENT 0x11
+/* STA not found by UMAC */
+#define IWM_RX_DROP_NO_STATION 0x12
+/* NULL or QoS NULL */
+#define IWM_RX_DROP_NULL_DATA 0x13
+#define IWM_RX_DROP_BA_REORDER_OLD_SEQCTL 0x14
+#define IWM_RX_DROP_BA_REORDER_DUPLICATE 0x15
+
+struct iwm_rx_ticket {
+ __le16 action;
+ __le16 id;
+ __le16 flags;
+ u8 payload_offset; /* includes: MAC header, pad, IV */
+ u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */
+} __attribute__ ((packed));
+
+struct iwm_rx_mpdu_hdr {
+ __le16 len;
+ __le16 reserved;
+} __attribute__ ((packed));
+
+/* UMAC SW WIFI API */
+
+struct iwm_dev_cmd_hdr {
+ u8 cmd;
+ u8 flags;
+ __le16 seq_num;
+} __attribute__ ((packed));
+
+struct iwm_umac_fw_cmd_hdr {
+ __le32 meta_data;
+ struct iwm_dev_cmd_hdr cmd;
+} __attribute__ ((packed));
+
+struct iwm_umac_wifi_out_hdr {
+ struct iwm_udma_out_wifi_hdr hw_hdr;
+ struct iwm_umac_fw_cmd_hdr sw_hdr;
+} __attribute__ ((packed));
+
+struct iwm_umac_nonwifi_out_hdr {
+ struct iwm_udma_out_nonwifi_hdr hw_hdr;
+} __attribute__ ((packed));
+
+struct iwm_umac_wifi_in_hdr {
+ struct iwm_udma_in_hdr hw_hdr;
+ struct iwm_umac_fw_cmd_hdr sw_hdr;
+} __attribute__ ((packed));
+
+struct iwm_umac_nonwifi_in_hdr {
+ struct iwm_udma_in_hdr hw_hdr;
+ __le32 time_stamp;
+} __attribute__ ((packed));
+
+#define IWM_UMAC_PAGE_SIZE 0x200
+
+/* Notify structures */
+struct iwm_fw_version {
+ u8 minor;
+ u8 major;
+ __le16 id;
+};
+
+struct iwm_fw_build {
+ u8 type;
+ u8 subtype;
+ u8 platform;
+ u8 opt;
+};
+
+struct iwm_fw_alive_hdr {
+ struct iwm_fw_version ver;
+ struct iwm_fw_build build;
+ __le32 os_build;
+ __le32 log_hdr_addr;
+ __le32 log_buf_addr;
+ __le32 sys_timer_addr;
+};
+
+#define WAIT_NOTIF_TIMEOUT (2 * HZ)
+#define SCAN_COMPLETE_TIMEOUT (3 * HZ)
+
+#define UMAC_NTFY_ALIVE_STATUS_ERR 0xDEAD
+#define UMAC_NTFY_ALIVE_STATUS_OK 0xCAFE
+
+#define UMAC_NTFY_INIT_COMPLETE_STATUS_ERR 0xDEAD
+#define UMAC_NTFY_INIT_COMPLETE_STATUS_OK 0xCAFE
+
+#define UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN 0x40
+#define UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN 0x80
+
+#define IWM_MACS_OUT_GROUPS 6
+#define IWM_MACS_OUT_SGROUPS 1
+
+#define WIFI_IF_NTFY_ASSOC_START 0x80
+#define WIFI_IF_NTFY_ASSOC_COMPLETE 0x81
+#define WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE 0x82
+#define WIFI_IF_NTFY_CONNECTION_TERMINATED 0x83
+#define WIFI_IF_NTFY_SCAN_COMPLETE 0x84
+#define WIFI_IF_NTFY_STA_TABLE_CHANGE 0x85
+#define WIFI_IF_NTFY_EXTENDED_IE_REQUIRED 0x86
+#define WIFI_IF_NTFY_RADIO_PREEMPTION 0x87
+#define WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED 0x88
+#define WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED 0x89
+#define WIFI_IF_NTFY_LINK_QUALITY_STATISTICS 0x8A
+#define WIFI_IF_NTFY_MGMT_FRAME 0x8B
+
+/* DEBUG INDICATIONS */
+#define WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_START 0xE0
+#define WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_COMPLETE 0xE1
+#define WIFI_DBG_IF_NTFY_SCAN_CHANNEL_START 0xE2
+#define WIFI_DBG_IF_NTFY_SCAN_CHANNEL_RESULT 0xE3
+#define WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_START 0xE4
+#define WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_COMPLETE 0xE5
+#define WIFI_DBG_IF_NTFY_CNCT_ATC_START 0xE6
+#define WIFI_DBG_IF_NTFY_COEX_NOTIFICATION 0xE7
+#define WIFI_DBG_IF_NTFY_COEX_HANDLE_ENVELOP 0xE8
+#define WIFI_DBG_IF_NTFY_COEX_HANDLE_RELEASE_ENVELOP 0xE9
+
+/* Notification structures */
+struct iwm_umac_notif_wifi_if {
+ struct iwm_umac_wifi_in_hdr hdr;
+ u8 status;
+ u8 flags;
+ __le16 buf_size;
+} __attribute__ ((packed));
+
+#define UMAC_ROAM_REASON_FIRST_SELECTION 0x1
+#define UMAC_ROAM_REASON_AP_DEAUTH 0x2
+#define UMAC_ROAM_REASON_AP_CONNECT_LOST 0x3
+#define UMAC_ROAM_REASON_RSSI 0x4
+#define UMAC_ROAM_REASON_AP_ASSISTED_ROAM 0x5
+#define UMAC_ROAM_REASON_IBSS_COALESCING 0x6
+
+struct iwm_umac_notif_assoc_start {
+ struct iwm_umac_notif_wifi_if mlme_hdr;
+ __le32 roam_reason;
+ u8 bssid[ETH_ALEN];
+ u8 reserved[2];
+} __attribute__ ((packed));
+
+#define UMAC_ASSOC_COMPLETE_SUCCESS 0x0
+#define UMAC_ASSOC_COMPLETE_FAILURE 0x1
+
+struct iwm_umac_notif_assoc_complete {
+ struct iwm_umac_notif_wifi_if mlme_hdr;
+ __le32 status;
+ u8 bssid[ETH_ALEN];
+ u8 band;
+ u8 channel;
+} __attribute__ ((packed));
+
+#define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0
+#define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1
+#define UMAC_PROFILE_INVALID_REQUEST 0x2
+#define UMAC_PROFILE_INVALID_RF_PREEMPTED 0x3
+
+struct iwm_umac_notif_profile_invalidate {
+ struct iwm_umac_notif_wifi_if mlme_hdr;
+ __le32 reason;
+} __attribute__ ((packed));
+
+#define UMAC_SCAN_RESULT_SUCCESS 0x0
+#define UMAC_SCAN_RESULT_ABORTED 0x1
+#define UMAC_SCAN_RESULT_REJECTED 0x2
+#define UMAC_SCAN_RESULT_FAILED 0x3
+
+struct iwm_umac_notif_scan_complete {
+ struct iwm_umac_notif_wifi_if mlme_hdr;
+ __le32 type;
+ __le32 result;
+ u8 seq_num;
+} __attribute__ ((packed));
+
+#define UMAC_OPCODE_ADD_MODIFY 0x0
+#define UMAC_OPCODE_REMOVE 0x1
+#define UMAC_OPCODE_CLEAR_ALL 0x2
+
+#define UMAC_STA_FLAG_QOS 0x1
+
+struct iwm_umac_notif_sta_info {
+ struct iwm_umac_notif_wifi_if mlme_hdr;
+ __le32 opcode;
+ u8 mac_addr[ETH_ALEN];
+ u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */
+ u8 flags;
+} __attribute__ ((packed));
+
+#define UMAC_BAND_2GHZ 0
+#define UMAC_BAND_5GHZ 1
+
+#define UMAC_CHANNEL_WIDTH_20MHZ 0
+#define UMAC_CHANNEL_WIDTH_40MHZ 1
+
+struct iwm_umac_notif_bss_info {
+ struct iwm_umac_notif_wifi_if mlme_hdr;
+ __le32 type;
+ __le32 timestamp;
+ __le16 table_idx;
+ __le16 frame_len;
+ u8 band;
+ u8 channel;
+ s8 rssi;
+ u8 reserved;
+ u8 frame_buf[1];
+} __attribute__ ((packed));
+
+#define IWM_BSS_REMOVE_INDEX_MSK 0x0fff
+#define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00
+
+#define IWM_BSS_REMOVE_FLG_AGE 0x1000
+#define IWM_BSS_REMOVE_FLG_TIMEOUT 0x2000
+#define IWM_BSS_REMOVE_FLG_TABLE_FULL 0x4000
+
+struct iwm_umac_notif_bss_removed {
+ struct iwm_umac_notif_wifi_if mlme_hdr;
+ __le32 count;
+ __le16 entries[0];
+} __attribute__ ((packed));
+
+struct iwm_umac_notif_mgt_frame {
+ struct iwm_umac_notif_wifi_if mlme_hdr;
+ __le16 len;
+ u8 frame[1];
+} __attribute__ ((packed));
+
+struct iwm_umac_notif_alive {
+ struct iwm_umac_wifi_in_hdr hdr;
+ __le16 status;
+ __le16 reserved1;
+ struct iwm_fw_alive_hdr alive_data;
+ __le16 reserved2;
+ __le16 page_grp_count;
+ __le32 page_grp_state[IWM_MACS_OUT_GROUPS];
+} __attribute__ ((packed));
+
+struct iwm_umac_notif_init_complete {
+ __le16 status;
+ __le16 reserved;
+} __attribute__ ((packed));
+
+/* error categories */
+enum {
+ UMAC_SYS_ERR_CAT_NONE = 0,
+ UMAC_SYS_ERR_CAT_BOOT,
+ UMAC_SYS_ERR_CAT_UMAC,
+ UMAC_SYS_ERR_CAT_UAXM,
+ UMAC_SYS_ERR_CAT_LMAC,
+ UMAC_SYS_ERR_CAT_MAX
+};
+
+struct iwm_fw_error_hdr {
+ __le32 category;
+ __le32 status;
+ __le32 pc;
+ __le32 blink1;
+ __le32 blink2;
+ __le32 ilink1;
+ __le32 ilink2;
+ __le32 data1;
+ __le32 data2;
+ __le32 line_num;
+ __le32 umac_status;
+ __le32 lmac_status;
+ __le32 sdio_status;
+} __attribute__ ((packed));
+
+struct iwm_umac_notif_error {
+ struct iwm_umac_wifi_in_hdr hdr;
+ struct iwm_fw_error_hdr err;
+} __attribute__ ((packed));
+
+#define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0
+#define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff
+#define UMAC_DEALLOC_NTFY_CHANGES_MSK_POS 8
+#define UMAC_DEALLOC_NTFY_CHANGES_MSK_SEED 0xffffff
+#define UMAC_DEALLOC_NTFY_PAGE_CNT_POS 0
+#define UMAC_DEALLOC_NTFY_PAGE_CNT_SEED 0xffffff
+#define UMAC_DEALLOC_NTFY_GROUP_NUM_POS 24
+#define UMAC_DEALLOC_NTFY_GROUP_NUM_SEED 0xf
+
+struct iwm_umac_notif_page_dealloc {
+ struct iwm_umac_wifi_in_hdr hdr;
+ __le32 changes;
+ __le32 grp_info[IWM_MACS_OUT_GROUPS];
+} __attribute__ ((packed));
+
+struct iwm_umac_notif_wifi_status {
+ struct iwm_umac_wifi_in_hdr hdr;
+ __le16 status;
+ __le16 reserved;
+} __attribute__ ((packed));
+
+struct iwm_umac_notif_rx_ticket {
+ struct iwm_umac_wifi_in_hdr hdr;
+ u8 num_tickets;
+ u8 reserved[3];
+ struct iwm_rx_ticket tickets[1];
+} __attribute__ ((packed));
+
+/* Tx/Rx rates window (number of max of last update window per second) */
+#define UMAC_NTF_RATE_SAMPLE_NR 4
+
+#define IWM_UMAC_MGMT_TID 8
+#define IWM_UMAC_TID_NR 8
+
+struct iwm_umac_notif_stats {
+ struct iwm_umac_wifi_in_hdr hdr;
+ __le32 flags;
+ __le32 timestamp;
+ __le16 tid_load[IWM_UMAC_TID_NR + 2]; /* 1 non-QoS + 1 dword align */
+ __le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR];
+ __le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR];
+ s32 rssi_dbm;
+ s32 noise_dbm;
+ __le32 supp_rates;
+ __le32 missed_beacons;
+ __le32 rx_beacons;
+ __le32 rx_dir_pkts;
+ __le32 rx_nondir_pkts;
+ __le32 rx_multicast;
+ __le32 rx_errors;
+ __le32 rx_drop_other_bssid;
+ __le32 rx_drop_decode;
+ __le32 rx_drop_reassembly;
+ __le32 rx_drop_bad_len;
+ __le32 rx_drop_overflow;
+ __le32 rx_drop_crc;
+ __le32 rx_drop_missed;
+ __le32 tx_dir_pkts;
+ __le32 tx_nondir_pkts;
+ __le32 tx_failure;
+ __le32 tx_errors;
+ __le32 tx_drop_max_retry;
+ __le32 tx_err_abort;
+ __le32 tx_err_carrier;
+ __le32 rx_bytes;
+ __le32 tx_bytes;
+ __le32 tx_power;
+ __le32 tx_max_power;
+ __le32 roam_threshold;
+ __le32 ap_assoc_nr;
+ __le32 scan_full;
+ __le32 scan_abort;
+ __le32 ap_nr;
+ __le32 roam_nr;
+ __le32 roam_missed_beacons;
+ __le32 roam_rssi;
+ __le32 roam_unassoc;
+ __le32 roam_deauth;
+ __le32 roam_ap_loadblance;
+} __attribute__ ((packed));
+
+/* WiFi interface wrapper header */
+struct iwm_umac_wifi_if {
+ u8 oid;
+ u8 flags;
+ __le16 buf_size;
+} __attribute__ ((packed));
+
+#define IWM_SEQ_NUM_HOST_MSK 0x0000
+#define IWM_SEQ_NUM_UMAC_MSK 0x4000
+#define IWM_SEQ_NUM_LMAC_MSK 0x8000
+#define IWM_SEQ_NUM_MSK 0xC000
+
+#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/wext.c b/drivers/net/wireless/iwmc3200wifi/wext.c
new file mode 100644
index 00000000000..584c94d0f39
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/wext.c
@@ -0,0 +1,723 @@
+/*
+ * Intel Wireless Multicomm 3200 WiFi driver
+ *
+ * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
+ * Samuel Ortiz <samuel.ortiz@intel.com>
+ * Zhu Yi <yi.zhu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <net/cfg80211.h>
+#include <net/iw_handler.h>
+
+#include "iwm.h"
+#include "umac.h"
+#include "commands.h"
+#include "debug.h"
+
+static struct iw_statistics *iwm_get_wireless_stats(struct net_device *dev)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+ struct iw_statistics *wstats = &iwm->wstats;
+
+ if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
+ memset(wstats, 0, sizeof(struct iw_statistics));
+ wstats->qual.updated = IW_QUAL_ALL_INVALID;
+ }
+
+ return wstats;
+}
+
+static int iwm_wext_siwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+
+ if (freq->flags == IW_FREQ_AUTO)
+ return 0;
+
+ /* frequency/channel can only be set in IBSS mode */
+ if (iwm->conf.mode != UMAC_MODE_IBSS)
+ return -EOPNOTSUPP;
+
+ return cfg80211_ibss_wext_siwfreq(dev, info, freq, extra);
+}
+
+static int iwm_wext_giwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+
+ if (iwm->conf.mode == UMAC_MODE_IBSS)
+ return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
+
+ freq->e = 0;
+ freq->m = iwm->channel;
+
+ return 0;
+}
+
+static int iwm_wext_siwap(struct net_device *dev, struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+
+ if (iwm->conf.mode == UMAC_MODE_IBSS)
+ return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra);
+
+ if (!test_bit(IWM_STATUS_READY, &iwm->status))
+ return -EIO;
+
+ if (is_zero_ether_addr(ap_addr->sa_data) ||
+ is_broadcast_ether_addr(ap_addr->sa_data)) {
+ IWM_DBG_WEXT(iwm, DBG, "clear mandatory bssid %pM\n",
+ iwm->umac_profile->bssid[0]);
+ memset(&iwm->umac_profile->bssid[0], 0, ETH_ALEN);
+ iwm->umac_profile->bss_num = 0;
+ } else {
+ IWM_DBG_WEXT(iwm, DBG, "add mandatory bssid %pM\n",
+ ap_addr->sa_data);
+ memcpy(&iwm->umac_profile->bssid[0], ap_addr->sa_data,
+ ETH_ALEN);
+ iwm->umac_profile->bss_num = 1;
+ }
+
+ if (iwm->umac_profile_active) {
+ if (!memcmp(&iwm->umac_profile->bssid[0], iwm->bssid, ETH_ALEN))
+ return 0;
+
+ iwm_invalidate_mlme_profile(iwm);
+ }
+
+ if (iwm->umac_profile->ssid.ssid_len)
+ return iwm_send_mlme_profile(iwm);
+
+ return 0;
+}
+
+static int iwm_wext_giwap(struct net_device *dev, struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+
+ switch (iwm->conf.mode) {
+ case UMAC_MODE_IBSS:
+ return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra);
+ case UMAC_MODE_BSS:
+ if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
+ ap_addr->sa_family = ARPHRD_ETHER;
+ memcpy(&ap_addr->sa_data, iwm->bssid, ETH_ALEN);
+ } else
+ memset(&ap_addr->sa_data, 0, ETH_ALEN);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int iwm_wext_siwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+ size_t len = data->length;
+ int ret;
+
+ if (iwm->conf.mode == UMAC_MODE_IBSS)
+ return cfg80211_ibss_wext_siwessid(dev, info, data, ssid);
+
+ if (!test_bit(IWM_STATUS_READY, &iwm->status))
+ return -EIO;
+
+ if (len > 0 && ssid[len - 1] == '\0')
+ len--;
+
+ if (iwm->umac_profile_active) {
+ if (iwm->umac_profile->ssid.ssid_len == len &&
+ !memcmp(iwm->umac_profile->ssid.ssid, ssid, len))
+ return 0;
+
+ ret = iwm_invalidate_mlme_profile(iwm);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't invalidate profile\n");
+ return ret;
+ }
+ }
+
+ iwm->umac_profile->ssid.ssid_len = len;
+ memcpy(iwm->umac_profile->ssid.ssid, ssid, len);
+
+ return iwm_send_mlme_profile(iwm);
+}
+
+static int iwm_wext_giwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+
+ if (iwm->conf.mode == UMAC_MODE_IBSS)
+ return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
+
+ if (!test_bit(IWM_STATUS_READY, &iwm->status))
+ return -EIO;
+
+ data->length = iwm->umac_profile->ssid.ssid_len;
+ if (data->length) {
+ memcpy(ssid, iwm->umac_profile->ssid.ssid, data->length);
+ data->flags = 1;
+ } else
+ data->flags = 0;
+
+ return 0;
+}
+
+static struct iwm_key *
+iwm_key_init(struct iwm_priv *iwm, u8 key_idx, bool in_use,
+ struct iw_encode_ext *ext, u8 alg)
+{
+ struct iwm_key *key = &iwm->keys[key_idx];
+
+ memset(key, 0, sizeof(struct iwm_key));
+ memcpy(key->hdr.mac, ext->addr.sa_data, ETH_ALEN);
+ key->hdr.key_idx = key_idx;
+ if (is_broadcast_ether_addr(ext->addr.sa_data))
+ key->hdr.multicast = 1;
+
+ key->in_use = in_use;
+ key->flags = ext->ext_flags;
+ key->alg = alg;
+ key->key_len = ext->key_len;
+ memcpy(key->key, ext->key, ext->key_len);
+
+ return key;
+}
+
+static int iwm_wext_giwrate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rate, char *extra)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+
+ rate->value = iwm->rate * 1000000;
+
+ return 0;
+}
+
+static int iwm_wext_siwencode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *key_buf)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+ struct iwm_key *uninitialized_var(key);
+ int idx, i, uninitialized_var(alg), remove = 0, ret;
+
+ IWM_DBG_WEXT(iwm, DBG, "key len: %d\n", erq->length);
+ IWM_DBG_WEXT(iwm, DBG, "flags: 0x%x\n", erq->flags);
+
+ if (!iwm->umac_profile) {
+ IWM_ERR(iwm, "UMAC profile not allocated yet\n");
+ return -ENODEV;
+ }
+
+ if (erq->length == WLAN_KEY_LEN_WEP40) {
+ alg = UMAC_CIPHER_TYPE_WEP_40;
+ iwm->umac_profile->sec.ucast_cipher = UMAC_CIPHER_TYPE_WEP_40;
+ iwm->umac_profile->sec.mcast_cipher = UMAC_CIPHER_TYPE_WEP_40;
+ } else if (erq->length == WLAN_KEY_LEN_WEP104) {
+ alg = UMAC_CIPHER_TYPE_WEP_104;
+ iwm->umac_profile->sec.ucast_cipher = UMAC_CIPHER_TYPE_WEP_104;
+ iwm->umac_profile->sec.mcast_cipher = UMAC_CIPHER_TYPE_WEP_104;
+ }
+
+ if (erq->flags & IW_ENCODE_RESTRICTED)
+ iwm->umac_profile->sec.auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
+ else
+ iwm->umac_profile->sec.auth_type = UMAC_AUTH_TYPE_OPEN;
+
+ idx = erq->flags & IW_ENCODE_INDEX;
+ if (idx == 0) {
+ if (iwm->default_key)
+ for (i = 0; i < IWM_NUM_KEYS; i++) {
+ if (iwm->default_key == &iwm->keys[i]) {
+ idx = i;
+ break;
+ }
+ }
+ else
+ iwm->default_key = &iwm->keys[idx];
+ } else if (idx < 1 || idx > 4) {
+ return -EINVAL;
+ } else
+ idx--;
+
+ if (erq->flags & IW_ENCODE_DISABLED)
+ remove = 1;
+ else if (erq->length == 0) {
+ if (!iwm->keys[idx].in_use)
+ return -EINVAL;
+ iwm->default_key = &iwm->keys[idx];
+ }
+
+ if (erq->length) {
+ key = &iwm->keys[idx];
+ memset(key, 0, sizeof(struct iwm_key));
+ memset(key->hdr.mac, 0xff, ETH_ALEN);
+ key->hdr.key_idx = idx;
+ key->hdr.multicast = 1;
+ key->in_use = !remove;
+ key->alg = alg;
+ key->key_len = erq->length;
+ memcpy(key->key, key_buf, erq->length);
+
+ IWM_DBG_WEXT(iwm, DBG, "Setting key %d, default: %d\n",
+ idx, !!iwm->default_key);
+ }
+
+ if (remove) {
+ if ((erq->flags & IW_ENCODE_NOKEY) || (erq->length == 0)) {
+ int j;
+ for (j = 0; j < IWM_NUM_KEYS; j++)
+ if (iwm->keys[j].in_use) {
+ struct iwm_key *k = &iwm->keys[j];
+
+ k->in_use = 0;
+ ret = iwm_set_key(iwm, remove, 0, k);
+ if (ret < 0)
+ return ret;
+ }
+
+ iwm->umac_profile->sec.ucast_cipher =
+ UMAC_CIPHER_TYPE_NONE;
+ iwm->umac_profile->sec.mcast_cipher =
+ UMAC_CIPHER_TYPE_NONE;
+ iwm->umac_profile->sec.auth_type =
+ UMAC_AUTH_TYPE_OPEN;
+
+ return 0;
+ } else {
+ key->in_use = 0;
+ return iwm_set_key(iwm, remove, 0, key);
+ }
+ }
+
+ /*
+ * If we haven't set a profile yet, we can't set keys.
+ * Keys will be pushed after we're associated.
+ */
+ if (!iwm->umac_profile_active)
+ return 0;
+
+ /*
+ * If there is a current active profile, but no
+ * default key, it's not worth trying to associate again.
+ */
+ if (!iwm->default_key)
+ return 0;
+
+ /*
+ * Here we have an active profile, but a key setting changed.
+ * We thus have to invalidate the current profile, and push the
+ * new one. Keys will be pushed when association takes place.
+ */
+ ret = iwm_invalidate_mlme_profile(iwm);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't invalidate profile\n");
+ return ret;
+ }
+
+ return iwm_send_mlme_profile(iwm);
+}
+
+static int iwm_wext_giwencode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *key)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+ int idx, i;
+
+ idx = erq->flags & IW_ENCODE_INDEX;
+ if (idx < 1 || idx > 4) {
+ idx = -1;
+ if (!iwm->default_key) {
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_NOKEY;
+ return 0;
+ } else
+ for (i = 0; i < IWM_NUM_KEYS; i++) {
+ if (iwm->default_key == &iwm->keys[i]) {
+ idx = i;
+ break;
+ }
+ }
+ if (idx < 0)
+ return -EINVAL;
+ } else
+ idx--;
+
+ erq->flags = idx + 1;
+
+ if (!iwm->keys[idx].in_use) {
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ return 0;
+ }
+
+ memcpy(key, iwm->keys[idx].key,
+ min_t(int, erq->length, iwm->keys[idx].key_len));
+ erq->length = iwm->keys[idx].key_len;
+ erq->flags |= IW_ENCODE_ENABLED;
+
+ if (iwm->umac_profile->mode == UMAC_MODE_BSS) {
+ switch (iwm->umac_profile->sec.auth_type) {
+ case UMAC_AUTH_TYPE_OPEN:
+ erq->flags |= IW_ENCODE_OPEN;
+ break;
+ default:
+ erq->flags |= IW_ENCODE_RESTRICTED;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int iwm_set_wpa_version(struct iwm_priv *iwm, u8 wpa_version)
+{
+ if (wpa_version & IW_AUTH_WPA_VERSION_WPA2)
+ iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK;
+ else if (wpa_version & IW_AUTH_WPA_VERSION_WPA)
+ iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WPA_ON_MSK;
+ else
+ iwm->umac_profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE;
+
+ return 0;
+}
+
+static int iwm_wext_siwpower(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *wrq, char *extra)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+ u32 power_index;
+
+ if (wrq->disabled) {
+ power_index = IWM_POWER_INDEX_MIN;
+ goto set;
+ } else
+ power_index = IWM_POWER_INDEX_DEFAULT;
+
+ switch (wrq->flags & IW_POWER_MODE) {
+ case IW_POWER_ON:
+ case IW_POWER_MODE:
+ case IW_POWER_ALL_R:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ set:
+ if (power_index == iwm->conf.power_index)
+ return 0;
+
+ iwm->conf.power_index = power_index;
+
+ return iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+ CFG_POWER_INDEX, iwm->conf.power_index);
+}
+
+static int iwm_wext_giwpower(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+
+ wrqu->power.disabled = (iwm->conf.power_index == IWM_POWER_INDEX_MIN);
+
+ return 0;
+}
+
+static int iwm_set_key_mgt(struct iwm_priv *iwm, u8 key_mgt)
+{
+ u8 *auth_type = &iwm->umac_profile->sec.auth_type;
+
+ if (key_mgt == IW_AUTH_KEY_MGMT_802_1X)
+ *auth_type = UMAC_AUTH_TYPE_8021X;
+ else if (key_mgt == IW_AUTH_KEY_MGMT_PSK) {
+ if (iwm->umac_profile->sec.flags &
+ (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK))
+ *auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
+ else
+ *auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
+ } else {
+ IWM_ERR(iwm, "Invalid key mgt: 0x%x\n", key_mgt);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iwm_set_cipher(struct iwm_priv *iwm, u8 cipher, u8 ucast)
+{
+ u8 *profile_cipher = ucast ? &iwm->umac_profile->sec.ucast_cipher :
+ &iwm->umac_profile->sec.mcast_cipher;
+
+ switch (cipher) {
+ case IW_AUTH_CIPHER_NONE:
+ *profile_cipher = UMAC_CIPHER_TYPE_NONE;
+ break;
+ case IW_AUTH_CIPHER_WEP40:
+ *profile_cipher = UMAC_CIPHER_TYPE_WEP_40;
+ break;
+ case IW_AUTH_CIPHER_TKIP:
+ *profile_cipher = UMAC_CIPHER_TYPE_TKIP;
+ break;
+ case IW_AUTH_CIPHER_CCMP:
+ *profile_cipher = UMAC_CIPHER_TYPE_CCMP;
+ break;
+ case IW_AUTH_CIPHER_WEP104:
+ *profile_cipher = UMAC_CIPHER_TYPE_WEP_104;
+ break;
+ default:
+ IWM_ERR(iwm, "Unsupported cipher: 0x%x\n", cipher);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int iwm_set_auth_alg(struct iwm_priv *iwm, u8 auth_alg)
+{
+ u8 *auth_type = &iwm->umac_profile->sec.auth_type;
+
+ switch (auth_alg) {
+ case IW_AUTH_ALG_OPEN_SYSTEM:
+ *auth_type = UMAC_AUTH_TYPE_OPEN;
+ break;
+ case IW_AUTH_ALG_SHARED_KEY:
+ if (iwm->umac_profile->sec.flags &
+ (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) {
+ if (*auth_type == UMAC_AUTH_TYPE_8021X)
+ return -EINVAL;
+ *auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
+ } else {
+ *auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
+ }
+ break;
+ case IW_AUTH_ALG_LEAP:
+ default:
+ IWM_ERR(iwm, "Unsupported auth alg: 0x%x\n", auth_alg);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int iwm_wext_siwauth(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+ int ret;
+
+ if ((data->flags) &
+ (IW_AUTH_WPA_VERSION | IW_AUTH_KEY_MGMT |
+ IW_AUTH_WPA_ENABLED | IW_AUTH_80211_AUTH_ALG)) {
+ /* We need to invalidate the current profile */
+ if (iwm->umac_profile_active) {
+ ret = iwm_invalidate_mlme_profile(iwm);
+ if (ret < 0) {
+ IWM_ERR(iwm, "Couldn't invalidate profile\n");
+ return ret;
+ }
+ }
+ }
+
+ switch (data->flags & IW_AUTH_INDEX) {
+ case IW_AUTH_WPA_VERSION:
+ return iwm_set_wpa_version(iwm, data->value);
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE:
+ return iwm_set_cipher(iwm, data->value, 1);
+ break;
+ case IW_AUTH_CIPHER_GROUP:
+ return iwm_set_cipher(iwm, data->value, 0);
+ break;
+ case IW_AUTH_KEY_MGMT:
+ return iwm_set_key_mgt(iwm, data->value);
+ break;
+ case IW_AUTH_80211_AUTH_ALG:
+ return iwm_set_auth_alg(iwm, data->value);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int iwm_wext_giwauth(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
+{
+ return 0;
+}
+
+static int iwm_wext_siwencodeext(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *extra)
+{
+ struct iwm_priv *iwm = ndev_to_iwm(dev);
+ struct iwm_key *key;
+ struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
+ int uninitialized_var(alg), idx, i, remove = 0;
+
+ IWM_DBG_WEXT(iwm, DBG, "alg: 0x%x\n", ext->alg);
+ IWM_DBG_WEXT(iwm, DBG, "key len: %d\n", ext->key_len);
+ IWM_DBG_WEXT(iwm, DBG, "ext_flags: 0x%x\n", ext->ext_flags);
+ IWM_DBG_WEXT(iwm, DBG, "flags: 0x%x\n", erq->flags);
+ IWM_DBG_WEXT(iwm, DBG, "length: 0x%x\n", erq->length);
+
+ switch (ext->alg) {
+ case IW_ENCODE_ALG_NONE:
+ remove = 1;
+ break;
+ case IW_ENCODE_ALG_WEP:
+ if (ext->key_len == WLAN_KEY_LEN_WEP40)
+ alg = UMAC_CIPHER_TYPE_WEP_40;
+ else if (ext->key_len == WLAN_KEY_LEN_WEP104)
+ alg = UMAC_CIPHER_TYPE_WEP_104;
+ else {
+ IWM_ERR(iwm, "Invalid key length: %d\n", ext->key_len);
+ return -EINVAL;
+ }
+
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ alg = UMAC_CIPHER_TYPE_TKIP;
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ alg = UMAC_CIPHER_TYPE_CCMP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ idx = erq->flags & IW_ENCODE_INDEX;
+
+ if (idx == 0) {
+ if (iwm->default_key)
+ for (i = 0; i < IWM_NUM_KEYS; i++) {
+ if (iwm->default_key == &iwm->keys[i]) {
+ idx = i;
+ break;
+ }
+ }
+ } else if (idx < 1 || idx > 4) {
+ return -EINVAL;
+ } else
+ idx--;
+
+ if (erq->flags & IW_ENCODE_DISABLED)
+ remove = 1;
+ else if ((erq->length == 0) ||
+ (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
+ iwm->default_key = &iwm->keys[idx];
+ if (iwm->umac_profile_active && ext->alg == IW_ENCODE_ALG_WEP)
+ return iwm_set_tx_key(iwm, idx);
+ }
+
+ key = iwm_key_init(iwm, idx, !remove, ext, alg);
+
+ return iwm_set_key(iwm, remove, !iwm->default_key, key);
+}
+
+static const iw_handler iwm_handlers[] =
+{
+ (iw_handler) NULL, /* SIOCSIWCOMMIT */
+ (iw_handler) cfg80211_wext_giwname, /* SIOCGIWNAME */
+ (iw_handler) NULL, /* SIOCSIWNWID */
+ (iw_handler) NULL, /* SIOCGIWNWID */
+ (iw_handler) iwm_wext_siwfreq, /* SIOCSIWFREQ */
+ (iw_handler) iwm_wext_giwfreq, /* SIOCGIWFREQ */
+ (iw_handler) cfg80211_wext_siwmode, /* SIOCSIWMODE */
+ (iw_handler) cfg80211_wext_giwmode, /* SIOCGIWMODE */
+ (iw_handler) NULL, /* SIOCSIWSENS */
+ (iw_handler) NULL, /* SIOCGIWSENS */
+ (iw_handler) NULL /* not used */, /* SIOCSIWRANGE */
+ (iw_handler) cfg80211_wext_giwrange, /* SIOCGIWRANGE */
+ (iw_handler) NULL /* not used */, /* SIOCSIWPRIV */
+ (iw_handler) NULL /* kernel code */, /* SIOCGIWPRIV */
+ (iw_handler) NULL /* not used */, /* SIOCSIWSTATS */
+ (iw_handler) NULL /* kernel code */, /* SIOCGIWSTATS */
+ (iw_handler) NULL, /* SIOCSIWSPY */
+ (iw_handler) NULL, /* SIOCGIWSPY */
+ (iw_handler) NULL, /* SIOCSIWTHRSPY */
+ (iw_handler) NULL, /* SIOCGIWTHRSPY */
+ (iw_handler) iwm_wext_siwap, /* SIOCSIWAP */
+ (iw_handler) iwm_wext_giwap, /* SIOCGIWAP */
+ (iw_handler) NULL, /* SIOCSIWMLME */
+ (iw_handler) NULL, /* SIOCGIWAPLIST */
+ (iw_handler) cfg80211_wext_siwscan, /* SIOCSIWSCAN */
+ (iw_handler) cfg80211_wext_giwscan, /* SIOCGIWSCAN */
+ (iw_handler) iwm_wext_siwessid, /* SIOCSIWESSID */
+ (iw_handler) iwm_wext_giwessid, /* SIOCGIWESSID */
+ (iw_handler) NULL, /* SIOCSIWNICKN */
+ (iw_handler) NULL, /* SIOCGIWNICKN */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* SIOCSIWRATE */
+ (iw_handler) iwm_wext_giwrate, /* SIOCGIWRATE */
+ (iw_handler) cfg80211_wext_siwrts, /* SIOCSIWRTS */
+ (iw_handler) cfg80211_wext_giwrts, /* SIOCGIWRTS */
+ (iw_handler) cfg80211_wext_siwfrag, /* SIOCSIWFRAG */
+ (iw_handler) cfg80211_wext_giwfrag, /* SIOCGIWFRAG */
+ (iw_handler) NULL, /* SIOCSIWTXPOW */
+ (iw_handler) NULL, /* SIOCGIWTXPOW */
+ (iw_handler) NULL, /* SIOCSIWRETRY */
+ (iw_handler) NULL, /* SIOCGIWRETRY */
+ (iw_handler) iwm_wext_siwencode, /* SIOCSIWENCODE */
+ (iw_handler) iwm_wext_giwencode, /* SIOCGIWENCODE */
+ (iw_handler) iwm_wext_siwpower, /* SIOCSIWPOWER */
+ (iw_handler) iwm_wext_giwpower, /* SIOCGIWPOWER */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* SIOCSIWGENIE */
+ (iw_handler) NULL, /* SIOCGIWGENIE */
+ (iw_handler) iwm_wext_siwauth, /* SIOCSIWAUTH */
+ (iw_handler) iwm_wext_giwauth, /* SIOCGIWAUTH */
+ (iw_handler) iwm_wext_siwencodeext, /* SIOCSIWENCODEEXT */
+ (iw_handler) NULL, /* SIOCGIWENCODEEXT */
+ (iw_handler) NULL, /* SIOCSIWPMKSA */
+ (iw_handler) NULL, /* -- hole -- */
+};
+
+const struct iw_handler_def iwm_iw_handler_def = {
+ .num_standard = ARRAY_SIZE(iwm_handlers),
+ .standard = (iw_handler *) iwm_handlers,
+ .get_wireless_stats = iwm_get_wireless_stats,
+};
+
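The handler table above follows the wireless extensions convention that each
slot maps to one ioctl starting at SIOCIWFIRST, which is why unimplemented
ioctls and holes must still be filled with NULL. A minimal sketch of the lookup
the WEXT core performs (illustrative only, based on the iw_handler_def fields
used above; not part of this patch):

	static iw_handler get_std_handler(struct net_device *dev, unsigned int cmd)
	{
		unsigned int idx = cmd - SIOCIWFIRST;	/* slot index into .standard[] */

		if (idx >= dev->wireless_handlers->num_standard)
			return NULL;			/* ioctl not handled by this driver */
		return dev->wireless_handlers->standard[idx];
	}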
diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c
index 4bc46a60ae2..9a5408e7d94 100644
--- a/drivers/net/wireless/libertas/11d.c
+++ b/drivers/net/wireless/libertas/11d.c
@@ -207,7 +207,7 @@ static int generate_domain_info_11d(struct parsed_region_chan_11d
lbs_deb_11d("nr_subband=%x\n", domaininfo->nr_subband);
lbs_deb_hex(LBS_DEB_11D, "domaininfo", (char *)domaininfo,
COUNTRY_CODE_LEN + 1 +
- sizeof(struct ieeetypes_subbandset) * nr_subband);
+ sizeof(struct ieee_subbandset) * nr_subband);
return 0;
}
@@ -302,11 +302,9 @@ done:
* @param parsed_region_chan pointer to parsed_region_chan_11d
* @return 0
*/
-static int parse_domain_info_11d(struct ieeetypes_countryinfofullset*
- countryinfo,
+static int parse_domain_info_11d(struct ieee_ie_country_info_full_set *countryinfo,
u8 band,
- struct parsed_region_chan_11d *
- parsed_region_chan)
+ struct parsed_region_chan_11d *parsed_region_chan)
{
u8 nr_subband, nrchan;
u8 lastchan, firstchan;
@@ -331,7 +329,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset*
lbs_deb_hex(LBS_DEB_11D, "countryinfo", (u8 *) countryinfo, 30);
if ((*(countryinfo->countrycode)) == 0
- || (countryinfo->len <= COUNTRY_CODE_LEN)) {
+ || (countryinfo->header.len <= COUNTRY_CODE_LEN)) {
/* No region Info or Wrong region info: treat as No 11D info */
goto done;
}
@@ -349,8 +347,8 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset*
memcpy(parsed_region_chan->countrycode, countryinfo->countrycode,
COUNTRY_CODE_LEN);
- nr_subband = (countryinfo->len - COUNTRY_CODE_LEN) /
- sizeof(struct ieeetypes_subbandset);
+ nr_subband = (countryinfo->header.len - COUNTRY_CODE_LEN) /
+ sizeof(struct ieee_subbandset);
for (j = 0, lastchan = 0; j < nr_subband; j++) {
@@ -502,7 +500,7 @@ int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
{
struct cmd_ds_802_11d_domain_info *pdomaininfo =
&cmd->params.domaininfo;
- struct mrvlietypes_domainparamset *domain = &pdomaininfo->domain;
+ struct mrvl_ie_domain_param_set *domain = &pdomaininfo->domain;
u8 nr_subband = priv->domainreg.nr_subband;
lbs_deb_enter(LBS_DEB_11D);
@@ -524,16 +522,16 @@ int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
sizeof(domain->countrycode));
domain->header.len =
- cpu_to_le16(nr_subband * sizeof(struct ieeetypes_subbandset) +
+ cpu_to_le16(nr_subband * sizeof(struct ieee_subbandset) +
sizeof(domain->countrycode));
if (nr_subband) {
memcpy(domain->subband, priv->domainreg.subband,
- nr_subband * sizeof(struct ieeetypes_subbandset));
+ nr_subband * sizeof(struct ieee_subbandset));
cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
le16_to_cpu(domain->header.len) +
- sizeof(struct mrvlietypesheader) +
+ sizeof(struct mrvl_ie_header) +
S_DS_GEN);
} else {
cmd->size =
@@ -556,7 +554,7 @@ done:
int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp)
{
struct cmd_ds_802_11d_domain_info *domaininfo = &resp->params.domaininforesp;
- struct mrvlietypes_domainparamset *domain = &domaininfo->domain;
+ struct mrvl_ie_domain_param_set *domain = &domaininfo->domain;
u16 action = le16_to_cpu(domaininfo->action);
s16 ret = 0;
u8 nr_subband = 0;
@@ -567,7 +565,7 @@ int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp)
(int)le16_to_cpu(resp->size));
nr_subband = (le16_to_cpu(domain->header.len) - COUNTRY_CODE_LEN) /
- sizeof(struct ieeetypes_subbandset);
+ sizeof(struct ieee_subbandset);
lbs_deb_11d("domain info resp: nr_subband %d\n", nr_subband);
diff --git a/drivers/net/wireless/libertas/11d.h b/drivers/net/wireless/libertas/11d.h
index 4f4f47f0f87..fb75d3e321a 100644
--- a/drivers/net/wireless/libertas/11d.h
+++ b/drivers/net/wireless/libertas/11d.h
@@ -20,35 +20,36 @@
struct cmd_ds_command;
/** Data structure for Country IE*/
-struct ieeetypes_subbandset {
+struct ieee_subbandset {
u8 firstchan;
u8 nrchan;
u8 maxtxpwr;
} __attribute__ ((packed));
-struct ieeetypes_countryinfoset {
- u8 element_id;
- u8 len;
+struct ieee_ie_country_info_set {
+ struct ieee_ie_header header;
+
u8 countrycode[COUNTRY_CODE_LEN];
- struct ieeetypes_subbandset subband[1];
+ struct ieee_subbandset subband[1];
};
-struct ieeetypes_countryinfofullset {
- u8 element_id;
- u8 len;
+struct ieee_ie_country_info_full_set {
+ struct ieee_ie_header header;
+
u8 countrycode[COUNTRY_CODE_LEN];
- struct ieeetypes_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
+ struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
} __attribute__ ((packed));
-struct mrvlietypes_domainparamset {
- struct mrvlietypesheader header;
+struct mrvl_ie_domain_param_set {
+ struct mrvl_ie_header header;
+
u8 countrycode[COUNTRY_CODE_LEN];
- struct ieeetypes_subbandset subband[1];
+ struct ieee_subbandset subband[1];
} __attribute__ ((packed));
struct cmd_ds_802_11d_domain_info {
__le16 action;
- struct mrvlietypes_domainparamset domain;
+ struct mrvl_ie_domain_param_set domain;
} __attribute__ ((packed));
/** domain regulatory information */
@@ -57,7 +58,7 @@ struct lbs_802_11d_domain_reg {
u8 countrycode[COUNTRY_CODE_LEN];
/** No. of subband*/
u8 nr_subband;
- struct ieeetypes_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
+ struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
};
struct chan_power_11d {
diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README
index d860fc37575..ab6a2d518af 100644
--- a/drivers/net/wireless/libertas/README
+++ b/drivers/net/wireless/libertas/README
@@ -72,7 +72,7 @@ rdrf
location that is to be read. This parameter must be specified in
    hexadecimal (it's possible to precede the number with a "0x").
- Path: /debugfs/libertas_wireless/ethX/registers/
+ Path: /sys/kernel/debug/libertas_wireless/ethX/registers/
Usage:
echo "0xa123" > rdmac ; cat rdmac
@@ -95,7 +95,7 @@ wrrf
sleepparams
This command is used to set the sleepclock configurations
- Path: /debugfs/libertas_wireless/ethX/
+ Path: /sys/kernel/debug/libertas_wireless/ethX/
Usage:
cat sleepparams: reads the current sleepclock configuration
@@ -115,7 +115,7 @@ subscribed_events
The subscribed_events directory contains the interface for the
subscribed events API.
- Path: /debugfs/libertas_wireless/ethX/subscribed_events/
+ Path: /sys/kernel/debug/libertas_wireless/ethX/subscribed_events/
Each event is represented by a filename. Each filename consists of the
following three fields:
@@ -165,7 +165,7 @@ subscribed_events
extscan
This command is used to do a specific scan.
- Path: /debugfs/libertas_wireless/ethX/
+ Path: /sys/kernel/debug/libertas_wireless/ethX/
Usage: echo "SSID" > extscan
@@ -179,7 +179,7 @@ getscantable
Display the current contents of the driver scan table (ie. get the
scan results).
- Path: /debugfs/libertas_wireless/ethX/
+ Path: /sys/kernel/debug/libertas_wireless/ethX/
Usage:
cat getscantable
@@ -188,7 +188,7 @@ setuserscan
Initiate a customized scan and retrieve the results
- Path: /debugfs/libertas_wireless/ethX/
+ Path: /sys/kernel/debug/libertas_wireless/ethX/
Usage:
echo "[ARGS]" > setuserscan
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index a0e440cd896..b9b37411903 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -12,15 +12,14 @@
#include "scan.h"
#include "cmd.h"
-static int lbs_adhoc_post(struct lbs_private *priv, struct cmd_header *resp);
-
static const u8 bssid_any[ETH_ALEN] __attribute__ ((aligned (2))) =
{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) =
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-/* The firmware needs certain bits masked out of the beacon-derviced capability
- * field when associating/joining to BSSs.
+/* The firmware needs the following bits masked out of the beacon-derived
+ * capability field when associating/joining to a BSS:
+ * 9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused)
*/
#define CAPINFO_MASK (~(0xda00))
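For reference, the masked-out bits listed in the comment add up as
0x0200 | 0x0800 | 0x1000 | 0x4000 | 0x8000 = 0xda00 (bits 9, 11, 12, 14 and 15),
which is exactly the value being complemented in CAPINFO_MASK.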
@@ -102,6 +101,295 @@ static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
}
+static u8 iw_auth_to_ieee_auth(u8 auth)
+{
+ if (auth == IW_AUTH_ALG_OPEN_SYSTEM)
+ return 0x00;
+ else if (auth == IW_AUTH_ALG_SHARED_KEY)
+ return 0x01;
+ else if (auth == IW_AUTH_ALG_LEAP)
+ return 0x80;
+
+ lbs_deb_join("%s: invalid auth alg 0x%X\n", __func__, auth);
+ return 0;
+}
+
+/**
+ * @brief This function prepares the authenticate command. AUTHENTICATE only
+ * sets the authentication suite for future associations, as the firmware
+ * handles authentication internally during the ASSOCIATE command.
+ *
+ * @param priv A pointer to struct lbs_private structure
+ * @param bssid The peer BSSID with which to authenticate
+ * @param auth The authentication mode to use (from wireless.h)
+ *
+ * @return 0 or -1
+ */
+static int lbs_set_authentication(struct lbs_private *priv, u8 bssid[6], u8 auth)
+{
+ struct cmd_ds_802_11_authenticate cmd;
+ int ret = -1;
+ DECLARE_MAC_BUF(mac);
+
+ lbs_deb_enter(LBS_DEB_JOIN);
+
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+ memcpy(cmd.bssid, bssid, ETH_ALEN);
+
+ cmd.authtype = iw_auth_to_ieee_auth(auth);
+
+ lbs_deb_join("AUTH_CMD: BSSID %s, auth 0x%x\n",
+ print_mac(mac, bssid), cmd.authtype);
+
+ ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd);
+
+ lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
+ return ret;
+}
+
+
+static int lbs_assoc_post(struct lbs_private *priv,
+ struct cmd_ds_802_11_associate_response *resp)
+{
+ int ret = 0;
+ union iwreq_data wrqu;
+ struct bss_descriptor *bss;
+ u16 status_code;
+
+ lbs_deb_enter(LBS_DEB_ASSOC);
+
+ if (!priv->in_progress_assoc_req) {
+ lbs_deb_assoc("ASSOC_RESP: no in-progress assoc request\n");
+ ret = -1;
+ goto done;
+ }
+ bss = &priv->in_progress_assoc_req->bss;
+
+ /*
+ * Older FW versions map the IEEE 802.11 Status Code in the association
+ * response to the following values returned in resp->statuscode:
+ *
+ * IEEE Status Code Marvell Status Code
+ * 0 -> 0x0000 ASSOC_RESULT_SUCCESS
+ * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
+ * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
+ * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
+ * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
+ * others -> 0x0003 ASSOC_RESULT_REFUSED
+ *
+ * Other response codes:
+ * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused)
+ * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for
+ * association response from the AP)
+ */
+
+ status_code = le16_to_cpu(resp->statuscode);
+ if (priv->fwrelease < 0x09000000) {
+ switch (status_code) {
+ case 0x00:
+ break;
+ case 0x01:
+ lbs_deb_assoc("ASSOC_RESP: invalid parameters\n");
+ break;
+ case 0x02:
+ lbs_deb_assoc("ASSOC_RESP: internal timer "
+ "expired while waiting for the AP\n");
+ break;
+ case 0x03:
+ lbs_deb_assoc("ASSOC_RESP: association "
+ "refused by AP\n");
+ break;
+ case 0x04:
+ lbs_deb_assoc("ASSOC_RESP: authentication "
+ "refused by AP\n");
+ break;
+ default:
+ lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x "
+ " unknown\n", status_code);
+ break;
+ }
+ } else {
+ /* v9+ returns the AP's association response */
+ lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x\n", status_code);
+ }
+
+ if (status_code) {
+ lbs_mac_event_disconnected(priv);
+ ret = -1;
+ goto done;
+ }
+
+ lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_RESP",
+ (void *) (resp + sizeof (resp->hdr)),
+ le16_to_cpu(resp->hdr.size) - sizeof (resp->hdr));
+
+ /* Send a Media Connected event, according to the Spec */
+ priv->connect_status = LBS_CONNECTED;
+
+ /* Update current SSID and BSSID */
+ memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
+ priv->curbssparams.ssid_len = bss->ssid_len;
+ memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
+
+ priv->SNR[TYPE_RXPD][TYPE_AVG] = 0;
+ priv->NF[TYPE_RXPD][TYPE_AVG] = 0;
+
+ memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR));
+ memset(priv->rawNF, 0x00, sizeof(priv->rawNF));
+ priv->nextSNRNF = 0;
+ priv->numSNRNF = 0;
+
+ netif_carrier_on(priv->dev);
+ if (!priv->tx_pending_len)
+ netif_wake_queue(priv->dev);
+
+ memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+
+done:
+ lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
+ return ret;
+}
+
+/**
+ * @brief This function prepares an association-class command.
+ *
+ * @param priv A pointer to struct lbs_private structure
+ * @param assoc_req The association request describing the BSS to associate
+ * or reassociate with
+ * @param command The actual command, either CMD_802_11_ASSOCIATE or
+ * CMD_802_11_REASSOCIATE
+ *
+ * @return 0 or -1
+ */
+static int lbs_associate(struct lbs_private *priv,
+ struct assoc_request *assoc_req,
+ u16 command)
+{
+ struct cmd_ds_802_11_associate cmd;
+ int ret = 0;
+ struct bss_descriptor *bss = &assoc_req->bss;
+ u8 *pos = &(cmd.iebuf[0]);
+ u16 tmpcap, tmplen, tmpauth;
+ struct mrvl_ie_ssid_param_set *ssid;
+ struct mrvl_ie_ds_param_set *ds;
+ struct mrvl_ie_cf_param_set *cf;
+ struct mrvl_ie_rates_param_set *rates;
+ struct mrvl_ie_rsn_param_set *rsn;
+ struct mrvl_ie_auth_type *auth;
+
+ lbs_deb_enter(LBS_DEB_ASSOC);
+
+ BUG_ON((command != CMD_802_11_ASSOCIATE) &&
+ (command != CMD_802_11_REASSOCIATE));
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.command = cpu_to_le16(command);
+
+ /* Fill in static fields */
+ memcpy(cmd.bssid, bss->bssid, ETH_ALEN);
+ cmd.listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL);
+
+ /* Capability info */
+ tmpcap = (bss->capability & CAPINFO_MASK);
+ if (bss->mode == IW_MODE_INFRA)
+ tmpcap |= WLAN_CAPABILITY_ESS;
+ cmd.capability = cpu_to_le16(tmpcap);
+ lbs_deb_assoc("ASSOC_CMD: capability 0x%04x\n", tmpcap);
+
+ /* SSID */
+ ssid = (struct mrvl_ie_ssid_param_set *) pos;
+ ssid->header.type = cpu_to_le16(TLV_TYPE_SSID);
+ tmplen = bss->ssid_len;
+ ssid->header.len = cpu_to_le16(tmplen);
+ memcpy(ssid->ssid, bss->ssid, tmplen);
+ pos += sizeof(ssid->header) + tmplen;
+
+ ds = (struct mrvl_ie_ds_param_set *) pos;
+ ds->header.type = cpu_to_le16(TLV_TYPE_PHY_DS);
+ ds->header.len = cpu_to_le16(1);
+ ds->channel = bss->phy.ds.channel;
+ pos += sizeof(ds->header) + 1;
+
+ cf = (struct mrvl_ie_cf_param_set *) pos;
+ cf->header.type = cpu_to_le16(TLV_TYPE_CF);
+ tmplen = sizeof(*cf) - sizeof (cf->header);
+ cf->header.len = cpu_to_le16(tmplen);
+ /* IE payload should be zeroed, firmware fills it in for us */
+ pos += sizeof(*cf);
+
+ rates = (struct mrvl_ie_rates_param_set *) pos;
+ rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
+ memcpy(&rates->rates, &bss->rates, MAX_RATES);
+ tmplen = MAX_RATES;
+ if (get_common_rates(priv, rates->rates, &tmplen)) {
+ ret = -1;
+ goto done;
+ }
+ pos += sizeof(rates->header) + tmplen;
+ rates->header.len = cpu_to_le16(tmplen);
+ lbs_deb_assoc("ASSOC_CMD: num rates %u\n", tmplen);
+
+ /* Copy the infra. association rates into Current BSS state structure */
+ memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
+ memcpy(&priv->curbssparams.rates, &rates->rates, tmplen);
+
+ /* Set MSB on basic rates as the firmware requires, but _after_
+ * copying to current bss rates.
+ */
+ lbs_set_basic_rate_flags(rates->rates, tmplen);
+
+ /* Firmware v9+ indicate authentication suites as a TLV */
+ if (priv->fwrelease >= 0x09000000) {
+ DECLARE_MAC_BUF(mac);
+
+ auth = (struct mrvl_ie_auth_type *) pos;
+ auth->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
+ auth->header.len = cpu_to_le16(2);
+ tmpauth = iw_auth_to_ieee_auth(priv->secinfo.auth_mode);
+ auth->auth = cpu_to_le16(tmpauth);
+ pos += sizeof(auth->header) + 2;
+
+ lbs_deb_join("AUTH_CMD: BSSID %s, auth 0x%x\n",
+ print_mac(mac, bss->bssid), priv->secinfo.auth_mode);
+ }
+
+ /* WPA/WPA2 IEs */
+ if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) {
+ rsn = (struct mrvl_ie_rsn_param_set *) pos;
+ /* WPA_IE or WPA2_IE */
+ rsn->header.type = cpu_to_le16((u16) assoc_req->wpa_ie[0]);
+ tmplen = (u16) assoc_req->wpa_ie[1];
+ rsn->header.len = cpu_to_le16(tmplen);
+ memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen);
+ lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: WPA/RSN IE", (u8 *) rsn,
+ sizeof(rsn->header) + tmplen);
+ pos += sizeof(rsn->header) + tmplen;
+ }
+
+ cmd.hdr.size = cpu_to_le16((sizeof(cmd) - sizeof(cmd.iebuf)) +
+ (u16)(pos - (u8 *) &cmd.iebuf));
+
+ /* update curbssparams */
+ priv->curbssparams.channel = bss->phy.ds.channel;
+
+ if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
+ ret = -1;
+ goto done;
+ }
+
+ ret = lbs_cmd_with_response(priv, command, &cmd);
+ if (ret == 0) {
+ ret = lbs_assoc_post(priv,
+ (struct cmd_ds_802_11_associate_response *) &cmd);
+ }
+
+done:
+ lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
+ return ret;
+}
+
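A quick worked example of the MSB flagging mentioned above, assuming the
standard 802.11 encoding of supported rates in units of 500 kbps: the 2 Mbps
rate is carried as 0x04, and flagging it as a basic rate for the firmware sets
bit 7, giving 0x04 | 0x80 = 0x84. That is why lbs_set_basic_rate_flags() is
only called after the unmodified rates have been copied into
priv->curbssparams.rates.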
/**
* @brief Associate to a specific BSS discovered in a scan
*
@@ -110,7 +398,7 @@ static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
*
* @return 0-success, otherwise fail
*/
-static int lbs_associate(struct lbs_private *priv,
+static int lbs_try_associate(struct lbs_private *priv,
struct assoc_request *assoc_req)
{
int ret;
@@ -118,11 +406,15 @@ static int lbs_associate(struct lbs_private *priv,
lbs_deb_enter(LBS_DEB_ASSOC);
- ret = lbs_prepare_and_send_command(priv, CMD_802_11_AUTHENTICATE,
- 0, CMD_OPTION_WAITFORRSP,
- 0, assoc_req->bss.bssid);
- if (ret)
- goto out;
+ /* FW v9 and higher indicate authentication suites as a TLV in the
+ * association command, not as a separate authentication command.
+ */
+ if (priv->fwrelease < 0x09000000) {
+ ret = lbs_set_authentication(priv, assoc_req->bss.bssid,
+ priv->secinfo.auth_mode);
+ if (ret)
+ goto out;
+ }
/* Use short preamble only when both the BSS and firmware support it */
if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
@@ -133,14 +425,78 @@ static int lbs_associate(struct lbs_private *priv,
if (ret)
goto out;
- ret = lbs_prepare_and_send_command(priv, CMD_802_11_ASSOCIATE,
- 0, CMD_OPTION_WAITFORRSP, 0, assoc_req);
+ ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE);
out:
lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
return ret;
}
+static int lbs_adhoc_post(struct lbs_private *priv,
+ struct cmd_ds_802_11_ad_hoc_result *resp)
+{
+ int ret = 0;
+ u16 command = le16_to_cpu(resp->hdr.command);
+ u16 result = le16_to_cpu(resp->hdr.result);
+ union iwreq_data wrqu;
+ struct bss_descriptor *bss;
+ DECLARE_SSID_BUF(ssid);
+
+ lbs_deb_enter(LBS_DEB_JOIN);
+
+ if (!priv->in_progress_assoc_req) {
+ lbs_deb_join("ADHOC_RESP: no in-progress association "
+ "request\n");
+ ret = -1;
+ goto done;
+ }
+ bss = &priv->in_progress_assoc_req->bss;
+
+ /*
+ * Join result code 0 --> SUCCESS
+ */
+ if (result) {
+ lbs_deb_join("ADHOC_RESP: failed (result 0x%X)\n", result);
+ if (priv->connect_status == LBS_CONNECTED)
+ lbs_mac_event_disconnected(priv);
+ ret = -1;
+ goto done;
+ }
+
+ /* Send a Media Connected event, according to the Spec */
+ priv->connect_status = LBS_CONNECTED;
+
+ if (command == CMD_RET(CMD_802_11_AD_HOC_START)) {
+ /* Update the created network descriptor with the new BSSID */
+ memcpy(bss->bssid, resp->bssid, ETH_ALEN);
+ }
+
+ /* Set the BSSID from the joined/started descriptor */
+ memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
+
+ /* Set the new SSID to current SSID */
+ memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
+ priv->curbssparams.ssid_len = bss->ssid_len;
+
+ netif_carrier_on(priv->dev);
+ if (!priv->tx_pending_len)
+ netif_wake_queue(priv->dev);
+
+ memset(&wrqu, 0, sizeof(wrqu));
+ memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+
+ lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
+ print_ssid(ssid, bss->ssid, bss->ssid_len),
+ priv->curbssparams.bssid,
+ priv->curbssparams.channel);
+
+done:
+ lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
+ return ret;
+}
+
/**
* @brief Join an adhoc network found in a previous scan
*
@@ -219,11 +575,10 @@ static int lbs_adhoc_join(struct lbs_private *priv,
memcpy(&cmd.bss.bssid, &bss->bssid, ETH_ALEN);
memcpy(&cmd.bss.ssid, &bss->ssid, bss->ssid_len);
- memcpy(&cmd.bss.phyparamset, &bss->phyparamset,
- sizeof(union ieeetypes_phyparamset));
+ memcpy(&cmd.bss.ds, &bss->phy.ds, sizeof(struct ieee_ie_ds_param_set));
- memcpy(&cmd.bss.ssparamset, &bss->ssparamset,
- sizeof(union IEEEtypes_ssparamset));
+ memcpy(&cmd.bss.ibss, &bss->ss.ibss,
+ sizeof(struct ieee_ie_ibss_param_set));
cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
@@ -260,7 +615,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
*/
lbs_set_basic_rate_flags(cmd.bss.rates, ratesize);
- cmd.bss.ssparamset.ibssparamset.atimwindow = cpu_to_le16(bss->atimwindow);
+ cmd.bss.ibss.atimwindow = bss->atimwindow;
if (assoc_req->secinfo.wep_enabled) {
u16 tmp = le16_to_cpu(cmd.bss.capability);
@@ -287,8 +642,10 @@ static int lbs_adhoc_join(struct lbs_private *priv,
}
ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
- if (ret == 0)
- ret = lbs_adhoc_post(priv, (struct cmd_header *) &cmd);
+ if (ret == 0) {
+ ret = lbs_adhoc_post(priv,
+ (struct cmd_ds_802_11_ad_hoc_result *)&cmd);
+ }
out:
lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -343,22 +700,24 @@ static int lbs_adhoc_start(struct lbs_private *priv,
WARN_ON(!assoc_req->channel);
/* set Physical parameter set */
- cmd.phyparamset.dsparamset.elementid = WLAN_EID_DS_PARAMS;
- cmd.phyparamset.dsparamset.len = 1;
- cmd.phyparamset.dsparamset.currentchan = assoc_req->channel;
+ cmd.ds.header.id = WLAN_EID_DS_PARAMS;
+ cmd.ds.header.len = 1;
+ cmd.ds.channel = assoc_req->channel;
/* set IBSS parameter set */
- cmd.ssparamset.ibssparamset.elementid = WLAN_EID_IBSS_PARAMS;
- cmd.ssparamset.ibssparamset.len = 2;
- cmd.ssparamset.ibssparamset.atimwindow = 0;
+ cmd.ibss.header.id = WLAN_EID_IBSS_PARAMS;
+ cmd.ibss.header.len = 2;
+ cmd.ibss.atimwindow = cpu_to_le16(0);
/* set capability info */
tmpcap = WLAN_CAPABILITY_IBSS;
- if (assoc_req->secinfo.wep_enabled) {
- lbs_deb_join("ADHOC_START: WEP enabled, setting privacy on\n");
+ if (assoc_req->secinfo.wep_enabled ||
+ assoc_req->secinfo.WPAenabled ||
+ assoc_req->secinfo.WPA2enabled) {
+ lbs_deb_join("ADHOC_START: WEP/WPA enabled, privacy on\n");
tmpcap |= WLAN_CAPABILITY_PRIVACY;
} else
- lbs_deb_join("ADHOC_START: WEP disabled, setting privacy off\n");
+ lbs_deb_join("ADHOC_START: WEP disabled, privacy off\n");
cmd.capability = cpu_to_le16(tmpcap);
@@ -395,7 +754,8 @@ static int lbs_adhoc_start(struct lbs_private *priv,
ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd);
if (ret == 0)
- ret = lbs_adhoc_post(priv, (struct cmd_header *) &cmd);
+ ret = lbs_adhoc_post(priv,
+ (struct cmd_ds_802_11_ad_hoc_result *)&cmd);
out:
lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -720,7 +1080,7 @@ static int assoc_helper_essid(struct lbs_private *priv,
assoc_req->ssid_len, NULL, IW_MODE_INFRA, channel);
if (bss != NULL) {
memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
- ret = lbs_associate(priv, assoc_req);
+ ret = lbs_try_associate(priv, assoc_req);
} else {
lbs_deb_assoc("SSID not found; cannot associate\n");
}
@@ -772,8 +1132,9 @@ static int assoc_helper_bssid(struct lbs_private *priv,
memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
if (assoc_req->mode == IW_MODE_INFRA) {
- ret = lbs_associate(priv, assoc_req);
- lbs_deb_assoc("ASSOC: lbs_associate(bssid) returned %d\n", ret);
+ ret = lbs_try_associate(priv, assoc_req);
+ lbs_deb_assoc("ASSOC: lbs_try_associate(bssid) returned %d\n",
+ ret);
} else if (assoc_req->mode == IW_MODE_ADHOC) {
lbs_adhoc_join(priv, assoc_req);
}
@@ -1467,57 +1828,6 @@ struct assoc_request *lbs_get_association_request(struct lbs_private *priv)
/**
- * @brief This function prepares command of authenticate.
- *
- * @param priv A pointer to struct lbs_private structure
- * @param cmd A pointer to cmd_ds_command structure
- * @param pdata_buf Void cast of pointer to a BSSID to authenticate with
- *
- * @return 0 or -1
- */
-int lbs_cmd_80211_authenticate(struct lbs_private *priv,
- struct cmd_ds_command *cmd,
- void *pdata_buf)
-{
- struct cmd_ds_802_11_authenticate *pauthenticate = &cmd->params.auth;
- int ret = -1;
- u8 *bssid = pdata_buf;
-
- lbs_deb_enter(LBS_DEB_JOIN);
-
- cmd->command = cpu_to_le16(CMD_802_11_AUTHENTICATE);
- cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_authenticate)
- + S_DS_GEN);
-
- /* translate auth mode to 802.11 defined wire value */
- switch (priv->secinfo.auth_mode) {
- case IW_AUTH_ALG_OPEN_SYSTEM:
- pauthenticate->authtype = 0x00;
- break;
- case IW_AUTH_ALG_SHARED_KEY:
- pauthenticate->authtype = 0x01;
- break;
- case IW_AUTH_ALG_LEAP:
- pauthenticate->authtype = 0x80;
- break;
- default:
- lbs_deb_join("AUTH_CMD: invalid auth alg 0x%X\n",
- priv->secinfo.auth_mode);
- goto out;
- }
-
- memcpy(pauthenticate->macaddr, bssid, ETH_ALEN);
-
- lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n",
- bssid, pauthenticate->authtype);
- ret = 0;
-
-out:
- lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
- return ret;
-}
-
-/**
* @brief Deauthenticate from a specific BSS
*
* @param priv A pointer to struct lbs_private structure
@@ -1550,285 +1860,3 @@ int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, u8 bssid[ETH_ALEN],
return ret;
}
-int lbs_cmd_80211_associate(struct lbs_private *priv,
- struct cmd_ds_command *cmd, void *pdata_buf)
-{
- struct cmd_ds_802_11_associate *passo = &cmd->params.associate;
- int ret = 0;
- struct assoc_request *assoc_req = pdata_buf;
- struct bss_descriptor *bss = &assoc_req->bss;
- u8 *pos;
- u16 tmpcap, tmplen;
- struct mrvlietypes_ssidparamset *ssid;
- struct mrvlietypes_phyparamset *phy;
- struct mrvlietypes_ssparamset *ss;
- struct mrvlietypes_ratesparamset *rates;
- struct mrvlietypes_rsnparamset *rsn;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- pos = (u8 *) passo;
-
- if (!priv) {
- ret = -1;
- goto done;
- }
-
- cmd->command = cpu_to_le16(CMD_802_11_ASSOCIATE);
-
- memcpy(passo->peerstaaddr, bss->bssid, sizeof(passo->peerstaaddr));
- pos += sizeof(passo->peerstaaddr);
-
- /* set the listen interval */
- passo->listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL);
-
- pos += sizeof(passo->capability);
- pos += sizeof(passo->listeninterval);
- pos += sizeof(passo->bcnperiod);
- pos += sizeof(passo->dtimperiod);
-
- ssid = (struct mrvlietypes_ssidparamset *) pos;
- ssid->header.type = cpu_to_le16(TLV_TYPE_SSID);
- tmplen = bss->ssid_len;
- ssid->header.len = cpu_to_le16(tmplen);
- memcpy(ssid->ssid, bss->ssid, tmplen);
- pos += sizeof(ssid->header) + tmplen;
-
- phy = (struct mrvlietypes_phyparamset *) pos;
- phy->header.type = cpu_to_le16(TLV_TYPE_PHY_DS);
- tmplen = sizeof(phy->fh_ds.dsparamset);
- phy->header.len = cpu_to_le16(tmplen);
- memcpy(&phy->fh_ds.dsparamset,
- &bss->phyparamset.dsparamset.currentchan,
- tmplen);
- pos += sizeof(phy->header) + tmplen;
-
- ss = (struct mrvlietypes_ssparamset *) pos;
- ss->header.type = cpu_to_le16(TLV_TYPE_CF);
- tmplen = sizeof(ss->cf_ibss.cfparamset);
- ss->header.len = cpu_to_le16(tmplen);
- pos += sizeof(ss->header) + tmplen;
-
- rates = (struct mrvlietypes_ratesparamset *) pos;
- rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
- memcpy(&rates->rates, &bss->rates, MAX_RATES);
- tmplen = MAX_RATES;
- if (get_common_rates(priv, rates->rates, &tmplen)) {
- ret = -1;
- goto done;
- }
- pos += sizeof(rates->header) + tmplen;
- rates->header.len = cpu_to_le16(tmplen);
- lbs_deb_assoc("ASSOC_CMD: num rates %u\n", tmplen);
-
- /* Copy the infra. association rates into Current BSS state structure */
- memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
- memcpy(&priv->curbssparams.rates, &rates->rates, tmplen);
-
- /* Set MSB on basic rates as the firmware requires, but _after_
- * copying to current bss rates.
- */
- lbs_set_basic_rate_flags(rates->rates, tmplen);
-
- if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) {
- rsn = (struct mrvlietypes_rsnparamset *) pos;
- /* WPA_IE or WPA2_IE */
- rsn->header.type = cpu_to_le16((u16) assoc_req->wpa_ie[0]);
- tmplen = (u16) assoc_req->wpa_ie[1];
- rsn->header.len = cpu_to_le16(tmplen);
- memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen);
- lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: RSN IE", (u8 *) rsn,
- sizeof(rsn->header) + tmplen);
- pos += sizeof(rsn->header) + tmplen;
- }
-
- /* update curbssparams */
- priv->curbssparams.channel = bss->phyparamset.dsparamset.currentchan;
-
- if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
- ret = -1;
- goto done;
- }
-
- cmd->size = cpu_to_le16((u16) (pos - (u8 *) passo) + S_DS_GEN);
-
- /* set the capability info */
- tmpcap = (bss->capability & CAPINFO_MASK);
- if (bss->mode == IW_MODE_INFRA)
- tmpcap |= WLAN_CAPABILITY_ESS;
- passo->capability = cpu_to_le16(tmpcap);
- lbs_deb_assoc("ASSOC_CMD: capability 0x%04x\n", tmpcap);
-
-done:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-int lbs_ret_80211_associate(struct lbs_private *priv,
- struct cmd_ds_command *resp)
-{
- int ret = 0;
- union iwreq_data wrqu;
- struct ieeetypes_assocrsp *passocrsp;
- struct bss_descriptor *bss;
- u16 status_code;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- if (!priv->in_progress_assoc_req) {
- lbs_deb_assoc("ASSOC_RESP: no in-progress assoc request\n");
- ret = -1;
- goto done;
- }
- bss = &priv->in_progress_assoc_req->bss;
-
- passocrsp = (struct ieeetypes_assocrsp *) &resp->params;
-
- /*
- * Older FW versions map the IEEE 802.11 Status Code in the association
- * response to the following values returned in passocrsp->statuscode:
- *
- * IEEE Status Code Marvell Status Code
- * 0 -> 0x0000 ASSOC_RESULT_SUCCESS
- * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
- * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
- * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
- * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
- * others -> 0x0003 ASSOC_RESULT_REFUSED
- *
- * Other response codes:
- * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused)
- * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for
- * association response from the AP)
- */
-
- status_code = le16_to_cpu(passocrsp->statuscode);
- switch (status_code) {
- case 0x00:
- break;
- case 0x01:
- lbs_deb_assoc("ASSOC_RESP: invalid parameters\n");
- break;
- case 0x02:
- lbs_deb_assoc("ASSOC_RESP: internal timer "
- "expired while waiting for the AP\n");
- break;
- case 0x03:
- lbs_deb_assoc("ASSOC_RESP: association "
- "refused by AP\n");
- break;
- case 0x04:
- lbs_deb_assoc("ASSOC_RESP: authentication "
- "refused by AP\n");
- break;
- default:
- lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x "
- " unknown\n", status_code);
- break;
- }
-
- if (status_code) {
- lbs_mac_event_disconnected(priv);
- ret = -1;
- goto done;
- }
-
- lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_RESP", (void *)&resp->params,
- le16_to_cpu(resp->size) - S_DS_GEN);
-
- /* Send a Media Connected event, according to the Spec */
- priv->connect_status = LBS_CONNECTED;
-
- /* Update current SSID and BSSID */
- memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
- priv->curbssparams.ssid_len = bss->ssid_len;
- memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
-
- priv->SNR[TYPE_RXPD][TYPE_AVG] = 0;
- priv->NF[TYPE_RXPD][TYPE_AVG] = 0;
-
- memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR));
- memset(priv->rawNF, 0x00, sizeof(priv->rawNF));
- priv->nextSNRNF = 0;
- priv->numSNRNF = 0;
-
- netif_carrier_on(priv->dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->dev);
-
- memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
-
-done:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-static int lbs_adhoc_post(struct lbs_private *priv, struct cmd_header *resp)
-{
- int ret = 0;
- u16 command = le16_to_cpu(resp->command);
- u16 result = le16_to_cpu(resp->result);
- struct cmd_ds_802_11_ad_hoc_result *adhoc_resp;
- union iwreq_data wrqu;
- struct bss_descriptor *bss;
- DECLARE_SSID_BUF(ssid);
-
- lbs_deb_enter(LBS_DEB_JOIN);
-
- adhoc_resp = (struct cmd_ds_802_11_ad_hoc_result *) resp;
-
- if (!priv->in_progress_assoc_req) {
- lbs_deb_join("ADHOC_RESP: no in-progress association "
- "request\n");
- ret = -1;
- goto done;
- }
- bss = &priv->in_progress_assoc_req->bss;
-
- /*
- * Join result code 0 --> SUCCESS
- */
- if (result) {
- lbs_deb_join("ADHOC_RESP: failed (result 0x%X)\n", result);
- if (priv->connect_status == LBS_CONNECTED)
- lbs_mac_event_disconnected(priv);
- ret = -1;
- goto done;
- }
-
- /* Send a Media Connected event, according to the Spec */
- priv->connect_status = LBS_CONNECTED;
-
- if (command == CMD_RET(CMD_802_11_AD_HOC_START)) {
- /* Update the created network descriptor with the new BSSID */
- memcpy(bss->bssid, adhoc_resp->bssid, ETH_ALEN);
- }
-
- /* Set the BSSID from the joined/started descriptor */
- memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
-
- /* Set the new SSID to current SSID */
- memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
- priv->curbssparams.ssid_len = bss->ssid_len;
-
- netif_carrier_on(priv->dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->dev);
-
- memset(&wrqu, 0, sizeof(wrqu));
- memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
-
- lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
- print_ssid(ssid, bss->ssid, bss->ssid_len),
- priv->curbssparams.bssid,
- priv->curbssparams.channel);
-
-done:
- lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
- return ret;
-}
-
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
index 8b7336dd02a..6e765e9f91a 100644
--- a/drivers/net/wireless/libertas/assoc.h
+++ b/drivers/net/wireless/libertas/assoc.h
@@ -8,22 +8,9 @@
void lbs_association_worker(struct work_struct *work);
struct assoc_request *lbs_get_association_request(struct lbs_private *priv);
-struct cmd_ds_command;
-int lbs_cmd_80211_authenticate(struct lbs_private *priv,
- struct cmd_ds_command *cmd,
- void *pdata_buf);
-
int lbs_adhoc_stop(struct lbs_private *priv);
int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
u8 bssid[ETH_ALEN], u16 reason);
-int lbs_cmd_80211_associate(struct lbs_private *priv,
- struct cmd_ds_command *cmd,
- void *pdata_buf);
-
-int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
- struct cmd_ds_command *resp);
-int lbs_ret_80211_associate(struct lbs_private *priv,
- struct cmd_ds_command *resp);
#endif /* _LBS_ASSOC_H */
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 8c3605cdc64..01db705a38e 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -119,6 +119,19 @@ int lbs_update_hw_spec(struct lbs_private *priv)
lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
cmd.hwifversion, cmd.version);
+ /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
+ /* 5.0.16p0 and 9.0.0.p0 are known to NOT support any mesh */
+ /* 5.110.22 has the mesh command with command id 0xa3 */
+ /* 10.0.0.p0 FW brings in the mesh config command with a different id */
+ /* Check FW version MSB and initialize mesh_fw_ver */
+ if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
+ priv->mesh_fw_ver = MESH_FW_OLD;
+ else if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
+ (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK))
+ priv->mesh_fw_ver = MESH_FW_NEW;
+ else
+ priv->mesh_fw_ver = MESH_NONE;
+
/* Clamp region code to 8-bit since FW spec indicates that it should
* only ever be 8-bit, even though the field size is 16-bit. Some firmware
* returns non-zero high 8 bits here.
@@ -1036,17 +1049,26 @@ static int __lbs_mesh_config_send(struct lbs_private *priv,
uint16_t action, uint16_t type)
{
int ret;
+ u16 command = CMD_MESH_CONFIG_OLD;
lbs_deb_enter(LBS_DEB_CMD);
- cmd->hdr.command = cpu_to_le16(CMD_MESH_CONFIG);
+ /*
+ * Command id is 0xac for v10 FW along with mesh interface
+ * id in bits 14-13-12.
+ */
+ if (priv->mesh_fw_ver == MESH_FW_NEW)
+ command = CMD_MESH_CONFIG |
+ (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);
+
+ cmd->hdr.command = cpu_to_le16(command);
cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_mesh_config));
cmd->hdr.result = 0;
cmd->type = cpu_to_le16(type);
cmd->action = cpu_to_le16(action);
- ret = lbs_cmd_with_response(priv, CMD_MESH_CONFIG, cmd);
+ ret = lbs_cmd_with_response(priv, command, cmd);
lbs_deb_leave(LBS_DEB_CMD);
return ret;
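With the constants this patch adds elsewhere (MESH_IFACE_ID 0x0001 and
MESH_IFACE_BIT_OFFSET 0x000c in defs.h, CMD_MESH_CONFIG 0x00ac in host.h), the
v10 command id above works out to 0x00ac | (0x0001 << 12) = 0x10ac, i.e. the
mesh interface id lands in bits 12-14 as the comment describes; older firmware
keeps using CMD_MESH_CONFIG_OLD (0x00a3).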
@@ -1198,8 +1220,7 @@ static void lbs_submit_command(struct lbs_private *priv,
command = le16_to_cpu(cmd->command);
/* These commands take longer */
- if (command == CMD_802_11_SCAN || command == CMD_802_11_ASSOCIATE ||
- command == CMD_802_11_AUTHENTICATE)
+ if (command == CMD_802_11_SCAN || command == CMD_802_11_ASSOCIATE)
timeo = 5 * HZ;
lbs_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n",
@@ -1393,15 +1414,6 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
ret = lbs_cmd_802_11_ps_mode(cmdptr, cmd_action);
break;
- case CMD_802_11_ASSOCIATE:
- case CMD_802_11_REASSOCIATE:
- ret = lbs_cmd_80211_associate(priv, cmdptr, pdata_buf);
- break;
-
- case CMD_802_11_AUTHENTICATE:
- ret = lbs_cmd_80211_authenticate(priv, cmdptr, pdata_buf);
- break;
-
case CMD_MAC_REG_ACCESS:
case CMD_BBP_REG_ACCESS:
case CMD_RF_REG_ACCESS:
@@ -1448,8 +1460,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
break;
case CMD_802_11_LED_GPIO_CTRL:
{
- struct mrvlietypes_ledgpio *gpio =
- (struct mrvlietypes_ledgpio*)
+ struct mrvl_ie_ledgpio *gpio =
+ (struct mrvl_ie_ledgpio*)
cmdptr->params.ledgpio.data;
memmove(&cmdptr->params.ledgpio,
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index bcf2a9756fb..c42d3faa266 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -5,7 +5,7 @@
#include <linux/delay.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
-
+#include <asm/unaligned.h>
#include <net/iw_handler.h>
#include "host.h"
@@ -154,11 +154,11 @@ static int lbs_ret_802_11_rssi(struct lbs_private *priv,
lbs_deb_enter(LBS_DEB_CMD);
/* store the non average value */
- priv->SNR[TYPE_BEACON][TYPE_NOAVG] = le16_to_cpu(rssirsp->SNR);
- priv->NF[TYPE_BEACON][TYPE_NOAVG] = le16_to_cpu(rssirsp->noisefloor);
+ priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
+ priv->NF[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->noisefloor);
- priv->SNR[TYPE_BEACON][TYPE_AVG] = le16_to_cpu(rssirsp->avgSNR);
- priv->NF[TYPE_BEACON][TYPE_AVG] = le16_to_cpu(rssirsp->avgnoisefloor);
+ priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
+ priv->NF[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgnoisefloor);
priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
@@ -210,12 +210,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
ret = lbs_ret_reg_access(priv, respcmd, resp);
break;
- case CMD_RET_802_11_ASSOCIATE:
- case CMD_RET(CMD_802_11_ASSOCIATE):
- case CMD_RET(CMD_802_11_REASSOCIATE):
- ret = lbs_ret_80211_associate(priv, resp);
- break;
-
case CMD_RET(CMD_802_11_SET_AFC):
case CMD_RET(CMD_802_11_GET_AFC):
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -225,7 +219,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
break;
- case CMD_RET(CMD_802_11_AUTHENTICATE):
case CMD_RET(CMD_802_11_BEACON_STOP):
break;
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 50e28a0cdfe..811ffc3ef41 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -183,12 +183,12 @@ out_unlock:
*/
static void *lbs_tlv_find(uint16_t tlv_type, const uint8_t *tlv, uint16_t size)
{
- struct mrvlietypesheader *tlv_h;
+ struct mrvl_ie_header *tlv_h;
uint16_t length;
ssize_t pos = 0;
while (pos < size) {
- tlv_h = (struct mrvlietypesheader *) tlv;
+ tlv_h = (struct mrvl_ie_header *) tlv;
if (!tlv_h->len)
return NULL;
if (tlv_h->type == cpu_to_le16(tlv_type))
@@ -206,7 +206,7 @@ static ssize_t lbs_threshold_read(uint16_t tlv_type, uint16_t event_mask,
size_t count, loff_t *ppos)
{
struct cmd_ds_802_11_subscribe_event *subscribed;
- struct mrvlietypes_thresholds *got;
+ struct mrvl_ie_thresholds *got;
struct lbs_private *priv = file->private_data;
ssize_t ret = 0;
size_t pos = 0;
@@ -259,7 +259,7 @@ static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask,
loff_t *ppos)
{
struct cmd_ds_802_11_subscribe_event *events;
- struct mrvlietypes_thresholds *tlv;
+ struct mrvl_ie_thresholds *tlv;
struct lbs_private *priv = file->private_data;
ssize_t buf_size;
int value, freq, new_mask;
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index e8dfde39abf..48da157d6cd 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -227,6 +227,20 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
#define TxPD_CONTROL_WDS_FRAME (1<<17)
#define TxPD_MESH_FRAME TxPD_CONTROL_WDS_FRAME
+/** Mesh interface ID */
+#define MESH_IFACE_ID 0x0001
+/** Mesh id should be in bits 14-13-12 */
+#define MESH_IFACE_BIT_OFFSET 0x000c
+/** Mesh enable bit in FW capability */
+#define MESH_CAPINFO_ENABLE_MASK (1<<16)
+
+/** FW definition from Marvell v5 */
+#define MRVL_FW_V5 (0x05)
+/** FW definition from Marvell v10 */
+#define MRVL_FW_V10 (0x0a)
+/** FW major revision definition */
+#define MRVL_FW_MAJOR_REV(x) ((x)>>24)
+
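As a worked example of the macro above: a v9 firmware reports fwrelease with
0x09 in the most significant byte, so MRVL_FW_MAJOR_REV(0x09000000) evaluates
to 0x09. This is the same encoding the association code in this patch relies
on when it tests priv->fwrelease < 0x09000000 to choose between the old
separate AUTHENTICATE command and the v9+ authentication TLV.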
/** RxPD status */
#define MRVDRV_RXPD_STATUS_OK 0x0001
@@ -380,6 +394,13 @@ enum KEY_INFO_WPA {
KEY_INFO_WPA_ENABLED = 0x04
};
+/** mesh_fw_ver */
+enum _mesh_fw_ver {
+ MESH_NONE = 0, /* MESH is not supported */
+ MESH_FW_OLD, /* MESH is supported in FW V5 */
+ MESH_FW_NEW, /* MESH is supported in FW V10 and newer */
+};
+
/* Default values for fwt commands. */
#define FWT_DEFAULT_METRIC 0
#define FWT_DEFAULT_DIR 1
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 27e81fd97c9..f9ec69e0473 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -101,6 +101,7 @@ struct lbs_mesh_stats {
/** Private structure for the MV device */
struct lbs_private {
int mesh_open;
+ int mesh_fw_ver;
int infra_open;
int mesh_autostart_enabled;
@@ -337,7 +338,7 @@ struct bss_descriptor {
u32 rssi;
u32 channel;
u16 beaconperiod;
- u32 atimwindow;
+ __le16 atimwindow;
/* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
u8 mode;
@@ -347,10 +348,10 @@ struct bss_descriptor {
unsigned long last_scanned;
- union ieeetypes_phyparamset phyparamset;
- union IEEEtypes_ssparamset ssparamset;
+ union ieee_phy_param_set phy;
+ union ieee_ss_param_set ss;
- struct ieeetypes_countryinfofullset countryinfo;
+ struct ieee_ie_country_info_full_set countryinfo;
u8 wpa_ie[MAX_WPA_IE_LEN];
size_t wpa_ie_len;
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index d4457ef808a..fe8f0cb737b 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -83,8 +83,11 @@
#define CMD_FWT_ACCESS 0x0095
#define CMD_802_11_MONITOR_MODE 0x0098
#define CMD_MESH_ACCESS 0x009b
-#define CMD_MESH_CONFIG 0x00a3
+#define CMD_MESH_CONFIG_OLD 0x00a3
+#define CMD_MESH_CONFIG 0x00ac
#define CMD_SET_BOOT2_VER 0x00a5
+#define CMD_FUNC_INIT 0x00a9
+#define CMD_FUNC_SHUTDOWN 0x00aa
#define CMD_802_11_BEACON_CTRL 0x00b0
/* For the IEEE Power Save */
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index a899aeb676b..0a2e29140ad 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -13,8 +13,19 @@
/* TxPD descriptor */
struct txpd {
- /* Current Tx packet status */
- __le32 tx_status;
+ /* union to cope with later FW revisions */
+ union {
+ /* Current Tx packet status */
+ __le32 tx_status;
+ struct {
+ /* BSS type: client, AP, etc. */
+ u8 bss_type;
+ /* BSS number */
+ u8 bss_num;
+ /* Reserved */
+ __le16 reserved;
+ } bss;
+ } u;
/* Tx control */
__le32 tx_control;
__le32 tx_packet_location;
@@ -36,8 +47,17 @@ struct txpd {
/* RxPD Descriptor */
struct rxpd {
- /* Current Rx packet status */
- __le16 status;
+ /* union to cope with later FW revisions */
+ union {
+ /* Current Rx packet status */
+ __le16 status;
+ struct {
+ /* BSS type: client, AP, etc. */
+ u8 bss_type;
+ /* BSS number */
+ u8 bss_num;
+ } bss;
+ } u;
/* SNR */
u8 snr;
@@ -230,7 +250,9 @@ struct cmd_ds_gspi_bus_config {
} __attribute__ ((packed));
struct cmd_ds_802_11_authenticate {
- u8 macaddr[ETH_ALEN];
+ struct cmd_header hdr;
+
+ u8 bssid[ETH_ALEN];
u8 authtype;
u8 reserved[10];
} __attribute__ ((packed));
@@ -243,22 +265,23 @@ struct cmd_ds_802_11_deauthenticate {
} __attribute__ ((packed));
struct cmd_ds_802_11_associate {
- u8 peerstaaddr[6];
+ struct cmd_header hdr;
+
+ u8 bssid[6];
__le16 capability;
__le16 listeninterval;
__le16 bcnperiod;
u8 dtimperiod;
-
-#if 0
- mrvlietypes_ssidparamset_t ssidParamSet;
- mrvlietypes_phyparamset_t phyparamset;
- mrvlietypes_ssparamset_t ssparamset;
- mrvlietypes_ratesparamset_t ratesParamSet;
-#endif
+ u8 iebuf[512]; /* Enough for required and most optional IEs */
} __attribute__ ((packed));
-struct cmd_ds_802_11_associate_rsp {
- struct ieeetypes_assocrsp assocRsp;
+struct cmd_ds_802_11_associate_response {
+ struct cmd_header hdr;
+
+ __le16 capability;
+ __le16 statuscode;
+ __le16 aid;
+ u8 iebuf[512];
} __attribute__ ((packed));
struct cmd_ds_802_11_set_wep {
@@ -515,9 +538,11 @@ struct cmd_ds_802_11_ad_hoc_start {
u8 bsstype;
__le16 beaconperiod;
u8 dtimperiod; /* Reserved on v9 and later */
- union IEEEtypes_ssparamset ssparamset;
- union ieeetypes_phyparamset phyparamset;
- __le16 probedelay;
+ struct ieee_ie_ibss_param_set ibss;
+ u8 reserved1[4];
+ struct ieee_ie_ds_param_set ds;
+ u8 reserved2[4];
+ __le16 probedelay; /* Reserved on v9 and later */
__le16 capability;
u8 rates[MAX_RATES];
u8 tlv_memory_size_pad[100];
@@ -538,8 +563,10 @@ struct adhoc_bssdesc {
u8 dtimperiod;
__le64 timestamp;
__le64 localtime;
- union ieeetypes_phyparamset phyparamset;
- union IEEEtypes_ssparamset ssparamset;
+ struct ieee_ie_ds_param_set ds;
+ u8 reserved1[4];
+ struct ieee_ie_ibss_param_set ibss;
+ u8 reserved2[4];
__le16 capability;
u8 rates[MAX_RATES];
@@ -745,8 +772,6 @@ struct cmd_ds_command {
/* command Body */
union {
struct cmd_ds_802_11_ps_mode psmode;
- struct cmd_ds_802_11_associate associate;
- struct cmd_ds_802_11_authenticate auth;
struct cmd_ds_802_11_get_stat gstat;
struct cmd_ds_802_3_get_stat gstat_8023;
struct cmd_ds_802_11_rf_antenna rant;
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index cedeac6322f..2a5b083bf9b 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -273,7 +273,28 @@ static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 r
*/
#define IF_CS_PRODUCT_ID 0x0000001C
#define IF_CS_CF8385_B1_REV 0x12
+#define IF_CS_CF8381_B3_REV 0x04
+/*
+ * Used to detect cards other than the CF8385, since their silicon revisions
+ * don't match those of the CF8385; e.g. the CF8381 B3 works with this driver.
+ */
+#define CF8381_MANFID 0x02db
+#define CF8381_CARDID 0x6064
+#define CF8385_MANFID 0x02df
+#define CF8385_CARDID 0x8103
+
+static inline int if_cs_hw_is_cf8381(struct pcmcia_device *p_dev)
+{
+ return (p_dev->manf_id == CF8381_MANFID &&
+ p_dev->card_id == CF8381_CARDID);
+}
+
+static inline int if_cs_hw_is_cf8385(struct pcmcia_device *p_dev)
+{
+ return (p_dev->manf_id == CF8385_MANFID &&
+ p_dev->card_id == CF8385_CARDID);
+}
/********************************************************************/
/* I/O and interrupt handling */
@@ -757,6 +778,7 @@ static void if_cs_release(struct pcmcia_device *p_dev)
static int if_cs_probe(struct pcmcia_device *p_dev)
{
int ret = -ENOMEM;
+ unsigned int prod_id;
struct lbs_private *priv;
struct if_cs_card *card;
/* CIS parsing */
@@ -859,7 +881,14 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
p_dev->io.BasePort1 + p_dev->io.NumPorts1 - 1);
/* Check if we have a current silicon */
- if (if_cs_read8(card, IF_CS_PRODUCT_ID) < IF_CS_CF8385_B1_REV) {
+ prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID);
+ if (if_cs_hw_is_cf8381(p_dev) && prod_id < IF_CS_CF8381_B3_REV) {
+ lbs_pr_err("old chips like 8381 rev B3 aren't supported\n");
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ if (if_cs_hw_is_cf8385(p_dev) && prod_id < IF_CS_CF8385_B1_REV) {
lbs_pr_err("old chips like 8385 rev B1 aren't supported\n");
ret = -ENODEV;
goto out2;
@@ -950,7 +979,8 @@ static void if_cs_detach(struct pcmcia_device *p_dev)
/********************************************************************/
static struct pcmcia_device_id if_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x02df, 0x8103),
+ PCMCIA_DEVICE_MANF_CARD(CF8381_MANFID, CF8381_CARDID),
+ PCMCIA_DEVICE_MANF_CARD(CF8385_MANFID, CF8385_CARDID),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, if_cs_ids);
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 76f4c653d64..8cdb88c6ca2 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -39,8 +39,24 @@
#include "decl.h"
#include "defs.h"
#include "dev.h"
+#include "cmd.h"
#include "if_sdio.h"
+/* The if_sdio_remove() callback function is called when the
+ * user removes this module from kernel space or ejects the
+ * card from the slot. The driver handles these two cases
+ * differently for the SD8688 combo chip.
+ * If the user is removing the module, the FUNC_SHUTDOWN
+ * command for the SD8688 is sent to the firmware.
+ * If the card is removed, there is no need to send this command.
+ *
+ * The variable 'user_rmmod' is used to distinguish these two
+ * scenarios. This flag is initialized as FALSE in case the card
+ * is removed, and is set to TRUE for module removal when the
+ * module_exit function is called.
+ */
+static u8 user_rmmod;
+
static char *lbs_helper_name = NULL;
module_param_named(helper_name, lbs_helper_name, charp, 0644);
@@ -48,8 +64,11 @@ static char *lbs_fw_name = NULL;
module_param_named(fw_name, lbs_fw_name, charp, 0644);
static const struct sdio_device_id if_sdio_ids[] = {
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_LIBERTAS) },
- { /* end: all zeroes */ },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL,
+ SDIO_DEVICE_ID_MARVELL_LIBERTAS) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL,
+ SDIO_DEVICE_ID_MARVELL_8688WLAN) },
+ { /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, if_sdio_ids);
@@ -63,16 +82,22 @@ struct if_sdio_model {
static struct if_sdio_model if_sdio_models[] = {
{
/* 8385 */
- .model = 0x04,
+ .model = IF_SDIO_MODEL_8385,
.helper = "sd8385_helper.bin",
.firmware = "sd8385.bin",
},
{
/* 8686 */
- .model = 0x0B,
+ .model = IF_SDIO_MODEL_8686,
.helper = "sd8686_helper.bin",
.firmware = "sd8686.bin",
},
+ {
+ /* 8688 */
+ .model = IF_SDIO_MODEL_8688,
+ .helper = "sd8688_helper.bin",
+ .firmware = "sd8688.bin",
+ },
};
struct if_sdio_packet {
@@ -87,6 +112,7 @@ struct if_sdio_card {
int model;
unsigned long ioport;
+ unsigned int scratch_reg;
const char *helper;
const char *firmware;
@@ -98,25 +124,29 @@ struct if_sdio_card {
struct workqueue_struct *workqueue;
struct work_struct packet_worker;
+
+ u8 rx_unit;
};
/********************************************************************/
/* I/O */
/********************************************************************/
+/*
+ * For SD8385/SD8686, this function reads the firmware status after
+ * the image is downloaded, or reads the RX packet length when an
+ * interrupt (with the IF_SDIO_H_INT_UPLD bit set) is received.
+ * For SD8688, this function reads the firmware status only.
+ */
static u16 if_sdio_read_scratch(struct if_sdio_card *card, int *err)
{
- int ret, reg;
+ int ret;
u16 scratch;
- if (card->model == 0x04)
- reg = IF_SDIO_SCRATCH_OLD;
- else
- reg = IF_SDIO_SCRATCH;
-
- scratch = sdio_readb(card->func, reg, &ret);
+ scratch = sdio_readb(card->func, card->scratch_reg, &ret);
if (!ret)
- scratch |= sdio_readb(card->func, reg + 1, &ret) << 8;
+ scratch |= sdio_readb(card->func, card->scratch_reg + 1,
+ &ret) << 8;
if (err)
*err = ret;
@@ -127,6 +157,46 @@ static u16 if_sdio_read_scratch(struct if_sdio_card *card, int *err)
return scratch;
}
+static u8 if_sdio_read_rx_unit(struct if_sdio_card *card)
+{
+ int ret;
+ u8 rx_unit;
+
+ rx_unit = sdio_readb(card->func, IF_SDIO_RX_UNIT, &ret);
+
+ if (ret)
+ rx_unit = 0;
+
+ return rx_unit;
+}
+
+static u16 if_sdio_read_rx_len(struct if_sdio_card *card, int *err)
+{
+ int ret;
+ u16 rx_len;
+
+ switch (card->model) {
+ case IF_SDIO_MODEL_8385:
+ case IF_SDIO_MODEL_8686:
+ rx_len = if_sdio_read_scratch(card, &ret);
+ break;
+ case IF_SDIO_MODEL_8688:
+ default: /* for newer chipsets */
+ rx_len = sdio_readb(card->func, IF_SDIO_RX_LEN, &ret);
+ if (!ret)
+ rx_len <<= card->rx_unit;
+ else
+ rx_len = 0xffff; /* invalid length */
+
+ break;
+ }
+
+ if (err)
+ *err = ret;
+
+ return rx_len;
+}
+
static int if_sdio_handle_cmd(struct if_sdio_card *card,
u8 *buffer, unsigned size)
{
@@ -207,7 +277,7 @@ static int if_sdio_handle_event(struct if_sdio_card *card,
lbs_deb_enter(LBS_DEB_SDIO);
- if (card->model == 0x04) {
+ if (card->model == IF_SDIO_MODEL_8385) {
event = sdio_readb(card->func, IF_SDIO_EVENT, &ret);
if (ret)
goto out;
@@ -245,7 +315,7 @@ static int if_sdio_card_to_host(struct if_sdio_card *card)
lbs_deb_enter(LBS_DEB_SDIO);
- size = if_sdio_read_scratch(card, &ret);
+ size = if_sdio_read_rx_len(card, &ret);
if (ret)
goto out;
@@ -488,7 +558,6 @@ static int if_sdio_prog_helper(struct if_sdio_card *card)
ret = 0;
release:
- sdio_set_block_size(card->func, 0);
sdio_release_host(card->func);
kfree(chunk_buffer);
release_fw:
@@ -624,7 +693,6 @@ static int if_sdio_prog_real(struct if_sdio_card *card)
ret = 0;
release:
- sdio_set_block_size(card->func, 0);
sdio_release_host(card->func);
kfree(chunk_buffer);
release_fw:
@@ -653,6 +721,8 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
if (ret)
goto out;
+ lbs_deb_sdio("firmware status = %#x\n", scratch);
+
if (scratch == IF_SDIO_FIRMWARE_OK) {
lbs_deb_sdio("firmware already loaded\n");
goto success;
@@ -667,6 +737,9 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
goto out;
success:
+ sdio_claim_host(card->func);
+ sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE);
+ sdio_release_host(card->func);
ret = 0;
out:
@@ -820,10 +893,10 @@ static int if_sdio_probe(struct sdio_func *func,
if (sscanf(func->card->info[i],
"ID: %x", &model) == 1)
break;
- if (!strcmp(func->card->info[i], "IBIS Wireless SDIO Card")) {
- model = 4;
- break;
- }
+ if (!strcmp(func->card->info[i], "IBIS Wireless SDIO Card")) {
+ model = IF_SDIO_MODEL_8385;
+ break;
+ }
}
if (i == func->card->num_info) {
@@ -837,6 +910,20 @@ static int if_sdio_probe(struct sdio_func *func,
card->func = func;
card->model = model;
+
+ switch (card->model) {
+ case IF_SDIO_MODEL_8385:
+ card->scratch_reg = IF_SDIO_SCRATCH_OLD;
+ break;
+ case IF_SDIO_MODEL_8686:
+ card->scratch_reg = IF_SDIO_SCRATCH;
+ break;
+ case IF_SDIO_MODEL_8688:
+ default: /* for newer chipsets */
+ card->scratch_reg = IF_SDIO_FW_STATUS;
+ break;
+ }
+
spin_lock_init(&card->lock);
card->workqueue = create_workqueue("libertas_sdio");
INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
@@ -914,15 +1001,40 @@ static int if_sdio_probe(struct sdio_func *func,
priv->fw_ready = 1;
+ sdio_claim_host(func);
+
+ /*
+	 * Get rx_unit if the chip is SD8688 or newer.
+	 * SD8385 and SD8686 do not have an rx_unit register.
+ */
+ if ((card->model != IF_SDIO_MODEL_8385)
+ && (card->model != IF_SDIO_MODEL_8686))
+ card->rx_unit = if_sdio_read_rx_unit(card);
+ else
+ card->rx_unit = 0;
+
/*
* Enable interrupts now that everything is set up
*/
- sdio_claim_host(func);
sdio_writeb(func, 0x0f, IF_SDIO_H_INT_MASK, &ret);
sdio_release_host(func);
if (ret)
goto reclaim;
+ /*
+	 * FUNC_INIT is required for the SD8688 WLAN/BT multi-function card
+	 */
+ if (card->model == IF_SDIO_MODEL_8688) {
+ struct cmd_header cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ lbs_deb_sdio("send function INIT command\n");
+ if (__lbs_cmd(priv, CMD_FUNC_INIT, &cmd, sizeof(cmd),
+ lbs_cmd_copyback, (unsigned long) &cmd))
+ lbs_pr_alert("CMD_FUNC_INIT cmd failed\n");
+ }
+
ret = lbs_start_card(priv);
if (ret)
goto err_activate_card;
@@ -968,6 +1080,22 @@ static void if_sdio_remove(struct sdio_func *func)
card = sdio_get_drvdata(func);
+ if (user_rmmod && (card->model == IF_SDIO_MODEL_8688)) {
+ /*
+		 * FUNC_SHUTDOWN is required for the SD8688 WLAN/BT
+		 * multi-function card
+		 */
+ struct cmd_header cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ lbs_deb_sdio("send function SHUTDOWN command\n");
+ if (__lbs_cmd(card->priv, CMD_FUNC_SHUTDOWN,
+ &cmd, sizeof(cmd), lbs_cmd_copyback,
+ (unsigned long) &cmd))
+ lbs_pr_alert("CMD_FUNC_SHUTDOWN cmd failed\n");
+ }
+
card->priv->surpriseremoved = 1;
lbs_deb_sdio("call remove card\n");
@@ -1015,6 +1143,9 @@ static int __init if_sdio_init_module(void)
ret = sdio_register_driver(&if_sdio_driver);
+	/* Clear the flag in case the user removes the card. */
+ user_rmmod = 0;
+
lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
return ret;
@@ -1024,6 +1155,9 @@ static void __exit if_sdio_exit_module(void)
{
lbs_deb_enter(LBS_DEB_SDIO);
+	/* Set the flag as the user is removing this module. */
+ user_rmmod = 1;
+
sdio_unregister_driver(&if_sdio_driver);
lbs_deb_leave(LBS_DEB_SDIO);
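
The user_rmmod comment above describes a lifetime distinction that only the SD8688 cares about: a clean rmmod must send FUNC_SHUTDOWN to the firmware, while a surprise card ejection must not. Below is a minimal sketch of that pattern, not part of the patch; all my_* names, the empty probe and the commented-out shutdown helper are placeholders.

#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

/* Sketch only: my_* names are placeholders, not part of the patch. */
static u8 user_rmmod;	/* 0 = card was ejected, 1 = module is being removed */

static int my_sdio_probe(struct sdio_func *func,
			 const struct sdio_device_id *id)
{
	return 0;	/* a real driver would claim the function here */
}

static void my_sdio_remove(struct sdio_func *func)
{
	if (user_rmmod) {
		/* clean rmmod: tell the firmware to shut the function down */
		/* my_send_func_shutdown(func); */
	}
	/* teardown common to rmmod and surprise ejection goes here */
}

static struct sdio_driver my_sdio_driver = {
	.name	= "my_sdio",
	.probe	= my_sdio_probe,
	.remove	= my_sdio_remove,
};

static int __init my_init_module(void)
{
	user_rmmod = 0;	/* default: a later remove means the card went away */
	return sdio_register_driver(&my_sdio_driver);
}

static void __exit my_exit_module(void)
{
	user_rmmod = 1;	/* the remove callback now knows this is an rmmod */
	sdio_unregister_driver(&my_sdio_driver);
}

module_init(my_init_module);
module_exit(my_exit_module);
MODULE_LICENSE("GPL");
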
diff --git a/drivers/net/wireless/libertas/if_sdio.h b/drivers/net/wireless/libertas/if_sdio.h
index 533bdfbf5d2..60c9b2fcef0 100644
--- a/drivers/net/wireless/libertas/if_sdio.h
+++ b/drivers/net/wireless/libertas/if_sdio.h
@@ -12,6 +12,10 @@
#ifndef _LBS_IF_SDIO_H
#define _LBS_IF_SDIO_H
+#define IF_SDIO_MODEL_8385 0x04
+#define IF_SDIO_MODEL_8686 0x0b
+#define IF_SDIO_MODEL_8688 0x10
+
#define IF_SDIO_IOPORT 0x00
#define IF_SDIO_H_INT_MASK 0x04
@@ -38,8 +42,14 @@
#define IF_SDIO_SCRATCH 0x34
#define IF_SDIO_SCRATCH_OLD 0x80fe
+#define IF_SDIO_FW_STATUS 0x40
#define IF_SDIO_FIRMWARE_OK 0xfedc
+#define IF_SDIO_RX_LEN 0x42
+#define IF_SDIO_RX_UNIT 0x43
+
#define IF_SDIO_EVENT 0x80fc
+#define IF_SDIO_BLOCK_SIZE 256
+
#endif
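
The new IF_SDIO_RX_LEN/IF_SDIO_RX_UNIT registers feed the rx_len <<= card->rx_unit scaling in if_sdio_read_rx_len() above. A tiny illustrative sketch of that arithmetic, not part of the patch (the values in the comment are made up, not taken from real hardware):

#include <linux/types.h>

/* Sketch of the SD8688-style RX length scaling. */
static inline u16 rx_len_in_bytes(u8 reg_len, u8 rx_unit)
{
	/* e.g. reg_len = 0x40 read from IF_SDIO_RX_LEN and rx_unit = 2
	 * read once from IF_SDIO_RX_UNIT give 0x40 << 2 = 256 bytes.
	 */
	return (u16)reg_len << rx_unit;
}
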
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 07311e71af9..06a46d7b3d6 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -19,7 +19,6 @@
#include <linux/moduleparam.h>
#include <linux/firmware.h>
-#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/list.h>
@@ -44,20 +43,13 @@ struct if_spi_card {
struct lbs_private *priv;
struct libertas_spi_platform_data *pdata;
- char helper_fw_name[FIRMWARE_NAME_MAX];
- char main_fw_name[FIRMWARE_NAME_MAX];
+ char helper_fw_name[IF_SPI_FW_NAME_MAX];
+ char main_fw_name[IF_SPI_FW_NAME_MAX];
/* The card ID and card revision, as reported by the hardware. */
u16 card_id;
u8 card_rev;
- /* Pin number for our GPIO chip-select. */
- /* TODO: Once the generic SPI layer has some additional features, we
- * should take this out and use the normal chip select here.
- * We need support for chip select delays, and not dropping chipselect
- * after each word. */
- int gpio_cs;
-
/* The last time that we initiated an SPU operation */
unsigned long prev_xfer_time;
@@ -119,9 +111,6 @@ static struct chip_ident chip_id_to_device_name[] = {
* First we have to put a SPU register name on the bus. Then we can
* either read from or write to that register.
*
- * For 16-bit transactions, byte order on the bus is big-endian.
- * We don't have to worry about that here, though.
- * The translation takes place in the SPI routines.
*/
static void spu_transaction_init(struct if_spi_card *card)
@@ -133,12 +122,10 @@ static void spu_transaction_init(struct if_spi_card *card)
* If not, we have to busy-wait to be on the safe side. */
ndelay(400);
}
- gpio_set_value(card->gpio_cs, 0); /* assert CS */
}
static void spu_transaction_finish(struct if_spi_card *card)
{
- gpio_set_value(card->gpio_cs, 1); /* drop CS */
card->prev_xfer_time = jiffies;
}
@@ -147,7 +134,14 @@ static void spu_transaction_finish(struct if_spi_card *card)
static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
{
int err = 0;
- u16 reg_out = reg | IF_SPI_WRITE_OPERATION_MASK;
+ u16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK);
+ struct spi_message m;
+ struct spi_transfer reg_trans;
+ struct spi_transfer data_trans;
+
+ spi_message_init(&m);
+ memset(&reg_trans, 0, sizeof(reg_trans));
+ memset(&data_trans, 0, sizeof(data_trans));
/* You must give an even number of bytes to the SPU, even if it
* doesn't care about the last one. */
@@ -156,29 +150,26 @@ static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
spu_transaction_init(card);
/* write SPU register index */
- err = spi_write(card->spi, (u8 *)&reg_out, sizeof(u16));
- if (err)
- goto out;
+ reg_trans.tx_buf = &reg_out;
+ reg_trans.len = sizeof(reg_out);
- err = spi_write(card->spi, buf, len);
+ data_trans.tx_buf = buf;
+ data_trans.len = len;
-out:
+ spi_message_add_tail(&reg_trans, &m);
+ spi_message_add_tail(&data_trans, &m);
+
+ err = spi_sync(card->spi, &m);
spu_transaction_finish(card);
return err;
}
static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val)
{
- return spu_write(card, reg, (u8 *)&val, sizeof(u16));
-}
+ u16 buff;
-static inline int spu_write_u32(struct if_spi_card *card, u16 reg, u32 val)
-{
- /* The lower 16 bits are written first. */
- u16 out[2];
- out[0] = val & 0xffff;
- out[1] = (val & 0xffff0000) >> 16;
- return spu_write(card, reg, (u8 *)&out, sizeof(u32));
+ buff = cpu_to_le16(val);
+ return spu_write(card, reg, (u8 *)&buff, sizeof(u16));
}
static inline int spu_reg_is_port_reg(u16 reg)
@@ -195,10 +186,13 @@ static inline int spu_reg_is_port_reg(u16 reg)
static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
{
- unsigned int i, delay;
+ unsigned int delay;
int err = 0;
- u16 zero = 0;
- u16 reg_out = reg | IF_SPI_READ_OPERATION_MASK;
+ u16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK);
+ struct spi_message m;
+ struct spi_transfer reg_trans;
+ struct spi_transfer dummy_trans;
+ struct spi_transfer data_trans;
/* You must take an even number of bytes from the SPU, even if you
* don't care about the last one. */
@@ -206,29 +200,34 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
spu_transaction_init(card);
+ spi_message_init(&m);
+ memset(&reg_trans, 0, sizeof(reg_trans));
+ memset(&dummy_trans, 0, sizeof(dummy_trans));
+ memset(&data_trans, 0, sizeof(data_trans));
+
/* write SPU register index */
- err = spi_write(card->spi, (u8 *)&reg_out, sizeof(u16));
- if (err)
- goto out;
+ reg_trans.tx_buf = &reg_out;
+ reg_trans.len = sizeof(reg_out);
+ spi_message_add_tail(&reg_trans, &m);
delay = spu_reg_is_port_reg(reg) ? card->spu_port_delay :
card->spu_reg_delay;
if (card->use_dummy_writes) {
/* Clock in dummy cycles while the SPU fills the FIFO */
- for (i = 0; i < delay / 16; ++i) {
- err = spi_write(card->spi, (u8 *)&zero, sizeof(u16));
- if (err)
- return err;
- }
+ dummy_trans.len = delay / 8;
+ spi_message_add_tail(&dummy_trans, &m);
} else {
/* Busy-wait while the SPU fills the FIFO */
- ndelay(100 + (delay * 10));
+ reg_trans.delay_usecs =
+ DIV_ROUND_UP((100 + (delay * 10)), 1000);
}
/* read in data */
- err = spi_read(card->spi, buf, len);
+ data_trans.rx_buf = buf;
+ data_trans.len = len;
+ spi_message_add_tail(&data_trans, &m);
-out:
+ err = spi_sync(card->spi, &m);
spu_transaction_finish(card);
return err;
}
@@ -236,18 +235,25 @@ out:
/* Read 16 bits from an SPI register */
static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val)
{
- return spu_read(card, reg, (u8 *)val, sizeof(u16));
+ u16 buf;
+ int ret;
+
+ ret = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
+ if (ret == 0)
+ *val = le16_to_cpup(&buf);
+ return ret;
}
/* Read 32 bits from an SPI register.
* The low 16 bits are read first. */
static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val)
{
- u16 buf[2];
+ u32 buf;
int err;
- err = spu_read(card, reg, (u8 *)buf, sizeof(u32));
+
+ err = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
if (!err)
- *val = buf[0] | (buf[1] << 16);
+ *val = le32_to_cpup(&buf);
return err;
}
@@ -731,7 +737,7 @@ static int if_spi_c2h_data(struct if_spi_card *card)
goto out;
} else if (len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
lbs_pr_err("%s: error: card has %d bytes of data, but "
- "our maximum skb size is %u\n",
+ "our maximum skb size is %lu\n",
__func__, len, MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
err = -EINVAL;
goto out;
@@ -814,6 +820,13 @@ static void if_spi_e2h(struct if_spi_card *card)
if (err)
goto out;
+ /* re-enable the card event interrupt */
+ spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG,
+ ~IF_SPI_HICU_CARD_EVENT);
+
+ /* generate a card interrupt */
+ spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, IF_SPI_CIC_HOST_EVENT);
+
spin_lock_irqsave(&priv->driver_lock, flags);
lbs_queue_event(priv, cause & 0xff);
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -1006,9 +1019,9 @@ static int if_spi_calculate_fw_names(u16 card_id,
lbs_pr_err("Unsupported chip_id: 0x%02x\n", card_id);
return -EAFNOSUPPORT;
}
- snprintf(helper_fw, FIRMWARE_NAME_MAX, "libertas/gspi%d_hlp.bin",
+ snprintf(helper_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d_hlp.bin",
chip_id_to_device_name[i].name);
- snprintf(main_fw, FIRMWARE_NAME_MAX, "libertas/gspi%d.bin",
+ snprintf(main_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d.bin",
chip_id_to_device_name[i].name);
return 0;
}
@@ -1020,6 +1033,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
int err = 0;
u32 scratch;
+ struct sched_param param = { .sched_priority = 1 };
lbs_deb_enter(LBS_DEB_SPI);
@@ -1043,7 +1057,6 @@ static int __devinit if_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, card);
card->pdata = pdata;
card->spi = spi;
- card->gpio_cs = pdata->gpio_cs;
card->prev_xfer_time = jiffies;
sema_init(&card->spi_ready, 0);
@@ -1052,26 +1065,18 @@ static int __devinit if_spi_probe(struct spi_device *spi)
INIT_LIST_HEAD(&card->data_packet_list);
spin_lock_init(&card->buffer_lock);
- /* set up GPIO CS line. TODO: use regular CS line */
- err = gpio_request(card->gpio_cs, "if_spi_gpio_chip_select");
- if (err)
- goto free_card;
- err = gpio_direction_output(card->gpio_cs, 1);
- if (err)
- goto free_gpio;
-
/* Initialize the SPI Interface Unit */
err = spu_init(card, pdata->use_dummy_writes);
if (err)
- goto free_gpio;
+ goto free_card;
err = spu_get_chip_revision(card, &card->card_id, &card->card_rev);
if (err)
- goto free_gpio;
+ goto free_card;
/* Firmware load */
err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch);
if (err)
- goto free_gpio;
+ goto free_card;
if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC)
lbs_deb_spi("Firmware is already loaded for "
"Marvell WLAN 802.11 adapter\n");
@@ -1079,7 +1084,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
err = if_spi_calculate_fw_names(card->card_id,
card->helper_fw_name, card->main_fw_name);
if (err)
- goto free_gpio;
+ goto free_card;
lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
"(chip_id = 0x%04x, chip_rev = 0x%02x) "
@@ -1090,23 +1095,23 @@ static int __devinit if_spi_probe(struct spi_device *spi)
spi->max_speed_hz);
err = if_spi_prog_helper_firmware(card);
if (err)
- goto free_gpio;
+ goto free_card;
err = if_spi_prog_main_firmware(card);
if (err)
- goto free_gpio;
+ goto free_card;
lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
}
err = spu_set_interrupt_mode(card, 0, 1);
if (err)
- goto free_gpio;
+ goto free_card;
/* Register our card with libertas.
* This will call alloc_etherdev */
priv = lbs_add_card(card, &spi->dev);
if (!priv) {
err = -ENOMEM;
- goto free_gpio;
+ goto free_card;
}
card->priv = priv;
priv->card = card;
@@ -1123,6 +1128,9 @@ static int __devinit if_spi_probe(struct spi_device *spi)
lbs_pr_err("error creating SPI thread: err=%d\n", err);
goto remove_card;
}
+ if (sched_setscheduler(card->spi_thread, SCHED_FIFO, &param))
+ lbs_pr_err("Error setting scheduler, using default.\n");
+
err = request_irq(spi->irq, if_spi_host_interrupt,
IRQF_TRIGGER_FALLING, "libertas_spi", card);
if (err) {
@@ -1148,8 +1156,6 @@ terminate_thread:
if_spi_terminate_spi_thread(card);
remove_card:
lbs_remove_card(priv); /* will call free_netdev */
-free_gpio:
- gpio_free(card->gpio_cs);
free_card:
free_if_spi_card(card);
out:
@@ -1170,7 +1176,6 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
free_irq(spi->irq, card);
if_spi_terminate_spi_thread(card);
lbs_remove_card(priv); /* will call free_netdev */
- gpio_free(card->gpio_cs);
if (card->pdata->teardown)
card->pdata->teardown(spi);
free_if_spi_card(card);
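
The if_spi.c rework above drops the GPIO chip select and the separate spi_write()/spi_read() calls in favour of one spi_message per register access, so the SPI core keeps chip select asserted across the index and data transfers. Below is a stripped-down sketch of that idiom for a 16-bit register read, not part of the patch; only the spi_message/spi_transfer/spi_sync calls are the real API, while the function name and the 0x4000 opcode are illustrative.

#include <linux/types.h>
#include <linux/spi/spi.h>
#include <asm/byteorder.h>

/* Read a 16-bit register in one chip-select window: first clock out the
 * register index, then clock in the data, all in a single spi_message. */
static int spu_read_reg16_sketch(struct spi_device *spi, u16 reg, u16 *val)
{
	struct spi_message m;
	struct spi_transfer idx = { };
	struct spi_transfer data = { };
	__le16 reg_out = cpu_to_le16(reg | 0x4000); /* read opcode, illustrative */
	__le16 buf;
	int err;

	spi_message_init(&m);

	idx.tx_buf = &reg_out;		/* transfer 1: register index */
	idx.len = sizeof(reg_out);
	spi_message_add_tail(&idx, &m);

	data.rx_buf = &buf;		/* transfer 2: register contents */
	data.len = sizeof(buf);
	spi_message_add_tail(&data, &m);

	err = spi_sync(spi, &m);	/* CS stays asserted across both */
	if (!err)
		*val = le16_to_cpu(buf);
	return err;
}
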
diff --git a/drivers/net/wireless/libertas/if_spi.h b/drivers/net/wireless/libertas/if_spi.h
index 2103869cc5b..f87eec41084 100644
--- a/drivers/net/wireless/libertas/if_spi.h
+++ b/drivers/net/wireless/libertas/if_spi.h
@@ -22,6 +22,9 @@
#define IF_SPI_CMD_BUF_SIZE 2400
/***************** Firmware *****************/
+
+#define IF_SPI_FW_NAME_MAX 30
+
struct chip_ident {
u16 chip_id;
u16 name;
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index ea3dc038be7..1844c5adf6e 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -61,11 +61,9 @@ static ssize_t if_usb_firmware_set(struct device *dev,
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct if_usb_card *cardp = priv->card;
- char fwname[FIRMWARE_NAME_MAX];
int ret;
- sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */
- ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_FW);
+ ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW);
if (ret == 0)
return count;
@@ -88,11 +86,9 @@ static ssize_t if_usb_boot2_set(struct device *dev,
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct if_usb_card *cardp = priv->card;
- char fwname[FIRMWARE_NAME_MAX];
int ret;
- sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */
- ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_BOOT2);
+ ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2);
if (ret == 0)
return count;
@@ -686,8 +682,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
return;
}
- if (!in_interrupt())
- BUG();
+ BUG_ON(!in_interrupt());
spin_lock(&priv->driver_lock);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 8ae935ac32f..89575e44801 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -1307,8 +1307,10 @@ int lbs_start_card(struct lbs_private *priv)
lbs_update_channel(priv);
- /* 5.0.16p0 is known to NOT support any mesh */
- if (priv->fwrelease > 0x05001000) {
+	/* Check the mesh firmware version and send the appropriate
+	 * mesh start command
+ */
+ if (priv->mesh_fw_ver == MESH_FW_OLD) {
/* Enable mesh, if supported, and work out which TLV it uses.
0x100 + 291 is an unofficial value used in 5.110.20.pXX
0x100 + 37 is the official value used in 5.110.21.pXX
@@ -1322,27 +1324,35 @@ int lbs_start_card(struct lbs_private *priv)
It's just that 5.110.20.pXX will not have done anything
useful */
- priv->mesh_tlv = 0x100 + 291;
+ priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID;
if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
priv->curbssparams.channel)) {
- priv->mesh_tlv = 0x100 + 37;
+ priv->mesh_tlv = TLV_TYPE_MESH_ID;
if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
priv->curbssparams.channel))
priv->mesh_tlv = 0;
}
- if (priv->mesh_tlv) {
- lbs_add_mesh(priv);
-
- if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
- lbs_pr_err("cannot register lbs_mesh attribute\n");
-
- /* While rtap isn't related to mesh, only mesh-enabled
- * firmware implements the rtap functionality via
- * CMD_802_11_MONITOR_MODE.
- */
- if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
- lbs_pr_err("cannot register lbs_rtap attribute\n");
- }
+ } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
+ /* 10.0.0.pXX new firmwares should succeed with TLV
+		/* New 10.0.0.pXX firmware should succeed with TLV
+		 * 0x100+37; do not invoke the command with the old TLV.
+ priv->mesh_tlv = TLV_TYPE_MESH_ID;
+ if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
+ priv->curbssparams.channel))
+ priv->mesh_tlv = 0;
+ }
+ if (priv->mesh_tlv) {
+ lbs_add_mesh(priv);
+
+ if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
+ lbs_pr_err("cannot register lbs_mesh attribute\n");
+
+ /* While rtap isn't related to mesh, only mesh-enabled
+ * firmware implements the rtap functionality via
+ * CMD_802_11_MONITOR_MODE.
+ */
+ if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
+ lbs_pr_err("cannot register lbs_rtap attribute\n");
}
lbs_debugfs_init_one(priv, dev);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 8e669775cb5..65f02cc6752 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -25,7 +25,6 @@ struct rfc1042hdr {
} __attribute__ ((packed));
struct rxpackethdr {
- struct rxpd rx_pd;
struct eth803hdr eth803_hdr;
struct rfc1042hdr rfc1042_hdr;
} __attribute__ ((packed));
@@ -158,10 +157,18 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
if (priv->monitormode)
return process_rxed_802_11_packet(priv, skb);
- p_rx_pkt = (struct rxpackethdr *) skb->data;
- p_rx_pd = &p_rx_pkt->rx_pd;
- if (priv->mesh_dev && (p_rx_pd->rx_control & RxPD_MESH_FRAME))
- dev = priv->mesh_dev;
+ p_rx_pd = (struct rxpd *) skb->data;
+ p_rx_pkt = (struct rxpackethdr *) ((u8 *)p_rx_pd +
+ le32_to_cpu(p_rx_pd->pkt_ptr));
+ if (priv->mesh_dev) {
+ if (priv->mesh_fw_ver == MESH_FW_OLD) {
+ if (p_rx_pd->rx_control & RxPD_MESH_FRAME)
+ dev = priv->mesh_dev;
+ } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
+ if (p_rx_pd->u.bss.bss_num == MESH_IFACE_ID)
+ dev = priv->mesh_dev;
+ }
+ }
lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data,
min_t(unsigned int, skb->len, 100));
@@ -174,20 +181,9 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
goto done;
}
- /*
- * Check rxpd status and update 802.3 stat,
- */
- if (!(p_rx_pd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK))) {
- lbs_deb_rx("rx err: frame received with bad status\n");
- lbs_pr_alert("rxpd not ok\n");
- dev->stats.rx_errors++;
- ret = 0;
- dev_kfree_skb(skb);
- goto done;
- }
-
- lbs_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n",
- skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));
+ lbs_deb_rx("rx data: skb->len - pkt_ptr = %d-%zd = %zd\n",
+ skb->len, (size_t)le32_to_cpu(p_rx_pd->pkt_ptr),
+ skb->len - (size_t)le32_to_cpu(p_rx_pd->pkt_ptr));
lbs_deb_hex(LBS_DEB_RX, "RX Data: Dest", p_rx_pkt->eth803_hdr.dest_addr,
sizeof(p_rx_pkt->eth803_hdr.dest_addr));
@@ -221,14 +217,14 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
/* Chop off the rxpd + the excess memory from the 802.2/llc/snap header
* that was removed
*/
- hdrchop = (u8 *) p_ethhdr - (u8 *) p_rx_pkt;
+ hdrchop = (u8 *)p_ethhdr - (u8 *)p_rx_pd;
} else {
lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP",
(u8 *) & p_rx_pkt->rfc1042_hdr,
sizeof(p_rx_pkt->rfc1042_hdr));
/* Chop off the rxpd */
- hdrchop = (u8 *) & p_rx_pkt->eth803_hdr - (u8 *) p_rx_pkt;
+ hdrchop = (u8 *)&p_rx_pkt->eth803_hdr - (u8 *)p_rx_pd;
}
/* Chop off the leading header bytes so the skb points to the start of
@@ -334,14 +330,6 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
goto done;
}
- /*
- * Check rxpd status and update 802.3 stat,
- */
- if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK))) {
- //lbs_deb_rx("rx err: frame received with bad status\n");
- dev->stats.rx_errors++;
- }
-
lbs_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n",
skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));
@@ -353,8 +341,6 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
radiotap_hdr.hdr.it_pad = 0;
radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr));
radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT);
- if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
- radiotap_hdr.flags |= IEEE80211_RADIOTAP_F_BADFCS;
radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate);
/* XXX must check no carryout */
radiotap_hdr.antsignal = prxpd->snr + prxpd->nf;
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 8124db36aaf..601b5424967 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -27,12 +27,12 @@
+ 40) /* 40 for WPAIE */
//! Memory needed to store a max sized channel List TLV for a firmware scan
-#define CHAN_TLV_MAX_SIZE (sizeof(struct mrvlietypesheader) \
+#define CHAN_TLV_MAX_SIZE (sizeof(struct mrvl_ie_header) \
+ (MRVDRV_MAX_CHANNELS_PER_SCAN \
* sizeof(struct chanscanparamset)))
//! Memory needed to store a max number/size SSID TLV for a firmware scan
-#define SSID_TLV_MAX_SIZE (1 * sizeof(struct mrvlietypes_ssidparamset))
+#define SSID_TLV_MAX_SIZE (1 * sizeof(struct mrvl_ie_ssid_param_set))
//! Maximum memory needed for a cmd_ds_802_11_scan with all TLVs at max
#define MAX_SCAN_CFG_ALLOC (sizeof(struct cmd_ds_802_11_scan) \
@@ -211,7 +211,7 @@ static int lbs_scan_create_channel_list(struct lbs_private *priv,
*/
static int lbs_scan_add_ssid_tlv(struct lbs_private *priv, u8 *tlv)
{
- struct mrvlietypes_ssidparamset *ssid_tlv = (void *)tlv;
+ struct mrvl_ie_ssid_param_set *ssid_tlv = (void *)tlv;
ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID);
ssid_tlv->header.len = cpu_to_le16(priv->scan_ssid_len);
@@ -249,7 +249,7 @@ static int lbs_scan_add_chanlist_tlv(uint8_t *tlv,
int chan_count)
{
size_t size = sizeof(struct chanscanparamset) *chan_count;
- struct mrvlietypes_chanlistparamset *chan_tlv = (void *)tlv;
+ struct mrvl_ie_chanlist_param_set *chan_tlv = (void *)tlv;
chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
memcpy(chan_tlv->chanscanparam, chan_list, size);
@@ -270,7 +270,7 @@ static int lbs_scan_add_chanlist_tlv(uint8_t *tlv,
static int lbs_scan_add_rates_tlv(uint8_t *tlv)
{
int i;
- struct mrvlietypes_ratesparamset *rate_tlv = (void *)tlv;
+ struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv;
rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES);
tlv += sizeof(rate_tlv->header);
@@ -513,12 +513,12 @@ void lbs_scan_worker(struct work_struct *work)
static int lbs_process_bss(struct bss_descriptor *bss,
uint8_t **pbeaconinfo, int *bytesleft)
{
- struct ieeetypes_fhparamset *pFH;
- struct ieeetypes_dsparamset *pDS;
- struct ieeetypes_cfparamset *pCF;
- struct ieeetypes_ibssparamset *pibss;
+ struct ieee_ie_fh_param_set *fh;
+ struct ieee_ie_ds_param_set *ds;
+ struct ieee_ie_cf_param_set *cf;
+ struct ieee_ie_ibss_param_set *ibss;
DECLARE_SSID_BUF(ssid);
- struct ieeetypes_countryinfoset *pcountryinfo;
+ struct ieee_ie_country_info_set *pcountryinfo;
uint8_t *pos, *end, *p;
uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
uint16_t beaconsize = 0;
@@ -616,50 +616,49 @@ static int lbs_process_bss(struct bss_descriptor *bss,
break;
case WLAN_EID_FH_PARAMS:
- pFH = (struct ieeetypes_fhparamset *) pos;
- memmove(&bss->phyparamset.fhparamset, pFH,
- sizeof(struct ieeetypes_fhparamset));
+ fh = (struct ieee_ie_fh_param_set *) pos;
+ memcpy(&bss->phy.fh, fh, sizeof(*fh));
lbs_deb_scan("got FH IE\n");
break;
case WLAN_EID_DS_PARAMS:
- pDS = (struct ieeetypes_dsparamset *) pos;
- bss->channel = pDS->currentchan;
- memcpy(&bss->phyparamset.dsparamset, pDS,
- sizeof(struct ieeetypes_dsparamset));
+ ds = (struct ieee_ie_ds_param_set *) pos;
+ bss->channel = ds->channel;
+ memcpy(&bss->phy.ds, ds, sizeof(*ds));
lbs_deb_scan("got DS IE, channel %d\n", bss->channel);
break;
case WLAN_EID_CF_PARAMS:
- pCF = (struct ieeetypes_cfparamset *) pos;
- memcpy(&bss->ssparamset.cfparamset, pCF,
- sizeof(struct ieeetypes_cfparamset));
+ cf = (struct ieee_ie_cf_param_set *) pos;
+ memcpy(&bss->ss.cf, cf, sizeof(*cf));
lbs_deb_scan("got CF IE\n");
break;
case WLAN_EID_IBSS_PARAMS:
- pibss = (struct ieeetypes_ibssparamset *) pos;
- bss->atimwindow = le16_to_cpu(pibss->atimwindow);
- memmove(&bss->ssparamset.ibssparamset, pibss,
- sizeof(struct ieeetypes_ibssparamset));
+ ibss = (struct ieee_ie_ibss_param_set *) pos;
+ bss->atimwindow = ibss->atimwindow;
+ memcpy(&bss->ss.ibss, ibss, sizeof(*ibss));
lbs_deb_scan("got IBSS IE\n");
break;
case WLAN_EID_COUNTRY:
- pcountryinfo = (struct ieeetypes_countryinfoset *) pos;
+ pcountryinfo = (struct ieee_ie_country_info_set *) pos;
lbs_deb_scan("got COUNTRY IE\n");
- if (pcountryinfo->len < sizeof(pcountryinfo->countrycode)
- || pcountryinfo->len > 254) {
- lbs_deb_scan("process_bss: 11D- Err CountryInfo len %d, min %zd, max 254\n",
- pcountryinfo->len, sizeof(pcountryinfo->countrycode));
+ if (pcountryinfo->header.len < sizeof(pcountryinfo->countrycode)
+ || pcountryinfo->header.len > 254) {
+ lbs_deb_scan("%s: 11D- Err CountryInfo len %d, min %zd, max 254\n",
+ __func__,
+ pcountryinfo->header.len,
+ sizeof(pcountryinfo->countrycode));
ret = -1;
goto done;
}
- memcpy(&bss->countryinfo, pcountryinfo, pcountryinfo->len + 2);
+ memcpy(&bss->countryinfo, pcountryinfo,
+ pcountryinfo->header.len + 2);
lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo",
(uint8_t *) pcountryinfo,
- (int) (pcountryinfo->len + 2));
+ (int) (pcountryinfo->header.len + 2));
break;
case WLAN_EID_EXT_SUPP_RATES:
@@ -1130,7 +1129,7 @@ static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
goto done;
}
- bytesleft = le16_to_cpu(scanresp->bssdescriptsize);
+ bytesleft = get_unaligned_le16(&scanresp->bssdescriptsize);
lbs_deb_scan("SCAN_RESP: bssdescriptsize %d\n", bytesleft);
scanrespsize = le16_to_cpu(resp->size);
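
The switch to get_unaligned_le16() for bssdescriptsize above exists because the field sits at an odd offset inside the packed scan response, where a plain 16-bit load can fault on strict-alignment CPUs. A small sketch of the idiom, not part of the patch, with an illustrative packed struct:

#include <linux/types.h>
#include <asm/unaligned.h>

struct resp_sketch {
	u8 tlvbuffer[3];	/* odd length: the next field is misaligned */
	__le16 bssdescriptsize;
} __attribute__ ((packed));

static u16 read_bss_size(const struct resp_sketch *r)
{
	/* safe on architectures that trap on unaligned 16-bit loads */
	return get_unaligned_le16(&r->bssdescriptsize);
}
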
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index f10aa39a6b6..160cfd8311c 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -132,8 +132,12 @@ int lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
txpd->tx_packet_length = cpu_to_le16(pkt_len);
txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
- if (dev == priv->mesh_dev)
- txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
+ if (dev == priv->mesh_dev) {
+ if (priv->mesh_fw_ver == MESH_FW_OLD)
+ txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
+ else if (priv->mesh_fw_ver == MESH_FW_NEW)
+ txpd->u.bss.bss_num = MESH_IFACE_ID;
+ }
lbs_deb_hex(LBS_DEB_TX, "txpd", (u8 *) &txpd, sizeof(struct txpd));
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index fb7a2d1a252..99905df65b2 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -8,9 +8,14 @@
#include <asm/byteorder.h>
#include <linux/wireless.h>
-struct ieeetypes_cfparamset {
- u8 elementid;
+struct ieee_ie_header {
+ u8 id;
u8 len;
+} __attribute__ ((packed));
+
+struct ieee_ie_cf_param_set {
+ struct ieee_ie_header header;
+
u8 cfpcnt;
u8 cfpperiod;
__le16 cfpmaxduration;
@@ -18,42 +23,35 @@ struct ieeetypes_cfparamset {
} __attribute__ ((packed));
-struct ieeetypes_ibssparamset {
- u8 elementid;
- u8 len;
+struct ieee_ie_ibss_param_set {
+ struct ieee_ie_header header;
+
__le16 atimwindow;
} __attribute__ ((packed));
-union IEEEtypes_ssparamset {
- struct ieeetypes_cfparamset cfparamset;
- struct ieeetypes_ibssparamset ibssparamset;
+union ieee_ss_param_set {
+ struct ieee_ie_cf_param_set cf;
+ struct ieee_ie_ibss_param_set ibss;
} __attribute__ ((packed));
-struct ieeetypes_fhparamset {
- u8 elementid;
- u8 len;
+struct ieee_ie_fh_param_set {
+ struct ieee_ie_header header;
+
__le16 dwelltime;
u8 hopset;
u8 hoppattern;
u8 hopindex;
} __attribute__ ((packed));
-struct ieeetypes_dsparamset {
- u8 elementid;
- u8 len;
- u8 currentchan;
-} __attribute__ ((packed));
+struct ieee_ie_ds_param_set {
+ struct ieee_ie_header header;
-union ieeetypes_phyparamset {
- struct ieeetypes_fhparamset fhparamset;
- struct ieeetypes_dsparamset dsparamset;
+ u8 channel;
} __attribute__ ((packed));
-struct ieeetypes_assocrsp {
- __le16 capability;
- __le16 statuscode;
- __le16 aid;
- u8 iebuffer[1];
+union ieee_phy_param_set {
+ struct ieee_ie_fh_param_set fh;
+ struct ieee_ie_ds_param_set ds;
} __attribute__ ((packed));
/** TLV type ID definition */
@@ -94,30 +92,33 @@ struct ieeetypes_assocrsp {
#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19)
#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
#define TLV_TYPE_SNR_HIGH (PROPRIETARY_TLV_BASE_ID + 23)
+#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
+#define TLV_TYPE_MESH_ID (PROPRIETARY_TLV_BASE_ID + 37)
+#define TLV_TYPE_OLD_MESH_ID (PROPRIETARY_TLV_BASE_ID + 291)
/** TLV related data structures*/
-struct mrvlietypesheader {
+struct mrvl_ie_header {
__le16 type;
__le16 len;
} __attribute__ ((packed));
-struct mrvlietypes_data {
- struct mrvlietypesheader header;
+struct mrvl_ie_data {
+ struct mrvl_ie_header header;
u8 Data[1];
} __attribute__ ((packed));
-struct mrvlietypes_ratesparamset {
- struct mrvlietypesheader header;
+struct mrvl_ie_rates_param_set {
+ struct mrvl_ie_header header;
u8 rates[1];
} __attribute__ ((packed));
-struct mrvlietypes_ssidparamset {
- struct mrvlietypesheader header;
+struct mrvl_ie_ssid_param_set {
+ struct mrvl_ie_header header;
u8 ssid[1];
} __attribute__ ((packed));
-struct mrvlietypes_wildcardssidparamset {
- struct mrvlietypesheader header;
+struct mrvl_ie_wildcard_ssid_param_set {
+ struct mrvl_ie_header header;
u8 MaxSsidlength;
u8 ssid[1];
} __attribute__ ((packed));
@@ -142,91 +143,72 @@ struct chanscanparamset {
__le16 maxscantime;
} __attribute__ ((packed));
-struct mrvlietypes_chanlistparamset {
- struct mrvlietypesheader header;
+struct mrvl_ie_chanlist_param_set {
+ struct mrvl_ie_header header;
struct chanscanparamset chanscanparam[1];
} __attribute__ ((packed));
-struct cfparamset {
+struct mrvl_ie_cf_param_set {
+ struct mrvl_ie_header header;
u8 cfpcnt;
u8 cfpperiod;
__le16 cfpmaxduration;
__le16 cfpdurationremaining;
} __attribute__ ((packed));
-struct ibssparamset {
- __le16 atimwindow;
-} __attribute__ ((packed));
-
-struct mrvlietypes_ssparamset {
- struct mrvlietypesheader header;
- union {
- struct cfparamset cfparamset[1];
- struct ibssparamset ibssparamset[1];
- } cf_ibss;
+struct mrvl_ie_ds_param_set {
+ struct mrvl_ie_header header;
+ u8 channel;
} __attribute__ ((packed));
-struct fhparamset {
- __le16 dwelltime;
- u8 hopset;
- u8 hoppattern;
- u8 hopindex;
-} __attribute__ ((packed));
-
-struct dsparamset {
- u8 currentchan;
-} __attribute__ ((packed));
-
-struct mrvlietypes_phyparamset {
- struct mrvlietypesheader header;
- union {
- struct fhparamset fhparamset[1];
- struct dsparamset dsparamset[1];
- } fh_ds;
-} __attribute__ ((packed));
-
-struct mrvlietypes_rsnparamset {
- struct mrvlietypesheader header;
+struct mrvl_ie_rsn_param_set {
+ struct mrvl_ie_header header;
u8 rsnie[1];
} __attribute__ ((packed));
-struct mrvlietypes_tsftimestamp {
- struct mrvlietypesheader header;
+struct mrvl_ie_tsf_timestamp {
+ struct mrvl_ie_header header;
__le64 tsftable[1];
} __attribute__ ((packed));
+/* v9 and later firmware only */
+struct mrvl_ie_auth_type {
+ struct mrvl_ie_header header;
+ __le16 auth;
+} __attribute__ ((packed));
+
/** Local Power capability */
-struct mrvlietypes_powercapability {
- struct mrvlietypesheader header;
+struct mrvl_ie_power_capability {
+ struct mrvl_ie_header header;
s8 minpower;
s8 maxpower;
} __attribute__ ((packed));
/* used in CMD_802_11_SUBSCRIBE_EVENT for SNR, RSSI and Failure */
-struct mrvlietypes_thresholds {
- struct mrvlietypesheader header;
+struct mrvl_ie_thresholds {
+ struct mrvl_ie_header header;
u8 value;
u8 freq;
} __attribute__ ((packed));
-struct mrvlietypes_beaconsmissed {
- struct mrvlietypesheader header;
+struct mrvl_ie_beacons_missed {
+ struct mrvl_ie_header header;
u8 beaconmissed;
u8 reserved;
} __attribute__ ((packed));
-struct mrvlietypes_numprobes {
- struct mrvlietypesheader header;
+struct mrvl_ie_num_probes {
+ struct mrvl_ie_header header;
__le16 numprobes;
} __attribute__ ((packed));
-struct mrvlietypes_bcastprobe {
- struct mrvlietypesheader header;
+struct mrvl_ie_bcast_probe {
+ struct mrvl_ie_header header;
__le16 bcastprobe;
} __attribute__ ((packed));
-struct mrvlietypes_numssidprobe {
- struct mrvlietypesheader header;
+struct mrvl_ie_num_ssid_probe {
+ struct mrvl_ie_header header;
__le16 numssidprobe;
} __attribute__ ((packed));
@@ -235,8 +217,8 @@ struct led_pin {
u8 pin;
} __attribute__ ((packed));
-struct mrvlietypes_ledgpio {
- struct mrvlietypesheader header;
+struct mrvl_ie_ledgpio {
+ struct mrvl_ie_header header;
struct led_pin ledpin[1];
} __attribute__ ((packed));
@@ -248,8 +230,8 @@ struct led_bhv {
} __attribute__ ((packed));
-struct mrvlietypes_ledbhv {
- struct mrvlietypesheader header;
+struct mrvl_ie_ledbhv {
+ struct mrvl_ie_header header;
struct led_bhv ledbhv[1];
} __attribute__ ((packed));
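
With the renamed IE structures, every parameter set now begins with the shared struct ieee_ie_header, so a parser can validate the id/len pair once and then cast to the specific set. A short sketch of reading the DS Parameter Set channel that way, not part of the patch; the structs are local copies mirroring types.h and the bounds check is only illustrative.

#include <linux/types.h>

/* Local mirrors of the layout introduced in types.h */
struct ieee_ie_header {
	u8 id;
	u8 len;
} __attribute__ ((packed));

struct ieee_ie_ds_param_set {
	struct ieee_ie_header header;
	u8 channel;
} __attribute__ ((packed));

/* Return the channel from a DS Parameter Set IE, or 0 if the element
 * does not look like one (the DS Params element ID is 3). */
static u8 ds_param_channel(const u8 *ie, size_t bytes_left)
{
	const struct ieee_ie_ds_param_set *ds = (const void *)ie;

	if (bytes_left < sizeof(*ds) || ds->header.id != 3 ||
	    ds->header.len < 1)
		return 0;
	return ds->channel;
}
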
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 59634c33b1f..392337b37b1 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -461,8 +461,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
return;
}
- if (!in_interrupt())
- BUG();
+ BUG_ON(!in_interrupt());
spin_lock(&priv->driver_lock);
memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index e7289e2e7f1..10a99e26d39 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -366,36 +366,6 @@ static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed)
return 0;
}
-static int lbtf_op_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct lbtf_private *priv = hw->priv;
- struct sk_buff *beacon;
-
- switch (priv->vif->type) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MESH_POINT:
- beacon = ieee80211_beacon_get(hw, vif);
- if (beacon) {
- lbtf_beacon_set(priv, beacon);
- kfree_skb(beacon);
- lbtf_beacon_ctrl(priv, 1, hw->conf.beacon_int);
- }
- break;
- default:
- break;
- }
-
- if (conf->bssid) {
- u8 null_bssid[ETH_ALEN] = {0};
- bool activate = compare_ether_addr(conf->bssid, null_bssid);
- lbtf_set_bssid(priv, activate, conf->bssid);
- }
-
- return 0;
-}
-
#define SUPPORTED_FIF_FLAGS (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)
static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
@@ -451,6 +421,29 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
u32 changes)
{
struct lbtf_private *priv = hw->priv;
+ struct sk_buff *beacon;
+
+ if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_INT)) {
+ switch (priv->vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ beacon = ieee80211_beacon_get(hw, vif);
+ if (beacon) {
+ lbtf_beacon_set(priv, beacon);
+ kfree_skb(beacon);
+ lbtf_beacon_ctrl(priv, 1,
+ bss_conf->beacon_int);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (changes & BSS_CHANGED_BSSID) {
+ bool activate = !is_zero_ether_addr(bss_conf->bssid);
+ lbtf_set_bssid(priv, activate, bss_conf->bssid);
+ }
if (changes & BSS_CHANGED_ERP_PREAMBLE) {
if (bss_conf->use_short_preamble)
@@ -459,8 +452,6 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
priv->preamble = CMD_TYPE_LONG_PREAMBLE;
lbtf_set_radio_control(priv);
}
-
- return;
}
static const struct ieee80211_ops lbtf_ops = {
@@ -470,7 +461,6 @@ static const struct ieee80211_ops lbtf_ops = {
.add_interface = lbtf_op_add_interface,
.remove_interface = lbtf_op_remove_interface,
.config = lbtf_op_config,
- .config_interface = lbtf_op_config_interface,
.configure_filter = lbtf_op_configure_filter,
.bss_info_changed = lbtf_op_bss_info_changed,
};
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index d4fdc8b7d7d..e789c6e9938 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -280,7 +280,6 @@ struct mac80211_hwsim_data {
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
struct ieee80211_channel *channel;
- int radio_enabled;
unsigned long beacon_int; /* in jiffies unit */
unsigned int rx_filter;
int started;
@@ -291,6 +290,14 @@ struct mac80211_hwsim_data {
bool ps_poll_pending;
struct dentry *debugfs;
struct dentry *debugfs_ps;
+
+ /*
+	 * Only radios in the same group can communicate with each other (the
+	 * channel has to match too). Each bit represents a group. A
+	 * radio can be in more than one group.
+ */
+ u64 group;
+ struct dentry *debugfs_group;
};
@@ -410,9 +417,9 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
if (data == data2)
continue;
- if (!data2->started || !data2->radio_enabled ||
- !hwsim_ps_rx_ok(data2, skb) ||
- data->channel->center_freq != data2->channel->center_freq)
+ if (!data2->started || !hwsim_ps_rx_ok(data2, skb) ||
+ data->channel->center_freq != data2->channel->center_freq ||
+ !(data->group & data2->group))
continue;
nskb = skb_copy(skb, GFP_ATOMIC);
@@ -432,7 +439,6 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
- struct mac80211_hwsim_data *data = hw->priv;
bool ack;
struct ieee80211_tx_info *txi;
@@ -444,13 +450,6 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
return NETDEV_TX_OK;
}
- if (!data->radio_enabled) {
- printk(KERN_DEBUG "%s: dropped TX frame since radio "
- "disabled\n", wiphy_name(hw->wiphy));
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
ack = mac80211_hwsim_tx_frame(hw, skb);
txi = IEEE80211_SKB_CB(skb);
@@ -537,7 +536,7 @@ static void mac80211_hwsim_beacon(unsigned long arg)
struct ieee80211_hw *hw = (struct ieee80211_hw *) arg;
struct mac80211_hwsim_data *data = hw->priv;
- if (!data->started || !data->radio_enabled)
+ if (!data->started)
return;
ieee80211_iterate_active_interfaces_atomic(
@@ -553,18 +552,14 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
struct mac80211_hwsim_data *data = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
- printk(KERN_DEBUG "%s:%s (freq=%d radio_enabled=%d beacon_int=%d)\n",
+ printk(KERN_DEBUG "%s:%s (freq=%d idle=%d ps=%d)\n",
wiphy_name(hw->wiphy), __func__,
- conf->channel->center_freq, conf->radio_enabled,
- conf->beacon_int);
+ conf->channel->center_freq,
+ !!(conf->flags & IEEE80211_CONF_IDLE),
+ !!(conf->flags & IEEE80211_CONF_PS));
data->channel = conf->channel;
- data->radio_enabled = conf->radio_enabled;
- data->beacon_int = 1024 * conf->beacon_int / 1000 * HZ / 1000;
- if (data->beacon_int < 1)
- data->beacon_int = 1;
-
- if (!data->started || !data->radio_enabled)
+ if (!data->started || !data->beacon_int)
del_timer(&data->beacon_timer);
else
mod_timer(&data->beacon_timer, jiffies + data->beacon_int);
@@ -592,35 +587,26 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
*total_flags = data->rx_filter;
}
-static int mac80211_hwsim_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
-
- hwsim_check_magic(vif);
- if (conf->changed & IEEE80211_IFCC_BSSID) {
- DECLARE_MAC_BUF(mac);
- printk(KERN_DEBUG "%s:%s: BSSID changed: %pM\n",
- wiphy_name(hw->wiphy), __func__,
- conf->bssid);
- memcpy(vp->bssid, conf->bssid, ETH_ALEN);
- }
- return 0;
-}
-
static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
u32 changed)
{
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+ struct mac80211_hwsim_data *data = hw->priv;
hwsim_check_magic(vif);
printk(KERN_DEBUG "%s:%s(changed=0x%x)\n",
wiphy_name(hw->wiphy), __func__, changed);
+ if (changed & BSS_CHANGED_BSSID) {
+ printk(KERN_DEBUG "%s:%s: BSSID changed: %pM\n",
+ wiphy_name(hw->wiphy), __func__,
+ info->bssid);
+ memcpy(vp->bssid, info->bssid, ETH_ALEN);
+ }
+
if (changed & BSS_CHANGED_ASSOC) {
printk(KERN_DEBUG " %s: ASSOC: assoc=%d aid=%d\n",
wiphy_name(hw->wiphy), info->assoc, info->aid);
@@ -628,6 +614,14 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
vp->aid = info->aid;
}
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ printk(KERN_DEBUG " %s: BCNINT: %d\n",
+ wiphy_name(hw->wiphy), info->beacon_int);
+ data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000;
+ if (WARN_ON(!data->beacon_int))
+ data->beacon_int = 1;
+ }
+
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
printk(KERN_DEBUG " %s: ERP_CTS_PROT: %d\n",
wiphy_name(hw->wiphy), info->use_cts_prot);
@@ -646,7 +640,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_HT) {
printk(KERN_DEBUG " %s: HT: op_mode=0x%x\n",
wiphy_name(hw->wiphy),
- info->ht.operation_mode);
+ info->ht_operation_mode);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
@@ -704,7 +698,6 @@ static const struct ieee80211_ops mac80211_hwsim_ops =
.remove_interface = mac80211_hwsim_remove_interface,
.config = mac80211_hwsim_config,
.configure_filter = mac80211_hwsim_configure_filter,
- .config_interface = mac80211_hwsim_config_interface,
.bss_info_changed = mac80211_hwsim_bss_info_changed,
.sta_notify = mac80211_hwsim_sta_notify,
.set_tim = mac80211_hwsim_set_tim,
@@ -725,6 +718,7 @@ static void mac80211_hwsim_free(void)
spin_unlock_bh(&hwsim_radio_lock);
list_for_each_entry(data, &tmplist, list) {
+ debugfs_remove(data->debugfs_group);
debugfs_remove(data->debugfs_ps);
debugfs_remove(data->debugfs);
ieee80211_unregister_hw(data->hw);
@@ -782,8 +776,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
pspoll->aid = cpu_to_le16(0xc000 | vp->aid);
memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
memcpy(pspoll->ta, mac, ETH_ALEN);
- if (data->radio_enabled &&
- !mac80211_hwsim_tx_frame(data->hw, skb))
+ if (!mac80211_hwsim_tx_frame(data->hw, skb))
printk(KERN_DEBUG "%s: PS-Poll frame not ack'ed\n", __func__);
dev_kfree_skb(skb);
}
@@ -814,8 +807,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
memcpy(hdr->addr2, mac, ETH_ALEN);
memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
- if (data->radio_enabled &&
- !mac80211_hwsim_tx_frame(data->hw, skb))
+ if (!mac80211_hwsim_tx_frame(data->hw, skb))
printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
dev_kfree_skb(skb);
}
@@ -877,6 +869,24 @@ DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
"%llu\n");
+static int hwsim_fops_group_read(void *dat, u64 *val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ *val = data->group;
+ return 0;
+}
+
+static int hwsim_fops_group_write(void *dat, u64 val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ data->group = val;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
+ hwsim_fops_group_read, hwsim_fops_group_write,
+ "%llx\n");
+
static int __init init_mac80211_hwsim(void)
{
int i, err = 0;
@@ -981,6 +991,8 @@ static int __init init_mac80211_hwsim(void)
hw->wiphy->bands[band] = sband;
}
+	/* By default all radios belong to the first group */
+ data->group = 1;
/* Work to be done prior to ieee80211_register_hw() */
switch (regtest) {
@@ -1105,6 +1117,9 @@ static int __init init_mac80211_hwsim(void)
data->debugfs_ps = debugfs_create_file("ps", 0666,
data->debugfs, data,
&hwsim_fops_ps);
+ data->debugfs_group = debugfs_create_file("group", 0666,
+ data->debugfs, data,
+ &hwsim_fops_group);
setup_timer(&data->beacon_timer, mac80211_hwsim_beacon,
(unsigned long) hw);
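
The new group bitmask partitions hwsim radios: a frame is delivered only when the sender's and receiver's masks share at least one bit (and the channels match), and each radio's mask can be changed at run time through its debugfs "group" file (default 1). A minimal sketch of the delivery test, not part of the patch, with a hypothetical radio struct:

#include <linux/types.h>

struct radio_sketch {
	u64 group;		/* bit n set => member of group n */
	int channel_freq;
};

/* Mirror of the hwsim TX filter: same channel and at least one
 * common group bit are required for delivery. */
static bool radios_can_hear_each_other(const struct radio_sketch *a,
				       const struct radio_sketch *b)
{
	return a->channel_freq == b->channel_freq && (a->group & b->group);
}
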
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index a9a970469c2..a263d5c84c0 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2369,7 +2369,7 @@ static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
if (info->use_cts_prot) {
prot_mode = MWL8K_FRAME_PROT_11G;
} else {
- switch (info->ht.operation_mode &
+ switch (info->ht_operation_mode &
IEEE80211_HT_OP_MODE_PROTECTION) {
case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
@@ -3089,19 +3089,6 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
return rc ? -EINVAL : 0;
}
-static int mwl8k_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
- u32 changed = conf->changed;
-
- if (changed & IEEE80211_IFCC_BSSID)
- memcpy(mv_vif->bssid, conf->bssid, IEEE80211_ADDR_LEN);
-
- return 0;
-}
-
struct mwl8k_bss_info_changed_worker {
struct mwl8k_work_struct header;
struct ieee80211_vif *vif;
@@ -3183,8 +3170,12 @@ static void mwl8k_bss_info_changed(struct ieee80211_hw *hw,
{
struct mwl8k_bss_info_changed_worker *worker;
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
int rc;
+ if (changed & BSS_CHANGED_BSSID)
+ memcpy(mv_vif->bssid, info->bssid, IEEE80211_ADDR_LEN);
+
if ((changed & BSS_CHANGED_ASSOC) == 0)
return;
@@ -3442,7 +3433,6 @@ static const struct ieee80211_ops mwl8k_ops = {
.add_interface = mwl8k_add_interface,
.remove_interface = mwl8k_remove_interface,
.config = mwl8k_config,
- .config_interface = mwl8k_config_interface,
.bss_info_changed = mwl8k_bss_info_changed,
.configure_filter = mwl8k_configure_filter,
.set_rts_threshold = mwl8k_set_rts_threshold,
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index ecf8b6ed5a4..db3df947d8e 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -125,6 +125,7 @@ struct p54_led_dev {
struct led_classdev led_dev;
char name[P54_LED_MAX_NAME_LEN + 1];
+ unsigned int toggled;
unsigned int index;
unsigned int registered;
};
@@ -133,55 +134,74 @@ struct p54_led_dev {
struct p54_common {
struct ieee80211_hw *hw;
- u32 rx_start;
- u32 rx_end;
- struct sk_buff_head tx_queue;
+ struct ieee80211_vif *vif;
void (*tx)(struct ieee80211_hw *dev, struct sk_buff *skb);
int (*open)(struct ieee80211_hw *dev);
void (*stop)(struct ieee80211_hw *dev);
- int mode;
+ struct sk_buff_head tx_queue;
+ struct mutex conf_mutex;
+
+ /* memory management (as seen by the firmware) */
+ u32 rx_start;
+ u32 rx_end;
u16 rx_mtu;
u8 headroom;
u8 tailroom;
- struct mutex conf_mutex;
- u8 mac_addr[ETH_ALEN];
- u8 bssid[ETH_ALEN];
+
+ /* firmware/hardware info */
+ unsigned int tx_hdr_len;
+ unsigned int fw_var;
+ unsigned int fw_interface;
+ u8 version;
+
+ /* (e)DCF / QOS state */
+ bool use_short_slot;
+ struct ieee80211_tx_queue_stats tx_stats[8];
+ struct p54_edcf_queue_param qos_params[8];
+
+ /* Radio data */
+ u16 rxhw;
u8 rx_diversity_mask;
u8 tx_diversity_mask;
+ unsigned int output_power;
+ int noise;
+ /* calibration, output power limit and rssi<->dBm conversation data */
struct pda_iq_autocal_entry *iq_autocal;
unsigned int iq_autocal_len;
- struct p54_cal_database *output_limit;
struct p54_cal_database *curve_data;
+ struct p54_cal_database *output_limit;
struct p54_rssi_linear_approximation rssical_db[IEEE80211_NUM_BANDS];
+
+ /* BBP/MAC state */
+ u8 mac_addr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u16 wakeup_timer;
unsigned int filter_flags;
- bool use_short_slot;
- u16 rxhw;
- u8 version;
- unsigned int tx_hdr_len;
- unsigned int fw_var;
- unsigned int fw_interface;
- unsigned int output_power;
- u32 tsf_low32;
- u32 tsf_high32;
+ int mode;
+ u32 tsf_low32, tsf_high32;
u32 basic_rate_mask;
- u16 wakeup_timer;
u16 aid;
- struct ieee80211_tx_queue_stats tx_stats[8];
- struct p54_edcf_queue_param qos_params[8];
- struct ieee80211_low_level_stats stats;
- struct delayed_work work;
struct sk_buff *cached_beacon;
- int noise;
- void *eeprom;
- struct completion eeprom_comp;
+
+ /* cryptographic engine information */
u8 privacy_caps;
u8 rx_keycache_size;
+ unsigned long *used_rxkeys;
+
/* LED management */
#ifdef CONFIG_P54_LEDS
- struct p54_led_dev assoc_led;
- struct p54_led_dev tx_led;
+ struct p54_led_dev leds[4];
+ struct delayed_work led_work;
#endif /* CONFIG_P54_LEDS */
u16 softled_state; /* bit field of glowing LEDs */
+
+ /* statistics */
+ struct ieee80211_low_level_stats stats;
+ struct delayed_work work;
+
+ /* eeprom handling */
+ void *eeprom;
+ struct completion eeprom_comp;
};
int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb);
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index c8f0232ee5e..b618bd14583 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -249,7 +249,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
dev->queues = P54_QUEUE_AC_NUM;
}
- if (!modparam_nohwcrypt)
+ if (!modparam_nohwcrypt) {
printk(KERN_INFO "%s: cryptographic accelerator "
"WEP:%s, TKIP:%s, CCMP:%s\n",
wiphy_name(dev->wiphy),
@@ -259,6 +259,26 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP) ?
"YES" : "no");
+ if (priv->rx_keycache_size) {
+ /*
+ * NOTE:
+ *
+ * The firmware provides at most 255 (0 - 254) slots
+ * for keys which are then used to offload decryption.
+			 * As a result, entry 255 (i.e. 0xff) can safely be
+			 * used by the driver to mark keys that didn't fit
+			 * into the full cache. This trick saves us from
+			 * keeping an extra list for uploaded keys.
+ */
+
+ priv->used_rxkeys = kzalloc(BITS_TO_LONGS(
+ priv->rx_keycache_size), GFP_KERNEL);
+
+ if (!priv->used_rxkeys)
+ return -ENOMEM;
+ }
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(p54_parse_firmware);
@@ -749,8 +769,6 @@ static int p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
rx_status.signal = p54_rssi_to_dbm(dev, hdr->rssi);
rx_status.noise = priv->noise;
- /* XX correct? */
- rx_status.qual = (100 * hdr->rssi) / 127;
if (hdr->rate & 0x10)
rx_status.flag |= RX_FLAG_SHORTPRE;
if (dev->conf.channel->band == IEEE80211_BAND_5GHZ)
@@ -804,44 +822,37 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
struct ieee80211_tx_info *info;
struct p54_tx_info *range;
unsigned long flags;
- u32 freed = 0, last_addr = priv->rx_start;
- if (unlikely(!skb || !dev || !skb_queue_len(&priv->tx_queue)))
+ if (unlikely(!skb || !dev || skb_queue_empty(&priv->tx_queue)))
return;
- /*
- * don't try to free an already unlinked skb
+ /* There used to be a check here to see if the SKB was on the
+ * TX queue or not. This can never happen because all SKBs we
+ * see here successfully went through p54_assign_address()
+ * which means the SKB is on the ->tx_queue.
*/
- if (unlikely((!skb->next) || (!skb->prev)))
- return;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
info = IEEE80211_SKB_CB(skb);
range = (void *)info->rate_driver_data;
- if (skb->prev != (struct sk_buff *)&priv->tx_queue) {
+ if (!skb_queue_is_first(&priv->tx_queue, skb)) {
struct ieee80211_tx_info *ni;
struct p54_tx_info *mr;
- ni = IEEE80211_SKB_CB(skb->prev);
+ ni = IEEE80211_SKB_CB(skb_queue_prev(&priv->tx_queue, skb));
mr = (struct p54_tx_info *)ni->rate_driver_data;
- last_addr = mr->end_addr;
}
- if (skb->next != (struct sk_buff *)&priv->tx_queue) {
+ if (!skb_queue_is_last(&priv->tx_queue, skb)) {
struct ieee80211_tx_info *ni;
struct p54_tx_info *mr;
- ni = IEEE80211_SKB_CB(skb->next);
+ ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue, skb));
mr = (struct p54_tx_info *)ni->rate_driver_data;
- freed = mr->start_addr - last_addr;
- } else
- freed = priv->rx_end - last_addr;
+ }
__skb_unlink(skb, &priv->tx_queue);
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
dev_kfree_skb_any(skb);
-
- if (freed >= priv->headroom + sizeof(struct p54_hdr) + 48 +
- IEEE80211_MAX_RTS_THRESHOLD + priv->tailroom)
- p54_wake_free_queues(dev);
+ p54_wake_free_queues(dev);
}
EXPORT_SYMBOL_GPL(p54_free_skb);
@@ -853,15 +864,13 @@ static struct sk_buff *p54_find_tx_entry(struct ieee80211_hw *dev,
unsigned long flags;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
- entry = priv->tx_queue.next;
- while (entry != (struct sk_buff *)&priv->tx_queue) {
+ skb_queue_walk(&priv->tx_queue, entry) {
struct p54_hdr *hdr = (struct p54_hdr *) entry->data;
if (hdr->req_id == req_id) {
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
return entry;
}
- entry = entry->next;
}
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
return NULL;
@@ -875,37 +884,29 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
struct sk_buff *entry;
u32 addr = le32_to_cpu(hdr->req_id) - priv->headroom;
struct p54_tx_info *range = NULL;
- u32 freed = 0;
- u32 last_addr = priv->rx_start;
unsigned long flags;
int count, idx;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
- entry = (struct sk_buff *) priv->tx_queue.next;
- while (entry != (struct sk_buff *)&priv->tx_queue) {
+ skb_queue_walk(&priv->tx_queue, entry) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
struct p54_hdr *entry_hdr;
struct p54_tx_data *entry_data;
unsigned int pad = 0, frame_len;
range = (void *)info->rate_driver_data;
- if (range->start_addr != addr) {
- last_addr = range->end_addr;
- entry = entry->next;
+ if (range->start_addr != addr)
continue;
- }
- if (entry->next != (struct sk_buff *)&priv->tx_queue) {
+ if (!skb_queue_is_last(&priv->tx_queue, entry)) {
struct ieee80211_tx_info *ni;
struct p54_tx_info *mr;
- ni = IEEE80211_SKB_CB(entry->next);
+ ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue,
+ entry));
mr = (struct p54_tx_info *)ni->rate_driver_data;
- freed = mr->start_addr - last_addr;
- } else
- freed = priv->rx_end - last_addr;
+ }
- last_addr = range->end_addr;
__skb_unlink(entry, &priv->tx_queue);
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
@@ -992,9 +993,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
out:
- if (freed >= priv->headroom + sizeof(struct p54_hdr) + 48 +
- IEEE80211_MAX_RTS_THRESHOLD + priv->tailroom)
- p54_wake_free_queues(dev);
+ p54_wake_free_queues(dev);
}
static void p54_rx_eeprom_readback(struct ieee80211_hw *dev,
@@ -1044,6 +1043,7 @@ static void p54_rx_stats(struct ieee80211_hw *dev, struct sk_buff *skb)
static void p54_rx_trap(struct ieee80211_hw *dev, struct sk_buff *skb)
{
+ struct p54_common *priv = dev->priv;
struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
struct p54_trap *trap = (struct p54_trap *) hdr->data;
u16 event = le16_to_cpu(trap->event);
@@ -1057,6 +1057,8 @@ static void p54_rx_trap(struct ieee80211_hw *dev, struct sk_buff *skb)
wiphy_name(dev->wiphy), freq);
break;
case P54_TRAP_NO_BEACON:
+ if (priv->vif)
+ ieee80211_beacon_loss(priv->vif);
break;
case P54_TRAP_SCAN:
break;
@@ -1162,23 +1164,21 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
}
}
- entry = priv->tx_queue.next;
- while (left--) {
+ skb_queue_walk(&priv->tx_queue, entry) {
u32 hole_size;
info = IEEE80211_SKB_CB(entry);
range = (void *)info->rate_driver_data;
hole_size = range->start_addr - last_addr;
if (!target_skb && hole_size >= len) {
- target_skb = entry->prev;
+ target_skb = skb_queue_prev(&priv->tx_queue, entry);
hole_size -= len;
target_addr = last_addr;
}
largest_hole = max(largest_hole, hole_size);
last_addr = range->end_addr;
- entry = entry->next;
}
if (!target_skb && priv->rx_end - last_addr >= len) {
- target_skb = priv->tx_queue.prev;
+ target_skb = skb_peek_tail(&priv->tx_queue);
largest_hole = max(largest_hole, priv->rx_end - last_addr - len);
if (!skb_queue_empty(&priv->tx_queue)) {
info = IEEE80211_SKB_CB(target_skb);
@@ -1452,7 +1452,8 @@ static int p54_tx_fill(struct ieee80211_hw *dev, struct sk_buff *skb,
if (info->control.sta)
*aid = info->control.sta->aid;
- else
+
+ if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
*flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL;
break;
}
@@ -1939,7 +1940,8 @@ static int p54_set_ps(struct ieee80211_hw *dev)
int i;
if (dev->conf.flags & IEEE80211_CONF_PS)
- mode = P54_PSM | P54_PSM_DTIM | P54_PSM_MCBC;
+ mode = P54_PSM | P54_PSM_BEACON_TIMEOUT | P54_PSM_DTIM |
+ P54_PSM_CHECKSUM | P54_PSM_MCBC;
else
mode = P54_PSM_CAM;
@@ -1957,9 +1959,10 @@ static int p54_set_ps(struct ieee80211_hw *dev)
psm->intervals[i].periods = cpu_to_le16(1);
}
- psm->beacon_rssi_skip_max = 60;
+ psm->beacon_rssi_skip_max = 200;
psm->rssi_delta_threshold = 0;
- psm->nr = 0;
+ psm->nr = 10;
+ psm->exclude[0] = 0;
priv->tx(dev, skb);
@@ -2081,20 +2084,21 @@ out:
static void p54_stop(struct ieee80211_hw *dev)
{
struct p54_common *priv = dev->priv;
- struct sk_buff *skb;
mutex_lock(&priv->conf_mutex);
priv->mode = NL80211_IFTYPE_UNSPECIFIED;
priv->softled_state = 0;
p54_set_leds(dev);
+#ifdef CONFIG_P54_LEDS
+ cancel_delayed_work_sync(&priv->led_work);
+#endif /* CONFIG_P54_LEDS */
cancel_delayed_work_sync(&priv->work);
if (priv->cached_beacon)
p54_tx_cancel(dev, priv->cached_beacon);
priv->stop(dev);
- while ((skb = skb_dequeue(&priv->tx_queue)))
- kfree_skb(skb);
+ skb_queue_purge(&priv->tx_queue);
priv->cached_beacon = NULL;
priv->tsf_high32 = priv->tsf_low32 = 0;
mutex_unlock(&priv->conf_mutex);
@@ -2111,6 +2115,8 @@ static int p54_add_interface(struct ieee80211_hw *dev,
return -EOPNOTSUPP;
}
+ priv->vif = conf->vif;
+
switch (conf->type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
@@ -2135,6 +2141,7 @@ static void p54_remove_interface(struct ieee80211_hw *dev,
struct p54_common *priv = dev->priv;
mutex_lock(&priv->conf_mutex);
+ priv->vif = NULL;
if (priv->cached_beacon)
p54_tx_cancel(dev, priv->cached_beacon);
priv->mode = NL80211_IFTYPE_MONITOR;
@@ -2174,41 +2181,6 @@ out:
return ret;
}
-static int p54_config_interface(struct ieee80211_hw *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct p54_common *priv = dev->priv;
- int ret = 0;
-
- mutex_lock(&priv->conf_mutex);
- if (conf->changed & IEEE80211_IFCC_BSSID) {
- memcpy(priv->bssid, conf->bssid, ETH_ALEN);
- ret = p54_setup_mac(dev);
- if (ret)
- goto out;
- }
-
- if (conf->changed & IEEE80211_IFCC_BEACON) {
- ret = p54_scan(dev, P54_SCAN_EXIT, 0);
- if (ret)
- goto out;
- ret = p54_setup_mac(dev);
- if (ret)
- goto out;
- ret = p54_beacon_update(dev, vif);
- if (ret)
- goto out;
- ret = p54_set_edcf(dev);
- if (ret)
- goto out;
- }
-
-out:
- mutex_unlock(&priv->conf_mutex);
- return ret;
-}
-
static void p54_configure_filter(struct ieee80211_hw *dev,
unsigned int changed_flags,
unsigned int *total_flags,
@@ -2312,8 +2284,32 @@ static void p54_bss_info_changed(struct ieee80211_hw *dev,
u32 changed)
{
struct p54_common *priv = dev->priv;
+ int ret;
+
+ mutex_lock(&priv->conf_mutex);
+ if (changed & BSS_CHANGED_BSSID) {
+ memcpy(priv->bssid, info->bssid, ETH_ALEN);
+ ret = p54_setup_mac(dev);
+ if (ret)
+ goto out;
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ ret = p54_scan(dev, P54_SCAN_EXIT, 0);
+ if (ret)
+ goto out;
+ ret = p54_setup_mac(dev);
+ if (ret)
+ goto out;
+ ret = p54_beacon_update(dev, vif);
+ if (ret)
+ goto out;
+ }
+ /* XXX: this mimics having two callbacks... clean up */
+ out:
+ mutex_unlock(&priv->conf_mutex);
- if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_BEACON)) {
priv->use_short_slot = info->use_short_slot;
p54_set_edcf(dev);
}
@@ -2334,7 +2330,6 @@ static void p54_bss_info_changed(struct ieee80211_hw *dev,
p54_setup_mac(dev);
}
}
-
}
static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
@@ -2344,61 +2339,84 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
struct p54_common *priv = dev->priv;
struct sk_buff *skb;
struct p54_keycache *rxkey;
+ int slot, ret = 0;
u8 algo = 0;
if (modparam_nohwcrypt)
return -EOPNOTSUPP;
- if (cmd == DISABLE_KEY)
- algo = 0;
- else {
+ mutex_lock(&priv->conf_mutex);
+ if (cmd == SET_KEY) {
switch (key->alg) {
case ALG_TKIP:
if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL |
- BR_DESC_PRIV_CAP_TKIP)))
- return -EOPNOTSUPP;
+ BR_DESC_PRIV_CAP_TKIP))) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
algo = P54_CRYPTO_TKIPMICHAEL;
break;
case ALG_WEP:
- if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP))
- return -EOPNOTSUPP;
+ if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
algo = P54_CRYPTO_WEP;
break;
case ALG_CCMP:
- if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP))
- return -EOPNOTSUPP;
+ if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
algo = P54_CRYPTO_AESCCMP;
break;
default:
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
}
- }
+ slot = bitmap_find_free_region(priv->used_rxkeys,
+ priv->rx_keycache_size, 0);
- if (key->keyidx > priv->rx_keycache_size) {
- /*
- * The device supports the choosen algorithm, but the firmware
- * does not provide enough key slots to store all of them.
- * So, incoming frames have to be decoded by the mac80211 stack,
- * but we can still offload encryption for outgoing frames.
- */
+ if (slot < 0) {
+ /*
+ * The device supports the chosen algorithm, but the
+ * firmware does not provide enough key slots to store
+ * all of them.
+ * But encryption offload for outgoing frames is always
+ * possible, so we just pretend that the upload was
+ * successful and do the decryption in software.
+ */
- return 0;
+ /* mark the key as invalid. */
+ key->hw_key_idx = 0xff;
+ goto out_unlock;
+ }
+ } else {
+ slot = key->hw_key_idx;
+
+ if (slot == 0xff) {
+ /* This key was not uploaded into the rx key cache. */
+
+ goto out_unlock;
+ }
+
+ bitmap_release_region(priv->used_rxkeys, slot, 0);
+ algo = 0;
}
- mutex_lock(&priv->conf_mutex);
skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*rxkey),
- P54_CONTROL_TYPE_RX_KEYCACHE, GFP_ATOMIC);
+ P54_CONTROL_TYPE_RX_KEYCACHE, GFP_KERNEL);
if (!skb) {
- mutex_unlock(&priv->conf_mutex);
- return -ENOMEM;
+ bitmap_release_region(priv->used_rxkeys, slot, 0);
+ ret = -ENOSPC;
+ goto out_unlock;
}
- /* TODO: some devices have 4 more free slots for rx keys */
rxkey = (struct p54_keycache *)skb_put(skb, sizeof(*rxkey));
- rxkey->entry = key->keyidx;
+ rxkey->entry = slot;
rxkey->key_id = key->keyidx;
rxkey->key_type = algo;
if (sta)
@@ -2416,11 +2434,51 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
}
priv->tx(dev, skb);
+ key->hw_key_idx = slot;
+
+out_unlock:
mutex_unlock(&priv->conf_mutex);
- return 0;
+ return ret;
}
#ifdef CONFIG_P54_LEDS
+static void p54_update_leds(struct work_struct *work)
+{
+ struct p54_common *priv = container_of(work, struct p54_common,
+ led_work.work);
+ int err, i, tmp, blink_delay = 400;
+ bool rerun = false;
+
+ /* Don't toggle the LED when the device is down. */
+ if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(priv->leds); i++)
+ if (priv->leds[i].toggled) {
+ priv->softled_state |= BIT(i);
+
+ tmp = 70 + 200 / (priv->leds[i].toggled);
+ if (tmp < blink_delay)
+ blink_delay = tmp;
+
+ if (priv->leds[i].led_dev.brightness == LED_OFF)
+ rerun = true;
+
+ priv->leds[i].toggled =
+ !!priv->leds[i].led_dev.brightness;
+ } else
+ priv->softled_state &= ~BIT(i);
+
+ err = p54_set_leds(priv->hw);
+ if (err && net_ratelimit())
+ printk(KERN_ERR "%s: failed to update LEDs.\n",
+ wiphy_name(priv->hw->wiphy));
+
+ if (rerun)
+ queue_delayed_work(priv->hw->workqueue, &priv->led_work,
+ msecs_to_jiffies(blink_delay));
+}
+
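The p54_update_leds() worker above is a self-rearming delayed_work: the brightness callback only bumps a per-LED counter and queues the work, and the worker re-queues itself while there is still blinking left to do. Stripped of the p54 specifics, the pattern looks roughly like this (struct blinker and its fields are invented for the sketch; the workqueue calls are real):

/* illustration only -- not part of the patch */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct blinker {
	struct delayed_work work;	/* INIT_DELAYED_WORK() done at setup */
	bool active;			/* set by the trigger, cleared when idle */
	unsigned int period_ms;
};

static void blink_worker(struct work_struct *work)
{
	struct blinker *b = container_of(work, struct blinker, work.work);

	/* ... toggle the LED hardware state here ... */

	if (b->active)			/* re-arm until the trigger goes idle */
		schedule_delayed_work(&b->work, msecs_to_jiffies(b->period_ms));
}

static void blink_trigger(struct blinker *b)
{
	b->active = true;
	schedule_delayed_work(&b->work, msecs_to_jiffies(b->period_ms));
}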
static void p54_led_brightness_set(struct led_classdev *led_dev,
enum led_brightness brightness)
{
@@ -2428,28 +2486,23 @@ static void p54_led_brightness_set(struct led_classdev *led_dev,
led_dev);
struct ieee80211_hw *dev = led->hw_dev;
struct p54_common *priv = dev->priv;
- int err;
- /* Don't toggle the LED, when the device is down. */
if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
return ;
- if (brightness != LED_OFF)
- priv->softled_state |= BIT(led->index);
- else
- priv->softled_state &= ~BIT(led->index);
-
- err = p54_set_leds(dev);
- if (err && net_ratelimit())
- printk(KERN_ERR "%s: failed to update %s LED.\n",
- wiphy_name(dev->wiphy), led_dev->name);
+ if (brightness) {
+ led->toggled++;
+ queue_delayed_work(priv->hw->workqueue, &priv->led_work,
+ HZ/10);
+ }
}
static int p54_register_led(struct ieee80211_hw *dev,
- struct p54_led_dev *led,
unsigned int led_index,
char *name, char *trigger)
{
+ struct p54_common *priv = dev->priv;
+ struct p54_led_dev *led = &priv->leds[led_index];
int err;
if (led->registered)
@@ -2482,19 +2535,30 @@ static int p54_init_leds(struct ieee80211_hw *dev)
* TODO:
* Figure out if the EEPROM contains some hints about the number
* of available/programmable LEDs of the device.
- * But for now, we can assume that we have two programmable LEDs.
*/
- err = p54_register_led(dev, &priv->assoc_led, 0, "assoc",
+ INIT_DELAYED_WORK(&priv->led_work, p54_update_leds);
+
+ err = p54_register_led(dev, 0, "assoc",
ieee80211_get_assoc_led_name(dev));
if (err)
return err;
- err = p54_register_led(dev, &priv->tx_led, 1, "tx",
+ err = p54_register_led(dev, 1, "tx",
ieee80211_get_tx_led_name(dev));
if (err)
return err;
+ err = p54_register_led(dev, 2, "rx",
+ ieee80211_get_rx_led_name(dev));
+ if (err)
+ return err;
+
+ err = p54_register_led(dev, 3, "radio",
+ ieee80211_get_radio_led_name(dev));
+ if (err)
+ return err;
+
err = p54_set_leds(dev);
return err;
}
@@ -2502,11 +2566,11 @@ static int p54_init_leds(struct ieee80211_hw *dev)
static void p54_unregister_leds(struct ieee80211_hw *dev)
{
struct p54_common *priv = dev->priv;
+ int i;
- if (priv->tx_led.registered)
- led_classdev_unregister(&priv->tx_led.led_dev);
- if (priv->assoc_led.registered)
- led_classdev_unregister(&priv->assoc_led.led_dev);
+ for (i = 0; i < ARRAY_SIZE(priv->leds); i++)
+ if (priv->leds[i].registered)
+ led_classdev_unregister(&priv->leds[i].led_dev);
}
#endif /* CONFIG_P54_LEDS */
@@ -2520,7 +2584,6 @@ static const struct ieee80211_ops p54_ops = {
.sta_notify = p54_sta_notify,
.set_key = p54_set_key,
.config = p54_config,
- .config_interface = p54_config_interface,
.bss_info_changed = p54_bss_info_changed,
.configure_filter = p54_configure_filter,
.conf_tx = p54_conf_tx,
@@ -2607,21 +2670,10 @@ void p54_free_common(struct ieee80211_hw *dev)
kfree(priv->iq_autocal);
kfree(priv->output_limit);
kfree(priv->curve_data);
+ kfree(priv->used_rxkeys);
#ifdef CONFIG_P54_LEDS
p54_unregister_leds(dev);
#endif /* CONFIG_P54_LEDS */
}
EXPORT_SYMBOL_GPL(p54_free_common);
-
-static int __init p54_init(void)
-{
- return 0;
-}
-
-static void __exit p54_exit(void)
-{
-}
-
-module_init(p54_init);
-module_exit(p54_exit);
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index d1fe577de3d..83116baeb11 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -96,7 +96,7 @@ static void p54spi_spi_write(struct p54s_priv *priv, u8 address,
spi_message_add_tail(&t[0], &m);
t[1].tx_buf = buf;
- t[1].len = len;
+ t[1].len = len & ~1;
spi_message_add_tail(&t[1], &m);
if (len % 2) {
@@ -167,15 +167,31 @@ static const struct p54spi_spi_reg p54spi_registers_array[] =
static int p54spi_wait_bit(struct p54s_priv *priv, u16 reg, __le32 bits)
{
int i;
- __le32 buffer;
for (i = 0; i < 2000; i++) {
- p54spi_spi_read(priv, reg, &buffer, sizeof(buffer));
- if (buffer == bits)
+ __le32 buffer = p54spi_read32(priv, reg);
+ if ((buffer & bits) == bits)
return 1;
+ }
+ return 0;
+}
- msleep(1);
+static int p54spi_spi_write_dma(struct p54s_priv *priv, __le32 base,
+ const void *buf, size_t len)
+{
+ if (!p54spi_wait_bit(priv, SPI_ADRS_DMA_WRITE_CTRL,
+ cpu_to_le32(HOST_ALLOWED))) {
+ dev_err(&priv->spi->dev, "spi_write_dma not allowed "
+ "to DMA write.\n");
+ return -EAGAIN;
}
+
+ p54spi_write16(priv, SPI_ADRS_DMA_WRITE_CTRL,
+ cpu_to_le16(SPI_DMA_WRITE_CTRL_ENABLE));
+
+ p54spi_write16(priv, SPI_ADRS_DMA_WRITE_LEN, cpu_to_le16(len));
+ p54spi_write32(priv, SPI_ADRS_DMA_WRITE_BASE, base);
+ p54spi_spi_write(priv, SPI_ADRS_DMA_DATA, buf, len);
return 0;
}
@@ -228,8 +244,15 @@ static int p54spi_request_eeprom(struct ieee80211_hw *dev)
static int p54spi_upload_firmware(struct ieee80211_hw *dev)
{
struct p54s_priv *priv = dev->priv;
- unsigned long fw_len, fw_addr;
- long _fw_len;
+ unsigned long fw_len, _fw_len;
+ unsigned int offset = 0;
+ int err = 0;
+ u8 *fw;
+
+ fw_len = priv->firmware->size;
+ fw = kmemdup(priv->firmware->data, fw_len, GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
/* stop the device */
p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16(
@@ -244,36 +267,17 @@ static int p54spi_upload_firmware(struct ieee80211_hw *dev)
msleep(TARGET_BOOT_SLEEP);
- fw_addr = ISL38XX_DEV_FIRMWARE_ADDR;
- fw_len = priv->firmware->size;
-
while (fw_len > 0) {
_fw_len = min_t(long, fw_len, SPI_MAX_PACKET_SIZE);
- p54spi_write16(priv, SPI_ADRS_DMA_WRITE_CTRL,
- cpu_to_le16(SPI_DMA_WRITE_CTRL_ENABLE));
-
- if (p54spi_wait_bit(priv, SPI_ADRS_DMA_WRITE_CTRL,
- cpu_to_le32(HOST_ALLOWED)) == 0) {
- dev_err(&priv->spi->dev, "fw_upload not allowed "
- "to DMA write.");
- return -EAGAIN;
- }
-
- p54spi_write16(priv, SPI_ADRS_DMA_WRITE_LEN,
- cpu_to_le16(_fw_len));
- p54spi_write32(priv, SPI_ADRS_DMA_WRITE_BASE,
- cpu_to_le32(fw_addr));
-
- p54spi_spi_write(priv, SPI_ADRS_DMA_DATA,
- &priv->firmware->data, _fw_len);
+ err = p54spi_spi_write_dma(priv, cpu_to_le32(
+ ISL38XX_DEV_FIRMWARE_ADDR + offset),
+ (fw + offset), _fw_len);
+ if (err < 0)
+ goto out;
fw_len -= _fw_len;
- fw_addr += _fw_len;
-
- /* FIXME: I think this doesn't work if firmware is large,
- * this loop goes to second round. fw->data is not
- * increased at all! */
+ offset += _fw_len;
}
BUG_ON(fw_len != 0);
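The rewritten upload loop above fixes what the old FIXME pointed at: the source offset now advances together with the device address. The general shape of the loop, built on the new p54spi_spi_write_dma() helper (the wrapper name stream_chunks and its parameters are invented for illustration):

/* illustration only -- not part of the patch */
static int stream_chunks(struct p54s_priv *priv, u32 dev_addr,
			 const u8 *data, size_t len, size_t chunk)
{
	size_t offset = 0;
	int err;

	while (offset < len) {
		size_t n = min(chunk, len - offset);

		/* both the device address and the source offset advance */
		err = p54spi_spi_write_dma(priv,
					   cpu_to_le32(dev_addr + offset),
					   data + offset, n);
		if (err < 0)
			return err;
		offset += n;
	}
	return 0;
}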
@@ -292,7 +296,10 @@ static int p54spi_upload_firmware(struct ieee80211_hw *dev)
p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16(
SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_RAM_BOOT));
msleep(TARGET_BOOT_SLEEP);
- return 0;
+
+out:
+ kfree(fw);
+ return err;
}
static void p54spi_power_off(struct p54s_priv *priv)
@@ -318,29 +325,21 @@ static inline void p54spi_int_ack(struct p54s_priv *priv, u32 val)
p54spi_write32(priv, SPI_ADRS_HOST_INT_ACK, cpu_to_le32(val));
}
-static void p54spi_wakeup(struct p54s_priv *priv)
+static int p54spi_wakeup(struct p54s_priv *priv)
{
- unsigned long timeout;
- u32 ints;
-
/* wake the chip */
p54spi_write32(priv, SPI_ADRS_ARM_INTERRUPTS,
cpu_to_le32(SPI_TARGET_INT_WAKEUP));
/* And wait for the READY interrupt */
- timeout = jiffies + HZ;
-
- ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
- while (!(ints & SPI_HOST_INT_READY)) {
- if (time_after(jiffies, timeout))
- goto out;
- ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
+ if (!p54spi_wait_bit(priv, SPI_ADRS_HOST_INTERRUPTS,
+ cpu_to_le32(SPI_HOST_INT_READY))) {
+ dev_err(&priv->spi->dev, "INT_READY timeout\n");
+ return -EBUSY;
}
p54spi_int_ack(priv, SPI_HOST_INT_READY);
-
-out:
- return;
+ return 0;
}
static inline void p54spi_sleep(struct p54s_priv *priv)
@@ -372,27 +371,48 @@ static int p54spi_rx(struct p54s_priv *priv)
{
struct sk_buff *skb;
u16 len;
+ u16 rx_head[2];
+#define READAHEAD_SZ (sizeof(rx_head)-sizeof(u16))
- p54spi_wakeup(priv);
-
- /* dummy read to flush SPI DMA controller bug */
- p54spi_read16(priv, SPI_ADRS_GEN_PURP_1);
+ if (p54spi_wakeup(priv) < 0)
+ return -EBUSY;
- len = p54spi_read16(priv, SPI_ADRS_DMA_DATA);
+ /* Read the data size and the first data word in one SPI transaction.
+ * This is a workaround for a firmware/DMA bug where the first data
+ * word gets lost under high load.
+ */
+ p54spi_spi_read(priv, SPI_ADRS_DMA_DATA, rx_head, sizeof(rx_head));
+ len = rx_head[0];
if (len == 0) {
- dev_err(&priv->spi->dev, "rx request of zero bytes");
+ p54spi_sleep(priv);
+ dev_err(&priv->spi->dev, "rx request of zero bytes\n");
return 0;
}
- skb = dev_alloc_skb(len);
+ /* The firmware may insert up to 4 padding bytes after the lmac header,
+ * but it does not amend the size of the SPI data transfer. Such packets
+ * carry the correct data size in the header, and thus reference past the
+ * end of the allocated skb. Reserve an extra 4 bytes for this case. */
+ skb = dev_alloc_skb(len + 4);
if (!skb) {
+ p54spi_sleep(priv);
dev_err(&priv->spi->dev, "could not alloc skb");
- return 0;
+ return -ENOMEM;
}
- p54spi_spi_read(priv, SPI_ADRS_DMA_DATA, skb_put(skb, len), len);
+ if (len <= READAHEAD_SZ) {
+ memcpy(skb_put(skb, len), rx_head + 1, len);
+ } else {
+ memcpy(skb_put(skb, READAHEAD_SZ), rx_head + 1, READAHEAD_SZ);
+ p54spi_spi_read(priv, SPI_ADRS_DMA_DATA,
+ skb_put(skb, len - READAHEAD_SZ),
+ len - READAHEAD_SZ);
+ }
p54spi_sleep(priv);
+ /* Put in the additional bytes to compensate for possible
+ * alignment-caused truncation. */
+ skb_put(skb, 4);
if (p54_rx(priv->hw, skb) == 0)
dev_kfree_skb(skb);
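The READAHEAD_SZ arithmetic above is the subtle part of the workaround: the first transfer returns sizeof(rx_head) bytes, of which the leading u16 is the frame length, so sizeof(rx_head) - sizeof(u16) payload bytes are already in hand before the second read. A stripped-down restatement of the split (variable names invented; skb_put/memcpy are the calls used above):

/* illustration only: rx_head[0] = frame length, rx_head[1] = first payload word */
size_t readahead = sizeof(rx_head) - sizeof(u16);	/* 2 bytes already read */

if (len <= readahead) {
	memcpy(skb_put(skb, len), rx_head + 1, len);	/* fully read ahead */
} else {
	memcpy(skb_put(skb, readahead), rx_head + 1, readahead);
	/* fetch the remaining len - readahead bytes with a second SPI read */
}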
@@ -414,39 +434,28 @@ static irqreturn_t p54spi_interrupt(int irq, void *config)
static int p54spi_tx_frame(struct p54s_priv *priv, struct sk_buff *skb)
{
struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
- struct p54s_dma_regs dma_regs;
- unsigned long timeout;
int ret = 0;
- u32 ints;
-
- p54spi_wakeup(priv);
- dma_regs.cmd = cpu_to_le16(SPI_DMA_WRITE_CTRL_ENABLE);
- dma_regs.len = cpu_to_le16(skb->len);
- dma_regs.addr = hdr->req_id;
+ if (p54spi_wakeup(priv) < 0)
+ return -EBUSY;
- p54spi_spi_write(priv, SPI_ADRS_DMA_WRITE_CTRL, &dma_regs,
- sizeof(dma_regs));
-
- p54spi_spi_write(priv, SPI_ADRS_DMA_DATA, skb->data, skb->len);
+ ret = p54spi_spi_write_dma(priv, hdr->req_id, skb->data, skb->len);
+ if (ret < 0)
+ goto out;
- timeout = jiffies + 2 * HZ;
- ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
- while (!(ints & SPI_HOST_INT_WR_READY)) {
- if (time_after(jiffies, timeout)) {
- dev_err(&priv->spi->dev, "WR_READY timeout");
- ret = -1;
- goto out;
- }
- ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
+ if (!p54spi_wait_bit(priv, SPI_ADRS_HOST_INTERRUPTS,
+ cpu_to_le32(SPI_HOST_INT_WR_READY))) {
+ dev_err(&priv->spi->dev, "WR_READY timeout\n");
+ ret = -EAGAIN;
+ goto out;
}
p54spi_int_ack(priv, SPI_HOST_INT_WR_READY);
- p54spi_sleep(priv);
-out:
if (FREE_AFTER_TX(skb))
p54_free_skb(priv->hw, skb);
+out:
+ p54spi_sleep(priv);
return ret;
}
@@ -516,8 +525,7 @@ static void p54spi_work(struct work_struct *work)
mutex_lock(&priv->mutex);
- if (priv->fw_state == FW_STATE_OFF &&
- priv->fw_state == FW_STATE_RESET)
+ if (priv->fw_state == FW_STATE_OFF)
goto out;
ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
@@ -544,11 +552,6 @@ static void p54spi_work(struct work_struct *work)
}
ret = p54spi_wq_tx(priv);
- if (ret < 0)
- goto out;
-
- ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS);
-
out:
mutex_unlock(&priv->mutex);
}
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 6cc6cbc9234..0e877a104a8 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -81,6 +81,29 @@ static struct usb_device_id p54u_table[] __devinitdata = {
MODULE_DEVICE_TABLE(usb, p54u_table);
+static const struct {
+ u32 intf;
+ enum p54u_hw_type type;
+ const char *fw;
+ const char *fw_legacy;
+ char hw[20];
+} p54u_fwlist[__NUM_P54U_HWTYPES] = {
+ {
+ .type = P54U_NET2280,
+ .intf = FW_LM86,
+ .fw = "isl3886usb",
+ .fw_legacy = "isl3890usb",
+ .hw = "ISL3886 + net2280",
+ },
+ {
+ .type = P54U_3887,
+ .intf = FW_LM87,
+ .fw = "isl3887usb",
+ .fw_legacy = "isl3887usb_bare",
+ .hw = "ISL3887",
+ },
+};
+
static void p54u_rx_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
@@ -125,11 +148,7 @@ static void p54u_rx_cb(struct urb *urb)
}
skb_reset_tail_pointer(skb);
skb_trim(skb, 0);
- if (urb->transfer_buffer != skb_tail_pointer(skb)) {
- /* this should not happen */
- WARN_ON(1);
- urb->transfer_buffer = skb_tail_pointer(skb);
- }
+ urb->transfer_buffer = skb_tail_pointer(skb);
}
skb_queue_tail(&priv->rx_queue, skb);
usb_anchor_urb(urb, &priv->submitted);
@@ -206,53 +225,6 @@ static int p54u_init_urbs(struct ieee80211_hw *dev)
return ret;
}
-static void p54u_tx_3887(struct ieee80211_hw *dev, struct sk_buff *skb)
-{
- struct p54u_priv *priv = dev->priv;
- struct urb *addr_urb, *data_urb;
- int err = 0;
-
- addr_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!addr_urb)
- return;
-
- data_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!data_urb) {
- usb_free_urb(addr_urb);
- return;
- }
-
- usb_fill_bulk_urb(addr_urb, priv->udev,
- usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
- &((struct p54_hdr *)skb->data)->req_id, 4,
- p54u_tx_dummy_cb, dev);
- usb_fill_bulk_urb(data_urb, priv->udev,
- usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
- skb->data, skb->len, FREE_AFTER_TX(skb) ?
- p54u_tx_cb : p54u_tx_dummy_cb, skb);
- addr_urb->transfer_flags |= URB_ZERO_PACKET;
- data_urb->transfer_flags |= URB_ZERO_PACKET;
-
- usb_anchor_urb(addr_urb, &priv->submitted);
- err = usb_submit_urb(addr_urb, GFP_ATOMIC);
- if (err) {
- usb_unanchor_urb(addr_urb);
- goto out;
- }
-
- usb_anchor_urb(data_urb, &priv->submitted);
- err = usb_submit_urb(data_urb, GFP_ATOMIC);
- if (err)
- usb_unanchor_urb(data_urb);
-
- out:
- usb_free_urb(addr_urb);
- usb_free_urb(data_urb);
-
- if (err)
- p54_free_skb(dev, skb);
-}
-
static __le32 p54u_lm87_chksum(const __le32 *data, size_t length)
{
u32 chk = 0;
@@ -425,20 +397,16 @@ static int p54u_bulk_msg(struct p54u_priv *priv, unsigned int ep,
data, len, &alen, 2000);
}
-static const char p54u_romboot_3887[] = "~~~~";
-static const char p54u_firmware_upload_3887[] = "<\r";
-
-static int p54u_device_reset_3887(struct ieee80211_hw *dev)
+static int p54u_device_reset(struct ieee80211_hw *dev)
{
struct p54u_priv *priv = dev->priv;
int ret, lock = (priv->intf->condition != USB_INTERFACE_BINDING);
- u8 buf[4];
if (lock) {
ret = usb_lock_device_for_reset(priv->udev, priv->intf);
if (ret < 0) {
dev_err(&priv->udev->dev, "(p54usb) unable to lock "
- " device for reset: %d\n", ret);
+ "device for reset (%d)!\n", ret);
return ret;
}
}
@@ -447,26 +415,34 @@ static int p54u_device_reset_3887(struct ieee80211_hw *dev)
if (lock)
usb_unlock_device(priv->udev);
- if (ret) {
+ if (ret)
dev_err(&priv->udev->dev, "(p54usb) unable to reset "
- "device: %d\n", ret);
- return ret;
- }
+ "device (%d)!\n", ret);
+
+ return ret;
+}
+
+static const char p54u_romboot_3887[] = "~~~~";
+static int p54u_firmware_reset_3887(struct ieee80211_hw *dev)
+{
+ struct p54u_priv *priv = dev->priv;
+ u8 buf[4];
+ int ret;
memcpy(&buf, p54u_romboot_3887, sizeof(buf));
ret = p54u_bulk_msg(priv, P54U_PIPE_DATA,
buf, sizeof(buf));
if (ret)
dev_err(&priv->udev->dev, "(p54usb) unable to jump to "
- "boot ROM: %d\n", ret);
+ "boot ROM (%d)!\n", ret);
return ret;
}
+static const char p54u_firmware_upload_3887[] = "<\r";
static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
{
struct p54u_priv *priv = dev->priv;
- const struct firmware *fw_entry = NULL;
int err, alen;
u8 carry = 0;
u8 *buf, *tmp;
@@ -475,51 +451,29 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
struct x2_header *hdr;
unsigned long timeout;
+ err = p54u_firmware_reset_3887(dev);
+ if (err)
+ return err;
+
tmp = buf = kmalloc(P54U_FW_BLOCK, GFP_KERNEL);
if (!buf) {
dev_err(&priv->udev->dev, "(p54usb) cannot allocate firmware"
"upload buffer!\n");
- err = -ENOMEM;
- goto err_bufalloc;
- }
-
- err = p54u_device_reset_3887(dev);
- if (err)
- goto err_reset;
-
- err = request_firmware(&fw_entry, "isl3887usb", &priv->udev->dev);
- if (err) {
- dev_err(&priv->udev->dev, "p54usb: cannot find firmware "
- "(isl3887usb)\n");
- err = request_firmware(&fw_entry, "isl3887usb_bare",
- &priv->udev->dev);
- if (err)
- goto err_req_fw_failed;
- }
-
- err = p54_parse_firmware(dev, fw_entry);
- if (err)
- goto err_upload_failed;
-
- if (priv->common.fw_interface != FW_LM87) {
- dev_err(&priv->udev->dev, "wrong firmware, "
- "please get a LM87 firmware and try again.\n");
- err = -EINVAL;
- goto err_upload_failed;
+ return -ENOMEM;
}
- left = block_size = min((size_t)P54U_FW_BLOCK, fw_entry->size);
+ left = block_size = min((size_t)P54U_FW_BLOCK, priv->fw->size);
strcpy(buf, p54u_firmware_upload_3887);
left -= strlen(p54u_firmware_upload_3887);
tmp += strlen(p54u_firmware_upload_3887);
- data = fw_entry->data;
- remains = fw_entry->size;
+ data = priv->fw->data;
+ remains = priv->fw->size;
hdr = (struct x2_header *)(buf + strlen(p54u_firmware_upload_3887));
memcpy(hdr->signature, X2_SIGNATURE, X2_SIGNATURE_SIZE);
hdr->fw_load_addr = cpu_to_le32(ISL38XX_DEV_FIRMWARE_ADDR);
- hdr->fw_length = cpu_to_le32(fw_entry->size);
+ hdr->fw_length = cpu_to_le32(priv->fw->size);
hdr->crc = cpu_to_le32(~crc32_le(~0, (void *)&hdr->fw_load_addr,
sizeof(u32)*2));
left -= sizeof(*hdr);
@@ -561,7 +515,8 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
left = block_size = min((unsigned int)P54U_FW_BLOCK, remains);
}
- *((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, fw_entry->data, fw_entry->size));
+ *((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, priv->fw->data,
+ priv->fw->size));
err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32));
if (err) {
dev_err(&priv->udev->dev, "(p54usb) firmware upload failed!\n");
@@ -612,19 +567,14 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
if (err)
goto err_upload_failed;
- err_upload_failed:
- release_firmware(fw_entry);
- err_req_fw_failed:
- err_reset:
+err_upload_failed:
kfree(buf);
- err_bufalloc:
return err;
}
static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
{
struct p54u_priv *priv = dev->priv;
- const struct firmware *fw_entry = NULL;
const struct p54p_csr *devreg = (const struct p54p_csr *) P54U_DEV_BASE;
int err, alen;
void *buf;
@@ -639,33 +589,6 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
return -ENOMEM;
}
- err = request_firmware(&fw_entry, "isl3886usb", &priv->udev->dev);
- if (err) {
- dev_err(&priv->udev->dev, "(p54usb) cannot find firmware "
- "(isl3886usb)\n");
- err = request_firmware(&fw_entry, "isl3890usb",
- &priv->udev->dev);
- if (err) {
- kfree(buf);
- return err;
- }
- }
-
- err = p54_parse_firmware(dev, fw_entry);
- if (err) {
- kfree(buf);
- release_firmware(fw_entry);
- return err;
- }
-
- if (priv->common.fw_interface != FW_LM86) {
- dev_err(&priv->udev->dev, "wrong firmware, "
- "please get a LM86(USB) firmware and try again.\n");
- kfree(buf);
- release_firmware(fw_entry);
- return -EINVAL;
- }
-
#define P54U_WRITE(type, addr, data) \
do {\
err = p54u_write(priv, buf, type,\
@@ -765,8 +688,8 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg);
/* finally, we can upload firmware now! */
- remains = fw_entry->size;
- data = fw_entry->data;
+ remains = priv->fw->size;
+ data = priv->fw->data;
offset = ISL38XX_DEV_FIRMWARE_ADDR;
while (remains) {
@@ -875,12 +798,54 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
#undef P54U_WRITE
#undef P54U_READ
- fail:
- release_firmware(fw_entry);
+fail:
kfree(buf);
return err;
}
+static int p54u_load_firmware(struct ieee80211_hw *dev)
+{
+ struct p54u_priv *priv = dev->priv;
+ int err, i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(p54u_fwlist) != __NUM_P54U_HWTYPES);
+
+ for (i = 0; i < __NUM_P54U_HWTYPES; i++)
+ if (p54u_fwlist[i].type == priv->hw_type)
+ break;
+
+ if (i == __NUM_P54U_HWTYPES)
+ return -EOPNOTSUPP;
+
+ err = request_firmware(&priv->fw, p54u_fwlist[i].fw, &priv->udev->dev);
+ if (err) {
+ dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
+ "(%d)!\n", p54u_fwlist[i].fw, err);
+
+ err = request_firmware(&priv->fw, p54u_fwlist[i].fw_legacy,
+ &priv->udev->dev);
+ if (err)
+ return err;
+ }
+
+ err = p54_parse_firmware(dev, priv->fw);
+ if (err)
+ goto out;
+
+ if (priv->common.fw_interface != p54u_fwlist[i].intf) {
+ dev_err(&priv->udev->dev, "wrong firmware, please get "
+ "a firmware for \"%s\" and try again.\n",
+ p54u_fwlist[i].hw);
+ err = -EINVAL;
+ }
+
+out:
+ if (err)
+ release_firmware(priv->fw);
+
+ return err;
+}
+
static int p54u_open(struct ieee80211_hw *dev)
{
struct p54u_priv *priv = dev->priv;
@@ -922,6 +887,7 @@ static int __devinit p54u_probe(struct usb_interface *intf,
}
priv = dev->priv;
+ priv->hw_type = P54U_INVALID_HW;
SET_IEEE80211_DEV(dev, &intf->dev);
usb_set_intfdata(intf, dev);
@@ -953,37 +919,48 @@ static int __devinit p54u_probe(struct usb_interface *intf,
priv->common.open = p54u_open;
priv->common.stop = p54u_stop;
if (recognized_pipes < P54U_PIPE_NUMBER) {
+#ifdef CONFIG_PM
+ /* ISL3887 needs a full reset on resume */
+ udev->reset_resume = 1;
+ err = p54u_device_reset(dev);
+#endif
+
priv->hw_type = P54U_3887;
- err = p54u_upload_firmware_3887(dev);
- if (priv->common.fw_interface == FW_LM87) {
- dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr);
- priv->common.tx_hdr_len = sizeof(struct lm87_tx_hdr);
- priv->common.tx = p54u_tx_lm87;
- } else
- priv->common.tx = p54u_tx_3887;
+ dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr);
+ priv->common.tx_hdr_len = sizeof(struct lm87_tx_hdr);
+ priv->common.tx = p54u_tx_lm87;
+ priv->upload_fw = p54u_upload_firmware_3887;
} else {
priv->hw_type = P54U_NET2280;
dev->extra_tx_headroom += sizeof(struct net2280_tx_hdr);
priv->common.tx_hdr_len = sizeof(struct net2280_tx_hdr);
priv->common.tx = p54u_tx_net2280;
- err = p54u_upload_firmware_net2280(dev);
+ priv->upload_fw = p54u_upload_firmware_net2280;
}
+ err = p54u_load_firmware(dev);
if (err)
goto err_free_dev;
+ err = priv->upload_fw(dev);
+ if (err)
+ goto err_free_fw;
+
p54u_open(dev);
err = p54_read_eeprom(dev);
p54u_stop(dev);
if (err)
- goto err_free_dev;
+ goto err_free_fw;
err = p54_register_common(dev, &udev->dev);
if (err)
- goto err_free_dev;
+ goto err_free_fw;
return 0;
- err_free_dev:
+err_free_fw:
+ release_firmware(priv->fw);
+
+err_free_dev:
ieee80211_free_hw(dev);
usb_set_intfdata(intf, NULL);
usb_put_dev(udev);
@@ -1002,20 +979,64 @@ static void __devexit p54u_disconnect(struct usb_interface *intf)
priv = dev->priv;
usb_put_dev(interface_to_usbdev(intf));
+ release_firmware(priv->fw);
p54_free_common(dev);
ieee80211_free_hw(dev);
}
static int p54u_pre_reset(struct usb_interface *intf)
{
+ struct ieee80211_hw *dev = usb_get_intfdata(intf);
+
+ if (!dev)
+ return -ENODEV;
+
+ p54u_stop(dev);
return 0;
}
+static int p54u_resume(struct usb_interface *intf)
+{
+ struct ieee80211_hw *dev = usb_get_intfdata(intf);
+ struct p54u_priv *priv;
+
+ if (!dev)
+ return -ENODEV;
+
+ priv = dev->priv;
+ if (unlikely(!(priv->upload_fw && priv->fw)))
+ return 0;
+
+ return priv->upload_fw(dev);
+}
+
static int p54u_post_reset(struct usb_interface *intf)
{
+ struct ieee80211_hw *dev = usb_get_intfdata(intf);
+ struct p54u_priv *priv;
+ int err;
+
+ err = p54u_resume(intf);
+ if (err)
+ return err;
+
+ /* reinitialize old device state */
+ priv = dev->priv;
+ if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED)
+ ieee80211_restart_hw(dev);
+
return 0;
}
+#ifdef CONFIG_PM
+
+static int p54u_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ return p54u_pre_reset(intf);
+}
+
+#endif /* CONFIG_PM */
+
static struct usb_driver p54u_driver = {
.name = "p54usb",
.id_table = p54u_table,
@@ -1023,6 +1044,11 @@ static struct usb_driver p54u_driver = {
.disconnect = p54u_disconnect,
.pre_reset = p54u_pre_reset,
.post_reset = p54u_post_reset,
+#ifdef CONFIG_PM
+ .suspend = p54u_suspend,
+ .resume = p54u_resume,
+ .reset_resume = p54u_resume,
+#endif /* CONFIG_PM */
.soft_unbind = 1,
};
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index 8bc58982d8d..e935b79f7f7 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -123,18 +123,26 @@ struct p54u_rx_info {
struct ieee80211_hw *dev;
};
+enum p54u_hw_type {
+ P54U_INVALID_HW,
+ P54U_NET2280,
+ P54U_3887,
+
+ /* keep last */
+ __NUM_P54U_HWTYPES,
+};
+
struct p54u_priv {
struct p54_common common;
struct usb_device *udev;
struct usb_interface *intf;
- enum {
- P54U_NET2280 = 0,
- P54U_3887
- } hw_type;
+ int (*upload_fw)(struct ieee80211_hw *dev);
+ enum p54u_hw_type hw_type;
spinlock_t lock;
struct sk_buff_head rx_queue;
struct usb_anchor submitted;
+ const struct firmware *fw;
};
#endif /* P54USB_H */
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index ef3ef4551b3..8f621099344 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -87,7 +87,6 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
unsigned long flags;
unsigned char wds_mac[6];
u32 curr_frag;
- int err = 0;
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit \n");
@@ -107,8 +106,6 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
ISL38XX_DEV_INT_REG);
udelay(ISL38XX_WRITEIO_DELAY);
-
- err = -EBUSY;
goto drop_free;
}
/* Check alignment and WDS frame formatting. The start of the packet should
@@ -152,7 +149,6 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(newskb == NULL)) {
printk(KERN_ERR "%s: Cannot allocate skb\n",
ndev->name);
- err = -ENOMEM;
goto drop_free;
}
newskb_offset = (4 - (long) newskb->data) & 0x03;
@@ -197,8 +193,6 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(pci_map_address == 0)) {
printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
ndev->name);
-
- err = -EIO;
goto drop_free;
}
/* Place the fragment in the control block structure. */
@@ -246,7 +240,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
ndev->stats.tx_dropped++;
spin_unlock_irqrestore(&priv->slock, flags);
dev_kfree_skb(skb);
- return err;
+ return NETDEV_TX_OK;
}
static inline int
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index fa90d1d8d82..b10b0383dfa 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -892,7 +892,7 @@ static int ray_dev_init(struct net_device *dev)
#endif /* RAY_IMMEDIATE_INIT */
/* copy mac and broadcast addresses to linux device */
- memcpy(&dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN);
+ memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN);
memset(dev->broadcast, 0xff, ETH_ALEN);
DEBUG(2, "ray_dev_init ending\n");
@@ -923,7 +923,7 @@ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!(pcmcia_dev_present(link))) {
DEBUG(2, "ray_dev_start_xmit - device not present\n");
- return -1;
+ return NETDEV_TX_LOCKED;
}
DEBUG(3, "ray_dev_start_xmit(skb=%p, dev=%p)\n", skb, dev);
if (local->authentication_state == NEED_TO_AUTH) {
@@ -931,7 +931,7 @@ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!build_auth_frame(local, local->auth_id, OPEN_AUTH_REQUEST)) {
local->authentication_state = AUTHENTICATED;
netif_stop_queue(dev);
- return 1;
+ return NETDEV_TX_BUSY;
}
}
@@ -944,7 +944,7 @@ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev)
case XMIT_NO_CCS:
case XMIT_NEED_AUTH:
netif_stop_queue(dev);
- return 1;
+ return NETDEV_TX_BUSY;
case XMIT_NO_INTR:
case XMIT_MSG_BAD:
case XMIT_OK:
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index bebf735cd4b..3bec3dbd345 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2,7 +2,7 @@
* Driver for RNDIS based wireless USB devices.
*
* Copyright (C) 2007 by Bjorge Dijkstra <bjd@jooz.net>
- * Copyright (C) 2008 by Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ * Copyright (C) 2008-2009 by Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -42,6 +42,7 @@
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <net/iw_handler.h>
+#include <net/cfg80211.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/rndis_host.h>
@@ -156,43 +157,55 @@ MODULE_PARM_DESC(workaround_interval,
#define NDIS_802_11_LENGTH_RATES_EX 16
enum ndis_80211_net_type {
- ndis_80211_type_freq_hop,
- ndis_80211_type_direct_seq,
- ndis_80211_type_ofdm_a,
- ndis_80211_type_ofdm_g
+ NDIS_80211_TYPE_FREQ_HOP,
+ NDIS_80211_TYPE_DIRECT_SEQ,
+ NDIS_80211_TYPE_OFDM_A,
+ NDIS_80211_TYPE_OFDM_G
};
enum ndis_80211_net_infra {
- ndis_80211_infra_adhoc,
- ndis_80211_infra_infra,
- ndis_80211_infra_auto_unknown
+ NDIS_80211_INFRA_ADHOC,
+ NDIS_80211_INFRA_INFRA,
+ NDIS_80211_INFRA_AUTO_UNKNOWN
};
enum ndis_80211_auth_mode {
- ndis_80211_auth_open,
- ndis_80211_auth_shared,
- ndis_80211_auth_auto_switch,
- ndis_80211_auth_wpa,
- ndis_80211_auth_wpa_psk,
- ndis_80211_auth_wpa_none,
- ndis_80211_auth_wpa2,
- ndis_80211_auth_wpa2_psk
+ NDIS_80211_AUTH_OPEN,
+ NDIS_80211_AUTH_SHARED,
+ NDIS_80211_AUTH_AUTO_SWITCH,
+ NDIS_80211_AUTH_WPA,
+ NDIS_80211_AUTH_WPA_PSK,
+ NDIS_80211_AUTH_WPA_NONE,
+ NDIS_80211_AUTH_WPA2,
+ NDIS_80211_AUTH_WPA2_PSK
};
enum ndis_80211_encr_status {
- ndis_80211_encr_wep_enabled,
- ndis_80211_encr_disabled,
- ndis_80211_encr_wep_key_absent,
- ndis_80211_encr_not_supported,
- ndis_80211_encr_tkip_enabled,
- ndis_80211_encr_tkip_key_absent,
- ndis_80211_encr_ccmp_enabled,
- ndis_80211_encr_ccmp_key_absent
+ NDIS_80211_ENCR_WEP_ENABLED,
+ NDIS_80211_ENCR_DISABLED,
+ NDIS_80211_ENCR_WEP_KEY_ABSENT,
+ NDIS_80211_ENCR_NOT_SUPPORTED,
+ NDIS_80211_ENCR_TKIP_ENABLED,
+ NDIS_80211_ENCR_TKIP_KEY_ABSENT,
+ NDIS_80211_ENCR_CCMP_ENABLED,
+ NDIS_80211_ENCR_CCMP_KEY_ABSENT
};
enum ndis_80211_priv_filter {
- ndis_80211_priv_accept_all,
- ndis_80211_priv_8021x_wep
+ NDIS_80211_PRIV_ACCEPT_ALL,
+ NDIS_80211_PRIV_8021X_WEP
+};
+
+enum ndis_80211_addkey_bits {
+ NDIS_80211_ADDKEY_8021X_AUTH = cpu_to_le32(1 << 28),
+ NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ = cpu_to_le32(1 << 29),
+ NDIS_80211_ADDKEY_PAIRWISE_KEY = cpu_to_le32(1 << 30),
+ NDIS_80211_ADDKEY_TRANSMIT_KEY = cpu_to_le32(1 << 31)
+};
+
+enum ndis_80211_addwep_bits {
+ NDIS_80211_ADDWEP_PERCLIENT_KEY = cpu_to_le32(1 << 30),
+ NDIS_80211_ADDWEP_TRANSMIT_KEY = cpu_to_le32(1 << 31)
};
struct ndis_80211_ssid {
@@ -308,7 +321,6 @@ enum wpa_key_mgmt { KEY_MGMT_802_1X, KEY_MGMT_PSK, KEY_MGMT_NONE,
#define CAP_MODE_80211B 2
#define CAP_MODE_80211G 4
#define CAP_MODE_MASK 7
-#define CAP_SUPPORT_TXPOWER 8
#define WORK_LINK_UP (1<<0)
#define WORK_LINK_DOWN (1<<1)
@@ -316,25 +328,61 @@ enum wpa_key_mgmt { KEY_MGMT_802_1X, KEY_MGMT_PSK, KEY_MGMT_NONE,
#define COMMAND_BUFFER_SIZE (CONTROL_BUFFER_SIZE + sizeof(struct rndis_set))
-/* RNDIS device private data */
-struct rndis_wext_private {
- char name[32];
+static const struct ieee80211_channel rndis_channels[] = {
+ { .center_freq = 2412 },
+ { .center_freq = 2417 },
+ { .center_freq = 2422 },
+ { .center_freq = 2427 },
+ { .center_freq = 2432 },
+ { .center_freq = 2437 },
+ { .center_freq = 2442 },
+ { .center_freq = 2447 },
+ { .center_freq = 2452 },
+ { .center_freq = 2457 },
+ { .center_freq = 2462 },
+ { .center_freq = 2467 },
+ { .center_freq = 2472 },
+ { .center_freq = 2484 },
+};
+static const struct ieee80211_rate rndis_rates[] = {
+ { .bitrate = 10 },
+ { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 60 },
+ { .bitrate = 90 },
+ { .bitrate = 120 },
+ { .bitrate = 180 },
+ { .bitrate = 240 },
+ { .bitrate = 360 },
+ { .bitrate = 480 },
+ { .bitrate = 540 }
+};
+
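The const rndis_channels[]/rndis_rates[] tables above are templates: the per-device copies declared below in struct rndis_wlan_private exist so each device can fill in runtime fields before handing the band to cfg80211. The wiring itself is not part of this hunk; a hedged sketch of how such tables are typically hooked up (field names as in the 2.6.30-era cfg80211 API, priv and wiphy taken as given):

/* sketch only: wire the template tables into the wiphy's 2.4 GHz band */
memcpy(priv->channels, rndis_channels, sizeof(rndis_channels));
memcpy(priv->rates, rndis_rates, sizeof(rndis_rates));

priv->band.band = IEEE80211_BAND_2GHZ;
priv->band.channels = priv->channels;
priv->band.n_channels = ARRAY_SIZE(priv->channels);
priv->band.bitrates = priv->rates;
priv->band.n_bitrates = ARRAY_SIZE(priv->rates);

wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;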
+/* RNDIS device private data */
+struct rndis_wlan_private {
struct usbnet *usbdev;
+ struct wireless_dev wdev;
+
+ struct cfg80211_scan_request *scan_request;
+
struct workqueue_struct *workqueue;
struct delayed_work stats_work;
+ struct delayed_work scan_work;
struct work_struct work;
struct mutex command_lock;
spinlock_t stats_lock;
unsigned long work_pending;
+ struct ieee80211_supported_band band;
+ struct ieee80211_channel channels[ARRAY_SIZE(rndis_channels)];
+ struct ieee80211_rate rates[ARRAY_SIZE(rndis_rates)];
+
struct iw_statistics iwstats;
struct iw_statistics privstats;
- int nick_len;
- char nick[32];
-
int caps;
int multicast_size;
@@ -357,6 +405,7 @@ struct rndis_wext_private {
int encr_tx_key_index;
char encr_keys[4][32];
int encr_key_len[4];
+ char encr_key_wpa[4];
int wpa_version;
int wpa_keymgmt;
int wpa_authalg;
@@ -368,8 +417,22 @@ struct rndis_wext_private {
u8 command_buffer[COMMAND_BUFFER_SIZE];
};
+/*
+ * cfg80211 ops
+ */
+static int rndis_change_virtual_intf(struct wiphy *wiphy, int ifindex,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params);
+
+static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_scan_request *request);
-static const int rates_80211g[8] = { 6, 9, 12, 18, 24, 36, 48, 54 };
+static struct cfg80211_ops rndis_config_ops = {
+ .change_virtual_intf = rndis_change_virtual_intf,
+ .scan = rndis_scan,
+};
+
+static void *rndis_wiphy_privid = &rndis_wiphy_privid;
static const int bcm4320_power_output[4] = { 25, 50, 75, 100 };
@@ -378,13 +441,13 @@ static const unsigned char ffff_bssid[ETH_ALEN] = { 0xff, 0xff, 0xff,
0xff, 0xff, 0xff };
-static struct rndis_wext_private *get_rndis_wext_priv(struct usbnet *dev)
+static struct rndis_wlan_private *get_rndis_wlan_priv(struct usbnet *dev)
{
- return (struct rndis_wext_private *)dev->driver_priv;
+ return (struct rndis_wlan_private *)dev->driver_priv;
}
-static u32 get_bcm4320_power(struct rndis_wext_private *priv)
+static u32 get_bcm4320_power(struct rndis_wlan_private *priv)
{
return BCM4320_DEFAULT_TXPOWER *
bcm4320_power_output[priv->param_power_output] / 100;
@@ -417,7 +480,7 @@ static int rndis_error_status(__le32 rndis_status)
static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(dev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
union {
void *buf;
struct rndis_msg_hdr *header;
@@ -463,7 +526,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(dev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
union {
void *buf;
struct rndis_msg_hdr *header;
@@ -584,7 +647,7 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param,
ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER,
infobuf, info_len);
if (ret != 0)
- devdbg(dev, "setting rndis config paramater failed, %d.", ret);
+ devdbg(dev, "setting rndis config parameter failed, %d.", ret);
kfree(infobuf);
return ret;
@@ -684,7 +747,7 @@ static int get_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
int ret;
ret = rndis_set_oid(usbdev, OID_802_11_SSID, ssid, sizeof(*ssid));
@@ -731,7 +794,7 @@ static int is_associated(struct usbnet *usbdev)
static int disassociate(struct usbnet *usbdev, int reset_ssid)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ndis_80211_ssid ssid;
int i, ret = 0;
@@ -763,7 +826,7 @@ static int disassociate(struct usbnet *usbdev, int reset_ssid)
static int set_auth_mode(struct usbnet *usbdev, int wpa_version, int authalg)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
__le32 tmp;
int auth_mode, ret;
@@ -772,23 +835,23 @@ static int set_auth_mode(struct usbnet *usbdev, int wpa_version, int authalg)
if (wpa_version & IW_AUTH_WPA_VERSION_WPA2) {
if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_802_1X)
- auth_mode = ndis_80211_auth_wpa2;
+ auth_mode = NDIS_80211_AUTH_WPA2;
else
- auth_mode = ndis_80211_auth_wpa2_psk;
+ auth_mode = NDIS_80211_AUTH_WPA2_PSK;
} else if (wpa_version & IW_AUTH_WPA_VERSION_WPA) {
if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_802_1X)
- auth_mode = ndis_80211_auth_wpa;
+ auth_mode = NDIS_80211_AUTH_WPA;
else if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_PSK)
- auth_mode = ndis_80211_auth_wpa_psk;
+ auth_mode = NDIS_80211_AUTH_WPA_PSK;
else
- auth_mode = ndis_80211_auth_wpa_none;
+ auth_mode = NDIS_80211_AUTH_WPA_NONE;
} else if (authalg & IW_AUTH_ALG_SHARED_KEY) {
if (authalg & IW_AUTH_ALG_OPEN_SYSTEM)
- auth_mode = ndis_80211_auth_auto_switch;
+ auth_mode = NDIS_80211_AUTH_AUTO_SWITCH;
else
- auth_mode = ndis_80211_auth_shared;
+ auth_mode = NDIS_80211_AUTH_SHARED;
} else
- auth_mode = ndis_80211_auth_open;
+ auth_mode = NDIS_80211_AUTH_OPEN;
tmp = cpu_to_le32(auth_mode);
ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp,
@@ -806,16 +869,16 @@ static int set_auth_mode(struct usbnet *usbdev, int wpa_version, int authalg)
static int set_priv_filter(struct usbnet *usbdev)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
__le32 tmp;
devdbg(usbdev, "set_priv_filter: wpa_version=0x%x", priv->wpa_version);
if (priv->wpa_version & IW_AUTH_WPA_VERSION_WPA2 ||
priv->wpa_version & IW_AUTH_WPA_VERSION_WPA)
- tmp = cpu_to_le32(ndis_80211_priv_8021x_wep);
+ tmp = cpu_to_le32(NDIS_80211_PRIV_8021X_WEP);
else
- tmp = cpu_to_le32(ndis_80211_priv_accept_all);
+ tmp = cpu_to_le32(NDIS_80211_PRIV_ACCEPT_ALL);
return rndis_set_oid(usbdev, OID_802_11_PRIVACY_FILTER, &tmp,
sizeof(tmp));
@@ -824,7 +887,7 @@ static int set_priv_filter(struct usbnet *usbdev)
static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
__le32 tmp;
int encr_mode, ret;
@@ -833,18 +896,18 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
groupwise);
if (pairwise & IW_AUTH_CIPHER_CCMP)
- encr_mode = ndis_80211_encr_ccmp_enabled;
+ encr_mode = NDIS_80211_ENCR_CCMP_ENABLED;
else if (pairwise & IW_AUTH_CIPHER_TKIP)
- encr_mode = ndis_80211_encr_tkip_enabled;
+ encr_mode = NDIS_80211_ENCR_TKIP_ENABLED;
else if (pairwise &
(IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
- encr_mode = ndis_80211_encr_wep_enabled;
+ encr_mode = NDIS_80211_ENCR_WEP_ENABLED;
else if (groupwise & IW_AUTH_CIPHER_CCMP)
- encr_mode = ndis_80211_encr_ccmp_enabled;
+ encr_mode = NDIS_80211_ENCR_CCMP_ENABLED;
else if (groupwise & IW_AUTH_CIPHER_TKIP)
- encr_mode = ndis_80211_encr_tkip_enabled;
+ encr_mode = NDIS_80211_ENCR_TKIP_ENABLED;
else
- encr_mode = ndis_80211_encr_disabled;
+ encr_mode = NDIS_80211_ENCR_DISABLED;
tmp = cpu_to_le32(encr_mode);
ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp,
@@ -862,7 +925,7 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
static int set_assoc_params(struct usbnet *usbdev)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
set_auth_mode(usbdev, priv->wpa_version, priv->wpa_authalg);
set_priv_filter(usbdev);
@@ -874,7 +937,7 @@ static int set_assoc_params(struct usbnet *usbdev)
static int set_infra_mode(struct usbnet *usbdev, int mode)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
__le32 tmp;
int ret, i;
@@ -894,7 +957,7 @@ static int set_infra_mode(struct usbnet *usbdev, int mode)
if (priv->wpa_keymgmt == 0 ||
priv->wpa_keymgmt == IW_AUTH_KEY_MGMT_802_1X) {
for (i = 0; i < 4; i++) {
- if (priv->encr_key_len[i] > 0)
+ if (priv->encr_key_len[i] > 0 && !priv->encr_key_wpa[i])
add_wep_key(usbdev, priv->encr_keys[i],
priv->encr_key_len[i], i);
}
@@ -907,12 +970,12 @@ static int set_infra_mode(struct usbnet *usbdev, int mode)
static void set_default_iw_params(struct usbnet *usbdev)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
priv->wpa_keymgmt = 0;
priv->wpa_version = 0;
- set_infra_mode(usbdev, ndis_80211_infra_infra);
+ set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA);
set_auth_mode(usbdev, IW_AUTH_WPA_VERSION_DISABLED,
IW_AUTH_ALG_OPEN_SYSTEM);
set_priv_filter(usbdev);
@@ -933,7 +996,7 @@ static int deauthenticate(struct usbnet *usbdev)
/* index must be 0 - N, as per NDIS */
static int add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ndis_80211_wep_key ndis_key;
int ret;
@@ -948,7 +1011,7 @@ static int add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index)
memcpy(&ndis_key.material, key, key_len);
if (index == priv->encr_tx_key_index) {
- ndis_key.index |= cpu_to_le32(1 << 31);
+ ndis_key.index |= NDIS_80211_ADDWEP_TRANSMIT_KEY;
ret = set_encr_mode(usbdev, IW_AUTH_CIPHER_WEP104,
IW_AUTH_CIPHER_NONE);
if (ret)
@@ -965,16 +1028,85 @@ static int add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index)
}
priv->encr_key_len[index] = key_len;
+ priv->encr_key_wpa[index] = 0;
memcpy(&priv->encr_keys[index], key, key_len);
return 0;
}
+static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
+ int index, const struct sockaddr *addr,
+ const u8 *rx_seq, int alg, int flags)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ struct ndis_80211_key ndis_key;
+ int ret;
+
+ if (index < 0 || index >= 4)
+ return -EINVAL;
+ if (key_len > sizeof(ndis_key.material) || key_len < 0)
+ return -EINVAL;
+ if ((flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ) && !rx_seq)
+ return -EINVAL;
+ if ((flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) && !addr)
+ return -EINVAL;
+
+ devdbg(usbdev, "add_wpa_key(%i): flags:%i%i%i", index,
+ !!(flags & NDIS_80211_ADDKEY_TRANSMIT_KEY),
+ !!(flags & NDIS_80211_ADDKEY_PAIRWISE_KEY),
+ !!(flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ));
+
+ memset(&ndis_key, 0, sizeof(ndis_key));
+
+ ndis_key.size = cpu_to_le32(sizeof(ndis_key) -
+ sizeof(ndis_key.material) + key_len);
+ ndis_key.length = cpu_to_le32(key_len);
+ ndis_key.index = cpu_to_le32(index) | flags;
+
+ if (alg == IW_ENCODE_ALG_TKIP && key_len == 32) {
+ /* wpa_supplicant gives us the Michael MIC RX/TX keys in a
+ * different order than the NDIS spec, so swap them here. */
+ memcpy(ndis_key.material, key, 16);
+ memcpy(ndis_key.material + 16, key + 24, 8);
+ memcpy(ndis_key.material + 24, key + 16, 8);
+ } else
+ memcpy(ndis_key.material, key, key_len);
+
+ if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ)
+ memcpy(ndis_key.rsc, rx_seq, 6);
+
+ if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) {
+ /* pairwise key */
+ memcpy(ndis_key.bssid, addr->sa_data, ETH_ALEN);
+ } else {
+ /* group key */
+ if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
+ memset(ndis_key.bssid, 0xff, ETH_ALEN);
+ else
+ get_bssid(usbdev, ndis_key.bssid);
+ }
+
+ ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key,
+ le32_to_cpu(ndis_key.size));
+ devdbg(usbdev, "add_wpa_key: OID_802_11_ADD_KEY -> %08X", ret);
+ if (ret != 0)
+ return ret;
+
+ priv->encr_key_len[index] = key_len;
+ priv->encr_key_wpa[index] = 1;
+
+ if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY)
+ priv->encr_tx_key_index = index;
+
+ return 0;
+}
+
+
/* remove_key is for both wep and wpa */
static int remove_key(struct usbnet *usbdev, int index, u8 bssid[ETH_ALEN])
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ndis_80211_remove_key remove_key;
__le32 keyindex;
int ret;
@@ -983,6 +1115,7 @@ static int remove_key(struct usbnet *usbdev, int index, u8 bssid[ETH_ALEN])
return 0;
priv->encr_key_len[index] = 0;
+ priv->encr_key_wpa[index] = 0;
memset(&priv->encr_keys[index], 0, sizeof(priv->encr_keys[index]));
if (priv->wpa_cipher_pair == IW_AUTH_CIPHER_TKIP ||
@@ -994,7 +1127,8 @@ static int remove_key(struct usbnet *usbdev, int index, u8 bssid[ETH_ALEN])
if (bssid) {
/* pairwise key */
if (memcmp(bssid, ffff_bssid, ETH_ALEN) != 0)
- remove_key.index |= cpu_to_le32(1 << 30);
+ remove_key.index |=
+ NDIS_80211_ADDKEY_PAIRWISE_KEY;
memcpy(remove_key.bssid, bssid,
sizeof(remove_key.bssid));
} else
@@ -1027,7 +1161,7 @@ static int remove_key(struct usbnet *usbdev, int index, u8 bssid[ETH_ALEN])
static void set_multicast_list(struct usbnet *usbdev)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct dev_mc_list *mclist;
__le32 filter;
int ret, i, size;
@@ -1086,131 +1220,180 @@ static void set_multicast_list(struct usbnet *usbdev)
/*
- * wireless extension handlers
+ * cfg80211 ops
*/
-
-static int rndis_iw_commit(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+static int rndis_change_virtual_intf(struct wiphy *wiphy, int ifindex,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
{
- /* dummy op */
- return 0;
+ struct net_device *dev;
+ struct usbnet *usbdev;
+ int mode;
+
+ /* we're under RTNL */
+ dev = __dev_get_by_index(&init_net, ifindex);
+ if (!dev)
+ return -ENODEV;
+ usbdev = netdev_priv(dev);
+
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ mode = NDIS_80211_INFRA_ADHOC;
+ break;
+ case NL80211_IFTYPE_STATION:
+ mode = NDIS_80211_INFRA_INFRA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return set_infra_mode(usbdev, mode);
}
-static int rndis_iw_get_range(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
+#define SCAN_DELAY_JIFFIES (HZ)
+static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_scan_request *request)
{
- struct iw_range *range = (struct iw_range *)extra;
struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
- int len, ret, i, j, num, has_80211g_rates;
- u8 rates[8];
- __le32 tx_power;
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ int ret;
+ __le32 tmp;
+
+ devdbg(usbdev, "cfg80211.scan");
- devdbg(usbdev, "SIOCGIWRANGE");
+ if (!request)
+ return -EINVAL;
- /* clear iw_range struct */
- memset(range, 0, sizeof(*range));
- wrqu->data.length = sizeof(*range);
+ if (priv->scan_request && priv->scan_request != request)
+ return -EBUSY;
- range->txpower_capa = IW_TXPOW_MWATT;
- range->num_txpower = 1;
- if (priv->caps & CAP_SUPPORT_TXPOWER) {
- len = sizeof(tx_power);
- ret = rndis_query_oid(usbdev, OID_802_11_TX_POWER_LEVEL,
- &tx_power, &len);
- if (ret == 0 && le32_to_cpu(tx_power) != 0xFF)
- range->txpower[0] = le32_to_cpu(tx_power);
- else
- range->txpower[0] = get_bcm4320_power(priv);
- } else
- range->txpower[0] = get_bcm4320_power(priv);
+ priv->scan_request = request;
- len = sizeof(rates);
- ret = rndis_query_oid(usbdev, OID_802_11_SUPPORTED_RATES, &rates,
- &len);
- has_80211g_rates = 0;
+ tmp = cpu_to_le32(1);
+ ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
+ sizeof(tmp));
if (ret == 0) {
- j = 0;
- for (i = 0; i < len; i++) {
- if (rates[i] == 0)
- break;
- range->bitrate[j] = (rates[i] & 0x7f) * 500000;
- /* check for non 802.11b rates */
- if (range->bitrate[j] == 6000000 ||
- range->bitrate[j] == 9000000 ||
- (range->bitrate[j] >= 12000000 &&
- range->bitrate[j] != 22000000))
- has_80211g_rates = 1;
- j++;
- }
- range->num_bitrates = j;
- } else
- range->num_bitrates = 0;
-
- /* fill in 802.11g rates */
- if (has_80211g_rates) {
- num = range->num_bitrates;
- for (i = 0; i < ARRAY_SIZE(rates_80211g); i++) {
- for (j = 0; j < num; j++) {
- if (range->bitrate[j] ==
- rates_80211g[i] * 1000000)
- break;
- }
- if (j == num)
- range->bitrate[range->num_bitrates++] =
- rates_80211g[i] * 1000000;
- if (range->num_bitrates == IW_MAX_BITRATES)
- break;
- }
+ /* Wait before retrieving scan results from device */
+ queue_delayed_work(priv->workqueue, &priv->scan_work,
+ SCAN_DELAY_JIFFIES);
+ }
- /* estimated max real througput in bps */
- range->throughput = 54 * 1000 * 1000 / 2;
+ return ret;
+}
- /* ~35% more with afterburner */
- if (priv->param_afterburner)
- range->throughput = range->throughput / 100 * 135;
- } else {
- /* estimated max real througput in bps */
- range->throughput = 11 * 1000 * 1000 / 2;
+
+static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
+ struct ndis_80211_bssid_ex *bssid)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ struct ieee80211_channel *channel;
+ s32 signal;
+ u64 timestamp;
+ u16 capability;
+ u16 beacon_interval;
+ struct ndis_80211_fixed_ies *fixed;
+ int ie_len, bssid_len;
+ u8 *ie;
+
+ /* parse bssid structure */
+ bssid_len = le32_to_cpu(bssid->length);
+
+ if (bssid_len < sizeof(struct ndis_80211_bssid_ex) +
+ sizeof(struct ndis_80211_fixed_ies))
+ return NULL;
+
+ fixed = (struct ndis_80211_fixed_ies *)bssid->ies;
+
+ ie = (void *)(bssid->ies + sizeof(struct ndis_80211_fixed_ies));
+ ie_len = min(bssid_len - (int)sizeof(*bssid),
+ (int)le32_to_cpu(bssid->ie_length));
+ ie_len -= sizeof(struct ndis_80211_fixed_ies);
+ if (ie_len < 0)
+ return NULL;
+
+ /* extract data for cfg80211_inform_bss */
+ channel = ieee80211_get_channel(priv->wdev.wiphy,
+ KHZ_TO_MHZ(le32_to_cpu(bssid->config.ds_config)));
+ if (!channel)
+ return NULL;
+
+ signal = level_to_qual(le32_to_cpu(bssid->rssi));
+ timestamp = le64_to_cpu(*(__le64 *)fixed->timestamp);
+ capability = le16_to_cpu(fixed->capabilities);
+ beacon_interval = le16_to_cpu(fixed->beacon_interval);
+
+ return cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
+ timestamp, capability, beacon_interval, ie, ie_len, signal,
+ GFP_KERNEL);
+}
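[Editor's note: KHZ_TO_MHZ() and level_to_qual() used above are helpers defined earlier in rndis_wlan.c and are not visible in this hunk. The frequency conversion is presumably a plain integer division; a sketch under that assumption:]

/* Assumed shape of the helper: NDIS reports ds_config in kHz, while
 * ieee80211_get_channel() expects the channel center frequency in MHz. */
#define KHZ_TO_MHZ(freq_khz) ((freq_khz) / 1000)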
+
+
+static int rndis_check_bssid_list(struct usbnet *usbdev)
+{
+ void *buf = NULL;
+ struct ndis_80211_bssid_list_ex *bssid_list;
+ struct ndis_80211_bssid_ex *bssid;
+ int ret = -EINVAL, len, count, bssid_len;
+
+ devdbg(usbdev, "check_bssid_list");
+
+ len = CONTROL_BUFFER_SIZE;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
}
- range->num_channels = 14;
+ ret = rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len);
+ if (ret != 0)
+ goto out;
- for (i = 0; (i < 14) && (i < IW_MAX_FREQUENCIES); i++) {
- range->freq[i].i = i + 1;
- range->freq[i].m = ieee80211_dsss_chan_to_freq(i + 1) * 100000;
- range->freq[i].e = 1;
+ bssid_list = buf;
+ bssid = bssid_list->bssid;
+ bssid_len = le32_to_cpu(bssid->length);
+ count = le32_to_cpu(bssid_list->num_items);
+ devdbg(usbdev, "check_bssid_list: %d BSSIDs found", count);
+
+ while (count && ((void *)bssid + bssid_len) <= (buf + len)) {
+ rndis_bss_info_update(usbdev, bssid);
+
+ bssid = (void *)bssid + bssid_len;
+ bssid_len = le32_to_cpu(bssid->length);
+ count--;
}
- range->num_frequency = i;
- range->min_rts = 0;
- range->max_rts = 2347;
- range->min_frag = 256;
- range->max_frag = 2346;
+out:
+ kfree(buf);
+ return ret;
+}
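[Editor's note: rndis_check_bssid_list() above walks a packed list of variable-length records, each carrying its own length field. A self-contained sketch of that traversal pattern follows; the struct and names are illustrative rather than the NDIS definitions, and it adds a minimum-size check that the driver loop omits.]

#include <stddef.h>
#include <stdint.h>

struct var_entry {
	uint32_t length;   /* total size of this entry, header included */
	uint8_t  data[];   /* entry payload */
};

/* Visit each record in a packed buffer, stopping when the advertised
 * count runs out or the next record would overrun the buffer. */
static void walk_entries(const void *buf, size_t buf_len, uint32_t count,
			 void (*visit)(const struct var_entry *))
{
	const uint8_t *pos = buf;
	const uint8_t *end = pos + buf_len;

	while (count && pos + sizeof(struct var_entry) <= end) {
		const struct var_entry *e = (const struct var_entry *)pos;

		if (e->length < sizeof(*e) || pos + e->length > end)
			break;          /* malformed or truncated entry */
		visit(e);
		pos += e->length;
		count--;
	}
}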
- range->max_qual.qual = 100;
- range->max_qual.level = 154;
- range->max_qual.updated = IW_QUAL_QUAL_UPDATED
- | IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_NOISE_INVALID;
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = WIRELESS_EXT;
+static void rndis_get_scan_results(struct work_struct *work)
+{
+ struct rndis_wlan_private *priv =
+ container_of(work, struct rndis_wlan_private, scan_work.work);
+ struct usbnet *usbdev = priv->usbdev;
+ int ret;
+
+ devdbg(usbdev, "get_scan_results");
- range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
- IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
- return 0;
+ ret = rndis_check_bssid_list(usbdev);
+
+ cfg80211_scan_done(priv->scan_request, ret < 0);
+
+ priv->scan_request = NULL;
}
-static int rndis_iw_get_name(struct net_device *dev,
+/*
+ * wireless extension handlers
+ */
+
+static int rndis_iw_commit(struct net_device *dev,
struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
- struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
-
- strcpy(wrqu->name, priv->name);
+ /* dummy op */
return 0;
}
@@ -1314,7 +1497,7 @@ static int rndis_iw_set_auth(struct net_device *dev,
{
struct iw_param *p = &wrqu->param;
struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
int ret = -ENOTSUPP;
switch (p->flags & IW_AUTH_INDEX) {
@@ -1395,7 +1578,7 @@ static int rndis_iw_get_auth(struct net_device *dev,
{
struct iw_param *p = &wrqu->param;
struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
switch (p->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
@@ -1422,60 +1605,11 @@ static int rndis_iw_get_auth(struct net_device *dev,
}
-static int rndis_iw_get_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
-
- switch (priv->infra_mode) {
- case ndis_80211_infra_adhoc:
- wrqu->mode = IW_MODE_ADHOC;
- break;
- case ndis_80211_infra_infra:
- wrqu->mode = IW_MODE_INFRA;
- break;
- /*case ndis_80211_infra_auto_unknown:*/
- default:
- wrqu->mode = IW_MODE_AUTO;
- break;
- }
- devdbg(usbdev, "SIOCGIWMODE: %08x", wrqu->mode);
- return 0;
-}
-
-
-static int rndis_iw_set_mode(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct usbnet *usbdev = netdev_priv(dev);
- int mode;
-
- devdbg(usbdev, "SIOCSIWMODE: %08x", wrqu->mode);
-
- switch (wrqu->mode) {
- case IW_MODE_ADHOC:
- mode = ndis_80211_infra_adhoc;
- break;
- case IW_MODE_INFRA:
- mode = ndis_80211_infra_infra;
- break;
- /*case IW_MODE_AUTO:*/
- default:
- mode = ndis_80211_infra_auto_unknown;
- break;
- }
-
- return set_infra_mode(usbdev, mode);
-}
-
-
static int rndis_iw_set_encode(struct net_device *dev,
struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
int ret, index, key_len;
u8 *key;
@@ -1538,10 +1672,8 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
{
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
- struct ndis_80211_key ndis_key;
- int keyidx, ret;
- u8 *addr;
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ int keyidx, flags;
keyidx = wrqu->encoding.flags & IW_ENCODE_INDEX;
@@ -1564,250 +1696,16 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
ext->alg == IW_ENCODE_ALG_NONE || ext->key_len == 0)
return remove_key(usbdev, keyidx, NULL);
- if (ext->key_len > sizeof(ndis_key.material))
- return -1;
-
- memset(&ndis_key, 0, sizeof(ndis_key));
-
- ndis_key.size = cpu_to_le32(sizeof(ndis_key) -
- sizeof(ndis_key.material) + ext->key_len);
- ndis_key.length = cpu_to_le32(ext->key_len);
- ndis_key.index = cpu_to_le32(keyidx);
-
- if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
- memcpy(ndis_key.rsc, ext->rx_seq, 6);
- ndis_key.index |= cpu_to_le32(1 << 29);
- }
-
- addr = ext->addr.sa_data;
- if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
- /* group key */
- if (priv->infra_mode == ndis_80211_infra_adhoc)
- memset(ndis_key.bssid, 0xff, ETH_ALEN);
- else
- get_bssid(usbdev, ndis_key.bssid);
- } else {
- /* pairwise key */
- ndis_key.index |= cpu_to_le32(1 << 30);
- memcpy(ndis_key.bssid, addr, ETH_ALEN);
- }
-
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
- ndis_key.index |= cpu_to_le32(1 << 31);
-
- if (ext->alg == IW_ENCODE_ALG_TKIP && ext->key_len == 32) {
- /* wpa_supplicant gives us the Michael MIC RX/TX keys in
- * different order than NDIS spec, so swap the order here. */
- memcpy(ndis_key.material, ext->key, 16);
- memcpy(ndis_key.material + 16, ext->key + 24, 8);
- memcpy(ndis_key.material + 24, ext->key + 16, 8);
- } else
- memcpy(ndis_key.material, ext->key, ext->key_len);
-
- ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key,
- le32_to_cpu(ndis_key.size));
- devdbg(usbdev, "SIOCSIWENCODEEXT: OID_802_11_ADD_KEY -> %08X", ret);
- if (ret != 0)
- return ret;
-
- priv->encr_key_len[keyidx] = ext->key_len;
- memcpy(&priv->encr_keys[keyidx], ndis_key.material, ext->key_len);
+ flags = 0;
+ if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
+ flags |= NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ;
+ if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY))
+ flags |= NDIS_80211_ADDKEY_PAIRWISE_KEY;
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
- priv->encr_tx_key_index = keyidx;
-
- return 0;
-}
-
-
-static int rndis_iw_set_scan(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct usbnet *usbdev = netdev_priv(dev);
- union iwreq_data evt;
- int ret = -EINVAL;
- __le32 tmp;
-
- devdbg(usbdev, "SIOCSIWSCAN");
-
- if (wrqu->data.flags == 0) {
- tmp = cpu_to_le32(1);
- ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
- sizeof(tmp));
- evt.data.flags = 0;
- evt.data.length = 0;
- wireless_send_event(dev, SIOCGIWSCAN, &evt, NULL);
- }
- return ret;
-}
-
-
-static char *rndis_translate_scan(struct net_device *dev,
- struct iw_request_info *info, char *cev,
- char *end_buf,
- struct ndis_80211_bssid_ex *bssid)
-{
- struct usbnet *usbdev = netdev_priv(dev);
- u8 *ie;
- char *current_val;
- int bssid_len, ie_len, i;
- u32 beacon, atim;
- struct iw_event iwe;
- unsigned char sbuf[32];
-
- bssid_len = le32_to_cpu(bssid->length);
-
- devdbg(usbdev, "BSSID %pM", bssid->mac);
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, bssid->mac, ETH_ALEN);
- cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_ADDR_LEN);
-
- devdbg(usbdev, "SSID(%d) %s", le32_to_cpu(bssid->ssid.length),
- bssid->ssid.essid);
- iwe.cmd = SIOCGIWESSID;
- iwe.u.essid.length = le32_to_cpu(bssid->ssid.length);
- iwe.u.essid.flags = 1;
- cev = iwe_stream_add_point(info, cev, end_buf, &iwe, bssid->ssid.essid);
-
- devdbg(usbdev, "MODE %d", le32_to_cpu(bssid->net_infra));
- iwe.cmd = SIOCGIWMODE;
- switch (le32_to_cpu(bssid->net_infra)) {
- case ndis_80211_infra_adhoc:
- iwe.u.mode = IW_MODE_ADHOC;
- break;
- case ndis_80211_infra_infra:
- iwe.u.mode = IW_MODE_INFRA;
- break;
- /*case ndis_80211_infra_auto_unknown:*/
- default:
- iwe.u.mode = IW_MODE_AUTO;
- break;
- }
- cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_UINT_LEN);
-
- devdbg(usbdev, "FREQ %d kHz", le32_to_cpu(bssid->config.ds_config));
- iwe.cmd = SIOCGIWFREQ;
- dsconfig_to_freq(le32_to_cpu(bssid->config.ds_config), &iwe.u.freq);
- cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_FREQ_LEN);
-
- devdbg(usbdev, "QUAL %d", le32_to_cpu(bssid->rssi));
- iwe.cmd = IWEVQUAL;
- iwe.u.qual.qual = level_to_qual(le32_to_cpu(bssid->rssi));
- iwe.u.qual.level = le32_to_cpu(bssid->rssi);
- iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED
- | IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_NOISE_INVALID;
- cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_QUAL_LEN);
-
- devdbg(usbdev, "ENCODE %d", le32_to_cpu(bssid->privacy));
- iwe.cmd = SIOCGIWENCODE;
- iwe.u.data.length = 0;
- if (le32_to_cpu(bssid->privacy) == ndis_80211_priv_accept_all)
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- else
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
-
- cev = iwe_stream_add_point(info, cev, end_buf, &iwe, NULL);
-
- devdbg(usbdev, "RATES:");
- current_val = cev + iwe_stream_lcp_len(info);
- iwe.cmd = SIOCGIWRATE;
- for (i = 0; i < sizeof(bssid->rates); i++) {
- if (bssid->rates[i] & 0x7f) {
- iwe.u.bitrate.value =
- ((bssid->rates[i] & 0x7f) *
- 500000);
- devdbg(usbdev, " %d", iwe.u.bitrate.value);
- current_val = iwe_stream_add_value(info, cev,
- current_val, end_buf, &iwe,
- IW_EV_PARAM_LEN);
- }
- }
-
- if ((current_val - cev) > iwe_stream_lcp_len(info))
- cev = current_val;
-
- beacon = le32_to_cpu(bssid->config.beacon_period);
- devdbg(usbdev, "BCN_INT %d", beacon);
- iwe.cmd = IWEVCUSTOM;
- snprintf(sbuf, sizeof(sbuf), "bcn_int=%d", beacon);
- iwe.u.data.length = strlen(sbuf);
- cev = iwe_stream_add_point(info, cev, end_buf, &iwe, sbuf);
-
- atim = le32_to_cpu(bssid->config.atim_window);
- devdbg(usbdev, "ATIM %d", atim);
- iwe.cmd = IWEVCUSTOM;
- snprintf(sbuf, sizeof(sbuf), "atim=%u", atim);
- iwe.u.data.length = strlen(sbuf);
- cev = iwe_stream_add_point(info, cev, end_buf, &iwe, sbuf);
-
- ie = (void *)(bssid->ies + sizeof(struct ndis_80211_fixed_ies));
- ie_len = min(bssid_len - (int)sizeof(*bssid),
- (int)le32_to_cpu(bssid->ie_length));
- ie_len -= sizeof(struct ndis_80211_fixed_ies);
- while (ie_len >= 2 && 2 + ie[1] <= ie_len) {
- if ((ie[0] == WLAN_EID_GENERIC && ie[1] >= 4 &&
- memcmp(ie + 2, "\x00\x50\xf2\x01", 4) == 0) ||
- ie[0] == WLAN_EID_RSN) {
- devdbg(usbdev, "IE: WPA%d",
- (ie[0] == WLAN_EID_RSN) ? 2 : 1);
- iwe.cmd = IWEVGENIE;
- /* arbitrary cut-off at 64 */
- iwe.u.data.length = min(ie[1] + 2, 64);
- cev = iwe_stream_add_point(info, cev, end_buf, &iwe, ie);
- }
-
- ie_len -= 2 + ie[1];
- ie += 2 + ie[1];
- }
-
- return cev;
-}
-
-
-static int rndis_iw_get_scan(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct usbnet *usbdev = netdev_priv(dev);
- void *buf = NULL;
- char *cev = extra;
- struct ndis_80211_bssid_list_ex *bssid_list;
- struct ndis_80211_bssid_ex *bssid;
- int ret = -EINVAL, len, count, bssid_len;
-
- devdbg(usbdev, "SIOCGIWSCAN");
+ flags |= NDIS_80211_ADDKEY_TRANSMIT_KEY;
- len = CONTROL_BUFFER_SIZE;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len);
-
- if (ret != 0)
- goto out;
-
- bssid_list = buf;
- bssid = bssid_list->bssid;
- bssid_len = le32_to_cpu(bssid->length);
- count = le32_to_cpu(bssid_list->num_items);
- devdbg(usbdev, "SIOCGIWSCAN: %d BSSIDs found", count);
-
- while (count && ((void *)bssid + bssid_len) <= (buf + len)) {
- cev = rndis_translate_scan(dev, info, cev,
- extra + IW_SCAN_MAX_DATA, bssid);
- bssid = (void *)bssid + bssid_len;
- bssid_len = le32_to_cpu(bssid->length);
- count--;
- }
-
-out:
- wrqu->data.length = cev - extra;
- wrqu->data.flags = 0;
- kfree(buf);
- return ret;
+ return add_wpa_key(usbdev, ext->key, ext->key_len, keyidx, &ext->addr,
+ ext->rx_seq, ext->alg, flags);
}
@@ -1815,7 +1713,7 @@ static int rndis_iw_set_genie(struct net_device *dev,
struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
int ret = 0;
#ifdef DEBUG
@@ -1849,7 +1747,7 @@ static int rndis_iw_get_genie(struct net_device *dev,
struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
devdbg(usbdev, "SIOCGIWGENIE");
@@ -1936,39 +1834,6 @@ static int rndis_iw_get_frag(struct net_device *dev,
}
-static int rndis_iw_set_nick(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
-
- devdbg(usbdev, "SIOCSIWNICK");
-
- priv->nick_len = wrqu->data.length;
- if (priv->nick_len > 32)
- priv->nick_len = 32;
-
- memcpy(priv->nick, extra, priv->nick_len);
- return 0;
-}
-
-
-static int rndis_iw_get_nick(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
-
- wrqu->data.flags = 1;
- wrqu->data.length = priv->nick_len;
- memcpy(extra, priv->nick, priv->nick_len);
-
- devdbg(usbdev, "SIOCGIWNICK: '%s'", priv->nick);
-
- return 0;
-}
-
-
static int rndis_iw_set_freq(struct net_device *dev,
struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
@@ -2021,20 +1886,12 @@ static int rndis_iw_get_txpower(struct net_device *dev,
struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
__le32 tx_power;
- int ret = 0, len;
if (priv->radio_on) {
- if (priv->caps & CAP_SUPPORT_TXPOWER) {
- len = sizeof(tx_power);
- ret = rndis_query_oid(usbdev, OID_802_11_TX_POWER_LEVEL,
- &tx_power, &len);
- if (ret != 0)
- return ret;
- } else
- /* fake incase not supported */
- tx_power = cpu_to_le32(get_bcm4320_power(priv));
+ /* fake value, since changing tx_power from userspace is not supported */
+ tx_power = cpu_to_le32(get_bcm4320_power(priv));
wrqu->txpower.flags = IW_TXPOW_MWATT;
wrqu->txpower.value = le32_to_cpu(tx_power);
@@ -2047,7 +1904,7 @@ static int rndis_iw_get_txpower(struct net_device *dev,
devdbg(usbdev, "SIOCGIWTXPOW: %d", wrqu->txpower.value);
- return ret;
+ return 0;
}
@@ -2055,9 +1912,8 @@ static int rndis_iw_set_txpower(struct net_device *dev,
struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
__le32 tx_power = 0;
- int ret = 0;
if (!wrqu->txpower.disabled) {
if (wrqu->txpower.flags == IW_TXPOW_MWATT)
@@ -2080,22 +1936,10 @@ static int rndis_iw_set_txpower(struct net_device *dev,
devdbg(usbdev, "SIOCSIWTXPOW: %d", le32_to_cpu(tx_power));
if (le32_to_cpu(tx_power) != 0) {
- if (priv->caps & CAP_SUPPORT_TXPOWER) {
- /* turn radio on first */
- if (!priv->radio_on)
- disassociate(usbdev, 1);
-
- ret = rndis_set_oid(usbdev, OID_802_11_TX_POWER_LEVEL,
- &tx_power, sizeof(tx_power));
- if (ret != 0)
- ret = -EOPNOTSUPP;
- return ret;
- } else {
- /* txpower unsupported, just turn radio on */
- if (!priv->radio_on)
- return disassociate(usbdev, 1);
- return 0; /* all ready on */
- }
+ /* txpower unsupported, just turn radio on */
+ if (!priv->radio_on)
+ return disassociate(usbdev, 1);
+ return 0; /* already on */
}
/* tx_power == 0, turn off radio */
@@ -2125,7 +1969,7 @@ static int rndis_iw_set_mlme(struct net_device *dev,
struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
unsigned char bssid[ETH_ALEN];
@@ -2150,7 +1994,7 @@ static int rndis_iw_set_mlme(struct net_device *dev,
static struct iw_statistics *rndis_get_wireless_stats(struct net_device *dev)
{
struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
unsigned long flags;
spin_lock_irqsave(&priv->stats_lock, flags);
@@ -2165,20 +2009,18 @@ static struct iw_statistics *rndis_get_wireless_stats(struct net_device *dev)
static const iw_handler rndis_iw_handler[] =
{
IW_IOCTL(SIOCSIWCOMMIT) = rndis_iw_commit,
- IW_IOCTL(SIOCGIWNAME) = rndis_iw_get_name,
+ IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname,
IW_IOCTL(SIOCSIWFREQ) = rndis_iw_set_freq,
IW_IOCTL(SIOCGIWFREQ) = rndis_iw_get_freq,
- IW_IOCTL(SIOCSIWMODE) = rndis_iw_set_mode,
- IW_IOCTL(SIOCGIWMODE) = rndis_iw_get_mode,
- IW_IOCTL(SIOCGIWRANGE) = rndis_iw_get_range,
+ IW_IOCTL(SIOCSIWMODE) = (iw_handler) cfg80211_wext_siwmode,
+ IW_IOCTL(SIOCGIWMODE) = (iw_handler) cfg80211_wext_giwmode,
+ IW_IOCTL(SIOCGIWRANGE) = (iw_handler) cfg80211_wext_giwrange,
IW_IOCTL(SIOCSIWAP) = rndis_iw_set_bssid,
IW_IOCTL(SIOCGIWAP) = rndis_iw_get_bssid,
- IW_IOCTL(SIOCSIWSCAN) = rndis_iw_set_scan,
- IW_IOCTL(SIOCGIWSCAN) = rndis_iw_get_scan,
+ IW_IOCTL(SIOCSIWSCAN) = (iw_handler) cfg80211_wext_siwscan,
+ IW_IOCTL(SIOCGIWSCAN) = (iw_handler) cfg80211_wext_giwscan,
IW_IOCTL(SIOCSIWESSID) = rndis_iw_set_essid,
IW_IOCTL(SIOCGIWESSID) = rndis_iw_get_essid,
- IW_IOCTL(SIOCSIWNICKN) = rndis_iw_set_nick,
- IW_IOCTL(SIOCGIWNICKN) = rndis_iw_get_nick,
IW_IOCTL(SIOCGIWRATE) = rndis_iw_get_rate,
IW_IOCTL(SIOCSIWRTS) = rndis_iw_set_rts,
IW_IOCTL(SIOCGIWRTS) = rndis_iw_get_rts,
@@ -2195,28 +2037,28 @@ static const iw_handler rndis_iw_handler[] =
IW_IOCTL(SIOCSIWMLME) = rndis_iw_set_mlme,
};
-static const iw_handler rndis_wext_private_handler[] = {
+static const iw_handler rndis_wlan_private_handler[] = {
};
-static const struct iw_priv_args rndis_wext_private_args[] = {
+static const struct iw_priv_args rndis_wlan_private_args[] = {
};
static const struct iw_handler_def rndis_iw_handlers = {
.num_standard = ARRAY_SIZE(rndis_iw_handler),
- .num_private = ARRAY_SIZE(rndis_wext_private_handler),
- .num_private_args = ARRAY_SIZE(rndis_wext_private_args),
+ .num_private = ARRAY_SIZE(rndis_wlan_private_handler),
+ .num_private_args = ARRAY_SIZE(rndis_wlan_private_args),
.standard = (iw_handler *)rndis_iw_handler,
- .private = (iw_handler *)rndis_wext_private_handler,
- .private_args = (struct iw_priv_args *)rndis_wext_private_args,
+ .private = (iw_handler *)rndis_wlan_private_handler,
+ .private_args = (struct iw_priv_args *)rndis_wlan_private_args,
.get_wireless_stats = rndis_get_wireless_stats,
};
-static void rndis_wext_worker(struct work_struct *work)
+static void rndis_wlan_worker(struct work_struct *work)
{
- struct rndis_wext_private *priv =
- container_of(work, struct rndis_wext_private, work);
+ struct rndis_wlan_private *priv =
+ container_of(work, struct rndis_wlan_private, work);
struct usbnet *usbdev = priv->usbdev;
union iwreq_data evt;
unsigned char bssid[ETH_ALEN];
@@ -2277,10 +2119,10 @@ get_bssid:
set_multicast_list(usbdev);
}
-static void rndis_wext_set_multicast_list(struct net_device *dev)
+static void rndis_wlan_set_multicast_list(struct net_device *dev)
{
struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
if (test_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending))
return;
@@ -2289,9 +2131,9 @@ static void rndis_wext_set_multicast_list(struct net_device *dev)
queue_work(priv->workqueue, &priv->work);
}
-static void rndis_wext_link_change(struct usbnet *usbdev, int state)
+static void rndis_wlan_link_change(struct usbnet *usbdev, int state)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
/* queue work to avoid recursive calls into rndis_command */
set_bit(state ? WORK_LINK_UP : WORK_LINK_DOWN, &priv->work_pending);
@@ -2299,22 +2141,14 @@ static void rndis_wext_link_change(struct usbnet *usbdev, int state)
}
-static int rndis_wext_get_caps(struct usbnet *usbdev)
+static int rndis_wlan_get_caps(struct usbnet *usbdev)
{
struct {
__le32 num_items;
__le32 items[8];
} networks_supported;
int len, retval, i, n;
- __le32 tx_power;
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
-
- /* determine if supports setting txpower */
- len = sizeof(tx_power);
- retval = rndis_query_oid(usbdev, OID_802_11_TX_POWER_LEVEL, &tx_power,
- &len);
- if (retval == 0 && le32_to_cpu(tx_power) != 0xFF)
- priv->caps |= CAP_SUPPORT_TXPOWER;
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
/* determine supported modes */
len = sizeof(networks_supported);
@@ -2326,24 +2160,18 @@ static int rndis_wext_get_caps(struct usbnet *usbdev)
n = 8;
for (i = 0; i < n; i++) {
switch (le32_to_cpu(networks_supported.items[i])) {
- case ndis_80211_type_freq_hop:
- case ndis_80211_type_direct_seq:
+ case NDIS_80211_TYPE_FREQ_HOP:
+ case NDIS_80211_TYPE_DIRECT_SEQ:
priv->caps |= CAP_MODE_80211B;
break;
- case ndis_80211_type_ofdm_a:
+ case NDIS_80211_TYPE_OFDM_A:
priv->caps |= CAP_MODE_80211A;
break;
- case ndis_80211_type_ofdm_g:
+ case NDIS_80211_TYPE_OFDM_G:
priv->caps |= CAP_MODE_80211G;
break;
}
}
- if (priv->caps & CAP_MODE_80211A)
- strcat(priv->name, "a");
- if (priv->caps & CAP_MODE_80211B)
- strcat(priv->name, "b");
- if (priv->caps & CAP_MODE_80211G)
- strcat(priv->name, "g");
}
return retval;
@@ -2353,8 +2181,8 @@ static int rndis_wext_get_caps(struct usbnet *usbdev)
#define STATS_UPDATE_JIFFIES (HZ)
static void rndis_update_wireless_stats(struct work_struct *work)
{
- struct rndis_wext_private *priv =
- container_of(work, struct rndis_wext_private, stats_work.work);
+ struct rndis_wlan_private *priv =
+ container_of(work, struct rndis_wlan_private, stats_work.work);
struct usbnet *usbdev = priv->usbdev;
struct iw_statistics iwstats;
__le32 rssi, tmp;
@@ -2387,7 +2215,7 @@ static void rndis_update_wireless_stats(struct work_struct *work)
if (ret == 0) {
memset(&iwstats.qual, 0, sizeof(iwstats.qual));
iwstats.qual.qual = level_to_qual(le32_to_cpu(rssi));
- iwstats.qual.level = le32_to_cpu(rssi);
+ iwstats.qual.level = level_to_qual(le32_to_cpu(rssi));
iwstats.qual.updated = IW_QUAL_QUAL_UPDATED
| IW_QUAL_LEVEL_UPDATED
| IW_QUAL_NOISE_INVALID;
@@ -2457,9 +2285,19 @@ end:
}
-static int bcm4320_early_init(struct usbnet *usbdev)
+static int bcm4320a_early_init(struct usbnet *usbdev)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ /* bcm4320a doesn't handle configuration parameters well. Trying to
+ * set any of them leaves the device with a partially zeroed MAC
+ * address and broken operation.
+ */
+
+ return 0;
+}
+
+
+static int bcm4320b_early_init(struct usbnet *usbdev)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
char buf[8];
/* Early initialization settings, setting these won't have effect
@@ -2525,33 +2363,41 @@ static int bcm4320_early_init(struct usbnet *usbdev)
}
/* same as rndis_netdev_ops but with local multicast handler */
-static const struct net_device_ops rndis_wext_netdev_ops = {
+static const struct net_device_ops rndis_wlan_netdev_ops = {
.ndo_open = usbnet_open,
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = rndis_wext_set_multicast_list,
+ .ndo_set_multicast_list = rndis_wlan_set_multicast_list,
};
-static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf)
+static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
{
- struct rndis_wext_private *priv;
+ struct wiphy *wiphy;
+ struct rndis_wlan_private *priv;
int retval, len;
__le32 tmp;
- /* allocate rndis private data */
- priv = kzalloc(sizeof(struct rndis_wext_private), GFP_KERNEL);
- if (!priv)
+ /* allocate wiphy and rndis private data
+ * NOTE: We only support a single virtual interface, so wiphy
+ * and wireless_dev are somewhat synonymous for this device.
+ */
+ wiphy = wiphy_new(&rndis_config_ops, sizeof(struct rndis_wlan_private));
+ if (!wiphy)
return -ENOMEM;
+ priv = wiphy_priv(wiphy);
+ usbdev->net->ieee80211_ptr = &priv->wdev;
+ priv->wdev.wiphy = wiphy;
+ priv->wdev.iftype = NL80211_IFTYPE_STATION;
+
/* These have to be initialized before calling generic_rndis_bind().
- * Otherwise we'll be in big trouble in rndis_wext_early_init().
+ * Otherwise we'll be in big trouble in rndis_wlan_early_init().
*/
usbdev->driver_priv = priv;
- strcpy(priv->name, "IEEE802.11");
usbdev->net->wireless_handlers = &rndis_iw_handlers;
priv->usbdev = usbdev;
@@ -2560,8 +2406,9 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf)
/* because rndis_command() sleeps we need to use workqueue */
priv->workqueue = create_singlethread_workqueue("rndis_wlan");
- INIT_WORK(&priv->work, rndis_wext_worker);
+ INIT_WORK(&priv->work, rndis_wlan_worker);
INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats);
+ INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results);
/* try bind rndis_host */
retval = generic_rndis_bind(usbdev, intf, FLAG_RNDIS_PHYM_WIRELESS);
@@ -2573,9 +2420,9 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf)
* picks up rssi to closest station instead of to access point).
*
* rndis_host wants to avoid all OID as much as possible
- * so do promisc/multicast handling in rndis_wext.
+ * so do promisc/multicast handling in rndis_wlan.
*/
- usbdev->net->netdev_ops = &rndis_wext_netdev_ops;
+ usbdev->net->netdev_ops = &rndis_wlan_netdev_ops;
tmp = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST;
retval = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &tmp,
@@ -2600,7 +2447,32 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf)
| IW_QUAL_QUAL_INVALID
| IW_QUAL_LEVEL_INVALID;
- rndis_wext_get_caps(usbdev);
+ /* fill-out wiphy structure and register w/ cfg80211 */
+ memcpy(wiphy->perm_addr, usbdev->net->dev_addr, ETH_ALEN);
+ wiphy->privid = rndis_wiphy_privid;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
+ | BIT(NL80211_IFTYPE_ADHOC);
+ wiphy->max_scan_ssids = 1;
+
+ /* TODO: fill-out band information based on priv->caps */
+ rndis_wlan_get_caps(usbdev);
+
+ memcpy(priv->channels, rndis_channels, sizeof(rndis_channels));
+ memcpy(priv->rates, rndis_rates, sizeof(rndis_rates));
+ priv->band.channels = priv->channels;
+ priv->band.n_channels = ARRAY_SIZE(rndis_channels);
+ priv->band.bitrates = priv->rates;
+ priv->band.n_bitrates = ARRAY_SIZE(rndis_rates);
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+
+ set_wiphy_dev(wiphy, &usbdev->udev->dev);
+
+ if (wiphy_register(wiphy)) {
+ retval = -ENODEV;
+ goto fail;
+ }
+
set_default_iw_params(usbdev);
/* turn radio on */
@@ -2615,36 +2487,40 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf)
fail:
cancel_delayed_work_sync(&priv->stats_work);
+ cancel_delayed_work_sync(&priv->scan_work);
cancel_work_sync(&priv->work);
flush_workqueue(priv->workqueue);
destroy_workqueue(priv->workqueue);
- kfree(priv);
+ wiphy_free(wiphy);
return retval;
}
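[Editor's note: for readers new to cfg80211, rndis_wlan_bind()/rndis_wlan_unbind() above follow the standard wiphy life cycle: wiphy_new() -> fill in modes/bands -> set_wiphy_dev() -> wiphy_register(), with wiphy_unregister() + wiphy_free() on teardown. A condensed sketch of the registration half, where my_priv and my_cfg80211_ops are hypothetical stand-ins for rndis_wlan_private and rndis_config_ops (error handling trimmed):]

#include <linux/bitops.h>
#include <net/cfg80211.h>

/* hypothetical stand-ins for rndis_wlan_private and rndis_config_ops */
struct my_priv {
	struct wireless_dev wdev;
	struct ieee80211_supported_band band;
};
static const struct cfg80211_ops my_cfg80211_ops; /* .scan, .change_virtual_intf, ... */

static struct wiphy *example_register_wiphy(struct device *parent)
{
	struct wiphy *wiphy;
	struct my_priv *priv;

	wiphy = wiphy_new(&my_cfg80211_ops, sizeof(*priv)); /* wiphy + private area */
	if (!wiphy)
		return NULL;

	priv = wiphy_priv(wiphy);              /* private data lives inside the wiphy */
	priv->wdev.wiphy = wiphy;
	priv->wdev.iftype = NL80211_IFTYPE_STATION;

	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
				 BIT(NL80211_IFTYPE_ADHOC);
	wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; /* band filled in beforehand */
	wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;

	set_wiphy_dev(wiphy, parent);          /* parent device, as done with udev->dev */
	if (wiphy_register(wiphy)) {           /* makes the phy visible to nl80211 */
		wiphy_free(wiphy);
		return NULL;
	}
	return wiphy;
}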
-static void rndis_wext_unbind(struct usbnet *usbdev, struct usb_interface *intf)
+static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf)
{
- struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
/* turn radio off */
disassociate(usbdev, 0);
cancel_delayed_work_sync(&priv->stats_work);
+ cancel_delayed_work_sync(&priv->scan_work);
cancel_work_sync(&priv->work);
flush_workqueue(priv->workqueue);
destroy_workqueue(priv->workqueue);
if (priv && priv->wpa_ie_len)
kfree(priv->wpa_ie);
- kfree(priv);
rndis_unbind(usbdev, intf);
+
+ wiphy_unregister(priv->wdev.wiphy);
+ wiphy_free(priv->wdev.wiphy);
}
-static int rndis_wext_reset(struct usbnet *usbdev)
+static int rndis_wlan_reset(struct usbnet *usbdev)
{
return deauthenticate(usbdev);
}
@@ -2653,40 +2529,40 @@ static int rndis_wext_reset(struct usbnet *usbdev)
static const struct driver_info bcm4320b_info = {
.description = "Wireless RNDIS device, BCM4320b based",
.flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT,
- .bind = rndis_wext_bind,
- .unbind = rndis_wext_unbind,
+ .bind = rndis_wlan_bind,
+ .unbind = rndis_wlan_unbind,
.status = rndis_status,
.rx_fixup = rndis_rx_fixup,
.tx_fixup = rndis_tx_fixup,
- .reset = rndis_wext_reset,
- .early_init = bcm4320_early_init,
- .link_change = rndis_wext_link_change,
+ .reset = rndis_wlan_reset,
+ .early_init = bcm4320b_early_init,
+ .link_change = rndis_wlan_link_change,
};
static const struct driver_info bcm4320a_info = {
.description = "Wireless RNDIS device, BCM4320a based",
.flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT,
- .bind = rndis_wext_bind,
- .unbind = rndis_wext_unbind,
+ .bind = rndis_wlan_bind,
+ .unbind = rndis_wlan_unbind,
.status = rndis_status,
.rx_fixup = rndis_rx_fixup,
.tx_fixup = rndis_tx_fixup,
- .reset = rndis_wext_reset,
- .early_init = bcm4320_early_init,
- .link_change = rndis_wext_link_change,
+ .reset = rndis_wlan_reset,
+ .early_init = bcm4320a_early_init,
+ .link_change = rndis_wlan_link_change,
};
-static const struct driver_info rndis_wext_info = {
+static const struct driver_info rndis_wlan_info = {
.description = "Wireless RNDIS device",
.flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT,
- .bind = rndis_wext_bind,
- .unbind = rndis_wext_unbind,
+ .bind = rndis_wlan_bind,
+ .unbind = rndis_wlan_unbind,
.status = rndis_status,
.rx_fixup = rndis_rx_fixup,
.tx_fixup = rndis_tx_fixup,
- .reset = rndis_wext_reset,
- .early_init = bcm4320_early_init,
- .link_change = rndis_wext_link_change,
+ .reset = rndis_wlan_reset,
+ .early_init = bcm4320a_early_init,
+ .link_change = rndis_wlan_link_change,
};
/*-------------------------------------------------------------------------*/
@@ -2796,11 +2672,11 @@ static const struct usb_device_id products [] = {
{
/* RNDIS is MSFT's un-official variant of CDC ACM */
USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
- .driver_info = (unsigned long) &rndis_wext_info,
+ .driver_info = (unsigned long) &rndis_wlan_info,
}, {
/* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
- .driver_info = (unsigned long) &rndis_wext_info,
+ .driver_info = (unsigned long) &rndis_wlan_info,
},
{ }, // END
};
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index bfc5d9cf716..8aab3e6754b 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -9,11 +9,11 @@ menuconfig RT2X00
When building one of the individual drivers, the rt2x00 library
will also be created. That library (when the driver is built as
- a module) will be called "rt2x00lib.ko".
+ a module) will be called rt2x00lib.
Additionally PCI and USB libraries will also be built depending
on the types of drivers being selected; these libraries will be
- called "rt2x00pci.ko" and "rt2x00usb.ko".
+ called rt2x00pci and rt2x00usb.
if RT2X00
@@ -26,7 +26,7 @@ config RT2400PCI
This adds support for rt2400 wireless chipset family.
Supported chips: RT2460.
- When compiled as a module, this driver will be called "rt2400pci.ko".
+ When compiled as a module, this driver will be called rt2400pci.
config RT2500PCI
tristate "Ralink rt2500 (PCI/PCMCIA) support"
@@ -37,7 +37,7 @@ config RT2500PCI
This adds support for rt2500 wireless chipset family.
Supported chips: RT2560.
- When compiled as a module, this driver will be called "rt2500pci.ko".
+ When compiled as a module, this driver will be called rt2500pci.
config RT61PCI
tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
@@ -51,7 +51,7 @@ config RT61PCI
This adds support for rt2501 wireless chipset family.
Supported chips: RT2561, RT2561S & RT2661.
- When compiled as a module, this driver will be called "rt61pci.ko".
+ When compiled as a module, this driver will be called rt61pci.
config RT2500USB
tristate "Ralink rt2500 (USB) support"
@@ -62,7 +62,7 @@ config RT2500USB
This adds support for rt2500 wireless chipset family.
Supported chips: RT2571 & RT2572.
- When compiled as a module, this driver will be called "rt2500usb.ko".
+ When compiled as a module, this driver will be called rt2500usb.
config RT73USB
tristate "Ralink rt2501/rt73 (USB) support"
@@ -75,7 +75,21 @@ config RT73USB
This adds support for rt2501 wireless chipset family.
Supported chips: RT2571W, RT2573 & RT2671.
- When compiled as a module, this driver will be called "rt73usb.ko".
+ When compiled as a module, this driver will be called rt73usb.
+
+config RT2800USB
+ tristate "Ralink rt2800 (USB) support"
+ depends on USB
+ select RT2X00_LIB_USB
+ select RT2X00_LIB_HT
+ select RT2X00_LIB_FIRMWARE
+ select RT2X00_LIB_CRYPTO
+ select CRC_CCITT
+ ---help---
+ This adds support for rt2800 wireless chipset family.
+ Supported chips: RT2770, RT2870 & RT3070.
+
+ When compiled as a module, this driver will be called rt2800usb.
config RT2X00_LIB_PCI
tristate
@@ -88,6 +102,9 @@ config RT2X00_LIB_USB
config RT2X00_LIB
tristate
+config RT2X00_LIB_HT
+ boolean
+
config RT2X00_LIB_FIRMWARE
boolean
select FW_LOADER
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index f22d808d8c5..bfc7226f0af 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -8,6 +8,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_CRYPTO) += rt2x00crypto.o
rt2x00lib-$(CONFIG_RT2X00_LIB_RFKILL) += rt2x00rfkill.o
rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o
+rt2x00lib-$(CONFIG_RT2X00_LIB_HT) += rt2x00ht.o
obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
@@ -17,3 +18,4 @@ obj-$(CONFIG_RT2500PCI) += rt2500pci.o
obj-$(CONFIG_RT61PCI) += rt61pci.o
obj-$(CONFIG_RT2500USB) += rt2500usb.o
obj-$(CONFIG_RT73USB) += rt73usb.o
+obj-$(CONFIG_RT2800USB) += rt2800usb.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 0f08773328c..435f945fe64 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -335,10 +335,11 @@ static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev,
preamble_mask = erp->short_preamble << 3;
rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
- rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT,
- erp->ack_timeout);
+ rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, erp->ack_timeout);
rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME,
erp->ack_consume_time);
+ rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
+ rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);
rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg);
@@ -371,6 +372,11 @@ static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time);
rt2x00pci_register_write(rt2x00dev, CSR11, reg);
+ rt2x00pci_register_read(rt2x00dev, CSR12, &reg);
+ rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL, erp->beacon_int * 16);
+ rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION, erp->beacon_int * 16);
+ rt2x00pci_register_write(rt2x00dev, CSR12, reg);
+
rt2x00pci_register_read(rt2x00dev, CSR18, &reg);
rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs);
rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs);
@@ -503,24 +509,6 @@ static void rt2400pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_write(rt2x00dev, CSR11, reg);
}
-static void rt2400pci_config_duration(struct rt2x00_dev *rt2x00dev,
- struct rt2x00lib_conf *libconf)
-{
- u32 reg;
-
- rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
- rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
- rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
- rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);
-
- rt2x00pci_register_read(rt2x00dev, CSR12, &reg);
- rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL,
- libconf->conf->beacon_int * 16);
- rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION,
- libconf->conf->beacon_int * 16);
- rt2x00pci_register_write(rt2x00dev, CSR12, reg);
-}
-
static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_conf *libconf)
{
@@ -532,7 +520,7 @@ static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev,
if (state == STATE_SLEEP) {
rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN,
- (libconf->conf->beacon_int - 20) * 16);
+ (rt2x00dev->beacon_int - 20) * 16);
rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP,
libconf->conf->listen_interval - 1);
@@ -558,8 +546,6 @@ static void rt2400pci_config(struct rt2x00_dev *rt2x00dev,
libconf->conf->power_level);
if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
rt2400pci_config_retry_limit(rt2x00dev, libconf);
- if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
- rt2400pci_config_duration(rt2x00dev, libconf);
if (flags & IEEE80211_CONF_CHANGE_PS)
rt2400pci_config_ps(rt2x00dev, libconf);
}
@@ -1361,7 +1347,7 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2x00pci_register_read(rt2x00dev, CSR0, &reg);
- rt2x00_set_chip(rt2x00dev, RT2460, value, reg);
+ rt2x00_set_chip_rf(rt2x00dev, value, reg);
if (!rt2x00_rf(&rt2x00dev->chip, RF2420) &&
!rt2x00_rf(&rt2x00dev->chip, RF2421)) {
@@ -1580,7 +1566,6 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
.add_interface = rt2x00mac_add_interface,
.remove_interface = rt2x00mac_remove_interface,
.config = rt2x00mac_config,
- .config_interface = rt2x00mac_config_interface,
.configure_filter = rt2x00mac_configure_filter,
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 276a8232aaa..08b30d01e67 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -341,10 +341,11 @@ static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev,
preamble_mask = erp->short_preamble << 3;
rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
- rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT,
- erp->ack_timeout);
+ rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, erp->ack_timeout);
rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME,
erp->ack_consume_time);
+ rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
+ rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);
rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg);
@@ -377,6 +378,11 @@ static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time);
rt2x00pci_register_write(rt2x00dev, CSR11, reg);
+ rt2x00pci_register_read(rt2x00dev, CSR12, &reg);
+ rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL, erp->beacon_int * 16);
+ rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION, erp->beacon_int * 16);
+ rt2x00pci_register_write(rt2x00dev, CSR12, reg);
+
rt2x00pci_register_read(rt2x00dev, CSR18, &reg);
rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs);
rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs);
@@ -552,24 +558,6 @@ static void rt2500pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_write(rt2x00dev, CSR11, reg);
}
-static void rt2500pci_config_duration(struct rt2x00_dev *rt2x00dev,
- struct rt2x00lib_conf *libconf)
-{
- u32 reg;
-
- rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
- rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
- rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
- rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);
-
- rt2x00pci_register_read(rt2x00dev, CSR12, &reg);
- rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL,
- libconf->conf->beacon_int * 16);
- rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION,
- libconf->conf->beacon_int * 16);
- rt2x00pci_register_write(rt2x00dev, CSR12, reg);
-}
-
static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_conf *libconf)
{
@@ -581,7 +569,7 @@ static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev,
if (state == STATE_SLEEP) {
rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN,
- (libconf->conf->beacon_int - 20) * 16);
+ (rt2x00dev->beacon_int - 20) * 16);
rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP,
libconf->conf->listen_interval - 1);
@@ -609,8 +597,6 @@ static void rt2500pci_config(struct rt2x00_dev *rt2x00dev,
libconf->conf->power_level);
if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
rt2500pci_config_retry_limit(rt2x00dev, libconf);
- if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
- rt2500pci_config_duration(rt2x00dev, libconf);
if (flags & IEEE80211_CONF_CHANGE_PS)
rt2500pci_config_ps(rt2x00dev, libconf);
}
@@ -1525,7 +1511,7 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2x00pci_register_read(rt2x00dev, CSR0, &reg);
- rt2x00_set_chip(rt2x00dev, RT2560, value, reg);
+ rt2x00_set_chip_rf(rt2x00dev, value, reg);
if (!rt2x00_rf(&rt2x00dev->chip, RF2522) &&
!rt2x00_rf(&rt2x00dev->chip, RF2523) &&
@@ -1879,7 +1865,6 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
.add_interface = rt2x00mac_add_interface,
.remove_interface = rt2x00mac_remove_interface,
.config = rt2x00mac_config,
- .config_interface = rt2x00mac_config_interface,
.configure_filter = rt2x00mac_configure_filter,
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9e630e70fc9..66daf68ff0e 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -503,6 +503,10 @@ static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev,
rt2500usb_register_write(rt2x00dev, TXRX_CSR11, erp->basic_rates);
+ rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg);
+ rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL, erp->beacon_int * 4);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
+
rt2500usb_register_write(rt2x00dev, MAC_CSR10, erp->slot_time);
rt2500usb_register_write(rt2x00dev, MAC_CSR11, erp->sifs);
rt2500usb_register_write(rt2x00dev, MAC_CSR12, erp->eifs);
@@ -632,17 +636,6 @@ static void rt2500usb_config_txpower(struct rt2x00_dev *rt2x00dev,
rt2500usb_rf_write(rt2x00dev, 3, rf3);
}
-static void rt2500usb_config_duration(struct rt2x00_dev *rt2x00dev,
- struct rt2x00lib_conf *libconf)
-{
- u16 reg;
-
- rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg);
- rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL,
- libconf->conf->beacon_int * 4);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
-}
-
static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_conf *libconf)
{
@@ -654,7 +647,7 @@ static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev,
if (state == STATE_SLEEP) {
rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON,
- libconf->conf->beacon_int - 20);
+ rt2x00dev->beacon_int - 20);
rt2x00_set_field16(&reg, MAC_CSR18_BEACONS_BEFORE_WAKEUP,
libconf->conf->listen_interval - 1);
@@ -680,8 +673,6 @@ static void rt2500usb_config(struct rt2x00_dev *rt2x00dev,
!(flags & IEEE80211_CONF_CHANGE_CHANNEL))
rt2500usb_config_txpower(rt2x00dev,
libconf->conf->power_level);
- if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
- rt2500usb_config_duration(rt2x00dev, libconf);
if (flags & IEEE80211_CONF_CHANGE_PS)
rt2500usb_config_ps(rt2x00dev, libconf);
}
@@ -1559,7 +1550,7 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg);
rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
- if (!rt2x00_check_rev(&rt2x00dev->chip, 0)) {
+ if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
@@ -1908,7 +1899,6 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
.add_interface = rt2x00mac_add_interface,
.remove_interface = rt2x00mac_remove_interface,
.config = rt2x00mac_config,
- .config_interface = rt2x00mac_config_interface,
.configure_filter = rt2x00mac_configure_filter,
.set_key = rt2x00mac_set_key,
.get_stats = rt2x00mac_get_stats,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
new file mode 100644
index 00000000000..37561667925
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -0,0 +1,3078 @@
+/*
+ Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+ <http://rt2x00.serialmonkey.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the
+ Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ Module: rt2800usb
+ Abstract: rt2800usb device specific routines.
+ Supported chipsets: RT2800U.
+ */
+
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "rt2x00.h"
+#include "rt2x00usb.h"
+#include "rt2800usb.h"
+
+/*
+ * Allow hardware encryption to be disabled.
+ */
+static int modparam_nohwcrypt = 1;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+/*
+ * Register access.
+ * All access to the CSR registers will go through the methods
+ * rt2x00usb_register_read and rt2x00usb_register_write.
+ * BBP and RF register require indirect register access,
+ * and use the CSR registers BBPCSR and RFCSR to achieve this.
+ * These indirect registers work with busy bits,
+ * and we will try at most REGISTER_BUSY_COUNT times to access
+ * the register while taking a REGISTER_BUSY_DELAY us delay
+ * between each attempt. When the busy bit is still set at that time,
+ * the access attempt is considered to have failed,
+ * and we will print an error.
+ * The _lock versions must be used if you already hold the csr_mutex.
+ */
+#define WAIT_FOR_BBP(__dev, __reg) \
+ rt2x00usb_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg))
+#define WAIT_FOR_RFCSR(__dev, __reg) \
+ rt2x00usb_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg))
+#define WAIT_FOR_RF(__dev, __reg) \
+ rt2x00usb_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg))
+#define WAIT_FOR_MCU(__dev, __reg) \
+ rt2x00usb_regbusy_read((__dev), H2M_MAILBOX_CSR, \
+ H2M_MAILBOX_CSR_OWNER, (__reg))
+
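[Editor's note: all of the WAIT_FOR_* macros above boil down to the same busy-bit poll, implemented by rt2x00usb_regbusy_read() elsewhere in rt2x00. A self-contained sketch of that polling pattern, with illustrative constants and callbacks rather than the rt2x00 API:]

#include <stdbool.h>
#include <stdint.h>

#define REGISTER_BUSY_COUNT 5    /* illustrative values; the real ones */
#define REGISTER_BUSY_DELAY 100  /* are defined in the rt2x00 headers  */

/* Read a register and retry while its busy bit is set, up to
 * REGISTER_BUSY_COUNT attempts with a short delay in between.
 * On failure *reg ends up as 0xffffffff, matching the behaviour the
 * comment block above describes. */
static bool regbusy_read(uint32_t (*read_reg)(unsigned int offset),
			 void (*delay_us)(unsigned int us),
			 unsigned int offset, uint32_t busy_mask,
			 uint32_t *reg)
{
	unsigned int i;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		*reg = read_reg(offset);
		if (!(*reg & busy_mask))
			return true;        /* register is free to use */
		delay_us(REGISTER_BUSY_DELAY);
	}

	*reg = 0xffffffff;
	return false;
}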
+static void rt2800usb_bbp_write(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word, const u8 value)
+{
+ u32 reg;
+
+ mutex_lock(&rt2x00dev->csr_mutex);
+
+ /*
+ * Wait until the BBP becomes available, afterwards we
+ * can safely write the new data into the register.
+ */
+ if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
+ reg = 0;
+ rt2x00_set_field32(&reg, BBP_CSR_CFG_VALUE, value);
+ rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
+ rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
+ rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
+
+ rt2x00usb_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
+ }
+
+ mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800usb_bbp_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word, u8 *value)
+{
+ u32 reg;
+
+ mutex_lock(&rt2x00dev->csr_mutex);
+
+ /*
+ * Wait until the BBP becomes available, afterwards we
+ * can safely write the read request into the register.
+ * After the data has been written, we wait until hardware
+ * returns the correct value. If at any time the register
+ * doesn't become available in time, reg will be 0xffffffff,
+ * which means we return 0xff to the caller.
+ */
+ if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
+ reg = 0;
+ rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
+ rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
+ rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
+
+ rt2x00usb_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
+
+ WAIT_FOR_BBP(rt2x00dev, &reg);
+ }
+
+ *value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE);
+
+ mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800usb_rfcsr_write(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word, const u8 value)
+{
+ u32 reg;
+
+ mutex_lock(&rt2x00dev->csr_mutex);
+
+ /*
+ * Wait until the RFCSR becomes available, afterwards we
+ * can safely write the new data into the register.
+ */
+ if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
+ reg = 0;
+ rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
+
+ rt2x00usb_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+ }
+
+ mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800usb_rfcsr_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word, u8 *value)
+{
+ u32 reg;
+
+ mutex_lock(&rt2x00dev->csr_mutex);
+
+ /*
+ * Wait until the RFCSR becomes available, afterwards we
+ * can safely write the read request into the register.
+ * After the data has been written, we wait until hardware
+ * returns the correct value. If at any time the register
+ * doesn't become available in time, reg will be 0xffffffff,
+ * which means we return 0xff to the caller.
+ */
+ if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
+ reg = 0;
+ rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
+
+ rt2x00usb_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+
+ WAIT_FOR_RFCSR(rt2x00dev, &reg);
+ }
+
+ *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
+
+ mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800usb_rf_write(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word, const u32 value)
+{
+ u32 reg;
+
+ mutex_lock(&rt2x00dev->csr_mutex);
+
+ /*
+ * Wait until the RF becomes available; afterwards we
+ * can safely write the new data into the register.
+ */
+ if (WAIT_FOR_RF(rt2x00dev, &reg)) {
+ reg = 0;
+ rt2x00_set_field32(&reg, RF_CSR_CFG0_REG_VALUE_BW, value);
+ rt2x00_set_field32(&reg, RF_CSR_CFG0_STANDBYMODE, 0);
+ rt2x00_set_field32(&reg, RF_CSR_CFG0_SEL, 0);
+ rt2x00_set_field32(&reg, RF_CSR_CFG0_BUSY, 1);
+
+ rt2x00usb_register_write_lock(rt2x00dev, RF_CSR_CFG0, reg);
+ rt2x00_rf_write(rt2x00dev, word, value);
+ }
+
+ mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800usb_mcu_request(struct rt2x00_dev *rt2x00dev,
+ const u8 command, const u8 token,
+ const u8 arg0, const u8 arg1)
+{
+ u32 reg;
+
+ mutex_lock(&rt2x00dev->csr_mutex);
+
+ /*
+ * Wait until the MCU becomes available; afterwards we
+ * can safely write the new data into the register.
+ */
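+ /*
+ * The token and arguments go into the H2M mailbox first;
+ * writing the command into HOST_CMD_CSR then issues it to the MCU.
+ */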
+ if (WAIT_FOR_MCU(rt2x00dev, &reg)) {
+ rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1);
+ rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
+ rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
+ rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
+ rt2x00usb_register_write_lock(rt2x00dev, H2M_MAILBOX_CSR, reg);
+
+ reg = 0;
+ rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
+ rt2x00usb_register_write_lock(rt2x00dev, HOST_CMD_CSR, reg);
+ }
+
+ mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+static const struct rt2x00debug rt2800usb_rt2x00debug = {
+ .owner = THIS_MODULE,
+ .csr = {
+ .read = rt2x00usb_register_read,
+ .write = rt2x00usb_register_write,
+ .flags = RT2X00DEBUGFS_OFFSET,
+ .word_base = CSR_REG_BASE,
+ .word_size = sizeof(u32),
+ .word_count = CSR_REG_SIZE / sizeof(u32),
+ },
+ .eeprom = {
+ .read = rt2x00_eeprom_read,
+ .write = rt2x00_eeprom_write,
+ .word_base = EEPROM_BASE,
+ .word_size = sizeof(u16),
+ .word_count = EEPROM_SIZE / sizeof(u16),
+ },
+ .bbp = {
+ .read = rt2800usb_bbp_read,
+ .write = rt2800usb_bbp_write,
+ .word_base = BBP_BASE,
+ .word_size = sizeof(u8),
+ .word_count = BBP_SIZE / sizeof(u8),
+ },
+ .rf = {
+ .read = rt2x00_rf_read,
+ .write = rt2800usb_rf_write,
+ .word_base = RF_BASE,
+ .word_size = sizeof(u32),
+ .word_count = RF_SIZE / sizeof(u32),
+ },
+};
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+
+#ifdef CONFIG_RT2X00_LIB_RFKILL
+static int rt2800usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+
+ rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+ return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
+}
+#else
+#define rt2800usb_rfkill_poll NULL
+#endif /* CONFIG_RT2X00_LIB_RFKILL */
+
+#ifdef CONFIG_RT2X00_LIB_LEDS
+static void rt2800usb_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct rt2x00_led *led =
+ container_of(led_cdev, struct rt2x00_led, led_dev);
+ unsigned int enabled = brightness != LED_OFF;
+ unsigned int bg_mode =
+ (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+ unsigned int polarity =
+ rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
+ EEPROM_FREQ_LED_POLARITY);
+ unsigned int ledmode =
+ rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
+ EEPROM_FREQ_LED_MODE);
+
+ if (led->type == LED_TYPE_RADIO) {
+ rt2800usb_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
+ enabled ? 0x20 : 0);
+ } else if (led->type == LED_TYPE_ASSOC) {
+ rt2800usb_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
+ enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
+ } else if (led->type == LED_TYPE_QUALITY) {
+ /*
+ * The brightness is divided into 6 levels (0 - 5).
+ * The specs give the following register values for
+ * those levels: 0, 1, 3, 7, 15, 31. The value for a
+ * given level can thus be derived with a simple
+ * bitshift:
+ * (1 << level) - 1
+ */
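+ /*
+ * Note that '/' binds tighter than '<<' in the expression below,
+ * so it evaluates as (1 << (brightness / (LED_FULL / 6))) - 1.
+ */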
+ rt2800usb_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
+ (1 << brightness / (LED_FULL / 6)) - 1,
+ polarity);
+ }
+}
+
+static int rt2800usb_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct rt2x00_led *led =
+ container_of(led_cdev, struct rt2x00_led, led_dev);
+ u32 reg;
+
+ rt2x00usb_register_read(led->rt2x00dev, LED_CFG, &reg);
+ rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
+ rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
+ rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
+ rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
+ rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
+ rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
+ rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
+ rt2x00usb_register_write(led->rt2x00dev, LED_CFG, reg);
+
+ return 0;
+}
+
+static void rt2800usb_init_led(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_led *led,
+ enum led_type type)
+{
+ led->rt2x00dev = rt2x00dev;
+ led->type = type;
+ led->led_dev.brightness_set = rt2800usb_brightness_set;
+ led->led_dev.blink_set = rt2800usb_blink_set;
+ led->flags = LED_INITIALIZED;
+}
+#endif /* CONFIG_RT2X00_LIB_LEDS */
+
+/*
+ * Configuration handlers.
+ */
+static void rt2800usb_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00lib_crypto *crypto,
+ struct ieee80211_key_conf *key)
+{
+ struct mac_wcid_entry wcid_entry;
+ struct mac_iveiv_entry iveiv_entry;
+ u32 offset;
+ u32 reg;
+
+ offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);
+
+ rt2x00usb_register_read(rt2x00dev, offset, &reg);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
+ !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
+ (crypto->cmd == SET_KEY) * crypto->cipher);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
+ (crypto->cmd == SET_KEY) * crypto->bssidx);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
+ rt2x00usb_register_write(rt2x00dev, offset, reg);
+
+ offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
+
+ memset(&iveiv_entry, 0, sizeof(iveiv_entry));
+ if ((crypto->cipher == CIPHER_TKIP) ||
+ (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
+ (crypto->cipher == CIPHER_AES))
+ iveiv_entry.iv[3] |= 0x20;
+ iveiv_entry.iv[3] |= key->keyidx << 6;
+ rt2x00usb_register_multiwrite(rt2x00dev, offset,
+ &iveiv_entry, sizeof(iveiv_entry));
+
+ offset = MAC_WCID_ENTRY(key->hw_key_idx);
+
+ memset(&wcid_entry, 0, sizeof(wcid_entry));
+ if (crypto->cmd == SET_KEY)
+ memcpy(&wcid_entry, crypto->address, ETH_ALEN);
+ rt2x00usb_register_multiwrite(rt2x00dev, offset,
+ &wcid_entry, sizeof(wcid_entry));
+}
+
+static int rt2800usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00lib_crypto *crypto,
+ struct ieee80211_key_conf *key)
+{
+ struct hw_key_entry key_entry;
+ struct rt2x00_field32 field;
+ int timeout;
+ u32 offset;
+ u32 reg;
+
+ if (crypto->cmd == SET_KEY) {
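+ /* Each bssidx owns 4 shared key entries: hw_key_idx = 4 * bssidx + keyidx. */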
+ key->hw_key_idx = (4 * crypto->bssidx) + key->keyidx;
+
+ memcpy(key_entry.key, crypto->key,
+ sizeof(key_entry.key));
+ memcpy(key_entry.tx_mic, crypto->tx_mic,
+ sizeof(key_entry.tx_mic));
+ memcpy(key_entry.rx_mic, crypto->rx_mic,
+ sizeof(key_entry.rx_mic));
+
+ offset = SHARED_KEY_ENTRY(key->hw_key_idx);
+ timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
+ rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
+ USB_VENDOR_REQUEST_OUT,
+ offset, &key_entry,
+ sizeof(key_entry),
+ timeout);
+ }
+
+ /*
+ * The cipher types are stored over multiple registers
+ * starting with SHARED_KEY_MODE_BASE. Each 32-bit word
+ * holds the cipher types for the keys of 2 bssidx entries.
+ * Using the individual field defines here would only add
+ * overhead, so just calculate the correct offset and mask.
+ */
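+ /*
+ * For example, hw_key_idx 10 maps to SHARED_KEY_MODE_ENTRY(1)
+ * with bit_offset 8 and bit_mask 0x00000700.
+ */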
+ field.bit_offset = 4 * (key->hw_key_idx % 8);
+ field.bit_mask = 0x7 << field.bit_offset;
+
+ offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8);
+
+ rt2x00usb_register_read(rt2x00dev, offset, &reg);
+ rt2x00_set_field32(&reg, field,
+ (crypto->cmd == SET_KEY) * crypto->cipher);
+ rt2x00usb_register_write(rt2x00dev, offset, reg);
+
+ /*
+ * Update WCID information
+ */
+ rt2800usb_config_wcid_attr(rt2x00dev, crypto, key);
+
+ return 0;
+}
+
+static int rt2800usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00lib_crypto *crypto,
+ struct ieee80211_key_conf *key)
+{
+ struct hw_key_entry key_entry;
+ int timeout;
+ u32 offset;
+
+ if (crypto->cmd == SET_KEY) {
+ /*
+ * Only 1 pairwise key is possible per AID, which means that
+ * the AID equals our hw_key_idx. Make sure the WCID starts
+ * _after_ the last possible shared key entry.
+ */
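+ /*
+ * The shared keys occupy indices 0 - 31 (8 bssidx * 4 keys each),
+ * hence the offset of 32 below.
+ */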
+ if (crypto->aid > (256 - 32))
+ return -ENOSPC;
+
+ key->hw_key_idx = 32 + crypto->aid;
+
+ memcpy(key_entry.key, crypto->key,
+ sizeof(key_entry.key));
+ memcpy(key_entry.tx_mic, crypto->tx_mic,
+ sizeof(key_entry.tx_mic));
+ memcpy(key_entry.rx_mic, crypto->rx_mic,
+ sizeof(key_entry.rx_mic));
+
+ offset = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
+ timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
+ rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
+ USB_VENDOR_REQUEST_OUT,
+ offset, &key_entry,
+ sizeof(key_entry),
+ timeout);
+ }
+
+ /*
+ * Update WCID information
+ */
+ rt2800usb_config_wcid_attr(rt2x00dev, crypto, key);
+
+ return 0;
+}
+
+static void rt2800usb_config_filter(struct rt2x00_dev *rt2x00dev,
+ const unsigned int filter_flags)
+{
+ u32 reg;
+
+ /*
+ * Start configuration steps.
+ * Note that frames with a version error will always be dropped
+ * and broadcast frames will always be accepted since
+ * there is no filter for them at this time.
+ */
+ rt2x00usb_register_read(rt2x00dev, RX_FILTER_CFG, &reg);
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CRC_ERROR,
+ !(filter_flags & FIF_FCSFAIL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
+ !(filter_flags & FIF_PLCPFAIL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
+ !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
+ !(filter_flags & FIF_ALLMULTI));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BROADCAST, 0);
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_DUPLICATE, 1);
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END_ACK,
+ !(filter_flags & FIF_CONTROL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END,
+ !(filter_flags & FIF_CONTROL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_ACK,
+ !(filter_flags & FIF_CONTROL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CTS,
+ !(filter_flags & FIF_CONTROL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_RTS,
+ !(filter_flags & FIF_CONTROL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
+ !(filter_flags & FIF_CONTROL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
+ !(filter_flags & FIF_CONTROL));
+ rt2x00usb_register_write(rt2x00dev, RX_FILTER_CFG, reg);
+}
+
+static void rt2800usb_config_intf(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_intf *intf,
+ struct rt2x00intf_conf *conf,
+ const unsigned int flags)
+{
+ unsigned int beacon_base;
+ u32 reg;
+
+ if (flags & CONFIG_UPDATE_TYPE) {
+ /*
+ * Clear current synchronisation setup.
+ * For the Beacon base registers we only need to clear
+ * the first byte since that byte contains the VALID and OWNER
+ * bits which (when set to 0) will invalidate the entire beacon.
+ */
+ beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
+ rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
+
+ /*
+ * Enable synchronisation.
+ */
+ rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ }
+
+ if (flags & CONFIG_UPDATE_MAC) {
+ reg = le32_to_cpu(conf->mac[1]);
+ rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
+ conf->mac[1] = cpu_to_le32(reg);
+
+ rt2x00usb_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
+ conf->mac, sizeof(conf->mac));
+ }
+
+ if (flags & CONFIG_UPDATE_BSSID) {
+ reg = le32_to_cpu(conf->bssid[1]);
+ rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 0);
+ rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
+ conf->bssid[1] = cpu_to_le32(reg);
+
+ rt2x00usb_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
+ conf->bssid, sizeof(conf->bssid));
+ }
+}
+
+static void rt2800usb_config_erp(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00lib_erp *erp)
+{
+ u32 reg;
+
+ rt2x00usb_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
+ rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT,
+ DIV_ROUND_UP(erp->ack_timeout, erp->slot_time));
+ rt2x00usb_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
+ !!erp->short_preamble);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
+ !!erp->short_preamble);
+ rt2x00usb_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
+ erp->cts_protection ? 2 : 0);
+ rt2x00usb_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
+
+ rt2x00usb_register_write(rt2x00dev, LEGACY_BASIC_RATE,
+ erp->basic_rates);
+ rt2x00usb_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
+
+ rt2x00usb_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
+ rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
+ rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
+ rt2x00usb_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
+ rt2x00usb_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+ erp->beacon_int * 16);
+ rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+}
+
+static void rt2800usb_config_ant(struct rt2x00_dev *rt2x00dev,
+ struct antenna_setup *ant)
+{
+ u8 r1;
+ u8 r3;
+
+ rt2800usb_bbp_read(rt2x00dev, 1, &r1);
+ rt2800usb_bbp_read(rt2x00dev, 3, &r3);
+
+ /*
+ * Configure the TX antenna.
+ */
+ switch ((int)ant->tx) {
+ case 1:
+ rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
+ break;
+ case 2:
+ rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
+ break;
+ case 3:
+ /* Do nothing */
+ break;
+ }
+
+ /*
+ * Configure the RX antenna.
+ */
+ switch ((int)ant->rx) {
+ case 1:
+ rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
+ break;
+ case 2:
+ rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
+ break;
+ case 3:
+ rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2);
+ break;
+ }
+
+ rt2800usb_bbp_write(rt2x00dev, 3, r3);
+ rt2800usb_bbp_write(rt2x00dev, 1, r1);
+}
+
+static void rt2800usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00lib_conf *libconf)
+{
+ u16 eeprom;
+ short lna_gain;
+
+ if (libconf->rf.channel <= 14) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+ lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
+ } else if (libconf->rf.channel <= 64) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+ lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
+ } else if (libconf->rf.channel <= 128) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
+ lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1);
+ } else {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
+ lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2);
+ }
+
+ rt2x00dev->lna_gain = lna_gain;
+}
+
+static void rt2800usb_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
+
+ if (rt2x00dev->default_ant.tx == 1)
+ rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);
+
+ if (rt2x00dev->default_ant.rx == 1) {
+ rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
+ rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
+ } else if (rt2x00dev->default_ant.rx == 2)
+ rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
+
+ if (rf->channel > 14) {
+ /*
+ * When the TX power is below 0, we should increase it by 7 to
+ * make it a positive value (the minimum value is -7).
+ * However, this means that values between 0 and 7 have a
+ * double meaning, and we should set a 7dBm boost flag.
+ */
+ rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
+ (info->tx_power1 >= 0));
+
+ if (info->tx_power1 < 0)
+ info->tx_power1 += 7;
+
+ rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A,
+ TXPOWER_A_TO_DEV(info->tx_power1));
+
+ rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
+ (info->tx_power2 >= 0));
+
+ if (info->tx_power2 < 0)
+ info->tx_power2 += 7;
+
+ rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A,
+ TXPOWER_A_TO_DEV(info->tx_power2));
+ } else {
+ rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G,
+ TXPOWER_G_TO_DEV(info->tx_power1));
+ rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
+ TXPOWER_G_TO_DEV(info->tx_power2));
+ }
+
+ rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));
+
+ rt2800usb_rf_write(rt2x00dev, 1, rf->rf1);
+ rt2800usb_rf_write(rt2x00dev, 2, rf->rf2);
+ rt2800usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
+ rt2800usb_rf_write(rt2x00dev, 4, rf->rf4);
+
+ udelay(200);
+
+ rt2800usb_rf_write(rt2x00dev, 1, rf->rf1);
+ rt2800usb_rf_write(rt2x00dev, 2, rf->rf2);
+ rt2800usb_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
+ rt2800usb_rf_write(rt2x00dev, 4, rf->rf4);
+
+ udelay(200);
+
+ rt2800usb_rf_write(rt2x00dev, 1, rf->rf1);
+ rt2800usb_rf_write(rt2x00dev, 2, rf->rf2);
+ rt2800usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
+ rt2800usb_rf_write(rt2x00dev, 4, rf->rf4);
+}
+
+static void rt2800usb_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ u8 rfcsr;
+
+ rt2800usb_rfcsr_write(rt2x00dev, 2, rf->rf1);
+ rt2800usb_rfcsr_write(rt2x00dev, 3, rf->rf3);
+
+ rt2800usb_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2);
+ rt2800usb_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+ rt2800usb_rfcsr_read(rt2x00dev, 12, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
+ TXPOWER_G_TO_DEV(info->tx_power1));
+ rt2800usb_rfcsr_write(rt2x00dev, 12, rfcsr);
+
+ rt2800usb_rfcsr_read(rt2x00dev, 23, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
+ rt2800usb_rfcsr_write(rt2x00dev, 23, rfcsr);
+
+ rt2800usb_rfcsr_write(rt2x00dev, 24,
+ rt2x00dev->calibration[conf_is_ht40(conf)]);
+
+ rt2800usb_rfcsr_read(rt2x00dev, 7, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
+ rt2800usb_rfcsr_write(rt2x00dev, 7, rfcsr);
+}
+
+static void rt2800usb_config_channel(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ u32 reg;
+ unsigned int tx_pin;
+ u8 bbp;
+
+ if (rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
+ rt2800usb_config_channel_rt2x(rt2x00dev, conf, rf, info);
+ else
+ rt2800usb_config_channel_rt3x(rt2x00dev, conf, rf, info);
+
+ /*
+ * Change BBP settings
+ */
+ rt2800usb_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+ rt2800usb_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+ rt2800usb_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+ rt2800usb_bbp_write(rt2x00dev, 86, 0);
+
+ if (rf->channel <= 14) {
+ if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
+ rt2800usb_bbp_write(rt2x00dev, 82, 0x62);
+ rt2800usb_bbp_write(rt2x00dev, 75, 0x46);
+ } else {
+ rt2800usb_bbp_write(rt2x00dev, 82, 0x84);
+ rt2800usb_bbp_write(rt2x00dev, 75, 0x50);
+ }
+ } else {
+ rt2800usb_bbp_write(rt2x00dev, 82, 0xf2);
+
+ if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
+ rt2800usb_bbp_write(rt2x00dev, 75, 0x46);
+ else
+ rt2800usb_bbp_write(rt2x00dev, 75, 0x50);
+ }
+
+ rt2x00usb_register_read(rt2x00dev, TX_BAND_CFG, &reg);
+ rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf));
+ rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
+ rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
+ rt2x00usb_register_write(rt2x00dev, TX_BAND_CFG, reg);
+
+ tx_pin = 0;
+
+ /* Turn on unused PA or LNA when not using 1T or 1R */
+ if (rt2x00dev->default_ant.tx != 1) {
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
+ }
+
+ /* Turn on unused PA or LNA when not using 1T or 1R */
+ if (rt2x00dev->default_ant.rx != 1) {
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
+ }
+
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);
+
+ rt2x00usb_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
+
+ rt2800usb_bbp_read(rt2x00dev, 4, &bbp);
+ rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
+ rt2800usb_bbp_write(rt2x00dev, 4, bbp);
+
+ rt2800usb_bbp_read(rt2x00dev, 3, &bbp);
+ rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
+ rt2800usb_bbp_write(rt2x00dev, 3, bbp);
+
+ if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+ if (conf_is_ht40(conf)) {
+ rt2800usb_bbp_write(rt2x00dev, 69, 0x1a);
+ rt2800usb_bbp_write(rt2x00dev, 70, 0x0a);
+ rt2800usb_bbp_write(rt2x00dev, 73, 0x16);
+ } else {
+ rt2800usb_bbp_write(rt2x00dev, 69, 0x16);
+ rt2800usb_bbp_write(rt2x00dev, 70, 0x08);
+ rt2800usb_bbp_write(rt2x00dev, 73, 0x11);
+ }
+ }
+
+ msleep(1);
+}
+
+static void rt2800usb_config_txpower(struct rt2x00_dev *rt2x00dev,
+ const int txpower)
+{
+ u32 reg;
+ u32 value = TXPOWER_G_TO_DEV(txpower);
+ u8 r1;
+
+ rt2800usb_bbp_read(rt2x00dev, 1, &r1);
+ rt2x00_set_field8(&r1, BBP1_TX_POWER, 0);
+ rt2800usb_bbp_write(rt2x00dev, 1, r1);
+
+ rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_0_1MBS, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_0_2MBS, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_0_55MBS, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_0_11MBS, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_0_6MBS, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_0_9MBS, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_0_12MBS, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_0_18MBS, value);
+ rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_0, reg);
+
+ rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_1, &reg);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_1_24MBS, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_1_36MBS, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_1_48MBS, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_1_54MBS, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS0, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS1, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS2, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS3, value);
+ rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_1, reg);
+
+ rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_2, &reg);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS4, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS5, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS6, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS7, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS8, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS9, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS10, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS11, value);
+ rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_2, reg);
+
+ rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_3, &reg);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS12, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS13, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS14, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS15, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN1, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN2, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN3, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN4, value);
+ rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_3, reg);
+
+ rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_4, &reg);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN5, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN6, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN7, value);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN8, value);
+ rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_4, reg);
+}
+
+static void rt2800usb_config_retry_limit(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00lib_conf *libconf)
+{
+ u32 reg;
+
+ rt2x00usb_register_read(rt2x00dev, TX_RTY_CFG, &reg);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT,
+ libconf->conf->short_frame_max_tx_count);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
+ libconf->conf->long_frame_max_tx_count);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
+ rt2x00usb_register_write(rt2x00dev, TX_RTY_CFG, reg);
+}
+
+static void rt2800usb_config_ps(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00lib_conf *libconf)
+{
+ enum dev_state state =
+ (libconf->conf->flags & IEEE80211_CONF_PS) ?
+ STATE_SLEEP : STATE_AWAKE;
+ u32 reg;
+
+ if (state == STATE_SLEEP) {
+ rt2x00usb_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0);
+
+ rt2x00usb_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
+ rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5);
+ rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE,
+ libconf->conf->listen_interval - 1);
+ rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 1);
+ rt2x00usb_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
+
+ rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
+ } else {
+ rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
+
+ rt2x00usb_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
+ rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
+ rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
+ rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
+ rt2x00usb_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
+ }
+}
+
+static void rt2800usb_config(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00lib_conf *libconf,
+ const unsigned int flags)
+{
+ /* Always recalculate LNA gain before changing configuration */
+ rt2800usb_config_lna_gain(rt2x00dev, libconf);
+
+ if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
+ rt2800usb_config_channel(rt2x00dev, libconf->conf,
+ &libconf->rf, &libconf->channel);
+ if (flags & IEEE80211_CONF_CHANGE_POWER)
+ rt2800usb_config_txpower(rt2x00dev, libconf->conf->power_level);
+ if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
+ rt2800usb_config_retry_limit(rt2x00dev, libconf);
+ if (flags & IEEE80211_CONF_CHANGE_PS)
+ rt2800usb_config_ps(rt2x00dev, libconf);
+}
+
+/*
+ * Link tuning
+ */
+static void rt2800usb_link_stats(struct rt2x00_dev *rt2x00dev,
+ struct link_qual *qual)
+{
+ u32 reg;
+
+ /*
+ * Update FCS error count from register.
+ */
+ rt2x00usb_register_read(rt2x00dev, RX_STA_CNT0, &reg);
+ qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR);
+}
+
+static u8 rt2800usb_get_default_vgc(struct rt2x00_dev *rt2x00dev)
+{
+ if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+ if (rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION)
+ return 0x1c + (2 * rt2x00dev->lna_gain);
+ else
+ return 0x2e + rt2x00dev->lna_gain;
+ }
+
+ if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+ return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
+ else
+ return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
+}
+
+static inline void rt2800usb_set_vgc(struct rt2x00_dev *rt2x00dev,
+ struct link_qual *qual, u8 vgc_level)
+{
+ if (qual->vgc_level != vgc_level) {
+ rt2800usb_bbp_write(rt2x00dev, 66, vgc_level);
+ qual->vgc_level = vgc_level;
+ qual->vgc_level_reg = vgc_level;
+ }
+}
+
+static void rt2800usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
+ struct link_qual *qual)
+{
+ rt2800usb_set_vgc(rt2x00dev, qual,
+ rt2800usb_get_default_vgc(rt2x00dev));
+}
+
+static void rt2800usb_link_tuner(struct rt2x00_dev *rt2x00dev,
+ struct link_qual *qual, const u32 count)
+{
+ if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
+ return;
+
+ /*
+ * When the RSSI is better than -80, increase the VGC level by 0x10.
+ */
+ rt2800usb_set_vgc(rt2x00dev, qual,
+ rt2800usb_get_default_vgc(rt2x00dev) +
+ ((qual->rssi > -80) * 0x10));
+}
+
+/*
+ * Firmware functions
+ */
+static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
+{
+ return FIRMWARE_RT2870;
+}
+
+static bool rt2800usb_check_crc(const u8 *data, const size_t len)
+{
+ u16 fw_crc;
+ u16 crc;
+
+ /*
+ * The last 2 bytes in the firmware array are the crc checksum itself.
+ * This means that we should never pass those 2 bytes to the crc
+ * algorithm.
+ */
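+ /* (data[len - 2] holds the high byte, data[len - 1] the low byte.) */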
+ fw_crc = (data[len - 2] << 8 | data[len - 1]);
+
+ /*
+ * Use the crc ccitt algorithm.
+ * This will return the same value as the legacy driver, which
+ * reversed the bit ordering of the firmware bytes before input
+ * as well as of the final output.
+ * Obviously using crc ccitt directly is much more efficient.
+ */
+ crc = crc_ccitt(~0, data, len - 2);
+
+ /*
+ * There is a small difference between the crc-itu-t + bitrev and
+ * the crc-ccitt crc calculation. In the latter method the 2 bytes
+ * are swapped, so use swab16 to convert the crc to the correct
+ * value.
+ */
+ crc = swab16(crc);
+
+ return fw_crc == crc;
+}
+
+static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
+ const u8 *data, const size_t len)
+{
+ u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
+ size_t offset = 0;
+
+ /*
+ * Firmware files:
+ * There are 2 variations of the rt2870 firmware.
+ * a) size: 4kb
+ * b) size: 8kb
+ * Note that (b) contains 2 separate firmware blobs of 4k
+ * within the file. The first blob is the same firmware as (a),
+ * but the second blob is for the additional chipsets.
+ */
+ if (len != 4096 && len != 8192)
+ return FW_BAD_LENGTH;
+
+ /*
+ * Check if we need the upper 4kb firmware data or not.
+ */
+ if ((len == 4096) &&
+ (chipset != 0x2860) &&
+ (chipset != 0x2872) &&
+ (chipset != 0x3070))
+ return FW_BAD_VERSION;
+
+ /*
+ * An 8kb firmware file must be checked as if it were
+ * 2 separate firmware files.
+ */
+ while (offset < len) {
+ if (!rt2800usb_check_crc(data + offset, 4096))
+ return FW_BAD_CRC;
+
+ offset += 4096;
+ }
+
+ return FW_OK;
+}
+
+static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
+ const u8 *data, const size_t len)
+{
+ unsigned int i;
+ int status;
+ u32 reg;
+ u32 offset;
+ u32 length;
+ u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
+
+ /*
+ * Check which section of the firmware we need.
+ */
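+ /*
+ * The first 4kb blob covers the 0x2860/0x2872/0x3070 chipsets;
+ * all other supported chipsets use the upper 4kb blob.
+ */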
+ if ((chipset == 0x2860) ||
+ (chipset == 0x2872) ||
+ (chipset == 0x3070)) {
+ offset = 0;
+ length = 4096;
+ } else {
+ offset = 4096;
+ length = 4096;
+ }
+
+ /*
+ * Wait for stable hardware.
+ */
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
+ if (reg && reg != ~0)
+ break;
+ msleep(1);
+ }
+
+ if (i == REGISTER_BUSY_COUNT) {
+ ERROR(rt2x00dev, "Unstable hardware.\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Write firmware to device.
+ */
+ rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
+ USB_VENDOR_REQUEST_OUT,
+ FIRMWARE_IMAGE_BASE,
+ data + offset, length,
+ REGISTER_TIMEOUT32(length));
+
+ rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
+ rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
+
+ /*
+ * Send the request to the device to load the firmware;
+ * a long timeout must be specified here.
+ */
+ status = rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE,
+ 0, USB_MODE_FIRMWARE,
+ REGISTER_TIMEOUT_FIRMWARE);
+ if (status < 0) {
+ ERROR(rt2x00dev, "Failed to write Firmware to device.\n");
+ return status;
+ }
+
+ msleep(10);
+ rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+
+ /*
+ * Send signal to firmware during boot time.
+ */
+ rt2800usb_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
+
+ if ((chipset == 0x3070) ||
+ (chipset == 0x3071) ||
+ (chipset == 0x3572)) {
+ udelay(200);
+ rt2800usb_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
+ udelay(10);
+ }
+
+ /*
+ * Wait for device to stabilize.
+ */
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
+ if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
+ break;
+ msleep(1);
+ }
+
+ if (i == REGISTER_BUSY_COUNT) {
+ ERROR(rt2x00dev, "PBF system register not ready.\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Initialize firmware.
+ */
+ rt2x00usb_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
+ rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+ msleep(1);
+
+ return 0;
+}
+
+/*
+ * Initialization functions.
+ */
+static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+ unsigned int i;
+
+ /*
+ * Wait until the BBP and RF are ready.
+ */
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
+ if (reg && reg != ~0)
+ break;
+ msleep(1);
+ }
+
+ if (i == REGISTER_BUSY_COUNT) {
+ ERROR(rt2x00dev, "Unstable hardware.\n");
+ return -EBUSY;
+ }
+
+ rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
+ rt2x00usb_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
+
+ rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
+ rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+ rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
+
+ rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
+ USB_MODE_RESET, REGISTER_TIMEOUT);
+
+ rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
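+ /* The beacon base offsets below are programmed in units of 64 bytes. */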
+ rt2x00usb_register_read(rt2x00dev, BCN_OFFSET0, &reg);
+ rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
+ rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */
+ rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */
+ rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */
+ rt2x00usb_register_write(rt2x00dev, BCN_OFFSET0, reg);
+
+ rt2x00usb_register_read(rt2x00dev, BCN_OFFSET1, &reg);
+ rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */
+ rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */
+ rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */
+ rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */
+ rt2x00usb_register_write(rt2x00dev, BCN_OFFSET1, reg);
+
+ rt2x00usb_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
+ rt2x00usb_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
+
+ rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+ rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
+ rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ if (rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
+ rt2x00usb_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
+ rt2x00usb_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+ rt2x00usb_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ } else {
+ rt2x00usb_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
+ rt2x00usb_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+ }
+
+ rt2x00usb_register_read(rt2x00dev, TX_LINK_CFG, &reg);
+ rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32);
+ rt2x00_set_field32(&reg, TX_LINK_CFG_MFB_ENABLE, 0);
+ rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0);
+ rt2x00_set_field32(&reg, TX_LINK_CFG_TX_MRQ_EN, 0);
+ rt2x00_set_field32(&reg, TX_LINK_CFG_TX_RDG_EN, 0);
+ rt2x00_set_field32(&reg, TX_LINK_CFG_TX_CF_ACK_EN, 1);
+ rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB, 0);
+ rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFS, 0);
+ rt2x00usb_register_write(rt2x00dev, TX_LINK_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
+ rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
+ rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
+ rt2x00usb_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
+ rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
+ if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
+ rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
+ rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
+ else
+ rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
+ rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
+ rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
+ rt2x00usb_register_write(rt2x00dev, MAX_LEN_CFG, reg);
+
+ rt2x00usb_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
+
+ rt2x00usb_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
+ rt2x00usb_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00usb_register_write(rt2x00dev, CCK_PROT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00usb_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00usb_register_write(rt2x00dev, MM20_PROT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00usb_register_write(rt2x00dev, MM40_PROT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00usb_register_write(rt2x00dev, GF20_PROT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00usb_register_write(rt2x00dev, GF40_PROT_CFG, reg);
+
+ rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006);
+
+ rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 3);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_BIG_ENDIAN, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_HDR_SCATTER, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_HDR_SEG_LEN, 0);
+ rt2x00usb_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
+ rt2x00usb_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f);
+ rt2x00usb_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);
+
+ rt2x00usb_register_read(rt2x00dev, TX_RTS_CFG, &reg);
+ rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
+ rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
+ IEEE80211_MAX_RTS_THRESHOLD);
+ rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0);
+ rt2x00usb_register_write(rt2x00dev, TX_RTS_CFG, reg);
+
+ rt2x00usb_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
+ rt2x00usb_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+
+ /*
+ * The ASIC will keep garbage values after boot, so clear all encryption keys.
+ */
+ for (i = 0; i < 256; i++) {
+ u32 wcid[2] = { 0xffffffff, 0x00ffffff };
+ rt2x00usb_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
+ wcid, sizeof(wcid));
+
+ rt2x00usb_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1);
+ rt2x00usb_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
+ }
+
+ for (i = 0; i < 16; i++)
+ rt2x00usb_register_write(rt2x00dev,
+ SHARED_KEY_MODE_ENTRY(i), 0);
+
+ /*
+ * Clear all beacons
+ * For the Beacon base registers we only need to clear
+ * the first byte since that byte contains the VALID and OWNER
+ * bits which (when set to 0) will invalidate the entire beacon.
+ */
+ rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
+ rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
+ rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
+ rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
+ rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE4, 0);
+ rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE5, 0);
+ rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
+ rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
+
+ rt2x00usb_register_read(rt2x00dev, USB_CYC_CFG, &reg);
+ rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30);
+ rt2x00usb_register_write(rt2x00dev, USB_CYC_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
+ rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS0FBK, 0);
+ rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS1FBK, 0);
+ rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS2FBK, 1);
+ rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS3FBK, 2);
+ rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS4FBK, 3);
+ rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS5FBK, 4);
+ rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS6FBK, 5);
+ rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS7FBK, 6);
+ rt2x00usb_register_write(rt2x00dev, HT_FBK_CFG0, reg);
+
+ rt2x00usb_register_read(rt2x00dev, HT_FBK_CFG1, &reg);
+ rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS8FBK, 8);
+ rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS9FBK, 8);
+ rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS10FBK, 9);
+ rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS11FBK, 10);
+ rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS12FBK, 11);
+ rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS13FBK, 12);
+ rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS14FBK, 13);
+ rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS15FBK, 14);
+ rt2x00usb_register_write(rt2x00dev, HT_FBK_CFG1, reg);
+
+ rt2x00usb_register_read(rt2x00dev, LG_FBK_CFG0, &reg);
+ rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS0FBK, 8);
+ rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS1FBK, 8);
+ rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS2FBK, 3);
+ rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS3FBK, 10);
+ rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS4FBK, 11);
+ rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS5FBK, 12);
+ rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS6FBK, 13);
+ rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS7FBK, 14);
+ rt2x00usb_register_write(rt2x00dev, LG_FBK_CFG0, reg);
+
+ rt2x00usb_register_read(rt2x00dev, LG_FBK_CFG1, &reg);
+ rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS0FBK, 0);
+ rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS1FBK, 0);
+ rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS2FBK, 1);
+ rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS3FBK, 2);
+ rt2x00usb_register_write(rt2x00dev, LG_FBK_CFG1, reg);
+
+ /*
+ * We must clear the error counters.
+ * These registers are cleared on read,
+ * so we can read them into a scratch variable and discard the value.
+ */
+ rt2x00usb_register_read(rt2x00dev, RX_STA_CNT0, &reg);
+ rt2x00usb_register_read(rt2x00dev, RX_STA_CNT1, &reg);
+ rt2x00usb_register_read(rt2x00dev, RX_STA_CNT2, &reg);
+ rt2x00usb_register_read(rt2x00dev, TX_STA_CNT0, &reg);
+ rt2x00usb_register_read(rt2x00dev, TX_STA_CNT1, &reg);
+ rt2x00usb_register_read(rt2x00dev, TX_STA_CNT2, &reg);
+
+ return 0;
+}
+
+static int rt2800usb_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2x00usb_register_read(rt2x00dev, MAC_STATUS_CFG, &reg);
+ if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
+ return 0;
+
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ ERROR(rt2x00dev, "BBP/RF register access failed, aborting.\n");
+ return -EACCES;
+}
+
+static int rt2800usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u8 value;
+
+ /*
+ * BBP was enabled after firmware was loaded,
+ * but we need to reactivate it now.
+ */
+ rt2x00usb_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
+ rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+ msleep(1);
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2800usb_bbp_read(rt2x00dev, 0, &value);
+ if ((value != 0xff) && (value != 0x00))
+ return 0;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ ERROR(rt2x00dev, "BBP register access failed, aborting.\n");
+ return -EACCES;
+}
+
+static int rt2800usb_init_bbp(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u16 eeprom;
+ u8 reg_id;
+ u8 value;
+
+ if (unlikely(rt2800usb_wait_bbp_rf_ready(rt2x00dev) ||
+ rt2800usb_wait_bbp_ready(rt2x00dev)))
+ return -EACCES;
+
+ rt2800usb_bbp_write(rt2x00dev, 65, 0x2c);
+ rt2800usb_bbp_write(rt2x00dev, 66, 0x38);
+ rt2800usb_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800usb_bbp_write(rt2x00dev, 70, 0x0a);
+ rt2800usb_bbp_write(rt2x00dev, 73, 0x10);
+ rt2800usb_bbp_write(rt2x00dev, 81, 0x37);
+ rt2800usb_bbp_write(rt2x00dev, 82, 0x62);
+ rt2800usb_bbp_write(rt2x00dev, 83, 0x6a);
+ rt2800usb_bbp_write(rt2x00dev, 84, 0x99);
+ rt2800usb_bbp_write(rt2x00dev, 86, 0x00);
+ rt2800usb_bbp_write(rt2x00dev, 91, 0x04);
+ rt2800usb_bbp_write(rt2x00dev, 92, 0x00);
+ rt2800usb_bbp_write(rt2x00dev, 103, 0x00);
+ rt2800usb_bbp_write(rt2x00dev, 105, 0x05);
+
+ if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+ rt2800usb_bbp_write(rt2x00dev, 69, 0x16);
+ rt2800usb_bbp_write(rt2x00dev, 73, 0x12);
+ }
+
+ if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION) {
+ rt2800usb_bbp_write(rt2x00dev, 84, 0x19);
+ }
+
+ if (rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
+ rt2800usb_bbp_write(rt2x00dev, 70, 0x0a);
+ rt2800usb_bbp_write(rt2x00dev, 84, 0x99);
+ rt2800usb_bbp_write(rt2x00dev, 105, 0x05);
+ }
+
+ for (i = 0; i < EEPROM_BBP_SIZE; i++) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
+
+ if (eeprom != 0xffff && eeprom != 0x0000) {
+ reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
+ value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
+ rt2800usb_bbp_write(rt2x00dev, reg_id, value);
+ }
+ }
+
+ return 0;
+}
+
+static u8 rt2800usb_init_rx_filter(struct rt2x00_dev *rt2x00dev,
+ bool bw40, u8 rfcsr24, u8 filter_target)
+{
+ unsigned int i;
+ u8 bbp;
+ u8 rfcsr;
+ u8 passband;
+ u8 stopband;
+ u8 overtuned = 0;
+
+ rt2800usb_rfcsr_write(rt2x00dev, 24, rfcsr24);
+
+ rt2800usb_bbp_read(rt2x00dev, 4, &bbp);
+ rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
+ rt2800usb_bbp_write(rt2x00dev, 4, bbp);
+
+ rt2800usb_rfcsr_read(rt2x00dev, 22, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
+ rt2800usb_rfcsr_write(rt2x00dev, 22, rfcsr);
+
+ /*
+ * Set power & frequency of passband test tone
+ */
+ rt2800usb_bbp_write(rt2x00dev, 24, 0);
+
+ for (i = 0; i < 100; i++) {
+ rt2800usb_bbp_write(rt2x00dev, 25, 0x90);
+ msleep(1);
+
+ rt2800usb_bbp_read(rt2x00dev, 55, &passband);
+ if (passband)
+ break;
+ }
+
+ /*
+ * Set power & frequency of stopband test tone
+ */
+ rt2800usb_bbp_write(rt2x00dev, 24, 0x06);
+
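+ /*
+ * Raise the filter corner (rfcsr24) until the difference between
+ * the passband and stopband readings exceeds filter_target; step
+ * back by one at the end if the target was ever hit exactly
+ * (overtuned).
+ */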
+ for (i = 0; i < 100; i++) {
+ rt2800usb_bbp_write(rt2x00dev, 25, 0x90);
+ msleep(1);
+
+ rt2800usb_bbp_read(rt2x00dev, 55, &stopband);
+
+ if ((passband - stopband) <= filter_target) {
+ rfcsr24++;
+ overtuned += ((passband - stopband) == filter_target);
+ } else
+ break;
+
+ rt2800usb_rfcsr_write(rt2x00dev, 24, rfcsr24);
+ }
+
+ rfcsr24 -= !!overtuned;
+
+ rt2800usb_rfcsr_write(rt2x00dev, 24, rfcsr24);
+ return rfcsr24;
+}
+
+static int rt2800usb_init_rfcsr(struct rt2x00_dev *rt2x00dev)
+{
+ u8 rfcsr;
+ u8 bbp;
+
+ if (rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
+ return 0;
+
+ /*
+ * Init RF calibration.
+ */
+ rt2800usb_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+ rt2800usb_rfcsr_write(rt2x00dev, 30, rfcsr);
+ msleep(1);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
+ rt2800usb_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ rt2800usb_rfcsr_write(rt2x00dev, 4, 0x40);
+ rt2800usb_rfcsr_write(rt2x00dev, 5, 0x03);
+ rt2800usb_rfcsr_write(rt2x00dev, 6, 0x02);
+ rt2800usb_rfcsr_write(rt2x00dev, 7, 0x70);
+ rt2800usb_rfcsr_write(rt2x00dev, 9, 0x0f);
+ rt2800usb_rfcsr_write(rt2x00dev, 10, 0x71);
+ rt2800usb_rfcsr_write(rt2x00dev, 11, 0x21);
+ rt2800usb_rfcsr_write(rt2x00dev, 12, 0x7b);
+ rt2800usb_rfcsr_write(rt2x00dev, 14, 0x90);
+ rt2800usb_rfcsr_write(rt2x00dev, 15, 0x58);
+ rt2800usb_rfcsr_write(rt2x00dev, 16, 0xb3);
+ rt2800usb_rfcsr_write(rt2x00dev, 17, 0x92);
+ rt2800usb_rfcsr_write(rt2x00dev, 18, 0x2c);
+ rt2800usb_rfcsr_write(rt2x00dev, 19, 0x02);
+ rt2800usb_rfcsr_write(rt2x00dev, 20, 0xba);
+ rt2800usb_rfcsr_write(rt2x00dev, 21, 0xdb);
+ rt2800usb_rfcsr_write(rt2x00dev, 24, 0x16);
+ rt2800usb_rfcsr_write(rt2x00dev, 25, 0x01);
+ rt2800usb_rfcsr_write(rt2x00dev, 27, 0x03);
+ rt2800usb_rfcsr_write(rt2x00dev, 29, 0x1f);
+
+ /*
+ * Set RX Filter calibration for 20MHz and 40MHz
+ */
+ rt2x00dev->calibration[0] =
+ rt2800usb_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
+ rt2x00dev->calibration[1] =
+ rt2800usb_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
+
+ /*
+ * Set back to initial state
+ */
+ rt2800usb_bbp_write(rt2x00dev, 24, 0);
+
+ rt2800usb_rfcsr_read(rt2x00dev, 22, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
+ rt2800usb_rfcsr_write(rt2x00dev, 22, rfcsr);
+
+ /*
+ * set BBP back to BW20
+ */
+ rt2800usb_bbp_read(rt2x00dev, 4, &bbp);
+ rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
+ rt2800usb_bbp_write(rt2x00dev, 4, bbp);
+
+ return 0;
+}
+
+/*
+ * Device state switch handlers.
+ */
+static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state)
+{
+ u32 reg;
+
+ rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
+ (state == STATE_RADIO_RX_ON) ||
+ (state == STATE_RADIO_RX_ON_LINK));
+ rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+}
+
+static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
+ !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
+ return 0;
+
+ msleep(1);
+ }
+
+ ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
+ return -EACCES;
+}
+
+static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+ u16 word;
+
+ /*
+ * Initialize all registers.
+ */
+ if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) ||
+ rt2800usb_init_registers(rt2x00dev) ||
+ rt2800usb_init_bbp(rt2x00dev) ||
+ rt2800usb_init_rfcsr(rt2x00dev)))
+ return -EIO;
+
+ rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
+ rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+ udelay(50);
+
+ rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
+ rt2x00usb_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
+
+ rt2x00usb_register_read(rt2x00dev, USB_DMA_CFG, &reg);
+ rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
+ /* Don't use bulk-in aggregation when working with USB 1.1 */
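+ /* (A 512 byte bulk max packet size indicates a high-speed USB 2.0 link.) */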
+ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN,
+ (rt2x00dev->rx->usb_maxpacket == 512));
+ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128);
+ /*
+ * Total room for RX frames in kilobytes. PBF might still exceed
+ * this limit, so reduce the number to prevent errors.
+ */
+ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT,
+ ((RX_ENTRIES * DATA_FRAME_SIZE) / 1024) - 3);
+ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
+ rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
+ rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
+ rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+ /*
+ * Initialize LED control
+ */
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
+ rt2800usb_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
+ word & 0xff, (word >> 8) & 0xff);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
+ rt2800usb_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
+ word & 0xff, (word >> 8) & 0xff);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
+ rt2800usb_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
+ word & 0xff, (word >> 8) & 0xff);
+
+ return 0;
+}
+
+static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+
+ rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+ rt2x00usb_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
+ rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
+ rt2x00usb_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+ rt2x00usb_register_write(rt2x00dev, TX_PIN_CFG, 0);
+
+ /* Wait for DMA, ignore error */
+ rt2800usb_wait_wpdma_ready(rt2x00dev);
+
+ rt2x00usb_disable_radio(rt2x00dev);
+}
+
+static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state)
+{
+ if (state == STATE_AWAKE)
+ rt2800usb_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0);
+ else
+ rt2800usb_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
+
+ return 0;
+}
+
+static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state)
+{
+ int retval = 0;
+
+ switch (state) {
+ case STATE_RADIO_ON:
+ /*
+ * Before the radio can be enabled, the device first has
+ * to be woken up. After that it needs a bit of time to
+ * become fully awake before the radio can be enabled.
+ */
+ rt2800usb_set_state(rt2x00dev, STATE_AWAKE);
+ msleep(1);
+ retval = rt2800usb_enable_radio(rt2x00dev);
+ break;
+ case STATE_RADIO_OFF:
+ /*
+ * After the radio has been disabled, the device should
+ * be put to sleep for powersaving.
+ */
+ rt2800usb_disable_radio(rt2x00dev);
+ rt2800usb_set_state(rt2x00dev, STATE_SLEEP);
+ break;
+ case STATE_RADIO_RX_ON:
+ case STATE_RADIO_RX_ON_LINK:
+ case STATE_RADIO_RX_OFF:
+ case STATE_RADIO_RX_OFF_LINK:
+ rt2800usb_toggle_rx(rt2x00dev, state);
+ break;
+ case STATE_RADIO_IRQ_ON:
+ case STATE_RADIO_IRQ_OFF:
+ /* No support, but no error either */
+ break;
+ case STATE_DEEP_SLEEP:
+ case STATE_SLEEP:
+ case STATE_STANDBY:
+ case STATE_AWAKE:
+ retval = rt2800usb_set_state(rt2x00dev, state);
+ break;
+ default:
+ retval = -ENOTSUPP;
+ break;
+ }
+
+ if (unlikely(retval))
+ ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n",
+ state, retval);
+
+ return retval;
+}
+
+/*
+ * TX descriptor initialization
+ */
+static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
+ struct txentry_desc *txdesc)
+{
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+ __le32 *txi = skbdesc->desc;
+ __le32 *txwi = &txi[TXINFO_DESC_SIZE / sizeof(__le32)];
+ u32 word;
+
+ /*
+ * Initialize TX Info descriptor
+ */
+ rt2x00_desc_read(txwi, 0, &word);
+ rt2x00_set_field32(&word, TXWI_W0_FRAG,
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
+ rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
+ rt2x00_set_field32(&word, TXWI_W0_TS,
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_AMPDU,
+ test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
+ rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
+ rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
+ rt2x00_set_field32(&word, TXWI_W0_BW,
+ test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
+ test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
+ rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
+ rt2x00_desc_write(txwi, 0, word);
+
+ rt2x00_desc_read(txwi, 1, &word);
+ rt2x00_set_field32(&word, TXWI_W1_ACK,
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W1_NSEQ,
+ test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
+ rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
+ test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
+ txdesc->key_idx : 0xff);
+ rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
+ skb->len - txdesc->l2pad);
+ rt2x00_set_field32(&word, TXWI_W1_PACKETID,
+ skbdesc->entry->entry_idx);
+ rt2x00_desc_write(txwi, 1, word);
+
+ /*
+ * Always write 0 to IV/EIV fields, hardware will insert the IV
+ * from the IVEIV register when TXINFO_W0_WIV is set to 0.
+ * When TXINFO_W0_WIV is set to 1 it will use the IV data
+ * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
+ * crypto entry in the registers should be used to encrypt the frame.
+ */
+ _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
+ _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
+
+ /*
+ * Initialize TX descriptor
+ */
+ rt2x00_desc_read(txi, 0, &word);
+ rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
+ skb->len + TXWI_DESC_SIZE);
+ rt2x00_set_field32(&word, TXINFO_W0_WIV,
+ !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
+ rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
+ rt2x00_set_field32(&word, TXINFO_W0_SW_USE_LAST_ROUND, 0);
+ rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_NEXT_VALID, 0);
+ rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_BURST,
+ test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+ rt2x00_desc_write(txi, 0, word);
+}
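The read/modify/write pattern above operates on 32-bit little-endian descriptor words; a hedged sketch of what the rt2x00 word accessors presumably reduce to (the example_ names are illustrative, not the driver's API):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch only, not part of the patch. */
static inline void example_desc_read(const __le32 *desc, u8 word, u32 *value)
{
	*value = le32_to_cpu(desc[word]);	/* load one descriptor word */
}

static inline void example_desc_write(__le32 *desc, u8 word, u32 value)
{
	desc[word] = cpu_to_le32(value);	/* store it back, LE on the wire */
}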
+
+/*
+ * TX data initialization
+ */
+static void rt2800usb_write_beacon(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ unsigned int beacon_base;
+ u32 reg;
+
+ /*
+ * Add the descriptor in front of the skb.
+ */
+ skb_push(entry->skb, entry->queue->desc_size);
+ memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len);
+ skbdesc->desc = entry->skb->data;
+
+ /*
+ * Disable beaconing while we are reloading the beacon data,
+ * otherwise we might be sending out invalid data.
+ */
+ rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+ rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ /*
+ * Write entire beacon with descriptor to register.
+ */
+ beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+ rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
+ USB_VENDOR_REQUEST_OUT, beacon_base,
+ entry->skb->data, entry->skb->len,
+ REGISTER_TIMEOUT32(entry->skb->len));
+
+ /*
+ * Clean up the beacon skb.
+ */
+ dev_kfree_skb(entry->skb);
+ entry->skb = NULL;
+}
+
+static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
+{
+ int length;
+
+ /*
+ * The length _must_ include 4 bytes of padding,
+ * it should always be a multiple of 4,
+ * but it must _not_ be a multiple of the USB packet size.
+ */
+ length = roundup(entry->skb->len + 4, 4);
+ length += (4 * !(length % entry->queue->usb_maxpacket));
+
+ return length;
+}
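The padding rule above is easiest to see with concrete numbers; a small sketch walking it through with a hypothetical 512-byte USB max packet size:

#include <linux/kernel.h>	/* roundup() */

/*
 * Sketch only, not part of the patch. With usb_maxpacket = 512:
 *   skb_len = 1017 -> roundup(1021, 4) = 1024, a multiple of 512,
 *                     so 4 extra bytes are added: 1028.
 *   skb_len = 1000 -> roundup(1004, 4) = 1004, not a multiple of 512,
 *                     so the length stays 1004.
 */
static inline int example_tx_data_len(int skb_len, int usb_maxpacket)
{
	int length = roundup(skb_len + 4, 4);

	if (!(length % usb_maxpacket))
		length += 4;

	return length;
}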
+
+static void rt2800usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
+ const enum data_queue_qid queue)
+{
+ u32 reg;
+
+ if (queue != QID_BEACON) {
+ rt2x00usb_kick_tx_queue(rt2x00dev, queue);
+ return;
+ }
+
+ rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ }
+}
+
+/*
+ * RX control handlers
+ */
+static void rt2800usb_fill_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ __le32 *rxd = (__le32 *)entry->skb->data;
+ __le32 *rxwi;
+ u32 rxd0;
+ u32 rxwi0;
+ u32 rxwi1;
+ u32 rxwi2;
+ u32 rxwi3;
+
+ /*
+ * Copy the descriptor into the skbdesc->desc buffer, so it is not
+ * affected when rt2x00usb moves the frame data.
+ */
+ memcpy(skbdesc->desc, rxd, skbdesc->desc_len);
+ rxd = (__le32 *)skbdesc->desc;
+ rxwi = &rxd[RXD_DESC_SIZE / sizeof(__le32)];
+
+ /*
+ * It is now safe to read the descriptor on all architectures.
+ */
+ rt2x00_desc_read(rxd, 0, &rxd0);
+ rt2x00_desc_read(rxwi, 0, &rxwi0);
+ rt2x00_desc_read(rxwi, 1, &rxwi1);
+ rt2x00_desc_read(rxwi, 2, &rxwi2);
+ rt2x00_desc_read(rxwi, 3, &rxwi3);
+
+ if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
+ rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
+ rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
+ rxdesc->cipher_status =
+ rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
+ }
+
+ if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
+ /*
+ * Hardware has stripped IV/EIV data from 802.11 frame during
+ * decryption. Unfortunately the descriptor doesn't contain
+ * any fields with the EIV/IV data either, so they can't
+ * be restored by rt2x00lib.
+ */
+ rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+
+ if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
+ rxdesc->flags |= RX_FLAG_DECRYPTED;
+ else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
+ rxdesc->flags |= RX_FLAG_MMIC_ERROR;
+ }
+
+ if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
+ rxdesc->dev_flags |= RXDONE_MY_BSS;
+
+ if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD))
+ rxdesc->dev_flags |= RXDONE_L2PAD;
+
+ if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
+ rxdesc->flags |= RX_FLAG_SHORT_GI;
+
+ if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
+ rxdesc->flags |= RX_FLAG_40MHZ;
+
+ /*
+ * Detect RX rate, always use MCS as signal type.
+ */
+ rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
+ rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
+ rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);
+
+ /*
+ * Mask off the 0x8 bit to remove the short preamble flag.
+ */
+ if (rxdesc->rate_mode == RATE_MODE_CCK)
+ rxdesc->signal &= ~0x8;
+
+ rxdesc->rssi =
+ (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
+ rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
+
+ rxdesc->noise =
+ (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
+ rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
+
+ rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
+
+ /*
+ * Remove RXWI descriptor from start of buffer.
+ */
+ skb_pull(entry->skb, skbdesc->desc_len);
+ skb_trim(entry->skb, rxdesc->size);
+}
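For CCK frames the raw rate value above presumably carries the short-preamble indication in bit 3, which is why only that bit is cleared before reporting the signal; a minimal sketch (the helper name is illustrative):

/* Sketch only, not part of the patch. */
static inline u8 example_strip_cck_short_preamble(u8 signal)
{
	/* e.g. a raw CCK value of 0x9 becomes rate index 0x1 */
	return signal & ~0x8;
}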
+
+/*
+ * Device probe functions.
+ */
+static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+ u16 word;
+ u8 *mac;
+ u8 default_lna_gain;
+
+ rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE);
+
+ /*
+ * Start validation of the data that has been read.
+ */
+ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
+ if (!is_valid_ether_addr(mac)) {
+ DECLARE_MAC_BUF(macbuf);
+
+ random_ether_addr(mac);
+ EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac));
+ }
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
+ if (word == 0xffff) {
+ rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
+ rt2x00_set_field16(&word, EEPROM_ANTENNA_TXPATH, 1);
+ rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
+ EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
+ } else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) {
+ /*
+ * There is a maximum of 2 RX streams for the RT2870 series.
+ */
+ if (rt2x00_get_field16(word, EEPROM_ANTENNA_RXPATH) > 2)
+ rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
+ }
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
+ if (word == 0xffff) {
+ rt2x00_set_field16(&word, EEPROM_NIC_HW_RADIO, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_DYNAMIC_TX_AGC, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_BG, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_A, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
+ EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
+ }
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
+ if ((word & 0x00ff) == 0x00ff) {
+ rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
+ rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
+ LED_MODE_TXRX_ACTIVITY);
+ rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8);
+ EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
+ }
+
+ /*
+ * During LNA validation we use lna0 as the correct
+ * value. Note that EEPROM_LNA itself is never validated.
+ */
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
+ default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
+ if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
+ rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
+ if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
+ rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
+ if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
+ rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
+ if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
+ rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
+ rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
+ default_lna_gain);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
+ if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
+ rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
+ if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
+ rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
+ if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
+ rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
+ if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
+ rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
+ rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
+ default_lna_gain);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
+
+ return 0;
+}
+
+static int rt2800usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+ u16 value;
+ u16 eeprom;
+
+ /*
+ * Read EEPROM word for configuration.
+ */
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+
+ /*
+ * Identify RF chipset.
+ */
+ value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
+ rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
+ rt2x00_set_chip(rt2x00dev, RT2870, value, reg);
+
+ /*
+ * The check for rt2860 is not a typo, some rt2870 hardware
+ * identifies itself as rt2860 in the CSR register.
+ */
+ if (!rt2x00_check_rev(&rt2x00dev->chip, 0xfff00000, 0x28600000) &&
+ !rt2x00_check_rev(&rt2x00dev->chip, 0xfff00000, 0x28700000) &&
+ !rt2x00_check_rev(&rt2x00dev->chip, 0xfff00000, 0x28800000) &&
+ !rt2x00_check_rev(&rt2x00dev->chip, 0xffff0000, 0x30700000)) {
+ ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
+ return -ENODEV;
+ }
+
+ if (!rt2x00_rf(&rt2x00dev->chip, RF2820) &&
+ !rt2x00_rf(&rt2x00dev->chip, RF2850) &&
+ !rt2x00_rf(&rt2x00dev->chip, RF2720) &&
+ !rt2x00_rf(&rt2x00dev->chip, RF2750) &&
+ !rt2x00_rf(&rt2x00dev->chip, RF3020) &&
+ !rt2x00_rf(&rt2x00dev->chip, RF2020)) {
+ ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Identify default antenna configuration.
+ */
+ rt2x00dev->default_ant.tx =
+ rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH);
+ rt2x00dev->default_ant.rx =
+ rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH);
+
+ /*
+ * Read frequency offset and RF programming sequence.
+ */
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+ rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
+
+ /*
+ * Read external LNA information.
+ */
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A))
+ __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
+ __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);
+
+ /*
+ * Detect if this device has a hardware-controlled radio.
+ */
+#ifdef CONFIG_RT2X00_LIB_RFKILL
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_HW_RADIO))
+ __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
+#endif /* CONFIG_RT2X00_LIB_RFKILL */
+
+ /*
+ * Store led settings, for correct led behaviour.
+ */
+#ifdef CONFIG_RT2X00_LIB_LEDS
+ rt2800usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
+ rt2800usb_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
+ rt2800usb_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ,
+ &rt2x00dev->led_mcu_reg);
+#endif /* CONFIG_RT2X00_LIB_LEDS */
+
+ return 0;
+}
+
+/*
+ * RF value list for rt2870
+ * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
+ */
+static const struct rf_channel rf_vals[] = {
+ { 1, 0x18402ecc, 0x184c0786, 0x1816b455, 0x1800510b },
+ { 2, 0x18402ecc, 0x184c0786, 0x18168a55, 0x1800519f },
+ { 3, 0x18402ecc, 0x184c078a, 0x18168a55, 0x1800518b },
+ { 4, 0x18402ecc, 0x184c078a, 0x18168a55, 0x1800519f },
+ { 5, 0x18402ecc, 0x184c078e, 0x18168a55, 0x1800518b },
+ { 6, 0x18402ecc, 0x184c078e, 0x18168a55, 0x1800519f },
+ { 7, 0x18402ecc, 0x184c0792, 0x18168a55, 0x1800518b },
+ { 8, 0x18402ecc, 0x184c0792, 0x18168a55, 0x1800519f },
+ { 9, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800518b },
+ { 10, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800519f },
+ { 11, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800518b },
+ { 12, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800519f },
+ { 13, 0x18402ecc, 0x184c079e, 0x18168a55, 0x1800518b },
+ { 14, 0x18402ecc, 0x184c07a2, 0x18168a55, 0x18005193 },
+
+ /* 802.11 UNII / HiperLAN 2 */
+ { 36, 0x18402ecc, 0x184c099a, 0x18158a55, 0x180ed1a3 },
+ { 38, 0x18402ecc, 0x184c099e, 0x18158a55, 0x180ed193 },
+ { 40, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed183 },
+ { 44, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed1a3 },
+ { 46, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed18b },
+ { 48, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed19b },
+ { 52, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed193 },
+ { 54, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed1a3 },
+ { 56, 0x18402ec8, 0x184c068e, 0x18158a55, 0x180ed18b },
+ { 60, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed183 },
+ { 62, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed193 },
+ { 64, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed1a3 },
+
+ /* 802.11 HiperLAN 2 */
+ { 100, 0x18402ec8, 0x184c06b2, 0x18178a55, 0x180ed783 },
+ { 102, 0x18402ec8, 0x184c06b2, 0x18578a55, 0x180ed793 },
+ { 104, 0x18402ec8, 0x185c06b2, 0x18578a55, 0x180ed1a3 },
+ { 108, 0x18402ecc, 0x185c0a32, 0x18578a55, 0x180ed193 },
+ { 110, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed183 },
+ { 112, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed19b },
+ { 116, 0x18402ecc, 0x184c0a3a, 0x18178a55, 0x180ed1a3 },
+ { 118, 0x18402ecc, 0x184c0a3e, 0x18178a55, 0x180ed193 },
+ { 120, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed183 },
+ { 124, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed193 },
+ { 126, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed15b },
+ { 128, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed1a3 },
+ { 132, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed18b },
+ { 134, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed193 },
+ { 136, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed19b },
+ { 140, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed183 },
+
+ /* 802.11 UNII */
+ { 149, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed1a7 },
+ { 151, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed187 },
+ { 153, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed18f },
+ { 157, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed19f },
+ { 159, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed1a7 },
+ { 161, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed187 },
+ { 165, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed197 },
+ { 167, 0x18402ec4, 0x184c03d2, 0x18179855, 0x1815531f },
+ { 169, 0x18402ec4, 0x184c03d2, 0x18179855, 0x18155327 },
+ { 171, 0x18402ec4, 0x184c03d6, 0x18179855, 0x18155307 },
+ { 173, 0x18402ec4, 0x184c03d6, 0x18179855, 0x1815530f },
+
+ /* 802.11 Japan */
+ { 184, 0x15002ccc, 0x1500491e, 0x1509be55, 0x150c0a0b },
+ { 188, 0x15002ccc, 0x15004922, 0x1509be55, 0x150c0a13 },
+ { 192, 0x15002ccc, 0x15004926, 0x1509be55, 0x150c0a1b },
+ { 196, 0x15002ccc, 0x1500492a, 0x1509be55, 0x150c0a23 },
+ { 208, 0x15002ccc, 0x1500493a, 0x1509be55, 0x150c0a13 },
+ { 212, 0x15002ccc, 0x1500493e, 0x1509be55, 0x150c0a1b },
+ { 216, 0x15002ccc, 0x15004982, 0x1509be55, 0x150c0a23 },
+};
+
+/*
+ * RF value list for rt3070
+ * Supports: 2.4 GHz
+ */
+static const struct rf_channel rf_vals_3070[] = {
+ {1, 241, 2, 2 },
+ {2, 241, 2, 7 },
+ {3, 242, 2, 2 },
+ {4, 242, 2, 7 },
+ {5, 243, 2, 2 },
+ {6, 243, 2, 7 },
+ {7, 244, 2, 2 },
+ {8, 244, 2, 7 },
+ {9, 245, 2, 2 },
+ {10, 245, 2, 7 },
+ {11, 246, 2, 2 },
+ {12, 246, 2, 7 },
+ {13, 247, 2, 2 },
+ {14, 248, 2, 4 },
+};
+
+static int rt2800usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+{
+ struct hw_mode_spec *spec = &rt2x00dev->spec;
+ struct channel_info *info;
+ char *tx_power1;
+ char *tx_power2;
+ unsigned int i;
+ u16 eeprom;
+
+ /*
+ * Initialize all hw fields.
+ */
+ rt2x00dev->hw->flags =
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK;
+ rt2x00dev->hw->extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
+
+ SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
+ SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
+ rt2x00_eeprom_addr(rt2x00dev,
+ EEPROM_MAC_ADDR_0));
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+
+ /*
+ * Initialize HT information.
+ */
+ spec->ht.ht_supported = true;
+ spec->ht.cap =
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_TX_STBC |
+ IEEE80211_HT_CAP_RX_STBC |
+ IEEE80211_HT_CAP_PSMP_SUPPORT;
+ spec->ht.ampdu_factor = 3;
+ spec->ht.ampdu_density = 4;
+ spec->ht.mcs.tx_params =
+ IEEE80211_HT_MCS_TX_DEFINED |
+ IEEE80211_HT_MCS_TX_RX_DIFF |
+ ((rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+
+ switch (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH)) {
+ case 3:
+ spec->ht.mcs.rx_mask[2] = 0xff;
+ /* fall through */
+ case 2:
+ spec->ht.mcs.rx_mask[1] = 0xff;
+ /* fall through */
+ case 1:
+ spec->ht.mcs.rx_mask[0] = 0xff;
+ spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
+ break;
+ }
+
+ /*
+ * Initialize hw_mode information.
+ */
+ spec->supported_bands = SUPPORT_BAND_2GHZ;
+ spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
+
+ if (rt2x00_rf(&rt2x00dev->chip, RF2820) ||
+ rt2x00_rf(&rt2x00dev->chip, RF2720)) {
+ spec->num_channels = 14;
+ spec->channels = rf_vals;
+ } else if (rt2x00_rf(&rt2x00dev->chip, RF2850) ||
+ rt2x00_rf(&rt2x00dev->chip, RF2750)) {
+ spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ spec->num_channels = ARRAY_SIZE(rf_vals);
+ spec->channels = rf_vals;
+ } else if (rt2x00_rf(&rt2x00dev->chip, RF3020) ||
+ rt2x00_rf(&rt2x00dev->chip, RF2020)) {
+ spec->num_channels = ARRAY_SIZE(rf_vals_3070);
+ spec->channels = rf_vals_3070;
+ }
+
+ /*
+ * Create channel information array
+ */
+ info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ spec->channels_info = info;
+
+ tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
+ tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
+
+ for (i = 0; i < 14; i++) {
+ info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]);
+ info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]);
+ }
+
+ if (spec->num_channels > 14) {
+ tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
+ tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
+
+ for (i = 14; i < spec->num_channels; i++) {
+ info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]);
+ info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+{
+ int retval;
+
+ /*
+ * Allocate eeprom data.
+ */
+ retval = rt2800usb_validate_eeprom(rt2x00dev);
+ if (retval)
+ return retval;
+
+ retval = rt2800usb_init_eeprom(rt2x00dev);
+ if (retval)
+ return retval;
+
+ /*
+ * Initialize hw specifications.
+ */
+ retval = rt2800usb_probe_hw_mode(rt2x00dev);
+ if (retval)
+ return retval;
+
+ /*
+ * This device requires firmware.
+ */
+ __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
+ if (!modparam_nohwcrypt)
+ __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
+
+ /*
+ * Set the rssi offset.
+ */
+ rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
+
+ return 0;
+}
+
+/*
+ * IEEE80211 stack callback functions.
+ */
+static void rt2800usb_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
+ u32 *iv32, u16 *iv16)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ struct mac_iveiv_entry iveiv_entry;
+ u32 offset;
+
+ offset = MAC_IVEIV_ENTRY(hw_key_idx);
+ rt2x00usb_register_multiread(rt2x00dev, offset,
+ &iveiv_entry, sizeof(iveiv_entry));
+
+ /* Return the IV/EIV values read back from the hardware entry. */
+ memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
+ memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
+}
+
+static int rt2800usb_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ u32 reg;
+ bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD);
+
+ rt2x00usb_register_read(rt2x00dev, TX_RTS_CFG, &reg);
+ rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, value);
+ rt2x00usb_register_write(rt2x00dev, TX_RTS_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, enabled);
+ rt2x00usb_register_write(rt2x00dev, CCK_PROT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, enabled);
+ rt2x00usb_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, enabled);
+ rt2x00usb_register_write(rt2x00dev, MM20_PROT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, enabled);
+ rt2x00usb_register_write(rt2x00dev, MM40_PROT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, enabled);
+ rt2x00usb_register_write(rt2x00dev, GF20_PROT_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, enabled);
+ rt2x00usb_register_write(rt2x00dev, GF40_PROT_CFG, reg);
+
+ return 0;
+}
+
+static int rt2800usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ struct data_queue *queue;
+ struct rt2x00_field32 field;
+ int retval;
+ u32 reg;
+ u32 offset;
+
+ /*
+ * First pass the configuration through rt2x00lib, which will
+ * update the queue settings and validate the input. After that
+ * we are free to update the registers based on the value
+ * in the queue parameter.
+ */
+ retval = rt2x00mac_conf_tx(hw, queue_idx, params);
+ if (retval)
+ return retval;
+
+ /*
+ * We only need to perform additional register initialization
+ * for WMM queues.
+ */
+ if (queue_idx >= 4)
+ return 0;
+
+ queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+
+ /* Update WMM TXOP register */
+ offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
+ field.bit_offset = (queue_idx & 1) * 16;
+ field.bit_mask = 0xffff << field.bit_offset;
+
+ rt2x00usb_register_read(rt2x00dev, offset, &reg);
+ rt2x00_set_field32(&reg, field, queue->txop);
+ rt2x00usb_register_write(rt2x00dev, offset, reg);
+
+ /* Update WMM registers */
+ field.bit_offset = queue_idx * 4;
+ field.bit_mask = 0xf << field.bit_offset;
+
+ rt2x00usb_register_read(rt2x00dev, WMM_AIFSN_CFG, &reg);
+ rt2x00_set_field32(&reg, field, queue->aifs);
+ rt2x00usb_register_write(rt2x00dev, WMM_AIFSN_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, WMM_CWMIN_CFG, &reg);
+ rt2x00_set_field32(&reg, field, queue->cw_min);
+ rt2x00usb_register_write(rt2x00dev, WMM_CWMIN_CFG, reg);
+
+ rt2x00usb_register_read(rt2x00dev, WMM_CWMAX_CFG, &reg);
+ rt2x00_set_field32(&reg, field, queue->cw_max);
+ rt2x00usb_register_write(rt2x00dev, WMM_CWMAX_CFG, reg);
+
+ /* Update EDCA registers */
+ offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx);
+
+ rt2x00usb_register_read(rt2x00dev, offset, &reg);
+ rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop);
+ rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs);
+ rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min);
+ rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMAX, queue->cw_max);
+ rt2x00usb_register_write(rt2x00dev, offset, reg);
+
+ return 0;
+}
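The per-queue register fields touched above follow a simple pattern; a hedged sketch of how queue_idx 0..3 maps onto the WMM and EDCA registers defined in rt2800usb.h below (example_ name is illustrative):

/*
 * Sketch only, not part of the patch:
 *   TXOP:              queues 0/1 -> WMM_TXOP0_CFG, queues 2/3 -> WMM_TXOP1_CFG,
 *                      each using the low or high 16-bit half-word.
 *   AIFSN/CWMIN/CWMAX: queue N -> the 4-bit nibble at bits 4*N .. 4*N+3.
 *   EDCA:              queue N -> EDCA_AC0_CFG + N * sizeof(u32).
 */
static inline void example_wmm_offsets(unsigned int queue_idx,
				       u32 *txop_reg,
				       unsigned int *txop_shift,
				       unsigned int *nibble_shift)
{
	*txop_reg = WMM_TXOP0_CFG + sizeof(u32) * (!!(queue_idx & 2));
	*txop_shift = (queue_idx & 1) * 16;	/* low or high half-word */
	*nibble_shift = queue_idx * 4;		/* one nibble per queue */
}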
+
+static u64 rt2800usb_get_tsf(struct ieee80211_hw *hw)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ u64 tsf;
+ u32 reg;
+
+ rt2x00usb_register_read(rt2x00dev, TSF_TIMER_DW1, &reg);
+ tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32;
+ rt2x00usb_register_read(rt2x00dev, TSF_TIMER_DW0, &reg);
+ tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD);
+
+ return tsf;
+}
+
+static const struct ieee80211_ops rt2800usb_mac80211_ops = {
+ .tx = rt2x00mac_tx,
+ .start = rt2x00mac_start,
+ .stop = rt2x00mac_stop,
+ .add_interface = rt2x00mac_add_interface,
+ .remove_interface = rt2x00mac_remove_interface,
+ .config = rt2x00mac_config,
+ .configure_filter = rt2x00mac_configure_filter,
+ .set_key = rt2x00mac_set_key,
+ .get_stats = rt2x00mac_get_stats,
+ .get_tkip_seq = rt2800usb_get_tkip_seq,
+ .set_rts_threshold = rt2800usb_set_rts_threshold,
+ .bss_info_changed = rt2x00mac_bss_info_changed,
+ .conf_tx = rt2800usb_conf_tx,
+ .get_tx_stats = rt2x00mac_get_tx_stats,
+ .get_tsf = rt2800usb_get_tsf,
+};
+
+static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
+ .probe_hw = rt2800usb_probe_hw,
+ .get_firmware_name = rt2800usb_get_firmware_name,
+ .check_firmware = rt2800usb_check_firmware,
+ .load_firmware = rt2800usb_load_firmware,
+ .initialize = rt2x00usb_initialize,
+ .uninitialize = rt2x00usb_uninitialize,
+ .clear_entry = rt2x00usb_clear_entry,
+ .set_device_state = rt2800usb_set_device_state,
+ .rfkill_poll = rt2800usb_rfkill_poll,
+ .link_stats = rt2800usb_link_stats,
+ .reset_tuner = rt2800usb_reset_tuner,
+ .link_tuner = rt2800usb_link_tuner,
+ .write_tx_desc = rt2800usb_write_tx_desc,
+ .write_tx_data = rt2x00usb_write_tx_data,
+ .write_beacon = rt2800usb_write_beacon,
+ .get_tx_data_len = rt2800usb_get_tx_data_len,
+ .kick_tx_queue = rt2800usb_kick_tx_queue,
+ .kill_tx_queue = rt2x00usb_kill_tx_queue,
+ .fill_rxdone = rt2800usb_fill_rxdone,
+ .config_shared_key = rt2800usb_config_shared_key,
+ .config_pairwise_key = rt2800usb_config_pairwise_key,
+ .config_filter = rt2800usb_config_filter,
+ .config_intf = rt2800usb_config_intf,
+ .config_erp = rt2800usb_config_erp,
+ .config_ant = rt2800usb_config_ant,
+ .config = rt2800usb_config,
+};
+
+static const struct data_queue_desc rt2800usb_queue_rx = {
+ .entry_num = RX_ENTRIES,
+ .data_size = AGGREGATION_SIZE,
+ .desc_size = RXD_DESC_SIZE + RXWI_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_usb),
+};
+
+static const struct data_queue_desc rt2800usb_queue_tx = {
+ .entry_num = TX_ENTRIES,
+ .data_size = AGGREGATION_SIZE,
+ .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_usb),
+};
+
+static const struct data_queue_desc rt2800usb_queue_bcn = {
+ .entry_num = 8 * BEACON_ENTRIES,
+ .data_size = MGMT_FRAME_SIZE,
+ .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_usb),
+};
+
+static const struct rt2x00_ops rt2800usb_ops = {
+ .name = KBUILD_MODNAME,
+ .max_sta_intf = 1,
+ .max_ap_intf = 8,
+ .eeprom_size = EEPROM_SIZE,
+ .rf_size = RF_SIZE,
+ .tx_queues = NUM_TX_QUEUES,
+ .rx = &rt2800usb_queue_rx,
+ .tx = &rt2800usb_queue_tx,
+ .bcn = &rt2800usb_queue_bcn,
+ .lib = &rt2800usb_rt2x00_ops,
+ .hw = &rt2800usb_mac80211_ops,
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+ .debugfs = &rt2800usb_rt2x00debug,
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+};
+
+/*
+ * rt2800usb module information.
+ */
+static struct usb_device_id rt2800usb_device_table[] = {
+ /* Abocom */
+ { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* AirTies */
+ { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Amigo */
+ { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Amit */
+ { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* ASUS */
+ { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* AzureWave */
+ { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Belkin */
+ { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x050d, 0x815c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Buffalo */
+ { USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Conceptronic */
+ { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x14b2, 0x3c28), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Corega */
+ { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* D-Link */
+ { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Edimax */
+ { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Encore */
+ { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* EnGenius */
+ { USB_DEVICE(0x1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Gemtek */
+ { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Gigabyte */
+ { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Hawking */
+ { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* I-O DATA */
+ { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* LevelOne */
+ { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Linksys */
+ { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Motorola */
+ { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Ovislink */
+ { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Pegatron */
+ { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Philips */
+ { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Planex */
+ { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Qcom */
+ { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Quanta */
+ { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Ralink */
+ { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Samsung */
+ { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Siemens */
+ { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sitecom */
+ { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x002b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* SMC */
+ { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sparklan */
+ { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sweex */
+ { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* U-Media*/
+ { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* ZCOM */
+ { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Zinwell */
+ { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Zyxel */
+ { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { 0, }
+};
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Ralink RT2800 USB Wireless LAN driver.");
+MODULE_SUPPORTED_DEVICE("Ralink RT2870 USB chipset based cards");
+MODULE_DEVICE_TABLE(usb, rt2800usb_device_table);
+MODULE_FIRMWARE(FIRMWARE_RT2870);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver rt2800usb_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rt2800usb_device_table,
+ .probe = rt2x00usb_probe,
+ .disconnect = rt2x00usb_disconnect,
+ .suspend = rt2x00usb_suspend,
+ .resume = rt2x00usb_resume,
+};
+
+static int __init rt2800usb_init(void)
+{
+ return usb_register(&rt2800usb_driver);
+}
+
+static void __exit rt2800usb_exit(void)
+{
+ usb_deregister(&rt2800usb_driver);
+}
+
+module_init(rt2800usb_init);
+module_exit(rt2800usb_exit);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
new file mode 100644
index 00000000000..61a8be61d3f
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -0,0 +1,1945 @@
+/*
+ Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+ <http://rt2x00.serialmonkey.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the
+ Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ Module: rt2800usb
+ Abstract: Data structures and registers for the rt2800usb module.
+ Supported chipsets: RT2800U.
+ */
+
+#ifndef RT2800USB_H
+#define RT2800USB_H
+
+/*
+ * RF chip defines.
+ *
+ * RF2820 2.4G 2T3R
+ * RF2850 2.4G/5G 2T3R
+ * RF2720 2.4G 1T2R
+ * RF2750 2.4G/5G 1T2R
+ * RF3020 2.4G 1T1R
+ * RF2020 2.4G B/G
+ */
+#define RF2820 0x0001
+#define RF2850 0x0002
+#define RF2720 0x0003
+#define RF2750 0x0004
+#define RF3020 0x0005
+#define RF2020 0x0006
+
+/*
+ * RT2870 version
+ */
+#define RT2860C_VERSION 0x28600100
+#define RT2860D_VERSION 0x28600101
+#define RT2880E_VERSION 0x28720200
+#define RT2883_VERSION 0x28830300
+#define RT3070_VERSION 0x30700200
+
+/*
+ * Signal information.
+ * Default offset is required for RSSI <-> dBm conversion.
+ */
+#define DEFAULT_RSSI_OFFSET 120 /* FIXME */
+
+/*
+ * Register layout information.
+ */
+#define CSR_REG_BASE 0x1000
+#define CSR_REG_SIZE 0x0800
+#define EEPROM_BASE 0x0000
+#define EEPROM_SIZE 0x0110
+#define BBP_BASE 0x0000
+#define BBP_SIZE 0x0080
+#define RF_BASE 0x0004
+#define RF_SIZE 0x0010
+
+/*
+ * Number of TX queues.
+ */
+#define NUM_TX_QUEUES 4
+
+/*
+ * USB registers.
+ */
+
+/*
+ * HOST-MCU shared memory
+ */
+#define HOST_CMD_CSR 0x0404
+#define HOST_CMD_CSR_HOST_COMMAND FIELD32(0x000000ff)
+
+/*
+ * INT_SOURCE_CSR: Interrupt source register.
+ * Write one to clear corresponding bit.
+ * TX_FIFO_STATUS: FIFO statistics full, software should read 0x171c
+ */
+#define INT_SOURCE_CSR 0x0200
+#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001)
+#define INT_SOURCE_CSR_TXDELAYINT FIELD32(0x00000002)
+#define INT_SOURCE_CSR_RX_DONE FIELD32(0x00000004)
+#define INT_SOURCE_CSR_AC0_DMA_DONE FIELD32(0x00000008)
+#define INT_SOURCE_CSR_AC1_DMA_DONE FIELD32(0x00000010)
+#define INT_SOURCE_CSR_AC2_DMA_DONE FIELD32(0x00000020)
+#define INT_SOURCE_CSR_AC3_DMA_DONE FIELD32(0x00000040)
+#define INT_SOURCE_CSR_HCCA_DMA_DONE FIELD32(0x00000080)
+#define INT_SOURCE_CSR_MGMT_DMA_DONE FIELD32(0x00000100)
+#define INT_SOURCE_CSR_MCU_COMMAND FIELD32(0x00000200)
+#define INT_SOURCE_CSR_RXTX_COHERENT FIELD32(0x00000400)
+#define INT_SOURCE_CSR_TBTT FIELD32(0x00000800)
+#define INT_SOURCE_CSR_PRE_TBTT FIELD32(0x00001000)
+#define INT_SOURCE_CSR_TX_FIFO_STATUS FIELD32(0x00002000)
+#define INT_SOURCE_CSR_AUTO_WAKEUP FIELD32(0x00004000)
+#define INT_SOURCE_CSR_GPTIMER FIELD32(0x00008000)
+#define INT_SOURCE_CSR_RX_COHERENT FIELD32(0x00010000)
+#define INT_SOURCE_CSR_TX_COHERENT FIELD32(0x00020000)
+
+/*
+ * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is masked off.
+ */
+#define INT_MASK_CSR 0x0204
+#define INT_MASK_CSR_RXDELAYINT FIELD32(0x00000001)
+#define INT_MASK_CSR_TXDELAYINT FIELD32(0x00000002)
+#define INT_MASK_CSR_RX_DONE FIELD32(0x00000004)
+#define INT_MASK_CSR_AC0_DMA_DONE FIELD32(0x00000008)
+#define INT_MASK_CSR_AC1_DMA_DONE FIELD32(0x00000010)
+#define INT_MASK_CSR_AC2_DMA_DONE FIELD32(0x00000020)
+#define INT_MASK_CSR_AC3_DMA_DONE FIELD32(0x00000040)
+#define INT_MASK_CSR_HCCA_DMA_DONE FIELD32(0x00000080)
+#define INT_MASK_CSR_MGMT_DMA_DONE FIELD32(0x00000100)
+#define INT_MASK_CSR_MCU_COMMAND FIELD32(0x00000200)
+#define INT_MASK_CSR_RXTX_COHERENT FIELD32(0x00000400)
+#define INT_MASK_CSR_TBTT FIELD32(0x00000800)
+#define INT_MASK_CSR_PRE_TBTT FIELD32(0x00001000)
+#define INT_MASK_CSR_TX_FIFO_STATUS FIELD32(0x00002000)
+#define INT_MASK_CSR_AUTO_WAKEUP FIELD32(0x00004000)
+#define INT_MASK_CSR_GPTIMER FIELD32(0x00008000)
+#define INT_MASK_CSR_RX_COHERENT FIELD32(0x00010000)
+#define INT_MASK_CSR_TX_COHERENT FIELD32(0x00020000)
+
+/*
+ * WPDMA_GLO_CFG
+ */
+#define WPDMA_GLO_CFG 0x0208
+#define WPDMA_GLO_CFG_ENABLE_TX_DMA FIELD32(0x00000001)
+#define WPDMA_GLO_CFG_TX_DMA_BUSY FIELD32(0x00000002)
+#define WPDMA_GLO_CFG_ENABLE_RX_DMA FIELD32(0x00000004)
+#define WPDMA_GLO_CFG_RX_DMA_BUSY FIELD32(0x00000008)
+#define WPDMA_GLO_CFG_WP_DMA_BURST_SIZE FIELD32(0x00000030)
+#define WPDMA_GLO_CFG_TX_WRITEBACK_DONE FIELD32(0x00000040)
+#define WPDMA_GLO_CFG_BIG_ENDIAN FIELD32(0x00000080)
+#define WPDMA_GLO_CFG_RX_HDR_SCATTER FIELD32(0x0000ff00)
+#define WPDMA_GLO_CFG_HDR_SEG_LEN FIELD32(0xffff0000)
+
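Every FIELD32() entry in this header names a sub-field of a 32-bit register by its bit mask; the rt2x00_set_field32()/rt2x00_get_field32() helpers used throughout the driver presumably reduce to mask-and-shift operations along these lines (sketch only, the example_ names are not the driver's API):

#include <linux/types.h>
#include <linux/bitops.h>	/* __ffs() */

/* Sketch only, not part of the patch. */
static inline u32 example_get_field32(u32 reg, u32 mask)
{
	return (reg & mask) >> __ffs(mask);	/* extract the field value */
}

static inline void example_set_field32(u32 *reg, u32 mask, u32 value)
{
	*reg = (*reg & ~mask) | ((value << __ffs(mask)) & mask);
}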
+/*
+ * WPDMA_RST_IDX
+ */
+#define WPDMA_RST_IDX 0x020c
+#define WPDMA_RST_IDX_DTX_IDX0 FIELD32(0x00000001)
+#define WPDMA_RST_IDX_DTX_IDX1 FIELD32(0x00000002)
+#define WPDMA_RST_IDX_DTX_IDX2 FIELD32(0x00000004)
+#define WPDMA_RST_IDX_DTX_IDX3 FIELD32(0x00000008)
+#define WPDMA_RST_IDX_DTX_IDX4 FIELD32(0x00000010)
+#define WPDMA_RST_IDX_DTX_IDX5 FIELD32(0x00000020)
+#define WPDMA_RST_IDX_DRX_IDX0 FIELD32(0x00010000)
+
+/*
+ * DELAY_INT_CFG
+ */
+#define DELAY_INT_CFG 0x0210
+#define DELAY_INT_CFG_RXMAX_PTIME FIELD32(0x000000ff)
+#define DELAY_INT_CFG_RXMAX_PINT FIELD32(0x00007f00)
+#define DELAY_INT_CFG_RXDLY_INT_EN FIELD32(0x00008000)
+#define DELAY_INT_CFG_TXMAX_PTIME FIELD32(0x00ff0000)
+#define DELAY_INT_CFG_TXMAX_PINT FIELD32(0x7f000000)
+#define DELAY_INT_CFG_TXDLY_INT_EN FIELD32(0x80000000)
+
+/*
+ * WMM_AIFSN_CFG: AIFSN for each EDCA AC
+ * AIFSN0: AC_BE
+ * AIFSN1: AC_BK
+ * AIFSN2: AC_VI
+ * AIFSN3: AC_VO
+ */
+#define WMM_AIFSN_CFG 0x0214
+#define WMM_AIFSN_CFG_AIFSN0 FIELD32(0x0000000f)
+#define WMM_AIFSN_CFG_AIFSN1 FIELD32(0x000000f0)
+#define WMM_AIFSN_CFG_AIFSN2 FIELD32(0x00000f00)
+#define WMM_AIFSN_CFG_AIFSN3 FIELD32(0x0000f000)
+
+/*
+ * WMM_CWMIN_CFG: CWmin for each EDCA AC
+ * CWMIN0: AC_BE
+ * CWMIN1: AC_BK
+ * CWMIN2: AC_VI
+ * CWMIN3: AC_VO
+ */
+#define WMM_CWMIN_CFG 0x0218
+#define WMM_CWMIN_CFG_CWMIN0 FIELD32(0x0000000f)
+#define WMM_CWMIN_CFG_CWMIN1 FIELD32(0x000000f0)
+#define WMM_CWMIN_CFG_CWMIN2 FIELD32(0x00000f00)
+#define WMM_CWMIN_CFG_CWMIN3 FIELD32(0x0000f000)
+
+/*
+ * WMM_CWMAX_CFG: CWmax for each EDCA AC
+ * CWMAX0: AC_BE
+ * CWMAX1: AC_BK
+ * CWMAX2: AC_VI
+ * CWMAX3: AC_VO
+ */
+#define WMM_CWMAX_CFG 0x021c
+#define WMM_CWMAX_CFG_CWMAX0 FIELD32(0x0000000f)
+#define WMM_CWMAX_CFG_CWMAX1 FIELD32(0x000000f0)
+#define WMM_CWMAX_CFG_CWMAX2 FIELD32(0x00000f00)
+#define WMM_CWMAX_CFG_CWMAX3 FIELD32(0x0000f000)
+
+/*
+ * AC_TXOP0: AC_BK/AC_BE TXOP register
+ * AC0TXOP: AC_BK in unit of 32us
+ * AC1TXOP: AC_BE in unit of 32us
+ */
+#define WMM_TXOP0_CFG 0x0220
+#define WMM_TXOP0_CFG_AC0TXOP FIELD32(0x0000ffff)
+#define WMM_TXOP0_CFG_AC1TXOP FIELD32(0xffff0000)
+
+/*
+ * AC_TXOP1: AC_VO/AC_VI TXOP register
+ * AC2TXOP: AC_VI in unit of 32us
+ * AC3TXOP: AC_VO in unit of 32us
+ */
+#define WMM_TXOP1_CFG 0x0224
+#define WMM_TXOP1_CFG_AC2TXOP FIELD32(0x0000ffff)
+#define WMM_TXOP1_CFG_AC3TXOP FIELD32(0xffff0000)
+
+/*
+ * GPIO_CTRL_CFG:
+ */
+#define GPIO_CTRL_CFG 0x0228
+#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001)
+#define GPIO_CTRL_CFG_BIT1 FIELD32(0x00000002)
+#define GPIO_CTRL_CFG_BIT2 FIELD32(0x00000004)
+#define GPIO_CTRL_CFG_BIT3 FIELD32(0x00000008)
+#define GPIO_CTRL_CFG_BIT4 FIELD32(0x00000010)
+#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020)
+#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040)
+#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080)
+#define GPIO_CTRL_CFG_BIT8 FIELD32(0x00000100)
+
+/*
+ * MCU_CMD_CFG
+ */
+#define MCU_CMD_CFG 0x022c
+
+/*
+ * AC_BK register offsets
+ */
+#define TX_BASE_PTR0 0x0230
+#define TX_MAX_CNT0 0x0234
+#define TX_CTX_IDX0 0x0238
+#define TX_DTX_IDX0 0x023c
+
+/*
+ * AC_BE register offsets
+ */
+#define TX_BASE_PTR1 0x0240
+#define TX_MAX_CNT1 0x0244
+#define TX_CTX_IDX1 0x0248
+#define TX_DTX_IDX1 0x024c
+
+/*
+ * AC_VI register offsets
+ */
+#define TX_BASE_PTR2 0x0250
+#define TX_MAX_CNT2 0x0254
+#define TX_CTX_IDX2 0x0258
+#define TX_DTX_IDX2 0x025c
+
+/*
+ * AC_VO register offsets
+ */
+#define TX_BASE_PTR3 0x0260
+#define TX_MAX_CNT3 0x0264
+#define TX_CTX_IDX3 0x0268
+#define TX_DTX_IDX3 0x026c
+
+/*
+ * HCCA register offsets
+ */
+#define TX_BASE_PTR4 0x0270
+#define TX_MAX_CNT4 0x0274
+#define TX_CTX_IDX4 0x0278
+#define TX_DTX_IDX4 0x027c
+
+/*
+ * MGMT register offsets
+ */
+#define TX_BASE_PTR5 0x0280
+#define TX_MAX_CNT5 0x0284
+#define TX_CTX_IDX5 0x0288
+#define TX_DTX_IDX5 0x028c
+
+/*
+ * RX register offsets
+ */
+#define RX_BASE_PTR 0x0290
+#define RX_MAX_CNT 0x0294
+#define RX_CRX_IDX 0x0298
+#define RX_DRX_IDX 0x029c
+
+/*
+ * USB_DMA_CFG
+ * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns.
+ * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes.
+ * PHY_CLEAR: phy watch dog enable.
+ * TX_CLEAR: Clear USB DMA TX path.
+ * TXOP_HALT: Halt TXOP count down when TX buffer is full.
+ * RX_BULK_AGG_EN: Enable Rx Bulk Aggregation.
+ * RX_BULK_EN: Enable USB DMA Rx.
+ * TX_BULK_EN: Enable USB DMA Tx.
+ * EP_OUT_VALID: OUT endpoint data valid.
+ * RX_BUSY: USB DMA RX FSM busy.
+ * TX_BUSY: USB DMA TX FSM busy.
+ */
+#define USB_DMA_CFG 0x02a0
+#define USB_DMA_CFG_RX_BULK_AGG_TIMEOUT FIELD32(0x000000ff)
+#define USB_DMA_CFG_RX_BULK_AGG_LIMIT FIELD32(0x0000ff00)
+#define USB_DMA_CFG_PHY_CLEAR FIELD32(0x00010000)
+#define USB_DMA_CFG_TX_CLEAR FIELD32(0x00080000)
+#define USB_DMA_CFG_TXOP_HALT FIELD32(0x00100000)
+#define USB_DMA_CFG_RX_BULK_AGG_EN FIELD32(0x00200000)
+#define USB_DMA_CFG_RX_BULK_EN FIELD32(0x00400000)
+#define USB_DMA_CFG_TX_BULK_EN FIELD32(0x00800000)
+#define USB_DMA_CFG_EP_OUT_VALID FIELD32(0x3f000000)
+#define USB_DMA_CFG_RX_BUSY FIELD32(0x40000000)
+#define USB_DMA_CFG_TX_BUSY FIELD32(0x80000000)
+
+/*
+ * USB_CYC_CFG
+ */
+#define USB_CYC_CFG 0x02a4
+#define USB_CYC_CFG_CLOCK_CYCLE FIELD32(0x000000ff)
+
+/*
+ * PBF_SYS_CTRL
+ * HOST_RAM_WRITE: enable Host program ram write selection
+ */
+#define PBF_SYS_CTRL 0x0400
+#define PBF_SYS_CTRL_READY FIELD32(0x00000080)
+#define PBF_SYS_CTRL_HOST_RAM_WRITE FIELD32(0x00010000)
+
+/*
+ * PBF registers
+ * Most are for debug. Driver doesn't touch PBF register.
+ */
+#define PBF_CFG 0x0408
+#define PBF_MAX_PCNT 0x040c
+#define PBF_CTRL 0x0410
+#define PBF_INT_STA 0x0414
+#define PBF_INT_ENA 0x0418
+
+/*
+ * BCN_OFFSET0:
+ */
+#define BCN_OFFSET0 0x042c
+#define BCN_OFFSET0_BCN0 FIELD32(0x000000ff)
+#define BCN_OFFSET0_BCN1 FIELD32(0x0000ff00)
+#define BCN_OFFSET0_BCN2 FIELD32(0x00ff0000)
+#define BCN_OFFSET0_BCN3 FIELD32(0xff000000)
+
+/*
+ * BCN_OFFSET1:
+ */
+#define BCN_OFFSET1 0x0430
+#define BCN_OFFSET1_BCN4 FIELD32(0x000000ff)
+#define BCN_OFFSET1_BCN5 FIELD32(0x0000ff00)
+#define BCN_OFFSET1_BCN6 FIELD32(0x00ff0000)
+#define BCN_OFFSET1_BCN7 FIELD32(0xff000000)
+
+/*
+ * PBF registers
+ * Most are for debug. Driver doesn't touch PBF register.
+ */
+#define TXRXQ_PCNT 0x0438
+#define PBF_DBG 0x043c
+
+/*
+ * RF registers
+ */
+#define RF_CSR_CFG 0x0500
+#define RF_CSR_CFG_DATA FIELD32(0x000000ff)
+#define RF_CSR_CFG_REGNUM FIELD32(0x00001f00)
+#define RF_CSR_CFG_WRITE FIELD32(0x00010000)
+#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
+
+/*
+ * MAC Control/Status Registers(CSR).
+ * Some values are set in TU, where 1 TU == 1024 us.
+ */
+
+/*
+ * MAC_CSR0: ASIC revision number.
+ * ASIC_REV: 0
+ * ASIC_VER: 2870
+ */
+#define MAC_CSR0 0x1000
+#define MAC_CSR0_ASIC_REV FIELD32(0x0000ffff)
+#define MAC_CSR0_ASIC_VER FIELD32(0xffff0000)
+
+/*
+ * MAC_SYS_CTRL:
+ */
+#define MAC_SYS_CTRL 0x1004
+#define MAC_SYS_CTRL_RESET_CSR FIELD32(0x00000001)
+#define MAC_SYS_CTRL_RESET_BBP FIELD32(0x00000002)
+#define MAC_SYS_CTRL_ENABLE_TX FIELD32(0x00000004)
+#define MAC_SYS_CTRL_ENABLE_RX FIELD32(0x00000008)
+#define MAC_SYS_CTRL_CONTINUOUS_TX FIELD32(0x00000010)
+#define MAC_SYS_CTRL_LOOPBACK FIELD32(0x00000020)
+#define MAC_SYS_CTRL_WLAN_HALT FIELD32(0x00000040)
+#define MAC_SYS_CTRL_RX_TIMESTAMP FIELD32(0x00000080)
+
+/*
+ * MAC_ADDR_DW0: STA MAC register 0
+ */
+#define MAC_ADDR_DW0 0x1008
+#define MAC_ADDR_DW0_BYTE0 FIELD32(0x000000ff)
+#define MAC_ADDR_DW0_BYTE1 FIELD32(0x0000ff00)
+#define MAC_ADDR_DW0_BYTE2 FIELD32(0x00ff0000)
+#define MAC_ADDR_DW0_BYTE3 FIELD32(0xff000000)
+
+/*
+ * MAC_ADDR_DW1: STA MAC register 1
+ * UNICAST_TO_ME_MASK:
+ * Used to mask off bits from byte 5 of the MAC address
+ * to determine the UNICAST_TO_ME bit for RX frames.
+ * The full mask is complemented by BSS_ID_MASK:
+ * MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK
+ */
+#define MAC_ADDR_DW1 0x100c
+#define MAC_ADDR_DW1_BYTE4 FIELD32(0x000000ff)
+#define MAC_ADDR_DW1_BYTE5 FIELD32(0x0000ff00)
+#define MAC_ADDR_DW1_UNICAST_TO_ME_MASK FIELD32(0x00ff0000)
+
+/*
+ * MAC_BSSID_DW0: BSSID register 0
+ */
+#define MAC_BSSID_DW0 0x1010
+#define MAC_BSSID_DW0_BYTE0 FIELD32(0x000000ff)
+#define MAC_BSSID_DW0_BYTE1 FIELD32(0x0000ff00)
+#define MAC_BSSID_DW0_BYTE2 FIELD32(0x00ff0000)
+#define MAC_BSSID_DW0_BYTE3 FIELD32(0xff000000)
+
+/*
+ * MAC_BSSID_DW1: BSSID register 1
+ * BSS_ID_MASK:
+ * 0: 1-BSSID mode (BSS index = 0)
+ * 1: 2-BSSID mode (BSS index: Byte5, bit 0)
+ * 2: 4-BSSID mode (BSS index: byte5, bit 0 - 1)
+ * 3: 8-BSSID mode (BSS index: byte5, bit 0 - 2)
+ * This mask is used to mask off bits 0, 1 and 2 of byte 5 of the
+ * BSSID. This will make sure that those bits will be ignored
+ * when determining the MY_BSS of RX frames.
+ */
+#define MAC_BSSID_DW1 0x1014
+#define MAC_BSSID_DW1_BYTE4 FIELD32(0x000000ff)
+#define MAC_BSSID_DW1_BYTE5 FIELD32(0x0000ff00)
+#define MAC_BSSID_DW1_BSS_ID_MASK FIELD32(0x00030000)
+#define MAC_BSSID_DW1_BSS_BCN_NUM FIELD32(0x001c0000)
+
+/*
+ * MAX_LEN_CFG: Maximum frame length register.
+ * MAX_MPDU: rt2860b max 16k bytes
+ * MAX_PSDU: Maximum PSDU length
+ * (power factor) 0:2^13, 1:2^14, 2:2^15, 3:2^16
+ */
+#define MAX_LEN_CFG 0x1018
+#define MAX_LEN_CFG_MAX_MPDU FIELD32(0x00000fff)
+#define MAX_LEN_CFG_MAX_PSDU FIELD32(0x00003000)
+#define MAX_LEN_CFG_MIN_PSDU FIELD32(0x0000c000)
+#define MAX_LEN_CFG_MIN_MPDU FIELD32(0x000f0000)
+
+/*
+ * BBP_CSR_CFG: BBP serial control register
+ * VALUE: Register value to program into BBP
+ * REG_NUM: Selected BBP register
+ * READ_CONTROL: 0 write BBP, 1 read BBP
+ * BUSY: ASIC is busy executing BBP commands
+ * BBP_PAR_DUR: 0 4 MAC clocks, 1 8 MAC clocks
+ * BBP_RW_MODE: 0 serial, 1 parallel
+ */
+#define BBP_CSR_CFG 0x101c
+#define BBP_CSR_CFG_VALUE FIELD32(0x000000ff)
+#define BBP_CSR_CFG_REGNUM FIELD32(0x0000ff00)
+#define BBP_CSR_CFG_READ_CONTROL FIELD32(0x00010000)
+#define BBP_CSR_CFG_BUSY FIELD32(0x00020000)
+#define BBP_CSR_CFG_BBP_PAR_DUR FIELD32(0x00040000)
+#define BBP_CSR_CFG_BBP_RW_MODE FIELD32(0x00080000)
+
+/*
+ * RF_CSR_CFG0: RF control register
+ * REGID_AND_VALUE: Register value to program into RF
+ * BITWIDTH: Selected RF register
+ * STANDBYMODE: 0 high when standby, 1 low when standby
+ * SEL: 0 RF_LE0 activate, 1 RF_LE1 activate
+ * BUSY: ASIC is busy executing RF commands
+ */
+#define RF_CSR_CFG0 0x1020
+#define RF_CSR_CFG0_REGID_AND_VALUE FIELD32(0x00ffffff)
+#define RF_CSR_CFG0_BITWIDTH FIELD32(0x1f000000)
+#define RF_CSR_CFG0_REG_VALUE_BW FIELD32(0x1fffffff)
+#define RF_CSR_CFG0_STANDBYMODE FIELD32(0x20000000)
+#define RF_CSR_CFG0_SEL FIELD32(0x40000000)
+#define RF_CSR_CFG0_BUSY FIELD32(0x80000000)
+
+/*
+ * RF_CSR_CFG1: RF control register
+ * REGID_AND_VALUE: Register value to program into RF
+ * RFGAP: Gap between BB_CONTROL_RF and RF_LE
+ * 0: 3 system clock cycle (37.5usec)
+ * 1: 5 system clock cycle (62.5usec)
+ */
+#define RF_CSR_CFG1 0x1024
+#define RF_CSR_CFG1_REGID_AND_VALUE FIELD32(0x00ffffff)
+#define RF_CSR_CFG1_RFGAP FIELD32(0x1f000000)
+
+/*
+ * RF_CSR_CFG2: RF control register
+ * VALUE: Register value to program into RF
+ * RFGAP: Gap between BB_CONTROL_RF and RF_LE
+ * 0: 3 system clock cycle (37.5usec)
+ * 1: 5 system clock cycle (62.5usec)
+ */
+#define RF_CSR_CFG2 0x1028
+#define RF_CSR_CFG2_VALUE FIELD32(0x00ffffff)
+
+/*
+ * LED_CFG: LED control
+ * color LEDs:
+ * 0: off
+ * 1: blinking upon TX2
+ * 2: periodic slow blinking
+ * 3: always on
+ * LED polarity:
+ * 0: active low
+ * 1: active high
+ */
+#define LED_CFG 0x102c
+#define LED_CFG_ON_PERIOD FIELD32(0x000000ff)
+#define LED_CFG_OFF_PERIOD FIELD32(0x0000ff00)
+#define LED_CFG_SLOW_BLINK_PERIOD FIELD32(0x003f0000)
+#define LED_CFG_R_LED_MODE FIELD32(0x03000000)
+#define LED_CFG_G_LED_MODE FIELD32(0x0c000000)
+#define LED_CFG_Y_LED_MODE FIELD32(0x30000000)
+#define LED_CFG_LED_POLAR FIELD32(0x40000000)
+
+/*
+ * XIFS_TIME_CFG: MAC timing
+ * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX
+ * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX
+ * OFDM_XIFS_TIME: unit 1us. Applied after OFDM RX
+ * when MAC doesn't reference BBP signal BBRXEND
+ * EIFS: unit 1us
+ * BB_RXEND_ENABLE: reference RXEND signal to begin XIFS defer
+ *
+ */
+#define XIFS_TIME_CFG 0x1100
+#define XIFS_TIME_CFG_CCKM_SIFS_TIME FIELD32(0x000000ff)
+#define XIFS_TIME_CFG_OFDM_SIFS_TIME FIELD32(0x0000ff00)
+#define XIFS_TIME_CFG_OFDM_XIFS_TIME FIELD32(0x000f0000)
+#define XIFS_TIME_CFG_EIFS FIELD32(0x1ff00000)
+#define XIFS_TIME_CFG_BB_RXEND_ENABLE FIELD32(0x20000000)
+
+/*
+ * BKOFF_SLOT_CFG:
+ */
+#define BKOFF_SLOT_CFG 0x1104
+#define BKOFF_SLOT_CFG_SLOT_TIME FIELD32(0x000000ff)
+#define BKOFF_SLOT_CFG_CC_DELAY_TIME FIELD32(0x0000ff00)
+
+/*
+ * NAV_TIME_CFG:
+ */
+#define NAV_TIME_CFG 0x1108
+#define NAV_TIME_CFG_SIFS FIELD32(0x000000ff)
+#define NAV_TIME_CFG_SLOT_TIME FIELD32(0x0000ff00)
+#define NAV_TIME_CFG_EIFS FIELD32(0x01ff0000)
+#define NAV_TIME_ZERO_SIFS FIELD32(0x02000000)
+
+/*
+ * CH_TIME_CFG: count as channel busy
+ */
+#define CH_TIME_CFG 0x110c
+
+/*
+ * PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us
+ */
+#define PBF_LIFE_TIMER 0x1110
+
+/*
+ * BCN_TIME_CFG:
+ * BEACON_INTERVAL: in unit of 1/16 TU
+ * TSF_TICKING: Enable TSF auto counting
+ * TSF_SYNC: Enable TSF sync, 00: disable, 01: infra mode, 10: ad-hoc mode
+ * BEACON_GEN: Enable beacon generator
+ */
+#define BCN_TIME_CFG 0x1114
+#define BCN_TIME_CFG_BEACON_INTERVAL FIELD32(0x0000ffff)
+#define BCN_TIME_CFG_TSF_TICKING FIELD32(0x00010000)
+#define BCN_TIME_CFG_TSF_SYNC FIELD32(0x00060000)
+#define BCN_TIME_CFG_TBTT_ENABLE FIELD32(0x00080000)
+#define BCN_TIME_CFG_BEACON_GEN FIELD32(0x00100000)
+#define BCN_TIME_CFG_TX_TIME_COMPENSATE FIELD32(0xf0000000)
+
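
Because BEACON_INTERVAL is expressed in 1/16 TU, a beacon interval coming from mac80211 (in TU) has to be multiplied by 16 before it is written. A hedged sketch, with a made-up function name and the same helper assumptions as above:

static void example_enable_beacon(struct rt2x00_dev *rt2x00dev, u16 beacon_int)
{
	u32 reg;

	rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
	/* beacon_int is in TU, the register expects 1/16 TU units. */
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, beacon_int * 16);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 1); /* 01: infra mode */
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
	rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
}
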
+/*
+ * TBTT_SYNC_CFG:
+ */
+#define TBTT_SYNC_CFG 0x1118
+
+/*
+ * TSF_TIMER_DW0: Local lsb TSF timer, read-only
+ */
+#define TSF_TIMER_DW0 0x111c
+#define TSF_TIMER_DW0_LOW_WORD FIELD32(0xffffffff)
+
+/*
+ * TSF_TIMER_DW1: Local msb TSF timer, read-only
+ */
+#define TSF_TIMER_DW1 0x1120
+#define TSF_TIMER_DW1_HIGH_WORD FIELD32(0xffffffff)
+
+/*
+ * TBTT_TIMER: Timer remaining until the next TBTT, read-only
+ */
+#define TBTT_TIMER 0x1124
+
+/*
+ * INT_TIMER_CFG:
+ */
+#define INT_TIMER_CFG 0x1128
+
+/*
+ * INT_TIMER_EN: GP-timer and pre-tbtt Int enable
+ */
+#define INT_TIMER_EN 0x112c
+
+/*
+ * CH_IDLE_STA: channel idle time
+ */
+#define CH_IDLE_STA 0x1130
+
+/*
+ * CH_BUSY_STA: channel busy time
+ */
+#define CH_BUSY_STA 0x1134
+
+/*
+ * MAC_STATUS_CFG:
+ * BBP_RF_BUSY: When set to 0, BBP and RF are stable.
+ * If 1 or higher, one of the two blocks is busy.
+ */
+#define MAC_STATUS_CFG 0x1200
+#define MAC_STATUS_CFG_BBP_RF_BUSY FIELD32(0x00000003)
+
+/*
+ * PWR_PIN_CFG:
+ */
+#define PWR_PIN_CFG 0x1204
+
+/*
+ * AUTOWAKEUP_CFG: Manual power control / status register
+ * TBCN_BEFORE_WAKE: ForceWake has higher privilege than PutToSleep when both are set
+ * AUTOWAKE: 0:sleep, 1:awake
+ */
+#define AUTOWAKEUP_CFG 0x1208
+#define AUTOWAKEUP_CFG_AUTO_LEAD_TIME FIELD32(0x000000ff)
+#define AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE FIELD32(0x00007f00)
+#define AUTOWAKEUP_CFG_AUTOWAKE FIELD32(0x00008000)
+
+/*
+ * EDCA_AC0_CFG:
+ */
+#define EDCA_AC0_CFG 0x1300
+#define EDCA_AC0_CFG_TX_OP FIELD32(0x000000ff)
+#define EDCA_AC0_CFG_AIFSN FIELD32(0x00000f00)
+#define EDCA_AC0_CFG_CWMIN FIELD32(0x0000f000)
+#define EDCA_AC0_CFG_CWMAX FIELD32(0x000f0000)
+
+/*
+ * EDCA_AC1_CFG:
+ */
+#define EDCA_AC1_CFG 0x1304
+#define EDCA_AC1_CFG_TX_OP FIELD32(0x000000ff)
+#define EDCA_AC1_CFG_AIFSN FIELD32(0x00000f00)
+#define EDCA_AC1_CFG_CWMIN FIELD32(0x0000f000)
+#define EDCA_AC1_CFG_CWMAX FIELD32(0x000f0000)
+
+/*
+ * EDCA_AC2_CFG:
+ */
+#define EDCA_AC2_CFG 0x1308
+#define EDCA_AC2_CFG_TX_OP FIELD32(0x000000ff)
+#define EDCA_AC2_CFG_AIFSN FIELD32(0x00000f00)
+#define EDCA_AC2_CFG_CWMIN FIELD32(0x0000f000)
+#define EDCA_AC2_CFG_CWMAX FIELD32(0x000f0000)
+
+/*
+ * EDCA_AC3_CFG:
+ */
+#define EDCA_AC3_CFG 0x130c
+#define EDCA_AC3_CFG_TX_OP FIELD32(0x000000ff)
+#define EDCA_AC3_CFG_AIFSN FIELD32(0x00000f00)
+#define EDCA_AC3_CFG_CWMIN FIELD32(0x0000f000)
+#define EDCA_AC3_CFG_CWMAX FIELD32(0x000f0000)
+
+/*
+ * EDCA_TID_AC_MAP:
+ */
+#define EDCA_TID_AC_MAP 0x1310
+
+/*
+ * TX_PWR_CFG_0:
+ */
+#define TX_PWR_CFG_0 0x1314
+#define TX_PWR_CFG_0_1MBS FIELD32(0x0000000f)
+#define TX_PWR_CFG_0_2MBS FIELD32(0x000000f0)
+#define TX_PWR_CFG_0_55MBS FIELD32(0x00000f00)
+#define TX_PWR_CFG_0_11MBS FIELD32(0x0000f000)
+#define TX_PWR_CFG_0_6MBS FIELD32(0x000f0000)
+#define TX_PWR_CFG_0_9MBS FIELD32(0x00f00000)
+#define TX_PWR_CFG_0_12MBS FIELD32(0x0f000000)
+#define TX_PWR_CFG_0_18MBS FIELD32(0xf0000000)
+
+/*
+ * TX_PWR_CFG_1:
+ */
+#define TX_PWR_CFG_1 0x1318
+#define TX_PWR_CFG_1_24MBS FIELD32(0x0000000f)
+#define TX_PWR_CFG_1_36MBS FIELD32(0x000000f0)
+#define TX_PWR_CFG_1_48MBS FIELD32(0x00000f00)
+#define TX_PWR_CFG_1_54MBS FIELD32(0x0000f000)
+#define TX_PWR_CFG_1_MCS0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_1_MCS1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_1_MCS2 FIELD32(0x0f000000)
+#define TX_PWR_CFG_1_MCS3 FIELD32(0xf0000000)
+
+/*
+ * TX_PWR_CFG_2:
+ */
+#define TX_PWR_CFG_2 0x131c
+#define TX_PWR_CFG_2_MCS4 FIELD32(0x0000000f)
+#define TX_PWR_CFG_2_MCS5 FIELD32(0x000000f0)
+#define TX_PWR_CFG_2_MCS6 FIELD32(0x00000f00)
+#define TX_PWR_CFG_2_MCS7 FIELD32(0x0000f000)
+#define TX_PWR_CFG_2_MCS8 FIELD32(0x000f0000)
+#define TX_PWR_CFG_2_MCS9 FIELD32(0x00f00000)
+#define TX_PWR_CFG_2_MCS10 FIELD32(0x0f000000)
+#define TX_PWR_CFG_2_MCS11 FIELD32(0xf0000000)
+
+/*
+ * TX_PWR_CFG_3:
+ */
+#define TX_PWR_CFG_3 0x1320
+#define TX_PWR_CFG_3_MCS12 FIELD32(0x0000000f)
+#define TX_PWR_CFG_3_MCS13 FIELD32(0x000000f0)
+#define TX_PWR_CFG_3_MCS14 FIELD32(0x00000f00)
+#define TX_PWR_CFG_3_MCS15 FIELD32(0x0000f000)
+#define TX_PWR_CFG_3_UKNOWN1 FIELD32(0x000f0000)
+#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000)
+#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000)
+#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000)
+
+/*
+ * TX_PWR_CFG_4:
+ */
+#define TX_PWR_CFG_4 0x1324
+#define TX_PWR_CFG_4_UKNOWN5 FIELD32(0x0000000f)
+#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0)
+#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00)
+#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000)
+
+/*
+ * TX_PIN_CFG:
+ */
+#define TX_PIN_CFG 0x1328
+#define TX_PIN_CFG_PA_PE_A0_EN FIELD32(0x00000001)
+#define TX_PIN_CFG_PA_PE_G0_EN FIELD32(0x00000002)
+#define TX_PIN_CFG_PA_PE_A1_EN FIELD32(0x00000004)
+#define TX_PIN_CFG_PA_PE_G1_EN FIELD32(0x00000008)
+#define TX_PIN_CFG_PA_PE_A0_POL FIELD32(0x00000010)
+#define TX_PIN_CFG_PA_PE_G0_POL FIELD32(0x00000020)
+#define TX_PIN_CFG_PA_PE_A1_POL FIELD32(0x00000040)
+#define TX_PIN_CFG_PA_PE_G1_POL FIELD32(0x00000080)
+#define TX_PIN_CFG_LNA_PE_A0_EN FIELD32(0x00000100)
+#define TX_PIN_CFG_LNA_PE_G0_EN FIELD32(0x00000200)
+#define TX_PIN_CFG_LNA_PE_A1_EN FIELD32(0x00000400)
+#define TX_PIN_CFG_LNA_PE_G1_EN FIELD32(0x00000800)
+#define TX_PIN_CFG_LNA_PE_A0_POL FIELD32(0x00001000)
+#define TX_PIN_CFG_LNA_PE_G0_POL FIELD32(0x00002000)
+#define TX_PIN_CFG_LNA_PE_A1_POL FIELD32(0x00004000)
+#define TX_PIN_CFG_LNA_PE_G1_POL FIELD32(0x00008000)
+#define TX_PIN_CFG_RFTR_EN FIELD32(0x00010000)
+#define TX_PIN_CFG_RFTR_POL FIELD32(0x00020000)
+#define TX_PIN_CFG_TRSW_EN FIELD32(0x00040000)
+#define TX_PIN_CFG_TRSW_POL FIELD32(0x00080000)
+
+/*
+ * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
+ */
+#define TX_BAND_CFG 0x132c
+#define TX_BAND_CFG_HT40_PLUS FIELD32(0x00000001)
+#define TX_BAND_CFG_A FIELD32(0x00000002)
+#define TX_BAND_CFG_BG FIELD32(0x00000004)
+
+/*
+ * TX_SW_CFG0:
+ */
+#define TX_SW_CFG0 0x1330
+
+/*
+ * TX_SW_CFG1:
+ */
+#define TX_SW_CFG1 0x1334
+
+/*
+ * TX_SW_CFG2:
+ */
+#define TX_SW_CFG2 0x1338
+
+/*
+ * TXOP_THRES_CFG:
+ */
+#define TXOP_THRES_CFG 0x133c
+
+/*
+ * TXOP_CTRL_CFG:
+ */
+#define TXOP_CTRL_CFG 0x1340
+
+/*
+ * TX_RTS_CFG:
+ * RTS_THRES: unit:byte
+ * RTS_FBK_EN: enable rts rate fallback
+ */
+#define TX_RTS_CFG 0x1344
+#define TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT FIELD32(0x000000ff)
+#define TX_RTS_CFG_RTS_THRES FIELD32(0x00ffff00)
+#define TX_RTS_CFG_RTS_FBK_EN FIELD32(0x01000000)
+
+/*
+ * TX_TIMEOUT_CFG:
+ * MPDU_LIFETIME: expiration time = 2^(9+MPDU LIFE TIME) us
+ * RX_ACK_TIMEOUT: unit:slot. Used for TX procedure
+ * TX_OP_TIMEOUT: TXOP timeout value for TXOP truncation.
+ * it is recommended that:
+ * (SLOT_TIME) > (TX_OP_TIMEOUT) > (RX_ACK_TIMEOUT)
+ */
+#define TX_TIMEOUT_CFG 0x1348
+#define TX_TIMEOUT_CFG_MPDU_LIFETIME FIELD32(0x000000f0)
+#define TX_TIMEOUT_CFG_RX_ACK_TIMEOUT FIELD32(0x0000ff00)
+#define TX_TIMEOUT_CFG_TX_OP_TIMEOUT FIELD32(0x00ff0000)
+
+/*
+ * TX_RTY_CFG:
+ * SHORT_RTY_LIMIT: short retry limit
+ * LONG_RTY_LIMIT: long retry limit
+ * LONG_RTY_THRE: Long retry threshold
+ * NON_AGG_RTY_MODE: Non-Aggregate MPDU retry mode
+ * 0:expired by retry limit, 1: expired by mpdu life timer
+ * AGG_RTY_MODE: Aggregate MPDU retry mode
+ * 0:expired by retry limit, 1: expired by mpdu life timer
+ * TX_AUTO_FB_ENABLE: Tx retry PHY rate auto fallback enable
+ */
+#define TX_RTY_CFG 0x134c
+#define TX_RTY_CFG_SHORT_RTY_LIMIT FIELD32(0x000000ff)
+#define TX_RTY_CFG_LONG_RTY_LIMIT FIELD32(0x0000ff00)
+#define TX_RTY_CFG_LONG_RTY_THRE FIELD32(0x0fff0000)
+#define TX_RTY_CFG_NON_AGG_RTY_MODE FIELD32(0x10000000)
+#define TX_RTY_CFG_AGG_RTY_MODE FIELD32(0x20000000)
+#define TX_RTY_CFG_TX_AUTO_FB_ENABLE FIELD32(0x40000000)
+
+/*
+ * TX_LINK_CFG:
+ * REMOTE_MFB_LIFETIME: remote MFB life time. unit: 32us
+ * MFB_ENABLE: TX apply remote MFB 1:enable
+ * REMOTE_UMFS_ENABLE: remote unsolicited MFB enable
+ * 0: do not apply remote unsolicited MFB (MFS=7)
+ * TX_MRQ_EN: MCS request TX enable
+ * TX_RDG_EN: RDG TX enable
+ * TX_CF_ACK_EN: Piggyback CF-ACK enable
+ * REMOTE_MFB: remote MCS feedback
+ * REMOTE_MFS: remote MCS feedback sequence number
+ */
+#define TX_LINK_CFG 0x1350
+#define TX_LINK_CFG_REMOTE_MFB_LIFETIME FIELD32(0x000000ff)
+#define TX_LINK_CFG_MFB_ENABLE FIELD32(0x00000100)
+#define TX_LINK_CFG_REMOTE_UMFS_ENABLE FIELD32(0x00000200)
+#define TX_LINK_CFG_TX_MRQ_EN FIELD32(0x00000400)
+#define TX_LINK_CFG_TX_RDG_EN FIELD32(0x00000800)
+#define TX_LINK_CFG_TX_CF_ACK_EN FIELD32(0x00001000)
+#define TX_LINK_CFG_REMOTE_MFB FIELD32(0x00ff0000)
+#define TX_LINK_CFG_REMOTE_MFS FIELD32(0xff000000)
+
+/*
+ * HT_FBK_CFG0:
+ */
+#define HT_FBK_CFG0 0x1354
+#define HT_FBK_CFG0_HTMCS0FBK FIELD32(0x0000000f)
+#define HT_FBK_CFG0_HTMCS1FBK FIELD32(0x000000f0)
+#define HT_FBK_CFG0_HTMCS2FBK FIELD32(0x00000f00)
+#define HT_FBK_CFG0_HTMCS3FBK FIELD32(0x0000f000)
+#define HT_FBK_CFG0_HTMCS4FBK FIELD32(0x000f0000)
+#define HT_FBK_CFG0_HTMCS5FBK FIELD32(0x00f00000)
+#define HT_FBK_CFG0_HTMCS6FBK FIELD32(0x0f000000)
+#define HT_FBK_CFG0_HTMCS7FBK FIELD32(0xf0000000)
+
+/*
+ * HT_FBK_CFG1:
+ */
+#define HT_FBK_CFG1 0x1358
+#define HT_FBK_CFG1_HTMCS8FBK FIELD32(0x0000000f)
+#define HT_FBK_CFG1_HTMCS9FBK FIELD32(0x000000f0)
+#define HT_FBK_CFG1_HTMCS10FBK FIELD32(0x00000f00)
+#define HT_FBK_CFG1_HTMCS11FBK FIELD32(0x0000f000)
+#define HT_FBK_CFG1_HTMCS12FBK FIELD32(0x000f0000)
+#define HT_FBK_CFG1_HTMCS13FBK FIELD32(0x00f00000)
+#define HT_FBK_CFG1_HTMCS14FBK FIELD32(0x0f000000)
+#define HT_FBK_CFG1_HTMCS15FBK FIELD32(0xf0000000)
+
+/*
+ * LG_FBK_CFG0:
+ */
+#define LG_FBK_CFG0 0x135c
+#define LG_FBK_CFG0_OFDMMCS0FBK FIELD32(0x0000000f)
+#define LG_FBK_CFG0_OFDMMCS1FBK FIELD32(0x000000f0)
+#define LG_FBK_CFG0_OFDMMCS2FBK FIELD32(0x00000f00)
+#define LG_FBK_CFG0_OFDMMCS3FBK FIELD32(0x0000f000)
+#define LG_FBK_CFG0_OFDMMCS4FBK FIELD32(0x000f0000)
+#define LG_FBK_CFG0_OFDMMCS5FBK FIELD32(0x00f00000)
+#define LG_FBK_CFG0_OFDMMCS6FBK FIELD32(0x0f000000)
+#define LG_FBK_CFG0_OFDMMCS7FBK FIELD32(0xf0000000)
+
+/*
+ * LG_FBK_CFG1:
+ */
+#define LG_FBK_CFG1 0x1360
+#define LG_FBK_CFG0_CCKMCS0FBK FIELD32(0x0000000f)
+#define LG_FBK_CFG0_CCKMCS1FBK FIELD32(0x000000f0)
+#define LG_FBK_CFG0_CCKMCS2FBK FIELD32(0x00000f00)
+#define LG_FBK_CFG0_CCKMCS3FBK FIELD32(0x0000f000)
+
+/*
+ * CCK_PROT_CFG: CCK Protection
+ * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
+ * PROTECT_CTRL: Protection control frame type for CCK TX
+ * 0:none, 1:RTS/CTS, 2:CTS-to-self
+ * PROTECT_NAV: TXOP protection type for CCK TX
+ * 0:none, 1:ShortNAVprotect, 2:LongNAVProtect
+ * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_MM40: CCK TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_GF20: CCK TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_GF40: CCK TXOP allowance, 0:disallow
+ * RTS_TH_EN: RTS threshold enable on CCK TX
+ */
+#define CCK_PROT_CFG 0x1364
+#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
+#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
+#define CCK_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
+#define CCK_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
+
+/*
+ * OFDM_PROT_CFG: OFDM Protection
+ */
+#define OFDM_PROT_CFG 0x1368
+#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
+#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
+#define OFDM_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
+#define OFDM_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
+
+/*
+ * MM20_PROT_CFG: MM20 Protection
+ */
+#define MM20_PROT_CFG 0x136c
+#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
+#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
+#define MM20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
+#define MM20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
+
+/*
+ * MM40_PROT_CFG: MM40 Protection
+ */
+#define MM40_PROT_CFG 0x1370
+#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
+#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
+#define MM40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
+#define MM40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
+
+/*
+ * GF20_PROT_CFG: GF20 Protection
+ */
+#define GF20_PROT_CFG 0x1374
+#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
+#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
+#define GF20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
+#define GF20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
+
+/*
+ * GF40_PROT_CFG: GF40 Protection
+ */
+#define GF40_PROT_CFG 0x1378
+#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
+#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
+#define GF40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
+#define GF40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
+
+/*
+ * EXP_CTS_TIME:
+ */
+#define EXP_CTS_TIME 0x137c
+
+/*
+ * EXP_ACK_TIME:
+ */
+#define EXP_ACK_TIME 0x1380
+
+/*
+ * RX_FILTER_CFG: RX configuration register.
+ */
+#define RX_FILTER_CFG 0x1400
+#define RX_FILTER_CFG_DROP_CRC_ERROR FIELD32(0x00000001)
+#define RX_FILTER_CFG_DROP_PHY_ERROR FIELD32(0x00000002)
+#define RX_FILTER_CFG_DROP_NOT_TO_ME FIELD32(0x00000004)
+#define RX_FILTER_CFG_DROP_NOT_MY_BSSD FIELD32(0x00000008)
+#define RX_FILTER_CFG_DROP_VER_ERROR FIELD32(0x00000010)
+#define RX_FILTER_CFG_DROP_MULTICAST FIELD32(0x00000020)
+#define RX_FILTER_CFG_DROP_BROADCAST FIELD32(0x00000040)
+#define RX_FILTER_CFG_DROP_DUPLICATE FIELD32(0x00000080)
+#define RX_FILTER_CFG_DROP_CF_END_ACK FIELD32(0x00000100)
+#define RX_FILTER_CFG_DROP_CF_END FIELD32(0x00000200)
+#define RX_FILTER_CFG_DROP_ACK FIELD32(0x00000400)
+#define RX_FILTER_CFG_DROP_CTS FIELD32(0x00000800)
+#define RX_FILTER_CFG_DROP_RTS FIELD32(0x00001000)
+#define RX_FILTER_CFG_DROP_PSPOLL FIELD32(0x00002000)
+#define RX_FILTER_CFG_DROP_BA FIELD32(0x00004000)
+#define RX_FILTER_CFG_DROP_BAR FIELD32(0x00008000)
+#define RX_FILTER_CFG_DROP_CNTL FIELD32(0x00010000)
+
+/*
+ * AUTO_RSP_CFG:
+ * AUTORESPONDER: 0: disable, 1: enable
+ * BAC_ACK_POLICY: 0:long, 1:short preamble
+ * CTS_40_MMODE: Response CTS 40MHz duplicate mode
+ * CTS_40_MREF: Response CTS 40MHz duplicate mode
+ * AR_PREAMBLE: Auto responder preamble 0:long, 1:short preamble
+ * DUAL_CTS_EN: Power bit value in control frame
+ * ACK_CTS_PSM_BIT:Power bit value in control frame
+ */
+#define AUTO_RSP_CFG 0x1404
+#define AUTO_RSP_CFG_AUTORESPONDER FIELD32(0x00000001)
+#define AUTO_RSP_CFG_BAC_ACK_POLICY FIELD32(0x00000002)
+#define AUTO_RSP_CFG_CTS_40_MMODE FIELD32(0x00000004)
+#define AUTO_RSP_CFG_CTS_40_MREF FIELD32(0x00000008)
+#define AUTO_RSP_CFG_AR_PREAMBLE FIELD32(0x00000010)
+#define AUTO_RSP_CFG_DUAL_CTS_EN FIELD32(0x00000040)
+#define AUTO_RSP_CFG_ACK_CTS_PSM_BIT FIELD32(0x00000080)
+
+/*
+ * LEGACY_BASIC_RATE:
+ */
+#define LEGACY_BASIC_RATE 0x1408
+
+/*
+ * HT_BASIC_RATE:
+ */
+#define HT_BASIC_RATE 0x140c
+
+/*
+ * HT_CTRL_CFG:
+ */
+#define HT_CTRL_CFG 0x1410
+
+/*
+ * SIFS_COST_CFG:
+ */
+#define SIFS_COST_CFG 0x1414
+
+/*
+ * RX_PARSER_CFG:
+ * Set NAV for all received frames
+ */
+#define RX_PARSER_CFG 0x1418
+
+/*
+ * TX_SEC_CNT0:
+ */
+#define TX_SEC_CNT0 0x1500
+
+/*
+ * RX_SEC_CNT0:
+ */
+#define RX_SEC_CNT0 0x1504
+
+/*
+ * CCMP_FC_MUTE:
+ */
+#define CCMP_FC_MUTE 0x1508
+
+/*
+ * TXOP_HLDR_ADDR0:
+ */
+#define TXOP_HLDR_ADDR0 0x1600
+
+/*
+ * TXOP_HLDR_ADDR1:
+ */
+#define TXOP_HLDR_ADDR1 0x1604
+
+/*
+ * TXOP_HLDR_ET:
+ */
+#define TXOP_HLDR_ET 0x1608
+
+/*
+ * QOS_CFPOLL_RA_DW0:
+ */
+#define QOS_CFPOLL_RA_DW0 0x160c
+
+/*
+ * QOS_CFPOLL_RA_DW1:
+ */
+#define QOS_CFPOLL_RA_DW1 0x1610
+
+/*
+ * QOS_CFPOLL_QC:
+ */
+#define QOS_CFPOLL_QC 0x1614
+
+/*
+ * RX_STA_CNT0: RX PLCP error count & RX CRC error count
+ */
+#define RX_STA_CNT0 0x1700
+#define RX_STA_CNT0_CRC_ERR FIELD32(0x0000ffff)
+#define RX_STA_CNT0_PHY_ERR FIELD32(0xffff0000)
+
+/*
+ * RX_STA_CNT1: RX False CCA count & RX LONG frame count
+ */
+#define RX_STA_CNT1 0x1704
+#define RX_STA_CNT1_FALSE_CCA FIELD32(0x0000ffff)
+#define RX_STA_CNT1_PLCP_ERR FIELD32(0xffff0000)
+
+/*
+ * RX_STA_CNT2:
+ */
+#define RX_STA_CNT2 0x1708
+#define RX_STA_CNT2_RX_DUPLI_COUNT FIELD32(0x0000ffff)
+#define RX_STA_CNT2_RX_FIFO_OVERFLOW FIELD32(0xffff0000)
+
+/*
+ * TX_STA_CNT0: TX Beacon count
+ */
+#define TX_STA_CNT0 0x170c
+#define TX_STA_CNT0_TX_FAIL_COUNT FIELD32(0x0000ffff)
+#define TX_STA_CNT0_TX_BEACON_COUNT FIELD32(0xffff0000)
+
+/*
+ * TX_STA_CNT1: TX tx count
+ */
+#define TX_STA_CNT1 0x1710
+#define TX_STA_CNT1_TX_SUCCESS FIELD32(0x0000ffff)
+#define TX_STA_CNT1_TX_RETRANSMIT FIELD32(0xffff0000)
+
+/*
+ * TX_STA_CNT2: TX tx count
+ */
+#define TX_STA_CNT2 0x1714
+#define TX_STA_CNT2_TX_ZERO_LEN_COUNT FIELD32(0x0000ffff)
+#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT FIELD32(0xffff0000)
+
+/*
+ * TX_STA_FIFO: TX Result for specific PID status fifo register
+ */
+#define TX_STA_FIFO 0x1718
+#define TX_STA_FIFO_VALID FIELD32(0x00000001)
+#define TX_STA_FIFO_PID_TYPE FIELD32(0x0000001e)
+#define TX_STA_FIFO_TX_SUCCESS FIELD32(0x00000020)
+#define TX_STA_FIFO_TX_AGGRE FIELD32(0x00000040)
+#define TX_STA_FIFO_TX_ACK_REQUIRED FIELD32(0x00000080)
+#define TX_STA_FIFO_WCID FIELD32(0x0000ff00)
+#define TX_STA_FIFO_SUCCESS_RATE FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT: Debug counter
+ */
+#define TX_AGG_CNT 0x171c
+#define TX_AGG_CNT_NON_AGG_TX_COUNT FIELD32(0x0000ffff)
+#define TX_AGG_CNT_AGG_TX_COUNT FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT0:
+ */
+#define TX_AGG_CNT0 0x1720
+#define TX_AGG_CNT0_AGG_SIZE_1_COUNT FIELD32(0x0000ffff)
+#define TX_AGG_CNT0_AGG_SIZE_2_COUNT FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT1:
+ */
+#define TX_AGG_CNT1 0x1724
+#define TX_AGG_CNT1_AGG_SIZE_3_COUNT FIELD32(0x0000ffff)
+#define TX_AGG_CNT1_AGG_SIZE_4_COUNT FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT2:
+ */
+#define TX_AGG_CNT2 0x1728
+#define TX_AGG_CNT2_AGG_SIZE_5_COUNT FIELD32(0x0000ffff)
+#define TX_AGG_CNT2_AGG_SIZE_6_COUNT FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT3:
+ */
+#define TX_AGG_CNT3 0x172c
+#define TX_AGG_CNT3_AGG_SIZE_7_COUNT FIELD32(0x0000ffff)
+#define TX_AGG_CNT3_AGG_SIZE_8_COUNT FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT4:
+ */
+#define TX_AGG_CNT4 0x1730
+#define TX_AGG_CNT4_AGG_SIZE_9_COUNT FIELD32(0x0000ffff)
+#define TX_AGG_CNT4_AGG_SIZE_10_COUNT FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT5:
+ */
+#define TX_AGG_CNT5 0x1734
+#define TX_AGG_CNT5_AGG_SIZE_11_COUNT FIELD32(0x0000ffff)
+#define TX_AGG_CNT5_AGG_SIZE_12_COUNT FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT6:
+ */
+#define TX_AGG_CNT6 0x1738
+#define TX_AGG_CNT6_AGG_SIZE_13_COUNT FIELD32(0x0000ffff)
+#define TX_AGG_CNT6_AGG_SIZE_14_COUNT FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT7:
+ */
+#define TX_AGG_CNT7 0x173c
+#define TX_AGG_CNT7_AGG_SIZE_15_COUNT FIELD32(0x0000ffff)
+#define TX_AGG_CNT7_AGG_SIZE_16_COUNT FIELD32(0xffff0000)
+
+/*
+ * MPDU_DENSITY_CNT:
+ * TX_ZERO_DEL: TX zero length delimiter count
+ * RX_ZERO_DEL: RX zero length delimiter count
+ */
+#define MPDU_DENSITY_CNT 0x1740
+#define MPDU_DENSITY_CNT_TX_ZERO_DEL FIELD32(0x0000ffff)
+#define MPDU_DENSITY_CNT_RX_ZERO_DEL FIELD32(0xffff0000)
+
+/*
+ * Security key table memory.
+ * MAC_WCID_BASE: 8-bytes (use only 6 bytes) * 256 entry
+ * PAIRWISE_KEY_TABLE_BASE: 32-byte * 256 entry
+ * MAC_IVEIV_TABLE_BASE: 8-byte * 256-entry
+ * MAC_WCID_ATTRIBUTE_BASE: 4-byte * 256-entry
+ * SHARED_KEY_TABLE_BASE: 32-byte * 16-entry
+ * SHARED_KEY_MODE_BASE: 4-byte * 16-entry
+ */
+#define MAC_WCID_BASE 0x1800
+#define PAIRWISE_KEY_TABLE_BASE 0x4000
+#define MAC_IVEIV_TABLE_BASE 0x6000
+#define MAC_WCID_ATTRIBUTE_BASE 0x6800
+#define SHARED_KEY_TABLE_BASE 0x6c00
+#define SHARED_KEY_MODE_BASE 0x7000
+
+#define MAC_WCID_ENTRY(__idx) \
+ ( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) )
+#define PAIRWISE_KEY_ENTRY(__idx) \
+ ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
+#define MAC_IVEIV_ENTRY(__idx) \
+ ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
+#define MAC_WCID_ATTR_ENTRY(__idx) \
+ ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
+#define SHARED_KEY_ENTRY(__idx) \
+ ( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
+#define SHARED_KEY_MODE_ENTRY(__idx) \
+ ( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) )
+
+struct mac_wcid_entry {
+ u8 mac[6];
+ u8 reserved[2];
+} __attribute__ ((packed));
+
+struct hw_key_entry {
+ u8 key[16];
+ u8 tx_mic[8];
+ u8 rx_mic[8];
+} __attribute__ ((packed));
+
+struct mac_iveiv_entry {
+ u8 iv[8];
+} __attribute__ ((packed));
+
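
The *_ENTRY() macros above only compute byte offsets into the on-chip tables; the structures are then uploaded in one go. A sketch of loading a pairwise key, assuming rt2x00usb_register_multiwrite(), min_t() from <linux/kernel.h> and struct ieee80211_key_conf from <net/mac80211.h>; TKIP MIC handling is deliberately omitted and the function name is made up:

static void example_load_pairwise_key(struct rt2x00_dev *rt2x00dev,
				      struct ieee80211_key_conf *key)
{
	struct hw_key_entry key_entry;

	memset(&key_entry, 0, sizeof(key_entry));
	memcpy(key_entry.key, key->key,
	       min_t(size_t, key->keylen, sizeof(key_entry.key)));

	/* Write the whole 32-byte entry at the computed table offset. */
	rt2x00usb_register_multiwrite(rt2x00dev,
				      PAIRWISE_KEY_ENTRY(key->hw_key_idx),
				      &key_entry, sizeof(key_entry));
}
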
+/*
+ * MAC_WCID_ATTRIBUTE:
+ */
+#define MAC_WCID_ATTRIBUTE_KEYTAB FIELD32(0x00000001)
+#define MAC_WCID_ATTRIBUTE_CIPHER FIELD32(0x0000000e)
+#define MAC_WCID_ATTRIBUTE_BSS_IDX FIELD32(0x00000070)
+#define MAC_WCID_ATTRIBUTE_RX_WIUDF FIELD32(0x00000380)
+
+/*
+ * SHARED_KEY_MODE:
+ */
+#define SHARED_KEY_MODE_BSS0_KEY0 FIELD32(0x00000007)
+#define SHARED_KEY_MODE_BSS0_KEY1 FIELD32(0x00000070)
+#define SHARED_KEY_MODE_BSS0_KEY2 FIELD32(0x00000700)
+#define SHARED_KEY_MODE_BSS0_KEY3 FIELD32(0x00007000)
+#define SHARED_KEY_MODE_BSS1_KEY0 FIELD32(0x00070000)
+#define SHARED_KEY_MODE_BSS1_KEY1 FIELD32(0x00700000)
+#define SHARED_KEY_MODE_BSS1_KEY2 FIELD32(0x07000000)
+#define SHARED_KEY_MODE_BSS1_KEY3 FIELD32(0x70000000)
+
+/*
+ * HOST-MCU communication
+ */
+
+/*
+ * H2M_MAILBOX_CSR: Host-to-MCU Mailbox.
+ */
+#define H2M_MAILBOX_CSR 0x7010
+#define H2M_MAILBOX_CSR_ARG0 FIELD32(0x000000ff)
+#define H2M_MAILBOX_CSR_ARG1 FIELD32(0x0000ff00)
+#define H2M_MAILBOX_CSR_CMD_TOKEN FIELD32(0x00ff0000)
+#define H2M_MAILBOX_CSR_OWNER FIELD32(0xff000000)
+
+/*
+ * H2M_MAILBOX_CID:
+ */
+#define H2M_MAILBOX_CID 0x7014
+#define H2M_MAILBOX_CID_CMD0 FIELD32(0x000000ff)
+#define H2M_MAILBOX_CID_CMD1 FIELD32(0x0000ff00)
+#define H2M_MAILBOX_CID_CMD2 FIELD32(0x00ff0000)
+#define H2M_MAILBOX_CID_CMD3 FIELD32(0xff000000)
+
+/*
+ * H2M_MAILBOX_STATUS:
+ */
+#define H2M_MAILBOX_STATUS 0x701c
+
+/*
+ * H2M_INT_SRC:
+ */
+#define H2M_INT_SRC 0x7024
+
+/*
+ * H2M_BBP_AGENT:
+ */
+#define H2M_BBP_AGENT 0x7028
+
+/*
+ * MCU_LEDCS: LED control for MCU Mailbox.
+ */
+#define MCU_LEDCS_LED_MODE FIELD8(0x1f)
+#define MCU_LEDCS_POLARITY FIELD8(0x01)
+
+/*
+ * HW_CS_CTS_BASE:
+ * Carrier-sense CTS frame base address.
+ * It's where mac stores carrier-sense frame for carrier-sense function.
+ */
+#define HW_CS_CTS_BASE 0x7700
+
+/*
+ * HW_DFS_CTS_BASE:
+ * DFS CTS frame base address. It's where the MAC stores the CTS frame for DFS.
+ */
+#define HW_DFS_CTS_BASE 0x7780
+
+/*
+ * TXRX control registers - base address 0x3000
+ */
+
+/*
+ * TXRX_CSR1:
+ * rt2860b UNKNOWN reg use R/O Reg Addr 0x77d0 first..
+ */
+#define TXRX_CSR1 0x77d0
+
+/*
+ * HW_DEBUG_SETTING_BASE:
+ * Since a NULL frame won't be that long (256 bytes),
+ * we steal 16 tail bytes to store debugging settings.
+ */
+#define HW_DEBUG_SETTING_BASE 0x77f0
+#define HW_DEBUG_SETTING_BASE2 0x7770
+
+/*
+ * HW_BEACON_BASE
+ * To support a maximum of 8 MBSS, each with a maximum beacon
+ * length of 512 bytes, three discontiguous memory segments are used:
+ * 1. The original region for BCN 0~3
+ * 2. Memory extracted from the FCE table for BCN 4~5
+ * 3. Memory extracted from the pairwise key table for BCN 6~7
+ * The latter occupies the memory of WCID 238~253 for BCN 6
+ * and WCID 222~237 for BCN 7.
+ *
+ * IMPORTANT NOTE: Not sure why legacy driver does this,
+ * but HW_BEACON_BASE7 is 0x0200 bytes below HW_BEACON_BASE6.
+ */
+#define HW_BEACON_BASE0 0x7800
+#define HW_BEACON_BASE1 0x7a00
+#define HW_BEACON_BASE2 0x7c00
+#define HW_BEACON_BASE3 0x7e00
+#define HW_BEACON_BASE4 0x7200
+#define HW_BEACON_BASE5 0x7400
+#define HW_BEACON_BASE6 0x5dc0
+#define HW_BEACON_BASE7 0x5bc0
+
+#define HW_BEACON_OFFSET(__index) \
+ ( ((__index) < 4) ? ( HW_BEACON_BASE0 + (__index * 0x0200) ) : \
+ (((__index) < 6) ? ( HW_BEACON_BASE4 + ((__index - 4) * 0x0200) ) : \
+ (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))) )
+
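
Given the discontiguous layout, the index-to-address mapping can be sanity-checked at compile time. A sketch (the helper name is made up; BUILD_BUG_ON() comes from <linux/kernel.h>):

static inline void hw_beacon_offset_selftest(void)
{
	/* BCN 0~3 live in the original region... */
	BUILD_BUG_ON(HW_BEACON_OFFSET(0) != HW_BEACON_BASE0);
	BUILD_BUG_ON(HW_BEACON_OFFSET(3) != HW_BEACON_BASE3);
	/* ...BCN 4~5 in the FCE region... */
	BUILD_BUG_ON(HW_BEACON_OFFSET(5) != HW_BEACON_BASE5);
	/* ...and BCN 6~7 grow downwards from HW_BEACON_BASE6. */
	BUILD_BUG_ON(HW_BEACON_OFFSET(7) != HW_BEACON_BASE7);
}
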
+/*
+ * 8051 firmware image.
+ */
+#define FIRMWARE_RT2870 "rt2870.bin"
+#define FIRMWARE_IMAGE_BASE 0x3000
+
+/*
+ * BBP registers.
+ * The wordsize of the BBP is 8 bits.
+ */
+
+/*
+ * BBP 1: TX Antenna
+ */
+#define BBP1_TX_POWER FIELD8(0x07)
+#define BBP1_TX_ANTENNA FIELD8(0x18)
+
+/*
+ * BBP 3: RX Antenna
+ */
+#define BBP3_RX_ANTENNA FIELD8(0x18)
+#define BBP3_HT40_PLUS FIELD8(0x20)
+
+/*
+ * BBP 4: Bandwidth
+ */
+#define BBP4_TX_BF FIELD8(0x01)
+#define BBP4_BANDWIDTH FIELD8(0x18)
+
+/*
+ * RFCSR registers
+ * The wordsize of the RFCSR is 8 bits.
+ */
+
+/*
+ * RFCSR 6:
+ */
+#define RFCSR6_R FIELD8(0x03)
+
+/*
+ * RFCSR 7:
+ */
+#define RFCSR7_RF_TUNING FIELD8(0x01)
+
+/*
+ * RFCSR 12:
+ */
+#define RFCSR12_TX_POWER FIELD8(0x1f)
+
+/*
+ * RFCSR 22:
+ */
+#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01)
+
+/*
+ * RFCSR 23:
+ */
+#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)
+
+/*
+ * RFCSR 30:
+ */
+#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
+
+/*
+ * RF registers
+ */
+
+/*
+ * RF 2
+ */
+#define RF2_ANTENNA_RX2 FIELD32(0x00000040)
+#define RF2_ANTENNA_TX1 FIELD32(0x00004000)
+#define RF2_ANTENNA_RX1 FIELD32(0x00020000)
+
+/*
+ * RF 3
+ */
+#define RF3_TXPOWER_G FIELD32(0x00003e00)
+#define RF3_TXPOWER_A_7DBM_BOOST FIELD32(0x00000200)
+#define RF3_TXPOWER_A FIELD32(0x00003c00)
+
+/*
+ * RF 4
+ */
+#define RF4_TXPOWER_G FIELD32(0x000007c0)
+#define RF4_TXPOWER_A_7DBM_BOOST FIELD32(0x00000040)
+#define RF4_TXPOWER_A FIELD32(0x00000780)
+#define RF4_FREQ_OFFSET FIELD32(0x001f8000)
+#define RF4_HT40 FIELD32(0x00200000)
+
+/*
+ * EEPROM content.
+ * The wordsize of the EEPROM is 16 bits.
+ */
+
+/*
+ * EEPROM Version
+ */
+#define EEPROM_VERSION 0x0001
+#define EEPROM_VERSION_FAE FIELD16(0x00ff)
+#define EEPROM_VERSION_VERSION FIELD16(0xff00)
+
+/*
+ * HW MAC address.
+ */
+#define EEPROM_MAC_ADDR_0 0x0002
+#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff)
+#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00)
+#define EEPROM_MAC_ADDR_1 0x0003
+#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff)
+#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00)
+#define EEPROM_MAC_ADDR_2 0x0004
+#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff)
+#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00)
+
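
The MAC address is therefore reassembled byte pair by byte pair from three EEPROM words. A sketch, assuming the rt2x00_eeprom_read()/rt2x00_get_field16() helpers declared in rt2x00.h; the function name is made up:

static void example_read_mac_address(struct rt2x00_dev *rt2x00dev, u8 *mac)
{
	u16 word;

	rt2x00_eeprom_read(rt2x00dev, EEPROM_MAC_ADDR_0, &word);
	mac[0] = rt2x00_get_field16(word, EEPROM_MAC_ADDR_BYTE0);
	mac[1] = rt2x00_get_field16(word, EEPROM_MAC_ADDR_BYTE1);
	rt2x00_eeprom_read(rt2x00dev, EEPROM_MAC_ADDR_1, &word);
	mac[2] = rt2x00_get_field16(word, EEPROM_MAC_ADDR_BYTE2);
	mac[3] = rt2x00_get_field16(word, EEPROM_MAC_ADDR_BYTE3);
	rt2x00_eeprom_read(rt2x00dev, EEPROM_MAC_ADDR_2, &word);
	mac[4] = rt2x00_get_field16(word, EEPROM_MAC_ADDR_BYTE4);
	mac[5] = rt2x00_get_field16(word, EEPROM_MAC_ADDR_BYTE5);
}
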
+/*
+ * EEPROM ANTENNA config
+ * RXPATH: 1: 1R, 2: 2R, 3: 3R
+ * TXPATH: 1: 1T, 2: 2T
+ */
+#define EEPROM_ANTENNA 0x001a
+#define EEPROM_ANTENNA_RXPATH FIELD16(0x000f)
+#define EEPROM_ANTENNA_TXPATH FIELD16(0x00f0)
+#define EEPROM_ANTENNA_RF_TYPE FIELD16(0x0f00)
+
+/*
+ * EEPROM NIC config
+ * CARDBUS_ACCEL: 0 - enable, 1 - disable
+ */
+#define EEPROM_NIC 0x001b
+#define EEPROM_NIC_HW_RADIO FIELD16(0x0001)
+#define EEPROM_NIC_DYNAMIC_TX_AGC FIELD16(0x0002)
+#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0004)
+#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0008)
+#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0010)
+#define EEPROM_NIC_BW40M_SB_BG FIELD16(0x0020)
+#define EEPROM_NIC_BW40M_SB_A FIELD16(0x0040)
+#define EEPROM_NIC_WPS_PBC FIELD16(0x0080)
+#define EEPROM_NIC_BW40M_BG FIELD16(0x0100)
+#define EEPROM_NIC_BW40M_A FIELD16(0x0200)
+
+/*
+ * EEPROM frequency
+ */
+#define EEPROM_FREQ 0x001d
+#define EEPROM_FREQ_OFFSET FIELD16(0x00ff)
+#define EEPROM_FREQ_LED_MODE FIELD16(0x7f00)
+#define EEPROM_FREQ_LED_POLARITY FIELD16(0x1000)
+
+/*
+ * EEPROM LED
+ * POLARITY_RDY_G: Polarity RDY_G setting.
+ * POLARITY_RDY_A: Polarity RDY_A setting.
+ * POLARITY_ACT: Polarity ACT setting.
+ * POLARITY_GPIO_0: Polarity GPIO0 setting.
+ * POLARITY_GPIO_1: Polarity GPIO1 setting.
+ * POLARITY_GPIO_2: Polarity GPIO2 setting.
+ * POLARITY_GPIO_3: Polarity GPIO3 setting.
+ * POLARITY_GPIO_4: Polarity GPIO4 setting.
+ * LED_MODE: Led mode.
+ */
+#define EEPROM_LED1 0x001e
+#define EEPROM_LED2 0x001f
+#define EEPROM_LED3 0x0020
+#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001)
+#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002)
+#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004)
+#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008)
+#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010)
+#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020)
+#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040)
+#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080)
+#define EEPROM_LED_LED_MODE FIELD16(0x1f00)
+
+/*
+ * EEPROM LNA
+ */
+#define EEPROM_LNA 0x0022
+#define EEPROM_LNA_BG FIELD16(0x00ff)
+#define EEPROM_LNA_A0 FIELD16(0xff00)
+
+/*
+ * EEPROM RSSI BG offset
+ */
+#define EEPROM_RSSI_BG 0x0023
+#define EEPROM_RSSI_BG_OFFSET0 FIELD16(0x00ff)
+#define EEPROM_RSSI_BG_OFFSET1 FIELD16(0xff00)
+
+/*
+ * EEPROM RSSI BG2 offset
+ */
+#define EEPROM_RSSI_BG2 0x0024
+#define EEPROM_RSSI_BG2_OFFSET2 FIELD16(0x00ff)
+#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)
+
+/*
+ * EEPROM RSSI A offset
+ */
+#define EEPROM_RSSI_A 0x0025
+#define EEPROM_RSSI_A_OFFSET0 FIELD16(0x00ff)
+#define EEPROM_RSSI_A_OFFSET1 FIELD16(0xff00)
+
+/*
+ * EEPROM RSSI A2 offset
+ */
+#define EEPROM_RSSI_A2 0x0026
+#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff)
+#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
+
+/*
+ * EEPROM TXpower delta: 20MHz and 40MHz use different power.
+ * This is the delta for 40MHz.
+ * VALUE: TX power delta value (MAX=4)
+ * TYPE: 1: plus the delta value, 0: minus the delta value
+ * TXPOWER: Enable
+ */
+#define EEPROM_TXPOWER_DELTA 0x0028
+#define EEPROM_TXPOWER_DELTA_VALUE FIELD16(0x003f)
+#define EEPROM_TXPOWER_DELTA_TYPE FIELD16(0x0040)
+#define EEPROM_TXPOWER_DELTA_TXPOWER FIELD16(0x0080)
+
+/*
+ * EEPROM TXPOWER 802.11BG
+ */
+#define EEPROM_TXPOWER_BG1 0x0029
+#define EEPROM_TXPOWER_BG2 0x0030
+#define EEPROM_TXPOWER_BG_SIZE 7
+#define EEPROM_TXPOWER_BG_1 FIELD16(0x00ff)
+#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00)
+
+/*
+ * EEPROM TXPOWER 802.11A
+ */
+#define EEPROM_TXPOWER_A1 0x003c
+#define EEPROM_TXPOWER_A2 0x0053
+#define EEPROM_TXPOWER_A_SIZE 6
+#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff)
+#define EEPROM_TXPOWER_A_2 FIELD16(0xff00)
+
+/*
+ * EEPROM TXpower byrate: 20MHZ power
+ */
+#define EEPROM_TXPOWER_BYRATE 0x006f
+
+/*
+ * EEPROM BBP.
+ */
+#define EEPROM_BBP_START 0x0078
+#define EEPROM_BBP_SIZE 16
+#define EEPROM_BBP_VALUE FIELD16(0x00ff)
+#define EEPROM_BBP_REG_ID FIELD16(0xff00)
+
+/*
+ * MCU mailbox commands.
+ */
+#define MCU_SLEEP 0x30
+#define MCU_WAKEUP 0x31
+#define MCU_RADIO_OFF 0x35
+#define MCU_CURRENT 0x36
+#define MCU_LED 0x50
+#define MCU_LED_STRENGTH 0x51
+#define MCU_LED_1 0x52
+#define MCU_LED_2 0x53
+#define MCU_LED_3 0x54
+#define MCU_RADAR 0x60
+#define MCU_BOOT_SIGNAL 0x72
+#define MCU_BBP_SIGNAL 0x80
+#define MCU_POWER_SAVE 0x83
+
+/*
+ * MCU mailbox tokens
+ */
+#define TOKEN_WAKUP 3
+
+/*
+ * DMA descriptor defines.
+ */
+#define TXD_DESC_SIZE ( 4 * sizeof(__le32) )
+#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
+#define TXWI_DESC_SIZE ( 4 * sizeof(__le32) )
+#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
+#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
+
+/*
+ * TX descriptor format for TX, PRIO and Beacon Ring.
+ */
+
+/*
+ * Word0
+ */
+#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
+#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
+#define TXD_W1_BURST FIELD32(0x00008000)
+#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
+#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
+#define TXD_W1_DMA_DONE FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
+ * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
+ * 0:MGMT, 1:HCCA 2:EDCA
+ */
+#define TXD_W3_WIV FIELD32(0x01000000)
+#define TXD_W3_QSEL FIELD32(0x06000000)
+#define TXD_W3_TCO FIELD32(0x20000000)
+#define TXD_W3_UCO FIELD32(0x40000000)
+#define TXD_W3_ICO FIELD32(0x80000000)
+
+/*
+ * TX Info structure
+ */
+
+/*
+ * Word0
+ * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
+ * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
+ * 0:MGMT, 1:HCCA 2:EDCA
+ * USB_DMA_NEXT_VALID: Used ONLY in USB bulk Aggregation, NextValid
+ * DMA_TX_BURST: used ONLY in USB bulk Aggregation.
+ * Force USB DMA transmit frame from current selected endpoint
+ */
+#define TXINFO_W0_USB_DMA_TX_PKT_LEN FIELD32(0x0000ffff)
+#define TXINFO_W0_WIV FIELD32(0x01000000)
+#define TXINFO_W0_QSEL FIELD32(0x06000000)
+#define TXINFO_W0_SW_USE_LAST_ROUND FIELD32(0x08000000)
+#define TXINFO_W0_USB_DMA_NEXT_VALID FIELD32(0x40000000)
+#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000)
+
+/*
+ * TX WI structure
+ */
+
+/*
+ * Word0
+ * FRAG: 1 To inform TKIP engine this is a fragment.
+ * MIMO_PS: The remote peer is in dynamic MIMO-PS mode
+ * TX_OP: 0:HT TXOP rule, 1:PIFS TX, 2:Backoff, 3:SIFS
+ * BW: Channel bandwidth 20MHz or 40 MHz
+ * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED
+ */
+#define TXWI_W0_FRAG FIELD32(0x00000001)
+#define TXWI_W0_MIMO_PS FIELD32(0x00000002)
+#define TXWI_W0_CF_ACK FIELD32(0x00000004)
+#define TXWI_W0_TS FIELD32(0x00000008)
+#define TXWI_W0_AMPDU FIELD32(0x00000010)
+#define TXWI_W0_MPDU_DENSITY FIELD32(0x000000e0)
+#define TXWI_W0_TX_OP FIELD32(0x00000300)
+#define TXWI_W0_MCS FIELD32(0x007f0000)
+#define TXWI_W0_BW FIELD32(0x00800000)
+#define TXWI_W0_SHORT_GI FIELD32(0x01000000)
+#define TXWI_W0_STBC FIELD32(0x06000000)
+#define TXWI_W0_IFS FIELD32(0x08000000)
+#define TXWI_W0_PHYMODE FIELD32(0xc0000000)
+
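
Filling TXWI word 0 for an HT transmission amounts to packing the rate information into these fields. A sketch with hypothetical inputs (txwi points at the 4-word TXWI; mcs, bw40, short_gi and phymode are values the caller would derive from the TX descriptor), assuming the rt2x00 descriptor helpers rt2x00_desc_read()/rt2x00_desc_write():

static void example_fill_txwi_w0(__le32 *txwi, u8 mcs, u8 bw40,
				 u8 short_gi, u8 phymode)
{
	u32 word;

	rt2x00_desc_read(txwi, 0, &word);
	rt2x00_set_field32(&word, TXWI_W0_MCS, mcs);
	rt2x00_set_field32(&word, TXWI_W0_BW, bw40);
	rt2x00_set_field32(&word, TXWI_W0_SHORT_GI, short_gi);
	rt2x00_set_field32(&word, TXWI_W0_PHYMODE, phymode);
	rt2x00_desc_write(txwi, 0, word);
}
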
+/*
+ * Word1
+ */
+#define TXWI_W1_ACK FIELD32(0x00000001)
+#define TXWI_W1_NSEQ FIELD32(0x00000002)
+#define TXWI_W1_BW_WIN_SIZE FIELD32(0x000000fc)
+#define TXWI_W1_WIRELESS_CLI_ID FIELD32(0x0000ff00)
+#define TXWI_W1_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
+#define TXWI_W1_PACKETID FIELD32(0xf0000000)
+
+/*
+ * Word2
+ */
+#define TXWI_W2_IV FIELD32(0xffffffff)
+
+/*
+ * Word3
+ */
+#define TXWI_W3_EIV FIELD32(0xffffffff)
+
+/*
+ * RX descriptor format for RX Ring.
+ */
+
+/*
+ * Word0
+ * UNICAST_TO_ME: This RX frame is unicast to me.
+ * MULTICAST: This is a multicast frame.
+ * BROADCAST: This is a broadcast frame.
+ * MY_BSS: this frame belongs to the same BSSID.
+ * CRC_ERROR: CRC error.
+ * CIPHER_ERROR: 0: decryption okay, 1:ICV error, 2:MIC error, 3:KEY not valid.
+ * AMSDU: rx with 802.3 header, not 802.11 header.
+ */
+
+#define RXD_W0_BA FIELD32(0x00000001)
+#define RXD_W0_DATA FIELD32(0x00000002)
+#define RXD_W0_NULLDATA FIELD32(0x00000004)
+#define RXD_W0_FRAG FIELD32(0x00000008)
+#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000010)
+#define RXD_W0_MULTICAST FIELD32(0x00000020)
+#define RXD_W0_BROADCAST FIELD32(0x00000040)
+#define RXD_W0_MY_BSS FIELD32(0x00000080)
+#define RXD_W0_CRC_ERROR FIELD32(0x00000100)
+#define RXD_W0_CIPHER_ERROR FIELD32(0x00000600)
+#define RXD_W0_AMSDU FIELD32(0x00000800)
+#define RXD_W0_HTC FIELD32(0x00001000)
+#define RXD_W0_RSSI FIELD32(0x00002000)
+#define RXD_W0_L2PAD FIELD32(0x00004000)
+#define RXD_W0_AMPDU FIELD32(0x00008000)
+#define RXD_W0_DECRYPTED FIELD32(0x00010000)
+#define RXD_W0_PLCP_RSSI FIELD32(0x00020000)
+#define RXD_W0_CIPHER_ALG FIELD32(0x00040000)
+#define RXD_W0_LAST_AMSDU FIELD32(0x00080000)
+#define RXD_W0_PLCP_SIGNAL FIELD32(0xfff00000)
+
+/*
+ * RX WI structure
+ */
+
+/*
+ * Word0
+ */
+#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
+#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
+#define RXWI_W0_BSSID FIELD32(0x00001c00)
+#define RXWI_W0_UDF FIELD32(0x0000e000)
+#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
+#define RXWI_W0_TID FIELD32(0xf0000000)
+
+/*
+ * Word1
+ */
+#define RXWI_W1_FRAG FIELD32(0x0000000f)
+#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
+#define RXWI_W1_MCS FIELD32(0x007f0000)
+#define RXWI_W1_BW FIELD32(0x00800000)
+#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
+#define RXWI_W1_STBC FIELD32(0x06000000)
+#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
+
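
On the receive side the rate information travels the opposite way: word 1 of the RXWI is unpacked into the rxdone descriptor that rt2x00lib_rxdone() consumes further down in this patch. A sketch with a hypothetical rxwi pointer, assuming the same descriptor helpers, struct rxdone_entry_desc and the mac80211 RX_FLAG_* bits:

static void example_parse_rxwi_w1(__le32 *rxwi,
				  struct rxdone_entry_desc *rxdesc)
{
	u32 word;

	rt2x00_desc_read(rxwi, 1, &word);
	rxdesc->signal = rt2x00_get_field32(word, RXWI_W1_MCS);
	rxdesc->rate_mode = rt2x00_get_field32(word, RXWI_W1_PHYMODE);
	if (rt2x00_get_field32(word, RXWI_W1_SHORT_GI))
		rxdesc->flags |= RX_FLAG_SHORT_GI;
	if (rt2x00_get_field32(word, RXWI_W1_BW))
		rxdesc->flags |= RX_FLAG_40MHZ;
}
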
+/*
+ * Word2
+ */
+#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
+#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
+#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
+
+/*
+ * Word3
+ */
+#define RXWI_W3_SNR0 FIELD32(0x000000ff)
+#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
+
+/*
+ * Macros for converting txpower from EEPROM to mac80211 value
+ * and from mac80211 value to register value.
+ */
+#define MIN_G_TXPOWER 0
+#define MIN_A_TXPOWER -7
+#define MAX_G_TXPOWER 31
+#define MAX_A_TXPOWER 15
+#define DEFAULT_TXPOWER 5
+
+#define TXPOWER_G_FROM_DEV(__txpower) \
+ ((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
+
+#define TXPOWER_G_TO_DEV(__txpower) \
+ clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER)
+
+#define TXPOWER_A_FROM_DEV(__txpower) \
+ ((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
+
+#define TXPOWER_A_TO_DEV(__txpower) \
+ clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
+
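
A short usage sketch for these conversion macros, assuming the rt2x00_eeprom_read()/rt2x00_get_field16() helpers and clamp_t() from <linux/kernel.h>; the EEPROM word chosen here is simply the first BG txpower pair defined earlier and the function name is made up:

static char example_txpower_g(struct rt2x00_dev *rt2x00dev)
{
	u16 word;
	char txpower;

	/* EEPROM byte -> mac80211 value, with out-of-range fallback. */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BG1, &word);
	txpower = rt2x00_get_field16(word, EEPROM_TXPOWER_BG_1);
	txpower = TXPOWER_G_FROM_DEV(txpower);

	/* mac80211 value -> register value, clamped to the valid range. */
	return TXPOWER_G_TO_DEV(txpower);
}
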
+#endif /* RT2800USB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 84bd6f19acb..a498dde024e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -103,6 +103,15 @@
#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate))
/*
+ * Determine the alignment requirement:
+ * to make sure the 802.11 payload is padded to a 4-byte boundary
+ * we must determine the address of the payload and calculate the
+ * number of bytes needed to move the data.
+ */
+#define ALIGN_SIZE(__skb, __header) \
+ ( ((unsigned long)((__skb)->data + (__header))) & 3 )
+
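
A worked example of what this macro computes (skb and header_length are hypothetical inputs; struct sk_buff comes from <linux/skbuff.h>):

static unsigned int example_payload_align(struct sk_buff *skb,
					  unsigned int header_length)
{
	/*
	 * e.g. skb->data ends in ...2 and header_length == 24:
	 * the payload starts at ...1a, so this returns 2 and either
	 * 2 bytes of L2 padding must be inserted or the payload
	 * moved by 2 bytes to reach a 4-byte boundary.
	 */
	return ALIGN_SIZE(skb, header_length);
}
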
+/*
* Standard timing and size defines.
* These values should follow the ieee80211 specifications.
*/
@@ -138,6 +147,7 @@ struct rt2x00_chip {
#define RT2561 0x0302
#define RT2661 0x0401
#define RT2571 0x1300
+#define RT2870 0x1600
u16 rf;
u32 rev;
@@ -357,6 +367,7 @@ static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
* for @tx_power_a, @tx_power_bg and @channels.
* @channels: Device/chipset specific channel values (See &struct rf_channel).
* @channels_info: Additional information for channels (See &struct channel_info).
+ * @ht: Driver HT Capabilities (See &ieee80211_sta_ht_cap).
*/
struct hw_mode_spec {
unsigned int supported_bands;
@@ -370,6 +381,8 @@ struct hw_mode_spec {
unsigned int num_channels;
const struct rf_channel *channels;
const struct channel_info *channels_info;
+
+ struct ieee80211_sta_ht_cap ht;
};
/*
@@ -404,6 +417,8 @@ struct rt2x00lib_erp {
short pifs;
short difs;
short eifs;
+
+ u16 beacon_int;
};
/*
@@ -590,6 +605,7 @@ enum rt2x00_flags {
DRIVER_REQUIRE_SCHEDULED,
DRIVER_REQUIRE_DMA,
DRIVER_REQUIRE_COPY_IV,
+ DRIVER_REQUIRE_L2PAD,
/*
* Driver features
@@ -606,6 +622,7 @@ enum rt2x00_flags {
CONFIG_EXTERNAL_LNA_BG,
CONFIG_DOUBLE_ANTENNA,
CONFIG_DISABLE_LINK_TUNING,
+ CONFIG_CHANNEL_HT40,
};
/*
@@ -672,6 +689,12 @@ struct rt2x00_dev {
unsigned long flags;
/*
+ * Device information, Bus IRQ and name (PCI, SoC)
+ */
+ int irq;
+ const char *name;
+
+ /*
* Chipset identification.
*/
struct rt2x00_chip chip;
@@ -772,6 +795,18 @@ struct rt2x00_dev {
u8 freq_offset;
/*
+ * Calibration information (for rt2800usb & rt2800pci).
+ * [0] -> BW20
+ * [1] -> BW40
+ */
+ u8 calibration[2];
+
+ /*
+ * Beacon interval.
+ */
+ u16 beacon_int;
+
+ /*
* Low level statistics which will have
* to be kept up to date while device is running.
*/
@@ -860,6 +895,18 @@ static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev,
rt2x00dev->chip.rev = rev;
}
+static inline void rt2x00_set_chip_rt(struct rt2x00_dev *rt2x00dev,
+ const u16 rt)
+{
+ rt2x00dev->chip.rt = rt;
+}
+
+static inline void rt2x00_set_chip_rf(struct rt2x00_dev *rt2x00dev,
+ const u16 rf, const u32 rev)
+{
+ rt2x00_set_chip(rt2x00dev, rt2x00dev->chip.rt, rf, rev);
+}
+
static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip)
{
return (chipset->rt == chip);
@@ -875,11 +922,10 @@ static inline u32 rt2x00_rev(const struct rt2x00_chip *chipset)
return chipset->rev;
}
-static inline u16 rt2x00_check_rev(const struct rt2x00_chip *chipset,
- const u32 rev)
+static inline bool rt2x00_check_rev(const struct rt2x00_chip *chipset,
+ const u32 mask, const u32 rev)
{
- return (((chipset->rev & 0xffff0) == rev) &&
- !!(chipset->rev & 0x0000f));
+ return ((chipset->rev & mask) == rev);
}
/**
@@ -925,9 +971,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_if_init_conf *conf);
int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed);
-int rt2x00mac_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf);
void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 9c2f5517af2..3e019a12df2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -106,6 +106,10 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
}
erp.basic_rates = bss_conf->basic_rates;
+ erp.beacon_int = bss_conf->beacon_int;
+
+ /* Update global beacon interval time, this is needed for PS support */
+ rt2x00dev->beacon_int = bss_conf->beacon_int;
rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp);
}
@@ -173,6 +177,11 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
libconf.conf = conf;
if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) {
+ if (conf_is_ht40(conf))
+ __set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
+ else
+ __clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
+
memcpy(&libconf.rf,
&rt2x00dev->spec.channels[conf->channel->hw_value],
sizeof(libconf.rf));
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 0b41845d954..bc4e81e2184 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -33,7 +33,7 @@ enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
{
switch (key->alg) {
case ALG_WEP:
- if (key->keylen == LEN_WEP40)
+ if (key->keylen == WLAN_KEY_LEN_WEP40)
return CIPHER_WEP64;
else
return CIPHER_WEP128;
@@ -65,7 +65,8 @@ void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
__set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);
txdesc->key_idx = hw_key->hw_key_idx;
- txdesc->iv_offset = ieee80211_get_hdrlen_from_skb(entry->skb);
+ txdesc->iv_offset = txdesc->header_length;
+ txdesc->iv_len = hw_key->iv_len;
if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
__set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);
@@ -103,47 +104,44 @@ unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
return overhead;
}
-void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, unsigned int iv_len)
+void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
- if (unlikely(!iv_len))
+ if (unlikely(!txdesc->iv_len))
return;
/* Copy IV/EIV data */
- memcpy(skbdesc->iv, skb->data + header_length, iv_len);
+ memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len);
}
-void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len)
+void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
- if (unlikely(!iv_len))
+ if (unlikely(!txdesc->iv_len))
return;
/* Copy IV/EIV data */
- memcpy(skbdesc->iv, skb->data + header_length, iv_len);
+ memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len);
/* Move ieee80211 header */
- memmove(skb->data + iv_len, skb->data, header_length);
+ memmove(skb->data + txdesc->iv_len, skb->data, txdesc->iv_offset);
/* Pull buffer to correct size */
- skb_pull(skb, iv_len);
+ skb_pull(skb, txdesc->iv_len);
/* IV/EIV data has officially be stripped */
- skbdesc->flags |= FRAME_DESC_IV_STRIPPED;
+ skbdesc->flags |= SKBDESC_IV_STRIPPED;
}
-void rt2x00crypto_tx_insert_iv(struct sk_buff *skb)
+void rt2x00crypto_tx_insert_iv(struct sk_buff *skb, unsigned int header_length)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
const unsigned int iv_len =
((!!(skbdesc->iv[0])) * 4) + ((!!(skbdesc->iv[1])) * 4);
- if (!(skbdesc->flags & FRAME_DESC_IV_STRIPPED))
+ if (!(skbdesc->flags & SKBDESC_IV_STRIPPED))
return;
skb_push(skb, iv_len);
@@ -155,14 +153,15 @@ void rt2x00crypto_tx_insert_iv(struct sk_buff *skb)
memcpy(skb->data + header_length, skbdesc->iv, iv_len);
/* IV/EIV data has returned into the frame */
- skbdesc->flags &= ~FRAME_DESC_IV_STRIPPED;
+ skbdesc->flags &= ~SKBDESC_IV_STRIPPED;
}
-void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
+void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, bool l2pad,
unsigned int header_length,
struct rxdone_entry_desc *rxdesc)
{
unsigned int payload_len = rxdesc->size - header_length;
+ unsigned int align = ALIGN_SIZE(skb, header_length);
unsigned int iv_len;
unsigned int icv_len;
unsigned int transfer = 0;
@@ -192,32 +191,48 @@ void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
}
/*
- * Make room for new data, note that we increase both
- * headsize and tailsize when required. The tailsize is
- * only needed when ICV data needs to be inserted and
- * the padding is smaller than the ICV data.
- * When alignment requirements is greater than the
- * ICV data we must trim the skb to the correct size
- * because we need to remove the extra bytes.
+ * Make room for new data. There are 2 possibilities:
+ * either the alignment is already present between
+ * the 802.11 header and payload, in which case we
+ * have to move the header less than the iv_len,
+ * since we can use the already available l2pad bytes
+ * for the iv data.
+ * When the alignment must be added manually we must
+ * move the header more than iv_len, since we must
+ * make room for the payload move as well.
*/
- skb_push(skb, iv_len + align);
- if (align < icv_len)
- skb_put(skb, icv_len - align);
- else if (align > icv_len)
- skb_trim(skb, rxdesc->size + iv_len + icv_len);
+ if (l2pad) {
+ skb_push(skb, iv_len - align);
+ skb_put(skb, icv_len);
- /* Move ieee80211 header */
- memmove(skb->data + transfer,
- skb->data + transfer + iv_len + align,
- header_length);
- transfer += header_length;
+ /* Move ieee80211 header */
+ memmove(skb->data + transfer,
+ skb->data + transfer + (iv_len - align),
+ header_length);
+ transfer += header_length;
+ } else {
+ skb_push(skb, iv_len + align);
+ if (align < icv_len)
+ skb_put(skb, icv_len - align);
+ else if (align > icv_len)
+ skb_trim(skb, rxdesc->size + iv_len + icv_len);
+
+ /* Move ieee80211 header */
+ memmove(skb->data + transfer,
+ skb->data + transfer + iv_len + align,
+ header_length);
+ transfer += header_length;
+ }
/* Copy IV/EIV data */
memcpy(skb->data + transfer, rxdesc->iv, iv_len);
transfer += iv_len;
- /* Move payload */
- if (align) {
+ /*
+ * Move payload for alignment purposes. Note that
+ * this is only needed when no l2 padding is present.
+ */
+ if (!l2pad) {
memmove(skb->data + transfer,
skb->data + transfer + align,
payload_len);
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 07d378ef0b4..7b3ee8c2eae 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -138,7 +138,7 @@ void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
if (cipher == CIPHER_TKIP_NO_MIC)
cipher = CIPHER_TKIP;
- if (cipher == CIPHER_NONE || cipher > CIPHER_MAX)
+ if (cipher == CIPHER_NONE || cipher >= CIPHER_MAX)
return;
/* Remove CIPHER_NONE index */
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 5752aaae906..57813e72c80 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -227,6 +227,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
+ unsigned int header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
u8 rate_idx, rate_flags;
/*
@@ -235,13 +236,19 @@ void rt2x00lib_txdone(struct queue_entry *entry,
rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
/*
+ * Remove the L2 padding which was added when the frame was queued for TX.
+ */
+ if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
+ rt2x00queue_payload_align(entry->skb, true, header_length);
+
+ /*
* If the IV/EIV data was stripped from the frame before it was
* passed to the hardware, we should now reinsert it again because
* mac80211 will expect the same data to be present in the
* frame as it was passed to us.
*/
if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
- rt2x00crypto_tx_insert_iv(entry->skb);
+ rt2x00crypto_tx_insert_iv(entry->skb, header_length);
/*
* Send frame to debugfs immediately, after this call is completed
@@ -253,7 +260,8 @@ void rt2x00lib_txdone(struct queue_entry *entry,
* Update TX statistics.
*/
rt2x00dev->link.qual.tx_success +=
- test_bit(TXDONE_SUCCESS, &txdesc->flags);
+ test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
+ test_bit(TXDONE_UNKNOWN, &txdesc->flags);
rt2x00dev->link.qual.tx_failed +=
test_bit(TXDONE_FAILURE, &txdesc->flags);
@@ -271,14 +279,16 @@ void rt2x00lib_txdone(struct queue_entry *entry,
tx_info->status.rates[1].idx = -1; /* terminate */
if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- if (test_bit(TXDONE_SUCCESS, &txdesc->flags))
+ if (test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
+ test_bit(TXDONE_UNKNOWN, &txdesc->flags))
tx_info->flags |= IEEE80211_TX_STAT_ACK;
else if (test_bit(TXDONE_FAILURE, &txdesc->flags))
rt2x00dev->low_level_stats.dot11ACKFailureCount++;
}
if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
- if (test_bit(TXDONE_SUCCESS, &txdesc->flags))
+ if (test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
+ test_bit(TXDONE_UNKNOWN, &txdesc->flags))
rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
else if (test_bit(TXDONE_FAILURE, &txdesc->flags))
rt2x00dev->low_level_stats.dot11RTSFailureCount++;
@@ -316,19 +326,54 @@ void rt2x00lib_txdone(struct queue_entry *entry,
}
EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
+static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
+ struct rxdone_entry_desc *rxdesc)
+{
+ struct ieee80211_supported_band *sband;
+ const struct rt2x00_rate *rate;
+ unsigned int i;
+ int signal;
+ int type;
+
+ /*
+ * For non-HT rates the MCS value needs to contain the
+ * actually used rate modulation (CCK or OFDM).
+ */
+ if (rxdesc->dev_flags & RXDONE_SIGNAL_MCS)
+ signal = RATE_MCS(rxdesc->rate_mode, rxdesc->signal);
+ else
+ signal = rxdesc->signal;
+
+ type = (rxdesc->dev_flags & RXDONE_SIGNAL_MASK);
+
+ sband = &rt2x00dev->bands[rt2x00dev->curr_band];
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = rt2x00_get_rate(sband->bitrates[i].hw_value);
+
+ if (((type == RXDONE_SIGNAL_PLCP) &&
+ (rate->plcp == signal)) ||
+ ((type == RXDONE_SIGNAL_BITRATE) &&
+ (rate->bitrate == signal)) ||
+ ((type == RXDONE_SIGNAL_MCS) &&
+ (rate->mcs == signal))) {
+ return i;
+ }
+ }
+
+ WARNING(rt2x00dev, "Frame received with unrecognized signal, "
+ "signal=0x%.4x, type=%d.\n", signal, type);
+ return 0;
+}
+
void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
struct queue_entry *entry)
{
struct rxdone_entry_desc rxdesc;
struct sk_buff *skb;
struct ieee80211_rx_status *rx_status = &rt2x00dev->rx_status;
- struct ieee80211_supported_band *sband;
- const struct rt2x00_rate *rate;
unsigned int header_length;
- unsigned int align;
- unsigned int i;
- int idx = -1;
-
+ bool l2pad;
+ int rate_idx;
/*
* Allocate a new sk_buffer. If no new buffer available, drop the
* received frame and reuse the existing buffer.
@@ -348,12 +393,15 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
memset(&rxdesc, 0, sizeof(rxdesc));
rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
+ /* Trim buffer to correct size */
+ skb_trim(entry->skb, rxdesc.size);
+
/*
* The data behind the ieee80211 header must be
* aligned on a 4 byte boundary.
*/
header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
- align = ((unsigned long)(entry->skb->data + header_length)) & 3;
+ l2pad = !!(rxdesc.dev_flags & RXDONE_L2PAD);
/*
* Hardware might have stripped the IV/EIV/ICV data,
@@ -362,40 +410,24 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
* in which case we should reinsert the data into the frame.
*/
if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) &&
- (rxdesc.flags & RX_FLAG_IV_STRIPPED)) {
- rt2x00crypto_rx_insert_iv(entry->skb, align,
- header_length, &rxdesc);
- } else if (align) {
- skb_push(entry->skb, align);
- /* Move entire frame in 1 command */
- memmove(entry->skb->data, entry->skb->data + align,
- rxdesc.size);
- }
-
- /* Update data pointers, trim buffer to correct size */
- skb_trim(entry->skb, rxdesc.size);
+ (rxdesc.flags & RX_FLAG_IV_STRIPPED))
+ rt2x00crypto_rx_insert_iv(entry->skb, l2pad, header_length,
+ &rxdesc);
+ else
+ rt2x00queue_payload_align(entry->skb, l2pad, header_length);
/*
- * Update RX statistics.
+ * Check if the frame was received using HT. In that case,
+ * the rate is the MCS index and should be passed to mac80211
+ * directly. Otherwise we need to translate the signal to
+ * the correct bitrate index.
*/
- sband = &rt2x00dev->bands[rt2x00dev->curr_band];
- for (i = 0; i < sband->n_bitrates; i++) {
- rate = rt2x00_get_rate(sband->bitrates[i].hw_value);
-
- if (((rxdesc.dev_flags & RXDONE_SIGNAL_PLCP) &&
- (rate->plcp == rxdesc.signal)) ||
- ((rxdesc.dev_flags & RXDONE_SIGNAL_BITRATE) &&
- (rate->bitrate == rxdesc.signal))) {
- idx = i;
- break;
- }
- }
-
- if (idx < 0) {
- WARNING(rt2x00dev, "Frame received with unrecognized signal,"
- "signal=0x%.2x, type=%d.\n", rxdesc.signal,
- (rxdesc.dev_flags & RXDONE_SIGNAL_MASK));
- idx = 0;
+ if (rxdesc.rate_mode == RATE_MODE_CCK ||
+ rxdesc.rate_mode == RATE_MODE_OFDM) {
+ rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc);
+ } else {
+ rxdesc.flags |= RX_FLAG_HT;
+ rate_idx = rxdesc.signal;
}
/*
@@ -405,7 +437,7 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
rt2x00debug_update_crypto(rt2x00dev, &rxdesc);
rx_status->mactime = rxdesc.timestamp;
- rx_status->rate_idx = idx;
+ rx_status->rate_idx = rate_idx;
rx_status->qual = rt2x00link_calculate_signal(rt2x00dev, rxdesc.rssi);
rx_status->signal = rxdesc.rssi;
rx_status->noise = rxdesc.noise;
@@ -440,72 +472,84 @@ const struct rt2x00_rate rt2x00_supported_rates[12] = {
.bitrate = 10,
.ratemask = BIT(0),
.plcp = 0x00,
+ .mcs = RATE_MCS(RATE_MODE_CCK, 0),
},
{
.flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE,
.bitrate = 20,
.ratemask = BIT(1),
.plcp = 0x01,
+ .mcs = RATE_MCS(RATE_MODE_CCK, 1),
},
{
.flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE,
.bitrate = 55,
.ratemask = BIT(2),
.plcp = 0x02,
+ .mcs = RATE_MCS(RATE_MODE_CCK, 2),
},
{
.flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE,
.bitrate = 110,
.ratemask = BIT(3),
.plcp = 0x03,
+ .mcs = RATE_MCS(RATE_MODE_CCK, 3),
},
{
.flags = DEV_RATE_OFDM,
.bitrate = 60,
.ratemask = BIT(4),
.plcp = 0x0b,
+ .mcs = RATE_MCS(RATE_MODE_OFDM, 0),
},
{
.flags = DEV_RATE_OFDM,
.bitrate = 90,
.ratemask = BIT(5),
.plcp = 0x0f,
+ .mcs = RATE_MCS(RATE_MODE_OFDM, 1),
},
{
.flags = DEV_RATE_OFDM,
.bitrate = 120,
.ratemask = BIT(6),
.plcp = 0x0a,
+ .mcs = RATE_MCS(RATE_MODE_OFDM, 2),
},
{
.flags = DEV_RATE_OFDM,
.bitrate = 180,
.ratemask = BIT(7),
.plcp = 0x0e,
+ .mcs = RATE_MCS(RATE_MODE_OFDM, 3),
},
{
.flags = DEV_RATE_OFDM,
.bitrate = 240,
.ratemask = BIT(8),
.plcp = 0x09,
+ .mcs = RATE_MCS(RATE_MODE_OFDM, 4),
},
{
.flags = DEV_RATE_OFDM,
.bitrate = 360,
.ratemask = BIT(9),
.plcp = 0x0d,
+ .mcs = RATE_MCS(RATE_MODE_OFDM, 5),
},
{
.flags = DEV_RATE_OFDM,
.bitrate = 480,
.ratemask = BIT(10),
.plcp = 0x08,
+ .mcs = RATE_MCS(RATE_MODE_OFDM, 6),
},
{
.flags = DEV_RATE_OFDM,
.bitrate = 540,
.ratemask = BIT(11),
.plcp = 0x0c,
+ .mcs = RATE_MCS(RATE_MODE_OFDM, 7),
},
};
@@ -581,6 +625,8 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
rt2x00dev->bands[IEEE80211_BAND_2GHZ].bitrates = rates;
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&rt2x00dev->bands[IEEE80211_BAND_2GHZ];
+ memcpy(&rt2x00dev->bands[IEEE80211_BAND_2GHZ].ht_cap,
+ &spec->ht, sizeof(spec->ht));
}
/*
@@ -597,6 +643,8 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
rt2x00dev->bands[IEEE80211_BAND_5GHZ].bitrates = &rates[4];
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&rt2x00dev->bands[IEEE80211_BAND_5GHZ];
+ memcpy(&rt2x00dev->bands[IEEE80211_BAND_5GHZ].ht_cap,
+ &spec->ht, sizeof(spec->ht));
}
return 0;
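
Editor's note on the RX path change above: the new rt2x00lib_rxdone_read_signal() helper replaces the open-coded loop with a table walk keyed on the signal type the hardware reported (PLCP value, bitrate, or MCS). Below is a minimal, self-contained sketch of that lookup strategy; the struct layout, flag names, and table values are simplified stand-ins for illustration, not the kernel definitions.

/* Simplified sketch of the signal-to-rate-index lookup performed by
 * rt2x00lib_rxdone_read_signal(); types and table entries are invented. */
#include <stdio.h>

enum signal_type { SIGNAL_PLCP, SIGNAL_BITRATE, SIGNAL_MCS };

struct rate {
	int plcp;     /* PLCP signal value */
	int bitrate;  /* rate in 100 kbit/s units */
	int mcs;      /* packed mode/MCS value */
};

static const struct rate rates[] = {
	{ 0x00,  10, 0x0000 },  /* 1 Mbps CCK  */
	{ 0x0b,  60, 0x0100 },  /* 6 Mbps OFDM */
	{ 0x0f,  90, 0x0101 },  /* 9 Mbps OFDM */
};

static int rate_index(enum signal_type type, int signal)
{
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		if ((type == SIGNAL_PLCP && rates[i].plcp == signal) ||
		    (type == SIGNAL_BITRATE && rates[i].bitrate == signal) ||
		    (type == SIGNAL_MCS && rates[i].mcs == signal))
			return i;
	}

	/* Unknown signal: warn and fall back to index 0, as the driver does. */
	fprintf(stderr, "unrecognized signal %d (type %d)\n", signal, type);
	return 0;
}

int main(void)
{
	printf("bitrate 90 -> index %d\n", rate_index(SIGNAL_BITRATE, 90));
	printf("plcp 0x0b  -> index %d\n", rate_index(SIGNAL_PLCP, 0x0b));
	return 0;
}
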
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
new file mode 100644
index 00000000000..e3cec839e54
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -0,0 +1,69 @@
+/*
+ Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+ <http://rt2x00.serialmonkey.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the
+ Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ Module: rt2x00lib
+ Abstract: rt2x00 HT specific routines.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "rt2x00.h"
+#include "rt2x00lib.h"
+
+void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
+ struct txentry_desc *txdesc,
+ const struct rt2x00_rate *hwrate)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+ struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+
+ if (tx_info->control.sta)
+ txdesc->mpdu_density =
+ tx_info->control.sta->ht_cap.ampdu_density;
+ else
+ txdesc->mpdu_density = 0;
+
+ txdesc->ba_size = 7; /* FIXME: What value is needed? */
+ txdesc->stbc = 0; /* FIXME: What value is needed? */
+
+ txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
+ if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ txdesc->mcs |= 0x08;
+
+ /*
+ * Convert flags
+ */
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+ __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
+
+ /*
+ * Determine HT Mix/Greenfield rate mode
+ */
+ if (txrate->flags & IEEE80211_TX_RC_MCS)
+ txdesc->rate_mode = RATE_MODE_HT_MIX;
+ if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
+ if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
+ if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
+ __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
+}
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index a631613177d..0bf2715fa93 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -32,8 +32,8 @@
* Interval defines
* Both the link tuner as the rfkill will be called once per second.
*/
-#define LINK_TUNE_INTERVAL ( round_jiffies_relative(HZ) )
-#define RFKILL_POLL_INTERVAL ( 1000 )
+#define LINK_TUNE_INTERVAL round_jiffies_relative(HZ)
+#define RFKILL_POLL_INTERVAL 1000
/*
* rt2x00_rate: Per rate device information
@@ -48,6 +48,7 @@ struct rt2x00_rate {
unsigned short ratemask;
unsigned short plcp;
+ unsigned short mcs;
};
extern const struct rt2x00_rate rt2x00_supported_rates[12];
@@ -57,6 +58,14 @@ static inline const struct rt2x00_rate *rt2x00_get_rate(const u16 hw_value)
return &rt2x00_supported_rates[hw_value & 0xff];
}
+#define RATE_MCS(__mode, __mcs) \
+ ( (((__mode) & 0x00ff) << 8) | ((__mcs) & 0x00ff) )
+
+static inline int rt2x00_get_rate_mcs(const u16 mcs_value)
+{
+ return (mcs_value & 0x00ff);
+}
+
/*
* Radio control handlers.
*/
@@ -113,6 +122,23 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
/**
+ * rt2x00queue_payload_align - Align 802.11 payload to 4-byte boundary
+ * @skb: The skb to align
+ * @l2pad: Should L2 padding be used
+ * @header_length: Length of 802.11 header
+ *
+ * This function prepares the @skb to be sent to the device or mac80211.
+ * If @l2pad is set to true, padding will occur between the 802.11 header
+ * and payload. Otherwise the padding will be done in front of the 802.11
+ * header.
+ * When @l2pad is set the function will check for the &SKBDESC_L2_PADDED
+ * flag in &skb_frame_desc. If that flag is set, the padding is removed
+ * and the flag cleared. Otherwise the padding is added and the flag is set.
+ */
+void rt2x00queue_payload_align(struct sk_buff *skb,
+ bool l2pad, unsigned int header_length);
+
+/**
* rt2x00queue_write_tx_frame - Write TX frame to hardware
* @queue: Queue over which the frame should be send
* @skb: The skb to send
@@ -235,7 +261,7 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna);
* @rt2x00dev: Pointer to &struct rt2x00_dev.
*
* Initialize work structure and all link tuning related
- * paramters. This will not start the link tuning process itself.
+ * parameters. This will not start the link tuning process itself.
*/
void rt2x00link_register(struct rt2x00_dev *rt2x00dev);
@@ -295,10 +321,12 @@ void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
struct txentry_desc *txdesc);
unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb);
-void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, unsigned int iv_len);
-void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len);
-void rt2x00crypto_tx_insert_iv(struct sk_buff *skb);
-void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
+void rt2x00crypto_tx_copy_iv(struct sk_buff *skb,
+ struct txentry_desc *txdesc);
+void rt2x00crypto_tx_remove_iv(struct sk_buff *skb,
+ struct txentry_desc *txdesc);
+void rt2x00crypto_tx_insert_iv(struct sk_buff *skb, unsigned int header_length);
+void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, bool l2pad,
unsigned int header_length,
struct rxdone_entry_desc *rxdesc);
#else
@@ -319,21 +347,21 @@ static inline unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev
}
static inline void rt2x00crypto_tx_copy_iv(struct sk_buff *skb,
- unsigned int iv_len)
+ struct txentry_desc *txdesc)
{
}
static inline void rt2x00crypto_tx_remove_iv(struct sk_buff *skb,
- unsigned int iv_len)
+ struct txentry_desc *txdesc)
{
}
-static inline void rt2x00crypto_tx_insert_iv(struct sk_buff *skb)
+static inline void rt2x00crypto_tx_insert_iv(struct sk_buff *skb,
+ unsigned int header_length)
{
}
-static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
- unsigned int align,
+static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, bool l2pad,
unsigned int header_length,
struct rxdone_entry_desc *rxdesc)
{
@@ -341,6 +369,21 @@ static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
/*
+ * HT handlers.
+ */
+#ifdef CONFIG_RT2X00_LIB_HT
+void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
+ struct txentry_desc *txdesc,
+ const struct rt2x00_rate *hwrate);
+#else
+static inline void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
+ struct txentry_desc *txdesc,
+ const struct rt2x00_rate *hwrate)
+{
+}
+#endif /* CONFIG_RT2X00_LIB_HT */
+
+/*
* RFkill handlers.
*/
#ifdef CONFIG_RT2X00_LIB_RFKILL
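
Editor's note: the RATE_MCS() macro added to rt2x00lib.h above packs the rate modulation into the upper byte and the MCS index into the lower byte of a 16-bit value, and rt2x00_get_rate_mcs() recovers the MCS part. The user-space sketch below shows the same round trip; the RATE_MODE_* constants and the get_rate_mode() helper are placeholders added only for illustration.

/* Stand-alone illustration of the RATE_MCS()/rt2x00_get_rate_mcs()
 * encoding; RATE_MODE_* values and get_rate_mode() are placeholders. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RATE_MODE_CCK   0
#define RATE_MODE_OFDM  1

#define RATE_MCS(__mode, __mcs) \
	((((__mode) & 0x00ff) << 8) | ((__mcs) & 0x00ff))

static inline int get_rate_mcs(uint16_t mcs_value)
{
	return mcs_value & 0x00ff;        /* low byte: MCS index */
}

static inline int get_rate_mode(uint16_t mcs_value)
{
	return (mcs_value >> 8) & 0x00ff; /* high byte: modulation */
}

int main(void)
{
	uint16_t packed = RATE_MCS(RATE_MODE_OFDM, 5); /* 36 Mbps entry */

	assert(get_rate_mcs(packed) == 5);
	assert(get_rate_mode(packed) == RATE_MODE_OFDM);
	printf("packed=0x%04x mcs=%d mode=%d\n",
	       (unsigned)packed, get_rate_mcs(packed), get_rate_mode(packed));
	return 0;
}
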
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 7eb5cd7e5f3..eb9b981b913 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -387,7 +387,7 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
rt2x00link_antenna_reset(rt2x00dev);
}
-void rt2x00link_reset_qual(struct rt2x00_dev *rt2x00dev)
+static void rt2x00link_reset_qual(struct rt2x00_dev *rt2x00dev)
{
struct link_qual *qual = &rt2x00dev->link.qual;
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index c41a0b9e473..c4c06b4e1f0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -390,56 +390,6 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
}
EXPORT_SYMBOL_GPL(rt2x00mac_config);
-int rt2x00mac_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
- struct rt2x00_intf *intf = vif_to_intf(vif);
- int update_bssid = 0;
- int status = 0;
-
- /*
- * Mac80211 might be calling this function while we are trying
- * to remove the device or perhaps suspending it.
- */
- if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
- return 0;
-
- spin_lock(&intf->lock);
-
- /*
- * conf->bssid can be NULL if coming from the internal
- * beacon update routine.
- */
- if (conf->changed & IEEE80211_IFCC_BSSID && conf->bssid) {
- update_bssid = 1;
- memcpy(&intf->bssid, conf->bssid, ETH_ALEN);
- }
-
- spin_unlock(&intf->lock);
-
- /*
- * Call rt2x00_config_intf() outside of the spinlock context since
- * the call will sleep for USB drivers. By using the ieee80211_if_conf
- * values as arguments we make keep access to rt2x00_intf thread safe
- * even without the lock.
- */
- rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
- update_bssid ? conf->bssid : NULL);
-
- /*
- * Update the beacon.
- */
- if (conf->changed & (IEEE80211_IFCC_BEACON |
- IEEE80211_IFCC_BEACON_ENABLED))
- status = rt2x00queue_update_beacon(rt2x00dev, vif,
- conf->enable_beacon);
-
- return status;
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_config_interface);
-
void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
@@ -623,6 +573,44 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct rt2x00_dev *rt2x00dev = hw->priv;
struct rt2x00_intf *intf = vif_to_intf(vif);
unsigned int delayed = 0;
+ int update_bssid = 0;
+
+ /*
+ * Mac80211 might be calling this function while we are trying
+ * to remove the device or perhaps suspending it.
+ */
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return;
+
+ spin_lock(&intf->lock);
+
+ /*
+ * conf->bssid can be NULL if coming from the internal
+ * beacon update routine.
+ */
+ if (changes & BSS_CHANGED_BSSID) {
+ update_bssid = 1;
+ memcpy(&intf->bssid, bss_conf->bssid, ETH_ALEN);
+ }
+
+ spin_unlock(&intf->lock);
+
+ /*
+ * Call rt2x00_config_intf() outside of the spinlock context since
+ * the call will sleep for USB drivers. By using the bss_conf
+ * values as arguments we keep access to rt2x00_intf thread safe
+ * even without the lock.
+ */
+ if (changes & BSS_CHANGED_BSSID)
+ rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
+ update_bssid ? bss_conf->bssid : NULL);
+
+ /*
+ * Update the beacon.
+ */
+ if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED))
+ rt2x00queue_update_beacon(rt2x00dev, vif,
+ bss_conf->enable_beacon);
/*
* When the association status has changed we must reset the link
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 9730b4f8fd2..cdd5154bd4c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -170,7 +170,6 @@ static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
- struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev);
struct data_queue *queue;
int status;
@@ -186,11 +185,11 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
/*
* Register interrupt handler.
*/
- status = request_irq(pci_dev->irq, rt2x00dev->ops->lib->irq_handler,
- IRQF_SHARED, pci_name(pci_dev), rt2x00dev);
+ status = request_irq(rt2x00dev->irq, rt2x00dev->ops->lib->irq_handler,
+ IRQF_SHARED, rt2x00dev->name, rt2x00dev);
if (status) {
ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
- pci_dev->irq, status);
+ rt2x00dev->irq, status);
goto exit;
}
@@ -270,6 +269,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
struct ieee80211_hw *hw;
struct rt2x00_dev *rt2x00dev;
int retval;
+ u16 chip;
retval = pci_request_regions(pci_dev, pci_name(pci_dev));
if (retval) {
@@ -307,6 +307,14 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
rt2x00dev->dev = &pci_dev->dev;
rt2x00dev->ops = ops;
rt2x00dev->hw = hw;
+ rt2x00dev->irq = pci_dev->irq;
+ rt2x00dev->name = pci_name(pci_dev);
+
+ /*
+ * Determine RT chipset by reading PCI header.
+ */
+ pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip);
+ rt2x00_set_chip_rt(rt2x00dev, chip);
retval = rt2x00pci_alloc_reg(rt2x00dev);
if (retval)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a5664bd8493..44e5b3279ca 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -148,6 +148,35 @@ void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
dev_kfree_skb_any(skb);
}
+void rt2x00queue_payload_align(struct sk_buff *skb,
+ bool l2pad, unsigned int header_length)
+{
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+ unsigned int frame_length = skb->len;
+ unsigned int align = ALIGN_SIZE(skb, header_length);
+
+ if (!align)
+ return;
+
+ if (l2pad) {
+ if (skbdesc->flags & SKBDESC_L2_PADDED) {
+ /* Remove L2 padding */
+ memmove(skb->data + align, skb->data, header_length);
+ skb_pull(skb, align);
+ skbdesc->flags &= ~SKBDESC_L2_PADDED;
+ } else {
+ /* Add L2 padding */
+ skb_push(skb, align);
+ memmove(skb->data, skb->data + align, header_length);
+ skbdesc->flags |= SKBDESC_L2_PADDED;
+ }
+ } else {
+ /* Generic payload alignment to 4-byte boundary */
+ skb_push(skb, align);
+ memmove(skb->data, skb->data + align, frame_length);
+ }
+}
+
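
Editor's note: the rt2x00queue_payload_align() helper above either adds or strips padding between the 802.11 header and the payload so the payload starts on a 4-byte boundary (L2 padding). The sketch below demonstrates the basic idea on a flat byte buffer: compute how many pad bytes a given header length needs and shift the payload accordingly. It is a conceptual illustration with invented helper names, not the skb-based kernel logic.

/* Conceptual demo of L2 padding: insert pad bytes between the 802.11
 * header and the payload so the payload offset is a multiple of 4.
 * Helper names and the flat-buffer model are illustrative only. */
#include <stdio.h>
#include <string.h>

static size_t l2pad_size(size_t header_length)
{
	return (4 - (header_length & 3)) & 3;
}

static size_t add_l2pad(unsigned char *frame, size_t frame_len,
			size_t header_length, size_t bufsize)
{
	size_t pad = l2pad_size(header_length);

	if (!pad || frame_len + pad > bufsize)
		return frame_len;

	/* Shift the payload up and zero the gap behind the header. */
	memmove(frame + header_length + pad, frame + header_length,
		frame_len - header_length);
	memset(frame + header_length, 0, pad);
	return frame_len + pad;
}

int main(void)
{
	/* 26-byte "header" (QoS data header length) followed by payload. */
	unsigned char frame[64];
	size_t header_length = 26, frame_len = 26 + 12;

	memset(frame, 'H', header_length);
	memcpy(frame + header_length, "payload-data", 12);

	frame_len = add_l2pad(frame, frame_len, header_length, sizeof(frame));
	printf("padded length %zu, payload now at offset %zu\n",
	       frame_len, header_length + l2pad_size(header_length));
	return 0;
}
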
static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
@@ -259,6 +288,12 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
txdesc->aifs = entry->queue->aifs;
/*
+ * Header and alignment information.
+ */
+ txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
+ txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length);
+
+ /*
* Check whether this frame is to be acked.
*/
if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
@@ -326,6 +361,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
* Apply TX descriptor handling by components
*/
rt2x00crypto_create_tx_descriptor(entry, txdesc);
+ rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
@@ -368,7 +404,6 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
struct txentry_desc txdesc;
struct skb_frame_desc *skbdesc;
- unsigned int iv_len = 0;
u8 rate_idx, rate_flags;
if (unlikely(rt2x00queue_full(queue)))
@@ -390,9 +425,6 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
entry->skb = skb;
rt2x00queue_create_tx_descriptor(entry, &txdesc);
- if (IEEE80211_SKB_CB(skb)->control.hw_key != NULL)
- iv_len = IEEE80211_SKB_CB(skb)->control.hw_key->iv_len;
-
/*
* All information is retrieved from the skb->cb array,
* now we should claim ownership of the driver part of that
@@ -415,11 +447,15 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
- rt2x00crypto_tx_copy_iv(skb, iv_len);
+ rt2x00crypto_tx_copy_iv(skb, &txdesc);
else
- rt2x00crypto_tx_remove_iv(skb, iv_len);
+ rt2x00crypto_tx_remove_iv(skb, &txdesc);
}
+ if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
+ rt2x00queue_payload_align(entry->skb, true,
+ txdesc.header_length);
+
/*
* It could be possible that the queue was corrupted and this
* call failed. Since we always return NETDEV_TX_OK to mac80211,
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 97e2ab08f08..b5e06347c8a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -35,9 +35,12 @@
* for USB devices this restriction does not apply, but the value of
* 2432 makes sense since it is big enough to contain the maximum fragment
* size according to the ieee802.11 specs.
+ * The aggregation size depends on support from the driver, but should
+ * be something around 3840 bytes.
*/
-#define DATA_FRAME_SIZE 2432
-#define MGMT_FRAME_SIZE 256
+#define DATA_FRAME_SIZE 2432
+#define MGMT_FRAME_SIZE 256
+#define AGGREGATION_SIZE 3840
/**
* DOC: Number of entries per queue
@@ -87,13 +90,16 @@ enum data_queue_qid {
*
* @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
* @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
- * @FRAME_DESC_IV_STRIPPED: Frame contained a IV/EIV provided by
+ * @SKBDESC_IV_STRIPPED: Frame contained an IV/EIV provided by
* mac80211 but was stripped for processing by the driver.
+ * @SKBDESC_L2_PADDED: Payload has been padded for 4-byte alignment,
+ * the padded bytes are located between header and payload.
*/
enum skb_frame_desc_flags {
SKBDESC_DMA_MAPPED_RX = 1 << 0,
SKBDESC_DMA_MAPPED_TX = 1 << 1,
- FRAME_DESC_IV_STRIPPED = 1 << 2,
+ SKBDESC_IV_STRIPPED = 1 << 2,
+ SKBDESC_L2_PADDED = 1 << 3
};
/**
@@ -145,16 +151,20 @@ static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
*
* @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value.
* @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
+ * @RXDONE_SIGNAL_MCS: Signal field contains the mcs value.
* @RXDONE_MY_BSS: Does this frame originate from device's BSS.
* @RXDONE_CRYPTO_IV: Driver provided IV/EIV data.
* @RXDONE_CRYPTO_ICV: Driver provided ICV data.
+ * @RXDONE_L2PAD: 802.11 payload has been padded to 4-byte boundary.
*/
enum rxdone_entry_desc_flags {
- RXDONE_SIGNAL_PLCP = 1 << 0,
- RXDONE_SIGNAL_BITRATE = 1 << 1,
- RXDONE_MY_BSS = 1 << 2,
- RXDONE_CRYPTO_IV = 1 << 3,
- RXDONE_CRYPTO_ICV = 1 << 4,
+ RXDONE_SIGNAL_PLCP = BIT(0),
+ RXDONE_SIGNAL_BITRATE = BIT(1),
+ RXDONE_SIGNAL_MCS = BIT(2),
+ RXDONE_MY_BSS = BIT(3),
+ RXDONE_CRYPTO_IV = BIT(4),
+ RXDONE_CRYPTO_ICV = BIT(5),
+ RXDONE_L2PAD = BIT(6),
};
/**
@@ -163,7 +173,7 @@ enum rxdone_entry_desc_flags {
* from &rxdone_entry_desc to a signal value type.
*/
#define RXDONE_SIGNAL_MASK \
- ( RXDONE_SIGNAL_PLCP | RXDONE_SIGNAL_BITRATE )
+ ( RXDONE_SIGNAL_PLCP | RXDONE_SIGNAL_BITRATE | RXDONE_SIGNAL_MCS )
/**
* struct rxdone_entry_desc: RX Entry descriptor
@@ -177,6 +187,7 @@ enum rxdone_entry_desc_flags {
* @size: Data size of the received frame.
* @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
* @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
+ * @rate_mode: Rate mode (See &enum rate_modulation).
* @cipher: Cipher type used during decryption.
* @cipher_status: Decryption status.
* @iv: IV/EIV data used during decryption.
@@ -190,6 +201,7 @@ struct rxdone_entry_desc {
int size;
int flags;
int dev_flags;
+ u16 rate_mode;
u8 cipher;
u8 cipher_status;
@@ -243,6 +255,9 @@ struct txdone_entry_desc {
* @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
* @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
* @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
+ * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU.
+ * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth.
+ * @ENTRY_TXD_HT_SHORT_GI: Use short GI.
*/
enum txentry_desc_flags {
ENTRY_TXD_RTS_FRAME,
@@ -258,6 +273,9 @@ enum txentry_desc_flags {
ENTRY_TXD_ENCRYPT_PAIRWISE,
ENTRY_TXD_ENCRYPT_IV,
ENTRY_TXD_ENCRYPT_MMIC,
+ ENTRY_TXD_HT_AMPDU,
+ ENTRY_TXD_HT_BW_40,
+ ENTRY_TXD_HT_SHORT_GI,
};
/**
@@ -267,11 +285,17 @@ enum txentry_desc_flags {
*
* @flags: Descriptor flags (See &enum queue_entry_flags).
* @queue: Queue identification (See &enum data_queue_qid).
+ * @header_length: Length of 802.11 header.
+ * @l2pad: Amount of padding to align 802.11 payload to 4-byte boundary.
* @length_high: PLCP length high word.
* @length_low: PLCP length low word.
* @signal: PLCP signal.
* @service: PLCP service.
+ * @mcs: MCS.
+ * @stbc: STBC.
+ * @ba_size: BA size.
* @rate_mode: Rate mode (See @enum rate_modulation).
+ * @mpdu_density: MPDU density.
* @retry_limit: Max number of retries.
* @aifs: AIFS value.
* @ifs: IFS value.
@@ -280,18 +304,26 @@ enum txentry_desc_flags {
* @cipher: Cipher type used for encryption.
* @key_idx: Key index used for encryption.
* @iv_offset: Position where IV should be inserted by hardware.
+ * @iv_len: Length of IV data.
*/
struct txentry_desc {
unsigned long flags;
enum data_queue_qid queue;
+ u16 header_length;
+ u16 l2pad;
+
u16 length_high;
u16 length_low;
u16 signal;
u16 service;
+ u16 mcs;
+ u16 stbc;
+ u16 ba_size;
u16 rate_mode;
+ u16 mpdu_density;
short retry_limit;
short aifs;
@@ -302,6 +334,7 @@ struct txentry_desc {
enum cipher cipher;
u16 key_idx;
u16 iv_offset;
+ u16 iv_len;
};
/**
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 2ca8b7a9722..49b29ff90c4 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -603,15 +603,22 @@ static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, erp->ack_timeout);
+ rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
!!erp->short_preamble);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates);
+ rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
+ erp->beacon_int * 16);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
rt2x00pci_register_read(rt2x00dev, MAC_CSR9, &reg);
rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time);
rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg);
@@ -938,25 +945,6 @@ static void rt61pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
}
-static void rt61pci_config_duration(struct rt2x00_dev *rt2x00dev,
- struct rt2x00lib_conf *libconf)
-{
- u32 reg;
-
- rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
- rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
-
- rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
- rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
-
- rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
- libconf->conf->beacon_int * 16);
- rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
-}
-
static void rt61pci_config_ps(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_conf *libconf)
{
@@ -968,7 +956,7 @@ static void rt61pci_config_ps(struct rt2x00_dev *rt2x00dev,
if (state == STATE_SLEEP) {
rt2x00pci_register_read(rt2x00dev, MAC_CSR11, &reg);
rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN,
- libconf->conf->beacon_int - 10);
+ rt2x00dev->beacon_int - 10);
rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP,
libconf->conf->listen_interval - 1);
rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 5);
@@ -1016,8 +1004,6 @@ static void rt61pci_config(struct rt2x00_dev *rt2x00dev,
rt61pci_config_txpower(rt2x00dev, libconf->conf->power_level);
if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
rt61pci_config_retry_limit(rt2x00dev, libconf);
- if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
- rt61pci_config_duration(rt2x00dev, libconf);
if (flags & IEEE80211_CONF_CHANGE_PS)
rt61pci_config_ps(rt2x00dev, libconf);
}
@@ -2308,7 +2294,6 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
u32 reg;
u16 value;
u16 eeprom;
- u16 device;
/*
* Read EEPROM word for configuration.
@@ -2317,14 +2302,10 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Identify RF chipset.
- * To determine the RT chip we have to read the
- * PCI header of the device.
*/
- pci_read_config_word(to_pci_dev(rt2x00dev->dev),
- PCI_CONFIG_HEADER_DEVICE, &device);
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg);
- rt2x00_set_chip(rt2x00dev, device, value, reg);
+ rt2x00_set_chip_rf(rt2x00dev, value, reg);
if (!rt2x00_rf(&rt2x00dev->chip, RF5225) &&
!rt2x00_rf(&rt2x00dev->chip, RF5325) &&
@@ -2740,7 +2721,6 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
.add_interface = rt2x00mac_add_interface,
.remove_interface = rt2x00mac_remove_interface,
.config = rt2x00mac_config,
- .config_interface = rt2x00mac_config_interface,
.configure_filter = rt2x00mac_configure_filter,
.set_key = rt2x00mac_set_key,
.get_stats = rt2x00mac_get_stats,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 41e8959919f..6c71f77c816 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -63,12 +63,6 @@
*/
/*
- * PCI Configuration Header
- */
-#define PCI_CONFIG_HEADER_VENDOR 0x0000
-#define PCI_CONFIG_HEADER_DEVICE 0x0002
-
-/*
* HOST_CMD_CSR: For HOST to interrupt embedded processor
*/
#define HOST_CMD_CSR 0x0008
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 853b2b279b6..c18848836f2 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -566,15 +566,22 @@ static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev,
rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, erp->ack_timeout);
+ rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
!!erp->short_preamble);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates);
+ rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
+ erp->beacon_int * 16);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+
rt2x00usb_register_read(rt2x00dev, MAC_CSR9, &reg);
rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time);
rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg);
@@ -834,25 +841,6 @@ static void rt73usb_config_retry_limit(struct rt2x00_dev *rt2x00dev,
rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg);
}
-static void rt73usb_config_duration(struct rt2x00_dev *rt2x00dev,
- struct rt2x00lib_conf *libconf)
-{
- u32 reg;
-
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
-
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg);
-
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
- libconf->conf->beacon_int * 16);
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
-}
-
static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_conf *libconf)
{
@@ -864,7 +852,7 @@ static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev,
if (state == STATE_SLEEP) {
rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN,
- libconf->conf->beacon_int - 10);
+ rt2x00dev->beacon_int - 10);
rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP,
libconf->conf->listen_interval - 1);
rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 5);
@@ -906,8 +894,6 @@ static void rt73usb_config(struct rt2x00_dev *rt2x00dev,
rt73usb_config_txpower(rt2x00dev, libconf->conf->power_level);
if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
rt73usb_config_retry_limit(rt2x00dev, libconf);
- if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
- rt73usb_config_duration(rt2x00dev, libconf);
if (flags & IEEE80211_CONF_CHANGE_PS)
rt73usb_config_ps(rt2x00dev, libconf);
}
@@ -1846,7 +1832,8 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
rt2x00_set_chip(rt2x00dev, RT2571, value, reg);
- if (!rt2x00_check_rev(&rt2x00dev->chip, 0x25730)) {
+ if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0x25730) ||
+ rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
@@ -2259,7 +2246,6 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
.add_interface = rt2x00mac_add_interface,
.remove_interface = rt2x00mac_remove_interface,
.config = rt2x00mac_config,
- .config_interface = rt2x00mac_config_interface,
.configure_filter = rt2x00mac_configure_filter,
.set_key = rt2x00mac_set_key,
.get_stats = rt2x00mac_get_stats,
diff --git a/drivers/net/wireless/rtl818x/Makefile b/drivers/net/wireless/rtl818x/Makefile
index c113b3e6904..37e3d4db0c4 100644
--- a/drivers/net/wireless/rtl818x/Makefile
+++ b/drivers/net/wireless/rtl818x/Makefile
@@ -1,5 +1,5 @@
rtl8180-objs := rtl8180_dev.o rtl8180_rtl8225.o rtl8180_sa2400.o rtl8180_max2820.o rtl8180_grf5101.o
-rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o
+rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o rtl8187_leds.o
obj-$(CONFIG_RTL8180) += rtl8180.o
obj-$(CONFIG_RTL8187) += rtl8187.o
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 387c133ec0f..7e65d7c3180 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -702,30 +702,26 @@ static int rtl8180_config(struct ieee80211_hw *dev, u32 changed)
return 0;
}
-static int rtl8180_config_interface(struct ieee80211_hw *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct rtl8180_priv *priv = dev->priv;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++)
- rtl818x_iowrite8(priv, &priv->map->BSSID[i], conf->bssid[i]);
-
- if (is_valid_ether_addr(conf->bssid))
- rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_INFRA);
- else
- rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_NO_LINK);
-
- return 0;
-}
-
static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
u32 changed)
{
struct rtl8180_priv *priv = dev->priv;
+ int i;
+
+ if (changed & BSS_CHANGED_BSSID) {
+ for (i = 0; i < ETH_ALEN; i++)
+ rtl818x_iowrite8(priv, &priv->map->BSSID[i],
+ info->bssid[i]);
+
+ if (is_valid_ether_addr(info->bssid))
+ rtl818x_iowrite8(priv, &priv->map->MSR,
+ RTL818X_MSR_INFRA);
+ else
+ rtl818x_iowrite8(priv, &priv->map->MSR,
+ RTL818X_MSR_NO_LINK);
+ }
if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp)
priv->rf->conf_erp(dev, info);
@@ -770,7 +766,6 @@ static const struct ieee80211_ops rtl8180_ops = {
.add_interface = rtl8180_add_interface,
.remove_interface = rtl8180_remove_interface,
.config = rtl8180_config,
- .config_interface = rtl8180_config_interface,
.bss_info_changed = rtl8180_bss_info_changed,
.configure_filter = rtl8180_configure_filter,
};
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index edeff82a4d0..c09bfefc70f 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -16,6 +16,7 @@
#define RTL8187_H
#include "rtl818x.h"
+#include "rtl8187_leds.h"
#define RTL8187_EEPROM_TXPWR_BASE 0x05
#define RTL8187_EEPROM_MAC_ADDR 0x07
@@ -102,6 +103,12 @@ struct rtl8187_priv {
struct usb_anchor anchored;
struct delayed_work work;
struct ieee80211_hw *dev;
+#ifdef CONFIG_RTL8187_LEDS
+ struct rtl8187_led led_tx;
+ struct rtl8187_led led_rx;
+ struct delayed_work led_on;
+ struct delayed_work led_off;
+#endif
u16 txpwr_base;
u8 asic_rev;
u8 is_rtl8187b;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index bac6cfba6ab..294250e294d 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -29,6 +29,9 @@
#include "rtl8187.h"
#include "rtl8187_rtl8225.h"
+#ifdef CONFIG_RTL8187_LEDS
+#include "rtl8187_leds.h"
+#endif
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
@@ -71,6 +74,8 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
{USB_DEVICE(0x18E8, 0x6232), .driver_info = DEVICE_RTL8187},
/* AirLive */
{USB_DEVICE(0x1b75, 0x8187), .driver_info = DEVICE_RTL8187},
+ /* Linksys */
+ {USB_DEVICE(0x1737, 0x0073), .driver_info = DEVICE_RTL8187B},
{}
};
@@ -318,12 +323,7 @@ static void rtl8187_rx_cb(struct urb *urb)
unsigned long f;
spin_lock_irqsave(&priv->rx_queue.lock, f);
- if (skb->next)
- __skb_unlink(skb, &priv->rx_queue);
- else {
- spin_unlock_irqrestore(&priv->rx_queue.lock, f);
- return;
- }
+ __skb_unlink(skb, &priv->rx_queue);
spin_unlock_irqrestore(&priv->rx_queue.lock, f);
skb_put(skb, urb->actual_length);
@@ -734,10 +734,10 @@ static const u8 rtl8187b_reg_table[][3] = {
{0x85, 0x24, 0}, {0x88, 0x54, 0}, {0x8B, 0xB8, 0}, {0x8C, 0x07, 0},
{0x8D, 0x00, 0}, {0x94, 0x1B, 0}, {0x95, 0x12, 0}, {0x96, 0x00, 0},
{0x97, 0x06, 0}, {0x9D, 0x1A, 0}, {0x9F, 0x10, 0}, {0xB4, 0x22, 0},
- {0xBE, 0x80, 0}, {0xDB, 0x00, 0}, {0xEE, 0x00, 0}, {0x91, 0x03, 0},
+ {0xBE, 0x80, 0}, {0xDB, 0x00, 0}, {0xEE, 0x00, 0}, {0x4C, 0x00, 2},
- {0x4C, 0x00, 2}, {0x9F, 0x00, 3}, {0x8C, 0x01, 0}, {0x8D, 0x10, 0},
- {0x8E, 0x08, 0}, {0x8F, 0x00, 0}
+ {0x9F, 0x00, 3}, {0x8C, 0x01, 0}, {0x8D, 0x10, 0}, {0x8E, 0x08, 0},
+ {0x8F, 0x00, 0}
};
static int rtl8187b_init_hw(struct ieee80211_hw *dev)
@@ -1087,32 +1087,6 @@ static int rtl8187_config(struct ieee80211_hw *dev, u32 changed)
return 0;
}
-static int rtl8187_config_interface(struct ieee80211_hw *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct rtl8187_priv *priv = dev->priv;
- int i;
- u8 reg;
-
- mutex_lock(&priv->conf_mutex);
- for (i = 0; i < ETH_ALEN; i++)
- rtl818x_iowrite8(priv, &priv->map->BSSID[i], conf->bssid[i]);
-
- if (is_valid_ether_addr(conf->bssid)) {
- reg = RTL818X_MSR_INFRA;
- if (priv->is_rtl8187b)
- reg |= RTL818X_MSR_ENEDCA;
- rtl818x_iowrite8(priv, &priv->map->MSR, reg);
- } else {
- reg = RTL818X_MSR_NO_LINK;
- rtl818x_iowrite8(priv, &priv->map->MSR, reg);
- }
-
- mutex_unlock(&priv->conf_mutex);
- return 0;
-}
-
/*
* With 8187B, AC_*_PARAM clashes with FEMR definition in struct rtl818x_csr for
* example. Thus we have to use raw values for AC_*_PARAM register addresses.
@@ -1190,6 +1164,27 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
u32 changed)
{
struct rtl8187_priv *priv = dev->priv;
+ int i;
+ u8 reg;
+
+ if (changed & BSS_CHANGED_BSSID) {
+ mutex_lock(&priv->conf_mutex);
+ for (i = 0; i < ETH_ALEN; i++)
+ rtl818x_iowrite8(priv, &priv->map->BSSID[i],
+ info->bssid[i]);
+
+ if (is_valid_ether_addr(info->bssid)) {
+ reg = RTL818X_MSR_INFRA;
+ if (priv->is_rtl8187b)
+ reg |= RTL818X_MSR_ENEDCA;
+ rtl818x_iowrite8(priv, &priv->map->MSR, reg);
+ } else {
+ reg = RTL818X_MSR_NO_LINK;
+ rtl818x_iowrite8(priv, &priv->map->MSR, reg);
+ }
+
+ mutex_unlock(&priv->conf_mutex);
+ }
if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_ERP_PREAMBLE))
rtl8187_conf_erp(priv, info->use_short_slot,
@@ -1271,7 +1266,6 @@ static const struct ieee80211_ops rtl8187_ops = {
.add_interface = rtl8187_add_interface,
.remove_interface = rtl8187_remove_interface,
.config = rtl8187_config,
- .config_interface = rtl8187_config_interface,
.bss_info_changed = rtl8187_bss_info_changed,
.configure_filter = rtl8187_configure_filter,
.conf_tx = rtl8187_conf_tx
@@ -1478,9 +1472,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
(*channel++).hw_value = txpwr >> 8;
}
- if (priv->is_rtl8187b)
- printk(KERN_WARNING "rtl8187: 8187B chip detected.\n");
-
/*
* XXX: Once this driver supports anything that requires
* beacons it must implement IEEE80211_TX_CTL_ASSIGN_SEQ.
@@ -1512,6 +1503,12 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
chip_name, priv->asic_rev, priv->rf->name);
+#ifdef CONFIG_RTL8187_LEDS
+ eeprom_93cx6_read(&eeprom, 0x3F, &reg);
+ reg &= 0xFF;
+ rtl8187_leds_init(dev, reg);
+#endif
+
return 0;
err_free_dmabuf:
@@ -1531,6 +1528,9 @@ static void __devexit rtl8187_disconnect(struct usb_interface *intf)
if (!dev)
return;
+#ifdef CONFIG_RTL8187_LEDS
+ rtl8187_leds_exit(dev);
+#endif
ieee80211_unregister_hw(dev);
priv = dev->priv;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c
new file mode 100644
index 00000000000..b4425359224
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c
@@ -0,0 +1,218 @@
+/*
+ * Linux LED driver for RTL8187
+ *
+ * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ * Based on the LED handling in the r8187 driver, which is:
+ * Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Thanks to Realtek for their support!
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_RTL8187_LEDS
+
+#include <net/mac80211.h>
+#include <linux/usb.h>
+#include <linux/eeprom_93cx6.h>
+
+#include "rtl8187.h"
+#include "rtl8187_leds.h"
+
+static void led_turn_on(struct work_struct *work)
+{
+ /* As this routine does read/write operations on the hardware, it must
+ * be run from a work queue.
+ */
+ u8 reg;
+ struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
+ led_on.work);
+ struct rtl8187_led *led = &priv->led_tx;
+
+ /* Don't change the LED, when the device is down. */
+ if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ return;
+
+ /* Skip if the LED is not registered. */
+ if (!led->dev)
+ return;
+ mutex_lock(&priv->conf_mutex);
+ switch (led->ledpin) {
+ case LED_PIN_GPIO0:
+ rtl818x_iowrite8(priv, &priv->map->GPIO, 0x01);
+ rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x00);
+ break;
+ case LED_PIN_LED0:
+ reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 4);
+ rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
+ break;
+ case LED_PIN_LED1:
+ reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 5);
+ rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
+ break;
+ case LED_PIN_HW:
+ default:
+ break;
+ }
+ mutex_unlock(&priv->conf_mutex);
+}
+
+static void led_turn_off(struct work_struct *work)
+{
+ /* As this routine does read/write operations on the hardware, it must
+ * be run from a work queue.
+ */
+ u8 reg;
+ struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
+ led_off.work);
+ struct rtl8187_led *led = &priv->led_tx;
+
+ /* Don't change the LED, when the device is down. */
+ if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ return;
+
+ /* Skip if the LED is not registered. */
+ if (!led->dev)
+ return;
+ mutex_lock(&priv->conf_mutex);
+ switch (led->ledpin) {
+ case LED_PIN_GPIO0:
+ rtl818x_iowrite8(priv, &priv->map->GPIO, 0x01);
+ rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x01);
+ break;
+ case LED_PIN_LED0:
+ reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 4);
+ rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
+ break;
+ case LED_PIN_LED1:
+ reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 5);
+ rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
+ break;
+ case LED_PIN_HW:
+ default:
+ break;
+ }
+ mutex_unlock(&priv->conf_mutex);
+}
+
+/* Callback from the LED subsystem. */
+static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
+ enum led_brightness brightness)
+{
+ struct rtl8187_led *led = container_of(led_dev, struct rtl8187_led,
+ led_dev);
+ struct ieee80211_hw *hw = led->dev;
+ struct rtl8187_priv *priv = hw->priv;
+
+ if (brightness == LED_OFF) {
+ queue_delayed_work(hw->workqueue, &priv->led_off, 0);
+ /* The LED is off for 1/20 sec so that it just blinks. */
+ queue_delayed_work(hw->workqueue, &priv->led_on, HZ / 20);
+ } else
+ queue_delayed_work(hw->workqueue, &priv->led_on, 0);
+}
+
+static int rtl8187_register_led(struct ieee80211_hw *dev,
+ struct rtl8187_led *led, const char *name,
+ const char *default_trigger, u8 ledpin)
+{
+ int err;
+ struct rtl8187_priv *priv = dev->priv;
+
+ if (led->dev)
+ return -EEXIST;
+ if (!default_trigger)
+ return -EINVAL;
+ led->dev = dev;
+ led->ledpin = ledpin;
+ strncpy(led->name, name, sizeof(led->name));
+
+ led->led_dev.name = led->name;
+ led->led_dev.default_trigger = default_trigger;
+ led->led_dev.brightness_set = rtl8187_led_brightness_set;
+
+ err = led_classdev_register(&priv->udev->dev, &led->led_dev);
+ if (err) {
+ printk(KERN_INFO "LEDs: Failed to register %s\n", name);
+ led->dev = NULL;
+ return err;
+ }
+ return 0;
+}
+
+static void rtl8187_unregister_led(struct rtl8187_led *led)
+{
+ led_classdev_unregister(&led->led_dev);
+ led->dev = NULL;
+}
+
+void rtl8187_leds_init(struct ieee80211_hw *dev, u16 custid)
+{
+ struct rtl8187_priv *priv = dev->priv;
+ char name[RTL8187_LED_MAX_NAME_LEN + 1];
+ u8 ledpin;
+ int err;
+
+ /* According to the vendor driver, the LED operation depends on the
+ * customer ID encoded in the EEPROM
+ */
+ printk(KERN_INFO "rtl8187: Customer ID is 0x%02X\n", custid);
+ switch (custid) {
+ case EEPROM_CID_RSVD0:
+ case EEPROM_CID_RSVD1:
+ case EEPROM_CID_SERCOMM_PS:
+ case EEPROM_CID_QMI:
+ case EEPROM_CID_DELL:
+ case EEPROM_CID_TOSHIBA:
+ ledpin = LED_PIN_GPIO0;
+ break;
+ case EEPROM_CID_ALPHA0:
+ ledpin = LED_PIN_LED0;
+ break;
+ case EEPROM_CID_HW:
+ ledpin = LED_PIN_HW;
+ break;
+ default:
+ ledpin = LED_PIN_GPIO0;
+ }
+
+ INIT_DELAYED_WORK(&priv->led_on, led_turn_on);
+ INIT_DELAYED_WORK(&priv->led_off, led_turn_off);
+
+ snprintf(name, sizeof(name),
+ "rtl8187-%s::tx", wiphy_name(dev->wiphy));
+ err = rtl8187_register_led(dev, &priv->led_tx, name,
+ ieee80211_get_tx_led_name(dev), ledpin);
+ if (err)
+ goto error;
+ snprintf(name, sizeof(name),
+ "rtl8187-%s::rx", wiphy_name(dev->wiphy));
+ err = rtl8187_register_led(dev, &priv->led_rx, name,
+ ieee80211_get_rx_led_name(dev), ledpin);
+ if (!err) {
+ queue_delayed_work(dev->workqueue, &priv->led_on, 0);
+ return;
+ }
+ /* registration of RX LED failed - unregister TX */
+ rtl8187_unregister_led(&priv->led_tx);
+error:
+ /* If registration of either failed, cancel delayed work */
+ cancel_delayed_work_sync(&priv->led_off);
+ cancel_delayed_work_sync(&priv->led_on);
+}
+
+void rtl8187_leds_exit(struct ieee80211_hw *dev)
+{
+ struct rtl8187_priv *priv = dev->priv;
+
+ rtl8187_unregister_led(&priv->led_tx);
+ /* turn the LED off before exiting */
+ queue_delayed_work(dev->workqueue, &priv->led_off, 0);
+ cancel_delayed_work_sync(&priv->led_off);
+ rtl8187_unregister_led(&priv->led_rx);
+}
+#endif /* def CONFIG_RTL8187_LEDS */
+
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.h b/drivers/net/wireless/rtl818x/rtl8187_leds.h
new file mode 100644
index 00000000000..a0332027aea
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.h
@@ -0,0 +1,57 @@
+/*
+ * Definitions for RTL8187 leds
+ *
+ * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ * Based on the LED handling in the r8187 driver, which is:
+ * Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RTL8187_LED_H
+#define RTL8187_LED_H
+
+#ifdef CONFIG_RTL8187_LEDS
+
+#define RTL8187_LED_MAX_NAME_LEN 21
+
+#include <linux/leds.h>
+#include <linux/types.h>
+
+enum {
+ LED_PIN_LED0,
+ LED_PIN_LED1,
+ LED_PIN_GPIO0,
+ LED_PIN_HW
+};
+
+enum {
+ EEPROM_CID_RSVD0 = 0x00,
+ EEPROM_CID_RSVD1 = 0xFF,
+ EEPROM_CID_ALPHA0 = 0x01,
+ EEPROM_CID_SERCOMM_PS = 0x02,
+ EEPROM_CID_HW = 0x03,
+ EEPROM_CID_TOSHIBA = 0x04,
+ EEPROM_CID_QMI = 0x07,
+ EEPROM_CID_DELL = 0x08
+};
+
+struct rtl8187_led {
+ struct ieee80211_hw *dev;
+ /* The LED class device */
+ struct led_classdev led_dev;
+ /* The pin/method used to control the led */
+ u8 ledpin;
+ /* The unique name string for this LED device. */
+ char name[RTL8187_LED_MAX_NAME_LEN + 1];
+};
+
+void rtl8187_leds_init(struct ieee80211_hw *dev, u16 code);
+void rtl8187_leds_exit(struct ieee80211_hw *dev);
+
+#endif /* def CONFIG_RTL8187_LEDS */
+
+#endif /* RTL8187_LED_H */
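
Editor's note: the rtl8187 LED code above follows a common pattern: the LED core's brightness_set() callback may run in atomic context, so the driver only queues delayed work there and performs the actual register writes from the work handler in process context. Below is a stripped-down sketch of that pattern built on the standard LED-class and workqueue APIs; it is a generic illustration with made-up names, not the rtl8187 implementation.

/* Minimal sketch: defer LED hardware access from brightness_set()
 * to process context via a delayed work item. Generic illustration
 * only, not the rtl8187 driver code. */
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/workqueue.h>

struct demo_led {
	struct led_classdev cdev;
	struct delayed_work work;
	enum led_brightness brightness;
};

static struct demo_led demo;

static void demo_led_work(struct work_struct *work)
{
	struct demo_led *led = container_of(to_delayed_work(work),
					    struct demo_led, work);

	/* Safe to sleep here: talk to the hardware (e.g. USB control
	 * transfers) using the last requested brightness. */
	pr_info("demo-led: applying brightness %d\n", led->brightness);
}

static void demo_led_set(struct led_classdev *cdev,
			 enum led_brightness brightness)
{
	struct demo_led *led = container_of(cdev, struct demo_led, cdev);

	/* May be called in atomic context: just record and defer. */
	led->brightness = brightness;
	schedule_delayed_work(&led->work, 0);
}

static int demo_led_register(struct device *parent)
{
	INIT_DELAYED_WORK(&demo.work, demo_led_work);
	demo.cdev.name = "demo::tx";
	demo.cdev.brightness_set = demo_led_set;
	return led_classdev_register(parent, &demo.cdev);
}
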
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index f9520463269..38366a56b71 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -1540,7 +1540,7 @@ static int strip_xmit(struct sk_buff *skb, struct net_device *dev)
if (!netif_running(dev)) {
printk(KERN_ERR "%s: xmit call when iface is down\n",
dev->name);
- return (1);
+ return NETDEV_TX_BUSY;
}
netif_stop_queue(dev);
@@ -2509,7 +2509,7 @@ static void strip_dev_setup(struct net_device *dev)
* netdev_priv(dev) Already holds a pointer to our struct strip
*/
- *(MetricomAddress *) & dev->broadcast = broadcast_address;
+ *(MetricomAddress *)dev->broadcast = broadcast_address;
dev->dev_addr[0] = 0;
dev->addr_len = sizeof(MetricomAddress);
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 3ab3eb95718..ab7fc5c0c8b 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -2867,12 +2867,8 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
spin_unlock_irqrestore(&lp->spinlock, flags);
/* Check that we can continue */
if (lp->tx_n_in_use == (NTXBLOCKS - 1))
- return 1;
+ return NETDEV_TX_BUSY;
}
-#ifdef DEBUG_TX_ERROR
- if (skb->next)
- printk(KERN_INFO "skb has next\n");
-#endif
/* Do we need some padding? */
/* Note : on wireless the propagation time is in the order of 1us,
@@ -2884,10 +2880,10 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
skb_copy_from_linear_data(skb, data, skb->len);
/* Write packet on the card */
if(wv_packet_write(dev, data, ETH_ZLEN))
- return 1; /* We failed */
+ return NETDEV_TX_BUSY; /* We failed */
}
else if(wv_packet_write(dev, skb->data, skb->len))
- return 1; /* We failed */
+ return NETDEV_TX_BUSY; /* We failed */
dev_kfree_skb(skb);
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index e55b33961ae..6af706408ac 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -138,7 +138,7 @@ psa_read(struct net_device * dev,
/*------------------------------------------------------------------*/
/*
- * Write the Paramter Storage Area to the WaveLAN card's memory
+ * Write the Parameter Storage Area to the WaveLAN card's memory
*/
static void
psa_write(struct net_device * dev,
@@ -3107,11 +3107,6 @@ wavelan_packet_xmit(struct sk_buff * skb,
* so the Tx buffer is now free */
}
-#ifdef DEBUG_TX_ERROR
- if (skb->next)
- printk(KERN_INFO "skb has next\n");
-#endif
-
/* Check if we need some padding */
/* Note : on wireless the propagation time is in the order of 1us,
* and we don't have the Ethernet specific requirement of being
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
new file mode 100644
index 00000000000..a82c4cd436d
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -0,0 +1,11 @@
+config WL12XX
+ tristate "TI wl1251/wl1271 support"
+ depends on MAC80211 && WLAN_80211 && SPI_MASTER && GENERIC_HARDIRQS && EXPERIMENTAL
+ select FW_LOADER
+ select CRC7
+ ---help---
+ This module adds support for wireless adapters based on
+ TI wl1251/wl1271 chipsets.
+
+ If you choose to build a module, it'll be called wl12xx. Say N if
+ unsure.
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
new file mode 100644
index 00000000000..d43de27dc54
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -0,0 +1,4 @@
+wl12xx-objs = main.o spi.o event.o tx.o rx.o \
+ ps.o cmd.o acx.o boot.o init.o wl1251.o \
+ debugfs.o
+obj-$(CONFIG_WL12XX) += wl12xx.o
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
new file mode 100644
index 00000000000..1cfd458ad5a
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -0,0 +1,689 @@
+#include "acx.h"
+
+#include <linux/module.h>
+#include <linux/crc7.h>
+#include <linux/spi/spi.h>
+
+#include "wl12xx.h"
+#include "wl12xx_80211.h"
+#include "reg.h"
+#include "spi.h"
+#include "ps.h"
+
+int wl12xx_acx_frame_rates(struct wl12xx *wl, u8 ctrl_rate, u8 ctrl_mod,
+ u8 mgt_rate, u8 mgt_mod)
+{
+ int ret;
+ struct acx_fw_gen_frame_rates rates;
+
+ wl12xx_debug(DEBUG_ACX, "acx frame rates");
+
+ rates.header.id = ACX_FW_GEN_FRAME_RATES;
+ rates.header.len = sizeof(struct acx_fw_gen_frame_rates) -
+ sizeof(struct acx_header);
+
+ rates.tx_ctrl_frame_rate = ctrl_rate;
+ rates.tx_ctrl_frame_mod = ctrl_mod;
+ rates.tx_mgt_frame_rate = mgt_rate;
+ rates.tx_mgt_frame_mod = mgt_mod;
+
+ ret = wl12xx_cmd_configure(wl, &rates, sizeof(rates));
+ if (ret < 0) {
+ wl12xx_error("Failed to set FW rates and modulation");
+ return ret;
+ }
+
+ return 0;
+}
+
+
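
Editor's note: each ACX helper in this new file fills a small command structure whose header carries a command ID and a length that excludes the header itself, then hands it to wl12xx_cmd_configure(). The stand-alone sketch below shows only that length convention; the structure names, command ID, and field are invented for illustration.

/* Illustration of the "len = sizeof(cmd) - sizeof(header)" convention
 * used by the ACX helpers; names and values are made up. */
#include <stdint.h>
#include <stdio.h>

struct demo_acx_header {
	uint16_t id;
	uint16_t len;   /* payload length, header excluded */
};

struct demo_acx_tx_power {
	struct demo_acx_header header;
	uint16_t current_tx_power;
};

int main(void)
{
	struct demo_acx_tx_power cmd = { { 0, 0 }, 0 };

	cmd.header.id = 0x100;  /* placeholder command ID */
	cmd.header.len = sizeof(cmd) - sizeof(struct demo_acx_header);
	cmd.current_tx_power = 10 * 10;  /* 10 dBm in 0.1 dBm units */

	printf("total %zu bytes, payload %u bytes\n",
	       sizeof(cmd), (unsigned)cmd.header.len);
	return 0;
}
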
+int wl12xx_acx_station_id(struct wl12xx *wl)
+{
+ int ret, i;
+ struct dot11_station_id mac;
+
+ wl12xx_debug(DEBUG_ACX, "acx dot11_station_id");
+
+ mac.header.id = DOT11_STATION_ID;
+ mac.header.len = sizeof(mac) - sizeof(struct acx_header);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac.mac[i] = wl->mac_addr[ETH_ALEN - 1 - i];
+
+ ret = wl12xx_cmd_configure(wl, &mac, sizeof(mac));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl12xx_acx_default_key(struct wl12xx *wl, u8 key_id)
+{
+ struct acx_dot11_default_key default_key;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx dot11_default_key (%d)", key_id);
+
+ default_key.header.id = DOT11_DEFAULT_KEY;
+ default_key.header.len = sizeof(default_key) -
+ sizeof(struct acx_header);
+
+ default_key.id = key_id;
+
+ ret = wl12xx_cmd_configure(wl, &default_key, sizeof(default_key));
+ if (ret < 0) {
+ wl12xx_error("Couldnt set default key");
+ return ret;
+ }
+
+ wl->default_key = key_id;
+
+ return 0;
+}
+
+int wl12xx_acx_wake_up_conditions(struct wl12xx *wl, u8 listen_interval)
+{
+ struct acx_wake_up_condition wake_up;
+
+ wl12xx_debug(DEBUG_ACX, "acx wake up conditions");
+
+ wake_up.header.id = ACX_WAKE_UP_CONDITIONS;
+ wake_up.header.len = sizeof(wake_up) - sizeof(struct acx_header);
+
+ wake_up.wake_up_event = WAKE_UP_EVENT_DTIM_BITMAP;
+ wake_up.listen_interval = listen_interval;
+
+ return wl12xx_cmd_configure(wl, &wake_up, sizeof(wake_up));
+}
+
+int wl12xx_acx_sleep_auth(struct wl12xx *wl, u8 sleep_auth)
+{
+ int ret;
+ struct acx_sleep_auth auth;
+
+ wl12xx_debug(DEBUG_ACX, "acx sleep auth");
+
+ auth.header.id = ACX_SLEEP_AUTH;
+ auth.header.len = sizeof(auth) - sizeof(struct acx_header);
+
+ auth.sleep_auth = sleep_auth;
+
+ ret = wl12xx_cmd_configure(wl, &auth, sizeof(auth));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl12xx_acx_fw_version(struct wl12xx *wl, char *buf, size_t len)
+{
+ struct wl12xx_command cmd;
+ struct acx_revision *rev;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx fw rev");
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ret = wl12xx_cmd_interrogate(wl, ACX_FW_REV, sizeof(*rev), &cmd);
+ if (ret < 0) {
+ wl12xx_warning("ACX_FW_REV interrogate failed");
+ return ret;
+ }
+
+ rev = (struct acx_revision *) &cmd.parameters;
+
+ /* be careful with the buffer sizes */
+ strncpy(buf, rev->fw_version, min(len, sizeof(rev->fw_version)));
+
+ /*
+ * if the firmware version string is exactly
+ * sizeof(rev->fw_version) long or len is less than
+ * sizeof(rev->fw_version) it won't be null terminated
+ */
+ buf[min(len, sizeof(rev->fw_version)) - 1] = '\0';
+
+ return 0;
+}
+
+int wl12xx_acx_tx_power(struct wl12xx *wl, int power)
+{
+ struct acx_current_tx_power ie;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr");
+
+ if (power < 0 || power > 25)
+ return -EINVAL;
+
+ memset(&ie, 0, sizeof(ie));
+
+ ie.header.id = DOT11_CUR_TX_PWR;
+ ie.header.len = sizeof(ie) - sizeof(struct acx_header);
+ ie.current_tx_power = power * 10;
+
+ ret = wl12xx_cmd_configure(wl, &ie, sizeof(ie));
+ if (ret < 0) {
+ wl12xx_warning("configure of tx power failed: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_feature_cfg(struct wl12xx *wl)
+{
+ struct acx_feature_config feature;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx feature cfg");
+
+ memset(&feature, 0, sizeof(feature));
+
+ feature.header.id = ACX_FEATURE_CFG;
+ feature.header.len = sizeof(feature) - sizeof(struct acx_header);
+
+ /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
+ feature.data_flow_options = 0;
+ feature.options = 0;
+
+ ret = wl12xx_cmd_configure(wl, &feature, sizeof(feature));
+ if (ret < 0)
+ wl12xx_error("Couldnt set HW encryption");
+
+ return ret;
+}
+
+int wl12xx_acx_mem_map(struct wl12xx *wl, void *mem_map, size_t len)
+{
+ struct wl12xx_command cmd;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx mem map");
+
+ ret = wl12xx_cmd_interrogate(wl, ACX_MEM_MAP, len, &cmd);
+ if (ret < 0)
+ return ret;
+ else if (cmd.status != CMD_STATUS_SUCCESS)
+ return -EIO;
+
+ memcpy(mem_map, &cmd.parameters, len);
+
+ return 0;
+}
+
+int wl12xx_acx_data_path_params(struct wl12xx *wl,
+ struct acx_data_path_params_resp *data_path)
+{
+ struct acx_data_path_params params;
+ struct wl12xx_command cmd;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx data path params");
+
+ params.rx_packet_ring_chunk_size = DP_RX_PACKET_RING_CHUNK_SIZE;
+ params.tx_packet_ring_chunk_size = DP_TX_PACKET_RING_CHUNK_SIZE;
+
+ params.rx_packet_ring_chunk_num = DP_RX_PACKET_RING_CHUNK_NUM;
+ params.tx_packet_ring_chunk_num = DP_TX_PACKET_RING_CHUNK_NUM;
+
+ params.tx_complete_threshold = 1;
+
+ params.tx_complete_ring_depth = FW_TX_CMPLT_BLOCK_SIZE;
+
+ params.tx_complete_timeout = DP_TX_COMPLETE_TIME_OUT;
+
+ params.header.id = ACX_DATA_PATH_PARAMS;
+ params.header.len = sizeof(params) - sizeof(struct acx_header);
+
+ ret = wl12xx_cmd_configure(wl, &params, sizeof(params));
+ if (ret < 0)
+ return ret;
+
+
+ ret = wl12xx_cmd_interrogate(wl, ACX_DATA_PATH_PARAMS,
+ sizeof(struct acx_data_path_params_resp),
+ &cmd);
+
+ if (ret < 0) {
+ wl12xx_warning("failed to read data path parameters: %d", ret);
+ return ret;
+ } else if (cmd.status != CMD_STATUS_SUCCESS) {
+ wl12xx_warning("data path parameter acx status failed");
+ return -EIO;
+ }
+
+ memcpy(data_path, &cmd.parameters, sizeof(*data_path));
+
+ return 0;
+}
+
+int wl12xx_acx_rx_msdu_life_time(struct wl12xx *wl, u32 life_time)
+{
+ struct rx_msdu_lifetime msdu_lifetime;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx rx msdu life time");
+
+ msdu_lifetime.header.id = DOT11_RX_MSDU_LIFE_TIME;
+ msdu_lifetime.header.len = sizeof(msdu_lifetime) -
+ sizeof(struct acx_header);
+ msdu_lifetime.lifetime = life_time;
+
+ ret = wl12xx_cmd_configure(wl, &msdu_lifetime, sizeof(msdu_lifetime));
+ if (ret < 0) {
+ wl12xx_warning("failed to set rx msdu life time: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_rx_config(struct wl12xx *wl, u32 config, u32 filter)
+{
+ struct acx_rx_config rx_config;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx rx config");
+
+ rx_config.header.id = ACX_RX_CFG;
+ rx_config.header.len = sizeof(rx_config) - sizeof(struct acx_header);
+ rx_config.config_options = config;
+ rx_config.filter_options = filter;
+
+ ret = wl12xx_cmd_configure(wl, &rx_config, sizeof(rx_config));
+ if (ret < 0) {
+ wl12xx_warning("failed to set rx config: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_pd_threshold(struct wl12xx *wl)
+{
+ struct acx_packet_detection packet_detection;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx data pd threshold");
+
+ /* FIXME: threshold value not set */
+ packet_detection.header.id = ACX_PD_THRESHOLD;
+ packet_detection.header.len = sizeof(packet_detection) -
+ sizeof(struct acx_header);
+
+ ret = wl12xx_cmd_configure(wl, &packet_detection,
+ sizeof(packet_detection));
+ if (ret < 0) {
+ wl12xx_warning("failed to set pd threshold: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_slot(struct wl12xx *wl, enum acx_slot_type slot_time)
+{
+ struct acx_slot slot;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx slot");
+
+ slot.header.id = ACX_SLOT;
+ slot.header.len = sizeof(slot) - sizeof(struct acx_header);
+
+ slot.wone_index = STATION_WONE_INDEX;
+ slot.slot_time = slot_time;
+
+ ret = wl12xx_cmd_configure(wl, &slot, sizeof(slot));
+ if (ret < 0) {
+ wl12xx_warning("failed to set slot time: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_group_address_tbl(struct wl12xx *wl)
+{
+ struct multicast_grp_addr_start multicast;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx group address tbl");
+
+ /* MAC filtering */
+ multicast.header.id = DOT11_GROUP_ADDRESS_TBL;
+ multicast.header.len = sizeof(multicast) - sizeof(struct acx_header);
+
+ multicast.enabled = 0;
+ multicast.num_groups = 0;
+ memset(multicast.mac_table, 0, ADDRESS_GROUP_MAX_LEN);
+
+ ret = wl12xx_cmd_configure(wl, &multicast, sizeof(multicast));
+ if (ret < 0) {
+ wl12xx_warning("failed to set group addr table: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_service_period_timeout(struct wl12xx *wl)
+{
+ struct acx_rx_timeout rx_timeout;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx service period timeout");
+
+ /* RX timeout */
+ rx_timeout.header.id = ACX_SERVICE_PERIOD_TIMEOUT;
+ rx_timeout.header.len = sizeof(rx_timeout) - sizeof(struct acx_header);
+
+ rx_timeout.ps_poll_timeout = RX_TIMEOUT_PS_POLL_DEF;
+ rx_timeout.upsd_timeout = RX_TIMEOUT_UPSD_DEF;
+
+ ret = wl12xx_cmd_configure(wl, &rx_timeout, sizeof(rx_timeout));
+ if (ret < 0) {
+ wl12xx_warning("failed to set service period timeout: %d",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_rts_threshold(struct wl12xx *wl, u16 rts_threshold)
+{
+ struct acx_rts_threshold rts;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx rts threshold");
+
+ rts.header.id = DOT11_RTS_THRESHOLD;
+ rts.header.len = sizeof(rts) - sizeof(struct acx_header);
+
+ rts.threshold = rts_threshold;
+
+ ret = wl12xx_cmd_configure(wl, &rts, sizeof(rts));
+ if (ret < 0) {
+ wl12xx_warning("failed to set rts threshold: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_beacon_filter_opt(struct wl12xx *wl)
+{
+ struct acx_beacon_filter_option beacon_filter;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx beacon filter opt");
+
+ beacon_filter.header.id = ACX_BEACON_FILTER_OPT;
+ beacon_filter.header.len = sizeof(beacon_filter) -
+ sizeof(struct acx_header);
+
+ beacon_filter.enable = 0;
+ beacon_filter.max_num_beacons = 0;
+
+ ret = wl12xx_cmd_configure(wl, &beacon_filter, sizeof(beacon_filter));
+ if (ret < 0) {
+ wl12xx_warning("failed to set beacon filter opt: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_beacon_filter_table(struct wl12xx *wl)
+{
+ struct acx_beacon_filter_ie_table ie_table;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx beacon filter table");
+
+ ie_table.header.id = ACX_BEACON_FILTER_TABLE;
+ ie_table.header.len = sizeof(ie_table) - sizeof(struct acx_header);
+
+ ie_table.num_ie = 0;
+ memset(ie_table.table, 0, BEACON_FILTER_TABLE_MAX_SIZE);
+
+ ret = wl12xx_cmd_configure(wl, &ie_table, sizeof(ie_table));
+ if (ret < 0) {
+ wl12xx_warning("failed to set beacon filter table: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_sg_enable(struct wl12xx *wl)
+{
+ struct acx_bt_wlan_coex pta;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx sg enable");
+
+ pta.header.id = ACX_SG_ENABLE;
+ pta.header.len = sizeof(pta) - sizeof(struct acx_header);
+
+ pta.enable = SG_ENABLE;
+
+ ret = wl12xx_cmd_configure(wl, &pta, sizeof(pta));
+ if (ret < 0) {
+ wl12xx_warning("failed to set softgemini enable: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_sg_cfg(struct wl12xx *wl)
+{
+ struct acx_bt_wlan_coex_param param;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx sg cfg");
+
+ /* BT-WLAN coexistence parameters */
+ param.header.id = ACX_SG_CFG;
+ param.header.len = sizeof(param) - sizeof(struct acx_header);
+
+ param.min_rate = RATE_INDEX_24MBPS;
+ param.bt_hp_max_time = PTA_BT_HP_MAXTIME_DEF;
+ param.wlan_hp_max_time = PTA_WLAN_HP_MAX_TIME_DEF;
+ param.sense_disable_timer = PTA_SENSE_DISABLE_TIMER_DEF;
+ param.rx_time_bt_hp = PTA_PROTECTIVE_RX_TIME_DEF;
+ param.tx_time_bt_hp = PTA_PROTECTIVE_TX_TIME_DEF;
+ param.rx_time_bt_hp_fast = PTA_PROTECTIVE_RX_TIME_FAST_DEF;
+ param.tx_time_bt_hp_fast = PTA_PROTECTIVE_TX_TIME_FAST_DEF;
+ param.wlan_cycle_fast = PTA_CYCLE_TIME_FAST_DEF;
+ param.bt_anti_starvation_period = PTA_ANTI_STARVE_PERIOD_DEF;
+ param.next_bt_lp_packet = PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF;
+ param.wake_up_beacon = PTA_TIME_BEFORE_BEACON_DEF;
+ param.hp_dm_max_guard_time = PTA_HPDM_MAX_TIME_DEF;
+ param.next_wlan_packet = PTA_TIME_OUT_NEXT_WLAN_DEF;
+ param.antenna_type = PTA_ANTENNA_TYPE_DEF;
+ param.signal_type = PTA_SIGNALING_TYPE_DEF;
+ param.afh_leverage_on = PTA_AFH_LEVERAGE_ON_DEF;
+ param.quiet_cycle_num = PTA_NUMBER_QUIET_CYCLE_DEF;
+ param.max_cts = PTA_MAX_NUM_CTS_DEF;
+ param.wlan_packets_num = PTA_NUMBER_OF_WLAN_PACKETS_DEF;
+ param.bt_packets_num = PTA_NUMBER_OF_BT_PACKETS_DEF;
+ param.missed_rx_avalanche = PTA_RX_FOR_AVALANCHE_DEF;
+ param.wlan_elp_hp = PTA_ELP_HP_DEF;
+ param.bt_anti_starvation_cycles = PTA_ANTI_STARVE_NUM_CYCLE_DEF;
+ param.ack_mode_dual_ant = PTA_ACK_MODE_DEF;
+ param.pa_sd_enable = PTA_ALLOW_PA_SD_DEF;
+ param.pta_auto_mode_enable = PTA_AUTO_MODE_NO_CTS_DEF;
+ param.bt_hp_respected_num = PTA_BT_HP_RESPECTED_DEF;
+
+ ret = wl12xx_cmd_configure(wl, &param, sizeof(param));
+ if (ret < 0) {
+ wl12xx_warning("failed to set sg config: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_cca_threshold(struct wl12xx *wl)
+{
+ struct acx_energy_detection detection;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx cca threshold");
+
+ detection.header.id = ACX_CCA_THRESHOLD;
+ detection.header.len = sizeof(detection) - sizeof(struct acx_header);
+
+ detection.rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D;
+ detection.tx_energy_detection = 0;
+
+ ret = wl12xx_cmd_configure(wl, &detection, sizeof(detection));
+ if (ret < 0) {
+ wl12xx_warning("failed to set cca threshold: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_bcn_dtim_options(struct wl12xx *wl)
+{
+ struct acx_beacon_broadcast bb;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx bcn dtim options");
+
+ bb.header.id = ACX_BCN_DTIM_OPTIONS;
+ bb.header.len = sizeof(bb) - sizeof(struct acx_header);
+
+ bb.beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE;
+ bb.broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE;
+ bb.rx_broadcast_in_ps = RX_BROADCAST_IN_PS_DEF_VALUE;
+ bb.ps_poll_threshold = CONSECUTIVE_PS_POLL_FAILURE_DEF;
+
+ ret = wl12xx_cmd_configure(wl, &bb, sizeof(bb));
+ if (ret < 0) {
+ wl12xx_warning("failed to set rx config: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_aid(struct wl12xx *wl, u16 aid)
+{
+ struct acx_aid acx_aid;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx aid");
+
+ acx_aid.header.id = ACX_AID;
+ acx_aid.header.len = sizeof(acx_aid) - sizeof(struct acx_header);
+
+ acx_aid.aid = aid;
+
+ ret = wl12xx_cmd_configure(wl, &acx_aid, sizeof(acx_aid));
+ if (ret < 0) {
+ wl12xx_warning("failed to set aid: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_event_mbox_mask(struct wl12xx *wl, u32 event_mask)
+{
+ struct acx_event_mask mask;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx event mbox mask");
+
+ mask.header.id = ACX_EVENT_MBOX_MASK;
+ mask.header.len = sizeof(mask) - sizeof(struct acx_header);
+
+ /* high event mask is unused */
+ mask.high_event_mask = 0xffffffff;
+
+ mask.event_mask = event_mask;
+
+ ret = wl12xx_cmd_configure(wl, &mask, sizeof(mask));
+ if (ret < 0) {
+ wl12xx_warning("failed to set aid: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_acx_set_preamble(struct wl12xx *wl, enum acx_preamble_type preamble)
+{
+ struct acx_preamble ie;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx_set_preamble");
+
+ memset(&ie, 0, sizeof(ie));
+
+ ie.header.id = ACX_PREAMBLE_TYPE;
+ ie.header.len = sizeof(ie) - sizeof(struct acx_header);
+ ie.preamble = preamble;
+ ret = wl12xx_cmd_configure(wl, &ie, sizeof(ie));
+ if (ret < 0) {
+ wl12xx_warning("Setting of preamble failed: %d", ret);
+ return ret;
+ }
+ return 0;
+}
+
+int wl12xx_acx_cts_protect(struct wl12xx *wl,
+ enum acx_ctsprotect_type ctsprotect)
+{
+ struct acx_ctsprotect ie;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx_set_ctsprotect");
+
+ memset(&ie, 0, sizeof(ie));
+
+ ie.header.id = ACX_CTS_PROTECTION;
+ ie.header.len = sizeof(ie) - sizeof(struct acx_header);
+ ie.ctsprotect = ctsprotect;
+ ret = wl12xx_cmd_configure(wl, &ie, sizeof(ie));
+ if (ret < 0) {
+ wl12xx_warning("Setting of ctsprotect failed: %d", ret);
+ return ret;
+ }
+ return 0;
+}
+
+int wl12xx_acx_statistics(struct wl12xx *wl, struct acx_statistics *stats)
+{
+ struct wl12xx_command *answer;
+ int ret;
+
+ wl12xx_debug(DEBUG_ACX, "acx statistics");
+
+ answer = kmalloc(sizeof(*answer), GFP_KERNEL);
+ if (!answer) {
+ wl12xx_warning("could not allocate memory for acx statistics");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = wl12xx_cmd_interrogate(wl, ACX_STATISTICS, sizeof(*answer),
+ answer);
+ if (ret < 0) {
+ wl12xx_warning("acx statistics failed: %d", ret);
+ goto out;
+ }
+
+ memcpy(stats, answer->parameters, sizeof(*stats));
+
+out:
+ kfree(answer);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
new file mode 100644
index 00000000000..fb2d2340993
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -0,0 +1,1245 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (c) 1998-2007 Texas Instruments Incorporated
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_ACX_H__
+#define __WL12XX_ACX_H__
+
+#include "wl12xx.h"
+
+/* Target's information element */
+struct acx_header {
+ u16 id;
+ u16 len;
+};
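+
+/*
+ * Editor's note: every command structure below embeds this header and
+ * fills it the same way -- id is one of the ACX_* or DOT11_* values
+ * listed at the end of this file, and len covers only the payload,
+ * i.e. sizeof(the command struct) - sizeof(struct acx_header).
+ */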
+
+struct acx_error_counter {
+ struct acx_header header;
+
+ /* The number of PLCP errors since the last time this */
+ /* information element was interrogated. This field is */
+ /* automatically cleared when it is interrogated.*/
+ u32 PLCP_error;
+
+ /* The number of FCS errors since the last time this */
+ /* information element was interrogated. This field is */
+ /* automatically cleared when it is interrogated.*/
+ u32 FCS_error;
+
+ /* The number of MPDUs without PLCP header errors received*/
+ /* since the last time this information element was interrogated. */
+ /* This field is automatically cleared when it is interrogated.*/
+ u32 valid_frame;
+
+ /* The number of missed sequence numbers in the sequence of */
+ /* successively received frames. */
+ u32 seq_num_miss;
+} __attribute__ ((packed));
+
+struct acx_revision {
+ struct acx_header header;
+
+ /*
+ * The WiLink firmware version, an ASCII string x.x.x.x,
+ * that uniquely identifies the current firmware.
+ * The left most digit is incremented each time a
+ * significant change is made to the firmware, such as
+ * code redesign or new platform support.
+ * The second digit is incremented when major enhancements
+ * are added or major fixes are made.
+ * The third digit is incremented for each GA release.
+ * The fourth digit is incremented for each build.
+ * The first two digits identify a firmware release version,
+ * in other words, a unique set of features.
+ * The first three digits identify a GA release.
+ */
+ char fw_version[20];
+
+ /*
+ * This 4 byte field specifies the WiLink hardware version.
+ * bits 0 - 15: Reserved.
+ * bits 16 - 23: Version ID - The WiLink version ID
+ * (1 = first spin, 2 = second spin, and so on).
+ * bits 24 - 31: Chip ID - The WiLink chip ID.
+ */
+ u32 hw_version;
+} __attribute__ ((packed));
+
+enum wl12xx_psm_mode {
+ /* Active mode */
+ WL12XX_PSM_CAM = 0,
+
+ /* Power save mode */
+ WL12XX_PSM_PS = 1,
+
+ /* Extreme low power */
+ WL12XX_PSM_ELP = 2,
+};
+
+struct acx_sleep_auth {
+ struct acx_header header;
+
+ /* The sleep level authorization of the device. */
+ /* 0 - Always active*/
+ /* 1 - Power down mode: light / fast sleep*/
+ /* 2 - ELP mode: Deep / Max sleep*/
+ u8 sleep_auth;
+ u8 padding[3];
+} __attribute__ ((packed));
+
+#define TIM_ELE_ID 5
+#define PARTIAL_VBM_MAX 251
+
+struct tim {
+ u8 identity;
+ u8 length;
+ u8 dtim_count;
+ u8 dtim_period;
+ u8 bitmap_ctrl;
+ u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */
+} __attribute__ ((packed));
+
+/* Virtual Bit Map update */
+struct vbm_update_request {
+ __le16 len;
+ u8 padding[2];
+ struct tim tim;
+} __attribute__ ((packed));
+
+enum {
+ HOSTIF_PCI_MASTER_HOST_INDIRECT,
+ HOSTIF_PCI_MASTER_HOST_DIRECT,
+ HOSTIF_SLAVE,
+ HOSTIF_PKT_RING,
+ HOSTIF_DONTCARE = 0xFF
+};
+
+#define DEFAULT_UCAST_PRIORITY 0
+#define DEFAULT_RX_Q_PRIORITY 0
+#define DEFAULT_NUM_STATIONS 1
+#define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 15 high */
+#define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */
+#define TRACE_BUFFER_MAX_SIZE 256
+
+#define DP_RX_PACKET_RING_CHUNK_SIZE 1600
+#define DP_TX_PACKET_RING_CHUNK_SIZE 1600
+#define DP_RX_PACKET_RING_CHUNK_NUM 2
+#define DP_TX_PACKET_RING_CHUNK_NUM 2
+#define DP_TX_COMPLETE_TIME_OUT 20
+#define FW_TX_CMPLT_BLOCK_SIZE 16
+
+struct acx_data_path_params {
+ struct acx_header header;
+
+ u16 rx_packet_ring_chunk_size;
+ u16 tx_packet_ring_chunk_size;
+
+ u8 rx_packet_ring_chunk_num;
+ u8 tx_packet_ring_chunk_num;
+
+ /*
+ * Maximum number of packets that can be gathered
+ * in the TX complete ring before an interrupt
+ * is generated.
+ */
+ u8 tx_complete_threshold;
+
+ /* Number of pending TX complete entries in cyclic ring.*/
+ u8 tx_complete_ring_depth;
+
+ /*
+ * Max num microseconds since a packet enters the TX
+ * complete ring until an interrupt is generated.
+ */
+ u32 tx_complete_timeout;
+} __attribute__ ((packed));
+
+
+struct acx_data_path_params_resp {
+ struct acx_header header;
+
+ u16 rx_packet_ring_chunk_size;
+ u16 tx_packet_ring_chunk_size;
+
+ u8 rx_packet_ring_chunk_num;
+ u8 tx_packet_ring_chunk_num;
+
+ u8 pad[2];
+
+ u32 rx_packet_ring_addr;
+ u32 tx_packet_ring_addr;
+
+ u32 rx_control_addr;
+ u32 tx_control_addr;
+
+ u32 tx_complete_addr;
+} __attribute__ ((packed));
+
+#define TX_MSDU_LIFETIME_MIN 0
+#define TX_MSDU_LIFETIME_MAX 3000
+#define TX_MSDU_LIFETIME_DEF 512
+#define RX_MSDU_LIFETIME_MIN 0
+#define RX_MSDU_LIFETIME_MAX 0xFFFFFFFF
+#define RX_MSDU_LIFETIME_DEF 512000
+
+struct rx_msdu_lifetime {
+ struct acx_header header;
+
+ /*
+ * The maximum amount of time, in TU, before the
+ * firmware discards the MSDU.
+ */
+ u32 lifetime;
+} __attribute__ ((packed));
+
+/*
+ * RX Config Options Table
+ * Bit Definition
+ * === ==========
+ * 31:14 Reserved
+ * 13 Copy RX Status - when set, write three receive status words
+ * to top of rx'd MPDUs.
+ * When cleared, do not write three status words (added rev 1.5)
+ * 12 Reserved
+ * 11 RX Complete upon FCS error - when set, give rx complete
+ * interrupt for FCS errors, after the rx filtering, e.g. unicast
+ * frames not to us with FCS error will not generate an interrupt.
+ * 10 SSID Filter Enable - When set, the WiLink discards all beacon,
+ * probe request, and probe response frames with an SSID that does
+ * not match the SSID specified by the host in the START/JOIN
+ * command.
+ * When clear, the WiLink receives frames with any SSID.
+ * 9 Broadcast Filter Enable - When set, the WiLink discards all
+ * broadcast frames. When clear, the WiLink receives all received
+ * broadcast frames.
+ * 8:6 Reserved
+ * 5 BSSID Filter Enable - When set, the WiLink discards any frames
+ * with a BSSID that does not match the BSSID specified by the
+ * host.
+ * When clear, the WiLink receives frames from any BSSID.
+ * 4 MAC Addr Filter - When set, the WiLink discards any frames
+ * with a destination address that does not match the MAC address
+ * of the adaptor.
+ * When clear, the WiLink receives frames destined to any MAC
+ * address.
+ * 3 Promiscuous - When set, the WiLink receives all valid frames
+ * (i.e., all frames that pass the FCS check).
+ * When clear, only frames that pass the other filters specified
+ * are received.
+ * 2 FCS - When set, the WiLink includes the FCS with the received
+ * frame.
+ * When cleared, the FCS is discarded.
+ * 1 PLCP header - When set, write all data from baseband to frame
+ * buffer including PHY header.
+ * 0 Reserved - Always equal to 0.
+ *
+ * RX Filter Options Table
+ * Bit Definition
+ * === ==========
+ * 31:12 Reserved - Always equal to 0.
+ * 11 Association - When set, the WiLink receives all association
+ * related frames (association request/response, reassociation
+ * request/response, and disassociation). When clear, these frames
+ * are discarded.
+ * 10 Auth/De auth - When set, the WiLink receives all authentication
+ * and de-authentication frames. When clear, these frames are
+ * discarded.
+ * 9 Beacon - When set, the WiLink receives all beacon frames.
+ * When clear, these frames are discarded.
+ * 8 Contention Free - When set, the WiLink receives all contention
+ * free frames.
+ * When clear, these frames are discarded.
+ * 7 Control - When set, the WiLink receives all control frames.
+ * When clear, these frames are discarded.
+ * 6 Data - When set, the WiLink receives all data frames.
+ * When clear, these frames are discarded.
+ * 5 FCS Error - When set, the WiLink receives frames that have FCS
+ * errors.
+ * When clear, these frames are discarded.
+ * 4 Management - When set, the WiLink receives all management
+ * frames.
+ * When clear, these frames are discarded.
+ * 3 Probe Request - When set, the WiLink receives all probe request
+ * frames.
+ * When clear, these frames are discarded.
+ * 2 Probe Response - When set, the WiLink receives all probe
+ * response frames.
+ * When clear, these frames are discarded.
+ * 1 RTS/CTS/ACK - When set, the WiLink receives all RTS, CTS and ACK
+ * frames.
+ * When clear, these frames are discarded.
+ * 0 Rsvd Type/Sub Type - When set, the WiLink receives all frames
+ * that have reserved frame types and sub types as defined by the
+ * 802.11 specification.
+ * When clear, these frames are discarded.
+ */
+struct acx_rx_config {
+ struct acx_header header;
+
+ u32 config_options;
+ u32 filter_options;
+} __attribute__ ((packed));
+
+enum {
+ QOS_AC_BE = 0,
+ QOS_AC_BK,
+ QOS_AC_VI,
+ QOS_AC_VO,
+ QOS_HIGHEST_AC_INDEX = QOS_AC_VO,
+};
+
+#define MAX_NUM_OF_AC (QOS_HIGHEST_AC_INDEX+1)
+#define FIRST_AC_INDEX QOS_AC_BE
+#define MAX_NUM_OF_802_1d_TAGS 8
+#define AC_PARAMS_MAX_TSID 15
+#define MAX_APSD_CONF 0xffff
+
+#define QOS_TX_HIGH_MIN (0)
+#define QOS_TX_HIGH_MAX (100)
+
+#define QOS_TX_HIGH_BK_DEF (25)
+#define QOS_TX_HIGH_BE_DEF (35)
+#define QOS_TX_HIGH_VI_DEF (35)
+#define QOS_TX_HIGH_VO_DEF (35)
+
+#define QOS_TX_LOW_BK_DEF (15)
+#define QOS_TX_LOW_BE_DEF (25)
+#define QOS_TX_LOW_VI_DEF (25)
+#define QOS_TX_LOW_VO_DEF (25)
+
+struct acx_tx_queue_qos_config {
+ struct acx_header header;
+
+ u8 qid;
+ u8 pad[3];
+
+ /* Max number of blocks allowed in the queue */
+ u16 high_threshold;
+
+ /* Lowest memory blocks guaranteed for this queue */
+ u16 low_threshold;
+} __attribute__ ((packed));
+
+struct acx_packet_detection {
+ struct acx_header header;
+
+ u32 threshold;
+} __attribute__ ((packed));
+
+
+enum acx_slot_type {
+ SLOT_TIME_LONG = 0,
+ SLOT_TIME_SHORT = 1,
+ DEFAULT_SLOT_TIME = SLOT_TIME_SHORT,
+ MAX_SLOT_TIMES = 0xFF
+};
+
+#define STATION_WONE_INDEX 0
+
+struct acx_slot {
+ struct acx_header header;
+
+ u8 wone_index; /* Reserved */
+ u8 slot_time;
+ u8 reserved[6];
+} __attribute__ ((packed));
+
+
+#define ADDRESS_GROUP_MAX (8)
+#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ADDRESS_GROUP_MAX)
+
+struct multicast_grp_addr_start {
+ struct acx_header header;
+
+ u8 enabled;
+ u8 num_groups;
+ u8 pad[2];
+ u8 mac_table[ADDRESS_GROUP_MAX_LEN];
+} __attribute__ ((packed));
+
+
+#define RX_TIMEOUT_PS_POLL_MIN 0
+#define RX_TIMEOUT_PS_POLL_MAX (200000)
+#define RX_TIMEOUT_PS_POLL_DEF (15)
+#define RX_TIMEOUT_UPSD_MIN 0
+#define RX_TIMEOUT_UPSD_MAX (200000)
+#define RX_TIMEOUT_UPSD_DEF (15)
+
+struct acx_rx_timeout {
+ struct acx_header header;
+
+ /*
+ * The longest time the STA will wait to receive
+ * traffic from the AP after a PS-poll has been
+ * transmitted.
+ */
+ u16 ps_poll_timeout;
+
+ /*
+ * The longest time the STA will wait to receive
+ * traffic from the AP after a frame has been sent
+ * from an UPSD enabled queue.
+ */
+ u16 upsd_timeout;
+} __attribute__ ((packed));
+
+#define RTS_THRESHOLD_MIN 0
+#define RTS_THRESHOLD_MAX 4096
+#define RTS_THRESHOLD_DEF 2347
+
+struct acx_rts_threshold {
+ struct acx_header header;
+
+ u16 threshold;
+ u8 pad[2];
+} __attribute__ ((packed));
+
+struct acx_beacon_filter_option {
+ struct acx_header header;
+
+ u8 enable;
+
+ /*
+ * The number of beacons without the unicast TIM
+ * bit set that the firmware buffers before
+ * signaling the host about ready frames.
+ * When set to 0 and the filter is enabled, beacons
+ * without the unicast TIM bit set are dropped.
+ */
+ u8 max_num_beacons;
+ u8 pad[2];
+} __attribute__ ((packed));
+
+/*
+ * ACXBeaconFilterEntry (not 221)
+ * Byte Offset Size (Bytes) Definition
+ * =========== ============ ==========
+ * 0 1 IE identifier
+ * 1 1 Treatment bit mask
+ *
+ * ACXBeaconFilterEntry (221)
+ * Byte Offset Size (Bytes) Definition
+ * =========== ============ ==========
+ * 0 1 IE identifier
+ * 1 1 Treatment bit mask
+ * 2 3 OUI
+ * 5 1 Type
+ * 6 2 Version
+ *
+ *
+ * Treatment bit mask - The information element handling:
+ * bit 0 - The information element is compared and transferred
+ * in case of change.
+ * bit 1 - The information element is transferred to the host
+ * with each appearance or disappearance.
+ * Note that both bits can be set at the same time.
+ */
+#define BEACON_FILTER_TABLE_MAX_IE_NUM (32)
+#define BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM (6)
+#define BEACON_FILTER_TABLE_IE_ENTRY_SIZE (2)
+#define BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE (6)
+#define BEACON_FILTER_TABLE_MAX_SIZE ((BEACON_FILTER_TABLE_MAX_IE_NUM * \
+ BEACON_FILTER_TABLE_IE_ENTRY_SIZE) + \
+ (BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM * \
+ BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE))
+
+struct acx_beacon_filter_ie_table {
+ struct acx_header header;
+
+ u8 num_ie;
+ u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
+ u8 pad[3];
+} __attribute__ ((packed));
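+
+/*
+ * Editor's illustrative sketch (disabled, not part of the driver
+ * proper): packing a single non-221 entry into the table, following
+ * the ACXBeaconFilterEntry layout documented above.  The IE id (37,
+ * channel switch announcement) and the treatment mask are assumptions
+ * chosen for illustration only.
+ */
+#if 0
+static void example_beacon_filter_entry(struct acx_beacon_filter_ie_table *t)
+{
+ u8 *entry = &t->table[0];
+
+ entry[0] = 37; /* IE identifier */
+ entry[1] = BIT(0); /* transfer the IE to the host when it changes */
+
+ t->num_ie = 1;
+}
+#endif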
+
+enum {
+ SG_ENABLE = 0,
+ SG_DISABLE,
+ SG_SENSE_NO_ACTIVITY,
+ SG_SENSE_ACTIVE
+};
+
+struct acx_bt_wlan_coex {
+ struct acx_header header;
+
+ /*
+ * 0 -> PTA enabled
+ * 1 -> PTA disabled
+ * 2 -> sense no active mode, i.e.
+ * an interrupt is sent upon
+ * BT activity.
+ * 3 -> PTA is switched on in response
+ * to the interrupt sending.
+ */
+ u8 enable;
+ u8 pad[3];
+} __attribute__ ((packed));
+
+#define PTA_ANTENNA_TYPE_DEF (0)
+#define PTA_BT_HP_MAXTIME_DEF (2000)
+#define PTA_WLAN_HP_MAX_TIME_DEF (5000)
+#define PTA_SENSE_DISABLE_TIMER_DEF (1350)
+#define PTA_PROTECTIVE_RX_TIME_DEF (1500)
+#define PTA_PROTECTIVE_TX_TIME_DEF (1500)
+#define PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF (3000)
+#define PTA_SIGNALING_TYPE_DEF (1)
+#define PTA_AFH_LEVERAGE_ON_DEF (0)
+#define PTA_NUMBER_QUIET_CYCLE_DEF (0)
+#define PTA_MAX_NUM_CTS_DEF (3)
+#define PTA_NUMBER_OF_WLAN_PACKETS_DEF (2)
+#define PTA_NUMBER_OF_BT_PACKETS_DEF (2)
+#define PTA_PROTECTIVE_RX_TIME_FAST_DEF (1500)
+#define PTA_PROTECTIVE_TX_TIME_FAST_DEF (3000)
+#define PTA_CYCLE_TIME_FAST_DEF (8700)
+#define PTA_RX_FOR_AVALANCHE_DEF (5)
+#define PTA_ELP_HP_DEF (0)
+#define PTA_ANTI_STARVE_PERIOD_DEF (500)
+#define PTA_ANTI_STARVE_NUM_CYCLE_DEF (4)
+#define PTA_ALLOW_PA_SD_DEF (1)
+#define PTA_TIME_BEFORE_BEACON_DEF (6300)
+#define PTA_HPDM_MAX_TIME_DEF (1600)
+#define PTA_TIME_OUT_NEXT_WLAN_DEF (2550)
+#define PTA_AUTO_MODE_NO_CTS_DEF (0)
+#define PTA_BT_HP_RESPECTED_DEF (3)
+#define PTA_WLAN_RX_MIN_RATE_DEF (24)
+#define PTA_ACK_MODE_DEF (1)
+
+struct acx_bt_wlan_coex_param {
+ struct acx_header header;
+
+ /*
+ * The minimum rate of a received WLAN packet in the STA,
+ * during protective mode, of which a new BT-HP request
+ * during this Rx will always be respected and gain the antenna.
+ */
+ u32 min_rate;
+
+ /* Max time the BT HP will be respected. */
+ u16 bt_hp_max_time;
+
+ /* Max time the WLAN HP will be respected. */
+ u16 wlan_hp_max_time;
+
+ /*
+ * The time between the last BT activity
+ * and the moment when the sense mode returns
+ * to SENSE_INACTIVE.
+ */
+ u16 sense_disable_timer;
+
+ /* Time before the next BT HP instance */
+ u16 rx_time_bt_hp;
+ u16 tx_time_bt_hp;
+
+ /* range: 10-20000 default: 1500 */
+ u16 rx_time_bt_hp_fast;
+ u16 tx_time_bt_hp_fast;
+
+ /* range: 2000-65535 default: 8700 */
+ u16 wlan_cycle_fast;
+
+ /* range: 0 - 15000 (Msec) default: 1000 */
+ u16 bt_anti_starvation_period;
+
+ /* range 400-10000(Usec) default: 3000 */
+ u16 next_bt_lp_packet;
+
+ /* Default: worst case for BT DH5 traffic */
+ u16 wake_up_beacon;
+
+ /* range: 0-50000(Usec) default: 1050 */
+ u16 hp_dm_max_guard_time;
+
+ /*
+ * This is to prevent both BT & WLAN antenna
+ * starvation.
+ * Range: 100-50000(Usec) default:2550
+ */
+ u16 next_wlan_packet;
+
+ /* 0 -> shared antenna */
+ u8 antenna_type;
+
+ /*
+ * 0 -> TI legacy
+ * 1 -> Palau
+ */
+ u8 signal_type;
+
+ /*
+ * BT AFH status
+ * 0 -> no AFH
+ * 1 -> from dedicated GPIO
+ * 2 -> AFH on (from host)
+ */
+ u8 afh_leverage_on;
+
+ /*
+ * The number of cycles during which no
+ * TX will be sent after 1 cycle of RX
+ * transaction in protective mode
+ */
+ u8 quiet_cycle_num;
+
+ /*
+ * The maximum number of CTSs that will
+ * be sent for receiving RX packet in
+ * protective mode
+ */
+ u8 max_cts;
+
+ /*
+ * The number of WLAN packets
+ * transferred in common mode before
+ * switching to BT.
+ */
+ u8 wlan_packets_num;
+
+ /*
+ * The number of BT packets
+ * transferred in common mode before
+ * switching to WLAN.
+ */
+ u8 bt_packets_num;
+
+ /* range: 1-255 default: 5 */
+ u8 missed_rx_avalanche;
+
+ /* range: 0-1 default: 1 */
+ u8 wlan_elp_hp;
+
+ /* range: 0 - 15 default: 4 */
+ u8 bt_anti_starvation_cycles;
+
+ u8 ack_mode_dual_ant;
+
+ /*
+ * Allow PA_SD assertion/de-assertion
+ * during enabled BT activity.
+ */
+ u8 pa_sd_enable;
+
+ /*
+ * Enable/Disable PTA in auto mode:
+ * Support Both Active & P.S modes
+ */
+ u8 pta_auto_mode_enable;
+
+ /* range: 0 - 20 default: 1 */
+ u8 bt_hp_respected_num;
+} __attribute__ ((packed));
+
+#define CCA_THRSH_ENABLE_ENERGY_D 0x140A
+#define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF
+
+struct acx_energy_detection {
+ struct acx_header header;
+
+ /* The RX Clear Channel Assessment threshold in the PHY */
+ u16 rx_cca_threshold;
+ u8 tx_energy_detection;
+ u8 pad;
+} __attribute__ ((packed));
+
+#define BCN_RX_TIMEOUT_DEF_VALUE 10000
+#define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000
+#define RX_BROADCAST_IN_PS_DEF_VALUE 1
+#define CONSECUTIVE_PS_POLL_FAILURE_DEF 4
+
+struct acx_beacon_broadcast {
+ struct acx_header header;
+
+ u16 beacon_rx_timeout;
+ u16 broadcast_timeout;
+
+ /* Enables receiving of broadcast packets in PS mode */
+ u8 rx_broadcast_in_ps;
+
+ /* Consecutive PS Poll failures before updating the host */
+ u8 ps_poll_threshold;
+ u8 pad[2];
+} __attribute__ ((packed));
+
+struct acx_event_mask {
+ struct acx_header header;
+
+ u32 event_mask;
+ u32 high_event_mask; /* Unused */
+} __attribute__ ((packed));
+
+#define CFG_RX_FCS BIT(2)
+#define CFG_RX_ALL_GOOD BIT(3)
+#define CFG_UNI_FILTER_EN BIT(4)
+#define CFG_BSSID_FILTER_EN BIT(5)
+#define CFG_MC_FILTER_EN BIT(6)
+#define CFG_MC_ADDR0_EN BIT(7)
+#define CFG_MC_ADDR1_EN BIT(8)
+#define CFG_BC_REJECT_EN BIT(9)
+#define CFG_SSID_FILTER_EN BIT(10)
+#define CFG_RX_INT_FCS_ERROR BIT(11)
+#define CFG_RX_INT_ENCRYPTED BIT(12)
+#define CFG_RX_WR_RX_STATUS BIT(13)
+#define CFG_RX_FILTER_NULTI BIT(14)
+#define CFG_RX_RESERVE BIT(15)
+#define CFG_RX_TIMESTAMP_TSF BIT(16)
+
+#define CFG_RX_RSV_EN BIT(0)
+#define CFG_RX_RCTS_ACK BIT(1)
+#define CFG_RX_PRSP_EN BIT(2)
+#define CFG_RX_PREQ_EN BIT(3)
+#define CFG_RX_MGMT_EN BIT(4)
+#define CFG_RX_FCS_ERROR BIT(5)
+#define CFG_RX_DATA_EN BIT(6)
+#define CFG_RX_CTL_EN BIT(7)
+#define CFG_RX_CF_EN BIT(8)
+#define CFG_RX_BCN_EN BIT(9)
+#define CFG_RX_AUTH_EN BIT(10)
+#define CFG_RX_ASSOC_EN BIT(11)
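+
+/*
+ * Editor's illustrative sketch (disabled, not part of the driver
+ * proper): the CFG_* masks above correspond to the config/filter bit
+ * tables documented before struct acx_rx_config.  A station-mode
+ * setup might combine them roughly like this; the particular bits
+ * chosen here are an assumption for illustration only.
+ */
+#if 0
+static int example_rx_config(struct wl12xx *wl)
+{
+ /* unicast + BSSID filtering, keep the FCS on received frames */
+ u32 config = CFG_UNI_FILTER_EN | CFG_BSSID_FILTER_EN | CFG_RX_FCS;
+
+ /* pass data, management, beacon and probe response frames */
+ u32 filter = CFG_RX_DATA_EN | CFG_RX_MGMT_EN |
+ CFG_RX_BCN_EN | CFG_RX_PRSP_EN;
+
+ return wl12xx_acx_rx_config(wl, config, filter);
+}
+#endif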
+
+#define SCAN_PASSIVE BIT(0)
+#define SCAN_5GHZ_BAND BIT(1)
+#define SCAN_TRIGGERED BIT(2)
+#define SCAN_PRIORITY_HIGH BIT(3)
+
+struct acx_fw_gen_frame_rates {
+ struct acx_header header;
+
+ u8 tx_ctrl_frame_rate; /* RATE_* */
+ u8 tx_ctrl_frame_mod; /* CCK_* or PBCC_* */
+ u8 tx_mgt_frame_rate;
+ u8 tx_mgt_frame_mod;
+} __attribute__ ((packed));
+
+/* STA MAC */
+struct dot11_station_id {
+ struct acx_header header;
+
+ u8 mac[ETH_ALEN];
+ u8 pad[2];
+} __attribute__ ((packed));
+
+/* HW encryption keys */
+#define NUM_ACCESS_CATEGORIES_COPY 4
+#define MAX_KEY_SIZE 32
+
+/* When set, disable HW encryption */
+#define DF_ENCRYPTION_DISABLE 0x01
+/* When set, disable HW decryption */
+#define DF_SNIFF_MODE_ENABLE 0x80
+
+struct acx_feature_config {
+ struct acx_header header;
+
+ u32 options;
+ u32 data_flow_options;
+} __attribute__ ((packed));
+
+enum acx_key_action {
+ KEY_ADD_OR_REPLACE = 1,
+ KEY_REMOVE = 2,
+ KEY_SET_ID = 3,
+ MAX_KEY_ACTION = 0xffff,
+};
+
+enum acx_key_type {
+ KEY_WEP_DEFAULT = 0,
+ KEY_WEP_ADDR = 1,
+ KEY_AES_GROUP = 4,
+ KEY_AES_PAIRWISE = 5,
+ KEY_WEP_GROUP = 6,
+ KEY_TKIP_MIC_GROUP = 10,
+ KEY_TKIP_MIC_PAIRWISE = 11,
+};
+
+/*
+ *
+ * key_type_e key size key format
+ * ---------- --------- ----------
+ * 0x00 5, 13, 29 Key data
+ * 0x01 5, 13, 29 Key data
+ * 0x04 16 16 bytes of key data
+ * 0x05 16 16 bytes of key data
+ * 0x0a 32 16 bytes of TKIP key data
+ * 8 bytes of RX MIC key data
+ * 8 bytes of TX MIC key data
+ * 0x0b 32 16 bytes of TKIP key data
+ * 8 bytes of RX MIC key data
+ * 8 bytes of TX MIC key data
+ *
+ */
+
+struct acx_set_key {
+ /* Ignored for default WEP key */
+ u8 addr[ETH_ALEN];
+
+ /* key_action_e */
+ u16 key_action;
+
+ u16 reserved_1;
+
+ /* key size in bytes */
+ u8 key_size;
+
+ /* key_type_e */
+ u8 key_type;
+ u8 ssid_profile;
+
+ /*
+ * TKIP, AES: frame's key id field.
+ * For WEP default key: key id;
+ */
+ u8 id;
+ u8 reserved_2[6];
+ u8 key[MAX_KEY_SIZE];
+ u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
+ u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
+} __attribute__ ((packed));
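+
+/*
+ * Editor's illustrative sketch (disabled): filling acx_set_key for a
+ * 104-bit WEP default key, following the key size/format table above.
+ * The key material and key id are placeholders; addr is left zeroed
+ * because it is ignored for default WEP keys.
+ */
+#if 0
+static void example_set_wep_default_key(struct acx_set_key *key,
+ const u8 *wep_key, u8 key_id)
+{
+ memset(key, 0, sizeof(*key));
+
+ key->key_action = KEY_ADD_OR_REPLACE;
+ key->key_type = KEY_WEP_DEFAULT;
+ key->key_size = 13; /* 5, 13 or 29 bytes, see the table above */
+ key->id = key_id;
+ memcpy(key->key, wep_key, key->key_size);
+}
+#endif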
+
+struct acx_current_tx_power {
+ struct acx_header header;
+
+ u8 current_tx_power;
+ u8 padding[3];
+} __attribute__ ((packed));
+
+struct acx_dot11_default_key {
+ struct acx_header header;
+
+ u8 id;
+ u8 pad[3];
+} __attribute__ ((packed));
+
+struct acx_tsf_info {
+ struct acx_header header;
+
+ u32 current_tsf_msb;
+ u32 current_tsf_lsb;
+ u32 last_TBTT_msb;
+ u32 last_TBTT_lsb;
+ u8 last_dtim_count;
+ u8 pad[3];
+} __attribute__ ((packed));
+
+/* 802.11 PS */
+enum acx_ps_mode {
+ STATION_ACTIVE_MODE,
+ STATION_POWER_SAVE_MODE
+};
+
+struct acx_ps_params {
+ u8 ps_mode; /* STATION_* */
+ u8 send_null_data; /* Do we have to send NULL data packet? */
+ u8 retries; /* Number of retries for the initial NULL data packet */
+
+ /*
+ * TUs during which the target stays awake after switching
+ * to power save mode.
+ */
+ u8 hang_over_period;
+ u16 null_data_rate;
+ u8 pad[2];
+} __attribute__ ((packed));
+
+enum acx_wake_up_event {
+ WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/
+ WAKE_UP_EVENT_DTIM_BITMAP = 0x02, /* Wake on every DTIM*/
+ WAKE_UP_EVENT_N_DTIM_BITMAP = 0x04, /* Wake on every Nth DTIM */
+ WAKE_UP_EVENT_N_BEACONS_BITMAP = 0x08, /* Wake on every Nth Beacon */
+ WAKE_UP_EVENT_BITS_MASK = 0x0F
+};
+
+struct acx_wake_up_condition {
+ struct acx_header header;
+
+ u8 wake_up_event; /* Only one bit can be set */
+ u8 listen_interval;
+ u8 pad[2];
+} __attribute__ ((packed));
+
+struct acx_aid {
+ struct acx_header header;
+
+ /*
+ * To be set when associated with an AP.
+ */
+ u16 aid;
+ u8 pad[2];
+} __attribute__ ((packed));
+
+enum acx_preamble_type {
+ ACX_PREAMBLE_LONG = 0,
+ ACX_PREAMBLE_SHORT = 1
+};
+
+struct acx_preamble {
+ struct acx_header header;
+ /*
+ * When set, the WiLink transmits the frames with a short preamble and
+ * when cleared, the WiLink transmits the frames with a long preamble.
+ */
+ u8 preamble;
+ u8 padding[3];
+} __attribute__ ((packed));
+
+enum acx_ctsprotect_type {
+ CTSPROTECT_DISABLE = 0,
+ CTSPROTECT_ENABLE = 1
+};
+
+struct acx_ctsprotect {
+ struct acx_header header;
+ u8 ctsprotect;
+ u8 padding[3];
+} __attribute__ ((packed));
+
+struct acx_tx_statistics {
+ u32 internal_desc_overflow;
+} __attribute__ ((packed));
+
+struct acx_rx_statistics {
+ u32 out_of_mem;
+ u32 hdr_overflow;
+ u32 hw_stuck;
+ u32 dropped;
+ u32 fcs_err;
+ u32 xfr_hint_trig;
+ u32 path_reset;
+ u32 reset_counter;
+} __attribute__ ((packed));
+
+struct acx_dma_statistics {
+ u32 rx_requested;
+ u32 rx_errors;
+ u32 tx_requested;
+ u32 tx_errors;
+} __attribute__ ((packed));
+
+struct acx_isr_statistics {
+ /* host command complete */
+ u32 cmd_cmplt;
+
+ /* fiqisr() */
+ u32 fiqs;
+
+ /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
+ u32 rx_headers;
+
+ /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
+ u32 rx_completes;
+
+ /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
+ u32 rx_mem_overflow;
+
+ /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
+ u32 rx_rdys;
+
+ /* irqisr() */
+ u32 irqs;
+
+ /* (INT_STS_ND & INT_TRIG_TX_PROC) */
+ u32 tx_procs;
+
+ /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
+ u32 decrypt_done;
+
+ /* (INT_STS_ND & INT_TRIG_DMA0) */
+ u32 dma0_done;
+
+ /* (INT_STS_ND & INT_TRIG_DMA1) */
+ u32 dma1_done;
+
+ /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
+ u32 tx_exch_complete;
+
+ /* (INT_STS_ND & INT_TRIG_COMMAND) */
+ u32 commands;
+
+ /* (INT_STS_ND & INT_TRIG_RX_PROC) */
+ u32 rx_procs;
+
+ /* (INT_STS_ND & INT_TRIG_PM_802) */
+ u32 hw_pm_mode_changes;
+
+ /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
+ u32 host_acknowledges;
+
+ /* (INT_STS_ND & INT_TRIG_PM_PCI) */
+ u32 pci_pm;
+
+ /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
+ u32 wakeups;
+
+ /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
+ u32 low_rssi;
+} __attribute__ ((packed));
+
+struct acx_wep_statistics {
+ /* WEP address keys configured */
+ u32 addr_key_count;
+
+ /* default keys configured */
+ u32 default_key_count;
+
+ u32 reserved;
+
+ /* number of times that WEP key not found on lookup */
+ u32 key_not_found;
+
+ /* number of times that WEP key decryption failed */
+ u32 decrypt_fail;
+
+ /* WEP packets decrypted */
+ u32 packets;
+
+ /* WEP decrypt interrupts */
+ u32 interrupt;
+} __attribute__ ((packed));
+
+#define ACX_MISSED_BEACONS_SPREAD 10
+
+struct acx_pwr_statistics {
+ /* the amount of enters into power save mode (both PD & ELP) */
+ u32 ps_enter;
+
+ /* the amount of enters into ELP mode */
+ u32 elp_enter;
+
+ /* the amount of missing beacon interrupts to the host */
+ u32 missing_bcns;
+
+ /* the amount of wake on host-access times */
+ u32 wake_on_host;
+
+ /* the amount of wake on timer-expire */
+ u32 wake_on_timer_exp;
+
+ /* the number of packets that were transmitted with PS bit set */
+ u32 tx_with_ps;
+
+ /* the number of packets that were transmitted with PS bit clear */
+ u32 tx_without_ps;
+
+ /* the number of received beacons */
+ u32 rcvd_beacons;
+
+ /* the number of entering into PowerOn (power save off) */
+ u32 power_save_off;
+
+ /* the number of entries into power save mode */
+ u16 enable_ps;
+
+ /*
+ * the number of exits from power save, not including failed PS
+ * transitions
+ */
+ u16 disable_ps;
+
+ /*
+ * the number of times the TSF counter was adjusted because
+ * of drift
+ */
+ u32 fix_tsf_ps;
+
+ /* Gives statistics about the spread continuous missed beacons.
+ * The 16 LSB are dedicated for the PS mode.
+ * The 16 MSB are dedicated for the active mode.
+ * cont_miss_bcns_spread[0] - single missed beacon.
+ * cont_miss_bcns_spread[1] - two continuous missed beacons.
+ * cont_miss_bcns_spread[2] - three continuous missed beacons.
+ * ...
+ * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
+ */
+ u32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
+
+ /* the number of beacons in awake mode */
+ u32 rcvd_awake_beacons;
+} __attribute__ ((packed));
+
+struct acx_mic_statistics {
+ u32 rx_pkts;
+ u32 calc_failure;
+} __attribute__ ((packed));
+
+struct acx_aes_statistics {
+ u32 encrypt_fail;
+ u32 decrypt_fail;
+ u32 encrypt_packets;
+ u32 decrypt_packets;
+ u32 encrypt_interrupt;
+ u32 decrypt_interrupt;
+} __attribute__ ((packed));
+
+struct acx_event_statistics {
+ u32 heart_beat;
+ u32 calibration;
+ u32 rx_mismatch;
+ u32 rx_mem_empty;
+ u32 rx_pool;
+ u32 oom_late;
+ u32 phy_transmit_error;
+ u32 tx_stuck;
+} __attribute__ ((packed));
+
+struct acx_ps_statistics {
+ u32 pspoll_timeouts;
+ u32 upsd_timeouts;
+ u32 upsd_max_sptime;
+ u32 upsd_max_apturn;
+ u32 pspoll_max_apturn;
+ u32 pspoll_utilization;
+ u32 upsd_utilization;
+} __attribute__ ((packed));
+
+struct acx_rxpipe_statistics {
+ u32 rx_prep_beacon_drop;
+ u32 descr_host_int_trig_rx_data;
+ u32 beacon_buffer_thres_host_int_trig_rx_data;
+ u32 missed_beacon_host_int_trig_rx_data;
+ u32 tx_xfr_host_int_trig_rx_data;
+} __attribute__ ((packed));
+
+struct acx_statistics {
+ struct acx_header header;
+
+ struct acx_tx_statistics tx;
+ struct acx_rx_statistics rx;
+ struct acx_dma_statistics dma;
+ struct acx_isr_statistics isr;
+ struct acx_wep_statistics wep;
+ struct acx_pwr_statistics pwr;
+ struct acx_aes_statistics aes;
+ struct acx_mic_statistics mic;
+ struct acx_event_statistics event;
+ struct acx_ps_statistics ps;
+ struct acx_rxpipe_statistics rxpipe;
+} __attribute__ ((packed));
+
+enum {
+ ACX_WAKE_UP_CONDITIONS = 0x0002,
+ ACX_MEM_CFG = 0x0003,
+ ACX_SLOT = 0x0004,
+ ACX_QUEUE_HEAD = 0x0005, /* for MASTER mode only */
+ ACX_AC_CFG = 0x0007,
+ ACX_MEM_MAP = 0x0008,
+ ACX_AID = 0x000A,
+ ACX_RADIO_PARAM = 0x000B, /* Not used */
+ ACX_CFG = 0x000C, /* Not used */
+ ACX_FW_REV = 0x000D,
+ ACX_MEDIUM_USAGE = 0x000F,
+ ACX_RX_CFG = 0x0010,
+ ACX_TX_QUEUE_CFG = 0x0011, /* FIXME: only used by wl1251 */
+ ACX_BSS_IN_PS = 0x0012, /* for AP only */
+ ACX_STATISTICS = 0x0013, /* Debug API */
+ ACX_FEATURE_CFG = 0x0015,
+ ACX_MISC_CFG = 0x0017, /* Not used */
+ ACX_TID_CFG = 0x001A,
+ ACX_BEACON_FILTER_OPT = 0x001F,
+ ACX_LOW_RSSI = 0x0020,
+ ACX_NOISE_HIST = 0x0021,
+ ACX_HDK_VERSION = 0x0022, /* ??? */
+ ACX_PD_THRESHOLD = 0x0023,
+ ACX_DATA_PATH_PARAMS = 0x0024, /* WO */
+ ACX_DATA_PATH_RESP_PARAMS = 0x0024, /* RO */
+ ACX_CCA_THRESHOLD = 0x0025,
+ ACX_EVENT_MBOX_MASK = 0x0026,
+#ifdef FW_RUNNING_AS_AP
+ ACX_DTIM_PERIOD = 0x0027, /* for AP only */
+#else
+ ACX_WR_TBTT_AND_DTIM = 0x0027, /* STA only */
+#endif
+ ACX_ACI_OPTION_CFG = 0x0029, /* OBSOLETE (for 1251)*/
+ ACX_GPIO_CFG = 0x002A, /* Not used */
+ ACX_GPIO_SET = 0x002B, /* Not used */
+ ACX_PM_CFG = 0x002C, /* To Be Documented */
+ ACX_CONN_MONIT_PARAMS = 0x002D,
+ ACX_AVERAGE_RSSI = 0x002E, /* Not used */
+ ACX_CONS_TX_FAILURE = 0x002F,
+ ACX_BCN_DTIM_OPTIONS = 0x0031,
+ ACX_SG_ENABLE = 0x0032,
+ ACX_SG_CFG = 0x0033,
+ ACX_ANTENNA_DIVERSITY_CFG = 0x0035, /* To Be Documented */
+ ACX_LOW_SNR = 0x0037, /* To Be Documented */
+ ACX_BEACON_FILTER_TABLE = 0x0038,
+ ACX_ARP_IP_FILTER = 0x0039,
+ ACX_ROAMING_STATISTICS_TBL = 0x003B,
+ ACX_RATE_POLICY = 0x003D,
+ ACX_CTS_PROTECTION = 0x003E,
+ ACX_SLEEP_AUTH = 0x003F,
+ ACX_PREAMBLE_TYPE = 0x0040,
+ ACX_ERROR_CNT = 0x0041,
+ ACX_FW_GEN_FRAME_RATES = 0x0042,
+ ACX_IBSS_FILTER = 0x0044,
+ ACX_SERVICE_PERIOD_TIMEOUT = 0x0045,
+ ACX_TSF_INFO = 0x0046,
+ ACX_CONFIG_PS_WMM = 0x0049,
+ ACX_ENABLE_RX_DATA_FILTER = 0x004A,
+ ACX_SET_RX_DATA_FILTER = 0x004B,
+ ACX_GET_DATA_FILTER_STATISTICS = 0x004C,
+ ACX_POWER_LEVEL_TABLE = 0x004D,
+ ACX_BET_ENABLE = 0x0050,
+ DOT11_STATION_ID = 0x1001,
+ DOT11_RX_MSDU_LIFE_TIME = 0x1004,
+ DOT11_CUR_TX_PWR = 0x100D,
+ DOT11_DEFAULT_KEY = 0x1010,
+ DOT11_RX_DOT11_MODE = 0x1012,
+ DOT11_RTS_THRESHOLD = 0x1013,
+ DOT11_GROUP_ADDRESS_TBL = 0x1014,
+
+ MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
+
+ MAX_IE = 0xFFFF
+};
+
+
+int wl12xx_acx_frame_rates(struct wl12xx *wl, u8 ctrl_rate, u8 ctrl_mod,
+ u8 mgt_rate, u8 mgt_mod);
+int wl12xx_acx_station_id(struct wl12xx *wl);
+int wl12xx_acx_default_key(struct wl12xx *wl, u8 key_id);
+int wl12xx_acx_wake_up_conditions(struct wl12xx *wl, u8 listen_interval);
+int wl12xx_acx_sleep_auth(struct wl12xx *wl, u8 sleep_auth);
+int wl12xx_acx_fw_version(struct wl12xx *wl, char *buf, size_t len);
+int wl12xx_acx_tx_power(struct wl12xx *wl, int power);
+int wl12xx_acx_feature_cfg(struct wl12xx *wl);
+int wl12xx_acx_mem_map(struct wl12xx *wl, void *mem_map, size_t len);
+int wl12xx_acx_data_path_params(struct wl12xx *wl,
+ struct acx_data_path_params_resp *data_path);
+int wl12xx_acx_rx_msdu_life_time(struct wl12xx *wl, u32 life_time);
+int wl12xx_acx_rx_config(struct wl12xx *wl, u32 config, u32 filter);
+int wl12xx_acx_pd_threshold(struct wl12xx *wl);
+int wl12xx_acx_slot(struct wl12xx *wl, enum acx_slot_type slot_time);
+int wl12xx_acx_group_address_tbl(struct wl12xx *wl);
+int wl12xx_acx_service_period_timeout(struct wl12xx *wl);
+int wl12xx_acx_rts_threshold(struct wl12xx *wl, u16 rts_threshold);
+int wl12xx_acx_beacon_filter_opt(struct wl12xx *wl);
+int wl12xx_acx_beacon_filter_table(struct wl12xx *wl);
+int wl12xx_acx_sg_enable(struct wl12xx *wl);
+int wl12xx_acx_sg_cfg(struct wl12xx *wl);
+int wl12xx_acx_cca_threshold(struct wl12xx *wl);
+int wl12xx_acx_bcn_dtim_options(struct wl12xx *wl);
+int wl12xx_acx_aid(struct wl12xx *wl, u16 aid);
+int wl12xx_acx_event_mbox_mask(struct wl12xx *wl, u32 event_mask);
+int wl12xx_acx_set_preamble(struct wl12xx *wl, enum acx_preamble_type preamble);
+int wl12xx_acx_cts_protect(struct wl12xx *wl,
+ enum acx_ctsprotect_type ctsprotect);
+int wl12xx_acx_statistics(struct wl12xx *wl, struct acx_statistics *stats);
+
+#endif /* __WL12XX_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
new file mode 100644
index 00000000000..48ac08c429b
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -0,0 +1,295 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/gpio.h>
+
+#include "reg.h"
+#include "boot.h"
+#include "spi.h"
+#include "event.h"
+
+static void wl12xx_boot_enable_interrupts(struct wl12xx *wl)
+{
+ enable_irq(wl->irq);
+}
+
+void wl12xx_boot_target_enable_interrupts(struct wl12xx *wl)
+{
+ wl12xx_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask));
+ wl12xx_reg_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
+}
+
+int wl12xx_boot_soft_reset(struct wl12xx *wl)
+{
+ unsigned long timeout;
+ u32 boot_data;
+
+ /* perform soft reset */
+ wl12xx_reg_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
+
+ /* SOFT_RESET is self clearing */
+ timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
+ while (1) {
+ boot_data = wl12xx_reg_read32(wl, ACX_REG_SLV_SOFT_RESET);
+ wl12xx_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
+ if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ /* 1.2 check pWhalBus->uSelfClearTime if the
+ * timeout was reached */
+ wl12xx_error("soft reset timeout");
+ return -1;
+ }
+
+ udelay(SOFT_RESET_STALL_TIME);
+ }
+
+ /* disable Rx/Tx */
+ wl12xx_reg_write32(wl, ENABLE, 0x0);
+
+ /* disable auto calibration on start*/
+ wl12xx_reg_write32(wl, SPARE_A2, 0xffff);
+
+ return 0;
+}
+
+int wl12xx_boot_init_seq(struct wl12xx *wl)
+{
+ u32 scr_pad6, init_data, tmp, elp_cmd, ref_freq;
+
+ /*
+ * col #1: INTEGER_DIVIDER
+ * col #2: FRACTIONAL_DIVIDER
+ * col #3: ATTN_BB
+ * col #4: ALPHA_BB
+ * col #5: STOP_TIME_BB
+ * col #6: BB_PLL_LOOP_FILTER
+ */
+ static const u32 LUT[REF_FREQ_NUM][LUT_PARAM_NUM] = {
+
+ { 83, 87381, 0xB, 5, 0xF00, 3}, /* REF_FREQ_19_2*/
+ { 61, 141154, 0xB, 5, 0x1450, 2}, /* REF_FREQ_26_0*/
+ { 41, 174763, 0xC, 6, 0x2D00, 1}, /* REF_FREQ_38_4*/
+ { 40, 0, 0xC, 6, 0x2EE0, 1}, /* REF_FREQ_40_0*/
+ { 47, 162280, 0xC, 6, 0x2760, 1} /* REF_FREQ_33_6 */
+ };
+
+ /* read NVS params */
+ scr_pad6 = wl12xx_reg_read32(wl, SCR_PAD6);
+ wl12xx_debug(DEBUG_BOOT, "scr_pad6 0x%x", scr_pad6);
+
+ /* read ELP_CMD */
+ elp_cmd = wl12xx_reg_read32(wl, ELP_CMD);
+ wl12xx_debug(DEBUG_BOOT, "elp_cmd 0x%x", elp_cmd);
+
+ /* set the BB calibration time to be 300 usec (PLL_CAL_TIME) */
+ ref_freq = scr_pad6 & 0x000000FF;
+ wl12xx_debug(DEBUG_BOOT, "ref_freq 0x%x", ref_freq);
+
+ wl12xx_reg_write32(wl, PLL_CAL_TIME, 0x9);
+
+ /*
+ * PG 1.2: set the clock buffer time to be 210 usec (CLK_BUF_TIME)
+ */
+ wl12xx_reg_write32(wl, CLK_BUF_TIME, 0x6);
+
+ /*
+ * set the clock detect feature to work in the restart wu procedure
+ * (ELP_CFG_MODE[14]) and Select the clock source type
+ * (ELP_CFG_MODE[13:12])
+ */
+ tmp = ((scr_pad6 & 0x0000FF00) << 4) | 0x00004000;
+ wl12xx_reg_write32(wl, ELP_CFG_MODE, tmp);
+
+ /* PG 1.2: enable the BB PLL fix. Enable the PLL_LIMP_CLK_EN_CMD */
+ elp_cmd |= 0x00000040;
+ wl12xx_reg_write32(wl, ELP_CMD, elp_cmd);
+
+ /* PG 1.2: Set the BB PLL stable time to be 1000usec
+ * (PLL_STABLE_TIME) */
+ wl12xx_reg_write32(wl, CFG_PLL_SYNC_CNT, 0x20);
+
+ /* PG 1.2: read clock request time */
+ init_data = wl12xx_reg_read32(wl, CLK_REQ_TIME);
+
+ /*
+ * PG 1.2: set the clock request time to be ref_clk_settling_time -
+ * 1ms = 4ms
+ */
+ if (init_data > 0x21)
+ tmp = init_data - 0x21;
+ else
+ tmp = 0;
+ wl12xx_reg_write32(wl, CLK_REQ_TIME, tmp);
+
+ /* set BB PLL configurations in RF AFE */
+ wl12xx_reg_write32(wl, 0x003058cc, 0x4B5);
+
+ /* set RF_AFE_REG_5 */
+ wl12xx_reg_write32(wl, 0x003058d4, 0x50);
+
+ /* set RF_AFE_CTRL_REG_2 */
+ wl12xx_reg_write32(wl, 0x00305948, 0x11c001);
+
+ /*
+ * change RF PLL and BB PLL divider for VCO clock and adjust VCO
+ * bias current (RF_AFE_REG_13)
+ */
+ wl12xx_reg_write32(wl, 0x003058f4, 0x1e);
+
+ /* set BB PLL configurations */
+ tmp = LUT[ref_freq][LUT_PARAM_INTEGER_DIVIDER] | 0x00017000;
+ wl12xx_reg_write32(wl, 0x00305840, tmp);
+
+ /* set fractional divider according to Appendix C-BB PLL
+ * Calculations
+ */
+ tmp = LUT[ref_freq][LUT_PARAM_FRACTIONAL_DIVIDER];
+ wl12xx_reg_write32(wl, 0x00305844, tmp);
+
+ /* set the initial data for the sigma delta */
+ wl12xx_reg_write32(wl, 0x00305848, 0x3039);
+
+ /*
+ * set the accumulator attenuation value, calibration loop1
+ * (alpha), calibration loop2 (beta), calibration loop3 (gamma) and
+ * the VCO gain
+ */
+ tmp = (LUT[ref_freq][LUT_PARAM_ATTN_BB] << 16) |
+ (LUT[ref_freq][LUT_PARAM_ALPHA_BB] << 12) | 0x1;
+ wl12xx_reg_write32(wl, 0x00305854, tmp);
+
+ /*
+ * set the calibration stop time after holdoff time expires and set
+ * settling time HOLD_OFF_TIME_BB
+ */
+ tmp = LUT[ref_freq][LUT_PARAM_STOP_TIME_BB] | 0x000A0000;
+ wl12xx_reg_write32(wl, 0x00305858, tmp);
+
+ /*
+ * set BB PLL Loop filter capacitor3- BB_C3[2:0] and set BB PLL
+ * constant leakage current to linearize PFD to 0uA -
+ * BB_ILOOPF[7:3]
+ */
+ tmp = LUT[ref_freq][LUT_PARAM_BB_PLL_LOOP_FILTER] | 0x00000030;
+ wl12xx_reg_write32(wl, 0x003058f8, tmp);
+
+ /*
+ * set regulator output voltage for n divider to
+ * 1.35-BB_REFDIV[1:0], set charge pump current- BB_CPGAIN[4:2],
+ * set BB PLL Loop filter capacitor2- BB_C2[7:5], set gain of BB
+ * PLL auto-call to normal mode- BB_CALGAIN_3DB[8]
+ */
+ wl12xx_reg_write32(wl, 0x003058f0, 0x29);
+
+ /* enable restart wakeup sequence (ELP_CMD[0]) */
+ wl12xx_reg_write32(wl, ELP_CMD, elp_cmd | 0x1);
+
+ /* restart sequence completed */
+ udelay(2000);
+
+ return 0;
+}
+
+int wl12xx_boot_run_firmware(struct wl12xx *wl)
+{
+ int loop, ret;
+ u32 chip_id, interrupt;
+
+ wl->chip.op_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
+
+ chip_id = wl12xx_reg_read32(wl, CHIP_ID_B);
+
+ wl12xx_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
+
+ if (chip_id != wl->chip.id) {
+ wl12xx_error("chip id doesn't match after firmware boot");
+ return -EIO;
+ }
+
+ /* wait for init to complete */
+ loop = 0;
+ while (loop++ < INIT_LOOP) {
+ udelay(INIT_LOOP_DELAY);
+ interrupt = wl12xx_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+
+ if (interrupt == 0xffffffff) {
+ wl12xx_error("error reading hardware complete "
+ "init indication");
+ return -EIO;
+ }
+ /* check that ACX_INTR_INIT_COMPLETE is enabled */
+ else if (interrupt & wl->chip.intr_init_complete) {
+ wl12xx_reg_write32(wl, ACX_REG_INTERRUPT_ACK,
+ wl->chip.intr_init_complete);
+ break;
+ }
+ }
+
+ if (loop >= INIT_LOOP) {
+ wl12xx_error("timeout waiting for the hardware to "
+ "complete initialization");
+ return -EIO;
+ }
+
+ /* get hardware config command mail box */
+ wl->cmd_box_addr = wl12xx_reg_read32(wl, REG_COMMAND_MAILBOX_PTR);
+
+ /* get hardware config event mail box */
+ wl->event_box_addr = wl12xx_reg_read32(wl, REG_EVENT_MAILBOX_PTR);
+
+ /* set the working partition to its "running" mode offset */
+ wl12xx_set_partition(wl,
+ wl->chip.p_table[PART_WORK].mem.start,
+ wl->chip.p_table[PART_WORK].mem.size,
+ wl->chip.p_table[PART_WORK].reg.start,
+ wl->chip.p_table[PART_WORK].reg.size);
+
+ wl12xx_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x",
+ wl->cmd_box_addr, wl->event_box_addr);
+
+ /*
+ * in case of full asynchronous mode, the firmware event handling
+ * must be ready to receive events from the command mailbox
+ */
+
+ /* enable gpio interrupts */
+ wl12xx_boot_enable_interrupts(wl);
+
+ wl->chip.op_target_enable_interrupts(wl);
+
+ /* unmask all mbox events */
+ wl->event_mask = 0xffffffff;
+
+ ret = wl12xx_event_unmask(wl);
+ if (ret < 0) {
+ wl12xx_error("EVENT mask setting failed");
+ return ret;
+ }
+
+ wl12xx_event_mbox_config(wl);
+
+ /* firmware startup completed */
+ return 0;
+}
diff --git a/drivers/net/wireless/wl12xx/boot.h b/drivers/net/wireless/wl12xx/boot.h
new file mode 100644
index 00000000000..4fa73132baa
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/boot.h
@@ -0,0 +1,40 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __BOOT_H__
+#define __BOOT_H__
+
+#include "wl12xx.h"
+
+int wl12xx_boot_soft_reset(struct wl12xx *wl);
+int wl12xx_boot_init_seq(struct wl12xx *wl);
+int wl12xx_boot_run_firmware(struct wl12xx *wl);
+void wl12xx_boot_target_enable_interrupts(struct wl12xx *wl);
+
+/* number of times we try to read the INIT interrupt */
+#define INIT_LOOP 20000
+
+/* delay between retries */
+#define INIT_LOOP_DELAY 50
+
+#endif
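For reference, the two constants above bound the wait in wl12xx_boot_run_firmware(): at most INIT_LOOP retries of INIT_LOOP_DELAY microseconds each, i.e. 20000 x 50 us = 1,000,000 us, roughly one second before the driver reports a timeout (slightly more in practice, since every iteration also performs a register read).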
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
new file mode 100644
index 00000000000..f73ab602b7a
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -0,0 +1,353 @@
+#include "cmd.h"
+
+#include <linux/module.h>
+#include <linux/crc7.h>
+#include <linux/spi/spi.h>
+
+#include "wl12xx.h"
+#include "wl12xx_80211.h"
+#include "reg.h"
+#include "spi.h"
+#include "ps.h"
+
+int wl12xx_cmd_send(struct wl12xx *wl, u16 type, void *buf, size_t buf_len)
+{
+ struct wl12xx_command cmd;
+ unsigned long timeout;
+ size_t cmd_len;
+ u32 intr;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.id = type;
+ cmd.status = 0;
+ memcpy(cmd.parameters, buf, buf_len);
+ cmd_len = ALIGN(buf_len, 4) + CMDMBOX_HEADER_LEN;
+
+ wl12xx_ps_elp_wakeup(wl);
+
+ wl12xx_spi_mem_write(wl, wl->cmd_box_addr, &cmd, cmd_len);
+
+ wl12xx_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
+
+ timeout = jiffies + msecs_to_jiffies(WL12XX_COMMAND_TIMEOUT);
+
+ intr = wl12xx_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+ while (!(intr & wl->chip.intr_cmd_complete)) {
+ if (time_after(jiffies, timeout)) {
+ wl12xx_error("command complete timeout");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ msleep(1);
+
+ intr = wl12xx_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+ }
+
+ wl12xx_reg_write32(wl, ACX_REG_INTERRUPT_ACK,
+ wl->chip.intr_cmd_complete);
+
+out:
+ wl12xx_ps_elp_sleep(wl);
+
+ return ret;
+}
+
+int wl12xx_cmd_test(struct wl12xx *wl, void *buf, size_t buf_len, u8 answer)
+{
+ int ret;
+
+ wl12xx_debug(DEBUG_CMD, "cmd test");
+
+ ret = wl12xx_cmd_send(wl, CMD_TEST, buf, buf_len);
+ if (ret < 0) {
+ wl12xx_warning("TEST command failed");
+ return ret;
+ }
+
+ if (answer) {
+ struct wl12xx_command *cmd_answer;
+
+ /*
+ * The test command was accepted, so we can read the answer.
+ * The answer is a wl12xx_command, where the parameters
+ * array contains the actual response.
+ */
+
+ wl12xx_ps_elp_wakeup(wl);
+
+ wl12xx_spi_mem_read(wl, wl->cmd_box_addr, buf, buf_len);
+
+ wl12xx_ps_elp_sleep(wl);
+
+ cmd_answer = buf;
+ if (cmd_answer->status != CMD_STATUS_SUCCESS)
+ wl12xx_error("TEST command answer error: %d",
+ cmd_answer->status);
+ }
+
+ return 0;
+}
+
+
+int wl12xx_cmd_interrogate(struct wl12xx *wl, u16 ie_id, u16 ie_len,
+ void *answer)
+{
+ struct wl12xx_command *cmd;
+ struct acx_header header;
+ int ret;
+
+ wl12xx_debug(DEBUG_CMD, "cmd interrogate");
+
+ header.id = ie_id;
+ header.len = ie_len - sizeof(header);
+
+ ret = wl12xx_cmd_send(wl, CMD_INTERROGATE, &header, sizeof(header));
+ if (ret < 0) {
+ wl12xx_error("INTERROGATE command failed");
+ return ret;
+ }
+
+ wl12xx_ps_elp_wakeup(wl);
+
+ /* the interrogate command was accepted, we can read the answer */
+ wl12xx_spi_mem_read(wl, wl->cmd_box_addr, answer,
+ CMDMBOX_HEADER_LEN + ie_len);
+
+ wl12xx_ps_elp_sleep(wl);
+
+ cmd = answer;
+ if (cmd->status != CMD_STATUS_SUCCESS)
+ wl12xx_error("INTERROGATE command error: %d",
+ cmd->status);
+
+ return 0;
+
+}
+
+int wl12xx_cmd_configure(struct wl12xx *wl, void *ie, int ie_len)
+{
+ int ret;
+
+ wl12xx_debug(DEBUG_CMD, "cmd configure");
+
+ ret = wl12xx_cmd_send(wl, CMD_CONFIGURE, ie,
+ ie_len);
+ if (ret < 0) {
+ wl12xx_warning("CONFIGURE command NOK");
+ return ret;
+ }
+
+ return 0;
+
+}
+
+int wl12xx_cmd_vbm(struct wl12xx *wl, u8 identity,
+ void *bitmap, u16 bitmap_len, u8 bitmap_control)
+{
+ struct vbm_update_request vbm;
+ int ret;
+
+ wl12xx_debug(DEBUG_CMD, "cmd vbm");
+
+ /* Count and period will be filled by the target */
+ vbm.tim.bitmap_ctrl = bitmap_control;
+ if (bitmap_len > PARTIAL_VBM_MAX) {
+ wl12xx_warning("cmd vbm len is %d B, truncating to %d",
+ bitmap_len, PARTIAL_VBM_MAX);
+ bitmap_len = PARTIAL_VBM_MAX;
+ }
+ memcpy(vbm.tim.pvb_field, bitmap, bitmap_len);
+ vbm.tim.identity = identity;
+ vbm.tim.length = bitmap_len + 3;
+
+ vbm.len = cpu_to_le16(bitmap_len + 5);
+
+ ret = wl12xx_cmd_send(wl, CMD_VBM, &vbm, sizeof(vbm));
+ if (ret < 0) {
+ wl12xx_error("VBM command failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_cmd_data_path(struct wl12xx *wl, u8 channel, u8 enable)
+{
+ int ret;
+ u16 cmd_rx, cmd_tx;
+
+ wl12xx_debug(DEBUG_CMD, "cmd data path");
+
+ if (enable) {
+ cmd_rx = CMD_ENABLE_RX;
+ cmd_tx = CMD_ENABLE_TX;
+ } else {
+ cmd_rx = CMD_DISABLE_RX;
+ cmd_tx = CMD_DISABLE_TX;
+ }
+
+ ret = wl12xx_cmd_send(wl, cmd_rx, &channel, sizeof(channel));
+ if (ret < 0) {
+ wl12xx_error("rx %s cmd for channel %d failed",
+ enable ? "start" : "stop", channel);
+ return ret;
+ }
+
+ wl12xx_debug(DEBUG_BOOT, "rx %s cmd channel %d",
+ enable ? "start" : "stop", channel);
+
+ ret = wl12xx_cmd_send(wl, cmd_tx, &channel, sizeof(channel));
+ if (ret < 0) {
+ wl12xx_error("tx %s cmd for channel %d failed",
+ enable ? "start" : "stop", channel);
+ return ret;
+ }
+
+ wl12xx_debug(DEBUG_BOOT, "tx %s cmd channel %d",
+ enable ? "start" : "stop", channel);
+
+ return 0;
+}
+
+int wl12xx_cmd_join(struct wl12xx *wl, u8 bss_type, u8 dtim_interval,
+ u16 beacon_interval, u8 wait)
+{
+ unsigned long timeout;
+ struct cmd_join join = {};
+ int ret, i;
+ u8 *bssid;
+
+ /* FIXME: this should be in main.c */
+ ret = wl12xx_acx_frame_rates(wl, DEFAULT_HW_GEN_TX_RATE,
+ DEFAULT_HW_GEN_MODULATION_TYPE,
+ wl->tx_mgmt_frm_rate,
+ wl->tx_mgmt_frm_mod);
+ if (ret < 0)
+ return ret;
+
+ wl12xx_debug(DEBUG_CMD, "cmd join");
+
+ /* Reverse order BSSID */
+ bssid = (u8 *)&join.bssid_lsb;
+ for (i = 0; i < ETH_ALEN; i++)
+ bssid[i] = wl->bssid[ETH_ALEN - i - 1];
+
+ join.rx_config_options = wl->rx_config;
+ join.rx_filter_options = wl->rx_filter;
+
+ join.basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS |
+ RATE_MASK_5_5MBPS | RATE_MASK_11MBPS;
+
+ join.beacon_interval = beacon_interval;
+ join.dtim_interval = dtim_interval;
+ join.bss_type = bss_type;
+ join.channel = wl->channel;
+ join.ctrl = JOIN_CMD_CTRL_TX_FLUSH;
+
+ ret = wl12xx_cmd_send(wl, CMD_START_JOIN, &join, sizeof(join));
+ if (ret < 0) {
+ wl12xx_error("failed to initiate cmd join");
+ return ret;
+ }
+
+ timeout = msecs_to_jiffies(JOIN_TIMEOUT);
+
+ /*
+ * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
+ * simplify locking we just sleep instead, for now
+ */
+ if (wait)
+ msleep(10);
+
+ return 0;
+}
+
+int wl12xx_cmd_ps_mode(struct wl12xx *wl, u8 ps_mode)
+{
+ int ret;
+ struct acx_ps_params ps_params;
+
+ /* FIXME: this should be in ps.c */
+ ret = wl12xx_acx_wake_up_conditions(wl, wl->listen_int);
+ if (ret < 0) {
+ wl12xx_error("Couldnt set wake up conditions");
+ return ret;
+ }
+
+ wl12xx_debug(DEBUG_CMD, "cmd set ps mode");
+
+ ps_params.ps_mode = ps_mode;
+ ps_params.send_null_data = 1;
+ ps_params.retries = 5;
+ ps_params.hang_over_period = 128;
+ ps_params.null_data_rate = 1; /* 1 Mbps */
+
+ ret = wl12xx_cmd_send(wl, CMD_SET_PS_MODE, &ps_params,
+ sizeof(ps_params));
+ if (ret < 0) {
+ wl12xx_error("cmd set_ps_mode failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_cmd_read_memory(struct wl12xx *wl, u32 addr, u32 len, void *answer)
+{
+ struct cmd_read_write_memory mem_cmd, *mem_answer;
+ struct wl12xx_command cmd;
+ int ret;
+
+ wl12xx_debug(DEBUG_CMD, "cmd read memory");
+
+ memset(&mem_cmd, 0, sizeof(mem_cmd));
+ mem_cmd.addr = addr;
+ mem_cmd.size = len;
+
+ ret = wl12xx_cmd_send(wl, CMD_READ_MEMORY, &mem_cmd, sizeof(mem_cmd));
+ if (ret < 0) {
+ wl12xx_error("read memory command failed: %d", ret);
+ return ret;
+ }
+
+ /* the read command was accepted, we can now read the answer */
+ wl12xx_spi_mem_read(wl, wl->cmd_box_addr, &cmd,
+ CMDMBOX_HEADER_LEN + sizeof(mem_cmd));
+
+ if (cmd.status != CMD_STATUS_SUCCESS)
+ wl12xx_error("error in read command result: %d", cmd.status);
+
+ mem_answer = (struct cmd_read_write_memory *) cmd.parameters;
+ memcpy(answer, mem_answer->value, len);
+
+ return 0;
+}
+
+int wl12xx_cmd_template_set(struct wl12xx *wl, u16 cmd_id,
+ void *buf, size_t buf_len)
+{
+ struct wl12xx_cmd_packet_template template;
+ int ret;
+
+ wl12xx_debug(DEBUG_CMD, "cmd template %d", cmd_id);
+
+ memset(&template, 0, sizeof(template));
+
+ WARN_ON(buf_len > WL12XX_MAX_TEMPLATE_SIZE);
+ buf_len = min_t(size_t, buf_len, WL12XX_MAX_TEMPLATE_SIZE);
+ template.size = cpu_to_le16(buf_len);
+
+ if (buf)
+ memcpy(template.template, buf, buf_len);
+
+ ret = wl12xx_cmd_send(wl, cmd_id, &template,
+ sizeof(template.size) + buf_len);
+ if (ret < 0) {
+ wl12xx_warning("cmd set_template failed: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
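A note on using wl12xx_cmd_interrogate() above: the answer buffer has to leave room for the command mailbox header, because the function reads CMDMBOX_HEADER_LEN + ie_len bytes back and the element itself only starts after that header. A minimal usage sketch under that reading; the wrapper, the my_acx_ie struct, and the assumption that the element embeds its acx_header (and that ie_len counts it) are illustrative, not taken from the patch.

/* Illustrative sketch, not part of the patch: reading one information
 * element with wl12xx_cmd_interrogate(). The wrapper and the struct
 * name my_acx_ie are hypothetical; the offsets follow cmd.c above. */
struct my_acx_ie {
	struct acx_header header;
	u32 some_value;
} __attribute__ ((packed));

static int wl12xx_read_my_ie(struct wl12xx *wl, u16 ie_id,
			     struct my_acx_ie *out)
{
	u8 buf[CMDMBOX_HEADER_LEN + sizeof(*out)];
	int ret;

	/* ie_len includes the acx_header, as cmd_interrogate expects */
	ret = wl12xx_cmd_interrogate(wl, ie_id, sizeof(*out), buf);
	if (ret < 0)
		return ret;

	/* the element starts right after the command mailbox header */
	memcpy(out, buf + CMDMBOX_HEADER_LEN, sizeof(*out));
	return 0;
}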
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
new file mode 100644
index 00000000000..aa307dcd081
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -0,0 +1,265 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (c) 1998-2007 Texas Instruments Incorporated
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_CMD_H__
+#define __WL12XX_CMD_H__
+
+#include "wl12xx.h"
+
+int wl12xx_cmd_send(struct wl12xx *wl, u16 type, void *buf, size_t buf_len);
+int wl12xx_cmd_test(struct wl12xx *wl, void *buf, size_t buf_len, u8 answer);
+int wl12xx_cmd_interrogate(struct wl12xx *wl, u16 ie_id, u16 ie_len,
+ void *answer);
+int wl12xx_cmd_configure(struct wl12xx *wl, void *ie, int ie_len);
+int wl12xx_cmd_vbm(struct wl12xx *wl, u8 identity,
+ void *bitmap, u16 bitmap_len, u8 bitmap_control);
+int wl12xx_cmd_data_path(struct wl12xx *wl, u8 channel, u8 enable);
+int wl12xx_cmd_join(struct wl12xx *wl, u8 bss_type, u8 dtim_interval,
+ u16 beacon_interval, u8 wait);
+int wl12xx_cmd_ps_mode(struct wl12xx *wl, u8 ps_mode);
+int wl12xx_cmd_read_memory(struct wl12xx *wl, u32 addr, u32 len, void *answer);
+int wl12xx_cmd_template_set(struct wl12xx *wl, u16 cmd_id,
+ void *buf, size_t buf_len);
+
+/* unit ms */
+#define WL12XX_COMMAND_TIMEOUT 2000
+
+#define WL12XX_MAX_TEMPLATE_SIZE 300
+
+struct wl12xx_cmd_packet_template {
+ __le16 size;
+ u8 template[WL12XX_MAX_TEMPLATE_SIZE];
+} __attribute__ ((packed));
+
+enum wl12xx_commands {
+ CMD_RESET = 0,
+ CMD_INTERROGATE = 1, /* use this to read information elements */
+ CMD_CONFIGURE = 2, /* use this to write information elements */
+ CMD_ENABLE_RX = 3,
+ CMD_ENABLE_TX = 4,
+ CMD_DISABLE_RX = 5,
+ CMD_DISABLE_TX = 6,
+ CMD_SCAN = 8,
+ CMD_STOP_SCAN = 9,
+ CMD_VBM = 10,
+ CMD_START_JOIN = 11,
+ CMD_SET_KEYS = 12,
+ CMD_READ_MEMORY = 13,
+ CMD_WRITE_MEMORY = 14,
+ CMD_BEACON = 19,
+ CMD_PROBE_RESP = 20,
+ CMD_NULL_DATA = 21,
+ CMD_PROBE_REQ = 22,
+ CMD_TEST = 23,
+ CMD_RADIO_CALIBRATE = 25, /* OBSOLETE */
+ CMD_ENABLE_RX_PATH = 27, /* OBSOLETE */
+ CMD_NOISE_HIST = 28,
+ CMD_RX_RESET = 29,
+ CMD_PS_POLL = 30,
+ CMD_QOS_NULL_DATA = 31,
+ CMD_LNA_CONTROL = 32,
+ CMD_SET_BCN_MODE = 33,
+ CMD_MEASUREMENT = 34,
+ CMD_STOP_MEASUREMENT = 35,
+ CMD_DISCONNECT = 36,
+ CMD_SET_PS_MODE = 37,
+ CMD_CHANNEL_SWITCH = 38,
+ CMD_STOP_CHANNEL_SWICTH = 39,
+ CMD_AP_DISCOVERY = 40,
+ CMD_STOP_AP_DISCOVERY = 41,
+ CMD_SPS_SCAN = 42,
+ CMD_STOP_SPS_SCAN = 43,
+ CMD_HEALTH_CHECK = 45,
+ CMD_DEBUG = 46,
+ CMD_TRIGGER_SCAN_TO = 47,
+
+ NUM_COMMANDS,
+ MAX_COMMAND_ID = 0xFFFF,
+};
+
+#define MAX_CMD_PARAMS 572
+
+struct wl12xx_command {
+ u16 id;
+ u16 status;
+ u8 parameters[MAX_CMD_PARAMS];
+};
+
+enum {
+ CMD_MAILBOX_IDLE = 0,
+ CMD_STATUS_SUCCESS = 1,
+ CMD_STATUS_UNKNOWN_CMD = 2,
+ CMD_STATUS_UNKNOWN_IE = 3,
+ CMD_STATUS_REJECT_MEAS_SG_ACTIVE = 11,
+ CMD_STATUS_RX_BUSY = 13,
+ CMD_STATUS_INVALID_PARAM = 14,
+ CMD_STATUS_TEMPLATE_TOO_LARGE = 15,
+ CMD_STATUS_OUT_OF_MEMORY = 16,
+ CMD_STATUS_STA_TABLE_FULL = 17,
+ CMD_STATUS_RADIO_ERROR = 18,
+ CMD_STATUS_WRONG_NESTING = 19,
+ CMD_STATUS_TIMEOUT = 21, /* Driver internal use.*/
+ CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/
+ MAX_COMMAND_STATUS = 0xff
+};
+
+
+/*
+ * CMD_READ_MEMORY
+ *
+ * The host issues this command to read the WiLink device memory/registers.
+ *
+ * Note: The Base Band address has special handling (16-bit registers and
+ * addresses). For more information, see the hardware specification.
+ */
+/*
+ * CMD_WRITE_MEMORY
+ *
+ * The host issues this command to write the WiLink device memory/registers.
+ *
+ * The Base Band address has special handling (16-bit registers and
+ * addresses). For more information, see the hardware specification.
+ */
+#define MAX_READ_SIZE 256
+
+struct cmd_read_write_memory {
+ /* The address of the memory to read from or write to.*/
+ u32 addr;
+
+ /* The amount of data in bytes to read from or write to the WiLink
+ * device.*/
+ u32 size;
+
+ /* The actual value read from or written to the WiLink. The source
+ of this field is the host in a WRITE command, or the WiLink in a
+ READ command. */
+ u8 value[MAX_READ_SIZE];
+};
+
+#define CMDMBOX_HEADER_LEN 4
+#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
+
+
+struct basic_scan_parameters {
+ u32 rx_config_options;
+ u32 rx_filter_options;
+
+ /*
+ * Scan options:
+ * bit 0: When this bit is set, passive scan.
+ * bit 1: Band, when this bit is set we scan
+ * in the 5GHz band.
+ * bit 2: voice mode, 0 for normal scan.
+ * bit 3: scan priority, 1 for high priority.
+ */
+ u16 scan_options;
+
+ /* Number of channels to scan */
+ u8 num_channels;
+
+ /* Number of probe requests to send, per channel */
+ u8 num_probe_requests;
+
+ /* Rate and modulation for probe requests */
+ u16 tx_rate;
+
+ u8 tid_trigger;
+ u8 ssid_len;
+ u32 ssid[8];
+
+} __attribute__ ((packed));
+
+struct basic_scan_channel_parameters {
+ u32 min_duration; /* in TU */
+ u32 max_duration; /* in TU */
+ u32 bssid_lsb;
+ u16 bssid_msb;
+
+ /*
+ * bits 0-3: Early termination count.
+ * bits 4-5: Early termination condition.
+ */
+ u8 early_termination;
+
+ u8 tx_power_att;
+ u8 channel;
+ u8 pad[3];
+} __attribute__ ((packed));
+
+/* SCAN parameters */
+#define SCAN_MAX_NUM_OF_CHANNELS 16
+
+struct cmd_scan {
+ struct basic_scan_parameters params;
+ struct basic_scan_channel_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
+} __attribute__ ((packed));
+
+enum {
+ BSS_TYPE_IBSS = 0,
+ BSS_TYPE_STA_BSS = 2,
+ BSS_TYPE_AP_BSS = 3,
+ MAX_BSS_TYPE = 0xFF
+};
+
+#define JOIN_CMD_CTRL_TX_FLUSH 0x80 /* Firmware flushes all Tx */
+#define JOIN_CMD_CTRL_EARLY_WAKEUP_ENABLE 0x01 /* Early wakeup time */
+
+
+struct cmd_join {
+ u32 bssid_lsb;
+ u16 bssid_msb;
+ u16 beacon_interval; /* in TBTTs */
+ u32 rx_config_options;
+ u32 rx_filter_options;
+
+ /*
+ * The target uses this field to determine the rate at
+ * which to transmit control frame responses (such as
+ * ACK or CTS frames).
+ */
+ u16 basic_rate_set;
+ u8 dtim_interval;
+ u8 tx_ctrl_frame_rate; /* OBSOLETE */
+ u8 tx_ctrl_frame_mod; /* OBSOLETE */
+ /*
+ * bits 0-2: This bitwise field specifies the type
+ * of BSS to start or join (BSS_TYPE_*).
+ * bit 4: Band - The radio band in which to join
+ * or start.
+ * 0 - 2.4GHz band
+ * 1 - 5GHz band
+ * bits 3, 5-7: Reserved
+ */
+ u8 bss_type;
+ u8 channel;
+ u8 ssid_len;
+ u8 ssid[IW_ESSID_MAX_SIZE];
+ u8 ctrl; /* JOIN_CMD_CTRL_* */
+ u8 tx_mgt_frame_rate; /* OBSOLETE */
+ u8 tx_mgt_frame_mod; /* OBSOLETE */
+ u8 reserved;
+} __attribute__ ((packed));
+
+
+#endif /* __WL12XX_CMD_H__ */
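The bss_type field of struct cmd_join above is a packed byte: bits 0-2 carry a BSS_TYPE_* value and bit 4 selects the band. A tiny sketch of that packing, for illustration only; the helper name is hypothetical.

/* Illustrative sketch, not part of the patch: packing cmd_join.bss_type
 * per the bit layout documented above (bits 0-2: BSS_TYPE_*, bit 4:
 * band, 0 = 2.4GHz, 1 = 5GHz). */
static inline u8 wl12xx_pack_bss_type(u8 bss_type, bool band_5ghz)
{
	return (bss_type & 0x07) | (band_5ghz ? BIT(4) : 0);
}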
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
new file mode 100644
index 00000000000..cdb368ce4da
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -0,0 +1,508 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "debugfs.h"
+
+#include <linux/skbuff.h>
+
+#include "wl12xx.h"
+#include "acx.h"
+
+/* ms */
+#define WL12XX_DEBUGFS_STATS_LIFETIME 1000
+
+/* debugfs macros idea from mac80211 */
+
+#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \
+static ssize_t name## _read(struct file *file, char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct wl12xx *wl = file->private_data; \
+ char buf[buflen]; \
+ int res; \
+ \
+ res = scnprintf(buf, buflen, fmt "\n", ##value); \
+ return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
+} \
+ \
+static const struct file_operations name## _ops = { \
+ .read = name## _read, \
+ .open = wl12xx_open_file_generic, \
+};
+
+#define DEBUGFS_ADD(name, parent) \
+ wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \
+ wl, &name## _ops); \
+ if (IS_ERR(wl->debugfs.name)) { \
+ ret = PTR_ERR(wl->debugfs.name); \
+ wl->debugfs.name = NULL; \
+ goto out; \
+ }
+
+#define DEBUGFS_DEL(name) \
+ do { \
+ debugfs_remove(wl->debugfs.name); \
+ wl->debugfs.name = NULL; \
+ } while (0)
+
+#define DEBUGFS_FWSTATS_FILE(sub, name, buflen, fmt) \
+static ssize_t sub## _ ##name## _read(struct file *file, \
+ char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct wl12xx *wl = file->private_data; \
+ char buf[buflen]; \
+ int res; \
+ \
+ wl12xx_debugfs_update_stats(wl); \
+ \
+ res = scnprintf(buf, buflen, fmt "\n", \
+ wl->stats.fw_stats->sub.name); \
+ return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
+} \
+ \
+static const struct file_operations sub## _ ##name## _ops = { \
+ .read = sub## _ ##name## _read, \
+ .open = wl12xx_open_file_generic, \
+};
+
+#define DEBUGFS_FWSTATS_ADD(sub, name) \
+ DEBUGFS_ADD(sub## _ ##name, wl->debugfs.fw_statistics)
+
+#define DEBUGFS_FWSTATS_DEL(sub, name) \
+ DEBUGFS_DEL(sub## _ ##name)
+
+static void wl12xx_debugfs_update_stats(struct wl12xx *wl)
+{
+ mutex_lock(&wl->mutex);
+
+ if (wl->state == WL12XX_STATE_ON &&
+ time_after(jiffies, wl->stats.fw_stats_update +
+ msecs_to_jiffies(WL12XX_DEBUGFS_STATS_LIFETIME))) {
+ wl12xx_acx_statistics(wl, wl->stats.fw_stats);
+ wl->stats.fw_stats_update = jiffies;
+ }
+
+ mutex_unlock(&wl->mutex);
+}
+
+static int wl12xx_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, 20, "%u");
+
+DEBUGFS_FWSTATS_FILE(rx, out_of_mem, 20, "%u");
+DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, 20, "%u");
+DEBUGFS_FWSTATS_FILE(rx, hw_stuck, 20, "%u");
+DEBUGFS_FWSTATS_FILE(rx, dropped, 20, "%u");
+DEBUGFS_FWSTATS_FILE(rx, fcs_err, 20, "%u");
+DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, 20, "%u");
+DEBUGFS_FWSTATS_FILE(rx, path_reset, 20, "%u");
+DEBUGFS_FWSTATS_FILE(rx, reset_counter, 20, "%u");
+
+DEBUGFS_FWSTATS_FILE(dma, rx_requested, 20, "%u");
+DEBUGFS_FWSTATS_FILE(dma, rx_errors, 20, "%u");
+DEBUGFS_FWSTATS_FILE(dma, tx_requested, 20, "%u");
+DEBUGFS_FWSTATS_FILE(dma, tx_errors, 20, "%u");
+
+DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, fiqs, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, rx_headers, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, rx_rdys, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, irqs, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, tx_procs, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, decrypt_done, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, dma0_done, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, dma1_done, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, commands, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, rx_procs, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, pci_pm, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, wakeups, 20, "%u");
+DEBUGFS_FWSTATS_FILE(isr, low_rssi, 20, "%u");
+
+DEBUGFS_FWSTATS_FILE(wep, addr_key_count, 20, "%u");
+DEBUGFS_FWSTATS_FILE(wep, default_key_count, 20, "%u");
+/* skipping wep.reserved */
+DEBUGFS_FWSTATS_FILE(wep, key_not_found, 20, "%u");
+DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, 20, "%u");
+DEBUGFS_FWSTATS_FILE(wep, packets, 20, "%u");
+DEBUGFS_FWSTATS_FILE(wep, interrupt, 20, "%u");
+
+DEBUGFS_FWSTATS_FILE(pwr, ps_enter, 20, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, elp_enter, 20, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, 20, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, 20, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, 20, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, 20, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, 20, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, 20, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, power_save_off, 20, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, enable_ps, 20, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, disable_ps, 20, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, 20, "%u");
+/* skipping cont_miss_bcns_spread for now */
+DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, 20, "%u");
+
+DEBUGFS_FWSTATS_FILE(mic, rx_pkts, 20, "%u");
+DEBUGFS_FWSTATS_FILE(mic, calc_failure, 20, "%u");
+
+DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, 20, "%u");
+DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, 20, "%u");
+DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, 20, "%u");
+DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, 20, "%u");
+DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, 20, "%u");
+DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, 20, "%u");
+
+DEBUGFS_FWSTATS_FILE(event, heart_beat, 20, "%u");
+DEBUGFS_FWSTATS_FILE(event, calibration, 20, "%u");
+DEBUGFS_FWSTATS_FILE(event, rx_mismatch, 20, "%u");
+DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, 20, "%u");
+DEBUGFS_FWSTATS_FILE(event, rx_pool, 20, "%u");
+DEBUGFS_FWSTATS_FILE(event, oom_late, 20, "%u");
+DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, 20, "%u");
+DEBUGFS_FWSTATS_FILE(event, tx_stuck, 20, "%u");
+
+DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, 20, "%u");
+DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, 20, "%u");
+DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, 20, "%u");
+DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, 20, "%u");
+DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, 20, "%u");
+DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, 20, "%u");
+DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, 20, "%u");
+
+DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, 20, "%u");
+DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, 20, "%u");
+DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data,
+ 20, "%u");
+DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, 20, "%u");
+DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, 20, "%u");
+
+DEBUGFS_READONLY_FILE(retry_count, 20, "%u", wl->stats.retry_count);
+DEBUGFS_READONLY_FILE(excessive_retries, 20, "%u",
+ wl->stats.excessive_retries);
+
+static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wl12xx *wl = file->private_data;
+ u32 queue_len;
+ char buf[20];
+ int res;
+
+ queue_len = skb_queue_len(&wl->tx_queue);
+
+ res = scnprintf(buf, sizeof(buf), "%u\n", queue_len);
+ return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+}
+
+static const struct file_operations tx_queue_len_ops = {
+ .read = tx_queue_len_read,
+ .open = wl12xx_open_file_generic,
+};
+
+static void wl12xx_debugfs_delete_files(struct wl12xx *wl)
+{
+ DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
+
+ DEBUGFS_FWSTATS_DEL(rx, out_of_mem);
+ DEBUGFS_FWSTATS_DEL(rx, hdr_overflow);
+ DEBUGFS_FWSTATS_DEL(rx, hw_stuck);
+ DEBUGFS_FWSTATS_DEL(rx, dropped);
+ DEBUGFS_FWSTATS_DEL(rx, fcs_err);
+ DEBUGFS_FWSTATS_DEL(rx, xfr_hint_trig);
+ DEBUGFS_FWSTATS_DEL(rx, path_reset);
+ DEBUGFS_FWSTATS_DEL(rx, reset_counter);
+
+ DEBUGFS_FWSTATS_DEL(dma, rx_requested);
+ DEBUGFS_FWSTATS_DEL(dma, rx_errors);
+ DEBUGFS_FWSTATS_DEL(dma, tx_requested);
+ DEBUGFS_FWSTATS_DEL(dma, tx_errors);
+
+ DEBUGFS_FWSTATS_DEL(isr, cmd_cmplt);
+ DEBUGFS_FWSTATS_DEL(isr, fiqs);
+ DEBUGFS_FWSTATS_DEL(isr, rx_headers);
+ DEBUGFS_FWSTATS_DEL(isr, rx_mem_overflow);
+ DEBUGFS_FWSTATS_DEL(isr, rx_rdys);
+ DEBUGFS_FWSTATS_DEL(isr, irqs);
+ DEBUGFS_FWSTATS_DEL(isr, tx_procs);
+ DEBUGFS_FWSTATS_DEL(isr, decrypt_done);
+ DEBUGFS_FWSTATS_DEL(isr, dma0_done);
+ DEBUGFS_FWSTATS_DEL(isr, dma1_done);
+ DEBUGFS_FWSTATS_DEL(isr, tx_exch_complete);
+ DEBUGFS_FWSTATS_DEL(isr, commands);
+ DEBUGFS_FWSTATS_DEL(isr, rx_procs);
+ DEBUGFS_FWSTATS_DEL(isr, hw_pm_mode_changes);
+ DEBUGFS_FWSTATS_DEL(isr, host_acknowledges);
+ DEBUGFS_FWSTATS_DEL(isr, pci_pm);
+ DEBUGFS_FWSTATS_DEL(isr, wakeups);
+ DEBUGFS_FWSTATS_DEL(isr, low_rssi);
+
+ DEBUGFS_FWSTATS_DEL(wep, addr_key_count);
+ DEBUGFS_FWSTATS_DEL(wep, default_key_count);
+ /* skipping wep.reserved */
+ DEBUGFS_FWSTATS_DEL(wep, key_not_found);
+ DEBUGFS_FWSTATS_DEL(wep, decrypt_fail);
+ DEBUGFS_FWSTATS_DEL(wep, packets);
+ DEBUGFS_FWSTATS_DEL(wep, interrupt);
+
+ DEBUGFS_FWSTATS_DEL(pwr, ps_enter);
+ DEBUGFS_FWSTATS_DEL(pwr, elp_enter);
+ DEBUGFS_FWSTATS_DEL(pwr, missing_bcns);
+ DEBUGFS_FWSTATS_DEL(pwr, wake_on_host);
+ DEBUGFS_FWSTATS_DEL(pwr, wake_on_timer_exp);
+ DEBUGFS_FWSTATS_DEL(pwr, tx_with_ps);
+ DEBUGFS_FWSTATS_DEL(pwr, tx_without_ps);
+ DEBUGFS_FWSTATS_DEL(pwr, rcvd_beacons);
+ DEBUGFS_FWSTATS_DEL(pwr, power_save_off);
+ DEBUGFS_FWSTATS_DEL(pwr, enable_ps);
+ DEBUGFS_FWSTATS_DEL(pwr, disable_ps);
+ DEBUGFS_FWSTATS_DEL(pwr, fix_tsf_ps);
+ /* skipping cont_miss_bcns_spread for now */
+ DEBUGFS_FWSTATS_DEL(pwr, rcvd_awake_beacons);
+
+ DEBUGFS_FWSTATS_DEL(mic, rx_pkts);
+ DEBUGFS_FWSTATS_DEL(mic, calc_failure);
+
+ DEBUGFS_FWSTATS_DEL(aes, encrypt_fail);
+ DEBUGFS_FWSTATS_DEL(aes, decrypt_fail);
+ DEBUGFS_FWSTATS_DEL(aes, encrypt_packets);
+ DEBUGFS_FWSTATS_DEL(aes, decrypt_packets);
+ DEBUGFS_FWSTATS_DEL(aes, encrypt_interrupt);
+ DEBUGFS_FWSTATS_DEL(aes, decrypt_interrupt);
+
+ DEBUGFS_FWSTATS_DEL(event, heart_beat);
+ DEBUGFS_FWSTATS_DEL(event, calibration);
+ DEBUGFS_FWSTATS_DEL(event, rx_mismatch);
+ DEBUGFS_FWSTATS_DEL(event, rx_mem_empty);
+ DEBUGFS_FWSTATS_DEL(event, rx_pool);
+ DEBUGFS_FWSTATS_DEL(event, oom_late);
+ DEBUGFS_FWSTATS_DEL(event, phy_transmit_error);
+ DEBUGFS_FWSTATS_DEL(event, tx_stuck);
+
+ DEBUGFS_FWSTATS_DEL(ps, pspoll_timeouts);
+ DEBUGFS_FWSTATS_DEL(ps, upsd_timeouts);
+ DEBUGFS_FWSTATS_DEL(ps, upsd_max_sptime);
+ DEBUGFS_FWSTATS_DEL(ps, upsd_max_apturn);
+ DEBUGFS_FWSTATS_DEL(ps, pspoll_max_apturn);
+ DEBUGFS_FWSTATS_DEL(ps, pspoll_utilization);
+ DEBUGFS_FWSTATS_DEL(ps, upsd_utilization);
+
+ DEBUGFS_FWSTATS_DEL(rxpipe, rx_prep_beacon_drop);
+ DEBUGFS_FWSTATS_DEL(rxpipe, descr_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_DEL(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_DEL(rxpipe, missed_beacon_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data);
+
+ DEBUGFS_DEL(tx_queue_len);
+ DEBUGFS_DEL(retry_count);
+ DEBUGFS_DEL(excessive_retries);
+}
+
+static int wl12xx_debugfs_add_files(struct wl12xx *wl)
+{
+ int ret = 0;
+
+ DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
+
+ DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
+ DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
+ DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
+ DEBUGFS_FWSTATS_ADD(rx, dropped);
+ DEBUGFS_FWSTATS_ADD(rx, fcs_err);
+ DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
+ DEBUGFS_FWSTATS_ADD(rx, path_reset);
+ DEBUGFS_FWSTATS_ADD(rx, reset_counter);
+
+ DEBUGFS_FWSTATS_ADD(dma, rx_requested);
+ DEBUGFS_FWSTATS_ADD(dma, rx_errors);
+ DEBUGFS_FWSTATS_ADD(dma, tx_requested);
+ DEBUGFS_FWSTATS_ADD(dma, tx_errors);
+
+ DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
+ DEBUGFS_FWSTATS_ADD(isr, fiqs);
+ DEBUGFS_FWSTATS_ADD(isr, rx_headers);
+ DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
+ DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
+ DEBUGFS_FWSTATS_ADD(isr, irqs);
+ DEBUGFS_FWSTATS_ADD(isr, tx_procs);
+ DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
+ DEBUGFS_FWSTATS_ADD(isr, dma0_done);
+ DEBUGFS_FWSTATS_ADD(isr, dma1_done);
+ DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
+ DEBUGFS_FWSTATS_ADD(isr, commands);
+ DEBUGFS_FWSTATS_ADD(isr, rx_procs);
+ DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
+ DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
+ DEBUGFS_FWSTATS_ADD(isr, pci_pm);
+ DEBUGFS_FWSTATS_ADD(isr, wakeups);
+ DEBUGFS_FWSTATS_ADD(isr, low_rssi);
+
+ DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
+ DEBUGFS_FWSTATS_ADD(wep, default_key_count);
+ /* skipping wep.reserved */
+ DEBUGFS_FWSTATS_ADD(wep, key_not_found);
+ DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
+ DEBUGFS_FWSTATS_ADD(wep, packets);
+ DEBUGFS_FWSTATS_ADD(wep, interrupt);
+
+ DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
+ DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
+ DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
+ DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
+ DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
+ DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
+ DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
+ DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
+ /* skipping cont_miss_bcns_spread for now */
+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
+
+ DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
+ DEBUGFS_FWSTATS_ADD(mic, calc_failure);
+
+ DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
+ DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
+ DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
+ DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
+ DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
+ DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
+
+ DEBUGFS_FWSTATS_ADD(event, heart_beat);
+ DEBUGFS_FWSTATS_ADD(event, calibration);
+ DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
+ DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
+ DEBUGFS_FWSTATS_ADD(event, rx_pool);
+ DEBUGFS_FWSTATS_ADD(event, oom_late);
+ DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
+ DEBUGFS_FWSTATS_ADD(event, tx_stuck);
+
+ DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
+ DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
+ DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
+
+ DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
+ DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
+
+ DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir);
+ DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
+ DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
+
+out:
+ if (ret < 0)
+ wl12xx_debugfs_delete_files(wl);
+
+ return ret;
+}
+
+void wl12xx_debugfs_reset(struct wl12xx *wl)
+{
+ memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
+ wl->stats.retry_count = 0;
+ wl->stats.excessive_retries = 0;
+}
+
+int wl12xx_debugfs_init(struct wl12xx *wl)
+{
+ int ret;
+
+ wl->debugfs.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ if (IS_ERR(wl->debugfs.rootdir)) {
+ ret = PTR_ERR(wl->debugfs.rootdir);
+ wl->debugfs.rootdir = NULL;
+ goto err;
+ }
+
+ wl->debugfs.fw_statistics = debugfs_create_dir("fw-statistics",
+ wl->debugfs.rootdir);
+
+ if (IS_ERR(wl->debugfs.fw_statistics)) {
+ ret = PTR_ERR(wl->debugfs.fw_statistics);
+ wl->debugfs.fw_statistics = NULL;
+ goto err_root;
+ }
+
+ wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats),
+ GFP_KERNEL);
+
+ if (!wl->stats.fw_stats) {
+ ret = -ENOMEM;
+ goto err_fw;
+ }
+
+ wl->stats.fw_stats_update = jiffies;
+
+ ret = wl12xx_debugfs_add_files(wl);
+
+ if (ret < 0)
+ goto err_file;
+
+ return 0;
+
+err_file:
+ kfree(wl->stats.fw_stats);
+ wl->stats.fw_stats = NULL;
+
+err_fw:
+ debugfs_remove(wl->debugfs.fw_statistics);
+ wl->debugfs.fw_statistics = NULL;
+
+err_root:
+ debugfs_remove(wl->debugfs.rootdir);
+ wl->debugfs.rootdir = NULL;
+
+err:
+ return ret;
+}
+
+void wl12xx_debugfs_exit(struct wl12xx *wl)
+{
+ wl12xx_debugfs_delete_files(wl);
+
+ kfree(wl->stats.fw_stats);
+ wl->stats.fw_stats = NULL;
+
+ debugfs_remove(wl->debugfs.fw_statistics);
+ wl->debugfs.fw_statistics = NULL;
+
+ debugfs_remove(wl->debugfs.rootdir);
+ wl->debugfs.rootdir = NULL;
+
+}
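Every per-counter file above comes from DEBUGFS_FWSTATS_FILE, which token-pastes the statistics group and field names into a read handler and a matching file_operations; DEBUGFS_FWSTATS_ADD/DEL later refer to the same pasted names. For illustration, DEBUGFS_FWSTATS_FILE(rx, dropped, 20, "%u") expands to roughly the following (shown here only to make the generated names visible):

/* Rough expansion, shown for illustration only; the real code is the
 * macro invocation above. */
static ssize_t rx_dropped_read(struct file *file, char __user *userbuf,
			       size_t count, loff_t *ppos)
{
	struct wl12xx *wl = file->private_data;
	char buf[20];
	int res;

	wl12xx_debugfs_update_stats(wl);

	res = scnprintf(buf, 20, "%u\n", wl->stats.fw_stats->rx.dropped);
	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}

static const struct file_operations rx_dropped_ops = {
	.read = rx_dropped_read,
	.open = wl12xx_open_file_generic,
};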
diff --git a/drivers/net/wireless/wl12xx/debugfs.h b/drivers/net/wireless/wl12xx/debugfs.h
new file mode 100644
index 00000000000..562cdcbcc87
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/debugfs.h
@@ -0,0 +1,33 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef WL12XX_DEBUGFS_H
+#define WL12XX_DEBUGFS_H
+
+#include "wl12xx.h"
+
+int wl12xx_debugfs_init(struct wl12xx *wl);
+void wl12xx_debugfs_exit(struct wl12xx *wl);
+void wl12xx_debugfs_reset(struct wl12xx *wl);
+
+#endif /* WL12XX_DEBUGFS_H */
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
new file mode 100644
index 00000000000..99529ca89a7
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -0,0 +1,127 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (c) 1998-2007 Texas Instruments Incorporated
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "wl12xx.h"
+#include "reg.h"
+#include "spi.h"
+#include "event.h"
+#include "ps.h"
+
+static int wl12xx_event_scan_complete(struct wl12xx *wl,
+ struct event_mailbox *mbox)
+{
+ wl12xx_debug(DEBUG_EVENT, "status: 0x%x, channels: %d",
+ mbox->scheduled_scan_status,
+ mbox->scheduled_scan_channels);
+
+ if (wl->scanning) {
+ mutex_unlock(&wl->mutex);
+ ieee80211_scan_completed(wl->hw, false);
+ mutex_lock(&wl->mutex);
+ wl->scanning = false;
+ }
+
+ return 0;
+}
+
+static void wl12xx_event_mbox_dump(struct event_mailbox *mbox)
+{
+ wl12xx_debug(DEBUG_EVENT, "MBOX DUMP:");
+ wl12xx_debug(DEBUG_EVENT, "\tvector: 0x%x", mbox->events_vector);
+ wl12xx_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask);
+}
+
+static int wl12xx_event_process(struct wl12xx *wl, struct event_mailbox *mbox)
+{
+ int ret;
+ u32 vector;
+
+ wl12xx_event_mbox_dump(mbox);
+
+ vector = mbox->events_vector & ~(mbox->events_mask);
+ wl12xx_debug(DEBUG_EVENT, "vector: 0x%x", vector);
+
+ if (vector & SCAN_COMPLETE_EVENT_ID) {
+ ret = wl12xx_event_scan_complete(wl, mbox);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (vector & BSS_LOSE_EVENT_ID) {
+ wl12xx_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
+
+ if (wl->psm_requested && wl->psm) {
+ ret = wl12xx_ps_set_mode(wl, STATION_ACTIVE_MODE);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int wl12xx_event_unmask(struct wl12xx *wl)
+{
+ int ret;
+
+ ret = wl12xx_acx_event_mbox_mask(wl, ~(wl->event_mask));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+void wl12xx_event_mbox_config(struct wl12xx *wl)
+{
+ wl->mbox_ptr[0] = wl12xx_reg_read32(wl, REG_EVENT_MAILBOX_PTR);
+ wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
+
+ wl12xx_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
+ wl->mbox_ptr[0], wl->mbox_ptr[1]);
+}
+
+int wl12xx_event_handle(struct wl12xx *wl, u8 mbox_num)
+{
+ struct event_mailbox mbox;
+ int ret;
+
+ wl12xx_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);
+
+ if (mbox_num > 1)
+ return -EINVAL;
+
+ /* first we read the mbox descriptor */
+ wl12xx_spi_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox,
+ sizeof(struct event_mailbox));
+
+ /* process the descriptor */
+ ret = wl12xx_event_process(wl, &mbox);
+ if (ret < 0)
+ return ret;
+
+ /* then we let the firmware know it can go on...*/
+ wl12xx_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
new file mode 100644
index 00000000000..1f4c2f7438a
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -0,0 +1,121 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (c) 1998-2007 Texas Instruments Incorporated
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_EVENT_H__
+#define __WL12XX_EVENT_H__
+
+/*
+ * Mbox events
+ *
+ * The event mechanism is based on a pair of event buffers (buffers A and
+ * B) at fixed locations in the target's memory. The host processes one
+ * buffer while the other buffer continues to collect events. If the host
+ * is not processing events, an interrupt is issued to signal that a buffer
+ * is ready. Once the host is done with processing events from one buffer,
+ * it signals the target (with an ACK interrupt) that the event buffer is
+ * free.
+ */
+
+enum {
+ RESERVED1_EVENT_ID = BIT(0),
+ RESERVED2_EVENT_ID = BIT(1),
+ MEASUREMENT_START_EVENT_ID = BIT(2),
+ SCAN_COMPLETE_EVENT_ID = BIT(3),
+ CALIBRATION_COMPLETE_EVENT_ID = BIT(4),
+ ROAMING_TRIGGER_LOW_RSSI_EVENT_ID = BIT(5),
+ PS_REPORT_EVENT_ID = BIT(6),
+ SYNCHRONIZATION_TIMEOUT_EVENT_ID = BIT(7),
+ HEALTH_REPORT_EVENT_ID = BIT(8),
+ ACI_DETECTION_EVENT_ID = BIT(9),
+ DEBUG_REPORT_EVENT_ID = BIT(10),
+ MAC_STATUS_EVENT_ID = BIT(11),
+ DISCONNECT_EVENT_COMPLETE_ID = BIT(12),
+ JOIN_EVENT_COMPLETE_ID = BIT(13),
+ CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(14),
+ BSS_LOSE_EVENT_ID = BIT(15),
+ ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(16),
+ MEASUREMENT_COMPLETE_EVENT_ID = BIT(17),
+ AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(18),
+ SCHEDULED_SCAN_COMPLETE_EVENT_ID = BIT(19),
+ PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(20),
+ RESET_BSS_EVENT_ID = BIT(21),
+ REGAINED_BSS_EVENT_ID = BIT(22),
+ ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID = BIT(23),
+ ROAMING_TRIGGER_LOW_SNR_EVENT_ID = BIT(24),
+ ROAMING_TRIGGER_REGAINED_SNR_EVENT_ID = BIT(25),
+
+ DBG_EVENT_ID = BIT(26),
+ BT_PTA_SENSE_EVENT_ID = BIT(27),
+ BT_PTA_PREDICTION_EVENT_ID = BIT(28),
+ BT_PTA_AVALANCHE_EVENT_ID = BIT(29),
+
+ PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(30),
+
+ EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff,
+};
+
+struct event_debug_report {
+ u8 debug_event_id;
+ u8 num_params;
+ u16 pad;
+ u32 report_1;
+ u32 report_2;
+ u32 report_3;
+} __attribute__ ((packed));
+
+struct event_mailbox {
+ u32 events_vector;
+ u32 events_mask;
+ u32 reserved_1;
+ u32 reserved_2;
+
+ char average_rssi_level;
+ u8 ps_status;
+ u8 channel_switch_status;
+ u8 scheduled_scan_status;
+
+ /* Channels scanned by the scheduled scan */
+ u16 scheduled_scan_channels;
+
+ /* If bit 0 is set -> target's fatal error */
+ u16 health_report;
+ u16 bad_fft_counter;
+ u8 bt_pta_sense_info;
+ u8 bt_pta_protective_info;
+ u32 reserved;
+ u32 debug_report[2];
+
+ /* Number of FCS errors since last event */
+ u32 fcs_err_counter;
+
+ struct event_debug_report report;
+ u8 average_snr_level;
+ u8 padding[19];
+} __attribute__ ((packed));
+
+int wl12xx_event_unmask(struct wl12xx *wl);
+void wl12xx_event_mbox_config(struct wl12xx *wl);
+int wl12xx_event_handle(struct wl12xx *wl, u8 mbox);
+
+#endif
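Tying the comment at the top of event.h to the code in event.c: the host drains one of the two buffers through wl12xx_event_handle(), which finishes by writing INTR_TRIG_EVENT_ACK so the firmware may reuse that buffer, while the firmware keeps logging into the other. A sketch of how an interrupt path might dispatch the two buffers; the status-bit names EVENT_A_PENDING and EVENT_B_PENDING are hypothetical, since the real interrupt decoding is outside this excerpt.

/* Illustrative sketch, not part of the patch: dispatching the A/B
 * event buffers described above. EVENT_A_PENDING/EVENT_B_PENDING are
 * hypothetical bit names; wl12xx_event_handle() is the real entry
 * point and already acks the buffer it processed. */
static int wl12xx_dispatch_event_mboxes(struct wl12xx *wl, u32 intr_status)
{
	int ret = 0;

	if (intr_status & EVENT_A_PENDING)
		ret = wl12xx_event_handle(wl, 0);	/* wl->mbox_ptr[0] */

	if (ret == 0 && (intr_status & EVENT_B_PENDING))
		ret = wl12xx_event_handle(wl, 1);	/* wl->mbox_ptr[1] */

	return ret;
}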
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
new file mode 100644
index 00000000000..2a573a6010b
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -0,0 +1,200 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "init.h"
+#include "wl12xx_80211.h"
+#include "acx.h"
+#include "cmd.h"
+
+int wl12xx_hw_init_hwenc_config(struct wl12xx *wl)
+{
+ int ret;
+
+ ret = wl12xx_acx_feature_cfg(wl);
+ if (ret < 0) {
+ wl12xx_warning("couldn't set feature config");
+ return ret;
+ }
+
+ ret = wl12xx_acx_default_key(wl, wl->default_key);
+ if (ret < 0) {
+ wl12xx_warning("couldn't set default key");
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_hw_init_templates_config(struct wl12xx *wl)
+{
+ int ret;
+ u8 partial_vbm[PARTIAL_VBM_MAX];
+
+ /* send empty templates for fw memory reservation */
+ ret = wl12xx_cmd_template_set(wl, CMD_PROBE_REQ, NULL,
+ sizeof(struct wl12xx_probe_req_template));
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_cmd_template_set(wl, CMD_NULL_DATA, NULL,
+ sizeof(struct wl12xx_null_data_template));
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_cmd_template_set(wl, CMD_PS_POLL, NULL,
+ sizeof(struct wl12xx_ps_poll_template));
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_cmd_template_set(wl, CMD_QOS_NULL_DATA, NULL,
+ sizeof
+ (struct wl12xx_qos_null_data_template));
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_cmd_template_set(wl, CMD_PROBE_RESP, NULL,
+ sizeof
+ (struct wl12xx_probe_resp_template));
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_cmd_template_set(wl, CMD_BEACON, NULL,
+ sizeof
+ (struct wl12xx_beacon_template));
+ if (ret < 0)
+ return ret;
+
+ /* tim templates, first reserve space then allocate an empty one */
+ memset(partial_vbm, 0, PARTIAL_VBM_MAX);
+ ret = wl12xx_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, PARTIAL_VBM_MAX, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, 1, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl12xx_hw_init_rx_config(struct wl12xx *wl, u32 config, u32 filter)
+{
+ int ret;
+
+ ret = wl12xx_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_acx_rx_config(wl, config, filter);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl12xx_hw_init_phy_config(struct wl12xx *wl)
+{
+ int ret;
+
+ ret = wl12xx_acx_pd_threshold(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_acx_slot(wl, DEFAULT_SLOT_TIME);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_acx_group_address_tbl(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_acx_service_period_timeout(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_acx_rts_threshold(wl, RTS_THRESHOLD_DEF);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl12xx_hw_init_beacon_filter(struct wl12xx *wl)
+{
+ int ret;
+
+ ret = wl12xx_acx_beacon_filter_opt(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_acx_beacon_filter_table(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl12xx_hw_init_pta(struct wl12xx *wl)
+{
+ int ret;
+
+ ret = wl12xx_acx_sg_enable(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_acx_sg_cfg(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl12xx_hw_init_energy_detection(struct wl12xx *wl)
+{
+ int ret;
+
+ ret = wl12xx_acx_cca_threshold(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl12xx_hw_init_beacon_broadcast(struct wl12xx *wl)
+{
+ int ret;
+
+ ret = wl12xx_acx_bcn_dtim_options(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl12xx_hw_init_power_auth(struct wl12xx *wl)
+{
+ return wl12xx_acx_sleep_auth(wl, WL12XX_PSM_CAM);
+}
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
new file mode 100644
index 00000000000..c8b6cd0b7c3
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -0,0 +1,40 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_INIT_H__
+#define __WL12XX_INIT_H__
+
+#include "wl12xx.h"
+
+int wl12xx_hw_init_hwenc_config(struct wl12xx *wl);
+int wl12xx_hw_init_templates_config(struct wl12xx *wl);
+int wl12xx_hw_init_mem_config(struct wl12xx *wl);
+int wl12xx_hw_init_rx_config(struct wl12xx *wl, u32 config, u32 filter);
+int wl12xx_hw_init_phy_config(struct wl12xx *wl);
+int wl12xx_hw_init_beacon_filter(struct wl12xx *wl);
+int wl12xx_hw_init_pta(struct wl12xx *wl);
+int wl12xx_hw_init_energy_detection(struct wl12xx *wl);
+int wl12xx_hw_init_beacon_broadcast(struct wl12xx *wl);
+int wl12xx_hw_init_power_auth(struct wl12xx *wl);
+
+#endif
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
new file mode 100644
index 00000000000..603d6114882
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -0,0 +1,1358 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/spi/spi.h>
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include <linux/spi/wl12xx.h>
+
+#include "wl12xx.h"
+#include "wl12xx_80211.h"
+#include "reg.h"
+#include "wl1251.h"
+#include "spi.h"
+#include "event.h"
+#include "tx.h"
+#include "rx.h"
+#include "ps.h"
+#include "init.h"
+#include "debugfs.h"
+
+static void wl12xx_disable_interrupts(struct wl12xx *wl)
+{
+ disable_irq(wl->irq);
+}
+
+static void wl12xx_power_off(struct wl12xx *wl)
+{
+ wl->set_power(false);
+}
+
+static void wl12xx_power_on(struct wl12xx *wl)
+{
+ wl->set_power(true);
+}
+
+static irqreturn_t wl12xx_irq(int irq, void *cookie)
+{
+ struct wl12xx *wl;
+
+ wl12xx_debug(DEBUG_IRQ, "IRQ");
+
+ wl = cookie;
+
+ schedule_work(&wl->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static int wl12xx_fetch_firmware(struct wl12xx *wl)
+{
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, wl->chip.fw_filename, &wl->spi->dev);
+
+ if (ret < 0) {
+ wl12xx_error("could not get firmware: %d", ret);
+ return ret;
+ }
+
+ if (fw->size % 4) {
+ wl12xx_error("firmware size is not multiple of 32 bits: %zu",
+ fw->size);
+ ret = -EILSEQ;
+ goto out;
+ }
+
+ wl->fw_len = fw->size;
+ wl->fw = kmalloc(wl->fw_len, GFP_KERNEL);
+
+ if (!wl->fw) {
+ wl12xx_error("could not allocate memory for the firmware");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(wl->fw, fw->data, wl->fw_len);
+
+ ret = 0;
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int wl12xx_fetch_nvs(struct wl12xx *wl)
+{
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, wl->chip.nvs_filename, &wl->spi->dev);
+
+ if (ret < 0) {
+ wl12xx_error("could not get nvs file: %d", ret);
+ return ret;
+ }
+
+ if (fw->size % 4) {
+ wl12xx_error("nvs size is not multiple of 32 bits: %zu",
+ fw->size);
+ ret = -EILSEQ;
+ goto out;
+ }
+
+ wl->nvs_len = fw->size;
+ wl->nvs = kmalloc(wl->nvs_len, GFP_KERNEL);
+
+ if (!wl->nvs) {
+ wl12xx_error("could not allocate memory for the nvs file");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(wl->nvs, fw->data, wl->nvs_len);
+
+ ret = 0;
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static void wl12xx_fw_wakeup(struct wl12xx *wl)
+{
+ u32 elp_reg;
+
+ elp_reg = ELPCTRL_WAKE_UP;
+ wl12xx_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
+ elp_reg = wl12xx_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
+
+ if (!(elp_reg & ELPCTRL_WLAN_READY)) {
+ wl12xx_warning("WLAN not ready");
+ elp_reg = ELPCTRL_WAKE_UP_WLAN_READY;
+ wl12xx_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
+ }
+}
+
+static int wl12xx_chip_wakeup(struct wl12xx *wl)
+{
+ int ret = 0;
+
+ wl12xx_power_on(wl);
+ msleep(wl->chip.power_on_sleep);
+ wl12xx_spi_reset(wl);
+ wl12xx_spi_init(wl);
+
+ /* We don't need a real memory partition here, because we only want
+ * to use the registers at this point. */
+ wl12xx_set_partition(wl,
+ 0x00000000,
+ 0x00000000,
+ REGISTERS_BASE,
+ REGISTERS_DOWN_SIZE);
+
+ /* ELP module wake up */
+ wl12xx_fw_wakeup(wl);
+
+ /* whal_FwCtrl_BootSm() */
+
+ /* 0. read chip id from CHIP_ID */
+ wl->chip.id = wl12xx_reg_read32(wl, CHIP_ID_B);
+
+ /* 1. check if chip id is valid */
+
+ switch (wl->chip.id) {
+ case CHIP_ID_1251_PG12:
+ wl12xx_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)",
+ wl->chip.id);
+
+ wl1251_setup(wl);
+
+ break;
+ case CHIP_ID_1271_PG10:
+ case CHIP_ID_1251_PG10:
+ case CHIP_ID_1251_PG11:
+ default:
+ wl12xx_error("unsupported chip id: 0x%x", wl->chip.id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (wl->fw == NULL) {
+ ret = wl12xx_fetch_firmware(wl);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* No NVS from netlink, try to get it from the filesystem */
+ if (wl->nvs == NULL) {
+ ret = wl12xx_fetch_nvs(wl);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void wl12xx_filter_work(struct work_struct *work)
+{
+ struct wl12xx *wl =
+ container_of(work, struct wl12xx, filter_work);
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ if (wl->state == WL12XX_STATE_OFF)
+ goto out;
+
+ ret = wl12xx_cmd_join(wl, wl->bss_type, 1, 100, 0);
+ if (ret < 0)
+ goto out;
+
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+int wl12xx_plt_start(struct wl12xx *wl)
+{
+ int ret;
+
+ wl12xx_notice("power up");
+
+ if (wl->state != WL12XX_STATE_OFF) {
+ wl12xx_error("cannot go into PLT state because not "
+ "in off state: %d", wl->state);
+ return -EBUSY;
+ }
+
+ wl->state = WL12XX_STATE_PLT;
+
+ ret = wl12xx_chip_wakeup(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl->chip.op_boot(wl);
+ if (ret < 0)
+ return ret;
+
+ wl12xx_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
+
+ ret = wl->chip.op_plt_init(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl12xx_plt_stop(struct wl12xx *wl)
+{
+ wl12xx_notice("power down");
+
+ if (wl->state != WL12XX_STATE_PLT) {
+ wl12xx_error("cannot power down because not in PLT "
+ "state: %d", wl->state);
+ return -EBUSY;
+ }
+
+ wl12xx_disable_interrupts(wl);
+ wl12xx_power_off(wl);
+
+ wl->state = WL12XX_STATE_OFF;
+
+ return 0;
+}
+
+
+static int wl12xx_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct wl12xx *wl = hw->priv;
+
+ skb_queue_tail(&wl->tx_queue, skb);
+
+ schedule_work(&wl->tx_work);
+
+ /*
+ * The workqueue is slow to process the tx_queue and we need to stop
+ * the queue here, otherwise it will get too long.
+ */
+ if (skb_queue_len(&wl->tx_queue) >= WL12XX_TX_QUEUE_MAX_LENGTH) {
+ ieee80211_stop_queues(wl->hw);
+
+ /*
+ * FIXME: this is racy, the variable is not properly
+ * protected. Maybe fix this by removing the stupid
+ * variable altogether and checking the real queue state?
+ */
+ wl->tx_queue_stopped = true;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int wl12xx_op_start(struct ieee80211_hw *hw)
+{
+ struct wl12xx *wl = hw->priv;
+ int ret = 0;
+
+ wl12xx_debug(DEBUG_MAC80211, "mac80211 start");
+
+ mutex_lock(&wl->mutex);
+
+ if (wl->state != WL12XX_STATE_OFF) {
+ wl12xx_error("cannot start because not in off state: %d",
+ wl->state);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = wl12xx_chip_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl->chip.op_boot(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl->chip.op_hw_init(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_acx_station_id(wl);
+ if (ret < 0)
+ goto out;
+
+ wl->state = WL12XX_STATE_ON;
+
+ wl12xx_info("firmware booted (%s)", wl->chip.fw_ver);
+
+out:
+ if (ret < 0)
+ wl12xx_power_off(wl);
+
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
+static void wl12xx_op_stop(struct ieee80211_hw *hw)
+{
+ struct wl12xx *wl = hw->priv;
+
+ wl12xx_info("down");
+
+ wl12xx_debug(DEBUG_MAC80211, "mac80211 stop");
+
+ mutex_lock(&wl->mutex);
+
+ WARN_ON(wl->state != WL12XX_STATE_ON);
+
+ if (wl->scanning) {
+ mutex_unlock(&wl->mutex);
+ ieee80211_scan_completed(wl->hw, true);
+ mutex_lock(&wl->mutex);
+ wl->scanning = false;
+ }
+
+ wl->state = WL12XX_STATE_OFF;
+
+ wl12xx_disable_interrupts(wl);
+
+ mutex_unlock(&wl->mutex);
+
+ cancel_work_sync(&wl->irq_work);
+ cancel_work_sync(&wl->tx_work);
+ cancel_work_sync(&wl->filter_work);
+
+ mutex_lock(&wl->mutex);
+
+ /* let's notify MAC80211 about the remaining pending TX frames */
+ wl12xx_tx_flush(wl);
+
+ wl12xx_power_off(wl);
+
+ memset(wl->bssid, 0, ETH_ALEN);
+ wl->listen_int = 1;
+ wl->bss_type = MAX_BSS_TYPE;
+
+ wl->data_in_count = 0;
+ wl->rx_counter = 0;
+ wl->rx_handled = 0;
+ wl->rx_current_buffer = 0;
+ wl->rx_last_id = 0;
+ wl->next_tx_complete = 0;
+ wl->elp = false;
+ wl->psm = 0;
+ wl->tx_queue_stopped = false;
+ wl->power_level = WL12XX_DEFAULT_POWER_LEVEL;
+
+ wl12xx_debugfs_reset(wl);
+
+ mutex_unlock(&wl->mutex);
+}
+
+static int wl12xx_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_if_init_conf *conf)
+{
+ struct wl12xx *wl = hw->priv;
+ DECLARE_MAC_BUF(mac);
+ int ret = 0;
+
+ wl12xx_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %s",
+ conf->type, print_mac(mac, conf->mac_addr));
+
+ mutex_lock(&wl->mutex);
+
+ switch (conf->type) {
+ case NL80211_IFTYPE_STATION:
+ wl->bss_type = BSS_TYPE_STA_BSS;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ wl->bss_type = BSS_TYPE_IBSS;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (memcmp(wl->mac_addr, conf->mac_addr, ETH_ALEN)) {
+ memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
+ ret = wl12xx_acx_station_id(wl);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+static void wl12xx_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_if_init_conf *conf)
+{
+ wl12xx_debug(DEBUG_MAC80211, "mac80211 remove interface");
+}
+
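+/*
+ * Build the NULL-data frame template and upload it to the firmware. The
+ * frame is addressed to the current BSSID when one is set, and to the
+ * broadcast address otherwise.
+ */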
+static int wl12xx_build_null_data(struct wl12xx *wl)
+{
+ struct wl12xx_null_data_template template;
+
+ if (!is_zero_ether_addr(wl->bssid)) {
+ memcpy(template.header.da, wl->bssid, ETH_ALEN);
+ memcpy(template.header.bssid, wl->bssid, ETH_ALEN);
+ } else {
+ memset(template.header.da, 0xff, ETH_ALEN);
+ memset(template.header.bssid, 0xff, ETH_ALEN);
+ }
+
+ memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
+ template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC);
+
+ return wl12xx_cmd_template_set(wl, CMD_NULL_DATA, &template,
+ sizeof(template));
+
+}
+
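+/*
+ * Build the PS-Poll frame template (current BSSID, our MAC address as TA
+ * and the given AID) and upload it to the firmware.
+ */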
+static int wl12xx_build_ps_poll(struct wl12xx *wl, u16 aid)
+{
+ struct wl12xx_ps_poll_template template;
+
+ memcpy(template.bssid, wl->bssid, ETH_ALEN);
+ memcpy(template.ta, wl->mac_addr, ETH_ALEN);
+ template.aid = aid;
+ template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
+
+ return wl12xx_cmd_template_set(wl, CMD_PS_POLL, &template,
+ sizeof(template));
+
+}
+
+static int wl12xx_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct wl12xx *wl = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int channel, ret = 0;
+
+ channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+
+ wl12xx_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d",
+ channel,
+ conf->flags & IEEE80211_CONF_PS ? "on" : "off",
+ conf->power_level);
+
+ mutex_lock(&wl->mutex);
+
+ if (channel != wl->channel) {
+ /* FIXME: use beacon interval provided by mac80211 */
+ ret = wl12xx_cmd_join(wl, wl->bss_type, 1, 100, 0);
+ if (ret < 0)
+ goto out;
+
+ wl->channel = channel;
+ }
+
+ ret = wl12xx_build_null_data(wl);
+ if (ret < 0)
+ goto out;
+
+ if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
+ wl12xx_info("psm enabled");
+
+ wl->psm_requested = true;
+
+ /*
+ * We enter PSM only if we're already associated.
+ * If we're not, we'll enter it when joining an SSID,
+ * through the bss_info_changed() hook.
+ */
+ ret = wl12xx_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ } else if (!(conf->flags & IEEE80211_CONF_PS) &&
+ wl->psm_requested) {
+ wl12xx_info("psm disabled");
+
+ wl->psm_requested = false;
+
+ if (wl->psm)
+ ret = wl12xx_ps_set_mode(wl, STATION_ACTIVE_MODE);
+ }
+
+ if (conf->power_level != wl->power_level) {
+ ret = wl12xx_acx_tx_power(wl, conf->power_level);
+ if (ret < 0)
+ goto out;
+
+ wl->power_level = conf->power_level;
+ }
+
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+#define WL12XX_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_FCSFAIL | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_CONTROL | \
+ FIF_OTHER_BSS)
+
+static void wl12xx_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed,
+ unsigned int *total,
+ int mc_count,
+ struct dev_addr_list *mc_list)
+{
+ struct wl12xx *wl = hw->priv;
+
+ wl12xx_debug(DEBUG_MAC80211, "mac80211 configure filter");
+
+ *total &= WL12XX_SUPPORTED_FILTERS;
+ changed &= WL12XX_SUPPORTED_FILTERS;
+
+ if (changed == 0)
+ /* no filters which we support changed */
+ return;
+
+ /* FIXME: wl->rx_config and wl->rx_filter are not protected */
+
+ wl->rx_config = WL12XX_DEFAULT_RX_CONFIG;
+ wl->rx_filter = WL12XX_DEFAULT_RX_FILTER;
+
+ if (*total & FIF_PROMISC_IN_BSS) {
+ wl->rx_config |= CFG_BSSID_FILTER_EN;
+ wl->rx_config |= CFG_RX_ALL_GOOD;
+ }
+ if (*total & FIF_ALLMULTI)
+ /*
+ * CFG_MC_FILTER_EN in rx_config needs to be 0 to receive
+ * all multicast frames
+ */
+ wl->rx_config &= ~CFG_MC_FILTER_EN;
+ if (*total & FIF_FCSFAIL)
+ wl->rx_filter |= CFG_RX_FCS_ERROR;
+ if (*total & FIF_BCN_PRBRESP_PROMISC) {
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+ wl->rx_config &= ~CFG_SSID_FILTER_EN;
+ }
+ if (*total & FIF_CONTROL)
+ wl->rx_filter |= CFG_RX_CTL_EN;
+ if (*total & FIF_OTHER_BSS)
+ wl->rx_filter &= ~CFG_BSSID_FILTER_EN;
+
+ /*
+ * FIXME: workqueues need to be properly cancelled on stop(), for
+ * now let's just disable changing the filter settings. They will
+ * be updated on config() anyway.
+ */
+ /* schedule_work(&wl->filter_work); */
+}
+
+/* HW encryption */
+static int wl12xx_set_key_type(struct wl12xx *wl, struct acx_set_key *key,
+ enum set_key_cmd cmd,
+ struct ieee80211_key_conf *mac80211_key,
+ const u8 *addr)
+{
+ switch (mac80211_key->alg) {
+ case ALG_WEP:
+ if (is_broadcast_ether_addr(addr))
+ key->key_type = KEY_WEP_DEFAULT;
+ else
+ key->key_type = KEY_WEP_ADDR;
+
+ mac80211_key->hw_key_idx = mac80211_key->keyidx;
+ break;
+ case ALG_TKIP:
+ if (is_broadcast_ether_addr(addr))
+ key->key_type = KEY_TKIP_MIC_GROUP;
+ else
+ key->key_type = KEY_TKIP_MIC_PAIRWISE;
+
+ mac80211_key->hw_key_idx = mac80211_key->keyidx;
+ break;
+ case ALG_CCMP:
+ if (is_broadcast_ether_addr(addr))
+ key->key_type = KEY_AES_GROUP;
+ else
+ key->key_type = KEY_AES_PAIRWISE;
+ mac80211_key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ break;
+ default:
+ wl12xx_error("Unknown key algo 0x%x", mac80211_key->alg);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int wl12xx_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct wl12xx *wl = hw->priv;
+ struct acx_set_key wl_key;
+ const u8 *addr;
+ int ret;
+
+ static const u8 bcast_addr[ETH_ALEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+ wl12xx_debug(DEBUG_MAC80211, "mac80211 set key");
+
+ memset(&wl_key, 0, sizeof(wl_key));
+
+ addr = sta ? sta->addr : bcast_addr;
+
+ wl12xx_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
+ wl12xx_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
+ wl12xx_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
+ key->alg, key->keyidx, key->keylen, key->flags);
+ wl12xx_dump(DEBUG_CRYPT, "KEY: ", key->key, key->keylen);
+
+ mutex_lock(&wl->mutex);
+
+ switch (cmd) {
+ case SET_KEY:
+ wl_key.key_action = KEY_ADD_OR_REPLACE;
+ break;
+ case DISABLE_KEY:
+ wl_key.key_action = KEY_REMOVE;
+ break;
+ default:
+ wl12xx_error("Unsupported key cmd 0x%x", cmd);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = wl12xx_set_key_type(wl, &wl_key, cmd, key, addr);
+ if (ret < 0) {
+ wl12xx_error("Set KEY type failed");
+ goto out;
+ }
+
+ if (wl_key.key_type != KEY_WEP_DEFAULT)
+ memcpy(wl_key.addr, addr, ETH_ALEN);
+
+ if ((wl_key.key_type == KEY_TKIP_MIC_GROUP) ||
+ (wl_key.key_type == KEY_TKIP_MIC_PAIRWISE)) {
+ /*
+ * We get the key in the following form:
+ * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
+ * but the target is expecting:
+ * TKIP - RX MIC - TX MIC
+ */
+ memcpy(wl_key.key, key->key, 16);
+ memcpy(wl_key.key + 16, key->key + 24, 8);
+ memcpy(wl_key.key + 24, key->key + 16, 8);
+
+ } else {
+ memcpy(wl_key.key, key->key, key->keylen);
+ }
+ wl_key.key_size = key->keylen;
+
+ wl_key.id = key->keyidx;
+ wl_key.ssid_profile = 0;
+
+ wl12xx_dump(DEBUG_CRYPT, "TARGET KEY: ", &wl_key, sizeof(wl_key));
+
+ if (wl12xx_cmd_send(wl, CMD_SET_KEYS, &wl_key, sizeof(wl_key)) < 0) {
+ wl12xx_error("Set KEY failed");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+static int wl12xx_build_basic_rates(char *rates)
+{
+ u8 index = 0;
+
+ rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
+ rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
+ rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
+ rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
+
+ return index;
+}
+
+static int wl12xx_build_extended_rates(char *rates)
+{
+ u8 index = 0;
+
+ rates[index++] = IEEE80211_OFDM_RATE_6MB;
+ rates[index++] = IEEE80211_OFDM_RATE_9MB;
+ rates[index++] = IEEE80211_OFDM_RATE_12MB;
+ rates[index++] = IEEE80211_OFDM_RATE_18MB;
+ rates[index++] = IEEE80211_OFDM_RATE_24MB;
+ rates[index++] = IEEE80211_OFDM_RATE_36MB;
+ rates[index++] = IEEE80211_OFDM_RATE_48MB;
+ rates[index++] = IEEE80211_OFDM_RATE_54MB;
+
+ return index;
+}
+
+
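+/*
+ * Assemble the probe request template (802.11 header, SSID IE, supported
+ * rates IE and extended supported rates IE) and upload it to the firmware.
+ */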
+static int wl12xx_build_probe_req(struct wl12xx *wl, u8 *ssid, size_t ssid_len)
+{
+ struct wl12xx_probe_req_template template;
+ struct wl12xx_ie_rates *rates;
+ char *ptr;
+ u16 size;
+
+ ptr = (char *)&template;
+ size = sizeof(struct ieee80211_header);
+
+ memset(template.header.da, 0xff, ETH_ALEN);
+ memset(template.header.bssid, 0xff, ETH_ALEN);
+ memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
+ template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+
+ /* IEs */
+ /* SSID */
+ template.ssid.header.id = WLAN_EID_SSID;
+ template.ssid.header.len = ssid_len;
+ if (ssid_len && ssid)
+ memcpy(template.ssid.ssid, ssid, ssid_len);
+ size += sizeof(struct wl12xx_ie_header) + ssid_len;
+ ptr += size;
+
+ /* Basic Rates */
+ rates = (struct wl12xx_ie_rates *)ptr;
+ rates->header.id = WLAN_EID_SUPP_RATES;
+ rates->header.len = wl12xx_build_basic_rates(rates->rates);
+ size += sizeof(struct wl12xx_ie_header) + rates->header.len;
+ ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
+
+ /* Extended rates */
+ rates = (struct wl12xx_ie_rates *)ptr;
+ rates->header.id = WLAN_EID_EXT_SUPP_RATES;
+ rates->header.len = wl12xx_build_extended_rates(rates->rates);
+ size += sizeof(struct wl12xx_ie_header) + rates->header.len;
+
+ wl12xx_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
+
+ return wl12xx_cmd_template_set(wl, CMD_PROBE_REQ, &template,
+ size);
+}
+
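+/*
+ * Program and trigger a firmware scan: upload the probe request template,
+ * fill in the per-channel parameters, issue CMD_SCAN and read the command
+ * answer back from the command mailbox to check its status.
+ */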
+static int wl12xx_hw_scan(struct wl12xx *wl, u8 *ssid, size_t len,
+ u8 active_scan, u8 high_prio, u8 num_channels,
+ u8 probe_requests)
+{
+ int i, ret;
+ u32 split_scan = 0;
+ u16 scan_options = 0;
+ struct cmd_scan *params;
+ struct wl12xx_command *cmd_answer;
+
+ if (wl->scanning)
+ return -EINVAL;
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
+ params->params.rx_filter_options =
+ cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
+
+ /* High priority scan */
+ if (!active_scan)
+ scan_options |= SCAN_PASSIVE;
+ if (high_prio)
+ scan_options |= SCAN_PRIORITY_HIGH;
+ params->params.scan_options = scan_options;
+
+ params->params.num_channels = num_channels;
+ params->params.num_probe_requests = probe_requests;
+ params->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
+ params->params.tid_trigger = 0;
+
+ for (i = 0; i < num_channels; i++) {
+ params->channels[i].min_duration = cpu_to_le32(30000);
+ params->channels[i].max_duration = cpu_to_le32(60000);
+ memset(&params->channels[i].bssid_lsb, 0xff, 4);
+ memset(&params->channels[i].bssid_msb, 0xff, 2);
+ params->channels[i].early_termination = 0;
+ params->channels[i].tx_power_att = 0;
+ params->channels[i].channel = i + 1;
+ memset(params->channels[i].pad, 0, 3);
+ }
+
+ for (i = num_channels; i < SCAN_MAX_NUM_OF_CHANNELS; i++)
+ memset(&params->channels[i], 0,
+ sizeof(struct basic_scan_channel_parameters));
+
+ if (len && ssid) {
+ params->params.ssid_len = len;
+ memcpy(params->params.ssid, ssid, len);
+ } else {
+ params->params.ssid_len = 0;
+ memset(params->params.ssid, 0, 32);
+ }
+
+ ret = wl12xx_build_probe_req(wl, ssid, len);
+ if (ret < 0) {
+ wl12xx_error("PROBE request template failed");
+ goto out;
+ }
+
+ ret = wl12xx_cmd_send(wl, CMD_TRIGGER_SCAN_TO, &split_scan,
+ sizeof(u32));
+ if (ret < 0) {
+ wl12xx_error("Split SCAN failed");
+ goto out;
+ }
+
+ wl12xx_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
+
+ wl->scanning = true;
+
+ ret = wl12xx_cmd_send(wl, CMD_SCAN, params, sizeof(*params));
+ if (ret < 0)
+ wl12xx_error("SCAN failed");
+
+ wl12xx_spi_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params));
+
+ cmd_answer = (struct wl12xx_command *) params;
+ if (cmd_answer->status != CMD_STATUS_SUCCESS) {
+ wl12xx_error("TEST command answer error: %d",
+ cmd_answer->status);
+ wl->scanning = false;
+ ret = -EIO;
+ goto out;
+ }
+
+out:
+ kfree(params);
+ return ret;
+
+}
+
+static int wl12xx_op_hw_scan(struct ieee80211_hw *hw,
+ struct cfg80211_scan_request *req)
+{
+ struct wl12xx *wl = hw->priv;
+ int ret;
+ u8 *ssid = NULL;
+ size_t ssid_len = 0;
+
+ wl12xx_debug(DEBUG_MAC80211, "mac80211 hw scan");
+
+ if (req->n_ssids) {
+ ssid = req->ssids[0].ssid;
+ ssid_len = req->ssids[0].ssid_len;
+ }
+
+ mutex_lock(&wl->mutex);
+ ret = wl12xx_hw_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3);
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
+static int wl12xx_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct wl12xx *wl = hw->priv;
+ int ret;
+
+ ret = wl12xx_acx_rts_threshold(wl, (u16) value);
+
+ if (ret < 0)
+ wl12xx_warning("wl12xx_op_set_rts_threshold failed: %d", ret);
+
+ return ret;
+}
+
+static void wl12xx_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ enum acx_ps_mode mode;
+ struct wl12xx *wl = hw->priv;
+ struct sk_buff *beacon;
+ int ret;
+
+ wl12xx_debug(DEBUG_MAC80211, "mac80211 bss info changed");
+
+ mutex_lock(&wl->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ wl->aid = bss_conf->aid;
+
+ ret = wl12xx_build_ps_poll(wl, wl->aid);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_acx_aid(wl, wl->aid);
+ if (ret < 0)
+ goto out;
+
+ /* If we want to go in PSM but we're not there yet */
+ if (wl->psm_requested && !wl->psm) {
+ mode = STATION_POWER_SAVE_MODE;
+ ret = wl12xx_ps_set_mode(wl, mode);
+ if (ret < 0)
+ goto out;
+ }
+ }
+ }
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ ret = wl12xx_acx_slot(wl, SLOT_TIME_SHORT);
+ else
+ ret = wl12xx_acx_slot(wl, SLOT_TIME_LONG);
+ if (ret < 0) {
+ wl12xx_warning("Set slot time failed %d", ret);
+ goto out;
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ if (bss_conf->use_short_preamble)
+ wl12xx_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+ else
+ wl12xx_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ if (bss_conf->use_cts_prot)
+ ret = wl12xx_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+ else
+ ret = wl12xx_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+ if (ret < 0) {
+ wl12xx_warning("Set ctsprotect failed %d", ret);
+ goto out;
+ }
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+
+ ret = wl12xx_build_null_data(wl);
+ if (ret < 0)
+ goto out;
+
+ if (wl->bss_type != BSS_TYPE_IBSS) {
+ ret = wl12xx_cmd_join(wl, wl->bss_type, 5, 100, 1);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ beacon = ieee80211_beacon_get(hw, vif);
+ ret = wl12xx_cmd_template_set(wl, CMD_BEACON, beacon->data,
+ beacon->len);
+
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
+
+ ret = wl12xx_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data,
+ beacon->len);
+
+ dev_kfree_skb(beacon);
+
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_cmd_join(wl, wl->bss_type, 1, 100, 0);
+
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+
+/* can't be const, mac80211 writes to this */
+static struct ieee80211_rate wl12xx_rates[] = {
+ { .bitrate = 10,
+ .hw_value = 0x1,
+ .hw_value_short = 0x1, },
+ { .bitrate = 20,
+ .hw_value = 0x2,
+ .hw_value_short = 0x2,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = 0x4,
+ .hw_value_short = 0x4,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = 0x20,
+ .hw_value_short = 0x20,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 60,
+ .hw_value = 0x8,
+ .hw_value_short = 0x8, },
+ { .bitrate = 90,
+ .hw_value = 0x10,
+ .hw_value_short = 0x10, },
+ { .bitrate = 120,
+ .hw_value = 0x40,
+ .hw_value_short = 0x40, },
+ { .bitrate = 180,
+ .hw_value = 0x80,
+ .hw_value_short = 0x80, },
+ { .bitrate = 240,
+ .hw_value = 0x200,
+ .hw_value_short = 0x200, },
+ { .bitrate = 360,
+ .hw_value = 0x400,
+ .hw_value_short = 0x400, },
+ { .bitrate = 480,
+ .hw_value = 0x800,
+ .hw_value_short = 0x800, },
+ { .bitrate = 540,
+ .hw_value = 0x1000,
+ .hw_value_short = 0x1000, },
+};
+
+/* can't be const, mac80211 writes to this */
+static struct ieee80211_channel wl12xx_channels[] = {
+ { .hw_value = 1, .center_freq = 2412},
+ { .hw_value = 2, .center_freq = 2417},
+ { .hw_value = 3, .center_freq = 2422},
+ { .hw_value = 4, .center_freq = 2427},
+ { .hw_value = 5, .center_freq = 2432},
+ { .hw_value = 6, .center_freq = 2437},
+ { .hw_value = 7, .center_freq = 2442},
+ { .hw_value = 8, .center_freq = 2447},
+ { .hw_value = 9, .center_freq = 2452},
+ { .hw_value = 10, .center_freq = 2457},
+ { .hw_value = 11, .center_freq = 2462},
+ { .hw_value = 12, .center_freq = 2467},
+ { .hw_value = 13, .center_freq = 2472},
+};
+
+/* can't be const, mac80211 writes to this */
+static struct ieee80211_supported_band wl12xx_band_2ghz = {
+ .channels = wl12xx_channels,
+ .n_channels = ARRAY_SIZE(wl12xx_channels),
+ .bitrates = wl12xx_rates,
+ .n_bitrates = ARRAY_SIZE(wl12xx_rates),
+};
+
+static const struct ieee80211_ops wl12xx_ops = {
+ .start = wl12xx_op_start,
+ .stop = wl12xx_op_stop,
+ .add_interface = wl12xx_op_add_interface,
+ .remove_interface = wl12xx_op_remove_interface,
+ .config = wl12xx_op_config,
+ .configure_filter = wl12xx_op_configure_filter,
+ .tx = wl12xx_op_tx,
+ .set_key = wl12xx_op_set_key,
+ .hw_scan = wl12xx_op_hw_scan,
+ .bss_info_changed = wl12xx_op_bss_info_changed,
+ .set_rts_threshold = wl12xx_op_set_rts_threshold,
+};
+
+static int wl12xx_register_hw(struct wl12xx *wl)
+{
+ int ret;
+
+ if (wl->mac80211_registered)
+ return 0;
+
+ SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
+
+ ret = ieee80211_register_hw(wl->hw);
+ if (ret < 0) {
+ wl12xx_error("unable to register mac80211 hw: %d", ret);
+ return ret;
+ }
+
+ wl->mac80211_registered = true;
+
+ wl12xx_notice("loaded");
+
+ return 0;
+}
+
+static int wl12xx_init_ieee80211(struct wl12xx *wl)
+{
+ /* The tx descriptor buffer and the TKIP space */
+ wl->hw->extra_tx_headroom = sizeof(struct tx_double_buffer_desc)
+ + WL12XX_TKIP_IV_SPACE;
+
+ /* channel_change_time is in microseconds */
+ /* FIXME: find a proper value */
+ wl->hw->channel_change_time = 10000;
+
+ wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_NOISE_DBM;
+
+ wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wl->hw->wiphy->max_scan_ssids = 1;
+ wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl12xx_band_2ghz;
+
+ SET_IEEE80211_DEV(wl->hw, &wl->spi->dev);
+
+ return 0;
+}
+
+#define WL12XX_DEFAULT_CHANNEL 1
+static int __devinit wl12xx_probe(struct spi_device *spi)
+{
+ struct wl12xx_platform_data *pdata;
+ struct ieee80211_hw *hw;
+ struct wl12xx *wl;
+ int ret, i;
+ static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
+
+ pdata = spi->dev.platform_data;
+ if (!pdata) {
+ wl12xx_error("no platform data");
+ return -ENODEV;
+ }
+
+ hw = ieee80211_alloc_hw(sizeof(*wl), &wl12xx_ops);
+ if (!hw) {
+ wl12xx_error("could not alloc ieee80211_hw");
+ return -ENOMEM;
+ }
+
+ wl = hw->priv;
+ memset(wl, 0, sizeof(*wl));
+
+ wl->hw = hw;
+ dev_set_drvdata(&spi->dev, wl);
+ wl->spi = spi;
+
+ wl->data_in_count = 0;
+
+ skb_queue_head_init(&wl->tx_queue);
+
+ INIT_WORK(&wl->tx_work, wl12xx_tx_work);
+ INIT_WORK(&wl->filter_work, wl12xx_filter_work);
+ wl->channel = WL12XX_DEFAULT_CHANNEL;
+ wl->scanning = false;
+ wl->default_key = 0;
+ wl->listen_int = 1;
+ wl->rx_counter = 0;
+ wl->rx_handled = 0;
+ wl->rx_current_buffer = 0;
+ wl->rx_last_id = 0;
+ wl->rx_config = WL12XX_DEFAULT_RX_CONFIG;
+ wl->rx_filter = WL12XX_DEFAULT_RX_FILTER;
+ wl->elp = false;
+ wl->psm = 0;
+ wl->psm_requested = false;
+ wl->tx_queue_stopped = false;
+ wl->power_level = WL12XX_DEFAULT_POWER_LEVEL;
+
+ /* We use the default power on sleep time until we know which chip
+ * we're using */
+ wl->chip.power_on_sleep = WL12XX_DEFAULT_POWER_ON_SLEEP;
+
+ for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
+ wl->tx_frames[i] = NULL;
+
+ wl->next_tx_complete = 0;
+
+ /*
+ * In case our MAC address is not correctly set,
+ * we use a random but Nokia MAC.
+ */
+ memcpy(wl->mac_addr, nokia_oui, 3);
+ get_random_bytes(wl->mac_addr + 3, 3);
+
+ wl->state = WL12XX_STATE_OFF;
+ mutex_init(&wl->mutex);
+
+ wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
+ wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
+
+ /* This is the only SPI value that we need to set here, the rest
+ * comes from the board-peripherals file */
+ spi->bits_per_word = 32;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ wl12xx_error("spi_setup failed");
+ goto out_free;
+ }
+
+ wl->set_power = pdata->set_power;
+ if (!wl->set_power) {
+ wl12xx_error("set power function missing in platform data");
+ return -ENODEV;
+ }
+
+ wl->irq = spi->irq;
+ if (wl->irq < 0) {
+ wl12xx_error("irq missing in platform data");
+ return -ENODEV;
+ }
+
+ ret = request_irq(wl->irq, wl12xx_irq, 0, DRIVER_NAME, wl);
+ if (ret < 0) {
+ wl12xx_error("request_irq() failed: %d", ret);
+ goto out_free;
+ }
+
+ set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+
+ disable_irq(wl->irq);
+
+ ret = wl12xx_init_ieee80211(wl);
+ if (ret)
+ goto out_irq;
+
+ ret = wl12xx_register_hw(wl);
+ if (ret)
+ goto out_irq;
+
+ wl12xx_debugfs_init(wl);
+
+ wl12xx_notice("initialized");
+
+ return 0;
+
+ out_irq:
+ free_irq(wl->irq, wl);
+
+ out_free:
+ ieee80211_free_hw(hw);
+
+ return ret;
+}
+
+static int __devexit wl12xx_remove(struct spi_device *spi)
+{
+ struct wl12xx *wl = dev_get_drvdata(&spi->dev);
+
+ ieee80211_unregister_hw(wl->hw);
+
+ wl12xx_debugfs_exit(wl);
+
+ free_irq(wl->irq, wl);
+ kfree(wl->target_mem_map);
+ kfree(wl->data_path);
+ kfree(wl->fw);
+ wl->fw = NULL;
+ kfree(wl->nvs);
+ wl->nvs = NULL;
+ ieee80211_free_hw(wl->hw);
+
+ return 0;
+}
+
+
+static struct spi_driver wl12xx_spi_driver = {
+ .driver = {
+ .name = "wl12xx",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+
+ .probe = wl12xx_probe,
+ .remove = __devexit_p(wl12xx_remove),
+};
+
+static int __init wl12xx_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&wl12xx_spi_driver);
+ if (ret < 0) {
+ wl12xx_error("failed to register spi driver: %d", ret);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void __exit wl12xx_exit(void)
+{
+ spi_unregister_driver(&wl12xx_spi_driver);
+
+ wl12xx_notice("unloaded");
+}
+
+module_init(wl12xx_init);
+module_exit(wl12xx_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kalle Valo <Kalle.Valo@nokia.com>, "
+ "Luciano Coelho <luciano.coelho@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
new file mode 100644
index 00000000000..83a10117330
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -0,0 +1,151 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "reg.h"
+#include "ps.h"
+#include "spi.h"
+
+#define WL12XX_WAKEUP_TIMEOUT 2000
+
+/* Routines to toggle sleep mode while in ELP */
+void wl12xx_ps_elp_sleep(struct wl12xx *wl)
+{
+ if (wl->elp || !wl->psm)
+ return;
+
+ wl12xx_debug(DEBUG_PSM, "chip to elp");
+
+ wl12xx_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
+
+ wl->elp = true;
+}
+
+int wl12xx_ps_elp_wakeup(struct wl12xx *wl)
+{
+ unsigned long timeout;
+ u32 elp_reg;
+
+ if (!wl->elp)
+ return 0;
+
+ wl12xx_debug(DEBUG_PSM, "waking up chip from elp");
+
+ timeout = jiffies + msecs_to_jiffies(WL12XX_WAKEUP_TIMEOUT);
+
+ wl12xx_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
+
+ elp_reg = wl12xx_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
+
+ /*
+ * FIXME: we should wait for irq from chip but, as a temporary
+ * solution to simplify locking, let's poll instead
+ */
+ while (!(elp_reg & ELPCTRL_WLAN_READY)) {
+ if (time_after(jiffies, timeout)) {
+ wl12xx_error("elp wakeup timeout");
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ elp_reg = wl12xx_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
+ }
+
+ wl12xx_debug(DEBUG_PSM, "wakeup time: %u ms",
+ jiffies_to_msecs(jiffies) -
+ (jiffies_to_msecs(timeout) - WL12XX_WAKEUP_TIMEOUT));
+
+ wl->elp = false;
+
+ return 0;
+}
+
+static int wl12xx_ps_set_elp(struct wl12xx *wl, bool enable)
+{
+ int ret;
+
+ if (enable) {
+ wl12xx_debug(DEBUG_PSM, "sleep auth psm/elp");
+
+ /*
+ * FIXME: we should use PSM_ELP, but because of firmware wakeup
+ * problems let's use only PSM_PS
+ */
+ ret = wl12xx_acx_sleep_auth(wl, WL12XX_PSM_PS);
+ if (ret < 0)
+ return ret;
+
+ wl12xx_ps_elp_sleep(wl);
+ } else {
+ wl12xx_debug(DEBUG_PSM, "sleep auth cam");
+
+ /*
+ * When the target is in ELP, we can only
+ * access the ELP control register. Thus,
+ * we have to wake the target up before
+ * changing the power authorization.
+ */
+
+ wl12xx_ps_elp_wakeup(wl);
+
+ ret = wl12xx_acx_sleep_auth(wl, WL12XX_PSM_CAM);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+int wl12xx_ps_set_mode(struct wl12xx *wl, enum acx_ps_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case STATION_POWER_SAVE_MODE:
+ wl12xx_debug(DEBUG_PSM, "entering psm");
+ ret = wl12xx_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_ps_set_elp(wl, true);
+ if (ret < 0)
+ return ret;
+
+ wl->psm = 1;
+ break;
+ case STATION_ACTIVE_MODE:
+ default:
+ wl12xx_debug(DEBUG_PSM, "leaving psm");
+ ret = wl12xx_ps_set_elp(wl, false);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
+ if (ret < 0)
+ return ret;
+
+ wl->psm = 0;
+ break;
+ }
+
+ return ret;
+}
+
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
new file mode 100644
index 00000000000..5d7c5255383
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -0,0 +1,36 @@
+#ifndef __WL12XX_PS_H__
+#define __WL12XX_PS_H__
+
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (c) 1998-2007 Texas Instruments Incorporated
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "wl12xx.h"
+#include "acx.h"
+
+int wl12xx_ps_set_mode(struct wl12xx *wl, enum acx_ps_mode mode);
+void wl12xx_ps_elp_sleep(struct wl12xx *wl);
+int wl12xx_ps_elp_wakeup(struct wl12xx *wl);
+
+
+#endif /* __WL12XX_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h
new file mode 100644
index 00000000000..e421643215c
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/reg.h
@@ -0,0 +1,745 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (c) 1998-2007 Texas Instruments Incorporated
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __REG_H__
+#define __REG_H__
+
+#include <linux/bitops.h>
+#include "wl12xx.h"
+
+#define REGISTERS_BASE 0x00300000
+#define DRPW_BASE 0x00310000
+
+#define REGISTERS_DOWN_SIZE 0x00008800
+#define REGISTERS_WORK_SIZE 0x0000b000
+
+#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC
+
+/* ELP register commands */
+#define ELPCTRL_WAKE_UP 0x1
+#define ELPCTRL_WAKE_UP_WLAN_READY 0x5
+#define ELPCTRL_SLEEP 0x0
+/* ELP WLAN_READY bit */
+#define ELPCTRL_WLAN_READY 0x2
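+
+/*
+ * Typical ELP handshake (see ps.c): the host writes ELPCTRL_WAKE_UP to
+ * HW_ACCESS_ELP_CTRL_REG_ADDR and then polls the register until
+ * ELPCTRL_WLAN_READY is set; writing ELPCTRL_SLEEP lets the chip enter
+ * ELP again.
+ */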
+
+/*
+ * Interrupt registers.
+ * 64 bit interrupt source registers.
+ * Some interrupts were removed and new ones were added.
+ * Order was changed.
+ */
+#define FIQ_MASK (REGISTERS_BASE + 0x0400)
+#define FIQ_MASK_L (REGISTERS_BASE + 0x0400)
+#define FIQ_MASK_H (REGISTERS_BASE + 0x0404)
+#define FIQ_MASK_SET (REGISTERS_BASE + 0x0408)
+#define FIQ_MASK_SET_L (REGISTERS_BASE + 0x0408)
+#define FIQ_MASK_SET_H (REGISTERS_BASE + 0x040C)
+#define FIQ_MASK_CLR (REGISTERS_BASE + 0x0410)
+#define FIQ_MASK_CLR_L (REGISTERS_BASE + 0x0410)
+#define FIQ_MASK_CLR_H (REGISTERS_BASE + 0x0414)
+#define IRQ_MASK (REGISTERS_BASE + 0x0418)
+#define IRQ_MASK_L (REGISTERS_BASE + 0x0418)
+#define IRQ_MASK_H (REGISTERS_BASE + 0x041C)
+#define IRQ_MASK_SET (REGISTERS_BASE + 0x0420)
+#define IRQ_MASK_SET_L (REGISTERS_BASE + 0x0420)
+#define IRQ_MASK_SET_H (REGISTERS_BASE + 0x0424)
+#define IRQ_MASK_CLR (REGISTERS_BASE + 0x0428)
+#define IRQ_MASK_CLR_L (REGISTERS_BASE + 0x0428)
+#define IRQ_MASK_CLR_H (REGISTERS_BASE + 0x042C)
+#define ECPU_MASK (REGISTERS_BASE + 0x0448)
+#define FIQ_STS_L (REGISTERS_BASE + 0x044C)
+#define FIQ_STS_H (REGISTERS_BASE + 0x0450)
+#define IRQ_STS_L (REGISTERS_BASE + 0x0454)
+#define IRQ_STS_H (REGISTERS_BASE + 0x0458)
+#define INT_STS_ND (REGISTERS_BASE + 0x0464)
+#define INT_STS_RAW_L (REGISTERS_BASE + 0x0464)
+#define INT_STS_RAW_H (REGISTERS_BASE + 0x0468)
+#define INT_STS_CLR (REGISTERS_BASE + 0x04B4)
+#define INT_STS_CLR_L (REGISTERS_BASE + 0x04B4)
+#define INT_STS_CLR_H (REGISTERS_BASE + 0x04B8)
+#define INT_ACK (REGISTERS_BASE + 0x046C)
+#define INT_ACK_L (REGISTERS_BASE + 0x046C)
+#define INT_ACK_H (REGISTERS_BASE + 0x0470)
+#define INT_TRIG (REGISTERS_BASE + 0x0474)
+#define INT_TRIG_L (REGISTERS_BASE + 0x0474)
+#define INT_TRIG_H (REGISTERS_BASE + 0x0478)
+#define HOST_STS_L (REGISTERS_BASE + 0x045C)
+#define HOST_STS_H (REGISTERS_BASE + 0x0460)
+#define HOST_MASK (REGISTERS_BASE + 0x0430)
+#define HOST_MASK_L (REGISTERS_BASE + 0x0430)
+#define HOST_MASK_H (REGISTERS_BASE + 0x0434)
+#define HOST_MASK_SET (REGISTERS_BASE + 0x0438)
+#define HOST_MASK_SET_L (REGISTERS_BASE + 0x0438)
+#define HOST_MASK_SET_H (REGISTERS_BASE + 0x043C)
+#define HOST_MASK_CLR (REGISTERS_BASE + 0x0440)
+#define HOST_MASK_CLR_L (REGISTERS_BASE + 0x0440)
+#define HOST_MASK_CLR_H (REGISTERS_BASE + 0x0444)
+
+/* Host Interrupts*/
+#define HINT_MASK (REGISTERS_BASE + 0x0494)
+#define HINT_MASK_SET (REGISTERS_BASE + 0x0498)
+#define HINT_MASK_CLR (REGISTERS_BASE + 0x049C)
+#define HINT_STS_ND_MASKED (REGISTERS_BASE + 0x04A0)
+/*1150 spec calls this HINT_STS_RAW*/
+#define HINT_STS_ND (REGISTERS_BASE + 0x04B0)
+#define HINT_STS_CLR (REGISTERS_BASE + 0x04A4)
+#define HINT_ACK (REGISTERS_BASE + 0x04A8)
+#define HINT_TRIG (REGISTERS_BASE + 0x04AC)
+
+/* Device Configuration registers*/
+#define SOR_CFG (REGISTERS_BASE + 0x0800)
+#define ECPU_CTRL (REGISTERS_BASE + 0x0804)
+#define HI_CFG (REGISTERS_BASE + 0x0808)
+#define EE_START (REGISTERS_BASE + 0x080C)
+
+#define CHIP_ID_B (REGISTERS_BASE + 0x5674)
+
+#define CHIP_ID_1251_PG10 (0x7010101)
+#define CHIP_ID_1251_PG11 (0x7020101)
+#define CHIP_ID_1251_PG12 (0x7030101)
+
+#define ENABLE (REGISTERS_BASE + 0x5450)
+
+/* Power Management registers */
+#define ELP_CFG_MODE (REGISTERS_BASE + 0x5804)
+#define ELP_CMD (REGISTERS_BASE + 0x5808)
+#define PLL_CAL_TIME (REGISTERS_BASE + 0x5810)
+#define CLK_REQ_TIME (REGISTERS_BASE + 0x5814)
+#define CLK_BUF_TIME (REGISTERS_BASE + 0x5818)
+
+#define CFG_PLL_SYNC_CNT (REGISTERS_BASE + 0x5820)
+
+/* Scratch Pad registers*/
+#define SCR_PAD0 (REGISTERS_BASE + 0x5608)
+#define SCR_PAD1 (REGISTERS_BASE + 0x560C)
+#define SCR_PAD2 (REGISTERS_BASE + 0x5610)
+#define SCR_PAD3 (REGISTERS_BASE + 0x5614)
+#define SCR_PAD4 (REGISTERS_BASE + 0x5618)
+#define SCR_PAD4_SET (REGISTERS_BASE + 0x561C)
+#define SCR_PAD4_CLR (REGISTERS_BASE + 0x5620)
+#define SCR_PAD5 (REGISTERS_BASE + 0x5624)
+#define SCR_PAD5_SET (REGISTERS_BASE + 0x5628)
+#define SCR_PAD5_CLR (REGISTERS_BASE + 0x562C)
+#define SCR_PAD6 (REGISTERS_BASE + 0x5630)
+#define SCR_PAD7 (REGISTERS_BASE + 0x5634)
+#define SCR_PAD8 (REGISTERS_BASE + 0x5638)
+#define SCR_PAD9 (REGISTERS_BASE + 0x563C)
+
+/* Spare registers*/
+#define SPARE_A1 (REGISTERS_BASE + 0x0994)
+#define SPARE_A2 (REGISTERS_BASE + 0x0998)
+#define SPARE_A3 (REGISTERS_BASE + 0x099C)
+#define SPARE_A4 (REGISTERS_BASE + 0x09A0)
+#define SPARE_A5 (REGISTERS_BASE + 0x09A4)
+#define SPARE_A6 (REGISTERS_BASE + 0x09A8)
+#define SPARE_A7 (REGISTERS_BASE + 0x09AC)
+#define SPARE_A8 (REGISTERS_BASE + 0x09B0)
+#define SPARE_B1 (REGISTERS_BASE + 0x5420)
+#define SPARE_B2 (REGISTERS_BASE + 0x5424)
+#define SPARE_B3 (REGISTERS_BASE + 0x5428)
+#define SPARE_B4 (REGISTERS_BASE + 0x542C)
+#define SPARE_B5 (REGISTERS_BASE + 0x5430)
+#define SPARE_B6 (REGISTERS_BASE + 0x5434)
+#define SPARE_B7 (REGISTERS_BASE + 0x5438)
+#define SPARE_B8 (REGISTERS_BASE + 0x543C)
+
+enum wl12xx_acx_int_reg {
+ ACX_REG_INTERRUPT_TRIG,
+ ACX_REG_INTERRUPT_TRIG_H,
+
+/*=============================================
+ Host Interrupt Mask Register - 32bit (RW)
+ ------------------------------------------
+ Setting a bit in this register masks the
+ corresponding interrupt to the host.
+  0 - RX0 - Rx first double buffer Data Interrupt
+ 1 - TXD - Tx Data Interrupt
+ 2 - TXXFR - Tx Transfer Interrupt
+  3 - RX1 - Rx second double buffer Data Interrupt
+ 4 - RXXFR - Rx Transfer Interrupt
+ 5 - EVENT_A - Event Mailbox interrupt
+ 6 - EVENT_B - Event Mailbox interrupt
+ 7 - WNONHST - Wake On Host Interrupt
+ 8 - TRACE_A - Debug Trace interrupt
+ 9 - TRACE_B - Debug Trace interrupt
+ 10 - CDCMP - Command Complete Interrupt
+ 11 -
+ 12 -
+ 13 -
+ 14 - ICOMP - Initialization Complete Interrupt
+ 16 - SG SE - Soft Gemini - Sense enable interrupt
+ 17 - SG SD - Soft Gemini - Sense disable interrupt
+ 18 - -
+ 19 - -
+ 20 - -
+ 21- -
+ Default: 0x0001
+*==============================================*/
+ ACX_REG_INTERRUPT_MASK,
+
+/*=============================================
+ Host Interrupt Mask Set 16bit, (Write only)
+ ------------------------------------------
+ Setting a bit in this register sets
+ the corresponding bit in the ACX_HINT_MASK register
+ without affecting the mask
+ state of other bits (0 = no effect).
+==============================================*/
+ ACX_REG_HINT_MASK_SET,
+
+/*=============================================
+ Host Interrupt Mask Clear 16bit,(Write only)
+ ------------------------------------------
+ Setting a bit in this register clears
+ the corresponding bit in the ACX_HINT_MASK register
+ without affecting the mask
+ state of other bits (0 = no effect).
+=============================================*/
+ ACX_REG_HINT_MASK_CLR,
+
+/*=============================================
+ Host Interrupt Status Nondestructive Read
+ 16bit,(Read only)
+ ------------------------------------------
+ The host can read this register to determine
+ which interrupts are active.
+ Reading this register doesn't
+ affect its content.
+=============================================*/
+ ACX_REG_INTERRUPT_NO_CLEAR,
+
+/*=============================================
+ Host Interrupt Status Clear on Read Register
+ 16bit,(Read only)
+ ------------------------------------------
+ The host can read this register to determine
+ which interrupts are active.
+ Reading this register clears it,
+ thus making all interrupts inactive.
+==============================================*/
+ ACX_REG_INTERRUPT_CLEAR,
+
+/*=============================================
+ Host Interrupt Acknowledge Register
+ 16bit,(Write only)
+ ------------------------------------------
+ The host can set individual bits in this
+ register to clear (acknowledge) the corresp.
+ interrupt status bits in the HINT_STS_CLR and
+ HINT_STS_ND registers, thus making the
+ associated interrupt inactive (0 = no effect).
+==============================================*/
+ ACX_REG_INTERRUPT_ACK,
+
+/*===============================================
+ Host Software Reset - 32bit RW
+ ------------------------------------------
+ [31:1] Reserved
+ 0 SOFT_RESET Soft Reset - When this bit is set,
+ it holds the Wlan hardware in a soft reset state.
+ This reset disables all MAC and baseband processor
+ clocks except the CardBus/PCI interface clock.
+ It also initializes all MAC state machines except
+ the host interface. It does not reload the
+ contents of the EEPROM. When this bit is cleared
+ (not self-clearing), the Wlan hardware
+ exits the software reset state.
+===============================================*/
+ ACX_REG_SLV_SOFT_RESET,
+
+/*===============================================
+ EEPROM Burst Read Start - 32bit RW
+ ------------------------------------------
+ [31:1] Reserved
+ 0 ACX_EE_START - EEPROM Burst Read Start 0
+ Setting this bit starts a burst read from
+ the external EEPROM.
+ If this bit is set (after reset) before an EEPROM read/write,
+ the burst read starts at EEPROM address 0.
+ Otherwise, it starts at the address
+ following the address of the previous access.
+ The Wlan hardware clears this bit automatically.
+
+ Default: 0x00000000
+*================================================*/
+ ACX_REG_EE_START,
+
+/* Embedded ARM CPU Control */
+
+/*===============================================
+ Halt eCPU - 32bit RW
+ ------------------------------------------
+ 0 HALT_ECPU Halt Embedded CPU - This bit is the
+  complement of bit 1 (MDATA2) in the SOR_CFG register.
+ During a hardware reset, this bit holds
+ the inverse of MDATA2.
+ When downloading firmware from the host,
+ set this bit (pull down MDATA2).
+ The host clears this bit after downloading the firmware into
+ zero-wait-state SSRAM.
+ When loading firmware from Flash, clear this bit (pull up MDATA2)
+ so that the eCPU can run the bootloader code in Flash
+ HALT_ECPU eCPU State
+ --------------------
+ 1 halt eCPU
+ 0 enable eCPU
+ ===============================================*/
+ ACX_REG_ECPU_CONTROL,
+
+ ACX_REG_TABLE_LEN
+};
+
+#define ACX_SLV_SOFT_RESET_BIT BIT(1)
+#define ACX_REG_EEPROM_START_BIT BIT(1)
+
+/* Command/Information Mailbox Pointers */
+
+/*===============================================
+ Command Mailbox Pointer - 32bit RW
+ ------------------------------------------
+ This register holds the start address of
+ the command mailbox located in the Wlan hardware memory.
+ The host must read this pointer after a reset to
+ find the location of the command mailbox.
+ The Wlan hardware initializes the command mailbox
+ pointer with the default address of the command mailbox.
+ The command mailbox pointer is not valid until after
+ the host receives the Init Complete interrupt from
+ the Wlan hardware.
+ ===============================================*/
+#define REG_COMMAND_MAILBOX_PTR (SCR_PAD0)
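+
+/*
+ * Illustrative sketch only: after the Init Complete interrupt the host
+ * would typically latch this pointer once, e.g.
+ *
+ *     wl->cmd_box_addr = wl12xx_reg_read32(wl, REG_COMMAND_MAILBOX_PTR);
+ *
+ * (wl12xx_reg_read32() is assumed here as the read counterpart of the
+ * wl12xx_reg_write32() helper used in rx.c; wl->cmd_box_addr is the field
+ * main.c reads command answers from.)
+ */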
+
+/*===============================================
+ Information Mailbox Pointer - 32bit RW
+ ------------------------------------------
+ This register holds the start address of
+ the information mailbox located in the Wlan hardware memory.
+ The host must read this pointer after a reset to find
+ the location of the information mailbox.
+ The Wlan hardware initializes the information mailbox pointer
+ with the default address of the information mailbox.
+ The information mailbox pointer is not valid
+ until after the host receives the Init Complete interrupt from
+ the Wlan hardware.
+ ===============================================*/
+#define REG_EVENT_MAILBOX_PTR (SCR_PAD1)
+
+
+/* Misc */
+
+#define REG_ENABLE_TX_RX (ENABLE)
+/*
+ * Rx configuration (filter) information element
+ * ---------------------------------------------
+ */
+#define REG_RX_CONFIG (RX_CFG)
+#define REG_RX_FILTER (RX_FILTER_CFG)
+
+
+#define RX_CFG_ENABLE_PHY_HEADER_PLCP 0x0002
+
+/* promiscuous - receives all valid frames */
+#define RX_CFG_PROMISCUOUS 0x0008
+
+/* receives frames from any BSSID */
+#define RX_CFG_BSSID 0x0020
+
+/* receives frames destined to any MAC address */
+#define RX_CFG_MAC 0x0010
+
+#define RX_CFG_ENABLE_ONLY_MY_DEST_MAC 0x0010
+#define RX_CFG_ENABLE_ANY_DEST_MAC 0x0000
+#define RX_CFG_ENABLE_ONLY_MY_BSSID 0x0020
+#define RX_CFG_ENABLE_ANY_BSSID 0x0000
+
+/* discards all broadcast frames */
+#define RX_CFG_DISABLE_BCAST 0x0200
+
+#define RX_CFG_ENABLE_ONLY_MY_SSID 0x0400
+#define RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR 0x0800
+#define RX_CFG_COPY_RX_STATUS 0x2000
+#define RX_CFG_TSF 0x10000
+
+#define RX_CONFIG_OPTION_ANY_DST_MY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \
+ RX_CFG_ENABLE_ONLY_MY_BSSID)
+
+#define RX_CONFIG_OPTION_MY_DST_ANY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\
+ | RX_CFG_ENABLE_ANY_BSSID)
+
+#define RX_CONFIG_OPTION_ANY_DST_ANY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \
+ RX_CFG_ENABLE_ANY_BSSID)
+
+#define RX_CONFIG_OPTION_MY_DST_MY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\
+ | RX_CFG_ENABLE_ONLY_MY_BSSID)
+
+#define RX_CONFIG_OPTION_FOR_SCAN (RX_CFG_ENABLE_PHY_HEADER_PLCP \
+ | RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR \
+ | RX_CFG_COPY_RX_STATUS | RX_CFG_TSF)
+
+#define RX_CONFIG_OPTION_FOR_MEASUREMENT (RX_CFG_ENABLE_ANY_DEST_MAC)
+
+#define RX_CONFIG_OPTION_FOR_JOIN (RX_CFG_ENABLE_ONLY_MY_BSSID | \
+ RX_CFG_ENABLE_ONLY_MY_DEST_MAC)
+
+#define RX_CONFIG_OPTION_FOR_IBSS_JOIN (RX_CFG_ENABLE_ONLY_MY_SSID | \
+ RX_CFG_ENABLE_ONLY_MY_DEST_MAC)
+
+#define RX_FILTER_OPTION_DEF (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\
+ | CFG_RX_CTL_EN | CFG_RX_BCN_EN\
+ | CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
+
+#define RX_FILTER_OPTION_FILTER_ALL 0
+
+#define RX_FILTER_OPTION_DEF_PRSP_BCN (CFG_RX_PRSP_EN | CFG_RX_MGMT_EN\
+ | CFG_RX_RCTS_ACK | CFG_RX_BCN_EN)
+
+#define RX_FILTER_OPTION_JOIN (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\
+ | CFG_RX_BCN_EN | CFG_RX_AUTH_EN\
+ | CFG_RX_ASSOC_EN | CFG_RX_RCTS_ACK\
+ | CFG_RX_PRSP_EN)
+
+
+/*===============================================
+ Phy regs
+ ===============================================*/
+#define ACX_PHY_ADDR_REG SBB_ADDR
+#define ACX_PHY_DATA_REG SBB_DATA
+#define ACX_PHY_CTRL_REG SBB_CTL
+#define ACX_PHY_REG_WR_MASK 0x00000001ul
+#define ACX_PHY_REG_RD_MASK 0x00000002ul
+
+
+/*===============================================
+ EEPROM Read/Write Request 32bit RW
+ ------------------------------------------
+ 1 EE_READ - EEPROM Read Request 1 - Setting this bit
+ loads a single byte of data into the EE_DATA
+ register from the EEPROM location specified in
+ the EE_ADDR register.
+ The Wlan hardware clears this bit automatically.
+ EE_DATA is valid when this bit is cleared.
+
+ 0 EE_WRITE - EEPROM Write Request - Setting this bit
+ writes a single byte of data from the EE_DATA register into the
+ EEPROM location specified in the EE_ADDR register.
+ The Wlan hardware clears this bit automatically.
+*===============================================*/
+#define ACX_EE_CTL_REG EE_CTL
+#define EE_WRITE 0x00000001ul
+#define EE_READ 0x00000002ul
+
+/*===============================================
+ EEPROM Address - 32bit RW
+ ------------------------------------------
+ This register specifies the address
+ within the EEPROM from/to which to read/write data.
+ ===============================================*/
+#define ACX_EE_ADDR_REG EE_ADDR
+
+/*===============================================
+ EEPROM Data - 32bit RW
+ ------------------------------------------
+ This register either holds the 8 bits of data
+ read from the EEPROM or the data to be
+ written to the EEPROM.
+ ===============================================*/
+#define ACX_EE_DATA_REG EE_DATA
+
+/*===============================================
+ EEPROM Base Address - 32bit RW
+ ------------------------------------------
+ This register holds the upper nine bits
+ [23:15] of the 24-bit Wlan hardware memory
+ address for burst reads from EEPROM accesses.
+ The EEPROM provides the lower 15 bits of this address.
+ The MSB of the address from the EEPROM is ignored.
+ ===============================================*/
+#define ACX_EE_CFG EE_CFG
+
+/*===============================================
+ GPIO Output Values -32bit, RW
+ ------------------------------------------
+ [31:16] Reserved
+ [15: 0] Specify the output values (at the output driver inputs) for
+ GPIO[15:0], respectively.
+ ===============================================*/
+#define ACX_GPIO_OUT_REG GPIO_OUT
+#define ACX_MAX_GPIO_LINES 15
+
+/*===============================================
+ Contention window -32bit, RW
+ ------------------------------------------
+ [31:26] Reserved
+ [25:16] Max (0x3ff)
+ [15:07] Reserved
+ [06:00] Current contention window value - default is 0x1F
+ ===============================================*/
+#define ACX_CONT_WIND_CFG_REG CONT_WIND_CFG
+#define ACX_CONT_WIND_MIN_MASK 0x0000007f
+#define ACX_CONT_WIND_MAX 0x03ff0000
+
+/*
+ * Indirect slave register/memory registers
+ * ----------------------------------------
+ */
+#define HW_SLAVE_REG_ADDR_REG 0x00000004
+#define HW_SLAVE_REG_DATA_REG 0x00000008
+#define HW_SLAVE_REG_CTRL_REG 0x0000000c
+
+#define SLAVE_AUTO_INC 0x00010000
+#define SLAVE_NO_AUTO_INC 0x00000000
+#define SLAVE_HOST_LITTLE_ENDIAN 0x00000000
+
+#define HW_SLAVE_MEM_ADDR_REG SLV_MEM_ADDR
+#define HW_SLAVE_MEM_DATA_REG SLV_MEM_DATA
+#define HW_SLAVE_MEM_CTRL_REG SLV_MEM_CTL
+#define HW_SLAVE_MEM_ENDIAN_REG SLV_END_CTL
+
+#define HW_FUNC_EVENT_INT_EN 0x8000
+#define HW_FUNC_EVENT_MASK_REG 0x00000034
+
+#define ACX_MAC_TIMESTAMP_REG (MAC_TIMESTAMP)
+
+/*===============================================
+ HI_CFG Interface Configuration Register Values
+ ------------------------------------------
+ ===============================================*/
+#define HI_CFG_UART_ENABLE 0x00000004
+#define HI_CFG_RST232_ENABLE 0x00000008
+#define HI_CFG_CLOCK_REQ_SELECT 0x00000010
+#define HI_CFG_HOST_INT_ENABLE 0x00000020
+#define HI_CFG_VLYNQ_OUTPUT_ENABLE 0x00000040
+#define HI_CFG_HOST_INT_ACTIVE_LOW 0x00000080
+#define HI_CFG_UART_TX_OUT_GPIO_15 0x00000100
+#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200
+#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400
+
+/*
+ * NOTE: USE_ACTIVE_HIGH compilation flag should be defined in makefile
+ * for platforms using active high interrupt level
+ */
+#ifdef USE_ACTIVE_HIGH
+#define HI_CFG_DEF_VAL \
+ (HI_CFG_UART_ENABLE | \
+ HI_CFG_RST232_ENABLE | \
+ HI_CFG_CLOCK_REQ_SELECT | \
+ HI_CFG_HOST_INT_ENABLE)
+#else
+#define HI_CFG_DEF_VAL \
+ (HI_CFG_UART_ENABLE | \
+ HI_CFG_RST232_ENABLE | \
+ HI_CFG_CLOCK_REQ_SELECT | \
+ HI_CFG_HOST_INT_ENABLE)
+
+#endif
+
+#define REF_FREQ_19_2 0
+#define REF_FREQ_26_0 1
+#define REF_FREQ_38_4 2
+#define REF_FREQ_40_0 3
+#define REF_FREQ_33_6 4
+#define REF_FREQ_NUM 5
+
+#define LUT_PARAM_INTEGER_DIVIDER 0
+#define LUT_PARAM_FRACTIONAL_DIVIDER 1
+#define LUT_PARAM_ATTN_BB 2
+#define LUT_PARAM_ALPHA_BB 3
+#define LUT_PARAM_STOP_TIME_BB 4
+#define LUT_PARAM_BB_PLL_LOOP_FILTER 5
+#define LUT_PARAM_NUM 6
+
+#define ACX_EEPROMLESS_IND_REG (SCR_PAD4)
+#define USE_EEPROM 0
+#define SOFT_RESET_MAX_TIME 1000000
+#define SOFT_RESET_STALL_TIME 1000
+#define NVS_DATA_BUNDARY_ALIGNMENT 4
+
+
+/* Firmware image load chunk size */
+#define CHUNK_SIZE 512
+
+/* Firmware image header size */
+#define FW_HDR_SIZE 8
+
+#define ECPU_CONTROL_HALT 0x00000101
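+
+/*
+ * Sketch of the intended use (based on the Halt eCPU description above):
+ * before downloading firmware the host halts the embedded CPU via
+ * ACX_REG_ECPU_CONTROL, transfers the image in CHUNK_SIZE pieces and then
+ * releases the CPU again; the actual boot sequence is implemented in the
+ * chip-specific op_boot() hook, not in this header.
+ */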
+
+
+/******************************************************************************
+
+ CHANNELS, BAND & REG DOMAINS definitions
+
+******************************************************************************/
+
+
+enum {
+ RADIO_BAND_2_4GHZ = 0, /* 2.4 Ghz band */
+ RADIO_BAND_5GHZ = 1, /* 5 Ghz band */
+ RADIO_BAND_JAPAN_4_9_GHZ = 2,
+ DEFAULT_BAND = RADIO_BAND_2_4GHZ,
+ INVALID_BAND = 0xFE,
+ MAX_RADIO_BANDS = 0xFF
+};
+
+enum {
+ NO_RATE = 0,
+ RATE_1MBPS = 0x0A,
+ RATE_2MBPS = 0x14,
+ RATE_5_5MBPS = 0x37,
+ RATE_6MBPS = 0x0B,
+ RATE_9MBPS = 0x0F,
+ RATE_11MBPS = 0x6E,
+ RATE_12MBPS = 0x0A,
+ RATE_18MBPS = 0x0E,
+ RATE_22MBPS = 0xDC,
+ RATE_24MBPS = 0x09,
+ RATE_36MBPS = 0x0D,
+ RATE_48MBPS = 0x08,
+ RATE_54MBPS = 0x0C
+};
+
+enum {
+ RATE_INDEX_1MBPS = 0,
+ RATE_INDEX_2MBPS = 1,
+ RATE_INDEX_5_5MBPS = 2,
+ RATE_INDEX_6MBPS = 3,
+ RATE_INDEX_9MBPS = 4,
+ RATE_INDEX_11MBPS = 5,
+ RATE_INDEX_12MBPS = 6,
+ RATE_INDEX_18MBPS = 7,
+ RATE_INDEX_22MBPS = 8,
+ RATE_INDEX_24MBPS = 9,
+ RATE_INDEX_36MBPS = 10,
+ RATE_INDEX_48MBPS = 11,
+ RATE_INDEX_54MBPS = 12,
+ RATE_INDEX_MAX = RATE_INDEX_54MBPS,
+ MAX_RATE_INDEX,
+ INVALID_RATE_INDEX = MAX_RATE_INDEX,
+ RATE_INDEX_ENUM_MAX_SIZE = 0x7FFFFFFF
+};
+
+enum {
+ RATE_MASK_1MBPS = 0x1,
+ RATE_MASK_2MBPS = 0x2,
+ RATE_MASK_5_5MBPS = 0x4,
+ RATE_MASK_11MBPS = 0x20,
+};
+
+#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */
+#define OFDM_RATE_BIT BIT(6)
+#define PBCC_RATE_BIT BIT(7)
+
+enum {
+ CCK_LONG = 0,
+ CCK_SHORT = SHORT_PREAMBLE_BIT,
+ PBCC_LONG = PBCC_RATE_BIT,
+ PBCC_SHORT = PBCC_RATE_BIT | SHORT_PREAMBLE_BIT,
+ OFDM = OFDM_RATE_BIT
+};
+
+/******************************************************************************
+
+Transmit-Descriptor RATE-SET field definitions...
+
+Define a new "Rate-Set" for TX path that incorporates the
+Rate & Modulation info into a single 16-bit field.
+
+TxdRateSet_t:
+b15 - Indicates Preamble type (1=SHORT, 0=LONG).
+ Notes:
+ Must be LONG (0) for 1Mbps rate.
+ Does not apply (set to 0) for RevG-OFDM rates.
+b14 - Indicates PBCC encoding (1=PBCC, 0=not).
+ Notes:
+ Does not apply (set to 0) for rates 1 and 2 Mbps.
+ Does not apply (set to 0) for RevG-OFDM rates.
+b13 - Unused (set to 0).
+b12-b0 - Supported Rate indicator bits as defined below.
+
+******************************************************************************/
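+
+/*
+ * Illustrative example (assumption, not taken from this header): if the
+ * b12-b0 indicator bits follow the RATE_INDEX_* ordering defined above,
+ * a short-preamble 11 Mbps CCK entry would look roughly like
+ * BIT(15) | BIT(RATE_INDEX_11MBPS).
+ */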
+
+
+#define TNETW1251_CHIP_ID_PG1_0 0x07010101
+#define TNETW1251_CHIP_ID_PG1_1 0x07020101
+#define TNETW1251_CHIP_ID_PG1_2 0x07030101
+
+/*************************************************************************
+
+ Interrupt Trigger Register (Host -> WiLink)
+
+**************************************************************************/
+
+/* Hardware to Embedded CPU Interrupts - first 32-bit register set */
+
+/*
+ * Host Command Interrupt. Setting this bit masks
+ * the interrupt that the host issues to inform
+ * the FW that it has sent a command
+ * to the Wlan hardware Command Mailbox.
+ */
+#define INTR_TRIG_CMD BIT(0)
+
+/*
+ * Host Event Acknowledge Interrupt. The host
+ * sets this bit to acknowledge that it received
+ * the unsolicited information from the event
+ * mailbox.
+ */
+#define INTR_TRIG_EVENT_ACK BIT(1)
+
+/*
+ * The host sets this bit to inform the Wlan
+ * FW that a TX packet is in the XFER
+ * Buffer #0.
+ */
+#define INTR_TRIG_TX_PROC0 BIT(2)
+
+/*
+ * The host sets this bit to inform the FW
+ * that it read a packet from RX XFER
+ * Buffer #0.
+ */
+#define INTR_TRIG_RX_PROC0 BIT(3)
+
+#define INTR_TRIG_DEBUG_ACK BIT(4)
+
+#define INTR_TRIG_STATE_CHANGED BIT(5)
+
+
+/* Hardware to Embedded CPU Interrupts - second 32-bit register set */
+
+/*
+ * The host sets this bit to inform the FW
+ * that it read a packet from RX XFER
+ * Buffer #1.
+ */
+#define INTR_TRIG_RX_PROC1 BIT(17)
+
+/*
+ * The host sets this bit to inform the Wlan
+ * hardware that a TX packet is in the XFER
+ * Buffer #1.
+ */
+#define INTR_TRIG_TX_PROC1 BIT(18)
+
+#endif
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
new file mode 100644
index 00000000000..981ea259eb8
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -0,0 +1,208 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (c) 1998-2007 Texas Instruments Incorporated
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <net/mac80211.h>
+
+#include "wl12xx.h"
+#include "reg.h"
+#include "spi.h"
+#include "rx.h"
+
+static void wl12xx_rx_header(struct wl12xx *wl,
+ struct wl12xx_rx_descriptor *desc)
+{
+ u32 rx_packet_ring_addr;
+
+ rx_packet_ring_addr = wl->data_path->rx_packet_ring_addr;
+ if (wl->rx_current_buffer)
+ rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;
+
+ wl12xx_spi_mem_read(wl, rx_packet_ring_addr, desc,
+ sizeof(struct wl12xx_rx_descriptor));
+}
+
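+/*
+ * Fill in the mac80211 ieee80211_rx_status for a received frame from the
+ * wl12xx RX descriptor: band, timestamp (full TSF for IBSS beacons),
+ * signal/quality, frequency and encryption/FCS flags.
+ */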
+static void wl12xx_rx_status(struct wl12xx *wl,
+ struct wl12xx_rx_descriptor *desc,
+ struct ieee80211_rx_status *status,
+ u8 beacon)
+{
+ memset(status, 0, sizeof(struct ieee80211_rx_status));
+
+ status->band = IEEE80211_BAND_2GHZ;
+ status->mactime = desc->timestamp;
+
+ /*
+ * The rx status timestamp is a 32-bit value while the TSF is a
+ * 64-bit one.
+ * For IBSS merging, TSF is mandatory, so we have to get it
+ * somehow, so we ask for ACX_TSF_INFO.
+ * That could be moved to the get_tsf() hook, but unfortunately,
+ * this one must be atomic, while our SPI routines can sleep.
+ */
+ if ((wl->bss_type == BSS_TYPE_IBSS) && beacon) {
+ u64 mactime;
+ int ret;
+ struct wl12xx_command cmd;
+ struct acx_tsf_info *tsf_info;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ret = wl12xx_cmd_interrogate(wl, ACX_TSF_INFO,
+ sizeof(struct acx_tsf_info),
+ &cmd);
+ if (ret < 0) {
+ wl12xx_warning("ACX_FW_REV interrogate failed");
+ return;
+ }
+
+ tsf_info = (struct acx_tsf_info *)&(cmd.parameters);
+
+ mactime = tsf_info->current_tsf_lsb |
+ ((u64) tsf_info->current_tsf_msb << 32);
+
+ status->mactime = mactime;
+ }
+
+ status->signal = desc->rssi;
+ status->qual = (desc->rssi - WL12XX_RX_MIN_RSSI) * 100 /
+ (WL12XX_RX_MAX_RSSI - WL12XX_RX_MIN_RSSI);
+ status->qual = min(status->qual, 100);
+ status->qual = max(status->qual, 0);
+
+ /*
+ * FIXME: guessing that snr needs to be divided by two, otherwise
+ * the values don't make any sense
+ */
+ status->noise = desc->rssi - desc->snr / 2;
+
+ status->freq = ieee80211_channel_to_frequency(desc->channel);
+
+ status->flag |= RX_FLAG_TSFT;
+
+ if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
+ status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+
+ if (likely(!(desc->flags & RX_DESC_DECRYPT_FAIL)))
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (unlikely(desc->flags & RX_DESC_MIC_FAIL))
+ status->flag |= RX_FLAG_MMIC_ERROR;
+ }
+
+ if (unlikely(!(desc->flags & RX_DESC_VALID_FCS)))
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+
+ /* FIXME: set status->rate_idx */
+}
+
+static void wl12xx_rx_body(struct wl12xx *wl,
+ struct wl12xx_rx_descriptor *desc)
+{
+ struct sk_buff *skb;
+ struct ieee80211_rx_status status;
+ u8 *rx_buffer, beacon = 0;
+ u16 length, *fc;
+ u32 curr_id, last_id_inc, rx_packet_ring_addr;
+
+ length = WL12XX_RX_ALIGN(desc->length - PLCP_HEADER_LENGTH);
+ curr_id = (desc->flags & RX_DESC_SEQNUM_MASK) >> RX_DESC_PACKETID_SHIFT;
+ last_id_inc = (wl->rx_last_id + 1) % (RX_MAX_PACKET_ID + 1);
+
+ if (last_id_inc != curr_id) {
+ wl12xx_warning("curr ID:%d, last ID inc:%d",
+ curr_id, last_id_inc);
+ wl->rx_last_id = curr_id;
+ } else {
+ wl->rx_last_id = last_id_inc;
+ }
+
+ rx_packet_ring_addr = wl->data_path->rx_packet_ring_addr +
+ sizeof(struct wl12xx_rx_descriptor) + 20;
+ if (wl->rx_current_buffer)
+ rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;
+
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ wl12xx_error("Couldn't allocate RX frame");
+ return;
+ }
+
+ rx_buffer = skb_put(skb, length);
+ wl12xx_spi_mem_read(wl, rx_packet_ring_addr, rx_buffer, length);
+
+ /* The actual length doesn't include the target's alignment */
+ skb->len = desc->length - PLCP_HEADER_LENGTH;
+
+ fc = (u16 *)skb->data;
+
+ if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
+ beacon = 1;
+
+ wl12xx_rx_status(wl, desc, &status, beacon);
+
+ wl12xx_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
+ beacon ? "beacon" : "");
+
+ ieee80211_rx(wl->hw, skb, &status);
+}
+
+static void wl12xx_rx_ack(struct wl12xx *wl)
+{
+ u32 data, addr;
+
+ if (wl->rx_current_buffer) {
+ addr = ACX_REG_INTERRUPT_TRIG_H;
+ data = INTR_TRIG_RX_PROC1;
+ } else {
+ addr = ACX_REG_INTERRUPT_TRIG;
+ data = INTR_TRIG_RX_PROC0;
+ }
+
+ wl12xx_reg_write32(wl, addr, data);
+
+ /* Toggle buffer ring */
+ wl->rx_current_buffer = !wl->rx_current_buffer;
+}
+
+
+void wl12xx_rx(struct wl12xx *wl)
+{
+ struct wl12xx_rx_descriptor rx_desc;
+
+ if (wl->state != WL12XX_STATE_ON)
+ return;
+
+ /* We first read the frame's header */
+ wl12xx_rx_header(wl, &rx_desc);
+
+ /* Now we can read the body */
+ wl12xx_rx_body(wl, &rx_desc);
+
+ /* Finally, we need to ACK the RX */
+ wl12xx_rx_ack(wl);
+
+ return;
+}
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/wl12xx/rx.h
new file mode 100644
index 00000000000..8a23fdea501
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -0,0 +1,122 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (c) 1998-2007 Texas Instruments Incorporated
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_RX_H__
+#define __WL12XX_RX_H__
+
+#include <linux/bitops.h>
+
+/*
+ * RX PATH
+ *
+ * The Rx path uses a double buffer and an rx_control structure, each located
+ * at a fixed address in the device memory. The host keeps track of which
+ * buffer is available and alternates between them on a per packet basis.
+ * The size of each of the two buffers is large enough to hold the longest
+ * 802.3 packet.
+ * The RX path goes like that:
+ * 1) The target generates an interrupt each time a new packet is received.
+ * There are 2 RX interrupts, one for each buffer.
+ * 2) The host reads the received packet from one of the double buffers.
+ * 3) The host triggers a target interrupt.
+ * 4) The target prepares the next RX packet.
+ */
+
+#define WL12XX_RX_MAX_RSSI -30
+#define WL12XX_RX_MIN_RSSI -95
+
+#define WL12XX_RX_ALIGN_TO 4
+#define WL12XX_RX_ALIGN(len) (((len) + WL12XX_RX_ALIGN_TO - 1) & \
+ ~(WL12XX_RX_ALIGN_TO - 1))
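+/* For example, WL12XX_RX_ALIGN(61) == WL12XX_RX_ALIGN(64) == 64 */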
+
+#define SHORT_PREAMBLE_BIT BIT(0)
+#define OFDM_RATE_BIT BIT(6)
+#define PBCC_RATE_BIT BIT(7)
+
+#define PLCP_HEADER_LENGTH 8
+#define RX_DESC_PACKETID_SHIFT 11
+#define RX_MAX_PACKET_ID 3
+
+#define RX_DESC_VALID_FCS 0x0001
+#define RX_DESC_MATCH_RXADDR1 0x0002
+#define RX_DESC_MCAST 0x0004
+#define RX_DESC_STAINTIM 0x0008
+#define RX_DESC_VIRTUAL_BM 0x0010
+#define RX_DESC_BCAST 0x0020
+#define RX_DESC_MATCH_SSID 0x0040
+#define RX_DESC_MATCH_BSSID 0x0080
+#define RX_DESC_ENCRYPTION_MASK 0x0300
+#define RX_DESC_MEASURMENT 0x0400
+#define RX_DESC_SEQNUM_MASK 0x1800
+#define RX_DESC_MIC_FAIL 0x2000
+#define RX_DESC_DECRYPT_FAIL 0x4000
+
+struct wl12xx_rx_descriptor {
+ u32 timestamp; /* In microseconds */
+ u16 length; /* Payload length, including headers */
+ u16 flags;
+
+ /*
+ * 0 - 802.11
+ * 1 - 802.3
+ * 2 - IP
+ * 3 - Raw Codec
+ */
+ u8 type;
+
+ /*
+ * Received Rate:
+ * 0x0A - 1MBPS
+ * 0x14 - 2MBPS
+ * 0x37 - 5_5MBPS
+ * 0x0B - 6MBPS
+ * 0x0F - 9MBPS
+ * 0x6E - 11MBPS
+ * 0x0A - 12MBPS
+ * 0x0E - 18MBPS
+ * 0xDC - 22MBPS
+ * 0x09 - 24MBPS
+ * 0x0D - 36MBPS
+ * 0x08 - 48MBPS
+ * 0x0C - 54MBPS
+ */
+ u8 rate;
+
+ u8 mod_pre; /* Modulation and preamble */
+ u8 channel;
+
+ /*
+ * 0 - 2.4 Ghz
+ * 0 - 2.4 GHz
+ * 1 - 5 GHz
+ u8 band;
+
+ s8 rssi; /* in dB */
+ u8 rcpi; /* in dB */
+ u8 snr; /* in dB */
+} __attribute__ ((packed));
+
+void wl12xx_rx(struct wl12xx *wl);
+
+#endif
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
new file mode 100644
index 00000000000..abdf171a47e
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -0,0 +1,358 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/crc7.h>
+#include <linux/spi/spi.h>
+
+#include "wl12xx.h"
+#include "wl12xx_80211.h"
+#include "reg.h"
+#include "spi.h"
+#include "ps.h"
+
+static int wl12xx_translate_reg_addr(struct wl12xx *wl, int addr)
+{
+ /* If the address is lower than REGISTERS_BASE, it means that this is
+ * a chip-specific register address, so look it up in the registers
+ * table */
+ if (addr < REGISTERS_BASE) {
+ /* Make sure we don't go over the table */
+ if (addr >= ACX_REG_TABLE_LEN) {
+ wl12xx_error("address out of range (%d)", addr);
+ return -EINVAL;
+ }
+ addr = wl->chip.acx_reg_table[addr];
+ }
+
+ return addr - wl->physical_reg_addr + wl->virtual_reg_addr;
+}
+
+static int wl12xx_translate_mem_addr(struct wl12xx *wl, int addr)
+{
+ return addr - wl->physical_mem_addr + wl->virtual_mem_addr;
+}
+
+
+void wl12xx_spi_reset(struct wl12xx *wl)
+{
+ u8 *cmd;
+ struct spi_transfer t;
+ struct spi_message m;
+
+ cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
+ if (!cmd) {
+ wl12xx_error("could not allocate cmd for spi reset");
+ return;
+ }
+
+ memset(&t, 0, sizeof(t));
+ spi_message_init(&m);
+
+ memset(cmd, 0xff, WSPI_INIT_CMD_LEN);
+
+ t.tx_buf = cmd;
+ t.len = WSPI_INIT_CMD_LEN;
+ spi_message_add_tail(&t, &m);
+
+ spi_sync(wl->spi, &m);
+
+ wl12xx_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+}
+
+void wl12xx_spi_init(struct wl12xx *wl)
+{
+ u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
+ struct spi_transfer t;
+ struct spi_message m;
+
+ cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
+ if (!cmd) {
+ wl12xx_error("could not allocate cmd for spi init");
+ return;
+ }
+
+ memset(crc, 0, sizeof(crc));
+ memset(&t, 0, sizeof(t));
+ spi_message_init(&m);
+
+ /*
+ * Set WSPI_INIT_COMMAND
+ * the data is sent from MSB to LSB
+ */
+ cmd[2] = 0xff;
+ cmd[3] = 0xff;
+ cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
+ cmd[0] = 0;
+ cmd[7] = 0;
+ cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
+ cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+
+ if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
+ cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
+ else
+ cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
+
+ cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
+ | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
+
+ crc[0] = cmd[1];
+ crc[1] = cmd[0];
+ crc[2] = cmd[7];
+ crc[3] = cmd[6];
+ crc[4] = cmd[5];
+
+ cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1;
+ cmd[4] |= WSPI_INIT_CMD_END;
+
+ t.tx_buf = cmd;
+ t.len = WSPI_INIT_CMD_LEN;
+ spi_message_add_tail(&t, &m);
+
+ spi_sync(wl->spi, &m);
+
+ wl12xx_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+}
+
+/* Set the SPI partitions to access the chip addresses
+ *
+ * There are two VIRTUAL (SPI) partitions (the memory partition and the
+ * registers partition), which are mapped to two different areas of the
+ * PHYSICAL (hardware) memory. This function also checks that the
+ * partitions do not overlap. In the diagram below, the
+ * memory partition comes before the register partition, but the opposite is
+ * also supported.
+ *
+ * PHYSICAL address
+ * space
+ *
+ * | |
+ * ...+----+--> mem_start
+ * VIRTUAL address ... | |
+ * space ... | | [PART_0]
+ * ... | |
+ * 0x00000000 <--+----+... ...+----+--> mem_start + mem_size
+ * | | ... | |
+ * |MEM | ... | |
+ * | | ... | |
+ part_size <--+----+... | | (unused area)
+ * | | ... | |
+ * |REG | ... | |
+ * part_size | | ... | |
+ * + <--+----+... ...+----+--> reg_start
+ * reg_size ... | |
+ * ... | | [PART_1]
+ * ... | |
+ * ...+----+--> reg_start + reg_size
+ * | |
+ *
+ */
+void wl12xx_set_partition(struct wl12xx *wl,
+ u32 mem_start, u32 mem_size,
+ u32 reg_start, u32 reg_size)
+{
+ u8 tx_buf[sizeof(u32) + 2 * sizeof(struct wl12xx_partition)];
+ struct wl12xx_partition *partition;
+ struct spi_transfer t;
+ struct spi_message m;
+ u32 *cmd;
+ size_t len;
+ int addr;
+
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(t));
+ memset(tx_buf, 0, sizeof(tx_buf));
+
+ cmd = (u32 *) tx_buf;
+ partition = (struct wl12xx_partition *) (tx_buf + sizeof(u32));
+ addr = HW_ACCESS_PART0_SIZE_ADDR;
+ len = 2 * sizeof(struct wl12xx_partition);
+
+ *cmd |= WSPI_CMD_WRITE;
+ *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
+ *cmd |= addr & WSPI_CMD_BYTE_ADDR;
+
+ wl12xx_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
+ mem_start, mem_size);
+ wl12xx_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
+ reg_start, reg_size);
+
+ /* Make sure that the two partitions together don't exceed the
+ * address range */
+ if ((mem_size + reg_size) > HW_ACCESS_MEMORY_MAX_RANGE) {
+ wl12xx_debug(DEBUG_SPI, "Total size exceeds maximum virtual"
+ " address range. Truncating partition[0].");
+ mem_size = HW_ACCESS_MEMORY_MAX_RANGE - reg_size;
+ wl12xx_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
+ mem_start, mem_size);
+ wl12xx_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
+ reg_start, reg_size);
+ }
+
+ if ((mem_start < reg_start) &&
+ ((mem_start + mem_size) > reg_start)) {
+ /* Guarantee that the memory partition doesn't overlap the
+ * registers partition */
+ wl12xx_debug(DEBUG_SPI, "End of partition[0] is "
+ "overlapping partition[1]. Adjusted.");
+ mem_size = reg_start - mem_start;
+ wl12xx_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
+ mem_start, mem_size);
+ wl12xx_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
+ reg_start, reg_size);
+ } else if ((reg_start < mem_start) &&
+ ((reg_start + reg_size) > mem_start)) {
+ /* Guarantee that the register partition doesn't overlap the
+ * memory partition */
+ wl12xx_debug(DEBUG_SPI, "End of partition[1] is"
+ " overlapping partition[0]. Adjusted.");
+ reg_size = mem_start - reg_start;
+ wl12xx_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
+ mem_start, mem_size);
+ wl12xx_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
+ reg_start, reg_size);
+ }
+
+ partition[0].start = mem_start;
+ partition[0].size = mem_size;
+ partition[1].start = reg_start;
+ partition[1].size = reg_size;
+
+ wl->physical_mem_addr = mem_start;
+ wl->physical_reg_addr = reg_start;
+
+ wl->virtual_mem_addr = 0;
+ wl->virtual_reg_addr = mem_size;
+
+ t.tx_buf = tx_buf;
+ t.len = sizeof(tx_buf);
+ spi_message_add_tail(&t, &m);
+
+ spi_sync(wl->spi, &m);
+}
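+
+/*
+ * For example, the boot code (wl1251_upload_firmware()) maps the firmware
+ * download area by calling:
+ *
+ *	wl12xx_set_partition(wl,
+ *			     p_table[PART_DOWN].mem.start,
+ *			     p_table[PART_DOWN].mem.size,
+ *			     p_table[PART_DOWN].reg.start,
+ *			     p_table[PART_DOWN].reg.size);
+ *
+ * which, with the wl1251 partition table, exposes device memory
+ * 0x00000000..0x00016800 at virtual address 0 and the register area
+ * right after it.
+ */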
+
+void wl12xx_spi_read(struct wl12xx *wl, int addr, void *buf,
+ size_t len)
+{
+ struct spi_transfer t[3];
+ struct spi_message m;
+ char busy_buf[TNETWIF_READ_OFFSET_BYTES];
+ u32 cmd;
+
+ cmd = 0;
+ cmd |= WSPI_CMD_READ;
+ cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
+ cmd |= addr & WSPI_CMD_BYTE_ADDR;
+
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].tx_buf = &cmd;
+ t[0].len = 4;
+ spi_message_add_tail(&t[0], &m);
+
+ /* Read the busy and non-busy words */
+ t[1].rx_buf = busy_buf;
+ t[1].len = TNETWIF_READ_OFFSET_BYTES;
+ spi_message_add_tail(&t[1], &m);
+
+ t[2].rx_buf = buf;
+ t[2].len = len;
+ spi_message_add_tail(&t[2], &m);
+
+ spi_sync(wl->spi, &m);
+
+ /* FIXME: check busy words */
+
+ wl12xx_dump(DEBUG_SPI, "spi_read cmd -> ", &cmd, sizeof(cmd));
+ wl12xx_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
+}
+
+void wl12xx_spi_write(struct wl12xx *wl, int addr, void *buf,
+ size_t len)
+{
+ struct spi_transfer t[2];
+ struct spi_message m;
+ u32 cmd;
+
+ cmd = 0;
+ cmd |= WSPI_CMD_WRITE;
+ cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
+ cmd |= addr & WSPI_CMD_BYTE_ADDR;
+
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].tx_buf = &cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = len;
+ spi_message_add_tail(&t[1], &m);
+
+ spi_sync(wl->spi, &m);
+
+ wl12xx_dump(DEBUG_SPI, "spi_write cmd -> ", &cmd, sizeof(cmd));
+ wl12xx_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
+}
+
+void wl12xx_spi_mem_read(struct wl12xx *wl, int addr, void *buf,
+ size_t len)
+{
+ int physical;
+
+ physical = wl12xx_translate_mem_addr(wl, addr);
+
+ wl12xx_spi_read(wl, physical, buf, len);
+}
+
+void wl12xx_spi_mem_write(struct wl12xx *wl, int addr, void *buf,
+ size_t len)
+{
+ int physical;
+
+ physical = wl12xx_translate_mem_addr(wl, addr);
+
+ wl12xx_spi_write(wl, physical, buf, len);
+}
+
+u32 wl12xx_mem_read32(struct wl12xx *wl, int addr)
+{
+ return wl12xx_read32(wl, wl12xx_translate_mem_addr(wl, addr));
+}
+
+void wl12xx_mem_write32(struct wl12xx *wl, int addr, u32 val)
+{
+ wl12xx_write32(wl, wl12xx_translate_mem_addr(wl, addr), val);
+}
+
+u32 wl12xx_reg_read32(struct wl12xx *wl, int addr)
+{
+ return wl12xx_read32(wl, wl12xx_translate_reg_addr(wl, addr));
+}
+
+void wl12xx_reg_write32(struct wl12xx *wl, int addr, u32 val)
+{
+ wl12xx_write32(wl, wl12xx_translate_reg_addr(wl, addr), val);
+}
diff --git a/drivers/net/wireless/wl12xx/spi.h b/drivers/net/wireless/wl12xx/spi.h
new file mode 100644
index 00000000000..fd3227e904a
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/spi.h
@@ -0,0 +1,109 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (c) 1998-2007 Texas Instruments Incorporated
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_SPI_H__
+#define __WL12XX_SPI_H__
+
+#include "cmd.h"
+#include "acx.h"
+#include "reg.h"
+
+#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
+
+#define HW_ACCESS_PART0_SIZE_ADDR 0x1FFC0
+#define HW_ACCESS_PART0_START_ADDR 0x1FFC4
+#define HW_ACCESS_PART1_SIZE_ADDR 0x1FFC8
+#define HW_ACCESS_PART1_START_ADDR 0x1FFCC
+
+#define HW_ACCESS_REGISTER_SIZE 4
+
+#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
+
+#define WSPI_CMD_READ 0x40000000
+#define WSPI_CMD_WRITE 0x00000000
+#define WSPI_CMD_FIXED 0x20000000
+#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
+#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
+#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
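+
+/*
+ * Example command word: a 4-byte read from (translated) address 0x0494 is
+ * encoded as WSPI_CMD_READ | (4 << WSPI_CMD_BYTE_LENGTH_OFFSET) | 0x0494
+ * == 0x40080494 (see wl12xx_spi_read()).
+ */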
+
+#define WSPI_INIT_CMD_CRC_LEN 5
+
+#define WSPI_INIT_CMD_START 0x00
+#define WSPI_INIT_CMD_TX 0x40
+/* the extra bypass bit is sampled by the TNET as '1' */
+#define WSPI_INIT_CMD_BYPASS_BIT 0x80
+#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
+#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
+#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
+#define WSPI_INIT_CMD_IOD 0x40
+#define WSPI_INIT_CMD_IP 0x20
+#define WSPI_INIT_CMD_CS 0x10
+#define WSPI_INIT_CMD_WS 0x08
+#define WSPI_INIT_CMD_WSPI 0x01
+#define WSPI_INIT_CMD_END 0x01
+
+#define WSPI_INIT_CMD_LEN 8
+
+#define TNETWIF_READ_OFFSET_BYTES 8
+#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
+ ((TNETWIF_READ_OFFSET_BYTES - 4) / sizeof(u32))
+#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
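+
+/*
+ * With TNETWIF_READ_OFFSET_BYTES == 8 the fixed busy length above works
+ * out to (8 - 4) / sizeof(u32) == 1 word; wl12xx_spi_read() reads
+ * TNETWIF_READ_OFFSET_BYTES of busy words into a scratch buffer before
+ * the actual payload.
+ */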
+
+
+/* Raw target IO, address is not translated */
+void wl12xx_spi_read(struct wl12xx *wl, int addr, void *buf, size_t len);
+void wl12xx_spi_write(struct wl12xx *wl, int addr, void *buf, size_t len);
+
+/* Memory target IO, address is translated to partition 0 */
+void wl12xx_spi_mem_read(struct wl12xx *wl, int addr, void *buf, size_t len);
+void wl12xx_spi_mem_write(struct wl12xx *wl, int addr, void *buf, size_t len);
+u32 wl12xx_mem_read32(struct wl12xx *wl, int addr);
+void wl12xx_mem_write32(struct wl12xx *wl, int addr, u32 val);
+
+/* Registers IO */
+u32 wl12xx_reg_read32(struct wl12xx *wl, int addr);
+void wl12xx_reg_write32(struct wl12xx *wl, int addr, u32 val);
+
+/* INIT and RESET words */
+void wl12xx_spi_reset(struct wl12xx *wl);
+void wl12xx_spi_init(struct wl12xx *wl);
+void wl12xx_set_partition(struct wl12xx *wl,
+ u32 part_start, u32 part_size,
+ u32 reg_start, u32 reg_size);
+
+static inline u32 wl12xx_read32(struct wl12xx *wl, int addr)
+{
+ u32 response;
+
+ wl12xx_spi_read(wl, addr, &response, sizeof(u32));
+
+ return response;
+}
+
+static inline void wl12xx_write32(struct wl12xx *wl, int addr, u32 val)
+{
+ wl12xx_spi_write(wl, addr, &val, sizeof(u32));
+}
+
+#endif /* __WL12XX_SPI_H__ */
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
new file mode 100644
index 00000000000..62145e205a8
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -0,0 +1,557 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (c) 1998-2007 Texas Instruments Incorporated
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "wl12xx.h"
+#include "reg.h"
+#include "spi.h"
+#include "tx.h"
+#include "ps.h"
+
+static bool wl12xx_tx_double_buffer_busy(struct wl12xx *wl, u32 data_out_count)
+{
+ int used, data_in_count;
+
+ data_in_count = wl->data_in_count;
+
+ if (data_in_count < data_out_count)
+ /* data_in_count has wrapped */
+ data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1;
+
+ used = data_in_count - data_out_count;
+
+ WARN_ON(used < 0);
+ WARN_ON(used > DP_TX_PACKET_RING_CHUNK_NUM);
+
+ if (used >= DP_TX_PACKET_RING_CHUNK_NUM)
+ return true;
+ else
+ return false;
+}
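+
+/*
+ * Example of the wrap handling above: with data_in_count == 1 and
+ * data_out_count == 15, the host counter has wrapped, so used becomes
+ * 1 + (TX_STATUS_DATA_OUT_COUNT_MASK + 1) - 15 == 2, which for the
+ * double buffer (DP_TX_PACKET_RING_CHUNK_NUM) means the path is busy.
+ */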
+
+static int wl12xx_tx_path_status(struct wl12xx *wl)
+{
+ u32 status, addr, data_out_count;
+ bool busy;
+
+ addr = wl->data_path->tx_control_addr;
+ status = wl12xx_mem_read32(wl, addr);
+ data_out_count = status & TX_STATUS_DATA_OUT_COUNT_MASK;
+ busy = wl12xx_tx_double_buffer_busy(wl, data_out_count);
+
+ if (busy)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int wl12xx_tx_id(struct wl12xx *wl, struct sk_buff *skb)
+{
+ int i;
+
+ for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
+ if (wl->tx_frames[i] == NULL) {
+ wl->tx_frames[i] = skb;
+ return i;
+ }
+
+ return -EBUSY;
+}
+
+static void wl12xx_tx_control(struct tx_double_buffer_desc *tx_hdr,
+ struct ieee80211_tx_info *control, u16 fc)
+{
+ *(u16 *)&tx_hdr->control = 0;
+
+ tx_hdr->control.rate_policy = 0;
+
+ /* 802.11 packets */
+ tx_hdr->control.packet_type = 0;
+
+ if (control->flags & IEEE80211_TX_CTL_NO_ACK)
+ tx_hdr->control.ack_policy = 1;
+
+ tx_hdr->control.tx_complete = 1;
+
+ if ((fc & IEEE80211_FTYPE_DATA) &&
+ ((fc & IEEE80211_STYPE_QOS_DATA) ||
+ (fc & IEEE80211_STYPE_QOS_NULLFUNC)))
+ tx_hdr->control.qos = 1;
+}
+
+/* RSN + MIC = 8 + 8 = 16 bytes (worst case - AES). */
+#define MAX_MSDU_SECURITY_LENGTH 16
+#define MAX_MPDU_SECURITY_LENGTH 16
+#define WLAN_QOS_HDR_LEN 26
+#define MAX_MPDU_HEADER_AND_SECURITY (MAX_MPDU_SECURITY_LENGTH + \
+ WLAN_QOS_HDR_LEN)
+#define HW_BLOCK_SIZE 252
+static void wl12xx_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr)
+{
+ u16 payload_len, frag_threshold, mem_blocks;
+ u16 num_mpdus, mem_blocks_per_frag;
+
+ frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
+ tx_hdr->frag_threshold = cpu_to_le16(frag_threshold);
+
+ payload_len = tx_hdr->length + MAX_MSDU_SECURITY_LENGTH;
+
+ if (payload_len > frag_threshold) {
+ mem_blocks_per_frag =
+ ((frag_threshold + MAX_MPDU_HEADER_AND_SECURITY) /
+ HW_BLOCK_SIZE) + 1;
+ num_mpdus = payload_len / frag_threshold;
+ mem_blocks = num_mpdus * mem_blocks_per_frag;
+ payload_len -= num_mpdus * frag_threshold;
+ num_mpdus++;
+
+ } else {
+ mem_blocks_per_frag = 0;
+ mem_blocks = 0;
+ num_mpdus = 1;
+ }
+
+ mem_blocks += (payload_len / HW_BLOCK_SIZE) + 1;
+
+ if (num_mpdus > 1)
+ mem_blocks += min(num_mpdus, mem_blocks_per_frag);
+
+ tx_hdr->num_mem_blocks = mem_blocks;
+}
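+
+/*
+ * Example of the block accounting above: an unfragmented frame with
+ * tx_hdr->length == 1500 gives payload_len == 1500 + 16 == 1516, which
+ * stays below the fragmentation threshold, so num_mpdus == 1 and
+ * num_mem_blocks == 1516 / 252 + 1 == 7.
+ */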
+
+static int wl12xx_tx_fill_hdr(struct wl12xx *wl, struct sk_buff *skb,
+ struct ieee80211_tx_info *control)
+{
+ struct tx_double_buffer_desc *tx_hdr;
+ struct ieee80211_rate *rate;
+ int id;
+ u16 fc;
+
+ if (!skb)
+ return -EINVAL;
+
+ id = wl12xx_tx_id(wl, skb);
+ if (id < 0)
+ return id;
+
+ fc = *(u16 *)skb->data;
+ tx_hdr = (struct tx_double_buffer_desc *) skb_push(skb,
+ sizeof(*tx_hdr));
+
+ tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));
+ rate = ieee80211_get_tx_rate(wl->hw, control);
+ tx_hdr->rate = cpu_to_le16(rate->hw_value);
+ tx_hdr->expiry_time = cpu_to_le32(1 << 16);
+ tx_hdr->id = id;
+
+ /* FIXME: how to get the correct queue id? */
+ tx_hdr->xmit_queue = 0;
+
+ wl12xx_tx_control(tx_hdr, control, fc);
+ wl12xx_tx_frag_block_num(tx_hdr);
+
+ return 0;
+}
+
+/* We copy the packet to the target */
+static int wl12xx_tx_send_packet(struct wl12xx *wl, struct sk_buff *skb,
+ struct ieee80211_tx_info *control)
+{
+ struct tx_double_buffer_desc *tx_hdr;
+ int len;
+ u32 addr;
+
+ if (!skb)
+ return -EINVAL;
+
+ tx_hdr = (struct tx_double_buffer_desc *) skb->data;
+
+ if (control->control.hw_key &&
+ control->control.hw_key->alg == ALG_TKIP) {
+ int hdrlen;
+ u16 fc;
+ u8 *pos;
+
+ fc = *(u16 *)(skb->data + sizeof(*tx_hdr));
+ tx_hdr->length += WL12XX_TKIP_IV_SPACE;
+
+ hdrlen = ieee80211_hdrlen(fc);
+
+ pos = skb_push(skb, WL12XX_TKIP_IV_SPACE);
+ memmove(pos, pos + WL12XX_TKIP_IV_SPACE,
+ sizeof(*tx_hdr) + hdrlen);
+ }
+
+ /* Revisit. This is a workaround for getting non-aligned packets.
+ This happens at least with EAPOL packets from the user space.
+ Our DMA requires packets to be aligned on a 4-byte boundary.
+ */
+ if (unlikely((long)skb->data & 0x03)) {
+ int offset = (4 - (long)skb->data) & 0x03;
+ wl12xx_debug(DEBUG_TX, "skb offset %d", offset);
+
+ /* check whether the current skb can be used */
+ if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
+ unsigned char *src = skb->data;
+
+ /* align the buffer on a 4-byte boundary */
+ skb_reserve(skb, offset);
+ memmove(skb->data, src, skb->len);
+ } else {
+ wl12xx_info("No handler, fixme!");
+ return -EINVAL;
+ }
+ }
+
+ /* Our skb->data at this point includes the HW header */
+ len = WL12XX_TX_ALIGN(skb->len);
+
+ if (wl->data_in_count & 0x1)
+ addr = wl->data_path->tx_packet_ring_addr +
+ wl->data_path->tx_packet_ring_chunk_size;
+ else
+ addr = wl->data_path->tx_packet_ring_addr;
+
+ wl12xx_spi_mem_write(wl, addr, skb->data, len);
+
+ wl12xx_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x",
+ tx_hdr->id, skb, tx_hdr->length, tx_hdr->rate);
+
+ return 0;
+}
+
+static void wl12xx_tx_trigger(struct wl12xx *wl)
+{
+ u32 data, addr;
+
+ if (wl->data_in_count & 0x1) {
+ addr = ACX_REG_INTERRUPT_TRIG_H;
+ data = INTR_TRIG_TX_PROC1;
+ } else {
+ addr = ACX_REG_INTERRUPT_TRIG;
+ data = INTR_TRIG_TX_PROC0;
+ }
+
+ wl12xx_reg_write32(wl, addr, data);
+
+ /* Bumping data in */
+ wl->data_in_count = (wl->data_in_count + 1) &
+ TX_STATUS_DATA_OUT_COUNT_MASK;
+}
+
+/* caller must hold wl->mutex */
+static int wl12xx_tx_frame(struct wl12xx *wl, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info;
+ int ret = 0;
+ u8 idx;
+
+ info = IEEE80211_SKB_CB(skb);
+
+ if (info->control.hw_key) {
+ idx = info->control.hw_key->hw_key_idx;
+ if (unlikely(wl->default_key != idx)) {
+ ret = wl12xx_acx_default_key(wl, idx);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ ret = wl12xx_tx_path_status(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_tx_fill_hdr(wl, skb, info);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_tx_send_packet(wl, skb, info);
+ if (ret < 0)
+ return ret;
+
+ wl12xx_tx_trigger(wl);
+
+ return ret;
+}
+
+void wl12xx_tx_work(struct work_struct *work)
+{
+ struct wl12xx *wl = container_of(work, struct wl12xx, tx_work);
+ struct sk_buff *skb;
+ bool woken_up = false;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL12XX_STATE_OFF))
+ goto out;
+
+ while ((skb = skb_dequeue(&wl->tx_queue))) {
+ if (!woken_up) {
+ wl12xx_ps_elp_wakeup(wl);
+ woken_up = true;
+ }
+
+ ret = wl12xx_tx_frame(wl, skb);
+ if (ret == -EBUSY) {
+ /* firmware buffer is full, stop queues */
+ wl12xx_debug(DEBUG_TX, "tx_work: fw buffer full, "
+ "stop queues");
+ ieee80211_stop_queues(wl->hw);
+ wl->tx_queue_stopped = true;
+ skb_queue_head(&wl->tx_queue, skb);
+ goto out;
+ } else if (ret < 0) {
+ dev_kfree_skb(skb);
+ goto out;
+ }
+ }
+
+out:
+ if (woken_up)
+ wl12xx_ps_elp_sleep(wl);
+
+ mutex_unlock(&wl->mutex);
+}
+
+static const char *wl12xx_tx_parse_status(u8 status)
+{
+ /* 8 bit status field, one character per bit plus null */
+ static char buf[9];
+ int i = 0;
+
+ memset(buf, 0, sizeof(buf));
+
+ if (status & TX_DMA_ERROR)
+ buf[i++] = 'm';
+ if (status & TX_DISABLED)
+ buf[i++] = 'd';
+ if (status & TX_RETRY_EXCEEDED)
+ buf[i++] = 'r';
+ if (status & TX_TIMEOUT)
+ buf[i++] = 't';
+ if (status & TX_KEY_NOT_FOUND)
+ buf[i++] = 'k';
+ if (status & TX_ENCRYPT_FAIL)
+ buf[i++] = 'e';
+ if (status & TX_UNAVAILABLE_PRIORITY)
+ buf[i++] = 'p';
+
+ /* bit 0 is unused apparently */
+
+ return buf;
+}
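+
+/*
+ * For example, a status of TX_RETRY_EXCEEDED | TX_TIMEOUT (0x30) shows
+ * up in the tx status debug message below as "rt".
+ */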
+
+static void wl12xx_tx_packet_cb(struct wl12xx *wl,
+ struct tx_result *result)
+{
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+ int hdrlen, ret;
+ u8 *frame;
+
+ skb = wl->tx_frames[result->id];
+ if (skb == NULL) {
+ wl12xx_error("SKB for packet %d is NULL", result->id);
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+ (result->status == TX_SUCCESS))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.rates[0].count = result->ack_failures + 1;
+ wl->stats.retry_count += result->ack_failures;
+
+ /*
+ * We have to remove our private TX header before pushing
+ * the skb back to mac80211.
+ */
+ frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc));
+ if (info->control.hw_key &&
+ info->control.hw_key->alg == ALG_TKIP) {
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(frame + WL12XX_TKIP_IV_SPACE, frame, hdrlen);
+ skb_pull(skb, WL12XX_TKIP_IV_SPACE);
+ }
+
+ wl12xx_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
+ " status 0x%x (%s)",
+ result->id, skb, result->ack_failures, result->rate,
+ result->status, wl12xx_tx_parse_status(result->status));
+
+
+ ieee80211_tx_status(wl->hw, skb);
+
+ wl->tx_frames[result->id] = NULL;
+
+ if (wl->tx_queue_stopped) {
+ wl12xx_debug(DEBUG_TX, "cb: queue was stopped");
+
+ skb = skb_dequeue(&wl->tx_queue);
+
+ /* The skb can be NULL because tx_work might have been
+ scheduled before the queue was stopped, leaving the
+ queue empty */
+
+ if (skb) {
+ ret = wl12xx_tx_frame(wl, skb);
+ if (ret == -EBUSY) {
+ /* firmware buffer is still full */
+ wl12xx_debug(DEBUG_TX, "cb: fw buffer "
+ "still full");
+ skb_queue_head(&wl->tx_queue, skb);
+ return;
+ } else if (ret < 0) {
+ dev_kfree_skb(skb);
+ return;
+ }
+ }
+
+ wl12xx_debug(DEBUG_TX, "cb: waking queues");
+ ieee80211_wake_queues(wl->hw);
+ wl->tx_queue_stopped = false;
+ }
+}
+
+/* Called upon reception of a TX complete interrupt */
+void wl12xx_tx_complete(struct wl12xx *wl)
+{
+ int i, result_index, num_complete = 0;
+ struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
+
+ if (unlikely(wl->state != WL12XX_STATE_ON))
+ return;
+
+ /* First we read the result */
+ wl12xx_spi_mem_read(wl, wl->data_path->tx_complete_addr,
+ result, sizeof(result));
+
+ result_index = wl->next_tx_complete;
+
+ for (i = 0; i < ARRAY_SIZE(result); i++) {
+ result_ptr = &result[result_index];
+
+ if (result_ptr->done_1 == 1 &&
+ result_ptr->done_2 == 1) {
+ wl12xx_tx_packet_cb(wl, result_ptr);
+
+ result_ptr->done_1 = 0;
+ result_ptr->done_2 = 0;
+
+ result_index = (result_index + 1) &
+ (FW_TX_CMPLT_BLOCK_SIZE - 1);
+ num_complete++;
+ } else {
+ break;
+ }
+ }
+
+ /* Every completed frame needs to be acknowledged */
+ if (num_complete) {
+ /*
+ * If we've wrapped, we have to clear
+ * the results in 2 steps.
+ */
+ if (result_index > wl->next_tx_complete) {
+ /* Only 1 write is needed */
+ wl12xx_spi_mem_write(wl,
+ wl->data_path->tx_complete_addr +
+ (wl->next_tx_complete *
+ sizeof(struct tx_result)),
+ &result[wl->next_tx_complete],
+ num_complete *
+ sizeof(struct tx_result));
+
+
+ } else if (result_index < wl->next_tx_complete) {
+ /* 2 writes are needed */
+ wl12xx_spi_mem_write(wl,
+ wl->data_path->tx_complete_addr +
+ (wl->next_tx_complete *
+ sizeof(struct tx_result)),
+ &result[wl->next_tx_complete],
+ (FW_TX_CMPLT_BLOCK_SIZE -
+ wl->next_tx_complete) *
+ sizeof(struct tx_result));
+
+ wl12xx_spi_mem_write(wl,
+ wl->data_path->tx_complete_addr,
+ result,
+ (num_complete -
+ FW_TX_CMPLT_BLOCK_SIZE +
+ wl->next_tx_complete) *
+ sizeof(struct tx_result));
+
+ } else {
+ /* We have to write the whole array */
+ wl12xx_spi_mem_write(wl,
+ wl->data_path->tx_complete_addr,
+ result,
+ FW_TX_CMPLT_BLOCK_SIZE *
+ sizeof(struct tx_result));
+ }
+
+ }
+
+ wl->next_tx_complete = result_index;
+}
+
+/* caller must hold wl->mutex */
+void wl12xx_tx_flush(struct wl12xx *wl)
+{
+ int i;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+
+ /* TX failure */
+/* control->flags = 0; FIXME */
+
+ while ((skb = skb_dequeue(&wl->tx_queue))) {
+ info = IEEE80211_SKB_CB(skb);
+
+ wl12xx_debug(DEBUG_TX, "flushing skb 0x%p", skb);
+
+ if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
+ continue;
+
+ ieee80211_tx_status(wl->hw, skb);
+ }
+
+ for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
+ if (wl->tx_frames[i] != NULL) {
+ skb = wl->tx_frames[i];
+ info = IEEE80211_SKB_CB(skb);
+
+ if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
+ continue;
+
+ ieee80211_tx_status(wl->hw, skb);
+ wl->tx_frames[i] = NULL;
+ }
+}
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
new file mode 100644
index 00000000000..dc82691f4c1
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -0,0 +1,215 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (c) 1998-2007 Texas Instruments Incorporated
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_TX_H__
+#define __WL12XX_TX_H__
+
+#include <linux/bitops.h>
+
+/*
+ *
+ * TX PATH
+ *
+ * The Tx path uses a double buffer and a tx_control structure, each located
+ * at a fixed address in the device's memory. On startup, the host retrieves
+ * the pointers to these addresses. A double buffer allows for continuous data
+ * flow towards the device. The host keeps track of which buffer is available
+ * and alternates between these two buffers on a per packet basis.
+ *
+ * The size of each of the two buffers is large enough to hold the longest
+ * 802.3 packet - maximum size Ethernet packet + header + descriptor.
+ * TX complete indications are received asynchronously in a TX done ring
+ * buffer composed of 16 tx_result descriptor structures, which is used
+ * in a cyclic manner.
+ *
+ * The TX (HOST) procedure is as follows:
+ * 1. Read the Tx path status; this gives the data_out_count.
+ * 2. Go back to 1 if there is no room, i.e. if
+ *    data_in_count - data_out_count >= HwBuffer size (2 for the double
+ *    buffer).
+ * 3. Copy the packet (preceded by a double_buffer_desc) when there is room,
+ *    i.e. if data_in_count - data_out_count < HwBuffer size (2 for the
+ *    double buffer).
+ * 4. increment data_in_count.
+ * 5. Inform the firmware by generating a firmware internal interrupt.
+ * 6. FW will increment data_out_count after it reads the buffer.
+ *
+ * The TX Complete procedure:
+ * 1. To get a TX complete indication the host enables the tx_complete flag in
+ *    the TX descriptor structure.
+ * 2. For each packet with a Tx Complete field set, the firmware adds the
+ * transmit results to the cyclic buffer (txDoneRing) and sets both done_1
+ * and done_2 to 1 to indicate driver ownership.
+ * 3. The firmware sends a Tx Complete interrupt to the host to trigger the
+ *    host to process the new data. Note: an interrupt is sent per packet
+ *    if a TX complete indication was requested in tx_control, or when the
+ *    aggregation threshold is crossed.
+ * 4. After receiving the Tx Complete interrupt, the host reads the
+ * TxDescriptorDone information in a cyclic manner and clears both done_1
+ * and done_2 fields.
+ *
+ */
+
+#define TX_COMPLETE_REQUIRED_BIT 0x80
+#define TX_STATUS_DATA_OUT_COUNT_MASK 0xf
+#define WL12XX_TX_ALIGN_TO 4
+#define WL12XX_TX_ALIGN(len) (((len) + WL12XX_TX_ALIGN_TO - 1) & \
+ ~(WL12XX_TX_ALIGN_TO - 1))
+#define WL12XX_TKIP_IV_SPACE 4
+
+struct tx_control {
+ /* Rate Policy (class) index */
+ unsigned rate_policy:3;
+
+ /* When set, no ack policy is expected */
+ unsigned ack_policy:1;
+
+ /*
+ * Packet type:
+ * 0 -> 802.11
+ * 1 -> 802.3
+ * 2 -> IP
+ * 3 -> raw codec
+ */
+ unsigned packet_type:2;
+
+ /* If set, this is a QoS-Null or QoS-Data frame */
+ unsigned qos:1;
+
+ /*
+ * If set, the target triggers the tx complete INT
+ * upon frame sending completion.
+ */
+ unsigned tx_complete:1;
+
+ /* 2 bytes padding before packet header */
+ unsigned xfer_pad:1;
+
+ unsigned reserved:7;
+} __attribute__ ((packed));
+
+
+struct tx_double_buffer_desc {
+ /* Length of payload, including headers. */
+ u16 length;
+
+ /*
+ * A bit mask that specifies the initial rate to be used
+ * Possible values are:
+ * 0x0001 - 1Mbits
+ * 0x0002 - 2Mbits
+ * 0x0004 - 5.5Mbits
+ * 0x0008 - 6Mbits
+ * 0x0010 - 9Mbits
+ * 0x0020 - 11Mbits
+ * 0x0040 - 12Mbits
+ * 0x0080 - 18Mbits
+ * 0x0100 - 22Mbits
+ * 0x0200 - 24Mbits
+ * 0x0400 - 36Mbits
+ * 0x0800 - 48Mbits
+ * 0x1000 - 54Mbits
+ */
+ u16 rate;
+
+ /* Time in us that a packet can spend in the target */
+ u32 expiry_time;
+
+ /* index of the TX queue used for this packet */
+ u8 xmit_queue;
+
+ /* Used to identify a packet */
+ u8 id;
+
+ struct tx_control control;
+
+ /*
+ * The FW should cut the packet into fragments
+ * of this size.
+ */
+ u16 frag_threshold;
+
+ /* Numbers of HW queue blocks to be allocated */
+ u8 num_mem_blocks;
+
+ u8 reserved;
+} __attribute__ ((packed));
+
+enum {
+ TX_SUCCESS = 0,
+ TX_DMA_ERROR = BIT(7),
+ TX_DISABLED = BIT(6),
+ TX_RETRY_EXCEEDED = BIT(5),
+ TX_TIMEOUT = BIT(4),
+ TX_KEY_NOT_FOUND = BIT(3),
+ TX_ENCRYPT_FAIL = BIT(2),
+ TX_UNAVAILABLE_PRIORITY = BIT(1),
+};
+
+struct tx_result {
+ /*
+ * Ownership synchronization between the host and
+ * the firmware. If done_1 and done_2 are cleared,
+ * owned by the FW (no info ready).
+ */
+ u8 done_1;
+
+ /* same as double_buffer_desc->id */
+ u8 id;
+
+ /*
+ * Total air access duration consumed by this
+ * packet, including all retries and overheads.
+ */
+ u16 medium_usage;
+
+ /* Total media delay (from 1st EDCA AIFS counter until TX Complete). */
+ u32 medium_delay;
+
+ /* Time between host xfer and tx complete */
+ u32 fw_hnadling_time;
+
+ /* The LS-byte of the last TKIP sequence number. */
+ u8 lsb_seq_num;
+
+ /* Retry count */
+ u8 ack_failures;
+
+ /* At which rate we got an ACK */
+ u16 rate;
+
+ u16 reserved;
+
+ /* TX_* */
+ u8 status;
+
+ /* See done_1 */
+ u8 done_2;
+} __attribute__ ((packed));
+
+void wl12xx_tx_work(struct work_struct *work);
+void wl12xx_tx_complete(struct wl12xx *wl);
+void wl12xx_tx_flush(struct wl12xx *wl);
+
+#endif
diff --git a/drivers/net/wireless/wl12xx/wl1251.c b/drivers/net/wireless/wl12xx/wl1251.c
new file mode 100644
index 00000000000..ce1561a41fa
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1251.c
@@ -0,0 +1,709 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "wl1251.h"
+#include "reg.h"
+#include "spi.h"
+#include "boot.h"
+#include "event.h"
+#include "acx.h"
+#include "tx.h"
+#include "rx.h"
+#include "ps.h"
+#include "init.h"
+
+static struct wl12xx_partition_set wl1251_part_table[PART_TABLE_LEN] = {
+ [PART_DOWN] = {
+ .mem = {
+ .start = 0x00000000,
+ .size = 0x00016800
+ },
+ .reg = {
+ .start = REGISTERS_BASE,
+ .size = REGISTERS_DOWN_SIZE
+ },
+ },
+
+ [PART_WORK] = {
+ .mem = {
+ .start = 0x00028000,
+ .size = 0x00014000
+ },
+ .reg = {
+ .start = REGISTERS_BASE,
+ .size = REGISTERS_WORK_SIZE
+ },
+ },
+
+ /* WL1251 doesn't use the DRPW partition, so we don't set it here */
+};
+
+static enum wl12xx_acx_int_reg wl1251_acx_reg_table[ACX_REG_TABLE_LEN] = {
+ [ACX_REG_INTERRUPT_TRIG] = (REGISTERS_BASE + 0x0474),
+ [ACX_REG_INTERRUPT_TRIG_H] = (REGISTERS_BASE + 0x0478),
+ [ACX_REG_INTERRUPT_MASK] = (REGISTERS_BASE + 0x0494),
+ [ACX_REG_HINT_MASK_SET] = (REGISTERS_BASE + 0x0498),
+ [ACX_REG_HINT_MASK_CLR] = (REGISTERS_BASE + 0x049C),
+ [ACX_REG_INTERRUPT_NO_CLEAR] = (REGISTERS_BASE + 0x04B0),
+ [ACX_REG_INTERRUPT_CLEAR] = (REGISTERS_BASE + 0x04A4),
+ [ACX_REG_INTERRUPT_ACK] = (REGISTERS_BASE + 0x04A8),
+ [ACX_REG_SLV_SOFT_RESET] = (REGISTERS_BASE + 0x0000),
+ [ACX_REG_EE_START] = (REGISTERS_BASE + 0x080C),
+ [ACX_REG_ECPU_CONTROL] = (REGISTERS_BASE + 0x0804)
+};
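+
+/*
+ * Register accesses are routed through this table: for example,
+ * wl12xx_reg_read32(wl, ACX_REG_INTERRUPT_MASK) looks up
+ * REGISTERS_BASE + 0x0494 here and then maps it through the current
+ * register partition in wl12xx_translate_reg_addr().
+ */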
+
+static int wl1251_upload_firmware(struct wl12xx *wl)
+{
+ struct wl12xx_partition_set *p_table = wl->chip.p_table;
+ int addr, chunk_num, partition_limit;
+ size_t fw_data_len;
+ u8 *p;
+
+ /* whal_FwCtrl_LoadFwImageSm() */
+
+ wl12xx_debug(DEBUG_BOOT, "chip id before fw upload: 0x%x",
+ wl12xx_reg_read32(wl, CHIP_ID_B));
+
+ /* 10.0 check firmware length and set partition */
+ fw_data_len = (wl->fw[4] << 24) | (wl->fw[5] << 16) |
+ (wl->fw[6] << 8) | (wl->fw[7]);
+
+ wl12xx_debug(DEBUG_BOOT, "fw_data_len %zu chunk_size %d", fw_data_len,
+ CHUNK_SIZE);
+
+ if ((fw_data_len % 4) != 0) {
+ wl12xx_error("firmware length not multiple of four");
+ return -EIO;
+ }
+
+ wl12xx_set_partition(wl,
+ p_table[PART_DOWN].mem.start,
+ p_table[PART_DOWN].mem.size,
+ p_table[PART_DOWN].reg.start,
+ p_table[PART_DOWN].reg.size);
+
+ /* 10.1 set partition limit and chunk num */
+ chunk_num = 0;
+ partition_limit = p_table[PART_DOWN].mem.size;
+
+ while (chunk_num < fw_data_len / CHUNK_SIZE) {
+ /* 10.2 update partition, if needed */
+ addr = p_table[PART_DOWN].mem.start +
+ (chunk_num + 2) * CHUNK_SIZE;
+ if (addr > partition_limit) {
+ addr = p_table[PART_DOWN].mem.start +
+ chunk_num * CHUNK_SIZE;
+ partition_limit = chunk_num * CHUNK_SIZE +
+ p_table[PART_DOWN].mem.size;
+ wl12xx_set_partition(wl,
+ addr,
+ p_table[PART_DOWN].mem.size,
+ p_table[PART_DOWN].reg.start,
+ p_table[PART_DOWN].reg.size);
+ }
+
+ /* 10.3 upload the chunk */
+ addr = p_table[PART_DOWN].mem.start + chunk_num * CHUNK_SIZE;
+ p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE;
+ wl12xx_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
+ p, addr);
+ wl12xx_spi_mem_write(wl, addr, p, CHUNK_SIZE);
+
+ chunk_num++;
+ }
+
+ /* 10.4 upload the last chunk */
+ addr = p_table[PART_DOWN].mem.start + chunk_num * CHUNK_SIZE;
+ p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE;
+ wl12xx_debug(DEBUG_BOOT, "uploading fw last chunk (%zu B) 0x%p to 0x%x",
+ fw_data_len % CHUNK_SIZE, p, addr);
+ wl12xx_spi_mem_write(wl, addr, p, fw_data_len % CHUNK_SIZE);
+
+ return 0;
+}
+
+static int wl1251_upload_nvs(struct wl12xx *wl)
+{
+ size_t nvs_len, nvs_bytes_written, burst_len;
+ int nvs_start, i;
+ u32 dest_addr, val;
+ u8 *nvs_ptr, *nvs;
+
+ nvs = wl->nvs;
+ if (nvs == NULL)
+ return -ENODEV;
+
+ nvs_ptr = nvs;
+
+ nvs_len = wl->nvs_len;
+ nvs_start = wl->fw_len;
+
+ /*
+ * Layout before the actual NVS tables:
+ * 1 byte : burst length.
+ * 2 bytes: destination address.
+ * n bytes: data to burst copy.
+ *
+ * This is ended by a 0 length, then the NVS tables.
+ */
+
+ while (nvs_ptr[0]) {
+ burst_len = nvs_ptr[0];
+ dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));
+
+ /* We move our pointer to the data */
+ nvs_ptr += 3;
+
+ for (i = 0; i < burst_len; i++) {
+ val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
+ | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
+
+ wl12xx_debug(DEBUG_BOOT,
+ "nvs burst write 0x%x: 0x%x",
+ dest_addr, val);
+ wl12xx_mem_write32(wl, dest_addr, val);
+
+ nvs_ptr += 4;
+ dest_addr += 4;
+ }
+ }
+
+ /*
+ * We've reached the first zero length, the first NVS table
+ * is 7 bytes further.
+ */
+ nvs_ptr += 7;
+ nvs_len -= nvs_ptr - nvs;
+ nvs_len = ALIGN(nvs_len, 4);
+
+ /* Now we must set the partition correctly */
+ wl12xx_set_partition(wl, nvs_start,
+ wl->chip.p_table[PART_DOWN].mem.size,
+ wl->chip.p_table[PART_DOWN].reg.start,
+ wl->chip.p_table[PART_DOWN].reg.size);
+
+ /* And finally we upload the NVS tables */
+ nvs_bytes_written = 0;
+ while (nvs_bytes_written < nvs_len) {
+ val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
+ | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
+
+ val = cpu_to_le32(val);
+
+ wl12xx_debug(DEBUG_BOOT,
+ "nvs write table 0x%x: 0x%x",
+ nvs_start, val);
+ wl12xx_mem_write32(wl, nvs_start, val);
+
+ nvs_ptr += 4;
+ nvs_bytes_written += 4;
+ nvs_start += 4;
+ }
+
+ return 0;
+}
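+
+/*
+ * Illustrative NVS prefix parsed by the loop above (example bytes, not
+ * taken from a real NVS image):
+ *
+ *	01 c0 00 aa bb cc dd	one-word burst: write 0xddccbbaa to 0x00c0
+ *	00			zero length ends the burst section; the
+ *				first NVS table starts 7 bytes later
+ */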
+
+static int wl1251_boot(struct wl12xx *wl)
+{
+ int ret = 0, minor_minor_e2_ver;
+ u32 tmp, boot_data;
+
+ ret = wl12xx_boot_soft_reset(wl);
+ if (ret < 0)
+ goto out;
+
+ /* 2. start processing NVS file */
+ ret = wl->chip.op_upload_nvs(wl);
+ if (ret < 0)
+ goto out;
+
+ /* write the firmware's last address (i.e. its length) to
+ * ACX_EEPROMLESS_IND_REG */
+ wl12xx_reg_write32(wl, ACX_EEPROMLESS_IND_REG, wl->fw_len);
+
+ /* 6. read the EEPROM parameters */
+ tmp = wl12xx_reg_read32(wl, SCR_PAD2);
+
+ /* 7. read bootdata */
+ wl->boot_attr.radio_type = (tmp & 0x0000FF00) >> 8;
+ wl->boot_attr.major = (tmp & 0x00FF0000) >> 16;
+ tmp = wl12xx_reg_read32(wl, SCR_PAD3);
+
+ /* 8. check bootdata and call restart sequence */
+ wl->boot_attr.minor = (tmp & 0x00FF0000) >> 16;
+ minor_minor_e2_ver = (tmp & 0xFF000000) >> 24;
+
+ wl12xx_debug(DEBUG_BOOT, "radioType 0x%x majorE2Ver 0x%x "
+ "minorE2Ver 0x%x minor_minor_e2_ver 0x%x",
+ wl->boot_attr.radio_type, wl->boot_attr.major,
+ wl->boot_attr.minor, minor_minor_e2_ver);
+
+ ret = wl12xx_boot_init_seq(wl);
+ if (ret < 0)
+ goto out;
+
+ /* 9. NVS processing done */
+ boot_data = wl12xx_reg_read32(wl, ACX_REG_ECPU_CONTROL);
+
+ wl12xx_debug(DEBUG_BOOT, "halt boot_data 0x%x", boot_data);
+
+ /* 10. check that ECPU_CONTROL_HALT bits are set in
+ * pWhalBus->uBootData and start uploading firmware
+ */
+ if ((boot_data & ECPU_CONTROL_HALT) == 0) {
+ wl12xx_error("boot failed, ECPU_CONTROL_HALT not set");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = wl->chip.op_upload_fw(wl);
+ if (ret < 0)
+ goto out;
+
+ /* 10.5 start firmware */
+ ret = wl12xx_boot_run_firmware(wl);
+ if (ret < 0)
+ goto out;
+
+ /* Get and save the firmware version */
+ wl12xx_acx_fw_version(wl, wl->chip.fw_ver, sizeof(wl->chip.fw_ver));
+
+out:
+ return ret;
+}
+
+static int wl1251_mem_cfg(struct wl12xx *wl)
+{
+ struct wl1251_acx_config_memory mem_conf;
+ int ret, i;
+
+ wl12xx_debug(DEBUG_ACX, "wl1251 mem cfg");
+
+ /* memory config */
+ mem_conf.mem_config.num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS);
+ mem_conf.mem_config.rx_mem_block_num = 35;
+ mem_conf.mem_config.tx_min_mem_block_num = 64;
+ mem_conf.mem_config.num_tx_queues = MAX_TX_QUEUES;
+ mem_conf.mem_config.host_if_options = HOSTIF_PKT_RING;
+ mem_conf.mem_config.num_ssid_profiles = 1;
+ mem_conf.mem_config.debug_buffer_size =
+ cpu_to_le16(TRACE_BUFFER_MAX_SIZE);
+
+ /* RX queue config */
+ mem_conf.rx_queue_config.dma_address = 0;
+ mem_conf.rx_queue_config.num_descs = ACX_RX_DESC_DEF;
+ mem_conf.rx_queue_config.priority = DEFAULT_RXQ_PRIORITY;
+ mem_conf.rx_queue_config.type = DEFAULT_RXQ_TYPE;
+
+ /* TX queue config */
+ for (i = 0; i < MAX_TX_QUEUES; i++) {
+ mem_conf.tx_queue_config[i].num_descs = ACX_TX_DESC_DEF;
+ mem_conf.tx_queue_config[i].attributes = i;
+ }
+
+ mem_conf.header.id = ACX_MEM_CFG;
+ mem_conf.header.len = sizeof(struct wl1251_acx_config_memory) -
+ sizeof(struct acx_header);
+ mem_conf.header.len -=
+ (MAX_TX_QUEUE_CONFIGS - mem_conf.mem_config.num_tx_queues) *
+ sizeof(struct wl1251_acx_tx_queue_config);
+
+ ret = wl12xx_cmd_configure(wl, &mem_conf,
+ sizeof(struct wl1251_acx_config_memory));
+ if (ret < 0)
+ wl12xx_warning("wl1251 mem config failed: %d", ret);
+
+ return ret;
+}
+
+static int wl1251_hw_init_mem_config(struct wl12xx *wl)
+{
+ int ret;
+
+ ret = wl1251_mem_cfg(wl);
+ if (ret < 0)
+ return ret;
+
+ wl->target_mem_map = kzalloc(sizeof(struct wl1251_acx_mem_map),
+ GFP_KERNEL);
+ if (!wl->target_mem_map) {
+ wl12xx_error("couldn't allocate target memory map");
+ return -ENOMEM;
+ }
+
+ /* now ask for the memory map built by the firmware */
+ ret = wl12xx_acx_mem_map(wl, wl->target_mem_map,
+ sizeof(struct wl1251_acx_mem_map));
+ if (ret < 0) {
+ wl12xx_error("couldn't retrieve firmware memory map");
+ kfree(wl->target_mem_map);
+ wl->target_mem_map = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static void wl1251_set_ecpu_ctrl(struct wl12xx *wl, u32 flag)
+{
+ u32 cpu_ctrl;
+
+ /* 10.5.0 run the firmware (I) */
+ cpu_ctrl = wl12xx_reg_read32(wl, ACX_REG_ECPU_CONTROL);
+
+ /* 10.5.1 run the firmware (II) */
+ cpu_ctrl &= ~flag;
+ wl12xx_reg_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
+}
+
+static void wl1251_target_enable_interrupts(struct wl12xx *wl)
+{
+ /* Enable target's interrupts */
+ wl->intr_mask = WL1251_ACX_INTR_RX0_DATA |
+ WL1251_ACX_INTR_RX1_DATA |
+ WL1251_ACX_INTR_TX_RESULT |
+ WL1251_ACX_INTR_EVENT_A |
+ WL1251_ACX_INTR_EVENT_B |
+ WL1251_ACX_INTR_INIT_COMPLETE;
+ wl12xx_boot_target_enable_interrupts(wl);
+}
+
+static void wl1251_irq_work(struct work_struct *work)
+{
+ u32 intr;
+ struct wl12xx *wl =
+ container_of(work, struct wl12xx, irq_work);
+
+ mutex_lock(&wl->mutex);
+
+ wl12xx_debug(DEBUG_IRQ, "IRQ work");
+
+ if (wl->state == WL12XX_STATE_OFF)
+ goto out;
+
+ wl12xx_ps_elp_wakeup(wl);
+
+ wl12xx_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1251_ACX_INTR_ALL);
+
+ intr = wl12xx_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
+ wl12xx_debug(DEBUG_IRQ, "intr: 0x%x", intr);
+
+ if (wl->data_path) {
+ wl12xx_spi_mem_read(wl, wl->data_path->rx_control_addr,
+ &wl->rx_counter, sizeof(u32));
+
+ /* We handle a firmware bug here */
+ switch ((wl->rx_counter - wl->rx_handled) & 0xf) {
+ case 0:
+ wl12xx_debug(DEBUG_IRQ, "RX: FW and host in sync");
+ intr &= ~WL1251_ACX_INTR_RX0_DATA;
+ intr &= ~WL1251_ACX_INTR_RX1_DATA;
+ break;
+ case 1:
+ wl12xx_debug(DEBUG_IRQ, "RX: FW +1");
+ intr |= WL1251_ACX_INTR_RX0_DATA;
+ intr &= ~WL1251_ACX_INTR_RX1_DATA;
+ break;
+ case 2:
+ wl12xx_debug(DEBUG_IRQ, "RX: FW +2");
+ intr |= WL1251_ACX_INTR_RX0_DATA;
+ intr |= WL1251_ACX_INTR_RX1_DATA;
+ break;
+ default:
+ wl12xx_warning("RX: FW and host out of sync: %d",
+ wl->rx_counter - wl->rx_handled);
+ break;
+ }
+
+ wl->rx_handled = wl->rx_counter;
+
+
+ wl12xx_debug(DEBUG_IRQ, "RX counter: %d", wl->rx_counter);
+ }
+
+ intr &= wl->intr_mask;
+
+ if (intr == 0) {
+ wl12xx_debug(DEBUG_IRQ, "INTR is 0");
+ wl12xx_reg_write32(wl, ACX_REG_INTERRUPT_MASK,
+ ~(wl->intr_mask));
+
+ goto out_sleep;
+ }
+
+ if (intr & WL1251_ACX_INTR_RX0_DATA) {
+ wl12xx_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX0_DATA");
+ wl12xx_rx(wl);
+ }
+
+ if (intr & WL1251_ACX_INTR_RX1_DATA) {
+ wl12xx_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX1_DATA");
+ wl12xx_rx(wl);
+ }
+
+ if (intr & WL1251_ACX_INTR_TX_RESULT) {
+ wl12xx_debug(DEBUG_IRQ, "WL1251_ACX_INTR_TX_RESULT");
+ wl12xx_tx_complete(wl);
+ }
+
+ if (intr & (WL1251_ACX_INTR_EVENT_A | WL1251_ACX_INTR_EVENT_B)) {
+ wl12xx_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT (0x%x)", intr);
+ if (intr & WL1251_ACX_INTR_EVENT_A)
+ wl12xx_event_handle(wl, 0);
+ else
+ wl12xx_event_handle(wl, 1);
+ }
+
+ if (intr & WL1251_ACX_INTR_INIT_COMPLETE)
+ wl12xx_debug(DEBUG_IRQ, "WL1251_ACX_INTR_INIT_COMPLETE");
+
+ wl12xx_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask));
+
+out_sleep:
+ wl12xx_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+static int wl1251_hw_init_txq_fill(u8 qid,
+ struct acx_tx_queue_qos_config *config,
+ u32 num_blocks)
+{
+ config->qid = qid;
+
+ switch (qid) {
+ case QOS_AC_BE:
+ config->high_threshold =
+ (QOS_TX_HIGH_BE_DEF * num_blocks) / 100;
+ config->low_threshold =
+ (QOS_TX_LOW_BE_DEF * num_blocks) / 100;
+ break;
+ case QOS_AC_BK:
+ config->high_threshold =
+ (QOS_TX_HIGH_BK_DEF * num_blocks) / 100;
+ config->low_threshold =
+ (QOS_TX_LOW_BK_DEF * num_blocks) / 100;
+ break;
+ case QOS_AC_VI:
+ config->high_threshold =
+ (QOS_TX_HIGH_VI_DEF * num_blocks) / 100;
+ config->low_threshold =
+ (QOS_TX_LOW_VI_DEF * num_blocks) / 100;
+ break;
+ case QOS_AC_VO:
+ config->high_threshold =
+ (QOS_TX_HIGH_VO_DEF * num_blocks) / 100;
+ config->low_threshold =
+ (QOS_TX_LOW_VO_DEF * num_blocks) / 100;
+ break;
+ default:
+ wl12xx_error("Invalid TX queue id: %d", qid);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wl1251_hw_init_tx_queue_config(struct wl12xx *wl)
+{
+ struct acx_tx_queue_qos_config config;
+ struct wl1251_acx_mem_map *wl_mem_map = wl->target_mem_map;
+ int ret, i;
+
+ wl12xx_debug(DEBUG_ACX, "acx tx queue config");
+
+ config.header.id = ACX_TX_QUEUE_CFG;
+ config.header.len = sizeof(struct acx_tx_queue_qos_config) -
+ sizeof(struct acx_header);
+
+ for (i = 0; i < MAX_NUM_OF_AC; i++) {
+ ret = wl1251_hw_init_txq_fill(i, &config,
+ wl_mem_map->num_tx_mem_blocks);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_cmd_configure(wl, &config, sizeof(config));
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wl1251_hw_init_data_path_config(struct wl12xx *wl)
+{
+ int ret;
+
+ /* asking for the data path parameters */
+ wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp),
+ GFP_KERNEL);
+ if (!wl->data_path) {
+ wl12xx_error("Couldn't allocate data path parameters");
+ return -ENOMEM;
+ }
+
+ ret = wl12xx_acx_data_path_params(wl, wl->data_path);
+ if (ret < 0) {
+ kfree(wl->data_path);
+ wl->data_path = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wl1251_hw_init(struct wl12xx *wl)
+{
+ struct wl1251_acx_mem_map *wl_mem_map;
+ int ret;
+
+ ret = wl12xx_hw_init_hwenc_config(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Template settings */
+ ret = wl12xx_hw_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Default memory configuration */
+ ret = wl1251_hw_init_mem_config(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Default data path configuration */
+ ret = wl1251_hw_init_data_path_config(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* RX config */
+ ret = wl12xx_hw_init_rx_config(wl,
+ RX_CFG_PROMISCUOUS | RX_CFG_TSF,
+ RX_FILTER_OPTION_DEF);
+ /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS,
+ RX_FILTER_OPTION_FILTER_ALL); */
+ if (ret < 0)
+ goto out_free_data_path;
+
+ /* TX queues config */
+ ret = wl1251_hw_init_tx_queue_config(wl);
+ if (ret < 0)
+ goto out_free_data_path;
+
+ /* PHY layer config */
+ ret = wl12xx_hw_init_phy_config(wl);
+ if (ret < 0)
+ goto out_free_data_path;
+
+ /* Beacon filtering */
+ ret = wl12xx_hw_init_beacon_filter(wl);
+ if (ret < 0)
+ goto out_free_data_path;
+
+ /* Bluetooth WLAN coexistence */
+ ret = wl12xx_hw_init_pta(wl);
+ if (ret < 0)
+ goto out_free_data_path;
+
+ /* Energy detection */
+ ret = wl12xx_hw_init_energy_detection(wl);
+ if (ret < 0)
+ goto out_free_data_path;
+
+ /* Beacons and broadcast settings */
+ ret = wl12xx_hw_init_beacon_broadcast(wl);
+ if (ret < 0)
+ goto out_free_data_path;
+
+ /* Enable data path */
+ ret = wl12xx_cmd_data_path(wl, wl->channel, 1);
+ if (ret < 0)
+ goto out_free_data_path;
+
+ /* Default power state */
+ ret = wl12xx_hw_init_power_auth(wl);
+ if (ret < 0)
+ goto out_free_data_path;
+
+ wl_mem_map = wl->target_mem_map;
+ wl12xx_info("%d tx blocks at 0x%x, %d rx blocks at 0x%x",
+ wl_mem_map->num_tx_mem_blocks,
+ wl->data_path->tx_control_addr,
+ wl_mem_map->num_rx_mem_blocks,
+ wl->data_path->rx_control_addr);
+
+ return 0;
+
+ out_free_data_path:
+ kfree(wl->data_path);
+
+ out_free_memmap:
+ kfree(wl->target_mem_map);
+
+ return ret;
+}
+
+static int wl1251_plt_init(struct wl12xx *wl)
+{
+ int ret;
+
+ ret = wl1251_hw_init_mem_config(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_cmd_data_path(wl, wl->channel, 1);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+void wl1251_setup(struct wl12xx *wl)
+{
+ /* FIXME: Is it better to use strncpy here or is this ok? */
+ wl->chip.fw_filename = WL1251_FW_NAME;
+ wl->chip.nvs_filename = WL1251_NVS_NAME;
+
+ /* Now we know what chip we're using, so adjust the power on sleep
+ * time accordingly */
+ wl->chip.power_on_sleep = WL1251_POWER_ON_SLEEP;
+
+ wl->chip.intr_cmd_complete = WL1251_ACX_INTR_CMD_COMPLETE;
+ wl->chip.intr_init_complete = WL1251_ACX_INTR_INIT_COMPLETE;
+
+ wl->chip.op_upload_nvs = wl1251_upload_nvs;
+ wl->chip.op_upload_fw = wl1251_upload_firmware;
+ wl->chip.op_boot = wl1251_boot;
+ wl->chip.op_set_ecpu_ctrl = wl1251_set_ecpu_ctrl;
+ wl->chip.op_target_enable_interrupts = wl1251_target_enable_interrupts;
+ wl->chip.op_hw_init = wl1251_hw_init;
+ wl->chip.op_plt_init = wl1251_plt_init;
+
+ wl->chip.p_table = wl1251_part_table;
+ wl->chip.acx_reg_table = wl1251_acx_reg_table;
+
+ INIT_WORK(&wl->irq_work, wl1251_irq_work);
+}
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
new file mode 100644
index 00000000000..1f4a4433039
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -0,0 +1,165 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL1251_H__
+#define __WL1251_H__
+
+#include <linux/bitops.h>
+
+#include "wl12xx.h"
+#include "acx.h"
+
+#define WL1251_FW_NAME "wl1251-fw.bin"
+#define WL1251_NVS_NAME "wl1251-nvs.bin"
+
+#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
+
+void wl1251_setup(struct wl12xx *wl);
+
+
+struct wl1251_acx_memory {
+ __le16 num_stations; /* number of STAs to be supported. */
+ u16 reserved_1;
+
+ /*
+ * Number of memory buffers for the RX mem pool.
+ * The actual number may be less if there are
+ * not enough blocks left for the minimum num
+ * of TX ones.
+ */
+ u8 rx_mem_block_num;
+ u8 reserved_2;
+ u8 num_tx_queues; /* From 1 to 16 */
+ u8 host_if_options; /* HOST_IF* */
+ u8 tx_min_mem_block_num;
+ u8 num_ssid_profiles;
+ __le16 debug_buffer_size;
+} __attribute__ ((packed));
+
+
+#define ACX_RX_DESC_MIN 1
+#define ACX_RX_DESC_MAX 127
+#define ACX_RX_DESC_DEF 32
+struct wl1251_acx_rx_queue_config {
+ u8 num_descs;
+ u8 pad;
+ u8 type;
+ u8 priority;
+ __le32 dma_address;
+} __attribute__ ((packed));
+
+#define ACX_TX_DESC_MIN 1
+#define ACX_TX_DESC_MAX 127
+#define ACX_TX_DESC_DEF 16
+struct wl1251_acx_tx_queue_config {
+ u8 num_descs;
+ u8 pad[2];
+ u8 attributes;
+} __attribute__ ((packed));
+
+#define MAX_TX_QUEUE_CONFIGS 5
+#define MAX_TX_QUEUES 4
+struct wl1251_acx_config_memory {
+ struct acx_header header;
+
+ struct wl1251_acx_memory mem_config;
+ struct wl1251_acx_rx_queue_config rx_queue_config;
+ struct wl1251_acx_tx_queue_config tx_queue_config[MAX_TX_QUEUE_CONFIGS];
+} __attribute__ ((packed));
+
+struct wl1251_acx_mem_map {
+ struct acx_header header;
+
+ void *code_start;
+ void *code_end;
+
+ void *wep_defkey_start;
+ void *wep_defkey_end;
+
+ void *sta_table_start;
+ void *sta_table_end;
+
+ void *packet_template_start;
+ void *packet_template_end;
+
+ void *queue_memory_start;
+ void *queue_memory_end;
+
+ void *packet_memory_pool_start;
+ void *packet_memory_pool_end;
+
+ void *debug_buffer1_start;
+ void *debug_buffer1_end;
+
+ void *debug_buffer2_start;
+ void *debug_buffer2_end;
+
+ /* Number of blocks FW allocated for TX packets */
+ u32 num_tx_mem_blocks;
+
+ /* Number of blocks FW allocated for RX packets */
+ u32 num_rx_mem_blocks;
+} __attribute__ ((packed));
+
+/*************************************************************************
+
+ Host Interrupt Register (WiLink -> Host)
+
+**************************************************************************/
+
+/* RX packet is ready in Xfer buffer #0 */
+#define WL1251_ACX_INTR_RX0_DATA BIT(0)
+
+/* TX result(s) are in the TX complete buffer */
+#define WL1251_ACX_INTR_TX_RESULT BIT(1)
+
+/* OBSOLETE */
+#define WL1251_ACX_INTR_TX_XFR BIT(2)
+
+/* RX packet is ready in Xfer buffer #1 */
+#define WL1251_ACX_INTR_RX1_DATA BIT(3)
+
+/* Event was entered to Event MBOX #A */
+#define WL1251_ACX_INTR_EVENT_A BIT(4)
+
+/* Event was entered to Event MBOX #B */
+#define WL1251_ACX_INTR_EVENT_B BIT(5)
+
+/* OBSOLETE */
+#define WL1251_ACX_INTR_WAKE_ON_HOST BIT(6)
+
+/* Trace message on MBOX #A */
+#define WL1251_ACX_INTR_TRACE_A BIT(7)
+
+/* Trace message on MBOX #B */
+#define WL1251_ACX_INTR_TRACE_B BIT(8)
+
+/* Command processing completion */
+#define WL1251_ACX_INTR_CMD_COMPLETE BIT(9)
+
+/* Init sequence is done */
+#define WL1251_ACX_INTR_INIT_COMPLETE BIT(14)
+
+#define WL1251_ACX_INTR_ALL 0xFFFFFFFF
+
+#endif
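
The WL1251_ACX_INTR_* values are single-bit masks built with BIT() from linux/bitops.h, so a raw interrupt status word read from the chip can be decoded with plain bitwise tests. A standalone sketch mirroring a few of the definitions above; the decode() helper and the sample status value are illustrative only.

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1UL << (n))

/* Values mirror the header definitions above */
#define WL1251_ACX_INTR_RX0_DATA      BIT(0)
#define WL1251_ACX_INTR_TX_RESULT     BIT(1)
#define WL1251_ACX_INTR_CMD_COMPLETE  BIT(9)
#define WL1251_ACX_INTR_INIT_COMPLETE BIT(14)

static void decode(uint32_t intr)
{
	if (intr & WL1251_ACX_INTR_RX0_DATA)
		printf("rx packet ready in xfer buffer 0\n");
	if (intr & WL1251_ACX_INTR_TX_RESULT)
		printf("tx results in the tx complete buffer\n");
	if (intr & WL1251_ACX_INTR_CMD_COMPLETE)
		printf("command processing complete\n");
	if (intr & WL1251_ACX_INTR_INIT_COMPLETE)
		printf("init sequence done\n");
}

int main(void)
{
	/* example status word with two bits set */
	decode(WL1251_ACX_INTR_CMD_COMPLETE | WL1251_ACX_INTR_RX0_DATA);
	return 0;
}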
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
new file mode 100644
index 00000000000..48641437414
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -0,0 +1,409 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (c) 1998-2007 Texas Instruments Incorporated
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_H__
+#define __WL12XX_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <net/mac80211.h>
+
+#define DRIVER_NAME "wl12xx"
+#define DRIVER_PREFIX DRIVER_NAME ": "
+
+enum {
+ DEBUG_NONE = 0,
+ DEBUG_IRQ = BIT(0),
+ DEBUG_SPI = BIT(1),
+ DEBUG_BOOT = BIT(2),
+ DEBUG_MAILBOX = BIT(3),
+ DEBUG_NETLINK = BIT(4),
+ DEBUG_EVENT = BIT(5),
+ DEBUG_TX = BIT(6),
+ DEBUG_RX = BIT(7),
+ DEBUG_SCAN = BIT(8),
+ DEBUG_CRYPT = BIT(9),
+ DEBUG_PSM = BIT(10),
+ DEBUG_MAC80211 = BIT(11),
+ DEBUG_CMD = BIT(12),
+ DEBUG_ACX = BIT(13),
+ DEBUG_ALL = ~0,
+};
+
+#define DEBUG_LEVEL (DEBUG_NONE)
+
+#define DEBUG_DUMP_LIMIT 1024
+
+#define wl12xx_error(fmt, arg...) \
+ printk(KERN_ERR DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
+
+#define wl12xx_warning(fmt, arg...) \
+ printk(KERN_WARNING DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
+
+#define wl12xx_notice(fmt, arg...) \
+ printk(KERN_INFO DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl12xx_info(fmt, arg...) \
+ printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl12xx_debug(level, fmt, arg...) \
+ do { \
+ if (level & DEBUG_LEVEL) \
+ printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg); \
+ } while (0)
+
+#define wl12xx_dump(level, prefix, buf, len) \
+ do { \
+ if (level & DEBUG_LEVEL) \
+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+ DUMP_PREFIX_OFFSET, 16, 1, \
+ buf, \
+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+ 0); \
+ } while (0)
+
+#define wl12xx_dump_ascii(level, prefix, buf, len) \
+ do { \
+ if (level & DEBUG_LEVEL) \
+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+ DUMP_PREFIX_OFFSET, 16, 1, \
+ buf, \
+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+ true); \
+ } while (0)
+
+#define WL12XX_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \
+ CFG_BSSID_FILTER_EN)
+
+#define WL12XX_DEFAULT_RX_FILTER (CFG_RX_PRSP_EN | \
+ CFG_RX_MGMT_EN | \
+ CFG_RX_DATA_EN | \
+ CFG_RX_CTL_EN | \
+ CFG_RX_BCN_EN | \
+ CFG_RX_AUTH_EN | \
+ CFG_RX_ASSOC_EN)
+
+
+struct boot_attr {
+ u32 radio_type;
+ u8 mac_clock;
+ u8 arm_clock;
+ int firmware_debug;
+ u32 minor;
+ u32 major;
+ u32 bugfix;
+};
+
+enum wl12xx_state {
+ WL12XX_STATE_OFF,
+ WL12XX_STATE_ON,
+ WL12XX_STATE_PLT,
+};
+
+enum wl12xx_partition_type {
+ PART_DOWN,
+ PART_WORK,
+ PART_DRPW,
+
+ PART_TABLE_LEN
+};
+
+struct wl12xx_partition {
+ u32 size;
+ u32 start;
+};
+
+struct wl12xx_partition_set {
+ struct wl12xx_partition mem;
+ struct wl12xx_partition reg;
+};
+
+struct wl12xx;
+
+/* FIXME: I'm not sure about this structure name */
+struct wl12xx_chip {
+ u32 id;
+
+ const char *fw_filename;
+ const char *nvs_filename;
+
+ char fw_ver[21];
+
+ unsigned int power_on_sleep;
+ int intr_cmd_complete;
+ int intr_init_complete;
+
+ int (*op_upload_fw)(struct wl12xx *wl);
+ int (*op_upload_nvs)(struct wl12xx *wl);
+ int (*op_boot)(struct wl12xx *wl);
+ void (*op_set_ecpu_ctrl)(struct wl12xx *wl, u32 flag);
+ void (*op_target_enable_interrupts)(struct wl12xx *wl);
+ int (*op_hw_init)(struct wl12xx *wl);
+ int (*op_plt_init)(struct wl12xx *wl);
+
+ struct wl12xx_partition_set *p_table;
+ enum wl12xx_acx_int_reg *acx_reg_table;
+};
+
+struct wl12xx_stats {
+ struct acx_statistics *fw_stats;
+ unsigned long fw_stats_update;
+
+ unsigned int retry_count;
+ unsigned int excessive_retries;
+};
+
+struct wl12xx_debugfs {
+ struct dentry *rootdir;
+ struct dentry *fw_statistics;
+
+ struct dentry *tx_internal_desc_overflow;
+
+ struct dentry *rx_out_of_mem;
+ struct dentry *rx_hdr_overflow;
+ struct dentry *rx_hw_stuck;
+ struct dentry *rx_dropped;
+ struct dentry *rx_fcs_err;
+ struct dentry *rx_xfr_hint_trig;
+ struct dentry *rx_path_reset;
+ struct dentry *rx_reset_counter;
+
+ struct dentry *dma_rx_requested;
+ struct dentry *dma_rx_errors;
+ struct dentry *dma_tx_requested;
+ struct dentry *dma_tx_errors;
+
+ struct dentry *isr_cmd_cmplt;
+ struct dentry *isr_fiqs;
+ struct dentry *isr_rx_headers;
+ struct dentry *isr_rx_mem_overflow;
+ struct dentry *isr_rx_rdys;
+ struct dentry *isr_irqs;
+ struct dentry *isr_tx_procs;
+ struct dentry *isr_decrypt_done;
+ struct dentry *isr_dma0_done;
+ struct dentry *isr_dma1_done;
+ struct dentry *isr_tx_exch_complete;
+ struct dentry *isr_commands;
+ struct dentry *isr_rx_procs;
+ struct dentry *isr_hw_pm_mode_changes;
+ struct dentry *isr_host_acknowledges;
+ struct dentry *isr_pci_pm;
+ struct dentry *isr_wakeups;
+ struct dentry *isr_low_rssi;
+
+ struct dentry *wep_addr_key_count;
+ struct dentry *wep_default_key_count;
+ /* skipping wep.reserved */
+ struct dentry *wep_key_not_found;
+ struct dentry *wep_decrypt_fail;
+ struct dentry *wep_packets;
+ struct dentry *wep_interrupt;
+
+ struct dentry *pwr_ps_enter;
+ struct dentry *pwr_elp_enter;
+ struct dentry *pwr_missing_bcns;
+ struct dentry *pwr_wake_on_host;
+ struct dentry *pwr_wake_on_timer_exp;
+ struct dentry *pwr_tx_with_ps;
+ struct dentry *pwr_tx_without_ps;
+ struct dentry *pwr_rcvd_beacons;
+ struct dentry *pwr_power_save_off;
+ struct dentry *pwr_enable_ps;
+ struct dentry *pwr_disable_ps;
+ struct dentry *pwr_fix_tsf_ps;
+ /* skipping cont_miss_bcns_spread for now */
+ struct dentry *pwr_rcvd_awake_beacons;
+
+ struct dentry *mic_rx_pkts;
+ struct dentry *mic_calc_failure;
+
+ struct dentry *aes_encrypt_fail;
+ struct dentry *aes_decrypt_fail;
+ struct dentry *aes_encrypt_packets;
+ struct dentry *aes_decrypt_packets;
+ struct dentry *aes_encrypt_interrupt;
+ struct dentry *aes_decrypt_interrupt;
+
+ struct dentry *event_heart_beat;
+ struct dentry *event_calibration;
+ struct dentry *event_rx_mismatch;
+ struct dentry *event_rx_mem_empty;
+ struct dentry *event_rx_pool;
+ struct dentry *event_oom_late;
+ struct dentry *event_phy_transmit_error;
+ struct dentry *event_tx_stuck;
+
+ struct dentry *ps_pspoll_timeouts;
+ struct dentry *ps_upsd_timeouts;
+ struct dentry *ps_upsd_max_sptime;
+ struct dentry *ps_upsd_max_apturn;
+ struct dentry *ps_pspoll_max_apturn;
+ struct dentry *ps_pspoll_utilization;
+ struct dentry *ps_upsd_utilization;
+
+ struct dentry *rxpipe_rx_prep_beacon_drop;
+ struct dentry *rxpipe_descr_host_int_trig_rx_data;
+ struct dentry *rxpipe_beacon_buffer_thres_host_int_trig_rx_data;
+ struct dentry *rxpipe_missed_beacon_host_int_trig_rx_data;
+ struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data;
+
+ struct dentry *tx_queue_len;
+
+ struct dentry *retry_count;
+ struct dentry *excessive_retries;
+};
+
+struct wl12xx {
+ struct ieee80211_hw *hw;
+ bool mac80211_registered;
+
+ struct spi_device *spi;
+
+ void (*set_power)(bool enable);
+ int irq;
+
+ enum wl12xx_state state;
+ struct mutex mutex;
+
+ int physical_mem_addr;
+ int physical_reg_addr;
+ int virtual_mem_addr;
+ int virtual_reg_addr;
+
+ struct wl12xx_chip chip;
+
+ int cmd_box_addr;
+ int event_box_addr;
+ struct boot_attr boot_attr;
+
+ u8 *fw;
+ size_t fw_len;
+ u8 *nvs;
+ size_t nvs_len;
+
+ u8 bssid[ETH_ALEN];
+ u8 mac_addr[ETH_ALEN];
+ u8 bss_type;
+ u8 listen_int;
+ int channel;
+
+ void *target_mem_map;
+ struct acx_data_path_params_resp *data_path;
+
+ /* Number of TX packets transferred to the FW, modulo 16 */
+ u32 data_in_count;
+
+ /* Frames scheduled for transmission, not handled yet */
+ struct sk_buff_head tx_queue;
+ bool tx_queue_stopped;
+
+ struct work_struct tx_work;
+ struct work_struct filter_work;
+
+ /* Pending TX frames */
+ struct sk_buff *tx_frames[16];
+
+ /*
+ * Index pointing to the next TX complete entry
+ * in the cyclic TX complete array we get from
+ * the FW.
+ */
+ u32 next_tx_complete;
+
+ /* FW Rx counter */
+ u32 rx_counter;
+
+ /* Rx frames handled */
+ u32 rx_handled;
+
+ /* Current double buffer */
+ u32 rx_current_buffer;
+ u32 rx_last_id;
+
+ /* The target interrupt mask */
+ u32 intr_mask;
+ struct work_struct irq_work;
+
+ /* The mbox event mask */
+ u32 event_mask;
+
+ /* Mailbox pointers */
+ u32 mbox_ptr[2];
+
+ /* Are we currently scanning */
+ bool scanning;
+
+ /* Our association ID */
+ u16 aid;
+
+ /* Default key (for WEP) */
+ u32 default_key;
+
+ unsigned int tx_mgmt_frm_rate;
+ unsigned int tx_mgmt_frm_mod;
+
+ unsigned int rx_config;
+ unsigned int rx_filter;
+
+ /* is firmware in elp mode */
+ bool elp;
+
+ /* we can be in psm, but not in elp, we have to differentiate */
+ bool psm;
+
+ /* PSM mode requested */
+ bool psm_requested;
+
+ /* in dBm */
+ int power_level;
+
+ struct wl12xx_stats stats;
+ struct wl12xx_debugfs debugfs;
+};
+
+int wl12xx_plt_start(struct wl12xx *wl);
+int wl12xx_plt_stop(struct wl12xx *wl);
+
+#define DEFAULT_HW_GEN_MODULATION_TYPE CCK_LONG /* Long Preamble */
+#define DEFAULT_HW_GEN_TX_RATE RATE_2MBPS
+#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
+
+#define WL12XX_DEFAULT_POWER_LEVEL 20
+
+#define WL12XX_TX_QUEUE_MAX_LENGTH 20
+
+/* Different chips need different sleep times after power on. WL1271 needs
+ * 200ms, WL1251 needs only 10ms. By default we use 200ms, but as soon as we
+ * know the chip ID, we change the sleep value in the wl12xx chip structure,
+ * so in subsequent power-ons, we don't waste more time than needed. */
+#define WL12XX_DEFAULT_POWER_ON_SLEEP 200
+
+#define CHIP_ID_1251_PG10 (0x7010101)
+#define CHIP_ID_1251_PG11 (0x7020101)
+#define CHIP_ID_1251_PG12 (0x7030101)
+#define CHIP_ID_1271_PG10 (0x4030101)
+
+#endif
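
wl12xx_debug() above filters messages against the compile-time DEBUG_LEVEL mask, so per-category debug output costs nothing unless the corresponding bit is enabled. A userspace mock of the same pattern, assuming printf in place of printk; the category values are copied from the enum above and the sample messages are illustrative. (The ##__VA_ARGS__ form is a GNU extension, as in the kernel macro.)

#include <stdio.h>

#define DEBUG_BOOT (1 << 2)
#define DEBUG_CMD  (1 << 12)

/* Compile-time mask: only categories OR'ed in here are printed. */
#define DEBUG_LEVEL (DEBUG_BOOT)

#define wl12xx_debug(level, fmt, ...) \
	do { \
		if ((level) & DEBUG_LEVEL) \
			printf("wl12xx: " fmt "\n", ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	wl12xx_debug(DEBUG_BOOT, "chip id 0x%x", 0x7030101);	/* printed */
	wl12xx_debug(DEBUG_CMD, "cmd %d sent", 3);		/* filtered out */
	return 0;
}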
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
new file mode 100644
index 00000000000..657c2dbcb7d
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -0,0 +1,156 @@
+#ifndef __WL12XX_80211_H__
+#define __WL12XX_80211_H__
+
+#include <linux/if_ether.h> /* ETH_ALEN */
+
+/* RATES */
+#define IEEE80211_CCK_RATE_1MB 0x02
+#define IEEE80211_CCK_RATE_2MB 0x04
+#define IEEE80211_CCK_RATE_5MB 0x0B
+#define IEEE80211_CCK_RATE_11MB 0x16
+#define IEEE80211_OFDM_RATE_6MB 0x0C
+#define IEEE80211_OFDM_RATE_9MB 0x12
+#define IEEE80211_OFDM_RATE_12MB 0x18
+#define IEEE80211_OFDM_RATE_18MB 0x24
+#define IEEE80211_OFDM_RATE_24MB 0x30
+#define IEEE80211_OFDM_RATE_36MB 0x48
+#define IEEE80211_OFDM_RATE_48MB 0x60
+#define IEEE80211_OFDM_RATE_54MB 0x6C
+#define IEEE80211_BASIC_RATE_MASK 0x80
+
+#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
+#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
+#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
+#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
+#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
+#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
+#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
+#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
+#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
+#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
+#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
+#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
+
+#define IEEE80211_CCK_RATES_MASK 0x0000000F
+#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
+ IEEE80211_CCK_RATE_2MB_MASK)
+#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \
+ IEEE80211_CCK_RATE_5MB_MASK | \
+ IEEE80211_CCK_RATE_11MB_MASK)
+
+#define IEEE80211_OFDM_RATES_MASK 0x00000FF0
+#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \
+ IEEE80211_OFDM_RATE_12MB_MASK | \
+ IEEE80211_OFDM_RATE_24MB_MASK)
+#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \
+ IEEE80211_OFDM_RATE_9MB_MASK | \
+ IEEE80211_OFDM_RATE_18MB_MASK | \
+ IEEE80211_OFDM_RATE_36MB_MASK | \
+ IEEE80211_OFDM_RATE_48MB_MASK | \
+ IEEE80211_OFDM_RATE_54MB_MASK)
+#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \
+ IEEE80211_CCK_DEFAULT_RATES_MASK)
+
+
+/* This really should be 8, but not for our firmware */
+#define MAX_SUPPORTED_RATES 32
+#define COUNTRY_STRING_LEN 3
+#define MAX_COUNTRY_TRIPLETS 32
+
+/* Headers */
+struct ieee80211_header {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ __le16 seq_ctl;
+ u8 payload[0];
+} __attribute__ ((packed));
+
+struct wl12xx_ie_header {
+ u8 id;
+ u8 len;
+} __attribute__ ((packed));
+
+/* IEs */
+
+struct wl12xx_ie_ssid {
+ struct wl12xx_ie_header header;
+ char ssid[IW_ESSID_MAX_SIZE];
+} __attribute__ ((packed));
+
+struct wl12xx_ie_rates {
+ struct wl12xx_ie_header header;
+ u8 rates[MAX_SUPPORTED_RATES];
+} __attribute__ ((packed));
+
+struct wl12xx_ie_ds_params {
+ struct wl12xx_ie_header header;
+ u8 channel;
+} __attribute__ ((packed));
+
+struct country_triplet {
+ u8 channel;
+ u8 num_channels;
+ u8 max_tx_power;
+} __attribute__ ((packed));
+
+struct wl12xx_ie_country {
+ struct wl12xx_ie_header header;
+ u8 country_string[COUNTRY_STRING_LEN];
+ struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
+} __attribute__ ((packed));
+
+
+/* Templates */
+
+struct wl12xx_beacon_template {
+ struct ieee80211_header header;
+ __le32 time_stamp[2];
+ __le16 beacon_interval;
+ __le16 capability;
+ struct wl12xx_ie_ssid ssid;
+ struct wl12xx_ie_rates rates;
+ struct wl12xx_ie_rates ext_rates;
+ struct wl12xx_ie_ds_params ds_params;
+ struct wl12xx_ie_country country;
+} __attribute__ ((packed));
+
+struct wl12xx_null_data_template {
+ struct ieee80211_header header;
+} __attribute__ ((packed));
+
+struct wl12xx_ps_poll_template {
+ u16 fc;
+ u16 aid;
+ u8 bssid[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct wl12xx_qos_null_data_template {
+ struct ieee80211_header header;
+ __le16 qos_ctl;
+} __attribute__ ((packed));
+
+struct wl12xx_probe_req_template {
+ struct ieee80211_header header;
+ struct wl12xx_ie_ssid ssid;
+ struct wl12xx_ie_rates rates;
+ struct wl12xx_ie_rates ext_rates;
+} __attribute__ ((packed));
+
+
+struct wl12xx_probe_resp_template {
+ struct ieee80211_header header;
+ __le32 time_stamp[2];
+ __le16 beacon_interval;
+ __le16 capability;
+ struct wl12xx_ie_ssid ssid;
+ struct wl12xx_ie_rates rates;
+ struct wl12xx_ie_rates ext_rates;
+ struct wl12xx_ie_ds_params ds_params;
+ struct wl12xx_ie_country country;
+} __attribute__ ((packed));
+
+#endif
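
The template structures above are declared __attribute__((packed)) so their in-memory layout matches the byte layout the firmware and the air interface expect, with no compiler-inserted padding. A standalone sketch checking this for a struct shaped like wl12xx_ps_poll_template; userspace fixed-width types stand in for the kernel's u8/u16.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Same shape as wl12xx_ps_poll_template above */
struct ps_poll_template {
	uint16_t fc;
	uint16_t aid;
	uint8_t bssid[ETH_ALEN];
	uint8_t ta[ETH_ALEN];
} __attribute__ ((packed));

int main(void)
{
	/* With packing, the layout matches the 802.11 PS-Poll frame:
	 * 2 + 2 + 6 + 6 = 16 bytes, no padding between fields. */
	printf("sizeof = %zu\n", sizeof(struct ps_poll_template));
	printf("bssid offset = %zu\n",
	       offsetof(struct ps_poll_template, bssid));
	return 0;
}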
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 1f64d6033ab..e3e96bb2c24 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1348,6 +1348,7 @@ static int wl3501_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (rc) {
++dev->stats.tx_dropped;
netif_stop_queue(dev);
+ rc = NETDEV_TX_OK;
} else {
++dev->stats.tx_packets;
dev->stats.tx_bytes += skb->len;
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 5fabd9c0f07..4430b8d92e2 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -819,11 +819,11 @@ static int zd1201_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (err) {
dev->stats.tx_errors++;
netif_start_queue(dev);
- return err;
+ } else {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ dev->trans_start = jiffies;
}
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
kfree_skb(skb);
return 0;
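
Both of the hunks above move the drivers toward the usual start_xmit convention: once the driver has taken ownership of the skb, it accounts for any error itself, frees the skb, and returns NETDEV_TX_OK instead of an error code. A hedged kernel-style sketch of that convention; example_hw_queue_skb() is a hypothetical helper and the error handling is only representative, not either driver's actual code.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* hypothetical hardware-queue helper, defined elsewhere in the sketch */
static int example_hw_queue_skb(struct sk_buff *skb, struct net_device *dev);

static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = example_hw_queue_skb(skb, dev);

	if (err) {
		/* account for the error locally, don't report it upward */
		dev->stats.tx_errors++;
		netif_stop_queue(dev);
	} else {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
	}

	dev_kfree_skb(skb);	/* the skb was consumed either way */
	return NETDEV_TX_OK;
}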
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index c3a51266de2..40b07b98822 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -420,9 +420,9 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
cs->control |= ZD_CS_NEED_RANDOM_BACKOFF;
- /* Multicast */
- if (is_multicast_ether_addr(header->addr1))
- cs->control |= ZD_CS_MULTICAST;
+ /* No ACK expected (multicast, etc.) */
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ cs->control |= ZD_CS_NO_ACK;
/* PS-POLL */
if (ieee80211_is_pspoll(header->frame_control))
@@ -755,52 +755,6 @@ static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
return zd_chip_set_channel(&mac->chip, conf->channel->hw_value);
}
-static int zd_op_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct zd_mac *mac = zd_hw_mac(hw);
- int associated;
- int r;
-
- if (mac->type == NL80211_IFTYPE_MESH_POINT ||
- mac->type == NL80211_IFTYPE_ADHOC) {
- associated = true;
- if (conf->changed & IEEE80211_IFCC_BEACON) {
- struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
-
- if (!beacon)
- return -ENOMEM;
- r = zd_mac_config_beacon(hw, beacon);
- kfree_skb(beacon);
-
- if (r < 0)
- return r;
- }
-
- if (conf->changed & IEEE80211_IFCC_BEACON_ENABLED) {
- u32 interval;
-
- if (conf->enable_beacon)
- interval = BCN_MODE_IBSS | hw->conf.beacon_int;
- else
- interval = 0;
-
- r = zd_set_beacon_interval(&mac->chip, interval);
- if (r < 0)
- return r;
- }
- } else
- associated = is_valid_ether_addr(conf->bssid);
-
- spin_lock_irq(&mac->lock);
- mac->associated = associated;
- spin_unlock_irq(&mac->lock);
-
- /* TODO: do hardware bssid filtering */
- return 0;
-}
-
static void zd_process_intr(struct work_struct *work)
{
u16 int_status;
@@ -923,9 +877,42 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
{
struct zd_mac *mac = zd_hw_mac(hw);
unsigned long flags;
+ int associated;
dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
+ if (mac->type == NL80211_IFTYPE_MESH_POINT ||
+ mac->type == NL80211_IFTYPE_ADHOC) {
+ associated = true;
+ if (changes & BSS_CHANGED_BEACON) {
+ struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+
+ if (beacon) {
+ zd_mac_config_beacon(hw, beacon);
+ kfree_skb(beacon);
+ }
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ u32 interval;
+
+ if (bss_conf->enable_beacon)
+ interval = BCN_MODE_IBSS |
+ bss_conf->beacon_int;
+ else
+ interval = 0;
+
+ zd_set_beacon_interval(&mac->chip, interval);
+ }
+ } else
+ associated = is_valid_ether_addr(bss_conf->bssid);
+
+ spin_lock_irq(&mac->lock);
+ mac->associated = associated;
+ spin_unlock_irq(&mac->lock);
+
+ /* TODO: do hardware bssid filtering */
+
if (changes & BSS_CHANGED_ERP_PREAMBLE) {
spin_lock_irqsave(&mac->lock, flags);
mac->short_preamble = bss_conf->use_short_preamble;
@@ -952,7 +939,6 @@ static const struct ieee80211_ops zd_ops = {
.add_interface = zd_op_add_interface,
.remove_interface = zd_op_remove_interface,
.config = zd_op_config,
- .config_interface = zd_op_config_interface,
.configure_filter = zd_op_configure_filter,
.bss_info_changed = zd_op_bss_info_changed,
.get_tsf = zd_op_get_tsf,
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 4c05d3ee4c3..7c2759118d1 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -87,7 +87,7 @@ struct zd_ctrlset {
/* zd_ctrlset control field */
#define ZD_CS_NEED_RANDOM_BACKOFF 0x01
-#define ZD_CS_MULTICAST 0x02
+#define ZD_CS_NO_ACK 0x02
#define ZD_CS_FRAME_TYPE_MASK 0x0c
#define ZD_CS_DATA_FRAME 0x00
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f6732538790..8d88daeed0c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1212,7 +1212,7 @@ static int __devinit netfront_probe(struct xenbus_device *dev,
}
info = netdev_priv(netdev);
- dev->dev.driver_data = info;
+ dev_set_drvdata(&dev->dev, info);
err = register_netdev(info->netdev);
if (err) {
@@ -1233,7 +1233,7 @@ static int __devinit netfront_probe(struct xenbus_device *dev,
fail:
free_netdev(netdev);
- dev->dev.driver_data = NULL;
+ dev_set_drvdata(&dev->dev, NULL);
return err;
}
@@ -1275,7 +1275,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
*/
static int netfront_resume(struct xenbus_device *dev)
{
- struct netfront_info *info = dev->dev.driver_data;
+ struct netfront_info *info = dev_get_drvdata(&dev->dev);
dev_dbg(&dev->dev, "%s\n", dev->nodename);
@@ -1600,7 +1600,7 @@ static int xennet_connect(struct net_device *dev)
static void backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
- struct netfront_info *np = dev->dev.driver_data;
+ struct netfront_info *np = dev_get_drvdata(&dev->dev);
struct net_device *netdev = np->netdev;
dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
@@ -1774,7 +1774,7 @@ static struct xenbus_device_id netfront_ids[] = {
static int __devexit xennet_remove(struct xenbus_device *dev)
{
- struct netfront_info *info = dev->dev.driver_data;
+ struct netfront_info *info = dev_get_drvdata(&dev->dev);
dev_dbg(&dev->dev, "%s\n", dev->nodename);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 7477ffdcddb..3c7a5053f1d 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -717,7 +717,7 @@ static void yellowfin_tx_timeout(struct net_device *dev)
if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
netif_wake_queue (dev); /* Typical path */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
}
@@ -876,7 +876,6 @@ static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_start_queue (dev); /* Typical path */
else
yp->tx_full = 1;
- dev->trans_start = jiffies;
if (yellowfin_debug > 4) {
printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index f821dbc952a..d2fa27c5c1b 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -1,21 +1,27 @@
config OF_DEVICE
def_bool y
- depends on OF && (SPARC || PPC_OF)
+ depends on OF && (SPARC || PPC_OF || MICROBLAZE)
config OF_GPIO
def_bool y
- depends on OF && PPC_OF && GPIOLIB
+ depends on OF && (PPC_OF || MICROBLAZE) && GPIOLIB
help
OpenFirmware GPIO accessors
config OF_I2C
def_tristate I2C
- depends on PPC_OF && I2C
+ depends on (PPC_OF || MICROBLAZE) && I2C
help
OpenFirmware I2C accessors
config OF_SPI
def_tristate SPI
- depends on OF && PPC_OF && SPI
+ depends on OF && (PPC_OF || MICROBLAZE) && SPI
help
OpenFirmware SPI accessors
+
+config OF_MDIO
+ def_tristate PHYLIB
+ depends on OF && PHYLIB
+ help
+ OpenFirmware MDIO bus (Ethernet PHY) accessors
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 4c3c6f8e36f..bdfb5f5d4b0 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_OF_DEVICE) += device.o platform.o
obj-$(CONFIG_OF_GPIO) += gpio.o
obj-$(CONFIG_OF_I2C) += of_i2c.o
obj-$(CONFIG_OF_SPI) += of_spi.o
+obj-$(CONFIG_OF_MDIO) += of_mdio.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 41c5dfd8535..69f85c07d17 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -447,6 +447,7 @@ struct of_modalias_table {
static struct of_modalias_table of_modalias_table[] = {
{ "fsl,mcu-mpc8349emitx", "mcu-mpc8349emitx" },
{ "mmc-spi-slot", "mmc_spi" },
+ { "stm,m25p40", "m25p80" },
};
/**
@@ -495,6 +496,30 @@ int of_modalias_node(struct device_node *node, char *modalias, int len)
EXPORT_SYMBOL_GPL(of_modalias_node);
/**
+ * of_parse_phandle - Resolve a phandle property to a device_node pointer
+ * @np: Pointer to device node holding phandle property
+ * @phandle_name: Name of property holding a phandle value
+ * @index: For properties holding a table of phandles, this is the index into
+ * the table
+ *
+ * Returns the device_node pointer with refcount incremented. Use
+ * of_node_put() on it when done.
+ */
+struct device_node *
+of_parse_phandle(struct device_node *np, const char *phandle_name, int index)
+{
+ const phandle *phandle;
+ int size;
+
+ phandle = of_get_property(np, phandle_name, &size);
+ if ((!phandle) || (size < sizeof(*phandle) * (index + 1)))
+ return NULL;
+
+ return of_find_node_by_phandle(phandle[index]);
+}
+EXPORT_SYMBOL(of_parse_phandle);
+
+/**
* of_parse_phandles_with_args - Find a node pointed by phandle in a list
* @np: pointer to a device tree node containing a list
* @list_name: property name that contains a list
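
of_parse_phandle(), added above, returns the referenced device_node with its refcount raised, so callers must balance the lookup with of_node_put(). A hedged usage sketch; the "phy-handle" property name and the surrounding function are assumptions for illustration, only the of_parse_phandle()/of_node_put() calls come from the OF API.

#include <linux/errno.h>
#include <linux/of.h>

static int example_lookup_phy(struct device_node *np)
{
	struct device_node *phy_np;

	/* resolve the first phandle in the "phy-handle" property */
	phy_np = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_np)
		return -ENODEV;

	/* ... use phy_np here ... */

	/* of_parse_phandle() returned it with an elevated refcount */
	of_node_put(phy_np);
	return 0;
}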
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
new file mode 100644
index 00000000000..aee967d7f76
--- /dev/null
+++ b/drivers/of/of_mdio.c
@@ -0,0 +1,139 @@
+/*
+ * OF helpers for the MDIO (Ethernet PHY) API
+ *
+ * Copyright (c) 2009 Secret Lab Technologies, Ltd.
+ *
+ * This file is released under the GPLv2
+ *
+ * This file provides helper functions for extracting PHY device information
+ * out of the OpenFirmware device tree and using it to populate an mii_bus.
+ */
+
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_LICENSE("GPL");
+
+/**
+ * of_mdiobus_register - Register mii_bus and create PHYs from the device tree
+ * @mdio: pointer to mii_bus structure
+ * @np: pointer to device_node of MDIO bus.
+ *
+ * This function registers the mii_bus structure and registers a phy_device
+ * for each child node of @np.
+ */
+int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+{
+ struct phy_device *phy;
+ struct device_node *child;
+ int rc, i;
+
+ /* Mask out all PHYs from auto probing. Instead the PHYs listed in
+ * the device tree are populated after the bus has been registered */
+ mdio->phy_mask = ~0;
+
+ /* Clear all the IRQ properties */
+ if (mdio->irq)
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mdio->irq[i] = PHY_POLL;
+
+ /* Register the MDIO bus */
+ rc = mdiobus_register(mdio);
+ if (rc)
+ return rc;
+
+ /* Loop over the child nodes and register a phy_device for each one */
+ for_each_child_of_node(np, child) {
+ const u32 *addr;
+ int len;
+
+ /* A PHY must have a reg property in the range [0-31] */
+ addr = of_get_property(child, "reg", &len);
+ if (!addr || len < sizeof(*addr) || *addr >= 32 || *addr < 0) {
+ dev_err(&mdio->dev, "%s has invalid PHY address\n",
+ child->full_name);
+ continue;
+ }
+
+ if (mdio->irq) {
+ mdio->irq[*addr] = irq_of_parse_and_map(child, 0);
+ if (!mdio->irq[*addr])
+ mdio->irq[*addr] = PHY_POLL;
+ }
+
+ phy = get_phy_device(mdio, *addr);
+ if (!phy) {
+ dev_err(&mdio->dev, "error probing PHY at address %i\n",
+ *addr);
+ continue;
+ }
+ phy_scan_fixups(phy);
+
+ /* Associate the OF node with the device structure so it
+ * can be looked up later */
+ of_node_get(child);
+ dev_archdata_set_node(&phy->dev.archdata, child);
+
+ /* All data is now stored in the phy struct; register it */
+ rc = phy_device_register(phy);
+ if (rc) {
+ phy_device_free(phy);
+ of_node_put(child);
+ continue;
+ }
+
+ dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
+ child->name, *addr);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(of_mdiobus_register);
+
+/**
+ * of_phy_find_device - Given a PHY node, find the phy_device
+ * @phy_np: Pointer to the phy's device tree node
+ *
+ * Returns a pointer to the phy_device.
+ */
+struct phy_device *of_phy_find_device(struct device_node *phy_np)
+{
+ struct device *d;
+ int match(struct device *dev, void *phy_np)
+ {
+ return dev_archdata_get_node(&dev->archdata) == phy_np;
+ }
+
+ if (!phy_np)
+ return NULL;
+
+ d = bus_find_device(&mdio_bus_type, NULL, phy_np, match);
+ return d ? to_phy_device(d) : NULL;
+}
+EXPORT_SYMBOL(of_phy_find_device);
+
+/**
+ * of_phy_connect - Connect to the phy described in the device tree
+ * @dev: pointer to net_device claiming the phy
+ * @phy_np: Pointer to device tree node for the PHY
+ * @hndlr: Link state callback for the network device
+ * @flags: flags to pass to the PHY
+ * @iface: PHY data interface type
+ *
+ * Returns a pointer to the phy_device if successful, NULL otherwise.
+ */
+struct phy_device *of_phy_connect(struct net_device *dev,
+ struct device_node *phy_np,
+ void (*hndlr)(struct net_device *), u32 flags,
+ phy_interface_t iface)
+{
+ struct phy_device *phy = of_phy_find_device(phy_np);
+
+ if (!phy)
+ return NULL;
+
+ return phy_connect_direct(dev, phy, hndlr, flags, iface) ? NULL : phy;
+}
+EXPORT_SYMBOL(of_phy_connect);
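
Taken together, a MAC driver would register its MDIO bus from the device tree and then connect to the PHY node its own node references. A hypothetical fragment under those assumptions; only the of_mdiobus_register()/of_phy_connect() signatures come from this file, while the helper name, the "phy-handle" property, the adjust-link callback and the interface mode are illustrative.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void example_adjust_link(struct net_device *ndev)
{
	/* react to PHY link state changes here */
}

static int example_attach_phy(struct net_device *ndev, struct mii_bus *bus,
			      struct device_node *mac_np,
			      struct device_node *mdio_np)
{
	struct device_node *phy_np;
	struct phy_device *phy;
	int ret;

	/* register the bus and create phy_devices for its DT children */
	ret = of_mdiobus_register(bus, mdio_np);
	if (ret)
		return ret;

	/* find the PHY node this MAC points at and connect to it */
	phy_np = of_parse_phandle(mac_np, "phy-handle", 0);
	phy = of_phy_connect(ndev, phy_np, example_adjust_link, 0,
			     PHY_INTERFACE_MODE_MII);
	of_node_put(phy_np);

	return phy ? 0 : -ENODEV;
}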
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index f0e99d4c066..242257b1944 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -78,16 +78,20 @@ void free_cpu_buffers(void)
op_ring_buffer_write = NULL;
}
+#define RB_EVENT_HDR_SIZE 4
+
int alloc_cpu_buffers(void)
{
int i;
unsigned long buffer_size = oprofile_cpu_buffer_size;
+ unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
+ RB_EVENT_HDR_SIZE);
- op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
+ op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
if (!op_ring_buffer_read)
goto fail;
- op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
+ op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
if (!op_ring_buffer_write)
goto fail;
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index f415fdd9a88..5b89f404e66 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -373,7 +373,7 @@ static int __init eisa_probe(struct parisc_device *dev)
if (result >= 0) {
/* FIXME : Don't enumerate the bus twice. */
eisa_dev.root.dev = &dev->dev;
- dev->dev.driver_data = &eisa_dev.root;
+ dev_set_drvdata(&dev->dev, &eisa_dev.root);
eisa_dev.root.bus_base_addr = 0;
eisa_dev.root.res = &eisa_dev.hba.io_space;
eisa_dev.root.slots = result;
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 73348c4047e..4a9cc92d4d1 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -702,7 +702,7 @@ static unsigned int iosapic_startup_irq(unsigned int irq)
}
#ifdef CONFIG_SMP
-static void iosapic_set_affinity_irq(unsigned int irq,
+static int iosapic_set_affinity_irq(unsigned int irq,
const struct cpumask *dest)
{
struct vector_info *vi = iosapic_get_vector(irq);
@@ -712,7 +712,7 @@ static void iosapic_set_affinity_irq(unsigned int irq,
dest_cpu = cpu_check_affinity(irq, dest);
if (dest_cpu < 0)
- return;
+ return -1;
cpumask_copy(irq_desc[irq].affinity, cpumask_of(dest_cpu));
vi->txn_addr = txn_affinity_addr(irq, dest_cpu);
@@ -724,6 +724,8 @@ static void iosapic_set_affinity_irq(unsigned int irq,
iosapic_set_irt_data(vi, &dummy_d0, &d1);
iosapic_wr_irt_entry(vi, d0, d1);
spin_unlock_irqrestore(&iosapic_lock, flags);
+
+ return 0;
}
#endif
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index e5999c4cedc..d46dd57450a 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -2010,7 +2010,7 @@ void __init sba_init(void)
void * sba_get_iommu(struct parisc_device *pci_hba)
{
struct parisc_device *sba_dev = parisc_parent(pci_hba);
- struct sba_device *sba = sba_dev->dev.driver_data;
+ struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
char t = sba_dev->id.hw_type;
int iocnum = (pci_hba->hw_path >> 3); /* rope # */
@@ -2031,7 +2031,7 @@ void * sba_get_iommu(struct parisc_device *pci_hba)
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
struct parisc_device *sba_dev = parisc_parent(pci_hba);
- struct sba_device *sba = sba_dev->dev.driver_data;
+ struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
char t = sba_dev->id.hw_type;
int i;
int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
@@ -2073,7 +2073,7 @@ void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
{
struct parisc_device *sba_dev = parisc_parent(pci_hba);
- struct sba_device *sba = sba_dev->dev.driver_data;
+ struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
char t = sba_dev->id.hw_type;
int base, size;
int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index e6a7e847ee8..5d6de380e42 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -352,8 +352,8 @@ static int __devinit parport_init_chip(struct parisc_device *dev)
unsigned long port;
if (!dev->irq) {
- printk(KERN_WARNING "IRQ not found for parallel device at 0x%lx\n",
- dev->hpa.start);
+ printk(KERN_WARNING "IRQ not found for parallel device at 0x%llx\n",
+ (unsigned long long)dev->hpa.start);
return -ENODEV;
}
@@ -376,14 +376,14 @@ static int __devinit parport_init_chip(struct parisc_device *dev)
/* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, NULL);
if (p)
parport_count++;
- dev->dev.driver_data = p;
+ dev_set_drvdata(&dev->dev, p);
return 0;
}
static int __devexit parport_remove_chip(struct parisc_device *dev)
{
- struct parport *p = dev->dev.driver_data;
+ struct parport *p = dev_get_drvdata(&dev->dev);
if (p) {
struct parport_gsc_private *priv = p->private_data;
struct parport_operations *ops = p->ops;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 4e63cc9e277..151bf5bc8af 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1,5 +1,5 @@
/* Low-level parallel-port routines for 8255-based PC-style hardware.
- *
+ *
* Authors: Phil Blundell <philb@gnu.org>
* Tim Waugh <tim@cyberelk.demon.co.uk>
* Jose Renau <renau@acm.org>
@@ -11,7 +11,7 @@
* Cleaned up include files - Russell King <linux@arm.uk.linux.org>
* DMA support - Bert De Jonghe <bert@sophis.be>
* Many ECP bugs fixed. Fred Barnes & Jamie Lokier, 1999
- * More PCI support now conditional on CONFIG_PCI, 03/2001, Paul G.
+ * More PCI support now conditional on CONFIG_PCI, 03/2001, Paul G.
* Various hacks, Fred Barnes, 04/2001
* Updated probing logic - Adam Belay <ambx1@neo.rr.com>
*/
@@ -56,10 +56,10 @@
#include <linux/pnp.h>
#include <linux/platform_device.h>
#include <linux/sysctl.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
-#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/uaccess.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>
@@ -82,7 +82,7 @@
#define ECR_TST 06
#define ECR_CNF 07
#define ECR_MODE_MASK 0xe0
-#define ECR_WRITE(p,v) frob_econtrol((p),0xff,(v))
+#define ECR_WRITE(p, v) frob_econtrol((p), 0xff, (v))
#undef DEBUG
@@ -109,27 +109,27 @@ static int pci_registered_parport;
static int pnp_registered_parport;
/* frob_control, but for ECR */
-static void frob_econtrol (struct parport *pb, unsigned char m,
+static void frob_econtrol(struct parport *pb, unsigned char m,
unsigned char v)
{
unsigned char ectr = 0;
if (m != 0xff)
- ectr = inb (ECONTROL (pb));
+ ectr = inb(ECONTROL(pb));
- DPRINTK (KERN_DEBUG "frob_econtrol(%02x,%02x): %02x -> %02x\n",
+ DPRINTK(KERN_DEBUG "frob_econtrol(%02x,%02x): %02x -> %02x\n",
m, v, ectr, (ectr & ~m) ^ v);
- outb ((ectr & ~m) ^ v, ECONTROL (pb));
+ outb((ectr & ~m) ^ v, ECONTROL(pb));
}
-static __inline__ void frob_set_mode (struct parport *p, int mode)
+static inline void frob_set_mode(struct parport *p, int mode)
{
- frob_econtrol (p, ECR_MODE_MASK, mode << 5);
+ frob_econtrol(p, ECR_MODE_MASK, mode << 5);
}
#ifdef CONFIG_PARPORT_PC_FIFO
-/* Safely change the mode bits in the ECR
+/* Safely change the mode bits in the ECR
Returns:
0 : Success
-EBUSY: Could not drain FIFO in some finite amount of time,
@@ -141,17 +141,18 @@ static int change_mode(struct parport *p, int m)
unsigned char oecr;
int mode;
- DPRINTK(KERN_INFO "parport change_mode ECP-ISA to mode 0x%02x\n",m);
+ DPRINTK(KERN_INFO "parport change_mode ECP-ISA to mode 0x%02x\n", m);
if (!priv->ecr) {
- printk (KERN_DEBUG "change_mode: but there's no ECR!\n");
+ printk(KERN_DEBUG "change_mode: but there's no ECR!\n");
return 0;
}
/* Bits <7:5> contain the mode. */
- oecr = inb (ECONTROL (p));
+ oecr = inb(ECONTROL(p));
mode = (oecr >> 5) & 0x7;
- if (mode == m) return 0;
+ if (mode == m)
+ return 0;
if (mode >= 2 && !(priv->ctr & 0x20)) {
/* This mode resets the FIFO, so we may
@@ -163,19 +164,21 @@ static int change_mode(struct parport *p, int m)
case ECR_ECP: /* ECP Parallel Port mode */
/* Busy wait for 200us */
for (counter = 0; counter < 40; counter++) {
- if (inb (ECONTROL (p)) & 0x01)
+ if (inb(ECONTROL(p)) & 0x01)
+ break;
+ if (signal_pending(current))
break;
- if (signal_pending (current)) break;
- udelay (5);
+ udelay(5);
}
/* Poll slowly. */
- while (!(inb (ECONTROL (p)) & 0x01)) {
- if (time_after_eq (jiffies, expire))
+ while (!(inb(ECONTROL(p)) & 0x01)) {
+ if (time_after_eq(jiffies, expire))
/* The FIFO is stuck. */
return -EBUSY;
- schedule_timeout_interruptible(msecs_to_jiffies(10));
- if (signal_pending (current))
+ schedule_timeout_interruptible(
+ msecs_to_jiffies(10));
+ if (signal_pending(current))
break;
}
}
@@ -185,20 +188,20 @@ static int change_mode(struct parport *p, int m)
/* We have to go through mode 001 */
oecr &= ~(7 << 5);
oecr |= ECR_PS2 << 5;
- ECR_WRITE (p, oecr);
+ ECR_WRITE(p, oecr);
}
/* Set the mode. */
oecr &= ~(7 << 5);
oecr |= m << 5;
- ECR_WRITE (p, oecr);
+ ECR_WRITE(p, oecr);
return 0;
}
#ifdef CONFIG_PARPORT_1284
/* Find FIFO lossage; FIFO is reset */
#if 0
-static int get_fifo_residue (struct parport *p)
+static int get_fifo_residue(struct parport *p)
{
int residue;
int cnfga;
@@ -206,26 +209,26 @@ static int get_fifo_residue (struct parport *p)
/* Adjust for the contents of the FIFO. */
for (residue = priv->fifo_depth; ; residue--) {
- if (inb (ECONTROL (p)) & 0x2)
+ if (inb(ECONTROL(p)) & 0x2)
/* Full up. */
break;
- outb (0, FIFO (p));
+ outb(0, FIFO(p));
}
- printk (KERN_DEBUG "%s: %d PWords were left in FIFO\n", p->name,
+ printk(KERN_DEBUG "%s: %d PWords were left in FIFO\n", p->name,
residue);
/* Reset the FIFO. */
- frob_set_mode (p, ECR_PS2);
+ frob_set_mode(p, ECR_PS2);
/* Now change to config mode and clean up. FIXME */
- frob_set_mode (p, ECR_CNF);
- cnfga = inb (CONFIGA (p));
- printk (KERN_DEBUG "%s: cnfgA contains 0x%02x\n", p->name, cnfga);
+ frob_set_mode(p, ECR_CNF);
+ cnfga = inb(CONFIGA(p));
+ printk(KERN_DEBUG "%s: cnfgA contains 0x%02x\n", p->name, cnfga);
if (!(cnfga & (1<<2))) {
- printk (KERN_DEBUG "%s: Accounting for extra byte\n", p->name);
+ printk(KERN_DEBUG "%s: Accounting for extra byte\n", p->name);
residue++;
}
@@ -233,9 +236,11 @@ static int get_fifo_residue (struct parport *p)
* PWord != 1 byte. */
/* Back to PS2 mode. */
- frob_set_mode (p, ECR_PS2);
+ frob_set_mode(p, ECR_PS2);
- DPRINTK (KERN_DEBUG "*** get_fifo_residue: done residue collecting (ecr = 0x%2.2x)\n", inb (ECONTROL (p)));
+ DPRINTK(KERN_DEBUG
+ "*** get_fifo_residue: done residue collecting (ecr = 0x%2.2x)\n",
+ inb(ECONTROL(p)));
return residue;
}
#endif /* 0 */
@@ -257,8 +262,8 @@ static int clear_epp_timeout(struct parport *pb)
/* To clear timeout some chips require double read */
parport_pc_read_status(pb);
r = parport_pc_read_status(pb);
- outb (r | 0x01, STATUS (pb)); /* Some reset by writing 1 */
- outb (r & 0xfe, STATUS (pb)); /* Others by writing 0 */
+ outb(r | 0x01, STATUS(pb)); /* Some reset by writing 1 */
+ outb(r & 0xfe, STATUS(pb)); /* Others by writing 0 */
r = parport_pc_read_status(pb);
return !(r & 0x01);
@@ -272,7 +277,8 @@ static int clear_epp_timeout(struct parport *pb)
* of these are in parport_pc.h.
*/
-static void parport_pc_init_state(struct pardevice *dev, struct parport_state *s)
+static void parport_pc_init_state(struct pardevice *dev,
+ struct parport_state *s)
{
s->u.pc.ctr = 0xc;
if (dev->irq_func &&
@@ -289,22 +295,23 @@ static void parport_pc_save_state(struct parport *p, struct parport_state *s)
const struct parport_pc_private *priv = p->physport->private_data;
s->u.pc.ctr = priv->ctr;
if (priv->ecr)
- s->u.pc.ecr = inb (ECONTROL (p));
+ s->u.pc.ecr = inb(ECONTROL(p));
}
-static void parport_pc_restore_state(struct parport *p, struct parport_state *s)
+static void parport_pc_restore_state(struct parport *p,
+ struct parport_state *s)
{
struct parport_pc_private *priv = p->physport->private_data;
register unsigned char c = s->u.pc.ctr & priv->ctr_writable;
- outb (c, CONTROL (p));
+ outb(c, CONTROL(p));
priv->ctr = c;
if (priv->ecr)
- ECR_WRITE (p, s->u.pc.ecr);
+ ECR_WRITE(p, s->u.pc.ecr);
}
#ifdef CONFIG_PARPORT_1284
-static size_t parport_pc_epp_read_data (struct parport *port, void *buf,
- size_t length, int flags)
+static size_t parport_pc_epp_read_data(struct parport *port, void *buf,
+ size_t length, int flags)
{
size_t got = 0;
@@ -316,54 +323,52 @@ static size_t parport_pc_epp_read_data (struct parport *port, void *buf,
* nFault is 0 if there is at least 1 byte in the Warp's FIFO
* pError is 1 if there are 16 bytes in the Warp's FIFO
*/
- status = inb (STATUS (port));
+ status = inb(STATUS(port));
- while (!(status & 0x08) && (got < length)) {
- if ((left >= 16) && (status & 0x20) && !(status & 0x08)) {
+ while (!(status & 0x08) && got < length) {
+ if (left >= 16 && (status & 0x20) && !(status & 0x08)) {
/* can grab 16 bytes from warp fifo */
- if (!((long)buf & 0x03)) {
- insl (EPPDATA (port), buf, 4);
- } else {
- insb (EPPDATA (port), buf, 16);
- }
+ if (!((long)buf & 0x03))
+ insl(EPPDATA(port), buf, 4);
+ else
+ insb(EPPDATA(port), buf, 16);
buf += 16;
got += 16;
left -= 16;
} else {
/* grab single byte from the warp fifo */
- *((char *)buf) = inb (EPPDATA (port));
+ *((char *)buf) = inb(EPPDATA(port));
buf++;
got++;
left--;
}
- status = inb (STATUS (port));
+ status = inb(STATUS(port));
if (status & 0x01) {
/* EPP timeout should never occur... */
- printk (KERN_DEBUG "%s: EPP timeout occurred while talking to "
- "w91284pic (should not have done)\n", port->name);
- clear_epp_timeout (port);
+ printk(KERN_DEBUG
+"%s: EPP timeout occurred while talking to w91284pic (should not have done)\n", port->name);
+ clear_epp_timeout(port);
}
}
return got;
}
if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
- if (!(((long)buf | length) & 0x03)) {
- insl (EPPDATA (port), buf, (length >> 2));
- } else {
- insb (EPPDATA (port), buf, length);
- }
- if (inb (STATUS (port)) & 0x01) {
- clear_epp_timeout (port);
+ if (!(((long)buf | length) & 0x03))
+ insl(EPPDATA(port), buf, (length >> 2));
+ else
+ insb(EPPDATA(port), buf, length);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
return -EIO;
}
return length;
}
for (; got < length; got++) {
- *((char*)buf) = inb (EPPDATA(port));
+ *((char *)buf) = inb(EPPDATA(port));
buf++;
- if (inb (STATUS (port)) & 0x01) {
+ if (inb(STATUS(port)) & 0x01) {
/* EPP timeout */
- clear_epp_timeout (port);
+ clear_epp_timeout(port);
break;
}
}
@@ -371,28 +376,27 @@ static size_t parport_pc_epp_read_data (struct parport *port, void *buf,
return got;
}
-static size_t parport_pc_epp_write_data (struct parport *port, const void *buf,
- size_t length, int flags)
+static size_t parport_pc_epp_write_data(struct parport *port, const void *buf,
+ size_t length, int flags)
{
size_t written = 0;
if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
- if (!(((long)buf | length) & 0x03)) {
- outsl (EPPDATA (port), buf, (length >> 2));
- } else {
- outsb (EPPDATA (port), buf, length);
- }
- if (inb (STATUS (port)) & 0x01) {
- clear_epp_timeout (port);
+ if (!(((long)buf | length) & 0x03))
+ outsl(EPPDATA(port), buf, (length >> 2));
+ else
+ outsb(EPPDATA(port), buf, length);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
return -EIO;
}
return length;
}
for (; written < length; written++) {
- outb (*((char*)buf), EPPDATA(port));
+ outb(*((char *)buf), EPPDATA(port));
buf++;
- if (inb (STATUS(port)) & 0x01) {
- clear_epp_timeout (port);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
break;
}
}
@@ -400,24 +404,24 @@ static size_t parport_pc_epp_write_data (struct parport *port, const void *buf,
return written;
}
-static size_t parport_pc_epp_read_addr (struct parport *port, void *buf,
+static size_t parport_pc_epp_read_addr(struct parport *port, void *buf,
size_t length, int flags)
{
size_t got = 0;
if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
- insb (EPPADDR (port), buf, length);
- if (inb (STATUS (port)) & 0x01) {
- clear_epp_timeout (port);
+ insb(EPPADDR(port), buf, length);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
return -EIO;
}
return length;
}
for (; got < length; got++) {
- *((char*)buf) = inb (EPPADDR (port));
+ *((char *)buf) = inb(EPPADDR(port));
buf++;
- if (inb (STATUS (port)) & 0x01) {
- clear_epp_timeout (port);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
break;
}
}
@@ -425,25 +429,25 @@ static size_t parport_pc_epp_read_addr (struct parport *port, void *buf,
return got;
}
-static size_t parport_pc_epp_write_addr (struct parport *port,
+static size_t parport_pc_epp_write_addr(struct parport *port,
const void *buf, size_t length,
int flags)
{
size_t written = 0;
if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
- outsb (EPPADDR (port), buf, length);
- if (inb (STATUS (port)) & 0x01) {
- clear_epp_timeout (port);
+ outsb(EPPADDR(port), buf, length);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
return -EIO;
}
return length;
}
for (; written < length; written++) {
- outb (*((char*)buf), EPPADDR (port));
+ outb(*((char *)buf), EPPADDR(port));
buf++;
- if (inb (STATUS (port)) & 0x01) {
- clear_epp_timeout (port);
+ if (inb(STATUS(port)) & 0x01) {
+ clear_epp_timeout(port);
break;
}
}
@@ -451,74 +455,74 @@ static size_t parport_pc_epp_write_addr (struct parport *port,
return written;
}
-static size_t parport_pc_ecpepp_read_data (struct parport *port, void *buf,
- size_t length, int flags)
+static size_t parport_pc_ecpepp_read_data(struct parport *port, void *buf,
+ size_t length, int flags)
{
size_t got;
- frob_set_mode (port, ECR_EPP);
- parport_pc_data_reverse (port);
- parport_pc_write_control (port, 0x4);
- got = parport_pc_epp_read_data (port, buf, length, flags);
- frob_set_mode (port, ECR_PS2);
+ frob_set_mode(port, ECR_EPP);
+ parport_pc_data_reverse(port);
+ parport_pc_write_control(port, 0x4);
+ got = parport_pc_epp_read_data(port, buf, length, flags);
+ frob_set_mode(port, ECR_PS2);
return got;
}
-static size_t parport_pc_ecpepp_write_data (struct parport *port,
- const void *buf, size_t length,
- int flags)
+static size_t parport_pc_ecpepp_write_data(struct parport *port,
+ const void *buf, size_t length,
+ int flags)
{
size_t written;
- frob_set_mode (port, ECR_EPP);
- parport_pc_write_control (port, 0x4);
- parport_pc_data_forward (port);
- written = parport_pc_epp_write_data (port, buf, length, flags);
- frob_set_mode (port, ECR_PS2);
+ frob_set_mode(port, ECR_EPP);
+ parport_pc_write_control(port, 0x4);
+ parport_pc_data_forward(port);
+ written = parport_pc_epp_write_data(port, buf, length, flags);
+ frob_set_mode(port, ECR_PS2);
return written;
}
-static size_t parport_pc_ecpepp_read_addr (struct parport *port, void *buf,
- size_t length, int flags)
+static size_t parport_pc_ecpepp_read_addr(struct parport *port, void *buf,
+ size_t length, int flags)
{
size_t got;
- frob_set_mode (port, ECR_EPP);
- parport_pc_data_reverse (port);
- parport_pc_write_control (port, 0x4);
- got = parport_pc_epp_read_addr (port, buf, length, flags);
- frob_set_mode (port, ECR_PS2);
+ frob_set_mode(port, ECR_EPP);
+ parport_pc_data_reverse(port);
+ parport_pc_write_control(port, 0x4);
+ got = parport_pc_epp_read_addr(port, buf, length, flags);
+ frob_set_mode(port, ECR_PS2);
return got;
}
-static size_t parport_pc_ecpepp_write_addr (struct parport *port,
+static size_t parport_pc_ecpepp_write_addr(struct parport *port,
const void *buf, size_t length,
int flags)
{
size_t written;
- frob_set_mode (port, ECR_EPP);
- parport_pc_write_control (port, 0x4);
- parport_pc_data_forward (port);
- written = parport_pc_epp_write_addr (port, buf, length, flags);
- frob_set_mode (port, ECR_PS2);
+ frob_set_mode(port, ECR_EPP);
+ parport_pc_write_control(port, 0x4);
+ parport_pc_data_forward(port);
+ written = parport_pc_epp_write_addr(port, buf, length, flags);
+ frob_set_mode(port, ECR_PS2);
return written;
}
#endif /* IEEE 1284 support */
#ifdef CONFIG_PARPORT_PC_FIFO
-static size_t parport_pc_fifo_write_block_pio (struct parport *port,
+static size_t parport_pc_fifo_write_block_pio(struct parport *port,
const void *buf, size_t length)
{
int ret = 0;
const unsigned char *bufp = buf;
size_t left = length;
unsigned long expire = jiffies + port->physport->cad->timeout;
- const int fifo = FIFO (port);
+ const int fifo = FIFO(port);
int poll_for = 8; /* 80 usecs */
const struct parport_pc_private *priv = port->physport->private_data;
const int fifo_depth = priv->fifo_depth;
@@ -526,25 +530,25 @@ static size_t parport_pc_fifo_write_block_pio (struct parport *port,
port = port->physport;
/* We don't want to be interrupted every character. */
- parport_pc_disable_irq (port);
+ parport_pc_disable_irq(port);
/* set nErrIntrEn and serviceIntr */
- frob_econtrol (port, (1<<4) | (1<<2), (1<<4) | (1<<2));
+ frob_econtrol(port, (1<<4) | (1<<2), (1<<4) | (1<<2));
/* Forward mode. */
- parport_pc_data_forward (port); /* Must be in PS2 mode */
+ parport_pc_data_forward(port); /* Must be in PS2 mode */
while (left) {
unsigned char byte;
- unsigned char ecrval = inb (ECONTROL (port));
+ unsigned char ecrval = inb(ECONTROL(port));
int i = 0;
- if (need_resched() && time_before (jiffies, expire))
+ if (need_resched() && time_before(jiffies, expire))
/* Can't yield the port. */
- schedule ();
+ schedule();
/* Anyone else waiting for the port? */
if (port->waithead) {
- printk (KERN_DEBUG "Somebody wants the port\n");
+ printk(KERN_DEBUG "Somebody wants the port\n");
break;
}
@@ -552,21 +556,22 @@ static size_t parport_pc_fifo_write_block_pio (struct parport *port,
/* FIFO is full. Wait for interrupt. */
/* Clear serviceIntr */
- ECR_WRITE (port, ecrval & ~(1<<2));
- false_alarm:
- ret = parport_wait_event (port, HZ);
- if (ret < 0) break;
+ ECR_WRITE(port, ecrval & ~(1<<2));
+false_alarm:
+ ret = parport_wait_event(port, HZ);
+ if (ret < 0)
+ break;
ret = 0;
- if (!time_before (jiffies, expire)) {
+ if (!time_before(jiffies, expire)) {
/* Timed out. */
- printk (KERN_DEBUG "FIFO write timed out\n");
+ printk(KERN_DEBUG "FIFO write timed out\n");
break;
}
- ecrval = inb (ECONTROL (port));
+ ecrval = inb(ECONTROL(port));
if (!(ecrval & (1<<2))) {
if (need_resched() &&
- time_before (jiffies, expire))
- schedule ();
+ time_before(jiffies, expire))
+ schedule();
goto false_alarm;
}
@@ -577,38 +582,38 @@ static size_t parport_pc_fifo_write_block_pio (struct parport *port,
/* Can't fail now. */
expire = jiffies + port->cad->timeout;
- poll:
- if (signal_pending (current))
+poll:
+ if (signal_pending(current))
break;
if (ecrval & 0x01) {
/* FIFO is empty. Blast it full. */
const int n = left < fifo_depth ? left : fifo_depth;
- outsb (fifo, bufp, n);
+ outsb(fifo, bufp, n);
bufp += n;
left -= n;
/* Adjust the poll time. */
- if (i < (poll_for - 2)) poll_for--;
+ if (i < (poll_for - 2))
+ poll_for--;
continue;
} else if (i++ < poll_for) {
- udelay (10);
- ecrval = inb (ECONTROL (port));
+ udelay(10);
+ ecrval = inb(ECONTROL(port));
goto poll;
}
- /* Half-full (call me an optimist) */
+ /* Half-full (call me an optimist) */
byte = *bufp++;
- outb (byte, fifo);
+ outb(byte, fifo);
left--;
- }
-
-dump_parport_state ("leave fifo_write_block_pio", port);
+ }
+ dump_parport_state("leave fifo_write_block_pio", port);
return length - left;
}
#ifdef HAS_DMA
-static size_t parport_pc_fifo_write_block_dma (struct parport *port,
+static size_t parport_pc_fifo_write_block_dma(struct parport *port,
const void *buf, size_t length)
{
int ret = 0;
@@ -621,7 +626,7 @@ static size_t parport_pc_fifo_write_block_dma (struct parport *port,
unsigned long start = (unsigned long) buf;
unsigned long end = (unsigned long) buf + length - 1;
-dump_parport_state ("enter fifo_write_block_dma", port);
+ dump_parport_state("enter fifo_write_block_dma", port);
if (end < MAX_DMA_ADDRESS) {
/* If it would cross a 64k boundary, cap it at the end. */
if ((start ^ end) & ~0xffffUL)
@@ -629,8 +634,9 @@ dump_parport_state ("enter fifo_write_block_dma", port);
dma_addr = dma_handle = dma_map_single(dev, (void *)buf, length,
DMA_TO_DEVICE);
- } else {
- /* above 16 MB we use a bounce buffer as ISA-DMA is not possible */
+ } else {
+ /* above 16 MB we use a bounce buffer as ISA-DMA
+ is not possible */
maxlen = PAGE_SIZE; /* sizeof(priv->dma_buf) */
dma_addr = priv->dma_handle;
dma_handle = 0;
@@ -639,12 +645,12 @@ dump_parport_state ("enter fifo_write_block_dma", port);
port = port->physport;
/* We don't want to be interrupted every character. */
- parport_pc_disable_irq (port);
+ parport_pc_disable_irq(port);
/* set nErrIntrEn and serviceIntr */
- frob_econtrol (port, (1<<4) | (1<<2), (1<<4) | (1<<2));
+ frob_econtrol(port, (1<<4) | (1<<2), (1<<4) | (1<<2));
/* Forward mode. */
- parport_pc_data_forward (port); /* Must be in PS2 mode */
+ parport_pc_data_forward(port); /* Must be in PS2 mode */
while (left) {
unsigned long expire = jiffies + port->physport->cad->timeout;
@@ -665,10 +671,10 @@ dump_parport_state ("enter fifo_write_block_dma", port);
set_dma_count(port->dma, count);
/* Set DMA mode */
- frob_econtrol (port, 1<<3, 1<<3);
+ frob_econtrol(port, 1<<3, 1<<3);
/* Clear serviceIntr */
- frob_econtrol (port, 1<<2, 0);
+ frob_econtrol(port, 1<<2, 0);
enable_dma(port->dma);
release_dma_lock(dmaflag);
@@ -676,20 +682,22 @@ dump_parport_state ("enter fifo_write_block_dma", port);
/* assume DMA will be successful */
left -= count;
buf += count;
- if (dma_handle) dma_addr += count;
+ if (dma_handle)
+ dma_addr += count;
/* Wait for interrupt. */
- false_alarm:
- ret = parport_wait_event (port, HZ);
- if (ret < 0) break;
+false_alarm:
+ ret = parport_wait_event(port, HZ);
+ if (ret < 0)
+ break;
ret = 0;
- if (!time_before (jiffies, expire)) {
+ if (!time_before(jiffies, expire)) {
/* Timed out. */
- printk (KERN_DEBUG "DMA write timed out\n");
+ printk(KERN_DEBUG "DMA write timed out\n");
break;
}
/* Is serviceIntr set? */
- if (!(inb (ECONTROL (port)) & (1<<2))) {
+ if (!(inb(ECONTROL(port)) & (1<<2))) {
cond_resched();
goto false_alarm;
@@ -705,14 +713,15 @@ dump_parport_state ("enter fifo_write_block_dma", port);
/* Anyone else waiting for the port? */
if (port->waithead) {
- printk (KERN_DEBUG "Somebody wants the port\n");
+ printk(KERN_DEBUG "Somebody wants the port\n");
break;
}
/* update for possible DMA residue ! */
buf -= count;
left += count;
- if (dma_handle) dma_addr -= count;
+ if (dma_handle)
+ dma_addr -= count;
}
/* Maybe got here through break, so adjust for DMA residue! */
@@ -723,12 +732,12 @@ dump_parport_state ("enter fifo_write_block_dma", port);
release_dma_lock(dmaflag);
/* Turn off DMA mode */
- frob_econtrol (port, 1<<3, 0);
+ frob_econtrol(port, 1<<3, 0);
if (dma_handle)
dma_unmap_single(dev, dma_handle, length, DMA_TO_DEVICE);
-dump_parport_state ("leave fifo_write_block_dma", port);
+ dump_parport_state("leave fifo_write_block_dma", port);
return length - left;
}
#endif
@@ -738,13 +747,13 @@ static inline size_t parport_pc_fifo_write_block(struct parport *port,
{
#ifdef HAS_DMA
if (port->dma != PARPORT_DMA_NONE)
- return parport_pc_fifo_write_block_dma (port, buf, length);
+ return parport_pc_fifo_write_block_dma(port, buf, length);
#endif
- return parport_pc_fifo_write_block_pio (port, buf, length);
+ return parport_pc_fifo_write_block_pio(port, buf, length);
}
/* Parallel Port FIFO mode (ECP chipsets) */
-static size_t parport_pc_compat_write_block_pio (struct parport *port,
+static size_t parport_pc_compat_write_block_pio(struct parport *port,
const void *buf, size_t length,
int flags)
{
@@ -756,14 +765,16 @@ static size_t parport_pc_compat_write_block_pio (struct parport *port,
/* Special case: a timeout of zero means we cannot call schedule().
* Also if O_NONBLOCK is set then use the default implementation. */
if (port->physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
- return parport_ieee1284_write_compat (port, buf,
+ return parport_ieee1284_write_compat(port, buf,
length, flags);
/* Set up parallel port FIFO mode.*/
- parport_pc_data_forward (port); /* Must be in PS2 mode */
- parport_pc_frob_control (port, PARPORT_CONTROL_STROBE, 0);
- r = change_mode (port, ECR_PPF); /* Parallel port FIFO */
- if (r) printk (KERN_DEBUG "%s: Warning change_mode ECR_PPF failed\n", port->name);
+ parport_pc_data_forward(port); /* Must be in PS2 mode */
+ parport_pc_frob_control(port, PARPORT_CONTROL_STROBE, 0);
+ r = change_mode(port, ECR_PPF); /* Parallel port FIFO */
+ if (r)
+ printk(KERN_DEBUG "%s: Warning change_mode ECR_PPF failed\n",
+ port->name);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
@@ -775,40 +786,39 @@ static size_t parport_pc_compat_write_block_pio (struct parport *port,
* the FIFO is empty, so allow 4 seconds for each position
* in the fifo.
*/
- expire = jiffies + (priv->fifo_depth * HZ * 4);
+ expire = jiffies + (priv->fifo_depth * HZ * 4);
do {
/* Wait for the FIFO to empty */
- r = change_mode (port, ECR_PS2);
- if (r != -EBUSY) {
+ r = change_mode(port, ECR_PS2);
+ if (r != -EBUSY)
break;
- }
- } while (time_before (jiffies, expire));
+ } while (time_before(jiffies, expire));
if (r == -EBUSY) {
- printk (KERN_DEBUG "%s: FIFO is stuck\n", port->name);
+ printk(KERN_DEBUG "%s: FIFO is stuck\n", port->name);
/* Prevent further data transfer. */
- frob_set_mode (port, ECR_TST);
+ frob_set_mode(port, ECR_TST);
/* Adjust for the contents of the FIFO. */
for (written -= priv->fifo_depth; ; written++) {
- if (inb (ECONTROL (port)) & 0x2) {
+ if (inb(ECONTROL(port)) & 0x2) {
/* Full up. */
break;
}
- outb (0, FIFO (port));
+ outb(0, FIFO(port));
}
/* Reset the FIFO and return to PS2 mode. */
- frob_set_mode (port, ECR_PS2);
+ frob_set_mode(port, ECR_PS2);
}
- r = parport_wait_peripheral (port,
+ r = parport_wait_peripheral(port,
PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY);
if (r)
- printk (KERN_DEBUG
- "%s: BUSY timeout (%d) in compat_write_block_pio\n",
+ printk(KERN_DEBUG
+ "%s: BUSY timeout (%d) in compat_write_block_pio\n",
port->name, r);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
@@ -818,7 +828,7 @@ static size_t parport_pc_compat_write_block_pio (struct parport *port,
/* ECP */
#ifdef CONFIG_PARPORT_1284
-static size_t parport_pc_ecp_write_block_pio (struct parport *port,
+static size_t parport_pc_ecp_write_block_pio(struct parport *port,
const void *buf, size_t length,
int flags)
{
@@ -830,36 +840,38 @@ static size_t parport_pc_ecp_write_block_pio (struct parport *port,
/* Special case: a timeout of zero means we cannot call schedule().
* Also if O_NONBLOCK is set then use the default implementation. */
if (port->physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
- return parport_ieee1284_ecp_write_data (port, buf,
+ return parport_ieee1284_ecp_write_data(port, buf,
length, flags);
/* Switch to forward mode if necessary. */
if (port->physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
/* Event 47: Set nInit high. */
- parport_frob_control (port,
+ parport_frob_control(port,
PARPORT_CONTROL_INIT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_INIT
| PARPORT_CONTROL_AUTOFD);
/* Event 49: PError goes high. */
- r = parport_wait_peripheral (port,
+ r = parport_wait_peripheral(port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
- printk (KERN_DEBUG "%s: PError timeout (%d) "
+ printk(KERN_DEBUG "%s: PError timeout (%d) "
"in ecp_write_block_pio\n", port->name, r);
}
}
/* Set up ECP parallel port mode.*/
- parport_pc_data_forward (port); /* Must be in PS2 mode */
- parport_pc_frob_control (port,
+ parport_pc_data_forward(port); /* Must be in PS2 mode */
+ parport_pc_frob_control(port,
PARPORT_CONTROL_STROBE |
PARPORT_CONTROL_AUTOFD,
0);
- r = change_mode (port, ECR_ECP); /* ECP FIFO */
- if (r) printk (KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n", port->name);
+ r = change_mode(port, ECR_ECP); /* ECP FIFO */
+ if (r)
+ printk(KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n",
+ port->name);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
/* Write the data to the FIFO. */
@@ -873,55 +885,54 @@ static size_t parport_pc_ecp_write_block_pio (struct parport *port,
expire = jiffies + (priv->fifo_depth * (HZ * 4));
do {
/* Wait for the FIFO to empty */
- r = change_mode (port, ECR_PS2);
- if (r != -EBUSY) {
+ r = change_mode(port, ECR_PS2);
+ if (r != -EBUSY)
break;
- }
- } while (time_before (jiffies, expire));
+ } while (time_before(jiffies, expire));
if (r == -EBUSY) {
- printk (KERN_DEBUG "%s: FIFO is stuck\n", port->name);
+ printk(KERN_DEBUG "%s: FIFO is stuck\n", port->name);
/* Prevent further data transfer. */
- frob_set_mode (port, ECR_TST);
+ frob_set_mode(port, ECR_TST);
/* Adjust for the contents of the FIFO. */
for (written -= priv->fifo_depth; ; written++) {
- if (inb (ECONTROL (port)) & 0x2) {
+ if (inb(ECONTROL(port)) & 0x2) {
/* Full up. */
break;
}
- outb (0, FIFO (port));
+ outb(0, FIFO(port));
}
/* Reset the FIFO and return to PS2 mode. */
- frob_set_mode (port, ECR_PS2);
+ frob_set_mode(port, ECR_PS2);
/* Host transfer recovery. */
- parport_pc_data_reverse (port); /* Must be in PS2 mode */
- udelay (5);
- parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
- r = parport_wait_peripheral (port, PARPORT_STATUS_PAPEROUT, 0);
+ parport_pc_data_reverse(port); /* Must be in PS2 mode */
+ udelay(5);
+ parport_frob_control(port, PARPORT_CONTROL_INIT, 0);
+ r = parport_wait_peripheral(port, PARPORT_STATUS_PAPEROUT, 0);
if (r)
- printk (KERN_DEBUG "%s: PE,1 timeout (%d) "
+ printk(KERN_DEBUG "%s: PE,1 timeout (%d) "
"in ecp_write_block_pio\n", port->name, r);
- parport_frob_control (port,
+ parport_frob_control(port,
PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
- r = parport_wait_peripheral (port,
+ r = parport_wait_peripheral(port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
- if (r)
- printk (KERN_DEBUG "%s: PE,2 timeout (%d) "
+ if (r)
+ printk(KERN_DEBUG "%s: PE,2 timeout (%d) "
"in ecp_write_block_pio\n", port->name, r);
}
- r = parport_wait_peripheral (port,
- PARPORT_STATUS_BUSY,
+ r = parport_wait_peripheral(port,
+ PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY);
- if(r)
- printk (KERN_DEBUG
+ if (r)
+ printk(KERN_DEBUG
"%s: BUSY timeout (%d) in ecp_write_block_pio\n",
port->name, r);
@@ -931,7 +942,7 @@ static size_t parport_pc_ecp_write_block_pio (struct parport *port,
}
#if 0
-static size_t parport_pc_ecp_read_block_pio (struct parport *port,
+static size_t parport_pc_ecp_read_block_pio(struct parport *port,
void *buf, size_t length,
int flags)
{
@@ -944,13 +955,13 @@ static size_t parport_pc_ecp_read_block_pio (struct parport *port,
char *bufp = buf;
port = port->physport;
-DPRINTK (KERN_DEBUG "parport_pc: parport_pc_ecp_read_block_pio\n");
-dump_parport_state ("enter fcn", port);
+ DPRINTK(KERN_DEBUG "parport_pc: parport_pc_ecp_read_block_pio\n");
+ dump_parport_state("enter fcn", port);
/* Special case: a timeout of zero means we cannot call schedule().
* Also if O_NONBLOCK is set then use the default implementation. */
if (port->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
- return parport_ieee1284_ecp_read_data (port, buf,
+ return parport_ieee1284_ecp_read_data(port, buf,
length, flags);
if (port->ieee1284.mode == IEEE1284_MODE_ECPRLE) {
@@ -966,173 +977,178 @@ dump_parport_state ("enter fcn", port);
* go through software emulation. Otherwise we may have to throw
* away data. */
if (length < fifofull)
- return parport_ieee1284_ecp_read_data (port, buf,
+ return parport_ieee1284_ecp_read_data(port, buf,
length, flags);
if (port->ieee1284.phase != IEEE1284_PH_REV_IDLE) {
/* change to reverse-idle phase (must be in forward-idle) */
/* Event 38: Set nAutoFd low (also make sure nStrobe is high) */
- parport_frob_control (port,
+ parport_frob_control(port,
PARPORT_CONTROL_AUTOFD
| PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_AUTOFD);
- parport_pc_data_reverse (port); /* Must be in PS2 mode */
- udelay (5);
+ parport_pc_data_reverse(port); /* Must be in PS2 mode */
+ udelay(5);
/* Event 39: Set nInit low to initiate bus reversal */
- parport_frob_control (port,
+ parport_frob_control(port,
PARPORT_CONTROL_INIT,
0);
/* Event 40: Wait for nAckReverse (PError) to go low */
- r = parport_wait_peripheral (port, PARPORT_STATUS_PAPEROUT, 0);
- if (r) {
- printk (KERN_DEBUG "%s: PE timeout Event 40 (%d) "
+ r = parport_wait_peripheral(port, PARPORT_STATUS_PAPEROUT, 0);
+ if (r) {
+ printk(KERN_DEBUG "%s: PE timeout Event 40 (%d) "
"in ecp_read_block_pio\n", port->name, r);
return 0;
}
}
/* Set up ECP FIFO mode.*/
-/* parport_pc_frob_control (port,
+/* parport_pc_frob_control(port,
PARPORT_CONTROL_STROBE |
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD); */
- r = change_mode (port, ECR_ECP); /* ECP FIFO */
- if (r) printk (KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n", port->name);
+ r = change_mode(port, ECR_ECP); /* ECP FIFO */
+ if (r)
+ printk(KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n",
+ port->name);
port->ieee1284.phase = IEEE1284_PH_REV_DATA;
/* the first byte must be collected manually */
-dump_parport_state ("pre 43", port);
+ dump_parport_state("pre 43", port);
/* Event 43: Wait for nAck to go low */
- r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0);
+ r = parport_wait_peripheral(port, PARPORT_STATUS_ACK, 0);
if (r) {
/* timed out while reading -- no data */
- printk (KERN_DEBUG "PIO read timed out (initial byte)\n");
+ printk(KERN_DEBUG "PIO read timed out (initial byte)\n");
goto out_no_data;
}
/* read byte */
- *bufp++ = inb (DATA (port));
+ *bufp++ = inb(DATA(port));
left--;
-dump_parport_state ("43-44", port);
+ dump_parport_state("43-44", port);
/* Event 44: nAutoFd (HostAck) goes high to acknowledge */
- parport_pc_frob_control (port,
+ parport_pc_frob_control(port,
PARPORT_CONTROL_AUTOFD,
0);
-dump_parport_state ("pre 45", port);
+ dump_parport_state("pre 45", port);
/* Event 45: Wait for nAck to go high */
-/* r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, PARPORT_STATUS_ACK); */
-dump_parport_state ("post 45", port);
-r = 0;
+ /* r = parport_wait_peripheral(port, PARPORT_STATUS_ACK,
+ PARPORT_STATUS_ACK); */
+ dump_parport_state("post 45", port);
+ r = 0;
if (r) {
/* timed out while waiting for peripheral to respond to ack */
- printk (KERN_DEBUG "ECP PIO read timed out (waiting for nAck)\n");
+ printk(KERN_DEBUG "ECP PIO read timed out (waiting for nAck)\n");
/* keep hold of the byte we've got already */
goto out_no_data;
}
/* Event 46: nAutoFd (HostAck) goes low to accept more data */
- parport_pc_frob_control (port,
+ parport_pc_frob_control(port,
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
-dump_parport_state ("rev idle", port);
+ dump_parport_state("rev idle", port);
/* Do the transfer. */
while (left > fifofull) {
int ret;
unsigned long expire = jiffies + port->cad->timeout;
- unsigned char ecrval = inb (ECONTROL (port));
+ unsigned char ecrval = inb(ECONTROL(port));
- if (need_resched() && time_before (jiffies, expire))
+ if (need_resched() && time_before(jiffies, expire))
/* Can't yield the port. */
- schedule ();
+ schedule();
/* At this point, the FIFO may already be full. In
- * that case ECP is already holding back the
- * peripheral (assuming proper design) with a delayed
- * handshake. Work fast to avoid a peripheral
- * timeout. */
+ * that case ECP is already holding back the
+ * peripheral (assuming proper design) with a delayed
+ * handshake. Work fast to avoid a peripheral
+ * timeout. */
if (ecrval & 0x01) {
/* FIFO is empty. Wait for interrupt. */
-dump_parport_state ("FIFO empty", port);
+ dump_parport_state("FIFO empty", port);
/* Anyone else waiting for the port? */
if (port->waithead) {
- printk (KERN_DEBUG "Somebody wants the port\n");
+ printk(KERN_DEBUG "Somebody wants the port\n");
break;
}
/* Clear serviceIntr */
- ECR_WRITE (port, ecrval & ~(1<<2));
- false_alarm:
-dump_parport_state ("waiting", port);
- ret = parport_wait_event (port, HZ);
-DPRINTK (KERN_DEBUG "parport_wait_event returned %d\n", ret);
+ ECR_WRITE(port, ecrval & ~(1<<2));
+false_alarm:
+ dump_parport_state("waiting", port);
+ ret = parport_wait_event(port, HZ);
+ DPRINTK(KERN_DEBUG "parport_wait_event returned %d\n",
+ ret);
if (ret < 0)
break;
ret = 0;
- if (!time_before (jiffies, expire)) {
+ if (!time_before(jiffies, expire)) {
/* Timed out. */
-dump_parport_state ("timeout", port);
- printk (KERN_DEBUG "PIO read timed out\n");
+ dump_parport_state("timeout", port);
+ printk(KERN_DEBUG "PIO read timed out\n");
break;
}
- ecrval = inb (ECONTROL (port));
+ ecrval = inb(ECONTROL(port));
if (!(ecrval & (1<<2))) {
if (need_resched() &&
- time_before (jiffies, expire)) {
- schedule ();
+ time_before(jiffies, expire)) {
+ schedule();
}
goto false_alarm;
}
/* Depending on how the FIFO threshold was
- * set, how long interrupt service took, and
- * how fast the peripheral is, we might be
- * lucky and have a just filled FIFO. */
+ * set, how long interrupt service took, and
+ * how fast the peripheral is, we might be
+ * lucky and have a just filled FIFO. */
continue;
}
if (ecrval & 0x02) {
/* FIFO is full. */
-dump_parport_state ("FIFO full", port);
- insb (fifo, bufp, fifo_depth);
+ dump_parport_state("FIFO full", port);
+ insb(fifo, bufp, fifo_depth);
bufp += fifo_depth;
left -= fifo_depth;
continue;
}
-DPRINTK (KERN_DEBUG "*** ecp_read_block_pio: reading one byte from the FIFO\n");
+ DPRINTK(KERN_DEBUG
+ "*** ecp_read_block_pio: reading one byte from the FIFO\n");
/* FIFO not filled. We will cycle this loop for a while
- * and either the peripheral will fill it faster,
- * tripping a fast empty with insb, or we empty it. */
- *bufp++ = inb (fifo);
+ * and either the peripheral will fill it faster,
+ * tripping a fast empty with insb, or we empty it. */
+ *bufp++ = inb(fifo);
left--;
}
/* scoop up anything left in the FIFO */
- while (left && !(inb (ECONTROL (port) & 0x01))) {
- *bufp++ = inb (fifo);
+ while (left && !(inb(ECONTROL(port)) & 0x01)) {
+ *bufp++ = inb(fifo);
left--;
}
port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
-dump_parport_state ("rev idle2", port);
+ dump_parport_state("rev idle2", port);
out_no_data:
/* Go to forward idle mode to shut the peripheral up (event 47). */
- parport_frob_control (port, PARPORT_CONTROL_INIT, PARPORT_CONTROL_INIT);
+ parport_frob_control(port, PARPORT_CONTROL_INIT, PARPORT_CONTROL_INIT);
/* event 49: PError goes high */
- r = parport_wait_peripheral (port,
+ r = parport_wait_peripheral(port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
- printk (KERN_DEBUG
+ printk(KERN_DEBUG
"%s: PE timeout FWDIDLE (%d) in ecp_read_block_pio\n",
port->name, r);
}
@@ -1141,14 +1157,14 @@ out_no_data:
/* Finish up. */
{
- int lost = get_fifo_residue (port);
+ int lost = get_fifo_residue(port);
if (lost)
/* Shouldn't happen with compliant peripherals. */
- printk (KERN_DEBUG "%s: DATA LOSS (%d bytes)!\n",
+ printk(KERN_DEBUG "%s: DATA LOSS (%d bytes)!\n",
port->name, lost);
}
-dump_parport_state ("fwd idle", port);
+ dump_parport_state("fwd idle", port);
return length - left;
}
#endif /* 0 */
@@ -1164,8 +1180,7 @@ dump_parport_state ("fwd idle", port);
/* GCC is not inlining extern inline function later overwritten to non-inline,
so we use outlined_ variants here. */
-static const struct parport_operations parport_pc_ops =
-{
+static const struct parport_operations parport_pc_ops = {
.write_data = parport_pc_write_data,
.read_data = parport_pc_read_data,
@@ -1202,88 +1217,107 @@ static const struct parport_operations parport_pc_ops =
};
#ifdef CONFIG_PARPORT_PC_SUPERIO
+
+static struct superio_struct *find_free_superio(void)
+{
+ int i;
+ for (i = 0; i < NR_SUPERIOS; i++)
+ if (superios[i].io == 0)
+ return &superios[i];
+ return NULL;
+}
+
+
/* Super-IO chipset detection, Winbond, SMSC */
static void __devinit show_parconfig_smsc37c669(int io, int key)
{
- int cr1,cr4,cra,cr23,cr26,cr27,i=0;
- static const char *const modes[]={
+ int cr1, cr4, cra, cr23, cr26, cr27;
+ struct superio_struct *s;
+
+ static const char *const modes[] = {
"SPP and Bidirectional (PS/2)",
"EPP and SPP",
"ECP",
"ECP and EPP" };
- outb(key,io);
- outb(key,io);
- outb(1,io);
- cr1=inb(io+1);
- outb(4,io);
- cr4=inb(io+1);
- outb(0x0a,io);
- cra=inb(io+1);
- outb(0x23,io);
- cr23=inb(io+1);
- outb(0x26,io);
- cr26=inb(io+1);
- outb(0x27,io);
- cr27=inb(io+1);
- outb(0xaa,io);
+ outb(key, io);
+ outb(key, io);
+ outb(1, io);
+ cr1 = inb(io + 1);
+ outb(4, io);
+ cr4 = inb(io + 1);
+ outb(0x0a, io);
+ cra = inb(io + 1);
+ outb(0x23, io);
+ cr23 = inb(io + 1);
+ outb(0x26, io);
+ cr26 = inb(io + 1);
+ outb(0x27, io);
+ cr27 = inb(io + 1);
+ outb(0xaa, io);
if (verbose_probing) {
- printk (KERN_INFO "SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, "
+ printk(KERN_INFO
+ "SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, "
"A=0x%2x, 23=0x%02x, 26=0x%02x, 27=0x%02x\n",
- cr1,cr4,cra,cr23,cr26,cr27);
-
+ cr1, cr4, cra, cr23, cr26, cr27);
+
/* The documentation calls DMA and IRQ-Lines by letters, so
the board maker can/will wire them
appropriately/randomly... G=reserved H=IDE-irq, */
- printk (KERN_INFO "SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, "
- "fifo threshold=%d\n", cr23*4,
- (cr27 &0x0f) ? 'A'-1+(cr27 &0x0f): '-',
- (cr26 &0x0f) ? 'A'-1+(cr26 &0x0f): '-', cra & 0x0f);
+ printk(KERN_INFO
+ "SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, fifo threshold=%d\n",
+ cr23 * 4,
+ (cr27 & 0x0f) ? 'A' - 1 + (cr27 & 0x0f) : '-',
+ (cr26 & 0x0f) ? 'A' - 1 + (cr26 & 0x0f) : '-',
+ cra & 0x0f);
printk(KERN_INFO "SMSC LPT Config: enabled=%s power=%s\n",
- (cr23*4 >=0x100) ?"yes":"no", (cr1 & 4) ? "yes" : "no");
- printk(KERN_INFO "SMSC LPT Config: Port mode=%s, EPP version =%s\n",
- (cr1 & 0x08 ) ? "Standard mode only (SPP)" : modes[cr4 & 0x03],
- (cr4 & 0x40) ? "1.7" : "1.9");
+ (cr23 * 4 >= 0x100) ? "yes" : "no",
+ (cr1 & 4) ? "yes" : "no");
+ printk(KERN_INFO
+ "SMSC LPT Config: Port mode=%s, EPP version =%s\n",
+ (cr1 & 0x08) ? "Standard mode only (SPP)"
+ : modes[cr4 & 0x03],
+ (cr4 & 0x40) ? "1.7" : "1.9");
}
-
+
/* Heuristics ! BIOS setup for this mainboard device limits
the choices to standard settings, i.e. io-address and IRQ
are related, however DMA can be 1 or 3, assume DMA_A=DMA1,
DMA_C=DMA3 (this is true e.g. for TYAN 1564D Tomcat IV) */
- if(cr23*4 >=0x100) { /* if active */
- while((superios[i].io!= 0) && (i<NR_SUPERIOS))
- i++;
- if(i==NR_SUPERIOS)
+ if (cr23 * 4 >= 0x100) { /* if active */
+ s = find_free_superio();
+ if (s == NULL)
printk(KERN_INFO "Super-IO: too many chips!\n");
else {
int d;
- switch (cr23*4) {
- case 0x3bc:
- superios[i].io = 0x3bc;
- superios[i].irq = 7;
- break;
- case 0x378:
- superios[i].io = 0x378;
- superios[i].irq = 7;
- break;
- case 0x278:
- superios[i].io = 0x278;
- superios[i].irq = 5;
+ switch (cr23 * 4) {
+ case 0x3bc:
+ s->io = 0x3bc;
+ s->irq = 7;
+ break;
+ case 0x378:
+ s->io = 0x378;
+ s->irq = 7;
+ break;
+ case 0x278:
+ s->io = 0x278;
+ s->irq = 5;
}
- d=(cr26 &0x0f);
- if((d==1) || (d==3))
- superios[i].dma= d;
+ d = (cr26 & 0x0f);
+ if (d == 1 || d == 3)
+ s->dma = d;
else
- superios[i].dma= PARPORT_DMA_NONE;
+ s->dma = PARPORT_DMA_NONE;
}
- }
+ }
}
static void __devinit show_parconfig_winbond(int io, int key)
{
- int cr30,cr60,cr61,cr70,cr74,crf0,i=0;
+ int cr30, cr60, cr61, cr70, cr74, crf0;
+ struct superio_struct *s;
static const char *const modes[] = {
"Standard (SPP) and Bidirectional(PS/2)", /* 0 */
"EPP-1.9 and SPP",
@@ -1296,110 +1330,134 @@ static void __devinit show_parconfig_winbond(int io, int key)
static char *const irqtypes[] = {
"pulsed low, high-Z",
"follows nACK" };
-
+
/* The registers are called compatible-PnP because the
- register layout is modelled after ISA-PnP, the access
- method is just another ... */
- outb(key,io);
- outb(key,io);
- outb(0x07,io); /* Register 7: Select Logical Device */
- outb(0x01,io+1); /* LD1 is Parallel Port */
- outb(0x30,io);
- cr30=inb(io+1);
- outb(0x60,io);
- cr60=inb(io+1);
- outb(0x61,io);
- cr61=inb(io+1);
- outb(0x70,io);
- cr70=inb(io+1);
- outb(0x74,io);
- cr74=inb(io+1);
- outb(0xf0,io);
- crf0=inb(io+1);
- outb(0xaa,io);
+ register layout is modelled after ISA-PnP, the access
+ method is just another ... */
+ outb(key, io);
+ outb(key, io);
+ outb(0x07, io); /* Register 7: Select Logical Device */
+ outb(0x01, io + 1); /* LD1 is Parallel Port */
+ outb(0x30, io);
+ cr30 = inb(io + 1);
+ outb(0x60, io);
+ cr60 = inb(io + 1);
+ outb(0x61, io);
+ cr61 = inb(io + 1);
+ outb(0x70, io);
+ cr70 = inb(io + 1);
+ outb(0x74, io);
+ cr74 = inb(io + 1);
+ outb(0xf0, io);
+ crf0 = inb(io + 1);
+ outb(0xaa, io);
if (verbose_probing) {
- printk(KERN_INFO "Winbond LPT Config: cr_30=%02x 60,61=%02x%02x "
- "70=%02x 74=%02x, f0=%02x\n", cr30,cr60,cr61,cr70,cr74,crf0);
- printk(KERN_INFO "Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
- (cr30 & 0x01) ? "yes":"no", cr60,cr61,cr70&0x0f );
+ printk(KERN_INFO
+ "Winbond LPT Config: cr_30=%02x 60,61=%02x%02x 70=%02x 74=%02x, f0=%02x\n",
+ cr30, cr60, cr61, cr70, cr74, crf0);
+ printk(KERN_INFO "Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
+ (cr30 & 0x01) ? "yes" : "no", cr60, cr61, cr70 & 0x0f);
if ((cr74 & 0x07) > 3)
printk("dma=none\n");
else
- printk("dma=%d\n",cr74 & 0x07);
- printk(KERN_INFO "Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
- irqtypes[crf0>>7], (crf0>>3)&0x0f);
- printk(KERN_INFO "Winbond LPT Config: Port mode=%s\n", modes[crf0 & 0x07]);
+ printk("dma=%d\n", cr74 & 0x07);
+ printk(KERN_INFO
+ "Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
+ irqtypes[crf0>>7], (crf0>>3)&0x0f);
+ printk(KERN_INFO "Winbond LPT Config: Port mode=%s\n",
+ modes[crf0 & 0x07]);
}
- if(cr30 & 0x01) { /* the settings can be interrogated later ... */
- while((superios[i].io!= 0) && (i<NR_SUPERIOS))
- i++;
- if(i==NR_SUPERIOS)
+ if (cr30 & 0x01) { /* the settings can be interrogated later ... */
+ s = find_free_superio();
+ if (s == NULL)
printk(KERN_INFO "Super-IO: too many chips!\n");
else {
- superios[i].io = (cr60<<8)|cr61;
- superios[i].irq = cr70&0x0f;
- superios[i].dma = (((cr74 & 0x07) > 3) ?
+ s->io = (cr60 << 8) | cr61;
+ s->irq = cr70 & 0x0f;
+ s->dma = (((cr74 & 0x07) > 3) ?
PARPORT_DMA_NONE : (cr74 & 0x07));
}
}
}
-static void __devinit decode_winbond(int efer, int key, int devid, int devrev, int oldid)
+static void __devinit decode_winbond(int efer, int key, int devid,
+ int devrev, int oldid)
{
const char *type = "unknown";
- int id,progif=2;
+ int id, progif = 2;
if (devid == devrev)
/* simple heuristics, we happened to read some
- non-winbond register */
+ non-winbond register */
return;
- id=(devid<<8) | devrev;
+ id = (devid << 8) | devrev;
/* Values are from public data sheets pdf files, I can just
- confirm 83977TF is correct :-) */
- if (id == 0x9771) type="83977F/AF";
- else if (id == 0x9773) type="83977TF / SMSC 97w33x/97w34x";
- else if (id == 0x9774) type="83977ATF";
- else if ((id & ~0x0f) == 0x5270) type="83977CTF / SMSC 97w36x";
- else if ((id & ~0x0f) == 0x52f0) type="83977EF / SMSC 97w35x";
- else if ((id & ~0x0f) == 0x5210) type="83627";
- else if ((id & ~0x0f) == 0x6010) type="83697HF";
- else if ((oldid &0x0f ) == 0x0a) { type="83877F"; progif=1;}
- else if ((oldid &0x0f ) == 0x0b) { type="83877AF"; progif=1;}
- else if ((oldid &0x0f ) == 0x0c) { type="83877TF"; progif=1;}
- else if ((oldid &0x0f ) == 0x0d) { type="83877ATF"; progif=1;}
- else progif=0;
+ confirm 83977TF is correct :-) */
+ if (id == 0x9771)
+ type = "83977F/AF";
+ else if (id == 0x9773)
+ type = "83977TF / SMSC 97w33x/97w34x";
+ else if (id == 0x9774)
+ type = "83977ATF";
+ else if ((id & ~0x0f) == 0x5270)
+ type = "83977CTF / SMSC 97w36x";
+ else if ((id & ~0x0f) == 0x52f0)
+ type = "83977EF / SMSC 97w35x";
+ else if ((id & ~0x0f) == 0x5210)
+ type = "83627";
+ else if ((id & ~0x0f) == 0x6010)
+ type = "83697HF";
+ else if ((oldid & 0x0f) == 0x0a) {
+ type = "83877F";
+ progif = 1;
+ } else if ((oldid & 0x0f) == 0x0b) {
+ type = "83877AF";
+ progif = 1;
+ } else if ((oldid & 0x0f) == 0x0c) {
+ type = "83877TF";
+ progif = 1;
+ } else if ((oldid & 0x0f) == 0x0d) {
+ type = "83877ATF";
+ progif = 1;
+ } else
+ progif = 0;
if (verbose_probing)
printk(KERN_INFO "Winbond chip at EFER=0x%x key=0x%02x "
- "devid=%02x devrev=%02x oldid=%02x type=%s\n",
+ "devid=%02x devrev=%02x oldid=%02x type=%s\n",
efer, key, devid, devrev, oldid, type);
if (progif == 2)
- show_parconfig_winbond(efer,key);
+ show_parconfig_winbond(efer, key);
}
static void __devinit decode_smsc(int efer, int key, int devid, int devrev)
{
- const char *type = "unknown";
+ const char *type = "unknown";
void (*func)(int io, int key);
- int id;
+ int id;
- if (devid == devrev)
+ if (devid == devrev)
/* simple heuristics, we happened to read some
- non-smsc register */
+ non-smsc register */
return;
- func=NULL;
- id=(devid<<8) | devrev;
+ func = NULL;
+ id = (devid << 8) | devrev;
- if (id==0x0302) {type="37c669"; func=show_parconfig_smsc37c669;}
- else if (id==0x6582) type="37c665IR";
- else if (devid==0x65) type="37c665GT";
- else if (devid==0x66) type="37c666GT";
+ if (id == 0x0302) {
+ type = "37c669";
+ func = show_parconfig_smsc37c669;
+ } else if (id == 0x6582)
+ type = "37c665IR";
+ else if (devid == 0x65)
+ type = "37c665GT";
+ else if (devid == 0x66)
+ type = "37c666GT";
if (verbose_probing)
printk(KERN_INFO "SMSC chip at EFER=0x%x "
@@ -1407,138 +1465,138 @@ static void __devinit decode_smsc(int efer, int key, int devid, int devrev)
efer, key, devid, devrev, type);
if (func)
- func(efer,key);
+ func(efer, key);
}
static void __devinit winbond_check(int io, int key)
{
- int devid,devrev,oldid,x_devid,x_devrev,x_oldid;
+ int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
/* First probe without key */
- outb(0x20,io);
- x_devid=inb(io+1);
- outb(0x21,io);
- x_devrev=inb(io+1);
- outb(0x09,io);
- x_oldid=inb(io+1);
-
- outb(key,io);
- outb(key,io); /* Write Magic Sequence to EFER, extended
- funtion enable register */
- outb(0x20,io); /* Write EFIR, extended function index register */
- devid=inb(io+1); /* Read EFDR, extended function data register */
- outb(0x21,io);
- devrev=inb(io+1);
- outb(0x09,io);
- oldid=inb(io+1);
- outb(0xaa,io); /* Magic Seal */
+ outb(0x20, io);
+ x_devid = inb(io + 1);
+ outb(0x21, io);
+ x_devrev = inb(io + 1);
+ outb(0x09, io);
+ x_oldid = inb(io + 1);
+
+ outb(key, io);
+ outb(key, io); /* Write Magic Sequence to EFER, extended
+ function enable register */
+ outb(0x20, io); /* Write EFIR, extended function index register */
+ devid = inb(io + 1); /* Read EFDR, extended function data register */
+ outb(0x21, io);
+ devrev = inb(io + 1);
+ outb(0x09, io);
+ oldid = inb(io + 1);
+ outb(0xaa, io); /* Magic Seal */
if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid))
goto out; /* protection against false positives */
- decode_winbond(io,key,devid,devrev,oldid);
+ decode_winbond(io, key, devid, devrev, oldid);
out:
release_region(io, 3);
}
-static void __devinit winbond_check2(int io,int key)
+static void __devinit winbond_check2(int io, int key)
{
- int devid,devrev,oldid,x_devid,x_devrev,x_oldid;
+ int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
/* First probe without the key */
- outb(0x20,io+2);
- x_devid=inb(io+2);
- outb(0x21,io+1);
- x_devrev=inb(io+2);
- outb(0x09,io+1);
- x_oldid=inb(io+2);
-
- outb(key,io); /* Write Magic Byte to EFER, extended
- funtion enable register */
- outb(0x20,io+2); /* Write EFIR, extended function index register */
- devid=inb(io+2); /* Read EFDR, extended function data register */
- outb(0x21,io+1);
- devrev=inb(io+2);
- outb(0x09,io+1);
- oldid=inb(io+2);
- outb(0xaa,io); /* Magic Seal */
-
- if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid))
+ outb(0x20, io + 2);
+ x_devid = inb(io + 2);
+ outb(0x21, io + 1);
+ x_devrev = inb(io + 2);
+ outb(0x09, io + 1);
+ x_oldid = inb(io + 2);
+
+ outb(key, io); /* Write Magic Byte to EFER, extended
+ function enable register */
+ outb(0x20, io + 2); /* Write EFIR, extended function index register */
+ devid = inb(io + 2); /* Read EFDR, extended function data register */
+ outb(0x21, io + 1);
+ devrev = inb(io + 2);
+ outb(0x09, io + 1);
+ oldid = inb(io + 2);
+ outb(0xaa, io); /* Magic Seal */
+
+ if (x_devid == devid && x_devrev == devrev && x_oldid == oldid)
goto out; /* protection against false positives */
- decode_winbond(io,key,devid,devrev,oldid);
+ decode_winbond(io, key, devid, devrev, oldid);
out:
release_region(io, 3);
}
static void __devinit smsc_check(int io, int key)
{
- int id,rev,oldid,oldrev,x_id,x_rev,x_oldid,x_oldrev;
+ int id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
if (!request_region(io, 3, __func__))
return;
/* First probe without the key */
- outb(0x0d,io);
- x_oldid=inb(io+1);
- outb(0x0e,io);
- x_oldrev=inb(io+1);
- outb(0x20,io);
- x_id=inb(io+1);
- outb(0x21,io);
- x_rev=inb(io+1);
-
- outb(key,io);
- outb(key,io); /* Write Magic Sequence to EFER, extended
- funtion enable register */
- outb(0x0d,io); /* Write EFIR, extended function index register */
- oldid=inb(io+1); /* Read EFDR, extended function data register */
- outb(0x0e,io);
- oldrev=inb(io+1);
- outb(0x20,io);
- id=inb(io+1);
- outb(0x21,io);
- rev=inb(io+1);
- outb(0xaa,io); /* Magic Seal */
-
- if ((x_id == id) && (x_oldrev == oldrev) &&
- (x_oldid == oldid) && (x_rev == rev))
+ outb(0x0d, io);
+ x_oldid = inb(io + 1);
+ outb(0x0e, io);
+ x_oldrev = inb(io + 1);
+ outb(0x20, io);
+ x_id = inb(io + 1);
+ outb(0x21, io);
+ x_rev = inb(io + 1);
+
+ outb(key, io);
+ outb(key, io); /* Write Magic Sequence to EFER, extended
+ function enable register */
+ outb(0x0d, io); /* Write EFIR, extended function index register */
+ oldid = inb(io + 1); /* Read EFDR, extended function data register */
+ outb(0x0e, io);
+ oldrev = inb(io + 1);
+ outb(0x20, io);
+ id = inb(io + 1);
+ outb(0x21, io);
+ rev = inb(io + 1);
+ outb(0xaa, io); /* Magic Seal */
+
+ if (x_id == id && x_oldrev == oldrev &&
+ x_oldid == oldid && x_rev == rev)
goto out; /* protection against false positives */
- decode_smsc(io,key,oldid,oldrev);
+ decode_smsc(io, key, oldid, oldrev);
out:
release_region(io, 3);
}
-static void __devinit detect_and_report_winbond (void)
-{
+static void __devinit detect_and_report_winbond(void)
+{
if (verbose_probing)
printk(KERN_DEBUG "Winbond Super-IO detection, now testing ports 3F0,370,250,4E,2E ...\n");
- winbond_check(0x3f0,0x87);
- winbond_check(0x370,0x87);
- winbond_check(0x2e ,0x87);
- winbond_check(0x4e ,0x87);
- winbond_check(0x3f0,0x86);
- winbond_check2(0x250,0x88);
- winbond_check2(0x250,0x89);
+ winbond_check(0x3f0, 0x87);
+ winbond_check(0x370, 0x87);
+ winbond_check(0x2e, 0x87);
+ winbond_check(0x4e, 0x87);
+ winbond_check(0x3f0, 0x86);
+ winbond_check2(0x250, 0x88);
+ winbond_check2(0x250, 0x89);
}
-static void __devinit detect_and_report_smsc (void)
+static void __devinit detect_and_report_smsc(void)
{
if (verbose_probing)
printk(KERN_DEBUG "SMSC Super-IO detection, now testing Ports 2F0, 370 ...\n");
- smsc_check(0x3f0,0x55);
- smsc_check(0x370,0x55);
- smsc_check(0x3f0,0x44);
- smsc_check(0x370,0x44);
+ smsc_check(0x3f0, 0x55);
+ smsc_check(0x370, 0x55);
+ smsc_check(0x3f0, 0x44);
+ smsc_check(0x370, 0x44);
}
static void __devinit detect_and_report_it87(void)
@@ -1573,34 +1631,39 @@ static void __devinit detect_and_report_it87(void)
}
#endif /* CONFIG_PARPORT_PC_SUPERIO */
-static int get_superio_dma (struct parport *p)
+static struct superio_struct *find_superio(struct parport *p)
{
- int i=0;
- while( (superios[i].io != p->base) && (i<NR_SUPERIOS))
- i++;
- if (i!=NR_SUPERIOS)
- return superios[i].dma;
+ int i;
+ for (i = 0; i < NR_SUPERIOS; i++)
+ if (superios[i].io == p->base)
+ return &superios[i];
+ return NULL;
+}
+
+static int get_superio_dma(struct parport *p)
+{
+ struct superio_struct *s = find_superio(p);
+ if (s)
+ return s->dma;
return PARPORT_DMA_NONE;
}
-static int get_superio_irq (struct parport *p)
+static int get_superio_irq(struct parport *p)
{
- int i=0;
- while( (superios[i].io != p->base) && (i<NR_SUPERIOS))
- i++;
- if (i!=NR_SUPERIOS)
- return superios[i].irq;
- return PARPORT_IRQ_NONE;
+ struct superio_struct *s = find_superio(p);
+ if (s)
+ return s->irq;
+ return PARPORT_IRQ_NONE;
}
-
+
/* --- Mode detection ------------------------------------- */
/*
* Checks for port existence, all ports support SPP MODE
- * Returns:
+ * Returns:
* 0 : No parallel port at this address
- * PARPORT_MODE_PCSPP : SPP port detected
+ * PARPORT_MODE_PCSPP : SPP port detected
* (if the user specified an ioport himself,
* this shall always be the case!)
*
@@ -1610,7 +1673,7 @@ static int parport_SPP_supported(struct parport *pb)
unsigned char r, w;
/*
- * first clear an eventually pending EPP timeout
+ * first clear an eventually pending EPP timeout
* I (sailer@ife.ee.ethz.ch) have an SMSC chipset
* that does not even respond to SPP cycles if an EPP
* timeout is pending
@@ -1619,19 +1682,19 @@ static int parport_SPP_supported(struct parport *pb)
/* Do a simple read-write test to make sure the port exists. */
w = 0xc;
- outb (w, CONTROL (pb));
+ outb(w, CONTROL(pb));
/* Is there a control register that we can read from? Some
* ports don't allow reads, so read_control just returns a
* software copy. Some ports _do_ allow reads, so bypass the
* software copy here. In addition, some bits aren't
* writable. */
- r = inb (CONTROL (pb));
+ r = inb(CONTROL(pb));
if ((r & 0xf) == w) {
w = 0xe;
- outb (w, CONTROL (pb));
- r = inb (CONTROL (pb));
- outb (0xc, CONTROL (pb));
+ outb(w, CONTROL(pb));
+ r = inb(CONTROL(pb));
+ outb(0xc, CONTROL(pb));
if ((r & 0xf) == w)
return PARPORT_MODE_PCSPP;
}
@@ -1639,18 +1702,18 @@ static int parport_SPP_supported(struct parport *pb)
if (user_specified)
/* That didn't work, but the user thinks there's a
* port here. */
- printk (KERN_INFO "parport 0x%lx (WARNING): CTR: "
+ printk(KERN_INFO "parport 0x%lx (WARNING): CTR: "
"wrote 0x%02x, read 0x%02x\n", pb->base, w, r);
/* Try the data register. The data lines aren't tri-stated at
* this stage, so we expect back what we wrote. */
w = 0xaa;
- parport_pc_write_data (pb, w);
- r = parport_pc_read_data (pb);
+ parport_pc_write_data(pb, w);
+ r = parport_pc_read_data(pb);
if (r == w) {
w = 0x55;
- parport_pc_write_data (pb, w);
- r = parport_pc_read_data (pb);
+ parport_pc_write_data(pb, w);
+ r = parport_pc_read_data(pb);
if (r == w)
return PARPORT_MODE_PCSPP;
}
@@ -1658,9 +1721,9 @@ static int parport_SPP_supported(struct parport *pb)
if (user_specified) {
/* Didn't work, but the user is convinced this is the
* place. */
- printk (KERN_INFO "parport 0x%lx (WARNING): DATA: "
+ printk(KERN_INFO "parport 0x%lx (WARNING): DATA: "
"wrote 0x%02x, read 0x%02x\n", pb->base, w, r);
- printk (KERN_INFO "parport 0x%lx: You gave this address, "
+ printk(KERN_INFO "parport 0x%lx: You gave this address, "
"but there is probably no parallel port there!\n",
pb->base);
}
@@ -1691,33 +1754,33 @@ static int parport_ECR_present(struct parport *pb)
struct parport_pc_private *priv = pb->private_data;
unsigned char r = 0xc;
- outb (r, CONTROL (pb));
- if ((inb (ECONTROL (pb)) & 0x3) == (r & 0x3)) {
- outb (r ^ 0x2, CONTROL (pb)); /* Toggle bit 1 */
+ outb(r, CONTROL(pb));
+ if ((inb(ECONTROL(pb)) & 0x3) == (r & 0x3)) {
+ outb(r ^ 0x2, CONTROL(pb)); /* Toggle bit 1 */
- r = inb (CONTROL (pb));
- if ((inb (ECONTROL (pb)) & 0x2) == (r & 0x2))
+ r = inb(CONTROL(pb));
+ if ((inb(ECONTROL(pb)) & 0x2) == (r & 0x2))
goto no_reg; /* Sure that no ECR register exists */
}
-
- if ((inb (ECONTROL (pb)) & 0x3 ) != 0x1)
+
+ if ((inb(ECONTROL(pb)) & 0x3) != 0x1)
goto no_reg;
- ECR_WRITE (pb, 0x34);
- if (inb (ECONTROL (pb)) != 0x35)
+ ECR_WRITE(pb, 0x34);
+ if (inb(ECONTROL(pb)) != 0x35)
goto no_reg;
priv->ecr = 1;
- outb (0xc, CONTROL (pb));
-
+ outb(0xc, CONTROL(pb));
+
/* Go to mode 000 */
- frob_set_mode (pb, ECR_SPP);
+ frob_set_mode(pb, ECR_SPP);
return 1;
no_reg:
- outb (0xc, CONTROL (pb));
- return 0;
+ outb(0xc, CONTROL(pb));
+ return 0;
}
#ifdef CONFIG_PARPORT_1284
@@ -1727,7 +1790,7 @@ static int parport_ECR_present(struct parport *pb)
* allows us to read data from the data lines. In theory we would get back
* 0xff but any peripheral attached to the port may drag some or all of the
* lines down to zero. So if we get back anything that isn't the contents
- * of the data register we deem PS/2 support to be present.
+ * of the data register we deem PS/2 support to be present.
*
* Some SPP ports have "half PS/2" ability - you can't turn off the line
* drivers, but an external peripheral with sufficiently beefy drivers of
@@ -1735,26 +1798,28 @@ static int parport_ECR_present(struct parport *pb)
* where they can then be read back as normal. Ports with this property
* and the right type of device attached are likely to fail the SPP test,
* (as they will appear to have stuck bits) and so the fact that they might
- * be misdetected here is rather academic.
+ * be misdetected here is rather academic.
*/
static int parport_PS2_supported(struct parport *pb)
{
int ok = 0;
-
+
clear_epp_timeout(pb);
/* try to tri-state the buffer */
- parport_pc_data_reverse (pb);
-
+ parport_pc_data_reverse(pb);
+
parport_pc_write_data(pb, 0x55);
- if (parport_pc_read_data(pb) != 0x55) ok++;
+ if (parport_pc_read_data(pb) != 0x55)
+ ok++;
parport_pc_write_data(pb, 0xaa);
- if (parport_pc_read_data(pb) != 0xaa) ok++;
+ if (parport_pc_read_data(pb) != 0xaa)
+ ok++;
/* cancel input mode */
- parport_pc_data_forward (pb);
+ parport_pc_data_forward(pb);
if (ok) {
pb->modes |= PARPORT_MODE_TRISTATE;
@@ -1773,68 +1838,68 @@ static int parport_ECP_supported(struct parport *pb)
int config, configb;
int pword;
struct parport_pc_private *priv = pb->private_data;
- /* Translate ECP intrLine to ISA irq value */
- static const int intrline[]= { 0, 7, 9, 10, 11, 14, 15, 5 };
+ /* Translate ECP intrLine to ISA irq value */
+ static const int intrline[] = { 0, 7, 9, 10, 11, 14, 15, 5 };
/* If there is no ECR, we have no hope of supporting ECP. */
if (!priv->ecr)
return 0;
/* Find out FIFO depth */
- ECR_WRITE (pb, ECR_SPP << 5); /* Reset FIFO */
- ECR_WRITE (pb, ECR_TST << 5); /* TEST FIFO */
- for (i=0; i < 1024 && !(inb (ECONTROL (pb)) & 0x02); i++)
- outb (0xaa, FIFO (pb));
+ ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
+ ECR_WRITE(pb, ECR_TST << 5); /* TEST FIFO */
+ for (i = 0; i < 1024 && !(inb(ECONTROL(pb)) & 0x02); i++)
+ outb(0xaa, FIFO(pb));
/*
* Using LGS chipset it uses ECR register, but
* it doesn't support ECP or FIFO MODE
*/
if (i == 1024) {
- ECR_WRITE (pb, ECR_SPP << 5);
+ ECR_WRITE(pb, ECR_SPP << 5);
return 0;
}
priv->fifo_depth = i;
if (verbose_probing)
- printk (KERN_DEBUG "0x%lx: FIFO is %d bytes\n", pb->base, i);
+ printk(KERN_DEBUG "0x%lx: FIFO is %d bytes\n", pb->base, i);
/* Find out writeIntrThreshold */
- frob_econtrol (pb, 1<<2, 1<<2);
- frob_econtrol (pb, 1<<2, 0);
+ frob_econtrol(pb, 1<<2, 1<<2);
+ frob_econtrol(pb, 1<<2, 0);
for (i = 1; i <= priv->fifo_depth; i++) {
- inb (FIFO (pb));
- udelay (50);
- if (inb (ECONTROL (pb)) & (1<<2))
+ inb(FIFO(pb));
+ udelay(50);
+ if (inb(ECONTROL(pb)) & (1<<2))
break;
}
if (i <= priv->fifo_depth) {
if (verbose_probing)
- printk (KERN_DEBUG "0x%lx: writeIntrThreshold is %d\n",
+ printk(KERN_DEBUG "0x%lx: writeIntrThreshold is %d\n",
pb->base, i);
} else
/* Number of bytes we know we can write if we get an
- interrupt. */
+ interrupt. */
i = 0;
priv->writeIntrThreshold = i;
/* Find out readIntrThreshold */
- frob_set_mode (pb, ECR_PS2); /* Reset FIFO and enable PS2 */
- parport_pc_data_reverse (pb); /* Must be in PS2 mode */
- frob_set_mode (pb, ECR_TST); /* Test FIFO */
- frob_econtrol (pb, 1<<2, 1<<2);
- frob_econtrol (pb, 1<<2, 0);
+ frob_set_mode(pb, ECR_PS2); /* Reset FIFO and enable PS2 */
+ parport_pc_data_reverse(pb); /* Must be in PS2 mode */
+ frob_set_mode(pb, ECR_TST); /* Test FIFO */
+ frob_econtrol(pb, 1<<2, 1<<2);
+ frob_econtrol(pb, 1<<2, 0);
for (i = 1; i <= priv->fifo_depth; i++) {
- outb (0xaa, FIFO (pb));
- if (inb (ECONTROL (pb)) & (1<<2))
+ outb(0xaa, FIFO(pb));
+ if (inb(ECONTROL(pb)) & (1<<2))
break;
}
if (i <= priv->fifo_depth) {
if (verbose_probing)
- printk (KERN_INFO "0x%lx: readIntrThreshold is %d\n",
+ printk(KERN_INFO "0x%lx: readIntrThreshold is %d\n",
pb->base, i);
} else
/* Number of bytes we can read if we get an interrupt. */
@@ -1842,23 +1907,23 @@ static int parport_ECP_supported(struct parport *pb)
priv->readIntrThreshold = i;
- ECR_WRITE (pb, ECR_SPP << 5); /* Reset FIFO */
- ECR_WRITE (pb, 0xf4); /* Configuration mode */
- config = inb (CONFIGA (pb));
+ ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
+ ECR_WRITE(pb, 0xf4); /* Configuration mode */
+ config = inb(CONFIGA(pb));
pword = (config >> 4) & 0x7;
switch (pword) {
case 0:
pword = 2;
- printk (KERN_WARNING "0x%lx: Unsupported pword size!\n",
+ printk(KERN_WARNING "0x%lx: Unsupported pword size!\n",
pb->base);
break;
case 2:
pword = 4;
- printk (KERN_WARNING "0x%lx: Unsupported pword size!\n",
+ printk(KERN_WARNING "0x%lx: Unsupported pword size!\n",
pb->base);
break;
default:
- printk (KERN_WARNING "0x%lx: Unknown implementation ID\n",
+ printk(KERN_WARNING "0x%lx: Unknown implementation ID\n",
pb->base);
/* Assume 1 */
case 1:
@@ -1867,28 +1932,29 @@ static int parport_ECP_supported(struct parport *pb)
priv->pword = pword;
if (verbose_probing) {
- printk (KERN_DEBUG "0x%lx: PWord is %d bits\n", pb->base, 8 * pword);
-
- printk (KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n", pb->base,
+ printk(KERN_DEBUG "0x%lx: PWord is %d bits\n",
+ pb->base, 8 * pword);
+
+ printk(KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n", pb->base,
config & 0x80 ? "Level" : "Pulses");
- configb = inb (CONFIGB (pb));
- printk (KERN_DEBUG "0x%lx: ECP port cfgA=0x%02x cfgB=0x%02x\n",
+ configb = inb(CONFIGB(pb));
+ printk(KERN_DEBUG "0x%lx: ECP port cfgA=0x%02x cfgB=0x%02x\n",
pb->base, config, configb);
- printk (KERN_DEBUG "0x%lx: ECP settings irq=", pb->base);
- if ((configb >>3) & 0x07)
- printk("%d",intrline[(configb >>3) & 0x07]);
+ printk(KERN_DEBUG "0x%lx: ECP settings irq=", pb->base);
+ if ((configb >> 3) & 0x07)
+ printk("%d", intrline[(configb >> 3) & 0x07]);
else
printk("<none or set by other means>");
- printk (" dma=");
- if( (configb & 0x03 ) == 0x00)
+ printk(" dma=");
+ if ((configb & 0x03) == 0x00)
printk("<none or set by other means>\n");
else
- printk("%d\n",configb & 0x07);
+ printk("%d\n", configb & 0x07);
}
/* Go back to mode 000 */
- frob_set_mode (pb, ECR_SPP);
+ frob_set_mode(pb, ECR_SPP);
return 1;
}
@@ -1903,10 +1969,10 @@ static int parport_ECPPS2_supported(struct parport *pb)
if (!priv->ecr)
return 0;
- oecr = inb (ECONTROL (pb));
- ECR_WRITE (pb, ECR_PS2 << 5);
+ oecr = inb(ECONTROL(pb));
+ ECR_WRITE(pb, ECR_PS2 << 5);
result = parport_PS2_supported(pb);
- ECR_WRITE (pb, oecr);
+ ECR_WRITE(pb, oecr);
return result;
}
@@ -1930,16 +1996,15 @@ static int parport_EPP_supported(struct parport *pb)
*/
/* If EPP timeout bit clear then EPP available */
- if (!clear_epp_timeout(pb)) {
+ if (!clear_epp_timeout(pb))
return 0; /* No way to clear timeout */
- }
/* Check for Intel bug. */
if (priv->ecr) {
unsigned char i;
for (i = 0x00; i < 0x80; i += 0x20) {
- ECR_WRITE (pb, i);
- if (clear_epp_timeout (pb)) {
+ ECR_WRITE(pb, i);
+ if (clear_epp_timeout(pb)) {
/* Phony EPP in ECP. */
return 0;
}
@@ -1963,17 +2028,16 @@ static int parport_ECPEPP_supported(struct parport *pb)
int result;
unsigned char oecr;
- if (!priv->ecr) {
+ if (!priv->ecr)
return 0;
- }
- oecr = inb (ECONTROL (pb));
+ oecr = inb(ECONTROL(pb));
/* Search for SMC style EPP+ECP mode */
- ECR_WRITE (pb, 0x80);
- outb (0x04, CONTROL (pb));
+ ECR_WRITE(pb, 0x80);
+ outb(0x04, CONTROL(pb));
result = parport_EPP_supported(pb);
- ECR_WRITE (pb, oecr);
+ ECR_WRITE(pb, oecr);
if (result) {
/* Set up access functions to use ECP+EPP hardware. */
@@ -1991,11 +2055,25 @@ static int parport_ECPEPP_supported(struct parport *pb)
/* Don't bother probing for modes we know we won't use. */
static int __devinit parport_PS2_supported(struct parport *pb) { return 0; }
#ifdef CONFIG_PARPORT_PC_FIFO
-static int parport_ECP_supported(struct parport *pb) { return 0; }
+static int parport_ECP_supported(struct parport *pb)
+{
+ return 0;
+}
#endif
-static int __devinit parport_EPP_supported(struct parport *pb) { return 0; }
-static int __devinit parport_ECPEPP_supported(struct parport *pb){return 0;}
-static int __devinit parport_ECPPS2_supported(struct parport *pb){return 0;}
+static int __devinit parport_EPP_supported(struct parport *pb)
+{
+ return 0;
+}
+
+static int __devinit parport_ECPEPP_supported(struct parport *pb)
+{
+ return 0;
+}
+
+static int __devinit parport_ECPPS2_supported(struct parport *pb)
+{
+ return 0;
+}
#endif /* No IEEE 1284 support */
@@ -2005,17 +2083,17 @@ static int __devinit parport_ECPPS2_supported(struct parport *pb){return 0;}
static int programmable_irq_support(struct parport *pb)
{
int irq, intrLine;
- unsigned char oecr = inb (ECONTROL (pb));
+ unsigned char oecr = inb(ECONTROL(pb));
static const int lookup[8] = {
PARPORT_IRQ_NONE, 7, 9, 10, 11, 14, 15, 5
};
- ECR_WRITE (pb, ECR_CNF << 5); /* Configuration MODE */
+ ECR_WRITE(pb, ECR_CNF << 5); /* Configuration MODE */
- intrLine = (inb (CONFIGB (pb)) >> 3) & 0x07;
+ intrLine = (inb(CONFIGB(pb)) >> 3) & 0x07;
irq = lookup[intrLine];
- ECR_WRITE (pb, oecr);
+ ECR_WRITE(pb, oecr);
return irq;
}
@@ -2025,17 +2103,17 @@ static int irq_probe_ECP(struct parport *pb)
unsigned long irqs;
irqs = probe_irq_on();
-
- ECR_WRITE (pb, ECR_SPP << 5); /* Reset FIFO */
- ECR_WRITE (pb, (ECR_TST << 5) | 0x04);
- ECR_WRITE (pb, ECR_TST << 5);
+
+ ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
+ ECR_WRITE(pb, (ECR_TST << 5) | 0x04);
+ ECR_WRITE(pb, ECR_TST << 5);
/* If Full FIFO sure that writeIntrThreshold is generated */
- for (i=0; i < 1024 && !(inb (ECONTROL (pb)) & 0x02) ; i++)
- outb (0xaa, FIFO (pb));
-
+ for (i = 0; i < 1024 && !(inb(ECONTROL(pb)) & 0x02) ; i++)
+ outb(0xaa, FIFO(pb));
+
pb->irq = probe_irq_off(irqs);
- ECR_WRITE (pb, ECR_SPP << 5);
+ ECR_WRITE(pb, ECR_SPP << 5);
if (pb->irq <= 0)
pb->irq = PARPORT_IRQ_NONE;
@@ -2045,7 +2123,7 @@ static int irq_probe_ECP(struct parport *pb)
/*
* This detection seems that only works in National Semiconductors
- * This doesn't work in SMC, LGS, and Winbond
+ * This doesn't work in SMC, LGS, and Winbond
*/
static int irq_probe_EPP(struct parport *pb)
{
@@ -2056,16 +2134,16 @@ static int irq_probe_EPP(struct parport *pb)
unsigned char oecr;
if (pb->modes & PARPORT_MODE_PCECR)
- oecr = inb (ECONTROL (pb));
+ oecr = inb(ECONTROL(pb));
irqs = probe_irq_on();
if (pb->modes & PARPORT_MODE_PCECR)
- frob_econtrol (pb, 0x10, 0x10);
-
+ frob_econtrol(pb, 0x10, 0x10);
+
clear_epp_timeout(pb);
- parport_pc_frob_control (pb, 0x20, 0x20);
- parport_pc_frob_control (pb, 0x10, 0x10);
+ parport_pc_frob_control(pb, 0x20, 0x20);
+ parport_pc_frob_control(pb, 0x10, 0x10);
clear_epp_timeout(pb);
/* Device isn't expecting an EPP read
@@ -2074,9 +2152,9 @@ static int irq_probe_EPP(struct parport *pb)
parport_pc_read_epp(pb);
udelay(20);
- pb->irq = probe_irq_off (irqs);
+ pb->irq = probe_irq_off(irqs);
if (pb->modes & PARPORT_MODE_PCECR)
- ECR_WRITE (pb, oecr);
+ ECR_WRITE(pb, oecr);
parport_pc_write_control(pb, 0xc);
if (pb->irq <= 0)
@@ -2133,28 +2211,28 @@ static int parport_irq_probe(struct parport *pb)
/* --- DMA detection -------------------------------------- */
/* Only if chipset conforms to ECP ISA Interface Standard */
-static int programmable_dma_support (struct parport *p)
+static int programmable_dma_support(struct parport *p)
{
- unsigned char oecr = inb (ECONTROL (p));
+ unsigned char oecr = inb(ECONTROL(p));
int dma;
- frob_set_mode (p, ECR_CNF);
-
- dma = inb (CONFIGB(p)) & 0x07;
+ frob_set_mode(p, ECR_CNF);
+
+ dma = inb(CONFIGB(p)) & 0x07;
/* 000: Indicates jumpered 8-bit DMA if read-only.
100: Indicates jumpered 16-bit DMA if read-only. */
if ((dma & 0x03) == 0)
dma = PARPORT_DMA_NONE;
- ECR_WRITE (p, oecr);
+ ECR_WRITE(p, oecr);
return dma;
}
-static int parport_dma_probe (struct parport *p)
+static int parport_dma_probe(struct parport *p)
{
const struct parport_pc_private *priv = p->private_data;
- if (priv->ecr)
- p->dma = programmable_dma_support(p); /* ask ECP chipset first */
+ if (priv->ecr) /* ask ECP chipset first */
+ p->dma = programmable_dma_support(p);
if (p->dma == PARPORT_DMA_NONE) {
/* ask known Super-IO chips proper, although these
claim ECP compatible, some don't report their DMA
@@ -2212,7 +2290,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (!base_res)
goto out4;
- memcpy(ops, &parport_pc_ops, sizeof (struct parport_operations));
+ memcpy(ops, &parport_pc_ops, sizeof(struct parport_operations));
priv->ctr = 0xc;
priv->ctr_writable = ~0x10;
priv->ecr = 0;
@@ -2239,7 +2317,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (!parport_EPP_supported(p))
parport_ECPEPP_supported(p);
}
- if (!parport_SPP_supported (p))
+ if (!parport_SPP_supported(p))
/* No port. */
goto out5;
if (priv->ecr)
@@ -2247,7 +2325,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
else
parport_PS2_supported(p);
- p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
+ p->size = (p->modes & PARPORT_MODE_EPP) ? 8 : 3;
printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
if (p->base_hi && priv->ecr)
@@ -2271,7 +2349,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
}
}
if (p->dma == PARPORT_DMA_AUTO) /* To use DMA, giving the irq
- is mandatory (see above) */
+ is mandatory (see above) */
p->dma = PARPORT_DMA_NONE;
#ifdef CONFIG_PARPORT_PC_FIFO
@@ -2288,16 +2366,23 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (p->dma != PARPORT_DMA_NONE) {
printk(", dma %d", p->dma);
p->modes |= PARPORT_MODE_DMA;
- }
- else printk(", using FIFO");
- }
- else
+ } else
+ printk(", using FIFO");
+ } else
/* We can't use the DMA channel after all. */
p->dma = PARPORT_DMA_NONE;
#endif /* Allowed to use FIFO/DMA */
printk(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}}
+
+#define printmode(x) \
+ {\
+ if (p->modes & PARPORT_MODE_##x) {\
+ printk("%s%s", f ? "," : "", #x);\
+ f++;\
+ } \
+ }
+
{
int f = 0;
printmode(PCSPP);
@@ -2309,10 +2394,10 @@ struct parport *parport_pc_probe_port(unsigned long int base,
}
#undef printmode
#ifndef CONFIG_PARPORT_1284
- printk ("(,...)");
+ printk("(,...)");
#endif /* CONFIG_PARPORT_1284 */
printk("]\n");
- if (probedirq != PARPORT_IRQ_NONE)
+ if (probedirq != PARPORT_IRQ_NONE)
printk(KERN_INFO "%s: irq %d detected\n", p->name, probedirq);
/* If No ECP release the ports grabbed above. */
@@ -2328,7 +2413,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq(p->irq, parport_irq_handler,
irqflags, p->name, p)) {
- printk (KERN_WARNING "%s: irq %d in use, "
+ printk(KERN_WARNING "%s: irq %d in use, "
"resorting to polled operation\n",
p->name, p->irq);
p->irq = PARPORT_IRQ_NONE;
@@ -2338,8 +2423,8 @@ struct parport *parport_pc_probe_port(unsigned long int base,
#ifdef CONFIG_PARPORT_PC_FIFO
#ifdef HAS_DMA
if (p->dma != PARPORT_DMA_NONE) {
- if (request_dma (p->dma, p->name)) {
- printk (KERN_WARNING "%s: dma %d in use, "
+ if (request_dma(p->dma, p->name)) {
+ printk(KERN_WARNING "%s: dma %d in use, "
"resorting to PIO operation\n",
p->name, p->dma);
p->dma = PARPORT_DMA_NONE;
@@ -2349,8 +2434,8 @@ struct parport *parport_pc_probe_port(unsigned long int base,
PAGE_SIZE,
&priv->dma_handle,
GFP_KERNEL);
- if (! priv->dma_buf) {
- printk (KERN_WARNING "%s: "
+ if (!priv->dma_buf) {
+ printk(KERN_WARNING "%s: "
"cannot get buffer for DMA, "
"resorting to PIO operation\n",
p->name);
@@ -2369,10 +2454,10 @@ struct parport *parport_pc_probe_port(unsigned long int base,
* Put the ECP detected port in PS2 mode.
* Do this also for ports that have ECR but don't do ECP.
*/
- ECR_WRITE (p, 0x34);
+ ECR_WRITE(p, 0x34);
parport_pc_write_data(p, 0);
- parport_pc_data_forward (p);
+ parport_pc_data_forward(p);
/* Now that we've told the sharing engine about the port, and
found out its characteristics, let the high-level drivers
@@ -2380,7 +2465,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
spin_lock(&ports_lock);
list_add(&priv->list, &ports_list);
spin_unlock(&ports_lock);
- parport_announce_port (p);
+ parport_announce_port(p);
return p;
@@ -2393,18 +2478,17 @@ out5:
out4:
parport_put_port(p);
out3:
- kfree (priv);
+ kfree(priv);
out2:
- kfree (ops);
+ kfree(ops);
out1:
if (pdev)
platform_device_unregister(pdev);
return NULL;
}
+EXPORT_SYMBOL(parport_pc_probe_port);
-EXPORT_SYMBOL (parport_pc_probe_port);
-
-void parport_pc_unregister_port (struct parport *p)
+void parport_pc_unregister_port(struct parport *p)
{
struct parport_pc_private *priv = p->private_data;
struct parport_operations *ops = p->ops;
@@ -2430,17 +2514,16 @@ void parport_pc_unregister_port (struct parport *p)
priv->dma_buf,
priv->dma_handle);
#endif
- kfree (p->private_data);
+ kfree(p->private_data);
parport_put_port(p);
- kfree (ops); /* hope no-one cached it */
+ kfree(ops); /* hope no-one cached it */
}
-
-EXPORT_SYMBOL (parport_pc_unregister_port);
+EXPORT_SYMBOL(parport_pc_unregister_port);
#ifdef CONFIG_PCI
/* ITE support maintained by Rich Liu <richliu@poorman.org> */
-static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
+static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
int autodma,
const struct parport_pc_via_data *via)
{
@@ -2452,73 +2535,74 @@ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
int irq;
int i;
- DPRINTK (KERN_DEBUG "sio_ite_8872_probe()\n");
-
- // make sure which one chip
- for(i = 0; i < 5; i++) {
+ DPRINTK(KERN_DEBUG "sio_ite_8872_probe()\n");
+
+ /* work out which chip this is */
+ for (i = 0; i < 5; i++) {
base_res = request_region(inta_addr[i], 32, "it887x");
if (base_res) {
int test;
- pci_write_config_dword (pdev, 0x60,
+ pci_write_config_dword(pdev, 0x60,
0xe5000000 | inta_addr[i]);
- pci_write_config_dword (pdev, 0x78,
+ pci_write_config_dword(pdev, 0x78,
0x00000000 | inta_addr[i]);
- test = inb (inta_addr[i]);
- if (test != 0xff) break;
+ test = inb(inta_addr[i]);
+ if (test != 0xff)
+ break;
release_region(inta_addr[i], 0x8);
}
}
- if(i >= 5) {
- printk (KERN_INFO "parport_pc: cannot find ITE8872 INTA\n");
+ if (i >= 5) {
+ printk(KERN_INFO "parport_pc: cannot find ITE8872 INTA\n");
return 0;
}
- type = inb (inta_addr[i] + 0x18);
+ type = inb(inta_addr[i] + 0x18);
type &= 0x0f;
switch (type) {
case 0x2:
- printk (KERN_INFO "parport_pc: ITE8871 found (1P)\n");
+ printk(KERN_INFO "parport_pc: ITE8871 found (1P)\n");
ite8872set = 0x64200000;
break;
case 0xa:
- printk (KERN_INFO "parport_pc: ITE8875 found (1P)\n");
+ printk(KERN_INFO "parport_pc: ITE8875 found (1P)\n");
ite8872set = 0x64200000;
break;
case 0xe:
- printk (KERN_INFO "parport_pc: ITE8872 found (2S1P)\n");
+ printk(KERN_INFO "parport_pc: ITE8872 found (2S1P)\n");
ite8872set = 0x64e00000;
break;
case 0x6:
- printk (KERN_INFO "parport_pc: ITE8873 found (1S)\n");
+ printk(KERN_INFO "parport_pc: ITE8873 found (1S)\n");
return 0;
case 0x8:
- DPRINTK (KERN_DEBUG "parport_pc: ITE8874 found (2S)\n");
+ DPRINTK(KERN_DEBUG "parport_pc: ITE8874 found (2S)\n");
return 0;
default:
- printk (KERN_INFO "parport_pc: unknown ITE887x\n");
- printk (KERN_INFO "parport_pc: please mail 'lspci -nvv' "
+ printk(KERN_INFO "parport_pc: unknown ITE887x\n");
+ printk(KERN_INFO "parport_pc: please mail 'lspci -nvv' "
"output to Rich.Liu@ite.com.tw\n");
return 0;
}
- pci_read_config_byte (pdev, 0x3c, &ite8872_irq);
- pci_read_config_dword (pdev, 0x1c, &ite8872_lpt);
+ pci_read_config_byte(pdev, 0x3c, &ite8872_irq);
+ pci_read_config_dword(pdev, 0x1c, &ite8872_lpt);
ite8872_lpt &= 0x0000ff00;
- pci_read_config_dword (pdev, 0x20, &ite8872_lpthi);
+ pci_read_config_dword(pdev, 0x20, &ite8872_lpthi);
ite8872_lpthi &= 0x0000ff00;
- pci_write_config_dword (pdev, 0x6c, 0xe3000000 | ite8872_lpt);
- pci_write_config_dword (pdev, 0x70, 0xe3000000 | ite8872_lpthi);
- pci_write_config_dword (pdev, 0x80, (ite8872_lpthi<<16) | ite8872_lpt);
- // SET SPP&EPP , Parallel Port NO DMA , Enable All Function
- // SET Parallel IRQ
- pci_write_config_dword (pdev, 0x9c,
+ pci_write_config_dword(pdev, 0x6c, 0xe3000000 | ite8872_lpt);
+ pci_write_config_dword(pdev, 0x70, 0xe3000000 | ite8872_lpthi);
+ pci_write_config_dword(pdev, 0x80, (ite8872_lpthi<<16) | ite8872_lpt);
+ /* SET SPP&EPP , Parallel Port NO DMA , Enable All Function */
+ /* SET Parallel IRQ */
+ pci_write_config_dword(pdev, 0x9c,
ite8872set | (ite8872_irq * 0x11111));
- DPRINTK (KERN_DEBUG "ITE887x: The IRQ is %d.\n", ite8872_irq);
- DPRINTK (KERN_DEBUG "ITE887x: The PARALLEL I/O port is 0x%x.\n",
+ DPRINTK(KERN_DEBUG "ITE887x: The IRQ is %d.\n", ite8872_irq);
+ DPRINTK(KERN_DEBUG "ITE887x: The PARALLEL I/O port is 0x%x.\n",
ite8872_lpt);
- DPRINTK (KERN_DEBUG "ITE887x: The PARALLEL I/O porthi is 0x%x.\n",
+ DPRINTK(KERN_DEBUG "ITE887x: The PARALLEL I/O porthi is 0x%x.\n",
ite8872_lpthi);
/* Let the user (or defaults) steer us away from interrupts */
@@ -2530,14 +2614,14 @@ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
* Release the resource so that parport_pc_probe_port can get it.
*/
release_resource(base_res);
- if (parport_pc_probe_port (ite8872_lpt, ite8872_lpthi,
+ if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi,
irq, PARPORT_DMA_NONE, &pdev->dev, 0)) {
- printk (KERN_INFO
+ printk(KERN_INFO
"parport_pc: ITE 8872 parallel port: io=0x%X",
- ite8872_lpt);
+ ite8872_lpt);
if (irq != PARPORT_IRQ_NONE)
- printk (", irq=%d", irq);
- printk ("\n");
+ printk(", irq=%d", irq);
+ printk("\n");
return 1;
}
@@ -2546,7 +2630,7 @@ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
/* VIA 8231 support by Pavel Fedin <sonic_amiga@rambler.ru>
based on VIA 686a support code by Jeff Garzik <jgarzik@pobox.com> */
-static int __devinitdata parport_init_mode = 0;
+static int __devinitdata parport_init_mode;
/* Data for two known VIA chips */
static struct parport_pc_via_data via_686a_data __devinitdata = {
@@ -2568,7 +2652,7 @@ static struct parport_pc_via_data via_8231_data __devinitdata = {
0xF6
};
-static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
+static int __devinit sio_via_probe(struct pci_dev *pdev, int autoirq,
int autodma,
const struct parport_pc_via_data *via)
{
@@ -2580,38 +2664,38 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
printk(KERN_DEBUG "parport_pc: VIA 686A/8231 detected\n");
- switch(parport_init_mode)
- {
+ switch (parport_init_mode) {
case 1:
- printk(KERN_DEBUG "parport_pc: setting SPP mode\n");
- siofunc = VIA_FUNCTION_PARPORT_SPP;
- break;
+ printk(KERN_DEBUG "parport_pc: setting SPP mode\n");
+ siofunc = VIA_FUNCTION_PARPORT_SPP;
+ break;
case 2:
- printk(KERN_DEBUG "parport_pc: setting PS/2 mode\n");
- siofunc = VIA_FUNCTION_PARPORT_SPP;
- ppcontrol = VIA_PARPORT_BIDIR;
- break;
+ printk(KERN_DEBUG "parport_pc: setting PS/2 mode\n");
+ siofunc = VIA_FUNCTION_PARPORT_SPP;
+ ppcontrol = VIA_PARPORT_BIDIR;
+ break;
case 3:
- printk(KERN_DEBUG "parport_pc: setting EPP mode\n");
- siofunc = VIA_FUNCTION_PARPORT_EPP;
- ppcontrol = VIA_PARPORT_BIDIR;
- have_epp = 1;
- break;
+ printk(KERN_DEBUG "parport_pc: setting EPP mode\n");
+ siofunc = VIA_FUNCTION_PARPORT_EPP;
+ ppcontrol = VIA_PARPORT_BIDIR;
+ have_epp = 1;
+ break;
case 4:
- printk(KERN_DEBUG "parport_pc: setting ECP mode\n");
- siofunc = VIA_FUNCTION_PARPORT_ECP;
- ppcontrol = VIA_PARPORT_BIDIR;
- break;
+ printk(KERN_DEBUG "parport_pc: setting ECP mode\n");
+ siofunc = VIA_FUNCTION_PARPORT_ECP;
+ ppcontrol = VIA_PARPORT_BIDIR;
+ break;
case 5:
- printk(KERN_DEBUG "parport_pc: setting EPP+ECP mode\n");
- siofunc = VIA_FUNCTION_PARPORT_ECP;
- ppcontrol = VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP;
- have_epp = 1;
- break;
- default:
- printk(KERN_DEBUG "parport_pc: probing current configuration\n");
- siofunc = VIA_FUNCTION_PROBE;
- break;
+ printk(KERN_DEBUG "parport_pc: setting EPP+ECP mode\n");
+ siofunc = VIA_FUNCTION_PARPORT_ECP;
+ ppcontrol = VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP;
+ have_epp = 1;
+ break;
+ default:
+ printk(KERN_DEBUG
+ "parport_pc: probing current configuration\n");
+ siofunc = VIA_FUNCTION_PROBE;
+ break;
}
/*
* unlock super i/o configuration
@@ -2622,38 +2706,36 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
/* Bits 1-0: Parallel Port Mode / Enable */
outb(via->viacfg_function, VIA_CONFIG_INDEX);
- tmp = inb (VIA_CONFIG_DATA);
+ tmp = inb(VIA_CONFIG_DATA);
/* Bit 5: EPP+ECP enable; bit 7: PS/2 bidirectional port enable */
outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
- tmp2 = inb (VIA_CONFIG_DATA);
- if (siofunc == VIA_FUNCTION_PROBE)
- {
- siofunc = tmp & VIA_FUNCTION_PARPORT_DISABLE;
- ppcontrol = tmp2;
+ tmp2 = inb(VIA_CONFIG_DATA);
+ if (siofunc == VIA_FUNCTION_PROBE) {
+ siofunc = tmp & VIA_FUNCTION_PARPORT_DISABLE;
+ ppcontrol = tmp2;
+ } else {
+ tmp &= ~VIA_FUNCTION_PARPORT_DISABLE;
+ tmp |= siofunc;
+ outb(via->viacfg_function, VIA_CONFIG_INDEX);
+ outb(tmp, VIA_CONFIG_DATA);
+ tmp2 &= ~(VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP);
+ tmp2 |= ppcontrol;
+ outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
+ outb(tmp2, VIA_CONFIG_DATA);
}
- else
- {
- tmp &= ~VIA_FUNCTION_PARPORT_DISABLE;
- tmp |= siofunc;
- outb(via->viacfg_function, VIA_CONFIG_INDEX);
- outb(tmp, VIA_CONFIG_DATA);
- tmp2 &= ~(VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP);
- tmp2 |= ppcontrol;
- outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
- outb(tmp2, VIA_CONFIG_DATA);
- }
-
+
/* Parallel Port I/O Base Address, bits 9-2 */
outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
port1 = inb(VIA_CONFIG_DATA) << 2;
-
- printk (KERN_DEBUG "parport_pc: Current parallel port base: 0x%X\n",port1);
- if ((port1 == 0x3BC) && have_epp)
- {
- outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
- outb((0x378 >> 2), VIA_CONFIG_DATA);
- printk(KERN_DEBUG "parport_pc: Parallel port base changed to 0x378\n");
- port1 = 0x378;
+
+ printk(KERN_DEBUG "parport_pc: Current parallel port base: 0x%X\n",
+ port1);
+ if (port1 == 0x3BC && have_epp) {
+ outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
+ outb((0x378 >> 2), VIA_CONFIG_DATA);
+ printk(KERN_DEBUG
+ "parport_pc: Parallel port base changed to 0x378\n");
+ port1 = 0x378;
}
/*
@@ -2667,36 +2749,39 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
printk(KERN_INFO "parport_pc: VIA parallel port disabled in BIOS\n");
return 0;
}
-
+
/* Bits 7-4: PnP Routing for Parallel Port IRQ */
pci_read_config_byte(pdev, via->via_pci_parport_irq_reg, &tmp);
irq = ((tmp & VIA_IRQCONTROL_PARALLEL) >> 4);
- if (siofunc == VIA_FUNCTION_PARPORT_ECP)
- {
- /* Bits 3-2: PnP Routing for Parallel Port DMA */
- pci_read_config_byte(pdev, via->via_pci_parport_dma_reg, &tmp);
- dma = ((tmp & VIA_DMACONTROL_PARALLEL) >> 2);
- }
- else
- /* if ECP not enabled, DMA is not enabled, assumed bogus 'dma' value */
- dma = PARPORT_DMA_NONE;
+ if (siofunc == VIA_FUNCTION_PARPORT_ECP) {
+ /* Bits 3-2: PnP Routing for Parallel Port DMA */
+ pci_read_config_byte(pdev, via->via_pci_parport_dma_reg, &tmp);
+ dma = ((tmp & VIA_DMACONTROL_PARALLEL) >> 2);
+ } else
+ /* If ECP is not enabled, DMA is not enabled either;
+ any 'dma' value would be bogus */
+ dma = PARPORT_DMA_NONE;
/* Let the user (or defaults) steer us away from interrupts and DMA */
if (autoirq == PARPORT_IRQ_NONE) {
- irq = PARPORT_IRQ_NONE;
- dma = PARPORT_DMA_NONE;
+ irq = PARPORT_IRQ_NONE;
+ dma = PARPORT_DMA_NONE;
}
if (autodma == PARPORT_DMA_NONE)
- dma = PARPORT_DMA_NONE;
+ dma = PARPORT_DMA_NONE;
switch (port1) {
- case 0x3bc: port2 = 0x7bc; break;
- case 0x378: port2 = 0x778; break;
- case 0x278: port2 = 0x678; break;
+ case 0x3bc:
+ port2 = 0x7bc; break;
+ case 0x378:
+ port2 = 0x778; break;
+ case 0x278:
+ port2 = 0x678; break;
default:
- printk(KERN_INFO "parport_pc: Weird VIA parport base 0x%X, ignoring\n",
- port1);
+ printk(KERN_INFO
+ "parport_pc: Weird VIA parport base 0x%X, ignoring\n",
+ port1);
return 0;
}
@@ -2714,17 +2799,17 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
}
/* finally, do the probe with values obtained */
- if (parport_pc_probe_port (port1, port2, irq, dma, &pdev->dev, 0)) {
- printk (KERN_INFO
+ if (parport_pc_probe_port(port1, port2, irq, dma, &pdev->dev, 0)) {
+ printk(KERN_INFO
"parport_pc: VIA parallel port: io=0x%X", port1);
if (irq != PARPORT_IRQ_NONE)
- printk (", irq=%d", irq);
+ printk(", irq=%d", irq);
if (dma != PARPORT_DMA_NONE)
- printk (", dma=%d", dma);
- printk ("\n");
+ printk(", dma=%d", dma);
+ printk("\n");
return 1;
}
-
+
printk(KERN_WARNING "parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n",
port1, irq, dma);
return 0;
@@ -2732,8 +2817,8 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
enum parport_pc_sio_types {
- sio_via_686a = 0, /* Via VT82C686A motherboard Super I/O */
- sio_via_8231, /* Via VT8231 south bridge integrated Super IO */
+ sio_via_686a = 0, /* Via VT82C686A motherboard Super I/O */
+ sio_via_8231, /* Via VT8231 south bridge integrated Super IO */
sio_ite_8872,
last_sio
};
@@ -2804,15 +2889,15 @@ enum parport_pc_pci_cards {
};
-/* each element directly indexed from enum list, above
+/* each element directly indexed from enum list, above
* (but offset by last_sio) */
static struct parport_pc_pci {
int numports;
struct { /* BAR (base address registers) numbers in the config
- space header */
+ space header */
int lo;
- int hi; /* -1 if not there, >6 for offset-method (max
- BAR is 6) */
+ int hi;
+ /* -1 if not there, >6 for offset-method (max BAR is 6) */
} addr[4];
/* If set, this is called immediately after pci_enable_device.
@@ -2857,7 +2942,7 @@ static struct parport_pc_pci {
/* timedia_4018 */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* timedia_9018a */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* SYBA uses fixed offsets in
- a 1K io window */
+ a 1K io window */
/* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
/* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
/* titan_010l */ { 1, { { 3, -1 }, } },
@@ -2873,11 +2958,14 @@ static struct parport_pc_pci {
/* oxsemi_pcie_pport */ { 1, { { 0, 1 }, } },
/* aks_0100 */ { 1, { { 0, -1 }, } },
/* mobility_pp */ { 1, { { 0, 1 }, } },
- /* netmos_9705 */ { 1, { { 0, -1 }, } }, /* untested */
- /* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} }, /* untested */
- /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} }, /* untested */
- /* netmos_9805 */ { 1, { { 0, -1 }, } }, /* untested */
- /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, /* untested */
+
+ /* The netmos entries below are untested */
+ /* netmos_9705 */ { 1, { { 0, -1 }, } },
+ /* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} },
+ /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
+ /* netmos_9805 */ { 1, { { 0, -1 }, } },
+ /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
+
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
};
@@ -2906,7 +2994,7 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_BOCA_IOPPAR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, boca_ioppar },
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
- PCI_SUBVENDOR_ID_EXSYS, PCI_SUBDEVICE_ID_EXSYS_4014, 0,0, plx_9050 },
+ PCI_SUBVENDOR_ID_EXSYS, PCI_SUBDEVICE_ID_EXSYS_4014, 0, 0, plx_9050 },
/* PCI_VENDOR_ID_TIMEDIA/SUNIX has many differing cards ...*/
{ 0x1409, 0x7168, 0x1409, 0x4078, 0, 0, timedia_4078a },
{ 0x1409, 0x7168, 0x1409, 0x4079, 0, 0, timedia_4079h },
@@ -2940,7 +3028,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
{ 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 },
{ 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 },
/* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
- { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, /* AFAVLAB_TK9902 */
+ /* AFAVLAB_TK9902 */
+ { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p},
{ 0x14db, 0x2121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2p},
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952PP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_952 },
@@ -2983,14 +3072,14 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
{ 0, } /* terminate list */
};
-MODULE_DEVICE_TABLE(pci,parport_pc_pci_tbl);
+MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
struct pci_parport_data {
int num;
struct parport *ports[2];
};
-static int parport_pc_pci_probe (struct pci_dev *dev,
+static int parport_pc_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
int err, count, n, i = id->driver_data;
@@ -3003,7 +3092,8 @@ static int parport_pc_pci_probe (struct pci_dev *dev,
/* This is a PCI card */
i -= last_sio;
count = 0;
- if ((err = pci_enable_device (dev)) != 0)
+ err = pci_enable_device(dev);
+ if (err)
return err;
data = kmalloc(sizeof(struct pci_parport_data), GFP_KERNEL);
@@ -3011,7 +3101,7 @@ static int parport_pc_pci_probe (struct pci_dev *dev,
return -ENOMEM;
if (cards[i].preinit_hook &&
- cards[i].preinit_hook (dev, PARPORT_IRQ_NONE, PARPORT_DMA_NONE)) {
+ cards[i].preinit_hook(dev, PARPORT_IRQ_NONE, PARPORT_DMA_NONE)) {
kfree(data);
return -ENODEV;
}
@@ -3021,25 +3111,25 @@ static int parport_pc_pci_probe (struct pci_dev *dev,
int hi = cards[i].addr[n].hi;
int irq;
unsigned long io_lo, io_hi;
- io_lo = pci_resource_start (dev, lo);
+ io_lo = pci_resource_start(dev, lo);
io_hi = 0;
if ((hi >= 0) && (hi <= 6))
- io_hi = pci_resource_start (dev, hi);
+ io_hi = pci_resource_start(dev, hi);
else if (hi > 6)
io_lo += hi; /* Reinterpret the meaning of
- "hi" as an offset (see SYBA
- def.) */
+ "hi" as an offset (see SYBA
+ def.) */
/* TODO: test if sharing interrupts works */
irq = dev->irq;
if (irq == IRQ_NONE) {
- printk (KERN_DEBUG
+ printk(KERN_DEBUG
"PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
parport_pc_pci_tbl[i + last_sio].vendor,
parport_pc_pci_tbl[i + last_sio].device,
io_lo, io_hi);
irq = PARPORT_IRQ_NONE;
} else {
- printk (KERN_DEBUG
+ printk(KERN_DEBUG
"PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
parport_pc_pci_tbl[i + last_sio].vendor,
parport_pc_pci_tbl[i + last_sio].device,
@@ -3056,7 +3146,7 @@ static int parport_pc_pci_probe (struct pci_dev *dev,
data->num = count;
if (cards[i].postinit_hook)
- cards[i].postinit_hook (dev, count == 0);
+ cards[i].postinit_hook(dev, count == 0);
if (count) {
pci_set_drvdata(dev, data);
@@ -3090,7 +3180,7 @@ static struct pci_driver parport_pc_pci_driver = {
.remove = __devexit_p(parport_pc_pci_remove),
};
-static int __init parport_pc_init_superio (int autoirq, int autodma)
+static int __init parport_pc_init_superio(int autoirq, int autodma)
{
const struct pci_device_id *id;
struct pci_dev *pdev = NULL;
@@ -3101,8 +3191,9 @@ static int __init parport_pc_init_superio (int autoirq, int autodma)
if (id == NULL || id->driver_data >= last_sio)
continue;
- if (parport_pc_superio_info[id->driver_data].probe
- (pdev, autoirq, autodma,parport_pc_superio_info[id->driver_data].via)) {
+ if (parport_pc_superio_info[id->driver_data].probe(
+ pdev, autoirq, autodma,
+ parport_pc_superio_info[id->driver_data].via)) {
ret++;
}
}
@@ -3111,7 +3202,10 @@ static int __init parport_pc_init_superio (int autoirq, int autodma)
}
#else
static struct pci_driver parport_pc_pci_driver;
-static int __init parport_pc_init_superio(int autoirq, int autodma) {return 0;}
+static int __init parport_pc_init_superio(int autoirq, int autodma)
+{
+ return 0;
+}
#endif /* CONFIG_PCI */
#ifdef CONFIG_PNP
@@ -3124,44 +3218,45 @@ static const struct pnp_device_id parport_pc_pnp_tbl[] = {
{ }
};
-MODULE_DEVICE_TABLE(pnp,parport_pc_pnp_tbl);
+MODULE_DEVICE_TABLE(pnp, parport_pc_pnp_tbl);
-static int parport_pc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
+static int parport_pc_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *id)
{
struct parport *pdata;
unsigned long io_lo, io_hi;
int dma, irq;
- if (pnp_port_valid(dev,0) &&
- !(pnp_port_flags(dev,0) & IORESOURCE_DISABLED)) {
- io_lo = pnp_port_start(dev,0);
+ if (pnp_port_valid(dev, 0) &&
+ !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
+ io_lo = pnp_port_start(dev, 0);
} else
return -EINVAL;
- if (pnp_port_valid(dev,1) &&
- !(pnp_port_flags(dev,1) & IORESOURCE_DISABLED)) {
- io_hi = pnp_port_start(dev,1);
+ if (pnp_port_valid(dev, 1) &&
+ !(pnp_port_flags(dev, 1) & IORESOURCE_DISABLED)) {
+ io_hi = pnp_port_start(dev, 1);
} else
io_hi = 0;
- if (pnp_irq_valid(dev,0) &&
- !(pnp_irq_flags(dev,0) & IORESOURCE_DISABLED)) {
- irq = pnp_irq(dev,0);
+ if (pnp_irq_valid(dev, 0) &&
+ !(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED)) {
+ irq = pnp_irq(dev, 0);
} else
irq = PARPORT_IRQ_NONE;
- if (pnp_dma_valid(dev,0) &&
- !(pnp_dma_flags(dev,0) & IORESOURCE_DISABLED)) {
- dma = pnp_dma(dev,0);
+ if (pnp_dma_valid(dev, 0) &&
+ !(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED)) {
+ dma = pnp_dma(dev, 0);
} else
dma = PARPORT_DMA_NONE;
dev_info(&dev->dev, "reported by %s\n", dev->protocol->name);
- if (!(pdata = parport_pc_probe_port(io_lo, io_hi,
- irq, dma, &dev->dev, 0)))
+ pdata = parport_pc_probe_port(io_lo, io_hi, irq, dma, &dev->dev, 0);
+ if (pdata == NULL)
return -ENODEV;
- pnp_set_drvdata(dev,pdata);
+ pnp_set_drvdata(dev, pdata);
return 0;
}
@@ -3203,7 +3298,7 @@ static struct platform_driver parport_pc_platform_driver = {
/* This is called by parport_pc_find_nonpci_ports (in asm/parport.h) */
static int __devinit __attribute__((unused))
-parport_pc_find_isa_ports (int autoirq, int autodma)
+parport_pc_find_isa_ports(int autoirq, int autodma)
{
int count = 0;
@@ -3227,7 +3322,7 @@ parport_pc_find_isa_ports (int autoirq, int autodma)
* autoirq is PARPORT_IRQ_NONE, PARPORT_IRQ_AUTO, or PARPORT_IRQ_PROBEONLY
* autodma is PARPORT_DMA_NONE or PARPORT_DMA_AUTO
*/
-static void __init parport_pc_find_ports (int autoirq, int autodma)
+static void __init parport_pc_find_ports(int autoirq, int autodma)
{
int count = 0, err;
@@ -3261,11 +3356,18 @@ static void __init parport_pc_find_ports (int autoirq, int autodma)
* syntax and keep in mind that code below is a cleaned up version.
*/
-static int __initdata io[PARPORT_PC_MAX_PORTS+1] = { [0 ... PARPORT_PC_MAX_PORTS] = 0 };
-static int __initdata io_hi[PARPORT_PC_MAX_PORTS+1] =
- { [0 ... PARPORT_PC_MAX_PORTS] = PARPORT_IOHI_AUTO };
-static int __initdata dmaval[PARPORT_PC_MAX_PORTS] = { [0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_DMA_NONE };
-static int __initdata irqval[PARPORT_PC_MAX_PORTS] = { [0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_IRQ_PROBEONLY };
+static int __initdata io[PARPORT_PC_MAX_PORTS+1] = {
+ [0 ... PARPORT_PC_MAX_PORTS] = 0
+};
+static int __initdata io_hi[PARPORT_PC_MAX_PORTS+1] = {
+ [0 ... PARPORT_PC_MAX_PORTS] = PARPORT_IOHI_AUTO
+};
+static int __initdata dmaval[PARPORT_PC_MAX_PORTS] = {
+ [0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_DMA_NONE
+};
+static int __initdata irqval[PARPORT_PC_MAX_PORTS] = {
+ [0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_IRQ_PROBEONLY
+};
static int __init parport_parse_param(const char *s, int *val,
int automatic, int none, int nofifo)
@@ -3306,18 +3408,19 @@ static int __init parport_parse_dma(const char *dmastr, int *val)
#ifdef CONFIG_PCI
static int __init parport_init_mode_setup(char *str)
{
- printk(KERN_DEBUG "parport_pc.c: Specified parameter parport_init_mode=%s\n", str);
-
- if (!strcmp (str, "spp"))
- parport_init_mode=1;
- if (!strcmp (str, "ps2"))
- parport_init_mode=2;
- if (!strcmp (str, "epp"))
- parport_init_mode=3;
- if (!strcmp (str, "ecp"))
- parport_init_mode=4;
- if (!strcmp (str, "ecpepp"))
- parport_init_mode=5;
+ printk(KERN_DEBUG
+ "parport_pc.c: Specified parameter parport_init_mode=%s\n", str);
+
+ if (!strcmp(str, "spp"))
+ parport_init_mode = 1;
+ if (!strcmp(str, "ps2"))
+ parport_init_mode = 2;
+ if (!strcmp(str, "epp"))
+ parport_init_mode = 3;
+ if (!strcmp(str, "ecp"))
+ parport_init_mode = 4;
+ if (!strcmp(str, "ecpepp"))
+ parport_init_mode = 5;
return 1;
}
#endif
@@ -3341,7 +3444,8 @@ module_param(verbose_probing, int, 0644);
#endif
#ifdef CONFIG_PCI
static char *init_mode;
-MODULE_PARM_DESC(init_mode, "Initialise mode for VIA VT8231 port (spp, ps2, epp, ecp or ecpepp)");
+MODULE_PARM_DESC(init_mode,
+ "Initialise mode for VIA VT8231 port (spp, ps2, epp, ecp or ecpepp)");
module_param(init_mode, charp, 0);
#endif
@@ -3372,7 +3476,7 @@ static int __init parse_parport_params(void)
irqval[0] = val;
break;
default:
- printk (KERN_WARNING
+ printk(KERN_WARNING
"parport_pc: irq specified "
"without base address. Use 'io=' "
"to specify one\n");
@@ -3385,7 +3489,7 @@ static int __init parse_parport_params(void)
dmaval[0] = val;
break;
default:
- printk (KERN_WARNING
+ printk(KERN_WARNING
"parport_pc: dma specified "
"without base address. Use 'io=' "
"to specify one\n");
@@ -3396,7 +3500,7 @@ static int __init parse_parport_params(void)
#else
-static int parport_setup_ptr __initdata = 0;
+static int parport_setup_ptr __initdata;
/*
* Acceptable parameters:
@@ -3407,7 +3511,7 @@ static int parport_setup_ptr __initdata = 0;
*
* IRQ/DMA may be numeric or 'auto' or 'none'
*/
-static int __init parport_setup (char *str)
+static int __init parport_setup(char *str)
{
char *endptr;
char *sep;
@@ -3419,15 +3523,15 @@ static int __init parport_setup (char *str)
return 1;
}
- if (!strncmp (str, "auto", 4)) {
+ if (!strncmp(str, "auto", 4)) {
irqval[0] = PARPORT_IRQ_AUTO;
dmaval[0] = PARPORT_DMA_AUTO;
return 1;
}
- val = simple_strtoul (str, &endptr, 0);
+ val = simple_strtoul(str, &endptr, 0);
if (endptr == str) {
- printk (KERN_WARNING "parport=%s not understood\n", str);
+ printk(KERN_WARNING "parport=%s not understood\n", str);
return 1;
}
@@ -3461,7 +3565,7 @@ static int __init parse_parport_params(void)
return io[0] == PARPORT_DISABLE;
}
-__setup ("parport=", parport_setup);
+__setup("parport=", parport_setup);
/*
* Acceptable parameters:
@@ -3469,7 +3573,7 @@ __setup ("parport=", parport_setup);
* parport_init_mode=[spp|ps2|epp|ecp|ecpepp]
*/
#ifdef CONFIG_PCI
-__setup("parport_init_mode=",parport_init_mode_setup);
+__setup("parport_init_mode=", parport_init_mode_setup);
#endif
#endif
@@ -3493,13 +3597,13 @@ static int __init parport_pc_init(void)
for (i = 0; i < PARPORT_PC_MAX_PORTS; i++) {
if (!io[i])
break;
- if ((io_hi[i]) == PARPORT_IOHI_AUTO)
- io_hi[i] = 0x400 + io[i];
+ if (io_hi[i] == PARPORT_IOHI_AUTO)
+ io_hi[i] = 0x400 + io[i];
parport_pc_probe_port(io[i], io_hi[i],
- irqval[i], dmaval[i], NULL, 0);
+ irqval[i], dmaval[i], NULL, 0);
}
} else
- parport_pc_find_ports (irqval[0], dmaval[0]);
+ parport_pc_find_ports(irqval[0], dmaval[0]);
return 0;
}
@@ -3507,9 +3611,9 @@ static int __init parport_pc_init(void)
static void __exit parport_pc_exit(void)
{
if (pci_registered_parport)
- pci_unregister_driver (&parport_pc_pci_driver);
+ pci_unregister_driver(&parport_pc_pci_driver);
if (pnp_registered_parport)
- pnp_unregister_driver (&parport_pc_pnp_driver);
+ pnp_unregister_driver(&parport_pc_pnp_driver);
platform_driver_unregister(&parport_pc_platform_driver);
while (!list_empty(&ports_list)) {
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 0ebca450ed2..dffa5d4fb29 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -614,7 +614,10 @@ parport_register_device(struct parport *port, const char *name,
* pardevice fields. -arca
*/
port->ops->init_state(tmp, tmp->state);
- parport_device_proc_register(tmp);
+ if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
+ port->proc_device = tmp;
+ parport_device_proc_register(tmp);
+ }
return tmp;
out_free_all:
@@ -646,10 +649,14 @@ void parport_unregister_device(struct pardevice *dev)
}
#endif
- parport_device_proc_unregister(dev);
-
port = dev->port->physport;
+ if (port->proc_device == dev) {
+ port->proc_device = NULL;
+ clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
+ parport_device_proc_unregister(dev);
+ }
+
if (port->cad == dev) {
printk(KERN_DEBUG "%s: %s forgot to release port\n",
port->name, dev->name);
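
The share.c hunk above registers the /proc entry for only the first device on each port, guarded by an atomic bit in port->devflags. A minimal sketch of that guard pattern follows, assuming the struct parport fields and proc helpers declared in <linux/parport.h>; the function names here are illustrative, not from the patch:

#include <linux/bitops.h>
#include <linux/parport.h>

/* Register the per-port proc entry exactly once: test_and_set_bit()
 * atomically sets the flag and returns its previous value, so only
 * the first caller performs the registration. */
static void example_proc_register(struct parport *port, struct pardevice *dev)
{
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = dev;
		parport_device_proc_register(dev);
	}
}

/* Only the device that owns the proc entry tears it down again. */
static void example_proc_unregister(struct parport *port, struct pardevice *dev)
{
	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}
}
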
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index ba6af162fd3..b77ae679427 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -39,7 +39,6 @@ obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
obj-$(CONFIG_PARISC) += setup-bus.o
obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
-obj-$(CONFIG_PPC32) += setup-irq.o
obj-$(CONFIG_PPC) += setup-bus.o
obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
obj-$(CONFIG_X86_VISWS) += setup-irq.o
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 4fc168b7009..e68d5f20ffb 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -129,7 +129,6 @@ struct acpiphp_func {
struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */
struct list_head sibling;
- struct pci_dev *pci_dev;
struct notifier_block nb;
acpi_handle handle;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a33794d9e0d..3a6064bce56 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -32,9 +32,6 @@
/*
* Lifetime rules for pci_dev:
- * - The one in acpiphp_func has its refcount elevated by pci_get_slot()
- * when the driver is loaded or when an insertion event occurs. It loses
- * a refcount when its ejected or the driver unloads.
* - The one in acpiphp_bridge has its refcount elevated by pci_get_slot()
* when the bridge is scanned and it loses a refcount when the bridge
* is removed.
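
The lifetime-rules comment above carries the reasoning behind this change: rather than caching a pci_dev in acpiphp_func, the code now takes a short-lived reference with pci_get_slot() and drops it with pci_dev_put() in the same scope. A minimal sketch of that pattern, using only calls that appear in this diff (the helper name is illustrative):

#include <linux/pci.h>

/* Look up the pci_dev at (device, function) on a bus, inspect it,
 * and release the reference pci_get_slot() took before returning. */
static bool example_slot_func_present(struct pci_bus *bus, int device, int function)
{
	struct pci_dev *pdev;

	pdev = pci_get_slot(bus, PCI_DEVFN(device, function));
	if (!pdev)
		return false;	/* nothing at this devfn */

	/* ... inspect pdev (e.g. pdev->hdr_type) while the reference is held ... */

	pci_dev_put(pdev);	/* balance the reference from pci_get_slot() */
	return true;
}
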
@@ -130,6 +127,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
unsigned long long adr, sun;
int device, function, retval;
struct pci_bus *pbus = bridge->pci_bus;
+ struct pci_dev *pdev;
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
@@ -213,10 +211,10 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
newfunc->slot = slot;
list_add_tail(&newfunc->sibling, &slot->funcs);
- /* associate corresponding pci_dev */
- newfunc->pci_dev = pci_get_slot(pbus, PCI_DEVFN(device, function));
- if (newfunc->pci_dev) {
+ pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
+ if (pdev) {
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
+ pci_dev_put(pdev);
}
if (is_dock_device(handle)) {
@@ -617,7 +615,6 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
if (ACPI_FAILURE(status))
err("failed to remove notify handler\n");
}
- pci_dev_put(func->pci_dev);
list_del(list);
kfree(func);
}
@@ -1101,22 +1098,24 @@ static int __ref enable_device(struct acpiphp_slot *slot)
pci_enable_bridges(bus);
pci_bus_add_devices(bus);
- /* associate pci_dev to our representation */
list_for_each (l, &slot->funcs) {
func = list_entry(l, struct acpiphp_func, sibling);
- func->pci_dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
- func->function));
- if (!func->pci_dev)
+ dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
+ func->function));
+ if (!dev)
continue;
- if (func->pci_dev->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
- func->pci_dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
+ if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
+ dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) {
+ pci_dev_put(dev);
continue;
+ }
status = find_p2p_bridge(func->handle, (u32)1, bus, NULL);
if (ACPI_FAILURE(status))
warn("find_p2p_bridge failed (error code = 0x%x)\n",
status);
+ pci_dev_put(dev);
}
slot->flags |= SLOT_ENABLED;
@@ -1142,17 +1141,14 @@ static void disable_bridges(struct pci_bus *bus)
*/
static int disable_device(struct acpiphp_slot *slot)
{
- int retval = 0;
struct acpiphp_func *func;
- struct list_head *l;
+ struct pci_dev *pdev;
/* is this slot already disabled? */
if (!(slot->flags & SLOT_ENABLED))
goto err_exit;
- list_for_each (l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func, sibling);
-
+ list_for_each_entry(func, &slot->funcs, sibling) {
if (func->bridge) {
/* cleanup p2p bridges under this P2P bridge */
cleanup_p2p_bridge(func->bridge->handle,
@@ -1160,35 +1156,28 @@ static int disable_device(struct acpiphp_slot *slot)
func->bridge = NULL;
}
- if (func->pci_dev) {
- pci_stop_bus_device(func->pci_dev);
- if (func->pci_dev->subordinate) {
- disable_bridges(func->pci_dev->subordinate);
- pci_disable_device(func->pci_dev);
+ pdev = pci_get_slot(slot->bridge->pci_bus,
+ PCI_DEVFN(slot->device, func->function));
+ if (pdev) {
+ pci_stop_bus_device(pdev);
+ if (pdev->subordinate) {
+ disable_bridges(pdev->subordinate);
+ pci_disable_device(pdev);
}
+ pci_remove_bus_device(pdev);
+ pci_dev_put(pdev);
}
}
- list_for_each (l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func, sibling);
-
+ list_for_each_entry(func, &slot->funcs, sibling) {
acpiphp_unconfigure_ioapics(func->handle);
acpiphp_bus_trim(func->handle);
- /* try to remove anyway.
- * acpiphp_bus_add might have been failed */
-
- if (!func->pci_dev)
- continue;
-
- pci_remove_bus_device(func->pci_dev);
- pci_dev_put(func->pci_dev);
- func->pci_dev = NULL;
}
slot->flags &= (~SLOT_ENABLED);
- err_exit:
- return retval;
+err_exit:
+ return 0;
}
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index dd18f857dfb..42e4260c3b1 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -153,45 +153,47 @@ int ibmphp_init_devno(struct slot **cur_slot)
return -1;
}
for (loop = 0; loop < len; loop++) {
- if ((*cur_slot)->number == rtable->slots[loop].slot) {
- if ((*cur_slot)->bus == rtable->slots[loop].bus) {
+ if ((*cur_slot)->number == rtable->slots[loop].slot &&
+ (*cur_slot)->bus == rtable->slots[loop].bus) {
+ struct io_apic_irq_attr irq_attr;
+
(*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn);
for (i = 0; i < 4; i++)
(*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus,
- (int) (*cur_slot)->device, i);
-
- debug("(*cur_slot)->irq[0] = %x\n",
- (*cur_slot)->irq[0]);
- debug("(*cur_slot)->irq[1] = %x\n",
- (*cur_slot)->irq[1]);
- debug("(*cur_slot)->irq[2] = %x\n",
- (*cur_slot)->irq[2]);
- debug("(*cur_slot)->irq[3] = %x\n",
- (*cur_slot)->irq[3]);
-
- debug("rtable->exlusive_irqs = %x\n",
+ (int) (*cur_slot)->device, i,
+ &irq_attr);
+
+ debug("(*cur_slot)->irq[0] = %x\n",
+ (*cur_slot)->irq[0]);
+ debug("(*cur_slot)->irq[1] = %x\n",
+ (*cur_slot)->irq[1]);
+ debug("(*cur_slot)->irq[2] = %x\n",
+ (*cur_slot)->irq[2]);
+ debug("(*cur_slot)->irq[3] = %x\n",
+ (*cur_slot)->irq[3]);
+
+ debug("rtable->exlusive_irqs = %x\n",
rtable->exclusive_irqs);
- debug("rtable->slots[loop].irq[0].bitmap = %x\n",
+ debug("rtable->slots[loop].irq[0].bitmap = %x\n",
rtable->slots[loop].irq[0].bitmap);
- debug("rtable->slots[loop].irq[1].bitmap = %x\n",
+ debug("rtable->slots[loop].irq[1].bitmap = %x\n",
rtable->slots[loop].irq[1].bitmap);
- debug("rtable->slots[loop].irq[2].bitmap = %x\n",
+ debug("rtable->slots[loop].irq[2].bitmap = %x\n",
rtable->slots[loop].irq[2].bitmap);
- debug("rtable->slots[loop].irq[3].bitmap = %x\n",
+ debug("rtable->slots[loop].irq[3].bitmap = %x\n",
rtable->slots[loop].irq[3].bitmap);
- debug("rtable->slots[loop].irq[0].link = %x\n",
+ debug("rtable->slots[loop].irq[0].link = %x\n",
rtable->slots[loop].irq[0].link);
- debug("rtable->slots[loop].irq[1].link = %x\n",
+ debug("rtable->slots[loop].irq[1].link = %x\n",
rtable->slots[loop].irq[1].link);
- debug("rtable->slots[loop].irq[2].link = %x\n",
+ debug("rtable->slots[loop].irq[2].link = %x\n",
rtable->slots[loop].irq[2].link);
- debug("rtable->slots[loop].irq[3].link = %x\n",
+ debug("rtable->slots[loop].irq[3].link = %x\n",
rtable->slots[loop].irq[3].link);
- debug("end of init_devno\n");
- kfree(rtable);
- return 0;
- }
+ debug("end of init_devno\n");
+ kfree(rtable);
+ return 0;
}
}
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 3eee70928d4..2d6da78fddb 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -679,7 +679,7 @@ alloc_err:
return rc;
}
-static int sn_pci_hotplug_init(void)
+static int __init sn_pci_hotplug_init(void)
{
struct pci_bus *pci_bus = NULL;
int rc;
@@ -716,7 +716,7 @@ static int sn_pci_hotplug_init(void)
return registered == 1 ? 0 : -ENODEV;
}
-static void sn_pci_hotplug_exit(void)
+static void __exit sn_pci_hotplug_exit(void)
{
struct hotplug_slot *bss_hotplug_slot;
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index 6808d8333ec..737a1c44b07 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -98,6 +98,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
int max_irq;
int pos;
int irq;
+ int node;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
if (!pos)
@@ -125,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
cfg->msg.address_lo = 0xffffffff;
cfg->msg.address_hi = 0xffffffff;
- irq = create_irq();
+ node = dev_to_node(&dev->dev);
+ irq = create_irq_nr(0, node);
if (irq <= 0) {
kfree(cfg);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index a563fbe559d..cd389162735 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1972,15 +1972,6 @@ static int __init init_dmars(void)
}
}
-#ifdef CONFIG_INTR_REMAP
- if (!intr_remapping_enabled) {
- ret = enable_intr_remapping(0);
- if (ret)
- printk(KERN_ERR
- "IOMMU: enable interrupt remapping failed\n");
- }
-#endif
-
/*
* For each rmrr
* for each dev attached to rmrr
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index f5e0ea724a6..3a0cb0bb059 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -15,6 +15,14 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;
+static int disable_intremap;
+static __init int setup_nointremap(char *str)
+{
+ disable_intremap = 1;
+ return 0;
+}
+early_param("nointremap", setup_nointremap);
+
struct irq_2_iommu {
struct intel_iommu *iommu;
u16 irte_index;
@@ -23,15 +31,12 @@ struct irq_2_iommu {
};
#ifdef CONFIG_GENERIC_HARDIRQS
-static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
+static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
struct irq_2_iommu *iommu;
- int node;
-
- node = cpu_to_node(cpu);
iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
- printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);
+ printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);
return iommu;
}
@@ -48,7 +53,7 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
return desc->irq_2_iommu;
}
-static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
+static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
{
struct irq_desc *desc;
struct irq_2_iommu *irq_iommu;
@@ -56,7 +61,7 @@ static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
/*
* alloc irq desc if not allocated already.
*/
- desc = irq_to_desc_alloc_cpu(irq, cpu);
+ desc = irq_to_desc_alloc_node(irq, node);
if (!desc) {
printk(KERN_INFO "can not get irq_desc for %d\n", irq);
return NULL;
@@ -65,14 +70,14 @@ static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
irq_iommu = desc->irq_2_iommu;
if (!irq_iommu)
- desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);
+ desc->irq_2_iommu = get_one_free_irq_2_iommu(node);
return desc->irq_2_iommu;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
- return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
+ return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
}
#else /* !CONFIG_SPARSE_IRQ */
@@ -423,20 +428,6 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
readl, (sts & DMA_GSTS_IRTPS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags);
- if (mode == 0) {
- spin_lock_irqsave(&iommu->register_lock, flags);
-
- /* enable comaptiblity format interrupt pass through */
- cmd = iommu->gcmd | DMA_GCMD_CFI;
- iommu->gcmd |= DMA_GCMD_CFI;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
-
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_CFIS), sts);
-
- spin_unlock_irqrestore(&iommu->register_lock, flags);
- }
-
/*
* global invalidation of interrupt entry cache before enabling
* interrupt-remapping.
@@ -516,6 +507,23 @@ end:
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
+int __init intr_remapping_supported(void)
+{
+ struct dmar_drhd_unit *drhd;
+
+ if (disable_intremap)
+ return 0;
+
+ for_each_drhd_unit(drhd) {
+ struct intel_iommu *iommu = drhd->iommu;
+
+ if (!ecap_ir_support(iommu->ecap))
+ return 0;
+ }
+
+ return 1;
+}
+
int __init enable_intr_remapping(int eim)
{
struct dmar_drhd_unit *drhd;
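
The new "nointremap" handling above follows the usual early_param() pattern: a flag is latched from the kernel command line very early in boot and later consulted by the capability check. A minimal sketch of that pattern, with an illustrative option name (only the early_param() usage itself is taken from the diff):

#include <linux/init.h>

static int example_feature_disabled;

/* Runs while the command line is parsed; returning 0 marks the
 * option as consumed. The string argument is unused here, just as
 * in the nointremap handler above. */
static int __init setup_no_example_feature(char *str)
{
	example_feature_disabled = 1;
	return 0;
}
early_param("noexamplefeature", setup_no_example_feature);

static int example_feature_supported(void)
{
	if (example_feature_disabled)
		return 0;
	/* ... hardware capability checks would follow here ... */
	return 1;
}
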
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 34bf0fdf504..07bbb9b3b93 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -24,6 +24,11 @@
#include <asm/setup.h>
#include "pci.h"
+const char *pci_power_names[] = {
+ "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
+};
+EXPORT_SYMBOL_GPL(pci_power_names);
+
unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
#ifdef CONFIG_PCI_DOMAINS
@@ -557,7 +562,8 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
} else {
error = -ENODEV;
/* Fall back to PCI_D0 if native PM is not supported */
- pci_update_current_state(dev, PCI_D0);
+ if (!dev->pm_cap)
+ dev->current_state = PCI_D0;
}
return error;
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e3998250386..13ffdc35ea0 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -275,7 +275,7 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
memset(device, 0, sizeof(struct device));
device->bus = &pcie_port_bus_type;
device->driver = NULL;
- device->driver_data = NULL;
+ dev_set_drvdata(device, NULL);
device->release = release_pcie_device; /* callback to free pcie dev */
dev_set_name(device, "%s:pcie%02x",
pci_name(parent), get_descriptor_id(port_type, service_type));
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e3c3e081b83..f1ae2475fff 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -745,6 +745,8 @@ int pci_setup_device(struct pci_dev *dev)
/* Early fixups, before probing the BARs */
pci_fixup_device(pci_fixup_early, dev);
+ /* device class may be changed after fixup */
+ class = dev->class >> 8;
switch (dev->hdr_type) { /* header type */
case PCI_HEADER_TYPE_NORMAL: /* standard header */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 3067673d54f..bd4253f93d5 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2461,6 +2461,8 @@ static void __devinit quirk_i82576_sriov(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
#endif /* CONFIG_PCI_IOV */
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 27647354398..fbf965b31c1 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -217,7 +217,7 @@ config PCMCIA_PXA2XX
depends on ARM && ARCH_PXA && PCMCIA
depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
|| MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
- || ARCH_VIPER || ARCH_PXA_ESERIES)
+ || ARCH_VIPER || ARCH_PXA_ESERIES || MACH_STARGATE2)
help
Say Y here to include support for the PXA2xx PCMCIA controller
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index bbac4632722..047394d98ac 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -73,5 +73,6 @@ pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
+pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_core.o $(pxa2xx-obj-y)
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 47cab31ff6e..304ff6d5cf3 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -394,7 +394,7 @@ static int pcmcia_device_probe(struct device * dev)
p_drv = to_pcmcia_drv(dev->driver);
s = p_dev->socket;
- /* The PCMCIA code passes the match data in via dev->driver_data
+ /* The PCMCIA code passes the match data in via dev_set_drvdata(dev)
* which is an ugly hack. Once the driver probe is called it may
* and often will overwrite the match data so we must save it first
*
@@ -404,7 +404,7 @@ static int pcmcia_device_probe(struct device * dev)
* call which will then check whether there are two
* pseudo devices, and if not, add the second one.
*/
- did = p_dev->dev.driver_data;
+ did = dev_get_drvdata(&p_dev->dev);
ds_dev_dbg(1, dev, "trying to bind to %s\n", p_drv->drv.name);
@@ -499,7 +499,7 @@ static int pcmcia_device_remove(struct device * dev)
* pseudo multi-function card, we need to unbind
* all devices
*/
- did = p_dev->dev.driver_data;
+ did = dev_get_drvdata(&p_dev->dev);
if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
(p_dev->socket->device_count != 0) &&
(p_dev->device_no == 0))
@@ -828,7 +828,6 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
{
struct pcmcia_socket *s = dev->socket;
const struct firmware *fw;
- char path[FIRMWARE_NAME_MAX];
int ret = -ENOMEM;
int no_funcs;
int old_funcs;
@@ -839,16 +838,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
ds_dev_dbg(1, &dev->dev, "trying to load CIS file %s\n", filename);
- if (strlen(filename) > (FIRMWARE_NAME_MAX - 1)) {
- dev_printk(KERN_WARNING, &dev->dev,
- "pcmcia: CIS filename is too long [%s]\n",
- filename);
- return -EINVAL;
- }
-
- snprintf(path, sizeof(path), "%s", filename);
-
- if (request_firmware(&fw, path, &dev->dev) == 0) {
+ if (request_firmware(&fw, filename, &dev->dev) == 0) {
if (fw->size >= CISTPL_MAX_CIS_SIZE) {
ret = -EINVAL;
dev_printk(KERN_ERR, &dev->dev,
@@ -988,7 +978,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
return 0;
}
- dev->dev.driver_data = (void *) did;
+ dev_set_drvdata(&dev->dev, did);
return 1;
}
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 1703b20cad5..6095f8daecd 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -915,12 +915,9 @@ static int ds_ioctl(struct inode * inode, struct file * file,
err = -EPERM;
goto free_out;
} else {
- static int printed = 0;
- if (!printed) {
- printk(KERN_WARNING "2.6. kernels use pcmciamtd instead of memory_cs.c and do not require special\n");
- printk(KERN_WARNING "MTD handling any more.\n");
- printed++;
- }
+ printk_once(KERN_WARNING
+ "2.6. kernels use pcmciamtd instead of memory_cs.c and do not require special\n");
+ printk_once(KERN_WARNING "MTD handling any more.\n");
}
err = -EINVAL;
goto free_out;
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
new file mode 100644
index 00000000000..490749ea677
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_stargate2.c
@@ -0,0 +1,174 @@
+/*
+ * linux/drivers/pcmcia/pxa2xx_stargate2.c
+ *
+ * Stargate 2 PCMCIA specific routines.
+ *
+ * Created: December 6, 2005
+ * Author: Ed C. Epp
+ * Copyright: Intel Corp 2005
+ * Jonathan Cameron <jic23@cam.ac.uk> 2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+#include <pcmcia/ss.h>
+
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#include "soc_common.h"
+
+#define SG2_S0_BUFF_CTL 120
+#define SG2_S0_POWER_CTL 108
+#define SG2_S0_GPIO_RESET 82
+#define SG2_S0_GPIO_DETECT 53
+#define SG2_S0_GPIO_READY 81
+
+static struct pcmcia_irqs irqs[] = {
+ { 0, IRQ_GPIO(SG2_S0_GPIO_DETECT), "PCMCIA0 CD" },
+};
+
+static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
+{
+ skt->irq = IRQ_GPIO(SG2_S0_GPIO_READY);
+ return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
+}
+
+static void sg2_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
+{
+ soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
+}
+
+static void sg2_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
+ struct pcmcia_state *state)
+{
+ state->detect = !gpio_get_value(SG2_S0_GPIO_DETECT);
+ state->ready = !!gpio_get_value(SG2_S0_GPIO_READY);
+ state->bvd1 = 0; /* not available - battery detect on card */
+ state->bvd2 = 0; /* not available */
+ state->vs_3v = 1; /* not available - voltage detect for card */
+ state->vs_Xv = 0; /* not available */
+ state->wrprot = 0; /* not available - write protect */
+}
+
+static int sg2_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
+ const socket_state_t *state)
+{
+ /* Enable card power */
+ switch (state->Vcc) {
+ case 0:
+ /* sets power ctl register high */
+ gpio_set_value(SG2_S0_POWER_CTL, 1);
+ break;
+ case 33:
+ case 50:
+ /* sets power control register low (clear) */
+ gpio_set_value(SG2_S0_POWER_CTL, 0);
+ msleep(100);
+ break;
+ default:
+ pr_err("%s(): bad Vcc %u\n",
+ __func__, state->Vcc);
+ return -1;
+ }
+
+ /* reset */
+ gpio_set_value(SG2_S0_GPIO_RESET, !!(state->flags & SS_RESET));
+
+ return 0;
+}
+
+static void sg2_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
+{
+ soc_pcmcia_enable_irqs(skt, irqs, ARRAY_SIZE(irqs));
+}
+
+static void sg2_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
+{
+ soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs));
+}
+
+static struct pcmcia_low_level sg2_pcmcia_ops __initdata = {
+ .owner = THIS_MODULE,
+ .hw_init = sg2_pcmcia_hw_init,
+ .hw_shutdown = sg2_pcmcia_hw_shutdown,
+ .socket_state = sg2_pcmcia_socket_state,
+ .configure_socket = sg2_pcmcia_configure_socket,
+ .socket_init = sg2_pcmcia_socket_init,
+ .socket_suspend = sg2_pcmcia_socket_suspend,
+ .nr = 1,
+};
+
+static struct platform_device *sg2_pcmcia_device;
+
+static int __init sg2_pcmcia_init(void)
+{
+ int ret;
+
+ if (!machine_is_stargate2())
+ return -ENODEV;
+
+ sg2_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
+ if (!sg2_pcmcia_device)
+ return -ENOMEM;
+
+ ret = gpio_request(SG2_S0_BUFF_CTL, "SG2 CF buff ctl");
+ if (ret)
+ goto error_put_platform_device;
+ ret = gpio_request(SG2_S0_POWER_CTL, "SG2 CF power ctl");
+ if (ret)
+ goto error_free_gpio_buff_ctl;
+ ret = gpio_request(SG2_S0_GPIO_RESET, "SG2 CF reset");
+ if (ret)
+ goto error_free_gpio_power_ctl;
+ /* Set gpio directions */
+ gpio_direction_output(SG2_S0_BUFF_CTL, 0);
+ gpio_direction_output(SG2_S0_POWER_CTL, 1);
+ gpio_direction_output(SG2_S0_GPIO_RESET, 1);
+
+ ret = platform_device_add_data(sg2_pcmcia_device,
+ &sg2_pcmcia_ops,
+ sizeof(sg2_pcmcia_ops));
+ if (ret)
+ goto error_free_gpio_reset;
+
+ ret = platform_device_add(sg2_pcmcia_device);
+ if (ret)
+ goto error_free_gpio_reset;
+
+ return 0;
+error_free_gpio_reset:
+ gpio_free(SG2_S0_GPIO_RESET);
+error_free_gpio_power_ctl:
+ gpio_free(SG2_S0_POWER_CTL);
+error_free_gpio_buff_ctl:
+ gpio_free(SG2_S0_BUFF_CTL);
+error_put_platform_device:
+ platform_device_put(sg2_pcmcia_device);
+
+ return ret;
+}
+
+static void __exit sg2_pcmcia_exit(void)
+{
+ platform_device_unregister(sg2_pcmcia_device);
+ gpio_free(SG2_S0_BUFF_CTL);
+ gpio_free(SG2_S0_POWER_CTL);
+ gpio_free(SG2_S0_GPIO_RESET);
+}
+
+fs_initcall(sg2_pcmcia_init);
+module_exit(sg2_pcmcia_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-pcmcia");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 284ebaca6e4..c682ac53641 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -21,7 +21,7 @@ config ACER_WMI
depends on NEW_LEDS
depends on BACKLIGHT_CLASS_DEVICE
depends on SERIO_I8042
- depends on RFKILL
+ depends on RFKILL || RFKILL = n
select ACPI_WMI
---help---
This is a driver for newer Acer (and Wistron) laptops. It adds
@@ -60,7 +60,7 @@ config DELL_LAPTOP
depends on DCDBAS
depends on EXPERIMENTAL
depends on BACKLIGHT_CLASS_DEVICE
- depends on RFKILL
+ depends on RFKILL || RFKILL = n
depends on POWER_SUPPLY
default n
---help---
@@ -117,7 +117,7 @@ config HP_WMI
tristate "HP WMI extras"
depends on ACPI_WMI
depends on INPUT
- depends on RFKILL
+ depends on RFKILL || RFKILL = n
help
Say Y here if you want to support WMI-based hotkeys on HP laptops and
to read data from WMI such as docking or ambient light sensor state.
@@ -196,14 +196,13 @@ config THINKPAD_ACPI
tristate "ThinkPad ACPI Laptop Extras"
depends on ACPI
depends on INPUT
+ depends on RFKILL || RFKILL = n
select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
select HWMON
select NVRAM
select NEW_LEDS
select LEDS_CLASS
- select NET
- select RFKILL
---help---
This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
support for Fn-Fx key combinations, Bluetooth control, video
@@ -338,9 +337,9 @@ config EEEPC_LAPTOP
depends on ACPI
depends on INPUT
depends on EXPERIMENTAL
+ depends on RFKILL || RFKILL = n
select BACKLIGHT_CLASS_DEVICE
select HWMON
- select RFKILL
---help---
This driver supports the Fn-Fx keys on Eee PC laptops.
It also adds the ability to switch camera/wlan on/off.
@@ -405,9 +404,8 @@ config ACPI_TOSHIBA
tristate "Toshiba Laptop Extras"
depends on ACPI
depends on INPUT
+ depends on RFKILL || RFKILL = n
select INPUT_POLLDEV
- select NET
- select RFKILL
select BACKLIGHT_CLASS_DEVICE
---help---
This driver adds support for access to certain system settings
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 0f6e43bf4fc..09a503e5da6 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -958,59 +958,47 @@ static void acer_rfkill_update(struct work_struct *ignored)
status = get_u32(&state, ACER_CAP_WIRELESS);
if (ACPI_SUCCESS(status))
- rfkill_force_state(wireless_rfkill, state ?
- RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED);
+ rfkill_set_sw_state(wireless_rfkill, !!state);
if (has_cap(ACER_CAP_BLUETOOTH)) {
status = get_u32(&state, ACER_CAP_BLUETOOTH);
if (ACPI_SUCCESS(status))
- rfkill_force_state(bluetooth_rfkill, state ?
- RFKILL_STATE_UNBLOCKED :
- RFKILL_STATE_SOFT_BLOCKED);
+ rfkill_set_sw_state(bluetooth_rfkill, !!state);
}
schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
}
-static int acer_rfkill_set(void *data, enum rfkill_state state)
+static int acer_rfkill_set(void *data, bool blocked)
{
acpi_status status;
- u32 *cap = data;
- status = set_u32((u32) (state == RFKILL_STATE_UNBLOCKED), *cap);
+ u32 cap = (unsigned long)data;
+ status = set_u32(!!blocked, cap);
if (ACPI_FAILURE(status))
return -ENODEV;
return 0;
}
-static struct rfkill * acer_rfkill_register(struct device *dev,
-enum rfkill_type type, char *name, u32 cap)
+static const struct rfkill_ops acer_rfkill_ops = {
+ .set_block = acer_rfkill_set,
+};
+
+static struct rfkill *acer_rfkill_register(struct device *dev,
+ enum rfkill_type type,
+ char *name, u32 cap)
{
int err;
- u32 state;
- u32 *data;
struct rfkill *rfkill_dev;
- rfkill_dev = rfkill_allocate(dev, type);
+ rfkill_dev = rfkill_alloc(name, dev, type,
+ &acer_rfkill_ops,
+ (void *)(unsigned long)cap);
if (!rfkill_dev)
return ERR_PTR(-ENOMEM);
- rfkill_dev->name = name;
- get_u32(&state, cap);
- rfkill_dev->state = state ? RFKILL_STATE_UNBLOCKED :
- RFKILL_STATE_SOFT_BLOCKED;
- data = kzalloc(sizeof(u32), GFP_KERNEL);
- if (!data) {
- rfkill_free(rfkill_dev);
- return ERR_PTR(-ENOMEM);
- }
- *data = cap;
- rfkill_dev->data = data;
- rfkill_dev->toggle_radio = acer_rfkill_set;
- rfkill_dev->user_claim_unsupported = 1;
err = rfkill_register(rfkill_dev);
if (err) {
- kfree(rfkill_dev->data);
- rfkill_free(rfkill_dev);
+ rfkill_destroy(rfkill_dev);
return ERR_PTR(err);
}
return rfkill_dev;
@@ -1028,8 +1016,8 @@ static int acer_rfkill_init(struct device *dev)
RFKILL_TYPE_BLUETOOTH, "acer-bluetooth",
ACER_CAP_BLUETOOTH);
if (IS_ERR(bluetooth_rfkill)) {
- kfree(wireless_rfkill->data);
rfkill_unregister(wireless_rfkill);
+ rfkill_destroy(wireless_rfkill);
return PTR_ERR(bluetooth_rfkill);
}
}
@@ -1042,11 +1030,13 @@ static int acer_rfkill_init(struct device *dev)
static void acer_rfkill_exit(void)
{
cancel_delayed_work_sync(&acer_rfkill_work);
- kfree(wireless_rfkill->data);
+
rfkill_unregister(wireless_rfkill);
+ rfkill_destroy(wireless_rfkill);
+
if (has_cap(ACER_CAP_BLUETOOTH)) {
- kfree(bluetooth_rfkill->data);
rfkill_unregister(bluetooth_rfkill);
+ rfkill_destroy(bluetooth_rfkill);
}
return;
}
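
The acer-wmi conversion above illustrates the new rfkill life cycle: allocate with rfkill_alloc() and a struct rfkill_ops, register, and tear down with rfkill_unregister() followed by rfkill_destroy(). A minimal sketch of that sequence, using only calls visible in this diff; the set_block body and all "example" names are placeholders:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/rfkill.h>

/* 'data' is whatever was handed to rfkill_alloc() as ops_data. */
static int example_set_block(void *data, bool blocked)
{
	/* ... ask the firmware to (un)block the radio here ... */
	return 0;
}

static const struct rfkill_ops example_rfkill_ops = {
	.set_block = example_set_block,
};

static struct rfkill *example_rfkill_create(struct device *dev)
{
	struct rfkill *rfk;
	int err;

	rfk = rfkill_alloc("example-wifi", dev, RFKILL_TYPE_WLAN,
			   &example_rfkill_ops, NULL);
	if (!rfk)
		return ERR_PTR(-ENOMEM);

	err = rfkill_register(rfk);
	if (err) {
		rfkill_destroy(rfk);	/* frees an unregistered rfkill */
		return ERR_PTR(err);
	}
	return rfk;
}

static void example_rfkill_remove(struct rfkill *rfk)
{
	rfkill_unregister(rfk);
	rfkill_destroy(rfk);	/* must come after rfkill_unregister() */
}
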
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index eeafc6c0160..bfc1a8892a3 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -269,16 +269,16 @@ static struct key_entry asus_keymap[] = {
{KE_KEY, 0x34, KEY_SWITCHVIDEOMODE},
{KE_KEY, 0x40, KEY_PREVIOUSSONG},
{KE_KEY, 0x41, KEY_NEXTSONG},
- {KE_KEY, 0x43, KEY_STOP},
+ {KE_KEY, 0x43, KEY_STOPCD},
{KE_KEY, 0x45, KEY_PLAYPAUSE},
{KE_KEY, 0x50, KEY_EMAIL},
{KE_KEY, 0x51, KEY_WWW},
- {KE_KEY, 0x5C, BTN_EXTRA}, /* Performance */
+ {KE_KEY, 0x5C, KEY_SCREENLOCK}, /* Screenlock */
{KE_KEY, 0x5D, KEY_WLAN},
{KE_KEY, 0x61, KEY_SWITCHVIDEOMODE},
{KE_KEY, 0x6B, BTN_TOUCH}, /* Lock Mouse */
{KE_KEY, 0x82, KEY_CAMERA},
- {KE_KEY, 0x8A, KEY_TV},
+ {KE_KEY, 0x8A, KEY_PROG1},
{KE_KEY, 0x95, KEY_MEDIA},
{KE_KEY, 0x99, KEY_PHONE},
{KE_END, 0},
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index af9f4302117..2faf0e14f05 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -174,10 +174,11 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
result[3]: NVRAM format version number
*/
-static int dell_rfkill_set(int radio, enum rfkill_state state)
+static int dell_rfkill_set(void *data, bool blocked)
{
struct calling_interface_buffer buffer;
- int disable = (state == RFKILL_STATE_UNBLOCKED) ? 0 : 1;
+ int disable = blocked ? 0 : 1;
+ unsigned long radio = (unsigned long)data;
memset(&buffer, 0, sizeof(struct calling_interface_buffer));
buffer.input[0] = (1 | (radio<<8) | (disable << 16));
@@ -186,56 +187,24 @@ static int dell_rfkill_set(int radio, enum rfkill_state state)
return 0;
}
-static int dell_wifi_set(void *data, enum rfkill_state state)
-{
- return dell_rfkill_set(1, state);
-}
-
-static int dell_bluetooth_set(void *data, enum rfkill_state state)
-{
- return dell_rfkill_set(2, state);
-}
-
-static int dell_wwan_set(void *data, enum rfkill_state state)
-{
- return dell_rfkill_set(3, state);
-}
-
-static int dell_rfkill_get(int bit, enum rfkill_state *state)
+static void dell_rfkill_query(struct rfkill *rfkill, void *data)
{
struct calling_interface_buffer buffer;
int status;
- int new_state = RFKILL_STATE_HARD_BLOCKED;
+ int bit = (unsigned long)data + 16;
memset(&buffer, 0, sizeof(struct calling_interface_buffer));
dell_send_request(&buffer, 17, 11);
status = buffer.output[1];
- if (status & (1<<16))
- new_state = RFKILL_STATE_SOFT_BLOCKED;
-
- if (status & (1<<bit))
- *state = new_state;
- else
- *state = RFKILL_STATE_UNBLOCKED;
-
- return 0;
-}
-
-static int dell_wifi_get(void *data, enum rfkill_state *state)
-{
- return dell_rfkill_get(17, state);
-}
-
-static int dell_bluetooth_get(void *data, enum rfkill_state *state)
-{
- return dell_rfkill_get(18, state);
+ if (status & BIT(bit))
+ rfkill_set_hw_state(rfkill, !!(status & BIT(16)));
}
-static int dell_wwan_get(void *data, enum rfkill_state *state)
-{
- return dell_rfkill_get(19, state);
-}
+static const struct rfkill_ops dell_rfkill_ops = {
+ .set_block = dell_rfkill_set,
+ .query = dell_rfkill_query,
+};
static int dell_setup_rfkill(void)
{
@@ -248,36 +217,37 @@ static int dell_setup_rfkill(void)
status = buffer.output[1];
if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
- wifi_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_WLAN);
- if (!wifi_rfkill)
+ wifi_rfkill = rfkill_alloc("dell-wifi", NULL, RFKILL_TYPE_WLAN,
+ &dell_rfkill_ops, (void *) 1);
+ if (!wifi_rfkill) {
+ ret = -ENOMEM;
goto err_wifi;
- wifi_rfkill->name = "dell-wifi";
- wifi_rfkill->toggle_radio = dell_wifi_set;
- wifi_rfkill->get_state = dell_wifi_get;
+ }
ret = rfkill_register(wifi_rfkill);
if (ret)
goto err_wifi;
}
if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
- bluetooth_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_BLUETOOTH);
- if (!bluetooth_rfkill)
+ bluetooth_rfkill = rfkill_alloc("dell-bluetooth", NULL,
+ RFKILL_TYPE_BLUETOOTH,
+ &dell_rfkill_ops, (void *) 2);
+ if (!bluetooth_rfkill) {
+ ret = -ENOMEM;
goto err_bluetooth;
- bluetooth_rfkill->name = "dell-bluetooth";
- bluetooth_rfkill->toggle_radio = dell_bluetooth_set;
- bluetooth_rfkill->get_state = dell_bluetooth_get;
+ }
ret = rfkill_register(bluetooth_rfkill);
if (ret)
goto err_bluetooth;
}
if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
- wwan_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_WWAN);
- if (!wwan_rfkill)
+ wwan_rfkill = rfkill_alloc("dell-wwan", NULL, RFKILL_TYPE_WWAN,
+ &dell_rfkill_ops, (void *) 3);
+ if (!wwan_rfkill) {
+ ret = -ENOMEM;
goto err_wwan;
- wwan_rfkill->name = "dell-wwan";
- wwan_rfkill->toggle_radio = dell_wwan_set;
- wwan_rfkill->get_state = dell_wwan_get;
+ }
ret = rfkill_register(wwan_rfkill);
if (ret)
goto err_wwan;
@@ -285,22 +255,15 @@ static int dell_setup_rfkill(void)
return 0;
err_wwan:
- if (wwan_rfkill)
- rfkill_free(wwan_rfkill);
- if (bluetooth_rfkill) {
+ rfkill_destroy(wwan_rfkill);
+ if (bluetooth_rfkill)
rfkill_unregister(bluetooth_rfkill);
- bluetooth_rfkill = NULL;
- }
err_bluetooth:
- if (bluetooth_rfkill)
- rfkill_free(bluetooth_rfkill);
- if (wifi_rfkill) {
+ rfkill_destroy(bluetooth_rfkill);
+ if (wifi_rfkill)
rfkill_unregister(wifi_rfkill);
- wifi_rfkill = NULL;
- }
err_wifi:
- if (wifi_rfkill)
- rfkill_free(wifi_rfkill);
+ rfkill_destroy(wifi_rfkill);
return ret;
}
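
Besides set_block, the dell-laptop hunk adds a .query hook, which the rfkill core may call when it wants the driver to refresh its view of the hardware before reporting state. A rough sketch of that callback shape, with an invented fw_read_status() standing in for the SMBIOS request:

#include <linux/bitops.h>
#include <linux/rfkill.h>
#include <linux/types.h>

static u32 fw_read_status(void)		/* invented firmware/SMBIOS query */
{
	return 0;
}

static int hypo_set_block(void *data, bool blocked)
{
	/* program the firmware; not shown */
	return 0;
}

/* 'data' identifies the radio; here it selects which status bit to test */
static void hypo_rfkill_query(struct rfkill *rfkill, void *data)
{
	int bit = (unsigned long)data + 16;
	u32 status = fw_read_status();

	/* only radios with a hardware switch report a hw-blocked state */
	if (status & BIT(bit))
		rfkill_set_hw_state(rfkill, !!(status & BIT(16)));
}

static const struct rfkill_ops hypo_dell_like_ops = {
	.set_block = hypo_set_block,	/* required by the rfkill core */
	.query     = hypo_rfkill_query,	/* optional refresh hook */
};
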
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 6f54fd1757c..03bf522bd7a 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -158,6 +158,7 @@ enum { KE_KEY, KE_END };
static struct key_entry eeepc_keymap[] = {
/* Sleep already handled via generic ACPI code */
{KE_KEY, 0x10, KEY_WLAN },
+ {KE_KEY, 0x11, KEY_WLAN },
{KE_KEY, 0x12, KEY_PROG1 },
{KE_KEY, 0x13, KEY_MUTE },
{KE_KEY, 0x14, KEY_VOLUMEDOWN },
@@ -166,6 +167,8 @@ static struct key_entry eeepc_keymap[] = {
{KE_KEY, 0x1b, KEY_ZOOM },
{KE_KEY, 0x1c, KEY_PROG2 },
{KE_KEY, 0x1d, KEY_PROG3 },
+ {KE_KEY, NOTIFY_BRN_MIN, KEY_BRIGHTNESSDOWN },
+ {KE_KEY, NOTIFY_BRN_MIN + 2, KEY_BRIGHTNESSUP },
{KE_KEY, 0x30, KEY_SWITCHVIDEOMODE },
{KE_KEY, 0x31, KEY_SWITCHVIDEOMODE },
{KE_KEY, 0x32, KEY_SWITCHVIDEOMODE },
@@ -296,39 +299,22 @@ static int update_bl_status(struct backlight_device *bd)
* Rfkill helpers
*/
-static int eeepc_wlan_rfkill_set(void *data, enum rfkill_state state)
-{
- if (state == RFKILL_STATE_SOFT_BLOCKED)
- return set_acpi(CM_ASL_WLAN, 0);
- else
- return set_acpi(CM_ASL_WLAN, 1);
-}
-
-static int eeepc_wlan_rfkill_state(void *data, enum rfkill_state *state)
+static bool eeepc_wlan_rfkill_blocked(void)
{
if (get_acpi(CM_ASL_WLAN) == 1)
- *state = RFKILL_STATE_UNBLOCKED;
- else
- *state = RFKILL_STATE_SOFT_BLOCKED;
- return 0;
+ return false;
+ return true;
}
-static int eeepc_bluetooth_rfkill_set(void *data, enum rfkill_state state)
+static int eeepc_rfkill_set(void *data, bool blocked)
{
- if (state == RFKILL_STATE_SOFT_BLOCKED)
- return set_acpi(CM_ASL_BLUETOOTH, 0);
- else
- return set_acpi(CM_ASL_BLUETOOTH, 1);
+ unsigned long asl = (unsigned long)data;
+ return set_acpi(asl, !blocked);
}
-static int eeepc_bluetooth_rfkill_state(void *data, enum rfkill_state *state)
-{
- if (get_acpi(CM_ASL_BLUETOOTH) == 1)
- *state = RFKILL_STATE_UNBLOCKED;
- else
- *state = RFKILL_STATE_SOFT_BLOCKED;
- return 0;
-}
+static const struct rfkill_ops eeepc_rfkill_ops = {
+ .set_block = eeepc_rfkill_set,
+};
/*
* Sys helpers
@@ -381,11 +367,13 @@ static ssize_t show_sys_acpi(int cm, char *buf)
EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
+EEEPC_CREATE_DEVICE_ATTR(cpufv, CM_ASL_CPUFV);
static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_disp.attr,
+ &dev_attr_cpufv.attr,
NULL
};
@@ -512,17 +500,23 @@ static int eeepc_hotk_check(void)
return 0;
}
-static void notify_brn(void)
+static int notify_brn(void)
{
+ /* returns the *previous* brightness, or -1 */
struct backlight_device *bd = eeepc_backlight_device;
- if (bd)
+ if (bd) {
+ int old = bd->props.brightness;
bd->props.brightness = read_brightness(bd);
+ return old;
+ }
+ return -1;
}
static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
{
struct pci_dev *dev;
struct pci_bus *bus = pci_find_bus(0, 1);
+ bool blocked;
if (event != ACPI_NOTIFY_BUS_CHECK)
return;
@@ -532,7 +526,8 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
return;
}
- if (get_acpi(CM_ASL_WLAN) == 1) {
+ blocked = eeepc_wlan_rfkill_blocked();
+ if (!blocked) {
dev = pci_get_slot(bus, 0);
if (dev) {
/* Device already present */
@@ -552,23 +547,41 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
pci_dev_put(dev);
}
}
+
+ rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked);
}
static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
{
static struct key_entry *key;
u16 count;
+ int brn = -ENODEV;
if (!ehotk)
return;
if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
- notify_brn();
+ brn = notify_brn();
count = ehotk->event_count[event % 128]++;
acpi_bus_generate_proc_event(ehotk->device, event, count);
acpi_bus_generate_netlink_event(ehotk->device->pnp.device_class,
dev_name(&ehotk->device->dev), event,
count);
if (ehotk->inputdev) {
+ if (brn != -ENODEV) {
+ /* brightness-change events need special
+ * handling for conversion to key events
+ */
+ if (brn < 0)
+ brn = event;
+ else
+ brn += NOTIFY_BRN_MIN;
+ if (event < brn)
+ event = NOTIFY_BRN_MIN; /* brightness down */
+ else if (event > brn)
+ event = NOTIFY_BRN_MIN + 2; /* ... up */
+ else
+ event = NOTIFY_BRN_MIN + 1; /* ... unchanged */
+ }
key = eepc_get_entry_by_scancode(event);
if (key) {
switch (key->type) {
@@ -649,27 +662,21 @@ static int eeepc_hotk_add(struct acpi_device *device)
if (ACPI_FAILURE(status))
printk(EEEPC_ERR "Error installing notify handler\n");
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
+
if (get_acpi(CM_ASL_WLAN) != -1) {
- ehotk->eeepc_wlan_rfkill = rfkill_allocate(&device->dev,
- RFKILL_TYPE_WLAN);
+ ehotk->eeepc_wlan_rfkill = rfkill_alloc("eeepc-wlan",
+ &device->dev,
+ RFKILL_TYPE_WLAN,
+ &eeepc_rfkill_ops,
+ (void *)CM_ASL_WLAN);
if (!ehotk->eeepc_wlan_rfkill)
goto wlan_fail;
- ehotk->eeepc_wlan_rfkill->name = "eeepc-wlan";
- ehotk->eeepc_wlan_rfkill->toggle_radio = eeepc_wlan_rfkill_set;
- ehotk->eeepc_wlan_rfkill->get_state = eeepc_wlan_rfkill_state;
- if (get_acpi(CM_ASL_WLAN) == 1) {
- ehotk->eeepc_wlan_rfkill->state =
- RFKILL_STATE_UNBLOCKED;
- rfkill_set_default(RFKILL_TYPE_WLAN,
- RFKILL_STATE_UNBLOCKED);
- } else {
- ehotk->eeepc_wlan_rfkill->state =
- RFKILL_STATE_SOFT_BLOCKED;
- rfkill_set_default(RFKILL_TYPE_WLAN,
- RFKILL_STATE_SOFT_BLOCKED);
- }
+ rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill,
+ get_acpi(CM_ASL_WLAN) != 1);
result = rfkill_register(ehotk->eeepc_wlan_rfkill);
if (result)
goto wlan_fail;
@@ -677,46 +684,31 @@ static int eeepc_hotk_add(struct acpi_device *device)
if (get_acpi(CM_ASL_BLUETOOTH) != -1) {
ehotk->eeepc_bluetooth_rfkill =
- rfkill_allocate(&device->dev, RFKILL_TYPE_BLUETOOTH);
+ rfkill_alloc("eeepc-bluetooth",
+ &device->dev,
+ RFKILL_TYPE_BLUETOOTH,
+ &eeepc_rfkill_ops,
+ (void *)CM_ASL_BLUETOOTH);
if (!ehotk->eeepc_bluetooth_rfkill)
goto bluetooth_fail;
- ehotk->eeepc_bluetooth_rfkill->name = "eeepc-bluetooth";
- ehotk->eeepc_bluetooth_rfkill->toggle_radio =
- eeepc_bluetooth_rfkill_set;
- ehotk->eeepc_bluetooth_rfkill->get_state =
- eeepc_bluetooth_rfkill_state;
- if (get_acpi(CM_ASL_BLUETOOTH) == 1) {
- ehotk->eeepc_bluetooth_rfkill->state =
- RFKILL_STATE_UNBLOCKED;
- rfkill_set_default(RFKILL_TYPE_BLUETOOTH,
- RFKILL_STATE_UNBLOCKED);
- } else {
- ehotk->eeepc_bluetooth_rfkill->state =
- RFKILL_STATE_SOFT_BLOCKED;
- rfkill_set_default(RFKILL_TYPE_BLUETOOTH,
- RFKILL_STATE_SOFT_BLOCKED);
- }
-
+ rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill,
+ get_acpi(CM_ASL_BLUETOOTH) != 1);
result = rfkill_register(ehotk->eeepc_bluetooth_rfkill);
if (result)
goto bluetooth_fail;
}
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
-
return 0;
bluetooth_fail:
- if (ehotk->eeepc_bluetooth_rfkill)
- rfkill_free(ehotk->eeepc_bluetooth_rfkill);
+ rfkill_destroy(ehotk->eeepc_bluetooth_rfkill);
rfkill_unregister(ehotk->eeepc_wlan_rfkill);
- ehotk->eeepc_wlan_rfkill = NULL;
wlan_fail:
- if (ehotk->eeepc_wlan_rfkill)
- rfkill_free(ehotk->eeepc_wlan_rfkill);
+ rfkill_destroy(ehotk->eeepc_wlan_rfkill);
+ eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
+ eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
ehotk_fail:
kfree(ehotk);
ehotk = NULL;
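
Two smaller conventions recur from eeepc-laptop onward: the per-radio identifier travels through the ops data pointer as a plain integer cast to void *, and the initial software state is seeded with rfkill_set_sw_state() before rfkill_register(). A sketch of just those two points; HYPO_ASL_WLAN and fw_wlan_blocked() are illustrative, not symbols from the patch:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/rfkill.h>

#define HYPO_ASL_WLAN	0x59	/* illustrative firmware control id */

static bool fw_wlan_blocked(void)	/* invented firmware query */
{
	return false;
}

static int hypo_set_block(void *data, bool blocked)
{
	unsigned long ctrl = (unsigned long)data;	/* recover the cookie */

	pr_debug("control %lx -> %s\n", ctrl, blocked ? "off" : "on");
	/* would write the firmware control here: on when not blocked */
	return 0;
}

static const struct rfkill_ops hypo_rfkill_ops = {
	.set_block = hypo_set_block,
};

static int hypo_register_wlan(struct device *dev, struct rfkill **out)
{
	struct rfkill *rfk;
	int err;

	rfk = rfkill_alloc("hypo-wlan", dev, RFKILL_TYPE_WLAN,
			   &hypo_rfkill_ops,
			   (void *)(unsigned long)HYPO_ASL_WLAN);
	if (!rfk)
		return -ENOMEM;

	/* seed the current soft state before userspace can see the device */
	rfkill_set_sw_state(rfk, fw_wlan_blocked());

	err = rfkill_register(rfk);
	if (err) {
		rfkill_destroy(rfk);
		return err;
	}
	*out = rfk;
	return 0;
}
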
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 50d9019de2b..16fffe44e33 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -154,58 +154,46 @@ static int hp_wmi_dock_state(void)
return hp_wmi_perform_query(HPWMI_DOCK_QUERY, 0, 0);
}
-static int hp_wmi_wifi_set(void *data, enum rfkill_state state)
+static int hp_wmi_set_block(void *data, bool blocked)
{
- if (state)
- return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x101);
- else
- return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x100);
-}
+ unsigned long b = (unsigned long) data;
+ int query = BIT(b + 8) | ((!blocked) << b);
-static int hp_wmi_bluetooth_set(void *data, enum rfkill_state state)
-{
- if (state)
- return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x202);
- else
- return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x200);
+ return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, query);
}
-static int hp_wmi_wwan_set(void *data, enum rfkill_state state)
-{
- if (state)
- return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x404);
- else
- return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x400);
-}
+static const struct rfkill_ops hp_wmi_rfkill_ops = {
+ .set_block = hp_wmi_set_block,
+};
-static int hp_wmi_wifi_state(void)
+static bool hp_wmi_wifi_state(void)
{
int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
if (wireless & 0x100)
- return RFKILL_STATE_UNBLOCKED;
+ return false;
else
- return RFKILL_STATE_SOFT_BLOCKED;
+ return true;
}
-static int hp_wmi_bluetooth_state(void)
+static bool hp_wmi_bluetooth_state(void)
{
int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
if (wireless & 0x10000)
- return RFKILL_STATE_UNBLOCKED;
+ return false;
else
- return RFKILL_STATE_SOFT_BLOCKED;
+ return true;
}
-static int hp_wmi_wwan_state(void)
+static bool hp_wmi_wwan_state(void)
{
int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
if (wireless & 0x1000000)
- return RFKILL_STATE_UNBLOCKED;
+ return false;
else
- return RFKILL_STATE_SOFT_BLOCKED;
+ return true;
}
static ssize_t show_display(struct device *dev, struct device_attribute *attr,
@@ -347,14 +335,14 @@ static void hp_wmi_notify(u32 value, void *context)
}
} else if (eventcode == 0x5) {
if (wifi_rfkill)
- rfkill_force_state(wifi_rfkill,
- hp_wmi_wifi_state());
+ rfkill_set_sw_state(wifi_rfkill,
+ hp_wmi_wifi_state());
if (bluetooth_rfkill)
- rfkill_force_state(bluetooth_rfkill,
- hp_wmi_bluetooth_state());
+ rfkill_set_sw_state(bluetooth_rfkill,
+ hp_wmi_bluetooth_state());
if (wwan_rfkill)
- rfkill_force_state(wwan_rfkill,
- hp_wmi_wwan_state());
+ rfkill_set_sw_state(wwan_rfkill,
+ hp_wmi_wwan_state());
} else
printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
eventcode);
@@ -430,34 +418,30 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
goto add_sysfs_error;
if (wireless & 0x1) {
- wifi_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WLAN);
- wifi_rfkill->name = "hp-wifi";
- wifi_rfkill->state = hp_wmi_wifi_state();
- wifi_rfkill->toggle_radio = hp_wmi_wifi_set;
- wifi_rfkill->user_claim_unsupported = 1;
+ wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
+ RFKILL_TYPE_WLAN,
+ &hp_wmi_rfkill_ops,
+ (void *) 0);
err = rfkill_register(wifi_rfkill);
if (err)
- goto add_sysfs_error;
+ goto register_wifi_error;
}
if (wireless & 0x2) {
- bluetooth_rfkill = rfkill_allocate(&device->dev,
- RFKILL_TYPE_BLUETOOTH);
- bluetooth_rfkill->name = "hp-bluetooth";
- bluetooth_rfkill->state = hp_wmi_bluetooth_state();
- bluetooth_rfkill->toggle_radio = hp_wmi_bluetooth_set;
- bluetooth_rfkill->user_claim_unsupported = 1;
+ bluetooth_rfkill = rfkill_alloc("hp-bluetooth", &device->dev,
+ RFKILL_TYPE_BLUETOOTH,
+ &hp_wmi_rfkill_ops,
+ (void *) 1);
err = rfkill_register(bluetooth_rfkill);
if (err)
goto register_bluetooth_error;
}
if (wireless & 0x4) {
- wwan_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WWAN);
- wwan_rfkill->name = "hp-wwan";
- wwan_rfkill->state = hp_wmi_wwan_state();
- wwan_rfkill->toggle_radio = hp_wmi_wwan_set;
- wwan_rfkill->user_claim_unsupported = 1;
+ wwan_rfkill = rfkill_alloc("hp-wwan", &device->dev,
+ RFKILL_TYPE_WWAN,
+ &hp_wmi_rfkill_ops,
+ (void *) 2);
err = rfkill_register(wwan_rfkill);
if (err)
goto register_wwan_err;
@@ -465,11 +449,15 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
return 0;
register_wwan_err:
+ rfkill_destroy(wwan_rfkill);
if (bluetooth_rfkill)
rfkill_unregister(bluetooth_rfkill);
register_bluetooth_error:
+ rfkill_destroy(bluetooth_rfkill);
if (wifi_rfkill)
rfkill_unregister(wifi_rfkill);
+register_wifi_error:
+ rfkill_destroy(wifi_rfkill);
add_sysfs_error:
cleanup_sysfs(device);
return err;
@@ -479,12 +467,18 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
{
cleanup_sysfs(device);
- if (wifi_rfkill)
+ if (wifi_rfkill) {
rfkill_unregister(wifi_rfkill);
- if (bluetooth_rfkill)
+ rfkill_destroy(wifi_rfkill);
+ }
+ if (bluetooth_rfkill) {
rfkill_unregister(bluetooth_rfkill);
- if (wwan_rfkill)
+ rfkill_destroy(bluetooth_rfkill);
+ }
+ if (wwan_rfkill) {
rfkill_unregister(wwan_rfkill);
+ rfkill_destroy(wwan_rfkill);
+ }
return 0;
}
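
The hp-wmi notify path shows the reporting side of the new API: firmware events no longer map to RFKILL_STATE_* values through rfkill_force_state(), they hand a plain boolean soft-block state to rfkill_set_sw_state(). Sketched roughly, with placeholder hypo_* names:

#include <linux/rfkill.h>

static struct rfkill *hypo_wifi_rfk;	/* allocated and registered elsewhere */

/* pretend firmware query: true when the radio is soft-blocked */
static bool hypo_wifi_blocked(void)
{
	return false;
}

/* called from the driver's WMI/ACPI event handler */
static void hypo_wireless_event(void)
{
	/* rfkill_set_sw_state() returns the combined sw||hw block state */
	if (hypo_wifi_rfk)
		rfkill_set_sw_state(hypo_wifi_rfk, hypo_wifi_blocked());
}
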
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 552958545f9..e48d9a4506f 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -128,11 +128,11 @@ enum sony_nc_rfkill {
SONY_BLUETOOTH,
SONY_WWAN,
SONY_WIMAX,
- SONY_RFKILL_MAX,
+ N_SONY_RFKILL,
};
-static struct rfkill *sony_rfkill_devices[SONY_RFKILL_MAX];
-static int sony_rfkill_address[SONY_RFKILL_MAX] = {0x300, 0x500, 0x700, 0x900};
+static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL];
+static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900};
static void sony_nc_rfkill_update(void);
/*********** Input Devices ***********/
@@ -1051,151 +1051,96 @@ static void sony_nc_rfkill_cleanup(void)
{
int i;
- for (i = 0; i < SONY_RFKILL_MAX; i++) {
- if (sony_rfkill_devices[i])
+ for (i = 0; i < N_SONY_RFKILL; i++) {
+ if (sony_rfkill_devices[i]) {
rfkill_unregister(sony_rfkill_devices[i]);
+ rfkill_destroy(sony_rfkill_devices[i]);
+ }
}
}
-static int sony_nc_rfkill_get(void *data, enum rfkill_state *state)
-{
- int result;
- int argument = sony_rfkill_address[(long) data];
-
- sony_call_snc_handle(0x124, 0x200, &result);
- if (result & 0x1) {
- sony_call_snc_handle(0x124, argument, &result);
- if (result & 0xf)
- *state = RFKILL_STATE_UNBLOCKED;
- else
- *state = RFKILL_STATE_SOFT_BLOCKED;
- } else {
- *state = RFKILL_STATE_HARD_BLOCKED;
- }
-
- return 0;
-}
-
-static int sony_nc_rfkill_set(void *data, enum rfkill_state state)
+static int sony_nc_rfkill_set(void *data, bool blocked)
{
int result;
int argument = sony_rfkill_address[(long) data] + 0x100;
- if (state == RFKILL_STATE_UNBLOCKED)
+ if (!blocked)
argument |= 0xff0000;
return sony_call_snc_handle(0x124, argument, &result);
}
-static int sony_nc_setup_wifi_rfkill(struct acpi_device *device)
-{
- int err = 0;
- struct rfkill *sony_wifi_rfkill;
-
- sony_wifi_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WLAN);
- if (!sony_wifi_rfkill)
- return -1;
- sony_wifi_rfkill->name = "sony-wifi";
- sony_wifi_rfkill->toggle_radio = sony_nc_rfkill_set;
- sony_wifi_rfkill->get_state = sony_nc_rfkill_get;
- sony_wifi_rfkill->user_claim_unsupported = 1;
- sony_wifi_rfkill->data = (void *)SONY_WIFI;
- err = rfkill_register(sony_wifi_rfkill);
- if (err)
- rfkill_free(sony_wifi_rfkill);
- else {
- sony_rfkill_devices[SONY_WIFI] = sony_wifi_rfkill;
- sony_nc_rfkill_set(sony_wifi_rfkill->data,
- RFKILL_STATE_UNBLOCKED);
- }
- return err;
-}
+static const struct rfkill_ops sony_rfkill_ops = {
+ .set_block = sony_nc_rfkill_set,
+};
-static int sony_nc_setup_bluetooth_rfkill(struct acpi_device *device)
+static int sony_nc_setup_rfkill(struct acpi_device *device,
+ enum sony_nc_rfkill nc_type)
{
int err = 0;
- struct rfkill *sony_bluetooth_rfkill;
-
- sony_bluetooth_rfkill = rfkill_allocate(&device->dev,
- RFKILL_TYPE_BLUETOOTH);
- if (!sony_bluetooth_rfkill)
- return -1;
- sony_bluetooth_rfkill->name = "sony-bluetooth";
- sony_bluetooth_rfkill->toggle_radio = sony_nc_rfkill_set;
- sony_bluetooth_rfkill->get_state = sony_nc_rfkill_get;
- sony_bluetooth_rfkill->user_claim_unsupported = 1;
- sony_bluetooth_rfkill->data = (void *)SONY_BLUETOOTH;
- err = rfkill_register(sony_bluetooth_rfkill);
- if (err)
- rfkill_free(sony_bluetooth_rfkill);
- else {
- sony_rfkill_devices[SONY_BLUETOOTH] = sony_bluetooth_rfkill;
- sony_nc_rfkill_set(sony_bluetooth_rfkill->data,
- RFKILL_STATE_UNBLOCKED);
+ struct rfkill *rfk;
+ enum rfkill_type type;
+ const char *name;
+
+ switch (nc_type) {
+ case SONY_WIFI:
+ type = RFKILL_TYPE_WLAN;
+ name = "sony-wifi";
+ break;
+ case SONY_BLUETOOTH:
+ type = RFKILL_TYPE_BLUETOOTH;
+ name = "sony-bluetooth";
+ break;
+ case SONY_WWAN:
+ type = RFKILL_TYPE_WWAN;
+ name = "sony-wwan";
+ break;
+ case SONY_WIMAX:
+ type = RFKILL_TYPE_WIMAX;
+ name = "sony-wimax";
+ break;
+ default:
+ return -EINVAL;
}
- return err;
-}
-static int sony_nc_setup_wwan_rfkill(struct acpi_device *device)
-{
- int err = 0;
- struct rfkill *sony_wwan_rfkill;
+ rfk = rfkill_alloc(name, &device->dev, type,
+ &sony_rfkill_ops, (void *)nc_type);
+ if (!rfk)
+ return -ENOMEM;
- sony_wwan_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WWAN);
- if (!sony_wwan_rfkill)
- return -1;
- sony_wwan_rfkill->name = "sony-wwan";
- sony_wwan_rfkill->toggle_radio = sony_nc_rfkill_set;
- sony_wwan_rfkill->get_state = sony_nc_rfkill_get;
- sony_wwan_rfkill->user_claim_unsupported = 1;
- sony_wwan_rfkill->data = (void *)SONY_WWAN;
- err = rfkill_register(sony_wwan_rfkill);
- if (err)
- rfkill_free(sony_wwan_rfkill);
- else {
- sony_rfkill_devices[SONY_WWAN] = sony_wwan_rfkill;
- sony_nc_rfkill_set(sony_wwan_rfkill->data,
- RFKILL_STATE_UNBLOCKED);
+ err = rfkill_register(rfk);
+ if (err) {
+ rfkill_destroy(rfk);
+ return err;
}
+ sony_rfkill_devices[nc_type] = rfk;
return err;
}
-static int sony_nc_setup_wimax_rfkill(struct acpi_device *device)
+static void sony_nc_rfkill_update()
{
- int err = 0;
- struct rfkill *sony_wimax_rfkill;
+ enum sony_nc_rfkill i;
+ int result;
+ bool hwblock;
- sony_wimax_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WIMAX);
- if (!sony_wimax_rfkill)
- return -1;
- sony_wimax_rfkill->name = "sony-wimax";
- sony_wimax_rfkill->toggle_radio = sony_nc_rfkill_set;
- sony_wimax_rfkill->get_state = sony_nc_rfkill_get;
- sony_wimax_rfkill->user_claim_unsupported = 1;
- sony_wimax_rfkill->data = (void *)SONY_WIMAX;
- err = rfkill_register(sony_wimax_rfkill);
- if (err)
- rfkill_free(sony_wimax_rfkill);
- else {
- sony_rfkill_devices[SONY_WIMAX] = sony_wimax_rfkill;
- sony_nc_rfkill_set(sony_wimax_rfkill->data,
- RFKILL_STATE_UNBLOCKED);
- }
- return err;
-}
+ sony_call_snc_handle(0x124, 0x200, &result);
+ hwblock = !(result & 0x1);
-static void sony_nc_rfkill_update()
-{
- int i;
- enum rfkill_state state;
+ for (i = 0; i < N_SONY_RFKILL; i++) {
+ int argument = sony_rfkill_address[i];
- for (i = 0; i < SONY_RFKILL_MAX; i++) {
- if (sony_rfkill_devices[i]) {
- sony_rfkill_devices[i]->
- get_state(sony_rfkill_devices[i]->data,
- &state);
- rfkill_force_state(sony_rfkill_devices[i], state);
+ if (!sony_rfkill_devices[i])
+ continue;
+
+ if (hwblock) {
+ if (rfkill_set_hw_state(sony_rfkill_devices[i], true))
+ sony_nc_rfkill_set((void *)i, true);
+ continue;
}
+
+ sony_call_snc_handle(0x124, argument, &result);
+ rfkill_set_states(sony_rfkill_devices[i],
+ !(result & 0xf), false);
}
}
@@ -1214,13 +1159,13 @@ static int sony_nc_rfkill_setup(struct acpi_device *device)
}
if (result & 0x1)
- sony_nc_setup_wifi_rfkill(device);
+ sony_nc_setup_rfkill(device, SONY_WIFI);
if (result & 0x2)
- sony_nc_setup_bluetooth_rfkill(device);
+ sony_nc_setup_rfkill(device, SONY_BLUETOOTH);
if (result & 0x1c)
- sony_nc_setup_wwan_rfkill(device);
+ sony_nc_setup_rfkill(device, SONY_WWAN);
if (result & 0x20)
- sony_nc_setup_wimax_rfkill(device);
+ sony_nc_setup_rfkill(device, SONY_WIMAX);
return 0;
}
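
sony-laptop's update loop separates the hardware kill switch from per-radio soft state: rfkill_set_hw_state() reports the hardware switch (its return value is the combined blocked state), while rfkill_set_states() updates software and hardware state in one call. A small sketch under the same assumptions, with invented fw_* helpers in place of the SNC calls:

#include <linux/rfkill.h>

static struct rfkill *hypo_rfk;			/* registered elsewhere */

static bool fw_hw_switch_blocks_all(void)	/* invented firmware query */
{
	return false;
}

static bool fw_radio_soft_blocked(void)		/* invented firmware query */
{
	return false;
}

static void hypo_rfkill_update(void)
{
	if (fw_hw_switch_blocks_all()) {
		/* return value is the combined (sw || hw) blocked state */
		rfkill_set_hw_state(hypo_rfk, true);
		return;
	}

	/* soft state from firmware; hardware switch known not to block */
	rfkill_set_states(hypo_rfk, fw_radio_soft_blocked(), false);
}
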
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 912be65b626..86e958539f4 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -166,13 +166,6 @@ enum {
#define TPACPI_MAX_ACPI_ARGS 3
-/* rfkill switches */
-enum {
- TPACPI_RFK_BLUETOOTH_SW_ID = 0,
- TPACPI_RFK_WWAN_SW_ID,
- TPACPI_RFK_UWB_SW_ID,
-};
-
/* printk headers */
#define TPACPI_LOG TPACPI_FILE ": "
#define TPACPI_EMERG KERN_EMERG TPACPI_LOG
@@ -1005,67 +998,234 @@ static int __init tpacpi_check_std_acpi_brightness_support(void)
return 0;
}
-static int __init tpacpi_new_rfkill(const unsigned int id,
- struct rfkill **rfk,
+static void printk_deprecated_attribute(const char * const what,
+ const char * const details)
+{
+ tpacpi_log_usertask("deprecated sysfs attribute");
+ printk(TPACPI_WARN "WARNING: sysfs attribute %s is deprecated and "
+ "will be removed. %s\n",
+ what, details);
+}
+
+/*************************************************************************
+ * rfkill and radio control support helpers
+ */
+
+/*
+ * ThinkPad-ACPI firmware handling model:
+ *
+ * WLSW (master wireless switch) is event-driven, and is common to all
+ * firmware-controlled radios. It cannot be controlled, just monitored,
+ * as expected. It overrides all radio state in firmware.
+ *
+ * The kernel, a masked-off hotkey, and WLSW can change the radio state
+ * (TODO: verify how WLSW interacts with the returned radio state).
+ *
+ * The only time there are shadow radio state changes is when
+ * masked-off hotkeys are used.
+ */
+
+/*
+ * Internal driver API for radio state:
+ *
+ * int: < 0 = error, otherwise enum tpacpi_rfkill_state
+ * bool: true means radio blocked (off)
+ */
+enum tpacpi_rfkill_state {
+ TPACPI_RFK_RADIO_OFF = 0,
+ TPACPI_RFK_RADIO_ON
+};
+
+/* rfkill switches */
+enum tpacpi_rfk_id {
+ TPACPI_RFK_BLUETOOTH_SW_ID = 0,
+ TPACPI_RFK_WWAN_SW_ID,
+ TPACPI_RFK_UWB_SW_ID,
+ TPACPI_RFK_SW_MAX
+};
+
+static const char *tpacpi_rfkill_names[] = {
+ [TPACPI_RFK_BLUETOOTH_SW_ID] = "bluetooth",
+ [TPACPI_RFK_WWAN_SW_ID] = "wwan",
+ [TPACPI_RFK_UWB_SW_ID] = "uwb",
+ [TPACPI_RFK_SW_MAX] = NULL
+};
+
+/* ThinkPad-ACPI rfkill subdriver */
+struct tpacpi_rfk {
+ struct rfkill *rfkill;
+ enum tpacpi_rfk_id id;
+ const struct tpacpi_rfk_ops *ops;
+};
+
+struct tpacpi_rfk_ops {
+ /* firmware interface */
+ int (*get_status)(void);
+ int (*set_status)(const enum tpacpi_rfkill_state);
+};
+
+static struct tpacpi_rfk *tpacpi_rfkill_switches[TPACPI_RFK_SW_MAX];
+
+/* Query FW and update rfkill sw state for a given rfkill switch */
+static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk)
+{
+ int status;
+
+ if (!tp_rfk)
+ return -ENODEV;
+
+ status = (tp_rfk->ops->get_status)();
+ if (status < 0)
+ return status;
+
+ rfkill_set_sw_state(tp_rfk->rfkill,
+ (status == TPACPI_RFK_RADIO_OFF));
+
+ return status;
+}
+
+/* Query FW and update rfkill sw state for all rfkill switches */
+static void tpacpi_rfk_update_swstate_all(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < TPACPI_RFK_SW_MAX; i++)
+ tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]);
+}
+
+/*
+ * Sync the HW-blocking state of all rfkill switches;
+ * note that this causes the rfkill core to schedule uevents
+ */
+static void tpacpi_rfk_update_hwblock_state(bool blocked)
+{
+ unsigned int i;
+ struct tpacpi_rfk *tp_rfk;
+
+ for (i = 0; i < TPACPI_RFK_SW_MAX; i++) {
+ tp_rfk = tpacpi_rfkill_switches[i];
+ if (tp_rfk) {
+ if (rfkill_set_hw_state(tp_rfk->rfkill,
+ blocked)) {
+ /* ignore -- we track sw block */
+ }
+ }
+ }
+}
+
+/* Call to get the WLSW state from the firmware */
+static int hotkey_get_wlsw(void);
+
+/* Call to query WLSW state and update all rfkill switches */
+static bool tpacpi_rfk_check_hwblock_state(void)
+{
+ int res = hotkey_get_wlsw();
+ int hw_blocked;
+
+ /* When unknown or unsupported, we have to assume it is unblocked */
+ if (res < 0)
+ return false;
+
+ hw_blocked = (res == TPACPI_RFK_RADIO_OFF);
+ tpacpi_rfk_update_hwblock_state(hw_blocked);
+
+ return hw_blocked;
+}
+
+static int tpacpi_rfk_hook_set_block(void *data, bool blocked)
+{
+ struct tpacpi_rfk *tp_rfk = data;
+ int res;
+
+ dbg_printk(TPACPI_DBG_RFKILL,
+ "request to change radio state to %s\n",
+ blocked ? "blocked" : "unblocked");
+
+ /* try to set radio state */
+ res = (tp_rfk->ops->set_status)(blocked ?
+ TPACPI_RFK_RADIO_OFF : TPACPI_RFK_RADIO_ON);
+
+ /* and update the rfkill core with whatever the FW really did */
+ tpacpi_rfk_update_swstate(tp_rfk);
+
+ return (res < 0) ? res : 0;
+}
+
+static const struct rfkill_ops tpacpi_rfk_rfkill_ops = {
+ .set_block = tpacpi_rfk_hook_set_block,
+};
+
+static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
+ const struct tpacpi_rfk_ops *tp_rfkops,
const enum rfkill_type rfktype,
const char *name,
- const bool set_default,
- int (*toggle_radio)(void *, enum rfkill_state),
- int (*get_state)(void *, enum rfkill_state *))
+ const bool set_default)
{
+ struct tpacpi_rfk *atp_rfk;
int res;
- enum rfkill_state initial_state = RFKILL_STATE_SOFT_BLOCKED;
-
- res = get_state(NULL, &initial_state);
- if (res < 0) {
- printk(TPACPI_ERR
- "failed to read initial state for %s, error %d; "
- "will turn radio off\n", name, res);
- } else if (set_default) {
- /* try to set the initial state as the default for the rfkill
- * type, since we ask the firmware to preserve it across S5 in
- * NVRAM */
- if (rfkill_set_default(rfktype,
- (initial_state == RFKILL_STATE_UNBLOCKED) ?
- RFKILL_STATE_UNBLOCKED :
- RFKILL_STATE_SOFT_BLOCKED) == -EPERM)
- vdbg_printk(TPACPI_DBG_RFKILL,
- "Default state for %s cannot be changed\n",
- name);
- }
-
- *rfk = rfkill_allocate(&tpacpi_pdev->dev, rfktype);
- if (!*rfk) {
+ bool initial_sw_state = false;
+ int initial_sw_status;
+
+ BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);
+
+ atp_rfk = kzalloc(sizeof(struct tpacpi_rfk), GFP_KERNEL);
+ if (atp_rfk)
+ atp_rfk->rfkill = rfkill_alloc(name,
+ &tpacpi_pdev->dev,
+ rfktype,
+ &tpacpi_rfk_rfkill_ops,
+ atp_rfk);
+ if (!atp_rfk || !atp_rfk->rfkill) {
printk(TPACPI_ERR
"failed to allocate memory for rfkill class\n");
+ kfree(atp_rfk);
return -ENOMEM;
}
- (*rfk)->name = name;
- (*rfk)->get_state = get_state;
- (*rfk)->toggle_radio = toggle_radio;
- (*rfk)->state = initial_state;
+ atp_rfk->id = id;
+ atp_rfk->ops = tp_rfkops;
+
+ initial_sw_status = (tp_rfkops->get_status)();
+ if (initial_sw_status < 0) {
+ printk(TPACPI_ERR
+ "failed to read initial state for %s, error %d\n",
+ name, initial_sw_status);
+ } else {
+ initial_sw_state = (initial_sw_status == TPACPI_RFK_RADIO_OFF);
+ if (set_default) {
+ /* try to keep the initial state, since we ask the
+ * firmware to preserve it across S5 in NVRAM */
+ rfkill_set_sw_state(atp_rfk->rfkill, initial_sw_state);
+ }
+ }
+ rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state());
- res = rfkill_register(*rfk);
+ res = rfkill_register(atp_rfk->rfkill);
if (res < 0) {
printk(TPACPI_ERR
"failed to register %s rfkill switch: %d\n",
name, res);
- rfkill_free(*rfk);
- *rfk = NULL;
+ rfkill_destroy(atp_rfk->rfkill);
+ kfree(atp_rfk);
return res;
}
+ tpacpi_rfkill_switches[id] = atp_rfk;
return 0;
}
-static void printk_deprecated_attribute(const char * const what,
- const char * const details)
+static void tpacpi_destroy_rfkill(const enum tpacpi_rfk_id id)
{
- tpacpi_log_usertask("deprecated sysfs attribute");
- printk(TPACPI_WARN "WARNING: sysfs attribute %s is deprecated and "
- "will be removed. %s\n",
- what, details);
+ struct tpacpi_rfk *tp_rfk;
+
+ BUG_ON(id >= TPACPI_RFK_SW_MAX);
+
+ tp_rfk = tpacpi_rfkill_switches[id];
+ if (tp_rfk) {
+ rfkill_unregister(tp_rfk->rfkill);
+ tpacpi_rfkill_switches[id] = NULL;
+ kfree(tp_rfk);
+ }
}
static void printk_deprecated_rfkill_attribute(const char * const what)
@@ -1074,6 +1234,112 @@ static void printk_deprecated_rfkill_attribute(const char * const what)
"Please switch to generic rfkill before year 2010");
}
+/* sysfs <radio> enable ------------------------------------------------ */
+static ssize_t tpacpi_rfk_sysfs_enable_show(const enum tpacpi_rfk_id id,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int status;
+
+ printk_deprecated_rfkill_attribute(attr->attr.name);
+
+ /* This is in the ABI... */
+ if (tpacpi_rfk_check_hwblock_state()) {
+ status = TPACPI_RFK_RADIO_OFF;
+ } else {
+ status = tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[id]);
+ if (status < 0)
+ return status;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ (status == TPACPI_RFK_RADIO_ON) ? 1 : 0);
+}
+
+static ssize_t tpacpi_rfk_sysfs_enable_store(const enum tpacpi_rfk_id id,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+ int res;
+
+ printk_deprecated_rfkill_attribute(attr->attr.name);
+
+ if (parse_strtoul(buf, 1, &t))
+ return -EINVAL;
+
+ tpacpi_disclose_usertask(attr->attr.name, "set to %ld\n", t);
+
+ /* This is in the ABI... */
+ if (tpacpi_rfk_check_hwblock_state() && !!t)
+ return -EPERM;
+
+ res = tpacpi_rfkill_switches[id]->ops->set_status((!!t) ?
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF);
+ tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[id]);
+
+ return (res < 0) ? res : count;
+}
+
+/* procfs -------------------------------------------------------------- */
+static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, char *p)
+{
+ int len = 0;
+
+ if (id >= TPACPI_RFK_SW_MAX)
+ len += sprintf(p + len, "status:\t\tnot supported\n");
+ else {
+ int status;
+
+ /* This is in the ABI... */
+ if (tpacpi_rfk_check_hwblock_state()) {
+ status = TPACPI_RFK_RADIO_OFF;
+ } else {
+ status = tpacpi_rfk_update_swstate(
+ tpacpi_rfkill_switches[id]);
+ if (status < 0)
+ return status;
+ }
+
+ len += sprintf(p + len, "status:\t\t%s\n",
+ (status == TPACPI_RFK_RADIO_ON) ?
+ "enabled" : "disabled");
+ len += sprintf(p + len, "commands:\tenable, disable\n");
+ }
+
+ return len;
+}
+
+static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
+{
+ char *cmd;
+ int status = -1;
+ int res = 0;
+
+ if (id >= TPACPI_RFK_SW_MAX)
+ return -ENODEV;
+
+ while ((cmd = next_cmd(&buf))) {
+ if (strlencmp(cmd, "enable") == 0)
+ status = TPACPI_RFK_RADIO_ON;
+ else if (strlencmp(cmd, "disable") == 0)
+ status = TPACPI_RFK_RADIO_OFF;
+ else
+ return -EINVAL;
+ }
+
+ if (status != -1) {
+ tpacpi_disclose_usertask("procfs", "attempt to %s %s\n",
+ (status == TPACPI_RFK_RADIO_ON) ?
+ "enable" : "disable",
+ tpacpi_rfkill_names[id]);
+ res = (tpacpi_rfkill_switches[id]->ops->set_status)(status);
+ tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[id]);
+ }
+
+ return res;
+}
+
/*************************************************************************
* thinkpad-acpi driver attributes
*/
@@ -1127,8 +1393,6 @@ static DRIVER_ATTR(version, S_IRUGO,
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
-static void tpacpi_send_radiosw_update(void);
-
/* wlsw_emulstate ------------------------------------------------------ */
static ssize_t tpacpi_driver_wlsw_emulstate_show(struct device_driver *drv,
char *buf)
@@ -1144,11 +1408,10 @@ static ssize_t tpacpi_driver_wlsw_emulstate_store(struct device_driver *drv,
if (parse_strtoul(buf, 1, &t))
return -EINVAL;
- if (tpacpi_wlsw_emulstate != t) {
- tpacpi_wlsw_emulstate = !!t;
- tpacpi_send_radiosw_update();
- } else
+ if (tpacpi_wlsw_emulstate != !!t) {
tpacpi_wlsw_emulstate = !!t;
+ tpacpi_rfk_update_hwblock_state(!t); /* negative logic */
+ }
return count;
}
@@ -1463,17 +1726,23 @@ static struct attribute_set *hotkey_dev_attributes;
/* HKEY.MHKG() return bits */
#define TP_HOTKEY_TABLET_MASK (1 << 3)
-static int hotkey_get_wlsw(int *status)
+static int hotkey_get_wlsw(void)
{
+ int status;
+
+ if (!tp_features.hotkey_wlsw)
+ return -ENODEV;
+
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
- if (dbg_wlswemul) {
- *status = !!tpacpi_wlsw_emulstate;
- return 0;
- }
+ if (dbg_wlswemul)
+ return (tpacpi_wlsw_emulstate) ?
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
#endif
- if (!acpi_evalf(hkey_handle, status, "WLSW", "d"))
+
+ if (!acpi_evalf(hkey_handle, &status, "WLSW", "d"))
return -EIO;
- return 0;
+
+ return (status) ? TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
}
static int hotkey_get_tablet_mode(int *status)
@@ -2107,12 +2376,16 @@ static ssize_t hotkey_radio_sw_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int res, s;
- res = hotkey_get_wlsw(&s);
+ int res;
+ res = hotkey_get_wlsw();
if (res < 0)
return res;
- return snprintf(buf, PAGE_SIZE, "%d\n", !!s);
+ /* Opportunistic update */
+ tpacpi_rfk_update_hwblock_state((res == TPACPI_RFK_RADIO_OFF));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ (res == TPACPI_RFK_RADIO_OFF) ? 0 : 1);
}
static struct device_attribute dev_attr_hotkey_radio_sw =
@@ -2223,30 +2496,52 @@ static struct attribute *hotkey_mask_attributes[] __initdata = {
&dev_attr_hotkey_wakeup_hotunplug_complete.attr,
};
-static void bluetooth_update_rfk(void);
-static void wan_update_rfk(void);
-static void uwb_update_rfk(void);
+/*
+ * Sync both the hw and sw blocking state of all switches
+ */
static void tpacpi_send_radiosw_update(void)
{
int wlsw;
- /* Sync these BEFORE sending any rfkill events */
- if (tp_features.bluetooth)
- bluetooth_update_rfk();
- if (tp_features.wan)
- wan_update_rfk();
- if (tp_features.uwb)
- uwb_update_rfk();
+ /*
+ * We must sync all rfkill controllers *before* issuing any
+ * rfkill input events, or we will race the rfkill core input
+ * handler.
+ *
+ * tpacpi_inputdev_send_mutex works as a synchronization point
+ * for the above.
+ *
+ * We optimize to avoid numerous calls to hotkey_get_wlsw.
+ */
+
+ wlsw = hotkey_get_wlsw();
+
+ /* Sync hw blocking state first if it is hw-blocked */
+ if (wlsw == TPACPI_RFK_RADIO_OFF)
+ tpacpi_rfk_update_hwblock_state(true);
- if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
+ /* Sync sw blocking state */
+ tpacpi_rfk_update_swstate_all();
+
+ /* Sync hw blocking state last if it is hw-unblocked */
+ if (wlsw == TPACPI_RFK_RADIO_ON)
+ tpacpi_rfk_update_hwblock_state(false);
+
+ /* Issue rfkill input event for WLSW switch */
+ if (!(wlsw < 0)) {
mutex_lock(&tpacpi_inputdev_send_mutex);
input_report_switch(tpacpi_inputdev,
- SW_RFKILL_ALL, !!wlsw);
+ SW_RFKILL_ALL, (wlsw > 0));
input_sync(tpacpi_inputdev);
mutex_unlock(&tpacpi_inputdev_send_mutex);
}
+
+ /*
+ * this can be unconditional, as we will poll state again
+ * if userspace uses the notify to read data
+ */
hotkey_radio_sw_notify_change();
}
@@ -3056,8 +3351,6 @@ enum {
#define TPACPI_RFK_BLUETOOTH_SW_NAME "tpacpi_bluetooth_sw"
-static struct rfkill *tpacpi_bluetooth_rfkill;
-
static void bluetooth_suspend(pm_message_t state)
{
/* Try to make sure radio will resume powered off */
@@ -3067,83 +3360,47 @@ static void bluetooth_suspend(pm_message_t state)
"bluetooth power down on resume request failed\n");
}
-static int bluetooth_get_radiosw(void)
+static int bluetooth_get_status(void)
{
int status;
- if (!tp_features.bluetooth)
- return -ENODEV;
-
- /* WLSW overrides bluetooth in firmware/hardware, reflect that */
- if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
- return RFKILL_STATE_HARD_BLOCKED;
-
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_bluetoothemul)
return (tpacpi_bluetooth_emulstate) ?
- RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
#endif
if (!acpi_evalf(hkey_handle, &status, "GBDC", "d"))
return -EIO;
return ((status & TP_ACPI_BLUETOOTH_RADIOSSW) != 0) ?
- RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
-}
-
-static void bluetooth_update_rfk(void)
-{
- int status;
-
- if (!tpacpi_bluetooth_rfkill)
- return;
-
- status = bluetooth_get_radiosw();
- if (status < 0)
- return;
- rfkill_force_state(tpacpi_bluetooth_rfkill, status);
-
- vdbg_printk(TPACPI_DBG_RFKILL,
- "forced rfkill state to %d\n",
- status);
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
}
-static int bluetooth_set_radiosw(int radio_on, int update_rfk)
+static int bluetooth_set_status(enum tpacpi_rfkill_state state)
{
int status;
- if (!tp_features.bluetooth)
- return -ENODEV;
-
- /* WLSW overrides bluetooth in firmware/hardware, but there is no
- * reason to risk weird behaviour. */
- if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status
- && radio_on)
- return -EPERM;
-
vdbg_printk(TPACPI_DBG_RFKILL,
- "will %s bluetooth\n", radio_on ? "enable" : "disable");
+ "will attempt to %s bluetooth\n",
+ (state == TPACPI_RFK_RADIO_ON) ? "enable" : "disable");
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_bluetoothemul) {
- tpacpi_bluetooth_emulstate = !!radio_on;
- if (update_rfk)
- bluetooth_update_rfk();
+ tpacpi_bluetooth_emulstate = (state == TPACPI_RFK_RADIO_ON);
return 0;
}
#endif
/* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
- if (radio_on)
+ if (state == TPACPI_RFK_RADIO_ON)
status = TP_ACPI_BLUETOOTH_RADIOSSW;
else
status = 0;
+
if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
return -EIO;
- if (update_rfk)
- bluetooth_update_rfk();
-
return 0;
}
@@ -3152,35 +3409,16 @@ static ssize_t bluetooth_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int status;
-
- printk_deprecated_rfkill_attribute("bluetooth_enable");
-
- status = bluetooth_get_radiosw();
- if (status < 0)
- return status;
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (status == RFKILL_STATE_UNBLOCKED) ? 1 : 0);
+ return tpacpi_rfk_sysfs_enable_show(TPACPI_RFK_BLUETOOTH_SW_ID,
+ attr, buf);
}
static ssize_t bluetooth_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- unsigned long t;
- int res;
-
- printk_deprecated_rfkill_attribute("bluetooth_enable");
-
- if (parse_strtoul(buf, 1, &t))
- return -EINVAL;
-
- tpacpi_disclose_usertask("bluetooth_enable", "set to %ld\n", t);
-
- res = bluetooth_set_radiosw(t, 1);
-
- return (res) ? res : count;
+ return tpacpi_rfk_sysfs_enable_store(TPACPI_RFK_BLUETOOTH_SW_ID,
+ attr, buf, count);
}
static struct device_attribute dev_attr_bluetooth_enable =
@@ -3198,23 +3436,10 @@ static const struct attribute_group bluetooth_attr_group = {
.attrs = bluetooth_attributes,
};
-static int tpacpi_bluetooth_rfk_get(void *data, enum rfkill_state *state)
-{
- int bts = bluetooth_get_radiosw();
-
- if (bts < 0)
- return bts;
-
- *state = bts;
- return 0;
-}
-
-static int tpacpi_bluetooth_rfk_set(void *data, enum rfkill_state state)
-{
- dbg_printk(TPACPI_DBG_RFKILL,
- "request to change radio state to %d\n", state);
- return bluetooth_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
-}
+static const struct tpacpi_rfk_ops bluetooth_tprfk_ops = {
+ .get_status = bluetooth_get_status,
+ .set_status = bluetooth_set_status,
+};
static void bluetooth_shutdown(void)
{
@@ -3230,13 +3455,12 @@ static void bluetooth_shutdown(void)
static void bluetooth_exit(void)
{
- bluetooth_shutdown();
-
- if (tpacpi_bluetooth_rfkill)
- rfkill_unregister(tpacpi_bluetooth_rfkill);
-
sysfs_remove_group(&tpacpi_pdev->dev.kobj,
&bluetooth_attr_group);
+
+ tpacpi_destroy_rfkill(TPACPI_RFK_BLUETOOTH_SW_ID);
+
+ bluetooth_shutdown();
}
static int __init bluetooth_init(struct ibm_init_struct *iibm)
@@ -3277,20 +3501,18 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
if (!tp_features.bluetooth)
return 1;
- res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
- &bluetooth_attr_group);
- if (res)
- return res;
-
res = tpacpi_new_rfkill(TPACPI_RFK_BLUETOOTH_SW_ID,
- &tpacpi_bluetooth_rfkill,
+ &bluetooth_tprfk_ops,
RFKILL_TYPE_BLUETOOTH,
TPACPI_RFK_BLUETOOTH_SW_NAME,
- true,
- tpacpi_bluetooth_rfk_set,
- tpacpi_bluetooth_rfk_get);
+ true);
+ if (res)
+ return res;
+
+ res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
+ &bluetooth_attr_group);
if (res) {
- bluetooth_exit();
+ tpacpi_destroy_rfkill(TPACPI_RFK_BLUETOOTH_SW_ID);
return res;
}
@@ -3300,46 +3522,12 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
/* procfs -------------------------------------------------------------- */
static int bluetooth_read(char *p)
{
- int len = 0;
- int status = bluetooth_get_radiosw();
-
- if (!tp_features.bluetooth)
- len += sprintf(p + len, "status:\t\tnot supported\n");
- else {
- len += sprintf(p + len, "status:\t\t%s\n",
- (status == RFKILL_STATE_UNBLOCKED) ?
- "enabled" : "disabled");
- len += sprintf(p + len, "commands:\tenable, disable\n");
- }
-
- return len;
+ return tpacpi_rfk_procfs_read(TPACPI_RFK_BLUETOOTH_SW_ID, p);
}
static int bluetooth_write(char *buf)
{
- char *cmd;
- int state = -1;
-
- if (!tp_features.bluetooth)
- return -ENODEV;
-
- while ((cmd = next_cmd(&buf))) {
- if (strlencmp(cmd, "enable") == 0) {
- state = 1;
- } else if (strlencmp(cmd, "disable") == 0) {
- state = 0;
- } else
- return -EINVAL;
- }
-
- if (state != -1) {
- tpacpi_disclose_usertask("procfs bluetooth",
- "attempt to %s\n",
- state ? "enable" : "disable");
- bluetooth_set_radiosw(state, 1);
- }
-
- return 0;
+ return tpacpi_rfk_procfs_write(TPACPI_RFK_BLUETOOTH_SW_ID, buf);
}
static struct ibm_struct bluetooth_driver_data = {
@@ -3365,8 +3553,6 @@ enum {
#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw"
-static struct rfkill *tpacpi_wan_rfkill;
-
static void wan_suspend(pm_message_t state)
{
/* Try to make sure radio will resume powered off */
@@ -3376,83 +3562,47 @@ static void wan_suspend(pm_message_t state)
"WWAN power down on resume request failed\n");
}
-static int wan_get_radiosw(void)
+static int wan_get_status(void)
{
int status;
- if (!tp_features.wan)
- return -ENODEV;
-
- /* WLSW overrides WWAN in firmware/hardware, reflect that */
- if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
- return RFKILL_STATE_HARD_BLOCKED;
-
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_wwanemul)
return (tpacpi_wwan_emulstate) ?
- RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
#endif
if (!acpi_evalf(hkey_handle, &status, "GWAN", "d"))
return -EIO;
return ((status & TP_ACPI_WANCARD_RADIOSSW) != 0) ?
- RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
-}
-
-static void wan_update_rfk(void)
-{
- int status;
-
- if (!tpacpi_wan_rfkill)
- return;
-
- status = wan_get_radiosw();
- if (status < 0)
- return;
- rfkill_force_state(tpacpi_wan_rfkill, status);
-
- vdbg_printk(TPACPI_DBG_RFKILL,
- "forced rfkill state to %d\n",
- status);
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
}
-static int wan_set_radiosw(int radio_on, int update_rfk)
+static int wan_set_status(enum tpacpi_rfkill_state state)
{
int status;
- if (!tp_features.wan)
- return -ENODEV;
-
- /* WLSW overrides bluetooth in firmware/hardware, but there is no
- * reason to risk weird behaviour. */
- if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status
- && radio_on)
- return -EPERM;
-
vdbg_printk(TPACPI_DBG_RFKILL,
- "will %s WWAN\n", radio_on ? "enable" : "disable");
+ "will attempt to %s wwan\n",
+ (state == TPACPI_RFK_RADIO_ON) ? "enable" : "disable");
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_wwanemul) {
- tpacpi_wwan_emulstate = !!radio_on;
- if (update_rfk)
- wan_update_rfk();
+ tpacpi_wwan_emulstate = (state == TPACPI_RFK_RADIO_ON);
return 0;
}
#endif
/* We make sure to keep TP_ACPI_WANCARD_RESUMECTRL off */
- if (radio_on)
+ if (state == TPACPI_RFK_RADIO_ON)
status = TP_ACPI_WANCARD_RADIOSSW;
else
status = 0;
+
if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
return -EIO;
- if (update_rfk)
- wan_update_rfk();
-
return 0;
}
@@ -3461,35 +3611,16 @@ static ssize_t wan_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int status;
-
- printk_deprecated_rfkill_attribute("wwan_enable");
-
- status = wan_get_radiosw();
- if (status < 0)
- return status;
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (status == RFKILL_STATE_UNBLOCKED) ? 1 : 0);
+ return tpacpi_rfk_sysfs_enable_show(TPACPI_RFK_WWAN_SW_ID,
+ attr, buf);
}
static ssize_t wan_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- unsigned long t;
- int res;
-
- printk_deprecated_rfkill_attribute("wwan_enable");
-
- if (parse_strtoul(buf, 1, &t))
- return -EINVAL;
-
- tpacpi_disclose_usertask("wwan_enable", "set to %ld\n", t);
-
- res = wan_set_radiosw(t, 1);
-
- return (res) ? res : count;
+ return tpacpi_rfk_sysfs_enable_store(TPACPI_RFK_WWAN_SW_ID,
+ attr, buf, count);
}
static struct device_attribute dev_attr_wan_enable =
@@ -3507,23 +3638,10 @@ static const struct attribute_group wan_attr_group = {
.attrs = wan_attributes,
};
-static int tpacpi_wan_rfk_get(void *data, enum rfkill_state *state)
-{
- int wans = wan_get_radiosw();
-
- if (wans < 0)
- return wans;
-
- *state = wans;
- return 0;
-}
-
-static int tpacpi_wan_rfk_set(void *data, enum rfkill_state state)
-{
- dbg_printk(TPACPI_DBG_RFKILL,
- "request to change radio state to %d\n", state);
- return wan_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
-}
+static const struct tpacpi_rfk_ops wan_tprfk_ops = {
+ .get_status = wan_get_status,
+ .set_status = wan_set_status,
+};
static void wan_shutdown(void)
{
@@ -3539,13 +3657,12 @@ static void wan_shutdown(void)
static void wan_exit(void)
{
- wan_shutdown();
-
- if (tpacpi_wan_rfkill)
- rfkill_unregister(tpacpi_wan_rfkill);
-
sysfs_remove_group(&tpacpi_pdev->dev.kobj,
&wan_attr_group);
+
+ tpacpi_destroy_rfkill(TPACPI_RFK_WWAN_SW_ID);
+
+ wan_shutdown();
}
static int __init wan_init(struct ibm_init_struct *iibm)
@@ -3584,20 +3701,19 @@ static int __init wan_init(struct ibm_init_struct *iibm)
if (!tp_features.wan)
return 1;
- res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
- &wan_attr_group);
- if (res)
- return res;
-
res = tpacpi_new_rfkill(TPACPI_RFK_WWAN_SW_ID,
- &tpacpi_wan_rfkill,
+ &wan_tprfk_ops,
RFKILL_TYPE_WWAN,
TPACPI_RFK_WWAN_SW_NAME,
- true,
- tpacpi_wan_rfk_set,
- tpacpi_wan_rfk_get);
+ true);
+ if (res)
+ return res;
+
+ res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
+ &wan_attr_group);
+
if (res) {
- wan_exit();
+ tpacpi_destroy_rfkill(TPACPI_RFK_WWAN_SW_ID);
return res;
}
@@ -3607,48 +3723,12 @@ static int __init wan_init(struct ibm_init_struct *iibm)
/* procfs -------------------------------------------------------------- */
static int wan_read(char *p)
{
- int len = 0;
- int status = wan_get_radiosw();
-
- tpacpi_disclose_usertask("procfs wan", "read");
-
- if (!tp_features.wan)
- len += sprintf(p + len, "status:\t\tnot supported\n");
- else {
- len += sprintf(p + len, "status:\t\t%s\n",
- (status == RFKILL_STATE_UNBLOCKED) ?
- "enabled" : "disabled");
- len += sprintf(p + len, "commands:\tenable, disable\n");
- }
-
- return len;
+ return tpacpi_rfk_procfs_read(TPACPI_RFK_WWAN_SW_ID, p);
}
static int wan_write(char *buf)
{
- char *cmd;
- int state = -1;
-
- if (!tp_features.wan)
- return -ENODEV;
-
- while ((cmd = next_cmd(&buf))) {
- if (strlencmp(cmd, "enable") == 0) {
- state = 1;
- } else if (strlencmp(cmd, "disable") == 0) {
- state = 0;
- } else
- return -EINVAL;
- }
-
- if (state != -1) {
- tpacpi_disclose_usertask("procfs wan",
- "attempt to %s\n",
- state ? "enable" : "disable");
- wan_set_radiosw(state, 1);
- }
-
- return 0;
+ return tpacpi_rfk_procfs_write(TPACPI_RFK_WWAN_SW_ID, buf);
}
static struct ibm_struct wan_driver_data = {
@@ -3672,108 +3752,59 @@ enum {
#define TPACPI_RFK_UWB_SW_NAME "tpacpi_uwb_sw"
-static struct rfkill *tpacpi_uwb_rfkill;
-
-static int uwb_get_radiosw(void)
+static int uwb_get_status(void)
{
int status;
- if (!tp_features.uwb)
- return -ENODEV;
-
- /* WLSW overrides UWB in firmware/hardware, reflect that */
- if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
- return RFKILL_STATE_HARD_BLOCKED;
-
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_uwbemul)
return (tpacpi_uwb_emulstate) ?
- RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
#endif
if (!acpi_evalf(hkey_handle, &status, "GUWB", "d"))
return -EIO;
return ((status & TP_ACPI_UWB_RADIOSSW) != 0) ?
- RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
+ TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
}
-static void uwb_update_rfk(void)
+static int uwb_set_status(enum tpacpi_rfkill_state state)
{
int status;
- if (!tpacpi_uwb_rfkill)
- return;
-
- status = uwb_get_radiosw();
- if (status < 0)
- return;
- rfkill_force_state(tpacpi_uwb_rfkill, status);
-
vdbg_printk(TPACPI_DBG_RFKILL,
- "forced rfkill state to %d\n",
- status);
-}
-
-static int uwb_set_radiosw(int radio_on, int update_rfk)
-{
- int status;
-
- if (!tp_features.uwb)
- return -ENODEV;
-
- /* WLSW overrides UWB in firmware/hardware, but there is no
- * reason to risk weird behaviour. */
- if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status
- && radio_on)
- return -EPERM;
-
- vdbg_printk(TPACPI_DBG_RFKILL,
- "will %s UWB\n", radio_on ? "enable" : "disable");
+ "will attempt to %s UWB\n",
+ (state == TPACPI_RFK_RADIO_ON) ? "enable" : "disable");
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_uwbemul) {
- tpacpi_uwb_emulstate = !!radio_on;
- if (update_rfk)
- uwb_update_rfk();
+ tpacpi_uwb_emulstate = (state == TPACPI_RFK_RADIO_ON);
return 0;
}
#endif
- status = (radio_on) ? TP_ACPI_UWB_RADIOSSW : 0;
+ if (state == TPACPI_RFK_RADIO_ON)
+ status = TP_ACPI_UWB_RADIOSSW;
+ else
+ status = 0;
+
if (!acpi_evalf(hkey_handle, NULL, "SUWB", "vd", status))
return -EIO;
- if (update_rfk)
- uwb_update_rfk();
-
return 0;
}
/* --------------------------------------------------------------------- */
-static int tpacpi_uwb_rfk_get(void *data, enum rfkill_state *state)
-{
- int uwbs = uwb_get_radiosw();
-
- if (uwbs < 0)
- return uwbs;
-
- *state = uwbs;
- return 0;
-}
-
-static int tpacpi_uwb_rfk_set(void *data, enum rfkill_state state)
-{
- dbg_printk(TPACPI_DBG_RFKILL,
- "request to change radio state to %d\n", state);
- return uwb_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
-}
+static const struct tpacpi_rfk_ops uwb_tprfk_ops = {
+ .get_status = uwb_get_status,
+ .set_status = uwb_set_status,
+};
static void uwb_exit(void)
{
- if (tpacpi_uwb_rfkill)
- rfkill_unregister(tpacpi_uwb_rfkill);
+ tpacpi_destroy_rfkill(TPACPI_RFK_UWB_SW_ID);
}
static int __init uwb_init(struct ibm_init_struct *iibm)
@@ -3813,13 +3844,10 @@ static int __init uwb_init(struct ibm_init_struct *iibm)
return 1;
res = tpacpi_new_rfkill(TPACPI_RFK_UWB_SW_ID,
- &tpacpi_uwb_rfkill,
+ &uwb_tprfk_ops,
RFKILL_TYPE_UWB,
TPACPI_RFK_UWB_SW_NAME,
- false,
- tpacpi_uwb_rfk_set,
- tpacpi_uwb_rfk_get);
-
+ false);
return res;
}
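
thinkpad-acpi layers its own get_status/set_status pair on top, so one rfkill_ops.set_block callback can serve the bluetooth, WWAN and UWB switches and resync the soft state with whatever the firmware actually did. Roughly, with invented hypo_* names rather than the driver's real ones:

#include <linux/rfkill.h>

struct hypo_radio_ops {
	int (*get_status)(void);	/* 0 = radio off, 1 = on, < 0 = error */
	int (*set_status)(int on);
};

struct hypo_radio {
	struct rfkill *rfkill;
	const struct hypo_radio_ops *ops;
};

static int hypo_set_block(void *data, bool blocked)
{
	struct hypo_radio *r = data;
	int res = r->ops->set_status(!blocked);
	int status;

	/* resync with whatever the firmware really did */
	status = r->ops->get_status();
	if (status >= 0)
		rfkill_set_sw_state(r->rfkill, status == 0);

	return res < 0 ? res : 0;
}

static const struct rfkill_ops hypo_rfkill_ops = {
	.set_block = hypo_set_block,
};
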
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 9f187265db8..81d31ea507d 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -45,7 +45,6 @@
#include <linux/backlight.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
-#include <linux/input-polldev.h>
#include <asm/uaccess.h>
@@ -250,21 +249,15 @@ static acpi_status hci_read2(u32 reg, u32 *out1, u32 *out2, u32 *result)
struct toshiba_acpi_dev {
struct platform_device *p_dev;
- struct rfkill *rfk_dev;
- struct input_polled_dev *poll_dev;
+ struct rfkill *bt_rfk;
const char *bt_name;
- const char *rfk_name;
-
- bool last_rfk_state;
struct mutex mutex;
};
static struct toshiba_acpi_dev toshiba_acpi = {
.bt_name = "Toshiba Bluetooth",
- .rfk_name = "Toshiba RFKill Switch",
- .last_rfk_state = false,
};
/* Bluetooth rfkill handlers */
@@ -283,21 +276,6 @@ static u32 hci_get_bt_present(bool *present)
return hci_result;
}
-static u32 hci_get_bt_on(bool *on)
-{
- u32 hci_result;
- u32 value, value2;
-
- value = 0;
- value2 = 0x0001;
- hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
- if (hci_result == HCI_SUCCESS)
- *on = (value & HCI_WIRELESS_BT_POWER) &&
- (value & HCI_WIRELESS_BT_ATTACH);
-
- return hci_result;
-}
-
static u32 hci_get_radio_state(bool *radio_state)
{
u32 hci_result;
@@ -311,70 +289,67 @@ static u32 hci_get_radio_state(bool *radio_state)
return hci_result;
}
-static int bt_rfkill_toggle_radio(void *data, enum rfkill_state state)
+static int bt_rfkill_set_block(void *data, bool blocked)
{
+ struct toshiba_acpi_dev *dev = data;
u32 result1, result2;
u32 value;
+ int err;
bool radio_state;
- struct toshiba_acpi_dev *dev = data;
- value = (state == RFKILL_STATE_UNBLOCKED);
+ value = (blocked == false);
- if (hci_get_radio_state(&radio_state) != HCI_SUCCESS)
- return -EFAULT;
+ mutex_lock(&dev->mutex);
+ if (hci_get_radio_state(&radio_state) != HCI_SUCCESS) {
+ err = -EBUSY;
+ goto out;
+ }
- switch (state) {
- case RFKILL_STATE_UNBLOCKED:
- if (!radio_state)
- return -EPERM;
- break;
- case RFKILL_STATE_SOFT_BLOCKED:
- break;
- default:
- return -EINVAL;
+ if (!radio_state) {
+ err = 0;
+ goto out;
}
- mutex_lock(&dev->mutex);
hci_write2(HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER, &result1);
hci_write2(HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH, &result2);
- mutex_unlock(&dev->mutex);
if (result1 != HCI_SUCCESS || result2 != HCI_SUCCESS)
- return -EFAULT;
-
- return 0;
+ err = -EBUSY;
+ else
+ err = 0;
+ out:
+ mutex_unlock(&dev->mutex);
+ return err;
}
-static void bt_poll_rfkill(struct input_polled_dev *poll_dev)
+static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
{
- bool state_changed;
bool new_rfk_state;
bool value;
u32 hci_result;
- struct toshiba_acpi_dev *dev = poll_dev->private;
+ struct toshiba_acpi_dev *dev = data;
+
+ mutex_lock(&dev->mutex);
hci_result = hci_get_radio_state(&value);
- if (hci_result != HCI_SUCCESS)
- return; /* Can't do anything useful */
+ if (hci_result != HCI_SUCCESS) {
+ /* Can't do anything useful */
+ mutex_unlock(&dev->mutex);
+ return;
+ }
new_rfk_state = value;
- mutex_lock(&dev->mutex);
- state_changed = new_rfk_state != dev->last_rfk_state;
- dev->last_rfk_state = new_rfk_state;
mutex_unlock(&dev->mutex);
- if (unlikely(state_changed)) {
- rfkill_force_state(dev->rfk_dev,
- new_rfk_state ?
- RFKILL_STATE_SOFT_BLOCKED :
- RFKILL_STATE_HARD_BLOCKED);
- input_report_switch(poll_dev->input, SW_RFKILL_ALL,
- new_rfk_state);
- input_sync(poll_dev->input);
- }
+ if (rfkill_set_hw_state(rfkill, !new_rfk_state))
+ bt_rfkill_set_block(data, true);
}
+static const struct rfkill_ops toshiba_rfk_ops = {
+ .set_block = bt_rfkill_set_block,
+ .poll = bt_rfkill_poll,
+};
+
static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
static struct backlight_device *toshiba_backlight_device;
static int force_fan;
@@ -702,14 +677,11 @@ static struct backlight_ops toshiba_backlight_data = {
static void toshiba_acpi_exit(void)
{
- if (toshiba_acpi.poll_dev) {
- input_unregister_polled_device(toshiba_acpi.poll_dev);
- input_free_polled_device(toshiba_acpi.poll_dev);
+ if (toshiba_acpi.bt_rfk) {
+ rfkill_unregister(toshiba_acpi.bt_rfk);
+ rfkill_destroy(toshiba_acpi.bt_rfk);
}
- if (toshiba_acpi.rfk_dev)
- rfkill_unregister(toshiba_acpi.rfk_dev);
-
if (toshiba_backlight_device)
backlight_device_unregister(toshiba_backlight_device);
@@ -728,8 +700,6 @@ static int __init toshiba_acpi_init(void)
acpi_status status = AE_OK;
u32 hci_result;
bool bt_present;
- bool bt_on;
- bool radio_on;
int ret = 0;
if (acpi_disabled)
@@ -793,61 +763,21 @@ static int __init toshiba_acpi_init(void)
/* Register rfkill switch for Bluetooth */
if (hci_get_bt_present(&bt_present) == HCI_SUCCESS && bt_present) {
- toshiba_acpi.rfk_dev = rfkill_allocate(&toshiba_acpi.p_dev->dev,
- RFKILL_TYPE_BLUETOOTH);
- if (!toshiba_acpi.rfk_dev) {
+ toshiba_acpi.bt_rfk = rfkill_alloc(toshiba_acpi.bt_name,
+ &toshiba_acpi.p_dev->dev,
+ RFKILL_TYPE_BLUETOOTH,
+ &toshiba_rfk_ops,
+ &toshiba_acpi);
+ if (!toshiba_acpi.bt_rfk) {
printk(MY_ERR "unable to allocate rfkill device\n");
toshiba_acpi_exit();
return -ENOMEM;
}
- toshiba_acpi.rfk_dev->name = toshiba_acpi.bt_name;
- toshiba_acpi.rfk_dev->toggle_radio = bt_rfkill_toggle_radio;
- toshiba_acpi.rfk_dev->user_claim_unsupported = 1;
- toshiba_acpi.rfk_dev->data = &toshiba_acpi;
-
- if (hci_get_bt_on(&bt_on) == HCI_SUCCESS && bt_on) {
- toshiba_acpi.rfk_dev->state = RFKILL_STATE_UNBLOCKED;
- } else if (hci_get_radio_state(&radio_on) == HCI_SUCCESS &&
- radio_on) {
- toshiba_acpi.rfk_dev->state = RFKILL_STATE_SOFT_BLOCKED;
- } else {
- toshiba_acpi.rfk_dev->state = RFKILL_STATE_HARD_BLOCKED;
- }
-
- ret = rfkill_register(toshiba_acpi.rfk_dev);
+ ret = rfkill_register(toshiba_acpi.bt_rfk);
if (ret) {
printk(MY_ERR "unable to register rfkill device\n");
- toshiba_acpi_exit();
- return -ENOMEM;
- }
-
- /* Register input device for kill switch */
- toshiba_acpi.poll_dev = input_allocate_polled_device();
- if (!toshiba_acpi.poll_dev) {
- printk(MY_ERR
- "unable to allocate kill-switch input device\n");
- toshiba_acpi_exit();
- return -ENOMEM;
- }
- toshiba_acpi.poll_dev->private = &toshiba_acpi;
- toshiba_acpi.poll_dev->poll = bt_poll_rfkill;
- toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */
-
- toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
- toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
- /* Toshiba USB ID */
- toshiba_acpi.poll_dev->input->id.vendor = 0x0930;
- set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
- set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
- input_report_switch(toshiba_acpi.poll_dev->input,
- SW_RFKILL_ALL, TRUE);
- input_sync(toshiba_acpi.poll_dev->input);
-
- ret = input_register_polled_device(toshiba_acpi.poll_dev);
- if (ret) {
- printk(MY_ERR
- "unable to register kill-switch input device\n");
+ rfkill_destroy(toshiba_acpi.bt_rfk);
toshiba_acpi_exit();
return ret;
}
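
The toshiba_acpi hunks above move the driver from the old rfkill/input-polldev combination to the rfkill_ops-based API. As a reading aid, here is a minimal sketch of that registration pattern, using only the calls that appear in this diff (rfkill_alloc, rfkill_register, rfkill_set_hw_state, rfkill_unregister, rfkill_destroy); every name prefixed "example" is a placeholder, not part of the patch:

    #include <linux/rfkill.h>

    static int example_set_block(void *data, bool blocked)
    {
        /* push the requested soft-block state to the hardware */
        return 0;
    }

    static void example_poll(struct rfkill *rfkill, void *data)
    {
        bool radio_off = false;    /* read the hardware kill switch here */

        /* report the hard-block state; the rfkill core tracks the change */
        rfkill_set_hw_state(rfkill, radio_off);
    }

    static const struct rfkill_ops example_rfk_ops = {
        .set_block = example_set_block,
        .poll      = example_poll,
    };

    static struct rfkill *example_rfk;

    static int example_rfkill_setup(struct device *parent, void *drv_data)
    {
        int err;

        example_rfk = rfkill_alloc("example-bt", parent, RFKILL_TYPE_BLUETOOTH,
                                   &example_rfk_ops, drv_data);
        if (!example_rfk)
            return -ENOMEM;

        err = rfkill_register(example_rfk);
        if (err)
            rfkill_destroy(example_rfk);    /* same unwind as the probe above */
        return err;
    }

    static void example_rfkill_teardown(void)
    {
        /* mirrors toshiba_acpi_exit(): unregister, then destroy */
        rfkill_unregister(example_rfk);
        rfkill_destroy(example_rfk);
    }

The .set_block handler receives the ops_data pointer handed to rfkill_alloc(), which is why the driver above passes &toshiba_acpi and recovers it as struct toshiba_acpi_dev *data.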
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 9a3a682c698..9496494f340 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -110,11 +110,9 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
ret = 0;
- if (acpi_bus_power_manageable(handle)) {
- ret = acpi_bus_set_power(handle, ACPI_STATE_D3);
- if (ret)
- return ret;
- }
+ if (acpi_bus_power_manageable(handle))
+ acpi_bus_set_power(handle, ACPI_STATE_D3);
+ /* continue even if acpi_bus_set_power() fails */
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
ret = -ENODEV;
return ret;
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index adf17856bac..7f207f335be 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -123,7 +123,7 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev,
}
flags = irq_flags(triggering, polarity, shareable);
- irq = acpi_register_gsi(gsi, triggering, polarity);
+ irq = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);
if (irq >= 0)
pcibios_penalize_isa_irq(irq, 1);
else
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index f604061d2bb..ba976542788 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -638,6 +638,24 @@ int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t start,
}
EXPORT_SYMBOL(pnp_possible_config);
+int pnp_range_reserved(resource_size_t start, resource_size_t end)
+{
+ struct pnp_dev *dev;
+ struct pnp_resource *pnp_res;
+ resource_size_t *dev_start, *dev_end;
+
+ pnp_for_each_dev(dev) {
+ list_for_each_entry(pnp_res, &dev->resources, list) {
+ dev_start = &pnp_res->res.start;
+ dev_end = &pnp_res->res.end;
+ if (ranged_conflict(&start, &end, dev_start, dev_end))
+ return 1;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(pnp_range_reserved);
+
/* format is: pnp_reserve_irq=irq1[,irq2] .... */
static int __init pnp_setup_reserve_irq(char *str)
{
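
The new pnp_range_reserved() helper above reports whether any PnP device already claims part of the given address range. A hypothetical caller, with illustrative addresses only, might use it to back off before grabbing a fixed legacy window:

    /* Hypothetical caller: skip a fixed window the PnP core already owns. */
    static int example_claim_legacy_window(void)
    {
        resource_size_t start = 0x3f8, end = 0x3ff;

        if (pnp_range_reserved(start, end))
            return -EBUSY;    /* some PnP device overlaps this range */

        /* safe to request_region() and use the window here */
        return 0;
    }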
diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c
index f17513dd9d4..88cb7408861 100644
--- a/drivers/ps3/ps3-sys-manager.c
+++ b/drivers/ps3/ps3-sys-manager.c
@@ -706,7 +706,7 @@ static void ps3_sys_manager_work(struct ps3_system_bus_device *dev)
ps3_vuart_read_async(dev, PS3_SM_RX_MSG_LEN_MIN);
}
-static int ps3_sys_manager_probe(struct ps3_system_bus_device *dev)
+static int __devinit ps3_sys_manager_probe(struct ps3_system_bus_device *dev)
{
int result;
struct ps3_sys_manager_ops ops;
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 235e87fcb49..e82d8c9c6cd 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -80,12 +80,12 @@ static const struct avset_video_mode {
{ 0, }, /* auto */
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480I, A_N, 720, 480},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480P, A_N, 720, 480},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_60HZ, A_N, 1280, 720},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_60HZ, A_W, 1280, 720},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_60HZ, A_W, 1920, 1080},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_60HZ, A_W, 1920, 1080},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576I, A_N, 720, 576},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576P, A_N, 720, 576},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_50HZ, A_N, 1280, 720},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_50HZ, A_W, 1280, 720},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_50HZ, A_W, 1920, 1080},
{YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_50HZ, A_W, 1920, 1080},
{ RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WXGA, A_W, 1280, 768},
@@ -937,7 +937,7 @@ int ps3av_audio_mute(int mute)
EXPORT_SYMBOL_GPL(ps3av_audio_mute);
-static int ps3av_probe(struct ps3_system_bus_device *dev)
+static int __devinit ps3av_probe(struct ps3_system_bus_device *dev)
{
int res;
int id;
@@ -1048,7 +1048,7 @@ static struct ps3_vuart_port_driver ps3av_driver = {
.shutdown = ps3av_shutdown,
};
-static int ps3av_module_init(void)
+static int __init ps3av_module_init(void)
{
int error;
diff --git a/drivers/ps3/ps3av_cmd.c b/drivers/ps3/ps3av_cmd.c
index 716596e8e5b..f555fedd507 100644
--- a/drivers/ps3/ps3av_cmd.c
+++ b/drivers/ps3/ps3av_cmd.c
@@ -21,9 +21,10 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+
#include <asm/ps3av.h>
-#include <asm/ps3fb.h>
#include <asm/ps3.h>
+#include <asm/ps3gpu.h>
#include "vuart.h"
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 74d0bfa3f31..3b78540288c 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -290,7 +290,7 @@ static void __devinit rio_add_device(struct rio_dev *rdev)
* to a RIO device on success or NULL on failure.
*
*/
-static struct rio_dev *rio_setup_device(struct rio_net *net,
+static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
struct rio_mport *port, u16 destid,
u8 hopcount, int do_enum)
{
@@ -559,7 +559,7 @@ static void rio_net_add_mport(struct rio_net *net, struct rio_mport *port)
* Recursively enumerates a RIO network. Transactions are sent via the
* master port passed in @port.
*/
-static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
+static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
u8 hopcount)
{
int port_num;
@@ -718,7 +718,7 @@ static int rio_enum_complete(struct rio_mport *port)
* Recursively discovers a RIO network. Transactions are sent via the
* master port passed in @port.
*/
-static int
+static int __devinit
rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
u8 hopcount)
{
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index e58c0ce65aa..f4317798e47 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -47,6 +47,16 @@ config REGULATOR_VIRTUAL_CONSUMER
If unsure, say no.
+config REGULATOR_USERSPACE_CONSUMER
+ tristate "Userspace regulator consumer support"
+ default n
+ help
+ There are some classes of devices that are controlled entirely
+ from user space. The userspace consumer driver provides the ability
+ to control power supplies for such devices.
+
+ If unsure, say no.
+
config REGULATOR_BQ24022
tristate "TI bq24022 Dual Input 1-Cell Li-Ion Charger IC"
default n
@@ -56,6 +66,15 @@ config REGULATOR_BQ24022
charging select between 100 mA and 500 mA charging current
limit.
+config REGULATOR_MAX1586
+ tristate "Maxim 1586/1587 voltage regulator"
+ depends on I2C
+ default n
+ help
+ This driver controls a Maxim 1586 or 1587 voltage output
+ regulator via the I2C bus. The provided regulator is suitable
+ for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
+
config REGULATOR_TWL4030
bool "TI TWL4030/TWL5030/TPS695x0 PMIC"
depends on TWL4030_CORE
@@ -91,4 +110,11 @@ config REGULATOR_PCF50633
Say Y here to support the voltage regulators and convertors
on PCF50633
+config REGULATOR_LP3971
+ tristate "National Semiconductors LP3971 PMIC regulator driver"
+ depends on I2C
+ help
+ Say Y here to support the voltage regulators and converters
+ on the National Semiconductor LP3971 PMIC.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index bac133afc06..4d762c4cccf 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -6,8 +6,11 @@
obj-$(CONFIG_REGULATOR) += core.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
+obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
+obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
+obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl4030-regulator.o
obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 72b15495183..b8b89ef10a8 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -497,14 +497,14 @@ static struct platform_driver da903x_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = da903x_regulator_probe,
- .remove = da903x_regulator_remove,
+ .remove = __devexit_p(da903x_regulator_remove),
};
static int __init da903x_regulator_init(void)
{
return platform_driver_register(&da903x_regulator_driver);
}
-module_init(da903x_regulator_init);
+subsys_initcall(da903x_regulator_init);
static void __exit da903x_regulator_exit(void)
{
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 23d554628a7..cdc674fb46c 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -44,10 +44,22 @@ static int fixed_voltage_get_voltage(struct regulator_dev *dev)
return data->microvolts;
}
+static int fixed_voltage_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct fixed_voltage_data *data = rdev_get_drvdata(dev);
+
+ if (selector != 0)
+ return -EINVAL;
+
+ return data->microvolts;
+}
+
static struct regulator_ops fixed_voltage_ops = {
.is_enabled = fixed_voltage_is_enabled,
.enable = fixed_voltage_enable,
.get_voltage = fixed_voltage_get_voltage,
+ .list_voltage = fixed_voltage_list_voltage,
};
static int regulator_fixed_voltage_probe(struct platform_device *pdev)
@@ -69,7 +81,8 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev)
}
drvdata->desc.type = REGULATOR_VOLTAGE;
drvdata->desc.owner = THIS_MODULE;
- drvdata->desc.ops = &fixed_voltage_ops,
+ drvdata->desc.ops = &fixed_voltage_ops;
+ drvdata->desc.n_voltages = 1;
drvdata->microvolts = config->microvolts;
@@ -117,7 +130,7 @@ static int __init regulator_fixed_voltage_init(void)
{
return platform_driver_register(&regulator_fixed_voltage_driver);
}
-module_init(regulator_fixed_voltage_init);
+subsys_initcall(regulator_fixed_voltage_init);
static void __exit regulator_fixed_voltage_exit(void)
{
@@ -128,3 +141,4 @@ module_exit(regulator_fixed_voltage_exit);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("Fixed voltage regulator");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:reg-fixed-voltage");
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
new file mode 100644
index 00000000000..a61018a2769
--- /dev/null
+++ b/drivers/regulator/lp3971.c
@@ -0,0 +1,562 @@
+/*
+ * Regulator driver for the National Semiconductor LP3971 PMIC chip
+ *
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * Based on wm8350.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/lp3971.h>
+
+struct lp3971 {
+ struct device *dev;
+ struct mutex io_lock;
+ struct i2c_client *i2c;
+ int num_regulators;
+ struct regulator_dev **rdev;
+};
+
+static u8 lp3971_reg_read(struct lp3971 *lp3971, u8 reg);
+static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val);
+
+#define LP3971_SYS_CONTROL1_REG 0x07
+
+/* System control register 1 initial value,
+ bits 4 and 5 are EPROM programmable */
+#define SYS_CONTROL1_INIT_VAL 0x40
+#define SYS_CONTROL1_INIT_MASK 0xCF
+
+#define LP3971_BUCK_VOL_ENABLE_REG 0x10
+#define LP3971_BUCK_VOL_CHANGE_REG 0x20
+
+/* Voltage control registers shift:
+ LP3971_BUCK1 -> 0
+ LP3971_BUCK2 -> 4
+ LP3971_BUCK3 -> 6
+*/
+#define BUCK_VOL_CHANGE_SHIFT(x) (((1 << x) & ~0x01) << 1)
+#define BUCK_VOL_CHANGE_FLAG_GO 0x01
+#define BUCK_VOL_CHANGE_FLAG_TARGET 0x02
+#define BUCK_VOL_CHANGE_FLAG_MASK 0x03
+
+#define LP3971_BUCK1_BASE 0x23
+#define LP3971_BUCK2_BASE 0x29
+#define LP3971_BUCK3_BASE 0x32
+
+static const int buck_base_addr[] = {
+ LP3971_BUCK1_BASE,
+ LP3971_BUCK2_BASE,
+ LP3971_BUCK3_BASE,
+};
+
+#define LP3971_BUCK_TARGET_VOL1_REG(x) (buck_base_addr[x])
+#define LP3971_BUCK_TARGET_VOL2_REG(x) (buck_base_addr[x]+1)
+
+static const int buck_voltage_map[] = {
+ 0, 800, 850, 900, 950, 1000, 1050, 1100,
+ 1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500,
+ 1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800,
+ 3000, 3300,
+};
+
+#define BUCK_TARGET_VOL_MASK 0x3f
+#define BUCK_TARGET_VOL_MIN_IDX 0x01
+#define BUCK_TARGET_VOL_MAX_IDX 0x19
+
+#define LP3971_BUCK_RAMP_REG(x) (buck_base_addr[x]+2)
+
+#define LP3971_LDO_ENABLE_REG 0x12
+#define LP3971_LDO_VOL_CONTR_BASE 0x39
+
+/* Voltage control registers:
+ LP3971_LDO1 -> LP3971_LDO_VOL_CONTR_BASE + 0
+ LP3971_LDO2 -> LP3971_LDO_VOL_CONTR_BASE + 0
+ LP3971_LDO3 -> LP3971_LDO_VOL_CONTR_BASE + 1
+ LP3971_LDO4 -> LP3971_LDO_VOL_CONTR_BASE + 1
+ LP3971_LDO5 -> LP3971_LDO_VOL_CONTR_BASE + 2
+*/
+#define LP3971_LDO_VOL_CONTR_REG(x) (LP3971_LDO_VOL_CONTR_BASE + (x >> 1))
+
+/* Voltage control registers shift:
+ LP3971_LDO1 -> 0, LP3971_LDO2 -> 4
+ LP3971_LDO3 -> 0, LP3971_LDO4 -> 4
+ LP3971_LDO5 -> 0
+*/
+#define LDO_VOL_CONTR_SHIFT(x) ((x & 1) << 2)
+#define LDO_VOL_CONTR_MASK 0x0f
+
+static const int ldo45_voltage_map[] = {
+ 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
+ 1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
+};
+
+static const int ldo123_voltage_map[] = {
+ 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500,
+ 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300,
+};
+
+static const int *ldo_voltage_map[] = {
+ ldo123_voltage_map, /* LDO1 */
+ ldo123_voltage_map, /* LDO2 */
+ ldo123_voltage_map, /* LDO3 */
+ ldo45_voltage_map, /* LDO4 */
+ ldo45_voltage_map, /* LDO5 */
+};
+
+#define LDO_VOL_VALUE_MAP(x) (ldo_voltage_map[(x - LP3971_LDO1)])
+
+#define LDO_VOL_MIN_IDX 0x00
+#define LDO_VOL_MAX_IDX 0x0f
+
+static int lp3971_ldo_list_voltage(struct regulator_dev *dev, unsigned index)
+{
+ int ldo = rdev_get_id(dev) - LP3971_LDO1;
+ return 1000 * LDO_VOL_VALUE_MAP(ldo)[index];
+}
+
+static int lp3971_ldo_is_enabled(struct regulator_dev *dev)
+{
+ struct lp3971 *lp3971 = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev) - LP3971_LDO1;
+ u16 mask = 1 << (1 + ldo);
+ u16 val;
+
+ val = lp3971_reg_read(lp3971, LP3971_LDO_ENABLE_REG);
+ return (val & mask) != 0;
+}
+
+static int lp3971_ldo_enable(struct regulator_dev *dev)
+{
+ struct lp3971 *lp3971 = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev) - LP3971_LDO1;
+ u16 mask = 1 << (1 + ldo);
+
+ return lp3971_set_bits(lp3971, LP3971_LDO_ENABLE_REG, mask, mask);
+}
+
+static int lp3971_ldo_disable(struct regulator_dev *dev)
+{
+ struct lp3971 *lp3971 = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev) - LP3971_LDO1;
+ u16 mask = 1 << (1 + ldo);
+
+ return lp3971_set_bits(lp3971, LP3971_LDO_ENABLE_REG, mask, 0);
+}
+
+static int lp3971_ldo_get_voltage(struct regulator_dev *dev)
+{
+ struct lp3971 *lp3971 = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev) - LP3971_LDO1;
+ u16 val, reg;
+
+ reg = lp3971_reg_read(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo));
+ val = (reg >> LDO_VOL_CONTR_SHIFT(ldo)) & LDO_VOL_CONTR_MASK;
+
+ return 1000 * LDO_VOL_VALUE_MAP(ldo)[val];
+}
+
+static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV)
+{
+ struct lp3971 *lp3971 = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev) - LP3971_LDO1;
+ int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+ const int *vol_map = LDO_VOL_VALUE_MAP(ldo);
+ u16 val;
+
+ if (min_vol < vol_map[LDO_VOL_MIN_IDX] ||
+ min_vol > vol_map[LDO_VOL_MAX_IDX])
+ return -EINVAL;
+
+ for (val = LDO_VOL_MIN_IDX; val <= LDO_VOL_MAX_IDX; val++)
+ if (vol_map[val] >= min_vol)
+ break;
+
+ if (vol_map[val] > max_vol)
+ return -EINVAL;
+
+ return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
+ LDO_VOL_CONTR_MASK << LDO_VOL_CONTR_SHIFT(ldo), val);
+}
+
+static struct regulator_ops lp3971_ldo_ops = {
+ .list_voltage = lp3971_ldo_list_voltage,
+ .is_enabled = lp3971_ldo_is_enabled,
+ .enable = lp3971_ldo_enable,
+ .disable = lp3971_ldo_disable,
+ .get_voltage = lp3971_ldo_get_voltage,
+ .set_voltage = lp3971_ldo_set_voltage,
+};
+
+static int lp3971_dcdc_list_voltage(struct regulator_dev *dev, unsigned index)
+{
+ return 1000 * buck_voltage_map[index];
+}
+
+static int lp3971_dcdc_is_enabled(struct regulator_dev *dev)
+{
+ struct lp3971 *lp3971 = rdev_get_drvdata(dev);
+ int buck = rdev_get_id(dev) - LP3971_DCDC1;
+ u16 mask = 1 << (buck * 2);
+ u16 val;
+
+ val = lp3971_reg_read(lp3971, LP3971_BUCK_VOL_ENABLE_REG);
+ return (val & mask) != 0;
+}
+
+static int lp3971_dcdc_enable(struct regulator_dev *dev)
+{
+ struct lp3971 *lp3971 = rdev_get_drvdata(dev);
+ int buck = rdev_get_id(dev) - LP3971_DCDC1;
+ u16 mask = 1 << (buck * 2);
+
+ return lp3971_set_bits(lp3971, LP3971_BUCK_VOL_ENABLE_REG, mask, mask);
+}
+
+static int lp3971_dcdc_disable(struct regulator_dev *dev)
+{
+ struct lp3971 *lp3971 = rdev_get_drvdata(dev);
+ int buck = rdev_get_id(dev) - LP3971_DCDC1;
+ u16 mask = 1 << (buck * 2);
+
+ return lp3971_set_bits(lp3971, LP3971_BUCK_VOL_ENABLE_REG, mask, 0);
+}
+
+static int lp3971_dcdc_get_voltage(struct regulator_dev *dev)
+{
+ struct lp3971 *lp3971 = rdev_get_drvdata(dev);
+ int buck = rdev_get_id(dev) - LP3971_DCDC1;
+ u16 reg;
+ int val;
+
+ reg = lp3971_reg_read(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck));
+ reg &= BUCK_TARGET_VOL_MASK;
+
+ if (reg <= BUCK_TARGET_VOL_MAX_IDX)
+ val = 1000 * buck_voltage_map[reg];
+ else {
+ val = 0;
+ dev_warn(&dev->dev, "chip reported incorrect voltage value.\n");
+ }
+
+ return val;
+}
+
+static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV)
+{
+ struct lp3971 *lp3971 = rdev_get_drvdata(dev);
+ int buck = rdev_get_id(dev) - LP3971_DCDC1;
+ int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+ const int *vol_map = buck_voltage_map;
+ u16 val;
+ int ret;
+
+ if (min_vol < vol_map[BUCK_TARGET_VOL_MIN_IDX] ||
+ min_vol > vol_map[BUCK_TARGET_VOL_MAX_IDX])
+ return -EINVAL;
+
+ for (val = BUCK_TARGET_VOL_MIN_IDX; val <= BUCK_TARGET_VOL_MAX_IDX;
+ val++)
+ if (vol_map[val] >= min_vol)
+ break;
+
+ if (vol_map[val] > max_vol)
+ return -EINVAL;
+
+ ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
+ BUCK_TARGET_VOL_MASK, val);
+ if (ret)
+ return ret;
+
+ ret = lp3971_set_bits(lp3971, LP3971_BUCK_VOL_CHANGE_REG,
+ BUCK_VOL_CHANGE_FLAG_MASK << BUCK_VOL_CHANGE_SHIFT(buck),
+ BUCK_VOL_CHANGE_FLAG_GO << BUCK_VOL_CHANGE_SHIFT(buck));
+ if (ret)
+ return ret;
+
+ return lp3971_set_bits(lp3971, LP3971_BUCK_VOL_CHANGE_REG,
+ BUCK_VOL_CHANGE_FLAG_MASK << BUCK_VOL_CHANGE_SHIFT(buck),
+ 0 << BUCK_VOL_CHANGE_SHIFT(buck));
+}
+
+static struct regulator_ops lp3971_dcdc_ops = {
+ .list_voltage = lp3971_dcdc_list_voltage,
+ .is_enabled = lp3971_dcdc_is_enabled,
+ .enable = lp3971_dcdc_enable,
+ .disable = lp3971_dcdc_disable,
+ .get_voltage = lp3971_dcdc_get_voltage,
+ .set_voltage = lp3971_dcdc_set_voltage,
+};
+
+static struct regulator_desc regulators[] = {
+ {
+ .name = "LDO1",
+ .id = LP3971_LDO1,
+ .ops = &lp3971_ldo_ops,
+ .n_voltages = ARRAY_SIZE(ldo123_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO2",
+ .id = LP3971_LDO2,
+ .ops = &lp3971_ldo_ops,
+ .n_voltages = ARRAY_SIZE(ldo123_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO3",
+ .id = LP3971_LDO3,
+ .ops = &lp3971_ldo_ops,
+ .n_voltages = ARRAY_SIZE(ldo123_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO4",
+ .id = LP3971_LDO4,
+ .ops = &lp3971_ldo_ops,
+ .n_voltages = ARRAY_SIZE(ldo45_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO5",
+ .id = LP3971_LDO5,
+ .ops = &lp3971_ldo_ops,
+ .n_voltages = ARRAY_SIZE(ldo45_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "DCDC1",
+ .id = LP3971_DCDC1,
+ .ops = &lp3971_dcdc_ops,
+ .n_voltages = ARRAY_SIZE(buck_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "DCDC2",
+ .id = LP3971_DCDC2,
+ .ops = &lp3971_dcdc_ops,
+ .n_voltages = ARRAY_SIZE(buck_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "DCDC3",
+ .id = LP3971_DCDC3,
+ .ops = &lp3971_dcdc_ops,
+ .n_voltages = ARRAY_SIZE(buck_voltage_map),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int lp3971_i2c_read(struct i2c_client *i2c, char reg, int count,
+ u16 *dest)
+{
+ int ret;
+
+ if (count != 1)
+ return -EIO;
+ ret = i2c_smbus_read_byte_data(i2c, reg);
+ if (ret < 0)
+ return -EIO;
+
+ *dest = ret;
+ return 0;
+}
+
+static int lp3971_i2c_write(struct i2c_client *i2c, char reg, int count,
+ const u16 *src)
+{
+ int ret;
+
+ if (count != 1)
+ return -EIO;
+ ret = i2c_smbus_write_byte_data(i2c, reg, *src);
+ if (ret >= 0)
+ return 0;
+
+ return ret;
+}
+
+static u8 lp3971_reg_read(struct lp3971 *lp3971, u8 reg)
+{
+ u16 val = 0;
+
+ mutex_lock(&lp3971->io_lock);
+
+ lp3971_i2c_read(lp3971->i2c, reg, 1, &val);
+
+ dev_dbg(lp3971->dev, "reg read 0x%02x -> 0x%02x\n", (int)reg,
+ (unsigned)val&0xff);
+
+ mutex_unlock(&lp3971->io_lock);
+
+ return val & 0xff;
+}
+
+static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val)
+{
+ u16 tmp;
+ int ret;
+
+ mutex_lock(&lp3971->io_lock);
+
+ ret = lp3971_i2c_read(lp3971->i2c, reg, 1, &tmp);
+ tmp = (tmp & ~mask) | val;
+ if (ret == 0) {
+ ret = lp3971_i2c_write(lp3971->i2c, reg, 1, &tmp);
+ dev_dbg(lp3971->dev, "reg write 0x%02x -> 0x%02x\n", (int)reg,
+ (unsigned)val&0xff);
+ }
+ mutex_unlock(&lp3971->io_lock);
+
+ return ret;
+}
+
+static int setup_regulators(struct lp3971 *lp3971,
+ struct lp3971_platform_data *pdata)
+{
+ int i, err;
+ int num_regulators = pdata->num_regulators;
+ lp3971->num_regulators = num_regulators;
+ lp3971->rdev = kzalloc(sizeof(struct regulator_dev *) * num_regulators,
+ GFP_KERNEL);
+
+ /* Instantiate the regulators */
+ for (i = 0; i < num_regulators; i++) {
+ int id = pdata->regulators[i].id;
+ lp3971->rdev[i] = regulator_register(&regulators[id],
+ lp3971->dev, pdata->regulators[i].initdata, lp3971);
+
+ if (IS_ERR(lp3971->rdev[i])) {
+ err = PTR_ERR(lp3971->rdev[i]);
+ dev_err(lp3971->dev, "regulator init failed: %d\n",
+ err);
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+ for (i = 0; i < num_regulators; i++)
+ if (lp3971->rdev[i])
+ regulator_unregister(lp3971->rdev[i]);
+ kfree(lp3971->rdev);
+ lp3971->rdev = NULL;
+ return err;
+}
+
+static int __devinit lp3971_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct lp3971 *lp3971;
+ struct lp3971_platform_data *pdata = i2c->dev.platform_data;
+ int ret;
+ u16 val;
+
+ lp3971 = kzalloc(sizeof(struct lp3971), GFP_KERNEL);
+ if (lp3971 == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ lp3971->i2c = i2c;
+ lp3971->dev = &i2c->dev;
+ i2c_set_clientdata(i2c, lp3971);
+
+ mutex_init(&lp3971->io_lock);
+
+ /* Detect LP3971 */
+ ret = lp3971_i2c_read(i2c, LP3971_SYS_CONTROL1_REG, 1, &val);
+ if (ret == 0 && (val & SYS_CONTROL1_INIT_MASK) != SYS_CONTROL1_INIT_VAL)
+ ret = -ENODEV;
+ if (ret < 0) {
+ dev_err(&i2c->dev, "failed to detect device\n");
+ goto err_detect;
+ }
+
+ if (pdata) {
+ ret = setup_regulators(lp3971, pdata);
+ if (ret < 0)
+ goto err_detect;
+ } else
+ dev_warn(lp3971->dev, "No platform init data supplied\n");
+
+ return 0;
+
+err_detect:
+ i2c_set_clientdata(i2c, NULL);
+ kfree(lp3971);
+err:
+ return ret;
+}
+
+static int __devexit lp3971_i2c_remove(struct i2c_client *i2c)
+{
+ struct lp3971 *lp3971 = i2c_get_clientdata(i2c);
+ int i;
+ for (i = 0; i < lp3971->num_regulators; i++)
+ if (lp3971->rdev[i])
+ regulator_unregister(lp3971->rdev[i]);
+ kfree(lp3971->rdev);
+ i2c_set_clientdata(i2c, NULL);
+ kfree(lp3971);
+
+ return 0;
+}
+
+static const struct i2c_device_id lp3971_i2c_id[] = {
+ { "lp3971", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lp3971_i2c_id);
+
+static struct i2c_driver lp3971_i2c_driver = {
+ .driver = {
+ .name = "LP3971",
+ .owner = THIS_MODULE,
+ },
+ .probe = lp3971_i2c_probe,
+ .remove = __devexit_p(lp3971_i2c_remove),
+ .id_table = lp3971_i2c_id,
+};
+
+static int __init lp3971_module_init(void)
+{
+ int ret = -ENODEV;
+
+ ret = i2c_add_driver(&lp3971_i2c_driver);
+ if (ret != 0)
+ pr_err("Failed to register I2C driver: %d\n", ret);
+
+ return ret;
+}
+module_init(lp3971_module_init);
+
+static void __exit lp3971_module_exit(void)
+{
+ i2c_del_driver(&lp3971_i2c_driver);
+}
+module_exit(lp3971_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marek Szyprowski <m.szyprowski@samsung.com>");
+MODULE_DESCRIPTION("LP3971 PMIC driver");
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
new file mode 100644
index 00000000000..2c082d3ef48
--- /dev/null
+++ b/drivers/regulator/max1586.c
@@ -0,0 +1,282 @@
+/*
+ * max1586.c -- Voltage and current regulation for the Maxim 1586
+ *
+ * Copyright (C) 2008 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/max1586.h>
+
+#define MAX1586_V3_MAX_VSEL 31
+#define MAX1586_V6_MAX_VSEL 3
+
+#define MAX1586_V3_MIN_UV 700000
+#define MAX1586_V3_MAX_UV 1475000
+
+#define MAX1586_V6_MIN_UV 0
+#define MAX1586_V6_MAX_UV 3000000
+
+#define I2C_V3_SELECT (0 << 5)
+#define I2C_V6_SELECT (1 << 5)
+
+struct max1586_data {
+ struct i2c_client *client;
+
+ /* min/max V3 voltage */
+ unsigned int min_uV;
+ unsigned int max_uV;
+
+ struct regulator_dev *rdev[0];
+};
+
+/*
+ * V3 voltage
+ * On I2C bus, sending a "x" byte to the max1586 means :
+ * set V3 to 0.700V + (x & 0x1f) * 0.025V
+ * This voltage can be increased by external resistors
+ * R24 and R25=100kOhm as described in the data sheet.
+ * The gain is approximately: 1 + R24/R25 + R24/185.5kOhm
+ */
+static int max1586_v3_calc_voltage(struct max1586_data *max1586,
+ unsigned selector)
+{
+ unsigned range_uV = max1586->max_uV - max1586->min_uV;
+
+ return max1586->min_uV + (selector * range_uV / MAX1586_V3_MAX_VSEL);
+}
+
+static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct max1586_data *max1586 = rdev_get_drvdata(rdev);
+ struct i2c_client *client = max1586->client;
+ unsigned range_uV = max1586->max_uV - max1586->min_uV;
+ unsigned selector;
+ u8 v3_prog;
+
+ if (min_uV > max1586->max_uV || max_uV < max1586->min_uV)
+ return -EINVAL;
+ if (min_uV < max1586->min_uV)
+ min_uV = max1586->min_uV;
+
+ selector = ((min_uV - max1586->min_uV) * MAX1586_V3_MAX_VSEL +
+ range_uV - 1) / range_uV;
+ if (max1586_v3_calc_voltage(max1586, selector) > max_uV)
+ return -EINVAL;
+
+ dev_dbg(&client->dev, "changing voltage v3 to %dmv\n",
+ max1586_v3_calc_voltage(max1586, selector) / 1000);
+
+ v3_prog = I2C_V3_SELECT | (u8) selector;
+ return i2c_smbus_write_byte(client, v3_prog);
+}
+
+static int max1586_v3_list(struct regulator_dev *rdev, unsigned selector)
+{
+ struct max1586_data *max1586 = rdev_get_drvdata(rdev);
+
+ if (selector > MAX1586_V3_MAX_VSEL)
+ return -EINVAL;
+ return max1586_v3_calc_voltage(max1586, selector);
+}
+
+/*
+ * V6 voltage
+ * On I2C bus, sending a "x" byte to the max1586 means :
+ * set V6 to either 0V, 1.8V, 2.5V, 3V depending on (x & 0x3)
+ * As regulator framework doesn't accept voltages to be 0V, we use 1uV.
+ */
+static int max1586_v6_calc_voltage(unsigned selector)
+{
+ static int voltages_uv[] = { 1, 1800000, 2500000, 3000000 };
+
+ return voltages_uv[selector];
+}
+
+static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct i2c_client *client = rdev_get_drvdata(rdev);
+ unsigned selector;
+ u8 v6_prog;
+
+ if (min_uV < MAX1586_V6_MIN_UV || min_uV > MAX1586_V6_MAX_UV)
+ return -EINVAL;
+ if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV)
+ return -EINVAL;
+
+ if (min_uV >= 3000000)
+ selector = 3;
+ if (min_uV < 3000000)
+ selector = 2;
+ if (min_uV < 2500000)
+ selector = 1;
+ if (min_uV < 1800000)
+ selector = 0;
+
+ if (max1586_v6_calc_voltage(selector) > max_uV)
+ return -EINVAL;
+
+ dev_dbg(&client->dev, "changing voltage v6 to %dmv\n",
+ max1586_v6_calc_voltage(selector) / 1000);
+
+ v6_prog = I2C_V6_SELECT | (u8) selector;
+ return i2c_smbus_write_byte(client, v6_prog);
+}
+
+static int max1586_v6_list(struct regulator_dev *rdev, unsigned selector)
+{
+ if (selector > MAX1586_V6_MAX_VSEL)
+ return -EINVAL;
+ return max1586_v6_calc_voltage(selector);
+}
+
+/*
+ * The Maxim 1586 controls V3 and V6 voltages, but offers no way of reading back
+ * the set up value.
+ */
+static struct regulator_ops max1586_v3_ops = {
+ .set_voltage = max1586_v3_set,
+ .list_voltage = max1586_v3_list,
+};
+
+static struct regulator_ops max1586_v6_ops = {
+ .set_voltage = max1586_v6_set,
+ .list_voltage = max1586_v6_list,
+};
+
+static struct regulator_desc max1586_reg[] = {
+ {
+ .name = "Output_V3",
+ .id = MAX1586_V3,
+ .ops = &max1586_v3_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = MAX1586_V3_MAX_VSEL + 1,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "Output_V6",
+ .id = MAX1586_V6,
+ .ops = &max1586_v6_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = MAX1586_V6_MAX_VSEL + 1,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int max1586_pmic_probe(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
+{
+ struct regulator_dev **rdev;
+ struct max1586_platform_data *pdata = client->dev.platform_data;
+ struct max1586_data *max1586;
+ int i, id, ret = -ENOMEM;
+
+ max1586 = kzalloc(sizeof(struct max1586_data) +
+ sizeof(struct regulator_dev *) * (MAX1586_V6 + 1),
+ GFP_KERNEL);
+ if (!max1586)
+ goto out;
+
+ max1586->client = client;
+
+ if (!pdata->v3_gain) {
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+ max1586->min_uV = MAX1586_V3_MIN_UV / 1000 * pdata->v3_gain / 1000;
+ max1586->max_uV = MAX1586_V3_MAX_UV / 1000 * pdata->v3_gain / 1000;
+
+ rdev = max1586->rdev;
+ for (i = 0; i < pdata->num_subdevs && i <= MAX1586_V6; i++) {
+ id = pdata->subdevs[i].id;
+ if (!pdata->subdevs[i].platform_data)
+ continue;
+ if (id < MAX1586_V3 || id > MAX1586_V6) {
+ dev_err(&client->dev, "invalid regulator id %d\n", id);
+ goto err;
+ }
+ rdev[i] = regulator_register(&max1586_reg[id], &client->dev,
+ pdata->subdevs[i].platform_data,
+ max1586);
+ if (IS_ERR(rdev[i])) {
+ ret = PTR_ERR(rdev[i]);
+ dev_err(&client->dev, "failed to register %s\n",
+ max1586_reg[id].name);
+ goto err;
+ }
+ }
+
+ i2c_set_clientdata(client, rdev);
+ dev_info(&client->dev, "Maxim 1586 regulator driver loaded\n");
+ return 0;
+
+err:
+ while (--i >= 0)
+ regulator_unregister(rdev[i]);
+out_unmap:
+ kfree(max1586);
+out:
+ return ret;
+}
+
+static int max1586_pmic_remove(struct i2c_client *client)
+{
+ struct regulator_dev **rdev = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i <= MAX1586_V6; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+ kfree(rdev);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static const struct i2c_device_id max1586_id[] = {
+ { "max1586", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max1586_id);
+
+static struct i2c_driver max1586_pmic_driver = {
+ .probe = max1586_pmic_probe,
+ .remove = max1586_pmic_remove,
+ .driver = {
+ .name = "max1586",
+ },
+ .id_table = max1586_id,
+};
+
+static int __init max1586_pmic_init(void)
+{
+ return i2c_add_driver(&max1586_pmic_driver);
+}
+subsys_initcall(max1586_pmic_init);
+
+static void __exit max1586_pmic_exit(void)
+{
+ i2c_del_driver(&max1586_pmic_driver);
+}
+module_exit(max1586_pmic_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("MAXIM 1586 voltage regulator driver");
+MODULE_AUTHOR("Robert Jarzmik");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index cd761d85c8f..8e14900eb68 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -316,7 +316,7 @@ static int __init pcf50633_regulator_init(void)
{
return platform_driver_register(&pcf50633_regulator_driver);
}
-module_init(pcf50633_regulator_init);
+subsys_initcall(pcf50633_regulator_init);
static void __exit pcf50633_regulator_exit(void)
{
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
new file mode 100644
index 00000000000..06d2fa96a8b
--- /dev/null
+++ b/drivers/regulator/userspace-consumer.c
@@ -0,0 +1,200 @@
+/*
+ * userspace-consumer.c
+ *
+ * Copyright 2009 CompuLab, Ltd.
+ *
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Based on the virtual consumer driver:
+ * Copyright 2008 Wolfson Microelectronics PLC.
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/userspace-consumer.h>
+
+struct userspace_consumer_data {
+ const char *name;
+
+ struct mutex lock;
+ bool enabled;
+
+ int num_supplies;
+ struct regulator_bulk_data *supplies;
+};
+
+static ssize_t reg_show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct userspace_consumer_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", data->name);
+}
+
+static ssize_t reg_show_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct userspace_consumer_data *data = dev_get_drvdata(dev);
+
+ if (data->enabled)
+ return sprintf(buf, "enabled\n");
+
+ return sprintf(buf, "disabled\n");
+}
+
+static ssize_t reg_set_state(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct userspace_consumer_data *data = dev_get_drvdata(dev);
+ bool enabled;
+ int ret;
+
+ /*
+ * sysfs_streq() doesn't need the \n's, but we add them so the strings
+ * will be shared with show_state(), above.
+ */
+ if (sysfs_streq(buf, "enabled\n") || sysfs_streq(buf, "1"))
+ enabled = true;
+ else if (sysfs_streq(buf, "disabled\n") || sysfs_streq(buf, "0"))
+ enabled = false;
+ else {
+ dev_err(dev, "Configuring invalid mode\n");
+ return count;
+ }
+
+ mutex_lock(&data->lock);
+ if (enabled != data->enabled) {
+ if (enabled)
+ ret = regulator_bulk_enable(data->num_supplies,
+ data->supplies);
+ else
+ ret = regulator_bulk_disable(data->num_supplies,
+ data->supplies);
+
+ if (ret == 0)
+ data->enabled = enabled;
+ else
+ dev_err(dev, "Failed to configure state: %d\n", ret);
+ }
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(name, 0444, reg_show_name, NULL);
+static DEVICE_ATTR(state, 0644, reg_show_state, reg_set_state);
+
+static struct device_attribute *attributes[] = {
+ &dev_attr_name,
+ &dev_attr_state,
+};
+
+static int regulator_userspace_consumer_probe(struct platform_device *pdev)
+{
+ struct regulator_userspace_consumer_data *pdata;
+ struct userspace_consumer_data *drvdata;
+ int ret, i;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ return -EINVAL;
+
+ drvdata = kzalloc(sizeof(struct userspace_consumer_data), GFP_KERNEL);
+ if (drvdata == NULL)
+ return -ENOMEM;
+
+ drvdata->name = pdata->name;
+ drvdata->num_supplies = pdata->num_supplies;
+ drvdata->supplies = pdata->supplies;
+
+ mutex_init(&drvdata->lock);
+
+ ret = regulator_bulk_get(&pdev->dev, drvdata->num_supplies,
+ drvdata->supplies);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get supplies: %d\n", ret);
+ goto err_alloc_supplies;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(attributes); i++) {
+ ret = device_create_file(&pdev->dev, attributes[i]);
+ if (ret != 0)
+ goto err_create_attrs;
+ }
+
+ if (pdata->init_on)
+ ret = regulator_bulk_enable(drvdata->num_supplies,
+ drvdata->supplies);
+
+ drvdata->enabled = pdata->init_on;
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set initial state: %d\n", ret);
+ goto err_create_attrs;
+ }
+
+ platform_set_drvdata(pdev, drvdata);
+
+ return 0;
+
+err_create_attrs:
+ for (i = 0; i < ARRAY_SIZE(attributes); i++)
+ device_remove_file(&pdev->dev, attributes[i]);
+
+ regulator_bulk_free(drvdata->num_supplies, drvdata->supplies);
+
+err_alloc_supplies:
+ kfree(drvdata);
+ return ret;
+}
+
+static int regulator_userspace_consumer_remove(struct platform_device *pdev)
+{
+ struct userspace_consumer_data *data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(attributes); i++)
+ device_remove_file(&pdev->dev, attributes[i]);
+
+ if (data->enabled)
+ regulator_bulk_disable(data->num_supplies, data->supplies);
+
+ regulator_bulk_free(data->num_supplies, data->supplies);
+ kfree(data);
+
+ return 0;
+}
+
+static struct platform_driver regulator_userspace_consumer_driver = {
+ .probe = regulator_userspace_consumer_probe,
+ .remove = regulator_userspace_consumer_remove,
+ .driver = {
+ .name = "reg-userspace-consumer",
+ },
+};
+
+
+static int __init regulator_userspace_consumer_init(void)
+{
+ return platform_driver_register(&regulator_userspace_consumer_driver);
+}
+module_init(regulator_userspace_consumer_init);
+
+static void __exit regulator_userspace_consumer_exit(void)
+{
+ platform_driver_unregister(&regulator_userspace_consumer_driver);
+}
+module_exit(regulator_userspace_consumer_exit);
+
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
+MODULE_DESCRIPTION("Userspace consumer for voltage and current regulators");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index 71403fa3ffa..e7db5664722 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -347,3 +347,4 @@ module_exit(regulator_virtual_consumer_exit);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("Virtual regulator consumer");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:reg-virt-consumer");
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 771eca1066b..17a00b0fafd 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1570,3 +1570,4 @@ module_exit(wm8350_regulator_exit);
MODULE_AUTHOR("Liam Girdwood");
MODULE_DESCRIPTION("WM8350 voltage and current regulator driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm8350-regulator");
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 15742602907..d9a2c988c6e 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -320,7 +320,7 @@ static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
- pdev->dev.platform_data, pdev->dev.driver_data);
+ pdev->dev.platform_data, dev_get_drvdata(&pdev->dev));
if (IS_ERR(rdev))
return PTR_ERR(rdev);
@@ -359,7 +359,7 @@ static struct platform_driver wm8400_regulator_driver = {
int wm8400_register_regulator(struct device *dev, int reg,
struct regulator_init_data *initdata)
{
- struct wm8400 *wm8400 = dev->driver_data;
+ struct wm8400 *wm8400 = dev_get_drvdata(dev);
if (wm8400->regulators[reg].name)
return -EBUSY;
@@ -369,8 +369,8 @@ int wm8400_register_regulator(struct device *dev, int reg,
wm8400->regulators[reg].name = "wm8400-regulator";
wm8400->regulators[reg].id = reg;
wm8400->regulators[reg].dev.parent = dev;
- wm8400->regulators[reg].dev.driver_data = wm8400;
wm8400->regulators[reg].dev.platform_data = initdata;
+ dev_set_drvdata(&wm8400->regulators[reg].dev, wm8400);
return platform_device_register(&wm8400->regulators[reg]);
}
@@ -380,7 +380,7 @@ static int __init wm8400_regulator_init(void)
{
return platform_driver_register(&wm8400_regulator_driver);
}
-module_init(wm8400_regulator_init);
+subsys_initcall(wm8400_regulator_init);
static void __exit wm8400_regulator_exit(void)
{
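
The wm8400 hunks replace direct pokes at dev->driver_data with the dev_set_drvdata()/dev_get_drvdata() accessors; a minimal illustration of the pairing, with hypothetical state and names:

    struct example_state { int value; };    /* hypothetical per-device state */

    static void example_bind(struct device *dev, struct example_state *state)
    {
        /* instead of dev->driver_data = state */
        dev_set_drvdata(dev, state);
    }

    static struct example_state *example_state_of(struct device *dev)
    {
        /* instead of dereferencing dev->driver_data directly */
        return dev_get_drvdata(dev);
    }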
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4e9851fc174..277d35d232f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -692,7 +692,7 @@ config RTC_DRV_GENERIC
tristate "Generic RTC support"
# Please consider writing a new RTC driver instead of using the generic
# RTC abstraction
- depends on PARISC || M68K || PPC
+ depends on PARISC || M68K || PPC || SUPERH32
help
Say Y or M here to enable RTC support on systems using the generic
RTC abstraction. If you do not know what you are doing, you should
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index f7a3283dd02..551332e4ed0 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -12,32 +12,56 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
-#include <mach/hardware.h>
+#include <linux/io.h>
+
+#define EP93XX_RTC_DATA 0x000
+#define EP93XX_RTC_MATCH 0x004
+#define EP93XX_RTC_STATUS 0x008
+#define EP93XX_RTC_STATUS_INTR (1<<0)
+#define EP93XX_RTC_LOAD 0x00C
+#define EP93XX_RTC_CONTROL 0x010
+#define EP93XX_RTC_CONTROL_MIE (1<<0)
+#define EP93XX_RTC_SWCOMP 0x108
+#define EP93XX_RTC_SWCOMP_DEL_MASK 0x001f0000
+#define EP93XX_RTC_SWCOMP_DEL_SHIFT 16
+#define EP93XX_RTC_SWCOMP_INT_MASK 0x0000ffff
+#define EP93XX_RTC_SWCOMP_INT_SHIFT 0
+
+#define DRV_VERSION "0.3"
-#define EP93XX_RTC_REG(x) (EP93XX_RTC_BASE + (x))
-#define EP93XX_RTC_DATA EP93XX_RTC_REG(0x0000)
-#define EP93XX_RTC_LOAD EP93XX_RTC_REG(0x000C)
-#define EP93XX_RTC_SWCOMP EP93XX_RTC_REG(0x0108)
-
-#define DRV_VERSION "0.2"
+/*
+ * struct device dev.platform_data is used to store our private data
+ * because struct rtc_device does not have a variable to hold it.
+ */
+struct ep93xx_rtc {
+ void __iomem *mmio_base;
+};
-static int ep93xx_get_swcomp(struct device *dev, unsigned short *preload,
+static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
unsigned short *delete)
{
- unsigned short comp = __raw_readl(EP93XX_RTC_SWCOMP);
+ struct ep93xx_rtc *ep93xx_rtc = dev->platform_data;
+ unsigned long comp;
+
+ comp = __raw_readl(ep93xx_rtc->mmio_base + EP93XX_RTC_SWCOMP);
if (preload)
- *preload = comp & 0xffff;
+ *preload = (comp & EP93XX_RTC_SWCOMP_INT_MASK)
+ >> EP93XX_RTC_SWCOMP_INT_SHIFT;
if (delete)
- *delete = (comp >> 16) & 0x1f;
+ *delete = (comp & EP93XX_RTC_SWCOMP_DEL_MASK)
+ >> EP93XX_RTC_SWCOMP_DEL_SHIFT;
return 0;
}
static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- unsigned long time = __raw_readl(EP93XX_RTC_DATA);
+ struct ep93xx_rtc *ep93xx_rtc = dev->platform_data;
+ unsigned long time;
+
+ time = __raw_readl(ep93xx_rtc->mmio_base + EP93XX_RTC_DATA);
rtc_time_to_tm(time, tm);
return 0;
@@ -45,7 +69,9 @@ static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int ep93xx_rtc_set_mmss(struct device *dev, unsigned long secs)
{
- __raw_writel(secs + 1, EP93XX_RTC_LOAD);
+ struct ep93xx_rtc *ep93xx_rtc = dev->platform_data;
+
+ __raw_writel(secs + 1, ep93xx_rtc->mmio_base + EP93XX_RTC_LOAD);
return 0;
}
@@ -53,7 +79,7 @@ static int ep93xx_rtc_proc(struct device *dev, struct seq_file *seq)
{
unsigned short preload, delete;
- ep93xx_get_swcomp(dev, &preload, &delete);
+ ep93xx_rtc_get_swcomp(dev, &preload, &delete);
seq_printf(seq, "preload\t\t: %d\n", preload);
seq_printf(seq, "delete\t\t: %d\n", delete);
@@ -67,54 +93,104 @@ static const struct rtc_class_ops ep93xx_rtc_ops = {
.proc = ep93xx_rtc_proc,
};
-static ssize_t ep93xx_sysfs_show_comp_preload(struct device *dev,
+static ssize_t ep93xx_rtc_show_comp_preload(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned short preload;
- ep93xx_get_swcomp(dev, &preload, NULL);
+ ep93xx_rtc_get_swcomp(dev, &preload, NULL);
return sprintf(buf, "%d\n", preload);
}
-static DEVICE_ATTR(comp_preload, S_IRUGO, ep93xx_sysfs_show_comp_preload, NULL);
+static DEVICE_ATTR(comp_preload, S_IRUGO, ep93xx_rtc_show_comp_preload, NULL);
-static ssize_t ep93xx_sysfs_show_comp_delete(struct device *dev,
+static ssize_t ep93xx_rtc_show_comp_delete(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned short delete;
- ep93xx_get_swcomp(dev, NULL, &delete);
+ ep93xx_rtc_get_swcomp(dev, NULL, &delete);
return sprintf(buf, "%d\n", delete);
}
-static DEVICE_ATTR(comp_delete, S_IRUGO, ep93xx_sysfs_show_comp_delete, NULL);
+static DEVICE_ATTR(comp_delete, S_IRUGO, ep93xx_rtc_show_comp_delete, NULL);
-static int __devinit ep93xx_rtc_probe(struct platform_device *dev)
+static int __init ep93xx_rtc_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc = rtc_device_register("ep93xx",
- &dev->dev, &ep93xx_rtc_ops, THIS_MODULE);
+ struct ep93xx_rtc *ep93xx_rtc;
+ struct resource *res;
+ struct rtc_device *rtc;
+ int err;
+
+ ep93xx_rtc = kzalloc(sizeof(struct ep93xx_rtc), GFP_KERNEL);
+ if (ep93xx_rtc == NULL)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -ENXIO;
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (res == NULL)
+ return -EBUSY;
+
+ ep93xx_rtc->mmio_base = ioremap(res->start, resource_size(res));
+ if (ep93xx_rtc->mmio_base == NULL) {
+ err = -ENXIO;
+ goto fail;
+ }
+ pdev->dev.platform_data = ep93xx_rtc;
+
+ rtc = rtc_device_register(pdev->name,
+ &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
- return PTR_ERR(rtc);
+ err = PTR_ERR(rtc);
+ goto fail;
}
- platform_set_drvdata(dev, rtc);
+ platform_set_drvdata(pdev, rtc);
- device_create_file(&dev->dev, &dev_attr_comp_preload);
- device_create_file(&dev->dev, &dev_attr_comp_delete);
+ err = device_create_file(&pdev->dev, &dev_attr_comp_preload);
+ if (err)
+ goto fail;
+ err = device_create_file(&pdev->dev, &dev_attr_comp_delete);
+ if (err) {
+ device_remove_file(&pdev->dev, &dev_attr_comp_preload);
+ goto fail;
+ }
return 0;
+
+fail:
+ if (ep93xx_rtc->mmio_base) {
+ iounmap(ep93xx_rtc->mmio_base);
+ pdev->dev.platform_data = NULL;
+ }
+ release_mem_region(res->start, resource_size(res));
+ return err;
}
-static int __devexit ep93xx_rtc_remove(struct platform_device *dev)
+static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
{
- struct rtc_device *rtc = platform_get_drvdata(dev);
+ struct rtc_device *rtc = platform_get_drvdata(pdev);
+ struct ep93xx_rtc *ep93xx_rtc = pdev->dev.platform_data;
+ struct resource *res;
+
+ /* cleanup sysfs */
+ device_remove_file(&pdev->dev, &dev_attr_comp_delete);
+ device_remove_file(&pdev->dev, &dev_attr_comp_preload);
+
+ rtc_device_unregister(rtc);
+
+ iounmap(ep93xx_rtc->mmio_base);
+ pdev->dev.platform_data = NULL;
- if (rtc)
- rtc_device_unregister(rtc);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(dev, NULL);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -122,23 +198,22 @@ static int __devexit ep93xx_rtc_remove(struct platform_device *dev)
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:ep93xx-rtc");
-static struct platform_driver ep93xx_rtc_platform_driver = {
+static struct platform_driver ep93xx_rtc_driver = {
.driver = {
.name = "ep93xx-rtc",
.owner = THIS_MODULE,
},
- .probe = ep93xx_rtc_probe,
- .remove = __devexit_p(ep93xx_rtc_remove),
+ .remove = __exit_p(ep93xx_rtc_remove),
};
static int __init ep93xx_rtc_init(void)
{
- return platform_driver_register(&ep93xx_rtc_platform_driver);
+ return platform_driver_probe(&ep93xx_rtc_driver, ep93xx_rtc_probe);
}
static void __exit ep93xx_rtc_exit(void)
{
- platform_driver_unregister(&ep93xx_rtc_platform_driver);
+ platform_driver_unregister(&ep93xx_rtc_driver);
}
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 82615355215..457231bb102 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -102,7 +102,7 @@ static const struct rtc_class_ops pl030_ops = {
.set_alarm = pl030_set_alarm,
};
-static int pl030_probe(struct amba_device *dev, void *id)
+static int pl030_probe(struct amba_device *dev, struct amba_id *id)
{
struct pl030_rtc *rtc;
int ret;
@@ -117,7 +117,7 @@ static int pl030_probe(struct amba_device *dev, void *id)
goto err_rtc;
}
- rtc->base = ioremap(dev->res.start, SZ_4K);
+ rtc->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!rtc->base) {
ret = -ENOMEM;
goto err_map;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 333eec689d2..f41873f98f6 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -127,7 +127,7 @@ static int pl031_remove(struct amba_device *adev)
return 0;
}
-static int pl031_probe(struct amba_device *adev, void *id)
+static int pl031_probe(struct amba_device *adev, struct amba_id *id)
{
int ret;
struct pl031_local *ldata;
@@ -142,8 +142,7 @@ static int pl031_probe(struct amba_device *adev, void *id)
goto out;
}
- ldata->base = ioremap(adev->res.start,
- adev->res.end - adev->res.start + 1);
+ ldata->base = ioremap(adev->res.start, resource_size(&adev->res));
if (!ldata->base) {
ret = -ENOMEM;
goto out_no_remap;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d1815272c43..e5b84db0aa0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -5,8 +5,7 @@
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
- *
+ * Copyright IBM Corp. 1999, 2009
*/
#define KMSG_COMPONENT "dasd"
@@ -61,6 +60,7 @@ static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
+static void do_restore_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
@@ -109,6 +109,7 @@ struct dasd_device *dasd_alloc_device(void)
device->timer.function = dasd_device_timeout;
device->timer.data = (unsigned long) device;
INIT_WORK(&device->kick_work, do_kick_device);
+ INIT_WORK(&device->restore_device, do_restore_device);
device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW;
@@ -512,6 +513,25 @@ void dasd_kick_device(struct dasd_device *device)
}
/*
+ * dasd_restore_device will schedule a call to do_restore_device via the kernel
+ * event daemon.
+ */
+static void do_restore_device(struct work_struct *work)
+{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ restore_device);
+ device->cdev->drv->restore(device->cdev);
+ dasd_put_device(device);
+}
+
+void dasd_restore_device(struct dasd_device *device)
+{
+ dasd_get_device(device);
+ /* queue call to do_restore_device to the kernel event daemon. */
+ schedule_work(&device->restore_device);
+}
+
+/*
* Set the target state for a device and starts the state change.
*/
void dasd_set_target_state(struct dasd_device *device, int target)
@@ -603,7 +623,7 @@ static void dasd_profile_end(struct dasd_block *block,
if (dasd_profile_level != DASD_PROFILE_ON)
return;
- sectors = req->nr_sectors;
+ sectors = blk_rq_sectors(req);
if (!cqr->buildclk || !cqr->startclk ||
!cqr->stopclk || !cqr->endclk ||
!sectors)
@@ -851,8 +871,10 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
/* Check the cqr */
rc = dasd_check_cqr(cqr);
- if (rc)
+ if (rc) {
+ cqr->intrc = rc;
return rc;
+ }
device = (struct dasd_device *) cqr->startdev;
if (cqr->retries < 0) {
/* internal error 14 - start_IO run out of retries */
@@ -906,6 +928,12 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
"start_IO: -EIO device gone, retry");
break;
+ case -EINVAL:
+ /* most likely caused in power management context */
+ DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+ "start_IO: -EINVAL device currently "
+ "not accessible");
+ break;
default:
/* internal error 11 - unknown rc */
snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
@@ -915,6 +943,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
BUG();
break;
}
+ cqr->intrc = rc;
return rc;
}
@@ -1454,8 +1483,12 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr)
dasd_add_request_tail(cqr);
wait_event(generic_waitq, _wait_for_wakeup(cqr));
- /* Request status is either done or failed. */
- rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
+ if (cqr->status == DASD_CQR_DONE)
+ rc = 0;
+ else if (cqr->intrc)
+ rc = cqr->intrc;
+ else
+ rc = -EIO;
return rc;
}
@@ -1477,8 +1510,15 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
dasd_cancel_req(cqr);
/* wait (non-interruptible) for final status */
wait_event(generic_waitq, _wait_for_wakeup(cqr));
+ cqr->intrc = rc;
}
- rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
+
+ if (cqr->status == DASD_CQR_DONE)
+ rc = 0;
+ else if (cqr->intrc)
+ rc = cqr->intrc;
+ else
+ rc = -EIO;
return rc;
}
@@ -1523,8 +1563,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
wait_event(generic_waitq, _wait_for_wakeup(cqr));
- /* Request status is either done or failed. */
- rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
+ if (cqr->status == DASD_CQR_DONE)
+ rc = 0;
+ else if (cqr->intrc)
+ rc = cqr->intrc;
+ else
+ rc = -EIO;
return rc;
}
@@ -1614,15 +1658,6 @@ void dasd_block_clear_timer(struct dasd_block *block)
}
/*
- * posts the buffer_cache about a finalized request
- */
-static inline void dasd_end_request(struct request *req, int error)
-{
- if (__blk_end_request(req, error, blk_rq_bytes(req)))
- BUG();
-}
-
-/*
* Process finished error recovery ccw.
*/
static inline void __dasd_block_process_erp(struct dasd_block *block,
@@ -1665,18 +1700,14 @@ static void __dasd_process_request_queue(struct dasd_block *block)
if (basedev->state < DASD_STATE_READY)
return;
/* Now we try to fetch requests from the request queue */
- while (!blk_queue_plugged(queue) &&
- elv_next_request(queue)) {
-
- req = elv_next_request(queue);
-
+ while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY &&
rq_data_dir(req) == WRITE) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"Rejecting write request %p",
req);
- blkdev_dequeue_request(req);
- dasd_end_request(req, -EIO);
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
continue;
}
cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -1704,8 +1735,8 @@ static void __dasd_process_request_queue(struct dasd_block *block)
"CCW creation failed (rc=%ld) "
"on request %p",
PTR_ERR(cqr), req);
- blkdev_dequeue_request(req);
- dasd_end_request(req, -EIO);
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
continue;
}
/*
@@ -1714,7 +1745,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
*/
cqr->callback_data = (void *) req;
cqr->status = DASD_CQR_FILLED;
- blkdev_dequeue_request(req);
+ blk_start_request(req);
list_add_tail(&cqr->blocklist, &block->ccw_queue);
dasd_profile_start(block, cqr, req);
}
@@ -1731,7 +1762,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
status = cqr->block->base->discipline->free_cp(cqr, req);
if (status <= 0)
error = status ? status : -EIO;
- dasd_end_request(req, error);
+ __blk_end_request_all(req, error);
}
/*
@@ -2003,7 +2034,7 @@ static void dasd_setup_queue(struct dasd_block *block)
{
int max;
- blk_queue_hardsect_size(block->request_queue, block->bp_block);
+ blk_queue_logical_block_size(block->request_queue, block->bp_block);
max = block->base->discipline->max_blocks << block->s2b_shift;
blk_queue_max_sectors(block->request_queue, max);
blk_queue_max_phys_segments(block->request_queue, -1L);
@@ -2038,10 +2069,8 @@ static void dasd_flush_request_queue(struct dasd_block *block)
return;
spin_lock_irq(&block->request_queue_lock);
- while ((req = elv_next_request(block->request_queue))) {
- blkdev_dequeue_request(req);
- dasd_end_request(req, -EIO);
- }
+ while ((req = blk_fetch_request(block->request_queue)))
+ __blk_end_request_all(req, -EIO);
spin_unlock_irq(&block->request_queue_lock);
}
@@ -2397,6 +2426,12 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
case CIO_OPER:
/* FIXME: add a sanity check. */
device->stopped &= ~DASD_STOPPED_DC_WAIT;
+ if (device->stopped & DASD_UNRESUMED_PM) {
+ device->stopped &= ~DASD_UNRESUMED_PM;
+ dasd_restore_device(device);
+ ret = 1;
+ break;
+ }
dasd_schedule_device_bh(device);
if (device->block)
dasd_schedule_block_bh(device->block);
@@ -2407,6 +2442,79 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
return ret;
}
+int dasd_generic_pm_freeze(struct ccw_device *cdev)
+{
+ struct dasd_ccw_req *cqr, *n;
+ int rc;
+ struct list_head freeze_queue;
+ struct dasd_device *device = dasd_device_from_cdev(cdev);
+
+ if (IS_ERR(device))
+ return PTR_ERR(device);
+ /* disallow new I/O */
+ device->stopped |= DASD_STOPPED_PM;
+ /* clear active requests */
+ INIT_LIST_HEAD(&freeze_queue);
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ rc = 0;
+ list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
+ /* Check status and move request to flush_queue */
+ if (cqr->status == DASD_CQR_IN_IO) {
+ rc = device->discipline->term_IO(cqr);
+ if (rc) {
+ /* unable to terminate request */
+ dev_err(&device->cdev->dev,
+ "Unable to terminate request %p "
+ "on suspend\n", cqr);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+ dasd_put_device(device);
+ return rc;
+ }
+ }
+ list_move_tail(&cqr->devlist, &freeze_queue);
+ }
+
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+
+ list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
+ wait_event(dasd_flush_wq,
+ (cqr->status != DASD_CQR_CLEAR_PENDING));
+ if (cqr->status == DASD_CQR_CLEARED)
+ cqr->status = DASD_CQR_QUEUED;
+ }
+ /* move freeze_queue to start of the ccw_queue */
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ list_splice_tail(&freeze_queue, &device->ccw_queue);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+
+ if (device->discipline->freeze)
+ rc = device->discipline->freeze(device);
+
+ dasd_put_device(device);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
+
+int dasd_generic_restore_device(struct ccw_device *cdev)
+{
+ struct dasd_device *device = dasd_device_from_cdev(cdev);
+ int rc = 0;
+
+ if (IS_ERR(device))
+ return PTR_ERR(device);
+
+ dasd_schedule_device_bh(device);
+ if (device->block)
+ dasd_schedule_block_bh(device->block);
+
+ if (device->discipline->restore)
+ rc = device->discipline->restore(device);
+
+ dasd_put_device(device);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
+
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
void *rdc_buffer,
int rdc_buffer_size,
@@ -2442,12 +2550,12 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
- void **rdc_buffer, int rdc_buffer_size)
+ void *rdc_buffer, int rdc_buffer_size)
{
int ret;
struct dasd_ccw_req *cqr;
- cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size,
+ cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
magic);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
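
The dasd.c hunks above introduce cqr->intrc so that the exact error seen by dasd_start_IO() survives until the waiter runs; all three dasd_sleep_on variants then pick the final return code in the same way. A minimal sketch of that selection, factored into a helper (the helper name is ours, not part of the patch):

/* Hypothetical helper, not in the patch: derive the final return code
 * of a completed ccw request the way dasd_sleep_on(),
 * dasd_sleep_on_interruptible() and dasd_sleep_on_immediatly() do above. */
static int cqr_final_rc(struct dasd_ccw_req *cqr)
{
	if (cqr->status == DASD_CQR_DONE)
		return 0;		/* request finished successfully */
	if (cqr->intrc)
		return cqr->intrc;	/* specific error noted by dasd_start_IO() */
	return -EIO;			/* fall back to a generic I/O error */
}
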
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index e77666c8e6c..4cac5b54f26 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1098,6 +1098,7 @@ dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
spin_unlock(&dasd_devmap_lock);
return 0;
}
+EXPORT_SYMBOL_GPL(dasd_get_uid);
/*
* Register the given device unique identifier into devmap struct.
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index b9a7f773344..644086ba2ed 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -202,6 +202,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
rc = -EIO;
break;
}
+ cqr->intrc = rc;
return rc;
}
@@ -505,8 +506,9 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
- first_rec = req->sector >> block->s2b_shift;
- last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ first_rec = blk_rq_pos(req) >> block->s2b_shift;
+ last_rec =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
rq_for_each_segment(bv, req, iter) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cb52da033f0..1c28ec3e4cc 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -5,10 +5,9 @@
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * Copyright IBM Corp. 1999, 2009
* EMC Symmetrix ioctl Copyright EMC Corporation, 2008
* Author.........: Nigel Hislop <hislop_nigel@emc.com>
- *
*/
#define KMSG_COMPONENT "dasd"
@@ -104,17 +103,6 @@ dasd_eckd_set_online(struct ccw_device *cdev)
return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
-static struct ccw_driver dasd_eckd_driver = {
- .name = "dasd-eckd",
- .owner = THIS_MODULE,
- .ids = dasd_eckd_ids,
- .probe = dasd_eckd_probe,
- .remove = dasd_generic_remove,
- .set_offline = dasd_generic_set_offline,
- .set_online = dasd_eckd_set_online,
- .notify = dasd_generic_notify,
-};
-
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140
@@ -1097,20 +1085,20 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct dasd_block *block;
- void *rdc_data;
int is_known, rc;
private = (struct dasd_eckd_private *) device->private;
- if (private == NULL) {
- private = kzalloc(sizeof(struct dasd_eckd_private),
- GFP_KERNEL | GFP_DMA);
- if (private == NULL) {
+ if (!private) {
+ private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+ if (!private) {
dev_warn(&device->cdev->dev,
"Allocating memory for private DASD data "
"failed\n");
return -ENOMEM;
}
device->private = (void *) private;
+ } else {
+ memset(private, 0, sizeof(*private));
}
/* Invalidate status of initial analysis. */
private->init_cqr_status = -1;
@@ -1161,9 +1149,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
goto out_err3;
/* Read Device Characteristics */
- rdc_data = (void *) &(private->rdc_data);
- memset(rdc_data, 0, sizeof(rdc_data));
- rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64);
+ rc = dasd_generic_read_dev_chars(device, "ECKD", &private->rdc_data,
+ 64);
if (rc) {
DBF_EVENT(DBF_WARNING,
"Read device characteristics failed, rc=%d for "
@@ -1183,7 +1170,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
private->rdc_data.dev_model,
private->rdc_data.cu_type,
private->rdc_data.cu_model.model,
- private->real_cyl,
+ private->real_cyl,
private->rdc_data.trk_per_cyl,
private->rdc_data.sec_per_trk);
return 0;
@@ -2336,9 +2323,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
{
int tpm, cmdrtd, cmdwtd;
int use_prefix;
-
- struct dasd_eckd_private *private;
+#if defined(CONFIG_64BIT)
int fcx_in_css, fcx_in_gneq, fcx_in_features;
+#endif
+ struct dasd_eckd_private *private;
struct dasd_device *basedev;
sector_t first_rec, last_rec;
sector_t first_trk, last_trk;
@@ -2354,18 +2342,22 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
blksize = block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
/* Calculate record id of first and last block. */
- first_rec = first_trk = req->sector >> block->s2b_shift;
+ first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
first_offs = sector_div(first_trk, blk_per_trk);
last_rec = last_trk =
- (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
last_offs = sector_div(last_trk, blk_per_trk);
cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
- /* is transport mode supported ? */
+ /* is transport mode supported? */
+#if defined(CONFIG_64BIT)
fcx_in_css = css_general_characteristics.fcx;
fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
fcx_in_features = private->features.feature[40] & 0x80;
tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
+#else
+ tpm = 0;
+#endif
/* is read track data and write track data in command mode supported? */
cmdrtd = private->features.feature[9] & 0x20;
@@ -2420,7 +2412,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
private = (struct dasd_eckd_private *) cqr->block->base->private;
blksize = cqr->block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
- recid = req->sector >> cqr->block->s2b_shift;
+ recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
ccw = cqr->cpaddr;
/* Skip over define extent & locate record. */
ccw++;
@@ -3013,8 +3005,9 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
- " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
- scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw));
+ " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
+ req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
+ scsw_cc(&irb->scsw), req->intrc);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
@@ -3115,9 +3108,10 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
- " in req: %p CS: 0x%02X DS: 0x%02X "
+ " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d "
"fcxs: 0x%02X schxs: 0x%02X\n", req,
scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
+ scsw_cc(&irb->scsw), req->intrc,
irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing TCW: %p\n",
@@ -3230,6 +3224,98 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
dasd_eckd_dump_sense_ccw(device, req, irb);
}
+int dasd_eckd_pm_freeze(struct dasd_device *device)
+{
+ /*
+ * the device should be disconnected from our LCU structure;
+ * on restore we will reconnect it and reread LCU-specific
+ * information like PAV support that might have changed
+ */
+ dasd_alias_remove_device(device);
+ dasd_alias_disconnect_device_from_lcu(device);
+
+ return 0;
+}
+
+int dasd_eckd_restore_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ int is_known, rc;
+ struct dasd_uid temp_uid;
+
+ /* allow new IO again */
+ device->stopped &= ~DASD_STOPPED_PM;
+
+ private = (struct dasd_eckd_private *) device->private;
+
+ /* Read Configuration Data */
+ rc = dasd_eckd_read_conf(device);
+ if (rc)
+ goto out_err;
+
+ /* Generate device unique id and register in devmap */
+ rc = dasd_eckd_generate_uid(device, &private->uid);
+ dasd_get_uid(device->cdev, &temp_uid);
+ if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
+ dev_err(&device->cdev->dev, "The UID of the DASD has changed\n");
+ if (rc)
+ goto out_err;
+ dasd_set_uid(device->cdev, &private->uid);
+
+ /* register lcu with alias handling, enable PAV if this is a new lcu */
+ is_known = dasd_alias_make_device_known_to_lcu(device);
+ if (is_known < 0)
+ return is_known;
+ if (!is_known) {
+ /* new lcu found */
+ rc = dasd_eckd_validate_server(device); /* will switch pav on */
+ if (rc)
+ goto out_err;
+ }
+
+ /* Read Feature Codes */
+ rc = dasd_eckd_read_features(device);
+ if (rc)
+ goto out_err;
+
+ /* Read Device Characteristics */
+ memset(&private->rdc_data, 0, sizeof(private->rdc_data));
+ rc = dasd_generic_read_dev_chars(device, "ECKD",
+ &private->rdc_data, 64);
+ if (rc) {
+ DBF_EVENT(DBF_WARNING,
+ "Read device characteristics failed, rc=%d for "
+ "device: %s", rc, dev_name(&device->cdev->dev));
+ goto out_err;
+ }
+
+ /* add device to alias management */
+ dasd_alias_add_device(device);
+
+ return 0;
+
+out_err:
+ /*
+ * if the resume failed for the DASD we put it in
+ * an UNRESUMED stop state
+ */
+ device->stopped |= DASD_UNRESUMED_PM;
+ return 0;
+}
+
+static struct ccw_driver dasd_eckd_driver = {
+ .name = "dasd-eckd",
+ .owner = THIS_MODULE,
+ .ids = dasd_eckd_ids,
+ .probe = dasd_eckd_probe,
+ .remove = dasd_generic_remove,
+ .set_offline = dasd_generic_set_offline,
+ .set_online = dasd_eckd_set_online,
+ .notify = dasd_generic_notify,
+ .freeze = dasd_generic_pm_freeze,
+ .thaw = dasd_generic_restore_device,
+ .restore = dasd_generic_restore_device,
+};
/*
* max_blocks is dependent on the amount of storage that is available
@@ -3268,13 +3354,21 @@ static struct dasd_discipline dasd_eckd_discipline = {
.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
.fill_info = dasd_eckd_fill_info,
.ioctl = dasd_eckd_ioctl,
+ .freeze = dasd_eckd_pm_freeze,
+ .restore = dasd_eckd_restore_device,
};
static int __init
dasd_eckd_init(void)
{
+ int ret;
+
ASCEBC(dasd_eckd_discipline.ebcname, 4);
- return ccw_driver_register(&dasd_eckd_driver);
+ ret = ccw_driver_register(&dasd_eckd_driver);
+ if (!ret)
+ wait_for_device_probe();
+
+ return ret;
}
static void __exit
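
The ECKD discipline now takes part in hibernation on two levels: the ccw_driver gains dasd_generic_pm_freeze()/dasd_generic_restore_device(), and the generic code in turn calls the new .freeze/.restore hooks added to struct dasd_discipline. A rough sketch of what another discipline would have to provide (all names below are illustrative, not part of the patch):

/* Illustrative only: minimal suspend/resume hooks for a hypothetical
 * discipline, wired up like dasd_eckd_pm_freeze() and
 * dasd_eckd_restore_device() above. */
static int dasd_xyz_pm_freeze(struct dasd_device *device)
{
	/* quiesce discipline-specific state before the image is written */
	return 0;
}

static int dasd_xyz_restore_device(struct dasd_device *device)
{
	/* allow new I/O again and re-read configuration that may have
	 * changed while the system was suspended */
	device->stopped &= ~DASD_STOPPED_PM;
	return 0;
}

static struct dasd_discipline dasd_xyz_discipline = {
	/* ...the usual build_cp/start_IO/erp callbacks... */
	.freeze  = dasd_xyz_pm_freeze,
	.restore = dasd_xyz_restore_device,
};
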
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a3eb6fd1467..e21ee735f92 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -2,8 +2,7 @@
* File...........: linux/drivers/s390/block/dasd_fba.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
- *
+ * Copyright IBM Corp. 1999, 2009
*/
#define KMSG_COMPONENT "dasd"
@@ -75,6 +74,9 @@ static struct ccw_driver dasd_fba_driver = {
.set_offline = dasd_generic_set_offline,
.set_online = dasd_fba_set_online,
.notify = dasd_generic_notify,
+ .freeze = dasd_generic_pm_freeze,
+ .thaw = dasd_generic_restore_device,
+ .restore = dasd_generic_restore_device,
};
static void
@@ -122,20 +124,20 @@ dasd_fba_check_characteristics(struct dasd_device *device)
struct dasd_block *block;
struct dasd_fba_private *private;
struct ccw_device *cdev = device->cdev;
- void *rdc_data;
int rc;
private = (struct dasd_fba_private *) device->private;
- if (private == NULL) {
- private = kzalloc(sizeof(struct dasd_fba_private),
- GFP_KERNEL | GFP_DMA);
- if (private == NULL) {
+ if (!private) {
+ private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+ if (!private) {
dev_warn(&device->cdev->dev,
"Allocating memory for private DASD "
"data failed\n");
return -ENOMEM;
}
device->private = (void *) private;
+ } else {
+ memset(private, 0, sizeof(*private));
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
@@ -150,8 +152,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
block->base = device;
/* Read Device Characteristics */
- rdc_data = (void *) &(private->rdc_data);
- rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32);
+ rc = dasd_generic_read_dev_chars(device, "FBA ", &private->rdc_data,
+ 32);
if (rc) {
DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
"error %d for device: %s",
@@ -270,8 +272,9 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
- first_rec = req->sector >> block->s2b_shift;
- last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ first_rec = blk_rq_pos(req) >> block->s2b_shift;
+ last_rec =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
@@ -309,7 +312,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
ccw = cqr->cpaddr;
/* First ccw is define extent. */
define_extent(ccw++, cqr->data, rq_data_dir(req),
- block->bp_block, req->sector, req->nr_sectors);
+ block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
/* Build locate_record + read/write ccws. */
idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
LO_data = (struct LO_fba_data *) (idaws + cidaw);
@@ -603,8 +606,14 @@ static struct dasd_discipline dasd_fba_discipline = {
static int __init
dasd_fba_init(void)
{
+ int ret;
+
ASCEBC(dasd_fba_discipline.ebcname, 4);
- return ccw_driver_register(&dasd_fba_driver);
+ ret = ccw_driver_register(&dasd_fba_driver);
+ if (!ret)
+ wait_for_device_probe();
+
+ return ret;
}
static void __exit
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index c1e487f774c..fd63b2f2bda 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -4,8 +4,7 @@
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
- *
+ * Copyright IBM Corp. 1999, 2009
*/
#ifndef DASD_INT_H
@@ -173,6 +172,7 @@ struct dasd_ccw_req {
void *data; /* pointer to data area */
/* these are important for recovering erroneous requests */
+ int intrc; /* internal error, e.g. from start_IO */
struct irb irb; /* device status in case of an error */
struct dasd_ccw_req *refers; /* ERP-chain queueing. */
void *function; /* originating ERP action */
@@ -294,6 +294,10 @@ struct dasd_discipline {
int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
int (*ioctl) (struct dasd_block *, unsigned int, void __user *);
+
+ /* suspend/resume functions */
+ int (*freeze) (struct dasd_device *);
+ int (*restore) (struct dasd_device *);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -366,6 +370,7 @@ struct dasd_device {
atomic_t tasklet_scheduled;
struct tasklet_struct tasklet;
struct work_struct kick_work;
+ struct work_struct restore_device;
struct timer_list timer;
debug_info_t *debug_area;
@@ -409,6 +414,8 @@ struct dasd_block {
#define DASD_STOPPED_PENDING 4 /* long busy */
#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */
#define DASD_STOPPED_SU 16 /* summary unit check handling */
+#define DASD_STOPPED_PM 32 /* pm state transition */
+#define DASD_UNRESUMED_PM 64 /* pm resume failed state */
/* per device flags */
#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */
@@ -555,6 +562,7 @@ void dasd_free_block(struct dasd_block *);
void dasd_enable_device(struct dasd_device *);
void dasd_set_target_state(struct dasd_device *, int);
void dasd_kick_device(struct dasd_device *);
+void dasd_restore_device(struct dasd_device *);
void dasd_add_request_head(struct dasd_ccw_req *);
void dasd_add_request_tail(struct dasd_ccw_req *);
@@ -577,8 +585,10 @@ int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
int dasd_generic_set_offline (struct ccw_device *cdev);
int dasd_generic_notify(struct ccw_device *, int);
void dasd_generic_handle_state_change(struct dasd_device *);
+int dasd_generic_pm_freeze(struct ccw_device *);
+int dasd_generic_restore_device(struct ccw_device *);
-int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int);
+int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int);
char *dasd_get_sense(struct irb *);
/* externals in dasd_devmap.c */
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index cfdcf1aed33..016f9e9d259 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -14,10 +14,11 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
-#include <asm/extmem.h>
-#include <asm/io.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/extmem.h>
+#include <asm/io.h>
#define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1
@@ -127,7 +128,7 @@ dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
found = 0;
// test if minor available
list_for_each_entry(entry, &dcssblk_devices, lh)
- if (minor == MINOR(disk_devt(entry->gd)))
+ if (minor == entry->gd->first_minor)
found++;
if (!found) break; // got unused minor
}
@@ -602,7 +603,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->private_data = dev_info;
dev_info->gd->driverfs_dev = &dev_info->dev;
blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
- blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
+ blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
@@ -625,7 +626,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto release_gd;
sprintf(dev_info->gd->disk_name, "dcssblk%d",
- MINOR(disk_devt(dev_info->gd)));
+ dev_info->gd->first_minor);
list_add_tail(&dev_info->lh, &dcssblk_devices);
if (!try_module_get(THIS_MODULE)) {
@@ -940,11 +941,94 @@ dcssblk_check_params(void)
}
/*
+ * Suspend / Resume
+ */
+static int dcssblk_freeze(struct device *dev)
+{
+ struct dcssblk_dev_info *dev_info;
+ int rc = 0;
+
+ list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+ switch (dev_info->segment_type) {
+ case SEG_TYPE_SR:
+ case SEG_TYPE_ER:
+ case SEG_TYPE_SC:
+ if (!dev_info->is_shared)
+ rc = -EINVAL;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (rc)
+ break;
+ }
+ if (rc)
+ pr_err("Suspend failed because device %s is writeable.\n",
+ dev_info->segment_name);
+ return rc;
+}
+
+static int dcssblk_restore(struct device *dev)
+{
+ struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry;
+ unsigned long start, end;
+ int rc = 0;
+
+ list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ segment_unload(entry->segment_name);
+ rc = segment_load(entry->segment_name, SEGMENT_SHARED,
+ &start, &end);
+ if (rc < 0) {
+// TODO in_use check ?
+ segment_warning(rc, entry->segment_name);
+ goto out_panic;
+ }
+ if (start != entry->start || end != entry->end) {
+ pr_err("Mismatch of start / end address after "
+ "resuming device %s\n",
+ entry->segment_name);
+ goto out_panic;
+ }
+ }
+ }
+ return 0;
+out_panic:
+ panic("fatal dcssblk resume error\n");
+}
+
+static int dcssblk_thaw(struct device *dev)
+{
+ return 0;
+}
+
+static struct dev_pm_ops dcssblk_pm_ops = {
+ .freeze = dcssblk_freeze,
+ .thaw = dcssblk_thaw,
+ .restore = dcssblk_restore,
+};
+
+static struct platform_driver dcssblk_pdrv = {
+ .driver = {
+ .name = "dcssblk",
+ .owner = THIS_MODULE,
+ .pm = &dcssblk_pm_ops,
+ },
+};
+
+static struct platform_device *dcssblk_pdev;
+
+
+/*
* The init/exit functions.
*/
static void __exit
dcssblk_exit(void)
{
+ platform_device_unregister(dcssblk_pdev);
+ platform_driver_unregister(&dcssblk_pdrv);
root_device_unregister(dcssblk_root_dev);
unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
}
@@ -954,30 +1038,44 @@ dcssblk_init(void)
{
int rc;
- dcssblk_root_dev = root_device_register("dcssblk");
- if (IS_ERR(dcssblk_root_dev))
- return PTR_ERR(dcssblk_root_dev);
- rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
- if (rc) {
- root_device_unregister(dcssblk_root_dev);
+ rc = platform_driver_register(&dcssblk_pdrv);
+ if (rc)
return rc;
+
+ dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL,
+ 0);
+ if (IS_ERR(dcssblk_pdev)) {
+ rc = PTR_ERR(dcssblk_pdev);
+ goto out_pdrv;
}
- rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
- if (rc) {
- root_device_unregister(dcssblk_root_dev);
- return rc;
+
+ dcssblk_root_dev = root_device_register("dcssblk");
+ if (IS_ERR(dcssblk_root_dev)) {
+ rc = PTR_ERR(dcssblk_root_dev);
+ goto out_pdev;
}
+ rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
+ if (rc)
+ goto out_root;
+ rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
+ if (rc)
+ goto out_root;
rc = register_blkdev(0, DCSSBLK_NAME);
- if (rc < 0) {
- root_device_unregister(dcssblk_root_dev);
- return rc;
- }
+ if (rc < 0)
+ goto out_root;
dcssblk_major = rc;
init_rwsem(&dcssblk_devices_sem);
dcssblk_check_params();
-
return 0;
+
+out_root:
+ root_device_unregister(dcssblk_root_dev);
+out_pdev:
+ platform_device_unregister(dcssblk_pdev);
+out_pdrv:
+ platform_driver_unregister(&dcssblk_pdrv);
+ return rc;
}
module_init(dcssblk_init);
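
dcssblk has no ccw device of its own, so the diff above registers a platform driver plus a single platform device whose only purpose is to receive dev_pm_ops callbacks during hibernation (xpram below uses the same trick). A condensed sketch of the pattern, with placeholder names:

/* Sketch of the "platform device as a PM hook" pattern used by dcssblk
 * and xpram; "exampleblk" and the handlers are placeholders. */
static int exampleblk_freeze(struct device *dev)
{
	/* refuse suspend if the state cannot be recreated on resume */
	return 0;
}

static int exampleblk_restore(struct device *dev)
{
	/* reload and re-check backing storage, as dcssblk_restore() does */
	return 0;
}

static struct dev_pm_ops exampleblk_pm_ops = {
	.freeze  = exampleblk_freeze,
	.restore = exampleblk_restore,
};

static struct platform_driver exampleblk_pdrv = {
	.driver = {
		.name  = "exampleblk",
		.owner = THIS_MODULE,
		.pm    = &exampleblk_pm_ops,
	},
};

static struct platform_device *exampleblk_pdev;

static int __init exampleblk_pm_init(void)
{
	int rc;

	rc = platform_driver_register(&exampleblk_pdrv);
	if (rc)
		return rc;
	exampleblk_pdev = platform_device_register_simple("exampleblk", -1,
							   NULL, 0);
	if (IS_ERR(exampleblk_pdev)) {
		rc = PTR_ERR(exampleblk_pdev);
		platform_driver_unregister(&exampleblk_pdrv);
	}
	return rc;
}
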
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 76814f3e898..2e9e1ecd6d8 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -39,7 +39,10 @@
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/sysdev.h>
#include <linux/bio.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
#include <asm/uaccess.h>
+#include <asm/checksum.h>
#define XPRAM_NAME "xpram"
#define XPRAM_DEVS 1 /* one partition */
@@ -48,6 +51,7 @@
typedef struct {
unsigned int size; /* size of xpram segment in pages */
unsigned int offset; /* start page of xpram segment */
+ unsigned int csum; /* partition checksum for suspend */
} xpram_device_t;
static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
@@ -138,7 +142,7 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
/*
* Check if xpram is available.
*/
-static int __init xpram_present(void)
+static int xpram_present(void)
{
unsigned long mem_page;
int rc;
@@ -154,7 +158,7 @@ static int __init xpram_present(void)
/*
* Return index of the last available xpram page.
*/
-static unsigned long __init xpram_highest_page_index(void)
+static unsigned long xpram_highest_page_index(void)
{
unsigned int page_index, add_bit;
unsigned long mem_page;
@@ -343,7 +347,7 @@ static int __init xpram_setup_blkdev(void)
goto out;
}
blk_queue_make_request(xpram_queues[i], xpram_make_request);
- blk_queue_hardsect_size(xpram_queues[i], 4096);
+ blk_queue_logical_block_size(xpram_queues[i], 4096);
}
/*
@@ -383,6 +387,106 @@ out:
}
/*
+ * Save checksums for all partitions.
+ */
+static int xpram_save_checksums(void)
+{
+ unsigned long mem_page;
+ int rc, i;
+
+ rc = 0;
+ mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
+ if (!mem_page)
+ return -ENOMEM;
+ for (i = 0; i < xpram_devs; i++) {
+ rc = xpram_page_in(mem_page, xpram_devices[i].offset);
+ if (rc)
+ goto fail;
+ xpram_devices[i].csum = csum_partial((const void *) mem_page,
+ PAGE_SIZE, 0);
+ }
+fail:
+ free_page(mem_page);
+ return rc ? -ENXIO : 0;
+}
+
+/*
+ * Verify checksums for all partitions.
+ */
+static int xpram_validate_checksums(void)
+{
+ unsigned long mem_page;
+ unsigned int csum;
+ int rc, i;
+
+ rc = 0;
+ mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
+ if (!mem_page)
+ return -ENOMEM;
+ for (i = 0; i < xpram_devs; i++) {
+ rc = xpram_page_in(mem_page, xpram_devices[i].offset);
+ if (rc)
+ goto fail;
+ csum = csum_partial((const void *) mem_page, PAGE_SIZE, 0);
+ if (xpram_devices[i].csum != csum) {
+ rc = -EINVAL;
+ goto fail;
+ }
+ }
+fail:
+ free_page(mem_page);
+ return rc ? -ENXIO : 0;
+}
+
+/*
+ * Resume failed: Print error message and call panic.
+ */
+static void xpram_resume_error(const char *message)
+{
+ pr_err("Resume error: %s\n", message);
+ panic("xpram resume error\n");
+}
+
+/*
+ * Check if xpram setup changed between suspend and resume.
+ */
+static int xpram_restore(struct device *dev)
+{
+ if (!xpram_pages)
+ return 0;
+ if (xpram_present() != 0)
+ xpram_resume_error("xpram disappeared");
+ if (xpram_pages != xpram_highest_page_index() + 1)
+ xpram_resume_error("Size of xpram changed");
+ if (xpram_validate_checksums())
+ xpram_resume_error("Data of xpram changed");
+ return 0;
+}
+
+/*
+ * Save necessary state in suspend.
+ */
+static int xpram_freeze(struct device *dev)
+{
+ return xpram_save_checksums();
+}
+
+static struct dev_pm_ops xpram_pm_ops = {
+ .freeze = xpram_freeze,
+ .restore = xpram_restore,
+};
+
+static struct platform_driver xpram_pdrv = {
+ .driver = {
+ .name = XPRAM_NAME,
+ .owner = THIS_MODULE,
+ .pm = &xpram_pm_ops,
+ },
+};
+
+static struct platform_device *xpram_pdev;
+
+/*
* Finally, the init/exit functions.
*/
static void __exit xpram_exit(void)
@@ -394,6 +498,8 @@ static void __exit xpram_exit(void)
put_disk(xpram_disks[i]);
}
unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
+ platform_device_unregister(xpram_pdev);
+ platform_driver_unregister(&xpram_pdrv);
}
static int __init xpram_init(void)
@@ -411,7 +517,24 @@ static int __init xpram_init(void)
rc = xpram_setup_sizes(xpram_pages);
if (rc)
return rc;
- return xpram_setup_blkdev();
+ rc = platform_driver_register(&xpram_pdrv);
+ if (rc)
+ return rc;
+ xpram_pdev = platform_device_register_simple(XPRAM_NAME, -1, NULL, 0);
+ if (IS_ERR(xpram_pdev)) {
+ rc = PTR_ERR(xpram_pdev);
+ goto fail_platform_driver_unregister;
+ }
+ rc = xpram_setup_blkdev();
+ if (rc)
+ goto fail_platform_device_unregister;
+ return 0;
+
+fail_platform_device_unregister:
+ platform_device_unregister(xpram_pdev);
+fail_platform_driver_unregister:
+ platform_driver_unregister(&xpram_pdrv);
+ return rc;
}
module_init(xpram_init);
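
xpram implements only .freeze and .restore because of when the hibernation callbacks run: as far as we understand the dev_pm_ops contract, .freeze is called before the hibernation image is written, .thaw afterwards in the still-running kernel, and .restore in the freshly booted kernel once the image has been loaded, so the checksums saved in xpram_freeze() only need to be re-verified in xpram_restore(). Annotated for reference (handler names are placeholders):

/* Placeholder handlers; the comments summarize our reading of when
 * each hibernation callback is invoked. */
static int example_freeze(struct device *dev)  { return 0; } /* before the image is written */
static int example_thaw(struct device *dev)    { return 0; } /* after writing, same kernel */
static int example_restore(struct device *dev) { return 0; } /* in the resumed kernel */

static struct dev_pm_ops example_pm_ops = {
	.freeze  = example_freeze,
	.thaw    = example_thaw,
	.restore = example_restore,
};
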
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 9ab06e0dad4..04dc734805c 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -1,14 +1,12 @@
/*
- * drivers/s390/char/con3215.c
- * 3215 line mode terminal driver.
+ * 3215 line mode terminal driver.
*
- * S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*
- * Updated:
- * Aug-2000: Added tab support
- * Dan Morrison, IBM Corporation (dmorriso@cse.buffalo.edu)
+ * Updated:
+ * Aug-2000: Added tab support
+ * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
*/
#include <linux/module.h>
@@ -56,6 +54,7 @@
#define RAW3215_CLOSING 32 /* set while in close process */
#define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */
#define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */
+#define RAW3215_FROZEN 256 /* set if 3215 is frozen for suspend */
#define TAB_STOP_SIZE 8 /* tab stop size */
@@ -111,8 +110,8 @@ static struct tty_driver *tty3215_driver;
/*
* Get a request structure from the free list
*/
-static inline struct raw3215_req *
-raw3215_alloc_req(void) {
+static inline struct raw3215_req *raw3215_alloc_req(void)
+{
struct raw3215_req *req;
unsigned long flags;
@@ -126,8 +125,8 @@ raw3215_alloc_req(void) {
/*
* Put a request structure back to the free list
*/
-static inline void
-raw3215_free_req(struct raw3215_req *req) {
+static inline void raw3215_free_req(struct raw3215_req *req)
+{
unsigned long flags;
if (req->type == RAW3215_FREE)
@@ -145,8 +144,7 @@ raw3215_free_req(struct raw3215_req *req) {
* because a 3215 terminal won't accept a new read before the old one is
* completed.
*/
-static void
-raw3215_mk_read_req(struct raw3215_info *raw)
+static void raw3215_mk_read_req(struct raw3215_info *raw)
{
struct raw3215_req *req;
struct ccw1 *ccw;
@@ -174,8 +172,7 @@ raw3215_mk_read_req(struct raw3215_info *raw)
* buffer to the 3215 device. If a queued write exists it is replaced by
* the new, probably lengthened request.
*/
-static void
-raw3215_mk_write_req(struct raw3215_info *raw)
+static void raw3215_mk_write_req(struct raw3215_info *raw)
{
struct raw3215_req *req;
struct ccw1 *ccw;
@@ -251,8 +248,7 @@ raw3215_mk_write_req(struct raw3215_info *raw)
/*
* Start a read or a write request
*/
-static void
-raw3215_start_io(struct raw3215_info *raw)
+static void raw3215_start_io(struct raw3215_info *raw)
{
struct raw3215_req *req;
int res;
@@ -290,8 +286,7 @@ raw3215_start_io(struct raw3215_info *raw)
/*
* Function to start a delayed output after RAW3215_TIMEOUT seconds
*/
-static void
-raw3215_timeout(unsigned long __data)
+static void raw3215_timeout(unsigned long __data)
{
struct raw3215_info *raw = (struct raw3215_info *) __data;
unsigned long flags;
@@ -300,8 +295,10 @@ raw3215_timeout(unsigned long __data)
if (raw->flags & RAW3215_TIMER_RUNS) {
del_timer(&raw->timer);
raw->flags &= ~RAW3215_TIMER_RUNS;
- raw3215_mk_write_req(raw);
- raw3215_start_io(raw);
+ if (!(raw->flags & RAW3215_FROZEN)) {
+ raw3215_mk_write_req(raw);
+ raw3215_start_io(raw);
+ }
}
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
@@ -312,10 +309,9 @@ raw3215_timeout(unsigned long __data)
* amount of data is bigger than RAW3215_MIN_WRITE. If a write is not
* done immediately a timer is started with a delay of RAW3215_TIMEOUT.
*/
-static inline void
-raw3215_try_io(struct raw3215_info *raw)
+static inline void raw3215_try_io(struct raw3215_info *raw)
{
- if (!(raw->flags & RAW3215_ACTIVE))
+ if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FROZEN))
return;
if (raw->queued_read != NULL)
raw3215_start_io(raw);
@@ -359,8 +355,8 @@ static void raw3215_next_io(struct raw3215_info *raw)
/*
* Interrupt routine, called from common io layer
*/
-static void
-raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
+ struct irb *irb)
{
struct raw3215_info *raw;
struct raw3215_req *req;
@@ -368,7 +364,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
int cstat, dstat;
int count;
- raw = cdev->dev.driver_data;
+ raw = dev_get_drvdata(&cdev->dev);
req = (struct raw3215_req *) intparm;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
@@ -459,14 +455,40 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
}
/*
+ * Drop the oldest line from the output buffer.
+ */
+static void raw3215_drop_line(struct raw3215_info *raw)
+{
+ int ix;
+ char ch;
+
+ BUG_ON(raw->written != 0);
+ ix = (raw->head - raw->count) & (RAW3215_BUFFER_SIZE - 1);
+ while (raw->count > 0) {
+ ch = raw->buffer[ix];
+ ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);
+ raw->count--;
+ if (ch == 0x15)
+ break;
+ }
+ raw->head = ix;
+}
+
+/*
* Wait until length bytes are available in the output buffer.
* Has to be called with the s390irq lock held. Can be called
* disabled.
*/
-static void
-raw3215_make_room(struct raw3215_info *raw, unsigned int length)
+static void raw3215_make_room(struct raw3215_info *raw, unsigned int length)
{
while (RAW3215_BUFFER_SIZE - raw->count < length) {
+ /* While the console is frozen for suspend we have no other
+ * choice but to drop messages from the buffer to make
+ * room for even more messages. */
+ if (raw->flags & RAW3215_FROZEN) {
+ raw3215_drop_line(raw);
+ continue;
+ }
/* there might be a request pending */
raw->flags |= RAW3215_FLUSHING;
raw3215_mk_write_req(raw);
@@ -488,8 +510,8 @@ raw3215_make_room(struct raw3215_info *raw, unsigned int length)
/*
* String write routine for 3215 devices
*/
-static void
-raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length)
+static void raw3215_write(struct raw3215_info *raw, const char *str,
+ unsigned int length)
{
unsigned long flags;
int c, count;
@@ -529,8 +551,7 @@ raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length)
/*
* Put character routine for 3215 devices
*/
-static void
-raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
+static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
{
unsigned long flags;
unsigned int length, i;
@@ -566,8 +587,7 @@ raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
* Flush routine, it simply sets the flush flag and tries to start
* pending IO.
*/
-static void
-raw3215_flush_buffer(struct raw3215_info *raw)
+static void raw3215_flush_buffer(struct raw3215_info *raw)
{
unsigned long flags;
@@ -583,8 +603,7 @@ raw3215_flush_buffer(struct raw3215_info *raw)
/*
* Fire up a 3215 device.
*/
-static int
-raw3215_startup(struct raw3215_info *raw)
+static int raw3215_startup(struct raw3215_info *raw)
{
unsigned long flags;
@@ -602,8 +621,7 @@ raw3215_startup(struct raw3215_info *raw)
/*
* Shutdown a 3215 device.
*/
-static void
-raw3215_shutdown(struct raw3215_info *raw)
+static void raw3215_shutdown(struct raw3215_info *raw)
{
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
@@ -628,14 +646,13 @@ raw3215_shutdown(struct raw3215_info *raw)
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
-static int
-raw3215_probe (struct ccw_device *cdev)
+static int raw3215_probe (struct ccw_device *cdev)
{
struct raw3215_info *raw;
int line;
/* Console is special. */
- if (raw3215[0] && (cdev->dev.driver_data == raw3215[0]))
+ if (raw3215[0] && (raw3215[0] == dev_get_drvdata(&cdev->dev)))
return 0;
raw = kmalloc(sizeof(struct raw3215_info) +
RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA);
@@ -669,44 +686,41 @@ raw3215_probe (struct ccw_device *cdev)
}
init_waitqueue_head(&raw->empty_wait);
- cdev->dev.driver_data = raw;
+ dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
return 0;
}
-static void
-raw3215_remove (struct ccw_device *cdev)
+static void raw3215_remove (struct ccw_device *cdev)
{
struct raw3215_info *raw;
ccw_device_set_offline(cdev);
- raw = cdev->dev.driver_data;
+ raw = dev_get_drvdata(&cdev->dev);
if (raw) {
- cdev->dev.driver_data = NULL;
+ dev_set_drvdata(&cdev->dev, NULL);
kfree(raw->buffer);
kfree(raw);
}
}
-static int
-raw3215_set_online (struct ccw_device *cdev)
+static int raw3215_set_online (struct ccw_device *cdev)
{
struct raw3215_info *raw;
- raw = cdev->dev.driver_data;
+ raw = dev_get_drvdata(&cdev->dev);
if (!raw)
return -ENODEV;
return raw3215_startup(raw);
}
-static int
-raw3215_set_offline (struct ccw_device *cdev)
+static int raw3215_set_offline (struct ccw_device *cdev)
{
struct raw3215_info *raw;
- raw = cdev->dev.driver_data;
+ raw = dev_get_drvdata(&cdev->dev);
if (!raw)
return -ENODEV;
@@ -715,6 +729,36 @@ raw3215_set_offline (struct ccw_device *cdev)
return 0;
}
+static int raw3215_pm_stop(struct ccw_device *cdev)
+{
+ struct raw3215_info *raw;
+ unsigned long flags;
+
+ /* Empty the output buffer, then prevent new I/O. */
+ raw = dev_get_drvdata(&cdev->dev);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+ raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
+ raw->flags |= RAW3215_FROZEN;
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+ return 0;
+}
+
+static int raw3215_pm_start(struct ccw_device *cdev)
+{
+ struct raw3215_info *raw;
+ unsigned long flags;
+
+ /* Allow I/O again and flush output buffer. */
+ raw = dev_get_drvdata(&cdev->dev);
+ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+ raw->flags &= ~RAW3215_FROZEN;
+ raw->flags |= RAW3215_FLUSHING;
+ raw3215_try_io(raw);
+ raw->flags &= ~RAW3215_FLUSHING;
+ spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+ return 0;
+}
+
static struct ccw_device_id raw3215_id[] = {
{ CCW_DEVICE(0x3215, 0) },
{ /* end of list */ },
@@ -728,14 +772,17 @@ static struct ccw_driver raw3215_ccw_driver = {
.remove = &raw3215_remove,
.set_online = &raw3215_set_online,
.set_offline = &raw3215_set_offline,
+ .freeze = &raw3215_pm_stop,
+ .thaw = &raw3215_pm_start,
+ .restore = &raw3215_pm_start,
};
#ifdef CONFIG_TN3215_CONSOLE
/*
* Write a string to the 3215 console
*/
-static void
-con3215_write(struct console *co, const char *str, unsigned int count)
+static void con3215_write(struct console *co, const char *str,
+ unsigned int count)
{
struct raw3215_info *raw;
int i;
@@ -768,13 +815,17 @@ static struct tty_driver *con3215_device(struct console *c, int *index)
* panic() calls con3215_flush through a panic_notifier
* before the system enters a disabled, endless loop.
*/
-static void
-con3215_flush(void)
+static void con3215_flush(void)
{
struct raw3215_info *raw;
unsigned long flags;
raw = raw3215[0]; /* console 3215 is the first one */
+ if (raw->flags & RAW3215_FROZEN)
+ /* The console is still frozen for suspend. */
+ if (ccw_device_force_console())
+ /* Forcing didn't work, no panic message. */
+ return;
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
@@ -811,8 +862,7 @@ static struct console con3215 = {
* 3215 console initialization code called from console_init().
* NOTE: This is called before kmalloc is available.
*/
-static int __init
-con3215_init(void)
+static int __init con3215_init(void)
{
struct ccw_device *cdev;
struct raw3215_info *raw;
@@ -848,7 +898,7 @@ con3215_init(void)
raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
raw->cdev = cdev;
- cdev->dev.driver_data = raw;
+ dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
raw->flags |= RAW3215_FIXED;
@@ -875,8 +925,7 @@ console_initcall(con3215_init);
*
* This routine is called whenever a 3215 tty is opened.
*/
-static int
-tty3215_open(struct tty_struct *tty, struct file * filp)
+static int tty3215_open(struct tty_struct *tty, struct file * filp)
{
struct raw3215_info *raw;
int retval, line;
@@ -909,8 +958,7 @@ tty3215_open(struct tty_struct *tty, struct file * filp)
* This routine is called when the 3215 tty is closed. We wait
* for the remaining request to be completed. Then we clean up.
*/
-static void
-tty3215_close(struct tty_struct *tty, struct file * filp)
+static void tty3215_close(struct tty_struct *tty, struct file * filp)
{
struct raw3215_info *raw;
@@ -927,8 +975,7 @@ tty3215_close(struct tty_struct *tty, struct file * filp)
/*
* Returns the amount of free space in the output buffer.
*/
-static int
-tty3215_write_room(struct tty_struct *tty)
+static int tty3215_write_room(struct tty_struct *tty)
{
struct raw3215_info *raw;
@@ -944,9 +991,8 @@ tty3215_write_room(struct tty_struct *tty)
/*
* String write routine for 3215 ttys
*/
-static int
-tty3215_write(struct tty_struct * tty,
- const unsigned char *buf, int count)
+static int tty3215_write(struct tty_struct * tty,
+ const unsigned char *buf, int count)
{
struct raw3215_info *raw;
@@ -960,8 +1006,7 @@ tty3215_write(struct tty_struct * tty,
/*
* Put character routine for 3215 ttys
*/
-static int
-tty3215_put_char(struct tty_struct *tty, unsigned char ch)
+static int tty3215_put_char(struct tty_struct *tty, unsigned char ch)
{
struct raw3215_info *raw;
@@ -972,16 +1017,14 @@ tty3215_put_char(struct tty_struct *tty, unsigned char ch)
return 1;
}
-static void
-tty3215_flush_chars(struct tty_struct *tty)
+static void tty3215_flush_chars(struct tty_struct *tty)
{
}
/*
* Returns the number of characters in the output buffer
*/
-static int
-tty3215_chars_in_buffer(struct tty_struct *tty)
+static int tty3215_chars_in_buffer(struct tty_struct *tty)
{
struct raw3215_info *raw;
@@ -989,8 +1032,7 @@ tty3215_chars_in_buffer(struct tty_struct *tty)
return raw->count;
}
-static void
-tty3215_flush_buffer(struct tty_struct *tty)
+static void tty3215_flush_buffer(struct tty_struct *tty)
{
struct raw3215_info *raw;
@@ -1002,9 +1044,8 @@ tty3215_flush_buffer(struct tty_struct *tty)
/*
* Currently we don't have any io controls for 3215 ttys
*/
-static int
-tty3215_ioctl(struct tty_struct *tty, struct file * file,
- unsigned int cmd, unsigned long arg)
+static int tty3215_ioctl(struct tty_struct *tty, struct file * file,
+ unsigned int cmd, unsigned long arg)
{
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
@@ -1019,8 +1060,7 @@ tty3215_ioctl(struct tty_struct *tty, struct file * file,
/*
* Disable reading from a 3215 tty
*/
-static void
-tty3215_throttle(struct tty_struct * tty)
+static void tty3215_throttle(struct tty_struct * tty)
{
struct raw3215_info *raw;
@@ -1031,8 +1071,7 @@ tty3215_throttle(struct tty_struct * tty)
/*
* Enable reading from a 3215 tty
*/
-static void
-tty3215_unthrottle(struct tty_struct * tty)
+static void tty3215_unthrottle(struct tty_struct * tty)
{
struct raw3215_info *raw;
unsigned long flags;
@@ -1049,8 +1088,7 @@ tty3215_unthrottle(struct tty_struct * tty)
/*
* Disable writing to a 3215 tty
*/
-static void
-tty3215_stop(struct tty_struct *tty)
+static void tty3215_stop(struct tty_struct *tty)
{
struct raw3215_info *raw;
@@ -1061,8 +1099,7 @@ tty3215_stop(struct tty_struct *tty)
/*
* Enable writing to a 3215 tty
*/
-static void
-tty3215_start(struct tty_struct *tty)
+static void tty3215_start(struct tty_struct *tty)
{
struct raw3215_info *raw;
unsigned long flags;
@@ -1096,8 +1133,7 @@ static const struct tty_operations tty3215_ops = {
* 3215 tty registration code called from tty_init().
* Most kernel services (incl. kmalloc) are available at this point.
*/
-static int __init
-tty3215_init(void)
+static int __init tty3215_init(void)
{
struct tty_driver *driver;
int ret;
@@ -1142,8 +1178,7 @@ tty3215_init(void)
return 0;
}
-static void __exit
-tty3215_exit(void)
+static void __exit tty3215_exit(void)
{
tty_unregister_driver(tty3215_driver);
put_tty_driver(tty3215_driver);
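
Two details in the con3215 changes above are easy to miss: while RAW3215_FROZEN is set, raw3215_try_io() no longer starts channel programs and raw3215_make_room() drops the oldest buffered line instead of waiting for I/O that cannot complete; and the index arithmetic in raw3215_drop_line() relies on RAW3215_BUFFER_SIZE being a power of two, so masking with (size - 1) wraps even a negative head - count correctly. A tiny standalone illustration of the masking (the constant and values are made up):

/* Masking with (size - 1) works as a ring-buffer wrap only for
 * power-of-two sizes; unlike the % operator it also maps a negative
 * difference such as head - count back into 0..size-1. */
#include <stdio.h>

#define BUF_SIZE 8192			/* must be a power of two */

int main(void)
{
	int head = 3, count = 10;
	int ix = (head - count) & (BUF_SIZE - 1);

	printf("wrapped index: %d\n", ix);	/* prints 8185 */
	return 0;
}
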
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index d028d2ee83d..44d02e371c0 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -1,11 +1,10 @@
/*
- * drivers/s390/char/con3270.c
- * IBM/3270 Driver - console view.
+ * IBM/3270 Driver - console view.
*
- * Author(s):
- * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
- * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
- * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s):
+ * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 2003, 2009
*/
#include <linux/bootmem.h>
@@ -64,7 +63,7 @@ static struct con3270 *condev;
#define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
#define CON_UPDATE_LIST 2 /* Update lines in tty3270->update. */
#define CON_UPDATE_STATUS 4 /* Update status line. */
-#define CON_UPDATE_ALL 7
+#define CON_UPDATE_ALL 8 /* Recreate screen. */
static void con3270_update(struct con3270 *);
@@ -73,18 +72,10 @@ static void con3270_update(struct con3270 *);
*/
static void con3270_set_timer(struct con3270 *cp, int expires)
{
- if (expires == 0) {
- if (timer_pending(&cp->timer))
- del_timer(&cp->timer);
- return;
- }
- if (timer_pending(&cp->timer) &&
- mod_timer(&cp->timer, jiffies + expires))
- return;
- cp->timer.function = (void (*)(unsigned long)) con3270_update;
- cp->timer.data = (unsigned long) cp;
- cp->timer.expires = jiffies + expires;
- add_timer(&cp->timer);
+ if (expires == 0)
+ del_timer(&cp->timer);
+ else
+ mod_timer(&cp->timer, jiffies + expires);
}
/*
@@ -225,6 +216,12 @@ con3270_update(struct con3270 *cp)
spin_lock_irqsave(&cp->view.lock, flags);
updated = 0;
+ if (cp->update_flags & CON_UPDATE_ALL) {
+ con3270_rebuild_update(cp);
+ con3270_update_status(cp);
+ cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST |
+ CON_UPDATE_STATUS;
+ }
if (cp->update_flags & CON_UPDATE_ERASE) {
/* Use erase write alternate to initialize display. */
raw3270_request_set_cmd(wrq, TC_EWRITEA);
@@ -302,7 +299,6 @@ con3270_read_tasklet(struct raw3270_request *rrq)
deactivate = 1;
break;
case 0x6d: /* clear: start from scratch. */
- con3270_rebuild_update(cp);
cp->update_flags = CON_UPDATE_ALL;
con3270_set_timer(cp, 1);
break;
@@ -382,30 +378,21 @@ con3270_issue_read(struct con3270 *cp)
static int
con3270_activate(struct raw3270_view *view)
{
- unsigned long flags;
struct con3270 *cp;
cp = (struct con3270 *) view;
- spin_lock_irqsave(&cp->view.lock, flags);
- cp->nr_up = 0;
- con3270_rebuild_update(cp);
- con3270_update_status(cp);
cp->update_flags = CON_UPDATE_ALL;
con3270_set_timer(cp, 1);
- spin_unlock_irqrestore(&cp->view.lock, flags);
return 0;
}
static void
con3270_deactivate(struct raw3270_view *view)
{
- unsigned long flags;
struct con3270 *cp;
cp = (struct con3270 *) view;
- spin_lock_irqsave(&cp->view.lock, flags);
del_timer(&cp->timer);
- spin_unlock_irqrestore(&cp->view.lock, flags);
}
static int
@@ -504,6 +491,7 @@ con3270_write(struct console *co, const char *str, unsigned int count)
con3270_cline_end(cp);
}
/* Setup timer to output current console buffer after 1/10 second */
+ cp->nr_up = 0;
if (cp->view.dev && !timer_pending(&cp->timer))
con3270_set_timer(cp, HZ/10);
spin_unlock_irqrestore(&cp->view.lock,flags);
@@ -541,6 +529,7 @@ con3270_flush(void)
cp = condev;
if (!cp->view.dev)
return;
+ raw3270_pm_unfreeze(&cp->view);
spin_lock_irqsave(&cp->view.lock, flags);
con3270_wait_write(cp);
cp->nr_up = 0;
@@ -624,7 +613,8 @@ con3270_init(void)
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
- init_timer(&condev->timer);
+ setup_timer(&condev->timer, (void (*)(unsigned long)) con3270_update,
+ (unsigned long) condev);
tasklet_init(&condev->readlet,
(void (*)(unsigned long)) con3270_read_tasklet,
(unsigned long) condev->read);
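
The con3270 timer handling above is simplified by binding the callback and data once with setup_timer() at init time, after which con3270_set_timer() only needs mod_timer()/del_timer(). A minimal sketch of that idiom with the timer API of this kernel generation (names are illustrative):

/* Illustrative use of the setup_timer()/mod_timer() idiom adopted by
 * con3270 above (2.6.30-era timer API: the callback takes an
 * unsigned long argument). */
#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list example_timer;

static void example_timer_fn(unsigned long data)
{
	/* refresh the screen, as con3270_update() does */
}

static void example_set_timer(int expires)
{
	if (expires == 0)
		del_timer(&example_timer);
	else
		mod_timer(&example_timer, jiffies + expires);
}

static void example_init(void)
{
	setup_timer(&example_timer, example_timer_fn, 0UL);
}
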
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 40759c33477..097d3846a82 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -1,11 +1,10 @@
/*
- * drivers/s390/char/fs3270.c
- * IBM/3270 Driver - fullscreen driver.
+ * IBM/3270 Driver - fullscreen driver.
*
- * Author(s):
- * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
- * Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
- * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s):
+ * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ * Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 2003, 2009
*/
#include <linux/bootmem.h>
@@ -399,6 +398,11 @@ fs3270_free_view(struct raw3270_view *view)
static void
fs3270_release(struct raw3270_view *view)
{
+ struct fs3270 *fp;
+
+ fp = (struct fs3270 *) view;
+ if (fp->fs_pid)
+ kill_pid(fp->fs_pid, SIGHUP, 1);
}
/* View to a 3270 device. Can be console, tty or fullscreen. */
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 97e63cf4694..75a8831eebb 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -1,10 +1,9 @@
/*
- * drivers/s390/char/monreader.c
- *
* Character device driver for reading z/VM *MONITOR service records.
*
- * Copyright IBM Corp. 2004, 2008
- * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ * Copyright IBM Corp. 2004, 2009
+ *
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#define KMSG_COMPONENT "monreader"
@@ -22,6 +21,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
+#include <linux/device.h>
#include <net/iucv/iucv.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
@@ -78,6 +78,7 @@ static u8 user_data_sever[16] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};
+static struct device *monreader_device;
/******************************************************************************
* helper functions *
@@ -319,11 +320,12 @@ static int mon_open(struct inode *inode, struct file *filp)
goto out_path;
}
filp->private_data = monpriv;
+ monreader_device->driver_data = monpriv;
unlock_kernel();
return nonseekable_open(inode, filp);
out_path:
- kfree(monpriv->path);
+ iucv_path_free(monpriv->path);
out_priv:
mon_free_mem(monpriv);
out_use:
@@ -341,10 +343,13 @@ static int mon_close(struct inode *inode, struct file *filp)
/*
* Close IUCV connection and unregister
*/
- rc = iucv_path_sever(monpriv->path, user_data_sever);
- if (rc)
- pr_warning("Disconnecting the z/VM *MONITOR system service "
- "failed with rc=%i\n", rc);
+ if (monpriv->path) {
+ rc = iucv_path_sever(monpriv->path, user_data_sever);
+ if (rc)
+ pr_warning("Disconnecting the z/VM *MONITOR system "
+ "service failed with rc=%i\n", rc);
+ iucv_path_free(monpriv->path);
+ }
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
@@ -452,6 +457,94 @@ static struct miscdevice mon_dev = {
.minor = MISC_DYNAMIC_MINOR,
};
+
+/******************************************************************************
+ * suspend / resume *
+ *****************************************************************************/
+static int monreader_freeze(struct device *dev)
+{
+ struct mon_private *monpriv = dev->driver_data;
+ int rc;
+
+ if (!monpriv)
+ return 0;
+ if (monpriv->path) {
+ rc = iucv_path_sever(monpriv->path, user_data_sever);
+ if (rc)
+ pr_warning("Disconnecting the z/VM *MONITOR system "
+ "service failed with rc=%i\n", rc);
+ iucv_path_free(monpriv->path);
+ }
+ atomic_set(&monpriv->iucv_severed, 0);
+ atomic_set(&monpriv->iucv_connected, 0);
+ atomic_set(&monpriv->read_ready, 0);
+ atomic_set(&monpriv->msglim_count, 0);
+ monpriv->write_index = 0;
+ monpriv->read_index = 0;
+ monpriv->path = NULL;
+ return 0;
+}
+
+static int monreader_thaw(struct device *dev)
+{
+ struct mon_private *monpriv = dev->driver_data;
+ int rc;
+
+ if (!monpriv)
+ return 0;
+ rc = -ENOMEM;
+ monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
+ if (!monpriv->path)
+ goto out;
+ rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
+ MON_SERVICE, NULL, user_data_connect, monpriv);
+ if (rc) {
+ pr_err("Connecting to the z/VM *MONITOR system service "
+ "failed with rc=%i\n", rc);
+ goto out_path;
+ }
+ wait_event(mon_conn_wait_queue,
+ atomic_read(&monpriv->iucv_connected) ||
+ atomic_read(&monpriv->iucv_severed));
+ if (atomic_read(&monpriv->iucv_severed))
+ goto out_path;
+ return 0;
+out_path:
+ rc = -EIO;
+ iucv_path_free(monpriv->path);
+ monpriv->path = NULL;
+out:
+ atomic_set(&monpriv->iucv_severed, 1);
+ return rc;
+}
+
+static int monreader_restore(struct device *dev)
+{
+ int rc;
+
+ segment_unload(mon_dcss_name);
+ rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
+ &mon_dcss_start, &mon_dcss_end);
+ if (rc < 0) {
+ segment_warning(rc, mon_dcss_name);
+ panic("fatal monreader resume error: no monitor dcss\n");
+ }
+ return monreader_thaw(dev);
+}
+
+static struct dev_pm_ops monreader_pm_ops = {
+ .freeze = monreader_freeze,
+ .thaw = monreader_thaw,
+ .restore = monreader_restore,
+};
+
+static struct device_driver monreader_driver = {
+ .name = "monreader",
+ .bus = &iucv_bus,
+ .pm = &monreader_pm_ops,
+};
+
+
/******************************************************************************
* module init/exit *
*****************************************************************************/
@@ -475,16 +568,33 @@ static int __init mon_init(void)
return rc;
}
+ rc = driver_register(&monreader_driver);
+ if (rc)
+ goto out_iucv;
+ monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!monreader_device)
+ goto out_driver;
+ dev_set_name(monreader_device, "monreader-dev");
+ monreader_device->bus = &iucv_bus;
+ monreader_device->parent = iucv_root;
+ monreader_device->driver = &monreader_driver;
+ monreader_device->release = (void (*)(struct device *))kfree;
+ rc = device_register(monreader_device);
+ if (rc) {
+ kfree(monreader_device);
+ goto out_driver;
+ }
+
rc = segment_type(mon_dcss_name);
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
- goto out_iucv;
+ goto out_device;
}
if (rc != SEG_TYPE_SC) {
pr_err("The specified *MONITOR DCSS %s does not have the "
"required type SC\n", mon_dcss_name);
rc = -EINVAL;
- goto out_iucv;
+ goto out_device;
}
rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
@@ -492,7 +602,7 @@ static int __init mon_init(void)
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
rc = -EINVAL;
- goto out_iucv;
+ goto out_device;
}
dcss_mkname(mon_dcss_name, &user_data_connect[8]);
@@ -503,6 +613,10 @@ static int __init mon_init(void)
out:
segment_unload(mon_dcss_name);
+out_device:
+ device_unregister(monreader_device);
+out_driver:
+ driver_unregister(&monreader_driver);
out_iucv:
iucv_unregister(&monreader_iucv_handler, 1);
return rc;
@@ -512,6 +626,8 @@ static void __exit mon_exit(void)
{
segment_unload(mon_dcss_name);
WARN_ON(misc_deregister(&mon_dev) != 0);
+ device_unregister(monreader_device);
+ driver_unregister(&monreader_driver);
iucv_unregister(&monreader_iucv_handler, 1);
return;
}
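
For orientation, the monreader hunks above follow the standard hibernation contract of struct dev_pm_ops: .freeze quiesces the driver before the hibernation image is written, .thaw undoes that within the same running kernel, and .restore re-initializes state after booting back from the image, which is why monreader_restore() also reloads the DCSS before calling the thaw path. A minimal sketch of how such a driver-level PM object is wired up in general follows; all example_* names are invented for illustration and are not part of the patch.

#include <linux/device.h>
#include <linux/pm.h>

/* Illustrative hibernation callbacks; a real driver quiesces and resumes I/O here. */
static int example_freeze(struct device *dev)
{
	/* stop submitting work before the hibernation image is written */
	return 0;
}

static int example_thaw(struct device *dev)
{
	/* undo freeze() in the still-running kernel */
	return 0;
}

static int example_restore(struct device *dev)
{
	/* full re-initialization after resume from the hibernation image */
	return 0;
}

static struct dev_pm_ops example_pm_ops = {
	.freeze  = example_freeze,
	.thaw    = example_thaw,
	.restore = example_restore,
};

static struct device_driver example_driver = {
	.name = "example",
	.pm   = &example_pm_ops,
};

The monreader patch additionally registers a dummy device on the IUCV bus so that these callbacks have a struct device to be invoked on.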
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index c7d7483bab9..66fb8eba93f 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -1,9 +1,7 @@
/*
- * drivers/s390/char/monwriter.c
- *
* Character device driver for writing z/VM *MONITOR service records.
*
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2009
*
* Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
*/
@@ -22,6 +20,7 @@
#include <linux/ctype.h>
#include <linux/poll.h>
#include <linux/mutex.h>
+#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
@@ -40,7 +39,10 @@ struct mon_buf {
char *data;
};
+static LIST_HEAD(mon_priv_list);
+
struct mon_private {
+ struct list_head priv_list;
struct list_head list;
struct monwrite_hdr hdr;
size_t hdr_to_read;
@@ -188,6 +190,7 @@ static int monwrite_open(struct inode *inode, struct file *filp)
monpriv->hdr_to_read = sizeof(monpriv->hdr);
mutex_init(&monpriv->thread_mutex);
filp->private_data = monpriv;
+ list_add_tail(&monpriv->priv_list, &mon_priv_list);
unlock_kernel();
return nonseekable_open(inode, filp);
}
@@ -206,6 +209,7 @@ static int monwrite_close(struct inode *inode, struct file *filp)
kfree(entry->data);
kfree(entry);
}
+ list_del(&monpriv->priv_list);
kfree(monpriv);
return 0;
}
@@ -281,20 +285,102 @@ static struct miscdevice mon_dev = {
};
/*
+ * suspend/resume
+ */
+
+static int monwriter_freeze(struct device *dev)
+{
+ struct mon_private *monpriv;
+ struct mon_buf *monbuf;
+
+ list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
+ list_for_each_entry(monbuf, &monpriv->list, list) {
+ if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT)
+ monwrite_diag(&monbuf->hdr, monbuf->data,
+ APPLDATA_STOP_REC);
+ }
+ }
+ return 0;
+}
+
+static int monwriter_restore(struct device *dev)
+{
+ struct mon_private *monpriv;
+ struct mon_buf *monbuf;
+
+ list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
+ list_for_each_entry(monbuf, &monpriv->list, list) {
+ if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL)
+ monwrite_diag(&monbuf->hdr, monbuf->data,
+ APPLDATA_START_INTERVAL_REC);
+ if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG)
+ monwrite_diag(&monbuf->hdr, monbuf->data,
+ APPLDATA_START_CONFIG_REC);
+ }
+ }
+ return 0;
+}
+
+static int monwriter_thaw(struct device *dev)
+{
+ return monwriter_restore(dev);
+}
+
+static struct dev_pm_ops monwriter_pm_ops = {
+ .freeze = monwriter_freeze,
+ .thaw = monwriter_thaw,
+ .restore = monwriter_restore,
+};
+
+static struct platform_driver monwriter_pdrv = {
+ .driver = {
+ .name = "monwriter",
+ .owner = THIS_MODULE,
+ .pm = &monwriter_pm_ops,
+ },
+};
+
+static struct platform_device *monwriter_pdev;
+
+/*
* module init/exit
*/
static int __init mon_init(void)
{
- if (MACHINE_IS_VM)
- return misc_register(&mon_dev);
- else
+ int rc;
+
+ if (!MACHINE_IS_VM)
return -ENODEV;
+
+ rc = platform_driver_register(&monwriter_pdrv);
+ if (rc)
+ return rc;
+
+ monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL,
+ 0);
+ if (IS_ERR(monwriter_pdev)) {
+ rc = PTR_ERR(monwriter_pdev);
+ goto out_driver;
+ }
+
+ rc = misc_register(&mon_dev);
+ if (rc)
+ goto out_device;
+ return 0;
+
+out_device:
+ platform_device_unregister(monwriter_pdev);
+out_driver:
+ platform_driver_unregister(&monwriter_pdrv);
+ return rc;
}
static void __exit mon_exit(void)
{
WARN_ON(misc_deregister(&mon_dev) != 0);
+ platform_device_unregister(monwriter_pdev);
+ platform_driver_unregister(&monwriter_pdrv);
}
module_init(mon_init);
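
monwriter has no bus device of its own, so the patch above creates a throwaway platform device/driver pair purely as an anchor for the PM callbacks. Sketched in isolation, assuming nothing beyond the platform bus API (the example_* names are illustrative, not from the patch):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_freeze(struct device *dev)
{
	/* suspend work here; monwriter stops the active APPLDATA records */
	return 0;
}

static struct dev_pm_ops example_pm_ops = {
	.freeze = example_freeze,
};

static struct platform_driver example_pdrv = {
	.driver = {
		.name  = "example",
		.owner = THIS_MODULE,
		.pm    = &example_pm_ops,
	},
};

static struct platform_device *example_pdev;

static int __init example_init(void)
{
	int rc;

	rc = platform_driver_register(&example_pdrv);
	if (rc)
		return rc;
	example_pdev = platform_device_register_simple("example", -1, NULL, 0);
	if (IS_ERR(example_pdev)) {
		platform_driver_unregister(&example_pdrv);
		return PTR_ERR(example_pdev);
	}
	return 0;
}

static void __exit example_exit(void)
{
	/* tear down in reverse order: device first, then driver */
	platform_device_unregister(example_pdev);
	platform_driver_unregister(&example_pdrv);
}

module_init(example_init);
module_exit(example_exit);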
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 0b15cf107ec..acab7b2dfe8 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1,11 +1,10 @@
/*
- * drivers/s390/char/raw3270.c
- * IBM/3270 Driver - core functions.
+ * IBM/3270 Driver - core functions.
*
- * Author(s):
- * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
- * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
- * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s):
+ * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 2003, 2009
*/
#include <linux/bootmem.h>
@@ -61,6 +60,7 @@ struct raw3270 {
#define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */
#define RAW3270_FLAGS_READY 4 /* Device is usable by views */
#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */
+#define RAW3270_FLAGS_FROZEN 16 /* set if 3270 is frozen for suspend */
/* Semaphore to protect global data of raw3270 (devices, views, etc). */
static DEFINE_MUTEX(raw3270_mutex);
@@ -306,7 +306,8 @@ raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
rp = view->dev;
- if (!rp || rp->view != view)
+ if (!rp || rp->view != view ||
+ test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
rc = -ENODEV;
@@ -323,7 +324,8 @@ raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
int rc;
rp = view->dev;
- if (!rp || rp->view != view)
+ if (!rp || rp->view != view ||
+ test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
rc = -ENODEV;
@@ -355,7 +357,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct raw3270_request *rq;
int rc;
- rp = (struct raw3270 *) cdev->dev.driver_data;
+ rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return;
rq = (struct raw3270_request *) intparm;
@@ -764,7 +766,8 @@ raw3270_reset(struct raw3270_view *view)
int rc;
rp = view->dev;
- if (!rp || rp->view != view)
+ if (!rp || rp->view != view ||
+ test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
rc = -EACCES;
else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
rc = -ENODEV;
@@ -828,7 +831,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
if (rp->minor == -1)
return -EUSERS;
rp->cdev = cdev;
- cdev->dev.driver_data = rp;
+ dev_set_drvdata(&cdev->dev, rp);
cdev->handler = raw3270_irq;
return 0;
}
@@ -922,6 +925,8 @@ raw3270_activate_view(struct raw3270_view *view)
rc = 0;
else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
rc = -ENODEV;
+ else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+ rc = -EACCES;
else {
oldview = NULL;
if (rp->view) {
@@ -969,7 +974,8 @@ raw3270_deactivate_view(struct raw3270_view *view)
list_del_init(&view->list);
list_add_tail(&view->list, &rp->view_list);
/* Try to activate another view. */
- if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
+ if (test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
+ !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
list_for_each_entry(view, &rp->view_list, list) {
rp->view = view;
if (view->fn->activate(view) == 0)
@@ -1068,7 +1074,8 @@ raw3270_del_view(struct raw3270_view *view)
rp->view = NULL;
}
list_del_init(&view->list);
- if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
+ if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) &&
+ !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
/* Try to activate another view. */
list_for_each_entry(nv, &rp->view_list, list) {
if (nv->fn->activate(nv) == 0) {
@@ -1105,7 +1112,7 @@ raw3270_delete_device(struct raw3270 *rp)
/* Disconnect from ccw_device. */
cdev = rp->cdev;
rp->cdev = NULL;
- cdev->dev.driver_data = NULL;
+ dev_set_drvdata(&cdev->dev, NULL);
cdev->handler = NULL;
/* Put ccw_device structure. */
@@ -1129,7 +1136,7 @@ static ssize_t
raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%i\n",
- ((struct raw3270 *) dev->driver_data)->model);
+ ((struct raw3270 *) dev_get_drvdata(dev))->model);
}
static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
@@ -1137,7 +1144,7 @@ static ssize_t
raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%i\n",
- ((struct raw3270 *) dev->driver_data)->rows);
+ ((struct raw3270 *) dev_get_drvdata(dev))->rows);
}
static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
@@ -1145,7 +1152,7 @@ static ssize_t
raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%i\n",
- ((struct raw3270 *) dev->driver_data)->cols);
+ ((struct raw3270 *) dev_get_drvdata(dev))->cols);
}
static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
@@ -1282,7 +1289,7 @@ raw3270_remove (struct ccw_device *cdev)
struct raw3270_view *v;
struct raw3270_notifier *np;
- rp = cdev->dev.driver_data;
+ rp = dev_get_drvdata(&cdev->dev);
/*
* _remove is the opposite of _probe; it's probe that
* should set up rp. raw3270_remove gets entered for
@@ -1330,13 +1337,65 @@ raw3270_set_offline (struct ccw_device *cdev)
{
struct raw3270 *rp;
- rp = cdev->dev.driver_data;
+ rp = dev_get_drvdata(&cdev->dev);
if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
return -EBUSY;
raw3270_remove(cdev);
return 0;
}
+static int raw3270_pm_stop(struct ccw_device *cdev)
+{
+ struct raw3270 *rp;
+ struct raw3270_view *view;
+ unsigned long flags;
+
+ rp = cdev->dev.driver_data;
+ if (!rp)
+ return 0;
+ spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+ if (rp->view)
+ rp->view->fn->deactivate(rp->view);
+ if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) {
+ /*
+ * Release tty and fullscreen for all non-console
+ * devices.
+ */
+ list_for_each_entry(view, &rp->view_list, list) {
+ if (view->fn->release)
+ view->fn->release(view);
+ }
+ }
+ set_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+ return 0;
+}
+
+static int raw3270_pm_start(struct ccw_device *cdev)
+{
+ struct raw3270 *rp;
+ unsigned long flags;
+
+ rp = cdev->dev.driver_data;
+ if (!rp)
+ return 0;
+ spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+ clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+ if (rp->view)
+ rp->view->fn->activate(rp->view);
+ spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+ return 0;
+}
+
+void raw3270_pm_unfreeze(struct raw3270_view *view)
+{
+ struct raw3270 *rp;
+
+ rp = view->dev;
+ if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+ ccw_device_force_console();
+}
+
static struct ccw_device_id raw3270_id[] = {
{ CCW_DEVICE(0x3270, 0) },
{ CCW_DEVICE(0x3271, 0) },
@@ -1360,6 +1419,9 @@ static struct ccw_driver raw3270_ccw_driver = {
.remove = &raw3270_remove,
.set_online = &raw3270_set_online,
.set_offline = &raw3270_set_offline,
+ .freeze = &raw3270_pm_stop,
+ .thaw = &raw3270_pm_start,
+ .restore = &raw3270_pm_start,
};
static int
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 90beaa80a78..ed34eb2199c 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -1,11 +1,10 @@
/*
- * drivers/s390/char/raw3270.h
- * IBM/3270 Driver
+ * IBM/3270 Driver
*
- * Author(s):
- * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
- * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
- * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s):
+ * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 2003, 2009
*/
#include <asm/idals.h>
@@ -195,6 +194,7 @@ void raw3270_wait_cons_dev(struct raw3270 *);
/* Notifier for device addition/removal */
int raw3270_register_notifier(void (*notifier)(int, int));
void raw3270_unregister_notifier(void (*notifier)(int, int));
+void raw3270_pm_unfreeze(struct raw3270_view *);
/*
* Little memory allocator for string objects.
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 4377e93a43d..a983f508678 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1,11 +1,10 @@
/*
- * drivers/s390/char/sclp.c
- * core function to access sclp interface
+ * core function to access sclp interface
*
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Peschke <mpeschke@de.ibm.com>
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/module.h>
@@ -16,6 +15,9 @@
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
+#include <linux/suspend.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/s390_ext.h>
@@ -47,6 +49,16 @@ static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+/* Suspend request */
+static DECLARE_COMPLETION(sclp_request_queue_flushed);
+
+static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
+{
+ complete(&sclp_request_queue_flushed);
+}
+
+static struct sclp_req sclp_suspend_req;
+
/* Timer for request retries. */
static struct timer_list sclp_request_timer;
@@ -84,6 +96,12 @@ static volatile enum sclp_mask_state_t {
sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;
+/* Internal state: is the driver suspended? */
+static enum sclp_suspend_state_t {
+ sclp_suspend_state_running,
+ sclp_suspend_state_suspended,
+} sclp_suspend_state = sclp_suspend_state_running;
+
/* Maximum retry counts */
#define SCLP_INIT_RETRY 3
#define SCLP_MASK_RETRY 3
@@ -211,6 +229,8 @@ sclp_process_queue(void)
del_timer(&sclp_request_timer);
while (!list_empty(&sclp_req_queue)) {
req = list_entry(sclp_req_queue.next, struct sclp_req, list);
+ if (!req->sccb)
+ goto do_post;
rc = __sclp_start_request(req);
if (rc == 0)
break;
@@ -222,6 +242,7 @@ sclp_process_queue(void)
sclp_request_timeout, 0);
break;
}
+do_post:
/* Post-processing for aborted request */
list_del(&req->list);
if (req->callback) {
@@ -233,6 +254,19 @@ sclp_process_queue(void)
spin_unlock_irqrestore(&sclp_lock, flags);
}
+static int __sclp_can_add_request(struct sclp_req *req)
+{
+ if (req == &sclp_suspend_req || req == &sclp_init_req)
+ return 1;
+ if (sclp_suspend_state != sclp_suspend_state_running)
+ return 0;
+ if (sclp_init_state != sclp_init_state_initialized)
+ return 0;
+ if (sclp_activation_state != sclp_activation_state_active)
+ return 0;
+ return 1;
+}
+
/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
@@ -241,9 +275,7 @@ sclp_add_request(struct sclp_req *req)
int rc;
spin_lock_irqsave(&sclp_lock, flags);
- if ((sclp_init_state != sclp_init_state_initialized ||
- sclp_activation_state != sclp_activation_state_active) &&
- req != &sclp_init_req) {
+ if (!__sclp_can_add_request(req)) {
spin_unlock_irqrestore(&sclp_lock, flags);
return -EIO;
}
@@ -254,10 +286,16 @@ sclp_add_request(struct sclp_req *req)
/* Start if request is first in list */
if (sclp_running_state == sclp_running_state_idle &&
req->list.prev == &sclp_req_queue) {
+ if (!req->sccb) {
+ list_del(&req->list);
+ rc = -ENODATA;
+ goto out;
+ }
rc = __sclp_start_request(req);
if (rc)
list_del(&req->list);
}
+out:
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
@@ -560,6 +598,7 @@ sclp_register(struct sclp_register *reg)
/* Trigger initial state change callback */
reg->sclp_receive_mask = 0;
reg->sclp_send_mask = 0;
+ reg->pm_event_posted = 0;
list_add(&reg->list, &sclp_reg_list);
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_init_mask(1);
@@ -880,20 +919,134 @@ static struct notifier_block sclp_reboot_notifier = {
.notifier_call = sclp_reboot_event
};
+/*
+ * Suspend/resume SCLP notifier implementation
+ */
+
+static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
+{
+ struct sclp_register *reg;
+ unsigned long flags;
+
+ if (!rollback) {
+ spin_lock_irqsave(&sclp_lock, flags);
+ list_for_each_entry(reg, &sclp_reg_list, list)
+ reg->pm_event_posted = 0;
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ }
+ do {
+ spin_lock_irqsave(&sclp_lock, flags);
+ list_for_each_entry(reg, &sclp_reg_list, list) {
+ if (rollback && reg->pm_event_posted)
+ goto found;
+ if (!rollback && !reg->pm_event_posted)
+ goto found;
+ }
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return;
+found:
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ if (reg->pm_event_fn)
+ reg->pm_event_fn(reg, sclp_pm_event);
+ reg->pm_event_posted = rollback ? 0 : 1;
+ } while (1);
+}
+
+/*
+ * Suspend/resume callbacks for platform device
+ */
+
+static int sclp_freeze(struct device *dev)
+{
+ unsigned long flags;
+ int rc;
+
+ sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ sclp_suspend_state = sclp_suspend_state_suspended;
+ spin_unlock_irqrestore(&sclp_lock, flags);
+
+ /* Init suspend data */
+ memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
+ sclp_suspend_req.callback = sclp_suspend_req_cb;
+ sclp_suspend_req.status = SCLP_REQ_FILLED;
+ init_completion(&sclp_request_queue_flushed);
+
+ rc = sclp_add_request(&sclp_suspend_req);
+ if (rc == 0)
+ wait_for_completion(&sclp_request_queue_flushed);
+ else if (rc != -ENODATA)
+ goto fail_thaw;
+
+ rc = sclp_deactivate();
+ if (rc)
+ goto fail_thaw;
+ return 0;
+
+fail_thaw:
+ spin_lock_irqsave(&sclp_lock, flags);
+ sclp_suspend_state = sclp_suspend_state_running;
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
+ return rc;
+}
+
+static int sclp_undo_suspend(enum sclp_pm_event event)
+{
+ unsigned long flags;
+ int rc;
+
+ rc = sclp_reactivate();
+ if (rc)
+ return rc;
+
+ spin_lock_irqsave(&sclp_lock, flags);
+ sclp_suspend_state = sclp_suspend_state_running;
+ spin_unlock_irqrestore(&sclp_lock, flags);
+
+ sclp_pm_event(event, 0);
+ return 0;
+}
+
+static int sclp_thaw(struct device *dev)
+{
+ return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+}
+
+static int sclp_restore(struct device *dev)
+{
+ return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
+}
+
+static struct dev_pm_ops sclp_pm_ops = {
+ .freeze = sclp_freeze,
+ .thaw = sclp_thaw,
+ .restore = sclp_restore,
+};
+
+static struct platform_driver sclp_pdrv = {
+ .driver = {
+ .name = "sclp",
+ .owner = THIS_MODULE,
+ .pm = &sclp_pm_ops,
+ },
+};
+
+static struct platform_device *sclp_pdev;
+
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
* otherwise. */
static int
sclp_init(void)
{
unsigned long flags;
- int rc;
+ int rc = 0;
spin_lock_irqsave(&sclp_lock, flags);
/* Check for previous or running initialization */
- if (sclp_init_state != sclp_init_state_uninitialized) {
- spin_unlock_irqrestore(&sclp_lock, flags);
- return 0;
- }
+ if (sclp_init_state != sclp_init_state_uninitialized)
+ goto fail_unlock;
sclp_init_state = sclp_init_state_initializing;
/* Set up variables */
INIT_LIST_HEAD(&sclp_req_queue);
@@ -904,27 +1057,17 @@ sclp_init(void)
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_check_interface();
spin_lock_irqsave(&sclp_lock, flags);
- if (rc) {
- sclp_init_state = sclp_init_state_uninitialized;
- spin_unlock_irqrestore(&sclp_lock, flags);
- return rc;
- }
+ if (rc)
+ goto fail_init_state_uninitialized;
/* Register reboot handler */
rc = register_reboot_notifier(&sclp_reboot_notifier);
- if (rc) {
- sclp_init_state = sclp_init_state_uninitialized;
- spin_unlock_irqrestore(&sclp_lock, flags);
- return rc;
- }
+ if (rc)
+ goto fail_init_state_uninitialized;
/* Register interrupt handler */
rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
&ext_int_info_hwc);
- if (rc) {
- unregister_reboot_notifier(&sclp_reboot_notifier);
- sclp_init_state = sclp_init_state_uninitialized;
- spin_unlock_irqrestore(&sclp_lock, flags);
- return rc;
- }
+ if (rc)
+ goto fail_unregister_reboot_notifier;
sclp_init_state = sclp_init_state_initialized;
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal external interruption - needs to happen with
@@ -932,11 +1075,56 @@ sclp_init(void)
ctl_set_bit(0, 9);
sclp_init_mask(1);
return 0;
+
+fail_unregister_reboot_notifier:
+ unregister_reboot_notifier(&sclp_reboot_notifier);
+fail_init_state_uninitialized:
+ sclp_init_state = sclp_init_state_uninitialized;
+fail_unlock:
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return rc;
}
+/*
+ * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
+ * to print the panic message.
+ */
+static int sclp_panic_notify(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ if (sclp_suspend_state == sclp_suspend_state_suspended)
+ sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sclp_on_panic_nb = {
+ .notifier_call = sclp_panic_notify,
+ .priority = SCLP_PANIC_PRIO,
+};
+
static __init int sclp_initcall(void)
{
+ int rc;
+
+ rc = platform_driver_register(&sclp_pdrv);
+ if (rc)
+ return rc;
+ sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
+ rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
+ if (rc)
+ goto fail_platform_driver_unregister;
+ rc = atomic_notifier_chain_register(&panic_notifier_list,
+ &sclp_on_panic_nb);
+ if (rc)
+ goto fail_platform_device_unregister;
+
return sclp_init();
+
+fail_platform_device_unregister:
+ platform_device_unregister(sclp_pdev);
+fail_platform_driver_unregister:
+ platform_driver_unregister(&sclp_pdrv);
+ return rc;
}
arch_initcall(sclp_initcall);
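
The SCLP core above distributes freeze/thaw/restore to its clients itself via sclp_pm_event(), instead of giving each client its own device: a client supplies a pm_event_fn in its struct sclp_register, exactly as sclp_rw and sclp_vt220 do further below. A hedged sketch of such a client, assuming the driver-internal sclp.h definitions added later in this patch (the my_* names are invented):

/* Illustrative SCLP client reacting to the new power-management events;
 * assumes the driver-internal "sclp.h" for struct sclp_register and
 * enum sclp_pm_event. */
static void my_pm_event(struct sclp_register *reg, enum sclp_pm_event ev)
{
	switch (ev) {
	case SCLP_PM_EVENT_FREEZE:
		/* stop queueing new requests and flush pending output */
		break;
	case SCLP_PM_EVENT_THAW:
	case SCLP_PM_EVENT_RESTORE:
		/* resume normal operation and re-emit cached output */
		break;
	}
}

static struct sclp_register my_sclp_client = {
	.send_mask   = EVTYP_MSG_MASK,
	.pm_event_fn = my_pm_event,
};

/* registered once at init time, e.g. rc = sclp_register(&my_sclp_client); */

The rollback handling in sclp_pm_event() relies on the pm_event_posted flag, so clients only ever see a balanced freeze/thaw sequence even when sclp_freeze() fails part way through.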
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index bac80e856f9..60e7cb07095 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -1,10 +1,8 @@
/*
- * drivers/s390/char/sclp.h
+ * Copyright IBM Corp. 1999, 2009
*
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Peschke <mpeschke@de.ibm.com>
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __SCLP_H__
@@ -17,7 +15,7 @@
/* maximum number of pages concerning our own memory management */
#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
-#define MAX_CONSOLE_PAGES 4
+#define MAX_CONSOLE_PAGES 6
#define EVTYP_OPCMD 0x01
#define EVTYP_MSG 0x02
@@ -68,6 +66,15 @@ typedef unsigned int sclp_cmdw_t;
#define GDS_KEY_SELFDEFTEXTMSG 0x31
+enum sclp_pm_event {
+ SCLP_PM_EVENT_FREEZE,
+ SCLP_PM_EVENT_THAW,
+ SCLP_PM_EVENT_RESTORE,
+};
+
+#define SCLP_PANIC_PRIO 1
+#define SCLP_PANIC_PRIO_CLIENT 0
+
typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
struct sccb_header {
@@ -134,6 +141,10 @@ struct sclp_register {
void (*state_change_fn)(struct sclp_register *);
/* called for events in cp_receive_mask/sclp_receive_mask */
void (*receiver_fn)(struct evbuf_header *);
+ /* called for power management events */
+ void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event);
+ /* pm event posted flag */
+ int pm_event_posted;
};
/* externals from sclp.c */
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 77ab6e34a10..5cc11c636d3 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/sclp_cmd.c
+ * Copyright IBM Corp. 2007, 2009
*
- * Copyright IBM Corp. 2007
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_cmd"
@@ -12,11 +11,13 @@
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
+#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/setup.h>
@@ -292,6 +293,7 @@ static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
+static int sclp_mem_state_changed;
struct memory_increment {
struct list_head list;
@@ -450,6 +452,8 @@ static int sclp_mem_notifier(struct notifier_block *nb,
rc = -EINVAL;
break;
}
+ if (!rc)
+ sclp_mem_state_changed = 1;
mutex_unlock(&sclp_mem_mutex);
return rc ? NOTIFY_BAD : NOTIFY_OK;
}
@@ -525,6 +529,14 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
list_add(&new_incr->list, prev);
}
+static int sclp_mem_freeze(struct device *dev)
+{
+ if (!sclp_mem_state_changed)
+ return 0;
+ pr_err("Memory hotplug state changed, suspend refused.\n");
+ return -EPERM;
+}
+
struct read_storage_sccb {
struct sccb_header header;
u16 max_id;
@@ -534,8 +546,20 @@ struct read_storage_sccb {
u32 entries[0];
} __packed;
+static struct dev_pm_ops sclp_mem_pm_ops = {
+ .freeze = sclp_mem_freeze,
+};
+
+static struct platform_driver sclp_mem_pdrv = {
+ .driver = {
+ .name = "sclp_mem",
+ .pm = &sclp_mem_pm_ops,
+ },
+};
+
static int __init sclp_detect_standby_memory(void)
{
+ struct platform_device *sclp_pdev;
struct read_storage_sccb *sccb;
int i, id, assigned, rc;
@@ -588,7 +612,17 @@ static int __init sclp_detect_standby_memory(void)
rc = register_memory_notifier(&sclp_mem_nb);
if (rc)
goto out;
+ rc = platform_driver_register(&sclp_mem_pdrv);
+ if (rc)
+ goto out;
+ sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
+ rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
+ if (rc)
+ goto out_driver;
sclp_add_standby_memory();
+ goto out;
+out_driver:
+ platform_driver_unregister(&sclp_mem_pdrv);
out:
free_page((unsigned long) sccb);
return rc;
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 9a25c4bd142..336811a7767 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -1,11 +1,9 @@
/*
- * drivers/s390/char/sclp_con.c
- * SCLP line mode console driver
+ * SCLP line mode console driver
*
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Peschke <mpeschke@de.ibm.com>
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kmod.h>
@@ -32,13 +30,14 @@ static spinlock_t sclp_con_lock;
static struct list_head sclp_con_pages;
/* List of full struct sclp_buffer structures ready for output */
static struct list_head sclp_con_outqueue;
-/* Counter how many buffers are emitted (max 1) and how many */
-/* are on the output queue. */
-static int sclp_con_buffer_count;
/* Pointer to current console buffer */
static struct sclp_buffer *sclp_conbuf;
/* Timer for delayed output of console messages */
static struct timer_list sclp_con_timer;
+/* Suspend mode flag */
+static int sclp_con_suspended;
+/* Flag that output queue is currently running */
+static int sclp_con_queue_running;
/* Output format for console messages */
static unsigned short sclp_con_columns;
@@ -53,42 +52,71 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
do {
page = sclp_unmake_buffer(buffer);
spin_lock_irqsave(&sclp_con_lock, flags);
+
/* Remove buffer from outqueue */
list_del(&buffer->list);
- sclp_con_buffer_count--;
list_add_tail((struct list_head *) page, &sclp_con_pages);
+
/* Check if there is a pending buffer on the out queue. */
buffer = NULL;
if (!list_empty(&sclp_con_outqueue))
- buffer = list_entry(sclp_con_outqueue.next,
- struct sclp_buffer, list);
+ buffer = list_first_entry(&sclp_con_outqueue,
+ struct sclp_buffer, list);
+ if (!buffer || sclp_con_suspended) {
+ sclp_con_queue_running = 0;
+ spin_unlock_irqrestore(&sclp_con_lock, flags);
+ break;
+ }
spin_unlock_irqrestore(&sclp_con_lock, flags);
- } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback));
+ } while (sclp_emit_buffer(buffer, sclp_conbuf_callback));
}
-static void
-sclp_conbuf_emit(void)
+/*
+ * Finalize and emit first pending buffer.
+ */
+static void sclp_conbuf_emit(void)
{
struct sclp_buffer* buffer;
unsigned long flags;
- int count;
int rc;
spin_lock_irqsave(&sclp_con_lock, flags);
- buffer = sclp_conbuf;
+ if (sclp_conbuf)
+ list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue);
sclp_conbuf = NULL;
- if (buffer == NULL) {
- spin_unlock_irqrestore(&sclp_con_lock, flags);
- return;
- }
- list_add_tail(&buffer->list, &sclp_con_outqueue);
- count = sclp_con_buffer_count++;
+ if (sclp_con_queue_running || sclp_con_suspended)
+ goto out_unlock;
+ if (list_empty(&sclp_con_outqueue))
+ goto out_unlock;
+ buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer,
+ list);
+ sclp_con_queue_running = 1;
spin_unlock_irqrestore(&sclp_con_lock, flags);
- if (count)
- return;
+
rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
if (rc)
sclp_conbuf_callback(buffer, rc);
+ return;
+out_unlock:
+ spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+/*
+ * Wait until out queue is empty
+ */
+static void sclp_console_sync_queue(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sclp_con_lock, flags);
+ if (timer_pending(&sclp_con_timer))
+ del_timer_sync(&sclp_con_timer);
+ while (sclp_con_queue_running) {
+ spin_unlock_irqrestore(&sclp_con_lock, flags);
+ sclp_sync_wait();
+ spin_lock_irqsave(&sclp_con_lock, flags);
+ }
+ spin_unlock_irqrestore(&sclp_con_lock, flags);
}
/*
@@ -123,6 +151,8 @@ sclp_console_write(struct console *console, const char *message,
/* make sure we have a console output buffer */
if (sclp_conbuf == NULL) {
while (list_empty(&sclp_con_pages)) {
+ if (sclp_con_suspended)
+ goto out;
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_con_lock, flags);
@@ -157,6 +187,7 @@ sclp_console_write(struct console *console, const char *message,
sclp_con_timer.expires = jiffies + HZ/10;
add_timer(&sclp_con_timer);
}
+out:
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
@@ -168,30 +199,43 @@ sclp_console_device(struct console *c, int *index)
}
/*
- * This routine is called from panic when the kernel
- * is going to give up. We have to make sure that all buffers
- * will be flushed to the SCLP.
+ * Make sure that all buffers will be flushed to the SCLP.
*/
static void
sclp_console_flush(void)
{
+ sclp_conbuf_emit();
+ sclp_console_sync_queue();
+}
+
+/*
+ * Resume console: If there are cached messages, emit them.
+ */
+static void sclp_console_resume(void)
+{
unsigned long flags;
+ spin_lock_irqsave(&sclp_con_lock, flags);
+ sclp_con_suspended = 0;
+ spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_conbuf_emit();
+}
+
+/*
+ * Suspend console: Set suspend flag and flush console
+ */
+static void sclp_console_suspend(void)
+{
+ unsigned long flags;
+
spin_lock_irqsave(&sclp_con_lock, flags);
- if (timer_pending(&sclp_con_timer))
- del_timer(&sclp_con_timer);
- while (sclp_con_buffer_count > 0) {
- spin_unlock_irqrestore(&sclp_con_lock, flags);
- sclp_sync_wait();
- spin_lock_irqsave(&sclp_con_lock, flags);
- }
+ sclp_con_suspended = 1;
spin_unlock_irqrestore(&sclp_con_lock, flags);
+ sclp_console_flush();
}
-static int
-sclp_console_notify(struct notifier_block *self,
- unsigned long event, void *data)
+static int sclp_console_notify(struct notifier_block *self,
+ unsigned long event, void *data)
{
sclp_console_flush();
return NOTIFY_OK;
@@ -199,7 +243,7 @@ sclp_console_notify(struct notifier_block *self,
static struct notifier_block on_panic_nb = {
.notifier_call = sclp_console_notify,
- .priority = 1,
+ .priority = SCLP_PANIC_PRIO_CLIENT,
};
static struct notifier_block on_reboot_nb = {
@@ -221,6 +265,22 @@ static struct console sclp_console =
};
/*
+ * This function is called for SCLP suspend and resume events.
+ */
+void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event)
+{
+ switch (sclp_pm_event) {
+ case SCLP_PM_EVENT_FREEZE:
+ sclp_console_suspend();
+ break;
+ case SCLP_PM_EVENT_RESTORE:
+ case SCLP_PM_EVENT_THAW:
+ sclp_console_resume();
+ break;
+ }
+}
+
+/*
* called by console_init() in drivers/char/tty_io.c at boot-time.
*/
static int __init
@@ -243,7 +303,6 @@ sclp_console_init(void)
}
INIT_LIST_HEAD(&sclp_con_outqueue);
spin_lock_init(&sclp_con_lock);
- sclp_con_buffer_count = 0;
sclp_conbuf = NULL;
init_timer(&sclp_con_timer);
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 710af42603f..4be63be7344 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -1,11 +1,10 @@
/*
- * drivers/s390/char/sclp_rw.c
- * driver: reading from and writing to system console on S/390 via SCLP
+ * driver: reading from and writing to system console on S/390 via SCLP
*
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Peschke <mpeschke@de.ibm.com>
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kmod.h>
@@ -26,9 +25,16 @@
*/
#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
+static void sclp_rw_pm_event(struct sclp_register *reg,
+ enum sclp_pm_event sclp_pm_event)
+{
+ sclp_console_pm_event(sclp_pm_event);
+}
+
/* Event type structure for write message and write priority message */
static struct sclp_register sclp_rw_event = {
- .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK
+ .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK,
+ .pm_event_fn = sclp_rw_pm_event,
};
/*
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
index 6aa7a6948bc..85f491ea929 100644
--- a/drivers/s390/char/sclp_rw.h
+++ b/drivers/s390/char/sclp_rw.h
@@ -1,11 +1,10 @@
/*
- * drivers/s390/char/sclp_rw.h
- * interface to the SCLP-read/write driver
+ * interface to the SCLP-read/write driver
*
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Peschke <mpeschke@de.ibm.com>
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corporation 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __SCLP_RW_H__
@@ -93,4 +92,5 @@ void sclp_set_columns(struct sclp_buffer *, unsigned short);
void sclp_set_htab(struct sclp_buffer *, unsigned short);
int sclp_chars_in_buffer(struct sclp_buffer *);
+void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event);
#endif /* __SCLP_RW_H__ */
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index a839aa531d7..5518e24946a 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -1,10 +1,9 @@
/*
- * drivers/s390/char/sclp_vt220.c
- * SCLP VT220 terminal driver.
+ * SCLP VT220 terminal driver.
*
- * S390 version
- * Copyright IBM Corp. 2003,2008
- * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+ * Copyright IBM Corp. 2003, 2009
+ *
+ * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
*/
#include <linux/module.h>
@@ -69,8 +68,11 @@ static struct list_head sclp_vt220_empty;
/* List of pending requests */
static struct list_head sclp_vt220_outqueue;
-/* Number of requests in outqueue */
-static int sclp_vt220_outqueue_count;
+/* Suspend mode flag */
+static int sclp_vt220_suspended;
+
+/* Flag that output queue is currently running */
+static int sclp_vt220_queue_running;
/* Timer used for delaying write requests to merge subsequent messages into
* a single buffer */
@@ -92,6 +94,8 @@ static int __initdata sclp_vt220_init_count;
static int sclp_vt220_flush_later;
static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+ enum sclp_pm_event sclp_pm_event);
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);
@@ -100,7 +104,8 @@ static struct sclp_register sclp_vt220_register = {
.send_mask = EVTYP_VT220MSG_MASK,
.receive_mask = EVTYP_VT220MSG_MASK,
.state_change_fn = NULL,
- .receiver_fn = sclp_vt220_receiver_fn
+ .receiver_fn = sclp_vt220_receiver_fn,
+ .pm_event_fn = sclp_vt220_pm_event_fn,
};
@@ -120,15 +125,19 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request)
spin_lock_irqsave(&sclp_vt220_lock, flags);
/* Move request from outqueue to empty queue */
list_del(&request->list);
- sclp_vt220_outqueue_count--;
list_add_tail((struct list_head *) page, &sclp_vt220_empty);
/* Check if there is a pending buffer on the out queue. */
request = NULL;
if (!list_empty(&sclp_vt220_outqueue))
request = list_entry(sclp_vt220_outqueue.next,
struct sclp_vt220_request, list);
+ if (!request || sclp_vt220_suspended) {
+ sclp_vt220_queue_running = 0;
+ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+ break;
+ }
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
- } while (request && __sclp_vt220_emit(request));
+ } while (__sclp_vt220_emit(request));
if (request == NULL && sclp_vt220_flush_later)
sclp_vt220_emit_current();
/* Check if the tty needs a wake up call */
@@ -212,26 +221,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request)
}
/*
- * Queue and emit given request.
- */
-static void
-sclp_vt220_emit(struct sclp_vt220_request *request)
-{
- unsigned long flags;
- int count;
-
- spin_lock_irqsave(&sclp_vt220_lock, flags);
- list_add_tail(&request->list, &sclp_vt220_outqueue);
- count = sclp_vt220_outqueue_count++;
- spin_unlock_irqrestore(&sclp_vt220_lock, flags);
- /* Emit only the first buffer immediately - callback takes care of
- * the rest */
- if (count == 0 && __sclp_vt220_emit(request))
- sclp_vt220_process_queue(request);
-}
-
-/*
- * Queue and emit current request. Return zero on success, non-zero otherwise.
+ * Queue and emit current request.
*/
static void
sclp_vt220_emit_current(void)
@@ -241,22 +231,33 @@ sclp_vt220_emit_current(void)
struct sclp_vt220_sccb *sccb;
spin_lock_irqsave(&sclp_vt220_lock, flags);
- request = NULL;
- if (sclp_vt220_current_request != NULL) {
+ if (sclp_vt220_current_request) {
sccb = (struct sclp_vt220_sccb *)
sclp_vt220_current_request->sclp_req.sccb;
/* Only emit buffers with content */
if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
- request = sclp_vt220_current_request;
+ list_add_tail(&sclp_vt220_current_request->list,
+ &sclp_vt220_outqueue);
sclp_vt220_current_request = NULL;
if (timer_pending(&sclp_vt220_timer))
del_timer(&sclp_vt220_timer);
}
sclp_vt220_flush_later = 0;
}
+ if (sclp_vt220_queue_running || sclp_vt220_suspended)
+ goto out_unlock;
+ if (list_empty(&sclp_vt220_outqueue))
+ goto out_unlock;
+ request = list_first_entry(&sclp_vt220_outqueue,
+ struct sclp_vt220_request, list);
+ sclp_vt220_queue_running = 1;
+ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+
+ if (__sclp_vt220_emit(request))
+ sclp_vt220_process_queue(request);
+ return;
+out_unlock:
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
- if (request != NULL)
- sclp_vt220_emit(request);
}
#define SCLP_NORMAL_WRITE 0x00
@@ -396,7 +397,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
if (sclp_vt220_current_request == NULL) {
while (list_empty(&sclp_vt220_empty)) {
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
- if (may_fail)
+ if (may_fail || sclp_vt220_suspended)
goto out;
else
sclp_sync_wait();
@@ -531,7 +532,7 @@ sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
static void
sclp_vt220_flush_chars(struct tty_struct *tty)
{
- if (sclp_vt220_outqueue_count == 0)
+ if (!sclp_vt220_queue_running)
sclp_vt220_emit_current();
else
sclp_vt220_flush_later = 1;
@@ -635,7 +636,6 @@ static int __init __sclp_vt220_init(int num_pages)
init_timer(&sclp_vt220_timer);
sclp_vt220_current_request = NULL;
sclp_vt220_buffered_chars = 0;
- sclp_vt220_outqueue_count = 0;
sclp_vt220_tty = NULL;
sclp_vt220_flush_later = 0;
@@ -736,7 +736,7 @@ static void __sclp_vt220_flush_buffer(void)
spin_lock_irqsave(&sclp_vt220_lock, flags);
if (timer_pending(&sclp_vt220_timer))
del_timer(&sclp_vt220_timer);
- while (sclp_vt220_outqueue_count > 0) {
+ while (sclp_vt220_queue_running) {
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_vt220_lock, flags);
@@ -744,6 +744,46 @@ static void __sclp_vt220_flush_buffer(void)
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
+/*
+ * Resume console: If there are cached messages, emit them.
+ */
+static void sclp_vt220_resume(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sclp_vt220_lock, flags);
+ sclp_vt220_suspended = 0;
+ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+ sclp_vt220_emit_current();
+}
+
+/*
+ * Suspend console: Set suspend flag and flush console
+ */
+static void sclp_vt220_suspend(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sclp_vt220_lock, flags);
+ sclp_vt220_suspended = 1;
+ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+ __sclp_vt220_flush_buffer();
+}
+
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+ enum sclp_pm_event sclp_pm_event)
+{
+ switch (sclp_pm_event) {
+ case SCLP_PM_EVENT_FREEZE:
+ sclp_vt220_suspend();
+ break;
+ case SCLP_PM_EVENT_RESTORE:
+ case SCLP_PM_EVENT_THAW:
+ sclp_vt220_resume();
+ break;
+ }
+}
+
static int
sclp_vt220_notify(struct notifier_block *self,
unsigned long event, void *data)
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 5469e099597..a2633377470 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -3,7 +3,7 @@
* tape device driver for 3480/3490E/3590 tapes.
*
* S390 and zSeries version
- * Copyright IBM Corp. 2001,2006
+ * Copyright IBM Corp. 2001, 2009
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -286,6 +286,7 @@ extern void tape_state_set(struct tape_device *, enum tape_state);
extern int tape_generic_online(struct tape_device *, struct tape_discipline *);
extern int tape_generic_offline(struct ccw_device *);
+extern int tape_generic_pm_suspend(struct ccw_device *);
/* Externals from tape_devmap.c */
extern int tape_generic_probe(struct ccw_device *);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5f8e8ef43dd..5a519fac37b 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -2,7 +2,7 @@
* drivers/s390/char/tape_34xx.c
* tape device discipline for 3480/3490 tapes.
*
- * Copyright (C) IBM Corp. 2001,2006
+ * Copyright IBM Corp. 2001, 2009
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -1134,7 +1134,7 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
/* Setup ccws. */
request->op = TO_BLOCK;
start_block = (struct tape_34xx_block_id *) request->cpdata;
- start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B;
+ start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
DBF_EVENT(6, "start_block = %i\n", start_block->block);
ccw = request->cpaddr;
@@ -1289,7 +1289,7 @@ static int
tape_34xx_online(struct ccw_device *cdev)
{
return tape_generic_online(
- cdev->dev.driver_data,
+ dev_get_drvdata(&cdev->dev),
&tape_discipline_34xx
);
}
@@ -1302,6 +1302,7 @@ static struct ccw_driver tape_34xx_driver = {
.remove = tape_generic_remove,
.set_online = tape_34xx_online,
.set_offline = tape_generic_offline,
+ .freeze = tape_generic_pm_suspend,
};
static int
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 823b05bd0dd..418f72dd39b 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -2,7 +2,7 @@
* drivers/s390/char/tape_3590.c
* tape device discipline for 3590 tapes.
*
- * Copyright IBM Corp. 2001,2006
+ * Copyright IBM Corp. 2001, 2009
* Author(s): Stefan Bader <shbader@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -633,7 +633,7 @@ tape_3590_bread(struct tape_device *device, struct request *req)
struct req_iterator iter;
DBF_EVENT(6, "xBREDid:");
- start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
+ start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
DBF_EVENT(6, "start_block = %i\n", start_block);
rq_for_each_segment(bv, req, iter)
@@ -1703,7 +1703,7 @@ static struct ccw_device_id tape_3590_ids[] = {
static int
tape_3590_online(struct ccw_device *cdev)
{
- return tape_generic_online(cdev->dev.driver_data,
+ return tape_generic_online(dev_get_drvdata(&cdev->dev),
&tape_discipline_3590);
}
@@ -1715,6 +1715,7 @@ static struct ccw_driver tape_3590_driver = {
.remove = tape_generic_remove,
.set_offline = tape_generic_offline,
.set_online = tape_3590_online,
+ .freeze = tape_generic_pm_suspend,
};
/*
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index f32e89e7c4f..47ff695255e 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -74,13 +74,6 @@ tapeblock_trigger_requeue(struct tape_device *device)
* Post finished request.
*/
static void
-tapeblock_end_request(struct request *req, int error)
-{
- if (blk_end_request(req, error, blk_rq_bytes(req)))
- BUG();
-}
-
-static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
struct tape_device *device;
@@ -90,17 +83,17 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
device = ccw_req->device;
req = (struct request *) data;
- tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO);
+ blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
if (ccw_req->rc == 0)
/* Update position. */
device->blk_data.block_position =
- (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
+ (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
else
/* We lost the position information due to an error. */
device->blk_data.block_position = -1;
device->discipline->free_bread(ccw_req);
if (!list_empty(&device->req_queue) ||
- elv_next_request(device->blk_data.request_queue))
+ blk_peek_request(device->blk_data.request_queue))
tapeblock_trigger_requeue(device);
}
@@ -118,7 +111,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
ccw_req = device->discipline->bread(device, req);
if (IS_ERR(ccw_req)) {
DBF_EVENT(1, "TBLOCK: bread failed\n");
- tapeblock_end_request(req, -EIO);
+ blk_end_request_all(req, -EIO);
return PTR_ERR(ccw_req);
}
ccw_req->callback = __tapeblock_end_request;
@@ -131,7 +124,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
* Start/enqueueing failed. No retries in
* this case.
*/
- tapeblock_end_request(req, -EIO);
+ blk_end_request_all(req, -EIO);
device->discipline->free_bread(ccw_req);
}
@@ -169,19 +162,16 @@ tapeblock_requeue(struct work_struct *work) {
spin_lock_irq(&device->blk_data.request_queue_lock);
while (
!blk_queue_plugged(queue) &&
- elv_next_request(queue) &&
+ (req = blk_fetch_request(queue)) &&
nr_queued < TAPEBLOCK_MIN_REQUEUE
) {
- req = elv_next_request(queue);
if (rq_data_dir(req) == WRITE) {
DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
- blkdev_dequeue_request(req);
spin_unlock_irq(&device->blk_data.request_queue_lock);
- tapeblock_end_request(req, -EIO);
+ blk_end_request_all(req, -EIO);
spin_lock_irq(&device->blk_data.request_queue_lock);
continue;
}
- blkdev_dequeue_request(req);
nr_queued++;
spin_unlock_irq(&device->blk_data.request_queue_lock);
rc = tapeblock_start_request(device, req);
@@ -232,7 +222,7 @@ tapeblock_setup_device(struct tape_device * device)
if (rc)
goto cleanup_queue;
- blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
+ blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
blk_queue_max_phys_segments(blkdat->request_queue, -1L);
blk_queue_max_hw_segments(blkdat->request_queue, -1L);
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 8a109f3b69c..595aa04cfd0 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -3,7 +3,7 @@
* basic function of the tape device driver
*
* S390 and zSeries version
- * Copyright IBM Corp. 2001,2006
+ * Copyright IBM Corp. 2001, 2009
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -92,7 +92,7 @@ tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *
{
struct tape_device *tdev;
- tdev = (struct tape_device *) dev->driver_data;
+ tdev = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}
@@ -104,7 +104,7 @@ tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *b
{
struct tape_device *tdev;
- tdev = (struct tape_device *) dev->driver_data;
+ tdev = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}
@@ -116,7 +116,7 @@ tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tape_device *tdev;
- tdev = (struct tape_device *) dev->driver_data;
+ tdev = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
"OFFLINE" : tape_state_verbose[tdev->tape_state]);
}
@@ -130,7 +130,7 @@ tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf
struct tape_device *tdev;
ssize_t rc;
- tdev = (struct tape_device *) dev->driver_data;
+ tdev = dev_get_drvdata(dev);
if (tdev->first_minor < 0)
return scnprintf(buf, PAGE_SIZE, "N/A\n");
@@ -156,7 +156,7 @@ tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf
{
struct tape_device *tdev;
- tdev = (struct tape_device *) dev->driver_data;
+ tdev = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}
@@ -380,6 +380,55 @@ tape_cleanup_device(struct tape_device *device)
}
/*
+ * Suspend device.
+ *
+ * Called by the common I/O layer if the drive should be suspended on user
+ * request. We refuse to suspend if the device is loaded or in use for the
+ * following reason:
+ * While the Linux guest is suspended, it might be logged off which causes
+ * devices to be detached. Tape devices are automatically rewound and unloaded
+ * during DETACH processing (unless the tape device was attached with the
+ * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
+ * resume the original state of the tape device, since we would need to
+ * manually re-load the cartridge which was active at suspend time.
+ */
+int tape_generic_pm_suspend(struct ccw_device *cdev)
+{
+ struct tape_device *device;
+
+ device = cdev->dev.driver_data;
+ if (!device) {
+ return -ENODEV;
+ }
+
+ DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
+ device->cdev_id, device);
+
+ if (device->medium_state != MS_UNLOADED) {
+ pr_err("A cartridge is loaded in tape device %s, "
+ "refusing to suspend\n", dev_name(&cdev->dev));
+ return -EBUSY;
+ }
+
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
+ switch (device->tape_state) {
+ case TS_INIT:
+ case TS_NOT_OPER:
+ case TS_UNUSED:
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ break;
+ default:
+ pr_err("Tape device %s is busy, refusing to "
+ "suspend\n", dev_name(&cdev->dev));
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ return -EBUSY;
+ }
+
+ DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
+ return 0;
+}
+
+/*
* Set device offline.
*
* Called by the common I/O layer if the drive should set offline on user
@@ -391,7 +440,7 @@ tape_generic_offline(struct ccw_device *cdev)
{
struct tape_device *device;
- device = cdev->dev.driver_data;
+ device = dev_get_drvdata(&cdev->dev);
if (!device) {
return -ENODEV;
}
@@ -534,7 +583,7 @@ tape_generic_probe(struct ccw_device *cdev)
tape_put_device(device);
return ret;
}
- cdev->dev.driver_data = device;
+ dev_set_drvdata(&cdev->dev, device);
cdev->handler = __tape_do_irq;
device->cdev = cdev;
ccw_device_get_id(cdev, &dev_id);
@@ -573,7 +622,7 @@ tape_generic_remove(struct ccw_device *cdev)
{
struct tape_device * device;
- device = cdev->dev.driver_data;
+ device = dev_get_drvdata(&cdev->dev);
if (!device) {
return;
}
@@ -613,9 +662,9 @@ tape_generic_remove(struct ccw_device *cdev)
tape_cleanup_device(device);
}
- if (cdev->dev.driver_data != NULL) {
+ if (!dev_get_drvdata(&cdev->dev)) {
sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
- cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data);
+ dev_set_drvdata(&cdev->dev, tape_put_device(dev_get_drvdata(&cdev->dev)));
}
}
@@ -1011,7 +1060,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct tape_request *request;
int rc;
- device = (struct tape_device *) cdev->dev.driver_data;
+ device = dev_get_drvdata(&cdev->dev);
if (device == NULL) {
return;
}
@@ -1273,6 +1322,7 @@ EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
+EXPORT_SYMBOL(tape_generic_pm_suspend);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device_reference);
EXPORT_SYMBOL(tape_state_verbose);
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index a7fe6302c98..38385677c65 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -112,7 +112,7 @@ struct tty3270 {
#define TTY_UPDATE_LIST 2 /* Update lines in tty3270->update. */
#define TTY_UPDATE_INPUT 4 /* Update input line. */
#define TTY_UPDATE_STATUS 8 /* Update status line. */
-#define TTY_UPDATE_ALL 15
+#define TTY_UPDATE_ALL 16 /* Recreate screen. */
static void tty3270_update(struct tty3270 *);
@@ -121,19 +121,10 @@ static void tty3270_update(struct tty3270 *);
*/
static void tty3270_set_timer(struct tty3270 *tp, int expires)
{
- if (expires == 0) {
- if (timer_pending(&tp->timer) && del_timer(&tp->timer))
- raw3270_put_view(&tp->view);
- return;
- }
- if (timer_pending(&tp->timer) &&
- mod_timer(&tp->timer, jiffies + expires))
- return;
- raw3270_get_view(&tp->view);
- tp->timer.function = (void (*)(unsigned long)) tty3270_update;
- tp->timer.data = (unsigned long) tp;
- tp->timer.expires = jiffies + expires;
- add_timer(&tp->timer);
+ if (expires == 0)
+ del_timer(&tp->timer);
+ else
+ mod_timer(&tp->timer, jiffies + expires);
}
/*
@@ -337,7 +328,6 @@ tty3270_write_callback(struct raw3270_request *rq, void *data)
tp = (struct tty3270 *) rq->view;
if (rq->rc != 0) {
/* Write wasn't successful. Refresh all. */
- tty3270_rebuild_update(tp);
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
}
@@ -366,6 +356,12 @@ tty3270_update(struct tty3270 *tp)
spin_lock(&tp->view.lock);
updated = 0;
+ if (tp->update_flags & TTY_UPDATE_ALL) {
+ tty3270_rebuild_update(tp);
+ tty3270_update_status(tp);
+ tp->update_flags = TTY_UPDATE_ERASE | TTY_UPDATE_LIST |
+ TTY_UPDATE_INPUT | TTY_UPDATE_STATUS;
+ }
if (tp->update_flags & TTY_UPDATE_ERASE) {
/* Use erase write alternate to erase display. */
raw3270_request_set_cmd(wrq, TC_EWRITEA);
@@ -425,7 +421,6 @@ tty3270_update(struct tty3270 *tp)
xchg(&tp->write, wrq);
}
spin_unlock(&tp->view.lock);
- raw3270_put_view(&tp->view);
}
/*
@@ -570,7 +565,6 @@ tty3270_read_tasklet(struct raw3270_request *rrq)
tty3270_set_timer(tp, 1);
} else if (tp->input->string[0] == 0x6d) {
/* Display has been cleared. Redraw. */
- tty3270_rebuild_update(tp);
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
}
@@ -641,22 +635,20 @@ static int
tty3270_activate(struct raw3270_view *view)
{
struct tty3270 *tp;
- unsigned long flags;
tp = (struct tty3270 *) view;
- spin_lock_irqsave(&tp->view.lock, flags);
- tp->nr_up = 0;
- tty3270_rebuild_update(tp);
- tty3270_update_status(tp);
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
- spin_unlock_irqrestore(&tp->view.lock, flags);
return 0;
}
static void
tty3270_deactivate(struct raw3270_view *view)
{
+ struct tty3270 *tp;
+
+ tp = (struct tty3270 *) view;
+ del_timer(&tp->timer);
}
static int
@@ -743,6 +735,7 @@ tty3270_free_view(struct tty3270 *tp)
{
int pages;
+ del_timer_sync(&tp->timer);
kbd_free(tp->kbd);
raw3270_request_free(tp->kreset);
raw3270_request_free(tp->read);
@@ -889,7 +882,8 @@ tty3270_open(struct tty_struct *tty, struct file * filp)
INIT_LIST_HEAD(&tp->update);
INIT_LIST_HEAD(&tp->rcl_lines);
tp->rcl_max = 20;
- init_timer(&tp->timer);
+ setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
+ (unsigned long) tp);
tasklet_init(&tp->readlet,
(void (*)(unsigned long)) tty3270_read_tasklet,
(unsigned long) tp->read);
@@ -1754,14 +1748,6 @@ static const struct tty_operations tty3270_ops = {
.set_termios = tty3270_set_termios
};
-static void tty3270_notifier(int index, int active)
-{
- if (active)
- tty_register_device(tty3270_driver, index, NULL);
- else
- tty_unregister_device(tty3270_driver, index);
-}
-
/*
* 3270 tty registration code called from tty_init().
* Most kernel services (incl. kmalloc) are available at this point.
@@ -1796,12 +1782,6 @@ static int __init tty3270_init(void)
return ret;
}
tty3270_driver = driver;
- ret = raw3270_register_notifier(tty3270_notifier);
- if (ret) {
- put_tty_driver(driver);
- return ret;
-
- }
return 0;
}
@@ -1810,7 +1790,6 @@ tty3270_exit(void)
{
struct tty_driver *driver;
- raw3270_unregister_notifier(tty3270_notifier);
driver = tty3270_driver;
tty3270_driver = NULL;
tty_unregister_driver(driver);
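Besides the screen-rebuild rework, the tty3270 hunk replaces the open-coded init_timer()/function/data assignments with setup_timer(). A minimal sketch of that conversion against the timer API of this kernel generation; my_timer_fn() and my_arm_timer() are made-up names:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;

static void my_timer_fn(unsigned long data)
{
	/* 'data' carries the context pointer, cast to unsigned long. */
}

static void my_arm_timer(void *ctx)
{
	/*
	 * Old style:
	 *	init_timer(&my_timer);
	 *	my_timer.function = my_timer_fn;
	 *	my_timer.data = (unsigned long) ctx;
	 */
	setup_timer(&my_timer, my_timer_fn, (unsigned long) ctx);
	mod_timer(&my_timer, jiffies + HZ);	/* (re)arm, one second from now */
}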
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index d8a2289fcb6..411cfa3c771 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -3,7 +3,7 @@
* character device driver for reading z/VM system service records
*
*
- * Copyright 2004 IBM Corporation
+ * Copyright IBM Corp. 2004, 2009
* character device driver for reading z/VM system service records,
* Version 1.0
* Author(s): Xenia Tkatschow <xenia@us.ibm.com>
@@ -504,7 +504,7 @@ static ssize_t vmlogrdr_autopurge_store(struct device * dev,
struct device_attribute *attr,
const char * buf, size_t count)
{
- struct vmlogrdr_priv_t *priv = dev->driver_data;
+ struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
ssize_t ret = count;
switch (buf[0]) {
@@ -525,7 +525,7 @@ static ssize_t vmlogrdr_autopurge_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct vmlogrdr_priv_t *priv = dev->driver_data;
+ struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", priv->autopurge);
}
@@ -541,7 +541,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev,
char cp_command[80];
char cp_response[80];
- struct vmlogrdr_priv_t *priv = dev->driver_data;
+ struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
if (buf[0] != '1')
return -EINVAL;
@@ -578,7 +578,7 @@ static ssize_t vmlogrdr_autorecording_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct vmlogrdr_priv_t *priv = dev->driver_data;
+ struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
ssize_t ret = count;
switch (buf[0]) {
@@ -599,7 +599,7 @@ static ssize_t vmlogrdr_autorecording_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct vmlogrdr_priv_t *priv = dev->driver_data;
+ struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", priv->autorecording);
}
@@ -612,7 +612,7 @@ static ssize_t vmlogrdr_recording_store(struct device * dev,
struct device_attribute *attr,
const char * buf, size_t count)
{
- struct vmlogrdr_priv_t *priv = dev->driver_data;
+ struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
ssize_t ret;
switch (buf[0]) {
@@ -660,6 +660,29 @@ static struct attribute *vmlogrdr_attrs[] = {
NULL,
};
+static int vmlogrdr_pm_prepare(struct device *dev)
+{
+ int rc;
+ struct vmlogrdr_priv_t *priv = dev->driver_data;
+
+ rc = 0;
+ if (priv) {
+ spin_lock_bh(&priv->priv_lock);
+ if (priv->dev_in_use)
+ rc = -EBUSY;
+ spin_unlock_bh(&priv->priv_lock);
+ }
+ if (rc)
+ pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
+ dev_name(dev));
+ return rc;
+}
+
+
+static struct dev_pm_ops vmlogrdr_pm_ops = {
+ .prepare = vmlogrdr_pm_prepare,
+};
+
static struct attribute_group vmlogrdr_attr_group = {
.attrs = vmlogrdr_attrs,
};
@@ -668,6 +691,7 @@ static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
.name = "vmlogrdr",
.bus = &iucv_bus,
+ .pm = &vmlogrdr_pm_ops,
};
@@ -729,6 +753,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver;
+ dev->driver_data = priv;
/*
* The release function could be called after the
* module has been unloaded. Its _only_ task is to
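vmlogrdr hooks its busy check into the new dev_pm_ops .prepare callback, so the whole suspend transition is vetoed while the device node is open. A minimal sketch of that shape with hypothetical names (my_pm_prepare, struct my_priv, my_driver):

#include <linux/device.h>
#include <linux/pm.h>

struct my_priv {
	int dev_in_use;
};

/* Returning an error from .prepare aborts the suspend transition. */
static int my_pm_prepare(struct device *dev)
{
	struct my_priv *priv = dev_get_drvdata(dev);

	if (priv && priv->dev_in_use)
		return -EBUSY;
	return 0;
}

static struct dev_pm_ops my_pm_ops = {
	.prepare = my_pm_prepare,
};

static struct device_driver my_driver = {
	.name	= "my_driver",
	.pm	= &my_pm_ops,
};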
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 5dcef81fc9d..7d9e67cb647 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -2,7 +2,7 @@
* Linux driver for System z and s390 unit record devices
* (z/VM virtual punch, reader, printer)
*
- * Copyright IBM Corp. 2001, 2007
+ * Copyright IBM Corp. 2001, 2009
* Authors: Malcolm Beattie <beattiem@uk.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Frank Munzert <munzert@de.ibm.com>
@@ -60,6 +60,7 @@ static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);
+static int ur_pm_suspend(struct ccw_device *cdev);
static struct ccw_driver ur_driver = {
.name = "vmur",
@@ -69,6 +70,7 @@ static struct ccw_driver ur_driver = {
.remove = ur_remove,
.set_online = ur_set_online,
.set_offline = ur_set_offline,
+ .freeze = ur_pm_suspend,
};
static DEFINE_MUTEX(vmur_mutex);
@@ -78,11 +80,11 @@ static DEFINE_MUTEX(vmur_mutex);
*
* Each ur device (urd) contains a reference to its corresponding ccw device
* (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
- * ur device using the cdev->dev.driver_data pointer.
+ * ur device using dev_get_drvdata(&cdev->dev) pointer.
*
* urd references:
* - ur_probe gets a urd reference, ur_remove drops the reference
- * (cdev->dev.driver_data)
+ * dev_get_drvdata(&cdev->dev)
* - ur_open gets a urd reference, ur_release drops the reference
* (urf->urd)
*
@@ -90,7 +92,7 @@ static DEFINE_MUTEX(vmur_mutex);
* - urdev_alloc get a cdev reference (urd->cdev)
* - urdev_free drops the cdev reference (urd->cdev)
*
- * Setting and clearing of cdev->dev.driver_data is protected by the ccwdev lock
+ * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the ccwdev lock
*/
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
@@ -129,7 +131,7 @@ static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
- urd = cdev->dev.driver_data;
+ urd = dev_get_drvdata(&cdev->dev);
if (urd)
urdev_get(urd);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
@@ -158,6 +160,28 @@ static void urdev_put(struct urdev *urd)
}
/*
+ * State and contents of ur devices can be changed by class D users issuing
+ * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
+ * Also the Linux guest might be logged off, which causes all active spool
+ * files to be closed.
+ * So we cannot guarantee that spool files are still the same when the Linux
+ * guest is resumed. In order to avoid unpredictable results at resume time
+ * we simply refuse to suspend if a ur device node is open.
+ */
+static int ur_pm_suspend(struct ccw_device *cdev)
+{
+ struct urdev *urd = cdev->dev.driver_data;
+
+ TRACE("ur_pm_suspend: cdev=%p\n", cdev);
+ if (urd->open_flag) {
+ pr_err("Unit record device %s is busy, %s refusing to "
+ "suspend.\n", dev_name(&cdev->dev), ur_banner);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/*
* Low-level functions to do I/O to a ur device.
* alloc_chan_prog
* free_chan_prog
@@ -286,7 +310,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
TRACE("ur_int_handler: unsolicited interrupt\n");
return;
}
- urd = cdev->dev.driver_data;
+ urd = dev_get_drvdata(&cdev->dev);
BUG_ON(!urd);
/* On special conditions irb is an error pointer */
if (IS_ERR(irb))
@@ -832,7 +856,7 @@ static int ur_probe(struct ccw_device *cdev)
goto fail_remove_attr;
}
spin_lock_irq(get_ccwdev_lock(cdev));
- cdev->dev.driver_data = urd;
+ dev_set_drvdata(&cdev->dev, urd);
spin_unlock_irq(get_ccwdev_lock(cdev));
mutex_unlock(&vmur_mutex);
@@ -972,8 +996,8 @@ static void ur_remove(struct ccw_device *cdev)
ur_remove_attributes(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
- urdev_put(cdev->dev.driver_data);
- cdev->dev.driver_data = NULL;
+ urdev_put(dev_get_drvdata(&cdev->dev));
+ dev_set_drvdata(&cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
mutex_unlock(&vmur_mutex);
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 21a2a829bf4..cb7854c10c0 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -1,17 +1,23 @@
/*
* Watchdog implementation based on z/VM Watchdog Timer API
*
+ * Copyright IBM Corp. 2004,2009
+ *
* The user space watchdog daemon can use this driver as
* /dev/vmwatchdog to have z/VM execute the specified CP
* command when the timeout expires. The default command is
* "IPL", which will cause an immediate reboot.
*/
+#define KMSG_COMPONENT "vmwatchdog"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/suspend.h>
#include <linux/watchdog.h>
#include <linux/smp_lock.h>
@@ -43,6 +49,9 @@ static unsigned int vmwdt_interval = 60;
static unsigned long vmwdt_is_open;
static int vmwdt_expect_close;
+#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */
+#define VMWDT_RUNNING 1 /* The watchdog is armed */
+
enum vmwdt_func {
/* function codes */
wdt_init = 0,
@@ -92,6 +101,7 @@ static int vmwdt_keepalive(void)
EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
+ set_bit(VMWDT_RUNNING, &vmwdt_is_open);
ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
WARN_ON(ret != 0);
kfree(ebc_cmd);
@@ -102,6 +112,7 @@ static int vmwdt_disable(void)
{
int ret = __diag288(wdt_cancel, 0, "", 0);
WARN_ON(ret != 0);
+ clear_bit(VMWDT_RUNNING, &vmwdt_is_open);
return ret;
}
@@ -123,13 +134,13 @@ static int vmwdt_open(struct inode *i, struct file *f)
{
int ret;
lock_kernel();
- if (test_and_set_bit(0, &vmwdt_is_open)) {
+ if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
unlock_kernel();
return -EBUSY;
}
ret = vmwdt_keepalive();
if (ret)
- clear_bit(0, &vmwdt_is_open);
+ clear_bit(VMWDT_OPEN, &vmwdt_is_open);
unlock_kernel();
return ret ? ret : nonseekable_open(i, f);
}
@@ -139,7 +150,7 @@ static int vmwdt_close(struct inode *i, struct file *f)
if (vmwdt_expect_close == 42)
vmwdt_disable();
vmwdt_expect_close = 0;
- clear_bit(0, &vmwdt_is_open);
+ clear_bit(VMWDT_OPEN, &vmwdt_is_open);
return 0;
}
@@ -223,6 +234,57 @@ static ssize_t vmwdt_write(struct file *f, const char __user *buf,
return count;
}
+static int vmwdt_resume(void)
+{
+ clear_bit(VMWDT_OPEN, &vmwdt_is_open);
+ return NOTIFY_DONE;
+}
+
+/*
+ * It makes no sense to go into suspend while the watchdog is running.
+ * Depending on the memory size, the watchdog might trigger while we
+ * are still saving the memory.
+ * We reuse the open flag to ensure that suspend and watchdog open are
+ * exclusive operations.
+ */
+static int vmwdt_suspend(void)
+{
+ if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
+ pr_err("The watchdog is in use. "
+ "This prevents hibernation or suspend.\n");
+ return NOTIFY_BAD;
+ }
+ if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
+ clear_bit(VMWDT_OPEN, &vmwdt_is_open);
+ pr_err("The watchdog is running. "
+ "This prevents hibernation or suspend.\n");
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_DONE;
+}
+
+/*
+ * This function is called for suspend and resume.
+ */
+static int vmwdt_power_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ switch (event) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ return vmwdt_resume();
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ return vmwdt_suspend();
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct notifier_block vmwdt_power_notifier = {
+ .notifier_call = vmwdt_power_event,
+};
+
static const struct file_operations vmwdt_fops = {
.open = &vmwdt_open,
.release = &vmwdt_close,
@@ -244,12 +306,21 @@ static int __init vmwdt_init(void)
ret = vmwdt_probe();
if (ret)
return ret;
- return misc_register(&vmwdt_dev);
+ ret = register_pm_notifier(&vmwdt_power_notifier);
+ if (ret)
+ return ret;
+ ret = misc_register(&vmwdt_dev);
+ if (ret) {
+ unregister_pm_notifier(&vmwdt_power_notifier);
+ return ret;
+ }
+ return 0;
}
module_init(vmwdt_init);
static void __exit vmwdt_exit(void)
{
- WARN_ON(misc_deregister(&vmwdt_dev) != 0);
+ unregister_pm_notifier(&vmwdt_power_notifier);
+ misc_deregister(&vmwdt_dev);
}
module_exit(vmwdt_exit);
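vmwatchdog has no struct device of its own to hang dev_pm_ops on, so it uses a PM notifier instead. A minimal sketch of the register/unregister pattern; the policy inside the PREPARE/POST branches is left as comments:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

static int my_power_event(struct notifier_block *nb, unsigned long event,
			  void *ptr)
{
	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* return NOTIFY_BAD here to veto the transition */
		return NOTIFY_DONE;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		/* undo whatever the prepare step set up */
		return NOTIFY_DONE;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block my_power_notifier = {
	.notifier_call = my_power_event,
};

static int __init my_init(void)
{
	return register_pm_notifier(&my_power_notifier);
}

static void __exit my_exit(void)
{
	unregister_pm_notifier(&my_power_notifier);
}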
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 22ce765d537..a5a62f1f774 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,11 +1,10 @@
/*
- * drivers/s390/cio/ccwgroup.c
* bus driver for ccwgroup
*
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
- * Author(s): Arnd Bergmann (arndb@de.ibm.com)
- * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Copyright IBM Corp. 2002, 2009
+ *
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/errno.h>
@@ -501,6 +500,74 @@ static void ccwgroup_shutdown(struct device *dev)
gdrv->shutdown(gdev);
}
+static int ccwgroup_pm_prepare(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+ /* Fail while device is being set online/offline. */
+ if (atomic_read(&gdev->onoff))
+ return -EAGAIN;
+
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return 0;
+
+ return gdrv->prepare ? gdrv->prepare(gdev) : 0;
+}
+
+static void ccwgroup_pm_complete(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return;
+
+ if (gdrv->complete)
+ gdrv->complete(gdev);
+}
+
+static int ccwgroup_pm_freeze(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return 0;
+
+ return gdrv->freeze ? gdrv->freeze(gdev) : 0;
+}
+
+static int ccwgroup_pm_thaw(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return 0;
+
+ return gdrv->thaw ? gdrv->thaw(gdev) : 0;
+}
+
+static int ccwgroup_pm_restore(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return 0;
+
+ return gdrv->restore ? gdrv->restore(gdev) : 0;
+}
+
+static struct dev_pm_ops ccwgroup_pm_ops = {
+ .prepare = ccwgroup_pm_prepare,
+ .complete = ccwgroup_pm_complete,
+ .freeze = ccwgroup_pm_freeze,
+ .thaw = ccwgroup_pm_thaw,
+ .restore = ccwgroup_pm_restore,
+};
+
static struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.match = ccwgroup_bus_match,
@@ -508,6 +575,7 @@ static struct bus_type ccwgroup_bus_type = {
.probe = ccwgroup_probe,
.remove = ccwgroup_remove,
.shutdown = ccwgroup_shutdown,
+ .pm = &ccwgroup_pm_ops,
};
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 883f16f96f2..1ecd3e56764 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -549,8 +549,7 @@ cleanup:
return ret;
}
-static int
-__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
+int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
struct {
struct chsc_header request;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index ba59bceace9..425e8f89a6c 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -90,6 +90,7 @@ extern void chsc_free_sei_area(void);
extern int chsc_enable_facility(int);
struct channel_subsystem;
extern int chsc_secm(struct channel_subsystem *, int);
+int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page);
int chsc_chp_vary(struct chp_id chpid, int on);
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 93eca1731b8..cc5144b6f9d 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -1,7 +1,8 @@
/*
* Driver for s390 chsc subchannels
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2009
+ *
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*
*/
@@ -112,6 +113,31 @@ static void chsc_subchannel_shutdown(struct subchannel *sch)
cio_disable_subchannel(sch);
}
+static int chsc_subchannel_prepare(struct subchannel *sch)
+{
+ int cc;
+ struct schib schib;
+ /*
+ * Don't allow suspend while the subchannel is not idle
+ * since we don't have a way to clear the subchannel and
+ * cannot disable it with a request running.
+ */
+ cc = stsch(sch->schid, &schib);
+ if (!cc && scsw_stctl(&schib.scsw))
+ return -EAGAIN;
+ return 0;
+}
+
+static int chsc_subchannel_freeze(struct subchannel *sch)
+{
+ return cio_disable_subchannel(sch);
+}
+
+static int chsc_subchannel_restore(struct subchannel *sch)
+{
+ return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
static struct css_device_id chsc_subchannel_ids[] = {
{ .match_flags = 0x1, .type =SUBCHANNEL_TYPE_CHSC, },
{ /* end of list */ },
@@ -125,6 +151,10 @@ static struct css_driver chsc_subchannel_driver = {
.probe = chsc_subchannel_probe,
.remove = chsc_subchannel_remove,
.shutdown = chsc_subchannel_shutdown,
+ .prepare = chsc_subchannel_prepare,
+ .freeze = chsc_subchannel_freeze,
+ .thaw = chsc_subchannel_restore,
+ .restore = chsc_subchannel_restore,
.name = "chsc_subchannel",
};
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 2aebb982304..5ec7789bd9d 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -12,6 +12,7 @@
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -28,7 +29,7 @@
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
-#include <asm/cpu.h>
+#include <asm/cputime.h>
#include <asm/fcx.h>
#include <asm/nmi.h>
#include <asm/crw.h>
@@ -626,8 +627,7 @@ out:
* handlers).
*
*/
-void
-do_IRQ (struct pt_regs *regs)
+void __irq_entry do_IRQ(struct pt_regs *regs)
{
struct tpi_info *tpi_info;
struct subchannel *sch;
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index dc98b2c6386..30f51611130 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1204,6 +1204,11 @@ static ssize_t cmb_enable_store(struct device *dev,
DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
+int ccw_set_cmf(struct ccw_device *cdev, int enable)
+{
+ return cmbops->set(cdev, enable ? 2 : 0);
+}
+
/**
* enable_cmf() - switch on the channel measurement for a specific device
* @cdev: The ccw device to be enabled
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 0085d890179..85d43c6bcb6 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,10 +1,10 @@
/*
- * drivers/s390/cio/css.c
- * driver for channel subsystem
+ * driver for channel subsystem
*
- * Copyright IBM Corp. 2002,2008
- * Author(s): Arnd Bergmann (arndb@de.ibm.com)
- * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Copyright IBM Corp. 2002, 2009
+ *
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#define KMSG_COMPONENT "cio"
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
+#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>
@@ -780,6 +781,79 @@ static struct notifier_block css_reboot_notifier = {
};
/*
+ * Since the css devices are not on a bus and have neither a class
+ * nor a special device type, we cannot stop/restart channel
+ * path measurements via the normal suspend/resume callbacks, but have
+ * to use notifiers instead.
+ */
+static int css_power_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ void *secm_area;
+ int ret, i;
+
+ switch (event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ret = NOTIFY_DONE;
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ struct channel_subsystem *css;
+
+ css = channel_subsystems[i];
+ mutex_lock(&css->mutex);
+ if (!css->cm_enabled) {
+ mutex_unlock(&css->mutex);
+ continue;
+ }
+ secm_area = (void *)get_zeroed_page(GFP_KERNEL |
+ GFP_DMA);
+ if (secm_area) {
+ if (__chsc_do_secm(css, 0, secm_area))
+ ret = NOTIFY_BAD;
+ free_page((unsigned long)secm_area);
+ } else
+ ret = NOTIFY_BAD;
+
+ mutex_unlock(&css->mutex);
+ }
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ ret = NOTIFY_DONE;
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ struct channel_subsystem *css;
+
+ css = channel_subsystems[i];
+ mutex_lock(&css->mutex);
+ if (!css->cm_enabled) {
+ mutex_unlock(&css->mutex);
+ continue;
+ }
+ secm_area = (void *)get_zeroed_page(GFP_KERNEL |
+ GFP_DMA);
+ if (secm_area) {
+ if (__chsc_do_secm(css, 1, secm_area))
+ ret = NOTIFY_BAD;
+ free_page((unsigned long)secm_area);
+ } else
+ ret = NOTIFY_BAD;
+
+ mutex_unlock(&css->mutex);
+ }
+ /* search for subchannels, which appeared during hibernation */
+ css_schedule_reprobe();
+ break;
+ default:
+ ret = NOTIFY_DONE;
+ }
+ return ret;
+
+}
+static struct notifier_block css_power_notifier = {
+ .notifier_call = css_power_event,
+};
+
+/*
* Now that the driver core is running, we can setup our channel subsystem.
* The struct subchannel's are created during probing (except for the
* static console subchannel).
@@ -852,6 +926,11 @@ init_channel_subsystem (void)
ret = register_reboot_notifier(&css_reboot_notifier);
if (ret)
goto out_unregister;
+ ret = register_pm_notifier(&css_power_notifier);
+ if (ret) {
+ unregister_reboot_notifier(&css_reboot_notifier);
+ goto out_unregister;
+ }
css_init_done = 1;
/* Enable default isc for I/O subchannels. */
@@ -953,6 +1032,73 @@ static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
return ret;
}
+static int css_pm_prepare(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (mutex_is_locked(&sch->reg_mutex))
+ return -EAGAIN;
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ /* Notify drivers that they may not register children. */
+ return drv->prepare ? drv->prepare(sch) : 0;
+}
+
+static void css_pm_complete(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return;
+ drv = to_cssdriver(sch->dev.driver);
+ if (drv->complete)
+ drv->complete(sch);
+}
+
+static int css_pm_freeze(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ return drv->freeze ? drv->freeze(sch) : 0;
+}
+
+static int css_pm_thaw(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ return drv->thaw ? drv->thaw(sch) : 0;
+}
+
+static int css_pm_restore(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ return drv->restore ? drv->restore(sch) : 0;
+}
+
+static struct dev_pm_ops css_pm_ops = {
+ .prepare = css_pm_prepare,
+ .complete = css_pm_complete,
+ .freeze = css_pm_freeze,
+ .thaw = css_pm_thaw,
+ .restore = css_pm_restore,
+};
+
struct bus_type css_bus_type = {
.name = "css",
.match = css_bus_match,
@@ -960,6 +1106,7 @@ struct bus_type css_bus_type = {
.remove = css_remove,
.shutdown = css_shutdown,
.uevent = css_uevent,
+ .pm = &css_pm_ops,
};
/**
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 57ebf120f82..9763eeec745 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -70,6 +70,11 @@ struct chp_link;
* @probe: function called on probe
* @remove: function called on remove
* @shutdown: called at device shutdown
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
* @name: name of the device driver
*/
struct css_driver {
@@ -82,6 +87,11 @@ struct css_driver {
int (*probe)(struct subchannel *);
int (*remove)(struct subchannel *);
void (*shutdown)(struct subchannel *);
+ int (*prepare) (struct subchannel *);
+ void (*complete) (struct subchannel *);
+ int (*freeze)(struct subchannel *);
+ int (*thaw) (struct subchannel *);
+ int (*restore)(struct subchannel *);
const char *name;
};
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 35441fa16be..3c57c1a18bb 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -138,6 +138,19 @@ static struct css_device_id io_subchannel_ids[] = {
};
MODULE_DEVICE_TABLE(css, io_subchannel_ids);
+static int io_subchannel_prepare(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+ /*
+ * Don't allow suspend while a ccw device registration
+ * is still outstanding.
+ */
+ cdev = sch_get_cdev(sch);
+ if (cdev && !device_is_registered(&cdev->dev))
+ return -EAGAIN;
+ return 0;
+}
+
static struct css_driver io_subchannel_driver = {
.owner = THIS_MODULE,
.subchannel_type = io_subchannel_ids,
@@ -148,6 +161,7 @@ static struct css_driver io_subchannel_driver = {
.probe = io_subchannel_probe,
.remove = io_subchannel_remove,
.shutdown = io_subchannel_shutdown,
+ .prepare = io_subchannel_prepare,
};
struct workqueue_struct *ccw_device_work;
@@ -1775,6 +1789,15 @@ ccw_device_probe_console(void)
return &console_cdev;
}
+static int ccw_device_pm_restore(struct device *dev);
+
+int ccw_device_force_console(void)
+{
+ if (!console_cdev_in_use)
+ return -ENODEV;
+ return ccw_device_pm_restore(&console_cdev.dev);
+}
+EXPORT_SYMBOL_GPL(ccw_device_force_console);
const char *cio_get_console_cdev_name(struct subchannel *sch)
{
@@ -1895,6 +1918,242 @@ static void ccw_device_shutdown(struct device *dev)
disable_cmf(cdev);
}
+static int ccw_device_pm_prepare(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+
+ if (work_pending(&cdev->private->kick_work))
+ return -EAGAIN;
+ /* Fail while device is being set online/offline. */
+ if (atomic_read(&cdev->private->onoff))
+ return -EAGAIN;
+
+ if (cdev->online && cdev->drv && cdev->drv->prepare)
+ return cdev->drv->prepare(cdev);
+
+ return 0;
+}
+
+static void ccw_device_pm_complete(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+
+ if (cdev->online && cdev->drv && cdev->drv->complete)
+ cdev->drv->complete(cdev);
+}
+
+static int ccw_device_pm_freeze(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int ret, cm_enabled;
+
+ /* Fail suspend while device is in transitional state. */
+ if (!dev_fsm_final_state(cdev))
+ return -EAGAIN;
+ if (!cdev->online)
+ return 0;
+ if (cdev->drv && cdev->drv->freeze) {
+ ret = cdev->drv->freeze(cdev);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irq(sch->lock);
+ cm_enabled = cdev->private->cmb != NULL;
+ spin_unlock_irq(sch->lock);
+ if (cm_enabled) {
+ /* Don't have the css write to memory. */
+ ret = ccw_set_cmf(cdev, 0);
+ if (ret)
+ return ret;
+ }
+ /* From here on, disallow device driver I/O. */
+ spin_lock_irq(sch->lock);
+ ret = cio_disable_subchannel(sch);
+ spin_unlock_irq(sch->lock);
+
+ return ret;
+}
+
+static int ccw_device_pm_thaw(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int ret, cm_enabled;
+
+ if (!cdev->online)
+ return 0;
+
+ spin_lock_irq(sch->lock);
+ /* Allow device driver I/O again. */
+ ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
+ cm_enabled = cdev->private->cmb != NULL;
+ spin_unlock_irq(sch->lock);
+ if (ret)
+ return ret;
+
+ if (cm_enabled) {
+ ret = ccw_set_cmf(cdev, 1);
+ if (ret)
+ return ret;
+ }
+
+ if (cdev->drv && cdev->drv->thaw)
+ ret = cdev->drv->thaw(cdev);
+
+ return ret;
+}
+
+static void __ccw_device_pm_restore(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int ret;
+
+ if (cio_is_console(sch->schid))
+ goto out;
+ /*
+ * While we were sleeping, devices may have gone or become
+ * available again. Kick re-detection.
+ */
+ spin_lock_irq(sch->lock);
+ cdev->private->flags.resuming = 1;
+ ret = ccw_device_recognition(cdev);
+ spin_unlock_irq(sch->lock);
+ if (ret) {
+ CIO_MSG_EVENT(0, "Couldn't start recognition for device "
+ "%s (ret=%d)\n", dev_name(&cdev->dev), ret);
+ spin_lock_irq(sch->lock);
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ spin_unlock_irq(sch->lock);
+ /* notify driver after the resume cb */
+ goto out;
+ }
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED);
+
+out:
+ cdev->private->flags.resuming = 0;
+}
+
+static int resume_handle_boxed(struct ccw_device *cdev)
+{
+ cdev->private->state = DEV_STATE_BOXED;
+ if (ccw_device_notify(cdev, CIO_BOXED))
+ return 0;
+ ccw_device_schedule_sch_unregister(cdev);
+ return -ENODEV;
+}
+
+static int resume_handle_disc(struct ccw_device *cdev)
+{
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ if (ccw_device_notify(cdev, CIO_GONE))
+ return 0;
+ ccw_device_schedule_sch_unregister(cdev);
+ return -ENODEV;
+}
+
+static int ccw_device_pm_restore(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int ret = 0, cm_enabled;
+
+ __ccw_device_pm_restore(cdev);
+ spin_lock_irq(sch->lock);
+ if (cio_is_console(sch->schid)) {
+ cio_enable_subchannel(sch, (u32)(addr_t)sch);
+ spin_unlock_irq(sch->lock);
+ goto out_restore;
+ }
+ cdev->private->flags.donotify = 0;
+ /* check recognition results */
+ switch (cdev->private->state) {
+ case DEV_STATE_OFFLINE:
+ break;
+ case DEV_STATE_BOXED:
+ ret = resume_handle_boxed(cdev);
+ spin_unlock_irq(sch->lock);
+ if (ret)
+ goto out;
+ goto out_restore;
+ case DEV_STATE_DISCONNECTED:
+ goto out_disc_unlock;
+ default:
+ goto out_unreg_unlock;
+ }
+ /* check if the device id has changed */
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+ CIO_MSG_EVENT(0, "resume: sch %s: failed (devno changed from "
+ "%04x to %04x)\n", dev_name(&sch->dev),
+ cdev->private->dev_id.devno,
+ sch->schib.pmcw.dev);
+ goto out_unreg_unlock;
+ }
+ /* check if the device type has changed */
+ if (!ccw_device_test_sense_data(cdev)) {
+ ccw_device_update_sense_data(cdev);
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_do_unbind_bind);
+ queue_work(ccw_device_work, &cdev->private->kick_work);
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+ if (!cdev->online) {
+ ret = 0;
+ goto out_unlock;
+ }
+ ret = ccw_device_online(cdev);
+ if (ret)
+ goto out_disc_unlock;
+
+ cm_enabled = cdev->private->cmb != NULL;
+ spin_unlock_irq(sch->lock);
+
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ if (cdev->private->state != DEV_STATE_ONLINE) {
+ spin_lock_irq(sch->lock);
+ goto out_disc_unlock;
+ }
+ if (cm_enabled) {
+ ret = ccw_set_cmf(cdev, 1);
+ if (ret) {
+ CIO_MSG_EVENT(2, "resume: cdev %s: cmf failed "
+ "(rc=%d)\n", dev_name(&cdev->dev), ret);
+ ret = 0;
+ }
+ }
+
+out_restore:
+ if (cdev->online && cdev->drv && cdev->drv->restore)
+ ret = cdev->drv->restore(cdev);
+out:
+ return ret;
+
+out_disc_unlock:
+ ret = resume_handle_disc(cdev);
+ spin_unlock_irq(sch->lock);
+ if (ret)
+ return ret;
+ goto out_restore;
+
+out_unreg_unlock:
+ ccw_device_schedule_sch_unregister(cdev);
+ ret = -ENODEV;
+out_unlock:
+ spin_unlock_irq(sch->lock);
+ return ret;
+}
+
+static struct dev_pm_ops ccw_pm_ops = {
+ .prepare = ccw_device_pm_prepare,
+ .complete = ccw_device_pm_complete,
+ .freeze = ccw_device_pm_freeze,
+ .thaw = ccw_device_pm_thaw,
+ .restore = ccw_device_pm_restore,
+};
+
struct bus_type ccw_bus_type = {
.name = "ccw",
.match = ccw_bus_match,
@@ -1902,6 +2161,7 @@ struct bus_type ccw_bus_type = {
.probe = ccw_device_probe,
.remove = ccw_device_remove,
.shutdown = ccw_device_shutdown,
+ .pm = &ccw_pm_ops,
};
/**
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index f1cbbd94ad4..e3975107a57 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -87,6 +87,8 @@ int ccw_device_is_orphan(struct ccw_device *);
int ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
+void ccw_device_update_sense_data(struct ccw_device *);
+int ccw_device_test_sense_data(struct ccw_device *);
void ccw_device_schedule_sch_unregister(struct ccw_device *);
int ccw_purge_blacklisted(void);
@@ -133,5 +135,6 @@ extern struct bus_type ccw_bus_type;
void retry_set_schib(struct ccw_device *cdev);
void cmf_retry_copy_block(struct ccw_device *);
int cmf_reenable(struct ccw_device *);
+int ccw_set_cmf(struct ccw_device *cdev, int enable);
extern struct device_attribute dev_attr_cmb_enable;
#endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index e4604926156..3db88c52d28 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -177,29 +177,21 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
panic("Can't stop i/o on subchannel.\n");
}
-static int
-ccw_device_handle_oper(struct ccw_device *cdev)
+void ccw_device_update_sense_data(struct ccw_device *cdev)
{
- struct subchannel *sch;
+ memset(&cdev->id, 0, sizeof(cdev->id));
+ cdev->id.cu_type = cdev->private->senseid.cu_type;
+ cdev->id.cu_model = cdev->private->senseid.cu_model;
+ cdev->id.dev_type = cdev->private->senseid.dev_type;
+ cdev->id.dev_model = cdev->private->senseid.dev_model;
+}
- sch = to_subchannel(cdev->dev.parent);
- cdev->private->flags.recog_done = 1;
- /*
- * Check if cu type and device type still match. If
- * not, it is certainly another device and we have to
- * de- and re-register.
- */
- if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
- cdev->id.cu_model != cdev->private->senseid.cu_model ||
- cdev->id.dev_type != cdev->private->senseid.dev_type ||
- cdev->id.dev_model != cdev->private->senseid.dev_model) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_do_unbind_bind);
- queue_work(ccw_device_work, &cdev->private->kick_work);
- return 0;
- }
- cdev->private->flags.donotify = 1;
- return 1;
+int ccw_device_test_sense_data(struct ccw_device *cdev)
+{
+ return cdev->id.cu_type == cdev->private->senseid.cu_type &&
+ cdev->id.cu_model == cdev->private->senseid.cu_model &&
+ cdev->id.dev_type == cdev->private->senseid.dev_type &&
+ cdev->id.dev_model == cdev->private->senseid.dev_model;
}
/*
@@ -233,7 +225,7 @@ static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
struct subchannel *sch;
- int notify, old_lpm, same_dev;
+ int old_lpm;
sch = to_subchannel(cdev->dev.parent);
@@ -263,8 +255,12 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
wake_up(&cdev->private->wait_q);
return;
}
- notify = 0;
- same_dev = 0; /* Keep the compiler quiet... */
+ if (cdev->private->flags.resuming) {
+ cdev->private->state = state;
+ cdev->private->flags.recog_done = 1;
+ wake_up(&cdev->private->wait_q);
+ return;
+ }
switch (state) {
case DEV_STATE_NOT_OPER:
CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
@@ -273,34 +269,31 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
sch->schid.ssid, sch->schid.sch_no);
break;
case DEV_STATE_OFFLINE:
- if (cdev->online) {
- same_dev = ccw_device_handle_oper(cdev);
- notify = 1;
+ if (!cdev->online) {
+ ccw_device_update_sense_data(cdev);
+ /* Issue device info message. */
+ CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
+ "CU Type/Mod = %04X/%02X, Dev Type/Mod "
+ "= %04X/%02X\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno,
+ cdev->id.cu_type, cdev->id.cu_model,
+ cdev->id.dev_type, cdev->id.dev_model);
+ break;
}
- /* fill out sense information */
- memset(&cdev->id, 0, sizeof(cdev->id));
- cdev->id.cu_type = cdev->private->senseid.cu_type;
- cdev->id.cu_model = cdev->private->senseid.cu_model;
- cdev->id.dev_type = cdev->private->senseid.dev_type;
- cdev->id.dev_model = cdev->private->senseid.dev_model;
- if (notify) {
- cdev->private->state = DEV_STATE_OFFLINE;
- if (same_dev) {
- /* Get device online again. */
- ccw_device_online(cdev);
- wake_up(&cdev->private->wait_q);
- }
- return;
+ cdev->private->state = DEV_STATE_OFFLINE;
+ cdev->private->flags.recog_done = 1;
+ if (ccw_device_test_sense_data(cdev)) {
+ cdev->private->flags.donotify = 1;
+ ccw_device_online(cdev);
+ wake_up(&cdev->private->wait_q);
+ } else {
+ ccw_device_update_sense_data(cdev);
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_do_unbind_bind);
+ queue_work(ccw_device_work, &cdev->private->kick_work);
}
- /* Issue device info message. */
- CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
- "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
- "%04X/%02X\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- cdev->id.cu_type, cdev->id.cu_model,
- cdev->id.dev_type, cdev->id.dev_model);
- break;
+ return;
case DEV_STATE_BOXED:
CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
" subchannel 0.%x.%04x\n",
@@ -502,9 +495,6 @@ ccw_device_recognition(struct ccw_device *cdev)
struct subchannel *sch;
int ret;
- if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
- (cdev->private->state != DEV_STATE_BOXED))
- return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
if (ret != 0)
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 151754d5474..2d0efee8a29 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -1,10 +1,8 @@
/*
- * drivers/s390/cio/device_ops.c
+ * Copyright IBM Corp. 2002, 2009
*
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -114,16 +112,17 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
struct subchannel *sch;
int ret;
- if (!cdev)
+ if (!cdev || !cdev->dev.parent)
return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
- sch = to_subchannel(cdev->dev.parent);
- if (!sch)
- return -ENODEV;
+
ret = cio_clear(sch);
if (ret == 0)
cdev->private->intparm = intparm;
@@ -161,11 +160,11 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
struct subchannel *sch;
int ret;
- if (!cdev)
+ if (!cdev || !cdev->dev.parent)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
- if (!sch)
- return -ENODEV;
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state == DEV_STATE_VERIFY ||
@@ -339,16 +338,17 @@ int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
struct subchannel *sch;
int ret;
- if (!cdev)
+ if (!cdev || !cdev->dev.parent)
return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
- sch = to_subchannel(cdev->dev.parent);
- if (!sch)
- return -ENODEV;
+
ret = cio_halt(sch);
if (ret == 0)
cdev->private->intparm = intparm;
@@ -372,11 +372,11 @@ int ccw_device_resume(struct ccw_device *cdev)
{
struct subchannel *sch;
- if (!cdev)
+ if (!cdev || !cdev->dev.parent)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
- if (!sch)
- return -ENODEV;
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE ||
@@ -471,11 +471,11 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
{
struct subchannel *sch;
- sch = to_subchannel(cdev->dev.parent);
- if (!sch)
+ if (!cdev->dev.parent)
return 0;
- else
- return sch->lpm;
+
+ sch = to_subchannel(cdev->dev.parent);
+ return sch->lpm;
}
/*
@@ -588,6 +588,8 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
int rc;
sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
/* Adjust requested path mask to excluded varied off paths. */
@@ -677,6 +679,8 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
if (!scsw_is_tm(&sch->schib.scsw) ||
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index c4f3e7c9a85..0b8f381bd20 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -107,6 +107,7 @@ struct ccw_device_private {
unsigned int recog_done:1; /* dev. recog. complete */
unsigned int fake_irb:1; /* deliver faked irb */
unsigned int intretry:1; /* retry internal operation */
+ unsigned int resuming:1; /* recognition while resume */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index accd957454e..d79cf5bf0e6 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -881,42 +881,26 @@ no_handler:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
-static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
- int dstat)
+static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+ int dstat)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
- if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
- DBF_ERROR("EQ:ck con");
- goto error;
- }
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
- if (!(dstat & DEV_STAT_DEV_END)) {
- DBF_ERROR("EQ:no dev");
+ if (cstat)
goto error;
- }
-
- if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
- DBF_ERROR("EQ: bad io");
+ if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
goto error;
- }
- return 0;
+ if (!(dstat & DEV_STAT_DEV_END))
+ goto error;
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
+ return;
+
error:
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
-
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
- return 1;
-}
-
-static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
- int dstat)
-{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
- if (!qdio_establish_check_errors(cdev, cstat, dstat))
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
}
/* qdio interrupt handler */
@@ -946,7 +930,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
}
}
qdio_irq_check_sense(irq_ptr, irb);
-
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
@@ -954,22 +937,19 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
case QDIO_IRQ_STATE_INACTIVE:
qdio_establish_handle_irq(cdev, cstat, dstat);
break;
-
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
break;
-
case QDIO_IRQ_STATE_ESTABLISHED:
case QDIO_IRQ_STATE_ACTIVE:
if (cstat & SCHN_STAT_PCI) {
qdio_int_handler_pci(irq_ptr);
return;
}
- if ((cstat & ~SCHN_STAT_PCI) || dstat) {
+ if (cstat || dstat)
qdio_handle_activate_check(cdev, intparm, cstat,
dstat);
- break;
- }
+ break;
default:
WARN_ON(1);
}
@@ -1514,7 +1494,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
(count > QDIO_MAX_BUFFERS_PER_Q) ||
- (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
+ (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
return -EINVAL;
if (!count)
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
index 136d0f0b1e9..eff943923c6 100644
--- a/drivers/s390/cio/qdio_perf.c
+++ b/drivers/s390/cio/qdio_perf.c
@@ -25,18 +25,6 @@ struct qdio_perf_stats perf_stats;
static struct proc_dir_entry *qdio_perf_pde;
#endif
-inline void qdio_perf_stat_inc(atomic_long_t *count)
-{
- if (qdio_performance_stats)
- atomic_long_inc(count);
-}
-
-inline void qdio_perf_stat_dec(atomic_long_t *count)
-{
- if (qdio_performance_stats)
- atomic_long_dec(count);
-}
-
/*
* procfs functions
*/
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
index 7821ac4fa51..ff4504ce1e3 100644
--- a/drivers/s390/cio/qdio_perf.h
+++ b/drivers/s390/cio/qdio_perf.h
@@ -9,7 +9,6 @@
#define QDIO_PERF_H
#include <linux/types.h>
-#include <linux/device.h>
#include <asm/atomic.h>
struct qdio_perf_stats {
@@ -50,10 +49,13 @@ struct qdio_perf_stats {
extern struct qdio_perf_stats perf_stats;
extern int qdio_performance_stats;
+static inline void qdio_perf_stat_inc(atomic_long_t *count)
+{
+ if (qdio_performance_stats)
+ atomic_long_inc(count);
+}
+
int qdio_setup_perf_stats(void);
void qdio_remove_perf_stats(void);
-extern void qdio_perf_stat_inc(atomic_long_t *count);
-extern void qdio_perf_stat_dec(atomic_long_t *count);
-
#endif
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index cbc8566fab7..e38e5d306fa 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -173,8 +173,9 @@ static void kvm_notify(struct virtqueue *vq)
* this device and sets it up.
*/
static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
- unsigned index,
- void (*callback)(struct virtqueue *vq))
+ unsigned index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
{
struct kvm_device *kdev = to_kvmdev(vdev);
struct kvm_vqconfig *config;
@@ -194,7 +195,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
vdev, (void *) config->address,
- kvm_notify, callback);
+ kvm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto unmap;
@@ -226,6 +227,38 @@ static void kvm_del_vq(struct virtqueue *vq)
KVM_S390_VIRTIO_RING_ALIGN));
}
+static void kvm_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+ kvm_del_vq(vq);
+}
+
+static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ struct kvm_device *kdev = to_kvmdev(vdev);
+ int i;
+
+ /* We must have this many virtqueues. */
+ if (nvqs > kdev->desc->num_vq)
+ return -ENOENT;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i]))
+ goto error;
+ }
+ return 0;
+
+error:
+ kvm_del_vqs(vdev);
+ return PTR_ERR(vqs[i]);
+}
+
/*
* The config ops structure as defined by virtio config
*/
@@ -237,8 +270,8 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
.get_status = kvm_get_status,
.set_status = kvm_set_status,
.reset = kvm_reset,
- .find_vq = kvm_find_vq,
- .del_vq = kvm_del_vq,
+ .find_vqs = kvm_find_vqs,
+ .del_vqs = kvm_del_vqs,
};
/*
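kvm_find_vqs() above follows the usual batch-setup idiom: build the queues one at a time and tear down everything already built if any step fails. A generic sketch of that unwind-on-error pattern with hypothetical alloc_one()/free_one() helpers (assumed to return an ERR_PTR on failure) instead of the virtio calls:

#include <linux/err.h>

/* Hypothetical per-item helpers; alloc_one() returns an ERR_PTR on failure. */
static void *alloc_one(int i);
static void free_one(void *item);

static int alloc_all(void *items[], int n)
{
	int i, err;

	for (i = 0; i < n; ++i) {
		items[i] = alloc_one(i);
		if (IS_ERR(items[i])) {
			err = PTR_ERR(items[i]);
			goto error;
		}
	}
	return 0;

error:
	/* Tear down only the items that were actually set up. */
	while (--i >= 0)
		free_one(items[i]);
	return err;
}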
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index a7745c82b4a..cb909a5b504 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -8,7 +8,7 @@ config LCS
Select this option if you want to use LCS networking on IBM System z.
This device driver supports Token Ring (IEEE 802.5),
FDDI (IEEE 802.7) and Ethernet.
- To compile as a module, choose M. The module name is lcs.ko.
+ To compile as a module, choose M. The module name is lcs.
If you do not know what it is, it's safe to choose Y.
config CTCM
@@ -21,7 +21,7 @@ config CTCM
It also supports virtual CTCs when running under VM.
This driver also supports channel-to-channel MPC SNA devices.
MPC is an SNA protocol device used by Communication Server for Linux.
- To compile as a module, choose M. The module name is ctcm.ko.
+ To compile as a module, choose M. The module name is ctcm.
To compile into the kernel, choose Y.
If you do not need any channel-to-channel connection, choose N.
@@ -34,7 +34,7 @@ config NETIUCV
link between VM guests. Using ifconfig a point-to-point connection
can be established to the Linux on IBM System z
running on the other VM guest. To compile as a module, choose M.
- The module name is netiucv.ko. If unsure, choose Y.
+ The module name is netiucv. If unsure, choose Y.
config SMSGIUCV
tristate "IUCV special message support (VM only)"
@@ -50,7 +50,7 @@ config CLAW
This driver supports channel attached CLAW devices.
CLAW is Common Link Access for Workstation. Common devices
that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
- To compile as a module, choose M. The module name is claw.ko.
+ To compile as a module, choose M. The module name is claw.
To compile into the kernel, choose Y.
config QETH
@@ -65,14 +65,14 @@ config QETH
<http://www.ibm.com/developerworks/linux/linux390>
To compile this driver as a module, choose M.
- The module name is qeth.ko.
+ The module name is qeth.
config QETH_L2
tristate "qeth layer 2 device support"
depends on QETH
help
Select this option to be able to run qeth devices in layer 2 mode.
- To compile as a module, choose M. The module name is qeth_l2.ko.
+ To compile as a module, choose M. The module name is qeth_l2.
If unsure, choose y.
config QETH_L3
@@ -80,7 +80,7 @@ config QETH_L3
depends on QETH
help
Select this option to be able to run qeth devices in layer 3 mode.
- To compile as a module choose M. The module name is qeth_l3.ko.
+ To compile as a module choose M. The module name is qeth_l3.
If unsure, choose Y.
config QETH_IPV6
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 30a43cc79e7..f370f8d460a 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3,12 +3,12 @@
* ESCON CLAW network driver
*
* Linux for zSeries version
- * Copyright (C) 2002,2005 IBM Corporation
+ * Copyright IBM Corp. 2002, 2009
* Author(s) Original code written by:
- * Kazuo Iimura (iimura@jp.ibm.com)
+ * Kazuo Iimura <iimura@jp.ibm.com>
* Rewritten by
- * Andy Richter (richtera@us.ibm.com)
- * Marc Price (mwprice@us.ibm.com)
+ * Andy Richter <richtera@us.ibm.com>
+ * Marc Price <mwprice@us.ibm.com>
*
* sysfs parms:
* group x.x.rrrr,x.x.wwww
@@ -253,6 +253,11 @@ static void claw_free_wrt_buf(struct net_device *dev);
/* Functions for unpack reads */
static void unpack_read(struct net_device *dev);
+static int claw_pm_prepare(struct ccwgroup_device *gdev)
+{
+ return -EPERM;
+}
+
/* ccwgroup table */
static struct ccwgroup_driver claw_group_driver = {
@@ -264,6 +269,7 @@ static struct ccwgroup_driver claw_group_driver = {
.remove = claw_remove_device,
.set_online = claw_new_device,
.set_offline = claw_shutdown_device,
+ .prepare = claw_pm_prepare,
};
/*
@@ -284,7 +290,7 @@ claw_probe(struct ccwgroup_device *cgdev)
if (!get_device(&cgdev->dev))
return -ENODEV;
privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
- cgdev->dev.driver_data = privptr;
+ dev_set_drvdata(&cgdev->dev, privptr);
if (privptr == NULL) {
probe_error(cgdev);
put_device(&cgdev->dev);
@@ -338,12 +344,6 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
CLAW_DBF_TEXT(4, trace, "claw_tx");
p_ch=&privptr->channel[WRITE];
- if (skb == NULL) {
- privptr->stats.tx_dropped++;
- privptr->stats.tx_errors++;
- CLAW_DBF_TEXT_(2, trace, "clawtx%d", -EIO);
- return -EIO;
- }
spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
rc=claw_hw_tx( skb, dev, 1 );
spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
@@ -597,14 +597,14 @@ claw_irq_handler(struct ccw_device *cdev,
CLAW_DBF_TEXT(4, trace, "clawirq");
/* Bypass all 'unsolicited interrupts' */
- if (!cdev->dev.driver_data) {
+ privptr = dev_get_drvdata(&cdev->dev);
+ if (!privptr) {
dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
" IRQ, c-%02x d-%02x\n",
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
CLAW_DBF_TEXT(2, trace, "badirq");
return;
}
- privptr = (struct claw_privbk *)cdev->dev.driver_data;
/* Try to extract channel from driver data. */
if (privptr->channel[READ].cdev == cdev)
@@ -1986,9 +1986,9 @@ probe_error( struct ccwgroup_device *cgdev)
struct claw_privbk *privptr;
CLAW_DBF_TEXT(4, trace, "proberr");
- privptr = (struct claw_privbk *) cgdev->dev.driver_data;
+ privptr = dev_get_drvdata(&cgdev->dev);
if (privptr != NULL) {
- cgdev->dev.driver_data = NULL;
+ dev_set_drvdata(&cgdev->dev, NULL);
kfree(privptr->p_env);
kfree(privptr->p_mtc_envelope);
kfree(privptr);
@@ -2917,9 +2917,9 @@ claw_new_device(struct ccwgroup_device *cgdev)
dev_info(&cgdev->dev, "add for %s\n",
dev_name(&cgdev->cdev[READ]->dev));
CLAW_DBF_TEXT(2, setup, "new_dev");
- privptr = cgdev->dev.driver_data;
- cgdev->cdev[READ]->dev.driver_data = privptr;
- cgdev->cdev[WRITE]->dev.driver_data = privptr;
+ privptr = dev_get_drvdata(&cgdev->dev);
+ dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
if (!privptr)
return -ENODEV;
p_env = privptr->p_env;
@@ -2956,9 +2956,9 @@ claw_new_device(struct ccwgroup_device *cgdev)
goto out;
}
dev->ml_priv = privptr;
- cgdev->dev.driver_data = privptr;
- cgdev->cdev[READ]->dev.driver_data = privptr;
- cgdev->cdev[WRITE]->dev.driver_data = privptr;
+ dev_set_drvdata(&cgdev->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
/* sysfs magic */
SET_NETDEV_DEV(dev, &cgdev->dev);
if (register_netdev(dev) != 0) {
@@ -3024,7 +3024,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
int ret;
CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
- priv = cgdev->dev.driver_data;
+ priv = dev_get_drvdata(&cgdev->dev);
if (!priv)
return -ENODEV;
ndev = priv->channel[READ].ndev;
@@ -3054,7 +3054,7 @@ claw_remove_device(struct ccwgroup_device *cgdev)
BUG_ON(!cgdev);
CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
- priv = cgdev->dev.driver_data;
+ priv = dev_get_drvdata(&cgdev->dev);
BUG_ON(!priv);
dev_info(&cgdev->dev, " will be removed.\n");
if (cgdev->state == CCWGROUP_ONLINE)
@@ -3069,9 +3069,9 @@ claw_remove_device(struct ccwgroup_device *cgdev)
kfree(priv->channel[1].irb);
priv->channel[1].irb=NULL;
kfree(priv);
- cgdev->dev.driver_data=NULL;
- cgdev->cdev[READ]->dev.driver_data = NULL;
- cgdev->cdev[WRITE]->dev.driver_data = NULL;
+ dev_set_drvdata(&cgdev->dev, NULL);
+ dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL);
+ dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL);
put_device(&cgdev->dev);
return;
@@ -3087,7 +3087,7 @@ claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
struct claw_privbk *priv;
struct claw_env * p_env;
- priv = dev->driver_data;
+ priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
p_env = priv->p_env;
@@ -3101,7 +3101,7 @@ claw_hname_write(struct device *dev, struct device_attribute *attr,
struct claw_privbk *priv;
struct claw_env * p_env;
- priv = dev->driver_data;
+ priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
p_env = priv->p_env;
@@ -3125,7 +3125,7 @@ claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
struct claw_privbk *priv;
struct claw_env * p_env;
- priv = dev->driver_data;
+ priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
p_env = priv->p_env;
@@ -3139,7 +3139,7 @@ claw_adname_write(struct device *dev, struct device_attribute *attr,
struct claw_privbk *priv;
struct claw_env * p_env;
- priv = dev->driver_data;
+ priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
p_env = priv->p_env;
@@ -3163,7 +3163,7 @@ claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
struct claw_privbk *priv;
struct claw_env * p_env;
- priv = dev->driver_data;
+ priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
p_env = priv->p_env;
@@ -3178,7 +3178,7 @@ claw_apname_write(struct device *dev, struct device_attribute *attr,
struct claw_privbk *priv;
struct claw_env * p_env;
- priv = dev->driver_data;
+ priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
p_env = priv->p_env;
@@ -3212,7 +3212,7 @@ claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
struct claw_privbk *priv;
struct claw_env * p_env;
- priv = dev->driver_data;
+ priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
p_env = priv->p_env;
@@ -3227,7 +3227,7 @@ claw_wbuff_write(struct device *dev, struct device_attribute *attr,
struct claw_env * p_env;
int nnn,max;
- priv = dev->driver_data;
+ priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
p_env = priv->p_env;
@@ -3254,7 +3254,7 @@ claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
struct claw_privbk *priv;
struct claw_env * p_env;
- priv = dev->driver_data;
+ priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
p_env = priv->p_env;
@@ -3269,7 +3269,7 @@ claw_rbuff_write(struct device *dev, struct device_attribute *attr,
struct claw_env *p_env;
int nnn,max;
- priv = dev->driver_data;
+ priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
p_env = priv->p_env;
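The claw hunks above all follow one pattern: direct reads and writes of dev->driver_data become calls to the dev_get_drvdata()/dev_set_drvdata() accessors, which also lets the explicit casts go away. A minimal sketch of that pattern, not claw code itself (my_priv, my_probe and my_remove are hypothetical names):

#include <linux/device.h>
#include <linux/slab.h>

struct my_priv {
	int state;
};

static int my_probe(struct device *dev)
{
	struct my_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(dev, priv);		/* was: dev->driver_data = priv */
	return 0;
}

static int my_remove(struct device *dev)
{
	struct my_priv *priv = dev_get_drvdata(dev);	/* was: dev->driver_data */

	dev_set_drvdata(dev, NULL);
	kfree(priv);
	return 0;
}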
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 77f4033a0f4..222e4739443 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/net/ctcm_main.c
*
- * Copyright IBM Corp. 2001, 2007
+ * Copyright IBM Corp. 2001, 2009
* Author(s):
* Original CTC driver(s):
* Fritz Elfert (felfert@millenux.com)
@@ -1677,10 +1677,8 @@ static void ctcm_remove_device(struct ccwgroup_device *cgdev)
BUG_ON(priv == NULL);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
- "removing device %s, r/w = %s/%s, proto : %d",
- priv->channel[READ]->netdev->name,
- priv->channel[READ]->id, priv->channel[WRITE]->id,
- priv->protocol);
+ "removing device %p, proto : %d",
+ cgdev, priv->protocol);
if (cgdev->state == CCWGROUP_ONLINE)
ctcm_shutdown_device(cgdev);
@@ -1690,6 +1688,38 @@ static void ctcm_remove_device(struct ccwgroup_device *cgdev)
put_device(&cgdev->dev);
}
+static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
+{
+ struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
+
+ if (gdev->state == CCWGROUP_OFFLINE)
+ return 0;
+ netif_device_detach(priv->channel[READ]->netdev);
+ ctcm_close(priv->channel[READ]->netdev);
+ ccw_device_set_offline(gdev->cdev[1]);
+ ccw_device_set_offline(gdev->cdev[0]);
+ return 0;
+}
+
+static int ctcm_pm_resume(struct ccwgroup_device *gdev)
+{
+ struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
+ int rc;
+
+ if (gdev->state == CCWGROUP_OFFLINE)
+ return 0;
+ rc = ccw_device_set_online(gdev->cdev[1]);
+ if (rc)
+ goto err_out;
+ rc = ccw_device_set_online(gdev->cdev[0]);
+ if (rc)
+ goto err_out;
+ ctcm_open(priv->channel[READ]->netdev);
+err_out:
+ netif_device_attach(priv->channel[READ]->netdev);
+ return rc;
+}
+
static struct ccwgroup_driver ctcm_group_driver = {
.owner = THIS_MODULE,
.name = CTC_DRIVER_NAME,
@@ -1699,6 +1729,9 @@ static struct ccwgroup_driver ctcm_group_driver = {
.remove = ctcm_remove_device,
.set_online = ctcm_new_device,
.set_offline = ctcm_shutdown_device,
+ .freeze = ctcm_pm_suspend,
+ .thaw = ctcm_pm_resume,
+ .restore = ctcm_pm_resume,
};
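The new ctcm freeze/thaw/restore callbacks follow the usual network-driver suspend shape: detach the net_device so the stack stops queueing frames, take the channels offline, and reverse the two steps on resume, reattaching even when bringing the hardware back fails so the device remains visible for manual recovery. A generic sketch of that shape, with my_hw_stop()/my_hw_start() as stand-in hardware helpers rather than ctcm functions:

#include <linux/netdevice.h>

static void my_hw_stop(struct net_device *ndev)  { /* placeholder */ }
static int  my_hw_start(struct net_device *ndev) { return 0; /* placeholder */ }

static int my_suspend(struct net_device *ndev)
{
	netif_device_detach(ndev);	/* stop the stack from queueing frames */
	my_hw_stop(ndev);
	return 0;
}

static int my_resume(struct net_device *ndev)
{
	int rc = my_hw_start(ndev);

	netif_device_attach(ndev);	/* re-attach even if the start failed */
	return rc;
}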
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a45bc24eb5f..8c675905448 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1,15 +1,12 @@
/*
- * linux/drivers/s390/net/lcs.c
- *
* Linux for S/390 Lan Channel Station Network Driver
*
- * Copyright (C) 1999-2001 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
- * Author(s): Original Code written by
- * DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
- * Rewritten by
- * Frank Pavlic (fpavlic@de.ibm.com) and
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Original Code written by
+ * DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com>
+ * Rewritten by
+ * Frank Pavlic <fpavlic@de.ibm.com> and
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1939,7 +1936,7 @@ lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf)
{
struct lcs_card *card;
- card = (struct lcs_card *)dev->driver_data;
+ card = dev_get_drvdata(dev);
if (!card)
return 0;
@@ -1956,7 +1953,7 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
struct lcs_card *card;
int value;
- card = (struct lcs_card *)dev->driver_data;
+ card = dev_get_drvdata(dev);
if (!card)
return 0;
@@ -1990,7 +1987,7 @@ lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct lcs_card *card;
- card = (struct lcs_card *)dev->driver_data;
+ card = dev_get_drvdata(dev);
return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
}
@@ -2001,7 +1998,7 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char
struct lcs_card *card;
int value;
- card = (struct lcs_card *)dev->driver_data;
+ card = dev_get_drvdata(dev);
if (!card)
return 0;
@@ -2020,7 +2017,7 @@ static ssize_t
lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct lcs_card *card = dev->driver_data;
+ struct lcs_card *card = dev_get_drvdata(dev);
char *tmp;
int i;
@@ -2073,7 +2070,7 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev)
put_device(&ccwgdev->dev);
return ret;
}
- ccwgdev->dev.driver_data = card;
+ dev_set_drvdata(&ccwgdev->dev, card);
ccwgdev->cdev[0]->handler = lcs_irq;
ccwgdev->cdev[1]->handler = lcs_irq;
card->gdev = ccwgdev;
@@ -2090,7 +2087,7 @@ lcs_register_netdev(struct ccwgroup_device *ccwgdev)
struct lcs_card *card;
LCS_DBF_TEXT(2, setup, "regnetdv");
- card = (struct lcs_card *)ccwgdev->dev.driver_data;
+ card = dev_get_drvdata(&ccwgdev->dev);
if (card->dev->reg_state != NETREG_UNINITIALIZED)
return 0;
SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
@@ -2123,7 +2120,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
enum lcs_dev_states recover_state;
int rc;
- card = (struct lcs_card *)ccwgdev->dev.driver_data;
+ card = dev_get_drvdata(&ccwgdev->dev);
if (!card)
return -ENODEV;
@@ -2229,7 +2226,7 @@ __lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
int ret;
LCS_DBF_TEXT(3, setup, "shtdndev");
- card = (struct lcs_card *)ccwgdev->dev.driver_data;
+ card = dev_get_drvdata(&ccwgdev->dev);
if (!card)
return -ENODEV;
if (recovery_mode == 0) {
@@ -2296,7 +2293,7 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev)
{
struct lcs_card *card;
- card = (struct lcs_card *)ccwgdev->dev.driver_data;
+ card = dev_get_drvdata(&ccwgdev->dev);
if (!card)
return;
@@ -2313,6 +2310,60 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev)
put_device(&ccwgdev->dev);
}
+static int lcs_pm_suspend(struct lcs_card *card)
+{
+ if (card->dev)
+ netif_device_detach(card->dev);
+ lcs_set_allowed_threads(card, 0);
+ lcs_wait_for_threads(card, 0xffffffff);
+ if (card->state != DEV_STATE_DOWN)
+ __lcs_shutdown_device(card->gdev, 1);
+ return 0;
+}
+
+static int lcs_pm_resume(struct lcs_card *card)
+{
+ int rc = 0;
+
+ if (card->state == DEV_STATE_RECOVER)
+ rc = lcs_new_device(card->gdev);
+ if (card->dev)
+ netif_device_attach(card->dev);
+ if (rc) {
+ dev_warn(&card->gdev->dev, "The lcs device driver "
+ "failed to recover the device\n");
+ }
+ return rc;
+}
+
+static int lcs_prepare(struct ccwgroup_device *gdev)
+{
+ return 0;
+}
+
+static void lcs_complete(struct ccwgroup_device *gdev)
+{
+ return;
+}
+
+static int lcs_freeze(struct ccwgroup_device *gdev)
+{
+ struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+ return lcs_pm_suspend(card);
+}
+
+static int lcs_thaw(struct ccwgroup_device *gdev)
+{
+ struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+ return lcs_pm_resume(card);
+}
+
+static int lcs_restore(struct ccwgroup_device *gdev)
+{
+ struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+ return lcs_pm_resume(card);
+}
+
/**
* LCS ccwgroup driver registration
*/
@@ -2325,6 +2376,11 @@ static struct ccwgroup_driver lcs_group_driver = {
.remove = lcs_remove_device,
.set_online = lcs_new_device,
.set_offline = lcs_shutdown_device,
+ .prepare = lcs_prepare,
+ .complete = lcs_complete,
+ .freeze = lcs_freeze,
+ .thaw = lcs_thaw,
+ .restore = lcs_restore,
};
/**
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index d58fea52557..6d668642af2 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -34,8 +34,8 @@ static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
* sysfs related stuff
*/
#define CARD_FROM_DEV(cdev) \
- (struct lcs_card *) \
- ((struct ccwgroup_device *)cdev->dev.driver_data)->dev.driver_data;
+ (struct lcs_card *) dev_get_drvdata( \
+ &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
/**
* CCW commands used in this driver
*/
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index be716e45f7a..52574ce797b 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1,11 +1,15 @@
/*
* IUCV network driver
*
- * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ * Copyright IBM Corp. 2001, 2009
*
- * Sysfs integration and all bugs therein by Cornelia Huck
- * (cornelia.huck@de.ibm.com)
+ * Author(s):
+ * Original netiucv driver:
+ * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ * Sysfs integration and all bugs therein:
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * PM functions:
+ * Ursula Braun (ursula.braun@de.ibm.com)
*
* Documentation used:
* the source of the original IUCV driver by:
@@ -149,10 +153,27 @@ PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
#define PRINTK_HEADER " iucv: " /* for debugging */
+/* dummy device to make sure netiucv_pm functions are called */
+static struct device *netiucv_dev;
+
+static int netiucv_pm_prepare(struct device *);
+static void netiucv_pm_complete(struct device *);
+static int netiucv_pm_freeze(struct device *);
+static int netiucv_pm_restore_thaw(struct device *);
+
+static struct dev_pm_ops netiucv_pm_ops = {
+ .prepare = netiucv_pm_prepare,
+ .complete = netiucv_pm_complete,
+ .freeze = netiucv_pm_freeze,
+ .thaw = netiucv_pm_restore_thaw,
+ .restore = netiucv_pm_restore_thaw,
+};
+
static struct device_driver netiucv_driver = {
.owner = THIS_MODULE,
.name = "netiucv",
.bus = &iucv_bus,
+ .pm = &netiucv_pm_ops,
};
static int netiucv_callback_connreq(struct iucv_path *,
@@ -233,6 +254,7 @@ struct netiucv_priv {
fsm_instance *fsm;
struct iucv_connection *conn;
struct device *dev;
+ int pm_state;
};
/**
@@ -1265,6 +1287,72 @@ static int netiucv_close(struct net_device *dev)
return 0;
}
+static int netiucv_pm_prepare(struct device *dev)
+{
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ return 0;
+}
+
+static void netiucv_pm_complete(struct device *dev)
+{
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ return;
+}
+
+/**
+ * netiucv_pm_freeze() - Freeze PM callback
+ * @dev: netiucv device
+ *
+ * close open netiucv interfaces
+ */
+static int netiucv_pm_freeze(struct device *dev)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+ struct net_device *ndev = NULL;
+ int rc = 0;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ if (priv && priv->conn)
+ ndev = priv->conn->netdev;
+ if (!ndev)
+ goto out;
+ netif_device_detach(ndev);
+ priv->pm_state = fsm_getstate(priv->fsm);
+ rc = netiucv_close(ndev);
+out:
+ return rc;
+}
+
+/**
+ * netiucv_pm_restore_thaw() - Thaw and restore PM callback
+ * @dev: netiucv device
+ *
+ * re-open netiucv interfaces closed during freeze
+ */
+static int netiucv_pm_restore_thaw(struct device *dev)
+{
+ struct netiucv_priv *priv = dev->driver_data;
+ struct net_device *ndev = NULL;
+ int rc = 0;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ if (priv && priv->conn)
+ ndev = priv->conn->netdev;
+ if (!ndev)
+ goto out;
+ switch (priv->pm_state) {
+ case DEV_STATE_RUNNING:
+ case DEV_STATE_STARTWAIT:
+ rc = netiucv_open(ndev);
+ break;
+ default:
+ break;
+ }
+ netif_device_attach(ndev);
+out:
+ return rc;
+}
+
/**
* Start transmission of a packet.
* Called from generic network device layer.
@@ -1315,9 +1403,9 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
dev->trans_start = jiffies;
- rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
+ rc = netiucv_transmit_skb(privptr->conn, skb);
netiucv_clear_busy(dev);
- return rc;
+ return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
/**
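The netiucv_tx hunk above maps the driver's internal transmit result onto the explicit NETDEV_TX_OK/NETDEV_TX_BUSY codes the network core expects from a start_xmit handler, instead of returning a bare 0/1 comparison. A small sketch of that convention (my_queue_skb() is a hypothetical helper returning non-zero when the frame could not be queued):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int my_queue_skb(struct sk_buff *skb)
{
	return 0;	/* placeholder: 0 on success, non-zero if busy */
}

static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc = my_queue_skb(skb);

	/* NETDEV_TX_BUSY asks the core to requeue the untouched skb */
	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}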
@@ -1364,7 +1452,7 @@ static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
static ssize_t user_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
@@ -1373,7 +1461,7 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
static ssize_t user_write(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->conn->netdev;
char *p;
char *tmp;
@@ -1430,7 +1518,8 @@ static DEVICE_ATTR(user, 0644, user_show, user_write);
static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
char *buf)
-{ struct netiucv_priv *priv = dev->driver_data;
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%d\n", priv->conn->max_buffsize);
@@ -1439,7 +1528,7 @@ static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->conn->netdev;
char *e;
int bs1;
@@ -1487,7 +1576,7 @@ static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
@@ -1498,7 +1587,7 @@ static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
static ssize_t conn_fsm_show (struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
@@ -1509,7 +1598,7 @@ static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
static ssize_t maxmulti_show (struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
@@ -1519,7 +1608,7 @@ static ssize_t maxmulti_write (struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.maxmulti = 0;
@@ -1531,7 +1620,7 @@ static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
@@ -1540,7 +1629,7 @@ static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.maxcqueue = 0;
@@ -1552,7 +1641,7 @@ static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
@@ -1561,7 +1650,7 @@ static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.doios_single = 0;
@@ -1573,7 +1662,7 @@ static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
@@ -1582,7 +1671,7 @@ static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
priv->conn->prof.doios_multi = 0;
@@ -1594,7 +1683,7 @@ static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
@@ -1603,7 +1692,7 @@ static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.txlen = 0;
@@ -1615,7 +1704,7 @@ static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
@@ -1624,7 +1713,7 @@ static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.tx_time = 0;
@@ -1636,7 +1725,7 @@ static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
@@ -1645,7 +1734,7 @@ static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.tx_pending = 0;
@@ -1657,7 +1746,7 @@ static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
@@ -1666,7 +1755,7 @@ static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.tx_max_pending = 0;
@@ -1731,7 +1820,6 @@ static int netiucv_register_device(struct net_device *ndev)
struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
int ret;
-
IUCV_DBF_TEXT(trace, 3, __func__);
if (dev) {
@@ -1758,7 +1846,7 @@ static int netiucv_register_device(struct net_device *ndev)
if (ret)
goto out_unreg;
priv->dev = dev;
- dev->driver_data = priv;
+ dev_set_drvdata(dev, priv);
return 0;
out_unreg:
@@ -2100,6 +2188,7 @@ static void __exit netiucv_exit(void)
netiucv_unregister_device(dev);
}
+ device_unregister(netiucv_dev);
driver_unregister(&netiucv_driver);
iucv_unregister(&netiucv_handler, 1);
iucv_unregister_dbf_views();
@@ -2125,10 +2214,25 @@ static int __init netiucv_init(void)
IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
goto out_iucv;
}
-
+ /* establish dummy device */
+ netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!netiucv_dev) {
+ rc = -ENOMEM;
+ goto out_driver;
+ }
+ dev_set_name(netiucv_dev, "netiucv");
+ netiucv_dev->bus = &iucv_bus;
+ netiucv_dev->parent = iucv_root;
+ netiucv_dev->release = (void (*)(struct device *))kfree;
+ netiucv_dev->driver = &netiucv_driver;
+ rc = device_register(netiucv_dev);
+ if (rc)
+ goto out_driver;
netiucv_banner();
return rc;
+out_driver:
+ driver_unregister(&netiucv_driver);
out_iucv:
iucv_unregister(&netiucv_handler, 1);
out_dbf:
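netiucv has no physical device of its own, so the init path above registers a dummy struct device on the iucv bus purely to give the driver core something to invoke the dev_pm_ops callbacks on; smsgiucv, further below, uses the same trick. A sketch of that registration under placeholder names (the bus, parent and driver come from the caller; this is not the netiucv code verbatim):

#include <linux/device.h>
#include <linux/slab.h>

static struct device *pm_trigger_dev;

static int register_pm_trigger(struct bus_type *bus, struct device *parent,
			       struct device_driver *drv)
{
	int rc;

	pm_trigger_dev = kzalloc(sizeof(*pm_trigger_dev), GFP_KERNEL);
	if (!pm_trigger_dev)
		return -ENOMEM;
	dev_set_name(pm_trigger_dev, "pm_trigger");
	pm_trigger_dev->bus = bus;
	pm_trigger_dev->parent = parent;
	pm_trigger_dev->driver = drv;
	/* let the final put_device() free the allocation */
	pm_trigger_dev->release = (void (*)(struct device *))kfree;
	rc = device_register(pm_trigger_dev);
	if (rc)
		put_device(pm_trigger_dev);	/* drops the ref; release frees */
	return rc;
}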
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c827d69b5a9..d53621c4acb 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/net/qeth_core_main.c
*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
@@ -952,6 +952,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
buf->buffer->element[i].addr = NULL;
buf->buffer->element[i].flags = 0;
}
+ buf->buffer->element[15].flags = 0;
buf->next_element_to_fill = 0;
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
@@ -1140,6 +1141,8 @@ static int qeth_setup_card(struct qeth_card *card)
card->ipato.enabled = 0;
card->ipato.invert4 = 0;
card->ipato.invert6 = 0;
+ if (card->info.type == QETH_CARD_TYPE_IQD)
+ card->options.checksum_type = NO_CHECKSUMMING;
/* init QDIO stuff */
qeth_init_qdio_info(card);
return 0;
@@ -2934,8 +2937,8 @@ int qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
if (card->info.type == QETH_CARD_TYPE_OSN)
return cast_type;
- if (skb->dst && skb->dst->neighbour) {
- cast_type = skb->dst->neighbour->type;
+ if (skb_dst(skb) && skb_dst(skb)->neighbour) {
+ cast_type = skb_dst(skb)->neighbour->type;
if ((cast_type == RTN_BROADCAST) ||
(cast_type == RTN_MULTICAST) ||
(cast_type == RTN_ANYCAST))
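The hunk above (and the matching qeth_l3 hunks further below) stop dereferencing skb->dst directly and go through the skb_dst() accessor instead. A short sketch of the accessor in the same cast-type role (my_cast_type() is a placeholder, not the qeth function):

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>

static int my_cast_type(struct sk_buff *skb)
{
	if (skb_dst(skb) && skb_dst(skb)->neighbour)
		return skb_dst(skb)->neighbour->type;	/* RTN_* value */
	return RTN_UNICAST;	/* no cached route: assume unicast */
}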
@@ -4192,6 +4195,50 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev)
card->discipline.ccwgdriver->shutdown(gdev);
}
+static int qeth_core_prepare(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ if (card->discipline.ccwgdriver &&
+ card->discipline.ccwgdriver->prepare)
+ return card->discipline.ccwgdriver->prepare(gdev);
+ return 0;
+}
+
+static void qeth_core_complete(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ if (card->discipline.ccwgdriver &&
+ card->discipline.ccwgdriver->complete)
+ card->discipline.ccwgdriver->complete(gdev);
+}
+
+static int qeth_core_freeze(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ if (card->discipline.ccwgdriver &&
+ card->discipline.ccwgdriver->freeze)
+ return card->discipline.ccwgdriver->freeze(gdev);
+ return 0;
+}
+
+static int qeth_core_thaw(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ if (card->discipline.ccwgdriver &&
+ card->discipline.ccwgdriver->thaw)
+ return card->discipline.ccwgdriver->thaw(gdev);
+ return 0;
+}
+
+static int qeth_core_restore(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ if (card->discipline.ccwgdriver &&
+ card->discipline.ccwgdriver->restore)
+ return card->discipline.ccwgdriver->restore(gdev);
+ return 0;
+}
+
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.owner = THIS_MODULE,
.name = "qeth",
@@ -4201,6 +4248,11 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.set_online = qeth_core_set_online,
.set_offline = qeth_core_set_offline,
.shutdown = qeth_core_shutdown,
+ .prepare = qeth_core_prepare,
+ .complete = qeth_core_complete,
+ .freeze = qeth_core_freeze,
+ .thaw = qeth_core_thaw,
+ .restore = qeth_core_restore,
};
static ssize_t
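qeth is split into a core module and layer-2/layer-3 discipline drivers, so the new PM entries above are thin wrappers that forward each callback to the currently bound discipline only when it provides a handler. The shape of that optional-callback delegation, with placeholder types rather than the qeth structures:

struct my_ops {
	int (*freeze)(void *ctx);
};

struct my_core {
	const struct my_ops *ops;	/* set when a discipline is bound */
	void *ctx;
};

static int my_core_freeze(struct my_core *core)
{
	if (core->ops && core->ops->freeze)
		return core->ops->freeze(core->ctx);
	return 0;	/* no discipline handler: nothing to do */
}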
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 06f4de1f050..ec24901c802 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -181,6 +181,8 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"},
{IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"},
{IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"},
+ {IPA_RC_L2_MAC_NOT_AUTH_BY_HYP, "L2 mac not authorized by hypervisor"},
+ {IPA_RC_L2_MAC_NOT_AUTH_BY_ADP, "L2 mac not authorized by adapter"},
{IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"},
{IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
{IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 18548822e37..eecb2ee62e8 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -168,6 +168,8 @@ enum qeth_ipa_return_codes {
IPA_RC_L2_ADDR_TABLE_FULL = 0x2006,
IPA_RC_L2_DUP_LAYER3_MAC = 0x200a,
IPA_RC_L2_GMAC_NOT_FOUND = 0x200b,
+ IPA_RC_L2_MAC_NOT_AUTH_BY_HYP = 0x200c,
+ IPA_RC_L2_MAC_NOT_AUTH_BY_ADP = 0x200d,
IPA_RC_L2_MAC_NOT_FOUND = 0x2010,
IPA_RC_L2_INVALID_VLAN_ID = 0x2015,
IPA_RC_L2_DUP_VLAN_ID = 0x2016,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 172031baedc..81d7f268418 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/net/qeth_l2_main.c
*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
@@ -19,6 +19,7 @@
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ip.h>
+#include <linux/list.h>
#include "qeth_core.h"
@@ -130,7 +131,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
cmd = (struct qeth_ipa_cmd *) data;
mac = &cmd->data.setdelmac.mac[0];
/* MAC already registered, needed in couple/uncouple case */
- if (cmd->hdr.return_code == 0x2005) {
+ if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) {
QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n",
mac, QETH_CARD_IFNAME(card));
cmd->hdr.return_code = 0;
@@ -502,6 +503,30 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
if (cmd->hdr.return_code) {
QETH_DBF_TEXT_(TRACE, 2, "L2er%x", cmd->hdr.return_code);
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+ switch (cmd->hdr.return_code) {
+ case IPA_RC_L2_DUP_MAC:
+ case IPA_RC_L2_DUP_LAYER3_MAC:
+ dev_warn(&card->gdev->dev,
+ "MAC address "
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
+ "already exists\n",
+ card->dev->dev_addr[0], card->dev->dev_addr[1],
+ card->dev->dev_addr[2], card->dev->dev_addr[3],
+ card->dev->dev_addr[4], card->dev->dev_addr[5]);
+ break;
+ case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
+ case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+ dev_warn(&card->gdev->dev,
+ "MAC address "
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
+ "is not authorized\n",
+ card->dev->dev_addr[0], card->dev->dev_addr[1],
+ card->dev->dev_addr[2], card->dev->dev_addr[3],
+ card->dev->dev_addr[4], card->dev->dev_addr[5]);
+ break;
+ default:
+ break;
+ }
cmd->hdr.return_code = -EIO;
} else {
card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
@@ -616,6 +641,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
struct dev_addr_list *dm;
+ struct netdev_hw_addr *ha;
if (card->info.type == QETH_CARD_TYPE_OSN)
return ;
@@ -629,8 +655,8 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
for (dm = dev->mc_list; dm; dm = dm->next)
qeth_l2_add_mc(card, dm->da_addr, 0);
- for (dm = dev->uc_list; dm; dm = dm->next)
- qeth_l2_add_mc(card, dm->da_addr, 1);
+ list_for_each_entry(ha, &dev->uc_list, list)
+ qeth_l2_add_mc(card, ha->addr, 1);
spin_unlock_bh(&card->mclock);
if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
@@ -839,6 +865,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE) {
@@ -974,8 +1001,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
card->lan_online = 0;
+ return 0;
}
- return rc;
+ goto out_remove;
} else
card->lan_online = 1;
@@ -1113,12 +1141,62 @@ static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
qeth_clear_qdio_buffers(card);
}
+static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+ if (card->dev)
+ netif_device_detach(card->dev);
+ qeth_set_allowed_threads(card, 0, 1);
+ wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+ if (gdev->state == CCWGROUP_OFFLINE)
+ return 0;
+ if (card->state == CARD_STATE_UP) {
+ card->use_hard_stop = 1;
+ __qeth_l2_set_offline(card->gdev, 1);
+ } else
+ __qeth_l2_set_offline(card->gdev, 0);
+ return 0;
+}
+
+static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc = 0;
+
+ if (gdev->state == CCWGROUP_OFFLINE)
+ goto out;
+
+ if (card->state == CARD_STATE_RECOVER) {
+ rc = __qeth_l2_set_online(card->gdev, 1);
+ if (rc) {
+ if (card->dev) {
+ rtnl_lock();
+ dev_close(card->dev);
+ rtnl_unlock();
+ }
+ }
+ } else
+ rc = __qeth_l2_set_online(card->gdev, 0);
+out:
+ qeth_set_allowed_threads(card, 0xffffffff, 0);
+ if (card->dev)
+ netif_device_attach(card->dev);
+ if (rc)
+ dev_warn(&card->gdev->dev, "The qeth device driver "
+ "failed to recover an error on the device\n");
+ return rc;
+}
+
struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
.probe = qeth_l2_probe_device,
.remove = qeth_l2_remove_device,
.set_online = qeth_l2_set_online,
.set_offline = qeth_l2_set_offline,
.shutdown = qeth_l2_shutdown,
+ .freeze = qeth_l2_pm_suspend,
+ .thaw = qeth_l2_pm_resume,
+ .restore = qeth_l2_pm_resume,
};
EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0ba3817cb6a..54872406864 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/net/qeth_l3_main.c
*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
@@ -1920,16 +1920,22 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
}
- skb->ip_summed = card->options.checksum_type;
- if (card->options.checksum_type == HW_CHECKSUMMING) {
+ switch (card->options.checksum_type) {
+ case SW_CHECKSUMMING:
+ skb->ip_summed = CHECKSUM_NONE;
+ break;
+ case NO_CHECKSUMMING:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ case HW_CHECKSUMMING:
if ((hdr->hdr.l3.ext_flags &
- (QETH_HDR_EXT_CSUM_HDR_REQ |
- QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
- (QETH_HDR_EXT_CSUM_HDR_REQ |
- QETH_HDR_EXT_CSUM_TRANSP_REQ))
+ (QETH_HDR_EXT_CSUM_HDR_REQ |
+ QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
+ (QETH_HDR_EXT_CSUM_HDR_REQ |
+ QETH_HDR_EXT_CSUM_TRANSP_REQ))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = SW_CHECKSUMMING;
+ skb->ip_summed = CHECKSUM_NONE;
}
return vlan_id;
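The rebuild_skb hunk above replaces the direct assignment of the driver's checksum option to skb->ip_summed with an explicit mapping onto the values the stack understands: CHECKSUM_NONE when software still has to verify, CHECKSUM_UNNECESSARY when no verification is needed. A condensed sketch of that mapping with placeholder names:

#include <linux/skbuff.h>

enum my_csum_mode { MY_SW_CSUM, MY_NO_CSUM, MY_HW_CSUM };

static void my_set_rx_csum(struct sk_buff *skb, enum my_csum_mode mode,
			   int hw_csum_ok)
{
	switch (mode) {
	case MY_SW_CSUM:
		skb->ip_summed = CHECKSUM_NONE;		/* stack verifies */
		break;
	case MY_NO_CSUM:
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* nothing to check */
		break;
	case MY_HW_CSUM:
		skb->ip_summed = hw_csum_ok ? CHECKSUM_UNNECESSARY
					    : CHECKSUM_NONE;
		break;
	}
}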
@@ -2543,9 +2549,9 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
/* IPv4 */
hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
memset(hdr->hdr.l3.dest_addr, 0, 12);
- if ((skb->dst) && (skb->dst->neighbour)) {
+ if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) {
*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
- *((u32 *) skb->dst->neighbour->primary_key);
+ *((u32 *) skb_dst(skb)->neighbour->primary_key);
} else {
/* fill in destination address used in ip header */
*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
@@ -2556,9 +2562,9 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
if (card->info.type == QETH_CARD_TYPE_IQD)
hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
- if ((skb->dst) && (skb->dst->neighbour)) {
+ if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) {
memcpy(hdr->hdr.l3.dest_addr,
- skb->dst->neighbour->primary_key, 16);
+ skb_dst(skb)->neighbour->primary_key, 16);
} else {
/* fill in destination address used in ip header */
memcpy(hdr->hdr.l3.dest_addr,
@@ -3006,6 +3012,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->features |= NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
+ card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
return register_netdev(card->dev);
@@ -3070,6 +3077,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE) {
@@ -3141,8 +3149,9 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
card->lan_online = 0;
+ return 0;
}
- return rc;
+ goto out_remove;
} else
card->lan_online = 1;
qeth_set_large_send(card, card->options.large_send);
@@ -3274,12 +3283,62 @@ static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
qeth_clear_qdio_buffers(card);
}
+static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+ if (card->dev)
+ netif_device_detach(card->dev);
+ qeth_set_allowed_threads(card, 0, 1);
+ wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+ if (gdev->state == CCWGROUP_OFFLINE)
+ return 0;
+ if (card->state == CARD_STATE_UP) {
+ card->use_hard_stop = 1;
+ __qeth_l3_set_offline(card->gdev, 1);
+ } else
+ __qeth_l3_set_offline(card->gdev, 0);
+ return 0;
+}
+
+static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc = 0;
+
+ if (gdev->state == CCWGROUP_OFFLINE)
+ goto out;
+
+ if (card->state == CARD_STATE_RECOVER) {
+ rc = __qeth_l3_set_online(card->gdev, 1);
+ if (rc) {
+ if (card->dev) {
+ rtnl_lock();
+ dev_close(card->dev);
+ rtnl_unlock();
+ }
+ }
+ } else
+ rc = __qeth_l3_set_online(card->gdev, 0);
+out:
+ qeth_set_allowed_threads(card, 0xffffffff, 0);
+ if (card->dev)
+ netif_device_attach(card->dev);
+ if (rc)
+ dev_warn(&card->gdev->dev, "The qeth device driver "
+ "failed to recover an error on the device\n");
+ return rc;
+}
+
struct ccwgroup_driver qeth_l3_ccwgroup_driver = {
.probe = qeth_l3_probe_device,
.remove = qeth_l3_remove_device,
.set_online = qeth_l3_set_online,
.set_offline = qeth_l3_set_offline,
.shutdown = qeth_l3_shutdown,
+ .freeze = qeth_l3_pm_suspend,
+ .thaw = qeth_l3_pm_resume,
+ .restore = qeth_l3_pm_resume,
};
EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver);
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 164e090c262..e76a320d373 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -1,7 +1,8 @@
/*
* IUCV special message driver
*
- * Copyright 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2003, 2009
+ *
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
@@ -40,6 +41,8 @@ MODULE_AUTHOR
MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
static struct iucv_path *smsg_path;
+/* dummy device used as trigger for PM functions */
+static struct device *smsg_dev;
static DEFINE_SPINLOCK(smsg_list_lock);
static LIST_HEAD(smsg_list);
@@ -132,14 +135,51 @@ void smsg_unregister_callback(char *prefix,
kfree(cb);
}
+static int smsg_pm_freeze(struct device *dev)
+{
+#ifdef CONFIG_PM_DEBUG
+ printk(KERN_WARNING "smsg_pm_freeze\n");
+#endif
+ if (smsg_path)
+ iucv_path_sever(smsg_path, NULL);
+ return 0;
+}
+
+static int smsg_pm_restore_thaw(struct device *dev)
+{
+ int rc;
+
+#ifdef CONFIG_PM_DEBUG
+ printk(KERN_WARNING "smsg_pm_restore_thaw\n");
+#endif
+ if (smsg_path) {
+ memset(smsg_path, 0, sizeof(*smsg_path));
+ smsg_path->msglim = 255;
+ smsg_path->flags = 0;
+ rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
+ NULL, NULL, NULL);
+ printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc);
+ }
+ return 0;
+}
+
+static struct dev_pm_ops smsg_pm_ops = {
+ .freeze = smsg_pm_freeze,
+ .thaw = smsg_pm_restore_thaw,
+ .restore = smsg_pm_restore_thaw,
+};
+
static struct device_driver smsg_driver = {
+ .owner = THIS_MODULE,
.name = "SMSGIUCV",
.bus = &iucv_bus,
+ .pm = &smsg_pm_ops,
};
static void __exit smsg_exit(void)
{
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
+ device_unregister(smsg_dev);
iucv_unregister(&smsg_handler, 1);
driver_unregister(&smsg_driver);
}
@@ -166,12 +206,29 @@ static int __init smsg_init(void)
rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
NULL, NULL, NULL);
if (rc)
- goto out_free;
+ goto out_free_path;
+ smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!smsg_dev) {
+ rc = -ENOMEM;
+ goto out_free_path;
+ }
+ dev_set_name(smsg_dev, "smsg_iucv");
+ smsg_dev->bus = &iucv_bus;
+ smsg_dev->parent = iucv_root;
+ smsg_dev->release = (void (*)(struct device *))kfree;
+ smsg_dev->driver = &smsg_driver;
+ rc = device_register(smsg_dev);
+ if (rc)
+ goto out_free_dev;
+
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
return 0;
-out_free:
+out_free_dev:
+ kfree(smsg_dev);
+out_free_path:
iucv_path_free(smsg_path);
+ smsg_path = NULL;
out_register:
iucv_unregister(&smsg_handler, 1);
out_driver:
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 733fe3bf628..d9da5c42ccb 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -11,6 +11,54 @@
#include "zfcp_ext.h"
+#define ZFCP_MODEL_PRIV 0x4
+
+static int zfcp_ccw_suspend(struct ccw_device *cdev)
+
+{
+ struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
+
+ down(&zfcp_data.config_sema);
+
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL);
+ zfcp_erp_wait(adapter);
+
+ up(&zfcp_data.config_sema);
+
+ return 0;
+}
+
+static int zfcp_ccw_activate(struct ccw_device *cdev)
+
+{
+ struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
+
+ zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL,
+ ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+ "ccresu2", NULL);
+ zfcp_erp_wait(adapter);
+ flush_work(&adapter->scan_work);
+
+ return 0;
+}
+
+static struct ccw_device_id zfcp_ccw_device_id[] = {
+ { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
+ { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
+ {},
+};
+MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
+
+/**
+ * zfcp_ccw_priv_sch - check if subchannel is privileged
+ * @adapter: Adapter/Subchannel to check
+ */
+int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
+{
+ return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV;
+}
+
/**
* zfcp_ccw_probe - probe function of zfcp driver
* @ccw_device: pointer to belonging ccw device
@@ -176,8 +224,8 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
"ccnoti4", NULL);
break;
case CIO_BOXED:
- dev_warn(&adapter->ccw_device->dev,
- "The ccw device did not respond in time.\n");
+ dev_warn(&adapter->ccw_device->dev, "The FCP device "
+ "did not respond within the specified time\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
break;
}
@@ -199,14 +247,6 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
up(&zfcp_data.config_sema);
}
-static struct ccw_device_id zfcp_ccw_device_id[] = {
- { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
- { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
- {},
-};
-
-MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
-
static struct ccw_driver zfcp_ccw_driver = {
.owner = THIS_MODULE,
.name = "zfcp",
@@ -217,6 +257,9 @@ static struct ccw_driver zfcp_ccw_driver = {
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
.shutdown = zfcp_ccw_shutdown,
+ .freeze = zfcp_ccw_suspend,
+ .thaw = zfcp_ccw_activate,
+ .restore = zfcp_ccw_activate,
};
/**
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 0a1a5dd8d01..b99b87ce5a3 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -163,7 +163,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
}
response->fsf_command = fsf_req->fsf_command;
- response->fsf_reqid = (unsigned long)fsf_req;
+ response->fsf_reqid = fsf_req->req_id;
response->fsf_seqno = fsf_req->seq_no;
response->fsf_issued = fsf_req->issued;
response->fsf_prot_status = qtcb->prefix.prot_status;
@@ -737,7 +737,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
spin_lock_irqsave(&adapter->san_dbf_lock, flags);
memset(r, 0, sizeof(*r));
strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
- r->fsf_reqid = (unsigned long)fsf_req;
+ r->fsf_reqid = fsf_req->req_id;
r->fsf_seqno = fsf_req->seq_no;
r->s_id = fc_host_port_id(adapter->scsi_host);
r->d_id = wka_port->d_id;
@@ -773,7 +773,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
spin_lock_irqsave(&adapter->san_dbf_lock, flags);
memset(r, 0, sizeof(*r));
strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
- r->fsf_reqid = (unsigned long)fsf_req;
+ r->fsf_reqid = fsf_req->req_id;
r->fsf_seqno = fsf_req->seq_no;
r->s_id = wka_port->d_id;
r->d_id = fc_host_port_id(adapter->scsi_host);
@@ -803,7 +803,7 @@ static void zfcp_san_dbf_event_els(const char *tag, int level,
spin_lock_irqsave(&adapter->san_dbf_lock, flags);
memset(rec, 0, sizeof(*rec));
strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
- rec->fsf_reqid = (unsigned long)fsf_req;
+ rec->fsf_reqid = fsf_req->req_id;
rec->fsf_seqno = fsf_req->seq_no;
rec->s_id = s_id;
rec->d_id = d_id;
@@ -965,7 +965,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
ZFCP_DBF_SCSI_FCP_SNS_INFO);
}
- rec->fsf_reqid = (unsigned long)fsf_req;
+ rec->fsf_reqid = fsf_req->req_id;
rec->fsf_seqno = fsf_req->seq_no;
rec->fsf_issued = fsf_req->issued;
}
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 4c362a9069f..2074d45dbf6 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -47,13 +47,6 @@
/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
-/* Adapter Identification Parameters */
-#define ZFCP_CONTROL_UNIT_TYPE 0x1731
-#define ZFCP_CONTROL_UNIT_MODEL 0x03
-#define ZFCP_DEVICE_TYPE 0x1732
-#define ZFCP_DEVICE_MODEL 0x03
-#define ZFCP_DEVICE_MODEL_PRIV 0x04
-
/* DMQ bug workaround: don't use last SBALE */
#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index fdc9b4352a6..e50ea465bc2 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -880,6 +880,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
zfcp_port_put(port);
return ZFCP_ERP_CONTINUES;
}
+ /* fall through */
case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
if (!port->d_id)
return ZFCP_ERP_FAILED;
@@ -894,8 +895,13 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
act->step = ZFCP_ERP_STEP_PORT_CLOSING;
return ZFCP_ERP_CONTINUES;
}
- /* fall through otherwise */
}
+ if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
+ port->d_id = 0;
+ _zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL);
+ return ZFCP_ERP_EXIT;
+ }
+ /* fall through otherwise */
}
return ZFCP_ERP_FAILED;
}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 2e31b536548..120a9a1c81f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -27,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
/* zfcp_ccw.c */
extern int zfcp_ccw_register(void);
+extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
/* zfcp_cfdc.c */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 19ae0842047..35493a82d2a 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -116,7 +116,7 @@ static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
{
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
- /* wait 10 miliseconds, other reqs might pop in */
+ /* wait 10 milliseconds, other reqs might pop in */
schedule_delayed_work(&wka_port->work, HZ / 100);
}
@@ -150,9 +150,14 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
struct zfcp_port *port;
read_lock_irqsave(&zfcp_data.config_lock, flags);
- list_for_each_entry(port, &fsf_req->adapter->port_list_head, list)
+ list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
if ((port->d_id & range) == (elem->nport_did & range))
zfcp_test_link(port);
+ if (!port->d_id)
+ zfcp_erp_port_reopen(port,
+ ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fcrscn1", NULL);
+ }
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 74dee32afba..e6dae3744e7 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -526,6 +526,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
break;
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+ /* fall through */
default:
dev_err(&adapter->ccw_device->dev,
"Unknown or unsupported arbitrated loop "
@@ -897,6 +898,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
switch (fsq->word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_test_link(unit->port);
+ /* fall through */
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -993,6 +995,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
break;
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
+ /* fall through */
case FSF_GENERIC_COMMAND_REJECTED:
case FSF_PAYLOAD_SIZE_MISMATCH:
case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1399,7 +1402,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
struct fsf_plogi *plogi;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
- return;
+ goto out;
switch (header->fsf_status) {
case FSF_PORT_ALREADY_OPEN:
@@ -1461,6 +1464,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
+
+out:
+ zfcp_port_put(port);
}
/**
@@ -1473,6 +1479,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_fsf_req *req;
+ struct zfcp_port *port = erp_action->port;
int retval = -EIO;
spin_lock_bh(&adapter->req_q_lock);
@@ -1493,16 +1500,18 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
req->handler = zfcp_fsf_open_port_handler;
- req->qtcb->bottom.support.d_id = erp_action->port->d_id;
- req->data = erp_action->port;
+ req->qtcb->bottom.support.d_id = port->d_id;
+ req->data = port;
req->erp_action = erp_action;
erp_action->fsf_req = req;
+ zfcp_port_get(port);
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req = NULL;
+ zfcp_port_put(port);
}
out:
spin_unlock_bh(&adapter->req_q_lock);
@@ -1590,8 +1599,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
dev_warn(&req->adapter->ccw_device->dev,
"Opening WKA port 0x%x failed\n", wka_port->d_id);
+ /* fall through */
case FSF_ADAPTER_STATUS_AVAILABLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ /* fall through */
case FSF_ACCESS_DENIED:
wka_port->status = ZFCP_WKA_PORT_OFFLINE;
break;
@@ -1876,7 +1887,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
- (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) {
+ !zfcp_ccw_priv_sch(adapter)) {
exclusive = (bottom->lun_access_info &
FSF_UNIT_ACCESS_EXCLUSIVE);
readwrite = (bottom->lun_access_info &
@@ -2314,7 +2325,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
{
struct zfcp_fsf_req *req;
struct fcp_cmnd_iu *fcp_cmnd_iu;
- unsigned int sbtype;
+ unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
int real_bytes, retval = -EIO;
struct zfcp_adapter *adapter = unit->port->adapter;
@@ -2356,11 +2367,9 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
switch (scsi_cmnd->sc_data_direction) {
case DMA_NONE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
- sbtype = SBAL_FLAGS0_TYPE_READ;
break;
case DMA_FROM_DEVICE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
- sbtype = SBAL_FLAGS0_TYPE_READ;
fcp_cmnd_iu->rddata = 1;
break;
case DMA_TO_DEVICE:
@@ -2369,8 +2378,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
fcp_cmnd_iu->wddata = 1;
break;
case DMA_BIDIRECTIONAL:
- default:
- retval = -EIO;
goto failed_scsi_cmnd;
}
@@ -2394,9 +2401,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
scsi_sglist(scsi_cmnd),
FSF_MAX_SBALS_PER_REQ);
if (unlikely(real_bytes < 0)) {
- if (req->sbal_number < FSF_MAX_SBALS_PER_REQ)
- retval = -EIO;
- else {
+ if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) {
dev_err(&adapter->ccw_device->dev,
"Oversize data package, unit 0x%016Lx "
"on port 0x%016Lx closed\n",
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e8fbeaeb5fb..7d0da230eb6 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -12,6 +12,10 @@
#include "zfcp_ext.h"
#include <asm/atomic.h>
+static unsigned int default_depth = 32;
+module_param_named(queue_depth, default_depth, uint, 0600);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
+
/* Find start of Sense Information in FCP response unit*/
char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
{
@@ -24,6 +28,12 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
return fcp_sns_info_ptr;
}
+static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+ return sdev->queue_depth;
+}
+
static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
{
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@@ -34,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
{
if (sdp->tagged_supported)
- scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32);
+ scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
else
scsi_adjust_queue_depth(sdp, 0, 1);
return 0;
@@ -647,6 +657,7 @@ struct zfcp_data zfcp_data = {
.name = "zfcp",
.module = THIS_MODULE,
.proc_name = "zfcp",
+ .change_queue_depth = zfcp_scsi_change_queue_depth,
.slave_alloc = zfcp_scsi_slave_alloc,
.slave_configure = zfcp_scsi_slave_configure,
.slave_destroy = zfcp_scsi_slave_destroy,
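The zfcp_scsi change above turns the previously hard-coded depth of 32 into a module parameter and adds a change_queue_depth host callback, so the default can be chosen at load time (and, with the 0600 permission, adjusted later through the module's sysfs parameters directory) while individual devices can still be retuned at runtime. A sketch of the same pattern under placeholder names:

#include <linux/module.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

static unsigned int my_default_depth = 32;
module_param_named(queue_depth, my_default_depth, uint, 0600);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");

static int my_change_queue_depth(struct scsi_device *sdev, int depth)
{
	/* keep the current tag type, only change the depth */
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
	return sdev->queue_depth;
}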
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a85ad05e854..6d465168468 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -186,31 +186,31 @@ static void jsfd_do_request(struct request_queue *q)
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
+ req = blk_fetch_request(q);
+ while (req) {
struct jsfd_part *jdp = req->rq_disk->private_data;
- unsigned long offset = req->sector << 9;
- size_t len = req->current_nr_sectors << 9;
+ unsigned long offset = blk_rq_pos(req) << 9;
+ size_t len = blk_rq_cur_bytes(req);
+ int err = -EIO;
- if ((offset + len) > jdp->dsize) {
- end_request(req, 0);
- continue;
- }
+ if ((offset + len) > jdp->dsize)
+ goto end;
if (rq_data_dir(req) != READ) {
printk(KERN_ERR "jsfd: write\n");
- end_request(req, 0);
- continue;
+ goto end;
}
if ((jdp->dbase & 0xff000000) != 0x20000000) {
printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase);
- end_request(req, 0);
- continue;
+ goto end;
}
jsfd_read(req->buffer, jdp->dbase + offset, len);
-
- end_request(req, 1);
+ err = 0;
+ end:
+ if (!__blk_end_request_cur(req, err))
+ req = blk_fetch_request(q);
}
}
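The jsflash hunk above ports the request function to the newer block API: blk_fetch_request() pulls the next request from the queue, blk_rq_pos() and blk_rq_cur_bytes() describe the current segment, and __blk_end_request_cur() completes that segment, returning false once the whole request is finished so a new one can be fetched. A generic sketch of that loop, with my_transfer() as a stand-in for the actual data copy:

#include <linux/blkdev.h>

static int my_transfer(struct request *req, sector_t pos, unsigned int len)
{
	return 0;	/* placeholder: 0 on success, negative errno on failure */
}

static void my_do_request(struct request_queue *q)
{
	struct request *req = blk_fetch_request(q);

	while (req) {
		sector_t pos = blk_rq_pos(req);		  /* in 512-byte sectors */
		unsigned int len = blk_rq_cur_bytes(req); /* current segment */
		int err = my_transfer(req, pos, len);

		/* __ variant: the caller of a request_fn holds the queue lock;
		 * returns false when the whole request has been completed */
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}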
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index 124f660a038..75ac19b1192 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -303,7 +303,7 @@ static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
struct device_node *dp)
{
DATA *data = file->private_data;
- struct openpromio *opp;
+ struct openpromio *opp = NULL;
int bufsize, error = 0;
static int cnt;
void __user *argp = (void __user *)arg;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 8b7983aba8f..36c21b19e5d 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1978,7 +1978,8 @@ static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
{
struct scsi_cmnd *cmd = tw_dev->srb[request_id];
- scsi_dma_unmap(cmd);
+ if (cmd->SCp.phase == TW_PHASE_SGLIST)
+ scsi_dma_unmap(cmd);
} /* End twa_unmap_scsi_data() */
/* scsi_host_template initializer */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c03f1d2c9e2..faa0fcfed71 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -6,7 +6,7 @@
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
- Copyright (C) 1999-2007 3ware Inc.
+ Copyright (C) 1999-2009 3ware Inc.
Kernel compatiblity By: Andre Hedrick <andre@suse.com>
Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
@@ -1294,7 +1294,8 @@ static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
- scsi_dma_unmap(cmd);
+ if (cmd->SCp.phase == TW_PHASE_SGLIST)
+ scsi_dma_unmap(cmd);
} /* End tw_unmap_scsi_data() */
/* This function will reset a device extension */
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 8e71e5e122b..a5a2ba2561d 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -6,7 +6,7 @@
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
- Copyright (C) 1999-2007 3ware Inc.
+ Copyright (C) 1999-2009 3ware Inc.
Kernel compatiblity By: Andre Hedrick <andre@suse.com>
Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8ed2990c826..6a19ed9a119 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -191,20 +191,19 @@ config SCSI_ENCLOSURE
it has an enclosure device. Selecting this option will just allow
certain enclosure conditions to be reported and is not required.
-comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
- depends on SCSI
-
config SCSI_MULTI_LUN
bool "Probe all LUNs on each SCSI device"
depends on SCSI
help
- If you have a SCSI device that supports more than one LUN (Logical
- Unit Number), e.g. a CD jukebox, and only one LUN is detected, you
- can say Y here to force the SCSI driver to probe for multiple LUNs.
- A SCSI device with multiple LUNs acts logically like multiple SCSI
- devices. The vast majority of SCSI devices have only one LUN, and
- so most people can say N here. The max_luns boot/module parameter
- allows to override this setting.
+ Some devices support more than one LUN (Logical Unit Number) in order
+ to allow access to several media, e.g. CD jukebox, USB card reader,
+ mobile phone in mass storage mode. This option forces the kernel to
+ probe for all LUNs by default. This setting can be overridden by the
+ max_luns boot/module parameter. Note that this option does not affect
+ devices conforming to SCSI-3 or higher as they can explicitly report
+ their number of LUNs. It is safe to say Y here unless you have one of
+ those rare devices which reacts in an unexpected way when probed for
+ multiple LUNs.
config SCSI_CONSTANTS
bool "Verbose SCSI error reporting (kernel size +=12K)"
@@ -355,6 +354,7 @@ config ISCSI_TCP
http://open-iscsi.org
source "drivers/scsi/cxgb3i/Kconfig"
+source "drivers/scsi/bnx2i/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
@@ -508,6 +508,7 @@ config SCSI_AIC7XXX_OLD
source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
source "drivers/scsi/aic94xx/Kconfig"
+source "drivers/scsi/mvsas/Kconfig"
config SCSI_DPT_I2O
tristate "Adaptec I2O RAID support "
@@ -628,6 +629,17 @@ config FCOE
---help---
Fibre Channel over Ethernet module
+config FCOE_FNIC
+ tristate "Cisco FNIC Driver"
+ depends on PCI && X86
+ select LIBFC
+ help
+ This is support for the Cisco PCI-Express FCoE HBA.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/scsi.txt>.
+ The module will be called fnic.
+
config SCSI_DMX3191D
tristate "DMX3191D SCSI support"
depends on PCI && SCSI
@@ -1039,16 +1051,6 @@ config SCSI_IZIP_SLOW_CTR
Generally, saying N is fine.
-config SCSI_MVSAS
- tristate "Marvell 88SE6440 SAS/SATA support"
- depends on PCI && SCSI
- select SCSI_SAS_LIBSAS
- help
- This driver supports Marvell SAS/SATA PCI devices.
-
- To compiler this driver as a module, choose M here: the module
- will be called mvsas.
-
config SCSI_NCR53C406A
tristate "NCR53c406a SCSI support"
depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e7c861ac417..25429ea63d0 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_SCSI_DH) += device_handler/
obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_LIBFCOE) += fcoe/
obj-$(CONFIG_FCOE) += fcoe/
+obj-$(CONFIG_FCOE_FNIC) += fnic/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
@@ -125,9 +126,10 @@ obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
-obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
+obj-$(CONFIG_SCSI_MVSAS) += mvsas/
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
+obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index c889d845868..1cdf09a4779 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -224,7 +224,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
return ret;
}
-static int
+static irqreturn_t
NCR_D700_intr(int irq, void *data)
{
struct NCR_D700_private *p = (struct NCR_D700_private *)data;
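
The return-type fix above matters because the IRQ core inspects the handler's irqreturn_t to decide whether the interrupt was actually serviced. A minimal sketch of the expected handler shape, with struct my_device and my_pending()/my_handle() as hypothetical stand-ins:

#include <linux/interrupt.h>

struct my_device;
int my_pending(struct my_device *dev);
void my_handle(struct my_device *dev);

static irqreturn_t my_intr(int irq, void *data)
{
        struct my_device *dev = data;

        if (!my_pending(dev))
                return IRQ_NONE;        /* not raised by our device */

        my_handle(dev);                 /* acknowledge and service it */
        return IRQ_HANDLED;
}
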
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index ed0e3e55652..538135783aa 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -646,7 +646,7 @@ static int aha1740_probe (struct device *dev)
static __devexit int aha1740_remove (struct device *dev)
{
- struct Scsi_Host *shpnt = dev->driver_data;
+ struct Scsi_Host *shpnt = dev_get_drvdata(dev);
struct aha1740_hostdata *host = HOSTDATA (shpnt);
scsi_remove_host(shpnt);
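
dev_get_drvdata() above replaces a direct read of dev->driver_data, keeping the driver insulated from changes to struct device. A minimal sketch of the usual set/get pairing across probe and remove, where struct my_state and the probe/remove names are hypothetical:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_state { int opened; };

static int my_probe(struct device *dev)
{
        struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

        if (!st)
                return -ENOMEM;
        dev_set_drvdata(dev, st);       /* stash per-device state */
        return 0;
}

static int my_remove(struct device *dev)
{
        struct my_state *st = dev_get_drvdata(dev);     /* fetch it back */

        kfree(st);
        return 0;
}
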
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
new file mode 100644
index 00000000000..2fceb19eb27
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -0,0 +1,155 @@
+/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+#ifndef __57XX_ISCSI_CONSTANTS_H_
+#define __57XX_ISCSI_CONSTANTS_H_
+
+/**
+* This file defines HSI constants for the iSCSI flows
+*/
+
+/* iSCSI request op codes */
+#define ISCSI_OPCODE_CLEANUP_REQUEST (7)
+
+/* iSCSI response/messages op codes */
+#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27)
+#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0)
+
+/* iSCSI task types */
+#define ISCSI_TASK_TYPE_READ (0)
+#define ISCSI_TASK_TYPE_WRITE (1)
+#define ISCSI_TASK_TYPE_MPATH (2)
+
+/* initial CQ sequence numbers */
+#define ISCSI_INITIAL_SN (1)
+
+/* KWQ (kernel work queue) layer codes */
+#define ISCSI_KWQE_LAYER_CODE (6)
+
+/* KWQ (kernel work queue) request op codes */
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
+#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2)
+#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3)
+#define ISCSI_KWQE_OPCODE_INIT1 (4)
+#define ISCSI_KWQE_OPCODE_INIT2 (5)
+
+/* KCQ (kernel completion queue) response op codes */
+#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10)
+#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12)
+#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13)
+#define ISCSI_KCQE_OPCODE_INIT (0x14)
+#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15)
+#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16)
+#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17)
+#define ISCSI_KCQE_OPCODE_TCP_FIN (0X18)
+#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19)
+#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
+#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3)
+#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5)
+#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe)
+
+/* Response */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17)
+
+/* Data-In */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d)
+
+/* R2T */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
+
+/* TMF */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b)
+
+/* IP/TCP processing errors: */
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43)
+
+/* iSCSI licensing errors */
+/* general iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50)
+/* additional LOM specific iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
+
+/* SQ/RQ/CQ DB structure sizes */
+#define ISCSI_SQ_DB_SIZE (16)
+#define ISCSI_RQ_DB_SIZE (16)
+#define ISCSI_CQ_DB_SIZE (80)
+
+#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF
+
+/* Page size codes (for flags field in connection offload request) */
+#define ISCSI_PAGE_SIZE_256 (0)
+#define ISCSI_PAGE_SIZE_512 (1)
+#define ISCSI_PAGE_SIZE_1K (2)
+#define ISCSI_PAGE_SIZE_2K (3)
+#define ISCSI_PAGE_SIZE_4K (4)
+#define ISCSI_PAGE_SIZE_8K (5)
+#define ISCSI_PAGE_SIZE_16K (6)
+#define ISCSI_PAGE_SIZE_32K (7)
+#define ISCSI_PAGE_SIZE_64K (8)
+#define ISCSI_PAGE_SIZE_128K (9)
+#define ISCSI_PAGE_SIZE_256K (10)
+#define ISCSI_PAGE_SIZE_512K (11)
+#define ISCSI_PAGE_SIZE_1M (12)
+#define ISCSI_PAGE_SIZE_2M (13)
+#define ISCSI_PAGE_SIZE_4M (14)
+#define ISCSI_PAGE_SIZE_8M (15)
+
+/* Iscsi PDU related defines */
+#define ISCSI_HEADER_SIZE (48)
+#define ISCSI_DIGEST_SHIFT (2)
+#define ISCSI_DIGEST_SIZE (4)
+
+#define B577XX_ISCSI_CONNECTION_TYPE 3
+
+#endif /*__57XX_ISCSI_CONSTANTS_H_ */
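
The ISCSI_PAGE_SIZE_* codes above follow a simple pattern inferred from the listed values: each code is log2(size in bytes) minus 8, so 256 bytes maps to 0 and 8M to 15. A small helper illustrating that relationship (the name iscsi_page_size_code() is made up for this sketch):

#include <linux/kernel.h>
#include <linux/log2.h>

static int iscsi_page_size_code(unsigned long bytes)
{
        /* only powers of two between 256 bytes and 8M are encodable */
        if (!is_power_of_2(bytes) || bytes < 256 || bytes > (8UL << 20))
                return -1;

        return ilog2(bytes) - 8;        /* 256 -> 0, 4K -> 4, ..., 8M -> 15 */
}
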
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
new file mode 100644
index 00000000000..36af1afef9b
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -0,0 +1,1509 @@
+/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+#ifndef __57XX_ISCSI_HSI_LINUX_LE__
+#define __57XX_ISCSI_HSI_LINUX_LE__
+
+/*
+ * iSCSI Async CQE
+ */
+struct bnx2i_async_msg {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 reserved1;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 reserved1;
+ u8 op_code;
+#endif
+ u32 reserved2;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved5;
+ u8 err_code;
+ u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved4;
+ u8 err_code;
+ u16 reserved5;
+#endif
+ u32 reserved6;
+ u32 lun[2];
+#if defined(__BIG_ENDIAN)
+ u8 async_event;
+ u8 async_vcode;
+ u16 param1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 param1;
+ u8 async_vcode;
+ u8 async_event;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 param2;
+ u16 param3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 param3;
+ u16 param2;
+#endif
+ u32 reserved7[3];
+ u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Buffer Descriptor (BD)
+ */
+struct iscsi_bd {
+ u32 buffer_addr_hi;
+ u32 buffer_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 buffer_length;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buffer_length;
+ u16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+#elif defined(__LITTLE_ENDIAN)
+ u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+ u16 reserved3;
+#endif
+};
+
+
+/*
+ * iSCSI Cleanup SQ WQE
+ */
+struct bnx2i_cleanup_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 reserved1;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 reserved1;
+ u8 op_code;
+#endif
+ u32 reserved2[3];
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+ u16 reserved3;
+#endif
+ u32 reserved4[10];
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved6;
+ u16 reserved5;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved5;
+ u8 reserved6;
+ u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Cleanup CQE
+ */
+struct bnx2i_cleanup_response {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 status;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 status;
+ u8 op_code;
+#endif
+ u32 reserved1[3];
+ u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved4;
+ u8 err_code;
+ u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved3;
+ u8 err_code;
+ u16 reserved4;
+#endif
+ u32 reserved5[7];
+#if defined(__BIG_ENDIAN)
+ u16 reserved6;
+ u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+ u16 reserved6;
+#endif
+ u32 cq_req_sn;
+};
+
+
+/*
+ * SCSI read/write SQ WQE
+ */
+struct bnx2i_cmd_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+ u8 op_code;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 ud_buffer_offset;
+ u16 sd_buffer_offset;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sd_buffer_offset;
+ u16 ud_buffer_offset;
+#endif
+ u32 lun[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved2;
+ u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+ u16 reserved2;
+#endif
+ u32 total_data_transfer_length;
+ u32 cmd_sn;
+ u32 reserved3;
+ u32 cdb[4];
+ u32 zero_fill;
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 sd_start_bd_index;
+ u8 ud_start_bd_index;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 ud_start_bd_index;
+ u8 sd_start_bd_index;
+ u8 cq_index;
+#endif
+};
+
+
+/*
+ * task statistics for write response
+ */
+struct bnx2i_write_resp_task_stat {
+ u32 num_data_ins;
+};
+
+/*
+ * task statistics for read response
+ */
+struct bnx2i_read_resp_task_stat {
+#if defined(__BIG_ENDIAN)
+ u16 num_data_outs;
+ u16 num_r2ts;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_r2ts;
+ u16 num_data_outs;
+#endif
+};
+
+/*
+ * task statistics for iSCSI cmd response
+ */
+union bnx2i_cmd_resp_task_stat {
+ struct bnx2i_write_resp_task_stat write_stat;
+ struct bnx2i_read_resp_task_stat read_stat;
+};
+
+/*
+ * SCSI Command CQE
+ */
+struct bnx2i_cmd_response {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+ u8 response;
+ u8 status;
+#elif defined(__LITTLE_ENDIAN)
+ u8 status;
+ u8 response;
+ u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 reserved2;
+ u32 residual_count;
+#if defined(__BIG_ENDIAN)
+ u16 reserved4;
+ u8 err_code;
+ u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved3;
+ u8 err_code;
+ u16 reserved4;
+#endif
+ u32 reserved5[5];
+ union bnx2i_cmd_resp_task_stat task_stat;
+ u32 reserved6;
+#if defined(__BIG_ENDIAN)
+ u16 reserved7;
+ u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+ u16 reserved7;
+#endif
+ u32 cq_req_sn;
+};
+
+
+
+/*
+ * firmware middle-path request SQ WQE
+ */
+struct bnx2i_fw_mp_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+ u16 hdr_opaque1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 hdr_opaque1;
+ u8 op_attr;
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 hdr_opaque2[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+ u16 reserved0;
+#endif
+ u32 hdr_opaque3[4];
+ u32 resp_bd_list_addr_lo;
+ u32 resp_bd_list_addr_hi;
+ u32 resp_buffer;
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+ u16 reserved4;
+ u8 reserved3;
+ u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+ u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+ u8 reserved3;
+ u16 reserved4;
+#endif
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved6;
+ u8 reserved5;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 reserved5;
+ u8 reserved6;
+ u8 cq_index;
+#endif
+};
+
+
+/*
+ * firmware response - CQE: used only by firmware
+ */
+struct bnx2i_fw_response {
+ u32 hdr_dword1[2];
+ u32 hdr_exp_cmd_sn;
+ u32 hdr_max_cmd_sn;
+ u32 hdr_ttt;
+ u32 hdr_res_cnt;
+ u32 cqe_flags;
+#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
+#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
+#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
+#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
+#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
+#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
+ u32 stat_sn;
+ u32 hdr_dword2[2];
+ u32 hdr_dword3[2];
+ u32 task_stat;
+ u32 reserved0;
+ u32 hdr_itt;
+ u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI KCQ CQE parameters
+ */
+union iscsi_kcqe_params {
+ u32 reserved0[4];
+};
+
+/*
+ * iSCSI KCQ CQE
+ */
+struct iscsi_kcqe {
+ u32 iscsi_conn_id;
+ u32 completion_status;
+ u32 iscsi_conn_context_id;
+ union iscsi_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+#endif
+};
+
+
+
+/*
+ * iSCSI KWQE header
+ */
+struct iscsi_kwqe_header {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+ u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+ u8 op_code;
+ u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * iSCSI firmware init request 1
+ */
+struct iscsi_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u8 reserved0;
+ u8 num_cqs;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_cqs;
+ u8 reserved0;
+ struct iscsi_kwqe_header hdr;
+#endif
+ u32 dummy_buffer_addr_lo;
+ u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 num_ccells_per_conn;
+ u16 num_tasks_per_conn;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_tasks_per_conn;
+ u16 num_ccells_per_conn;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 sq_wqes_per_page;
+ u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_num_wqes;
+ u16 sq_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 cq_log_wqes_per_page;
+ u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+ u16 cq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cq_num_wqes;
+ u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+ u8 cq_log_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 cq_num_pages;
+ u16 sq_num_pages;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_num_pages;
+ u16 cq_num_pages;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 rq_buffer_size;
+ u16 rq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_num_wqes;
+ u16 rq_buffer_size;
+#endif
+};
+
+/*
+ * iSCSI firmware init request 2
+ */
+struct iscsi_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u16 max_cq_sqn;
+#elif defined(__LITTLE_ENDIAN)
+ u16 max_cq_sqn;
+ struct iscsi_kwqe_header hdr;
+#endif
+ u32 error_bit_map[2];
+ u32 reserved1[5];
+};
+
+/*
+ * Initial iSCSI connection offload request 1
+ */
+struct iscsi_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 iscsi_conn_id;
+ struct iscsi_kwqe_header hdr;
+#endif
+ u32 sq_page_table_addr_lo;
+ u32 sq_page_table_addr_hi;
+ u32 cq_page_table_addr_lo;
+ u32 cq_page_table_addr_hi;
+ u32 reserved0[3];
+};
+
+/*
+ * iSCSI Page Table Entry (PTE)
+ */
+struct iscsi_pte {
+ u32 hi;
+ u32 lo;
+};
+
+/*
+ * Initial iSCSI connection offload request 2
+ */
+struct iscsi_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct iscsi_kwqe_header hdr;
+#endif
+ u32 rq_page_table_addr_lo;
+ u32 rq_page_table_addr_hi;
+ struct iscsi_pte sq_first_pte;
+ struct iscsi_pte cq_first_pte;
+ u32 num_additional_wqes;
+};
+
+
+/*
+ * Initial iSCSI connection offload request 3
+ */
+struct iscsi_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct iscsi_kwqe_header hdr;
+#endif
+ u32 reserved1;
+ struct iscsi_pte qp_first_pte[3];
+};
+
+
+/*
+ * iSCSI connection update request
+ */
+struct iscsi_kwqe_conn_update {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct iscsi_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 session_error_recovery_level;
+ u8 max_outstanding_r2ts;
+ u8 reserved2;
+ u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+#elif defined(__LITTLE_ENDIAN)
+ u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+ u8 reserved2;
+ u8 max_outstanding_r2ts;
+ u8 session_error_recovery_level;
+#endif
+ u32 context_id;
+ u32 max_send_pdu_length;
+ u32 max_recv_pdu_length;
+ u32 first_burst_length;
+ u32 max_burst_length;
+ u32 exp_stat_sn;
+};
+
+/*
+ * iSCSI destroy connection request
+ */
+struct iscsi_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+ struct iscsi_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct iscsi_kwqe_header hdr;
+#endif
+ u32 context_id;
+ u32 reserved1[6];
+};
+
+/*
+ * iSCSI KWQ WQE
+ */
+union iscsi_kwqe {
+ struct iscsi_kwqe_init1 init1;
+ struct iscsi_kwqe_init2 init2;
+ struct iscsi_kwqe_conn_offload1 conn_offload1;
+ struct iscsi_kwqe_conn_offload2 conn_offload2;
+ struct iscsi_kwqe_conn_update conn_update;
+ struct iscsi_kwqe_conn_destroy conn_destroy;
+};
+
+/*
+ * iSCSI Login SQ WQE
+ */
+struct bnx2i_login_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+ u8 version_max;
+ u8 version_min;
+#elif defined(__LITTLE_ENDIAN)
+ u8 version_min;
+ u8 version_max;
+ u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+ u16 isid_hi;
+ u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tsih;
+ u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved2;
+ u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+ u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 cid;
+ u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved3;
+ u16 cid;
+#endif
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 reserved4;
+ u32 resp_bd_list_addr_lo;
+ u32 resp_bd_list_addr_hi;
+ u32 resp_buffer;
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+ u16 reserved8;
+ u8 reserved7;
+ u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+ u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+ u8 reserved7;
+ u16 reserved8;
+#endif
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved10;
+ u8 reserved9;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 reserved9;
+ u8 reserved10;
+ u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Login CQE
+ */
+struct bnx2i_login_response {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+ u8 version_max;
+ u8 version_active;
+#elif defined(__LITTLE_ENDIAN)
+ u8 version_active;
+ u8 version_max;
+ u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u8 err_code;
+ u8 reserved2;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved2;
+ u8 err_code;
+ u16 reserved3;
+#endif
+ u32 stat_sn;
+ u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+ u16 isid_hi;
+ u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tsih;
+ u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 status_class;
+ u8 status_detail;
+ u16 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved4;
+ u8 status_detail;
+ u8 status_class;
+#endif
+ u32 reserved5[3];
+#if defined(__BIG_ENDIAN)
+ u16 reserved6;
+ u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+ u16 reserved6;
+#endif
+ u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Logout SQ WQE
+ */
+struct bnx2i_logout_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved2;
+ u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+ u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 cid;
+ u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved3;
+ u16 cid;
+#endif
+ u32 cmd_sn;
+ u32 reserved4[5];
+ u32 zero_fill;
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved6;
+ u8 reserved5;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 reserved5;
+ u8 reserved6;
+ u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Logout CQE
+ */
+struct bnx2i_logout_response {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 reserved1;
+ u8 response;
+ u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved0;
+ u8 response;
+ u8 reserved1;
+ u8 op_code;
+#endif
+ u32 reserved2;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved5;
+ u8 err_code;
+ u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved4;
+ u8 err_code;
+ u16 reserved5;
+#endif
+ u32 reserved6[3];
+#if defined(__BIG_ENDIAN)
+ u16 time_to_wait;
+ u16 time_to_retain;
+#elif defined(__LITTLE_ENDIAN)
+ u16 time_to_retain;
+ u16 time_to_wait;
+#endif
+ u32 reserved7[3];
+#if defined(__BIG_ENDIAN)
+ u16 reserved8;
+ u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+ u16 reserved8;
+#endif
+ u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Nop-In CQE
+ */
+struct bnx2i_nop_in_msg {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 reserved1;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 reserved1;
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 ttt;
+ u32 reserved2;
+#if defined(__BIG_ENDIAN)
+ u16 reserved4;
+ u8 err_code;
+ u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved3;
+ u8 err_code;
+ u16 reserved4;
+#endif
+ u32 reserved5;
+ u32 lun[2];
+ u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+ u16 reserved7;
+ u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+ u16 reserved7;
+#endif
+ u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI NOP-OUT SQ WQE
+ */
+struct bnx2i_nop_out_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 lun[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved2;
+ u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+ u16 reserved2;
+#endif
+ u32 ttt;
+ u32 cmd_sn;
+ u32 reserved3[2];
+ u32 resp_bd_list_addr_lo;
+ u32 resp_bd_list_addr_hi;
+ u32 resp_buffer;
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+ u16 reserved7;
+ u8 reserved6;
+ u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+#elif defined(__LITTLE_ENDIAN)
+ u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+ u8 reserved6;
+ u16 reserved7;
+#endif
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved9;
+ u8 reserved8;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 reserved8;
+ u8 reserved9;
+ u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Reject CQE
+ */
+struct bnx2i_reject_msg {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 reserved1;
+ u8 reason;
+ u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved0;
+ u8 reason;
+ u8 reserved1;
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved4;
+ u8 err_code;
+ u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved3;
+ u8 err_code;
+ u16 reserved4;
+#endif
+ u32 reserved5[8];
+ u32 cq_req_sn;
+};
+
+/*
+ * bnx2i iSCSI TMF SQ WQE
+ */
+struct bnx2i_tmf_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 lun[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+ u16 reserved1;
+#endif
+ u32 ref_itt;
+ u32 cmd_sn;
+ u32 reserved2;
+ u32 ref_cmd_sn;
+ u32 reserved3[3];
+ u32 zero_fill;
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved5;
+ u8 reserved4;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 reserved4;
+ u8 reserved5;
+ u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Text SQ WQE
+ */
+struct bnx2i_text_request {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 lun[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+ u16 reserved3;
+#endif
+ u32 ttt;
+ u32 cmd_sn;
+ u32 reserved4[2];
+ u32 resp_bd_list_addr_lo;
+ u32 resp_bd_list_addr_hi;
+ u32 resp_buffer;
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
+ u32 zero_fill;
+ u32 bd_list_addr_lo;
+ u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u8 cq_index;
+ u8 reserved7;
+ u8 reserved6;
+ u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+ u8 num_bds;
+ u8 reserved6;
+ u8 reserved7;
+ u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI SQ WQE
+ */
+union iscsi_request {
+ struct bnx2i_cmd_request cmd;
+ struct bnx2i_tmf_request tmf;
+ struct bnx2i_nop_out_request nop_out;
+ struct bnx2i_login_request login_req;
+ struct bnx2i_text_request text;
+ struct bnx2i_logout_request logout_req;
+ struct bnx2i_cleanup_request cleanup;
+};
+
+
+/*
+ * iSCSI TMF CQE
+ */
+struct bnx2i_tmf_response {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 reserved1;
+ u8 response;
+ u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved0;
+ u8 response;
+ u8 reserved1;
+ u8 op_code;
+#endif
+ u32 reserved2;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+ u16 reserved5;
+ u8 err_code;
+ u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved4;
+ u8 err_code;
+ u16 reserved5;
+#endif
+ u32 reserved6[7];
+#if defined(__BIG_ENDIAN)
+ u16 reserved7;
+ u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+ u16 reserved7;
+#endif
+ u32 cq_req_sn;
+};
+
+/*
+ * iSCSI Text CQE
+ */
+struct bnx2i_text_response {
+#if defined(__BIG_ENDIAN)
+ u8 op_code;
+ u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+ u8 op_code;
+#endif
+ u32 data_length;
+ u32 exp_cmd_sn;
+ u32 max_cmd_sn;
+ u32 ttt;
+ u32 reserved2;
+#if defined(__BIG_ENDIAN)
+ u16 reserved4;
+ u8 err_code;
+ u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved3;
+ u8 err_code;
+ u16 reserved4;
+#endif
+ u32 reserved5;
+ u32 lun[2];
+ u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+ u16 reserved7;
+ u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+ u16 reserved7;
+#endif
+ u32 cq_req_sn;
+};
+
+/*
+ * iSCSI CQE
+ */
+union iscsi_response {
+ struct bnx2i_cmd_response cmd;
+ struct bnx2i_tmf_response tmf;
+ struct bnx2i_login_response login_resp;
+ struct bnx2i_text_response text;
+ struct bnx2i_logout_response logout_resp;
+ struct bnx2i_cleanup_response cleanup;
+ struct bnx2i_reject_msg reject;
+ struct bnx2i_async_msg async;
+ struct bnx2i_nop_in_msg nop_in;
+};
+
+#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
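
Throughout the header above, packed fields are described by paired FIELD and FIELD_SHIFT macros rather than C bitfields: the mask selects the bits, the shift positions the value. A minimal sketch of how such a pair is used, reusing the ISCSI_CMD_REQUEST itt definitions shown above (pack_itt()/unpack_index() are hypothetical helpers):

#include <linux/types.h>

#define ISCSI_CMD_REQUEST_INDEX         (0x3FFF << 0)
#define ISCSI_CMD_REQUEST_INDEX_SHIFT   0
#define ISCSI_CMD_REQUEST_TYPE          (0x3 << 14)
#define ISCSI_CMD_REQUEST_TYPE_SHIFT    14

static u16 pack_itt(u16 index, u16 type)
{
        /* shift each value into place, then mask it to its field */
        return ((index << ISCSI_CMD_REQUEST_INDEX_SHIFT) & ISCSI_CMD_REQUEST_INDEX) |
               ((type << ISCSI_CMD_REQUEST_TYPE_SHIFT) & ISCSI_CMD_REQUEST_TYPE);
}

static u16 unpack_index(u16 itt)
{
        /* mask first, then shift the field down to bit 0 */
        return (itt & ISCSI_CMD_REQUEST_INDEX) >> ISCSI_CMD_REQUEST_INDEX_SHIFT;
}
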
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
new file mode 100644
index 00000000000..820d428ae83
--- /dev/null
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -0,0 +1,7 @@
+config SCSI_BNX2_ISCSI
+ tristate "Broadcom NetXtreme II iSCSI support"
+ select SCSI_ISCSI_ATTRS
+ select CNIC
+ ---help---
+ This driver supports iSCSI offload for the Broadcom NetXtreme II
+ devices.
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
new file mode 100644
index 00000000000..b5802bd2e76
--- /dev/null
+++ b/drivers/scsi/bnx2i/Makefile
@@ -0,0 +1,3 @@
+bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
+
+obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
new file mode 100644
index 00000000000..d7576f28c6e
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -0,0 +1,771 @@
+/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#ifndef _BNX2I_H_
+#define _BNX2I_H_
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/kfifo.h>
+#include <linux/netdevice.h>
+#include <linux/completion.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "../../net/cnic_if.h"
+#include "57xx_iscsi_hsi.h"
+#include "57xx_iscsi_constants.h"
+
+#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
+
+#define BNX2I_MAX_ADAPTERS 8
+
+#define ISCSI_MAX_CONNS_PER_HBA 128
+#define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA
+#define ISCSI_MAX_CMDS_PER_SESS 128
+
+/* Total active commands across all connections supported by devices */
+#define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+
+#define ISCSI_MAX_BDS_PER_CMD 32
+
+#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
+#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
+
+/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
+#define MAX_BD_LENGTH 65535
+#define BD_SPLIT_SIZE 32768
+
+/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
+#define BNX2I_SQ_WQES_MIN 16
+#define BNX2I_570X_SQ_WQES_MAX 128
+#define BNX2I_5770X_SQ_WQES_MAX 512
+#define BNX2I_570X_SQ_WQES_DEFAULT 128
+#define BNX2I_5770X_SQ_WQES_DEFAULT 256
+
+#define BNX2I_570X_CQ_WQES_MAX 128
+#define BNX2I_5770X_CQ_WQES_MAX 512
+
+#define BNX2I_RQ_WQES_MIN 16
+#define BNX2I_RQ_WQES_MAX 32
+#define BNX2I_RQ_WQES_DEFAULT 16
+
+/* CCELLs per conn */
+#define BNX2I_CCELLS_MIN 16
+#define BNX2I_CCELLS_MAX 96
+#define BNX2I_CCELLS_DEFAULT 64
+
+#define ITT_INVALID_SIGNATURE 0xFFFF
+
+#define ISCSI_CMD_CLEANUP_TIMEOUT 100
+
+#define BNX2I_CONN_CTX_BUF_SIZE 16384
+
+#define BNX2I_SQ_WQE_SIZE 64
+#define BNX2I_RQ_WQE_SIZE 256
+#define BNX2I_CQE_SIZE 64
+
+#define MB_KERNEL_CTX_SHIFT 8
+#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT)
+
+#define CTX_SHIFT 7
+#define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT)
+
+#define CTX_OFFSET 0x10000
+#define MAX_CID_CNT 0x4000
+
+/* 5709 context registers */
+#define BNX2_MQ_CONFIG2 0x00003d00
+#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4)
+#define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8)
+
+/* 57710's BAR2 is mapped to doorbell registers */
+#define BNX2X_DOORBELL_PCI_BAR 2
+#define BNX2X_MAX_CQS 8
+
+#define CNIC_ARM_CQE 1
+#define CNIC_DISARM_CQE 0
+
+#define REG_RD(__hba, offset) \
+ readl(__hba->regview + offset)
+#define REG_WR(__hba, offset, val) \
+ writel(val, __hba->regview + offset)
+
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf: driver buffer used to stage payload associated with
+ * the login request
+ * @req_dma_addr: dma address for iscsi login request payload buffer
+ * @req_buf_size: actual login request payload length
+ * @req_wr_ptr: pointer into login request buffer when next data is
+ * to be written
+ * @resp_hdr: iscsi header where iscsi login response header is to
+ * be recreated
+ * @resp_buf: buffer to stage login response payload
+ * @resp_dma_addr: login response payload buffer dma address
+ * @resp_buf_size: login response payload length
+ * @resp_wr_ptr: pointer into login response buffer when next data is
+ * to be written
+ * @req_bd_tbl: iscsi login request payload BD table
+ * @req_bd_dma: login request BD table dma address
+ * @resp_bd_tbl: iscsi login response payload BD table
+ * @resp_bd_dma: login response BD table dma address
+ *
+ * following structure defines buffer info for generic pdus such as iSCSI Login,
+ * Logout and NOP
+ */
+struct generic_pdu_resc {
+ char *req_buf;
+ dma_addr_t req_dma_addr;
+ u32 req_buf_size;
+ char *req_wr_ptr;
+ struct iscsi_hdr resp_hdr;
+ char *resp_buf;
+ dma_addr_t resp_dma_addr;
+ u32 resp_buf_size;
+ char *resp_wr_ptr;
+ char *req_bd_tbl;
+ dma_addr_t req_bd_dma;
+ char *resp_bd_tbl;
+ dma_addr_t resp_bd_dma;
+};
+
+
+/**
+ * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
+ *
+ * @link: list head to link elements
+ * @max_ptrs: maximum pointers that can be stored in this page
+ * @num_valid: number of valid pointers in this page
+ * @page: base address for page pointer array
+ *
+ * structure to track DMA'able memory allocated for command BD tables
+ */
+struct bd_resc_page {
+ struct list_head link;
+ u32 max_ptrs;
+ u32 num_valid;
+ void *page[1];
+};
+
+
+/**
+ * struct io_bdt - I/O buffer descriptor table
+ *
+ * @bd_tbl: BD table's virtual address
+ * @bd_tbl_dma: BD table's dma address
+ * @bd_valid: num valid BD entries
+ *
+ * IO BD table
+ */
+struct io_bdt {
+ struct iscsi_bd *bd_tbl;
+ dma_addr_t bd_tbl_dma;
+ u16 bd_valid;
+};
+
+
+/**
+ * bnx2i_cmd - iscsi command structure
+ *
+ * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd
+ * @sg: SG list
+ * @io_tbl: buffer descriptor (BD) table
+ * @bd_tbl_dma: buffer descriptor (BD) table's dma address
+ */
+struct bnx2i_cmd {
+ struct iscsi_hdr hdr;
+ struct bnx2i_conn *conn;
+ struct scsi_cmnd *scsi_cmd;
+ struct scatterlist *sg;
+ struct io_bdt io_tbl;
+ dma_addr_t bd_tbl_dma;
+ struct bnx2i_cmd_request req;
+};
+
+
+/**
+ * struct bnx2i_conn - iscsi connection structure
+ *
+ * @cls_conn: pointer to iscsi cls conn
+ * @hba: adapter structure pointer
+ * @iscsi_conn_cid: iscsi conn id
+ * @fw_cid: firmware iscsi context id
+ * @ep: endpoint structure pointer
+ * @gen_pdu: login/nopout/logout pdu resources
+ * @violation_notified: bit mask used to track iscsi error/warning messages
+ * already printed out
+ *
+ * iSCSI connection structure
+ */
+struct bnx2i_conn {
+ struct iscsi_cls_conn *cls_conn;
+ struct bnx2i_hba *hba;
+ struct completion cmd_cleanup_cmpl;
+ int is_bound;
+
+ u32 iscsi_conn_cid;
+#define BNX2I_CID_RESERVED 0x5AFF
+ u32 fw_cid;
+
+ struct timer_list poll_timer;
+ /*
+ * Queue Pair (QP) related structure elements.
+ */
+ struct bnx2i_endpoint *ep;
+
+ /*
+ * Buffer for login negotiation process
+ */
+ struct generic_pdu_resc gen_pdu;
+ u64 violation_notified;
+};
+
+
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base: queue base memory
+ * @cid_que: queue memory pointer
+ * @cid_q_prod_idx: producer index
+ * @cid_q_cons_idx: consumer index
+ * @cid_q_max_idx: max index, used to detect wrap around condition
+ * @cid_free_cnt: queue size
+ * @conn_cid_tbl: iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+ void *cid_que_base;
+ u32 *cid_que;
+ u32 cid_q_prod_idx;
+ u32 cid_q_cons_idx;
+ u32 cid_q_max_idx;
+ u32 cid_free_cnt;
+ struct bnx2i_conn **conn_cid_tbl;
+};
+
+/**
+ * struct bnx2i_hba - bnx2i adapter structure
+ *
+ * @link: list head to link elements
+ * @cnic: pointer to cnic device
+ * @pcidev: pointer to pci dev
+ * @netdev: pointer to netdev structure
+ * @regview: mapped PCI register space
+ * @age: age, incremented on every recovery
+ * @cnic_dev_type: cnic device type, 5706/5708/5709/57710
+ * @mail_queue_access: mailbox queue access mode, applicable to 5709 only
+ * @reg_with_cnic: indicates whether the device is registered with CNIC
+ * @adapter_state: adapter state, UP, GOING_DOWN, LINK_DOWN
+ * @mtu_supported: Ethernet MTU supported
+ * @shost: scsi host pointer
+ * @max_sqes: SQ size
+ * @max_rqes: RQ size
+ * @max_cqes: CQ size
+ * @num_ccell: number of command cells per connection
+ * @ofld_conns_active: count of currently active offloaded connections
+ * @max_active_conns: max offload connections supported by this device
+ * @cid_que: iscsi cid queue
+ * @ep_rdwr_lock: read / write lock to synchronize various ep lists
+ * @ep_ofld_list: connection list for pending offload completion
+ * @ep_destroy_list: connection list for pending destroy completion
+ * @mp_bd_tbl: BD table to be used with middle path requests
+ * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer
+ * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs
+ * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer
+ * @lock: lock to synchronize access to hba structure
+ * @net_dev_lock: mutex to synchronize net device access
+ * @pci_did: PCI device ID
+ * @pci_vid: PCI vendor ID
+ * @pci_sdid: PCI subsystem device ID
+ * @pci_svid: PCI subsystem vendor ID
+ * @pci_func: PCI function number in system pci tree
+ * @pci_devno: PCI device number in system pci tree
+ * @num_wqe_sent: statistic counter, total wqe's sent
+ * @num_cqe_rcvd: statistic counter, total cqe's received
+ * @num_intr_claimed: statistic counter, total interrupts claimed
+ * @link_changed_count: statistic counter, num of link change notifications
+ * received
+ * @ipaddr_changed_count: statistic counter, num times IP address changed while
+ * at least one connection is offloaded
+ * @num_sess_opened: statistic counter, total num sessions opened
+ * @num_conn_opened: statistic counter, total num conns opened on this hba
+ * @ctx_ccell_tasks: captures number of ccells and tasks supported by
+ * currently offloaded connection, used to decode
+ * context memory
+ *
+ * Adapter Data Structure
+ */
+struct bnx2i_hba {
+ struct list_head link;
+ struct cnic_dev *cnic;
+ struct pci_dev *pcidev;
+ struct net_device *netdev;
+ void __iomem *regview;
+
+ u32 age;
+ unsigned long cnic_dev_type;
+ #define BNX2I_NX2_DEV_5706 0x0
+ #define BNX2I_NX2_DEV_5708 0x1
+ #define BNX2I_NX2_DEV_5709 0x2
+ #define BNX2I_NX2_DEV_57710 0x3
+ u32 mail_queue_access;
+ #define BNX2I_MQ_KERNEL_MODE 0x0
+ #define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1
+ #define BNX2I_MQ_BIN_MODE 0x2
+ unsigned long reg_with_cnic;
+ #define BNX2I_CNIC_REGISTERED 1
+
+ unsigned long adapter_state;
+ #define ADAPTER_STATE_UP 0
+ #define ADAPTER_STATE_GOING_DOWN 1
+ #define ADAPTER_STATE_LINK_DOWN 2
+ #define ADAPTER_STATE_INIT_FAILED 31
+ unsigned int mtu_supported;
+ #define BNX2I_MAX_MTU_SUPPORTED 1500
+
+ struct Scsi_Host *shost;
+
+ u32 max_sqes;
+ u32 max_rqes;
+ u32 max_cqes;
+ u32 num_ccell;
+
+ int ofld_conns_active;
+
+ int max_active_conns;
+ struct iscsi_cid_queue cid_que;
+
+ rwlock_t ep_rdwr_lock;
+ struct list_head ep_ofld_list;
+ struct list_head ep_destroy_list;
+
+ /*
+ * BD table to be used with MP (Middle Path) requests.
+ */
+ char *mp_bd_tbl;
+ dma_addr_t mp_bd_dma;
+ char *dummy_buffer;
+ dma_addr_t dummy_buf_dma;
+
+ spinlock_t lock; /* protects hba structure access */
+ struct mutex net_dev_lock;/* sync net device access */
+
+ /*
+ * PCI related info.
+ */
+ u16 pci_did;
+ u16 pci_vid;
+ u16 pci_sdid;
+ u16 pci_svid;
+ u16 pci_func;
+ u16 pci_devno;
+
+ /*
+ * Statistics counters, useful during development and at a later
+ * stage for score boarding.
+ */
+ u32 num_wqe_sent;
+ u32 num_cqe_rcvd;
+ u32 num_intr_claimed;
+ u32 link_changed_count;
+ u32 ipaddr_changed_count;
+ u32 num_sess_opened;
+ u32 num_conn_opened;
+ unsigned int ctx_ccell_tasks;
+};
+
+
+/*******************************************************************************
+ * QP [ SQ / RQ / CQ ] info.
+ ******************************************************************************/
+
+/*
+ * SQ/RQ/CQ generic structure definition
+ */
+struct sqe {
+ u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
+};
+
+struct rqe {
+ u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
+};
+
+struct cqe {
+ u8 cqe_byte[BNX2I_CQE_SIZE];
+};
+
+
+enum {
+#if defined(__LITTLE_ENDIAN)
+ CNIC_EVENT_COAL_INDEX = 0x0,
+ CNIC_SEND_DOORBELL = 0x4,
+ CNIC_EVENT_CQ_ARM = 0x7,
+ CNIC_RECV_DOORBELL = 0x8
+#elif defined(__BIG_ENDIAN)
+ CNIC_EVENT_COAL_INDEX = 0x2,
+ CNIC_SEND_DOORBELL = 0x6,
+ CNIC_EVENT_CQ_ARM = 0x4,
+ CNIC_RECV_DOORBELL = 0xa
+#endif
+};
+
+
+/*
+ * CQ DB
+ */
+struct bnx2x_iscsi_cq_pend_cmpl {
+ /* CQ producer, updated by Ustorm */
+ u16 ustrom_prod;
+ /* CQ pending completion counter */
+ u16 pend_cntr;
+};
+
+
+struct bnx2i_5771x_cq_db {
+ struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
+ /* CQ pending completion ITT array */
+ u16 itt[BNX2X_MAX_CQS];
+ /* Cstorm CQ sequence to notify array, updated by driver */
+ u16 sqn[BNX2X_MAX_CQS];
+ u32 reserved[4] /* 16 byte alignment */;
+};
+
+
+struct bnx2i_5771x_sq_rq_db {
+ u16 prod_idx;
+ u8 reserved0[14]; /* Pad structure size to 16 bytes */
+};
+
+
+struct bnx2i_5771x_dbell_hdr {
+ u8 header;
+ /* 1 for rx doorbell, 0 for tx doorbell */
+#define B577XX_DOORBELL_HDR_RX (0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT 0
+ /* 0 for normal doorbell, 1 for advertise wnd doorbell */
+#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
+ /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
+#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
+ /* connection type */
+#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+struct bnx2i_5771x_dbell {
+ struct bnx2i_5771x_dbell_hdr dbell;
+ u8 pad[3];
+
+};
+
+/**
+ * struct qp_info - QP (shared queue region) attributes structure
+ *
+ * @ctx_base: ioremapped pci register base to access doorbell register
+ * pertaining to this offloaded connection
+ * @sq_virt: virtual address of send queue (SQ) region
+ * @sq_phys: DMA address of SQ memory region
+ * @sq_mem_size: SQ size
+ * @sq_prod_qe: SQ producer entry pointer
+ * @sq_cons_qe: SQ consumer entry pointer
+ * @sq_first_qe: virtual address of first entry in SQ
+ * @sq_last_qe: virtual address of last entry in SQ
+ * @sq_prod_idx: SQ producer index
+ * @sq_cons_idx: SQ consumer index
+ * @sqe_left: number of SQ entries left
+ * @sq_pgtbl_virt: page table describing buffers constituting SQ region
+ * @sq_pgtbl_phys: dma address of 'sq_pgtbl_virt'
+ * @sq_pgtbl_size: SQ page table size
+ * @cq_virt: virtual address of completion queue (CQ) region
+ * @cq_phys: DMA address of CQ memory region
+ * @cq_mem_size: CQ size
+ * @cq_prod_qe: CQ producer entry pointer
+ * @cq_cons_qe: CQ consumer entry pointer
+ * @cq_first_qe: virtual address of first entry in CQ
+ * @cq_last_qe: virtual address of last entry in CQ
+ * @cq_prod_idx: CQ producer index
+ * @cq_cons_idx: CQ consumer index
+ * @cqe_left: number of CQ entries left
+ * @cqe_size: CQ depth (number of CQ entries)
+ * @cqe_exp_seq_sn: next expected CQE sequence number
+ * @cq_pgtbl_virt: page table describing buffers constituting CQ region
+ * @cq_pgtbl_phys: dma address of 'cq_pgtbl_virt'
+ * @cq_pgtbl_size: CQ page table size
+ * @rq_virt: virtual address of receive queue (RQ) region
+ * @rq_phys: DMA address of RQ memory region
+ * @rq_mem_size: RQ size
+ * @rq_prod_qe: RQ producer entry pointer
+ * @rq_cons_qe: RQ consumer entry pointer
+ * @rq_first_qe: virtual address of first entry in RQ
+ * @rq_last_qe: virtual address of last entry in RQ
+ * @rq_prod_idx: RQ producer index
+ * @rq_cons_idx: RQ consumer index
+ * @rqe_left: number of RQ entries left
+ * @rq_pgtbl_virt: page table describing buffers constituting RQ region
+ * @rq_pgtbl_phys: dma address of 'rq_pgtbl_virt'
+ * @rq_pgtbl_size: RQ page table size
+ *
+ * A queue pair (QP) is a per-connection shared data structure which is used
+ * to send work requests (SQ), receive completion notifications (CQ)
+ * and receive asynchronous / scsi sense info (RQ). The 'qp_info' structure
+ * below holds queue memory, consumer/producer indexes and page table
+ * information
+ */
+struct qp_info {
+ void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE 0x40
+
+#define BNX2I_570x_QUE_DB_SIZE 0
+#define BNX2I_5771x_QUE_DB_SIZE 16
+ struct sqe *sq_virt;
+ dma_addr_t sq_phys;
+ u32 sq_mem_size;
+
+ struct sqe *sq_prod_qe;
+ struct sqe *sq_cons_qe;
+ struct sqe *sq_first_qe;
+ struct sqe *sq_last_qe;
+ u16 sq_prod_idx;
+ u16 sq_cons_idx;
+ u32 sqe_left;
+
+ void *sq_pgtbl_virt;
+ dma_addr_t sq_pgtbl_phys;
+ u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
+
+ struct cqe *cq_virt;
+ dma_addr_t cq_phys;
+ u32 cq_mem_size;
+
+ struct cqe *cq_prod_qe;
+ struct cqe *cq_cons_qe;
+ struct cqe *cq_first_qe;
+ struct cqe *cq_last_qe;
+ u16 cq_prod_idx;
+ u16 cq_cons_idx;
+ u32 cqe_left;
+ u32 cqe_size;
+ u32 cqe_exp_seq_sn;
+
+ void *cq_pgtbl_virt;
+ dma_addr_t cq_pgtbl_phys;
+ u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
+
+ struct rqe *rq_virt;
+ dma_addr_t rq_phys;
+ u32 rq_mem_size;
+
+ struct rqe *rq_prod_qe;
+ struct rqe *rq_cons_qe;
+ struct rqe *rq_first_qe;
+ struct rqe *rq_last_qe;
+ u16 rq_prod_idx;
+ u16 rq_cons_idx;
+ u32 rqe_left;
+
+ void *rq_pgtbl_virt;
+ dma_addr_t rq_pgtbl_phys;
+ u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
+};
+
+
+
+/*
+ * CID handles
+ */
+struct ep_handles {
+ u32 fw_cid;
+ u32 drv_iscsi_cid;
+ u16 pg_cid;
+ u16 rsvd;
+};
+
+
+enum {
+ EP_STATE_IDLE = 0x0,
+ EP_STATE_PG_OFLD_START = 0x1,
+ EP_STATE_PG_OFLD_COMPL = 0x2,
+ EP_STATE_OFLD_START = 0x4,
+ EP_STATE_OFLD_COMPL = 0x8,
+ EP_STATE_CONNECT_START = 0x10,
+ EP_STATE_CONNECT_COMPL = 0x20,
+ EP_STATE_ULP_UPDATE_START = 0x40,
+ EP_STATE_ULP_UPDATE_COMPL = 0x80,
+ EP_STATE_DISCONN_START = 0x100,
+ EP_STATE_DISCONN_COMPL = 0x200,
+ EP_STATE_CLEANUP_START = 0x400,
+ EP_STATE_CLEANUP_CMPL = 0x800,
+ EP_STATE_TCP_FIN_RCVD = 0x1000,
+ EP_STATE_TCP_RST_RCVD = 0x2000,
+ EP_STATE_PG_OFLD_FAILED = 0x1000000,
+ EP_STATE_ULP_UPDATE_FAILED = 0x2000000,
+ EP_STATE_CLEANUP_FAILED = 0x4000000,
+ EP_STATE_OFLD_FAILED = 0x8000000,
+ EP_STATE_CONNECT_FAILED = 0x10000000,
+ EP_STATE_DISCONN_TIMEDOUT = 0x20000000,
+};
+
+/**
+ * struct bnx2i_endpoint - representation of tcp connection in NX2 world
+ *
+ * @link: list head to link elements
+ * @hba: adapter to which this connection belongs
+ * @conn: iscsi connection this EP is linked to
+ * @sess: iscsi session this EP is linked to
+ * @cm_sk: cnic sock struct
+ * @hba_age: age to detect if 'iscsid' issues ep_disconnect()
+ * after HBA reset is completed by bnx2i/cnic/bnx2
+ * modules
+ * @state: tracks offload connection state machine
+ * @num_active_cmds: tracks SCSI commands outstanding on this endpoint
+ * @qp: QP information
+ * @ids: contains chip allocated *context id* & driver assigned
+ * *iscsi cid*
+ * @ofld_timer: offload timer to detect timeout
+ * @ofld_wait: wait queue
+ *
+ * Endpoint Structure - equivalent of tcp socket structure
+ */
+struct bnx2i_endpoint {
+ struct list_head link;
+ struct bnx2i_hba *hba;
+ struct bnx2i_conn *conn;
+ struct cnic_sock *cm_sk;
+ u32 hba_age;
+ u32 state;
+ unsigned long timestamp;
+ int num_active_cmds;
+
+ struct qp_info qp;
+ struct ep_handles ids;
+ #define ep_iscsi_cid ids.drv_iscsi_cid
+ #define ep_cid ids.fw_cid
+ #define ep_pg_cid ids.pg_cid
+ struct timer_list ofld_timer;
+ wait_queue_head_t ofld_wait;
+};
+
+
+
+/* Global variables */
+extern unsigned int error_mask1, error_mask2;
+extern u64 iscsi_error_mask;
+extern unsigned int en_tcp_dack;
+extern unsigned int event_coal_div;
+
+extern struct scsi_transport_template *bnx2i_scsi_xport_template;
+extern struct iscsi_transport bnx2i_iscsi_transport;
+extern struct cnic_ulp_ops bnx2i_cnic_cb;
+
+extern unsigned int sq_size;
+extern unsigned int rq_size;
+
+extern struct device_attribute *bnx2i_dev_attributes[];
+
+
+
+/*
+ * Function Prototypes
+ */
+extern void bnx2i_identify_device(struct bnx2i_hba *hba);
+extern void bnx2i_register_device(struct bnx2i_hba *hba);
+
+extern void bnx2i_ulp_init(struct cnic_dev *dev);
+extern void bnx2i_ulp_exit(struct cnic_dev *dev);
+extern void bnx2i_start(void *handle);
+extern void bnx2i_stop(void *handle);
+extern void bnx2i_reg_dev_all(void);
+extern void bnx2i_unreg_dev_all(void);
+extern struct bnx2i_hba *get_adapter_list_head(void);
+
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+ u16 iscsi_cid);
+
+int bnx2i_alloc_ep_pool(void);
+void bnx2i_release_ep_pool(void);
+struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
+struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
+
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
+
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
+void bnx2i_free_hba(struct bnx2i_hba *hba);
+
+void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
+void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
+
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
+
+void bnx2i_drop_session(struct iscsi_cls_session *session);
+
+extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
+extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
+ struct bnx2i_cmd *cmnd);
+extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask, u32 ttt,
+ char *datap, int data_len, int unsol);
+extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask);
+extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
+ struct bnx2i_cmd *cmd);
+extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep);
+extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
+extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep);
+
+extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep);
+extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep);
+extern void bnx2i_ep_ofld_timer(unsigned long data);
+extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
+ struct bnx2i_hba *hba, u32 iscsi_cid);
+extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
+ struct bnx2i_hba *hba, u32 iscsi_cid);
+
+extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
+extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
+
+/* Debug related function prototypes */
+extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
+
+#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
new file mode 100644
index 00000000000..906cef5cda8
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -0,0 +1,2405 @@
+/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+/**
+ * bnx2i_get_cid_num - get cid from ep
+ * @ep: endpoint pointer
+ *
+ * 57710 devices use the firmware context id as-is; older devices
+ * extract the cid from it with GET_CID_NUM()
+ */
+static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
+{
+ u32 cid;
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ cid = ep->ep_cid;
+ else
+ cid = GET_CID_NUM(ep->ep_cid);
+ return cid;
+}
+
+
+/**
+ * bnx2i_adjust_qp_size - Adjusts SQ/RQ/CQ sizes to device constraints
+ * @hba: Adapter for which adjustments are to be made
+ *
+ * 5706/5708/5709 queue sizes are rounded down to a power of two, and all
+ * queue sizes are aligned to an integral number of page buffers
+ */
+static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
+{
+ u32 num_elements_per_pg;
+
+ if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
+ test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
+ test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+ if (!is_power_of_2(hba->max_sqes))
+ hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
+
+ if (!is_power_of_2(hba->max_rqes))
+ hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
+ }
+
+ /* Adjust each queue size if the user selection does not
+ * yield integral num of page buffers
+ */
+ /* adjust SQ */
+ num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ if (hba->max_sqes < num_elements_per_pg)
+ hba->max_sqes = num_elements_per_pg;
+ else if (hba->max_sqes % num_elements_per_pg)
+ hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
+ ~(num_elements_per_pg - 1);
+
+ /* adjust CQ */
+ num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
+ if (hba->max_cqes < num_elements_per_pg)
+ hba->max_cqes = num_elements_per_pg;
+ else if (hba->max_cqes % num_elements_per_pg)
+ hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
+ ~(num_elements_per_pg - 1);
+
+ /* adjust RQ */
+ num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+ if (hba->max_rqes < num_elements_per_pg)
+ hba->max_rqes = num_elements_per_pg;
+ else if (hba->max_rqes % num_elements_per_pg)
+ hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
+ ~(num_elements_per_pg - 1);
+}
+
+
+/**
+ * bnx2i_get_link_state - get network interface link state
+ * @hba: adapter instance pointer
+ *
+ * updates adapter structure flag based on netdev state
+ */
+static void bnx2i_get_link_state(struct bnx2i_hba *hba)
+{
+ if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+ set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ else
+ clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_iscsi_license_error - displays iscsi license related error message
+ * @hba: adapter instance pointer
+ * @error_code: error classification
+ *
+ * Puts out an error log when driver is unable to offload iscsi connection
+ * due to license restrictions
+ */
+static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
+{
+ if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
+ /* iSCSI offload not supported on this device */
+ printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
+ hba->netdev->name);
+ if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
+ /* iSCSI offload not supported on this LOM device */
+ printk(KERN_ERR "bnx2i: LOM is not enable to "
+ "offload iSCSI connections, dev=%s\n",
+ hba->netdev->name);
+ set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
+ * @ep: endpoint (transport identifier) structure
+ * @action: action, ARM or DISARM. For now only ARM_CQE is used
+ *
+ * Arming the CQ will enable the chip to generate global EQ events in order to
+ * interrupt the driver. An EQ event is generated when the CQ index is hit or
+ * when at least 1 CQE is outstanding and the on-chip timer expires
+ */
+void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
+{
+ struct bnx2i_5771x_cq_db *cq_db;
+ u16 cq_index;
+
+ if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ return;
+
+ if (action == CNIC_ARM_CQE) {
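+ /* pick the CQ sequence number at which the next EQ event should
+ * fire: the expected CQE sequence number plus a coalescing offset
+ * derived from the number of active commands, wrapped to the CQ
+ * sequence number range (2 * CQ depth + 1)
+ */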
+ cq_index = ep->qp.cqe_exp_seq_sn +
+ ep->num_active_cmds / event_coal_div;
+ cq_index %= (ep->qp.cqe_size * 2 + 1);
+ if (!cq_index) {
+ cq_index = 1;
+ cq_db = (struct bnx2i_5771x_cq_db *)
+ ep->qp.cq_pgtbl_virt;
+ cq_db->sqn[0] = cq_index;
+ }
+ }
+}
+
+
+/**
+ * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
+ * @conn: iscsi connection on which RQ event occurred
+ * @ptr: driver buffer to which RQ buffer contents is to
+ * be copied
+ * @len: length of valid data inside RQ buf
+ *
+ * Copies RQ buffer contents from shared (DMA'able) memory region to
+ * driver buffer. RQ is used to DMA unsolicited iscsi pdu's and
+ * scsi sense info
+ */
+void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
+{
+ if (!bnx2i_conn->ep->qp.rqe_left)
+ return;
+
+ bnx2i_conn->ep->qp.rqe_left--;
+ memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
+ if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
+ bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
+ bnx2i_conn->ep->qp.rq_cons_idx = 0;
+ } else {
+ bnx2i_conn->ep->qp.rq_cons_qe++;
+ bnx2i_conn->ep->qp.rq_cons_idx++;
+ }
+}
+
+
+static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
+{
+ struct bnx2i_5771x_dbell dbell;
+ u32 msg;
+
+ memset(&dbell, 0, sizeof(dbell));
+ dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
+ B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
+ msg = *((u32 *)&dbell);
+ /* TODO : get doorbell register mapping */
+ writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
+}
+
+
+/**
+ * bnx2i_put_rq_buf - Replenish RQ buffers and, if required, ring the chip doorbell
+ * @conn: iscsi connection on which event to post
+ * @count: number of RQ buffer being posted to chip
+ *
+ * No need to ring hardware doorbell for 57710 family of devices
+ */
+void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
+{
+ struct bnx2i_5771x_sq_rq_db *rq_db;
+ u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
+ struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+
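+ /* rq_prod_idx is 15 bits wide; bit 15 acts as a wrap (phase) flag
+ * that is toggled each time the producer index wraps past max_rqes,
+ * presumably so the chip can tell a full queue from an empty one
+ */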
+ ep->qp.rqe_left += count;
+ ep->qp.rq_prod_idx &= 0x7FFF;
+ ep->qp.rq_prod_idx += count;
+
+ if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
+ ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
+ if (!hi_bit)
+ ep->qp.rq_prod_idx |= 0x8000;
+ } else
+ ep->qp.rq_prod_idx |= hi_bit;
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+ rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
+ rq_db->prod_idx = ep->qp.rq_prod_idx;
+ /* no need to ring hardware doorbell for 57710 */
+ } else {
+ writew(ep->qp.rq_prod_idx,
+ ep->qp.ctx_base + CNIC_RECV_DOORBELL);
+ }
+ mmiowb();
+}
+
+
+/**
+ * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
+ * @conn: iscsi connection to which new SQ entries belong
+ * @count: number of SQ WQEs to post
+ *
+ * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
+ * of devices. For 5706/5708/5709 new SQ WQE count is written into the
+ * doorbell register
+ */
+static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
+{
+ struct bnx2i_5771x_sq_rq_db *sq_db;
+ struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+
+ ep->num_active_cmds++;
+ wmb(); /* flush SQ WQE memory before the doorbell is rung */
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+ sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
+ sq_db->prod_idx = ep->qp.sq_prod_idx;
+ bnx2i_ring_577xx_doorbell(bnx2i_conn);
+ } else
+ writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
+
+ mmiowb(); /* flush posted PCI writes */
+}
+
+
+/**
+ * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
+ * @conn: iscsi connection to which new SQ entries belong
+ * @count: number of SQ WQEs to post
+ *
+ * this routine will update SQ driver parameters and ring the doorbell
+ */
+static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
+ int count)
+{
+ int tmp_cnt;
+
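+ /* advance the SQ producer entry pointer by 'count' entries, wrapping
+ * back to the first entry when the end of the queue is reached
+ */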
+ if (count == 1) {
+ if (bnx2i_conn->ep->qp.sq_prod_qe ==
+ bnx2i_conn->ep->qp.sq_last_qe)
+ bnx2i_conn->ep->qp.sq_prod_qe =
+ bnx2i_conn->ep->qp.sq_first_qe;
+ else
+ bnx2i_conn->ep->qp.sq_prod_qe++;
+ } else {
+ if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
+ bnx2i_conn->ep->qp.sq_last_qe)
+ bnx2i_conn->ep->qp.sq_prod_qe += count;
+ else {
+ tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
+ bnx2i_conn->ep->qp.sq_prod_qe;
+ bnx2i_conn->ep->qp.sq_prod_qe =
+ &bnx2i_conn->ep->qp.sq_first_qe[count -
+ (tmp_cnt + 1)];
+ }
+ }
+ bnx2i_conn->ep->qp.sq_prod_idx += count;
+ /* Ring the doorbell */
+ bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
+}
+
+
+/**
+ * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
+ * @conn: iscsi connection
+ * @cmd: driver command structure which is requesting
+ * a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI Login request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *task)
+{
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_login_request *login_wqe;
+ struct iscsi_login *login_hdr;
+ u32 dword;
+
+ bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+ login_hdr = (struct iscsi_login *)task->hdr;
+ login_wqe = (struct bnx2i_login_request *)
+ bnx2i_conn->ep->qp.sq_prod_qe;
+
+ login_wqe->op_code = login_hdr->opcode;
+ login_wqe->op_attr = login_hdr->flags;
+ login_wqe->version_max = login_hdr->max_version;
+ login_wqe->version_min = login_hdr->min_version;
+ login_wqe->data_length = ntoh24(login_hdr->dlength);
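+ /* ISID is 6 bytes; pass the low 4 bytes and the high 2 bytes separately */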
+ login_wqe->isid_lo = *((u32 *) login_hdr->isid);
+ login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
+ login_wqe->tsih = login_hdr->tsih;
+ login_wqe->itt = task->itt |
+ (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
+ login_wqe->cid = login_hdr->cid;
+
+ login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+ login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+
+ login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
+ login_wqe->resp_bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
+
+ dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
+ (bnx2i_conn->gen_pdu.resp_buf_size <<
+ ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+ login_wqe->resp_buffer = dword;
+ login_wqe->flags = 0;
+ login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
+ login_wqe->bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
+ login_wqe->num_bds = 1;
+ login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
+ * @conn: iscsi connection
+ * @mtask: driver command structure which is requesting
+ * a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI task management (TMF) request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *mtask)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_tm *tmfabort_hdr;
+ struct scsi_cmnd *ref_sc;
+ struct iscsi_task *ctask;
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_tmf_request *tmfabort_wqe;
+ u32 dword;
+
+ bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
+ tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
+ tmfabort_wqe = (struct bnx2i_tmf_request *)
+ bnx2i_conn->ep->qp.sq_prod_qe;
+
+ tmfabort_wqe->op_code = tmfabort_hdr->opcode;
+ tmfabort_wqe->op_attr = 0;
+ tmfabort_wqe->op_attr =
+ ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
+ tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
+ tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
+
+ tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
+ tmfabort_wqe->reserved2 = 0;
+ tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
+
+ ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
+ if (!ctask || !ctask->sc)
+ /*
+ * the iscsi layer must have completed the cmd while this
+ * was starting up.
+ */
+ return 0;
+ ref_sc = ctask->sc;
+
+ if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
+ dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+ else
+ dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+ tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
+ tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
+
+ tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
+ tmfabort_wqe->bd_list_addr_hi = (u32)
+ ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+ tmfabort_wqe->num_bds = 1;
+ tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
+ * @conn: iscsi connection
+ * @cmd: driver command structure which is requesting
+ * a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
+ struct bnx2i_cmd *cmd)
+{
+ struct bnx2i_cmd_request *scsi_cmd_wqe;
+
+ scsi_cmd_wqe = (struct bnx2i_cmd_request *)
+ bnx2i_conn->ep->qp.sq_prod_qe;
+ memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
+ scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
+ * @conn: iscsi connection
+ * @cmd: driver command structure which is requesting
+ * a WQE to be sent to the chip for further processing
+ * @ttt: TTT to be used when building pdu header
+ * @datap: payload buffer pointer
+ * @data_len: payload data length
+ * @unsol: indicates whether nopout pdu is unsolicited pdu or
+ * in response to target's NOPIN w/ TTT != FFFFFFFF
+ *
+ * prepare and post a nopout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *task, u32 ttt,
+ char *datap, int data_len, int unsol)
+{
+ struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_nop_out_request *nopout_wqe;
+ struct iscsi_nopout *nopout_hdr;
+
+ bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+ nopout_hdr = (struct iscsi_nopout *)task->hdr;
+ nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
+ nopout_wqe->op_code = nopout_hdr->opcode;
+ nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
+ memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+ u32 tmp = nopout_hdr->lun[0];
+ /* 57710 requires LUN field to be swapped */
+ nopout_hdr->lun[0] = nopout_hdr->lun[1];
+ nopout_hdr->lun[1] = tmp;
+ }
+
+ nopout_wqe->itt = ((u16)task->itt |
+ (ISCSI_TASK_TYPE_MPATH <<
+ ISCSI_TMF_REQUEST_TYPE_SHIFT));
+ nopout_wqe->ttt = ttt;
+ nopout_wqe->flags = 0;
+ if (!unsol)
+ nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+ else if (nopout_hdr->itt == RESERVED_ITT)
+ nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+
+ nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+ nopout_wqe->data_length = data_len;
+ if (data_len) {
+ /* handle payload data, not required in first release */
+ printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
+ } else {
+ nopout_wqe->bd_list_addr_lo = (u32)
+ bnx2i_conn->hba->mp_bd_dma;
+ nopout_wqe->bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+ nopout_wqe->num_bds = 1;
+ }
+ nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+
+/**
+ * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
+ * @conn: iscsi connection
+ * @cmd: driver command structure which is requesting
+ * a WQE to be sent to the chip for further processing
+ *
+ * prepare and post logout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *task)
+{
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_logout_request *logout_wqe;
+ struct iscsi_logout *logout_hdr;
+
+ bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+ logout_hdr = (struct iscsi_logout *)task->hdr;
+
+ logout_wqe = (struct bnx2i_logout_request *)
+ bnx2i_conn->ep->qp.sq_prod_qe;
+ memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));
+
+ logout_wqe->op_code = logout_hdr->opcode;
+ logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+ logout_wqe->op_attr =
+ logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
+ logout_wqe->itt = ((u16)task->itt |
+ (ISCSI_TASK_TYPE_MPATH <<
+ ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
+ logout_wqe->data_length = 0;
+ logout_wqe->cid = 0;
+
+ logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
+ logout_wqe->bd_list_addr_hi = (u32)
+ ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+ logout_wqe->num_bds = 1;
+ logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+
+/**
+ * bnx2i_update_iscsi_conn - posts iSCSI connection update request to hardware
+ * @conn: iscsi connection which requires iscsi parameter update
+ *
+ * sends down iSCSI Conn Update request to move iSCSI conn to FFP
+ */
+void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
+{
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
+ struct kwqe *kwqe_arr[2];
+ struct iscsi_kwqe_conn_update *update_wqe;
+ struct iscsi_kwqe_conn_update conn_update_kwqe;
+
+ update_wqe = &conn_update_kwqe;
+
+ update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
+ update_wqe->hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ /* 5771x requires conn context id to be passed as is */
+ if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
+ update_wqe->context_id = bnx2i_conn->ep->ep_cid;
+ else
+ update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
+ update_wqe->conn_flags = 0;
+ if (conn->hdrdgst_en)
+ update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
+ if (conn->datadgst_en)
+ update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
+ if (conn->session->initial_r2t_en)
+ update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
+ if (conn->session->imm_data_en)
+ update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
+
+ update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
+ update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
+ update_wqe->first_burst_length = conn->session->first_burst;
+ update_wqe->max_burst_length = conn->session->max_burst;
+ update_wqe->exp_stat_sn = conn->exp_statsn;
+ update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
+ update_wqe->session_error_recovery_level = conn->session->erl;
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "bnx2i: conn update - MBL 0x%x FBL 0x%x"
+ "MRDSL_I 0x%x MRDSL_T 0x%x \n",
+ update_wqe->max_burst_length,
+ update_wqe->first_burst_length,
+ update_wqe->max_recv_pdu_length,
+ update_wqe->max_send_pdu_length);
+
+ kwqe_arr[0] = (struct kwqe *) update_wqe;
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_ep_ofld_timer - connection offload/destroy request timeout handler
+ * @data: endpoint (transport handle) structure pointer
+ *
+ * routine to handle connection offload/destroy request timeout
+ */
+void bnx2i_ep_ofld_timer(unsigned long data)
+{
+ struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
+
+ if (ep->state == EP_STATE_OFLD_START) {
+ printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
+ ep->state = EP_STATE_OFLD_FAILED;
+ } else if (ep->state == EP_STATE_DISCONN_START) {
+ printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
+ ep->state = EP_STATE_DISCONN_TIMEDOUT;
+ } else if (ep->state == EP_STATE_CLEANUP_START) {
+ printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
+ ep->state = EP_STATE_CLEANUP_FAILED;
+ }
+
+ wake_up_interruptible(&ep->ofld_wait);
+}
+
+
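+/*
+ * bnx2i_power_of2 - returns log2(val) when 'val' is a power of 2, else 0
+ */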
+static int bnx2i_power_of2(u32 val)
+{
+ u32 power = 0;
+ if (val & (val - 1))
+ return power;
+ val--;
+ while (val) {
+ val = val >> 1;
+ power++;
+ }
+ return power;
+}
+
+
+/**
+ * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
+ * @hba: adapter structure pointer
+ * @cmd: driver command structure which is requesting
+ * a WQE to be sent to the chip for further processing
+ *
+ * prepares and posts an iSCSI command cleanup request WQE on the SQ
+ */
+void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+ struct bnx2i_cleanup_request *cmd_cleanup;
+
+ cmd_cleanup =
+ (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
+ memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));
+
+ cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
+ cmd_cleanup->itt = cmd->req.itt;
+ cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
+}
+
+
+/**
+ * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
+ * @hba: adapter structure pointer
+ * @ep: endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts a DESTROY_CONN KWQE to initiate the
+ * iscsi connection context clean-up process
+ */
+void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+ struct kwqe *kwqe_arr[2];
+ struct iscsi_kwqe_conn_destroy conn_cleanup;
+
+ memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
+
+ conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
+ conn_cleanup.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+ /* 5771x requires conn context id to be passed as is */
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ conn_cleanup.context_id = ep->ep_cid;
+ else
+ conn_cleanup.context_id = (ep->ep_cid >> 7);
+
+ conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
+
+ kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
+ * @hba: adapter structure pointer
+ * @ep: endpoint (transport identifier) structure
+ *
+ * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ struct kwqe *kwqe_arr[2];
+ struct iscsi_kwqe_conn_offload1 ofld_req1;
+ struct iscsi_kwqe_conn_offload2 ofld_req2;
+ dma_addr_t dma_addr;
+ int num_kwqes = 2;
+ u32 *ptbl;
+
+ ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+ ofld_req1.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+ dma_addr = ep->qp.sq_pgtbl_phys;
+ ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+ ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+ dma_addr = ep->qp.cq_pgtbl_phys;
+ ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+ ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+ ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+ ofld_req2.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ dma_addr = ep->qp.rq_pgtbl_phys;
+ ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+ ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
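+ /* the first SQ/CQ page table entries are passed inline in the offload
+ * request; 570x page tables store the high 32 bits of each PTE first
+ */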
+ ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+
+ ofld_req2.sq_first_pte.hi = *ptbl++;
+ ofld_req2.sq_first_pte.lo = *ptbl;
+
+ ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+ ofld_req2.cq_first_pte.hi = *ptbl++;
+ ofld_req2.cq_first_pte.lo = *ptbl;
+
+ kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+ kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+ ofld_req2.num_additional_wqes = 0;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+}
+
+
+/**
+ * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
+ * @hba: adapter structure pointer
+ * @ep: endpoint (transport identifier) structure
+ *
+ * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ struct kwqe *kwqe_arr[5];
+ struct iscsi_kwqe_conn_offload1 ofld_req1;
+ struct iscsi_kwqe_conn_offload2 ofld_req2;
+ struct iscsi_kwqe_conn_offload3 ofld_req3[1];
+ dma_addr_t dma_addr;
+ int num_kwqes = 2;
+ u32 *ptbl;
+
+ ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+ ofld_req1.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+ dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
+ ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+ ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+ dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
+ ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+ ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+ ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+ ofld_req2.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
+ ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+ ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+ ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+ ofld_req2.sq_first_pte.hi = *ptbl++;
+ ofld_req2.sq_first_pte.lo = *ptbl;
+
+ ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+ ofld_req2.cq_first_pte.hi = *ptbl++;
+ ofld_req2.cq_first_pte.lo = *ptbl;
+
+ kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+ kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+
+ ofld_req2.num_additional_wqes = 1;
+ memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
+ ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+ ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
+ ofld_req3[0].qp_first_pte[0].lo = *ptbl;
+
+ kwqe_arr[2] = (struct kwqe *) ofld_req3;
+ /* needed if we decide to go with multiple KCQEs per conn */
+ num_kwqes += 1;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+}
+
+/**
+ * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
+ *
+ * @hba: adapter structure pointer
+ * @ep: endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+ bnx2i_5771x_send_conn_ofld_req(hba, ep);
+ else
+ bnx2i_570x_send_conn_ofld_req(hba, ep);
+}
+
+
+/**
+ * setup_qp_page_tables - iscsi QP page table setup function
+ * @ep: endpoint (transport identifier) structure
+ *
+ * Sets up page tables for SQ/RQ/CQ. 1Gb/sec (5706/5708/5709) devices require
+ * 64-bit addresses in big endian format, whereas 10Gb/sec (57710) requires
+ * the PT in little endian format
+ */
+static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
+{
+ int num_pages;
+ u32 *ptbl;
+ dma_addr_t page;
+ int cnic_dev_10g;
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ cnic_dev_10g = 1;
+ else
+ cnic_dev_10g = 0;
+
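+ /* on 10G (57710) devices the first ISCSI_*_DB_SIZE bytes of each page
+ * table region hold the doorbell structure, so PTEs start right after
+ * it; 1G devices start the PTEs at offset 0
+ */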
+ /* SQ page table */
+ memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
+ num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+ page = ep->qp.sq_phys;
+
+ if (cnic_dev_10g)
+ ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+ else
+ ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+ while (num_pages--) {
+ if (cnic_dev_10g) {
+ /* PTE is written in little endian format for 57710 */
+ *ptbl = (u32) page;
+ ptbl++;
+ *ptbl = (u32) ((u64) page >> 32);
+ ptbl++;
+ page += PAGE_SIZE;
+ } else {
+ /* PTE is written in big endian format for
+ * 5706/5708/5709 devices */
+ *ptbl = (u32) ((u64) page >> 32);
+ ptbl++;
+ *ptbl = (u32) page;
+ ptbl++;
+ page += PAGE_SIZE;
+ }
+ }
+
+ /* RQ page table */
+ memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
+ num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+ page = ep->qp.rq_phys;
+
+ if (cnic_dev_10g)
+ ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+ else
+ ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
+ while (num_pages--) {
+ if (cnic_dev_10g) {
+ /* PTE is written in little endian format for 57710 */
+ *ptbl = (u32) page;
+ ptbl++;
+ *ptbl = (u32) ((u64) page >> 32);
+ ptbl++;
+ page += PAGE_SIZE;
+ } else {
+ /* PTE is written in big endian format for
+ * 5706/5708/5709 devices */
+ *ptbl = (u32) ((u64) page >> 32);
+ ptbl++;
+ *ptbl = (u32) page;
+ ptbl++;
+ page += PAGE_SIZE;
+ }
+ }
+
+ /* CQ page table */
+ memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
+ num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+ page = ep->qp.cq_phys;
+
+ if (cnic_dev_10g)
+ ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+ else
+ ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+ while (num_pages--) {
+ if (cnic_dev_10g) {
+ /* PTE is written in little endian format for 57710 */
+ *ptbl = (u32) page;
+ ptbl++;
+ *ptbl = (u32) ((u64) page >> 32);
+ ptbl++;
+ page += PAGE_SIZE;
+ } else {
+ /* PTE is written in big endian format for
+ * 5706/5708/5709 devices */
+ *ptbl = (u32) ((u64) page >> 32);
+ ptbl++;
+ *ptbl = (u32) page;
+ ptbl++;
+ page += PAGE_SIZE;
+ }
+ }
+}
+
+
+/**
+ * bnx2i_alloc_qp_resc - allocates required resources for QP.
+ * @hba: adapter structure pointer
+ * @ep: endpoint (transport identifier) structure
+ *
+ * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
+ * memory for SQ/RQ/CQ and page tables. EP structure elements such
+ * as producer/consumer indexes/pointers, queue sizes and page table
+ * contents are set up
+ */
+int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+ struct bnx2i_5771x_cq_db *cq_db;
+
+ ep->hba = hba;
+ ep->conn = NULL;
+ ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
+
+ /* Allocate page table memory for SQ which is page aligned */
+ ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
+ ep->qp.sq_mem_size =
+ (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ ep->qp.sq_pgtbl_size =
+ (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+ ep->qp.sq_pgtbl_size =
+ (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ ep->qp.sq_pgtbl_virt =
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
+ &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
+ if (!ep->qp.sq_pgtbl_virt) {
+ printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
+ ep->qp.sq_pgtbl_size);
+ goto mem_alloc_err;
+ }
+
+ /* Allocate memory area for actual SQ element */
+ ep->qp.sq_virt =
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+ &ep->qp.sq_phys, GFP_KERNEL);
+ if (!ep->qp.sq_virt) {
+ printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
+ ep->qp.sq_mem_size);
+ goto mem_alloc_err;
+ }
+
+ memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
+ ep->qp.sq_first_qe = ep->qp.sq_virt;
+ ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
+ ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
+ ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
+ ep->qp.sq_prod_idx = 0;
+ ep->qp.sq_cons_idx = 0;
+ ep->qp.sqe_left = hba->max_sqes;
+
+ /* Allocate page table memory for CQ which is page aligned */
+ ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
+ ep->qp.cq_mem_size =
+ (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ ep->qp.cq_pgtbl_size =
+ (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+ ep->qp.cq_pgtbl_size =
+ (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ ep->qp.cq_pgtbl_virt =
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
+ &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
+ if (!ep->qp.cq_pgtbl_virt) {
+ printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
+ ep->qp.cq_pgtbl_size);
+ goto mem_alloc_err;
+ }
+
+ /* Allocate memory area for actual CQ element */
+ ep->qp.cq_virt =
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+ &ep->qp.cq_phys, GFP_KERNEL);
+ if (!ep->qp.cq_virt) {
+ printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
+ ep->qp.cq_mem_size);
+ goto mem_alloc_err;
+ }
+ memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
+
+ ep->qp.cq_first_qe = ep->qp.cq_virt;
+ ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
+ ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
+ ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
+ ep->qp.cq_prod_idx = 0;
+ ep->qp.cq_cons_idx = 0;
+ ep->qp.cqe_left = hba->max_cqes;
+ ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
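+ /* note: cqe_size holds the CQ depth (number of entries), used for
+ * CQE sequence number arithmetic, not the per-entry size in bytes
+ */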
+ ep->qp.cqe_size = hba->max_cqes;
+
+ /* Invalidate all EQ CQE indices, required only for 57710 */
+ cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
+ memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);
+
+ /* Allocate page table memory for RQ which is page aligned */
+ ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
+ ep->qp.rq_mem_size =
+ (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ ep->qp.rq_pgtbl_size =
+ (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+ ep->qp.rq_pgtbl_size =
+ (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ ep->qp.rq_pgtbl_virt =
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
+ &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
+ if (!ep->qp.rq_pgtbl_virt) {
+ printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
+ ep->qp.rq_pgtbl_size);
+ goto mem_alloc_err;
+ }
+
+ /* Allocate memory area for actual RQ element */
+ ep->qp.rq_virt =
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
+ &ep->qp.rq_phys, GFP_KERNEL);
+ if (!ep->qp.rq_virt) {
+ printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
+ ep->qp.rq_mem_size);
+ goto mem_alloc_err;
+ }
+
+ ep->qp.rq_first_qe = ep->qp.rq_virt;
+ ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
+ ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
+ ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
+ ep->qp.rq_prod_idx = 0x8000;
+ ep->qp.rq_cons_idx = 0;
+ ep->qp.rqe_left = hba->max_rqes;
+
+ setup_qp_page_tables(ep);
+
+ return 0;
+
+mem_alloc_err:
+ bnx2i_free_qp_resc(hba, ep);
+ return -ENOMEM;
+}
+
+
+
+/**
+ * bnx2i_free_qp_resc - free memory resources held by QP
+ * @hba: adapter structure pointer
+ * @ep: endpoint (transport identifier) structure
+ *
+ * Free QP resources - SQ/RQ/CQ memory and page tables.
+ */
+void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+ if (ep->qp.ctx_base) {
+ iounmap(ep->qp.ctx_base);
+ ep->qp.ctx_base = NULL;
+ }
+ /* Free SQ mem */
+ if (ep->qp.sq_pgtbl_virt) {
+ dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
+ ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
+ ep->qp.sq_pgtbl_virt = NULL;
+ ep->qp.sq_pgtbl_phys = 0;
+ }
+ if (ep->qp.sq_virt) {
+ dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+ ep->qp.sq_virt, ep->qp.sq_phys);
+ ep->qp.sq_virt = NULL;
+ ep->qp.sq_phys = 0;
+ }
+
+ /* Free RQ mem */
+ if (ep->qp.rq_pgtbl_virt) {
+ dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
+ ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
+ ep->qp.rq_pgtbl_virt = NULL;
+ ep->qp.rq_pgtbl_phys = 0;
+ }
+ if (ep->qp.rq_virt) {
+ dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
+ ep->qp.rq_virt, ep->qp.rq_phys);
+ ep->qp.rq_virt = NULL;
+ ep->qp.rq_phys = 0;
+ }
+
+ /* Free CQ mem */
+ if (ep->qp.cq_pgtbl_virt) {
+ dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
+ ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
+ ep->qp.cq_pgtbl_virt = NULL;
+ ep->qp.cq_pgtbl_phys = 0;
+ }
+ if (ep->qp.cq_virt) {
+ dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+ ep->qp.cq_virt, ep->qp.cq_phys);
+ ep->qp.cq_virt = NULL;
+ ep->qp.cq_phys = 0;
+ }
+}
+
+
+/**
+ * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
+ * @hba: adapter structure pointer
+ *
+ * Send down iscsi_init KWQEs which initiate the initial handshake with the f/w.
+ * This results in iSCSI support validation and on-chip context manager
+ * initialization. Firmware completes this handshake with a CQE carrying
+ * the result of iscsi support validation. Parameters carried by the
+ * iscsi init request determine the number of offloaded connections and the
+ * tolerance level for iscsi protocol violations this hba/chip can support
+ */
+int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
+{
+ struct kwqe *kwqe_arr[3];
+ struct iscsi_kwqe_init1 iscsi_init;
+ struct iscsi_kwqe_init2 iscsi_init2;
+ int rc = 0;
+ u64 mask64;
+
+ bnx2i_adjust_qp_size(hba);
+
+ iscsi_init.flags =
+ ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+ if (en_tcp_dack)
+ iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
+ iscsi_init.reserved0 = 0;
+ iscsi_init.num_cqs = 1;
+ iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
+ iscsi_init.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+ iscsi_init.dummy_buffer_addr_hi =
+ (u32) ((u64) hba->dummy_buf_dma >> 32);
+
+ hba->ctx_ccell_tasks =
+ ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
+ iscsi_init.num_ccells_per_conn = hba->num_ccell;
+ iscsi_init.num_tasks_per_conn = hba->max_sqes;
+ iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ iscsi_init.sq_num_wqes = hba->max_sqes;
+ iscsi_init.cq_log_wqes_per_page =
+ (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+ iscsi_init.cq_num_wqes = hba->max_cqes;
+ iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
+ (PAGE_SIZE - 1)) / PAGE_SIZE;
+ iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
+ (PAGE_SIZE - 1)) / PAGE_SIZE;
+ iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
+ iscsi_init.rq_num_wqes = hba->max_rqes;
+
+
+ iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
+ iscsi_init2.hdr.flags =
+ (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+ iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
+ mask64 = 0x0ULL;
+ mask64 |= (
+ /* CISCO MDS */
+ (1UL <<
+ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
+ /* HP MSA1510i */
+ (1UL <<
+ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
+ /* EMC */
+ (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
+ if (error_mask1)
+ iscsi_init2.error_bit_map[0] = error_mask1;
+ else
+ iscsi_init2.error_bit_map[0] = (u32) mask64;
+
+ if (error_mask2)
+ iscsi_init2.error_bit_map[1] = error_mask2;
+ else
+ iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
+
+ iscsi_error_mask = mask64;
+
+ kwqe_arr[0] = (struct kwqe *) &iscsi_init;
+ kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
+ return rc;
+}
+
+
+/**
+ * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process SCSI CMD Response CQE & complete the request to SCSI-ML
+ */
+static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct bnx2i_cmd_response *resp_cqe;
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct iscsi_task *task;
+ struct iscsi_cmd_rsp *hdr;
+ u32 datalen = 0;
+
+ resp_cqe = (struct bnx2i_cmd_response *)cqe;
+ spin_lock(&session->lock);
+ task = iscsi_itt_to_task(conn,
+ resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
+ if (!task)
+ goto fail;
+
+ bnx2i_cmd = task->dd_data;
+
+ if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
+ conn->datain_pdus_cnt +=
+ resp_cqe->task_stat.read_stat.num_data_outs;
+ conn->rxdata_octets +=
+ bnx2i_cmd->req.total_data_transfer_length;
+ } else {
+ conn->dataout_pdus_cnt +=
+ resp_cqe->task_stat.read_stat.num_data_outs;
+ conn->r2t_pdus_cnt +=
+ resp_cqe->task_stat.read_stat.num_r2ts;
+ conn->txdata_octets +=
+ bnx2i_cmd->req.total_data_transfer_length;
+ }
+ bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
+
+ hdr = (struct iscsi_cmd_rsp *)task->hdr;
+ resp_cqe = (struct bnx2i_cmd_response *)cqe;
+ hdr->opcode = resp_cqe->op_code;
+ hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
+ hdr->response = resp_cqe->response;
+ hdr->cmd_status = resp_cqe->status;
+ hdr->flags = resp_cqe->response_flags;
+ hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);
+
+ if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
+ goto done;
+
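+	/*
+	 * For CHECK CONDITION status the sense data arrives via the RQ;
+	 * copy it into conn->data, clamped to the RQ WQE size and the
+	 * default receive segment length.
+	 */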
+ if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
+ datalen = resp_cqe->data_length;
+ if (datalen < 2)
+ goto done;
+
+ if (datalen > BNX2I_RQ_WQE_SIZE) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "sense data len %d > RQ sz\n",
+ datalen);
+ datalen = BNX2I_RQ_WQE_SIZE;
+ } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "sense data len %d > conn data\n",
+ datalen);
+ datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ }
+
+ bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
+ bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
+ }
+
+done:
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+ conn->data, datalen);
+fail:
+ spin_unlock(&session->lock);
+ return 0;
+}
+
+
+/**
+ * bnx2i_process_login_resp - this function handles iscsi login response
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process Login Response CQE & complete it to open-iscsi user daemon
+ */
+static int bnx2i_process_login_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_login_response *login;
+ struct iscsi_login_rsp *resp_hdr;
+ int pld_len;
+ int pad_len;
+
+ login = (struct bnx2i_login_response *) cqe;
+ spin_lock(&session->lock);
+ task = iscsi_itt_to_task(conn,
+ login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+ if (!task)
+ goto done;
+
+ resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = login->op_code;
+ resp_hdr->flags = login->response_flags;
+ resp_hdr->max_version = login->version_max;
+	resp_hdr->active_version = login->version_active;
+ resp_hdr->hlength = 0;
+
+ hton24(resp_hdr->dlength, login->data_length);
+ memcpy(resp_hdr->isid, &login->isid_lo, 6);
+ resp_hdr->tsih = cpu_to_be16(login->tsih);
+ resp_hdr->itt = task->hdr->itt;
+ resp_hdr->statsn = cpu_to_be32(login->stat_sn);
+ resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
+ resp_hdr->status_class = login->status_class;
+ resp_hdr->status_detail = login->status_detail;
+ pld_len = login->data_length;
+ bnx2i_conn->gen_pdu.resp_wr_ptr =
+ bnx2i_conn->gen_pdu.resp_buf + pld_len;
+
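+	/*
+	 * iSCSI payloads are padded to a 4-byte boundary; zero-fill the pad
+	 * bytes so the PDU completed below covers the padded payload length.
+	 */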
+ pad_len = 0;
+ if (pld_len & 0x3)
+ pad_len = 4 - (pld_len % 4);
+
+ if (pad_len) {
+ int i = 0;
+ for (i = 0; i < pad_len; i++) {
+ bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
+ bnx2i_conn->gen_pdu.resp_wr_ptr++;
+ }
+ }
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
+ bnx2i_conn->gen_pdu.resp_buf,
+ bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
+done:
+ spin_unlock(&session->lock);
+ return 0;
+}
+
+/**
+ * bnx2i_process_tmf_resp - this function handles iscsi TMF response
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI TMF Response CQE and wake up the driver eh thread.
+ */
+static int bnx2i_process_tmf_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_tmf_response *tmf_cqe;
+ struct iscsi_tm_rsp *resp_hdr;
+
+ tmf_cqe = (struct bnx2i_tmf_response *)cqe;
+ spin_lock(&session->lock);
+ task = iscsi_itt_to_task(conn,
+ tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
+ if (!task)
+ goto done;
+
+ resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = tmf_cqe->op_code;
+ resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
+ resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
+ resp_hdr->itt = task->hdr->itt;
+ resp_hdr->response = tmf_cqe->response;
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+done:
+ spin_unlock(&session->lock);
+ return 0;
+}
+
+/**
+ * bnx2i_process_logout_resp - this function handles iscsi logout response
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI Logout Response CQE & make function call to
+ * notify the user daemon.
+ */
+static int bnx2i_process_logout_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_logout_response *logout;
+ struct iscsi_logout_rsp *resp_hdr;
+
+ logout = (struct bnx2i_logout_response *) cqe;
+ spin_lock(&session->lock);
+ task = iscsi_itt_to_task(conn,
+ logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
+ if (!task)
+ goto done;
+
+ resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = logout->op_code;
+ resp_hdr->flags = logout->response;
+ resp_hdr->hlength = 0;
+
+ resp_hdr->itt = task->hdr->itt;
+ resp_hdr->statsn = task->hdr->exp_statsn;
+ resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
+
+ resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
+ resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+done:
+ spin_unlock(&session->lock);
+ return 0;
+}
+
+/**
+ * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI NOPIN local completion CQE, frees ITT and command structures
+ */
+static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct bnx2i_nop_in_msg *nop_in;
+ struct iscsi_task *task;
+
+ nop_in = (struct bnx2i_nop_in_msg *)cqe;
+ spin_lock(&session->lock);
+ task = iscsi_itt_to_task(conn,
+ nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
+ if (task)
+ iscsi_put_task(task);
+ spin_unlock(&session->lock);
+}
+
+/**
+ * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
+ * @bnx2i_conn: iscsi connection pointer
+ *
+ * Firmware advances RQ producer index for every unsolicited PDU even if
+ * payload data length is '0'. This function makes corresponding
+ * adjustments on the driver side to match this f/w behavior
+ */
+static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
+{
+ char dummy_rq_data[2];
+ bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
+ bnx2i_put_rq_buf(bnx2i_conn, 1);
+}
+
+
+/**
+ * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI target's proactive iSCSI NOPIN request
+ */
+static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_nop_in_msg *nop_in;
+ struct iscsi_nopin *hdr;
+ u32 itt;
+ int tgt_async_nop = 0;
+
+ nop_in = (struct bnx2i_nop_in_msg *)cqe;
+ itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
+
+ spin_lock(&session->lock);
+ hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
+ memset(hdr, 0, sizeof(struct iscsi_hdr));
+ hdr->opcode = nop_in->op_code;
+ hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
+ hdr->ttt = cpu_to_be32(nop_in->ttt);
+
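+	/*
+	 * An ITT of RESERVED_ITT means this is an unsolicited, target
+	 * initiated NOP-In; anything else is a reply to one of our NOP-Outs.
+	 */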
+ if (itt == (u16) RESERVED_ITT) {
+ bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+ hdr->itt = RESERVED_ITT;
+ tgt_async_nop = 1;
+ goto done;
+ }
+
+ /* this is a response to one of our nop-outs */
+ task = iscsi_itt_to_task(conn, itt);
+ if (task) {
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ hdr->itt = task->hdr->itt;
+ hdr->ttt = cpu_to_be32(nop_in->ttt);
+ memcpy(hdr->lun, nop_in->lun, 8);
+ }
+done:
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
+ spin_unlock(&session->lock);
+
+ return tgt_async_nop;
+}
+
+
+/**
+ * bnx2i_process_async_mesg - this function handles iscsi async message
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI ASYNC Message
+ */
+static void bnx2i_process_async_mesg(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct bnx2i_async_msg *async_cqe;
+ struct iscsi_async *resp_hdr;
+ u8 async_event;
+
+ bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+
+ async_cqe = (struct bnx2i_async_msg *)cqe;
+ async_event = async_cqe->async_event;
+
+ if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
+ iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+ "async: scsi events not supported\n");
+ return;
+ }
+
+ spin_lock(&session->lock);
+ resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = async_cqe->op_code;
+ resp_hdr->flags = 0x80;
+
+ memcpy(resp_hdr->lun, async_cqe->lun, 8);
+ resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
+
+ resp_hdr->async_event = async_cqe->async_event;
+ resp_hdr->async_vcode = async_cqe->async_vcode;
+
+ resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
+ resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
+ resp_hdr->param3 = cpu_to_be16(async_cqe->param3);
+
+ __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
+ (struct iscsi_hdr *)resp_hdr, NULL, 0);
+ spin_unlock(&session->lock);
+}
+
+
+/**
+ * bnx2i_process_reject_mesg - process iscsi reject pdu
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI REJECT message
+ */
+static void bnx2i_process_reject_mesg(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct bnx2i_reject_msg *reject;
+ struct iscsi_reject *hdr;
+
+ reject = (struct bnx2i_reject_msg *) cqe;
+ if (reject->data_length) {
+ bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
+ bnx2i_put_rq_buf(bnx2i_conn, 1);
+ } else
+ bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+
+ spin_lock(&session->lock);
+ hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
+ memset(hdr, 0, sizeof(struct iscsi_hdr));
+ hdr->opcode = reject->op_code;
+ hdr->reason = reject->reason;
+ hton24(hdr->dlength, reject->data_length);
+ hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
+ hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
+ reject->data_length);
+ spin_unlock(&session->lock);
+}
+
+/**
+ * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process command cleanup response CQE during conn shutdown or error recovery
+ */
+static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct bnx2i_cleanup_response *cmd_clean_rsp;
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+
+ cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
+ spin_lock(&session->lock);
+ task = iscsi_itt_to_task(conn,
+ cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+ if (!task)
+ printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
+ cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+ spin_unlock(&session->lock);
+ complete(&bnx2i_conn->cmd_cleanup_cmpl);
+}
+
+
+
+/**
+ * bnx2i_process_new_cqes - process newly DMA'ed CQEs
+ * @bnx2i_conn: iscsi connection
+ *
+ * this function is called by generic KCQ handler to process all pending CQEs
+ */
+static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct qp_info *qp = &bnx2i_conn->ep->qp;
+ struct bnx2i_nop_in_msg *nopin;
+ int tgt_async_msg;
+
+ while (1) {
+ nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
+ if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
+ break;
+
+ if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
+ break;
+
+ tgt_async_msg = 0;
+
+ switch (nopin->op_code) {
+ case ISCSI_OP_SCSI_CMD_RSP:
+ case ISCSI_OP_SCSI_DATA_IN:
+ bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ bnx2i_process_login_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC_RSP:
+ bnx2i_process_tmf_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ case ISCSI_OP_LOGOUT_RSP:
+ bnx2i_process_logout_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ case ISCSI_OP_NOOP_IN:
+ if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
+ qp->cq_cons_qe))
+ tgt_async_msg = 1;
+ break;
+ case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
+ bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ case ISCSI_OP_ASYNC_EVENT:
+ bnx2i_process_async_mesg(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ tgt_async_msg = 1;
+ break;
+ case ISCSI_OP_REJECT:
+ bnx2i_process_reject_mesg(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ case ISCSI_OPCODE_CLEANUP_RESPONSE:
+ bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
+ default:
+ printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+ nopin->op_code);
+ }
+
+ if (!tgt_async_msg)
+ bnx2i_conn->ep->num_active_cmds--;
+
+ /* clear out in production version only, till beta keep opcode
+ * field intact, will be helpful in debugging (context dump)
+ * nopin->op_code = 0;
+ */
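+		/*
+		 * The expected CQE sequence number wraps after 2 * CQ size
+		 * (matching the max_cq_sqn programmed via iscsi_init2), and
+		 * the consumer pointer/index advance as a circular ring.
+		 */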
+ qp->cqe_exp_seq_sn++;
+ if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
+ qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
+
+ if (qp->cq_cons_qe == qp->cq_last_qe) {
+ qp->cq_cons_qe = qp->cq_first_qe;
+ qp->cq_cons_idx = 0;
+ } else {
+ qp->cq_cons_qe++;
+ qp->cq_cons_idx++;
+ }
+ }
+ bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+}
+
+/**
+ * bnx2i_fastpath_notification - process global event queue (KCQ)
+ * @hba: adapter structure pointer
+ * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry
+ *
+ * Fast path event notification handler, KCQ entry carries context id
+ * of the connection that has 1 or more pending CQ entries
+ */
+static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
+ struct iscsi_kcqe *new_cqe_kcqe)
+{
+ struct bnx2i_conn *conn;
+ u32 iscsi_cid;
+
+ iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
+ conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+ if (!conn) {
+ printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
+ return;
+ }
+ if (!conn->ep) {
+ printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
+ return;
+ }
+
+ bnx2i_process_new_cqes(conn);
+}
+
+
+/**
+ * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
+ * @hba: adapter structure pointer
+ * @update_kcqe: kcqe pointer
+ *
+ * CONN_UPDATE completion handler; this completes the iSCSI connection's FFP migration
+ */
+static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
+ struct iscsi_kcqe *update_kcqe)
+{
+ struct bnx2i_conn *conn;
+ u32 iscsi_cid;
+
+ iscsi_cid = update_kcqe->iscsi_conn_id;
+ conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+ if (!conn) {
+ printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
+ return;
+ }
+ if (!conn->ep) {
+ printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
+ return;
+ }
+
+ if (update_kcqe->completion_status) {
+ printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
+ conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
+ } else
+ conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
+
+ wake_up_interruptible(&conn->ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_recovery_que_add_conn - add connection to recovery queue
+ * @hba: adapter structure pointer
+ * @bnx2i_conn: iscsi connection
+ *
+ * Add connection to recovery queue and schedule adapter eh worker
+ */
+static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
+ struct bnx2i_conn *bnx2i_conn)
+{
+ iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
+ ISCSI_ERR_CONN_FAILED);
+}
+
+
+/**
+ * bnx2i_process_tcp_error - process error notification on a given connection
+ * @hba: adapter structure pointer
+ * @tcp_err: tcp error kcqe pointer
+ *
+ * handles tcp level error notifications from FW.
+ */
+static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
+ struct iscsi_kcqe *tcp_err)
+{
+ struct bnx2i_conn *bnx2i_conn;
+ u32 iscsi_cid;
+
+ iscsi_cid = tcp_err->iscsi_conn_id;
+ bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+ if (!bnx2i_conn) {
+ printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+ return;
+ }
+
+ printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
+ iscsi_cid, tcp_err->completion_status);
+ bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+}
+
+
+/**
+ * bnx2i_process_iscsi_error - process error notification on a given connection
+ * @hba: adapter structure pointer
+ * @iscsi_err: iscsi error kcqe pointer
+ *
+ * handles iscsi error notifications from the FW. Based on the initial
+ * handshake, firmware classifies iscsi protocol / TCP RFC violations into
+ * either warning or error indications. If the indication is of "Error" type,
+ * the driver will initiate session recovery for that connection/session. For
+ * a "Warning" type indication, the driver will put out a system log message
+ * (there will be only one message for each type for the life of the
+ * session; this is to avoid unnecessarily overloading the system)
+ */
+static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
+ struct iscsi_kcqe *iscsi_err)
+{
+ struct bnx2i_conn *bnx2i_conn;
+ u32 iscsi_cid;
+ char warn_notice[] = "iscsi_warning";
+ char error_notice[] = "iscsi_error";
+ char additional_notice[64];
+ char *message;
+ int need_recovery;
+ u64 err_mask64;
+
+ iscsi_cid = iscsi_err->iscsi_conn_id;
+ bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+ if (!bnx2i_conn) {
+ printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+ return;
+ }
+
+ err_mask64 = (0x1ULL << iscsi_err->completion_status);
+
+ if (err_mask64 & iscsi_error_mask) {
+ need_recovery = 0;
+ message = warn_notice;
+ } else {
+ need_recovery = 1;
+ message = error_notice;
+ }
+
+ switch (iscsi_err->completion_status) {
+ case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
+ strcpy(additional_notice, "hdr digest err");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
+ strcpy(additional_notice, "data digest err");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
+ strcpy(additional_notice, "wrong opcode rcvd");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
+ strcpy(additional_notice, "AHS len > 0 rcvd");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
+ strcpy(additional_notice, "invalid ITT rcvd");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
+ strcpy(additional_notice, "wrong StatSN rcvd");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
+ strcpy(additional_notice, "wrong DataSN rcvd");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
+ strcpy(additional_notice, "pend R2T violation");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
+ strcpy(additional_notice, "ERL0, UO");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
+ strcpy(additional_notice, "ERL0, U1");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
+ strcpy(additional_notice, "ERL0, U2");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
+ strcpy(additional_notice, "ERL0, U3");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
+ strcpy(additional_notice, "ERL0, U4");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
+ strcpy(additional_notice, "ERL0, U5");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
+ strcpy(additional_notice, "ERL0, U6");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
+ strcpy(additional_notice, "invalid resi len");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
+ strcpy(additional_notice, "MRDSL violation");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
+ strcpy(additional_notice, "F-bit not set");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
+ strcpy(additional_notice, "invalid TTT");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
+ strcpy(additional_notice, "invalid DataSN");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
+ strcpy(additional_notice, "burst len violation");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
+ strcpy(additional_notice, "buf offset violation");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
+ strcpy(additional_notice, "invalid LUN field");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
+ strcpy(additional_notice, "invalid R2TSN field");
+ break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \
+ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
+ case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
+ strcpy(additional_notice, "invalid cmd len1");
+ break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \
+ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
+ case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
+ strcpy(additional_notice, "invalid cmd len2");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
+ strcpy(additional_notice,
+ "pend r2t exceeds MaxOutstandingR2T value");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
+ strcpy(additional_notice, "TTT is rsvd");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
+ strcpy(additional_notice, "MBL violation");
+ break;
+#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \
+ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
+ case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
+ strcpy(additional_notice, "data seg len != 0");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
+ strcpy(additional_notice, "reject pdu len error");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
+ strcpy(additional_notice, "async pdu len error");
+ break;
+ case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
+ strcpy(additional_notice, "nopin pdu len error");
+ break;
+#define BNX2_ERR_PEND_R2T_IN_CLEANUP \
+ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
+ case BNX2_ERR_PEND_R2T_IN_CLEANUP:
+ strcpy(additional_notice, "pend r2t in cleanup");
+ break;
+
+ case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
+ strcpy(additional_notice, "IP fragments rcvd");
+ break;
+ case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
+ strcpy(additional_notice, "IP options error");
+ break;
+ case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
+ strcpy(additional_notice, "urgent flag error");
+ break;
+ default:
+ printk(KERN_ALERT "iscsi_err - unknown err %x\n",
+ iscsi_err->completion_status);
+ }
+
+ if (need_recovery) {
+ iscsi_conn_printk(KERN_ALERT,
+ bnx2i_conn->cls_conn->dd_data,
+ "bnx2i: %s - %s\n",
+ message, additional_notice);
+
+ iscsi_conn_printk(KERN_ALERT,
+ bnx2i_conn->cls_conn->dd_data,
+ "conn_err - hostno %d conn %p, "
+ "iscsi_cid %x cid %x\n",
+ bnx2i_conn->hba->shost->host_no,
+ bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
+ bnx2i_conn->ep->ep_cid);
+ bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+ } else
+ if (!test_and_set_bit(iscsi_err->completion_status,
+ (void *) &bnx2i_conn->violation_notified))
+ iscsi_conn_printk(KERN_ALERT,
+ bnx2i_conn->cls_conn->dd_data,
+ "bnx2i: %s - %s\n",
+ message, additional_notice);
+}
+
+
+/**
+ * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
+ * @hba: adapter structure pointer
+ * @conn_destroy: conn destroy kcqe pointer
+ *
+ * handles connection destroy completion request.
+ */
+static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
+ struct iscsi_kcqe *conn_destroy)
+{
+ struct bnx2i_endpoint *ep;
+
+ ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
+ if (!ep) {
+ printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
+				  "offload request, unexpected completion\n");
+ return;
+ }
+
+ if (hba != ep->hba) {
+ printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+ return;
+ }
+
+ if (conn_destroy->completion_status) {
+ printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
+ ep->state = EP_STATE_CLEANUP_FAILED;
+ } else
+ ep->state = EP_STATE_CLEANUP_CMPL;
+ wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: conn offload kcqe pointer
+ *
+ * handles initial connection offload completion; the ep_connect() thread is
+ * woken up to continue with the LLP connect process
+ */
+static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
+ struct iscsi_kcqe *ofld_kcqe)
+{
+ u32 cid_addr;
+ struct bnx2i_endpoint *ep;
+ u32 cid_num;
+
+ ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
+ if (!ep) {
+ printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
+ return;
+ }
+
+ if (hba != ep->hba) {
+ printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+ return;
+ }
+
+ if (ofld_kcqe->completion_status) {
+ if (ofld_kcqe->completion_status ==
+ ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
+ printk(KERN_ALERT "bnx2i: unable to allocate"
+ " iSCSI context resources\n");
+ ep->state = EP_STATE_OFLD_FAILED;
+ } else {
+ ep->state = EP_STATE_OFLD_COMPL;
+ cid_addr = ofld_kcqe->iscsi_conn_context_id;
+ cid_num = bnx2i_get_cid_num(ep);
+ ep->ep_cid = cid_addr;
+ ep->qp.ctx_base = NULL;
+ }
+ wake_up_interruptible(&ep->ofld_wait);
+}
+
+/**
+ * bnx2i_indicate_kcqe - process iSCSI/TCP KCQ entries from the cnic driver
+ * @context: adapter structure pointer
+ * @kcqe: array of pending kcqe pointers
+ * @num_cqe: number of kcqes in the array
+ *
+ * Generic KCQ event handler/dispatcher
+ */
+static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
+ u32 num_cqe)
+{
+ struct bnx2i_hba *hba = context;
+ int i = 0;
+ struct iscsi_kcqe *ikcqe = NULL;
+
+ while (i < num_cqe) {
+ ikcqe = (struct iscsi_kcqe *) kcqe[i++];
+
+ if (ikcqe->op_code ==
+ ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
+ bnx2i_fastpath_notification(hba, ikcqe);
+ else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
+ bnx2i_process_ofld_cmpl(hba, ikcqe);
+ else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
+ bnx2i_process_update_conn_cmpl(hba, ikcqe);
+ else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
+ if (ikcqe->completion_status !=
+ ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
+				bnx2i_iscsi_license_error(hba,
+						ikcqe->completion_status);
+ else {
+ set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ bnx2i_get_link_state(hba);
+ printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
+ "ISCSI_INIT passed\n",
+ (u8)hba->pcidev->bus->number,
+ hba->pci_devno,
+ (u8)hba->pci_func);
+ }
+ } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
+ bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
+ else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
+ bnx2i_process_iscsi_error(hba, ikcqe);
+ else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
+ bnx2i_process_tcp_error(hba, ikcqe);
+ else
+ printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+ ikcqe->op_code);
+ }
+}
+
+
+/**
+ * bnx2i_indicate_netevent - Generic netdev event handler
+ * @context: adapter structure pointer
+ * @event: event type
+ *
+ * Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
+ * NETDEV_GOING_DOWN and NETDEV_CHANGE
+ */
+static void bnx2i_indicate_netevent(void *context, unsigned long event)
+{
+ struct bnx2i_hba *hba = context;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+ bnx2i_send_fw_iscsi_init_msg(hba);
+ break;
+ case NETDEV_DOWN:
+ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ break;
+ case NETDEV_GOING_DOWN:
+ set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ iscsi_host_for_each_session(hba->shost,
+ bnx2i_drop_session);
+ break;
+ case NETDEV_CHANGE:
+ bnx2i_get_link_state(hba);
+ break;
+ default:
+ ;
+ }
+}
+
+
+/**
+ * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
+ * @cm_sk: cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * indicate completion of option-2 TCP connect request.
+ */
+static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
+{
+ struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+ if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+ ep->state = EP_STATE_CONNECT_FAILED;
+ else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
+ ep->state = EP_STATE_CONNECT_COMPL;
+ else
+ ep->state = EP_STATE_CONNECT_FAILED;
+
+ wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_close_cmpl - process tcp conn close completion
+ * @cm_sk: cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * indicate completion of option-2 graceful TCP connection shutdown
+ */
+static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
+{
+ struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+ ep->state = EP_STATE_DISCONN_COMPL;
+ wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
+ * @cm_sk: cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * indicate completion of option-2 abortive TCP connection termination
+ */
+static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
+{
+ struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+ ep->state = EP_STATE_DISCONN_COMPL;
+ wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_remote_close - process received TCP FIN
+ * @cm_sk: cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to indicate
+ * async TCP events such as FIN
+ */
+static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
+{
+ struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+ ep->state = EP_STATE_TCP_FIN_RCVD;
+ if (ep->conn)
+ bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+/**
+ * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
+ * @cm_sk: cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * indicate async TCP events (RST) sent by the peer.
+ */
+static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
+{
+ struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+ ep->state = EP_STATE_TCP_RST_RCVD;
+ if (ep->conn)
+ bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+
+static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
+ char *buf, u16 buflen)
+{
+ struct bnx2i_hba *hba;
+
+ hba = bnx2i_find_hba_for_cnic(dev);
+ if (!hba)
+ return;
+
+ if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
+ msg_type, buf, buflen))
+ printk(KERN_ALERT "bnx2i: private nl message send error\n");
+}
+
+
+/**
+ * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
+ * carrying callback function pointers
+ *
+ */
+struct cnic_ulp_ops bnx2i_cnic_cb = {
+ .cnic_init = bnx2i_ulp_init,
+ .cnic_exit = bnx2i_ulp_exit,
+ .cnic_start = bnx2i_start,
+ .cnic_stop = bnx2i_stop,
+ .indicate_kcqes = bnx2i_indicate_kcqe,
+ .indicate_netevent = bnx2i_indicate_netevent,
+ .cm_connect_complete = bnx2i_cm_connect_cmpl,
+ .cm_close_complete = bnx2i_cm_close_cmpl,
+ .cm_abort_complete = bnx2i_cm_abort_cmpl,
+ .cm_remote_close = bnx2i_cm_remote_close,
+ .cm_remote_abort = bnx2i_cm_remote_abort,
+ .iscsi_nl_send_msg = bnx2i_send_nl_mesg,
+ .owner = THIS_MODULE
+};
+
+
+/**
+ * bnx2i_map_ep_dbell_regs - map connection doorbell registers
+ * @ep: bnx2i endpoint
+ *
+ * maps the connection's SQ and RQ doorbell registers. 5706/5708/5709 devices
+ * host these registers in BAR #0, whereas on 57710 they are accessed by
+ * mapping BAR #1
+ */
+int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
+{
+ u32 cid_num;
+ u32 reg_off;
+ u32 first_l4l5;
+ u32 ctx_sz;
+ u32 config2;
+ resource_size_t reg_base;
+
+ cid_num = bnx2i_get_cid_num(ep);
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+ reg_base = pci_resource_start(ep->hba->pcidev,
+ BNX2X_DOORBELL_PCI_BAR);
+ reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
+ ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ goto arm_cq;
+ }
+
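+	/*
+	 * 5706/5708/5709: doorbells live in the MQ region of BAR #0; for a
+	 * 5709 in bin mode the per-connection context offset also depends
+	 * on the MQ_CONFIG2 settings read below.
+	 */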
+ reg_base = ep->hba->netdev->base_addr;
+ if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
+ (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
+ config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
+ first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
+ ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
+ if (ctx_sz)
+ reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
+ + PAGE_SIZE *
+ (((cid_num - first_l4l5) / ctx_sz) + 256);
+ else
+ reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+ } else
+		/* 5709 device in normal mode and 5706/5708 devices */
+ reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+
+ ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+ MB_KERNEL_CTX_SIZE);
+ if (!ep->qp.ctx_base)
+ return -ENOMEM;
+
+arm_cq:
+ bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
+ return 0;
+}
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
new file mode 100644
index 00000000000..ae4b2d588fd
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -0,0 +1,438 @@
+/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
+static u32 adapter_count;
+static int bnx2i_reg_device;
+
+#define DRV_MODULE_NAME "bnx2i"
+#define DRV_MODULE_VERSION "2.0.1d"
+#define DRV_MODULE_RELDATE "Mar 25, 2009"
+
+static char version[] __devinitdata =
+ "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
+ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static DEFINE_RWLOCK(bnx2i_dev_lock);
+
+unsigned int event_coal_div = 1;
+module_param(event_coal_div, int, 0664);
+MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
+
+unsigned int en_tcp_dack = 1;
+module_param(en_tcp_dack, int, 0664);
+MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
+
+unsigned int error_mask1 = 0x00;
+module_param(error_mask1, int, 0664);
+MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
+
+unsigned int error_mask2 = 0x00;
+module_param(error_mask2, int, 0664);
+MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
+
+unsigned int sq_size;
+module_param(sq_size, int, 0664);
+MODULE_PARM_DESC(sq_size, "Configure SQ size");
+
+unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
+module_param(rq_size, int, 0664);
+MODULE_PARM_DESC(rq_size, "Configure RQ size");
+
+u64 iscsi_error_mask = 0x00;
+
+static void bnx2i_unreg_one_device(struct bnx2i_hba *hba);
+
+
+/**
+ * bnx2i_identify_device - identifies NetXtreme II device type
+ * @hba: Adapter structure pointer
+ *
+ * This function identifies the NX2 device type and sets the appropriate
+ * queue mailbox register access method; 5709 requires the driver to
+ * access MBOX regs using *bin* mode
+ */
+void bnx2i_identify_device(struct bnx2i_hba *hba)
+{
+ hba->cnic_dev_type = 0;
+ if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
+ (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
+ set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
+ else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
+ (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
+ set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
+ else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
+ (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
+ set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
+ hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711)
+ set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
+}
+
+
+/**
+ * get_adapter_list_head - returns the first adapter on the list that supports connection offload
+ */
+struct bnx2i_hba *get_adapter_list_head(void)
+{
+ struct bnx2i_hba *hba = NULL;
+ struct bnx2i_hba *tmp_hba;
+
+ if (!adapter_count)
+ goto hba_not_found;
+
+ read_lock(&bnx2i_dev_lock);
+ list_for_each_entry(tmp_hba, &adapter_list, link) {
+ if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
+ hba = tmp_hba;
+ break;
+ }
+ }
+ read_unlock(&bnx2i_dev_lock);
+hba_not_found:
+ return hba;
+}
+
+
+/**
+ * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
+ * @cnic: pointer to cnic device instance
+ *
+ */
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+ struct bnx2i_hba *hba, *temp;
+
+ read_lock(&bnx2i_dev_lock);
+ list_for_each_entry_safe(hba, temp, &adapter_list, link) {
+ if (hba->cnic == cnic) {
+ read_unlock(&bnx2i_dev_lock);
+ return hba;
+ }
+ }
+ read_unlock(&bnx2i_dev_lock);
+ return NULL;
+}
+
+
+/**
+ * bnx2i_start - cnic callback to initialize & start adapter instance
+ * @handle: transparent handle pointing to adapter structure
+ *
+ * This function maps the adapter structure to the pcidev structure and
+ * initiates the firmware handshake to enable/initialize on-chip iscsi
+ * components. This bnx2i - cnic interface API callback is issued after
+ * the following 2 conditions are met -
+ *	a) underlying network interface is up (marked by event 'NETDEV_UP'
+ *	   from netdev)
+ *	b) bnx2i adapter instance is registered
+ */
+void bnx2i_start(void *handle)
+{
+#define BNX2I_INIT_POLL_TIME (1000 / HZ)
+ struct bnx2i_hba *hba = handle;
+ int i = HZ;
+
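+	/* kick off the firmware handshake and wait up to roughly one second
+	 * for the ISCSI_INIT completion to mark the adapter up
+	 */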
+ bnx2i_send_fw_iscsi_init_msg(hba);
+ while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+ msleep(BNX2I_INIT_POLL_TIME);
+}
+
+
+/**
+ * bnx2i_stop - cnic callback to shutdown adapter instance
+ * @handle: transparent handle pointing to adapter structure
+ *
+ * driver checks if the adapter is already in shutdown mode; if not, it
+ * starts the shutdown process
+ */
+void bnx2i_stop(void *handle)
+{
+ struct bnx2i_hba *hba = handle;
+
+ /* check if cleanup happened in GOING_DOWN context */
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
+ &hba->adapter_state))
+ iscsi_host_for_each_session(hba->shost,
+ bnx2i_drop_session);
+}
+
+/**
+ * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
+ * @hba: Adapter instance to register
+ *
+ * registers bnx2i adapter instance with the cnic driver while holding the
+ * adapter structure lock
+ */
+void bnx2i_register_device(struct bnx2i_hba *hba)
+{
+ if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+ test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ return;
+ }
+
+ hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
+
+ spin_lock(&hba->lock);
+ bnx2i_reg_device++;
+ spin_unlock(&hba->lock);
+
+ set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+
+/**
+ * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
+ *
+ * registers all bnx2i adapter instances with the cnic driver while holding
+ * the global resource lock
+ */
+void bnx2i_reg_dev_all(void)
+{
+ struct bnx2i_hba *hba, *temp;
+
+ read_lock(&bnx2i_dev_lock);
+ list_for_each_entry_safe(hba, temp, &adapter_list, link)
+ bnx2i_register_device(hba);
+ read_unlock(&bnx2i_dev_lock);
+}
+
+
+/**
+ * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
+ * @hba: Adapter instance to unregister
+ *
+ * unregisters the bnx2i adapter instance from the cnic driver while holding
+ * the adapter structure lock
+ */
+static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
+{
+ if (hba->ofld_conns_active ||
+ !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
+ test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
+ return;
+
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+
+ spin_lock(&hba->lock);
+ bnx2i_reg_device--;
+ spin_unlock(&hba->lock);
+
+ /* ep_disconnect could come before NETDEV_DOWN, driver won't
+ * see NETDEV_DOWN as it already unregistered itself.
+ */
+ hba->adapter_state = 0;
+ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+/**
+ * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
+ *
+ * unregisters all bnx2i adapter instances with the cnic driver while holding
+ * the global resource lock
+ */
+void bnx2i_unreg_dev_all(void)
+{
+ struct bnx2i_hba *hba, *temp;
+
+ read_lock(&bnx2i_dev_lock);
+ list_for_each_entry_safe(hba, temp, &adapter_list, link)
+ bnx2i_unreg_one_device(hba);
+ read_unlock(&bnx2i_dev_lock);
+}
+
+
+/**
+ * bnx2i_init_one - initialize an adapter instance and allocate memory resources
+ * @hba: bnx2i adapter instance
+ * @cnic: cnic device handle
+ *
+ * Global resource lock and host adapter lock are held during critical
+ * sections below. This routine is called from cnic_register_driver() context
+ * and from the work horse thread which does the majority of device specific
+ * initialization
+ */
+static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
+{
+ int rc;
+
+ read_lock(&bnx2i_dev_lock);
+ if (bnx2i_reg_device &&
+ !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
+ if (rc) /* duplicate registration */
+ printk(KERN_ERR "bnx2i- dev reg failed\n");
+
+ spin_lock(&hba->lock);
+ bnx2i_reg_device++;
+ hba->age++;
+ spin_unlock(&hba->lock);
+
+ set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+ }
+ read_unlock(&bnx2i_dev_lock);
+
+ write_lock(&bnx2i_dev_lock);
+ list_add_tail(&hba->link, &adapter_list);
+ adapter_count++;
+ write_unlock(&bnx2i_dev_lock);
+ return 0;
+}
+
+
+/**
+ * bnx2i_ulp_init - initialize an adapter instance
+ * @dev: cnic device handle
+ *
+ * Called from cnic_register_driver() context to initialize all enumerated
+ * cnic devices. This routine allocates the adapter structure and other
+ * device specific resources.
+ */
+void bnx2i_ulp_init(struct cnic_dev *dev)
+{
+ struct bnx2i_hba *hba;
+
+ /* Allocate a HBA structure for this device */
+ hba = bnx2i_alloc_hba(dev);
+ if (!hba) {
+ printk(KERN_ERR "bnx2i init: hba initialization failed\n");
+ return;
+ }
+
+ /* Get PCI related information and update hba struct members */
+ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+ if (bnx2i_init_one(hba, dev)) {
+ printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
+ bnx2i_free_hba(hba);
+ } else
+ hba->cnic = dev;
+}
+
+
+/**
+ * bnx2i_ulp_exit - shuts down adapter instance and frees all resources
+ * @dev: cnic device handle
+ *
+ */
+void bnx2i_ulp_exit(struct cnic_dev *dev)
+{
+ struct bnx2i_hba *hba;
+
+ hba = bnx2i_find_hba_for_cnic(dev);
+ if (!hba) {
+ printk(KERN_INFO "bnx2i_ulp_exit: hba not "
+ "found, dev 0x%p\n", dev);
+ return;
+ }
+ write_lock(&bnx2i_dev_lock);
+ list_del_init(&hba->link);
+ adapter_count--;
+
+ if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+
+ spin_lock(&hba->lock);
+ bnx2i_reg_device--;
+ spin_unlock(&hba->lock);
+ }
+ write_unlock(&bnx2i_dev_lock);
+
+ bnx2i_free_hba(hba);
+}
+
+
+/**
+ * bnx2i_mod_init - module init entry point
+ *
+ * initializes driver wide global data structures such as the endpoint pool,
+ * tcp port manager/queue and sysfs. Finally the driver registers itself
+ * with the cnic module
+ */
+static int __init bnx2i_mod_init(void)
+{
+ int err;
+
+ printk(KERN_INFO "%s", version);
+
+ if (!is_power_of_2(sq_size))
+ sq_size = roundup_pow_of_two(sq_size);
+
+ bnx2i_scsi_xport_template =
+ iscsi_register_transport(&bnx2i_iscsi_transport);
+ if (!bnx2i_scsi_xport_template) {
+ printk(KERN_ERR "Could not register bnx2i transport.\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
+ if (err) {
+ printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
+ goto unreg_xport;
+ }
+
+ return 0;
+
+unreg_xport:
+ iscsi_unregister_transport(&bnx2i_iscsi_transport);
+out:
+ return err;
+}
+
+
+/**
+ * bnx2i_mod_exit - module cleanup/exit entry point
+ *
+ * Global resource lock and host adapter lock are held during critical
+ * sections in this function. The driver browses through the adapter list,
+ * cleans up each instance, unregisters the iscsi transport name and finally
+ * unregisters itself with the cnic module
+ */
+static void __exit bnx2i_mod_exit(void)
+{
+ struct bnx2i_hba *hba;
+
+ write_lock(&bnx2i_dev_lock);
+ while (!list_empty(&adapter_list)) {
+ hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
+ list_del(&hba->link);
+ adapter_count--;
+
+ if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+ bnx2i_reg_device--;
+ }
+
+ write_unlock(&bnx2i_dev_lock);
+ bnx2i_free_hba(hba);
+ write_lock(&bnx2i_dev_lock);
+ }
+ write_unlock(&bnx2i_dev_lock);
+
+ iscsi_unregister_transport(&bnx2i_iscsi_transport);
+ cnic_unregister_driver(CNIC_ULP_ISCSI);
+}
+
+module_init(bnx2i_mod_init);
+module_exit(bnx2i_mod_exit);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
new file mode 100644
index 00000000000..f7412196f2f
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -0,0 +1,2064 @@
+/*
+ * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+struct scsi_transport_template *bnx2i_scsi_xport_template;
+struct iscsi_transport bnx2i_iscsi_transport;
+static struct scsi_host_template bnx2i_host_template;
+
+/*
+ * Global endpoint resource info
+ */
+static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
+
+
+static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
+{
+ int retval = 0;
+
+ if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
+ test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+ test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+ retval = -EPERM;
+ return retval;
+}
+
+/**
+ * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
+ * @cmd: iscsi cmd struct pointer
+ * @buf_off: absolute buffer offset
+ * @start_bd_off: u32 pointer to return the offset within the BD
+ * indicated by 'start_bd_idx' on which 'buf_off' falls
+ * @start_bd_idx: index of the BD on which 'buf_off' falls
+ *
+ * identifies & marks various bd info for scsi command's imm data,
+ * unsolicited data and the first solicited data seq.
+ */
+static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
+ u32 *start_bd_off, u32 *start_bd_idx)
+{
+ struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
+ u32 cur_offset = 0;
+ u32 cur_bd_idx = 0;
+
+ if (buf_off) {
+ while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
+ cur_offset += bd_tbl->buffer_length;
+ cur_bd_idx++;
+ bd_tbl++;
+ }
+ }
+
+ *start_bd_off = buf_off - cur_offset;
+ *start_bd_idx = cur_bd_idx;
+}
+
+/**
+ * bnx2i_setup_write_cmd_bd_info - sets up BD various information
+ * @task: transport layer's cmd struct pointer
+ *
+ * identifies & marks various bd info for scsi command's immediate data,
+ * unsolicited data and first solicited data seq which includes BD start
+ * index & BD buf off. This function takes into account iscsi parameters such
+ * as whether immediate data and unsolicited data are supported on this
+ * connection.
+ */
+static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
+{
+ struct bnx2i_cmd *cmd = task->dd_data;
+ u32 start_bd_offset;
+ u32 start_bd_idx;
+ u32 buffer_offset = 0;
+ u32 cmd_len = cmd->req.total_data_transfer_length;
+
+	/* if ImmediateData is turned off & InitialR2T is turned on,
+ * there will be no immediate or unsolicited data, just return.
+ */
+ if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
+ return;
+
+ /* Immediate data */
+ buffer_offset += task->imm_count;
+ if (task->imm_count == cmd_len)
+ return;
+
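+	/*
+	 * Unsolicited data, if any, follows the immediate data; record the
+	 * BD index and the offset within that BD where the unsolicited data
+	 * and the first solicited sequence start.
+	 */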
+ if (iscsi_task_has_unsol_data(task)) {
+ bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+ &start_bd_offset, &start_bd_idx);
+ cmd->req.ud_buffer_offset = start_bd_offset;
+ cmd->req.ud_start_bd_index = start_bd_idx;
+ buffer_offset += task->unsol_r2t.data_length;
+ }
+
+ if (buffer_offset != cmd_len) {
+ bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+ &start_bd_offset, &start_bd_idx);
+ if ((start_bd_offset > task->conn->session->first_burst) ||
+ (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
+ int i = 0;
+
+ iscsi_conn_printk(KERN_ALERT, task->conn,
+ "bnx2i- error, buf offset 0x%x "
+ "bd_valid %d use_sg %d\n",
+ buffer_offset, cmd->io_tbl.bd_valid,
+ scsi_sg_count(cmd->scsi_cmd));
+ for (i = 0; i < cmd->io_tbl.bd_valid; i++)
+ iscsi_conn_printk(KERN_ALERT, task->conn,
+ "bnx2i err, bd[%d]: len %x\n",
+ i, cmd->io_tbl.bd_tbl[i].\
+ buffer_length);
+ }
+ cmd->req.sd_buffer_offset = start_bd_offset;
+ cmd->req.sd_start_bd_index = start_bd_idx;
+ }
+}
+
+
+
+/**
+ * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
+ * @hba: adapter instance
+ * @cmd: iscsi cmd struct pointer
+ *
+ * map SG list
+ */
+static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+ struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
+ struct scatterlist *sg;
+ int byte_count = 0;
+ int bd_count = 0;
+ int sg_count;
+ int sg_len;
+ u64 addr;
+ int i;
+
+ BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
+
+ sg_count = scsi_dma_map(sc);
+
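+	/*
+	 * Build one BD per mapped SG element; the first and last entries in
+	 * the chain are flagged for the firmware.
+	 */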
+ scsi_for_each_sg(sc, sg, sg_count, i) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
+ bd[bd_count].buffer_addr_hi = addr >> 32;
+ bd[bd_count].buffer_length = sg_len;
+ bd[bd_count].flags = 0;
+ if (bd_count == 0)
+ bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+ byte_count += sg_len;
+ bd_count++;
+ }
+
+ if (bd_count)
+ bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
+
+ BUG_ON(byte_count != scsi_bufflen(sc));
+ return bd_count;
+}
+
+/**
+ * bnx2i_iscsi_map_sg_list - maps SG list
+ * @cmd: iscsi cmd struct pointer
+ *
+ * creates BD list table for the command
+ */
+static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
+{
+ int bd_count;
+
+ bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
+ if (!bd_count) {
+ struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
+
+ bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
+ bd[0].buffer_length = bd[0].flags = 0;
+ }
+ cmd->io_tbl.bd_valid = bd_count;
+}
+
+
+/**
+ * bnx2i_iscsi_unmap_sg_list - unmaps SG list
+ * @cmd: iscsi cmd struct pointer
+ *
+ * unmap IO buffers and invalidate the BD table
+ */
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
+{
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+ if (cmd->io_tbl.bd_valid && sc) {
+ scsi_dma_unmap(sc);
+ cmd->io_tbl.bd_valid = 0;
+ }
+}
+
+static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
+{
+ memset(&cmd->req, 0x00, sizeof(cmd->req));
+ cmd->req.op_code = 0xFF;
+ cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
+ cmd->req.bd_list_addr_hi =
+ (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
+}
+
+
+/**
+ * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
+ * @hba: pointer to adapter instance
+ * @bnx2i_conn: pointer to bnx2i connection
+ * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
+ *
+ * update iscsi cid table entry with connection pointer. This enables
+ * driver to quickly get hold of connection structure pointer in
+ * completion/interrupt thread using iscsi context ID
+ */
+static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
+ struct bnx2i_conn *bnx2i_conn,
+ u32 iscsi_cid)
+{
+ if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
+ iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+ "conn bind - entry #%d not free\n", iscsi_cid);
+ return -EBUSY;
+ }
+
+ hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
+ return 0;
+}
+
+
+/**
+ * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
+ * @hba: pointer to adapter instance
+ * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
+ */
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+ u16 iscsi_cid)
+{
+ if (!hba->cid_que.conn_cid_tbl) {
+ printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
+ return NULL;
+
+ } else if (iscsi_cid >= hba->max_active_conns) {
+ printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
+ return NULL;
+ }
+ return hba->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
+
+/**
+ * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from free pool
+ * @hba: pointer to adapter instance
+ */
+static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
+{
+ int idx;
+
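+	/*
+	 * The free iscsi_cid pool is a circular queue; the consumer index
+	 * advances here and the producer index advances in
+	 * bnx2i_free_iscsi_cid().
+	 */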
+ if (!hba->cid_que.cid_free_cnt)
+ return -1;
+
+ idx = hba->cid_que.cid_q_cons_idx;
+ hba->cid_que.cid_q_cons_idx++;
+ if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
+ hba->cid_que.cid_q_cons_idx = 0;
+
+ hba->cid_que.cid_free_cnt--;
+ return hba->cid_que.cid_que[idx];
+}
+
+
+/**
+ * bnx2i_free_iscsi_cid - returns an iscsi_cid to the free list
+ * @hba: pointer to adapter instance
+ * @iscsi_cid: iscsi context ID to free
+ */
+static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
+{
+ int idx;
+
+ if (iscsi_cid == (u16) -1)
+ return;
+
+ hba->cid_que.cid_free_cnt++;
+
+ idx = hba->cid_que.cid_q_prod_idx;
+ hba->cid_que.cid_que[idx] = iscsi_cid;
+ hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
+ hba->cid_que.cid_q_prod_idx++;
+ if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
+ hba->cid_que.cid_q_prod_idx = 0;
+}
+
+
+/**
+ * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
+ * @hba: pointer to adapter instance
+ *
+ * allocates memory for the iscsi cid queue & 'cid - conn ptr' mapping table,
+ * and initializes table attributes
+ */
+static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
+{
+ int mem_size;
+ int i;
+
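+	/* round both allocations up to a whole number of pages */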
+ mem_size = hba->max_active_conns * sizeof(u32);
+ mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
+ if (!hba->cid_que.cid_que_base)
+ return -ENOMEM;
+
+ mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
+ mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
+ if (!hba->cid_que.conn_cid_tbl) {
+ kfree(hba->cid_que.cid_que_base);
+ hba->cid_que.cid_que_base = NULL;
+ return -ENOMEM;
+ }
+
+ hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
+ hba->cid_que.cid_q_prod_idx = 0;
+ hba->cid_que.cid_q_cons_idx = 0;
+ hba->cid_que.cid_q_max_idx = hba->max_active_conns;
+ hba->cid_que.cid_free_cnt = hba->max_active_conns;
+
+ for (i = 0; i < hba->max_active_conns; i++) {
+ hba->cid_que.cid_que[i] = i;
+ hba->cid_que.conn_cid_tbl[i] = NULL;
+ }
+ return 0;
+}
+
+
+/**
+ * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
+ * @hba: pointer to adapter instance
+ */
+static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
+{
+ kfree(hba->cid_que.cid_que_base);
+ hba->cid_que.cid_que_base = NULL;
+
+ kfree(hba->cid_que.conn_cid_tbl);
+ hba->cid_que.conn_cid_tbl = NULL;
+}
+
+
+/**
+ * bnx2i_alloc_ep - allocates ep structure from global pool
+ * @hba: pointer to adapter instance
+ *
+ * routine allocates a free endpoint structure from the global pool to be
+ * used for this connection. Global resource lock, 'bnx2i_resc_lock', is
+ * held while accessing shared global data structures
+ */
+static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
+{
+ struct iscsi_endpoint *ep;
+ struct bnx2i_endpoint *bnx2i_ep;
+
+ ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
+ if (!ep) {
+ printk(KERN_ERR "bnx2i: Could not allocate ep\n");
+ return NULL;
+ }
+
+ bnx2i_ep = ep->dd_data;
+ INIT_LIST_HEAD(&bnx2i_ep->link);
+ bnx2i_ep->state = EP_STATE_IDLE;
+ bnx2i_ep->hba = hba;
+ bnx2i_ep->hba_age = hba->age;
+ hba->ofld_conns_active++;
+ init_waitqueue_head(&bnx2i_ep->ofld_wait);
+ return ep;
+}
+
+
+/**
+ * bnx2i_free_ep - free endpoint
+ * @ep: pointer to iscsi endpoint structure
+ */
+static void bnx2i_free_ep(struct iscsi_endpoint *ep)
+{
+ struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnx2i_resc_lock, flags);
+ bnx2i_ep->state = EP_STATE_IDLE;
+ bnx2i_ep->hba->ofld_conns_active--;
+
+ bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
+ if (bnx2i_ep->conn) {
+ bnx2i_ep->conn->ep = NULL;
+ bnx2i_ep->conn = NULL;
+ }
+
+ bnx2i_ep->hba = NULL;
+ spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
+ iscsi_destroy_endpoint(ep);
+}
+
+
+/**
+ * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
+ * @hba: adapter instance pointer
+ * @session: iscsi session pointer
+ * @cmd: iscsi command structure
+ */
+static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
+ struct bnx2i_cmd *cmd)
+{
+ struct io_bdt *io = &cmd->io_tbl;
+ struct iscsi_bd *bd;
+
+ io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
+ &io->bd_tbl_dma, GFP_KERNEL);
+ if (!io->bd_tbl) {
+ iscsi_session_printk(KERN_ERR, session, "Could not "
+ "allocate bdt.\n");
+ return -ENOMEM;
+ }
+ io->bd_valid = 0;
+ return 0;
+}
+
+/**
+ * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
+ * @hba: adapter instance pointer
+ * @session: iscsi session pointer
+ */
+static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
+ struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct bnx2i_cmd *cmd = task->dd_data;
+
+ if (cmd->io_tbl.bd_tbl)
+ dma_free_coherent(&hba->pcidev->dev,
+ ISCSI_MAX_BDS_PER_CMD *
+ sizeof(struct iscsi_bd),
+ cmd->io_tbl.bd_tbl,
+ cmd->io_tbl.bd_tbl_dma);
+ }
+
+}
+
+
+/**
+ * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
+ * @hba: adapter instance pointer
+ * @session: iscsi session pointer
+ */
+static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
+ struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct bnx2i_cmd *cmd = task->dd_data;
+
+ /* Anil */
+ task->hdr = &cmd->hdr;
+ task->hdr_max = sizeof(struct iscsi_hdr);
+
+ if (bnx2i_alloc_bdt(hba, session, cmd))
+ goto free_bdts;
+ }
+
+ return 0;
+
+free_bdts:
+ bnx2i_destroy_cmd_pool(hba, session);
+ return -ENOMEM;
+}
+
+
+/**
+ * bnx2i_setup_mp_bdt - allocate BD table resources
+ * @hba: pointer to adapter structure
+ *
+ * Allocate memory for dummy buffer and associated BD
+ * table to be used by middle path (MP) requests
+ */
+static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
+{
+ int rc = 0;
+ struct iscsi_bd *mp_bdt;
+ u64 addr;
+
+ hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->mp_bd_dma, GFP_KERNEL);
+ if (!hba->mp_bd_tbl) {
+ printk(KERN_ERR "unable to allocate Middle Path BDT\n");
+ rc = -1;
+ goto out;
+ }
+
+ hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->dummy_buf_dma, GFP_KERNEL);
+ if (!hba->dummy_buffer) {
+ printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->mp_bd_tbl, hba->mp_bd_dma);
+ hba->mp_bd_tbl = NULL;
+ rc = -1;
+ goto out;
+ }
+
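+ /* one BD covering the dummy buffer, marked first and last in the chain */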
+ mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
+ addr = (unsigned long) hba->dummy_buf_dma;
+ mp_bdt->buffer_addr_lo = addr & 0xffffffff;
+ mp_bdt->buffer_addr_hi = addr >> 32;
+ mp_bdt->buffer_length = PAGE_SIZE;
+ mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+ ISCSI_BD_FIRST_IN_BD_CHAIN;
+out:
+ return rc;
+}
+
+
+/**
+ * bnx2i_free_mp_bdt - releases middle path (MP) dummy buffer and BD table
+ * @hba: pointer to adapter instance
+ *
+ * free MP dummy buffer and associated BD table
+ */
+static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
+{
+ if (hba->mp_bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->mp_bd_tbl, hba->mp_bd_dma);
+ hba->mp_bd_tbl = NULL;
+ }
+ if (hba->dummy_buffer) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->dummy_buffer, hba->dummy_buf_dma);
+ hba->dummy_buffer = NULL;
+ }
+ return;
+}
+
+/**
+ * bnx2i_drop_session - notifies iscsid of connection error.
+ * @cls_session: iscsi cls session pointer
+ *
+ * This notifies iscsid that there is an error, so it can initiate
+ * recovery.
+ *
+ * This relies on the caller using the iscsi class iterator so the object
+ * is refcounted and does not disappear from under us.
+ */
+void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
+{
+ iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+/**
+ * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ write_lock_bh(&hba->ep_rdwr_lock);
+ list_add_tail(&ep->link, &hba->ep_destroy_list);
+ write_unlock_bh(&hba->ep_rdwr_lock);
+ return 0;
+}
+
+/**
+ * bnx2i_ep_destroy_list_del - removes an entry from the EP destroy list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ write_lock_bh(&hba->ep_rdwr_lock);
+ list_del_init(&ep->link);
+ write_unlock_bh(&hba->ep_rdwr_lock);
+
+ return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ write_lock_bh(&hba->ep_rdwr_lock);
+ list_add_tail(&ep->link, &hba->ep_ofld_list);
+ write_unlock_bh(&hba->ep_rdwr_lock);
+ return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_del - removes an entry from the ep offload pending list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ write_lock_bh(&hba->ep_rdwr_lock);
+ list_del_init(&ep->link);
+ write_unlock_bh(&hba->ep_rdwr_lock);
+ return 0;
+}
+
+
+/**
+ * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
+ *
+ * @hba: pointer to adapter instance
+ * @iscsi_cid: iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+ struct list_head *list;
+ struct list_head *tmp;
+ struct bnx2i_endpoint *ep;
+
+ read_lock_bh(&hba->ep_rdwr_lock);
+ list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
+ ep = (struct bnx2i_endpoint *)list;
+
+ if (ep->ep_iscsi_cid == iscsi_cid)
+ break;
+ ep = NULL;
+ }
+ read_unlock_bh(&hba->ep_rdwr_lock);
+
+ if (!ep)
+ printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+ return ep;
+}
+
+
+/**
+ * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
+ * @hba: pointer to adapter instance
+ * @iscsi_cid: iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+ struct list_head *list;
+ struct list_head *tmp;
+ struct bnx2i_endpoint *ep;
+
+ read_lock_bh(&hba->ep_rdwr_lock);
+ list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
+ ep = (struct bnx2i_endpoint *)list;
+
+ if (ep->ep_iscsi_cid == iscsi_cid)
+ break;
+ ep = NULL;
+ }
+ read_unlock_bh(&hba->ep_rdwr_lock);
+
+ if (!ep)
+ printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+
+ return ep;
+}
+
+/**
+ * bnx2i_setup_host_queue_size - assigns shost->can_queue param
+ * @hba: pointer to adapter instance
+ * @shost: scsi host pointer
+ *
+ * Initializes 'can_queue' parameter based on how many outstanding commands
+ * the device can handle. Each device type (5708/5709/57710) has different
+ * capabilities
+ */
+static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
+ struct Scsi_Host *shost)
+{
+ if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
+ shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+ else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
+ shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
+ else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+ shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
+ else
+ shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+}
+
+
+/**
+ * bnx2i_alloc_hba - allocate and init adapter instance
+ * @cnic: cnic device pointer
+ *
+ * allocate & initialize adapter structure and call other
+ * support routines to do per adapter initialization
+ */
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
+{
+ struct Scsi_Host *shost;
+ struct bnx2i_hba *hba;
+
+ shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
+ if (!shost)
+ return NULL;
+ shost->dma_boundary = cnic->pcidev->dma_mask;
+ shost->transportt = bnx2i_scsi_xport_template;
+ shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
+ shost->max_channel = 0;
+ shost->max_lun = 512;
+ shost->max_cmd_len = 16;
+
+ hba = iscsi_host_priv(shost);
+ hba->shost = shost;
+ hba->netdev = cnic->netdev;
+ /* Get PCI related information and update hba struct members */
+ hba->pcidev = cnic->pcidev;
+ pci_dev_get(hba->pcidev);
+ hba->pci_did = hba->pcidev->device;
+ hba->pci_vid = hba->pcidev->vendor;
+ hba->pci_sdid = hba->pcidev->subsystem_device;
+ hba->pci_svid = hba->pcidev->subsystem_vendor;
+ hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
+ hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
+ bnx2i_identify_device(hba);
+
+ bnx2i_setup_host_queue_size(hba, shost);
+
+ if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+ hba->regview = ioremap_nocache(hba->netdev->base_addr,
+ BNX2_MQ_CONFIG2);
+ if (!hba->regview)
+ goto ioreg_map_err;
+ } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+ hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+ if (!hba->regview)
+ goto ioreg_map_err;
+ }
+
+ if (bnx2i_setup_mp_bdt(hba))
+ goto mp_bdt_mem_err;
+
+ INIT_LIST_HEAD(&hba->ep_ofld_list);
+ INIT_LIST_HEAD(&hba->ep_destroy_list);
+ rwlock_init(&hba->ep_rdwr_lock);
+
+ hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
+
+ /* different values for 5708/5709/57710 */
+ hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
+
+ if (bnx2i_setup_free_cid_que(hba))
+ goto cid_que_err;
+
+ /* SQ/RQ/CQ size can be changed via sysfs interface */
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+ if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
+ hba->max_sqes = sq_size;
+ else
+ hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
+ } else { /* 5706/5708/5709 */
+ if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
+ hba->max_sqes = sq_size;
+ else
+ hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
+ }
+
+ hba->max_rqes = rq_size;
+ hba->max_cqes = hba->max_sqes + rq_size;
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+ if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
+ hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
+ } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
+ hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
+
+ hba->num_ccell = hba->max_sqes / 2;
+
+ spin_lock_init(&hba->lock);
+ mutex_init(&hba->net_dev_lock);
+
+ if (iscsi_host_add(shost, &hba->pcidev->dev))
+ goto free_dump_mem;
+ return hba;
+
+free_dump_mem:
+ bnx2i_release_free_cid_que(hba);
+cid_que_err:
+ bnx2i_free_mp_bdt(hba);
+mp_bdt_mem_err:
+ if (hba->regview) {
+ iounmap(hba->regview);
+ hba->regview = NULL;
+ }
+ioreg_map_err:
+ pci_dev_put(hba->pcidev);
+ scsi_host_put(shost);
+ return NULL;
+}
+
+/**
+ * bnx2i_free_hba - releases hba structure and resources held by the adapter
+ * @hba: pointer to adapter instance
+ *
+ * free adapter structure and call various cleanup routines.
+ */
+void bnx2i_free_hba(struct bnx2i_hba *hba)
+{
+ struct Scsi_Host *shost = hba->shost;
+
+ iscsi_host_remove(shost);
+ INIT_LIST_HEAD(&hba->ep_ofld_list);
+ INIT_LIST_HEAD(&hba->ep_destroy_list);
+ pci_dev_put(hba->pcidev);
+
+ if (hba->regview) {
+ iounmap(hba->regview);
+ hba->regview = NULL;
+ }
+ bnx2i_free_mp_bdt(hba);
+ bnx2i_release_free_cid_que(hba);
+ iscsi_host_free(shost);
+}
+
+/**
+ * bnx2i_conn_free_login_resources - free DMA resources used for login process
+ * @hba: pointer to adapter instance
+ * @bnx2i_conn: iscsi connection pointer
+ *
+ * Login related resources, mostly BDT & payload DMA memory is freed
+ */
+static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
+ struct bnx2i_conn *bnx2i_conn)
+{
+ if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ bnx2i_conn->gen_pdu.resp_bd_tbl,
+ bnx2i_conn->gen_pdu.resp_bd_dma);
+ bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
+ }
+
+ if (bnx2i_conn->gen_pdu.req_bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ bnx2i_conn->gen_pdu.req_bd_tbl,
+ bnx2i_conn->gen_pdu.req_bd_dma);
+ bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+ }
+
+ if (bnx2i_conn->gen_pdu.resp_buf) {
+ dma_free_coherent(&hba->pcidev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ bnx2i_conn->gen_pdu.resp_buf,
+ bnx2i_conn->gen_pdu.resp_dma_addr);
+ bnx2i_conn->gen_pdu.resp_buf = NULL;
+ }
+
+ if (bnx2i_conn->gen_pdu.req_buf) {
+ dma_free_coherent(&hba->pcidev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ bnx2i_conn->gen_pdu.req_buf,
+ bnx2i_conn->gen_pdu.req_dma_addr);
+ bnx2i_conn->gen_pdu.req_buf = NULL;
+ }
+}
+
+/**
+ * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
+ * @hba: pointer to adapter instance
+ * @bnx2i_conn: iscsi connection pointer
+ *
+ * Mgmt task DMA resources are allocated in this routine.
+ */
+static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
+ struct bnx2i_conn *bnx2i_conn)
+{
+ /* Allocate memory for login request/response buffers */
+ bnx2i_conn->gen_pdu.req_buf =
+ dma_alloc_coherent(&hba->pcidev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ &bnx2i_conn->gen_pdu.req_dma_addr,
+ GFP_KERNEL);
+ if (bnx2i_conn->gen_pdu.req_buf == NULL)
+ goto login_req_buf_failure;
+
+ bnx2i_conn->gen_pdu.req_buf_size = 0;
+ bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
+
+ bnx2i_conn->gen_pdu.resp_buf =
+ dma_alloc_coherent(&hba->pcidev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ &bnx2i_conn->gen_pdu.resp_dma_addr,
+ GFP_KERNEL);
+ if (bnx2i_conn->gen_pdu.resp_buf == NULL)
+ goto login_resp_buf_failure;
+
+ bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
+
+ bnx2i_conn->gen_pdu.req_bd_tbl =
+ dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+ if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
+ goto login_req_bd_tbl_failure;
+
+ bnx2i_conn->gen_pdu.resp_bd_tbl =
+ dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &bnx2i_conn->gen_pdu.resp_bd_dma,
+ GFP_KERNEL);
+ if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
+ goto login_resp_bd_tbl_failure;
+
+ return 0;
+
+login_resp_bd_tbl_failure:
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ bnx2i_conn->gen_pdu.req_bd_tbl,
+ bnx2i_conn->gen_pdu.req_bd_dma);
+ bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+ dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+ bnx2i_conn->gen_pdu.resp_buf,
+ bnx2i_conn->gen_pdu.resp_dma_addr);
+ bnx2i_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+ dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+ bnx2i_conn->gen_pdu.req_buf,
+ bnx2i_conn->gen_pdu.req_dma_addr);
+ bnx2i_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+ iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
+ "login resource alloc failed!!\n");
+ return -ENOMEM;
+
+}
+
+
+/**
+ * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
+ * @bnx2i_conn: iscsi connection pointer
+ *
+ * Prepares the BD table entries before shipping requests to cnic
+ * for PDUs prepared by the 'iscsid' daemon
+ */
+static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
+{
+ struct iscsi_bd *bd_tbl;
+
+ bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
+
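+ /* request BD covers only the bytes iscsid actually wrote into req_buf */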
+ bd_tbl->buffer_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
+ bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
+ bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
+ bnx2i_conn->gen_pdu.req_buf;
+ bd_tbl->reserved0 = 0;
+ bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+ ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+ bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
+ bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
+ bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
+ bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ bd_tbl->reserved0 = 0;
+ bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+ ISCSI_BD_FIRST_IN_BD_CHAIN;
+}
+
+
+/**
+ * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
+ * @task: transport layer task pointer
+ *
+ * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
+ * Nop-out and Logout requests flow through this path.
+ */
+static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
+{
+ struct bnx2i_cmd *cmd = task->dd_data;
+ struct bnx2i_conn *bnx2i_conn = cmd->conn;
+ int rc = 0;
+ char *buf;
+ int data_len;
+
+ bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
+ switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ bnx2i_send_iscsi_login(bnx2i_conn, task);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ data_len = bnx2i_conn->gen_pdu.req_buf_size;
+ buf = bnx2i_conn->gen_pdu.req_buf;
+ if (data_len)
+ rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+ RESERVED_ITT,
+ buf, data_len, 1);
+ else
+ rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+ RESERVED_ITT,
+ NULL, 0, 1);
+ break;
+ case ISCSI_OP_LOGOUT:
+ rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
+ break;
+ default:
+ iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+ "send_gen: unsupported op 0x%x\n",
+ task->hdr->opcode);
+ }
+ return rc;
+}
+
+
+/**********************************************************************
+ * SCSI-ML Interface
+ **********************************************************************/
+
+/**
+ * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
+ * @sc: SCSI-ML command pointer
+ * @cmd: iscsi cmd pointer
+ */
+static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
+{
+ u32 dword;
+ int lpcnt;
+ u8 *srcp;
+ u32 *dstp;
+ u32 scsi_lun[2];
+
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
+ cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
+ cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
+
+ lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
+ srcp = (u8 *) sc->cmnd;
+ dstp = (u32 *) cmd->req.cdb;
+ while (lpcnt--) {
+ memcpy(&dword, (const void *) srcp, 4);
+ *dstp = cpu_to_be32(dword);
+ srcp += 4;
+ dstp++;
+ }
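+ /* copy the leftover CDB bytes when cmd_len is not a multiple of 4 */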
+ if (sc->cmd_len & 0x3) {
+ dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
+ *dstp = cpu_to_be32(dword);
+ }
+}
+
+static void bnx2i_cleanup_task(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
+
+ /*
+ * mgmt task or cmd was never sent to us to transmit.
+ */
+ if (!task->sc || task->state == ISCSI_TASK_PENDING)
+ return;
+ /*
+ * need to clean-up task context to claim dma buffers
+ */
+ if (task->state == ISCSI_TASK_ABRT_TMF) {
+ bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
+
+ spin_unlock_bh(&conn->session->lock);
+ wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
+ msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
+ spin_lock_bh(&conn->session->lock);
+ }
+ bnx2i_iscsi_unmap_sg_list(task->dd_data);
+}
+
+/**
+ * bnx2i_mtask_xmit - transmit mtask to chip for further processing
+ * @conn: transport layer conn structure pointer
+ * @task: transport layer command structure pointer
+ */
+static int
+bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct bnx2i_cmd *cmd = task->dd_data;
+
+ memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+ bnx2i_setup_cmd_wqe_template(cmd);
+ bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
+ if (task->data_count) {
+ memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
+ task->data_count);
+ bnx2i_conn->gen_pdu.req_wr_ptr =
+ bnx2i_conn->gen_pdu.req_buf + task->data_count;
+ }
+ cmd->conn = conn->dd_data;
+ cmd->scsi_cmd = NULL;
+ return bnx2i_iscsi_send_generic_request(task);
+}
+
+/**
+ * bnx2i_task_xmit - transmit iscsi command to chip for further processing
+ * @task: transport layer command structure pointer
+ *
+ * maps SG buffers and send request to chip/firmware in the form of SQ WQE
+ */
+static int bnx2i_task_xmit(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+ struct bnx2i_hba *hba = iscsi_host_priv(shost);
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+ struct bnx2i_cmd *cmd = task->dd_data;
+ struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+
+ if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+ return -ENOTCONN;
+
+ if (!bnx2i_conn->is_bound)
+ return -ENOTCONN;
+
+ /*
+ * If there is no scsi_cmnd this must be a mgmt task
+ */
+ if (!sc)
+ return bnx2i_mtask_xmit(conn, task);
+
+ bnx2i_setup_cmd_wqe_template(cmd);
+ cmd->req.op_code = ISCSI_OP_SCSI_CMD;
+ cmd->conn = bnx2i_conn;
+ cmd->scsi_cmd = sc;
+ cmd->req.total_data_transfer_length = scsi_bufflen(sc);
+ cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
+
+ bnx2i_iscsi_map_sg_list(cmd);
+ bnx2i_cpy_scsi_cdb(sc, cmd);
+
+ cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
+ cmd->req.itt = task->itt |
+ (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+ bnx2i_setup_write_cmd_bd_info(task);
+ } else {
+ if (scsi_bufflen(sc))
+ cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
+ cmd->req.itt = task->itt |
+ (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+ }
+
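+ /* commands without data buffers point at the shared middle path BD */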
+ cmd->req.num_bds = cmd->io_tbl.bd_valid;
+ if (!cmd->io_tbl.bd_valid) {
+ cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
+ cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
+ cmd->req.num_bds = 1;
+ }
+
+ bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
+ return 0;
+}
+
+/**
+ * bnx2i_session_create - create a new iscsi session
+ * @ep: pointer to iscsi endpoint
+ * @cmds_max: max commands supported
+ * @qdepth: scsi queue depth to support
+ * @initial_cmdsn: initial iscsi CMDSN to be used for this session
+ *
+ * Creates a new iSCSI session instance on given device.
+ */
+static struct iscsi_cls_session *
+bnx2i_session_create(struct iscsi_endpoint *ep,
+ uint16_t cmds_max, uint16_t qdepth,
+ uint32_t initial_cmdsn)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_cls_session *cls_session;
+ struct bnx2i_hba *hba;
+ struct bnx2i_endpoint *bnx2i_ep;
+
+ if (!ep) {
+ printk(KERN_ERR "bnx2i: missing ep.\n");
+ return NULL;
+ }
+
+ bnx2i_ep = ep->dd_data;
+ shost = bnx2i_ep->hba->shost;
+ hba = iscsi_host_priv(shost);
+ if (bnx2i_adapter_ready(hba))
+ return NULL;
+
+ /*
+ * user can override hw limit as long as it is within
+ * the min/max.
+ */
+ if (cmds_max > hba->max_sqes)
+ cmds_max = hba->max_sqes;
+ else if (cmds_max < BNX2I_SQ_WQES_MIN)
+ cmds_max = BNX2I_SQ_WQES_MIN;
+
+ cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
+ cmds_max, sizeof(struct bnx2i_cmd),
+ initial_cmdsn, ISCSI_MAX_TARGET);
+ if (!cls_session)
+ return NULL;
+
+ if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
+ goto session_teardown;
+ return cls_session;
+
+session_teardown:
+ iscsi_session_teardown(cls_session);
+ return NULL;
+}
+
+
+/**
+ * bnx2i_session_destroy - destroys iscsi session
+ * @cls_session: pointer to iscsi cls session
+ *
+ * Destroys previously created iSCSI session instance and releases
+ * all resources held by it
+ */
+static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct bnx2i_hba *hba = iscsi_host_priv(shost);
+
+ bnx2i_destroy_cmd_pool(hba, session);
+ iscsi_session_teardown(cls_session);
+}
+
+
+/**
+ * bnx2i_conn_create - create iscsi connection instance
+ * @cls_session: pointer to iscsi cls session
+ * @cid: iscsi cid as per rfc (not NX2's CID terminology)
+ *
+ * Creates a new iSCSI connection instance for a given session
+ */
+static struct iscsi_cls_conn *
+bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct bnx2i_hba *hba = iscsi_host_priv(shost);
+ struct bnx2i_conn *bnx2i_conn;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_conn *conn;
+
+ cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
+ cid);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+
+ bnx2i_conn = conn->dd_data;
+ bnx2i_conn->cls_conn = cls_conn;
+ bnx2i_conn->hba = hba;
+ /* 'ep' ptr will be assigned in bind() call */
+ bnx2i_conn->ep = NULL;
+ init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
+
+ if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "conn_new: login resc alloc failed!!\n");
+ goto free_conn;
+ }
+
+ return cls_conn;
+
+free_conn:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+}
+
+/**
+ * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
+ * @cls_session: pointer to iscsi cls session
+ * @cls_conn: pointer to iscsi cls conn
+ * @transport_fd: 64-bit EP handle
+ * @is_leading: leading connection on this session?
+ *
+ * Binds together the iSCSI session instance, the iSCSI connection instance
+ * and the TCP connection. This routine returns an error code if the TCP
+ * connection does not belong to the device the iSCSI sess/conn is
+ * bound to
+ */
+static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_fd, int is_leading)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct bnx2i_hba *hba = iscsi_host_priv(shost);
+ struct bnx2i_endpoint *bnx2i_ep;
+ struct iscsi_endpoint *ep;
+ int ret_code;
+
+ ep = iscsi_lookup_endpoint(transport_fd);
+ if (!ep)
+ return -EINVAL;
+
+ bnx2i_ep = ep->dd_data;
+ if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+ (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
+ /* Peer disconnected via FIN or RST */
+ return -EINVAL;
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+ return -EINVAL;
+
+ if (bnx2i_ep->hba != hba) {
+ /* Error - TCP connection does not belong to this device
+ */
+ iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+ "conn bind, ep=0x%p (%s) does not",
+ bnx2i_ep, bnx2i_ep->hba->netdev->name);
+ iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+ "belong to hba (%s)\n",
+ hba->netdev->name);
+ return -EEXIST;
+ }
+
+ bnx2i_ep->conn = bnx2i_conn;
+ bnx2i_conn->ep = bnx2i_ep;
+ bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
+ bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
+ bnx2i_conn->is_bound = 1;
+
+ ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
+ bnx2i_ep->ep_iscsi_cid);
+
+ /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
+ * driver needs to explicitly replenish RQ index during setup.
+ */
+ if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+ bnx2i_put_rq_buf(bnx2i_conn, 0);
+
+ bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+ return ret_code;
+}
+
+
+/**
+ * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
+ * @cls_conn: pointer to iscsi cls conn
+ *
+ * Destroy an iSCSI connection instance and release memory resources held by
+ * this connection
+ */
+static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct Scsi_Host *shost;
+ struct bnx2i_hba *hba;
+
+ shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+ hba = iscsi_host_priv(shost);
+
+ bnx2i_conn_free_login_resources(hba, bnx2i_conn);
+ iscsi_conn_teardown(cls_conn);
+}
+
+
+/**
+ * bnx2i_conn_get_param - return iscsi connection parameter to caller
+ * @cls_conn: pointer to iscsi cls conn
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ *
+ * returns iSCSI connection parameters
+ */
+static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ int len = 0;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ if (bnx2i_conn->ep)
+ len = sprintf(buf, "%hu\n",
+ bnx2i_conn->ep->cm_sk->dst_port);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (bnx2i_conn->ep)
+ len = sprintf(buf, NIPQUAD_FMT "\n",
+ NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
+ break;
+ default:
+ return iscsi_conn_get_param(cls_conn, param, buf);
+ }
+
+ return len;
+}
+
+/**
+ * bnx2i_host_get_param - returns host (adapter) related parameters
+ * @shost: scsi host pointer
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ */
+static int bnx2i_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct bnx2i_hba *hba = iscsi_host_priv(shost);
+ int len = 0;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
+ break;
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ len = sprintf(buf, "%s\n", hba->netdev->name);
+ break;
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+ return len;
+}
+
+/**
+ * bnx2i_conn_start - completes iscsi connection migration to FFP
+ * @cls_conn: pointer to iscsi cls conn
+ *
+ * last call in FFP migration to handover iscsi conn to the driver
+ */
+static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+
+ bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
+ bnx2i_update_iscsi_conn(conn);
+
+ /*
+ * this should normally not sleep for a long time so it should
+ * not disrupt the caller.
+ */
+ bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
+ bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+ bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
+ add_timer(&bnx2i_conn->ep->ofld_timer);
+ /* update iSCSI context for this conn, wait for CNIC to complete */
+ wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
+ bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
+
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&bnx2i_conn->ep->ofld_timer);
+
+ iscsi_conn_start(cls_conn);
+ return 0;
+}
+
+
+/**
+ * bnx2i_conn_get_stats - returns iSCSI stats
+ * @cls_conn: pointer to iscsi cls conn
+ * @stats: pointer to iscsi statistic struct
+ */
+static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt;
+ stats->r2t_pdus = conn->r2t_pdus_cnt;
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+ stats->custom_length = 3;
+ strcpy(stats->custom[2].desc, "eh_abort_cnt");
+ stats->custom[2].value = conn->eh_abort_cnt;
+ stats->digest_err = 0;
+ stats->timeout_err = 0;
+ stats->custom_length = 0;
+}
+
+
+/**
+ * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
+ * @dst_addr: target IP address
+ *
+ * check if route resolves to BNX2 device
+ */
+static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
+{
+ struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+ struct bnx2i_hba *hba;
+ struct cnic_dev *cnic = NULL;
+
+ bnx2i_reg_dev_all();
+
+ hba = get_adapter_list_head();
+ if (hba && hba->cnic)
+ cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
+ if (!cnic) {
+ printk(KERN_ALERT "bnx2i: no route,"
+ "can't connect using cnic\n");
+ goto no_nx2_route;
+ }
+ hba = bnx2i_find_hba_for_cnic(cnic);
+ if (!hba)
+ goto no_nx2_route;
+
+ if (bnx2i_adapter_ready(hba)) {
+ printk(KERN_ALERT "bnx2i: check route, hba not found\n");
+ goto no_nx2_route;
+ }
+ if (hba->netdev->mtu > hba->mtu_supported) {
+ printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
+ hba->netdev->name, hba->netdev->mtu);
+ printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
+ hba->mtu_supported);
+ goto no_nx2_route;
+ }
+ return hba;
+no_nx2_route:
+ return NULL;
+}
+
+
+/**
+ * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
+ * @hba: pointer to adapter instance
+ * @ep: endpoint (transport identifier) structure
+ *
+ * destroys cm_sock structure and on chip iscsi context
+ */
+static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
+ struct bnx2i_endpoint *ep)
+{
+ if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
+ hba->cnic->cm_destroy(ep->cm_sk);
+
+ if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+ ep->state = EP_STATE_DISCONN_COMPL;
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
+ ep->state == EP_STATE_DISCONN_TIMEDOUT) {
+ printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
+ " NW/PCIe trace, driver msgs to developers"
+ " for analysis\n");
+ return 1;
+ }
+
+ ep->state = EP_STATE_CLEANUP_START;
+ init_timer(&ep->ofld_timer);
+ ep->ofld_timer.expires = 10*HZ + jiffies;
+ ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+ ep->ofld_timer.data = (unsigned long) ep;
+ add_timer(&ep->ofld_timer);
+
+ bnx2i_ep_destroy_list_add(hba, ep);
+
+ /* destroy iSCSI context, wait for it to complete */
+ bnx2i_send_conn_destroy(hba, ep);
+ wait_event_interruptible(ep->ofld_wait,
+ (ep->state != EP_STATE_CLEANUP_START));
+
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&ep->ofld_timer);
+
+ bnx2i_ep_destroy_list_del(hba, ep);
+
+ if (ep->state != EP_STATE_CLEANUP_CMPL)
+ /* should never happen */
+ printk(KERN_ALERT "bnx2i - conn destroy failed\n");
+
+ return 0;
+}
+
+
+/**
+ * bnx2i_ep_connect - establish TCP connection to target portal
+ * @shost: scsi host
+ * @dst_addr: target IP address
+ * @non_blocking: blocking or non-blocking call
+ *
+ * this routine initiates the TCP/IP connection by invoking Option-2 i/f
+ * with l5_core and the CNIC. This is a multi-step process of resolving
+ * the route to the target, creating an iscsi connection context, handshaking
+ * with the CNIC module to create/initialize the socket struct and finally
+ * sending down the option-2 request to complete the TCP 3-way handshake
+ */
+static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ u32 iscsi_cid = BNX2I_CID_RESERVED;
+ struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+ struct sockaddr_in6 *desti6;
+ struct bnx2i_endpoint *bnx2i_ep;
+ struct bnx2i_hba *hba;
+ struct cnic_dev *cnic;
+ struct cnic_sockaddr saddr;
+ struct iscsi_endpoint *ep;
+ int rc = 0;
+
+ if (shost)
+ /* driver is given scsi host to work with */
+ hba = iscsi_host_priv(shost);
+ else
+ /*
+ * check if the given destination can be reached through
+ * an iscsi capable NetXtreme2 device
+ */
+ hba = bnx2i_check_route(dst_addr);
+ if (!hba) {
+ rc = -ENOMEM;
+ goto check_busy;
+ }
+
+ cnic = hba->cnic;
+ ep = bnx2i_alloc_ep(hba);
+ if (!ep) {
+ rc = -ENOMEM;
+ goto check_busy;
+ }
+ bnx2i_ep = ep->dd_data;
+
+ mutex_lock(&hba->net_dev_lock);
+ if (bnx2i_adapter_ready(hba)) {
+ rc = -EPERM;
+ goto net_if_down;
+ }
+
+ bnx2i_ep->state = EP_STATE_IDLE;
+ bnx2i_ep->ep_iscsi_cid = (u16) -1;
+ bnx2i_ep->num_active_cmds = 0;
+ iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
+ if (iscsi_cid == -1) {
+ printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
+ rc = -ENOMEM;
+ goto iscsi_cid_err;
+ }
+ bnx2i_ep->hba_age = hba->age;
+
+ rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
+ if (rc != 0) {
+ printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
+ rc = -ENOMEM;
+ goto qp_resc_err;
+ }
+
+ bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
+ bnx2i_ep->state = EP_STATE_OFLD_START;
+ bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
+
+ init_timer(&bnx2i_ep->ofld_timer);
+ bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
+ bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+ bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+ add_timer(&bnx2i_ep->ofld_timer);
+
+ bnx2i_send_conn_ofld_req(hba, bnx2i_ep);
+
+ /* Wait for CNIC hardware to setup conn context and return 'cid' */
+ wait_event_interruptible(bnx2i_ep->ofld_wait,
+ bnx2i_ep->state != EP_STATE_OFLD_START);
+
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&bnx2i_ep->ofld_timer);
+
+ bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
+
+ if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
+ rc = -ENOSPC;
+ goto conn_failed;
+ }
+
+ rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
+ iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
+ if (rc) {
+ rc = -EINVAL;
+ goto conn_failed;
+ }
+
+ bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
+ bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
+ clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
+
+ memset(&saddr, 0, sizeof(saddr));
+ if (dst_addr->sa_family == AF_INET) {
+ desti = (struct sockaddr_in *) dst_addr;
+ saddr.remote.v4 = *desti;
+ saddr.local.v4.sin_family = desti->sin_family;
+ } else if (dst_addr->sa_family == AF_INET6) {
+ desti6 = (struct sockaddr_in6 *) dst_addr;
+ saddr.remote.v6 = *desti6;
+ saddr.local.v6.sin6_family = desti6->sin6_family;
+ }
+
+ bnx2i_ep->timestamp = jiffies;
+ bnx2i_ep->state = EP_STATE_CONNECT_START;
+ if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ rc = -EINVAL;
+ goto conn_failed;
+ } else
+ rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
+
+ if (rc)
+ goto release_ep;
+
+ if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
+ goto release_ep;
+ mutex_unlock(&hba->net_dev_lock);
+ return ep;
+
+release_ep:
+ if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+ mutex_unlock(&hba->net_dev_lock);
+ return ERR_PTR(rc);
+ }
+conn_failed:
+net_if_down:
+iscsi_cid_err:
+ bnx2i_free_qp_resc(hba, bnx2i_ep);
+qp_resc_err:
+ bnx2i_free_ep(ep);
+ mutex_unlock(&hba->net_dev_lock);
+check_busy:
+ bnx2i_unreg_dev_all();
+ return ERR_PTR(rc);
+}
+
+
+/**
+ * bnx2i_ep_poll - polls for TCP connection establishment
+ * @ep: TCP connection (endpoint) handle
+ * @timeout_ms: timeout value in milliseconds
+ *
+ * polls for TCP connect request to complete
+ */
+static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct bnx2i_endpoint *bnx2i_ep;
+ int rc = 0;
+
+ bnx2i_ep = ep->dd_data;
+ if ((bnx2i_ep->state == EP_STATE_IDLE) ||
+ (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
+ (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+ return -1;
+ if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
+ return 1;
+
+ rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
+ ((bnx2i_ep->state ==
+ EP_STATE_OFLD_FAILED) ||
+ (bnx2i_ep->state ==
+ EP_STATE_CONNECT_FAILED) ||
+ (bnx2i_ep->state ==
+ EP_STATE_CONNECT_COMPL)),
+ msecs_to_jiffies(timeout_ms));
+ if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+ rc = -1;
+
+ if (rc > 0)
+ return 1;
+ else if (!rc)
+ return 0; /* timeout */
+ else
+ return rc;
+}
+
+
+/**
+ * bnx2i_ep_tcp_conn_active - check EP state transition
+ * @bnx2i_ep: endpoint pointer
+ *
+ * check if underlying TCP connection is active
+ */
+static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
+{
+ int ret;
+ int cnic_dev_10g = 0;
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+ cnic_dev_10g = 1;
+
+ switch (bnx2i_ep->state) {
+ case EP_STATE_CONNECT_START:
+ case EP_STATE_CLEANUP_FAILED:
+ case EP_STATE_OFLD_FAILED:
+ case EP_STATE_DISCONN_TIMEDOUT:
+ ret = 0;
+ break;
+ case EP_STATE_CONNECT_COMPL:
+ case EP_STATE_ULP_UPDATE_START:
+ case EP_STATE_ULP_UPDATE_COMPL:
+ case EP_STATE_TCP_FIN_RCVD:
+ case EP_STATE_ULP_UPDATE_FAILED:
+ ret = 1;
+ break;
+ case EP_STATE_TCP_RST_RCVD:
+ ret = 0;
+ break;
+ case EP_STATE_CONNECT_FAILED:
+ if (cnic_dev_10g)
+ ret = 1;
+ else
+ ret = 0;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+
+/**
+ * bnx2i_ep_disconnect - executes TCP connection teardown process
+ * @ep: TCP connection (endpoint) handle
+ *
+ * executes TCP connection teardown process
+ */
+static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ struct bnx2i_endpoint *bnx2i_ep;
+ struct bnx2i_conn *bnx2i_conn = NULL;
+ struct iscsi_session *session = NULL;
+ struct iscsi_conn *conn;
+ struct cnic_dev *cnic;
+ struct bnx2i_hba *hba;
+
+ bnx2i_ep = ep->dd_data;
+
+ /* driver should not attempt connection cleanup until TCP_CONNECT
+ * completes either successfully or fails. Timeout is 9-secs, so
+ * wait for it to complete
+ */
+ while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
+ !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
+ msleep(250);
+
+ if (bnx2i_ep->conn) {
+ bnx2i_conn = bnx2i_ep->conn;
+ conn = bnx2i_conn->cls_conn->dd_data;
+ session = conn->session;
+
+ spin_lock_bh(&session->lock);
+ bnx2i_conn->is_bound = 0;
+ spin_unlock_bh(&session->lock);
+ }
+
+ hba = bnx2i_ep->hba;
+ if (bnx2i_ep->state == EP_STATE_IDLE)
+ goto return_bnx2i_ep;
+ cnic = hba->cnic;
+
+ mutex_lock(&hba->net_dev_lock);
+
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+ goto free_resc;
+ if (bnx2i_ep->hba_age != hba->age)
+ goto free_resc;
+
+ if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
+ goto destroy_conn;
+
+ bnx2i_ep->state = EP_STATE_DISCONN_START;
+
+ init_timer(&bnx2i_ep->ofld_timer);
+ bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
+ bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+ bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+ add_timer(&bnx2i_ep->ofld_timer);
+
+ if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ int close = 0;
+
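+ /* close gracefully only if the session is logging out; otherwise abort */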
+ if (session) {
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_LOGGING_OUT)
+ close = 1;
+ spin_unlock_bh(&session->lock);
+ }
+ if (close)
+ cnic->cm_close(bnx2i_ep->cm_sk);
+ else
+ cnic->cm_abort(bnx2i_ep->cm_sk);
+ } else
+ goto free_resc;
+
+ /* wait for option-2 conn teardown */
+ wait_event_interruptible(bnx2i_ep->ofld_wait,
+ bnx2i_ep->state != EP_STATE_DISCONN_START);
+
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&bnx2i_ep->ofld_timer);
+
+destroy_conn:
+ if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+ mutex_unlock(&hba->net_dev_lock);
+ return;
+ }
+free_resc:
+ mutex_unlock(&hba->net_dev_lock);
+ bnx2i_free_qp_resc(hba, bnx2i_ep);
+return_bnx2i_ep:
+ if (bnx2i_conn)
+ bnx2i_conn->ep = NULL;
+
+ bnx2i_free_ep(ep);
+
+ if (!hba->ofld_conns_active)
+ bnx2i_unreg_dev_all();
+}
+
+
+/**
+ * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
+ * @shost: scsi host pointer
+ * @params: pointer to iscsi path message
+ *
+ */
+static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
+{
+ struct bnx2i_hba *hba = iscsi_host_priv(shost);
+ char *buf = (char *) params;
+ u16 len = sizeof(*params);
+
+ /* handled by cnic driver */
+ hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
+ len);
+
+ return 0;
+}
+
+
+/*
+ * 'scsi_host_template' structure and 'iscsi_transport' structure template
+ * used while registering with the scsi host and iSCSI transport module.
+ */
+static struct scsi_host_template bnx2i_host_template = {
+ .module = THIS_MODULE,
+ .name = "Broadcom Offload iSCSI Initiator",
+ .proc_name = "bnx2i",
+ .queuecommand = iscsi_queuecommand,
+ .eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler = iscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_target_reset,
+ .can_queue = 1024,
+ .max_sectors = 127,
+ .cmd_per_lun = 32,
+ .this_id = -1,
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
+ .shost_attrs = bnx2i_dev_attributes,
+};
+
+struct iscsi_transport bnx2i_iscsi_transport = {
+ .owner = THIS_MODULE,
+ .name = "bnx2i",
+ .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
+ CAP_MULTI_R2T | CAP_DATADGST |
+ CAP_DATA_PATH_OFFLOAD,
+ .param_mask = ISCSI_MAX_RECV_DLENGTH |
+ ISCSI_MAX_XMIT_DLENGTH |
+ ISCSI_HDRDGST_EN |
+ ISCSI_DATADGST_EN |
+ ISCSI_INITIAL_R2T_EN |
+ ISCSI_MAX_R2T |
+ ISCSI_IMM_DATA_EN |
+ ISCSI_FIRST_BURST |
+ ISCSI_MAX_BURST |
+ ISCSI_PDU_INORDER_EN |
+ ISCSI_DATASEQ_INORDER_EN |
+ ISCSI_ERL |
+ ISCSI_CONN_PORT |
+ ISCSI_CONN_ADDRESS |
+ ISCSI_EXP_STATSN |
+ ISCSI_PERSISTENT_PORT |
+ ISCSI_PERSISTENT_ADDRESS |
+ ISCSI_TARGET_NAME | ISCSI_TPGT |
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_LU_RESET_TMO |
+ ISCSI_PING_TMO | ISCSI_RECV_TMO |
+ ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+ .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
+ .create_session = bnx2i_session_create,
+ .destroy_session = bnx2i_session_destroy,
+ .create_conn = bnx2i_conn_create,
+ .bind_conn = bnx2i_conn_bind,
+ .destroy_conn = bnx2i_conn_destroy,
+ .set_param = iscsi_set_param,
+ .get_conn_param = bnx2i_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .get_host_param = bnx2i_host_get_param,
+ .start_conn = bnx2i_conn_start,
+ .stop_conn = iscsi_conn_stop,
+ .send_pdu = iscsi_conn_send_pdu,
+ .xmit_task = bnx2i_task_xmit,
+ .get_stats = bnx2i_conn_get_stats,
+ /* TCP connect - disconnect - option-2 interface calls */
+ .ep_connect = bnx2i_ep_connect,
+ .ep_poll = bnx2i_ep_poll,
+ .ep_disconnect = bnx2i_ep_disconnect,
+ .set_path = bnx2i_nl_set_path,
+ /* Error recovery timeout call */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ .cleanup_task = bnx2i_cleanup_task,
+};
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
new file mode 100644
index 00000000000..96426b751eb
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -0,0 +1,142 @@
+/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2004 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+/**
+ * bnx2i_dev_to_hba - maps dev pointer to adapter struct
+ * @dev: device pointer
+ *
+ * Map device to hba structure
+ */
+static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ return iscsi_host_priv(shost);
+}
+
+
+/**
+ * bnx2i_show_sq_info - returns the currently configured send queue (SQ) size
+ * @dev: device pointer
+ * @buf: buffer to return current SQ size parameter
+ *
+ * Returns the current SQ size parameter; this parameter determines the
+ * number of outstanding iSCSI commands supported on a connection
+ */
+static ssize_t bnx2i_show_sq_info(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+ return sprintf(buf, "0x%x\n", hba->max_sqes);
+}
+
+
+/**
+ * bnx2i_set_sq_info - update send queue (SQ) size parameter
+ * @dev: device pointer
+ * @buf: buffer containing the new SQ size parameter
+ * @count: parameter buffer size
+ *
+ * Interface for user to change shared queue size allocated for each conn
+ * Must be within SQ limits and a power of 2. For the latter this is needed
+ * because of how libiscsi preallocates tasks.
+ */
+static ssize_t bnx2i_set_sq_info(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+ u32 val;
+ int max_sq_size;
+
+ if (hba->ofld_conns_active)
+ goto skip_config;
+
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+ max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
+ else
+ max_sq_size = BNX2I_570X_SQ_WQES_MAX;
+
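+ /* new size is parsed as hex and must be a power of 2 within SQ limits */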
+ if (sscanf(buf, " 0x%x ", &val) > 0) {
+ if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
+ (is_power_of_2(val)))
+ hba->max_sqes = val;
+ }
+
+ return count;
+
+skip_config:
+ printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
+ return 0;
+}
+
+
+/**
+ * bnx2i_show_ccell_info - returns command cell (HQ) size
+ * @dev: device pointer
+ * @buf: buffer to return current CCELL (HQ) size parameter
+ *
+ * returns per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_show_ccell_info(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+ return sprintf(buf, "0x%x\n", hba->num_ccell);
+}
+
+
+/**
+ * bnx2i_set_ccell_info - set command cell (HQ) size
+ * @dev: device pointer
+ * @buf: buffer containing the new CCELL (HQ) size parameter
+ * @count: parameter buffer size
+ *
+ * updates per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_set_ccell_info(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 val;
+ struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+ if (hba->ofld_conns_active)
+ goto skip_config;
+
+ if (sscanf(buf, " 0x%x ", &val) > 0) {
+ if ((val >= BNX2I_CCELLS_MIN) &&
+ (val <= BNX2I_CCELLS_MAX)) {
+ hba->num_ccell = val;
+ }
+ }
+
+ return count;
+
+skip_config:
+ printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
+ return 0;
+}
+
+
+static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
+ bnx2i_show_sq_info, bnx2i_set_sq_info);
+static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
+ bnx2i_show_ccell_info, bnx2i_set_ccell_info);
+
+struct device_attribute *bnx2i_dev_attributes[] = {
+ &dev_attr_sq_size,
+ &dev_attr_num_ccell,
+ NULL
+};
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
index 59b0958d2d1..e3133b58e59 100644
--- a/drivers/scsi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -144,7 +144,6 @@ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *);
void cxgb3i_adapter_open(struct t3cdev *);
void cxgb3i_adapter_close(struct t3cdev *);
-struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
struct net_device *);
void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 9212400b9b1..74369a3f963 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -13,6 +13,7 @@
#include <linux/inet.h>
#include <linux/crypto.h>
+#include <net/dst.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -178,7 +179,7 @@ void cxgb3i_adapter_close(struct t3cdev *t3dev)
* cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device
* @t3dev: t3cdev adapter
*/
-struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
+static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
{
struct cxgb3i_adapter *snic;
int i;
@@ -261,20 +262,27 @@ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
/**
* cxgb3i_ep_connect - establish TCP connection to target portal
+ * @shost: scsi host to use
* @dst_addr: target IP address
* @non_blocking: blocking or non-blocking call
*
* Initiates a TCP/IP connection to the dst_addr
*/
-static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
+static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
int non_blocking)
{
struct iscsi_endpoint *ep;
struct cxgb3i_endpoint *cep;
- struct cxgb3i_hba *hba;
+ struct cxgb3i_hba *hba = NULL;
struct s3_conn *c3cn = NULL;
int err = 0;
+ if (shost)
+ hba = iscsi_host_priv(shost);
+
+ cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba);
+
c3cn = cxgb3i_c3cn_create();
if (!c3cn) {
cxgb3i_log_info("ep connect OOM.\n");
@@ -282,17 +290,27 @@ static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
goto release_conn;
}
- err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr);
+ err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn,
+ (struct sockaddr_in *)dst_addr);
if (err < 0) {
cxgb3i_log_info("ep connect failed.\n");
goto release_conn;
}
+
hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
if (!hba) {
err = -ENOSPC;
cxgb3i_log_info("NOT going through cxgbi device.\n");
goto release_conn;
}
+
+ if (shost && hba != iscsi_host_priv(shost)) {
+ err = -ENOSPC;
+ cxgb3i_log_info("Could not connect through request host%u\n",
+ shost->host_no);
+ goto release_conn;
+ }
+
if (c3cn_is_closing(c3cn)) {
err = -ENOSPC;
cxgb3i_log_info("ep connect unable to connect.\n");
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index e11c9c180f3..c1d5be4adf9 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1479,12 +1479,13 @@ static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
return NULL;
}
-static struct rtable *find_route(__be32 saddr, __be32 daddr,
+static struct rtable *find_route(struct net_device *dev,
+ __be32 saddr, __be32 daddr,
__be16 sport, __be16 dport)
{
struct rtable *rt;
struct flowi fl = {
- .oif = 0,
+ .oif = dev ? dev->ifindex : 0,
.nl_u = {
.ip4_u = {
.daddr = daddr,
@@ -1573,36 +1574,40 @@ out_err:
*
* return 0 if active open request is sent, < 0 otherwise.
*/
-int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin)
+int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
+ struct sockaddr_in *usin)
{
struct rtable *rt;
- struct net_device *dev;
struct cxgb3i_sdev_data *cdata;
struct t3cdev *cdev;
__be32 sipv4;
int err;
+ c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
+
if (usin->sin_family != AF_INET)
return -EAFNOSUPPORT;
c3cn->daddr.sin_port = usin->sin_port;
c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
- rt = find_route(c3cn->saddr.sin_addr.s_addr,
+ rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
c3cn->daddr.sin_addr.s_addr,
c3cn->saddr.sin_port,
c3cn->daddr.sin_port);
if (rt == NULL) {
- c3cn_conn_debug("NO route to 0x%x, port %u.\n",
+ c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
c3cn->daddr.sin_addr.s_addr,
- ntohs(c3cn->daddr.sin_port));
+ ntohs(c3cn->daddr.sin_port),
+ dev ? dev->name : "any");
return -ENETUNREACH;
}
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
- c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n",
+ c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
c3cn->daddr.sin_addr.s_addr,
- ntohs(c3cn->daddr.sin_port));
+ ntohs(c3cn->daddr.sin_port),
+ dev ? dev->name : "any");
ip_rt_put(rt);
return -ENETUNREACH;
}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
index ebfca960c0a..6a1d86b1faf 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -169,7 +169,8 @@ void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
void cxgb3i_sdev_remove(struct t3cdev *);
struct s3_conn *cxgb3i_c3cn_create(void);
-int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *);
+int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *,
+ struct sockaddr_in *);
void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
void cxgb3i_c3cn_release(struct s3_conn *);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 43b8c51e98d..fd0544f7da8 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -561,6 +561,12 @@ static int rdac_check_sense(struct scsi_device *sdev,
struct rdac_dh_data *h = get_rdac_data(sdev);
switch (sense_hdr->sense_key) {
case NOT_READY:
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
+ /* LUN Not Ready - Logical Unit Not Ready and is in
+ * the process of becoming ready
+ * Just retry.
+ */
+ return ADD_TO_MLQUEUE;
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
/* LUN Not Ready - Storage firmware incompatible
 * Manual code synchronisation required.
diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h
index 4b56c0436ba..b2613c2eaac 100644
--- a/drivers/scsi/dpt/osd_util.h
+++ b/drivers/scsi/dpt/osd_util.h
@@ -342,7 +342,7 @@ uLONG osdGetThreadID(void);
/* wakes up the specified thread */
void osdWakeThread(uLONG);
-/* osd sleep for x miliseconds */
+/* osd sleep for x milliseconds */
void osdSleep(uLONG);
#define DPT_THREAD_PRIORITY_LOWEST 0x00
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index be5099dd94b..c7076ce25e2 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1825,7 +1825,7 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
if (linked_comm && SCpnt->device->queue_depth > 2
&& TLDEV(SCpnt->device->type)) {
ha->cp_stat[i] = READY;
- flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0);
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
return 0;
}
@@ -2144,13 +2144,13 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
if (!cpp->din)
input_only = 0;
- if (SCpnt->request->sector < minsec)
- minsec = SCpnt->request->sector;
- if (SCpnt->request->sector > maxsec)
- maxsec = SCpnt->request->sector;
+ if (blk_rq_pos(SCpnt->request) < minsec)
+ minsec = blk_rq_pos(SCpnt->request);
+ if (blk_rq_pos(SCpnt->request) > maxsec)
+ maxsec = blk_rq_pos(SCpnt->request);
- sl[n] = SCpnt->request->sector;
- ioseek += SCpnt->request->nr_sectors;
+ sl[n] = blk_rq_pos(SCpnt->request);
+ ioseek += blk_rq_sectors(SCpnt->request);
if (!n)
continue;
@@ -2190,7 +2190,7 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
k = il[n];
cpp = &ha->cp[k];
SCpnt = cpp->SCpnt;
- ll[n] = SCpnt->request->nr_sectors;
+ ll[n] = blk_rq_sectors(SCpnt->request);
pl[n] = SCpnt->serial_number;
if (!n)
@@ -2236,12 +2236,12 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
cpp = &ha->cp[k];
SCpnt = cpp->SCpnt;
scmd_printk(KERN_INFO, SCpnt,
- "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld"
+ "%s pid %ld mb %d fc %d nr %d sec %ld ns %u"
" cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
(ihdlr ? "ihdlr" : "qcomm"),
SCpnt->serial_number, k, flushcount,
- n_ready, SCpnt->request->sector,
- SCpnt->request->nr_sectors, cursec, YESNO(s),
+ n_ready, blk_rq_pos(SCpnt->request),
+ blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
YESNO(r), YESNO(rev), YESNO(input_only),
YESNO(overlap), cpp->din);
}
@@ -2408,7 +2408,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
if (linked_comm && SCpnt->device->queue_depth > 2
&& TLDEV(SCpnt->device->type))
- flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1);
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
tstatus = status_byte(spp->target_status);
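
The eata.c hunks above replace direct struct request field accesses with the block-layer accessors; a brief illustrative helper (the name log_request_extent() is hypothetical) showing the types involved: blk_rq_pos() returns the start sector as a sector_t and blk_rq_sectors() the transfer length in 512-byte sectors as an unsigned int, which is why the format specifier changes from %ld to %u:

	static void log_request_extent(struct scsi_cmnd *SCpnt)
	{
		sector_t start = blk_rq_pos(SCpnt->request);	/* was ->sector */
		unsigned int nsect = blk_rq_sectors(SCpnt->request); /* was ->nr_sectors */

		scmd_printk(KERN_DEBUG, SCpnt, "sec %llu ns %u\n",
			    (unsigned long long)start, nsect);
	}
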
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 03e1926f40b..c15878e8815 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -54,7 +54,6 @@ MODULE_LICENSE("GPL v2");
/* fcoe host list */
LIST_HEAD(fcoe_hostlist);
DEFINE_RWLOCK(fcoe_hostlist_lock);
-DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
/* Function Prototypes */
@@ -71,7 +70,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);
static int fcoe_hostlist_remove(const struct fc_lport *);
-static int fcoe_check_wait_queue(struct fc_lport *);
+static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);
@@ -136,6 +135,58 @@ static struct scsi_host_template fcoe_shost_template = {
};
/**
+ * fcoe_fip_recv - handle a received FIP frame.
+ * @skb: the receive skb
+ * @dev: associated &net_device
+ * @ptype: the &packet_type structure which was used to register this handler.
+ * @orig_dev: original receive &net_device, in case @dev is a bond.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
+{
+ struct fcoe_softc *fc;
+
+ fc = container_of(ptype, struct fcoe_softc, fip_packet_type);
+ fcoe_ctlr_recv(&fc->ctlr, skb);
+ return 0;
+}
+
+/**
+ * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
+ * @fip: FCoE controller.
+ * @skb: FIP Packet.
+ */
+static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ skb->dev = fcoe_from_ctlr(fip)->real_dev;
+ dev_queue_xmit(skb);
+}
+
+/**
+ * fcoe_update_src_mac() - Update Ethernet MAC filters.
+ * @fip: FCoE controller.
+ * @old: Unicast MAC address to delete if the MAC is non-zero.
+ * @new: Unicast MAC address to add.
+ *
+ * Remove any previously-set unicast MAC filter.
+ * Add secondary FCoE MAC address filter for our OUI.
+ */
+static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
+{
+ struct fcoe_softc *fc;
+
+ fc = fcoe_from_ctlr(fip);
+ rtnl_lock();
+ if (!is_zero_ether_addr(old))
+ dev_unicast_delete(fc->real_dev, old);
+ dev_unicast_add(fc->real_dev, new);
+ rtnl_unlock();
+}
+
+/**
* fcoe_lport_config() - sets up the fc_lport
* @lp: ptr to the fc_lport
*
@@ -146,6 +197,7 @@ static int fcoe_lport_config(struct fc_lport *lp)
lp->link_up = 0;
lp->qfull = 0;
lp->max_retry_count = 3;
+ lp->max_rport_retry_count = 3;
lp->e_d_tov = 2 * 1000; /* FC-FS default */
lp->r_a_tov = 2 * 2 * 1000;
lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
@@ -167,6 +219,42 @@ static int fcoe_lport_config(struct fc_lport *lp)
}
/**
+ * fcoe_netdev_cleanup() - clean up netdev configurations
+ * @fc: ptr to the fcoe_softc
+ */
+void fcoe_netdev_cleanup(struct fcoe_softc *fc)
+{
+ u8 flogi_maddr[ETH_ALEN];
+
+ /* Don't listen for Ethernet packets anymore */
+ dev_remove_pack(&fc->fcoe_packet_type);
+ dev_remove_pack(&fc->fip_packet_type);
+
+ /* Delete secondary MAC addresses */
+ rtnl_lock();
+ memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+ dev_unicast_delete(fc->real_dev, flogi_maddr);
+ if (!is_zero_ether_addr(fc->ctlr.data_src_addr))
+ dev_unicast_delete(fc->real_dev, fc->ctlr.data_src_addr);
+ if (fc->ctlr.spma)
+ dev_unicast_delete(fc->real_dev, fc->ctlr.ctl_src_addr);
+ dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+ rtnl_unlock();
+}
+
+/**
+ * fcoe_queue_timer() - fcoe queue timer
+ * @lp: the fc_lport pointer
+ *
+ * Calls fcoe_check_wait_queue on timeout
+ *
+ */
+static void fcoe_queue_timer(ulong lp)
+{
+ fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
+}
+
+/**
* fcoe_netdev_config() - Set up netdev for SW FCoE
* @lp : ptr to the fc_lport
* @netdev : ptr to the associated netdevice struct
@@ -181,6 +269,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
u64 wwnn, wwpn;
struct fcoe_softc *fc;
u8 flogi_maddr[ETH_ALEN];
+ struct netdev_hw_addr *ha;
/* Setup lport private data to point to fcoe softc */
fc = lport_priv(lp);
@@ -236,10 +325,25 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
}
skb_queue_head_init(&fc->fcoe_pending_queue);
fc->fcoe_pending_queue_active = 0;
+ setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
+
+ /* look for SAN MAC address, if multiple SAN MACs exist, only
+ * use the first one for SPMA */
+ rcu_read_lock();
+ for_each_dev_addr(netdev, ha) {
+ if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
+ (is_valid_ether_addr(fc->ctlr.ctl_src_addr))) {
+ memcpy(fc->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
+ fc->ctlr.spma = 1;
+ break;
+ }
+ }
+ rcu_read_unlock();
/* setup Source Mac Address */
- memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
- fc->real_dev->addr_len);
+ if (!fc->ctlr.spma)
+ memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
+ fc->real_dev->addr_len);
wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
fc_set_wwnn(lp, wwnn);
@@ -254,7 +358,9 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
*/
rtnl_lock();
memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
- dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
+ dev_unicast_add(fc->real_dev, flogi_maddr);
+ if (fc->ctlr.spma)
+ dev_unicast_add(fc->real_dev, fc->ctlr.ctl_src_addr);
dev_mc_add(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
rtnl_unlock();
@@ -267,6 +373,11 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
fc->fcoe_packet_type.dev = fc->real_dev;
dev_add_pack(&fc->fcoe_packet_type);
+ fc->fip_packet_type.func = fcoe_fip_recv;
+ fc->fip_packet_type.type = htons(ETH_P_FIP);
+ fc->fip_packet_type.dev = fc->real_dev;
+ dev_add_pack(&fc->fip_packet_type);
+
return 0;
}
@@ -334,7 +445,6 @@ static int fcoe_if_destroy(struct net_device *netdev)
{
struct fc_lport *lp = NULL;
struct fcoe_softc *fc;
- u8 flogi_maddr[ETH_ALEN];
BUG_ON(!netdev);
@@ -353,9 +463,10 @@ static int fcoe_if_destroy(struct net_device *netdev)
/* Remove the instance from fcoe's list */
fcoe_hostlist_remove(lp);
- /* Don't listen for Ethernet packets anymore */
- dev_remove_pack(&fc->fcoe_packet_type);
- dev_remove_pack(&fc->fip_packet_type);
+ /* clean up netdev configurations */
+ fcoe_netdev_cleanup(fc);
+
+ /* tear-down the FCoE controller */
fcoe_ctlr_destroy(&fc->ctlr);
/* Cleanup the fc_lport */
@@ -370,22 +481,15 @@ static int fcoe_if_destroy(struct net_device *netdev)
if (lp->emp)
fc_exch_mgr_free(lp->emp);
- /* Delete secondary MAC addresses */
- rtnl_lock();
- memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
- dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
- if (!is_zero_ether_addr(fc->ctlr.data_src_addr))
- dev_unicast_delete(fc->real_dev,
- fc->ctlr.data_src_addr, ETH_ALEN);
- dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
- rtnl_unlock();
-
/* Free the per-CPU receive threads */
fcoe_percpu_clean(lp);
/* Free existing skbs */
fcoe_clean_pending_queue(lp);
+ /* Stop the timer */
+ del_timer_sync(&fc->timer);
+
/* Free memory used by statistical counters */
fc_lport_free_stats(lp);
@@ -439,58 +543,6 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
};
/**
- * fcoe_fip_recv - handle a received FIP frame.
- * @skb: the receive skb
- * @dev: associated &net_device
- * @ptype: the &packet_type structure which was used to register this handler.
- * @orig_dev: original receive &net_device, in case @dev is a bond.
- *
- * Returns: 0 for success
- */
-static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype,
- struct net_device *orig_dev)
-{
- struct fcoe_softc *fc;
-
- fc = container_of(ptype, struct fcoe_softc, fip_packet_type);
- fcoe_ctlr_recv(&fc->ctlr, skb);
- return 0;
-}
-
-/**
- * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
- * @fip: FCoE controller.
- * @skb: FIP Packet.
- */
-static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
-{
- skb->dev = fcoe_from_ctlr(fip)->real_dev;
- dev_queue_xmit(skb);
-}
-
-/**
- * fcoe_update_src_mac() - Update Ethernet MAC filters.
- * @fip: FCoE controller.
- * @old: Unicast MAC address to delete if the MAC is non-zero.
- * @new: Unicast MAC address to add.
- *
- * Remove any previously-set unicast MAC filter.
- * Add secondary FCoE MAC address filter for our OUI.
- */
-static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
-{
- struct fcoe_softc *fc;
-
- fc = fcoe_from_ctlr(fip);
- rtnl_lock();
- if (!is_zero_ether_addr(old))
- dev_unicast_delete(fc->real_dev, old, ETH_ALEN);
- dev_unicast_add(fc->real_dev, new, ETH_ALEN);
- rtnl_unlock();
-}
-
-/**
* fcoe_if_create() - this function creates the fcoe interface
* @netdev: pointer the associated netdevice
*
@@ -531,13 +583,6 @@ static int fcoe_if_create(struct net_device *netdev)
goto out_host_put;
}
- /* configure lport network properties */
- rc = fcoe_netdev_config(lp, netdev);
- if (rc) {
- FC_DBG("Could not configure netdev for lport\n");
- goto out_host_put;
- }
-
/*
* Initialize FIP.
*/
@@ -545,23 +590,25 @@ static int fcoe_if_create(struct net_device *netdev)
fc->ctlr.send = fcoe_fip_send;
fc->ctlr.update_mac = fcoe_update_src_mac;
- fc->fip_packet_type.func = fcoe_fip_recv;
- fc->fip_packet_type.type = htons(ETH_P_FIP);
- fc->fip_packet_type.dev = fc->real_dev;
- dev_add_pack(&fc->fip_packet_type);
+ /* configure lport network properties */
+ rc = fcoe_netdev_config(lp, netdev);
+ if (rc) {
+ FC_DBG("Could not configure netdev for the interface\n");
+ goto out_netdev_cleanup;
+ }
/* configure lport scsi host properties */
rc = fcoe_shost_config(lp, shost, &netdev->dev);
if (rc) {
FC_DBG("Could not configure shost for lport\n");
- goto out_host_put;
+ goto out_netdev_cleanup;
}
/* lport exch manager allocation */
rc = fcoe_em_config(lp);
if (rc) {
FC_DBG("Could not configure em for lport\n");
- goto out_host_put;
+ goto out_netdev_cleanup;
}
/* Initialize the library */
@@ -587,6 +634,8 @@ static int fcoe_if_create(struct net_device *netdev)
out_lp_destroy:
fc_exch_mgr_free(lp->emp); /* Free the EM */
+out_netdev_cleanup:
+ fcoe_netdev_cleanup(fc);
out_host_put:
scsi_host_put(lp->host);
return rc;
@@ -988,7 +1037,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
*/
int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
{
- int wlen, rc = 0;
+ int wlen;
u32 crc;
struct ethhdr *eh;
struct fcoe_crc_eof *cp;
@@ -1021,8 +1070,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
sof = fr_sof(fp);
eof = fr_eof(fp);
- elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
- sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
+ elen = sizeof(struct ethhdr);
hlen = sizeof(struct fcoe_hdr);
tlen = sizeof(struct fcoe_crc_eof);
wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
@@ -1107,18 +1155,9 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
/* send down to lld */
fr_dev(fp) = lp;
if (fc->fcoe_pending_queue.qlen)
- rc = fcoe_check_wait_queue(lp);
-
- if (rc == 0)
- rc = fcoe_start_io(skb);
-
- if (rc) {
- spin_lock_bh(&fc->fcoe_pending_queue.lock);
- __skb_queue_tail(&fc->fcoe_pending_queue, skb);
- spin_unlock_bh(&fc->fcoe_pending_queue.lock);
- if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
- lp->qfull = 1;
- }
+ fcoe_check_wait_queue(lp, skb);
+ else if (fcoe_start_io(skb))
+ fcoe_check_wait_queue(lp, skb);
return 0;
}
@@ -1268,32 +1307,6 @@ int fcoe_percpu_receive_thread(void *arg)
}
/**
- * fcoe_watchdog() - fcoe timer callback
- * @vp:
- *
- * This checks the pending queue length for fcoe and set lport qfull
- * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
- * fcoe_hostlist.
- *
- * Returns: 0 for success
- */
-void fcoe_watchdog(ulong vp)
-{
- struct fcoe_softc *fc;
-
- read_lock(&fcoe_hostlist_lock);
- list_for_each_entry(fc, &fcoe_hostlist, list) {
- if (fc->ctlr.lp)
- fcoe_check_wait_queue(fc->ctlr.lp);
- }
- read_unlock(&fcoe_hostlist_lock);
-
- fcoe_timer.expires = jiffies + (1 * HZ);
- add_timer(&fcoe_timer);
-}
-
-
-/**
* fcoe_check_wait_queue() - attempt to clear the transmit backlog
* @lp: the fc_lport
*
@@ -1305,16 +1318,17 @@ void fcoe_watchdog(ulong vp)
* The wait_queue is used when the skb transmit fails. skb will go
* in the wait_queue which will be emptied by the timer function or
* by the next skb transmit.
- *
- * Returns: 0 for success
*/
-static int fcoe_check_wait_queue(struct fc_lport *lp)
+static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
{
struct fcoe_softc *fc = lport_priv(lp);
- struct sk_buff *skb;
- int rc = -1;
+ int rc;
spin_lock_bh(&fc->fcoe_pending_queue.lock);
+
+ if (skb)
+ __skb_queue_tail(&fc->fcoe_pending_queue, skb);
+
if (fc->fcoe_pending_queue_active)
goto out;
fc->fcoe_pending_queue_active = 1;
@@ -1340,23 +1354,26 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
lp->qfull = 0;
+ if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
+ mod_timer(&fc->timer, jiffies + 2);
fc->fcoe_pending_queue_active = 0;
- rc = fc->fcoe_pending_queue.qlen;
out:
+ if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+ lp->qfull = 1;
spin_unlock_bh(&fc->fcoe_pending_queue.lock);
- return rc;
+ return;
}
/**
* fcoe_dev_setup() - setup link change notification interface
*/
-static void fcoe_dev_setup()
+static void fcoe_dev_setup(void)
{
register_netdevice_notifier(&fcoe_notifier);
}
/**
- * fcoe_dev_setup() - cleanup link change notification interface
+ * fcoe_dev_cleanup() - cleanup link change notification interface
*/
static void fcoe_dev_cleanup(void)
{
@@ -1815,10 +1832,6 @@ static int __init fcoe_init(void)
/* Setup link change notification */
fcoe_dev_setup();
- setup_timer(&fcoe_timer, fcoe_watchdog, 0);
-
- mod_timer(&fcoe_timer, jiffies + (10 * HZ));
-
fcoe_if_init();
return 0;
@@ -1844,9 +1857,6 @@ static void __exit fcoe_exit(void)
fcoe_dev_cleanup();
- /* Stop the timer */
- del_timer_sync(&fcoe_timer);
-
/* releases the associated fcoe hosts */
list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
fcoe_if_destroy(fc->real_dev);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 917aae88689..a1eb8c1988b 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -61,6 +61,7 @@ struct fcoe_softc {
struct packet_type fip_packet_type;
struct sk_buff_head fcoe_pending_queue;
u8 fcoe_pending_queue_active;
+ struct timer_list timer; /* queue timer */
struct fcoe_ctlr ctlr;
};
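
Together with the fcoe.c hunks, the new per-interface timer field replaces the old global fcoe_timer. A condensed, illustrative sketch of its life cycle; the wrapper function is hypothetical, and the individual calls only restate what the patch already does:

	/* Illustrative wrapper: how the per-interface timer added above is
	 * created, re-armed and torn down. */
	static void example_backlog_timer_lifecycle(struct fcoe_softc *fc,
						    struct fc_lport *lp)
	{
		/* creation, in fcoe_netdev_config(): one timer per interface */
		setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);

		/* transmit path: re-arm only while a backlog remains */
		if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
			mod_timer(&fc->timer, jiffies + 2);

		/* teardown, in fcoe_if_destroy(): wait out a running callback */
		del_timer_sync(&fc->timer);
	}
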
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 62ba0f39c6b..2f5bc7fd3fa 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -198,6 +198,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
sol->fip.fip_subcode = FIP_SC_SOL;
sol->fip.fip_dl_len = htons(sizeof(sol->desc) / FIP_BPW);
sol->fip.fip_flags = htons(FIP_FL_FPMA);
+ if (fip->spma)
+ sol->fip.fip_flags |= htons(FIP_FL_SPMA);
sol->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
sol->desc.mac.fd_desc.fip_dlen = sizeof(sol->desc.mac) / FIP_BPW;
@@ -213,7 +215,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
sol->desc.size.fd_size = htons(fcoe_size);
skb_put(skb, sizeof(*sol));
- skb->protocol = htons(ETH_P_802_3);
+ skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -350,6 +352,8 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
kal->fip.fip_dl_len = htons((sizeof(kal->mac) +
ports * sizeof(*vn)) / FIP_BPW);
kal->fip.fip_flags = htons(FIP_FL_FPMA);
+ if (fip->spma)
+ kal->fip.fip_flags |= htons(FIP_FL_SPMA);
kal->mac.fd_desc.fip_dtype = FIP_DT_MAC;
kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW;
@@ -365,7 +369,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
}
skb_put(skb, len);
- skb->protocol = htons(ETH_P_802_3);
+ skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -413,6 +417,8 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
cap->fip.fip_subcode = FIP_SC_REQ;
cap->fip.fip_dl_len = htons((dlen + sizeof(*mac)) / FIP_BPW);
cap->fip.fip_flags = htons(FIP_FL_FPMA);
+ if (fip->spma)
+ cap->fip.fip_flags |= htons(FIP_FL_SPMA);
cap->encaps.fd_desc.fip_dtype = dtype;
cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW;
@@ -421,10 +427,12 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
memset(mac, 0, sizeof(mac));
mac->fd_desc.fip_dtype = FIP_DT_MAC;
mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
- if (dtype != ELS_FLOGI)
+ if (dtype != FIP_DT_FLOGI)
memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN);
+ else if (fip->spma)
+ memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN);
- skb->protocol = htons(ETH_P_802_3);
+ skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
return 0;
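
The solicitation, keep-alive and encapsulation builders above all extend the same flag word; a tiny illustrative helper (not introduced by the patch) expressing the shared pattern:

	static __be16 example_fip_flags(const struct fcoe_ctlr *fip)
	{
		__be16 flags = htons(FIP_FL_FPMA);	/* fabric-provided MACs always offered */

		if (fip->spma)				/* also advertise server-provided MACs */
			flags |= htons(FIP_FL_SPMA);
		return flags;
	}
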
@@ -447,14 +455,10 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
u16 old_xid;
u8 op;
- if (fip->state == FIP_ST_NON_FIP)
- return 0;
-
fh = (struct fc_frame_header *)skb->data;
op = *(u8 *)(fh + 1);
- switch (op) {
- case ELS_FLOGI:
+ if (op == ELS_FLOGI) {
old_xid = fip->flogi_oxid;
fip->flogi_oxid = ntohs(fh->fh_ox_id);
if (fip->state == FIP_ST_AUTO) {
@@ -466,6 +470,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
fip->map_dest = 1;
return 0;
}
+ if (fip->state == FIP_ST_NON_FIP)
+ fip->map_dest = 1;
+ }
+
+ if (fip->state == FIP_ST_NON_FIP)
+ return 0;
+
+ switch (op) {
+ case ELS_FLOGI:
op = FIP_DT_FLOGI;
break;
case ELS_FDISC:
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
new file mode 100644
index 00000000000..37c3440bc17
--- /dev/null
+++ b/drivers/scsi/fnic/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_FCOE_FNIC) += fnic.o
+
+fnic-y := \
+ fnic_attrs.o \
+ fnic_isr.o \
+ fnic_main.o \
+ fnic_res.o \
+ fnic_fcs.o \
+ fnic_scsi.o \
+ vnic_cq.o \
+ vnic_dev.o \
+ vnic_intr.o \
+ vnic_rq.o \
+ vnic_wq_copy.o \
+ vnic_wq.o
diff --git a/drivers/scsi/fnic/cq_desc.h b/drivers/scsi/fnic/cq_desc.h
new file mode 100644
index 00000000000..d1225cf6320
--- /dev/null
+++ b/drivers/scsi/fnic/cq_desc.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CQ_DESC_H_
+#define _CQ_DESC_H_
+
+/*
+ * Completion queue descriptor types
+ */
+enum cq_desc_types {
+ CQ_DESC_TYPE_WQ_ENET = 0,
+ CQ_DESC_TYPE_DESC_COPY = 1,
+ CQ_DESC_TYPE_WQ_EXCH = 2,
+ CQ_DESC_TYPE_RQ_ENET = 3,
+ CQ_DESC_TYPE_RQ_FCP = 4,
+};
+
+/* Completion queue descriptor: 16B
+ *
+ * All completion queues have this basic layout. The
+ * type_specfic area is unique for each completion
+ * queue type.
+ */
+struct cq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 type_specfic[11];
+ u8 type_color;
+};
+
+#define CQ_DESC_TYPE_BITS 4
+#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
+#define CQ_DESC_COLOR_MASK 1
+#define CQ_DESC_COLOR_SHIFT 7
+#define CQ_DESC_Q_NUM_BITS 10
+#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
+#define CQ_DESC_COMP_NDX_BITS 12
+#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
+
+static inline void cq_desc_dec(const struct cq_desc *desc_arg,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
+
+#endif /* _CQ_DESC_H_ */
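
The color bit is how software distinguishes descriptors the hardware has just written from ones already consumed. A hedged sketch of a consumer loop built on cq_desc_dec(); the ring/to_clean/last_color bookkeeping shown here is illustrative rather than part of this header:

	static void example_cq_service(struct cq_desc *ring, unsigned int desc_count,
				       unsigned int *to_clean, u8 *last_color)
	{
		u8 type, color;
		u16 q_number, completed_index;

		for (;;) {
			cq_desc_dec(&ring[*to_clean], &type, &color,
				    &q_number, &completed_index);
			if (color == *last_color)	/* hardware has not written past here */
				break;

			/* dispatch on 'type' / 'completed_index' here */

			if (++*to_clean == desc_count) {
				*to_clean = 0;
				*last_color = !*last_color; /* expected color flips each wrap */
			}
		}
	}
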
diff --git a/drivers/scsi/fnic/cq_enet_desc.h b/drivers/scsi/fnic/cq_enet_desc.h
new file mode 100644
index 00000000000..a9fa26f82dd
--- /dev/null
+++ b/drivers/scsi/fnic/cq_enet_desc.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CQ_ENET_DESC_H_
+#define _CQ_ENET_DESC_H_
+
+#include "cq_desc.h"
+
+/* Ethernet completion queue descriptor: 16B */
+struct cq_enet_wq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 reserved[11];
+ u8 type_color;
+};
+
+static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+}
+
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_desc {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 type_color;
+};
+
+#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
+#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
+#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
+ ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
+#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
+
+#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
+
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
+ ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4
+#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
+ ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
+#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
+ ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
+
+#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
+#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
+
+static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
+ u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
+ u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
+ u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ u16 q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+ *vlan = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((desc->checksum_fcoe >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+#endif /* _CQ_ENET_DESC_H_ */
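
A short, illustrative usage sketch for the simpler of the two decoders above; the function name and the reclaim comment are assumptions, not part of this header:

	static void example_wq_completion(struct cq_enet_wq_desc *desc)
	{
		u8 type, color;
		u16 q_number, completed_index;

		cq_enet_wq_desc_dec(desc, &type, &color, &q_number, &completed_index);
		/* a driver would now reclaim transmit buffers up to completed_index */
	}
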
diff --git a/drivers/scsi/fnic/cq_exch_desc.h b/drivers/scsi/fnic/cq_exch_desc.h
new file mode 100644
index 00000000000..501660cfe22
--- /dev/null
+++ b/drivers/scsi/fnic/cq_exch_desc.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CQ_EXCH_DESC_H_
+#define _CQ_EXCH_DESC_H_
+
+#include "cq_desc.h"
+
+/* Exchange completion queue descriptor: 16B */
+struct cq_exch_wq_desc {
+ u16 completed_index;
+ u16 q_number;
+ u16 exchange_id;
+ u8 tmpl;
+ u8 reserved0;
+ u32 reserved1;
+ u8 exch_status;
+ u8 reserved2[2];
+ u8 type_color;
+};
+
+#define CQ_EXCH_WQ_STATUS_BITS 2
+#define CQ_EXCH_WQ_STATUS_MASK ((1 << CQ_EXCH_WQ_STATUS_BITS) - 1)
+
+enum cq_exch_status_types {
+ CQ_EXCH_WQ_STATUS_TYPE_COMPLETE = 0,
+ CQ_EXCH_WQ_STATUS_TYPE_ABORT = 1,
+ CQ_EXCH_WQ_STATUS_TYPE_SGL_EOF = 2,
+ CQ_EXCH_WQ_STATUS_TYPE_TMPL_ERR = 3,
+};
+
+static inline void cq_exch_wq_desc_dec(struct cq_exch_wq_desc *desc_ptr,
+ u8 *type,
+ u8 *color,
+ u16 *q_number,
+ u16 *completed_index,
+ u8 *exch_status)
+{
+ cq_desc_dec((struct cq_desc *)desc_ptr, type,
+ color, q_number, completed_index);
+ *exch_status = desc_ptr->exch_status & CQ_EXCH_WQ_STATUS_MASK;
+}
+
+struct cq_fcp_rq_desc {
+ u16 completed_index_eop_sop_prt;
+ u16 q_number;
+ u16 exchange_id;
+ u16 tmpl;
+ u16 bytes_written;
+ u16 vlan;
+ u8 sof;
+ u8 eof;
+ u8 fcs_fer_fck;
+ u8 type_color;
+};
+
+#define CQ_FCP_RQ_DESC_FLAGS_SOP (1 << 15)
+#define CQ_FCP_RQ_DESC_FLAGS_EOP (1 << 14)
+#define CQ_FCP_RQ_DESC_FLAGS_PRT (1 << 12)
+#define CQ_FCP_RQ_DESC_TMPL_MASK 0x1f
+#define CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK 0x3fff
+#define CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT 14
+#define CQ_FCP_RQ_DESC_PACKET_ERR_MASK (1 << CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT)
+#define CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT 15
+#define CQ_FCP_RQ_DESC_VS_STRIPPED_MASK (1 << CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT)
+#define CQ_FCP_RQ_DESC_FC_CRC_OK_MASK 0x1
+#define CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT 1
+#define CQ_FCP_RQ_DESC_FCOE_ERR_MASK (1 << CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT)
+#define CQ_FCP_RQ_DESC_FCS_OK_SHIFT 7
+#define CQ_FCP_RQ_DESC_FCS_OK_MASK (1 << CQ_FCP_RQ_DESC_FCS_OK_SHIFT)
+
+static inline void cq_fcp_rq_desc_dec(struct cq_fcp_rq_desc *desc_ptr,
+ u8 *type,
+ u8 *color,
+ u16 *q_number,
+ u16 *completed_index,
+ u8 *eop,
+ u8 *sop,
+ u8 *fck,
+ u16 *exchange_id,
+ u16 *tmpl,
+ u32 *bytes_written,
+ u8 *sof,
+ u8 *eof,
+ u8 *ingress_port,
+ u8 *packet_err,
+ u8 *fcoe_err,
+ u8 *fcs_ok,
+ u8 *vlan_stripped,
+ u16 *vlan)
+{
+ cq_desc_dec((struct cq_desc *)desc_ptr, type,
+ color, q_number, completed_index);
+ *eop = (desc_ptr->completed_index_eop_sop_prt &
+ CQ_FCP_RQ_DESC_FLAGS_EOP) ? 1 : 0;
+ *sop = (desc_ptr->completed_index_eop_sop_prt &
+ CQ_FCP_RQ_DESC_FLAGS_SOP) ? 1 : 0;
+ *ingress_port =
+ (desc_ptr->completed_index_eop_sop_prt &
+ CQ_FCP_RQ_DESC_FLAGS_PRT) ? 1 : 0;
+ *exchange_id = desc_ptr->exchange_id;
+ *tmpl = desc_ptr->tmpl & CQ_FCP_RQ_DESC_TMPL_MASK;
+ *bytes_written =
+ desc_ptr->bytes_written & CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_err =
+ (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_PACKET_ERR_MASK) >>
+ CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT;
+ *vlan_stripped =
+ (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_VS_STRIPPED_MASK) >>
+ CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT;
+ *vlan = desc_ptr->vlan;
+ *sof = desc_ptr->sof;
+ *fck = desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FC_CRC_OK_MASK;
+ *fcoe_err = (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCOE_ERR_MASK) >>
+ CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT;
+ *eof = desc_ptr->eof;
+ *fcs_ok =
+ (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCS_OK_MASK) >>
+ CQ_FCP_RQ_DESC_FCS_OK_SHIFT;
+}
+
+struct cq_sgl_desc {
+ u16 exchange_id;
+ u16 q_number;
+ u32 active_burst_offset;
+ u32 tot_data_bytes;
+ u16 tmpl;
+ u8 sgl_err;
+ u8 type_color;
+};
+
+enum cq_sgl_err_types {
+ CQ_SGL_ERR_NO_ERROR = 0,
+ CQ_SGL_ERR_OVERFLOW, /* data ran beyond end of SGL */
+ CQ_SGL_ERR_SGL_LCL_ADDR_ERR, /* sgl access to local vnic addr illegal*/
+ CQ_SGL_ERR_ADDR_RSP_ERR, /* sgl address error */
+ CQ_SGL_ERR_DATA_RSP_ERR, /* sgl data rsp error */
+ CQ_SGL_ERR_CNT_ZERO_ERR, /* SGL count is 0 */
+ CQ_SGL_ERR_CNT_MAX_ERR, /* SGL count is larger than supported */
+ CQ_SGL_ERR_ORDER_ERR, /* frames recv on both ports, order err */
+ CQ_SGL_ERR_DATA_LCL_ADDR_ERR,/* sgl data buf to local vnic addr ill */
+ CQ_SGL_ERR_HOST_CQ_ERR, /* host cq entry to local vnic addr ill */
+};
+
+#define CQ_SGL_SGL_ERR_MASK 0x1f
+#define CQ_SGL_TMPL_MASK 0x1f
+
+static inline void cq_sgl_desc_dec(struct cq_sgl_desc *desc_ptr,
+ u8 *type,
+ u8 *color,
+ u16 *q_number,
+ u16 *exchange_id,
+ u32 *active_burst_offset,
+ u32 *tot_data_bytes,
+ u16 *tmpl,
+ u8 *sgl_err)
+{
+ /* Cheat a little by assuming exchange_id is the same as completed
+ index */
+ cq_desc_dec((struct cq_desc *)desc_ptr, type, color, q_number,
+ exchange_id);
+ *active_burst_offset = desc_ptr->active_burst_offset;
+ *tot_data_bytes = desc_ptr->tot_data_bytes;
+ *tmpl = desc_ptr->tmpl & CQ_SGL_TMPL_MASK;
+ *sgl_err = desc_ptr->sgl_err & CQ_SGL_SGL_ERR_MASK;
+}
+
+#endif /* _CQ_EXCH_DESC_H_ */
diff --git a/drivers/scsi/fnic/fcpio.h b/drivers/scsi/fnic/fcpio.h
new file mode 100644
index 00000000000..12d770d885c
--- /dev/null
+++ b/drivers/scsi/fnic/fcpio.h
@@ -0,0 +1,780 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FCPIO_H_
+#define _FCPIO_H_
+
+#include <linux/if_ether.h>
+
+/*
+ * This header file includes all of the data structures used for
+ * communication by the host driver to the fcp firmware.
+ */
+
+/*
+ * Exchange and sequence id space allocated to the host driver
+ */
+#define FCPIO_HOST_EXCH_RANGE_START 0x1000
+#define FCPIO_HOST_EXCH_RANGE_END 0x1fff
+#define FCPIO_HOST_SEQ_ID_RANGE_START 0x80
+#define FCPIO_HOST_SEQ_ID_RANGE_END 0xff
+
+/*
+ * Command entry type
+ */
+enum fcpio_type {
+ /*
+ * Initiator request types
+ */
+ FCPIO_ICMND_16 = 0x1,
+ FCPIO_ICMND_32,
+ FCPIO_ICMND_CMPL,
+ FCPIO_ITMF,
+ FCPIO_ITMF_CMPL,
+
+ /*
+ * Target request types
+ */
+ FCPIO_TCMND_16 = 0x11,
+ FCPIO_TCMND_32,
+ FCPIO_TDATA,
+ FCPIO_TXRDY,
+ FCPIO_TRSP,
+ FCPIO_TDRSP_CMPL,
+ FCPIO_TTMF,
+ FCPIO_TTMF_ACK,
+ FCPIO_TABORT,
+ FCPIO_TABORT_CMPL,
+
+ /*
+ * Misc request types
+ */
+ FCPIO_ACK = 0x20,
+ FCPIO_RESET,
+ FCPIO_RESET_CMPL,
+ FCPIO_FLOGI_REG,
+ FCPIO_FLOGI_REG_CMPL,
+ FCPIO_ECHO,
+ FCPIO_ECHO_CMPL,
+ FCPIO_LUNMAP_CHNG,
+ FCPIO_LUNMAP_REQ,
+ FCPIO_LUNMAP_REQ_CMPL,
+ FCPIO_FLOGI_FIP_REG,
+ FCPIO_FLOGI_FIP_REG_CMPL,
+};
+
+/*
+ * Header status codes from the firmware
+ */
+enum fcpio_status {
+ FCPIO_SUCCESS = 0, /* request was successful */
+
+ /*
+ * If a request to the firmware is rejected, the original request
+ * header will be returned with the status set to one of the following:
+ */
+ FCPIO_INVALID_HEADER, /* header contains invalid data */
+ FCPIO_OUT_OF_RESOURCE, /* out of resources to complete request */
+ FCPIO_INVALID_PARAM, /* some parameter in request is invalid */
+ FCPIO_REQ_NOT_SUPPORTED, /* request type is not supported */
+ FCPIO_IO_NOT_FOUND, /* requested I/O was not found */
+
+ /*
+ * Once a request is processed, the firmware will usually return
+ * a cmpl message type. In cases where errors occurred,
+ * the header status field would be filled in with one of the following:
+ */
+ FCPIO_ABORTED = 0x41, /* request was aborted */
+ FCPIO_TIMEOUT, /* request was timed out */
+ FCPIO_SGL_INVALID, /* request was aborted due to sgl error */
+ FCPIO_MSS_INVALID, /* request was aborted due to mss error */
+ FCPIO_DATA_CNT_MISMATCH, /* recv/sent more/less data than exp. */
+ FCPIO_FW_ERR, /* request was terminated due to fw error */
+ FCPIO_ITMF_REJECTED, /* itmf req was rejected by remote node */
+ FCPIO_ITMF_FAILED, /* itmf req was failed by remote node */
+ FCPIO_ITMF_INCORRECT_LUN, /* itmf req targeted incorrect LUN */
+ FCPIO_CMND_REJECTED, /* request was invalid and rejected */
+ FCPIO_NO_PATH_AVAIL, /* no path to the lun was available */
+ FCPIO_PATH_FAILED, /* i/o sent to current path failed */
+ FCPIO_LUNMAP_CHNG_PEND, /* i/o rejected due to lunmap change */
+};
+
+/*
+ * The header command tag. All host requests will use the "tag" field
+ * to mark commands with a unique tag. When the firmware responds to
+ * a host request, it will copy the tag field into the response.
+ *
+ * The only firmware requests that will use the rx_id/ox_id fields instead
+ * of the tag field will be the target command and target task management
+ * requests. These two requests do not have corresponding host requests
+ * since they come directly from the FC initiator on the network.
+ */
+struct fcpio_tag {
+ union {
+ u32 req_id;
+ struct {
+ u16 rx_id;
+ u16 ox_id;
+ } ex_id;
+ } u;
+};
+
+static inline void
+fcpio_tag_id_enc(struct fcpio_tag *tag, u32 id)
+{
+ tag->u.req_id = id;
+}
+
+static inline void
+fcpio_tag_id_dec(struct fcpio_tag *tag, u32 *id)
+{
+ *id = tag->u.req_id;
+}
+
+static inline void
+fcpio_tag_exid_enc(struct fcpio_tag *tag, u16 ox_id, u16 rx_id)
+{
+ tag->u.ex_id.rx_id = rx_id;
+ tag->u.ex_id.ox_id = ox_id;
+}
+
+static inline void
+fcpio_tag_exid_dec(struct fcpio_tag *tag, u16 *ox_id, u16 *rx_id)
+{
+ *rx_id = tag->u.ex_id.rx_id;
+ *ox_id = tag->u.ex_id.ox_id;
+}
+
+/*
+ * The header for an fcpio request, whether from the firmware or from the
+ * host driver
+ */
+struct fcpio_header {
+ u8 type; /* enum fcpio_type */
+ u8 status; /* header status entry */
+ u16 _resvd; /* reserved */
+ struct fcpio_tag tag; /* header tag */
+};
+
+static inline void
+fcpio_header_enc(struct fcpio_header *hdr,
+ u8 type, u8 status,
+ struct fcpio_tag tag)
+{
+ hdr->type = type;
+ hdr->status = status;
+ hdr->_resvd = 0;
+ hdr->tag = tag;
+}
+
+static inline void
+fcpio_header_dec(struct fcpio_header *hdr,
+ u8 *type, u8 *status,
+ struct fcpio_tag *tag)
+{
+ *type = hdr->type;
+ *status = hdr->status;
+ *tag = hdr->tag;
+}
+
+#define CDB_16 16
+#define CDB_32 32
+#define LUN_ADDRESS 8
+
+/*
+ * fcpio_icmnd_16: host -> firmware request
+ *
+ * used for sending out an initiator SCSI 16-byte command
+ */
+struct fcpio_icmnd_16 {
+ u32 lunmap_id; /* index into lunmap table */
+ u8 special_req_flags; /* special exchange request flags */
+ u8 _resvd0[3]; /* reserved */
+ u32 sgl_cnt; /* scatter-gather list count */
+ u32 sense_len; /* sense buffer length */
+ u64 sgl_addr; /* scatter-gather list addr */
+ u64 sense_addr; /* sense buffer address */
+ u8 crn; /* SCSI Command Reference No. */
+ u8 pri_ta; /* SCSI Priority and Task attribute */
+ u8 _resvd1; /* reserved: should be 0 */
+ u8 flags; /* command flags */
+ u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */
+ u32 data_len; /* length of data expected */
+ u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
+ u8 _resvd2; /* reserved */
+ u8 d_id[3]; /* FC vNIC only: Target D_ID */
+ u16 mss; /* FC vNIC only: max burst */
+ u16 _resvd3; /* reserved */
+ u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */
+ u32 e_d_tov; /* FC vNIC only: Err Detect Timeout */
+};
+
+/*
+ * Special request flags
+ */
+#define FCPIO_ICMND_SRFLAG_RETRY 0x01 /* Enable Retry handling on exchange */
+
+/*
+ * Priority/Task Attribute settings
+ */
+#define FCPIO_ICMND_PTA_SIMPLE 0 /* simple task attribute */
+#define FCPIO_ICMND_PTA_HEADQ 1 /* head of queue task attribute */
+#define FCPIO_ICMND_PTA_ORDERED 2 /* ordered task attribute */
+#define FCPIO_ICMND_PTA_ACA 4 /* auto contingent allegiance */
+#define FCPIO_ICMND_PRI_SHIFT 3 /* priority field starts in bit 3 */
+
+/*
+ * Command flags
+ */
+#define FCPIO_ICMND_RDDATA 0x02 /* read data */
+#define FCPIO_ICMND_WRDATA 0x01 /* write data */
+
+/*
+ * fcpio_icmnd_32: host -> firmware request
+ *
+ * used for sending out an initiator SCSI 32-byte command
+ */
+struct fcpio_icmnd_32 {
+ u32 lunmap_id; /* index into lunmap table */
+ u8 special_req_flags; /* special exchange request flags */
+ u8 _resvd0[3]; /* reserved */
+ u32 sgl_cnt; /* scatter-gather list count */
+ u32 sense_len; /* sense buffer length */
+ u64 sgl_addr; /* scatter-gather list addr */
+ u64 sense_addr; /* sense buffer address */
+ u8 crn; /* SCSI Command Reference No. */
+ u8 pri_ta; /* SCSI Priority and Task attribute */
+ u8 _resvd1; /* reserved: should be 0 */
+ u8 flags; /* command flags */
+ u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */
+ u32 data_len; /* length of data expected */
+ u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
+ u8 _resvd2; /* reserved */
+ u8 d_id[3]; /* FC vNIC only: Target D_ID */
+ u16 mss; /* FC vNIC only: max burst */
+ u16 _resvd3; /* reserved */
+ u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */
+ u32 e_d_tov; /* FC vNIC only: Error Detect Timeout */
+};
+
+/*
+ * fcpio_itmf: host -> firmware request
+ *
+ * used for requesting the firmware to abort a request and/or send out
+ * a task management function
+ *
+ * The t_tag field is only needed when the request type is ABT_TASK.
+ */
+struct fcpio_itmf {
+ u32 lunmap_id; /* index into lunmap table */
+ u32 tm_req; /* SCSI Task Management request */
+ u32 t_tag; /* header tag of fcpio to be aborted */
+ u32 _resvd; /* _reserved */
+ u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
+ u8 _resvd1; /* reserved */
+ u8 d_id[3]; /* FC vNIC only: Target D_ID */
+ u32 r_a_tov; /* FC vNIC only: R_A_TOV in msec */
+ u32 e_d_tov; /* FC vNIC only: E_D_TOV in msec */
+};
+
+/*
+ * Task Management request
+ */
+enum fcpio_itmf_tm_req_type {
+ FCPIO_ITMF_ABT_TASK_TERM = 0x01, /* abort task and terminate */
+ FCPIO_ITMF_ABT_TASK, /* abort task and issue abts */
+ FCPIO_ITMF_ABT_TASK_SET, /* abort task set */
+ FCPIO_ITMF_CLR_TASK_SET, /* clear task set */
+ FCPIO_ITMF_LUN_RESET, /* logical unit reset task mgmt */
+ FCPIO_ITMF_CLR_ACA, /* Clear ACA condition */
+};
+
+/*
+ * fcpio_tdata: host -> firmware request
+ *
+ * used for requesting the firmware to send out a read data transfer for a
+ * target command
+ */
+struct fcpio_tdata {
+ u16 rx_id; /* FC rx_id of target command */
+ u16 flags; /* command flags */
+ u32 rel_offset; /* data sequence relative offset */
+ u32 sgl_cnt; /* scatter-gather list count */
+ u32 data_len; /* length of data expected to send */
+ u64 sgl_addr; /* scatter-gather list address */
+};
+
+/*
+ * Command flags
+ */
+#define FCPIO_TDATA_SCSI_RSP 0x01 /* send a scsi resp. after last frame */
+
+/*
+ * fcpio_txrdy: host -> firmware request
+ *
+ * used for requesting the firmware to send out a write data transfer for a
+ * target command
+ */
+struct fcpio_txrdy {
+ u16 rx_id; /* FC rx_id of target command */
+ u16 _resvd0; /* reserved */
+ u32 rel_offset; /* data sequence relative offset */
+ u32 sgl_cnt; /* scatter-gather list count */
+ u32 data_len; /* length of data expected to send */
+ u64 sgl_addr; /* scatter-gather list address */
+};
+
+/*
+ * fcpio_trsp: host -> firmware request
+ *
+ * used for requesting the firmware to send out a response for a target
+ * command
+ */
+struct fcpio_trsp {
+ u16 rx_id; /* FC rx_id of target command */
+ u16 _resvd0; /* reserved */
+ u32 sense_len; /* sense data buffer length */
+ u64 sense_addr; /* sense data buffer address */
+ u16 _resvd1; /* reserved */
+ u8 flags; /* response request flags */
+ u8 scsi_status; /* SCSI status */
+ u32 residual; /* SCSI data residual value of I/O */
+};
+
+/*
+ * response request flags
+ */
+#define FCPIO_TRSP_RESID_UNDER 0x08 /* residual is valid and is underflow */
+#define FCPIO_TRSP_RESID_OVER 0x04 /* residual is valid and is overflow */
+
+/*
+ * fcpio_ttmf_ack: host -> firmware response
+ *
+ * used by the host to indicate to the firmware it has received and processed
+ * the target tmf request
+ */
+struct fcpio_ttmf_ack {
+ u16 rx_id; /* FC rx_id of target command */
+ u16 _resvd0; /* reserved */
+ u32 tmf_status; /* SCSI task management status */
+};
+
+/*
+ * fcpio_tabort: host -> firmware request
+ *
+ * used by the host to request the firmware to abort a target request that was
+ * received by the firmware
+ */
+struct fcpio_tabort {
+ u16 rx_id; /* rx_id of the target request */
+};
+
+/*
+ * fcpio_reset: host -> firmware request
+ *
+ * used by the host to signal a reset of the driver to the firmware
+ * and to request firmware to clean up all outstanding I/O
+ */
+struct fcpio_reset {
+ u32 _resvd;
+};
+
+enum fcpio_flogi_reg_format_type {
+ FCPIO_FLOGI_REG_DEF_DEST = 0, /* Use the oui | s_id mac format */
+ FCPIO_FLOGI_REG_GW_DEST, /* Use the fixed gateway mac */
+};
+
+/*
+ * fcpio_flogi_reg: host -> firmware request
+ *
+ * fc vnic only
+ * used by the host to notify the firmware of the lif's s_id
+ * and destination mac address format
+ */
+struct fcpio_flogi_reg {
+ u8 format;
+ u8 s_id[3]; /* FC vNIC only: Source S_ID */
+ u8 gateway_mac[ETH_ALEN]; /* Destination gateway mac */
+ u16 _resvd;
+ u32 r_a_tov; /* R_A_TOV in msec */
+ u32 e_d_tov; /* E_D_TOV in msec */
+};
+
+/*
+ * fcpio_echo: host -> firmware request
+ *
+ * sends a heartbeat echo request to the firmware
+ */
+struct fcpio_echo {
+ u32 _resvd;
+};
+
+/*
+ * fcpio_lunmap_req: host -> firmware request
+ *
+ * scsi vnic only
+ * sends a request to retrieve the lunmap table for scsi vnics
+ */
+struct fcpio_lunmap_req {
+ u64 addr; /* address of the buffer */
+ u32 len; /* len of the buffer */
+};
+
+/*
+ * fcpio_flogi_fip_reg: host -> firmware request
+ *
+ * fc vnic only
+ * used by the host to notify the firmware of the lif's s_id
+ * and destination mac address format
+ */
+struct fcpio_flogi_fip_reg {
+ u8 _resvd0;
+ u8 s_id[3]; /* FC vNIC only: Source S_ID */
+ u8 fcf_mac[ETH_ALEN]; /* FCF Target destination mac */
+ u16 _resvd1;
+ u32 r_a_tov; /* R_A_TOV in msec */
+ u32 e_d_tov; /* E_D_TOV in msec */
+ u8 ha_mac[ETH_ALEN]; /* Host adapter source mac */
+ u16 _resvd2;
+};
+
+/*
+ * Basic structure for all fcpio structures that are sent from the host to the
+ * firmware. They are 128 bytes per structure.
+ */
+#define FCPIO_HOST_REQ_LEN 128 /* expected length of host requests */
+
+struct fcpio_host_req {
+ struct fcpio_header hdr;
+
+ union {
+ /*
+ * Defines space needed for request
+ */
+ u8 buf[FCPIO_HOST_REQ_LEN - sizeof(struct fcpio_header)];
+
+ /*
+ * Initiator host requests
+ */
+ struct fcpio_icmnd_16 icmnd_16;
+ struct fcpio_icmnd_32 icmnd_32;
+ struct fcpio_itmf itmf;
+
+ /*
+ * Target host requests
+ */
+ struct fcpio_tdata tdata;
+ struct fcpio_txrdy txrdy;
+ struct fcpio_trsp trsp;
+ struct fcpio_ttmf_ack ttmf_ack;
+ struct fcpio_tabort tabort;
+
+ /*
+ * Misc requests
+ */
+ struct fcpio_reset reset;
+ struct fcpio_flogi_reg flogi_reg;
+ struct fcpio_echo echo;
+ struct fcpio_lunmap_req lunmap_req;
+ struct fcpio_flogi_fip_reg flogi_fip_reg;
+ } u;
+};
+
+/*
+ * fcpio_icmnd_cmpl: firmware -> host response
+ *
+ * used for sending the host a response to an initiator command
+ */
+struct fcpio_icmnd_cmpl {
+ u8 _resvd0[6]; /* reserved */
+ u8 flags; /* response flags */
+ u8 scsi_status; /* SCSI status */
+ u32 residual; /* SCSI data residual length */
+ u32 sense_len; /* SCSI sense length */
+};
+
+/*
+ * response flags
+ */
+#define FCPIO_ICMND_CMPL_RESID_UNDER 0x08 /* resid under and valid */
+#define FCPIO_ICMND_CMPL_RESID_OVER 0x04 /* resid over and valid */
+
+/*
+ * fcpio_itmf_cmpl: firmware -> host response
+ *
+ * used for sending the host a response for a itmf request
+ */
+struct fcpio_itmf_cmpl {
+ u32 _resvd; /* reserved */
+};
+
+/*
+ * fcpio_tcmnd_16: firmware -> host request
+ *
+ * used by the firmware to notify the host of an incoming target SCSI 16-Byte
+ * request
+ */
+struct fcpio_tcmnd_16 {
+ u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
+ u8 crn; /* SCSI Command Reference No. */
+ u8 pri_ta; /* SCSI Priority and Task attribute */
+ u8 _resvd2; /* reserved: should be 0 */
+ u8 flags; /* command flags */
+ u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */
+ u32 data_len; /* length of data expected */
+ u8 _resvd1; /* reserved */
+ u8 s_id[3]; /* FC vNIC only: Source S_ID */
+};
+
+/*
+ * Priority/Task Attribute settings
+ */
+#define FCPIO_TCMND_PTA_SIMPLE 0 /* simple task attribute */
+#define FCPIO_TCMND_PTA_HEADQ 1 /* head of queue task attribute */
+#define FCPIO_TCMND_PTA_ORDERED 2 /* ordered task attribute */
+#define FCPIO_TCMND_PTA_ACA 4 /* auto contingent allegiance */
+#define FCPIO_TCMND_PRI_SHIFT 3 /* priority field starts in bit 3 */
+
+/*
+ * Command flags
+ */
+#define FCPIO_TCMND_RDDATA 0x02 /* read data */
+#define FCPIO_TCMND_WRDATA 0x01 /* write data */
+
+/*
+ * fcpio_tcmnd_32: firmware -> host request
+ *
+ * used by the firmware to notify the host of an incoming target SCSI 32-Byte
+ * request
+ */
+struct fcpio_tcmnd_32 {
+ u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
+ u8 crn; /* SCSI Command Reference No. */
+ u8 pri_ta; /* SCSI Priority and Task attribute */
+ u8 _resvd2; /* reserved: should be 0 */
+ u8 flags; /* command flags */
+ u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */
+ u32 data_len; /* length of data expected */
+ u8 _resvd0; /* reserved */
+ u8 s_id[3]; /* FC vNIC only: Source S_ID */
+};
+
+/*
+ * fcpio_tdrsp_cmpl: firmware -> host response
+ *
+ * used by the firmware to notify the host of a response to a host target
+ * command
+ */
+struct fcpio_tdrsp_cmpl {
+ u16 rx_id; /* rx_id of the target request */
+ u16 _resvd0; /* reserved */
+};
+
+/*
+ * fcpio_ttmf: firmware -> host request
+ *
+ * used by the firmware to notify the host of an incoming task management
+ * function request
+ */
+struct fcpio_ttmf {
+ u8 _resvd0; /* reserved */
+ u8 s_id[3]; /* FC vNIC only: Source S_ID */
+ u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
+ u8 crn; /* SCSI Command Reference No. */
+ u8 _resvd2[3]; /* reserved */
+ u32 tmf_type; /* task management request type */
+};
+
+/*
+ * Task Management request
+ */
+#define FCPIO_TTMF_CLR_ACA 0x40 /* Clear ACA condition */
+#define FCPIO_TTMF_LUN_RESET 0x10 /* logical unit reset task mgmt */
+#define FCPIO_TTMF_CLR_TASK_SET 0x04 /* clear task set */
+#define FCPIO_TTMF_ABT_TASK_SET 0x02 /* abort task set */
+#define FCPIO_TTMF_ABT_TASK 0x01 /* abort task */
+
+/*
+ * fcpio_tabort_cmpl: firmware -> host response
+ *
+ * used by the firmware to respond to a host's tabort request
+ */
+struct fcpio_tabort_cmpl {
+ u16 rx_id; /* rx_id of the target request */
+ u16 _resvd0; /* reserved */
+};
+
+/*
+ * fcpio_ack: firmware -> host response
+ *
+ * used by firmware to notify the host of the last work request received
+ */
+struct fcpio_ack {
+ u16 request_out; /* last host entry received */
+ u16 _resvd;
+};
+
+/*
+ * fcpio_reset_cmpl: firmware -> host response
+ *
+ * used by firmware to respond to the host's reset request
+ */
+struct fcpio_reset_cmpl {
+ u16 vnic_id;
+};
+
+/*
+ * fcpio_flogi_reg_cmpl: firmware -> host response
+ *
+ * fc vnic only
+ * response to the fcpio_flogi_reg request
+ */
+struct fcpio_flogi_reg_cmpl {
+ u32 _resvd;
+};
+
+/*
+ * fcpio_echo_cmpl: firmware -> host response
+ *
+ * response to the fcpio_echo request
+ */
+struct fcpio_echo_cmpl {
+ u32 _resvd;
+};
+
+/*
+ * fcpio_lunmap_chng: firmware -> host notification
+ *
+ * scsi vnic only
+ * notifies the host that the lunmap tables have changed
+ */
+struct fcpio_lunmap_chng {
+ u32 _resvd;
+};
+
+/*
+ * fcpio_lunmap_req_cmpl: firmware -> host response
+ *
+ * scsi vnic only
+ * response for lunmap table request from the host
+ */
+struct fcpio_lunmap_req_cmpl {
+ u32 _resvd;
+};
+
+/*
+ * Basic structure for all fcpio structures that are sent from the firmware to
+ * the host. They are 64 bytes per structure.
+ */
+#define FCPIO_FW_REQ_LEN 64 /* expected length of fw requests */
+struct fcpio_fw_req {
+ struct fcpio_header hdr;
+
+ union {
+ /*
+ * Defines space needed for request
+ */
+ u8 buf[FCPIO_FW_REQ_LEN - sizeof(struct fcpio_header)];
+
+ /*
+ * Initiator firmware responses
+ */
+ struct fcpio_icmnd_cmpl icmnd_cmpl;
+ struct fcpio_itmf_cmpl itmf_cmpl;
+
+ /*
+ * Target firmware new requests
+ */
+ struct fcpio_tcmnd_16 tcmnd_16;
+ struct fcpio_tcmnd_32 tcmnd_32;
+
+ /*
+ * Target firmware responses
+ */
+ struct fcpio_tdrsp_cmpl tdrsp_cmpl;
+ struct fcpio_ttmf ttmf;
+ struct fcpio_tabort_cmpl tabort_cmpl;
+
+ /*
+ * Firmware response to work received
+ */
+ struct fcpio_ack ack;
+
+ /*
+ * Misc requests
+ */
+ struct fcpio_reset_cmpl reset_cmpl;
+ struct fcpio_flogi_reg_cmpl flogi_reg_cmpl;
+ struct fcpio_echo_cmpl echo_cmpl;
+ struct fcpio_lunmap_chng lunmap_chng;
+ struct fcpio_lunmap_req_cmpl lunmap_req_cmpl;
+ } u;
+};
+
+/*
+ * Access routines to encode and decode the color bit, which is the most
+ * significant bit of the MSB of the structure
+ */
+static inline void fcpio_color_enc(struct fcpio_fw_req *fw_req, u8 color)
+{
+ u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
+
+ if (color)
+ *c |= 0x80;
+ else
+ *c &= ~0x80;
+}
+
+static inline void fcpio_color_dec(struct fcpio_fw_req *fw_req, u8 *color)
+{
+ u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
+
+ *color = *c >> 7;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+
+ rmb();
+
+}
+
+/*
+ * Lunmap table entry for scsi vnics
+ */
+#define FCPIO_LUNMAP_TABLE_SIZE 256
+#define FCPIO_FLAGS_LUNMAP_VALID 0x80
+#define FCPIO_FLAGS_BOOT 0x01
+struct fcpio_lunmap_entry {
+ u8 bus;
+ u8 target;
+ u8 lun;
+ u8 path_cnt;
+ u16 flags;
+ u16 update_cnt;
+};
+
+struct fcpio_lunmap_tbl {
+ u32 update_cnt;
+ struct fcpio_lunmap_entry lunmaps[FCPIO_LUNMAP_TABLE_SIZE];
+};
+
+#endif /* _FCPIO_H_ */
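
The color-bit helpers above implement a standard ring-ownership convention: the firmware flips the color of the last byte of each 64-byte descriptor on every pass around the firmware-to-host ring, so the host can distinguish freshly written entries from stale ones without consulting a separate producer index. A minimal sketch of a host-side poll loop built on fcpio_color_dec() follows; the fw_req_ring structure, the to_clean/expected_color bookkeeping, and the handle_fw_req() callback are hypothetical illustrations only and are not part of this driver.

/* Hypothetical host-side ring state (illustration only) */
struct fw_req_ring {
	struct fcpio_fw_req *descs;	/* ring of 64-byte descriptors */
	unsigned int count;		/* number of descriptors in the ring */
	unsigned int to_clean;		/* next descriptor to inspect */
	u8 expected_color;		/* color the firmware writes this lap */
};

static void poll_fw_ring(struct fw_req_ring *ring,
			 void (*handle_fw_req)(struct fcpio_fw_req *desc))
{
	struct fcpio_fw_req *desc = &ring->descs[ring->to_clean];
	u8 color;

	/* fcpio_color_dec() also issues the rmb() that orders the reads */
	fcpio_color_dec(desc, &color);
	while (color == ring->expected_color) {
		handle_fw_req(desc);

		if (++ring->to_clean == ring->count) {
			ring->to_clean = 0;
			ring->expected_color ^= 1; /* firmware flips on wrap */
		}
		desc = &ring->descs[ring->to_clean];
		fcpio_color_dec(desc, &color);
	}
}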
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
new file mode 100644
index 00000000000..e4c0a3d7d87
--- /dev/null
+++ b/drivers/scsi/fnic/fnic.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_H_
+#define _FNIC_H_
+
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <scsi/libfc.h>
+#include "fnic_io.h"
+#include "fnic_res.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_wq_copy.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_scsi.h"
+
+#define DRV_NAME "fnic"
+#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
+#define DRV_VERSION "1.0.0.1121"
+#define PFX DRV_NAME ": "
+#define DFX DRV_NAME "%d: "
+
+#define DESC_CLEAN_LOW_WATERMARK 8
+#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */
+#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
+#define FNIC_DFLT_QUEUE_DEPTH 32
+#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
+
+/*
+ * Tag bits used for special requests.
+ */
+#define BIT(nr) (1UL << (nr))
+#define FNIC_TAG_ABORT BIT(30) /* tag bit indicating abort */
+#define FNIC_TAG_DEV_RST BIT(29) /* indicates device reset */
+#define FNIC_TAG_MASK (BIT(24) - 1) /* mask for lookup */
+#define FNIC_NO_TAG -1
+
+/*
+ * Usage of the scsi_cmnd scratchpad.
+ * These fields are locked by the hashed io_req_lock.
+ */
+#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
+#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase)
+#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
+#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
+#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
+
+#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
+
+#define FNIC_LUN_RESET_TIMEOUT 10000 /* mSec */
+#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */
+#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */
+#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */
+
+#define FNIC_MAX_FCP_TARGET 256
+
+extern unsigned int fnic_log_level;
+
+#define FNIC_MAIN_LOGGING 0x01
+#define FNIC_FCS_LOGGING 0x02
+#define FNIC_SCSI_LOGGING 0x04
+#define FNIC_ISR_LOGGING 0x08
+
+#define FNIC_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(fnic_log_level & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0)
+
+#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \
+ FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \
+ shost_printk(kern_level, host, fmt, ##args);)
+
+#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \
+ FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
+ shost_printk(kern_level, host, fmt, ##args);)
+
+#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \
+ FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
+ shost_printk(kern_level, host, fmt, ##args);)
+
+#define FNIC_ISR_DBG(kern_level, host, fmt, args...) \
+ FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \
+ shost_printk(kern_level, host, fmt, ##args);)
+
+extern const char *fnic_state_str[];
+
+enum fnic_intx_intr_index {
+ FNIC_INTX_WQ_RQ_COPYWQ,
+ FNIC_INTX_ERR,
+ FNIC_INTX_NOTIFY,
+ FNIC_INTX_INTR_MAX,
+};
+
+enum fnic_msix_intr_index {
+ FNIC_MSIX_RQ,
+ FNIC_MSIX_WQ,
+ FNIC_MSIX_WQ_COPY,
+ FNIC_MSIX_ERR_NOTIFY,
+ FNIC_MSIX_INTR_MAX,
+};
+
+struct fnic_msix_entry {
+ int requested;
+ char devname[IFNAMSIZ];
+ irqreturn_t (*isr)(int, void *);
+ void *devid;
+};
+
+enum fnic_state {
+ FNIC_IN_FC_MODE = 0,
+ FNIC_IN_FC_TRANS_ETH_MODE,
+ FNIC_IN_ETH_MODE,
+ FNIC_IN_ETH_TRANS_FC_MODE,
+};
+
+#define FNIC_WQ_COPY_MAX 1
+#define FNIC_WQ_MAX 1
+#define FNIC_RQ_MAX 1
+#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
+
+struct mempool;
+
+/* Per-instance private data structure */
+struct fnic {
+ struct fc_lport *lport;
+ struct vnic_dev_bar bar0;
+
+ struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX];
+ struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
+
+ struct vnic_stats *stats;
+ unsigned long stats_time; /* time of stats update */
+ struct vnic_nic_cfg *nic_cfg;
+ char name[IFNAMSIZ];
+ struct timer_list notify_timer; /* used for MSI interrupts */
+
+ unsigned int err_intr_offset;
+ unsigned int link_intr_offset;
+
+ unsigned int wq_count;
+ unsigned int cq_count;
+
+ u32 fcoui_mode:1; /* use fcoui address*/
+ u32 vlan_hw_insert:1; /* let hw insert the tag */
+ u32 in_remove:1; /* fnic device in removal */
+ u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
+
+ struct completion *remove_wait; /* device remove thread blocks */
+
+ struct fc_frame *flogi;
+ struct fc_frame *flogi_resp;
+ u16 flogi_oxid;
+ unsigned long s_id;
+ enum fnic_state state;
+ spinlock_t fnic_lock;
+
+ u16 vlan_id; /* VLAN tag including priority */
+ u8 mac_addr[ETH_ALEN];
+ u8 dest_addr[ETH_ALEN];
+ u8 data_src_addr[ETH_ALEN];
+ u64 fcp_input_bytes; /* internal statistic */
+ u64 fcp_output_bytes; /* internal statistic */
+ u32 link_down_cnt;
+ int link_status;
+
+ struct list_head list;
+ struct pci_dev *pdev;
+ struct vnic_fc_config config;
+ struct vnic_dev *vdev;
+ unsigned int raw_wq_count;
+ unsigned int wq_copy_count;
+ unsigned int rq_count;
+ int fw_ack_index[FNIC_WQ_COPY_MAX];
+ unsigned short fw_ack_recd[FNIC_WQ_COPY_MAX];
+ unsigned short wq_copy_desc_low[FNIC_WQ_COPY_MAX];
+ unsigned int intr_count;
+ u32 __iomem *legacy_pba;
+ struct fnic_host_tag *tags;
+ mempool_t *io_req_pool;
+ mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES];
+ spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */
+
+ struct work_struct link_work;
+ struct work_struct frame_work;
+ struct sk_buff_head frame_queue;
+
+ /* copy work queue cache line section */
+ ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
+ /* completion queue cache line section */
+ ____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX];
+
+ spinlock_t wq_copy_lock[FNIC_WQ_COPY_MAX];
+
+ /* work queue cache line section */
+ ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX];
+ spinlock_t wq_lock[FNIC_WQ_MAX];
+
+ /* receive queue cache line section */
+ ____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX];
+
+ /* interrupt resource cache line section */
+ ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
+};
+
+extern struct workqueue_struct *fnic_event_queue;
+extern struct device_attribute *fnic_attrs[];
+
+void fnic_clear_intr_mode(struct fnic *fnic);
+int fnic_set_intr_mode(struct fnic *fnic);
+void fnic_free_intr(struct fnic *fnic);
+int fnic_request_intr(struct fnic *fnic);
+
+int fnic_send(struct fc_lport *, struct fc_frame *);
+void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+void fnic_handle_frame(struct work_struct *work);
+void fnic_handle_link(struct work_struct *work);
+int fnic_rq_cmpl_handler(struct fnic *fnic, int);
+int fnic_alloc_rq_frame(struct vnic_rq *rq);
+void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
+int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp);
+
+int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
+int fnic_abort_cmd(struct scsi_cmnd *);
+int fnic_device_reset(struct scsi_cmnd *);
+int fnic_host_reset(struct scsi_cmnd *);
+int fnic_reset(struct Scsi_Host *);
+void fnic_scsi_cleanup(struct fc_lport *);
+void fnic_scsi_abort_io(struct fc_lport *);
+void fnic_empty_scsi_cleanup(struct fc_lport *);
+void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
+int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
+int fnic_wq_cmpl_handler(struct fnic *fnic, int);
+int fnic_flogi_reg_handler(struct fnic *fnic);
+void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
+ struct fcpio_host_req *desc);
+int fnic_fw_reset_handler(struct fnic *fnic);
+void fnic_terminate_rport_io(struct fc_rport *);
+const char *fnic_state_to_str(unsigned int state);
+
+void fnic_log_q_error(struct fnic *fnic);
+void fnic_handle_link_event(struct fnic *fnic);
+
+#endif /* _FNIC_H_ */
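
Two conventions in this header are easy to miss. First, the scsi_cmnd scratchpad fields above are protected by one of FNIC_IO_LOCKS spinlocks chosen by hashing the command tag; since FNIC_IO_LOCKS is a power of two, a simple mask serves as the hash. Second, FNIC_TAG_ABORT and FNIC_TAG_DEV_RST mark internally generated requests in the upper tag bits, and FNIC_TAG_MASK recovers the plain tag. The helpers below are a hypothetical sketch of both conventions, not functions defined by this driver.

/* Illustration only: pick the io_req_lock bucket for a command tag */
static inline spinlock_t *fnic_io_lock_for_tag(struct fnic *fnic, u32 tag)
{
	return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}

/* Illustration only: mark a tag as an abort and recover the original tag */
static inline u32 fnic_mark_abort_tag(u32 tag)
{
	return tag | FNIC_TAG_ABORT;
}

static inline u32 fnic_plain_tag(u32 tag)
{
	return tag & FNIC_TAG_MASK;
}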
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
new file mode 100644
index 00000000000..aea0c3becfd
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/string.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include "fnic.h"
+
+static ssize_t fnic_show_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fc_lport *lp = shost_priv(class_to_shost(dev));
+ struct fnic *fnic = lport_priv(lp);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]);
+}
+
+static ssize_t fnic_show_drv_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+}
+
+static ssize_t fnic_show_link_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fc_lport *lp = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up)
+ ? "Link Up" : "Link Down");
+}
+
+static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
+static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL);
+
+struct device_attribute *fnic_attrs[] = {
+ &dev_attr_fnic_state,
+ &dev_attr_drv_version,
+ &dev_attr_link_state,
+ NULL,
+};
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
new file mode 100644
index 00000000000..07e6eedb83c
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -0,0 +1,742 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/workqueue.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/fc_frame.h>
+#include <scsi/libfc.h>
+#include "fnic_io.h"
+#include "fnic.h"
+#include "cq_enet_desc.h"
+#include "cq_exch_desc.h"
+
+struct workqueue_struct *fnic_event_queue;
+
+void fnic_handle_link(struct work_struct *work)
+{
+ struct fnic *fnic = container_of(work, struct fnic, link_work);
+ unsigned long flags;
+ int old_link_status;
+ u32 old_link_down_cnt;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ old_link_down_cnt = fnic->link_down_cnt;
+ old_link_status = fnic->link_status;
+ fnic->link_status = vnic_dev_link_status(fnic->vdev);
+ fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
+
+ if (old_link_status == fnic->link_status) {
+ if (!fnic->link_status)
+ /* DOWN -> DOWN */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ else {
+ if (old_link_down_cnt != fnic->link_down_cnt) {
+ /* UP -> DOWN -> UP */
+ fnic->lport->host_stats.link_failure_count++;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "link down\n");
+ fc_linkdown(fnic->lport);
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "link up\n");
+ fc_linkup(fnic->lport);
+ } else
+ /* UP -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ }
+ } else if (fnic->link_status) {
+ /* DOWN -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
+ fc_linkup(fnic->lport);
+ } else {
+ /* UP -> DOWN */
+ fnic->lport->host_stats.link_failure_count++;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
+ fc_linkdown(fnic->lport);
+ }
+
+}
+
+/*
+ * This function passes incoming fabric frames to libFC
+ */
+void fnic_handle_frame(struct work_struct *work)
+{
+ struct fnic *fnic = container_of(work, struct fnic, frame_work);
+ struct fc_lport *lp = fnic->lport;
+ unsigned long flags;
+ struct sk_buff *skb;
+ struct fc_frame *fp;
+
+ while ((skb = skb_dequeue(&fnic->frame_queue))) {
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ dev_kfree_skb(skb);
+ return;
+ }
+ fp = (struct fc_frame *)skb;
+ /* if Flogi resp frame, register the address */
+ if (fr_flags(fp)) {
+ vnic_dev_add_addr(fnic->vdev,
+ fnic->data_src_addr);
+ fr_flags(fp) = 0;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ fc_exch_recv(lp, lp->emp, fp);
+ }
+
+}
+
+static inline void fnic_import_rq_fc_frame(struct sk_buff *skb,
+ u32 len, u8 sof, u8 eof)
+{
+ struct fc_frame *fp = (struct fc_frame *)skb;
+
+ skb_trim(skb, len);
+ fr_eof(fp) = eof;
+ fr_sof(fp) = sof;
+}
+
+
+static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len)
+{
+ struct fc_frame *fp;
+ struct ethhdr *eh;
+ struct vlan_ethhdr *vh;
+ struct fcoe_hdr *fcoe_hdr;
+ struct fcoe_crc_eof *ft;
+ u32 transport_len = 0;
+
+ eh = (struct ethhdr *)skb->data;
+ vh = (struct vlan_ethhdr *)skb->data;
+ if (vh->h_vlan_proto == htons(ETH_P_8021Q) &&
+ vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) {
+ skb_pull(skb, sizeof(struct vlan_ethhdr));
+ transport_len += sizeof(struct vlan_ethhdr);
+ } else if (eh->h_proto == htons(ETH_P_FCOE)) {
+ transport_len += sizeof(struct ethhdr);
+ skb_pull(skb, sizeof(struct ethhdr));
+ } else
+ return -1;
+
+ fcoe_hdr = (struct fcoe_hdr *)skb->data;
+ if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
+ return -1;
+
+ fp = (struct fc_frame *)skb;
+ fc_frame_init(fp);
+ fr_sof(fp) = fcoe_hdr->fcoe_sof;
+ skb_pull(skb, sizeof(struct fcoe_hdr));
+ transport_len += sizeof(struct fcoe_hdr);
+
+ ft = (struct fcoe_crc_eof *)(skb->data + len -
+ transport_len - sizeof(*ft));
+ fr_eof(fp) = ft->fcoe_eof;
+ skb_trim(skb, len - transport_len - sizeof(*ft));
+ return 0;
+}
+
+static inline int fnic_handle_flogi_resp(struct fnic *fnic,
+ struct fc_frame *fp)
+{
+ u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
+ struct ethhdr *eth_hdr;
+ struct fc_frame_header *fh;
+ int ret = 0;
+ unsigned long flags;
+ struct fc_frame *old_flogi_resp = NULL;
+
+ fh = (struct fc_frame_header *)fr_hdr(fp);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ if (fnic->state == FNIC_IN_ETH_MODE) {
+
+ /*
+ * Check if oxid matches on taking the lock. A new Flogi
+ * issued by libFC might have changed the fnic cached oxid
+ */
+ if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) {
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "Flogi response oxid not"
+ " matching cached oxid, dropping frame"
+ "\n");
+ ret = -1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ dev_kfree_skb_irq(fp_skb(fp));
+ goto handle_flogi_resp_end;
+ }
+
+ /* Drop older cached flogi response frame, cache this frame */
+ old_flogi_resp = fnic->flogi_resp;
+ fnic->flogi_resp = fp;
+ fnic->flogi_oxid = FC_XID_UNKNOWN;
+
+ /*
+		 * This frame is part of the FLOGI exchange; get the source MAC
+		 * address from it. If the source MAC is FC-OUI based, set the
+		 * address mode flag to use the FC-OUI base for the destination
+		 * MAC address; otherwise store the FCoE gateway address.
+ */
+ eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp));
+ memcpy(mac, eth_hdr->h_source, ETH_ALEN);
+
+ if (ntoh24(mac) == FC_FCOE_OUI)
+ fnic->fcoui_mode = 1;
+ else {
+ fnic->fcoui_mode = 0;
+ memcpy(fnic->dest_addr, mac, ETH_ALEN);
+ }
+
+ /*
+		 * Except for the FLOGI frame, all outbound frames from us use
+		 * FC_FCOE_OUI followed by our S_ID as the Eth src address. The
+		 * FLOGI frame uses the vNIC MAC address as the Eth src address.
+ */
+ fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);
+
+ /* We get our s_id from the d_id of the flogi resp frame */
+ fnic->s_id = ntoh24(fh->fh_d_id);
+
+ /* Change state to reflect transition from Eth to FC mode */
+ fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
+
+ } else {
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "Unexpected fnic state %s while"
+ " processing flogi resp\n",
+ fnic_state_to_str(fnic->state));
+ ret = -1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ dev_kfree_skb_irq(fp_skb(fp));
+ goto handle_flogi_resp_end;
+ }
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ /* Drop older cached frame */
+ if (old_flogi_resp)
+ dev_kfree_skb_irq(fp_skb(old_flogi_resp));
+
+ /*
+	 * Send the flogi_reg request to the firmware; this will put the
+	 * fnic in FC mode
+ */
+ ret = fnic_flogi_reg_handler(fnic);
+
+ if (ret < 0) {
+ int free_fp = 1;
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ /*
+		 * Free the frame if no other thread is
+		 * pointing to it
+ */
+ if (fnic->flogi_resp != fp)
+ free_fp = 0;
+ else
+ fnic->flogi_resp = NULL;
+
+ if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
+ fnic->state = FNIC_IN_ETH_MODE;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if (free_fp)
+ dev_kfree_skb_irq(fp_skb(fp));
+ }
+
+ handle_flogi_resp_end:
+ return ret;
+}
+
+/* Returns 1 for a response that matches cached flogi oxid */
+static inline int is_matching_flogi_resp_frame(struct fnic *fnic,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ int ret = 0;
+ u32 f_ctl;
+
+ fh = fc_frame_header_get(fp);
+ f_ctl = ntoh24(fh->fh_f_ctl);
+
+ if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) &&
+ fh->fh_r_ctl == FC_RCTL_ELS_REP &&
+ (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX &&
+ fh->fh_type == FC_TYPE_ELS)
+ ret = 1;
+
+ return ret;
+}
+
+static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
+ *cq_desc, struct vnic_rq_buf *buf,
+ int skipped __attribute__((unused)),
+ void *opaque)
+{
+ struct fnic *fnic = vnic_dev_priv(rq->vdev);
+ struct sk_buff *skb;
+ struct fc_frame *fp;
+ unsigned int eth_hdrs_stripped;
+ u8 type, color, eop, sop, ingress_port, vlan_stripped;
+ u8 fcoe = 0, fcoe_sof, fcoe_eof;
+ u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
+ u8 fcs_ok = 1, packet_error = 0;
+ u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
+ u32 rss_hash;
+ u16 exchange_id, tmpl;
+ u8 sof = 0;
+ u8 eof = 0;
+ u32 fcp_bytes_written = 0;
+ unsigned long flags;
+
+ pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
+ PCI_DMA_FROMDEVICE);
+ skb = buf->os_buf;
+ buf->os_buf = NULL;
+
+ cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
+ if (type == CQ_DESC_TYPE_RQ_FCP) {
+ cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
+ &type, &color, &q_number, &completed_index,
+ &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
+ &tmpl, &fcp_bytes_written, &sof, &eof,
+ &ingress_port, &packet_error,
+ &fcoe_enc_error, &fcs_ok, &vlan_stripped,
+ &vlan);
+ eth_hdrs_stripped = 1;
+
+ } else if (type == CQ_DESC_TYPE_RQ_ENET) {
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
+ &type, &color, &q_number, &completed_index,
+ &ingress_port, &fcoe, &eop, &sop,
+ &rss_type, &csum_not_calc, &rss_hash,
+ &bytes_written, &packet_error,
+ &vlan_stripped, &vlan, &checksum,
+ &fcoe_sof, &fcoe_fc_crc_ok,
+ &fcoe_enc_error, &fcoe_eof,
+ &tcp_udp_csum_ok, &udp, &tcp,
+ &ipv4_csum_ok, &ipv6, &ipv4,
+ &ipv4_fragment, &fcs_ok);
+ eth_hdrs_stripped = 0;
+
+ } else {
+		/* wrong CQ type */
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic rq_cmpl wrong cq type x%x\n", type);
+ goto drop;
+ }
+
+ if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic rq_cmpl fcoe x%x fcsok x%x"
+ " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
+ " x%x\n",
+ fcoe, fcs_ok, packet_error,
+ fcoe_fc_crc_ok, fcoe_enc_error);
+ goto drop;
+ }
+
+ if (eth_hdrs_stripped)
+ fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
+ else if (fnic_import_rq_eth_pkt(skb, bytes_written))
+ goto drop;
+
+ fp = (struct fc_frame *)skb;
+
+ /*
+	 * If the frame is an ELS response matching the cached FLOGI OX_ID and
+	 * is an accept, issue a flogi_reg_request on the copy WQ to firmware
+	 * to register the S_ID and determine FC_OUI mode versus gateway mode.
+ */
+ if (is_matching_flogi_resp_frame(fnic, fp)) {
+ if (!eth_hdrs_stripped) {
+ if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
+ fnic_handle_flogi_resp(fnic, fp);
+ return;
+ }
+ /*
+			 * Received a FLOGI reject. No point registering
+			 * with the firmware, but forward it to libFC
+ */
+ goto forward;
+ }
+ goto drop;
+ }
+ if (!eth_hdrs_stripped)
+ goto drop;
+
+forward:
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto drop;
+ }
+	/* Use fr_flags to indicate whether this is a successful FLOGI response */
+ fr_flags(fp) = 0;
+ fr_dev(fp) = fnic->lport;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ skb_queue_tail(&fnic->frame_queue, skb);
+ queue_work(fnic_event_queue, &fnic->frame_work);
+
+ return;
+drop:
+ dev_kfree_skb_irq(skb);
+}
+
+static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
+ struct cq_desc *cq_desc, u8 type,
+ u16 q_number, u16 completed_index,
+ void *opaque)
+{
+ struct fnic *fnic = vnic_dev_priv(vdev);
+
+ vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
+ VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
+ NULL);
+ return 0;
+}
+
+int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
+{
+ unsigned int tot_rq_work_done = 0, cur_work_done;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < fnic->rq_count; i++) {
+ cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
+ fnic_rq_cmpl_handler_cont,
+ NULL);
+ if (cur_work_done) {
+ err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
+ if (err)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_alloc_rq_frame cant alloc"
+ " frame\n");
+ }
+ tot_rq_work_done += cur_work_done;
+ }
+
+ return tot_rq_work_done;
+}
+
+/*
+ * This function is called once at init time to allocate and fill RQ
+ * buffers. Subsequently, it is called in the interrupt context after RQ
+ * buffer processing to replenish the buffers in the RQ
+ */
+int fnic_alloc_rq_frame(struct vnic_rq *rq)
+{
+ struct fnic *fnic = vnic_dev_priv(rq->vdev);
+ struct sk_buff *skb;
+ u16 len;
+ dma_addr_t pa;
+
+ len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
+ skb = dev_alloc_skb(len);
+ if (!skb) {
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "Unable to allocate RQ sk_buff\n");
+ return -ENOMEM;
+ }
+ skb_reset_mac_header(skb);
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+ skb_put(skb, len);
+ pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+ fnic_queue_rq_desc(rq, skb, pa, len);
+ return 0;
+}
+
+void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+ struct fc_frame *fp = buf->os_buf;
+ struct fnic *fnic = vnic_dev_priv(rq->vdev);
+
+ pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
+ PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb(fp_skb(fp));
+ buf->os_buf = NULL;
+}
+
+static inline int is_flogi_frame(struct fc_frame_header *fh)
+{
+ return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI;
+}
+
+int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
+{
+ struct vnic_wq *wq = &fnic->wq[0];
+ struct sk_buff *skb;
+ dma_addr_t pa;
+ struct ethhdr *eth_hdr;
+ struct vlan_ethhdr *vlan_hdr;
+ struct fcoe_hdr *fcoe_hdr;
+ struct fc_frame_header *fh;
+ u32 tot_len, eth_hdr_len;
+ int ret = 0;
+ unsigned long flags;
+
+ fh = fc_frame_header_get(fp);
+ skb = fp_skb(fp);
+
+ if (!fnic->vlan_hw_insert) {
+ eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
+ vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
+ eth_hdr = (struct ethhdr *)vlan_hdr;
+ vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
+ vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
+ vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
+ fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
+ } else {
+ eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
+ eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
+ eth_hdr->h_proto = htons(ETH_P_FCOE);
+ fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
+ }
+
+ if (is_flogi_frame(fh)) {
+ fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
+ memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN);
+ } else {
+ if (fnic->fcoui_mode)
+ fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
+ else
+ memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
+ memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
+ }
+
+ tot_len = skb->len;
+ BUG_ON(tot_len % 4);
+
+ memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
+ fcoe_hdr->fcoe_sof = fr_sof(fp);
+ if (FC_FCOE_VER)
+ FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
+
+ pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
+
+ spin_lock_irqsave(&fnic->wq_lock[0], flags);
+
+ if (!vnic_wq_desc_avail(wq)) {
+ pci_unmap_single(fnic->pdev, pa,
+ tot_len, PCI_DMA_TODEVICE);
+ ret = -1;
+ goto fnic_send_frame_end;
+ }
+
+ fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
+ fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
+fnic_send_frame_end:
+ spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
+
+ if (ret)
+ dev_kfree_skb_any(fp_skb(fp));
+
+ return ret;
+}
+
+/*
+ * fnic_send
+ * Routine to send a raw frame
+ */
+int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
+{
+ struct fnic *fnic = lport_priv(lp);
+ struct fc_frame_header *fh;
+ int ret = 0;
+ enum fnic_state old_state;
+ unsigned long flags;
+ struct fc_frame *old_flogi = NULL;
+ struct fc_frame *old_flogi_resp = NULL;
+
+ if (fnic->in_remove) {
+ dev_kfree_skb(fp_skb(fp));
+ ret = -1;
+ goto fnic_send_end;
+ }
+
+ fh = fc_frame_header_get(fp);
+	/* If this is not a FLOGI frame, send it out; this is the common case */
+ if (!is_flogi_frame(fh))
+ return fnic_send_frame(fnic, fp);
+
+ /* Flogi frame, now enter the state machine */
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+again:
+ /* Get any old cached frames, free them after dropping lock */
+ old_flogi = fnic->flogi;
+ fnic->flogi = NULL;
+ old_flogi_resp = fnic->flogi_resp;
+ fnic->flogi_resp = NULL;
+
+ fnic->flogi_oxid = FC_XID_UNKNOWN;
+
+ old_state = fnic->state;
+ switch (old_state) {
+ case FNIC_IN_FC_MODE:
+ case FNIC_IN_ETH_TRANS_FC_MODE:
+ default:
+ fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+ vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (old_flogi) {
+ dev_kfree_skb(fp_skb(old_flogi));
+ old_flogi = NULL;
+ }
+ if (old_flogi_resp) {
+ dev_kfree_skb(fp_skb(old_flogi_resp));
+ old_flogi_resp = NULL;
+ }
+
+ ret = fnic_fw_reset_handler(fnic);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
+ goto again;
+ if (ret) {
+ fnic->state = old_state;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ dev_kfree_skb(fp_skb(fp));
+ goto fnic_send_end;
+ }
+ old_flogi = fnic->flogi;
+ fnic->flogi = fp;
+ fnic->flogi_oxid = ntohs(fh->fh_ox_id);
+ old_flogi_resp = fnic->flogi_resp;
+ fnic->flogi_resp = NULL;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ break;
+
+ case FNIC_IN_FC_TRANS_ETH_MODE:
+ /*
+		 * A reset is pending with the firmware. Store the flogi
+		 * and its oxid. The transition out of this state happens
+		 * only when the firmware completes the reset, either with
+		 * success or failure. On success, transition to
+		 * FNIC_IN_ETH_MODE; on failure, transition to
+		 * FNIC_IN_FC_MODE
+ */
+ fnic->flogi = fp;
+ fnic->flogi_oxid = ntohs(fh->fh_ox_id);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ break;
+
+ case FNIC_IN_ETH_MODE:
+ /*
+		 * The fw/hw is already in eth mode. Store the oxid,
+		 * and send the flogi frame out. The transition out of this
+		 * state happens only when we receive a flogi response from
+		 * the network and its oxid matches the oxid cached when the
+		 * flogi frame was sent out. If they match, we issue a
+		 * flogi_reg request and transition to state
+		 * FNIC_IN_ETH_TRANS_FC_MODE
+ */
+ fnic->flogi_oxid = ntohs(fh->fh_ox_id);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ ret = fnic_send_frame(fnic, fp);
+ break;
+ }
+
+fnic_send_end:
+ if (old_flogi)
+ dev_kfree_skb(fp_skb(old_flogi));
+ if (old_flogi_resp)
+ dev_kfree_skb(fp_skb(old_flogi_resp));
+ return ret;
+}
+
+static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
+ struct cq_desc *cq_desc,
+ struct vnic_wq_buf *buf, void *opaque)
+{
+ struct sk_buff *skb = buf->os_buf;
+ struct fc_frame *fp = (struct fc_frame *)skb;
+ struct fnic *fnic = vnic_dev_priv(wq->vdev);
+
+ pci_unmap_single(fnic->pdev, buf->dma_addr,
+ buf->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(fp_skb(fp));
+ buf->os_buf = NULL;
+}
+
+static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
+ struct cq_desc *cq_desc, u8 type,
+ u16 q_number, u16 completed_index,
+ void *opaque)
+{
+ struct fnic *fnic = vnic_dev_priv(vdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
+ vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
+ fnic_wq_complete_frame_send, NULL);
+ spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
+
+ return 0;
+}
+
+int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
+{
+ unsigned int wq_work_done = 0;
+ unsigned int i;
+
+ for (i = 0; i < fnic->raw_wq_count; i++) {
+ wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
+ work_to_do,
+ fnic_wq_cmpl_handler_cont,
+ NULL);
+ }
+
+ return wq_work_done;
+}
+
+
+void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+ struct fc_frame *fp = buf->os_buf;
+ struct fnic *fnic = vnic_dev_priv(wq->vdev);
+
+ pci_unmap_single(fnic->pdev, buf->dma_addr,
+ buf->len, PCI_DMA_TODEVICE);
+
+ dev_kfree_skb(fp_skb(fp));
+ buf->os_buf = NULL;
+}
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
new file mode 100644
index 00000000000..f0b896988cd
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_IO_H_
+#define _FNIC_IO_H_
+
+#include <scsi/fc/fc_fcp.h>
+
+#define FNIC_DFLT_SG_DESC_CNT 32
+#define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */
+#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
+
+struct host_sg_desc {
+ __le64 addr;
+ __le32 len;
+ u32 _resvd;
+};
+
+struct fnic_dflt_sgl_list {
+ struct host_sg_desc sg_desc[FNIC_DFLT_SG_DESC_CNT];
+};
+
+struct fnic_sgl_list {
+ struct host_sg_desc sg_desc[FNIC_MAX_SG_DESC_CNT];
+};
+
+enum fnic_sgl_list_type {
+ FNIC_SGL_CACHE_DFLT = 0, /* cache with default size sgl */
+ FNIC_SGL_CACHE_MAX, /* cache with max size sgl */
+ FNIC_SGL_NUM_CACHES /* number of sgl caches */
+};
+
+enum fnic_ioreq_state {
+ FNIC_IOREQ_CMD_PENDING = 0,
+ FNIC_IOREQ_ABTS_PENDING,
+ FNIC_IOREQ_ABTS_COMPLETE,
+ FNIC_IOREQ_CMD_COMPLETE,
+};
+
+struct fnic_io_req {
+ struct host_sg_desc *sgl_list; /* sgl list */
+ void *sgl_list_alloc; /* sgl list address used for free */
+ dma_addr_t sense_buf_pa; /* dma address for sense buffer*/
+ dma_addr_t sgl_list_pa; /* dma address for sgl list */
+ u16 sgl_cnt;
+ u8 sgl_type; /* device DMA descriptor list type */
+ u8 io_completed:1; /* set to 1 when fw completes IO */
+ u32 port_id; /* remote port DID */
+ struct completion *abts_done; /* completion for abts */
+ struct completion *dr_done; /* completion for device reset */
+};
+
+#endif /* _FNIC_IO_H_ */
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
new file mode 100644
index 00000000000..2b3064828ae
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "fnic_io.h"
+#include "fnic.h"
+
+static irqreturn_t fnic_isr_legacy(int irq, void *data)
+{
+ struct fnic *fnic = data;
+ u32 pba;
+ unsigned long work_done = 0;
+
+ pba = vnic_intr_legacy_pba(fnic->legacy_pba);
+ if (!pba)
+ return IRQ_NONE;
+
+ if (pba & (1 << FNIC_INTX_NOTIFY)) {
+ vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
+ fnic_handle_link_event(fnic);
+ }
+
+ if (pba & (1 << FNIC_INTX_ERR)) {
+ vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]);
+ fnic_log_q_error(fnic);
+ }
+
+ if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
+ work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
+ work_done += fnic_wq_cmpl_handler(fnic, 4);
+ work_done += fnic_rq_cmpl_handler(fnic, 4);
+
+ vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
+ work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msi(int irq, void *data)
+{
+ struct fnic *fnic = data;
+ unsigned long work_done = 0;
+
+ work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
+ work_done += fnic_wq_cmpl_handler(fnic, 4);
+ work_done += fnic_rq_cmpl_handler(fnic, 4);
+
+ vnic_intr_return_credits(&fnic->intr[0],
+ work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
+{
+ struct fnic *fnic = data;
+ unsigned long rq_work_done = 0;
+
+ rq_work_done = fnic_rq_cmpl_handler(fnic, 4);
+ vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
+ rq_work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
+{
+ struct fnic *fnic = data;
+ unsigned long wq_work_done = 0;
+
+ wq_work_done = fnic_wq_cmpl_handler(fnic, 4);
+ vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
+ wq_work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
+{
+ struct fnic *fnic = data;
+ unsigned long wq_copy_work_done = 0;
+
+ wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8);
+ vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
+ wq_copy_work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
+{
+ struct fnic *fnic = data;
+
+ vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
+ fnic_log_q_error(fnic);
+ fnic_handle_link_event(fnic);
+
+ return IRQ_HANDLED;
+}
+
+void fnic_free_intr(struct fnic *fnic)
+{
+ int i;
+
+ switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSI:
+ free_irq(fnic->pdev->irq, fnic);
+ break;
+
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
+ if (fnic->msix[i].requested)
+ free_irq(fnic->msix_entry[i].vector,
+ fnic->msix[i].devid);
+ break;
+
+ default:
+ break;
+ }
+}
+
+int fnic_request_intr(struct fnic *fnic)
+{
+ int err = 0;
+ int i;
+
+ switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+
+ case VNIC_DEV_INTR_MODE_INTX:
+ err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
+ IRQF_SHARED, DRV_NAME, fnic);
+ break;
+
+ case VNIC_DEV_INTR_MODE_MSI:
+ err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
+ 0, fnic->name, fnic);
+ break;
+
+ case VNIC_DEV_INTR_MODE_MSIX:
+
+ sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
+ "%.11s-fcs-rq", fnic->name);
+ fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
+ fnic->msix[FNIC_MSIX_RQ].devid = fnic;
+
+ sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
+ "%.11s-fcs-wq", fnic->name);
+ fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
+ fnic->msix[FNIC_MSIX_WQ].devid = fnic;
+
+ sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
+ "%.11s-scsi-wq", fnic->name);
+ fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
+ fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;
+
+ sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
+ "%.11s-err-notify", fnic->name);
+ fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
+ fnic_isr_msix_err_notify;
+ fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;
+
+ for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
+ err = request_irq(fnic->msix_entry[i].vector,
+ fnic->msix[i].isr, 0,
+ fnic->msix[i].devname,
+ fnic->msix[i].devid);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "MSIX: request_irq"
+ " failed %d\n", err);
+ fnic_free_intr(fnic);
+ break;
+ }
+ fnic->msix[i].requested = 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return err;
+}
+
+int fnic_set_intr_mode(struct fnic *fnic)
+{
+ unsigned int n = ARRAY_SIZE(fnic->rq);
+ unsigned int m = ARRAY_SIZE(fnic->wq);
+ unsigned int o = ARRAY_SIZE(fnic->wq_copy);
+ unsigned int i;
+
+ /*
+	 * Set interrupt mode (INTx, MSI, MSI-X) depending on
+	 * system capabilities.
+ *
+ * Try MSI-X first
+ *
+ * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
+ * (last INTR is used for WQ/RQ errors and notification area)
+ */
+
+ BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1);
+ for (i = 0; i < n + m + o + 1; i++)
+ fnic->msix_entry[i].entry = i;
+
+ if (fnic->rq_count >= n &&
+ fnic->raw_wq_count >= m &&
+ fnic->wq_copy_count >= o &&
+ fnic->cq_count >= n + m + o) {
+ if (!pci_enable_msix(fnic->pdev, fnic->msix_entry,
+ n + m + o + 1)) {
+ fnic->rq_count = n;
+ fnic->raw_wq_count = m;
+ fnic->wq_copy_count = o;
+ fnic->wq_count = m + o;
+ fnic->cq_count = n + m + o;
+ fnic->intr_count = n + m + o + 1;
+ fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;
+
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
+ "Using MSI-X Interrupts\n");
+ vnic_dev_set_intr_mode(fnic->vdev,
+ VNIC_DEV_INTR_MODE_MSIX);
+ return 0;
+ }
+ }
+
+ /*
+ * Next try MSI
+ * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
+ */
+ if (fnic->rq_count >= 1 &&
+ fnic->raw_wq_count >= 1 &&
+ fnic->wq_copy_count >= 1 &&
+ fnic->cq_count >= 3 &&
+ fnic->intr_count >= 1 &&
+ !pci_enable_msi(fnic->pdev)) {
+
+ fnic->rq_count = 1;
+ fnic->raw_wq_count = 1;
+ fnic->wq_copy_count = 1;
+ fnic->wq_count = 2;
+ fnic->cq_count = 3;
+ fnic->intr_count = 1;
+ fnic->err_intr_offset = 0;
+
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
+ "Using MSI Interrupts\n");
+ vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
+
+ return 0;
+ }
+
+ /*
+ * Next try INTx
+ * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
+ * 1 INTR is used for all 3 queues, 1 INTR for queue errors
+ * 1 INTR for notification area
+ */
+
+ if (fnic->rq_count >= 1 &&
+ fnic->raw_wq_count >= 1 &&
+ fnic->wq_copy_count >= 1 &&
+ fnic->cq_count >= 3 &&
+ fnic->intr_count >= 3) {
+
+ fnic->rq_count = 1;
+ fnic->raw_wq_count = 1;
+ fnic->wq_copy_count = 1;
+ fnic->cq_count = 3;
+ fnic->intr_count = 3;
+
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
+ "Using Legacy Interrupts\n");
+ vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
+
+ return 0;
+ }
+
+ vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+
+ return -EINVAL;
+}
+
+void fnic_clear_intr_mode(struct fnic *fnic)
+{
+ switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+ case VNIC_DEV_INTR_MODE_MSIX:
+ pci_disable_msix(fnic->pdev);
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ pci_disable_msi(fnic->pdev);
+ break;
+ default:
+ break;
+ }
+
+ vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
+}
+
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
new file mode 100644
index 00000000000..a84072865fc
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -0,0 +1,943 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "fnic_io.h"
+#include "fnic.h"
+
+#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
+
+/* Timer to poll notification area for events. Used for MSI interrupts */
+#define FNIC_NOTIFY_TIMER_PERIOD (2 * HZ)
+
+static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
+static struct kmem_cache *fnic_io_req_cache;
+LIST_HEAD(fnic_list);
+DEFINE_SPINLOCK(fnic_list_lock);
+
+/* Supported devices by fnic module */
+static struct pci_device_id fnic_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
+ { 0, }
+};
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, "
+ "Joseph R. Eykholt <jeykholt@cisco.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, fnic_id_table);
+
+unsigned int fnic_log_level;
+module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+
+
+static struct libfc_function_template fnic_transport_template = {
+ .frame_send = fnic_send,
+ .fcp_abort_io = fnic_empty_scsi_cleanup,
+ .fcp_cleanup = fnic_empty_scsi_cleanup,
+ .exch_mgr_reset = fnic_exch_mgr_reset
+};
+
+static int fnic_slave_alloc(struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct fc_lport *lp = shost_priv(sdev->host);
+ struct fnic *fnic = lport_priv(lp);
+
+ sdev->tagged_supported = 1;
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
+ rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000;
+
+ return 0;
+}
+
+static struct scsi_host_template fnic_host_template = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .queuecommand = fnic_queuecommand,
+ .eh_abort_handler = fnic_abort_cmd,
+ .eh_device_reset_handler = fnic_device_reset,
+ .eh_host_reset_handler = fnic_host_reset,
+ .slave_alloc = fnic_slave_alloc,
+ .change_queue_depth = fc_change_queue_depth,
+ .change_queue_type = fc_change_queue_type,
+ .this_id = -1,
+ .cmd_per_lun = 3,
+ .can_queue = FNIC_MAX_IO_REQ,
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = FNIC_MAX_SG_DESC_CNT,
+ .max_sectors = 0xffff,
+ .shost_attrs = fnic_attrs,
+};
+
+static void fnic_get_host_speed(struct Scsi_Host *shost);
+static struct scsi_transport_template *fnic_fc_transport;
+static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
+
+static struct fc_function_template fnic_fc_functions = {
+
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fnic_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .show_rport_dev_loss_tmo = 1,
+ .issue_fc_host_lip = fnic_reset,
+ .get_fc_host_stats = fnic_get_stats,
+ .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+ .terminate_rport_io = fnic_terminate_rport_io,
+};
+
+static void fnic_get_host_speed(struct Scsi_Host *shost)
+{
+ struct fc_lport *lp = shost_priv(shost);
+ struct fnic *fnic = lport_priv(lp);
+ u32 port_speed = vnic_dev_port_speed(fnic->vdev);
+
+ /* Add in other values as they get defined in fw */
+ switch (port_speed) {
+ case 10000:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ }
+}
+
+static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
+{
+ int ret;
+ struct fc_lport *lp = shost_priv(host);
+ struct fnic *fnic = lport_priv(lp);
+ struct fc_host_statistics *stats = &lp->host_stats;
+ struct vnic_stats *vs;
+ unsigned long flags;
+
+ if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
+ return stats;
+ fnic->stats_time = jiffies;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (ret) {
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic: Get vnic stats failed"
+ " 0x%x", ret);
+ return stats;
+ }
+ vs = fnic->stats;
+ stats->tx_frames = vs->tx.tx_unicast_frames_ok;
+ stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
+ stats->rx_frames = vs->rx.rx_unicast_frames_ok;
+ stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
+ stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
+ stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
+ stats->invalid_crc_count = vs->rx.rx_crc_errors;
+ stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
+ stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
+ stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
+
+ return stats;
+}
+
+void fnic_log_q_error(struct fnic *fnic)
+{
+ unsigned int i;
+ u32 error_status;
+
+ for (i = 0; i < fnic->raw_wq_count; i++) {
+ error_status = ioread32(&fnic->wq[i].ctrl->error_status);
+ if (error_status)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "WQ[%d] error_status"
+ " %d\n", i, error_status);
+ }
+
+ for (i = 0; i < fnic->rq_count; i++) {
+ error_status = ioread32(&fnic->rq[i].ctrl->error_status);
+ if (error_status)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "RQ[%d] error_status"
+ " %d\n", i, error_status);
+ }
+
+ for (i = 0; i < fnic->wq_copy_count; i++) {
+ error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
+ if (error_status)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "CWQ[%d] error_status"
+ " %d\n", i, error_status);
+ }
+}
+
+void fnic_handle_link_event(struct fnic *fnic)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ queue_work(fnic_event_queue, &fnic->link_work);
+
+}
+
+static int fnic_notify_set(struct fnic *fnic)
+{
+ int err;
+
+ switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ err = vnic_dev_notify_set(fnic->vdev, -1);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
+ break;
+ default:
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Interrupt mode should be set up"
+ " before devcmd notify set %d\n",
+ vnic_dev_get_intr_mode(fnic->vdev));
+ err = -1;
+ break;
+ }
+
+ return err;
+}
+
+static void fnic_notify_timer(unsigned long data)
+{
+ struct fnic *fnic = (struct fnic *)data;
+
+ fnic_handle_link_event(fnic);
+ mod_timer(&fnic->notify_timer,
+ round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
+}
+
+static void fnic_notify_timer_start(struct fnic *fnic)
+{
+ switch (vnic_dev_get_intr_mode(fnic->vdev)) {
+ case VNIC_DEV_INTR_MODE_MSI:
+ /*
+		 * Schedule the first timeout immediately. The driver is
+		 * initialized and ready to look for link up notifications
+ */
+ mod_timer(&fnic->notify_timer, jiffies);
+ break;
+ default:
+ /* Using intr for notification for INTx/MSI-X */
+ break;
+	}
+}
+
+static int fnic_dev_wait(struct vnic_dev *vdev,
+ int (*start)(struct vnic_dev *, int),
+ int (*finished)(struct vnic_dev *, int *),
+ int arg)
+{
+ unsigned long time;
+ int done;
+ int err;
+
+ err = start(vdev, arg);
+ if (err)
+ return err;
+
+ /* Wait for func to complete...2 seconds max */
+ time = jiffies + (HZ * 2);
+ do {
+ err = finished(vdev, &done);
+ if (err)
+ return err;
+ if (done)
+ return 0;
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(time, jiffies));
+
+ return -ETIMEDOUT;
+}
+
+static int fnic_cleanup(struct fnic *fnic)
+{
+ unsigned int i;
+ int err;
+ unsigned long flags;
+ struct fc_frame *flogi = NULL;
+ struct fc_frame *flogi_resp = NULL;
+
+ vnic_dev_disable(fnic->vdev);
+ for (i = 0; i < fnic->intr_count; i++)
+ vnic_intr_mask(&fnic->intr[i]);
+
+ for (i = 0; i < fnic->rq_count; i++) {
+ err = vnic_rq_disable(&fnic->rq[i]);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < fnic->raw_wq_count; i++) {
+ err = vnic_wq_disable(&fnic->wq[i]);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < fnic->wq_copy_count; i++) {
+ err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
+ if (err)
+ return err;
+ }
+
+ /* Clean up completed IOs and FCS frames */
+ fnic_wq_copy_cmpl_handler(fnic, -1);
+ fnic_wq_cmpl_handler(fnic, -1);
+ fnic_rq_cmpl_handler(fnic, -1);
+
+ /* Clean up the IOs and FCS frames that have not completed */
+ for (i = 0; i < fnic->raw_wq_count; i++)
+ vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
+ for (i = 0; i < fnic->rq_count; i++)
+ vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
+ for (i = 0; i < fnic->wq_copy_count; i++)
+ vnic_wq_copy_clean(&fnic->wq_copy[i],
+ fnic_wq_copy_cleanup_handler);
+
+ for (i = 0; i < fnic->cq_count; i++)
+ vnic_cq_clean(&fnic->cq[i]);
+ for (i = 0; i < fnic->intr_count; i++)
+ vnic_intr_clean(&fnic->intr[i]);
+
+ /*
+ * Remove cached flogi and flogi resp frames if any
+ * These frames are not in any queue, and therefore queue
+ * cleanup does not clean them. So clean them explicitly
+ */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ flogi = fnic->flogi;
+ fnic->flogi = NULL;
+ flogi_resp = fnic->flogi_resp;
+ fnic->flogi_resp = NULL;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (flogi)
+ dev_kfree_skb(fp_skb(flogi));
+
+ if (flogi_resp)
+ dev_kfree_skb(fp_skb(flogi_resp));
+
+ mempool_destroy(fnic->io_req_pool);
+ for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
+ mempool_destroy(fnic->io_sgl_pool[i]);
+
+ return 0;
+}
+
+static void fnic_iounmap(struct fnic *fnic)
+{
+ if (fnic->bar0.vaddr)
+ iounmap(fnic->bar0.vaddr);
+}
+
+/*
+ * Allocate an element for mempools that require the GFP_DMA flag.
+ * Otherwise, checks in kmem_flagcheck() hit BUG_ON().
+ */
+static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
+{
+ struct kmem_cache *mem = pool_data;
+
+ return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
+}
+
+static int __devinit fnic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct Scsi_Host *host;
+ struct fc_lport *lp;
+ struct fnic *fnic;
+ mempool_t *pool;
+ int err;
+ int i;
+ unsigned long flags;
+
+ /*
+ * Allocate SCSI Host and set up association between host,
+ * local port, and fnic
+ */
+ host = scsi_host_alloc(&fnic_host_template,
+ sizeof(struct fc_lport) + sizeof(struct fnic));
+ if (!host) {
+ printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+ lp = shost_priv(host);
+ lp->host = host;
+ fnic = lport_priv(lp);
+ fnic->lport = lp;
+
+ snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
+ host->host_no);
+
+ host->transportt = fnic_fc_transport;
+
+ err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unable to alloc shared tag map\n");
+ goto err_out_free_hba;
+ }
+
+ /* Setup PCI resources */
+ pci_set_drvdata(pdev, fnic);
+
+ fnic->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Cannot enable PCI device, aborting.\n");
+ goto err_out_free_hba;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Cannot enable PCI resources, aborting\n");
+ goto err_out_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+	/* Query the PCI controller on the system for the DMA addressing
+	 * limitation for the device. Try 40-bit first, and
+	 * fall back to 32-bit.
+ */
+ err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "No usable DMA configuration "
+ "aborting\n");
+ goto err_out_release_regions;
+ }
+ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unable to obtain 32-bit DMA "
+ "for consistent allocations, aborting.\n");
+ goto err_out_release_regions;
+ }
+ } else {
+ err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unable to obtain 40-bit DMA "
+ "for consistent allocations, aborting.\n");
+ goto err_out_release_regions;
+ }
+ }
+
+ /* Map vNIC resources from BAR0 */
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "BAR0 not memory-map'able, aborting.\n");
+ err = -ENODEV;
+ goto err_out_release_regions;
+ }
+
+ fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
+ fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
+ fnic->bar0.len = pci_resource_len(pdev, 0);
+
+ if (!fnic->bar0.vaddr) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Cannot memory-map BAR0 res hdr, "
+ "aborting.\n");
+ err = -ENODEV;
+ goto err_out_release_regions;
+ }
+
+ fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
+ if (!fnic->vdev) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "vNIC registration failed, "
+ "aborting.\n");
+ err = -ENODEV;
+ goto err_out_iounmap;
+ }
+
+ err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
+ vnic_dev_open_done, 0);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "vNIC dev open failed, aborting.\n");
+ goto err_out_vnic_unregister;
+ }
+
+ err = vnic_dev_init(fnic->vdev, 0);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "vNIC dev init failed, aborting.\n");
+ goto err_out_dev_close;
+ }
+
+ err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+			     "vNIC get MAC addr failed\n");
+ goto err_out_dev_close;
+ }
+
+ /* Get vNIC configuration */
+ err = fnic_get_vnic_config(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Get vNIC configuration failed, "
+ "aborting.\n");
+ goto err_out_dev_close;
+ }
+ host->max_lun = fnic->config.luns_per_tgt;
+ host->max_id = FNIC_MAX_FCP_TARGET;
+
+ fnic_get_res_counts(fnic);
+
+ err = fnic_set_intr_mode(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Failed to set intr mode, "
+ "aborting.\n");
+ goto err_out_dev_close;
+ }
+
+ err = fnic_request_intr(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unable to request irq.\n");
+ goto err_out_clear_intr;
+ }
+
+ err = fnic_alloc_vnic_resources(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Failed to alloc vNIC resources, "
+ "aborting.\n");
+ goto err_out_free_intr;
+ }
+
+
+ /* initialize all fnic locks */
+ spin_lock_init(&fnic->fnic_lock);
+
+ for (i = 0; i < FNIC_WQ_MAX; i++)
+ spin_lock_init(&fnic->wq_lock[i]);
+
+ for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
+ spin_lock_init(&fnic->wq_copy_lock[i]);
+ fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
+ fnic->fw_ack_recd[i] = 0;
+ fnic->fw_ack_index[i] = -1;
+ }
+
+ for (i = 0; i < FNIC_IO_LOCKS; i++)
+ spin_lock_init(&fnic->io_req_lock[i]);
+
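+	/*
+	 * The mempools below are created with a minimum of two elements
+	 * each, so io_reqs and SGLs can still be allocated under memory
+	 * pressure.
+	 */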
+ fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
+ if (!fnic->io_req_pool)
+ goto err_out_free_resources;
+
+ pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
+ fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
+ if (!pool)
+ goto err_out_free_ioreq_pool;
+ fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
+
+ pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
+ fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
+ if (!pool)
+ goto err_out_free_dflt_pool;
+ fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
+
+ /* setup vlan config, hw inserts vlan header */
+ fnic->vlan_hw_insert = 1;
+ fnic->vlan_id = 0;
+
+ fnic->flogi_oxid = FC_XID_UNKNOWN;
+ fnic->flogi = NULL;
+ fnic->flogi_resp = NULL;
+ fnic->state = FNIC_IN_FC_MODE;
+
+ /* Enable hardware stripping of vlan header on ingress */
+ fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
+
+ /* Setup notification buffer area */
+ err = fnic_notify_set(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Failed to alloc notify buffer, aborting.\n");
+ goto err_out_free_max_pool;
+ }
+
+ /* Setup notify timer when using MSI interrupts */
+ if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
+ setup_timer(&fnic->notify_timer,
+ fnic_notify_timer, (unsigned long)fnic);
+
+	/* allocate RQ buffers and post them to RQ */
+ for (i = 0; i < fnic->rq_count; i++) {
+ err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_alloc_rq_frame can't alloc "
+ "frame\n");
+ goto err_out_free_rq_buf;
+ }
+ }
+
+ /*
+ * Initialization done with PCI system, hardware, firmware.
+ * Add host to SCSI
+ */
+ err = scsi_add_host(lp->host, &pdev->dev);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic: scsi_add_host failed...exiting\n");
+ goto err_out_free_rq_buf;
+ }
+
+	/* Start local port initialization */
+
+ lp->link_up = 0;
+ lp->tt = fnic_transport_template;
+
+ lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
+ FCPIO_HOST_EXCH_RANGE_START,
+ FCPIO_HOST_EXCH_RANGE_END);
+ if (!lp->emp) {
+ err = -ENOMEM;
+ goto err_out_remove_scsi_host;
+ }
+
+ lp->max_retry_count = fnic->config.flogi_retries;
+ lp->max_rport_retry_count = fnic->config.plogi_retries;
+ lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+ FCP_SPPF_CONF_COMPL);
+ if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
+ lp->service_params |= FCP_SPPF_RETRY;
+
+ lp->boot_time = jiffies;
+ lp->e_d_tov = fnic->config.ed_tov;
+ lp->r_a_tov = fnic->config.ra_tov;
+ lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
+ fc_set_wwnn(lp, fnic->config.node_wwn);
+ fc_set_wwpn(lp, fnic->config.port_wwn);
+
+ fc_exch_init(lp);
+ fc_lport_init(lp);
+ fc_elsct_init(lp);
+ fc_rport_init(lp);
+ fc_disc_init(lp);
+
+ fc_lport_config(lp);
+
+ if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
+ sizeof(struct fc_frame_header))) {
+ err = -EINVAL;
+ goto err_out_free_exch_mgr;
+ }
+ fc_host_maxframe_size(lp->host) = lp->mfs;
+
+ sprintf(fc_host_symbolic_name(lp->host),
+ DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
+
+ spin_lock_irqsave(&fnic_list_lock, flags);
+ list_add_tail(&fnic->list, &fnic_list);
+ spin_unlock_irqrestore(&fnic_list_lock, flags);
+
+ INIT_WORK(&fnic->link_work, fnic_handle_link);
+ INIT_WORK(&fnic->frame_work, fnic_handle_frame);
+ skb_queue_head_init(&fnic->frame_queue);
+
+ /* Enable all queues */
+ for (i = 0; i < fnic->raw_wq_count; i++)
+ vnic_wq_enable(&fnic->wq[i]);
+ for (i = 0; i < fnic->rq_count; i++)
+ vnic_rq_enable(&fnic->rq[i]);
+ for (i = 0; i < fnic->wq_copy_count; i++)
+ vnic_wq_copy_enable(&fnic->wq_copy[i]);
+
+ fc_fabric_login(lp);
+
+ vnic_dev_enable(fnic->vdev);
+ for (i = 0; i < fnic->intr_count; i++)
+ vnic_intr_unmask(&fnic->intr[i]);
+
+ fnic_notify_timer_start(fnic);
+
+ return 0;
+
+err_out_free_exch_mgr:
+ fc_exch_mgr_free(lp->emp);
+err_out_remove_scsi_host:
+ fc_remove_host(fnic->lport->host);
+ scsi_remove_host(fnic->lport->host);
+err_out_free_rq_buf:
+ for (i = 0; i < fnic->rq_count; i++)
+ vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
+ vnic_dev_notify_unset(fnic->vdev);
+err_out_free_max_pool:
+ mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
+err_out_free_dflt_pool:
+ mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
+err_out_free_ioreq_pool:
+ mempool_destroy(fnic->io_req_pool);
+err_out_free_resources:
+ fnic_free_vnic_resources(fnic);
+err_out_free_intr:
+ fnic_free_intr(fnic);
+err_out_clear_intr:
+ fnic_clear_intr_mode(fnic);
+err_out_dev_close:
+ vnic_dev_close(fnic->vdev);
+err_out_vnic_unregister:
+ vnic_dev_unregister(fnic->vdev);
+err_out_iounmap:
+ fnic_iounmap(fnic);
+err_out_release_regions:
+ pci_release_regions(pdev);
+err_out_disable_device:
+ pci_disable_device(pdev);
+err_out_free_hba:
+ scsi_host_put(lp->host);
+err_out:
+ return err;
+}
+
+static void __devexit fnic_remove(struct pci_dev *pdev)
+{
+ struct fnic *fnic = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ /*
+ * Mark state so that the workqueue thread stops forwarding
+ * received frames and link events to the local port. ISR and
+ * other threads that can queue work items will also stop
+ * creating work items on the fnic workqueue
+ */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->stop_rx_link_events = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
+ del_timer_sync(&fnic->notify_timer);
+
+ /*
+ * Flush the fnic event queue. After this call, there should
+ * be no event queued for this fnic device in the workqueue
+ */
+ flush_workqueue(fnic_event_queue);
+ skb_queue_purge(&fnic->frame_queue);
+
+	/*
+	 * Log off the fabric. This stops all remote ports and the dns
+	 * port, and flushes all rport, disc and lport work before
+	 * returning.
+	 */
+ fc_fabric_logoff(fnic->lport);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->in_remove = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ fc_lport_destroy(fnic->lport);
+
+ /*
+ * This stops the fnic device, masks all interrupts. Completed
+ * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
+ * cleaned up
+ */
+ fnic_cleanup(fnic);
+
+ BUG_ON(!skb_queue_empty(&fnic->frame_queue));
+
+ spin_lock_irqsave(&fnic_list_lock, flags);
+ list_del(&fnic->list);
+ spin_unlock_irqrestore(&fnic_list_lock, flags);
+
+ fc_remove_host(fnic->lport->host);
+ scsi_remove_host(fnic->lport->host);
+ fc_exch_mgr_free(fnic->lport->emp);
+ vnic_dev_notify_unset(fnic->vdev);
+ fnic_free_vnic_resources(fnic);
+ fnic_free_intr(fnic);
+ fnic_clear_intr_mode(fnic);
+ vnic_dev_close(fnic->vdev);
+ vnic_dev_unregister(fnic->vdev);
+ fnic_iounmap(fnic);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ scsi_host_put(fnic->lport->host);
+}
+
+static struct pci_driver fnic_driver = {
+ .name = DRV_NAME,
+ .id_table = fnic_id_table,
+ .probe = fnic_probe,
+ .remove = __devexit_p(fnic_remove),
+};
+
+static int __init fnic_init_module(void)
+{
+ size_t len;
+ int err = 0;
+
+ printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+
+ /* Create a cache for allocation of default size sgls */
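+	/*
+	 * Both SGL caches below are over-allocated by FNIC_SG_DESC_ALIGN
+	 * bytes so the SGL can be realigned after allocation (see the
+	 * alignment fixup in fnic_queuecommand).
+	 */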
+ len = sizeof(struct fnic_dflt_sgl_list);
+ fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
+ ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
+ SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
+ NULL);
+ if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
+ printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
+ err = -ENOMEM;
+ goto err_create_fnic_sgl_slab_dflt;
+ }
+
+ /* Create a cache for allocation of max size sgls*/
+ len = sizeof(struct fnic_sgl_list);
+ fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
+ ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
+ SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
+ NULL);
+ if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
+ printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
+ err = -ENOMEM;
+ goto err_create_fnic_sgl_slab_max;
+ }
+
+ /* Create a cache of io_req structs for use via mempool */
+ fnic_io_req_cache = kmem_cache_create("fnic_io_req",
+ sizeof(struct fnic_io_req),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!fnic_io_req_cache) {
+ printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
+ err = -ENOMEM;
+ goto err_create_fnic_ioreq_slab;
+ }
+
+ fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
+ if (!fnic_event_queue) {
+ printk(KERN_ERR PFX "fnic work queue create failed\n");
+ err = -ENOMEM;
+ goto err_create_fnic_workq;
+ }
+
+ spin_lock_init(&fnic_list_lock);
+ INIT_LIST_HEAD(&fnic_list);
+
+ fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
+ if (!fnic_fc_transport) {
+ printk(KERN_ERR PFX "fc_attach_transport error\n");
+ err = -ENOMEM;
+ goto err_fc_transport;
+ }
+
+ /* register the driver with PCI system */
+ err = pci_register_driver(&fnic_driver);
+ if (err < 0) {
+ printk(KERN_ERR PFX "pci register error\n");
+ goto err_pci_register;
+ }
+ return err;
+
+err_pci_register:
+ fc_release_transport(fnic_fc_transport);
+err_fc_transport:
+ destroy_workqueue(fnic_event_queue);
+err_create_fnic_workq:
+ kmem_cache_destroy(fnic_io_req_cache);
+err_create_fnic_ioreq_slab:
+ kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
+err_create_fnic_sgl_slab_max:
+ kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
+err_create_fnic_sgl_slab_dflt:
+ return err;
+}
+
+static void __exit fnic_cleanup_module(void)
+{
+ pci_unregister_driver(&fnic_driver);
+ destroy_workqueue(fnic_event_queue);
+ kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
+ kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
+ kmem_cache_destroy(fnic_io_req_cache);
+ fc_release_transport(fnic_fc_transport);
+}
+
+module_init(fnic_init_module);
+module_exit(fnic_cleanup_module);
+
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
new file mode 100644
index 00000000000..7ba61ec715d
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_resource.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "fnic.h"
+
+int fnic_get_vnic_config(struct fnic *fnic)
+{
+ struct vnic_fc_config *c = &fnic->config;
+ int err;
+
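+/*
+ * GET_CONFIG(m) reads one field of struct vnic_fc_config from the
+ * device via vnic_dev_spec() and returns from this function on the
+ * first error.
+ */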
+#define GET_CONFIG(m) \
+ do { \
+ err = vnic_dev_spec(fnic->vdev, \
+ offsetof(struct vnic_fc_config, m), \
+ sizeof(c->m), &c->m); \
+ if (err) { \
+ shost_printk(KERN_ERR, fnic->lport->host, \
+ "Error getting %s, %d\n", #m, \
+ err); \
+ return err; \
+ } \
+ } while (0);
+
+ GET_CONFIG(node_wwn);
+ GET_CONFIG(port_wwn);
+ GET_CONFIG(wq_enet_desc_count);
+ GET_CONFIG(wq_copy_desc_count);
+ GET_CONFIG(rq_desc_count);
+ GET_CONFIG(maxdatafieldsize);
+ GET_CONFIG(ed_tov);
+ GET_CONFIG(ra_tov);
+ GET_CONFIG(intr_timer);
+ GET_CONFIG(intr_timer_type);
+ GET_CONFIG(flags);
+ GET_CONFIG(flogi_retries);
+ GET_CONFIG(flogi_timeout);
+ GET_CONFIG(plogi_retries);
+ GET_CONFIG(plogi_timeout);
+ GET_CONFIG(io_throttle_count);
+ GET_CONFIG(link_down_timeout);
+ GET_CONFIG(port_down_timeout);
+ GET_CONFIG(port_down_io_retries);
+ GET_CONFIG(luns_per_tgt);
+
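+	/*
+	 * Clamp the firmware-provided values to the ranges this driver
+	 * supports; descriptor counts are also rounded up to a multiple
+	 * of 16.
+	 */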
+ c->wq_enet_desc_count =
+ min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
+ max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
+ c->wq_enet_desc_count));
+ c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);
+
+ c->wq_copy_desc_count =
+ min_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MAX,
+ max_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MIN,
+ c->wq_copy_desc_count));
+ c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16);
+
+ c->rq_desc_count =
+ min_t(u32, VNIC_FNIC_RQ_DESCS_MAX,
+ max_t(u32, VNIC_FNIC_RQ_DESCS_MIN,
+ c->rq_desc_count));
+ c->rq_desc_count = ALIGN(c->rq_desc_count, 16);
+
+ c->maxdatafieldsize =
+ min_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MAX,
+ max_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MIN,
+ c->maxdatafieldsize));
+ c->ed_tov =
+ min_t(u32, VNIC_FNIC_EDTOV_MAX,
+ max_t(u32, VNIC_FNIC_EDTOV_MIN,
+ c->ed_tov));
+
+ c->ra_tov =
+ min_t(u32, VNIC_FNIC_RATOV_MAX,
+ max_t(u32, VNIC_FNIC_RATOV_MIN,
+ c->ra_tov));
+
+ c->flogi_retries =
+ min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries);
+
+ c->flogi_timeout =
+ min_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MAX,
+ max_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MIN,
+ c->flogi_timeout));
+
+ c->plogi_retries =
+ min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries);
+
+ c->plogi_timeout =
+ min_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MAX,
+ max_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MIN,
+ c->plogi_timeout));
+
+ c->io_throttle_count =
+ min_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MAX,
+ max_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MIN,
+ c->io_throttle_count));
+
+ c->link_down_timeout =
+ min_t(u32, VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX,
+ c->link_down_timeout);
+
+ c->port_down_timeout =
+ min_t(u32, VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX,
+ c->port_down_timeout);
+
+ c->port_down_io_retries =
+ min_t(u32, VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX,
+ c->port_down_io_retries);
+
+ c->luns_per_tgt =
+ min_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MAX,
+ max_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MIN,
+ c->luns_per_tgt));
+
+ c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
+ c->intr_timer_type = c->intr_timer_type;
+
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
+ "wq/wq_copy/rq %d/%d/%d\n",
+ fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2],
+ fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5],
+ c->wq_enet_desc_count, c->wq_copy_desc_count,
+ c->rq_desc_count);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC node wwn %llx port wwn %llx\n",
+ c->node_wwn, c->port_wwn);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC ed_tov %d ra_tov %d\n",
+ c->ed_tov, c->ra_tov);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC mtu %d intr timer %d\n",
+ c->maxdatafieldsize, c->intr_timer);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC flags 0x%x luns per tgt %d\n",
+ c->flags, c->luns_per_tgt);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC flogi_retries %d flogi timeout %d\n",
+ c->flogi_retries, c->flogi_timeout);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC plogi retries %d plogi timeout %d\n",
+ c->plogi_retries, c->plogi_timeout);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC io throttle count %d link dn timeout %d\n",
+ c->io_throttle_count, c->link_down_timeout);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC port dn io retries %d port dn timeout %d\n",
+ c->port_down_io_retries, c->port_down_timeout);
+
+ return 0;
+}
+
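+/*
+ * fnic_set_nic_config
+ * Pack the RSS and VLAN-strip settings into a nic_cfg word and issue
+ * CMD_NIC_CFG to the firmware.
+ */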
+int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
+ u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable,
+ u8 tso_ipid_split_en, u8 ig_vlan_strip_en)
+{
+ u64 a0, a1;
+ u32 nic_cfg;
+ int wait = 1000;
+
+ vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
+ rss_hash_type, rss_hash_bits, rss_base_cpu,
+ rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
+
+ a0 = nic_cfg;
+ a1 = 0;
+
+ return vnic_dev_cmd(fnic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+}
+
+void fnic_get_res_counts(struct fnic *fnic)
+{
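+	/*
+	 * The last WQ is dedicated to the copy path used for SCSI I/O;
+	 * the remaining (raw) WQs carry FCS frames.
+	 */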
+ fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ);
+ fnic->raw_wq_count = fnic->wq_count - 1;
+ fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count;
+ fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ);
+ fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ);
+ fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
+ RES_TYPE_INTR_CTRL);
+}
+
+void fnic_free_vnic_resources(struct fnic *fnic)
+{
+ unsigned int i;
+
+ for (i = 0; i < fnic->raw_wq_count; i++)
+ vnic_wq_free(&fnic->wq[i]);
+
+ for (i = 0; i < fnic->wq_copy_count; i++)
+ vnic_wq_copy_free(&fnic->wq_copy[i]);
+
+ for (i = 0; i < fnic->rq_count; i++)
+ vnic_rq_free(&fnic->rq[i]);
+
+ for (i = 0; i < fnic->cq_count; i++)
+ vnic_cq_free(&fnic->cq[i]);
+
+ for (i = 0; i < fnic->intr_count; i++)
+ vnic_intr_free(&fnic->intr[i]);
+}
+
+int fnic_alloc_vnic_resources(struct fnic *fnic)
+{
+ enum vnic_dev_intr_mode intr_mode;
+ unsigned int mask_on_assertion;
+ unsigned int interrupt_offset;
+ unsigned int error_interrupt_enable;
+ unsigned int error_interrupt_offset;
+ unsigned int i, cq_index;
+ unsigned int wq_copy_cq_desc_count;
+ int err;
+
+ intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
+
+ shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
+ intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
+ intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
+ intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
+ "MSI-X" : "unknown");
+
+ shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
+ "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
+ fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
+ fnic->rq_count, fnic->cq_count, fnic->intr_count);
+
+ /* Allocate Raw WQ used for FCS frames */
+ for (i = 0; i < fnic->raw_wq_count; i++) {
+ err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
+ fnic->config.wq_enet_desc_count,
+ sizeof(struct wq_enet_desc));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ /* Allocate Copy WQs used for SCSI IOs */
+ for (i = 0; i < fnic->wq_copy_count; i++) {
+ err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
+ (fnic->raw_wq_count + i),
+ fnic->config.wq_copy_desc_count,
+ sizeof(struct fcpio_host_req));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ /* RQ for receiving FCS frames */
+ for (i = 0; i < fnic->rq_count; i++) {
+ err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
+ fnic->config.rq_desc_count,
+ sizeof(struct rq_enet_desc));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ /* CQ for each RQ */
+ for (i = 0; i < fnic->rq_count; i++) {
+ cq_index = i;
+ err = vnic_cq_alloc(fnic->vdev,
+ &fnic->cq[cq_index], cq_index,
+ fnic->config.rq_desc_count,
+ sizeof(struct cq_enet_rq_desc));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ /* CQ for each WQ */
+ for (i = 0; i < fnic->raw_wq_count; i++) {
+ cq_index = fnic->rq_count + i;
+ err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
+ fnic->config.wq_enet_desc_count,
+ sizeof(struct cq_enet_wq_desc));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ /* CQ for each COPY WQ */
+ wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3);
+ for (i = 0; i < fnic->wq_copy_count; i++) {
+ cq_index = fnic->raw_wq_count + fnic->rq_count + i;
+ err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
+ cq_index,
+ wq_copy_cq_desc_count,
+ sizeof(struct fcpio_fw_req));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ for (i = 0; i < fnic->intr_count; i++) {
+ err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i);
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ fnic->legacy_pba = vnic_dev_get_res(fnic->vdev,
+ RES_TYPE_INTR_PBA_LEGACY, 0);
+
+ if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Failed to hook legacy pba resource\n");
+ err = -ENODEV;
+ goto err_out_cleanup;
+ }
+
+ /*
+ * Init RQ/WQ resources.
+ *
+ * RQ[0 to n-1] point to CQ[0 to n-1]
+ * WQ[0 to m-1] point to CQ[n to n+m-1]
+ * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1]
+ *
+ * Note for copy wq we always initialize with cq_index = 0
+ *
+ * Error interrupt is not enabled for MSI.
+ */
+
+ switch (intr_mode) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSIX:
+ error_interrupt_enable = 1;
+ error_interrupt_offset = fnic->err_intr_offset;
+ break;
+ default:
+ error_interrupt_enable = 0;
+ error_interrupt_offset = 0;
+ break;
+ }
+
+ for (i = 0; i < fnic->rq_count; i++) {
+ cq_index = i;
+ vnic_rq_init(&fnic->rq[i],
+ cq_index,
+ error_interrupt_enable,
+ error_interrupt_offset);
+ }
+
+ for (i = 0; i < fnic->raw_wq_count; i++) {
+ cq_index = i + fnic->rq_count;
+ vnic_wq_init(&fnic->wq[i],
+ cq_index,
+ error_interrupt_enable,
+ error_interrupt_offset);
+ }
+
+ for (i = 0; i < fnic->wq_copy_count; i++) {
+ vnic_wq_copy_init(&fnic->wq_copy[i],
+ 0 /* cq_index 0 - always */,
+ error_interrupt_enable,
+ error_interrupt_offset);
+ }
+
+ for (i = 0; i < fnic->cq_count; i++) {
+
+ switch (intr_mode) {
+ case VNIC_DEV_INTR_MODE_MSIX:
+ interrupt_offset = i;
+ break;
+ default:
+ interrupt_offset = 0;
+ break;
+ }
+
+ vnic_cq_init(&fnic->cq[i],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ 1 /* interrupt_enable */,
+ 1 /* cq_entry_enable */,
+ 0 /* cq_message_enable */,
+ interrupt_offset,
+ 0 /* cq_message_addr */);
+ }
+
+ /*
+ * Init INTR resources
+ *
+ * mask_on_assertion is not used for INTx due to the level-
+ * triggered nature of INTx
+ */
+
+ switch (intr_mode) {
+ case VNIC_DEV_INTR_MODE_MSI:
+ case VNIC_DEV_INTR_MODE_MSIX:
+ mask_on_assertion = 1;
+ break;
+ default:
+ mask_on_assertion = 0;
+ break;
+ }
+
+ for (i = 0; i < fnic->intr_count; i++) {
+ vnic_intr_init(&fnic->intr[i],
+ fnic->config.intr_timer,
+ fnic->config.intr_timer_type,
+ mask_on_assertion);
+ }
+
+ /* init the stats memory by making the first call here */
+ err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "vnic_dev_stats_dump failed - x%x\n", err);
+ goto err_out_cleanup;
+ }
+
+ /* Clear LIF stats */
+ vnic_dev_stats_clear(fnic->vdev);
+
+ return 0;
+
+err_out_cleanup:
+ fnic_free_vnic_resources(fnic);
+
+ return err;
+}
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h
new file mode 100644
index 00000000000..b6f31026253
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_res.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_RES_H_
+#define _FNIC_RES_H_
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "fnic_io.h"
+#include "fcpio.h"
+#include "vnic_wq_copy.h"
+#include "vnic_cq_copy.h"
+
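+/*
+ * Encode and post one raw-WQ (ENET) descriptor for an FCS frame; the
+ * hardware inserts the VLAN header when vlan_tag_insert is set.
+ */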
+static inline void fnic_queue_wq_desc(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr,
+ unsigned int len, unsigned int fc_eof,
+ int vlan_tag_insert,
+ unsigned int vlan_tag,
+ int cq_entry, int sop, int eop)
+{
+ struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+
+ wq_enet_desc_enc(desc,
+ (u64)dma_addr | VNIC_PADDR_TARGET,
+ (u16)len,
+ 0, /* mss_or_csum_offset */
+ (u16)fc_eof,
+ 0, /* offload_mode */
+ (u8)eop, (u8)cq_entry,
+ 1, /* fcoe_encap */
+ (u8)vlan_tag_insert,
+ (u16)vlan_tag,
+ 0 /* loopback */);
+
+ vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+}
+
+static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
+ u32 req_id,
+ u32 lunmap_id, u8 spl_flags,
+ u32 sgl_cnt, u32 sense_len,
+ u64 sgl_addr, u64 sns_addr,
+ u8 crn, u8 pri_ta,
+ u8 flags, u8 *scsi_cdb,
+ u32 data_len, u8 *lun,
+ u32 d_id, u16 mss,
+ u32 ratov, u32 edtov)
+{
+ struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+ desc->hdr.type = FCPIO_ICMND_16; /* enum fcpio_type */
+ desc->hdr.status = 0; /* header status entry */
+ desc->hdr._resvd = 0; /* reserved */
+ desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+ desc->u.icmnd_16.lunmap_id = lunmap_id; /* index into lunmap table */
+ desc->u.icmnd_16.special_req_flags = spl_flags; /* exch req flags */
+ desc->u.icmnd_16._resvd0[0] = 0; /* reserved */
+ desc->u.icmnd_16._resvd0[1] = 0; /* reserved */
+ desc->u.icmnd_16._resvd0[2] = 0; /* reserved */
+ desc->u.icmnd_16.sgl_cnt = sgl_cnt; /* scatter-gather list count */
+ desc->u.icmnd_16.sense_len = sense_len; /* sense buffer length */
+ desc->u.icmnd_16.sgl_addr = sgl_addr; /* scatter-gather list addr */
+ desc->u.icmnd_16.sense_addr = sns_addr; /* sense buffer address */
+ desc->u.icmnd_16.crn = crn; /* SCSI Command Reference No.*/
+ desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */
+ desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */
+ desc->u.icmnd_16.flags = flags; /* command flags */
+ memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16); /* SCSI CDB */
+ desc->u.icmnd_16.data_len = data_len; /* length of data expected */
+ memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */
+ desc->u.icmnd_16._resvd2 = 0; /* reserved */
+ hton24(desc->u.icmnd_16.d_id, d_id); /* FC vNIC only: Target D_ID */
+ desc->u.icmnd_16.mss = mss; /* FC vNIC only: max burst */
+ desc->u.icmnd_16.r_a_tov = ratov; /*FC vNIC only: Res. Alloc Timeout */
+ desc->u.icmnd_16.e_d_tov = edtov; /*FC vNIC only: Err Detect Timeout */
+
+ vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq,
+ u32 req_id, u32 lunmap_id,
+ u32 tm_req, u32 tm_id, u8 *lun,
+ u32 d_id, u32 r_a_tov,
+ u32 e_d_tov)
+{
+ struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+ desc->hdr.type = FCPIO_ITMF; /* enum fcpio_type */
+ desc->hdr.status = 0; /* header status entry */
+ desc->hdr._resvd = 0; /* reserved */
+ desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+ desc->u.itmf.lunmap_id = lunmap_id; /* index into lunmap table */
+ desc->u.itmf.tm_req = tm_req; /* SCSI Task Management request */
+ desc->u.itmf.t_tag = tm_id; /* tag of fcpio to be aborted */
+ desc->u.itmf._resvd = 0;
+ memcpy(desc->u.itmf.lun, lun, LUN_ADDRESS); /* LUN address */
+ desc->u.itmf._resvd1 = 0;
+ hton24(desc->u.itmf.d_id, d_id); /* FC vNIC only: Target D_ID */
+ desc->u.itmf.r_a_tov = r_a_tov; /* FC vNIC only: R_A_TOV in msec */
+ desc->u.itmf.e_d_tov = e_d_tov; /* FC vNIC only: E_D_TOV in msec */
+
+ vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq,
+ u32 req_id, u8 format,
+ u32 s_id, u8 *gw_mac)
+{
+ struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+ desc->hdr.type = FCPIO_FLOGI_REG; /* enum fcpio_type */
+ desc->hdr.status = 0; /* header status entry */
+ desc->hdr._resvd = 0; /* reserved */
+ desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+ desc->u.flogi_reg.format = format;
+ hton24(desc->u.flogi_reg.s_id, s_id);
+ memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN);
+
+ vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq,
+ u32 req_id)
+{
+ struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+ desc->hdr.type = FCPIO_RESET; /* enum fcpio_type */
+ desc->hdr.status = 0; /* header status entry */
+ desc->hdr._resvd = 0; /* reserved */
+ desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+ vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq,
+ u32 req_id, u64 lunmap_addr,
+ u32 lunmap_len)
+{
+ struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+ desc->hdr.type = FCPIO_LUNMAP_REQ; /* enum fcpio_type */
+ desc->hdr.status = 0; /* header status entry */
+ desc->hdr._resvd = 0; /* reserved */
+ desc->hdr.tag.u.req_id = req_id; /* id for this request */
+
+ desc->u.lunmap_req.addr = lunmap_addr; /* address of the buffer */
+ desc->u.lunmap_req.len = lunmap_len; /* len of the buffer */
+
+ vnic_wq_copy_post(wq);
+}
+
+static inline void fnic_queue_rq_desc(struct vnic_rq *rq,
+ void *os_buf, dma_addr_t dma_addr,
+ u16 len)
+{
+ struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+
+ rq_enet_desc_enc(desc,
+ (u64)dma_addr | VNIC_PADDR_TARGET,
+ RQ_ENET_TYPE_ONLY_SOP,
+ (u16)len);
+
+ vnic_rq_post(rq, os_buf, 0, dma_addr, len);
+}
+
+
+struct fnic;
+
+int fnic_get_vnic_config(struct fnic *);
+int fnic_alloc_vnic_resources(struct fnic *);
+void fnic_free_vnic_resources(struct fnic *);
+void fnic_get_res_counts(struct fnic *);
+int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
+ u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu,
+ u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en);
+
+#endif /* _FNIC_RES_H_ */
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
new file mode 100644
index 00000000000..eabf3650285
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -0,0 +1,1850 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+#include "fnic_io.h"
+#include "fnic.h"
+
+const char *fnic_state_str[] = {
+ [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
+ [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
+ [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
+ [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
+};
+
+static const char *fnic_ioreq_state_str[] = {
+ [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
+ [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
+ [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
+ [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
+};
+
+static const char *fcpio_status_str[] = {
+ [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
+ [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
+ [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
+	[FCPIO_INVALID_PARAM]	= "FCPIO_INVALID_PARAM",
+ [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
+ [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
+ [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
+ [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
+ [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
+ [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
+ [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
+ [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
+ [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
+ [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
+ [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
+ [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
+ [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
+ [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
+	[FCPIO_LUNMAP_CHNG_PEND]	= "FCPIO_LUNMAP_CHNG_PEND",
+};
+
+const char *fnic_state_to_str(unsigned int state)
+{
+ if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
+ return "unknown";
+
+ return fnic_state_str[state];
+}
+
+static const char *fnic_ioreq_state_to_str(unsigned int state)
+{
+ if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
+ !fnic_ioreq_state_str[state])
+ return "unknown";
+
+ return fnic_ioreq_state_str[state];
+}
+
+static const char *fnic_fcpio_status_to_str(unsigned int status)
+{
+ if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
+ return "unknown";
+
+ return fcpio_status_str[status];
+}
+
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
+
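+/*
+ * Pick the per-I/O lock by hashing the SCSI request tag into
+ * io_req_lock[]; the mask assumes FNIC_IO_LOCKS is a power of two.
+ */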
+static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
+ struct scsi_cmnd *sc)
+{
+ u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
+
+ return &fnic->io_req_lock[hash];
+}
+
+/*
+ * Unmap the data buffer and sense buffer for an io_req,
+ * also unmap and free the device-private scatter/gather list.
+ */
+static void fnic_release_ioreq_buf(struct fnic *fnic,
+ struct fnic_io_req *io_req,
+ struct scsi_cmnd *sc)
+{
+ if (io_req->sgl_list_pa)
+ pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+ sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
+ PCI_DMA_TODEVICE);
+ scsi_dma_unmap(sc);
+
+ if (io_req->sgl_cnt)
+ mempool_free(io_req->sgl_list_alloc,
+ fnic->io_sgl_pool[io_req->sgl_type]);
+ if (io_req->sense_buf_pa)
+ pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+}
+
+/* Free up Copy Wq descriptors. Called with copy_wq lock held */
+static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
+{
+ /* if no Ack received from firmware, then nothing to clean */
+ if (!fnic->fw_ack_recd[0])
+ return 1;
+
+ /*
+ * Update desc_available count based on number of freed descriptors
+ * Account for wraparound
+ */
+ if (wq->to_clean_index <= fnic->fw_ack_index[0])
+ wq->ring.desc_avail += (fnic->fw_ack_index[0]
+ - wq->to_clean_index + 1);
+ else
+ wq->ring.desc_avail += (wq->ring.desc_count
+ - wq->to_clean_index
+ + fnic->fw_ack_index[0] + 1);
+
+ /*
+ * just bump clean index to ack_index+1 accounting for wraparound
+ * this will essentially free up all descriptors between
+ * to_clean_index and fw_ack_index, both inclusive
+ */
+ wq->to_clean_index =
+ (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
+
+ /* we have processed the acks received so far */
+ fnic->fw_ack_recd[0] = 0;
+ return 0;
+}
+
+
+/*
+ * fnic_fw_reset_handler
+ * Routine to send reset msg to fw
+ */
+int fnic_fw_reset_handler(struct fnic *fnic)
+{
+ struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+ free_wq_copy_descs(fnic, wq);
+
+ if (!vnic_wq_copy_desc_avail(wq))
+ ret = -EAGAIN;
+ else
+ fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
+
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+
+ if (!ret)
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Issued fw reset\n");
+ else
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Failed to issue fw reset\n");
+ return ret;
+}
+
+
+/*
+ * fnic_flogi_reg_handler
+ * Routine to send flogi register msg to fw
+ */
+int fnic_flogi_reg_handler(struct fnic *fnic)
+{
+ struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ u8 gw_mac[ETH_ALEN];
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+ free_wq_copy_descs(fnic, wq);
+
+ if (!vnic_wq_copy_desc_avail(wq)) {
+ ret = -EAGAIN;
+ goto flogi_reg_ioreq_end;
+ }
+
+ if (fnic->fcoui_mode)
+ memset(gw_mac, 0xff, ETH_ALEN);
+ else
+ memcpy(gw_mac, fnic->dest_addr, ETH_ALEN);
+
+ fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
+ FCPIO_FLOGI_REG_GW_DEST,
+ fnic->s_id,
+ gw_mac);
+
+flogi_reg_ioreq_end:
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+
+ if (!ret)
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+			      "flogi reg issued\n");
+
+ return ret;
+}
+
+/*
+ * fnic_queue_wq_copy_desc
+ * Routine to enqueue a wq copy desc
+ */
+static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
+ struct vnic_wq_copy *wq,
+ struct fnic_io_req *io_req,
+ struct scsi_cmnd *sc,
+ u32 sg_count)
+{
+ struct scatterlist *sg;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct host_sg_desc *desc;
+ u8 pri_tag = 0;
+ unsigned int i;
+ unsigned long intr_flags;
+ int flags;
+ u8 exch_flags;
+ struct scsi_lun fc_lun;
+ char msg[2];
+
+ if (sg_count) {
+ BUG_ON(sg_count < 0);
+ BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT);
+
+ /* For each SGE, create a device desc entry */
+ desc = io_req->sgl_list;
+ for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
+ desc->addr = cpu_to_le64(sg_dma_address(sg));
+ desc->len = cpu_to_le32(sg_dma_len(sg));
+ desc->_resvd = 0;
+ desc++;
+ }
+
+ io_req->sgl_list_pa = pci_map_single
+ (fnic->pdev,
+ io_req->sgl_list,
+ sizeof(io_req->sgl_list[0]) * sg_count,
+ PCI_DMA_TODEVICE);
+ }
+
+ io_req->sense_buf_pa = pci_map_single(fnic->pdev,
+ sc->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ PCI_DMA_FROMDEVICE);
+
+ int_to_scsilun(sc->device->lun, &fc_lun);
+
+ pri_tag = FCPIO_ICMND_PTA_SIMPLE;
+ msg[0] = MSG_SIMPLE_TAG;
+ scsi_populate_tag_msg(sc, msg);
+ if (msg[0] == MSG_ORDERED_TAG)
+ pri_tag = FCPIO_ICMND_PTA_ORDERED;
+
+ /* Enqueue the descriptor in the Copy WQ */
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
+
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+ free_wq_copy_descs(fnic, wq);
+
+ if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ flags = 0;
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ flags = FCPIO_ICMND_RDDATA;
+ else if (sc->sc_data_direction == DMA_TO_DEVICE)
+ flags = FCPIO_ICMND_WRDATA;
+
+ exch_flags = 0;
+ if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
+ (rp->flags & FC_RP_FLAGS_RETRY))
+ exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
+
+ fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
+ 0, exch_flags, io_req->sgl_cnt,
+ SCSI_SENSE_BUFFERSIZE,
+ io_req->sgl_list_pa,
+ io_req->sense_buf_pa,
+ 0, /* scsi cmd ref, always 0 */
+ pri_tag, /* scsi pri and tag */
+ flags, /* command flags */
+ sc->cmnd, scsi_bufflen(sc),
+ fc_lun.scsi_lun, io_req->port_id,
+ rport->maxframe_size, rp->r_a_tov,
+ rp->e_d_tov);
+
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ return 0;
+}
+
+/*
+ * fnic_queuecommand
+ * Routine to send a scsi cdb
+ * Called with host_lock held and interrupts disabled.
+ */
+int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+{
+ struct fc_lport *lp;
+ struct fc_rport *rport;
+ struct fnic_io_req *io_req;
+ struct fnic *fnic;
+ struct vnic_wq_copy *wq;
+ int ret;
+ u32 sg_count;
+ unsigned long flags;
+ unsigned long ptr;
+
+ rport = starget_to_rport(scsi_target(sc->device));
+ ret = fc_remote_port_chkready(rport);
+ if (ret) {
+ sc->result = ret;
+ done(sc);
+ return 0;
+ }
+
+ lp = shost_priv(sc->device->host);
+ if (lp->state != LPORT_ST_READY || !(lp->link_up))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+	/*
+	 * Release the host lock and use driver resource specific locks
+	 * from here on. Use spin_unlock() rather than spin_unlock_irq()
+	 * so that interrupts stay disabled if they were disabled before
+	 * the caller took the host lock.
+	 */
+ spin_unlock(lp->host->host_lock);
+
+ /* Get a new io_req for this SCSI IO */
+ fnic = lport_priv(lp);
+
+ io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
+ if (!io_req) {
+ ret = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+ memset(io_req, 0, sizeof(*io_req));
+
+ /* Map the data buffer */
+ sg_count = scsi_dma_map(sc);
+ if (sg_count < 0) {
+ mempool_free(io_req, fnic->io_req_pool);
+ goto out;
+ }
+
+ /* Determine the type of scatter/gather list we need */
+ io_req->sgl_cnt = sg_count;
+ io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
+ if (sg_count > FNIC_DFLT_SG_DESC_CNT)
+ io_req->sgl_type = FNIC_SGL_CACHE_MAX;
+
+ if (sg_count) {
+ io_req->sgl_list =
+ mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
+ GFP_ATOMIC | GFP_DMA);
+ if (!io_req->sgl_list) {
+ ret = SCSI_MLQUEUE_HOST_BUSY;
+ scsi_dma_unmap(sc);
+ mempool_free(io_req, fnic->io_req_pool);
+ goto out;
+ }
+
+ /* Cache sgl list allocated address before alignment */
+ io_req->sgl_list_alloc = io_req->sgl_list;
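+		/*
+		 * Round sgl_list up to the next FNIC_SG_DESC_ALIGN
+		 * boundary; the SGL caches are over-sized by
+		 * FNIC_SG_DESC_ALIGN bytes, so the aligned pointer
+		 * stays within the allocation.
+		 */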
+ ptr = (unsigned long) io_req->sgl_list;
+ if (ptr % FNIC_SG_DESC_ALIGN) {
+ io_req->sgl_list = (struct host_sg_desc *)
+ (((unsigned long) ptr
+ + FNIC_SG_DESC_ALIGN - 1)
+ & ~(FNIC_SG_DESC_ALIGN - 1));
+ }
+ }
+
+ /* initialize rest of io_req */
+ io_req->port_id = rport->port_id;
+ CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
+ CMD_SP(sc) = (char *)io_req;
+ sc->scsi_done = done;
+
+ /* create copy wq desc and enqueue it */
+ wq = &fnic->wq_copy[0];
+ ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
+ if (ret) {
+ /*
+ * In case another thread cancelled the request,
+ * refetch the pointer under the lock.
+ */
+ spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
+
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ CMD_SP(sc) = NULL;
+ CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ spin_unlock_irqrestore(io_lock, flags);
+ if (io_req) {
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+ }
+ }
+out:
+ /* acquire host lock before returning to SCSI */
+ spin_lock(lp->host->host_lock);
+ return ret;
+}
+
+/*
+ * fnic_fcpio_fw_reset_cmpl_handler
+ * Routine to handle fw reset completion
+ */
+static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
+ struct fcpio_fw_req *desc)
+{
+ u8 type;
+ u8 hdr_status;
+ struct fcpio_tag tag;
+ int ret = 0;
+ struct fc_frame *flogi;
+ unsigned long flags;
+
+ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+
+ /* Clean up all outstanding io requests */
+ fnic_cleanup_io(fnic, SCSI_NO_TAG);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ flogi = fnic->flogi;
+ fnic->flogi = NULL;
+
+ /* fnic should be in FC_TRANS_ETH_MODE */
+ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
+ /* Check status of reset completion */
+ if (!hdr_status) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "reset cmpl success\n");
+ /* Ready to send flogi out */
+ fnic->state = FNIC_IN_ETH_MODE;
+ } else {
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+				      "fnic fw_reset: failed %s\n",
+ fnic_fcpio_status_to_str(hdr_status));
+
+ /*
+ * Unable to change to eth mode, cannot send out flogi
+ * Change state to fc mode, so that subsequent Flogi
+ * requests from libFC will cause more attempts to
+ * reset the firmware. Free the cached flogi
+ */
+ fnic->state = FNIC_IN_FC_MODE;
+ ret = -1;
+ }
+ } else {
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "Unexpected state %s while processing"
+ " reset cmpl\n", fnic_state_to_str(fnic->state));
+ ret = -1;
+ }
+
+ /* Thread removing device blocks till firmware reset is complete */
+ if (fnic->remove_wait)
+ complete(fnic->remove_wait);
+
+ /*
+ * If fnic is being removed, or fw reset failed
+ * free the flogi frame. Else, send it out
+ */
+ if (fnic->remove_wait || ret) {
+ fnic->flogi_oxid = FC_XID_UNKNOWN;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if (flogi)
+ dev_kfree_skb_irq(fp_skb(flogi));
+ goto reset_cmpl_handler_end;
+ }
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (flogi)
+ ret = fnic_send_frame(fnic, flogi);
+
+ reset_cmpl_handler_end:
+ return ret;
+}
+
+/*
+ * fnic_fcpio_flogi_reg_cmpl_handler
+ * Routine to handle flogi register completion
+ */
+static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
+ struct fcpio_fw_req *desc)
+{
+ u8 type;
+ u8 hdr_status;
+ struct fcpio_tag tag;
+ int ret = 0;
+ struct fc_frame *flogi_resp = NULL;
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+
+ /* Update fnic state based on status of flogi reg completion */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ flogi_resp = fnic->flogi_resp;
+ fnic->flogi_resp = NULL;
+
+ if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
+
+ /* Check flogi registration completion status */
+ if (!hdr_status) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+				      "flogi reg succeeded\n");
+ fnic->state = FNIC_IN_FC_MODE;
+ } else {
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+				      "fnic flogi reg: failed %s\n",
+ fnic_fcpio_status_to_str(hdr_status));
+ fnic->state = FNIC_IN_ETH_MODE;
+ ret = -1;
+ }
+ } else {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Unexpected fnic state %s while"
+ " processing flogi reg completion\n",
+ fnic_state_to_str(fnic->state));
+ ret = -1;
+ }
+
+ /* Successful flogi reg cmpl, pass frame to LibFC */
+ if (!ret && flogi_resp) {
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto reg_cmpl_handler_end;
+ }
+ skb = (struct sk_buff *)flogi_resp;
+ /* Use fr_flags to indicate whether flogi resp or not */
+ fr_flags(flogi_resp) = 1;
+ fr_dev(flogi_resp) = fnic->lport;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ skb_queue_tail(&fnic->frame_queue, skb);
+ queue_work(fnic_event_queue, &fnic->frame_work);
+
+ } else {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if (flogi_resp)
+ dev_kfree_skb_irq(fp_skb(flogi_resp));
+ }
+
+reg_cmpl_handler_end:
+ return ret;
+}
+
+static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
+ u16 request_out)
+{
+ if (wq->to_clean_index <= wq->to_use_index) {
+ /* out of range, stale request_out index */
+ if (request_out < wq->to_clean_index ||
+ request_out >= wq->to_use_index)
+ return 0;
+ } else {
+ /* out of range, stale request_out index */
+ if (request_out < wq->to_clean_index &&
+ request_out >= wq->to_use_index)
+ return 0;
+ }
+ /* request_out index is in range */
+ return 1;
+}
+
+
+/*
+ * Mark that ack received and store the Ack index. If there are multiple
+ * acks received before Tx thread cleans it up, the latest value will be
+ * used which is correct behavior. This state should be in the copy Wq
+ * instead of in the fnic
+ */
+static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
+ unsigned int cq_index,
+ struct fcpio_fw_req *desc)
+{
+ struct vnic_wq_copy *wq;
+ u16 request_out = desc->u.ack.request_out;
+ unsigned long flags;
+
+ /* mark the ack state */
+ wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+ if (is_ack_index_in_range(wq, request_out)) {
+ fnic->fw_ack_index[0] = request_out;
+ fnic->fw_ack_recd[0] = 1;
+ }
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+}
+
+/*
+ * fnic_fcpio_icmnd_cmpl_handler
+ * Routine to handle icmnd completions
+ */
+static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
+ struct fcpio_fw_req *desc)
+{
+ u8 type;
+ u8 hdr_status;
+ struct fcpio_tag tag;
+ u32 id;
+ u64 xfer_len = 0;
+ struct fcpio_icmnd_cmpl *icmnd_cmpl;
+ struct fnic_io_req *io_req;
+ struct scsi_cmnd *sc;
+ unsigned long flags;
+ spinlock_t *io_lock;
+
+ /* Decode the cmpl description to get the io_req id */
+ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+ fcpio_tag_id_dec(&tag, &id);
+
+ if (id >= FNIC_MAX_IO_REQ)
+ return;
+
+ sc = scsi_host_find_tag(fnic->lport->host, id);
+ WARN_ON_ONCE(!sc);
+ if (!sc)
+ return;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ WARN_ON_ONCE(!io_req);
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ return;
+ }
+
+ /* firmware completed the io */
+ io_req->io_completed = 1;
+
+ /*
+ * if SCSI-ML has already issued abort on this command,
+ * ignore completion of the IO. The abts path will clean it up
+ */
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ return;
+ }
+
+ /* Mark the IO as complete */
+ CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+
+ icmnd_cmpl = &desc->u.icmnd_cmpl;
+
+ switch (hdr_status) {
+ case FCPIO_SUCCESS:
+ sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
+ xfer_len = scsi_bufflen(sc);
+ scsi_set_resid(sc, icmnd_cmpl->residual);
+
+ if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
+ xfer_len -= icmnd_cmpl->residual;
+
+ /*
+ * If queue_full, then try to reduce queue depth for all
+ * LUNS on the target. Todo: this should be accompanied
+ * by a periodic queue_depth rampup based on successful
+ * IO completion.
+ */
+ if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
+ struct scsi_device *t_sdev;
+ int qd = 0;
+
+ shost_for_each_device(t_sdev, sc->device->host) {
+ if (t_sdev->id != sc->device->id)
+ continue;
+
+ if (t_sdev->queue_depth > 1) {
+ qd = scsi_track_queue_full
+ (t_sdev,
+ t_sdev->queue_depth - 1);
+ if (qd == -1)
+ qd = t_sdev->host->cmd_per_lun;
+ shost_printk(KERN_INFO,
+ fnic->lport->host,
+ "scsi[%d:%d:%d:%d"
+						     "] queue full detected, "
+						     "new depth = %d\n",
+ t_sdev->host->host_no,
+ t_sdev->channel,
+ t_sdev->id, t_sdev->lun,
+ t_sdev->queue_depth);
+ }
+ }
+ }
+ break;
+
+ case FCPIO_TIMEOUT: /* request was timed out */
+ sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_ABORTED: /* request was aborted */
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
+ scsi_set_resid(sc, icmnd_cmpl->residual);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
+ sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
+ break;
+ case FCPIO_INVALID_HEADER: /* header contains invalid data */
+ case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
+ case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
+ case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
+ case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
+ case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
+	case FCPIO_FW_ERR:           /* request was terminated due to fw error */
+ default:
+ shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+ fnic_fcpio_status_to_str(hdr_status));
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+ }
+
+ /* Break link with the SCSI command */
+ CMD_SP(sc) = NULL;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+
+ mempool_free(io_req, fnic->io_req_pool);
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+ fnic->lport->host_stats.fcp_input_requests++;
+ fnic->fcp_input_bytes += xfer_len;
+ } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ fnic->lport->host_stats.fcp_output_requests++;
+ fnic->fcp_output_bytes += xfer_len;
+ } else
+ fnic->lport->host_stats.fcp_control_requests++;
+
+ /* Call SCSI completion function to complete the IO */
+ if (sc->scsi_done)
+ sc->scsi_done(sc);
+
+}
+
+/* fnic_fcpio_itmf_cmpl_handler
+ * Routine to handle itmf completions
+ */
+static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
+ struct fcpio_fw_req *desc)
+{
+ u8 type;
+ u8 hdr_status;
+ struct fcpio_tag tag;
+ u32 id;
+ struct scsi_cmnd *sc;
+ struct fnic_io_req *io_req;
+ unsigned long flags;
+ spinlock_t *io_lock;
+
+ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+ fcpio_tag_id_dec(&tag, &id);
+
+ if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
+ return;
+
+ sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
+ WARN_ON_ONCE(!sc);
+ if (!sc)
+ return;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ WARN_ON_ONCE(!io_req);
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ return;
+ }
+
+ if (id & FNIC_TAG_ABORT) {
+ /* Completion of abort cmd */
+ if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
+ /* This is a late completion. Ignore it */
+ spin_unlock_irqrestore(io_lock, flags);
+ return;
+ }
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ CMD_ABTS_STATUS(sc) = hdr_status;
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "abts cmpl recd. id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+
+ /*
+ * If scsi_eh thread is blocked waiting for abts to complete,
+ * signal completion to it. IO will be cleaned in the thread
+ * else clean it in this context
+ */
+ if (io_req->abts_done) {
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "abts cmpl, completing IO\n");
+ CMD_SP(sc) = NULL;
+ sc->result = (DID_ERROR << 16);
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+ if (sc->scsi_done)
+ sc->scsi_done(sc);
+ }
+
+ } else if (id & FNIC_TAG_DEV_RST) {
+ /* Completion of device reset */
+ CMD_LR_STATUS(sc) = hdr_status;
+ CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset cmpl recd. id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ if (io_req->dr_done)
+ complete(io_req->dr_done);
+ spin_unlock_irqrestore(io_lock, flags);
+
+ } else {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unexpected itmf io state %s tag %x\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
+ spin_unlock_irqrestore(io_lock, flags);
+ }
+
+}
+
+/*
+ * fnic_fcpio_cmpl_handler
+ * Routine to service the cq for wq_copy
+ */
+static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
+ unsigned int cq_index,
+ struct fcpio_fw_req *desc)
+{
+ struct fnic *fnic = vnic_dev_priv(vdev);
+ int ret = 0;
+
+ switch (desc->hdr.type) {
+ case FCPIO_ACK: /* fw copied copy wq desc to its queue */
+ fnic_fcpio_ack_handler(fnic, cq_index, desc);
+ break;
+
+ case FCPIO_ICMND_CMPL: /* fw completed a command */
+ fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
+ break;
+
+ case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
+ fnic_fcpio_itmf_cmpl_handler(fnic, desc);
+ break;
+
+ case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+ ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
+ break;
+
+ case FCPIO_RESET_CMPL: /* fw completed reset */
+ ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
+ break;
+
+ default:
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "firmware completion type %d\n",
+ desc->hdr.type);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * fnic_wq_copy_cmpl_handler
+ * Routine to process wq copy
+ */
+int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
+{
+ unsigned int wq_work_done = 0;
+ unsigned int i, cq_index;
+ unsigned int cur_work_done;
+
+ for (i = 0; i < fnic->wq_copy_count; i++) {
+ cq_index = i + fnic->raw_wq_count + fnic->rq_count;
+ cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
+ fnic_fcpio_cmpl_handler,
+ copy_work_to_do);
+ wq_work_done += cur_work_done;
+ }
+ return wq_work_done;
+}
+
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
+{
+ unsigned int i;
+ struct fnic_io_req *io_req;
+ unsigned long flags = 0;
+ struct scsi_cmnd *sc;
+ spinlock_t *io_lock;
+
+ for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
+ if (i == exclude_id)
+ continue;
+
+ sc = scsi_host_find_tag(fnic->lport->host, i);
+ if (!sc)
+ continue;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ goto cleanup_scsi_cmd;
+ }
+
+ CMD_SP(sc) = NULL;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /*
+ * If there is a scsi_cmnd associated with this io_req, then
+ * free the corresponding state
+ */
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+
+cleanup_scsi_cmd:
+ sc->result = DID_TRANSPORT_DISRUPTED << 16;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
+ " DID_TRANSPORT_DISRUPTED\n");
+
+ /* Complete the command to SCSI */
+ if (sc->scsi_done)
+ sc->scsi_done(sc);
+ }
+}
+
+void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
+ struct fcpio_host_req *desc)
+{
+ u32 id;
+ struct fnic *fnic = vnic_dev_priv(wq->vdev);
+ struct fnic_io_req *io_req;
+ struct scsi_cmnd *sc;
+ unsigned long flags;
+ spinlock_t *io_lock;
+
+ /* get the tag reference */
+ fcpio_tag_id_dec(&desc->hdr.tag, &id);
+ id &= FNIC_TAG_MASK;
+
+ if (id >= FNIC_MAX_IO_REQ)
+ return;
+
+ sc = scsi_host_find_tag(fnic->lport->host, id);
+ if (!sc)
+ return;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ /* Get the IO context which this desc refers to */
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ /* fnic interrupts are turned off by now */
+
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ goto wq_copy_cleanup_scsi_cmd;
+ }
+
+ CMD_SP(sc) = NULL;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+
+wq_copy_cleanup_scsi_cmd:
+ sc->result = DID_NO_CONNECT << 16;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
+ " DID_NO_CONNECT\n");
+
+ if (sc->scsi_done)
+ sc->scsi_done(sc);
+}
+
+static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
+ u32 task_req, u8 *fc_lun,
+ struct fnic_io_req *io_req)
+{
+ struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+ free_wq_copy_descs(fnic, wq);
+
+ if (!vnic_wq_copy_desc_avail(wq)) {
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ return 1;
+ }
+ fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
+ 0, task_req, tag, fc_lun, io_req->port_id,
+ fnic->config.ra_tov, fnic->config.ed_tov);
+
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ return 0;
+}
+
+void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+{
+ int tag;
+ struct fnic_io_req *io_req;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ struct scsi_cmnd *sc;
+ struct scsi_lun fc_lun;
+ enum fnic_ioreq_state old_ioreq_state;
+
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "fnic_rport_reset_exch called portid 0x%06x\n",
+ port_id);
+
+ if (fnic->in_remove)
+ return;
+
+ for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ sc = scsi_host_find_tag(fnic->lport->host, tag);
+ if (!sc)
+ continue;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ if (!io_req || io_req->port_id != port_id) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to rport that went away
+ */
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ old_ioreq_state = CMD_STATE(sc);
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+
+ BUG_ON(io_req->abts_done);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_reset_exch: Issuing abts\n");
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /* Now queue the abort command to firmware */
+ int_to_scsilun(sc->device->lun, &fc_lun);
+
+ if (fnic_queue_abort_io_req(fnic, tag,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ /*
+ * Revert the cmd state back to the old state if it
+ * hasn't changed in the meantime. This cmd will get
+ * aborted later by scsi_eh, or cleaned up during
+ * lun reset
+ */
+ io_lock = fnic_io_lock_hash(fnic, sc);
+
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = old_ioreq_state;
+ spin_unlock_irqrestore(io_lock, flags);
+ }
+ }
+
+}
+
+void fnic_terminate_rport_io(struct fc_rport *rport)
+{
+ int tag;
+ struct fnic_io_req *io_req;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ struct scsi_cmnd *sc;
+ struct scsi_lun fc_lun;
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+ struct fnic *fnic = lport_priv(lport);
+ struct fc_rport *cmd_rport;
+ enum fnic_ioreq_state old_ioreq_state;
+
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host, "fnic_terminate_rport_io called"
+ " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n",
+ rport->port_name, rport->node_name,
+ rport->port_id);
+
+ if (fnic->in_remove)
+ return;
+
+ for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ sc = scsi_host_find_tag(fnic->lport->host, tag);
+ if (!sc)
+ continue;
+
+ cmd_rport = starget_to_rport(scsi_target(sc->device));
+ if (rport != cmd_rport)
+ continue;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ if (!io_req || rport != cmd_rport) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to rport that went away
+ */
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ old_ioreq_state = CMD_STATE(sc);
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+
+ BUG_ON(io_req->abts_done);
+
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "fnic_terminate_rport_io: Issuing abts\n");
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /* Now queue the abort command to firmware */
+ int_to_scsilun(sc->device->lun, &fc_lun);
+
+ if (fnic_queue_abort_io_req(fnic, tag,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ /*
+ * Revert the cmd state back to the old state if it
+ * hasn't changed in the meantime. This cmd will get
+ * aborted later by scsi_eh, or cleaned up during
+ * lun reset
+ */
+ io_lock = fnic_io_lock_hash(fnic, sc);
+
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = old_ioreq_state;
+ spin_unlock_irqrestore(io_lock, flags);
+ }
+ }
+
+}
+
+static void fnic_block_error_handler(struct scsi_cmnd *sc)
+{
+ struct Scsi_Host *shost = sc->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ while (rport->port_state == FC_PORTSTATE_BLOCKED) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ msleep(1000);
+ spin_lock_irqsave(shost->host_lock, flags);
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+}
+
+/*
+ * This function is exported to SCSI for sending abort commands.
+ * A SCSI IO is represented by an io_req in the driver.
+ * The io_req is linked to the SCSI command, which in turn links it to
+ * the ULP's IO.
+ */
+int fnic_abort_cmd(struct scsi_cmnd *sc)
+{
+ struct fc_lport *lp;
+ struct fnic *fnic;
+ struct fnic_io_req *io_req;
+ struct fc_rport *rport;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ int ret = SUCCESS;
+ u32 task_req;
+ struct scsi_lun fc_lun;
+ DECLARE_COMPLETION_ONSTACK(tm_done);
+
+ /* Wait for rport to unblock */
+ fnic_block_error_handler(sc);
+
+ /* Get local-port, check ready and link up */
+ lp = shost_priv(sc->device->host);
+
+ fnic = lport_priv(lp);
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
+ (starget_to_rport(scsi_target(sc->device)))->port_id,
+ sc->device->lun, sc->request->tag);
+
+ if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+
+ /*
+ * Avoid a race between SCSI issuing the abort and the device
+ * completing the command.
+ *
+ * If the command has already been completed by the fw cmpl code,
+ * we just return SUCCESS from here, meaning the abort succeeded.
+ * In the SCSI ML, since the command timeout has already fired,
+ * the completion won't actually complete the command and it will
+ * be considered an aborted command.
+ *
+ * The CMD_SP will not be cleared except while holding io_req_lock.
+ */
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ goto fnic_abort_cmd_end;
+ }
+
+ io_req->abts_done = &tm_done;
+
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ goto wait_pending;
+ }
+ /*
+ * The command is still pending, so abort it.
+ * If the firmware completes the command after this point, the
+ * completion won't be passed up to the mid-layer, since the abort
+ * has already started.
+ */
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /*
+ * Check readiness of the remote port. If the path to remote
+ * port is up, then send abts to the remote port to terminate
+ * the IO. Else, just locally terminate the IO in the firmware
+ */
+ rport = starget_to_rport(scsi_target(sc->device));
+ if (fc_remote_port_chkready(rport) == 0)
+ task_req = FCPIO_ITMF_ABT_TASK;
+ else
+ task_req = FCPIO_ITMF_ABT_TASK_TERM;
+
+ /* Now queue the abort command to firmware */
+ int_to_scsilun(sc->device->lun, &fc_lun);
+
+ if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
+ fc_lun.scsi_lun, io_req)) {
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (io_req)
+ io_req->abts_done = NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+
+ /*
+ * We queued an abort IO, wait for its completion.
+ * Once the firmware completes the abort command, it will
+ * wake up this thread.
+ */
+ wait_pending:
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies
+ (2 * fnic->config.ra_tov +
+ fnic->config.ed_tov));
+
+ /* Check the abort status */
+ spin_lock_irqsave(io_lock, flags);
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+ io_req->abts_done = NULL;
+
+ /* fw did not complete abort, timed out */
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+
+ /*
+ * firmware completed the abort, check the status,
+ * free the io_req irrespective of failure or success
+ */
+ if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
+ ret = FAILED;
+
+ CMD_SP(sc) = NULL;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+
+fnic_abort_cmd_end:
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Returning from abort cmd %s\n",
+ (ret == SUCCESS) ?
+ "SUCCESS" : "FAILED");
+ return ret;
+}
+
+static inline int fnic_queue_dr_io_req(struct fnic *fnic,
+ struct scsi_cmnd *sc,
+ struct fnic_io_req *io_req)
+{
+ struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct scsi_lun fc_lun;
+ int ret = 0;
+ unsigned long intr_flags;
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
+
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
+ free_wq_copy_descs(fnic, wq);
+
+ if (!vnic_wq_copy_desc_avail(wq)) {
+ ret = -EAGAIN;
+ goto lr_io_req_end;
+ }
+
+ /* fill in the lun info */
+ int_to_scsilun(sc->device->lun, &fc_lun);
+
+ fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
+ 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
+ fc_lun.scsi_lun, io_req->port_id,
+ fnic->config.ra_tov, fnic->config.ed_tov);
+
+lr_io_req_end:
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+
+ return ret;
+}
+
+/*
+ * Clean up any pending aborts on the lun.
+ * For each outstanding IO on this lun whose abort has not been completed
+ * by the fw, issue a local abort and wait for it to complete. Return 0 if
+ * all commands were successfully aborted, 1 otherwise
+ */
+static int fnic_clean_pending_aborts(struct fnic *fnic,
+ struct scsi_cmnd *lr_sc)
+{
+ int tag;
+ struct fnic_io_req *io_req;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ int ret = 0;
+ struct scsi_cmnd *sc;
+ struct fc_rport *rport;
+ struct scsi_lun fc_lun;
+ struct scsi_device *lun_dev = lr_sc->device;
+ DECLARE_COMPLETION_ONSTACK(tm_done);
+
+ for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ sc = scsi_host_find_tag(fnic->lport->host, tag);
+ /*
+ * ignore this lun reset cmd or cmds that do not belong to
+ * this lun
+ */
+ if (!sc || sc == lr_sc || sc->device != lun_dev)
+ continue;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ if (!io_req || sc->device != lun_dev) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to the LUN that we are resetting
+ */
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Found IO in %s on lun\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);
+
+ CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ io_req->abts_done = &tm_done;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /* Now queue the abort command to firmware */
+ int_to_scsilun(sc->device->lun, &fc_lun);
+ rport = starget_to_rport(scsi_target(sc->device));
+
+ if (fnic_queue_abort_io_req(fnic, tag,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (io_req)
+ io_req->abts_done = NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = 1;
+ goto clean_pending_aborts_end;
+ }
+
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies
+ (fnic->config.ed_tov));
+
+ /* Recheck cmd state to check if it is now aborted */
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = 1;
+ goto clean_pending_aborts_end;
+ }
+
+ io_req->abts_done = NULL;
+
+ /* if abort is still pending with fw, fail */
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = 1;
+ goto clean_pending_aborts_end;
+ }
+ CMD_SP(sc) = NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+ }
+
+clean_pending_aborts_end:
+ return ret;
+}
+
+/*
+ * The SCSI EH thread issues a LUN reset when one or more commands on a
+ * LUN fail to get aborted. It calls the driver's eh_device_reset with a
+ * SCSI command on the LUN.
+ */
+int fnic_device_reset(struct scsi_cmnd *sc)
+{
+ struct fc_lport *lp;
+ struct fnic *fnic;
+ struct fnic_io_req *io_req;
+ struct fc_rport *rport;
+ int status;
+ int ret = FAILED;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ DECLARE_COMPLETION_ONSTACK(tm_done);
+
+ /* Wait for rport to unblock */
+ fnic_block_error_handler(sc);
+
+ /* Get local-port, check ready and link up */
+ lp = shost_priv(sc->device->host);
+
+ fnic = lport_priv(lp);
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "Device reset called FCID 0x%x, LUN 0x%x\n",
+ (starget_to_rport(scsi_target(sc->device)))->port_id,
+ sc->device->lun);
+
+
+ if (lp->state != LPORT_ST_READY || !(lp->link_up))
+ goto fnic_device_reset_end;
+
+ /* Check if remote port up */
+ rport = starget_to_rport(scsi_target(sc->device));
+ if (fc_remote_port_chkready(rport))
+ goto fnic_device_reset_end;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ /*
+ * If there is an io_req attached to this command, then use it,
+ * else allocate a new one.
+ */
+ if (!io_req) {
+ io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ goto fnic_device_reset_end;
+ }
+ memset(io_req, 0, sizeof(*io_req));
+ io_req->port_id = rport->port_id;
+ CMD_SP(sc) = (char *)io_req;
+ }
+ io_req->dr_done = &tm_done;
+ CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
+ CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
+ spin_unlock_irqrestore(io_lock, flags);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
+ sc->request->tag);
+
+ /*
+ * issue the device reset, if enqueue failed, clean up the ioreq
+ * and break assoc with scsi cmd
+ */
+ if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (io_req)
+ io_req->dr_done = NULL;
+ goto fnic_device_reset_clean;
+ }
+
+ /*
+ * Wait on the local completion for LUN reset. The io_req may be
+ * freed while we wait since we hold no lock.
+ */
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (!io_req) {
+ spin_unlock_irqrestore(io_lock, flags);
+ goto fnic_device_reset_end;
+ }
+ io_req->dr_done = NULL;
+
+ status = CMD_LR_STATUS(sc);
+ spin_unlock_irqrestore(io_lock, flags);
+
+ /*
+ * If the lun reset did not complete, bail out with FAILED. The
+ * io_req gets cleaned up at higher levels of EH
+ */
+ if (status == FCPIO_INVALID_CODE) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Device reset timed out\n");
+ goto fnic_device_reset_end;
+ }
+
+ /* Completed, but not successful, clean up the io_req, return fail */
+ if (status != FCPIO_SUCCESS) {
+ spin_lock_irqsave(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "Device reset completed - failed\n");
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ goto fnic_device_reset_clean;
+ }
+
+ /*
+ * Clean up any aborts on this lun that have still not
+ * completed. If any of these fail, then LUN reset fails.
+ * clean_pending_aborts cleans all cmds on this lun except
+ * the lun reset cmd. If all cmds get cleaned, the lun reset
+ * succeeds
+ */
+ if (fnic_clean_pending_aborts(fnic, sc)) {
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Device reset failed"
+ " since could not abort all IOs\n");
+ goto fnic_device_reset_clean;
+ }
+
+ /* Clean lun reset command */
+ spin_lock_irqsave(io_lock, flags);
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (io_req)
+ /* Completed, and successful */
+ ret = SUCCESS;
+
+fnic_device_reset_clean:
+ if (io_req)
+ CMD_SP(sc) = NULL;
+
+ spin_unlock_irqrestore(io_lock, flags);
+
+ if (io_req) {
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+ }
+
+fnic_device_reset_end:
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Returning from device reset %s\n",
+ (ret == SUCCESS) ?
+ "SUCCESS" : "FAILED");
+ return ret;
+}
+
+/* Clean up all IOs, clean up libFC local port */
+int fnic_reset(struct Scsi_Host *shost)
+{
+ struct fc_lport *lp;
+ struct fnic *fnic;
+ int ret = SUCCESS;
+
+ lp = shost_priv(shost);
+ fnic = lport_priv(lp);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_reset called\n");
+
+ /*
+ * Reset local port, this will clean up libFC exchanges,
+ * reset remote port sessions, and if link is up, begin flogi
+ */
+ if (lp->tt.lport_reset(lp))
+ ret = FAILED;
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Returning from fnic reset %s\n",
+ (ret == SUCCESS) ?
+ "SUCCESS" : "FAILED");
+
+ return ret;
+}
+
+/*
+ * SCSI error handling calls the driver's eh_host_reset if all prior
+ * error handling levels return FAILED. If the host reset completes
+ * successfully, and if the link is up, then fabric login begins.
+ *
+ * Host reset is the highest level of error recovery. If this fails, the
+ * host is offlined by SCSI.
+ *
+ */
+int fnic_host_reset(struct scsi_cmnd *sc)
+{
+ int ret;
+ unsigned long wait_host_tmo;
+ struct Scsi_Host *shost = sc->device->host;
+ struct fc_lport *lp = shost_priv(shost);
+
+ /*
+ * If fnic_reset is successful, wait for the fabric login to complete.
+ * scsi-ml tries to send a TUR to every device if the host reset is
+ * successful, so the fabric should be up before returning to SCSI.
+ */
+ ret = fnic_reset(shost);
+ if (ret == SUCCESS) {
+ wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
+ ret = FAILED;
+ while (time_before(jiffies, wait_host_tmo)) {
+ if ((lp->state == LPORT_ST_READY) &&
+ (lp->link_up)) {
+ ret = SUCCESS;
+ break;
+ }
+ ssleep(1);
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * This function is called from libFC when the host is removed
+ */
+void fnic_scsi_abort_io(struct fc_lport *lp)
+{
+ int err = 0;
+ unsigned long flags;
+ enum fnic_state old_state;
+ struct fnic *fnic = lport_priv(lp);
+ DECLARE_COMPLETION_ONSTACK(remove_wait);
+
+ /* Issue firmware reset for fnic, wait for reset to complete */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->remove_wait = &remove_wait;
+ old_state = fnic->state;
+ fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+ vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ err = fnic_fw_reset_handler(fnic);
+ if (err) {
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
+ fnic->state = old_state;
+ fnic->remove_wait = NULL;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ /* Wait for firmware reset to complete */
+ wait_for_completion_timeout(&remove_wait,
+ msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->remove_wait = NULL;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_scsi_abort_io %s\n",
+ (fnic->state == FNIC_IN_ETH_MODE) ?
+ "SUCCESS" : "FAILED");
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+}
+
+/*
+ * This function is called from libFC to clean up driver IO state on link down
+ */
+void fnic_scsi_cleanup(struct fc_lport *lp)
+{
+ unsigned long flags;
+ enum fnic_state old_state;
+ struct fnic *fnic = lport_priv(lp);
+
+ /* issue fw reset */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ old_state = fnic->state;
+ fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+ vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (fnic_fw_reset_handler(fnic)) {
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
+ fnic->state = old_state;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ }
+
+}
+
+void fnic_empty_scsi_cleanup(struct fc_lport *lp)
+{
+}
+
+void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
+{
+ struct fnic *fnic = lport_priv(lp);
+
+ /* Non-zero sid, nothing to do */
+ if (sid)
+ goto call_fc_exch_mgr_reset;
+
+ if (did) {
+ fnic_rport_exch_reset(fnic, did);
+ goto call_fc_exch_mgr_reset;
+ }
+
+ /*
+ * sid = 0, did = 0
+ * link down or device being removed
+ */
+ if (!fnic->in_remove)
+ fnic_scsi_cleanup(lp);
+ else
+ fnic_scsi_abort_io(lp);
+
+ /* call libFC exch mgr reset to reset its exchanges */
+call_fc_exch_mgr_reset:
+ fc_exch_mgr_reset(lp, sid, did);
+
+}
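The abort, device-reset, and host-reset entry points above are the SCSI error-handler callbacks. As an illustrative sketch only (not part of this patch; the driver's real template is defined elsewhere, and the non-handler fields here are assumptions), they would typically be wired into a scsi_host_template like this:

/* Sketch only: hooking the error handlers above into the SCSI midlayer.
 * Assumes <scsi/scsi_host.h>; ".name" and the other fields are
 * illustrative assumptions, not taken from this patch.
 */
static struct scsi_host_template fnic_sht_sketch = {
	.module			 = THIS_MODULE,
	.name			 = "fnic",
	.eh_abort_handler	 = fnic_abort_cmd,
	.eh_device_reset_handler = fnic_device_reset,
	.eh_host_reset_handler	 = fnic_host_reset,
	.this_id		 = -1,
};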
diff --git a/drivers/scsi/fnic/rq_enet_desc.h b/drivers/scsi/fnic/rq_enet_desc.h
new file mode 100644
index 00000000000..92e80ae6b72
--- /dev/null
+++ b/drivers/scsi/fnic/rq_enet_desc.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _RQ_ENET_DESC_H_
+#define _RQ_ENET_DESC_H_
+
+/* Ethernet receive queue descriptor: 16B */
+struct rq_enet_desc {
+ __le64 address;
+ __le16 length_type;
+ u8 reserved[6];
+};
+
+enum rq_enet_type_types {
+ RQ_ENET_TYPE_ONLY_SOP = 0,
+ RQ_ENET_TYPE_NOT_SOP = 1,
+ RQ_ENET_TYPE_RESV2 = 2,
+ RQ_ENET_TYPE_RESV3 = 3,
+};
+
+#define RQ_ENET_ADDR_BITS 64
+#define RQ_ENET_LEN_BITS 14
+#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
+#define RQ_ENET_TYPE_BITS 2
+#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
+
+static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+ u64 address, u8 type, u16 length)
+{
+ desc->address = cpu_to_le64(address);
+ desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
+ ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
+}
+
+static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
+ u64 *address, u8 *type, u16 *length)
+{
+ *address = le64_to_cpu(desc->address);
+ *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
+ *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
+ RQ_ENET_TYPE_MASK);
+}
+
+#endif /* _RQ_ENET_DESC_H_ */
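As a quick illustration of the helpers above, a hypothetical encode/decode round trip (not part of the patch; the buffer address and length are arbitrary example values):

/* Sketch only: rq_enet_desc encode/decode round trip. */
static void rq_enet_desc_roundtrip(struct rq_enet_desc *desc, u64 buf_pa)
{
	u64 addr;
	u16 len;
	u8 type;

	rq_enet_desc_enc(desc, buf_pa, RQ_ENET_TYPE_ONLY_SOP, 1500);
	rq_enet_desc_dec(desc, &addr, &type, &len);
	/* Now addr == buf_pa, type == RQ_ENET_TYPE_ONLY_SOP, len == 1500 */
}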
diff --git a/drivers/scsi/fnic/vnic_cq.c b/drivers/scsi/fnic/vnic_cq.c
new file mode 100644
index 00000000000..c5db32eda5e
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+void vnic_cq_free(struct vnic_cq *cq)
+{
+ vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
+
+ cq->ctrl = NULL;
+}
+
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ cq->index = index;
+ cq->vdev = vdev;
+
+ cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
+ if (!cq->ctrl) {
+ printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int cq_message_enable,
+ unsigned int interrupt_offset, u64 cq_message_addr)
+{
+ u64 paddr;
+
+ paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &cq->ctrl->ring_base);
+ iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
+ iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
+ iowrite32(color_enable, &cq->ctrl->color_enable);
+ iowrite32(cq_head, &cq->ctrl->cq_head);
+ iowrite32(cq_tail, &cq->ctrl->cq_tail);
+ iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
+ iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
+ iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
+ iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
+ iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
+ writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
+}
+
+void vnic_cq_clean(struct vnic_cq *cq)
+{
+ cq->to_clean = 0;
+ cq->last_color = 0;
+
+ iowrite32(0, &cq->ctrl->cq_head);
+ iowrite32(0, &cq->ctrl->cq_tail);
+ iowrite32(1, &cq->ctrl->cq_tail_color);
+
+ vnic_dev_clear_desc_ring(&cq->ring);
+}
diff --git a/drivers/scsi/fnic/vnic_cq.h b/drivers/scsi/fnic/vnic_cq.h
new file mode 100644
index 00000000000..4ede6809fb1
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_CQ_H_
+#define _VNIC_CQ_H_
+
+#include "cq_desc.h"
+#include "vnic_dev.h"
+
+/*
+ * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_cq_service fnic_cq_service
+#define vnic_cq_free fnic_cq_free
+#define vnic_cq_alloc fnic_cq_alloc
+#define vnic_cq_init fnic_cq_init
+#define vnic_cq_clean fnic_cq_clean
+
+/* Completion queue control */
+struct vnic_cq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 flow_control_enable; /* 0x10 */
+ u32 pad1;
+ u32 color_enable; /* 0x18 */
+ u32 pad2;
+ u32 cq_head; /* 0x20 */
+ u32 pad3;
+ u32 cq_tail; /* 0x28 */
+ u32 pad4;
+ u32 cq_tail_color; /* 0x30 */
+ u32 pad5;
+ u32 interrupt_enable; /* 0x38 */
+ u32 pad6;
+ u32 cq_entry_enable; /* 0x40 */
+ u32 pad7;
+ u32 cq_message_enable; /* 0x48 */
+ u32 pad8;
+ u32 interrupt_offset; /* 0x50 */
+ u32 pad9;
+ u64 cq_message_addr; /* 0x58 */
+ u32 pad10;
+};
+
+struct vnic_cq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ unsigned int to_clean;
+ unsigned int last_color;
+};
+
+static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
+ unsigned int work_to_do,
+ int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index, void *opaque),
+ void *opaque)
+{
+ struct cq_desc *cq_desc;
+ unsigned int work_done = 0;
+ u16 q_number, completed_index;
+ u8 type, color;
+
+ cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ cq_desc_dec(cq_desc, &type, &color,
+ &q_number, &completed_index);
+
+ while (color != cq->last_color) {
+
+ if ((*q_service)(cq->vdev, cq_desc, type,
+ q_number, completed_index, opaque))
+ break;
+
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
+ }
+
+ cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ cq_desc_dec(cq_desc, &type, &color,
+ &q_number, &completed_index);
+
+ work_done++;
+ if (work_done >= work_to_do)
+ break;
+ }
+
+ return work_done;
+}
+
+void vnic_cq_free(struct vnic_cq *cq);
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int message_enable,
+ unsigned int interrupt_offset, u64 message_addr);
+void vnic_cq_clean(struct vnic_cq *cq);
+
+#endif /* _VNIC_CQ_H_ */
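For reference, a minimal usage sketch of vnic_cq_service() (the callback and wrapper names below are made up for illustration): the service callback is invoked once per completed descriptor and returns nonzero to stop early.

/* Sketch only: draining a completion queue with vnic_cq_service().
 * vnic_cq_service() stops when the next descriptor's color bit still
 * matches cq->last_color, i.e. when there are no new completions.
 */
static int example_cq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
			      u8 type, u16 q_number, u16 completed_index,
			      void *opaque)
{
	/* Handle one completion here; return 0 to keep processing. */
	return 0;
}

static unsigned int example_cq_poll(struct vnic_cq *cq)
{
	/* Service at most 64 completions per call. */
	return vnic_cq_service(cq, 64, example_cq_service, NULL);
}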
diff --git a/drivers/scsi/fnic/vnic_cq_copy.h b/drivers/scsi/fnic/vnic_cq_copy.h
new file mode 100644
index 00000000000..7901ce255a8
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq_copy.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_CQ_COPY_H_
+#define _VNIC_CQ_COPY_H_
+
+#include "fcpio.h"
+
+static inline unsigned int vnic_cq_copy_service(
+ struct vnic_cq *cq,
+ int (*q_service)(struct vnic_dev *vdev,
+ unsigned int index,
+ struct fcpio_fw_req *desc),
+ unsigned int work_to_do)
+
+{
+ struct fcpio_fw_req *desc;
+ unsigned int work_done = 0;
+ u8 color;
+
+ desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ fcpio_color_dec(desc, &color);
+
+ while (color != cq->last_color) {
+
+ if ((*q_service)(cq->vdev, cq->index, desc))
+ break;
+
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
+ }
+
+ desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ fcpio_color_dec(desc, &color);
+
+ work_done++;
+ if (work_done >= work_to_do)
+ break;
+ }
+
+ return work_done;
+}
+
+#endif /* _VNIC_CQ_COPY_H_ */
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
new file mode 100644
index 00000000000..56677064508
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -0,0 +1,690 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+#include "vnic_dev.h"
+#include "vnic_stats.h"
+
+struct vnic_res {
+ void __iomem *vaddr;
+ unsigned int count;
+};
+
+struct vnic_dev {
+ void *priv;
+ struct pci_dev *pdev;
+ struct vnic_res res[RES_TYPE_MAX];
+ enum vnic_dev_intr_mode intr_mode;
+ struct vnic_devcmd __iomem *devcmd;
+ struct vnic_devcmd_notify *notify;
+ struct vnic_devcmd_notify notify_copy;
+ dma_addr_t notify_pa;
+ u32 *linkstatus;
+ dma_addr_t linkstatus_pa;
+ struct vnic_stats *stats;
+ dma_addr_t stats_pa;
+ struct vnic_devcmd_fw_info *fw_info;
+ dma_addr_t fw_info_pa;
+};
+
+#define VNIC_MAX_RES_HDR_SIZE \
+ (sizeof(struct vnic_resource_header) + \
+ sizeof(struct vnic_resource) * RES_TYPE_MAX)
+#define VNIC_RES_STRIDE 128
+
+void *vnic_dev_priv(struct vnic_dev *vdev)
+{
+ return vdev->priv;
+}
+
+static int vnic_dev_discover_res(struct vnic_dev *vdev,
+ struct vnic_dev_bar *bar)
+{
+ struct vnic_resource_header __iomem *rh;
+ struct vnic_resource __iomem *r;
+ u8 type;
+
+ if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
+ printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
+ return -EINVAL;
+ }
+
+ rh = bar->vaddr;
+ if (!rh) {
+ printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
+ return -EINVAL;
+ }
+
+ if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
+ ioread32(&rh->version) != VNIC_RES_VERSION) {
+ printk(KERN_ERR "vNIC BAR0 res magic/version error "
+ "exp (%lx/%lx) curr (%x/%x)\n",
+ VNIC_RES_MAGIC, VNIC_RES_VERSION,
+ ioread32(&rh->magic), ioread32(&rh->version));
+ return -EINVAL;
+ }
+
+ r = (struct vnic_resource __iomem *)(rh + 1);
+
+ while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
+
+ u8 bar_num = ioread8(&r->bar);
+ u32 bar_offset = ioread32(&r->bar_offset);
+ u32 count = ioread32(&r->count);
+ u32 len;
+
+ r++;
+
+ if (bar_num != 0) /* only mapping in BAR0 resources */
+ continue;
+
+ switch (type) {
+ case RES_TYPE_WQ:
+ case RES_TYPE_RQ:
+ case RES_TYPE_CQ:
+ case RES_TYPE_INTR_CTRL:
+ /* each count is stride bytes long */
+ len = count * VNIC_RES_STRIDE;
+ if (len + bar_offset > bar->len) {
+ printk(KERN_ERR "vNIC BAR0 resource %d "
+ "out-of-bounds, offset 0x%x + "
+ "size 0x%x > bar len 0x%lx\n",
+ type, bar_offset,
+ len,
+ bar->len);
+ return -EINVAL;
+ }
+ break;
+ case RES_TYPE_INTR_PBA_LEGACY:
+ case RES_TYPE_DEVCMD:
+ len = count;
+ break;
+ default:
+ continue;
+ }
+
+ vdev->res[type].count = count;
+ vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
+ }
+
+ return 0;
+}
+
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type)
+{
+ return vdev->res[type].count;
+}
+
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index)
+{
+ if (!vdev->res[type].vaddr)
+ return NULL;
+
+ switch (type) {
+ case RES_TYPE_WQ:
+ case RES_TYPE_RQ:
+ case RES_TYPE_CQ:
+ case RES_TYPE_INTR_CTRL:
+ return (char __iomem *)vdev->res[type].vaddr +
+ index * VNIC_RES_STRIDE;
+ default:
+ return (char __iomem *)vdev->res[type].vaddr;
+ }
+}
+
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+ unsigned int desc_count,
+ unsigned int desc_size)
+{
+ /* The base address of the desc rings must be 512 byte aligned.
+ * Descriptor count is aligned to groups of 32 descriptors. A
+ * count of 0 means the maximum 4096 descriptors. Descriptor
+ * size is aligned to 16 bytes.
+ */
+
+ unsigned int count_align = 32;
+ unsigned int desc_align = 16;
+
+ ring->base_align = 512;
+
+ if (desc_count == 0)
+ desc_count = 4096;
+
+ ring->desc_count = ALIGN(desc_count, count_align);
+
+ ring->desc_size = ALIGN(desc_size, desc_align);
+
+ ring->size = ring->desc_count * ring->desc_size;
+ ring->size_unaligned = ring->size + ring->base_align;
+
+ return ring->size_unaligned;
+}
+
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
+{
+ memset(ring->descs, 0, ring->size);
+}
+
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ vnic_dev_desc_ring_size(ring, desc_count, desc_size);
+
+ ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+ ring->size_unaligned,
+ &ring->base_addr_unaligned);
+
+ if (!ring->descs_unaligned) {
+ printk(KERN_ERR
+ "Failed to allocate ring (size=%d), aborting\n",
+ (int)ring->size);
+ return -ENOMEM;
+ }
+
+ ring->base_addr = ALIGN(ring->base_addr_unaligned,
+ ring->base_align);
+ ring->descs = (u8 *)ring->descs_unaligned +
+ (ring->base_addr - ring->base_addr_unaligned);
+
+ vnic_dev_clear_desc_ring(ring);
+
+ ring->desc_avail = ring->desc_count - 1;
+
+ return 0;
+}
+
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
+{
+ if (ring->descs) {
+ pci_free_consistent(vdev->pdev,
+ ring->size_unaligned,
+ ring->descs_unaligned,
+ ring->base_addr_unaligned);
+ ring->descs = NULL;
+ }
+}
+
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait)
+{
+ struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
+ int delay;
+ u32 status;
+ int dev_cmd_err[] = {
+ /* convert from fw's version of error.h to host's version */
+ 0, /* ERR_SUCCESS */
+ EINVAL, /* ERR_EINVAL */
+ EFAULT, /* ERR_EFAULT */
+ EPERM, /* ERR_EPERM */
+ EBUSY, /* ERR_EBUSY */
+ };
+ int err;
+
+ status = ioread32(&devcmd->status);
+ if (status & STAT_BUSY) {
+ printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
+ return -EBUSY;
+ }
+
+ if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+ writeq(*a0, &devcmd->args[0]);
+ writeq(*a1, &devcmd->args[1]);
+ wmb();
+ }
+
+ iowrite32(cmd, &devcmd->cmd);
+
+ if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+ return 0;
+
+ for (delay = 0; delay < wait; delay++) {
+
+ udelay(100);
+
+ status = ioread32(&devcmd->status);
+ if (!(status & STAT_BUSY)) {
+
+ if (status & STAT_ERROR) {
+ err = dev_cmd_err[(int)readq(&devcmd->args[0])];
+ printk(KERN_ERR "Error %d devcmd %d\n",
+ err, _CMD_N(cmd));
+ return -err;
+ }
+
+ if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+ rmb();
+ *a0 = readq(&devcmd->args[0]);
+ *a1 = readq(&devcmd->args[1]);
+ }
+
+ return 0;
+ }
+ }
+
+ printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
+ return -ETIMEDOUT;
+}
+
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+ struct vnic_devcmd_fw_info **fw_info)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+ int err = 0;
+
+ if (!vdev->fw_info) {
+ vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_fw_info),
+ &vdev->fw_info_pa);
+ if (!vdev->fw_info)
+ return -ENOMEM;
+
+ a0 = vdev->fw_info_pa;
+
+ /* only get fw_info once and cache it */
+ err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
+ }
+
+ *fw_info = vdev->fw_info;
+
+ return err;
+}
+
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+ void *value)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int err;
+
+ a0 = offset;
+ a1 = size;
+
+ err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
+
+ switch (size) {
+ case 1:
+ *(u8 *)value = (u8)a0;
+ break;
+ case 2:
+ *(u16 *)value = (u16)a0;
+ break;
+ case 4:
+ *(u32 *)value = (u32)a0;
+ break;
+ case 8:
+ *(u64 *)value = a0;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return err;
+}
+
+int vnic_dev_stats_clear(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
+}
+
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
+{
+ u64 a0, a1;
+ int wait = 1000;
+
+ if (!vdev->stats) {
+ vdev->stats = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_stats), &vdev->stats_pa);
+ if (!vdev->stats)
+ return -ENOMEM;
+ }
+
+ *stats = vdev->stats;
+ a0 = vdev->stats_pa;
+ a1 = sizeof(struct vnic_stats);
+
+ return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
+}
+
+int vnic_dev_close(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
+}
+
+int vnic_dev_enable(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_disable(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_open(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
+}
+
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *done = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ *done = (a0 == 0);
+
+ return 0;
+}
+
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
+}
+
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *done = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ *done = (a0 == 0);
+
+ return 0;
+}
+
+int vnic_dev_hang_notify(struct vnic_dev *vdev)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
+}
+
+int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int err, i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = ((u8 *)&a0)[i];
+
+ return 0;
+}
+
+void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+ int broadcast, int promisc, int allmulti)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
+ (multicast ? CMD_PFILTER_MULTICAST : 0) |
+ (broadcast ? CMD_PFILTER_BROADCAST : 0) |
+ (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
+ (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+
+ err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
+ if (err)
+ printk(KERN_ERR "Can't set packet filter\n");
+}
+
+void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ((u8 *)&a0)[i] = addr[i];
+
+ err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ if (err)
+ printk(KERN_ERR
+ "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+ err);
+}
+
+void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ((u8 *)&a0)[i] = addr[i];
+
+ err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+ if (err)
+ printk(KERN_ERR
+ "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+ err);
+}
+
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+ u64 a0, a1;
+ int wait = 1000;
+
+ if (!vdev->notify) {
+ vdev->notify = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ &vdev->notify_pa);
+ if (!vdev->notify)
+ return -ENOMEM;
+ }
+
+ a0 = vdev->notify_pa;
+ a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
+ a1 += sizeof(struct vnic_devcmd_notify);
+
+ return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+void vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+ u64 a0, a1;
+ int wait = 1000;
+
+ a0 = 0; /* paddr = 0 to unset notify buffer */
+ a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
+ a1 += sizeof(struct vnic_devcmd_notify);
+
+ vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+static int vnic_dev_notify_ready(struct vnic_dev *vdev)
+{
+ u32 *words;
+ unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
+ unsigned int i;
+ u32 csum;
+
+ if (!vdev->notify)
+ return 0;
+
+ do {
+ csum = 0;
+ memcpy(&vdev->notify_copy, vdev->notify,
+ sizeof(struct vnic_devcmd_notify));
+ words = (u32 *)&vdev->notify_copy;
+ for (i = 1; i < nwords; i++)
+ csum += words[i];
+ } while (csum != words[0]);
+
+ return 1;
+}
+
+int vnic_dev_init(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
+}
+
+int vnic_dev_link_status(struct vnic_dev *vdev)
+{
+ if (vdev->linkstatus)
+ return *vdev->linkstatus;
+
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.link_state;
+}
+
+u32 vnic_dev_port_speed(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.port_speed;
+}
+
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.msglvl;
+}
+
+u32 vnic_dev_mtu(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.mtu;
+}
+
+u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.link_down_cnt;
+}
+
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+ enum vnic_dev_intr_mode intr_mode)
+{
+ vdev->intr_mode = intr_mode;
+}
+
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
+ struct vnic_dev *vdev)
+{
+ return vdev->intr_mode;
+}
+
+void vnic_dev_unregister(struct vnic_dev *vdev)
+{
+ if (vdev) {
+ if (vdev->notify)
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ if (vdev->linkstatus)
+ pci_free_consistent(vdev->pdev,
+ sizeof(u32),
+ vdev->linkstatus,
+ vdev->linkstatus_pa);
+ if (vdev->stats)
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_stats),
+ vdev->stats, vdev->stats_pa);
+ if (vdev->fw_info)
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_fw_info),
+ vdev->fw_info, vdev->fw_info_pa);
+ kfree(vdev);
+ }
+}
+
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+ void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
+{
+ if (!vdev) {
+ vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
+ if (!vdev)
+ return NULL;
+ }
+
+ vdev->priv = priv;
+ vdev->pdev = pdev;
+
+ if (vnic_dev_discover_res(vdev, bar))
+ goto err_out;
+
+ vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+ if (!vdev->devcmd)
+ goto err_out;
+
+ return vdev;
+
+err_out:
+ vnic_dev_unregister(vdev);
+ return NULL;
+}
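To show how the pieces above fit together, a minimal bring-up sketch (an assumed flow, not part of this patch): register the device over a BAR mapped during PCI probe, issue CMD_OPEN, and poll for completion.

/* Sketch only: typical vnic_dev bring-up using the API in this file.
 * Error handling is trimmed; the mapped BAR is assumed to come from the
 * caller's PCI probe path, and the retry count is arbitrary.
 */
static struct vnic_dev *example_vdev_bringup(void *priv, struct pci_dev *pdev,
					     struct vnic_dev_bar *bar)
{
	struct vnic_dev *vdev;
	int err, done = 0, retries = 50;

	vdev = vnic_dev_register(NULL, priv, pdev, bar);
	if (!vdev)
		return NULL;

	err = vnic_dev_open(vdev, 0);			/* issues CMD_OPEN */
	while (!err && !done && retries--) {
		msleep(100);
		err = vnic_dev_open_done(vdev, &done);	/* polls CMD_OPEN_STATUS */
	}

	if (err || !done) {
		vnic_dev_unregister(vdev);
		return NULL;
	}
	return vdev;
}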
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
new file mode 100644
index 00000000000..f9935a8a5a0
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_DEV_H_
+#define _VNIC_DEV_H_
+
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+
+/*
+ * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_dev_priv fnic_dev_priv
+#define vnic_dev_get_res_count fnic_dev_get_res_count
+#define vnic_dev_get_res fnic_dev_get_res
+#define vnic_dev_desc_ring_size fnic_dev_desc_ring_siz
+#define vnic_dev_clear_desc_ring fnic_dev_clear_desc_ring
+#define vnic_dev_alloc_desc_ring fnic_dev_alloc_desc_ring
+#define vnic_dev_free_desc_ring fnic_dev_free_desc_ring
+#define vnic_dev_cmd fnic_dev_cmd
+#define vnic_dev_fw_info fnic_dev_fw_info
+#define vnic_dev_spec fnic_dev_spec
+#define vnic_dev_stats_clear fnic_dev_stats_clear
+#define vnic_dev_stats_dump fnic_dev_stats_dump
+#define vnic_dev_hang_notify fnic_dev_hang_notify
+#define vnic_dev_packet_filter fnic_dev_packet_filter
+#define vnic_dev_add_addr fnic_dev_add_addr
+#define vnic_dev_del_addr fnic_dev_del_addr
+#define vnic_dev_mac_addr fnic_dev_mac_addr
+#define vnic_dev_notify_set fnic_dev_notify_set
+#define vnic_dev_notify_unset fnic_dev_notify_unset
+#define vnic_dev_link_status fnic_dev_link_status
+#define vnic_dev_port_speed fnic_dev_port_speed
+#define vnic_dev_msg_lvl fnic_dev_msg_lvl
+#define vnic_dev_mtu fnic_dev_mtu
+#define vnic_dev_link_down_cnt fnic_dev_link_down_cnt
+#define vnic_dev_close fnic_dev_close
+#define vnic_dev_enable fnic_dev_enable
+#define vnic_dev_disable fnic_dev_disable
+#define vnic_dev_open fnic_dev_open
+#define vnic_dev_open_done fnic_dev_open_done
+#define vnic_dev_init fnic_dev_init
+#define vnic_dev_soft_reset fnic_dev_soft_reset
+#define vnic_dev_soft_reset_done fnic_dev_soft_reset_done
+#define vnic_dev_set_intr_mode fnic_dev_set_intr_mode
+#define vnic_dev_get_intr_mode fnic_dev_get_intr_mode
+#define vnic_dev_unregister fnic_dev_unregister
+#define vnic_dev_register fnic_dev_register
+
+#ifndef VNIC_PADDR_TARGET
+#define VNIC_PADDR_TARGET 0x0000000000000000ULL
+#endif
+
+#ifndef readq
+static inline u64 readq(void __iomem *reg)
+{
+ return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg);
+}
+
+static inline void writeq(u64 val, void __iomem *reg)
+{
+ writel(val & 0xffffffff, reg);
+ writel(val >> 32, reg + 0x4UL);
+}
+#endif
+
+enum vnic_dev_intr_mode {
+ VNIC_DEV_INTR_MODE_UNKNOWN,
+ VNIC_DEV_INTR_MODE_INTX,
+ VNIC_DEV_INTR_MODE_MSI,
+ VNIC_DEV_INTR_MODE_MSIX,
+};
+
+struct vnic_dev_bar {
+ void __iomem *vaddr;
+ dma_addr_t bus_addr;
+ unsigned long len;
+};
+
+struct vnic_dev_ring {
+ void *descs;
+ size_t size;
+ dma_addr_t base_addr;
+ size_t base_align;
+ void *descs_unaligned;
+ size_t size_unaligned;
+ dma_addr_t base_addr_unaligned;
+ unsigned int desc_size;
+ unsigned int desc_count;
+ unsigned int desc_avail;
+};
+
+struct vnic_dev;
+struct vnic_stats;
+
+void *vnic_dev_priv(struct vnic_dev *vdev);
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index);
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+ unsigned int desc_count,
+ unsigned int desc_size);
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
+ struct vnic_dev_ring *ring);
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait);
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+ struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
+ unsigned int size, void *value);
+int vnic_dev_stats_clear(struct vnic_dev *vdev);
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
+int vnic_dev_hang_notify(struct vnic_dev *vdev);
+void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+ int broadcast, int promisc, int allmulti);
+void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
+void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_notify_unset(struct vnic_dev *vdev);
+int vnic_dev_link_status(struct vnic_dev *vdev);
+u32 vnic_dev_port_speed(struct vnic_dev *vdev);
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
+u32 vnic_dev_mtu(struct vnic_dev *vdev);
+u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
+int vnic_dev_close(struct vnic_dev *vdev);
+int vnic_dev_enable(struct vnic_dev *vdev);
+int vnic_dev_disable(struct vnic_dev *vdev);
+int vnic_dev_open(struct vnic_dev *vdev, int arg);
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_init(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+ enum vnic_dev_intr_mode intr_mode);
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
+void vnic_dev_unregister(struct vnic_dev *vdev);
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+ void *priv, struct pci_dev *pdev,
+ struct vnic_dev_bar *bar);
+
+#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
new file mode 100644
index 00000000000..d62b9061bf1
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_DEVCMD_H_
+#define _VNIC_DEVCMD_H_
+
+#define _CMD_NBITS 14
+#define _CMD_VTYPEBITS 10
+#define _CMD_FLAGSBITS 6
+#define _CMD_DIRBITS 2
+
+#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
+#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
+#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
+#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
+
+#define _CMD_NSHIFT 0
+#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
+#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
+#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
+
+/*
+ * Direction bits (from host perspective).
+ */
+#define _CMD_DIR_NONE 0U
+#define _CMD_DIR_WRITE 1U
+#define _CMD_DIR_READ 2U
+#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
+
+/*
+ * Flag bits.
+ */
+#define _CMD_FLAGS_NONE 0U
+#define _CMD_FLAGS_NOWAIT 1U
+
+/*
+ * vNIC type bits.
+ */
+#define _CMD_VTYPE_NONE 0U
+#define _CMD_VTYPE_ENET 1U
+#define _CMD_VTYPE_FC 2U
+#define _CMD_VTYPE_SCSI 4U
+#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
+
+/*
+ * Used to create cmds.
+ */
+#define _CMDCF(dir, flags, vtype, nr) \
+ (((dir) << _CMD_DIRSHIFT) | \
+ ((flags) << _CMD_FLAGSSHIFT) | \
+ ((vtype) << _CMD_VTYPESHIFT) | \
+ ((nr) << _CMD_NSHIFT))
+#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
+#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
+
+/*
+ * Used to decode cmds.
+ */
+#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
+#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
+#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
+#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
+
+enum vnic_devcmd_cmd {
+ CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
+
+ /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
+ CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+ /* dev-specific block member:
+ * in: (u16)a0=offset,(u8)a1=size
+ * out: a0=value */
+ CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
+
+ /* stats clear */
+ CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
+
+ /* stats dump in mem: (u64)a0=paddr to stats area,
+ * (u16)a1=sizeof stats area */
+ CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
+
+ /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
+ CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+
+ /* hang detection notification */
+ CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
+
+ /* MAC address in (u48)a0 */
+ CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+
+ /* disable/enable promisc mode: (u8)a0=0/1 */
+/***** XXX DEPRECATED *****/
+ CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
+
+ /* disable/enable all-multi mode: (u8)a0=0/1 */
+/***** XXX DEPRECATED *****/
+ CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
+
+ /* add addr from (u48)a0 */
+ CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
+
+ /* del addr from (u48)a0 */
+ CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
+
+ /* add VLAN id in (u16)a0 */
+ CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
+
+ /* del VLAN id in (u16)a0 */
+ CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
+
+ /* nic_cfg in (u32)a0 */
+ CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+ /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
+ CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
+
+ /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
+ CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
+
+ /* initiate softreset */
+ CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
+
+ /* softreset status:
+ * out: a0=0 reset complete, a0=1 reset in progress */
+ CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
+
+ /* set struct vnic_devcmd_notify buffer in mem:
+ * in:
+ * (u64)a0=paddr to notify (set paddr=0 to unset)
+ * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+ * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+ * out:
+ * (u32)a1 = effective size
+ */
+ CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
+
+ /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
+ * (u8)a1=PXENV_UNDI_xxx */
+ CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
+
+ /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
+ CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
+
+ /* open status:
+ * out: a0=0 open complete, a0=1 open in progress */
+ CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
+
+ /* close vnic */
+ CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
+
+ /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+ CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
+
+ /* variant of CMD_INIT, with provisioning info
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
+
+ /* enable virtual link */
+ CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+ /* disable virtual link */
+ CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
+
+ /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
+ CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
+
+ /* init status:
+ * out: a0=0 init complete, a0=1 init in progress
+ * if a0=0, a1=errno */
+ CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
+
+ /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u8)a1=INT13_CMD_xxx */
+ CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
+
+ /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
+ CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
+
+ /* undo initialize of virtual link */
+ CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+};
+
+/* flags for CMD_OPEN */
+#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
+
+/* flags for CMD_INIT */
+#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
+
+/* flags for CMD_PACKET_FILTER */
+#define CMD_PFILTER_DIRECTED 0x01
+#define CMD_PFILTER_MULTICAST 0x02
+#define CMD_PFILTER_BROADCAST 0x04
+#define CMD_PFILTER_PROMISCUOUS 0x08
+#define CMD_PFILTER_ALL_MULTICAST 0x10
+
+enum vnic_devcmd_status {
+ STAT_NONE = 0,
+ STAT_BUSY = 1 << 0, /* cmd in progress */
+ STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
+};
+
+enum vnic_devcmd_error {
+ ERR_SUCCESS = 0,
+ ERR_EINVAL = 1,
+ ERR_EFAULT = 2,
+ ERR_EPERM = 3,
+ ERR_EBUSY = 4,
+ ERR_ECMDUNKNOWN = 5,
+ ERR_EBADSTATE = 6,
+ ERR_ENOMEM = 7,
+ ERR_ETIMEDOUT = 8,
+ ERR_ELINKDOWN = 9,
+};
+
+struct vnic_devcmd_fw_info {
+ char fw_version[32];
+ char fw_build[32];
+ char hw_version[32];
+ char hw_serial_number[32];
+};
+
+struct vnic_devcmd_notify {
+ u32 csum; /* checksum over following words */
+
+ u32 link_state; /* link up == 1 */
+ u32 port_speed; /* effective port speed (rate limit) */
+ u32 mtu; /* MTU */
+ u32 msglvl; /* requested driver msg lvl */
+ u32 uif; /* uplink interface */
+ u32 status; /* status bits (see VNIC_STF_*) */
+ u32 error; /* error code (see ERR_*) for first ERR */
+ u32 link_down_cnt; /* running count of link down transitions */
+};
+#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
+
+struct vnic_devcmd_provinfo {
+ u8 oui[3];
+ u8 type;
+ u8 data[0];
+};
+
+/*
+ * Writing the cmd register causes STAT_BUSY to be set in the status
+ * register.  When the cmd completes, STAT_BUSY is cleared.
+ *
+ * If the cmd completed successfully, STAT_ERROR is clear and the args
+ * registers contain cmd-specific results.
+ *
+ * If the cmd failed, STAT_ERROR is set and args[0] contains the error code.
+ *
+ * The status register is read-only.  While STAT_BUSY is set,
+ * all other register contents are read-only.
+ */
+
+/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
+#define VNIC_DEVCMD_NARGS 15
+struct vnic_devcmd {
+ u32 status; /* RO */
+ u32 cmd; /* RW */
+ u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
+};
+
+#endif /* _VNIC_DEVCMD_H_ */
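The comment block near the end of this header spells out the devcmd register handshake: write the args, write cmd, poll STAT_BUSY, then check STAT_ERROR. The following is only a hedged caller-side sketch of that handshake, not the fnic driver's actual vnic_dev_cmd() implementation; example_devcmd() and the 1000-iteration poll budget are invented for illustration, and readq()/writeq() are assumed available (vnic_dev.h supplies fallbacks).

#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/io.h>
#include "vnic_dev.h"           /* readq()/writeq() fallbacks */
#include "vnic_devcmd.h"

/* Hypothetical example, not driver code: poll-based devcmd issue. */
static int example_devcmd(struct vnic_devcmd __iomem *devcmd,
                          enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1)
{
        unsigned int retries = 1000;            /* arbitrary poll budget */
        u32 status;

        writeq(*a0, &devcmd->args[0]);
        writeq(*a1, &devcmd->args[1]);
        wmb();                                  /* args before cmd */
        iowrite32(cmd, &devcmd->cmd);           /* hardware sets STAT_BUSY */

        while (retries--) {
                status = ioread32(&devcmd->status);
                if (!(status & STAT_BUSY)) {
                        if (status & STAT_ERROR)
                                /* positive ERR_* code from firmware */
                                return (int)readq(&devcmd->args[0]);
                        *a0 = readq(&devcmd->args[0]);
                        *a1 = readq(&devcmd->args[1]);
                        return 0;
                }
                udelay(100);
        }

        return -ETIMEDOUT;
}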
diff --git a/drivers/scsi/fnic/vnic_intr.c b/drivers/scsi/fnic/vnic_intr.c
new file mode 100644
index 00000000000..4f4dc8793d2
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_intr.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+
+void vnic_intr_free(struct vnic_intr *intr)
+{
+ intr->ctrl = NULL;
+}
+
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+ unsigned int index)
+{
+ intr->index = index;
+ intr->vdev = vdev;
+
+ intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
+ if (!intr->ctrl) {
+ printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
+ index);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+ unsigned int coalescing_type, unsigned int mask_on_assertion)
+{
+ iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+ iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
+ iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
+ iowrite32(0, &intr->ctrl->int_credits);
+}
+
+void vnic_intr_clean(struct vnic_intr *intr)
+{
+ iowrite32(0, &intr->ctrl->int_credits);
+}
diff --git a/drivers/scsi/fnic/vnic_intr.h b/drivers/scsi/fnic/vnic_intr.h
new file mode 100644
index 00000000000..d5fb40e7c98
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_intr.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_INTR_H_
+#define _VNIC_INTR_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+
+/*
+ * These defines avoid symbol clashes between fnic and enic (the Cisco
+ * 10G Ethernet driver) when both drivers are built into the kernel (=y).
+ */
+#define vnic_intr_unmask fnic_intr_unmask
+#define vnic_intr_mask fnic_intr_mask
+#define vnic_intr_return_credits fnic_intr_return_credits
+#define vnic_intr_credits fnic_intr_credits
+#define vnic_intr_return_all_credits fnic_intr_return_all_credits
+#define vnic_intr_legacy_pba fnic_intr_legacy_pba
+#define vnic_intr_free fnic_intr_free
+#define vnic_intr_alloc fnic_intr_alloc
+#define vnic_intr_init fnic_intr_init
+#define vnic_intr_clean fnic_intr_clean
+
+#define VNIC_INTR_TIMER_MAX 0xffff
+
+#define VNIC_INTR_TIMER_TYPE_ABS 0
+#define VNIC_INTR_TIMER_TYPE_QUIET 1
+
+/* Interrupt control */
+struct vnic_intr_ctrl {
+ u32 coalescing_timer; /* 0x00 */
+ u32 pad0;
+ u32 coalescing_value; /* 0x08 */
+ u32 pad1;
+ u32 coalescing_type; /* 0x10 */
+ u32 pad2;
+ u32 mask_on_assertion; /* 0x18 */
+ u32 pad3;
+ u32 mask; /* 0x20 */
+ u32 pad4;
+ u32 int_credits; /* 0x28 */
+ u32 pad5;
+ u32 int_credit_return; /* 0x30 */
+ u32 pad6;
+};
+
+struct vnic_intr {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
+};
+
+static inline void vnic_intr_unmask(struct vnic_intr *intr)
+{
+ iowrite32(0, &intr->ctrl->mask);
+}
+
+static inline void vnic_intr_mask(struct vnic_intr *intr)
+{
+ iowrite32(1, &intr->ctrl->mask);
+}
+
+static inline void vnic_intr_return_credits(struct vnic_intr *intr,
+ unsigned int credits, int unmask, int reset_timer)
+{
+#define VNIC_INTR_UNMASK_SHIFT 16
+#define VNIC_INTR_RESET_TIMER_SHIFT 17
+
+ u32 int_credit_return = (credits & 0xffff) |
+ (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
+ (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
+
+ iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
+}
+
+static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
+{
+ return ioread32(&intr->ctrl->int_credits);
+}
+
+static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
+{
+ unsigned int credits = vnic_intr_credits(intr);
+ int unmask = 1;
+ int reset_timer = 1;
+
+ vnic_intr_return_credits(intr, credits, unmask, reset_timer);
+}
+
+static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
+{
+ /* read PBA without clearing */
+ return ioread32(legacy_pba);
+}
+
+void vnic_intr_free(struct vnic_intr *intr);
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+ unsigned int index);
+void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+ unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_clean(struct vnic_intr *intr);
+
+#endif /* _VNIC_INTR_H_ */
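The credit-return inlines above bundle the completion count, an unmask request and a coalescing-timer reset into a single register write. A hedged sketch of how an MSI-X handler might use them follows; example_msix_handler() and example_service_cq() are invented names, not fnic functions.

#include <linux/interrupt.h>
#include "vnic_intr.h"

/* Placeholder: service the completion queue, return entries handled. */
static unsigned int example_service_cq(struct vnic_dev *vdev);

/* Hypothetical example: service completions, then return credits,
 * unmask the interrupt and restart the coalescing timer in one write. */
static irqreturn_t example_msix_handler(int irq, void *data)
{
        struct vnic_intr *intr = data;
        unsigned int work_done;

        work_done = example_service_cq(intr->vdev);

        vnic_intr_return_credits(intr, work_done,
                                 1 /* unmask */,
                                 1 /* reset coalescing timer */);

        return IRQ_HANDLED;
}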
diff --git a/drivers/scsi/fnic/vnic_nic.h b/drivers/scsi/fnic/vnic_nic.h
new file mode 100644
index 00000000000..f15b83eeace
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_nic.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_NIC_H_
+#define _VNIC_NIC_H_
+
+/*
+ * These defines avoid symbol clashes between fnic and enic (the Cisco
+ * 10G Ethernet driver) when both drivers are built into the kernel (=y).
+ */
+#define vnic_set_nic_cfg fnic_set_nic_cfg
+
+#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
+#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
+#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
+#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
+#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
+#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
+#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
+#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
+#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
+#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
+#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
+#define NIC_CFG_RSS_ENABLE (1UL << 22)
+#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
+#define NIC_CFG_RSS_ENABLE_SHIFT 22
+#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
+#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
+#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
+#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
+#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
+#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
+
+static inline void vnic_set_nic_cfg(u32 *nic_cfg,
+ u8 rss_default_cpu, u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu,
+ u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en)
+{
+ *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
+ ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
+ << NIC_CFG_RSS_HASH_TYPE_SHIFT) |
+ ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
+ << NIC_CFG_RSS_HASH_BITS_SHIFT) |
+ ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
+ << NIC_CFG_RSS_BASE_CPU_SHIFT) |
+ ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
+ << NIC_CFG_RSS_ENABLE_SHIFT) |
+ ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
+ << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
+ ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
+ << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
+}
+
+#endif /* _VNIC_NIC_H_ */
diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h
new file mode 100644
index 00000000000..2d842f79d41
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_resource.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_RESOURCE_H_
+#define _VNIC_RESOURCE_H_
+
+#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
+#define VNIC_RES_VERSION 0x00000000L
+
+/* vNIC resource types */
+enum vnic_res_type {
+ RES_TYPE_EOL, /* End-of-list */
+ RES_TYPE_WQ, /* Work queues */
+ RES_TYPE_RQ, /* Receive queues */
+ RES_TYPE_CQ, /* Completion queues */
+ RES_TYPE_RSVD1,
+ RES_TYPE_NIC_CFG, /* Enet NIC config registers */
+ RES_TYPE_RSVD2,
+ RES_TYPE_RSVD3,
+ RES_TYPE_RSVD4,
+ RES_TYPE_RSVD5,
+ RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
+ RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
+ RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
+ RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
+ RES_TYPE_RSVD6,
+ RES_TYPE_RSVD7,
+ RES_TYPE_DEVCMD, /* Device command region */
+ RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
+
+ RES_TYPE_MAX, /* Count of resource types */
+};
+
+struct vnic_resource_header {
+ u32 magic;
+ u32 version;
+};
+
+struct vnic_resource {
+ u8 type;
+ u8 bar;
+ u8 pad[2];
+ u32 bar_offset;
+ u32 count;
+};
+
+#endif /* _VNIC_RESOURCE_H_ */
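The two structures above describe a self-identifying resource table at the start of a BAR: a header carrying a magic/version, followed by vnic_resource entries terminated by RES_TYPE_EOL. A hedged sketch of walking such a table is shown below; example_discover_res() is an invented name, and the real discovery logic lives in vnic_dev.c.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include "vnic_resource.h"

/* Hypothetical example: validate the header, then list each resource
 * entry until the RES_TYPE_EOL terminator. */
static int example_discover_res(struct vnic_resource_header __iomem *rh)
{
        struct vnic_resource __iomem *r =
                (struct vnic_resource __iomem *)(rh + 1);

        if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
            ioread32(&rh->version) != VNIC_RES_VERSION)
                return -EINVAL;         /* BAR does not hold a vNIC table */

        for (; ioread8(&r->type) != RES_TYPE_EOL; r++)
                printk(KERN_INFO "vnic res type %u count %u bar %u off 0x%x\n",
                       ioread8(&r->type), ioread32(&r->count),
                       ioread8(&r->bar), ioread32(&r->bar_offset));

        return 0;
}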
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c
new file mode 100644
index 00000000000..bedd0d28563
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_rq.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_dev.h"
+#include "vnic_rq.h"
+
+static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
+{
+ struct vnic_rq_buf *buf;
+ struct vnic_dev *vdev;
+ unsigned int i, j, count = rq->ring.desc_count;
+ unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
+
+ vdev = rq->vdev;
+
+ for (i = 0; i < blks; i++) {
+ rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
+ if (!rq->bufs[i]) {
+ printk(KERN_ERR "Failed to alloc rq_bufs\n");
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < blks; i++) {
+ buf = rq->bufs[i];
+ for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
+ buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
+ buf->desc = (u8 *)rq->ring.descs +
+ rq->ring.desc_size * buf->index;
+ if (buf->index + 1 == count) {
+ buf->next = rq->bufs[0];
+ break;
+ } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
+ buf->next = rq->bufs[i + 1];
+ } else {
+ buf->next = buf + 1;
+ buf++;
+ }
+ }
+ }
+
+ rq->to_use = rq->to_clean = rq->bufs[0];
+ rq->buf_index = 0;
+
+ return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq)
+{
+ struct vnic_dev *vdev;
+ unsigned int i;
+
+ vdev = rq->vdev;
+
+ vnic_dev_free_desc_ring(vdev, &rq->ring);
+
+ for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
+ kfree(rq->bufs[i]);
+ rq->bufs[i] = NULL;
+ }
+
+ rq->ctrl = NULL;
+}
+
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ rq->index = index;
+ rq->vdev = vdev;
+
+ rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
+ if (!rq->ctrl) {
+ printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ vnic_rq_disable(rq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
+ if (err)
+ return err;
+
+ err = vnic_rq_alloc_bufs(rq);
+ if (err) {
+ vnic_rq_free(rq);
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+ u32 fetch_index;
+
+ paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &rq->ctrl->ring_base);
+ iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
+ iowrite32(cq_index, &rq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
+ iowrite32(0, &rq->ctrl->dropped_packet_count);
+ iowrite32(0, &rq->ctrl->error_status);
+
+ /* Use current fetch_index as the ring starting point */
+ fetch_index = ioread32(&rq->ctrl->fetch_index);
+ rq->to_use = rq->to_clean =
+ &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
+ [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+ iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+ rq->buf_index = 0;
+}
+
+unsigned int vnic_rq_error_status(struct vnic_rq *rq)
+{
+ return ioread32(&rq->ctrl->error_status);
+}
+
+void vnic_rq_enable(struct vnic_rq *rq)
+{
+ iowrite32(1, &rq->ctrl->enable);
+}
+
+int vnic_rq_disable(struct vnic_rq *rq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &rq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 100; wait++) {
+ if (!(ioread32(&rq->ctrl->running)))
+ return 0;
+ udelay(1);
+ }
+
+ printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
+
+ return -ETIMEDOUT;
+}
+
+void vnic_rq_clean(struct vnic_rq *rq,
+ void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
+{
+ struct vnic_rq_buf *buf;
+ u32 fetch_index;
+
+ BUG_ON(ioread32(&rq->ctrl->enable));
+
+ buf = rq->to_clean;
+
+ while (vnic_rq_desc_used(rq) > 0) {
+
+ (*buf_clean)(rq, buf);
+
+ buf = rq->to_clean = buf->next;
+ rq->ring.desc_avail++;
+ }
+
+ /* Use current fetch_index as the ring starting point */
+ fetch_index = ioread32(&rq->ctrl->fetch_index);
+ rq->to_use = rq->to_clean =
+ &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
+ [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+ iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+ rq->buf_index = 0;
+
+ vnic_dev_clear_desc_ring(&rq->ring);
+}
+
diff --git a/drivers/scsi/fnic/vnic_rq.h b/drivers/scsi/fnic/vnic_rq.h
new file mode 100644
index 00000000000..aebdfbd6ad3
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_rq.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_RQ_H_
+#define _VNIC_RQ_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/*
+ * These defines avoid symbol clashes between fnic and enic (the Cisco
+ * 10G Ethernet driver) when both drivers are built into the kernel (=y).
+ */
+#define vnic_rq_desc_avail fnic_rq_desc_avail
+#define vnic_rq_desc_used fnic_rq_desc_used
+#define vnic_rq_next_desc fnic_rq_next_desc
+#define vnic_rq_next_index fnic_rq_next_index
+#define vnic_rq_next_buf_index fnic_rq_next_buf_index
+#define vnic_rq_post fnic_rq_post
+#define vnic_rq_posting_soon fnic_rq_posting_soon
+#define vnic_rq_return_descs fnic_rq_return_descs
+#define vnic_rq_service fnic_rq_service
+#define vnic_rq_fill fnic_rq_fill
+#define vnic_rq_free fnic_rq_free
+#define vnic_rq_alloc fnic_rq_alloc
+#define vnic_rq_init fnic_rq_init
+#define vnic_rq_error_status fnic_rq_error_status
+#define vnic_rq_enable fnic_rq_enable
+#define vnic_rq_disable fnic_rq_disable
+#define vnic_rq_clean fnic_rq_clean
+
+/* Receive queue control */
+struct vnic_rq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 posted_index; /* 0x10 */
+ u32 pad1;
+ u32 cq_index; /* 0x18 */
+ u32 pad2;
+ u32 enable; /* 0x20 */
+ u32 pad3;
+ u32 running; /* 0x28 */
+ u32 pad4;
+ u32 fetch_index; /* 0x30 */
+ u32 pad5;
+ u32 error_interrupt_enable; /* 0x38 */
+ u32 pad6;
+ u32 error_interrupt_offset; /* 0x40 */
+ u32 pad7;
+ u32 error_status; /* 0x48 */
+ u32 pad8;
+ u32 dropped_packet_count; /* 0x50 */
+ u32 pad9;
+ u32 dropped_packet_count_rc; /* 0x58 */
+ u32 pad10;
+};
+
+/* Break the vnic_rq_buf allocations into blocks of 64 entries */
+#define VNIC_RQ_BUF_BLK_ENTRIES 64
+#define VNIC_RQ_BUF_BLK_SZ \
+ (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
+#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
+ DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_rq_buf {
+ struct vnic_rq_buf *next;
+ dma_addr_t dma_addr;
+ void *os_buf;
+ unsigned int os_buf_index;
+ unsigned int len;
+ unsigned int index;
+ void *desc;
+};
+
+struct vnic_rq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
+ struct vnic_rq_buf *to_use;
+ struct vnic_rq_buf *to_clean;
+ void *os_buf_head;
+ unsigned int buf_index;
+ unsigned int pkts_outstanding;
+};
+
+static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
+{
+ /* how many does SW own? */
+ return rq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
+{
+ /* how many does HW own? */
+ return rq->ring.desc_count - rq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
+{
+ return rq->to_use->desc;
+}
+
+static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
+{
+ return rq->to_use->index;
+}
+
+static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
+{
+ return rq->buf_index++;
+}
+
+static inline void vnic_rq_post(struct vnic_rq *rq,
+ void *os_buf, unsigned int os_buf_index,
+ dma_addr_t dma_addr, unsigned int len)
+{
+ struct vnic_rq_buf *buf = rq->to_use;
+
+ buf->os_buf = os_buf;
+ buf->os_buf_index = os_buf_index;
+ buf->dma_addr = dma_addr;
+ buf->len = len;
+
+ buf = buf->next;
+ rq->to_use = buf;
+ rq->ring.desc_avail--;
+
+ /* Move the posted_index every nth descriptor */
+
+#ifndef VNIC_RQ_RETURN_RATE
+#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
+#endif
+
+ if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(buf->index, &rq->ctrl->posted_index);
+ }
+}
+
+static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
+{
+ return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
+}
+
+static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
+{
+ rq->ring.desc_avail += count;
+}
+
+enum desc_return_options {
+ VNIC_RQ_RETURN_DESC,
+ VNIC_RQ_DEFER_RETURN_DESC,
+};
+
+static inline void vnic_rq_service(struct vnic_rq *rq,
+ struct cq_desc *cq_desc, u16 completed_index,
+ int desc_return, void (*buf_service)(struct vnic_rq *rq,
+ struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+ int skipped, void *opaque), void *opaque)
+{
+ struct vnic_rq_buf *buf;
+ int skipped;
+
+ buf = rq->to_clean;
+ while (1) {
+
+ skipped = (buf->index != completed_index);
+
+ (*buf_service)(rq, cq_desc, buf, skipped, opaque);
+
+ if (desc_return == VNIC_RQ_RETURN_DESC)
+ rq->ring.desc_avail++;
+
+ rq->to_clean = buf->next;
+
+ if (!skipped)
+ break;
+
+ buf = rq->to_clean;
+ }
+}
+
+static inline int vnic_rq_fill(struct vnic_rq *rq,
+ int (*buf_fill)(struct vnic_rq *rq))
+{
+ int err;
+
+ while (vnic_rq_desc_avail(rq) > 1) {
+
+ err = (*buf_fill)(rq);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq);
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+unsigned int vnic_rq_error_status(struct vnic_rq *rq);
+void vnic_rq_enable(struct vnic_rq *rq);
+int vnic_rq_disable(struct vnic_rq *rq);
+void vnic_rq_clean(struct vnic_rq *rq,
+ void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
+
+#endif /* _VNIC_RQ_H_ */
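The RQ inlines above imply a simple buffer lifecycle: a fill callback posts one buffer per free descriptor, vnic_rq_fill() repeats it until the ring is full, and completions come back through vnic_rq_service(). Below is only a hedged sketch; the example_* names and the 2048-byte buffer size are invented, and real buffer allocation and DMA mapping are elided.

#include <linux/errno.h>
#include "vnic_rq.h"

/* Placeholder: allocate a receive buffer and DMA-map it. */
static void *example_alloc_and_map(struct vnic_rq *rq, unsigned int len,
                                   dma_addr_t *dma);

/* Hypothetical example of a fill callback for vnic_rq_fill(). */
static int example_rq_buf_fill(struct vnic_rq *rq)
{
        const unsigned int len = 2048;          /* arbitrary buffer size */
        dma_addr_t dma;
        void *buf;

        buf = example_alloc_and_map(rq, len, &dma);
        if (!buf)
                return -ENOMEM;

        /* Hand the buffer to hardware; the doorbell (posted_index) is
         * only written every VNIC_RQ_RETURN_RATE descriptors. */
        vnic_rq_post(rq, buf, 0, dma, len);
        return 0;
}

/* Fill the ring, then let the hardware start receiving. */
static void example_rq_start(struct vnic_rq *rq)
{
        if (vnic_rq_fill(rq, example_rq_buf_fill) == 0)
                vnic_rq_enable(rq);
}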
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
new file mode 100644
index 00000000000..46baa525400
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_SCSI_H_
+#define _VNIC_SCSI_H_
+
+#define VNIC_FNIC_WQ_COPY_COUNT_MIN 1
+#define VNIC_FNIC_WQ_COPY_COUNT_MAX 1
+
+#define VNIC_FNIC_WQ_DESCS_MIN 64
+#define VNIC_FNIC_WQ_DESCS_MAX 128
+
+#define VNIC_FNIC_WQ_COPY_DESCS_MIN 64
+#define VNIC_FNIC_WQ_COPY_DESCS_MAX 512
+
+#define VNIC_FNIC_RQ_DESCS_MIN 64
+#define VNIC_FNIC_RQ_DESCS_MAX 128
+
+#define VNIC_FNIC_EDTOV_MIN 1000
+#define VNIC_FNIC_EDTOV_MAX 255000
+#define VNIC_FNIC_EDTOV_DEF 2000
+
+#define VNIC_FNIC_RATOV_MIN 1000
+#define VNIC_FNIC_RATOV_MAX 255000
+
+#define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256
+#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112
+
+#define VNIC_FNIC_FLOGI_RETRIES_MIN 0
+#define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff
+#define VNIC_FNIC_FLOGI_RETRIES_DEF 0xffffffff
+
+#define VNIC_FNIC_FLOGI_TIMEOUT_MIN 1000
+#define VNIC_FNIC_FLOGI_TIMEOUT_MAX 255000
+
+#define VNIC_FNIC_PLOGI_RETRIES_MIN 0
+#define VNIC_FNIC_PLOGI_RETRIES_MAX 255
+#define VNIC_FNIC_PLOGI_RETRIES_DEF 8
+
+#define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000
+#define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000
+
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096
+
+#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0
+#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000
+
+#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MIN 0
+#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX 240000
+
+#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MIN 0
+#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255
+
+#define VNIC_FNIC_LUNS_PER_TARGET_MIN 1
+#define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024
+
+/* Device-specific region: scsi configuration */
+struct vnic_fc_config {
+ u64 node_wwn;
+ u64 port_wwn;
+ u32 flags;
+ u32 wq_enet_desc_count;
+ u32 wq_copy_desc_count;
+ u32 rq_desc_count;
+ u32 flogi_retries;
+ u32 flogi_timeout;
+ u32 plogi_retries;
+ u32 plogi_timeout;
+ u32 io_throttle_count;
+ u32 link_down_timeout;
+ u32 port_down_timeout;
+ u32 port_down_io_retries;
+ u32 luns_per_tgt;
+ u16 maxdatafieldsize;
+ u16 ed_tov;
+ u16 ra_tov;
+ u16 intr_timer;
+ u8 intr_timer_type;
+};
+
+#define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */
+#define VFCF_PERBI 0x2 /* persistent binding info available */
+
+#endif /* _VNIC_SCSI_H_ */
diff --git a/drivers/scsi/fnic/vnic_stats.h b/drivers/scsi/fnic/vnic_stats.h
new file mode 100644
index 00000000000..5372e23c1cb
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_stats.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_STATS_H_
+#define _VNIC_STATS_H_
+
+/* Tx statistics */
+struct vnic_tx_stats {
+ u64 tx_frames_ok;
+ u64 tx_unicast_frames_ok;
+ u64 tx_multicast_frames_ok;
+ u64 tx_broadcast_frames_ok;
+ u64 tx_bytes_ok;
+ u64 tx_unicast_bytes_ok;
+ u64 tx_multicast_bytes_ok;
+ u64 tx_broadcast_bytes_ok;
+ u64 tx_drops;
+ u64 tx_errors;
+ u64 tx_tso;
+ u64 rsvd[16];
+};
+
+/* Rx statistics */
+struct vnic_rx_stats {
+ u64 rx_frames_ok;
+ u64 rx_frames_total;
+ u64 rx_unicast_frames_ok;
+ u64 rx_multicast_frames_ok;
+ u64 rx_broadcast_frames_ok;
+ u64 rx_bytes_ok;
+ u64 rx_unicast_bytes_ok;
+ u64 rx_multicast_bytes_ok;
+ u64 rx_broadcast_bytes_ok;
+ u64 rx_drop;
+ u64 rx_no_bufs;
+ u64 rx_errors;
+ u64 rx_rss;
+ u64 rx_crc_errors;
+ u64 rx_frames_64;
+ u64 rx_frames_127;
+ u64 rx_frames_255;
+ u64 rx_frames_511;
+ u64 rx_frames_1023;
+ u64 rx_frames_1518;
+ u64 rx_frames_to_max;
+ u64 rsvd[16];
+};
+
+struct vnic_stats {
+ struct vnic_tx_stats tx;
+ struct vnic_rx_stats rx;
+};
+
+#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
new file mode 100644
index 00000000000..1f9ea790d13
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+
+static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
+{
+ struct vnic_wq_buf *buf;
+ struct vnic_dev *vdev;
+ unsigned int i, j, count = wq->ring.desc_count;
+ unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
+
+ vdev = wq->vdev;
+
+ for (i = 0; i < blks; i++) {
+ wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
+ if (!wq->bufs[i]) {
+ printk(KERN_ERR "Failed to alloc wq_bufs\n");
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < blks; i++) {
+ buf = wq->bufs[i];
+ for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
+ buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
+ buf->desc = (u8 *)wq->ring.descs +
+ wq->ring.desc_size * buf->index;
+ if (buf->index + 1 == count) {
+ buf->next = wq->bufs[0];
+ break;
+ } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
+ buf->next = wq->bufs[i + 1];
+ } else {
+ buf->next = buf + 1;
+ buf++;
+ }
+ }
+ }
+
+ wq->to_use = wq->to_clean = wq->bufs[0];
+
+ return 0;
+}
+
+void vnic_wq_free(struct vnic_wq *wq)
+{
+ struct vnic_dev *vdev;
+ unsigned int i;
+
+ vdev = wq->vdev;
+
+ vnic_dev_free_desc_ring(vdev, &wq->ring);
+
+ for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
+ kfree(wq->bufs[i]);
+ wq->bufs[i] = NULL;
+ }
+
+ wq->ctrl = NULL;
+
+}
+
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ wq->index = index;
+ wq->vdev = vdev;
+
+ wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
+ if (!wq->ctrl) {
+ printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ vnic_wq_disable(wq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+ if (err)
+ return err;
+
+ err = vnic_wq_alloc_bufs(wq);
+ if (err) {
+ vnic_wq_free(wq);
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+
+ paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &wq->ctrl->ring_base);
+ iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+ iowrite32(0, &wq->ctrl->fetch_index);
+ iowrite32(0, &wq->ctrl->posted_index);
+ iowrite32(cq_index, &wq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+ iowrite32(0, &wq->ctrl->error_status);
+}
+
+unsigned int vnic_wq_error_status(struct vnic_wq *wq)
+{
+ return ioread32(&wq->ctrl->error_status);
+}
+
+void vnic_wq_enable(struct vnic_wq *wq)
+{
+ iowrite32(1, &wq->ctrl->enable);
+}
+
+int vnic_wq_disable(struct vnic_wq *wq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &wq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 100; wait++) {
+ if (!(ioread32(&wq->ctrl->running)))
+ return 0;
+ udelay(1);
+ }
+
+ printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
+
+ return -ETIMEDOUT;
+}
+
+void vnic_wq_clean(struct vnic_wq *wq,
+ void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
+{
+ struct vnic_wq_buf *buf;
+
+ BUG_ON(ioread32(&wq->ctrl->enable));
+
+ buf = wq->to_clean;
+
+ while (vnic_wq_desc_used(wq) > 0) {
+
+ (*buf_clean)(wq, buf);
+
+ buf = wq->to_clean = buf->next;
+ wq->ring.desc_avail++;
+ }
+
+ wq->to_use = wq->to_clean = wq->bufs[0];
+
+ iowrite32(0, &wq->ctrl->fetch_index);
+ iowrite32(0, &wq->ctrl->posted_index);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ vnic_dev_clear_desc_ring(&wq->ring);
+}
diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h
new file mode 100644
index 00000000000..5cd094f7928
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_WQ_H_
+#define _VNIC_WQ_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/*
+ * These defines avoid symbol clashes between fnic and enic (the Cisco
+ * 10G Ethernet driver) when both drivers are built into the kernel (=y).
+ */
+#define vnic_wq_desc_avail fnic_wq_desc_avail
+#define vnic_wq_desc_used fnic_wq_desc_used
+#define vnic_wq_next_desc fnic_wq_next_desc
+#define vnic_wq_post fnic_wq_post
+#define vnic_wq_service fnic_wq_service
+#define vnic_wq_free fnic_wq_free
+#define vnic_wq_alloc fnic_wq_alloc
+#define vnic_wq_init fnic_wq_init
+#define vnic_wq_error_status fnic_wq_error_status
+#define vnic_wq_enable fnic_wq_enable
+#define vnic_wq_disable fnic_wq_disable
+#define vnic_wq_clean fnic_wq_clean
+
+/* Work queue control */
+struct vnic_wq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 posted_index; /* 0x10 */
+ u32 pad1;
+ u32 cq_index; /* 0x18 */
+ u32 pad2;
+ u32 enable; /* 0x20 */
+ u32 pad3;
+ u32 running; /* 0x28 */
+ u32 pad4;
+ u32 fetch_index; /* 0x30 */
+ u32 pad5;
+ u32 dca_value; /* 0x38 */
+ u32 pad6;
+ u32 error_interrupt_enable; /* 0x40 */
+ u32 pad7;
+ u32 error_interrupt_offset; /* 0x48 */
+ u32 pad8;
+ u32 error_status; /* 0x50 */
+ u32 pad9;
+};
+
+struct vnic_wq_buf {
+ struct vnic_wq_buf *next;
+ dma_addr_t dma_addr;
+ void *os_buf;
+ unsigned int len;
+ unsigned int index;
+ int sop;
+ void *desc;
+};
+
+/* Break the vnic_wq_buf allocations into blocks of 64 entries */
+#define VNIC_WQ_BUF_BLK_ENTRIES 64
+#define VNIC_WQ_BUF_BLK_SZ \
+ (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
+#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
+ DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_wq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
+ struct vnic_wq_buf *to_use;
+ struct vnic_wq_buf *to_clean;
+ unsigned int pkts_outstanding;
+};
+
+static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
+{
+ /* how many does SW own? */
+ return wq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
+{
+ /* how many does HW own? */
+ return wq->ring.desc_count - wq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
+{
+ return wq->to_use->desc;
+}
+
+static inline void vnic_wq_post(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr,
+ unsigned int len, int sop, int eop)
+{
+ struct vnic_wq_buf *buf = wq->to_use;
+
+ buf->sop = sop;
+ buf->os_buf = eop ? os_buf : NULL;
+ buf->dma_addr = dma_addr;
+ buf->len = len;
+
+ buf = buf->next;
+ if (eop) {
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(buf->index, &wq->ctrl->posted_index);
+ }
+ wq->to_use = buf;
+
+ wq->ring.desc_avail--;
+}
+
+static inline void vnic_wq_service(struct vnic_wq *wq,
+ struct cq_desc *cq_desc, u16 completed_index,
+ void (*buf_service)(struct vnic_wq *wq,
+ struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
+ void *opaque)
+{
+ struct vnic_wq_buf *buf;
+
+ buf = wq->to_clean;
+ while (1) {
+
+ (*buf_service)(wq, cq_desc, buf, opaque);
+
+ wq->ring.desc_avail++;
+
+ wq->to_clean = buf->next;
+
+ if (buf->index == completed_index)
+ break;
+
+ buf = wq->to_clean;
+ }
+}
+
+void vnic_wq_free(struct vnic_wq *wq);
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+unsigned int vnic_wq_error_status(struct vnic_wq *wq);
+void vnic_wq_enable(struct vnic_wq *wq);
+int vnic_wq_disable(struct vnic_wq *wq);
+void vnic_wq_clean(struct vnic_wq *wq,
+ void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+
+#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c
new file mode 100644
index 00000000000..9eab7e7caf3
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq_copy.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_wq_copy.h"
+
+void vnic_wq_copy_enable(struct vnic_wq_copy *wq)
+{
+ iowrite32(1, &wq->ctrl->enable);
+}
+
+int vnic_wq_copy_disable(struct vnic_wq_copy *wq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &wq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 100; wait++) {
+ if (!(ioread32(&wq->ctrl->running)))
+ return 0;
+ udelay(1);
+ }
+
+ printk(KERN_ERR "Failed to disable Copy WQ[%d],"
+ " fetch index=%d, posted_index=%d\n",
+ wq->index, ioread32(&wq->ctrl->fetch_index),
+ ioread32(&wq->ctrl->posted_index));
+
+ return -ENODEV;
+}
+
+void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
+ void (*q_clean)(struct vnic_wq_copy *wq,
+ struct fcpio_host_req *wq_desc))
+{
+ BUG_ON(ioread32(&wq->ctrl->enable));
+
+ if (vnic_wq_copy_desc_in_use(wq))
+ vnic_wq_copy_service(wq, -1, q_clean);
+
+ wq->to_use_index = wq->to_clean_index = 0;
+
+ iowrite32(0, &wq->ctrl->fetch_index);
+ iowrite32(0, &wq->ctrl->posted_index);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ vnic_dev_clear_desc_ring(&wq->ring);
+}
+
+void vnic_wq_copy_free(struct vnic_wq_copy *wq)
+{
+ struct vnic_dev *vdev;
+
+ vdev = wq->vdev;
+ vnic_dev_free_desc_ring(vdev, &wq->ring);
+ wq->ctrl = NULL;
+}
+
+int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
+ unsigned int index, unsigned int desc_count,
+ unsigned int desc_size)
+{
+ int err;
+
+ wq->index = index;
+ wq->vdev = vdev;
+ wq->to_use_index = wq->to_clean_index = 0;
+ wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
+ if (!wq->ctrl) {
+ printk(KERN_ERR "Failed to hook COPY WQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ vnic_wq_copy_disable(wq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+
+ paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &wq->ctrl->ring_base);
+ iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+ iowrite32(0, &wq->ctrl->fetch_index);
+ iowrite32(0, &wq->ctrl->posted_index);
+ iowrite32(cq_index, &wq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+}
+
diff --git a/drivers/scsi/fnic/vnic_wq_copy.h b/drivers/scsi/fnic/vnic_wq_copy.h
new file mode 100644
index 00000000000..6aff9740c3d
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq_copy.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_WQ_COPY_H_
+#define _VNIC_WQ_COPY_H_
+
+#include <linux/pci.h>
+#include "vnic_wq.h"
+#include "fcpio.h"
+
+#define VNIC_WQ_COPY_MAX 1
+
+struct vnic_wq_copy {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ unsigned to_use_index;
+ unsigned to_clean_index;
+};
+
+static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
+{
+ return wq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
+{
+ return wq->ring.desc_count - 1 - wq->ring.desc_avail;
+}
+
+static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
+{
+ struct fcpio_host_req *desc = wq->ring.descs;
+ return &desc[wq->to_use_index];
+}
+
+static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
+{
+
+ if (wq->to_use_index + 1 == wq->ring.desc_count)
+ wq->to_use_index = 0;
+ else
+ wq->to_use_index++;
+ wq->ring.desc_avail--;
+
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+
+ iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
+}
+
+static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
+{
+ unsigned int cnt;
+
+ if (wq->to_clean_index <= index)
+ cnt = (index - wq->to_clean_index) + 1;
+ else
+ cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
+
+ wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
+ wq->ring.desc_avail += cnt;
+
+}
+
+static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
+ u16 completed_index,
+ void (*q_service)(struct vnic_wq_copy *wq,
+ struct fcpio_host_req *wq_desc))
+{
+ struct fcpio_host_req *wq_desc = wq->ring.descs;
+ unsigned int curr_index;
+
+ while (1) {
+
+ if (q_service)
+ (*q_service)(wq, &wq_desc[wq->to_clean_index]);
+
+ wq->ring.desc_avail++;
+
+ curr_index = wq->to_clean_index;
+
+ /* increment the to-clean index so that we start
+ * with an unprocessed index next time we enter the loop
+ */
+ if (wq->to_clean_index + 1 == wq->ring.desc_count)
+ wq->to_clean_index = 0;
+ else
+ wq->to_clean_index++;
+
+ if (curr_index == completed_index)
+ break;
+
+ /* we have cleaned all the entries */
+ if ((completed_index == (u16)-1) &&
+ (wq->to_clean_index == wq->to_use_index))
+ break;
+ }
+}
+
+void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
+int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
+void vnic_wq_copy_free(struct vnic_wq_copy *wq);
+int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
+ unsigned int index, unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
+ void (*q_clean)(struct vnic_wq_copy *wq,
+ struct fcpio_host_req *wq_desc));
+
+#endif /* _VNIC_WQ_COPY_H_ */
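The copy WQ is driven purely by index arithmetic: the caller grabs the next descriptor slot, fills in the FCP I/O request and rings the doorbell with vnic_wq_copy_post(). A hedged sketch follows; example_fill_request() is an invented placeholder, and struct fcpio_host_req comes from fcpio.h, whose fields are not shown here.

#include <linux/errno.h>
#include "vnic_wq_copy.h"

/* Placeholder: build one FCP I/O request in the given descriptor. */
static void example_fill_request(struct fcpio_host_req *desc);

/* Hypothetical example: post a single descriptor to the copy WQ. */
static int example_wq_copy_post_one(struct vnic_wq_copy *wq)
{
        struct fcpio_host_req *desc;

        if (!vnic_wq_copy_desc_avail(wq))
                return -ENOMEM;                 /* ring currently full */

        desc = vnic_wq_copy_next_desc(wq);
        example_fill_request(desc);
        vnic_wq_copy_post(wq);                  /* advances to_use_index and
                                                 * writes posted_index */
        return 0;
}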
diff --git a/drivers/scsi/fnic/wq_enet_desc.h b/drivers/scsi/fnic/wq_enet_desc.h
new file mode 100644
index 00000000000..b121cbad18b
--- /dev/null
+++ b/drivers/scsi/fnic/wq_enet_desc.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _WQ_ENET_DESC_H_
+#define _WQ_ENET_DESC_H_
+
+/* Ethernet work queue descriptor: 16B */
+struct wq_enet_desc {
+ __le64 address;
+ __le16 length;
+ __le16 mss_loopback;
+ __le16 header_length_flags;
+ __le16 vlan_tag;
+};
+
+#define WQ_ENET_ADDR_BITS 64
+#define WQ_ENET_LEN_BITS 14
+#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
+#define WQ_ENET_MSS_BITS 14
+#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
+#define WQ_ENET_MSS_SHIFT 2
+#define WQ_ENET_LOOPBACK_SHIFT 1
+#define WQ_ENET_HDRLEN_BITS 10
+#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
+#define WQ_ENET_FLAGS_OM_BITS 2
+#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
+#define WQ_ENET_FLAGS_EOP_SHIFT 12
+#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
+#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
+#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
+
+#define WQ_ENET_OFFLOAD_MODE_CSUM 0
+#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
+#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
+#define WQ_ENET_OFFLOAD_MODE_TSO 3
+
+static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
+ u64 address, u16 length, u16 mss, u16 header_length,
+ u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
+ u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
+{
+ desc->address = cpu_to_le64(address);
+ desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
+ desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
+ WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
+ desc->header_length_flags = cpu_to_le16(
+ (header_length & WQ_ENET_HDRLEN_MASK) |
+ (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
+ (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
+ (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
+ (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
+ (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
+ desc->vlan_tag = cpu_to_le16(vlan_tag);
+}
+
+static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
+ u64 *address, u16 *length, u16 *mss, u16 *header_length,
+ u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
+ u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
+{
+ *address = le64_to_cpu(desc->address);
+ *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
+ *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
+ WQ_ENET_MSS_MASK;
+ *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
+ WQ_ENET_LOOPBACK_SHIFT) & 1);
+ *header_length = le16_to_cpu(desc->header_length_flags) &
+ WQ_ENET_HDRLEN_MASK;
+ *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
+ *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_EOP_SHIFT) & 1);
+ *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
+ *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
+ *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
+ *vlan_tag = le16_to_cpu(desc->vlan_tag);
+}
+
+#endif /* _WQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 59349a316e1..1258da34fbc 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -152,6 +152,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
struct Scsi_Host *host, gdth_ha_str *ha)
{
int size = 0,len = 0;
+ int hlen;
off_t begin = 0,pos = 0;
int id, i, j, k, sec, flag;
int no_mdrv = 0, drv_no, is_mirr;
@@ -192,11 +193,11 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
if (reserve_list[0] == 0xff)
strcpy(hrec, "--");
else {
- sprintf(hrec, "%d", reserve_list[0]);
+ hlen = sprintf(hrec, "%d", reserve_list[0]);
for (i = 1; i < MAX_RES_ARGS; i++) {
if (reserve_list[i] == 0xff)
break;
- sprintf(hrec,"%s,%d", hrec, reserve_list[i]);
+ hlen += snprintf(hrec + hlen, 161 - hlen, ",%d", reserve_list[i]);
}
}
size = sprintf(buffer+len,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ea4abee7a2a..b4b805e8d7d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -110,7 +110,7 @@ static const struct {
{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
- { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
@@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
+static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static const char *unknown_error = "unknown error";
@@ -275,7 +276,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
int fc_rsp_len = rsp->fcp_rsp_len;
if ((rsp->flags & FCP_RSP_LEN_VALID) &&
- ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
+ ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
rsp->data.info.rsp_code))
return DID_ERROR << 16;
@@ -431,6 +432,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
case IBMVFC_TGT_ACTION_DEL_RPORT:
break;
default:
+ if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
+ tgt->add_rport = 0;
tgt->action = action;
break;
}
@@ -475,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
vhost->action = action;
break;
+ case IBMVFC_HOST_ACTION_LOGO_WAIT:
+ if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
+ vhost->action = action;
+ break;
case IBMVFC_HOST_ACTION_INIT_WAIT:
if (vhost->action == IBMVFC_HOST_ACTION_INIT)
vhost->action = action;
@@ -483,7 +490,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
switch (vhost->action) {
case IBMVFC_HOST_ACTION_INIT_WAIT:
case IBMVFC_HOST_ACTION_NONE:
- case IBMVFC_HOST_ACTION_TGT_ADD:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
vhost->action = action;
break;
default:
@@ -494,11 +501,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
vhost->action = action;
break;
+ case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
- case IBMVFC_HOST_ACTION_TGT_ADD:
case IBMVFC_HOST_ACTION_NONE:
default:
vhost->action = action;
@@ -576,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
}
list_for_each_entry(tgt, &vhost->targets, queue)
- tgt->need_login = 1;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_npiv_login;
@@ -646,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
vhost->state = IBMVFC_NO_CRQ;
+ vhost->logged_in = 0;
dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
free_page((unsigned long)crq->msgs);
}
@@ -692,6 +700,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
vhost->state = IBMVFC_NO_CRQ;
+ vhost->logged_in = 0;
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
/* Clean out the queue */
@@ -807,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
}
/**
- * __ibmvfc_reset_host - Reset the connection to the server (no locking)
+ * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
* @vhost: struct ibmvfc host to reset
**/
-static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
+static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
int rc;
@@ -826,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
}
/**
- * ibmvfc_reset_host - Reset the connection to the server
+ * __ibmvfc_reset_host - Reset the connection to the server (no locking)
* @vhost: struct ibmvfc host to reset
**/
+static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
+{
+ if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
+ !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+ scsi_block_requests(vhost->host);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
+ vhost->job_step = ibmvfc_npiv_logout;
+ wake_up(&vhost->work_wait_q);
+ } else
+ ibmvfc_hard_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_reset_host - Reset the connection to the server
+ * @vhost: ibmvfc host struct
+ **/
static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
unsigned long flags;
@@ -842,9 +867,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
* ibmvfc_retry_host_init - Retry host initialization if allowed
* @vhost: ibmvfc host struct
*
+ * Returns: 1 if init will be retried / 0 if not
+ *
**/
-static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
+static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
{
+ int retry = 0;
+
if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
vhost->delay_init = 1;
if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
@@ -853,11 +882,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
__ibmvfc_reset_host(vhost);
- else
+ else {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+ retry = 1;
+ }
}
wake_up(&vhost->work_wait_q);
+ return retry;
}
/**
@@ -1137,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
login_info->partition_num = vhost->partition_number;
login_info->vfc_frame_version = 1;
login_info->fcp_version = 3;
+ login_info->flags = IBMVFC_FLUSH_ON_HALT;
if (vhost->client_migrated)
- login_info->flags = IBMVFC_CLIENT_MIGRATED;
+ login_info->flags |= IBMVFC_CLIENT_MIGRATED;
login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
login_info->capabilities = IBMVFC_CAN_MIGRATE;
@@ -1452,6 +1485,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
}
/**
+ * ibmvfc_relogin - Log back into the specified device
+ * @sdev: scsi device struct
+ *
+ **/
+static void ibmvfc_relogin(struct scsi_device *sdev)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_target *tgt;
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (rport == tgt->rport) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ break;
+ }
+ }
+
+ ibmvfc_reinit_host(vhost);
+}
+
+/**
* ibmvfc_scsi_done - Handle responses from commands
* @evt: ibmvfc event to be handled
*
@@ -1483,7 +1537,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
- ibmvfc_reinit_host(evt->vhost);
+ ibmvfc_relogin(cmnd->device);
if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
cmnd->result = (DID_ERROR << 16);
@@ -2148,13 +2202,31 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
struct ibmvfc_host *vhost)
{
const char *desc = ibmvfc_get_ae_desc(crq->event);
+ struct ibmvfc_target *tgt;
ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
" node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
switch (crq->event) {
- case IBMVFC_AE_LINK_UP:
case IBMVFC_AE_RESUME:
+ switch (crq->link_state) {
+ case IBMVFC_AE_LS_LINK_DOWN:
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ break;
+ case IBMVFC_AE_LS_LINK_DEAD:
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ break;
+ case IBMVFC_AE_LS_LINK_UP:
+ case IBMVFC_AE_LS_LINK_BOUNCED:
+ default:
+ vhost->events_to_log |= IBMVFC_AE_LINKUP;
+ vhost->delay_init = 1;
+ __ibmvfc_reset_host(vhost);
+ break;
+ };
+
+ break;
+ case IBMVFC_AE_LINK_UP:
vhost->events_to_log |= IBMVFC_AE_LINKUP;
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
@@ -2168,9 +2240,23 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
case IBMVFC_AE_SCN_NPORT:
case IBMVFC_AE_SCN_GROUP:
vhost->events_to_log |= IBMVFC_AE_RSCN;
+ ibmvfc_reinit_host(vhost);
+ break;
case IBMVFC_AE_ELS_LOGO:
case IBMVFC_AE_ELS_PRLO:
case IBMVFC_AE_ELS_PLOGI:
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
+ break;
+ if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
+ continue;
+ if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
+ continue;
+ if (crq->node_name && tgt->ids.node_name != crq->node_name)
+ continue;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ }
+
ibmvfc_reinit_host(vhost);
break;
case IBMVFC_AE_LINK_DOWN:
@@ -2222,6 +2308,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
return;
case IBMVFC_CRQ_XPORT_EVENT:
vhost->state = IBMVFC_NO_CRQ;
+ vhost->logged_in = 0;
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
if (crq->format == IBMVFC_PARTITION_MIGRATED) {
/* We need to re-setup the interpartition connection */
@@ -2299,7 +2386,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
done = 1;
}
- if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE)
+ if (vhost->scan_complete)
done = 1;
spin_unlock_irqrestore(shost->host_lock, flags);
return done;
@@ -2434,14 +2521,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
vhost->login_buf->resp.partition_name);
}
-static struct device_attribute ibmvfc_host_partition_name = {
- .attr = {
- .name = "partition_name",
- .mode = S_IRUGO,
- },
- .show = ibmvfc_show_host_partition_name,
-};
-
static ssize_t ibmvfc_show_host_device_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2452,14 +2531,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
vhost->login_buf->resp.device_name);
}
-static struct device_attribute ibmvfc_host_device_name = {
- .attr = {
- .name = "device_name",
- .mode = S_IRUGO,
- },
- .show = ibmvfc_show_host_device_name,
-};
-
static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2470,14 +2541,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
vhost->login_buf->resp.port_loc_code);
}
-static struct device_attribute ibmvfc_host_loc_code = {
- .attr = {
- .name = "port_loc_code",
- .mode = S_IRUGO,
- },
- .show = ibmvfc_show_host_loc_code,
-};
-
static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2488,14 +2551,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
vhost->login_buf->resp.drc_name);
}
-static struct device_attribute ibmvfc_host_drc_name = {
- .attr = {
- .name = "drc_name",
- .mode = S_IRUGO,
- },
- .show = ibmvfc_show_host_drc_name,
-};
-
static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2504,13 +2559,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
}
-static struct device_attribute ibmvfc_host_npiv_version = {
- .attr = {
- .name = "npiv_version",
- .mode = S_IRUGO,
- },
- .show = ibmvfc_show_host_npiv_version,
-};
+static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
+}
/**
* ibmvfc_show_log_level - Show the adapter's error logging level
@@ -2556,14 +2611,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
return strlen(buf);
}
-static struct device_attribute ibmvfc_log_level_attr = {
- .attr = {
- .name = "log_level",
- .mode = S_IRUGO | S_IWUSR,
- },
- .show = ibmvfc_show_log_level,
- .store = ibmvfc_store_log_level
-};
+static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
+static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
+static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
+static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
+static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
+static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
+static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
+ ibmvfc_show_log_level, ibmvfc_store_log_level);
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
@@ -2612,12 +2667,13 @@ static struct bin_attribute ibmvfc_trace_attr = {
#endif
static struct device_attribute *ibmvfc_attrs[] = {
- &ibmvfc_host_partition_name,
- &ibmvfc_host_device_name,
- &ibmvfc_host_loc_code,
- &ibmvfc_host_drc_name,
- &ibmvfc_host_npiv_version,
- &ibmvfc_log_level_attr,
+ &dev_attr_partition_name,
+ &dev_attr_device_name,
+ &dev_attr_port_loc_code,
+ &dev_attr_drc_name,
+ &dev_attr_npiv_version,
+ &dev_attr_capabilities,
+ &dev_attr_log_level,
NULL
};
@@ -2774,15 +2830,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
* @tgt: ibmvfc target struct
* @job_step: initialization job step
*
+ * Returns: 1 if step will be retried / 0 if not
+ *
**/
-static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
+static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
wake_up(&tgt->vhost->work_wait_q);
+ return 0;
} else
ibmvfc_init_tgt(tgt, job_step);
+ return 1;
}
/* Defined in FC-LS */
@@ -2831,7 +2891,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
u32 status = rsp->common.status;
- int index;
+ int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2850,7 +2910,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT);
+ tgt->add_rport = 1;
} else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
} else if (prli_rsp[index].retry)
@@ -2867,13 +2927,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
break;
case IBMVFC_MAD_FAILED:
default:
- tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
- ibmvfc_get_cmd_error(rsp->status, rsp->error),
- rsp->status, rsp->error, status);
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
- ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+ tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
+ ibmvfc_get_cmd_error(rsp->status, rsp->error),
+ rsp->status, rsp->error, status);
break;
};
@@ -2932,6 +2993,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
u32 status = rsp->common.status;
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2960,15 +3022,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
break;
case IBMVFC_MAD_FAILED:
default:
- tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
- ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
- ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
- ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
-
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
- ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+ tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+ ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
+ ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
+ ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
break;
};
@@ -3129,13 +3191,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "ADISC succeeded\n");
if (ibmvfc_adisc_needs_plogi(mad, tgt))
- tgt->need_login = 1;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
case IBMVFC_MAD_FAILED:
default:
- tgt->need_login = 1;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
@@ -3322,6 +3384,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
u32 status = rsp->common.status;
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -3341,19 +3404,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
break;
case IBMVFC_MAD_FAILED:
default:
- tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
- ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
- ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
- ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
-
if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
- ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+ tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+ ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
+ ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
+ ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
break;
};
@@ -3420,7 +3483,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
+ tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
if (!tgt) {
dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
scsi_id);
@@ -3472,6 +3535,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
u32 mad_status = rsp->common.status;
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
@@ -3480,9 +3544,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
break;
case IBMVFC_MAD_FAILED:
- dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n",
- ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
- ibmvfc_retry_host_init(vhost);
+ level += ibmvfc_retry_host_init(vhost);
+ ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
+ ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@@ -3534,18 +3598,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
u32 mad_status = evt->xfer_iu->npiv_login.common.status;
struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
unsigned int npiv_max_sectors;
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
ibmvfc_free_event(evt);
break;
case IBMVFC_MAD_FAILED:
- dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
- ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
- ibmvfc_retry_host_init(vhost);
+ level += ibmvfc_retry_host_init(vhost);
else
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
+ ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
ibmvfc_free_event(evt);
return;
case IBMVFC_MAD_CRQ_ERROR:
@@ -3578,6 +3643,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
return;
}
+ vhost->logged_in = 1;
npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
rsp->partition_name, rsp->device_name, rsp->port_loc_code,
@@ -3636,6 +3702,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
};
/**
+ * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ u32 mad_status = evt->xfer_iu->npiv_logout.common.status;
+
+ ibmvfc_free_event(evt);
+
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ if (list_empty(&vhost->sent) &&
+ vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
+ ibmvfc_init_host(vhost, 0);
+ return;
+ }
+ break;
+ case IBMVFC_MAD_FAILED:
+ case IBMVFC_MAD_NOT_SUPPORTED:
+ case IBMVFC_MAD_CRQ_ERROR:
+ case IBMVFC_MAD_DRIVER_FAILED:
+ default:
+ ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
+ break;
+ }
+
+ ibmvfc_hard_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_npiv_logout - Issue an NPIV Logout
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_npiv_logout_mad *mad;
+ struct ibmvfc_event *evt;
+
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
+
+ mad = &evt->iu.npiv_logout;
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = 1;
+ mad->common.opcode = IBMVFC_NPIV_LOGOUT;
+ mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
+
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
+
+ if (!ibmvfc_send_event(evt, vhost, default_timeout))
+ ibmvfc_dbg(vhost, "Sent NPIV logout\n");
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
+/**
* ibmvfc_dev_init_to_do - Is there target initialization work to do?
* @vhost: ibmvfc host struct
*
@@ -3671,6 +3796,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
switch (vhost->action) {
case IBMVFC_HOST_ACTION_NONE:
case IBMVFC_HOST_ACTION_INIT_WAIT:
+ case IBMVFC_HOST_ACTION_LOGO_WAIT:
return 0;
case IBMVFC_HOST_ACTION_TGT_INIT:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -3683,9 +3809,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
return 0;
return 1;
+ case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_ALLOC_TGTS:
- case IBMVFC_HOST_ACTION_TGT_ADD:
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_QUERY:
@@ -3740,25 +3866,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
{
struct ibmvfc_host *vhost = tgt->vhost;
- struct fc_rport *rport = tgt->rport;
+ struct fc_rport *rport;
unsigned long flags;
- if (rport) {
- tgt_dbg(tgt, "Setting rport roles\n");
- fc_remote_port_rolechg(rport, tgt->ids.roles);
- spin_lock_irqsave(vhost->host->host_lock, flags);
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ tgt_dbg(tgt, "Adding rport\n");
+ rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+
+ if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ tgt_dbg(tgt, "Deleting rport\n");
+ list_del(&tgt->queue);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ fc_remote_port_delete(rport);
+ del_timer_sync(&tgt->timer);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
}
- tgt_dbg(tgt, "Adding rport\n");
- rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
- spin_lock_irqsave(vhost->host->host_lock, flags);
- tgt->rport = rport;
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
if (rport) {
tgt_dbg(tgt, "rport add succeeded\n");
+ tgt->rport = rport;
rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
rport->supported_classes = 0;
tgt->target_id = rport->scsi_target_id;
@@ -3789,8 +3916,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
vhost->events_to_log = 0;
switch (vhost->action) {
case IBMVFC_HOST_ACTION_NONE:
+ case IBMVFC_HOST_ACTION_LOGO_WAIT:
case IBMVFC_HOST_ACTION_INIT_WAIT:
break;
+ case IBMVFC_HOST_ACTION_LOGO:
+ vhost->job_step(vhost);
+ break;
case IBMVFC_HOST_ACTION_INIT:
BUG_ON(vhost->state != IBMVFC_INITIALIZING);
if (vhost->delay_init) {
@@ -3836,11 +3967,21 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
if (vhost->state == IBMVFC_INITIALIZING) {
if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
- ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
- vhost->init_retries = 0;
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- scsi_unblock_requests(vhost->host);
+ if (vhost->reinit) {
+ vhost->reinit = 0;
+ scsi_block_requests(vhost->host);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ } else {
+ ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+ wake_up(&vhost->init_wait_q);
+ schedule_work(&vhost->rport_add_work_q);
+ vhost->init_retries = 0;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ scsi_unblock_requests(vhost->host);
+ }
+
return;
} else {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@@ -3871,24 +4012,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
if (!ibmvfc_dev_init_to_do(vhost))
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
break;
- case IBMVFC_HOST_ACTION_TGT_ADD:
- list_for_each_entry(tgt, &vhost->targets, queue) {
- if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- ibmvfc_tgt_add_rport(tgt);
- return;
- }
- }
-
- if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
- vhost->reinit = 0;
- scsi_block_requests(vhost->host);
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
- } else {
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
- wake_up(&vhost->init_wait_q);
- }
- break;
default:
break;
};
@@ -4118,6 +4241,56 @@ nomem:
}
/**
+ * ibmvfc_rport_add_thread - Worker thread for rport adds
+ * @work: work struct
+ *
+ **/
+static void ibmvfc_rport_add_thread(struct work_struct *work)
+{
+ struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
+ rport_add_work_q);
+ struct ibmvfc_target *tgt;
+ struct fc_rport *rport;
+ unsigned long flags;
+ int did_work;
+
+ ENTER;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ do {
+ did_work = 0;
+ if (vhost->state != IBMVFC_ACTIVE)
+ break;
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->add_rport) {
+ did_work = 1;
+ tgt->add_rport = 0;
+ kref_get(&tgt->kref);
+ rport = tgt->rport;
+ if (!rport) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_tgt_add_rport(tgt);
+ } else if (get_device(&rport->dev)) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ tgt_dbg(tgt, "Setting rport roles\n");
+ fc_remote_port_rolechg(rport, tgt->ids.roles);
+ put_device(&rport->dev);
+ }
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ break;
+ }
+ }
+ } while(did_work);
+
+ if (vhost->state == IBMVFC_ACTIVE)
+ vhost->scan_complete = 1;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ LEAVE;
+}
+
+/**
* ibmvfc_probe - Adapter hot plug add entry point
* @vdev: vio device struct
* @id: vio device id struct
@@ -4160,6 +4333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
strcpy(vhost->partition_name, "UNKNOWN");
init_waitqueue_head(&vhost->work_wait_q);
init_waitqueue_head(&vhost->init_wait_q);
+ INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
if ((rc = ibmvfc_alloc_mem(vhost)))
goto free_scsi_host;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index ca1dcf7a756..c2668d7d67f 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
-#define IBMVFC_DRIVER_VERSION "1.0.5"
-#define IBMVFC_DRIVER_DATE "(March 19, 2009)"
+#define IBMVFC_DRIVER_VERSION "1.0.6"
+#define IBMVFC_DRIVER_DATE "(May 28, 2009)"
#define IBMVFC_DEFAULT_TIMEOUT 60
#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@@ -57,9 +57,10 @@
* Ensure we have resources for ERP and initialization:
* 1 for ERP
* 1 for initialization
+ * 1 for NPIV Logout
* 2 for each discovery thread
*/
-#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2))
+#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2))
#define IBMVFC_MAD_SUCCESS 0x00
#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
@@ -127,6 +128,7 @@ enum ibmvfc_mad_types {
IBMVFC_IMPLICIT_LOGOUT = 0x0040,
IBMVFC_PASSTHRU = 0x0200,
IBMVFC_TMF_MAD = 0x0100,
+ IBMVFC_NPIV_LOGOUT = 0x0800,
};
struct ibmvfc_mad_common {
@@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad {
struct srp_direct_buf buffer;
}__attribute__((packed, aligned (8)));
+struct ibmvfc_npiv_logout_mad {
+ struct ibmvfc_mad_common common;
+}__attribute__((packed, aligned (8)));
+
#define IBMVFC_MAX_NAME 256
struct ibmvfc_npiv_login {
@@ -201,7 +207,8 @@ struct ibmvfc_npiv_login_resp {
#define IBMVFC_NATIVE_FC 0x01
#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
u32 reserved;
- u64 capabilites;
+ u64 capabilities;
+#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
u32 max_cmds;
u32 scsi_id_sz;
u64 max_dma_len;
@@ -541,9 +548,17 @@ struct ibmvfc_crq_queue {
dma_addr_t msg_token;
};
+enum ibmvfc_ae_link_state {
+ IBMVFC_AE_LS_LINK_UP = 0x01,
+ IBMVFC_AE_LS_LINK_BOUNCED = 0x02,
+ IBMVFC_AE_LS_LINK_DOWN = 0x04,
+ IBMVFC_AE_LS_LINK_DEAD = 0x08,
+};
+
struct ibmvfc_async_crq {
volatile u8 valid;
- u8 pad[3];
+ u8 link_state;
+ u8 pad[2];
u32 pad2;
volatile u64 event;
volatile u64 scsi_id;
@@ -561,6 +576,7 @@ struct ibmvfc_async_crq_queue {
union ibmvfc_iu {
struct ibmvfc_mad_common mad_common;
struct ibmvfc_npiv_login_mad npiv_login;
+ struct ibmvfc_npiv_logout_mad npiv_logout;
struct ibmvfc_discover_targets discover_targets;
struct ibmvfc_port_login plogi;
struct ibmvfc_process_login prli;
@@ -575,7 +591,6 @@ enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_NONE = 0,
IBMVFC_TGT_ACTION_INIT,
IBMVFC_TGT_ACTION_INIT_WAIT,
- IBMVFC_TGT_ACTION_ADD_RPORT,
IBMVFC_TGT_ACTION_DEL_RPORT,
};
@@ -588,6 +603,7 @@ struct ibmvfc_target {
int target_id;
enum ibmvfc_target_action action;
int need_login;
+ int add_rport;
int init_retries;
u32 cancel_key;
struct ibmvfc_service_parms service_parms;
@@ -627,6 +643,8 @@ struct ibmvfc_event_pool {
enum ibmvfc_host_action {
IBMVFC_HOST_ACTION_NONE = 0,
+ IBMVFC_HOST_ACTION_LOGO,
+ IBMVFC_HOST_ACTION_LOGO_WAIT,
IBMVFC_HOST_ACTION_INIT,
IBMVFC_HOST_ACTION_INIT_WAIT,
IBMVFC_HOST_ACTION_QUERY,
@@ -635,7 +653,6 @@ enum ibmvfc_host_action {
IBMVFC_HOST_ACTION_ALLOC_TGTS,
IBMVFC_HOST_ACTION_TGT_INIT,
IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
- IBMVFC_HOST_ACTION_TGT_ADD,
};
enum ibmvfc_host_state {
@@ -682,6 +699,8 @@ struct ibmvfc_host {
int client_migrated;
int reinit;
int delay_init;
+ int scan_complete;
+ int logged_in;
int events_to_log;
#define IBMVFC_AE_LINKUP 0x0001
#define IBMVFC_AE_LINKDOWN 0x0002
@@ -692,6 +711,7 @@ struct ibmvfc_host {
void (*job_step) (struct ibmvfc_host *);
struct task_struct *work_thread;
struct tasklet_struct tasklet;
+ struct work_struct rport_add_work_q;
wait_queue_head_t init_wait_q;
wait_queue_head_t work_wait_q;
};
@@ -707,6 +727,12 @@ struct ibmvfc_host {
#define tgt_err(t, fmt, ...) \
dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
+#define tgt_log(t, level, fmt, ...) \
+ do { \
+ if ((t)->vhost->log_level >= level) \
+ tgt_err(t, fmt, ##__VA_ARGS__); \
+ } while (0)
+
#define ibmvfc_dbg(vhost, ...) \
DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index c9aa7611e40..869a11bdccb 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -70,6 +70,7 @@
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <asm/firmware.h>
#include <asm/vio.h>
#include <asm/firmware.h>
@@ -87,9 +88,15 @@
*/
static int max_id = 64;
static int max_channel = 3;
-static int init_timeout = 5;
+static int init_timeout = 300;
+static int login_timeout = 60;
+static int info_timeout = 30;
+static int abort_timeout = 60;
+static int reset_timeout = 60;
static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
+static int fast_fail = 1;
+static int client_reserve = 1;
static struct scsi_transport_template *ibmvscsi_transport_template;
@@ -110,6 +117,10 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
+module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
+module_param_named(client_reserve, client_reserve, int, S_IRUGO);
+MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
/* ------------------------------------------------------------
* Routines for the event pool and event structs
@@ -781,105 +792,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
/* ------------------------------------------------------------
* Routines for driver initialization
*/
+
/**
- * adapter_info_rsp: - Handle response to MAD adapter info request
- * @evt_struct: srp_event_struct with the response
+ * map_persist_bufs: - Pre-map persistent data for adapter logins
+ * @hostdata: ibmvscsi_host_data of host
*
- * Used as a "done" callback by when sending adapter_info. Gets called
- * by ibmvscsi_handle_crq()
-*/
-static void adapter_info_rsp(struct srp_event_struct *evt_struct)
+ * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
+ * Return 1 on error, 0 on success.
+ */
+static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
{
- struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
- dma_unmap_single(hostdata->dev,
- evt_struct->iu.mad.adapter_info.buffer,
- evt_struct->iu.mad.adapter_info.common.length,
- DMA_BIDIRECTIONAL);
- if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
- dev_err(hostdata->dev, "error %d getting adapter info\n",
- evt_struct->xfer_iu->mad.adapter_info.common.status);
- } else {
- dev_info(hostdata->dev, "host srp version: %s, "
- "host partition %s (%d), OS %d, max io %u\n",
- hostdata->madapter_info.srp_version,
- hostdata->madapter_info.partition_name,
- hostdata->madapter_info.partition_number,
- hostdata->madapter_info.os_type,
- hostdata->madapter_info.port_max_txu[0]);
-
- if (hostdata->madapter_info.port_max_txu[0])
- hostdata->host->max_sectors =
- hostdata->madapter_info.port_max_txu[0] >> 9;
-
- if (hostdata->madapter_info.os_type == 3 &&
- strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
- dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
- hostdata->madapter_info.srp_version);
- dev_err(hostdata->dev, "limiting scatterlists to %d\n",
- MAX_INDIRECT_BUFS);
- hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
- }
+ hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
+ sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
+ dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
+ return 1;
}
+
+ hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
+ &hostdata->madapter_info,
+ sizeof(hostdata->madapter_info),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
+ dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
+ dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+ sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+ return 1;
+ }
+
+ return 0;
}
/**
- * send_mad_adapter_info: - Sends the mad adapter info request
- * and stores the result so it can be retrieved with
- * sysfs. We COULD consider causing a failure if the
- * returned SRP version doesn't match ours.
- * @hostdata: ibmvscsi_host_data of host
- *
- * Returns zero if successful.
-*/
-static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
+ * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ * Unmap the capabilities and adapter info DMA buffers
+ */
+static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
{
- struct viosrp_adapter_info *req;
- struct srp_event_struct *evt_struct;
- unsigned long flags;
- dma_addr_t addr;
-
- evt_struct = get_event_struct(&hostdata->pool);
- if (!evt_struct) {
- dev_err(hostdata->dev,
- "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
- return;
- }
-
- init_event_struct(evt_struct,
- adapter_info_rsp,
- VIOSRP_MAD_FORMAT,
- init_timeout);
-
- req = &evt_struct->iu.mad.adapter_info;
- memset(req, 0x00, sizeof(*req));
-
- req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
- req->common.length = sizeof(hostdata->madapter_info);
- req->buffer = addr = dma_map_single(hostdata->dev,
- &hostdata->madapter_info,
- sizeof(hostdata->madapter_info),
- DMA_BIDIRECTIONAL);
+ dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+ sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hostdata->dev, req->buffer)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(hostdata->dev,
- "Unable to map request_buffer for "
- "adapter_info!\n");
- free_event_struct(&hostdata->pool, evt_struct);
- return;
- }
-
- spin_lock_irqsave(hostdata->host->host_lock, flags);
- if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
- dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
- dma_unmap_single(hostdata->dev,
- addr,
- sizeof(hostdata->madapter_info),
- DMA_BIDIRECTIONAL);
- }
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-};
+ dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
+ sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
+}
/**
* login_rsp: - Handle response to SRP login request
@@ -909,9 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
}
dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
-
- if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
- dev_err(hostdata->dev, "Invalid request_limit.\n");
+ hostdata->client_migrated = 0;
/* Now we know what the real request-limit is.
* This value is set rather than added to request_limit because
@@ -922,15 +879,12 @@ static void login_rsp(struct srp_event_struct *evt_struct)
/* If we had any pending I/Os, kick them */
scsi_unblock_requests(hostdata->host);
-
- send_mad_adapter_info(hostdata);
- return;
}
/**
* send_srp_login: - Sends the srp login
* @hostdata: ibmvscsi_host_data of host
- *
+ *
* Returns zero if successful.
*/
static int send_srp_login(struct ibmvscsi_host_data *hostdata)
@@ -939,22 +893,17 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
unsigned long flags;
struct srp_login_req *login;
struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
- if (!evt_struct) {
- dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
- return FAILED;
- }
- init_event_struct(evt_struct,
- login_rsp,
- VIOSRP_SRP_FORMAT,
- init_timeout);
+ BUG_ON(!evt_struct);
+ init_event_struct(evt_struct, login_rsp,
+ VIOSRP_SRP_FORMAT, login_timeout);
login = &evt_struct->iu.srp.login_req;
- memset(login, 0x00, sizeof(struct srp_login_req));
+ memset(login, 0, sizeof(*login));
login->opcode = SRP_LOGIN_REQ;
login->req_it_iu_len = sizeof(union srp_iu);
login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
-
+
spin_lock_irqsave(hostdata->host->host_lock, flags);
/* Start out with a request limit of 0, since this is negotiated in
* the login request we are just sending and login requests always
@@ -962,13 +911,241 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
*/
atomic_set(&hostdata->request_limit, 0);
- rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
+ rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
dev_info(hostdata->dev, "sent SRP login\n");
return rc;
};
/**
+ * capabilities_rsp: - Handle response to MAD adapter capabilities request
+ * @evt_struct: srp_event_struct with the response
+ *
+ * Used as a "done" callback by when sending adapter_info.
+ */
+static void capabilities_rsp(struct srp_event_struct *evt_struct)
+{
+ struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+ if (evt_struct->xfer_iu->mad.capabilities.common.status) {
+ dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
+ evt_struct->xfer_iu->mad.capabilities.common.status);
+ } else {
+ if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+ dev_info(hostdata->dev, "Partition migration not supported\n");
+
+ if (client_reserve) {
+ if (hostdata->caps.reserve.common.server_support ==
+ SERVER_SUPPORTS_CAP)
+ dev_info(hostdata->dev, "Client reserve enabled\n");
+ else
+ dev_info(hostdata->dev, "Client reserve not supported\n");
+ }
+ }
+
+ send_srp_login(hostdata);
+}
+
+/**
+ * send_mad_capabilities: - Sends the mad capabilities request
+ * and stores the result so it can be retrieved with sysfs.
+ * @hostdata: ibmvscsi_host_data of host
+ */
+static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
+{
+ struct viosrp_capabilities *req;
+ struct srp_event_struct *evt_struct;
+ unsigned long flags;
+ struct device_node *of_node = hostdata->dev->archdata.of_node;
+ const char *location;
+
+ evt_struct = get_event_struct(&hostdata->pool);
+ BUG_ON(!evt_struct);
+
+ init_event_struct(evt_struct, capabilities_rsp,
+ VIOSRP_MAD_FORMAT, info_timeout);
+
+ req = &evt_struct->iu.mad.capabilities;
+ memset(req, 0, sizeof(*req));
+
+ hostdata->caps.flags = CAP_LIST_SUPPORTED;
+ if (hostdata->client_migrated)
+ hostdata->caps.flags |= CLIENT_MIGRATED;
+
+ strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
+ sizeof(hostdata->caps.name));
+ hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
+
+ location = of_get_property(of_node, "ibm,loc-code", NULL);
+ location = location ? location : dev_name(hostdata->dev);
+ strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
+ hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
+
+ req->common.type = VIOSRP_CAPABILITIES_TYPE;
+ req->buffer = hostdata->caps_addr;
+
+ hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
+ hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
+ hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
+ hostdata->caps.migration.ecl = 1;
+
+ if (client_reserve) {
+ hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
+ hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
+ hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
+ hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
+ req->common.length = sizeof(hostdata->caps);
+ } else
+ req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+ dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+};
+
+/**
+ * fast_fail_rsp: - Handle response to MAD enable fast fail
+ * @evt_struct: srp_event_struct with the response
+ *
+ * Used as a "done" callback by when sending enable fast fail. Gets called
+ * by ibmvscsi_handle_crq()
+ */
+static void fast_fail_rsp(struct srp_event_struct *evt_struct)
+{
+ struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+ u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
+
+ if (status == VIOSRP_MAD_NOT_SUPPORTED)
+ dev_err(hostdata->dev, "fast_fail not supported in server\n");
+ else if (status == VIOSRP_MAD_FAILED)
+ dev_err(hostdata->dev, "fast_fail request failed\n");
+ else if (status != VIOSRP_MAD_SUCCESS)
+ dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
+
+ send_mad_capabilities(hostdata);
+}
+
+/**
+ * enable_fast_fail - Enable the fast fail feature on the adapter
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ * Returns zero if successful.
+ */
+static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
+{
+ int rc;
+ unsigned long flags;
+ struct viosrp_fast_fail *fast_fail_mad;
+ struct srp_event_struct *evt_struct;
+
+ if (!fast_fail) {
+ send_mad_capabilities(hostdata);
+ return 0;
+ }
+
+ evt_struct = get_event_struct(&hostdata->pool);
+ BUG_ON(!evt_struct);
+
+ init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
+
+ fast_fail_mad = &evt_struct->iu.mad.fast_fail;
+ memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
+ fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
+ fast_fail_mad->common.length = sizeof(*fast_fail_mad);
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ return rc;
+}
+
+/**
+ * adapter_info_rsp: - Handle response to MAD adapter info request
+ * @evt_struct: srp_event_struct with the response
+ *
+ * Used as a "done" callback by when sending adapter_info. Gets called
+ * by ibmvscsi_handle_crq()
+*/
+static void adapter_info_rsp(struct srp_event_struct *evt_struct)
+{
+ struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+ if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
+ dev_err(hostdata->dev, "error %d getting adapter info\n",
+ evt_struct->xfer_iu->mad.adapter_info.common.status);
+ } else {
+ dev_info(hostdata->dev, "host srp version: %s, "
+ "host partition %s (%d), OS %d, max io %u\n",
+ hostdata->madapter_info.srp_version,
+ hostdata->madapter_info.partition_name,
+ hostdata->madapter_info.partition_number,
+ hostdata->madapter_info.os_type,
+ hostdata->madapter_info.port_max_txu[0]);
+
+ if (hostdata->madapter_info.port_max_txu[0])
+ hostdata->host->max_sectors =
+ hostdata->madapter_info.port_max_txu[0] >> 9;
+
+ if (hostdata->madapter_info.os_type == 3 &&
+ strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
+ dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
+ hostdata->madapter_info.srp_version);
+ dev_err(hostdata->dev, "limiting scatterlists to %d\n",
+ MAX_INDIRECT_BUFS);
+ hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
+ }
+ }
+
+ enable_fast_fail(hostdata);
+}
+
+/**
+ * send_mad_adapter_info: - Sends the mad adapter info request
+ * and stores the result so it can be retrieved with
+ * sysfs. We COULD consider causing a failure if the
+ * returned SRP version doesn't match ours.
+ * @hostdata: ibmvscsi_host_data of host
+ *
+ * Does not return a value; send failures are logged.
+*/
+static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
+{
+ struct viosrp_adapter_info *req;
+ struct srp_event_struct *evt_struct;
+ unsigned long flags;
+
+ evt_struct = get_event_struct(&hostdata->pool);
+ BUG_ON(!evt_struct);
+
+ init_event_struct(evt_struct,
+ adapter_info_rsp,
+ VIOSRP_MAD_FORMAT,
+ info_timeout);
+
+ req = &evt_struct->iu.mad.adapter_info;
+ memset(req, 0x00, sizeof(*req));
+
+ req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
+ req->common.length = sizeof(hostdata->madapter_info);
+ req->buffer = hostdata->adapter_info_addr;
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+ dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+};
+
+/**
+ * init_adapter: Start virtual adapter initialization sequence
+ *
+ */
+static void init_adapter(struct ibmvscsi_host_data *hostdata)
+{
+ send_mad_adapter_info(hostdata);
+}
+
+/**
* sync_completion: Signal that a synchronous command has completed
* Note that after returning from this call, the evt_struct is freed.
* the caller waiting on this completion shouldn't touch the evt_struct
@@ -1029,7 +1206,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
init_event_struct(evt,
sync_completion,
VIOSRP_SRP_FORMAT,
- init_timeout);
+ abort_timeout);
tsk_mgmt = &evt->iu.srp.tsk_mgmt;
@@ -1043,7 +1220,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
- rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
+ rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
break;
@@ -1152,7 +1329,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
init_event_struct(evt,
sync_completion,
VIOSRP_SRP_FORMAT,
- init_timeout);
+ reset_timeout);
tsk_mgmt = &evt->iu.srp.tsk_mgmt;
@@ -1165,7 +1342,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
- rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
+ rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
break;
@@ -1281,7 +1458,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
if ((rc = ibmvscsi_ops->send_crq(hostdata,
0xC002000000000000LL, 0)) == 0) {
/* Now login */
- send_srp_login(hostdata);
+ init_adapter(hostdata);
} else {
dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
}
@@ -1291,7 +1468,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
dev_info(hostdata->dev, "partner initialization complete\n");
/* Now login */
- send_srp_login(hostdata);
+ init_adapter(hostdata);
break;
default:
dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
@@ -1303,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
if (crq->format == 0x06) {
/* We need to re-setup the interpartition connection */
dev_info(hostdata->dev, "Re-enabling adapter!\n");
+ hostdata->client_migrated = 1;
purge_requests(hostdata, DID_REQUEUE);
if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
hostdata)) ||
@@ -1397,7 +1575,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
init_event_struct(evt_struct,
sync_completion,
VIOSRP_MAD_FORMAT,
- init_timeout);
+ info_timeout);
host_config = &evt_struct->iu.mad.host_config;
@@ -1419,7 +1597,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
init_completion(&evt_struct->comp);
spin_lock_irqsave(hostdata->host->host_lock, flags);
- rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
+ rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rc == 0)
wait_for_completion(&evt_struct->comp);
@@ -1444,7 +1622,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
spin_lock_irqsave(shost->host_lock, lock_flags);
if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1;
- blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
}
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
spin_unlock_irqrestore(shost->host_lock, lock_flags);
@@ -1471,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
/* ------------------------------------------------------------
* sysfs attributes
*/
+static ssize_t show_host_vhost_loc(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+ int len;
+
+ len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
+ hostdata->caps.loc);
+ return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_loc = {
+ .attr = {
+ .name = "vhost_loc",
+ .mode = S_IRUGO,
+ },
+ .show = show_host_vhost_loc,
+};
+
+static ssize_t show_host_vhost_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+ int len;
+
+ len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
+ hostdata->caps.name);
+ return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_name = {
+ .attr = {
+ .name = "vhost_name",
+ .mode = S_IRUGO,
+ },
+ .show = show_host_vhost_name,
+};
+
static ssize_t show_host_srp_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1594,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = {
};
static struct device_attribute *ibmvscsi_attrs[] = {
+ &ibmvscsi_host_vhost_loc,
+ &ibmvscsi_host_vhost_name,
&ibmvscsi_host_srp_version,
&ibmvscsi_host_partition_name,
&ibmvscsi_host_partition_number,
@@ -1657,7 +1877,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
unsigned long wait_switch = 0;
int rc;
- vdev->dev.driver_data = NULL;
+ dev_set_drvdata(&vdev->dev, NULL);
host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
if (!host) {
@@ -1674,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
atomic_set(&hostdata->request_limit, -1);
hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
+ if (map_persist_bufs(hostdata)) {
+ dev_err(&vdev->dev, "couldn't map persistent buffers\n");
+ goto persist_bufs_failed;
+ }
+
rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
if (rc != 0 && rc != H_RESOURCE) {
dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
@@ -1687,6 +1912,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
host->max_lun = 8;
host->max_id = max_id;
host->max_channel = max_channel;
+ host->max_cmd_len = 16;
if (scsi_add_host(hostdata->host, hostdata->dev))
goto add_host_failed;
@@ -1723,7 +1949,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
scsi_scan_host(host);
}
- vdev->dev.driver_data = hostdata;
+ dev_set_drvdata(&vdev->dev, hostdata);
return 0;
add_srp_port_failed:
@@ -1733,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
init_pool_failed:
ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
init_crq_failed:
+ unmap_persist_bufs(hostdata);
+ persist_bufs_failed:
scsi_host_put(host);
scsi_host_alloc_failed:
return -1;
@@ -1740,7 +1968,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
static int ibmvscsi_remove(struct vio_dev *vdev)
{
- struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
+ struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
+ unmap_persist_bufs(hostdata);
release_event_pool(&hostdata->pool, hostdata);
ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
max_events);
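
[Editor's note] The probe and remove paths above call two helpers, map_persist_bufs() and unmap_persist_bufs(), whose bodies are not part of the quoted hunks. A minimal sketch of what such helpers could look like, assuming they simply DMA-map the caps and madapter_info structures (matching the dma_addr_t fields added to struct ibmvscsi_host_data in the header change below) once at probe time so the virtual I/O server can read them across a partition migration:

    #include <linux/dma-mapping.h>

    /* Sketch only: the real map_persist_bufs()/unmap_persist_bufs() are not
     * shown in this diff; this assumes a plain dma_map_single() of the two
     * persistent structures. */
    static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
    {
    	hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
    					     sizeof(hostdata->caps),
    					     DMA_BIDIRECTIONAL);
    	if (dma_mapping_error(hostdata->dev, hostdata->caps_addr))
    		return 1;

    	hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
    						     &hostdata->madapter_info,
    						     sizeof(hostdata->madapter_info),
    						     DMA_BIDIRECTIONAL);
    	if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
    		dma_unmap_single(hostdata->dev, hostdata->caps_addr,
    				 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
    		return 1;
    	}
    	return 0;
    }

    static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
    {
    	dma_unmap_single(hostdata->dev, hostdata->caps_addr,
    			 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
    	dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
    			 sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
    }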
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 2d4339d5e16..76425303def 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -90,6 +90,7 @@ struct event_pool {
/* all driver data associated with a host adapter */
struct ibmvscsi_host_data {
atomic_t request_limit;
+ int client_migrated;
struct device *dev;
struct event_pool pool;
struct crq_queue queue;
@@ -97,6 +98,9 @@ struct ibmvscsi_host_data {
struct list_head sent;
struct Scsi_Host *host;
struct mad_adapter_info_data madapter_info;
+ struct capabilities caps;
+ dma_addr_t caps_addr;
+ dma_addr_t adapter_info_addr;
};
/* routines for managing a command/response queue */
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index e2dd6a45924..d5eaf972710 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -892,7 +892,7 @@ free_vport:
static int ibmvstgt_remove(struct vio_dev *dev)
{
- struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
+ struct srp_target *target = dev_get_drvdata(&dev->dev);
struct Scsi_Host *shost = target->shost;
struct vio_port *vport = target->ldata;
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 204604501ad..2cd735d1d19 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -37,6 +37,7 @@
#define SRP_VERSION "16.a"
#define SRP_MAX_IU_LEN 256
+#define SRP_MAX_LOC_LEN 32
union srp_iu {
struct srp_login_req login_req;
@@ -86,7 +87,37 @@ enum viosrp_mad_types {
VIOSRP_EMPTY_IU_TYPE = 0x01,
VIOSRP_ERROR_LOG_TYPE = 0x02,
VIOSRP_ADAPTER_INFO_TYPE = 0x03,
- VIOSRP_HOST_CONFIG_TYPE = 0x04
+ VIOSRP_HOST_CONFIG_TYPE = 0x04,
+ VIOSRP_CAPABILITIES_TYPE = 0x05,
+ VIOSRP_ENABLE_FAST_FAIL = 0x08,
+};
+
+enum viosrp_mad_status {
+ VIOSRP_MAD_SUCCESS = 0x00,
+ VIOSRP_MAD_NOT_SUPPORTED = 0xF1,
+ VIOSRP_MAD_FAILED = 0xF7,
+};
+
+enum viosrp_capability_type {
+ MIGRATION_CAPABILITIES = 0x01,
+ RESERVATION_CAPABILITIES = 0x02,
+};
+
+enum viosrp_capability_support {
+ SERVER_DOES_NOT_SUPPORTS_CAP = 0x0,
+ SERVER_SUPPORTS_CAP = 0x01,
+ SERVER_CAP_DATA = 0x02,
+};
+
+enum viosrp_reserve_type {
+ CLIENT_RESERVE_SCSI_2 = 0x01,
+};
+
+enum viosrp_capability_flag {
+ CLIENT_MIGRATED = 0x01,
+ CLIENT_RECONNECT = 0x02,
+ CAP_LIST_SUPPORTED = 0x04,
+ CAP_LIST_DATA = 0x08,
};
/*
@@ -127,11 +158,46 @@ struct viosrp_host_config {
u64 buffer;
};
+struct viosrp_fast_fail {
+ struct mad_common common;
+};
+
+struct viosrp_capabilities {
+ struct mad_common common;
+ u64 buffer;
+};
+
+struct mad_capability_common {
+ u32 cap_type;
+ u16 length;
+ u16 server_support;
+};
+
+struct mad_reserve_cap {
+ struct mad_capability_common common;
+ u32 type;
+};
+
+struct mad_migration_cap {
+ struct mad_capability_common common;
+ u32 ecl;
+};
+
+struct capabilities{
+ u32 flags;
+ char name[SRP_MAX_LOC_LEN];
+ char loc[SRP_MAX_LOC_LEN];
+ struct mad_migration_cap migration;
+ struct mad_reserve_cap reserve;
+};
+
union mad_iu {
struct viosrp_empty_iu empty_iu;
struct viosrp_error_log error_log;
struct viosrp_adapter_info adapter_info;
struct viosrp_host_config host_config;
+ struct viosrp_fast_fail fast_fail;
+ struct viosrp_capabilities capabilities;
};
union viosrp_iu {
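
[Editor's note] Taken together, the new definitions above let the client describe itself to the server with a capabilities MAD. Purely as an illustration (the mad_common field names and the surrounding send path are assumed, not shown in these hunks), a client might fill the structures like this before handing the DMA address of hostdata->caps to the server:

    /* Illustrative sketch: populate the new capability structures for a
     * VIOSRP_CAPABILITIES_TYPE request. The struct mad_common field names
     * (type/length) and the hostdata layout are assumptions here. */
    static void example_fill_capabilities(struct ibmvscsi_host_data *hostdata,
    				      struct viosrp_capabilities *req)
    {
    	struct capabilities *caps = &hostdata->caps;

    	caps->flags = CAP_LIST_SUPPORTED;
    	if (hostdata->client_migrated)
    		caps->flags |= CLIENT_MIGRATED;

    	caps->migration.common.cap_type = MIGRATION_CAPABILITIES;
    	caps->migration.common.length = sizeof(caps->migration);

    	caps->reserve.common.cap_type = RESERVATION_CAPABILITIES;
    	caps->reserve.common.length = sizeof(caps->reserve);
    	caps->reserve.type = CLIENT_RESERVE_SCSI_2;

    	req->common.type = VIOSRP_CAPABILITIES_TYPE;
    	req->common.length = sizeof(*caps);
    	req->buffer = hostdata->caps_addr;	/* mapped at probe time */
    }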
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index dd689ded860..0f8bc772b11 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7003,6 +7003,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
ioa_cfg->sdt_state = ABORT_DUMP;
ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
ioa_cfg->in_ioa_bringdown = 1;
+ ioa_cfg->allow_cmds = 0;
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -7688,7 +7689,7 @@ static void __ipr_remove(struct pci_dev *pdev)
* Return value:
* none
**/
-static void ipr_remove(struct pci_dev *pdev)
+static void __devexit ipr_remove(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
@@ -7864,7 +7865,7 @@ static struct pci_driver ipr_driver = {
.name = IPR_NAME,
.id_table = ipr_pci_table,
.probe = ipr_probe,
- .remove = ipr_remove,
+ .remove = __devexit_p(ipr_remove),
.shutdown = ipr_shutdown,
.err_handler = &ipr_err_handler,
};
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 992af05aacf..7af9bceb8aa 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1159,6 +1159,10 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
atomic_inc(&mp->stats.xid_not_found);
goto out;
}
+ if (ep->esb_stat & ESB_ST_COMPLETE) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto out;
+ }
if (ep->rxid == FC_XID_UNKNOWN)
ep->rxid = ntohs(fh->fh_rx_id);
if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 521f996f9b1..ad8b747837b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1896,7 +1896,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
break;
case FC_CMD_ABORTED:
- sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
+ sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
break;
case FC_CMD_TIME_OUT:
sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 747d73c5c8a..7bfbff7e0ef 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -478,7 +478,7 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
if (PTR_ERR(fp) == -FC_EX_CLOSED)
return fc_rport_error(rport, fp);
- if (rdata->retries < rdata->local_port->max_retry_count) {
+ if (rdata->retries < rdata->local_port->max_rport_retry_count) {
FC_DEBUG_RPORT("Error %ld in state %s, retrying\n",
PTR_ERR(fp), fc_rport_state(rport));
rdata->retries++;
@@ -1330,7 +1330,7 @@ int fc_rport_init(struct fc_lport *lport)
}
EXPORT_SYMBOL(fc_rport_init);
-int fc_setup_rport()
+int fc_setup_rport(void)
{
rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
if (!rport_event_queue)
@@ -1339,7 +1339,7 @@ int fc_setup_rport()
}
EXPORT_SYMBOL(fc_setup_rport);
-void fc_destroy_rport()
+void fc_destroy_rport(void)
{
destroy_workqueue(rport_event_queue);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index e72b4ad47d3..59908aead53 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -81,7 +81,8 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
- queue_work(ihost->workq, &conn->xmitwork);
+ if (ihost->workq)
+ queue_work(ihost->workq, &conn->xmitwork);
}
EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
@@ -109,11 +110,9 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
* if the window closed with IO queued, then kick the
* xmit thread
*/
- if (!list_empty(&session->leadconn->xmitqueue) ||
- !list_empty(&session->leadconn->mgmtqueue)) {
- if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
- iscsi_conn_queue_work(session->leadconn);
- }
+ if (!list_empty(&session->leadconn->cmdqueue) ||
+ !list_empty(&session->leadconn->mgmtqueue))
+ iscsi_conn_queue_work(session->leadconn);
}
}
EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
@@ -257,9 +256,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
itt_t itt;
int rc;
- rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
- if (rc)
- return rc;
+ if (conn->session->tt->alloc_pdu) {
+ rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
+ if (rc)
+ return rc;
+ }
hdr = (struct iscsi_cmd *) task->hdr;
itt = hdr->itt;
memset(hdr, 0, sizeof(*hdr));
@@ -364,7 +365,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
return -EIO;
task->state = ISCSI_TASK_RUNNING;
- list_move_tail(&task->running, &conn->run_list);
conn->scsicmd_pdus_cnt++;
ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
@@ -380,26 +380,25 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
}
/**
- * iscsi_complete_command - finish a task
+ * iscsi_free_task - free a task
* @task: iscsi cmd task
*
* Must be called with session lock.
* This function returns the scsi command to scsi-ml or cleans
* up mgmt tasks then returns the task to the pool.
*/
-static void iscsi_complete_command(struct iscsi_task *task)
+static void iscsi_free_task(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = task->sc;
+ ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
+ task->itt, task->state, task->sc);
+
session->tt->cleanup_task(task);
- list_del_init(&task->running);
- task->state = ISCSI_TASK_COMPLETED;
+ task->state = ISCSI_TASK_FREE;
task->sc = NULL;
-
- if (conn->task == task)
- conn->task = NULL;
/*
* login task is preallocated so do not free
*/
@@ -408,9 +407,6 @@ static void iscsi_complete_command(struct iscsi_task *task)
__kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
- if (conn->ping_task == task)
- conn->ping_task = NULL;
-
if (sc) {
task->sc = NULL;
/* SCSI eh reuses commands to verify us */
@@ -433,7 +429,7 @@ EXPORT_SYMBOL_GPL(__iscsi_get_task);
static void __iscsi_put_task(struct iscsi_task *task)
{
if (atomic_dec_and_test(&task->refcount))
- iscsi_complete_command(task);
+ iscsi_free_task(task);
}
void iscsi_put_task(struct iscsi_task *task)
@@ -446,26 +442,74 @@ void iscsi_put_task(struct iscsi_task *task)
}
EXPORT_SYMBOL_GPL(iscsi_put_task);
+/**
+ * iscsi_complete_task - finish a task
+ * @task: iscsi cmd task
+ * @state: state to complete task with
+ *
+ * Must be called with session lock.
+ */
+static void iscsi_complete_task(struct iscsi_task *task, int state)
+{
+ struct iscsi_conn *conn = task->conn;
+
+ ISCSI_DBG_SESSION(conn->session,
+ "complete task itt 0x%x state %d sc %p\n",
+ task->itt, task->state, task->sc);
+ if (task->state == ISCSI_TASK_COMPLETED ||
+ task->state == ISCSI_TASK_ABRT_TMF ||
+ task->state == ISCSI_TASK_ABRT_SESS_RECOV)
+ return;
+ WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
+ task->state = state;
+
+ if (!list_empty(&task->running))
+ list_del_init(&task->running);
+
+ if (conn->task == task)
+ conn->task = NULL;
+
+ if (conn->ping_task == task)
+ conn->ping_task = NULL;
+
+ /* release get from queueing */
+ __iscsi_put_task(task);
+}
+
/*
- * session lock must be held
+ * session lock must be held; if this is not called for a task that
+ * is still pending or from the xmit thread itself, the xmit thread
+ * must be suspended.
*/
-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
- int err)
+static void fail_scsi_task(struct iscsi_task *task, int err)
{
+ struct iscsi_conn *conn = task->conn;
struct scsi_cmnd *sc;
+ int state;
+ /*
+ * if the command completes while we are handling a successful tmf
+ * response we can reach this with task->sc already NULL, because
+ * the scsi eh abort code does not take a ref to the task.
+ */
sc = task->sc;
if (!sc)
return;
- if (task->state == ISCSI_TASK_PENDING)
+ if (task->state == ISCSI_TASK_PENDING) {
/*
* cmd never made it to the xmit thread, so we should not count
* the cmd in the sequencing
*/
conn->session->queued_cmdsn--;
+ /* it was never sent so just complete like normal */
+ state = ISCSI_TASK_COMPLETED;
+ } else if (err == DID_TRANSPORT_DISRUPTED)
+ state = ISCSI_TASK_ABRT_SESS_RECOV;
+ else
+ state = ISCSI_TASK_ABRT_TMF;
- sc->result = err;
+ sc->result = err << 16;
if (!scsi_bidi_cmnd(sc))
scsi_set_resid(sc, scsi_bufflen(sc));
else {
@@ -473,10 +517,7 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
scsi_in(sc)->resid = scsi_in(sc)->length;
}
- if (conn->task == task)
- conn->task = NULL;
- /* release ref from queuecommand */
- __iscsi_put_task(task);
+ iscsi_complete_task(task, state);
}
static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -516,7 +557,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
session->state = ISCSI_STATE_LOGGING_OUT;
task->state = ISCSI_TASK_RUNNING;
- list_move_tail(&task->running, &conn->mgmt_run_list);
ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
"datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
hdr->itt, task->data_count);
@@ -528,6 +568,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size)
{
struct iscsi_session *session = conn->session;
+ struct iscsi_host *ihost = shost_priv(session->host);
struct iscsi_task *task;
itt_t itt;
@@ -544,6 +585,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
*/
task = conn->login_task;
else {
+ if (session->state != ISCSI_STATE_LOGGED_IN)
+ return NULL;
+
BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
@@ -559,6 +603,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
atomic_set(&task->refcount, 1);
task->conn = conn;
task->sc = NULL;
+ INIT_LIST_HEAD(&task->running);
+ task->state = ISCSI_TASK_PENDING;
if (data_size) {
memcpy(task->data, data, data_size);
@@ -566,11 +612,14 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
} else
task->data_count = 0;
- if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
- iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
- "pdu for mgmt task.\n");
- goto requeue_task;
+ if (conn->session->tt->alloc_pdu) {
+ if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
+ "pdu for mgmt task.\n");
+ goto free_task;
+ }
}
+
itt = task->hdr->itt;
task->hdr_len = sizeof(struct iscsi_hdr);
memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
@@ -583,30 +632,22 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
task->conn->session->age);
}
- INIT_LIST_HEAD(&task->running);
- list_add_tail(&task->running, &conn->mgmtqueue);
-
- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+ if (!ihost->workq) {
if (iscsi_prep_mgmt_task(conn, task))
goto free_task;
if (session->tt->xmit_task(task))
goto free_task;
-
- } else
+ } else {
+ list_add_tail(&task->running, &conn->mgmtqueue);
iscsi_conn_queue_work(conn);
+ }
return task;
free_task:
__iscsi_put_task(task);
return NULL;
-
-requeue_task:
- if (task != conn->login_task)
- __kfifo_put(session->cmdpool.queue, (void*)&task,
- sizeof(void*));
- return NULL;
}
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -701,11 +742,10 @@ invalid_datalen:
sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
}
out:
- ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n",
+ ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
sc, sc->result, task->itt);
conn->scsirsp_pdus_cnt++;
-
- __iscsi_put_task(task);
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}
/**
@@ -724,6 +764,7 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
return;
+ iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
sc->result = (DID_OK << 16) | rhdr->cmd_status;
conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
@@ -738,8 +779,11 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
}
+ ISCSI_DBG_SESSION(conn->session, "data in with status done "
+ "[sc %p res %d itt 0x%x]\n",
+ sc, sc->result, task->itt);
conn->scsirsp_pdus_cnt++;
- __iscsi_put_task(task);
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}
static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -823,7 +867,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
*
* The session lock must be held.
*/
-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
{
struct iscsi_session *session = conn->session;
int i;
@@ -840,6 +884,7 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
return session->cmds[i];
}
+EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
/**
* __iscsi_complete_pdu - complete pdu
@@ -959,7 +1004,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
}
iscsi_tmf_rsp(conn, hdr);
- __iscsi_put_task(task);
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
break;
case ISCSI_OP_NOOP_IN:
iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -977,7 +1022,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
goto recv_pdu;
mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
- __iscsi_put_task(task);
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
break;
default:
rc = ISCSI_ERR_BAD_OPCODE;
@@ -989,7 +1034,7 @@ out:
recv_pdu:
if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
rc = ISCSI_ERR_CONN_FAILED;
- __iscsi_put_task(task);
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
return rc;
}
EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -1166,7 +1211,12 @@ void iscsi_requeue_task(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
- list_move_tail(&task->running, &conn->requeue);
+ /*
+ * this may be on the requeue list already if the xmit_task callout
+ * is handling the r2ts while we are adding new ones
+ */
+ if (list_empty(&task->running))
+ list_add_tail(&task->running, &conn->requeue);
iscsi_conn_queue_work(conn);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1206,6 +1256,7 @@ check_mgmt:
while (!list_empty(&conn->mgmtqueue)) {
conn->task = list_entry(conn->mgmtqueue.next,
struct iscsi_task, running);
+ list_del_init(&conn->task->running);
if (iscsi_prep_mgmt_task(conn, conn->task)) {
__iscsi_put_task(conn->task);
conn->task = NULL;
@@ -1217,23 +1268,26 @@ check_mgmt:
}
/* process pending command queue */
- while (!list_empty(&conn->xmitqueue)) {
+ while (!list_empty(&conn->cmdqueue)) {
if (conn->tmf_state == TMF_QUEUED)
break;
- conn->task = list_entry(conn->xmitqueue.next,
+ conn->task = list_entry(conn->cmdqueue.next,
struct iscsi_task, running);
+ list_del_init(&conn->task->running);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
- fail_command(conn, conn->task, DID_IMM_RETRY << 16);
+ fail_scsi_task(conn->task, DID_IMM_RETRY);
continue;
}
rc = iscsi_prep_scsi_cmd_pdu(conn->task);
if (rc) {
if (rc == -ENOMEM) {
+ list_add_tail(&conn->task->running,
+ &conn->cmdqueue);
conn->task = NULL;
goto again;
} else
- fail_command(conn, conn->task, DID_ABORT << 16);
+ fail_scsi_task(conn->task, DID_ABORT);
continue;
}
rc = iscsi_xmit_task(conn);
@@ -1260,8 +1314,8 @@ check_mgmt:
conn->task = list_entry(conn->requeue.next,
struct iscsi_task, running);
+ list_del_init(&conn->task->running);
conn->task->state = ISCSI_TASK_RUNNING;
- list_move_tail(conn->requeue.next, &conn->run_list);
rc = iscsi_xmit_task(conn);
if (rc)
goto again;
@@ -1328,6 +1382,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
struct iscsi_cls_session *cls_session;
struct Scsi_Host *host;
+ struct iscsi_host *ihost;
int reason = 0;
struct iscsi_session *session;
struct iscsi_conn *conn;
@@ -1338,6 +1393,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
sc->SCp.ptr = NULL;
host = sc->device->host;
+ ihost = shost_priv(host);
spin_unlock(host->host_lock);
cls_session = starget_to_session(scsi_target(sc->device));
@@ -1350,13 +1406,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
goto fault;
}
- /*
- * ISCSI_STATE_FAILED is a temp. state. The recovery
- * code will decide what is best to do with command queued
- * during this time
- */
- if (session->state != ISCSI_STATE_LOGGED_IN &&
- session->state != ISCSI_STATE_FAILED) {
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
* to handle the race between when we set the recovery state
* and block the session we requeue here (commands could
@@ -1364,12 +1414,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
* up because the block code is not locked)
*/
switch (session->state) {
+ case ISCSI_STATE_FAILED:
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
- goto reject;
+ sc->result = DID_IMM_RETRY << 16;
+ break;
case ISCSI_STATE_LOGGING_OUT:
reason = FAILURE_SESSION_LOGGING_OUT;
- goto reject;
+ sc->result = DID_IMM_RETRY << 16;
+ break;
case ISCSI_STATE_RECOVERY_FAILED:
reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
sc->result = DID_TRANSPORT_FAILFAST << 16;
@@ -1402,9 +1455,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
reason = FAILURE_OOM;
goto reject;
}
- list_add_tail(&task->running, &conn->xmitqueue);
- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+ if (!ihost->workq) {
reason = iscsi_prep_scsi_cmd_pdu(task);
if (reason) {
if (reason == -ENOMEM) {
@@ -1419,8 +1471,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
reason = FAILURE_SESSION_NOT_READY;
goto prepd_reject;
}
- } else
+ } else {
+ list_add_tail(&task->running, &conn->cmdqueue);
iscsi_conn_queue_work(conn);
+ }
session->queued_cmdsn++;
spin_unlock(&session->lock);
@@ -1429,7 +1483,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
prepd_reject:
sc->scsi_done = NULL;
- iscsi_complete_command(task);
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
reject:
spin_unlock(&session->lock);
ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
@@ -1439,7 +1493,7 @@ reject:
prepd_fault:
sc->scsi_done = NULL;
- iscsi_complete_command(task);
+ iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
fault:
spin_unlock(&session->lock);
ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
@@ -1608,44 +1662,24 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
* Fail commands. session lock held and recv side suspended and xmit
* thread flushed
*/
-static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
- int error)
+static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
+ int error)
{
- struct iscsi_task *task, *tmp;
-
- if (conn->task) {
- if (lun == -1 ||
- (conn->task->sc && conn->task->sc->device->lun == lun))
- conn->task = NULL;
- }
+ struct iscsi_task *task;
+ int i;
- /* flush pending */
- list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
- if (lun == task->sc->device->lun || lun == -1) {
- ISCSI_DBG_SESSION(conn->session,
- "failing pending sc %p itt 0x%x\n",
- task->sc, task->itt);
- fail_command(conn, task, error << 16);
- }
- }
+ for (i = 0; i < conn->session->cmds_max; i++) {
+ task = conn->session->cmds[i];
+ if (!task->sc || task->state == ISCSI_TASK_FREE)
+ continue;
- list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
- if (lun == task->sc->device->lun || lun == -1) {
- ISCSI_DBG_SESSION(conn->session,
- "failing requeued sc %p itt 0x%x\n",
- task->sc, task->itt);
- fail_command(conn, task, error << 16);
- }
- }
+ if (lun != -1 && lun != task->sc->device->lun)
+ continue;
- /* fail all other running */
- list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
- if (lun == task->sc->device->lun || lun == -1) {
- ISCSI_DBG_SESSION(conn->session,
- "failing in progress sc %p itt 0x%x\n",
- task->sc, task->itt);
- fail_command(conn, task, error << 16);
- }
+ ISCSI_DBG_SESSION(conn->session,
+ "failing sc %p itt 0x%x state %d\n",
+ task->sc, task->itt, task->state);
+ fail_scsi_task(task, error);
}
}
@@ -1655,7 +1689,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
struct iscsi_host *ihost = shost_priv(shost);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+ if (ihost->workq)
flush_workqueue(ihost->workq);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
@@ -1663,8 +1697,23 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_work(conn);
+}
+
+/*
+ * Check that a ping is in flight and has timed out, and that we
+ * are not just busy processing a pdu that started before the ping
+ * and is taking a while to complete, in which case the ping is
+ * merely stuck in a queue behind it.
+ */
+static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
+{
+ if (conn->ping_task &&
+ time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+ (conn->ping_timeout * HZ), jiffies))
+ return 1;
+ else
+ return 0;
}
static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
@@ -1702,16 +1751,20 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
* if the ping timedout then we are in the middle of cleaning up
* and can let the iscsi eh handle it
*/
- if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
- (conn->ping_timeout * HZ), jiffies))
+ if (iscsi_has_ping_timed_out(conn)) {
rc = BLK_EH_RESET_TIMER;
+ goto done;
+ }
/*
* if we are about to check the transport then give the command
* more time
*/
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
- jiffies))
+ jiffies)) {
rc = BLK_EH_RESET_TIMER;
+ goto done;
+ }
+
/* if in the middle of checking the transport then give us more time */
if (conn->ping_task)
rc = BLK_EH_RESET_TIMER;
@@ -1738,13 +1791,13 @@ static void iscsi_check_transport_timeouts(unsigned long data)
recv_timeout *= HZ;
last_recv = conn->last_recv;
- if (conn->ping_task &&
- time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
- jiffies)) {
+
+ if (iscsi_has_ping_timed_out(conn)) {
iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
- "expired, last rx %lu, last ping %lu, "
- "now %lu\n", conn->ping_timeout, last_recv,
- conn->last_ping, jiffies);
+ "expired, recv timeout %d, last rx %lu, "
+ "last ping %lu, now %lu\n",
+ conn->ping_timeout, conn->recv_timeout,
+ last_recv, conn->last_ping, jiffies);
spin_unlock(&session->lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return;
@@ -1788,6 +1841,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
+ ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc);
+
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
/*
@@ -1810,6 +1865,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
sc->SCp.phase != session->age) {
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
+ ISCSI_DBG_SESSION(session, "failing abort due to dropped "
+ "session.\n");
return FAILED;
}
@@ -1829,7 +1886,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
}
if (task->state == ISCSI_TASK_PENDING) {
- fail_command(conn, task, DID_ABORT << 16);
+ fail_scsi_task(task, DID_ABORT);
goto success;
}
@@ -1860,7 +1917,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
* then sent more data for the cmd.
*/
spin_lock(&session->lock);
- fail_command(conn, task, DID_ABORT << 16);
+ fail_scsi_task(task, DID_ABORT);
conn->tmf_state = TMF_INITIAL;
spin_unlock(&session->lock);
iscsi_start_tx(conn);
@@ -1967,7 +2024,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
iscsi_suspend_tx(conn);
spin_lock_bh(&session->lock);
- fail_all_commands(conn, sc->device->lun, DID_ERROR);
+ fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
conn->tmf_state = TMF_INITIAL;
spin_unlock_bh(&session->lock);
@@ -2274,6 +2331,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
if (cmd_task_size)
task->dd_data = &task[1];
task->itt = cmd_i;
+ task->state = ISCSI_TASK_FREE;
INIT_LIST_HEAD(&task->running);
}
@@ -2360,10 +2418,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
conn->transport_timer.data = (unsigned long)conn;
conn->transport_timer.function = iscsi_check_transport_timeouts;
- INIT_LIST_HEAD(&conn->run_list);
- INIT_LIST_HEAD(&conn->mgmt_run_list);
INIT_LIST_HEAD(&conn->mgmtqueue);
- INIT_LIST_HEAD(&conn->xmitqueue);
+ INIT_LIST_HEAD(&conn->cmdqueue);
INIT_LIST_HEAD(&conn->requeue);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
@@ -2531,27 +2587,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
EXPORT_SYMBOL_GPL(iscsi_conn_start);
static void
-flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
{
- struct iscsi_task *task, *tmp;
+ struct iscsi_task *task;
+ int i, state;
- /* handle pending */
- list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
- ISCSI_DBG_SESSION(session, "flushing pending mgmt task "
- "itt 0x%x\n", task->itt);
- /* release ref from prep task */
- __iscsi_put_task(task);
- }
+ for (i = 0; i < conn->session->cmds_max; i++) {
+ task = conn->session->cmds[i];
+ if (task->sc)
+ continue;
- /* handle running */
- list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
- ISCSI_DBG_SESSION(session, "flushing running mgmt task "
- "itt 0x%x\n", task->itt);
- /* release ref from prep task */
- __iscsi_put_task(task);
- }
+ if (task->state == ISCSI_TASK_FREE)
+ continue;
+
+ ISCSI_DBG_SESSION(conn->session,
+ "failing mgmt itt 0x%x state %d\n",
+ task->itt, task->state);
+ state = ISCSI_TASK_ABRT_SESS_RECOV;
+ if (task->state == ISCSI_TASK_PENDING)
+ state = ISCSI_TASK_COMPLETED;
+ iscsi_complete_task(task, state);
- conn->task = NULL;
+ }
}
static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2559,8 +2616,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
{
int old_stop_stage;
- del_timer_sync(&conn->transport_timer);
-
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
if (conn->stop_stage == STOP_CONN_TERM) {
@@ -2578,13 +2633,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
session->state = ISCSI_STATE_TERMINATE;
else if (conn->stop_stage != STOP_CONN_RECOVER)
session->state = ISCSI_STATE_IN_RECOVERY;
+ spin_unlock_bh(&session->lock);
+
+ del_timer_sync(&conn->transport_timer);
+ iscsi_suspend_tx(conn);
+ spin_lock_bh(&session->lock);
old_stop_stage = conn->stop_stage;
conn->stop_stage = flag;
conn->c_stage = ISCSI_CONN_STOPPED;
spin_unlock_bh(&session->lock);
- iscsi_suspend_tx(conn);
/*
* for connection level recovery we should not calculate
* header digest. conn->hdr_size used for optimization
@@ -2605,11 +2664,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
* flush queues.
*/
spin_lock_bh(&session->lock);
- if (flag == STOP_CONN_RECOVER)
- fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED);
- else
- fail_all_commands(conn, -1, DID_ERROR);
- flush_control_queues(session, conn);
+ fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
+ fail_mgmt_tasks(session, conn);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
}
@@ -2651,6 +2707,23 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
}
EXPORT_SYMBOL_GPL(iscsi_conn_bind);
+static int iscsi_switch_str_param(char **param, char *new_val_buf)
+{
+ char *new_val;
+
+ if (*param) {
+ if (!strcmp(*param, new_val_buf))
+ return 0;
+ }
+
+ new_val = kstrdup(new_val_buf, GFP_NOIO);
+ if (!new_val)
+ return -ENOMEM;
+
+ kfree(*param);
+ *param = new_val;
+ return 0;
+}
int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf, int buflen)
@@ -2723,38 +2796,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
sscanf(buf, "%u", &conn->exp_statsn);
break;
case ISCSI_PARAM_USERNAME:
- kfree(session->username);
- session->username = kstrdup(buf, GFP_KERNEL);
- if (!session->username)
- return -ENOMEM;
- break;
+ return iscsi_switch_str_param(&session->username, buf);
case ISCSI_PARAM_USERNAME_IN:
- kfree(session->username_in);
- session->username_in = kstrdup(buf, GFP_KERNEL);
- if (!session->username_in)
- return -ENOMEM;
- break;
+ return iscsi_switch_str_param(&session->username_in, buf);
case ISCSI_PARAM_PASSWORD:
- kfree(session->password);
- session->password = kstrdup(buf, GFP_KERNEL);
- if (!session->password)
- return -ENOMEM;
- break;
+ return iscsi_switch_str_param(&session->password, buf);
case ISCSI_PARAM_PASSWORD_IN:
- kfree(session->password_in);
- session->password_in = kstrdup(buf, GFP_KERNEL);
- if (!session->password_in)
- return -ENOMEM;
- break;
+ return iscsi_switch_str_param(&session->password_in, buf);
case ISCSI_PARAM_TARGET_NAME:
- /* this should not change between logins */
- if (session->targetname)
- break;
-
- session->targetname = kstrdup(buf, GFP_KERNEL);
- if (!session->targetname)
- return -ENOMEM;
- break;
+ return iscsi_switch_str_param(&session->targetname, buf);
case ISCSI_PARAM_TPGT:
sscanf(buf, "%d", &session->tpgt);
break;
@@ -2762,25 +2812,11 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
sscanf(buf, "%d", &conn->persistent_port);
break;
case ISCSI_PARAM_PERSISTENT_ADDRESS:
- /*
- * this is the address returned in discovery so it should
- * not change between logins.
- */
- if (conn->persistent_address)
- break;
-
- conn->persistent_address = kstrdup(buf, GFP_KERNEL);
- if (!conn->persistent_address)
- return -ENOMEM;
- break;
+ return iscsi_switch_str_param(&conn->persistent_address, buf);
case ISCSI_PARAM_IFACE_NAME:
- if (!session->ifacename)
- session->ifacename = kstrdup(buf, GFP_KERNEL);
- break;
+ return iscsi_switch_str_param(&session->ifacename, buf);
case ISCSI_PARAM_INITIATOR_NAME:
- if (!session->initiatorname)
- session->initiatorname = kstrdup(buf, GFP_KERNEL);
- break;
+ return iscsi_switch_str_param(&session->initiatorname, buf);
default:
return -ENOSYS;
}
@@ -2851,10 +2887,7 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
len = sprintf(buf, "%s\n", session->ifacename);
break;
case ISCSI_PARAM_INITIATOR_NAME:
- if (!session->initiatorname)
- len = sprintf(buf, "%s\n", "unknown");
- else
- len = sprintf(buf, "%s\n", session->initiatorname);
+ len = sprintf(buf, "%s\n", session->initiatorname);
break;
default:
return -ENOSYS;
@@ -2920,29 +2953,16 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
switch (param) {
case ISCSI_HOST_PARAM_NETDEV_NAME:
- if (!ihost->netdev)
- len = sprintf(buf, "%s\n", "default");
- else
- len = sprintf(buf, "%s\n", ihost->netdev);
+ len = sprintf(buf, "%s\n", ihost->netdev);
break;
case ISCSI_HOST_PARAM_HWADDRESS:
- if (!ihost->hwaddress)
- len = sprintf(buf, "%s\n", "default");
- else
- len = sprintf(buf, "%s\n", ihost->hwaddress);
+ len = sprintf(buf, "%s\n", ihost->hwaddress);
break;
case ISCSI_HOST_PARAM_INITIATOR_NAME:
- if (!ihost->initiatorname)
- len = sprintf(buf, "%s\n", "unknown");
- else
- len = sprintf(buf, "%s\n", ihost->initiatorname);
+ len = sprintf(buf, "%s\n", ihost->initiatorname);
break;
case ISCSI_HOST_PARAM_IPADDRESS:
- if (!strlen(ihost->local_address))
- len = sprintf(buf, "%s\n", "unknown");
- else
- len = sprintf(buf, "%s\n",
- ihost->local_address);
+ len = sprintf(buf, "%s\n", ihost->local_address);
break;
default:
return -ENOSYS;
@@ -2959,17 +2979,11 @@ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
switch (param) {
case ISCSI_HOST_PARAM_NETDEV_NAME:
- if (!ihost->netdev)
- ihost->netdev = kstrdup(buf, GFP_KERNEL);
- break;
+ return iscsi_switch_str_param(&ihost->netdev, buf);
case ISCSI_HOST_PARAM_HWADDRESS:
- if (!ihost->hwaddress)
- ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
- break;
+ return iscsi_switch_str_param(&ihost->hwaddress, buf);
case ISCSI_HOST_PARAM_INITIATOR_NAME:
- if (!ihost->initiatorname)
- ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
- break;
+ return iscsi_switch_str_param(&ihost->initiatorname, buf);
default:
return -ENOSYS;
}
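
[Editor's note] The iscsi_switch_str_param() helper introduced above collapses the repeated kfree()/kstrdup() blocks in iscsi_set_param() and iscsi_host_set_param(): it keeps the existing string when the new value is identical and otherwise duplicates the new value with GFP_NOIO (safe while the session is recovering) before freeing the old one. A hypothetical caller, just to show the call pattern (the nickname field does not exist in the driver):

    /* Hypothetical example only: "nickname" is a made-up session field used
     * to illustrate how a new string parameter would be wired through the
     * helper added above. */
    static int example_set_nickname(struct iscsi_session *session, char *buf)
    {
    	/* Returns 0 on success (including the no-change case) and
    	 * -ENOMEM if the duplicate allocation fails. */
    	return iscsi_switch_str_param(&session->nickname, buf);
    }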
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index b579ca9f483..2bc07090321 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -440,8 +440,8 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct iscsi_r2t_info *r2t;
- /* nothing to do for mgmt or pending tasks */
- if (!task->sc || task->state == ISCSI_TASK_PENDING)
+ /* nothing to do for mgmt */
+ if (!task->sc)
return;
/* flush task's r2t queues */
@@ -473,7 +473,13 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
int datasn = be32_to_cpu(rhdr->datasn);
unsigned total_in_length = scsi_in(task->sc)->length;
- iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
+ /*
+ * lib iscsi will update this in the completion handling if there
+ * is status.
+ */
+ if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
+ iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
+
if (tcp_conn->in.datalen == 0)
return 0;
@@ -857,6 +863,12 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
int rc = 0;
ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
+ /*
+ * Update for each skb instead of pdu, because over slow networks a
+ * data_in's data could take a while to read in. We also want to
+ * account for r2ts.
+ */
+ conn->last_recv = jiffies;
if (unlikely(conn->suspend_rx)) {
ISCSI_DBG_TCP(conn, "Rx suspended!\n");
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3da02e43678..54fa1e42dc4 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1927,21 +1927,21 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk("%s: multiple segments req %u %u, rsp %u %u\n",
- __func__, req->bio->bi_vcnt, req->data_len,
- rsp->bio->bi_vcnt, rsp->data_len);
+ __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
+ rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
return -EINVAL;
}
- ret = smp_execute_task(dev, bio_data(req->bio), req->data_len,
- bio_data(rsp->bio), rsp->data_len);
+ ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
+ bio_data(rsp->bio), blk_rq_bytes(rsp));
if (ret > 0) {
/* positive number is the untransferred residual */
- rsp->data_len = ret;
- req->data_len = 0;
+ rsp->resid_len = ret;
+ req->resid_len = 0;
ret = 0;
} else if (ret == 0) {
- rsp->data_len = 0;
- req->data_len = 0;
+ rsp->resid_len = 0;
+ req->resid_len = 0;
}
return ret;
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index d110a366c48..1bc3b756799 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -134,24 +134,24 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
{
u8 *req_data = NULL, *resp_data = NULL, *buf;
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
- int error = -EINVAL, resp_data_len = rsp->data_len;
+ int error = -EINVAL;
/* eight is the minimum size for request and response frames */
- if (req->data_len < 8 || rsp->data_len < 8)
+ if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
goto out;
- if (bio_offset(req->bio) + req->data_len > PAGE_SIZE ||
- bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) {
+ if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
+ bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
shost_printk(KERN_ERR, shost,
"SMP request/response frame crosses page boundary");
goto out;
}
- req_data = kzalloc(req->data_len, GFP_KERNEL);
+ req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
/* make sure frame can always be built ... we copy
* back only the requested length */
- resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL);
+ resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
if (!req_data || !resp_data) {
error = -ENOMEM;
@@ -160,7 +160,7 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
local_irq_disable();
buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
- memcpy(req_data, buf, req->data_len);
+ memcpy(req_data, buf, blk_rq_bytes(req));
kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
local_irq_enable();
@@ -178,15 +178,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
switch (req_data[1]) {
case SMP_REPORT_GENERAL:
- req->data_len -= 8;
- resp_data_len -= 32;
+ req->resid_len -= 8;
+ rsp->resid_len -= 32;
resp_data[2] = SMP_RESP_FUNC_ACC;
resp_data[9] = sas_ha->num_phys;
break;
case SMP_REPORT_MANUF_INFO:
- req->data_len -= 8;
- resp_data_len -= 64;
+ req->resid_len -= 8;
+ rsp->resid_len -= 64;
resp_data[2] = SMP_RESP_FUNC_ACC;
memcpy(resp_data + 12, shost->hostt->name,
SAS_EXPANDER_VENDOR_ID_LEN);
@@ -199,13 +199,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_DISCOVER:
- req->data_len -= 16;
- if ((int)req->data_len < 0) {
- req->data_len = 0;
+ req->resid_len -= 16;
+ if ((int)req->resid_len < 0) {
+ req->resid_len = 0;
error = -EINVAL;
goto out;
}
- resp_data_len -= 56;
+ rsp->resid_len -= 56;
sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
break;
@@ -215,13 +215,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_REPORT_PHY_SATA:
- req->data_len -= 16;
- if ((int)req->data_len < 0) {
- req->data_len = 0;
+ req->resid_len -= 16;
+ if ((int)req->resid_len < 0) {
+ req->resid_len = 0;
error = -EINVAL;
goto out;
}
- resp_data_len -= 60;
+ rsp->resid_len -= 60;
sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
break;
@@ -238,13 +238,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
break;
case SMP_PHY_CONTROL:
- req->data_len -= 44;
- if ((int)req->data_len < 0) {
- req->data_len = 0;
+ req->resid_len -= 44;
+ if ((int)req->resid_len < 0) {
+ req->resid_len = 0;
error = -EINVAL;
goto out;
}
- resp_data_len -= 8;
+ rsp->resid_len -= 8;
sas_phy_control(sas_ha, req_data[9], req_data[10],
req_data[32] >> 4, req_data[33] >> 4,
resp_data);
@@ -261,11 +261,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
local_irq_disable();
buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
- memcpy(buf, resp_data, rsp->data_len);
+ memcpy(buf, resp_data, blk_rq_bytes(rsp));
flush_kernel_dcache_page(bio_page(rsp->bio));
kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
local_irq_enable();
- rsp->data_len = resp_data_len;
out:
kfree(req_data);
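
[Editor's note] The libsas hunks above move from reading req->data_len directly to the block-layer accessor blk_rq_bytes() for the total payload size, and account for consumed bytes through resid_len instead of rewriting data_len. A small sketch of that accounting pattern for a hypothetical SMP function that consumes 8 request bytes and returns a 12-byte response (the function itself is illustrative only):

    #include <linux/blkdev.h>

    /* Illustrative only: shows the blk_rq_bytes()/resid_len accounting used
     * in the hunks above, applied to a made-up SMP function. */
    static int example_smp_account(struct request *req, struct request *rsp)
    {
    	/* total transfer sizes now come from the accessor ... */
    	if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 12)
    		return -EINVAL;

    	/* ... while the residual counts are decremented by what the
    	 * function actually consumed and produced */
    	req->resid_len -= 8;
    	rsp->resid_len -= 12;
    	return 0;
    }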
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 15e2d132e8b..2742ae8a3d0 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -135,7 +135,7 @@ int srp_target_alloc(struct srp_target *target, struct device *dev,
INIT_LIST_HEAD(&target->cmd_queue);
target->dev = dev;
- target->dev->driver_data = target;
+ dev_set_drvdata(target->dev, target);
target->srp_iu_size = iu_size;
target->rx_ring_size = nr;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1105f9a111b..54056984909 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -23,6 +23,13 @@
struct lpfc_sli2_slim;
+#define LPFC_PCI_DEV_LP 0x1
+#define LPFC_PCI_DEV_OC 0x2
+
+#define LPFC_SLI_REV2 2
+#define LPFC_SLI_REV3 3
+#define LPFC_SLI_REV4 4
+
#define LPFC_MAX_TARGET 4096 /* max number of targets supported */
#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
requests */
@@ -98,9 +105,11 @@ struct lpfc_dma_pool {
};
struct hbq_dmabuf {
+ struct lpfc_dmabuf hbuf;
struct lpfc_dmabuf dbuf;
uint32_t size;
uint32_t tag;
+ struct lpfc_rcqe rcqe;
};
/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -134,7 +143,10 @@ typedef struct lpfc_vpd {
} rev;
struct {
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd2 :24; /* Reserved */
+ uint32_t rsvd3 :19; /* Reserved */
+ uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd2 : 3; /* Reserved */
+ uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t csah : 1; /* Configure Synchronous Abort Handling */
@@ -152,7 +164,10 @@ typedef struct lpfc_vpd {
uint32_t csah : 1; /* Configure Synchronous Abort Handling */
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t cmv : 1; /* Configure Max VPIs */
- uint32_t rsvd2 :24; /* Reserved */
+ uint32_t cbg : 1; /* Configure BlockGuard */
+ uint32_t rsvd2 : 3; /* Reserved */
+ uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd3 :19; /* Reserved */
#endif
} sli3Feat;
} lpfc_vpd_t;
@@ -264,8 +279,8 @@ enum hba_state {
};
struct lpfc_vport {
- struct list_head listentry;
struct lpfc_hba *phba;
+ struct list_head listentry;
uint8_t port_type;
#define LPFC_PHYSICAL_PORT 1
#define LPFC_NPIV_PORT 2
@@ -273,6 +288,9 @@ struct lpfc_vport {
enum discovery_state port_state;
uint16_t vpi;
+ uint16_t vfi;
+ uint8_t vfi_state;
+#define LPFC_VFI_REGISTERED 0x1
uint32_t fc_flag; /* FC flags */
/* Several of these flags are HBA centric and should be moved to
@@ -385,6 +403,9 @@ struct lpfc_vport {
#endif
uint8_t stat_data_enabled;
uint8_t stat_data_blocked;
+ struct list_head rcv_buffer_list;
+ uint32_t vport_flag;
+#define STATIC_VPORT 1
};
struct hbq_s {
@@ -420,8 +441,66 @@ enum intr_type_t {
};
struct lpfc_hba {
+ /* SCSI interface function jump table entries */
+ int (*lpfc_new_scsi_buf)
+ (struct lpfc_vport *, int);
+ struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
+ (struct lpfc_hba *);
+ int (*lpfc_scsi_prep_dma_buf)
+ (struct lpfc_hba *, struct lpfc_scsi_buf *);
+ void (*lpfc_scsi_unprep_dma_buf)
+ (struct lpfc_hba *, struct lpfc_scsi_buf *);
+ void (*lpfc_release_scsi_buf)
+ (struct lpfc_hba *, struct lpfc_scsi_buf *);
+ void (*lpfc_rampdown_queue_depth)
+ (struct lpfc_hba *);
+ void (*lpfc_scsi_prep_cmnd)
+ (struct lpfc_vport *, struct lpfc_scsi_buf *,
+ struct lpfc_nodelist *);
+ int (*lpfc_scsi_prep_task_mgmt_cmd)
+ (struct lpfc_vport *, struct lpfc_scsi_buf *,
+ unsigned int, uint8_t);
+
+ /* IOCB interface function jump table entries */
+ int (*__lpfc_sli_issue_iocb)
+ (struct lpfc_hba *, uint32_t,
+ struct lpfc_iocbq *, uint32_t);
+ void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
+ struct lpfc_iocbq *);
+ int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
+
+
+ IOCB_t * (*lpfc_get_iocb_from_iocbq)
+ (struct lpfc_iocbq *);
+ void (*lpfc_scsi_cmd_iocb_cmpl)
+ (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
+
+ /* MBOX interface function jump table entries */
+ int (*lpfc_sli_issue_mbox)
+ (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+ /* Slow-path IOCB process function jump table entries */
+ void (*lpfc_sli_handle_slow_ring_event)
+ (struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ uint32_t mask);
+ /* INIT device interface function jump table entries */
+ int (*lpfc_sli_hbq_to_firmware)
+ (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
+ int (*lpfc_sli_brdrestart)
+ (struct lpfc_hba *);
+ int (*lpfc_sli_brdready)
+ (struct lpfc_hba *, uint32_t);
+ void (*lpfc_handle_eratt)
+ (struct lpfc_hba *);
+ void (*lpfc_stop_port)
+ (struct lpfc_hba *);
+
+
+ /* SLI4 specific HBA data structure */
+ struct lpfc_sli4_hba sli4_hba;
+
struct lpfc_sli sli;
- uint32_t sli_rev; /* SLI2 or SLI3 */
+ uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
+ uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
uint32_t sli3_options; /* Mask of enabled SLI3 options */
#define LPFC_SLI3_HBQ_ENABLED 0x01
#define LPFC_SLI3_NPIV_ENABLED 0x02
@@ -429,6 +508,7 @@ struct lpfc_hba {
#define LPFC_SLI3_CRP_ENABLED 0x08
#define LPFC_SLI3_INB_ENABLED 0x10
#define LPFC_SLI3_BG_ENABLED 0x20
+#define LPFC_SLI3_DSS_ENABLED 0x40
uint32_t iocb_cmd_size;
uint32_t iocb_rsp_size;
@@ -442,8 +522,13 @@ struct lpfc_hba {
uint32_t hba_flag; /* hba generic flags */
#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
-
-#define DEFER_ERATT 0x4 /* Deferred error attention in progress */
+#define DEFER_ERATT 0x2 /* Deferred error attention in progress */
+#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */
+#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */
+#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define FCP_XRI_ABORT_EVENT 0x20
+#define ELS_XRI_ABORT_EVENT 0x40
+#define ASYNC_EVENT 0x80
struct lpfc_dmabuf slim2p;
MAILBOX_t *mbox;
@@ -502,6 +587,9 @@ struct lpfc_hba {
uint32_t cfg_poll;
uint32_t cfg_poll_tmo;
uint32_t cfg_use_msi;
+ uint32_t cfg_fcp_imax;
+ uint32_t cfg_fcp_wq_count;
+ uint32_t cfg_fcp_eq_count;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_prot_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
@@ -511,6 +599,8 @@ struct lpfc_hba {
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
uint32_t cfg_enable_bg;
+ uint32_t cfg_enable_fip;
+ uint32_t cfg_log_verbose;
lpfc_vpd_t vpd; /* vital product data */
@@ -526,11 +616,12 @@ struct lpfc_hba {
unsigned long data_flags;
uint32_t hbq_in_use; /* HBQs in use flag */
- struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */
+ struct list_head rb_pend_list; /* Received buffers to be processed */
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
+ unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
void __iomem *slim_memmap_p; /* Kernel memory mapped address for
PCI BAR0 */
@@ -593,7 +684,8 @@ struct lpfc_hba {
/* pci_mem_pools */
struct pci_pool *lpfc_scsi_dma_buf_pool;
struct pci_pool *lpfc_mbuf_pool;
- struct pci_pool *lpfc_hbq_pool;
+ struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
+ struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool;
@@ -609,6 +701,14 @@ struct lpfc_hba {
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
+ uint16_t max_vports; /*
+ * For IOV HBAs max_vpi can change
+ * after a reset. max_vports is max
+ * number of vports present. This can
+ * be greater than max_vpi.
+ */
+ uint16_t vpi_base;
+ uint16_t vfi_base;
unsigned long *vpi_bmask; /* vpi allocation table */
/* Data structure used by fabric iocb scheduler */
@@ -667,6 +767,11 @@ struct lpfc_hba {
/* Maximum number of events that can be outstanding at any time*/
#define LPFC_MAX_EVT_COUNT 512
atomic_t fast_event_count;
+ struct lpfc_fcf fcf;
+ uint8_t fc_map[3];
+ uint8_t valid_vlan;
+ uint16_t vlan_id;
+ struct list_head fcf_conn_rec_list;
};
static inline struct Scsi_Host *
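
[Editor's note] struct lpfc_hba now carries per-API jump tables so a single driver core can service both SLI-3 (LPFC_PCI_DEV_LP) and the new SLI-4 (LPFC_PCI_DEV_OC) parts. A sketch of how such a table is typically filled in, keyed off pci_dev_grp; the _s3/_s4 helper names are assumptions for illustration and do not come from the hunks above:

    /* Sketch only: the _s3/_s4 routines stand in for the per-SLI
     * implementations behind the jump table declared above. */
    static int example_scsi_api_table_setup(struct lpfc_hba *phba, int dev_grp)
    {
    	switch (dev_grp) {
    	case LPFC_PCI_DEV_LP:			/* SLI-3 style HBA */
    		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
    		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
    		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
    		break;
    	case LPFC_PCI_DEV_OC:			/* SLI-4 style HBA */
    		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
    		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
    		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
    		break;
    	default:
    		return -ENODEV;
    	}
    	return 0;
    }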
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c14f0cbdb12..d73e677201f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -30,8 +30,10 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@@ -505,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost)
return -ENOMEM;
memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
- pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
- pmboxq->mb.mbxOwner = OWN_HOST;
+ pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+ pmboxq->u.mb.mbxOwner = OWN_HOST;
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
- if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
+ if ((mbxstatus == MBX_SUCCESS) &&
+ (pmboxq->u.mb.mbxStatus == 0 ||
+ pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
lpfc_init_link(phba, pmboxq, phba->cfg_topology,
phba->cfg_link_speed);
@@ -789,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
uint32_t *mrpi, uint32_t *arpi,
uint32_t *mvpi, uint32_t *avpi)
{
- struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_mbx_read_config *rd_config;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
@@ -800,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
*/
if (phba->link_state < LPFC_LINK_DOWN ||
!phba->mbox_mem_pool ||
- (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+ (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
return 0;
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -811,13 +816,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
return 0;
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
- pmb = &pmboxq->mb;
+ pmb = &pmboxq->u.mb;
pmb->mbxCommand = MBX_READ_CONFIG;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
rc = MBX_NOT_FINISHED;
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -828,18 +833,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
return 0;
}
- if (mrpi)
- *mrpi = pmb->un.varRdConfig.max_rpi;
- if (arpi)
- *arpi = pmb->un.varRdConfig.avail_rpi;
- if (mxri)
- *mxri = pmb->un.varRdConfig.max_xri;
- if (axri)
- *axri = pmb->un.varRdConfig.avail_xri;
- if (mvpi)
- *mvpi = pmb->un.varRdConfig.max_vpi;
- if (avpi)
- *avpi = pmb->un.varRdConfig.avail_vpi;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rd_config = &pmboxq->u.mqe.un.rd_config;
+ if (mrpi)
+ *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+ if (arpi)
+ *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
+ phba->sli4_hba.max_cfg_param.rpi_used;
+ if (mxri)
+ *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+ if (axri)
+ *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
+ phba->sli4_hba.max_cfg_param.xri_used;
+ if (mvpi)
+ *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+ if (avpi)
+ *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
+ phba->sli4_hba.max_cfg_param.vpi_used;
+ } else {
+ if (mrpi)
+ *mrpi = pmb->un.varRdConfig.max_rpi;
+ if (arpi)
+ *arpi = pmb->un.varRdConfig.avail_rpi;
+ if (mxri)
+ *mxri = pmb->un.varRdConfig.max_xri;
+ if (axri)
+ *axri = pmb->un.varRdConfig.avail_xri;
+ if (mvpi)
+ *mvpi = pmb->un.varRdConfig.max_vpi;
+ if (avpi)
+ *avpi = pmb->un.varRdConfig.avail_vpi;
+ }
mempool_free(pmboxq, phba->mbox_mem_pool);
return 1;
@@ -2021,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
# deluged with LOTS of information.
# You can set a bit mask to record specific types of verbose messages:
-#
-# LOG_ELS 0x1 ELS events
-# LOG_DISCOVERY 0x2 Link discovery events
-# LOG_MBOX 0x4 Mailbox events
-# LOG_INIT 0x8 Initialization events
-# LOG_LINK_EVENT 0x10 Link events
-# LOG_FCP 0x40 FCP traffic history
-# LOG_NODE 0x80 Node table events
-# LOG_BG 0x200 BlockBuard events
-# LOG_MISC 0x400 Miscellaneous events
-# LOG_SLI 0x800 SLI events
-# LOG_FCP_ERROR 0x1000 Only log FCP errors
-# LOG_LIBDFC 0x2000 LIBDFC events
-# LOG_ALL_MSG 0xffff LOG all messages
+# See lpfc_logmsg.h for definitions.
*/
-LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff,
+LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
"Verbose logging bit-mask");
/*
@@ -2266,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6)
static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
lpfc_topology_show, lpfc_topology_store);
+/**
+ * lpfc_static_vport_show: Read callback function for
+ * lpfc_static_vport sysfs file.
+ * @dev: Pointer to class device object.
+ * @attr: device attribute structure.
+ * @buf: Data buffer.
+ *
+ * This function is the read call back function for
+ * lpfc_static_vport sysfs file. The lpfc_static_vport
+ * sysfs file reports the manageability of the vport.
+ **/
+static ssize_t
+lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ if (vport->vport_flag & STATIC_VPORT)
+ sprintf(buf, "1\n");
+ else
+ sprintf(buf, "0\n");
+
+ return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to report whether the vport is a static vport.
+ */
+static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
+ lpfc_static_vport_show, NULL);
/**
* lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -2341,7 +2382,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
if (vports == NULL)
return -ENOMEM;
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
v_shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(v_shost->host_lock);
/* Block and reset data collection */
@@ -2356,7 +2397,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
phba->bucket_base = base;
phba->bucket_step = step;
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
v_shost = lpfc_shost_from_vport(vports[i]);
/* Unblock data collection */
@@ -2373,7 +2414,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
if (vports == NULL)
return -ENOMEM;
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
v_shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->stat_data_blocked = 1;
@@ -2844,15 +2885,39 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature
-# 0 = MSI disabled
+# 0 = MSI disabled (default)
# 1 = MSI enabled
-# 2 = MSI-X enabled (default)
-# Value range is [0,2]. Default value is 2.
+# 2 = MSI-X enabled
+# Value range is [0,2]. Default value is 0.
*/
-LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
+LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
"MSI-X (2), if possible");
/*
+# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
+#
+# Value range is [636,651042]. Default value is 10000.
+*/
+LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
+ "Set the maximum number of fast-path FCP interrupts per second");
+
+/*
+# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
+#
+# Value range is [1,31]. Default value is 4.
+*/
+LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
+ "Set the number of fast-path FCP work queues, if possible");
+
+/*
+# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
+#
+# Value range is [1,7]. Default value is 1.
+*/
+LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
+ "Set the number of fast-path FCP event queues, if possible");
+
+/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
# 0 = HBA resets disabled
# 1 = HBA resets enabled (default)
@@ -2876,6 +2941,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
*/
LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
+/*
+# lpfc_enable_fip: When set, FIP is required to start discovery. If not
+# set, the driver will add an FCF record manually if the port has no
+# FCF records available and start discovery.
+# Value range is [0,1]. Default value is 0 (disabled)
+*/
+LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
+
/*
# lpfc_prot_mask: i
@@ -2942,6 +3015,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_peer_port_login,
&dev_attr_lpfc_nodev_tmo,
&dev_attr_lpfc_devloss_tmo,
+ &dev_attr_lpfc_enable_fip,
&dev_attr_lpfc_fcp_class,
&dev_attr_lpfc_use_adisc,
&dev_attr_lpfc_ack0,
@@ -2969,6 +3043,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_poll,
&dev_attr_lpfc_poll_tmo,
&dev_attr_lpfc_use_msi,
+ &dev_attr_lpfc_fcp_imax,
+ &dev_attr_lpfc_fcp_wq_count,
+ &dev_attr_lpfc_fcp_eq_count,
&dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn,
&dev_attr_lpfc_soft_wwpn,
@@ -2991,6 +3068,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_lpfc_lun_queue_depth,
&dev_attr_lpfc_nodev_tmo,
&dev_attr_lpfc_devloss_tmo,
+ &dev_attr_lpfc_enable_fip,
&dev_attr_lpfc_hba_queue_depth,
&dev_attr_lpfc_peer_port_login,
&dev_attr_lpfc_restrict_login,
@@ -3003,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_lpfc_enable_da_id,
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
+ &dev_attr_lpfc_static_vport,
NULL,
};
@@ -3199,7 +3278,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
}
}
- memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
+ memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
buf, count);
phba->sysfs_mbox.offset = off + count;
@@ -3241,6 +3320,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int rc;
+ MAILBOX_t *pmb;
if (off > MAILBOX_CMD_SIZE)
return -ERANGE;
@@ -3265,8 +3345,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
if (off == 0 &&
phba->sysfs_mbox.state == SMBOX_WRITING &&
phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
-
- switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
+ pmb = &phba->sysfs_mbox.mbox->u.mb;
+ switch (pmb->mbxCommand) {
/* Offline only */
case MBX_INIT_LINK:
case MBX_DOWN_LINK:
@@ -3283,7 +3363,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
printk(KERN_WARNING "mbox_read:Command 0x%x "
"is illegal in on-line state\n",
- phba->sysfs_mbox.mbox->mb.mbxCommand);
+ pmb->mbxCommand);
sysfs_mbox_idle(phba);
spin_unlock_irq(&phba->hbalock);
return -EPERM;
@@ -3319,13 +3399,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
case MBX_CONFIG_PORT:
case MBX_RUN_BIU_DIAG:
printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
- phba->sysfs_mbox.mbox->mb.mbxCommand);
+ pmb->mbxCommand);
sysfs_mbox_idle(phba);
spin_unlock_irq(&phba->hbalock);
return -EPERM;
default:
printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
- phba->sysfs_mbox.mbox->mb.mbxCommand);
+ pmb->mbxCommand);
sysfs_mbox_idle(phba);
spin_unlock_irq(&phba->hbalock);
return -EPERM;
@@ -3335,14 +3415,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
* or RESTART mailbox commands until the HBA is restarted.
*/
if (phba->pport->stopped &&
- phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
- phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART &&
- phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
- phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN)
+ pmb->mbxCommand != MBX_DUMP_MEMORY &&
+ pmb->mbxCommand != MBX_RESTART &&
+ pmb->mbxCommand != MBX_WRITE_VPARMS &&
+ pmb->mbxCommand != MBX_WRITE_WWN)
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"1259 mbox: Issued mailbox cmd "
"0x%x while in stopped state.\n",
- phba->sysfs_mbox.mbox->mb.mbxCommand);
+ pmb->mbxCommand);
phba->sysfs_mbox.mbox->vport = vport;
@@ -3356,7 +3436,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
}
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
+ (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox (phba,
@@ -3368,8 +3448,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox_wait (phba,
phba->sysfs_mbox.mbox,
- lpfc_mbox_tmo_val(phba,
- phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
+ lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
spin_lock_irq(&phba->hbalock);
}
@@ -3391,7 +3470,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
return -EAGAIN;
}
- memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
+ memcpy(buf, (uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off, count);
phba->sysfs_mbox.offset = off + count;
@@ -3585,6 +3664,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
case LA_8GHZ_LINK:
fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
break;
+ case LA_10GHZ_LINK:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
@@ -3652,7 +3734,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
*/
if (phba->link_state < LPFC_LINK_DOWN ||
!phba->mbox_mem_pool ||
- (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+ (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
return NULL;
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -3663,14 +3745,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
return NULL;
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
- pmb = &pmboxq->mb;
+ pmb = &pmboxq->u.mb;
pmb->mbxCommand = MBX_READ_STATUS;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3695,7 +3777,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3769,7 +3851,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
return;
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
- pmb = &pmboxq->mb;
+ pmb = &pmboxq->u.mb;
pmb->mbxCommand = MBX_READ_STATUS;
pmb->mbxOwner = OWN_HOST;
pmb->un.varWords[0] = 0x1; /* reset request */
@@ -3777,7 +3859,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3795,7 +3877,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3962,6 +4044,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
}
+/**
+ * lpfc_hba_log_verbose_init - Set hba's log verbose level
+ * @phba: Pointer to lpfc_hba struct.
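+ * @verbose: Verbose logging value to be set in the @phba cfg_log_verbose.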
+ *
+ * This function is called by the lpfc_get_cfgparam() routine to set the
+ * module lpfc_log_verbose into the @phba cfg_log_verbose for use with
+ * log messages according to the module's lpfc_log_verbose parameter setting
+ * before any hba port or vport is created.
+ **/
+static void
+lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
+{
+ phba->cfg_log_verbose = verbose;
+}
+
struct fc_function_template lpfc_transport_functions = {
/* fixed attributes the driver supports */
.show_host_node_name = 1,
@@ -4105,6 +4202,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
lpfc_use_msi_init(phba, lpfc_use_msi);
+ lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+ lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
+ lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
lpfc_enable_bg_init(phba, lpfc_enable_bg);
@@ -4113,26 +4213,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
- /*
- * Since the sg_tablesize is module parameter, the sg_dma_buf_size
- * used to create the sg_dma_buf_pool must be dynamically calculated.
- * 2 segments are added since the IOCB needs a command and response bde.
- */
- phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
-
- if (phba->cfg_enable_bg) {
- phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
- phba->cfg_sg_dma_buf_size +=
- phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
- }
-
- /* Also reinitialize the host templates with new values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
+ lpfc_enable_fip_init(phba, lpfc_enable_fip);
+ lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
+
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f88ce3f2619..d2a922997c0 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@@ -35,17 +37,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
- LPFC_MBOXQ_t *, uint32_t);
+int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
+ LPFC_MBOXQ_t *, uint32_t);
void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
-void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rpis(struct lpfc_vport *, int);
int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_linkdown_port(struct lpfc_vport *);
void lpfc_port_link_failure(struct lpfc_vport *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -54,6 +58,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -105,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -149,15 +155,19 @@ int lpfc_online(struct lpfc_hba *);
void lpfc_unblock_mgmt_io(struct lpfc_hba *);
void lpfc_offline_prep(struct lpfc_hba *);
void lpfc_offline(struct lpfc_hba *);
+void lpfc_reset_hba(struct lpfc_hba *);
int lpfc_sli_setup(struct lpfc_hba *);
int lpfc_sli_queue_setup(struct lpfc_hba *);
void lpfc_handle_eratt(struct lpfc_hba *);
void lpfc_handle_latt(struct lpfc_hba *);
-irqreturn_t lpfc_intr_handler(int, void *);
-irqreturn_t lpfc_sp_intr_handler(int, void *);
-irqreturn_t lpfc_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_intr_handler(int, void *);
+irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -165,16 +175,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_dev_check(struct lpfc_hba *);
int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
+void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
+void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
+void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
+void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
+void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
uint32_t , LPFC_MBOXQ_t *);
struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
+struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
+void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
+ uint16_t);
+void lpfc_unregister_unused_fcf(struct lpfc_hba *);
-int lpfc_mem_alloc(struct lpfc_hba *);
+int lpfc_mem_alloc(struct lpfc_hba *, int align);
void lpfc_mem_free(struct lpfc_hba *);
+void lpfc_mem_free_all(struct lpfc_hba *);
void lpfc_stop_vport_timers(struct lpfc_vport *);
void lpfc_poll_timeout(unsigned long ptr);
@@ -186,6 +212,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
uint32_t);
+void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_reset_barrier(struct lpfc_hba * phba);
int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -198,12 +225,13 @@ int lpfc_sli_host_down(struct lpfc_vport *);
int lpfc_sli_hba_down(struct lpfc_hba *);
int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
int lpfc_sli_handle_mb_event(struct lpfc_hba *);
-int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
+void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
int lpfc_sli_check_eratt(struct lpfc_hba *);
-int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
+void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
+int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@@ -237,7 +265,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
-int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, struct lpfc_iocbq *,
uint32_t);
void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
@@ -254,6 +282,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
const char* lpfc_info(struct Scsi_Host *);
int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
+int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
+
void lpfc_get_cfgparam(struct lpfc_hba *);
void lpfc_get_vport_cfgparam(struct lpfc_vport *);
int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
@@ -314,8 +348,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
+void lpfc_create_static_vport(struct lpfc_hba *);
+void lpfc_stop_hba_timers(struct lpfc_hba *);
+void lpfc_stop_port(struct lpfc_hba *);
+void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
+int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
+void lpfc_start_fdiscs(struct lpfc_hba *phba);
#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5
#define HBA_EVENT_LINK_UP 2
#define HBA_EVENT_LINK_DOWN 3
+
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 896c7b0351e..1dbccfd3d02 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -32,8 +32,10 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@@ -267,8 +269,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
uint32_t tmo, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
IOCB_t *icmd;
struct lpfc_iocbq *geniocb;
int rc;
@@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
geniocb->vport = vport;
geniocb->retry = retry;
- rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
if (rc == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, geniocb);
@@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
case LA_8GHZ_LINK:
ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
break;
+ case LA_10GHZ_LINK:
+ ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
+ break;
default:
ae->un.PortSpeed =
HBA_PORTSPEED_UNKNOWN;
@@ -1730,7 +1733,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
uint8_t *fwname;
if (vp->rev.rBit) {
- if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+ if (psli->sli_flag & LPFC_SLI_ACTIVE)
rev = vp->rev.sli2FwRev;
else
rev = vp->rev.sli1FwRev;
@@ -1756,7 +1759,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
}
b4 = (rev & 0x0000000f);
- if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+ if (psli->sli_flag & LPFC_SLI_ACTIVE)
fwname = vp->rev.sli2FwName;
else
fwname = vp->rev.sli1FwName;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 52be5644e07..8d0f0de76b6 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -33,8 +33,10 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@@ -51,8 +53,7 @@
* debugfs interface
*
* To access this interface the user should:
- * # mkdir /debug
- * # mount -t debugfs none /debug
+ * # mount -t debugfs none /sys/kernel/debug
*
* The lpfc debugfs directory hierarchy is:
* lpfc/lpfcX/vportY
@@ -280,6 +281,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
struct lpfc_dmabuf *d_buf;
struct hbq_dmabuf *hbq_buf;
+ if (phba->sli_rev != 3)
+ return 0;
cnt = LPFC_HBQINFO_SIZE;
spin_lock_irq(&phba->hbalock);
@@ -489,12 +492,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
pring->next_cmdidx, pring->local_getidx,
pring->flag, pgpp->rspPutInx, pring->numRiocb);
}
- word0 = readl(phba->HAregaddr);
- word1 = readl(phba->CAregaddr);
- word2 = readl(phba->HSregaddr);
- word3 = readl(phba->HCregaddr);
- len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n",
- word0, word1, word2, word3);
+
+ if (phba->sli_rev <= LPFC_SLI_REV3) {
+ word0 = readl(phba->HAregaddr);
+ word1 = readl(phba->CAregaddr);
+ word2 = readl(phba->HSregaddr);
+ word3 = readl(phba->HCregaddr);
+ len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+ "HC:%08x\n", word0, word1, word2, word3);
+ }
spin_unlock_irq(&phba->hbalock);
return len;
}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index ffd10897207..1142070e948 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -135,6 +135,7 @@ struct lpfc_nodelist {
#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
+#define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */
/* ndlp usage management macros */
#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b8b34cf5c3d..6bdeb14878a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@@ -84,7 +86,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
uint32_t ha_copy;
if (vport->port_state >= LPFC_VPORT_READY ||
- phba->link_state == LPFC_LINK_DOWN)
+ phba->link_state == LPFC_LINK_DOWN ||
+ phba->sli_rev > LPFC_SLI_REV3)
return 0;
/* Read the HBA Host Attention Register */
@@ -219,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->un.elsreq64.myID = vport->fc_myDID;
/* For ELS_REQUEST64_CR, use the VPI by default */
- icmd->ulpContext = vport->vpi;
+ icmd->ulpContext = vport->vpi + phba->vpi_base;
icmd->ulpCt_h = 0;
/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
if (elscmd == ELS_CMD_ECHO)
@@ -305,7 +308,7 @@ els_iocb_free_pcmb_exit:
* 0 - successfully issued fabric registration login for @vport
* -ENXIO -- failed to issue fabric registration login for @vport
**/
-static int
+int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
@@ -345,8 +348,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
err = 4;
goto fail;
}
- rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
- 0);
+ rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
if (rc) {
err = 5;
goto fail_free_mbox;
@@ -386,6 +388,75 @@ fail:
}
/**
+ * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for FCoE only.
+ *
+ * Return code
+ * 0 - successfully issued REG_VFI for @vport
+ * A failure code otherwise.
+ **/
+static int
+lpfc_issue_reg_vfi(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_nodelist *ndlp;
+ struct serv_parm *sp;
+ struct lpfc_dmabuf *dmabuf;
+ int rc = 0;
+
+ sp = &phba->fc_fabparam;
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
+ if (!dmabuf->virt) {
+ rc = -ENOMEM;
+ goto fail_free_dmabuf;
+ }
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ rc = -ENOMEM;
+ goto fail_free_coherent;
+ }
+ vport->port_state = LPFC_FABRIC_CFG_LINK;
+ memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
+ lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
+ mboxq->vport = vport;
+ mboxq->context1 = dmabuf;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ rc = -ENXIO;
+ goto fail_free_mbox;
+ }
+ return 0;
+
+fail_free_mbox:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+fail_free_coherent:
+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+fail_free_dmabuf:
+ kfree(dmabuf);
+fail:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0289 Issue Register VFI failed: Err %d\n", rc);
+ return rc;
+}
+
+/**
* lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
@@ -497,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
}
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
-
- if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
- vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
- lpfc_register_new_vport(phba, vport, ndlp);
- return 0;
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
+ vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ lpfc_register_new_vport(phba, vport, ndlp);
+ else
+ lpfc_issue_fabric_reglogin(vport);
+ } else {
+ ndlp->nlp_type |= NLP_FABRIC;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ if (vport->vfi_state & LPFC_VFI_REGISTERED) {
+ lpfc_start_fdiscs(phba);
+ lpfc_do_scr_ns_plogi(phba, vport);
+ } else
+ lpfc_issue_reg_vfi(vport);
}
- lpfc_issue_fabric_reglogin(vport);
return 0;
}
-
/**
* lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
* @vport: pointer to a host virtual N_Port data structure.
@@ -815,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (sp->cmn.fcphHigh < FC_PH3)
sp->cmn.fcphHigh = FC_PH3;
- if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
+ elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
+ /* FLOGI needs to be 3 for WQE FCFI */
+ /* Set the fcfi to the fcfi we registered with */
+ elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+ } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
sp->cmn.request_multiple_Nport = 1;
-
/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
@@ -930,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
if (!ndlp)
return 0;
lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ /* Set the node type */
+ ndlp->nlp_type |= NLP_FABRIC;
/* Put ndlp onto node list */
lpfc_enqueue_node(vport, ndlp);
} else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1350,14 +1435,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
IOCB_t *icmd;
struct lpfc_nodelist *ndlp;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int ret;
psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
ndlp = lpfc_findnode_did(vport, did);
if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@@ -1391,7 +1474,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
phba->fc_stat.elsXmitPLOGI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
- ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (ret == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
@@ -1501,14 +1584,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
PRLI *npr;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
- struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-
cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_PRLI);
@@ -1550,7 +1628,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_PRLI_SND;
spin_unlock_irq(shost->host_lock);
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
spin_unlock_irq(shost->host_lock);
@@ -1608,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
* and continue discovery.
*/
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
- !(vport->fc_flag & FC_RSCN_MODE)) {
+ !(vport->fc_flag & FC_RSCN_MODE) &&
+ (phba->sli_rev < LPFC_SLI_REV4)) {
lpfc_issue_reg_vpi(phba, vport);
return;
}
@@ -1788,8 +1868,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ADISC *ap;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
uint8_t *pcmd;
uint16_t cmdsize;
@@ -1822,7 +1900,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_ADISC_SND;
spin_unlock_irq(shost->host_lock);
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_ADISC_SND;
spin_unlock_irq(shost->host_lock);
@@ -1937,15 +2016,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
- struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING];
-
spin_lock_irq(shost->host_lock);
if (ndlp->nlp_flag & NLP_LOGO_SND) {
spin_unlock_irq(shost->host_lock);
@@ -1978,7 +2052,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
spin_unlock_irq(shost->host_lock);
- rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
spin_lock_irq(shost->host_lock);
@@ -2058,14 +2132,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
struct lpfc_nodelist *ndlp;
psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = (sizeof(uint32_t) + sizeof(SCR));
ndlp = lpfc_findnode_did(vport, nportid);
@@ -2108,7 +2180,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
phba->fc_stat.elsXmitSCR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
/* The additional lpfc_nlp_put will cause the following
* lpfc_els_free_iocb routine to trigger the release of
* the node.
@@ -2152,7 +2225,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
FARP *fp;
uint8_t *pcmd;
@@ -2162,7 +2234,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
struct lpfc_nodelist *ndlp;
psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = (sizeof(uint32_t) + sizeof(FARP));
ndlp = lpfc_findnode_did(vport, nportid);
@@ -2219,7 +2290,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
phba->fc_stat.elsXmitFARPR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
/* The additional lpfc_nlp_put will cause the following
* lpfc_els_free_iocb routine to trigger the release of
* the node.
@@ -2949,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ /*
+ * This routine is used to register and unregister in previous SLI
+ * modes.
+ */
+ if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
+ (phba->sli_rev == LPFC_SLI_REV4))
+ lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
+
pmb->context1 = NULL;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -2961,6 +3041,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*/
lpfc_nlp_not_used(ndlp);
}
+
return;
}
@@ -3170,7 +3251,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
@@ -3178,7 +3258,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
ELS_PKT *els_pkt_ptr;
psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
oldcmd = &oldiocb->iocb;
switch (flag) {
@@ -3266,7 +3345,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
}
phba->fc_stat.elsXmitACC++;
- rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -3305,15 +3384,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-
cmdsize = 2 * sizeof(uint32_t);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -3346,7 +3422,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
phba->fc_stat.elsXmitLSRJT++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
@@ -3379,8 +3455,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
ADISC *ap;
IOCB_t *icmd, *oldcmd;
struct lpfc_iocbq *elsiocb;
@@ -3422,7 +3496,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -3459,14 +3533,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = sizeof(uint32_t) + sizeof(PRLI);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -3520,7 +3592,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -3562,15 +3634,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
RNID *rn;
IOCB_t *icmd, *oldcmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING];
-
cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+ (2 * sizeof(struct lpfc_name));
if (format)
@@ -3626,7 +3695,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
* it could be freed */
- rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -3839,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
payload_len -= sizeof(uint32_t);
switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
case RSCN_ADDRESS_FORMAT_PORT:
- if (ns_did.un.word == rscn_did.un.word)
+ if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+ && (ns_did.un.b.area == rscn_did.un.b.area)
+ && (ns_did.un.b.id == rscn_did.un.b.id))
goto return_did_out;
break;
case RSCN_ADDRESS_FORMAT_AREA:
@@ -4300,7 +4371,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_init_link(phba, mbox,
phba->cfg_topology,
phba->cfg_link_speed);
- mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+ mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4440,8 +4511,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
static void
lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
MAILBOX_t *mb;
IOCB_t *icmd;
RPS_RSP *rps_rsp;
@@ -4451,7 +4520,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint16_t xri, status;
uint32_t cmdsize;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
ndlp = (struct lpfc_nodelist *) pmb->context2;
xri = (uint16_t) ((unsigned long)(pmb->context1));
@@ -4507,7 +4576,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_rpi);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
lpfc_els_free_iocb(phba, elsiocb);
return;
}
@@ -4616,8 +4685,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
IOCB_t *icmd, *oldcmd;
RPL_RSP rpl_rsp;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
uint8_t *pcmd;
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4654,7 +4721,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
ndlp->nlp_rpi);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -4883,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
} else {
/* FAN verified - skip FLOGI */
vport->fc_myDID = vport->fc_prevDID;
- lpfc_issue_fabric_reglogin(vport);
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ lpfc_issue_fabric_reglogin(vport);
+ else
+ lpfc_issue_reg_vfi(vport);
}
}
return 0;
@@ -5566,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dropit:
if (vport && !(vport->load_flag & FC_UNLOADING))
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "(%d):0111 Dropping received ELS cmd "
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0111 Dropping received ELS cmd "
"Data: x%x x%x x%x\n",
- vport->vpi, icmd->ulpStatus,
- icmd->un.ulpWord[4], icmd->ulpTimeout);
+ icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
phba->fc_stat.elsRcvDrop++;
}
@@ -5646,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
if (icmd->unsli3.rcvsli3.vpi == 0xffff)
vport = phba->pport;
- else {
- uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
- vport = lpfc_find_vport_by_vpid(phba, vpi);
- }
+ else
+ vport = lpfc_find_vport_by_vpid(phba,
+ icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
}
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
@@ -5781,7 +5850,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5818,7 +5887,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
if (vport == phba->pport)
- lpfc_issue_fabric_reglogin(vport);
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ lpfc_issue_fabric_reglogin(vport);
+ else
+ lpfc_issue_reg_vfi(vport);
else
lpfc_do_scr_ns_plogi(phba, vport);
}
@@ -5850,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
+ lpfc_reg_vpi(vport, mbox);
mbox->vport = vport;
mbox->context2 = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@@ -6139,7 +6211,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
@@ -6169,7 +6240,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
spin_unlock_irq(shost->host_lock);
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
spin_unlock_irq(shost->host_lock);
@@ -6224,7 +6296,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
struct lpfc_iocbq *iocb;
unsigned long iflags;
int ret;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
IOCB_t *cmd;
repeat:
@@ -6248,7 +6319,7 @@ repeat:
"Fabric sched1: ste:x%x",
iocb->vport->port_state, 0, 0);
- ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
if (ret == IOCB_ERROR) {
iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6394,7 +6465,6 @@ static int
lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
unsigned long iflags;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
int ready;
int ret;
@@ -6418,7 +6488,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
"Fabric sched2: ste:x%x",
iocb->vport->port_state, 0, 0);
- ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
if (ret == IOCB_ERROR) {
iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6524,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
+
+/**
+ * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the els xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 slow-path
+ * ELS aborted xri.
+ **/
+void
+lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+ list_for_each_entry_safe(sglq_entry, sglq_next,
+ &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+ if (sglq_entry->sli4_xritag == xri) {
+ list_del(&sglq_entry->list);
+ spin_unlock_irqrestore(
+ &phba->sli4_hba.abts_sgl_list_lock,
+ iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+
+ list_add_tail(&sglq_entry->list,
+ &phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e764ce0bf70..35c41ae75be 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,10 +29,12 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
@@ -273,6 +275,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
(ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+
+ lpfc_unregister_unused_fcf(phba);
}
/**
@@ -295,10 +299,11 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
ret = kzalloc(sizeof(struct lpfc_fast_path_event),
GFP_ATOMIC);
- if (ret)
+ if (ret) {
atomic_inc(&phba->fast_event_count);
- INIT_LIST_HEAD(&ret->work_evt.evt_listp);
- ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+ INIT_LIST_HEAD(&ret->work_evt.evt_listp);
+ ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+ }
return ret;
}
@@ -491,6 +496,10 @@ lpfc_work_done(struct lpfc_hba *phba)
phba->work_ha = 0;
spin_unlock_irq(&phba->hbalock);
+ /* First, try to post the next mailbox command to SLI4 device */
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+ lpfc_sli4_post_async_mbox(phba);
+
if (ha_copy & HA_ERATT)
/* Handle the error attention event */
lpfc_handle_eratt(phba);
@@ -501,9 +510,27 @@ lpfc_work_done(struct lpfc_hba *phba)
if (ha_copy & HA_LATT)
lpfc_handle_latt(phba);
+ /* Process SLI4 events */
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+ if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
+ lpfc_sli4_fcp_xri_abort_event_proc(phba);
+ if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
+ lpfc_sli4_els_xri_abort_event_proc(phba);
+ if (phba->hba_flag & ASYNC_EVENT)
+ lpfc_sli4_async_event_proc(phba);
+ if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+ }
+ if (phba->hba_flag & HBA_RECEIVE_BUFFER)
+ lpfc_sli4_handle_received_buffer(phba);
+ }
+
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i <= phba->max_vpi; i++) {
+ for (i = 0; i <= phba->max_vports; i++) {
/*
* We could have no vports in array if unloading, so if
* this happens then just use the pport
@@ -555,23 +582,24 @@ lpfc_work_done(struct lpfc_hba *phba)
/*
* Turn on Ring interrupts
*/
- spin_lock_irq(&phba->hbalock);
- control = readl(phba->HCregaddr);
- if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
- lpfc_debugfs_slow_ring_trc(phba,
- "WRK Enable ring: cntl:x%x hacopy:x%x",
- control, ha_copy, 0);
-
- control |= (HC_R0INT_ENA << LPFC_ELS_RING);
- writel(control, phba->HCregaddr);
- readl(phba->HCregaddr); /* flush */
- }
- else {
- lpfc_debugfs_slow_ring_trc(phba,
- "WRK Ring ok: cntl:x%x hacopy:x%x",
- control, ha_copy, 0);
+ if (phba->sli_rev <= LPFC_SLI_REV3) {
+ spin_lock_irq(&phba->hbalock);
+ control = readl(phba->HCregaddr);
+ if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
+ lpfc_debugfs_slow_ring_trc(phba,
+ "WRK Enable ring: cntl:x%x hacopy:x%x",
+ control, ha_copy, 0);
+
+ control |= (HC_R0INT_ENA << LPFC_ELS_RING);
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ } else {
+ lpfc_debugfs_slow_ring_trc(phba,
+ "WRK Ring ok: cntl:x%x hacopy:x%x",
+ control, ha_copy, 0);
+ }
+ spin_unlock_irq(&phba->hbalock);
}
- spin_unlock_irq(&phba->hbalock);
}
lpfc_work_list_done(phba);
}
@@ -689,7 +717,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
lpfc_can_disctmo(vport);
}
-static void
+void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -716,6 +744,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
if (phba->link_state == LPFC_LINK_DOWN)
return 0;
spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
if (phba->link_state > LPFC_LINK_DOWN) {
phba->link_state = LPFC_LINK_DOWN;
phba->pport->fc_flag &= ~FC_LBIT;
@@ -723,7 +752,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
/* Issue a LINK DOWN event to all nodes */
lpfc_linkdown_port(vports[i]);
}
@@ -833,10 +862,11 @@ lpfc_linkup(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
lpfc_linkup_port(vports[i]);
lpfc_destroy_vport_work_array(phba, vports);
- if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (phba->sli_rev < LPFC_SLI_REV4))
lpfc_issue_clear_la(phba, phba->pport);
return 0;
@@ -854,7 +884,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_sli *psli = &phba->sli;
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
uint32_t control;
/* Since we don't do discovery right now, turn these off here */
@@ -917,7 +947,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- if (pmb->mb.mbxStatus)
+ if (pmb->u.mb.mbxStatus)
goto out;
mempool_free(pmb, phba->mbox_mem_pool);
@@ -945,7 +975,7 @@ out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"0306 CONFIG_LINK mbxStatus error x%x "
"HBA state x%x\n",
- pmb->mb.mbxStatus, vport->port_state);
+ pmb->u.mb.mbxStatus, vport->port_state);
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_linkdown(phba);
@@ -959,9 +989,592 @@ out:
}
static void
+lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_vport *vport = mboxq->vport;
+ unsigned long flags;
+
+ if (mboxq->u.mb.mbxStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "2017 REG_FCFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return;
+ }
+
+ /* Start FCoE discovery by sending a FLOGI. */
+ phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
+ /* Set the FCFI registered flag */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ phba->fcf.fcf_flag |= FCF_REGISTERED;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (vport->port_state != LPFC_FLOGI) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_initial_flogi(vport);
+ }
+
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return;
+}
+
+/**
+ * lpfc_fab_name_match - Check if the fcf fabric name matches.
+ * @fab_name: pointer to fabric name.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's fabric name with the provided
+ * fabric name. If the fabric names are identical this function
+ * returns 1, else it returns 0.
+ **/
+static uint32_t
+lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
+{
+ if ((fab_name[0] ==
+ bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
+ (fab_name[1] ==
+ bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
+ (fab_name[2] ==
+ bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
+ (fab_name[3] ==
+ bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
+ (fab_name[4] ==
+ bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
+ (fab_name[5] ==
+ bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
+ (fab_name[6] ==
+ bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
+ (fab_name[7] ==
+ bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * lpfc_mac_addr_match - Check if the fcf mac address matches.
+ * @phba: pointer to lpfc hba data structure.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's mac address with the HBA's
+ * FCF mac address. If the mac addresses are identical this function
+ * returns 1, else it returns 0.
+ **/
+static uint32_t
+lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
+{
+ if ((phba->fcf.mac_addr[0] ==
+ bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
+ (phba->fcf.mac_addr[1] ==
+ bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
+ (phba->fcf.mac_addr[2] ==
+ bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
+ (phba->fcf.mac_addr[3] ==
+ bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
+ (phba->fcf.mac_addr[4] ==
+ bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
+ (phba->fcf.mac_addr[5] ==
+ bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
+ * @phba: pointer to lpfc hba data structure.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine copies the FCF information from the FCF
+ * record to the lpfc_hba data structure.
+ **/
+static void
+lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
+{
+ phba->fcf.fabric_name[0] =
+ bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
+ phba->fcf.fabric_name[1] =
+ bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
+ phba->fcf.fabric_name[2] =
+ bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
+ phba->fcf.fabric_name[3] =
+ bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
+ phba->fcf.fabric_name[4] =
+ bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
+ phba->fcf.fabric_name[5] =
+ bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
+ phba->fcf.fabric_name[6] =
+ bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
+ phba->fcf.fabric_name[7] =
+ bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
+ phba->fcf.mac_addr[0] =
+ bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
+ phba->fcf.mac_addr[1] =
+ bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
+ phba->fcf.mac_addr[2] =
+ bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
+ phba->fcf.mac_addr[3] =
+ bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
+ phba->fcf.mac_addr[4] =
+ bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
+ phba->fcf.mac_addr[5] =
+ bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
+ phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+ phba->fcf.priority = new_fcf_record->fip_priority;
+}
+
+/**
+ * lpfc_register_fcf - Register the FCF with the hba.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a register fcfi mailbox command to register
+ * the fcf with the HBA.
+ **/
+static void
+lpfc_register_fcf(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *fcf_mbxq;
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+
+ /* If the FCF is not available, do nothing. */
+ if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+ }
+
+ /* The FCF is already registered, start discovery */
+ if (phba->fcf.fcf_flag & FCF_REGISTERED) {
+ phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (phba->pport->port_state != LPFC_FLOGI)
+ lpfc_initial_flogi(phba->pport);
+ return;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!fcf_mbxq)
+ return;
+
+ lpfc_reg_fcfi(phba, fcf_mbxq);
+ fcf_mbxq->vport = phba->pport;
+ fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
+ rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(fcf_mbxq, phba->mbox_mem_pool);
+
+ return;
+}
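The allocate/prepare/issue sequence in lpfc_register_fcf() is the mailbox pattern used throughout this patch: take a mailbox from phba->mbox_mem_pool, let a prep helper fill it, set the vport and completion handler, issue with MBX_NOWAIT, and free it only if the issue returns MBX_NOT_FINISHED (otherwise the completion handler owns it). A minimal illustrative sketch of that pattern, not part of the patch; do_prep_mbox() and my_mbox_cmpl() are hypothetical stand-ins for prep routines such as lpfc_reg_fcfi() and their completion handlers:

    LPFC_MBOXQ_t *mbox;
    int rc;

    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
            return;                         /* nothing issued, nothing to free */
    do_prep_mbox(phba, mbox);               /* stand-in, e.g. lpfc_reg_fcfi() */
    mbox->vport = phba->pport;
    mbox->mbox_cmpl = my_mbox_cmpl;         /* handler frees mbox on completion */
    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
    if (rc == MBX_NOT_FINISHED)             /* never queued: caller must free */
            mempool_free(mbox, phba->mbox_mem_pool);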
+
+/**
+ * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
+ * @phba: pointer to lpfc hba data structure.
+ * @new_fcf_record: pointer to fcf record.
+ * @boot_flag: Indicates if this record is used by the boot BIOS.
+ * @addr_mode: The address mode to be used by this FCF.
+ * @vlan_id: The vlan id to be used with this FCF.
+ *
+ * This routine compares the fcf record with the connect list obtained from
+ * the config region to decide if this FCF can be used for SAN discovery. It
+ * returns 1 if this record can be used for SAN discovery, else it returns
+ * zero. If this FCF record can be used for SAN discovery, boot_flag will
+ * indicate whether this FCF is used by the boot BIOS and addr_mode will
+ * indicate the addressing mode to be used for this FCF when the function
+ * returns.
+ * If the FCF record must be used with a particular vlan id, the vlan is
+ * set in vlan_id on return from the function. If no VLAN tagging is to be
+ * used with the FCF, vlan_id will be set to 0xFFFF.
+ **/
+static int
+lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
+ struct fcf_record *new_fcf_record,
+ uint32_t *boot_flag, uint32_t *addr_mode,
+ uint16_t *vlan_id)
+{
+ struct lpfc_fcf_conn_entry *conn_entry;
+
+ if (!phba->cfg_enable_fip) {
+ *boot_flag = 0;
+ *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+ new_fcf_record);
+ if (phba->valid_vlan)
+ *vlan_id = phba->vlan_id;
+ else
+ *vlan_id = 0xFFFF;
+ return 1;
+ }
+
+ /*
+ * If there are no FCF connection table entries, the driver connects to
+ * all FCFs.
+ */
+ if (list_empty(&phba->fcf_conn_rec_list)) {
+ *boot_flag = 0;
+ *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+ new_fcf_record);
+ *vlan_id = 0xFFFF;
+ return 1;
+ }
+
+ list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
+ if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
+ continue;
+
+ if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
+ !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
+ new_fcf_record))
+ continue;
+
+ if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
+ /*
+ * If the vlan bit map does not have the bit set for the
+ * vlan id to be used, then it is not a match.
+ */
+ if (!(new_fcf_record->vlan_bitmap
+ [conn_entry->conn_rec.vlan_tag / 8] &
+ (1 << (conn_entry->conn_rec.vlan_tag % 8))))
+ continue;
+ }
+
+ /*
+ * Check if the connection record specifies a required
+ * addressing mode.
+ */
+ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+ !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
+
+ /*
+ * If SPMA is required but the FCF does not support it, continue.
+ */
+ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+ !(bf_get(lpfc_fcf_record_mac_addr_prov,
+ new_fcf_record) & LPFC_FCF_SPMA))
+ continue;
+
+ /*
+ * If FPMA is required but the FCF does not support it, continue.
+ */
+ if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+ !(bf_get(lpfc_fcf_record_mac_addr_prov,
+ new_fcf_record) & LPFC_FCF_FPMA))
+ continue;
+ }
+
+ /*
+ * This fcf record matches the filtering criteria.
+ */
+ if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
+ *boot_flag = 1;
+ else
+ *boot_flag = 0;
+
+ *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+ new_fcf_record);
+ /*
+ * If the user specified a required address mode, assign that
+ * address mode
+ */
+ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+ (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
+ *addr_mode = (conn_entry->conn_rec.flags &
+ FCFCNCT_AM_SPMA) ?
+ LPFC_FCF_SPMA : LPFC_FCF_FPMA;
+ /*
+ * If the user specified a preferred address mode, use that
+ * address mode only if the FCF supports it.
+ */
+ else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+ (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+ (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+ (*addr_mode & LPFC_FCF_SPMA))
+ *addr_mode = LPFC_FCF_SPMA;
+ else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+ (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+ !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+ (*addr_mode & LPFC_FCF_FPMA))
+ *addr_mode = LPFC_FCF_FPMA;
+ /*
+ * If the user did not specify any addressing mode, use FPMA if
+ * possible, else use SPMA.
+ */
+ else if (*addr_mode & LPFC_FCF_FPMA)
+ *addr_mode = LPFC_FCF_FPMA;
+
+ if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
+ *vlan_id = conn_entry->conn_rec.vlan_tag;
+ else
+ *vlan_id = 0xFFFF;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This function iterates through all the fcf records available in the
+ * HBA and chooses the optimal FCF record for discovery. After finding
+ * the FCF for discovery it registers the FCF record and kicks off
+ * discovery.
+ * If the FCF_IN_USE flag is set in the currently used FCF, the routine
+ * tries to use an FCF record which matches the fabric name and mac
+ * address of the currently used FCF record.
+ * If the driver supports only one FCF, it will try to use the FCF
+ * record used by the boot BIOS.
+ */
+void
+lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ void *virt_addr;
+ dma_addr_t phys_addr;
+ uint8_t *bytep;
+ struct lpfc_mbx_sge sge;
+ struct lpfc_mbx_read_fcf_tbl *read_fcf;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct fcf_record *new_fcf_record;
+ int rc;
+ uint32_t boot_flag, addr_mode;
+ uint32_t next_fcf_index;
+ unsigned long flags;
+ uint16_t vlan_id;
+
+ /* Get the first SGE entry from the non-embedded DMA memory. This
+ * routine only uses a single SGE.
+ */
+ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+ phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+ if (unlikely(!mboxq->sge_array)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2524 Failed to get the non-embedded SGE "
+ "virtual address\n");
+ goto out;
+ }
+ virt_addr = mboxq->sge_array->addr[0];
+
+ shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ /*
+ * The FCF Record was read and there is no reason for the driver
+ * to maintain the FCF record data or memory. Instead, the driver
+ * only needs to keep track of which FCFIs can be used.
+ */
+ if (shdr_status || shdr_add_status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2521 READ_FCF_RECORD mailbox failed "
+ "with status x%x add_status x%x, mbx\n",
+ shdr_status, shdr_add_status);
+ goto out;
+ }
+ /* Interpreting the returned information of FCF records */
+ read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+ lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
+ sizeof(struct lpfc_mbx_read_fcf_tbl));
+ next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
+
+ new_fcf_record = (struct fcf_record *)(virt_addr +
+ sizeof(struct lpfc_mbx_read_fcf_tbl));
+ lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
+ sizeof(struct fcf_record));
+ bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+
+ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
+ &boot_flag, &addr_mode,
+ &vlan_id);
+ /*
+ * If the fcf record does not match the connect list entries,
+ * read the next entry.
+ */
+ if (!rc)
+ goto read_next_fcf;
+ /*
+ * If this is not the first FCF discovery of the HBA, use the
+ * last FCF record for the discovery.
+ */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (phba->fcf.fcf_flag & FCF_IN_USE) {
+ if (lpfc_fab_name_match(phba->fcf.fabric_name,
+ new_fcf_record) &&
+ lpfc_mac_addr_match(phba, new_fcf_record)) {
+ phba->fcf.fcf_flag |= FCF_AVAILABLE;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ goto read_next_fcf;
+ }
+ if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
+ /*
+ * If the current FCF record does not have the boot flag
+ * set and the new fcf record has the boot flag set, use
+ * the new fcf record.
+ */
+ if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
+ /* Use this FCF record */
+ lpfc_copy_fcf_record(phba, new_fcf_record);
+ phba->fcf.addr_mode = addr_mode;
+ phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
+ if (vlan_id != 0xFFFF) {
+ phba->fcf.fcf_flag |= FCF_VALID_VLAN;
+ phba->fcf.vlan_id = vlan_id;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ goto read_next_fcf;
+ }
+ /*
+ * If the current FCF record has boot flag set and the
+ * new FCF record does not have boot flag, read the next
+ * FCF record.
+ */
+ if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ goto read_next_fcf;
+ }
+ /*
+ * If there is a record with lower priority value for
+ * the current FCF, use that record.
+ */
+ if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
+ && (new_fcf_record->fip_priority <
+ phba->fcf.priority)) {
+ /* Use this FCF record */
+ lpfc_copy_fcf_record(phba, new_fcf_record);
+ phba->fcf.addr_mode = addr_mode;
+ if (vlan_id != 0xFFFF) {
+ phba->fcf.fcf_flag |= FCF_VALID_VLAN;
+ phba->fcf.vlan_id = vlan_id;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ goto read_next_fcf;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ goto read_next_fcf;
+ }
+ /*
+ * This is the first available FCF record, use this
+ * record.
+ */
+ lpfc_copy_fcf_record(phba, new_fcf_record);
+ phba->fcf.addr_mode = addr_mode;
+ if (boot_flag)
+ phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
+ phba->fcf.fcf_flag |= FCF_AVAILABLE;
+ if (vlan_id != 0xFFFF) {
+ phba->fcf.fcf_flag |= FCF_VALID_VLAN;
+ phba->fcf.vlan_id = vlan_id;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ goto read_next_fcf;
+
+read_next_fcf:
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
+ lpfc_register_fcf(phba);
+ else
+ lpfc_sli4_read_fcf_record(phba, next_fcf_index);
+ return;
+
+out:
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ lpfc_register_fcf(phba);
+
+ return;
+}
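For reference, the scan that ends in this completion handler is started elsewhere in the patch (see lpfc_mbx_process_link_up() and lpfc_unregister_unused_fcf() below) by requesting the first table entry. A minimal illustration, not part of the patch, with an unnumbered log message used only for the example:

    int rc;

    /* Request the first FCF table entry; lpfc_mbx_cmpl_read_fcf_record()
     * re-issues the read with next_fcf_index until the port returns
     * LPFC_FCOE_FCF_NEXT_NONE (or 0), then calls lpfc_register_fcf().
     */
    rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
    if (rc)
            lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                            "Failed to start FCF table scan\n");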
+
+/**
+ * lpfc_start_fdiscs - send fdiscs for each vport on this port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function loops through the list of vports on the @phba and issues an
+ * FDISC if possible.
+ */
+void
+lpfc_start_fdiscs(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+ continue;
+ /* There is no vpi for this vport */
+ if (vports[i]->vpi > phba->max_vpi) {
+ lpfc_vport_set_state(vports[i],
+ FC_VPORT_FAILED);
+ continue;
+ }
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ lpfc_vport_set_state(vports[i],
+ FC_VPORT_LINKDOWN);
+ continue;
+ }
+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+ lpfc_initial_fdisc(vports[i]);
+ else {
+ lpfc_vport_set_state(vports[i],
+ FC_VPORT_NO_FABRIC_SUPP);
+ lpfc_printf_vlog(vports[i], KERN_ERR,
+ LOG_ELS,
+ "0259 No NPIV "
+ "Fabric support\n");
+ }
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+void
+lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_dmabuf *dmabuf = mboxq->context1;
+ struct lpfc_vport *vport = mboxq->vport;
+
+ if (mboxq->u.mb.mbxStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "2018 REG_VFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ /* FLOGI failed, use loop map to make discovery list */
+ lpfc_disc_list_loopmap(vport);
+ /* Start discovery */
+ lpfc_disc_start(vport);
+ goto fail_free_mem;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ goto fail_free_mem;
+ }
+ /* Mark the vport as registered with its VFI */
+ vport->vfi_state |= LPFC_VFI_REGISTERED;
+
+ if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+ lpfc_start_fdiscs(phba);
+ lpfc_do_scr_ns_plogi(phba, vport);
+ }
+
+fail_free_mem:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ return;
+}
+
+static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
struct lpfc_vport *vport = pmb->vport;
@@ -1012,13 +1625,13 @@ static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
struct lpfc_vport *vport = phba->pport;
- LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
+ LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
int i;
struct lpfc_dmabuf *mp;
int rc;
+ struct fcf_record *fcf_record;
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
spin_lock_irq(&phba->hbalock);
switch (la->UlnkSpeed) {
@@ -1034,6 +1647,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
case LA_8GHZ_LINK:
phba->fc_linkspeed = LA_8GHZ_LINK;
break;
+ case LA_10GHZ_LINK:
+ phba->fc_linkspeed = LA_10GHZ_LINK;
+ break;
default:
phba->fc_linkspeed = LA_UNKNW_LINK;
break;
@@ -1115,22 +1731,66 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(sparam_mbox, phba->mbox_mem_pool);
- if (cfglink_mbox)
- mempool_free(cfglink_mbox, phba->mbox_mem_pool);
goto out;
}
}
- if (cfglink_mbox) {
+ if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
+ cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!cfglink_mbox)
+ goto out;
vport->port_state = LPFC_LOCAL_CFG_LINK;
lpfc_config_link(phba, cfglink_mbox);
cfglink_mbox->vport = vport;
cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
- if (rc != MBX_NOT_FINISHED)
- return;
- mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ } else {
+ /*
+ * Add the driver's default FCF record at FCF index 0 now. This
+ * is a phase 1 implementation that supports FCF index 0 and driver
+ * defaults.
+ */
+ if (phba->cfg_enable_fip == 0) {
+ fcf_record = kzalloc(sizeof(struct fcf_record),
+ GFP_KERNEL);
+ if (unlikely(!fcf_record)) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_MBOX | LOG_SLI,
+ "2554 Could not allocate memmory for "
+ "fcf record\n");
+ rc = -ENODEV;
+ goto out;
+ }
+
+ lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
+ LPFC_FCOE_FCF_DEF_INDEX);
+ rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_MBOX | LOG_SLI,
+ "2013 Could not manually add FCF "
+ "record 0, status %d\n", rc);
+ rc = -ENODEV;
+ kfree(fcf_record);
+ goto out;
+ }
+ kfree(fcf_record);
+ }
+ /*
+ * The driver is expected to do FIP/FCF. Call the port
+ * and get the FCF Table.
+ */
+ rc = lpfc_sli4_read_fcf_record(phba,
+ LPFC_FCOE_FCF_GET_FIRST);
+ if (rc)
+ goto out;
}
+
+ return;
out:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1147,10 +1807,12 @@ lpfc_enable_la(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_PROCESS_LA;
- control = readl(phba->HCregaddr);
- control |= HC_LAINT_ENA;
- writel(control, phba->HCregaddr);
- readl(phba->HCregaddr); /* flush */
+ if (phba->sli_rev <= LPFC_SLI_REV3) {
+ control = readl(phba->HCregaddr);
+ control |= HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ }
spin_unlock_irq(&phba->hbalock);
}
@@ -1159,6 +1821,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
lpfc_linkdown(phba);
lpfc_enable_la(phba);
+ lpfc_unregister_unused_fcf(phba);
/* turn on Link Attention interrupts - no CLEAR_LA needed */
}
@@ -1175,7 +1838,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
READ_LA_VAR *la;
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
/* Unblock ELS traffic */
@@ -1190,7 +1853,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto lpfc_mbx_cmpl_read_la_free_mbuf;
}
- la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
+ la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
memcpy(&phba->alpa_map[0], mp->virt, 128);
@@ -1328,7 +1991,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -1381,7 +2044,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
switch (mb->mbxStatus) {
case 0x0011:
@@ -1416,6 +2079,128 @@ out:
return;
}
+/**
+ * lpfc_create_static_vport - Read HBA config region to create static vports.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a DUMP mailbox command for config region 22 to get
+ * the list of static vports to be created. The function creates vports
+ * based on the information returned from the HBA.
+ **/
+void
+lpfc_create_static_vport(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmb = NULL;
+ MAILBOX_t *mb;
+ struct static_vport_info *vport_info;
+ int rc, i;
+ struct fc_vport_identifiers vport_id;
+ struct fc_vport *new_fc_vport;
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ uint16_t offset = 0;
+ uint8_t *vport_buff;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0542 lpfc_create_static_vport failed to"
+ " allocate mailbox memory\n");
+ return;
+ }
+
+ mb = &pmb->u.mb;
+
+ vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
+ if (!vport_info) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0543 lpfc_create_static_vport failed to"
+ " allocate vport_info\n");
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+ }
+
+ vport_buff = (uint8_t *) vport_info;
+ do {
+ lpfc_dump_static_vport(phba, pmb, offset);
+ pmb->vport = phba->pport;
+ rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
+
+ if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0544 lpfc_create_static_vport failed to"
+ " issue dump mailbox command ret 0x%x "
+ "status 0x%x\n",
+ rc, mb->mbxStatus);
+ goto out;
+ }
+
+ if (mb->un.varDmp.word_cnt >
+ sizeof(struct static_vport_info) - offset)
+ mb->un.varDmp.word_cnt =
+ sizeof(struct static_vport_info) - offset;
+
+ lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+ vport_buff + offset,
+ mb->un.varDmp.word_cnt);
+ offset += mb->un.varDmp.word_cnt;
+
+ } while (mb->un.varDmp.word_cnt &&
+ offset < sizeof(struct static_vport_info));
+
+
+ if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
+ ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
+ != VPORT_INFO_REV)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0545 lpfc_create_static_vport bad"
+ " information header 0x%x 0x%x\n",
+ le32_to_cpu(vport_info->signature),
+ le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
+
+ goto out;
+ }
+
+ shost = lpfc_shost_from_vport(phba->pport);
+
+ for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
+ memset(&vport_id, 0, sizeof(vport_id));
+ vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
+ vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
+ if (!vport_id.port_name || !vport_id.node_name)
+ continue;
+
+ vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vport_id.vport_type = FC_PORTTYPE_NPIV;
+ vport_id.disable = false;
+ new_fc_vport = fc_vport_create(shost, 0, &vport_id);
+
+ if (!new_fc_vport) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0546 lpfc_create_static_vport failed to"
+ " create vport \n");
+ continue;
+ }
+
+ vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
+ vport->vport_flag |= STATIC_VPORT;
+ }
+
+out:
+ /*
+ * If this is a timed-out command, setting context2 to NULL tells the
+ * SLI layer not to use this buffer.
+ */
+ spin_lock_irq(&phba->hbalock);
+ pmb->context2 = NULL;
+ spin_unlock_irq(&phba->hbalock);
+ kfree(vport_info);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return;
+}
+
/*
* This routine handles processing a Fabric REG_LOGIN mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
@@ -1426,16 +2211,17 @@ void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp;
- struct lpfc_vport **vports;
- int i;
ndlp = (struct lpfc_nodelist *) pmb->context2;
pmb->context1 = NULL;
pmb->context2 = NULL;
if (mb->mbxStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "0258 Register Fabric login error: 0x%x\n",
+ mb->mbxStatus);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
@@ -1454,9 +2240,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "0258 Register Fabric login error: 0x%x\n",
- mb->mbxStatus);
/* Decrement the reference count to ndlp after the reference
* to the ndlp are done.
*/
@@ -1465,34 +2248,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
ndlp->nlp_rpi = mb->un.varWords[0];
+ ndlp->nlp_flag |= NLP_RPI_VALID;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
- vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL)
- for(i = 0;
- i <= phba->max_vpi && vports[i] != NULL;
- i++) {
- if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
- continue;
- if (phba->fc_topology == TOPOLOGY_LOOP) {
- lpfc_vport_set_state(vports[i],
- FC_VPORT_LINKDOWN);
- continue;
- }
- if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
- lpfc_initial_fdisc(vports[i]);
- else {
- lpfc_vport_set_state(vports[i],
- FC_VPORT_NO_FABRIC_SUPP);
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_ELS,
- "0259 No NPIV "
- "Fabric support\n");
- }
- }
- lpfc_destroy_vport_work_array(phba, vports);
+ lpfc_start_fdiscs(phba);
lpfc_do_scr_ns_plogi(phba, vport);
}
@@ -1516,13 +2277,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
struct lpfc_vport *vport = pmb->vport;
if (mb->mbxStatus) {
out:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0260 Register NameServer error: 0x%x\n",
+ mb->mbxStatus);
/* decrement the node reference count held for this
* callback function.
*/
@@ -1546,15 +2310,13 @@ out:
return;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0260 Register NameServer error: 0x%x\n",
- mb->mbxStatus);
return;
}
pmb->context1 = NULL;
ndlp->nlp_rpi = mb->un.varWords[0];
+ ndlp->nlp_flag |= NLP_RPI_VALID;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -2055,7 +2817,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
if (pring->ringno == LPFC_ELS_RING) {
switch (icmd->ulpCommand) {
case CMD_GEN_REQUEST64_CR:
- if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
+ if (iocb->context_un.ndlp == ndlp)
return 1;
case CMD_ELS_REQUEST64_CR:
if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
@@ -2102,7 +2864,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
*/
psli = &phba->sli;
rpi = ndlp->nlp_rpi;
- if (rpi) {
+ if (ndlp->nlp_flag & NLP_RPI_VALID) {
/* Now process each ring */
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
@@ -2150,7 +2912,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
LPFC_MBOXQ_t *mbox;
int rc;
- if (ndlp->nlp_rpi) {
+ if (ndlp->nlp_flag & NLP_RPI_VALID) {
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -2162,6 +2924,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
}
lpfc_no_rpi(phba, ndlp);
ndlp->nlp_rpi = 0;
+ ndlp->nlp_flag &= ~NLP_RPI_VALID;
return 1;
}
return 0;
@@ -2252,7 +3015,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
- if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+ if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
mb->context2 = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -2261,7 +3024,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
- if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+ if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
mp = (struct lpfc_dmabuf *) (mb->context1);
if (mp) {
@@ -2309,13 +3072,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
int rc;
lpfc_cancel_retry_delay_tmo(vport, ndlp);
- if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
+ if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
+ !(ndlp->nlp_flag & NLP_RPI_VALID)) {
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/
if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
!= NULL) {
- rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID,
+ rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
(uint8_t *) &vport->fc_sparam, mbox, 0);
if (rc) {
mempool_free(mbox, phba->mbox_mem_pool);
@@ -2553,7 +3317,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
* clear_la then don't send it.
*/
if ((phba->link_state >= LPFC_CLEAR_LA) ||
- (vport->port_type != LPFC_PHYSICAL_PORT))
+ (vport->port_type != LPFC_PHYSICAL_PORT) ||
+ (phba->sli_rev == LPFC_SLI_REV4))
return;
/* Link up discovery */
@@ -2582,7 +3347,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (regvpimbox) {
- lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
+ lpfc_reg_vpi(vport, regvpimbox);
regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
regvpimbox->vport = vport;
if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
@@ -2642,7 +3407,8 @@ lpfc_disc_start(struct lpfc_vport *vport)
*/
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_PT2PT) &&
- !(vport->fc_flag & FC_RSCN_MODE)) {
+ !(vport->fc_flag & FC_RSCN_MODE) &&
+ (phba->sli_rev < LPFC_SLI_REV4)) {
lpfc_issue_reg_vpi(phba, vport);
return;
}
@@ -2919,11 +3685,13 @@ restart_disc:
* set port_state to PORT_READY if SLI2.
* cmpl_reg_vpi will set port_state to READY for SLI3.
*/
- if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
- lpfc_issue_reg_vpi(phba, vport);
- else { /* NPIV Not enabled */
- lpfc_issue_clear_la(phba, vport);
- vport->port_state = LPFC_VPORT_READY;
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ lpfc_issue_reg_vpi(phba, vport);
+ else { /* NPIV Not enabled */
+ lpfc_issue_clear_la(phba, vport);
+ vport->port_state = LPFC_VPORT_READY;
+ }
}
/* Setup and issue mailbox INITIALIZE LINK command */
@@ -2939,7 +3707,7 @@ restart_disc:
lpfc_linkdown(phba);
lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
phba->cfg_link_speed);
- initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+ initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
initlinkmbox->vport = vport;
initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
@@ -2959,11 +3727,13 @@ restart_disc:
* set port_state to PORT_READY if SLI2.
* cmpl_reg_vpi will set port_state to READY for SLI3.
*/
- if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
- lpfc_issue_reg_vpi(phba, vport);
- else { /* NPIV Not enabled */
- lpfc_issue_clear_la(phba, vport);
- vport->port_state = LPFC_VPORT_READY;
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ lpfc_issue_reg_vpi(phba, vport);
+ else { /* NPIV Not enabled */
+ lpfc_issue_clear_la(phba, vport);
+ vport->port_state = LPFC_VPORT_READY;
+ }
}
break;
@@ -3036,7 +3806,7 @@ restart_disc:
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
struct lpfc_vport *vport = pmb->vport;
@@ -3044,6 +3814,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->context1 = NULL;
ndlp->nlp_rpi = mb->un.varWords[0];
+ ndlp->nlp_flag |= NLP_RPI_VALID;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3297,3 +4068,395 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
return 1;
return 0;
}
+
+/**
+ * lpfc_fcf_inuse - Check if FCF can be unregistered.
+ * @phba: Pointer to hba context object.
+ *
+ * This function iterates through all FC nodes associated
+ * with all vports to check if there is any node with an
+ * fc_rport associated with it. If there is an fc_rport
+ * associated with the node, then the node is either in
+ * discovered state or its devloss_timer is pending.
+ */
+static int
+lpfc_fcf_inuse(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ int i, ret = 0;
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+
+ vports = lpfc_create_vport_work_array(phba);
+
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
+ if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
+ (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ ret = 1;
+ spin_unlock_irq(shost->host_lock);
+ goto out;
+ }
+ }
+ spin_unlock_irq(shost->host_lock);
+ }
+out:
+ lpfc_destroy_vport_work_array(phba, vports);
+ return ret;
+}
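The per-vport walk above relies on the driver's work-array helpers, the same idiom used throughout this file: snapshot the vports, iterate up to phba->max_vports, stop at the first NULL slot, then release the snapshot. A minimal sketch of the idiom (including the NULL check on the array itself that the other call sites in this file perform), illustration only:

    struct lpfc_vport **vports;
    int i;

    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL)
            for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                    /* operate on vports[i] */
            }
    lpfc_destroy_vport_work_array(phba, vports);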
+
+/**
+ * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
+ * @phba: Pointer to hba context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This function frees memory associated with the mailbox command.
+ */
+static void
+lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_vport *vport = mboxq->vport;
+
+ if (mboxq->u.mb.mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2555 UNREG_VFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
+ }
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return;
+}
+
+/**
+ * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
+ * @phba: Pointer to hba context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This function frees memory associated with the mailbox command.
+ */
+static void
+lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_vport *vport = mboxq->vport;
+
+ if (mboxq->u.mb.mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2550 UNREG_FCFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
+ }
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return;
+}
+
+/**
+ * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
+ * @phba: Pointer to hba context object.
+ *
+ * This function checks if there are any connected remote ports for the FCF
+ * and, if all the devices are disconnected, unregisters the FCFI.
+ * This function also tries to use another FCF for discovery.
+ */
+void
+lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+ struct lpfc_vport **vports;
+ int i;
+
+ spin_lock_irq(&phba->hbalock);
+ /*
+ * If the HBA is not running in FIP mode, or
+ * the HBA does not support FCoE, or
+ * the FCF is not registered,
+ * do nothing.
+ */
+ if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
+ !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
+ (phba->cfg_enable_fip == 0)) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ if (lpfc_fcf_inuse(phba))
+ return;
+
+
+ /* Unregister VPIs */
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports &&
+ (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ lpfc_mbx_unreg_vpi(vports[i]);
+ vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+
+ /* Unregister VFI */
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2556 UNREG_VFI mbox allocation failed"
+ "HBA state x%x\n",
+ phba->pport->port_state);
+ return;
+ }
+
+ lpfc_unreg_vfi(mbox, phba->pport->vfi);
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2557 UNREG_VFI issue mbox failed rc x%x "
+ "HBA state x%x\n",
+ rc, phba->pport->port_state);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return;
+ }
+
+ /* Unregister FCF */
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2551 UNREG_FCFI mbox allocation failed"
+ "HBA state x%x\n",
+ phba->pport->port_state);
+ return;
+ }
+
+ lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2552 UNREG_FCFI issue mbox failed rc x%x "
+ "HBA state x%x\n",
+ rc, phba->pport->port_state);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return;
+ }
+
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
+ FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
+ FCF_VALID_VLAN);
+ spin_unlock_irq(&phba->hbalock);
+
+ /*
+ * If the driver is not unloading and the link is still up, check
+ * whether any other FCF record can be used for discovery.
+ */
+ if ((phba->pport->load_flag & FC_UNLOADING) ||
+ (phba->link_state < LPFC_LINK_UP))
+ return;
+
+ rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+
+ if (rc)
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2553 lpfc_unregister_unused_fcf failed to read FCF"
+ " record HBA state x%x\n",
+ phba->pport->port_state);
+}
+
+/**
+ * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
+ * @phba: Pointer to hba context object.
+ * @buff: Buffer containing the FCF connection table as in the config
+ * region.
+ *
+ * This function creates the driver data structures for the FCF connection
+ * record table read from config region 23.
+ */
+static void
+lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
+ uint8_t *buff)
+{
+ struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+ struct lpfc_fcf_conn_hdr *conn_hdr;
+ struct lpfc_fcf_conn_rec *conn_rec;
+ uint32_t record_count;
+ int i;
+
+ /* Free the current connect table */
+ list_for_each_entry_safe(conn_entry, next_conn_entry,
+ &phba->fcf_conn_rec_list, list)
+ kfree(conn_entry);
+
+ conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
+ record_count = conn_hdr->length * sizeof(uint32_t)/
+ sizeof(struct lpfc_fcf_conn_rec);
+
+ conn_rec = (struct lpfc_fcf_conn_rec *)
+ (buff + sizeof(struct lpfc_fcf_conn_hdr));
+
+ for (i = 0; i < record_count; i++) {
+ if (!(conn_rec[i].flags & FCFCNCT_VALID))
+ continue;
+ conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
+ GFP_KERNEL);
+ if (!conn_entry) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2566 Failed to allocate connection"
+ " table entry\n");
+ return;
+ }
+
+ memcpy(&conn_entry->conn_rec, &conn_rec[i],
+ sizeof(struct lpfc_fcf_conn_rec));
+ conn_entry->conn_rec.vlan_tag =
+ le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
+ conn_entry->conn_rec.flags =
+ le16_to_cpu(conn_entry->conn_rec.flags);
+ list_add_tail(&conn_entry->list,
+ &phba->fcf_conn_rec_list);
+ }
+}
+
+/**
+ * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
+ * @phba: Pointer to hba context object.
+ * @buff: Buffer containing the FCoE parameter data structure.
+ *
+ * This function updates the driver data structure with config
+ * parameters read from config region 23.
+ */
+static void
+lpfc_read_fcoe_param(struct lpfc_hba *phba,
+ uint8_t *buff)
+{
+ struct lpfc_fip_param_hdr *fcoe_param_hdr;
+ struct lpfc_fcoe_params *fcoe_param;
+
+ fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
+ buff;
+ fcoe_param = (struct lpfc_fcoe_params *)
+ (buff + sizeof(struct lpfc_fip_param_hdr));
+
+ if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
+ (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
+ return;
+
+ if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
+ FIPP_MODE_ON)
+ phba->cfg_enable_fip = 1;
+
+ if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
+ FIPP_MODE_OFF)
+ phba->cfg_enable_fip = 0;
+
+ if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
+ phba->valid_vlan = 1;
+ phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
+ 0xFFF;
+ }
+
+ phba->fc_map[0] = fcoe_param->fc_map[0];
+ phba->fc_map[1] = fcoe_param->fc_map[1];
+ phba->fc_map[2] = fcoe_param->fc_map[2];
+ return;
+}
+
+/**
+ * lpfc_get_rec_conf23 - Get a record type in config region data.
+ * @buff: Buffer containing config region 23 data.
+ * @size: Size of the data buffer.
+ * @rec_type: Record type to be searched.
+ *
+ * This function searches the config region data to find the beginning
+ * of the record specified by rec_type. If the record is found, this
+ * function returns a pointer to the record, else it returns NULL.
+ */
+static uint8_t *
+lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
+{
+ uint32_t offset = 0, rec_length;
+
+ if ((buff[0] == LPFC_REGION23_LAST_REC) ||
+ (size < sizeof(uint32_t)))
+ return NULL;
+
+ rec_length = buff[offset + 1];
+
+ /*
+ * One TLV record has a one-word header plus the number of data words
+ * specified in the rec_length field of the record header.
+ */
+ while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
+ <= size) {
+ if (buff[offset] == rec_type)
+ return &buff[offset];
+
+ if (buff[offset] == LPFC_REGION23_LAST_REC)
+ return NULL;
+
+ offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
+ rec_length = buff[offset + 1];
+ }
+ return NULL;
+}
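The loop above treats region 23 as a byte stream of TLV records: byte 0 of each record header is the type, byte 1 is the data length in 32-bit words, and each record occupies one header word plus that many data words. A hedged sketch of the same walk, shown without the size guard the driver applies, to make the offset arithmetic explicit (illustration only, not part of the patch):

    uint32_t off = 0;
    uint8_t len;

    while (buff[off] != LPFC_REGION23_LAST_REC) {
            if (buff[off] == FCOE_PARAM_TYPE)
                    break;          /* record of interest starts at &buff[off] */
            len = buff[off + 1];    /* data length in words */
            /* skip one header word plus 'len' data words */
            off += len * sizeof(uint32_t) + sizeof(uint32_t);
    }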
+
+/**
+ * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
+ * @phba: Pointer to lpfc_hba data structure.
+ * @buff: Buffer containing config region 23 data.
+ * @size: Size of the data buffer.
+ *
+ * This function parses the FCoE config parameters in config region 23 and
+ * populates the driver data structure with the parameters.
+ */
+void
+lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
+ uint8_t *buff,
+ uint32_t size)
+{
+ uint32_t offset = 0, rec_length;
+ uint8_t *rec_ptr;
+
+ /*
+ * If the data size is less than 2 words, the signature and version
+ * cannot be verified.
+ */
+ if (size < 2*sizeof(uint32_t))
+ return;
+
+ /* Check the region signature first */
+ if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2567 Config region 23 has bad signature\n");
+ return;
+ }
+
+ offset += 4;
+
+ /* Check the data structure version */
+ if (buff[offset] != LPFC_REGION23_VERSION) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2568 Config region 23 has bad version\n");
+ return;
+ }
+ offset += 4;
+
+ rec_length = buff[offset + 1];
+
+ /* Read FCoE param record */
+ rec_ptr = lpfc_get_rec_conf23(&buff[offset],
+ size - offset, FCOE_PARAM_TYPE);
+ if (rec_ptr)
+ lpfc_read_fcoe_param(phba, rec_ptr);
+
+ /* Read FCF connection table */
+ rec_ptr = lpfc_get_rec_conf23(&buff[offset],
+ size - offset, FCOE_CONN_TBL_TYPE);
+ if (rec_ptr)
+ lpfc_read_fcf_conn_tbl(phba, rec_ptr);
+
+}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 4168c7b498b..02aa016b93e 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -471,6 +471,35 @@ struct serv_parm { /* Structure is in Big Endian format */
};
/*
+ * Virtual Fabric Tagging Header
+ */
+struct fc_vft_header {
+ uint32_t word0;
+#define fc_vft_hdr_r_ctl_SHIFT 24
+#define fc_vft_hdr_r_ctl_MASK 0xFF
+#define fc_vft_hdr_r_ctl_WORD word0
+#define fc_vft_hdr_ver_SHIFT 22
+#define fc_vft_hdr_ver_MASK 0x3
+#define fc_vft_hdr_ver_WORD word0
+#define fc_vft_hdr_type_SHIFT 18
+#define fc_vft_hdr_type_MASK 0xF
+#define fc_vft_hdr_type_WORD word0
+#define fc_vft_hdr_e_SHIFT 16
+#define fc_vft_hdr_e_MASK 0x1
+#define fc_vft_hdr_e_WORD word0
+#define fc_vft_hdr_priority_SHIFT 13
+#define fc_vft_hdr_priority_MASK 0x7
+#define fc_vft_hdr_priority_WORD word0
+#define fc_vft_hdr_vf_id_SHIFT 1
+#define fc_vft_hdr_vf_id_MASK 0xFFF
+#define fc_vft_hdr_vf_id_WORD word0
+ uint32_t word1;
+#define fc_vft_hdr_hopct_SHIFT 24
+#define fc_vft_hdr_hopct_MASK 0xFF
+#define fc_vft_hdr_hopct_WORD word1
+};
+
+/*
* Extended Link Service LS_COMMAND codes (Payload Word 0)
*/
#ifdef __BIG_ENDIAN_BITFIELD
@@ -1152,6 +1181,9 @@ typedef struct {
#define PCI_DEVICE_ID_HORNET 0xfe05
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
+#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
+#define PCI_DEVICE_ID_TIGERSHARK 0x0704
+#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1342,15 +1374,21 @@ typedef struct { /* FireFly BIU registers */
#define MBX_READ_LA64 0x95
#define MBX_REG_VPI 0x96
#define MBX_UNREG_VPI 0x97
-#define MBX_REG_VNPID 0x96
-#define MBX_UNREG_VNPID 0x97
#define MBX_WRITE_WWN 0x98
#define MBX_SET_DEBUG 0x99
#define MBX_LOAD_EXP_ROM 0x9C
-
-#define MBX_MAX_CMDS 0x9D
+#define MBX_SLI4_CONFIG 0x9B
+#define MBX_SLI4_REQ_FTRS 0x9D
+#define MBX_MAX_CMDS 0x9E
+#define MBX_RESUME_RPI 0x9E
#define MBX_SLI2_CMD_MASK 0x80
+#define MBX_REG_VFI 0x9F
+#define MBX_REG_FCFI 0xA0
+#define MBX_UNREG_VFI 0xA1
+#define MBX_UNREG_FCFI 0xA2
+#define MBX_INIT_VFI 0xA3
+#define MBX_INIT_VPI 0xA4
/* IOCB Commands */
@@ -1440,6 +1478,16 @@ typedef struct { /* FireFly BIU registers */
#define CMD_IOCB_LOGENTRY_CN 0x94
#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
+/* Unhandled Data Security SLI Commands */
+#define DSSCMD_IWRITE64_CR 0xD8
+#define DSSCMD_IWRITE64_CX 0xD9
+#define DSSCMD_IREAD64_CR 0xDA
+#define DSSCMD_IREAD64_CX 0xDB
+#define DSSCMD_INVALIDATE_DEK 0xDC
+#define DSSCMD_SET_KEK 0xDD
+#define DSSCMD_GET_KEK_ID 0xDE
+#define DSSCMD_GEN_XFER 0xDF
+
#define CMD_MAX_IOCB_CMD 0xE6
#define CMD_IOCB_MASK 0xff
@@ -1466,6 +1514,7 @@ typedef struct { /* FireFly BIU registers */
#define MBXERR_BAD_RCV_LENGTH 14
#define MBXERR_DMA_ERROR 15
#define MBXERR_ERROR 16
+#define MBXERR_LINK_DOWN 0x33
#define MBX_NOT_FINISHED 255
#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
@@ -1504,32 +1553,6 @@ struct ulp_bde {
#endif
};
-struct ulp_bde64 { /* SLI-2 */
- union ULP_BDE_TUS {
- uint32_t w;
- struct {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
- VALUE !! */
- uint32_t bdeSize:24; /* Size of buffer (in bytes) */
-#else /* __LITTLE_ENDIAN_BITFIELD */
- uint32_t bdeSize:24; /* Size of buffer (in bytes) */
- uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
- VALUE !! */
-#endif
-#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
-#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
-#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
-#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
-#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
-#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
-#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
- } f;
- } tus;
- uint32_t addrLow;
- uint32_t addrHigh;
-};
-
typedef struct ULP_BDL { /* SLI-2 */
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t bdeFlags:8; /* BDL Flags */
@@ -2287,7 +2310,7 @@ typedef struct {
uint32_t rsvd3;
uint32_t rsvd4;
uint32_t rsvd5;
- uint16_t rsvd6;
+ uint16_t vfi;
uint16_t vpi;
#else /* __LITTLE_ENDIAN */
uint32_t rsvd1;
@@ -2297,7 +2320,7 @@ typedef struct {
uint32_t rsvd4;
uint32_t rsvd5;
uint16_t vpi;
- uint16_t rsvd6;
+ uint16_t vfi;
#endif
} REG_VPI_VAR;
@@ -2457,7 +2480,7 @@ typedef struct {
uint32_t entry_index:16;
#endif
- uint32_t rsvd1;
+ uint32_t sli4_length;
uint32_t word_cnt;
uint32_t resp_offset;
} DUMP_VAR;
@@ -2470,9 +2493,32 @@ typedef struct {
#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
+#define DMP_REGION_VPORT 0x16 /* VPort info region */
+#define DMP_VPORT_REGION_SIZE 0x200
+#define DMP_MBOX_OFFSET_WORD 0x5
+
+#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */
+#define DMP_FCOEPARAM_RGN_SIZE 0x400
+
#define WAKE_UP_PARMS_REGION_ID 4
#define WAKE_UP_PARMS_WORD_SIZE 15
+struct vport_rec {
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+};
+
+#define VPORT_INFO_SIG 0x32324752
+#define VPORT_INFO_REV_MASK 0xff
+#define VPORT_INFO_REV 0x1
+#define MAX_STATIC_VPORT_COUNT 16
+struct static_vport_info {
+ uint32_t signature;
+ uint32_t rev;
+ struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
+ uint32_t resvd[66];
+};
+
/* Option rom version structure */
struct prog_id {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -2697,7 +2743,9 @@ typedef struct {
#endif
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd1 : 23; /* Reserved */
+ uint32_t rsvd1 : 19; /* Reserved */
+ uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd2 : 3; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t ccrp : 1; /* Config Command Ring Polling */
@@ -2717,10 +2765,14 @@ typedef struct {
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t cbg : 1; /* Configure BlockGuard */
- uint32_t rsvd1 : 23; /* Reserved */
+ uint32_t rsvd2 : 3; /* Reserved */
+ uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd1 : 19; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd2 : 23; /* Reserved */
+ uint32_t rsvd3 : 19; /* Reserved */
+ uint32_t gdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd4 : 3; /* Reserved */
uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t gcrp : 1; /* Grant Command Ring Polling */
@@ -2740,7 +2792,9 @@ typedef struct {
uint32_t gcrp : 1; /* Grant Command Ring Polling */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t gbg : 1; /* Grant BlockGuard */
- uint32_t rsvd2 : 23; /* Reserved */
+ uint32_t rsvd4 : 3; /* Reserved */
+ uint32_t gdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd3 : 19; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
@@ -2753,20 +2807,20 @@ typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
- uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */
+ uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
#else /* __LITTLE_ENDIAN */
- uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */
+ uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
#endif
- uint32_t rsvd4; /* Reserved */
+ uint32_t rsvd6; /* Reserved */
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd5 : 16; /* Reserved */
+ uint32_t rsvd7 : 16; /* Reserved */
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
#else /* __LITTLE_ENDIAN */
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
- uint32_t rsvd5 : 16; /* Reserved */
+ uint32_t rsvd7 : 16; /* Reserved */
#endif
} CONFIG_PORT_VAR;
@@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp)
#define MENLO_TIMEOUT 30
#define SETVAR_MLOMNT 0x103107
#define SETVAR_MLORST 0x103007
+
+#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 00000000000..39c34b3ad29
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,2141 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2009 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+/* Macros to deal with bit fields. Each bit field must have 3 #defines
+ * associated with it (_SHIFT, _MASK, and _WORD).
+ * E.g., for a 2-bit field that starts at bit 7 of the "field4" member of a
+ * structure, the following #defines must exist:
+ * struct temp {
+ * uint32_t field1;
+ * uint32_t field2;
+ * uint32_t field3;
+ * uint32_t field4;
+ * #define example_bit_field_SHIFT 7
+ * #define example_bit_field_MASK 0x03
+ * #define example_bit_field_WORD field4
+ * uint32_t field5;
+ * };
+ * Then the macros below may be used to get or set the value of that field.
+ * E.g., to get the value of the bit field from the above example:
+ * struct temp t1;
+ * value = bf_get(example_bit_field, &t1);
+ * And then to set that bit field:
+ * bf_set(example_bit_field, &t1, 2);
+ * Or clear that bit field:
+ * bf_set(example_bit_field, &t1, 0);
+ */
+#define bf_get(name, ptr) \
+ (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bf_set(name, ptr, value) \
+ ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
+ ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+
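To make the _SHIFT/_MASK/_WORD convention above concrete, here is a minimal, self-contained user-space sketch; the struct temp and example_bit_field names simply mirror the hypothetical example in the comment block and are not fields used by the driver:

#include <stdint.h>
#include <stdio.h>

/* Same accessor macros as defined in lpfc_hw4.h above. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* Hypothetical register word with a 2-bit field starting at bit 7. */
struct temp {
	uint32_t field4;
#define example_bit_field_SHIFT	7
#define example_bit_field_MASK	0x03
#define example_bit_field_WORD	field4
};

int main(void)
{
	struct temp t1 = { .field4 = 0 };

	bf_set(example_bit_field, &t1, 2);	/* field4 is now 0x100 */
	printf("field = %u, word = 0x%x\n",
	       (unsigned int)bf_get(example_bit_field, &t1),
	       (unsigned int)t1.field4);
	return 0;
}

Every structure in this header follows the same three-#define pattern, so the same two macros serve every hardware field defined below.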
+struct dma_address {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+};
+
+#define LPFC_SLI4_BAR0 1
+#define LPFC_SLI4_BAR1 2
+#define LPFC_SLI4_BAR2 4
+
+#define LPFC_SLI4_MBX_EMBED true
+#define LPFC_SLI4_MBX_NEMBED false
+
+#define LPFC_SLI4_MB_WORD_COUNT 64
+#define LPFC_MAX_MQ_PAGE 8
+#define LPFC_MAX_WQ_PAGE 8
+#define LPFC_MAX_CQ_PAGE 4
+#define LPFC_MAX_EQ_PAGE 8
+
+#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */
+#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */
+#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */
+
+/* Define SLI4 Alignment requirements. */
+#define LPFC_ALIGN_16_BYTE 16
+#define LPFC_ALIGN_64_BYTE 64
+
+/* Define SLI4 specific definitions. */
+#define LPFC_MQ_CQE_BYTE_OFFSET 256
+#define LPFC_MBX_CMD_HDR_LENGTH 16
+#define LPFC_MBX_ERROR_RANGE 0x4000
+#define LPFC_BMBX_BIT1_ADDR_HI 0x2
+#define LPFC_BMBX_BIT1_ADDR_LO 0
+#define LPFC_RPI_HDR_COUNT 64
+#define LPFC_HDR_TEMPLATE_SIZE 4096
+#define LPFC_RPI_ALLOC_ERROR 0xFFFF
+#define LPFC_FCF_RECORD_WD_CNT 132
+#define LPFC_ENTIRE_FCF_DATABASE 0
+#define LPFC_DFLT_FCF_INDEX 0
+
+/* Virtual function numbers */
+#define LPFC_VF0 0
+#define LPFC_VF1 1
+#define LPFC_VF2 2
+#define LPFC_VF3 3
+#define LPFC_VF4 4
+#define LPFC_VF5 5
+#define LPFC_VF6 6
+#define LPFC_VF7 7
+#define LPFC_VF8 8
+#define LPFC_VF9 9
+#define LPFC_VF10 10
+#define LPFC_VF11 11
+#define LPFC_VF12 12
+#define LPFC_VF13 13
+#define LPFC_VF14 14
+#define LPFC_VF15 15
+#define LPFC_VF16 16
+#define LPFC_VF17 17
+#define LPFC_VF18 18
+#define LPFC_VF19 19
+#define LPFC_VF20 20
+#define LPFC_VF21 21
+#define LPFC_VF22 22
+#define LPFC_VF23 23
+#define LPFC_VF24 24
+#define LPFC_VF25 25
+#define LPFC_VF26 26
+#define LPFC_VF27 27
+#define LPFC_VF28 28
+#define LPFC_VF29 29
+#define LPFC_VF30 30
+#define LPFC_VF31 31
+
+/* PCI function numbers */
+#define LPFC_PCI_FUNC0 0
+#define LPFC_PCI_FUNC1 1
+#define LPFC_PCI_FUNC2 2
+#define LPFC_PCI_FUNC3 3
+#define LPFC_PCI_FUNC4 4
+
+/* Active interrupt test count */
+#define LPFC_ACT_INTR_CNT 4
+
+/* Delay Multiplier constant */
+#define LPFC_DMULT_CONST 651042
+#define LPFC_MIM_IMAX 636
+#define LPFC_FP_DEF_IMAX 10000
+#define LPFC_SP_DEF_IMAX 10000
+
+struct ulp_bde64 {
+ union ULP_BDE_TUS {
+ uint32_t w;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
+ VALUE !! */
+ uint32_t bdeSize:24; /* Size of buffer (in bytes) */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t bdeSize:24; /* Size of buffer (in bytes) */
+ uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
+ VALUE !! */
+#endif
+#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
+#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
+#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
+#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
+#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
+#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
+#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
+ } f;
+ } tus;
+ uint32_t addrLow;
+ uint32_t addrHigh;
+};
+
+struct lpfc_sli4_flags {
+ uint32_t word0;
+#define lpfc_fip_flag_SHIFT 0
+#define lpfc_fip_flag_MASK 0x00000001
+#define lpfc_fip_flag_WORD word0
+};
+
+/* event queue entry structure */
+struct lpfc_eqe {
+ uint32_t word0;
+#define lpfc_eqe_resource_id_SHIFT 16
+#define lpfc_eqe_resource_id_MASK 0x000000FF
+#define lpfc_eqe_resource_id_WORD word0
+#define lpfc_eqe_minor_code_SHIFT 4
+#define lpfc_eqe_minor_code_MASK 0x00000FFF
+#define lpfc_eqe_minor_code_WORD word0
+#define lpfc_eqe_major_code_SHIFT 1
+#define lpfc_eqe_major_code_MASK 0x00000007
+#define lpfc_eqe_major_code_WORD word0
+#define lpfc_eqe_valid_SHIFT 0
+#define lpfc_eqe_valid_MASK 0x00000001
+#define lpfc_eqe_valid_WORD word0
+};
+
+/* completion queue entry structure (common fields for all cqe types) */
+struct lpfc_cqe {
+ uint32_t reserved0;
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t word3;
+#define lpfc_cqe_valid_SHIFT 31
+#define lpfc_cqe_valid_MASK 0x00000001
+#define lpfc_cqe_valid_WORD word3
+#define lpfc_cqe_code_SHIFT 16
+#define lpfc_cqe_code_MASK 0x000000FF
+#define lpfc_cqe_code_WORD word3
+};
+
+/* Completion Queue Entry Status Codes */
+#define CQE_STATUS_SUCCESS 0x0
+#define CQE_STATUS_FCP_RSP_FAILURE 0x1
+#define CQE_STATUS_REMOTE_STOP 0x2
+#define CQE_STATUS_LOCAL_REJECT 0x3
+#define CQE_STATUS_NPORT_RJT 0x4
+#define CQE_STATUS_FABRIC_RJT 0x5
+#define CQE_STATUS_NPORT_BSY 0x6
+#define CQE_STATUS_FABRIC_BSY 0x7
+#define CQE_STATUS_INTERMED_RSP 0x8
+#define CQE_STATUS_LS_RJT 0x9
+#define CQE_STATUS_CMD_REJECT 0xb
+#define CQE_STATUS_FCP_TGT_LENCHECK 0xc
+#define CQE_STATUS_NEED_BUFF_ENTRY 0xf
+
+/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
+#define CQE_HW_STATUS_NO_ERR 0x0
+#define CQE_HW_STATUS_UNDERRUN 0x1
+#define CQE_HW_STATUS_OVERRUN 0x2
+
+/* Completion Queue Entry Codes */
+#define CQE_CODE_COMPL_WQE 0x1
+#define CQE_CODE_RELEASE_WQE 0x2
+#define CQE_CODE_RECEIVE 0x4
+#define CQE_CODE_XRI_ABORTED 0x5
+
+/* completion queue entry for wqe completions */
+struct lpfc_wcqe_complete {
+ uint32_t word0;
+#define lpfc_wcqe_c_request_tag_SHIFT 16
+#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF
+#define lpfc_wcqe_c_request_tag_WORD word0
+#define lpfc_wcqe_c_status_SHIFT 8
+#define lpfc_wcqe_c_status_MASK 0x000000FF
+#define lpfc_wcqe_c_status_WORD word0
+#define lpfc_wcqe_c_hw_status_SHIFT 0
+#define lpfc_wcqe_c_hw_status_MASK 0x000000FF
+#define lpfc_wcqe_c_hw_status_WORD word0
+ uint32_t total_data_placed;
+ uint32_t parameter;
+ uint32_t word3;
+#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK
+#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD
+#define lpfc_wcqe_c_xb_SHIFT 28
+#define lpfc_wcqe_c_xb_MASK 0x00000001
+#define lpfc_wcqe_c_xb_WORD word3
+#define lpfc_wcqe_c_pv_SHIFT 27
+#define lpfc_wcqe_c_pv_MASK 0x00000001
+#define lpfc_wcqe_c_pv_WORD word3
+#define lpfc_wcqe_c_priority_SHIFT 24
+#define lpfc_wcqe_c_priority_MASK 0x00000007
+#define lpfc_wcqe_c_priority_WORD word3
+#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
+#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
+};
+
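Because the common lpfc_cqe and the typed CQEs above share the same trailing word, a handler can test the valid bit generically and then recast the entry. A hedged sketch of that pattern (the function name is illustrative; queue indexing, memory barriers and endian handling are assumed and omitted):

/* Illustrative only: returns the WQE request tag from a valid work
 * completion, or -1 if the entry is not (yet) a WQE completion.
 */
static int example_wcqe_request_tag(struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_complete *wcqe;

	if (!bf_get(lpfc_cqe_valid, cqe))
		return -1;	/* slot not yet written by the port */

	if (bf_get(lpfc_cqe_code, cqe) != CQE_CODE_COMPL_WQE)
		return -1;	/* release/receive/abort entries not handled here */

	wcqe = (struct lpfc_wcqe_complete *)cqe;
	/* the tag ties this completion back to the WQE the host posted */
	return bf_get(lpfc_wcqe_c_request_tag, wcqe);
}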
+/* completion queue entry for wqe release */
+struct lpfc_wcqe_release {
+ uint32_t reserved0;
+ uint32_t reserved1;
+ uint32_t word2;
+#define lpfc_wcqe_r_wq_id_SHIFT 16
+#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF
+#define lpfc_wcqe_r_wq_id_WORD word2
+#define lpfc_wcqe_r_wqe_index_SHIFT 0
+#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF
+#define lpfc_wcqe_r_wqe_index_WORD word2
+ uint32_t word3;
+#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK
+#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD
+#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK
+#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD
+};
+
+struct sli4_wcqe_xri_aborted {
+ uint32_t word0;
+#define lpfc_wcqe_xa_status_SHIFT 8
+#define lpfc_wcqe_xa_status_MASK 0x000000FF
+#define lpfc_wcqe_xa_status_WORD word0
+ uint32_t parameter;
+ uint32_t word2;
+#define lpfc_wcqe_xa_remote_xid_SHIFT 16
+#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF
+#define lpfc_wcqe_xa_remote_xid_WORD word2
+#define lpfc_wcqe_xa_xri_SHIFT 0
+#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF
+#define lpfc_wcqe_xa_xri_WORD word2
+ uint32_t word3;
+#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK
+#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD
+#define lpfc_wcqe_xa_ia_SHIFT 30
+#define lpfc_wcqe_xa_ia_MASK 0x00000001
+#define lpfc_wcqe_xa_ia_WORD word3
+#define CQE_XRI_ABORTED_IA_REMOTE 0
+#define CQE_XRI_ABORTED_IA_LOCAL 1
+#define lpfc_wcqe_xa_br_SHIFT 29
+#define lpfc_wcqe_xa_br_MASK 0x00000001
+#define lpfc_wcqe_xa_br_WORD word3
+#define CQE_XRI_ABORTED_BR_BA_ACC 0
+#define CQE_XRI_ABORTED_BR_BA_RJT 1
+#define lpfc_wcqe_xa_eo_SHIFT 28
+#define lpfc_wcqe_xa_eo_MASK 0x00000001
+#define lpfc_wcqe_xa_eo_WORD word3
+#define CQE_XRI_ABORTED_EO_REMOTE 0
+#define CQE_XRI_ABORTED_EO_LOCAL 1
+#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK
+#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD
+};
+
+/* completion queue entry structure for rqe completion */
+struct lpfc_rcqe {
+ uint32_t word0;
+#define lpfc_rcqe_bindex_SHIFT 16
+#define lpfc_rcqe_bindex_MASK 0x0000FFF
+#define lpfc_rcqe_bindex_WORD word0
+#define lpfc_rcqe_status_SHIFT 8
+#define lpfc_rcqe_status_MASK 0x000000FF
+#define lpfc_rcqe_status_WORD word0
+#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */
+#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
+#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
+#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
+ uint32_t reserved1;
+ uint32_t word2;
+#define lpfc_rcqe_length_SHIFT 16
+#define lpfc_rcqe_length_MASK 0x0000FFFF
+#define lpfc_rcqe_length_WORD word2
+#define lpfc_rcqe_rq_id_SHIFT 6
+#define lpfc_rcqe_rq_id_MASK 0x000003FF
+#define lpfc_rcqe_rq_id_WORD word2
+#define lpfc_rcqe_fcf_id_SHIFT 0
+#define lpfc_rcqe_fcf_id_MASK 0x0000003F
+#define lpfc_rcqe_fcf_id_WORD word2
+ uint32_t word3;
+#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
+#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
+#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD
+#define lpfc_rcqe_port_SHIFT 30
+#define lpfc_rcqe_port_MASK 0x00000001
+#define lpfc_rcqe_port_WORD word3
+#define lpfc_rcqe_hdr_length_SHIFT 24
+#define lpfc_rcqe_hdr_length_MASK 0x0000001F
+#define lpfc_rcqe_hdr_length_WORD word3
+#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT
+#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK
+#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD
+#define lpfc_rcqe_eof_SHIFT 8
+#define lpfc_rcqe_eof_MASK 0x000000FF
+#define lpfc_rcqe_eof_WORD word3
+#define FCOE_EOFn 0x41
+#define FCOE_EOFt 0x42
+#define FCOE_EOFni 0x49
+#define FCOE_EOFa 0x50
+#define lpfc_rcqe_sof_SHIFT 0
+#define lpfc_rcqe_sof_MASK 0x000000FF
+#define lpfc_rcqe_sof_WORD word3
+#define FCOE_SOFi2 0x2d
+#define FCOE_SOFi3 0x2e
+#define FCOE_SOFn2 0x35
+#define FCOE_SOFn3 0x36
+};
+
+struct lpfc_wqe_generic{
+ struct ulp_bde64 bde;
+ uint32_t word3;
+ uint32_t word4;
+ uint32_t word5;
+ uint32_t word6;
+#define lpfc_wqe_gen_context_SHIFT 16
+#define lpfc_wqe_gen_context_MASK 0x0000FFFF
+#define lpfc_wqe_gen_context_WORD word6
+#define lpfc_wqe_gen_xri_SHIFT 0
+#define lpfc_wqe_gen_xri_MASK 0x0000FFFF
+#define lpfc_wqe_gen_xri_WORD word6
+ uint32_t word7;
+#define lpfc_wqe_gen_lnk_SHIFT 23
+#define lpfc_wqe_gen_lnk_MASK 0x00000001
+#define lpfc_wqe_gen_lnk_WORD word7
+#define lpfc_wqe_gen_erp_SHIFT 22
+#define lpfc_wqe_gen_erp_MASK 0x00000001
+#define lpfc_wqe_gen_erp_WORD word7
+#define lpfc_wqe_gen_pu_SHIFT 20
+#define lpfc_wqe_gen_pu_MASK 0x00000003
+#define lpfc_wqe_gen_pu_WORD word7
+#define lpfc_wqe_gen_class_SHIFT 16
+#define lpfc_wqe_gen_class_MASK 0x00000007
+#define lpfc_wqe_gen_class_WORD word7
+#define lpfc_wqe_gen_command_SHIFT 8
+#define lpfc_wqe_gen_command_MASK 0x000000FF
+#define lpfc_wqe_gen_command_WORD word7
+#define lpfc_wqe_gen_status_SHIFT 4
+#define lpfc_wqe_gen_status_MASK 0x0000000F
+#define lpfc_wqe_gen_status_WORD word7
+#define lpfc_wqe_gen_ct_SHIFT 2
+#define lpfc_wqe_gen_ct_MASK 0x00000007
+#define lpfc_wqe_gen_ct_WORD word7
+ uint32_t abort_tag;
+ uint32_t word9;
+#define lpfc_wqe_gen_request_tag_SHIFT 0
+#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF
+#define lpfc_wqe_gen_request_tag_WORD word9
+ uint32_t word10;
+#define lpfc_wqe_gen_ccp_SHIFT 24
+#define lpfc_wqe_gen_ccp_MASK 0x000000FF
+#define lpfc_wqe_gen_ccp_WORD word10
+#define lpfc_wqe_gen_ccpe_SHIFT 23
+#define lpfc_wqe_gen_ccpe_MASK 0x00000001
+#define lpfc_wqe_gen_ccpe_WORD word10
+#define lpfc_wqe_gen_pv_SHIFT 19
+#define lpfc_wqe_gen_pv_MASK 0x00000001
+#define lpfc_wqe_gen_pv_WORD word10
+#define lpfc_wqe_gen_pri_SHIFT 16
+#define lpfc_wqe_gen_pri_MASK 0x00000007
+#define lpfc_wqe_gen_pri_WORD word10
+ uint32_t word11;
+#define lpfc_wqe_gen_cq_id_SHIFT 16
+#define lpfc_wqe_gen_cq_id_MASK 0x000003FF
+#define lpfc_wqe_gen_cq_id_WORD word11
+#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff
+#define lpfc_wqe_gen_wqec_SHIFT 7
+#define lpfc_wqe_gen_wqec_MASK 0x00000001
+#define lpfc_wqe_gen_wqec_WORD word11
+#define lpfc_wqe_gen_cmd_type_SHIFT 0
+#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
+#define lpfc_wqe_gen_cmd_type_WORD word11
+ uint32_t payload[4];
+};
+
+struct lpfc_rqe {
+ uint32_t address_hi;
+ uint32_t address_lo;
+};
+
+/* buffer descriptors */
+struct lpfc_bde4 {
+ uint32_t addr_hi;
+ uint32_t addr_lo;
+ uint32_t word2;
+#define lpfc_bde4_last_SHIFT 31
+#define lpfc_bde4_last_MASK 0x00000001
+#define lpfc_bde4_last_WORD word2
+#define lpfc_bde4_sge_offset_SHIFT 0
+#define lpfc_bde4_sge_offset_MASK 0x000003FF
+#define lpfc_bde4_sge_offset_WORD word2
+ uint32_t word3;
+#define lpfc_bde4_length_SHIFT 0
+#define lpfc_bde4_length_MASK 0x000000FF
+#define lpfc_bde4_length_WORD word3
+};
+
+struct lpfc_register {
+ uint32_t word0;
+};
+
+#define LPFC_UERR_STATUS_HI 0x00A4
+#define LPFC_UERR_STATUS_LO 0x00A0
+#define LPFC_ONLINE0 0x00B0
+#define LPFC_ONLINE1 0x00B4
+#define LPFC_SCRATCHPAD 0x0058
+
+/* BAR0 Registers */
+#define LPFC_HST_STATE 0x00AC
+#define lpfc_hst_state_perr_SHIFT 31
+#define lpfc_hst_state_perr_MASK 0x1
+#define lpfc_hst_state_perr_WORD word0
+#define lpfc_hst_state_sfi_SHIFT 30
+#define lpfc_hst_state_sfi_MASK 0x1
+#define lpfc_hst_state_sfi_WORD word0
+#define lpfc_hst_state_nip_SHIFT 29
+#define lpfc_hst_state_nip_MASK 0x1
+#define lpfc_hst_state_nip_WORD word0
+#define lpfc_hst_state_ipc_SHIFT 28
+#define lpfc_hst_state_ipc_MASK 0x1
+#define lpfc_hst_state_ipc_WORD word0
+#define lpfc_hst_state_xrom_SHIFT 27
+#define lpfc_hst_state_xrom_MASK 0x1
+#define lpfc_hst_state_xrom_WORD word0
+#define lpfc_hst_state_dl_SHIFT 26
+#define lpfc_hst_state_dl_MASK 0x1
+#define lpfc_hst_state_dl_WORD word0
+#define lpfc_hst_state_port_status_SHIFT 0
+#define lpfc_hst_state_port_status_MASK 0xFFFF
+#define lpfc_hst_state_port_status_WORD word0
+
+#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000
+#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001
+#define LPFC_POST_STAGE_HOST_RDY 0x0002
+#define LPFC_POST_STAGE_BE_RESET 0x0003
+#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100
+#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101
+#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200
+#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201
+#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300
+#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301
+#define LPFC_POST_STAGE_DDR_TEST_START 0x0400
+#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401
+#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600
+#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701
+#define LPFC_POST_STAGE_ARMFW_START 0x0800
+#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900
+#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01
+#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00
+#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01
+#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02
+#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03
+#define LPFC_POST_STAGE_PARSE_XML 0x0B04
+#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05
+#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06
+#define LPFC_POST_STAGE_RC_DONE 0x0B07
+#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08
+#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00
+#define LPFC_POST_STAGE_ARMFW_READY 0xC000
+#define LPFC_POST_STAGE_ARMFW_UE 0xF000
+
+#define lpfc_scratchpad_slirev_SHIFT 4
+#define lpfc_scratchpad_slirev_MASK 0xF
+#define lpfc_scratchpad_slirev_WORD word0
+#define lpfc_scratchpad_chiptype_SHIFT 8
+#define lpfc_scratchpad_chiptype_MASK 0xFF
+#define lpfc_scratchpad_chiptype_WORD word0
+#define lpfc_scratchpad_featurelevel1_SHIFT 16
+#define lpfc_scratchpad_featurelevel1_MASK 0xFF
+#define lpfc_scratchpad_featurelevel1_WORD word0
+#define lpfc_scratchpad_featurelevel2_SHIFT 24
+#define lpfc_scratchpad_featurelevel2_MASK 0xFF
+#define lpfc_scratchpad_featurelevel2_WORD word0
+
+/* BAR1 Registers */
+#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
+#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
+
+#define LPFC_HST_ISR0 0x0C18
+#define LPFC_HST_ISR1 0x0C1C
+#define LPFC_HST_ISR2 0x0C20
+#define LPFC_HST_ISR3 0x0C24
+#define LPFC_HST_ISR4 0x0C28
+
+#define LPFC_HST_IMR0 0x0C48
+#define LPFC_HST_IMR1 0x0C4C
+#define LPFC_HST_IMR2 0x0C50
+#define LPFC_HST_IMR3 0x0C54
+#define LPFC_HST_IMR4 0x0C58
+
+#define LPFC_HST_ISCR0 0x0C78
+#define LPFC_HST_ISCR1 0x0C7C
+#define LPFC_HST_ISCR2 0x0C80
+#define LPFC_HST_ISCR3 0x0C84
+#define LPFC_HST_ISCR4 0x0C88
+
+#define LPFC_SLI4_INTR0 BIT0
+#define LPFC_SLI4_INTR1 BIT1
+#define LPFC_SLI4_INTR2 BIT2
+#define LPFC_SLI4_INTR3 BIT3
+#define LPFC_SLI4_INTR4 BIT4
+#define LPFC_SLI4_INTR5 BIT5
+#define LPFC_SLI4_INTR6 BIT6
+#define LPFC_SLI4_INTR7 BIT7
+#define LPFC_SLI4_INTR8 BIT8
+#define LPFC_SLI4_INTR9 BIT9
+#define LPFC_SLI4_INTR10 BIT10
+#define LPFC_SLI4_INTR11 BIT11
+#define LPFC_SLI4_INTR12 BIT12
+#define LPFC_SLI4_INTR13 BIT13
+#define LPFC_SLI4_INTR14 BIT14
+#define LPFC_SLI4_INTR15 BIT15
+#define LPFC_SLI4_INTR16 BIT16
+#define LPFC_SLI4_INTR17 BIT17
+#define LPFC_SLI4_INTR18 BIT18
+#define LPFC_SLI4_INTR19 BIT19
+#define LPFC_SLI4_INTR20 BIT20
+#define LPFC_SLI4_INTR21 BIT21
+#define LPFC_SLI4_INTR22 BIT22
+#define LPFC_SLI4_INTR23 BIT23
+#define LPFC_SLI4_INTR24 BIT24
+#define LPFC_SLI4_INTR25 BIT25
+#define LPFC_SLI4_INTR26 BIT26
+#define LPFC_SLI4_INTR27 BIT27
+#define LPFC_SLI4_INTR28 BIT28
+#define LPFC_SLI4_INTR29 BIT29
+#define LPFC_SLI4_INTR30 BIT30
+#define LPFC_SLI4_INTR31 BIT31
+
+/* BAR2 Registers */
+#define LPFC_RQ_DOORBELL 0x00A0
+#define lpfc_rq_doorbell_num_posted_SHIFT 16
+#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
+#define lpfc_rq_doorbell_num_posted_WORD word0
+#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */
+#define lpfc_rq_doorbell_id_SHIFT 0
+#define lpfc_rq_doorbell_id_MASK 0x03FF
+#define lpfc_rq_doorbell_id_WORD word0
+
+#define LPFC_WQ_DOORBELL 0x0040
+#define lpfc_wq_doorbell_num_posted_SHIFT 24
+#define lpfc_wq_doorbell_num_posted_MASK 0x00FF
+#define lpfc_wq_doorbell_num_posted_WORD word0
+#define lpfc_wq_doorbell_index_SHIFT 16
+#define lpfc_wq_doorbell_index_MASK 0x00FF
+#define lpfc_wq_doorbell_index_WORD word0
+#define lpfc_wq_doorbell_id_SHIFT 0
+#define lpfc_wq_doorbell_id_MASK 0xFFFF
+#define lpfc_wq_doorbell_id_WORD word0
+
+#define LPFC_EQCQ_DOORBELL 0x0120
+#define lpfc_eqcq_doorbell_arm_SHIFT 29
+#define lpfc_eqcq_doorbell_arm_MASK 0x0001
+#define lpfc_eqcq_doorbell_arm_WORD word0
+#define lpfc_eqcq_doorbell_num_released_SHIFT 16
+#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF
+#define lpfc_eqcq_doorbell_num_released_WORD word0
+#define lpfc_eqcq_doorbell_qt_SHIFT 10
+#define lpfc_eqcq_doorbell_qt_MASK 0x0001
+#define lpfc_eqcq_doorbell_qt_WORD word0
+#define LPFC_QUEUE_TYPE_COMPLETION 0
+#define LPFC_QUEUE_TYPE_EVENT 1
+#define lpfc_eqcq_doorbell_eqci_SHIFT 9
+#define lpfc_eqcq_doorbell_eqci_MASK 0x0001
+#define lpfc_eqcq_doorbell_eqci_WORD word0
+#define lpfc_eqcq_doorbell_cqid_SHIFT 0
+#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF
+#define lpfc_eqcq_doorbell_cqid_WORD word0
+#define lpfc_eqcq_doorbell_eqid_SHIFT 0
+#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF
+#define lpfc_eqcq_doorbell_eqid_WORD word0
+
+#define LPFC_BMBX 0x0160
+#define lpfc_bmbx_addr_SHIFT 2
+#define lpfc_bmbx_addr_MASK 0x3FFFFFFF
+#define lpfc_bmbx_addr_WORD word0
+#define lpfc_bmbx_hi_SHIFT 1
+#define lpfc_bmbx_hi_MASK 0x0001
+#define lpfc_bmbx_hi_WORD word0
+#define lpfc_bmbx_rdy_SHIFT 0
+#define lpfc_bmbx_rdy_MASK 0x0001
+#define lpfc_bmbx_rdy_WORD word0
+
+#define LPFC_MQ_DOORBELL 0x0140
+#define lpfc_mq_doorbell_num_posted_SHIFT 16
+#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF
+#define lpfc_mq_doorbell_num_posted_WORD word0
+#define lpfc_mq_doorbell_id_SHIFT 0
+#define lpfc_mq_doorbell_id_MASK 0x03FF
+#define lpfc_mq_doorbell_id_WORD word0
+
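The doorbells above are single 32-bit BAR2 writes, so the same bf_set() helper packs their fields into a struct lpfc_register before the MMIO store. A rough sketch of re-arming a completion queue through the EQCQ doorbell; the db_regaddr parameter stands for an already-ioremapped BAR2 address at offset LPFC_EQCQ_DOORBELL and is an assumption of this sketch, not a name from the patch:

/* Illustrative only: release 'released' consumed CQEs and re-arm CQ 'cqid'. */
static void example_arm_cq(void __iomem *db_regaddr, uint16_t cqid,
			   uint32_t released)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, cqid);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);

	writel(doorbell.word0, db_regaddr);	/* one MMIO write rings the doorbell */
}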
+struct lpfc_sli4_cfg_mhdr {
+ uint32_t word1;
+#define lpfc_mbox_hdr_emb_SHIFT 0
+#define lpfc_mbox_hdr_emb_MASK 0x00000001
+#define lpfc_mbox_hdr_emb_WORD word1
+#define lpfc_mbox_hdr_sge_cnt_SHIFT 3
+#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F
+#define lpfc_mbox_hdr_sge_cnt_WORD word1
+ uint32_t payload_length;
+ uint32_t tag_lo;
+ uint32_t tag_hi;
+ uint32_t reserved5;
+};
+
+union lpfc_sli4_cfg_shdr {
+ struct {
+ uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT 0
+#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD word6
+#define lpfc_mbox_hdr_subsystem_SHIFT 8
+#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD word6
+#define lpfc_mbox_hdr_port_number_SHIFT 16
+#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
+#define lpfc_mbox_hdr_port_number_WORD word6
+#define lpfc_mbox_hdr_domain_SHIFT 24
+#define lpfc_mbox_hdr_domain_MASK 0x000000FF
+#define lpfc_mbox_hdr_domain_WORD word6
+ uint32_t timeout;
+ uint32_t request_length;
+ uint32_t reserved9;
+ } request;
+ struct {
+ uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT 0
+#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD word6
+#define lpfc_mbox_hdr_subsystem_SHIFT 8
+#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD word6
+#define lpfc_mbox_hdr_domain_SHIFT 24
+#define lpfc_mbox_hdr_domain_MASK 0x000000FF
+#define lpfc_mbox_hdr_domain_WORD word6
+ uint32_t word7;
+#define lpfc_mbox_hdr_status_SHIFT 0
+#define lpfc_mbox_hdr_status_MASK 0x000000FF
+#define lpfc_mbox_hdr_status_WORD word7
+#define lpfc_mbox_hdr_add_status_SHIFT 8
+#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
+#define lpfc_mbox_hdr_add_status_WORD word7
+ uint32_t response_length;
+ uint32_t actual_response_length;
+ } response;
+};
+
+/* Mailbox structures */
+struct mbox_header {
+ struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+};
+
+/* Subsystem Definitions */
+#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
+#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
+
+/* Device Specific Definitions */
+
+/* The HOST ENDIAN defines are in Big Endian format. */
+#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF
+#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF
+
+/* Common Opcodes */
+#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C
+#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D
+#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
+#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
+#define LPFC_MBOX_OPCODE_NOP 0x21
+#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
+#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
+#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
+#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
+
+/* FCoE Opcodes */
+#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
+#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02
+#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03
+#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04
+#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05
+#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06
+#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08
+#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
+#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
+#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
+
+/* Mailbox command structures */
+struct eq_context {
+ uint32_t word0;
+#define lpfc_eq_context_size_SHIFT 31
+#define lpfc_eq_context_size_MASK 0x00000001
+#define lpfc_eq_context_size_WORD word0
+#define LPFC_EQE_SIZE_4 0x0
+#define LPFC_EQE_SIZE_16 0x1
+#define lpfc_eq_context_valid_SHIFT 29
+#define lpfc_eq_context_valid_MASK 0x00000001
+#define lpfc_eq_context_valid_WORD word0
+ uint32_t word1;
+#define lpfc_eq_context_count_SHIFT 26
+#define lpfc_eq_context_count_MASK 0x00000003
+#define lpfc_eq_context_count_WORD word1
+#define LPFC_EQ_CNT_256 0x0
+#define LPFC_EQ_CNT_512 0x1
+#define LPFC_EQ_CNT_1024 0x2
+#define LPFC_EQ_CNT_2048 0x3
+#define LPFC_EQ_CNT_4096 0x4
+ uint32_t word2;
+#define lpfc_eq_context_delay_multi_SHIFT 13
+#define lpfc_eq_context_delay_multi_MASK 0x000003FF
+#define lpfc_eq_context_delay_multi_WORD word2
+ uint32_t reserved3;
+};
+
+struct sgl_page_pairs {
+ uint32_t sgl_pg0_addr_lo;
+ uint32_t sgl_pg0_addr_hi;
+ uint32_t sgl_pg1_addr_lo;
+ uint32_t sgl_pg1_addr_hi;
+};
+
+struct lpfc_mbx_post_sgl_pages {
+ struct mbox_header header;
+ uint32_t word0;
+#define lpfc_post_sgl_pages_xri_SHIFT 0
+#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF
+#define lpfc_post_sgl_pages_xri_WORD word0
+#define lpfc_post_sgl_pages_xricnt_SHIFT 16
+#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF
+#define lpfc_post_sgl_pages_xricnt_WORD word0
+ struct sgl_page_pairs sgl_pg_pairs[1];
+};
+
+/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
+struct lpfc_mbx_post_uembed_sgl_page1 {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ uint32_t word0;
+ struct sgl_page_pairs sgl_pg_pairs;
+};
+
+struct lpfc_mbx_sge {
+ uint32_t pa_lo;
+ uint32_t pa_hi;
+ uint32_t length;
+};
+
+struct lpfc_mbx_nembed_cmd {
+ struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19
+ struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_nembed_sge_virt {
+ void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_eq_create {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_eq_create_num_pages_SHIFT 0
+#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_eq_create_num_pages_WORD word0
+ struct eq_context context;
+ struct dma_address page[LPFC_MAX_EQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_eq_create_q_id_SHIFT 0
+#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_eq_create_q_id_WORD word0
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_eq_destroy {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_eq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_eq_destroy_q_id_WORD word0
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_nop {
+ struct mbox_header header;
+ uint32_t context[2];
+};
+
+struct cq_context {
+ uint32_t word0;
+#define lpfc_cq_context_event_SHIFT 31
+#define lpfc_cq_context_event_MASK 0x00000001
+#define lpfc_cq_context_event_WORD word0
+#define lpfc_cq_context_valid_SHIFT 29
+#define lpfc_cq_context_valid_MASK 0x00000001
+#define lpfc_cq_context_valid_WORD word0
+#define lpfc_cq_context_count_SHIFT 27
+#define lpfc_cq_context_count_MASK 0x00000003
+#define lpfc_cq_context_count_WORD word0
+#define LPFC_CQ_CNT_256 0x0
+#define LPFC_CQ_CNT_512 0x1
+#define LPFC_CQ_CNT_1024 0x2
+ uint32_t word1;
+#define lpfc_cq_eq_id_SHIFT 22
+#define lpfc_cq_eq_id_MASK 0x000000FF
+#define lpfc_cq_eq_id_WORD word1
+ uint32_t reserved0;
+ uint32_t reserved1;
+};
+
+struct lpfc_mbx_cq_create {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_cq_create_num_pages_SHIFT 0
+#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_num_pages_WORD word0
+ struct cq_context context;
+ struct dma_address page[LPFC_MAX_CQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_cq_create_q_id_SHIFT 0
+#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_q_id_WORD word0
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_cq_destroy {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_cq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_cq_destroy_q_id_WORD word0
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+struct wq_context {
+ uint32_t reserved0;
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+};
+
+struct lpfc_mbx_wq_create {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_wq_create_num_pages_SHIFT 0
+#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_num_pages_WORD word0
+#define lpfc_mbx_wq_create_cq_id_SHIFT 16
+#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_cq_id_WORD word0
+ struct dma_address page[LPFC_MAX_WQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_wq_create_q_id_SHIFT 0
+#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_q_id_WORD word0
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_wq_destroy {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_wq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_wq_destroy_q_id_WORD word0
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+#define LPFC_HDR_BUF_SIZE 128
+#define LPFC_DATA_BUF_SIZE 4096
+struct rq_context {
+ uint32_t word0;
+#define lpfc_rq_context_rq_size_SHIFT 16
+#define lpfc_rq_context_rq_size_MASK 0x0000000F
+#define lpfc_rq_context_rq_size_WORD word0
+#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */
+#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
+#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
+#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
+ uint32_t reserved1;
+ uint32_t word2;
+#define lpfc_rq_context_cq_id_SHIFT 16
+#define lpfc_rq_context_cq_id_MASK 0x000003FF
+#define lpfc_rq_context_cq_id_WORD word2
+#define lpfc_rq_context_buf_size_SHIFT 0
+#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
+#define lpfc_rq_context_buf_size_WORD word2
+ uint32_t reserved3;
+};
+
+struct lpfc_mbx_rq_create {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_rq_create_num_pages_SHIFT 0
+#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_num_pages_WORD word0
+ struct rq_context context;
+ struct dma_address page[LPFC_MAX_WQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_rq_create_q_id_SHIFT 0
+#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD word0
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_rq_destroy {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_rq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_rq_destroy_q_id_WORD word0
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+struct mq_context {
+ uint32_t word0;
+#define lpfc_mq_context_cq_id_SHIFT 22
+#define lpfc_mq_context_cq_id_MASK 0x000003FF
+#define lpfc_mq_context_cq_id_WORD word0
+#define lpfc_mq_context_count_SHIFT 16
+#define lpfc_mq_context_count_MASK 0x0000000F
+#define lpfc_mq_context_count_WORD word0
+#define LPFC_MQ_CNT_16 0x5
+#define LPFC_MQ_CNT_32 0x6
+#define LPFC_MQ_CNT_64 0x7
+#define LPFC_MQ_CNT_128 0x8
+ uint32_t word1;
+#define lpfc_mq_context_valid_SHIFT 31
+#define lpfc_mq_context_valid_MASK 0x00000001
+#define lpfc_mq_context_valid_WORD word1
+ uint32_t reserved2;
+ uint32_t reserved3;
+};
+
+struct lpfc_mbx_mq_create {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_create_num_pages_SHIFT 0
+#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_num_pages_WORD word0
+ struct mq_context context;
+ struct dma_address page[LPFC_MAX_MQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT 0
+#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD word0
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_mq_destroy {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_mq_destroy_q_id_WORD word0
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_post_hdr_tmpl {
+ struct mbox_header header;
+ uint32_t word10;
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10
+ uint32_t rpi_paddr_lo;
+ uint32_t rpi_paddr_hi;
+};
+
+struct sli4_sge { /* SLI-4 */
+ uint32_t addr_hi;
+ uint32_t addr_lo;
+
+ uint32_t word2;
+#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
+#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
+#define lpfc_sli4_sge_offset_WORD word2
+#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
+ this flag !! */
+#define lpfc_sli4_sge_last_MASK 0x00000001
+#define lpfc_sli4_sge_last_WORD word2
+ uint32_t word3;
+#define lpfc_sli4_sge_len_SHIFT 0
+#define lpfc_sli4_sge_len_MASK 0x0001FFFF
+#define lpfc_sli4_sge_len_WORD word3
+};
+
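Filling an SGL entry is mostly a matter of splitting the DMA address and packing word2/word3 with bf_set(). A hedged sketch assuming the standard kernel upper_32_bits()/lower_32_bits() helpers are available; any byte-swapping the port expects is omitted:

/* Illustrative only: describe one data buffer in an SGE; 'last' marks the
 * final entry in the SGL, as required by the lpfc_sli4_sge_last flag above.
 */
static void example_fill_sge(struct sli4_sge *sge, dma_addr_t physaddr,
			     uint32_t len, int last)
{
	sge->addr_hi = upper_32_bits(physaddr);
	sge->addr_lo = lower_32_bits(physaddr);

	sge->word2 = 0;
	bf_set(lpfc_sli4_sge_last, sge, last ? 1 : 0);

	sge->word3 = 0;
	bf_set(lpfc_sli4_sge_len, sge, len);
}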
+struct fcf_record {
+ uint32_t max_rcv_size;
+ uint32_t fka_adv_period;
+ uint32_t fip_priority;
+ uint32_t word3;
+#define lpfc_fcf_record_mac_0_SHIFT 0
+#define lpfc_fcf_record_mac_0_MASK 0x000000FF
+#define lpfc_fcf_record_mac_0_WORD word3
+#define lpfc_fcf_record_mac_1_SHIFT 8
+#define lpfc_fcf_record_mac_1_MASK 0x000000FF
+#define lpfc_fcf_record_mac_1_WORD word3
+#define lpfc_fcf_record_mac_2_SHIFT 16
+#define lpfc_fcf_record_mac_2_MASK 0x000000FF
+#define lpfc_fcf_record_mac_2_WORD word3
+#define lpfc_fcf_record_mac_3_SHIFT 24
+#define lpfc_fcf_record_mac_3_MASK 0x000000FF
+#define lpfc_fcf_record_mac_3_WORD word3
+ uint32_t word4;
+#define lpfc_fcf_record_mac_4_SHIFT 0
+#define lpfc_fcf_record_mac_4_MASK 0x000000FF
+#define lpfc_fcf_record_mac_4_WORD word4
+#define lpfc_fcf_record_mac_5_SHIFT 8
+#define lpfc_fcf_record_mac_5_MASK 0x000000FF
+#define lpfc_fcf_record_mac_5_WORD word4
+#define lpfc_fcf_record_fcf_avail_SHIFT 16
+#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
+#define lpfc_fcf_record_fc_avail_WORD word4
+#define lpfc_fcf_record_mac_addr_prov_SHIFT 24
+#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
+#define lpfc_fcf_record_mac_addr_prov_WORD word4
+#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */
+#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */
+ uint32_t word5;
+#define lpfc_fcf_record_fab_name_0_SHIFT 0
+#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_0_WORD word5
+#define lpfc_fcf_record_fab_name_1_SHIFT 8
+#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_1_WORD word5
+#define lpfc_fcf_record_fab_name_2_SHIFT 16
+#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_2_WORD word5
+#define lpfc_fcf_record_fab_name_3_SHIFT 24
+#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_3_WORD word5
+ uint32_t word6;
+#define lpfc_fcf_record_fab_name_4_SHIFT 0
+#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_4_WORD word6
+#define lpfc_fcf_record_fab_name_5_SHIFT 8
+#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_5_WORD word6
+#define lpfc_fcf_record_fab_name_6_SHIFT 16
+#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_6_WORD word6
+#define lpfc_fcf_record_fab_name_7_SHIFT 24
+#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF
+#define lpfc_fcf_record_fab_name_7_WORD word6
+ uint32_t word7;
+#define lpfc_fcf_record_fc_map_0_SHIFT 0
+#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF
+#define lpfc_fcf_record_fc_map_0_WORD word7
+#define lpfc_fcf_record_fc_map_1_SHIFT 8
+#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF
+#define lpfc_fcf_record_fc_map_1_WORD word7
+#define lpfc_fcf_record_fc_map_2_SHIFT 16
+#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF
+#define lpfc_fcf_record_fc_map_2_WORD word7
+#define lpfc_fcf_record_fcf_valid_SHIFT 24
+#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF
+#define lpfc_fcf_record_fcf_valid_WORD word7
+ uint32_t word8;
+#define lpfc_fcf_record_fcf_index_SHIFT 0
+#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF
+#define lpfc_fcf_record_fcf_index_WORD word8
+#define lpfc_fcf_record_fcf_state_SHIFT 16
+#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
+#define lpfc_fcf_record_fcf_state_WORD word8
+ uint8_t vlan_bitmap[512];
+};
+
+struct lpfc_mbx_read_fcf_tbl {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ union {
+ struct {
+ uint32_t word10;
+#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0
+#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_indx_WORD word10
+ } request;
+ struct {
+ uint32_t eventag;
+ } response;
+ } u;
+ uint32_t word11;
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11
+};
+
+struct lpfc_mbx_add_fcf_tbl_entry {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ uint32_t word10;
+#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0
+#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF
+#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10
+ struct lpfc_mbx_sge fcf_sge;
+};
+
+struct lpfc_mbx_del_fcf_tbl_entry {
+ struct mbox_header header;
+ uint32_t word10;
+#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0
+#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_count_WORD word10
+#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16
+#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_index_WORD word10
+};
+
+/* Status field for embedded SLI_CONFIG mailbox command */
+#define STATUS_SUCCESS 0x0
+#define STATUS_FAILED 0x1
+#define STATUS_ILLEGAL_REQUEST 0x2
+#define STATUS_ILLEGAL_FIELD 0x3
+#define STATUS_INSUFFICIENT_BUFFER 0x4
+#define STATUS_UNAUTHORIZED_REQUEST 0x5
+#define STATUS_FLASHROM_SAVE_FAILED 0x17
+#define STATUS_FLASHROM_RESTORE_FAILED 0x18
+#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a
+#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b
+#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c
+#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d
+#define STATUS_ASSERT_FAILED 0x1e
+#define STATUS_INVALID_SESSION 0x1f
+#define STATUS_INVALID_CONNECTION 0x20
+#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21
+#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24
+#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25
+#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26
+#define STATUS_FLASHROM_READ_FAILED 0x27
+#define STATUS_POLL_IOCTL_TIMEOUT 0x28
+#define STATUS_ERROR_ACITMAIN 0x2a
+#define STATUS_REBOOT_REQUIRED 0x2c
+#define STATUS_FCF_IN_USE 0x3a
+
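The STATUS_* codes above come back in the response half of the cfg_shdr union carried by every mbox_header. A brief sketch of checking them once a command completes (the caller and the mailbox plumbing are assumed):

/* Illustrative only: returns 0 on success, otherwise the STATUS_* code. */
static uint32_t example_sli_config_status(struct mbox_header *hdr)
{
	union lpfc_sli4_cfg_shdr *shdr = &hdr->cfg_shdr;
	uint32_t status = bf_get(lpfc_mbox_hdr_status, &shdr->response);

	if (status != STATUS_SUCCESS)
		/* lpfc_mbox_hdr_add_status carries the finer-grained reason */
		return status;
	return STATUS_SUCCESS;
}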
+struct lpfc_mbx_sli4_config {
+ struct mbox_header header;
+};
+
+struct lpfc_mbx_init_vfi {
+ uint32_t word1;
+#define lpfc_init_vfi_vr_SHIFT 31
+#define lpfc_init_vfi_vr_MASK 0x00000001
+#define lpfc_init_vfi_vr_WORD word1
+#define lpfc_init_vfi_vt_SHIFT 30
+#define lpfc_init_vfi_vt_MASK 0x00000001
+#define lpfc_init_vfi_vt_WORD word1
+#define lpfc_init_vfi_vf_SHIFT 29
+#define lpfc_init_vfi_vf_MASK 0x00000001
+#define lpfc_init_vfi_vf_WORD word1
+#define lpfc_init_vfi_vfi_SHIFT 0
+#define lpfc_init_vfi_vfi_MASK 0x0000FFFF
+#define lpfc_init_vfi_vfi_WORD word1
+ uint32_t word2;
+#define lpfc_init_vfi_fcfi_SHIFT 0
+#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF
+#define lpfc_init_vfi_fcfi_WORD word2
+ uint32_t word3;
+#define lpfc_init_vfi_pri_SHIFT 13
+#define lpfc_init_vfi_pri_MASK 0x00000007
+#define lpfc_init_vfi_pri_WORD word3
+#define lpfc_init_vfi_vf_id_SHIFT 1
+#define lpfc_init_vfi_vf_id_MASK 0x00000FFF
+#define lpfc_init_vfi_vf_id_WORD word3
+ uint32_t word4;
+#define lpfc_init_vfi_hop_count_SHIFT 24
+#define lpfc_init_vfi_hop_count_MASK 0x000000FF
+#define lpfc_init_vfi_hop_count_WORD word4
+};
+
+struct lpfc_mbx_reg_vfi {
+ uint32_t word1;
+#define lpfc_reg_vfi_vp_SHIFT 28
+#define lpfc_reg_vfi_vp_MASK 0x00000001
+#define lpfc_reg_vfi_vp_WORD word1
+#define lpfc_reg_vfi_vfi_SHIFT 0
+#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF
+#define lpfc_reg_vfi_vfi_WORD word1
+ uint32_t word2;
+#define lpfc_reg_vfi_vpi_SHIFT 16
+#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF
+#define lpfc_reg_vfi_vpi_WORD word2
+#define lpfc_reg_vfi_fcfi_SHIFT 0
+#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF
+#define lpfc_reg_vfi_fcfi_WORD word2
+ uint32_t word3_rsvd;
+ uint32_t word4_rsvd;
+ struct ulp_bde64 bde;
+ uint32_t word8_rsvd;
+ uint32_t word9_rsvd;
+ uint32_t word10;
+#define lpfc_reg_vfi_nport_id_SHIFT 0
+#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
+#define lpfc_reg_vfi_nport_id_WORD word10
+};
+
+struct lpfc_mbx_init_vpi {
+ uint32_t word1;
+#define lpfc_init_vpi_vfi_SHIFT 16
+#define lpfc_init_vpi_vfi_MASK 0x0000FFFF
+#define lpfc_init_vpi_vfi_WORD word1
+#define lpfc_init_vpi_vpi_SHIFT 0
+#define lpfc_init_vpi_vpi_MASK 0x0000FFFF
+#define lpfc_init_vpi_vpi_WORD word1
+};
+
+struct lpfc_mbx_read_vpi {
+ uint32_t word1_rsvd;
+ uint32_t word2;
+#define lpfc_mbx_read_vpi_vnportid_SHIFT 0
+#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF
+#define lpfc_mbx_read_vpi_vnportid_WORD word2
+ uint32_t word3_rsvd;
+ uint32_t word4;
+#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0
+#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_acq_alpa_WORD word4
+#define lpfc_mbx_read_vpi_pb_SHIFT 15
+#define lpfc_mbx_read_vpi_pb_MASK 0x00000001
+#define lpfc_mbx_read_vpi_pb_WORD word4
+#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16
+#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_spec_alpa_WORD word4
+#define lpfc_mbx_read_vpi_ns_SHIFT 30
+#define lpfc_mbx_read_vpi_ns_MASK 0x00000001
+#define lpfc_mbx_read_vpi_ns_WORD word4
+#define lpfc_mbx_read_vpi_hl_SHIFT 31
+#define lpfc_mbx_read_vpi_hl_MASK 0x00000001
+#define lpfc_mbx_read_vpi_hl_WORD word4
+ uint32_t word5_rsvd;
+ uint32_t word6;
+#define lpfc_mbx_read_vpi_vpi_SHIFT 0
+#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF
+#define lpfc_mbx_read_vpi_vpi_WORD word6
+ uint32_t word7;
+#define lpfc_mbx_read_vpi_mac_0_SHIFT 0
+#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_mac_0_WORD word7
+#define lpfc_mbx_read_vpi_mac_1_SHIFT 8
+#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_mac_1_WORD word7
+#define lpfc_mbx_read_vpi_mac_2_SHIFT 16
+#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_mac_2_WORD word7
+#define lpfc_mbx_read_vpi_mac_3_SHIFT 24
+#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_mac_3_WORD word7
+ uint32_t word8;
+#define lpfc_mbx_read_vpi_mac_4_SHIFT 0
+#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_mac_4_WORD word8
+#define lpfc_mbx_read_vpi_mac_5_SHIFT 8
+#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF
+#define lpfc_mbx_read_vpi_mac_5_WORD word8
+#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16
+#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF
+#define lpfc_mbx_read_vpi_vlan_tag_WORD word8
+#define lpfc_mbx_read_vpi_vv_SHIFT 28
+#define lpfc_mbx_read_vpi_vv_MASK 0x0000001
+#define lpfc_mbx_read_vpi_vv_WORD word8
+};
+
+struct lpfc_mbx_unreg_vfi {
+ uint32_t word1_rsvd;
+ uint32_t word2;
+#define lpfc_unreg_vfi_vfi_SHIFT 0
+#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF
+#define lpfc_unreg_vfi_vfi_WORD word2
+};
+
+struct lpfc_mbx_resume_rpi {
+ uint32_t word1;
+#define lpfc_resume_rpi_rpi_SHIFT 0
+#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF
+#define lpfc_resume_rpi_rpi_WORD word1
+ uint32_t event_tag;
+ uint32_t word3_rsvd;
+ uint32_t word4_rsvd;
+ uint32_t word5_rsvd;
+ uint32_t word6;
+#define lpfc_resume_rpi_vpi_SHIFT 0
+#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF
+#define lpfc_resume_rpi_vpi_WORD word6
+#define lpfc_resume_rpi_vfi_SHIFT 16
+#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF
+#define lpfc_resume_rpi_vfi_WORD word6
+};
+
+#define REG_FCF_INVALID_QID 0xFFFF
+struct lpfc_mbx_reg_fcfi {
+ uint32_t word1;
+#define lpfc_reg_fcfi_info_index_SHIFT 0
+#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_info_index_WORD word1
+#define lpfc_reg_fcfi_fcfi_SHIFT 16
+#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_fcfi_WORD word1
+ uint32_t word2;
+#define lpfc_reg_fcfi_rq_id1_SHIFT 0
+#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_rq_id1_WORD word2
+#define lpfc_reg_fcfi_rq_id0_SHIFT 16
+#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_rq_id0_WORD word2
+ uint32_t word3;
+#define lpfc_reg_fcfi_rq_id3_SHIFT 0
+#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_rq_id3_WORD word3
+#define lpfc_reg_fcfi_rq_id2_SHIFT 16
+#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_rq_id2_WORD word3
+ uint32_t word4;
+#define lpfc_reg_fcfi_type_match0_SHIFT 24
+#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_match0_WORD word4
+#define lpfc_reg_fcfi_type_mask0_SHIFT 16
+#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_mask0_WORD word4
+#define lpfc_reg_fcfi_rctl_match0_SHIFT 8
+#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_match0_WORD word4
+#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0
+#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_mask0_WORD word4
+ uint32_t word5;
+#define lpfc_reg_fcfi_type_match1_SHIFT 24
+#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_match1_WORD word5
+#define lpfc_reg_fcfi_type_mask1_SHIFT 16
+#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_mask1_WORD word5
+#define lpfc_reg_fcfi_rctl_match1_SHIFT 8
+#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_match1_WORD word5
+#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0
+#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_mask1_WORD word5
+ uint32_t word6;
+#define lpfc_reg_fcfi_type_match2_SHIFT 24
+#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_match2_WORD word6
+#define lpfc_reg_fcfi_type_mask2_SHIFT 16
+#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_mask2_WORD word6
+#define lpfc_reg_fcfi_rctl_match2_SHIFT 8
+#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_match2_WORD word6
+#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0
+#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_mask2_WORD word6
+ uint32_t word7;
+#define lpfc_reg_fcfi_type_match3_SHIFT 24
+#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_match3_WORD word7
+#define lpfc_reg_fcfi_type_mask3_SHIFT 16
+#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_mask3_WORD word7
+#define lpfc_reg_fcfi_rctl_match3_SHIFT 8
+#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_match3_WORD word7
+#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0
+#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_mask3_WORD word7
+ uint32_t word8;
+#define lpfc_reg_fcfi_mam_SHIFT 13
+#define lpfc_reg_fcfi_mam_MASK 0x00000003
+#define lpfc_reg_fcfi_mam_WORD word8
+#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */
+#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */
+#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */
+#define lpfc_reg_fcfi_vv_SHIFT 12
+#define lpfc_reg_fcfi_vv_MASK 0x00000001
+#define lpfc_reg_fcfi_vv_WORD word8
+#define lpfc_reg_fcfi_vlan_tag_SHIFT 0
+#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF
+#define lpfc_reg_fcfi_vlan_tag_WORD word8
+};
+
+struct lpfc_mbx_unreg_fcfi {
+ uint32_t word1_rsv;
+ uint32_t word2;
+#define lpfc_unreg_fcfi_SHIFT 0
+#define lpfc_unreg_fcfi_MASK 0x0000FFFF
+#define lpfc_unreg_fcfi_WORD word2
+};
+
+struct lpfc_mbx_read_rev {
+ uint32_t word1;
+#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16
+#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F
+#define lpfc_mbx_rd_rev_sli_lvl_WORD word1
+#define lpfc_mbx_rd_rev_fcoe_SHIFT 20
+#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001
+#define lpfc_mbx_rd_rev_fcoe_WORD word1
+#define lpfc_mbx_rd_rev_vpd_SHIFT 29
+#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001
+#define lpfc_mbx_rd_rev_vpd_WORD word1
+ uint32_t first_hw_rev;
+ uint32_t second_hw_rev;
+ uint32_t word4_rsvd;
+ uint32_t third_hw_rev;
+ uint32_t word6;
+#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0
+#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF
+#define lpfc_mbx_rd_rev_fcph_low_WORD word6
+#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8
+#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF
+#define lpfc_mbx_rd_rev_fcph_high_WORD word6
+#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16
+#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6
+#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24
+#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6
+ uint32_t word7_rsvd;
+ uint32_t fw_id_rev;
+ uint8_t fw_name[16];
+ uint32_t ulp_fw_id_rev;
+ uint8_t ulp_fw_name[16];
+ uint32_t word18_47_rsvd[30];
+ uint32_t word48;
+#define lpfc_mbx_rd_rev_avail_len_SHIFT 0
+#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF
+#define lpfc_mbx_rd_rev_avail_len_WORD word48
+ uint32_t vpd_paddr_low;
+ uint32_t vpd_paddr_high;
+ uint32_t avail_vpd_len;
+ uint32_t rsvd_52_63[12];
+};
+
+struct lpfc_mbx_read_config {
+ uint32_t word1;
+#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
+#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
+#define lpfc_mbx_rd_conf_max_bbc_WORD word1
+#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
+#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
+#define lpfc_mbx_rd_conf_init_bbc_WORD word1
+ uint32_t word2;
+#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
+#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
+#define lpfc_mbx_rd_conf_nport_did_WORD word2
+#define lpfc_mbx_rd_conf_topology_SHIFT 24
+#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
+#define lpfc_mbx_rd_conf_topology_WORD word2
+ uint32_t word3;
+#define lpfc_mbx_rd_conf_ao_SHIFT 0
+#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
+#define lpfc_mbx_rd_conf_ao_WORD word3
+#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
+#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
+#define lpfc_mbx_rd_conf_bb_scn_WORD word3
+#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
+#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
+#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
+#define lpfc_mbx_rd_conf_mc_SHIFT 29
+#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
+#define lpfc_mbx_rd_conf_mc_WORD word3
+ uint32_t word4;
+#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
+#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
+ uint32_t word5;
+#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
+#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_lp_tov_WORD word5
+ uint32_t word6;
+#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
+#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
+ uint32_t word7;
+#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
+#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
+#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
+ uint32_t word8;
+#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
+#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
+#define lpfc_mbx_rd_conf_al_tov_WORD word8
+ uint32_t word9;
+#define lpfc_mbx_rd_conf_lmt_SHIFT 0
+#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_lmt_WORD word9
+ uint32_t word10;
+#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
+#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
+#define lpfc_mbx_rd_conf_max_alpa_WORD word10
+ uint32_t word11_rsvd;
+ uint32_t word12;
+#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
+#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_base_WORD word12
+#define lpfc_mbx_rd_conf_xri_count_SHIFT 16
+#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_count_WORD word12
+ uint32_t word13;
+#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0
+#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_base_WORD word13
+#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16
+#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_count_WORD word13
+ uint32_t word14;
+#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0
+#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_base_WORD word14
+#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16
+#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_count_WORD word14
+ uint32_t word15;
+#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0
+#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_base_WORD word15
+#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16
+#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_count_WORD word15
+ uint32_t word16;
+#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
+#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
+#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
+#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
+ uint32_t word17;
+#define lpfc_mbx_rd_conf_rq_count_SHIFT 0
+#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_rq_count_WORD word17
+#define lpfc_mbx_rd_conf_eq_count_SHIFT 16
+#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_eq_count_WORD word17
+ uint32_t word18;
+#define lpfc_mbx_rd_conf_wq_count_SHIFT 0
+#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_wq_count_WORD word18
+#define lpfc_mbx_rd_conf_cq_count_SHIFT 16
+#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF
+#define lpfc_mbx_rd_conf_cq_count_WORD word18
+};
+
+struct lpfc_mbx_request_features {
+ uint32_t word1;
+#define lpfc_mbx_rq_ftr_qry_SHIFT 0
+#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_qry_WORD word1
+ uint32_t word2;
+#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0
+#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2
+#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1
+#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2
+#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 2
+#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_dif_WORD word2
+#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3
+#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_vf_WORD word2
+#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4
+#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2
+#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5
+#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2
+#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6
+#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2
+#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
+#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
+ uint32_t word3;
+#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
+#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1
+#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2
+#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3
+#define lpfc_mbx_rq_ftr_rsp_vf__MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4
+#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5
+#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6
+#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
+#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
+};
+
+/* Mailbox Completion Queue Error Messages */
+#define MB_CQE_STATUS_SUCCESS 0x0
+#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
+#define MB_CQE_STATUS_INVALID_PARAMETER 0x2
+#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3
+#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
+#define MB_CQE_STATUS_DMA_FAILED 0x5
+
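A compact way to make these completion status codes readable in logs is a switch-based lookup; the helper below is purely illustrative and not a function the driver exports:

/* Illustrative only. */
static inline const char *example_mb_cqe_status_str(uint32_t status)
{
	switch (status) {
	case MB_CQE_STATUS_SUCCESS:			return "success";
	case MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES:	return "insufficient privileges";
	case MB_CQE_STATUS_INVALID_PARAMETER:		return "invalid parameter";
	case MB_CQE_STATUS_INSUFFICIENT_RESOURCES:	return "insufficient resources";
	case MB_CEQ_STATUS_QUEUE_FLUSHING:		return "queue flushing";
	case MB_CQE_STATUS_DMA_FAILED:			return "DMA failed";
	default:					return "unknown";
	}
}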
+/* mailbox queue entry structure */
+struct lpfc_mqe {
+ uint32_t word0;
+#define lpfc_mqe_status_SHIFT 16
+#define lpfc_mqe_status_MASK 0x0000FFFF
+#define lpfc_mqe_status_WORD word0
+#define lpfc_mqe_command_SHIFT 8
+#define lpfc_mqe_command_MASK 0x000000FF
+#define lpfc_mqe_command_WORD word0
+ union {
+ uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
+ /* sli4 mailbox commands */
+ struct lpfc_mbx_sli4_config sli4_config;
+ struct lpfc_mbx_init_vfi init_vfi;
+ struct lpfc_mbx_reg_vfi reg_vfi;
+ struct lpfc_mbx_reg_vfi unreg_vfi;
+ struct lpfc_mbx_init_vpi init_vpi;
+ struct lpfc_mbx_resume_rpi resume_rpi;
+ struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
+ struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
+ struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
+ struct lpfc_mbx_reg_fcfi reg_fcfi;
+ struct lpfc_mbx_unreg_fcfi unreg_fcfi;
+ struct lpfc_mbx_mq_create mq_create;
+ struct lpfc_mbx_eq_create eq_create;
+ struct lpfc_mbx_cq_create cq_create;
+ struct lpfc_mbx_wq_create wq_create;
+ struct lpfc_mbx_rq_create rq_create;
+ struct lpfc_mbx_mq_destroy mq_destroy;
+ struct lpfc_mbx_eq_destroy eq_destroy;
+ struct lpfc_mbx_cq_destroy cq_destroy;
+ struct lpfc_mbx_wq_destroy wq_destroy;
+ struct lpfc_mbx_rq_destroy rq_destroy;
+ struct lpfc_mbx_post_sgl_pages post_sgl_pages;
+ struct lpfc_mbx_nembed_cmd nembed_cmd;
+ struct lpfc_mbx_read_rev read_rev;
+ struct lpfc_mbx_read_vpi read_vpi;
+ struct lpfc_mbx_read_config rd_config;
+ struct lpfc_mbx_request_features req_ftrs;
+ struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
+ struct lpfc_mbx_nop nop;
+ } un;
+};
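Word0 of the MQE carries both the command opcode (bits 15:8) and, on completion, the status (bits 31:16); the payload for a given command lives in the matching union member. A hedged, by-hand sketch of placing an opcode (the driver normally goes through its bit-field helpers) is:

/* Illustrative only; 'opcode' is whatever mailbox command code the caller uses. */
static inline void example_set_mqe_command(struct lpfc_mqe *mqe, uint8_t opcode)
{
	mqe->word0 &= ~(lpfc_mqe_command_MASK << lpfc_mqe_command_SHIFT);
	mqe->word0 |= ((uint32_t)opcode & lpfc_mqe_command_MASK)
			<< lpfc_mqe_command_SHIFT;
}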
+
+struct lpfc_mcqe {
+ uint32_t word0;
+#define lpfc_mcqe_status_SHIFT 0
+#define lpfc_mcqe_status_MASK 0x0000FFFF
+#define lpfc_mcqe_status_WORD word0
+#define lpfc_mcqe_ext_status_SHIFT 16
+#define lpfc_mcqe_ext_status_MASK 0x0000FFFF
+#define lpfc_mcqe_ext_status_WORD word0
+ uint32_t mcqe_tag0;
+ uint32_t mcqe_tag1;
+ uint32_t trailer;
+#define lpfc_trailer_valid_SHIFT 31
+#define lpfc_trailer_valid_MASK 0x00000001
+#define lpfc_trailer_valid_WORD trailer
+#define lpfc_trailer_async_SHIFT 30
+#define lpfc_trailer_async_MASK 0x00000001
+#define lpfc_trailer_async_WORD trailer
+#define lpfc_trailer_hpi_SHIFT 29
+#define lpfc_trailer_hpi_MASK 0x00000001
+#define lpfc_trailer_hpi_WORD trailer
+#define lpfc_trailer_completed_SHIFT 28
+#define lpfc_trailer_completed_MASK 0x00000001
+#define lpfc_trailer_completed_WORD trailer
+#define lpfc_trailer_consumed_SHIFT 27
+#define lpfc_trailer_consumed_MASK 0x00000001
+#define lpfc_trailer_consumed_WORD trailer
+#define lpfc_trailer_type_SHIFT 16
+#define lpfc_trailer_type_MASK 0x000000FF
+#define lpfc_trailer_type_WORD trailer
+#define lpfc_trailer_code_SHIFT 8
+#define lpfc_trailer_code_MASK 0x000000FF
+#define lpfc_trailer_code_WORD trailer
+#define LPFC_TRAILER_CODE_LINK 0x1
+#define LPFC_TRAILER_CODE_FCOE 0x2
+#define LPFC_TRAILER_CODE_DCBX 0x3
+};
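On the receive side, the trailer word tells the handler whether an MCQE is valid and whether it reports a mailbox completion or an asynchronous event. A small illustrative check (the driver uses its own helpers for this) might read:

/* Illustrative only: returns 1 for a valid async event, 0 otherwise. */
static inline int example_mcqe_is_async(const struct lpfc_mcqe *mcqe)
{
	if (!((mcqe->trailer >> lpfc_trailer_valid_SHIFT) &
	      lpfc_trailer_valid_MASK))
		return 0;
	return (mcqe->trailer >> lpfc_trailer_async_SHIFT) &
	       lpfc_trailer_async_MASK;
}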
+
+struct lpfc_acqe_link {
+ uint32_t word0;
+#define lpfc_acqe_link_speed_SHIFT 24
+#define lpfc_acqe_link_speed_MASK 0x000000FF
+#define lpfc_acqe_link_speed_WORD word0
+#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0
+#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1
+#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2
+#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3
+#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4
+#define lpfc_acqe_link_duplex_SHIFT 16
+#define lpfc_acqe_link_duplex_MASK 0x000000FF
+#define lpfc_acqe_link_duplex_WORD word0
+#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0
+#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1
+#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2
+#define lpfc_acqe_link_status_SHIFT 8
+#define lpfc_acqe_link_status_MASK 0x000000FF
+#define lpfc_acqe_link_status_WORD word0
+#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0
+#define LPFC_ASYNC_LINK_STATUS_UP 0x1
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3
+#define lpfc_acqe_link_physical_SHIFT 0
+#define lpfc_acqe_link_physical_MASK 0x000000FF
+#define lpfc_acqe_link_physical_WORD word0
+#define LPFC_ASYNC_LINK_PORT_A 0x0
+#define LPFC_ASYNC_LINK_PORT_B 0x1
+ uint32_t word1;
+#define lpfc_acqe_link_fault_SHIFT 0
+#define lpfc_acqe_link_fault_MASK 0x000000FF
+#define lpfc_acqe_link_fault_WORD word1
+#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
+#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
+#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
+ uint32_t event_tag;
+ uint32_t trailer;
+};
+
+struct lpfc_acqe_fcoe {
+ uint32_t fcf_index;
+ uint32_t word1;
+#define lpfc_acqe_fcoe_fcf_count_SHIFT 0
+#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF
+#define lpfc_acqe_fcoe_fcf_count_WORD word1
+#define lpfc_acqe_fcoe_event_type_SHIFT 16
+#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF
+#define lpfc_acqe_fcoe_event_type_WORD word1
+#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1
+#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
+#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
+ uint32_t event_tag;
+ uint32_t trailer;
+};
+
+struct lpfc_acqe_dcbx {
+ uint32_t tlv_ttl;
+ uint32_t reserved;
+ uint32_t event_tag;
+ uint32_t trailer;
+};
+
+/*
+ * Define the bootstrap mailbox (bmbx) region used to communicate
+ * mailbox command between the host and port. The mailbox consists
+ * of a payload area of 256 bytes and a completion queue of length
+ * 16 bytes.
+ */
+struct lpfc_bmbx_create {
+ struct lpfc_mqe mqe;
+ struct lpfc_mcqe mcqe;
+};
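Given the sizes quoted in the comment above (a 256-byte payload plus a 16-byte completion entry), the completion half of the bootstrap region can be sanity-checked at compile time; the payload half depends on LPFC_SLI4_MB_WORD_COUNT and on no embedded command outgrowing mb_words, so it is deliberately not asserted here. Sketch only, using C11 _Static_assert:

/* Sanity-check sketch, not part of the driver. */
_Static_assert(sizeof(struct lpfc_mcqe) == 4 * sizeof(uint32_t),
	       "MCQE is expected to be four 32-bit words (16 bytes)");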
+
+#define SGL_ALIGN_SZ 64
+#define SGL_PAGE_SIZE 4096
+/* align SGL addr on a size boundary - adjust address up */
+#define NO_XRI ((uint16_t)-1)
+struct wqe_common {
+ uint32_t word6;
+#define wqe_xri_SHIFT 0
+#define wqe_xri_MASK 0x0000FFFF
+#define wqe_xri_WORD word6
+#define wqe_ctxt_tag_SHIFT 16
+#define wqe_ctxt_tag_MASK 0x0000FFFF
+#define wqe_ctxt_tag_WORD word6
+ uint32_t word7;
+#define wqe_ct_SHIFT 2
+#define wqe_ct_MASK 0x00000003
+#define wqe_ct_WORD word7
+#define wqe_status_SHIFT 4
+#define wqe_status_MASK 0x0000000f
+#define wqe_status_WORD word7
+#define wqe_cmnd_SHIFT 8
+#define wqe_cmnd_MASK 0x000000ff
+#define wqe_cmnd_WORD word7
+#define wqe_class_SHIFT 16
+#define wqe_class_MASK 0x00000007
+#define wqe_class_WORD word7
+#define wqe_pu_SHIFT 20
+#define wqe_pu_MASK 0x00000003
+#define wqe_pu_WORD word7
+#define wqe_erp_SHIFT 22
+#define wqe_erp_MASK 0x00000001
+#define wqe_erp_WORD word7
+#define wqe_lnk_SHIFT 23
+#define wqe_lnk_MASK 0x00000001
+#define wqe_lnk_WORD word7
+#define wqe_tmo_SHIFT 24
+#define wqe_tmo_MASK 0x000000ff
+#define wqe_tmo_WORD word7
+ uint32_t abort_tag; /* word 8 in WQE */
+ uint32_t word9;
+#define wqe_reqtag_SHIFT 0
+#define wqe_reqtag_MASK 0x0000FFFF
+#define wqe_reqtag_WORD word9
+#define wqe_rcvoxid_SHIFT 16
+#define wqe_rcvoxid_MASK 0x0000FFFF
+#define wqe_rcvoxid_WORD word9
+ uint32_t word10;
+#define wqe_pri_SHIFT 16
+#define wqe_pri_MASK 0x00000007
+#define wqe_pri_WORD word10
+#define wqe_pv_SHIFT 19
+#define wqe_pv_MASK 0x00000001
+#define wqe_pv_WORD word10
+#define wqe_xc_SHIFT 21
+#define wqe_xc_MASK 0x00000001
+#define wqe_xc_WORD word10
+#define wqe_ccpe_SHIFT 23
+#define wqe_ccpe_MASK 0x00000001
+#define wqe_ccpe_WORD word10
+#define wqe_ccp_SHIFT 24
+#define wqe_ccp_MASK 0x000000ff
+#define wqe_ccp_WORD word10
+ uint32_t word11;
+#define wqe_cmd_type_SHIFT 0
+#define wqe_cmd_type_MASK 0x0000000f
+#define wqe_cmd_type_WORD word11
+#define wqe_wqec_SHIFT 7
+#define wqe_wqec_MASK 0x00000001
+#define wqe_wqec_WORD word11
+#define wqe_cqid_SHIFT 16
+#define wqe_cqid_MASK 0x000003ff
+#define wqe_cqid_WORD word11
+};
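The per-command WQE layouts that follow all embed this structure as "words 6-11", so it is expected to span exactly six 32-bit words; a compile-time check expressing that expectation (sketch only, C11) would be:

_Static_assert(sizeof(struct wqe_common) == 6 * sizeof(uint32_t),
	       "wqe_common is expected to cover WQE words 6 through 11");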
+
+struct wqe_did {
+ uint32_t word5;
+#define wqe_els_did_SHIFT 0
+#define wqe_els_did_MASK 0x00FFFFFF
+#define wqe_els_did_WORD word5
+#define wqe_xmit_bls_ar_SHIFT 30
+#define wqe_xmit_bls_ar_MASK 0x00000001
+#define wqe_xmit_bls_ar_WORD word5
+#define wqe_xmit_bls_xo_SHIFT 31
+#define wqe_xmit_bls_xo_MASK 0x00000001
+#define wqe_xmit_bls_xo_WORD word5
+};
+
+struct els_request64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t payload_len;
+ uint32_t word4;
+#define els_req64_sid_SHIFT 0
+#define els_req64_sid_MASK 0x00FFFFFF
+#define els_req64_sid_WORD word4
+#define els_req64_sp_SHIFT 24
+#define els_req64_sp_MASK 0x00000001
+#define els_req64_sp_WORD word4
+#define els_req64_vf_SHIFT 25
+#define els_req64_vf_MASK 0x00000001
+#define els_req64_vf_WORD word4
+ struct wqe_did wqe_dest;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t word12;
+#define els_req64_vfid_SHIFT 1
+#define els_req64_vfid_MASK 0x00000FFF
+#define els_req64_vfid_WORD word12
+#define els_req64_pri_SHIFT 13
+#define els_req64_pri_MASK 0x00000007
+#define els_req64_pri_WORD word12
+ uint32_t word13;
+#define els_req64_hopcnt_SHIFT 24
+#define els_req64_hopcnt_MASK 0x000000ff
+#define els_req64_hopcnt_WORD word13
+ uint32_t reserved[2];
+};
+
+struct xmit_els_rsp64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ struct wqe_did wqe_dest;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4];
+};
+
+struct xmit_bls_rsp64_wqe {
+ uint32_t payload0;
+ uint32_t word1;
+#define xmit_bls_rsp64_rxid_SHIFT 0
+#define xmit_bls_rsp64_rxid_MASK 0x0000ffff
+#define xmit_bls_rsp64_rxid_WORD word1
+#define xmit_bls_rsp64_oxid_SHIFT 16
+#define xmit_bls_rsp64_oxid_MASK 0x0000ffff
+#define xmit_bls_rsp64_oxid_WORD word1
+ uint32_t word2;
+#define xmit_bls_rsp64_seqcntlo_SHIFT 0
+#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
+#define xmit_bls_rsp64_seqcntlo_WORD word2
+#define xmit_bls_rsp64_seqcnthi_SHIFT 16
+#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff
+#define xmit_bls_rsp64_seqcnthi_WORD word2
+ uint32_t rsrvd3;
+ uint32_t rsrvd4;
+ struct wqe_did wqe_dest;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4];
+};
+struct wqe_rctl_dfctl {
+ uint32_t word5;
+#define wqe_si_SHIFT 2
+#define wqe_si_MASK 0x00000001
+#define wqe_si_WORD word5
+#define wqe_la_SHIFT 3
+#define wqe_la_MASK 0x00000001
+#define wqe_la_WORD word5
+#define wqe_ls_SHIFT 7
+#define wqe_ls_MASK 0x00000001
+#define wqe_ls_WORD word5
+#define wqe_dfctl_SHIFT 8
+#define wqe_dfctl_MASK 0x000000ff
+#define wqe_dfctl_WORD word5
+#define wqe_type_SHIFT 16
+#define wqe_type_MASK 0x000000ff
+#define wqe_type_WORD word5
+#define wqe_rctl_SHIFT 24
+#define wqe_rctl_MASK 0x000000ff
+#define wqe_rctl_WORD word5
+};
+
+struct xmit_seq64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t paylaod_offset;
+ uint32_t relative_offset;
+ struct wqe_rctl_dfctl wge_ctl;
+ struct wqe_common wqe_com; /* words 6-11 */
+ /* Note: word10 different REVISIT */
+ uint32_t xmit_len;
+ uint32_t rsvd_12_15[3];
+};
+struct xmit_bcast64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t paylaod_len;
+ uint32_t rsvd4;
+ struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4];
+};
+
+struct gen_req64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t command_len;
+ uint32_t payload_len;
+ struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4];
+};
+
+struct create_xri_wqe {
+ uint32_t rsrvd[5]; /* words 0-4 */
+ struct wqe_did wqe_dest; /* word 5 */
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4]; /* word 12-15 */
+};
+
+#define T_REQUEST_TAG 3
+#define T_XRI_TAG 1
+
+struct abort_cmd_wqe {
+ uint32_t rsrvd[3];
+ uint32_t word3;
+#define abort_cmd_ia_SHIFT 0
+#define abort_cmd_ia_MASK 0x00000001
+#define abort_cmd_ia_WORD word3
+#define abort_cmd_criteria_SHIFT 8
+#define abort_cmd_criteria_MASK 0x000000ff
+#define abort_cmd_criteria_WORD word3
+ uint32_t rsrvd4;
+ uint32_t rsrvd5;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4]; /* word 12-15 */
+};
+
+struct fcp_iwrite64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t payload_len;
+ uint32_t total_xfer_len;
+ uint32_t initial_xfer_len;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4]; /* word 12-15 */
+};
+
+struct fcp_iread64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t payload_len; /* word 3 */
+ uint32_t total_xfer_len; /* word 4 */
+ uint32_t rsrvd5; /* word 5 */
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4]; /* word 12-15 */
+};
+
+struct fcp_icmnd64_wqe {
+ struct ulp_bde64 bde; /* words 0-2 */
+ uint32_t rsrvd[3]; /* words 3-5 */
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4]; /* word 12-15 */
+};
+
+
+union lpfc_wqe {
+ uint32_t words[16];
+ struct lpfc_wqe_generic generic;
+ struct fcp_icmnd64_wqe fcp_icmd;
+ struct fcp_iread64_wqe fcp_iread;
+ struct fcp_iwrite64_wqe fcp_iwrite;
+ struct abort_cmd_wqe abort_cmd;
+ struct create_xri_wqe create_xri;
+ struct xmit_bcast64_wqe xmit_bcast64;
+ struct xmit_seq64_wqe xmit_sequence;
+ struct xmit_bls_rsp64_wqe xmit_bls_rsp;
+ struct xmit_els_rsp64_wqe xmit_els_rsp;
+ struct els_request64_wqe els_req;
+ struct gen_req64_wqe gen_req;
+};
+
+#define FCP_COMMAND 0x0
+#define FCP_COMMAND_DATA_OUT 0x1
+#define ELS_COMMAND_NON_FIP 0xC
+#define ELS_COMMAND_FIP 0xD
+#define OTHER_COMMAND 0x8
+
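The command-type codes above are what gets stamped into the wqe_cmd_type field of word 11 before a WQE is posted (assuming, as the 4-bit field width suggests, that they are command-type values). An illustrative setter, not the driver's own routine:

/* Illustrative only. */
static inline void example_mark_els_wqe(union lpfc_wqe *wqe)
{
	wqe->els_req.wqe_com.word11 &=
		~(wqe_cmd_type_MASK << wqe_cmd_type_SHIFT);
	wqe->els_req.wqe_com.word11 |=
		(ELS_COMMAND_NON_FIP & wqe_cmd_type_MASK) << wqe_cmd_type_SHIFT;
}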
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 86d1bdcbf2d..2f5907f92ee 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -34,8 +34,10 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@@ -51,9 +53,23 @@ char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;
-static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
+static int lpfc_sli4_queue_create(struct lpfc_hba *);
+static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
+static int lpfc_setup_endian_order(struct lpfc_hba *);
+static int lpfc_sli4_read_config(struct lpfc_hba *);
+static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
+static void lpfc_free_sgl_list(struct lpfc_hba *);
+static int lpfc_init_sgl_list(struct lpfc_hba *);
+static int lpfc_init_active_sgl_array(struct lpfc_hba *);
+static void lpfc_free_active_sgl(struct lpfc_hba *);
+static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
+static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
+static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
+static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
+static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -92,7 +108,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
return -ENOMEM;
}
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
phba->link_state = LPFC_INIT_MBX_CMDS;
if (lpfc_is_LC_HBA(phba->pcidev->device)) {
@@ -205,6 +221,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
mb->mbxCommand, mb->mbxStatus);
mb->un.varDmp.word_cnt = 0;
}
+ /* dump mem may return zero when it has finished or when we hit a
+ * mailbox error; either way we are done.
+ */
+ if (mb->un.varDmp.word_cnt == 0)
+ break;
if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
@@ -233,7 +254,7 @@ out_free_mbox:
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
- if (pmboxq->mb.mbxStatus == MBX_SUCCESS)
+ if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
phba->temp_sensor_support = 1;
else
phba->temp_sensor_support = 0;
@@ -260,7 +281,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* character array used for decoding dist type. */
char dist_char[] = "nabx";
- if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
+ if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
return;
}
@@ -268,7 +289,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
prg = (struct prog_id *) &prog_id_word;
/* word 7 contain option rom version */
- prog_id_word = pmboxq->mb.un.varWords[7];
+ prog_id_word = pmboxq->u.mb.un.varWords[7];
/* Decode the Option rom version word to a readable string */
if (prg->dist < 4)
@@ -325,7 +346,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
phba->link_state = LPFC_HBA_ERROR;
return -ENOMEM;
}
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
/* Get login parameters for NID. */
lpfc_read_sparam(phba, pmb, 0);
@@ -364,6 +385,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Update the fc_host data structures with new wwn. */
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+ fc_host_max_npiv_vports(shost) = phba->max_vpi;
/* If no serial number in VPD data, use low 6 bytes of WWNN */
/* This should be consolidated into parse_vpd ? - mr */
@@ -460,17 +482,18 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"0352 Config MSI mailbox command "
"failed, mbxCmd x%x, mbxStatus x%x\n",
- pmb->mb.mbxCommand, pmb->mb.mbxStatus);
+ pmb->u.mb.mbxCommand,
+ pmb->u.mb.mbxStatus);
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
}
}
+ spin_lock_irq(&phba->hbalock);
/* Initialize ERATT handling flag */
phba->hba_flag &= ~HBA_ERATT_HANDLED;
/* Enable appropriate host interrupts */
- spin_lock_irq(&phba->hbalock);
status = readl(phba->HCregaddr);
status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
if (psli->num_rings > 0)
@@ -571,16 +594,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
int i;
- /* Disable interrupts */
- writel(0, phba->HCregaddr);
- readl(phba->HCregaddr); /* flush */
+
+ if (phba->sli_rev <= LPFC_SLI_REV3) {
+ /* Disable interrupts */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ }
if (phba->pport->load_flag & FC_UNLOADING)
lpfc_cleanup_discovery_resources(phba->pport);
else {
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
+ for (i = 0; i <= phba->max_vports &&
+ vports[i] != NULL; i++)
lpfc_cleanup_discovery_resources(vports[i]);
lpfc_destroy_vport_work_array(phba, vports);
}
@@ -588,7 +615,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
}
/**
- * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset
+ * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
* @phba: pointer to lpfc HBA data structure.
*
* This routine will do uninitialization after the HBA is reset when bring
@@ -598,8 +625,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
* 0 - sucess.
* Any other value - error.
**/
-int
-lpfc_hba_down_post(struct lpfc_hba *phba)
+static int
+lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
@@ -642,6 +669,77 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do uninitialization after the HBA is reset when bringing
+ * down the SLI Layer.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
+static int
+lpfc_hba_down_post_s4(struct lpfc_hba *phba)
+{
+ struct lpfc_scsi_buf *psb, *psb_next;
+ LIST_HEAD(aborts);
+ int ret;
+ unsigned long iflag = 0;
+ ret = lpfc_hba_down_post_s3(phba);
+ if (ret)
+ return ret;
+ /* At this point in time the HBA is either reset or DOA. Either
+ * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
+ * on the lpfc_sgl_list so that it can either be freed if the
+ * driver is unloading or reposted if the driver is restarting
+ * the port.
+ */
+ spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
+ /* scsi_buf_list */
+ /* abts_sgl_list_lock required because worker thread uses this
+ * list.
+ */
+ spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
+ &phba->sli4_hba.lpfc_sgl_list);
+ spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ /* abts_scsi_buf_list_lock required because worker thread uses this
+ * list.
+ */
+ spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
+ &aborts);
+ spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ spin_unlock_irq(&phba->hbalock);
+
+ list_for_each_entry_safe(psb, psb_next, &aborts, list) {
+ psb->pCmd = NULL;
+ psb->status = IOSTAT_SUCCESS;
+ }
+ spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+ list_splice(&aborts, &phba->lpfc_scsi_buf_list);
+ spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+ return 0;
+}
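The routine above uses a common kernel pattern: detach an entire list under its lock with list_splice_init(), then walk the detached entries with the lock dropped. A generic, self-contained sketch of that pattern (the demo_* names are illustrative only; it relies on <linux/list.h> and <linux/spinlock.h>):

struct demo_buf {
	struct list_head list;
	int status;
};

static void demo_drain(spinlock_t *lock, struct list_head *pending)
{
	LIST_HEAD(local);
	struct demo_buf *buf, *next;

	spin_lock_irq(lock);
	list_splice_init(pending, &local);	/* 'pending' is now empty */
	spin_unlock_irq(lock);

	list_for_each_entry_safe(buf, next, &local, list)
		buf->status = 0;		/* process each entry off-lock */
}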
+
+/**
+ * lpfc_hba_down_post - Wrapper func for hba down post routine
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine wraps the actual SLI3 or SLI4 routine for performing
+ * uninitialization after the HBA is reset when bringing down the SLI Layer.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
+int
+lpfc_hba_down_post(struct lpfc_hba *phba)
+{
+ return (*phba->lpfc_hba_down_post)(phba);
+}
/**
* lpfc_hb_timeout - The HBA-timer timeout handler
@@ -809,7 +907,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
"taking this port offline.\n");
spin_lock_irq(&phba->hbalock);
- psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
lpfc_offline_prep(phba);
@@ -834,13 +932,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
spin_lock_irq(&phba->hbalock);
- psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
lpfc_offline_prep(phba);
lpfc_offline(phba);
lpfc_reset_barrier(phba);
+ spin_lock_irq(&phba->hbalock);
lpfc_sli_brdreset(phba);
+ spin_unlock_irq(&phba->hbalock);
lpfc_hba_down_post(phba);
lpfc_sli_brdready(phba, HS_MBRDY);
lpfc_unblock_mgmt_io(phba);
@@ -849,6 +949,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to bring a SLI4 HBA offline when HBA hardware error
+ * other than Port Error 6 has been detected.
+ **/
+static void
+lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
+{
+ lpfc_offline_prep(phba);
+ lpfc_offline(phba);
+ lpfc_sli4_brdreset(phba);
+ lpfc_hba_down_post(phba);
+ lpfc_sli4_post_status_check(phba);
+ lpfc_unblock_mgmt_io(phba);
+ phba->link_state = LPFC_HBA_ERROR;
+}
+
+/**
* lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
* @phba: pointer to lpfc hba data structure.
*
@@ -864,6 +983,16 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli = &phba->sli;
+ /* If the pci channel is offline, ignore possible errors,
+ * since we cannot communicate with the pci card anyway.
+ */
+ if (pci_channel_offline(phba->pcidev)) {
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~DEFER_ERATT;
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0479 Deferred Adapter Hardware Error "
"Data: x%x x%x x%x\n",
@@ -871,7 +1000,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
phba->work_status[0], phba->work_status[1]);
spin_lock_irq(&phba->hbalock);
- psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
@@ -909,13 +1038,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
phba->work_hs = old_host_status & ~HS_FFER1;
+ spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~DEFER_ERATT;
+ spin_unlock_irq(&phba->hbalock);
phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
+static void
+lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
+{
+ struct lpfc_board_event_header board_event;
+ struct Scsi_Host *shost;
+
+ board_event.event_type = FC_REG_BOARD_EVENT;
+ board_event.subcategory = LPFC_EVENT_PORTINTERR;
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(board_event),
+ (char *) &board_event,
+ LPFC_NL_VENDOR_ID);
+}
+
/**
- * lpfc_handle_eratt - The HBA hardware error handler
+ * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to handle the following HBA hardware error
@@ -924,8 +1070,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
* 2 - DMA ring index out of range
* 3 - Mailbox command came back as unknown
**/
-void
-lpfc_handle_eratt(struct lpfc_hba *phba)
+static void
+lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct lpfc_sli *psli = &phba->sli;
@@ -934,24 +1080,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
unsigned long temperature;
struct temp_event temp_event_data;
struct Scsi_Host *shost;
- struct lpfc_board_event_header board_event;
/* If the pci channel is offline, ignore possible errors,
- * since we cannot communicate with the pci card anyway. */
- if (pci_channel_offline(phba->pcidev))
+ * since we cannot communicate with the pci card anyway.
+ */
+ if (pci_channel_offline(phba->pcidev)) {
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~DEFER_ERATT;
+ spin_unlock_irq(&phba->hbalock);
return;
+ }
+
/* If resets are disabled then leave the HBA alone and return */
if (!phba->cfg_enable_hba_reset)
return;
/* Send an internal error event to mgmt application */
- board_event.event_type = FC_REG_BOARD_EVENT;
- board_event.subcategory = LPFC_EVENT_PORTINTERR;
- shost = lpfc_shost_from_vport(phba->pport);
- fc_host_post_vendor_event(shost, fc_get_event_number(),
- sizeof(board_event),
- (char *) &board_event,
- LPFC_NL_VENDOR_ID);
+ lpfc_board_errevt_to_mgmt(phba);
if (phba->hba_flag & DEFER_ERATT)
lpfc_handle_deferred_eratt(phba);
@@ -965,7 +1110,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
phba->work_status[0], phba->work_status[1]);
spin_lock_irq(&phba->hbalock);
- psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
/*
@@ -1037,6 +1182,65 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
}
/**
+ * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the SLI4 HBA hardware error attention
+ * conditions.
+ **/
+static void
+lpfc_handle_eratt_s4(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+ uint32_t event_data;
+ struct Scsi_Host *shost;
+
+ /* If the pci channel is offline, ignore possible errors, since
+ * we cannot communicate with the pci card anyway.
+ */
+ if (pci_channel_offline(phba->pcidev))
+ return;
+ /* If resets are disabled then leave the HBA alone and return */
+ if (!phba->cfg_enable_hba_reset)
+ return;
+
+ /* Send an internal error event to mgmt application */
+ lpfc_board_errevt_to_mgmt(phba);
+
+ /* For now, the actual action for SLI4 device handling is not
+ * specified yet, so treat it as an adapter hardware failure
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
+ phba->work_status[0], phba->work_status[1]);
+
+ event_data = FC_REG_DUMP_EVENT;
+ shost = lpfc_shost_from_vport(vport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(event_data), (char *) &event_data,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
+ lpfc_sli4_offline_eratt(phba);
+}
+
+/**
+ * lpfc_handle_eratt - Wrapper func for handling hba error attention
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine wraps the actual SLI3 or SLI4 HBA error attention handling
+ * routine, dispatching through the API jump table function pointer in the
+ * lpfc_hba struct. The wrapper itself returns nothing.
+ **/
+void
+lpfc_handle_eratt(struct lpfc_hba *phba)
+{
+ (*phba->lpfc_handle_eratt)(phba);
+}
+
+/**
* lpfc_handle_latt - The HBA link event handler
* @phba: pointer to lpfc hba data structure.
*
@@ -1137,7 +1341,7 @@ lpfc_handle_latt_err_exit:
* 0 - pointer to the VPD passed in is NULL
* 1 - success
**/
-static int
+int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
uint8_t lenlo, lenhi;
@@ -1292,6 +1496,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
uint16_t dev_id = phba->pcidev->device;
int max_speed;
int GE = 0;
+ int oneConnect = 0; /* default is not a oneConnect */
struct {
char * name;
int max_speed;
@@ -1437,6 +1642,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_PROTEUS_S:
m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
break;
+ case PCI_DEVICE_ID_TIGERSHARK:
+ oneConnect = 1;
+ m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
+ break;
+ case PCI_DEVICE_ID_TIGERSHARK_S:
+ oneConnect = 1;
+ m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
+ break;
default:
m = (typeof(m)){ NULL };
break;
@@ -1444,13 +1657,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
if (mdp && mdp[0] == '\0')
snprintf(mdp, 79,"%s", m.name);
- if (descp && descp[0] == '\0')
- snprintf(descp, 255,
- "Emulex %s %d%s %s %s",
- m.name, m.max_speed,
- (GE) ? "GE" : "Gb",
- m.bus,
- (GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
+ /* OneConnect HBAs require special processing; they are all initiators
+ * and we append the port number to the description.
+ */
+ if (descp && descp[0] == '\0') {
+ if (oneConnect)
+ snprintf(descp, 255,
+ "Emulex OneConnect %s, FCoE Initiator, Port %s",
+ m.name,
+ phba->Port);
+ else
+ snprintf(descp, 255,
+ "Emulex %s %d%s %s %s",
+ m.name, m.max_speed,
+ (GE) ? "GE" : "Gb",
+ m.bus,
+ (GE) ? "FCoE Adapter" :
+ "Fibre Channel Adapter");
+ }
}
/**
@@ -1533,7 +1757,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
icmd->ulpLe = 1;
- if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
+ if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
+ IOCB_ERROR) {
lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
kfree(mp1);
cnt++;
@@ -1761,7 +1986,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
* Lets wait for this to happen, if needed.
*/
while (!list_empty(&vport->fc_nodes)) {
-
if (i++ > 3000) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0233 Nodelist not empty\n");
@@ -1782,7 +2006,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
/* Wait for any activity on ndlps to settle */
msleep(10);
}
- return;
}
/**
@@ -1803,22 +2026,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
}
/**
- * lpfc_stop_phba_timers - Stop all the timers associated with an HBA
+ * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
* @phba: pointer to lpfc hba data structure.
*
* This routine stops all the timers associated with a HBA. This function is
* invoked before either putting a HBA offline or unloading the driver.
**/
-static void
-lpfc_stop_phba_timers(struct lpfc_hba *phba)
+void
+lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
- del_timer_sync(&phba->fcp_poll_timer);
lpfc_stop_vport_timers(phba->pport);
del_timer_sync(&phba->sli.mbox_tmo);
del_timer_sync(&phba->fabric_block_timer);
- phba->hb_outstanding = 0;
- del_timer_sync(&phba->hb_tmofunc);
del_timer_sync(&phba->eratt_poll);
+ del_timer_sync(&phba->hb_tmofunc);
+ phba->hb_outstanding = 0;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ /* Stop any LightPulse device specific driver timers */
+ del_timer_sync(&phba->fcp_poll_timer);
+ break;
+ case LPFC_PCI_DEV_OC:
+ /* Stop any OneConnect device specific driver timers */
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0297 Invalid device group (x%x)\n",
+ phba->pci_dev_grp);
+ break;
+ }
return;
}
@@ -1878,14 +2115,21 @@ lpfc_online(struct lpfc_hba *phba)
return 1;
}
- if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */
- lpfc_unblock_mgmt_io(phba);
- return 1;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
+ lpfc_unblock_mgmt_io(phba);
+ return 1;
+ }
+ } else {
+ if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
+ lpfc_unblock_mgmt_io(phba);
+ return 1;
+ }
}
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
struct Scsi_Host *shost;
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
@@ -1947,11 +2191,12 @@ lpfc_offline_prep(struct lpfc_hba * phba)
/* Issue an unreg_login to all nodes on all vports */
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
- for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
struct Scsi_Host *shost;
if (vports[i]->load_flag & FC_UNLOADING)
continue;
+ vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
shost = lpfc_shost_from_vport(vports[i]);
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
@@ -1975,7 +2220,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
}
lpfc_destroy_vport_work_array(phba, vports);
- lpfc_sli_flush_mbox_queue(phba);
+ lpfc_sli_mbox_sys_shutdown(phba);
}
/**
@@ -1996,11 +2241,11 @@ lpfc_offline(struct lpfc_hba *phba)
if (phba->pport->fc_flag & FC_OFFLINE_MODE)
return;
- /* stop all timers associated with this hba */
- lpfc_stop_phba_timers(phba);
+ /* stop port and all timers associated with this hba */
+ lpfc_stop_port(phba);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
lpfc_stop_vport_timers(vports[i]);
lpfc_destroy_vport_work_array(phba, vports);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2013,7 +2258,7 @@ lpfc_offline(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->work_port_events = 0;
@@ -2106,6 +2351,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_lun = vport->cfg_max_luns;
shost->this_id = -1;
shost->max_cmd_len = 16;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
+ shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+ }
/*
* Set initial can_queue value since 0 is no longer supported and
@@ -2123,6 +2372,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
+ INIT_LIST_HEAD(&vport->rcv_buffer_list);
spin_lock_init(&vport->work_port_lock);
init_timer(&vport->fc_disctmo);
@@ -2314,15 +2564,3461 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
}
/**
- * lpfc_enable_msix - Enable MSI-X interrupt mode
+ * lpfc_stop_port_s3 - Stop SLI3 device port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to stop an SLI3 device port, it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
+ **/
+static void
+lpfc_stop_port_s3(struct lpfc_hba *phba)
+{
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ /* Clear all pending interrupts */
+ writel(0xffffffff, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+
+ /* Reset some HBA SLI setup states */
+ lpfc_stop_hba_timers(phba);
+ phba->pport->work_port_events = 0;
+}
+
+/**
+ * lpfc_stop_port_s4 - Stop SLI4 device port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to stop an SLI4 device port, it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
+ **/
+static void
+lpfc_stop_port_s4(struct lpfc_hba *phba)
+{
+ /* Reset some HBA SLI4 setup states */
+ lpfc_stop_hba_timers(phba);
+ phba->pport->work_port_events = 0;
+ phba->sli4_hba.intr_enable = 0;
+ /* Hard-clear it for now; a more graceful wait can be added later */
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+}
+
+/**
+ * lpfc_stop_port - Wrapper function for stopping hba port
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
+ * the API jump table function pointer from the lpfc_hba struct.
+ **/
+void
+lpfc_stop_port(struct lpfc_hba *phba)
+{
+ phba->lpfc_stop_port(phba);
+}
+
+/**
+ * lpfc_sli_remove_dflt_fcf - Remove the driver default FCF record from the port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove the driver default fcf record from
+ * the port. This routine currently acts on FCF Index 0.
+ *
+ **/
+void
+lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
+{
+ int rc = 0;
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
+ uint32_t mbox_tmo, req_len;
+ uint32_t shdr_status, shdr_add_status;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2020 Failed to allocate mbox for ADD_FCF cmd\n");
+ return;
+ }
+
+ req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
+ sizeof(struct lpfc_sli4_cfg_mhdr);
+ rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ /*
+ * In phase 1, there is a single FCF index, 0. In phase 2, the driver
+ * supports multiple FCF indices.
+ */
+ del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
+ bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
+ bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
+ phba->fcf.fcf_indx);
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ }
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
+ &del_fcf_record->header.cfg_shdr.response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &del_fcf_record->header.cfg_shdr.response);
+ if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2516 DEL FCF of default FCF Index failed "
+ "mbx status x%x, status x%x add_status x%x\n",
+ rc, shdr_status, shdr_add_status);
+ }
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link-attention link fault code and
+ * translate it into the base driver's read link attention mailbox command
+ * status.
+ *
+ * Return: Link-attention status in terms of base driver's coding.
+ **/
+static uint16_t
+lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
+ struct lpfc_acqe_link *acqe_link)
+{
+ uint16_t latt_fault;
+
+ switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
+ case LPFC_ASYNC_LINK_FAULT_NONE:
+ case LPFC_ASYNC_LINK_FAULT_LOCAL:
+ case LPFC_ASYNC_LINK_FAULT_REMOTE:
+ latt_fault = 0;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0398 Invalid link fault code: x%x\n",
+ bf_get(lpfc_acqe_link_fault, acqe_link));
+ latt_fault = MBXERR_ERROR;
+ break;
+ }
+ return latt_fault;
+}
+
+/**
+ * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link attention type and translate it
+ * into the base driver's link attention type coding.
+ *
+ * Return: Link attention type in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
+ struct lpfc_acqe_link *acqe_link)
+{
+ uint8_t att_type;
+
+ switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
+ case LPFC_ASYNC_LINK_STATUS_DOWN:
+ case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
+ att_type = AT_LINK_DOWN;
+ break;
+ case LPFC_ASYNC_LINK_STATUS_UP:
+ /* Ignore physical link up events - wait for logical link up */
+ att_type = AT_RESERVED;
+ break;
+ case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
+ att_type = AT_LINK_UP;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0399 Invalid link attention type: x%x\n",
+ bf_get(lpfc_acqe_link_status, acqe_link));
+ att_type = AT_RESERVED;
+ break;
+ }
+ return att_type;
+}
+
+/**
+ * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link-attention link speed and translate
+ * it into the base driver's link-attention link speed coding.
+ *
+ * Return: Link-attention link speed in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
+ struct lpfc_acqe_link *acqe_link)
+{
+ uint8_t link_speed;
+
+ switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
+ case LPFC_ASYNC_LINK_SPEED_ZERO:
+ link_speed = LA_UNKNW_LINK;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_10MBPS:
+ link_speed = LA_UNKNW_LINK;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_100MBPS:
+ link_speed = LA_UNKNW_LINK;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_1GBPS:
+ link_speed = LA_1GHZ_LINK;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_10GBPS:
+ link_speed = LA_10GHZ_LINK;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0483 Invalid link-attention link speed: x%x\n",
+ bf_get(lpfc_acqe_link_speed, acqe_link));
+ link_speed = LA_UNKNW_LINK;
+ break;
+ }
+ return link_speed;
+}
+
+/**
+ * lpfc_sli4_async_link_evt - Process the asynchronous link event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous link event.
+ **/
+static void
+lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_link *acqe_link)
+{
+ struct lpfc_dmabuf *mp;
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ READ_LA_VAR *la;
+ uint8_t att_type;
+
+ att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
+ if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
+ return;
+ pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0395 The mboxq allocation failed\n");
+ return;
+ }
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0396 The lpfc_dmabuf allocation failed\n");
+ goto out_free_pmb;
+ }
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp->virt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0397 The mbuf allocation failed\n");
+ goto out_free_dmabuf;
+ }
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_all_cmd(phba);
+
+ /* Block ELS IOCBs until we have done process link event */
+ phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+
+ /* Update link event statistics */
+ phba->sli.slistat.link_event++;
+
+ /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
+ lpfc_read_la(phba, pmb, mp);
+ pmb->vport = phba->pport;
+
+ /* Parse and translate status field */
+ mb = &pmb->u.mb;
+ mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
+
+ /* Parse and translate link attention fields */
+ la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
+ la->eventTag = acqe_link->event_tag;
+ la->attType = att_type;
+ la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
+
+ /* Fake the following irrelevant fields */
+ la->topology = TOPOLOGY_PT_PT;
+ la->granted_AL_PA = 0;
+ la->il = 0;
+ la->pb = 0;
+ la->fa = 0;
+ la->mm = 0;
+
+ /* Keep the link status for extra SLI4 state machine reference */
+ phba->sli4_hba.link_state.speed =
+ bf_get(lpfc_acqe_link_speed, acqe_link);
+ phba->sli4_hba.link_state.duplex =
+ bf_get(lpfc_acqe_link_duplex, acqe_link);
+ phba->sli4_hba.link_state.status =
+ bf_get(lpfc_acqe_link_status, acqe_link);
+ phba->sli4_hba.link_state.physical =
+ bf_get(lpfc_acqe_link_physical, acqe_link);
+ phba->sli4_hba.link_state.fault =
+ bf_get(lpfc_acqe_link_fault, acqe_link);
+
+ /* Invoke the lpfc_handle_latt mailbox command callback function */
+ lpfc_mbx_cmpl_read_la(phba, pmb);
+
+ return;
+
+out_free_dmabuf:
+ kfree(mp);
+out_free_pmb:
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async fcoe completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous fcoe event.
+ **/
+static void
+lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_fcoe *acqe_fcoe)
+{
+ uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
+ int rc;
+
+ switch (event_type) {
+ case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "2546 New FCF found index 0x%x tag 0x%x \n",
+ acqe_fcoe->fcf_index,
+ acqe_fcoe->event_tag);
+ /*
+ * If the current FCF is in discovered state,
+ * do nothing.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
+ spin_unlock_irq(&phba->hbalock);
+ break;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Read the FCF table and re-discover SAN. */
+ rc = lpfc_sli4_read_fcf_record(phba,
+ LPFC_FCOE_FCF_GET_FIRST);
+ if (rc)
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "2547 Read FCF record failed 0x%x\n",
+ rc);
+ break;
+
+ case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2548 FCF Table full count 0x%x tag 0x%x \n",
+ bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
+ acqe_fcoe->event_tag);
+ break;
+
+ case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "2549 FCF disconnected fron network index 0x%x"
+ " tag 0x%x \n", acqe_fcoe->fcf_index,
+ acqe_fcoe->event_tag);
+ /* If the event is not for the currently used FCF, do nothing */
+ if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
+ break;
+ /*
+ * Currently, the driver supports only one FCF, so treat this as
+ * a link down.
+ */
+ lpfc_linkdown(phba);
+ /* Unregister FCF if no devices connected to it */
+ lpfc_unregister_unused_fcf(phba);
+ break;
+
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0288 Unknown FCoE event type 0x%x event tag "
+ "0x%x\n", event_type, acqe_fcoe->event_tag);
+ break;
+ }
+}
+
+/**
+ * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async dcbx completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous dcbx event.
+ **/
+static void
+lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_dcbx *acqe_dcbx)
+{
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0290 The SLI4 DCBX asynchronous event is not "
+ "handled yet\n");
+}
+
+/**
+ * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 asynchronous events.
+ **/
+void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* First, declare the async event has been handled */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~ASYNC_EVENT;
+ spin_unlock_irq(&phba->hbalock);
+ /* Now, handle all the async events */
+ while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
+ /* Get the first event from the head of the event queue */
+ spin_lock_irq(&phba->hbalock);
+ list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irq(&phba->hbalock);
+ /* Process the asynchronous event */
+ switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
+ case LPFC_TRAILER_CODE_LINK:
+ lpfc_sli4_async_link_evt(phba,
+ &cq_event->cqe.acqe_link);
+ break;
+ case LPFC_TRAILER_CODE_FCOE:
+ lpfc_sli4_async_fcoe_evt(phba,
+ &cq_event->cqe.acqe_fcoe);
+ break;
+ case LPFC_TRAILER_CODE_DCBX:
+ lpfc_sli4_async_dcbx_evt(phba,
+ &cq_event->cqe.acqe_dcbx);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "1804 Invalid asynchrous event code: "
+ "x%x\n", bf_get(lpfc_trailer_code,
+ &cq_event->cqe.mcqe_cmpl));
+ break;
+ }
+ /* Free the completion event processed to the free pool */
+ lpfc_sli4_cq_event_release(phba, cq_event);
+ }
+}
+
+/**
+ * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
+ * @phba: pointer to lpfc hba data structure.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine is invoked to set up the per HBA PCI-Device group function
+ * API jump table entries.
+ *
+ * Return: 0 if success, otherwise -ENODEV
+ **/
+int
+lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+ int rc;
+
+ /* Set up lpfc PCI-device group */
+ phba->pci_dev_grp = dev_grp;
+
+ /* The LPFC_PCI_DEV_OC uses SLI4 */
+ if (dev_grp == LPFC_PCI_DEV_OC)
+ phba->sli_rev = LPFC_SLI_REV4;
+
+ /* Set up device INIT API function jump table */
+ rc = lpfc_init_api_table_setup(phba, dev_grp);
+ if (rc)
+ return -ENODEV;
+ /* Set up SCSI API function jump table */
+ rc = lpfc_scsi_api_table_setup(phba, dev_grp);
+ if (rc)
+ return -ENODEV;
+ /* Set up SLI API function jump table */
+ rc = lpfc_sli_api_table_setup(phba, dev_grp);
+ if (rc)
+ return -ENODEV;
+ /* Set up MBOX API function jump table */
+ rc = lpfc_mbox_api_table_setup(phba, dev_grp);
+ if (rc)
+ return -ENODEV;
+
+ return 0;
+}
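A hedged sketch of the jump-table idea this routine sets up: each PCI device group installs one set of function pointers, and the thin wrappers seen earlier (lpfc_hba_down_post(), lpfc_handle_eratt()) simply call through them. The demo_hba_ops struct and demo_ops_setup() below are illustrative, not the driver's actual layout:

struct demo_hba_ops {
	int  (*hba_down_post)(struct lpfc_hba *phba);
	void (*handle_eratt)(struct lpfc_hba *phba);
};

static int demo_ops_setup(struct demo_hba_ops *ops, uint8_t dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:		/* SLI-3 style HBAs */
		ops->hba_down_post = lpfc_hba_down_post_s3;
		ops->handle_eratt  = lpfc_handle_eratt_s3;
		return 0;
	case LPFC_PCI_DEV_OC:		/* SLI-4 / OneConnect HBAs */
		ops->hba_down_post = lpfc_hba_down_post_s4;
		ops->handle_eratt  = lpfc_handle_eratt_s4;
		return 0;
	default:
		return -ENODEV;
	}
}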
+
+/**
+ * lpfc_log_intr_mode - Log the active interrupt mode
+ * @phba: pointer to lpfc hba data structure.
+ * @intr_mode: active interrupt mode adopted.
+ *
+ * This routine is invoked to log the interrupt mode currently in use by
+ * the device.
+ **/
+static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
+{
+ switch (intr_mode) {
+ case 0:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0470 Enable INTx interrupt mode.\n");
+ break;
+ case 1:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0481 Enabled MSI interrupt mode.\n");
+ break;
+ case 2:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0480 Enabled MSI-X interrupt mode.\n");
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0482 Illegal interrupt mode.\n");
+ break;
+ }
+ return;
+}
+
+/**
+ * lpfc_enable_pci_dev - Enable a generic PCI device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the PCI device that is common to all
+ * PCI devices.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_enable_pci_dev(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev;
+ int bars;
+
+ /* Obtain PCI device reference */
+ if (!phba->pcidev)
+ goto out_error;
+ else
+ pdev = phba->pcidev;
+ /* Select PCI BARs */
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ /* Enable PCI device */
+ if (pci_enable_device_mem(pdev))
+ goto out_error;
+ /* Request PCI resource for the device */
+ if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
+ goto out_disable_device;
+ /* Set up device as PCI master and save state for EEH */
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+ pci_save_state(pdev);
+
+ return 0;
+
+out_disable_device:
+ pci_disable_device(pdev);
+out_error:
+ return -ENODEV;
+}
+
+/**
+ * lpfc_disable_pci_dev - Disable a generic PCI device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the PCI device that is common to all
+ * PCI devices.
+ **/
+static void
+lpfc_disable_pci_dev(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev;
+ int bars;
+
+ /* Obtain PCI device reference */
+ if (!phba->pcidev)
+ return;
+ else
+ pdev = phba->pcidev;
+ /* Select PCI BARs */
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ /* Release PCI resource and disable PCI device */
+ pci_release_selected_regions(pdev, bars);
+ pci_disable_device(pdev);
+ /* Null out PCI private reference to driver */
+ pci_set_drvdata(pdev, NULL);
+
+ return;
+}
+
+/**
+ * lpfc_reset_hba - Reset a hba
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to reset a hba device. It brings the HBA
+ * offline, performs a board restart, and then brings the board back
+ * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
+ * on outstanding mailbox commands.
+ **/
+void
+lpfc_reset_hba(struct lpfc_hba *phba)
+{
+ /* If resets are disabled then set error state and return. */
+ if (!phba->cfg_enable_hba_reset) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return;
+ }
+ lpfc_offline_prep(phba);
+ lpfc_offline(phba);
+ lpfc_sli_brdrestart(phba);
+ lpfc_online(phba);
+ lpfc_unblock_mgmt_io(phba);
+}
+
+/**
+ * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * support the SLI-3 HBA device it attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli;
+
+ /*
+ * Initialize timers used by driver
+ */
+
+ /* Heartbeat timer */
+ init_timer(&phba->hb_tmofunc);
+ phba->hb_tmofunc.function = lpfc_hb_timeout;
+ phba->hb_tmofunc.data = (unsigned long)phba;
+
+ psli = &phba->sli;
+ /* MBOX heartbeat timer */
+ init_timer(&psli->mbox_tmo);
+ psli->mbox_tmo.function = lpfc_mbox_timeout;
+ psli->mbox_tmo.data = (unsigned long) phba;
+ /* FCP polling mode timer */
+ init_timer(&phba->fcp_poll_timer);
+ phba->fcp_poll_timer.function = lpfc_poll_timeout;
+ phba->fcp_poll_timer.data = (unsigned long) phba;
+ /* Fabric block timer */
+ init_timer(&phba->fabric_block_timer);
+ phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+ phba->fabric_block_timer.data = (unsigned long) phba;
+ /* EA polling mode timer */
+ init_timer(&phba->eratt_poll);
+ phba->eratt_poll.function = lpfc_poll_eratt;
+ phba->eratt_poll.data = (unsigned long) phba;
+
+ /* Host attention work mask setup */
+ phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
+ phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
+
+ /* Get all the module params for configuring this host */
+ lpfc_get_cfgparam(phba);
+ /*
+ * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
+ * used to create the sg_dma_buf_pool must be dynamically calculated.
+ * 2 segments are added since the IOCB needs a command and response bde.
+ */
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) +
+ ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
+ if (phba->cfg_enable_bg) {
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+ phba->cfg_sg_dma_buf_size +=
+ phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
+ }
+
+ /* Also reinitialize the host templates with new values. */
+ lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+ phba->max_vpi = LPFC_MAX_VPI;
+ /* This will be set to correct value after config_port mbox */
+ phba->max_vports = 0;
+
+ /*
+ * Initialize the SLI Layer to run with lpfc HBAs.
+ */
+ lpfc_sli_setup(phba);
+ lpfc_sli_queue_setup(phba);
+
+ /* Allocate device driver memory */
+ if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
+ return -ENOMEM;
+
+ return 0;
+}
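As a worked example of the sizing formula above: taking the command and response sizes quoted in the SLI-4 comment later in this file (32 and 160 bytes) and assuming a 12-byte ulp_bde64, a hypothetical cfg_sg_seg_cnt of 64 would give 32 + 160 + (64 + 2) * 12 = 984 bytes per DMA buffer; the exact figure on a given build depends on the real structure sizes.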
+
+/**
+ * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specific for supporting the SLI-3 HBA device it attached to.
+ **/
+static void
+lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
+{
+ /* Free device driver memory allocated */
+ lpfc_mem_free_all(phba);
+
+ return;
+}
+
+/**
+ * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * support the SLI-4 HBA device it attached to.
+ *
+ * Return codes
+ *   0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli;
+ int rc;
+ int i, hbq_count;
+
+	/* Before proceeding, wait for POST done and device ready */
+ rc = lpfc_sli4_post_status_check(phba);
+ if (rc)
+ return -ENODEV;
+
+ /*
+ * Initialize timers used by driver
+ */
+
+ /* Heartbeat timer */
+ init_timer(&phba->hb_tmofunc);
+ phba->hb_tmofunc.function = lpfc_hb_timeout;
+ phba->hb_tmofunc.data = (unsigned long)phba;
+
+ psli = &phba->sli;
+ /* MBOX heartbeat timer */
+ init_timer(&psli->mbox_tmo);
+ psli->mbox_tmo.function = lpfc_mbox_timeout;
+ psli->mbox_tmo.data = (unsigned long) phba;
+ /* Fabric block timer */
+ init_timer(&phba->fabric_block_timer);
+ phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+ phba->fabric_block_timer.data = (unsigned long) phba;
+ /* EA polling mode timer */
+ init_timer(&phba->eratt_poll);
+ phba->eratt_poll.function = lpfc_poll_eratt;
+ phba->eratt_poll.data = (unsigned long) phba;
+ /*
+ * We need to do a READ_CONFIG mailbox command here before
+ * calling lpfc_get_cfgparam. For VFs this will report the
+ * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
+ * All of the resources allocated
+ * for this Port are tied to these values.
+ */
+ /* Get all the module params for configuring this host */
+ lpfc_get_cfgparam(phba);
+ phba->max_vpi = LPFC_MAX_VPI;
+ /* This will be set to correct value after the read_config mbox */
+ phba->max_vports = 0;
+
+ /* Program the default value of vlan_id and fc_map */
+ phba->valid_vlan = 0;
+ phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+ phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+ phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
+
+ /*
+ * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+ * used to create the sg_dma_buf_pool must be dynamically calculated.
+ * 2 segments are added since the IOCB needs a command and response bde.
+	 * To ensure that the scsi sgl does not cross a 4k page boundary only
+ * sgl sizes of 1k, 2k, 4k, and 8k are supported.
+ * Table of sgl sizes and seg_cnt:
+ * sgl size, sg_seg_cnt total seg
+ * 1k 50 52
+ * 2k 114 116
+ * 4k 242 244
+ * 8k 498 500
+ * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
+ * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
+ * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
+ * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
+ */
+ if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
+ phba->cfg_sg_seg_cnt = 50;
+ else if (phba->cfg_sg_seg_cnt <= 114)
+ phba->cfg_sg_seg_cnt = 114;
+ else if (phba->cfg_sg_seg_cnt <= 242)
+ phba->cfg_sg_seg_cnt = 242;
+ else
+ phba->cfg_sg_seg_cnt = 498;
+
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
+ + sizeof(struct fcp_rsp);
+ phba->cfg_sg_dma_buf_size +=
+ ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+
+ /* Initialize buffer queue management fields */
+ hbq_count = lpfc_sli_hbq_count();
+ for (i = 0; i < hbq_count; ++i)
+ INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
+ INIT_LIST_HEAD(&phba->rb_pend_list);
+ phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
+ phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
+
+ /*
+ * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
+ */
+ /* Initialize the Abort scsi buffer list used by driver */
+ spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ /* This abort list used by worker thread */
+ spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
+
+ /*
+	 * Initialize driver internal slow-path work queues
+ */
+
+	/* Driver internal slow-path CQ Event pool */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
+ /* Response IOCB work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
+ /* Asynchronous event CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
+ /* Fast-path XRI aborted CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+ /* Slow-path XRI aborted CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
+ /* Receive queue CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
+
+ /* Initialize the driver internal SLI layer lists. */
+ lpfc_sli_setup(phba);
+ lpfc_sli_queue_setup(phba);
+
+ /* Allocate device driver memory */
+ rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
+ if (rc)
+ return -ENOMEM;
+
+ /* Create the bootstrap mailbox command */
+ rc = lpfc_create_bootstrap_mbox(phba);
+ if (unlikely(rc))
+ goto out_free_mem;
+
+ /* Set up the host's endian order with the device. */
+ rc = lpfc_setup_endian_order(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
+
+ /* Set up the hba's configuration parameters. */
+ rc = lpfc_sli4_read_config(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
+
+ /* Perform a function reset */
+ rc = lpfc_pci_function_reset(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
+
+ /* Create all the SLI4 queues */
+ rc = lpfc_sli4_queue_create(phba);
+ if (rc)
+ goto out_free_bsmbx;
+
+ /* Create driver internal CQE event pool */
+ rc = lpfc_sli4_cq_event_pool_create(phba);
+ if (rc)
+ goto out_destroy_queue;
+
+ /* Initialize and populate the iocb list per host */
+ rc = lpfc_init_sgl_list(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1400 Failed to initialize sgl list.\n");
+ goto out_destroy_cq_event_pool;
+ }
+ rc = lpfc_init_active_sgl_array(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1430 Failed to initialize sgl list.\n");
+ goto out_free_sgl_list;
+ }
+
+ rc = lpfc_sli4_init_rpi_hdrs(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1432 Failed to initialize rpi headers.\n");
+ goto out_free_active_sgl;
+ }
+
+ phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
+ phba->cfg_fcp_eq_count), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_eq_hdl) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2572 Failed allocate memory for fast-path "
+ "per-EQ handle array\n");
+ goto out_remove_rpi_hdrs;
+ }
+
+ phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
+ phba->sli4_hba.cfg_eqn), GFP_KERNEL);
+ if (!phba->sli4_hba.msix_entries) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2573 Failed allocate memory for msi-x "
+ "interrupt vector entries\n");
+ goto out_free_fcp_eq_hdl;
+ }
+
+ return rc;
+
+out_free_fcp_eq_hdl:
+ kfree(phba->sli4_hba.fcp_eq_hdl);
+out_remove_rpi_hdrs:
+ lpfc_sli4_remove_rpi_hdrs(phba);
+out_free_active_sgl:
+ lpfc_free_active_sgl(phba);
+out_free_sgl_list:
+ lpfc_free_sgl_list(phba);
+out_destroy_cq_event_pool:
+ lpfc_sli4_cq_event_pool_destroy(phba);
+out_destroy_queue:
+ lpfc_sli4_queue_destroy(phba);
+out_free_bsmbx:
+ lpfc_destroy_bootstrap_mbox(phba);
+out_free_mem:
+ lpfc_mem_free(phba);
+ return rc;
+}
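+
+/*
+ * Editor's sketch, not in the lpfc sources: the error path of the routine
+ * above uses the standard kernel unwind idiom -- every failure jumps to a
+ * label that releases only what was acquired before it, in reverse order.
+ * A trimmed-down version using just two of the real steps:
+ */
+static int lpfc_sketch_setup_two_steps(struct lpfc_hba *phba)
+{
+	int rc;
+
+	rc = lpfc_create_bootstrap_mbox(phba);	/* step 1 */
+	if (rc)
+		return rc;			/* nothing to undo yet */
+	rc = lpfc_sli4_queue_create(phba);	/* step 2 */
+	if (rc)
+		goto out_free_bsmbx;		/* undo step 1 only */
+	return 0;
+
+out_free_bsmbx:
+	lpfc_destroy_bootstrap_mbox(phba);
+	return rc;
+}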
+
+/**
+ * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specific for supporting the SLI-4 HBA device it is attached to.
+ **/
+static void
+lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
+{
+ struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+
+ /* unregister default FCFI from the HBA */
+ lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
+
+ /* Free the default FCR table */
+ lpfc_sli_remove_dflt_fcf(phba);
+
+ /* Free memory allocated for msi-x interrupt vector entries */
+ kfree(phba->sli4_hba.msix_entries);
+
+ /* Free memory allocated for fast-path work queue handles */
+ kfree(phba->sli4_hba.fcp_eq_hdl);
+
+ /* Free the allocated rpi headers. */
+ lpfc_sli4_remove_rpi_hdrs(phba);
+
+ /* Free the ELS sgl list */
+ lpfc_free_active_sgl(phba);
+ lpfc_free_sgl_list(phba);
+
+ /* Free the SCSI sgl management array */
+ kfree(phba->sli4_hba.lpfc_scsi_psb_array);
+
+ /* Free the SLI4 queues */
+ lpfc_sli4_queue_destroy(phba);
+
+ /* Free the completion queue EQ event pool */
+ lpfc_sli4_cq_event_release_all(phba);
+ lpfc_sli4_cq_event_pool_destroy(phba);
+
+ /* Reset SLI4 HBA FCoE function */
+ lpfc_pci_function_reset(phba);
+
+ /* Free the bsmbx region. */
+ lpfc_destroy_bootstrap_mbox(phba);
+
+ /* Free the SLI Layer memory with SLI4 HBAs */
+ lpfc_mem_free_all(phba);
+
+ /* Free the current connect table */
+ list_for_each_entry_safe(conn_entry, next_conn_entry,
+ &phba->fcf_conn_rec_list, list)
+ kfree(conn_entry);
+
+ return;
+}
+
+/**
+ * lpfc_init_api_table_setup - Set up init api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the device INIT interface API function jump table
+ * in @phba struct.
+ *
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+ switch (dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
+ phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
+ phba->lpfc_stop_port = lpfc_stop_port_s3;
+ break;
+ case LPFC_PCI_DEV_OC:
+ phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
+ phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
+ phba->lpfc_stop_port = lpfc_stop_port_s4;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1431 Invalid HBA PCI-device group: 0x%x\n",
+ dev_grp);
+ return -ENODEV;
+ break;
+ }
+ return 0;
+}
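+
+/*
+ * Editor's sketch of a hypothetical caller, not in the lpfc sources: once
+ * the jump table above is populated, common code can invoke the SLI-3 or
+ * SLI-4 specific routine through the function pointer without checking the
+ * device group again.
+ */
+static void lpfc_sketch_stop_port(struct lpfc_hba *phba)
+{
+	/* resolves to lpfc_stop_port_s3 or lpfc_stop_port_s4 */
+	phba->lpfc_stop_port(phba);
+}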
+
+/**
+ * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources before the
+ * device specific resource setup to support the HBA device it is attached to.
+ *
+ * Return codes
+ *   0 - successful
+ * other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
+{
+ /*
+ * Driver resources common to all SLI revisions
+ */
+ atomic_set(&phba->fast_event_count, 0);
+ spin_lock_init(&phba->hbalock);
+
+ /* Initialize ndlp management spinlock */
+ spin_lock_init(&phba->ndlp_lock);
+
+ INIT_LIST_HEAD(&phba->port_list);
+ INIT_LIST_HEAD(&phba->work_list);
+ init_waitqueue_head(&phba->wait_4_mlo_m_q);
+
+ /* Initialize the wait queue head for the kernel thread */
+ init_waitqueue_head(&phba->work_waitq);
+
+ /* Initialize the scsi buffer list used by driver for scsi IO */
+ spin_lock_init(&phba->scsi_buf_list_lock);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+
+ /* Initialize the fabric iocb list */
+ INIT_LIST_HEAD(&phba->fabric_iocb_list);
+
+ /* Initialize list to save ELS buffers */
+ INIT_LIST_HEAD(&phba->elsbuf);
+
+ /* Initialize FCF connection rec list */
+ INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+
+ return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources after the
+ * device specific resource setup to support the HBA device it is attached to.
+ *
+ * Return codes
+ *   0 - successful
+ * other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
+{
+ int error;
+
+ /* Startup the kernel thread for this host adapter. */
+ phba->worker_thread = kthread_run(lpfc_do_work, phba,
+ "lpfc_worker_%d", phba->brd_no);
+ if (IS_ERR(phba->worker_thread)) {
+ error = PTR_ERR(phba->worker_thread);
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up after
+ * the device specific resource setup for supporting the HBA device it is
+ * attached to.
+ **/
+static void
+lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
+{
+ /* Stop kernel worker thread */
+ kthread_stop(phba->worker_thread);
+}
+
+/**
+ * lpfc_free_iocb_list - Free iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's IOCB list and memory.
+ **/
+static void
+lpfc_free_iocb_list(struct lpfc_hba *phba)
+{
+ struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(iocbq_entry, iocbq_next,
+ &phba->lpfc_iocb_list, list) {
+ list_del(&iocbq_entry->list);
+ kfree(iocbq_entry);
+ phba->total_iocbq_bufs--;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ return;
+}
+
+/**
+ * lpfc_init_iocb_list - Allocate and initialize iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and initialize the driver's IOCB
+ * list and set up the IOCB tag array accordingly.
+ *
+ * Return codes
+ *   0 - successful
+ * other values - error
+ **/
+static int
+lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
+{
+ struct lpfc_iocbq *iocbq_entry = NULL;
+ uint16_t iotag;
+ int i;
+
+ /* Initialize and populate the iocb list per host. */
+ INIT_LIST_HEAD(&phba->lpfc_iocb_list);
+ for (i = 0; i < iocb_count; i++) {
+ iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
+ if (iocbq_entry == NULL) {
+ printk(KERN_ERR "%s: only allocated %d iocbs of "
+ "expected %d count. Unloading driver.\n",
+ __func__, i, LPFC_IOCB_LIST_CNT);
+ goto out_free_iocbq;
+ }
+
+ iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
+ if (iotag == 0) {
+ kfree(iocbq_entry);
+ printk(KERN_ERR "%s: failed to allocate IOTAG. "
+ "Unloading driver.\n", __func__);
+ goto out_free_iocbq;
+ }
+ iocbq_entry->sli4_xritag = NO_XRI;
+
+ spin_lock_irq(&phba->hbalock);
+ list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
+ phba->total_iocbq_bufs++;
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ return 0;
+
+out_free_iocbq:
+ lpfc_free_iocb_list(phba);
+
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_free_sgl_list - Free sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's sgl list and memory.
+ **/
+static void
+lpfc_free_sgl_list(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ LIST_HEAD(sglq_list);
+ int rc = 0;
+
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
+ spin_unlock_irq(&phba->hbalock);
+
+ list_for_each_entry_safe(sglq_entry, sglq_next,
+ &sglq_list, list) {
+ list_del(&sglq_entry->list);
+ lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
+ kfree(sglq_entry);
+ phba->sli4_hba.total_sglq_bufs--;
+ }
+ rc = lpfc_sli4_remove_all_sgl_pages(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2005 Unable to deregister pages from HBA: %x", rc);
+ }
+ kfree(phba->sli4_hba.lpfc_els_sgl_array);
+}
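+
+/*
+ * Editor's sketch, not part of the patch: lpfc_free_sgl_list() above uses
+ * the common "splice under the lock, free outside it" idiom so that the
+ * memory release calls never run with hbalock held.  The generic form of
+ * just the locking idiom (the real routine also frees the mapped buffers):
+ */
+static void lpfc_sketch_drain_sglq_list(struct lpfc_hba *phba,
+					struct list_head *src)
+{
+	struct lpfc_sglq *entry, *next;
+	LIST_HEAD(tmp);
+
+	spin_lock_irq(&phba->hbalock);		/* detach under the lock */
+	list_splice_init(src, &tmp);
+	spin_unlock_irq(&phba->hbalock);
+
+	list_for_each_entry_safe(entry, next, &tmp, list) {
+		list_del(&entry->list);		/* free with no lock held */
+		kfree(entry);
+	}
+}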
+
+/**
+ * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the driver's active sgl memory.
+ * This array will hold the sglq_entry's for active IOs.
+ **/
+static int
+lpfc_init_active_sgl_array(struct lpfc_hba *phba)
+{
+ int size;
+ size = sizeof(struct lpfc_sglq *);
+ size *= phba->sli4_hba.max_cfg_param.max_xri;
+
+ phba->sli4_hba.lpfc_sglq_active_list =
+ kzalloc(size, GFP_KERNEL);
+ if (!phba->sli4_hba.lpfc_sglq_active_list)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to walk through the array of active sglq entries
+ * and free all of the resources.
+ * This is just a place holder for now.
+ **/
+static void
+lpfc_free_active_sgl(struct lpfc_hba *phba)
+{
+ kfree(phba->sli4_hba.lpfc_sglq_active_list);
+}
+
+/**
+ * lpfc_init_sgl_list - Allocate and initialize sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and initialize the driver's sgl
+ * list and set up the sgl xritag tag array accordingly.
+ *
+ * Return codes
+ *   0 - successful
+ * other values - error
+ **/
+static int
+lpfc_init_sgl_list(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry = NULL;
+ int i;
+ int els_xri_cnt;
+
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2400 lpfc_init_sgl_list els %d.\n",
+ els_xri_cnt);
+ /* Initialize and populate the sglq list per host/VF. */
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+ /* Sanity check on XRI management */
+ if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2562 No room left for SCSI XRI allocation: "
+ "max_xri=%d, els_xri=%d\n",
+ phba->sli4_hba.max_cfg_param.max_xri,
+ els_xri_cnt);
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for the ELS XRI management array */
+ phba->sli4_hba.lpfc_els_sgl_array =
+ kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
+ GFP_KERNEL);
+
+ if (!phba->sli4_hba.lpfc_els_sgl_array) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2401 Failed to allocate memory for ELS "
+ "XRI management array of size %d.\n",
+ els_xri_cnt);
+ return -ENOMEM;
+ }
+
+ /* Keep the SCSI XRI into the XRI management array */
+ phba->sli4_hba.scsi_xri_max =
+ phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+ phba->sli4_hba.scsi_xri_cnt = 0;
+
+ phba->sli4_hba.lpfc_scsi_psb_array =
+ kzalloc((sizeof(struct lpfc_scsi_buf *) *
+ phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
+
+ if (!phba->sli4_hba.lpfc_scsi_psb_array) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2563 Failed to allocate memory for SCSI "
+ "XRI management array of size %d.\n",
+ phba->sli4_hba.scsi_xri_max);
+ kfree(phba->sli4_hba.lpfc_els_sgl_array);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < els_xri_cnt; i++) {
+ sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
+ if (sglq_entry == NULL) {
+ printk(KERN_ERR "%s: only allocated %d sgls of "
+ "expected %d count. Unloading driver.\n",
+ __func__, i, els_xri_cnt);
+ goto out_free_mem;
+ }
+
+ sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
+ if (sglq_entry->sli4_xritag == NO_XRI) {
+ kfree(sglq_entry);
+ printk(KERN_ERR "%s: failed to allocate XRI.\n"
+ "Unloading driver.\n", __func__);
+ goto out_free_mem;
+ }
+ sglq_entry->buff_type = GEN_BUFF_TYPE;
+ sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
+ if (sglq_entry->virt == NULL) {
+ kfree(sglq_entry);
+ printk(KERN_ERR "%s: failed to allocate mbuf.\n"
+ "Unloading driver.\n", __func__);
+ goto out_free_mem;
+ }
+ sglq_entry->sgl = sglq_entry->virt;
+ memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
+
+		/* The list order is used by later block SGL registration */
+ spin_lock_irq(&phba->hbalock);
+ list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
+ phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
+ phba->sli4_hba.total_sglq_bufs++;
+ spin_unlock_irq(&phba->hbalock);
+ }
+ return 0;
+
+out_free_mem:
+ kfree(phba->sli4_hba.lpfc_scsi_psb_array);
+ lpfc_free_sgl_list(phba);
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec. This routine
+ * posts a PAGE_SIZE memory region to the port to hold up to
+ * PAGE_SIZE modulo 64 rpi context headers.
+ * No locks are held here because this is an initialization routine
+ * called only from probe or lpfc_online when interrupts are not
+ * enabled and the driver is reinitializing the device.
+ *
+ * Return codes
+ *   0 - successful
+ *   ENOMEM - No available memory
+ * EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
+{
+ int rc = 0;
+ int longs;
+ uint16_t rpi_count;
+ struct lpfc_rpi_hdr *rpi_hdr;
+
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
+
+ /*
+ * Provision an rpi bitmask range for discovery. The total count
+ * is the difference between max and base + 1.
+ */
+ rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
+ phba->sli4_hba.max_cfg_param.max_rpi - 1;
+
+ longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.rpi_bmask)
+ return -ENOMEM;
+
+ rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+ if (!rpi_hdr) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0391 Error during rpi post operation\n");
+ lpfc_sli4_remove_rpis(phba);
+ rc = -ENODEV;
+ }
+
+ return rc;
+}
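+
+/*
+ * Editor's sketch with a hypothetical helper: the rpi bitmask sizing above
+ * is the usual "round a bit count up to whole longs" computation; the
+ * kernel's BITS_TO_LONGS() macro expresses the same arithmetic.
+ */
+static unsigned long *lpfc_sketch_alloc_rpi_bmask(uint16_t rpi_count)
+{
+	return kzalloc(BITS_TO_LONGS(rpi_count) * sizeof(unsigned long),
+		       GFP_KERNEL);
+}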
+
+/**
+ * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate a single 4KB memory region to
+ * support rpis and stores them in the phba. This single region
+ * provides support for up to 64 rpis. The region is used globally
+ * by the device.
+ *
+ * Returns:
+ * A valid rpi hdr on success.
+ * A NULL pointer on any failure.
+ **/
+struct lpfc_rpi_hdr *
+lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
+{
+ uint16_t rpi_limit, curr_rpi_range;
+ struct lpfc_dmabuf *dmabuf;
+ struct lpfc_rpi_hdr *rpi_hdr;
+
+ rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
+ phba->sli4_hba.max_cfg_param.max_rpi - 1;
+
+ spin_lock_irq(&phba->hbalock);
+ curr_rpi_range = phba->sli4_hba.next_rpi;
+ spin_unlock_irq(&phba->hbalock);
+
+ /*
+ * The port has a limited number of rpis. The increment here
+ * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
+ * and to allow the full max_rpi range per port.
+ */
+ if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
+ return NULL;
+
+ /*
+ * First allocate the protocol header region for the port. The
+ * port expects a 4KB DMA-mapped memory region that is 4K aligned.
+ */
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return NULL;
+
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ LPFC_HDR_TEMPLATE_SIZE,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ rpi_hdr = NULL;
+ goto err_free_dmabuf;
+ }
+
+ memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
+ if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
+ rpi_hdr = NULL;
+ goto err_free_coherent;
+ }
+
+ /* Save the rpi header data for cleanup later. */
+ rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
+ if (!rpi_hdr)
+ goto err_free_coherent;
+
+ rpi_hdr->dmabuf = dmabuf;
+ rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
+ rpi_hdr->page_count = 1;
+ spin_lock_irq(&phba->hbalock);
+ rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
+ list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
+
+ /*
+ * The next_rpi stores the next module-64 rpi value to post
+ * in any subsequent rpi memory region postings.
+ */
+ phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
+ spin_unlock_irq(&phba->hbalock);
+ return rpi_hdr;
+
+ err_free_coherent:
+ dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ err_free_dmabuf:
+ kfree(dmabuf);
+ return NULL;
+}
+
+/**
+ * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
* @phba: pointer to lpfc hba data structure.
*
- * This routine is invoked to enable the MSI-X interrupt vectors. The kernel
- * function pci_enable_msix() is called to enable the MSI-X vectors. Note that
- * pci_enable_msix(), once invoked, enables either all or nothing, depending
- * on the current availability of PCI vector resources. The device driver is
- * responsible for calling the individual request_irq() to register each MSI-X
- * vector with a interrupt handler, which is done in this function. Note that
+ * This routine is invoked to remove all memory resources allocated
+ * to support rpis. This routine presumes the caller has released all
+ * rpis consumed by fabric or port logins and is prepared to have
+ * the header pages removed.
+ **/
+void
+lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
+{
+ struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
+
+ list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
+ &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+ list_del(&rpi_hdr->list);
+ dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
+ rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
+ kfree(rpi_hdr->dmabuf);
+ kfree(rpi_hdr);
+ }
+
+ phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
+ memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
+}
+
+/**
+ * lpfc_hba_alloc - Allocate driver hba data structure for a device.
+ * @pdev: pointer to pci device data structure.
+ *
+ * This routine is invoked to allocate the driver hba data structure for an
+ * HBA device. If the allocation is successful, the phba reference to the
+ * PCI device data structure is set.
+ *
+ * Return codes
+ *   pointer to @phba - successful
+ * NULL - error
+ **/
+static struct lpfc_hba *
+lpfc_hba_alloc(struct pci_dev *pdev)
+{
+ struct lpfc_hba *phba;
+
+ /* Allocate memory for HBA structure */
+ phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
+ if (!phba) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1417 Failed to allocate hba struct.\n");
+ return NULL;
+ }
+
+ /* Set reference to PCI device in HBA structure */
+ phba->pcidev = pdev;
+
+ /* Assign an unused board number */
+ phba->brd_no = lpfc_get_instance();
+ if (phba->brd_no < 0) {
+ kfree(phba);
+ return NULL;
+ }
+
+ return phba;
+}
+
+/**
+ * lpfc_hba_free - Free driver hba data structure with a device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver hba data structure with an
+ * HBA device.
+ **/
+static void
+lpfc_hba_free(struct lpfc_hba *phba)
+{
+ /* Release the driver assigned board number */
+ idr_remove(&lpfc_hba_index, phba->brd_no);
+
+ kfree(phba);
+ return;
+}
+
+/**
+ * lpfc_create_shost - Create hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create HBA physical port and associate a SCSI
+ * host with it.
+ *
+ * Return codes
+ *   0 - successful
+ * other values - error
+ **/
+static int
+lpfc_create_shost(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct Scsi_Host *shost;
+
+ /* Initialize HBA FC structure */
+ phba->fc_edtov = FF_DEF_EDTOV;
+ phba->fc_ratov = FF_DEF_RATOV;
+ phba->fc_altov = FF_DEF_ALTOV;
+ phba->fc_arbtov = FF_DEF_ARBTOV;
+
+ vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
+ if (!vport)
+ return -ENODEV;
+
+ shost = lpfc_shost_from_vport(vport);
+ phba->pport = vport;
+ lpfc_debugfs_initialize(vport);
+ /* Put reference to SCSI host to driver's device private data */
+ pci_set_drvdata(phba->pcidev, shost);
+
+ return 0;
+}
+
+/**
+ * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to destroy HBA physical port and the associated
+ * SCSI host.
+ **/
+static void
+lpfc_destroy_shost(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+
+ /* Destroy physical port that associated with the SCSI host */
+ destroy_port(vport);
+
+ return;
+}
+
+/**
+ * lpfc_setup_bg - Setup Block guard structures and debug areas.
+ * @phba: pointer to lpfc hba data structure.
+ * @shost: the shost to be used to detect Block guard settings.
+ *
+ * This routine sets up the local Block guard protocol settings for @shost.
+ * This routine also allocates memory for debugging bg buffers.
+ **/
+static void
+lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
+{
+ int pagecnt = 10;
+ if (lpfc_prot_mask && lpfc_prot_guard) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "1478 Registering BlockGuard with the "
+ "SCSI layer\n");
+ scsi_host_set_prot(shost, lpfc_prot_mask);
+ scsi_host_set_guard(shost, lpfc_prot_guard);
+ }
+ if (!_dump_buf_data) {
+ while (pagecnt) {
+ spin_lock_init(&_dump_buf_lock);
+ _dump_buf_data =
+ (char *) __get_free_pages(GFP_KERNEL, pagecnt);
+ if (_dump_buf_data) {
+ printk(KERN_ERR "BLKGRD allocated %d pages for "
+ "_dump_buf_data at 0x%p\n",
+ (1 << pagecnt), _dump_buf_data);
+ _dump_buf_data_order = pagecnt;
+ memset(_dump_buf_data, 0,
+ ((1 << PAGE_SHIFT) << pagecnt));
+ break;
+ } else
+ --pagecnt;
+ }
+ if (!_dump_buf_data_order)
+ printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+ "memory for hexdump\n");
+ } else
+ printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
+ "\n", _dump_buf_data);
+ if (!_dump_buf_dif) {
+ while (pagecnt) {
+ _dump_buf_dif =
+ (char *) __get_free_pages(GFP_KERNEL, pagecnt);
+ if (_dump_buf_dif) {
+ printk(KERN_ERR "BLKGRD allocated %d pages for "
+ "_dump_buf_dif at 0x%p\n",
+ (1 << pagecnt), _dump_buf_dif);
+ _dump_buf_dif_order = pagecnt;
+ memset(_dump_buf_dif, 0,
+ ((1 << PAGE_SHIFT) << pagecnt));
+ break;
+ } else
+ --pagecnt;
+ }
+ if (!_dump_buf_dif_order)
+ printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+ "memory for hexdump\n");
+ } else
+ printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
+ _dump_buf_dif);
+}
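+
+/*
+ * Editor's sketch, not in the lpfc sources: the dump-buffer allocation in
+ * lpfc_setup_bg() retries __get_free_pages() with a progressively smaller
+ * order until an allocation succeeds, and remembers that order so the
+ * buffer can later be freed with free_pages().
+ */
+static char *lpfc_sketch_alloc_largest(int max_order, int *order_out)
+{
+	int order;
+	char *buf;
+
+	for (order = max_order; order > 0; order--) {
+		buf = (char *)__get_free_pages(GFP_KERNEL, order);
+		if (buf) {
+			*order_out = order;
+			return buf;
+		}
+	}
+	return NULL;
+}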
+
+/**
+ * lpfc_post_init_setup - Perform necessary device post initialization setup.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to perform all the necessary post initialization
+ * setup for the device.
+ **/
+static void
+lpfc_post_init_setup(struct lpfc_hba *phba)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_adapter_event_header adapter_event;
+
+ /* Get the default values for Model Name and Description */
+ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+ /*
+ * hba setup may have changed the hba_queue_depth so we need to
+ * adjust the value of can_queue.
+ */
+ shost = pci_get_drvdata(phba->pcidev);
+ shost->can_queue = phba->cfg_hba_queue_depth - 10;
+ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+ lpfc_setup_bg(phba, shost);
+
+ lpfc_host_attrib_init(shost);
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+ spin_lock_irq(shost->host_lock);
+ lpfc_poll_start_timer(phba);
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0428 Perform SCSI scan\n");
+ /* Send board arrival event to upper layer */
+ adapter_event.event_type = FC_REG_ADAPTER_EVENT;
+ adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(adapter_event),
+ (char *) &adapter_event,
+ LPFC_NL_VENDOR_ID);
+ return;
+}
+
+/**
+ * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for device
+ * with SLI-3 interface spec.
+ *
+ * Return codes
+ *   0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev;
+ unsigned long bar0map_len, bar2map_len;
+ int i, hbq_count;
+ void *ptr;
+ int error = -ENODEV;
+
+ /* Obtain PCI device reference */
+ if (!phba->pcidev)
+ return error;
+ else
+ pdev = phba->pcidev;
+
+ /* Set the device DMA mask size */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ return error;
+
+ /* Get the bus address of Bar0 and Bar2 and the number of bytes
+ * required by each mapping.
+ */
+ phba->pci_bar0_map = pci_resource_start(pdev, 0);
+ bar0map_len = pci_resource_len(pdev, 0);
+
+ phba->pci_bar2_map = pci_resource_start(pdev, 2);
+ bar2map_len = pci_resource_len(pdev, 2);
+
+ /* Map HBA SLIM to a kernel virtual address. */
+ phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
+ if (!phba->slim_memmap_p) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "ioremap failed for SLIM memory.\n");
+ goto out;
+ }
+
+ /* Map HBA Control Registers to a kernel virtual address. */
+ phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
+ if (!phba->ctrl_regs_memmap_p) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "ioremap failed for HBA control registers.\n");
+ goto out_iounmap_slim;
+ }
+
+ /* Allocate memory for SLI-2 structures */
+ phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
+ SLI2_SLIM_SIZE,
+ &phba->slim2p.phys,
+ GFP_KERNEL);
+ if (!phba->slim2p.virt)
+ goto out_iounmap;
+
+ memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
+ phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
+ phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
+ phba->IOCBs = (phba->slim2p.virt +
+ offsetof(struct lpfc_sli2_slim, IOCBs));
+
+ phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
+ lpfc_sli_hbq_size(),
+ &phba->hbqslimp.phys,
+ GFP_KERNEL);
+ if (!phba->hbqslimp.virt)
+ goto out_free_slim;
+
+ hbq_count = lpfc_sli_hbq_count();
+ ptr = phba->hbqslimp.virt;
+ for (i = 0; i < hbq_count; ++i) {
+ phba->hbqs[i].hbq_virt = ptr;
+ INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
+ ptr += (lpfc_hbq_defs[i]->entry_count *
+ sizeof(struct lpfc_hbq_entry));
+ }
+ phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
+ phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
+
+ memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
+
+ INIT_LIST_HEAD(&phba->rb_pend_list);
+
+ phba->MBslimaddr = phba->slim_memmap_p;
+ phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
+ phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
+ phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
+ phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+
+ return 0;
+
+out_free_slim:
+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ phba->slim2p.virt, phba->slim2p.phys);
+out_iounmap:
+ iounmap(phba->ctrl_regs_memmap_p);
+out_iounmap_slim:
+ iounmap(phba->slim_memmap_p);
+out:
+ return error;
+}
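+
+/*
+ * Editor's sketch, illustrative only: the DMA mask setup above is the usual
+ * "try 64-bit, fall back to 32-bit" pattern.
+ */
+static int lpfc_sketch_set_dma_mask(struct pci_dev *pdev)
+{
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
+		return 0;			/* 64-bit DMA is available */
+	return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+}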
+
+/**
+ * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for device
+ * with SLI-3 interface spec.
+ **/
+static void
+lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev;
+
+ /* Obtain PCI device reference */
+ if (!phba->pcidev)
+ return;
+ else
+ pdev = phba->pcidev;
+
+ /* Free coherent DMA memory allocated */
+ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+ phba->hbqslimp.virt, phba->hbqslimp.phys);
+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ phba->slim2p.virt, phba->slim2p.phys);
+
+ /* I/O memory unmap */
+ iounmap(phba->ctrl_regs_memmap_p);
+ iounmap(phba->slim_memmap_p);
+
+ return;
+}
+
+/**
+ * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
+ * done and check status.
+ *
+ * Return 0 if successful, otherwise -ENODEV.
+ **/
+int
+lpfc_sli4_post_status_check(struct lpfc_hba *phba)
+{
+ struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
+ uint32_t onlnreg0, onlnreg1;
+ int i, port_error = -ENODEV;
+
+ if (!phba->sli4_hba.STAregaddr)
+ return -ENODEV;
+
+	/* With unrecoverable error, log the error message and return error */
+ onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
+ onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
+ if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
+ uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
+ uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
+ if (uerrlo_reg.word0 || uerrhi_reg.word0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1422 HBA Unrecoverable error: "
+ "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+ "online0_reg=0x%x, online1_reg=0x%x\n",
+ uerrlo_reg.word0, uerrhi_reg.word0,
+ onlnreg0, onlnreg1);
+ }
+ return -ENODEV;
+ }
+
+ /* Wait up to 30 seconds for the SLI Port POST done and ready */
+ for (i = 0; i < 3000; i++) {
+ sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
+ /* Encounter fatal POST error, break out */
+ if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
+ port_error = -ENODEV;
+ break;
+ }
+ if (LPFC_POST_STAGE_ARMFW_READY ==
+ bf_get(lpfc_hst_state_port_status, &sta_reg)) {
+ port_error = 0;
+ break;
+ }
+ msleep(10);
+ }
+
+ if (port_error)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1408 Failure HBA POST Status: sta_reg=0x%x, "
+ "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
+ "dl=x%x, pstatus=x%x\n", sta_reg.word0,
+ bf_get(lpfc_hst_state_perr, &sta_reg),
+ bf_get(lpfc_hst_state_sfi, &sta_reg),
+ bf_get(lpfc_hst_state_nip, &sta_reg),
+ bf_get(lpfc_hst_state_ipc, &sta_reg),
+ bf_get(lpfc_hst_state_xrom, &sta_reg),
+ bf_get(lpfc_hst_state_dl, &sta_reg),
+ bf_get(lpfc_hst_state_port_status, &sta_reg));
+
+ /* Log device information */
+ scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
+ "FeatureL1=0x%x, FeatureL2=0x%x\n",
+ bf_get(lpfc_scratchpad_chiptype, &scratchpad),
+ bf_get(lpfc_scratchpad_slirev, &scratchpad),
+ bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
+ bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
+
+ return port_error;
+}
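+
+/*
+ * Editor's sketch, not in the lpfc sources: the POST wait above is a bounded
+ * polling loop -- 3000 iterations of msleep(10) give roughly the 30 second
+ * budget mentioned in the comment.  Reduced to just the ready check:
+ */
+static int lpfc_sketch_wait_post_ready(struct lpfc_hba *phba)
+{
+	struct lpfc_register sta_reg;
+	int i;
+
+	for (i = 0; i < 3000; i++) {		/* ~30 seconds at 10 ms */
+		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
+		if (bf_get(lpfc_hst_state_port_status, &sta_reg) ==
+		    LPFC_POST_STAGE_ARMFW_READY)
+			return 0;
+		msleep(10);
+	}
+	return -ENODEV;
+}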
+
+/**
+ * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up SLI4 BAR0 PCI config space register
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
+{
+ phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_UERR_STATUS_LO;
+ phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_UERR_STATUS_HI;
+ phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_ONLINE0;
+ phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_ONLINE1;
+ phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_SCRATCHPAD;
+}
+
+/**
+ * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
+{
+
+ phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+ LPFC_HST_STATE;
+ phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+ LPFC_HST_ISR0;
+ phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+ LPFC_HST_IMR0;
+ phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+ LPFC_HST_ISCR0;
+ return;
+}
+
+/**
+ * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ * @vf: virtual function number
+ *
+ * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
+ * based on the given virtual function number, @vf.
+ *
+ * Return 0 if successful, otherwise -ENODEV.
+ **/
+static int
+lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
+{
+ if (vf > LPFC_VIR_FUNC_MAX)
+ return -ENODEV;
+
+ phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+ vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
+ phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+ vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
+ phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+ vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
+ phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+ vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
+ phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+ vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
+ return 0;
+}
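+
+/*
+ * Editor's sketch with a hypothetical helper: each virtual function owns one
+ * LPFC_VFR_PAGE_SIZE page of doorbell registers in BAR2, so any doorbell
+ * address is simply base + vf * page size + register offset.
+ */
+static void __iomem *lpfc_sketch_vf_doorbell(struct lpfc_hba *phba,
+					     uint32_t vf, uint32_t reg_offset)
+{
+	return phba->sli4_hba.drbl_regs_memmap_p +
+	       vf * LPFC_VFR_PAGE_SIZE + reg_offset;
+}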
+
+/**
+ * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create the bootstrap mailbox
+ * region consistent with the SLI-4 interface spec. This
+ * routine allocates all memory necessary to communicate
+ * mailbox commands to the port and sets up all alignment
+ * needs. No locks are expected to be held when calling
+ * this routine.
+ *
+ * Return codes
+ *   0 - successful
+ *   ENOMEM - could not allocate memory.
+ **/
+static int
+lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
+{
+ uint32_t bmbx_size;
+ struct lpfc_dmabuf *dmabuf;
+ struct dma_address *dma_address;
+ uint32_t pa_addr;
+ uint64_t phys_addr;
+
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ /*
+ * The bootstrap mailbox region consists of 2 parts
+ * plus an alignment restriction of 16 bytes.
+ */
+ bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ bmbx_size,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ return -ENOMEM;
+ }
+ memset(dmabuf->virt, 0, bmbx_size);
+
+ /*
+ * Initialize the bootstrap mailbox pointers now so that the register
+ * operations are simple later. The mailbox dma address is required
+ * to be 16-byte aligned. Also align the virtual memory as each
+ * mailbox is copied into the bmbx mailbox region before issuing the
+ * command to the port.
+ */
+ phba->sli4_hba.bmbx.dmabuf = dmabuf;
+ phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
+
+ phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
+ LPFC_ALIGN_16_BYTE);
+ phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
+ LPFC_ALIGN_16_BYTE);
+
+ /*
+ * Set the high and low physical addresses now. The SLI4 alignment
+ * requirement is 16 bytes and the mailbox is posted to the port
+ * as two 30-bit addresses. The other data is a bit marking whether
+ * the 30-bit address is the high or low address.
+ * Upcast bmbx aphys to 64bits so shift instruction compiles
+ * clean on 32 bit machines.
+ */
+ dma_address = &phba->sli4_hba.bmbx.dma_address;
+ phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
+ pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
+ dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
+ LPFC_BMBX_BIT1_ADDR_HI);
+
+ pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
+ dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
+ LPFC_BMBX_BIT1_ADDR_LO);
+ return 0;
+}
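+
+/*
+ * Editor's sketch, illustrative only: the 16-byte alignment above comes from
+ * over-allocating by LPFC_ALIGN_16_BYTE - 1 bytes and then rounding the
+ * virtual and DMA addresses up with PTR_ALIGN() and ALIGN().  The helper
+ * name is hypothetical.
+ */
+static void lpfc_sketch_align_bmbx(struct lpfc_dmabuf *dmabuf,
+				   void **avirt, dma_addr_t *aphys)
+{
+	*avirt = PTR_ALIGN(dmabuf->virt, LPFC_ALIGN_16_BYTE);
+	*aphys = ALIGN(dmabuf->phys, LPFC_ALIGN_16_BYTE);
+}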
+
+/**
+ * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to teardown the bootstrap mailbox
+ * region and release all host resources. This routine requires
+ * the caller to ensure all mailbox commands recovered, no
+ * additional mailbox commands are sent, and interrupts are disabled
+ * before calling this routine.
+ *
+ **/
+static void
+lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
+{
+ dma_free_coherent(&phba->pcidev->dev,
+ phba->sli4_hba.bmbx.bmbx_size,
+ phba->sli4_hba.bmbx.dmabuf->virt,
+ phba->sli4_hba.bmbx.dmabuf->phys);
+
+ kfree(phba->sli4_hba.bmbx.dmabuf);
+ memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
+}
+
+/**
+ * lpfc_sli4_read_config - Get the config parameters.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to read the configuration parameters from the HBA.
+ * The configuration parameters are used to set the base and maximum values
+ * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
+ * allocation for the port.
+ *
+ * Return codes
+ *   0 - successful
+ *   ENOMEM - No available memory
+ * EIO - The mailbox failed to complete successfully.
+ **/
+static int
+lpfc_sli4_read_config(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmb;
+ struct lpfc_mbx_read_config *rd_config;
+ uint32_t rc = 0;
+
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2011 Unable to allocate memory for issuing "
+ "SLI_CONFIG_SPECIAL mailbox command\n");
+ return -ENOMEM;
+ }
+
+ lpfc_read_config(phba, pmb);
+
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2012 Mailbox failed , mbxCmd x%x "
+ "READ_CONFIG, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe),
+ bf_get(lpfc_mqe_status, &pmb->u.mqe));
+ rc = -EIO;
+ } else {
+ rd_config = &pmb->u.mqe.un.rd_config;
+ phba->sli4_hba.max_cfg_param.max_xri =
+ bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+ phba->sli4_hba.max_cfg_param.xri_base =
+ bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
+ phba->sli4_hba.max_cfg_param.max_vpi =
+ bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+ phba->sli4_hba.max_cfg_param.vpi_base =
+ bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
+ phba->sli4_hba.max_cfg_param.max_rpi =
+ bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+ phba->sli4_hba.max_cfg_param.rpi_base =
+ bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
+ phba->sli4_hba.max_cfg_param.max_vfi =
+ bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
+ phba->sli4_hba.max_cfg_param.vfi_base =
+ bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
+ phba->sli4_hba.max_cfg_param.max_fcfi =
+ bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
+ phba->sli4_hba.max_cfg_param.fcfi_base =
+ bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
+ phba->sli4_hba.max_cfg_param.max_eq =
+ bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
+ phba->sli4_hba.max_cfg_param.max_rq =
+ bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
+ phba->sli4_hba.max_cfg_param.max_wq =
+ bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
+ phba->sli4_hba.max_cfg_param.max_cq =
+ bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
+ phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
+ phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
+ phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
+ phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
+ phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
+ phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
+ phba->max_vports = phba->max_vpi;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2003 cfg params XRI(B:%d M:%d), "
+ "VPI(B:%d M:%d) "
+ "VFI(B:%d M:%d) "
+ "RPI(B:%d M:%d) "
+ "FCFI(B:%d M:%d)\n",
+ phba->sli4_hba.max_cfg_param.xri_base,
+ phba->sli4_hba.max_cfg_param.max_xri,
+ phba->sli4_hba.max_cfg_param.vpi_base,
+ phba->sli4_hba.max_cfg_param.max_vpi,
+ phba->sli4_hba.max_cfg_param.vfi_base,
+ phba->sli4_hba.max_cfg_param.max_vfi,
+ phba->sli4_hba.max_cfg_param.rpi_base,
+ phba->sli4_hba.max_cfg_param.max_rpi,
+ phba->sli4_hba.max_cfg_param.fcfi_base,
+ phba->sli4_hba.max_cfg_param.max_fcfi);
+ }
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Reset the DFT_HBA_Q_DEPTH to the max xri */
+ if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
+ phba->cfg_hba_queue_depth =
+ phba->sli4_hba.max_cfg_param.max_xri;
+ return rc;
+}
+
+/**
+ * lpfc_setup_endian_order - Notify the port of the host's endian order.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to setup the host-side endian order to the
+ * HBA consistent with the SLI-4 interface spec.
+ *
+ * Return codes
+ *   0 - successful
+ *   ENOMEM - No available memory
+ * EIO - The mailbox failed to complete successfully.
+ **/
+static int
+lpfc_setup_endian_order(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ uint32_t rc = 0;
+ uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
+ HOST_ENDIAN_HIGH_WORD1};
+
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0492 Unable to allocate memory for issuing "
+ "SLI_CONFIG_SPECIAL mailbox command\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
+ * words to contain special data values and no other data.
+ */
+ memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+ memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0493 SLI_CONFIG_SPECIAL mailbox failed with "
+ "status x%x\n",
+ rc);
+ rc = -EIO;
+ }
+
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_queue_create - Create all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as place holder.
+ *
+ * Return codes
+ *   0 - successful
+ *   ENOMEM - No available memory
+ * EIO - The mailbox failed to complete successfully.
+ **/
+static int
+lpfc_sli4_queue_create(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *qdesc;
+ int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+ int cfg_fcp_wq_count;
+ int cfg_fcp_eq_count;
+
+ /*
+	 * Sanity check for configured queue parameters against the run-time
+ * device parameters
+ */
+
+ /* Sanity check on FCP fast-path WQ parameters */
+ cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
+ if (cfg_fcp_wq_count >
+ (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
+ cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
+ LPFC_SP_WQN_DEF;
+ if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2581 Not enough WQs (%d) from "
+ "the pci function for supporting "
+ "FCP WQs (%d)\n",
+ phba->sli4_hba.max_cfg_param.max_wq,
+ phba->cfg_fcp_wq_count);
+ goto out_error;
+ }
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2582 Not enough WQs (%d) from the pci "
+ "function for supporting the requested "
+ "FCP WQs (%d), the actual FCP WQs can "
+ "be supported: %d\n",
+ phba->sli4_hba.max_cfg_param.max_wq,
+ phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
+ }
+ /* The actual number of FCP work queues adopted */
+ phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
+
+ /* Sanity check on FCP fast-path EQ parameters */
+ cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
+ if (cfg_fcp_eq_count >
+ (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
+ cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
+ LPFC_SP_EQN_DEF;
+ if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2574 Not enough EQs (%d) from the "
+ "pci function for supporting FCP "
+ "EQs (%d)\n",
+ phba->sli4_hba.max_cfg_param.max_eq,
+ phba->cfg_fcp_eq_count);
+ goto out_error;
+ }
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2575 Not enough EQs (%d) from the pci "
+ "function for supporting the requested "
+ "FCP EQs (%d), the actual FCP EQs can "
+ "be supported: %d\n",
+ phba->sli4_hba.max_cfg_param.max_eq,
+ phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
+ }
+ /* It does not make sense to have more EQs than WQs */
+ if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2593 The number of FCP EQs (%d) is more "
+ "than the number of FCP WQs (%d), take "
+ "the number of FCP EQs same as than of "
+ "WQs (%d)\n", cfg_fcp_eq_count,
+ phba->cfg_fcp_wq_count,
+ phba->cfg_fcp_wq_count);
+ cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
+ }
+ /* The actual number of FCP event queues adopted */
+ phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
+ /* The overall number of event queues used */
+ phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
+
+ /*
+ * Create Event Queues (EQs)
+ */
+
+ /* Get EQ depth from module parameter, fake the default for now */
+ phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
+ phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
+
+ /* Create slow path event queue */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ phba->sli4_hba.eq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0496 Failed allocate slow-path EQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.sp_eq = qdesc;
+
+ /* Create fast-path FCP Event Queue(s) */
+ phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_eq_count), GFP_KERNEL);
+ if (!phba->sli4_hba.fp_eq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2576 Failed allocate memory for fast-path "
+ "EQ record array\n");
+ goto out_free_sp_eq;
+ }
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ phba->sli4_hba.eq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0497 Failed allocate fast-path EQ\n");
+ goto out_free_fp_eq;
+ }
+ phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
+ }
+
+ /*
+ * Create Complete Queues (CQs)
+ */
+
+ /* Get CQ depth from module parameter, fake the default for now */
+ phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
+ phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
+
+ /* Create slow-path Mailbox Command Complete Queue */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0500 Failed allocate slow-path mailbox CQ\n");
+ goto out_free_fp_eq;
+ }
+ phba->sli4_hba.mbx_cq = qdesc;
+
+ /* Create slow-path ELS Complete Queue */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0501 Failed allocate slow-path ELS CQ\n");
+ goto out_free_mbx_cq;
+ }
+ phba->sli4_hba.els_cq = qdesc;
+
+ /* Create slow-path Unsolicited Receive Complete Queue */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0502 Failed allocate slow-path USOL RX CQ\n");
+ goto out_free_els_cq;
+ }
+ phba->sli4_hba.rxq_cq = qdesc;
+
+ /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
+ phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_eq_count), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2577 Failed allocate memory for fast-path "
+ "CQ record array\n");
+ goto out_free_rxq_cq;
+ }
+ for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0499 Failed allocate fast-path FCP "
+ "CQ (%d)\n", fcp_cqidx);
+ goto out_free_fcp_cq;
+ }
+ phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
+ }
+
+ /* Create Mailbox Command Queue */
+ phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
+ phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
+
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+ phba->sli4_hba.mq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0505 Failed allocate slow-path MQ\n");
+ goto out_free_fcp_cq;
+ }
+ phba->sli4_hba.mbx_wq = qdesc;
+
+ /*
+ * Create all the Work Queues (WQs)
+ */
+ phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
+ phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
+
+ /* Create slow-path ELS Work Queue */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0504 Failed allocate slow-path ELS WQ\n");
+ goto out_free_mbx_wq;
+ }
+ phba->sli4_hba.els_wq = qdesc;
+
+ /* Create fast-path FCP Work Queue(s) */
+ phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
+ phba->cfg_fcp_wq_count), GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2578 Failed allocate memory for fast-path "
+ "WQ record array\n");
+ goto out_free_els_wq;
+ }
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0503 Failed allocate fast-path FCP "
+ "WQ (%d)\n", fcp_wqidx);
+ goto out_free_fcp_wq;
+ }
+ phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
+ }
+
+ /*
+ * Create Receive Queue (RQ)
+ */
+ phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
+ phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
+
+ /* Create Receive Queue for header */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ phba->sli4_hba.rq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0506 Failed allocate receive HRQ\n");
+ goto out_free_fcp_wq;
+ }
+ phba->sli4_hba.hdr_rq = qdesc;
+
+ /* Create Receive Queue for data */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ phba->sli4_hba.rq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0507 Failed allocate receive DRQ\n");
+ goto out_free_hdr_rq;
+ }
+ phba->sli4_hba.dat_rq = qdesc;
+
+ return 0;
+
+out_free_hdr_rq:
+ lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+ phba->sli4_hba.hdr_rq = NULL;
+out_free_fcp_wq:
+ for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
+ lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
+ phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
+ }
+ kfree(phba->sli4_hba.fcp_wq);
+out_free_els_wq:
+ lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+ phba->sli4_hba.els_wq = NULL;
+out_free_mbx_wq:
+ lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+ phba->sli4_hba.mbx_wq = NULL;
+out_free_fcp_cq:
+ for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
+ lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
+ phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
+ }
+ kfree(phba->sli4_hba.fcp_cq);
+out_free_rxq_cq:
+ lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
+ phba->sli4_hba.rxq_cq = NULL;
+out_free_els_cq:
+ lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+ phba->sli4_hba.els_cq = NULL;
+out_free_mbx_cq:
+ lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+ phba->sli4_hba.mbx_cq = NULL;
+out_free_fp_eq:
+ for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
+ lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
+ phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
+ }
+ kfree(phba->sli4_hba.fp_eq);
+out_free_sp_eq:
+ lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
+ phba->sli4_hba.sp_eq = NULL;
+out_error:
+ return -ENOMEM;
+}
+
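The error path above unwinds in strict reverse order of allocation, one goto
label per resource, so a failure at any step releases exactly what was set up
before it. A minimal stand-alone C sketch of the same goto-unwind idiom
(hypothetical names, not part of the driver):

#include <stdlib.h>

struct ctx { int *a; int *b; int *c; };

/* Allocate three resources; on failure, unwind in reverse order. */
static int ctx_setup(struct ctx *ctx)
{
	ctx->a = malloc(sizeof(*ctx->a));
	if (!ctx->a)
		goto out_error;
	ctx->b = malloc(sizeof(*ctx->b));
	if (!ctx->b)
		goto out_free_a;
	ctx->c = malloc(sizeof(*ctx->c));
	if (!ctx->c)
		goto out_free_b;
	return 0;

out_free_b:
	free(ctx->b);
	ctx->b = NULL;
out_free_a:
	free(ctx->a);
	ctx->a = NULL;
out_error:
	return -1;
}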
+/**
+ * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 queues allocated for the
+ * FCoE HBA operation.
+ **/
+static void
+lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
+{
+ int fcp_qidx;
+
+ /* Release mailbox command work queue */
+ lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+ phba->sli4_hba.mbx_wq = NULL;
+
+ /* Release ELS work queue */
+ lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+ phba->sli4_hba.els_wq = NULL;
+
+ /* Release FCP work queue */
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
+ lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+ kfree(phba->sli4_hba.fcp_wq);
+ phba->sli4_hba.fcp_wq = NULL;
+
+ /* Release unsolicited receive queue */
+ lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+ phba->sli4_hba.hdr_rq = NULL;
+ lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
+ phba->sli4_hba.dat_rq = NULL;
+
+ /* Release unsolicited receive complete queue */
+ lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
+ phba->sli4_hba.rxq_cq = NULL;
+
+ /* Release ELS complete queue */
+ lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+ phba->sli4_hba.els_cq = NULL;
+
+ /* Release mailbox command complete queue */
+ lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+ phba->sli4_hba.mbx_cq = NULL;
+
+ /* Release FCP response complete queue */
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+ lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+ kfree(phba->sli4_hba.fcp_cq);
+ phba->sli4_hba.fcp_cq = NULL;
+
+ /* Release fast-path event queue */
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+ lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+ kfree(phba->sli4_hba.fp_eq);
+ phba->sli4_hba.fp_eq = NULL;
+
+ /* Release slow-path event queue */
+ lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
+ phba->sli4_hba.sp_eq = NULL;
+
+ return;
+}
+
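lpfc_sli4_queue_destroy() frees every element of the fcp_wq/fcp_cq/fp_eq
pointer arrays before kfree()ing the arrays themselves, and NULLs each field
afterwards. A tiny user-space C sketch of that element-then-array ordering
(illustrative types only):

#include <stdlib.h>

struct queue { int id; };

/* Free 'count' queue objects, then the pointer array that tracked them. */
static void destroy_queue_array(struct queue ***arr, int count)
{
	if (!*arr)
		return;
	for (int i = 0; i < count; i++)
		free((*arr)[i]);
	free(*arr);
	*arr = NULL;	/* a repeated teardown call now returns immediately */
}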
+/**
+ * lpfc_sli4_queue_setup - Set up all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
+ * operation.
+ *
+ * Return codes
+ * 0 - successful
+ * ENOMEM - No available memory
+ * EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_queue_setup(struct lpfc_hba *phba)
+{
+ int rc = -ENOMEM;
+ int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+ int fcp_cq_index = 0;
+
+ /*
+ * Set up Event Queues (EQs)
+ */
+
+ /* Set up slow-path event queue */
+ if (!phba->sli4_hba.sp_eq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0520 Slow-path EQ not allocated\n");
+ goto out_error;
+ }
+ rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
+ LPFC_SP_DEF_IMAX);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0521 Failed setup of slow-path EQ: "
+ "rc = 0x%x\n", rc);
+ goto out_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2583 Slow-path EQ setup: queue-id=%d\n",
+ phba->sli4_hba.sp_eq->queue_id);
+
+ /* Set up fast-path event queue */
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+ if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0522 Fast-path EQ (%d) not "
+ "allocated\n", fcp_eqidx);
+ goto out_destroy_fp_eq;
+ }
+ rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
+ phba->cfg_fcp_imax);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0523 Failed setup of fast-path EQ "
+ "(%d), rc = 0x%x\n", fcp_eqidx, rc);
+ goto out_destroy_fp_eq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2584 Fast-path EQ setup: "
+ "queue[%d]-id=%d\n", fcp_eqidx,
+ phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
+ }
+
+ /*
+ * Set up Complete Queues (CQs)
+ */
+
+ /* Set up slow-path MBOX Complete Queue as the first CQ */
+ if (!phba->sli4_hba.mbx_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0528 Mailbox CQ not allocated\n");
+ goto out_destroy_fp_eq;
+ }
+ rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
+ LPFC_MCQ, LPFC_MBOX);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0529 Failed setup of slow-path mailbox CQ: "
+ "rc = 0x%x\n", rc);
+ goto out_destroy_fp_eq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
+ phba->sli4_hba.mbx_cq->queue_id,
+ phba->sli4_hba.sp_eq->queue_id);
+
+ /* Set up slow-path ELS Complete Queue */
+ if (!phba->sli4_hba.els_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0530 ELS CQ not allocated\n");
+ goto out_destroy_mbx_cq;
+ }
+ rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
+ LPFC_WCQ, LPFC_ELS);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0531 Failed setup of slow-path ELS CQ: "
+ "rc = 0x%x\n", rc);
+ goto out_destroy_mbx_cq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
+ phba->sli4_hba.els_cq->queue_id,
+ phba->sli4_hba.sp_eq->queue_id);
+
+ /* Set up slow-path Unsolicited Receive Complete Queue */
+ if (!phba->sli4_hba.rxq_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0532 USOL RX CQ not allocated\n");
+ goto out_destroy_els_cq;
+ }
+ rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
+ LPFC_RCQ, LPFC_USOL);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0533 Failed setup of slow-path USOL RX CQ: "
+ "rc = 0x%x\n", rc);
+ goto out_destroy_els_cq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
+ phba->sli4_hba.rxq_cq->queue_id,
+ phba->sli4_hba.sp_eq->queue_id);
+
+ /* Set up fast-path FCP Response Complete Queue */
+ for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+ if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0526 Fast-path FCP CQ (%d) not "
+ "allocated\n", fcp_cqidx);
+ goto out_destroy_fcp_cq;
+ }
+ rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.fp_eq[fcp_cqidx],
+ LPFC_WCQ, LPFC_FCP);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0527 Failed setup of fast-path FCP "
+ "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
+ goto out_destroy_fcp_cq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2588 FCP CQ setup: cq[%d]-id=%d, "
+ "parent eq[%d]-id=%d\n",
+ fcp_cqidx,
+ phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+ fcp_cqidx,
+ phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
+ }
+
+ /*
+ * Set up all the Work Queues (WQs)
+ */
+
+ /* Set up Mailbox Command Queue */
+ if (!phba->sli4_hba.mbx_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0538 Slow-path MQ not allocated\n");
+ goto out_destroy_fcp_cq;
+ }
+ rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
+ phba->sli4_hba.mbx_cq, LPFC_MBOX);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0539 Failed setup of slow-path MQ: "
+ "rc = 0x%x\n", rc);
+ goto out_destroy_fcp_cq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
+ phba->sli4_hba.mbx_wq->queue_id,
+ phba->sli4_hba.mbx_cq->queue_id);
+
+ /* Set up slow-path ELS Work Queue */
+ if (!phba->sli4_hba.els_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0536 Slow-path ELS WQ not allocated\n");
+ goto out_destroy_mbx_wq;
+ }
+ rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
+ phba->sli4_hba.els_cq, LPFC_ELS);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0537 Failed setup of slow-path ELS WQ: "
+ "rc = 0x%x\n", rc);
+ goto out_destroy_mbx_wq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
+ phba->sli4_hba.els_wq->queue_id,
+ phba->sli4_hba.els_cq->queue_id);
+
+ /* Set up fast-path FCP Work Queue */
+ for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
+ if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0534 Fast-path FCP WQ (%d) not "
+ "allocated\n", fcp_wqidx);
+ goto out_destroy_fcp_wq;
+ }
+ rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
+ phba->sli4_hba.fcp_cq[fcp_cq_index],
+ LPFC_FCP);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0535 Failed setup of fast-path FCP "
+ "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
+ goto out_destroy_fcp_wq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2591 FCP WQ setup: wq[%d]-id=%d, "
+ "parent cq[%d]-id=%d\n",
+ fcp_wqidx,
+ phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+ fcp_cq_index,
+ phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
+ /* Round robin FCP Work Queue's Completion Queue assignment */
+ fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
+ }
+
+ /*
+ * Create Receive Queue (RQ)
+ */
+ if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0540 Receive Queue not allocated\n");
+ goto out_destroy_fcp_wq;
+ }
+ rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
+ phba->sli4_hba.rxq_cq, LPFC_USOL);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0541 Failed setup of Receive Queue: "
+ "rc = 0x%x\n", rc);
+ goto out_destroy_fcp_wq;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
+ "parent cq-id=%d\n",
+ phba->sli4_hba.hdr_rq->queue_id,
+ phba->sli4_hba.dat_rq->queue_id,
+ phba->sli4_hba.rxq_cq->queue_id);
+ return 0;
+
+out_destroy_fcp_wq:
+ for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
+ lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+ lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+out_destroy_mbx_wq:
+ lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+out_destroy_fcp_cq:
+ for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
+ lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+ lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
+out_destroy_els_cq:
+ lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+out_destroy_mbx_cq:
+ lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+out_destroy_fp_eq:
+ for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
+ lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+ lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+out_error:
+ return rc;
+}
+
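The FCP WQ loop above spreads cfg_fcp_wq_count work queues across
cfg_fcp_eq_count completion queues with a simple modulo counter. A stand-alone
C sketch of that round-robin assignment (illustrative counts only):

#include <stdio.h>

int main(void)
{
	int wq_count = 4;	/* stands in for cfg_fcp_wq_count */
	int cq_count = 2;	/* stands in for cfg_fcp_eq_count */
	int cq_index = 0;

	for (int wq = 0; wq < wq_count; wq++) {
		printf("FCP WQ %d -> FCP CQ %d\n", wq, cq_index);
		/* Round-robin the parent CQ, as the driver does */
		cq_index = (cq_index + 1) % cq_count;
	}
	return 0;
}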
+/**
+ * lpfc_sli4_queue_unset - Unset all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset (tear down) all the SLI4 queues that were
+ * set up for the FCoE HBA operation.
+ **/
+void
+lpfc_sli4_queue_unset(struct lpfc_hba *phba)
+{
+ int fcp_qidx;
+
+ /* Unset mailbox command work queue */
+ lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+ /* Unset ELS work queue */
+ lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+ /* Unset unsolicited receive queue */
+ lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
+ /* Unset FCP work queue */
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
+ lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+ /* Unset mailbox command complete queue */
+ lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+ /* Unset ELS complete queue */
+ lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+ /* Unset unsolicited receive complete queue */
+ lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
+ /* Unset FCP response complete queue */
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+ lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+ /* Unset fast-path event queue */
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+ lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+ /* Unset slow-path event queue */
+ lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and set up a pool of completion queue
+ * events. The body of the completion queue event is a completion queue entry
+ * (CQE). For now, this pool is used by the interrupt service routine to queue
+ * the following HBA completion queue events for the worker thread to process:
+ * - Mailbox asynchronous events
+ * - Receive queue completion unsolicited events
+ * Later, this can be used for all the slow-path events.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ **/
+static int
+lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+ int i;
+
+ for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
+ cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
+ if (!cq_event)
+ goto out_pool_create_fail;
+ list_add_tail(&cq_event->list,
+ &phba->sli4_hba.sp_cqe_event_pool);
+ }
+ return 0;
+
+out_pool_create_fail:
+ lpfc_sli4_cq_event_pool_destroy(phba);
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the pool of completion queue events at
+ * driver unload time. Note that it is the responsibility of the driver
+ * cleanup routine to free all the outstanding completion-queue events
+ * allocated from this pool back into the pool before invoking this routine
+ * to destroy the pool.
+ **/
+static void
+lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event, *next_cq_event;
+
+ list_for_each_entry_safe(cq_event, next_cq_event,
+ &phba->sli4_hba.sp_cqe_event_pool, list) {
+ list_del(&cq_event->list);
+ kfree(cq_event);
+ }
+}
+
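lpfc_sli4_cq_event_pool_create() pre-allocates 4 * cq_ecount event nodes onto
a list so that the interrupt path can take one without allocating memory. A
small user-space analogue of such a pre-allocated free pool (hypothetical
types, no locking shown):

#include <stdlib.h>

struct event {
	struct event *next;
	/* payload (e.g. a copied CQE) would live here */
};

static struct event *pool;	/* head of a singly-linked free list */

static int pool_create(int n)
{
	for (int i = 0; i < n; i++) {
		struct event *ev = malloc(sizeof(*ev));
		if (!ev)
			return -1;	/* caller tears down what was added */
		ev->next = pool;
		pool = ev;
	}
	return 0;
}

static struct event *pool_get(void)
{
	struct event *ev = pool;

	if (ev)
		pool = ev->next;
	return ev;	/* NULL when the pool is exhausted */
}

static void pool_put(struct event *ev)
{
	ev->next = pool;
	pool = ev;
}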
+/**
+ * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the lock free version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ * NULL otherwise.
+ **/
+struct lpfc_cq_event *
+__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event = NULL;
+
+ list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
+ struct lpfc_cq_event, list);
+ return cq_event;
+}
+
+/**
+ * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the lock version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ * NULL otherwise.
+ **/
+struct lpfc_cq_event *
+lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cq_event = __lpfc_sli4_cq_event_alloc(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return cq_event;
+}
+
+/**
+ * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the lock free version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+ struct lpfc_cq_event *cq_event)
+{
+ list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
+}
+
+/**
+ * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the lock version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+ struct lpfc_cq_event *cq_event)
+{
+ unsigned long iflags;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ __lpfc_sli4_cq_event_release(phba, cq_event);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
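The __lpfc_sli4_cq_event_alloc()/lpfc_sli4_cq_event_alloc() pair follows the
usual kernel convention: a double-underscore helper that expects the caller to
already hold the lock, plus a wrapper that takes hbalock with
spin_lock_irqsave() so it is safe from interrupt context. A generic sketch of
that layering (hypothetical names, kernel-style code):

#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_pool {
	spinlock_t lock;		/* protects free_list */
	struct list_head free_list;
};

/* Lock-free variant: caller must already hold pool->lock. */
static struct list_head *__demo_pool_take(struct demo_pool *pool)
{
	struct list_head *node = NULL;

	if (!list_empty(&pool->free_list)) {
		node = pool->free_list.next;
		list_del(node);
	}
	return node;
}

/* Locked variant: usable from any context, including hard IRQ. */
static struct list_head *demo_pool_take(struct demo_pool *pool)
{
	struct list_head *node;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	node = __demo_pool_take(pool);
	spin_unlock_irqrestore(&pool->lock, flags);
	return node;
}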
+/**
+ * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the pending completion-queue events
+ * back into the free pool for device reset.
+ **/
+static void
+lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
+{
+ LIST_HEAD(cqelist);
+ struct lpfc_cq_event *cqe;
+ unsigned long iflags;
+
+ /* Retrieve all the pending WCQEs from pending WCQE lists */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ /* Pending FCP XRI abort events */
+ list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+ &cqelist);
+ /* Pending ELS XRI abort events */
+ list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+ &cqelist);
+ /* Pending async events */
+ list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
+ &cqelist);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ while (!list_empty(&cqelist)) {
+ list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
+ lpfc_sli4_cq_event_release(phba, cqe);
+ }
+}
+
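lpfc_sli4_cq_event_release_all() holds hbalock only long enough to splice the
pending lists onto a private list head, then walks and releases the events
after dropping the lock. A generic kernel-style sketch of that
splice-then-process pattern (hypothetical names):

#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_item {
	struct list_head list;
};

static void demo_drain_pending(spinlock_t *lock, struct list_head *pending)
{
	LIST_HEAD(local);
	struct demo_item *item, *next;
	unsigned long flags;

	/* Move everything off the shared list while holding the lock */
	spin_lock_irqsave(lock, flags);
	list_splice_init(pending, &local);
	spin_unlock_irqrestore(lock, flags);

	/* Process the private list without the lock held */
	list_for_each_entry_safe(item, next, &local, list) {
		list_del(&item->list);
		/* release/free the item here */
	}
}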
+/**
+ * lpfc_pci_function_reset - Reset pci function.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request a PCI function reset. It destroys all
+ * resources assigned to the PCI function that originates this request.
+ *
+ * Return codes
+ * 0 - successful
+ * ENOMEM - No available memory
+ * EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_pci_function_reset(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ uint32_t rc = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0494 Unable to allocate memory for issuing "
+ "SLI_FUNCTION_RESET mailbox command\n");
+ return -ENOMEM;
+ }
+
+ /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
+ LPFC_SLI4_MBX_EMBED);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0495 SLI_FUNCTION_RESET mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
+ * @phba: pointer to lpfc hba data structure.
+ * @cnt: number of nop mailbox commands to send.
+ *
+ * This routine is invoked to send @cnt NOP mailbox commands and to wait for
+ * each command to complete.
+ *
+ * Return: the number of NOP mailbox commands completed.
+ **/
+static int
+lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
+{
+ LPFC_MBOXQ_t *mboxq;
+ int length, cmdsent;
+ uint32_t mbox_tmo;
+ uint32_t rc = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ if (cnt == 0) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2518 Requested to send 0 NOP mailbox cmd\n");
+ return cnt;
+ }
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2519 Unable to allocate memory for issuing "
+ "NOP mailbox command\n");
+ return 0;
+ }
+
+ /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
+ length = (sizeof(struct lpfc_mbx_nop) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
+
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ if (rc == MBX_TIMEOUT)
+ break;
+ /* Check return status */
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2520 NOP mailbox command failed "
+ "status x%x add_status x%x mbx "
+ "status x%x\n", shdr_status,
+ shdr_add_status, rc);
+ break;
+ }
+ }
+
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+
+ return cmdsent;
+}
+
+/**
+ * lpfc_sli4_fcfi_unreg - Unregister an FCFI from the device
+ * @phba: pointer to lpfc hba data structure.
+ * @fcfi: fcf index.
+ *
+ * This routine is invoked to unregister an FCFI from the device.
+ **/
+void
+lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
+{
+ LPFC_MBOXQ_t *mbox;
+ uint32_t mbox_tmo;
+ int rc;
+ unsigned long flags;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+ if (!mbox)
+ return;
+
+ lpfc_unreg_fcfi(mbox, fcfi);
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (rc != MBX_SUCCESS)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2517 Unregister FCFI command failed "
+ "status %d, mbxStatus x%x\n", rc,
+ bf_get(lpfc_mqe_status, &mbox->u.mqe));
+ else {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* Mark that the FCFI is no longer registered */
+ phba->fcf.fcf_flag &=
+ ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ }
+}
+
+/**
+ * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for device
+ * with SLI-4 interface spec.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev;
+ unsigned long bar0map_len, bar1map_len, bar2map_len;
+ int error = -ENODEV;
+
+ /* Obtain PCI device reference */
+ if (!phba->pcidev)
+ return error;
+ else
+ pdev = phba->pcidev;
+
+ /* Set the device DMA mask size */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ return error;
+
+ /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
+ * number of bytes required by each mapping. These actually map to
+ * PCI BAR regions 1, 2, and 4 of the SLI4 device.
+ */
+ phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
+ bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
+
+ phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
+ bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
+
+ phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
+ bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
+
+ /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
+ phba->sli4_hba.conf_regs_memmap_p =
+ ioremap(phba->pci_bar0_map, bar0map_len);
+ if (!phba->sli4_hba.conf_regs_memmap_p) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "ioremap failed for SLI4 PCI config registers.\n");
+ goto out;
+ }
+
+ /* Map SLI4 HBA Control Register base to a kernel virtual address. */
+ phba->sli4_hba.ctrl_regs_memmap_p =
+ ioremap(phba->pci_bar1_map, bar1map_len);
+ if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "ioremap failed for SLI4 HBA control registers.\n");
+ goto out_iounmap_conf;
+ }
+
+ /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
+ phba->sli4_hba.drbl_regs_memmap_p =
+ ioremap(phba->pci_bar2_map, bar2map_len);
+ if (!phba->sli4_hba.drbl_regs_memmap_p) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "ioremap failed for SLI4 HBA doorbell registers.\n");
+ goto out_iounmap_ctrl;
+ }
+
+ /* Set up BAR0 PCI config space register memory map */
+ lpfc_sli4_bar0_register_memmap(phba);
+
+ /* Set up BAR1 register memory map */
+ lpfc_sli4_bar1_register_memmap(phba);
+
+ /* Set up BAR2 register memory map */
+ error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+ if (error)
+ goto out_iounmap_all;
+
+ return 0;
+
+out_iounmap_all:
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+out_iounmap_ctrl:
+ iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+out_iounmap_conf:
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+out:
+ return error;
+}
+
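lpfc_sli4_pci_mem_setup() first requests a 64-bit DMA mask and silently falls
back to 32-bit if the platform rejects it. A minimal sketch of that fallback,
using the same pci_set_dma_mask()/DMA_BIT_MASK() calls the driver uses here
(error handling trimmed, kernel-era API):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int demo_set_dma_mask(struct pci_dev *pdev)
{
	/* Prefer 64-bit DMA addressing, fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
		return 0;
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
		return 0;
	return -ENODEV;		/* no usable DMA mask */
}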
+/**
+ * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for device
+ * with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev;
+
+ /* Obtain PCI device reference */
+ if (!phba->pcidev)
+ return;
+ else
+ pdev = phba->pcidev;
+
+ /* Free coherent DMA memory allocated */
+
+ /* Unmap I/O memory space */
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+ iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+
+ return;
+}
+
+/**
+ * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-3 interface specs. The kernel function pci_enable_msix() is
+ * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
+ * invoked, enables either all or nothing, depending on the current
+ * availability of PCI vector resources. The device driver is responsible
+ * for calling the individual request_irq() to register each MSI-X vector
+ * with an interrupt handler, which is done in this function. Note that
* later when device is unloading, the driver should always call free_irq()
* on all MSI-X vectors it has done request_irq() on before calling
* pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
@@ -2333,7 +6029,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
* other values - error
**/
static int
-lpfc_enable_msix(struct lpfc_hba *phba)
+lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
int rc, i;
LPFC_MBOXQ_t *pmb;
@@ -2349,20 +6045,21 @@ lpfc_enable_msix(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0420 PCI enable MSI-X failed (%d)\n", rc);
goto msi_fail_out;
- } else
- for (i = 0; i < LPFC_MSIX_VECTORS; i++)
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0477 MSI-X entry[%d]: vector=x%x "
- "message=%d\n", i,
- phba->msix_entries[i].vector,
- phba->msix_entries[i].entry);
+ }
+ for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0477 MSI-X entry[%d]: vector=x%x "
+ "message=%d\n", i,
+ phba->msix_entries[i].vector,
+ phba->msix_entries[i].entry);
/*
* Assign MSI-X vectors to interrupt handlers
*/
/* vector-0 is associated to slow-path handler */
- rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
- IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ rc = request_irq(phba->msix_entries[0].vector,
+ &lpfc_sli_sp_intr_handler, IRQF_SHARED,
+ LPFC_SP_DRIVER_HANDLER_NAME, phba);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0421 MSI-X slow-path request_irq failed "
@@ -2371,8 +6068,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
}
/* vector-1 is associated to fast-path handler */
- rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler,
- IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
+ rc = request_irq(phba->msix_entries[1].vector,
+ &lpfc_sli_fp_intr_handler, IRQF_SHARED,
+ LPFC_FP_DRIVER_HANDLER_NAME, phba);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2401,7 +6099,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0351 Config MSI mailbox command failed, "
"mbxCmd x%x, mbxStatus x%x\n",
- pmb->mb.mbxCommand, pmb->mb.mbxStatus);
+ pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
goto mbx_fail_out;
}
@@ -2428,14 +6126,14 @@ msi_fail_out:
}
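The lpfc_sli_enable_msix() path above relies on pci_enable_msix() (the legacy
API of this kernel era) being all-or-nothing: it either grants every requested
vector or none, and each granted vector still needs its own request_irq(). A
compressed sketch of that sequence with unwinding on failure (hypothetical
handler and names):

#include <linux/pci.h>
#include <linux/interrupt.h>

#define DEMO_NVEC 2

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			    void *dev_id)
{
	int rc, i;

	for (i = 0; i < DEMO_NVEC; i++)
		entries[i].entry = i;

	/* All-or-nothing: a non-zero return means no vectors were assigned */
	rc = pci_enable_msix(pdev, entries, DEMO_NVEC);
	if (rc)
		return rc;

	/* Each vector gets its own handler registration */
	for (i = 0; i < DEMO_NVEC; i++) {
		rc = request_irq(entries[i].vector, demo_handler,
				 IRQF_SHARED, "demo-msix", dev_id);
		if (rc)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0)
		free_irq(entries[i].vector, dev_id);
	pci_disable_msix(pdev);
	return rc;
}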
/**
- * lpfc_disable_msix - Disable MSI-X interrupt mode
+ * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to release the MSI-X vectors and then disable the
- * MSI-X interrupt mode.
+ * MSI-X interrupt mode to device with SLI-3 interface spec.
**/
static void
-lpfc_disable_msix(struct lpfc_hba *phba)
+lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
int i;
@@ -2444,23 +6142,26 @@ lpfc_disable_msix(struct lpfc_hba *phba)
free_irq(phba->msix_entries[i].vector, phba);
/* Disable MSI-X */
pci_disable_msix(phba->pcidev);
+
+ return;
}
/**
- * lpfc_enable_msi - Enable MSI interrupt mode
+ * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
* @phba: pointer to lpfc hba data structure.
*
- * This routine is invoked to enable the MSI interrupt mode. The kernel
- * function pci_enable_msi() is called to enable the MSI vector. The
- * device driver is responsible for calling the request_irq() to register
- * MSI vector with a interrupt the handler, which is done in this function.
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
+ * enable the MSI vector. The device driver is responsible for calling the
+ * request_irq() to register the MSI vector with an interrupt handler, which
+ * is done in this function.
*
* Return codes
 * 0 - successful
* other values - error
*/
static int
-lpfc_enable_msi(struct lpfc_hba *phba)
+lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
int rc;
@@ -2474,7 +6175,7 @@ lpfc_enable_msi(struct lpfc_hba *phba)
return rc;
}
- rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+ rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (rc) {
pci_disable_msi(phba->pcidev);
@@ -2485,17 +6186,17 @@ lpfc_enable_msi(struct lpfc_hba *phba)
}
/**
- * lpfc_disable_msi - Disable MSI interrupt mode
+ * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
* @phba: pointer to lpfc hba data structure.
*
- * This routine is invoked to disable the MSI interrupt mode. The driver
- * calls free_irq() on MSI vector it has done request_irq() on before
- * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
- * a device will be left with MSI enabled and leaks its vector.
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
+ * done request_irq() on before calling pci_disable_msi(). Failure to do so
+ * results in a BUG_ON() and a device will be left with MSI enabled and leaks
+ * its vector.
*/
-
static void
-lpfc_disable_msi(struct lpfc_hba *phba)
+lpfc_sli_disable_msi(struct lpfc_hba *phba)
{
free_irq(phba->pcidev->irq, phba);
pci_disable_msi(phba->pcidev);
@@ -2503,80 +6204,298 @@ lpfc_disable_msi(struct lpfc_hba *phba)
}
/**
- * lpfc_log_intr_mode - Log the active interrupt mode
+ * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
* @phba: pointer to lpfc hba data structure.
- * @intr_mode: active interrupt mode adopted.
*
- * This routine it invoked to log the currently used active interrupt mode
- * to the device.
- */
+ * This routine is invoked to enable device interrupt and associate driver's
+ * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
+ * spec. Depending on the interrupt mode configured for the driver, the driver
+ * will try to fall back from the configured interrupt mode to an interrupt
+ * mode which is supported by the platform, kernel, and device in the order
+ * of:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static uint32_t
+lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+ uint32_t intr_mode = LPFC_INTR_ERROR;
+ int retval;
+
+ if (cfg_mode == 2) {
+ /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+ retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+ if (!retval) {
+ /* Now, try to enable MSI-X interrupt mode */
+ retval = lpfc_sli_enable_msix(phba);
+ if (!retval) {
+ /* Indicate initialization to MSI-X mode */
+ phba->intr_type = MSIX;
+ intr_mode = 2;
+ }
+ }
+ }
+
+ /* Fallback to MSI if MSI-X initialization failed */
+ if (cfg_mode >= 1 && phba->intr_type == NONE) {
+ retval = lpfc_sli_enable_msi(phba);
+ if (!retval) {
+ /* Indicate initialization to MSI mode */
+ phba->intr_type = MSI;
+ intr_mode = 1;
+ }
+ }
+
+ /* Fallback to INTx if both MSI-X/MSI initialization failed */
+ if (phba->intr_type == NONE) {
+ retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+ IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ if (!retval) {
+ /* Indicate initialization to INTx mode */
+ phba->intr_type = INTx;
+ intr_mode = 0;
+ }
+ }
+ return intr_mode;
+}
+
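lpfc_sli_enable_intr() above degrades gracefully: MSI-X is attempted only when
cfg_mode is 2, MSI when cfg_mode is at least 1, and plain INTx is the final
fallback. A compact stand-alone C sketch of that fallback chain (illustrative
stubs; MSI-X is pretended to be unavailable here):

#include <stdio.h>

enum intr_type { NONE, INTX, MSI, MSIX };

/* Stand-ins for the real enable routines */
static int try_msix(void) { return -1; }	/* pretend MSI-X fails */
static int try_msi(void)  { return 0; }		/* pretend MSI succeeds */

static enum intr_type enable_intr(int cfg_mode)
{
	enum intr_type type = NONE;

	if (cfg_mode == 2 && try_msix() == 0)
		type = MSIX;
	if (type == NONE && cfg_mode >= 1 && try_msi() == 0)
		type = MSI;
	if (type == NONE)
		type = INTX;	/* legacy line interrupt as last resort */
	return type;
}

int main(void)
{
	printf("selected mode: %d\n", enable_intr(2));	/* prints 2 (MSI) */
	return 0;
}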
+/**
+ * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate the
+ * driver's interrupt handler(s) from interrupt vector(s) to device with
+ * SLI-3 interface spec. Depending on the interrupt mode, the driver will
+ * release the interrupt vector(s) for the message signaled interrupt.
+ **/
static void
-lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
+lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
- switch (intr_mode) {
- case 0:
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0470 Enable INTx interrupt mode.\n");
- break;
- case 1:
+ /* Disable the currently initialized interrupt mode */
+ if (phba->intr_type == MSIX)
+ lpfc_sli_disable_msix(phba);
+ else if (phba->intr_type == MSI)
+ lpfc_sli_disable_msi(phba);
+ else if (phba->intr_type == INTx)
+ free_irq(phba->pcidev->irq, phba);
+
+ /* Reset interrupt management states */
+ phba->intr_type = NONE;
+ phba->sli.slistat.sli_intr = 0;
+
+ return;
+}
+
+/**
+ * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
+ * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
+ * enables either all or nothing, depending on the current availability of
+ * PCI vector resources. The device driver is responsible for calling the
+ * individual request_irq() to register each MSI-X vector with an interrupt
+ * handler, which is done in this function. Note that later when device is
+ * unloading, the driver should always call free_irq() on all MSI-X vectors
+ * it has done request_irq() on before calling pci_disable_msix(). Failure
+ * to do so results in a BUG_ON() and a device will be left with MSI-X
+ * enabled and leaks its vectors.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_enable_msix(struct lpfc_hba *phba)
+{
+ int rc, index;
+
+ /* Set up MSI-X multi-message vectors */
+ for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+ phba->sli4_hba.msix_entries[index].entry = index;
+
+ /* Configure MSI-X capability structure */
+ rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
+ phba->sli4_hba.cfg_eqn);
+ if (rc) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0481 Enabled MSI interrupt mode.\n");
- break;
- case 2:
+ "0484 PCI enable MSI-X failed (%d)\n", rc);
+ goto msi_fail_out;
+ }
+ /* Log MSI-X vector assignment */
+ for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0480 Enabled MSI-X interrupt mode.\n");
- break;
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0482 Illegal interrupt mode.\n");
- break;
+ "0489 MSI-X entry[%d]: vector=x%x "
+ "message=%d\n", index,
+ phba->sli4_hba.msix_entries[index].vector,
+ phba->sli4_hba.msix_entries[index].entry);
+ /*
+ * Assign MSI-X vectors to interrupt handlers
+ */
+
+ /* The first vector must be associated with the slow-path handler for the MQ */
+ rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+ &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+ LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0485 MSI-X slow-path request_irq failed "
+ "(%d)\n", rc);
+ goto msi_fail_out;
}
- return;
+
+ /* The rest of the vector(s) are associated to fast-path handler(s) */
+ for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
+ phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
+ phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
+ rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+ &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
+ LPFC_FP_DRIVER_HANDLER_NAME,
+ &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0486 MSI-X fast-path (%d) "
+ "request_irq failed (%d)\n", index, rc);
+ goto cfg_fail_out;
+ }
+ }
+
+ return rc;
+
+cfg_fail_out:
+ /* free the irq already requested */
+ for (--index; index >= 1; index--)
+ free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
+ &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+
+ /* free the irq already requested */
+ free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+msi_fail_out:
+ /* Unconfigure MSI-X capability structure */
+ pci_disable_msix(phba->pcidev);
+ return rc;
}
+/**
+ * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-4 interface spec.
+ **/
static void
-lpfc_stop_port(struct lpfc_hba *phba)
+lpfc_sli4_disable_msix(struct lpfc_hba *phba)
{
- /* Clear all interrupt enable conditions */
- writel(0, phba->HCregaddr);
- readl(phba->HCregaddr); /* flush */
- /* Clear all pending interrupts */
- writel(0xffffffff, phba->HAregaddr);
- readl(phba->HAregaddr); /* flush */
+ int index;
- /* Reset some HBA SLI setup states */
- lpfc_stop_phba_timers(phba);
- phba->pport->work_port_events = 0;
+ /* Free up MSI-X multi-message vectors */
+ free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+ for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
+ free_irq(phba->sli4_hba.msix_entries[index].vector,
+ &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+ /* Disable MSI-X */
+ pci_disable_msix(phba->pcidev);
+
+ return;
+}
+
+/**
+ * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The kernel function pci_enable_msi() is called
+ * to enable the MSI vector. The device driver is responsible for calling
+ * the request_irq() to register the MSI vector with an interrupt handler,
+ * which is done in this function.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_enable_msi(struct lpfc_hba *phba)
+{
+ int rc, index;
+
+ rc = pci_enable_msi(phba->pcidev);
+ if (!rc)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0487 PCI enable MSI mode success.\n");
+ else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0488 PCI enable MSI mode failed (%d)\n", rc);
+ return rc;
+ }
+
+ rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+ IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ if (rc) {
+ pci_disable_msi(phba->pcidev);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0490 MSI request_irq failed (%d)\n", rc);
+ }
+
+ for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ }
+
+ return rc;
+}
+/**
+ * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
+ * done request_irq() on before calling pci_disable_msi(). Failure to do so
+ * results in a BUG_ON() and a device will be left with MSI enabled and leaks
+ * its vector.
+ **/
+static void
+lpfc_sli4_disable_msi(struct lpfc_hba *phba)
+{
+ free_irq(phba->pcidev->irq, phba);
+ pci_disable_msi(phba->pcidev);
return;
}
/**
- * lpfc_enable_intr - Enable device interrupt
+ * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable device interrupt and associate driver's
- * interrupt handler(s) to interrupt vector(s). Depends on the interrupt
- * mode configured to the driver, the driver will try to fallback from the
- * configured interrupt mode to an interrupt mode which is supported by the
- * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ.
+ * interrupt handler(s) to interrupt vector(s) to device with SLI-4
+ * interface spec. Depending on the interrupt mode configured for the driver,
+ * the driver will try to fall back from the configured interrupt mode to an
+ * interrupt mode which is supported by the platform, kernel, and device in
+ * the order of:
+ * MSI-X -> MSI -> IRQ.
*
* Return codes
- * 0 - sucessful
- * other values - error
+ * 0 - successful
+ * other values - error
**/
static uint32_t
-lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
uint32_t intr_mode = LPFC_INTR_ERROR;
- int retval;
+ int retval, index;
if (cfg_mode == 2) {
- /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
- retval = lpfc_sli_config_port(phba, 3);
+ /* Preparation before conf_msi mbox cmd */
+ retval = 0;
if (!retval) {
/* Now, try to enable MSI-X interrupt mode */
- retval = lpfc_enable_msix(phba);
+ retval = lpfc_sli4_enable_msix(phba);
if (!retval) {
/* Indicate initialization to MSI-X mode */
phba->intr_type = MSIX;
@@ -2587,7 +6506,7 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
/* Fallback to MSI if MSI-X initialization failed */
if (cfg_mode >= 1 && phba->intr_type == NONE) {
- retval = lpfc_enable_msi(phba);
+ retval = lpfc_sli4_enable_msi(phba);
if (!retval) {
/* Indicate initialization to MSI mode */
phba->intr_type = MSI;
@@ -2597,34 +6516,39 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
 /* Fallback to INTx if both MSI-X/MSI initialization failed */
if (phba->intr_type == NONE) {
- retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+ retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (!retval) {
/* Indicate initialization to INTx mode */
phba->intr_type = INTx;
intr_mode = 0;
+ for (index = 0; index < phba->cfg_fcp_eq_count;
+ index++) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ }
}
}
return intr_mode;
}
/**
- * lpfc_disable_intr - Disable device interrupt
+ * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
* @phba: pointer to lpfc hba data structure.
*
- * This routine is invoked to disable device interrupt and disassociate the
- * driver's interrupt handler(s) from interrupt vector(s). Depending on the
- * interrupt mode, the driver will release the interrupt vector(s) for the
- * message signaled interrupt.
+ * This routine is invoked to disable device interrupt and disassociate
+ * the driver's interrupt handler(s) from interrupt vector(s) to device
+ * with SLI-4 interface spec. Depending on the interrupt mode, the driver
+ * will release the interrupt vector(s) for the message signaled interrupt.
**/
static void
-lpfc_disable_intr(struct lpfc_hba *phba)
+lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
/* Disable the currently initialized interrupt mode */
if (phba->intr_type == MSIX)
- lpfc_disable_msix(phba);
+ lpfc_sli4_disable_msix(phba);
else if (phba->intr_type == MSI)
- lpfc_disable_msi(phba);
+ lpfc_sli4_disable_msi(phba);
else if (phba->intr_type == INTx)
free_irq(phba->pcidev->irq, phba);
@@ -2636,263 +6560,233 @@ lpfc_disable_intr(struct lpfc_hba *phba)
}
/**
- * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem
- * @pdev: pointer to PCI device
- * @pid: pointer to PCI device identifier
- *
- * This routine is to be registered to the kernel's PCI subsystem. When an
- * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at
- * PCI device-specific information of the device and driver to see if the
- * driver state that it can support this kind of device. If the match is
- * successful, the driver core invokes this routine. If this routine
- * determines it can claim the HBA, it does all the initialization that it
- * needs to do to handle the HBA properly.
+ * lpfc_unset_hba - Unset SLI3 hba device initialization
+ * @phba: pointer to lpfc hba data structure.
*
- * Return code
- * 0 - driver can claim the device
- * negative value - driver can not claim the device
+ * This routine is invoked to unset the HBA device initialization steps to
+ * a device with SLI-3 interface spec.
**/
-static int __devinit
-lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+static void
+lpfc_unset_hba(struct lpfc_hba *phba)
{
- struct lpfc_vport *vport = NULL;
- struct lpfc_hba *phba;
- struct lpfc_sli *psli;
- struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
- struct Scsi_Host *shost = NULL;
- void *ptr;
- unsigned long bar0map_len, bar2map_len;
- int error = -ENODEV, retval;
- int i, hbq_count;
- uint16_t iotag;
- uint32_t cfg_mode, intr_mode;
- int bars = pci_select_bars(pdev, IORESOURCE_MEM);
- struct lpfc_adapter_event_header adapter_event;
-
- if (pci_enable_device_mem(pdev))
- goto out;
- if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
- goto out_disable_device;
+ struct lpfc_vport *vport = phba->pport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
- if (!phba)
- goto out_release_regions;
+ spin_lock_irq(shost->host_lock);
+ vport->load_flag |= FC_UNLOADING;
+ spin_unlock_irq(shost->host_lock);
- atomic_set(&phba->fast_event_count, 0);
- spin_lock_init(&phba->hbalock);
+ lpfc_stop_hba_timers(phba);
- /* Initialize ndlp management spinlock */
- spin_lock_init(&phba->ndlp_lock);
+ phba->pport->work_port_events = 0;
- phba->pcidev = pdev;
+ lpfc_sli_hba_down(phba);
- /* Assign an unused board number */
- if ((phba->brd_no = lpfc_get_instance()) < 0)
- goto out_free_phba;
+ lpfc_sli_brdrestart(phba);
- INIT_LIST_HEAD(&phba->port_list);
- init_waitqueue_head(&phba->wait_4_mlo_m_q);
- /*
- * Get all the module params for configuring this host and then
- * establish the host.
- */
- lpfc_get_cfgparam(phba);
- phba->max_vpi = LPFC_MAX_VPI;
+ lpfc_sli_disable_intr(phba);
- /* Initialize timers used by driver */
- init_timer(&phba->hb_tmofunc);
- phba->hb_tmofunc.function = lpfc_hb_timeout;
- phba->hb_tmofunc.data = (unsigned long)phba;
+ return;
+}
- psli = &phba->sli;
- init_timer(&psli->mbox_tmo);
- psli->mbox_tmo.function = lpfc_mbox_timeout;
- psli->mbox_tmo.data = (unsigned long) phba;
- init_timer(&phba->fcp_poll_timer);
- phba->fcp_poll_timer.function = lpfc_poll_timeout;
- phba->fcp_poll_timer.data = (unsigned long) phba;
- init_timer(&phba->fabric_block_timer);
- phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
- phba->fabric_block_timer.data = (unsigned long) phba;
- init_timer(&phba->eratt_poll);
- phba->eratt_poll.function = lpfc_poll_eratt;
- phba->eratt_poll.data = (unsigned long) phba;
+/**
+ * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the HBA device initialization steps to
+ * a device with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_unset_hba(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- pci_set_master(pdev);
- pci_save_state(pdev);
- pci_try_set_mwi(pdev);
+ spin_lock_irq(shost->host_lock);
+ vport->load_flag |= FC_UNLOADING;
+ spin_unlock_irq(shost->host_lock);
- if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0)
- if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0)
- goto out_idr_remove;
+ phba->pport->work_port_events = 0;
- /*
- * Get the bus address of Bar0 and Bar2 and the number of bytes
- * required by each mapping.
- */
- phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
- bar0map_len = pci_resource_len(phba->pcidev, 0);
+ lpfc_sli4_hba_down(phba);
- phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
- bar2map_len = pci_resource_len(phba->pcidev, 2);
+ lpfc_sli4_disable_intr(phba);
- /* Map HBA SLIM to a kernel virtual address. */
- phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
- if (!phba->slim_memmap_p) {
- error = -ENODEV;
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLIM memory.\n");
- goto out_idr_remove;
- }
-
- /* Map HBA Control Registers to a kernel virtual address. */
- phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
- if (!phba->ctrl_regs_memmap_p) {
- error = -ENODEV;
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for HBA control registers.\n");
- goto out_iounmap_slim;
- }
+ return;
+}
- /* Allocate memory for SLI-2 structures */
- phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev,
- SLI2_SLIM_SIZE,
- &phba->slim2p.phys,
- GFP_KERNEL);
- if (!phba->slim2p.virt)
- goto out_iounmap;
+/**
+ * lpfc_sli4_hba_unset - Unset the fcoe hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to reset the HBA's FCoE
+ * function. The caller is not required to hold any lock. This routine
+ * issues the PCI function reset mailbox command to reset the FCoE function.
+ * At the end of the function, it calls lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static void
+lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+{
+ int wait_cnt = 0;
+ LPFC_MBOXQ_t *mboxq;
- memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
- phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
- phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
- phba->IOCBs = (phba->slim2p.virt +
- offsetof(struct lpfc_sli2_slim, IOCBs));
+ lpfc_stop_hba_timers(phba);
+ phba->sli4_hba.intr_enable = 0;
- phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
- lpfc_sli_hbq_size(),
- &phba->hbqslimp.phys,
- GFP_KERNEL);
- if (!phba->hbqslimp.virt)
- goto out_free_slim;
+ /*
+ * Gracefully wait out any currently outstanding asynchronous
+ * mailbox command.
+ */
- hbq_count = lpfc_sli_hbq_count();
- ptr = phba->hbqslimp.virt;
- for (i = 0; i < hbq_count; ++i) {
- phba->hbqs[i].hbq_virt = ptr;
- INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
- ptr += (lpfc_hbq_defs[i]->entry_count *
- sizeof(struct lpfc_hbq_entry));
+ /* First, block any pending async mailbox command from being posted */
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
+ /* Now, try to wait it out if we can */
+ while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+ msleep(10);
+ if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
+ break;
}
- phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
- phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
-
- memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
-
- INIT_LIST_HEAD(&phba->hbqbuf_in_list);
-
- /* Initialize the SLI Layer to run with lpfc HBAs. */
- lpfc_sli_setup(phba);
- lpfc_sli_queue_setup(phba);
-
- retval = lpfc_mem_alloc(phba);
- if (retval) {
- error = retval;
- goto out_free_hbqslimp;
+ /* Forcefully release the outstanding mailbox command if timed out */
+ if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+ spin_lock_irq(&phba->hbalock);
+ mboxq = phba->sli.mbox_active;
+ mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ __lpfc_mbox_cmpl_put(phba, mboxq);
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ spin_unlock_irq(&phba->hbalock);
}
- /* Initialize and populate the iocb list per host. */
- INIT_LIST_HEAD(&phba->lpfc_iocb_list);
- for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
- iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
- if (iocbq_entry == NULL) {
- printk(KERN_ERR "%s: only allocated %d iocbs of "
- "expected %d count. Unloading driver.\n",
- __func__, i, LPFC_IOCB_LIST_CNT);
- error = -ENOMEM;
- goto out_free_iocbq;
- }
+ /* Tear down the queues in the HBA */
+ lpfc_sli4_queue_unset(phba);
- iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
- if (iotag == 0) {
- kfree (iocbq_entry);
- printk(KERN_ERR "%s: failed to allocate IOTAG. "
- "Unloading driver.\n",
- __func__);
- error = -ENOMEM;
- goto out_free_iocbq;
- }
+ /* Disable PCI subsystem interrupt */
+ lpfc_sli4_disable_intr(phba);
- spin_lock_irq(&phba->hbalock);
- list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
- phba->total_iocbq_bufs++;
- spin_unlock_irq(&phba->hbalock);
- }
+ /* Stop the worker kthread; the stop signal triggers work_done one more time */
+ kthread_stop(phba->worker_thread);
- /* Initialize HBA structure */
- phba->fc_edtov = FF_DEF_EDTOV;
- phba->fc_ratov = FF_DEF_RATOV;
- phba->fc_altov = FF_DEF_ALTOV;
- phba->fc_arbtov = FF_DEF_ARBTOV;
+ /* Stop the SLI4 device port */
+ phba->pport->work_port_events = 0;
+}
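lpfc_sli4_hba_unset() above waits out an in-flight mailbox command with a
bounded msleep() loop (LPFC_ACTIVE_MBOX_WAIT_CNT iterations of 10 ms) before
forcing completion. A user-space analogue of that bounded-wait idiom
(hypothetical flag and limit):

#include <stdbool.h>
#include <unistd.h>

#define DEMO_WAIT_LIMIT 100	/* ~1 second at 10 ms per poll */

/* Returns true if *active cleared in time, false if the caller must force it. */
static bool demo_wait_for_mbox(volatile bool *active)
{
	int wait_cnt = 0;

	while (*active) {
		usleep(10 * 1000);	/* 10 ms, like the driver's msleep(10) */
		if (++wait_cnt > DEMO_WAIT_LIMIT)
			return false;
	}
	return true;
}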
- INIT_LIST_HEAD(&phba->work_list);
- phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
- phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
+/**
+ * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be called to attach a device with SLI-3 interface spec
+ * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
+ * information of the device and driver to see if the driver state that it can
+ * support this kind of device. If the match is successful, the driver core
+ * invokes this routine. If this routine determines it can claim the HBA, it
+ * does all the initialization that it needs to do to handle the HBA properly.
+ *
+ * Return code
+ * 0 - driver can claim the device
+ * negative value - driver can not claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_vport *vport = NULL;
+ int error;
+ uint32_t cfg_mode, intr_mode;
- /* Initialize the wait queue head for the kernel thread */
- init_waitqueue_head(&phba->work_waitq);
+ /* Allocate memory for HBA structure */
+ phba = lpfc_hba_alloc(pdev);
+ if (!phba)
+ return -ENOMEM;
- /* Startup the kernel thread for this host adapter. */
- phba->worker_thread = kthread_run(lpfc_do_work, phba,
- "lpfc_worker_%d", phba->brd_no);
- if (IS_ERR(phba->worker_thread)) {
- error = PTR_ERR(phba->worker_thread);
- goto out_free_iocbq;
+ /* Perform generic PCI device enabling operation */
+ error = lpfc_enable_pci_dev(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1401 Failed to enable pci device.\n");
+ goto out_free_phba;
}
- /* Initialize the list of scsi buffers used by driver for scsi IO. */
- spin_lock_init(&phba->scsi_buf_list_lock);
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+ /* Set up SLI API function jump table for PCI-device group-0 HBAs */
+ error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
+ if (error)
+ goto out_disable_pci_dev;
- /* Initialize list of fabric iocbs */
- INIT_LIST_HEAD(&phba->fabric_iocb_list);
+ /* Set up SLI-3 specific device PCI memory space */
+ error = lpfc_sli_pci_mem_setup(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1402 Failed to set up pci memory space.\n");
+ goto out_disable_pci_dev;
+ }
- /* Initialize list to save ELS buffers */
- INIT_LIST_HEAD(&phba->elsbuf);
+ /* Set up phase-1 common device driver resources */
+ error = lpfc_setup_driver_resource_phase1(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1403 Failed to set up driver resource.\n");
+ goto out_unset_pci_mem_s3;
+ }
- vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
- if (!vport)
- goto out_kthread_stop;
+ /* Set up SLI-3 specific device driver resources */
+ error = lpfc_sli_driver_resource_setup(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1404 Failed to set up driver resource.\n");
+ goto out_unset_pci_mem_s3;
+ }
- shost = lpfc_shost_from_vport(vport);
- phba->pport = vport;
- lpfc_debugfs_initialize(vport);
+ /* Initialize and populate the iocb list per host */
+ error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1405 Failed to initialize iocb list.\n");
+ goto out_unset_driver_resource_s3;
+ }
- pci_set_drvdata(pdev, shost);
+ /* Set up common device driver resources */
+ error = lpfc_setup_driver_resource_phase2(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1406 Failed to set up driver resource.\n");
+ goto out_free_iocb_list;
+ }
- phba->MBslimaddr = phba->slim_memmap_p;
- phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
- phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
- phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
- phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+ /* Create SCSI host to the physical port */
+ error = lpfc_create_shost(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1407 Failed to create scsi host.\n");
+ goto out_unset_driver_resource;
+ }
/* Configure sysfs attributes */
- if (lpfc_alloc_sysfs_attr(vport)) {
+ vport = phba->pport;
+ error = lpfc_alloc_sysfs_attr(vport);
+ if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1476 Failed to allocate sysfs attr\n");
- error = -ENOMEM;
- goto out_destroy_port;
+ goto out_destroy_shost;
}
+ /* Now, trying to enable interrupt and bring up the device */
cfg_mode = phba->cfg_use_msi;
while (true) {
+ /* Put device to a known state before enabling interrupt */
+ lpfc_stop_port(phba);
/* Configure and enable interrupt */
- intr_mode = lpfc_enable_intr(phba, cfg_mode);
+ intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0426 Failed to enable interrupt.\n");
+ "0431 Failed to enable interrupt.\n");
+ error = -ENODEV;
goto out_free_sysfs_attr;
}
- /* HBA SLI setup */
+ /* SLI-3 HBA setup */
if (lpfc_sli_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1477 Failed to set up hba\n");
@@ -2902,185 +6796,65 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Wait 50ms for the interrupts of previous mailbox commands */
msleep(50);
- /* Check active interrupts received */
- if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
+ /* Check active interrupts on message signaled interrupts */
+ if (intr_mode == 0 ||
+ phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
/* Log the current active interrupt mode */
phba->intr_mode = intr_mode;
lpfc_log_intr_mode(phba, intr_mode);
break;
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0451 Configure interrupt mode (%d) "
+ "0447 Configure interrupt mode (%d) "
"failed active interrupt test.\n",
intr_mode);
- if (intr_mode == 0) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0479 Failed to enable "
- "interrupt.\n");
- error = -ENODEV;
- goto out_remove_device;
- }
- /* Stop HBA SLI setups */
- lpfc_stop_port(phba);
/* Disable the current interrupt mode */
- lpfc_disable_intr(phba);
+ lpfc_sli_disable_intr(phba);
/* Try next level of interrupt mode */
cfg_mode = --intr_mode;
}
}
- /*
- * hba setup may have changed the hba_queue_depth so we need to adjust
- * the value of can_queue.
- */
- shost->can_queue = phba->cfg_hba_queue_depth - 10;
- if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
-
- if (lpfc_prot_mask && lpfc_prot_guard) {
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "1478 Registering BlockGuard with the "
- "SCSI layer\n");
+ /* Perform post initialization setup */
+ lpfc_post_init_setup(phba);
- scsi_host_set_prot(shost, lpfc_prot_mask);
- scsi_host_set_guard(shost, lpfc_prot_guard);
- }
- }
-
- if (!_dump_buf_data) {
- int pagecnt = 10;
- while (pagecnt) {
- spin_lock_init(&_dump_buf_lock);
- _dump_buf_data =
- (char *) __get_free_pages(GFP_KERNEL, pagecnt);
- if (_dump_buf_data) {
- printk(KERN_ERR "BLKGRD allocated %d pages for "
- "_dump_buf_data at 0x%p\n",
- (1 << pagecnt), _dump_buf_data);
- _dump_buf_data_order = pagecnt;
- memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
- << pagecnt));
- break;
- } else {
- --pagecnt;
- }
-
- }
-
- if (!_dump_buf_data_order)
- printk(KERN_ERR "BLKGRD ERROR unable to allocate "
- "memory for hexdump\n");
-
- } else {
- printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
- "\n", _dump_buf_data);
- }
-
-
- if (!_dump_buf_dif) {
- int pagecnt = 10;
- while (pagecnt) {
- _dump_buf_dif =
- (char *) __get_free_pages(GFP_KERNEL, pagecnt);
- if (_dump_buf_dif) {
- printk(KERN_ERR "BLKGRD allocated %d pages for "
- "_dump_buf_dif at 0x%p\n",
- (1 << pagecnt), _dump_buf_dif);
- _dump_buf_dif_order = pagecnt;
- memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
- << pagecnt));
- break;
- } else {
- --pagecnt;
- }
-
- }
-
- if (!_dump_buf_dif_order)
- printk(KERN_ERR "BLKGRD ERROR unable to allocate "
- "memory for hexdump\n");
-
- } else {
- printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
- _dump_buf_dif);
- }
-
- lpfc_host_attrib_init(shost);
-
- if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- spin_lock_irq(shost->host_lock);
- lpfc_poll_start_timer(phba);
- spin_unlock_irq(shost->host_lock);
- }
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0428 Perform SCSI scan\n");
- /* Send board arrival event to upper layer */
- adapter_event.event_type = FC_REG_ADAPTER_EVENT;
- adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
- fc_host_post_vendor_event(shost, fc_get_event_number(),
- sizeof(adapter_event),
- (char *) &adapter_event,
- LPFC_NL_VENDOR_ID);
+ /* Check if there are static vports to be created. */
+ lpfc_create_static_vport(phba);
return 0;
out_remove_device:
- spin_lock_irq(shost->host_lock);
- vport->load_flag |= FC_UNLOADING;
- spin_unlock_irq(shost->host_lock);
- lpfc_stop_phba_timers(phba);
- phba->pport->work_port_events = 0;
- lpfc_disable_intr(phba);
- lpfc_sli_hba_down(phba);
- lpfc_sli_brdrestart(phba);
+ lpfc_unset_hba(phba);
out_free_sysfs_attr:
lpfc_free_sysfs_attr(vport);
-out_destroy_port:
- destroy_port(vport);
-out_kthread_stop:
- kthread_stop(phba->worker_thread);
-out_free_iocbq:
- list_for_each_entry_safe(iocbq_entry, iocbq_next,
- &phba->lpfc_iocb_list, list) {
- kfree(iocbq_entry);
- phba->total_iocbq_bufs--;
- }
- lpfc_mem_free(phba);
-out_free_hbqslimp:
- dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
- phba->hbqslimp.virt, phba->hbqslimp.phys);
-out_free_slim:
- dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
- phba->slim2p.virt, phba->slim2p.phys);
-out_iounmap:
- iounmap(phba->ctrl_regs_memmap_p);
-out_iounmap_slim:
- iounmap(phba->slim_memmap_p);
-out_idr_remove:
- idr_remove(&lpfc_hba_index, phba->brd_no);
+out_destroy_shost:
+ lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+ lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+ lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s3:
+ lpfc_sli_driver_resource_unset(phba);
+out_unset_pci_mem_s3:
+ lpfc_sli_pci_mem_unset(phba);
+out_disable_pci_dev:
+ lpfc_disable_pci_dev(phba);
out_free_phba:
- kfree(phba);
-out_release_regions:
- pci_release_selected_regions(pdev, bars);
-out_disable_device:
- pci_disable_device(pdev);
-out:
- pci_set_drvdata(pdev, NULL);
- if (shost)
- scsi_host_put(shost);
+ lpfc_hba_free(phba);
return error;
}
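Worth noting for readers following the probe flow: the while(true) loop above degrades the interrupt mode one level per iteration (MSI-X, then MSI, then INTx) until the active-interrupt test passes or INTx is reached. Below is a minimal standalone sketch of that fallback pattern; the helper names and the stubbed test results are illustrative, not the driver's actual API.

#include <stdio.h>

/*
 * Standalone sketch of the MSI-X -> MSI -> INTx fallback used by the probe
 * loop above. enable_intr()/disable_intr()/intr_test() are stand-ins for the
 * lpfc helpers; the stubbed results simply pretend MSI-X fails its
 * active-interrupt test so the loop settles on MSI.
 */
static int  enable_intr(int mode) { return mode; }       /* pretend enable worked */
static void disable_intr(void)    { }
static int  intr_test(int mode)   { return mode <= 1; }  /* pretend MSI-X fails   */

static int bring_up_with_fallback(int requested)
{
	for (int mode = requested; mode >= 0; mode--) {
		if (enable_intr(mode) < 0)
			return -1;            /* could not enable at all        */
		if (mode == 0 || intr_test(mode))
			return mode;          /* keep the first mode that works */
		disable_intr();               /* drop one level and retry       */
	}
	return -1;
}

int main(void)
{
	/* 2 = MSI-X, 1 = MSI, 0 = INTx, mirroring the cfg_mode convention above */
	printf("settled on interrupt mode %d\n", bring_up_with_fallback(2));
	return 0;
}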
/**
- * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem
+ * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
* @pdev: pointer to PCI device
*
- * This routine is to be registered to the kernel's PCI subsystem. When an
- * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup
- * for the HBA device to be removed from the PCI subsystem properly.
+ * This routine is to be called to detach a device with SLI-3 interface
+ * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
**/
static void __devexit
-lpfc_pci_remove_one(struct pci_dev *pdev)
+lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -3098,7 +6872,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
/* Release all the vports against this physical port */
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++)
+ for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
fc_vport_terminate(vports[i]->fc_vport);
lpfc_destroy_vport_work_array(phba, vports);
@@ -3120,7 +6894,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
/* Final cleanup of txcmplq and reset the HBA */
lpfc_sli_brdrestart(phba);
- lpfc_stop_phba_timers(phba);
+ lpfc_stop_hba_timers(phba);
spin_lock_irq(&phba->hbalock);
list_del_init(&vport->listentry);
spin_unlock_irq(&phba->hbalock);
@@ -3128,7 +6902,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
lpfc_debugfs_terminate(vport);
/* Disable interrupt */
- lpfc_disable_intr(phba);
+ lpfc_sli_disable_intr(phba);
pci_set_drvdata(pdev, NULL);
scsi_host_put(shost);
@@ -3138,7 +6912,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
* corresponding pools here.
*/
lpfc_scsi_free(phba);
- lpfc_mem_free(phba);
+ lpfc_mem_free_all(phba);
dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
phba->hbqslimp.virt, phba->hbqslimp.phys);
@@ -3151,36 +6925,35 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
iounmap(phba->ctrl_regs_memmap_p);
iounmap(phba->slim_memmap_p);
- idr_remove(&lpfc_hba_index, phba->brd_no);
-
- kfree(phba);
+ lpfc_hba_free(phba);
pci_release_selected_regions(pdev, bars);
pci_disable_device(pdev);
}
/**
- * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management
+ * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
* @pdev: pointer to PCI device
* @msg: power management message
*
- * This routine is to be registered to the kernel's PCI subsystem to support
- * system Power Management (PM). When PM invokes this method, it quiesces the
- * device by stopping the driver's worker thread for the device, turning off
- * device's interrupt and DMA, and bring the device offline. Note that as the
- * driver implements the minimum PM requirements to a power-aware driver's PM
- * support for suspend/resume -- all the possible PM messages (SUSPEND,
- * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND
- * and the driver will fully reinitialize its device during resume() method
- * call, the driver will set device to PCI_D3hot state in PCI config space
- * instead of setting it according to the @msg provided by the PM.
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) for a device with SLI-3 interface spec. When
+ * PM invokes this method, it quiesces the device by stopping the driver's
+ * worker thread for the device, turning off the device's interrupt and DMA,
+ * and bringing the device offline. Note that as the driver implements the
+ * minimum PM requirements to a power-aware driver's PM support for the
+ * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
+ * to the suspend() method call will be treated as SUSPEND and the driver will
+ * fully reinitialize its device during resume() method call, the driver will
+ * set device to PCI_D3hot state in PCI config space instead of setting it
+ * according to the @msg provided by the PM.
*
* Return code
- * 0 - driver suspended the device
- * Error otherwise
+ * 0 - driver suspended the device
+ * Error otherwise
**/
static int
-lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3194,7 +6967,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
kthread_stop(phba->worker_thread);
/* Disable interrupt from device */
- lpfc_disable_intr(phba);
+ lpfc_sli_disable_intr(phba);
/* Save device state to PCI config space */
pci_save_state(pdev);
@@ -3204,25 +6977,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
}
/**
- * lpfc_pci_resume_one - lpfc PCI func to resume device for power management
+ * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
* @pdev: pointer to PCI device
*
- * This routine is to be registered to the kernel's PCI subsystem to support
- * system Power Management (PM). When PM invokes this method, it restores
- * the device's PCI config space state and fully reinitializes the device
- * and brings it online. Note that as the driver implements the minimum PM
- * requirements to a power-aware driver's PM for suspend/resume -- all
- * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
- * method call will be treated as SUSPEND and the driver will fully
- * reinitialize its device during resume() method call, the device will be
- * set to PCI_D0 directly in PCI config space before restoring the state.
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) for a device with SLI-3 interface spec. When PM
+ * invokes this method, it restores the device's PCI config space state and
+ * fully reinitializes the device and brings it online. Note that as the
+ * driver implements the minimum PM requirements to a power-aware driver's
+ * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
+ * FREEZE) to the suspend() method call will be treated as SUSPEND and the
+ * driver will fully reinitialize its device during resume() method call,
+ * the device will be set to PCI_D0 directly in PCI config space before
+ * restoring the state.
*
* Return code
- * 0 - driver suspended the device
- * Error otherwise
+ * 0 - driver resumed the device
+ * Error otherwise
**/
static int
-lpfc_pci_resume_one(struct pci_dev *pdev)
+lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3250,7 +7024,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
}
/* Configure and enable interrupt */
- intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
+ intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0430 PM resume Failed to enable interrupt\n");
@@ -3269,23 +7043,24 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
}
/**
- * lpfc_io_error_detected - Driver method for handling PCI I/O error detected
+ * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
* @pdev: pointer to PCI device.
* @state: the current PCI connection state.
*
- * This routine is registered to the PCI subsystem for error handling. This
- * function is called by the PCI subsystem after a PCI bus error affecting
- * this device has been detected. When this function is invoked, it will
- * need to stop all the I/Os and interrupt(s) to the device. Once that is
- * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to
- * perform proper recovery as desired.
+ * This routine is called from the PCI subsystem for I/O error handling to
+ * a device with SLI-3 interface spec. This function is called by the PCI
+ * subsystem after a PCI bus error affecting this device has been detected.
+ * When this function is invoked, it will need to stop all the I/Os and
+ * interrupt(s) to the device. Once that is done, it will return
+ * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
+ * as desired.
*
* Return codes
- * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
- * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
**/
-static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+static pci_ers_result_t
+lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3312,30 +7087,32 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
lpfc_sli_abort_iocb_ring(phba, pring);
/* Disable interrupt */
- lpfc_disable_intr(phba);
+ lpfc_sli_disable_intr(phba);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
/**
- * lpfc_io_slot_reset - Restart a PCI device from scratch
+ * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
* @pdev: pointer to PCI device.
*
- * This routine is registered to the PCI subsystem for error handling. This is
- * called after PCI bus has been reset to restart the PCI card from scratch,
- * as if from a cold-boot. During the PCI subsystem error recovery, after the
- * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform
- * proper error recovery and then call this routine before calling the .resume
- * method to recover the device. This function will initialize the HBA device,
- * enable the interrupt, but it will just put the HBA to offline state without
- * passing any I/O traffic.
+ * This routine is called from the PCI subsystem for error handling to
+ * a device with SLI-3 interface spec. This is called after the PCI bus has been
+ * reset to restart the PCI card from scratch, as if from a cold-boot.
+ * During the PCI subsystem error recovery, after driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method
+ * to recover the device. This function will initialize the HBA device,
+ * enable the interrupt, but it will just put the HBA to offline state
+ * without passing any I/O traffic.
*
* Return codes
- * PCI_ERS_RESULT_RECOVERED - the device has been recovered
- * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ * PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
*/
-static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t
+lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3354,11 +7131,11 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
spin_lock_irq(&phba->hbalock);
- psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
/* Configure and enable interrupt */
- intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
+ intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0427 Cannot re-enable interrupt after "
@@ -3378,20 +7155,713 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
}
/**
- * lpfc_io_resume - Resume PCI I/O operation
+ * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
* @pdev: pointer to PCI device
*
- * This routine is registered to the PCI subsystem for error handling. It is
- * called when kernel error recovery tells the lpfc driver that it is ok to
- * resume normal PCI operation after PCI bus error recovery. After this call,
- * traffic can start to flow from this device again.
+ * This routine is called from the PCI subsystem for error handling to a device
+ * with SLI-3 interface spec. It is called when kernel error recovery tells
+ * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
+ * error recovery. After this call, traffic can start to flow from this device
+ * again.
*/
-static void lpfc_io_resume(struct pci_dev *pdev)
+static void
+lpfc_io_resume_s3(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ lpfc_online(phba);
+}
+
+/**
+ * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * returns the number of ELS/CT IOCBs to reserve
+ **/
+int
+lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
+{
+ int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+ if (max_xri <= 100)
+ return 4;
+ else if (max_xri <= 256)
+ return 8;
+ else if (max_xri <= 512)
+ return 16;
+ else if (max_xri <= 1024)
+ return 32;
+ else
+ return 48;
+}
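The reservation above is a simple step function of the configured XRI count. The short standalone snippet below just mirrors that mapping locally so the breakpoints are easy to eyeball; it duplicates the logic rather than calling the driver symbol.

#include <stdio.h>

/* Local mirror of the max_xri -> reserved ELS/CT IOCB mapping above. */
static int els_iocb_cnt(int max_xri)
{
	if (max_xri <= 100)
		return 4;
	if (max_xri <= 256)
		return 8;
	if (max_xri <= 512)
		return 16;
	if (max_xri <= 1024)
		return 32;
	return 48;
}

int main(void)
{
	const int samples[] = { 64, 256, 500, 1024, 4096 };

	for (int i = 0; i < 5; i++)
		printf("max_xri=%4d -> %2d ELS/CT IOCBs reserved\n",
		       samples[i], els_iocb_cnt(samples[i]));
	return 0;
}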
+
+/**
+ * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is called from the kernel's PCI subsystem to attach a device with
+ * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
+ * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
+ * information of the device and driver to see if the driver states that it
+ * can support this kind of device. If the match is successful, the driver
+ * core invokes this routine. If this routine determines it can claim the HBA,
+ * it does all the initialization that it needs to do to handle the HBA
+ * properly.
+ *
+ * Return code
+ * 0 - driver can claim the device
+ * negative value - driver can not claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_vport *vport = NULL;
+ int error;
+ uint32_t cfg_mode, intr_mode;
+ int mcnt;
+
+ /* Allocate memory for HBA structure */
+ phba = lpfc_hba_alloc(pdev);
+ if (!phba)
+ return -ENOMEM;
+
+ /* Perform generic PCI device enabling operation */
+ error = lpfc_enable_pci_dev(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1409 Failed to enable pci device.\n");
+ goto out_free_phba;
+ }
+
+ /* Set up SLI API function jump table for PCI-device group-1 HBAs */
+ error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
+ if (error)
+ goto out_disable_pci_dev;
+
+ /* Set up SLI-4 specific device PCI memory space */
+ error = lpfc_sli4_pci_mem_setup(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1410 Failed to set up pci memory space.\n");
+ goto out_disable_pci_dev;
+ }
+
+ /* Set up phase-1 common device driver resources */
+ error = lpfc_setup_driver_resource_phase1(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1411 Failed to set up driver resource.\n");
+ goto out_unset_pci_mem_s4;
+ }
+
+ /* Set up SLI-4 Specific device driver resources */
+ error = lpfc_sli4_driver_resource_setup(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1412 Failed to set up driver resource.\n");
+ goto out_unset_pci_mem_s4;
+ }
+
+ /* Initialize and populate the iocb list per host */
+ error = lpfc_init_iocb_list(phba,
+ phba->sli4_hba.max_cfg_param.max_xri);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1413 Failed to initialize iocb list.\n");
+ goto out_unset_driver_resource_s4;
+ }
+
+ /* Set up common device driver resources */
+ error = lpfc_setup_driver_resource_phase2(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1414 Failed to set up driver resource.\n");
+ goto out_free_iocb_list;
+ }
+
+ /* Create SCSI host to the physical port */
+ error = lpfc_create_shost(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1415 Failed to create scsi host.\n");
+ goto out_unset_driver_resource;
+ }
+
+ /* Configure sysfs attributes */
+ vport = phba->pport;
+ error = lpfc_alloc_sysfs_attr(vport);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1416 Failed to allocate sysfs attr\n");
+ goto out_destroy_shost;
+ }
+
+ /* Now, trying to enable interrupt and bring up the device */
+ cfg_mode = phba->cfg_use_msi;
+ while (true) {
+ /* Put device to a known state before enabling interrupt */
+ lpfc_stop_port(phba);
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0426 Failed to enable interrupt.\n");
+ error = -ENODEV;
+ goto out_free_sysfs_attr;
+ }
+ /* Set up SLI-4 HBA */
+ if (lpfc_sli4_hba_setup(phba)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1421 Failed to set up hba\n");
+ error = -ENODEV;
+ goto out_disable_intr;
+ }
+
+ /* Send NOP mbx cmds for non-INTx mode active interrupt test */
+ if (intr_mode != 0)
+ mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
+ LPFC_ACT_INTR_CNT);
+
+ /* Check active interrupts received only for MSI/MSI-X */
+ if (intr_mode == 0 ||
+ phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
+ /* Log the current active interrupt mode */
+ phba->intr_mode = intr_mode;
+ lpfc_log_intr_mode(phba, intr_mode);
+ break;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0451 Configure interrupt mode (%d) "
+ "failed active interrupt test.\n",
+ intr_mode);
+ /* Unset the previous SLI-4 HBA setup */
+ lpfc_sli4_unset_hba(phba);
+ /* Try next level of interrupt mode */
+ cfg_mode = --intr_mode;
+ }
+
+ /* Perform post initialization setup */
+ lpfc_post_init_setup(phba);
+
+ return 0;
+
+out_disable_intr:
+ lpfc_sli4_disable_intr(phba);
+out_free_sysfs_attr:
+ lpfc_free_sysfs_attr(vport);
+out_destroy_shost:
+ lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+ lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+ lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s4:
+ lpfc_sli4_driver_resource_unset(phba);
+out_unset_pci_mem_s4:
+ lpfc_sli4_pci_mem_unset(phba);
+out_disable_pci_dev:
+ lpfc_disable_pci_dev(phba);
+out_free_phba:
+ lpfc_hba_free(phba);
+ return error;
+}
+
+/**
+ * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the kernel's PCI subsystem to remove a device with
+ * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void __devexit
+lpfc_pci_remove_one_s4(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_vport **vports;
+ struct lpfc_hba *phba = vport->phba;
+ int i;
+
+ /* Mark the device unloading flag */
+ spin_lock_irq(&phba->hbalock);
+ vport->load_flag |= FC_UNLOADING;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Free the HBA sysfs attributes */
+ lpfc_free_sysfs_attr(vport);
+
+ /* Release all the vports against this physical port */
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
+ fc_vport_terminate(vports[i]->fc_vport);
+ lpfc_destroy_vport_work_array(phba, vports);
+
+ /* Remove FC host and then SCSI host with the physical port */
+ fc_remove_host(shost);
+ scsi_remove_host(shost);
+
+ /* Perform cleanup on the physical port */
+ lpfc_cleanup(vport);
+
+ /*
+ * Bring down the SLI Layer. This step disables all interrupts,
+ * clears the rings, discards all mailbox commands, and resets
+ * the HBA FCoE function.
+ */
+ lpfc_debugfs_terminate(vport);
+ lpfc_sli4_hba_unset(phba);
+
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&vport->listentry);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
+ * buffers are released to their corresponding pools here.
+ */
+ lpfc_scsi_free(phba);
+ lpfc_sli4_driver_resource_unset(phba);
+
+ /* Unmap adapter Control and Doorbell registers */
+ lpfc_sli4_pci_mem_unset(phba);
+
+ /* Release PCI resources and disable device's PCI function */
+ scsi_host_put(shost);
+ lpfc_disable_pci_dev(phba);
+
+ /* Finally, free the driver's device data structure */
+ lpfc_hba_free(phba);
+
+ return;
+}
+
+/**
+ * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) for a device with SLI-4 interface spec. When PM invokes
+ * this method, it quiesces the device by stopping the driver's worker
+ * thread for the device, turning off the device's interrupt and DMA, and
+ * bringing the device offline. Note that as the driver implements the minimum PM
+ * requirements to a power-aware driver's PM support for suspend/resume -- all
+ * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
+ * method call will be treated as SUSPEND and the driver will fully
+ * reinitialize its device during resume() method call, the driver will set
+ * device to PCI_D3hot state in PCI config space instead of setting it
+ * according to the @msg provided by the PM.
+ *
+ * Return code
+ * 0 - driver suspended the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0298 PCI device Power Management suspend.\n");
+
+ /* Bring down the device */
+ lpfc_offline_prep(phba);
+ lpfc_offline(phba);
+ kthread_stop(phba->worker_thread);
+
+ /* Disable interrupt from device */
+ lpfc_sli4_disable_intr(phba);
+
+ /* Save device state to PCI config space */
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/**
+ * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) for a device with SLI-4 interface spec. When PM invokes
+ * this method, it restores the device's PCI config space state and fully
+ * reinitializes the device and brings it online. Note that as the driver
+ * implements the minimum PM requirements to a power-aware driver's PM for
+ * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
+ * to the suspend() method call will be treated as SUSPEND and the driver
+ * will fully reinitialize its device during resume() method call, the device
+ * will be set to PCI_D0 directly in PCI config space before restoring the
+ * state.
+ *
+ * Return code
+ * 0 - driver resumed the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_resume_one_s4(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ uint32_t intr_mode;
+ int error;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0292 PCI device Power Management resume.\n");
+
+ /* Restore device state from PCI config space */
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ if (pdev->is_busmaster)
+ pci_set_master(pdev);
+
+ /* Startup the kernel thread for this host adapter. */
+ phba->worker_thread = kthread_run(lpfc_do_work, phba,
+ "lpfc_worker_%d", phba->brd_no);
+ if (IS_ERR(phba->worker_thread)) {
+ error = PTR_ERR(phba->worker_thread);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0293 PM resume failed to start worker "
+ "thread: error=x%x.\n", error);
+ return error;
+ }
+
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0294 PM resume Failed to enable interrupt\n");
+ return -EIO;
+ } else
+ phba->intr_mode = intr_mode;
+
+ /* Restart HBA and bring it online */
+ lpfc_sli_brdrestart(phba);
lpfc_online(phba);
+
+ /* Log the current active interrupt mode */
+ lpfc_log_intr_mode(phba, phba->intr_mode);
+
+ return 0;
+}
+
+/**
+ * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is called from the PCI subsystem for error handling to a device
+ * with SLI-4 interface spec. This function is called by the PCI subsystem
+ * after a PCI bus error affecting this device has been detected. When this
+ * function is invoked, it will need to stop all the I/Os and interrupt(s)
+ * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
+ * for the PCI subsystem to perform proper recovery as desired.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is called from the PCI subsystem for error handling to a device
+ * with SLI-4 interface spec. It is called after PCI bus has been reset to
+ * restart the PCI card from scratch, as if from a cold-boot. During the
+ * PCI subsystem error recovery, after the driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method to
+ * recover the device. This function will initialize the HBA device, enable
+ * the interrupt, but it will just put the HBA to offline state without
+ * passing any I/O traffic.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+lpfc_io_slot_reset_s4(struct pci_dev *pdev)
+{
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the PCI subsystem for error handling to a device
+ * with SLI-4 interface spec. It is called when kernel error recovery tells
+ * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
+ * error recovery. After this call, traffic can start to flow from this device
+ * again.
+ **/
+static void
+lpfc_io_resume_s4(struct pci_dev *pdev)
+{
+ return;
+}
+
+/**
+ * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
+ * at PCI device-specific information of the device and driver to see if the
+ * driver state that it can support this kind of device. If the match is
+ * successful, the driver core invokes this routine. This routine dispatches
+ * the action to the proper SLI-3 or SLI-4 device probing routine, which will
+ * do all the initialization that it needs to do to handle the HBA device
+ * properly.
+ *
+ * Return code
+ * 0 - driver can claim the device
+ * negative value - driver can not claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+ int rc;
+ uint16_t dev_id;
+
+ if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
+ return -ENODEV;
+
+ switch (dev_id) {
+ case PCI_DEVICE_ID_TIGERSHARK:
+ case PCI_DEVICE_ID_TIGERSHARK_S:
+ rc = lpfc_pci_probe_one_s4(pdev, pid);
+ break;
+ default:
+ rc = lpfc_pci_probe_one_s3(pdev, pid);
+ break;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
+ * This routine dispatches the action to the proper SLI-3 or SLI-4 device
+ * remove routine, which will perform all the necessary cleanup for the
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void __devexit
+lpfc_pci_remove_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ lpfc_pci_remove_one_s3(pdev);
+ break;
+ case LPFC_PCI_DEV_OC:
+ lpfc_pci_remove_one_s4(pdev);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1424 Invalid PCI device group: 0x%x\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return;
+}
+
+/**
+ * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
+ * suspend the device.
+ *
+ * Return code
+ * 0 - driver suspended the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ int rc = -ENODEV;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ rc = lpfc_pci_suspend_one_s3(pdev, msg);
+ break;
+ case LPFC_PCI_DEV_OC:
+ rc = lpfc_pci_suspend_one_s4(pdev, msg);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1425 Invalid PCI device group: 0x%x\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device resume routine, which will
+ * resume the device.
+ *
+ * Return code
+ * 0 - driver resumed the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_resume_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ int rc = -ENODEV;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ rc = lpfc_pci_resume_one_s3(pdev);
+ break;
+ case LPFC_PCI_DEV_OC:
+ rc = lpfc_pci_resume_one_s4(pdev);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1426 Invalid PCI device group: 0x%x\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_io_error_detected - lpfc method for handling PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called by the PCI subsystem after a PCI bus error affecting
+ * this device has been detected. When this routine is invoked, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device error detected handling
+ * routine, which will perform the proper error detected operation.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ rc = lpfc_io_error_detected_s3(pdev, state);
+ break;
+ case LPFC_PCI_DEV_OC:
+ rc = lpfc_io_error_detected_s4(pdev, state);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1427 Invalid PCI device group: 0x%x\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called after PCI bus has been reset to restart the PCI card
+ * from scratch, as if from a cold-boot. When this routine is invoked, it
+ * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
+ * routine, which will perform the proper device reset.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ rc = lpfc_io_slot_reset_s3(pdev);
+ break;
+ case LPFC_PCI_DEV_OC:
+ rc = lpfc_io_slot_reset_s4(pdev);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1428 Invalid PCI device group: 0x%x\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_io_resume - lpfc method for resuming PCI I/O operation
+ * @pdev: pointer to PCI device
+ *
+ * This routine is registered to the PCI subsystem for error handling. It
+ * is called when kernel error recovery tells the lpfc driver that it is
+ * OK to resume normal PCI operation after PCI bus error recovery. When
+ * this routine is invoked, it dispatches the action to the proper SLI-3
+ * or SLI-4 device io_resume routine, which will resume the device operation.
+ **/
+static void
+lpfc_io_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ switch (phba->pci_dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ lpfc_io_resume_s3(pdev);
+ break;
+ case LPFC_PCI_DEV_OC:
+ lpfc_io_resume_s4(pdev);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1429 Invalid PCI device group: 0x%x\n",
+ phba->pci_dev_grp);
+ break;
+ }
+ return;
}
static struct pci_device_id lpfc_id_table[] = {
@@ -3469,6 +7939,10 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
@@ -3486,7 +7960,7 @@ static struct pci_driver lpfc_driver = {
.probe = lpfc_pci_probe_one,
.remove = __devexit_p(lpfc_pci_remove_one),
.suspend = lpfc_pci_suspend_one,
- .resume = lpfc_pci_resume_one,
+ .resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
};
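Registration of this pci_driver happens in the module init path, which is outside the hunks shown here; the general shape, with illustrative names for the init/exit functions, is roughly:

#include <linux/module.h>
#include <linux/pci.h>

/* Illustrative module hookup for a pci_driver like lpfc_driver above;
 * my_driver_init/my_driver_exit are placeholder names, and the real lpfc
 * module init also sets up other resources before registering. */
static int __init my_driver_init(void)
{
	return pci_register_driver(&lpfc_driver);
}

static void __exit my_driver_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
}

module_init(my_driver_init);
module_exit(my_driver_exit);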
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 1aa85709b01..954ba57970a 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,33 +18,39 @@
* included with this package. *
*******************************************************************/
-#define LOG_ELS 0x1 /* ELS events */
-#define LOG_DISCOVERY 0x2 /* Link discovery events */
-#define LOG_MBOX 0x4 /* Mailbox events */
-#define LOG_INIT 0x8 /* Initialization events */
-#define LOG_LINK_EVENT 0x10 /* Link events */
-#define LOG_IP 0x20 /* IP traffic history */
-#define LOG_FCP 0x40 /* FCP traffic history */
-#define LOG_NODE 0x80 /* Node table events */
-#define LOG_TEMP 0x100 /* Temperature sensor events */
-#define LOG_BG 0x200 /* BlockGuard events */
-#define LOG_MISC 0x400 /* Miscellaneous events */
-#define LOG_SLI 0x800 /* SLI events */
-#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
-#define LOG_LIBDFC 0x2000 /* Libdfc events */
-#define LOG_VPORT 0x4000 /* NPIV events */
-#define LOG_ALL_MSG 0xffff /* LOG all messages */
+#define LOG_ELS 0x00000001 /* ELS events */
+#define LOG_DISCOVERY 0x00000002 /* Link discovery events */
+#define LOG_MBOX 0x00000004 /* Mailbox events */
+#define LOG_INIT 0x00000008 /* Initialization events */
+#define LOG_LINK_EVENT 0x00000010 /* Link events */
+#define LOG_IP 0x00000020 /* IP traffic history */
+#define LOG_FCP 0x00000040 /* FCP traffic history */
+#define LOG_NODE 0x00000080 /* Node table events */
+#define LOG_TEMP 0x00000100 /* Temperature sensor events */
+#define LOG_BG 0x00000200 /* BlockGuard events */
+#define LOG_MISC 0x00000400 /* Miscellaneous events */
+#define LOG_SLI 0x00000800 /* SLI events */
+#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
+#define LOG_LIBDFC 0x00002000 /* Libdfc events */
+#define LOG_VPORT 0x00004000 /* NPIV events */
+#define LOG_SECURITY 0x00008000 /* Security events */
+#define LOG_EVENT 0x00010000 /* CT, TEMP, DUMP logging */
+#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
- do { \
- { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \
+do { \
+ { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
- } while (0)
+} while (0)
#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
- do { \
- { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \
+do { \
+ { uint32_t log_verbose = (phba)->pport ? \
+ (phba)->pport->cfg_log_verbose : \
+ (phba)->cfg_log_verbose; \
+ if (((mask) & log_verbose) || (level[1] <= '3')) \
dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
- fmt, phba->brd_no, ##arg); } \
- } while (0)
+ fmt, phba->brd_no, ##arg); \
+ } \
+} while (0)
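The reworked lpfc_printf_log above now tolerates being called before phba->pport is set up by falling back to phba->cfg_log_verbose, and the widened masks leave room for bits above 0xffff. Typical call sites combining the masks defined above look like the fragment below; the message numbers and text are made up for illustration.

/* Example call sites for the macros above; message numbers/text are
 * illustrative only, and remote_did stands for some local variable. */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_SLI,
		"9999 SLI rev %d port initialization complete\n",
		phba->sli_rev);

lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
		 "9998 Discovery timeout waiting on DID x%x\n",
		 remote_did);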
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 134fc7fc212..b9b451c0901 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
#include <scsi/scsi.h>
+#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@@ -39,6 +41,44 @@
#include "lpfc_compat.h"
/**
+ * lpfc_dump_static_vport - Dump HBA's static vport information.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset for dumping vport info.
+ *
+ * The dump mailbox command provides a method for the device driver to obtain
+ * various types of information from the HBA device.
+ *
+ * This routine prepares the mailbox command for dumping the list of static
+ * vports to be created.
+ **/
+void
+lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
+ uint16_t offset)
+{
+ MAILBOX_t *mb;
+ void *ctx;
+
+ mb = &pmb->u.mb;
+ ctx = pmb->context2;
+
+ /* Setup to dump vport info region */
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_DUMP_MEMORY;
+ mb->un.varDmp.cv = 1;
+ mb->un.varDmp.type = DMP_NV_PARAMS;
+ mb->un.varDmp.entry_index = offset;
+ mb->un.varDmp.region_id = DMP_REGION_VPORT;
+ mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
+ mb->un.varDmp.co = 0;
+ mb->un.varDmp.resp_offset = 0;
+ pmb->context2 = ctx;
+ mb->mbxOwner = OWN_HOST;
+
+ return;
+}
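For context, a caller would typically allocate a mailbox element from the driver's mailbox mempool, let this routine fill it in, and then post it. The sketch below shows only that flow; the DMA buffer setup for the response (context2), the completion handler, and the offset walk that the real static-vport caller performs are all omitted.

/* Flow-only sketch of issuing the dump prepared above; response buffer
 * (context2) and completion handling are deliberately left out. */
LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (pmb) {
	lpfc_dump_static_vport(phba, pmb, 0);	/* start at offset 0 */
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS)
		mempool_free(pmb, phba->mbox_mem_pool);
}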
+
+/**
* lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
@@ -58,7 +98,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
MAILBOX_t *mb;
void *ctx;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
ctx = pmb->context2;
/* Setup to dump VPD region */
@@ -90,7 +130,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb;
void *ctx;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
/* Save context so that we can restore after memset */
ctx = pmb->context2;
@@ -125,7 +165,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_READ_NV;
mb->mbxOwner = OWN_HOST;
@@ -151,7 +191,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
{
MAILBOX_t *mb;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
mb->un.varCfgAsyncEvent.ring = ring;
@@ -177,7 +217,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_HEARTBEAT;
mb->mbxOwner = OWN_HOST;
@@ -211,7 +251,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
struct lpfc_sli *psli;
psli = &phba->sli;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
INIT_LIST_HEAD(&mp->list);
@@ -248,7 +288,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varClearLA.eventTag = phba->fc_eventTag;
@@ -275,7 +315,7 @@ void
lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
struct lpfc_vport *vport = phba->pport;
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
/* NEW_FEATURE
@@ -321,7 +361,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
int
lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
uint32_t attentionConditions[2];
/* Sanity check */
@@ -405,7 +445,7 @@ lpfc_init_link(struct lpfc_hba * phba,
struct lpfc_sli *psli;
MAILBOX_t *mb;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
psli = &phba->sli;
@@ -492,7 +532,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
struct lpfc_sli *psli;
psli = &phba->sli;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxOwner = OWN_HOST;
@@ -515,7 +555,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
- mb->un.varRdSparm.vpi = vpi;
+ mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
/* save address for completion */
pmb->context1 = mp;
@@ -544,10 +584,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
{
MAILBOX_t *mb;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregDID.did = did;
+ if (vpi != 0xffff)
+ vpi += phba->vpi_base;
mb->un.varUnregDID.vpi = vpi;
mb->mbxCommand = MBX_UNREG_D_ID;
@@ -573,7 +615,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_READ_CONFIG;
@@ -598,7 +640,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_READ_LNK_STAT;
@@ -607,7 +649,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
}
/**
- * lpfc_reg_login - Prepare a mailbox command for registering remote login
+ * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
* @did: remote port identifier.
@@ -631,17 +673,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
* 1 - DMA memory allocation failed
**/
int
-lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
+lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
{
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
uint8_t *sparam;
struct lpfc_dmabuf *mp;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegLogin.rpi = 0;
- mb->un.varRegLogin.vpi = vpi;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
+ if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
+ return 1;
+ }
+
+ mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
mb->un.varRegLogin.did = did;
mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
@@ -697,15 +745,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
{
MAILBOX_t *mb;
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregLogin.rpi = (uint16_t) rpi;
mb->un.varUnregLogin.rsvd1 = 0;
- mb->un.varUnregLogin.vpi = vpi;
+ mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
mb->mbxCommand = MBX_UNREG_LOGIN;
mb->mbxOwner = OWN_HOST;
+
return;
}
@@ -725,15 +774,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
* This routine prepares the mailbox command for registering a virtual N_Port.
**/
void
-lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
- LPFC_MBOXQ_t *pmb)
+lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
{
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- mb->un.varRegVpi.vpi = vpi;
- mb->un.varRegVpi.sid = sid;
+ mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
+ mb->un.varRegVpi.sid = vport->fc_myDID;
+ mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
mb->mbxCommand = MBX_REG_VPI;
mb->mbxOwner = OWN_HOST;
@@ -760,10 +809,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
void
lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
{
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- mb->un.varUnregVpi.vpi = vpi;
+ mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
mb->mbxCommand = MBX_UNREG_VPI;
mb->mbxOwner = OWN_HOST;
@@ -852,7 +901,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
void
lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRdRev.cv = 1;
mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
@@ -945,7 +994,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
{
int i;
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -1020,7 +1069,7 @@ void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
int i;
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
@@ -1075,7 +1124,7 @@ void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
dma_addr_t pdma_addr;
uint32_t bar_low, bar_high;
size_t offset;
@@ -1099,21 +1148,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* If HBA supports SLI=3 ask for it */
- if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
+ if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
if (phba->cfg_enable_bg)
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
+ mb->un.varCfgPort.cdss = 1; /* Configure Security */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
if (phba->max_vpi && phba->cfg_enable_npiv &&
phba->vpd.sli3Feat.cmv) {
- mb->un.varCfgPort.max_vpi = phba->max_vpi;
+ mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
mb->un.varCfgPort.cmv = 1;
} else
mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
} else
- phba->sli_rev = 2;
+ phba->sli_rev = LPFC_SLI_REV2;
mb->un.varCfgPort.sli_mode = phba->sli_rev;
/* Now setup pcb */
@@ -1245,7 +1295,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
void
lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb->mbxCommand = MBX_KILL_BOARD;
@@ -1305,29 +1355,98 @@ lpfc_mbox_get(struct lpfc_hba * phba)
}
/**
+ * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine puts the completed mailbox command into the mailbox command
+ * complete list. This is the unlocked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
+ **/
+void
+__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
+{
+ list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+}
+
+/**
* lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
* @phba: pointer to lpfc hba data structure.
* @mbq: pointer to the driver internal queue element for mailbox command.
*
* This routine put the completed mailbox command into the mailbox command
- * complete list. This routine is called from driver interrupt handler
- * context.The mailbox complete list is used by the driver worker thread
- * to process mailbox complete callback functions outside the driver interrupt
- * handler.
+ * complete list. This is the locked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
**/
void
-lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
+lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
unsigned long iflag;
/* This function expects to be called from interrupt context */
spin_lock_irqsave(&phba->hbalock, iflag);
- list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+ __lpfc_mbox_cmpl_put(phba, mbq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
/**
+ * lpfc_mbox_cmd_check - Check the validity of a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is to check whether a mailbox command is valid to be issued.
+ * This check is performed by the mailbox issue APIs whenever a client
+ * is about to issue a mailbox command to the mailbox transport.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ /* Mailbox commands that have a completion handler must also have a
+ * vport specified.
+ */
+ if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
+ mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
+ if (!mboxq->vport) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ "1814 Mbox x%x failed, no vport\n",
+ mboxq->u.mb.mbxCommand);
+ dump_stack();
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+/**
+ * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to check whether the HBA device is ready for posting a
+ * mailbox command. It is used by the mailbox transport API at the time it
+ * is about to post a mailbox command to the device.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_dev_check(struct lpfc_hba *phba)
+{
+ /* If the PCI channel is in offline state, do not issue mbox */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return -ENODEV;
+
+ /* If the HBA is in error state, do not issue mbox */
+ if (phba->link_state == LPFC_HBA_ERROR)
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
* lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
* @phba: pointer to lpfc hba data structure.
* @cmd: mailbox command code.
@@ -1350,6 +1469,475 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
case MBX_WRITE_WWN: /* 0x98 */
case MBX_LOAD_EXP_ROM: /* 0x9C */
return LPFC_MBOX_TMO_FLASH_CMD;
+ case MBX_SLI4_CONFIG: /* 0x9b */
+ return LPFC_MBOX_SLI4_CONFIG_TMO;
}
return LPFC_MBOX_TMO;
}
+
+/**
+ * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ * @phyaddr: physical address for the sge
+ * @length: Length of the sge.
+ *
+ * This routine sets up an entry in the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
+ dma_addr_t phyaddr, uint32_t length)
+{
+ struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+ nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+ &mbox->u.mqe.un.nembed_cmd;
+ nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
+ nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
+ nembed_sge->sge[sgentry].length = length;
+}
+
+/**
+ * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ * @sge: pointer to the sge entry structure to be filled in.
+ *
+ * This routine gets an entry from the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
+ struct lpfc_mbx_sge *sge)
+{
+ struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+ nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+ &mbox->u.mqe.un.nembed_cmd;
+ sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
+ sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
+ sge->length = nembed_sge->sge[sgentry].length;
+}
+
+/**
+ * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ *
+ * This routine frees an SLI4-specific mailbox command used to send an IOCTL command.
+ **/
+void
+lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+ struct lpfc_mbx_sli4_config *sli4_cfg;
+ struct lpfc_mbx_sge sge;
+ dma_addr_t phyaddr;
+ uint32_t sgecount, sgentry;
+
+ sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+ /* For embedded mbox command, just free the mbox command */
+ if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return;
+ }
+
+ /* For non-embedded mbox command, we need to free the pages first */
+ sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
+ /* There is nothing we can do if there is no sge address array */
+ if (unlikely(!mbox->sge_array)) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return;
+ }
+ /* Each non-embedded DMA buffer was allocated as a full page */
+ for (sgentry = 0; sgentry < sgecount; sgentry++) {
+ lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
+ phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
+ dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
+ mbox->sge_array->addr[sgentry], phyaddr);
+ }
+ /* Free the sge address array memory */
+ kfree(mbox->sge_array);
+ /* Finally, free the mailbox command itself */
+ mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ * @subsystem: The sli4 config sub mailbox subsystem.
+ * @opcode: The sli4 config sub mailbox command opcode.
+ * @length: Length of the sli4 config mailbox command.
+ *
+ * This routine sets up the header fields of an SLI4-specific mailbox command
+ * used to send an IOCTL command.
+ *
+ * Return: the actual length of the mbox command allocated (mostly useful
+ * for non-embedded mailbox commands).
+ **/
+int
+lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+ uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
+{
+ struct lpfc_mbx_sli4_config *sli4_config;
+ union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
+ uint32_t alloc_len;
+ uint32_t resid_len;
+ uint32_t pagen, pcount;
+ void *viraddr;
+ dma_addr_t phyaddr;
+
+ /* Set up SLI4 mailbox command header fields */
+ memset(mbox, 0, sizeof(*mbox));
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
+
+ /* Set up SLI4 ioctl command header fields */
+ sli4_config = &mbox->u.mqe.un.sli4_config;
+
+ /* Setup for the embedded mbox command */
+ if (emb) {
+ /* Set up main header fields */
+ bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
+ sli4_config->header.cfg_mhdr.payload_length =
+ LPFC_MBX_CMD_HDR_LENGTH + length;
+ /* Set up sub-header fields following main header */
+ bf_set(lpfc_mbox_hdr_opcode,
+ &sli4_config->header.cfg_shdr.request, opcode);
+ bf_set(lpfc_mbox_hdr_subsystem,
+ &sli4_config->header.cfg_shdr.request, subsystem);
+ sli4_config->header.cfg_shdr.request.request_length = length;
+ return length;
+ }
+
+ /* Setup for the non-embedded mbox command */
+ pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
+ pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
+ LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
+ /* Allocate record for keeping SGE virtual addresses */
+ mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
+ GFP_KERNEL);
+ if (!mbox->sge_array)
+ return 0;
+
+ for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
+ /* The DMA memory is always allocated in the length of a
+ * page even though the last SGE might not fill up a whole
+ * page; PAGE_SIZE is then used as the a priori size for the
+ * later DMA memory free.
+ */
+ viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
+ &phyaddr, GFP_KERNEL);
+ /* If the allocation fails, proceed with whatever we have */
+ if (!viraddr)
+ break;
+ mbox->sge_array->addr[pagen] = viraddr;
+ /* Keep the first page for later sub-header construction */
+ if (pagen == 0)
+ cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
+ resid_len = length - alloc_len;
+ if (resid_len > PAGE_SIZE) {
+ lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+ PAGE_SIZE);
+ alloc_len += PAGE_SIZE;
+ } else {
+ lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+ resid_len);
+ alloc_len = length;
+ }
+ }
+
+ /* Set up main header fields in mailbox command */
+ sli4_config->header.cfg_mhdr.payload_length = alloc_len;
+ bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
+
+ /* Set up sub-header fields into the first page */
+ if (pagen > 0) {
+ bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
+ bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
+ cfg_shdr->request.request_length =
+ alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
+ }
+ /* The sub-header is in DMA memory, which needs endian conversion */
+ lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
+ sizeof(union lpfc_sli4_cfg_shdr));
+
+ return alloc_len;
+}
+
+/**
+ * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ *
+ * This routine gets the opcode from an SLI4-specific mailbox command used to
+ * send an IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
+ * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
+ * returned.
+ **/
+uint8_t
+lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+ struct lpfc_mbx_sli4_config *sli4_cfg;
+ union lpfc_sli4_cfg_shdr *cfg_shdr;
+
+ if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
+ return 0;
+ sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+ /* For embedded mbox command, get opcode from embedded sub-header */
+ if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+ cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+ return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+ }
+
+ /* For non-embedded mbox command, get opcode from first dma page */
+ if (unlikely(!mbox->sge_array))
+ return 0;
+ cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
+ return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+}
+
+/**
+ * lpfc_request_features - Configure the SLI4 REQUEST_FEATURES mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to lpfc mbox command.
+ *
+ * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
+ * mailbox command.
+ **/
+void
+lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
+{
+ /* Set up SLI4 mailbox command header fields */
+ memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+ bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
+
+ /* Set up host requested features. */
+ bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
+
+ /* Virtual fabrics and FIPs are not supported yet. */
+ bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
+
+ /* Enable DIF (block guard) only if configured to do so. */
+ if (phba->cfg_enable_bg)
+ bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
+
+ /* Enable NPIV only if configured to do so. */
+ if (phba->max_vpi && phba->cfg_enable_npiv)
+ bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
+
+ return;
+}
+
+/**
+ * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: Vport associated with the VF.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
+ * in the context of an FCF. The driver issues this command to setup a VFI
+ * before issuing a FLOGI to login to the VSAN. The driver should also issue a
+ * REG_VFI after a successful VSAN login.
+ **/
+void
+lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
+{
+ struct lpfc_mbx_init_vfi *init_vfi;
+
+ memset(mbox, 0, sizeof(*mbox));
+ init_vfi = &mbox->u.mqe.un.init_vfi;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
+ bf_set(lpfc_init_vfi_vr, init_vfi, 1);
+ bf_set(lpfc_init_vfi_vt, init_vfi, 1);
+ bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
+ bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
+}
+
+/**
+ * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: vport associated with the VF.
+ * @phys: BDE DMA bus address used to send the service parameters to the HBA.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport, and uses the DMAable buffer at @phys to send the vport's
+ * fc service parameters to the HBA for this VFI. REG_VFI configures virtual
+ * fabrics identified by VFI in the context of an FCF.
+ **/
+void
+lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
+{
+ struct lpfc_mbx_reg_vfi *reg_vfi;
+
+ memset(mbox, 0, sizeof(*mbox));
+ reg_vfi = &mbox->u.mqe.un.reg_vfi;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
+ bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
+ bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
+ bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
+ bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
+ reg_vfi->bde.addrHigh = putPaddrHigh(phys);
+ reg_vfi->bde.addrLow = putPaddrLow(phys);
+ reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
+ reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+}
+
+/**
+ * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vpi: VPI to be initialized.
+ *
+ * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
+ * command to activate a virtual N_Port. The HBA assigns a MAC address to use
+ * with the virtual N_Port. The SLI Host issues this command before issuing a
+ * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
+ * successful virtual N_Port login.
+ **/
+void
+lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
+{
+ memset(mbox, 0, sizeof(*mbox));
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
+ bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
+}
+
+/**
+ * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vfi: VFI to be unregistered.
+ *
+ * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
+ * (logical NPort) into the inactive state. The SLI Host must have logged out
+ * and unregistered all remote N_Ports to abort any activity on the virtual
+ * fabric. The SLI Port posts the mailbox response after marking the virtual
+ * fabric inactive.
+ **/
+void
+lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
+{
+ memset(mbox, 0, sizeof(*mbox));
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
+ bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
+}
+
+/**
+ * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters.
+ * @phba: pointer to the hba structure.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * This function creates a SLI4 dump mailbox command to dump FCoE
+ * parameters stored in region 23.
+ **/
+int
+lpfc_dump_fcoe_param(struct lpfc_hba *phba,
+ struct lpfcMboxq *mbox)
+{
+ struct lpfc_dmabuf *mp = NULL;
+ MAILBOX_t *mb;
+
+ memset(mbox, 0, sizeof(*mbox));
+ mb = &mbox->u.mb;
+
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp)
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+
+ if (!mp || !mp->virt) {
+ kfree(mp);
+ /* dump_fcoe_param failed to allocate memory */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "2569 lpfc_dump_fcoe_param: memory"
+ " allocation failed \n");
+ return 1;
+ }
+
+ memset(mp->virt, 0, LPFC_BPL_SIZE);
+ INIT_LIST_HEAD(&mp->list);
+
+ /* save address for completion */
+ mbox->context1 = (uint8_t *) mp;
+
+ mb->mbxCommand = MBX_DUMP_MEMORY;
+ mb->un.varDmp.type = DMP_NV_PARAMS;
+ mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
+ mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
+ mb->un.varWords[3] = putPaddrLow(mp->phys);
+ mb->un.varWords[4] = putPaddrHigh(mp->phys);
+ return 0;
+}
+
+/**
+ * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
+ * @phba: pointer to the hba structure containing the FCF index and RQ ID.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
+ * SLI Host uses the command to activate an FCF after it has acquired FCF
+ * information via a READ_FCF mailbox command. This mailbox command also is used
+ * to indicate where received unsolicited frames from this FCF will be sent. By
+ * default this routine will set up the FCF to forward all unsolicited frames
+ * the the RQ ID passed in the @phba. This can be overridden by the caller for
+ * more complicated setups.
+ **/
+void
+lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+ struct lpfc_mbx_reg_fcfi *reg_fcfi;
+
+ memset(mbox, 0, sizeof(*mbox));
+ reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
+ bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
+ bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
+ bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
+ bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
+ bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
+ /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
+ bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
+ (~phba->fcf.addr_mode) & 0x3);
+ if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
+ bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
+ bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
+ }
+}
+
+/**
+ * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @fcfi: FCFI to be unregistered.
+ *
+ * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
+ * The SLI Host uses the command to inactivate an FCFI.
+ **/
+void
+lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
+{
+ memset(mbox, 0, sizeof(*mbox));
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
+ bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
+}
+
+/**
+ * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @ndlp: The nodelist structure that describes the RPI to resume.
+ *
+ * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
+ * link event.
+ **/
+void
+lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_mbx_resume_rpi *resume_rpi;
+
+ memset(mbox, 0, sizeof(*mbox));
+ resume_rpi = &mbox->u.mqe.un.resume_rpi;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
+ bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
+ bf_set(lpfc_resume_rpi_vpi, resume_rpi,
+ ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
+ bf_set(lpfc_resume_rpi_vfi, resume_rpi,
+ ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
+}
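/*
 * Illustrative caller sketch, not part of this patch: one way the new SLI4
 * mailbox helpers above could be combined.  The function name and the
 * subsystem/opcode/length values (0x01, 0x20, 64) are invented placeholders,
 * and the issue path assumes the driver's existing lpfc_sli_issue_mbox()
 * polling mode.
 */
static int lpfc_issue_sli4_cfg_example(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t alloc_len;
	int rc;

	/* Device-state check introduced by this patch */
	rc = lpfc_mbox_dev_check(phba);
	if (rc)
		return rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Build an embedded MBX_SLI4_CONFIG command (placeholder values) */
	alloc_len = lpfc_sli4_config(phba, mboxq, 0x01, 0x20, 64, true);
	if (!alloc_len) {
		/* Only possible for non-embedded commands, kept for clarity */
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/* Command-validity check introduced by this patch */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (!rc && lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL) != MBX_SUCCESS)
		rc = -EIO;

	/* Free with the matching SLI4-aware helper */
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return rc;
}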
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 35a97673339..e198c917c13 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
#include <scsi/scsi.h>
+#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@@ -45,7 +47,7 @@
* @phba: HBA to allocate pools for
*
* Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
- * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools
+ * lpfc_mbuf_pool, lpfc_hrb_pool, lpfc_drb_pool. Creates and allocates kmalloc-backed mempools
* for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
*
* Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -56,19 +58,30 @@
* -ENOMEM on failure (if any memory allocations fail)
**/
int
-lpfc_mem_alloc(struct lpfc_hba * phba)
+lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
int longs;
int i;
- phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
- phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ phba->lpfc_scsi_dma_buf_pool =
+ pci_pool_create("lpfc_scsi_dma_buf_pool",
+ phba->pcidev,
+ phba->cfg_sg_dma_buf_size,
+ phba->cfg_sg_dma_buf_size,
+ 0);
+ else
+ phba->lpfc_scsi_dma_buf_pool =
+ pci_pool_create("lpfc_scsi_dma_buf_pool",
+ phba->pcidev, phba->cfg_sg_dma_buf_size,
+ align, 0);
if (!phba->lpfc_scsi_dma_buf_pool)
goto fail;
phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
- LPFC_BPL_SIZE, 8,0);
+ LPFC_BPL_SIZE,
+ align, 0);
if (!phba->lpfc_mbuf_pool)
goto fail_free_dma_buf_pool;
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
sizeof(struct lpfc_nodelist));
if (!phba->nlp_mem_pool)
goto fail_free_mbox_pool;
-
- phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev,
- LPFC_BPL_SIZE, 8, 0);
- if (!phba->lpfc_hbq_pool)
+ phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
+ phba->pcidev,
+ LPFC_HDR_BUF_SIZE, align, 0);
+ if (!phba->lpfc_hrb_pool)
goto fail_free_nlp_mem_pool;
+ phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
+ phba->pcidev,
+ LPFC_DATA_BUF_SIZE, align, 0);
+ if (!phba->lpfc_drb_pool)
+ goto fail_free_hbq_pool;
/* vpi zero is reserved for the physical port so add 1 to max */
longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
if (!phba->vpi_bmask)
- goto fail_free_hbq_pool;
+ goto fail_free_dbq_pool;
return 0;
+ fail_free_dbq_pool:
+ pci_pool_destroy(phba->lpfc_drb_pool);
+ phba->lpfc_drb_pool = NULL;
fail_free_hbq_pool:
- lpfc_sli_hbqbuf_free_all(phba);
- pci_pool_destroy(phba->lpfc_hbq_pool);
+ pci_pool_destroy(phba->lpfc_hrb_pool);
+ phba->lpfc_hrb_pool = NULL;
fail_free_nlp_mem_pool:
mempool_destroy(phba->nlp_mem_pool);
phba->nlp_mem_pool = NULL;
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
}
/**
- * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc
+ * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
* @phba: HBA to free memory for
*
- * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool,
- * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and
- * lpfc_nodelist. Also frees the VPI bitmask
+ * Description: Frees the memory allocated by the lpfc_mem_alloc routine. This
+ * routine is the counterpart of lpfc_mem_alloc.
*
* Returns: None
**/
void
-lpfc_mem_free(struct lpfc_hba * phba)
+lpfc_mem_free(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
- LPFC_MBOXQ_t *mbox, *next_mbox;
- struct lpfc_dmabuf *mp;
int i;
+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ /* Free VPI bitmask memory */
kfree(phba->vpi_bmask);
+
+ /* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
+ pci_pool_destroy(phba->lpfc_drb_pool);
+ phba->lpfc_drb_pool = NULL;
+ pci_pool_destroy(phba->lpfc_hrb_pool);
+ phba->lpfc_hrb_pool = NULL;
+
+ /* Free NLP memory pool */
+ mempool_destroy(phba->nlp_mem_pool);
+ phba->nlp_mem_pool = NULL;
+
+ /* Free mbox memory pool */
+ mempool_destroy(phba->mbox_mem_pool);
+ phba->mbox_mem_pool = NULL;
+
+ /* Free MBUF memory pool */
+ for (i = 0; i < pool->current_count; i++)
+ pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+ pool->elements[i].phys);
+ kfree(pool->elements);
+
+ pci_pool_destroy(phba->lpfc_mbuf_pool);
+ phba->lpfc_mbuf_pool = NULL;
+ /* Free DMA buffer memory pool */
+ pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+ phba->lpfc_scsi_dma_buf_pool = NULL;
+
+ return;
+}
+
+/**
+ * lpfc_mem_free_all - Frees all PCI and driver memory
+ * @phba: HBA to free memory for
+ *
+ * Description: Frees all PCI and driver memory, including the PCI pools
+ * lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool and lpfc_drb_pool,
+ * the kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist, and
+ * the VPI bitmask.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mem_free_all(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *mbox, *next_mbox;
+ struct lpfc_dmabuf *mp;
+
+ /* Free memory used in mailbox queue back to mailbox memory pool */
list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
mp = (struct lpfc_dmabuf *) (mbox->context1);
if (mp) {
@@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
list_del(&mbox->list);
mempool_free(mbox, phba->mbox_mem_pool);
}
+ /* Free memory used in mailbox cmpl list back to mailbox memory pool */
list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
mp = (struct lpfc_dmabuf *) (mbox->context1);
if (mp) {
@@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba)
list_del(&mbox->list);
mempool_free(mbox, phba->mbox_mem_pool);
}
-
+ /* Free the active mailbox command back to the mailbox memory pool */
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
if (psli->mbox_active) {
mbox = psli->mbox_active;
mp = (struct lpfc_dmabuf *) (mbox->context1);
@@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba)
psli->mbox_active = NULL;
}
- for (i = 0; i < pool->current_count; i++)
- pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
- pool->elements[i].phys);
- kfree(pool->elements);
-
- pci_pool_destroy(phba->lpfc_hbq_pool);
- mempool_destroy(phba->nlp_mem_pool);
- mempool_destroy(phba->mbox_mem_pool);
-
- pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
- pci_pool_destroy(phba->lpfc_mbuf_pool);
-
- phba->lpfc_hbq_pool = NULL;
- phba->nlp_mem_pool = NULL;
- phba->mbox_mem_pool = NULL;
- phba->lpfc_scsi_dma_buf_pool = NULL;
- phba->lpfc_mbuf_pool = NULL;
+ /* Free and destroy all the allocated memory pools */
+ lpfc_mem_free(phba);
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
+
+ return;
}
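/*
 * Illustrative sketch, not part of this patch: how the split between
 * lpfc_mem_free() and lpfc_mem_free_all() above is intended to be used.
 * lpfc_teardown_example() is an invented name; the actual callers live in
 * the driver attach/detach paths, which are not shown here.
 */
static void lpfc_teardown_example(struct lpfc_hba *phba, bool sli_started)
{
	if (sli_started)
		/* Drains mailbox queues and the active command, then frees pools */
		lpfc_mem_free_all(phba);
	else
		/* Only the memory pools were created; free just those */
		lpfc_mem_free(phba);
}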
/**
@@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
* lpfc_els_hbq_alloc - Allocate an HBQ buffer
* @phba: HBA to allocate HBQ buffer for
*
- * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
+ * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
* pool along a non-DMA-mapped container for it.
*
* Notes: Not interrupt-safe. Must be called with no locks held.
@@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
if (!hbqbp)
return NULL;
- hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
+ hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
&hbqbp->dbuf.phys);
if (!hbqbp->dbuf.virt) {
kfree(hbqbp);
@@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
}
/**
- * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
+ * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
* @phba: HBA buffer was allocated for
* @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
*
@@ -348,12 +405,73 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
- pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+ pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
kfree(hbqbp);
return;
}
/**
+ * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
+ * @phba: HBA to allocate a receive buffer for
+ *
+ * Description: Allocates a DMA-mapped receive buffer pair from the lpfc_hrb_pool
+ * and lpfc_drb_pool PCI pools, along with a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe. Must be called with no locks held.
+ *
+ * Returns:
+ * pointer to HBQ on success
+ * NULL on failure
+ **/
+struct hbq_dmabuf *
+lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
+{
+ struct hbq_dmabuf *dma_buf;
+
+ dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+ if (!dma_buf)
+ return NULL;
+
+ dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+ &dma_buf->hbuf.phys);
+ if (!dma_buf->hbuf.virt) {
+ kfree(dma_buf);
+ return NULL;
+ }
+ dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+ &dma_buf->dbuf.phys);
+ if (!dma_buf->dbuf.virt) {
+ pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+ dma_buf->hbuf.phys);
+ kfree(dma_buf);
+ return NULL;
+ }
+ dma_buf->size = LPFC_BPL_SIZE;
+ return dma_buf;
+}
+
+/**
+ * lpfc_sli4_rb_free - Frees a receive buffer
+ * @phba: HBA buffer was allocated for
+ * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffers returned by
+ * lpfc_sli4_rb_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
+{
+ pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+ pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+ kfree(dmab);
+ return;
+}
+
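/*
 * Illustrative sketch, not part of this patch: the intended lifetime of an
 * SLI4 receive buffer pair from the new lpfc_hrb_pool/lpfc_drb_pool pools.
 * lpfc_post_one_rb_example() is an invented name; handing the hbuf/dbuf
 * physical addresses to the header and data receive queues is only described
 * in the comment because the RQ posting API is outside this file.
 */
static int lpfc_post_one_rb_example(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *dmab;

	/* Allocates one header buffer, one data buffer and their container */
	dmab = lpfc_sli4_rb_alloc(phba);
	if (!dmab)
		return -ENOMEM;

	/*
	 * dmab->hbuf.phys and dmab->dbuf.phys would normally be posted to the
	 * header/data receive queues here; on failure the pair must be given
	 * back with the matching free routine, as done below.
	 */
	lpfc_sli4_rb_free(phba, dmab);
	return 0;
}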
+/**
* lpfc_in_buf_free - Free a DMA buffer
* @phba: HBA buffer is associated with
* @mp: Buffer to free
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 08cdc77af41..09f659f77bb 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@@ -361,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!mbox)
goto out;
- rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
+ rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
(uint8_t *) sp, mbox, 0);
if (rc) {
mempool_free(mbox, phba->mbox_mem_pool);
@@ -495,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ if ((ndlp->nlp_type & NLP_FABRIC) &&
+ vport->port_type == LPFC_NPIV_PORT) {
+ lpfc_linkdown_port(vport);
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
- if ((!(ndlp->nlp_type & NLP_FABRIC) &&
- ((ndlp->nlp_type & NLP_FCP_TARGET) ||
- !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
- (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
+ ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+ } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+ ((ndlp->nlp_type & NLP_FCP_TARGET) ||
+ !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
+ (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
/* Only try to re-login if this is NOT a Fabric Node */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
spin_lock_irq(shost->host_lock);
@@ -567,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- if (!ndlp->nlp_rpi) {
+ if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
return 0;
}
@@ -857,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
lpfc_unreg_rpi(vport, ndlp);
- if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+ if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
(uint8_t *) sp, mbox, 0) == 0) {
switch (ndlp->nlp_DID) {
case NameServer_DID:
@@ -1068,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
ADISC *ap;
+ int rc;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1093,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
return ndlp->nlp_state;
}
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_sli4_resume_rpi(ndlp);
+ if (rc) {
+ /* Stay in state and retry. */
+ ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+ return ndlp->nlp_state;
+ }
+ }
+
if (ndlp->nlp_type & NLP_FCP_TARGET) {
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@@ -1100,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
+
return ndlp->nlp_state;
}
@@ -1190,7 +1211,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
- if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+ if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
lpfc_nlp_put(ndlp);
mb->context2 = NULL;
@@ -1200,7 +1221,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
- if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+ if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
mp = (struct lpfc_dmabuf *) (mb->context1);
if (mp) {
@@ -1251,7 +1272,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
uint32_t did = mb->un.varWords[1];
if (mb->mbxStatus) {
@@ -1283,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
}
ndlp->nlp_rpi = mb->un.varWords[0];
+ ndlp->nlp_flag |= NLP_RPI_VALID;
/* Only if we are not a fabric nport do we issue PRLI */
if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -1878,11 +1900,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
void *arg, uint32_t evt)
{
LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
- MAILBOX_t *mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->u.mb;
- if (!mb->mbxStatus)
+ if (!mb->mbxStatus) {
ndlp->nlp_rpi = mb->un.varWords[0];
- else {
+ ndlp->nlp_flag |= NLP_RPI_VALID;
+ } else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
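/*
 * Illustrative sketch, not part of this patch: with the NLP_RPI_VALID flag
 * introduced above, callers are expected to test the flag rather than rely
 * on a non-zero ndlp->nlp_rpi, since on SLI4 a value of zero may be a
 * legitimate RPI.  lpfc_ndlp_has_rpi_example() is an invented helper name.
 */
static inline int lpfc_ndlp_has_rpi_example(struct lpfc_nodelist *ndlp)
{
	/* NLP_RPI_VALID is set in the REG_LOGIN completion handlers above */
	return (ndlp->nlp_flag & NLP_RPI_VALID) != 0;
}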
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 167b66dd34c..7991ba1980a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -31,8 +31,10 @@
#include <scsi/scsi_transport_fc.h>
#include "lpfc_version.h"
+#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@@ -57,6 +59,8 @@ static char *dif_op_str[] = {
"SCSI_PROT_READ_CONVERT",
"SCSI_PROT_WRITE_CONVERT"
};
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_debug_save_data(struct scsi_cmnd *cmnd)
@@ -325,7 +329,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
shost_for_each_device(sdev, shost) {
new_queue_depth =
@@ -379,7 +383,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
shost_for_each_device(sdev, shost) {
if (vports[i]->cfg_lun_queue_depth <=
@@ -427,7 +431,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
shost_for_each_device(sdev, shost) {
rport = starget_to_rport(scsi_target(sdev));
@@ -438,22 +442,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
}
/**
- * lpfc_new_scsi_buf - Scsi buffer allocator
+ * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
* @vport: The virtual port for which this call being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
*
- * This routine allocates a scsi buffer, which contains all the necessary
- * information needed to initiate a SCSI I/O. The non-DMAable buffer region
- * contains information to build the IOCB. The DMAable region contains
- * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
- * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
- * and the BPL BDE is setup in the IOCB.
+ * This routine allocates a scsi buffer for a device with the SLI-3 interface
+ * spec. The scsi buffer contains all the necessary information needed to initiate
+ * a SCSI I/O. The non-DMAable buffer region contains information to build
+ * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
+ * and the initial BPL. In addition to allocating memory, the FCP CMND and
+ * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
*
* Return codes:
- * NULL - Error
- * Pointer to lpfc_scsi_buf data structure - Success
+ * int - number of scsi buffers that were allocated.
+ * 0 = failure, less than num_to_alloc is a partial failure.
**/
-static struct lpfc_scsi_buf *
-lpfc_new_scsi_buf(struct lpfc_vport *vport)
+static int
+lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *psb;
@@ -463,107 +468,401 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_bpl;
uint16_t iotag;
+ int bcnt;
- psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
- if (!psb)
- return NULL;
+ for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+ psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+ if (!psb)
+ break;
+
+ /*
+ * Get memory from the pci pool to map the virt space to pci
+ * bus space for an I/O. The DMA buffer includes space for the
+ * struct fcp_cmnd, struct fcp_rsp and the number of bde's
+ * necessary to support the sg_tablesize.
+ */
+ psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+ GFP_KERNEL, &psb->dma_handle);
+ if (!psb->data) {
+ kfree(psb);
+ break;
+ }
+
+ /* Initialize virtual ptrs to dma_buf region. */
+ memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+
+ /* Allocate iotag for psb->cur_iocbq. */
+ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+ if (iotag == 0) {
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+ psb->data, psb->dma_handle);
+ kfree(psb);
+ break;
+ }
+ psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+
+ psb->fcp_cmnd = psb->data;
+ psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
+ psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp);
+
+ /* Initialize local short-hand pointers. */
+ bpl = psb->fcp_bpl;
+ pdma_phys_fcp_cmd = psb->dma_handle;
+ pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
+ pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp);
+
+ /*
+ * The first two bdes are the FCP_CMD and FCP_RSP. The balance
+ * are sg list bdes. Initialize the first two and leave the
+ * rest for queuecommand.
+ */
+ bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
+ bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
+ bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
+ bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
+
+ /* Setup the physical region for the FCP RSP */
+ bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
+ bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
+ bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
+ bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
+
+ /*
+ * Since the IOCB for the FCP I/O is built into this
+ * lpfc_scsi_buf, initialize it with all known data now.
+ */
+ iocb = &psb->cur_iocbq.iocb;
+ iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+ if ((phba->sli_rev == 3) &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
+ /* fill in immediate fcp command BDE */
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
+ iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+ iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
+ unsli3.fcp_ext.icd);
+ iocb->un.fcpi64.bdl.addrHigh = 0;
+ iocb->ulpBdeCount = 0;
+ iocb->ulpLe = 0;
+ /* fill in response BDE */
+ iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
+ BUFF_TYPE_BDE_64;
+ iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
+ sizeof(struct fcp_rsp);
+ iocb->unsli3.fcp_ext.rbde.addrLow =
+ putPaddrLow(pdma_phys_fcp_rsp);
+ iocb->unsli3.fcp_ext.rbde.addrHigh =
+ putPaddrHigh(pdma_phys_fcp_rsp);
+ } else {
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ iocb->un.fcpi64.bdl.bdeSize =
+ (2 * sizeof(struct ulp_bde64));
+ iocb->un.fcpi64.bdl.addrLow =
+ putPaddrLow(pdma_phys_bpl);
+ iocb->un.fcpi64.bdl.addrHigh =
+ putPaddrHigh(pdma_phys_bpl);
+ iocb->ulpBdeCount = 1;
+ iocb->ulpLe = 1;
+ }
+ iocb->ulpClass = CLASS3;
+ psb->status = IOSTAT_SUCCESS;
+ /* Put it back into the SCSI buffer list */
+ lpfc_release_scsi_buf_s4(phba, psb);
- /*
- * Get memory from the pci pool to map the virt space to pci bus space
- * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
- * struct fcp_rsp and the number of bde's necessary to support the
- * sg_tablesize.
- */
- psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
- &psb->dma_handle);
- if (!psb->data) {
- kfree(psb);
- return NULL;
}
- /* Initialize virtual ptrs to dma_buf region. */
- memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+ return bcnt;
+}
- /* Allocate iotag for psb->cur_iocbq. */
- iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
- if (iotag == 0) {
- pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
- psb->data, psb->dma_handle);
- kfree (psb);
- return NULL;
+/**
+ * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the fcp xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * FCP aborted xri.
+ **/
+void
+lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+ struct lpfc_scsi_buf *psb, *next_psb;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
+ list_for_each_entry_safe(psb, next_psb,
+ &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+ if (psb->cur_iocbq.sli4_xritag == xri) {
+ list_del(&psb->list);
+ psb->status = IOSTAT_SUCCESS;
+ spin_unlock_irqrestore(
+ &phba->sli4_hba.abts_scsi_buf_list_lock,
+ iflag);
+ lpfc_release_scsi_buf_s4(phba, psb);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
+ iflag);
+}
+
+/**
+ * lpfc_sli4_repost_scsi_sgl_list - Repost the SCSI buffer sgl pages as a block
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of scsi buffers that have been allocated and
+ * reposts them to the HBA by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
+ * is responsible for moving all scsi buffers on the lpfc_abts_scsi_buf_list
+ * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
+ *
+ * Returns: 0 = success, non-zero = failure.
+ **/
+int
+lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
+{
+ struct lpfc_scsi_buf *psb;
+ int index, status, bcnt = 0, rcnt = 0, rc = 0;
+ LIST_HEAD(sblist);
+
+ for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
+ psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
+ if (psb) {
+ /* Remove from SCSI buffer list */
+ list_del(&psb->list);
+ /* Add it to a local SCSI buffer list */
+ list_add_tail(&psb->list, &sblist);
+ if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+ bcnt = rcnt;
+ rcnt = 0;
+ }
+ } else
+ /* A hole is present in the XRI array; skip it */
+ bcnt = rcnt;
+
+ if (index == phba->sli4_hba.scsi_xri_cnt - 1)
+ /* End of XRI array for SCSI buffer, complete */
+ bcnt = rcnt;
+
+ /* Continue until we have collected up to a nembed page worth of sgls */
+ if (bcnt == 0)
+ continue;
+ /* Now, post the SCSI buffer list sgls as a block */
+ status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+ /* Reset SCSI buffer count for next round of posting */
+ bcnt = 0;
+ while (!list_empty(&sblist)) {
+ list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
+ list);
+ if (status) {
+ /* Put this back on the abort scsi list */
+ psb->status = IOSTAT_LOCAL_REJECT;
+ psb->result = IOERR_ABORT_REQUESTED;
+ rc++;
+ } else
+ psb->status = IOSTAT_SUCCESS;
+ /* Put it back into the SCSI buffer list */
+ lpfc_release_scsi_buf_s4(phba, psb);
+ }
}
- psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+ return rc;
+}
- psb->fcp_cmnd = psb->data;
- psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
- psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp);
+/**
+ * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine allocates a scsi buffer for a device with the SLI-4 interface
+ * spec. The scsi buffer contains all the necessary information needed to initiate
+ * a SCSI I/O.
+ *
+ * Return codes:
+ * int - number of scsi buffers that were allocated.
+ * 0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static int
+lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_scsi_buf *psb;
+ struct sli4_sge *sgl;
+ IOCB_t *iocb;
+ dma_addr_t pdma_phys_fcp_cmd;
+ dma_addr_t pdma_phys_fcp_rsp;
+ dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
+ uint16_t iotag, last_xritag = NO_XRI;
+ int status = 0, index;
+ int bcnt;
+ int non_sequential_xri = 0;
+ int rc = 0;
+ LIST_HEAD(sblist);
+
+ for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+ psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+ if (!psb)
+ break;
- /* Initialize local short-hand pointers. */
- bpl = psb->fcp_bpl;
- pdma_phys_fcp_cmd = psb->dma_handle;
- pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
- pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp);
+ /*
+ * Get memory from the pci pool to map the virt space to pci bus
+ * space for an I/O. The DMA buffer includes space for the
+ * struct fcp_cmnd, struct fcp_rsp and the number of bde's
+ * necessary to support the sg_tablesize.
+ */
+ psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+ GFP_KERNEL, &psb->dma_handle);
+ if (!psb->data) {
+ kfree(psb);
+ break;
+ }
- /*
- * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
- * list bdes. Initialize the first two and leave the rest for
- * queuecommand.
- */
- bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
- bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
- bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
- bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
-
- /* Setup the physical region for the FCP RSP */
- bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
- bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
- bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
- bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
+ /* Initialize virtual ptrs to dma_buf region. */
+ memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
- /*
- * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
- * initialize it with all known data now.
- */
- iocb = &psb->cur_iocbq.iocb;
- iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
- if ((phba->sli_rev == 3) &&
- !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
- /* fill in immediate fcp command BDE */
- iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
+ /* Allocate iotag for psb->cur_iocbq. */
+ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+ if (iotag == 0) {
+ kfree(psb);
+ break;
+ }
+
+ psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
+ if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
+ pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+ psb->data, psb->dma_handle);
+ kfree(psb);
+ break;
+ }
+ if (last_xritag != NO_XRI
+ && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
+ non_sequential_xri = 1;
+ } else
+ list_add_tail(&psb->list, &sblist);
+ last_xritag = psb->cur_iocbq.sli4_xritag;
+
+ index = phba->sli4_hba.scsi_xri_cnt++;
+ psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+
+ psb->fcp_bpl = psb->data;
+ psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
+ - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+ psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
+ sizeof(struct fcp_cmnd));
+
+ /* Initialize local short-hand pointers. */
+ sgl = (struct sli4_sge *)psb->fcp_bpl;
+ pdma_phys_bpl = psb->dma_handle;
+ pdma_phys_fcp_cmd =
+ (psb->dma_handle + phba->cfg_sg_dma_buf_size)
+ - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+ pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
+
+ /*
+ * The first two bdes are the FCP_CMD and FCP_RSP. The balance
+ * are sg list bdes. Initialize the first two and leave the
+ * rest for queuecommand.
+ */
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+ bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->word3 = cpu_to_le32(sgl->word3);
+ sgl++;
+
+ /* Setup the physical region for the FCP RSP */
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+ bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->word3 = cpu_to_le32(sgl->word3);
+
+ /*
+ * Since the IOCB for the FCP I/O is built into this
+ * lpfc_scsi_buf, initialize it with all known data now.
+ */
+ iocb = &psb->cur_iocbq.iocb;
+ iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
+ /* Setting the BLP size to 2 * sizeof(BDE) may not be correct.
+ * We are setting the bpl to point to our sgl. An sgl's
+ * entries are 16 bytes, while bpl entries are 12 bytes.
+ */
iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
- iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
- unsli3.fcp_ext.icd);
- iocb->un.fcpi64.bdl.addrHigh = 0;
- iocb->ulpBdeCount = 0;
- iocb->ulpLe = 0;
- /* fill in responce BDE */
- iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
- sizeof(struct fcp_rsp);
- iocb->unsli3.fcp_ext.rbde.addrLow =
- putPaddrLow(pdma_phys_fcp_rsp);
- iocb->unsli3.fcp_ext.rbde.addrHigh =
- putPaddrHigh(pdma_phys_fcp_rsp);
- } else {
- iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
- iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
- iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
+ iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
+ iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
iocb->ulpBdeCount = 1;
iocb->ulpLe = 1;
+ iocb->ulpClass = CLASS3;
+ if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+ pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
+ else
+ pdma_phys_bpl1 = 0;
+ psb->dma_phys_bpl = pdma_phys_bpl;
+ phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
+ if (non_sequential_xri) {
+ status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
+ pdma_phys_bpl1,
+ psb->cur_iocbq.sli4_xritag);
+ if (status) {
+ /* Put this back on the abort scsi list */
+ psb->status = IOSTAT_LOCAL_REJECT;
+ psb->result = IOERR_ABORT_REQUESTED;
+ rc++;
+ } else
+ psb->status = IOSTAT_SUCCESS;
+ /* Put it back into the SCSI buffer list */
+ lpfc_release_scsi_buf_s4(phba, psb);
+ break;
+ }
+ }
+ if (bcnt) {
+ status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+ /* Reset SCSI buffer count for next round of posting */
+ while (!list_empty(&sblist)) {
+ list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
+ list);
+ if (status) {
+ /* Put this back on the abort scsi list */
+ psb->status = IOSTAT_LOCAL_REJECT;
+ psb->result = IOERR_ABORT_REQUESTED;
+ rc++;
+ } else
+ psb->status = IOSTAT_SUCCESS;
+ /* Put it back into the SCSI buffer list */
+ lpfc_release_scsi_buf_s4(phba, psb);
+ }
}
- iocb->ulpClass = CLASS3;
- return psb;
+ return bcnt + non_sequential_xri - rc;
}
/**
- * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba
- * @phba: The Hba for which this call is being executed.
+ * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine wraps the actual SCSI buffer allocator function pointer from
+ * the lpfc_hba struct.
+ *
+ * Return codes:
+ * int - number of scsi buffers that were allocated.
+ * 0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static inline int
+lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
+{
+ return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
+}
+
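/*
 * Illustrative sketch, not part of this patch: the per-SLI-rev dispatch that
 * the lpfc_new_scsi_buf()/lpfc_release_scsi_buf() wrappers rely on.  The real
 * assignment of phba->lpfc_new_scsi_buf and phba->lpfc_release_scsi_buf is
 * done elsewhere in the driver setup code (not shown in this hunk); this only
 * shows the expected shape of that setup.
 */
static void lpfc_scsi_buf_api_setup_example(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4) {
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
	} else {
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
	}
}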
+/**
+ * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
*
* This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
* and returns to caller.
@@ -591,7 +890,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
}
/**
- * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list
+ * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
* @phba: The Hba for which this call is being executed.
* @psb: The scsi buffer which is being released.
*
@@ -599,7 +898,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
* lpfc_scsi_buf_list list.
**/
static void
-lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
unsigned long iflag = 0;
@@ -610,21 +909,69 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
}
/**
- * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer
+ * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases @psb scsi buffer by adding it to tail of @phba
+ * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
+ * and cannot be reused for at least RA_TOV amount of time if it was
+ * aborted.
+ **/
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+ unsigned long iflag = 0;
+
+ if (psb->status == IOSTAT_LOCAL_REJECT
+ && psb->result == IOERR_ABORT_REQUESTED) {
+ spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
+ iflag);
+ psb->pCmd = NULL;
+ list_add_tail(&psb->list,
+ &phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
+ iflag);
+ } else {
+
+ spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+ psb->pCmd = NULL;
+ list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
+ spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+ }
+}
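
The SLI4 release path above parks aborted buffers on a separate list until their XRI can be reused, and returns everything else to the free list. The following standalone sketch models only that routing decision; the structure, constants, and list handling are hypothetical stand-ins for the lpfc types, not the driver's code.

/* Illustrative sketch only: simplified stand-ins for the lpfc buffer lists. */
#include <stdio.h>

#define IOSTAT_LOCAL_REJECT_DEMO   3   /* hypothetical values for the demo */
#define IOERR_ABORT_REQUESTED_DEMO 9

struct demo_buf {
	int status;
	int result;
	struct demo_buf *next;
};

static struct demo_buf *free_list;
static struct demo_buf *abts_list;

/* Mirror of the s4 release decision: aborted buffers are parked on the
 * abort list until the XRI may be reused; everything else is freed. */
static void demo_release(struct demo_buf *b)
{
	struct demo_buf **head;

	if (b->status == IOSTAT_LOCAL_REJECT_DEMO &&
	    b->result == IOERR_ABORT_REQUESTED_DEMO)
		head = &abts_list;
	else
		head = &free_list;
	b->next = *head;
	*head = b;
}

int main(void)
{
	struct demo_buf ok = { 0, 0, NULL };
	struct demo_buf aborted = { IOSTAT_LOCAL_REJECT_DEMO,
				    IOERR_ABORT_REQUESTED_DEMO, NULL };

	demo_release(&ok);
	demo_release(&aborted);
	printf("free list head: %p, abort list head: %p\n",
	       (void *)free_list, (void *)abts_list);
	return 0;
}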
+
+/**
+ * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases @psb scsi buffer by adding it to tail of @phba
+ * lpfc_scsi_buf_list list.
+ **/
+static void
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+
+ phba->lpfc_release_scsi_buf(phba, psb);
+}
+
+/**
+ * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
* @phba: The Hba for which this call is being executed.
* @lpfc_cmd: The scsi buffer which is going to be mapped.
*
* This routine does the pci dma mapping for scatter-gather list of scsi cmnd
- * field of @lpfc_cmd. This routine scans through sg elements and format the
- * bdea. This routine also initializes all IOCB fields which are dependent on
- * scsi command request buffer.
+ * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
+ * through sg elements and formats the bdes. This routine also initializes all
+ * IOCB fields which are dependent on scsi command request buffer.
*
* Return codes:
* 1 - Error
* 0 - Success
**/
static int
-lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct scatterlist *sgel = NULL;
@@ -827,8 +1174,8 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
* @reftag: out: ref tag (reference tag)
*
* Description:
- * Extract DIF paramters from the command if possible. Otherwise,
- * use default paratmers.
+ * Extract DIF parameters from the command if possible. Otherwise,
+ * use default parameters.
*
**/
static inline void
@@ -1312,10 +1659,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
uint32_t bgstat = bgf->bgstat;
uint64_t failing_sector = 0;
- printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
+ printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
"bgstat=0x%x bghm=0x%x\n",
cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
- cmd->request->nr_sectors, bgstat, bghm);
+ blk_rq_sectors(cmd->request), bgstat, bghm);
spin_lock(&_dump_buf_lock);
if (!_dump_buf_done) {
@@ -1412,6 +1759,133 @@ out:
}
/**
+ * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
+ * field of @lpfc_cmd for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ * 1 - Error
+ * 0 - Success
+ **/
+static int
+lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct scatterlist *sgel = NULL;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ dma_addr_t physaddr;
+ uint32_t num_bde = 0;
+ uint32_t dma_len;
+ uint32_t dma_offset = 0;
+ int nseg;
+
+ /*
+ * There are three possibilities here - use scatter-gather segment, use
+ * the single mapping, or neither. Start the lpfc command prep by
+ * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+ * data bde entry.
+ */
+ if (scsi_sg_count(scsi_cmnd)) {
+ /*
+ * The driver stores the segment count returned from pci_map_sg
+ * because this is a count of dma-mappings used to map the use_sg
+ * pages. They are not guaranteed to be the same for those
+ * architectures that implement an IOMMU.
+ */
+
+ nseg = scsi_dma_map(scsi_cmnd);
+ if (unlikely(!nseg))
+ return 1;
+ sgl += 1;
+ /* clear the last flag in the fcp_rsp map entry */
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl += 1;
+
+ lpfc_cmd->seg_cnt = nseg;
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ printk(KERN_ERR "%s: Too many sg segments from "
+ "dma_map_sg. Config %d, seg_cnt %d\n",
+ __func__, phba->cfg_sg_seg_cnt,
+ lpfc_cmd->seg_cnt);
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ /*
+ * The driver established a maximum scatter-gather segment count
+ * during probe that limits the number of sg elements in any
+ * single scsi command. Just run through the seg_cnt and format
+ * the sge's.
+ * When using SLI-3 the driver will try to fit all the BDEs into
+ * the IOCB. If it can't then the BDEs get added to a BPL as it
+ * does for SLI-2 mode.
+ */
+ scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
+ physaddr = sg_dma_address(sgel);
+ dma_len = sg_dma_len(sgel);
+ bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+ if ((num_bde + 1) == nseg)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ else
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->word3 = cpu_to_le32(sgl->word3);
+ dma_offset += dma_len;
+ sgl++;
+ }
+ } else {
+ sgl += 1;
+ /* clear the last flag in the fcp_rsp map entry */
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ }
+
+ /*
+ * Finish initializing those IOCB fields that are dependent on the
+ * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
+ * explicitly reinitialized since all iocb memory resources are reused.
+ */
+ fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+ /*
+ * Due to difference in data length between DIF/non-DIF paths,
+ * we need to set word 4 of IOCB here
+ */
+ iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+ return 0;
+}
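
The scatter-gather loop above splits each DMA address into low/high words, carries a running offset, and sets the "last" flag only on the final SGE. Here is a minimal user-space sketch of that formatting pattern; the segment and SGE types are hypothetical and deliberately simpler than the SLI-4 structures.

/* Illustrative sketch only: hypothetical segment/SGE types, not the SLI-4 ones. */
#include <stdint.h>
#include <stdio.h>

struct demo_seg { uint64_t addr; uint32_t len; };

struct demo_sge {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t len;
	uint32_t offset;
	int      last;
};

/* Walk the mapped segments, split each address into low/high words,
 * carry a running offset, and flag only the final entry as "last". */
static void demo_format_sges(const struct demo_seg *seg, int nseg,
			     struct demo_sge *sge)
{
	uint32_t offset = 0;
	int i;

	for (i = 0; i < nseg; i++) {
		sge[i].addr_lo = (uint32_t)(seg[i].addr & 0xffffffffu);
		sge[i].addr_hi = (uint32_t)(seg[i].addr >> 32);
		sge[i].len     = seg[i].len;
		sge[i].offset  = offset;
		sge[i].last    = (i + 1 == nseg);
		offset        += seg[i].len;
	}
}

int main(void)
{
	struct demo_seg segs[3] = {
		{ 0x100000000ULL, 4096 }, { 0x100001000ULL, 4096 },
		{ 0x100002000ULL, 512 }
	};
	struct demo_sge sges[3];
	int i;

	demo_format_sges(segs, 3, sges);
	for (i = 0; i < 3; i++)
		printf("sge %d: offset %u last %d\n", i, sges[i].offset,
		       sges[i].last);
	return 0;
}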
+
+/**
+ * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine wraps the actual DMA mapping function pointer from the
+ * lpfc_hba struct.
+ *
+ * Return codes:
+ * 1 - Error
+ * 0 - Success
+ **/
+static inline int
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+}
+
+/**
* lpfc_send_scsi_error_event - Posts an event when there is SCSI error
* @phba: Pointer to hba context object.
* @vport: Pointer to vport object.
@@ -1504,15 +1978,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
}
/**
- * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather
- * @phba: The Hba for which this call is being executed.
+ * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev
+ * @phba: The HBA for which this call is being executed.
* @psb: The scsi buffer which is going to be un-mapped.
*
* This routine does DMA un-mapping of scatter gather list of scsi command
- * field of @lpfc_cmd.
+ * field of @lpfc_cmd for device with SLI-3 interface spec.
**/
static void
-lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
+lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
/*
* There are only two special cases to consider. (1) the scsi command
@@ -1529,6 +2003,36 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
}
/**
+ * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine does DMA un-mapping of scatter gather list of scsi command
+ * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
+ * remove the sgl for this scsi buffer then we will do it here. For now
+ * we should be able to just call the sli3 unprep routine.
+ **/
+static void
+lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+ lpfc_scsi_unprep_dma_buf_s3(phba, psb);
+}
+
+/**
+ * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine wraps the actual DMA un-mapping function pointer from the
+ * lpfc_hba struct.
+ **/
+static void
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+ phba->lpfc_scsi_unprep_dma_buf(phba, psb);
+}
+
+/**
* lpfc_handler_fcp_err - FCP response handler
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
@@ -1676,7 +2180,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
* lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
* @phba: The Hba for which this call is being executed.
* @pIocbIn: The command IOCBQ for the scsi cmnd.
- * @pIocbOut: The response IOCBQ for the scsi cmnd .
+ * @pIocbOut: The response IOCBQ for the scsi cmnd.
*
* This routine assigns scsi command result by looking into response IOCB
* status field appropriately. This routine handles QUEUE FULL condition as
@@ -1957,16 +2461,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
}
/**
- * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit
+ * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP info unit for SLI3 dev
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: The scsi command which needs to send.
* @pnode: Pointer to lpfc_nodelist.
*
* This routine initializes fcp_cmnd and iocb data structure from scsi command
- * to transfer.
+ * to transfer for device with SLI3 interface spec.
**/
static void
-lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_nodelist *pnode)
{
struct lpfc_hba *phba = vport->phba;
@@ -2013,8 +2517,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
if (scsi_sg_count(scsi_cmnd)) {
if (datadir == DMA_TO_DEVICE) {
iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
- iocb_cmd->un.fcpi.fcpi_parm = 0;
- iocb_cmd->ulpPU = 0;
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ iocb_cmd->un.fcpi.fcpi_parm = 0;
+ iocb_cmd->ulpPU = 0;
+ } else
+ iocb_cmd->ulpPU = PARM_READ_CHECK;
fcp_cmnd->fcpCntl3 = WRITE_DATA;
phba->fc4OutputRequests++;
} else {
@@ -2051,20 +2558,60 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
/**
- * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit
+ * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP info unit for SLI4 dev
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to send.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes fcp_cmnd and iocb data structure from scsi command
+ * to transfer for device with SLI4 interface spec.
+ **/
+static void
+lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+ struct lpfc_nodelist *pnode)
+{
+ /*
+ * The prep cmnd routines do not touch the sgl or its
+ * entries. We may not have to do anything different.
+ * I will leave this function in place until we can
+ * run some IO through the driver and determine if changes
+ * are needed.
+ */
+ return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
+}
+
+/**
+ * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to send.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine wraps the actual convert SCSI cmnd function pointer from
+ * the lpfc_hba struct.
+ **/
+static inline void
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+ struct lpfc_nodelist *pnode)
+{
+ vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
+}
+
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
* @lun: Logical unit number.
* @task_mgmt_cmd: SCSI task management command.
*
- * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd
+ * for device with SLI-3 interface spec.
*
* Return codes:
* 0 - Error
* 1 - Success
**/
static int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
+lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd,
unsigned int lun,
uint8_t task_mgmt_cmd)
@@ -2114,6 +2661,107 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
}
/**
+ * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd
+ * for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ * 0 - Error
+ * 1 - Success
+ **/
+static int
+lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
+ struct lpfc_scsi_buf *lpfc_cmd,
+ unsigned int lun,
+ uint8_t task_mgmt_cmd)
+{
+ /*
+ * The prep cmnd routines do not touch the sgl or its
+ * entries. We may not have to do anything different.
+ * I will leave this function in place until we can
+ * run some IO through the driver and determine if changes
+ * are needed.
+ */
+ return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
+ task_mgmt_cmd);
+}
+
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd - Wrapper func to convert scsi TM cmd to FCP info
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine wraps the actual convert SCSI TM to FCP information unit
+ * function pointer from the lpfc_hba struct.
+ *
+ * Return codes:
+ * 0 - Error
+ * 1 - Success
+ **/
+static inline int
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
+ struct lpfc_scsi_buf *lpfc_cmd,
+ unsigned int lun,
+ uint8_t task_mgmt_cmd)
+{
+ struct lpfc_hba *phba = vport->phba;
+
+ return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
+ task_mgmt_cmd);
+}
+
+/**
+ * lpfc_scsi_api_table_setup - Set up scsi api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SCSI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+ switch (dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
+ phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
+ phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
+ phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
+ phba->lpfc_scsi_prep_task_mgmt_cmd =
+ lpfc_scsi_prep_task_mgmt_cmd_s3;
+ phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
+ break;
+ case LPFC_PCI_DEV_OC:
+ phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
+ phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
+ phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
+ phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
+ phba->lpfc_scsi_prep_task_mgmt_cmd =
+ lpfc_scsi_prep_task_mgmt_cmd_s4;
+ phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1418 Invalid HBA PCI-device group: 0x%x\n",
+ dev_grp);
+ return -ENODEV;
+ break;
+ }
+ phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
+ phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
+ return 0;
+}
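
lpfc_scsi_api_table_setup is a straightforward jump-table pattern: the interface-specific implementations are bound to function pointers once at setup time, and the hot path dispatches through thin wrappers such as lpfc_scsi_prep_dma_buf. A compact standalone sketch of the same idea follows; all names in it are purely illustrative, not the lpfc structures.

/* Illustrative sketch only: hypothetical names, not the lpfc structures. */
#include <stdio.h>

enum demo_dev_grp { DEMO_DEV_LP, DEMO_DEV_OC };

struct demo_hba {
	int (*prep_dma_buf)(struct demo_hba *hba, int cmd);
};

static int demo_prep_dma_buf_s3(struct demo_hba *hba, int cmd)
{
	(void)hba;
	printf("SLI-3 prep for cmd %d\n", cmd);
	return 0;
}

static int demo_prep_dma_buf_s4(struct demo_hba *hba, int cmd)
{
	(void)hba;
	printf("SLI-4 prep for cmd %d\n", cmd);
	return 0;
}

/* Bind the interface-specific implementation once, at setup time. */
static int demo_api_table_setup(struct demo_hba *hba, enum demo_dev_grp grp)
{
	switch (grp) {
	case DEMO_DEV_LP:
		hba->prep_dma_buf = demo_prep_dma_buf_s3;
		return 0;
	case DEMO_DEV_OC:
		hba->prep_dma_buf = demo_prep_dma_buf_s4;
		return 0;
	default:
		return -1;
	}
}

/* Hot-path callers go through a thin wrapper that just dereferences. */
static int demo_prep_dma_buf(struct demo_hba *hba, int cmd)
{
	return hba->prep_dma_buf(hba, cmd);
}

int main(void)
{
	struct demo_hba hba;

	demo_api_table_setup(&hba, DEMO_DEV_OC);
	return demo_prep_dma_buf(&hba, 42);
}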
+
+/**
* lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
* @phba: The Hba for which this call is being executed.
* @cmdiocbq: Pointer to lpfc_iocbq data structure.
@@ -2178,9 +2826,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
- status = lpfc_sli_issue_iocb_wait(phba,
- &phba->sli.ring[phba->sli.fcp_ring],
- iocbq, iocbqrsp, lpfc_cmd->timeout);
+ status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
+ iocbq, iocbqrsp, lpfc_cmd->timeout);
if (status != IOCB_SUCCESS) {
if (status == IOCB_TIMEDOUT) {
iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2305,7 +2952,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_sli *psli = &phba->sli;
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
struct lpfc_nodelist *ndlp = rdata->pnode;
struct lpfc_scsi_buf *lpfc_cmd;
@@ -2378,15 +3024,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
if (cmnd->cmnd[0] == READ_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9035 BLKGRD: READ @ sector %llu, "
- "count %lu\n",
- (unsigned long long)scsi_get_lba(cmnd),
- cmnd->request->nr_sectors);
+ "count %u\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ blk_rq_sectors(cmnd->request));
else if (cmnd->cmnd[0] == WRITE_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9036 BLKGRD: WRITE @ sector %llu, "
- "count %lu cmd=%p\n",
+ "count %u cmd=%p\n",
(unsigned long long)scsi_get_lba(cmnd),
- cmnd->request->nr_sectors,
+ blk_rq_sectors(cmnd->request),
cmnd);
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -2406,15 +3052,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
if (cmnd->cmnd[0] == READ_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9040 dbg: READ @ sector %llu, "
- "count %lu\n",
+ "count %u\n",
(unsigned long long)scsi_get_lba(cmnd),
- cmnd->request->nr_sectors);
+ blk_rq_sectors(cmnd->request));
else if (cmnd->cmnd[0] == WRITE_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9041 dbg: WRITE @ sector %llu, "
- "count %lu cmd=%p\n",
+ "count %u cmd=%p\n",
(unsigned long long)scsi_get_lba(cmnd),
- cmnd->request->nr_sectors, cmnd);
+ blk_rq_sectors(cmnd->request), cmnd);
else
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9042 dbg: parser not implemented\n");
@@ -2427,7 +3073,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
atomic_inc(&ndlp->cmd_pending);
- err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
+ err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err) {
atomic_dec(&ndlp->cmd_pending);
@@ -2490,7 +3136,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
struct lpfc_iocbq *iocb;
struct lpfc_iocbq *abtsiocb;
struct lpfc_scsi_buf *lpfc_cmd;
@@ -2531,7 +3176,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
icmd = &abtsiocb->iocb;
icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
icmd->un.acxri.abortContextTag = cmd->ulpContext;
- icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
+ else
+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
icmd->ulpLe = 1;
icmd->ulpClass = cmd->ulpClass;
@@ -2542,7 +3190,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
abtsiocb->vport = vport;
- if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
+ if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
+ IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
@@ -2668,8 +3317,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
"0703 Issue target reset to TGT %d LUN %d "
"rpi x%x nlp_flag x%x\n", cmnd->device->id,
cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
- status = lpfc_sli_issue_iocb_wait(phba,
- &phba->sli.ring[phba->sli.fcp_ring],
+ status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
if (status == IOCB_TIMEDOUT) {
iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2825,11 +3473,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_scsi_buf *scsi_buf = NULL;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
- uint32_t total = 0, i;
+ uint32_t total = 0;
uint32_t num_to_alloc = 0;
- unsigned long flags;
+ int num_allocated = 0;
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
@@ -2863,20 +3510,13 @@ lpfc_slave_alloc(struct scsi_device *sdev)
(phba->cfg_hba_queue_depth - total));
num_to_alloc = phba->cfg_hba_queue_depth - total;
}
-
- for (i = 0; i < num_to_alloc; i++) {
- scsi_buf = lpfc_new_scsi_buf(vport);
- if (!scsi_buf) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0706 Failed to allocate "
- "command buffer\n");
- break;
- }
-
- spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
- phba->total_scsi_bufs++;
- list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
- spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
+ num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
+ if (num_to_alloc != num_allocated) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "0708 Allocation request of %d "
+ "command buffers did not succeed. "
+ "Allocated %d buffers.\n",
+ num_to_alloc, num_allocated);
}
return 0;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index c7c440d5fa2..65dfc8bd5b4 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -140,6 +140,8 @@ struct lpfc_scsi_buf {
struct fcp_rsp *fcp_rsp;
struct ulp_bde64 *fcp_bpl;
+ dma_addr_t dma_phys_bpl;
+
/* cur_iocbq has phys of the dma-able buffer.
* Iotag is in here
*/
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index eb5c75c45ba..ff04daf18f4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,9 +29,12 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@@ -40,24 +43,7 @@
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
-
-/*
- * Define macro to log: Mailbox command x%x cannot issue Data
- * This allows multiple uses of lpfc_msgBlk0311
- * w/o perturbing log msg utility.
- */
-#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
- lpfc_printf_log(phba, \
- KERN_INFO, \
- LOG_MBOX | LOG_SLI, \
- "(%d):0311 Mailbox command x%x cannot " \
- "issue Data: x%x x%x x%x\n", \
- pmbox->vport ? pmbox->vport->vpi : 0, \
- pmbox->mb.mbxCommand, \
- phba->pport->port_state, \
- psli->sli_flag, \
- flag)
-
+#include "lpfc_vport.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
@@ -67,6 +53,350 @@ typedef enum _lpfc_iocb_type {
LPFC_ABORT_IOCB
} lpfc_iocb_type;
+
+/* Provide function prototypes local to this module. */
+static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
+ uint32_t);
+static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
+ uint8_t *, uint32_t *);
+
+static IOCB_t *
+lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
+{
+ return &iocbq->iocb;
+}
+
+/**
+ * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
+ * @q: The Work Queue to operate on.
+ * @wqe: The work Queue Entry to put on the Work queue.
+ *
+ * This routine will copy the contents of @wqe to the next available entry on
+ * the @q. This function will then ring the Work Queue Doorbell to signal the
+ * HBA to start processing the Work Queue Entry. This function returns 0 if
+ * successful. If no entries are available on @q then this function will return
+ * -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
+{
+ union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
+ struct lpfc_register doorbell;
+ uint32_t host_index;
+
+ /* If the host has not yet processed the next entry then we are done */
+ if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+ return -ENOMEM;
+ /* set consumption flag every once in a while */
+ if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
+ bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
+
+ lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+
+ /* Update the host index before invoking device */
+ host_index = q->host_index;
+ q->host_index = ((q->host_index + 1) % q->entry_count);
+
+ /* Ring Doorbell */
+ doorbell.word0 = 0;
+ bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
+ bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
+ bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
+ readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
+
+ return 0;
+}
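
lpfc_sli4_wq_put and lpfc_sli4_wq_release treat the queue as a circular buffer: the put side declares the ring full when advancing host_index would land on hba_index, and the release side walks hba_index forward counting consumed entries. The sketch below models just that index arithmetic with no doorbell I/O; the types and sizes are invented for the demo.

/* Illustrative sketch only: the ring bookkeeping without any doorbell I/O. */
#include <stdio.h>

#define DEMO_ENTRIES 4

struct demo_queue {
	unsigned int host_index;  /* next slot the host will write */
	unsigned int hba_index;   /* last slot the HBA has consumed up to */
	unsigned int entry_count;
	int entries[DEMO_ENTRIES];
};

/* Full when advancing host_index would land on hba_index. */
static int demo_q_put(struct demo_queue *q, int val)
{
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -1;
	q->entries[q->host_index] = val;
	q->host_index = (q->host_index + 1) % q->entry_count;
	return 0;
}

/* Advance hba_index to @index and report how many entries were consumed. */
static unsigned int demo_q_release(struct demo_queue *q, unsigned int index)
{
	unsigned int released = 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = (q->hba_index + 1) % q->entry_count;
		released++;
	} while (q->hba_index != index);
	return released;
}

int main(void)
{
	struct demo_queue q = { 0, 0, DEMO_ENTRIES, { 0 } };
	int i;

	for (i = 0; demo_q_put(&q, i) == 0; i++)
		;
	printf("queued %d entries before the ring filled\n", i);
	printf("released %u entries\n", demo_q_release(&q, q.host_index));
	return 0;
}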
+
+/**
+ * lpfc_sli4_wq_release - Updates internal hba index for WQ
+ * @q: The Work Queue to operate on.
+ * @index: The index to advance the hba index to.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
+{
+ uint32_t released = 0;
+
+ if (q->hba_index == index)
+ return 0;
+ do {
+ q->hba_index = ((q->hba_index + 1) % q->entry_count);
+ released++;
+ } while (q->hba_index != index);
+ return released;
+}
+
+/**
+ * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
+ * @q: The Mailbox Queue to operate on.
+ * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
+ *
+ * This routine will copy the contents of @mqe to the next available entry on
+ * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
+ * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
+ * successful. If no entries are available on @q then this function will return
+ * -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
+{
+ struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
+ struct lpfc_register doorbell;
+ uint32_t host_index;
+
+ /* If the host has not yet processed the next entry then we are done */
+ if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+ return -ENOMEM;
+ lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
+ /* Save off the mailbox pointer for completion */
+ q->phba->mbox = (MAILBOX_t *)temp_mqe;
+
+ /* Update the host index before invoking device */
+ host_index = q->host_index;
+ q->host_index = ((q->host_index + 1) % q->entry_count);
+
+ /* Ring Doorbell */
+ doorbell.word0 = 0;
+ bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
+ bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
+ readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
+ return 0;
+}
+
+/**
+ * lpfc_sli4_mq_release - Updates internal hba index for MQ
+ * @q: The Mailbox Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_mq_release(struct lpfc_queue *q)
+{
+ /* Clear the mailbox pointer for completion */
+ q->phba->mbox = NULL;
+ q->hba_index = ((q->hba_index + 1) % q->entry_count);
+ return 1;
+}
+
+/**
+ * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
+ * @q: The Event Queue to get the first valid EQE from
+ *
+ * This routine will get the first valid Event Queue Entry from @q, update
+ * the queue's internal hba index, and return the EQE. If no valid EQEs are in
+ * the Queue (no more work to do), or the Queue is full of EQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_eqe *
+lpfc_sli4_eq_get(struct lpfc_queue *q)
+{
+ struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
+
+ /* If the next EQE is not valid then we are done */
+ if (!bf_get(lpfc_eqe_valid, eqe))
+ return NULL;
+ /* If the host has not yet processed the next entry then we are done */
+ if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+ return NULL;
+
+ q->hba_index = ((q->hba_index + 1) % q->entry_count);
+ return eqe;
+}
+
+/**
+ * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
+ * @q: The Event Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this EQ.
+ *
+ * This routine will mark all Event Queue Entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of EQEs that were popped.
+ **/
+uint32_t
+lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
+{
+ uint32_t released = 0;
+ struct lpfc_eqe *temp_eqe;
+ struct lpfc_register doorbell;
+
+ /* while there are valid entries */
+ while (q->hba_index != q->host_index) {
+ temp_eqe = q->qe[q->host_index].eqe;
+ bf_set(lpfc_eqe_valid, temp_eqe, 0);
+ released++;
+ q->host_index = ((q->host_index + 1) % q->entry_count);
+ }
+ if (unlikely(released == 0 && !arm))
+ return 0;
+
+ /* ring doorbell for number popped */
+ doorbell.word0 = 0;
+ if (arm) {
+ bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+ bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+ }
+ bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+ bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+ bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+ return released;
+}
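
lpfc_sli4_eq_release walks from the host index up to the HBA index, clearing the valid bit of each EQE and counting how many entries will be reported when the doorbell is rung. A self-contained model of that loop follows, with a hypothetical queue layout and no doorbell register access.

/* Illustrative sketch only: clearing "valid" bits between the two indices. */
#include <stdio.h>

#define DEMO_EQ_ENTRIES 8

struct demo_eq {
	unsigned int host_index;   /* last entry the host finished with */
	unsigned int hba_index;    /* last entry the HBA produced */
	unsigned int entry_count;
	int valid[DEMO_EQ_ENTRIES];
};

/* Walk from host_index up to hba_index, clearing each valid bit and
 * counting how many entries would be reported in the doorbell write. */
static unsigned int demo_eq_release(struct demo_eq *q)
{
	unsigned int released = 0;

	while (q->hba_index != q->host_index) {
		q->valid[q->host_index] = 0;
		released++;
		q->host_index = (q->host_index + 1) % q->entry_count;
	}
	return released;
}

int main(void)
{
	struct demo_eq q = { .host_index = 0, .hba_index = 0,
			     .entry_count = DEMO_EQ_ENTRIES };
	unsigned int i;

	for (i = 0; i < 5; i++) {          /* pretend the HBA posted 5 EQEs */
		q.valid[q.hba_index] = 1;
		q.hba_index = (q.hba_index + 1) % q.entry_count;
	}
	printf("popped %u event queue entries\n", demo_eq_release(&q));
	return 0;
}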
+
+/**
+ * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
+ * @q: The Completion Queue to get the first valid CQE from
+ *
+ * This routine will get the first valid Completion Queue Entry from @q, update
+ * the queue's internal hba index, and return the CQE. If no valid CQEs are in
+ * the Queue (no more work to do), or the Queue is full of CQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_cqe *
+lpfc_sli4_cq_get(struct lpfc_queue *q)
+{
+ struct lpfc_cqe *cqe;
+
+ /* If the next CQE is not valid then we are done */
+ if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+ return NULL;
+ /* If the host has not yet processed the next entry then we are done */
+ if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+ return NULL;
+
+ cqe = q->qe[q->hba_index].cqe;
+ q->hba_index = ((q->hba_index + 1) % q->entry_count);
+ return cqe;
+}
+
+/**
+ * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
+ * @q: The Completion Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this CQ.
+ *
+ * This routine will mark all Completion queue entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of CQEs that were released.
+ **/
+uint32_t
+lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
+{
+ uint32_t released = 0;
+ struct lpfc_cqe *temp_qe;
+ struct lpfc_register doorbell;
+
+ /* while there are valid entries */
+ while (q->hba_index != q->host_index) {
+ temp_qe = q->qe[q->host_index].cqe;
+ bf_set(lpfc_cqe_valid, temp_qe, 0);
+ released++;
+ q->host_index = ((q->host_index + 1) % q->entry_count);
+ }
+ if (unlikely(released == 0 && !arm))
+ return 0;
+
+ /* ring doorbell for number popped */
+ doorbell.word0 = 0;
+ if (arm)
+ bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+ bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+ bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
+ bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
+ writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+ return released;
+}
+
+/**
+ * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ * @hrqe: The header Receive Queue Entry to put on the header Receive Queue.
+ * @drqe: The data Receive Queue Entry to put on the data Receive Queue.
+ *
+ * This routine will copy the contents of @hrqe and @drqe to the next available
+ * entries on @hq and @dq. This function will then ring the Receive Queue
+ * Doorbell to signal the HBA to start processing the Receive Queue Entries.
+ * This function returns the index that the hrqe was copied to if successful.
+ * If no entries are available on @hq then this function will return -EBUSY.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static int
+lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
+ struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
+{
+ struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
+ struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
+ struct lpfc_register doorbell;
+ int put_index = hq->host_index;
+
+ if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
+ return -EINVAL;
+ if (hq->host_index != dq->host_index)
+ return -EINVAL;
+ /* If the host has not yet processed the next entry then we are done */
+ if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
+ return -EBUSY;
+ lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
+ lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
+
+ /* Update the host index to point to the next slot */
+ hq->host_index = ((hq->host_index + 1) % hq->entry_count);
+ dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+
+ /* Ring The Header Receive Queue Doorbell */
+ if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
+ doorbell.word0 = 0;
+ bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
+ LPFC_RQ_POST_BATCH);
+ bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
+ writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
+ }
+ return put_index;
+}
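
lpfc_sli4_rq_put advances the header and data receive queues in lockstep and only rings the doorbell once a whole batch of entries has been posted. The standalone sketch below mirrors that pairing and batching logic; the ring sizes, batch size, and printf "doorbell" are illustrative only.

/* Illustrative sketch only: lockstep header/data ring indices, no real doorbell. */
#include <stdio.h>

#define DEMO_RQ_ENTRIES 8
#define DEMO_POST_BATCH 4

struct demo_rq {
	unsigned int host_index;
	unsigned int hba_index;
	unsigned int entry_count;
};

/* Both rings must be advanced together; the doorbell is only "rung"
 * (here: printed) once a full batch of entries has been posted. */
static int demo_rq_pair_put(struct demo_rq *hq, struct demo_rq *dq)
{
	int put_index = (int)hq->host_index;

	if (hq->host_index != dq->host_index)
		return -1;                                   /* out of sync */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -2;                                   /* ring full */

	hq->host_index = (hq->host_index + 1) % hq->entry_count;
	dq->host_index = (dq->host_index + 1) % dq->entry_count;

	if (!(hq->host_index % DEMO_POST_BATCH))
		printf("doorbell: posted %d receive entries\n",
		       DEMO_POST_BATCH);
	return put_index;
}

int main(void)
{
	struct demo_rq hq = { 0, 0, DEMO_RQ_ENTRIES };
	struct demo_rq dq = { 0, 0, DEMO_RQ_ENTRIES };
	int i;

	for (i = 0; i < 6; i++)
		demo_rq_pair_put(&hq, &dq);
	return 0;
}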
+
+/**
+ * lpfc_sli4_rq_release - Updates internal hba index for RQ
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * one Receive Queue Entry by the HBA. When the HBA indicates that it has
+ * consumed an entry the host calls this function to update the queue's
+ * internal pointers. This routine returns the number of entries that were
+ * consumed by the HBA.
+ **/
+static uint32_t
+lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
+{
+ if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
+ return 0;
+ hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
+ dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
+ return 1;
+}
+
/**
* lpfc_cmd_iocb - Get next command iocb entry in the ring
* @phba: Pointer to HBA context object.
@@ -121,6 +451,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
}
/**
+ * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function clears the sglq pointer from the array of active
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns sglq pointer = success, NULL = Failure.
+ **/
+static struct lpfc_sglq *
+__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+ uint16_t adj_xri;
+ struct lpfc_sglq *sglq;
+ adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+ if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
+ return NULL;
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+ phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
+ return sglq;
+}
+
+/**
+ * __lpfc_get_active_sglq - Get the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function returns the sglq pointer from the array of active
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns sglq pointer = success, NULL = Failure.
+ **/
+static struct lpfc_sglq *
+__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+ uint16_t adj_xri;
+ struct lpfc_sglq *sglq;
+ adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+ if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
+ return NULL;
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+ return sglq;
+}
+
+/**
+ * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with hbalock held. It gets a new driver
+ * sglq object from the sglq list. If the list is not empty then it
+ * returns a pointer to the newly allocated sglq object, else it
+ * returns NULL.
+ **/
+static struct lpfc_sglq *
+__lpfc_sli_get_sglq(struct lpfc_hba *phba)
+{
+ struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
+ struct lpfc_sglq *sglq = NULL;
+ uint16_t adj_xri;
+ list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
+ adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
+ phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+ return sglq;
+}
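
The three sglq helpers above all index a fixed-size active array after subtracting the configured XRI base from the tag. A small user-space sketch of that clear-on-lookup bookkeeping follows, with a made-up base and array size.

/* Illustrative sketch only: hypothetical XRI bookkeeping, fixed-size array. */
#include <stdio.h>
#include <stddef.h>

#define DEMO_XRI_BASE 0x100
#define DEMO_MAX_XRI  16

struct demo_sglq { unsigned short xritag; };

static struct demo_sglq *demo_active[DEMO_MAX_XRI];

/* Adjust the tag by the base before indexing, as the helpers above do
 * with xri_base, and bounds-check the result. */
static struct demo_sglq *demo_clear_active_sglq(unsigned short xritag)
{
	unsigned short adj = xritag - DEMO_XRI_BASE;
	struct demo_sglq *sglq;

	if (adj >= DEMO_MAX_XRI)
		return NULL;
	sglq = demo_active[adj];
	demo_active[adj] = NULL;
	return sglq;
}

int main(void)
{
	struct demo_sglq s = { DEMO_XRI_BASE + 3 };

	demo_active[3] = &s;
	printf("cleared sglq for xri 0x%x: %p\n", (unsigned)s.xritag,
	       (void *)demo_clear_active_sglq(s.xritag));
	printf("second clear returns %p\n",
	       (void *)demo_clear_active_sglq(s.xritag));
	return 0;
}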
+
+/**
* lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
* @phba: Pointer to HBA context object.
*
@@ -142,7 +542,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
}
/**
- * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
@@ -150,9 +550,62 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
* iocb object to the iocb pool. The iotag in the iocb object
* does not change for each use of the iocb object. This function
* clears all other fields of the iocb object when it is freed.
+ * The sglq structure that holds the xritag and phys and virtual
+ * mappings for the scatter gather list is retrieved from the
+ * active array of sglq. The get of the sglq pointer also clears
+ * the entry in the array. If the status of the IO indicates that
+ * this IO was aborted then the sglq entry is put on the
+ * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
+ * IO has good status or fails for any other reason then the sglq
+ * entry is added to the free list (lpfc_sgl_list).
**/
static void
-__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ struct lpfc_sglq *sglq;
+ size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+ unsigned long iflag;
+
+ if (iocbq->sli4_xritag == NO_XRI)
+ sglq = NULL;
+ else
+ sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
+ if (sglq) {
+ if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
+ || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+ && (iocbq->iocb.un.ulpWord[4]
+ == IOERR_SLI_ABORTED))) {
+ spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
+ iflag);
+ list_add(&sglq->list,
+ &phba->sli4_hba.lpfc_abts_els_sgl_list);
+ spin_unlock_irqrestore(
+ &phba->sli4_hba.abts_sgl_list_lock, iflag);
+ } else
+ list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+ }
+
+
+ /*
+ * Clean all volatile data fields, preserve iotag and node struct.
+ */
+ memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+ iocbq->sli4_xritag = NO_XRI;
+ list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
+}
+
+/**
+ * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
@@ -160,10 +613,27 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
* Clean all volatile data fields, preserve iotag and node struct.
*/
memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+ iocbq->sli4_xritag = NO_XRI;
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
+ * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ phba->__lpfc_sli_release_iocbq(phba, iocbq);
+}
+
+/**
* lpfc_sli_release_iocbq - Release iocb to the iocb pool
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
@@ -281,6 +751,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_GEN_REQUEST64_CR:
case CMD_GEN_REQUEST64_CX:
case CMD_XMIT_ELS_RSP64_CX:
+ case DSSCMD_IWRITE64_CR:
+ case DSSCMD_IWRITE64_CX:
+ case DSSCMD_IREAD64_CR:
+ case DSSCMD_IREAD64_CX:
+ case DSSCMD_INVALIDATE_DEK:
+ case DSSCMD_SET_KEK:
+ case DSSCMD_GET_KEK_ID:
+ case DSSCMD_GEN_XFER:
type = LPFC_SOL_IOCB;
break;
case CMD_ABORT_XRI_CN:
@@ -348,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb)
return -ENOMEM;
- pmbox = &pmb->mb;
+ pmbox = &pmb->u.mb;
phba->link_state = LPFC_INIT_MBX_CMDS;
for (i = 0; i < psli->num_rings; i++) {
lpfc_config_ring(phba, i, pmb);
@@ -779,8 +1257,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
phba->hbqs[i].buffer_count = 0;
}
/* Return all HBQ buffer that are in-fly */
- list_for_each_entry_safe(dmabuf, next_dmabuf,
- &phba->hbqbuf_in_list, list) {
+ list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
+ list) {
hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
list_del(&hbq_buf->dbuf.list);
if (hbq_buf->tag == -1) {
@@ -814,10 +1292,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
* pointer to the hbq entry if it successfully post the buffer
* else it will return NULL.
**/
-static struct lpfc_hbq_entry *
+static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
struct hbq_dmabuf *hbq_buf)
{
+ return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post a hbq buffer to the
+ * firmware. If the function finds an empty slot in the HBQ, it will post the
+ * buffer and place it on the hbq_buffer_list. The function will return zero if
+ * it successfully posts the buffer, else it will return an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
+ struct hbq_dmabuf *hbq_buf)
+{
struct lpfc_hbq_entry *hbqe;
dma_addr_t physaddr = hbq_buf->dbuf.phys;
@@ -838,8 +1334,40 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
/* flush */
readl(phba->hbq_put + hbqno);
list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
- }
- return hbqe;
+ return 0;
+ } else
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an RQE to the SLI4
+ * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
+ * the hbq_buffer_list and return zero, otherwise it will return an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
+ struct hbq_dmabuf *hbq_buf)
+{
+ int rc;
+ struct lpfc_rqe hrqe;
+ struct lpfc_rqe drqe;
+
+ hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
+ hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
+ drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
+ drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
+ rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
+ &hrqe, &drqe);
+ if (rc < 0)
+ return rc;
+ hbq_buf->tag = rc;
+ list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
+ return 0;
}
/* HBQ for ELS and CT traffic. */
@@ -914,7 +1442,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
dbuf.list);
hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
(hbqno << 16));
- if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
+ if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
phba->hbqs[hbqno].buffer_count++;
posted++;
} else
@@ -965,6 +1493,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
}
/**
+ * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ *
+ * This function removes the first hbq buffer on an hbq list and returns a
+ * pointer to that buffer. If it finds no buffers on the list it returns NULL.
+ **/
+static struct hbq_dmabuf *
+lpfc_sli_hbqbuf_get(struct list_head *rb_list)
+{
+ struct lpfc_dmabuf *d_buf;
+
+ list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
+ if (!d_buf)
+ return NULL;
+ return container_of(d_buf, struct hbq_dmabuf, dbuf);
+}
+
+/**
* lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
* @phba: Pointer to HBA context object.
* @tag: Tag of the hbq buffer.
@@ -985,12 +1532,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
if (hbqno >= LPFC_MAX_HBQS)
return NULL;
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
if (hbq_buf->tag == tag) {
+ spin_unlock_irq(&phba->hbalock);
return hbq_buf;
}
}
+ spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
"1803 Bad hbq tag. Data: x%x x%x\n",
tag, phba->hbqs[tag >> 16].buffer_count);
@@ -1013,9 +1563,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
if (hbq_buffer) {
hbqno = hbq_buffer->tag >> 16;
- if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
+ if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
- }
}
}
@@ -1086,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_HEARTBEAT:
case MBX_PORT_CAPABILITIES:
case MBX_PORT_IOV_CONTROL:
+ case MBX_SLI4_CONFIG:
+ case MBX_SLI4_REQ_FTRS:
+ case MBX_REG_FCFI:
+ case MBX_UNREG_FCFI:
+ case MBX_REG_VFI:
+ case MBX_UNREG_VFI:
+ case MBX_INIT_VPI:
+ case MBX_INIT_VFI:
+ case MBX_RESUME_RPI:
ret = mbxCommand;
break;
default:
@@ -1106,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
* will wake up thread waiting on the wait queue pointed by context1
* of the mailbox.
**/
-static void
+void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
wait_queue_head_t *pdone_q;
@@ -1140,7 +1698,7 @@ void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_dmabuf *mp;
- uint16_t rpi;
+ uint16_t rpi, vpi;
int rc;
mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1150,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
kfree(mp);
}
+ if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
+ (phba->sli_rev == LPFC_SLI_REV4))
+ lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
+
/*
* If a REG_LOGIN succeeded after node is destroyed or node
* is in re-discovery driver need to cleanup the RPI.
*/
if (!(phba->pport->load_flag & FC_UNLOADING) &&
- pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
- !pmb->mb.mbxStatus) {
-
- rpi = pmb->mb.un.varWords[0];
- lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
+ pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
+ !pmb->u.mb.mbxStatus) {
+ rpi = pmb->u.mb.un.varWords[0];
+ vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
+ lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_NOT_FINISHED)
return;
}
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
+ if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
+ lpfc_sli4_mbox_cmd_free(phba, pmb);
+ else
+ mempool_free(pmb, phba->mbox_mem_pool);
}
/**
@@ -1204,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
if (pmb == NULL)
break;
- pmbox = &pmb->mb;
+ pmbox = &pmb->u.mb;
if (pmbox->mbxCommand != MBX_HEARTBEAT) {
if (pmb->vport) {
@@ -1233,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
			/* Unknown mailbox command compl */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):0323 Unknown Mailbox command "
- "%x Cmpl\n",
+ "x%x (x%x) Cmpl\n",
pmb->vport ? pmb->vport->vpi : 0,
- pmbox->mbxCommand);
+ pmbox->mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, pmb));
phba->link_state = LPFC_HBA_ERROR;
phba->work_hs = HS_FFER3;
lpfc_handle_eratt(phba);
@@ -1250,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
LOG_MBOX | LOG_SLI,
"(%d):0305 Mbox cmd cmpl "
"error - RETRYing Data: x%x "
- "x%x x%x x%x\n",
+ "(x%x) x%x x%x x%x\n",
pmb->vport ? pmb->vport->vpi :0,
pmbox->mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba,
+ pmb),
pmbox->mbxStatus,
pmbox->un.varWords[0],
pmb->vport->port_state);
pmbox->mbxStatus = 0;
pmbox->mbxOwner = OWN_HOST;
- spin_lock_irq(&phba->hbalock);
- phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc == MBX_SUCCESS)
+ if (rc != MBX_NOT_FINISHED)
continue;
}
}
/* Mailbox cmd <cmd> Cmpl <cmpl> */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
- "(%d):0307 Mailbox cmd x%x Cmpl x%p "
+ "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, pmb),
pmb->mbox_cmpl,
*((uint32_t *) pmbox),
pmbox->un.varWords[0],
@@ -1317,6 +1882,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
return &hbq_entry->dbuf;
}
+/**
+ * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
+ * @fch_r_ctl: the r_ctl for the first frame of the sequence.
+ * @fch_type: the type for the first frame of the sequence.
+ *
+ * This function is called with no lock held. This function uses the r_ctl and
+ * type of the received sequence to find the correct callback function to call
+ * to process the sequence.
+ **/
+static int
+lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
+ uint32_t fch_type)
+{
+ int i;
+
+ /* unSolicited Responses */
+ if (pring->prt[0].profile) {
+ if (pring->prt[0].lpfc_sli_rcv_unsol_event)
+ (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
+ saveq);
+ return 1;
+ }
+ /* We must search, based on rctl / type
+ for the right routine */
+ for (i = 0; i < pring->num_mask; i++) {
+ if ((pring->prt[i].rctl == fch_r_ctl) &&
+ (pring->prt[i].type == fch_type)) {
+ if (pring->prt[i].lpfc_sli_rcv_unsol_event)
+ (pring->prt[i].lpfc_sli_rcv_unsol_event)
+ (phba, pring, saveq);
+ return 1;
+ }
+ }
+ return 0;
+}
/**
* lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
@@ -1339,7 +1943,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t * irsp;
WORD5 * w5p;
uint32_t Rctl, Type;
- uint32_t match, i;
+ uint32_t match;
struct lpfc_iocbq *iocbq;
struct lpfc_dmabuf *dmzbuf;
@@ -1482,35 +2086,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
- /* unSolicited Responses */
- if (pring->prt[0].profile) {
- if (pring->prt[0].lpfc_sli_rcv_unsol_event)
- (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
- saveq);
- match = 1;
- } else {
- /* We must search, based on rctl / type
- for the right routine */
- for (i = 0; i < pring->num_mask; i++) {
- if ((pring->prt[i].rctl == Rctl)
- && (pring->prt[i].type == Type)) {
- if (pring->prt[i].lpfc_sli_rcv_unsol_event)
- (pring->prt[i].lpfc_sli_rcv_unsol_event)
- (phba, pring, saveq);
- match = 1;
- break;
- }
- }
- }
- if (match == 0) {
- /* Unexpected Rctl / Type received */
- /* Ring <ringno> handler: unexpected
- Rctl <Rctl> Type <Type> received */
+ if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0313 Ring %d handler: unexpected Rctl x%x "
"Type x%x received\n",
pring->ringno, Rctl, Type);
- }
+
return 1;
}
@@ -1552,6 +2133,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iotag: IOCB tag.
+ *
+ * This function looks up the iocb_lookup table to get the command iocb
+ * corresponding to the given iotag. This function is called with the
+ * hbalock held.
+ * This function returns the command iocb object if it finds the command
+ * iocb else returns NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, uint16_t iotag)
+{
+ struct lpfc_iocbq *cmd_iocb;
+
+ if (iotag != 0 && iotag <= phba->sli.last_iotag) {
+ cmd_iocb = phba->sli.iocbq_lookup[iotag];
+ list_del_init(&cmd_iocb->list);
+ pring->txcmplq_cnt--;
+ return cmd_iocb;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0372 iotag x%x is out off range: max iotag (x%x)\n",
+ iotag, phba->sli.last_iotag);
+ return NULL;
+}
+
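/*
 * A minimal sketch of the bounds-checked tag lookup used above; the names
 * are hypothetical (in the driver the table is phba->sli.iocbq_lookup and
 * the upper bound is phba->sli.last_iotag).
 */
#include <linux/types.h>

struct cmd_entry;	/* opaque command object */

static struct cmd_entry *lookup_by_tag(struct cmd_entry **table,
				       u16 last_tag, u16 tag)
{
	/* Tag 0 is never assigned; anything past last_tag is out of range. */
	if (tag == 0 || tag > last_tag)
		return NULL;
	return table[tag];
}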
+/**
* lpfc_sli_process_sol_iocb - process solicited iocb completion
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
@@ -1954,7 +2566,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_rampdown_queue_depth(phba);
+ phba->lpfc_rampdown_queue_depth(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
}
@@ -2068,39 +2680,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
}
/**
- * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings
+ * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @rspiocbp: Pointer to driver response IOCB object.
+ *
+ * This function is called from the worker thread when there is a slow-path
+ * response IOCB to process. This function chains all the response iocbs until
+ * seeing the iocb with the LE bit set. The function will call
+ * lpfc_sli_process_sol_iocb function if the response iocb indicates a
+ * completion of a command iocb. The function will call the
+ * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
+ * The function frees the resources or calls the completion handler if this
+ * iocb is an abort completion. The function returns NULL when the response
+ * iocb has the LE bit set and all the chained iocbs are processed, otherwise
+ * this function shall chain the iocb on to the iocb_continueq and return the
+ * response iocb passed in.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *rspiocbp)
+{
+ struct lpfc_iocbq *saveq;
+ struct lpfc_iocbq *cmdiocbp;
+ struct lpfc_iocbq *next_iocb;
+ IOCB_t *irsp = NULL;
+ uint32_t free_saveq;
+ uint8_t iocb_cmd_type;
+ lpfc_iocb_type type;
+ unsigned long iflag;
+ int rc;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ /* First add the response iocb to the continueq list */
+ list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
+ pring->iocb_continueq_cnt++;
+
+ /* Now, determine whether the list is complete for processing */
+ irsp = &rspiocbp->iocb;
+ if (irsp->ulpLe) {
+ /*
+ * By default, the driver expects to free all resources
+ * associated with this iocb completion.
+ */
+ free_saveq = 1;
+ saveq = list_get_first(&pring->iocb_continueq,
+ struct lpfc_iocbq, list);
+ irsp = &(saveq->iocb);
+ list_del_init(&pring->iocb_continueq);
+ pring->iocb_continueq_cnt = 0;
+
+ pring->stats.iocb_rsp++;
+
+ /*
+ * If resource errors reported from HBA, reduce
+ * queuedepths of the SCSI device.
+ */
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ phba->lpfc_rampdown_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
+
+ if (irsp->ulpStatus) {
+ /* Rsp ring <ringno> error: IOCB */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0328 Rsp Ring %d error: "
+ "IOCB Data: "
+ "x%x x%x x%x x%x "
+ "x%x x%x x%x x%x "
+ "x%x x%x x%x x%x "
+ "x%x x%x x%x x%x\n",
+ pring->ringno,
+ irsp->un.ulpWord[0],
+ irsp->un.ulpWord[1],
+ irsp->un.ulpWord[2],
+ irsp->un.ulpWord[3],
+ irsp->un.ulpWord[4],
+ irsp->un.ulpWord[5],
+ *(((uint32_t *) irsp) + 6),
+ *(((uint32_t *) irsp) + 7),
+ *(((uint32_t *) irsp) + 8),
+ *(((uint32_t *) irsp) + 9),
+ *(((uint32_t *) irsp) + 10),
+ *(((uint32_t *) irsp) + 11),
+ *(((uint32_t *) irsp) + 12),
+ *(((uint32_t *) irsp) + 13),
+ *(((uint32_t *) irsp) + 14),
+ *(((uint32_t *) irsp) + 15));
+ }
+
+ /*
+ * Fetch the IOCB command type and call the correct completion
+ * routine. Solicited and Unsolicited IOCBs on the ELS ring
+ * get freed back to the lpfc_iocb_list by the discovery
+ * kernel thread.
+ */
+ iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
+ type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
+ switch (type) {
+ case LPFC_SOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ break;
+
+ case LPFC_UNSOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (!rc)
+ free_saveq = 0;
+ break;
+
+ case LPFC_ABORT_IOCB:
+ cmdiocbp = NULL;
+ if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
+ cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
+ saveq);
+ if (cmdiocbp) {
+ /* Call the specified completion routine */
+ if (cmdiocbp->iocb_cmpl) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
+ (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
+ saveq);
+ spin_lock_irqsave(&phba->hbalock,
+ iflag);
+ } else
+ __lpfc_sli_release_iocbq(phba,
+ cmdiocbp);
+ }
+ break;
+
+ case LPFC_UNKNOWN_IOCB:
+ if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+ char adaptermsg[LPFC_MAX_ADPTMSG];
+ memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+ memcpy(&adaptermsg[0], (uint8_t *)irsp,
+ MAX_MSG_DATA);
+ dev_warn(&((phba->pcidev)->dev),
+ "lpfc%d: %s\n",
+ phba->brd_no, adaptermsg);
+ } else {
+ /* Unknown IOCB command */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0335 Unknown IOCB "
+ "command Data: x%x "
+ "x%x x%x x%x\n",
+ irsp->ulpCommand,
+ irsp->ulpStatus,
+ irsp->ulpIoTag,
+ irsp->ulpContext);
+ }
+ break;
+ }
+
+ if (free_saveq) {
+ list_for_each_entry_safe(rspiocbp, next_iocb,
+ &saveq->list, list) {
+ list_del(&rspiocbp->list);
+ __lpfc_sli_release_iocbq(phba, rspiocbp);
+ }
+ __lpfc_sli_release_iocbq(phba, saveq);
+ }
+ rspiocbp = NULL;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return rspiocbp;
+}
+
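/*
 * A minimal sketch of the "chain until the last-entry flag" accumulation
 * used above, assuming the kernel list API; the rsp_entry type and the
 * complete_chain callback are hypothetical stand-ins.
 */
#include <linux/list.h>

struct rsp_entry {
	struct list_head list;
	int last;	/* stands in for the LE bit (irsp->ulpLe) */
};

static void queue_or_complete(struct list_head *continueq,
			      struct rsp_entry *rsp,
			      void (*complete_chain)(struct list_head *chain))
{
	/* Always chain the new response onto the continuation list. */
	list_add_tail(&rsp->list, continueq);
	if (rsp->last) {
		/* Hand off the whole accumulated chain, then start fresh. */
		complete_chain(continueq);
		INIT_LIST_HEAD(continueq);
	}
}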
+/**
+ * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @mask: Host attention register mask for this ring.
*
- * This function is called from the worker thread when there is a ring
- * event for non-fcp rings. The caller does not hold any lock .
- * The function processes each response iocb in the response ring until it
- * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
- * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the
- * response iocb indicates a completion of a command iocb. The function
- * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
- * iocb. The function frees the resources or calls the completion handler if
- * this iocb is an abort completion. The function returns 0 when the allocated
- * iocbs are not freed, otherwise returns 1.
+ * This routine wraps the actual slow_ring event process routine from the
+ * API jump table function pointer from the lpfc_hba struct.
**/
-int
+void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint32_t mask)
{
+ phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
+}
+
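/*
 * A minimal sketch of the SLI3/SLI4 "API jump table" split introduced here:
 * the hba object carries a function pointer that is bound once at init time
 * to the _s3 or _s4 variant, and the exported entry point only forwards to
 * it. The hba type and names below are hypothetical stand-ins for the
 * lpfc_hba fields set up elsewhere in this patch.
 */
struct hba {
	void (*handle_slow_ring_event)(struct hba *hba, int ring,
				       unsigned int mask);
};

static void handle_slow_ring_event_s3(struct hba *hba, int ring,
				      unsigned int mask)
{
	/* SLI3 path: read the response ring from host memory */
}

static void handle_slow_ring_event_s4(struct hba *hba, int ring,
				      unsigned int mask)
{
	/* SLI4 path: drain the slow-path response work queue */
}

/* Bound once during per-device setup, based on the detected interface rev. */
static void bind_api(struct hba *hba, int sli_rev)
{
	hba->handle_slow_ring_event = (sli_rev == 4) ?
		handle_slow_ring_event_s4 : handle_slow_ring_event_s3;
}

/* Callers keep using one stable entry point and never test the revision. */
static void handle_slow_ring_event(struct hba *hba, int ring, unsigned int mask)
{
	hba->handle_slow_ring_event(hba, ring, mask);
}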
+/**
+ * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a ring event
+ * for non-fcp rings. The caller does not hold any lock. The function will
+ * remove each response iocb in the response ring and calls the handle
+ * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, uint32_t mask)
+{
struct lpfc_pgp *pgp;
IOCB_t *entry;
IOCB_t *irsp = NULL;
struct lpfc_iocbq *rspiocbp = NULL;
- struct lpfc_iocbq *next_iocb;
- struct lpfc_iocbq *cmdiocbp;
- struct lpfc_iocbq *saveq;
- uint8_t iocb_cmd_type;
- lpfc_iocb_type type;
- uint32_t status, free_saveq;
uint32_t portRspPut, portRspMax;
- int rc = 1;
unsigned long iflag;
+ uint32_t status;
pgp = &phba->port_gp[pring->ringno];
spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2128,7 +2916,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
phba->work_hs = HS_FFER3;
lpfc_handle_eratt(phba);
- return 1;
+ return;
}
rmb();
@@ -2173,138 +2961,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
- list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
-
- pring->iocb_continueq_cnt++;
- if (irsp->ulpLe) {
- /*
- * By default, the driver expects to free all resources
- * associated with this iocb completion.
- */
- free_saveq = 1;
- saveq = list_get_first(&pring->iocb_continueq,
- struct lpfc_iocbq, list);
- irsp = &(saveq->iocb);
- list_del_init(&pring->iocb_continueq);
- pring->iocb_continueq_cnt = 0;
-
- pring->stats.iocb_rsp++;
-
- /*
- * If resource errors reported from HBA, reduce
- * queuedepths of the SCSI device.
- */
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_rampdown_queue_depth(phba);
- spin_lock_irqsave(&phba->hbalock, iflag);
- }
-
- if (irsp->ulpStatus) {
- /* Rsp ring <ringno> error: IOCB */
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "0328 Rsp Ring %d error: "
- "IOCB Data: "
- "x%x x%x x%x x%x "
- "x%x x%x x%x x%x "
- "x%x x%x x%x x%x "
- "x%x x%x x%x x%x\n",
- pring->ringno,
- irsp->un.ulpWord[0],
- irsp->un.ulpWord[1],
- irsp->un.ulpWord[2],
- irsp->un.ulpWord[3],
- irsp->un.ulpWord[4],
- irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7),
- *(((uint32_t *) irsp) + 8),
- *(((uint32_t *) irsp) + 9),
- *(((uint32_t *) irsp) + 10),
- *(((uint32_t *) irsp) + 11),
- *(((uint32_t *) irsp) + 12),
- *(((uint32_t *) irsp) + 13),
- *(((uint32_t *) irsp) + 14),
- *(((uint32_t *) irsp) + 15));
- }
-
- /*
- * Fetch the IOCB command type and call the correct
- * completion routine. Solicited and Unsolicited
- * IOCBs on the ELS ring get freed back to the
- * lpfc_iocb_list by the discovery kernel thread.
- */
- iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
- type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
- if (type == LPFC_SOL_IOCB) {
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- rc = lpfc_sli_process_sol_iocb(phba, pring,
- saveq);
- spin_lock_irqsave(&phba->hbalock, iflag);
- } else if (type == LPFC_UNSOL_IOCB) {
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- rc = lpfc_sli_process_unsol_iocb(phba, pring,
- saveq);
- spin_lock_irqsave(&phba->hbalock, iflag);
- if (!rc)
- free_saveq = 0;
- } else if (type == LPFC_ABORT_IOCB) {
- if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
- ((cmdiocbp =
- lpfc_sli_iocbq_lookup(phba, pring,
- saveq)))) {
- /* Call the specified completion
- routine */
- if (cmdiocbp->iocb_cmpl) {
- spin_unlock_irqrestore(
- &phba->hbalock,
- iflag);
- (cmdiocbp->iocb_cmpl) (phba,
- cmdiocbp, saveq);
- spin_lock_irqsave(
- &phba->hbalock,
- iflag);
- } else
- __lpfc_sli_release_iocbq(phba,
- cmdiocbp);
- }
- } else if (type == LPFC_UNKNOWN_IOCB) {
- if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
-
- char adaptermsg[LPFC_MAX_ADPTMSG];
-
- memset(adaptermsg, 0,
- LPFC_MAX_ADPTMSG);
- memcpy(&adaptermsg[0], (uint8_t *) irsp,
- MAX_MSG_DATA);
- dev_warn(&((phba->pcidev)->dev),
- "lpfc%d: %s\n",
- phba->brd_no, adaptermsg);
- } else {
- /* Unknown IOCB command */
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0335 Unknown IOCB "
- "command Data: x%x "
- "x%x x%x x%x\n",
- irsp->ulpCommand,
- irsp->ulpStatus,
- irsp->ulpIoTag,
- irsp->ulpContext);
- }
- }
-
- if (free_saveq) {
- list_for_each_entry_safe(rspiocbp, next_iocb,
- &saveq->list, list) {
- list_del(&rspiocbp->list);
- __lpfc_sli_release_iocbq(phba,
- rspiocbp);
- }
- __lpfc_sli_release_iocbq(phba, saveq);
- }
- rspiocbp = NULL;
- }
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ /* Handle the response IOCB */
+ rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
+ spin_lock_irqsave(&phba->hbalock, iflag);
/*
* If the port response put pointer has not been updated, sync
@@ -2338,7 +2998,37 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
- return rc;
+ return;
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a pending
+ * ELS response iocb on the driver internal slow-path response iocb worker
+ * queue. The caller does not hold any lock. The function will remove each
+ * response iocb from the response worker queue and calls the handle
+ * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, uint32_t mask)
+{
+ struct lpfc_iocbq *irspiocbq;
+ unsigned long iflag;
+
+ while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
+ /* Get the response iocb from the head of work queue */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
+ irspiocbq, struct lpfc_iocbq, list);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ /* Process the response iocb */
+ lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+ }
}
/**
@@ -2420,7 +3110,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_brdready - Check for host status bits
+ * lpfc_sli_brdready_s3 - Check for sli3 host ready status
* @phba: Pointer to HBA context object.
* @mask: Bit mask to be checked.
*
@@ -2432,8 +3122,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
* function returns 1 when the HBA fails to restart, otherwise it returns
* zero.
**/
-int
-lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
+static int
+lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
uint32_t status;
int i = 0;
@@ -2477,6 +3167,56 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
return retval;
}
+/**
+ * lpfc_sli_brdready_s4 - Check for sli4 host ready status
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function checks the host status register to determine if the HBA is
+ * ready. This function will wait in a loop for the HBA to become ready.
+ * If the HBA is not ready, the function will reset the HBA PCI
+ * function again. The function returns 1 when the HBA fails to become ready,
+ * otherwise it returns zero.
+ **/
+static int
+lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
+{
+ uint32_t status;
+ int retval = 0;
+
+ /* Read the HBA Host Status Register */
+ status = lpfc_sli4_post_status_check(phba);
+
+ if (status) {
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+ lpfc_sli_brdrestart(phba);
+ status = lpfc_sli4_post_status_check(phba);
+ }
+
+ /* Check to see if any errors occurred during init */
+ if (status) {
+ phba->link_state = LPFC_HBA_ERROR;
+ retval = 1;
+ } else
+ phba->sli4_hba.intr_enable = 0;
+
+ return retval;
+}
+
+/**
+ * lpfc_sli_brdready - Wrapper func for checking the hba readiness
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
+ * from the API jump table function pointer from the lpfc_hba struct.
+ **/
+int
+lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
+{
+ return phba->lpfc_sli_brdready(phba, mask);
+}
+
#define BARRIER_TEST_PATTERN (0xdeadbeef)
/**
@@ -2532,7 +3272,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
mdelay(1);
if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
- if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
phba->pport->stopped)
goto restore_hc;
else
@@ -2613,7 +3353,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
return 1;
}
- psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
mempool_free(pmb, phba->mbox_mem_pool);
@@ -2636,10 +3378,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
}
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ psli->mbox_active = NULL;
phba->link_flag &= ~LS_IGNORE_ERATT;
spin_unlock_irq(&phba->hbalock);
- psli->mbox_active = NULL;
lpfc_hba_down_post(phba);
phba->link_state = LPFC_HBA_ERROR;
@@ -2647,7 +3389,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_brdreset - Reset the HBA
+ * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
* @phba: Pointer to HBA context object.
*
* This function resets the HBA by writing HC_INITFF to the control
@@ -2683,7 +3425,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
(cfg_value &
~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
- psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
+ psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
+
/* Now toggle INITFF bit in the Host Control Register */
writel(HC_INITFF, phba->HCregaddr);
mdelay(1);
@@ -2710,7 +3453,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_brdrestart - Restart the HBA
+ * lpfc_sli4_brdreset - Reset a sli-4 HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function resets a SLI4 HBA. This function disables PCI layer parity
+ * checking while it resets the device. The caller is not required to hold
+ * any locks.
+ *
+ * This function returns 0 always.
+ **/
+int
+lpfc_sli4_brdreset(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ uint16_t cfg_value;
+ uint8_t qindx;
+
+ /* Reset HBA */
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0295 Reset HBA Data: x%x x%x\n",
+ phba->pport->port_state, psli->sli_flag);
+
+ /* perform board reset */
+ phba->fc_eventTag = 0;
+ phba->pport->fc_myDID = 0;
+ phba->pport->fc_prevDID = 0;
+
+ /* Turn off parity checking and serr during the physical reset */
+ pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+ pci_write_config_word(phba->pcidev, PCI_COMMAND,
+ (cfg_value &
+ ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~(LPFC_PROCESS_LA);
+ phba->fcf.fcf_flag = 0;
+ /* Clean up the child queue list for the CQs */
+ list_del_init(&phba->sli4_hba.mbx_wq->list);
+ list_del_init(&phba->sli4_hba.els_wq->list);
+ list_del_init(&phba->sli4_hba.hdr_rq->list);
+ list_del_init(&phba->sli4_hba.dat_rq->list);
+ list_del_init(&phba->sli4_hba.mbx_cq->list);
+ list_del_init(&phba->sli4_hba.els_cq->list);
+ list_del_init(&phba->sli4_hba.rxq_cq->list);
+ for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
+ list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
+ for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
+ list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Now physically reset the device */
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0389 Performing PCI function reset!\n");
+ /* Perform FCoE PCI function reset */
+ lpfc_pci_function_reset(phba);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
* @phba: Pointer to HBA context object.
*
* This function is called in the SLI initialization code path to
@@ -2722,8 +3524,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
* The function does not guarantee completion of MBX_RESTART mailbox
* command before the return of this function.
**/
-int
-lpfc_sli_brdrestart(struct lpfc_hba *phba)
+static int
+lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
MAILBOX_t *mb;
struct lpfc_sli *psli;
@@ -2762,7 +3564,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
lpfc_sli_brdreset(phba);
phba->pport->stopped = 0;
phba->link_state = LPFC_INIT_START;
-
+ phba->hba_flag = 0;
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -2777,6 +3579,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI initialization code path to restart
+ * a SLI4 HBA. The caller is not required to hold any lock.
+ * At the end of the function, it calls lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static int
+lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+
+
+ /* Restart HBA */
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0296 Restart HBA Data: x%x x%x\n",
+ phba->pport->port_state, psli->sli_flag);
+
+ lpfc_sli4_brdreset(phba);
+
+ spin_lock_irq(&phba->hbalock);
+ phba->pport->stopped = 0;
+ phba->link_state = LPFC_INIT_START;
+ phba->hba_flag = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+ psli->stats_start = get_seconds();
+
+ lpfc_hba_down_post(phba);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_brdrestart - Wrapper func for restarting hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
+ * API jump table function pointer from the lpfc_hba struct.
+ **/
+int
+lpfc_sli_brdrestart(struct lpfc_hba *phba)
+{
+ return phba->lpfc_sli_brdrestart(phba);
+}
+
+/**
* lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
* @phba: Pointer to HBA context object.
*
@@ -2940,7 +3791,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
if (!pmb)
return -ENOMEM;
- pmbox = &pmb->mb;
+ pmbox = &pmb->u.mb;
/* Initialize the struct lpfc_sli_hbq structure for each hbq */
phba->link_state = LPFC_INIT_MBX_CMDS;
@@ -2984,6 +3835,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called during the SLI initialization to configure
+ * all the HBQs and post buffers to the HBQ. The caller is not
+ * required to hold any locks. This function will return zero if successful
+ * else it will return negative error code.
+ **/
+static int
+lpfc_sli4_rb_setup(struct lpfc_hba *phba)
+{
+ phba->hbq_in_use = 1;
+ phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
+ phba->hbq_count = 1;
+ /* Initially populate or replenish the HBQs */
+ lpfc_sli_hbqbuf_init_hbqs(phba, 0);
+ return 0;
+}
+
+/**
* lpfc_sli_config_port - Issue config port mailbox command
* @phba: Pointer to HBA context object.
* @sli_mode: sli mode - 2/3
@@ -3047,33 +3918,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0442 Adapter failed to init, mbxCmd x%x "
"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
- pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
+ pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
spin_lock_irq(&phba->hbalock);
- phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
+ phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
rc = -ENXIO;
- } else
+ } else {
+ /* Allow asynchronous mailbox command to go through */
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
done = 1;
+ }
}
if (!done) {
rc = -EINVAL;
goto do_prep_failed;
}
- if (pmb->mb.un.varCfgPort.sli_mode == 3) {
- if (!pmb->mb.un.varCfgPort.cMA) {
+ if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
+ if (!pmb->u.mb.un.varCfgPort.cMA) {
rc = -ENXIO;
goto do_prep_failed;
}
- if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) {
+ if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
- phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi;
+ phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
+ phba->max_vports = (phba->max_vpi > phba->max_vports) ?
+ phba->max_vpi : phba->max_vports;
+
} else
phba->max_vpi = 0;
- if (pmb->mb.un.varCfgPort.gerbm)
+ if (pmb->u.mb.un.varCfgPort.gdss)
+ phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
+ if (pmb->u.mb.un.varCfgPort.gerbm)
phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
- if (pmb->mb.un.varCfgPort.gcrp)
+ if (pmb->u.mb.un.varCfgPort.gcrp)
phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
- if (pmb->mb.un.varCfgPort.ginb) {
+ if (pmb->u.mb.un.varCfgPort.ginb) {
phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
@@ -3089,7 +3970,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
}
if (phba->cfg_enable_bg) {
- if (pmb->mb.un.varCfgPort.gbg)
+ if (pmb->u.mb.un.varCfgPort.gbg)
phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3184,8 +4065,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
if (rc)
goto lpfc_sli_hba_setup_error;
}
-
+ spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag |= LPFC_PROCESS_LA;
+ spin_unlock_irq(&phba->hbalock);
rc = lpfc_config_port_post(phba);
if (rc)
@@ -3200,6 +4082,488 @@ lpfc_sli_hba_setup_error:
return rc;
}
+/**
+ * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
+ * @phba: Pointer to HBA context object.
+ * @mboxq: mailbox pointer.
+ *
+ * This function issues a dump mailbox command to read config region
+ * 23, parses the records in the region, and populates the driver
+ * data structure.
+ **/
+static int
+lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
+ LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_dmabuf *mp;
+ struct lpfc_mqe *mqe;
+ uint32_t data_length;
+ int rc;
+
+ /* Program the default value of vlan_id and fc_map */
+ phba->valid_vlan = 0;
+ phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+ phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+ phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
+
+ mqe = &mboxq->u.mqe;
+ if (lpfc_dump_fcoe_param(phba, mboxq))
+ return -ENOMEM;
+
+ mp = (struct lpfc_dmabuf *) mboxq->context1;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):2571 Mailbox cmd x%x Status x%x "
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+ "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+ "CQ: x%x x%x x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ bf_get(lpfc_mqe_command, mqe),
+ bf_get(lpfc_mqe_status, mqe),
+ mqe->un.mb_words[0], mqe->un.mb_words[1],
+ mqe->un.mb_words[2], mqe->un.mb_words[3],
+ mqe->un.mb_words[4], mqe->un.mb_words[5],
+ mqe->un.mb_words[6], mqe->un.mb_words[7],
+ mqe->un.mb_words[8], mqe->un.mb_words[9],
+ mqe->un.mb_words[10], mqe->un.mb_words[11],
+ mqe->un.mb_words[12], mqe->un.mb_words[13],
+ mqe->un.mb_words[14], mqe->un.mb_words[15],
+ mqe->un.mb_words[16], mqe->un.mb_words[50],
+ mboxq->mcqe.word0,
+ mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
+ mboxq->mcqe.trailer);
+
+ if (rc) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ return -EIO;
+ }
+ data_length = mqe->un.mb_words[5];
+ if (data_length > DMP_FCOEPARAM_RGN_SIZE)
+ return -EIO;
+
+ lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ return 0;
+}
+
+/**
+ * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the LPFC_MBOXQ_t structure.
+ * @vpd: pointer to the memory to hold resulting port vpd data.
+ * @vpd_size: On input, the number of bytes allocated to @vpd.
+ * On output, the number of data bytes in @vpd.
+ *
+ * This routine executes a READ_REV SLI4 mailbox command. In
+ * addition, this routine gets the port vpd data.
+ *
+ * Return codes
+ * 0 - successful
+ * ENOMEM - could not allocate memory.
+ **/
+static int
+lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+ uint8_t *vpd, uint32_t *vpd_size)
+{
+ int rc = 0;
+ uint32_t dma_size;
+ struct lpfc_dmabuf *dmabuf;
+ struct lpfc_mqe *mqe;
+
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ /*
+ * Get a DMA buffer for the vpd data resulting from the READ_REV
+ * mailbox command.
+ */
+ dma_size = *vpd_size;
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ dma_size,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ return -ENOMEM;
+ }
+ memset(dmabuf->virt, 0, dma_size);
+
+ /*
+ * The SLI4 implementation of READ_REV conflicts at word1,
+ * bits 31:16 and SLI4 adds vpd functionality not present
+ * in SLI3. This code corrects the conflicts.
+ */
+ lpfc_read_rev(phba, mboxq);
+ mqe = &mboxq->u.mqe;
+ mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
+ mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
+ mqe->un.read_rev.word1 &= 0x0000FFFF;
+ bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
+ bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc) {
+ dma_free_coherent(&phba->pcidev->dev, dma_size,
+ dmabuf->virt, dmabuf->phys);
+ return -EIO;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0380 Mailbox cmd x%x Status x%x "
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+ "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+ "CQ: x%x x%x x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ bf_get(lpfc_mqe_command, mqe),
+ bf_get(lpfc_mqe_status, mqe),
+ mqe->un.mb_words[0], mqe->un.mb_words[1],
+ mqe->un.mb_words[2], mqe->un.mb_words[3],
+ mqe->un.mb_words[4], mqe->un.mb_words[5],
+ mqe->un.mb_words[6], mqe->un.mb_words[7],
+ mqe->un.mb_words[8], mqe->un.mb_words[9],
+ mqe->un.mb_words[10], mqe->un.mb_words[11],
+ mqe->un.mb_words[12], mqe->un.mb_words[13],
+ mqe->un.mb_words[14], mqe->un.mb_words[15],
+ mqe->un.mb_words[16], mqe->un.mb_words[50],
+ mboxq->mcqe.word0,
+ mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
+ mboxq->mcqe.trailer);
+
+ /*
+ * The available vpd length cannot be bigger than the
+ * DMA buffer passed to the port. Catch the less than
+ * case and update the caller's size.
+ */
+ if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
+ *vpd_size = mqe->un.read_rev.avail_vpd_len;
+
+ lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
+ dma_free_coherent(&phba->pcidev->dev, dma_size,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ return 0;
+}
+
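/*
 * A minimal sketch of the coherent-DMA buffer handling used by the READ_REV
 * path above: allocate, let the device DMA into it, copy the result out,
 * free. The do_mailbox_io callback is a hypothetical stand-in for posting
 * the mailbox command and polling for its completion.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int read_into_dma_buffer(struct device *dev, void *out, size_t len,
				int (*do_mailbox_io)(dma_addr_t busaddr,
						     size_t len))
{
	dma_addr_t busaddr;
	void *virt;
	int rc;

	virt = dma_alloc_coherent(dev, len, &busaddr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	rc = do_mailbox_io(busaddr, len);	/* device fills the buffer */
	if (!rc)
		memcpy(out, virt, len);		/* hand the data to the caller */

	dma_free_coherent(dev, len, virt, busaddr);
	return rc;
}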
+/**
+ * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to explicitly arm the SLI4 device's completion and
+ * event queues.
+ **/
+static void
+lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
+{
+ uint8_t fcp_eqidx;
+
+ lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
+ lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
+ lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
+ lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
+ LPFC_QUEUE_REARM);
+ lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
+ lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
+ LPFC_QUEUE_REARM);
+}
+
+/**
+ * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is the main SLI4 device initialization PCI function. This
+ * function is called by the HBA initialization code, HBA reset code and
+ * HBA error attention handler code. Caller is not required to hold any
+ * locks.
+ **/
+int
+lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+{
+ int rc;
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mqe *mqe;
+ uint8_t *vpd;
+ uint32_t vpd_size;
+ uint32_t ftr_rsp = 0;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
+ struct lpfc_vport *vport = phba->pport;
+ struct lpfc_dmabuf *mp;
+
+ /* Perform a PCI function reset to start from clean */
+ rc = lpfc_pci_function_reset(phba);
+ if (unlikely(rc))
+ return -ENODEV;
+
+ /* Check the HBA Host Status Register for readiness */
+ rc = lpfc_sli4_post_status_check(phba);
+ if (unlikely(rc))
+ return -ENODEV;
+ else {
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+ }
+
+ /*
+ * Allocate a single mailbox container for initializing the
+ * port.
+ */
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ /*
+ * Continue initialization with default values even if driver failed
+ * to read FCoE param config regions
+ */
+ if (lpfc_sli4_read_fcoe_params(phba, mboxq))
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2570 Failed to read FCoE parameters \n");
+
+ /* Issue READ_REV to collect vpd and FW information. */
+ vpd_size = PAGE_SIZE;
+ vpd = kzalloc(vpd_size, GFP_KERNEL);
+ if (!vpd) {
+ rc = -ENOMEM;
+ goto out_free_mbox;
+ }
+
+ rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
+ if (unlikely(rc))
+ goto out_free_vpd;
+
+ mqe = &mboxq->u.mqe;
+ if ((bf_get(lpfc_mbx_rd_rev_sli_lvl,
+ &mqe->un.read_rev) != LPFC_SLI_REV4) ||
+ (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0376 READ_REV Error. SLI Level %d "
+ "FCoE enabled %d\n",
+ bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev),
+ bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
+ rc = -EIO;
+ goto out_free_vpd;
+ }
+ /* Single threaded at this point, no need for lock */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= HBA_FCOE_SUPPORT;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * Evaluate the read rev and vpd data. Populate the driver
+ * state with the results. If this routine fails, the failure
+ * is not fatal as the driver will use generic values.
+ */
+ rc = lpfc_parse_vpd(phba, vpd, vpd_size);
+ if (unlikely(!rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0377 Error %d parsing vpd. "
+ "Using defaults.\n", rc);
+ rc = 0;
+ }
+
+ /* By now, we should have determined the SLI revision; hard code it for now */
+ phba->sli_rev = LPFC_SLI_REV4;
+
+ /*
+ * Discover the port's supported feature set and match it against the
+ * hosts requests.
+ */
+ lpfc_request_features(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_vpd;
+ }
+
+ /*
+ * The port must support FCP initiator mode as this is the
+ * only mode running in the host.
+ */
+ if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "0378 No support for fcpi mode.\n");
+ ftr_rsp++;
+ }
+
+ /*
+ * If the port cannot support the host's requested features
+ * then turn off the global config parameters to disable the
+ * feature in the driver. This is not a fatal error.
+ */
+ if ((phba->cfg_enable_bg) &&
+ !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
+ ftr_rsp++;
+
+ if (phba->max_vpi && phba->cfg_enable_npiv &&
+ !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+ ftr_rsp++;
+
+ if (ftr_rsp) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "0379 Feature Mismatch Data: x%08x %08x "
+ "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
+ mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
+ phba->cfg_enable_npiv, phba->max_vpi);
+ if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
+ phba->cfg_enable_bg = 0;
+ if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+ phba->cfg_enable_npiv = 0;
+ }
+
+ /* These SLI3 features are assumed in SLI4 */
+ spin_lock_irq(&phba->hbalock);
+ phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Read the port's service parameters. */
+ lpfc_read_sparam(phba, mboxq, vport->vpi);
+ mboxq->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ mp = (struct lpfc_dmabuf *) mboxq->context1;
+ if (rc == MBX_SUCCESS) {
+ memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
+ rc = 0;
+ }
+
+ /*
+ * This memory was allocated by the lpfc_read_sparam routine. Release
+ * it to the mbuf pool.
+ */
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mboxq->context1 = NULL;
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0382 READ_SPARAM command failed "
+ "status %d, mbxStatus x%x\n",
+ rc, bf_get(lpfc_mqe_status, mqe));
+ phba->link_state = LPFC_HBA_ERROR;
+ rc = -EIO;
+ goto out_free_vpd;
+ }
+
+ if (phba->cfg_soft_wwnn)
+ u64_to_wwn(phba->cfg_soft_wwnn,
+ vport->fc_sparam.nodeName.u.wwn);
+ if (phba->cfg_soft_wwpn)
+ u64_to_wwn(phba->cfg_soft_wwpn,
+ vport->fc_sparam.portName.u.wwn);
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+
+ /* Update the fc_host data structures with new wwn. */
+ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+ fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+
+ /* Register SGL pool to the device using non-embedded mailbox command */
+ rc = lpfc_sli4_post_sgl_list(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0582 Error %d during sgl post operation", rc);
+ rc = -ENODEV;
+ goto out_free_vpd;
+ }
+
+ /* Register SCSI SGL pool to the device */
+ rc = lpfc_sli4_repost_scsi_sgl_list(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "0383 Error %d during scsi sgl post opeation",
+ rc);
+ /* Some Scsi buffers were moved to the abort scsi list */
+ /* A pci function reset will repost them */
+ rc = -ENODEV;
+ goto out_free_vpd;
+ }
+
+ /* Post the rpi header region to the device. */
+ rc = lpfc_sli4_post_all_rpi_hdrs(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0393 Error %d during rpi post operation\n",
+ rc);
+ rc = -ENODEV;
+ goto out_free_vpd;
+ }
+ /* Temporary initialization of lpfc_fip_flag to non-fip */
+ bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
+
+ /* Set up all the queues to the device */
+ rc = lpfc_sli4_queue_setup(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0381 Error %d during queue setup.\n ", rc);
+ goto out_stop_timers;
+ }
+
+ /* Arm the CQs and then EQs on device */
+ lpfc_sli4_arm_cqeq_intr(phba);
+
+ /* Indicate device interrupt mode */
+ phba->sli4_hba.intr_enable = 1;
+
+ /* Allow asynchronous mailbox command to go through */
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Post receive buffers to the device */
+ lpfc_sli4_rb_setup(phba);
+
+ /* Start the ELS watchdog timer */
+ /*
+ * The driver for SLI4 is not yet ready to process timeouts
+ * or interrupts. Once it is, the comment bars can be removed.
+ */
+ /* mod_timer(&vport->els_tmofunc,
+ * jiffies + HZ * (phba->fc_ratov*2)); */
+
+ /* Start heart beat timer */
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ phba->hb_outstanding = 0;
+ phba->last_completion_time = jiffies;
+
+ /* Start error attention (ERATT) polling timer */
+ mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+
+ /*
+ * The port is ready, set the host's link state to LINK_DOWN
+ * in preparation for link interrupts.
+ */
+ lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
+ mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ lpfc_set_loopback_flag(phba);
+ /* Change driver state to LPFC_LINK_DOWN right before init link */
+ spin_lock_irq(&phba->hbalock);
+ phba->link_state = LPFC_LINK_DOWN;
+ spin_unlock_irq(&phba->hbalock);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (unlikely(rc != MBX_NOT_FINISHED)) {
+ kfree(vpd);
+ return 0;
+ } else
+ rc = -EIO;
+
+ /* Unset all the queues set up in this routine when error out */
+ if (rc)
+ lpfc_sli4_queue_unset(phba);
+
+out_stop_timers:
+ if (rc)
+ lpfc_stop_hba_timers(phba);
+out_free_vpd:
+ kfree(vpd);
+out_free_mbox:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
+}
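/*
 * A minimal sketch of the goto-based cleanup ladder that the setup routine
 * above relies on (out_stop_timers / out_free_vpd / out_free_mbox): each
 * acquired resource adds one label, and a failure jumps to the label that
 * releases everything acquired so far, in reverse order. The acquire/release
 * helpers are hypothetical stubs.
 */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int setup_example(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		return rc;		/* nothing to undo yet */

	rc = acquire_b();
	if (rc)
		goto out_release_a;

	rc = acquire_c();
	if (rc)
		goto out_release_b;

	return 0;			/* success: keep everything */

out_release_b:
	release_b();
out_release_a:
	release_a();
	return rc;
}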
/**
* lpfc_mbox_timeout - Timeout call back function for mbox timer
@@ -3244,7 +4608,7 @@ void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
- MAILBOX_t *mb = &pmbox->mb;
+ MAILBOX_t *mb = &pmbox->u.mb;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
@@ -3281,7 +4645,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
spin_unlock_irq(&phba->pport->work_port_lock);
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_UNKNOWN;
- psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
pring = &psli->ring[psli->fcp_ring];
@@ -3289,32 +4653,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0345 Resetting board due to mailbox timeout\n");
- /*
- * lpfc_offline calls lpfc_sli_hba_down which will clean up
- * on oustanding mailbox commands.
- */
- /* If resets are disabled then set error state and return. */
- if (!phba->cfg_enable_hba_reset) {
- phba->link_state = LPFC_HBA_ERROR;
- return;
- }
- lpfc_offline_prep(phba);
- lpfc_offline(phba);
- lpfc_sli_brdrestart(phba);
- lpfc_online(phba);
- lpfc_unblock_mgmt_io(phba);
- return;
+
+ /* Reset the HBA device */
+ lpfc_reset_hba(phba);
}
/**
- * lpfc_sli_issue_mbox - Issue a mailbox command to firmware
+ * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
* @phba: Pointer to HBA context object.
* @pmbox: Pointer to mailbox object.
* @flag: Flag indicating how the mailbox need to be processed.
*
* This function is called by discovery code and HBA management code
- * to submit a mailbox command to firmware. This function gets the
- * hbalock to protect the data structures.
+ * to submit a mailbox command to firmware with SLI-3 interface spec. This
+ * function gets the hbalock to protect the data structures.
* The mailbox command can be submitted in polling mode, in which case
* this function will wait in a polling loop for the completion of the
* mailbox.
@@ -3332,8 +4684,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
* return codes the caller owns the mailbox command after the return of
* the function.
**/
-int
-lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
+static int
+lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
+ uint32_t flag)
{
MAILBOX_t *mb;
struct lpfc_sli *psli = &phba->sli;
@@ -3349,6 +4702,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
spin_lock_irqsave(&phba->hbalock, drvr_flag);
if (!pmbox) {
/* processing mbox queue from intr_handler */
+ if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ return MBX_SUCCESS;
+ }
processing_queue = 1;
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
pmbox = lpfc_mbox_get(phba);
@@ -3365,7 +4722,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
lpfc_printf_log(phba, KERN_ERR,
LOG_MBOX | LOG_VPORT,
"1806 Mbox x%x failed. No vport\n",
- pmbox->mb.mbxCommand);
+ pmbox->u.mb.mbxCommand);
dump_stack();
goto out_not_finished;
}
@@ -3385,21 +4742,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
psli = &phba->sli;
- mb = &pmbox->mb;
+ mb = &pmbox->u.mb;
status = MBX_SUCCESS;
if (phba->link_state == LPFC_HBA_ERROR) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):0311 Mailbox command x%x cannot "
+ "issue Data: x%x x%x\n",
+ pmbox->vport ? pmbox->vport->vpi : 0,
+ pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
goto out_not_finished;
}
if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
!(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
- LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2528 Mailbox command x%x cannot "
+ "issue Data: x%x x%x\n",
+ pmbox->vport ? pmbox->vport->vpi : 0,
+ pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
goto out_not_finished;
}
@@ -3413,14 +4778,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2529 Mailbox command x%x "
+ "cannot issue Data: x%x x%x\n",
+ pmbox->vport ? pmbox->vport->vpi : 0,
+ pmbox->u.mb.mbxCommand,
+ psli->sli_flag, flag);
goto out_not_finished;
}
- if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
+ if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2530 Mailbox command x%x "
+ "cannot issue Data: x%x x%x\n",
+ pmbox->vport ? pmbox->vport->vpi : 0,
+ pmbox->u.mb.mbxCommand,
+ psli->sli_flag, flag);
goto out_not_finished;
}
@@ -3462,12 +4837,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
/* If we are not polling, we MUST be in SLI2 mode */
if (flag != MBX_POLL) {
- if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
+ if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
(mb->mbxCommand != MBX_KILL_BOARD)) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2531 Mailbox command x%x "
+ "cannot issue Data: x%x x%x\n",
+ pmbox->vport ? pmbox->vport->vpi : 0,
+ pmbox->u.mb.mbxCommand,
+ psli->sli_flag, flag);
goto out_not_finished;
}
/* timeout active mbox command */
@@ -3506,7 +4886,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
/* next set own bit for the adapter and copy over command word */
mb->mbxOwner = OWN_CHIP;
- if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+ if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* First copy command data to host SLIM area */
lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
} else {
@@ -3529,7 +4909,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if (mb->mbxCommand == MBX_CONFIG_PORT) {
/* switch over to host mailbox */
- psli->sli_flag |= LPFC_SLI2_ACTIVE;
+ psli->sli_flag |= LPFC_SLI_ACTIVE;
}
}
@@ -3552,7 +4932,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
writel(CA_MBATT, phba->CAregaddr);
readl(phba->CAregaddr); /* flush */
- if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+ if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* First read mbox status word */
word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
@@ -3591,7 +4971,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
spin_lock_irqsave(&phba->hbalock, drvr_flag);
}
- if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+ if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* First copy command data */
word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
@@ -3604,7 +4984,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if (((slimword0 & OWN_CHIP) != OWN_CHIP)
&& slimmb->mbxStatus) {
psli->sli_flag &=
- ~LPFC_SLI2_ACTIVE;
+ ~LPFC_SLI_ACTIVE;
word0 = slimword0;
}
}
@@ -3616,7 +4996,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
ha_copy = readl(phba->HAregaddr);
}
- if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+ if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
} else {
@@ -3643,13 +5023,420 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
out_not_finished:
if (processing_queue) {
- pmbox->mb.mbxStatus = MBX_NOT_FINISHED;
+ pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
lpfc_mbox_cmpl_put(phba, pmbox);
}
return MBX_NOT_FINISHED;
}
/**
+ * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * The function posts a mailbox to the port. The mailbox is expected
+ * to be completely filled in and ready for the port to operate on it.
+ * This routine executes a synchronous completion operation on the
+ * mailbox by polling for its completion.
+ *
+ * The caller must not be holding any locks when calling this routine.
+ *
+ * Returns:
+ * MBX_SUCCESS - mailbox posted successfully
+ * Any of the MBX error values.
+ **/
+static int
+lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ int rc = MBX_SUCCESS;
+ unsigned long iflag;
+ uint32_t db_ready;
+ uint32_t mcqe_status;
+ uint32_t mbx_cmnd;
+ unsigned long timeout;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_mqe *mb = &mboxq->u.mqe;
+ struct lpfc_bmbx_create *mbox_rgn;
+ struct dma_address *dma_address;
+ struct lpfc_register bmbx_reg;
+
+ /*
+ * Only one mailbox can be active to the bootstrap mailbox region
+ * at a time and there is no queueing provided.
+ */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2532 Mailbox command x%x (x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, MBX_POLL);
+ return MBXERR_ERROR;
+ }
+ /* The server grabs the token and owns it until release */
+ psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = mboxq;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ /*
+ * Initialize the bootstrap memory region to avoid stale data areas
+ * in the mailbox post. Then copy the caller's mailbox contents to
+ * the bmbx mailbox region.
+ */
+ mbx_cmnd = bf_get(lpfc_mqe_command, mb);
+ memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
+ lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
+ sizeof(struct lpfc_mqe));
+
+ /* Post the high mailbox dma address to the port and wait for ready. */
+ dma_address = &phba->sli4_hba.bmbx.dma_address;
+ writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
+
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
+ * 1000) + jiffies;
+ do {
+ bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
+ db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
+ if (!db_ready)
+ msleep(2);
+
+ if (time_after(jiffies, timeout)) {
+ rc = MBXERR_ERROR;
+ goto exit;
+ }
+ } while (!db_ready);
+
+ /* Post the low mailbox dma address to the port. */
+ writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
+ * 1000) + jiffies;
+ do {
+ bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
+ db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
+ if (!db_ready)
+ msleep(2);
+
+ if (time_after(jiffies, timeout)) {
+ rc = MBXERR_ERROR;
+ goto exit;
+ }
+ } while (!db_ready);
+
+ /*
+ * Read the CQ to ensure the mailbox has completed.
+ * If so, update the mailbox status so that the upper layers
+ * can complete the request normally.
+ */
+ lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
+ sizeof(struct lpfc_mqe));
+ mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
+ lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
+ sizeof(struct lpfc_mcqe));
+ mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
+
+ /* Prefix the mailbox status with range x4000 to note SLI4 status. */
+ if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
+ bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
+ rc = MBXERR_ERROR;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
+ " x%x x%x CQ: x%x x%x x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ bf_get(lpfc_mqe_status, mb),
+ mb->un.mb_words[0], mb->un.mb_words[1],
+ mb->un.mb_words[2], mb->un.mb_words[3],
+ mb->un.mb_words[4], mb->un.mb_words[5],
+ mb->un.mb_words[6], mb->un.mb_words[7],
+ mb->un.mb_words[8], mb->un.mb_words[9],
+ mb->un.mb_words[10], mb->un.mb_words[11],
+ mb->un.mb_words[12], mboxq->mcqe.word0,
+ mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
+ mboxq->mcqe.trailer);
+exit:
+ /* We are holding the token, no need for a lock when releasing it */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return rc;
+}
+
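/*
 * A minimal sketch of the bounded doorbell-ready polling done twice in the
 * routine above, assuming a memory-mapped register with a single ready bit;
 * the register layout and EXAMPLE_READY_BIT mask are hypothetical (the
 * driver reads its BMBX register and extracts the bit with bf_get()).
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

#define EXAMPLE_READY_BIT	0x1	/* hypothetical "bootstrap ready" bit */

static int wait_for_doorbell_ready(void __iomem *reg, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (!(readl(reg) & EXAMPLE_READY_BIT)) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* port never signalled ready */
		msleep(2);			/* same 2 ms backoff as above */
	}
	return 0;
}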
+/**
+ * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox need to be processed.
+ *
+ * This function is called by discovery code and HBA management code to submit
+ * a mailbox command to firmware with SLI-4 interface spec.
+ *
+ * Return codes: the caller owns the mailbox command after the return of the
+ * function.
+ **/
+static int
+lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+ uint32_t flag)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ unsigned long iflags;
+ int rc;
+
+ /* Detect polling mode and jump to a handler */
+ if (!phba->sli4_hba.intr_enable) {
+ if (flag == MBX_POLL)
+ rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+ else
+ rc = -EIO;
+ if (rc != MBX_SUCCESS)
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2541 Mailbox command x%x "
+ "(x%x) cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ return rc;
+ } else if (flag == MBX_POLL) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2542 Mailbox command x%x (x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ return -EIO;
+ }
+
+ /* Now, interrupt mode asynchronous mailbox command */
+ rc = lpfc_mbox_cmd_check(phba, mboxq);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2543 Mailbox command x%x (x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+ rc = lpfc_mbox_dev_check(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2544 Mailbox command x%x (x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, flag);
+ goto out_not_finished;
+ }
+
+ /* Put the mailbox command to the driver internal FIFO */
+ psli->slistat.mbox_busy++;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_mbox_put(phba, mboxq);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0354 Mbox cmd issue - Enqueue Data: "
+ "x%x (x%x) x%x x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0xffffff,
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ phba->pport->port_state,
+ psli->sli_flag, MBX_NOWAIT);
+ /* Wake up worker thread to transport mailbox command from head */
+ lpfc_worker_wake_up(phba);
+
+ return MBX_BUSY;
+
+out_not_finished:
+ return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called by the worker thread to send a mailbox command to
+ * SLI4 HBA firmware.
+ *
+ **/
+int
+lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *mboxq;
+ int rc = MBX_SUCCESS;
+ unsigned long iflags;
+ struct lpfc_mqe *mqe;
+ uint32_t mbx_cmnd;
+
+ /* Check interrupt mode before posting the async mailbox command */
+ if (unlikely(!phba->sli4_hba.intr_enable))
+ return MBX_NOT_FINISHED;
+
+ /* Check for mailbox command service token */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return MBX_NOT_FINISHED;
+ }
+ if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return MBX_NOT_FINISHED;
+ }
+ if (unlikely(phba->sli.mbox_active)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0384 There is pending active mailbox cmd\n");
+ return MBX_NOT_FINISHED;
+ }
+ /* Take the mailbox command service token */
+ psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+ /* Get the next mailbox command from head of queue */
+ mboxq = lpfc_mbox_get(phba);
+
+ /* If no more mailbox commands are waiting to be posted, we're done */
+ if (!mboxq) {
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return MBX_SUCCESS;
+ }
+ phba->sli.mbox_active = mboxq;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ /* Check device readiness for posting mailbox command */
+ rc = lpfc_mbox_dev_check(phba);
+ if (unlikely(rc))
+ /* Driver clean routine will clean up pending mailbox */
+ goto out_not_finished;
+
+ /* Prepare the mbox command to be posted */
+ mqe = &mboxq->u.mqe;
+ mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
+
+ /* Start timer for the mbox_tmo and log some mailbox post messages */
+ mod_timer(&psli->mbox_tmo, (jiffies +
+ (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
+ "x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ phba->pport->port_state, psli->sli_flag);
+
+ if (mbx_cmnd != MBX_HEARTBEAT) {
+ if (mboxq->vport) {
+ lpfc_debugfs_disc_trc(mboxq->vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX Send vport: cmd:x%x mb:x%x x%x",
+ mbx_cmnd, mqe->un.mb_words[0],
+ mqe->un.mb_words[1]);
+ } else {
+ lpfc_debugfs_disc_trc(phba->pport,
+ LPFC_DISC_TRC_MBOX,
+ "MBOX Send: cmd:x%x mb:x%x x%x",
+ mbx_cmnd, mqe->un.mb_words[0],
+ mqe->un.mb_words[1]);
+ }
+ }
+ psli->slistat.mbox_cmd++;
+
+ /* Post the mailbox command to the port */
+ rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "(%d):2533 Mailbox command x%x (x%x) "
+ "cannot issue Data: x%x x%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ psli->sli_flag, MBX_NOWAIT);
+ goto out_not_finished;
+ }
+
+ return rc;
+
+out_not_finished:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ __lpfc_mbox_cmpl_put(phba, mboxq);
+ /* Release the token */
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox need to be processed.
+ *
+ * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine through
+ * the API jump table function pointer in the lpfc_hba struct.
+ *
+ * Return codes: the caller owns the mailbox command after the return of the
+ * function.
+ **/
+int
+lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
+{
+ return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
+}
+
+/**
+ * lpfc_mbox_api_table_setup - Set up mbox api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the mbox interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+ switch (dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
+ phba->lpfc_sli_handle_slow_ring_event =
+ lpfc_sli_handle_slow_ring_event_s3;
+ phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
+ phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
+ phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
+ break;
+ case LPFC_PCI_DEV_OC:
+ phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
+ phba->lpfc_sli_handle_slow_ring_event =
+ lpfc_sli_handle_slow_ring_event_s4;
+ phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
+ phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
+ phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1420 Invalid HBA PCI-device group: 0x%x\n",
+ dev_grp);
+ return -ENODEV;
+ break;
+ }
+ return 0;
+}
+
+/**
* __lpfc_sli_ringtx_put - Add an iocb to the txq
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
@@ -3701,35 +5488,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
/**
- * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb
+ * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
* @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
+ * @ring_number: SLI ring number to issue iocb on.
* @piocb: Pointer to command iocb.
* @flag: Flag indicating if this command can be put into txq.
*
- * __lpfc_sli_issue_iocb is used by other functions in the driver
- * to issue an iocb command to the HBA. If the PCI slot is recovering
- * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT
- * flag is turned on, the function returns IOCB_ERROR.
- * When the link is down, this function allows only iocbs for
- * posting buffers.
- * This function finds next available slot in the command ring and
- * posts the command to the available slot and writes the port
- * attention register to request HBA start processing new iocb.
- * If there is no slot available in the ring and
- * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the
- * txq, otherwise the function returns IOCB_BUSY.
- *
- * This function is called with hbalock held.
- * The function will return success after it successfully submit the
- * iocb to firmware or after adding to the txq.
+ * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
+ * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
+ * flag is turned on, the function returns IOCB_ERROR. When the link is down,
+ * this function allows only iocbs for posting buffers. This function finds
+ * next available slot in the command ring and posts the command to the
+ * available slot and writes the port attention register to request HBA start
+ * processing new iocb. If there is no slot available in the ring and
+ * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
+ * the function returns IOCB_BUSY.
+ *
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submits the iocb to firmware or after adding it to the
+ * txq.
**/
static int
-__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_iocbq *nextiocb;
IOCB_t *iocb;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
if (piocb->iocb_cmpl && (!piocb->vport) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -3833,6 +5619,498 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return IOCB_BUSY;
}
+/**
+ * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to command iocb.
+ * @sglq: Pointer to the scatter gather queue object.
+ *
+ * This routine converts the bpl or bde that is in the IOCB
+ * to a sgl list for the sli4 hardware. The physical address
+ * of the bpl/bde is converted back to a virtual address.
+ * If the IOCB contains a BPL then the list of BDEs is
+ * converted to sli4_sge entries. If the IOCB contains a single
+ * BDE then it is converted to a single sli4_sge.
+ * The IOCB is still in CPU endianness so the contents of
+ * the bpl can be used without byte swapping.
+ *
+ * Returns valid XRI = Success, NO_XRI = Failure.
+**/
+static uint16_t
+lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+ struct lpfc_sglq *sglq)
+{
+ uint16_t xritag = NO_XRI;
+ struct ulp_bde64 *bpl = NULL;
+ struct ulp_bde64 bde;
+ struct sli4_sge *sgl = NULL;
+ IOCB_t *icmd;
+ int numBdes = 0;
+ int i = 0;
+
+ if (!piocbq || !sglq)
+ return xritag;
+
+ sgl = (struct sli4_sge *)sglq->sgl;
+ icmd = &piocbq->iocb;
+ if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+ numBdes = icmd->un.genreq64.bdl.bdeSize /
+ sizeof(struct ulp_bde64);
+ /* The addrHigh and addrLow fields within the IOCB
+ * have not been byteswapped yet so there is no
+ * need to swap them back.
+ */
+ bpl = (struct ulp_bde64 *)
+ ((struct lpfc_dmabuf *)piocbq->context3)->virt;
+
+ if (!bpl)
+ return xritag;
+
+ for (i = 0; i < numBdes; i++) {
+ /* Should already be byte swapped. */
+ sgl->addr_hi = bpl->addrHigh;
+ sgl->addr_lo = bpl->addrLow;
+ /* swap the size field back to the cpu so we
+ * can assign it to the sgl.
+ */
+ bde.tus.w = le32_to_cpu(bpl->tus.w);
+ bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
+ if ((i+1) == numBdes)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ else
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->word3 = cpu_to_le32(sgl->word3);
+ bpl++;
+ sgl++;
+ }
+ } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
+ /* The addrHigh and addrLow fields of the BDE have not
+ * been byteswapped yet so they need to be swapped
+ * before putting them in the sgl.
+ */
+ sgl->addr_hi =
+ cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
+ sgl->addr_lo =
+ cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+ bf_set(lpfc_sli4_sge_len, sgl,
+ icmd->un.genreq64.bdl.bdeSize);
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->word3 = cpu_to_le32(sgl->word3);
+ }
+ return sglq->sli4_xritag;
+}
+
+/**
+ * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
+ * @phba: Pointer to HBA context object.
+ * @piocb: Pointer to command iocb.
+ *
+ * This routine performs a round-robin distribution of SCSI commands across
+ * the SLI4 fast-path FCP work queues.
+ *
+ * Return: index of the SLI4 fast-path FCP work queue to use.
+ **/
+static uint32_t
+lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
+{
+ static uint32_t fcp_qidx;
+
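+ /* Simple modulo round-robin: the static counter picks the next
+ * fast-path FCP work queue in turn. Callers are expected to hold
+ * hbalock (see __lpfc_sli_issue_iocb_s4), which serializes the
+ * increment.
+ */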
+ return fcp_qidx++ % phba->cfg_fcp_wq_count;
+}
+
+/**
+ * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to command iocb.
+ * @wqe: Pointer to the work queue entry.
+ *
+ * This routine converts the iocb command to its Work Queue Entry
+ * equivalent. The wqe pointer should not have any fields set when
+ * this routine is called because it will memcpy over them.
+ * This routine does not set the CQ_ID or the WQEC bits in the
+ * wqe.
+ *
+ * Returns: 0 = Success, IOCB_ERROR = Failure.
+ **/
+static int
+lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
+ union lpfc_wqe *wqe)
+{
+ uint32_t payload_len = 0;
+ uint8_t ct = 0;
+ uint32_t fip;
+ uint32_t abort_tag;
+ uint8_t command_type = ELS_COMMAND_NON_FIP;
+ uint8_t cmnd;
+ uint16_t xritag;
+ struct ulp_bde64 *bpl = NULL;
+
+ fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
+ /* The fcp commands will set command type */
+ if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip))
+ command_type = ELS_COMMAND_NON_FIP;
+ else if (!(iocbq->iocb_flag & LPFC_IO_FCP))
+ command_type = ELS_COMMAND_FIP;
+ else if (iocbq->iocb_flag & LPFC_IO_FCP)
+ command_type = FCP_COMMAND;
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2019 Invalid cmd 0x%x\n",
+ iocbq->iocb.ulpCommand);
+ return IOCB_ERROR;
+ }
+ /* Some of the fields are in the right position already */
+ memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
+ abort_tag = (uint32_t) iocbq->iotag;
+ xritag = iocbq->sli4_xritag;
+ wqe->words[7] = 0; /* The ct field has moved so reset */
+ /* words0-2 bpl convert bde */
+ if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+ bpl = (struct ulp_bde64 *)
+ ((struct lpfc_dmabuf *)iocbq->context3)->virt;
+ if (!bpl)
+ return IOCB_ERROR;
+
+ /* Should already be byte swapped. */
+ wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
+ wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
+ /* swap the size field back to the cpu so we
+ * can assign it to the sgl.
+ */
+ wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
+ payload_len = wqe->generic.bde.tus.f.bdeSize;
+ } else
+ payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
+
+ iocbq->iocb.ulpIoTag = iocbq->iotag;
+ cmnd = iocbq->iocb.ulpCommand;
+
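+ /* Fill in the command-specific WQE fields below; the generic WQE
+ * fields (xri, request tag, abort tag, command type, class and
+ * CQ id) are set after the switch.
+ */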
+ switch (iocbq->iocb.ulpCommand) {
+ case CMD_ELS_REQUEST64_CR:
+ if (!iocbq->iocb.ulpLe) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2007 Only Limited Edition cmd Format"
+ " supported 0x%x\n",
+ iocbq->iocb.ulpCommand);
+ return IOCB_ERROR;
+ }
+ wqe->els_req.payload_len = payload_len;
+ /* Els_request64 has a TMO */
+ bf_set(wqe_tmo, &wqe->els_req.wqe_com,
+ iocbq->iocb.ulpTimeout);
+ /* Need a VF for word 4; set the vf bit */
+ bf_set(els_req64_vf, &wqe->els_req, 0);
+ /* And a VFID for word 12 */
+ bf_set(els_req64_vfid, &wqe->els_req, 0);
+ /*
+ * Set ct field to 3, indicates that the context_tag field
+ * contains the FCFI and remote N_Port_ID is
+ * in word 5.
+ */
+
+ ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+ bf_set(lpfc_wqe_gen_context, &wqe->generic,
+ iocbq->iocb.ulpContext);
+
+ if (iocbq->vport->fc_myDID != 0) {
+ bf_set(els_req64_sid, &wqe->els_req,
+ iocbq->vport->fc_myDID);
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ }
+ bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
+ bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+ /* CCP CCPE PV PRI in word10 were set in the memcpy */
+ break;
+ case CMD_XMIT_SEQUENCE64_CR:
+ /* word3 iocb=io_tag32 wqe=payload_offset */
+ /* payload offset used for multiple outstanding
+ * sequences on the same exchange
+ */
+ wqe->words[3] = 0;
+ /* word4 relative_offset memcpy */
+ /* word5 r_ctl/df_ctl memcpy */
+ bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+ wqe->xmit_sequence.xmit_len = payload_len;
+ break;
+ case CMD_XMIT_BCAST64_CN:
+ /* word3 iocb=iotag32 wqe=payload_len */
+ wqe->words[3] = 0; /* no definition for this in wqe */
+ /* word4 iocb=rsvd wqe=rsvd */
+ /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
+ /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
+ bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+ ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+ break;
+ case CMD_FCP_IWRITE64_CR:
+ command_type = FCP_COMMAND_DATA_OUT;
+ /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
+ * confusing.
+ * word3 is payload_len: byte offset to the sgl entry for the
+ * fcp_command.
+ * word4 is total xfer len, same as the IOCB->ulpParameter.
+ * word5 is initial xfer len 0 = wait for xfer-ready
+ */
+
+ /* Always wait for xfer-ready before sending data */
+ wqe->fcp_iwrite.initial_xfer_len = 0;
+ /* word 4 (xfer length) should have been set on the memcpy */
+
+ /* allow write to fall through to read */
+ case CMD_FCP_IREAD64_CR:
+ /* FCP_CMD is always the 1st sgl entry */
+ wqe->fcp_iread.payload_len =
+ payload_len + sizeof(struct fcp_rsp);
+
+ /* word 4 (xfer length) should have been set on the memcpy */
+
+ bf_set(lpfc_wqe_gen_erp, &wqe->generic,
+ iocbq->iocb.ulpFCP2Rcvy);
+ bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
+ /* The XC bit and the XS bit are similar. The driver never
+ * tracked whether or not the exchange was previously open.
+ * XC = Exchange create, 0 is create. 1 is already open.
+ * XS = link cmd: 1 do not close the exchange after command.
+ * XS = 0 close exchange when command completes.
+ * The only time we would not set the XC bit is when the XS bit
+ * is set and we are sending our 2nd or greater command on
+ * this exchange.
+ */
+
+ /* ALLOW read & write to fall through to ICMD64 */
+ case CMD_FCP_ICMND64_CR:
+ /* Always open the exchange */
+ bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
+
+ wqe->words[10] &= 0xffff0000; /* zero out ebde count */
+ bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+ break;
+ case CMD_GEN_REQUEST64_CR:
+ /* word3 command length is described as byte offset to the
+ * rsp_data. Would always be 16, sizeof(struct sli4_sge)
+ * sgl[0] = cmnd
+ * sgl[1] = rsp.
+ *
+ */
+ wqe->gen_req.command_len = payload_len;
+ /* Word4 parameter copied in the memcpy */
+ /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
+ /* word6 context tag copied in memcpy */
+ if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
+ ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2015 Invalid CT %x command 0x%x\n",
+ ct, iocbq->iocb.ulpCommand);
+ return IOCB_ERROR;
+ }
+ bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
+ bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
+ iocbq->iocb.ulpTimeout);
+
+ bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_XMIT_ELS_RSP64_CX:
+ /* words0-2 BDE memcpy */
+ /* word3 iocb=iotag32 wqe=rsvd */
+ wqe->words[3] = 0;
+ /* word4 iocb=did wqe=rsvd. */
+ wqe->words[4] = 0;
+ /* word5 iocb=rsvd wqe=did */
+ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
+ iocbq->iocb.un.elsreq64.remoteID);
+
+ bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+ ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+
+ bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+ bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+ if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
+ bf_set(lpfc_wqe_gen_context, &wqe->generic,
+ iocbq->vport->vpi + phba->vpi_base);
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_CLOSE_XRI_CN:
+ case CMD_ABORT_XRI_CN:
+ case CMD_ABORT_XRI_CX:
+ /* words 0-2 are reserved; should be 0 from the memcpy */
+ /* port will send abts */
+ if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ /*
+ * The link is down so the fw does not need to send abts
+ * on the wire.
+ */
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ else
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+ abort_tag = iocbq->iocb.un.acxri.abortIoTag;
+ wqe->words[5] = 0;
+ bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+ ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+ abort_tag = iocbq->iocb.un.acxri.abortIoTag;
+ wqe->generic.abort_tag = abort_tag;
+ /*
+ * The abort handler will send us CMD_ABORT_XRI_CN or
+ * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
+ */
+ bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
+ cmnd = CMD_ABORT_XRI_CX;
+ command_type = OTHER_COMMAND;
+ xritag = 0;
+ break;
+ case CMD_XRI_ABORTED_CX:
+ case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
+ /* words0-2 are all 0's no bde */
+ /* word3 and word4 are rsvd */
+ wqe->words[3] = 0;
+ wqe->words[4] = 0;
+ /* word5 iocb=rsvd wqe=did */
+ /* There is no remote port id in the IOCB? */
+ /* Let this fall through and fail */
+ case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
+ case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
+ case CMD_FCP_TRSP64_CX: /* Target mode rcv */
+ case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2014 Invalid command 0x%x\n",
+ iocbq->iocb.ulpCommand);
+ return IOCB_ERROR;
+ break;
+
+ }
+ bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
+ bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
+ wqe->generic.abort_tag = abort_tag;
+ bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
+ bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
+ bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
+ bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
+
+ return 0;
+}
+
+/**
+ * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-4 interface spec.
+ *
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submit the iocb to firmware or after adding to the
+ * txq.
+ **/
+static int
+__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ struct lpfc_sglq *sglq;
+ uint16_t xritag;
+ union lpfc_wqe wqe;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+ uint32_t fcp_wqidx;
+
+ if (piocb->sli4_xritag == NO_XRI) {
+ if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+ piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ sglq = NULL;
+ else {
+ sglq = __lpfc_sli_get_sglq(phba);
+ if (!sglq)
+ return IOCB_ERROR;
+ piocb->sli4_xritag = sglq->sli4_xritag;
+ }
+ } else if (piocb->iocb_flag & LPFC_IO_FCP) {
+ sglq = NULL; /* These IOs already have an XRI and
+ * a mapped sgl.
+ */
+ } else {
+ /* This is a continuation of a command (CX), so this
+ * sglq is on the active list
+ */
+ sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
+ if (!sglq)
+ return IOCB_ERROR;
+ }
+
+ if (sglq) {
+ xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
+ if (xritag != sglq->sli4_xritag)
+ return IOCB_ERROR;
+ }
+
+ if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+ return IOCB_ERROR;
+
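+ /* Post the WQE: FCP commands are distributed across the fast-path
+ * FCP work queues, all other commands go to the slow-path ELS WQ.
+ */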
+ if (piocb->iocb_flag & LPFC_IO_FCP) {
+ fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
+ if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
+ return IOCB_ERROR;
+ } else {
+ if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+ return IOCB_ERROR;
+ }
+ lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
+
+ return 0;
+}
+
+/**
+ * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
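+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.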
+ *
+ * This routine wraps the actual lockless iocb issue routine through the
+ * function pointer in the lpfc_hba struct.
+ *
+ * Return codes:
+ * IOCB_ERROR - Error
+ * IOCB_SUCCESS - Success
+ * IOCB_BUSY - Busy
+ **/
+static inline int
+__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+}
+
+/**
+ * lpfc_sli_api_table_setup - Set up sli api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SLI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+ switch (dev_grp) {
+ case LPFC_PCI_DEV_LP:
+ phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
+ phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
+ break;
+ case LPFC_PCI_DEV_OC:
+ phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
+ phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1419 Invalid HBA PCI-device group: 0x%x\n",
+ dev_grp);
+ return -ENODEV;
+ break;
+ }
+ phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
+ return 0;
+}
/**
* lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
@@ -3848,14 +6126,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* functions which do not hold hbalock.
**/
int
-lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
unsigned long iflags;
int rc;
spin_lock_irqsave(&phba->hbalock, iflags);
- rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
+ rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&phba->hbalock, iflags);
return rc;
@@ -4148,6 +6426,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine flushes the mailbox command subsystem. It will unconditionally
+ * flush all the mailbox commands in the three possible stages in the mailbox
+ * command sub-system: pending mailbox command queue; the outstanding mailbox
+ * command; and completed mailbox command queue. It is the caller's responsibility
+ * to make sure that the driver is in the proper state to flush the mailbox
+ * command sub-system. Namely, the posting of mailbox commands into the
+ * pending mailbox command queue from the various clients must be stopped;
+ * either the HBA is in a state in which it will never work on the outstanding
+ * mailbox command (such as in EEH or ERATT conditions) or the outstanding
+ * mailbox command has been completed.
+ **/
+static void
+lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *pmb;
+ unsigned long iflag;
+
+ /* Flush all the mailbox commands in the mbox system */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ /* The pending mailbox command queue */
+ list_splice_init(&phba->sli.mboxq, &completions);
+ /* The outstanding active mailbox command */
+ if (psli->mbox_active) {
+ list_add_tail(&psli->mbox_active->list, &completions);
+ psli->mbox_active = NULL;
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ }
+ /* The completed mailbox command queue */
+ list_splice_init(&phba->sli.mboxq_cmpl, &completions);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
+ while (!list_empty(&completions)) {
+ list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
+ pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ if (pmb->mbox_cmpl)
+ pmb->mbox_cmpl(phba, pmb);
+ }
+}
+
+/**
* lpfc_sli_host_down - Vport cleanup function
* @vport: Pointer to virtual port object.
*
@@ -4240,9 +6564,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
struct lpfc_dmabuf *buf_ptr;
- LPFC_MBOXQ_t *pmb;
- int i;
unsigned long flags = 0;
+ int i;
+
+ /* Shutdown the mailbox command sub-system */
+ lpfc_sli_mbox_sys_shutdown(phba);
lpfc_hba_down_prep(phba);
@@ -4287,28 +6613,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
/* Return any active mbox cmds */
del_timer_sync(&psli->mbox_tmo);
- spin_lock_irqsave(&phba->hbalock, flags);
- spin_lock(&phba->pport->work_port_lock);
+ spin_lock_irqsave(&phba->pport->work_port_lock, flags);
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
- spin_unlock(&phba->pport->work_port_lock);
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
- /* Return any pending or completed mbox cmds */
- list_splice_init(&phba->sli.mboxq, &completions);
- if (psli->mbox_active) {
- list_add_tail(&psli->mbox_active->list, &completions);
- psli->mbox_active = NULL;
- psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- }
- list_splice_init(&phba->sli.mboxq_cmpl, &completions);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ return 1;
+}
+
+/**
+ * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function cleans up all queues, iocb, buffers, mailbox commands while
+ * shutting down the SLI4 HBA FCoE function. This function is called with no
+ * lock held and always returns 1.
+ *
+ * This function does the following to cleanup driver FCoE function resources:
+ * - Free discovery resources for each virtual port
+ * - Cleanup any pending fabric iocbs
+ * - Iterate through the iocb txq and free each entry in the list.
+ * - Free up any buffer posted to the HBA.
+ * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
+ * - Free mailbox commands in the mailbox queue.
+ **/
+int
+lpfc_sli4_hba_down(struct lpfc_hba *phba)
+{
+ /* Stop the SLI4 device port */
+ lpfc_stop_port(phba);
+
+ /* Tear down the queues in the HBA */
+ lpfc_sli4_queue_unset(phba);
+
+ /* unregister default FCFI from the HBA */
+ lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
- while (!list_empty(&completions)) {
- list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
- pmb->mb.mbxStatus = MBX_NOT_FINISHED;
- if (pmb->mbox_cmpl)
- pmb->mbox_cmpl(phba,pmb);
- }
return 1;
}
@@ -4639,7 +6979,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
iabt = &abtsiocbp->iocb;
iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
iabt->un.acxri.abortContextTag = icmd->ulpContext;
- iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
+ else
+ iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
iabt->ulpLe = 1;
iabt->ulpClass = icmd->ulpClass;
@@ -4655,7 +6998,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
"abort cmd iotag x%x\n",
iabt->un.acxri.abortContextTag,
iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
- retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
+ retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
if (retval)
__lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -4838,7 +7181,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
cmd = &iocbq->iocb;
abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
- abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
+ else
+ abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
abtsiocb->iocb.ulpLe = 1;
abtsiocb->iocb.ulpClass = cmd->ulpClass;
abtsiocb->vport = phba->pport;
@@ -4850,7 +7196,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
/* Setup callback routine and issue the command. */
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
- ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
+ ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
+ abtsiocb, 0);
if (ret_val == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, abtsiocb);
errcnt++;
@@ -4931,7 +7278,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
**/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
- struct lpfc_sli_ring *pring,
+ uint32_t ring_number,
struct lpfc_iocbq *piocb,
struct lpfc_iocbq *prspiocbq,
uint32_t timeout)
@@ -4962,7 +7309,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
readl(phba->HCregaddr); /* flush */
}
- retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
+ retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
if (retval == IOCB_SUCCESS) {
timeout_req = timeout * HZ;
timeleft = wait_event_timeout(done_q,
@@ -5077,53 +7424,156 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
}
/**
- * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function
+ * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
* @phba: Pointer to HBA context.
*
- * This function is called to cleanup any pending mailbox
- * objects in the driver queue before bringing the HBA offline.
- * This function is called while resetting the HBA.
- * The function is called without any lock held. The function
- * takes hbalock to update SLI data structure.
- * This function returns 1 when there is an active mailbox
- * command pending else returns 0.
+ * This function is called to shutdown the driver's mailbox sub-system.
+ * It first marks the mailbox sub-system as blocked to prevent
+ * asynchronous mailbox commands from being issued off the pending mailbox
+ * command queue. If the mailbox command sub-system shutdown is due to
+ * HBA error conditions such as EEH or ERATT, this routine shall invoke
+ * the mailbox sub-system flush routine to forcefully bring down the
+ * mailbox sub-system. Otherwise, if it is due to normal condition (such
+ * as with offline or HBA function reset), this routine will wait for the
+ * outstanding mailbox command to complete before invoking the mailbox
+ * sub-system flush routine to gracefully bring down mailbox sub-system.
**/
-int
-lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
+void
+lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
{
- struct lpfc_vport *vport = phba->pport;
- int i = 0;
- uint32_t ha_copy;
+ struct lpfc_sli *psli = &phba->sli;
+ uint8_t actcmd = MBX_HEARTBEAT;
+ unsigned long timeout;
- while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
- if (i++ > LPFC_MBOX_TMO * 1000)
- return 1;
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
- /*
- * Call lpfc_sli_handle_mb_event only if a mailbox cmd
- * did finish. This way we won't get the misleading
- * "Stray Mailbox Interrupt" message.
- */
+ if (psli->sli_flag & LPFC_SLI_ACTIVE) {
spin_lock_irq(&phba->hbalock);
- ha_copy = phba->work_ha;
- phba->work_ha &= ~HA_MBATT;
+ if (phba->sli.mbox_active)
+ actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
spin_unlock_irq(&phba->hbalock);
+ /* Determine how long we might wait for the active mailbox
+ * command to be gracefully completed by firmware.
+ */
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
+ 1000) + jiffies;
+ while (phba->sli.mbox_active) {
+ /* Check active mailbox complete status every 2ms */
+ msleep(2);
+ if (time_after(jiffies, timeout))
+ /* Timeout; let the mailbox flush routine
+ * forcefully release the active mailbox command
+ */
+ break;
+ }
+ }
+ lpfc_sli_mbox_sys_flush(phba);
+}
+
+/**
+ * lpfc_sli_eratt_read - read sli-3 error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to read the SLI3 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+static int
+lpfc_sli_eratt_read(struct lpfc_hba *phba)
+{
+ uint32_t ha_copy;
- if (ha_copy & HA_MBATT)
- if (lpfc_sli_handle_mb_event(phba) == 0)
- i = 0;
+ /* Read chip Host Attention (HA) register */
+ ha_copy = readl(phba->HAregaddr);
+ if (ha_copy & HA_ERATT) {
+ /* Read host status register to retrieve error event */
+ lpfc_sli_read_hs(phba);
+
+ /* Check if a deferred error condition is active */
+ if ((HS_FFER1 & phba->work_hs) &&
+ ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
+ HS_FFER6 | HS_FFER7) & phba->work_hs)) {
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= DEFER_ERATT;
+ spin_unlock_irq(&phba->hbalock);
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr);
+ }
- msleep(1);
+ /* Set the driver HA work bitmap */
+ spin_lock_irq(&phba->hbalock);
+ phba->work_ha |= HA_ERATT;
+ /* Indicate polling handles this ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ spin_unlock_irq(&phba->hbalock);
+ return 1;
}
+ return 0;
+}
+
+/**
+ * lpfc_sli4_eratt_read - read sli-4 error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to read the SLI4 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+static int
+lpfc_sli4_eratt_read(struct lpfc_hba *phba)
+{
+ uint32_t uerr_sta_hi, uerr_sta_lo;
+ uint32_t onlnreg0, onlnreg1;
- return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
+ /* For now, use the SLI4 device internal unrecoverable error
+ * registers for error attention. This can be changed later.
+ */
+ onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
+ onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
+ if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
+ uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
+ uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
+ if (uerr_sta_lo || uerr_sta_hi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1423 HBA Unrecoverable error: "
+ "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+ "online0_reg=0x%x, online1_reg=0x%x\n",
+ uerr_sta_lo, uerr_sta_hi,
+ onlnreg0, onlnreg1);
+ /* TEMP: as the driver error recovery logic is not
+ * fully developed, we just log the error message
+ * and the device error attention action is now
+ * temporarily disabled.
+ */
+ return 0;
+ phba->work_status[0] = uerr_sta_lo;
+ phba->work_status[1] = uerr_sta_hi;
+ spin_lock_irq(&phba->hbalock);
+ /* Set the driver HA work bitmap */
+ phba->work_ha |= HA_ERATT;
+ /* Indicate polling handles this ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ spin_unlock_irq(&phba->hbalock);
+ return 1;
+ }
+ }
+ return 0;
}
/**
* lpfc_sli_check_eratt - check error attention events
* @phba: Pointer to HBA context.
*
- * This function is called form timer soft interrupt context to check HBA's
+ * This function is called from timer soft interrupt context to check HBA's
* error attention register bit for error attention events.
*
* This fucntion returns 1 when there is Error Attention in the Host Attention
@@ -5134,10 +7584,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
uint32_t ha_copy;
- /* If PCI channel is offline, don't process it */
- if (unlikely(pci_channel_offline(phba->pcidev)))
- return 0;
-
/* If somebody is waiting to handle an eratt, don't process it
* here. The brdkill function will do this.
*/
@@ -5161,56 +7607,84 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
return 0;
}
- /* Read chip Host Attention (HA) register */
- ha_copy = readl(phba->HAregaddr);
- if (ha_copy & HA_ERATT) {
- /* Read host status register to retrieve error event */
- lpfc_sli_read_hs(phba);
-
- /* Check if there is a deferred error condition is active */
- if ((HS_FFER1 & phba->work_hs) &&
- ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
- HS_FFER6 | HS_FFER7) & phba->work_hs)) {
- phba->hba_flag |= DEFER_ERATT;
- /* Clear all interrupt enable conditions */
- writel(0, phba->HCregaddr);
- readl(phba->HCregaddr);
- }
-
- /* Set the driver HA work bitmap */
- phba->work_ha |= HA_ERATT;
- /* Indicate polling handles this ERATT */
- phba->hba_flag |= HBA_ERATT_HANDLED;
+ /* If PCI channel is offline, don't process it */
+ if (unlikely(pci_channel_offline(phba->pcidev))) {
spin_unlock_irq(&phba->hbalock);
- return 1;
+ return 0;
+ }
+
+ switch (phba->sli_rev) {
+ case LPFC_SLI_REV2:
+ case LPFC_SLI_REV3:
+ /* Read chip Host Attention (HA) register */
+ ha_copy = lpfc_sli_eratt_read(phba);
+ break;
+ case LPFC_SLI_REV4:
+ /* Read device Unrecoverable Error (UERR) registers */
+ ha_copy = lpfc_sli4_eratt_read(phba);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0299 Invalid SLI revision (%d)\n",
+ phba->sli_rev);
+ ha_copy = 0;
+ break;
}
spin_unlock_irq(&phba->hbalock);
+
+ return ha_copy;
+}
+
+/**
+ * lpfc_intr_state_check - Check device state for interrupt handling
+ * @phba: Pointer to HBA context.
+ *
+ * This inline routine checks whether a device or its PCI slot is in a state
+ * in which the interrupt should be handled.
+ *
+ * This function returns 0 if the device or the PCI slot is in a state in
+ * which the interrupt should be handled, otherwise -EIO.
+ */
+static inline int
+lpfc_intr_state_check(struct lpfc_hba *phba)
+{
+ /* If the pci channel is offline, ignore all the interrupts */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return -EIO;
+
+ /* Update device level interrupt statistics */
+ phba->sli.slistat.sli_intr++;
+
+ /* Ignore all interrupts during initialization. */
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+ return -EIO;
+
return 0;
}
/**
- * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver
+ * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
* @irq: Interrupt number.
* @dev_id: The device context pointer.
*
* This function is directly called from the PCI layer as an interrupt
- * service routine when the device is enabled with MSI-X multi-message
- * interrupt mode and there are slow-path events in the HBA. However,
- * when the device is enabled with either MSI or Pin-IRQ interrupt mode,
- * this function is called as part of the device-level interrupt handler.
- * When the PCI slot is in error recovery or the HBA is undergoing
- * initialization, the interrupt handler will not process the interrupt.
- * The link attention and ELS ring attention events are handled by the
- * worker thread. The interrupt handler signals the worker thread and
- * and returns for these events. This function is called without any
- * lock held. It gets the hbalock to access and update SLI data
+ * service routine when device with SLI-3 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there are slow-path events in
+ * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA
+ * is undergoing initialization, the interrupt handler will not process
+ * the interrupt. The link attention and ELS ring attention events are
+ * handled by the worker thread. The interrupt handler signals the worker
+ * thread and returns for these events. This function is called without
+ * any lock held. It gets the hbalock to access and update SLI data
* structures.
*
* This function returns IRQ_HANDLED when interrupt is handled else it
* returns IRQ_NONE.
**/
irqreturn_t
-lpfc_sp_intr_handler(int irq, void *dev_id)
+lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
uint32_t ha_copy;
@@ -5240,13 +7714,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
* individual interrupt handler in MSI-X multi-message interrupt mode
*/
if (phba->intr_type == MSIX) {
- /* If the pci channel is offline, ignore all the interrupts */
- if (unlikely(pci_channel_offline(phba->pcidev)))
- return IRQ_NONE;
- /* Update device-level interrupt statistics */
- phba->sli.slistat.sli_intr++;
- /* Ignore all interrupts during initialization. */
- if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+ /* Check device state for handling interrupt */
+ if (lpfc_intr_state_check(phba))
return IRQ_NONE;
/* Need to read HA REG for slow-path events */
spin_lock_irqsave(&phba->hbalock, iflag);
@@ -5271,7 +7740,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
* interrupt.
*/
if (unlikely(phba->hba_flag & DEFER_ERATT)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return IRQ_NONE;
}
@@ -5364,7 +7833,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
pmb = phba->sli.mbox_active;
- pmbox = &pmb->mb;
+ pmbox = &pmb->u.mb;
mbox = phba->mbox;
vport = pmb->vport;
@@ -5434,7 +7903,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
LOG_MBOX | LOG_SLI,
"0350 rc should have"
"been MBX_BUSY");
- goto send_current_mbox;
+ if (rc != MBX_NOT_FINISHED)
+ goto send_current_mbox;
}
}
spin_lock_irqsave(
@@ -5471,29 +7941,29 @@ send_current_mbox:
}
return IRQ_HANDLED;
-} /* lpfc_sp_intr_handler */
+} /* lpfc_sli_sp_intr_handler */
/**
- * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver
+ * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
* @irq: Interrupt number.
* @dev_id: The device context pointer.
*
* This function is directly called from the PCI layer as an interrupt
- * service routine when the device is enabled with MSI-X multi-message
- * interrupt mode and there is a fast-path FCP IOCB ring event in the
- * HBA. However, when the device is enabled with either MSI or Pin-IRQ
- * interrupt mode, this function is called as part of the device-level
- * interrupt handler. When the PCI slot is in error recovery or the HBA
- * is undergoing initialization, the interrupt handler will not process
- * the interrupt. The SCSI FCP fast-path ring event are handled in the
- * intrrupt context. This function is called without any lock held. It
- * gets the hbalock to access and update SLI data structures.
+ * service routine when device with SLI-3 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The SCSI FCP fast-path ring events are handled in
+ * the interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures.
*
* This function returns IRQ_HANDLED when interrupt is handled else it
* returns IRQ_NONE.
**/
irqreturn_t
-lpfc_fp_intr_handler(int irq, void *dev_id)
+lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
uint32_t ha_copy;
@@ -5513,13 +7983,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
* individual interrupt handler in MSI-X multi-message interrupt mode
*/
if (phba->intr_type == MSIX) {
- /* If pci channel is offline, ignore all the interrupts */
- if (unlikely(pci_channel_offline(phba->pcidev)))
- return IRQ_NONE;
- /* Update device-level interrupt statistics */
- phba->sli.slistat.sli_intr++;
- /* Ignore all interrupts during initialization. */
- if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+ /* Check device state for handling interrupt */
+ if (lpfc_intr_state_check(phba))
return IRQ_NONE;
/* Need to read HA REG for FCP ring and other ring events */
ha_copy = readl(phba->HAregaddr);
@@ -5530,7 +7995,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
* any interrupt.
*/
if (unlikely(phba->hba_flag & DEFER_ERATT)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return IRQ_NONE;
}
writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
@@ -5566,26 +8031,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
}
}
return IRQ_HANDLED;
-} /* lpfc_fp_intr_handler */
+} /* lpfc_sli_fp_intr_handler */
/**
- * lpfc_intr_handler - The device-level interrupt handler of lpfc driver
+ * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
* @irq: Interrupt number.
* @dev_id: The device context pointer.
*
- * This function is the device-level interrupt handler called from the PCI
- * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is
- * an event in the HBA which requires driver attention. This function
- * invokes the slow-path interrupt attention handling function and fast-path
- * interrupt attention handling function in turn to process the relevant
- * HBA attention events. This function is called without any lock held. It
- * gets the hbalock to access and update SLI data structures.
+ * This function is the HBA device-level interrupt handler to device with
+ * SLI-3 interface spec, called from the PCI layer when either MSI or
+ * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
+ * requires driver attention. This function invokes the slow-path interrupt
+ * attention handling function and fast-path interrupt attention handling
+ * function in turn to process the relevant HBA attention events. This
+ * function is called without any lock held. It gets the hbalock to access
+ * and update SLI data structures.
*
* This function returns IRQ_HANDLED when interrupt is handled, else it
* returns IRQ_NONE.
**/
irqreturn_t
-lpfc_intr_handler(int irq, void *dev_id)
+lpfc_sli_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
irqreturn_t sp_irq_rc, fp_irq_rc;
@@ -5600,15 +8066,8 @@ lpfc_intr_handler(int irq, void *dev_id)
if (unlikely(!phba))
return IRQ_NONE;
- /* If the pci channel is offline, ignore all the interrupts. */
- if (unlikely(pci_channel_offline(phba->pcidev)))
- return IRQ_NONE;
-
- /* Update device level interrupt statistics */
- phba->sli.slistat.sli_intr++;
-
- /* Ignore all interrupts during initialization. */
- if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+ /* Check device state for handling interrupt */
+ if (lpfc_intr_state_check(phba))
return IRQ_NONE;
spin_lock(&phba->hbalock);
@@ -5650,7 +8109,7 @@ lpfc_intr_handler(int irq, void *dev_id)
status2 >>= (4*LPFC_ELS_RING);
if (status1 || (status2 & HA_RXMASK))
- sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id);
+ sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
else
sp_irq_rc = IRQ_NONE;
@@ -5670,10 +8129,3322 @@ lpfc_intr_handler(int irq, void *dev_id)
status2 = 0;
if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
- fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id);
+ fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
else
fp_irq_rc = IRQ_NONE;
/* Return device-level interrupt handling status */
return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
-} /* lpfc_intr_handler */
+} /* lpfc_sli_intr_handler */
+
+/**
+ * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 FCP abort XRI events.
+ **/
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* First, declare the fcp xri abort event has been handled */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
+ spin_unlock_irq(&phba->hbalock);
+ /* Now, handle all the fcp xri abort events */
+ while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
+ /* Get the first event from the head of the event queue */
+ spin_lock_irq(&phba->hbalock);
+ list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irq(&phba->hbalock);
+ /* Notify aborted XRI for FCP work queue */
+ lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+ /* Free the event processed back to the free pool */
+ lpfc_sli4_cq_event_release(phba, cq_event);
+ }
+}
+
+/**
+ * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 els abort xri events.
+ **/
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* First, declare the els xri abort event has been handled */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
+ spin_unlock_irq(&phba->hbalock);
+ /* Now, handle all the els xri abort events */
+ while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
+ /* Get the first event from the head of the event queue */
+ spin_lock_irq(&phba->hbalock);
+ list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irq(&phba->hbalock);
+ /* Notify aborted XRI for ELS work queue */
+ lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+ /* Free the event processed back to the free pool */
+ lpfc_sli4_cq_event_release(phba, cq_event);
+ }
+}
+
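+/**
+ * lpfc_sli4_iocb_param_transfer - Transfer completion data from WCQE to iocb
+ * @pIocbIn: Pointer to the driver response iocb object.
+ * @pIocbOut: Pointer to the driver command iocb object.
+ * @wcqe: Pointer to the work-queue completion queue entry.
+ *
+ * This routine copies the iocb portion of the command iocb into the pseudo
+ * response iocb, clears the SLI4 response information, and maps the WCQE
+ * status, parameter and total data placed count into the corresponding
+ * response iocb fields.
+ **/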
+static void
+lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
+ struct lpfc_iocbq *pIocbOut,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ size_t offset = offsetof(struct lpfc_iocbq, iocb);
+
+ memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
+ sizeof(struct lpfc_iocbq) - offset);
+ memset(&pIocbIn->sli4_info, 0,
+ sizeof(struct lpfc_sli4_rspiocb_info));
+ /* Map WCQE parameters into irspiocb parameters */
+ pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
+ if (pIocbOut->iocb_flag & LPFC_IO_FCP)
+ if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+ pIocbIn->iocb.un.fcpi.fcpi_parm =
+ pIocbOut->iocb.un.fcpi.fcpi_parm -
+ wcqe->total_data_placed;
+ else
+ pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+ else
+ pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+ /* Load in additional WCQE parameters */
+ pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
+ pIocbIn->sli4_info.bfield = 0;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ pIocbIn->sli4_info.bfield |= LPFC_XB;
+ if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
+ pIocbIn->sli4_info.bfield |= LPFC_PV;
+ pIocbIn->sli4_info.priority =
+ bf_get(lpfc_wcqe_c_priority, wcqe);
+ }
+}
+
+/**
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
+ * @phba: Pointer to HBA context object.
+ * @mcqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry with an asynchronous
+ * event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+ struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0392 Async Event: word0:x%x, word1:x%x, "
+ "word2:x%x, word3:x%x\n", mcqe->word0,
+ mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
+
+ /* Allocate a new internal CQ_EVENT entry */
+ cq_event = lpfc_sli4_cq_event_alloc(phba);
+ if (!cq_event) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0394 Failed to allocate CQ_EVENT entry\n");
+ return false;
+ }
+
+ /* Move the CQE into an asynchronous event entry */
+ memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
+ /* Set the async event flag */
+ phba->hba_flag |= ASYNC_EVENT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ return true;
+}
+
+/**
+ * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
+ * @phba: Pointer to HBA context object.
+ * @mcqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry with a mailbox
+ * completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+ uint32_t mcqe_status;
+ MAILBOX_t *mbox, *pmbox;
+ struct lpfc_mqe *mqe;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *mp;
+ unsigned long iflags;
+ LPFC_MBOXQ_t *pmb;
+ bool workposted = false;
+ int rc;
+
+ /* If not a mailbox-complete MCQE, bail out after checking the consumed bit */
+ if (!bf_get(lpfc_trailer_completed, mcqe))
+ goto out_no_mqe_complete;
+
+ /* Get the reference to the active mbox command */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ pmb = phba->sli.mbox_active;
+ if (unlikely(!pmb)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "1832 No pending MBOX command to handle\n");
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ goto out_no_mqe_complete;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ mqe = &pmb->u.mqe;
+ pmbox = (MAILBOX_t *)&pmb->u.mqe;
+ mbox = phba->mbox;
+ vport = pmb->vport;
+
+ /* Reset heartbeat timer */
+ phba->last_completion_time = jiffies;
+ del_timer(&phba->sli.mbox_tmo);
+
+ /* Move mbox data to caller's mailbox region, do endian swapping */
+ if (pmb->mbox_cmpl && mbox)
+ lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
+ /* Set the mailbox status with SLI4 range 0x4000 */
+ mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
+ if (mcqe_status != MB_CQE_STATUS_SUCCESS)
+ bf_set(lpfc_mqe_status, mqe,
+ (LPFC_MBX_ERROR_RANGE | mcqe_status));
+
+ if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+ pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX dflt rpi: status:x%x rpi:x%x",
+ mcqe_status,
+ pmbox->un.varWords[0], 0);
+ if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
+ mp = (struct lpfc_dmabuf *)(pmb->context1);
+ ndlp = (struct lpfc_nodelist *)pmb->context2;
+ /* Reg_LOGIN of dflt RPI was successful. Now let's get
+ * rid of the RPI using the same mbox buffer.
+ */
+ lpfc_unreg_login(phba, vport->vpi,
+ pmbox->un.varWords[0], pmb);
+ pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+ pmb->context1 = mp;
+ pmb->context2 = ndlp;
+ pmb->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc != MBX_BUSY)
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+ LOG_SLI, "0385 rc should "
+ "have been MBX_BUSY\n");
+ if (rc != MBX_NOT_FINISHED)
+ goto send_current_mbox;
+ }
+ }
+ spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+ phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+ /* There is mailbox completion work to do */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ __lpfc_mbox_cmpl_put(phba, pmb);
+ phba->work_ha |= HA_MBATT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+
+send_current_mbox:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ /* Release the mailbox command posting token */
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ /* Setting the active mailbox pointer needs to be in sync with the flag clear */
+ phba->sli.mbox_active = NULL;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ /* Wake up worker thread to post the next pending mailbox command */
+ lpfc_worker_wake_up(phba);
+out_no_mqe_complete:
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
+ return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry. It invokes the
+ * proper mailbox completion handling or asynchronous event handling routine
+ * according to the MCQE's async bit.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+{
+ struct lpfc_mcqe mcqe;
+ bool workposted;
+
+ /* Copy the mailbox MCQE and convert endian order as needed */
+ lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
+
+ /* Invoke the proper event handling routine */
+ if (!bf_get(lpfc_trailer_async, &mcqe))
+ workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
+ else
+ workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
+ return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an ELS work-queue completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_iocbq *cmdiocbq;
+ struct lpfc_iocbq *irspiocbq;
+ unsigned long iflags;
+ bool workposted = false;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ pring->stats.iocb_event++;
+ /* Look up the ELS command IOCB and create pseudo response IOCB */
+ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ if (unlikely(!cmdiocbq)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0386 ELS complete with no corresponding "
+ "cmdiocb: iotag (%d)\n",
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ return workposted;
+ }
+
+ /* Fake the irspiocbq and copy necessary response information */
+ irspiocbq = lpfc_sli_get_iocbq(phba);
+ if (!irspiocbq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0387 Failed to allocate an iocbq\n");
+ return workposted;
+ }
+ lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
+
+ /* Add the irspiocb to the response IOCB work list */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
+ /* Indicate ELS ring attention */
+ phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+
+ return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a slow-path WQ entry consumed event by invoking the
+ * proper WQ release routine to the slow-path WQ.
+ **/
+static void
+lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
+ struct lpfc_wcqe_release *wcqe)
+{
+ /* Check for the slow-path ELS work queue */
+ if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
+ lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
+ bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+ else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2579 Slow-path wqe consume event carries "
+ "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
+ bf_get(lpfc_wcqe_r_wqe_index, wcqe),
+ phba->sli4_hba.els_wq->queue_id);
+}
+
+/**
+ * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to a WQ completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an XRI abort event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
+ struct lpfc_queue *cq,
+ struct sli4_wcqe_xri_aborted *wcqe)
+{
+ bool workposted = false;
+ struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
+
+ /* Allocate a new internal CQ_EVENT entry */
+ cq_event = lpfc_sli4_cq_event_alloc(phba);
+ if (!cq_event) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0602 Failed to allocate CQ_EVENT entry\n");
+ return false;
+ }
+
+ /* Move the CQE into the proper xri abort event list */
+ memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ switch (cq->subtype) {
+ case LPFC_FCP:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&cq_event->list,
+ &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+ /* Set the fcp xri abort event flag */
+ phba->hba_flag |= FCP_XRI_ABORT_EVENT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+ break;
+ case LPFC_ELS:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&cq_event->list,
+ &phba->sli4_hba.sp_els_xri_aborted_work_queue);
+ /* Set the els xri abort event flag */
+ phba->hba_flag |= ELS_XRI_ABORT_EVENT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0603 Invalid work queue CQE subtype (x%x)\n",
+ cq->subtype);
+ workposted = false;
+ break;
+ }
+ return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @wcqe: Pointer to a completion queue entry.
+ *
+ * This routine processes a slow-path work-queue completion queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe)
+{
+ struct lpfc_wcqe_complete wcqe;
+ bool workposted = false;
+
+ /* Copy the work queue CQE and convert endian order if needed */
+ lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+ /* Check the WCQE type and dispatch it to the proper handler */
+ switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+ case CQE_CODE_COMPL_WQE:
+ /* Process the WQ complete event */
+ workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+ (struct lpfc_wcqe_complete *)&wcqe);
+ break;
+ case CQE_CODE_RELEASE_WQE:
+ /* Process the WQ release event */
+ lpfc_sli4_sp_handle_rel_wcqe(phba,
+ (struct lpfc_wcqe_release *)&wcqe);
+ break;
+ case CQE_CODE_XRI_ABORTED:
+ /* Process the WQ XRI abort event */
+ workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+ (struct sli4_wcqe_xri_aborted *)&wcqe);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0388 Not a valid WCQE code: x%x\n",
+ bf_get(lpfc_wcqe_c_code, &wcqe));
+ break;
+ }
+ return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to receive-queue completion queue entry.
+ *
+ * This routine processes a receive-queue completion queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+{
+ struct lpfc_rcqe rcqe;
+ bool workposted = false;
+ struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
+ struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
+ struct hbq_dmabuf *dma_buf;
+ uint32_t status;
+ unsigned long iflags;
+
+ /* Copy the receive queue CQE and convert endian order if needed */
+ lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
+ lpfc_sli4_rq_release(hrq, drq);
+ if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
+ goto out;
+ if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
+ goto out;
+
+ status = bf_get(lpfc_rcqe_status, &rcqe);
+ switch (status) {
+ case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2537 Receive Frame Truncated!!\n");
+ case FC_STATUS_RQ_SUCCESS:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
+ if (!dma_buf) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ goto out;
+ }
+ memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
+ /* save off the frame for the worker thread to process */
+ list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
+ /* Frame received */
+ phba->hba_flag |= HBA_RECEIVE_BUFFER;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+ break;
+ case FC_STATUS_INSUFF_BUF_NEED_BUF:
+ case FC_STATUS_INSUFF_BUF_FRM_DISC:
+ /* Post more buffers if possible */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+ break;
+ }
+out:
+ return workposted;
+
+}
+
+/**
+ * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to slow-path event queue entry.
+ *
+ * This routine processes an event queue entry from the slow-path event queue.
+ * It checks the MajorCode and MinorCode to determine whether this is a
+ * completion event on a completion queue; if not, an error is logged and the
+ * routine returns. Otherwise, it looks up the corresponding completion
+ * queue, processes all the entries on that completion queue, rearms the
+ * completion queue, and then returns.
+ *
+ **/
+static void
+lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+ struct lpfc_queue *cq = NULL, *childq, *speq;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ int ecount = 0;
+ uint16_t cqid;
+
+ if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
+ bf_get(lpfc_eqe_minor_code, eqe) != 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0359 Not a valid slow-path completion "
+ "event: majorcode=x%x, minorcode=x%x\n",
+ bf_get(lpfc_eqe_major_code, eqe),
+ bf_get(lpfc_eqe_minor_code, eqe));
+ return;
+ }
+
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get(lpfc_eqe_resource_id, eqe);
+
+ /* Search for completion queue pointer matching this cqid */
+ speq = phba->sli4_hba.sp_eq;
+ list_for_each_entry(childq, &speq->child_list, list) {
+ if (childq->queue_id == cqid) {
+ cq = childq;
+ break;
+ }
+ }
+ if (unlikely(!cq)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0365 Slow-path CQ identifier (%d) does "
+ "not exist\n", cqid);
+ return;
+ }
+
+ /* Process all the entries to the CQ */
+ switch (cq->type) {
+ case LPFC_MCQ:
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
+ if (!(++ecount % LPFC_GET_QE_REL_INT))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ }
+ break;
+ case LPFC_WCQ:
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
+ if (!(++ecount % LPFC_GET_QE_REL_INT))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ }
+ break;
+ case LPFC_RCQ:
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
+ if (!(++ecount % LPFC_GET_QE_REL_INT))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ }
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0370 Invalid completion queue type (%d)\n",
+ cq->type);
+ return;
+ }
+
+ /* Catch the no cq entry condition, log an error */
+ if (unlikely(ecount == 0))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0371 No entry from the CQ: identifier "
+ "(x%x), type (%d)\n", cq->queue_id, cq->type);
+
+ /* In any case, flush and re-arm the CQ */
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+ /* wake up the worker thread if there is work to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
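+/*
+ * Note on the processing loops above: completion queue entries are handed
+ * back to the port in batches of LPFC_GET_QE_REL_INT with LPFC_QUEUE_NOARM
+ * so the CQ is not rearmed mid-scan; only the final lpfc_sli4_cq_release()
+ * with LPFC_QUEUE_REARM re-enables the CQ to raise further events. The same
+ * pattern is used for the fast-path CQs and for the EQs in the interrupt
+ * handlers below.
+ */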
+
+/**
+ * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine processes a fast-path work-queue completion entry from the
+ * fast-path event queue for FCP command response completion.
+ **/
+static void
+lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+ struct lpfc_iocbq *cmdiocbq;
+ struct lpfc_iocbq irspiocbq;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ pring->stats.iocb_event++;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ /* Check for response status */
+ if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
+ /* If resource errors reported from HBA, reduce queue
+ * depth of the SCSI device.
+ */
+ if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
+ IOSTAT_LOCAL_REJECT) &&
+ (wcqe->parameter == IOERR_NO_RESOURCES)) {
+ phba->lpfc_rampdown_queue_depth(phba);
+ }
+ /* Log the error status */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0373 FCP complete error: status=x%x, "
+ "hw_status=x%x, total_data_specified=%d, "
+ "parameter=x%x, word3=x%x\n",
+ bf_get(lpfc_wcqe_c_status, wcqe),
+ bf_get(lpfc_wcqe_c_hw_status, wcqe),
+ wcqe->total_data_placed, wcqe->parameter,
+ wcqe->word3);
+ }
+
+ /* Look up the FCP command IOCB and create pseudo response IOCB */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (unlikely(!cmdiocbq)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0374 FCP complete with no corresponding "
+ "cmdiocb: iotag (%d)\n",
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ return;
+ }
+ if (unlikely(!cmdiocbq->iocb_cmpl)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0375 FCP cmdiocb not callback function "
+ "iotag: (%d)\n",
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ return;
+ }
+
+ /* Fake the irspiocb and copy necessary response information */
+ lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
+
+ /* Pass the cmd_iocb and the rsp state to the upper layer */
+ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
+}
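+/*
+ * Note: unlike the slow-path ELS handler, which allocates an iocbq with
+ * lpfc_sli_get_iocbq() and defers completion to the worker thread, the FCP
+ * fast path above fakes the response iocb on the stack and invokes the
+ * command's iocb_cmpl callback directly in interrupt context.
+ */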
+
+/**
+ * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a fast-path WQ entry consumed event by invoking the
+ * proper WQ release routine on the matching fast-path WQ.
+ **/
+static void
+lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_wcqe_release *wcqe)
+{
+ struct lpfc_queue *childwq;
+ bool wqid_matched = false;
+ uint16_t fcp_wqid;
+
+ /* Check for fast-path FCP work queue release */
+ fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
+ list_for_each_entry(childwq, &cq->child_list, list) {
+ if (childwq->queue_id == fcp_wqid) {
+ lpfc_sli4_wq_release(childwq,
+ bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+ wqid_matched = true;
+ break;
+ }
+ }
+ /* Report warning log message if no match found */
+ if (wqid_matched != true)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2580 Fast-path wqe consume event carries "
+ "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
+}
+
+/**
+ * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to fast-path completion queue entry.
+ *
+ * This routine processes a fast-path work-queue completion queue entry from
+ * the fast-path event queue and dispatches it to the proper WCQE handler.
+ **/
+static int
+lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe)
+{
+ struct lpfc_wcqe_release wcqe;
+ bool workposted = false;
+
+ /* Copy the work queue CQE and convert endian order if needed */
+ lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+ /* Check the WCQE type and dispatch it to the proper handler */
+ switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+ case CQE_CODE_COMPL_WQE:
+ /* Process the WQ complete event */
+ lpfc_sli4_fp_handle_fcp_wcqe(phba,
+ (struct lpfc_wcqe_complete *)&wcqe);
+ break;
+ case CQE_CODE_RELEASE_WQE:
+ /* Process the WQ release event */
+ lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
+ (struct lpfc_wcqe_release *)&wcqe);
+ break;
+ case CQE_CODE_XRI_ABORTED:
+ /* Process the WQ XRI abort event */
+ workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+ (struct sli4_wcqe_xri_aborted *)&wcqe);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0144 Not a valid WCQE code: x%x\n",
+ bf_get(lpfc_wcqe_c_code, &wcqe));
+ break;
+ }
+ return workposted;
+}
+
+/**
+ * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ * @fcp_cqidx: Index into the fast-path FCP completion queue array.
+ *
+ * This routine processes an event queue entry from the fast-path event queue.
+ * It checks the MajorCode and MinorCode to determine whether this is a
+ * completion event on a completion queue; if not, an error is logged and the
+ * routine returns. Otherwise, it looks up the corresponding completion
+ * queue, processes all the entries on that completion queue, rearms the
+ * completion queue, and then returns.
+ **/
+static void
+lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+ uint32_t fcp_cqidx)
+{
+ struct lpfc_queue *cq;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ uint16_t cqid;
+ int ecount = 0;
+
+ if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
+ unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0366 Not a valid fast-path completion "
+ "event: majorcode=x%x, minorcode=x%x\n",
+ bf_get(lpfc_eqe_major_code, eqe),
+ bf_get(lpfc_eqe_minor_code, eqe));
+ return;
+ }
+
+ cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
+ if (unlikely(!cq)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0367 Fast-path completion queue does not "
+ "exist\n");
+ return;
+ }
+
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get(lpfc_eqe_resource_id, eqe);
+ if (unlikely(cqid != cq->queue_id)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0368 Miss-matched fast-path completion "
+ "queue identifier: eqcqid=%d, fcpcqid=%d\n",
+ cqid, cq->queue_id);
+ return;
+ }
+
+ /* Process all the entries to the CQ */
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+ if (!(++ecount % LPFC_GET_QE_REL_INT))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ }
+
+ /* Catch the no cq entry condition */
+ if (unlikely(ecount == 0))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0369 No entry from fast-path completion "
+ "queue fcpcqid=%d\n", cq->queue_id);
+
+ /* In any case, flush and re-arm the CQ */
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+ /* wake up the worker thread if there is work to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
+
+static void
+lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+ struct lpfc_eqe *eqe;
+
+ /* walk all the EQ entries and drop on the floor */
+ while ((eqe = lpfc_sli4_eq_get(eq)))
+ ;
+
+ /* Clear and re-arm the EQ */
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+}
+
+/**
+ * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there are slow-path events in
+ * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA is
+ * undergoing initialization, the interrupt handler will not process the
+ * interrupt. The link attention and ELS ring attention events are handled
+ * by the worker thread. The interrupt handler signals the worker thread
+ * and returns for these events. This function is called without any lock
+ * held. It gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_queue *speq;
+ struct lpfc_eqe *eqe;
+ unsigned long iflag;
+ int ecount = 0;
+
+ /*
+ * Get the driver's phba structure from the dev_id
+ */
+ phba = (struct lpfc_hba *)dev_id;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /* Get to the EQ struct associated with this vector */
+ speq = phba->sli4_hba.sp_eq;
+
+ /* Check device state for handling interrupt */
+ if (unlikely(lpfc_intr_state_check(phba))) {
+ /* Check again for link_state with lock held */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->link_state < LPFC_LINK_DOWN)
+ /* Flush, clear interrupt, and rearm the EQ */
+ lpfc_sli4_eq_flush(phba, speq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Process all the events on the slow-path EQ
+ */
+ while ((eqe = lpfc_sli4_eq_get(speq))) {
+ lpfc_sli4_sp_handle_eqe(phba, eqe);
+ if (!(++ecount % LPFC_GET_QE_REL_INT))
+ lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
+ }
+
+ /* Always clear and re-arm the slow-path EQ */
+ lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
+
+ /* Catch the no EQ entry condition */
+ if (unlikely(ecount == 0)) {
+ if (phba->intr_type == MSIX)
+ /* MSI-X vector is not shared; log the spurious interrupt */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0357 MSI-X interrupt with no EQE\n");
+ else
+ /* MSI/INTx line may be shared; let other handlers claim it */
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+} /* lpfc_sli4_sp_intr_handler */
+
+/**
+ * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The SCSI FCP fast-path ring events are handled in
+ * interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that
+ * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
+ * equal to the corresponding FCP CQ index.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_queue *fpeq;
+ struct lpfc_eqe *eqe;
+ unsigned long iflag;
+ int ecount = 0;
+ uint32_t fcp_eqidx;
+
+ /* Get the driver's phba structure from the dev_id */
+ fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+ phba = fcp_eq_hdl->phba;
+ fcp_eqidx = fcp_eq_hdl->idx;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /* Get to the EQ struct associated with this vector */
+ fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+
+ /* Check device state for handling interrupt */
+ if (unlikely(lpfc_intr_state_check(phba))) {
+ /* Check again for link_state with lock held */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->link_state < LPFC_LINK_DOWN)
+ /* Flush, clear interrupt, and rearm the EQ */
+ lpfc_sli4_eq_flush(phba, fpeq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Process all the events on the FCP fast-path EQ
+ */
+ while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+ lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
+ if (!(++ecount % LPFC_GET_QE_REL_INT))
+ lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+ }
+
+ /* Always clear and re-arm the fast-path EQ */
+ lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+ if (unlikely(ecount == 0)) {
+ if (phba->intr_type == MSIX)
+ /* MSI-X vector is not shared; log the spurious interrupt */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0358 MSI-X interrupt with no EQE\n");
+ else
+ /* MSI/INTx line may be shared; let other handlers claim it */
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+} /* lpfc_sli4_fp_intr_handler */
+
+/**
+ * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is the device-level interrupt handler to device with SLI-4
+ * interface spec, called from the PCI layer when either MSI or Pin-IRQ
+ * interrupt mode is enabled and there is an event in the HBA which requires
+ * driver attention. This function invokes the slow-path interrupt attention
+ * handling function and fast-path interrupt attention handling function in
+ * turn to process the relevant HBA attention events. This function is called
+ * without any lock held. It gets the hbalock to access and update SLI data
+ * structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ irqreturn_t sp_irq_rc, fp_irq_rc;
+ bool fp_handled = false;
+ uint32_t fcp_eqidx;
+
+ /* Get the driver's phba structure from the dev_id */
+ phba = (struct lpfc_hba *)dev_id;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /*
+ * Invoke slow-path host attention interrupt handling as appropriate.
+ */
+ sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
+
+ /*
+ * Invoke fast-path host attention interrupt handling as appropriate.
+ */
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+ fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
+ &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
+ if (fp_irq_rc == IRQ_HANDLED)
+ fp_handled |= true;
+ }
+
+ return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
+} /* lpfc_sli4_intr_handler */
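+/*
+ * Illustrative sketch (not part of this patch): in MSI-X mode the slow-path
+ * and fast-path handlers above would typically be registered one per vector,
+ * while in MSI/INTx mode only the device-level handler is registered and it
+ * fans out to both. The vector variables and name strings below are
+ * assumptions used only for illustration:
+ *
+ * rc = request_irq(sp_vector, lpfc_sli4_sp_intr_handler, 0, "lpfc-sp", phba);
+ * rc = request_irq(fp_vector, lpfc_sli4_fp_intr_handler, 0, "lpfc-fp",
+ *                  &phba->sli4_hba.fcp_eq_hdl[idx]);
+ * rc = request_irq(pdev->irq, lpfc_sli4_intr_handler, IRQF_SHARED,
+ *                  "lpfc", phba);
+ */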
+
+/**
+ * lpfc_sli4_queue_free - free a queue structure and associated memory
+ * @queue: The queue structure to free.
+ *
+ * This function frees a queue structure and the DMAable memory used for
+ * the host resident queue. This function must be called after destroying the
+ * queue on the HBA.
+ **/
+void
+lpfc_sli4_queue_free(struct lpfc_queue *queue)
+{
+ struct lpfc_dmabuf *dmabuf;
+
+ if (!queue)
+ return;
+
+ while (!list_empty(&queue->page_list)) {
+ list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
+ list);
+ dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+ kfree(queue);
+ return;
+}
+
+/**
+ * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
+ * @phba: The HBA that this queue is being created on.
+ * @entry_size: The size of each queue entry for this queue.
+ * @entry_count: The number of entries that this queue will handle.
+ *
+ * This function allocates a queue structure and the DMAable memory used for
+ * the host resident queue. This function must be called before creating the
+ * queue on the HBA.
+ **/
+struct lpfc_queue *
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
+ uint32_t entry_count)
+{
+ struct lpfc_queue *queue;
+ struct lpfc_dmabuf *dmabuf;
+ int x, total_qe_count;
+ void *dma_pointer;
+
+
+ queue = kzalloc(sizeof(struct lpfc_queue) +
+ (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
+ if (!queue)
+ return NULL;
+ queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
+ INIT_LIST_HEAD(&queue->list);
+ INIT_LIST_HEAD(&queue->page_list);
+ INIT_LIST_HEAD(&queue->child_list);
+ for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ goto out_fail;
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ PAGE_SIZE, &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ goto out_fail;
+ }
+ dmabuf->buffer_tag = x;
+ list_add_tail(&dmabuf->list, &queue->page_list);
+ /* initialize queue's entry array */
+ dma_pointer = dmabuf->virt;
+ for (; total_qe_count < entry_count &&
+ dma_pointer < (PAGE_SIZE + dmabuf->virt);
+ total_qe_count++, dma_pointer += entry_size) {
+ queue->qe[total_qe_count].address = dma_pointer;
+ }
+ }
+ queue->entry_size = entry_size;
+ queue->entry_count = entry_count;
+ queue->phba = phba;
+
+ return queue;
+out_fail:
+ lpfc_sli4_queue_free(queue);
+ return NULL;
+}
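+/*
+ * Illustrative usage sketch (not part of this patch); the entry size, entry
+ * count and imax values here are assumptions chosen only for the example:
+ *
+ * eq = lpfc_sli4_queue_alloc(phba, sizeof(struct lpfc_eqe), 1024);
+ * if (!eq)
+ *         return -ENOMEM;
+ * rc = lpfc_eq_create(phba, eq, imax);
+ *
+ * Teardown runs in the opposite order: destroy the queue on the HBA first,
+ * then free the host memory:
+ *
+ * lpfc_eq_destroy(phba, eq);
+ * lpfc_sli4_queue_free(eq);
+ */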
+
+/**
+ * lpfc_eq_create - Create an Event Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @eq: The queue structure to use to create the event queue.
+ * @imax: The maximum interrupts per second limit.
+ *
+ * This function creates an event queue, as detailed in @eq, on a port,
+ * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @eq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. This
+ * function will send the EQ_CREATE mailbox command to the HBA to setup the
+ * event queue. This function is synchronous and will wait for the mailbox
+ * command to finish before returning.
+ *
+ * On success this function will return zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
+{
+ struct lpfc_mbx_eq_create *eq_create;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ struct lpfc_dmabuf *dmabuf;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint16_t dmult;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_eq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_EQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ eq_create = &mbox->u.mqe.un.eq_create;
+ bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
+ eq->page_count);
+ bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
+ LPFC_EQE_SIZE);
+ bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
+ /* Calculate delay multiplier from maximum interrupts per second */
+ dmult = LPFC_DMULT_CONST/imax - 1;
+ bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
+ dmult);
+ switch (eq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0360 Unsupported EQ count. (%d)\n",
+ eq->entry_count);
+ if (eq->entry_count < 256)
+ return -EINVAL;
+ /* otherwise default to smallest count (drop through) */
+ case 256:
+ bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+ LPFC_EQ_CNT_256);
+ break;
+ case 512:
+ bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+ LPFC_EQ_CNT_512);
+ break;
+ case 1024:
+ bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+ LPFC_EQ_CNT_1024);
+ break;
+ case 2048:
+ bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+ LPFC_EQ_CNT_2048);
+ break;
+ case 4096:
+ bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+ LPFC_EQ_CNT_4096);
+ break;
+ }
+ list_for_each_entry(dmabuf, &eq->page_list, list) {
+ eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->context1 = NULL;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2500 EQ_CREATE mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+ eq->type = LPFC_EQ;
+ eq->subtype = LPFC_NONE;
+ eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
+ if (eq->queue_id == 0xFFFF)
+ status = -ENXIO;
+ eq->host_index = 0;
+ eq->hba_index = 0;
+
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_cq_create - Create a Completion Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @cq: The queue structure to use to create the completion queue.
+ * @eq: The event queue to bind this completion queue to.
+ * @type: The completion queue type (LPFC_MCQ, LPFC_WCQ or LPFC_RCQ).
+ * @subtype: The queue subtype (for example LPFC_FCP or LPFC_ELS).
+ *
+ * This function creates a completion queue, as detailed in @cq, on a port,
+ * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @cq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @eq
+ * is used to indicate which event queue to bind this completion queue to. This
+ * function will send the CQ_CREATE mailbox command to the HBA to setup the
+ * completion queue. This function is synchronous and will wait for the
+ * mailbox command to finish before returning.
+ *
+ * On success this function will return zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
+{
+ struct lpfc_mbx_cq_create *cq_create;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_cq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_CQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ cq_create = &mbox->u.mqe.un.cq_create;
+ bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
+ cq->page_count);
+ bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
+ bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
+ bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
+ switch (cq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0361 Unsupported CQ count. (%d)\n",
+ cq->entry_count);
+ if (cq->entry_count < 256)
+ return -EINVAL;
+ /* otherwise default to smallest count (drop through) */
+ case 256:
+ bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+ LPFC_CQ_CNT_256);
+ break;
+ case 512:
+ bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+ LPFC_CQ_CNT_512);
+ break;
+ case 1024:
+ bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+ LPFC_CQ_CNT_1024);
+ break;
+ }
+ list_for_each_entry(dmabuf, &cq->page_list, list) {
+ cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2501 CQ_CREATE mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ goto out;
+ }
+ cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+ if (cq->queue_id == 0xFFFF) {
+ status = -ENXIO;
+ goto out;
+ }
+ /* link the cq onto the parent eq child list */
+ list_add_tail(&cq->list, &eq->child_list);
+ /* Set up completion queue's type and subtype */
+ cq->type = type;
+ cq->subtype = subtype;
+ cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+ cq->host_index = 0;
+ cq->hba_index = 0;
+out:
+
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return status;
+}
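+/*
+ * Illustrative sketch (not part of this patch): a completion queue is bound
+ * to a previously created event queue, for example an FCP work completion
+ * queue (the queue objects are assumed to have been set up with
+ * lpfc_sli4_queue_alloc()):
+ *
+ * rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
+ * if (rc)
+ *         return rc;
+ */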
+
+/**
+ * lpfc_mq_create - Create a mailbox Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @cq: The completion queue to associate this mailbox queue with.
+ * @subtype: The queue subtype indicating its functionality.
+ *
+ * This function creates a mailbox queue, as detailed in @mq, on a port,
+ * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @mq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. This
+ * function will send the MQ_CREATE mailbox command to the HBA to setup the
+ * mailbox queue. This function is synchronous and will wait for the mailbox
+ * command to finish before returning.
+ *
+ * On success this function will return zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
+ struct lpfc_queue *cq, uint32_t subtype)
+{
+ struct lpfc_mbx_mq_create *mq_create;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_mq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_MQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ mq_create = &mbox->u.mqe.un.mq_create;
+ bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+ mq->page_count);
+ bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+ switch (mq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0362 Unsupported MQ count. (%d)\n",
+ mq->entry_count);
+ if (mq->entry_count < 16)
+ return -EINVAL;
+ /* otherwise default to smallest count (drop through) */
+ case 16:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_16);
+ break;
+ case 32:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_32);
+ break;
+ case 64:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_64);
+ break;
+ case 128:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_128);
+ break;
+ }
+ list_for_each_entry(dmabuf, &mq->page_list, list) {
+ mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2502 MQ_CREATE mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ goto out;
+ }
+ mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
+ if (mq->queue_id == 0xFFFF) {
+ status = -ENXIO;
+ goto out;
+ }
+ mq->type = LPFC_MQ;
+ mq->subtype = subtype;
+ mq->host_index = 0;
+ mq->hba_index = 0;
+
+ /* link the mq onto the parent cq child list */
+ list_add_tail(&mq->list, &cq->child_list);
+out:
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_wq_create - Create a Work Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @wq: The queue structure to use to create the work queue.
+ * @cq: The completion queue to bind this work queue to.
+ * @subtype: The subtype of the work queue indicating its functionality.
+ *
+ * This function creates a work queue, as detailed in @wq, on a port, described
+ * by @phba by sending a WQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @wq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @cq
+ * is used to indicate which completion queue to bind this work queue to. This
+ * function will send the WQ_CREATE mailbox command to the HBA to setup the
+ * work queue. This function is synchronous and will wait for the mailbox
+ * command to finish before returning.
+ *
+ * On success this function will return zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ struct lpfc_queue *cq, uint32_t subtype)
+{
+ struct lpfc_mbx_wq_create *wq_create;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_wq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ wq_create = &mbox->u.mqe.un.wq_create;
+ bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
+ wq->page_count);
+ bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
+ cq->queue_id);
+ list_for_each_entry(dmabuf, &wq->page_list, list) {
+ wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2503 WQ_CREATE mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ goto out;
+ }
+ wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
+ if (wq->queue_id == 0xFFFF) {
+ status = -ENXIO;
+ goto out;
+ }
+ wq->type = LPFC_WQ;
+ wq->subtype = subtype;
+ wq->host_index = 0;
+ wq->hba_index = 0;
+
+ /* link the wq onto the parent cq child list */
+ list_add_tail(&wq->list, &cq->child_list);
+out:
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return status;
+}
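+/*
+ * Illustrative sketch (not part of this patch): a work queue is bound to the
+ * completion queue that will carry its WCQEs; the LPFC_FCP subtype below is
+ * an assumption chosen only for the example:
+ *
+ * rc = lpfc_wq_create(phba, wq, cq, LPFC_FCP);
+ * if (rc)
+ *         return rc;
+ */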
+
+/**
+ * lpfc_rq_create - Create a Receive Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @hrq: The queue structure to use to create the header receive queue.
+ * @drq: The queue structure to use to create the data receive queue.
+ * @cq: The completion queue to bind this receive queue to.
+ * @subtype: The queue subtype indicating its functionality.
+ *
+ * This function creates a receive buffer queue pair, as detailed in @hrq and
+ * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
+ * to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
+ * struct is used to get the entry count that is necessary to determine the
+ * number of pages to use for this queue. The @cq is used to indicate which
+ * completion queue to bind received buffers that are posted to these queues to.
+ * This function will send the RQ_CREATE mailbox command to the HBA to setup the
+ * receive queue pair. This function is synchronous and will wait for the
+ * mailbox command to finish before returning.
+ *
+ * On success this function will return zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
+{
+ struct lpfc_mbx_rq_create *rq_create;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ if (hrq->entry_count != drq->entry_count)
+ return -EINVAL;
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_rq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ rq_create = &mbox->u.mqe.un.rq_create;
+ switch (hrq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2535 Unsupported RQ count. (%d)\n",
+ hrq->entry_count);
+ if (hrq->entry_count < 512)
+ return -EINVAL;
+ /* otherwise default to smallest count (drop through) */
+ case 512:
+ bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_512);
+ break;
+ case 1024:
+ bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_1024);
+ break;
+ case 2048:
+ bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_2048);
+ break;
+ case 4096:
+ bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_4096);
+ break;
+ }
+ bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+ hrq->page_count);
+ bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+ LPFC_HDR_BUF_SIZE);
+ list_for_each_entry(dmabuf, &hrq->page_list, list) {
+ rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2504 RQ_CREATE mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ goto out;
+ }
+ hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+ if (hrq->queue_id == 0xFFFF) {
+ status = -ENXIO;
+ goto out;
+ }
+ hrq->type = LPFC_HRQ;
+ hrq->subtype = subtype;
+ hrq->host_index = 0;
+ hrq->hba_index = 0;
+
+ /* now create the data queue */
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ switch (drq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2536 Unsupported RQ count. (%d)\n",
+ drq->entry_count);
+ if (drq->entry_count < 512)
+ return -EINVAL;
+ /* otherwise default to smallest count (drop through) */
+ case 512:
+ bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_512);
+ break;
+ case 1024:
+ bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_1024);
+ break;
+ case 2048:
+ bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_2048);
+ break;
+ case 4096:
+ bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_4096);
+ break;
+ }
+ bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+ drq->page_count);
+ bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+ LPFC_DATA_BUF_SIZE);
+ list_for_each_entry(dmabuf, &drq->page_list, list) {
+ rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ status = -ENXIO;
+ goto out;
+ }
+ drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+ if (drq->queue_id == 0xFFFF) {
+ status = -ENXIO;
+ goto out;
+ }
+ drq->type = LPFC_DRQ;
+ drq->subtype = subtype;
+ drq->host_index = 0;
+ drq->hba_index = 0;
+
+ /* link the header and data RQs onto the parent cq child list */
+ list_add_tail(&hrq->list, &cq->child_list);
+ list_add_tail(&drq->list, &cq->child_list);
+
+out:
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return status;
+}
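+/*
+ * Illustrative sketch (not part of this patch): the header and data receive
+ * queues are created as a pair bound to one completion queue; both must be
+ * allocated with the same entry count or the call fails with -EINVAL. The
+ * subtype value is left symbolic here:
+ *
+ * rc = lpfc_rq_create(phba, hrq, drq, cq, subtype);
+ * if (rc)
+ *         return rc;
+ */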
+
+/**
+ * lpfc_eq_destroy - Destroy an Event Queue on the HBA
+ * @phba: HBA structure that indicates port to destroy a queue on.
+ * @eq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @eq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @eq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ if (!eq)
+ return -ENODEV;
+ mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_eq_destroy) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_EQ_DESTROY,
+ length, LPFC_SLI4_MBX_EMBED);
+ bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
+ eq->queue_id);
+ mbox->vport = eq->phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+
+ rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2505 EQ_DESTROY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+
+ /* Remove eq from any list */
+ list_del_init(&eq->list);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, eq->phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
+ * @phba: HBA structure that indicates port to destroy a queue on.
+ * @cq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @cq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @cq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ if (!cq)
+ return -ENODEV;
+ mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_cq_destroy) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_CQ_DESTROY,
+ length, LPFC_SLI4_MBX_EMBED);
+ bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
+ cq->queue_id);
+ mbox->vport = cq->phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2506 CQ_DESTROY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+ /* Remove cq from any list */
+ list_del_init(&cq->list);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, cq->phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
+ * @phba: HBA structure that indicates port to destroy a queue on.
+ * @mq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @mq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @mq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ if (!mq)
+ return -ENODEV;
+ mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_mq_destroy) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_MQ_DESTROY,
+ length, LPFC_SLI4_MBX_EMBED);
+ bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
+ mq->queue_id);
+ mbox->vport = mq->phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2507 MQ_DESTROY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+ /* Remove mq from any list */
+ list_del_init(&mq->list);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, mq->phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_wq_destroy - Destroy a Work Queue on the HBA
+ * @phba: HBA structure that indicates port to destroy a queue on.
+ * @wq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @wq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @wq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ if (!wq)
+ return -ENODEV;
+ mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_wq_destroy) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
+ length, LPFC_SLI4_MBX_EMBED);
+ bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
+ wq->queue_id);
+ mbox->vport = wq->phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2508 WQ_DESTROY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+ /* Remove wq from any list */
+ list_del_init(&wq->list);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, wq->phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
+ * @phba: HBA structure that indicates port to destroy a queue on.
+ * @hrq: The queue structure associated with the header receive queue.
+ * @drq: The queue structure associated with the data receive queue.
+ *
+ * This function destroys the header and data receive queues, as detailed in
+ * @hrq and @drq, by sending a mailbox command, specific to the type of queue,
+ * to the HBA.
+ *
+ * The @hrq and @drq structs are used to get the queue IDs of the queues to
+ * destroy.
+ *
+ * On success this function will return zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ struct lpfc_queue *drq)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ if (!hrq || !drq)
+ return -ENODEV;
+ mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_rq_destroy) -
+ sizeof(struct mbox_header));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
+ length, LPFC_SLI4_MBX_EMBED);
+ bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+ hrq->queue_id);
+ mbox->vport = hrq->phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2509 RQ_DESTROY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, hrq->phba->mbox_mem_pool);
+ return -ENXIO;
+ }
+ bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+ drq->queue_id);
+ rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2510 RQ_DESTROY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+ list_del_init(&hrq->list);
+ list_del_init(&drq->list);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, hrq->phba->mbox_mem_pool);
+ return status;
+}
+
+/**
+ * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
+ * @phba: pointer to lpfc hba data structure.
+ * @pdma_phys_addr0: Physical address of the 1st SGL page.
+ * @pdma_phys_addr1: Physical address of the 2nd SGL page.
+ * @xritag: the xritag that ties this io to the SGL pages.
+ *
+ * This routine will post the sgl pages for the IO that has the xritag
+ * that is in the iocbq structure. The xritag is assigned during iocbq
+ * creation and persists for as long as the driver is loaded.
+ * If the caller has fewer than 256 scatter gather segments to map then
+ * pdma_phys_addr1 should be 0.
+ * If the caller needs to map more than 256 scatter gather segments then
+ * pdma_phys_addr1 should be a valid physical address.
+ * Physical addresses for SGLs must be 64 byte aligned.
+ * If two SGL pages are mapped then the first one must have 256 entries and
+ * the second can have between 1 and 256 entries.
+ *
+ * Return codes:
+ * 0 - Success
+ * -ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_post_sgl(struct lpfc_hba *phba,
+ dma_addr_t pdma_phys_addr0,
+ dma_addr_t pdma_phys_addr1,
+ uint16_t xritag)
+{
+ struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ if (xritag == NO_XRI) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0364 Invalid param:\n");
+ return -EINVAL;
+ }
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ sizeof(struct lpfc_mbx_post_sgl_pages) -
+ sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+
+ post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
+ &mbox->u.mqe.un.post_sgl_pages;
+ bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
+ bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
+
+ post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(pdma_phys_addr0));
+ post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
+
+ post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(pdma_phys_addr1));
+ post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2511 POST_SGL mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ }
+ return rc;
+}
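+
+/*
+ * Illustrative sketch only; the demo_* helpers are hypothetical and not
+ * driver symbols. They restate the 64-bit DMA address split performed
+ * above by putPaddrLow()/putPaddrHigh() when a SGL page pair is filled:
+ * the low 32 bits land in sgl_pg*_addr_lo and the high 32 bits in
+ * sgl_pg*_addr_hi, each converted to little endian before posting.
+ */
+static inline uint32_t demo_paddr_lo(dma_addr_t addr)
+{
+	/* Low 32 bits of the page address. */
+	return (uint32_t)(addr & 0xffffffff);
+}
+
+static inline uint32_t demo_paddr_hi(dma_addr_t addr)
+{
+	/* High 32 bits of the page address (zero on 32-bit DMA). */
+	return (uint32_t)(((uint64_t)addr) >> 32);
+}
+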
+/**
+ * lpfc_sli4_remove_all_sgl_pages - Remove all posted sgl pages from the HBA
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will remove all of the sgl pages registered with the hba.
+ *
+ * Return codes:
+ * 0 - Success
+ * -ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
+ LPFC_SLI4_MBX_EMBED);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_sli4_next_xritag - Get an xritag for the io
+ * @phba: Pointer to HBA context object.
+ *
+ * This function gets an xritag for the iocb. If there is no unused xritag
+ * it will log an error and return 0xffff, which is not a valid xritag.
+ * Otherwise the function returns the newly allocated xritag.
+ * The caller is not required to hold any lock.
+ **/
+uint16_t
+lpfc_sli4_next_xritag(struct lpfc_hba *phba)
+{
+ uint16_t xritag;
+
+ spin_lock_irq(&phba->hbalock);
+ xritag = phba->sli4_hba.next_xri;
+ if ((xritag != (uint16_t) -1) && xritag <
+ (phba->sli4_hba.max_cfg_param.max_xri
+ + phba->sli4_hba.max_cfg_param.xri_base)) {
+ phba->sli4_hba.next_xri++;
+ phba->sli4_hba.max_cfg_param.xri_used++;
+ spin_unlock_irq(&phba->hbalock);
+ return xritag;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2004 Failed to allocate XRI.last XRITAG is %d"
+ " Max XRI is %d, Used XRI is %d\n",
+ phba->sli4_hba.next_xri,
+ phba->sli4_hba.max_cfg_param.max_xri,
+ phba->sli4_hba.max_cfg_param.xri_used);
+ return -1;
+}
+
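+/*
+ * Illustrative sketch only; demo_tag_in_range() is hypothetical and not a
+ * driver symbol. It restates the bounds check used above: a tag is handed
+ * out only while the running counter stays inside [xri_base, xri_base +
+ * max_xri), and 0xffff is reserved as the "no XRI" value.
+ */
+static inline int demo_tag_in_range(uint32_t next, uint32_t base, uint32_t max)
+{
+	if (next == 0xffff)		/* reserved NO_XRI value */
+		return 0;
+	return next < base + max;	/* valid tags occupy [base, base + max) */
+}
+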
+/**
+ * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of the driver's sgl pages to the
+ * HBA using a non-embedded mailbox command. No Lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
+ **/
+int
+lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, pg_pairs;
+ uint32_t mbox_tmo;
+ uint16_t xritag_start = 0;
+ int els_xri_cnt, rc = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* The number of sgls to be posted */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+ reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2559 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2560 Failed to allocate mbox cmd memory\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate DMA memory and set up the non-embedded mailbox command */
+ alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0285 Allocated DMA memory size (%d) is "
+ "less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ if (unlikely(!mbox->sge_array)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2525 Failed to get the non-embedded SGE "
+ "virtual address\n");
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+ viraddr = mbox->sge_array->addr[0];
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
+ sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(0));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(0));
+ /* Keep the first xritag on the list */
+ if (pg_pairs == 0)
+ xritag_start = sglq_entry->sli4_xritag;
+ sgl_pg_pairs++;
+ }
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+ pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+ /* Perform endian conversion if necessary */
+ sgl->word0 = cpu_to_le32(sgl->word0);
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2513 POST_SGL_BLOCK mailbox command failed "
+ "status x%x add_status x%x mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @cnt: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @cnt scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
+ * No Lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
+ int cnt)
+{
+ struct lpfc_scsi_buf *psb;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, pg_pairs;
+ uint32_t mbox_tmo;
+ uint16_t xritag_start = 0;
+ int rc = 0;
+ uint32_t shdr_status, shdr_add_status;
+ dma_addr_t pdma_phys_bpl1;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* Calculate the requested length of the dma memory */
+ reqlen = cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0217 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0283 Failed to allocate mbox cmd memory\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate DMA memory and set up the non-embedded mailbox command */
+ alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2561 Allocated DMA memory size (%d) is "
+ "less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ if (unlikely(!mbox->sge_array)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2565 Failed to get the non-embedded SGE "
+ "virtual address\n");
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+ viraddr = mbox->sge_array->addr[0];
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ pg_pairs = 0;
+ list_for_each_entry(psb, sblist, list) {
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+ if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+ pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
+ else
+ pdma_phys_bpl1 = 0;
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+ /* Keep the first xritag on the list */
+ if (pg_pairs == 0)
+ xritag_start = psb->cur_iocbq.sli4_xritag;
+ sgl_pg_pairs++;
+ pg_pairs++;
+ }
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+ /* Perform endian conversion if necessary */
+ sgl->word0 = cpu_to_le32(sgl->word0);
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2564 POST_SGL_BLOCK mailbox command failed "
+ "status x%x add_status x%x mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
+ * @phba: pointer to lpfc_hba struct that the frame was received on
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ *
+ * This function checks the fields in the @fc_hdr to see if the FC frame is a
+ * valid type of frame that the LPFC driver will handle. This function will
+ * return zero if the frame is a valid frame or a non-zero value when the
+ * frame does not pass the check.
+ **/
+static int
+lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
+{
+ char *rctl_names[] = FC_RCTL_NAMES_INIT;
+ char *type_names[] = FC_TYPE_NAMES_INIT;
+ struct fc_vft_header *fc_vft_hdr;
+
+ switch (fc_hdr->fh_r_ctl) {
+ case FC_RCTL_DD_UNCAT: /* uncategorized information */
+ case FC_RCTL_DD_SOL_DATA: /* solicited data */
+ case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
+ case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
+ case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
+ case FC_RCTL_DD_DATA_DESC: /* data descriptor */
+ case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
+ case FC_RCTL_DD_CMD_STATUS: /* command status */
+ case FC_RCTL_ELS_REQ: /* extended link services request */
+ case FC_RCTL_ELS_REP: /* extended link services reply */
+ case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
+ case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
+ case FC_RCTL_BA_NOP: /* basic link service NOP */
+ case FC_RCTL_BA_ABTS: /* basic link service abort */
+ case FC_RCTL_BA_RMC: /* remove connection */
+ case FC_RCTL_BA_ACC: /* basic accept */
+ case FC_RCTL_BA_RJT: /* basic reject */
+ case FC_RCTL_BA_PRMT:
+ case FC_RCTL_ACK_1: /* acknowledge_1 */
+ case FC_RCTL_ACK_0: /* acknowledge_0 */
+ case FC_RCTL_P_RJT: /* port reject */
+ case FC_RCTL_F_RJT: /* fabric reject */
+ case FC_RCTL_P_BSY: /* port busy */
+ case FC_RCTL_F_BSY: /* fabric busy to data frame */
+ case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
+ case FC_RCTL_LCR: /* link credit reset */
+ case FC_RCTL_END: /* end */
+ break;
+ case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
+ fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
+ fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
+ return lpfc_fc_frame_check(phba, fc_hdr);
+ default:
+ goto drop;
+ }
+ switch (fc_hdr->fh_type) {
+ case FC_TYPE_BLS:
+ case FC_TYPE_ELS:
+ case FC_TYPE_FCP:
+ case FC_TYPE_CT:
+ break;
+ case FC_TYPE_IP:
+ case FC_TYPE_ILS:
+ default:
+ goto drop;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "2538 Received frame rctl:%s type:%s\n",
+ rctl_names[fc_hdr->fh_r_ctl],
+ type_names[fc_hdr->fh_type]);
+ return 0;
+drop:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+ "2539 Dropped frame rctl:%s type:%s\n",
+ rctl_names[fc_hdr->fh_r_ctl],
+ type_names[fc_hdr->fh_type]);
+ return 1;
+}
+
+/**
+ * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ *
+ * This function processes the FC header to retrieve the VFI from the
+ * Virtual Fabric Tagging (VFT) header, if one exists. This function will
+ * return the VFI if a VFT header exists or 0 if it does not.
+ **/
+static uint32_t
+lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
+{
+ struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
+
+ if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
+ return 0;
+ return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
+}
+
+/**
+ * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
+ * @phba: Pointer to the HBA structure to search for the vport on
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ * @fcfi: The FC Fabric ID that the frame came from
+ *
+ * This function searches the @phba for a vport that matches the content of the
+ * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
+ * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
+ * returns the matching vport pointer or NULL if unable to match frame to a
+ * vport.
+ **/
+static struct lpfc_vport *
+lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
+ uint16_t fcfi)
+{
+ struct lpfc_vport **vports;
+ struct lpfc_vport *vport = NULL;
+ int i;
+ uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
+ fc_hdr->fh_d_id[1] << 8 |
+ fc_hdr->fh_d_id[2]);
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ if (phba->fcf.fcfi == fcfi &&
+ vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
+ vports[i]->fc_myDID == did) {
+ vport = vports[i];
+ break;
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ return vport;
+}
+
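+/*
+ * Illustrative sketch only; demo_fc_id_to_u32() is hypothetical and not a
+ * driver symbol. It restates how the vport match above folds the three
+ * big-endian bytes of the FC header D_ID into a single 24-bit value.
+ */
+static inline uint32_t demo_fc_id_to_u32(const uint8_t id[3])
+{
+	/* id[0] is the most significant byte of the 24-bit FC address. */
+	return (id[0] << 16) | (id[1] << 8) | id[2];
+}
+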
+/**
+ * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
+ * @vport: pointer to the vport on which this frame was received
+ * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
+ *
+ * This function searches through the existing incomplete sequences that have
+ * been sent to this @vport. If the frame matches one of the incomplete
+ * sequences then the dbuf in the @dmabuf is added to the list of frames that
+ * make up that sequence. If no sequence is found that matches this frame then
+ * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
+ * This function returns a pointer to the first dmabuf in the sequence list that
+ * the frame was linked to.
+ **/
+static struct hbq_dmabuf *
+lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+ struct fc_frame_header *new_hdr;
+ struct fc_frame_header *temp_hdr;
+ struct lpfc_dmabuf *d_buf;
+ struct lpfc_dmabuf *h_buf;
+ struct hbq_dmabuf *seq_dmabuf = NULL;
+ struct hbq_dmabuf *temp_dmabuf = NULL;
+
+ new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+ /* Use the hdr_buf to find the sequence that this frame belongs to */
+ list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+ temp_hdr = (struct fc_frame_header *)h_buf->virt;
+ if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
+ (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
+ (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
+ continue;
+ /* found a pending sequence that matches this frame */
+ seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+ break;
+ }
+ if (!seq_dmabuf) {
+ /*
+ * This indicates first frame received for this sequence.
+ * Queue the buffer on the vport's rcv_buffer_list.
+ */
+ list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+ return dmabuf;
+ }
+ temp_hdr = seq_dmabuf->hbuf.virt;
+ if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
+ list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
+ return dmabuf;
+ }
+ /* find the correct place in the sequence to insert this frame */
+ list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
+ temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
+ /*
+ * If the frame's sequence count is greater than the frame on
+ * the list then insert the frame right after this frame
+ */
+ if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
+ list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
+ return seq_dmabuf;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * lpfc_seq_complete - Indicates if a sequence is complete
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function checks the sequence, starting with the frame described by
+ * @dmabuf, to see if all the frames associated with this sequence are present.
+ * The frames associated with this sequence are linked to the @dmabuf using the
+ * dbuf list. This function looks for three things: 1) that the first frame has
+ * a sequence count of zero, 2) that there is a frame with the last frame of
+ * sequence bit set, and 3) that there are no holes in the sequence count. The
+ * function will
+ * return 1 when the sequence is complete, otherwise it will return 0.
+ **/
+static int
+lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
+{
+ struct fc_frame_header *hdr;
+ struct lpfc_dmabuf *d_buf;
+ struct hbq_dmabuf *seq_dmabuf;
+ uint32_t fctl;
+ int seq_count = 0;
+
+ hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+ /* make sure first frame of sequence has a sequence count of zero */
+ if (hdr->fh_seq_cnt != seq_count)
+ return 0;
+ fctl = (hdr->fh_f_ctl[0] << 16 |
+ hdr->fh_f_ctl[1] << 8 |
+ hdr->fh_f_ctl[2]);
+ /* If last frame of sequence we can return success. */
+ if (fctl & FC_FC_END_SEQ)
+ return 1;
+ list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
+ seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+ /* If there is a hole in the sequence count then fail. */
+ if (++seq_count != hdr->fh_seq_cnt)
+ return 0;
+ fctl = (hdr->fh_f_ctl[0] << 16 |
+ hdr->fh_f_ctl[1] << 8 |
+ hdr->fh_f_ctl[2]);
+ /* If last frame of sequence we can return success. */
+ if (fctl & FC_FC_END_SEQ)
+ return 1;
+ }
+ return 0;
+}
+
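+/*
+ * Illustrative sketch only; demo_seq_complete() is hypothetical and not a
+ * driver symbol. It restates the completeness rule above over plain arrays:
+ * the sequence counts must start at zero and increase without holes, and
+ * some frame must carry the end-of-sequence bit in its F_CTL word.
+ */
+static int demo_seq_complete(const uint16_t *seq_cnt, const uint32_t *fctl,
+			     int nframes)
+{
+	int i;
+
+	for (i = 0; i < nframes; i++) {
+		if (seq_cnt[i] != i)		/* bad first count or a hole */
+			return 0;
+		if (fctl[i] & FC_FC_END_SEQ)	/* last frame of the sequence */
+			return 1;
+	}
+	return 0;				/* no end-of-sequence frame yet */
+}
+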
+/**
+ * lpfc_prep_seq - Prep sequence for ULP processing
+ * @vport: Pointer to the vport on which this sequence was received
+ * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function takes a sequence, described by a list of frames, and creates
+ * a list of iocbq structures to describe the sequence. This iocbq list is
+ * then passed to the generic unsolicited sequence handler. This routine
+ * returns a pointer to the first iocbq in the list. If the function is unable
+ * to allocate an iocbq then it will throw out the received frames that could
+ * not be described and return a pointer to the first iocbq. If unable to
+ * allocate any iocbqs (including the first) this function will return NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
+{
+ struct lpfc_dmabuf *d_buf, *n_buf;
+ struct lpfc_iocbq *first_iocbq, *iocbq;
+ struct fc_frame_header *fc_hdr;
+ uint32_t sid;
+
+ fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+ /* remove from receive buffer list */
+ list_del_init(&seq_dmabuf->hbuf.list);
+ /* get the Remote Port's SID */
+ sid = (fc_hdr->fh_s_id[0] << 16 |
+ fc_hdr->fh_s_id[1] << 8 |
+ fc_hdr->fh_s_id[2]);
+ /* Get an iocbq struct to fill in. */
+ first_iocbq = lpfc_sli_get_iocbq(vport->phba);
+ if (first_iocbq) {
+ /* Initialize the first IOCB. */
+ first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
+ first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
+ first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
+ first_iocbq->iocb.unsli3.rcvsli3.vpi =
+ vport->vpi + vport->phba->vpi_base;
+ /* put the first buffer into the first IOCBq */
+ first_iocbq->context2 = &seq_dmabuf->dbuf;
+ first_iocbq->context3 = NULL;
+ first_iocbq->iocb.ulpBdeCount = 1;
+ first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
+ first_iocbq->iocb.un.rcvels.remoteID = sid;
+ }
+ iocbq = first_iocbq;
+ /*
+ * Each IOCBq can have two Buffers assigned, so go through the list
+ * of buffers for this sequence and save two buffers in each IOCBq
+ */
+ list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
+ if (!iocbq) {
+ lpfc_in_buf_free(vport->phba, d_buf);
+ continue;
+ }
+ if (!iocbq->context3) {
+ iocbq->context3 = d_buf;
+ iocbq->iocb.ulpBdeCount++;
+ iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
+ } else {
+ iocbq = lpfc_sli_get_iocbq(vport->phba);
+ if (!iocbq) {
+ if (first_iocbq) {
+ first_iocbq->iocb.ulpStatus =
+ IOSTAT_FCP_RSP_ERROR;
+ first_iocbq->iocb.un.ulpWord[4] =
+ IOERR_NO_RESOURCES;
+ }
+ lpfc_in_buf_free(vport->phba, d_buf);
+ continue;
+ }
+ iocbq->context2 = d_buf;
+ iocbq->context3 = NULL;
+ iocbq->iocb.ulpBdeCount = 1;
+ iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
+ iocbq->iocb.un.rcvels.remoteID = sid;
+ list_add_tail(&iocbq->list, &first_iocbq->list);
+ }
+ }
+ return first_iocbq;
+}
+
+/**
+ * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. This function processes all
+ * the received buffers and gives them to upper layers when a received buffer
+ * indicates that it is the final frame in the sequence. The interrupt
+ * service routine processes received buffers at interrupt contexts and adds
+ * received dma buffers to the rb_pend_list queue and signals the worker thread.
+ * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
+ * appropriate receive function when the final frame in a sequence is received.
+ **/
+int
+lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
+{
+ LIST_HEAD(cmplq);
+ struct hbq_dmabuf *dmabuf, *seq_dmabuf;
+ struct fc_frame_header *fc_hdr;
+ struct lpfc_vport *vport;
+ uint32_t fcfi;
+ struct lpfc_iocbq *iocbq;
+
+ /* Clear hba flag and get all received buffers into the cmplq */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
+ list_splice_init(&phba->rb_pend_list, &cmplq);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Process each received buffer */
+ while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
+ fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+ /* check to see if this a valid type of frame */
+ if (lpfc_fc_frame_check(phba, fc_hdr)) {
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ continue;
+ }
+ fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
+ vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
+ if (!vport) {
+ /* throw out the frame */
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ continue;
+ }
+ /* Link this frame */
+ seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
+ if (!seq_dmabuf) {
+ /* unable to add frame to vport - throw it out */
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ continue;
+ }
+ /* If not last frame in sequence continue processing frames. */
+ if (!lpfc_seq_complete(seq_dmabuf)) {
+ /*
+ * When saving off frames, post a new one and mark this
+ * frame to be freed when it is finished.
+ */
+ lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
+ dmabuf->tag = -1;
+ continue;
+ }
+ fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+ iocbq = lpfc_prep_seq(vport, seq_dmabuf);
+ if (!lpfc_complete_unsol_iocb(phba,
+ &phba->sli.ring[LPFC_ELS_RING],
+ iocbq, fc_hdr->fh_r_ctl,
+ fc_hdr->fh_type))
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2540 Ring %d handler: unexpected Rctl "
+ "x%x Type x%x received\n",
+ LPFC_ELS_RING,
+ fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec. This routine
+ * posts a PAGE_SIZE memory region to the port to hold up to
+ * PAGE_SIZE / 64 rpi context headers.
+ *
+ * This routine does not require any locks. Its usage is expected
+ * to be driver load or reset recovery when the driver is
+ * sequential.
+ *
+ * Return codes
+ * 0 - successful
+ * -EIO - The mailbox failed to complete successfully.
+ * When this error occurs, the driver is not guaranteed
+ * to have any rpi regions posted to the device and
+ * must either attempt to repost the regions or take a
+ * fatal error.
+ **/
+int
+lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
+{
+ struct lpfc_rpi_hdr *rpi_page;
+ uint32_t rc = 0;
+
+ /* Post all rpi memory regions to the port. */
+ list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+ rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2008 Error %d posting all rpi "
+ "headers\n", rc);
+ rc = -EIO;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi_page: pointer to the rpi memory region.
+ *
+ * This routine is invoked to post a single rpi header to the
+ * HBA consistent with the SLI-4 interface spec. This memory region
+ * maps up to 64 rpi context regions.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
+ uint32_t rc = 0;
+ uint32_t mbox_tmo;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* The port is notified of the header region via a mailbox command. */
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2001 Unable to allocate memory for issuing "
+ "SLI_CONFIG_SPECIAL mailbox command\n");
+ return -ENOMEM;
+ }
+
+ /* Post all rpi memory regions to the port. */
+ hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
+ sizeof(struct lpfc_mbx_post_hdr_tmpl) -
+ sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+ bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+ hdr_tmpl, rpi_page->page_count);
+ bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
+ rpi_page->start_rpi);
+ hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
+ hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2514 POST_RPI_HDR mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate an rpi from the driver's rpi bitmask.
+ * If the pool of available rpis is running low, it also allocates and posts
+ * another rpi header page to the port so the pool can grow.
+ *
+ * Returns
+ * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
+ * LPFC_RPI_ALLOC_ERROR if no rpis are available.
+ **/
+int
+lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
+{
+ int rpi;
+ uint16_t max_rpi, rpi_base, rpi_limit;
+ uint16_t rpi_remaining;
+ struct lpfc_rpi_hdr *rpi_hdr;
+
+ max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
+ rpi_limit = phba->sli4_hba.next_rpi;
+
+ /*
+ * The valid rpi range is not guaranteed to be zero-based. Start
+ * the search at the rpi_base as reported by the port.
+ */
+ spin_lock_irq(&phba->hbalock);
+ rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
+ if (rpi >= rpi_limit || rpi < rpi_base)
+ rpi = LPFC_RPI_ALLOC_ERROR;
+ else {
+ set_bit(rpi, phba->sli4_hba.rpi_bmask);
+ phba->sli4_hba.max_cfg_param.rpi_used++;
+ phba->sli4_hba.rpi_count++;
+ }
+
+ /*
+ * Don't try to allocate more rpi header regions if the device limit
+ * on available rpis max has been exhausted.
+ */
+ if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
+ (phba->sli4_hba.rpi_count >= max_rpi)) {
+ spin_unlock_irq(&phba->hbalock);
+ return rpi;
+ }
+
+ /*
+ * If the driver is running low on rpi resources, allocate another
+ * page now. Note that the next_rpi value is used because
+ * it represents how many are actually in use whereas max_rpi notes
+ * how many are supported max by the device.
+ */
+ rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
+ phba->sli4_hba.rpi_count;
+ spin_unlock_irq(&phba->hbalock);
+ if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
+ rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+ if (!rpi_hdr) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2002 Error Could not grow rpi "
+ "count\n");
+ } else {
+ lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
+ }
+ }
+
+ return rpi;
+}
+
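+/*
+ * Illustrative sketch only; demo_alloc_id() is hypothetical and not a
+ * driver symbol. It isolates the bitmap-allocator pattern used above:
+ * search the mask for a clear bit at or after @base, claim it, or fail
+ * when the pool is exhausted. Locking is the caller's responsibility,
+ * just as lpfc_sli4_alloc_rpi() holds hbalock around this step.
+ */
+static int demo_alloc_id(unsigned long *bmask, int base, int limit)
+{
+	int id = find_next_zero_bit(bmask, limit, base);
+
+	if (id >= limit)
+		return -1;		/* pool exhausted */
+	set_bit(id, bmask);		/* claim the id */
+	return id;
+}
+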
+/**
+ * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi: the rpi to release back to the pool.
+ *
+ * This routine is invoked to release an rpi to the pool of
+ * available rpis maintained by the driver.
+ **/
+void
+lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
+{
+ spin_lock_irq(&phba->hbalock);
+ clear_bit(rpi, phba->sli4_hba.rpi_bmask);
+ phba->sli4_hba.rpi_count--;
+ phba->sli4_hba.max_cfg_param.rpi_used--;
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the memory region that
+ * holds the driver's rpi bitmask.
+ **/
+void
+lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
+{
+ kfree(phba->sli4_hba.rpi_bmask);
+}
+
+/**
+ * lpfc_sli4_resume_rpi - Resume the rpi assigned to a remote node
+ * @ndlp: pointer to the node whose rpi is to be resumed.
+ *
+ * This routine is invoked to issue a RESUME_RPI mailbox command so that
+ * the port resumes the rpi associated with @ndlp.
+ **/
+int
+lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_hba *phba = ndlp->phba;
+ int rc;
+
+ /* The port is notified of the header region via a mailbox command. */
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ /* Post all rpi memory regions to the port. */
+ lpfc_resume_rpi(mboxq, ndlp);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2010 Resume RPI Mailbox failed "
+ "status %d, mbxStatus x%x\n", rc,
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_sli4_init_vpi - Initialize a vpi with the port
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: vpi value to activate with the port.
+ *
+ * This routine is invoked to activate a vpi with the
+ * port when the host intends to use vports with a
+ * nonzero vpi.
+ *
+ * Returns:
+ * 0 success
+ * negative errno value otherwise
+ **/
+int
+lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
+{
+ LPFC_MBOXQ_t *mboxq;
+ int rc = 0;
+ uint32_t mbox_tmo;
+
+ if (vpi == 0)
+ return -EINVAL;
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+ lpfc_init_vpi(mboxq, vpi);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2022 INIT VPI Mailbox failed "
+ "status %d, mbxStatus x%x\n", rc,
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ rc = -EIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This routine is the completion handler for the ADD_FCF_RECORD nonembedded
+ * mailbox command. It checks the mailbox status in the subheader, logs any
+ * failure other than STATUS_FCF_IN_USE, and frees the mailbox resources.
+ **/
+static void
+lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ void *virt_addr;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+
+ virt_addr = mboxq->sge_array->addr[0];
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+ if ((shdr_status || shdr_add_status) &&
+ (shdr_status != STATUS_FCF_IN_USE))
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2558 ADD_FCF_RECORD mailbox failed with "
+ "status x%x add_status x%x\n",
+ shdr_status, shdr_add_status);
+
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the initialized fcf record to add.
+ *
+ * This routine is invoked to manually add a single FCF record. The caller
+ * must pass a completely initialized FCF_Record. This routine takes
+ * care of the nonembedded mailbox operations.
+ **/
+int
+lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
+{
+ int rc = 0;
+ LPFC_MBOXQ_t *mboxq;
+ uint8_t *bytep;
+ void *virt_addr;
+ dma_addr_t phys_addr;
+ struct lpfc_mbx_sge sge;
+ uint32_t alloc_len, req_len;
+ uint32_t fcfindex;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2009 Failed to allocate mbox for ADD_FCF cmd\n");
+ return -ENOMEM;
+ }
+
+ req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
+ sizeof(uint32_t);
+
+ /* Allocate DMA memory and set up the non-embedded mailbox command */
+ alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
+ req_len, LPFC_SLI4_MBX_NEMBED);
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2523 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return -ENOMEM;
+ }
+
+ /*
+ * Get the first SGE entry from the non-embedded DMA memory. This
+ * routine only uses a single SGE.
+ */
+ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+ phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+ if (unlikely(!mboxq->sge_array)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2526 Failed to get the non-embedded SGE "
+ "virtual address\n");
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return -ENOMEM;
+ }
+ virt_addr = mboxq->sge_array->addr[0];
+ /*
+ * Configure the FCF record for FCFI 0. This is the driver's
+ * hardcoded default and gets used in nonFIP mode.
+ */
+ fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
+ bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+ lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
+
+ /*
+ * Copy the fcf_index and the FCF Record Data. The data starts after
+ * the FCoE header plus word10. The data copy needs to be endian
+ * correct.
+ */
+ bytep += sizeof(uint32_t);
+ lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2515 ADD_FCF_RECORD mailbox failed with "
+ "status 0x%x\n", rc);
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ rc = -EIO;
+ } else
+ rc = 0;
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the fcf record to write the default data.
+ * @fcf_index: FCF table entry index.
+ *
+ * This routine is invoked to build the driver's default FCF record. The
+ * values used are hardcoded. This routine handles memory initialization.
+ *
+ **/
+void
+lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
+ struct fcf_record *fcf_record,
+ uint16_t fcf_index)
+{
+ memset(fcf_record, 0, sizeof(struct fcf_record));
+ fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
+ fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
+ fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
+ bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
+ bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
+ bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
+ bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
+ bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
+ bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
+ bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
+ bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
+ bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
+ bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
+ bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
+ bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
+ LPFC_FCF_FPMA | LPFC_FCF_SPMA);
+ /* Set the VLAN bit map */
+ if (phba->valid_vlan) {
+ fcf_record->vlan_bitmap[phba->vlan_id / 8]
+ = 1 << (phba->vlan_id % 8);
+ }
+}
+
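+/*
+ * Illustrative sketch only; demo_set_vlan_bit() is hypothetical and not a
+ * driver symbol. It restates the VLAN bitmap indexing used above: byte =
+ * vlan_id / 8, bit = vlan_id % 8. The sketch ORs the bit in, which is the
+ * conventional bitmap operation; the function above assigns directly since
+ * only a single VLAN id is ever recorded in the default record.
+ */
+static inline void demo_set_vlan_bit(uint8_t *bitmap, uint16_t vlan_id)
+{
+	bitmap[vlan_id / 8] |= 1 << (vlan_id % 8);
+}
+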
+/**
+ * lpfc_sli4_read_fcf_record - Read an FCF record from the port's FCF table.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read the FCF record from the device
+ * at the table entry given by @fcf_index.
+ **/
+int
+lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ int rc = 0, error;
+ LPFC_MBOXQ_t *mboxq;
+ void *virt_addr;
+ dma_addr_t phys_addr;
+ uint8_t *bytep;
+ struct lpfc_mbx_sge sge;
+ uint32_t alloc_len, req_len;
+ struct lpfc_mbx_read_fcf_tbl *read_fcf;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2000 Failed to allocate mbox for "
+ "READ_FCF cmd\n");
+ return -ENOMEM;
+ }
+
+ req_len = sizeof(struct fcf_record) +
+ sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
+
+ /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
+ alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
+ LPFC_SLI4_MBX_NEMBED);
+
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0291 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return -ENOMEM;
+ }
+
+ /* Get the first SGE entry from the non-embedded DMA memory. This
+ * routine only uses a single SGE.
+ */
+ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+ phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+ if (unlikely(!mboxq->sge_array)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2527 Failed to get the non-embedded SGE "
+ "virtual address\n");
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return -ENOMEM;
+ }
+ virt_addr = mboxq->sge_array->addr[0];
+ read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+
+ /* Set up command fields */
+ bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
+ /* Perform necessary endian conversion */
+ bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+ lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ error = -EIO;
+ } else
+ error = 0;
+ return error;
+}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 883938652a6..7d37eb7459b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
LPFC_CTX_HOST
} lpfc_ctx_cmd;
+/* This structure is used to carry the needed response IOCB states */
+struct lpfc_sli4_rspiocb_info {
+ uint8_t hw_status;
+ uint8_t bfield;
+#define LPFC_XB 0x1
+#define LPFC_PV 0x2
+ uint8_t priority;
+ uint8_t reserved;
+};
+
/* This structure is used to handle IOCB requests / responses */
struct lpfc_iocbq {
/* lpfc_iocbqs are used in double linked lists */
struct list_head list;
struct list_head clist;
uint16_t iotag; /* pre-assigned IO tag */
- uint16_t rsvd1;
+ uint16_t sli4_xritag; /* pre-assigned XRI (OXID) tag. */
IOCB_t iocb; /* IOCB cmd */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
@@ -65,7 +75,7 @@ struct lpfc_iocbq {
struct lpfc_iocbq *);
void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
-
+ struct lpfc_sli4_rspiocb_info sli4_info;
};
#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -81,14 +91,18 @@ struct lpfc_iocbq {
typedef struct lpfcMboxq {
/* MBOXQs are used in single linked lists */
struct list_head list; /* ptr to next mailbox command */
- MAILBOX_t mb; /* Mailbox cmd */
- struct lpfc_vport *vport;/* virutal port pointer */
+ union {
+ MAILBOX_t mb; /* Mailbox cmd */
+ struct lpfc_mqe mqe;
+ } u;
+ struct lpfc_vport *vport;/* virtual port pointer */
void *context1; /* caller context information */
void *context2; /* caller context information */
void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
uint8_t mbox_flag;
-
+ struct lpfc_mcqe mcqe;
+ struct lpfc_mbx_nembed_sge_virt *sge_array;
} LPFC_MBOXQ_t;
#define MBX_POLL 1 /* poll mailbox till command done, then
@@ -230,10 +244,11 @@ struct lpfc_sli {
/* Additional sli_flags */
#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
-#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
+#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
+#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
struct lpfc_sli_ring ring[LPFC_MAX_RING];
int fcp_ring; /* ring used for FCP initiator commands */
@@ -261,6 +276,8 @@ struct lpfc_sli {
#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
command */
+#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox
+ command */
#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
* or erase cmds. This is especially
* long because of the potential of
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644
index 00000000000..5196b46608d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -0,0 +1,467 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2009 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
+#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
+#define LPFC_GET_QE_REL_INT 32
+#define LPFC_RPI_LOW_WATER_MARK 10
+/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
+#define LPFC_NEMBED_MBOX_SGL_CNT 254
+
+/* Multi-queue arrangement for fast-path FCP work queues */
+#define LPFC_FN_EQN_MAX 8
+#define LPFC_SP_EQN_DEF 1
+#define LPFC_FP_EQN_DEF 1
+#define LPFC_FP_EQN_MIN 1
+#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
+
+#define LPFC_FN_WQN_MAX 32
+#define LPFC_SP_WQN_DEF 1
+#define LPFC_FP_WQN_DEF 4
+#define LPFC_FP_WQN_MIN 1
+#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
+
+/*
+ * Provide the default FCF Record attributes used by the driver
+ * when non-FIP mode is configured and there are no other default
+ * FCF Record attributes.
+ */
+#define LPFC_FCOE_FCF_DEF_INDEX 0
+#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
+#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
+
+/* First 3 bytes of default FCF MAC is specified by FC_MAP */
+#define LPFC_FCOE_FCF_MAC3 0xFF
+#define LPFC_FCOE_FCF_MAC4 0xFF
+#define LPFC_FCOE_FCF_MAC5 0xFE
+#define LPFC_FCOE_FCF_MAP0 0x0E
+#define LPFC_FCOE_FCF_MAP1 0xFC
+#define LPFC_FCOE_FCF_MAP2 0x00
+#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC
+#define LPFC_FCOE_FKA_ADV_PER 0
+#define LPFC_FCOE_FIP_PRIORITY 0x80
+
+enum lpfc_sli4_queue_type {
+ LPFC_EQ,
+ LPFC_GCQ,
+ LPFC_MCQ,
+ LPFC_WCQ,
+ LPFC_RCQ,
+ LPFC_MQ,
+ LPFC_WQ,
+ LPFC_HRQ,
+ LPFC_DRQ
+};
+
+/* The queue sub-type defines the functional purpose of the queue */
+enum lpfc_sli4_queue_subtype {
+ LPFC_NONE,
+ LPFC_MBOX,
+ LPFC_FCP,
+ LPFC_ELS,
+ LPFC_USOL
+};
+
+union sli4_qe {
+ void *address;
+ struct lpfc_eqe *eqe;
+ struct lpfc_cqe *cqe;
+ struct lpfc_mcqe *mcqe;
+ struct lpfc_wcqe_complete *wcqe_complete;
+ struct lpfc_wcqe_release *wcqe_release;
+ struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
+ struct lpfc_rcqe_complete *rcqe_complete;
+ struct lpfc_mqe *mqe;
+ union lpfc_wqe *wqe;
+ struct lpfc_rqe *rqe;
+};
+
+struct lpfc_queue {
+ struct list_head list;
+ enum lpfc_sli4_queue_type type;
+ enum lpfc_sli4_queue_subtype subtype;
+ struct lpfc_hba *phba;
+ struct list_head child_list;
+ uint32_t entry_count; /* Number of entries to support on the queue */
+ uint32_t entry_size; /* Size of each queue entry. */
+ uint32_t queue_id; /* Queue ID assigned by the hardware */
+ struct list_head page_list;
+ uint32_t page_count; /* Number of pages allocated for this queue */
+
+ uint32_t host_index; /* The host's index for putting or getting */
+ uint32_t hba_index; /* The last known hba index for get or put */
+ union sli4_qe qe[1]; /* array to index entries (must be last) */
+};
+
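+/*
+ * Illustrative sketch only; demo_queue_struct_size() is hypothetical and
+ * not a driver symbol. The qe[1] member above is the classic trailing-array
+ * idiom: the structure must be allocated with extra room so qe[] can index
+ * entry_count entries. The helper computes the minimum size for that layout;
+ * the driver's own allocation path (not shown here) is assumed to reserve at
+ * least this much.
+ */
+static inline size_t demo_queue_struct_size(uint32_t entry_count)
+{
+	/* qe[0] is already accounted for in sizeof(struct lpfc_queue). */
+	return sizeof(struct lpfc_queue) +
+		(entry_count - 1) * sizeof(union sli4_qe);
+}
+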
+struct lpfc_cq_event {
+ struct list_head list;
+ union {
+ struct lpfc_mcqe mcqe_cmpl;
+ struct lpfc_acqe_link acqe_link;
+ struct lpfc_acqe_fcoe acqe_fcoe;
+ struct lpfc_acqe_dcbx acqe_dcbx;
+ struct lpfc_rcqe rcqe_cmpl;
+ struct sli4_wcqe_xri_aborted wcqe_axri;
+ } cqe;
+};
+
+struct lpfc_sli4_link {
+ uint8_t speed;
+ uint8_t duplex;
+ uint8_t status;
+ uint8_t physical;
+ uint8_t fault;
+};
+
+struct lpfc_fcf {
+ uint8_t fabric_name[8];
+ uint8_t mac_addr[6];
+ uint16_t fcf_indx;
+ uint16_t fcfi;
+ uint32_t fcf_flag;
+#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
+#define FCF_REGISTERED 0x02 /* FCF registered with FW */
+#define FCF_DISCOVERED 0x04 /* FCF discovery started */
+#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
+#define FCF_IN_USE 0x10 /* At least one discovery completed */
+#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
+ uint32_t priority;
+ uint32_t addr_mode;
+ uint16_t vlan_id;
+};
+
+#define LPFC_REGION23_SIGNATURE "RG23"
+#define LPFC_REGION23_VERSION 1
+#define LPFC_REGION23_LAST_REC 0xff
+struct lpfc_fip_param_hdr {
+ uint8_t type;
+#define FCOE_PARAM_TYPE 0xA0
+ uint8_t length;
+#define FCOE_PARAM_LENGTH 2
+ uint8_t parm_version;
+#define FIPP_VERSION 0x01
+ uint8_t parm_flags;
+#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
+#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
+#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
+#define FIPP_MODE_ON 0x2
+#define FIPP_MODE_OFF 0x0
+#define FIPP_VLAN_VALID 0x1
+};
+
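+/*
+ * Illustrative sketch only; the demo_bf_* macros are hypothetical and not
+ * driver symbols. The _SHIFT/_MASK/_WORD triple above follows the driver's
+ * bitfield-macro convention, and these macros show how such a triple is
+ * typically consumed; the real bf_get()/bf_set() helpers live in lpfc_hw4.h
+ * and may differ in detail.
+ */
+#define demo_bf_get(name, ptr) \
+	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define demo_bf_set(name, ptr, value) \
+	((ptr)->name##_WORD = (((ptr)->name##_WORD & \
+	 ~(name##_MASK << name##_SHIFT)) | \
+	 (((value) & name##_MASK) << name##_SHIFT)))
+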
+struct lpfc_fcoe_params {
+ uint8_t fc_map[3];
+ uint8_t reserved1;
+ uint16_t vlan_tag;
+ uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_hdr {
+ uint8_t type;
+#define FCOE_CONN_TBL_TYPE 0xA1
+ uint8_t length; /* words */
+ uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_rec {
+ uint16_t flags;
+#define FCFCNCT_VALID 0x0001
+#define FCFCNCT_BOOT 0x0002
+#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
+#define FCFCNCT_FBNM_VALID 0x0008
+#define FCFCNCT_SWNM_VALID 0x0010
+#define FCFCNCT_VLAN_VALID 0x0020
+#define FCFCNCT_AM_VALID 0x0040
+#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
+#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
+
+ uint16_t vlan_tag;
+ uint8_t fabric_name[8];
+ uint8_t switch_name[8];
+};
+
+struct lpfc_fcf_conn_entry {
+ struct list_head list;
+ struct lpfc_fcf_conn_rec conn_rec;
+};
+
+/*
+ * Define the host's bootstrap mailbox. This structure contains
+ * the member attributes needed to create, use, and destroy the
+ * bootstrap mailbox region.
+ *
+ * The macro definitions for the bmbx data structure are defined
+ * in lpfc_hw4.h with the register definition.
+ */
+struct lpfc_bmbx {
+ struct lpfc_dmabuf *dmabuf;
+ struct dma_address dma_address;
+ void *avirt;
+ dma_addr_t aphys;
+ uint32_t bmbx_size;
+};
+
+#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4
+
+#define LPFC_EQE_SIZE_4B 4
+#define LPFC_EQE_SIZE_16B 16
+#define LPFC_CQE_SIZE 16
+#define LPFC_WQE_SIZE 64
+#define LPFC_MQE_SIZE 256
+#define LPFC_RQE_SIZE 8
+
+#define LPFC_EQE_DEF_COUNT 1024
+#define LPFC_CQE_DEF_COUNT 256
+#define LPFC_WQE_DEF_COUNT 64
+#define LPFC_MQE_DEF_COUNT 16
+#define LPFC_RQE_DEF_COUNT 512
+
+#define LPFC_QUEUE_NOARM false
+#define LPFC_QUEUE_REARM true
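These two booleans appear intended as the final bool argument of the lpfc_sli4_eq_release()/lpfc_sli4_cq_release() prototypes declared further down in this header. A minimal sketch under that assumption; lpfc_sli4_eq_rearm() is a hypothetical wrapper, not part of the patch:

	/* Hypothetical wrapper: release consumed EQEs and re-arm the queue. */
	static inline uint32_t lpfc_sli4_eq_rearm(struct lpfc_queue *eq)
	{
		return lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
	}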
+
+
+/*
+ * SLI4 CT field defines
+ */
+#define SLI4_CT_RPI 0
+#define SLI4_CT_VPI 1
+#define SLI4_CT_VFI 2
+#define SLI4_CT_FCFI 3
+
+#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
+
+/*
+ * SLI4 specific data structures
+ */
+struct lpfc_max_cfg_param {
+ uint16_t max_xri;
+ uint16_t xri_base;
+ uint16_t xri_used;
+ uint16_t max_rpi;
+ uint16_t rpi_base;
+ uint16_t rpi_used;
+ uint16_t max_vpi;
+ uint16_t vpi_base;
+ uint16_t vpi_used;
+ uint16_t max_vfi;
+ uint16_t vfi_base;
+ uint16_t vfi_used;
+ uint16_t max_fcfi;
+ uint16_t fcfi_base;
+ uint16_t fcfi_used;
+ uint16_t max_eq;
+ uint16_t max_rq;
+ uint16_t max_cq;
+ uint16_t max_wq;
+};
+
+struct lpfc_hba;
+/* SLI4 HBA multi-fcp queue handler struct */
+struct lpfc_fcp_eq_hdl {
+ uint32_t idx;
+ struct lpfc_hba *phba;
+};
+
+/* SLI4 HBA data structure entries */
+struct lpfc_sli4_hba {
+ void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
+ PCI BAR0, config space registers */
+ void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
+ PCI BAR1, control registers */
+ void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
+ PCI BAR2, doorbell registers */
+ /* BAR0 PCI config space register memory map */
+ void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
+ void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
+ void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
+ void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
+#define LPFC_ONLINE_NERR 0xFFFFFFFF
+ void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
+ /* BAR1 FCoE function CSR register memory map */
+ void __iomem *STAregaddr; /* Address to HST_STATE register */
+ void __iomem *ISRregaddr; /* Address to HST_ISR register */
+ void __iomem *IMRregaddr; /* Address to HST_IMR register */
+ void __iomem *ISCRregaddr; /* Address to HST_ISCR register */
+ /* BAR2 VF-0 doorbell register memory map */
+ void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */
+ void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */
+ void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
+ void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */
+ void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */
+
+ struct msix_entry *msix_entries;
+ uint32_t cfg_eqn;
+ struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
+ /* Pointers to the constructed SLI4 queues */
+ struct lpfc_queue **fp_eq; /* Fast-path event queue */
+ struct lpfc_queue *sp_eq; /* Slow-path event queue */
+ struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
+ struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
+ struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
+ struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
+ struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
+ struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
+ struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
+ struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
+ struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
+
+ /* Setup information for various queue parameters */
+ int eq_esize;
+ int eq_ecount;
+ int cq_esize;
+ int cq_ecount;
+ int wq_esize;
+ int wq_ecount;
+ int mq_esize;
+ int mq_ecount;
+ int rq_esize;
+ int rq_ecount;
+#define LPFC_SP_EQ_MAX_INTR_SEC 10000
+#define LPFC_FP_EQ_MAX_INTR_SEC 10000
+
+ uint32_t intr_enable;
+ struct lpfc_bmbx bmbx;
+ struct lpfc_max_cfg_param max_cfg_param;
+ uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
+ uint16_t next_rpi;
+ uint16_t scsi_xri_max;
+ uint16_t scsi_xri_cnt;
+ struct list_head lpfc_free_sgl_list;
+ struct list_head lpfc_sgl_list;
+ struct lpfc_sglq **lpfc_els_sgl_array;
+ struct list_head lpfc_abts_els_sgl_list;
+ struct lpfc_scsi_buf **lpfc_scsi_psb_array;
+ struct list_head lpfc_abts_scsi_buf_list;
+ uint32_t total_sglq_bufs;
+ struct lpfc_sglq **lpfc_sglq_active_list;
+ struct list_head lpfc_rpi_hdr_list;
+ unsigned long *rpi_bmask;
+ uint16_t rpi_count;
+ struct lpfc_sli4_flags sli4_flags;
+ struct list_head sp_rspiocb_work_queue;
+ struct list_head sp_cqe_event_pool;
+ struct list_head sp_asynce_work_queue;
+ struct list_head sp_fcp_xri_aborted_work_queue;
+ struct list_head sp_els_xri_aborted_work_queue;
+ struct list_head sp_unsol_work_queue;
+ struct lpfc_sli4_link link_state;
+ spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
+ spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+};
+
+enum lpfc_sge_type {
+ GEN_BUFF_TYPE,
+ SCSI_BUFF_TYPE
+};
+
+struct lpfc_sglq {
+ /* lpfc_sglqs are used in double linked lists */
+ struct list_head list;
+ struct list_head clist;
+ enum lpfc_sge_type buff_type; /* is this a scsi sgl */
+ uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
+ struct sli4_sge *sgl; /* pre-assigned SGL */
+ void *virt; /* virtual address. */
+ dma_addr_t phys; /* physical address */
+};
+
+struct lpfc_rpi_hdr {
+ struct list_head list;
+ uint32_t len;
+ struct lpfc_dmabuf *dmabuf;
+ uint32_t page_count;
+ uint32_t start_rpi;
+};
+
+/*
+ * SLI4 specific function prototypes
+ */
+int lpfc_pci_function_reset(struct lpfc_hba *);
+int lpfc_sli4_hba_setup(struct lpfc_hba *);
+int lpfc_sli4_hba_down(struct lpfc_hba *);
+int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
+ uint8_t, uint32_t, bool);
+void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
+void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
+ struct lpfc_mbx_sge *);
+
+void lpfc_sli4_hba_reset(struct lpfc_hba *);
+struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
+ uint32_t);
+void lpfc_sli4_queue_free(struct lpfc_queue *);
+uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
+uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *, uint32_t, uint32_t);
+uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *, uint32_t);
+uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *, uint32_t);
+uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *, struct lpfc_queue *, uint32_t);
+uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *);
+int lpfc_sli4_queue_setup(struct lpfc_hba *);
+void lpfc_sli4_queue_unset(struct lpfc_hba *);
+int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
+int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
+int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
+uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
+int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
+int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
+struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
+int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
+struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
+void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
+void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
+void lpfc_sli4_remove_rpis(struct lpfc_hba *);
+void lpfc_sli4_async_event_proc(struct lpfc_hba *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
+ struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
+ struct sli4_wcqe_xri_aborted *);
+int lpfc_sli4_brdreset(struct lpfc_hba *);
+int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
+void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
+int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
+int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
+uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
+uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
+void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
+void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_post_status_check(struct lpfc_hba *);
+uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
+
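A sketch of how the allocation and creation prototypes above might combine to stand up the slow-path event queue. It assumes the two uint32_t arguments of lpfc_sli4_queue_alloc() are entry size and entry count, and that the uint16_t argument of lpfc_eq_create() is the interrupt rate limit; lpfc_sp_eq_setup_sketch() itself is hypothetical and not taken from the patch:

	static int lpfc_sp_eq_setup_sketch(struct lpfc_hba *phba)
	{
		struct lpfc_queue *eq;

		/* Allocate queue memory sized from the values cached in sli4_hba. */
		eq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					   phba->sli4_hba.eq_ecount);
		if (!eq)
			return -ENOMEM;

		/* Ask the port to create the EQ; a nonzero return is treated as failure. */
		if (lpfc_eq_create(phba, eq, LPFC_SP_EQ_MAX_INTR_SEC)) {
			lpfc_sli4_queue_free(eq);
			return -EIO;
		}

		phba->sli4_hba.sp_eq = eq;
		return 0;
	}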
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e599519e307..6b8a148f0a5 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.1"
+#define LPFC_DRIVER_VERSION "8.3.2"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 917ad56b0af..a6313ee84ac 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -32,8 +32,10 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
vpi = 0;
else
set_bit(vpi, phba->vpi_bmask);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ phba->sli4_hba.max_cfg_param.vpi_used++;
spin_unlock_irq(&phba->hbalock);
return vpi;
}
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
static void
lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
{
+ if (vpi == 0)
+ return;
spin_lock_irq(&phba->hbalock);
clear_bit(vpi, phba->vpi_bmask);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ phba->sli4_hba.max_cfg_param.vpi_used--;
spin_unlock_irq(&phba->hbalock);
}
@@ -113,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
if (!pmb) {
return -ENOMEM;
}
- mb = &pmb->mb;
+ mb = &pmb->u.mb;
lpfc_read_sparam(phba, pmb, vport->vpi);
/*
@@ -243,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
(vport->fc_flag & wait_flags) ||
((vport->port_state > LPFC_VPORT_FAILED) &&
(vport->port_state < LPFC_VPORT_READY))) {
- lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
"1833 Vport discovery quiesce Wait:"
- " vpi x%x state x%x fc_flags x%x"
+ " state x%x fc_flags x%x"
" num_nodes x%x, waiting 1000 msecs"
" total wait msecs x%x\n",
- vport->vpi, vport->port_state,
- vport->fc_flag, vport->num_disc_nodes,
+ vport->port_state, vport->fc_flag,
+ vport->num_disc_nodes,
jiffies_to_msecs(jiffies - start_time));
msleep(1000);
} else {
/* Base case. Wait variants satisfied. Break out */
- lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
"1834 Vport discovery quiesced:"
- " vpi x%x state x%x fc_flags x%x"
+ " state x%x fc_flags x%x"
" wait msecs x%x\n",
- vport->vpi, vport->port_state,
- vport->fc_flag,
+ vport->port_state, vport->fc_flag,
jiffies_to_msecs(jiffies
- start_time));
break;
@@ -267,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
}
if (time_after(jiffies, wait_time_max))
- lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1835 Vport discovery quiesce failed:"
- " vpi x%x state x%x fc_flags x%x"
- " wait msecs x%x\n",
- vport->vpi, vport->port_state,
- vport->fc_flag,
+ " state x%x fc_flags x%x wait msecs x%x\n",
+ vport->port_state, vport->fc_flag,
jiffies_to_msecs(jiffies - start_time));
}
@@ -308,6 +313,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
goto error_out;
}
+ /*
+ * In SLI4, the vpi must be activated before it can be used
+ * by the port.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_sli4_init_vpi(phba, vpi);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "1838 Failed to INIT_VPI on vpi %d "
+ "status %d\n", vpi, rc);
+ rc = VPORT_NORESOURCES;
+ lpfc_free_vpi(phba, vpi);
+ goto error_out;
+ }
+ }
/* Assign an unused board number */
if ((instance = lpfc_get_instance()) < 0) {
@@ -535,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
"physical host\n");
return VPORT_ERROR;
}
+
+ /* If the vport is a static vport, fail the deletion. */
+ if ((vport->vport_flag & STATIC_VPORT) &&
+ !(phba->pport->load_flag & FC_UNLOADING)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1837 vport_delete failed: Cannot delete "
+ "static vport.\n");
+ return VPORT_ERROR;
+ }
+
/*
* If we are not unloading the driver then prevent the vport_delete
* from happening until after this vport's discovery is finished.
@@ -710,7 +740,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
struct lpfc_vport *port_iterator;
struct lpfc_vport **vports;
int index = 0;
- vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *),
+ vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
GFP_KERNEL);
if (vports == NULL)
return NULL;
@@ -734,7 +764,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
int i;
if (vports == NULL)
return;
- for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++)
+ for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++)
scsi_host_put(lpfc_shost_from_vport(vports[i]));
kfree(vports);
}
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 795201fa0b4..512c2cc1a33 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -469,7 +469,7 @@ typedef struct {
u8 type; /* Type of the device */
u8 cur_status; /* current status of the device */
u8 tag_depth; /* Level of tagging */
- u8 sync_neg; /* sync negotiation - ENABLE or DISBALE */
+ u8 sync_neg; /* sync negotiation - ENABLE or DISABLE */
u32 size; /* configurable size in terms of 512 byte
blocks */
}__attribute__ ((packed)) phys_drv;
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
index 170399ef06f..b25b74764ec 100644
--- a/drivers/scsi/megaraid/mbox_defs.h
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -686,7 +686,7 @@ typedef struct {
* @type : Type of the device
* @cur_status : current status of the device
* @tag_depth : Level of tagging
- * @sync_neg : sync negotiation - ENABLE or DISBALE
+ * @sync_neg : sync negotiation - ENABLE or DISABLE
* @size : configurable size in terms of 512 byte
*/
typedef struct {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index babd4cc0cb2..286c185fa9e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -61,6 +61,7 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
#include "mpt2sas_debug.h"
@@ -68,10 +69,10 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "01.100.02.00"
-#define MPT2SAS_MAJOR_VERSION 00
+#define MPT2SAS_DRIVER_VERSION "01.100.03.00"
+#define MPT2SAS_MAJOR_VERSION 01
#define MPT2SAS_MINOR_VERSION 100
-#define MPT2SAS_BUILD_VERSION 02
+#define MPT2SAS_BUILD_VERSION 03
#define MPT2SAS_RELEASE_VERSION 00
/*
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index ba6ab170bdf..14e473d1fa7 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -473,7 +473,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
}
/**
- * _ctl_do_task_abort - assign an active smid to the abort_task
+ * _ctl_set_task_mid - assign an active smid to tm request
* @ioc: per adapter object
* @karg - (struct mpt2_ioctl_command)
* @tm_request - pointer to mf from user space
@@ -482,7 +482,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
* during failure, the reply frame is filled.
*/
static int
-_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
+_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
Mpi2SCSITaskManagementRequest_t *tm_request)
{
u8 found = 0;
@@ -494,6 +494,14 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
Mpi2SCSITaskManagementReply_t *tm_reply;
u32 sz;
u32 lun;
+ char *desc = NULL;
+
+ if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ desc = "abort_task";
+ else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ desc = "query_task";
+ else
+ return 0;
lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
@@ -517,13 +525,13 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
if (!found) {
- dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: "
- "DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
- tm_request->DevHandle, lun));
+ dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+ "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
+ desc, tm_request->DevHandle, lun));
tm_reply = ioc->ctl_cmds.reply;
tm_reply->DevHandle = tm_request->DevHandle;
tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
- tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
+ tm_reply->TaskType = tm_request->TaskType;
tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
tm_reply->VP_ID = tm_request->VP_ID;
tm_reply->VF_ID = tm_request->VF_ID;
@@ -535,9 +543,9 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
return 1;
}
- dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: "
- "DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name,
- tm_request->DevHandle, lun, tm_request->TaskMID));
+ dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+ "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
+ desc, tm_request->DevHandle, lun, tm_request->TaskMID));
return 0;
}
@@ -739,8 +747,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
(Mpi2SCSITaskManagementRequest_t *)mpi_request;
if (tm_request->TaskType ==
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
- if (_ctl_do_task_abort(ioc, &karg, tm_request)) {
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+ tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
+ if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
mpt2sas_base_free_smid(ioc, smid);
goto out;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index e3a7967259e..2a01a5f2a84 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -197,12 +197,12 @@ static struct pci_device_id scsih_pci_table[] = {
MODULE_DEVICE_TABLE(pci, scsih_pci_table);
/**
- * scsih_set_debug_level - global setting of ioc->logging_level.
+ * _scsih_set_debug_level - global setting of ioc->logging_level.
*
* Note: The logging levels are defined in mpt2sas_debug.h.
*/
static int
-scsih_set_debug_level(const char *val, struct kernel_param *kp)
+_scsih_set_debug_level(const char *val, struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
struct MPT2SAS_ADAPTER *ioc;
@@ -215,7 +215,7 @@ scsih_set_debug_level(const char *val, struct kernel_param *kp)
ioc->logging_level = logging_level;
return 0;
}
-module_param_call(logging_level, scsih_set_debug_level, param_get_int,
+module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
&logging_level, 0644);
/**
@@ -884,6 +884,41 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
}
/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
+ unsigned int lun, int channel)
+{
+ u8 found;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ found = 0;
+ for (i = 0 ; i < ioc->request_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd &&
+ (ioc->scsi_lookup[i].scmd->device->id == id &&
+ ioc->scsi_lookup[i].scmd->device->channel == channel &&
+ ioc->scsi_lookup[i].scmd->device->lun == lun)) {
+ found = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return found;
+}
+
+/**
* _scsih_get_chain_buffer_dma - obtain block of chains (dma address)
* @ioc: per adapter object
* @smid: system request message index
@@ -1047,14 +1082,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
}
/**
- * scsih_change_queue_depth - setting device queue depth
+ * _scsih_change_queue_depth - setting device queue depth
* @sdev: scsi device struct
* @qdepth: requested queue depth
*
* Returns queue depth.
*/
static int
-scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
+_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
struct Scsi_Host *shost = sdev->host;
int max_depth;
@@ -1079,14 +1114,14 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
}
/**
- * scsih_change_queue_depth - changing device queue tag type
+ * _scsih_change_queue_depth - changing device queue tag type
* @sdev: scsi device struct
* @tag_type: requested tag type
*
* Returns queue tag type.
*/
static int
-scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
+_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
{
if (sdev->tagged_supported) {
scsi_set_tag_type(sdev, tag_type);
@@ -1101,14 +1136,14 @@ scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
}
/**
- * scsih_target_alloc - target add routine
+ * _scsih_target_alloc - target add routine
* @starget: scsi target struct
*
* Returns 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
-scsih_target_alloc(struct scsi_target *starget)
+_scsih_target_alloc(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1163,13 +1198,13 @@ scsih_target_alloc(struct scsi_target *starget)
}
/**
- * scsih_target_destroy - target destroy routine
+ * _scsih_target_destroy - target destroy routine
* @starget: scsi target struct
*
* Returns nothing.
*/
static void
-scsih_target_destroy(struct scsi_target *starget)
+_scsih_target_destroy(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1212,14 +1247,14 @@ scsih_target_destroy(struct scsi_target *starget)
}
/**
- * scsih_slave_alloc - device add routine
+ * _scsih_slave_alloc - device add routine
* @sdev: scsi device struct
*
* Returns 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
-scsih_slave_alloc(struct scsi_device *sdev)
+_scsih_slave_alloc(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
struct MPT2SAS_ADAPTER *ioc;
@@ -1273,13 +1308,13 @@ scsih_slave_alloc(struct scsi_device *sdev)
}
/**
- * scsih_slave_destroy - device destroy routine
+ * _scsih_slave_destroy - device destroy routine
* @sdev: scsi device struct
*
* Returns nothing.
*/
static void
-scsih_slave_destroy(struct scsi_device *sdev)
+_scsih_slave_destroy(struct scsi_device *sdev)
{
struct MPT2SAS_TARGET *sas_target_priv_data;
struct scsi_target *starget;
@@ -1295,13 +1330,13 @@ scsih_slave_destroy(struct scsi_device *sdev)
}
/**
- * scsih_display_sata_capabilities - sata capabilities
+ * _scsih_display_sata_capabilities - sata capabilities
* @ioc: per adapter object
* @sas_device: the sas_device object
* @sdev: scsi device struct
*/
static void
-scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
+_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
struct _sas_device *sas_device, struct scsi_device *sdev)
{
Mpi2ConfigReply_t mpi_reply;
@@ -1401,14 +1436,14 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
}
/**
- * scsih_slave_configure - device configure routine.
+ * _scsih_slave_configure - device configure routine.
* @sdev: scsi device struct
*
* Returns 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
-scsih_slave_configure(struct scsi_device *sdev)
+_scsih_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1489,7 +1524,7 @@ scsih_slave_configure(struct scsi_device *sdev)
r_level, raid_device->handle,
(unsigned long long)raid_device->wwid,
raid_device->num_pds, ds);
- scsih_change_queue_depth(sdev, qdepth);
+ _scsih_change_queue_depth(sdev, qdepth);
return 0;
}
@@ -1532,10 +1567,10 @@ scsih_slave_configure(struct scsi_device *sdev)
sas_device->slot);
if (!ssp_target)
- scsih_display_sata_capabilities(ioc, sas_device, sdev);
+ _scsih_display_sata_capabilities(ioc, sas_device, sdev);
}
- scsih_change_queue_depth(sdev, qdepth);
+ _scsih_change_queue_depth(sdev, qdepth);
if (ssp_target)
sas_read_port_mode_page(sdev);
@@ -1543,7 +1578,7 @@ scsih_slave_configure(struct scsi_device *sdev)
}
/**
- * scsih_bios_param - fetch head, sector, cylinder info for a disk
+ * _scsih_bios_param - fetch head, sector, cylinder info for a disk
* @sdev: scsi device struct
* @bdev: pointer to block device context
* @capacity: device size (in 512 byte sectors)
@@ -1555,7 +1590,7 @@ scsih_slave_configure(struct scsi_device *sdev)
* Return nothing.
*/
static int
-scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int params[])
{
int heads;
@@ -1636,7 +1671,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
}
/**
- * scsih_tm_done - tm completion routine
+ * _scsih_tm_done - tm completion routine
* @ioc: per adapter object
* @smid: system request message index
* @VF_ID: virtual function id
@@ -1648,7 +1683,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
* Return nothing.
*/
static void
-scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
+_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
{
MPI2DefaultReply_t *mpi_reply;
@@ -1823,13 +1858,13 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
}
/**
- * scsih_abort - eh threads main abort routine
+ * _scsih_abort - eh threads main abort routine
* @sdev: scsi device struct
*
* Returns SUCCESS if command aborted else FAILED
*/
static int
-scsih_abort(struct scsi_cmnd *scmd)
+_scsih_abort(struct scsi_cmnd *scmd)
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1889,15 +1924,86 @@ scsih_abort(struct scsi_cmnd *scmd)
return r;
}
+/**
+ * _scsih_dev_reset - eh threads main device reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the device reset succeeded else FAILED
+ */
+static int
+_scsih_dev_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle;
+ int r;
+
+ printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n",
+ ioc->name, scmd);
+ scsi_print_command(scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
+ ioc->name, scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components obtain the volume_handle */
+ handle = 0;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc,
+ sas_device_priv_data->sas_target->handle);
+ if (sas_device)
+ handle = sas_device->volume_handle;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ } else
+ handle = sas_device_priv_data->sas_target->handle;
+
+ if (!handle) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ mutex_lock(&ioc->tm_cmds.mutex);
+ mpt2sas_scsih_issue_tm(ioc, handle, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
+ 30);
+
+ /*
+ * sanity check: see whether all commands to this device have
+ * been completed
+ */
+ if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
+ scmd->device->lun, scmd->device->channel))
+ r = FAILED;
+ else
+ r = SUCCESS;
+ ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_unlock(&ioc->tm_cmds.mutex);
+
+ out:
+ printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
+ ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
/**
- * scsih_dev_reset - eh threads main device reset routine
+ * _scsih_target_reset - eh threads main target reset routine
* @sdev: scsi device struct
*
* Returns SUCCESS if command aborted else FAILED
*/
static int
-scsih_dev_reset(struct scsi_cmnd *scmd)
+_scsih_target_reset(struct scsi_cmnd *scmd)
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1912,7 +2018,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
- printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
+ printk(MPT2SAS_INFO_FMT "target been deleted! scmd(%p)\n",
ioc->name, scmd);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
@@ -1962,13 +2068,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
}
/**
- * scsih_abort - eh threads main host reset routine
+ * _scsih_host_reset - eh threads main host reset routine
* @sdev: scsi device struct
*
* Returns SUCCESS if command aborted else FAILED
*/
static int
-scsih_host_reset(struct scsi_cmnd *scmd)
+_scsih_host_reset(struct scsi_cmnd *scmd)
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
int r, retval;
@@ -2390,7 +2496,107 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
}
/**
- * scsih_qcmd - main scsi request entry point
+ * _scsih_setup_eedp - setup MPI request for EEDP transfer
+ * @scmd: pointer to scsi command object
+ * @mpi_request: pointer to the SCSI_IO request message frame
+ *
+ * Supports DIF protection types 1 and 3.
+ *
+ * Returns nothing
+ */
+static void
+_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
+{
+ u16 eedp_flags;
+ unsigned char prot_op = scsi_get_prot_op(scmd);
+ unsigned char prot_type = scsi_get_prot_type(scmd);
+
+ if (prot_type == SCSI_PROT_DIF_TYPE0 ||
+ prot_type == SCSI_PROT_DIF_TYPE2 ||
+ prot_op == SCSI_PROT_NORMAL)
+ return;
+
+ if (prot_op == SCSI_PROT_READ_STRIP)
+ eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
+ else if (prot_op == SCSI_PROT_WRITE_INSERT)
+ eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+ else
+ return;
+
+ mpi_request->EEDPBlockSize = scmd->device->sector_size;
+
+ switch (prot_type) {
+ case SCSI_PROT_DIF_TYPE1:
+
+ /*
+ * enable ref/guard checking
+ * auto increment ref tag
+ */
+ mpi_request->EEDPFlags = eedp_flags |
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ mpi_request->CDB.EEDP32.PrimaryReferenceTag =
+ cpu_to_be32(scsi_get_lba(scmd));
+
+ break;
+
+ case SCSI_PROT_DIF_TYPE3:
+
+ /*
+ * enable guard checking
+ */
+ mpi_request->EEDPFlags = eedp_flags |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+
+ break;
+ }
+}
+
+/**
+ * _scsih_eedp_error_handling - return sense code for EEDP errors
+ * @scmd: pointer to scsi command object
+ * @ioc_status: ioc status
+ *
+ * Returns nothing
+ */
+static void
+_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
+{
+ u8 ascq;
+ u8 sk;
+ u8 host_byte;
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ ascq = 0x01;
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ ascq = 0x02;
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ ascq = 0x03;
+ break;
+ default:
+ ascq = 0x00;
+ break;
+ }
+
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ sk = ILLEGAL_REQUEST;
+ host_byte = DID_ABORT;
+ } else {
+ sk = ABORTED_COMMAND;
+ host_byte = DID_OK;
+ }
+
+ scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
+ scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
+ SAM_STAT_CHECK_CONDITION;
+}
+
+/**
+ * _scsih_qcmd - main scsi request entry point
* @scmd: pointer to scsi command object
* @done: function pointer to be invoked on completion
*
@@ -2401,7 +2607,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
* SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
*/
static int
-scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -2470,6 +2676,7 @@ scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
}
mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+ _scsih_setup_eedp(scmd, mpi_request);
mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
if (sas_device_priv_data->sas_target->flags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
@@ -2604,6 +2811,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
desc_ioc_state = "scsi ext terminated";
break;
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ desc_ioc_state = "eedp guard error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ desc_ioc_state = "eedp ref tag error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ desc_ioc_state = "eedp app tag error";
+ break;
default:
desc_ioc_state = "unknown";
break;
@@ -2783,7 +2999,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
}
/**
- * scsih_io_done - scsi request callback
+ * _scsih_io_done - scsi request callback
* @ioc: per adapter object
* @smid: system request message index
* @VF_ID: virtual function id
@@ -2794,7 +3010,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
* Return nothing.
*/
static void
-scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
+_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
{
Mpi2SCSIIORequest_t *mpi_request;
Mpi2SCSIIOReply_t *mpi_reply;
@@ -2939,6 +3155,11 @@ scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
scmd->result = DID_RESET << 16;
break;
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ _scsih_eedp_error_handling(scmd, ioc_status);
+ break;
case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
case MPI2_IOCSTATUS_INVALID_FUNCTION:
case MPI2_IOCSTATUS_INVALID_SGL:
@@ -5130,18 +5351,19 @@ static struct scsi_host_template scsih_driver_template = {
.module = THIS_MODULE,
.name = "Fusion MPT SAS Host",
.proc_name = MPT2SAS_DRIVER_NAME,
- .queuecommand = scsih_qcmd,
- .target_alloc = scsih_target_alloc,
- .slave_alloc = scsih_slave_alloc,
- .slave_configure = scsih_slave_configure,
- .target_destroy = scsih_target_destroy,
- .slave_destroy = scsih_slave_destroy,
- .change_queue_depth = scsih_change_queue_depth,
- .change_queue_type = scsih_change_queue_type,
- .eh_abort_handler = scsih_abort,
- .eh_device_reset_handler = scsih_dev_reset,
- .eh_host_reset_handler = scsih_host_reset,
- .bios_param = scsih_bios_param,
+ .queuecommand = _scsih_qcmd,
+ .target_alloc = _scsih_target_alloc,
+ .slave_alloc = _scsih_slave_alloc,
+ .slave_configure = _scsih_slave_configure,
+ .target_destroy = _scsih_target_destroy,
+ .slave_destroy = _scsih_slave_destroy,
+ .change_queue_depth = _scsih_change_queue_depth,
+ .change_queue_type = _scsih_change_queue_type,
+ .eh_abort_handler = _scsih_abort,
+ .eh_device_reset_handler = _scsih_dev_reset,
+ .eh_target_reset_handler = _scsih_target_reset,
+ .eh_host_reset_handler = _scsih_host_reset,
+ .bios_param = _scsih_bios_param,
.can_queue = 1,
.this_id = -1,
.sg_tablesize = MPT2SAS_SG_DEPTH,
@@ -5228,13 +5450,13 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
}
/**
- * scsih_remove - detach and remove add host
+ * _scsih_remove - detach and remove add host
* @pdev: PCI device struct
*
* Return nothing.
*/
static void __devexit
-scsih_remove(struct pci_dev *pdev)
+_scsih_remove(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5442,14 +5664,14 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
}
/**
- * scsih_probe - attach and add scsi host
+ * _scsih_probe - attach and add scsi host
* @pdev: PCI device struct
* @id: pci device id
*
* Returns 0 success, anything else error.
*/
static int
-scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct MPT2SAS_ADAPTER *ioc;
struct Scsi_Host *shost;
@@ -5503,6 +5725,9 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_add_shost_fail;
}
+ scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION);
+
/* event thread */
snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
"fw_event%d", ioc->id);
@@ -5536,14 +5761,14 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
#ifdef CONFIG_PM
/**
- * scsih_suspend - power management suspend main entry point
+ * _scsih_suspend - power management suspend main entry point
* @pdev: PCI device struct
* @state: PM state change to (usually PCI_D3)
*
* Returns 0 success, anything else error.
*/
static int
-scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5564,13 +5789,13 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
}
/**
- * scsih_resume - power management resume main entry point
+ * _scsih_resume - power management resume main entry point
* @pdev: PCI device struct
*
* Returns 0 success, anything else error.
*/
static int
-scsih_resume(struct pci_dev *pdev)
+_scsih_resume(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5599,22 +5824,22 @@ scsih_resume(struct pci_dev *pdev)
static struct pci_driver scsih_driver = {
.name = MPT2SAS_DRIVER_NAME,
.id_table = scsih_pci_table,
- .probe = scsih_probe,
- .remove = __devexit_p(scsih_remove),
+ .probe = _scsih_probe,
+ .remove = __devexit_p(_scsih_remove),
#ifdef CONFIG_PM
- .suspend = scsih_suspend,
- .resume = scsih_resume,
+ .suspend = _scsih_suspend,
+ .resume = _scsih_resume,
#endif
};
/**
- * scsih_init - main entry point for this driver.
+ * _scsih_init - main entry point for this driver.
*
* Returns 0 success, anything else error.
*/
static int __init
-scsih_init(void)
+_scsih_init(void)
{
int error;
@@ -5630,10 +5855,10 @@ scsih_init(void)
mpt2sas_base_initialize_callback_handler();
 /* queuecommand callback handler */
- scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done);
+ scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
 /* task management callback handler */
- tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done);
+ tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done);
/* base internal commands callback handler */
base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
@@ -5659,12 +5884,12 @@ scsih_init(void)
}
/**
- * scsih_exit - exit point for this driver (when it is a module).
+ * _scsih_exit - exit point for this driver (when it is a module).
*
* Returns 0 success, anything else error.
*/
static void __exit
-scsih_exit(void)
+_scsih_exit(void)
{
printk(KERN_INFO "mpt2sas version %s unloading\n",
MPT2SAS_DRIVER_VERSION);
@@ -5682,5 +5907,5 @@ scsih_exit(void)
mpt2sas_ctl_exit();
}
-module_init(scsih_init);
-module_exit(scsih_exit);
+module_init(_scsih_init);
+module_exit(_scsih_exit);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index e03dc0b1e1a..686695b155c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -264,7 +264,7 @@ struct rep_manu_reply{
};
/**
- * transport_expander_report_manufacture - obtain SMP report_manufacture
+ * _transport_expander_report_manufacture - obtain SMP report_manufacture
* @ioc: per adapter object
* @sas_address: expander sas address
* @edev: the sas_expander_device object
@@ -274,7 +274,7 @@ struct rep_manu_reply{
* Returns 0 for success, non-zero for failure.
*/
static int
-transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
+_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
u64 sas_address, struct sas_expander_device *edev)
{
Mpi2SmpPassthroughRequest_t *mpi_request;
@@ -578,7 +578,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
mpt2sas_port->remote_identify.device_type ==
MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
- transport_expander_report_manufacture(ioc,
+ _transport_expander_report_manufacture(ioc,
mpt2sas_port->remote_identify.sas_address,
rphy_to_expander_device(rphy));
@@ -852,7 +852,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
}
/**
- * transport_get_linkerrors -
+ * _transport_get_linkerrors -
* @phy: The sas phy object
*
* Only support sas_host direct attached phys.
@@ -860,7 +860,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
*
*/
static int
-transport_get_linkerrors(struct sas_phy *phy)
+_transport_get_linkerrors(struct sas_phy *phy)
{
struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
struct _sas_phy *mpt2sas_phy;
@@ -903,14 +903,14 @@ transport_get_linkerrors(struct sas_phy *phy)
}
/**
- * transport_get_enclosure_identifier -
+ * _transport_get_enclosure_identifier -
* @phy: The sas phy object
*
* Obtain the enclosure logical id for an expander.
* Returns 0 for success, non-zero for failure.
*/
static int
-transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
+_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
struct _sas_node *sas_expander;
@@ -929,13 +929,13 @@ transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
}
/**
- * transport_get_bay_identifier -
+ * _transport_get_bay_identifier -
* @phy: The sas phy object
*
* Returns the slot id for a device that resides inside an enclosure.
*/
static int
-transport_get_bay_identifier(struct sas_rphy *rphy)
+_transport_get_bay_identifier(struct sas_rphy *rphy)
{
struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
struct _sas_device *sas_device;
@@ -953,7 +953,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
}
/**
- * transport_phy_reset -
+ * _transport_phy_reset -
* @phy: The sas phy object
* @hard_reset:
*
@@ -961,7 +961,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
* Returns 0 for success, non-zero for failure.
*/
static int
-transport_phy_reset(struct sas_phy *phy, int hard_reset)
+_transport_phy_reset(struct sas_phy *phy, int hard_reset)
{
struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
struct _sas_phy *mpt2sas_phy;
@@ -1002,7 +1002,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
}
/**
- * transport_smp_handler - transport portal for smp passthru
+ * _transport_smp_handler - transport portal for smp passthru
* @shost: shost object
* @rphy: sas transport rphy object
* @req:
@@ -1012,7 +1012,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
* smp_rep_general /sys/class/bsg/expander-5:0
*/
static int
-transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct request *req)
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1041,7 +1041,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
"rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
- req->data_len, rsp->bio->bi_vcnt, rsp->data_len);
+ blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
return -EINVAL;
}
@@ -1104,7 +1104,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
*((u64 *)&mpi_request->SASAddress) = (rphy) ?
cpu_to_le64(rphy->identify.sas_address) :
cpu_to_le64(ioc->sas_hba.sas_address);
- mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4);
+ mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
psge = &mpi_request->SGL;
/* WRITE sgel first */
@@ -1112,13 +1112,13 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
- req->data_len, PCI_DMA_BIDIRECTIONAL);
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_out) {
mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
goto unmap;
}
- ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4),
+ ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
dma_addr_out);
/* incr sgel */
@@ -1129,14 +1129,14 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
MPI2_SGE_FLAGS_END_OF_LIST);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
- rsp->data_len, PCI_DMA_BIDIRECTIONAL);
+ dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_in) {
mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
goto unmap;
}
- ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4),
+ ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
dma_addr_in);
dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
@@ -1170,9 +1170,8 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
req->sense_len = sizeof(*mpi_reply);
- req->data_len = 0;
- rsp->data_len -= mpi_reply->ResponseDataLength;
-
+ req->resid_len = 0;
+ rsp->resid_len -= mpi_reply->ResponseDataLength;
} else {
dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
"%s - no reply\n", ioc->name, __func__));
@@ -1188,10 +1187,10 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
unmap:
if (dma_addr_out)
- pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len,
+ pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
PCI_DMA_BIDIRECTIONAL);
if (dma_addr_in)
- pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len,
+ pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
PCI_DMA_BIDIRECTIONAL);
out:
@@ -1201,11 +1200,11 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
struct sas_function_template mpt2sas_transport_functions = {
- .get_linkerrors = transport_get_linkerrors,
- .get_enclosure_identifier = transport_get_enclosure_identifier,
- .get_bay_identifier = transport_get_bay_identifier,
- .phy_reset = transport_phy_reset,
- .smp_handler = transport_smp_handler,
+ .get_linkerrors = _transport_get_linkerrors,
+ .get_enclosure_identifier = _transport_get_enclosure_identifier,
+ .get_bay_identifier = _transport_get_bay_identifier,
+ .phy_reset = _transport_phy_reset,
+ .smp_handler = _transport_smp_handler,
};
struct scsi_transport_template *mpt2sas_transport_template;
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
deleted file mode 100644
index e4acebd10d1..00000000000
--- a/drivers/scsi/mvsas.c
+++ /dev/null
@@ -1,3222 +0,0 @@
-/*
- mvsas.c - Marvell 88SE6440 SAS/SATA support
-
- Copyright 2007 Red Hat, Inc.
- Copyright 2008 Marvell. <kewei@marvell.com>
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2,
- or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty
- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public
- License along with this program; see the file COPYING. If not,
- write to the Free Software Foundation, 675 Mass Ave, Cambridge,
- MA 02139, USA.
-
- ---------------------------------------------------------------
-
- Random notes:
- * hardware supports controlling the endian-ness of data
- structures. this permits elimination of all the le32_to_cpu()
- and cpu_to_le32() conversions.
-
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/ctype.h>
-#include <scsi/libsas.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/sas_ata.h>
-#include <asm/io.h>
-
-#define DRV_NAME "mvsas"
-#define DRV_VERSION "0.5.2"
-#define _MV_DUMP 0
-#define MVS_DISABLE_NVRAM
-#define MVS_DISABLE_MSI
-
-#define mr32(reg) readl(regs + MVS_##reg)
-#define mw32(reg,val) writel((val), regs + MVS_##reg)
-#define mw32_f(reg,val) do { \
- writel((val), regs + MVS_##reg); \
- readl(regs + MVS_##reg); \
- } while (0)
-
-#define MVS_ID_NOT_MAPPED 0x7f
-#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
-
-/* offset for D2H FIS in the Received FIS List Structure */
-#define SATA_RECEIVED_D2H_FIS(reg_set) \
- ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
-#define SATA_RECEIVED_PIO_FIS(reg_set) \
- ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
-#define UNASSOC_D2H_FIS(id) \
- ((void *) mvi->rx_fis + 0x100 * id)
-
-#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
- for ((__mc) = (__lseq_mask), (__lseq) = 0; \
- (__mc) != 0 && __rest; \
- (++__lseq), (__mc) >>= 1)
-
-/* driver compile-time configuration */
-enum driver_configuration {
- MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
- MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
- /* software requires power-of-2
- ring size */
-
- MVS_SLOTS = 512, /* command slots */
- MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
- MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
- MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
- MVS_OAF_SZ = 64, /* Open address frame buffer size */
-
- MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */
-
- MVS_QUEUE_SIZE = 30, /* Support Queue depth */
- MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */
-};
-
-/* unchangeable hardware details */
-enum hardware_details {
- MVS_MAX_PHYS = 8, /* max. possible phys */
- MVS_MAX_PORTS = 8, /* max. possible ports */
- MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100),
-};
-
-/* peripheral registers (BAR2) */
-enum peripheral_registers {
- SPI_CTL = 0x10, /* EEPROM control */
- SPI_CMD = 0x14, /* EEPROM command */
- SPI_DATA = 0x18, /* EEPROM data */
-};
-
-enum peripheral_register_bits {
- TWSI_RDY = (1U << 7), /* EEPROM interface ready */
- TWSI_RD = (1U << 4), /* EEPROM read access */
-
- SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
-};
-
-/* enhanced mode registers (BAR4) */
-enum hw_registers {
- MVS_GBL_CTL = 0x04, /* global control */
- MVS_GBL_INT_STAT = 0x08, /* global irq status */
- MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
- MVS_GBL_PORT_TYPE = 0xa0, /* port type */
-
- MVS_CTL = 0x100, /* SAS/SATA port configuration */
- MVS_PCS = 0x104, /* SAS/SATA port control/status */
- MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
- MVS_CMD_LIST_HI = 0x10C,
- MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
- MVS_RX_FIS_HI = 0x114,
-
- MVS_TX_CFG = 0x120, /* TX configuration */
- MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
- MVS_TX_HI = 0x128,
-
- MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
- MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
- MVS_RX_CFG = 0x134, /* RX configuration */
- MVS_RX_LO = 0x138, /* RX (completion) ring addr */
- MVS_RX_HI = 0x13C,
- MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
-
- MVS_INT_COAL = 0x148, /* Int coalescing config */
- MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
- MVS_INT_STAT = 0x150, /* Central int status */
- MVS_INT_MASK = 0x154, /* Central int enable */
- MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
- MVS_INT_MASK_SRS = 0x15C,
-
- /* ports 1-3 follow after this */
- MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
- MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
- MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */
- MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */
-
- /* ports 1-3 follow after this */
- MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
- MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
-
- MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
- MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
-
- /* ports 1-3 follow after this */
- MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
- MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
- MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */
- MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */
-
- /* ports 1-3 follow after this */
- MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
- MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
- MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */
- MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */
-};
-
-enum hw_register_bits {
- /* MVS_GBL_CTL */
- INT_EN = (1U << 1), /* Global int enable */
- HBA_RST = (1U << 0), /* HBA reset */
-
- /* MVS_GBL_INT_STAT */
- INT_XOR = (1U << 4), /* XOR engine event */
- INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
-
- /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
- SATA_TARGET = (1U << 16), /* port0 SATA target enable */
- MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
- MODE_AUTO_DET_PORT6 = (1U << 14),
- MODE_AUTO_DET_PORT5 = (1U << 13),
- MODE_AUTO_DET_PORT4 = (1U << 12),
- MODE_AUTO_DET_PORT3 = (1U << 11),
- MODE_AUTO_DET_PORT2 = (1U << 10),
- MODE_AUTO_DET_PORT1 = (1U << 9),
- MODE_AUTO_DET_PORT0 = (1U << 8),
- MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
- MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
- MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
- MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
- MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
- MODE_SAS_PORT6_MASK = (1U << 6),
- MODE_SAS_PORT5_MASK = (1U << 5),
- MODE_SAS_PORT4_MASK = (1U << 4),
- MODE_SAS_PORT3_MASK = (1U << 3),
- MODE_SAS_PORT2_MASK = (1U << 2),
- MODE_SAS_PORT1_MASK = (1U << 1),
- MODE_SAS_PORT0_MASK = (1U << 0),
- MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
- MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
- MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
- MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
-
- /* SAS_MODE value may be
- * dictated (in hw) by values
- * of SATA_TARGET & AUTO_DET
- */
-
- /* MVS_TX_CFG */
- TX_EN = (1U << 16), /* Enable TX */
- TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
-
- /* MVS_RX_CFG */
- RX_EN = (1U << 16), /* Enable RX */
- RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
-
- /* MVS_INT_COAL */
- COAL_EN = (1U << 16), /* Enable int coalescing */
-
- /* MVS_INT_STAT, MVS_INT_MASK */
- CINT_I2C = (1U << 31), /* I2C event */
- CINT_SW0 = (1U << 30), /* software event 0 */
- CINT_SW1 = (1U << 29), /* software event 1 */
- CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
- CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
- CINT_MEM = (1U << 26), /* int mem parity err */
- CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
- CINT_SRS = (1U << 3), /* SRS event */
- CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
- CINT_DONE = (1U << 0), /* cmd completion */
-
- /* shl for ports 1-3 */
- CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
- CINT_PORT = (1U << 8), /* port0 event */
- CINT_PORT_MASK_OFFSET = 8,
- CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
-
- /* TX (delivery) ring bits */
- TXQ_CMD_SHIFT = 29,
- TXQ_CMD_SSP = 1, /* SSP protocol */
- TXQ_CMD_SMP = 2, /* SMP protocol */
- TXQ_CMD_STP = 3, /* STP/SATA protocol */
- TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
- TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
- TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
- TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
- TXQ_SRS_SHIFT = 20, /* SATA register set */
- TXQ_SRS_MASK = 0x7f,
- TXQ_PHY_SHIFT = 12, /* PHY bitmap */
- TXQ_PHY_MASK = 0xff,
- TXQ_SLOT_MASK = 0xfff, /* slot number */
-
- /* RX (completion) ring bits */
- RXQ_GOOD = (1U << 23), /* Response good */
- RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
- RXQ_CMD_RX = (1U << 20), /* target cmd received */
- RXQ_ATTN = (1U << 19), /* attention */
- RXQ_RSP = (1U << 18), /* response frame xfer'd */
- RXQ_ERR = (1U << 17), /* err info rec xfer'd */
- RXQ_DONE = (1U << 16), /* cmd complete */
- RXQ_SLOT_MASK = 0xfff, /* slot number */
-
- /* mvs_cmd_hdr bits */
- MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
- MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
-
- /* SSP initiator only */
- MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
-
- /* SSP initiator or target */
- MCH_SSP_FR_TASK = 0x1, /* TASK frame */
-
- /* SSP target only */
- MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
- MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
- MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
- MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
-
- MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
- MCH_FBURST = (1U << 11), /* first burst (SSP) */
- MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
- MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
- MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
- MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
- MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
- MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
- MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
- MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
-
- CCTL_RST = (1U << 5), /* port logic reset */
-
- /* 0(LSB first), 1(MSB first) */
- CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
- CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
- CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
- CCTL_ENDIAN_CMD = (1U << 0), /* command table */
-
- /* MVS_Px_SER_CTLSTAT (per-phy control) */
- PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
- PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
- PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
- PHY_RST = (1U << 0), /* phy reset */
- PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
- PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
- PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
- PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
- (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
- PHY_READY_MASK = (1U << 20),
-
- /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
- PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
- PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
- PHYEV_AN = (1U << 18), /* SATA async notification */
- PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
- PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
- PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
- PHYEV_IU_BIG = (1U << 11), /* IU too long err */
- PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
- PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
- PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
- PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
- PHYEV_PORT_SEL = (1U << 6), /* port selector present */
- PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
- PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
- PHYEV_ID_FAIL = (1U << 3), /* identify failed */
- PHYEV_ID_DONE = (1U << 2), /* identify done */
- PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
- PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
-
- /* MVS_PCS */
- PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
- PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
- PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */
- PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
- PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
- PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
- PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
- PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
- PCS_CMD_RST = (1U << 1), /* reset cmd issue */
- PCS_CMD_EN = (1U << 0), /* enable cmd issue */
-
- /* Port n Attached Device Info */
- PORT_DEV_SSP_TRGT = (1U << 19),
- PORT_DEV_SMP_TRGT = (1U << 18),
- PORT_DEV_STP_TRGT = (1U << 17),
- PORT_DEV_SSP_INIT = (1U << 11),
- PORT_DEV_SMP_INIT = (1U << 10),
- PORT_DEV_STP_INIT = (1U << 9),
- PORT_PHY_ID_MASK = (0xFFU << 24),
- PORT_DEV_TRGT_MASK = (0x7U << 17),
- PORT_DEV_INIT_MASK = (0x7U << 9),
- PORT_DEV_TYPE_MASK = (0x7U << 0),
-
- /* Port n PHY Status */
- PHY_RDY = (1U << 2),
- PHY_DW_SYNC = (1U << 1),
- PHY_OOB_DTCTD = (1U << 0),
-
- /* VSR */
- /* PHYMODE 6 (CDB) */
- PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */
- PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
- PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
- PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
- PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
- PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
- PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
- PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
- PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
- PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
- PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
- PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
- PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
- PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
-};
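
The repeated "shl for ports 1-3" notes above describe a convention rather than spell it out: only the port-0 bit is named, and the bits for higher-numbered ports are obtained by shifting. A minimal sketch of that derivation, mirroring what mvs_int_full() does further down; the helper name is illustrative and not part of the driver:

/* Illustrative only: extract the per-port event bits for port 'i'
 * from the central interrupt status, following the "shl for ports"
 * convention used by CINT_PORT / CINT_PORT_STOPPED above. */
static inline u32 mvs_port_event_bits(u32 stat, int i)
{
	return (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
}
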
-
-enum mvs_info_flags {
- MVF_MSI = (1U << 0), /* MSI is enabled */
- MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
-};
-
-enum sas_cmd_port_registers {
- CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
- CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
- CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
- CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
- CMD_OOB_SPACE = 0x110, /* OOB space control register */
- CMD_OOB_BURST = 0x114, /* OOB burst control register */
- CMD_PHY_TIMER = 0x118, /* PHY timer control register */
- CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
- CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
- CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
- CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
- CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
- CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
- CMD_ID_TEST = 0x134, /* ID test register */
- CMD_PL_TIMER = 0x138, /* PL timer register */
- CMD_WD_TIMER = 0x13c, /* WD timer register */
- CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
- CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
- CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
- CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
- CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
- CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
- CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
- CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
- CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
- CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */
- CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
- CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
- CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
- CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
- CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
- CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
- CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
- CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
- CMD_RESET_COUNT = 0x188, /* Reset Count */
- CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
- CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
- CMD_PHY_CTL = 0x194, /* PHY Control and Status */
- CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
- CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
- CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
- CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
- CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
- CMD_HOST_CTL = 0x1AC, /* Host Control Status */
- CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
- CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
- CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
- CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
- CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
- CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
-};
-
-/* SAS/SATA configuration port registers, aka phy registers */
-enum sas_sata_config_port_regs {
- PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
- PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
- PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
- PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
- PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
- PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
- PHYR_SATA_CTL = 0x18, /* SATA control */
- PHYR_PHY_STAT = 0x1C, /* PHY status */
- PHYR_SATA_SIG0 = 0x20, /* port SATA signature FIS (Byte 0-3) */
- PHYR_SATA_SIG1 = 0x24, /* port SATA signature FIS (Byte 4-7) */
- PHYR_SATA_SIG2 = 0x28, /* port SATA signature FIS (Byte 8-11) */
- PHYR_SATA_SIG3 = 0x2c, /* port SATA signature FIS (Byte 12-15) */
- PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
- PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
- PHYR_WIDE_PORT = 0x38, /* wide port participating */
- PHYR_CURRENT0 = 0x80, /* current connection info 0 */
- PHYR_CURRENT1 = 0x84, /* current connection info 1 */
- PHYR_CURRENT2 = 0x88, /* current connection info 2 */
-};
-
-/* SAS/SATA Vendor Specific Port Registers */
-enum sas_sata_vsp_regs {
- VSR_PHY_STAT = 0x00, /* Phy Status */
- VSR_PHY_MODE1 = 0x01, /* phy tx */
- VSR_PHY_MODE2 = 0x02, /* tx scc */
- VSR_PHY_MODE3 = 0x03, /* pll */
- VSR_PHY_MODE4 = 0x04, /* VCO */
- VSR_PHY_MODE5 = 0x05, /* Rx */
- VSR_PHY_MODE6 = 0x06, /* CDR */
- VSR_PHY_MODE7 = 0x07, /* Impedance */
- VSR_PHY_MODE8 = 0x08, /* Voltage */
- VSR_PHY_MODE9 = 0x09, /* Test */
- VSR_PHY_MODE10 = 0x0A, /* Power */
- VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
- VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */
- VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */
-};
-
-enum pci_cfg_registers {
- PCR_PHY_CTL = 0x40,
- PCR_PHY_CTL2 = 0x90,
- PCR_DEV_CTRL = 0xE8,
-};
-
-enum pci_cfg_register_bits {
- PCTL_PWR_ON = (0xFU << 24),
- PCTL_OFF = (0xFU << 12),
- PRD_REQ_SIZE = (0x4000),
- PRD_REQ_MASK = (0x00007000),
-};
-
-enum nvram_layout_offsets {
- NVR_SIG = 0x00, /* 0xAA, 0x55 */
- NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */
-};
-
-enum chip_flavors {
- chip_6320,
- chip_6440,
- chip_6480,
-};
-
-enum port_type {
- PORT_TYPE_SAS = (1L << 1),
- PORT_TYPE_SATA = (1L << 0),
-};
-
-/* Command Table Format */
-enum ct_format {
- /* SSP */
- SSP_F_H = 0x00,
- SSP_F_IU = 0x18,
- SSP_F_MAX = 0x4D,
- /* STP */
- STP_CMD_FIS = 0x00,
- STP_ATAPI_CMD = 0x40,
- STP_F_MAX = 0x10,
- /* SMP */
- SMP_F_T = 0x00,
- SMP_F_DEP = 0x01,
- SMP_F_MAX = 0x101,
-};
-
-enum status_buffer {
- SB_EIR_OFF = 0x00, /* Error Information Record */
- SB_RFB_OFF = 0x08, /* Response Frame Buffer */
- SB_RFB_MAX = 0x400, /* RFB size*/
-};
-
-enum error_info_rec {
- CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
- CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
- RSP_OVER = (1U << 29), /* rsp buffer overflow */
- RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
- UNK_FIS = (1U << 27), /* unknown FIS */
- DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
- SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
- TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
- R_ERR = (1U << 23), /* SATA returned R_ERR prim */
- RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
- XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
- UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
- DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
- INTERLOCK = (1U << 15), /* interlock error */
- NAK = (1U << 14), /* NAK rx'd */
- ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
- CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
- OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
- PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
- NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
- STP_RES_BSY = (1U << 8), /* STP resources busy */
- BREAK = (1U << 7), /* break received */
- BAD_DEST = (1U << 6), /* bad destination */
- BAD_PROTO = (1U << 5), /* protocol not supported */
- BAD_RATE = (1U << 4), /* cxn rate not supported */
- WRONG_DEST = (1U << 3), /* wrong destination error */
- CREDIT_TO = (1U << 2), /* credit timeout */
- WDOG_TO = (1U << 1), /* watchdog timeout */
- BUF_PAR = (1U << 0), /* buffer parity error */
-};
-
-enum error_info_rec_2 {
- SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
- GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
- APP_CHK_ERR = (1U << 13), /* Application Check error */
- REF_CHK_ERR = (1U << 12), /* Reference Check Error */
- USR_BLK_NM = (1U << 0), /* User Block Number */
-};
-
-struct mvs_chip_info {
- u32 n_phy;
- u32 srs_sz;
- u32 slot_width;
-};
-
-struct mvs_err_info {
- __le32 flags;
- __le32 flags2;
-};
-
-struct mvs_prd {
- __le64 addr; /* 64-bit buffer address */
- __le32 reserved;
- __le32 len; /* 16-bit length */
-};
-
-struct mvs_cmd_hdr {
- __le32 flags; /* PRD tbl len; SAS, SATA ctl */
- __le32 lens; /* cmd, max resp frame len */
- __le32 tags; /* targ port xfer tag; tag */
- __le32 data_len; /* data xfer len */
- __le64 cmd_tbl; /* command table address */
- __le64 open_frame; /* open addr frame address */
- __le64 status_buf; /* status buffer address */
- __le64 prd_tbl; /* PRD tbl address */
- __le32 reserved[4];
-};
-
-struct mvs_port {
- struct asd_sas_port sas_port;
- u8 port_attached;
- u8 taskfileset;
- u8 wide_port_phymap;
- struct list_head list;
-};
-
-struct mvs_phy {
- struct mvs_port *port;
- struct asd_sas_phy sas_phy;
- struct sas_identify identify;
- struct scsi_device *sdev;
- u64 dev_sas_addr;
- u64 att_dev_sas_addr;
- u32 att_dev_info;
- u32 dev_info;
- u32 phy_type;
- u32 phy_status;
- u32 irq_status;
- u32 frame_rcvd_size;
- u8 frame_rcvd[32];
- u8 phy_attached;
- enum sas_linkrate minimum_linkrate;
- enum sas_linkrate maximum_linkrate;
-};
-
-struct mvs_slot_info {
- struct list_head list;
- struct sas_task *task;
- u32 n_elem;
- u32 tx;
-
- /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
- * and PRD table
- */
- void *buf;
- dma_addr_t buf_dma;
-#if _MV_DUMP
- u32 cmd_size;
-#endif
-
- void *response;
- struct mvs_port *port;
-};
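
The comment above only names the four regions packed into slot->buf; the prep routines later in the file carve them out in a fixed order (command table, open address frame, PRD table, status buffer). A rough sketch of the resulting status-buffer offset for an SSP command; MVS_SSP_CMD_SZ and MVS_OAF_SZ are driver constants defined in headers not shown in this excerpt, and the helper name is illustrative:

/* Illustrative only: offset of the status buffer inside slot->buf,
 * following the region order used by mvs_task_prep_ssp() below. */
static inline u32 mvs_ssp_status_buf_offset(u32 n_elem)
{
	return MVS_SSP_CMD_SZ + MVS_OAF_SZ +
	       n_elem * sizeof(struct mvs_prd);
}
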
-
-struct mvs_info {
- unsigned long flags;
-
- spinlock_t lock; /* host-wide lock */
- struct pci_dev *pdev; /* our device */
- void __iomem *regs; /* enhanced mode registers */
- void __iomem *peri_regs; /* peripheral registers */
-
- u8 sas_addr[SAS_ADDR_SIZE];
- struct sas_ha_struct sas; /* SCSI/SAS glue */
- struct Scsi_Host *shost;
-
- __le32 *tx; /* TX (delivery) DMA ring */
- dma_addr_t tx_dma;
- u32 tx_prod; /* cached next-producer idx */
-
- __le32 *rx; /* RX (completion) DMA ring */
- dma_addr_t rx_dma;
- u32 rx_cons; /* RX consumer idx */
-
- __le32 *rx_fis; /* RX'd FIS area */
- dma_addr_t rx_fis_dma;
-
- struct mvs_cmd_hdr *slot; /* DMA command header slots */
- dma_addr_t slot_dma;
-
- const struct mvs_chip_info *chip;
-
- u8 tags[MVS_SLOTS];
- struct mvs_slot_info slot_info[MVS_SLOTS];
- /* further per-slot information */
- struct mvs_phy phy[MVS_MAX_PHYS];
- struct mvs_port port[MVS_MAX_PHYS];
-#ifdef MVS_USE_TASKLET
- struct tasklet_struct tasklet;
-#endif
-};
-
-static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
- void *funcdata);
-static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
-static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
-static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
-static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
-static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
-static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
-
-static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
-static void mvs_detect_porttype(struct mvs_info *mvi, int i);
-static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
-static void mvs_release_task(struct mvs_info *mvi, int phy_no);
-
-static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
-static void mvs_scan_start(struct Scsi_Host *);
-static int mvs_slave_configure(struct scsi_device *sdev);
-
-static struct scsi_transport_template *mvs_stt;
-
-static const struct mvs_chip_info mvs_chips[] = {
- [chip_6320] = { 2, 16, 9 },
- [chip_6440] = { 4, 16, 9 },
- [chip_6480] = { 8, 32, 10 },
-};
-
-static struct scsi_host_template mvs_sht = {
- .module = THIS_MODULE,
- .name = DRV_NAME,
- .queuecommand = sas_queuecommand,
- .target_alloc = sas_target_alloc,
- .slave_configure = mvs_slave_configure,
- .slave_destroy = sas_slave_destroy,
- .scan_finished = mvs_scan_finished,
- .scan_start = mvs_scan_start,
- .change_queue_depth = sas_change_queue_depth,
- .change_queue_type = sas_change_queue_type,
- .bios_param = sas_bios_param,
- .can_queue = 1,
- .cmd_per_lun = 1,
- .this_id = -1,
- .sg_tablesize = SG_ALL,
- .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
- .eh_bus_reset_handler = sas_eh_bus_reset_handler,
- .slave_alloc = sas_slave_alloc,
- .target_destroy = sas_target_destroy,
- .ioctl = sas_ioctl,
-};
-
-static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
-{
- u32 i;
- u32 run;
- u32 offset;
-
- offset = 0;
- while (size) {
- printk("%08X : ", baseaddr + offset);
- if (size >= 16)
- run = 16;
- else
- run = size;
- size -= run;
- for (i = 0; i < 16; i++) {
- if (i < run)
- printk("%02X ", (u32)data[i]);
- else
- printk(" ");
- }
- printk(": ");
- for (i = 0; i < run; i++)
- printk("%c", isalnum(data[i]) ? data[i] : '.');
- printk("\n");
- data = &data[16];
- offset += run;
- }
- printk("\n");
-}
-
-#if _MV_DUMP
-static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
- enum sas_protocol proto)
-{
- u32 offset;
- struct pci_dev *pdev = mvi->pdev;
- struct mvs_slot_info *slot = &mvi->slot_info[tag];
-
- offset = slot->cmd_size + MVS_OAF_SZ +
- sizeof(struct mvs_prd) * slot->n_elem;
- dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
- tag);
- mvs_hexdump(32, (u8 *) slot->response,
- (u32) slot->buf_dma + offset);
-}
-#endif
-
-static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
- enum sas_protocol proto)
-{
-#if _MV_DUMP
- u32 sz, w_ptr;
- u64 addr;
- void __iomem *regs = mvi->regs;
- struct pci_dev *pdev = mvi->pdev;
- struct mvs_slot_info *slot = &mvi->slot_info[tag];
-
- /*Delivery Queue */
- sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
- w_ptr = slot->tx;
- addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
- dev_printk(KERN_DEBUG, &pdev->dev,
- "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
- dev_printk(KERN_DEBUG, &pdev->dev,
- "Delivery Queue Base Address=0x%llX (PA)"
- "(tx_dma=0x%llX), Entry=%04d\n",
- addr, mvi->tx_dma, w_ptr);
- mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
- (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
- /*Command List */
- addr = mvi->slot_dma;
- dev_printk(KERN_DEBUG, &pdev->dev,
- "Command List Base Address=0x%llX (PA)"
- "(slot_dma=0x%llX), Header=%03d\n",
- addr, slot->buf_dma, tag);
- dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
- /*mvs_cmd_hdr */
- mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
- (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
- /*1.command table area */
- dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
- mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
- /*2.open address frame area */
- dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
- mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
- (u32) slot->buf_dma + slot->cmd_size);
- /*3.status buffer */
- mvs_hba_sb_dump(mvi, tag, proto);
- /*4.PRD table */
- dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
- mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
- (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
- (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
-#endif
-}
-
-static void mvs_hba_cq_dump(struct mvs_info *mvi)
-{
-#if (_MV_DUMP > 2)
- u64 addr;
- void __iomem *regs = mvi->regs;
- struct pci_dev *pdev = mvi->pdev;
- u32 entry = mvi->rx_cons + 1;
- u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
-
- /*Completion Queue */
- addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
- dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
- mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
- dev_printk(KERN_DEBUG, &pdev->dev,
- "Completion List Base Address=0x%llX (PA), "
- "CQ_Entry=%04d, CQ_WP=0x%08X\n",
- addr, entry - 1, mvi->rx[0]);
- mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
- mvi->rx_dma + sizeof(u32) * entry);
-#endif
-}
-
-static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
-{
- void __iomem *regs = mvi->regs;
- u32 tmp;
-
- tmp = mr32(GBL_CTL);
-
- mw32(GBL_CTL, tmp | INT_EN);
-}
-
-static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
-{
- void __iomem *regs = mvi->regs;
- u32 tmp;
-
- tmp = mr32(GBL_CTL);
-
- mw32(GBL_CTL, tmp & ~INT_EN);
-}
-
-static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
-
-/* move to PCI layer or libata core? */
-static int pci_go_64(struct pci_dev *pdev)
-{
- int rc;
-
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
- return rc;
- }
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
- }
-
- return rc;
-}
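
pci_go_64() above tries a 64-bit streaming/consistent DMA mask and falls back to 32-bit. For comparison only (this is not code from the driver, and the API postdates it), later kernels usually express the same fallback with dma_set_mask_and_coherent():

/* Sketch of the same fallback using the combined DMA-mask helper;
 * shown for comparison, assuming a kernel that provides it. */
static int pci_go_64_sketch(struct pci_dev *pdev)
{
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;	/* 64-bit DMA available */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}
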
-
-static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
-{
- if (task->lldd_task) {
- struct mvs_slot_info *slot;
- slot = (struct mvs_slot_info *) task->lldd_task;
- *tag = slot - mvi->slot_info;
- return 1;
- }
- return 0;
-}
-
-static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
-{
- void *bitmap = (void *) &mvi->tags;
- clear_bit(tag, bitmap);
-}
-
-static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
-{
- mvs_tag_clear(mvi, tag);
-}
-
-static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
-{
- void *bitmap = (void *) &mvi->tags;
- set_bit(tag, bitmap);
-}
-
-static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
-{
- unsigned int index, tag;
- void *bitmap = (void *) &mvi->tags;
-
- index = find_first_zero_bit(bitmap, MVS_SLOTS);
- tag = index;
- if (tag >= MVS_SLOTS)
- return -SAS_QUEUE_FULL;
- mvs_tag_set(mvi, tag);
- *tag_out = tag;
- return 0;
-}
-
-static void mvs_tag_init(struct mvs_info *mvi)
-{
- int i;
- for (i = 0; i < MVS_SLOTS; ++i)
- mvs_tag_clear(mvi, i);
-}
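
The tag helpers above treat mvi->tags as a plain bitmap of MVS_SLOTS command slots. A minimal usage sketch (the helper name is illustrative and error handling is reduced to the essentials):

/* Illustrative only: claim a free slot tag, then release it. */
static int mvs_tag_demo(struct mvs_info *mvi)
{
	u32 tag;

	if (mvs_tag_alloc(mvi, &tag))	/* -SAS_QUEUE_FULL when no slot free */
		return -EBUSY;
	/* ... use mvi->slot_info[tag] ... */
	mvs_tag_free(mvi, tag);
	return 0;
}
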
-
-#ifndef MVS_DISABLE_NVRAM
-static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
-{
- int timeout = 1000;
-
- if (addr & ~SPI_ADDR_MASK)
- return -EINVAL;
-
- writel(addr, regs + SPI_CMD);
- writel(TWSI_RD, regs + SPI_CTL);
-
- while (timeout-- > 0) {
- if (readl(regs + SPI_CTL) & TWSI_RDY) {
- *data = readl(regs + SPI_DATA);
- return 0;
- }
-
- udelay(10);
- }
-
- return -EBUSY;
-}
-
-static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
- void *buf, u32 buflen)
-{
- u32 addr_end, tmp_addr, i, j;
- u32 tmp = 0;
- int rc;
- u8 *tmp8, *buf8 = buf;
-
- addr_end = addr + buflen;
- tmp_addr = ALIGN(addr, 4);
- if (addr > 0xff)
- return -EINVAL;
-
- j = addr & 0x3;
- if (j) {
- rc = mvs_eep_read(regs, tmp_addr, &tmp);
- if (rc)
- return rc;
-
- tmp8 = (u8 *)&tmp;
- for (i = j; i < 4; i++)
- *buf8++ = tmp8[i];
-
- tmp_addr += 4;
- }
-
- for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
- rc = mvs_eep_read(regs, tmp_addr, &tmp);
- if (rc)
- return rc;
-
- memcpy(buf8, &tmp, 4);
- buf8 += 4;
- }
-
- if (tmp_addr < addr_end) {
- rc = mvs_eep_read(regs, tmp_addr, &tmp);
- if (rc)
- return rc;
-
- tmp8 = (u8 *)&tmp;
- j = addr_end - tmp_addr;
- for (i = 0; i < j; i++)
- *buf8++ = tmp8[i];
-
- tmp_addr += 4;
- }
-
- return 0;
-}
-#endif
-
-static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
- void *buf, u32 buflen)
-{
-#ifndef MVS_DISABLE_NVRAM
- void __iomem *regs = mvi->regs;
- int rc, i;
- u32 sum;
- u8 hdr[2], *tmp;
- const char *msg;
-
- rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
- if (rc) {
- msg = "nvram hdr read failed";
- goto err_out;
- }
- rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
- if (rc) {
- msg = "nvram read failed";
- goto err_out;
- }
-
- if (hdr[0] != 0x5A) {
- /* entry id */
- msg = "invalid nvram entry id";
- rc = -ENOENT;
- goto err_out;
- }
-
- tmp = buf;
- sum = ((u32)hdr[0]) + ((u32)hdr[1]);
- for (i = 0; i < buflen; i++)
- sum += ((u32)tmp[i]);
-
- if (sum) {
- msg = "nvram checksum failure";
- rc = -EILSEQ;
- goto err_out;
- }
-
- return 0;
-
-err_out:
- dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
- return rc;
-#else
- /* FIXME: for SAS target mode */
- memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
- return 0;
-#endif
-}
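
A usage sketch for mvs_nvram_read(): reading the 8-byte SAS address stored at NVR_SAS_ADDR (see enum nvram_layout_offsets above) into mvi->sas_addr. The wrapper name is illustrative; the driver's actual call site sits later in the probe path, outside this excerpt:

/* Illustrative only: fetch the host SAS address from NVRAM. */
static int mvs_load_sas_addr(struct mvs_info *mvi)
{
	return mvs_nvram_read(mvi, NVR_SAS_ADDR,
			      mvi->sas_addr, SAS_ADDR_SIZE);
}
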
-
-static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
-{
- struct mvs_phy *phy = &mvi->phy[i];
- struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
-
- if (!phy->phy_attached)
- return;
-
- if (sas_phy->phy) {
- struct sas_phy *sphy = sas_phy->phy;
-
- sphy->negotiated_linkrate = sas_phy->linkrate;
- sphy->minimum_linkrate = phy->minimum_linkrate;
- sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sphy->maximum_linkrate = phy->maximum_linkrate;
- sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
- }
-
- if (phy->phy_type & PORT_TYPE_SAS) {
- struct sas_identify_frame *id;
-
- id = (struct sas_identify_frame *)phy->frame_rcvd;
- id->dev_type = phy->identify.device_type;
- id->initiator_bits = SAS_PROTOCOL_ALL;
- id->target_bits = phy->identify.target_port_protocols;
- } else if (phy->phy_type & PORT_TYPE_SATA) {
- /* TODO */
- }
- mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
- mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
- PORTE_BYTES_DMAED);
-}
-
-static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
-{
- /* give the phy enabling interrupt event time to come in (1s
- * is empirically about all it takes) */
- if (time < HZ)
- return 0;
- /* Wait for discovery to finish */
- scsi_flush_work(shost);
- return 1;
-}
-
-static void mvs_scan_start(struct Scsi_Host *shost)
-{
- int i;
- struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
-
- for (i = 0; i < mvi->chip->n_phy; ++i) {
- mvs_bytes_dmaed(mvi, i);
- }
-}
-
-static int mvs_slave_configure(struct scsi_device *sdev)
-{
- struct domain_device *dev = sdev_to_domain_dev(sdev);
- int ret = sas_slave_configure(sdev);
-
- if (ret)
- return ret;
-
- if (dev_is_sata(dev)) {
- /* struct ata_port *ap = dev->sata_dev.ap; */
- /* struct ata_device *adev = ap->link.device; */
-
- /* clamp at no NCQ for the time being */
- /* adev->flags |= ATA_DFLAG_NCQ_OFF; */
- scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
- }
- return 0;
-}
-
-static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
-{
- struct pci_dev *pdev = mvi->pdev;
- struct sas_ha_struct *sas_ha = &mvi->sas;
- struct mvs_phy *phy = &mvi->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
-
- phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
- /*
- * events now holds the port event; check the interrupt status
- * that belongs to this port.
- */
- dev_printk(KERN_DEBUG, &pdev->dev,
- "Port %d Event = %X\n",
- phy_no, phy->irq_status);
-
- if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
- mvs_release_task(mvi, phy_no);
- if (!mvs_is_phy_ready(mvi, phy_no)) {
- sas_phy_disconnected(sas_phy);
- sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
- dev_printk(KERN_INFO, &pdev->dev,
- "Port %d Unplug Notice\n", phy_no);
-
- } else
- mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
- }
- if (!(phy->irq_status & PHYEV_DEC_ERR)) {
- if (phy->irq_status & PHYEV_COMWAKE) {
- u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
- mvs_write_port_irq_mask(mvi, phy_no,
- tmp | PHYEV_SIG_FIS);
- }
- if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
- phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
- if (phy->phy_status) {
- mvs_detect_porttype(mvi, phy_no);
-
- if (phy->phy_type & PORT_TYPE_SATA) {
- u32 tmp = mvs_read_port_irq_mask(mvi,
- phy_no);
- tmp &= ~PHYEV_SIG_FIS;
- mvs_write_port_irq_mask(mvi,
- phy_no, tmp);
- }
-
- mvs_update_phyinfo(mvi, phy_no, 0);
- sas_ha->notify_phy_event(sas_phy,
- PHYE_OOB_DONE);
- mvs_bytes_dmaed(mvi, phy_no);
- } else {
- dev_printk(KERN_DEBUG, &pdev->dev,
- "plugin interrupt but phy is gone\n");
- mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
- NULL);
- }
- } else if (phy->irq_status & PHYEV_BROAD_CH) {
- mvs_release_task(mvi, phy_no);
- sas_ha->notify_port_event(sas_phy,
- PORTE_BROADCAST_RCVD);
- }
- }
- mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
-}
-
-static void mvs_int_sata(struct mvs_info *mvi)
-{
- u32 tmp;
- void __iomem *regs = mvi->regs;
- tmp = mr32(INT_STAT_SRS);
- mw32(INT_STAT_SRS, tmp & 0xFFFF);
-}
-
-static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
- u32 slot_idx)
-{
- void __iomem *regs = mvi->regs;
- struct domain_device *dev = task->dev;
- struct asd_sas_port *sas_port = dev->port;
- struct mvs_port *port = mvi->slot_info[slot_idx].port;
- u32 reg_set, phy_mask;
-
- if (!sas_protocol_ata(task->task_proto)) {
- reg_set = 0;
- phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
- sas_port->phy_mask;
- } else {
- reg_set = port->taskfileset;
- phy_mask = sas_port->phy_mask;
- }
- mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
- (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
- (phy_mask << TXQ_PHY_SHIFT) |
- (reg_set << TXQ_SRS_SHIFT));
-
- mw32(TX_PROD_IDX, mvi->tx_prod);
- mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
-}
-
-static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
- u32 slot_idx, int err)
-{
- struct mvs_port *port = mvi->slot_info[slot_idx].port;
- struct task_status_struct *tstat = &task->task_status;
- struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
- int stat = SAM_GOOD;
-
- resp->frame_len = sizeof(struct dev_to_host_fis);
- memcpy(&resp->ending_fis[0],
- SATA_RECEIVED_D2H_FIS(port->taskfileset),
- sizeof(struct dev_to_host_fis));
- tstat->buf_valid_size = sizeof(*resp);
- if (unlikely(err))
- stat = SAS_PROTO_RESPONSE;
- return stat;
-}
-
-static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
-{
- u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
- mvs_tag_clear(mvi, slot_idx);
-}
-
-static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
- struct mvs_slot_info *slot, u32 slot_idx)
-{
- if (!sas_protocol_ata(task->task_proto))
- if (slot->n_elem)
- pci_unmap_sg(mvi->pdev, task->scatter,
- slot->n_elem, task->data_dir);
-
- switch (task->task_proto) {
- case SAS_PROTOCOL_SMP:
- pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
- pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
- break;
-
- case SAS_PROTOCOL_SATA:
- case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SSP:
- default:
- /* do nothing */
- break;
- }
- list_del(&slot->list);
- task->lldd_task = NULL;
- slot->task = NULL;
- slot->port = NULL;
-}
-
-static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
- u32 slot_idx)
-{
- struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
- u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
- u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
- int stat = SAM_CHECK_COND;
-
- if (err_dw1 & SLOT_BSY_ERR) {
- stat = SAS_QUEUE_FULL;
- mvs_slot_reset(mvi, task, slot_idx);
- }
- switch (task->task_proto) {
- case SAS_PROTOCOL_SSP:
- break;
- case SAS_PROTOCOL_SMP:
- break;
- case SAS_PROTOCOL_SATA:
- case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- if (err_dw0 & TFILE_ERR)
- stat = mvs_sata_done(mvi, task, slot_idx, 1);
- break;
- default:
- break;
- }
-
- mvs_hexdump(16, (u8 *) slot->response, 0);
- return stat;
-}
-
-static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
-{
- u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
- struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
- struct sas_task *task = slot->task;
- struct task_status_struct *tstat;
- struct mvs_port *port;
- bool aborted;
- void *to;
-
- if (unlikely(!task || !task->lldd_task))
- return -1;
-
- mvs_hba_cq_dump(mvi);
-
- spin_lock(&task->task_state_lock);
- aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
- if (!aborted) {
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
- task->task_state_flags |= SAS_TASK_STATE_DONE;
- }
- spin_unlock(&task->task_state_lock);
-
- if (aborted) {
- mvs_slot_task_free(mvi, task, slot, slot_idx);
- mvs_slot_free(mvi, rx_desc);
- return -1;
- }
-
- port = slot->port;
- tstat = &task->task_status;
- memset(tstat, 0, sizeof(*tstat));
- tstat->resp = SAS_TASK_COMPLETE;
-
- if (unlikely(!port->port_attached || flags)) {
- mvs_slot_err(mvi, task, slot_idx);
- if (!sas_protocol_ata(task->task_proto))
- tstat->stat = SAS_PHY_DOWN;
- goto out;
- }
-
- /* error info record present */
- if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
- tstat->stat = mvs_slot_err(mvi, task, slot_idx);
- goto out;
- }
-
- switch (task->task_proto) {
- case SAS_PROTOCOL_SSP:
- /* hw says status == 0, datapres == 0 */
- if (rx_desc & RXQ_GOOD) {
- tstat->stat = SAM_GOOD;
- tstat->resp = SAS_TASK_COMPLETE;
- }
- /* response frame present */
- else if (rx_desc & RXQ_RSP) {
- struct ssp_response_iu *iu =
- slot->response + sizeof(struct mvs_err_info);
- sas_ssp_task_response(&mvi->pdev->dev, task, iu);
- }
-
- /* should never happen? */
- else
- tstat->stat = SAM_CHECK_COND;
- break;
-
- case SAS_PROTOCOL_SMP: {
- struct scatterlist *sg_resp = &task->smp_task.smp_resp;
- tstat->stat = SAM_GOOD;
- to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
- memcpy(to + sg_resp->offset,
- slot->response + sizeof(struct mvs_err_info),
- sg_dma_len(sg_resp));
- kunmap_atomic(to, KM_IRQ0);
- break;
- }
-
- case SAS_PROTOCOL_SATA:
- case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
- tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
- break;
- }
-
- default:
- tstat->stat = SAM_CHECK_COND;
- break;
- }
-
-out:
- mvs_slot_task_free(mvi, task, slot, slot_idx);
- if (unlikely(tstat->stat != SAS_QUEUE_FULL))
- mvs_slot_free(mvi, rx_desc);
-
- spin_unlock(&mvi->lock);
- task->task_done(task);
- spin_lock(&mvi->lock);
- return tstat->stat;
-}
-
-static void mvs_release_task(struct mvs_info *mvi, int phy_no)
-{
- struct list_head *pos, *n;
- struct mvs_slot_info *slot;
- struct mvs_phy *phy = &mvi->phy[phy_no];
- struct mvs_port *port = phy->port;
- u32 rx_desc;
-
- if (!port)
- return;
-
- list_for_each_safe(pos, n, &port->list) {
- slot = container_of(pos, struct mvs_slot_info, list);
- rx_desc = (u32) (slot - mvi->slot_info);
- mvs_slot_complete(mvi, rx_desc, 1);
- }
-}
-
-static void mvs_int_full(struct mvs_info *mvi)
-{
- void __iomem *regs = mvi->regs;
- u32 tmp, stat;
- int i;
-
- stat = mr32(INT_STAT);
-
- mvs_int_rx(mvi, false);
-
- for (i = 0; i < MVS_MAX_PORTS; i++) {
- tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
- if (tmp)
- mvs_int_port(mvi, i, tmp);
- }
-
- if (stat & CINT_SRS)
- mvs_int_sata(mvi);
-
- mw32(INT_STAT, stat);
-}
-
-static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
-{
- void __iomem *regs = mvi->regs;
- u32 rx_prod_idx, rx_desc;
- bool attn = false;
- struct pci_dev *pdev = mvi->pdev;
-
- /* the first dword in the RX ring is special: it contains
- * a mirror of the hardware's RX producer index, so that
- * we don't have to stall the CPU reading that register.
- * The actual RX ring is offset by one dword, due to this.
- */
- rx_prod_idx = mvi->rx_cons;
- mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
- if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
- return 0;
-
- /* The completion entry may arrive late; read the register and
- * try again. Note: if interrupt coalescing is enabled, the
- * register must be read every time.
- */
- if (mvi->rx_cons == rx_prod_idx)
- mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
-
- if (mvi->rx_cons == rx_prod_idx)
- return 0;
-
- while (mvi->rx_cons != rx_prod_idx) {
-
- /* increment our internal RX consumer pointer */
- rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
-
- rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
-
- if (likely(rx_desc & RXQ_DONE))
- mvs_slot_complete(mvi, rx_desc, 0);
- if (rx_desc & RXQ_ATTN) {
- attn = true;
- dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
- rx_desc);
- } else if (rx_desc & RXQ_ERR) {
- if (!(rx_desc & RXQ_DONE))
- mvs_slot_complete(mvi, rx_desc, 0);
- dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
- rx_desc);
- } else if (rx_desc & RXQ_SLOT_RESET) {
- dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
- rx_desc);
- mvs_slot_free(mvi, rx_desc);
- }
- }
-
- if (attn && self_clear)
- mvs_int_full(mvi);
-
- return 0;
-}
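
As the comment at the top of mvs_int_rx() explains, rx[0] mirrors the hardware's RX producer index, so descriptor i actually lives at rx[i + 1]. A minimal accessor sketch of that indexing (illustrative helper, not part of the driver):

/* Illustrative only: the RX ring is offset by one dword because
 * rx[0] holds the mirrored hardware producer index. */
static inline u32 mvs_rx_desc(struct mvs_info *mvi, u32 idx)
{
	return le32_to_cpu(mvi->rx[idx + 1]);
}
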
-
-#ifdef MVS_USE_TASKLET
-static void mvs_tasklet(unsigned long data)
-{
- struct mvs_info *mvi = (struct mvs_info *) data;
- unsigned long flags;
-
- spin_lock_irqsave(&mvi->lock, flags);
-
-#ifdef MVS_DISABLE_MSI
- mvs_int_full(mvi);
-#else
- mvs_int_rx(mvi, true);
-#endif
- spin_unlock_irqrestore(&mvi->lock, flags);
-}
-#endif
-
-static irqreturn_t mvs_interrupt(int irq, void *opaque)
-{
- struct mvs_info *mvi = opaque;
- void __iomem *regs = mvi->regs;
- u32 stat;
-
- stat = mr32(GBL_INT_STAT);
-
- if (stat == 0 || stat == 0xffffffff)
- return IRQ_NONE;
-
- /* clear CMD_CMPLT ASAP */
- mw32_f(INT_STAT, CINT_DONE);
-
-#ifndef MVS_USE_TASKLET
- spin_lock(&mvi->lock);
-
- mvs_int_full(mvi);
-
- spin_unlock(&mvi->lock);
-#else
- tasklet_schedule(&mvi->tasklet);
-#endif
- return IRQ_HANDLED;
-}
-
-#ifndef MVS_DISABLE_MSI
-static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
-{
- struct mvs_info *mvi = opaque;
-
-#ifndef MVS_USE_TASKLET
- spin_lock(&mvi->lock);
-
- mvs_int_rx(mvi, true);
-
- spin_unlock(&mvi->lock);
-#else
- tasklet_schedule(&mvi->tasklet);
-#endif
- return IRQ_HANDLED;
-}
-#endif
-
-struct mvs_task_exec_info {
- struct sas_task *task;
- struct mvs_cmd_hdr *hdr;
- struct mvs_port *port;
- u32 tag;
- int n_elem;
-};
-
-static int mvs_task_prep_smp(struct mvs_info *mvi,
- struct mvs_task_exec_info *tei)
-{
- int elem, rc, i;
- struct sas_task *task = tei->task;
- struct mvs_cmd_hdr *hdr = tei->hdr;
- struct scatterlist *sg_req, *sg_resp;
- u32 req_len, resp_len, tag = tei->tag;
- void *buf_tmp;
- u8 *buf_oaf;
- dma_addr_t buf_tmp_dma;
- struct mvs_prd *buf_prd;
- struct scatterlist *sg;
- struct mvs_slot_info *slot = &mvi->slot_info[tag];
- struct asd_sas_port *sas_port = task->dev->port;
- u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-#if _MV_DUMP
- u8 *buf_cmd;
- void *from;
-#endif
- /*
- * DMA-map SMP request, response buffers
- */
- sg_req = &task->smp_task.smp_req;
- elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
- if (!elem)
- return -ENOMEM;
- req_len = sg_dma_len(sg_req);
-
- sg_resp = &task->smp_task.smp_resp;
- elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
- if (!elem) {
- rc = -ENOMEM;
- goto err_out;
- }
- resp_len = sg_dma_len(sg_resp);
-
- /* must be in dwords */
- if ((req_len & 0x3) || (resp_len & 0x3)) {
- rc = -EINVAL;
- goto err_out_2;
- }
-
- /*
- * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
- */
-
- /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
- buf_tmp = slot->buf;
- buf_tmp_dma = slot->buf_dma;
-
-#if _MV_DUMP
- buf_cmd = buf_tmp;
- hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
- buf_tmp += req_len;
- buf_tmp_dma += req_len;
- slot->cmd_size = req_len;
-#else
- hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
-#endif
-
- /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
- buf_oaf = buf_tmp;
- hdr->open_frame = cpu_to_le64(buf_tmp_dma);
-
- buf_tmp += MVS_OAF_SZ;
- buf_tmp_dma += MVS_OAF_SZ;
-
- /* region 3: PRD table ********************************************* */
- buf_prd = buf_tmp;
- if (tei->n_elem)
- hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
- else
- hdr->prd_tbl = 0;
-
- i = sizeof(struct mvs_prd) * tei->n_elem;
- buf_tmp += i;
- buf_tmp_dma += i;
-
- /* region 4: status buffer (the larger the PRD table, the smaller this buf) */
- slot->response = buf_tmp;
- hdr->status_buf = cpu_to_le64(buf_tmp_dma);
-
- /*
- * Fill in TX ring and command slot header
- */
- slot->tx = mvi->tx_prod;
- mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
- TXQ_MODE_I | tag |
- (sas_port->phy_mask << TXQ_PHY_SHIFT));
-
- hdr->flags |= flags;
- hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
- hdr->tags = cpu_to_le32(tag);
- hdr->data_len = 0;
-
- /* generate open address frame hdr (first 12 bytes) */
- buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
- buf_oaf[1] = task->dev->linkrate & 0xf;
- *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
- memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
-
- /* fill in PRD (scatter/gather) table, if any */
- for_each_sg(task->scatter, sg, tei->n_elem, i) {
- buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
- buf_prd->len = cpu_to_le32(sg_dma_len(sg));
- buf_prd++;
- }
-
-#if _MV_DUMP
- /* copy cmd table */
- from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
- memcpy(buf_cmd, from + sg_req->offset, req_len);
- kunmap_atomic(from, KM_IRQ0);
-#endif
- return 0;
-
-err_out_2:
- pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
-err_out:
- pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
- return rc;
-}
-
-static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
-{
- void __iomem *regs = mvi->regs;
- u32 tmp, offs;
- u8 *tfs = &port->taskfileset;
-
- if (*tfs == MVS_ID_NOT_MAPPED)
- return;
-
- offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
- if (*tfs < 16) {
- tmp = mr32(PCS);
- mw32(PCS, tmp & ~offs);
- } else {
- tmp = mr32(CTL);
- mw32(CTL, tmp & ~offs);
- }
-
- tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
- if (tmp)
- mw32(INT_STAT_SRS, tmp);
-
- *tfs = MVS_ID_NOT_MAPPED;
-}
-
-static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
-{
- int i;
- u32 tmp, offs;
- void __iomem *regs = mvi->regs;
-
- if (port->taskfileset != MVS_ID_NOT_MAPPED)
- return 0;
-
- tmp = mr32(PCS);
-
- for (i = 0; i < mvi->chip->srs_sz; i++) {
- if (i == 16)
- tmp = mr32(CTL);
- offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
- if (!(tmp & offs)) {
- port->taskfileset = i;
-
- if (i < 16)
- mw32(PCS, tmp | offs);
- else
- mw32(CTL, tmp | offs);
- tmp = mr32(INT_STAT_SRS) & (1U << i);
- if (tmp)
- mw32(INT_STAT_SRS, tmp);
- return 0;
- }
- }
- return MVS_ID_NOT_MAPPED;
-}
-
-static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
-{
- struct ata_queued_cmd *qc = task->uldd_task;
-
- if (qc) {
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ) {
- *tag = qc->tag;
- return 1;
- }
- }
-
- return 0;
-}
-
-static int mvs_task_prep_ata(struct mvs_info *mvi,
- struct mvs_task_exec_info *tei)
-{
- struct sas_task *task = tei->task;
- struct domain_device *dev = task->dev;
- struct mvs_cmd_hdr *hdr = tei->hdr;
- struct asd_sas_port *sas_port = dev->port;
- struct mvs_slot_info *slot;
- struct scatterlist *sg;
- struct mvs_prd *buf_prd;
- struct mvs_port *port = tei->port;
- u32 tag = tei->tag;
- u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
- void *buf_tmp;
- u8 *buf_cmd, *buf_oaf;
- dma_addr_t buf_tmp_dma;
- u32 i, req_len, resp_len;
- const u32 max_resp_len = SB_RFB_MAX;
-
- if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
- return -EBUSY;
-
- slot = &mvi->slot_info[tag];
- slot->tx = mvi->tx_prod;
- mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
- (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
- (sas_port->phy_mask << TXQ_PHY_SHIFT) |
- (port->taskfileset << TXQ_SRS_SHIFT));
-
- if (task->ata_task.use_ncq)
- flags |= MCH_FPDMA;
- if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
- if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
- flags |= MCH_ATAPI;
- }
-
- /* FIXME: fill in port multiplier number */
-
- hdr->flags = cpu_to_le32(flags);
-
- /* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
- if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
- task->ata_task.fis.sector_count |= hdr->tags << 3;
- else
- hdr->tags = cpu_to_le32(tag);
- hdr->data_len = cpu_to_le32(task->total_xfer_len);
-
- /*
- * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
- */
-
- /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
- buf_cmd = buf_tmp = slot->buf;
- buf_tmp_dma = slot->buf_dma;
-
- hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
-
- buf_tmp += MVS_ATA_CMD_SZ;
- buf_tmp_dma += MVS_ATA_CMD_SZ;
-#if _MV_DUMP
- slot->cmd_size = MVS_ATA_CMD_SZ;
-#endif
-
- /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
- /* used for STP. unused for SATA? */
- buf_oaf = buf_tmp;
- hdr->open_frame = cpu_to_le64(buf_tmp_dma);
-
- buf_tmp += MVS_OAF_SZ;
- buf_tmp_dma += MVS_OAF_SZ;
-
- /* region 3: PRD table ********************************************* */
- buf_prd = buf_tmp;
- if (tei->n_elem)
- hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
- else
- hdr->prd_tbl = 0;
-
- i = sizeof(struct mvs_prd) * tei->n_elem;
- buf_tmp += i;
- buf_tmp_dma += i;
-
- /* region 4: status buffer (the larger the PRD table, the smaller this buf) */
- /* FIXME: probably unused, for SATA. kept here just in case
- * we get a STP/SATA error information record
- */
- slot->response = buf_tmp;
- hdr->status_buf = cpu_to_le64(buf_tmp_dma);
-
- req_len = sizeof(struct host_to_dev_fis);
- resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
- sizeof(struct mvs_err_info) - i;
-
- /* request, response lengths */
- resp_len = min(resp_len, max_resp_len);
- hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
-
- task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
- /* fill in command FIS and ATAPI CDB */
- memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
- if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
- memcpy(buf_cmd + STP_ATAPI_CMD,
- task->ata_task.atapi_packet, 16);
-
- /* generate open address frame hdr (first 12 bytes) */
- buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */
- buf_oaf[1] = task->dev->linkrate & 0xf;
- *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
- memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
-
- /* fill in PRD (scatter/gather) table, if any */
- for_each_sg(task->scatter, sg, tei->n_elem, i) {
- buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
- buf_prd->len = cpu_to_le32(sg_dma_len(sg));
- buf_prd++;
- }
-
- return 0;
-}
-
-static int mvs_task_prep_ssp(struct mvs_info *mvi,
- struct mvs_task_exec_info *tei)
-{
- struct sas_task *task = tei->task;
- struct mvs_cmd_hdr *hdr = tei->hdr;
- struct mvs_port *port = tei->port;
- struct mvs_slot_info *slot;
- struct scatterlist *sg;
- struct mvs_prd *buf_prd;
- struct ssp_frame_hdr *ssp_hdr;
- void *buf_tmp;
- u8 *buf_cmd, *buf_oaf, fburst = 0;
- dma_addr_t buf_tmp_dma;
- u32 flags;
- u32 resp_len, req_len, i, tag = tei->tag;
- const u32 max_resp_len = SB_RFB_MAX;
- u8 phy_mask;
-
- slot = &mvi->slot_info[tag];
-
- phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
- task->dev->port->phy_mask;
- slot->tx = mvi->tx_prod;
- mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
- (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
- (phy_mask << TXQ_PHY_SHIFT));
-
- flags = MCH_RETRY;
- if (task->ssp_task.enable_first_burst) {
- flags |= MCH_FBURST;
- fburst = (1 << 7);
- }
- hdr->flags = cpu_to_le32(flags |
- (tei->n_elem << MCH_PRD_LEN_SHIFT) |
- (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
-
- hdr->tags = cpu_to_le32(tag);
- hdr->data_len = cpu_to_le32(task->total_xfer_len);
-
- /*
- * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
- */
-
- /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
- buf_cmd = buf_tmp = slot->buf;
- buf_tmp_dma = slot->buf_dma;
-
- hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
-
- buf_tmp += MVS_SSP_CMD_SZ;
- buf_tmp_dma += MVS_SSP_CMD_SZ;
-#if _MV_DUMP
- slot->cmd_size = MVS_SSP_CMD_SZ;
-#endif
-
- /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
- buf_oaf = buf_tmp;
- hdr->open_frame = cpu_to_le64(buf_tmp_dma);
-
- buf_tmp += MVS_OAF_SZ;
- buf_tmp_dma += MVS_OAF_SZ;
-
- /* region 3: PRD table ********************************************* */
- buf_prd = buf_tmp;
- if (tei->n_elem)
- hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
- else
- hdr->prd_tbl = 0;
-
- i = sizeof(struct mvs_prd) * tei->n_elem;
- buf_tmp += i;
- buf_tmp_dma += i;
-
- /* region 4: status buffer (the larger the PRD table, the smaller this buf) */
- slot->response = buf_tmp;
- hdr->status_buf = cpu_to_le64(buf_tmp_dma);
-
- resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
- sizeof(struct mvs_err_info) - i;
- resp_len = min(resp_len, max_resp_len);
-
- req_len = sizeof(struct ssp_frame_hdr) + 28;
-
- /* request, response lengths */
- hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
-
- /* generate open address frame hdr (first 12 bytes) */
- buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */
- buf_oaf[1] = task->dev->linkrate & 0xf;
- *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
- memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
-
- /* fill in SSP frame header (Command Table.SSP frame header) */
- ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
- ssp_hdr->frame_type = SSP_COMMAND;
- memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
- HASHED_SAS_ADDR_SIZE);
- memcpy(ssp_hdr->hashed_src_addr,
- task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
- ssp_hdr->tag = cpu_to_be16(tag);
-
- /* fill in command frame IU */
- buf_cmd += sizeof(*ssp_hdr);
- memcpy(buf_cmd, &task->ssp_task.LUN, 8);
- buf_cmd[9] = fburst | task->ssp_task.task_attr |
- (task->ssp_task.task_prio << 3);
- memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
-
- /* fill in PRD (scatter/gather) table, if any */
- for_each_sg(task->scatter, sg, tei->n_elem, i) {
- buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
- buf_prd->len = cpu_to_le32(sg_dma_len(sg));
- buf_prd++;
- }
-
- return 0;
-}
-
-static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
-{
- struct domain_device *dev = task->dev;
- struct mvs_info *mvi = dev->port->ha->lldd_ha;
- struct pci_dev *pdev = mvi->pdev;
- void __iomem *regs = mvi->regs;
- struct mvs_task_exec_info tei;
- struct sas_task *t = task;
- struct mvs_slot_info *slot;
- u32 tag = 0xdeadbeef, rc, n_elem = 0;
- unsigned long flags;
- u32 n = num, pass = 0;
-
- spin_lock_irqsave(&mvi->lock, flags);
- do {
- dev = t->dev;
- tei.port = &mvi->port[dev->port->id];
-
- if (!tei.port->port_attached) {
- if (sas_protocol_ata(t->task_proto)) {
- rc = SAS_PHY_DOWN;
- goto out_done;
- } else {
- struct task_status_struct *ts = &t->task_status;
- ts->resp = SAS_TASK_UNDELIVERED;
- ts->stat = SAS_PHY_DOWN;
- t->task_done(t);
- if (n > 1)
- t = list_entry(t->list.next,
- struct sas_task, list);
- continue;
- }
- }
-
- if (!sas_protocol_ata(t->task_proto)) {
- if (t->num_scatter) {
- n_elem = pci_map_sg(mvi->pdev, t->scatter,
- t->num_scatter,
- t->data_dir);
- if (!n_elem) {
- rc = -ENOMEM;
- goto err_out;
- }
- }
- } else {
- n_elem = t->num_scatter;
- }
-
- rc = mvs_tag_alloc(mvi, &tag);
- if (rc)
- goto err_out;
-
- slot = &mvi->slot_info[tag];
- t->lldd_task = NULL;
- slot->n_elem = n_elem;
- memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
- tei.task = t;
- tei.hdr = &mvi->slot[tag];
- tei.tag = tag;
- tei.n_elem = n_elem;
-
- switch (t->task_proto) {
- case SAS_PROTOCOL_SMP:
- rc = mvs_task_prep_smp(mvi, &tei);
- break;
- case SAS_PROTOCOL_SSP:
- rc = mvs_task_prep_ssp(mvi, &tei);
- break;
- case SAS_PROTOCOL_SATA:
- case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- rc = mvs_task_prep_ata(mvi, &tei);
- break;
- default:
- dev_printk(KERN_ERR, &pdev->dev,
- "unknown sas_task proto: 0x%x\n",
- t->task_proto);
- rc = -EINVAL;
- break;
- }
-
- if (rc)
- goto err_out_tag;
-
- slot->task = t;
- slot->port = tei.port;
- t->lldd_task = (void *) slot;
- list_add_tail(&slot->list, &slot->port->list);
- /* TODO: select normal or high priority */
-
- spin_lock(&t->task_state_lock);
- t->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock(&t->task_state_lock);
-
- mvs_hba_memory_dump(mvi, tag, t->task_proto);
-
- ++pass;
- mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
- if (n > 1)
- t = list_entry(t->list.next, struct sas_task, list);
- } while (--n);
-
- rc = 0;
- goto out_done;
-
-err_out_tag:
- mvs_tag_free(mvi, tag);
-err_out:
- dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
- if (!sas_protocol_ata(t->task_proto))
- if (n_elem)
- pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
- t->data_dir);
-out_done:
- if (pass)
- mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
- spin_unlock_irqrestore(&mvi->lock, flags);
- return rc;
-}
-
-static int mvs_task_abort(struct sas_task *task)
-{
- int rc;
- unsigned long flags;
- struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
- struct pci_dev *pdev = mvi->pdev;
- int tag;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (task->task_state_flags & SAS_TASK_STATE_DONE) {
- rc = TMF_RESP_FUNC_COMPLETE;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- goto out_done;
- }
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- switch (task->task_proto) {
- case SAS_PROTOCOL_SMP:
- dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n");
- break;
- case SAS_PROTOCOL_SSP:
- dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n");
- break;
- case SAS_PROTOCOL_SATA:
- case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
- dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n");
-#if _MV_DUMP
- dev_printk(KERN_DEBUG, &pdev->dev, "Dump H2D FIS: \n");
- mvs_hexdump(sizeof(struct host_to_dev_fis),
- (void *)&task->ata_task.fis, 0);
- dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
- mvs_hexdump(16, task->ata_task.atapi_packet, 0);
-#endif
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
- /* TODO */
- ;
- }
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- break;
- }
- default:
- break;
- }
-
- if (mvs_find_tag(mvi, task, &tag)) {
- spin_lock_irqsave(&mvi->lock, flags);
- mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
- spin_unlock_irqrestore(&mvi->lock, flags);
- }
- if (!mvs_task_exec(task, 1, GFP_ATOMIC))
- rc = TMF_RESP_FUNC_COMPLETE;
- else
- rc = TMF_RESP_FUNC_FAILED;
-out_done:
- return rc;
-}
-
-static void mvs_free(struct mvs_info *mvi)
-{
- int i;
-
- if (!mvi)
- return;
-
- for (i = 0; i < MVS_SLOTS; i++) {
- struct mvs_slot_info *slot = &mvi->slot_info[i];
-
- if (slot->buf)
- dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
- slot->buf, slot->buf_dma);
- }
-
- if (mvi->tx)
- dma_free_coherent(&mvi->pdev->dev,
- sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
- mvi->tx, mvi->tx_dma);
- if (mvi->rx_fis)
- dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
- mvi->rx_fis, mvi->rx_fis_dma);
- if (mvi->rx)
- dma_free_coherent(&mvi->pdev->dev,
- sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
- mvi->rx, mvi->rx_dma);
- if (mvi->slot)
- dma_free_coherent(&mvi->pdev->dev,
- sizeof(*mvi->slot) * MVS_SLOTS,
- mvi->slot, mvi->slot_dma);
-#ifdef MVS_ENABLE_PERI
- if (mvi->peri_regs)
- iounmap(mvi->peri_regs);
-#endif
- if (mvi->regs)
- iounmap(mvi->regs);
- if (mvi->shost)
- scsi_host_put(mvi->shost);
- kfree(mvi->sas.sas_port);
- kfree(mvi->sas.sas_phy);
- kfree(mvi);
-}
-
-/* FIXME: locking? */
-static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
- void *funcdata)
-{
- struct mvs_info *mvi = sas_phy->ha->lldd_ha;
- int rc = 0, phy_id = sas_phy->id;
- u32 tmp;
-
- tmp = mvs_read_phy_ctl(mvi, phy_id);
-
- switch (func) {
- case PHY_FUNC_SET_LINK_RATE:{
- struct sas_phy_linkrates *rates = funcdata;
- u32 lrmin = 0, lrmax = 0;
-
- lrmin = (rates->minimum_linkrate << 8);
- lrmax = (rates->maximum_linkrate << 12);
-
- if (lrmin) {
- tmp &= ~(0xf << 8);
- tmp |= lrmin;
- }
- if (lrmax) {
- tmp &= ~(0xf << 12);
- tmp |= lrmax;
- }
- mvs_write_phy_ctl(mvi, phy_id, tmp);
- break;
- }
-
- case PHY_FUNC_HARD_RESET:
- if (tmp & PHY_RST_HARD)
- break;
- mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
- break;
-
- case PHY_FUNC_LINK_RESET:
- mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
- break;
-
- case PHY_FUNC_DISABLE:
- case PHY_FUNC_RELEASE_SPINUP_HOLD:
- default:
- rc = -EOPNOTSUPP;
- }
-
- return rc;
-}
-
-static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
-{
- struct mvs_phy *phy = &mvi->phy[phy_id];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
-
- sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
- sas_phy->class = SAS;
- sas_phy->iproto = SAS_PROTOCOL_ALL;
- sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
- sas_phy->role = PHY_ROLE_INITIATOR;
- sas_phy->oob_mode = OOB_NOT_CONNECTED;
- sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
-
- sas_phy->id = phy_id;
- sas_phy->sas_addr = &mvi->sas_addr[0];
- sas_phy->frame_rcvd = &phy->frame_rcvd[0];
- sas_phy->ha = &mvi->sas;
- sas_phy->lldd_phy = phy;
-}
-
-static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct mvs_info *mvi;
- unsigned long res_start, res_len, res_flag;
- struct asd_sas_phy **arr_phy;
- struct asd_sas_port **arr_port;
- const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
- int i;
-
- /*
- * alloc and init our per-HBA mvs_info struct
- */
-
- mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
- if (!mvi)
- return NULL;
-
- spin_lock_init(&mvi->lock);
-#ifdef MVS_USE_TASKLET
- tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
-#endif
- mvi->pdev = pdev;
- mvi->chip = chip;
-
- if (pdev->device == 0x6440 && pdev->revision == 0)
- mvi->flags |= MVF_PHY_PWR_FIX;
-
- /*
- * alloc and init SCSI, SAS glue
- */
-
- mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
- if (!mvi->shost)
- goto err_out;
-
- arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
- arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
- if (!arr_phy || !arr_port)
- goto err_out;
-
- for (i = 0; i < MVS_MAX_PHYS; i++) {
- mvs_phy_init(mvi, i);
- arr_phy[i] = &mvi->phy[i].sas_phy;
- arr_port[i] = &mvi->port[i].sas_port;
- mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
- mvi->port[i].wide_port_phymap = 0;
- mvi->port[i].port_attached = 0;
- INIT_LIST_HEAD(&mvi->port[i].list);
- }
-
- SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
- mvi->shost->transportt = mvs_stt;
- mvi->shost->max_id = 21;
- mvi->shost->max_lun = ~0;
- mvi->shost->max_channel = 0;
- mvi->shost->max_cmd_len = 16;
-
- mvi->sas.sas_ha_name = DRV_NAME;
- mvi->sas.dev = &pdev->dev;
- mvi->sas.lldd_module = THIS_MODULE;
- mvi->sas.sas_addr = &mvi->sas_addr[0];
- mvi->sas.sas_phy = arr_phy;
- mvi->sas.sas_port = arr_port;
- mvi->sas.num_phys = chip->n_phy;
- mvi->sas.lldd_max_execute_num = 1;
- mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
- mvi->shost->can_queue = MVS_CAN_QUEUE;
- mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
- mvi->sas.lldd_ha = mvi;
- mvi->sas.core.shost = mvi->shost;
-
- mvs_tag_init(mvi);
-
- /*
- * ioremap main and peripheral registers
- */
-
-#ifdef MVS_ENABLE_PERI
- res_start = pci_resource_start(pdev, 2);
- res_len = pci_resource_len(pdev, 2);
- if (!res_start || !res_len)
- goto err_out;
-
- mvi->peri_regs = ioremap_nocache(res_start, res_len);
- if (!mvi->peri_regs)
- goto err_out;
-#endif
-
- res_start = pci_resource_start(pdev, 4);
- res_len = pci_resource_len(pdev, 4);
- if (!res_start || !res_len)
- goto err_out;
-
- res_flag = pci_resource_flags(pdev, 4);
- if (res_flag & IORESOURCE_CACHEABLE)
- mvi->regs = ioremap(res_start, res_len);
- else
- mvi->regs = ioremap_nocache(res_start, res_len);
-
- if (!mvi->regs)
- goto err_out;
-
- /*
- * alloc and init our DMA areas
- */
-
- mvi->tx = dma_alloc_coherent(&pdev->dev,
- sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
- &mvi->tx_dma, GFP_KERNEL);
- if (!mvi->tx)
- goto err_out;
- memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
-
- mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
- &mvi->rx_fis_dma, GFP_KERNEL);
- if (!mvi->rx_fis)
- goto err_out;
- memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
-
- mvi->rx = dma_alloc_coherent(&pdev->dev,
- sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
- &mvi->rx_dma, GFP_KERNEL);
- if (!mvi->rx)
- goto err_out;
- memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
-
- mvi->rx[0] = cpu_to_le32(0xfff);
- mvi->rx_cons = 0xfff;
-
- mvi->slot = dma_alloc_coherent(&pdev->dev,
- sizeof(*mvi->slot) * MVS_SLOTS,
- &mvi->slot_dma, GFP_KERNEL);
- if (!mvi->slot)
- goto err_out;
- memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
-
- for (i = 0; i < MVS_SLOTS; i++) {
- struct mvs_slot_info *slot = &mvi->slot_info[i];
-
- slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
- &slot->buf_dma, GFP_KERNEL);
- if (!slot->buf)
- goto err_out;
- memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
- }
-
- /* finally, read NVRAM to get our SAS address */
- if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
- goto err_out;
- return mvi;
-
-err_out:
- mvs_free(mvi);
- return NULL;
-}
-
-static u32 mvs_cr32(void __iomem *regs, u32 addr)
-{
- mw32(CMD_ADDR, addr);
- return mr32(CMD_DATA);
-}
-
-static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
-{
- mw32(CMD_ADDR, addr);
- mw32(CMD_DATA, val);
-}
-
-static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
-{
- void __iomem *regs = mvi->regs;
- return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4):
- mr32(P4_SER_CTLSTAT + (port - 4) * 4);
-}
-
-static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
-{
- void __iomem *regs = mvi->regs;
- if (port < 4)
- mw32(P0_SER_CTLSTAT + port * 4, val);
- else
- mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
-}
-
-static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
-{
- void __iomem *regs = mvi->regs + off;
- void __iomem *regs2 = mvi->regs + off2;
- return (port < 4)?readl(regs + port * 8):
- readl(regs2 + (port - 4) * 8);
-}
-
-static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
- u32 port, u32 val)
-{
- void __iomem *regs = mvi->regs + off;
- void __iomem *regs2 = mvi->regs + off2;
- if (port < 4)
- writel(val, regs + port * 8);
- else
- writel(val, regs2 + (port - 4) * 8);
-}
-
-static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
-{
- return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
-}
-
-static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
-{
- mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
-}
-
-static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
-{
- mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
-}
-
-static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
-{
- return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
-}
-
-static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
-{
- mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
-}
-
-static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
-{
- mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
-}
-
-static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
-{
- return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
-}
-
-static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
-{
- mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
-}
-
-static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
-{
- return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
-}
-
-static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
-{
- mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
-}
-
-static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
-{
- void __iomem *regs = mvi->regs;
- u32 tmp;
-
- /* workaround for SATA R-ERR, to ignore phy glitch */
- tmp = mvs_cr32(regs, CMD_PHY_TIMER);
- tmp &= ~(1 << 9);
- tmp |= (1 << 10);
- mvs_cw32(regs, CMD_PHY_TIMER, tmp);
-
- /* enable retry 127 times */
- mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
-
- /* extend open frame timeout to max */
- tmp = mvs_cr32(regs, CMD_SAS_CTL0);
- tmp &= ~0xffff;
- tmp |= 0x3fff;
- mvs_cw32(regs, CMD_SAS_CTL0, tmp);
-
- /* workaround for WDTIMEOUT , set to 550 ms */
- mvs_cw32(regs, CMD_WD_TIMER, 0x86470);
-
- /* not to halt for different port op during wideport link change */
- mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
-
- /* workaround for Seagate disk not-found OOB sequence, recv
- * COMINIT before sending out COMWAKE */
- tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
- tmp &= 0x0000ffff;
- tmp |= 0x00fa0000;
- mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
-
- tmp = mvs_cr32(regs, CMD_PHY_TIMER);
- tmp &= 0x1fffffff;
- tmp |= (2U << 29); /* 8 ms retry */
- mvs_cw32(regs, CMD_PHY_TIMER, tmp);
-
- /* TEST - for phy decoding error, adjust voltage levels */
- mw32(P0_VSR_ADDR + 0, 0x8);
- mw32(P0_VSR_DATA + 0, 0x2F0);
-
- mw32(P0_VSR_ADDR + 8, 0x8);
- mw32(P0_VSR_DATA + 8, 0x2F0);
-
- mw32(P0_VSR_ADDR + 16, 0x8);
- mw32(P0_VSR_DATA + 16, 0x2F0);
-
- mw32(P0_VSR_ADDR + 24, 0x8);
- mw32(P0_VSR_DATA + 24, 0x2F0);
-
-}
-
-static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
-{
- void __iomem *regs = mvi->regs;
- u32 tmp;
-
- tmp = mr32(PCS);
- if (mvi->chip->n_phy <= 4)
- tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
- else
- tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
- mw32(PCS, tmp);
-}
-
-static void mvs_detect_porttype(struct mvs_info *mvi, int i)
-{
- void __iomem *regs = mvi->regs;
- u32 reg;
- struct mvs_phy *phy = &mvi->phy[i];
-
- /* TODO check & save device type */
- reg = mr32(GBL_PORT_TYPE);
-
- if (reg & MODE_SAS_SATA & (1 << i))
- phy->phy_type |= PORT_TYPE_SAS;
- else
- phy->phy_type |= PORT_TYPE_SATA;
-}
-
-static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
-{
- u32 *s = (u32 *) buf;
-
- if (!s)
- return NULL;
-
- mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
- s[3] = mvs_read_port_cfg_data(mvi, i);
-
- mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
- s[2] = mvs_read_port_cfg_data(mvi, i);
-
- mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
- s[1] = mvs_read_port_cfg_data(mvi, i);
-
- mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
- s[0] = mvs_read_port_cfg_data(mvi, i);
-
- return (void *)s;
-}
-
-static u32 mvs_is_sig_fis_received(u32 irq_status)
-{
- return irq_status & PHYEV_SIG_FIS;
-}
-
-static void mvs_update_wideport(struct mvs_info *mvi, int i)
-{
- struct mvs_phy *phy = &mvi->phy[i];
- struct mvs_port *port = phy->port;
- int j, no;
-
- for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
- if (no & 1) {
- mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
- mvs_write_port_cfg_data(mvi, no,
- port->wide_port_phymap);
- } else {
- mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
- mvs_write_port_cfg_data(mvi, no, 0);
- }
-}
-
-static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
-{
- u32 tmp;
- struct mvs_phy *phy = &mvi->phy[i];
- struct mvs_port *port = phy->port;
-
- tmp = mvs_read_phy_ctl(mvi, i);
-
- if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
- if (!port)
- phy->phy_attached = 1;
- return tmp;
- }
-
- if (port) {
- if (phy->phy_type & PORT_TYPE_SAS) {
- port->wide_port_phymap &= ~(1U << i);
- if (!port->wide_port_phymap)
- port->port_attached = 0;
- mvs_update_wideport(mvi, i);
- } else if (phy->phy_type & PORT_TYPE_SATA)
- port->port_attached = 0;
- mvs_free_reg_set(mvi, phy->port);
- phy->port = NULL;
- phy->phy_attached = 0;
- phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
- }
- return 0;
-}
-
-static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
- int get_st)
-{
- struct mvs_phy *phy = &mvi->phy[i];
- struct pci_dev *pdev = mvi->pdev;
- u32 tmp;
- u64 tmp64;
-
- mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
- phy->dev_info = mvs_read_port_cfg_data(mvi, i);
-
- mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
- phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
-
- mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
- phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
-
- if (get_st) {
- phy->irq_status = mvs_read_port_irq_stat(mvi, i);
- phy->phy_status = mvs_is_phy_ready(mvi, i);
- }
-
- if (phy->phy_status) {
- u32 phy_st;
- struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
-
- mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
- phy_st = mvs_read_port_cfg_data(mvi, i);
-
- sas_phy->linkrate =
- (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
- PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
- phy->minimum_linkrate =
- (phy->phy_status &
- PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
- phy->maximum_linkrate =
- (phy->phy_status &
- PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
-
- if (phy->phy_type & PORT_TYPE_SAS) {
- /* Updated attached_sas_addr */
- mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
- phy->att_dev_sas_addr =
- (u64) mvs_read_port_cfg_data(mvi, i) << 32;
- mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
- phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
- mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
- phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
- phy->identify.device_type =
- phy->att_dev_info & PORT_DEV_TYPE_MASK;
-
- if (phy->identify.device_type == SAS_END_DEV)
- phy->identify.target_port_protocols =
- SAS_PROTOCOL_SSP;
- else if (phy->identify.device_type != NO_DEVICE)
- phy->identify.target_port_protocols =
- SAS_PROTOCOL_SMP;
- if (phy_st & PHY_OOB_DTCTD)
- sas_phy->oob_mode = SAS_OOB_MODE;
- phy->frame_rcvd_size =
- sizeof(struct sas_identify_frame);
- } else if (phy->phy_type & PORT_TYPE_SATA) {
- phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
- if (mvs_is_sig_fis_received(phy->irq_status)) {
- phy->att_dev_sas_addr = i; /* temp */
- if (phy_st & PHY_OOB_DTCTD)
- sas_phy->oob_mode = SATA_OOB_MODE;
- phy->frame_rcvd_size =
- sizeof(struct dev_to_host_fis);
- mvs_get_d2h_reg(mvi, i,
- (void *)sas_phy->frame_rcvd);
- } else {
- dev_printk(KERN_DEBUG, &pdev->dev,
- "No sig fis\n");
- phy->phy_type &= ~(PORT_TYPE_SATA);
- goto out_done;
- }
- }
- tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
- memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
-
- dev_printk(KERN_DEBUG, &pdev->dev,
- "phy[%d] Get Attached Address 0x%llX ,"
- " SAS Address 0x%llX\n",
- i,
- (unsigned long long)phy->att_dev_sas_addr,
- (unsigned long long)phy->dev_sas_addr);
- dev_printk(KERN_DEBUG, &pdev->dev,
- "Rate = %x , type = %d\n",
- sas_phy->linkrate, phy->phy_type);
-
- /* workaround for HW phy decoding error on 1.5g disk drive */
- mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
- tmp = mvs_read_port_vsr_data(mvi, i);
- if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
- PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
- SAS_LINK_RATE_1_5_GBPS)
- tmp &= ~PHY_MODE6_LATECLK;
- else
- tmp |= PHY_MODE6_LATECLK;
- mvs_write_port_vsr_data(mvi, i, tmp);
-
- }
-out_done:
- if (get_st)
- mvs_write_port_irq_stat(mvi, i, phy->irq_status);
-}
-
-static void mvs_port_formed(struct asd_sas_phy *sas_phy)
-{
- struct sas_ha_struct *sas_ha = sas_phy->ha;
- struct mvs_info *mvi = sas_ha->lldd_ha;
- struct asd_sas_port *sas_port = sas_phy->port;
- struct mvs_phy *phy = sas_phy->lldd_phy;
- struct mvs_port *port = &mvi->port[sas_port->id];
- unsigned long flags;
-
- spin_lock_irqsave(&mvi->lock, flags);
- port->port_attached = 1;
- phy->port = port;
- port->taskfileset = MVS_ID_NOT_MAPPED;
- if (phy->phy_type & PORT_TYPE_SAS) {
- port->wide_port_phymap = sas_port->phy_mask;
- mvs_update_wideport(mvi, sas_phy->id);
- }
- spin_unlock_irqrestore(&mvi->lock, flags);
-}
-
-static int mvs_I_T_nexus_reset(struct domain_device *dev)
-{
- return TMF_RESP_FUNC_FAILED;
-}
-
-static int __devinit mvs_hw_init(struct mvs_info *mvi)
-{
- void __iomem *regs = mvi->regs;
- int i;
- u32 tmp, cctl;
-
- /* make sure interrupts are masked immediately (paranoia) */
- mw32(GBL_CTL, 0);
- tmp = mr32(GBL_CTL);
-
- /* Reset Controller */
- if (!(tmp & HBA_RST)) {
- if (mvi->flags & MVF_PHY_PWR_FIX) {
- pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
- tmp &= ~PCTL_PWR_ON;
- tmp |= PCTL_OFF;
- pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
-
- pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
- tmp &= ~PCTL_PWR_ON;
- tmp |= PCTL_OFF;
- pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
- }
-
- /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
- mw32_f(GBL_CTL, HBA_RST);
- }
-
- /* wait for reset to finish; timeout is just a guess */
- i = 1000;
- while (i-- > 0) {
- msleep(10);
-
- if (!(mr32(GBL_CTL) & HBA_RST))
- break;
- }
- if (mr32(GBL_CTL) & HBA_RST) {
- dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
- return -EBUSY;
- }
-
- /* Init Chip */
- /* make sure RST is set; HBA_RST /should/ have done that for us */
- cctl = mr32(CTL);
- if (cctl & CCTL_RST)
- cctl &= ~CCTL_RST;
- else
- mw32_f(CTL, cctl | CCTL_RST);
-
- /* write to device control _AND_ device status register? - A.C. */
- pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
- tmp &= ~PRD_REQ_MASK;
- tmp |= PRD_REQ_SIZE;
- pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
-
- pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
- tmp |= PCTL_PWR_ON;
- tmp &= ~PCTL_OFF;
- pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
-
- pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
- tmp |= PCTL_PWR_ON;
- tmp &= ~PCTL_OFF;
- pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
-
- mw32_f(CTL, cctl);
-
- /* reset control */
- mw32(PCS, 0); /*MVS_PCS */
-
- mvs_phy_hacks(mvi);
-
- mw32(CMD_LIST_LO, mvi->slot_dma);
- mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
-
- mw32(RX_FIS_LO, mvi->rx_fis_dma);
- mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
-
- mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
- mw32(TX_LO, mvi->tx_dma);
- mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
-
- mw32(RX_CFG, MVS_RX_RING_SZ);
- mw32(RX_LO, mvi->rx_dma);
- mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
-
- /* enable auto port detection */
- mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
- msleep(1100);
- /* init and reset phys */
- for (i = 0; i < mvi->chip->n_phy; i++) {
- u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
- u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
-
- mvs_detect_porttype(mvi, i);
-
- /* set phy local SAS address */
- mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
- mvs_write_port_cfg_data(mvi, i, lo);
- mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
- mvs_write_port_cfg_data(mvi, i, hi);
-
- /* reset phy */
- tmp = mvs_read_phy_ctl(mvi, i);
- tmp |= PHY_RST;
- mvs_write_phy_ctl(mvi, i, tmp);
- }
-
- msleep(100);
-
- for (i = 0; i < mvi->chip->n_phy; i++) {
- /* clear phy int status */
- tmp = mvs_read_port_irq_stat(mvi, i);
- tmp &= ~PHYEV_SIG_FIS;
- mvs_write_port_irq_stat(mvi, i, tmp);
-
- /* set phy int mask */
- tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
- PHYEV_ID_DONE | PHYEV_DEC_ERR;
- mvs_write_port_irq_mask(mvi, i, tmp);
-
- msleep(100);
- mvs_update_phyinfo(mvi, i, 1);
- mvs_enable_xmt(mvi, i);
- }
-
- /* FIXME: update wide port bitmaps */
-
- /* little endian for open address and command table, etc. */
- /* A.C.
- * it seems that ( from the spec ) turning on big-endian won't
- * do us any good on big-endian machines, need further confirmation
- */
- cctl = mr32(CTL);
- cctl |= CCTL_ENDIAN_CMD;
- cctl |= CCTL_ENDIAN_DATA;
- cctl &= ~CCTL_ENDIAN_OPEN;
- cctl |= CCTL_ENDIAN_RSP;
- mw32_f(CTL, cctl);
-
- /* reset CMD queue */
- tmp = mr32(PCS);
- tmp |= PCS_CMD_RST;
- mw32(PCS, tmp);
- /* interrupt coalescing may cause missing HW interrupts in some cases,
- * and the max count is 0x1ff while our max slot count is 0x200,
- * which would make the count 0.
- */
- tmp = 0;
- mw32(INT_COAL, tmp);
-
- tmp = 0x100;
- mw32(INT_COAL_TMOUT, tmp);
-
- /* ladies and gentlemen, start your engines */
- mw32(TX_CFG, 0);
- mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
- mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
- /* enable CMD/CMPL_Q/RESP mode */
- mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
-
- /* enable completion queue interrupt */
- tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
- mw32(INT_MASK, tmp);
-
- /* Enable SRS interrupt */
- mw32(INT_MASK_SRS, 0xFF);
- return 0;
-}
-
-static void __devinit mvs_print_info(struct mvs_info *mvi)
-{
- struct pci_dev *pdev = mvi->pdev;
- static int printed_version;
-
- if (!printed_version++)
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
-
- dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
- mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
-}
-
-static int __devinit mvs_pci_init(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int rc;
- struct mvs_info *mvi;
- irq_handler_t irq_handler = mvs_interrupt;
-
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
-
- pci_set_master(pdev);
-
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out_disable;
-
- rc = pci_go_64(pdev);
- if (rc)
- goto err_out_regions;
-
- mvi = mvs_alloc(pdev, ent);
- if (!mvi) {
- rc = -ENOMEM;
- goto err_out_regions;
- }
-
- rc = mvs_hw_init(mvi);
- if (rc)
- goto err_out_mvi;
-
-#ifndef MVS_DISABLE_MSI
- if (!pci_enable_msi(pdev)) {
- u32 tmp;
- void __iomem *regs = mvi->regs;
- mvi->flags |= MVF_MSI;
- irq_handler = mvs_msi_interrupt;
- tmp = mr32(PCS);
- mw32(PCS, tmp | PCS_SELF_CLEAR);
- }
-#endif
-
- rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
- if (rc)
- goto err_out_msi;
-
- rc = scsi_add_host(mvi->shost, &pdev->dev);
- if (rc)
- goto err_out_irq;
-
- rc = sas_register_ha(&mvi->sas);
- if (rc)
- goto err_out_shost;
-
- pci_set_drvdata(pdev, mvi);
-
- mvs_print_info(mvi);
-
- mvs_hba_interrupt_enable(mvi);
-
- scsi_scan_host(mvi->shost);
-
- return 0;
-
-err_out_shost:
- scsi_remove_host(mvi->shost);
-err_out_irq:
- free_irq(pdev->irq, mvi);
-err_out_msi:
- if (mvi->flags & MVF_MSI)
- pci_disable_msi(pdev);
-err_out_mvi:
- mvs_free(mvi);
-err_out_regions:
- pci_release_regions(pdev);
-err_out_disable:
- pci_disable_device(pdev);
- return rc;
-}
-
-static void __devexit mvs_pci_remove(struct pci_dev *pdev)
-{
- struct mvs_info *mvi = pci_get_drvdata(pdev);
-
- pci_set_drvdata(pdev, NULL);
-
- if (mvi) {
- sas_unregister_ha(&mvi->sas);
- mvs_hba_interrupt_disable(mvi);
- sas_remove_host(mvi->shost);
- scsi_remove_host(mvi->shost);
-
- free_irq(pdev->irq, mvi);
- if (mvi->flags & MVF_MSI)
- pci_disable_msi(pdev);
- mvs_free(mvi);
- pci_release_regions(pdev);
- }
- pci_disable_device(pdev);
-}
-
-static struct sas_domain_function_template mvs_transport_ops = {
- .lldd_execute_task = mvs_task_exec,
- .lldd_control_phy = mvs_phy_control,
- .lldd_abort_task = mvs_task_abort,
- .lldd_port_formed = mvs_port_formed,
- .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
-};
-
-static struct pci_device_id __devinitdata mvs_pci_table[] = {
- { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
- { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
- {
- .vendor = PCI_VENDOR_ID_MARVELL,
- .device = 0x6440,
- .subvendor = PCI_ANY_ID,
- .subdevice = 0x6480,
- .class = 0,
- .class_mask = 0,
- .driver_data = chip_6480,
- },
- { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
- { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
-
- { } /* terminate list */
-};
-
-static struct pci_driver mvs_pci_driver = {
- .name = DRV_NAME,
- .id_table = mvs_pci_table,
- .probe = mvs_pci_init,
- .remove = __devexit_p(mvs_pci_remove),
-};
-
-static int __init mvs_init(void)
-{
- int rc;
-
- mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
- if (!mvs_stt)
- return -ENOMEM;
-
- rc = pci_register_driver(&mvs_pci_driver);
- if (rc)
- goto err_out;
-
- return 0;
-
-err_out:
- sas_release_transport(mvs_stt);
- return rc;
-}
-
-static void __exit mvs_exit(void)
-{
- pci_unregister_driver(&mvs_pci_driver);
- sas_release_transport(mvs_stt);
-}
-
-module_init(mvs_init);
-module_exit(mvs_exit);
-
-MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
-MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
-MODULE_VERSION(DRV_VERSION);
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, mvs_pci_table);
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
new file mode 100644
index 00000000000..6de7af27e50
--- /dev/null
+++ b/drivers/scsi/mvsas/Kconfig
@@ -0,0 +1,42 @@
+#
+# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver.
+#
+# Copyright 2007 Red Hat, Inc.
+# Copyright 2008 Marvell. <kewei@marvell.com>
+#
+# This file is licensed under GPLv2.
+#
+# This file is part of the 88SE64XX/88SE94XX driver.
+#
+# The 88SE64XX/88SE94XX driver is free software; you can redistribute
+# it and/or modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2 of the
+# License.
+#
+# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
+#
+
+config SCSI_MVSAS
+ tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support"
+ depends on PCI
+ select SCSI_SAS_LIBSAS
+ select FW_LOADER
+ help
+ This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s
+ PCI-E 88SE94XX chip-based host adapters.
+
+config SCSI_MVSAS_DEBUG
+ bool "Compile in debug mode"
+ default y
+ depends on SCSI_MVSAS
+ help
+ Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
+ the driver prints some messages to the console.
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
new file mode 100644
index 00000000000..52ac4264677
--- /dev/null
+++ b/drivers/scsi/mvsas/Makefile
@@ -0,0 +1,32 @@
+#
+# Makefile for Marvell 88SE64xx/88SE94xx SAS/SATA driver.
+#
+# Copyright 2007 Red Hat, Inc.
+# Copyright 2008 Marvell. <kewei@marvell.com>
+#
+# This file is licensed under GPLv2.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 of the
+# License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+# USA
+
+ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y)
+ EXTRA_CFLAGS += -DMV_DEBUG
+endif
+
+obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
+mvsas-y += mv_init.o \
+ mv_sas.o \
+ mv_64xx.o \
+ mv_94xx.o
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
new file mode 100644
index 00000000000..10a5077b6ae
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -0,0 +1,793 @@
+/*
+ * Marvell 88SE64xx hardware specific
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+#include "mv_64xx.h"
+#include "mv_chips.h"
+
+static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
+{
+ void __iomem *regs = mvi->regs;
+ u32 reg;
+ struct mvs_phy *phy = &mvi->phy[i];
+
+ /* TODO check & save device type */
+ reg = mr32(MVS_GBL_PORT_TYPE);
+ phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+ if (reg & MODE_SAS_SATA & (1 << i))
+ phy->phy_type |= PORT_TYPE_SAS;
+ else
+ phy->phy_type |= PORT_TYPE_SATA;
+}
+
+static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ tmp = mr32(MVS_PCS);
+ if (mvi->chip->n_phy <= 4)
+ tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
+ else
+ tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
+ mw32(MVS_PCS, tmp);
+}
+
+static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+
+ mvs_phy_hacks(mvi);
+
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ /* TEST - for phy decoding error, adjust voltage levels */
+ mw32(MVS_P0_VSR_ADDR + 0, 0x8);
+ mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
+
+ mw32(MVS_P0_VSR_ADDR + 8, 0x8);
+ mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
+
+ mw32(MVS_P0_VSR_ADDR + 16, 0x8);
+ mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
+
+ mw32(MVS_P0_VSR_ADDR + 24, 0x8);
+ mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
+ } else {
+ int i;
+ /* disable auto port detection */
+ mw32(MVS_GBL_PORT_TYPE, 0);
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
+ mvs_write_port_vsr_data(mvi, i, 0x90000000);
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
+ mvs_write_port_vsr_data(mvi, i, 0x50f2);
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
+ mvs_write_port_vsr_data(mvi, i, 0x0e);
+ }
+ }
+}
+
+static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
+{
+ void __iomem *regs = mvi->regs;
+ u32 reg, tmp;
+
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ if (phy_id < 4)
+ pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
+ else
+ pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
+
+ } else
+ reg = mr32(MVS_PHY_CTL);
+
+ tmp = reg;
+ if (phy_id < 4)
+ tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
+ else
+ tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
+
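+ /* pulse this phy's link-reset bit: write the modified value, hold it for 10 ms, then restore the original register value */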
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ if (phy_id < 4) {
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+ mdelay(10);
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
+ } else {
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+ mdelay(10);
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
+ }
+ } else {
+ mw32(MVS_PHY_CTL, tmp);
+ mdelay(10);
+ mw32(MVS_PHY_CTL, reg);
+ }
+}
+
+static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
+{
+ u32 tmp;
+ tmp = mvs_read_port_irq_stat(mvi, phy_id);
+ tmp &= ~PHYEV_RDY_CH;
+ mvs_write_port_irq_stat(mvi, phy_id, tmp);
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ if (hard)
+ tmp |= PHY_RST_HARD;
+ else
+ tmp |= PHY_RST;
+ mvs_write_phy_ctl(mvi, phy_id, tmp);
+ if (hard) {
+ do {
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ } while (tmp & PHY_RST_HARD);
+ }
+}
+
+static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+ int i;
+
+ /* make sure interrupts are masked immediately (paranoia) */
+ mw32(MVS_GBL_CTL, 0);
+ tmp = mr32(MVS_GBL_CTL);
+
+ /* Reset Controller */
+ if (!(tmp & HBA_RST)) {
+ if (mvi->flags & MVF_PHY_PWR_FIX) {
+ pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp |= PCTL_PHY_DSBL;
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+ pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp |= PCTL_PHY_DSBL;
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+ }
+ }
+
+ /* make sure interrupts are masked immediately (paranoia) */
+ mw32(MVS_GBL_CTL, 0);
+ tmp = mr32(MVS_GBL_CTL);
+
+ /* Reset Controller */
+ if (!(tmp & HBA_RST)) {
+ /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
+ mw32_f(MVS_GBL_CTL, HBA_RST);
+ }
+
+ /* wait for reset to finish; timeout is just a guess */
+ i = 1000;
+ while (i-- > 0) {
+ msleep(10);
+
+ if (!(mr32(MVS_GBL_CTL) & HBA_RST))
+ break;
+ }
+ if (mr32(MVS_GBL_CTL) & HBA_RST) {
+ dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
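+ /* on PCI parts the per-phy disable bit lives in PCI config space (PCR_PHY_CTL for phys 0-3, PCR_PHY_CTL2 for 4-7); the SoC variant uses MVS_PHY_CTL instead */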
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ u32 offs;
+ if (phy_id < 4)
+ offs = PCR_PHY_CTL;
+ else {
+ offs = PCR_PHY_CTL2;
+ phy_id -= 4;
+ }
+ pci_read_config_dword(mvi->pdev, offs, &tmp);
+ tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
+ pci_write_config_dword(mvi->pdev, offs, tmp);
+ } else {
+ tmp = mr32(MVS_PHY_CTL);
+ tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
+ mw32(MVS_PHY_CTL, tmp);
+ }
+}
+
+static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ u32 offs;
+ if (phy_id < 4)
+ offs = PCR_PHY_CTL;
+ else {
+ offs = PCR_PHY_CTL2;
+ phy_id -= 4;
+ }
+ pci_read_config_dword(mvi->pdev, offs, &tmp);
+ tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
+ pci_write_config_dword(mvi->pdev, offs, tmp);
+ } else {
+ tmp = mr32(MVS_PHY_CTL);
+ tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
+ mw32(MVS_PHY_CTL, tmp);
+ }
+}
+
+static int __devinit mvs_64xx_init(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ int i;
+ u32 tmp, cctl;
+
+ if (mvi->pdev && mvi->pdev->revision == 0)
+ mvi->flags |= MVF_PHY_PWR_FIX;
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ mvs_show_pcie_usage(mvi);
+ tmp = mvs_64xx_chip_reset(mvi);
+ if (tmp)
+ return tmp;
+ } else {
+ tmp = mr32(MVS_PHY_CTL);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp |= PCTL_PHY_DSBL;
+ mw32(MVS_PHY_CTL, tmp);
+ }
+
+ /* Init Chip */
+ /* make sure RST is set; HBA_RST /should/ have done that for us */
+ cctl = mr32(MVS_CTL) & 0xFFFF;
+ if (cctl & CCTL_RST)
+ cctl &= ~CCTL_RST;
+ else
+ mw32_f(MVS_CTL, cctl | CCTL_RST);
+
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ /* write to device control _AND_ device status register */
+ pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
+ tmp &= ~PRD_REQ_MASK;
+ tmp |= PRD_REQ_SIZE;
+ pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
+
+ pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp &= ~PCTL_PHY_DSBL;
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+ pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+ tmp &= PCTL_PWR_OFF;
+ tmp &= ~PCTL_PHY_DSBL;
+ pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+ } else {
+ tmp = mr32(MVS_PHY_CTL);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp |= PCTL_COM_ON;
+ tmp &= ~PCTL_PHY_DSBL;
+ tmp |= PCTL_LINK_RST;
+ mw32(MVS_PHY_CTL, tmp);
+ msleep(100);
+ tmp &= ~PCTL_LINK_RST;
+ mw32(MVS_PHY_CTL, tmp);
+ msleep(100);
+ }
+
+ /* reset control */
+ mw32(MVS_PCS, 0); /* MVS_PCS */
+ /* init phys */
+ mvs_64xx_phy_hacks(mvi);
+
+ /* enable auto port detection */
+ mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
+
+ mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
+ mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
+
+ mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
+ mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
+
+ mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
+ mw32(MVS_TX_LO, mvi->tx_dma);
+ mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
+
+ mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
+ mw32(MVS_RX_LO, mvi->rx_dma);
+ mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
+
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ /* set phy local SAS address */
+ /* the SAS address should be written to the 64xx chip in little-endian form */
+ mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
+ cpu_to_be64(mvi->phy[i].dev_sas_addr));
+
+ mvs_64xx_enable_xmt(mvi, i);
+
+ mvs_64xx_phy_reset(mvi, i, 1);
+ msleep(500);
+ mvs_64xx_detect_porttype(mvi, i);
+ }
+ if (mvi->flags & MVF_FLAG_SOC) {
+ /* set select registers */
+ writel(0x0E008000, regs + 0x000);
+ writel(0x59000008, regs + 0x004);
+ writel(0x20, regs + 0x008);
+ writel(0x20, regs + 0x00c);
+ writel(0x20, regs + 0x010);
+ writel(0x20, regs + 0x014);
+ writel(0x20, regs + 0x018);
+ writel(0x20, regs + 0x01c);
+ }
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ /* clear phy int status */
+ tmp = mvs_read_port_irq_stat(mvi, i);
+ tmp &= ~PHYEV_SIG_FIS;
+ mvs_write_port_irq_stat(mvi, i, tmp);
+
+ /* set phy int mask */
+ tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
+ PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
+ PHYEV_DEC_ERR;
+ mvs_write_port_irq_mask(mvi, i, tmp);
+
+ msleep(100);
+ mvs_update_phyinfo(mvi, i, 1);
+ }
+
+ /* FIXME: update wide port bitmaps */
+
+ /* little endian for open address and command table, etc. */
+ /*
+ * it seems (from the spec) that turning on big-endian won't
+ * do us any good on big-endian machines; this needs further confirmation
+ */
+ cctl = mr32(MVS_CTL);
+ cctl |= CCTL_ENDIAN_CMD;
+ cctl |= CCTL_ENDIAN_DATA;
+ cctl &= ~CCTL_ENDIAN_OPEN;
+ cctl |= CCTL_ENDIAN_RSP;
+ mw32_f(MVS_CTL, cctl);
+
+ /* reset CMD queue */
+ tmp = mr32(MVS_PCS);
+ tmp |= PCS_CMD_RST;
+ mw32(MVS_PCS, tmp);
+ /* interrupt coalescing may cause missing HW interrupts in some cases,
+ * and the max count is 0x1ff while our max slot count is 0x200,
+ * which would make the count 0.
+ */
+ tmp = 0;
+ mw32(MVS_INT_COAL, tmp);
+
+ tmp = 0x100;
+ mw32(MVS_INT_COAL_TMOUT, tmp);
+
+ /* ladies and gentlemen, start your engines */
+ mw32(MVS_TX_CFG, 0);
+ mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+ mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
+ /* enable CMD/CMPL_Q/RESP mode */
+ mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
+ PCS_CMD_EN | PCS_CMD_STOP_ERR);
+
+ /* enable completion queue interrupt */
+ tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
+ CINT_DMA_PCIE);
+
+ mw32(MVS_INT_MASK, tmp);
+
+ /* Enable SRS interrupt */
+ mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
+
+ return 0;
+}
+
+static int mvs_64xx_ioremap(struct mvs_info *mvi)
+{
+ if (!mvs_ioremap(mvi, 4, 2))
+ return 0;
+ return -1;
+}
+
+static void mvs_64xx_iounmap(struct mvs_info *mvi)
+{
+ mvs_iounmap(mvi->regs);
+ mvs_iounmap(mvi->regs_ex);
+}
+
+static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ tmp = mr32(MVS_GBL_CTL);
+ mw32(MVS_GBL_CTL, tmp | INT_EN);
+}
+
+static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ tmp = mr32(MVS_GBL_CTL);
+ mw32(MVS_GBL_CTL, tmp & ~INT_EN);
+}
+
+static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
+{
+ void __iomem *regs = mvi->regs;
+ u32 stat;
+
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ stat = mr32(MVS_GBL_INT_STAT);
+
+ if (stat == 0 || stat == 0xffffffff)
+ return 0;
+ } else
+ stat = 1;
+ return stat;
+}
+
+static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
+{
+ void __iomem *regs = mvi->regs;
+
+ /* clear CMD_CMPLT ASAP */
+ mw32_f(MVS_INT_STAT, CINT_DONE);
+#ifndef MVS_USE_TASKLET
+ spin_lock(&mvi->lock);
+#endif
+ mvs_int_full(mvi);
+#ifndef MVS_USE_TASKLET
+ spin_unlock(&mvi->lock);
+#endif
+ return IRQ_HANDLED;
+}
+
+static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
+{
+ u32 tmp;
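+ /* set the slot's bit through the indirect command register window (offsets 0x40 and 0x00), then busy-wait until it reads back clear in both banks */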
+ mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
+ mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
+ do {
+ tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
+ } while (tmp & 1 << (slot_idx % 32));
+ do {
+ tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
+ } while (tmp & 1 << (slot_idx % 32));
+}
+
+static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
+ u32 tfs)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ if (type == PORT_TYPE_SATA) {
+ tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ }
+ mw32(MVS_INT_STAT, CINT_CI_STOP);
+ tmp = mr32(MVS_PCS) | 0xFF00;
+ mw32(MVS_PCS, tmp);
+}
+
+static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp, offs;
+
+ if (*tfs == MVS_ID_NOT_MAPPED)
+ return;
+
+ offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
+ if (*tfs < 16) {
+ tmp = mr32(MVS_PCS);
+ mw32(MVS_PCS, tmp & ~offs);
+ } else {
+ tmp = mr32(MVS_CTL);
+ mw32(MVS_CTL, tmp & ~offs);
+ }
+
+ tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
+ if (tmp)
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+
+ *tfs = MVS_ID_NOT_MAPPED;
+ return;
+}
+
+static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+ int i;
+ u32 tmp, offs;
+ void __iomem *regs = mvi->regs;
+
+ if (*tfs != MVS_ID_NOT_MAPPED)
+ return 0;
+
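+ /* scan for a free SATA register set: allocation bits for sets 0-15 live in MVS_PCS, sets 16 and above in MVS_CTL */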
+ tmp = mr32(MVS_PCS);
+
+ for (i = 0; i < mvi->chip->srs_sz; i++) {
+ if (i == 16)
+ tmp = mr32(MVS_CTL);
+ offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
+ if (!(tmp & offs)) {
+ *tfs = i;
+
+ if (i < 16)
+ mw32(MVS_PCS, tmp | offs);
+ else
+ mw32(MVS_CTL, tmp | offs);
+ tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
+ if (tmp)
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ return 0;
+ }
+ }
+ return MVS_ID_NOT_MAPPED;
+}
+
+void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
+{
+ int i;
+ struct scatterlist *sg;
+ struct mvs_prd *buf_prd = prd;
+ for_each_sg(scatter, sg, nr, i) {
+ buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+ buf_prd->len = cpu_to_le32(sg_dma_len(sg));
+ buf_prd++;
+ }
+}
+
+static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
+{
+ u32 phy_st;
+ mvs_write_port_cfg_addr(mvi, i,
+ PHYR_PHY_STAT);
+ phy_st = mvs_read_port_cfg_data(mvi, i);
+ if (phy_st & PHY_OOB_DTCTD)
+ return 1;
+ return 0;
+}
+
+static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
+ struct sas_identify_frame *id)
+{
+ struct mvs_phy *phy = &mvi->phy[i];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ sas_phy->linkrate =
+ (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
+
+ phy->minimum_linkrate =
+ (phy->phy_status &
+ PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
+ phy->maximum_linkrate =
+ (phy->phy_status &
+ PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
+
+ mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
+ phy->dev_info = mvs_read_port_cfg_data(mvi, i);
+
+ mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
+ phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
+
+ mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
+ phy->att_dev_sas_addr =
+ (u64) mvs_read_port_cfg_data(mvi, i) << 32;
+ mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
+ phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
+ phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
+}
+
+static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
+{
+ u32 tmp;
+ struct mvs_phy *phy = &mvi->phy[i];
+ /* workaround for HW phy decoding error on 1.5g disk drive */
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
+ tmp = mvs_read_port_vsr_data(mvi, i);
+ if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
+ SAS_LINK_RATE_1_5_GBPS)
+ tmp &= ~PHY_MODE6_LATECLK;
+ else
+ tmp |= PHY_MODE6_LATECLK;
+ mvs_write_port_vsr_data(mvi, i, tmp);
+}
+
+void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+ struct sas_phy_linkrates *rates)
+{
+ u32 lrmin = 0, lrmax = 0;
+ u32 tmp;
+
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ lrmin = (rates->minimum_linkrate << 8);
+ lrmax = (rates->maximum_linkrate << 12);
+
+ if (lrmin) {
+ tmp &= ~(0xf << 8);
+ tmp |= lrmin;
+ }
+ if (lrmax) {
+ tmp &= ~(0xf << 12);
+ tmp |= lrmax;
+ }
+ mvs_write_phy_ctl(mvi, phy_id, tmp);
+ mvs_64xx_phy_reset(mvi, phy_id, 1);
+}
+
+static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
+{
+ u32 tmp;
+ void __iomem *regs = mvi->regs;
+ tmp = mr32(MVS_PCS);
+ mw32(MVS_PCS, tmp & 0xFFFF);
+ mw32(MVS_PCS, tmp);
+ tmp = mr32(MVS_CTL);
+ mw32(MVS_CTL, tmp & 0xFFFF);
+ mw32(MVS_CTL, tmp);
+}
+
+
+u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs_ex;
+ return ior32(SPI_DATA_REG_64XX);
+}
+
+void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
+{
+ void __iomem *regs = mvi->regs_ex;
+ iow32(SPI_DATA_REG_64XX, data);
+}
+
+
+int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
+ u32 *dwCmd,
+ u8 cmd,
+ u8 read,
+ u8 length,
+ u32 addr
+ )
+{
+ u32 dwTmp;
+
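+ /* pack the SPI command word: opcode in bits 31:24, length shifted to bit 19, bit 23 = read flag, bit 22 marks a valid 18-bit address in bits 17:0 */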
+ dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
+ if (read)
+ dwTmp |= 1U<<23;
+
+ if (addr != MV_MAX_U32) {
+ dwTmp |= 1U<<22;
+ dwTmp |= (addr & 0x0003FFFF);
+ }
+
+ *dwCmd = dwTmp;
+ return 0;
+}
+
+
+int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+{
+ void __iomem *regs = mvi->regs_ex;
+ int retry;
+
+ for (retry = 0; retry < 1; retry++) {
+ iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
+ iow32(SPI_CMD_REG_64XX, cmd);
+ iow32(SPI_CTRL_REG_64XX,
+ SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
+ }
+
+ return 0;
+}
+
+int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+{
+ void __iomem *regs = mvi->regs_ex;
+ u32 i, dwTmp;
+
+ for (i = 0; i < timeout; i++) {
+ dwTmp = ior32(SPI_CTRL_REG_64XX);
+ if (!(dwTmp & SPI_CTRL_SPISTART))
+ return 0;
+ msleep(10);
+ }
+
+ return -1;
+}
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+{
+ int i;
+ struct mvs_prd *buf_prd = prd;
+ buf_prd += from;
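+ /* fill the remaining PRD entries (up to MAX_SG_ENTRY) with the supplied buffer address and length */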
+ for (i = 0; i < MAX_SG_ENTRY - from; i++) {
+ buf_prd->addr = cpu_to_le64(buf_dma);
+ buf_prd->len = cpu_to_le32(buf_len);
+ ++buf_prd;
+ }
+}
+#endif
+
+const struct mvs_dispatch mvs_64xx_dispatch = {
+ "mv64xx",
+ mvs_64xx_init,
+ NULL,
+ mvs_64xx_ioremap,
+ mvs_64xx_iounmap,
+ mvs_64xx_isr,
+ mvs_64xx_isr_status,
+ mvs_64xx_interrupt_enable,
+ mvs_64xx_interrupt_disable,
+ mvs_read_phy_ctl,
+ mvs_write_phy_ctl,
+ mvs_read_port_cfg_data,
+ mvs_write_port_cfg_data,
+ mvs_write_port_cfg_addr,
+ mvs_read_port_vsr_data,
+ mvs_write_port_vsr_data,
+ mvs_write_port_vsr_addr,
+ mvs_read_port_irq_stat,
+ mvs_write_port_irq_stat,
+ mvs_read_port_irq_mask,
+ mvs_write_port_irq_mask,
+ mvs_get_sas_addr,
+ mvs_64xx_command_active,
+ mvs_64xx_issue_stop,
+ mvs_start_delivery,
+ mvs_rx_update,
+ mvs_int_full,
+ mvs_64xx_assign_reg_set,
+ mvs_64xx_free_reg_set,
+ mvs_get_prd_size,
+ mvs_get_prd_count,
+ mvs_64xx_make_prd,
+ mvs_64xx_detect_porttype,
+ mvs_64xx_oob_done,
+ mvs_64xx_fix_phy_info,
+ mvs_64xx_phy_work_around,
+ mvs_64xx_phy_set_link_rate,
+ mvs_hw_max_link_rate,
+ mvs_64xx_phy_disable,
+ mvs_64xx_phy_enable,
+ mvs_64xx_phy_reset,
+ mvs_64xx_stp_reset,
+ mvs_64xx_clear_active_cmds,
+ mvs_64xx_spi_read_data,
+ mvs_64xx_spi_write_data,
+ mvs_64xx_spi_buildcmd,
+ mvs_64xx_spi_issuecmd,
+ mvs_64xx_spi_waitdataready,
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+ mvs_64xx_fix_dma,
+#endif
+};
+
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
new file mode 100644
index 00000000000..42e947d9795
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -0,0 +1,151 @@
+/*
+ * Marvell 88SE64xx hardware specific head file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MVS64XX_REG_H_
+#define _MVS64XX_REG_H_
+
+#include <linux/types.h>
+
+#define MAX_LINK_RATE SAS_LINK_RATE_3_0_GBPS
+
+/* enhanced mode registers (BAR4) */
+enum hw_registers {
+ MVS_GBL_CTL = 0x04, /* global control */
+ MVS_GBL_INT_STAT = 0x08, /* global irq status */
+ MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
+
+ MVS_PHY_CTL = 0x40, /* SOC PHY Control */
+ MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
+
+ MVS_GBL_PORT_TYPE = 0xa0, /* port type */
+
+ MVS_CTL = 0x100, /* SAS/SATA port configuration */
+ MVS_PCS = 0x104, /* SAS/SATA port control/status */
+ MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
+ MVS_CMD_LIST_HI = 0x10C,
+ MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
+ MVS_RX_FIS_HI = 0x114,
+
+ MVS_TX_CFG = 0x120, /* TX configuration */
+ MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
+ MVS_TX_HI = 0x128,
+
+ MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
+ MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
+ MVS_RX_CFG = 0x134, /* RX configuration */
+ MVS_RX_LO = 0x138, /* RX (completion) ring addr */
+ MVS_RX_HI = 0x13C,
+ MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
+
+ MVS_INT_COAL = 0x148, /* Int coalescing config */
+ MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
+ MVS_INT_STAT = 0x150, /* Central int status */
+ MVS_INT_MASK = 0x154, /* Central int enable */
+ MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
+ MVS_INT_MASK_SRS_0 = 0x15C,
+
+ /* ports 1-3 follow after this */
+ MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
+ MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
+ /* ports 5-7 follow after this */
+ MVS_P4_INT_STAT = 0x200, /* Port4 interrupt status */
+ MVS_P4_INT_MASK = 0x204, /* Port4 interrupt enable mask */
+
+ /* ports 1-3 follow after this */
+ MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
+ /* ports 5-7 follow after this */
+ MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
+
+ MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
+ MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
+
+ /* ports 1-3 follow after this */
+ MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
+ MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
+ /* ports 5-7 follow after this */
+ MVS_P4_CFG_ADDR = 0x230, /* Port4 config address */
+ MVS_P4_CFG_DATA = 0x234, /* Port4 config data */
+
+ /* ports 1-3 follow after this */
+ MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
+ MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
+ /* ports 5-7 follow after this */
+ MVS_P4_VSR_ADDR = 0x250, /* port4 VSR addr */
+ MVS_P4_VSR_DATA = 0x254, /* port4 VSR data */
+};
+
+enum pci_cfg_registers {
+ PCR_PHY_CTL = 0x40,
+ PCR_PHY_CTL2 = 0x90,
+ PCR_DEV_CTRL = 0xE8,
+ PCR_LINK_STAT = 0xF2,
+};
+
+/* SAS/SATA Vendor Specific Port Registers */
+enum sas_sata_vsp_regs {
+ VSR_PHY_STAT = 0x00, /* Phy Status */
+ VSR_PHY_MODE1 = 0x01, /* phy tx */
+ VSR_PHY_MODE2 = 0x02, /* tx scc */
+ VSR_PHY_MODE3 = 0x03, /* pll */
+ VSR_PHY_MODE4 = 0x04, /* VCO */
+ VSR_PHY_MODE5 = 0x05, /* Rx */
+ VSR_PHY_MODE6 = 0x06, /* CDR */
+ VSR_PHY_MODE7 = 0x07, /* Impedance */
+ VSR_PHY_MODE8 = 0x08, /* Voltage */
+ VSR_PHY_MODE9 = 0x09, /* Test */
+ VSR_PHY_MODE10 = 0x0A, /* Power */
+ VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
+ VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */
+ VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */
+};
+
+enum chip_register_bits {
+ PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
+ PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+ (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+};
+
+#define MAX_SG_ENTRY 64
+
+struct mvs_prd {
+ __le64 addr; /* 64-bit buffer address */
+ __le32 reserved;
+ __le32 len; /* 16-bit length */
+};
+
+#define SPI_CTRL_REG 0xc0
+#define SPI_CTRL_VENDOR_ENABLE (1U<<29)
+#define SPI_CTRL_SPIRDY (1U<<22)
+#define SPI_CTRL_SPISTART (1U<<20)
+
+#define SPI_CMD_REG 0xc4
+#define SPI_DATA_REG 0xc8
+
+#define SPI_CTRL_REG_64XX 0x10
+#define SPI_CMD_REG_64XX 0x14
+#define SPI_DATA_REG_64XX 0x18
+
+#endif
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
new file mode 100644
index 00000000000..0940fae19d2
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -0,0 +1,672 @@
+/*
+ * Marvell 88SE94xx hardware specific
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+#include "mv_94xx.h"
+#include "mv_chips.h"
+
+static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
+{
+ u32 reg;
+ struct mvs_phy *phy = &mvi->phy[i];
+ u32 phy_status;
+
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
+ reg = mvs_read_port_vsr_data(mvi, i);
+ phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
+ phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
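+ /* bits 21:16 of VSR_PHY_MODE3 encode the detected port type: 0x10 means SAS, anything else is treated as SATA here */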
+ switch (phy_status) {
+ case 0x10:
+ phy->phy_type |= PORT_TYPE_SAS;
+ break;
+ case 0x1d:
+ default:
+ phy->phy_type |= PORT_TYPE_SATA;
+ break;
+ }
+}
+
+static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ tmp = mr32(MVS_PCS);
+ tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
+ mw32(MVS_PCS, tmp);
+}
+
+static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
+{
+ u32 tmp;
+
+ tmp = mvs_read_port_irq_stat(mvi, phy_id);
+ tmp &= ~PHYEV_RDY_CH;
+ mvs_write_port_irq_stat(mvi, phy_id, tmp);
+ if (hard) {
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ tmp |= PHY_RST_HARD;
+ mvs_write_phy_ctl(mvi, phy_id, tmp);
+ do {
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ } while (tmp & PHY_RST_HARD);
+ } else {
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp |= PHY_RST;
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+ }
+}
+
+static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
+{
+ u32 tmp;
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
+}
+
+static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
+{
+ mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
+ mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
+ mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
+ mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+ mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
+}
+
+static int __devinit mvs_94xx_init(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ int i;
+ u32 tmp, cctl;
+
+ mvs_show_pcie_usage(mvi);
+ if (mvi->flags & MVF_FLAG_SOC) {
+ tmp = mr32(MVS_PHY_CTL);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp |= PCTL_PHY_DSBL;
+ mw32(MVS_PHY_CTL, tmp);
+ }
+
+ /* Init Chip */
+ /* make sure RST is set; HBA_RST /should/ have done that for us */
+ cctl = mr32(MVS_CTL) & 0xFFFF;
+ if (cctl & CCTL_RST)
+ cctl &= ~CCTL_RST;
+ else
+ mw32_f(MVS_CTL, cctl | CCTL_RST);
+
+ if (mvi->flags & MVF_FLAG_SOC) {
+ tmp = mr32(MVS_PHY_CTL);
+ tmp &= ~PCTL_PWR_OFF;
+ tmp |= PCTL_COM_ON;
+ tmp &= ~PCTL_PHY_DSBL;
+ tmp |= PCTL_LINK_RST;
+ mw32(MVS_PHY_CTL, tmp);
+ msleep(100);
+ tmp &= ~PCTL_LINK_RST;
+ mw32(MVS_PHY_CTL, tmp);
+ msleep(100);
+ }
+
+ /* reset control */
+ mw32(MVS_PCS, 0); /* MVS_PCS */
+ mw32(MVS_STP_REG_SET_0, 0);
+ mw32(MVS_STP_REG_SET_1, 0);
+
+ /* init phys */
+ mvs_phy_hacks(mvi);
+
+ /* disable multiplexing, enable the implemented phys */
+ mw32(MVS_PORTS_IMP, 0xFF);
+
+
+ mw32(MVS_PA_VSR_ADDR, 0x00000104);
+ mw32(MVS_PA_VSR_PORT, 0x00018080);
+ mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
+ mw32(MVS_PA_VSR_PORT, 0x0084ffff);
+
+ /* set the LED to blink during I/O */
+ mw32(MVS_PA_VSR_ADDR, 0x00000030);
+ tmp = mr32(MVS_PA_VSR_PORT);
+ tmp &= 0xFFFF00FF;
+ tmp |= 0x00003300;
+ mw32(MVS_PA_VSR_PORT, tmp);
+
+ mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
+ mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
+
+ mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
+ mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
+
+ mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
+ mw32(MVS_TX_LO, mvi->tx_dma);
+ mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
+
+ mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
+ mw32(MVS_RX_LO, mvi->rx_dma);
+ mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
+
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ mvs_94xx_phy_disable(mvi, i);
+ /* set phy local SAS address */
+ mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
+ (mvi->phy[i].dev_sas_addr));
+
+ mvs_94xx_enable_xmt(mvi, i);
+ mvs_94xx_phy_enable(mvi, i);
+
+ mvs_94xx_phy_reset(mvi, i, 1);
+ msleep(500);
+ mvs_94xx_detect_porttype(mvi, i);
+ }
+
+ if (mvi->flags & MVF_FLAG_SOC) {
+ /* set select registers */
+ writel(0x0E008000, regs + 0x000);
+ writel(0x59000008, regs + 0x004);
+ writel(0x20, regs + 0x008);
+ writel(0x20, regs + 0x00c);
+ writel(0x20, regs + 0x010);
+ writel(0x20, regs + 0x014);
+ writel(0x20, regs + 0x018);
+ writel(0x20, regs + 0x01c);
+ }
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ /* clear phy int status */
+ tmp = mvs_read_port_irq_stat(mvi, i);
+ tmp &= ~PHYEV_SIG_FIS;
+ mvs_write_port_irq_stat(mvi, i, tmp);
+
+ /* set phy int mask */
+ tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
+ PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
+ mvs_write_port_irq_mask(mvi, i, tmp);
+
+ msleep(100);
+ mvs_update_phyinfo(mvi, i, 1);
+ }
+
+ /* FIXME: update wide port bitmaps */
+
+	/* use little endian for the open address frame, command table, etc. */
+	/*
+	 * It seems (from the spec) that turning on big-endian mode would not
+	 * do us any good even on big-endian machines; needs further
+	 * confirmation.
+	 */
+ cctl = mr32(MVS_CTL);
+ cctl |= CCTL_ENDIAN_CMD;
+ cctl |= CCTL_ENDIAN_DATA;
+ cctl &= ~CCTL_ENDIAN_OPEN;
+ cctl |= CCTL_ENDIAN_RSP;
+ mw32_f(MVS_CTL, cctl);
+
+ /* reset CMD queue */
+ tmp = mr32(MVS_PCS);
+ tmp |= PCS_CMD_RST;
+ mw32(MVS_PCS, tmp);
+	/* Interrupt coalescing may cause a missed HW interrupt in some cases;
+	 * also the maximum coalescing count is 0x1ff while our maximum slot
+	 * count is 0x200, which would make the count 0.
+	 */
+ tmp = 0;
+ mw32(MVS_INT_COAL, tmp);
+
+ tmp = 0x100;
+ mw32(MVS_INT_COAL_TMOUT, tmp);
+
+ /* ladies and gentlemen, start your engines */
+ mw32(MVS_TX_CFG, 0);
+ mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+ mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
+ /* enable CMD/CMPL_Q/RESP mode */
+ mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
+ PCS_CMD_EN | PCS_CMD_STOP_ERR);
+
+ /* enable completion queue interrupt */
+ tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
+ CINT_DMA_PCIE);
+ tmp |= CINT_PHY_MASK;
+ mw32(MVS_INT_MASK, tmp);
+
+ /* Enable SRS interrupt */
+ mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
+
+ return 0;
+}
+
+static int mvs_94xx_ioremap(struct mvs_info *mvi)
+{
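+	/*
+	 * Only BAR 2 is mapped: regs_ex points 0x10200 into it, and the
+	 * per-core SAS registers start 0x20000 in, with the second core
+	 * (id == 1) a further 0x4000 up.
+	 */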
+ if (!mvs_ioremap(mvi, 2, -1)) {
+ mvi->regs_ex = mvi->regs + 0x10200;
+ mvi->regs += 0x20000;
+ if (mvi->id == 1)
+ mvi->regs += 0x4000;
+ return 0;
+ }
+ return -1;
+}
+
+static void mvs_94xx_iounmap(struct mvs_info *mvi)
+{
+ if (mvi->regs) {
+ mvi->regs -= 0x20000;
+ if (mvi->id == 1)
+ mvi->regs -= 0x4000;
+ mvs_iounmap(mvi->regs);
+ }
+}
+
+static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs_ex;
+ u32 tmp;
+
+ tmp = mr32(MVS_GBL_CTL);
+ tmp |= (IRQ_SAS_A | IRQ_SAS_B);
+ mw32(MVS_GBL_INT_STAT, tmp);
+ writel(tmp, regs + 0x0C);
+ writel(tmp, regs + 0x10);
+ writel(tmp, regs + 0x14);
+ writel(tmp, regs + 0x18);
+ mw32(MVS_GBL_CTL, tmp);
+}
+
+static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs_ex;
+ u32 tmp;
+
+ tmp = mr32(MVS_GBL_CTL);
+
+ tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
+ mw32(MVS_GBL_INT_STAT, tmp);
+ writel(tmp, regs + 0x0C);
+ writel(tmp, regs + 0x10);
+ writel(tmp, regs + 0x14);
+ writel(tmp, regs + 0x18);
+ mw32(MVS_GBL_CTL, tmp);
+}
+
+static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
+{
+ void __iomem *regs = mvi->regs_ex;
+ u32 stat = 0;
+ if (!(mvi->flags & MVF_FLAG_SOC)) {
+ stat = mr32(MVS_GBL_INT_STAT);
+
+ if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
+ return 0;
+ }
+ return stat;
+}
+
+static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
+{
+ void __iomem *regs = mvi->regs;
+
+ if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
+ ((stat & IRQ_SAS_B) && mvi->id == 1)) {
+ mw32_f(MVS_INT_STAT, CINT_DONE);
+ #ifndef MVS_USE_TASKLET
+ spin_lock(&mvi->lock);
+ #endif
+ mvs_int_full(mvi);
+ #ifndef MVS_USE_TASKLET
+ spin_unlock(&mvi->lock);
+ #endif
+ }
+ return IRQ_HANDLED;
+}
+
+static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
+{
+ u32 tmp;
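+	/* set the slot bit through the indirect command register port and
+	 * poll until the controller clears it
+	 */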
+ mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
+ do {
+ tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
+ } while (tmp & 1 << (slot_idx % 32));
+}
+
+static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
+ u32 tfs)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ if (type == PORT_TYPE_SATA) {
+ tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ }
+ mw32(MVS_INT_STAT, CINT_CI_STOP);
+ tmp = mr32(MVS_PCS) | 0xFF00;
+ mw32(MVS_PCS, tmp);
+}
+
+static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+ u8 reg_set = *tfs;
+
+ if (*tfs == MVS_ID_NOT_MAPPED)
+ return;
+
+ mvi->sata_reg_set &= ~bit(reg_set);
+ if (reg_set < 32) {
+ w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
+ tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
+ if (tmp)
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ } else {
+ w_reg_set_enable(reg_set, mvi->sata_reg_set);
+ tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
+ if (tmp)
+ mw32(MVS_INT_STAT_SRS_1, tmp);
+ }
+
+ *tfs = MVS_ID_NOT_MAPPED;
+
+ return;
+}
+
+static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+ int i;
+ void __iomem *regs = mvi->regs;
+
+ if (*tfs != MVS_ID_NOT_MAPPED)
+ return 0;
+
+ i = mv_ffc64(mvi->sata_reg_set);
+ if (i > 32) {
+ mvi->sata_reg_set |= bit(i);
+ w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
+ *tfs = i;
+ return 0;
+ } else if (i >= 0) {
+ mvi->sata_reg_set |= bit(i);
+ w_reg_set_enable(i, (u32)mvi->sata_reg_set);
+ *tfs = i;
+ return 0;
+ }
+ return MVS_ID_NOT_MAPPED;
+}
+
+static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
+{
+ int i;
+ struct scatterlist *sg;
+ struct mvs_prd *buf_prd = prd;
+ for_each_sg(scatter, sg, nr, i) {
+ buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+ buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
+ buf_prd++;
+ }
+}
+
+static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
+{
+ u32 phy_st;
+ phy_st = mvs_read_phy_ctl(mvi, i);
+ if (phy_st & PHY_READY_MASK) /* phy ready */
+ return 1;
+ return 0;
+}
+
+static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
+ struct sas_identify_frame *id)
+{
+ int i;
+ u32 id_frame[7];
+
+ for (i = 0; i < 7; i++) {
+ mvs_write_port_cfg_addr(mvi, port_id,
+ CONFIG_ID_FRAME0 + i * 4);
+ id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
+ }
+ memcpy(id, id_frame, 28);
+}
+
+static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
+ struct sas_identify_frame *id)
+{
+ int i;
+ u32 id_frame[7];
+
+ /* mvs_hexdump(28, (u8 *)id_frame, 0); */
+ for (i = 0; i < 7; i++) {
+ mvs_write_port_cfg_addr(mvi, port_id,
+ CONFIG_ATT_ID_FRAME0 + i * 4);
+ id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
+ mv_dprintk("94xx phy %d atta frame %d %x.\n",
+ port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
+ }
+ /* mvs_hexdump(28, (u8 *)id_frame, 0); */
+ memcpy(id, id_frame, 28);
+}
+
+static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
+{
+ u32 att_dev_info = 0;
+
+ att_dev_info |= id->dev_type;
+ if (id->stp_iport)
+ att_dev_info |= PORT_DEV_STP_INIT;
+ if (id->smp_iport)
+ att_dev_info |= PORT_DEV_SMP_INIT;
+ if (id->ssp_iport)
+ att_dev_info |= PORT_DEV_SSP_INIT;
+ if (id->stp_tport)
+ att_dev_info |= PORT_DEV_STP_TRGT;
+ if (id->smp_tport)
+ att_dev_info |= PORT_DEV_SMP_TRGT;
+ if (id->ssp_tport)
+ att_dev_info |= PORT_DEV_SSP_TRGT;
+
+ att_dev_info |= (u32)id->phy_id<<24;
+ return att_dev_info;
+}
+
+static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
+{
+ return mvs_94xx_make_dev_info(id);
+}
+
+static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
+ struct sas_identify_frame *id)
+{
+ struct mvs_phy *phy = &mvi->phy[i];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
+ sas_phy->linkrate =
+ (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
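+	/* libsas link rates start at SAS_LINK_RATE_1_5_GBPS (8), so offset
+	 * the raw negotiated-rate field accordingly
+	 */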
+ sas_phy->linkrate += 0x8;
+ mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
+ phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
+ mvs_94xx_get_dev_identify_frame(mvi, i, id);
+ phy->dev_info = mvs_94xx_make_dev_info(id);
+
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ mvs_94xx_get_att_identify_frame(mvi, i, id);
+ phy->att_dev_info = mvs_94xx_make_att_info(id);
+ phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
+ } else {
+ phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
+ }
+
+}
+
+void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+ struct sas_phy_linkrates *rates)
+{
+ /* TODO */
+}
+
+static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
+{
+ u32 tmp;
+ void __iomem *regs = mvi->regs;
+ tmp = mr32(MVS_STP_REG_SET_0);
+ mw32(MVS_STP_REG_SET_0, 0);
+ mw32(MVS_STP_REG_SET_0, tmp);
+ tmp = mr32(MVS_STP_REG_SET_1);
+ mw32(MVS_STP_REG_SET_1, 0);
+ mw32(MVS_STP_REG_SET_1, tmp);
+}
+
+
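+/*
+ * The SPI flash registers live near the start of the extended (BAR 2)
+ * window; mvs_94xx_ioremap advanced regs_ex by 0x10200, so the SPI helpers
+ * below subtract that offset again to reach them.
+ */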
+u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs_ex - 0x10200;
+ return mr32(SPI_RD_DATA_REG_94XX);
+}
+
+void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
+{
+ void __iomem *regs = mvi->regs_ex - 0x10200;
+ mw32(SPI_RD_DATA_REG_94XX, data);
+}
+
+
+int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
+ u32 *dwCmd,
+ u8 cmd,
+ u8 read,
+ u8 length,
+ u32 addr
+ )
+{
+ void __iomem *regs = mvi->regs_ex - 0x10200;
+ u32 dwTmp;
+
+ dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
+ if (read)
+ dwTmp |= SPI_CTRL_READ_94XX;
+
+ if (addr != MV_MAX_U32) {
+ mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
+ dwTmp |= SPI_ADDR_VLD_94XX;
+ }
+
+ *dwCmd = dwTmp;
+ return 0;
+}
+
+
+int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+{
+ void __iomem *regs = mvi->regs_ex - 0x10200;
+ mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
+
+ return 0;
+}
+
+int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+{
+ void __iomem *regs = mvi->regs_ex - 0x10200;
+ u32 i, dwTmp;
+
+ for (i = 0; i < timeout; i++) {
+ dwTmp = mr32(SPI_CTRL_REG_94XX);
+ if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
+ return 0;
+ msleep(10);
+ }
+
+ return -1;
+}
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+{
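+	/*
+	 * Pad the remaining PRD entries with one (trash-bucket) buffer,
+	 * presumably so that any DMA issued past the real scatterlist still
+	 * has a harmless landing zone.
+	 */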
+ int i;
+ struct mvs_prd *buf_prd = prd;
+ buf_prd += from;
+ for (i = 0; i < MAX_SG_ENTRY - from; i++) {
+ buf_prd->addr = cpu_to_le64(buf_dma);
+ buf_prd->im_len.len = cpu_to_le32(buf_len);
+ ++buf_prd;
+ }
+}
+#endif
+
+const struct mvs_dispatch mvs_94xx_dispatch = {
+ "mv94xx",
+ mvs_94xx_init,
+ NULL,
+ mvs_94xx_ioremap,
+ mvs_94xx_iounmap,
+ mvs_94xx_isr,
+ mvs_94xx_isr_status,
+ mvs_94xx_interrupt_enable,
+ mvs_94xx_interrupt_disable,
+ mvs_read_phy_ctl,
+ mvs_write_phy_ctl,
+ mvs_read_port_cfg_data,
+ mvs_write_port_cfg_data,
+ mvs_write_port_cfg_addr,
+ mvs_read_port_vsr_data,
+ mvs_write_port_vsr_data,
+ mvs_write_port_vsr_addr,
+ mvs_read_port_irq_stat,
+ mvs_write_port_irq_stat,
+ mvs_read_port_irq_mask,
+ mvs_write_port_irq_mask,
+ mvs_get_sas_addr,
+ mvs_94xx_command_active,
+ mvs_94xx_issue_stop,
+ mvs_start_delivery,
+ mvs_rx_update,
+ mvs_int_full,
+ mvs_94xx_assign_reg_set,
+ mvs_94xx_free_reg_set,
+ mvs_get_prd_size,
+ mvs_get_prd_count,
+ mvs_94xx_make_prd,
+ mvs_94xx_detect_porttype,
+ mvs_94xx_oob_done,
+ mvs_94xx_fix_phy_info,
+ NULL,
+ mvs_94xx_phy_set_link_rate,
+ mvs_hw_max_link_rate,
+ mvs_94xx_phy_disable,
+ mvs_94xx_phy_enable,
+ mvs_94xx_phy_reset,
+ NULL,
+ mvs_94xx_clear_active_cmds,
+ mvs_94xx_spi_read_data,
+ mvs_94xx_spi_write_data,
+ mvs_94xx_spi_buildcmd,
+ mvs_94xx_spi_issuecmd,
+ mvs_94xx_spi_waitdataready,
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+ mvs_94xx_fix_dma,
+#endif
+};
+
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
new file mode 100644
index 00000000000..23ed9b16466
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -0,0 +1,222 @@
+/*
+ * Marvell 88SE94xx hardware specific header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MVS94XX_REG_H_
+#define _MVS94XX_REG_H_
+
+#include <linux/types.h>
+
+#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
+
+enum hw_registers {
+ MVS_GBL_CTL = 0x04, /* global control */
+ MVS_GBL_INT_STAT = 0x00, /* global irq status */
+ MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
+
+ MVS_PHY_CTL = 0x40, /* SOC PHY Control */
+ MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
+
+ MVS_GBL_PORT_TYPE = 0xa0, /* port type */
+
+ MVS_CTL = 0x100, /* SAS/SATA port configuration */
+ MVS_PCS = 0x104, /* SAS/SATA port control/status */
+ MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
+ MVS_CMD_LIST_HI = 0x10C,
+ MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
+ MVS_RX_FIS_HI = 0x114,
+ MVS_STP_REG_SET_0 = 0x118, /* STP/SATA Register Set Enable */
+ MVS_STP_REG_SET_1 = 0x11C,
+ MVS_TX_CFG = 0x120, /* TX configuration */
+ MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
+ MVS_TX_HI = 0x128,
+
+ MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
+ MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
+ MVS_RX_CFG = 0x134, /* RX configuration */
+ MVS_RX_LO = 0x138, /* RX (completion) ring addr */
+ MVS_RX_HI = 0x13C,
+ MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
+
+ MVS_INT_COAL = 0x148, /* Int coalescing config */
+ MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
+ MVS_INT_STAT = 0x150, /* Central int status */
+ MVS_INT_MASK = 0x154, /* Central int enable */
+ MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
+ MVS_INT_MASK_SRS_0 = 0x15C,
+ MVS_INT_STAT_SRS_1 = 0x160,
+ MVS_INT_MASK_SRS_1 = 0x164,
+ MVS_NON_NCQ_ERR_0 = 0x168, /* SRS Non-specific NCQ Error */
+ MVS_NON_NCQ_ERR_1 = 0x16C,
+ MVS_CMD_ADDR = 0x170, /* Command register port (addr) */
+ MVS_CMD_DATA = 0x174, /* Command register port (data) */
+ MVS_MEM_PARITY_ERR = 0x178, /* Memory parity error */
+
+ /* ports 1-3 follow after this */
+ MVS_P0_INT_STAT = 0x180, /* port0 interrupt status */
+ MVS_P0_INT_MASK = 0x184, /* port0 interrupt mask */
+ /* ports 5-7 follow after this */
+ MVS_P4_INT_STAT = 0x1A0, /* Port4 interrupt status */
+ MVS_P4_INT_MASK = 0x1A4, /* Port4 interrupt enable mask */
+
+ /* ports 1-3 follow after this */
+ MVS_P0_SER_CTLSTAT = 0x1D0, /* port0 serial control/status */
+ /* ports 5-7 follow after this */
+ MVS_P4_SER_CTLSTAT = 0x1E0, /* port4 serial control/status */
+
+ /* ports 1-3 follow after this */
+ MVS_P0_CFG_ADDR = 0x200, /* port0 phy register address */
+ MVS_P0_CFG_DATA = 0x204, /* port0 phy register data */
+ /* ports 5-7 follow after this */
+ MVS_P4_CFG_ADDR = 0x220, /* Port4 config address */
+ MVS_P4_CFG_DATA = 0x224, /* Port4 config data */
+
+ /* phys 1-3 follow after this */
+ MVS_P0_VSR_ADDR = 0x250, /* phy0 VSR address */
+ MVS_P0_VSR_DATA = 0x254, /* phy0 VSR data */
+ /* phys 1-3 follow after this */
+ /* multiplexing */
+ MVS_P4_VSR_ADDR = 0x250, /* phy4 VSR address */
+ MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
+ MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
+ MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
+};
+
+enum pci_cfg_registers {
+ PCR_PHY_CTL = 0x40,
+ PCR_PHY_CTL2 = 0x90,
+ PCR_DEV_CTRL = 0x78,
+ PCR_LINK_STAT = 0x82,
+};
+
+/* SAS/SATA Vendor Specific Port Registers */
+enum sas_sata_vsp_regs {
+ VSR_PHY_STAT = 0x00 * 4, /* Phy Status */
+ VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */
+ VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */
+ VSR_PHY_MODE3 = 0x03 * 4, /* pll */
+ VSR_PHY_MODE4 = 0x04 * 4, /* VCO */
+ VSR_PHY_MODE5 = 0x05 * 4, /* Rx */
+ VSR_PHY_MODE6 = 0x06 * 4, /* CDR */
+ VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */
+ VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */
+ VSR_PHY_MODE9 = 0x09 * 4, /* Test */
+ VSR_PHY_MODE10 = 0x0A * 4, /* Power */
+ VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */
+	VSR_PHY_VS0		= 0x0C * 4, /* Vendor Specific 0 */
+	VSR_PHY_VS1		= 0x0D * 4, /* Vendor Specific 1 */
+};
+
+enum chip_register_bits {
+ PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
+ PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+ (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+};
+
+enum pci_interrupt_cause {
+	/* MAIN_IRQ_CAUSE (R10200) bits */
+ IRQ_COM_IN_I2O_IOP0 = (1 << 0),
+ IRQ_COM_IN_I2O_IOP1 = (1 << 1),
+ IRQ_COM_IN_I2O_IOP2 = (1 << 2),
+ IRQ_COM_IN_I2O_IOP3 = (1 << 3),
+ IRQ_COM_OUT_I2O_HOS0 = (1 << 4),
+ IRQ_COM_OUT_I2O_HOS1 = (1 << 5),
+ IRQ_COM_OUT_I2O_HOS2 = (1 << 6),
+ IRQ_COM_OUT_I2O_HOS3 = (1 << 7),
+ IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8),
+ IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9),
+ IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10),
+ IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11),
+ IRQ_PCIF_DRBL0 = (1 << 12),
+ IRQ_PCIF_DRBL1 = (1 << 13),
+ IRQ_PCIF_DRBL2 = (1 << 14),
+ IRQ_PCIF_DRBL3 = (1 << 15),
+ IRQ_XOR_A = (1 << 16),
+ IRQ_XOR_B = (1 << 17),
+ IRQ_SAS_A = (1 << 18),
+ IRQ_SAS_B = (1 << 19),
+ IRQ_CPU_CNTRL = (1 << 20),
+ IRQ_GPIO = (1 << 21),
+ IRQ_UART = (1 << 22),
+ IRQ_SPI = (1 << 23),
+ IRQ_I2C = (1 << 24),
+ IRQ_SGPIO = (1 << 25),
+ IRQ_COM_ERR = (1 << 29),
+ IRQ_I2O_ERR = (1 << 30),
+ IRQ_PCIE_ERR = (1 << 31),
+};
+
+#define MAX_SG_ENTRY 255
+
+struct mvs_prd_imt {
+ __le32 len:22;
+ u8 _r_a:2;
+ u8 misc_ctl:4;
+ u8 inter_sel:4;
+};
+
+struct mvs_prd {
+ /* 64-bit buffer address */
+ __le64 addr;
+ /* 22-bit length */
+ struct mvs_prd_imt im_len;
+} __attribute__ ((packed));
+
+#define SPI_CTRL_REG_94XX 0xc800
+#define SPI_ADDR_REG_94XX 0xc804
+#define SPI_WR_DATA_REG_94XX 0xc808
+#define SPI_RD_DATA_REG_94XX 0xc80c
+#define SPI_CTRL_READ_94XX (1U << 2)
+#define SPI_ADDR_VLD_94XX (1U << 1)
+#define SPI_CTRL_SpiStart_94XX (1U << 0)
+
+#define mv_ffc(x) ffz(x)
+
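+/* find the first clear bit in the 64-bit SATA register-set bitmap,
+ * scanning the low word first
+ */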
+static inline int
+mv_ffc64(u64 v)
+{
+ int i;
+ i = mv_ffc((u32)v);
+ if (i >= 0)
+ return i;
+ i = mv_ffc((u32)(v>>32));
+
+ if (i != 0)
+ return 32 + i;
+
+ return -1;
+}
+
+#define r_reg_set_enable(i) \
+ (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \
+ mr32(MVS_STP_REG_SET_0))
+
+#define w_reg_set_enable(i, tmp) \
+ (((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
+ mw32(MVS_STP_REG_SET_0, tmp))
+
+extern const struct mvs_dispatch mvs_94xx_dispatch;
+#endif
+
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
new file mode 100644
index 00000000000..a67e1c4172f
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -0,0 +1,280 @@
+/*
+ * Marvell 88SE64xx/88SE94xx register IO interface
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+
+#ifndef _MV_CHIPS_H_
+#define _MV_CHIPS_H_
+
+#define mr32(reg) readl(regs + reg)
+#define mw32(reg, val) writel((val), regs + reg)
+#define mw32_f(reg, val) do { \
+ mw32(reg, val); \
+ mr32(reg); \
+ } while (0)
+
+#define iow32(reg, val) outl(val, (unsigned long)(regs + reg))
+#define ior32(reg) inl((unsigned long)(regs + reg))
+#define iow16(reg, val)		outw(val, (unsigned long)(regs + reg))
+#define ior16(reg)		inw((unsigned long)(regs + reg))
+#define iow8(reg, val)		outb(val, (unsigned long)(regs + reg))
+#define ior8(reg) inb((unsigned long)(regs + reg))
+
+static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
+{
+ void __iomem *regs = mvi->regs;
+ mw32(MVS_CMD_ADDR, addr);
+ return mr32(MVS_CMD_DATA);
+}
+
+static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
+{
+ void __iomem *regs = mvi->regs;
+ mw32(MVS_CMD_ADDR, addr);
+ mw32(MVS_CMD_DATA, val);
+}
+
+static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
+{
+ void __iomem *regs = mvi->regs;
+ return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
+ mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
+}
+
+static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
+{
+ void __iomem *regs = mvi->regs;
+ if (port < 4)
+ mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
+ else
+ mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
+}
+
+static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
+ u32 off2, u32 port)
+{
+ void __iomem *regs = mvi->regs + off;
+ void __iomem *regs2 = mvi->regs + off2;
+ return (port < 4) ? readl(regs + port * 8) :
+ readl(regs2 + (port - 4) * 8);
+}
+
+static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
+ u32 port, u32 val)
+{
+ void __iomem *regs = mvi->regs + off;
+ void __iomem *regs2 = mvi->regs + off2;
+ if (port < 4)
+ writel(val, regs + port * 8);
+ else
+ writel(val, regs2 + (port - 4) * 8);
+}
+
+static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
+{
+ return mvs_read_port(mvi, MVS_P0_CFG_DATA,
+ MVS_P4_CFG_DATA, port);
+}
+
+static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
+ u32 port, u32 val)
+{
+ mvs_write_port(mvi, MVS_P0_CFG_DATA,
+ MVS_P4_CFG_DATA, port, val);
+}
+
+static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
+ u32 port, u32 addr)
+{
+ mvs_write_port(mvi, MVS_P0_CFG_ADDR,
+ MVS_P4_CFG_ADDR, port, addr);
+ mdelay(10);
+}
+
+static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
+{
+ return mvs_read_port(mvi, MVS_P0_VSR_DATA,
+ MVS_P4_VSR_DATA, port);
+}
+
+static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
+ u32 port, u32 val)
+{
+ mvs_write_port(mvi, MVS_P0_VSR_DATA,
+ MVS_P4_VSR_DATA, port, val);
+}
+
+static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
+ u32 port, u32 addr)
+{
+ mvs_write_port(mvi, MVS_P0_VSR_ADDR,
+ MVS_P4_VSR_ADDR, port, addr);
+ mdelay(10);
+}
+
+static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
+{
+ return mvs_read_port(mvi, MVS_P0_INT_STAT,
+ MVS_P4_INT_STAT, port);
+}
+
+static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
+ u32 port, u32 val)
+{
+ mvs_write_port(mvi, MVS_P0_INT_STAT,
+ MVS_P4_INT_STAT, port, val);
+}
+
+static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
+{
+ return mvs_read_port(mvi, MVS_P0_INT_MASK,
+ MVS_P4_INT_MASK, port);
+
+}
+
+static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
+ u32 port, u32 val)
+{
+ mvs_write_port(mvi, MVS_P0_INT_MASK,
+ MVS_P4_INT_MASK, port, val);
+}
+
+static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
+{
+ u32 tmp;
+
+ /* workaround for SATA R-ERR, to ignore phy glitch */
+ tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
+ tmp &= ~(1 << 9);
+ tmp |= (1 << 10);
+ mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
+
+ /* enable retry 127 times */
+ mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);
+
+ /* extend open frame timeout to max */
+ tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
+ tmp &= ~0xffff;
+ tmp |= 0x3fff;
+ mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
+
+	/* workaround for WDTIMEOUT, set to 550 ms */
+	mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
+
+	/* do not halt on another port's operation during a wide-port link change */
+	mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
+
+ /* workaround for Seagate disk not-found OOB sequence, recv
+ * COMINIT before sending out COMWAKE */
+ tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
+ tmp &= 0x0000ffff;
+ tmp |= 0x00fa0000;
+ mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
+
+ tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
+ tmp &= 0x1fffffff;
+ tmp |= (2U << 29); /* 8 ms retry */
+ mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
+}
+
+static inline void mvs_int_sata(struct mvs_info *mvi)
+{
+ u32 tmp;
+ void __iomem *regs = mvi->regs;
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp)
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ MVS_CHIP_DISP->clear_active_cmds(mvi);
+}
+
+static inline void mvs_int_full(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp, stat;
+ int i;
+
+ stat = mr32(MVS_INT_STAT);
+ mvs_int_rx(mvi, false);
+
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
+ if (tmp)
+ mvs_int_port(mvi, i, tmp);
+ }
+
+ if (stat & CINT_SRS)
+ mvs_int_sata(mvi);
+
+ mw32(MVS_INT_STAT, stat);
+}
+
+static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
+{
+ void __iomem *regs = mvi->regs;
+ mw32(MVS_TX_PROD_IDX, tx);
+}
+
+static inline u32 mvs_rx_update(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ return mr32(MVS_RX_CONS_IDX);
+}
+
+static inline u32 mvs_get_prd_size(void)
+{
+ return sizeof(struct mvs_prd);
+}
+
+static inline u32 mvs_get_prd_count(void)
+{
+ return MAX_SG_ENTRY;
+}
+
+static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
+{
+ u16 link_stat, link_spd;
+ const char *spd[] = {
+		"Unknown",
+ "2.5",
+ "5.0",
+ };
+ if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
+ return;
+
+ pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
+ link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
+ if (link_spd >= 3)
+ link_spd = 0;
+ dev_printk(KERN_INFO, mvi->dev,
+ "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
+ (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
+ spd[link_spd]);
+}
+
+static inline u32 mvs_hw_max_link_rate(void)
+{
+ return MAX_LINK_RATE;
+}
+
+#endif /* _MV_CHIPS_H_ */
+
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
new file mode 100644
index 00000000000..f8cb9defb96
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -0,0 +1,502 @@
+/*
+ * Marvell 88SE64xx/88SE94xx constants header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MV_DEFS_H_
+#define _MV_DEFS_H_
+
+
+enum chip_flavors {
+ chip_6320,
+ chip_6440,
+ chip_6485,
+ chip_9480,
+ chip_9180,
+};
+
+/* driver compile-time configuration */
+enum driver_configuration {
+ MVS_SLOTS = 512, /* command slots */
+ MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
+ MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
+ /* software requires power-of-2
+ ring size */
+ MVS_SOC_SLOTS = 64,
+ MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2,
+ MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2,
+
+ MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
+ MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
+ MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
+ MVS_OAF_SZ = 64, /* Open address frame buffer size */
+ MVS_QUEUE_SIZE = 32, /* Support Queue depth */
+ MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
+ MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
+};
+
+/* unchangeable hardware details */
+enum hardware_details {
+ MVS_MAX_PHYS = 8, /* max. possible phys */
+ MVS_MAX_PORTS = 8, /* max. possible ports */
+ MVS_SOC_PHYS = 4, /* soc phys */
+	MVS_SOC_PORTS		= 4, /* soc ports */
+ MVS_MAX_DEVICES = 1024, /* max supported device */
+};
+
+/* peripheral registers (BAR2) */
+enum peripheral_registers {
+ SPI_CTL = 0x10, /* EEPROM control */
+ SPI_CMD = 0x14, /* EEPROM command */
+ SPI_DATA = 0x18, /* EEPROM data */
+};
+
+enum peripheral_register_bits {
+ TWSI_RDY = (1U << 7), /* EEPROM interface ready */
+ TWSI_RD = (1U << 4), /* EEPROM read access */
+
+ SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
+};
+
+enum hw_register_bits {
+ /* MVS_GBL_CTL */
+ INT_EN = (1U << 1), /* Global int enable */
+ HBA_RST = (1U << 0), /* HBA reset */
+
+ /* MVS_GBL_INT_STAT */
+ INT_XOR = (1U << 4), /* XOR engine event */
+ INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
+
+ /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
+ SATA_TARGET = (1U << 16), /* port0 SATA target enable */
+ MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
+ MODE_AUTO_DET_PORT6 = (1U << 14),
+ MODE_AUTO_DET_PORT5 = (1U << 13),
+ MODE_AUTO_DET_PORT4 = (1U << 12),
+ MODE_AUTO_DET_PORT3 = (1U << 11),
+ MODE_AUTO_DET_PORT2 = (1U << 10),
+ MODE_AUTO_DET_PORT1 = (1U << 9),
+ MODE_AUTO_DET_PORT0 = (1U << 8),
+ MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
+ MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
+ MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
+ MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
+ MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
+ MODE_SAS_PORT6_MASK = (1U << 6),
+ MODE_SAS_PORT5_MASK = (1U << 5),
+ MODE_SAS_PORT4_MASK = (1U << 4),
+ MODE_SAS_PORT3_MASK = (1U << 3),
+ MODE_SAS_PORT2_MASK = (1U << 2),
+ MODE_SAS_PORT1_MASK = (1U << 1),
+ MODE_SAS_PORT0_MASK = (1U << 0),
+ MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
+ MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
+ MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
+ MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
+
+ /* SAS_MODE value may be
+ * dictated (in hw) by values
+ * of SATA_TARGET & AUTO_DET
+ */
+
+ /* MVS_TX_CFG */
+ TX_EN = (1U << 16), /* Enable TX */
+ TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
+
+ /* MVS_RX_CFG */
+ RX_EN = (1U << 16), /* Enable RX */
+ RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
+
+ /* MVS_INT_COAL */
+ COAL_EN = (1U << 16), /* Enable int coalescing */
+
+ /* MVS_INT_STAT, MVS_INT_MASK */
+ CINT_I2C = (1U << 31), /* I2C event */
+ CINT_SW0 = (1U << 30), /* software event 0 */
+ CINT_SW1 = (1U << 29), /* software event 1 */
+ CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
+ CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
+ CINT_MEM = (1U << 26), /* int mem parity err */
+ CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
+ CINT_SRS = (1U << 3), /* SRS event */
+ CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
+ CINT_DONE = (1U << 0), /* cmd completion */
+
+ /* shl for ports 1-3 */
+ CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
+ CINT_PORT = (1U << 8), /* port0 event */
+ CINT_PORT_MASK_OFFSET = 8,
+ CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
+ CINT_PHY_MASK_OFFSET = 4,
+ CINT_PHY_MASK = (0x0F << CINT_PHY_MASK_OFFSET),
+
+ /* TX (delivery) ring bits */
+ TXQ_CMD_SHIFT = 29,
+ TXQ_CMD_SSP = 1, /* SSP protocol */
+ TXQ_CMD_SMP = 2, /* SMP protocol */
+ TXQ_CMD_STP = 3, /* STP/SATA protocol */
+ TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
+ TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
+ TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
+ TXQ_MODE_TARGET = 0,
+ TXQ_MODE_INITIATOR = 1,
+ TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
+ TXQ_PRI_NORMAL = 0,
+ TXQ_PRI_HIGH = 1,
+ TXQ_SRS_SHIFT = 20, /* SATA register set */
+ TXQ_SRS_MASK = 0x7f,
+ TXQ_PHY_SHIFT = 12, /* PHY bitmap */
+ TXQ_PHY_MASK = 0xff,
+ TXQ_SLOT_MASK = 0xfff, /* slot number */
+
+ /* RX (completion) ring bits */
+ RXQ_GOOD = (1U << 23), /* Response good */
+ RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
+ RXQ_CMD_RX = (1U << 20), /* target cmd received */
+ RXQ_ATTN = (1U << 19), /* attention */
+ RXQ_RSP = (1U << 18), /* response frame xfer'd */
+ RXQ_ERR = (1U << 17), /* err info rec xfer'd */
+ RXQ_DONE = (1U << 16), /* cmd complete */
+ RXQ_SLOT_MASK = 0xfff, /* slot number */
+
+ /* mvs_cmd_hdr bits */
+ MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
+ MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
+
+ /* SSP initiator only */
+ MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
+
+ /* SSP initiator or target */
+ MCH_SSP_FR_TASK = 0x1, /* TASK frame */
+
+ /* SSP target only */
+ MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
+ MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
+ MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
+ MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
+
+ MCH_SSP_MODE_PASSTHRU = 1,
+ MCH_SSP_MODE_NORMAL = 0,
+ MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
+ MCH_FBURST = (1U << 11), /* first burst (SSP) */
+ MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
+ MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
+ MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
+ MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
+ MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
+ MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
+ MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
+ MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
+
+ CCTL_RST = (1U << 5), /* port logic reset */
+
+ /* 0(LSB first), 1(MSB first) */
+ CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
+ CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
+ CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
+ CCTL_ENDIAN_CMD = (1U << 0), /* command table */
+
+ /* MVS_Px_SER_CTLSTAT (per-phy control) */
+ PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
+ PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
+ PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
+ PHY_RST = (1U << 0), /* phy reset */
+ PHY_READY_MASK = (1U << 20),
+
+ /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
+ PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
+	PHYEV_DCDR_ERR		= (1U << 23), /* STP Decoder Error */
+ PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */
+ PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
+ PHYEV_AN = (1U << 18), /* SATA async notification */
+ PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
+ PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
+ PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
+ PHYEV_IU_BIG = (1U << 11), /* IU too long err */
+ PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
+ PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
+ PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
+ PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
+ PHYEV_PORT_SEL = (1U << 6), /* port selector present */
+ PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
+ PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
+ PHYEV_ID_FAIL = (1U << 3), /* identify failed */
+ PHYEV_ID_DONE = (1U << 2), /* identify done */
+ PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
+ PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
+
+ /* MVS_PCS */
+ PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
+ PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
+ PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */
+ PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
+ PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
+ PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */
+ PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
+ PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
+ PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
+ PCS_CMD_RST = (1U << 1), /* reset cmd issue */
+ PCS_CMD_EN = (1U << 0), /* enable cmd issue */
+
+ /* Port n Attached Device Info */
+ PORT_DEV_SSP_TRGT = (1U << 19),
+ PORT_DEV_SMP_TRGT = (1U << 18),
+ PORT_DEV_STP_TRGT = (1U << 17),
+ PORT_DEV_SSP_INIT = (1U << 11),
+ PORT_DEV_SMP_INIT = (1U << 10),
+ PORT_DEV_STP_INIT = (1U << 9),
+ PORT_PHY_ID_MASK = (0xFFU << 24),
+ PORT_SSP_TRGT_MASK = (0x1U << 19),
+ PORT_SSP_INIT_MASK = (0x1U << 11),
+ PORT_DEV_TRGT_MASK = (0x7U << 17),
+ PORT_DEV_INIT_MASK = (0x7U << 9),
+ PORT_DEV_TYPE_MASK = (0x7U << 0),
+
+ /* Port n PHY Status */
+ PHY_RDY = (1U << 2),
+ PHY_DW_SYNC = (1U << 1),
+ PHY_OOB_DTCTD = (1U << 0),
+
+ /* VSR */
+ /* PHYMODE 6 (CDB) */
+ PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */
+ PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
+ PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
+ PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
+ PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
+ PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
+ PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
+ PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
+ PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
+ PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
+ PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
+ PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
+ PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
+ PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
+};
+
+/* SAS/SATA configuration port registers, aka phy registers */
+enum sas_sata_config_port_regs {
+ PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
+ PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
+ PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
+ PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
+ PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
+ PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
+ PHYR_SATA_CTL = 0x18, /* SATA control */
+ PHYR_PHY_STAT = 0x1C, /* PHY status */
+ PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
+ PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
+ PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
+ PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
+ PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
+ PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
+ PHYR_WIDE_PORT = 0x38, /* wide port participating */
+ PHYR_CURRENT0 = 0x80, /* current connection info 0 */
+ PHYR_CURRENT1 = 0x84, /* current connection info 1 */
+ PHYR_CURRENT2 = 0x88, /* current connection info 2 */
+ CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */
+ CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */
+ CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */
+ CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */
+ CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */
+ CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 */
+ CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */
+ CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */
+ CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */
+ CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */
+ CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */
+ CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */
+ CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */
+ CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */
+};
+
+enum sas_cmd_port_registers {
+ CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
+ CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
+ CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
+ CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
+ CMD_OOB_SPACE = 0x110, /* OOB space control register */
+ CMD_OOB_BURST = 0x114, /* OOB burst control register */
+ CMD_PHY_TIMER = 0x118, /* PHY timer control register */
+ CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
+ CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
+ CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
+ CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
+ CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
+ CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
+ CMD_ID_TEST = 0x134, /* ID test register */
+ CMD_PL_TIMER = 0x138, /* PL timer register */
+ CMD_WD_TIMER = 0x13c, /* WD timer register */
+ CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
+ CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
+ CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
+ CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
+ CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
+ CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
+ CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
+ CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
+ CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
+	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
+ CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
+ CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
+ CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
+ CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
+ CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
+ CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
+ CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
+ CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
+ CMD_RESET_COUNT = 0x188, /* Reset Count */
+ CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
+ CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
+ CMD_PHY_CTL = 0x194, /* PHY Control and Status */
+ CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
+ CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
+ CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
+ CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
+ CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
+ CMD_HOST_CTL = 0x1AC, /* Host Control Status */
+ CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
+ CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
+ CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
+ CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
+ CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
+ CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
+};
+
+enum mvs_info_flags {
+ MVF_MSI = (1U << 0), /* MSI is enabled */
+ MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
+ MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
+};
+
+enum mvs_event_flags {
+ PHY_PLUG_EVENT = (3U),
+ PHY_PLUG_IN = (1U << 0), /* phy plug in */
+ PHY_PLUG_OUT = (1U << 1), /* phy plug out */
+};
+
+enum mvs_port_type {
+ PORT_TGT_MASK = (1U << 5),
+ PORT_INIT_PORT = (1U << 4),
+ PORT_TGT_PORT = (1U << 3),
+ PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT),
+ PORT_TYPE_SAS = (1U << 1),
+ PORT_TYPE_SATA = (1U << 0),
+};
+
+/* Command Table Format */
+enum ct_format {
+ /* SSP */
+ SSP_F_H = 0x00,
+ SSP_F_IU = 0x18,
+ SSP_F_MAX = 0x4D,
+ /* STP */
+ STP_CMD_FIS = 0x00,
+ STP_ATAPI_CMD = 0x40,
+ STP_F_MAX = 0x10,
+ /* SMP */
+ SMP_F_T = 0x00,
+ SMP_F_DEP = 0x01,
+ SMP_F_MAX = 0x101,
+};
+
+enum status_buffer {
+ SB_EIR_OFF = 0x00, /* Error Information Record */
+ SB_RFB_OFF = 0x08, /* Response Frame Buffer */
+	SB_RFB_MAX	= 0x400, /* RFB size */
+};
+
+enum error_info_rec {
+ CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
+ CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
+ RSP_OVER = (1U << 29), /* rsp buffer overflow */
+ RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
+ UNK_FIS = (1U << 27), /* unknown FIS */
+ DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
+ SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
+ TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
+ R_ERR = (1U << 23), /* SATA returned R_ERR prim */
+ RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
+ XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
+ UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
+ DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
+ INTERLOCK = (1U << 15), /* interlock error */
+ NAK = (1U << 14), /* NAK rx'd */
+ ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
+ CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
+ OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
+ PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
+ NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
+ STP_RES_BSY = (1U << 8), /* STP resources busy */
+ BREAK = (1U << 7), /* break received */
+ BAD_DEST = (1U << 6), /* bad destination */
+ BAD_PROTO = (1U << 5), /* protocol not supported */
+ BAD_RATE = (1U << 4), /* cxn rate not supported */
+ WRONG_DEST = (1U << 3), /* wrong destination error */
+ CREDIT_TO = (1U << 2), /* credit timeout */
+ WDOG_TO = (1U << 1), /* watchdog timeout */
+ BUF_PAR = (1U << 0), /* buffer parity error */
+};
+
+enum error_info_rec_2 {
+ SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
+ GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
+ APP_CHK_ERR = (1U << 13), /* Application Check error */
+ REF_CHK_ERR = (1U << 12), /* Reference Check Error */
+ USR_BLK_NM = (1U << 0), /* User Block Number */
+};
+
+enum pci_cfg_register_bits {
+ PCTL_PWR_OFF = (0xFU << 24),
+ PCTL_COM_ON = (0xFU << 20),
+ PCTL_LINK_RST = (0xFU << 16),
+ PCTL_LINK_OFFS = (16),
+ PCTL_PHY_DSBL = (0xFU << 12),
+ PCTL_PHY_DSBL_OFFS = (12),
+ PRD_REQ_SIZE = (0x4000),
+ PRD_REQ_MASK = (0x00007000),
+ PLS_NEG_LINK_WD = (0x3FU << 4),
+ PLS_NEG_LINK_WD_OFFS = 4,
+ PLS_LINK_SPD = (0x0FU << 0),
+ PLS_LINK_SPD_OFFS = 0,
+};
+
+enum open_frame_protocol {
+ PROTOCOL_SMP = 0x0,
+ PROTOCOL_SSP = 0x1,
+ PROTOCOL_STP = 0x2,
+};
+
+/* define for response frame datapres field */
+enum datapres_field {
+ NO_DATA = 0,
+ RESPONSE_DATA = 1,
+ SENSE_DATA = 2,
+};
+
+/* define task management IU */
+struct mvs_tmf_task {
+ u8 tmf;
+ u16 tag_of_task_to_be_managed;
+};
+#endif
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
new file mode 100644
index 00000000000..8646a19f999
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -0,0 +1,703 @@
+/*
+ * Marvell 88SE64xx/88SE94xx pci init
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+
+#include "mv_sas.h"
+
+static struct scsi_transport_template *mvs_stt;
+static const struct mvs_chip_info mvs_chips[] = {
+ [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
+ [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
+ [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
+ [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
+ [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
+};
+
+#define SOC_SAS_NUM 2
+
+static struct scsi_host_template mvs_sht = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .queuecommand = sas_queuecommand,
+ .target_alloc = sas_target_alloc,
+ .slave_configure = mvs_slave_configure,
+ .slave_destroy = sas_slave_destroy,
+ .scan_finished = mvs_scan_finished,
+ .scan_start = mvs_scan_start,
+ .change_queue_depth = sas_change_queue_depth,
+ .change_queue_type = sas_change_queue_type,
+ .bios_param = sas_bios_param,
+ .can_queue = 1,
+ .cmd_per_lun = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_bus_reset_handler = sas_eh_bus_reset_handler,
+ .slave_alloc = mvs_slave_alloc,
+ .target_destroy = sas_target_destroy,
+ .ioctl = sas_ioctl,
+};
+
+static struct sas_domain_function_template mvs_transport_ops = {
+ .lldd_dev_found = mvs_dev_found,
+ .lldd_dev_gone = mvs_dev_gone,
+
+ .lldd_execute_task = mvs_queue_command,
+ .lldd_control_phy = mvs_phy_control,
+
+ .lldd_abort_task = mvs_abort_task,
+ .lldd_abort_task_set = mvs_abort_task_set,
+ .lldd_clear_aca = mvs_clear_aca,
+ .lldd_clear_task_set = mvs_clear_task_set,
+ .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
+ .lldd_lu_reset = mvs_lu_reset,
+ .lldd_query_task = mvs_query_task,
+
+ .lldd_port_formed = mvs_port_formed,
+ .lldd_port_deformed = mvs_port_deformed,
+
+};
+
+static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
+{
+ struct mvs_phy *phy = &mvi->phy[phy_id];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ phy->mvi = mvi;
+ init_timer(&phy->timer);
+ sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
+ sas_phy->class = SAS;
+ sas_phy->iproto = SAS_PROTOCOL_ALL;
+ sas_phy->tproto = 0;
+ sas_phy->type = PHY_TYPE_PHYSICAL;
+ sas_phy->role = PHY_ROLE_INITIATOR;
+ sas_phy->oob_mode = OOB_NOT_CONNECTED;
+ sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
+
+ sas_phy->id = phy_id;
+ sas_phy->sas_addr = &mvi->sas_addr[0];
+ sas_phy->frame_rcvd = &phy->frame_rcvd[0];
+ sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
+ sas_phy->lldd_phy = phy;
+}
+
+static void mvs_free(struct mvs_info *mvi)
+{
+ int i;
+ struct mvs_wq *mwq;
+ int slot_nr;
+
+ if (!mvi)
+ return;
+
+ if (mvi->flags & MVF_FLAG_SOC)
+ slot_nr = MVS_SOC_SLOTS;
+ else
+ slot_nr = MVS_SLOTS;
+
+ for (i = 0; i < mvi->tags_num; i++) {
+ struct mvs_slot_info *slot = &mvi->slot_info[i];
+ if (slot->buf)
+ dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
+ slot->buf, slot->buf_dma);
+ }
+
+ if (mvi->tx)
+ dma_free_coherent(mvi->dev,
+ sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
+ mvi->tx, mvi->tx_dma);
+ if (mvi->rx_fis)
+ dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
+ mvi->rx_fis, mvi->rx_fis_dma);
+ if (mvi->rx)
+ dma_free_coherent(mvi->dev,
+ sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
+ mvi->rx, mvi->rx_dma);
+ if (mvi->slot)
+ dma_free_coherent(mvi->dev,
+ sizeof(*mvi->slot) * slot_nr,
+ mvi->slot, mvi->slot_dma);
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+ if (mvi->bulk_buffer)
+ dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
+ mvi->bulk_buffer, mvi->bulk_buffer_dma);
+#endif
+
+ MVS_CHIP_DISP->chip_iounmap(mvi);
+ if (mvi->shost)
+ scsi_host_put(mvi->shost);
+ list_for_each_entry(mwq, &mvi->wq_list, entry)
+ cancel_delayed_work(&mwq->work_q);
+ kfree(mvi);
+}
+
+#ifdef MVS_USE_TASKLET
+struct tasklet_struct mv_tasklet;
+static void mvs_tasklet(unsigned long opaque)
+{
+ unsigned long flags;
+ u32 stat;
+ u16 core_nr, i = 0;
+
+ struct mvs_info *mvi;
+ struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
+
+ core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+	BUG_ON(!mvi);
+
+ for (i = 0; i < core_nr; i++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+ stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
+ if (stat)
+ MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
+ }
+
+}
+#endif
+
+static irqreturn_t mvs_interrupt(int irq, void *opaque)
+{
+ u32 core_nr, i = 0;
+ u32 stat;
+ struct mvs_info *mvi;
+ struct sas_ha_struct *sha = opaque;
+
+ core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+ if (unlikely(!mvi))
+ return IRQ_NONE;
+
+ stat = MVS_CHIP_DISP->isr_status(mvi, irq);
+ if (!stat)
+ return IRQ_NONE;
+
+#ifdef MVS_USE_TASKLET
+ tasklet_schedule(&mv_tasklet);
+#else
+ for (i = 0; i < core_nr; i++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+ MVS_CHIP_DISP->isr(mvi, irq, stat);
+ }
+#endif
+ return IRQ_HANDLED;
+}
+
+static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
+{
+ int i, slot_nr;
+
+ if (mvi->flags & MVF_FLAG_SOC)
+ slot_nr = MVS_SOC_SLOTS;
+ else
+ slot_nr = MVS_SLOTS;
+
+ spin_lock_init(&mvi->lock);
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ mvs_phy_init(mvi, i);
+ mvi->port[i].wide_port_phymap = 0;
+ mvi->port[i].port_attached = 0;
+ INIT_LIST_HEAD(&mvi->port[i].list);
+ }
+ for (i = 0; i < MVS_MAX_DEVICES; i++) {
+ mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
+ mvi->devices[i].dev_type = NO_DEVICE;
+ mvi->devices[i].device_id = i;
+ mvi->devices[i].dev_status = MVS_DEV_NORMAL;
+ }
+
+ /*
+ * alloc and init our DMA areas
+ */
+ mvi->tx = dma_alloc_coherent(mvi->dev,
+ sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
+ &mvi->tx_dma, GFP_KERNEL);
+ if (!mvi->tx)
+ goto err_out;
+ memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
+ mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
+ &mvi->rx_fis_dma, GFP_KERNEL);
+ if (!mvi->rx_fis)
+ goto err_out;
+ memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
+
+ mvi->rx = dma_alloc_coherent(mvi->dev,
+ sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
+ &mvi->rx_dma, GFP_KERNEL);
+ if (!mvi->rx)
+ goto err_out;
+ memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
+ mvi->rx[0] = cpu_to_le32(0xfff);
+ mvi->rx_cons = 0xfff;
+
+ mvi->slot = dma_alloc_coherent(mvi->dev,
+ sizeof(*mvi->slot) * slot_nr,
+ &mvi->slot_dma, GFP_KERNEL);
+ if (!mvi->slot)
+ goto err_out;
+ memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+ mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
+ TRASH_BUCKET_SIZE,
+ &mvi->bulk_buffer_dma, GFP_KERNEL);
+ if (!mvi->bulk_buffer)
+ goto err_out;
+#endif
+ for (i = 0; i < slot_nr; i++) {
+ struct mvs_slot_info *slot = &mvi->slot_info[i];
+
+ slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
+ &slot->buf_dma, GFP_KERNEL);
+ if (!slot->buf) {
+ printk(KERN_DEBUG"failed to allocate slot->buf.\n");
+ goto err_out;
+ }
+ memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+ ++mvi->tags_num;
+ }
+ /* Initialize tags */
+ mvs_tag_init(mvi);
+ return 0;
+err_out:
+ return 1;
+}
+
+
+int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
+{
+ unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
+ struct pci_dev *pdev = mvi->pdev;
+ if (bar_ex != -1) {
+ /*
+ * ioremap main and peripheral registers
+ */
+ res_start = pci_resource_start(pdev, bar_ex);
+ res_len = pci_resource_len(pdev, bar_ex);
+ if (!res_start || !res_len)
+ goto err_out;
+
+ res_flag_ex = pci_resource_flags(pdev, bar_ex);
+ if (res_flag_ex & IORESOURCE_MEM) {
+ if (res_flag_ex & IORESOURCE_CACHEABLE)
+ mvi->regs_ex = ioremap(res_start, res_len);
+ else
+ mvi->regs_ex = ioremap_nocache(res_start,
+ res_len);
+ } else
+ mvi->regs_ex = (void *)res_start;
+ if (!mvi->regs_ex)
+ goto err_out;
+ }
+
+ res_start = pci_resource_start(pdev, bar);
+ res_len = pci_resource_len(pdev, bar);
+ if (!res_start || !res_len)
+ goto err_out;
+
+ res_flag = pci_resource_flags(pdev, bar);
+ if (res_flag & IORESOURCE_CACHEABLE)
+ mvi->regs = ioremap(res_start, res_len);
+ else
+ mvi->regs = ioremap_nocache(res_start, res_len);
+
+ if (!mvi->regs) {
+ if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
+ iounmap(mvi->regs_ex);
+ mvi->regs_ex = NULL;
+ goto err_out;
+ }
+
+ return 0;
+err_out:
+ return -1;
+}
+
+void mvs_iounmap(void __iomem *regs)
+{
+ iounmap(regs);
+}
+
+static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct Scsi_Host *shost, unsigned int id)
+{
+ struct mvs_info *mvi;
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
+ GFP_KERNEL);
+ if (!mvi)
+ return NULL;
+
+ mvi->pdev = pdev;
+ mvi->dev = &pdev->dev;
+ mvi->chip_id = ent->driver_data;
+ mvi->chip = &mvs_chips[mvi->chip_id];
+ INIT_LIST_HEAD(&mvi->wq_list);
+ mvi->irq = pdev->irq;
+
+ ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
+ ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
+
+ mvi->id = id;
+ mvi->sas = sha;
+ mvi->shost = shost;
+#ifdef MVS_USE_TASKLET
+ tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
+#endif
+
+ if (MVS_CHIP_DISP->chip_ioremap(mvi))
+ goto err_out;
+ if (!mvs_alloc(mvi, shost))
+ return mvi;
+err_out:
+ mvs_free(mvi);
+ return NULL;
+}
+
+/* move to PCI layer or libata core? */
+static int pci_go_64(struct pci_dev *pdev)
+{
+ int rc;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "64-bit DMA enable failed\n");
+ return rc;
+ }
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit DMA enable failed\n");
+ return rc;
+ }
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
+ const struct mvs_chip_info *chip_info)
+{
+ int phy_nr, port_nr; unsigned short core_nr;
+ struct asd_sas_phy **arr_phy;
+ struct asd_sas_port **arr_port;
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ core_nr = chip_info->n_host;
+ phy_nr = core_nr * chip_info->n_phy;
+ port_nr = phy_nr;
+
+ memset(sha, 0x00, sizeof(struct sas_ha_struct));
+ arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
+ arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
+ if (!arr_phy || !arr_port)
+ goto exit_free;
+
+ sha->sas_phy = arr_phy;
+ sha->sas_port = arr_port;
+
+ sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
+ if (!sha->lldd_ha)
+ goto exit_free;
+
+ ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
+
+ shost->transportt = mvs_stt;
+ shost->max_id = 128;
+ shost->max_lun = ~0;
+ shost->max_channel = 1;
+ shost->max_cmd_len = 16;
+
+ return 0;
+exit_free:
+ kfree(arr_phy);
+ kfree(arr_port);
+ return -1;
+
+}
+
+static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
+ const struct mvs_chip_info *chip_info)
+{
+ int can_queue, i = 0, j = 0;
+ struct mvs_info *mvi = NULL;
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+
+ for (j = 0; j < nr_core; j++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
+ for (i = 0; i < chip_info->n_phy; i++) {
+ sha->sas_phy[j * chip_info->n_phy + i] =
+ &mvi->phy[i].sas_phy;
+ sha->sas_port[j * chip_info->n_phy + i] =
+ &mvi->port[i].sas_port;
+ }
+ }
+
+ sha->sas_ha_name = DRV_NAME;
+ sha->dev = mvi->dev;
+ sha->lldd_module = THIS_MODULE;
+ sha->sas_addr = &mvi->sas_addr[0];
+
+ sha->num_phys = nr_core * chip_info->n_phy;
+
+ sha->lldd_max_execute_num = 1;
+
+ if (mvi->flags & MVF_FLAG_SOC)
+ can_queue = MVS_SOC_CAN_QUEUE;
+ else
+ can_queue = MVS_CAN_QUEUE;
+
+ sha->lldd_queue_size = can_queue;
+ shost->can_queue = can_queue;
+ mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
+ sha->core.shost = mvi->shost;
+}
+
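+/* Give every phy the same fixed SAS address, stored big-endian as it goes on the wire */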
+static void mvs_init_sas_add(struct mvs_info *mvi)
+{
+ u8 i;
+ for (i = 0; i < mvi->chip->n_phy; i++) {
+ mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
+ mvi->phy[i].dev_sas_addr =
+ cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
+ }
+
+ memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
+}
+
+static int __devinit mvs_pci_init(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned int rc, nhost = 0;
+ struct mvs_info *mvi;
+ irq_handler_t irq_handler = mvs_interrupt;
+ struct Scsi_Host *shost = NULL;
+ const struct mvs_chip_info *chip;
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "mvsas: driver version %s\n", DRV_VERSION);
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out_enable;
+
+ pci_set_master(pdev);
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_disable;
+
+ rc = pci_go_64(pdev);
+ if (rc)
+ goto err_out_regions;
+
+ shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
+ if (!shost) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ chip = &mvs_chips[ent->driver_data];
+ SHOST_TO_SAS_HA(shost) =
+ kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
+ if (!SHOST_TO_SAS_HA(shost)) {
+ kfree(shost);
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ rc = mvs_prep_sas_ha_init(shost, chip);
+ if (rc) {
+ kfree(shost);
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
+
+ do {
+ mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
+ if (!mvi) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ mvs_init_sas_add(mvi);
+
+ mvi->instance = nhost;
+ rc = MVS_CHIP_DISP->chip_init(mvi);
+ if (rc) {
+ mvs_free(mvi);
+ goto err_out_regions;
+ }
+ nhost++;
+ } while (nhost < chip->n_host);
+
+ mvs_post_sas_ha_init(shost, chip);
+
+ rc = scsi_add_host(shost, &pdev->dev);
+ if (rc)
+ goto err_out_shost;
+
+ rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
+ if (rc)
+ goto err_out_shost;
+ rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
+ DRV_NAME, SHOST_TO_SAS_HA(shost));
+ if (rc)
+ goto err_not_sas;
+
+ MVS_CHIP_DISP->interrupt_enable(mvi);
+
+ scsi_scan_host(mvi->shost);
+
+ return 0;
+
+err_not_sas:
+ sas_unregister_ha(SHOST_TO_SAS_HA(shost));
+err_out_shost:
+ scsi_remove_host(mvi->shost);
+err_out_regions:
+ pci_release_regions(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out_enable:
+ return rc;
+}
+
+static void __devexit mvs_pci_remove(struct pci_dev *pdev)
+{
+ unsigned short core_nr, i = 0;
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct mvs_info *mvi = NULL;
+
+ core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+#ifdef MVS_USE_TASKLET
+ tasklet_kill(&mv_tasklet);
+#endif
+
+ pci_set_drvdata(pdev, NULL);
+ sas_unregister_ha(sha);
+ sas_remove_host(mvi->shost);
+ scsi_remove_host(mvi->shost);
+
+ MVS_CHIP_DISP->interrupt_disable(mvi);
+ free_irq(mvi->irq, sha);
+ for (i = 0; i < core_nr; i++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+ mvs_free(mvi);
+ }
+ kfree(sha->sas_phy);
+ kfree(sha->sas_port);
+ kfree(sha);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ return;
+}
+
+static struct pci_device_id __devinitdata mvs_pci_table[] = {
+ { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
+ { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
+ {
+ .vendor = PCI_VENDOR_ID_MARVELL,
+ .device = 0x6440,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = 0x6480,
+ .class = 0,
+ .class_mask = 0,
+ .driver_data = chip_6485,
+ },
+ { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
+ { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
+ { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
+ { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver mvs_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = mvs_pci_table,
+ .probe = mvs_pci_init,
+ .remove = __devexit_p(mvs_pci_remove),
+};
+
+/* task handler */
+struct task_struct *mvs_th;
+static int __init mvs_init(void)
+{
+ int rc;
+ mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
+ if (!mvs_stt)
+ return -ENOMEM;
+
+ rc = pci_register_driver(&mvs_pci_driver);
+
+ if (rc)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ sas_release_transport(mvs_stt);
+ return rc;
+}
+
+static void __exit mvs_exit(void)
+{
+ pci_unregister_driver(&mvs_pci_driver);
+ sas_release_transport(mvs_stt);
+}
+
+module_init(mvs_init);
+module_exit(mvs_exit);
+
+MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+#ifdef CONFIG_PCI
+MODULE_DEVICE_TABLE(pci, mvs_pci_table);
+#endif
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
new file mode 100644
index 00000000000..0d213864121
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -0,0 +1,2154 @@
+/*
+ * Marvell 88SE64xx/88SE94xx main function
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+
+static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
+{
+ if (task->lldd_task) {
+ struct mvs_slot_info *slot;
+ slot = task->lldd_task;
+ *tag = slot->slot_tag;
+ return 1;
+ }
+ return 0;
+}
+
+void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
+{
+ void *bitmap = &mvi->tags;
+ clear_bit(tag, bitmap);
+}
+
+void mvs_tag_free(struct mvs_info *mvi, u32 tag)
+{
+ mvs_tag_clear(mvi, tag);
+}
+
+void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
+{
+ void *bitmap = &mvi->tags;
+ set_bit(tag, bitmap);
+}
+
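+/*
+ * Tags index directly into mvi->slot_info[]; allocation is simply the first
+ * clear bit in the per-host tag bitmap.
+ */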
+inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
+{
+ unsigned int index, tag;
+ void *bitmap = &mvi->tags;
+
+ index = find_first_zero_bit(bitmap, mvi->tags_num);
+ tag = index;
+ if (tag >= mvi->tags_num)
+ return -SAS_QUEUE_FULL;
+ mvs_tag_set(mvi, tag);
+ *tag_out = tag;
+ return 0;
+}
+
+void mvs_tag_init(struct mvs_info *mvi)
+{
+ int i;
+ for (i = 0; i < mvi->tags_num; ++i)
+ mvs_tag_clear(mvi, i);
+}
+
+void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
+{
+ u32 i;
+ u32 run;
+ u32 offset;
+
+ offset = 0;
+ while (size) {
+ printk(KERN_DEBUG"%08X : ", baseaddr + offset);
+ if (size >= 16)
+ run = 16;
+ else
+ run = size;
+ size -= run;
+ for (i = 0; i < 16; i++) {
+ if (i < run)
+ printk(KERN_DEBUG"%02X ", (u32)data[i]);
+ else
+ printk(KERN_DEBUG" ");
+ }
+ printk(KERN_DEBUG": ");
+ for (i = 0; i < run; i++)
+ printk(KERN_DEBUG"%c",
+ isalnum(data[i]) ? data[i] : '.');
+ printk(KERN_DEBUG"\n");
+ data = &data[16];
+ offset += run;
+ }
+ printk(KERN_DEBUG"\n");
+}
+
+#if (_MV_DUMP > 1)
+static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
+ enum sas_protocol proto)
+{
+ u32 offset;
+ struct mvs_slot_info *slot = &mvi->slot_info[tag];
+
+ offset = slot->cmd_size + MVS_OAF_SZ +
+ MVS_CHIP_DISP->prd_size() * slot->n_elem;
+ dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
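+/* Read back the SATA signature (D2H register FIS dwords) latched in the port config registers */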
+ tag);
+ mvs_hexdump(32, (u8 *) slot->response,
+ (u32) slot->buf_dma + offset);
+}
+#endif
+
+static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
+ enum sas_protocol proto)
+{
+#if (_MV_DUMP > 1)
+ u32 sz, w_ptr;
+ u64 addr;
+ struct mvs_slot_info *slot = &mvi->slot_info[tag];
+
+ /*Delivery Queue */
+ sz = MVS_CHIP_SLOT_SZ;
+ w_ptr = slot->tx;
+ addr = mvi->tx_dma;
+ dev_printk(KERN_DEBUG, mvi->dev,
+ "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
+ dev_printk(KERN_DEBUG, mvi->dev,
+ "Delivery Queue Base Address=0x%llX (PA)"
+ "(tx_dma=0x%llX), Entry=%04d\n",
+ addr, (unsigned long long)mvi->tx_dma, w_ptr);
+ mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
+ (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
+ /*Command List */
+ addr = mvi->slot_dma;
+ dev_printk(KERN_DEBUG, mvi->dev,
+ "Command List Base Address=0x%llX (PA)"
+ "(slot_dma=0x%llX), Header=%03d\n",
+ addr, (unsigned long long)slot->buf_dma, tag);
+ dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
+ /*mvs_cmd_hdr */
+ mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
+ (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
+ /*1.command table area */
+ dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
+ mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
+ /*2.open address frame area */
+ dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
+ mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
+ (u32) slot->buf_dma + slot->cmd_size);
+ /*3.status buffer */
+ mvs_hba_sb_dump(mvi, tag, proto);
+ /*4.PRD table */
+ dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
+ mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
+ (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
+ (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
+#endif
+}
+
+static void mvs_hba_cq_dump(struct mvs_info *mvi)
+{
+#if (_MV_DUMP > 2)
+ u64 addr;
+ void __iomem *regs = mvi->regs;
+ u32 entry = mvi->rx_cons + 1;
+ u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
+
+ /*Completion Queue */
+ addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
+ dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
+ mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
+ dev_printk(KERN_DEBUG, mvi->dev,
+ "Completion List Base Address=0x%llX (PA), "
+ "CQ_Entry=%04d, CQ_WP=0x%08X\n",
+ addr, entry - 1, mvi->rx[0]);
+ mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
+ mvi->rx_dma + sizeof(u32) * entry);
+#endif
+}
+
+void mvs_get_sas_addr(void *buf, u32 buflen)
+{
+ /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
+}
+
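+/*
+ * Map a domain device back to its owning mvs_info: locate the first phy of
+ * the device's port in the HA phy array and divide by the per-host phy count.
+ */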
+struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
+{
+ unsigned long i = 0, j = 0, hi = 0;
+ struct sas_ha_struct *sha = dev->port->ha;
+ struct mvs_info *mvi = NULL;
+ struct asd_sas_phy *phy;
+
+ while (sha->sas_port[i]) {
+ if (sha->sas_port[i] == dev->port) {
+ phy = container_of(sha->sas_port[i]->phy_list.next,
+ struct asd_sas_phy, port_phy_el);
+ j = 0;
+ while (sha->sas_phy[j]) {
+ if (sha->sas_phy[j] == phy)
+ break;
+ j++;
+ }
+ break;
+ }
+ i++;
+ }
+ hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
+
+ return mvi;
+
+}
+
+/* FIXME */
+int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
+{
+ unsigned long i = 0, j = 0, n = 0, num = 0;
+ struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ struct mvs_info *mvi = mvi_dev->mvi_info;
+ struct sas_ha_struct *sha = dev->port->ha;
+
+ while (sha->sas_port[i]) {
+ if (sha->sas_port[i] == dev->port) {
+ struct asd_sas_phy *phy;
+ list_for_each_entry(phy,
+ &sha->sas_port[i]->phy_list, port_phy_el) {
+ j = 0;
+ while (sha->sas_phy[j]) {
+ if (sha->sas_phy[j] == phy)
+ break;
+ j++;
+ }
+ phyno[n] = (j >= mvi->chip->n_phy) ?
+ (j - mvi->chip->n_phy) : j;
+ num++;
+ n++;
+ }
+ break;
+ }
+ i++;
+ }
+ return num;
+}
+
+static inline void mvs_free_reg_set(struct mvs_info *mvi,
+ struct mvs_device *dev)
+{
+ if (!dev) {
+		mv_printk("device has already been freed.\n");
+ return;
+ }
+ if (dev->runing_req != 0)
+ return;
+ if (dev->taskfileset == MVS_ID_NOT_MAPPED)
+ return;
+ MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
+}
+
+static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
+ struct mvs_device *dev)
+{
+ if (dev->taskfileset != MVS_ID_NOT_MAPPED)
+ return 0;
+ return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
+}
+
+void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
+{
+ u32 no;
+ for_each_phy(phy_mask, phy_mask, no) {
+ if (!(phy_mask & 1))
+ continue;
+ MVS_CHIP_DISP->phy_reset(mvi, no, hard);
+ }
+}
+
+/* FIXME: locking? */
+int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ void *funcdata)
+{
+ int rc = 0, phy_id = sas_phy->id;
+ u32 tmp, i = 0, hi;
+ struct sas_ha_struct *sha = sas_phy->ha;
+ struct mvs_info *mvi = NULL;
+
+ while (sha->sas_phy[i]) {
+ if (sha->sas_phy[i] == sas_phy)
+ break;
+ i++;
+ }
+ hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
+
+ switch (func) {
+ case PHY_FUNC_SET_LINK_RATE:
+ MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
+ break;
+
+ case PHY_FUNC_HARD_RESET:
+ tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
+ if (tmp & PHY_RST_HARD)
+ break;
+ MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
+ break;
+
+ case PHY_FUNC_LINK_RESET:
+ MVS_CHIP_DISP->phy_enable(mvi, phy_id);
+ MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
+ break;
+
+ case PHY_FUNC_DISABLE:
+ MVS_CHIP_DISP->phy_disable(mvi, phy_id);
+ break;
+ case PHY_FUNC_RELEASE_SPINUP_HOLD:
+ default:
+ rc = -EOPNOTSUPP;
+ }
+ msleep(200);
+ return rc;
+}
+
+void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
+ u32 off_lo, u32 off_hi, u64 sas_addr)
+{
+ u32 lo = (u32)sas_addr;
+ u32 hi = (u32)(sas_addr>>32);
+
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
+}
+
+static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
+{
+ struct mvs_phy *phy = &mvi->phy[i];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_ha_struct *sas_ha;
+ if (!phy->phy_attached)
+ return;
+
+ if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
+ && phy->phy_type & PORT_TYPE_SAS) {
+ return;
+ }
+
+ sas_ha = mvi->sas;
+ sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
+
+ if (sas_phy->phy) {
+ struct sas_phy *sphy = sas_phy->phy;
+
+ sphy->negotiated_linkrate = sas_phy->linkrate;
+ sphy->minimum_linkrate = phy->minimum_linkrate;
+ sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+ sphy->maximum_linkrate = phy->maximum_linkrate;
+ sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
+ }
+
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ struct sas_identify_frame *id;
+
+ id = (struct sas_identify_frame *)phy->frame_rcvd;
+ id->dev_type = phy->identify.device_type;
+ id->initiator_bits = SAS_PROTOCOL_ALL;
+ id->target_bits = phy->identify.target_port_protocols;
+ } else if (phy->phy_type & PORT_TYPE_SATA) {
+ /*Nothing*/
+ }
+	mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);
+
+ sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+
+ mvi->sas->notify_port_event(sas_phy,
+ PORTE_BYTES_DMAED);
+}
+
+int mvs_slave_alloc(struct scsi_device *scsi_dev)
+{
+ struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
+ if (dev_is_sata(dev)) {
+		/* We don't need to rescan targets
+		 * if the REPORT_LUNS request fails
+ */
+ if (scsi_dev->lun > 0)
+ return -ENXIO;
+ scsi_dev->tagged_supported = 1;
+ }
+
+ return sas_slave_alloc(scsi_dev);
+}
+
+int mvs_slave_configure(struct scsi_device *sdev)
+{
+ struct domain_device *dev = sdev_to_domain_dev(sdev);
+ int ret = sas_slave_configure(sdev);
+
+ if (ret)
+ return ret;
+ if (dev_is_sata(dev)) {
+ /* may set PIO mode */
+ #if MV_DISABLE_NCQ
+ struct ata_port *ap = dev->sata_dev.ap;
+ struct ata_device *adev = ap->link.device;
+ adev->flags |= ATA_DFLAG_NCQ_OFF;
+ scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
+ #endif
+ }
+ return 0;
+}
+
+void mvs_scan_start(struct Scsi_Host *shost)
+{
+ int i, j;
+ unsigned short core_nr;
+ struct mvs_info *mvi;
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+
+ for (j = 0; j < core_nr; j++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
+ for (i = 0; i < mvi->chip->n_phy; ++i)
+ mvs_bytes_dmaed(mvi, i);
+ }
+}
+
+int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ /* give the phy enabling interrupt event time to come in (1s
+ * is empirically about all it takes) */
+ if (time < HZ)
+ return 0;
+ /* Wait for discovery to finish */
+ scsi_flush_work(shost);
+ return 1;
+}
+
+static int mvs_task_prep_smp(struct mvs_info *mvi,
+ struct mvs_task_exec_info *tei)
+{
+ int elem, rc, i;
+ struct sas_task *task = tei->task;
+ struct mvs_cmd_hdr *hdr = tei->hdr;
+ struct domain_device *dev = task->dev;
+ struct asd_sas_port *sas_port = dev->port;
+ struct scatterlist *sg_req, *sg_resp;
+ u32 req_len, resp_len, tag = tei->tag;
+ void *buf_tmp;
+ u8 *buf_oaf;
+ dma_addr_t buf_tmp_dma;
+ void *buf_prd;
+ struct mvs_slot_info *slot = &mvi->slot_info[tag];
+ u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#if _MV_DUMP
+ u8 *buf_cmd;
+ void *from;
+#endif
+ /*
+ * DMA-map SMP request, response buffers
+ */
+ sg_req = &task->smp_task.smp_req;
+ elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ if (!elem)
+ return -ENOMEM;
+ req_len = sg_dma_len(sg_req);
+
+ sg_resp = &task->smp_task.smp_resp;
+ elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ if (!elem) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ resp_len = SB_RFB_MAX;
+
+ /* must be in dwords */
+ if ((req_len & 0x3) || (resp_len & 0x3)) {
+ rc = -EINVAL;
+ goto err_out_2;
+ }
+
+ /*
+ * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+ */
+
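+	/*
+	 * Outside of _MV_DUMP builds the SMP request is DMA'd straight from
+	 * its scatterlist; the slot buffer then carries only the open address
+	 * frame, PRD table and status buffer.
+	 */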
+ /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
+ buf_tmp = slot->buf;
+ buf_tmp_dma = slot->buf_dma;
+
+#if _MV_DUMP
+ buf_cmd = buf_tmp;
+ hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+ buf_tmp += req_len;
+ buf_tmp_dma += req_len;
+ slot->cmd_size = req_len;
+#else
+ hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
+#endif
+
+ /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+ buf_oaf = buf_tmp;
+ hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+ buf_tmp += MVS_OAF_SZ;
+ buf_tmp_dma += MVS_OAF_SZ;
+
+ /* region 3: PRD table *********************************** */
+ buf_prd = buf_tmp;
+ if (tei->n_elem)
+ hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+ else
+ hdr->prd_tbl = 0;
+
+ i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
+ buf_tmp += i;
+ buf_tmp_dma += i;
+
+	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
+ slot->response = buf_tmp;
+ hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+ if (mvi->flags & MVF_FLAG_SOC)
+ hdr->reserved[0] = 0;
+
+ /*
+ * Fill in TX ring and command slot header
+ */
+ slot->tx = mvi->tx_prod;
+ mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
+ TXQ_MODE_I | tag |
+ (sas_port->phy_mask << TXQ_PHY_SHIFT));
+
+ hdr->flags |= flags;
+ hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
+ hdr->tags = cpu_to_le32(tag);
+ hdr->data_len = 0;
+
+ /* generate open address frame hdr (first 12 bytes) */
+ /* initiator, SMP, ftype 1h */
+ buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
+ buf_oaf[1] = dev->linkrate & 0xf;
+ *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
+ memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+ /* fill in PRD (scatter/gather) table, if any */
+ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+
+#if _MV_DUMP
+ /* copy cmd table */
+ from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
+ memcpy(buf_cmd, from + sg_req->offset, req_len);
+ kunmap_atomic(from, KM_IRQ0);
+#endif
+ return 0;
+
+err_out_2:
+ dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
+ PCI_DMA_FROMDEVICE);
+err_out:
+ dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
+ PCI_DMA_TODEVICE);
+ return rc;
+}
+
+static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+
+ if (qc) {
+ if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ) {
+ *tag = qc->tag;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int mvs_task_prep_ata(struct mvs_info *mvi,
+ struct mvs_task_exec_info *tei)
+{
+ struct sas_task *task = tei->task;
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = dev->lldd_dev;
+ struct mvs_cmd_hdr *hdr = tei->hdr;
+ struct asd_sas_port *sas_port = dev->port;
+ struct mvs_slot_info *slot;
+ void *buf_prd;
+ u32 tag = tei->tag, hdr_tag;
+ u32 flags, del_q;
+ void *buf_tmp;
+ u8 *buf_cmd, *buf_oaf;
+ dma_addr_t buf_tmp_dma;
+ u32 i, req_len, resp_len;
+ const u32 max_resp_len = SB_RFB_MAX;
+
+ if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
+		mv_dprintk("No free register set for dev %d.\n",
+ mvi_dev->device_id);
+ return -EBUSY;
+ }
+ slot = &mvi->slot_info[tag];
+ slot->tx = mvi->tx_prod;
+ del_q = TXQ_MODE_I | tag |
+ (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+ (sas_port->phy_mask << TXQ_PHY_SHIFT) |
+ (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
+ mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
+
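+	/*
+	 * With the hotplug DMA fix, inbound transfers advertise the full PRD
+	 * count; the unused entries are later redirected to the trash bucket
+	 * by MVS_CHIP_DISP->dma_fix() below.
+	 */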
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+ if (task->data_dir == DMA_FROM_DEVICE)
+ flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
+ else
+ flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#else
+ flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#endif
+ if (task->ata_task.use_ncq)
+ flags |= MCH_FPDMA;
+ if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
+ if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
+ flags |= MCH_ATAPI;
+ }
+
+ /* FIXME: fill in port multiplier number */
+
+ hdr->flags = cpu_to_le32(flags);
+
+	/* FIXME: the low-order 5 bits hold the TAG when NCQ is enabled */
+ if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
+ task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+ else
+ hdr_tag = tag;
+
+ hdr->tags = cpu_to_le32(hdr_tag);
+
+ hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+ /*
+ * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+ */
+
+ /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
+ buf_cmd = buf_tmp = slot->buf;
+ buf_tmp_dma = slot->buf_dma;
+
+ hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+ buf_tmp += MVS_ATA_CMD_SZ;
+ buf_tmp_dma += MVS_ATA_CMD_SZ;
+#if _MV_DUMP
+ slot->cmd_size = MVS_ATA_CMD_SZ;
+#endif
+
+ /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+ /* used for STP. unused for SATA? */
+ buf_oaf = buf_tmp;
+ hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+ buf_tmp += MVS_OAF_SZ;
+ buf_tmp_dma += MVS_OAF_SZ;
+
+ /* region 3: PRD table ********************************************* */
+ buf_prd = buf_tmp;
+
+ if (tei->n_elem)
+ hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+ else
+ hdr->prd_tbl = 0;
+ i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
+
+ buf_tmp += i;
+ buf_tmp_dma += i;
+
+	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
+ /* FIXME: probably unused, for SATA. kept here just in case
+ * we get a STP/SATA error information record
+ */
+ slot->response = buf_tmp;
+ hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+ if (mvi->flags & MVF_FLAG_SOC)
+ hdr->reserved[0] = 0;
+
+ req_len = sizeof(struct host_to_dev_fis);
+ resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
+ sizeof(struct mvs_err_info) - i;
+
+ /* request, response lengths */
+ resp_len = min(resp_len, max_resp_len);
+ hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
+
+ if (likely(!task->ata_task.device_control_reg_update))
+ task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
+ /* fill in command FIS and ATAPI CDB */
+ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+ if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
+ memcpy(buf_cmd + STP_ATAPI_CMD,
+ task->ata_task.atapi_packet, 16);
+
+ /* generate open address frame hdr (first 12 bytes) */
+ /* initiator, STP, ftype 1h */
+ buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
+ buf_oaf[1] = dev->linkrate & 0xf;
+ *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
+ memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+ /* fill in PRD (scatter/gather) table, if any */
+ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+ if (task->data_dir == DMA_FROM_DEVICE)
+ MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
+ TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
+#endif
+ return 0;
+}
+
+static int mvs_task_prep_ssp(struct mvs_info *mvi,
+ struct mvs_task_exec_info *tei, int is_tmf,
+ struct mvs_tmf_task *tmf)
+{
+ struct sas_task *task = tei->task;
+ struct mvs_cmd_hdr *hdr = tei->hdr;
+ struct mvs_port *port = tei->port;
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = dev->lldd_dev;
+ struct asd_sas_port *sas_port = dev->port;
+ struct mvs_slot_info *slot;
+ void *buf_prd;
+ struct ssp_frame_hdr *ssp_hdr;
+ void *buf_tmp;
+ u8 *buf_cmd, *buf_oaf, fburst = 0;
+ dma_addr_t buf_tmp_dma;
+ u32 flags;
+ u32 resp_len, req_len, i, tag = tei->tag;
+ const u32 max_resp_len = SB_RFB_MAX;
+ u32 phy_mask;
+
+ slot = &mvi->slot_info[tag];
+
+ phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
+ sas_port->phy_mask) & TXQ_PHY_MASK;
+
+ slot->tx = mvi->tx_prod;
+ mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
+ (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
+ (phy_mask << TXQ_PHY_SHIFT));
+
+ flags = MCH_RETRY;
+ if (task->ssp_task.enable_first_burst) {
+ flags |= MCH_FBURST;
+ fburst = (1 << 7);
+ }
+ if (is_tmf)
+ flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
+ else
+ flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
+ hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
+ hdr->tags = cpu_to_le32(tag);
+ hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+ /*
+ * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+ */
+
+ /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
+ buf_cmd = buf_tmp = slot->buf;
+ buf_tmp_dma = slot->buf_dma;
+
+ hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+ buf_tmp += MVS_SSP_CMD_SZ;
+ buf_tmp_dma += MVS_SSP_CMD_SZ;
+#if _MV_DUMP
+ slot->cmd_size = MVS_SSP_CMD_SZ;
+#endif
+
+ /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+ buf_oaf = buf_tmp;
+ hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+ buf_tmp += MVS_OAF_SZ;
+ buf_tmp_dma += MVS_OAF_SZ;
+
+ /* region 3: PRD table ********************************************* */
+ buf_prd = buf_tmp;
+ if (tei->n_elem)
+ hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+ else
+ hdr->prd_tbl = 0;
+
+ i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
+ buf_tmp += i;
+ buf_tmp_dma += i;
+
+	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
+ slot->response = buf_tmp;
+ hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+ if (mvi->flags & MVF_FLAG_SOC)
+ hdr->reserved[0] = 0;
+
+ resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
+ sizeof(struct mvs_err_info) - i;
+ resp_len = min(resp_len, max_resp_len);
+
+ req_len = sizeof(struct ssp_frame_hdr) + 28;
+
+ /* request, response lengths */
+ hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
+
+ /* generate open address frame hdr (first 12 bytes) */
+ /* initiator, SSP, ftype 1h */
+ buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
+ buf_oaf[1] = dev->linkrate & 0xf;
+ *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
+ memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+ /* fill in SSP frame header (Command Table.SSP frame header) */
+ ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
+
+ if (is_tmf)
+ ssp_hdr->frame_type = SSP_TASK;
+ else
+ ssp_hdr->frame_type = SSP_COMMAND;
+
+ memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
+ HASHED_SAS_ADDR_SIZE);
+ memcpy(ssp_hdr->hashed_src_addr,
+ dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
+ ssp_hdr->tag = cpu_to_be16(tag);
+
+ /* fill in IU for TASK and Command Frame */
+ buf_cmd += sizeof(*ssp_hdr);
+ memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+
+ if (ssp_hdr->frame_type != SSP_TASK) {
+ buf_cmd[9] = fburst | task->ssp_task.task_attr |
+ (task->ssp_task.task_prio << 3);
+ memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
+ } else{
+ buf_cmd[10] = tmf->tmf;
+ switch (tmf->tmf) {
+ case TMF_ABORT_TASK:
+ case TMF_QUERY_TASK:
+ buf_cmd[12] =
+ (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
+ buf_cmd[13] =
+ tmf->tag_of_task_to_be_managed & 0xff;
+ break;
+ default:
+ break;
+ }
+ }
+ /* fill in PRD (scatter/gather) table, if any */
+ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+ return 0;
+}
+
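+/*
+ * Main I/O submission path: map the scatterlist, allocate a slot tag, build
+ * the protocol-specific command in the slot buffer and, once the batch is
+ * prepared, ring the TX doorbell via start_delivery.
+ */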
+#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
+static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
+ struct completion *completion,int is_tmf,
+ struct mvs_tmf_task *tmf)
+{
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ struct mvs_info *mvi = mvi_dev->mvi_info;
+ struct mvs_task_exec_info tei;
+ struct sas_task *t = task;
+ struct mvs_slot_info *slot;
+ u32 tag = 0xdeadbeef, rc, n_elem = 0;
+ u32 n = num, pass = 0;
+ unsigned long flags = 0;
+
+ if (!dev->port) {
+ struct task_status_struct *tsm = &t->task_status;
+
+ tsm->resp = SAS_TASK_UNDELIVERED;
+ tsm->stat = SAS_PHY_DOWN;
+ t->task_done(t);
+ return 0;
+ }
+
+ spin_lock_irqsave(&mvi->lock, flags);
+ do {
+ dev = t->dev;
+ mvi_dev = dev->lldd_dev;
+ if (DEV_IS_GONE(mvi_dev)) {
+ if (mvi_dev)
+ mv_dprintk("device %d not ready.\n",
+ mvi_dev->device_id);
+ else
+ mv_dprintk("device %016llx not ready.\n",
+ SAS_ADDR(dev->sas_addr));
+
+ rc = SAS_PHY_DOWN;
+ goto out_done;
+ }
+
+ if (dev->port->id >= mvi->chip->n_phy)
+ tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
+ else
+ tei.port = &mvi->port[dev->port->id];
+
+ if (!tei.port->port_attached) {
+ if (sas_protocol_ata(t->task_proto)) {
+				mv_dprintk("port %d has no "
+					"attached device.\n", dev->port->id);
+ rc = SAS_PHY_DOWN;
+ goto out_done;
+ } else {
+ struct task_status_struct *ts = &t->task_status;
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ t->task_done(t);
+ if (n > 1)
+ t = list_entry(t->list.next,
+ struct sas_task, list);
+ continue;
+ }
+ }
+
+ if (!sas_protocol_ata(t->task_proto)) {
+ if (t->num_scatter) {
+ n_elem = dma_map_sg(mvi->dev,
+ t->scatter,
+ t->num_scatter,
+ t->data_dir);
+ if (!n_elem) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ }
+ } else {
+ n_elem = t->num_scatter;
+ }
+
+ rc = mvs_tag_alloc(mvi, &tag);
+ if (rc)
+ goto err_out;
+
+ slot = &mvi->slot_info[tag];
+
+
+ t->lldd_task = NULL;
+ slot->n_elem = n_elem;
+ slot->slot_tag = tag;
+ memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+
+ tei.task = t;
+ tei.hdr = &mvi->slot[tag];
+ tei.tag = tag;
+ tei.n_elem = n_elem;
+ switch (t->task_proto) {
+ case SAS_PROTOCOL_SMP:
+ rc = mvs_task_prep_smp(mvi, &tei);
+ break;
+ case SAS_PROTOCOL_SSP:
+ rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
+ break;
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ rc = mvs_task_prep_ata(mvi, &tei);
+ break;
+ default:
+ dev_printk(KERN_ERR, mvi->dev,
+ "unknown sas_task proto: 0x%x\n",
+ t->task_proto);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc) {
+ mv_dprintk("rc is %x\n", rc);
+ goto err_out_tag;
+ }
+ slot->task = t;
+ slot->port = tei.port;
+ t->lldd_task = slot;
+ list_add_tail(&slot->entry, &tei.port->list);
+ /* TODO: select normal or high priority */
+ spin_lock(&t->task_state_lock);
+ t->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ spin_unlock(&t->task_state_lock);
+
+ mvs_hba_memory_dump(mvi, tag, t->task_proto);
+ mvi_dev->runing_req++;
+ ++pass;
+ mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
+ if (n > 1)
+ t = list_entry(t->list.next, struct sas_task, list);
+ } while (--n);
+ rc = 0;
+ goto out_done;
+
+err_out_tag:
+ mvs_tag_free(mvi, tag);
+err_out:
+
+ dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
+ if (!sas_protocol_ata(t->task_proto))
+ if (n_elem)
+ dma_unmap_sg(mvi->dev, t->scatter, n_elem,
+ t->data_dir);
+out_done:
+ if (likely(pass)) {
+ MVS_CHIP_DISP->start_delivery(mvi,
+ (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
+ }
+ spin_unlock_irqrestore(&mvi->lock, flags);
+ return rc;
+}
+
+int mvs_queue_command(struct sas_task *task, const int num,
+ gfp_t gfp_flags)
+{
+ return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
+}
+
+static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
+{
+ u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+ mvs_tag_clear(mvi, slot_idx);
+}
+
+static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
+ struct mvs_slot_info *slot, u32 slot_idx)
+{
+ if (!slot->task)
+ return;
+ if (!sas_protocol_ata(task->task_proto))
+ if (slot->n_elem)
+ dma_unmap_sg(mvi->dev, task->scatter,
+ slot->n_elem, task->data_dir);
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SMP:
+ dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
+ PCI_DMA_FROMDEVICE);
+ dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
+ PCI_DMA_TODEVICE);
+ break;
+
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SSP:
+ default:
+ /* do nothing */
+ break;
+ }
+ list_del_init(&slot->entry);
+ task->lldd_task = NULL;
+ slot->task = NULL;
+ slot->port = NULL;
+ slot->slot_tag = 0xFFFFFFFF;
+ mvs_slot_free(mvi, slot_idx);
+}
+
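+/*
+ * Program PHYR_WIDE_PORT on every phy: members of the wide port are given
+ * the full phy map, non-members are cleared.
+ */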
+static void mvs_update_wideport(struct mvs_info *mvi, int i)
+{
+ struct mvs_phy *phy = &mvi->phy[i];
+ struct mvs_port *port = phy->port;
+ int j, no;
+
+ for_each_phy(port->wide_port_phymap, j, no) {
+ if (j & 1) {
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
+ PHYR_WIDE_PORT);
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
+ port->wide_port_phymap);
+ } else {
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
+ PHYR_WIDE_PORT);
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
+ 0);
+ }
+ }
+}
+
+static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
+{
+ u32 tmp;
+ struct mvs_phy *phy = &mvi->phy[i];
+ struct mvs_port *port = phy->port;
+
+ tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
+ if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
+ if (!port)
+ phy->phy_attached = 1;
+ return tmp;
+ }
+
+ if (port) {
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ port->wide_port_phymap &= ~(1U << i);
+ if (!port->wide_port_phymap)
+ port->port_attached = 0;
+ mvs_update_wideport(mvi, i);
+ } else if (phy->phy_type & PORT_TYPE_SATA)
+ port->port_attached = 0;
+ phy->port = NULL;
+ phy->phy_attached = 0;
+ phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+ }
+ return 0;
+}
+
+static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
+{
+ u32 *s = (u32 *) buf;
+
+ if (!s)
+ return NULL;
+
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
+ s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
+ s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
+ s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+ MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
+ s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+	/* Workaround: treat some ATAPI devices as ATA */
+ if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
+ s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
+
+ return s;
+}
+
+static u32 mvs_is_sig_fis_received(u32 irq_status)
+{
+ return irq_status & PHYEV_SIG_FIS;
+}
+
+void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
+{
+ struct mvs_phy *phy = &mvi->phy[i];
+ struct sas_identify_frame *id;
+
+ id = (struct sas_identify_frame *)phy->frame_rcvd;
+
+ if (get_st) {
+ phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
+ phy->phy_status = mvs_is_phy_ready(mvi, i);
+ }
+
+ if (phy->phy_status) {
+ int oob_done = 0;
+ struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
+
+ oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
+
+ MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
+ if (phy->phy_type & PORT_TYPE_SATA) {
+ phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
+ if (mvs_is_sig_fis_received(phy->irq_status)) {
+ phy->phy_attached = 1;
+ phy->att_dev_sas_addr =
+ i + mvi->id * mvi->chip->n_phy;
+ if (oob_done)
+ sas_phy->oob_mode = SATA_OOB_MODE;
+ phy->frame_rcvd_size =
+ sizeof(struct dev_to_host_fis);
+ mvs_get_d2h_reg(mvi, i, id);
+ } else {
+ u32 tmp;
+ dev_printk(KERN_DEBUG, mvi->dev,
+ "Phy%d : No sig fis\n", i);
+ tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
+ MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
+ tmp | PHYEV_SIG_FIS);
+ phy->phy_attached = 0;
+ phy->phy_type &= ~PORT_TYPE_SATA;
+ MVS_CHIP_DISP->phy_reset(mvi, i, 0);
+ goto out_done;
+ }
+ } else if (phy->phy_type & PORT_TYPE_SAS
+ || phy->att_dev_info & PORT_SSP_INIT_MASK) {
+ phy->phy_attached = 1;
+ phy->identify.device_type =
+ phy->att_dev_info & PORT_DEV_TYPE_MASK;
+
+ if (phy->identify.device_type == SAS_END_DEV)
+ phy->identify.target_port_protocols =
+ SAS_PROTOCOL_SSP;
+ else if (phy->identify.device_type != NO_DEVICE)
+ phy->identify.target_port_protocols =
+ SAS_PROTOCOL_SMP;
+ if (oob_done)
+ sas_phy->oob_mode = SAS_OOB_MODE;
+ phy->frame_rcvd_size =
+ sizeof(struct sas_identify_frame);
+ }
+ memcpy(sas_phy->attached_sas_addr,
+ &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
+
+ if (MVS_CHIP_DISP->phy_work_around)
+ MVS_CHIP_DISP->phy_work_around(mvi, i);
+ }
+ mv_dprintk("port %d attach dev info is %x\n",
+ i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
+ mv_dprintk("port %d attach sas addr is %llx\n",
+ i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
+out_done:
+ if (get_st)
+ MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
+}
+
+static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
+{
+ struct sas_ha_struct *sas_ha = sas_phy->ha;
+ struct mvs_info *mvi = NULL; int i = 0, hi;
+ struct mvs_phy *phy = sas_phy->lldd_phy;
+ struct asd_sas_port *sas_port = sas_phy->port;
+ struct mvs_port *port;
+ unsigned long flags = 0;
+ if (!sas_port)
+ return;
+
+ while (sas_ha->sas_phy[i]) {
+ if (sas_ha->sas_phy[i] == sas_phy)
+ break;
+ i++;
+ }
+ hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
+ mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
+ if (sas_port->id >= mvi->chip->n_phy)
+ port = &mvi->port[sas_port->id - mvi->chip->n_phy];
+ else
+ port = &mvi->port[sas_port->id];
+ if (lock)
+ spin_lock_irqsave(&mvi->lock, flags);
+ port->port_attached = 1;
+ phy->port = port;
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ port->wide_port_phymap = sas_port->phy_mask;
+ mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
+ mvs_update_wideport(mvi, sas_phy->id);
+ }
+ if (lock)
+ spin_unlock_irqrestore(&mvi->lock, flags);
+}
+
+static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
+{
+ /*Nothing*/
+}
+
+
+void mvs_port_formed(struct asd_sas_phy *sas_phy)
+{
+ mvs_port_notify_formed(sas_phy, 1);
+}
+
+void mvs_port_deformed(struct asd_sas_phy *sas_phy)
+{
+ mvs_port_notify_deformed(sas_phy, 1);
+}
+
+struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
+{
+ u32 dev;
+ for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
+ if (mvi->devices[dev].dev_type == NO_DEVICE) {
+ mvi->devices[dev].device_id = dev;
+ return &mvi->devices[dev];
+ }
+ }
+
+ if (dev == MVS_MAX_DEVICES)
+		mv_printk("supports at most %d devices, ignoring.\n",
+ MVS_MAX_DEVICES);
+
+ return NULL;
+}
+
+void mvs_free_dev(struct mvs_device *mvi_dev)
+{
+ u32 id = mvi_dev->device_id;
+ memset(mvi_dev, 0, sizeof(*mvi_dev));
+ mvi_dev->device_id = id;
+ mvi_dev->dev_type = NO_DEVICE;
+ mvi_dev->dev_status = MVS_DEV_NORMAL;
+ mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
+}
+
+int mvs_dev_found_notify(struct domain_device *dev, int lock)
+{
+ unsigned long flags = 0;
+ int res = 0;
+ struct mvs_info *mvi = NULL;
+ struct domain_device *parent_dev = dev->parent;
+ struct mvs_device *mvi_device;
+
+ mvi = mvs_find_dev_mvi(dev);
+
+ if (lock)
+ spin_lock_irqsave(&mvi->lock, flags);
+
+ mvi_device = mvs_alloc_dev(mvi);
+ if (!mvi_device) {
+ res = -1;
+ goto found_out;
+ }
+ dev->lldd_dev = mvi_device;
+ mvi_device->dev_type = dev->dev_type;
+ mvi_device->mvi_info = mvi;
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+ int phy_id;
+ u8 phy_num = parent_dev->ex_dev.num_phys;
+ struct ex_phy *phy;
+ for (phy_id = 0; phy_id < phy_num; phy_id++) {
+ phy = &parent_dev->ex_dev.ex_phy[phy_id];
+ if (SAS_ADDR(phy->attached_sas_addr) ==
+ SAS_ADDR(dev->sas_addr)) {
+ mvi_device->attached_phy = phy_id;
+ break;
+ }
+ }
+
+ if (phy_id == phy_num) {
+			mv_printk("Error: no attached dev:%016llx "
+ "at ex:%016llx.\n",
+ SAS_ADDR(dev->sas_addr),
+ SAS_ADDR(parent_dev->sas_addr));
+ res = -1;
+ }
+ }
+
+found_out:
+ if (lock)
+ spin_unlock_irqrestore(&mvi->lock, flags);
+ return res;
+}
+
+int mvs_dev_found(struct domain_device *dev)
+{
+ return mvs_dev_found_notify(dev, 1);
+}
+
+void mvs_dev_gone_notify(struct domain_device *dev, int lock)
+{
+ unsigned long flags = 0;
+ struct mvs_device *mvi_dev = dev->lldd_dev;
+ struct mvs_info *mvi = mvi_dev->mvi_info;
+
+ if (lock)
+ spin_lock_irqsave(&mvi->lock, flags);
+
+ if (mvi_dev) {
+ mv_dprintk("found dev[%d:%x] is gone.\n",
+ mvi_dev->device_id, mvi_dev->dev_type);
+ mvs_free_reg_set(mvi, mvi_dev);
+ mvs_free_dev(mvi_dev);
+ } else {
+		mv_dprintk("device has already gone.\n");
+ }
+ dev->lldd_dev = NULL;
+
+ if (lock)
+ spin_unlock_irqrestore(&mvi->lock, flags);
+}
+
+
+void mvs_dev_gone(struct domain_device *dev)
+{
+ mvs_dev_gone_notify(dev, 1);
+}
+
+static struct sas_task *mvs_alloc_task(void)
+{
+ struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
+
+ if (task) {
+ INIT_LIST_HEAD(&task->list);
+ spin_lock_init(&task->task_state_lock);
+ task->task_state_flags = SAS_TASK_STATE_PENDING;
+ init_timer(&task->timer);
+ init_completion(&task->completion);
+ }
+ return task;
+}
+
+static void mvs_free_task(struct sas_task *task)
+{
+ if (task) {
+ BUG_ON(!list_empty(&task->list));
+ kfree(task);
+ }
+}
+
+static void mvs_task_done(struct sas_task *task)
+{
+ if (!del_timer(&task->timer))
+ return;
+ complete(&task->completion);
+}
+
+static void mvs_tmf_timedout(unsigned long data)
+{
+ struct sas_task *task = (struct sas_task *)data;
+
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ complete(&task->completion);
+}
+
+/* XXX */
+#define MVS_TASK_TIMEOUT 20
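+/*
+ * Build an internal SSP TMF task, queue it through mvs_task_exec() and wait
+ * for completion; retried up to three times, each try bounded by the timeout
+ * above.
+ */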
+static int mvs_exec_internal_tmf_task(struct domain_device *dev,
+ void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
+{
+ int res, retry;
+ struct sas_task *task = NULL;
+
+ for (retry = 0; retry < 3; retry++) {
+ task = mvs_alloc_task();
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = dev;
+ task->task_proto = dev->tproto;
+
+ memcpy(&task->ssp_task, parameter, para_len);
+ task->task_done = mvs_task_done;
+
+ task->timer.data = (unsigned long) task;
+ task->timer.function = mvs_tmf_timedout;
+ task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
+ add_timer(&task->timer);
+
+ res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
+
+ if (res) {
+ del_timer(&task->timer);
+			mv_printk("executing internal task failed: %d\n", res);
+ goto ex_err;
+ }
+
+ wait_for_completion(&task->completion);
+ res = -TMF_RESP_FUNC_FAILED;
+		/* Even if the TMF timed out, return directly. */
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
+ goto ex_err;
+ }
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAM_GOOD) {
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_UNDERRUN) {
+ /* no error, but return the number of bytes of
+ * underrun */
+ res = task->task_status.residual;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_OVERRUN) {
+ mv_dprintk("blocked task error.\n");
+ res = -EMSGSIZE;
+ break;
+ } else {
+ mv_dprintk(" task to dev %016llx response: 0x%x "
+ "status 0x%x\n",
+ SAS_ADDR(dev->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ mvs_free_task(task);
+ task = NULL;
+
+ }
+ }
+ex_err:
+ BUG_ON(retry == 3 && task != NULL);
+ if (task != NULL)
+ mvs_free_task(task);
+ return res;
+}
+
+static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
+ u8 *lun, struct mvs_tmf_task *tmf)
+{
+ struct sas_ssp_task ssp_task;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ if (!(dev->tproto & SAS_PROTOCOL_SSP))
+ return TMF_RESP_FUNC_ESUPP;
+
+ strncpy((u8 *)&ssp_task.LUN, lun, 8);
+
+ return mvs_exec_internal_tmf_task(dev, &ssp_task,
+ sizeof(ssp_task), tmf);
+}
+
+
+/* The standard mandates a link reset for ATA (type 0)
+ * and a hard reset for SSP (type 1), only for RECOVERY */
+static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
+{
+ int rc;
+ struct sas_phy *phy = sas_find_local_phy(dev);
+ int reset_type = (dev->dev_type == SATA_DEV ||
+ (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
+ rc = sas_phy_reset(phy, reset_type);
+ msleep(2000);
+ return rc;
+}
+
+/* mandatory SAM-3 */
+int mvs_lu_reset(struct domain_device *dev, u8 *lun)
+{
+ unsigned long flags;
+ int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_tmf_task tmf_task;
+ struct mvs_device * mvi_dev = dev->lldd_dev;
+ struct mvs_info *mvi = mvi_dev->mvi_info;
+
+ tmf_task.tmf = TMF_LU_RESET;
+ mvi_dev->dev_status = MVS_DEV_EH;
+ rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ num = mvs_find_dev_phyno(dev, phyno);
+ spin_lock_irqsave(&mvi->lock, flags);
+ for (i = 0; i < num; i++)
+ mvs_release_task(mvi, phyno[i], dev);
+ spin_unlock_irqrestore(&mvi->lock, flags);
+ }
+	/* If it failed, fall through to I_T nexus reset */
+ mv_printk("%s for device[%x]:rc= %d\n", __func__,
+ mvi_dev->device_id, rc);
+ return rc;
+}
+
+int mvs_I_T_nexus_reset(struct domain_device *dev)
+{
+ unsigned long flags;
+ int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ struct mvs_info *mvi = mvi_dev->mvi_info;
+
+ if (mvi_dev->dev_status != MVS_DEV_EH)
+ return TMF_RESP_FUNC_COMPLETE;
+ rc = mvs_debug_I_T_nexus_reset(dev);
+ mv_printk("%s for device[%x]:rc= %d\n",
+ __func__, mvi_dev->device_id, rc);
+
+	/* housekeeping */
+ num = mvs_find_dev_phyno(dev, phyno);
+ spin_lock_irqsave(&mvi->lock, flags);
+ for (i = 0; i < num; i++)
+ mvs_release_task(mvi, phyno[i], dev);
+ spin_unlock_irqrestore(&mvi->lock, flags);
+
+ return rc;
+}
+/* optional SAM-3 */
+int mvs_query_task(struct sas_task *task)
+{
+ u32 tag;
+ struct scsi_lun lun;
+ struct mvs_tmf_task tmf_task;
+ int rc = TMF_RESP_FUNC_FAILED;
+
+ if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+ struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ struct mvs_info *mvi = mvi_dev->mvi_info;
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+ rc = mvs_find_tag(mvi, task, &tag);
+ if (rc == 0) {
+ rc = TMF_RESP_FUNC_FAILED;
+ return rc;
+ }
+
+ tmf_task.tmf = TMF_QUERY_TASK;
+ tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+ rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ switch (rc) {
+		/* The task is still in the LUN, so release it */
+		case TMF_RESP_FUNC_SUCC:
+		/* The task is not in the LUN or it failed, reset the phy */
+ case TMF_RESP_FUNC_FAILED:
+ case TMF_RESP_FUNC_COMPLETE:
+ break;
+ }
+ }
+ mv_printk("%s:rc= %d\n", __func__, rc);
+ return rc;
+}
+
+/* mandatory SAM-3, still needs to free the task/slot info */
+int mvs_abort_task(struct sas_task *task)
+{
+ struct scsi_lun lun;
+ struct mvs_tmf_task tmf_task;
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ struct mvs_info *mvi = mvi_dev->mvi_info;
+ int rc = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ u32 tag;
+
+ if (mvi->exp_req)
+ mvi->exp_req--;
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ rc = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+ struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+ rc = mvs_find_tag(mvi, task, &tag);
+ if (rc == 0) {
+ mv_printk("No such tag in %s\n", __func__);
+ rc = TMF_RESP_FUNC_FAILED;
+ return rc;
+ }
+
+ tmf_task.tmf = TMF_ABORT_TASK;
+ tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+ rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+
+		/* if successful, clear the task and forward the callback. */
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ u32 slot_no;
+ struct mvs_slot_info *slot;
+
+ if (task->lldd_task) {
+ slot = task->lldd_task;
+ slot_no = (u32) (slot - mvi->slot_info);
+ mvs_slot_complete(mvi, slot_no, 1);
+ }
+ }
+ } else if (task->task_proto & SAS_PROTOCOL_SATA ||
+ task->task_proto & SAS_PROTOCOL_STP) {
+		/* TODO: free the register set */
+ } else {
+ /* SMP */
+
+ }
+out:
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ mv_printk("%s:rc= %d\n", __func__, rc);
+ return rc;
+}
+
+int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
+{
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_tmf_task tmf_task;
+
+ tmf_task.tmf = TMF_ABORT_TASK_SET;
+ rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+ return rc;
+}
+
+int mvs_clear_aca(struct domain_device *dev, u8 *lun)
+{
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_tmf_task tmf_task;
+
+ tmf_task.tmf = TMF_CLEAR_ACA;
+ rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+ return rc;
+}
+
+int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
+{
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_tmf_task tmf_task;
+
+ tmf_task.tmf = TMF_CLEAR_TASK_SET;
+ rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+ return rc;
+}
+
+static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
+ u32 slot_idx, int err)
+{
+ struct mvs_device *mvi_dev = task->dev->lldd_dev;
+ struct task_status_struct *tstat = &task->task_status;
+ struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
+ int stat = SAM_GOOD;
+
+
+ resp->frame_len = sizeof(struct dev_to_host_fis);
+ memcpy(&resp->ending_fis[0],
+ SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
+ sizeof(struct dev_to_host_fis));
+ tstat->buf_valid_size = sizeof(*resp);
+ if (unlikely(err))
+ stat = SAS_PROTO_RESPONSE;
+ return stat;
+}
+
+static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
+ u32 slot_idx)
+{
+ struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+ int stat;
+ u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
+ u32 tfs = 0;
+ enum mvs_port_type type = PORT_TYPE_SAS;
+
+ if (err_dw0 & CMD_ISS_STPD)
+ MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
+
+ MVS_CHIP_DISP->command_active(mvi, slot_idx);
+
+ stat = SAM_CHECK_COND;
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ stat = SAS_ABORTED_TASK;
+ break;
+ case SAS_PROTOCOL_SMP:
+ stat = SAM_CHECK_COND;
+ break;
+
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ {
+ if (err_dw0 == 0x80400002)
+			mv_printk("found a reserved error, why?\n");
+
+ task->ata_task.use_ncq = 0;
+ stat = SAS_PROTO_RESPONSE;
+ mvs_sata_done(mvi, task, slot_idx, 1);
+
+ }
+ break;
+ default:
+ break;
+ }
+
+ return stat;
+}
+
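+/*
+ * Per-descriptor completion path: translate the RX status into a libsas task
+ * status, release the slot and call the task's completion callback.
+ */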
+int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
+{
+ u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+ struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+ struct sas_task *task = slot->task;
+ struct mvs_device *mvi_dev = NULL;
+ struct task_status_struct *tstat;
+
+ bool aborted;
+ void *to;
+ enum exec_status sts;
+
+ if (mvi->exp_req)
+ mvi->exp_req--;
+ if (unlikely(!task || !task->lldd_task))
+ return -1;
+
+ tstat = &task->task_status;
+ mvi_dev = task->dev->lldd_dev;
+
+ mvs_hba_cq_dump(mvi);
+
+ spin_lock(&task->task_state_lock);
+ task->task_state_flags &=
+ ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+	/* race condition */
+ aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
+ spin_unlock(&task->task_state_lock);
+
+ memset(tstat, 0, sizeof(*tstat));
+ tstat->resp = SAS_TASK_COMPLETE;
+
+ if (unlikely(aborted)) {
+ tstat->stat = SAS_ABORTED_TASK;
+ if (mvi_dev)
+ mvi_dev->runing_req--;
+ if (sas_protocol_ata(task->task_proto))
+ mvs_free_reg_set(mvi, mvi_dev);
+
+ mvs_slot_task_free(mvi, task, slot, slot_idx);
+ return -1;
+ }
+
+ if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
+		mv_dprintk("port has no device.\n");
+ tstat->stat = SAS_PHY_DOWN;
+ goto out;
+ }
+
+ /*
+ if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
+ mv_dprintk("Find device[%016llx] RXQ_ERR %X,
+ err info:%016llx\n",
+ SAS_ADDR(task->dev->sas_addr),
+ rx_desc, (u64)(*(u64 *) slot->response));
+ }
+ */
+
+ /* error info record present */
+ if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
+ tstat->stat = mvs_slot_err(mvi, task, slot_idx);
+ goto out;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ /* hw says status == 0, datapres == 0 */
+ if (rx_desc & RXQ_GOOD) {
+ tstat->stat = SAM_GOOD;
+ tstat->resp = SAS_TASK_COMPLETE;
+ }
+ /* response frame present */
+ else if (rx_desc & RXQ_RSP) {
+ struct ssp_response_iu *iu = slot->response +
+ sizeof(struct mvs_err_info);
+ sas_ssp_task_response(mvi->dev, task, iu);
+ } else
+ tstat->stat = SAM_CHECK_COND;
+ break;
+
+ case SAS_PROTOCOL_SMP: {
+ struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+ tstat->stat = SAM_GOOD;
+ to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
+ memcpy(to + sg_resp->offset,
+ slot->response + sizeof(struct mvs_err_info),
+ sg_dma_len(sg_resp));
+ kunmap_atomic(to, KM_IRQ0);
+ break;
+ }
+
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
+ tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
+ break;
+ }
+
+ default:
+ tstat->stat = SAM_CHECK_COND;
+ break;
+ }
+
+out:
+ if (mvi_dev) {
+ mvi_dev->runing_req--;
+ if (sas_protocol_ata(task->task_proto))
+ mvs_free_reg_set(mvi, mvi_dev);
+ }
+ mvs_slot_task_free(mvi, task, slot, slot_idx);
+ sts = tstat->stat;
+
+ spin_unlock(&mvi->lock);
+ if (task->task_done)
+ task->task_done(task);
+ else
+		mv_dprintk("task has no task_done callback.\n");
+ spin_lock(&mvi->lock);
+
+ return sts;
+}
+
+void mvs_release_task(struct mvs_info *mvi,
+ int phy_no, struct domain_device *dev)
+{
+ int i = 0; u32 slot_idx;
+ struct mvs_phy *phy;
+ struct mvs_port *port;
+ struct mvs_slot_info *slot, *slot2;
+
+ phy = &mvi->phy[phy_no];
+ port = phy->port;
+ if (!port)
+ return;
+
+ list_for_each_entry_safe(slot, slot2, &port->list, entry) {
+ struct sas_task *task;
+ slot_idx = (u32) (slot - mvi->slot_info);
+ task = slot->task;
+
+ if (dev && task->dev != dev)
+ continue;
+
+ mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
+ slot_idx, slot->slot_tag, task);
+
+ if (task->task_proto & SAS_PROTOCOL_SSP) {
+ mv_printk("attached with SSP task CDB[");
+ for (i = 0; i < 16; i++)
+ mv_printk(" %02x", task->ssp_task.cdb[i]);
+ mv_printk(" ]\n");
+ }
+
+ mvs_slot_complete(mvi, slot_idx, 1);
+ }
+}
+
+static void mvs_phy_disconnected(struct mvs_phy *phy)
+{
+ phy->phy_attached = 0;
+ phy->att_dev_info = 0;
+ phy->att_dev_sas_addr = 0;
+}
+
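+/*
+ * Deferred handler for phy plug events, run with mvi->lock held: on a
+ * plug-out it re-checks phy readiness and tells libsas whether the
+ * device was removed or re-attached.
+ */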
+static void mvs_work_queue(struct work_struct *work)
+{
+ struct delayed_work *dw = container_of(work, struct delayed_work, work);
+ struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
+ struct mvs_info *mvi = mwq->mvi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mvi->lock, flags);
+ if (mwq->handler & PHY_PLUG_EVENT) {
+ u32 phy_no = (unsigned long) mwq->data;
+ struct sas_ha_struct *sas_ha = mvi->sas;
+ struct mvs_phy *phy = &mvi->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ if (phy->phy_event & PHY_PLUG_OUT) {
+ u32 tmp;
+ struct sas_identify_frame *id;
+ id = (struct sas_identify_frame *)phy->frame_rcvd;
+ tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
+ phy->phy_event &= ~PHY_PLUG_OUT;
+ if (!(tmp & PHY_READY_MASK)) {
+ sas_phy_disconnected(sas_phy);
+ mvs_phy_disconnected(phy);
+ sas_ha->notify_phy_event(sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+ mv_dprintk("phy%d Removed Device\n", phy_no);
+ } else {
+ MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+ mvs_update_phyinfo(mvi, phy_no, 1);
+ mvs_bytes_dmaed(mvi, phy_no);
+ mvs_port_notify_formed(sas_phy, 0);
+ mv_dprintk("phy%d Attached Device\n", phy_no);
+ }
+ }
+ }
+ list_del(&mwq->entry);
+ spin_unlock_irqrestore(&mvi->lock, flags);
+ kfree(mwq);
+}
+
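+/*
+ * Queue a delayed work item (fires after 2 seconds) for the given phy
+ * event; the item is tracked on mvi->wq_list until mvs_work_queue() runs.
+ */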
+static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
+{
+ struct mvs_wq *mwq;
+ int ret = 0;
+
+ mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
+ if (mwq) {
+ mwq->mvi = mvi;
+ mwq->data = data;
+ mwq->handler = handler;
+ MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
+ list_add_tail(&mwq->entry, &mvi->wq_list);
+ schedule_delayed_work(&mwq->work_q, HZ * 2);
+ } else
+ ret = -ENOMEM;
+
+ return ret;
+}
+
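+/*
+ * Timer callback armed in mvs_int_port() on COMWAKE: if the signature
+ * FIS has not arrived within the timeout, hard-reset the phy.
+ */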
+static void mvs_sig_time_out(unsigned long tphy)
+{
+ struct mvs_phy *phy = (struct mvs_phy *)tphy;
+ struct mvs_info *mvi = phy->mvi;
+ u8 phy_no;
+
+ for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
+ if (&mvi->phy[phy_no] == phy) {
+ mv_dprintk("Get signature time out, reset phy %d\n",
+ phy_no+mvi->id*mvi->chip->n_phy);
+ MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
+ }
+ }
+}
+
+static void mvs_sig_remove_timer(struct mvs_phy *phy)
+{
+ if (phy->timer.function)
+ del_timer(&phy->timer);
+ phy->timer.function = NULL;
+}
+
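+/*
+ * Per-port interrupt handler: handles STP decoding errors, device
+ * unplug (PHYEV_POOF), COMWAKE (arms the signature-FIS timer),
+ * signature FIS / identify-done attach and broadcast-change events.
+ */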
+void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
+{
+ u32 tmp;
+ struct sas_ha_struct *sas_ha = mvi->sas;
+ struct mvs_phy *phy = &mvi->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
+ mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
+ MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
+ mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
+ phy->irq_status);
+
+ /*
+ * 'events' carries the port-level event bits; check the per-port
+ * interrupt status to see what actually happened.
+ */
+
+ if (phy->irq_status & PHYEV_DCDR_ERR)
+ mv_dprintk("port %d STP decoding error.\n",
+ phy_no+mvi->id*mvi->chip->n_phy);
+
+ if (phy->irq_status & PHYEV_POOF) {
+ if (!(phy->phy_event & PHY_PLUG_OUT)) {
+ int dev_sata = phy->phy_type & PORT_TYPE_SATA;
+ int ready;
+ mvs_release_task(mvi, phy_no, NULL);
+ phy->phy_event |= PHY_PLUG_OUT;
+ mvs_handle_event(mvi,
+ (void *)(unsigned long)phy_no,
+ PHY_PLUG_EVENT);
+ ready = mvs_is_phy_ready(mvi, phy_no);
+ if (!ready)
+ mv_dprintk("phy%d Unplug Notice\n",
+ phy_no +
+ mvi->id * mvi->chip->n_phy);
+ if (ready || dev_sata) {
+ if (MVS_CHIP_DISP->stp_reset)
+ MVS_CHIP_DISP->stp_reset(mvi,
+ phy_no);
+ else
+ MVS_CHIP_DISP->phy_reset(mvi,
+ phy_no, 0);
+ return;
+ }
+ }
+ }
+
+ if (phy->irq_status & PHYEV_COMWAKE) {
+ tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
+ MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
+ tmp | PHYEV_SIG_FIS);
+ if (phy->timer.function == NULL) {
+ phy->timer.data = (unsigned long)phy;
+ phy->timer.function = mvs_sig_time_out;
+ phy->timer.expires = jiffies + 10*HZ;
+ add_timer(&phy->timer);
+ }
+ }
+ if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
+ phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
+ mvs_sig_remove_timer(phy);
+ mv_dprintk("notify plug in on phy[%d]\n", phy_no);
+ if (phy->phy_status) {
+ mdelay(10);
+ MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+ if (phy->phy_type & PORT_TYPE_SATA) {
+ tmp = MVS_CHIP_DISP->read_port_irq_mask(
+ mvi, phy_no);
+ tmp &= ~PHYEV_SIG_FIS;
+ MVS_CHIP_DISP->write_port_irq_mask(mvi,
+ phy_no, tmp);
+ }
+ mvs_update_phyinfo(mvi, phy_no, 0);
+ mvs_bytes_dmaed(mvi, phy_no);
+ /* if an unplug was pending, the device has come back: re-form the port */
+ if (phy->phy_event & PHY_PLUG_OUT) {
+ mvs_port_notify_formed(sas_phy, 0);
+ phy->phy_event &= ~PHY_PLUG_OUT;
+ }
+ } else {
+ mv_dprintk("plugin interrupt but phy%d is gone\n",
+ phy_no + mvi->id*mvi->chip->n_phy);
+ }
+ } else if (phy->irq_status & PHYEV_BROAD_CH) {
+ mv_dprintk("port %d broadcast change.\n",
+ phy_no + mvi->id*mvi->chip->n_phy);
+ /* workaround for Samsung disk drives: give them time to settle */
+ mdelay(1000);
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ }
+ MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
+}
+
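+/*
+ * Drain the RX (completion) ring: walk from the cached consumer index
+ * up to the hardware producer index mirrored in the ring's first dword,
+ * completing or freeing each slot as its descriptor indicates.
+ */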
+int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
+{
+ u32 rx_prod_idx, rx_desc;
+ bool attn = false;
+
+ /* the first dword in the RX ring is special: it contains
+ * a mirror of the hardware's RX producer index, so that
+ * we don't have to stall the CPU reading that register.
+ * The actual RX ring is offset by one dword, due to this.
+ */
+ rx_prod_idx = mvi->rx_cons;
+ mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
+ if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
+ return 0;
+
+ /* The completion entry may be posted late; re-read the producer
+ * index from the register and retry.
+ * Note: with interrupt coalescing enabled the register must be
+ * read every time.
+ */
+ if (unlikely(mvi->rx_cons == rx_prod_idx))
+ mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
+
+ if (mvi->rx_cons == rx_prod_idx)
+ return 0;
+
+ while (mvi->rx_cons != rx_prod_idx) {
+ /* increment our internal RX consumer pointer */
+ rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
+ rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
+
+ if (likely(rx_desc & RXQ_DONE))
+ mvs_slot_complete(mvi, rx_desc, 0);
+ if (rx_desc & RXQ_ATTN) {
+ attn = true;
+ } else if (rx_desc & RXQ_ERR) {
+ if (!(rx_desc & RXQ_DONE))
+ mvs_slot_complete(mvi, rx_desc, 0);
+ } else if (rx_desc & RXQ_SLOT_RESET) {
+ mvs_slot_free(mvi, rx_desc);
+ }
+ }
+
+ if (attn && self_clear)
+ MVS_CHIP_DISP->int_full(mvi);
+ return 0;
+}
+
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
new file mode 100644
index 00000000000..aa2270af1ba
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -0,0 +1,406 @@
+/*
+ * Marvell 88SE64xx/88SE94xx main function header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MV_SAS_H_
+#define _MV_SAS_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/vmalloc.h>
+#include <scsi/libsas.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/sas_ata.h>
+#include <linux/version.h>
+#include "mv_defs.h"
+
+#define DRV_NAME "mvsas"
+#define DRV_VERSION "0.8.2"
+#define _MV_DUMP 0
+#define MVS_ID_NOT_MAPPED 0x7f
+/* #define DISABLE_HOTPLUG_DMA_FIX */
+#define MAX_EXP_RUNNING_REQ 2
+#define WIDE_PORT_MAX_PHY 4
+#define MV_DISABLE_NCQ 0
+#define mv_printk(fmt, arg ...) \
+ printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
+#ifdef MV_DEBUG
+#define mv_dprintk(format, arg...) \
+ printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
+#else
+#define mv_dprintk(format, arg...)
+#endif
+#define MV_MAX_U32 0xffffffff
+
+extern struct mvs_tgt_initiator mvs_tgt;
+extern struct mvs_info *tgt_mvi;
+extern const struct mvs_dispatch mvs_64xx_dispatch;
+extern const struct mvs_dispatch mvs_94xx_dispatch;
+
+#define DEV_IS_EXPANDER(type) \
+ ((type == EDGE_DEV) || (type == FANOUT_DEV))
+
+#define bit(n) ((u32)1 << n)
+
+#define for_each_phy(__lseq_mask, __mc, __lseq) \
+ for ((__mc) = (__lseq_mask), (__lseq) = 0; \
+ (__mc) != 0 ; \
+ (++__lseq), (__mc) >>= 1)
+
+#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
+#define UNASSOC_D2H_FIS(id) \
+ ((void *) mvi->rx_fis + 0x100 * id)
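+/*
+ * Each SATA register set owns a 0x100-byte area in the received-FIS
+ * buffer starting at chip->fis_offs; the offsets below pick out the
+ * DMA setup, PIO setup, D2H and set-device-bits FIS within that area.
+ */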
+#define SATA_RECEIVED_FIS_LIST(reg_set) \
+ ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set)
+#define SATA_RECEIVED_SDB_FIS(reg_set) \
+ (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58)
+#define SATA_RECEIVED_D2H_FIS(reg_set) \
+ (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40)
+#define SATA_RECEIVED_PIO_FIS(reg_set) \
+ (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20)
+#define SATA_RECEIVED_DMA_FIS(reg_set) \
+ (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00)
+
+enum dev_status {
+ MVS_DEV_NORMAL = 0x0,
+ MVS_DEV_EH = 0x1,
+};
+
+
+struct mvs_info;
+
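+/*
+ * Per-chip-family operations table; the 64xx and 94xx backends each
+ * provide an instance with their register access, phy control and
+ * command delivery hooks.
+ */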
+struct mvs_dispatch {
+ char *name;
+ int (*chip_init)(struct mvs_info *mvi);
+ int (*spi_init)(struct mvs_info *mvi);
+ int (*chip_ioremap)(struct mvs_info *mvi);
+ void (*chip_iounmap)(struct mvs_info *mvi);
+ irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat);
+ u32 (*isr_status)(struct mvs_info *mvi, int irq);
+ void (*interrupt_enable)(struct mvs_info *mvi);
+ void (*interrupt_disable)(struct mvs_info *mvi);
+
+ u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port);
+ void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val);
+
+ u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port);
+ void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val);
+ void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr);
+
+ u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port);
+ void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val);
+ void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr);
+
+ u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port);
+ void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val);
+
+ u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
+ void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
+
+ void (*get_sas_addr)(void *buf, u32 buflen);
+ void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
+ void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
+ u32 tfs);
+ void (*start_delivery)(struct mvs_info *mvi, u32 tx);
+ u32 (*rx_update)(struct mvs_info *mvi);
+ void (*int_full)(struct mvs_info *mvi);
+ u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs);
+ void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs);
+ u32 (*prd_size)(void);
+ u32 (*prd_count)(void);
+ void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
+ void (*detect_porttype)(struct mvs_info *mvi, int i);
+ int (*oob_done)(struct mvs_info *mvi, int i);
+ void (*fix_phy_info)(struct mvs_info *mvi, int i,
+ struct sas_identify_frame *id);
+ void (*phy_work_around)(struct mvs_info *mvi, int i);
+ void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id,
+ struct sas_phy_linkrates *rates);
+ u32 (*phy_max_link_rate)(void);
+ void (*phy_disable)(struct mvs_info *mvi, u32 phy_id);
+ void (*phy_enable)(struct mvs_info *mvi, u32 phy_id);
+ void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard);
+ void (*stp_reset)(struct mvs_info *mvi, u32 phy_id);
+ void (*clear_active_cmds)(struct mvs_info *mvi);
+ u32 (*spi_read_data)(struct mvs_info *mvi);
+ void (*spi_write_data)(struct mvs_info *mvi, u32 data);
+ int (*spi_buildcmd)(struct mvs_info *mvi,
+ u32 *dwCmd,
+ u8 cmd,
+ u8 read,
+ u8 length,
+ u32 addr
+ );
+ int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
+ int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+ void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
+#endif
+
+};
+
+struct mvs_chip_info {
+ u32 n_host;
+ u32 n_phy;
+ u32 fis_offs;
+ u32 fis_count;
+ u32 srs_sz;
+ u32 slot_width;
+ const struct mvs_dispatch *dispatch;
+};
+#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
+#define MVS_RX_FISL_SZ \
+ (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
+#define MVS_CHIP_DISP (mvi->chip->dispatch)
+
+struct mvs_err_info {
+ __le32 flags;
+ __le32 flags2;
+};
+
+struct mvs_cmd_hdr {
+ __le32 flags; /* PRD tbl len; SAS, SATA ctl */
+ __le32 lens; /* cmd, max resp frame len */
+ __le32 tags; /* targ port xfer tag; tag */
+ __le32 data_len; /* data xfer len */
+ __le64 cmd_tbl; /* command table address */
+ __le64 open_frame; /* open addr frame address */
+ __le64 status_buf; /* status buffer address */
+ __le64 prd_tbl; /* PRD tbl address */
+ __le32 reserved[4];
+};
+
+struct mvs_port {
+ struct asd_sas_port sas_port;
+ u8 port_attached;
+ u8 wide_port_phymap;
+ struct list_head list;
+};
+
+struct mvs_phy {
+ struct mvs_info *mvi;
+ struct mvs_port *port;
+ struct asd_sas_phy sas_phy;
+ struct sas_identify identify;
+ struct scsi_device *sdev;
+ struct timer_list timer;
+ u64 dev_sas_addr;
+ u64 att_dev_sas_addr;
+ u32 att_dev_info;
+ u32 dev_info;
+ u32 phy_type;
+ u32 phy_status;
+ u32 irq_status;
+ u32 frame_rcvd_size;
+ u8 frame_rcvd[32];
+ u8 phy_attached;
+ u8 phy_mode;
+ u8 reserved[2];
+ u32 phy_event;
+ enum sas_linkrate minimum_linkrate;
+ enum sas_linkrate maximum_linkrate;
+};
+
+struct mvs_device {
+ struct list_head dev_entry;
+ enum sas_dev_type dev_type;
+ struct mvs_info *mvi_info;
+ struct domain_device *sas_device;
+ u32 attached_phy;
+ u32 device_id;
+ u32 runing_req;
+ u8 taskfileset;
+ u8 dev_status;
+ u16 reserved;
+};
+
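+/*
+ * Per-command slot bookkeeping: the DMA buffer holding the command
+ * table, open address frame, status buffer and PRD list, plus links
+ * to the owning port and device.
+ */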
+struct mvs_slot_info {
+ struct list_head entry;
+ union {
+ struct sas_task *task;
+ void *tdata;
+ };
+ u32 n_elem;
+ u32 tx;
+ u32 slot_tag;
+
+ /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
+ * and PRD table
+ */
+ void *buf;
+ dma_addr_t buf_dma;
+#if _MV_DUMP
+ u32 cmd_size;
+#endif
+ void *response;
+ struct mvs_port *port;
+ struct mvs_device *device;
+ void *open_frame;
+};
+
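+/*
+ * Per-host-controller state: DMA rings, slot table, phy/port arrays
+ * and the chip dispatch table, shared by the 64xx and 94xx backends.
+ */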
+struct mvs_info {
+ unsigned long flags;
+
+ /* host-wide lock */
+ spinlock_t lock;
+
+ /* our device */
+ struct pci_dev *pdev;
+ struct device *dev;
+
+ /* enhanced mode registers */
+ void __iomem *regs;
+
+ /* peripheral or soc registers */
+ void __iomem *regs_ex;
+ u8 sas_addr[SAS_ADDR_SIZE];
+
+ /* SCSI/SAS glue */
+ struct sas_ha_struct *sas;
+ struct Scsi_Host *shost;
+
+ /* TX (delivery) DMA ring */
+ __le32 *tx;
+ dma_addr_t tx_dma;
+
+ /* cached next-producer idx */
+ u32 tx_prod;
+
+ /* RX (completion) DMA ring */
+ __le32 *rx;
+ dma_addr_t rx_dma;
+
+ /* RX consumer idx */
+ u32 rx_cons;
+
+ /* RX'd FIS area */
+ __le32 *rx_fis;
+ dma_addr_t rx_fis_dma;
+
+ /* DMA command header slots */
+ struct mvs_cmd_hdr *slot;
+ dma_addr_t slot_dma;
+
+ u32 chip_id;
+ const struct mvs_chip_info *chip;
+
+ int tags_num;
+ DECLARE_BITMAP(tags, MVS_SLOTS);
+ /* further per-slot information */
+ struct mvs_phy phy[MVS_MAX_PHYS];
+ struct mvs_port port[MVS_MAX_PHYS];
+ u32 irq;
+ u32 exp_req;
+ u32 id;
+ u64 sata_reg_set;
+ struct list_head *hba_list;
+ struct list_head soc_entry;
+ struct list_head wq_list;
+ unsigned long instance;
+ u16 flashid;
+ u32 flashsize;
+ u32 flashsectSize;
+
+ void *addon;
+ struct mvs_device devices[MVS_MAX_DEVICES];
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+ void *bulk_buffer;
+ dma_addr_t bulk_buffer_dma;
+#define TRASH_BUCKET_SIZE 0x20000
+#endif
+ struct mvs_slot_info slot_info[0];
+};
+
+struct mvs_prv_info{
+ u8 n_host;
+ u8 n_phy;
+ u16 reserve;
+ struct mvs_info *mvi[2];
+};
+
+struct mvs_wq {
+ struct delayed_work work_q;
+ struct mvs_info *mvi;
+ void *data;
+ int handler;
+ struct list_head entry;
+};
+
+struct mvs_task_exec_info {
+ struct sas_task *task;
+ struct mvs_cmd_hdr *hdr;
+ struct mvs_port *port;
+ u32 tag;
+ int n_elem;
+};
+
+
+/******************** function prototype *********************/
+void mvs_get_sas_addr(void *buf, u32 buflen);
+void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
+void mvs_tag_free(struct mvs_info *mvi, u32 tag);
+void mvs_tag_set(struct mvs_info *mvi, unsigned int tag);
+int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out);
+void mvs_tag_init(struct mvs_info *mvi);
+void mvs_iounmap(void __iomem *regs);
+int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
+void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
+int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ void *funcdata);
+void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
+ u32 off_lo, u32 off_hi, u64 sas_addr);
+int mvs_slave_alloc(struct scsi_device *scsi_dev);
+int mvs_slave_configure(struct scsi_device *sdev);
+void mvs_scan_start(struct Scsi_Host *shost);
+int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
+int mvs_queue_command(struct sas_task *task, const int num,
+ gfp_t gfp_flags);
+int mvs_abort_task(struct sas_task *task);
+int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
+int mvs_clear_aca(struct domain_device *dev, u8 *lun);
+int mvs_clear_task_set(struct domain_device *dev, u8 * lun);
+void mvs_port_formed(struct asd_sas_phy *sas_phy);
+void mvs_port_deformed(struct asd_sas_phy *sas_phy);
+int mvs_dev_found(struct domain_device *dev);
+void mvs_dev_gone(struct domain_device *dev);
+int mvs_lu_reset(struct domain_device *dev, u8 *lun);
+int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
+int mvs_I_T_nexus_reset(struct domain_device *dev);
+int mvs_query_task(struct sas_task *task);
+void mvs_release_task(struct mvs_info *mvi, int phy_no,
+ struct domain_device *dev);
+void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
+void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
+int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
+void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
+#endif
+
diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild
index 0e207aa67d1..5fd73d77c3a 100644
--- a/drivers/scsi/osd/Kbuild
+++ b/drivers/scsi/osd/Kbuild
@@ -11,31 +11,6 @@
# it under the terms of the GNU General Public License version 2
#
-ifneq ($(OSD_INC),)
-# we are built out-of-tree Kconfigure everything as on
-
-CONFIG_SCSI_OSD_INITIATOR=m
-ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE
-
-CONFIG_SCSI_OSD_ULD=m
-ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE
-
-# CONFIG_SCSI_OSD_DPRINT_SENSE =
-# 0 - no print of errors
-# 1 - print errors
-# 2 - errors + warrnings
-ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1
-
-# Uncomment to turn debug on
-# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG
-
-# if we are built out-of-tree and the hosting kernel has OSD headers
-# then "ccflags-y +=" will not pick the out-off-tree headers. Only by doing
-# this it will work. This might break in future kernels
-LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE)
-
-endif
-
# libosd.ko - osd-initiator library
libosd-y := osd_initiator.o
obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o
diff --git a/drivers/scsi/osd/Makefile b/drivers/scsi/osd/Makefile
deleted file mode 100755
index d905344f83b..00000000000
--- a/drivers/scsi/osd/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-# Makefile for the OSD modules (out of tree)
-#
-# Copyright (C) 2008 Panasas Inc. All rights reserved.
-#
-# Authors:
-# Boaz Harrosh <bharrosh@panasas.com>
-# Benny Halevy <bhalevy@panasas.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2
-#
-# This Makefile is used to call the kernel Makefile in case of an out-of-tree
-# build.
-# $KSRC should point to a Kernel source tree otherwise host's default is
-# used. (eg. /lib/modules/`uname -r`/build)
-
-# include path for out-of-tree Headers
-OSD_INC ?= `pwd`/../../../include
-
-# allow users to override these
-# e.g. to compile for a kernel that you aren't currently running
-KSRC ?= /lib/modules/$(shell uname -r)/build
-KBUILD_OUTPUT ?=
-ARCH ?=
-V ?= 0
-
-# this is the basic Kbuild out-of-tree invocation, with the M= option
-KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V)
-
-all: libosd
-
-libosd: ;
- $(KBUILD_BASE) OSD_INC=$(OSD_INC) modules
-
-clean:
- $(KBUILD_BASE) clean
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 1ce6b24abab..7a117c18114 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -118,39 +118,39 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
_osd_ver_desc(or));
pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("OSD_ATTR_RI_VENDOR_IDENTIFICATION [%s]\n",
+ OSD_INFO("VENDOR_IDENTIFICATION [%s]\n",
(char *)pFirst);
pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("OSD_ATTR_RI_PRODUCT_IDENTIFICATION [%s]\n",
+ OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
(char *)pFirst);
pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("OSD_ATTR_RI_PRODUCT_MODEL [%s]\n",
+ OSD_INFO("PRODUCT_MODEL [%s]\n",
(char *)pFirst);
pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("OSD_ATTR_RI_PRODUCT_REVISION_LEVEL [%u]\n",
+ OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
pFirst ? get_unaligned_be32(pFirst) : ~0U);
pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER [%s]\n",
+ OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
(char *)pFirst);
pFirst = get_attrs[a].val_ptr;
- OSD_INFO("OSD_ATTR_RI_OSD_NAME [%s]\n", (char *)pFirst);
+ OSD_INFO("OSD_NAME [%s]\n", (char *)pFirst);
a++;
pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("OSD_ATTR_RI_TOTAL_CAPACITY [0x%llx]\n",
+ OSD_INFO("TOTAL_CAPACITY [0x%llx]\n",
pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("OSD_ATTR_RI_USED_CAPACITY [0x%llx]\n",
+ OSD_INFO("USED_CAPACITY [0x%llx]\n",
pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("OSD_ATTR_RI_NUMBER_OF_PARTITIONS [%llu]\n",
+ OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n",
pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
if (a >= nelem)
@@ -158,7 +158,7 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
/* FIXME: Where are the time utilities */
pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("OSD_ATTR_RI_CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
+ OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
((char *)pFirst)[0], ((char *)pFirst)[1],
((char *)pFirst)[2], ((char *)pFirst)[3],
((char *)pFirst)[4], ((char *)pFirst)[5]);
@@ -169,7 +169,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
sid_dump, sizeof(sid_dump), true);
- OSD_INFO("OSD_ATTR_RI_OSD_SYSTEM_ID(%d) [%s]\n", len, sid_dump);
+ OSD_INFO("OSD_SYSTEM_ID(%d)\n"
+ " [%s]\n", len, sid_dump);
a++;
}
out:
@@ -669,7 +670,7 @@ static int _osd_req_list_objects(struct osd_request *or,
__be16 action, const struct osd_obj_id *obj, osd_id initial_id,
struct osd_obj_id_list *list, unsigned nelem)
{
- struct request_queue *q = or->osd_dev->scsi_device->request_queue;
+ struct request_queue *q = osd_request_queue(or->osd_dev);
u64 len = nelem * sizeof(osd_id) + sizeof(*list);
struct bio *bio;
@@ -778,16 +779,32 @@ EXPORT_SYMBOL(osd_req_remove_object);
*/
void osd_req_write(struct osd_request *or,
- const struct osd_obj_id *obj, struct bio *bio, u64 offset)
+ const struct osd_obj_id *obj, u64 offset,
+ struct bio *bio, u64 len)
{
- _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, bio->bi_size);
+ _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
WARN_ON(or->out.bio || or->out.total_bytes);
- bio->bi_rw |= (1 << BIO_RW);
+ WARN_ON(0 == bio_rw_flagged(bio, BIO_RW));
or->out.bio = bio;
- or->out.total_bytes = bio->bi_size;
+ or->out.total_bytes = len;
}
EXPORT_SYMBOL(osd_req_write);
+int osd_req_write_kern(struct osd_request *or,
+ const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
+{
+ struct request_queue *req_q = osd_request_queue(or->osd_dev);
+ struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
+ osd_req_write(or, obj, offset, bio, len);
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_write_kern);
+
/*TODO: void osd_req_append(struct osd_request *,
const struct osd_obj_id *, struct bio *data_out); */
/*TODO: void osd_req_create_write(struct osd_request *,
@@ -813,16 +830,31 @@ void osd_req_flush_object(struct osd_request *or,
EXPORT_SYMBOL(osd_req_flush_object);
void osd_req_read(struct osd_request *or,
- const struct osd_obj_id *obj, struct bio *bio, u64 offset)
+ const struct osd_obj_id *obj, u64 offset,
+ struct bio *bio, u64 len)
{
- _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, bio->bi_size);
+ _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
WARN_ON(or->in.bio || or->in.total_bytes);
- bio->bi_rw &= ~(1 << BIO_RW);
+ WARN_ON(1 == bio_rw_flagged(bio, BIO_RW));
or->in.bio = bio;
- or->in.total_bytes = bio->bi_size;
+ or->in.total_bytes = len;
}
EXPORT_SYMBOL(osd_req_read);
+int osd_req_read_kern(struct osd_request *or,
+ const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
+{
+ struct request_queue *req_q = osd_request_queue(or->osd_dev);
+ struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ osd_req_read(or, obj, offset, bio, len);
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_read_kern);
+
void osd_req_get_attributes(struct osd_request *or,
const struct osd_obj_id *obj)
{
@@ -889,26 +921,6 @@ int osd_req_add_set_attr_list(struct osd_request *or,
}
EXPORT_SYMBOL(osd_req_add_set_attr_list);
-static int _append_map_kern(struct request *req,
- void *buff, unsigned len, gfp_t flags)
-{
- struct bio *bio;
- int ret;
-
- bio = bio_map_kern(req->q, buff, len, flags);
- if (IS_ERR(bio)) {
- OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
- PTR_ERR(bio));
- return PTR_ERR(bio);
- }
- ret = blk_rq_append_bio(req->q, req, bio);
- if (ret) {
- OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
- bio_put(bio);
- }
- return ret;
-}
-
static int _req_append_segment(struct osd_request *or,
unsigned padding, struct _osd_req_data_segment *seg,
struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
@@ -924,14 +936,14 @@ static int _req_append_segment(struct osd_request *or,
else
pad_buff = io->pad_buff;
- ret = _append_map_kern(io->req, pad_buff, padding,
+ ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
or->alloc_flags);
if (ret)
return ret;
io->total_bytes += padding;
}
- ret = _append_map_kern(io->req, seg->buff, seg->total_bytes,
+ ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
or->alloc_flags);
if (ret)
return ret;
@@ -1233,7 +1245,7 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1,
}
static int _osd_req_finalize_data_integrity(struct osd_request *or,
- bool has_in, bool has_out, const u8 *cap_key)
+ bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key)
{
struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
int ret;
@@ -1248,8 +1260,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
};
unsigned pad;
- or->out_data_integ.data_bytes = cpu_to_be64(
- or->out.bio ? or->out.bio->bi_size : 0);
+ or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
or->out_data_integ.set_attributes_bytes = cpu_to_be64(
or->set_attr.total_bytes);
or->out_data_integ.get_attributes_bytes = cpu_to_be64(
@@ -1293,6 +1304,21 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
/*
* osd_finalize_request and helpers
*/
+static struct request *_make_request(struct request_queue *q, bool has_write,
+ struct _osd_io_info *oii, gfp_t flags)
+{
+ if (oii->bio)
+ return blk_make_request(q, oii->bio, flags);
+ else {
+ struct request *req;
+
+ req = blk_get_request(q, has_write ? WRITE : READ, flags);
+ if (unlikely(!req))
+ return ERR_PTR(-ENOMEM);
+
+ return req;
+ }
+}
static int _init_blk_request(struct osd_request *or,
bool has_in, bool has_out)
@@ -1301,14 +1327,18 @@ static int _init_blk_request(struct osd_request *or,
struct scsi_device *scsi_device = or->osd_dev->scsi_device;
struct request_queue *q = scsi_device->request_queue;
struct request *req;
- int ret = -ENOMEM;
+ int ret;
- req = blk_get_request(q, has_out, flags);
- if (!req)
+ req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
goto out;
+ }
or->request = req;
req->cmd_type = REQ_TYPE_BLOCK_PC;
+ req->cmd_flags |= REQ_QUIET;
+
req->timeout = or->timeout;
req->retries = or->retries;
req->sense = or->sense;
@@ -1318,9 +1348,10 @@ static int _init_blk_request(struct osd_request *or,
or->out.req = req;
if (has_in) {
/* allocate bidi request */
- req = blk_get_request(q, READ, flags);
- if (!req) {
+ req = _make_request(q, false, &or->in, flags);
+ if (IS_ERR(req)) {
OSD_DEBUG("blk_get_request for bidi failed\n");
+ ret = PTR_ERR(req);
goto out;
}
req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1341,6 +1372,7 @@ int osd_finalize_request(struct osd_request *or,
{
struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
bool has_in, has_out;
+ u64 out_data_bytes = or->out.total_bytes;
int ret;
if (options & OSD_REQ_FUA)
@@ -1364,26 +1396,6 @@ int osd_finalize_request(struct osd_request *or,
return ret;
}
- if (or->out.bio) {
- ret = blk_rq_append_bio(or->request->q, or->out.req,
- or->out.bio);
- if (ret) {
- OSD_DEBUG("blk_rq_append_bio out failed\n");
- return ret;
- }
- OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
- _LLU(or->out.total_bytes), or->out.req->data_len);
- }
- if (or->in.bio) {
- ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
- if (ret) {
- OSD_DEBUG("blk_rq_append_bio in failed\n");
- return ret;
- }
- OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
- _LLU(or->in.total_bytes), or->in.req->data_len);
- }
-
or->out.pad_buff = sg_out_pad_buffer;
or->in.pad_buff = sg_in_pad_buffer;
@@ -1410,7 +1422,8 @@ int osd_finalize_request(struct osd_request *or,
}
}
- ret = _osd_req_finalize_data_integrity(or, has_in, has_out, cap_key);
+ ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
+ out_data_bytes, cap_key);
if (ret)
return ret;
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 22b59e13ba8..0bdef339090 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -49,6 +49,7 @@
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/major.h>
+#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_driver.h>
@@ -175,10 +176,9 @@ static const struct file_operations osd_fops = {
struct osd_dev *osduld_path_lookup(const char *name)
{
- struct path path;
- struct inode *inode;
- struct cdev *cdev;
- struct osd_uld_device *uninitialized_var(oud);
+ struct osd_uld_device *oud;
+ struct osd_dev *od;
+ struct file *file;
int error;
if (!name || !*name) {
@@ -186,52 +186,46 @@ struct osd_dev *osduld_path_lookup(const char *name)
return ERR_PTR(-EINVAL);
}
- error = kern_path(name, LOOKUP_FOLLOW, &path);
- if (error) {
- OSD_ERR("path_lookup of %s failed=>%d\n", name, error);
- return ERR_PTR(error);
- }
+ od = kzalloc(sizeof(*od), GFP_KERNEL);
+ if (!od)
+ return ERR_PTR(-ENOMEM);
- inode = path.dentry->d_inode;
- error = -EINVAL; /* Not the right device e.g osd_uld_device */
- if (!S_ISCHR(inode->i_mode)) {
- OSD_DEBUG("!S_ISCHR()\n");
- goto out;
+ file = filp_open(name, O_RDWR, 0);
+ if (IS_ERR(file)) {
+ error = PTR_ERR(file);
+ goto free_od;
}
- cdev = inode->i_cdev;
- if (!cdev) {
- OSD_ERR("Before mounting an OSD Based filesystem\n");
- OSD_ERR(" user-mode must open+close the %s device\n", name);
- OSD_ERR(" Example: bash: echo < %s\n", name);
- goto out;
+ if (file->f_op != &osd_fops){
+ error = -EINVAL;
+ goto close_file;
}
- /* The Magic wand. Is it our char-dev */
- /* TODO: Support sg devices */
- if (cdev->owner != THIS_MODULE) {
- OSD_ERR("Error mounting %s - is not an OSD device\n", name);
- goto out;
- }
+ oud = file->private_data;
- oud = container_of(cdev, struct osd_uld_device, cdev);
+ *od = oud->od;
+ od->file = file;
- __uld_get(oud);
- error = 0;
+ return od;
-out:
- path_put(&path);
- return error ? ERR_PTR(error) : &oud->od;
+close_file:
+ fput(file);
+free_od:
+ kfree(od);
+ return ERR_PTR(error);
}
EXPORT_SYMBOL(osduld_path_lookup);
void osduld_put_device(struct osd_dev *od)
{
- if (od) {
- struct osd_uld_device *oud = container_of(od,
- struct osd_uld_device, od);
- __uld_put(oud);
+ if (od && !IS_ERR(od)) {
+ struct osd_uld_device *oud = od->file->private_data;
+
+ BUG_ON(od->scsi_device != oud->od.scsi_device);
+
+ fput(od->file);
+ kfree(od);
}
}
EXPORT_SYMBOL(osduld_put_device);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 5defe5ea5ed..8371d917a9a 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -17,9 +17,12 @@
* General Public License for more details.
*
******************************************************************************/
-#define QLA1280_VERSION "3.26"
+#define QLA1280_VERSION "3.27"
/*****************************************************************************
Revision History:
+ Rev 3.27, February 10, 2009, Michael Reed
+ - General code cleanup.
+ - Improve error recovery.
Rev 3.26, January 16, 2006 Jes Sorensen
- Ditch all < 2.6 support
Rev 3.25.1, February 10, 2005 Christoph Hellwig
@@ -435,7 +438,6 @@ static int qla1280_mailbox_command(struct scsi_qla_host *,
uint8_t, uint16_t *);
static int qla1280_bus_reset(struct scsi_qla_host *, int);
static int qla1280_device_reset(struct scsi_qla_host *, int, int);
-static int qla1280_abort_device(struct scsi_qla_host *, int, int, int);
static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
static int qla1280_abort_isp(struct scsi_qla_host *);
#ifdef QLA_64BIT_PTR
@@ -698,7 +700,7 @@ qla1280_info(struct Scsi_Host *host)
}
/**************************************************************************
- * qla1200_queuecommand
+ * qla1280_queuecommand
* Queue a command to the controller.
*
* Note:
@@ -713,12 +715,14 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
- struct srb *sp = (struct srb *)&cmd->SCp;
+ struct srb *sp = (struct srb *)CMD_SP(cmd);
int status;
cmd->scsi_done = fn;
sp->cmd = cmd;
sp->flags = 0;
+ sp->wait = NULL;
+ CMD_HANDLE(cmd) = (unsigned char *)NULL;
qla1280_print_scsi_cmd(5, cmd);
@@ -738,21 +742,11 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
enum action {
ABORT_COMMAND,
- ABORT_DEVICE,
DEVICE_RESET,
BUS_RESET,
ADAPTER_RESET,
- FAIL
};
-/* timer action for error action processor */
-static void qla1280_error_wait_timeout(unsigned long __data)
-{
- struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data;
- struct srb *sp = (struct srb *)CMD_SP(cmd);
-
- complete(sp->wait);
-}
static void qla1280_mailbox_timeout(unsigned long __data)
{
@@ -767,8 +761,67 @@ static void qla1280_mailbox_timeout(unsigned long __data)
complete(ha->mailbox_wait);
}
+static int
+_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
+ struct completion *wait)
+{
+ int status = FAILED;
+ struct scsi_cmnd *cmd = sp->cmd;
+
+ spin_unlock_irq(ha->host->host_lock);
+ wait_for_completion_timeout(wait, 4*HZ);
+ spin_lock_irq(ha->host->host_lock);
+ sp->wait = NULL;
+ if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
+ status = SUCCESS;
+ (*cmd->scsi_done)(cmd);
+ }
+ return status;
+}
+
+static int
+qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ sp->wait = &wait;
+ return _qla1280_wait_for_single_command(ha, sp, &wait);
+}
+
+static int
+qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
+{
+ int cnt;
+ int status;
+ struct srb *sp;
+ struct scsi_cmnd *cmd;
+
+ status = SUCCESS;
+
+ /*
+ * Wait for all commands with the designated bus/target
+ * to be completed by the firmware
+ */
+ for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = ha->outstanding_cmds[cnt];
+ if (sp) {
+ cmd = sp->cmd;
+
+ if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
+ continue;
+ if (target >= 0 && SCSI_TCN_32(cmd) != target)
+ continue;
+
+ status = qla1280_wait_for_single_command(ha, sp);
+ if (status == FAILED)
+ break;
+ }
+ }
+ return status;
+}
+
/**************************************************************************
- * qla1200_error_action
+ * qla1280_error_action
* The function will attempt to perform a specified error action and
* wait for the results (or time out).
*
@@ -780,11 +833,6 @@ static void qla1280_mailbox_timeout(unsigned long __data)
* Returns:
* SUCCESS or FAILED
*
- * Note:
- * Resetting the bus always succeeds - is has to, otherwise the
- * kernel will panic! Try a surgical technique - sending a BUS
- * DEVICE RESET message - on the offending target before pulling
- * the SCSI bus reset line.
**************************************************************************/
static int
qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
@@ -792,13 +840,19 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
struct scsi_qla_host *ha;
int bus, target, lun;
struct srb *sp;
- uint16_t data;
- unsigned char *handle;
- int result, i;
+ int i, found;
+ int result=FAILED;
+ int wait_for_bus=-1;
+ int wait_for_target = -1;
DECLARE_COMPLETION_ONSTACK(wait);
- struct timer_list timer;
+
+ ENTER("qla1280_error_action");
ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
+ sp = (struct srb *)CMD_SP(cmd);
+ bus = SCSI_BUS_32(cmd);
+ target = SCSI_TCN_32(cmd);
+ lun = SCSI_LUN_32(cmd);
dprintk(4, "error_action %i, istatus 0x%04x\n", action,
RD_REG_WORD(&ha->iobase->istatus));
@@ -807,99 +861,47 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
RD_REG_WORD(&ha->iobase->host_cmd),
RD_REG_WORD(&ha->iobase->ictrl), jiffies);
- ENTER("qla1280_error_action");
if (qla1280_verbose)
printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
"Handle=0x%p, action=0x%x\n",
ha->host_no, cmd, CMD_HANDLE(cmd), action);
- if (cmd == NULL) {
- printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL "
- "si_Cmnd pointer, failing.\n");
- LEAVE("qla1280_error_action");
- return FAILED;
- }
-
- ha = (struct scsi_qla_host *)cmd->device->host->hostdata;
- sp = (struct srb *)CMD_SP(cmd);
- handle = CMD_HANDLE(cmd);
-
- /* Check for pending interrupts. */
- data = qla1280_debounce_register(&ha->iobase->istatus);
- /*
- * The io_request_lock is held when the reset handler is called, hence
- * the interrupt handler cannot be running in parallel as it also
- * grabs the lock. /Jes
- */
- if (data & RISC_INT)
- qla1280_isr(ha, &ha->done_q);
-
/*
- * Determine the suggested action that the mid-level driver wants
- * us to perform.
+ * Check to see if we have the command in the outstanding_cmds[]
+ * array. If not then it must have completed before this error
+ * action was initiated. If the error_action isn't ABORT_COMMAND
+ * then the driver must proceed with the requested action.
*/
- if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) {
- if(action == ABORT_COMMAND) {
- /* we never got this command */
- printk(KERN_INFO "qla1280: Aborting a NULL handle\n");
- return SUCCESS; /* no action - we don't have command */
+ found = -1;
+ for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
+ if (sp == ha->outstanding_cmds[i]) {
+ found = i;
+ sp->wait = &wait; /* we'll wait for it to complete */
+ break;
}
- } else {
- sp->wait = &wait;
}
- bus = SCSI_BUS_32(cmd);
- target = SCSI_TCN_32(cmd);
- lun = SCSI_LUN_32(cmd);
+ if (found < 0) { /* driver doesn't have command */
+ result = SUCCESS;
+ if (qla1280_verbose) {
+ printk(KERN_INFO
+ "scsi(%ld:%d:%d:%d): specified command has "
+ "already completed.\n", ha->host_no, bus,
+ target, lun);
+ }
+ }
- /* Overloading result. Here it means the success or fail of the
- * *issue* of the action. When we return from the routine, it must
- * mean the actual success or fail of the action */
- result = FAILED;
switch (action) {
- case FAIL:
- break;
case ABORT_COMMAND:
- if ((sp->flags & SRB_ABORT_PENDING)) {
- printk(KERN_WARNING
- "scsi(): Command has a pending abort "
- "message - ABORT_PENDING.\n");
- /* This should technically be impossible since we
- * now wait for abort completion */
- break;
- }
-
- for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
- if (sp == ha->outstanding_cmds[i]) {
- dprintk(1, "qla1280: RISC aborting command\n");
- if (qla1280_abort_command(ha, sp, i) == 0)
- result = SUCCESS;
- else {
- /*
- * Since we don't know what might
- * have happend to the command, it
- * is unsafe to remove it from the
- * device's queue at this point.
- * Wait and let the escalation
- * process take care of it.
- */
- printk(KERN_WARNING
- "scsi(%li:%i:%i:%i): Unable"
- " to abort command!\n",
- ha->host_no, bus, target, lun);
- }
- }
- }
- break;
-
- case ABORT_DEVICE:
- if (qla1280_verbose)
- printk(KERN_INFO
- "scsi(%ld:%d:%d:%d): Queueing abort device "
- "command.\n", ha->host_no, bus, target, lun);
- if (qla1280_abort_device(ha, bus, target, lun) == 0)
- result = SUCCESS;
+ dprintk(1, "qla1280: RISC aborting command\n");
+ /*
+ * The abort might fail due to race when the host_lock
+ * is released to issue the abort. As such, we
+ * don't bother to check the return status.
+ */
+ if (found >= 0)
+ qla1280_abort_command(ha, sp, found);
break;
case DEVICE_RESET:
@@ -907,16 +909,21 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
printk(KERN_INFO
"scsi(%ld:%d:%d:%d): Queueing device reset "
"command.\n", ha->host_no, bus, target, lun);
- if (qla1280_device_reset(ha, bus, target) == 0)
- result = SUCCESS;
+ if (qla1280_device_reset(ha, bus, target) == 0) {
+ /* issued device reset, set wait conditions */
+ wait_for_bus = bus;
+ wait_for_target = target;
+ }
break;
case BUS_RESET:
if (qla1280_verbose)
printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
"reset.\n", ha->host_no, bus);
- if (qla1280_bus_reset(ha, bus) == 0)
- result = SUCCESS;
+ if (qla1280_bus_reset(ha, bus) == 0) {
+ /* issued bus reset, set wait conditions */
+ wait_for_bus = bus;
+ }
break;
case ADAPTER_RESET:
@@ -929,55 +936,48 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
"continue automatically\n", ha->host_no);
}
ha->flags.reset_active = 1;
- /*
- * We restarted all of the commands automatically, so the
- * mid-level code can expect completions momentitarily.
- */
- if (qla1280_abort_isp(ha) == 0)
- result = SUCCESS;
+
+ if (qla1280_abort_isp(ha) != 0) { /* it's dead */
+ result = FAILED;
+ }
ha->flags.reset_active = 0;
}
- if (!list_empty(&ha->done_q))
- qla1280_done(ha);
-
- /* If we didn't manage to issue the action, or we have no
- * command to wait for, exit here */
- if (result == FAILED || handle == NULL ||
- handle == (unsigned char *)INVALID_HANDLE) {
- /*
- * Clear completion queue to avoid qla1280_done() trying
- * to complete the command at a later stage after we
- * have exited the current context
- */
- sp->wait = NULL;
- goto leave;
- }
+ /*
+ * At this point, the host_lock has been released and retaken
+ * by the issuance of the mailbox command.
+ * Wait for the command passed in by the mid-layer if it
+ * was found by the driver. It might have been returned
+ * between eh recovery steps, hence the check of the "found"
+ * variable.
+ */
- /* set up a timer just in case we're really jammed */
- init_timer(&timer);
- timer.expires = jiffies + 4*HZ;
- timer.data = (unsigned long)cmd;
- timer.function = qla1280_error_wait_timeout;
- add_timer(&timer);
+ if (found >= 0)
+ result = _qla1280_wait_for_single_command(ha, sp, &wait);
- /* wait for the action to complete (or the timer to expire) */
- spin_unlock_irq(ha->host->host_lock);
- wait_for_completion(&wait);
- del_timer_sync(&timer);
- spin_lock_irq(ha->host->host_lock);
- sp->wait = NULL;
+ if (action == ABORT_COMMAND && result != SUCCESS) {
+ printk(KERN_WARNING
+ "scsi(%li:%i:%i:%i): "
+ "Unable to abort command!\n",
+ ha->host_no, bus, target, lun);
+ }
- /* the only action we might get a fail for is abort */
- if (action == ABORT_COMMAND) {
- if(sp->flags & SRB_ABORTED)
- result = SUCCESS;
- else
- result = FAILED;
+ /*
+ * If the command passed in by the mid-layer has been
+ * returned by the board, then wait for any additional
+ * commands which are supposed to complete based upon
+ * the error action.
+ *
+ * All commands are unconditionally returned during a
+ * call to qla1280_abort_isp(), ADAPTER_RESET. No need
+ * to wait for them.
+ */
+ if (result == SUCCESS && wait_for_bus >= 0) {
+ result = qla1280_wait_for_pending_commands(ha,
+ wait_for_bus, wait_for_target);
}
- leave:
dprintk(1, "RESET returning %d\n", result);
LEAVE("qla1280_error_action");
@@ -1280,13 +1280,12 @@ qla1280_done(struct scsi_qla_host *ha)
switch ((CMD_RESULT(cmd) >> 16)) {
case DID_RESET:
/* Issue marker command. */
- qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
+ if (!ha->flags.abort_isp_active)
+ qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
break;
case DID_ABORT:
sp->flags &= ~SRB_ABORT_PENDING;
sp->flags |= SRB_ABORTED;
- if (sp->flags & SRB_TIMEOUT)
- CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16;
break;
default:
break;
@@ -1296,12 +1295,11 @@ qla1280_done(struct scsi_qla_host *ha)
scsi_dma_unmap(cmd);
/* Call the mid-level driver interrupt handler */
- CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
ha->actthreads--;
- (*(cmd)->scsi_done)(cmd);
-
- if(sp->wait != NULL)
+ if (sp->wait == NULL)
+ (*(cmd)->scsi_done)(cmd);
+ else
complete(sp->wait);
}
LEAVE("qla1280_done");
@@ -2417,9 +2415,6 @@ static int
qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
{
struct device_reg __iomem *reg = ha->iobase;
-#if 0
- LIST_HEAD(done_q);
-#endif
int status = 0;
int cnt;
uint16_t *optr, *iptr;
@@ -2493,19 +2488,9 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
mr = MAILBOX_REGISTER_COUNT;
memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
-#if 0
- /* Go check for any response interrupts pending. */
- qla1280_isr(ha, &done_q);
-#endif
-
if (ha->flags.reset_marker)
qla1280_rst_aen(ha);
-#if 0
- if (!list_empty(&done_q))
- qla1280_done(ha, &done_q);
-#endif
-
if (status)
dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
"0x%x ****\n", mb[0]);
@@ -2641,41 +2626,6 @@ qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
}
/*
- * qla1280_abort_device
- * Issue an abort message to the device
- *
- * Input:
- * ha = adapter block pointer.
- * bus = SCSI BUS.
- * target = SCSI ID.
- * lun = SCSI LUN.
- *
- * Returns:
- * 0 = success
- */
-static int
-qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun)
-{
- uint16_t mb[MAILBOX_REGISTER_COUNT];
- int status;
-
- ENTER("qla1280_abort_device");
-
- mb[0] = MBC_ABORT_DEVICE;
- mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
- status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
-
- /* Issue marker command. */
- qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN);
-
- if (status)
- dprintk(2, "qla1280_abort_device: **** FAILED ****\n");
-
- LEAVE("qla1280_abort_device");
- return status;
-}
-
-/*
* qla1280_abort_command
* Abort command aborts a specified IOCB.
*
@@ -2833,7 +2783,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
/* If room for request in request ring. */
if ((req_cnt + 2) >= ha->req_q_cnt) {
- status = 1;
+ status = SCSI_MLQUEUE_HOST_BUSY;
dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
"0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
req_cnt);
@@ -2845,7 +2795,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
ha->outstanding_cmds[cnt] != NULL; cnt++);
if (cnt >= MAX_OUTSTANDING_COMMANDS) {
- status = 1;
+ status = SCSI_MLQUEUE_HOST_BUSY;
dprintk(2, "qla1280_start_scsi: NO ROOM IN "
"OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
goto out;
@@ -3108,7 +3058,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
ha->req_q_cnt, seg_cnt);
/* If room for request in request ring. */
if ((req_cnt + 2) >= ha->req_q_cnt) {
- status = 1;
+ status = SCSI_MLQUEUE_HOST_BUSY;
dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
"req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
ha->req_q_cnt, req_cnt);
@@ -3120,7 +3070,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
(ha->outstanding_cmds[cnt] != 0); cnt++) ;
if (cnt >= MAX_OUTSTANDING_COMMANDS) {
- status = 1;
+ status = SCSI_MLQUEUE_HOST_BUSY;
dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
"ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
goto out;
@@ -3487,6 +3437,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
/* Save ISP completion status */
CMD_RESULT(sp->cmd) = 0;
+ CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
/* Place block on done queue */
list_add_tail(&sp->list, done_q);
@@ -3495,7 +3446,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
* If we get here we have a real problem!
*/
printk(KERN_WARNING
- "qla1280: ISP invalid handle");
+ "qla1280: ISP invalid handle\n");
}
}
break;
@@ -3753,6 +3704,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
}
}
+ CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
+
/* Place command on done queue. */
list_add_tail(&sp->list, done_q);
out:
@@ -3808,6 +3761,8 @@ qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
CMD_RESULT(sp->cmd) = DID_ERROR << 16;
}
+ CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
+
/* Place command on done queue. */
list_add_tail(&sp->list, done_q);
}
@@ -3858,19 +3813,16 @@ qla1280_abort_isp(struct scsi_qla_host *ha)
struct scsi_cmnd *cmd;
sp = ha->outstanding_cmds[cnt];
if (sp) {
-
cmd = sp->cmd;
CMD_RESULT(cmd) = DID_RESET << 16;
-
- sp->cmd = NULL;
+ CMD_HANDLE(cmd) = COMPLETED_HANDLE;
ha->outstanding_cmds[cnt] = NULL;
-
- (*cmd->scsi_done)(cmd);
-
- sp->flags = 0;
+ list_add_tail(&sp->list, &ha->done_q);
}
}
+ qla1280_done(ha);
+
status = qla1280_load_firmware(ha);
if (status)
goto out;
@@ -3955,13 +3907,6 @@ qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
if (scsi_control == SCSI_PHASE_INVALID) {
ha->bus_settings[bus].scsi_bus_dead = 1;
-#if 0
- CMD_RESULT(cp) = DID_NO_CONNECT << 16;
- CMD_HANDLE(cp) = INVALID_HANDLE;
- /* ha->actthreads--; */
-
- (*(cp)->scsi_done)(cp);
-#endif
return 1; /* bus is dead */
} else {
ha->bus_settings[bus].scsi_bus_dead = 0;
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index d7c44b8d2b4..834884b9eed 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -88,7 +88,8 @@
/* Maximum outstanding commands in ISP queues */
#define MAX_OUTSTANDING_COMMANDS 512
-#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS + 2)
+#define COMPLETED_HANDLE ((unsigned char *) \
+ (MAX_OUTSTANDING_COMMANDS + 2))
/* ISP request and response entry counts (37-65535) */
#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b09993a0657..0f879620150 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -97,7 +97,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
return 0;
if (IS_NOCACHE_VPD_TYPE(ha))
- ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_nvram << 2,
+ ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
ha->nvram_size);
return memory_read_from_buffer(buf, count, &off, ha->nvram,
ha->nvram_size);
@@ -692,6 +692,109 @@ static struct bin_attribute sysfs_edc_status_attr = {
.read = qla2x00_sysfs_read_edc_status,
};
+static ssize_t
+qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ uint16_t actual_size;
+
+ if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
+ return 0;
+
+ if (ha->xgmac_data)
+ goto do_read;
+
+ ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
+ &ha->xgmac_data_dma, GFP_KERNEL);
+ if (!ha->xgmac_data) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory for XGMAC read-data.\n");
+ return 0;
+ }
+
+do_read:
+ actual_size = 0;
+ memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
+
+ rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
+ XGMAC_DATA_SIZE, &actual_size);
+ if (rval != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to read XGMAC data (%x).\n", rval);
+ count = 0;
+ }
+
+ count = actual_size > count ? count: actual_size;
+ memcpy(buf, ha->xgmac_data, count);
+
+ return count;
+}
+
+static struct bin_attribute sysfs_xgmac_stats_attr = {
+ .attr = {
+ .name = "xgmac_stats",
+ .mode = S_IRUSR,
+ },
+ .size = 0,
+ .read = qla2x00_sysfs_read_xgmac_stats,
+};
+
+static ssize_t
+qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ uint16_t actual_size;
+
+ if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
+ return 0;
+
+ if (ha->dcbx_tlv)
+ goto do_read;
+
+ ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
+ &ha->dcbx_tlv_dma, GFP_KERNEL);
+ if (!ha->dcbx_tlv) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory for DCBX TLV read-data.\n");
+ return 0;
+ }
+
+do_read:
+ actual_size = 0;
+ memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
+
+ rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
+ DCBX_TLV_DATA_SIZE);
+ if (rval != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to read DCBX TLV data (%x).\n", rval);
+ count = 0;
+ }
+
+ memcpy(buf, ha->dcbx_tlv, count);
+
+ return count;
+}
+
+static struct bin_attribute sysfs_dcbx_tlv_attr = {
+ .attr = {
+ .name = "dcbx_tlv",
+ .mode = S_IRUSR,
+ },
+ .size = 0,
+ .read = qla2x00_sysfs_read_dcbx_tlv,
+};
+
static struct sysfs_entry {
char *name;
struct bin_attribute *attr;
@@ -706,6 +809,8 @@ static struct sysfs_entry {
{ "reset", &sysfs_reset_attr, },
{ "edc", &sysfs_edc_attr, 2 },
{ "edc_status", &sysfs_edc_status_attr, 2 },
+ { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
+ { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
{ NULL },
};
@@ -721,6 +826,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
continue;
+ if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
+ continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
iter->attr);
@@ -743,6 +850,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
continue;
+ if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
+ continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
iter->attr);
@@ -1088,6 +1197,58 @@ qla2x00_flash_block_size_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
}
+static ssize_t
+qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_QLA81XX(vha->hw))
+ return snprintf(buf, PAGE_SIZE, "\n");
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
+}
+
+static ssize_t
+qla2x00_vn_port_mac_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_QLA81XX(vha->hw))
+ return snprintf(buf, PAGE_SIZE, "\n");
+
+ return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
+ vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
+ vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
+}
+
+static ssize_t
+qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
+}
+
+static ssize_t
+qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int rval;
+ uint16_t state[5];
+
+ rval = qla2x00_get_firmware_state(vha, state);
+ if (rval != QLA_SUCCESS)
+ memset(state, -1, sizeof(state));
+
+ return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
+ state[1], state[2], state[3], state[4]);
+}
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1116,6 +1277,11 @@ static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
NULL);
+static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
+static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
+ qla2x00_vn_port_mac_address_show, NULL);
+static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
+static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version,
@@ -1138,6 +1304,10 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_mpi_version,
&dev_attr_phy_version,
&dev_attr_flash_block_size,
+ &dev_attr_vlan_id,
+ &dev_attr_vn_port_mac_address,
+ &dev_attr_fabric_param,
+ &dev_attr_fw_state,
NULL,
};
@@ -1313,7 +1483,8 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
* At this point all fcport's software-states are cleared. Perform any
* final cleanup of firmware resources (PCBs and XCBs).
*/
- if (fcport->loop_id != FC_NO_LOOP_ID)
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ !test_bit(UNLOADING, &fcport->vha->dpc_flags))
fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
@@ -1437,11 +1608,13 @@ static int
qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
{
int ret = 0;
- int cnt = 0;
- uint8_t qos = QLA_DEFAULT_QUE_QOS;
+ uint8_t qos = 0;
scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
scsi_qla_host_t *vha = NULL;
struct qla_hw_data *ha = base_vha->hw;
+ uint16_t options = 0;
+ int cnt;
+ struct req_que *req = ha->req_q_map[0];
ret = qla24xx_vport_create_req_sanity_check(fc_vport);
if (ret) {
@@ -1497,23 +1670,39 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
qla24xx_vport_disable(fc_vport, disable);
- /* Create a queue pair for the vport */
- if (ha->mqenable) {
- if (ha->npiv_info) {
- for (; cnt < ha->nvram_npiv_size; cnt++) {
- if (ha->npiv_info[cnt].port_name ==
- vha->port_name &&
- ha->npiv_info[cnt].node_name ==
- vha->node_name) {
- qos = ha->npiv_info[cnt].q_qos;
- break;
- }
- }
+ if (ql2xmultique_tag) {
+ req = ha->req_q_map[1];
+ goto vport_queue;
+ } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
+ goto vport_queue;
+ /* Create a request queue in QoS mode for the vport */
+ for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
+ if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
+ && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
+ 8) == 0) {
+ qos = ha->npiv_info[cnt].q_qos;
+ break;
+ }
+ }
+ if (qos) {
+ ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
+ qos);
+ if (!ret)
+ qla_printk(KERN_WARNING, ha,
+ "Can't create request queue for vp_idx:%d\n",
+ vha->vp_idx);
+ else {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
+ ret, qos, vha->vp_idx));
+ req = ha->req_q_map[ret];
}
- qla25xx_create_queues(vha, qos);
}
+vport_queue:
+ vha->req = req;
return 0;
+
vport_create_failed_2:
qla24xx_disable_vp(vha);
qla24xx_deallocate_vp_id(vha);
@@ -1554,8 +1743,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
vha->host_no, vha->vp_idx, vha));
}
- if (ha->mqenable) {
- if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS)
+ if (vha->req->id && !ql2xmultique_tag) {
+ if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
qla_printk(KERN_WARNING, ha,
"Queue delete failed.\n");
}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 34760f8d4f1..4a990f4da4e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -149,11 +149,9 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
int rval = QLA_SUCCESS;
uint32_t cnt;
- if (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE)
- return rval;
-
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
- for (cnt = 30000; (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0 &&
+ for (cnt = 30000;
+ ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -351,7 +349,7 @@ static inline void *
qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
uint32_t cnt, que_idx;
- uint8_t req_cnt, rsp_cnt, que_cnt;
+ uint8_t que_cnt;
struct qla2xxx_mq_chain *mq = ptr;
struct device_reg_25xxmq __iomem *reg;
@@ -363,9 +361,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
mq->type = __constant_htonl(DUMP_CHAIN_MQ);
mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
- req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
- rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
- que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
+ que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
+ ha->max_req_queues : ha->max_rsp_queues;
mq->count = htonl(que_cnt);
for (cnt = 0; cnt < que_cnt; cnt++) {
reg = (struct device_reg_25xxmq *) ((void *)
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 714ee67567e..00aa48d975a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -93,6 +93,7 @@
#define LSD(x) ((uint32_t)((uint64_t)(x)))
#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
+#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
/*
* I/O register
@@ -179,6 +180,7 @@
#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
+#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
struct req_que;
@@ -186,7 +188,6 @@ struct req_que;
* SCSI Request Block
*/
typedef struct srb {
- struct req_que *que;
struct fc_port *fcport;
struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
@@ -2008,7 +2009,7 @@ typedef struct vport_params {
#define VP_RET_CODE_NOT_FOUND 6
struct qla_hw_data;
-
+struct rsp_que;
/*
* ISP operations
*/
@@ -2030,10 +2031,9 @@ struct isp_operations {
void (*enable_intrs) (struct qla_hw_data *);
void (*disable_intrs) (struct qla_hw_data *);
- int (*abort_command) (struct scsi_qla_host *, srb_t *,
- struct req_que *);
- int (*target_reset) (struct fc_port *, unsigned int);
- int (*lun_reset) (struct fc_port *, unsigned int);
+ int (*abort_command) (srb_t *);
+ int (*target_reset) (struct fc_port *, unsigned int, int);
+ int (*lun_reset) (struct fc_port *, unsigned int, int);
int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
uint8_t, uint8_t, uint16_t *, uint8_t);
int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2079,7 +2079,6 @@ struct isp_operations {
#define QLA_PCI_MSIX_CONTROL 0xa2
struct scsi_qla_host;
-struct rsp_que;
struct qla_msix_entry {
int have_irq;
@@ -2140,7 +2139,6 @@ struct qla_statistics {
#define MBC_INITIALIZE_MULTIQ 0x1f
#define QLA_QUE_PAGE 0X1000
#define QLA_MQ_SIZE 32
-#define QLA_MAX_HOST_QUES 16
#define QLA_MAX_QUEUES 256
#define ISP_QUE_REG(ha, id) \
((ha->mqenable) ? \
@@ -2170,6 +2168,8 @@ struct rsp_que {
struct qla_hw_data *hw;
struct qla_msix_entry *msix;
struct req_que *req;
+ srb_t *status_srb; /* status continuation entry */
+ struct work_struct q_work;
};
/* Request queue data structure */
@@ -2222,6 +2222,8 @@ struct qla_hw_data {
uint32_t fce_enabled :1;
uint32_t fac_supported :1;
uint32_t chip_reset_done :1;
+ uint32_t port0 :1;
+ uint32_t running_gold_fw :1;
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -2246,7 +2248,8 @@ struct qla_hw_data {
struct rsp_que **rsp_q_map;
unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
- uint16_t max_queues;
+ uint8_t max_req_queues;
+ uint8_t max_rsp_queues;
struct qla_npiv_entry *npiv_info;
uint16_t nvram_npiv_size;
@@ -2255,6 +2258,9 @@ struct qla_hw_data {
#define FLOGI_MID_SUPPORT BIT_10
#define FLOGI_VSAN_SUPPORT BIT_12
#define FLOGI_SP_SUPPORT BIT_13
+
+ uint8_t port_no; /* Physical port of adapter */
+
/* Timeout timers. */
uint8_t loop_down_abort_time; /* port down timer */
atomic_t loop_down_timer; /* loop down timer */
@@ -2392,6 +2398,14 @@ struct qla_hw_data {
dma_addr_t edc_data_dma;
uint16_t edc_data_len;
+#define XGMAC_DATA_SIZE PAGE_SIZE
+ void *xgmac_data;
+ dma_addr_t xgmac_data_dma;
+
+#define DCBX_TLV_DATA_SIZE PAGE_SIZE
+ void *dcbx_tlv;
+ dma_addr_t dcbx_tlv_dma;
+
struct task_struct *dpc_thread;
uint8_t dpc_active; /* DPC routine is active */
@@ -2510,6 +2524,7 @@ struct qla_hw_data {
uint32_t flt_region_vpd;
uint32_t flt_region_nvram;
uint32_t flt_region_npiv_conf;
+ uint32_t flt_region_gold_fw;
/* Needed for BEACON */
uint16_t beacon_blink_led;
@@ -2536,6 +2551,7 @@ struct qla_hw_data {
struct qla_chip_state_84xx *cs84xx;
struct qla_statistics qla_stats;
struct isp_operations *isp_ops;
+ struct workqueue_struct *wq;
};
/*
@@ -2545,6 +2561,8 @@ typedef struct scsi_qla_host {
struct list_head list;
struct list_head vp_fcports; /* list of fcports */
struct list_head work_list;
+ spinlock_t work_lock;
+
/* Commonly used flags and state information. */
struct Scsi_Host *host;
unsigned long host_no;
@@ -2591,8 +2609,6 @@ typedef struct scsi_qla_host {
#define SWITCH_FOUND BIT_0
#define DFLG_NO_CABLE BIT_1
- srb_t *status_srb; /* Status continuation entry. */
-
/* ISP configuration data. */
uint16_t loop_id; /* Host adapter loop id */
@@ -2618,6 +2634,11 @@ typedef struct scsi_qla_host {
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t fabric_node_name[WWN_SIZE];
+
+ uint16_t fcoe_vlan_id;
+ uint16_t fcoe_fcf_idx;
+ uint8_t fcoe_vn_port_mac[6];
+
uint32_t vp_abort_cnt;
struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
@@ -2643,7 +2664,7 @@ typedef struct scsi_qla_host {
#define VP_ERR_FAB_LOGOUT 4
#define VP_ERR_ADAP_NORESOURCES 5
struct qla_hw_data *hw;
- int req_ques[QLA_MAX_HOST_QUES];
+ struct req_que *req;
} scsi_qla_host_t;
/*
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 96ccb9642ba..dfde2dd865c 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -878,7 +878,6 @@ struct device_reg_24xx {
/* HCCR statuses. */
#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */
#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */
-#define HCCRX_RISC_PAUSE BIT_4 /* RISC Pause mode bit. */
/* HCCR commands. */
/* NOOP. */
#define HCCRX_NOOP 0x00000000
@@ -1241,6 +1240,7 @@ struct qla_flt_header {
#define FLT_REG_HW_EVENT_1 0x1f
#define FLT_REG_NPIV_CONF_0 0x29
#define FLT_REG_NPIV_CONF_1 0x2a
+#define FLT_REG_GOLD_FW 0x2f
struct qla_flt_region {
uint32_t code;
@@ -1405,6 +1405,8 @@ struct access_chip_rsp_84xx {
#define MBC_IDC_ACK 0x101
#define MBC_RESTART_MPI_FW 0x3d
#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */
+#define MBC_GET_XGMAC_STATS 0x7a
+#define MBC_GET_DCBX_PARAMS 0x51
/* Flash access control option field bit definitions */
#define FAC_OPT_FORCE_SEMAPHORE BIT_15
@@ -1711,7 +1713,7 @@ struct ex_init_cb_81xx {
#define FA_VPD0_ADDR_81 0xD0000
#define FA_VPD1_ADDR_81 0xD0400
#define FA_NVRAM0_ADDR_81 0xD0080
-#define FA_NVRAM1_ADDR_81 0xD0480
+#define FA_NVRAM1_ADDR_81 0xD0180
#define FA_FEATURE_ADDR_81 0xD4000
#define FA_FLASH_DESCR_ADDR_81 0xD8000
#define FA_FLASH_LAYOUT_ADDR_81 0xD8400
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 528913f6bed..65b12d82867 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -65,8 +65,11 @@ extern int ql2xfdmienable;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xqfullrampup;
+extern int ql2xqfulltracking;
extern int ql2xiidmaenable;
extern int ql2xmaxqueues;
+extern int ql2xmultique_tag;
+extern int ql2xfwloadbin;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -145,7 +148,7 @@ qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
extern int
qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
-extern void
+extern int
qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *);
@@ -165,13 +168,13 @@ extern int
qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
extern int
-qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
+qla2x00_abort_command(srb_t *);
extern int
-qla2x00_abort_target(struct fc_port *, unsigned int);
+qla2x00_abort_target(struct fc_port *, unsigned int, int);
extern int
-qla2x00_lun_reset(struct fc_port *, unsigned int);
+qla2x00_lun_reset(struct fc_port *, unsigned int, int);
extern int
qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
@@ -236,9 +239,11 @@ extern int
qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
dma_addr_t);
-extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
-extern int qla24xx_abort_target(struct fc_port *, unsigned int);
-extern int qla24xx_lun_reset(struct fc_port *, unsigned int);
+extern int qla24xx_abort_command(srb_t *);
+extern int
+qla24xx_abort_target(struct fc_port *, unsigned int, int);
+extern int
+qla24xx_lun_reset(struct fc_port *, unsigned int, int);
extern int
qla2x00_system_error(scsi_qla_host_t *);
@@ -288,6 +293,18 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int);
extern int
qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int
+qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *);
+
+extern int
+qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t);
+
+extern int
+qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
+
+extern int
+qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -295,8 +312,8 @@ extern irqreturn_t qla2100_intr_handler(int, void *);
extern irqreturn_t qla2300_intr_handler(int, void *);
extern irqreturn_t qla24xx_intr_handler(int, void *);
extern void qla2x00_process_response_queue(struct rsp_que *);
-extern void qla24xx_process_response_queue(struct rsp_que *);
-
+extern void
+qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
@@ -401,19 +418,21 @@ extern int qla25xx_request_irq(struct rsp_que *);
extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
- uint16_t, uint8_t, uint8_t);
+ uint16_t, int, uint8_t);
extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
- uint16_t);
+ uint16_t, int);
extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
extern void qla2x00_init_response_q_entries(struct rsp_que *);
extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
-extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t);
+extern int qla25xx_delete_queues(struct scsi_qla_host *);
extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
+
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 557f58d5bf8..917534b9f22 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1107,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
return ret;
ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
- mb, BIT_1);
+ mb, BIT_1|BIT_0);
if (mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
"loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
@@ -1879,6 +1879,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
case BIT_13:
list[i].fp_speed = PORT_SPEED_4GB;
break;
+ case BIT_12:
+ list[i].fp_speed = PORT_SPEED_10GB;
+ break;
case BIT_11:
list[i].fp_speed = PORT_SPEED_8GB;
break;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index bd7dd84c064..26202612932 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -634,7 +634,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
goto chip_diag_failed;
DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
- ha->host_no));
+ vha->host_no));
/* Reset RISC processor. */
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -655,7 +655,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
goto chip_diag_failed;
/* Check product ID of chip */
- DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", ha->host_no));
+ DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
mb[1] = RD_MAILBOX_REG(ha, reg, 1);
mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -730,9 +730,6 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
- /* Perform RISC reset. */
- qla24xx_reset_risc(vha);
-
ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
rval = qla2x00_mbx_reg_test(vha);
@@ -786,7 +783,6 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
sizeof(uint32_t);
if (ha->mqenable)
mq_size = sizeof(struct qla2xxx_mq_chain);
-
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
goto try_eft;
@@ -850,8 +846,7 @@ cont_alloc:
rsp_q_size = rsp->length * sizeof(response_t);
dump_size = offsetof(struct qla2xxx_fw_dump, isp);
- dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
- eft_size;
+ dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
ha->chain_offset = dump_size;
dump_size += mq_size + fce_size;
@@ -891,6 +886,56 @@ cont_alloc:
htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
+static int
+qla81xx_mpi_sync(scsi_qla_host_t *vha)
+{
+#define MPS_MASK 0xe0
+ int rval;
+ uint16_t dc;
+ uint32_t dw;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(vha->hw))
+ return QLA_SUCCESS;
+
+ rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "Sync-MPI: Unable to acquire semaphore.\n"));
+ goto done;
+ }
+
+ pci_read_config_word(vha->hw->pdev, 0x54, &dc);
+ rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "Sync-MPI: Unable to read sync.\n"));
+ goto done_release;
+ }
+
+ dc &= MPS_MASK;
+ if (dc == (dw & MPS_MASK))
+ goto done_release;
+
+ dw &= ~MPS_MASK;
+ dw |= dc;
+ rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "Sync-MPI: Unable to gain sync.\n"));
+ }
+
+done_release:
+ rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "Sync-MPI: Unable to release semaphore.\n"));
+ }
+
+done:
+ return rval;
+}
+
/**
* qla2x00_setup_chip() - Load and start RISC firmware.
* @ha: HA context
@@ -915,6 +960,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+ qla81xx_mpi_sync(vha);
+
/* Load firmware sequences */
rval = ha->isp_ops->load_risc(vha, &srisc_address);
if (rval == QLA_SUCCESS) {
@@ -931,13 +978,16 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
/* Retrieve firmware information. */
if (rval == QLA_SUCCESS) {
fw_major_version = ha->fw_major_version;
- qla2x00_get_fw_version(vha,
+ rval = qla2x00_get_fw_version(vha,
&ha->fw_major_version,
&ha->fw_minor_version,
&ha->fw_subminor_version,
&ha->fw_attributes, &ha->fw_memory_size,
ha->mpi_version, &ha->mpi_capabilities,
ha->phy_version);
+ if (rval != QLA_SUCCESS)
+ goto failed;
+
ha->flags.npiv_supported = 0;
if (IS_QLA2XXX_MIDTYPE(ha) &&
(ha->fw_attributes & BIT_2)) {
@@ -989,7 +1039,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
ha->fw_subminor_version);
}
}
-
+failed:
if (rval) {
DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
vha->host_no));
@@ -1013,12 +1063,14 @@ qla2x00_init_response_q_entries(struct rsp_que *rsp)
uint16_t cnt;
response_t *pkt;
+ rsp->ring_ptr = rsp->ring;
+ rsp->ring_index = 0;
+ rsp->status_srb = NULL;
pkt = rsp->ring_ptr;
for (cnt = 0; cnt < rsp->length; cnt++) {
pkt->signature = RESPONSE_PROCESSED;
pkt++;
}
-
}
/**
@@ -1176,7 +1228,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
if (ha->flags.msix_enabled) {
msix = &ha->msix_entries[1];
DEBUG2_17(printk(KERN_INFO
- "Reistering vector 0x%x for base que\n", msix->entry));
+ "Registering vector 0x%x for base que\n", msix->entry));
icb->msix = cpu_to_le16(msix->entry);
}
/* Use alternate PCI bus number */
@@ -1230,14 +1282,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Clear outstanding commands array. */
- for (que = 0; que < ha->max_queues; que++) {
+ for (que = 0; que < ha->max_req_queues; que++) {
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
req->outstanding_cmds[cnt] = NULL;
- req->current_outstanding_cmd = 0;
+ req->current_outstanding_cmd = 1;
/* Initialize firmware. */
req->ring_ptr = req->ring;
@@ -1245,13 +1297,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
req->cnt = req->length;
}
- for (que = 0; que < ha->max_queues; que++) {
+ for (que = 0; que < ha->max_rsp_queues; que++) {
rsp = ha->rsp_q_map[que];
if (!rsp)
continue;
- rsp->ring_ptr = rsp->ring;
- rsp->ring_index = 0;
-
/* Initialize response queue entries */
qla2x00_init_response_q_entries(rsp);
}
@@ -1307,7 +1356,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
unsigned long wtime, mtime, cs84xx_time;
uint16_t min_wait; /* Minimum wait time if loop is down */
uint16_t wait_time; /* Wait time if loop is coming ready */
- uint16_t state[3];
+ uint16_t state[5];
struct qla_hw_data *ha = vha->hw;
rval = QLA_SUCCESS;
@@ -1406,8 +1455,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
vha->host_no, state[0], jiffies));
} while (1);
- DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
- vha->host_no, state[0], jiffies));
+ DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
+ vha->host_no, state[0], state[1], state[2], state[3], state[4],
+ jiffies));
if (rval) {
DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
@@ -1541,6 +1591,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
char *st, *en;
uint16_t index;
struct qla_hw_data *ha = vha->hw;
+ int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha);
if (memcmp(model, BINZERO, len) != 0) {
strncpy(ha->model_number, model, len);
@@ -1553,14 +1604,16 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
}
index = (ha->pdev->subsystem_device & 0xff);
- if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+ if (use_tbl &&
+ ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES)
strncpy(ha->model_desc,
qla2x00_model_name[index * 2 + 1],
sizeof(ha->model_desc) - 1);
} else {
index = (ha->pdev->subsystem_device & 0xff);
- if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+ if (use_tbl &&
+ ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES) {
strcpy(ha->model_number,
qla2x00_model_name[index * 2]);
@@ -2061,8 +2114,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- if (test_bit(RSCN_UPDATE, &save_flags))
+ if (test_bit(RSCN_UPDATE, &save_flags)) {
set_bit(RSCN_UPDATE, &vha->dpc_flags);
+ vha->flags.rscn_queue_overflow = 1;
+ }
}
return (rval);
@@ -2110,7 +2165,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
goto cleanup_allocation;
DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
- ha->host_no, entries));
+ vha->host_no, entries));
DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
entries * sizeof(struct gid_list_info)));
@@ -2243,7 +2298,8 @@ static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
#define LS_UNKNOWN 2
- static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
+ static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
+ char *link_speed;
int rval;
uint16_t mb[6];
struct qla_hw_data *ha = vha->hw;
@@ -2266,10 +2322,15 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[6], fcport->port_name[7], rval,
fcport->fp_speed, mb[0], mb[1]));
} else {
+ link_speed = link_speeds[LS_UNKNOWN];
+ if (fcport->fp_speed < 5)
+ link_speed = link_speeds[fcport->fp_speed];
+ else if (fcport->fp_speed == 0x13)
+ link_speed = link_speeds[5];
DEBUG2(qla_printk(KERN_INFO, ha,
"iIDMA adjusted to %s GB/s on "
"%02x%02x%02x%02x%02x%02x%02x%02x.\n",
- link_speeds[fcport->fp_speed], fcport->port_name[0],
+ link_speed, fcport->port_name[0],
fcport->port_name[1], fcport->port_name[2],
fcport->port_name[3], fcport->port_name[4],
fcport->port_name[5], fcport->port_name[6],
@@ -3180,9 +3241,14 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
uint32_t wait_time;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = ha->req_q_map[vha->req_ques[0]];
- struct rsp_que *rsp = req->rsp;
+ struct req_que *req;
+ struct rsp_que *rsp;
+
+ if (ql2xmultique_tag)
+ req = vha->hw->req_q_map[0];
+ else
+ req = vha->req;
+ rsp = req->rsp;
atomic_set(&vha->loop_state, LOOP_UPDATE);
clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -3448,7 +3514,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
int ret = -1;
int i;
- for (i = 1; i < ha->max_queues; i++) {
+ for (i = 1; i < ha->max_rsp_queues; i++) {
rsp = ha->rsp_q_map[i];
if (rsp) {
rsp->options &= ~BIT_0;
@@ -3462,6 +3528,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
"%s Rsp que:%d inited\n", __func__,
rsp->id));
}
+ }
+ for (i = 1; i < ha->max_req_queues; i++) {
req = ha->req_q_map[i];
if (req) {
/* Clear outstanding commands array. */
@@ -3566,14 +3634,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
nv = ha->nvram;
/* Determine NVRAM starting address. */
- ha->nvram_size = sizeof(struct nvram_24xx);
- ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
- ha->vpd_size = FA_NVRAM_VPD_SIZE;
- ha->vpd_base = FA_NVRAM_VPD0_ADDR;
- if (PCI_FUNC(ha->pdev->devfn)) {
+ if (ha->flags.port0) {
+ ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
+ ha->vpd_base = FA_NVRAM_VPD0_ADDR;
+ } else {
ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
ha->vpd_base = FA_NVRAM_VPD1_ADDR;
}
+ ha->nvram_size = sizeof(struct nvram_24xx);
+ ha->vpd_size = FA_NVRAM_VPD_SIZE;
/* Get VPD data into cache */
ha->vpd = ha->nvram + VPD_OFFSET;
@@ -3587,7 +3656,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
chksum += le32_to_cpu(*dptr++);
- DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no));
+ DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
/* Bad NVRAM data, set defaults parameters. */
@@ -3612,7 +3681,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
nv->exchange_count = __constant_cpu_to_le16(0);
nv->hard_address = __constant_cpu_to_le16(124);
nv->port_name[0] = 0x21;
- nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
+ nv->port_name[1] = 0x00 + ha->port_no;
nv->port_name[2] = 0x00;
nv->port_name[3] = 0xe0;
nv->port_name[4] = 0x8b;
@@ -3798,11 +3867,11 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
}
static int
-qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ uint32_t faddr)
{
int rval = QLA_SUCCESS;
int segments, fragment;
- uint32_t faddr;
uint32_t *dcode, dlen;
uint32_t risc_addr;
uint32_t risc_size;
@@ -3811,12 +3880,11 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
struct req_que *req = ha->req_q_map[0];
qla_printk(KERN_INFO, ha,
- "FW: Loading from flash (%x)...\n", ha->flt_region_fw);
+ "FW: Loading from flash (%x)...\n", faddr);
rval = QLA_SUCCESS;
segments = FA_RISC_CODE_SEGMENTS;
- faddr = ha->flt_region_fw;
dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
@@ -4104,6 +4172,9 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
int rval;
+ if (ql2xfwloadbin == 1)
+ return qla81xx_load_risc(vha, srisc_addr);
+
/*
* FW Load priority:
* 1) Firmware via request-firmware interface (.bin file).
@@ -4113,24 +4184,45 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
if (rval == QLA_SUCCESS)
return rval;
- return qla24xx_load_risc_flash(vha, srisc_addr);
+ return qla24xx_load_risc_flash(vha, srisc_addr,
+ vha->hw->flt_region_fw);
}
int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ql2xfwloadbin == 2)
+ goto try_blob_fw;
/*
* FW Load priority:
* 1) Firmware residing in flash.
* 2) Firmware via request-firmware interface (.bin file).
+ * 3) Golden-Firmware residing in flash -- limited operation.
*/
- rval = qla24xx_load_risc_flash(vha, srisc_addr);
+ rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
if (rval == QLA_SUCCESS)
return rval;
- return qla24xx_load_risc_blob(vha, srisc_addr);
+try_blob_fw:
+ rval = qla24xx_load_risc_blob(vha, srisc_addr);
+ if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
+ return rval;
+
+ qla_printk(KERN_ERR, ha,
+ "FW: Attempting to fallback to golden firmware...\n");
+ rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ qla_printk(KERN_ERR, ha,
+ "FW: Please update operational firmware...\n");
+ ha->flags.running_gold_fw = 1;
+
+ return rval;
}
void
@@ -4146,7 +4238,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
ret = qla2x00_stop_firmware(vha);
for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
- retries ; retries--) {
+ ret != QLA_INVALID_COMMAND && retries ; retries--) {
ha->isp_ops->reset_chip(vha);
if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
continue;
@@ -4165,13 +4257,19 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
uint16_t mb[MAILBOX_REGISTER_COUNT];
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- struct req_que *req = ha->req_q_map[vha->req_ques[0]];
- struct rsp_que *rsp = req->rsp;
+ struct req_que *req;
+ struct rsp_que *rsp;
if (!vha->vp_idx)
return -EINVAL;
rval = qla2x00_fw_ready(base_vha);
+ if (ql2xmultique_tag)
+ req = ha->req_q_map[0];
+ else
+ req = vha->req;
+ rsp = req->rsp;
+
if (rval == QLA_SUCCESS) {
clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4305,7 +4403,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
chksum += le32_to_cpu(*dptr++);
- DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no));
+ DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
/* Bad NVRAM data, set defaults parameters. */
@@ -4329,7 +4427,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
nv->exchange_count = __constant_cpu_to_le16(0);
nv->port_name[0] = 0x21;
- nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
+ nv->port_name[1] = 0x00 + ha->port_no;
nv->port_name[2] = 0x00;
nv->port_name[3] = 0xe0;
nv->port_name[4] = 0x8b;
@@ -4358,12 +4456,12 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->max_luns_per_target = __constant_cpu_to_le16(128);
nv->port_down_retry_count = __constant_cpu_to_le16(30);
nv->link_down_timeout = __constant_cpu_to_le16(30);
- nv->enode_mac[0] = 0x01;
+ nv->enode_mac[0] = 0x00;
nv->enode_mac[1] = 0x02;
nv->enode_mac[2] = 0x03;
nv->enode_mac[3] = 0x04;
nv->enode_mac[4] = 0x05;
- nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
+ nv->enode_mac[5] = 0x06 + ha->port_no;
rval = 1;
}
@@ -4396,7 +4494,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
icb->enode_mac[2] = 0x03;
icb->enode_mac[3] = 0x04;
icb->enode_mac[4] = 0x05;
- icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
+ icb->enode_mac[5] = 0x06 + ha->port_no;
}
/* Use extended-initialization control block. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a8abbb95730..13396beae2c 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -15,6 +15,7 @@ static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
+static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
* @cmd: SCSI command
@@ -92,9 +93,10 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
* Returns a pointer to the Continuation Type 0 IOCB packet.
*/
static inline cont_entry_t *
-qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
+qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
cont_entry_t *cont_pkt;
+ struct req_que *req = vha->req;
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
@@ -120,10 +122,11 @@ qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
* Returns a pointer to the continuation type 1 IOCB packet.
*/
static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
cont_a64_entry_t *cont_pkt;
+ struct req_que *req = vha->req;
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
@@ -159,7 +162,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
- struct req_que *req;
cmd = sp->cmd;
@@ -174,8 +176,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
}
vha = sp->fcport->vha;
- req = sp->que;
-
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Three DSDs are available in the Command Type 2 IOCB */
@@ -192,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
* Seven DSDs are available in the Continuation
* Type 0 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
+ cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
avail_dsds = 7;
}
@@ -220,7 +220,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
- struct req_que *req;
cmd = sp->cmd;
@@ -235,8 +234,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
}
vha = sp->fcport->vha;
- req = sp->que;
-
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Two DSDs are available in the Command Type 3 IOCB */
@@ -254,7 +251,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -353,7 +350,6 @@ qla2x00_start_scsi(srb_t *sp)
/* Build command packet */
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
- sp->que = req;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
@@ -453,6 +449,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
mrk24->lun[2] = MSB(lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
mrk24->vp_index = vha->vp_idx;
+ mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16(lun);
@@ -531,9 +528,6 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
*dword_ptr++ = 0;
- /* Set system defined field. */
- pkt->sys_define = (uint8_t)req->ring_index;
-
/* Set entry count. */
pkt->entry_count = 1;
@@ -656,7 +650,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
}
vha = sp->fcport->vha;
- req = sp->que;
+ req = vha->req;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -687,7 +681,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -724,19 +718,13 @@ qla24xx_start_scsi(srb_t *sp)
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- uint16_t que_id;
/* Setup device pointers. */
ret = 0;
- que_id = vha->req_ques[0];
- req = ha->req_q_map[que_id];
- sp->que = req;
+ qla25xx_set_que(sp, &rsp);
+ req = vha->req;
- if (req->rsp)
- rsp = req->rsp;
- else
- rsp = ha->rsp_q_map[que_id];
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -794,7 +782,7 @@ qla24xx_start_scsi(srb_t *sp)
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = handle;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -823,6 +811,8 @@ qla24xx_start_scsi(srb_t *sp)
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where completion should happen */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
wmb();
/* Adjust ring index. */
@@ -842,7 +832,7 @@ qla24xx_start_scsi(srb_t *sp)
/* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue &&
rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(rsp);
+ qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -855,3 +845,16 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
+
+static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
+{
+ struct scsi_cmnd *cmd = sp->cmd;
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ int affinity = cmd->request->cpu;
+
+ if (ql2xmultique_tag && affinity >= 0 &&
+ affinity < ha->max_rsp_queues - 1)
+ *rsp = ha->rsp_q_map[affinity + 1];
+ else
+ *rsp = ha->rsp_q_map[0];
+}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d04981848e5..c8d0a176fea 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -13,10 +13,9 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
-static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
+static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
sts_entry_t *);
-static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -51,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id)
status = 0;
spin_lock(&ha->hardware_lock);
- vha = qla2x00_get_rsp_host(rsp);
+ vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
hccr = RD_REG_WORD(&reg->hccr);
if (hccr & HCCR_RISC_PAUSE) {
@@ -147,7 +146,7 @@ qla2300_intr_handler(int irq, void *dev_id)
status = 0;
spin_lock(&ha->hardware_lock);
- vha = qla2x00_get_rsp_host(rsp);
+ vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_PAUSED) {
@@ -685,7 +684,7 @@ skip_rio:
vha->host_no));
if (IS_FWI2_CAPABLE(ha))
- qla24xx_process_response_queue(rsp);
+ qla24xx_process_response_queue(vha, rsp);
else
qla2x00_process_response_queue(rsp);
break;
@@ -766,7 +765,10 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
- req = ha->req_q_map[vha->req_ques[0]];
+ if (!ql2xqfulltracking)
+ return;
+
+ req = vha->req;
if (!req)
return;
if (req->max_q_depth <= sdev->queue_depth)
@@ -808,6 +810,9 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
fc_port_t *fcport;
struct scsi_device *sdev;
+ if (!ql2xqfulltracking)
+ return;
+
sdev = sp->cmd->device;
if (sdev->queue_depth >= req->max_q_depth)
return;
@@ -858,8 +863,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
qla2x00_ramp_up_queue_depth(vha, req, sp);
qla2x00_sp_compl(ha, sp);
} else {
- DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
- vha->host_no));
+ DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
+ " handle(%d)\n", vha->host_no, req->id, index));
qla_printk(KERN_WARNING, ha,
"Invalid ISP SCSI completion handle\n");
@@ -881,7 +886,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
uint16_t handle_cnt;
uint16_t cnt;
- vha = qla2x00_get_rsp_host(rsp);
+ vha = pci_get_drvdata(ha->pdev);
if (!vha->flags.online)
return;
@@ -926,7 +931,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
break;
case STATUS_CONT_TYPE:
- qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
+ qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
break;
default:
/* Type Not Supported. */
@@ -945,7 +950,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
static inline void
-qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
+qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
+ struct rsp_que *rsp)
{
struct scsi_cmnd *cp = sp->cmd;
@@ -962,7 +968,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
sp->request_sense_ptr += sense_len;
sp->request_sense_length -= sense_len;
if (sp->request_sense_length != 0)
- sp->fcport->vha->status_srb = sp;
+ rsp->status_srb = sp;
DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
"cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
@@ -992,7 +998,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
uint8_t *rsp_info, *sense_data;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = rsp->req;
+ uint32_t handle;
+ uint16_t que;
+ struct req_que *req;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -1003,18 +1011,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
comp_status = le16_to_cpu(sts->comp_status);
scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
}
-
+ handle = (uint32_t) LSW(sts->handle);
+ que = MSW(sts->handle);
+ req = ha->req_q_map[que];
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
- qla2x00_process_completed_request(vha, req, sts->handle);
+ qla2x00_process_completed_request(vha, req, handle);
return;
}
/* Validate handle. */
- if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
- sp = req->outstanding_cmds[sts->handle];
- req->outstanding_cmds[sts->handle] = NULL;
+ if (handle < MAX_OUTSTANDING_COMMANDS) {
+ sp = req->outstanding_cmds[handle];
+ req->outstanding_cmds[handle] = NULL;
} else
sp = NULL;
@@ -1030,7 +1040,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
cp = sp->cmd;
if (cp == NULL) {
DEBUG2(printk("scsi(%ld): Command already returned back to OS "
- "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
+ "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
qla_printk(KERN_WARNING, ha,
"Command is NULL: already returned to OS (sp=%p)\n", sp);
@@ -1121,6 +1131,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
scsi_status));
/* Adjust queue depth for all luns on the port. */
+ if (!ql2xqfulltracking)
+ break;
fcport->last_queue_full = jiffies;
starget_for_each_device(cp->device->sdev_target,
fcport, qla2x00_adjust_sdev_qdepth_down);
@@ -1133,7 +1145,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (!(scsi_status & SS_SENSE_LEN_VALID))
break;
- qla2x00_handle_sense(sp, sense_data, sense_len);
+ qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
break;
case CS_DATA_UNDERRUN:
@@ -1179,6 +1191,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
* Adjust queue depth for all luns on the
* port.
*/
+ if (!ql2xqfulltracking)
+ break;
fcport->last_queue_full = jiffies;
starget_for_each_device(
cp->device->sdev_target, fcport,
@@ -1192,12 +1206,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (!(scsi_status & SS_SENSE_LEN_VALID))
break;
- qla2x00_handle_sense(sp, sense_data, sense_len);
+ qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
} else {
/*
* If RISC reports underrun and target does not report
* it then we must have a lost frame, so tell upper
- * layer to retry it by reporting a bus busy.
+ * layer to retry it by reporting an error.
*/
if (!(scsi_status & SS_RESIDUAL_UNDER)) {
DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
@@ -1207,7 +1221,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
cp->device->id, cp->device->lun, resid,
scsi_bufflen(cp)));
- cp->result = DID_BUS_BUSY << 16;
+ cp->result = DID_ERROR << 16;
break;
}
@@ -1334,7 +1348,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
/* Place command on done queue. */
- if (vha->status_srb == NULL)
+ if (rsp->status_srb == NULL)
qla2x00_sp_compl(ha, sp);
}
@@ -1346,11 +1360,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
* Extended sense data.
*/
static void
-qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
+qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
uint8_t sense_sz = 0;
- struct qla_hw_data *ha = vha->hw;
- srb_t *sp = vha->status_srb;
+ struct qla_hw_data *ha = rsp->hw;
+ srb_t *sp = rsp->status_srb;
struct scsi_cmnd *cp;
if (sp != NULL && sp->request_sense_length != 0) {
@@ -1362,7 +1376,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
"cmd is NULL: already returned to OS (sp=%p)\n",
sp);
- vha->status_srb = NULL;
+ rsp->status_srb = NULL;
return;
}
@@ -1383,7 +1397,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
/* Place command on done queue. */
if (sp->request_sense_length == 0) {
- vha->status_srb = NULL;
+ rsp->status_srb = NULL;
qla2x00_sp_compl(ha, sp);
}
}
@@ -1399,7 +1413,9 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = rsp->req;
+ uint32_t handle = LSW(pkt->handle);
+ uint16_t que = MSW(pkt->handle);
+ struct req_que *req = ha->req_q_map[que];
#if defined(QL_DEBUG_LEVEL_2)
if (pkt->entry_status & RF_INV_E_ORDER)
qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1417,14 +1433,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
#endif
/* Validate handle. */
- if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
- sp = req->outstanding_cmds[pkt->handle];
+ if (handle < MAX_OUTSTANDING_COMMANDS)
+ sp = req->outstanding_cmds[handle];
else
sp = NULL;
if (sp) {
/* Free outstanding command slot. */
- req->outstanding_cmds[pkt->handle] = NULL;
+ req->outstanding_cmds[handle] = NULL;
/* Bad payload or header */
if (pkt->entry_status &
@@ -1486,13 +1502,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
* qla24xx_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
*/
-void
-qla24xx_process_response_queue(struct rsp_que *rsp)
+void qla24xx_process_response_queue(struct scsi_qla_host *vha,
+ struct rsp_que *rsp)
{
struct sts_entry_24xx *pkt;
- struct scsi_qla_host *vha;
-
- vha = qla2x00_get_rsp_host(rsp);
if (!vha->flags.online)
return;
@@ -1523,7 +1536,7 @@ qla24xx_process_response_queue(struct rsp_que *rsp)
qla2x00_status_entry(vha, rsp, pkt);
break;
case STATUS_CONT_TYPE:
- qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
+ qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
break;
case VP_RPT_ID_IOCB_TYPE:
qla24xx_report_id_acquisition(vha,
@@ -1626,7 +1639,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
status = 0;
spin_lock(&ha->hardware_lock);
- vha = qla2x00_get_rsp_host(rsp);
+ vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
@@ -1664,7 +1677,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
break;
case 0x13:
case 0x14:
- qla24xx_process_response_queue(rsp);
+ qla24xx_process_response_queue(vha, rsp);
break;
default:
DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1692,6 +1705,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
struct qla_hw_data *ha;
struct rsp_que *rsp;
struct device_reg_24xx __iomem *reg;
+ struct scsi_qla_host *vha;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -1704,7 +1718,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
spin_lock_irq(&ha->hardware_lock);
- qla24xx_process_response_queue(rsp);
+ vha = qla25xx_get_host(rsp);
+ qla24xx_process_response_queue(vha, rsp);
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
spin_unlock_irq(&ha->hardware_lock);
@@ -1717,7 +1732,6 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
{
struct qla_hw_data *ha;
struct rsp_que *rsp;
- struct device_reg_24xx __iomem *reg;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -1726,13 +1740,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
return IRQ_NONE;
}
ha = rsp->hw;
- reg = &ha->iobase->isp24;
- spin_lock_irq(&ha->hardware_lock);
-
- qla24xx_process_response_queue(rsp);
-
- spin_unlock_irq(&ha->hardware_lock);
+ queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
return IRQ_HANDLED;
}
@@ -1760,7 +1769,7 @@ qla24xx_msix_default(int irq, void *dev_id)
status = 0;
spin_lock_irq(&ha->hardware_lock);
- vha = qla2x00_get_rsp_host(rsp);
+ vha = pci_get_drvdata(ha->pdev);
do {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
@@ -1798,7 +1807,7 @@ qla24xx_msix_default(int irq, void *dev_id)
break;
case 0x13:
case 0x14:
- qla24xx_process_response_queue(rsp);
+ qla24xx_process_response_queue(vha, rsp);
break;
default:
DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1822,31 +1831,14 @@ qla24xx_msix_default(int irq, void *dev_id)
/* Interrupt handling helpers. */
struct qla_init_msix_entry {
- uint16_t entry;
- uint16_t index;
const char *name;
irq_handler_t handler;
};
-static struct qla_init_msix_entry base_queue = {
- .entry = 0,
- .index = 0,
- .name = "qla2xxx (default)",
- .handler = qla24xx_msix_default,
-};
-
-static struct qla_init_msix_entry base_rsp_queue = {
- .entry = 1,
- .index = 1,
- .name = "qla2xxx (rsp_q)",
- .handler = qla24xx_msix_rsp_q,
-};
-
-static struct qla_init_msix_entry multi_rsp_queue = {
- .entry = 1,
- .index = 1,
- .name = "qla2xxx (multi_q)",
- .handler = qla25xx_msix_rsp_q,
+static struct qla_init_msix_entry msix_entries[3] = {
+ { "qla2xxx (default)", qla24xx_msix_default },
+ { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+ { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};
static void
@@ -1873,7 +1865,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
int i, ret;
struct msix_entry *entries;
struct qla_msix_entry *qentry;
- struct qla_init_msix_entry *msix_queue;
entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
GFP_KERNEL);
@@ -1900,7 +1891,7 @@ msix_failed:
ha->msix_count, ret);
goto msix_out;
}
- ha->max_queues = ha->msix_count - 1;
+ ha->max_rsp_queues = ha->msix_count - 1;
}
ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
ha->msix_count, GFP_KERNEL);
@@ -1918,45 +1909,27 @@ msix_failed:
qentry->rsp = NULL;
}
- /* Enable MSI-X for AENs for queue 0 */
- qentry = &ha->msix_entries[0];
- ret = request_irq(qentry->vector, base_queue.handler, 0,
- base_queue.name, rsp);
- if (ret) {
- qla_printk(KERN_WARNING, ha,
+ /* Enable MSI-X vectors for the base queue */
+ for (i = 0; i < 2; i++) {
+ qentry = &ha->msix_entries[i];
+ ret = request_irq(qentry->vector, msix_entries[i].handler,
+ 0, msix_entries[i].name, rsp);
+ if (ret) {
+ qla_printk(KERN_WARNING, ha,
"MSI-X: Unable to register handler -- %x/%d.\n",
qentry->vector, ret);
- qla24xx_disable_msix(ha);
- goto msix_out;
+ qla24xx_disable_msix(ha);
+ ha->mqenable = 0;
+ goto msix_out;
+ }
+ qentry->have_irq = 1;
+ qentry->rsp = rsp;
+ rsp->msix = qentry;
}
- qentry->have_irq = 1;
- qentry->rsp = rsp;
/* Enable MSI-X vector for response queue update for queue 0 */
- if (ha->max_queues > 1 && ha->mqiobase) {
+ if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
ha->mqenable = 1;
- msix_queue = &multi_rsp_queue;
- qla_printk(KERN_INFO, ha,
- "MQ enabled, Number of Queue Resources: %d \n",
- ha->max_queues);
- } else {
- ha->mqenable = 0;
- msix_queue = &base_rsp_queue;
- }
-
- qentry = &ha->msix_entries[1];
- ret = request_irq(qentry->vector, msix_queue->handler, 0,
- msix_queue->name, rsp);
- if (ret) {
- qla_printk(KERN_WARNING, ha,
- "MSI-X: Unable to register handler -- %x/%d.\n",
- qentry->vector, ret);
- qla24xx_disable_msix(ha);
- ha->mqenable = 0;
- goto msix_out;
- }
- qentry->have_irq = 1;
- qentry->rsp = rsp;
msix_out:
kfree(entries);
@@ -2063,35 +2036,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
}
}
-static struct scsi_qla_host *
-qla2x00_get_rsp_host(struct rsp_que *rsp)
-{
- srb_t *sp;
- struct qla_hw_data *ha = rsp->hw;
- struct scsi_qla_host *vha = NULL;
- struct sts_entry_24xx *pkt;
- struct req_que *req;
-
- if (rsp->id) {
- pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
- req = rsp->req;
- if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
- sp = req->outstanding_cmds[pkt->handle];
- if (sp)
- vha = sp->fcport->vha;
- }
- }
- if (!vha)
- /* handle it in base queue */
- vha = pci_get_drvdata(ha->pdev);
-
- return vha;
-}
int qla25xx_request_irq(struct rsp_que *rsp)
{
struct qla_hw_data *ha = rsp->hw;
- struct qla_init_msix_entry *intr = &multi_rsp_queue;
+ struct qla_init_msix_entry *intr = &msix_entries[2];
struct qla_msix_entry *msix = rsp->msix;
int ret;
@@ -2106,3 +2055,30 @@ int qla25xx_request_irq(struct rsp_que *rsp)
msix->rsp = rsp;
return ret;
}
+
+struct scsi_qla_host *
+qla25xx_get_host(struct rsp_que *rsp)
+{
+ srb_t *sp;
+ struct qla_hw_data *ha = rsp->hw;
+ struct scsi_qla_host *vha = NULL;
+ struct sts_entry_24xx *pkt;
+ struct req_que *req;
+ uint16_t que;
+ uint32_t handle;
+
+ pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
+ que = MSW(pkt->handle);
+ handle = (uint32_t) LSW(pkt->handle);
+ req = ha->req_q_map[que];
+ if (handle < MAX_OUTSTANDING_COMMANDS) {
+ sp = req->outstanding_cmds[handle];
+ if (sp)
+ return sp->fcport->vha;
+ else
+ goto base_que;
+ }
+base_que:
+ vha = pci_get_drvdata(ha->pdev);
+ return vha;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index e67c1660bf4..451ece0760b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -408,7 +408,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
* Context:
* Kernel context.
*/
-void
+int
qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
uint32_t *mpi_caps, uint8_t *phy)
@@ -427,6 +427,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
mcp->flags = 0;
mcp->tov = MBX_TOV_SECONDS;
rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS)
+ goto failed;
/* Return mailbox data. */
*major = mcp->mb[1];
@@ -446,7 +448,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
phy[1] = mcp->mb[9] >> 8;
phy[2] = mcp->mb[9] & 0xff;
}
-
+failed:
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
@@ -455,6 +457,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
/*EMPTY*/
DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
+ return rval;
}
/*
@@ -748,20 +751,20 @@ qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
* Kernel context.
*/
int
-qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
+qla2x00_abort_command(srb_t *sp)
{
unsigned long flags = 0;
- fc_port_t *fcport;
int rval;
uint32_t handle = 0;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ fc_port_t *fcport = sp->fcport;
+ scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = vha->req;
DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
- fcport = sp->fcport;
-
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -800,7 +803,7 @@ qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
}
int
-qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
+qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
{
int rval, rval2;
mbx_cmd_t mc;
@@ -813,8 +816,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
l = l;
vha = fcport->vha;
- req = vha->hw->req_q_map[0];
- rsp = vha->hw->rsp_q_map[0];
+ req = vha->hw->req_q_map[tag];
+ rsp = vha->hw->rsp_q_map[tag];
mcp->mb[0] = MBC_ABORT_TARGET;
mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
if (HAS_EXTENDED_IDS(vha->hw)) {
@@ -850,7 +853,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
}
int
-qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
+qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
{
int rval, rval2;
mbx_cmd_t mc;
@@ -862,8 +865,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
vha = fcport->vha;
- req = vha->hw->req_q_map[0];
- rsp = vha->hw->rsp_q_map[0];
+ req = vha->hw->req_q_map[tag];
+ rsp = vha->hw->rsp_q_map[tag];
mcp->mb[0] = MBC_LUN_RESET;
mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
if (HAS_EXTENDED_IDS(vha->hw))
@@ -931,6 +934,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_0;
mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ if (IS_QLA81XX(vha->hw))
+ mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -952,9 +957,19 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
vha->host_no, rval));
} else {
- /*EMPTY*/
DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
vha->host_no));
+
+ if (IS_QLA81XX(vha->hw)) {
+ vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
+ vha->fcoe_fcf_idx = mcp->mb[10];
+ vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
+ vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
+ vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
+ vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
+ vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
+ vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
+ }
}
return rval;
@@ -1252,7 +1267,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
- mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -1261,6 +1276,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
states[0] = mcp->mb[1];
states[1] = mcp->mb[2];
states[2] = mcp->mb[3];
+ states[3] = mcp->mb[4];
+ states[4] = mcp->mb[5];
if (rval != QLA_SUCCESS) {
/*EMPTY*/
@@ -1480,9 +1497,17 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
dma_addr_t lg_dma;
uint32_t iop[2];
struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
+ struct rsp_que *rsp;
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ if (ql2xmultique_tag)
+ req = ha->req_q_map[0];
+ else
+ req = vha->req;
+ rsp = req->rsp;
+
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
@@ -1493,6 +1518,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
+ lg->handle = MAKE_HANDLE(req->id, lg->handle);
lg->nport_handle = cpu_to_le16(loop_id);
lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
if (opt & BIT_0)
@@ -1741,6 +1767,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
struct logio_entry_24xx *lg;
dma_addr_t lg_dma;
struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
+ struct rsp_que *rsp;
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -1752,8 +1780,14 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
}
memset(lg, 0, sizeof(struct logio_entry_24xx));
+ if (ql2xmaxqueues > 1)
+ req = ha->req_q_map[0];
+ else
+ req = vha->req;
+ rsp = req->rsp;
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
+ lg->handle = MAKE_HANDLE(req->id, lg->handle);
lg->nport_handle = cpu_to_le16(loop_id);
lg->control_flags =
__constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
@@ -1864,9 +1898,6 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (IS_QLA81XX(vha->hw))
- return QLA_SUCCESS;
-
DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
vha->host_no));
@@ -2195,21 +2226,21 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
}
int
-qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
+qla24xx_abort_command(srb_t *sp)
{
int rval;
- fc_port_t *fcport;
unsigned long flags = 0;
struct abort_entry_24xx *abt;
dma_addr_t abt_dma;
uint32_t handle;
+ fc_port_t *fcport = sp->fcport;
+ struct scsi_qla_host *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = vha->req;
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
- fcport = sp->fcport;
-
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -2231,6 +2262,7 @@ qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
abt->entry_type = ABORT_IOCB_TYPE;
abt->entry_count = 1;
+ abt->handle = MAKE_HANDLE(req->id, abt->handle);
abt->nport_handle = cpu_to_le16(fcport->loop_id);
abt->handle_to_abort = handle;
abt->port_id[0] = fcport->d_id.b.al_pa;
@@ -2272,7 +2304,7 @@ struct tsk_mgmt_cmd {
static int
__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
- unsigned int l)
+ unsigned int l, int tag)
{
int rval, rval2;
struct tsk_mgmt_cmd *tsk;
@@ -2286,8 +2318,11 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
vha = fcport->vha;
ha = vha->hw;
- req = ha->req_q_map[0];
- rsp = ha->rsp_q_map[0];
+ req = vha->req;
+ if (ql2xmultique_tag)
+ rsp = ha->rsp_q_map[tag + 1];
+ else
+ rsp = req->rsp;
tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
if (tsk == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
@@ -2298,6 +2333,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
tsk->p.tsk.entry_count = 1;
+ tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
tsk->p.tsk.control_flags = cpu_to_le32(type);
@@ -2344,15 +2380,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
}
int
-qla24xx_abort_target(struct fc_port *fcport, unsigned int l)
+qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
{
- return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l);
+ return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
}
int
-qla24xx_lun_reset(struct fc_port *fcport, unsigned int l)
+qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
{
- return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l);
+ return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
}
int
@@ -2446,6 +2482,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
vha->host_no, rval));
+ if (mcp->mb[0] == MBS_INVALID_COMMAND)
+ rval = QLA_INVALID_COMMAND;
} else {
DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
@@ -2717,8 +2755,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (vp_idx == 0)
return;
- if (MSB(stat) == 1)
+ if (MSB(stat) == 1) {
+ DEBUG2(printk("scsi(%ld): Could not acquire ID for "
+ "VP[%d].\n", vha->host_no, vp_idx));
return;
+ }
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
if (vp_idx == vp->vp_idx)
@@ -3141,6 +3182,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
WRT_REG_DWORD(&reg->req_q_in, 0);
WRT_REG_DWORD(&reg->req_q_out, 0);
}
+ req->req_q_in = &reg->req_q_in;
+ req->req_q_out = &reg->req_q_out;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
rval = qla2x00_mailbox_command(vha, mcp);
@@ -3167,7 +3210,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mcp->mb[6] = MSW(MSD(rsp->dma));
mcp->mb[7] = LSW(MSD(rsp->dma));
mcp->mb[5] = rsp->length;
- mcp->mb[11] = rsp->vp_idx;
mcp->mb[14] = rsp->msix->entry;
mcp->mb[13] = rsp->rid;
@@ -3179,7 +3221,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mcp->mb[8] = 0;
/* que out ptr index */
mcp->mb[9] = 0;
- mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7
+ mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->flags = MBX_DMA_OUT;
@@ -3384,7 +3426,7 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
vha->host_no, rval, mcp->mb[0]));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
@@ -3428,3 +3470,141 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
return rval;
}
+
+int
+qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
+ uint16_t size_in_bytes, uint16_t *actual_size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA81XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+
+ mcp->mb[0] = MBC_GET_XGMAC_STATS;
+ mcp->mb[2] = MSW(stats_dma);
+ mcp->mb[3] = LSW(stats_dma);
+ mcp->mb[6] = MSW(MSD(stats_dma));
+ mcp->mb[7] = LSW(MSD(stats_dma));
+ mcp->mb[8] = size_in_bytes >> 2;
+ mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
+ "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
+ mcp->mb[0], mcp->mb[1], mcp->mb[2]));
+ } else {
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+
+ *actual_size = mcp->mb[2] << 2;
+ }
+
+ return rval;
+}
+
+int
+qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
+ uint16_t size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA81XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+
+ mcp->mb[0] = MBC_GET_DCBX_PARAMS;
+ mcp->mb[1] = 0;
+ mcp->mb[2] = MSW(tlv_dma);
+ mcp->mb[3] = LSW(tlv_dma);
+ mcp->mb[6] = MSW(MSD(tlv_dma));
+ mcp->mb[7] = LSW(MSD(tlv_dma));
+ mcp->mb[8] = size;
+ mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
+ "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
+ mcp->mb[0], mcp->mb[1], mcp->mb[2]));
+ } else {
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ }
+
+ return rval;
+}
+
+int
+qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+
+ mcp->mb[0] = MBC_READ_RAM_EXTENDED;
+ mcp->mb[1] = LSW(risc_addr);
+ mcp->mb[8] = MSW(risc_addr);
+ mcp->out_mb = MBX_8|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_2|MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
+ vha->host_no, rval, mcp->mb[0]));
+ } else {
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ *data = mcp->mb[3] << 16 | mcp->mb[2];
+ }
+
+ return rval;
+}
+
+int
+qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+
+ mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
+ mcp->mb[1] = LSW(risc_addr);
+ mcp->mb[2] = LSW(data);
+ mcp->mb[3] = MSW(data);
+ mcp->mb[8] = MSW(risc_addr);
+ mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
+ vha->host_no, rval, mcp->mb[0]));
+ } else {
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ }
+
+ return rval;
+}
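Every mailbox routine in this file declares which mailbox registers it writes and reads through the mcp->out_mb and mcp->in_mb bitmasks (MBX_0, MBX_1, ...). A small sketch of that one-bit-per-register convention, assuming MBX_n is simply bit n (the real constants live in the qla2xxx headers):

#include <stdint.h>
#include <stdio.h>

/* Assumption: MBX_n selects mailbox register n, one bit per register. */
#define MBX_BIT(n)	(1u << (n))

int main(void)
{
	/* Mirrors qla2x00_read_ram_word: write mb0/mb1/mb8, read mb0/mb2/mb3. */
	uint32_t out_mb = MBX_BIT(8) | MBX_BIT(1) | MBX_BIT(0);
	uint32_t in_mb  = MBX_BIT(3) | MBX_BIT(2) | MBX_BIT(0);
	int n;

	for (n = 0; n < 32; n++)
		if (out_mb & MBX_BIT(n))
			printf("write mb[%d]\n", n);
	for (n = 0; n < 32; n++)
		if (in_mb & MBX_BIT(n))
			printf("read  mb[%d]\n", n);
	return 0;
}

The mask-driven scheme lets the common qla2x00_mailbox_command() path touch only the registers a given command actually uses.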
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 51716c7e300..650bcef08f2 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -398,9 +398,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
- memset(vha->req_ques, 0, sizeof(vha->req_ques));
- vha->req_ques[0] = ha->req_q_map[0]->id;
- host->can_queue = ha->req_q_map[0]->length + 128;
+ vha->req = base_vha->req;
+ host->can_queue = base_vha->req->length + 128;
host->this_id = 255;
host->cmd_per_lun = 3;
host->max_cmd_len = MAX_CMDSZ;
@@ -515,76 +514,53 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
/* Delete all queues for a given vhost */
int
-qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no)
+qla25xx_delete_queues(struct scsi_qla_host *vha)
{
int cnt, ret = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct qla_hw_data *ha = vha->hw;
- if (que_no) {
- /* Delete request queue */
- req = ha->req_q_map[que_no];
+ /* Delete request queues */
+ for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+ req = ha->req_q_map[cnt];
if (req) {
- rsp = req->rsp;
ret = qla25xx_delete_req_que(vha, req);
if (ret != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
- "Couldn't delete req que %d\n", req->id);
+ "Couldn't delete req que %d\n",
+ req->id);
return ret;
}
- /* Delete associated response queue */
- if (rsp) {
- ret = qla25xx_delete_rsp_que(vha, rsp);
- if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Couldn't delete rsp que %d\n",
- rsp->id);
- return ret;
- }
- }
}
- } else { /* delete all queues of this host */
- for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) {
- /* Delete request queues */
- req = ha->req_q_map[vha->req_ques[cnt]];
- if (req && req->id) {
- rsp = req->rsp;
- ret = qla25xx_delete_req_que(vha, req);
- if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Couldn't delete req que %d\n",
- vha->req_ques[cnt]);
- return ret;
- }
- vha->req_ques[cnt] = ha->req_q_map[0]->id;
- /* Delete associated response queue */
- if (rsp && rsp->id) {
- ret = qla25xx_delete_rsp_que(vha, rsp);
- if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Couldn't delete rsp que %d\n",
- rsp->id);
- return ret;
- }
- }
+ }
+
+ /* Delete response queues */
+ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+ rsp = ha->rsp_q_map[cnt];
+ if (rsp) {
+ ret = qla25xx_delete_rsp_que(vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "Couldn't delete rsp que %d\n",
+ rsp->id);
+ return ret;
}
}
}
- qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
- vha->vp_idx);
return ret;
}
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
- uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos)
+ uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
int ret = 0;
struct req_que *req = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
device_reg_t __iomem *reg;
+ uint32_t cnt;
req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
if (req == NULL) {
@@ -604,8 +580,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
}
mutex_lock(&ha->vport_lock);
- que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
- if (que_id >= ha->max_queues) {
+ que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
+ if (que_id >= ha->max_req_queues) {
mutex_unlock(&ha->vport_lock);
qla_printk(KERN_INFO, ha, "No resources to create "
"additional request queue\n");
@@ -617,10 +593,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->vp_idx = vp_idx;
req->qos = qos;
- if (ha->rsp_q_map[rsp_que]) {
+ if (rsp_que < 0)
+ req->rsp = NULL;
+ else
req->rsp = ha->rsp_q_map[rsp_que];
- req->rsp->req = req;
- }
/* Use alternate PCI bus number */
if (MSB(req->rid))
options |= BIT_4;
@@ -628,13 +604,16 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
if (LSB(req->rid))
options |= BIT_5;
req->options = options;
+
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+ req->outstanding_cmds[cnt] = NULL;
+ req->current_outstanding_cmd = 1;
+
req->ring_ptr = req->ring;
req->ring_index = 0;
req->cnt = req->length;
req->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
- req->req_q_in = &reg->isp25mq.req_q_in;
- req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
mutex_unlock(&ha->vport_lock);
@@ -654,10 +633,19 @@ que_failed:
return 0;
}
+static void qla_do_work(struct work_struct *work)
+{
+ struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
+ struct scsi_qla_host *vha;
+
+ vha = qla25xx_get_host(rsp);
+ qla24xx_process_response_queue(vha, rsp);
+}
+
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
- uint8_t vp_idx, uint16_t rid)
+ uint8_t vp_idx, uint16_t rid, int req)
{
int ret = 0;
struct rsp_que *rsp = NULL;
@@ -672,7 +660,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
goto que_failed;
}
- rsp->length = RESPONSE_ENTRY_CNT_2300;
+ rsp->length = RESPONSE_ENTRY_CNT_MQ;
rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
(rsp->length + 1) * sizeof(response_t),
&rsp->dma, GFP_KERNEL);
@@ -683,8 +671,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
}
mutex_lock(&ha->vport_lock);
- que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
- if (que_id >= ha->max_queues) {
+ que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
+ if (que_id >= ha->max_rsp_queues) {
mutex_unlock(&ha->vport_lock);
qla_printk(KERN_INFO, ha, "No resources to create "
"additional response queue\n");
@@ -708,8 +696,6 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (LSB(rsp->rid))
options |= BIT_5;
rsp->options = options;
- rsp->ring_ptr = rsp->ring;
- rsp->ring_index = 0;
rsp->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
@@ -728,9 +714,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
mutex_unlock(&ha->vport_lock);
goto que_failed;
}
+ if (req >= 0)
+ rsp->req = ha->req_q_map[req];
+ else
+ rsp->req = NULL;
qla2x00_init_response_q_entries(rsp);
-
+ if (rsp->hw->wq)
+ INIT_WORK(&rsp->q_work, qla_do_work);
return rsp->id;
que_failed:
@@ -744,14 +735,16 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
uint16_t options = 0;
uint8_t ret = 0;
struct qla_hw_data *ha = vha->hw;
+ struct rsp_que *rsp;
options |= BIT_1;
- ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0);
+ ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1);
if (!ret) {
qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
return ret;
} else
qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
+ rsp = ha->rsp_q_map[ret];
options = 0;
if (qos & BIT_7)
@@ -759,10 +752,11 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
qos & ~BIT_7);
if (ret) {
- vha->req_ques[0] = ret;
+ vha->req = ha->req_q_map[ret];
qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
} else
qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
+ rsp->req = ha->req_q_map[ret];
return ret;
}
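qla_do_work() above is a standard Linux workqueue consumer: the work_struct is embedded in the owning object (struct rsp_que), initialized with INIT_WORK(), queued on the driver's workqueue, and the handler recovers its owner with container_of(). A self-contained sketch of the same pattern with an invented demo type, not the driver's structures:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/* Invented example object; the driver embeds q_work in struct rsp_que. */
struct demo_obj {
	int id;
	struct work_struct work;
};

static struct workqueue_struct *demo_wq;
static struct demo_obj *obj;

static void demo_do_work(struct work_struct *work)
{
	/* Recover the owner, exactly as qla_do_work() does for rsp_que. */
	struct demo_obj *o = container_of(work, struct demo_obj, work);

	pr_info("demo: processing object %d\n", o->id);
}

static int __init demo_init(void)
{
	demo_wq = create_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		destroy_workqueue(demo_wq);
		return -ENOMEM;
	}
	obj->id = 1;
	INIT_WORK(&obj->work, demo_do_work);
	queue_work(demo_wq, &obj->work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(demo_wq);
	destroy_workqueue(demo_wq);
	kfree(obj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");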
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e4fdcdad80d..dcf011679c8 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -77,6 +77,14 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to report for target devices.");
+int ql2xqfulltracking = 1;
+module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xqfulltracking,
+ "Controls whether the driver tracks queue full status "
+ "returns and dynamically adjusts a scsi device's queue "
+ "depth. Default is 1, perform tracking. Set to 0 to "
+ "disable dynamic tracking and adjustment of queue depth.");
+
int ql2xqfullrampup = 120;
module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xqfullrampup,
@@ -96,6 +104,23 @@ MODULE_PARM_DESC(ql2xmaxqueues,
"Enables MQ settings "
"Default is 1 for single queue. Set it to number \
of queues in MQ mode.");
+
+int ql2xmultique_tag;
+module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xmultique_tag,
+ "Enables CPU affinity settings for the driver "
+ "Default is 0 for no affinity of request and response IO. "
+ "Set it to 1 to turn on the cpu affinity.");
+
+int ql2xfwloadbin;
+module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xfwloadbin,
+ "Option to specify location from which to load ISP firmware:\n"
+ " 2 -- load firmware via the request_firmware() (hotplug)\n"
+ " interface.\n"
+ " 1 -- load firmware from flash.\n"
+ " 0 -- use default semantics.\n");
+
/*
* SCSI host template entry points
*/
@@ -187,7 +212,7 @@ static void qla2x00_sp_free_dma(srb_t *);
/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha)
{
- ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues,
+ ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
GFP_KERNEL);
if (!ha->req_q_map) {
qla_printk(KERN_WARNING, ha,
@@ -195,7 +220,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
goto fail_req_map;
}
- ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues,
+ ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
GFP_KERNEL);
if (!ha->rsp_q_map) {
qla_printk(KERN_WARNING, ha,
@@ -213,16 +238,8 @@ fail_req_map:
return -ENOMEM;
}
-static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
- struct rsp_que *rsp)
+static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
- if (rsp && rsp->ring)
- dma_free_coherent(&ha->pdev->dev,
- (rsp->length + 1) * sizeof(response_t),
- rsp->ring, rsp->dma);
-
- kfree(rsp);
- rsp = NULL;
if (req && req->ring)
dma_free_coherent(&ha->pdev->dev,
(req->length + 1) * sizeof(request_t),
@@ -232,22 +249,77 @@ static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
req = NULL;
}
+static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
+{
+ if (rsp && rsp->ring)
+ dma_free_coherent(&ha->pdev->dev,
+ (rsp->length + 1) * sizeof(response_t),
+ rsp->ring, rsp->dma);
+
+ kfree(rsp);
+ rsp = NULL;
+}
+
static void qla2x00_free_queues(struct qla_hw_data *ha)
{
struct req_que *req;
struct rsp_que *rsp;
int cnt;
- for (cnt = 0; cnt < ha->max_queues; cnt++) {
- rsp = ha->rsp_q_map[cnt];
+ for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
req = ha->req_q_map[cnt];
- qla2x00_free_que(ha, req, rsp);
+ qla2x00_free_req_que(ha, req);
+ }
+ kfree(ha->req_q_map);
+ ha->req_q_map = NULL;
+
+ for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
+ rsp = ha->rsp_q_map[cnt];
+ qla2x00_free_rsp_que(ha, rsp);
}
kfree(ha->rsp_q_map);
ha->rsp_q_map = NULL;
+}
- kfree(ha->req_q_map);
- ha->req_q_map = NULL;
+static int qla25xx_setup_mode(struct scsi_qla_host *vha)
+{
+ uint16_t options = 0;
+ int ques, req, ret;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ql2xmultique_tag) {
+ /* CPU affinity mode */
+ ha->wq = create_workqueue("qla2xxx_wq");
+ /* create a request queue for IO */
+ options |= BIT_7;
+ req = qla25xx_create_req_que(ha, options, 0, 0, -1,
+ QLA_DEFAULT_QUE_QOS);
+ if (!req) {
+ qla_printk(KERN_WARNING, ha,
+ "Can't create request queue\n");
+ goto fail;
+ }
+ vha->req = ha->req_q_map[req];
+ options |= BIT_1;
+ for (ques = 1; ques < ha->max_rsp_queues; ques++) {
+ ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
+ if (!ret) {
+ qla_printk(KERN_WARNING, ha,
+ "Response Queue create failed\n");
+ goto fail2;
+ }
+ }
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "CPU affinity mode enabled, no. of response"
+ " queues:%d, no. of request queues:%d\n",
+ ha->max_rsp_queues, ha->max_req_queues));
+ }
+ return 0;
+fail2:
+ qla25xx_delete_queues(vha);
+fail:
+ ha->mqenable = 0;
+ return 1;
}
static char *
@@ -387,7 +459,6 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
sp->fcport = fcport;
sp->cmd = cmd;
- sp->que = ha->req_q_map[0];
sp->flags = 0;
CMD_SP(cmd) = (void *)sp;
cmd->scsi_done = done;
@@ -612,7 +683,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
void
qla2x00_abort_fcport_cmds(fc_port_t *fcport)
{
- int cnt, que, id;
+ int cnt;
unsigned long flags;
srb_t *sp;
scsi_qla_host_t *vha = fcport->vha;
@@ -620,32 +691,27 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
struct req_que *req;
spin_lock_irqsave(&ha->hardware_lock, flags);
- for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
- id = vha->req_ques[que];
- req = ha->req_q_map[id];
- if (!req)
+ req = vha->req;
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (!sp)
+ continue;
+ if (sp->fcport != fcport)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
- sp = req->outstanding_cmds[cnt];
- if (!sp)
- continue;
- if (sp->fcport != fcport)
- continue;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(vha, sp, req)) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(sp)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "Abort failed -- %lx\n",
+ sp->cmd->serial_number));
+ } else {
+ if (qla2x00_eh_wait_on_command(sp->cmd) !=
+ QLA_SUCCESS)
DEBUG2(qla_printk(KERN_WARNING, ha,
- "Abort failed -- %lx\n",
+ "Abort failed while waiting -- %lx\n",
sp->cmd->serial_number));
- } else {
- if (qla2x00_eh_wait_on_command(sp->cmd) !=
- QLA_SUCCESS)
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Abort failed while waiting -- %lx\n",
- sp->cmd->serial_number));
- }
- spin_lock_irqsave(&ha->hardware_lock, flags);
}
+ spin_lock_irqsave(&ha->hardware_lock, flags);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -693,7 +759,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int wait = 0;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req;
+ struct req_que *req = vha->req;
srb_t *spt;
qla2x00_block_error_handler(cmd);
@@ -709,7 +775,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
spt = (srb_t *) CMD_SP(cmd);
if (!spt)
return SUCCESS;
- req = spt->que;
/* Check active list for command command. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -726,7 +791,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
" pid=%ld.\n", __func__, vha->host_no, sp, serial));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(vha, sp, req)) {
+ if (ha->isp_ops->abort_command(sp)) {
DEBUG2(printk("%s(%ld): abort_command "
"mbx failed.\n", __func__, vha->host_no));
ret = FAILED;
@@ -777,7 +842,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
return status;
spin_lock_irqsave(&ha->hardware_lock, flags);
- req = sp->que;
+ req = vha->req;
for (cnt = 1; status == QLA_SUCCESS &&
cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
sp = req->outstanding_cmds[cnt];
@@ -820,7 +885,7 @@ static char *reset_errors[] = {
static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
- struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int))
+ struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -841,7 +906,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
goto eh_reset_failed;
err = 2;
- if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS)
+ if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
+ != QLA_SUCCESS)
goto eh_reset_failed;
err = 3;
if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
@@ -996,6 +1062,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
} else {
+ if (ha->wq)
+ flush_workqueue(ha->wq);
+
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
if (qla2x00_abort_isp(base_vha)) {
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
@@ -1037,7 +1106,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
struct fc_port *fcport;
struct qla_hw_data *ha = vha->hw;
- if (ha->flags.enable_lip_full_login && !vha->vp_idx) {
+ if (ha->flags.enable_lip_full_login && !vha->vp_idx &&
+ !IS_QLA81XX(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
DEBUG2_3(printk("%s(%ld): failed: "
@@ -1064,7 +1134,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
if (fcport->port_type != FCT_TARGET)
continue;
- ret = ha->isp_ops->target_reset(fcport, 0);
+ ret = ha->isp_ops->target_reset(fcport, 0, 0);
if (ret != QLA_SUCCESS) {
DEBUG2_3(printk("%s(%ld): bus_reset failed: "
"target_reset=%d d_id=%x.\n", __func__,
@@ -1088,7 +1158,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
struct req_que *req;
spin_lock_irqsave(&ha->hardware_lock, flags);
- for (que = 0; que < ha->max_queues; que++) {
+ for (que = 0; que < ha->max_req_queues; que++) {
req = ha->req_q_map[que];
if (!req)
continue;
@@ -1123,7 +1193,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
scsi_qla_host_t *vha = shost_priv(sdev->host);
struct qla_hw_data *ha = vha->hw;
struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
- struct req_que *req = ha->req_q_map[vha->req_ques[0]];
+ struct req_que *req = vha->req;
if (sdev->tagged_supported)
scsi_activate_tcq(sdev, req->max_q_depth);
@@ -1511,6 +1581,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
}
+
+ /* Get adapter physical port no from interrupt pin register. */
+ pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+ if (ha->port_no & 1)
+ ha->flags.port0 = 1;
+ else
+ ha->flags.port0 = 0;
}
static int
@@ -1518,6 +1595,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
{
resource_size_t pio;
uint16_t msix;
+ int cpus;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
@@ -1571,8 +1649,9 @@ skip_pio:
}
/* Determine queue resources */
- ha->max_queues = 1;
- if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) &&
+ (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
pci_resource_len(ha->pdev, 3));
@@ -1582,18 +1661,24 @@ skip_pio:
ha->msix_count = msix;
/* Max queues are bounded by available msix vectors */
/* queue 0 uses two msix vectors */
- if (ha->msix_count - 1 < ql2xmaxqueues)
- ha->max_queues = ha->msix_count - 1;
- else if (ql2xmaxqueues > QLA_MQ_SIZE)
- ha->max_queues = QLA_MQ_SIZE;
- else
- ha->max_queues = ql2xmaxqueues;
+ if (ql2xmultique_tag) {
+ cpus = num_online_cpus();
+ ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ?
+ (cpus + 1) : (ha->msix_count - 1);
+ ha->max_req_queues = 2;
+ } else if (ql2xmaxqueues > 1) {
+ ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+ QLA_MQ_SIZE : ql2xmaxqueues;
+ DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
+ " of request queues:%d\n", ha->max_req_queues));
+ }
qla_printk(KERN_INFO, ha,
"MSI-X vector count: %d\n", msix);
- }
+ } else
+ qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
mqiobase_exit:
- ha->msix_count = ha->max_queues + 1;
+ ha->msix_count = ha->max_rsp_queues + 1;
return (0);
iospace_error_exit:
@@ -1605,6 +1690,9 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
{
scsi_qla_host_t *vha = shost_priv(shost);
+ if (vha->hw->flags.running_gold_fw)
+ return;
+
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(RSCN_UPDATE, &vha->dpc_flags);
@@ -1768,6 +1856,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
ha->gid_list_info_size = 8;
ha->optrom_size = OPTROM_SIZE_81XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
ha->isp_ops = &qla81xx_isp_ops;
ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
@@ -1803,14 +1892,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ret = -ENOMEM;
qla2x00_mem_free(ha);
- qla2x00_free_que(ha, req, rsp);
+ qla2x00_free_req_que(ha, req);
+ qla2x00_free_rsp_que(ha, rsp);
goto probe_hw_failed;
}
pci_set_drvdata(pdev, base_vha);
host = base_vha->host;
- base_vha->req_ques[0] = req->id;
+ base_vha->req = req;
host->can_queue = req->length + 128;
if (IS_QLA2XXX_MIDTYPE(ha))
base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
@@ -1841,7 +1931,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
ha->rsp_q_map[0] = rsp;
ha->req_q_map[0] = req;
-
+ rsp->req = req;
+ req->rsp = rsp;
+ set_bit(0, ha->req_qid_map);
+ set_bit(0, ha->rsp_qid_map);
/* FWI2-capable only. */
req->req_q_in = &ha->iobase->isp24.req_q_in;
req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -1866,6 +1959,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto probe_failed;
}
+ if (ha->mqenable)
+ if (qla25xx_setup_mode(base_vha))
+ qla_printk(KERN_WARNING, ha,
+ "Can't create queues, falling back to single"
+ " queue mode\n");
+
+ if (ha->flags.running_gold_fw)
+ goto skip_dpc;
+
/*
* Startup the kernel thread for this host adapter
*/
@@ -1878,6 +1980,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto probe_failed;
}
+skip_dpc:
list_add_tail(&base_vha->list, &ha->vp_list);
base_vha->host->irq = ha->pdev->irq;
@@ -1917,8 +2020,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
probe_init_failed:
- qla2x00_free_que(ha, req, rsp);
- ha->max_queues = 0;
+ qla2x00_free_req_que(ha, req);
+ qla2x00_free_rsp_que(ha, rsp);
+ ha->max_req_queues = ha->max_rsp_queues = 0;
probe_failed:
if (base_vha->timer_active)
@@ -1976,6 +2080,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
base_vha->flags.online = 0;
+ /* Flush the work queue and remove it */
+ if (ha->wq) {
+ flush_workqueue(ha->wq);
+ destroy_workqueue(ha->wq);
+ ha->wq = NULL;
+ }
+
/* Kill the kernel thread for this host */
if (ha->dpc_thread) {
struct task_struct *t = ha->dpc_thread;
@@ -2017,6 +2128,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ qla25xx_delete_queues(vha);
+
if (ha->flags.fce_enabled)
qla2x00_disable_fce_trace(vha, NULL, NULL);
@@ -2329,6 +2442,14 @@ qla2x00_mem_free(struct qla_hw_data *ha)
vfree(ha->fw_dump);
}
+ if (ha->dcbx_tlv)
+ dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
+ ha->dcbx_tlv, ha->dcbx_tlv_dma);
+
+ if (ha->xgmac_data)
+ dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
+ ha->xgmac_data, ha->xgmac_data_dma);
+
if (ha->sns_cmd)
dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
ha->sns_cmd, ha->sns_cmd_dma);
@@ -2412,6 +2533,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->work_list);
INIT_LIST_HEAD(&vha->list);
+ spin_lock_init(&vha->work_lock);
+
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
return vha;
@@ -2420,13 +2543,11 @@ fail:
}
static struct qla_work_evt *
-qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
- int locked)
+qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
{
struct qla_work_evt *e;
- e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC:
- GFP_KERNEL);
+ e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
if (!e)
return NULL;
@@ -2437,17 +2558,15 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
}
static int
-qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked)
+qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
- unsigned long uninitialized_var(flags);
- struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
- if (!locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&vha->work_lock, flags);
list_add_tail(&e->list, &vha->work_list);
+ spin_unlock_irqrestore(&vha->work_lock, flags);
qla2xxx_wake_dpc(vha);
- if (!locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
return QLA_SUCCESS;
}
@@ -2457,13 +2576,13 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
{
struct qla_work_evt *e;
- e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1);
+ e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
if (!e)
return QLA_FUNCTION_FAILED;
e->u.aen.code = code;
e->u.aen.data = data;
- return qla2x00_post_work(vha, e, 1);
+ return qla2x00_post_work(vha, e);
}
int
@@ -2471,25 +2590,27 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
{
struct qla_work_evt *e;
- e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1);
+ e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
if (!e)
return QLA_FUNCTION_FAILED;
memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
- return qla2x00_post_work(vha, e, 1);
+ return qla2x00_post_work(vha, e);
}
static void
qla2x00_do_work(struct scsi_qla_host *vha)
{
- struct qla_work_evt *e;
- struct qla_hw_data *ha = vha->hw;
+ struct qla_work_evt *e, *tmp;
+ unsigned long flags;
+ LIST_HEAD(work);
- spin_lock_irq(&ha->hardware_lock);
- while (!list_empty(&vha->work_list)) {
- e = list_entry(vha->work_list.next, struct qla_work_evt, list);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ list_splice_init(&vha->work_list, &work);
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ list_for_each_entry_safe(e, tmp, &work, list) {
list_del_init(&e->list);
- spin_unlock_irq(&ha->hardware_lock);
switch (e->type) {
case QLA_EVT_AEN:
@@ -2502,10 +2623,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
- spin_lock_irq(&ha->hardware_lock);
}
- spin_unlock_irq(&ha->hardware_lock);
}
+
/* Relogins all the fcports of a vport
* Context: dpc thread
*/
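The reworked event machinery (see qla2x00_do_work() above) drains vha->work_list by splicing it onto a private list under the new work_lock and then processing entries with no lock held, so handlers may sleep without blocking posters. A sketch of that drain-and-process idiom with an invented event type; struct qla_work_evt and the real locking context are not reproduced here:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

/* Invented event type standing in for struct qla_work_evt. */
struct demo_evt {
	struct list_head list;
	int code;
};

static LIST_HEAD(pending);		/* shared producer/consumer list */
static DEFINE_SPINLOCK(pending_lock);	/* protects only the list itself */

static void demo_post(int code)
{
	struct demo_evt *e = kzalloc(sizeof(*e), GFP_ATOMIC);
	unsigned long flags;

	if (!e)
		return;
	e->code = code;
	spin_lock_irqsave(&pending_lock, flags);
	list_add_tail(&e->list, &pending);
	spin_unlock_irqrestore(&pending_lock, flags);
}

static void demo_drain(void)
{
	struct demo_evt *e, *tmp;
	unsigned long flags;
	LIST_HEAD(work);

	/* Move everything to a private list, then process without the lock. */
	spin_lock_irqsave(&pending_lock, flags);
	list_splice_init(&pending, &work);
	spin_unlock_irqrestore(&pending_lock, flags);

	list_for_each_entry_safe(e, tmp, &work, list) {
		list_del_init(&e->list);
		/* Processing here may sleep; the list lock is not held. */
		pr_info("demo: event %d\n", e->code);
		kfree(e);
	}
}

static int __init demo_init(void)
{
	demo_post(1);
	demo_post(2);
	demo_drain();
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");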
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 152ecfc26cd..6260505dceb 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -219,8 +219,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
- DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n",
- __func__, vha->host_no));
+ DEBUG9_10(qla_printk(KERN_WARNING, ha,
+ "NVRAM didn't go ready...\n"));
break;
}
NVRAM_DELAY();
@@ -349,7 +349,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
- DEBUG9_10(qla_printk(
+ DEBUG9_10(qla_printk(KERN_WARNING, ha,
"NVRAM didn't go ready...\n"));
break;
}
@@ -408,7 +408,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
- DEBUG9_10(qla_printk("NVRAM didn't go ready...\n"));
+ DEBUG9_10(qla_printk(KERN_WARNING, ha,
+ "NVRAM didn't go ready...\n"));
break;
}
NVRAM_DELAY();
@@ -701,32 +702,35 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
break;
case FLT_REG_VPD_0:
ha->flt_region_vpd_nvram = start;
- if (!(PCI_FUNC(ha->pdev->devfn) & 1))
+ if (ha->flags.port0)
ha->flt_region_vpd = start;
break;
case FLT_REG_VPD_1:
- if (PCI_FUNC(ha->pdev->devfn) & 1)
+ if (!ha->flags.port0)
ha->flt_region_vpd = start;
break;
case FLT_REG_NVRAM_0:
- if (!(PCI_FUNC(ha->pdev->devfn) & 1))
+ if (ha->flags.port0)
ha->flt_region_nvram = start;
break;
case FLT_REG_NVRAM_1:
- if (PCI_FUNC(ha->pdev->devfn) & 1)
+ if (!ha->flags.port0)
ha->flt_region_nvram = start;
break;
case FLT_REG_FDT:
ha->flt_region_fdt = start;
break;
case FLT_REG_NPIV_CONF_0:
- if (!(PCI_FUNC(ha->pdev->devfn) & 1))
+ if (ha->flags.port0)
ha->flt_region_npiv_conf = start;
break;
case FLT_REG_NPIV_CONF_1:
- if (PCI_FUNC(ha->pdev->devfn) & 1)
+ if (!ha->flags.port0)
ha->flt_region_npiv_conf = start;
break;
+ case FLT_REG_GOLD_FW:
+ ha->flt_region_gold_fw = start;
+ break;
}
}
goto done;
@@ -744,12 +748,12 @@ no_flash_data:
ha->flt_region_fw = def_fw[def];
ha->flt_region_boot = def_boot[def];
ha->flt_region_vpd_nvram = def_vpd_nvram[def];
- ha->flt_region_vpd = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
+ ha->flt_region_vpd = ha->flags.port0 ?
def_vpd0[def]: def_vpd1[def];
- ha->flt_region_nvram = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
+ ha->flt_region_nvram = ha->flags.port0 ?
def_nvram0[def]: def_nvram1[def];
ha->flt_region_fdt = def_fdt[def];
- ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
+ ha->flt_region_npiv_conf = ha->flags.port0 ?
def_npiv_conf0[def]: def_npiv_conf1[def];
done:
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
@@ -924,6 +928,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
struct fc_vport_identifiers vid;
struct fc_vport *vport;
+ memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
+
flags = le16_to_cpu(entry->flags);
if (flags == 0xffff)
continue;
@@ -937,9 +943,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
vid.port_name = wwn_to_u64(entry->port_name);
vid.node_name = wwn_to_u64(entry->node_name);
- memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
-
- DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
+ DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
"wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
entry->q_qos, entry->f_qos));
@@ -955,7 +959,6 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
}
done:
kfree(data);
- ha->npiv_info = NULL;
}
static int
@@ -1079,8 +1082,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
0xff0000) | ((fdata >> 16) & 0xff));
ret = qla24xx_erase_sector(vha, fdata);
if (ret != QLA_SUCCESS) {
- DEBUG9(qla_printk("Unable to erase sector: "
- "address=%x.\n", faddr));
+ DEBUG9(qla_printk(KERN_WARNING, ha,
+ "Unable to erase sector: address=%x.\n",
+ faddr));
break;
}
}
@@ -1240,8 +1244,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
ret = qla24xx_write_flash_dword(ha,
nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
if (ret != QLA_SUCCESS) {
- DEBUG9(qla_printk("Unable to program nvram address=%x "
- "data=%x.\n", naddr, *dwptr));
+ DEBUG9(qla_printk(KERN_WARNING, ha,
+ "Unable to program nvram address=%x data=%x.\n",
+ naddr, *dwptr));
break;
}
}
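The flash-region selection above now keys off ha->flags.port0 instead of testing PCI_FUNC(devfn); the flag is set in qla2x00_set_isp_flags() from the PCI interrupt pin register, with an odd pin value marking physical port 0. A minimal sketch of that derivation, assuming a probe context with a valid struct pci_dev:

#include <linux/pci.h>

/*
 * Sketch only: derive a "port 0" flag from the interrupt pin register,
 * mirroring the check added to qla2x00_set_isp_flags() (odd pin -> port 0).
 */
static inline bool demo_is_port0(struct pci_dev *pdev)
{
	u8 pin = 0;

	pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
	return pin & 1;
}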
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 19d1afc3a34..b63feaf4312 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.01-k1"
+#define QLA2XXX_VERSION "8.03.01-k3"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 166417a6afb..2de5f3ad640 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1225,8 +1225,8 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target);
* @starget: SCSI target pointer
* @lun: SCSI Logical Unit Number
*
- * Description: Looks up the scsi_device with the specified @channel, @id, @lun
- * for a given host. The returned scsi_device has an additional reference that
+ * Description: Looks up the scsi_device with the specified @lun for a given
+ * @starget. The returned scsi_device has an additional reference that
* needs to be released with scsi_device_put once you're done with it.
**/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 213123b0486..41a21772df1 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -887,7 +887,7 @@ static int resp_start_stop(struct scsi_cmnd * scp,
static sector_t get_sdebug_capacity(void)
{
if (scsi_debug_virtual_gb > 0)
- return 2048 * 1024 * scsi_debug_virtual_gb;
+ return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
else
return sdebug_store_sectors;
}
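The scsi_debug change above is a 32-bit overflow fix: 2048 * 1024 * scsi_debug_virtual_gb is evaluated in int and wraps once the simulated capacity reaches 1 TB, and only then is the result widened to sector_t; casting one operand first makes the whole multiply 64-bit. A userspace reproduction of the effect (assuming a 64-bit sector_t, and using unsigned math to model the wrap without undefined behaviour):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;	/* assumption: 64-bit sector_t (CONFIG_LBDAF) */

int main(void)
{
	unsigned int virtual_gb = 2048;		/* 2 TB of simulated capacity */

	/* Buggy form, modelled with unsigned math: the product wraps at 32 bits. */
	sector_t wrapped = (uint32_t)(2048u * 1024u * virtual_gb);
	/* Fixed form from the patch: widen an operand before multiplying. */
	sector_t correct = 2048 * 1024 * (sector_t)virtual_gb;

	printf("wrapped=%llu correct=%llu\n",
	       (unsigned long long)wrapped, (unsigned long long)correct);
	return 0;
}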
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 0c2c73be197..a1689353d7f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -641,9 +641,9 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
/**
* scsi_eh_restore_cmnd - Restore a scsi command info as part of error recory
* @scmd: SCSI command structure to restore
- * @ses: saved information from a coresponding call to scsi_prep_eh_cmnd
+ * @ses: saved information from a corresponding call to scsi_eh_prep_cmnd
*
- * Undo any damage done by above scsi_prep_eh_cmnd().
+ * Undo any damage done by above scsi_eh_prep_cmnd().
*/
void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
{
@@ -1451,28 +1451,21 @@ static void eh_lock_door_done(struct request *req, int uptodate)
* @sdev: SCSI device to prevent medium removal
*
* Locking:
- * We must be called from process context; scsi_allocate_request()
- * may sleep.
+ * We must be called from process context.
*
* Notes:
* We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
* head of the devices request queue, and continue.
- *
- * Bugs:
- * scsi_allocate_request() may sleep waiting for existing requests to
- * be processed. However, since we haven't kicked off any request
- * processing for this host, this may deadlock.
- *
- * If scsi_allocate_request() fails for what ever reason, we
- * completely forget to lock the door.
*/
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
struct request *req;
+ /*
+ * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
+ * request becomes available
+ */
req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
- if (!req)
- return;
req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
req->cmd[1] = 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index bb218c8b6e9..30f3275e119 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -240,11 +240,11 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
* is invalid. Prevent the garbage from being misinterpreted
* and prevent security leaks by zeroing out the excess data.
*/
- if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
- memset(buffer + (bufflen - req->data_len), 0, req->data_len);
+ if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
+ memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
if (resid)
- *resid = req->data_len;
+ *resid = req->resid_len;
ret = req->errors;
out:
blk_put_request(req);
@@ -546,14 +546,9 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
* to queue the remainder of them.
*/
if (blk_end_request(req, error, bytes)) {
- int leftover = (req->hard_nr_sectors << 9);
-
- if (blk_pc_request(req))
- leftover = req->data_len;
-
/* kill remainder if no retrys */
if (error && scsi_noretry_cmd(cmd))
- blk_end_request(req, error, leftover);
+ blk_end_request_all(req, error);
else {
if (requeue) {
/*
@@ -673,34 +668,6 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
EXPORT_SYMBOL(scsi_release_buffers);
/*
- * Bidi commands Must be complete as a whole, both sides at once.
- * If part of the bytes were written and lld returned
- * scsi_in()->resid and/or scsi_out()->resid this information will be left
- * in req->data_len and req->next_rq->data_len. The upper-layer driver can
- * decide what to do with this information.
- */
-static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
-{
- struct request *req = cmd->request;
- unsigned int dlen = req->data_len;
- unsigned int next_dlen = req->next_rq->data_len;
-
- req->data_len = scsi_out(cmd)->resid;
- req->next_rq->data_len = scsi_in(cmd)->resid;
-
- /* The req and req->next_rq have not been completed */
- BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
-
- scsi_release_buffers(cmd);
-
- /*
- * This will goose the queue request function at the end, so we don't
- * need to worry about launching another command.
- */
- scsi_next_command(cmd);
-}
-
-/*
* Function: scsi_io_completion()
*
* Purpose: Completion processing for block device I/O requests.
@@ -739,7 +706,6 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- int this_count;
struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
int error = 0;
@@ -773,12 +739,22 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (!sense_deferred)
error = -EIO;
}
+
+ req->resid_len = scsi_get_resid(cmd);
+
if (scsi_bidi_cmnd(cmd)) {
- /* will also release_buffers */
- scsi_end_bidi_request(cmd);
+ /*
+ * Bidi commands Must be complete as a whole,
+ * both sides at once.
+ */
+ req->next_rq->resid_len = scsi_in(cmd)->resid;
+
+ blk_end_request_all(req, 0);
+
+ scsi_release_buffers(cmd);
+ scsi_next_command(cmd);
return;
}
- req->data_len = scsi_get_resid(cmd);
}
BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
@@ -787,9 +763,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* Next deal with any sectors which we were able to correctly
* handle.
*/
- SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
+ SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
"%d bytes done.\n",
- req->nr_sectors, good_bytes));
+ blk_rq_sectors(req), good_bytes));
/*
* Recovered errors need reporting, but they're always treated
@@ -812,7 +788,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
return;
- this_count = blk_rq_bytes(req);
error = -EIO;
@@ -922,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (driver_byte(result) & DRIVER_SENSE)
scsi_print_sense("", cmd);
}
- blk_end_request(req, -EIO, blk_rq_bytes(req));
+ blk_end_request_all(req, -EIO);
scsi_next_command(cmd);
break;
case ACTION_REPREP:
@@ -965,10 +940,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
BUG_ON(count > sdb->table.nents);
sdb->table.nents = count;
- if (blk_pc_request(req))
- sdb->length = req->data_len;
- else
- sdb->length = req->nr_sectors << 9;
+ sdb->length = blk_rq_bytes(req);
return BLKPREP_OK;
}
@@ -1087,22 +1059,21 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
if (unlikely(ret))
return ret;
} else {
- BUG_ON(req->data_len);
- BUG_ON(req->data);
+ BUG_ON(blk_rq_bytes(req));
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
req->buffer = NULL;
}
cmd->cmd_len = req->cmd_len;
- if (!req->data_len)
+ if (!blk_rq_bytes(req))
cmd->sc_data_direction = DMA_NONE;
else if (rq_data_dir(req) == WRITE)
cmd->sc_data_direction = DMA_TO_DEVICE;
else
cmd->sc_data_direction = DMA_FROM_DEVICE;
- cmd->transfersize = req->data_len;
+ cmd->transfersize = blk_rq_bytes(req);
cmd->allowed = req->retries;
return BLKPREP_OK;
}
@@ -1212,7 +1183,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
break;
case BLKPREP_DEFER:
/*
- * If we defer, the elv_next_request() returns NULL, but the
+ * If we defer, the blk_peek_request() returns NULL, but the
* queue must be restarted, so we plug here if no returning
* command will automatically do that.
*/
@@ -1388,7 +1359,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
struct scsi_target *starget = scsi_target(sdev);
struct Scsi_Host *shost = sdev->host;
- blkdev_dequeue_request(req);
+ blk_start_request(req);
if (unlikely(cmd == NULL)) {
printk(KERN_CRIT "impossible request in %s.\n",
@@ -1480,7 +1451,7 @@ static void scsi_request_fn(struct request_queue *q)
if (!sdev) {
printk("scsi: killing requests for dead queue\n");
- while ((req = elv_next_request(q)) != NULL)
+ while ((req = blk_peek_request(q)) != NULL)
scsi_kill_request(req, q);
return;
}
@@ -1501,7 +1472,7 @@ static void scsi_request_fn(struct request_queue *q)
* that the request is fully prepared even if we cannot
* accept it.
*/
- req = elv_next_request(q);
+ req = blk_peek_request(q);
if (!req || !scsi_dev_queue_ready(q, sdev))
break;
@@ -1517,7 +1488,7 @@ static void scsi_request_fn(struct request_queue *q)
* Remove the request from the request list.
*/
if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
- blkdev_dequeue_request(req);
+ blk_start_request(req);
sdev->device_busy++;
spin_unlock(q->queue_lock);
@@ -2441,20 +2412,18 @@ int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
struct request_queue *q = sdev->request_queue;
- int err;
unsigned long flags;
/*
* Try to transition the scsi device to SDEV_RUNNING
* and goose the device queue if successful.
*/
- err = scsi_device_set_state(sdev, SDEV_RUNNING);
- if (err) {
- err = scsi_device_set_state(sdev, SDEV_CREATED);
-
- if (err)
- return err;
- }
+ if (sdev->sdev_state == SDEV_BLOCK)
+ sdev->sdev_state = SDEV_RUNNING;
+ else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
+ sdev->sdev_state = SDEV_CREATED;
+ else
+ return -EINVAL;
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queue(q);
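These scsi_lib.c hunks follow the block-layer API cleanup being merged in this window: queue heads are inspected with blk_peek_request(), dequeued with blk_start_request(), and request sizes are read through accessors such as blk_rq_bytes() and blk_rq_sectors() rather than req->data_len or req->nr_sectors. A rough sketch of a request_fn written against that interface (hypothetical demo driver, error paths omitted; not the SCSI midlayer's actual loop):

#include <linux/blkdev.h>

/* Sketch: a strict-queue request_fn using the accessor-based API.
 * In a real (hypothetical) driver it would be installed with
 * blk_init_queue(demo_request_fn, &demo_lock); the queue lock is
 * held on entry. */
static void demo_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		blk_start_request(req);	/* dequeue; replaces blkdev_dequeue_request() */

		pr_info("demo: %u bytes in %u sectors\n",
			blk_rq_bytes(req), blk_rq_sectors(req));

		/* Complete the whole request; locked variant because the
		 * queue lock is held inside a request_fn. */
		__blk_end_request_all(req, 0);
	}
}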
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6f51ca485f3..c4478380140 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -115,12 +115,12 @@ MODULE_PARM_DESC(max_report_luns,
"REPORT LUNS maximum number of LUNS received (should be"
" between 1 and 16384)");
-static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3;
+static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(inq_timeout,
"Timeout (in seconds) waiting for devices to answer INQUIRY."
- " Default is 5. Some non-compliant devices need more.");
+ " Default is 20. Some devices may need more; most need less.");
/* This lock protects only this list */
static DEFINE_SPINLOCK(async_scan_lock);
@@ -425,6 +425,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
INIT_LIST_HEAD(&starget->devices);
starget->state = STARGET_CREATED;
starget->scsi_level = SCSI_2;
+ starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
retry:
spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 48ba413f7f6..10303272ba4 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -387,7 +387,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
* we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
* length for us.
*/
- cmd->sdb.length = rq->data_len;
+ cmd->sdb.length = blk_rq_bytes(rq);
return 0;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 09479545529..f3e664628d7 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -37,7 +37,6 @@
#define ISCSI_TRANSPORT_VERSION "2.0-870"
struct iscsi_internal {
- int daemon_pid;
struct scsi_transport_template t;
struct iscsi_transport *iscsi_transport;
struct list_head list;
@@ -357,7 +356,7 @@ int iscsi_session_chkready(struct iscsi_cls_session *session)
err = 0;
break;
case ISCSI_SESSION_FAILED:
- err = DID_TRANSPORT_DISRUPTED << 16;
+ err = DID_IMM_RETRY << 16;
break;
case ISCSI_SESSION_FREE:
err = DID_TRANSPORT_FAILFAST << 16;
@@ -938,23 +937,9 @@ iscsi_if_transport_lookup(struct iscsi_transport *tt)
}
static int
-iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp)
+iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
{
- return netlink_broadcast(nls, skb, 0, 1, gfp);
-}
-
-static int
-iscsi_unicast_skb(struct sk_buff *skb, int pid)
-{
- int rc;
-
- rc = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
- if (rc < 0) {
- printk(KERN_ERR "iscsi: can not unicast skb (%d)\n", rc);
- return rc;
- }
-
- return 0;
+ return nlmsg_multicast(nls, skb, 0, group, gfp);
}
int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
@@ -980,7 +965,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
return -ENOMEM;
}
- nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
ev = NLMSG_DATA(nlh);
memset(ev, 0, sizeof(*ev));
ev->transport_handle = iscsi_handle(conn->transport);
@@ -991,10 +976,45 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
- return iscsi_unicast_skb(skb, priv->daemon_pid);
+ return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
+int iscsi_offload_mesg(struct Scsi_Host *shost,
+ struct iscsi_transport *transport, uint32_t type,
+ char *data, uint16_t data_size)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ struct iscsi_uevent *ev;
+ int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+
+ skb = alloc_skb(len, GFP_NOIO);
+ if (!skb) {
+ printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
+ return -ENOMEM;
+ }
+
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = NLMSG_DATA(nlh);
+ memset(ev, 0, sizeof(*ev));
+ ev->type = type;
+ ev->transport_handle = iscsi_handle(transport);
+ switch (type) {
+ case ISCSI_KEVENT_PATH_REQ:
+ ev->r.req_path.host_no = shost->host_no;
+ break;
+ case ISCSI_KEVENT_IF_DOWN:
+ ev->r.notify_if_down.host_no = shost->host_no;
+ break;
+ }
+
+ memcpy((char *)ev + sizeof(*ev), data, data_size);
+
+ return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO);
+}
+EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
+
void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
struct nlmsghdr *nlh;
@@ -1014,7 +1034,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
return;
}
- nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
ev = NLMSG_DATA(nlh);
ev->transport_handle = iscsi_handle(conn->transport);
ev->type = ISCSI_KEVENT_CONN_ERROR;
@@ -1022,7 +1042,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
ev->r.connerror.cid = conn->cid;
ev->r.connerror.sid = iscsi_conn_get_sid(conn);
- iscsi_broadcast_skb(skb, GFP_ATOMIC);
+ iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
error);
@@ -1030,8 +1050,8 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
static int
-iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
- void *payload, int size)
+iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
+ void *payload, int size)
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
@@ -1045,10 +1065,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
return -ENOMEM;
}
- nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0);
+ nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
nlh->nlmsg_flags = flags;
memcpy(NLMSG_DATA(nlh), payload, size);
- return iscsi_unicast_skb(skb, pid);
+ return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
}
static int
@@ -1085,7 +1105,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
return -ENOMEM;
}
- nlhstat = __nlmsg_put(skbstat, priv->daemon_pid, 0, 0,
+ nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
(len - sizeof(*nlhstat)), 0);
evstat = NLMSG_DATA(nlhstat);
memset(evstat, 0, sizeof(*evstat));
@@ -1109,7 +1129,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
skb_trim(skbstat, NLMSG_ALIGN(actual_size));
nlhstat->nlmsg_len = actual_size;
- err = iscsi_unicast_skb(skbstat, priv->daemon_pid);
+ err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID,
+ GFP_ATOMIC);
} while (err < 0 && err != -ECONNREFUSED);
return err;
@@ -1143,7 +1164,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
return -ENOMEM;
}
- nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
ev = NLMSG_DATA(nlh);
ev->transport_handle = iscsi_handle(session->transport);
@@ -1172,7 +1193,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
* this will occur if the daemon is not up, so we just warn
* the user and when the daemon is restarted it will handle it
*/
- rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
+ rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
if (rc == -ESRCH)
iscsi_cls_session_printk(KERN_ERR, session,
"Cannot notify userspace of session "
@@ -1268,26 +1289,54 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
return err;
}
+static int iscsi_if_ep_connect(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+{
+ struct iscsi_endpoint *ep;
+ struct sockaddr *dst_addr;
+ struct Scsi_Host *shost = NULL;
+ int non_blocking, err = 0;
+
+ if (!transport->ep_connect)
+ return -EINVAL;
+
+ if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) {
+ shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no);
+ if (!shost) {
+ printk(KERN_ERR "ep connect failed. Could not find "
+ "host no %u\n",
+ ev->u.ep_connect_through_host.host_no);
+ return -ENODEV;
+ }
+ non_blocking = ev->u.ep_connect_through_host.non_blocking;
+ } else
+ non_blocking = ev->u.ep_connect.non_blocking;
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+ ep = transport->ep_connect(shost, dst_addr, non_blocking);
+ if (IS_ERR(ep)) {
+ err = PTR_ERR(ep);
+ goto release_host;
+ }
+
+ ev->r.ep_connect_ret.handle = ep->id;
+release_host:
+ if (shost)
+ scsi_host_put(shost);
+ return err;
+}
+
static int
iscsi_if_transport_ep(struct iscsi_transport *transport,
struct iscsi_uevent *ev, int msg_type)
{
struct iscsi_endpoint *ep;
- struct sockaddr *dst_addr;
int rc = 0;
switch (msg_type) {
+ case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
- if (!transport->ep_connect)
- return -EINVAL;
-
- dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
- ep = transport->ep_connect(dst_addr,
- ev->u.ep_connect.non_blocking);
- if (IS_ERR(ep))
- return PTR_ERR(ep);
-
- ev->r.ep_connect_ret.handle = ep->id;
+ rc = iscsi_if_ep_connect(transport, ev, msg_type);
break;
case ISCSI_UEVENT_TRANSPORT_EP_POLL:
if (!transport->ep_poll)
@@ -1365,7 +1414,31 @@ iscsi_set_host_param(struct iscsi_transport *transport,
}
static int
-iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_path *params;
+ int err;
+
+ if (!transport->set_path)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.set_path.host_no);
+ if (!shost) {
+ printk(KERN_ERR "set path could not find host no %u\n",
+ ev->u.set_path.host_no);
+ return -ENODEV;
+ }
+
+ params = (struct iscsi_path *)((char *)ev + sizeof(*ev));
+ err = transport->set_path(shost, params);
+
+ scsi_host_put(shost);
+ return err;
+}
+
+static int
+iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
int err = 0;
struct iscsi_uevent *ev = NLMSG_DATA(nlh);
@@ -1375,6 +1448,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
struct iscsi_cls_conn *conn;
struct iscsi_endpoint *ep = NULL;
+ if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
+ *group = ISCSI_NL_GRP_UIP;
+ else
+ *group = ISCSI_NL_GRP_ISCSID;
+
priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
if (!priv)
return -EINVAL;
@@ -1383,8 +1461,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (!try_module_get(transport->owner))
return -EINVAL;
- priv->daemon_pid = NETLINK_CREDS(skb)->pid;
-
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_CREATE_SESSION:
err = iscsi_if_create_session(priv, ep, ev,
@@ -1469,6 +1545,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
case ISCSI_UEVENT_TRANSPORT_EP_POLL:
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
break;
case ISCSI_UEVENT_TGT_DSCVR:
@@ -1477,6 +1554,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
case ISCSI_UEVENT_SET_HOST_PARAM:
err = iscsi_set_host_param(transport, ev);
break;
+ case ISCSI_UEVENT_PATH_UPDATE:
+ err = iscsi_set_path(transport, ev);
+ break;
default:
err = -ENOSYS;
break;
@@ -1499,6 +1579,7 @@ iscsi_if_rx(struct sk_buff *skb)
uint32_t rlen;
struct nlmsghdr *nlh;
struct iscsi_uevent *ev;
+ uint32_t group;
nlh = nlmsg_hdr(skb);
if (nlh->nlmsg_len < sizeof(*nlh) ||
@@ -1511,7 +1592,7 @@ iscsi_if_rx(struct sk_buff *skb)
if (rlen > skb->len)
rlen = skb->len;
- err = iscsi_if_recv_msg(skb, nlh);
+ err = iscsi_if_recv_msg(skb, nlh, &group);
if (err) {
ev->type = ISCSI_KEVENT_IF_ERROR;
ev->iferror = err;
@@ -1525,8 +1606,7 @@ iscsi_if_rx(struct sk_buff *skb)
*/
if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
break;
- err = iscsi_if_send_reply(
- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+ err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
} while (err < 0 && err != -ECONNREFUSED);
skb_pull(skb, rlen);
@@ -1774,7 +1854,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
if (!priv)
return NULL;
INIT_LIST_HEAD(&priv->list);
- priv->daemon_pid = -1;
priv->iscsi_transport = tt;
priv->t.user_scan = iscsi_user_scan;
priv->t.create_work_queue = 1;
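
The iSCSI class changes above stop caching a daemon pid and instead publish every event to a netlink multicast group, so a restarted or additional listener only has to join the group. A compressed sketch of that send path (the socket, payload and group names are illustrative, not the driver's):

#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/netlink.h>

static struct sock *example_nls;	/* set up elsewhere with netlink_kernel_create() */

static int example_send_event(void *payload, int size, u32 group, gfp_t gfp)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len = NLMSG_SPACE(size);

	skb = alloc_skb(len, gfp);
	if (!skb)
		return -ENOMEM;

	nlh = __nlmsg_put(skb, 0, 0, 0, len - sizeof(*nlh), 0);
	memcpy(NLMSG_DATA(nlh), payload, size);

	/* No per-daemon pid to track: everyone subscribed to the group gets it. */
	return nlmsg_multicast(example_nls, skb, 0, group, gfp);
}
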
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 50988cbf7b2..d606452297c 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -163,12 +163,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
while (!blk_queue_plugged(q)) {
- req = elv_next_request(q);
+ req = blk_fetch_request(q);
if (!req)
break;
- blkdev_dequeue_request(req);
-
spin_unlock_irq(q->queue_lock);
handler = to_sas_internal(shost->transportt)->f->smp_handler;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 84044233b63..878b17a9af3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -384,9 +384,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
struct scsi_device *sdp = q->queuedata;
struct gendisk *disk = rq->rq_disk;
struct scsi_disk *sdkp;
- sector_t block = rq->sector;
+ sector_t block = blk_rq_pos(rq);
sector_t threshold;
- unsigned int this_count = rq->nr_sectors;
+ unsigned int this_count = blk_rq_sectors(rq);
int ret, host_dif;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -413,10 +413,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
this_count));
if (!sdp || !scsi_device_online(sdp) ||
- block + rq->nr_sectors > get_capacity(disk)) {
+ block + blk_rq_sectors(rq) > get_capacity(disk)) {
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
- "Finishing %ld sectors\n",
- rq->nr_sectors));
+ "Finishing %u sectors\n",
+ blk_rq_sectors(rq)));
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
"Retry with 0x%p\n", SCpnt));
goto out;
@@ -463,7 +463,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
* for this.
*/
if (sdp->sector_size == 1024) {
- if ((block & 1) || (rq->nr_sectors & 1)) {
+ if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
scmd_printk(KERN_ERR, SCpnt,
"Bad block number requested\n");
goto out;
@@ -473,7 +473,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
}
if (sdp->sector_size == 2048) {
- if ((block & 3) || (rq->nr_sectors & 3)) {
+ if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
scmd_printk(KERN_ERR, SCpnt,
"Bad block number requested\n");
goto out;
@@ -483,7 +483,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
}
if (sdp->sector_size == 4096) {
- if ((block & 7) || (rq->nr_sectors & 7)) {
+ if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
scmd_printk(KERN_ERR, SCpnt,
"Bad block number requested\n");
goto out;
@@ -512,10 +512,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
}
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
- "%s %d/%ld 512 byte blocks.\n",
+ "%s %d/%u 512 byte blocks.\n",
(rq_data_dir(rq) == WRITE) ?
"writing" : "reading", this_count,
- rq->nr_sectors));
+ blk_rq_sectors(rq)));
/* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
@@ -971,8 +971,8 @@ static struct block_device_operations sd_fops = {
static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
- u64 start_lba = scmd->request->sector;
- u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512);
+ u64 start_lba = blk_rq_pos(scmd->request);
+ u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
u64 bad_lba;
int info_valid;
@@ -1510,7 +1510,7 @@ got_data:
*/
sector_size = 512;
}
- blk_queue_hardsect_size(sdp->request_queue, sector_size);
+ blk_queue_logical_block_size(sdp->request_queue, sector_size);
{
char cap_str_2[10], cap_str_10[10];
@@ -1902,24 +1902,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
index = sdkp->index;
dev = &sdp->sdev_gendev;
- if (!sdp->request_queue->rq_timeout) {
- if (sdp->type != TYPE_MOD)
- blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
- else
- blk_queue_rq_timeout(sdp->request_queue,
- SD_MOD_TIMEOUT);
- }
-
- device_initialize(&sdkp->dev);
- sdkp->dev.parent = &sdp->sdev_gendev;
- sdkp->dev.class = &sd_disk_class;
- dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
-
- if (device_add(&sdkp->dev))
- goto out_free_index;
-
- get_device(&sdp->sdev_gendev);
-
if (index < SD_MAX_DISKS) {
gd->major = sd_major((index & 0xf0) >> 4);
gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
@@ -1954,11 +1936,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
sdp->removable ? "removable " : "");
-
- return;
-
- out_free_index:
- ida_remove(&sd_index_ida, index);
}
/**
@@ -2026,6 +2003,24 @@ static int sd_probe(struct device *dev)
sdkp->openers = 0;
sdkp->previous_state = 1;
+ if (!sdp->request_queue->rq_timeout) {
+ if (sdp->type != TYPE_MOD)
+ blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
+ else
+ blk_queue_rq_timeout(sdp->request_queue,
+ SD_MOD_TIMEOUT);
+ }
+
+ device_initialize(&sdkp->dev);
+ sdkp->dev.parent = &sdp->sdev_gendev;
+ sdkp->dev.class = &sd_disk_class;
+ dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
+
+ if (device_add(&sdkp->dev))
+ goto out_free_index;
+
+ get_device(&sdp->sdev_gendev);
+
async_schedule(sd_probe_async, sdkp);
return 0;
@@ -2055,8 +2050,10 @@ static int sd_probe(struct device *dev)
**/
static int sd_remove(struct device *dev)
{
- struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ struct scsi_disk *sdkp;
+ async_synchronize_full();
+ sdkp = dev_get_drvdata(dev);
device_del(&sdkp->dev);
del_gendisk(sdkp->disk);
sd_shutdown(dev);
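
sd.c above is converted to the new request accessors instead of reading struct request fields directly. A hedged illustration of what the accessors report (the function itself is made up for the example):

#include <linux/blkdev.h>
#include <linux/kernel.h>

/* Illustrative only: log where a request starts and how large it is. */
static void example_show_extent(struct request *rq)
{
	sector_t start = blk_rq_pos(rq);		/* replaces rq->sector */
	unsigned int sectors = blk_rq_sectors(rq);	/* replaces rq->nr_sectors */
	unsigned int bytes = blk_rq_bytes(rq);		/* issue size in bytes */

	printk(KERN_DEBUG "req %p: sector %llu, %u sectors (%u bytes)\n",
	       rq, (unsigned long long)start, sectors, bytes);
}

For the residual after completion, the sg and st hunks below switch from rq->data_len to rq->resid_len.
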
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 184dff49279..82f14a9482d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -507,7 +507,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
sector_sz = scmd->device->sector_size;
sectors = good_bytes / sector_sz;
- phys = scmd->request->sector & 0xffffffff;
+ phys = blk_rq_pos(scmd->request) & 0xffffffff;
if (sector_sz == 4096)
phys >>= 3;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 91e316fe652..8201387b4da 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -289,8 +289,8 @@ sg_open(struct inode *inode, struct file *filp)
if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
- sdp->sg_tablesize = min(q->max_hw_segments,
- q->max_phys_segments);
+ sdp->sg_tablesize = min(queue_max_hw_segments(q),
+ queue_max_phys_segments(q));
}
if ((sfp = sg_add_sfp(sdp, dev)))
filp->private_data = sfp;
@@ -909,7 +909,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (val < 0)
return -EINVAL;
val = min_t(int, val,
- sdp->device->request_queue->max_sectors * 512);
+ queue_max_sectors(sdp->device->request_queue) * 512);
if (val != sfp->reserve.bufflen) {
if (sg_res_in_use(sfp) || sfp->mmap_called)
return -EBUSY;
@@ -919,7 +919,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
- sdp->device->request_queue->max_sectors * 512);
+ queue_max_sectors(sdp->device->request_queue) * 512);
return put_user(val, ip);
case SG_SET_COMMAND_Q:
result = get_user(val, ip);
@@ -1059,7 +1059,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
return -ENODEV;
return scsi_ioctl(sdp->device, cmd_in, p);
case BLKSECTGET:
- return put_user(sdp->device->request_queue->max_sectors * 512,
+ return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
ip);
case BLKTRACESETUP:
return blk_trace_setup(sdp->device->request_queue,
@@ -1261,7 +1261,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
sense = rq->sense;
result = rq->errors;
- resid = rq->data_len;
+ resid = rq->resid_len;
SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
sdp->disk->disk_name, srp->header.pack_id, result));
@@ -1378,7 +1378,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
sdp->device = scsidp;
INIT_LIST_HEAD(&sdp->sfds);
init_waitqueue_head(&sdp->o_excl_wait);
- sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
+ sdp->sg_tablesize = min(queue_max_hw_segments(q),
+ queue_max_phys_segments(q));
sdp->index = k;
kref_init(&sdp->d_ref);
@@ -2056,7 +2057,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
sg_big_buff = def_reserved_size;
bufflen = min_t(int, sg_big_buff,
- sdp->device->request_queue->max_sectors * 512);
+ queue_max_sectors(sdp->device->request_queue) * 512);
sg_build_reserve(sfp, bufflen);
SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
sfp->reserve.bufflen, sfp->reserve.k_use_sg));
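
The sg.c hunks read queue limits through accessors rather than poking struct request_queue fields. A small sketch of the same two computations (the function and parameter names are invented for the example):

#include <linux/blkdev.h>
#include <linux/kernel.h>

/* Illustrative: derive a scatter-table size and a reserve-buffer bound. */
static void example_queue_limits(struct request_queue *q,
				 unsigned short *sg_tablesize,
				 int *reserve_bytes, int requested)
{
	*sg_tablesize = min(queue_max_hw_segments(q),
			    queue_max_phys_segments(q));
	*reserve_bytes = min_t(int, requested,
			       queue_max_sectors(q) * 512);
}
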
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0e1a0f2d2ad..cd350dfc121 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,7 +292,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
if (cd->device->sector_size == 2048)
error_sector <<= 2;
error_sector &= ~(block_sectors - 1);
- good_bytes = (error_sector - SCpnt->request->sector) << 9;
+ good_bytes = (error_sector -
+ blk_rq_pos(SCpnt->request)) << 9;
if (good_bytes < 0 || good_bytes >= this_count)
good_bytes = 0;
/*
@@ -349,8 +350,8 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
cd->disk->disk_name, block));
if (!cd->device || !scsi_device_online(cd->device)) {
- SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n",
- rq->nr_sectors));
+ SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
+ blk_rq_sectors(rq)));
SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
goto out;
}
@@ -413,7 +414,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
/*
* request doesn't start on hw block boundary, add scatter pads
*/
- if (((unsigned int)rq->sector % (s_size >> 9)) ||
+ if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
(scsi_bufflen(SCpnt) % s_size)) {
scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
goto out;
@@ -422,14 +423,14 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
- SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
+ SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
cd->cdi.name,
(rq_data_dir(rq) == WRITE) ?
"writing" : "reading",
- this_count, rq->nr_sectors));
+ this_count, blk_rq_sectors(rq)));
SCpnt->cmnd[1] = 0;
- block = (unsigned int)rq->sector / (s_size >> 9);
+ block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
if (this_count > 0xffff) {
this_count = 0xffff;
@@ -726,7 +727,7 @@ static void get_sectorsize(struct scsi_cd *cd)
}
queue = cd->device->request_queue;
- blk_queue_hardsect_size(queue, sector_size);
+ blk_queue_logical_block_size(queue, sector_size);
return;
}
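
Both sd.c and sr.c now report the medium's sector size with blk_queue_logical_block_size(), the renamed blk_queue_hardsect_size(). A minimal sketch, with the fallback the drivers apply for odd sizes (the function name is hypothetical):

#include <linux/blkdev.h>

/* Illustrative: publish the device's logical block size to the block layer. */
static void example_set_block_size(struct request_queue *q, unsigned int sector_size)
{
	if (sector_size != 512 && sector_size != 1024 &&
	    sector_size != 2048 && sector_size != 4096)
		sector_size = 512;	/* unsupported size: fall back, as sd.c does */

	blk_queue_logical_block_size(q, sector_size);
}
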
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index eb24efea8f1..b33d04250bb 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -463,7 +463,7 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
struct scsi_tape *STp = SRpnt->stp;
STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
- STp->buffer->cmdstat.residual = req->data_len;
+ STp->buffer->cmdstat.residual = req->resid_len;
if (SRpnt->waiting)
complete(SRpnt->waiting);
@@ -2964,7 +2964,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
!(STp->use_pf & PF_TESTED)) {
/* Try the other possible state of Page Format if not
already tried */
- STp->use_pf = !STp->use_pf | PF_TESTED;
+ STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
st_release_request(SRpnt);
SRpnt = NULL;
return st_int_ioctl(STp, cmd_in, arg);
@@ -3983,8 +3983,8 @@ static int st_probe(struct device *dev)
return -ENODEV;
}
- i = min(SDp->request_queue->max_hw_segments,
- SDp->request_queue->max_phys_segments);
+ i = min(queue_max_hw_segments(SDp->request_queue),
+ queue_max_phys_segments(SDp->request_queue));
if (st_max_sg_segs < i)
i = st_max_sg_segs;
buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 583966ec826..45374d66d26 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -737,11 +737,14 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_tcb *tp = &np->target[sdev->id];
struct sym_lcb *lp;
+ unsigned long flags;
+ int error;
if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
return -ENXIO;
- tp->starget = sdev->sdev_target;
+ spin_lock_irqsave(np->s.host->host_lock, flags);
+
/*
* Fail the device init if the device is flagged NOSCAN at BOOT in
* the NVRAM. This may speed up boot and maintain coherency with
@@ -753,26 +756,37 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
- starget_printk(KERN_INFO, tp->starget,
+ starget_printk(KERN_INFO, sdev->sdev_target,
"Scan at boot disabled in NVRAM\n");
- return -ENXIO;
+ error = -ENXIO;
+ goto out;
}
if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
- if (sdev->lun != 0)
- return -ENXIO;
- starget_printk(KERN_INFO, tp->starget,
+ if (sdev->lun != 0) {
+ error = -ENXIO;
+ goto out;
+ }
+ starget_printk(KERN_INFO, sdev->sdev_target,
"Multiple LUNs disabled in NVRAM\n");
}
lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
- if (!lp)
- return -ENOMEM;
+ if (!lp) {
+ error = -ENOMEM;
+ goto out;
+ }
+ if (tp->nlcb == 1)
+ tp->starget = sdev->sdev_target;
spi_min_period(tp->starget) = tp->usr_period;
spi_max_width(tp->starget) = tp->usr_width;
- return 0;
+ error = 0;
+out:
+ spin_unlock_irqrestore(np->s.host->host_lock, flags);
+
+ return error;
}
/*
@@ -819,12 +833,34 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
{
struct sym_hcb *np = sym_get_hcb(sdev->host);
- struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun);
+ struct sym_tcb *tp = &np->target[sdev->id];
+ struct sym_lcb *lp = sym_lp(tp, sdev->lun);
+ unsigned long flags;
+
+ spin_lock_irqsave(np->s.host->host_lock, flags);
+
+ if (lp->busy_itlq || lp->busy_itl) {
+ /*
+ * This really shouldn't happen, but we can't return an error
+ * so let's try to stop all on-going I/O.
+ */
+ starget_printk(KERN_WARNING, tp->starget,
+ "Removing busy LCB (%d)\n", sdev->lun);
+ sym_reset_scsi_bus(np, 1);
+ }
- if (lp->itlq_tbl)
- sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL");
- kfree(lp->cb_tags);
- sym_mfree_dma(lp, sizeof(*lp), "LCB");
+ if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
+ /*
+ * It was the last unit for this target.
+ */
+ tp->head.sval = 0;
+ tp->head.wval = np->rv_scntl3;
+ tp->head.uval = 0;
+ tp->tgoal.check_nego = 1;
+ tp->starget = NULL;
+ }
+
+ spin_unlock_irqrestore(np->s.host->host_lock, flags);
}
/*
@@ -890,6 +926,8 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
if (!((uc->target >> t) & 1))
continue;
tp = &np->target[t];
+ if (!tp->nlcb)
+ continue;
switch (uc->cmd) {
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index ffa70d1ed18..69ad4945c93 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -1896,6 +1896,15 @@ void sym_start_up(struct Scsi_Host *shost, int reason)
tp->head.sval = 0;
tp->head.wval = np->rv_scntl3;
tp->head.uval = 0;
+ if (tp->lun0p)
+ tp->lun0p->to_clear = 0;
+ if (tp->lunmp) {
+ int ln;
+
+ for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++)
+ if (tp->lunmp[ln])
+ tp->lunmp[ln]->to_clear = 0;
+ }
}
/*
@@ -4988,7 +4997,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
*/
if (ln && !tp->lunmp) {
tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!tp->lunmp)
goto fail;
}
@@ -5008,6 +5017,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
tp->lun0p = lp;
tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
}
+ tp->nlcb++;
/*
* Let the itl task point to error handling.
@@ -5085,6 +5095,43 @@ fail:
}
/*
+ * Lun control block deallocation. Returns the number of valid remaining LCBs
+ * for the target.
+ */
+int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln)
+{
+ struct sym_tcb *tp = &np->target[tn];
+ struct sym_lcb *lp = sym_lp(tp, ln);
+
+ tp->nlcb--;
+
+ if (ln) {
+ if (!tp->nlcb) {
+ kfree(tp->lunmp);
+ sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
+ tp->lunmp = NULL;
+ tp->luntbl = NULL;
+ tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
+ } else {
+ tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa));
+ tp->lunmp[ln] = NULL;
+ }
+ } else {
+ tp->lun0p = NULL;
+ tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
+ }
+
+ if (lp->itlq_tbl) {
+ sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
+ kfree(lp->cb_tags);
+ }
+
+ sym_mfree_dma(lp, sizeof(*lp), "LCB");
+
+ return tp->nlcb;
+}
+
+/*
* Queue a SCSI IO to the controller.
*/
int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index 9ebc8706b6b..053e63c8682 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -401,6 +401,7 @@ struct sym_tcb {
* An array of bus addresses is used on reselection.
*/
u32 *luntbl; /* LCBs bus address table */
+ int nlcb; /* Number of valid LCBs (including LUN #0) */
/*
* LUN table used by the C code.
@@ -1065,6 +1066,7 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int
struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
+int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln);
int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
int sym_reset_scsi_target(struct sym_hcb *np, int target);
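
The sym53c8xx changes pair the new nlcb counter in sym_alloc_lcb() with a sym_free_lcb() that returns how many LCBs remain, so slave_destroy can reset per-target negotiation state only when the last LUN goes away. The general shape of that counted teardown, sketched with invented names:

/* Illustrative counted-teardown pattern, not the driver's real structures. */
struct example_target {
	int nunits;		/* live per-LUN objects under this target */
};

static int example_free_unit(struct example_target *tp)
{
	tp->nunits--;		/* caller holds the host lock */
	return tp->nunits;	/* 0 means the last unit just went away */
}

/*
 * Caller side:
 *	if (example_free_unit(tp) == 0)
 *		reset per-target state (sval/wval/uval, renegotiation flag).
 */
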
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 601e95141cb..54023d41fd1 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1306,7 +1306,7 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs
if (linked_comm && SCpnt->device->queue_depth > 2
&& TLDEV(SCpnt->device->type)) {
HD(j)->cp_stat[i] = READY;
- flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE);
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
return 0;
}
@@ -1610,11 +1610,13 @@ static int reorder(unsigned int j, unsigned long cursec,
if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
- if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector;
- if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector;
+ if (blk_rq_pos(SCpnt->request) < minsec)
+ minsec = blk_rq_pos(SCpnt->request);
+ if (blk_rq_pos(SCpnt->request) > maxsec)
+ maxsec = blk_rq_pos(SCpnt->request);
- sl[n] = SCpnt->request->sector;
- ioseek += SCpnt->request->nr_sectors;
+ sl[n] = blk_rq_pos(SCpnt->request);
+ ioseek += blk_rq_sectors(SCpnt->request);
if (!n) continue;
@@ -1642,7 +1644,7 @@ static int reorder(unsigned int j, unsigned long cursec,
if (!input_only) for (n = 0; n < n_ready; n++) {
k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
- ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number;
+ ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
if (!n) continue;
@@ -1666,12 +1668,12 @@ static int reorder(unsigned int j, unsigned long cursec,
if (link_statistics && (overlap || !(flushcount % link_statistics)))
for (n = 0; n < n_ready; n++) {
k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
- printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\
+ printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\
" cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
(ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready,
- SCpnt->request->sector, SCpnt->request->nr_sectors, cursec,
- YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
+ blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
+ cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
YESNO(overlap), cpp->xdir);
}
#endif
@@ -1799,7 +1801,7 @@ static irqreturn_t ihdlr(unsigned int j)
if (linked_comm && SCpnt->device->queue_depth > 2
&& TLDEV(SCpnt->device->type))
- flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE);
+ flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
tstatus = status_byte(spp->target_status);
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index b4b39811b44..fb867a9f55e 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -137,6 +137,7 @@ struct uart_8250_port {
unsigned char mcr;
unsigned char mcr_mask; /* mask of user bits */
unsigned char mcr_force; /* mask of forced bits */
+ unsigned char cur_iotype; /* Running I/O type */
/*
* Some bits in registers are cleared on a read, so they must
@@ -286,6 +287,13 @@ static const struct serial8250_config uart_config[] = {
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO,
},
+ [PORT_AR7] = {
+ .name = "AR7",
+ .fifo_size = 16,
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ },
};
#if defined (CONFIG_SERIAL_8250_AU1X00)
@@ -471,6 +479,7 @@ static void io_serial_out(struct uart_port *p, int offset, int value)
static void set_io_from_upio(struct uart_port *p)
{
+ struct uart_8250_port *up = (struct uart_8250_port *)p;
switch (p->iotype) {
case UPIO_HUB6:
p->serial_in = hub6_serial_in;
@@ -509,6 +518,8 @@ static void set_io_from_upio(struct uart_port *p)
p->serial_out = io_serial_out;
break;
}
+ /* Remember loaded iotype */
+ up->cur_iotype = p->iotype;
}
static void
@@ -1937,6 +1948,9 @@ static int serial8250_startup(struct uart_port *port)
up->capabilities = uart_config[up->port.type].flags;
up->mcr = 0;
+ if (up->port.iotype != up->cur_iotype)
+ set_io_from_upio(port);
+
if (up->port.type == PORT_16C950) {
/* Wake up and initialize UART */
up->acr = 0;
@@ -2563,6 +2577,9 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (ret < 0)
probeflags &= ~PROBE_RSA;
+ if (up->port.iotype != up->cur_iotype)
+ set_io_from_upio(port);
+
if (flags & UART_CONFIG_TYPE)
autoconfig(up, probeflags);
if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
@@ -2671,6 +2688,11 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
{
int i;
+ for (i = 0; i < nr_uarts; i++) {
+ struct uart_8250_port *up = &serial8250_ports[i];
+ up->cur_iotype = 0xFF;
+ }
+
serial8250_isa_init_ports();
for (i = 0; i < nr_uarts; i++) {
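
The 8250 hunks cache the accessor set that was last installed (cur_iotype, with 0xFF meaning 'never loaded') and call set_io_from_upio() again only when the requested iotype differs. Restated as a small guard (a paraphrase of the patch, assuming it sits in 8250.c next to struct uart_8250_port and set_io_from_upio()):

static void example_sync_iotype(struct uart_8250_port *up)
{
	if (up->port.iotype != up->cur_iotype)
		set_io_from_upio(&up->port);	/* also records the new cur_iotype */
}
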
diff --git a/drivers/serial/8250_gsc.c b/drivers/serial/8250_gsc.c
index 418b4fe9a0a..33149d982e8 100644
--- a/drivers/serial/8250_gsc.c
+++ b/drivers/serial/8250_gsc.c
@@ -39,9 +39,9 @@ static int __init serial_init_chip(struct parisc_device *dev)
*/
if (parisc_parent(dev)->id.hw_type != HPHW_IOA)
printk(KERN_INFO
- "Serial: device 0x%lx not configured.\n"
+ "Serial: device 0x%llx not configured.\n"
"Enable support for Wax, Lasi, Asp or Dino.\n",
- dev->hpa.start);
+ (unsigned long long)dev->hpa.start);
return -ENODEV;
}
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 938bc1b6c3f..e371a9c1534 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -2776,6 +2776,9 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_OXSEMI, 0x950a,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_2_1130000 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_C950,
+ PCI_VENDOR_ID_OXSEMI, PCI_SUBDEVICE_ID_OXSEMI_C950, 0, 0,
+ pbn_b0_1_921600 },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_4_115200 },
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 343e3a35b6a..1132c5cae7a 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -833,6 +833,7 @@ config SERIAL_IMX
bool "IMX serial port support"
depends on ARM && (ARCH_IMX || ARCH_MXC)
select SERIAL_CORE
+ select RATIONAL
help
If you have a machine based on a Motorola IMX CPU you
can enable its onboard serial port by enabling this option.
@@ -860,7 +861,7 @@ config SERIAL_UARTLITE
Say Y here if you want to use the Xilinx uartlite serial controller.
To compile this driver as a module, choose M here: the
- module will be called uartlite.ko.
+ module will be called uartlite.
config SERIAL_UARTLITE_CONSOLE
bool "Support for console on Xilinx uartlite serial port"
@@ -1433,4 +1434,11 @@ config SPORT_BAUD_RATE
default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
+config SERIAL_TIMBERDALE
+ tristate "Support for timberdale UART"
+ depends on MFD_TIMBERDALE
+ select SERIAL_CORE
+ ---help---
+ Add support for UART controller on timberdale.
+
endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index d438eb2a73d..45a8658f54d 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -77,3 +77,4 @@ obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
+obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index e3a5ad5ef1d..58a4879c7e4 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -665,7 +665,7 @@ static struct uart_driver amba_reg = {
.cons = AMBA_CONSOLE,
};
-static int pl010_probe(struct amba_device *dev, void *id)
+static int pl010_probe(struct amba_device *dev, struct amba_id *id)
{
struct uart_amba_port *uap;
void __iomem *base;
@@ -686,7 +686,7 @@ static int pl010_probe(struct amba_device *dev, void *id)
goto out;
}
- base = ioremap(dev->res.start, PAGE_SIZE);
+ base = ioremap(dev->res.start, resource_size(&dev->res));
if (!base) {
ret = -ENOMEM;
goto free;
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 8b2b9700f3e..bf82e28770a 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -70,6 +70,23 @@ struct uart_amba_port {
struct clk *clk;
unsigned int im; /* interrupt mask */
unsigned int old_status;
+ unsigned int ifls; /* vendor-specific */
+};
+
+/* There is by now at least one vendor with differing details, so handle it */
+struct vendor_data {
+ unsigned int ifls;
+ unsigned int fifosize;
+};
+
+static struct vendor_data vendor_arm = {
+ .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
+ .fifosize = 16,
+};
+
+static struct vendor_data vendor_st = {
+ .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
+ .fifosize = 64,
};
static void pl011_stop_tx(struct uart_port *port)
@@ -360,8 +377,7 @@ static int pl011_startup(struct uart_port *port)
if (retval)
goto clk_dis;
- writew(UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
- uap->port.membase + UART011_IFLS);
+ writew(uap->ifls, uap->port.membase + UART011_IFLS);
/*
* Provoke TX FIFO interrupt into asserting.
@@ -729,9 +745,10 @@ static struct uart_driver amba_reg = {
.cons = AMBA_CONSOLE,
};
-static int pl011_probe(struct amba_device *dev, void *id)
+static int pl011_probe(struct amba_device *dev, struct amba_id *id)
{
struct uart_amba_port *uap;
+ struct vendor_data *vendor = id->data;
void __iomem *base;
int i, ret;
@@ -750,7 +767,7 @@ static int pl011_probe(struct amba_device *dev, void *id)
goto out;
}
- base = ioremap(dev->res.start, PAGE_SIZE);
+ base = ioremap(dev->res.start, resource_size(&dev->res));
if (!base) {
ret = -ENOMEM;
goto free;
@@ -762,12 +779,13 @@ static int pl011_probe(struct amba_device *dev, void *id)
goto unmap;
}
+ uap->ifls = vendor->ifls;
uap->port.dev = &dev->dev;
uap->port.mapbase = dev->res.start;
uap->port.membase = base;
uap->port.iotype = UPIO_MEM;
uap->port.irq = dev->irq[0];
- uap->port.fifosize = 16;
+ uap->port.fifosize = vendor->fifosize;
uap->port.ops = &amba_pl011_pops;
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = i;
@@ -812,6 +830,12 @@ static struct amba_id pl011_ids[] __initdata = {
{
.id = 0x00041011,
.mask = 0x000fffff,
+ .data = &vendor_arm,
+ },
+ {
+ .id = 0x00380802,
+ .mask = 0x00ffffff,
+ .data = &vendor_st,
},
{ 0, 0 },
};
@@ -845,7 +869,11 @@ static void __exit pl011_exit(void)
uart_unregister_driver(&amba_reg);
}
-module_init(pl011_init);
+/*
+ * While this can be a module, if builtin it's most likely the console
+ * So let's leave module_exit but move module_init to an earlier place
+ */
+arch_initcall(pl011_init);
module_exit(pl011_exit);
MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
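
amba-pl011 now hangs a vendor_data block off the amba_id table's .data pointer so probe() can pick up per-variant FIFO depth and IFLS defaults. A trimmed sketch of the lookup pattern (structure and values reduced for the example):

#include <linux/amba/bus.h>

struct example_vendor {
	unsigned int fifosize;
};

static struct example_vendor example_arm = {
	.fifosize = 16,
};

static struct amba_id example_ids[] = {
	{ .id = 0x00041011, .mask = 0x000fffff, .data = &example_arm },
	{ 0, 0 },
};

static int example_probe(struct amba_device *dev, struct amba_id *id)
{
	struct example_vendor *vendor = id->data;	/* per-variant parameters */

	return vendor->fifosize ? 0 : -ENODEV;
}
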
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index b3497d7e535..338b15c0a54 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1104,11 +1104,13 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
- /* save/disable interrupts and drain transmitter */
+ /*
+ * save/disable interrupts. The tty layer will ensure that the
+ * transmitter is empty if requested by the caller, so there's
+ * no need to wait for it here.
+ */
imr = UART_GET_IMR(port);
UART_PUT_IDR(port, -1);
- while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY))
- cpu_relax();
/* disable receiver and transmitter */
UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index d86123e0339..e2f6b1bfac9 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -330,6 +330,11 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
/* Clear TFI bit */
UART_PUT_LSR(uart, TFI);
#endif
+ /* Anomaly notes:
+ * 05000215 - we always clear ETBEI within last UART TX
+ * interrupt to end a string. It is always set
+ * again when a new tx is started.
+ */
UART_CLEAR_IER(uart, ETBEI);
return;
}
@@ -415,6 +420,7 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
set_dma_x_modify(uart->tx_dma_channel, 1);
+ SSYNC();
enable_dma(uart->tx_dma_channel);
UART_SET_IER(uart, ETBEI);
@@ -473,27 +479,41 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
int x_pos, pos;
- unsigned long flags;
-
- spin_lock_irqsave(&uart->port.lock, flags);
+ dma_disable_irq(uart->rx_dma_channel);
+ spin_lock_bh(&uart->port.lock);
+
+ /* A 2D DMA RX buffer ring is used. Because curr_y_count and
+ * curr_x_count can't be read atomically, curr_y_count should
+ * be read before curr_x_count. When curr_x_count is read,
+ * curr_y_count may already point at the next buffer line, but
+ * the position calculated here still refers to the old line.
+ * Such a stale position can be smaller than the current buffer
+ * tail and would deliver garbage into the tty buffer if it
+ * were not rejected.
+ */
uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
- if (uart->rx_dma_nrows == DMA_RX_YCOUNT)
+ if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
uart->rx_dma_nrows = 0;
x_pos = DMA_RX_XCOUNT - x_pos;
if (x_pos == DMA_RX_XCOUNT)
x_pos = 0;
pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
- if (pos != uart->rx_dma_buf.tail) {
+ /* Ignore the received data if the new position is on the same
+ * line as the current buffer tail and is smaller.
+ */
+ if (pos > uart->rx_dma_buf.tail ||
+ uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
uart->rx_dma_buf.head = pos;
bfin_serial_dma_rx_chars(uart);
uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
}
- spin_unlock_irqrestore(&uart->port.lock, flags);
+ spin_unlock_bh(&uart->port.lock);
+ dma_enable_irq(uart->rx_dma_channel);
mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
}
@@ -514,6 +534,11 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
disable_dma(uart->tx_dma_channel);
clear_dma_irqstat(uart->tx_dma_channel);
+ /* Anomaly notes:
+ * 05000215 - we always clear ETBEI within last UART TX
+ * interrupt to end a string. It is always set
+ * again when a new tx is started.
+ */
UART_CLEAR_IER(uart, ETBEI);
xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
uart->port.icount.tx += uart->tx_count;
@@ -532,11 +557,26 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
unsigned short irqstat;
+ int x_pos, pos;
spin_lock(&uart->port.lock);
irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
clear_dma_irqstat(uart->rx_dma_channel);
- bfin_serial_dma_rx_chars(uart);
+
+ uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
+ x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
+ uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
+ if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
+ uart->rx_dma_nrows = 0;
+
+ pos = uart->rx_dma_nrows * DMA_RX_XCOUNT;
+ if (pos > uart->rx_dma_buf.tail ||
+ uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
+ uart->rx_dma_buf.head = pos;
+ bfin_serial_dma_rx_chars(uart);
+ uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
+ }
+
spin_unlock(&uart->port.lock);
return IRQ_HANDLED;
@@ -789,8 +829,16 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
__func__);
}
- if (termios->c_cflag & CSTOPB)
- lcr |= STB;
+ /* Anomaly notes:
+ * 05000231 - the STOP bit is always set to 1, whatever the user sets.
+ */
+ if (termios->c_cflag & CSTOPB) {
+ if (ANOMALY_05000231)
+ printk(KERN_WARNING "STOP bits other than 1 is not "
+ "supported in case of anomaly 05000231.\n");
+ else
+ lcr |= STB;
+ }
if (termios->c_cflag & PARENB)
lcr |= PEN;
if (!(termios->c_cflag & PARODD))
@@ -940,6 +988,10 @@ static void bfin_serial_reset_irda(struct uart_port *port)
}
#ifdef CONFIG_CONSOLE_POLL
+/* Anomaly notes:
+ * 05000099 - Because we only use THRE in poll_put and DR in poll_get,
+ * losing other bits of UART_LSR is not a problem here.
+ */
static void bfin_serial_poll_put_char(struct uart_port *port, unsigned char chr)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
@@ -1245,12 +1297,17 @@ static __init void early_serial_write(struct console *con, const char *s,
}
}
+/*
+ * This should have a .setup or .early_setup in it, but then things get called
+ * without the command line options, and the baud rate gets messed up - so
+ * don't let the common infrastructure play with things. (see calls to setup
+ * & earlysetup in ./kernel/printk.c:register_console())
+ */
static struct __initdata console bfin_early_serial_console = {
.name = "early_BFuart",
.write = early_serial_write,
.device = uart_console_device,
.flags = CON_PRINTBUFFER,
- .setup = bfin_serial_console_setup,
.index = -1,
.data = &bfin_serial_reg,
};
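
The Blackfin RX paths above derive the DMA write position from the downward-counting Y and X counters of a 2D ring. The index math on its own, as a standalone sketch (the dimensions are illustrative, not the driver's DMA_RX_XCOUNT/DMA_RX_YCOUNT):

#define EX_XCOUNT 512			/* columns per buffer line */
#define EX_YCOUNT 8			/* number of buffer lines */

/* Illustrative: convert hardware counters (which count down) to a ring offset. */
static int example_ring_pos(int curr_y, int curr_x)
{
	int rows = EX_YCOUNT - curr_y;
	int col = EX_XCOUNT - curr_x;

	if (rows == EX_YCOUNT || curr_x == 0)
		rows = 0;		/* wrapped back to the first line */
	if (col == EX_XCOUNT)
		col = 0;

	return rows * EX_XCOUNT + col;
}
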
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index 529c0ff7952..34b4ae0fe76 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -101,15 +101,16 @@ static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
{
pr_debug("%s value:%x\n", __func__, value);
/* Place a Start and Stop bit */
- __asm__ volatile (
- "R2 = b#01111111100;\n\t"
- "R3 = b#10000000001;\n\t"
- "%0 <<= 2;\n\t"
- "%0 = %0 & R2;\n\t"
- "%0 = %0 | R3;\n\t"
- :"=r"(value)
- :"0"(value)
- :"R2", "R3");
+ __asm__ __volatile__ (
+ "R2 = b#01111111100;"
+ "R3 = b#10000000001;"
+ "%0 <<= 2;"
+ "%0 = %0 & R2;"
+ "%0 = %0 | R3;"
+ : "=d"(value)
+ : "d"(value)
+ : "ASTAT", "R2", "R3"
+ );
pr_debug("%s value:%x\n", __func__, value);
SPORT_PUT_TX(up, value);
@@ -118,27 +119,30 @@ static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
static inline unsigned int rx_one_byte(struct sport_uart_port *up)
{
unsigned int value, extract;
+ u32 tmp_mask1, tmp_mask2, tmp_shift, tmp;
value = SPORT_GET_RX32(up);
pr_debug("%s value:%x\n", __func__, value);
/* Extract 8 bits data */
- __asm__ volatile (
- "R5 = 0;\n\t"
- "P0 = 8;\n\t"
- "R1 = 0x1801(Z);\n\t"
- "R3 = 0x0300(Z);\n\t"
- "R4 = 0;\n\t"
- "LSETUP(loop_s, loop_e) LC0 = P0;\nloop_s:\t"
- "R2 = extract(%1, R1.L)(Z);\n\t"
- "R2 <<= R4;\n\t"
- "R5 = R5 | R2;\n\t"
- "R1 = R1 - R3;\nloop_e:\t"
- "R4 += 1;\n\t"
- "%0 = R5;\n\t"
- :"=r"(extract)
- :"r"(value)
- :"P0", "R1", "R2","R3","R4", "R5");
+ __asm__ __volatile__ (
+ "%[extr] = 0;"
+ "%[mask1] = 0x1801(Z);"
+ "%[mask2] = 0x0300(Z);"
+ "%[shift] = 0;"
+ "LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];"
+ ".Lloop_s:"
+ "%[tmp] = extract(%[val], %[mask1].L)(Z);"
+ "%[tmp] <<= %[shift];"
+ "%[extr] = %[extr] | %[tmp];"
+ "%[mask1] = %[mask1] - %[mask2];"
+ ".Lloop_e:"
+ "%[shift] += 1;"
+ : [val]"=d"(value), [extr]"=d"(extract), [shift]"=d"(tmp_shift), [tmp]"=d"(tmp),
+ [mask1]"=d"(tmp_mask1), [mask2]"=d"(tmp_mask2)
+ : "d"(value), [lc]"a"(8)
+ : "ASTAT", "LB0", "LC0", "LT0"
+ );
pr_debug(" extract:%x\n", extract);
return extract;
@@ -149,7 +153,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate)
int tclkdiv, tfsdiv, rclkdiv;
/* Set TCR1 and TCR2 */
- SPORT_PUT_TCR1(up, (LTFS | ITFS | TFSR | TLSBIT | ITCLK));
+ SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
SPORT_PUT_TCR2(up, 10);
pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
@@ -419,7 +423,7 @@ static void sport_shutdown(struct uart_port *port)
}
static void sport_set_termios(struct uart_port *port,
- struct termios *termios, struct termios *old)
+ struct ktermios *termios, struct ktermios *old)
{
pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
uart_update_timeout(port, CS8 ,port->uartclk);
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 6579e2be1dd..9f2891c2c4a 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -137,7 +137,12 @@ static LIST_HEAD(icom_adapter_head);
static spinlock_t icom_lock;
#ifdef ICOM_TRACE
-static inline void trace(struct icom_port *, char *, unsigned long) {};
+static inline void trace(struct icom_port *icom_port, char *trace_pt,
+ unsigned long trace_data)
+{
+ dev_info(&icom_port->adapter->pci_dev->dev, ":%d:%s - %lx\n",
+ icom_port->port, trace_pt, trace_data);
+}
#else
static inline void trace(struct icom_port *icom_port, char *trace_pt, unsigned long trace_data) {};
#endif
@@ -408,7 +413,7 @@ static void load_code(struct icom_port *icom_port)
release_firmware(fw);
/* Set Hardware level */
- if ((icom_port->adapter->version | ADAPTER_V2) == ADAPTER_V2)
+ if (icom_port->adapter->version == ADAPTER_V2)
writeb(V2_HARDWARE, &(icom_port->dram->misc_flags));
/* Start the processor in Adapter */
@@ -861,7 +866,7 @@ static irqreturn_t icom_interrupt(int irq, void *dev_id)
/* find icom_port for this interrupt */
icom_adapter = (struct icom_adapter *) dev_id;
- if ((icom_adapter->version | ADAPTER_V2) == ADAPTER_V2) {
+ if (icom_adapter->version == ADAPTER_V2) {
int_reg = icom_adapter->base_addr + 0x8024;
adapter_interrupts = readl(int_reg);
@@ -1472,8 +1477,8 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter)
free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter);
iounmap(icom_adapter->base_addr);
- icom_free_adapter(icom_adapter);
pci_release_regions(icom_adapter->pci_dev);
+ icom_free_adapter(icom_adapter);
}
static void icom_kref_release(struct kref *kref)
@@ -1647,15 +1652,6 @@ static void __exit icom_exit(void)
module_init(icom_init);
module_exit(icom_exit);
-#ifdef ICOM_TRACE
-static inline void trace(struct icom_port *icom_port, char *trace_pt,
- unsigned long trace_data)
-{
- dev_info(&icom_port->adapter->pci_dev->dev, ":%d:%s - %lx\n",
- icom_port->port, trace_pt, trace_data);
-}
-#endif
-
MODULE_AUTHOR("Michael Anderson <mjanders@us.ibm.com>");
MODULE_DESCRIPTION("IBM iSeries Serial IOA driver");
MODULE_SUPPORTED_DEVICE
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 9f460b175c5..5d7b58f1fe4 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -8,6 +8,9 @@
* Author: Sascha Hauer <sascha@saschahauer.de>
* Copyright (C) 2004 Pengutronix
*
+ * Copyright (C) 2009 emlix GmbH
+ * Author: Fabian Godehardt (added IrDA support for iMX)
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -41,6 +44,8 @@
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/rational.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -66,7 +71,7 @@
#define ONEMS 0xb0 /* One Millisecond register */
#define UTS 0xb4 /* UART Test Register */
#endif
-#if defined(CONFIG_ARCH_IMX) || defined(CONFIG_ARCH_MX1)
+#ifdef CONFIG_ARCH_MX1
#define BIPR1 0xb0 /* Incremental Preset Register 1 */
#define BIPR2 0xb4 /* Incremental Preset Register 2 */
#define BIPR3 0xb8 /* Incremental Preset Register 3 */
@@ -96,7 +101,7 @@
#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
#define UCR1_SNDBRK (1<<4) /* Send break */
#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
-#if defined(CONFIG_ARCH_IMX) || defined(CONFIG_ARCH_MX1)
+#ifdef CONFIG_ARCH_MX1
#define UCR1_UARTCLKEN (1<<2) /* UART clock enabled */
#endif
#if defined CONFIG_ARCH_MX3 || defined CONFIG_ARCH_MX2
@@ -127,7 +132,7 @@
#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
-#ifdef CONFIG_ARCH_IMX
+#ifdef CONFIG_ARCH_MX1
#define UCR3_REF25 (1<<3) /* Ref freq 25 MHz, only on mx1 */
#define UCR3_REF30 (1<<2) /* Ref Freq 30 MHz, only on mx1 */
#endif
@@ -148,6 +153,7 @@
#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
+#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */
#define USR1_RTSS (1<<14) /* RTS pin status */
@@ -180,13 +186,6 @@
#define UTS_SOFTRST (1<<0) /* Software reset */
/* We've been assigned a range on the "Low-density serial ports" major */
-#ifdef CONFIG_ARCH_IMX
-#define SERIAL_IMX_MAJOR 204
-#define MINOR_START 41
-#define DEV_NAME "ttySMX"
-#define MAX_INTERNAL_IRQ IMX_IRQS
-#endif
-
#ifdef CONFIG_ARCH_MXC
#define SERIAL_IMX_MAJOR 207
#define MINOR_START 16
@@ -211,10 +210,20 @@ struct imx_port {
struct timer_list timer;
unsigned int old_status;
int txirq,rxirq,rtsirq;
- int have_rtscts:1;
+ unsigned int have_rtscts:1;
+ unsigned int use_irda:1;
+ unsigned int irda_inv_rx:1;
+ unsigned int irda_inv_tx:1;
+ unsigned short trcv_delay; /* transceiver delay */
struct clk *clk;
};
+#ifdef CONFIG_IRDA
+#define USE_IRDA(sport) ((sport)->use_irda)
+#else
+#define USE_IRDA(sport) (0)
+#endif
+
/*
* Handle any change of modem status signal since we were last called.
*/
@@ -268,6 +277,48 @@ static void imx_stop_tx(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
+ if (USE_IRDA(sport)) {
+ /* half duplex - wait for end of transmission */
+ int n = 256;
+ while ((--n > 0) &&
+ !(readl(sport->port.membase + USR2) & USR2_TXDC)) {
+ udelay(5);
+ barrier();
+ }
+ /*
+ * irda transceiver - wait a bit more to avoid
+ * cutoff, hardware dependent
+ */
+ udelay(sport->trcv_delay);
+
+ /*
+ * half duplex - reactivate receive mode,
+ * flush receive pipe echo crap
+ */
+ if (readl(sport->port.membase + USR2) & USR2_TXDC) {
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_TXMPTYEN | UCR1_TRDYEN);
+ writel(temp, sport->port.membase + UCR1);
+
+ temp = readl(sport->port.membase + UCR4);
+ temp &= ~(UCR4_TCEN);
+ writel(temp, sport->port.membase + UCR4);
+
+ while (readl(sport->port.membase + URXD0) &
+ URXD_CHARRDY)
+ barrier();
+
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_RRDYEN;
+ writel(temp, sport->port.membase + UCR1);
+
+ temp = readl(sport->port.membase + UCR4);
+ temp |= UCR4_DREN;
+ writel(temp, sport->port.membase + UCR4);
+ }
+ return;
+ }
+
temp = readl(sport->port.membase + UCR1);
writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
}
@@ -302,13 +353,15 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
/* send xmit->buf[xmit->tail]
* out the port here */
writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
- xmit->tail = (xmit->tail + 1) &
- (UART_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
sport->port.icount.tx++;
if (uart_circ_empty(xmit))
break;
}
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+
if (uart_circ_empty(xmit))
imx_stop_tx(&sport->port);
}
@@ -321,9 +374,30 @@ static void imx_start_tx(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
+ if (USE_IRDA(sport)) {
+ /* half duplex in IrDA mode; have to disable receive mode */
+ temp = readl(sport->port.membase + UCR4);
+ temp &= ~(UCR4_DREN);
+ writel(temp, sport->port.membase + UCR4);
+
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_RRDYEN);
+ writel(temp, sport->port.membase + UCR1);
+ }
+
temp = readl(sport->port.membase + UCR1);
writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
+ if (USE_IRDA(sport)) {
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_TRDYEN;
+ writel(temp, sport->port.membase + UCR1);
+
+ temp = readl(sport->port.membase + UCR4);
+ temp |= UCR4_TCEN;
+ writel(temp, sport->port.membase + UCR4);
+ }
+
if (readl(sport->port.membase + UTS) & UTS_TXEMPTY)
imx_transmit_buffer(sport);
}
@@ -395,8 +469,7 @@ static irqreturn_t imx_rxint(int irq, void *dev_id)
continue;
}
- if (uart_handle_sysrq_char
- (&sport->port, (unsigned char)rx))
+ if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
continue;
if (rx & (URXD_PRERR | URXD_OVRRUN | URXD_FRMERR) ) {
@@ -471,26 +544,26 @@ static unsigned int imx_tx_empty(struct uart_port *port)
*/
static unsigned int imx_get_mctrl(struct uart_port *port)
{
- struct imx_port *sport = (struct imx_port *)port;
- unsigned int tmp = TIOCM_DSR | TIOCM_CAR;
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned int tmp = TIOCM_DSR | TIOCM_CAR;
- if (readl(sport->port.membase + USR1) & USR1_RTSS)
- tmp |= TIOCM_CTS;
+ if (readl(sport->port.membase + USR1) & USR1_RTSS)
+ tmp |= TIOCM_CTS;
- if (readl(sport->port.membase + UCR2) & UCR2_CTS)
- tmp |= TIOCM_RTS;
+ if (readl(sport->port.membase + UCR2) & UCR2_CTS)
+ tmp |= TIOCM_RTS;
- return tmp;
+ return tmp;
}
static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- struct imx_port *sport = (struct imx_port *)port;
+ struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
temp = readl(sport->port.membase + UCR2) & ~UCR2_CTS;
- if (mctrl & TIOCM_RTS)
+ if (mctrl & TIOCM_RTS)
temp |= UCR2_CTS;
writel(temp, sport->port.membase + UCR2);
@@ -534,12 +607,7 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
if(!ufcr_rfdiv)
ufcr_rfdiv = 1;
- if(ufcr_rfdiv >= 7)
- ufcr_rfdiv = 6;
- else
- ufcr_rfdiv = 6 - ufcr_rfdiv;
-
- val |= UFCR_RFDIV & (ufcr_rfdiv << 7);
+ val |= UFCR_RFDIV_REG(ufcr_rfdiv);
writel(val, sport->port.membase + UFCR);
@@ -558,8 +626,24 @@ static int imx_startup(struct uart_port *port)
* requesting IRQs
*/
temp = readl(sport->port.membase + UCR4);
+
+ if (USE_IRDA(sport))
+ temp |= UCR4_IRSC;
+
writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
+ if (USE_IRDA(sport)) {
+ /* reset fifo's and state machines */
+ int i = 100;
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~UCR2_SRST;
+ writel(temp, sport->port.membase + UCR2);
+ while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) &&
+ (--i > 0)) {
+ udelay(1);
+ }
+ }
+
/*
* Allocate the IRQ(s) i.MX1 has three interrupts whereas later
* chips only have one interrupt.
@@ -575,12 +659,16 @@ static int imx_startup(struct uart_port *port)
if (retval)
goto error_out2;
- retval = request_irq(sport->rtsirq, imx_rtsint,
- (sport->rtsirq < MAX_INTERNAL_IRQ) ? 0 :
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- DRIVER_NAME, sport);
- if (retval)
- goto error_out3;
+ /* do not use RTS IRQ on IrDA */
+ if (!USE_IRDA(sport)) {
+ retval = request_irq(sport->rtsirq, imx_rtsint,
+ (sport->rtsirq < MAX_INTERNAL_IRQ) ? 0 :
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING,
+ DRIVER_NAME, sport);
+ if (retval)
+ goto error_out3;
+ }
} else {
retval = request_irq(sport->port.irq, imx_int, 0,
DRIVER_NAME, sport);
@@ -597,18 +685,49 @@ static int imx_startup(struct uart_port *port)
temp = readl(sport->port.membase + UCR1);
temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
+
+ if (USE_IRDA(sport)) {
+ temp |= UCR1_IREN;
+ temp &= ~(UCR1_RTSDEN);
+ }
+
writel(temp, sport->port.membase + UCR1);
temp = readl(sport->port.membase + UCR2);
temp |= (UCR2_RXEN | UCR2_TXEN);
writel(temp, sport->port.membase + UCR2);
+ if (USE_IRDA(sport)) {
+ /* clear RX-FIFO */
+ int i = 64;
+ while ((--i > 0) &&
+ (readl(sport->port.membase + URXD0) & URXD_CHARRDY)) {
+ barrier();
+ }
+ }
+
#if defined CONFIG_ARCH_MX2 || defined CONFIG_ARCH_MX3
temp = readl(sport->port.membase + UCR3);
temp |= UCR3_RXDMUXSEL;
writel(temp, sport->port.membase + UCR3);
#endif
+ if (USE_IRDA(sport)) {
+ temp = readl(sport->port.membase + UCR4);
+ if (sport->irda_inv_rx)
+ temp |= UCR4_INVR;
+ else
+ temp &= ~(UCR4_INVR);
+ writel(temp | UCR4_DREN, sport->port.membase + UCR4);
+
+ temp = readl(sport->port.membase + UCR3);
+ if (sport->irda_inv_tx)
+ temp |= UCR3_INVT;
+ else
+ temp &= ~(UCR3_INVT);
+ writel(temp, sport->port.membase + UCR3);
+ }
+
/*
* Enable modem status interrupts
*/
@@ -616,6 +735,16 @@ static int imx_startup(struct uart_port *port)
imx_enable_ms(&sport->port);
spin_unlock_irqrestore(&sport->port.lock,flags);
+ if (USE_IRDA(sport)) {
+ struct imxuart_platform_data *pdata;
+ pdata = sport->port.dev->platform_data;
+ sport->irda_inv_rx = pdata->irda_inv_rx;
+ sport->irda_inv_tx = pdata->irda_inv_tx;
+ sport->trcv_delay = pdata->transceiver_delay;
+ if (pdata->irda_enable)
+ pdata->irda_enable(1);
+ }
+
return 0;
error_out3:
@@ -633,6 +762,17 @@ static void imx_shutdown(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~(UCR2_TXEN);
+ writel(temp, sport->port.membase + UCR2);
+
+ if (USE_IRDA(sport)) {
+ struct imxuart_platform_data *pdata;
+ pdata = sport->port.dev->platform_data;
+ if (pdata->irda_enable)
+ pdata->irda_enable(0);
+ }
+
/*
* Stop our timer.
*/
@@ -642,7 +782,8 @@ static void imx_shutdown(struct uart_port *port)
* Free the interrupts
*/
if (sport->txirq > 0) {
- free_irq(sport->rtsirq, sport);
+ if (!USE_IRDA(sport))
+ free_irq(sport->rtsirq, sport);
free_irq(sport->txirq, sport);
free_irq(sport->rxirq, sport);
} else
@@ -654,6 +795,9 @@ static void imx_shutdown(struct uart_port *port)
temp = readl(sport->port.membase + UCR1);
temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
+ if (USE_IRDA(sport))
+ temp &= ~(UCR1_IREN);
+
writel(temp, sport->port.membase + UCR1);
}
@@ -665,7 +809,9 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned long flags;
unsigned int ucr2, old_ucr1, old_txrxen, baud, quot;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
- unsigned int div, num, denom, ufcr;
+ unsigned int div, ufcr;
+ unsigned long num, denom;
+ uint64_t tdiv64;
/*
* If we don't support modem control lines, don't allow
@@ -761,38 +907,41 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
sport->port.membase + UCR2);
old_txrxen &= (UCR2_TXEN | UCR2_RXEN);
- div = sport->port.uartclk / (baud * 16);
- if (div > 7)
- div = 7;
- if (!div)
+ if (USE_IRDA(sport)) {
+ /*
+ * use maximum available submodule frequency to
+ * avoid missing short pulses due to low sampling rate
+ */
div = 1;
+ } else {
+ div = sport->port.uartclk / (baud * 16);
+ if (div > 7)
+ div = 7;
+ if (!div)
+ div = 1;
+ }
- num = baud;
- denom = port->uartclk / div / 16;
+ rational_best_approximation(16 * div * baud, sport->port.uartclk,
+ 1 << 16, 1 << 16, &num, &denom);
- /* shift num and denom right until they fit into 16 bits */
- while (num > 0x10000 || denom > 0x10000) {
- num >>= 1;
- denom >>= 1;
+ if (port->info && port->info->port.tty) {
+ tdiv64 = sport->port.uartclk;
+ tdiv64 *= num;
+ do_div(tdiv64, denom * 16 * div);
+ tty_encode_baud_rate(sport->port.info->port.tty,
+ (speed_t)tdiv64, (speed_t)tdiv64);
}
- if (num > 0)
- num -= 1;
- if (denom > 0)
- denom -= 1;
- writel(num, sport->port.membase + UBIR);
- writel(denom, sport->port.membase + UBMR);
-
- if (div == 7)
- div = 6; /* 6 in RFDIV means divide by 7 */
- else
- div = 6 - div;
+ num -= 1;
+ denom -= 1;
ufcr = readl(sport->port.membase + UFCR);
- ufcr = (ufcr & (~UFCR_RFDIV)) |
- (div << 7);
+ ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
writel(ufcr, sport->port.membase + UFCR);
+ writel(num, sport->port.membase + UBIR);
+ writel(denom, sport->port.membase + UBMR);
+
#ifdef ONEMS
writel(sport->port.uartclk / div / 1000, sport->port.membase + ONEMS);
#endif
@@ -1031,6 +1180,8 @@ imx_console_setup(struct console *co, char *options)
if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
co->index = 0;
sport = imx_ports[co->index];
+ if (sport == NULL)
+ return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1070,22 +1221,22 @@ static struct uart_driver imx_reg = {
static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
{
- struct imx_port *sport = platform_get_drvdata(dev);
+ struct imx_port *sport = platform_get_drvdata(dev);
- if (sport)
- uart_suspend_port(&imx_reg, &sport->port);
+ if (sport)
+ uart_suspend_port(&imx_reg, &sport->port);
- return 0;
+ return 0;
}
static int serial_imx_resume(struct platform_device *dev)
{
- struct imx_port *sport = platform_get_drvdata(dev);
+ struct imx_port *sport = platform_get_drvdata(dev);
- if (sport)
- uart_resume_port(&imx_reg, &sport->port);
+ if (sport)
+ uart_resume_port(&imx_reg, &sport->port);
- return 0;
+ return 0;
}
static int serial_imx_probe(struct platform_device *pdev)
@@ -1141,19 +1292,29 @@ static int serial_imx_probe(struct platform_device *pdev)
imx_ports[pdev->id] = sport;
pdata = pdev->dev.platform_data;
- if(pdata && (pdata->flags & IMXUART_HAVE_RTSCTS))
+ if (pdata && (pdata->flags & IMXUART_HAVE_RTSCTS))
sport->have_rtscts = 1;
+#ifdef CONFIG_IRDA
+ if (pdata && (pdata->flags & IMXUART_IRDA))
+ sport->use_irda = 1;
+#endif
+
if (pdata->init) {
ret = pdata->init(pdev);
if (ret)
goto clkput;
}
- uart_add_one_port(&imx_reg, &sport->port);
+ ret = uart_add_one_port(&imx_reg, &sport->port);
+ if (ret)
+ goto deinit;
platform_set_drvdata(pdev, &sport->port);
return 0;
+deinit:
+ if (pdata->exit)
+ pdata->exit(pdev);
clkput:
clk_put(sport->clk);
clk_disable(sport->clk);
@@ -1191,13 +1352,13 @@ static int serial_imx_remove(struct platform_device *pdev)
}
static struct platform_driver serial_imx_driver = {
- .probe = serial_imx_probe,
- .remove = serial_imx_remove,
+ .probe = serial_imx_probe,
+ .remove = serial_imx_remove,
.suspend = serial_imx_suspend,
.resume = serial_imx_resume,
.driver = {
- .name = "imx-uart",
+ .name = "imx-uart",
.owner = THIS_MODULE,
},
};
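
The imx_set_termios() hunk above replaces the old shift-until-it-fits divider code with rational_best_approximation() from lib/rational.c and then reports the rate actually programmed back through tty_encode_baud_rate(). The sketch below only illustrates that arithmetic and is not driver code: the clock value is an arbitrary example, and best_rational() is a simplified, convergents-only stand-in for the kernel helper.

/*
 * Illustrative user-space sketch of the UBIR/UBMR computation used in the
 * imx_set_termios() hunk above.  best_rational() is a stand-in for the
 * kernel's rational_best_approximation(); the register writes themselves
 * are of course not reproduced here.
 */
#include <stdio.h>
#include <stdint.h>

/* Continued-fraction best rational approximation, capped at max_n/max_d. */
static void best_rational(unsigned long n, unsigned long d,
			  unsigned long max_n, unsigned long max_d,
			  unsigned long *best_n, unsigned long *best_d)
{
	unsigned long p0 = 0, q0 = 1, p1 = 1, q1 = 0;

	while (d != 0) {
		unsigned long a = n / d, t;
		unsigned long p2 = p0 + a * p1, q2 = q0 + a * q1;

		if (p2 > max_n || q2 > max_d)
			break;
		p0 = p1; q0 = q1; p1 = p2; q1 = q2;
		t = d; d = n % d; n = t;
	}
	*best_n = p1 ? p1 : 1;
	*best_d = q1 ? q1 : 1;
}

int main(void)
{
	unsigned long uartclk = 66500000;	/* example module clock, Hz */
	unsigned int baud = 115200, div = 1;	/* div is forced to 1 in IrDA mode */
	unsigned long num, denom;
	uint64_t actual;

	/* UBIR/UBMR encode the ratio (16 * div * baud) / uartclk */
	best_rational(16UL * div * baud, uartclk, 1 << 16, 1 << 16,
		      &num, &denom);

	/* effective rate, as reported back via tty_encode_baud_rate() */
	actual = (uint64_t)uartclk * num;
	actual /= (uint64_t)denom * 16 * div;

	printf("UBIR=%lu UBMR=%lu actual baud=%llu\n",
	       num - 1, denom - 1, (unsigned long long)actual);
	return 0;
}

The printed UBIR/UBMR pair corresponds to the num - 1 / denom - 1 writes in the hunk above.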
diff --git a/drivers/serial/jsm/jsm.h b/drivers/serial/jsm/jsm.h
index c0a3e2734e2..4e5f3bde046 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/serial/jsm/jsm.h
@@ -61,6 +61,7 @@ enum {
if ((DBG_##nlevel & jsm_debug)) \
dev_printk(KERN_##klevel, pdev->dev, fmt, ## args)
+#define MAXLINES 256
#define MAXPORTS 8
#define MAX_STOPS_SENT 5
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 31496dc0a0d..107ce2e187b 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -33,6 +33,8 @@
#include "jsm.h"
+static DECLARE_BITMAP(linemap, MAXLINES);
+
static void jsm_carrier(struct jsm_channel *ch);
static inline int jsm_get_mstat(struct jsm_channel *ch)
@@ -433,6 +435,7 @@ int __devinit jsm_tty_init(struct jsm_board *brd)
int __devinit jsm_uart_port_init(struct jsm_board *brd)
{
int i;
+ unsigned int line;
struct jsm_channel *ch;
if (!brd)
@@ -459,9 +462,15 @@ int __devinit jsm_uart_port_init(struct jsm_board *brd)
brd->channels[i]->uart_port.membase = brd->re_map_membase;
brd->channels[i]->uart_port.fifosize = 16;
brd->channels[i]->uart_port.ops = &jsm_ops;
- brd->channels[i]->uart_port.line = brd->channels[i]->ch_portnum + brd->boardnum * 2;
+ line = find_first_zero_bit(linemap, MAXLINES);
+ if (line >= MAXLINES) {
+ printk(KERN_INFO "jsm: linemap is full, added device failed\n");
+ continue;
+ } else
+ set_bit((int)line, linemap);
+ brd->channels[i]->uart_port.line = line;
if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port))
- printk(KERN_INFO "Added device failed\n");
+ printk(KERN_INFO "jsm: add device failed\n");
else
printk(KERN_INFO "Added device \n");
}
@@ -494,6 +503,7 @@ int jsm_remove_uart_port(struct jsm_board *brd)
ch = brd->channels[i];
+ clear_bit((int)(ch->uart_port.line), linemap);
uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
}
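
The jsm_tty.c hunks above stop deriving the uart line number from ch_portnum + boardnum * 2 and instead allocate it from a global bitmap, so ports from multiple boards can no longer collide. Below is a small user-space sketch of the same allocate-first-free-bit / clear-on-remove pattern; the helpers are plain-C stand-ins for DECLARE_BITMAP(), find_first_zero_bit(), set_bit() and clear_bit(), and MAXLINES mirrors the new constant in jsm.h.

/*
 * Minimal sketch of the line-number allocation scheme introduced above:
 * a global bitmap, first free bit taken on add, bit cleared on remove.
 */
#include <stdio.h>
#include <limits.h>

#define MAXLINES 256
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long linemap[(MAXLINES + BITS_PER_LONG - 1) / BITS_PER_LONG];

static int alloc_line(void)
{
	unsigned int line;

	for (line = 0; line < MAXLINES; line++)
		if (!(linemap[line / BITS_PER_LONG] &
		      (1UL << (line % BITS_PER_LONG)))) {
			linemap[line / BITS_PER_LONG] |=
				1UL << (line % BITS_PER_LONG);
			return line;
		}
	return -1;			/* linemap is full */
}

static void free_line(unsigned int line)
{
	linemap[line / BITS_PER_LONG] &= ~(1UL << (line % BITS_PER_LONG));
}

int main(void)
{
	int a = alloc_line(), b = alloc_line();

	printf("allocated lines %d and %d\n", a, b);
	free_line(a);
	printf("after freeing %d, next allocation gets %d\n", a, alloc_line());
	return 0;
}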
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 7f72f8ceaa6..b3feb6198d5 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -988,7 +988,7 @@ mpc52xx_console_setup(struct console *co, char *options)
pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
co, co->index, options);
- if ((co->index < 0) || (co->index > MPC52xx_PSC_MAXNUM)) {
+ if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) {
pr_debug("PSC%x out of range\n", co->index);
return -EINVAL;
}
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 14f8fa9135b..54483cd3529 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -122,7 +122,7 @@ static int __devinit of_platform_serial_probe(struct of_device *ofdev,
info->type = port_type;
info->line = ret;
- ofdev->dev.driver_data = info;
+ dev_set_drvdata(&ofdev->dev, info);
return 0;
out:
kfree(info);
@@ -135,7 +135,7 @@ out:
*/
static int of_platform_serial_remove(struct of_device *ofdev)
{
- struct of_serial_info *info = ofdev->dev.driver_data;
+ struct of_serial_info *info = dev_get_drvdata(&ofdev->dev);
switch (info->type) {
#ifdef CONFIG_SERIAL_8250
case PORT_8250 ... PORT_MAX_8250:
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 7546aa887fa..79c9c5f5cdb 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -681,7 +681,7 @@ static int serial_config(struct pcmcia_device * link)
u_char *buf;
cisparse_t *parse;
cistpl_cftable_entry_t *cf;
- int i;
+ int i, last_ret, last_fn;
DEBUG(0, "serial_config(0x%p)\n", link);
@@ -699,6 +699,16 @@ static int serial_config(struct pcmcia_device * link)
tuple->TupleDataMax = 255;
tuple->Attributes = 0;
+ /* Get configuration register information */
+ tuple->DesiredTuple = CISTPL_CONFIG;
+ last_ret = first_tuple(link, tuple, parse);
+ if (last_ret != 0) {
+ last_fn = ParseTuple;
+ goto cs_failed;
+ }
+ link->conf.ConfigBase = parse->config.base;
+ link->conf.Present = parse->config.rmask[0];
+
/* Is this a compliant multifunction card? */
tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
tuple->Attributes = TUPLE_RETURN_COMMON | TUPLE_RETURN_LINK;
@@ -761,7 +771,9 @@ static int serial_config(struct pcmcia_device * link)
kfree(cfg_mem);
return 0;
- failed:
+cs_failed:
+ cs_error(link, last_fn, last_ret);
+failed:
serial_remove(link);
kfree(cfg_mem);
return -ENODEV;
@@ -863,10 +875,10 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "PCMLM28.cis"),
PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"),
- PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "3CCFEM556.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"),
- PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "3CXEM556.cis"),
- PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "3CXEM556.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */
PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0x0710, "SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index dbf5357a77b..a4cf1079b31 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -47,12 +47,17 @@
#include <linux/clk.h>
#include <linux/ctype.h>
#include <linux/err.h>
+#include <linux/list.h>
#ifdef CONFIG_SUPERH
#include <asm/clock.h>
#include <asm/sh_bios.h>
#endif
+#ifdef CONFIG_H8300
+#include <asm/gpio.h>
+#endif
+
#include "sh-sci.h"
struct sci_port {
@@ -75,14 +80,22 @@ struct sci_port {
int break_flag;
#ifdef CONFIG_HAVE_CLK
- /* Port clock */
- struct clk *clk;
+ /* Interface clock */
+ struct clk *iclk;
+ /* Data clock */
+ struct clk *dclk;
#endif
+ struct list_head node;
};
-#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
-static struct sci_port *serial_console_port;
+struct sh_sci_priv {
+ spinlock_t lock;
+ struct list_head ports;
+
+#ifdef CONFIG_HAVE_CLK
+ struct notifier_block clk_nb;
#endif
+};
/* Function prototypes */
static void sci_stop_tx(struct uart_port *port);
@@ -138,9 +151,8 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
status = sci_in(port, SCxSR);
} while (!(status & SCxSR_TDxE(port)));
- sci_in(port, SCxSR); /* Dummy read */
- sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
sci_out(port, SCxTDR, c);
+ sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
@@ -159,12 +171,12 @@ static void h8300_sci_config(struct uart_port *port, unsigned int ctrl)
*mstpcrl &= ~mask;
}
-static inline void h8300_sci_enable(struct uart_port *port)
+static void h8300_sci_enable(struct uart_port *port)
{
h8300_sci_config(port, sci_enable);
}
-static inline void h8300_sci_disable(struct uart_port *port)
+static void h8300_sci_disable(struct uart_port *port)
{
h8300_sci_config(port, sci_disable);
}
@@ -611,7 +623,7 @@ static inline int sci_handle_breaks(struct uart_port *port)
int copied = 0;
unsigned short status = sci_in(port, SCxSR);
struct tty_struct *tty = port->info->port.tty;
- struct sci_port *s = &sci_ports[port->line];
+ struct sci_port *s = to_sci_port(port);
if (uart_handle_break(port))
return 0;
@@ -726,19 +738,43 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
static int sci_notifier(struct notifier_block *self,
unsigned long phase, void *p)
{
- int i;
+ struct sh_sci_priv *priv = container_of(self,
+ struct sh_sci_priv, clk_nb);
+ struct sci_port *sci_port;
+ unsigned long flags;
if ((phase == CPUFREQ_POSTCHANGE) ||
- (phase == CPUFREQ_RESUMECHANGE))
- for (i = 0; i < SCI_NPORTS; i++) {
- struct sci_port *s = &sci_ports[i];
- s->port.uartclk = clk_get_rate(s->clk);
- }
+ (phase == CPUFREQ_RESUMECHANGE)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry(sci_port, &priv->ports, node)
+ sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
return NOTIFY_OK;
}
-static struct notifier_block sci_nb = { &sci_notifier, NULL, 0 };
+static void sci_clk_enable(struct uart_port *port)
+{
+ struct sci_port *sci_port = to_sci_port(port);
+
+ clk_enable(sci_port->dclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
+
+ if (sci_port->iclk)
+ clk_enable(sci_port->iclk);
+}
+
+static void sci_clk_disable(struct uart_port *port)
+{
+ struct sci_port *sci_port = to_sci_port(port);
+
+ if (sci_port->iclk)
+ clk_disable(sci_port->iclk);
+
+ clk_disable(sci_port->dclk);
+}
#endif
static int sci_request_irq(struct sci_port *port)
@@ -865,15 +901,11 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
static int sci_startup(struct uart_port *port)
{
- struct sci_port *s = &sci_ports[port->line];
+ struct sci_port *s = to_sci_port(port);
if (s->enable)
s->enable(port);
-#ifdef CONFIG_HAVE_CLK
- s->clk = clk_get(NULL, "module_clk");
-#endif
-
sci_request_irq(s);
sci_start_tx(port);
sci_start_rx(port, 1);
@@ -883,7 +915,7 @@ static int sci_startup(struct uart_port *port)
static void sci_shutdown(struct uart_port *port)
{
- struct sci_port *s = &sci_ports[port->line];
+ struct sci_port *s = to_sci_port(port);
sci_stop_rx(port);
sci_stop_tx(port);
@@ -891,11 +923,6 @@ static void sci_shutdown(struct uart_port *port)
if (s->disable)
s->disable(port);
-
-#ifdef CONFIG_HAVE_CLK
- clk_put(s->clk);
- s->clk = NULL;
-#endif
}
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
@@ -980,25 +1007,31 @@ static int sci_request_port(struct uart_port *port)
static void sci_config_port(struct uart_port *port, int flags)
{
- struct sci_port *s = &sci_ports[port->line];
+ struct sci_port *s = to_sci_port(port);
port->type = s->type;
- if (port->flags & UPF_IOREMAP && !port->membase) {
-#if defined(CONFIG_SUPERH64)
- port->mapbase = onchip_remap(SCIF_ADDR_SH5, 1024, "SCIF");
- port->membase = (void __iomem *)port->mapbase;
-#else
+ if (port->membase)
+ return;
+
+ if (port->flags & UPF_IOREMAP) {
port->membase = ioremap_nocache(port->mapbase, 0x40);
-#endif
- dev_err(port->dev, "can't remap port#%d\n", port->line);
+ if (IS_ERR(port->membase))
+ dev_err(port->dev, "can't remap port#%d\n", port->line);
+ } else {
+ /*
+ * For the simple (and majority of) cases where we don't
+ * need to do any remapping, just cast the cookie
+ * directly.
+ */
+ port->membase = (void __iomem *)port->mapbase;
}
}
static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
{
- struct sci_port *s = &sci_ports[port->line];
+ struct sci_port *s = to_sci_port(port);
if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
return -EINVAL;
@@ -1032,63 +1065,60 @@ static struct uart_ops sci_uart_ops = {
#endif
};
-static void __init sci_init_ports(void)
+static void __devinit sci_init_single(struct platform_device *dev,
+ struct sci_port *sci_port,
+ unsigned int index,
+ struct plat_sci_port *p)
{
- static int first = 1;
- int i;
-
- if (!first)
- return;
-
- first = 0;
-
- for (i = 0; i < SCI_NPORTS; i++) {
- sci_ports[i].port.ops = &sci_uart_ops;
- sci_ports[i].port.iotype = UPIO_MEM;
- sci_ports[i].port.line = i;
- sci_ports[i].port.fifosize = 1;
+ sci_port->port.ops = &sci_uart_ops;
+ sci_port->port.iotype = UPIO_MEM;
+ sci_port->port.line = index;
+ sci_port->port.fifosize = 1;
#if defined(__H8300H__) || defined(__H8300S__)
#ifdef __H8300S__
- sci_ports[i].enable = h8300_sci_enable;
- sci_ports[i].disable = h8300_sci_disable;
+ sci_port->enable = h8300_sci_enable;
+ sci_port->disable = h8300_sci_disable;
#endif
- sci_ports[i].port.uartclk = CONFIG_CPU_CLOCK;
+ sci_port->port.uartclk = CONFIG_CPU_CLOCK;
#elif defined(CONFIG_HAVE_CLK)
- /*
- * XXX: We should use a proper SCI/SCIF clock
- */
- {
- struct clk *clk = clk_get(NULL, "module_clk");
- sci_ports[i].port.uartclk = clk_get_rate(clk);
- clk_put(clk);
- }
+ sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
+ sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
+ sci_port->enable = sci_clk_enable;
+ sci_port->disable = sci_clk_disable;
#else
#error "Need a valid uartclk"
#endif
- sci_ports[i].break_timer.data = (unsigned long)&sci_ports[i];
- sci_ports[i].break_timer.function = sci_break_timer;
+ sci_port->break_timer.data = (unsigned long)sci_port;
+ sci_port->break_timer.function = sci_break_timer;
+ init_timer(&sci_port->break_timer);
- init_timer(&sci_ports[i].break_timer);
- }
-}
-
-int __init early_sci_setup(struct uart_port *port)
-{
- if (unlikely(port->line > SCI_NPORTS))
- return -ENODEV;
+ sci_port->port.mapbase = p->mapbase;
+ sci_port->port.membase = p->membase;
- sci_init_ports();
+ sci_port->port.irq = p->irqs[SCIx_TXI_IRQ];
+ sci_port->port.flags = p->flags;
+ sci_port->port.dev = &dev->dev;
+ sci_port->type = sci_port->port.type = p->type;
- sci_ports[port->line].port.membase = port->membase;
- sci_ports[port->line].port.mapbase = port->mapbase;
- sci_ports[port->line].port.type = port->type;
+ memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
- return 0;
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
+static struct tty_driver *serial_console_device(struct console *co, int *index)
+{
+ struct uart_driver *p = &sci_uart_driver;
+ *index = co->index;
+ return p->tty_driver;
+}
+
+static void serial_console_putchar(struct uart_port *port, int ch)
+{
+ sci_poll_put_char(port, ch);
+}
+
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
@@ -1096,25 +1126,27 @@ int __init early_sci_setup(struct uart_port *port)
static void serial_console_write(struct console *co, const char *s,
unsigned count)
{
- struct uart_port *port = &serial_console_port->port;
+ struct uart_port *port = co->data;
+ struct sci_port *sci_port = to_sci_port(port);
unsigned short bits;
- int i;
- for (i = 0; i < count; i++) {
- if (*s == 10)
- sci_poll_put_char(port, '\r');
+ if (sci_port->enable)
+ sci_port->enable(port);
- sci_poll_put_char(port, *s++);
- }
+ uart_console_write(port, s, count, serial_console_putchar);
/* wait until fifo is empty and last bit has been transmitted */
bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
while ((sci_in(port, SCxSR) & bits) != bits)
cpu_relax();
+
+ if (sci_port->disable)
+ sci_port->disable(port);
}
static int __init serial_console_setup(struct console *co, char *options)
{
+ struct sci_port *sci_port;
struct uart_port *port;
int baud = 115200;
int bits = 8;
@@ -1130,8 +1162,9 @@ static int __init serial_console_setup(struct console *co, char *options)
if (co->index >= SCI_NPORTS)
co->index = 0;
- serial_console_port = &sci_ports[co->index];
- port = &serial_console_port->port;
+ sci_port = &sci_ports[co->index];
+ port = &sci_port->port;
+ co->data = port;
/*
* Also need to check port->type, we don't actually have any
@@ -1141,21 +1174,11 @@ static int __init serial_console_setup(struct console *co, char *options)
*/
if (!port->type)
return -ENODEV;
- if (!port->membase || !port->mapbase)
- return -ENODEV;
-
- port->type = serial_console_port->type;
-
-#ifdef CONFIG_HAVE_CLK
- if (!serial_console_port->clk)
- serial_console_port->clk = clk_get(NULL, "module_clk");
-#endif
- if (port->flags & UPF_IOREMAP)
- sci_config_port(port, 0);
+ sci_config_port(port, 0);
- if (serial_console_port->enable)
- serial_console_port->enable(port);
+ if (sci_port->enable)
+ sci_port->enable(port);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1166,22 +1189,21 @@ static int __init serial_console_setup(struct console *co, char *options)
if (ret == 0)
sci_stop_rx(port);
#endif
+ /* TODO: disable clock */
return ret;
}
static struct console serial_console = {
.name = "ttySC",
- .device = uart_console_device,
+ .device = serial_console_device,
.write = serial_console_write,
.setup = serial_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
- .data = &sci_uart_driver,
};
static int __init sci_console_init(void)
{
- sci_init_ports();
register_console(&serial_console);
return 0;
}
@@ -1207,6 +1229,61 @@ static struct uart_driver sci_uart_driver = {
.cons = SCI_CONSOLE,
};
+
+static int sci_remove(struct platform_device *dev)
+{
+ struct sh_sci_priv *priv = platform_get_drvdata(dev);
+ struct sci_port *p;
+ unsigned long flags;
+
+#ifdef CONFIG_HAVE_CLK
+ cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry(p, &priv->ports, node)
+ uart_remove_one_port(&sci_uart_driver, &p->port);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ kfree(priv);
+ return 0;
+}
+
+static int __devinit sci_probe_single(struct platform_device *dev,
+ unsigned int index,
+ struct plat_sci_port *p,
+ struct sci_port *sciport)
+{
+ struct sh_sci_priv *priv = platform_get_drvdata(dev);
+ unsigned long flags;
+ int ret;
+
+ /* Sanity check */
+ if (unlikely(index >= SCI_NPORTS)) {
+ dev_notice(&dev->dev, "Attempting to register port "
+ "%d when only %d are available.\n",
+ index+1, SCI_NPORTS);
+ dev_notice(&dev->dev, "Consider bumping "
+ "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
+ return 0;
+ }
+
+ sci_init_single(dev, sciport, index, p);
+
+ ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&sciport->node);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_add(&sciport->node, &priv->ports);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
/*
* Register a set of serial devices attached to a platform device. The
* list is terminated with a zero flags entry, which means we expect
@@ -1216,57 +1293,34 @@ static struct uart_driver sci_uart_driver = {
static int __devinit sci_probe(struct platform_device *dev)
{
struct plat_sci_port *p = dev->dev.platform_data;
+ struct sh_sci_priv *priv;
int i, ret = -EINVAL;
- for (i = 0; p && p->flags != 0; p++, i++) {
- struct sci_port *sciport = &sci_ports[i];
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- /* Sanity check */
- if (unlikely(i == SCI_NPORTS)) {
- dev_notice(&dev->dev, "Attempting to register port "
- "%d when only %d are available.\n",
- i+1, SCI_NPORTS);
- dev_notice(&dev->dev, "Consider bumping "
- "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
- break;
- }
+ INIT_LIST_HEAD(&priv->ports);
+ spin_lock_init(&priv->lock);
+ platform_set_drvdata(dev, priv);
- sciport->port.mapbase = p->mapbase;
+#ifdef CONFIG_HAVE_CLK
+ priv->clk_nb.notifier_call = sci_notifier;
+ cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
- if (p->mapbase && !p->membase) {
- if (p->flags & UPF_IOREMAP) {
- p->membase = ioremap_nocache(p->mapbase, 0x40);
- if (IS_ERR(p->membase)) {
- ret = PTR_ERR(p->membase);
- goto err_unreg;
- }
- } else {
- /*
- * For the simple (and majority of) cases
- * where we don't need to do any remapping,
- * just cast the cookie directly.
- */
- p->membase = (void __iomem *)p->mapbase;
- }
+ if (dev->id != -1) {
+ ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]);
+ if (ret)
+ goto err_unreg;
+ } else {
+ for (i = 0; p && p->flags != 0; p++, i++) {
+ ret = sci_probe_single(dev, i, p, &sci_ports[i]);
+ if (ret)
+ goto err_unreg;
}
-
- sciport->port.membase = p->membase;
-
- sciport->port.irq = p->irqs[SCIx_TXI_IRQ];
- sciport->port.flags = p->flags;
- sciport->port.dev = &dev->dev;
-
- sciport->type = sciport->port.type = p->type;
-
- memcpy(&sciport->irqs, &p->irqs, sizeof(p->irqs));
-
- uart_add_one_port(&sci_uart_driver, &sciport->port);
}
-#ifdef CONFIG_HAVE_CLK
- cpufreq_register_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER);
-#endif
-
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_gdb_detach();
#endif
@@ -1274,50 +1328,36 @@ static int __devinit sci_probe(struct platform_device *dev)
return 0;
err_unreg:
- for (i = i - 1; i >= 0; i--)
- uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port);
-
+ sci_remove(dev);
return ret;
}
-static int __devexit sci_remove(struct platform_device *dev)
-{
- int i;
-
-#ifdef CONFIG_HAVE_CLK
- cpufreq_unregister_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER);
-#endif
-
- for (i = 0; i < SCI_NPORTS; i++)
- uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port);
-
- return 0;
-}
-
static int sci_suspend(struct platform_device *dev, pm_message_t state)
{
- int i;
+ struct sh_sci_priv *priv = platform_get_drvdata(dev);
+ struct sci_port *p;
+ unsigned long flags;
- for (i = 0; i < SCI_NPORTS; i++) {
- struct sci_port *p = &sci_ports[i];
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry(p, &priv->ports, node)
+ uart_suspend_port(&sci_uart_driver, &p->port);
- if (p->type != PORT_UNKNOWN && p->port.dev == &dev->dev)
- uart_suspend_port(&sci_uart_driver, &p->port);
- }
+ spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int sci_resume(struct platform_device *dev)
{
- int i;
+ struct sh_sci_priv *priv = platform_get_drvdata(dev);
+ struct sci_port *p;
+ unsigned long flags;
- for (i = 0; i < SCI_NPORTS; i++) {
- struct sci_port *p = &sci_ports[i];
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry(p, &priv->ports, node)
+ uart_resume_port(&sci_uart_driver, &p->port);
- if (p->type != PORT_UNKNOWN && p->port.dev == &dev->dev)
- uart_resume_port(&sci_uart_driver, &p->port);
- }
+ spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
@@ -1339,8 +1379,6 @@ static int __init sci_init(void)
printk(banner);
- sci_init_ports();
-
ret = uart_register_driver(&sci_uart_driver);
if (likely(ret == 0)) {
ret = platform_driver_register(&sci_driver);
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index d0aa82d7fce..38072c15b84 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -91,6 +91,9 @@
# define SCSPTR5 0xa4050128
# define SCIF_ORER 0x0001 /* overrun error bit */
# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
+# define SCIF_ORER 0x0001 /* overrun error bit */
+# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
@@ -314,7 +317,18 @@
} \
}
-#define CPU_SCIF_FNS(name, scif_offset, scif_size) \
+#ifdef CONFIG_H8300
+/* h8300 don't have SCIF */
+#define CPU_SCIF_FNS(name) \
+ static inline unsigned int sci_##name##_in(struct uart_port *port) \
+ { \
+ return 0; \
+ } \
+ static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
+ { \
+ }
+#else
+#define CPU_SCIF_FNS(name, scif_offset, scif_size) \
static inline unsigned int sci_##name##_in(struct uart_port *port) \
{ \
SCI_IN(scif_size, scif_offset); \
@@ -323,6 +337,7 @@
{ \
SCI_OUT(scif_size, scif_offset, value); \
}
+#endif
#define CPU_SCI_FNS(name, sci_offset, sci_size) \
static inline unsigned int sci_##name##_in(struct uart_port* port) \
@@ -360,8 +375,10 @@
sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
h8_sci_offset, h8_sci_size) \
CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
+#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
+ CPU_SCIF_FNS(name)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
+ defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \
CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size)
#define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \
@@ -390,7 +407,8 @@ SCIF_FNS(SCFDR, 0x1c, 16)
SCIF_FNS(SCxTDR, 0x20, 8)
SCIF_FNS(SCxRDR, 0x24, 8)
SCIF_FNS(SCLSR, 0x24, 16)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
+ defined(CONFIG_CPU_SUBTYPE_SH7724)
SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16)
SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8)
SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16)
@@ -604,10 +622,21 @@ static inline int sci_rxd_in(struct uart_port *port)
return ctrl_inb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */
return 1;
}
+#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
+# define SCFSR 0x0010
+# define SCASSR 0x0014
+static inline int sci_rxd_in(struct uart_port *port)
+{
+ if (port->type == PORT_SCIF)
+ return ctrl_inw((port->mapbase + SCFSR)) & SCIF_BRK ? 1 : 0;
+ if (port->type == PORT_SCIFA)
+ return ctrl_inw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0;
+ return 1;
+}
#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
static inline int sci_rxd_in(struct uart_port *port)
{
- return sci_in(port, SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
+ return sci_in(port, SCSPTR)&0x0001 ? 1 : 0; /* SCIF */
}
#elif defined(__H8300H__) || defined(__H8300S__)
static inline int sci_rxd_in(struct uart_port *port)
@@ -757,7 +786,8 @@ static inline int sci_rxd_in(struct uart_port *port)
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721)
#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
+ defined(CONFIG_CPU_SUBTYPE_SH7724)
static inline int scbrr_calc(struct uart_port *port, int bps, int clk)
{
if (port->type == PORT_SCIF)
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
new file mode 100644
index 00000000000..ac9e5d5f742
--- /dev/null
+++ b/drivers/serial/timbuart.c
@@ -0,0 +1,526 @@
+/*
+ * timbuart.c timberdale FPGA UART driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA UART
+ */
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/serial_core.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+
+#include "timbuart.h"
+
+struct timbuart_port {
+ struct uart_port port;
+ struct tasklet_struct tasklet;
+ int usedma;
+ u8 last_ier;
+ struct platform_device *dev;
+};
+
+static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
+ 921600, 1843200, 3250000};
+
+static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier);
+
+static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
+
+static void timbuart_stop_rx(struct uart_port *port)
+{
+ /* spin lock held by upper layer, disable all RX interrupts */
+ u8 ier = ioread8(port->membase + TIMBUART_IER) & ~RXFLAGS;
+ iowrite8(ier, port->membase + TIMBUART_IER);
+}
+
+static void timbuart_stop_tx(struct uart_port *port)
+{
+ /* spinlock held by upper layer, disable TX interrupt */
+ u8 ier = ioread8(port->membase + TIMBUART_IER) & ~TXBAE;
+ iowrite8(ier, port->membase + TIMBUART_IER);
+}
+
+static void timbuart_start_tx(struct uart_port *port)
+{
+ struct timbuart_port *uart =
+ container_of(port, struct timbuart_port, port);
+
+ /* do not transfer anything here -> fire off the tasklet */
+ tasklet_schedule(&uart->tasklet);
+}
+
+static void timbuart_flush_buffer(struct uart_port *port)
+{
+ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
+
+ iowrite8(ctl, port->membase + TIMBUART_CTRL);
+ iowrite8(TXBF, port->membase + TIMBUART_ISR);
+}
+
+static void timbuart_rx_chars(struct uart_port *port)
+{
+ struct tty_struct *tty = port->info->port.tty;
+
+ while (ioread8(port->membase + TIMBUART_ISR) & RXDP) {
+ u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
+ port->icount.rx++;
+ tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ }
+
+ spin_unlock(&port->lock);
+ tty_flip_buffer_push(port->info->port.tty);
+ spin_lock(&port->lock);
+
+ dev_dbg(port->dev, "%s - total read %d bytes\n",
+ __func__, port->icount.rx);
+}
+
+static void timbuart_tx_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->info->xmit;
+
+ while (!(ioread8(port->membase + TIMBUART_ISR) & TXBF) &&
+ !uart_circ_empty(xmit)) {
+ iowrite8(xmit->buf[xmit->tail],
+ port->membase + TIMBUART_TXFIFO);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ dev_dbg(port->dev,
+ "%s - total written %d bytes, CTL: %x, RTS: %x, baud: %x\n",
+ __func__,
+ port->icount.tx,
+ ioread8(port->membase + TIMBUART_CTRL),
+ port->mctrl & TIOCM_RTS,
+ ioread8(port->membase + TIMBUART_BAUDRATE));
+}
+
+static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
+{
+ struct timbuart_port *uart =
+ container_of(port, struct timbuart_port, port);
+ struct circ_buf *xmit = &port->info->xmit;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+ return;
+
+ if (port->x_char)
+ return;
+
+ if (isr & TXFLAGS) {
+ timbuart_tx_chars(port);
+ /* clear all TX interrupts */
+ iowrite8(TXFLAGS, port->membase + TIMBUART_ISR);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+ } else
+ /* Re-enable any tx interrupt */
+ *ier |= uart->last_ier & TXFLAGS;
+
+ /* Re-enable the interrupt if there are still chars in the transmit
+ * buffer, or if we delivered some bytes and want the almost-empty
+ * interrupt: the upper layer is woken up later, when that interrupt
+ * arrives, to give the data some time to go out.
+ */
+ if (!uart_circ_empty(xmit))
+ *ier |= TXBAE;
+
+ dev_dbg(port->dev, "%s - leaving\n", __func__);
+}
+
+void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
+{
+ if (isr & RXFLAGS) {
+ /* Some RX status is set */
+ if (isr & RXBF) {
+ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
+ TIMBUART_CTRL_FLSHRX;
+ iowrite8(ctl, port->membase + TIMBUART_CTRL);
+ port->icount.overrun++;
+ } else if (isr & (RXDP))
+ timbuart_rx_chars(port);
+
+ /* ack all RX interrupts */
+ iowrite8(RXFLAGS, port->membase + TIMBUART_ISR);
+ }
+
+ /* always have the RX interrupts enabled */
+ *ier |= RXBAF | RXBF | RXTT;
+
+ dev_dbg(port->dev, "%s - leaving\n", __func__);
+}
+
+void timbuart_tasklet(unsigned long arg)
+{
+ struct timbuart_port *uart = (struct timbuart_port *)arg;
+ u8 isr, ier = 0;
+
+ spin_lock(&uart->port.lock);
+
+ isr = ioread8(uart->port.membase + TIMBUART_ISR);
+ dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
+
+ if (!uart->usedma)
+ timbuart_handle_tx_port(&uart->port, isr, &ier);
+
+ timbuart_mctrl_check(&uart->port, isr, &ier);
+
+ if (!uart->usedma)
+ timbuart_handle_rx_port(&uart->port, isr, &ier);
+
+ iowrite8(ier, uart->port.membase + TIMBUART_IER);
+
+ spin_unlock(&uart->port.lock);
+ dev_dbg(uart->port.dev, "%s leaving\n", __func__);
+}
+
+static unsigned int timbuart_tx_empty(struct uart_port *port)
+{
+ u8 isr = ioread8(port->membase + TIMBUART_ISR);
+
+ return (isr & TXBAE) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int timbuart_get_mctrl(struct uart_port *port)
+{
+ u8 cts = ioread8(port->membase + TIMBUART_CTRL);
+ dev_dbg(port->dev, "%s - cts %x\n", __func__, cts);
+
+ if (cts & TIMBUART_CTRL_CTS)
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+ else
+ return TIOCM_DSR | TIOCM_CAR;
+}
+
+static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ dev_dbg(port->dev, "%s - %x\n", __func__, mctrl);
+
+ if (mctrl & TIOCM_RTS)
+ iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
+ else
+ iowrite8(0, port->membase + TIMBUART_CTRL);
+}
+
+static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier)
+{
+ unsigned int cts;
+
+ if (isr & CTS_DELTA) {
+ /* ack */
+ iowrite8(CTS_DELTA, port->membase + TIMBUART_ISR);
+ cts = timbuart_get_mctrl(port);
+ uart_handle_cts_change(port, cts & TIOCM_CTS);
+ wake_up_interruptible(&port->info->delta_msr_wait);
+ }
+
+ *ier |= CTS_DELTA;
+}
+
+static void timbuart_enable_ms(struct uart_port *port)
+{
+ /* N/A */
+}
+
+static void timbuart_break_ctl(struct uart_port *port, int ctl)
+{
+ /* N/A */
+}
+
+static int timbuart_startup(struct uart_port *port)
+{
+ struct timbuart_port *uart =
+ container_of(port, struct timbuart_port, port);
+
+ dev_dbg(port->dev, "%s\n", __func__);
+
+ iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
+ iowrite8(0xff, port->membase + TIMBUART_ISR);
+ /* Enable all but TX interrupts */
+ iowrite8(RXBAF | RXBF | RXTT | CTS_DELTA,
+ port->membase + TIMBUART_IER);
+
+ return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
+ "timb-uart", uart);
+}
+
+static void timbuart_shutdown(struct uart_port *port)
+{
+ struct timbuart_port *uart =
+ container_of(port, struct timbuart_port, port);
+ dev_dbg(port->dev, "%s\n", __func__);
+ free_irq(port->irq, uart);
+ iowrite8(0, port->membase + TIMBUART_IER);
+}
+
+static int get_bindex(int baud)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(baudrates); i++)
+ if (baud <= baudrates[i])
+ return i;
+
+ return -1;
+}
+
+static void timbuart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud;
+ short bindex;
+ unsigned long flags;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+ bindex = get_bindex(baud);
+ dev_dbg(port->dev, "%s - bindex %d\n", __func__, bindex);
+
+ if (bindex < 0)
+ bindex = 0;
+ baud = baudrates[bindex];
+
+ /* The serial layer calls into this once with old = NULL when setting
+ up initially */
+ if (old)
+ tty_termios_copy_hw(termios, old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ spin_lock_irqsave(&port->lock, flags);
+ iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *timbuart_type(struct uart_port *port)
+{
+ return port->type == PORT_TIMBUART ? "timbuart" : NULL;
+}
+
+/* We do not request/release mappings of the registers here;
+ * currently this is done in the probe function.
+ */
+static void timbuart_release_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ int size =
+ resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
+
+ if (port->flags & UPF_IOREMAP) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+
+ release_mem_region(port->mapbase, size);
+}
+
+static int timbuart_request_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ int size =
+ resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
+
+ if (!request_mem_region(port->mapbase, size, "timb-uart"))
+ return -EBUSY;
+
+ if (port->flags & UPF_IOREMAP) {
+ port->membase = ioremap(port->mapbase, size);
+ if (port->membase == NULL) {
+ release_mem_region(port->mapbase, size);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t timbuart_handleinterrupt(int irq, void *devid)
+{
+ struct timbuart_port *uart = (struct timbuart_port *)devid;
+
+ if (ioread8(uart->port.membase + TIMBUART_IPR)) {
+ uart->last_ier = ioread8(uart->port.membase + TIMBUART_IER);
+
+ /* disable interrupts, the tasklet enables them again */
+ iowrite8(0, uart->port.membase + TIMBUART_IER);
+
+ /* fire off bottom half */
+ tasklet_schedule(&uart->tasklet);
+
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void timbuart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_TIMBUART;
+ timbuart_request_port(port);
+ }
+}
+
+static int timbuart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ /* we don't want the core code to modify any port params */
+ return -EINVAL;
+}
+
+static struct uart_ops timbuart_ops = {
+ .tx_empty = timbuart_tx_empty,
+ .set_mctrl = timbuart_set_mctrl,
+ .get_mctrl = timbuart_get_mctrl,
+ .stop_tx = timbuart_stop_tx,
+ .start_tx = timbuart_start_tx,
+ .flush_buffer = timbuart_flush_buffer,
+ .stop_rx = timbuart_stop_rx,
+ .enable_ms = timbuart_enable_ms,
+ .break_ctl = timbuart_break_ctl,
+ .startup = timbuart_startup,
+ .shutdown = timbuart_shutdown,
+ .set_termios = timbuart_set_termios,
+ .type = timbuart_type,
+ .release_port = timbuart_release_port,
+ .request_port = timbuart_request_port,
+ .config_port = timbuart_config_port,
+ .verify_port = timbuart_verify_port
+};
+
+static struct uart_driver timbuart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "timberdale_uart",
+ .dev_name = "ttyTU",
+ .major = TIMBUART_MAJOR,
+ .minor = TIMBUART_MINOR,
+ .nr = 1
+};
+
+static int timbuart_probe(struct platform_device *dev)
+{
+ int err;
+ struct timbuart_port *uart;
+ struct resource *iomem;
+
+ dev_dbg(&dev->dev, "%s\n", __func__);
+
+ uart = kzalloc(sizeof(*uart), GFP_KERNEL);
+ if (!uart) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ uart->usedma = 0;
+
+ uart->port.uartclk = 3250000 * 16;
+ uart->port.fifosize = TIMBUART_FIFO_SIZE;
+ uart->port.regshift = 2;
+ uart->port.iotype = UPIO_MEM;
+ uart->port.ops = &timbuart_ops;
+ uart->port.irq = 0;
+ uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
+ uart->port.line = 0;
+ uart->port.dev = &dev->dev;
+
+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ err = -ENOMEM;
+ goto err_register;
+ }
+ uart->port.mapbase = iomem->start;
+ uart->port.membase = NULL;
+
+ err = platform_get_irq(dev, 0);
+ if (err < 0) {
+ err = -EINVAL;
+ goto err_register;
+ }
+ uart->port.irq = err;
+
+ tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
+
+ err = uart_register_driver(&timbuart_driver);
+ if (err)
+ goto err_register;
+
+ err = uart_add_one_port(&timbuart_driver, &uart->port);
+ if (err)
+ goto err_add_port;
+
+ platform_set_drvdata(dev, uart);
+
+ return 0;
+
+err_add_port:
+ uart_unregister_driver(&timbuart_driver);
+err_register:
+ kfree(uart);
+err_mem:
+ printk(KERN_ERR "timberdale: Failed to register Timberdale UART: %d\n",
+ err);
+
+ return err;
+}
+
+static int timbuart_remove(struct platform_device *dev)
+{
+ struct timbuart_port *uart = platform_get_drvdata(dev);
+
+ tasklet_kill(&uart->tasklet);
+ uart_remove_one_port(&timbuart_driver, &uart->port);
+ uart_unregister_driver(&timbuart_driver);
+ kfree(uart);
+
+ return 0;
+}
+
+static struct platform_driver timbuart_platform_driver = {
+ .driver = {
+ .name = "timb-uart",
+ .owner = THIS_MODULE,
+ },
+ .probe = timbuart_probe,
+ .remove = timbuart_remove,
+};
+
+/*--------------------------------------------------------------------------*/
+
+static int __init timbuart_init(void)
+{
+ return platform_driver_register(&timbuart_platform_driver);
+}
+
+static void __exit timbuart_exit(void)
+{
+ platform_driver_unregister(&timbuart_platform_driver);
+}
+
+module_init(timbuart_init);
+module_exit(timbuart_exit);
+
+MODULE_DESCRIPTION("Timberdale UART driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:timb-uart");
+
diff --git a/drivers/serial/timbuart.h b/drivers/serial/timbuart.h
new file mode 100644
index 00000000000..7e566766bc4
--- /dev/null
+++ b/drivers/serial/timbuart.h
@@ -0,0 +1,58 @@
+/*
+ * timbuart.h timberdale FPGA UART driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA UART
+ */
+
+#ifndef _TIMBUART_H
+#define _TIMBUART_H
+
+#define TIMBUART_FIFO_SIZE 2048
+
+#define TIMBUART_RXFIFO 0x08
+#define TIMBUART_TXFIFO 0x0c
+#define TIMBUART_IER 0x10
+#define TIMBUART_IPR 0x14
+#define TIMBUART_ISR 0x18
+#define TIMBUART_CTRL 0x1c
+#define TIMBUART_BAUDRATE 0x20
+
+#define TIMBUART_CTRL_RTS 0x01
+#define TIMBUART_CTRL_CTS 0x02
+#define TIMBUART_CTRL_FLSHTX 0x40
+#define TIMBUART_CTRL_FLSHRX 0x80
+
+#define TXBF 0x01
+#define TXBAE 0x02
+#define CTS_DELTA 0x04
+#define RXDP 0x08
+#define RXBAF 0x10
+#define RXBF 0x20
+#define RXTT 0x40
+#define RXBNAE 0x80
+#define TXBE 0x100
+
+#define RXFLAGS (RXDP | RXBAF | RXBF | RXTT | RXBNAE)
+#define TXFLAGS (TXBF | TXBAE)
+
+#define TIMBUART_MAJOR 204
+#define TIMBUART_MINOR 192
+
+#endif /* _TIMBUART_H */
+
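
For the new timbuart driver above, the interesting design point is the split between the hard interrupt handler, which saves and masks TIMBUART_IER and only schedules the tasklet, and the tasklet, which services TX/RX/modem status and writes back the interrupt-enable mask it wants next. The following user-space model is purely illustrative (no kernel API is involved); the flag values are those defined in timbuart.h above, and the fake_* names are invented for the sketch.

/*
 * Rough model of the timbuart top half / tasklet interaction: mask in the
 * hard handler, recompute and restore the enable mask in the bottom half.
 */
#include <stdio.h>
#include <stdint.h>

#define TXBF		0x01
#define TXBAE		0x02
#define CTS_DELTA	0x04
#define RXDP		0x08
#define RXBAF		0x10
#define RXBF		0x20
#define RXTT		0x40
#define TXFLAGS		(TXBF | TXBAE)

struct fake_uart {
	uint8_t ier;			/* TIMBUART_IER image */
	uint8_t isr;			/* TIMBUART_ISR image */
	uint8_t last_ier;
	int tasklet_pending;
};

/* hard interrupt: record IER, mask everything, schedule the bottom half */
static void fake_hard_irq(struct fake_uart *u)
{
	u->last_ier = u->ier;
	u->ier = 0;
	u->tasklet_pending = 1;
}

/* bottom half: handle the sources, then re-enable the interrupts it wants */
static void fake_tasklet(struct fake_uart *u)
{
	uint8_t ier = 0;

	u->tasklet_pending = 0;
	if (u->isr & RXDP)
		printf("tasklet: draining RX FIFO\n");
	u->isr = 0;				/* ack handled sources */
	ier |= u->last_ier & TXFLAGS;		/* keep any pending TX enables */
	ier |= RXBAF | RXBF | RXTT | CTS_DELTA;	/* RX + CTS always enabled */
	u->ier = ier;
}

int main(void)
{
	struct fake_uart u = { .ier = RXBAF | RXBF | RXTT | CTS_DELTA,
			       .isr = RXDP };

	fake_hard_irq(&u);
	if (u.tasklet_pending)
		fake_tasklet(&u);
	printf("IER restored to %#x\n", u.ier);
	return 0;
}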
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 12d13d99b6f..d687a9b93d0 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -24,6 +24,7 @@
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
+#include <linux/topology.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -770,11 +771,19 @@ void __init register_intc_controller(struct intc_desc *desc)
/* register the vectors one by one */
for (i = 0; i < desc->nr_vectors; i++) {
struct intc_vect *vect = desc->vectors + i;
+ unsigned int irq = evt2irq(vect->vect);
+ struct irq_desc *irq_desc;
if (!vect->enum_id)
continue;
- intc_register_irq(desc, d, vect->enum_id, evt2irq(vect->vect));
+ irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
+ if (unlikely(!irq_desc)) {
+ printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+ continue;
+ }
+
+ intc_register_irq(desc, d, vect->enum_id, irq);
}
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 83a185d5296..e8aae227b5e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -118,7 +118,7 @@ config SPI_GPIO
config SPI_IMX
tristate "Freescale iMX SPI controller"
- depends on ARCH_IMX && EXPERIMENTAL
+ depends on ARCH_MX1 && EXPERIMENTAL
help
This enables using the Freescale iMX SPI controller in master
mode.
@@ -171,6 +171,15 @@ config SPI_ORION
help
This enables using the SPI master controller on the Orion chips.
+config SPI_PL022
+ tristate "ARM AMBA PL022 SSP controller (EXPERIMENTAL)"
+ depends on ARM_AMBA && EXPERIMENTAL
+ default y if MACH_U300
+ help
+ This selects the ARM(R) AMBA(R) PrimeCell PL022 SSP
+ controller. If you have an embedded system with an AMBA(R)
+ bus and a PL022 controller, say Y or M here.
+
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
depends on ARCH_PXA && EXPERIMENTAL
@@ -212,7 +221,7 @@ config SPI_TXX9
config SPI_XILINX
tristate "Xilinx SPI controller"
- depends on XILINX_VIRTEX && EXPERIMENTAL
+ depends on (XILINX_VIRTEX || MICROBLAZE) && EXPERIMENTAL
select SPI_BITBANG
help
This exposes the SPI controller IP from the Xilinx EDK.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 5d0451936d8..ecfadb18048 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
obj-$(CONFIG_SPI_ORION) += orion_spi.o
+obj-$(CONFIG_SPI_PL022) += amba-pl022.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o
obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
new file mode 100644
index 00000000000..da76797ce8b
--- /dev/null
+++ b/drivers/spi/amba-pl022.c
@@ -0,0 +1,1866 @@
+/*
+ * drivers/spi/amba-pl022.c
+ *
+ * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
+ *
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * Initial version inspired by:
+ * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
+ * Initial adoption to PL022 by:
+ * Sachin Verma <sachin.verma@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * TODO:
+ * - add timeout on polled transfers
+ * - add generic DMA framework support
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl022.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+/*
+ * This macro is used to define some register default values.
+ * reg is masked with mask, the OR:ed with an (again masked)
+ * val shifted sb steps to the left.
+ */
+#define SSP_WRITE_BITS(reg, val, mask, sb) \
+ ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
+
+/*
+ * This macro is also used to define some default values.
+ * It will just shift val by sb steps to the left and mask
+ * the result with mask.
+ */
+#define GEN_MASK_BITS(val, mask, sb) \
+ (((val)<<(sb)) & (mask))
+
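
The two helper macros above are used throughout the new driver to build and update bit fields in the SSP registers. As a quick, self-contained illustration (not part of the patch), the snippet below copies the macros and two CR0 mask definitions from this file and shows GEN_MASK_BITS() building an initial value and SSP_WRITE_BITS() rewriting one field in place; the DSS/SCR example values are arbitrary.

#include <stdio.h>

#define SSP_WRITE_BITS(reg, val, mask, sb) \
	((reg) = (((reg) & ~(mask)) | (((val) << (sb)) & (mask))))
#define GEN_MASK_BITS(val, mask, sb) \
	(((val) << (sb)) & (mask))

#define SSP_CR0_MASK_DSS	(0x1FUL << 0)	/* data size select */
#define SSP_CR0_MASK_SCR	(0xFFUL << 8)	/* serial clock rate */

int main(void)
{
	unsigned long cr0 = 0;

	/* build a default with GEN_MASK_BITS(): DSS field = 0x07 */
	cr0 = GEN_MASK_BITS(0x07, SSP_CR0_MASK_DSS, 0);

	/* later, rewrite just the SCR field without disturbing DSS */
	SSP_WRITE_BITS(cr0, 0x12, SSP_CR0_MASK_SCR, 8);

	printf("CR0 = 0x%04lx\n", cr0);	/* prints CR0 = 0x1207 */
	return 0;
}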
+#define DRIVE_TX 0
+#define DO_NOT_DRIVE_TX 1
+
+#define DO_NOT_QUEUE_DMA 0
+#define QUEUE_DMA 1
+
+#define RX_TRANSFER 1
+#define TX_TRANSFER 2
+
+/*
+ * Macros to access SSP Registers with their offsets
+ */
+#define SSP_CR0(r) (r + 0x000)
+#define SSP_CR1(r) (r + 0x004)
+#define SSP_DR(r) (r + 0x008)
+#define SSP_SR(r) (r + 0x00C)
+#define SSP_CPSR(r) (r + 0x010)
+#define SSP_IMSC(r) (r + 0x014)
+#define SSP_RIS(r) (r + 0x018)
+#define SSP_MIS(r) (r + 0x01C)
+#define SSP_ICR(r) (r + 0x020)
+#define SSP_DMACR(r) (r + 0x024)
+#define SSP_ITCR(r) (r + 0x080)
+#define SSP_ITIP(r) (r + 0x084)
+#define SSP_ITOP(r) (r + 0x088)
+#define SSP_TDR(r) (r + 0x08C)
+
+#define SSP_PID0(r) (r + 0xFE0)
+#define SSP_PID1(r) (r + 0xFE4)
+#define SSP_PID2(r) (r + 0xFE8)
+#define SSP_PID3(r) (r + 0xFEC)
+
+#define SSP_CID0(r) (r + 0xFF0)
+#define SSP_CID1(r) (r + 0xFF4)
+#define SSP_CID2(r) (r + 0xFF8)
+#define SSP_CID3(r) (r + 0xFFC)
+
+/*
+ * SSP Control Register 0 - SSP_CR0
+ */
+#define SSP_CR0_MASK_DSS (0x1FUL << 0)
+#define SSP_CR0_MASK_HALFDUP (0x1UL << 5)
+#define SSP_CR0_MASK_SPO (0x1UL << 6)
+#define SSP_CR0_MASK_SPH (0x1UL << 7)
+#define SSP_CR0_MASK_SCR (0xFFUL << 8)
+#define SSP_CR0_MASK_CSS (0x1FUL << 16)
+#define SSP_CR0_MASK_FRF (0x3UL << 21)
+
+/*
+ * SSP Control Register 0 - SSP_CR1
+ */
+#define SSP_CR1_MASK_LBM (0x1UL << 0)
+#define SSP_CR1_MASK_SSE (0x1UL << 1)
+#define SSP_CR1_MASK_MS (0x1UL << 2)
+#define SSP_CR1_MASK_SOD (0x1UL << 3)
+#define SSP_CR1_MASK_RENDN (0x1UL << 4)
+#define SSP_CR1_MASK_TENDN (0x1UL << 5)
+#define SSP_CR1_MASK_MWAIT (0x1UL << 6)
+#define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7)
+#define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10)
+
+/*
+ * SSP Data Register - SSP_DR
+ */
+#define SSP_DR_MASK_DATA 0xFFFFFFFF
+
+/*
+ * SSP Status Register - SSP_SR
+ */
+#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
+#define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
+#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
+#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
+#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
+
+/*
+ * SSP Clock Prescale Register - SSP_CPSR
+ */
+#define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
+
+/*
+ * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
+ */
+#define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
+#define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */
+#define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */
+#define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */
+
+/*
+ * SSP Raw Interrupt Status Register - SSP_RIS
+ */
+/* Receive Overrun Raw Interrupt status */
+#define SSP_RIS_MASK_RORRIS (0x1UL << 0)
+/* Receive Timeout Raw Interrupt status */
+#define SSP_RIS_MASK_RTRIS (0x1UL << 1)
+/* Receive FIFO Raw Interrupt status */
+#define SSP_RIS_MASK_RXRIS (0x1UL << 2)
+/* Transmit FIFO Raw Interrupt status */
+#define SSP_RIS_MASK_TXRIS (0x1UL << 3)
+
+/*
+ * SSP Masked Interrupt Status Register - SSP_MIS
+ */
+/* Receive Overrun Masked Interrupt status */
+#define SSP_MIS_MASK_RORMIS (0x1UL << 0)
+/* Receive Timeout Masked Interrupt status */
+#define SSP_MIS_MASK_RTMIS (0x1UL << 1)
+/* Receive FIFO Masked Interrupt status */
+#define SSP_MIS_MASK_RXMIS (0x1UL << 2)
+/* Transmit FIFO Masked Interrupt status */
+#define SSP_MIS_MASK_TXMIS (0x1UL << 3)
+
+/*
+ * SSP Interrupt Clear Register - SSP_ICR
+ */
+/* Receive Overrun Raw Clear Interrupt bit */
+#define SSP_ICR_MASK_RORIC (0x1UL << 0)
+/* Receive Timeout Clear Interrupt bit */
+#define SSP_ICR_MASK_RTIC (0x1UL << 1)
+
+/*
+ * SSP DMA Control Register - SSP_DMACR
+ */
+/* Receive DMA Enable bit */
+#define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
+/* Transmit DMA Enable bit */
+#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
+
+/*
+ * SSP Integration Test control Register - SSP_ITCR
+ */
+#define SSP_ITCR_MASK_ITEN (0x1UL << 0)
+#define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
+
+/*
+ * SSP Integration Test Input Register - SSP_ITIP
+ */
+#define ITIP_MASK_SSPRXD (0x1UL << 0)
+#define ITIP_MASK_SSPFSSIN (0x1UL << 1)
+#define ITIP_MASK_SSPCLKIN (0x1UL << 2)
+#define ITIP_MASK_RXDMAC (0x1UL << 3)
+#define ITIP_MASK_TXDMAC (0x1UL << 4)
+#define ITIP_MASK_SSPTXDIN (0x1UL << 5)
+
+/*
+ * SSP Integration Test output Register - SSP_ITOP
+ */
+#define ITOP_MASK_SSPTXD (0x1UL << 0)
+#define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
+#define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
+#define ITOP_MASK_SSPOEn (0x1UL << 3)
+#define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
+#define ITOP_MASK_RORINTR (0x1UL << 5)
+#define ITOP_MASK_RTINTR (0x1UL << 6)
+#define ITOP_MASK_RXINTR (0x1UL << 7)
+#define ITOP_MASK_TXINTR (0x1UL << 8)
+#define ITOP_MASK_INTR (0x1UL << 9)
+#define ITOP_MASK_RXDMABREQ (0x1UL << 10)
+#define ITOP_MASK_RXDMASREQ (0x1UL << 11)
+#define ITOP_MASK_TXDMABREQ (0x1UL << 12)
+#define ITOP_MASK_TXDMASREQ (0x1UL << 13)
+
+/*
+ * SSP Test Data Register - SSP_TDR
+ */
+#define TDR_MASK_TESTDATA (0xFFFFFFFF)
+
+/*
+ * Message State
+ * we use the spi_message.state (void *) pointer to
+ * hold a single state value, that's why all this
+ * (void *) casting is done here.
+ */
+#define STATE_START ((void *) 0)
+#define STATE_RUNNING ((void *) 1)
+#define STATE_DONE ((void *) 2)
+#define STATE_ERROR ((void *) -1)
+
+/*
+ * Queue State
+ */
+#define QUEUE_RUNNING (0)
+#define QUEUE_STOPPED (1)
+/*
+ * SSP State - Whether Enabled or Disabled
+ */
+#define SSP_DISABLED (0)
+#define SSP_ENABLED (1)
+
+/*
+ * SSP DMA State - Whether DMA Enabled or Disabled
+ */
+#define SSP_DMA_DISABLED (0)
+#define SSP_DMA_ENABLED (1)
+
+/*
+ * SSP Clock Defaults
+ */
+#define NMDK_SSP_DEFAULT_CLKRATE 0x2
+#define NMDK_SSP_DEFAULT_PRESCALE 0x40
+
+/*
+ * SSP Clock Parameter ranges
+ */
+#define CPSDVR_MIN 0x02
+#define CPSDVR_MAX 0xFE
+#define SCR_MIN 0x00
+#define SCR_MAX 0xFF
+
+/*
+ * SSP Interrupt related Macros
+ */
+#define DEFAULT_SSP_REG_IMSC 0x0UL
+#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
+#define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)
+
+#define CLEAR_ALL_INTERRUPTS 0x3
+
+
+/*
+ * The type of reading going on on this chip
+ */
+enum ssp_reading {
+ READING_NULL,
+ READING_U8,
+ READING_U16,
+ READING_U32
+};
+
+/*
+ * The type of writing going on on this chip
+ */
+enum ssp_writing {
+ WRITING_NULL,
+ WRITING_U8,
+ WRITING_U16,
+ WRITING_U32
+};
+
+/**
+ * struct vendor_data - vendor-specific config parameters
+ * for PL022 derivatives
+ * @fifodepth: depth of the FIFOs (both TX and RX)
+ * @max_bpw: maximum number of bits per word
+ * @unidir: supports unidirectional transfers
+ */
+struct vendor_data {
+ int fifodepth;
+ int max_bpw;
+ bool unidir;
+};
+
+/**
+ * struct pl022 - This is the private SSP driver data structure
+ * @adev: AMBA device model hookup
+ * @vendor: vendor-specific config parameters for this PL022 derivative
+ * @phybase: the physical memory where the SSP device resides
+ * @virtbase: the virtual memory where the SSP is mapped
+ * @clk: the SSP/SPI bus clock for this block
+ * @master: SPI framework hookup
+ * @master_info: controller-specific data from machine setup
+ * @workqueue: a workqueue on which any spi_message request is queued
+ * @pump_messages: work struct for scheduling work to the workqueue
+ * @queue_lock: spinlock to synchronise access to the message queue
+ * @queue: the message queue itself
+ * @busy: whether the message pump is busy
+ * @run: whether the message queue is running (QUEUE_RUNNING) or stopped
+ * @pump_transfers: tasklet used in interrupt transfer mode
+ * @cur_msg: pointer to the current spi_message being processed
+ * @cur_transfer: pointer to the current spi_transfer
+ * @cur_chip: pointer to the current client's chip (assigned from controller_state)
+ * @tx: current position in the TX buffer to be read
+ * @tx_end: end position in the TX buffer to be read
+ * @rx: current position in the RX buffer to be written
+ * @rx_end: end position in the RX buffer to be written
+ * @read: the type of read currently going on
+ * @write: the type of write currently going on
+ */
+struct pl022 {
+ struct amba_device *adev;
+ struct vendor_data *vendor;
+ resource_size_t phybase;
+ void __iomem *virtbase;
+ struct clk *clk;
+ struct spi_master *master;
+ struct pl022_ssp_controller *master_info;
+ /* Driver message queue */
+ struct workqueue_struct *workqueue;
+ struct work_struct pump_messages;
+ spinlock_t queue_lock;
+ struct list_head queue;
+ int busy;
+ int run;
+ /* Message transfer pump */
+ struct tasklet_struct pump_transfers;
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ enum ssp_reading read;
+ enum ssp_writing write;
+};
+
+/**
+ * struct chip_data - To maintain runtime state of SSP for each client chip
+ * @cr0: value of control register CR0 of SSP
+ * @cr1: value of control register CR1 of SSP
+ * @dmacr: value of the DMA control register of SSP
+ * @cpsr: value of the clock prescale register
+ * @n_bytes: how many bytes (a power of 2) are required for the client's data width
+ * @enable_dma: whether to enable DMA or not
+ * @read: the type of read to use when doing a transfer for this chip
+ * @write: the type of write to use when doing a transfer for this chip
+ * @cs_control: chip select callback provided by the chip
+ * @xfer_type: polling/interrupt/DMA
+ *
+ * Runtime state of the SSP controller, maintained per chip. This is
+ * set up according to the current message being served.
+ */
+struct chip_data {
+ u16 cr0;
+ u16 cr1;
+ u16 dmacr;
+ u16 cpsr;
+ u8 n_bytes;
+ u8 enable_dma:1;
+ enum ssp_reading read;
+ enum ssp_writing write;
+ void (*cs_control) (u32 command);
+ int xfer_type;
+};
+
+/**
+ * null_cs_control - Dummy chip select function
+ * @command: select/deselect the chip
+ *
+ * If no chip select function is provided by the client, this is used
+ * as a dummy chip select.
+ */
+static void null_cs_control(u32 command)
+{
+ pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
+}
+
+/**
+ * giveback - current spi_message is over, schedule the next message and
+ * call the callback of this message. Assumes that the caller has already
+ * set message->status; DMA and PIO irqs are blocked.
+ * @pl022: SSP driver private data structure
+ */
+static void giveback(struct pl022 *pl022)
+{
+ struct spi_transfer *last_transfer;
+ unsigned long flags;
+ struct spi_message *msg;
+ void (*curr_cs_control) (u32 command);
+
+ /*
+ * This local reference to the chip select function
+ * is needed because we set cur_chip to NULL
+ * as a step toward terminating the message.
+ */
+ curr_cs_control = pl022->cur_chip->cs_control;
+ spin_lock_irqsave(&pl022->queue_lock, flags);
+ msg = pl022->cur_msg;
+ pl022->cur_msg = NULL;
+ pl022->cur_transfer = NULL;
+ pl022->cur_chip = NULL;
+ queue_work(pl022->workqueue, &pl022->pump_messages);
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+
+ last_transfer = list_entry(msg->transfers.prev,
+ struct spi_transfer,
+ transfer_list);
+
+ /* Delay if requested before any change in chip select */
+ if (last_transfer->delay_usecs)
+ /*
+ * FIXME: This runs in interrupt context.
+ * Is this really smart?
+ */
+ udelay(last_transfer->delay_usecs);
+
+ /*
+ * Drop chip select UNLESS cs_change is true or we are returning
+ * a message with an error, or next message is for another chip
+ */
+ if (!last_transfer->cs_change)
+ curr_cs_control(SSP_CHIP_DESELECT);
+ else {
+ struct spi_message *next_msg;
+
+ /* Holding of cs was hinted, but we need to make sure
+ * the next message is for the same chip. Don't waste
+ * time with the following tests unless this was hinted.
+ *
+ * We cannot postpone this until pump_messages, because
+ * after calling msg->complete (below) the driver that
+ * sent the current message could be unloaded, which
+ * could invalidate the cs_control() callback...
+ */
+
+ /* get a pointer to the next message, if any */
+ spin_lock_irqsave(&pl022->queue_lock, flags);
+ if (list_empty(&pl022->queue))
+ next_msg = NULL;
+ else
+ next_msg = list_entry(pl022->queue.next,
+ struct spi_message, queue);
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+
+ /* see if the next and current messages point
+ * to the same chip
+ */
+ if (next_msg && next_msg->spi != msg->spi)
+ next_msg = NULL;
+ if (!next_msg || msg->state == STATE_ERROR)
+ curr_cs_control(SSP_CHIP_DESELECT);
+ }
+ msg->state = NULL;
+ if (msg->complete)
+ msg->complete(msg->context);
+ /* This message is completed, so let's turn off the clock! */
+ clk_disable(pl022->clk);
+}
+
+/**
+ * flush - flush the FIFO to reach a clean state
+ * @pl022: SSP driver private data structure
+ */
+static int flush(struct pl022 *pl022)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ dev_dbg(&pl022->adev->dev, "flush\n");
+ do {
+ while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
+ readw(SSP_DR(pl022->virtbase));
+ } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
+ return limit;
+}
+
+/**
+ * restore_state - Load configuration of current chip
+ * @pl022: SSP driver private data structure
+ */
+static void restore_state(struct pl022 *pl022)
+{
+ struct chip_data *chip = pl022->cur_chip;
+
+ writew(chip->cr0, SSP_CR0(pl022->virtbase));
+ writew(chip->cr1, SSP_CR1(pl022->virtbase));
+ writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
+ writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
+ writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
+}
+
+/**
+ * load_ssp_default_config - Load default configuration for SSP
+ * @pl022: SSP driver private data structure
+ */
+
+/*
+ * Default SSP Register Values
+ */
+#define DEFAULT_SSP_REG_CR0 ( \
+ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
+ GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \
+ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
+ GEN_MASK_BITS(SSP_CLK_FALLING_EDGE, SSP_CR0_MASK_SPH, 7) | \
+ GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
+ GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \
+ GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \
+)
+
+#define DEFAULT_SSP_REG_CR1 ( \
+ GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
+ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
+ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
+ GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
+ GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \
+ GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \
+ GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\
+ GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \
+ GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \
+)
+
+#define DEFAULT_SSP_REG_CPSR ( \
+ GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
+)
+
+#define DEFAULT_SSP_REG_DMACR (\
+ GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
+ GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
+)
+
+
+static void load_ssp_default_config(struct pl022 *pl022)
+{
+ writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
+ writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
+}
+
+/**
+ * readwriter - read from the RX FIFO and write to the TX FIFO
+ * @pl022: SSP driver private data structure
+ *
+ * This will write to TX and read from RX according to the parameters
+ * set in pl022.
+ */
+static void readwriter(struct pl022 *pl022)
+{
+
+ /*
+ * The FIFO depth is different between PrimeCell variants.
+ * I believe filling in too much in the FIFO might cause
+ * errors in 8-bit wide transfers on ARM variants (just an
+ * 8-word FIFO, which means only 8x8 = 64 bits in the FIFO)
+ * at least.
+ *
+ * FIXME: currently we have no logic to account for this.
+ * Perhaps there is even something broken in HW regarding
+ * 8-bit transfers (it doesn't fail on 16-bit) so this needs
+ * more investigation...
+ */
+ dev_dbg(&pl022->adev->dev,
+ "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
+ __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
+
+ /* Read as much as you can */
+ while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
+ && (pl022->rx < pl022->rx_end)) {
+ switch (pl022->read) {
+ case READING_NULL:
+ readw(SSP_DR(pl022->virtbase));
+ break;
+ case READING_U8:
+ *(u8 *) (pl022->rx) =
+ readw(SSP_DR(pl022->virtbase)) & 0xFFU;
+ break;
+ case READING_U16:
+ *(u16 *) (pl022->rx) =
+ (u16) readw(SSP_DR(pl022->virtbase));
+ break;
+ case READING_U32:
+ *(u32 *) (pl022->rx) =
+ readl(SSP_DR(pl022->virtbase));
+ break;
+ }
+ pl022->rx += (pl022->cur_chip->n_bytes);
+ }
+ /*
+ * Write as much as you can, while keeping an eye on the RX FIFO!
+ */
+ while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
+ && (pl022->tx < pl022->tx_end)) {
+ switch (pl022->write) {
+ case WRITING_NULL:
+ writew(0x0, SSP_DR(pl022->virtbase));
+ break;
+ case WRITING_U8:
+ writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
+ break;
+ case WRITING_U16:
+ writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
+ break;
+ case WRITING_U32:
+ writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
+ break;
+ }
+ pl022->tx += (pl022->cur_chip->n_bytes);
+ /*
+ * This inner reader takes care of things appearing in the RX
+ * FIFO as we're transmitting. This will happen a lot since the
+ * clock starts running when you put things into the TX FIFO,
+ * and then things are continuously clocked into the RX FIFO.
+ */
+ while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
+ && (pl022->rx < pl022->rx_end)) {
+ switch (pl022->read) {
+ case READING_NULL:
+ readw(SSP_DR(pl022->virtbase));
+ break;
+ case READING_U8:
+ *(u8 *) (pl022->rx) =
+ readw(SSP_DR(pl022->virtbase)) & 0xFFU;
+ break;
+ case READING_U16:
+ *(u16 *) (pl022->rx) =
+ (u16) readw(SSP_DR(pl022->virtbase));
+ break;
+ case READING_U32:
+ *(u32 *) (pl022->rx) =
+ readl(SSP_DR(pl022->virtbase));
+ break;
+ }
+ pl022->rx += (pl022->cur_chip->n_bytes);
+ }
+ }
+ /*
+ * When we exit here the TX FIFO should be full and the RX FIFO
+ * should be empty
+ */
+}
+
+
+/**
+ * next_transfer - Move to the Next transfer in the current spi message
+ * @pl022: SSP driver private data structure
+ *
+ * This function moves through the linked list of spi transfers in the
+ * current spi message and returns with the state of the current spi
+ * message, i.e. whether its last transfer is done (STATE_DONE) or the
+ * next transfer is ready (STATE_RUNNING)
+ */
+static void *next_transfer(struct pl022 *pl022)
+{
+ struct spi_message *msg = pl022->cur_msg;
+ struct spi_transfer *trans = pl022->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ pl022->cur_transfer =
+ list_entry(trans->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ return STATE_RUNNING;
+ }
+ return STATE_DONE;
+}
+/**
+ * pl022_interrupt_handler - Interrupt handler for SSP controller
+ * @irq: IRQ number
+ * @dev_id: local data, here the SSP driver private data structure
+ *
+ * This function handles interrupts generated for an interrupt based transfer.
+ * If a receive overrun (ROR) interrupt has occurred we disable the SSP, flag
+ * the current message's state as STATE_ERROR and schedule the tasklet
+ * pump_transfers, which will do the postprocessing of the current message by
+ * calling giveback(). Otherwise it reads data from the RX FIFO until there is
+ * no more data, and writes data to the TX FIFO until it is full. If we have
+ * completed the current transfer we move to the next transfer and schedule
+ * the tasklet.
+ */
+static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
+{
+ struct pl022 *pl022 = dev_id;
+ struct spi_message *msg = pl022->cur_msg;
+ u16 irq_status = 0;
+ u16 flag = 0;
+
+ if (unlikely(!msg)) {
+ dev_err(&pl022->adev->dev,
+ "bad message state in interrupt handler");
+ /* Never fail */
+ return IRQ_HANDLED;
+ }
+
+ /* Read the Interrupt Status Register */
+ irq_status = readw(SSP_MIS(pl022->virtbase));
+
+ if (unlikely(!irq_status))
+ return IRQ_NONE;
+
+ /* This handles the error code interrupts */
+ if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
+ /*
+ * Overrun interrupt - bail out since our data has been
+ * corrupted
+ */
+ dev_err(&pl022->adev->dev,
+ "FIFO overrun\n");
+ if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
+ dev_err(&pl022->adev->dev,
+ "RXFIFO is full\n");
+ if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
+ dev_err(&pl022->adev->dev,
+ "TXFIFO is full\n");
+
+ /*
+ * Disable and clear interrupts, disable SSP,
+ * mark message with bad status so it can be
+ * retried.
+ */
+ writew(DISABLE_ALL_INTERRUPTS,
+ SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+ msg->state = STATE_ERROR;
+
+ /* Schedule message queue handler */
+ tasklet_schedule(&pl022->pump_transfers);
+ return IRQ_HANDLED;
+ }
+
+ readwriter(pl022);
+
+ if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
+ flag = 1;
+ /* Disable Transmit interrupt */
+ writew(readw(SSP_IMSC(pl022->virtbase)) &
+ (~SSP_IMSC_MASK_TXIM),
+ SSP_IMSC(pl022->virtbase));
+ }
+
+ /*
+ * Since all transactions must write as much as shall be read,
+ * we can conclude the entire transaction once RX is complete.
+ * At this point, all TX will always be finished.
+ */
+ if (pl022->rx >= pl022->rx_end) {
+ writew(DISABLE_ALL_INTERRUPTS,
+ SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
+ if (unlikely(pl022->rx > pl022->rx_end)) {
+ dev_warn(&pl022->adev->dev, "read %u surplus "
+ "bytes (did you request an odd "
+ "number of bytes on a 16bit bus?)\n",
+ (u32) (pl022->rx - pl022->rx_end));
+ }
+ /* Update total bytes transferred */
+ msg->actual_length += pl022->cur_transfer->len;
+ if (pl022->cur_transfer->cs_change)
+ pl022->cur_chip->
+ cs_control(SSP_CHIP_DESELECT);
+ /* Move to next transfer */
+ msg->state = next_transfer(pl022);
+ tasklet_schedule(&pl022->pump_transfers);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * set_up_next_transfer - set up the buffer pointers for a transfer
+ * @pl022: SSP driver private data structure
+ * @transfer: the spi_transfer to set up
+ *
+ * This sets up the pointers to memory for the next message to
+ * send out on the SPI bus.
+ */
+static int set_up_next_transfer(struct pl022 *pl022,
+ struct spi_transfer *transfer)
+{
+ int residue;
+
+ /* Sanity check the message for this bus width */
+ residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
+ if (unlikely(residue != 0)) {
+ dev_err(&pl022->adev->dev,
+ "message of %u bytes to transmit but the current "
+ "chip bus has a data width of %u bytes!\n",
+ pl022->cur_transfer->len,
+ pl022->cur_chip->n_bytes);
+ dev_err(&pl022->adev->dev, "skipping this message\n");
+ return -EIO;
+ }
+ pl022->tx = (void *)transfer->tx_buf;
+ pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
+ pl022->rx = (void *)transfer->rx_buf;
+ pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
+ pl022->write =
+ pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
+ pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
+ return 0;
+}
+
+/**
+ * pump_transfers - Tasklet function which schedules next interrupt transfer
+ * when running in interrupt transfer mode.
+ * @data: SSP driver private data structure
+ *
+ */
+static void pump_transfers(unsigned long data)
+{
+ struct pl022 *pl022 = (struct pl022 *) data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+
+ /* Get current state information */
+ message = pl022->cur_msg;
+ transfer = pl022->cur_transfer;
+
+ /* Handle for abort */
+ if (message->state == STATE_ERROR) {
+ message->status = -EIO;
+ giveback(pl022);
+ return;
+ }
+
+ /* Handle end of message */
+ if (message->state == STATE_DONE) {
+ message->status = 0;
+ giveback(pl022);
+ return;
+ }
+
+ /* Delay if requested at end of transfer before CS change */
+ if (message->state == STATE_RUNNING) {
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer,
+ transfer_list);
+ if (previous->delay_usecs)
+ /*
+ * FIXME: This runs in interrupt context.
+ * Is this really smart?
+ */
+ udelay(previous->delay_usecs);
+
+ /* Drop chip select only if cs_change is requested */
+ if (previous->cs_change)
+ pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+ } else {
+ /* STATE_START */
+ message->state = STATE_RUNNING;
+ }
+
+ if (set_up_next_transfer(pl022, transfer)) {
+ message->state = STATE_ERROR;
+ message->status = -EIO;
+ giveback(pl022);
+ return;
+ }
+ /* Flush the FIFOs and let's go! */
+ flush(pl022);
+ writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+}
+
+/**
+ * NOT IMPLEMENTED
+ * configure_dma - configures the DMA pipes for DMA transfers
+ * @data: SSP driver's private data structure
+ *
+ */
+static int configure_dma(void *data)
+{
+ struct pl022 *pl022 = data;
+ dev_dbg(&pl022->adev->dev, "configure DMA\n");
+ return -ENOTSUPP;
+}
+
+/**
+ * do_dma_transfer - handles the transfer of the current message
+ * if it is a DMA transfer
+ * NOT FULLY IMPLEMENTED
+ * @data: SSP driver's private data structure
+ */
+static void do_dma_transfer(void *data)
+{
+ struct pl022 *pl022 = data;
+
+ if (configure_dma(data)) {
+ dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n");
+ goto err_config_dma;
+ }
+
+ /* TODO: Implement DMA setup of pipes here */
+
+ /* Enable target chip, set up transfer */
+ pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+ if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
+ /* Error path */
+ pl022->cur_msg->state = STATE_ERROR;
+ pl022->cur_msg->status = -EIO;
+ giveback(pl022);
+ return;
+ }
+ /* Enable SSP */
+ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
+ SSP_CR1(pl022->virtbase));
+
+ /* TODO: Enable the DMA transfer here */
+ return;
+
+ err_config_dma:
+ pl022->cur_msg->state = STATE_ERROR;
+ pl022->cur_msg->status = -EIO;
+ giveback(pl022);
+ return;
+}
+
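+/**
+ * do_interrupt_transfer - start an interrupt driven transfer of the
+ * current message
+ * @data: SSP driver's private data structure
+ *
+ * Selects the target chip, sets up the buffer pointers for the current
+ * transfer and then enables the SSP and all its interrupts; the actual
+ * data movement is driven from pl022_interrupt_handler().
+ */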
+static void do_interrupt_transfer(void *data)
+{
+ struct pl022 *pl022 = data;
+
+ /* Enable target chip */
+ pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+ if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
+ /* Error path */
+ pl022->cur_msg->state = STATE_ERROR;
+ pl022->cur_msg->status = -EIO;
+ giveback(pl022);
+ return;
+ }
+ /* Enable SSP, turn on interrupts */
+ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
+ SSP_CR1(pl022->virtbase));
+ writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+}
+
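+/**
+ * do_polling_transfer - transfer the current message by busy-waiting
+ * @data: SSP driver's private data structure
+ *
+ * Walks the transfers of the current message, sets up each one and
+ * polls the FIFOs via readwriter() until all data has been moved,
+ * then calls giveback() to complete the message.
+ */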
+static void do_polling_transfer(void *data)
+{
+ struct pl022 *pl022 = data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct chip_data *chip;
+
+ chip = pl022->cur_chip;
+ message = pl022->cur_msg;
+
+ while (message->state != STATE_DONE) {
+ /* Handle for abort */
+ if (message->state == STATE_ERROR)
+ break;
+ transfer = pl022->cur_transfer;
+
+ /* Delay if requested at end of transfer */
+ if (message->state == STATE_RUNNING) {
+ previous =
+ list_entry(transfer->transfer_list.prev,
+ struct spi_transfer, transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ if (previous->cs_change)
+ pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+ } else {
+ /* STATE_START */
+ message->state = STATE_RUNNING;
+ pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+ }
+
+ /* Configuration Changing Per Transfer */
+ if (set_up_next_transfer(pl022, transfer)) {
+ /* Error path */
+ message->state = STATE_ERROR;
+ break;
+ }
+ /* Flush FIFOs and enable SSP */
+ flush(pl022);
+ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
+ SSP_CR1(pl022->virtbase));
+
+ dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... \n");
+ /* FIXME: insert a timeout so we don't hang here indefinitely */
+ while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
+ readwriter(pl022);
+
+ /* Update total bytes transferred */
+ message->actual_length += pl022->cur_transfer->len;
+ if (pl022->cur_transfer->cs_change)
+ pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
+ /* Move to next transfer */
+ message->state = next_transfer(pl022);
+ }
+
+ /* Handle end of message */
+ if (message->state == STATE_DONE)
+ message->status = 0;
+ else
+ message->status = -EIO;
+
+ giveback(pl022);
+ return;
+}
+
+/**
+ * pump_messages - Workqueue function which processes spi message queue
+ * @data: pointer to private data of SSP driver
+ *
+ * This function checks if there is any spi message in the queue that
+ * needs processing and delegates control to the appropriate function
+ * (do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer())
+ * based on the kind of transfer.
+ *
+ */
+static void pump_messages(struct work_struct *work)
+{
+ struct pl022 *pl022 =
+ container_of(work, struct pl022, pump_messages);
+ unsigned long flags;
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&pl022->queue_lock, flags);
+ if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) {
+ pl022->busy = 0;
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+ return;
+ }
+ /* Make sure we are not already running a message */
+ if (pl022->cur_msg) {
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+ return;
+ }
+ /* Extract head of queue */
+ pl022->cur_msg =
+ list_entry(pl022->queue.next, struct spi_message, queue);
+
+ list_del_init(&pl022->cur_msg->queue);
+ pl022->busy = 1;
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+
+ /* Initial message state */
+ pl022->cur_msg->state = STATE_START;
+ pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
+ struct spi_transfer,
+ transfer_list);
+
+ /* Setup the SPI using the per chip configuration */
+ pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
+ /*
+ * We enable the clock here, then the clock will be disabled when
+ * giveback() is called in each method (poll/interrupt/DMA)
+ */
+ clk_enable(pl022->clk);
+ restore_state(pl022);
+ flush(pl022);
+
+ if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
+ do_polling_transfer(pl022);
+ else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER)
+ do_interrupt_transfer(pl022);
+ else
+ do_dma_transfer(pl022);
+}
+
+
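+/**
+ * init_queue - initialize the message queue, tasklet and workqueue
+ * @pl022: SSP driver private data structure
+ *
+ * Returns -EBUSY if the workqueue cannot be created.
+ */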
+static int __init init_queue(struct pl022 *pl022)
+{
+ INIT_LIST_HEAD(&pl022->queue);
+ spin_lock_init(&pl022->queue_lock);
+
+ pl022->run = QUEUE_STOPPED;
+ pl022->busy = 0;
+
+ tasklet_init(&pl022->pump_transfers,
+ pump_transfers, (unsigned long)pl022);
+
+ INIT_WORK(&pl022->pump_messages, pump_messages);
+ pl022->workqueue = create_singlethread_workqueue(
+ dev_name(pl022->master->dev.parent));
+ if (pl022->workqueue == NULL)
+ return -EBUSY;
+
+ return 0;
+}
+
+
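+/**
+ * start_queue - mark the message queue as running and kick the pump
+ * @pl022: SSP driver private data structure
+ *
+ * Returns -EBUSY if the queue is already running or busy.
+ */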
+static int start_queue(struct pl022 *pl022)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pl022->queue_lock, flags);
+
+ if (pl022->run == QUEUE_RUNNING || pl022->busy) {
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+ return -EBUSY;
+ }
+
+ pl022->run = QUEUE_RUNNING;
+ pl022->cur_msg = NULL;
+ pl022->cur_transfer = NULL;
+ pl022->cur_chip = NULL;
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+
+ queue_work(pl022->workqueue, &pl022->pump_messages);
+
+ return 0;
+}
+
+
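+/**
+ * stop_queue - stop the message queue and wait for it to drain
+ * @pl022: SSP driver private data structure
+ *
+ * Returns -EBUSY if the queue could not be emptied within the timeout.
+ */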
+static int stop_queue(struct pl022 *pl022)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int status = 0;
+
+ spin_lock_irqsave(&pl022->queue_lock, flags);
+
+ /* This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on the pl022->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+ * friends on every SPI message. Do this instead */
+ pl022->run = QUEUE_STOPPED;
+ while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&pl022->queue_lock, flags);
+ }
+
+ if (!list_empty(&pl022->queue) || pl022->busy)
+ status = -EBUSY;
+
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+
+ return status;
+}
+
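+/**
+ * destroy_queue - stop the message queue and destroy the workqueue
+ * @pl022: SSP driver private data structure
+ */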
+static int destroy_queue(struct pl022 *pl022)
+{
+ int status;
+
+ status = stop_queue(pl022);
+ /* we are unloading the module or failing to load (only two calls
+ * to this routine), and neither call can handle a return value.
+ * However, destroy_workqueue calls flush_workqueue, and that will
+ * block until all work is done. If the reason that stop_queue
+ * timed out is that the work will never finish, then it does no
+ * good to call destroy_workqueue, so return anyway. */
+ if (status != 0)
+ return status;
+
+ destroy_workqueue(pl022->workqueue);
+
+ return 0;
+}
+
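+/**
+ * verify_controller_parameters - sanity check the chip configuration
+ * @pl022: SSP driver private data structure
+ * @chip_info: chip configuration supplied by the client
+ *
+ * Returns 0 if all parameters are within the supported ranges and
+ * -EINVAL otherwise. A missing cs_control callback is replaced by
+ * null_cs_control().
+ */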
+static int verify_controller_parameters(struct pl022 *pl022,
+ struct pl022_config_chip *chip_info)
+{
+ if ((chip_info->lbm != LOOPBACK_ENABLED)
+ && (chip_info->lbm != LOOPBACK_DISABLED)) {
+ dev_err(chip_info->dev,
+ "loopback Mode is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
+ || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
+ dev_err(chip_info->dev,
+ "interface is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
+ (!pl022->vendor->unidir)) {
+ dev_err(chip_info->dev,
+ "unidirectional mode not supported in this "
+ "hardware version\n");
+ return -EINVAL;
+ }
+ if ((chip_info->hierarchy != SSP_MASTER)
+ && (chip_info->hierarchy != SSP_SLAVE)) {
+ dev_err(chip_info->dev,
+ "hierarchy is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN)
+ || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) {
+ dev_err(chip_info->dev,
+ "cpsdvsr is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->endian_rx != SSP_RX_MSB)
+ && (chip_info->endian_rx != SSP_RX_LSB)) {
+ dev_err(chip_info->dev,
+ "RX FIFO endianness is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->endian_tx != SSP_TX_MSB)
+ && (chip_info->endian_tx != SSP_TX_LSB)) {
+ dev_err(chip_info->dev,
+ "TX FIFO endianness is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->data_size < SSP_DATA_BITS_4)
+ || (chip_info->data_size > SSP_DATA_BITS_32)) {
+ dev_err(chip_info->dev,
+ "DATA Size is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->com_mode != INTERRUPT_TRANSFER)
+ && (chip_info->com_mode != DMA_TRANSFER)
+ && (chip_info->com_mode != POLLING_TRANSFER)) {
+ dev_err(chip_info->dev,
+ "Communication mode is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
+ || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
+ dev_err(chip_info->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
+ || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
+ dev_err(chip_info->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
+ if ((chip_info->clk_phase != SSP_CLK_RISING_EDGE)
+ && (chip_info->clk_phase != SSP_CLK_FALLING_EDGE)) {
+ dev_err(chip_info->dev,
+ "Clock Phase is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW)
+ && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) {
+ dev_err(chip_info->dev,
+ "Clock Polarity is configured incorrectly\n");
+ return -EINVAL;
+ }
+ }
+ if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
+ if ((chip_info->ctrl_len < SSP_BITS_4)
+ || (chip_info->ctrl_len > SSP_BITS_32)) {
+ dev_err(chip_info->dev,
+ "CTRL LEN is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
+ && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
+ dev_err(chip_info->dev,
+ "Wait State is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
+ && (chip_info->duplex !=
+ SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
+ dev_err(chip_info->dev,
+ "DUPLEX is configured incorrectly\n");
+ return -EINVAL;
+ }
+ }
+ if (chip_info->cs_control == NULL) {
+ dev_warn(chip_info->dev,
+ "Chip Select Function is NULL for this chip\n");
+ chip_info->cs_control = null_cs_control;
+ }
+ return 0;
+}
+
+/**
+ * pl022_transfer - transfer function registered to SPI master framework
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message to be handled; it is queued on the driver queue
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. It will queue the spi_message on the driver queue if the
+ * queue is not stopped, and return.
+ */
+static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct pl022 *pl022 = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pl022->queue_lock, flags);
+
+ if (pl022->run == QUEUE_STOPPED) {
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+ return -ESHUTDOWN;
+ }
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ msg->state = STATE_START;
+
+ list_add_tail(&msg->queue, &pl022->queue);
+ if (pl022->run == QUEUE_RUNNING && !pl022->busy)
+ queue_work(pl022->workqueue, &pl022->pump_messages);
+
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+ return 0;
+}
+
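+/**
+ * calculate_effective_freq - work out cpsdvsr/scr for a requested rate
+ * @pl022: SSP driver private data structure
+ * @freq: the requested SPI clock frequency in Hz
+ * @clk_freq: filled in with the calculated clock parameters
+ *
+ * Searches the cpsdvsr/scr divisor space for the pair that gets the
+ * effective frequency closest to the requested one, given the rate of
+ * the SSP block clock. Returns -EINVAL if the requested frequency is
+ * outside the achievable range.
+ */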
+static int calculate_effective_freq(struct pl022 *pl022,
+ int freq,
+ struct ssp_clock_params *clk_freq)
+{
+ /* Let's calculate the frequency parameters */
+ u16 cpsdvsr = 2;
+ u16 scr = 0;
+ bool freq_found = false;
+ u32 rate;
+ u32 max_tclk;
+ u32 min_tclk;
+
+ rate = clk_get_rate(pl022->clk);
+ /* cpsdvsr = 2 & scr = 0 */
+ max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN)));
+ /* cpsdvsr = 254 & scr = 255 */
+ min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX)));
+
+ if ((freq <= max_tclk) && (freq >= min_tclk)) {
+ while (cpsdvsr <= CPSDVR_MAX && !freq_found) {
+ while (scr <= SCR_MAX && !freq_found) {
+ if ((rate /
+ (cpsdvsr * (1 + scr))) > freq)
+ scr += 1;
+ else {
+ /*
+ * This bool is made true when
+ * effective frequency >=
+ * target frequency is found
+ */
+ freq_found = true;
+ if ((rate /
+ (cpsdvsr * (1 + scr))) != freq) {
+ if (scr == SCR_MIN) {
+ cpsdvsr -= 2;
+ scr = SCR_MAX;
+ } else
+ scr -= 1;
+ }
+ }
+ }
+ if (!freq_found) {
+ cpsdvsr += 2;
+ scr = SCR_MIN;
+ }
+ }
+ if (cpsdvsr != 0) {
+ dev_dbg(&pl022->adev->dev,
+ "SSP Effective Frequency is %u\n",
+ (rate / (cpsdvsr * (1 + scr))));
+ clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF);
+ clk_freq->scr = (u8) (scr & 0xFF);
+ dev_dbg(&pl022->adev->dev,
+ "SSP cpsdvsr = %d, scr = %d\n",
+ clk_freq->cpsdvsr, clk_freq->scr);
+ }
+ } else {
+ dev_err(&pl022->adev->dev,
+ "controller data is incorrect: out of range frequency");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * NOT IMPLEMENTED
+ * process_dma_info - Processes the DMA info provided by client drivers
+ * @chip_info: chip info provided by client device
+ * @chip: Runtime state maintained by the SSP controller for each spi device
+ *
+ * This function processes and stores DMA config provided by client driver
+ * into the runtime state maintained by the SSP controller driver
+ */
+static int process_dma_info(struct pl022_config_chip *chip_info,
+ struct chip_data *chip)
+{
+ dev_err(chip_info->dev,
+ "cannot process DMA info, DMA not implemented!\n");
+ return -ENOTSUPP;
+}
+
+/**
+ * pl022_setup - setup function registered to SPI master framework
+ * @spi: spi device which is requesting setup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. The first time setup is called for a device, this function
+ * will initialize the runtime state for this chip and save it in the
+ * device structure. On subsequent calls it will update the runtime state
+ * with the updated chip info. Nothing is actually written to the
+ * controller hardware here; that is not done until the actual transfers
+ * commence.
+ */
+
+/* FIXME: JUST GUESSING the spi->mode bits understood by this driver */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
+ | SPI_LSB_FIRST | SPI_LOOP)
+
+static int pl022_setup(struct spi_device *spi)
+{
+ struct pl022_config_chip *chip_info;
+ struct chip_data *chip;
+ int status = 0;
+ struct pl022 *pl022 = spi_master_get_devdata(spi->master);
+
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ /* Get controller_state if one is supplied */
+ chip = spi_get_ctldata(spi);
+
+ if (chip == NULL) {
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip) {
+ dev_err(&spi->dev,
+ "cannot allocate controller state\n");
+ return -ENOMEM;
+ }
+ dev_dbg(&spi->dev,
+ "allocated memory for controller's runtime state\n");
+ }
+
+ /* Get controller data if one is supplied */
+ chip_info = spi->controller_data;
+
+ if (chip_info == NULL) {
+ /* spi_board_info.controller_data is not supplied */
+ dev_dbg(&spi->dev,
+ "using default controller_data settings\n");
+
+ chip_info =
+ kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL);
+
+ if (!chip_info) {
+ dev_err(&spi->dev,
+ "cannot allocate controller data\n");
+ status = -ENOMEM;
+ goto err_first_setup;
+ }
+
+ dev_dbg(&spi->dev, "allocated memory for controller data\n");
+
+ /* Pointer back to the SPI device */
+ chip_info->dev = &spi->dev;
+ /*
+ * Set controller data default values:
+ * Polling is supported by default
+ */
+ chip_info->lbm = LOOPBACK_DISABLED;
+ chip_info->com_mode = POLLING_TRANSFER;
+ chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI;
+ chip_info->hierarchy = SSP_SLAVE;
+ chip_info->slave_tx_disable = DO_NOT_DRIVE_TX;
+ chip_info->endian_tx = SSP_TX_LSB;
+ chip_info->endian_rx = SSP_RX_LSB;
+ chip_info->data_size = SSP_DATA_BITS_12;
+ chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
+ chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
+ chip_info->clk_phase = SSP_CLK_FALLING_EDGE;
+ chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
+ chip_info->ctrl_len = SSP_BITS_8;
+ chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
+ chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX;
+ chip_info->cs_control = null_cs_control;
+ } else {
+ dev_dbg(&spi->dev,
+ "using user supplied controller_data settings\n");
+ }
+
+ /*
+ * We can override with custom divisors, else we use the board
+ * frequency setting
+ */
+ if ((0 == chip_info->clk_freq.cpsdvsr)
+ && (0 == chip_info->clk_freq.scr)) {
+ status = calculate_effective_freq(pl022,
+ spi->max_speed_hz,
+ &chip_info->clk_freq);
+ if (status < 0)
+ goto err_config_params;
+ } else {
+ if ((chip_info->clk_freq.cpsdvsr % 2) != 0)
+ chip_info->clk_freq.cpsdvsr =
+ chip_info->clk_freq.cpsdvsr - 1;
+ }
+ status = verify_controller_parameters(pl022, chip_info);
+ if (status) {
+ dev_err(&spi->dev, "controller data is incorrect");
+ goto err_config_params;
+ }
+ /* Now set controller state based on controller data */
+ chip->xfer_type = chip_info->com_mode;
+ chip->cs_control = chip_info->cs_control;
+
+ if (chip_info->data_size <= 8) {
+ dev_dbg(&spi->dev, "1 <= n <= 8 bits per word\n");
+ chip->n_bytes = 1;
+ chip->read = READING_U8;
+ chip->write = WRITING_U8;
+ } else if (chip_info->data_size <= 16) {
+ dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
+ chip->n_bytes = 2;
+ chip->read = READING_U16;
+ chip->write = WRITING_U16;
+ } else {
+ if (pl022->vendor->max_bpw >= 32) {
+ dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
+ chip->n_bytes = 4;
+ chip->read = READING_U32;
+ chip->write = WRITING_U32;
+ } else {
+ dev_err(&spi->dev,
+ "illegal data size for this controller!\n");
+ dev_err(&spi->dev,
+ "a standard pl022 can only handle "
+ "1 <= n <= 16 bit words\n");
+ goto err_config_params;
+ }
+ }
+
+ /* Now Initialize all register settings required for this chip */
+ chip->cr0 = 0;
+ chip->cr1 = 0;
+ chip->dmacr = 0;
+ chip->cpsr = 0;
+ if ((chip_info->com_mode == DMA_TRANSFER)
+ && ((pl022->master_info)->enable_dma)) {
+ chip->enable_dma = 1;
+ dev_dbg(&spi->dev, "DMA mode set in controller state\n");
+ status = process_dma_info(chip_info, chip);
+ if (status < 0)
+ goto err_config_params;
+ SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
+ SSP_DMACR_MASK_RXDMAE, 0);
+ SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
+ SSP_DMACR_MASK_TXDMAE, 1);
+ } else {
+ chip->enable_dma = 0;
+ dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
+ SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
+ SSP_DMACR_MASK_RXDMAE, 0);
+ SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
+ SSP_DMACR_MASK_TXDMAE, 1);
+ }
+
+ chip->cpsr = chip_info->clk_freq.cpsdvsr;
+
+ SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0);
+ SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5);
+ SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
+ SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
+ SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
+ SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16);
+ SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21);
+ SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
+ SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
+ SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
+ SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
+ SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4);
+ SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5);
+ SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6);
+ SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7);
+ SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10);
+
+ /* Save controller_state */
+ spi_set_ctldata(spi, chip);
+ return status;
+ err_config_params:
+ err_first_setup:
+ kfree(chip);
+ return status;
+}
+
+/**
+ * pl022_cleanup - cleanup function registered to SPI master framework
+ * @spi: spi device which is requesting cleanup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. It will free the runtime state of chip.
+ */
+static void pl022_cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ spi_set_ctldata(spi, NULL);
+ kfree(chip);
+}
+
+
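+/**
+ * pl022_probe - probe an AMBA PL022 device
+ * @adev: the AMBA device to probe
+ * @id: the matching entry of the amba_id table, carrying the vendor data
+ *
+ * Allocates the SPI master and driver state, maps the controller
+ * registers, claims the clock and IRQ, initializes and starts the
+ * message queue and finally registers with the SPI framework.
+ */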
+static int __init
+pl022_probe(struct amba_device *adev, struct amba_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
+ struct spi_master *master;
+ struct pl022 *pl022 = NULL; /*Data for this driver */
+ int status = 0;
+
+ dev_info(&adev->dev,
+ "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
+ if (platform_info == NULL) {
+ dev_err(&adev->dev, "probe - no platform data supplied\n");
+ status = -ENODEV;
+ goto err_no_pdata;
+ }
+
+ /* Allocate master with space for data */
+ master = spi_alloc_master(dev, sizeof(struct pl022));
+ if (master == NULL) {
+ dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
+ status = -ENOMEM;
+ goto err_no_master;
+ }
+
+ pl022 = spi_master_get_devdata(master);
+ pl022->master = master;
+ pl022->master_info = platform_info;
+ pl022->adev = adev;
+ pl022->vendor = id->data;
+
+ /*
+ * Bus number which has been assigned to this SSP controller
+ * on this board
+ */
+ master->bus_num = platform_info->bus_id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->cleanup = pl022_cleanup;
+ master->setup = pl022_setup;
+ master->transfer = pl022_transfer;
+
+ dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);
+
+ status = amba_request_regions(adev, NULL);
+ if (status)
+ goto err_no_ioregion;
+
+ pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
+ if (pl022->virtbase == NULL) {
+ status = -ENOMEM;
+ goto err_no_ioremap;
+ }
+ printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
+ adev->res.start, pl022->virtbase);
+
+ pl022->clk = clk_get(&adev->dev, NULL);
+ if (IS_ERR(pl022->clk)) {
+ status = PTR_ERR(pl022->clk);
+ dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
+ goto err_no_clk;
+ }
+
+ /* Disable SSP */
+ clk_enable(pl022->clk);
+ writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
+ SSP_CR1(pl022->virtbase));
+ load_ssp_default_config(pl022);
+ clk_disable(pl022->clk);
+
+ status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
+ pl022);
+ if (status < 0) {
+ dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
+ goto err_no_irq;
+ }
+ /* Initialize and start queue */
+ status = init_queue(pl022);
+ if (status != 0) {
+ dev_err(&adev->dev, "probe - problem initializing queue\n");
+ goto err_init_queue;
+ }
+ status = start_queue(pl022);
+ if (status != 0) {
+ dev_err(&adev->dev, "probe - problem starting queue\n");
+ goto err_start_queue;
+ }
+ /* Register with the SPI framework */
+ amba_set_drvdata(adev, pl022);
+ status = spi_register_master(master);
+ if (status != 0) {
+ dev_err(&adev->dev,
+ "probe - problem registering spi master\n");
+ goto err_spi_register;
+ }
+ dev_dbg(dev, "probe succeeded\n");
+ return 0;
+
+ err_spi_register:
+ err_start_queue:
+ err_init_queue:
+ destroy_queue(pl022);
+ free_irq(adev->irq[0], pl022);
+ err_no_irq:
+ clk_put(pl022->clk);
+ err_no_clk:
+ iounmap(pl022->virtbase);
+ err_no_ioremap:
+ amba_release_regions(adev);
+ err_no_ioregion:
+ spi_master_put(master);
+ err_no_master:
+ err_no_pdata:
+ return status;
+}
+
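+/**
+ * pl022_remove - undo the effects of pl022_probe
+ * @adev: the AMBA device being removed
+ */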
+static int __exit
+pl022_remove(struct amba_device *adev)
+{
+ struct pl022 *pl022 = amba_get_drvdata(adev);
+ int status = 0;
+ if (!pl022)
+ return 0;
+
+ /* Remove the queue */
+ status = destroy_queue(pl022);
+ if (status != 0) {
+ dev_err(&adev->dev,
+ "queue remove failed (%d)\n", status);
+ return status;
+ }
+ load_ssp_default_config(pl022);
+ free_irq(adev->irq[0], pl022);
+ clk_disable(pl022->clk);
+ clk_put(pl022->clk);
+ iounmap(pl022->virtbase);
+ amba_release_regions(adev);
+ tasklet_disable(&pl022->pump_transfers);
+ spi_unregister_master(pl022->master);
+ spi_master_put(pl022->master);
+ amba_set_drvdata(adev, NULL);
+ dev_dbg(&adev->dev, "remove succeeded\n");
+ return 0;
+}
+
+#ifdef CONFIG_PM
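+/**
+ * pl022_suspend - prepare the SSP for a system sleep state
+ * @adev: the AMBA device
+ * @state: the power management message
+ *
+ * Stops the message queue and reloads the default register
+ * configuration so the block is left in a known state.
+ */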
+static int pl022_suspend(struct amba_device *adev, pm_message_t state)
+{
+ struct pl022 *pl022 = amba_get_drvdata(adev);
+ int status = 0;
+
+ status = stop_queue(pl022);
+ if (status) {
+ dev_warn(&adev->dev, "suspend cannot stop queue\n");
+ return status;
+ }
+
+ clk_enable(pl022->clk);
+ load_ssp_default_config(pl022);
+ clk_disable(pl022->clk);
+ dev_dbg(&adev->dev, "suspended\n");
+ return 0;
+}
+
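+/**
+ * pl022_resume - restart the message queue after a system resume
+ * @adev: the AMBA device
+ */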
+static int pl022_resume(struct amba_device *adev)
+{
+ struct pl022 *pl022 = amba_get_drvdata(adev);
+ int status = 0;
+
+ /* Start the queue running */
+ status = start_queue(pl022);
+ if (status)
+ dev_err(&adev->dev, "problem starting queue (%d)\n", status);
+ else
+ dev_dbg(&adev->dev, "resumed\n");
+
+ return status;
+}
+#else
+#define pl022_suspend NULL
+#define pl022_resume NULL
+#endif /* CONFIG_PM */
+
+static struct vendor_data vendor_arm = {
+ .fifodepth = 8,
+ .max_bpw = 16,
+ .unidir = false,
+};
+
+
+static struct vendor_data vendor_st = {
+ .fifodepth = 32,
+ .max_bpw = 32,
+ .unidir = false,
+};
+
+static struct amba_id pl022_ids[] = {
+ {
+ /*
+ * ARM PL022 variant, this has a 16bit wide
+ * and 8 locations deep TX/RX FIFO
+ */
+ .id = 0x00041022,
+ .mask = 0x000fffff,
+ .data = &vendor_arm,
+ },
+ {
+ /*
+ * ST Micro derivative, this has 32bit wide
+ * and 32 locations deep TX/RX FIFO
+ */
+ .id = 0x00108022,
+ .mask = 0xffffffff,
+ .data = &vendor_st,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl022_driver = {
+ .drv = {
+ .name = "ssp-pl022",
+ },
+ .id_table = pl022_ids,
+ .probe = pl022_probe,
+ .remove = __exit_p(pl022_remove),
+ .suspend = pl022_suspend,
+ .resume = pl022_resume,
+};
+
+
+static int __init pl022_init(void)
+{
+ return amba_driver_register(&pl022_driver);
+}
+
+module_init(pl022_init);
+
+static void __exit pl022_exit(void)
+{
+ amba_driver_unregister(&pl022_driver);
+}
+
+module_exit(pl022_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
+MODULE_DESCRIPTION("PL022 SSP Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index f014cc21e81..011c5bddba6 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -803,7 +803,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
drv_data->rx, drv_data->len_in_bytes);
/* invalidate caches, if needed */
- if (bfin_addr_dcachable((unsigned long) drv_data->rx))
+ if (bfin_addr_dcacheable((unsigned long) drv_data->rx))
invalidate_dcache_range((unsigned long) drv_data->rx,
(unsigned long) (drv_data->rx +
drv_data->len_in_bytes));
@@ -816,7 +816,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n");
/* flush caches, if needed */
- if (bfin_addr_dcachable((unsigned long) drv_data->tx))
+ if (bfin_addr_dcacheable((unsigned long) drv_data->tx))
flush_dcache_range((unsigned long) drv_data->tx,
(unsigned long) (drv_data->tx +
drv_data->len_in_bytes));
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index f4573a96af2..a32ccb44065 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -711,12 +711,12 @@ static int of_mpc83xx_spi_get_chipselects(struct device *dev)
return 0;
}
- pinfo->gpios = kmalloc(ngpios * sizeof(pinfo->gpios), GFP_KERNEL);
+ pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL);
if (!pinfo->gpios)
return -ENOMEM;
- memset(pinfo->gpios, -1, ngpios * sizeof(pinfo->gpios));
+ memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));
- pinfo->alow_flags = kzalloc(ngpios * sizeof(pinfo->alow_flags),
+ pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags),
GFP_KERNEL);
if (!pinfo->alow_flags) {
ret = -ENOMEM;
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
index f2447a5476b..bbf9371cd28 100644
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ b/drivers/spi/spi_s3c24xx_gpio.c
@@ -17,6 +17,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
+#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
index 7dc3a6b4139..a0e0d246b59 100644
--- a/drivers/ssb/embedded.c
+++ b/drivers/ssb/embedded.c
@@ -29,6 +29,7 @@ int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks)
}
return -ENODEV;
}
+EXPORT_SYMBOL(ssb_watchdog_timer_set);
u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask)
{
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 0dcf9ca0b0a..d0fcf36c2ab 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -115,5 +115,7 @@ source "drivers/staging/line6/Kconfig"
source "drivers/staging/serqt_usb/Kconfig"
+source "drivers/gpu/drm/radeon/Kconfig"
+
endif # !STAGING_EXCLUDE_BUILD
endif # STAGING
diff --git a/drivers/staging/agnx/pci.c b/drivers/staging/agnx/pci.c
index 25c0ffd2faa..43b3fe35261 100644
--- a/drivers/staging/agnx/pci.c
+++ b/drivers/staging/agnx/pci.c
@@ -303,14 +303,18 @@ static int agnx_config(struct ieee80211_hw *dev, u32 changed)
return 0;
}
-static int agnx_config_interface(struct ieee80211_hw *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
+static void agnx_bss_info_changed(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
+ u32 changed)
{
struct agnx_priv *priv = dev->priv;
void __iomem *ctl = priv->ctl;
AGNX_TRACE;
+ if (!(changed & BSS_CHANGED_BSSID))
+ return;
+
spin_lock(&priv->lock);
if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) {
@@ -323,8 +327,7 @@ static int agnx_config_interface(struct ieee80211_hw *dev,
agnx_write32(ctl, AGNX_BM_MTSM, 0xff & ~0x1);
}
spin_unlock(&priv->lock);
- return 0;
-} /* agnx_config_interface */
+} /* agnx_bss_info_changed */
static void agnx_configure_filter(struct ieee80211_hw *dev,
@@ -422,7 +425,7 @@ static struct ieee80211_ops agnx_ops = {
.add_interface = agnx_add_interface,
.remove_interface = agnx_remove_interface,
.config = agnx_config,
- .config_interface = agnx_config_interface,
+ .bss_info_changed = agnx_bss_info_changed,
.configure_filter = agnx_configure_filter,
.get_stats = agnx_get_stats,
.get_tx_stats = agnx_get_tx_stats,
diff --git a/drivers/staging/at76_usb/at76_usb.c b/drivers/staging/at76_usb/at76_usb.c
index c8af9a868d6..3f303ae97b4 100644
--- a/drivers/staging/at76_usb/at76_usb.c
+++ b/drivers/staging/at76_usb/at76_usb.c
@@ -3242,12 +3242,11 @@ static int at76_tx(struct sk_buff *skb, struct net_device *netdev)
"%s: -EINVAL: tx urb %p hcpriv %p complete %p\n",
priv->netdev->name, priv->tx_urb,
priv->tx_urb->hcpriv, priv->tx_urb->complete);
- } else {
+ } else
stats->tx_bytes += skb->len;
- dev_kfree_skb(skb);
- }
- return ret;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
static void at76_tx_timeout(struct net_device *netdev)
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
index 951c73d5db2..59e99cc7786 100644
--- a/drivers/staging/et131x/et131x_netdev.c
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -585,11 +585,11 @@ int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
* available
*/
netif_stop_queue(netdev);
- status = 1;
+ status = NETDEV_TX_BUSY;
} else {
DBG_WARNING(et131x_dbginfo,
"Misc error; drop packet\n");
- status = 0;
+ status = NETDEV_TX_OK;
}
}
diff --git a/drivers/staging/go7007/go7007.txt b/drivers/staging/go7007/go7007.txt
index 9f6772bc68c..1c2907c1dc8 100644
--- a/drivers/staging/go7007/go7007.txt
+++ b/drivers/staging/go7007/go7007.txt
@@ -2,7 +2,7 @@ This is a driver for the WIS GO7007SB multi-format video encoder.
Pete Eberlein <pete@sensoray.com>
-The driver was orignally released under the GPL and is currently hosted at:
+The driver was originally released under the GPL and is currently hosted at:
http://nikosapi.org/wiki/index.php/WIS_Go7007_Linux_driver
The go7007 firmware can be acquired from the package on the site above.
@@ -24,7 +24,7 @@ These should be used instead of the non-standard GO7007 ioctls described
below.
-The README files from the orignal package appear below:
+The README files from the original package appear below:
---------------------------------------------------------------------------
WIS GO7007SB Public Linux Driver
diff --git a/drivers/staging/panel/lcd-panel-cgram.txt b/drivers/staging/panel/lcd-panel-cgram.txt
index f9ceef4322a..7f82c905763 100644
--- a/drivers/staging/panel/lcd-panel-cgram.txt
+++ b/drivers/staging/panel/lcd-panel-cgram.txt
@@ -3,7 +3,7 @@ characters 0 to 7. The escape code to define a new character is
'\e[LG' followed by one digit from 0 to 7, representing the character
number, and up to 8 couples of hex digits terminated by a semi-colon
(';'). Each couple of digits represents a line, with 1-bits for each
-illuminated pixel with LSB on the right. Lines are numberred from the
+illuminated pixel with LSB on the right. Lines are numbered from the
top of the character to the bottom. On a 5x7 matrix, only the 5 lower
bits of the 7 first bytes are used for each character. If the string
is incomplete, only complete lines will be redefined. Here are some
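[Editor's note: as an illustration of the escape format described in the hunk above, here is a minimal user-space sketch; it is not part of the patch and the helper name is invented.]

	#include <stdio.h>

	/* Emit "\e[LG<n><up to 8 hex pairs>;" to redefine CGRAM character n (0-7).
	 * rows[] holds one byte per line, top to bottom, 1-bits = lit pixels,
	 * LSB on the right; a 5x7 panel only uses the low 5 bits of the first 7 rows. */
	static void lcd_define_char(FILE *panel, int n, const unsigned char rows[8])
	{
		int i;

		fprintf(panel, "\033[LG%d", n & 7);
		for (i = 0; i < 8; i++)
			fprintf(panel, "%02x", rows[i] & 0x1f);
		fputc(';', panel);
	}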
diff --git a/drivers/staging/rt2860/common/mlme.c b/drivers/staging/rt2860/common/mlme.c
index c00f9ab9c46..2edf2999f5c 100644
--- a/drivers/staging/rt2860/common/mlme.c
+++ b/drivers/staging/rt2860/common/mlme.c
@@ -5664,7 +5664,7 @@ VOID AsicUpdateProtect(
#if 0
MacReg |= (pAd->CommonCfg.RtsThreshold << 8);
#else
- // If the user want disable RtsThreshold and enbale Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096
+	// If the user wants to disable RtsThreshold and enable Amsdu/Ralink-Aggregation, set the RtsThreshold to 4096
if ((
#ifdef DOT11_N_SUPPORT
(pAd->CommonCfg.BACapability.field.AmsduEnable) ||
diff --git a/drivers/staging/rt2870/common/mlme.c b/drivers/staging/rt2870/common/mlme.c
index 8a82cee8bf2..a26bc033337 100644
--- a/drivers/staging/rt2870/common/mlme.c
+++ b/drivers/staging/rt2870/common/mlme.c
@@ -5561,7 +5561,7 @@ VOID AsicUpdateProtect(
#if 0
MacReg |= (pAd->CommonCfg.RtsThreshold << 8);
#else
- // If the user want disable RtsThreshold and enbale Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096
+	// If the user wants to disable RtsThreshold and enable Amsdu/Ralink-Aggregation, set the RtsThreshold to 4096
if ((
#ifdef DOT11_N_SUPPORT
(pAd->CommonCfg.BACapability.field.AmsduEnable) ||
diff --git a/drivers/staging/rt3070/common/mlme.c b/drivers/staging/rt3070/common/mlme.c
index 0ffbfa36699..0189bab013c 100644
--- a/drivers/staging/rt3070/common/mlme.c
+++ b/drivers/staging/rt3070/common/mlme.c
@@ -5575,7 +5575,7 @@ VOID AsicUpdateProtect(
RTMP_IO_READ32(pAd, TX_RTS_CFG, &MacReg);
MacReg &= 0xFF0000FF;
- // If the user want disable RtsThreshold and enbale Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096
+	// If the user wants to disable RtsThreshold and enable Amsdu/Ralink-Aggregation, set the RtsThreshold to 4096
if ((
#ifdef DOT11_N_SUPPORT
(pAd->CommonCfg.BACapability.field.AmsduEnable) ||
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
index 33a0687252a..1294e05fcf1 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
@@ -814,7 +814,7 @@ int ieee80211_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&ieee->lock, flags);
netif_stop_queue(dev);
stats->tx_errors++;
- return 1;
+ return NETDEV_TX_BUSY;
}
diff --git a/drivers/staging/uc2322/aten2011.c b/drivers/staging/uc2322/aten2011.c
index 9c62f787cc9..39d0926d1a9 100644
--- a/drivers/staging/uc2322/aten2011.c
+++ b/drivers/staging/uc2322/aten2011.c
@@ -2336,7 +2336,7 @@ static int ATEN2011_startup(struct usb_serial *serial)
return 0;
}
-static void ATEN2011_shutdown(struct usb_serial *serial)
+static void ATEN2011_release(struct usb_serial *serial)
{
int i;
struct ATENINTL_port *ATEN2011_port;
@@ -2382,7 +2382,7 @@ static struct usb_serial_driver aten_serial_driver = {
.tiocmget = ATEN2011_tiocmget,
.tiocmset = ATEN2011_tiocmset,
.attach = ATEN2011_startup,
- .shutdown = ATEN2011_shutdown,
+ .release = ATEN2011_release,
.read_bulk_callback = ATEN2011_bulk_in_callback,
.read_int_callback = ATEN2011_interrupt_callback,
};
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 888198c9a10..824e65bdc43 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -2424,7 +2424,7 @@ int hfa384x_drvr_ramdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len)
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
-* -ETIMEOUT timout waiting for the cmd regs to become
+*	-ETIMEDOUT	timeout waiting for the cmd regs to become
* available, or waiting for the control reg
* to indicate the Aux port is enabled.
* -ENODATA the buffer does NOT contain a valid PDA.
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 393e4df70df..bc0d764d851 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -432,21 +432,21 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
/* success and more buf */
/* avail, re: hw_txdata */
netif_wake_queue(wlandev->netdev);
- result = 0;
+ result = NETDEV_TX_OK;
} else if (txresult == 1) {
/* success, no more avail */
pr_debug("txframe success, no more bufs\n");
/* netdev->tbusy = 1; don't set here, irqhdlr */
/* may have already cleared it */
- result = 0;
+ result = NETDEV_TX_OK;
} else if (txresult == 2) {
/* alloc failure, drop frame */
pr_debug("txframe returned alloc_fail\n");
- result = 1;
+ result = NETDEV_TX_BUSY;
} else {
/* buffer full or queue busy, drop frame. */
pr_debug("txframe returned full or busy\n");
- result = 1;
+ result = NETDEV_TX_BUSY;
}
failed:
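[Editor's note: the staging hunks above all replace bare 0/1 return values with the NETDEV_TX_* codes from <linux/netdevice.h>. A minimal sketch of that convention, using a made-up hardware_queue_frame() helper; illustrative only, not code from the patch.]

	static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (hardware_queue_frame(dev, skb) < 0) {
			/* No room: keep the skb, stop the queue, let the core retry it. */
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;
		}
		/* Frame accepted; the driver now owns (and will free) the skb. */
		return NETDEV_TX_OK;
	}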
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index d0b093b66ad..0a69672097a 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -417,7 +417,7 @@ static LIST_HEAD(thermal_hwmon_list);
static ssize_t
name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct thermal_hwmon_device *hwmon = dev->driver_data;
+ struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", hwmon->type);
}
static DEVICE_ATTR(name, 0444, name_show, NULL);
@@ -488,7 +488,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
result = PTR_ERR(hwmon->device);
goto free_mem;
}
- hwmon->device->driver_data = hwmon;
+ dev_set_drvdata(hwmon->device, hwmon);
result = device_create_file(hwmon->device, &dev_attr_name);
if (result)
goto unregister_hwmon_device;
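[Editor's note: the two thermal hunks above stop poking dev->driver_data directly and use the generic accessor pair instead. A compressed sketch of the pattern (illustrative only):]

	/* at registration time: stash the driver-private structure */
	dev_set_drvdata(hwmon->device, hwmon);

	/* later, e.g. in a sysfs show() callback: retrieve it again */
	struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);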
@@ -961,7 +961,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
switch (trip_type) {
case THERMAL_TRIP_CRITICAL:
- if (temp > trip_temp) {
+ if (temp >= trip_temp) {
if (tz->ops->notify)
ret = tz->ops->notify(tz, count,
trip_type);
@@ -974,7 +974,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
}
break;
case THERMAL_TRIP_HOT:
- if (temp > trip_temp)
+ if (temp >= trip_temp)
if (tz->ops->notify)
tz->ops->notify(tz, count, trip_type);
break;
@@ -986,14 +986,14 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
cdev = instance->cdev;
- if (temp > trip_temp)
+ if (temp >= trip_temp)
cdev->ops->set_cur_state(cdev, 1);
else
cdev->ops->set_cur_state(cdev, 0);
}
break;
case THERMAL_TRIP_PASSIVE:
- if (temp > trip_temp || tz->passive)
+ if (temp >= trip_temp || tz->passive)
thermal_zone_device_passive(tz, temp,
trip_temp, count);
break;
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index c6c816b7ecb..dcd49f1e96d 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -22,6 +22,7 @@ config USB_ARCH_HAS_HCD
default y if PCMCIA && !M32R # sl811_cs
default y if ARM # SL-811
default y if SUPERH # r8a66597-hcd
+ default y if MICROBLAZE
default PCI
# many non-PCI SOC chips embed OHCI
@@ -63,6 +64,7 @@ config USB_ARCH_HAS_EHCI
config USB
tristate "Support for Host-side USB"
depends on USB_ARCH_HAS_HCD
+ select NLS # for UTF-8 strings
---help---
Universal Serial Bus (USB) is a specification for a serial bus
subsystem which offers higher speeds and more features than the
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 0716cdb44cd..19cb7d5480d 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -11,10 +11,10 @@ obj-$(CONFIG_USB_MON) += mon/
obj-$(CONFIG_PCI) += host/
obj-$(CONFIG_USB_EHCI_HCD) += host/
obj-$(CONFIG_USB_ISP116X_HCD) += host/
-obj-$(CONFIG_USB_ISP1760_HCD) += host/
obj-$(CONFIG_USB_OHCI_HCD) += host/
obj-$(CONFIG_USB_UHCI_HCD) += host/
obj-$(CONFIG_USB_FHCI_HCD) += host/
+obj-$(CONFIG_USB_XHCI_HCD) += host/
obj-$(CONFIG_USB_SL811_HCD) += host/
obj-$(CONFIG_USB_U132_HCD) += host/
obj-$(CONFIG_USB_R8A66597_HCD) += host/
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 9cf9ff69e3e..d171b563e94 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -306,6 +306,7 @@ enum {
#define FW_GET_BYTE(p) *((__u8 *) (p))
#define FW_DIR "ueagle-atm/"
+#define UEA_FW_NAME_MAX 30
#define NB_MODEM 4
#define BULK_TIMEOUT 300
@@ -1564,9 +1565,9 @@ static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
file = cmv_file[sc->modem_index];
strcpy(cmv_name, FW_DIR);
- strlcat(cmv_name, file, FIRMWARE_NAME_MAX);
+ strlcat(cmv_name, file, UEA_FW_NAME_MAX);
if (ver == 2)
- strlcat(cmv_name, ".v2", FIRMWARE_NAME_MAX);
+ strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX);
}
static int request_cmvs_old(struct uea_softc *sc,
@@ -1574,7 +1575,7 @@ static int request_cmvs_old(struct uea_softc *sc,
{
int ret, size;
u8 *data;
- char cmv_name[FIRMWARE_NAME_MAX]; /* 30 bytes stack variable */
+ char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */
cmvs_file_name(sc, cmv_name, 1);
ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
@@ -1608,7 +1609,7 @@ static int request_cmvs(struct uea_softc *sc,
int ret, size;
u32 crc;
u8 *data;
- char cmv_name[FIRMWARE_NAME_MAX]; /* 30 bytes stack variable */
+ char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */
cmvs_file_name(sc, cmv_name, 2);
ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0a69c0977e3..38bfdb0f666 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -16,7 +16,8 @@
* v0.9 - thorough cleaning, URBification, almost a rewrite
* v0.10 - some more cleanups
* v0.11 - fixed flow control, read error doesn't stop reads
- * v0.12 - added TIOCM ioctls, added break handling, made struct acm kmalloced
+ * v0.12 - added TIOCM ioctls, added break handling, made struct acm
+ * kmalloced
* v0.13 - added termios, added hangup
* v0.14 - sized down struct acm
* v0.15 - fixed flow control again - characters could be lost
@@ -62,7 +63,7 @@
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <asm/byteorder.h>
@@ -87,7 +88,10 @@ static struct acm *acm_table[ACM_TTY_MINORS];
static DEFINE_MUTEX(open_mutex);
-#define ACM_READY(acm) (acm && acm->dev && acm->used)
+#define ACM_READY(acm) (acm && acm->dev && acm->port.count)
+
+static const struct tty_port_operations acm_port_ops = {
+};
#ifdef VERBOSE_DEBUG
#define verbose 1
@@ -99,13 +103,15 @@ static DEFINE_MUTEX(open_mutex);
* Functions for ACM control messages.
*/
-static int acm_ctrl_msg(struct acm *acm, int request, int value, void *buf, int len)
+static int acm_ctrl_msg(struct acm *acm, int request, int value,
+ void *buf, int len)
{
int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
request, USB_RT_ACM, value,
acm->control->altsetting[0].desc.bInterfaceNumber,
buf, len, 5000);
- dbg("acm_control_msg: rq: 0x%02x val: %#x len: %#x result: %d", request, value, len, retval);
+ dbg("acm_control_msg: rq: 0x%02x val: %#x len: %#x result: %d",
+ request, value, len, retval);
return retval < 0 ? retval : 0;
}
@@ -150,9 +156,8 @@ static int acm_wb_is_avail(struct acm *acm)
n = ACM_NW;
spin_lock_irqsave(&acm->write_lock, flags);
- for (i = 0; i < ACM_NW; i++) {
+ for (i = 0; i < ACM_NW; i++)
n -= acm->wb[i].use;
- }
spin_unlock_irqrestore(&acm->write_lock, flags);
return n;
}
@@ -183,7 +188,8 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
wb->urb->transfer_buffer_length = wb->len;
wb->urb->dev = acm->dev;
- if ((rc = usb_submit_urb(wb->urb, GFP_ATOMIC)) < 0) {
+ rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
+ if (rc < 0) {
dbg("usb_submit_urb(write bulk) failed: %d", rc);
acm_write_done(acm, wb);
}
@@ -262,6 +268,7 @@ static void acm_ctrl_irq(struct urb *urb)
{
struct acm *acm = urb->context;
struct usb_cdc_notification *dr = urb->transfer_buffer;
+ struct tty_struct *tty;
unsigned char *data;
int newctrl;
int retval;
@@ -287,40 +294,45 @@ static void acm_ctrl_irq(struct urb *urb)
data = (unsigned char *)(dr + 1);
switch (dr->bNotificationType) {
+ case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+ dbg("%s network", dr->wValue ?
+ "connected to" : "disconnected from");
+ break;
- case USB_CDC_NOTIFY_NETWORK_CONNECTION:
-
- dbg("%s network", dr->wValue ? "connected to" : "disconnected from");
- break;
-
- case USB_CDC_NOTIFY_SERIAL_STATE:
-
- newctrl = get_unaligned_le16(data);
+ case USB_CDC_NOTIFY_SERIAL_STATE:
+ tty = tty_port_tty_get(&acm->port);
+ newctrl = get_unaligned_le16(data);
- if (acm->tty && !acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
+ if (tty) {
+ if (!acm->clocal &&
+ (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
dbg("calling hangup");
- tty_hangup(acm->tty);
+ tty_hangup(tty);
}
+ tty_kref_put(tty);
+ }
- acm->ctrlin = newctrl;
-
- dbg("input control lines: dcd%c dsr%c break%c ring%c framing%c parity%c overrun%c",
- acm->ctrlin & ACM_CTRL_DCD ? '+' : '-', acm->ctrlin & ACM_CTRL_DSR ? '+' : '-',
- acm->ctrlin & ACM_CTRL_BRK ? '+' : '-', acm->ctrlin & ACM_CTRL_RI ? '+' : '-',
- acm->ctrlin & ACM_CTRL_FRAMING ? '+' : '-', acm->ctrlin & ACM_CTRL_PARITY ? '+' : '-',
- acm->ctrlin & ACM_CTRL_OVERRUN ? '+' : '-');
+ acm->ctrlin = newctrl;
+ dbg("input control lines: dcd%c dsr%c break%c ring%c framing%c parity%c overrun%c",
+ acm->ctrlin & ACM_CTRL_DCD ? '+' : '-',
+ acm->ctrlin & ACM_CTRL_DSR ? '+' : '-',
+ acm->ctrlin & ACM_CTRL_BRK ? '+' : '-',
+ acm->ctrlin & ACM_CTRL_RI ? '+' : '-',
+ acm->ctrlin & ACM_CTRL_FRAMING ? '+' : '-',
+ acm->ctrlin & ACM_CTRL_PARITY ? '+' : '-',
+ acm->ctrlin & ACM_CTRL_OVERRUN ? '+' : '-');
break;
- default:
- dbg("unknown notification %d received: index %d len %d data0 %d data1 %d",
- dr->bNotificationType, dr->wIndex,
- dr->wLength, data[0], data[1]);
- break;
+ default:
+ dbg("unknown notification %d received: index %d len %d data0 %d data1 %d",
+ dr->bNotificationType, dr->wIndex,
+ dr->wLength, data[0], data[1]);
+ break;
}
exit:
usb_mark_last_busy(acm->dev);
- retval = usb_submit_urb (urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with "
"result %d", __func__, retval);
@@ -371,15 +383,14 @@ static void acm_rx_tasklet(unsigned long _acm)
{
struct acm *acm = (void *)_acm;
struct acm_rb *buf;
- struct tty_struct *tty = acm->tty;
+ struct tty_struct *tty;
struct acm_ru *rcv;
unsigned long flags;
unsigned char throttled;
dbg("Entering acm_rx_tasklet");
- if (!ACM_READY(acm))
- {
+ if (!ACM_READY(acm)) {
dbg("acm_rx_tasklet: ACM not ready");
return;
}
@@ -387,12 +398,13 @@ static void acm_rx_tasklet(unsigned long _acm)
spin_lock_irqsave(&acm->throttle_lock, flags);
throttled = acm->throttle;
spin_unlock_irqrestore(&acm->throttle_lock, flags);
- if (throttled)
- {
+ if (throttled) {
dbg("acm_rx_tasklet: throttled");
return;
}
+ tty = tty_port_tty_get(&acm->port);
+
next_buffer:
spin_lock_irqsave(&acm->read_lock, flags);
if (list_empty(&acm->filled_read_bufs)) {
@@ -406,20 +418,22 @@ next_buffer:
dbg("acm_rx_tasklet: procesing buf 0x%p, size = %d", buf, buf->size);
- tty_buffer_request_room(tty, buf->size);
- spin_lock_irqsave(&acm->throttle_lock, flags);
- throttled = acm->throttle;
- spin_unlock_irqrestore(&acm->throttle_lock, flags);
- if (!throttled)
- tty_insert_flip_string(tty, buf->base, buf->size);
- tty_flip_buffer_push(tty);
-
- if (throttled) {
- dbg("Throttling noticed");
- spin_lock_irqsave(&acm->read_lock, flags);
- list_add(&buf->list, &acm->filled_read_bufs);
- spin_unlock_irqrestore(&acm->read_lock, flags);
- return;
+ if (tty) {
+ spin_lock_irqsave(&acm->throttle_lock, flags);
+ throttled = acm->throttle;
+ spin_unlock_irqrestore(&acm->throttle_lock, flags);
+ if (!throttled) {
+ tty_buffer_request_room(tty, buf->size);
+ tty_insert_flip_string(tty, buf->base, buf->size);
+ tty_flip_buffer_push(tty);
+ } else {
+ tty_kref_put(tty);
+ dbg("Throttling noticed");
+ spin_lock_irqsave(&acm->read_lock, flags);
+ list_add(&buf->list, &acm->filled_read_bufs);
+ spin_unlock_irqrestore(&acm->read_lock, flags);
+ return;
+ }
}
spin_lock_irqsave(&acm->read_lock, flags);
@@ -428,6 +442,8 @@ next_buffer:
goto next_buffer;
urbs:
+ tty_kref_put(tty);
+
while (!list_empty(&acm->spare_read_bufs)) {
spin_lock_irqsave(&acm->read_lock, flags);
if (list_empty(&acm->spare_read_urbs)) {
@@ -454,10 +470,11 @@ urbs:
rcv->urb->transfer_dma = buf->dma;
rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- /* This shouldn't kill the driver as unsuccessful URBs are returned to the
- free-urbs-pool and resubmited ASAP */
+ /* This shouldn't kill the driver as unsuccessful URBs are
+	   returned to the free-urbs-pool and resubmitted ASAP */
spin_lock_irqsave(&acm->read_lock, flags);
- if (acm->susp_count || usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) {
+ if (acm->susp_count ||
+ usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) {
list_add(&buf->list, &acm->spare_read_bufs);
list_add(&rcv->list, &acm->spare_read_urbs);
acm->processing = 0;
@@ -499,11 +516,14 @@ static void acm_write_bulk(struct urb *urb)
static void acm_softint(struct work_struct *work)
{
struct acm *acm = container_of(work, struct acm, work);
+ struct tty_struct *tty;
dev_vdbg(&acm->data->dev, "tx work\n");
if (!ACM_READY(acm))
return;
- tty_wakeup(acm->tty);
+ tty = tty_port_tty_get(&acm->port);
+ tty_wakeup(tty);
+ tty_kref_put(tty);
}
static void acm_waker(struct work_struct *waker)
@@ -543,8 +563,9 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
rv = 0;
set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
+
tty->driver_data = acm;
- acm->tty = tty;
+ tty_port_tty_set(&acm->port, tty);
if (usb_autopm_get_interface(acm->control) < 0)
goto early_bail;
@@ -552,11 +573,10 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
acm->control->needs_remote_wakeup = 1;
mutex_lock(&acm->mutex);
- if (acm->used++) {
+ if (acm->port.count++) {
usb_autopm_put_interface(acm->control);
goto done;
- }
-
+ }
acm->ctrlurb->dev = acm->dev;
if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
@@ -567,22 +587,22 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) &&
(acm->ctrl_caps & USB_CDC_CAP_LINE))
goto full_bailout;
+
usb_autopm_put_interface(acm->control);
INIT_LIST_HEAD(&acm->spare_read_urbs);
INIT_LIST_HEAD(&acm->spare_read_bufs);
INIT_LIST_HEAD(&acm->filled_read_bufs);
- for (i = 0; i < acm->rx_buflimit; i++) {
+
+ for (i = 0; i < acm->rx_buflimit; i++)
list_add(&(acm->ru[i].list), &acm->spare_read_urbs);
- }
- for (i = 0; i < acm->rx_buflimit; i++) {
+ for (i = 0; i < acm->rx_buflimit; i++)
list_add(&(acm->rb[i].list), &acm->spare_read_bufs);
- }
acm->throttle = 0;
tasklet_schedule(&acm->urb_task);
-
+ rv = tty_port_block_til_ready(&acm->port, tty, filp);
done:
mutex_unlock(&acm->mutex);
err_out:
@@ -593,16 +613,17 @@ full_bailout:
usb_kill_urb(acm->ctrlurb);
bail_out:
usb_autopm_put_interface(acm->control);
- acm->used--;
+ acm->port.count--;
mutex_unlock(&acm->mutex);
early_bail:
mutex_unlock(&open_mutex);
+ tty_port_tty_set(&acm->port, NULL);
return -EIO;
}
static void acm_tty_unregister(struct acm *acm)
{
- int i,nr;
+ int i, nr;
nr = acm->rx_buflimit;
tty_unregister_device(acm_tty_driver, acm->minor);
@@ -619,41 +640,56 @@ static void acm_tty_unregister(struct acm *acm)
static int acm_tty_chars_in_buffer(struct tty_struct *tty);
+static void acm_port_down(struct acm *acm, int drain)
+{
+ int i, nr = acm->rx_buflimit;
+ mutex_lock(&open_mutex);
+ if (acm->dev) {
+ usb_autopm_get_interface(acm->control);
+ acm_set_control(acm, acm->ctrlout = 0);
+ /* try letting the last writes drain naturally */
+ if (drain) {
+ wait_event_interruptible_timeout(acm->drain_wait,
+ (ACM_NW == acm_wb_is_avail(acm)) || !acm->dev,
+ ACM_CLOSE_TIMEOUT * HZ);
+ }
+ usb_kill_urb(acm->ctrlurb);
+ for (i = 0; i < ACM_NW; i++)
+ usb_kill_urb(acm->wb[i].urb);
+ for (i = 0; i < nr; i++)
+ usb_kill_urb(acm->ru[i].urb);
+ acm->control->needs_remote_wakeup = 0;
+ usb_autopm_put_interface(acm->control);
+ }
+ mutex_unlock(&open_mutex);
+}
+
+static void acm_tty_hangup(struct tty_struct *tty)
+{
+ struct acm *acm = tty->driver_data;
+ tty_port_hangup(&acm->port);
+ acm_port_down(acm, 0);
+}
+
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct acm *acm = tty->driver_data;
- int i,nr;
- if (!acm || !acm->used)
+ /* Perform the closing process and see if we need to do the hardware
+ shutdown */
+ if (tty_port_close_start(&acm->port, tty, filp) == 0)
return;
-
- nr = acm->rx_buflimit;
+ acm_port_down(acm, 0);
+ tty_port_close_end(&acm->port, tty);
mutex_lock(&open_mutex);
- if (!--acm->used) {
- if (acm->dev) {
- usb_autopm_get_interface(acm->control);
- acm_set_control(acm, acm->ctrlout = 0);
-
- /* try letting the last writes drain naturally */
- wait_event_interruptible_timeout(acm->drain_wait,
- (ACM_NW == acm_wb_is_avail(acm))
- || !acm->dev,
- ACM_CLOSE_TIMEOUT * HZ);
-
- usb_kill_urb(acm->ctrlurb);
- for (i = 0; i < ACM_NW; i++)
- usb_kill_urb(acm->wb[i].urb);
- for (i = 0; i < nr; i++)
- usb_kill_urb(acm->ru[i].urb);
- acm->control->needs_remote_wakeup = 0;
- usb_autopm_put_interface(acm->control);
- } else
- acm_tty_unregister(acm);
- }
+ tty_port_tty_set(&acm->port, NULL);
+ if (!acm->dev)
+ acm_tty_unregister(acm);
mutex_unlock(&open_mutex);
}
-static int acm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+static int acm_tty_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
{
struct acm *acm = tty->driver_data;
int stat;
@@ -669,7 +705,8 @@ static int acm_tty_write(struct tty_struct *tty, const unsigned char *buf, int c
return 0;
spin_lock_irqsave(&acm->write_lock, flags);
- if ((wbn = acm_wb_alloc(acm)) < 0) {
+ wbn = acm_wb_alloc(acm);
+ if (wbn < 0) {
spin_unlock_irqrestore(&acm->write_lock, flags);
return 0;
}
@@ -681,7 +718,8 @@ static int acm_tty_write(struct tty_struct *tty, const unsigned char *buf, int c
wb->len = count;
spin_unlock_irqrestore(&acm->write_lock, flags);
- if ((stat = acm_write_start(acm, wbn)) < 0)
+ stat = acm_write_start(acm, wbn);
+ if (stat < 0)
return stat;
return count;
}
@@ -767,8 +805,10 @@ static int acm_tty_tiocmset(struct tty_struct *tty, struct file *file,
return -EINVAL;
newctrl = acm->ctrlout;
- set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) | (set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
- clear = (clear & TIOCM_DTR ? ACM_CTRL_DTR : 0) | (clear & TIOCM_RTS ? ACM_CTRL_RTS : 0);
+ set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
+ (set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
+ clear = (clear & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
+ (clear & TIOCM_RTS ? ACM_CTRL_RTS : 0);
newctrl = (newctrl & ~clear) | set;
@@ -777,7 +817,8 @@ static int acm_tty_tiocmset(struct tty_struct *tty, struct file *file,
return acm_set_control(acm, acm->ctrlout = newctrl);
}
-static int acm_tty_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
+static int acm_tty_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
{
struct acm *acm = tty->driver_data;
@@ -799,7 +840,8 @@ static const __u8 acm_tty_size[] = {
5, 6, 7, 8
};
-static void acm_tty_set_termios(struct tty_struct *tty, struct ktermios *termios_old)
+static void acm_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *termios_old)
{
struct acm *acm = tty->driver_data;
struct ktermios *termios = tty->termios;
@@ -809,19 +851,23 @@ static void acm_tty_set_termios(struct tty_struct *tty, struct ktermios *termios
if (!ACM_READY(acm))
return;
+ /* FIXME: Needs to support the tty_baud interface */
+ /* FIXME: Broken on sparc */
newline.dwDTERate = cpu_to_le32p(acm_tty_speed +
(termios->c_cflag & CBAUD & ~CBAUDEX) + (termios->c_cflag & CBAUDEX ? 15 : 0));
newline.bCharFormat = termios->c_cflag & CSTOPB ? 2 : 0;
newline.bParityType = termios->c_cflag & PARENB ?
- (termios->c_cflag & PARODD ? 1 : 2) + (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
+ (termios->c_cflag & PARODD ? 1 : 2) +
+ (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4];
-
+ /* FIXME: Needs to clear unsupported bits in the termios */
acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
if (!newline.dwDTERate) {
newline.dwDTERate = acm->line.dwDTERate;
newctrl &= ~ACM_CTRL_DTR;
- } else newctrl |= ACM_CTRL_DTR;
+ } else
+ newctrl |= ACM_CTRL_DTR;
if (newctrl != acm->ctrlout)
acm_set_control(acm, acm->ctrlout = newctrl);
@@ -846,9 +892,8 @@ static void acm_write_buffers_free(struct acm *acm)
struct acm_wb *wb;
struct usb_device *usb_dev = interface_to_usbdev(acm->control);
- for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) {
+ for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++)
usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah);
- }
}
static void acm_read_buffers_free(struct acm *acm)
@@ -857,7 +902,8 @@ static void acm_read_buffers_free(struct acm *acm)
int i, n = acm->rx_buflimit;
for (i = 0; i < n; i++)
- usb_buffer_free(usb_dev, acm->readsize, acm->rb[i].base, acm->rb[i].dma);
+ usb_buffer_free(usb_dev, acm->readsize,
+ acm->rb[i].base, acm->rb[i].dma);
}
/* Little helper: write buffers allocate */
@@ -882,8 +928,8 @@ static int acm_write_buffers_alloc(struct acm *acm)
return 0;
}
-static int acm_probe (struct usb_interface *intf,
- const struct usb_device_id *id)
+static int acm_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct usb_cdc_union_desc *union_header = NULL;
struct usb_cdc_country_functional_desc *cfd = NULL;
@@ -891,13 +937,13 @@ static int acm_probe (struct usb_interface *intf,
int buflen = intf->altsetting->extralen;
struct usb_interface *control_interface;
struct usb_interface *data_interface;
- struct usb_endpoint_descriptor *epctrl;
- struct usb_endpoint_descriptor *epread;
- struct usb_endpoint_descriptor *epwrite;
+ struct usb_endpoint_descriptor *epctrl = NULL;
+ struct usb_endpoint_descriptor *epread = NULL;
+ struct usb_endpoint_descriptor *epwrite = NULL;
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct acm *acm;
int minor;
- int ctrlsize,readsize;
+ int ctrlsize, readsize;
u8 *buf;
u8 ac_management_function = 0;
u8 call_management_function = 0;
@@ -906,6 +952,7 @@ static int acm_probe (struct usb_interface *intf,
unsigned long quirks;
int num_rx_buf;
int i;
+ int combined_interfaces = 0;
/* normal quirks */
quirks = (unsigned long)id->driver_info;
@@ -917,7 +964,7 @@ static int acm_probe (struct usb_interface *intf,
control_interface = usb_ifnum_to_if(usb_dev, 0);
goto skip_normal_probe;
}
-
+
/* normal probing*/
if (!buffer) {
dev_err(&intf->dev, "Weird descriptor references\n");
@@ -925,8 +972,10 @@ static int acm_probe (struct usb_interface *intf,
}
if (!buflen) {
- if (intf->cur_altsetting->endpoint->extralen && intf->cur_altsetting->endpoint->extra) {
- dev_dbg(&intf->dev,"Seeking extra descriptors on endpoint\n");
+ if (intf->cur_altsetting->endpoint->extralen &&
+ intf->cur_altsetting->endpoint->extra) {
+ dev_dbg(&intf->dev,
+ "Seeking extra descriptors on endpoint\n");
buflen = intf->cur_altsetting->endpoint->extralen;
buffer = intf->cur_altsetting->endpoint->extra;
} else {
@@ -937,47 +986,43 @@ static int acm_probe (struct usb_interface *intf,
}
while (buflen > 0) {
- if (buffer [1] != USB_DT_CS_INTERFACE) {
+ if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
}
- switch (buffer [2]) {
- case USB_CDC_UNION_TYPE: /* we've found it */
- if (union_header) {
- dev_err(&intf->dev, "More than one "
- "union descriptor, "
- "skipping ...\n");
- goto next_desc;
- }
- union_header = (struct usb_cdc_union_desc *)
- buffer;
- break;
- case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
- cfd = (struct usb_cdc_country_functional_desc *)buffer;
- break;
- case USB_CDC_HEADER_TYPE: /* maybe check version */
- break; /* for now we ignore it */
- case USB_CDC_ACM_TYPE:
- ac_management_function = buffer[3];
- break;
- case USB_CDC_CALL_MANAGEMENT_TYPE:
- call_management_function = buffer[3];
- call_interface_num = buffer[4];
- if ((call_management_function & 3) != 3)
- dev_err(&intf->dev, "This device "
- "cannot do calls on its own. "
- "It is no modem.\n");
- break;
- default:
- /* there are LOTS more CDC descriptors that
- * could legitimately be found here.
- */
- dev_dbg(&intf->dev, "Ignoring descriptor: "
- "type %02x, length %d\n",
- buffer[2], buffer[0]);
- break;
+ switch (buffer[2]) {
+ case USB_CDC_UNION_TYPE: /* we've found it */
+ if (union_header) {
+ dev_err(&intf->dev, "More than one "
+ "union descriptor, skipping ...\n");
+ goto next_desc;
}
+ union_header = (struct usb_cdc_union_desc *)buffer;
+ break;
+ case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
+ cfd = (struct usb_cdc_country_functional_desc *)buffer;
+ break;
+ case USB_CDC_HEADER_TYPE: /* maybe check version */
+ break; /* for now we ignore it */
+ case USB_CDC_ACM_TYPE:
+ ac_management_function = buffer[3];
+ break;
+ case USB_CDC_CALL_MANAGEMENT_TYPE:
+ call_management_function = buffer[3];
+ call_interface_num = buffer[4];
+ if ((call_management_function & 3) != 3)
+ dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
+ break;
+ default:
+ /* there are LOTS more CDC descriptors that
+ * could legitimately be found here.
+ */
+ dev_dbg(&intf->dev, "Ignoring descriptor: "
+ "type %02x, length %d\n",
+ buffer[2], buffer[0]);
+ break;
+ }
next_desc:
buflen -= buffer[0];
buffer += buffer[0];
@@ -985,33 +1030,72 @@ next_desc:
if (!union_header) {
if (call_interface_num > 0) {
- dev_dbg(&intf->dev,"No union descriptor, using call management descriptor\n");
+ dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
control_interface = intf;
} else {
- dev_dbg(&intf->dev,"No union descriptor, giving up\n");
- return -ENODEV;
+ if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
+ dev_dbg(&intf->dev,"No union descriptor, giving up\n");
+ return -ENODEV;
+ } else {
+ dev_warn(&intf->dev,"No union descriptor, testing for castrated device\n");
+ combined_interfaces = 1;
+ control_interface = data_interface = intf;
+ goto look_for_collapsed_interface;
+ }
}
} else {
control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
if (!control_interface || !data_interface) {
- dev_dbg(&intf->dev,"no interfaces\n");
+ dev_dbg(&intf->dev, "no interfaces\n");
return -ENODEV;
}
}
-
+
if (data_interface_num != call_interface_num)
- dev_dbg(&intf->dev,"Separate call control interface. That is not fully supported.\n");
+ dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");
+
+ if (control_interface == data_interface) {
+ /* some broken devices designed for windows work this way */
+ dev_warn(&intf->dev,"Control and data interfaces are not separated!\n");
+ combined_interfaces = 1;
+ /* a popular other OS doesn't use it */
+ quirks |= NO_CAP_LINE;
+ if (data_interface->cur_altsetting->desc.bNumEndpoints != 3) {
+ dev_err(&intf->dev, "This needs exactly 3 endpoints\n");
+ return -EINVAL;
+ }
+look_for_collapsed_interface:
+ for (i = 0; i < 3; i++) {
+ struct usb_endpoint_descriptor *ep;
+ ep = &data_interface->cur_altsetting->endpoint[i].desc;
+
+ if (usb_endpoint_is_int_in(ep))
+ epctrl = ep;
+ else if (usb_endpoint_is_bulk_out(ep))
+ epwrite = ep;
+ else if (usb_endpoint_is_bulk_in(ep))
+ epread = ep;
+ else
+ return -EINVAL;
+ }
+ if (!epctrl || !epread || !epwrite)
+ return -ENODEV;
+ else
+ goto made_compressed_probe;
+ }
skip_normal_probe:
/*workaround for switched interfaces */
- if (data_interface->cur_altsetting->desc.bInterfaceClass != CDC_DATA_INTERFACE_TYPE) {
- if (control_interface->cur_altsetting->desc.bInterfaceClass == CDC_DATA_INTERFACE_TYPE) {
+ if (data_interface->cur_altsetting->desc.bInterfaceClass
+ != CDC_DATA_INTERFACE_TYPE) {
+ if (control_interface->cur_altsetting->desc.bInterfaceClass
+ == CDC_DATA_INTERFACE_TYPE) {
struct usb_interface *t;
- dev_dbg(&intf->dev,"Your device has switched interfaces.\n");
-
+ dev_dbg(&intf->dev,
+ "Your device has switched interfaces.\n");
t = control_interface;
control_interface = data_interface;
data_interface = t;
@@ -1021,11 +1105,12 @@ skip_normal_probe:
}
/* Accept probe requests only for the control interface */
- if (intf != control_interface)
+ if (!combined_interfaces && intf != control_interface)
return -ENODEV;
-
- if (usb_interface_claimed(data_interface)) { /* valid in this context */
- dev_dbg(&intf->dev,"The data interface isn't available\n");
+
+ if (!combined_interfaces && usb_interface_claimed(data_interface)) {
+ /* valid in this context */
+ dev_dbg(&intf->dev, "The data interface isn't available\n");
return -EBUSY;
}
@@ -1042,12 +1127,13 @@ skip_normal_probe:
if (!usb_endpoint_dir_in(epread)) {
/* descriptors are swapped */
struct usb_endpoint_descriptor *t;
- dev_dbg(&intf->dev,"The data interface has switched endpoints\n");
-
+ dev_dbg(&intf->dev,
+ "The data interface has switched endpoints\n");
t = epread;
epread = epwrite;
epwrite = t;
}
+made_compressed_probe:
dbg("interfaces are valid");
for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);
@@ -1056,19 +1142,24 @@ skip_normal_probe:
return -ENODEV;
}
- if (!(acm = kzalloc(sizeof(struct acm), GFP_KERNEL))) {
+ acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
+ if (acm == NULL) {
dev_dbg(&intf->dev, "out of memory (acm kzalloc)\n");
goto alloc_fail;
}
ctrlsize = le16_to_cpu(epctrl->wMaxPacketSize);
- readsize = le16_to_cpu(epread->wMaxPacketSize)* ( quirks == SINGLE_RX_URB ? 1 : 2);
+ readsize = le16_to_cpu(epread->wMaxPacketSize) *
+ (quirks == SINGLE_RX_URB ? 1 : 2);
+ acm->combined_interfaces = combined_interfaces;
acm->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20;
acm->control = control_interface;
acm->data = data_interface;
acm->minor = minor;
acm->dev = usb_dev;
acm->ctrl_caps = ac_management_function;
+ if (quirks & NO_CAP_LINE)
+ acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
acm->ctrlsize = ctrlsize;
acm->readsize = readsize;
acm->rx_buflimit = num_rx_buf;
@@ -1082,6 +1173,8 @@ skip_normal_probe:
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
+ tty_port_init(&acm->port);
+ acm->port.ops = &acm_port_ops;
buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
if (!buf) {
@@ -1103,8 +1196,10 @@ skip_normal_probe:
for (i = 0; i < num_rx_buf; i++) {
struct acm_ru *rcv = &(acm->ru[i]);
- if (!(rcv->urb = usb_alloc_urb(0, GFP_KERNEL))) {
- dev_dbg(&intf->dev, "out of memory (read urbs usb_alloc_urb)\n");
+ rcv->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (rcv->urb == NULL) {
+ dev_dbg(&intf->dev,
+ "out of memory (read urbs usb_alloc_urb)\n");
goto alloc_fail7;
}
@@ -1117,26 +1212,29 @@ skip_normal_probe:
rb->base = usb_buffer_alloc(acm->dev, readsize,
GFP_KERNEL, &rb->dma);
if (!rb->base) {
- dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n");
+ dev_dbg(&intf->dev,
+ "out of memory (read bufs usb_buffer_alloc)\n");
goto alloc_fail7;
}
}
- for(i = 0; i < ACM_NW; i++)
- {
+ for (i = 0; i < ACM_NW; i++) {
struct acm_wb *snd = &(acm->wb[i]);
- if (!(snd->urb = usb_alloc_urb(0, GFP_KERNEL))) {
- dev_dbg(&intf->dev, "out of memory (write urbs usb_alloc_urb)");
+ snd->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (snd->urb == NULL) {
+ dev_dbg(&intf->dev,
+ "out of memory (write urbs usb_alloc_urb)");
goto alloc_fail7;
}
- usb_fill_bulk_urb(snd->urb, usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
- NULL, acm->writesize, acm_write_bulk, snd);
+ usb_fill_bulk_urb(snd->urb, usb_dev,
+ usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
+ NULL, acm->writesize, acm_write_bulk, snd);
snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
snd->instance = acm;
}
- usb_set_intfdata (intf, acm);
+ usb_set_intfdata(intf, acm);
i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
if (i < 0)
@@ -1147,7 +1245,8 @@ skip_normal_probe:
if (!acm->country_codes)
goto skip_countries;
acm->country_code_size = cfd->bLength - 4;
- memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0, cfd->bLength - 4);
+ memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0,
+ cfd->bLength - 4);
acm->country_rel_date = cfd->iCountryCodeRelDate;
i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
@@ -1156,7 +1255,8 @@ skip_normal_probe:
goto skip_countries;
}
- i = device_create_file(&intf->dev, &dev_attr_iCountryCodeRelDate);
+ i = device_create_file(&intf->dev,
+ &dev_attr_iCountryCodeRelDate);
if (i < 0) {
kfree(acm->country_codes);
goto skip_countries;
@@ -1164,8 +1264,11 @@ skip_normal_probe:
}
skip_countries:
- usb_fill_int_urb(acm->ctrlurb, usb_dev, usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
- acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm, epctrl->bInterval);
+ usb_fill_int_urb(acm->ctrlurb, usb_dev,
+ usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
+ acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
+ /* works around buggy devices */
+ epctrl->bInterval ? epctrl->bInterval : 0xff);
acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
acm->ctrlurb->transfer_dma = acm->ctrl_dma;
@@ -1212,7 +1315,7 @@ static void stop_data_traffic(struct acm *acm)
tasklet_disable(&acm->urb_task);
usb_kill_urb(acm->ctrlurb);
- for(i = 0; i < ACM_NW; i++)
+ for (i = 0; i < ACM_NW; i++)
usb_kill_urb(acm->wb[i].urb);
for (i = 0; i < acm->rx_buflimit; i++)
usb_kill_urb(acm->ru[i].urb);
@@ -1227,13 +1330,14 @@ static void acm_disconnect(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct tty_struct *tty;
/* sibling interface is already cleaning up */
if (!acm)
return;
mutex_lock(&open_mutex);
- if (acm->country_codes){
+ if (acm->country_codes) {
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
device_remove_file(&acm->control->dev,
@@ -1247,22 +1351,26 @@ static void acm_disconnect(struct usb_interface *intf)
stop_data_traffic(acm);
acm_write_buffers_free(acm);
- usb_buffer_free(usb_dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
+ usb_buffer_free(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
+ acm->ctrl_dma);
acm_read_buffers_free(acm);
- usb_driver_release_interface(&acm_driver, intf == acm->control ?
+ if (!acm->combined_interfaces)
+ usb_driver_release_interface(&acm_driver, intf == acm->control ?
acm->data : acm->control);
- if (!acm->used) {
+ if (acm->port.count == 0) {
acm_tty_unregister(acm);
mutex_unlock(&open_mutex);
return;
}
mutex_unlock(&open_mutex);
-
- if (acm->tty)
- tty_hangup(acm->tty);
+ tty = tty_port_tty_get(&acm->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
}
#ifdef CONFIG_PM
@@ -1297,7 +1405,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
*/
mutex_lock(&acm->mutex);
- if (acm->used)
+ if (acm->port.count)
stop_data_traffic(acm);
mutex_unlock(&acm->mutex);
@@ -1319,7 +1427,7 @@ static int acm_resume(struct usb_interface *intf)
return 0;
mutex_lock(&acm->mutex);
- if (acm->used) {
+ if (acm->port.count) {
rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
if (rv < 0)
goto err_out;
@@ -1375,6 +1483,9 @@ static struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0572, 0x1324), /* Conexant USB MODEM RD02-D400 */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
@@ -1384,6 +1495,9 @@ static struct usb_device_id acm_ids[] = {
Maybe we should define a new
quirk for this. */
},
+ { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
@@ -1395,7 +1509,7 @@ static struct usb_device_id acm_ids[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_GSM) },
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
- USB_CDC_ACM_PROTO_AT_3G ) },
+ USB_CDC_ACM_PROTO_AT_3G) },
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_CDMA) },
@@ -1403,7 +1517,7 @@ static struct usb_device_id acm_ids[] = {
{ }
};
-MODULE_DEVICE_TABLE (usb, acm_ids);
+MODULE_DEVICE_TABLE(usb, acm_ids);
static struct usb_driver acm_driver = {
.name = "cdc_acm",
@@ -1426,6 +1540,7 @@ static struct usb_driver acm_driver = {
static const struct tty_operations acm_ops = {
.open = acm_tty_open,
.close = acm_tty_close,
+ .hangup = acm_tty_hangup,
.write = acm_tty_write,
.write_room = acm_tty_write_room,
.ioctl = acm_tty_ioctl,
@@ -1457,7 +1572,8 @@ static int __init acm_init(void)
acm_tty_driver->subtype = SERIAL_TYPE_NORMAL,
acm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
acm_tty_driver->init_termios = tty_std_termios;
- acm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ acm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD |
+ HUPCL | CLOCAL;
tty_set_operations(acm_tty_driver, &acm_ops);
retval = tty_register_driver(acm_tty_driver);
@@ -1489,7 +1605,7 @@ static void __exit acm_exit(void)
module_init(acm_init);
module_exit(acm_exit);
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(ACM_TTY_MAJOR);
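[Editor's note: throughout the cdc-acm conversion above, the bare acm->tty pointer is replaced by tty_port helpers that hand out a reference-counted tty. A minimal sketch of that access pattern (illustrative only, not code from the patch):]

	struct tty_struct *tty = tty_port_tty_get(&acm->port);	/* takes a reference, may return NULL */
	if (tty) {
		tty_wakeup(tty);	/* or tty_hangup(), tty_insert_flip_string(), ... */
		tty_kref_put(tty);	/* drop the reference when done */
	}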
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 1f95e7aa1b6..1602324808b 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -89,8 +89,8 @@ struct acm {
struct usb_device *dev; /* the corresponding usb device */
struct usb_interface *control; /* control interface */
struct usb_interface *data; /* data interface */
- struct tty_struct *tty; /* the corresponding tty */
- struct urb *ctrlurb; /* urbs */
+ struct tty_port port; /* our tty port data */
+ struct urb *ctrlurb; /* urbs */
u8 *ctrl_buffer; /* buffers of urbs */
dma_addr_t ctrl_dma; /* dma handles of buffers */
u8 *country_codes; /* country codes from device */
@@ -120,12 +120,12 @@ struct acm {
unsigned int ctrlout; /* output control lines (DTR, RTS) */
unsigned int writesize; /* max packet size for the output bulk endpoint */
unsigned int readsize,ctrlsize; /* buffer sizes for freeing */
- unsigned int used; /* someone has this acm's device open */
unsigned int minor; /* acm minor number */
unsigned char throttle; /* throttled by tty layer */
unsigned char clocal; /* termios CLOCAL */
unsigned int ctrl_caps; /* control capabilities from the class specific header */
unsigned int susp_count; /* number of suspended interfaces */
+ int combined_interfaces:1; /* control and data collapsed */
struct acm_wb *delayed_wb; /* write queued for a device about to be woken */
};
@@ -134,3 +134,4 @@ struct acm {
/* constants describing various quirks and errors */
#define NO_UNION_NORMAL 1
#define SINGLE_RX_URB 2
+#define NO_CAP_LINE 4
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index d2747a49b97..26c09f0257d 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1057,8 +1057,14 @@ static const struct file_operations usblp_fops = {
.release = usblp_release,
};
+static char *usblp_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
static struct usb_class_driver usblp_class = {
.name = "lp%d",
+ .nodename = usblp_nodename,
.fops = &usblp_fops,
.minor_base = USBLP_MINOR_BASE,
};
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index c40a9b284cc..3703789d0d2 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -927,21 +927,27 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
case USBTMC_IOCTL_CLEAR_OUT_HALT:
retval = usbtmc_ioctl_clear_out_halt(data);
+ break;
case USBTMC_IOCTL_CLEAR_IN_HALT:
retval = usbtmc_ioctl_clear_in_halt(data);
+ break;
case USBTMC_IOCTL_INDICATOR_PULSE:
retval = usbtmc_ioctl_indicator_pulse(data);
+ break;
case USBTMC_IOCTL_CLEAR:
retval = usbtmc_ioctl_clear(data);
+ break;
case USBTMC_IOCTL_ABORT_BULK_OUT:
retval = usbtmc_ioctl_abort_bulk_out(data);
+ break;
case USBTMC_IOCTL_ABORT_BULK_IN:
retval = usbtmc_ioctl_abort_bulk_in(data);
+ break;
}
mutex_unlock(&data->io_mutex);
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index e1759d17ac5..69280c35b5c 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -28,7 +28,7 @@ comment "Miscellaneous USB options"
depends on USB
config USB_DEVICEFS
- bool "USB device filesystem"
+ bool "USB device filesystem (DEPRECATED)" if EMBEDDED
depends on USB
---help---
If you say Y here (and to "/proc file system support" in the "File
@@ -46,11 +46,15 @@ config USB_DEVICEFS
For the format of the various /proc/bus/usb/ files, please read
<file:Documentation/usb/proc_usb_info.txt>.
- Usbfs files can't handle Access Control Lists (ACL), which are the
- default way to grant access to USB devices for untrusted users of a
- desktop system. The usbfs functionality is replaced by real
- device-nodes managed by udev. These nodes live in /dev/bus/usb and
- are used by libusb.
+ Modern Linux systems do not use this.
+
+ Usbfs entries are files and not character devices; usbfs can't
+ handle Access Control Lists (ACL) which are the default way to
+ grant access to USB devices for untrusted users of a desktop
+ system.
+
+ The usbfs functionality is replaced by real device-nodes managed by
+	  udev. These nodes live in /dev/bus/usb and are used by libusb.
config USB_DEVICE_CLASS
bool "USB device class-devices (DEPRECATED)"
diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile
index b6078706fb9..ec16e602990 100644
--- a/drivers/usb/core/Makefile
+++ b/drivers/usb/core/Makefile
@@ -4,14 +4,14 @@
usbcore-objs := usb.o hub.o hcd.o urb.o message.o driver.o \
config.o file.o buffer.o sysfs.o endpoint.o \
- devio.o notify.o generic.o quirks.o
+ devio.o notify.o generic.o quirks.o devices.o
ifeq ($(CONFIG_PCI),y)
usbcore-objs += hcd-pci.o
endif
ifeq ($(CONFIG_USB_DEVICEFS),y)
- usbcore-objs += inode.o devices.o
+ usbcore-objs += inode.o
endif
obj-$(CONFIG_USB) += usbcore.o
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 568244c99bd..24dfb33f90c 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -19,6 +19,32 @@ static inline const char *plural(int n)
return (n == 1 ? "" : "s");
}
+/* FIXME: this is a kludge */
+static int find_next_descriptor_more(unsigned char *buffer, int size,
+ int dt1, int dt2, int dt3, int *num_skipped)
+{
+ struct usb_descriptor_header *h;
+ int n = 0;
+ unsigned char *buffer0 = buffer;
+
+ /* Find the next descriptor of type dt1 or dt2 or dt3 */
+ while (size > 0) {
+ h = (struct usb_descriptor_header *) buffer;
+ if (h->bDescriptorType == dt1 || h->bDescriptorType == dt2 ||
+ h->bDescriptorType == dt3)
+ break;
+ buffer += h->bLength;
+ size -= h->bLength;
+ ++n;
+ }
+
+ /* Store the number of descriptors skipped and return the
+ * number of bytes skipped */
+ if (num_skipped)
+ *num_skipped = n;
+ return buffer - buffer0;
+}
+
static int find_next_descriptor(unsigned char *buffer, int size,
int dt1, int dt2, int *num_skipped)
{
@@ -43,6 +69,129 @@ static int find_next_descriptor(unsigned char *buffer, int size,
return buffer - buffer0;
}
+static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ int inum, int asnum, struct usb_host_endpoint *ep,
+ int num_ep, unsigned char *buffer, int size)
+{
+ unsigned char *buffer_start = buffer;
+ struct usb_ss_ep_comp_descriptor *desc;
+ int retval;
+ int num_skipped;
+ int max_tx;
+ int i;
+
+ /* Allocate space for the SS endpoint companion descriptor */
+ ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
+ GFP_KERNEL);
+ if (!ep->ss_ep_comp)
+ return -ENOMEM;
+ desc = (struct usb_ss_ep_comp_descriptor *) buffer;
+ if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
+ dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
+ " interface %d altsetting %d ep %d: "
+ "using minimum values\n",
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
+ ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
+ ep->ss_ep_comp->desc.bMaxBurst = 0;
+ /*
+ * Leave bmAttributes as zero, which will mean no streams for
+ * bulk, and isoc won't support multiple bursts of packets.
+ * With bursts of only one packet, and a Mult of 1, the max
+ * amount of data moved per endpoint service interval is one
+ * packet.
+ */
+ if (usb_endpoint_xfer_isoc(&ep->desc) ||
+ usb_endpoint_xfer_int(&ep->desc))
+ ep->ss_ep_comp->desc.wBytesPerInterval =
+ ep->desc.wMaxPacketSize;
+ /*
+ * The next descriptor is for an Endpoint or Interface,
+ * no extra descriptors to copy into the companion structure,
+ * and we didn't eat up any of the buffer.
+ */
+ retval = 0;
+ goto valid;
+ }
+ memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
+ desc = &ep->ss_ep_comp->desc;
+ buffer += desc->bLength;
+ size -= desc->bLength;
+
+ /* Eat up the other descriptors we don't care about */
+ ep->ss_ep_comp->extra = buffer;
+ i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
+ USB_DT_INTERFACE, &num_skipped);
+ ep->ss_ep_comp->extralen = i;
+ buffer += i;
+ size -= i;
+ retval = buffer - buffer_start + i;
+ if (num_skipped > 0)
+ dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
+ num_skipped, plural(num_skipped),
+ "SuperSpeed endpoint companion");
+
+ /* Check the various values */
+ if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) {
+ dev_warn(ddev, "Control endpoint with bMaxBurst = %d in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to zero\n", desc->bMaxBurst,
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ desc->bMaxBurst = 0;
+ }
+ if (desc->bMaxBurst > 15) {
+ dev_warn(ddev, "Endpoint with bMaxBurst = %d in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to 15\n", desc->bMaxBurst,
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ desc->bMaxBurst = 15;
+ }
+ if ((usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_int(&ep->desc))
+ && desc->bmAttributes != 0) {
+ dev_warn(ddev, "%s endpoint with bmAttributes = %d in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to zero\n",
+ usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk",
+ desc->bmAttributes,
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ desc->bmAttributes = 0;
+ }
+ if (usb_endpoint_xfer_bulk(&ep->desc) && desc->bmAttributes > 16) {
+ dev_warn(ddev, "Bulk endpoint with more than 65536 streams in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to max\n",
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ desc->bmAttributes = 16;
+ }
+ if (usb_endpoint_xfer_isoc(&ep->desc) && desc->bmAttributes > 2) {
+ dev_warn(ddev, "Isoc endpoint has Mult of %d in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to 3\n", desc->bmAttributes + 1,
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ desc->bmAttributes = 2;
+ }
+ if (usb_endpoint_xfer_isoc(&ep->desc)) {
+ max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1) *
+ (desc->bmAttributes + 1);
+ } else if (usb_endpoint_xfer_int(&ep->desc)) {
+ max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1);
+ } else {
+ goto valid;
+ }
+ if (desc->wBytesPerInterval > max_tx) {
+ dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to %d\n",
+ usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
+ desc->wBytesPerInterval,
+ cfgno, inum, asnum, ep->desc.bEndpointAddress,
+ max_tx);
+ desc->wBytesPerInterval = max_tx;
+ }
+valid:
+ return retval;
+}
+
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
int asnum, struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
@@ -50,7 +199,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
unsigned char *buffer0 = buffer;
struct usb_endpoint_descriptor *d;
struct usb_host_endpoint *endpoint;
- int n, i, j;
+ int n, i, j, retval;
d = (struct usb_endpoint_descriptor *) buffer;
buffer += d->bLength;
@@ -92,6 +241,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
if (usb_endpoint_xfer_int(d)) {
i = 1;
switch (to_usb_device(ddev)->speed) {
+ case USB_SPEED_SUPER:
case USB_SPEED_HIGH:
/* Many device manufacturers are using full-speed
* bInterval values in high-speed interrupt endpoint
@@ -161,17 +311,39 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
cfgno, inum, asnum, d->bEndpointAddress,
maxp);
}
-
- /* Skip over any Class Specific or Vendor Specific descriptors;
- * find the next endpoint or interface descriptor */
- endpoint->extra = buffer;
- i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
- USB_DT_INTERFACE, &n);
- endpoint->extralen = i;
+ /* Allocate room for and parse any SS endpoint companion descriptors */
+ if (to_usb_device(ddev)->speed == USB_SPEED_SUPER) {
+ endpoint->extra = buffer;
+ i = find_next_descriptor_more(buffer, size, USB_DT_SS_ENDPOINT_COMP,
+ USB_DT_ENDPOINT, USB_DT_INTERFACE, &n);
+ endpoint->extralen = i;
+ buffer += i;
+ size -= i;
+
+ if (size > 0) {
+ retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
+ inum, asnum, endpoint, num_ep, buffer,
+ size);
+ if (retval >= 0) {
+ buffer += retval;
+ retval = buffer - buffer0;
+ }
+ } else {
+ retval = buffer - buffer0;
+ }
+ } else {
+ /* Skip over any Class Specific or Vendor Specific descriptors;
+ * find the next endpoint or interface descriptor */
+ endpoint->extra = buffer;
+ i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
+ USB_DT_INTERFACE, &n);
+ endpoint->extralen = i;
+ retval = buffer - buffer0 + i;
+ }
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
n, plural(n), "endpoint");
- return buffer - buffer0 + i;
+ return retval;
skip_to_next_endpoint_or_interface_descriptor:
i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
@@ -452,6 +624,8 @@ static int usb_parse_configuration(struct device *ddev, int cfgidx,
kref_init(&intfc->ref);
}
+ /* FIXME: parse the BOS descriptor */
+
/* Skip over any Class Specific or Vendor Specific descriptors;
* find the first interface descriptor */
config->extra = buffer;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index d0a21a5f820..69e5773abfc 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -154,16 +154,11 @@ static const struct usb_device_id *usb_match_dynamic_id(struct usb_interface *in
static int usb_probe_device(struct device *dev)
{
struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
- struct usb_device *udev;
+ struct usb_device *udev = to_usb_device(dev);
int error = -ENODEV;
dev_dbg(dev, "%s\n", __func__);
- if (!is_usb_device(dev)) /* Sanity check */
- return error;
-
- udev = to_usb_device(dev);
-
/* TODO: Add real matching code */
/* The device should always appear to be in use
@@ -203,18 +198,13 @@ static void usb_cancel_queued_reset(struct usb_interface *iface)
static int usb_probe_interface(struct device *dev)
{
struct usb_driver *driver = to_usb_driver(dev->driver);
- struct usb_interface *intf;
- struct usb_device *udev;
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
const struct usb_device_id *id;
int error = -ENODEV;
dev_dbg(dev, "%s\n", __func__);
- if (is_usb_device(dev)) /* Sanity check */
- return error;
-
- intf = to_usb_interface(dev);
- udev = interface_to_usbdev(intf);
intf->needs_binding = 0;
if (udev->authorized == 0) {
@@ -385,7 +375,6 @@ void usb_driver_release_interface(struct usb_driver *driver,
struct usb_interface *iface)
{
struct device *dev = &iface->dev;
- struct usb_device *udev = interface_to_usbdev(iface);
/* this should never happen, don't release something that's not ours */
if (!dev->driver || dev->driver != &driver->drvwrap.driver)
@@ -394,23 +383,19 @@ void usb_driver_release_interface(struct usb_driver *driver,
/* don't release from within disconnect() */
if (iface->condition != USB_INTERFACE_BOUND)
return;
+ iface->condition = USB_INTERFACE_UNBINDING;
- /* don't release if the interface hasn't been added yet */
+ /* Release via the driver core only if the interface
+ * has already been registered
+ */
if (device_is_registered(dev)) {
- iface->condition = USB_INTERFACE_UNBINDING;
device_release_driver(dev);
} else {
- iface->condition = USB_INTERFACE_UNBOUND;
- usb_cancel_queued_reset(iface);
+ down(&dev->sem);
+ usb_unbind_interface(dev);
+ dev->driver = NULL;
+ up(&dev->sem);
}
- dev->driver = NULL;
- usb_set_intfdata(iface, NULL);
-
- usb_pm_lock(udev);
- iface->condition = USB_INTERFACE_UNBOUND;
- mark_quiesced(iface);
- iface->needs_remote_wakeup = 0;
- usb_pm_unlock(udev);
}
EXPORT_SYMBOL_GPL(usb_driver_release_interface);
@@ -598,7 +583,7 @@ static int usb_device_match(struct device *dev, struct device_driver *drv)
/* TODO: Add real matching code */
return 1;
- } else {
+ } else if (is_usb_interface(dev)) {
struct usb_interface *intf;
struct usb_driver *usb_drv;
const struct usb_device_id *id;
@@ -630,11 +615,14 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
/* driver is often null here; dev_dbg() would oops */
pr_debug("usb %s: uevent\n", dev_name(dev));
- if (is_usb_device(dev))
+ if (is_usb_device(dev)) {
usb_dev = to_usb_device(dev);
- else {
+ } else if (is_usb_interface(dev)) {
struct usb_interface *intf = to_usb_interface(dev);
+
usb_dev = interface_to_usbdev(intf);
+ } else {
+ return 0;
}
if (usb_dev->devnum < 0) {
@@ -1762,6 +1750,7 @@ int usb_suspend(struct device *dev, pm_message_t msg)
int usb_resume(struct device *dev, pm_message_t msg)
{
struct usb_device *udev;
+ int status;
udev = to_usb_device(dev);
@@ -1771,7 +1760,14 @@ int usb_resume(struct device *dev, pm_message_t msg)
*/
if (udev->skip_sys_resume)
return 0;
- return usb_external_resume_device(udev, msg);
+ status = usb_external_resume_device(udev, msg);
+
+ /* Avoid PM error messages for devices disconnected while suspended
+ * as we'll display regular disconnect messages just a bit later.
+ */
+ if (status == -ENODEV)
+ return 0;
+ return status;
}
#endif /* CONFIG_PM */
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 40dee2ac013..bc39fc40bbd 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -15,19 +15,18 @@
#include <linux/usb.h>
#include "usb.h"
-#define MAX_ENDPOINT_MINORS (64*128*32)
-static int usb_endpoint_major;
-static DEFINE_IDR(endpoint_idr);
-
struct ep_device {
struct usb_endpoint_descriptor *desc;
struct usb_device *udev;
struct device dev;
- int minor;
};
#define to_ep_device(_dev) \
container_of(_dev, struct ep_device, dev)
+struct device_type usb_ep_device_type = {
+ .name = "usb_endpoint",
+};
+
struct ep_attribute {
struct attribute attr;
ssize_t (*show)(struct usb_device *,
@@ -160,118 +159,10 @@ static struct attribute_group *ep_dev_groups[] = {
NULL
};
-static int usb_endpoint_major_init(void)
-{
- dev_t dev;
- int error;
-
- error = alloc_chrdev_region(&dev, 0, MAX_ENDPOINT_MINORS,
- "usb_endpoint");
- if (error) {
- printk(KERN_ERR "Unable to get a dynamic major for "
- "usb endpoints.\n");
- return error;
- }
- usb_endpoint_major = MAJOR(dev);
-
- return error;
-}
-
-static void usb_endpoint_major_cleanup(void)
-{
- unregister_chrdev_region(MKDEV(usb_endpoint_major, 0),
- MAX_ENDPOINT_MINORS);
-}
-
-static int endpoint_get_minor(struct ep_device *ep_dev)
-{
- static DEFINE_MUTEX(minor_lock);
- int retval = -ENOMEM;
- int id;
-
- mutex_lock(&minor_lock);
- if (idr_pre_get(&endpoint_idr, GFP_KERNEL) == 0)
- goto exit;
-
- retval = idr_get_new(&endpoint_idr, ep_dev, &id);
- if (retval < 0) {
- if (retval == -EAGAIN)
- retval = -ENOMEM;
- goto exit;
- }
- ep_dev->minor = id & MAX_ID_MASK;
-exit:
- mutex_unlock(&minor_lock);
- return retval;
-}
-
-static void endpoint_free_minor(struct ep_device *ep_dev)
-{
- idr_remove(&endpoint_idr, ep_dev->minor);
-}
-
-static struct endpoint_class {
- struct kref kref;
- struct class *class;
-} *ep_class;
-
-static int init_endpoint_class(void)
-{
- int result = 0;
-
- if (ep_class != NULL) {
- kref_get(&ep_class->kref);
- goto exit;
- }
-
- ep_class = kmalloc(sizeof(*ep_class), GFP_KERNEL);
- if (!ep_class) {
- result = -ENOMEM;
- goto exit;
- }
-
- kref_init(&ep_class->kref);
- ep_class->class = class_create(THIS_MODULE, "usb_endpoint");
- if (IS_ERR(ep_class->class)) {
- result = PTR_ERR(ep_class->class);
- goto class_create_error;
- }
-
- result = usb_endpoint_major_init();
- if (result)
- goto endpoint_major_error;
-
- goto exit;
-
-endpoint_major_error:
- class_destroy(ep_class->class);
-class_create_error:
- kfree(ep_class);
- ep_class = NULL;
-exit:
- return result;
-}
-
-static void release_endpoint_class(struct kref *kref)
-{
- /* Ok, we cheat as we know we only have one ep_class */
- class_destroy(ep_class->class);
- kfree(ep_class);
- ep_class = NULL;
- usb_endpoint_major_cleanup();
-}
-
-static void destroy_endpoint_class(void)
-{
- if (ep_class)
- kref_put(&ep_class->kref, release_endpoint_class);
-}
-
static void ep_device_release(struct device *dev)
{
struct ep_device *ep_dev = to_ep_device(dev);
- endpoint_free_minor(ep_dev);
kfree(ep_dev);
}
@@ -279,62 +170,32 @@ int usb_create_ep_devs(struct device *parent,
struct usb_host_endpoint *endpoint,
struct usb_device *udev)
{
- char name[8];
struct ep_device *ep_dev;
int retval;
- retval = init_endpoint_class();
- if (retval)
- goto exit;
-
ep_dev = kzalloc(sizeof(*ep_dev), GFP_KERNEL);
if (!ep_dev) {
retval = -ENOMEM;
- goto error_alloc;
- }
-
- retval = endpoint_get_minor(ep_dev);
- if (retval) {
- dev_err(parent, "can not allocate minor number for %s\n",
- dev_name(&ep_dev->dev));
- goto error_register;
+ goto exit;
}
ep_dev->desc = &endpoint->desc;
ep_dev->udev = udev;
ep_dev->dev.groups = ep_dev_groups;
- ep_dev->dev.devt = MKDEV(usb_endpoint_major, ep_dev->minor);
- ep_dev->dev.class = ep_class->class;
+ ep_dev->dev.type = &usb_ep_device_type;
ep_dev->dev.parent = parent;
ep_dev->dev.release = ep_device_release;
- dev_set_name(&ep_dev->dev, "usbdev%d.%d_ep%02x",
- udev->bus->busnum, udev->devnum,
- endpoint->desc.bEndpointAddress);
+ dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);
retval = device_register(&ep_dev->dev);
if (retval)
- goto error_chrdev;
+ goto error_register;
- /* create the symlink to the old-style "ep_XX" directory */
- sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
- retval = sysfs_create_link(&parent->kobj, &ep_dev->dev.kobj, name);
- if (retval)
- goto error_link;
endpoint->ep_dev = ep_dev;
return retval;
-error_link:
- device_unregister(&ep_dev->dev);
- destroy_endpoint_class();
- return retval;
-
-error_chrdev:
- endpoint_free_minor(ep_dev);
-
error_register:
kfree(ep_dev);
-error_alloc:
- destroy_endpoint_class();
exit:
return retval;
}
@@ -344,12 +205,7 @@ void usb_remove_ep_devs(struct usb_host_endpoint *endpoint)
struct ep_device *ep_dev = endpoint->ep_dev;
if (ep_dev) {
- char name[8];
-
- sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
- sysfs_remove_link(&ep_dev->dev.parent->kobj, name);
device_unregister(&ep_dev->dev);
endpoint->ep_dev = NULL;
- destroy_endpoint_class();
}
}
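
With the private class, chrdev region and IDR gone, endpoint attributes are now plain typed child devices named ep_XX under their interface. Core code can recognize them by device_type through the is_usb_endpoint() helper added to usb.h later in this patch; a small sketch (not part of the patch, the counting function is illustrative):

#include <linux/device.h>
#include "usb.h"                /* core-private; declares is_usb_endpoint() */

/* Count the endpoint child devices of an interface. */
static int count_endpoint(struct device *dev, void *data)
{
        int *n = data;

        if (is_usb_endpoint(dev))
                (*n)++;
        return 0;
}

/* usage:
 *      int n = 0;
 *      device_for_each_child(&intf->dev, &n, count_endpoint);
 */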
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 997e659ff69..5cef88929b3 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -67,6 +67,16 @@ static struct usb_class {
struct class *class;
} *usb_class;
+static char *usb_nodename(struct device *dev)
+{
+ struct usb_class_driver *drv;
+
+ drv = dev_get_drvdata(dev);
+ if (!drv || !drv->nodename)
+ return NULL;
+ return drv->nodename(dev);
+}
+
static int init_usb_class(void)
{
int result = 0;
@@ -90,6 +100,7 @@ static int init_usb_class(void)
kfree(usb_class);
usb_class = NULL;
}
+ usb_class->class->nodename = usb_nodename;
exit:
return result;
@@ -198,7 +209,7 @@ int usb_register_dev(struct usb_interface *intf,
else
temp = name;
intf->usb_dev = device_create(usb_class->class, &intf->dev,
- MKDEV(USB_MAJOR, minor), NULL,
+ MKDEV(USB_MAJOR, minor), class_driver,
"%s", temp);
if (IS_ERR(intf->usb_dev)) {
down_write(&minor_rwsem);
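
usb_register_dev() now stores the usb_class_driver as drvdata, so usb_nodename() can forward the naming decision to a per-driver callback. A hedged sketch of a driver opting in (the skel_* names and minor base are placeholders, and the nodename member of struct usb_class_driver is assumed from this patch series):

#include <linux/usb.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gfp.h>

static const struct file_operations skel_fops = {
        .owner  = THIS_MODULE,
        /* open/read/write/release as usual */
};

/* Suggest a node path relative to /dev, e.g. /dev/usb/skel0. */
static char *skel_nodename(struct device *dev)
{
        return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}

static struct usb_class_driver skel_class = {
        .name           = "skel%d",
        .fops           = &skel_fops,
        .minor_base     = 192,
        .nodename       = skel_nodename,  /* consulted by usb_nodename() */
};

/* in the interface probe routine:  usb_register_dev(intf, &skel_class); */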
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index a4301dc02d2..91f2885b6ee 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -185,194 +185,198 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(usb_hcd_pci_remove);
-
-#ifdef CONFIG_PM
-
/**
- * usb_hcd_pci_suspend - power management suspend of a PCI-based HCD
- * @dev: USB Host Controller being suspended
- * @message: Power Management message describing this state transition
- *
- * Store this function in the HCD's struct pci_driver as .suspend.
+ * usb_hcd_pci_shutdown - shutdown host controller
+ * @dev: USB Host Controller being shutdown
*/
-int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message)
+void usb_hcd_pci_shutdown(struct pci_dev *dev)
+{
+ struct usb_hcd *hcd;
+
+ hcd = pci_get_drvdata(dev);
+ if (!hcd)
+ return;
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
+
+#ifdef CONFIG_PM_SLEEP
+
+static int check_root_hub_suspended(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
+
+ if (!(hcd->state == HC_STATE_SUSPENDED ||
+ hcd->state == HC_STATE_HALT)) {
+ dev_warn(dev, "Root hub is not suspended\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int hcd_pci_suspend(struct device *dev)
{
- struct usb_hcd *hcd = pci_get_drvdata(dev);
- int retval = 0;
- int wake, w;
- int has_pci_pm;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
+ int retval;
/* Root hub suspend should have stopped all downstream traffic,
* and all bus master traffic. And done so for both the interface
* and the stub usb_device (which we check here). But maybe it
* didn't; writing sysfs power/state files ignores such rules...
- *
- * We must ignore the FREEZE vs SUSPEND distinction here, because
- * otherwise the swsusp will save (and restore) garbage state.
*/
- if (!(hcd->state == HC_STATE_SUSPENDED ||
- hcd->state == HC_STATE_HALT)) {
- dev_warn(&dev->dev, "Root hub is not suspended\n");
- retval = -EBUSY;
- goto done;
- }
+ retval = check_root_hub_suspended(dev);
+ if (retval)
+ return retval;
/* We might already be suspended (runtime PM -- not yet written) */
- if (dev->current_state != PCI_D0)
- goto done;
+ if (pci_dev->current_state != PCI_D0)
+ return retval;
if (hcd->driver->pci_suspend) {
- retval = hcd->driver->pci_suspend(hcd, message);
+ retval = hcd->driver->pci_suspend(hcd);
suspend_report_result(hcd->driver->pci_suspend, retval);
if (retval)
- goto done;
+ return retval;
}
- synchronize_irq(dev->irq);
+ synchronize_irq(pci_dev->irq);
/* Downstream ports from this root hub should already be quiesced, so
* there will be no DMA activity. Now we can shut down the upstream
- * link (except maybe for PME# resume signaling) and enter some PCI
- * low power state, if the hardware allows.
+ * link (except maybe for PME# resume signaling). We'll enter a
+ * low power state during suspend_noirq, if the hardware allows.
*/
- pci_disable_device(dev);
+ pci_disable_device(pci_dev);
+ return retval;
+}
+
+static int hcd_pci_suspend_noirq(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
+ int retval;
+
+ retval = check_root_hub_suspended(dev);
+ if (retval)
+ return retval;
- pci_save_state(dev);
+ pci_save_state(pci_dev);
- /* Don't fail on error to enable wakeup. We rely on pci code
- * to reject requests the hardware can't implement, rather
- * than coding the same thing.
+ /* If the root hub is HALTed rather than SUSPENDed,
+ * disallow remote wakeup.
*/
- wake = (hcd->state == HC_STATE_SUSPENDED &&
- device_may_wakeup(&dev->dev));
- w = pci_wake_from_d3(dev, wake);
- if (w < 0)
- wake = w;
- dev_dbg(&dev->dev, "wakeup: %d\n", wake);
-
- /* Don't change state if we don't need to */
- if (message.event == PM_EVENT_FREEZE ||
- message.event == PM_EVENT_PRETHAW) {
- dev_dbg(&dev->dev, "--> no state change\n");
- goto done;
- }
+ if (hcd->state == HC_STATE_HALT)
+ device_set_wakeup_enable(dev, 0);
+ dev_dbg(dev, "wakeup: %d\n", device_may_wakeup(dev));
- has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
- if (!has_pci_pm) {
- dev_dbg(&dev->dev, "--> PCI D0 legacy\n");
+ /* Possibly enable remote wakeup,
+ * choose the appropriate low-power state, and go to that state.
+ */
+ retval = pci_prepare_to_sleep(pci_dev);
+ if (retval == -EIO) { /* Low-power not supported */
+ dev_dbg(dev, "--> PCI D0 legacy\n");
+ retval = 0;
+ } else if (retval == 0) {
+ dev_dbg(dev, "--> PCI %s\n",
+ pci_power_name(pci_dev->current_state));
} else {
-
- /* NOTE: dev->current_state becomes nonzero only here, and
- * only for devices that support PCI PM. Also, exiting
- * PCI_D3 (but not PCI_D1 or PCI_D2) is allowed to reset
- * some device state (e.g. as part of clock reinit).
- */
- retval = pci_set_power_state(dev, PCI_D3hot);
- suspend_report_result(pci_set_power_state, retval);
- if (retval == 0) {
- dev_dbg(&dev->dev, "--> PCI D3\n");
- } else {
- dev_dbg(&dev->dev, "PCI D3 suspend fail, %d\n",
- retval);
- pci_restore_state(dev);
- }
+ suspend_report_result(pci_prepare_to_sleep, retval);
+ return retval;
}
#ifdef CONFIG_PPC_PMAC
- if (retval == 0) {
- /* Disable ASIC clocks for USB */
- if (machine_is(powermac)) {
- struct device_node *of_node;
-
- of_node = pci_device_to_OF_node(dev);
- if (of_node)
- pmac_call_feature(PMAC_FTR_USB_ENABLE,
- of_node, 0, 0);
- }
+ /* Disable ASIC clocks for USB */
+ if (machine_is(powermac)) {
+ struct device_node *of_node;
+
+ of_node = pci_device_to_OF_node(pci_dev);
+ if (of_node)
+ pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, 0);
}
#endif
-
- done:
return retval;
}
-EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend);
-/**
- * usb_hcd_pci_resume - power management resume of a PCI-based HCD
- * @dev: USB Host Controller being resumed
- *
- * Store this function in the HCD's struct pci_driver as .resume.
- */
-int usb_hcd_pci_resume(struct pci_dev *dev)
+static int hcd_pci_resume_noirq(struct device *dev)
{
- struct usb_hcd *hcd;
- int retval;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PPC_PMAC
/* Reenable ASIC clocks for USB */
if (machine_is(powermac)) {
struct device_node *of_node;
- of_node = pci_device_to_OF_node(dev);
+ of_node = pci_device_to_OF_node(pci_dev);
if (of_node)
pmac_call_feature(PMAC_FTR_USB_ENABLE,
of_node, 0, 1);
}
#endif
- pci_restore_state(dev);
+ /* Go back to D0 and disable remote wakeup */
+ pci_back_from_sleep(pci_dev);
+ return 0;
+}
+
+static int resume_common(struct device *dev, bool hibernated)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
+ int retval;
- hcd = pci_get_drvdata(dev);
if (hcd->state != HC_STATE_SUSPENDED) {
- dev_dbg(hcd->self.controller,
- "can't resume, not suspended!\n");
+ dev_dbg(dev, "can't resume, not suspended!\n");
return 0;
}
- pci_enable_wake(dev, PCI_D0, false);
-
- retval = pci_enable_device(dev);
+ retval = pci_enable_device(pci_dev);
if (retval < 0) {
- dev_err(&dev->dev, "can't re-enable after resume, %d!\n",
- retval);
+ dev_err(dev, "can't re-enable after resume, %d!\n", retval);
return retval;
}
- pci_set_master(dev);
-
- /* yes, ignore this result too... */
- (void) pci_wake_from_d3(dev, 0);
+ pci_set_master(pci_dev);
clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
if (hcd->driver->pci_resume) {
- retval = hcd->driver->pci_resume(hcd);
+ retval = hcd->driver->pci_resume(hcd, hibernated);
if (retval) {
- dev_err(hcd->self.controller,
- "PCI post-resume error %d!\n", retval);
+ dev_err(dev, "PCI post-resume error %d!\n", retval);
usb_hc_died(hcd);
}
}
return retval;
}
-EXPORT_SYMBOL_GPL(usb_hcd_pci_resume);
-#endif /* CONFIG_PM */
-
-/**
- * usb_hcd_pci_shutdown - shutdown host controller
- * @dev: USB Host Controller being shutdown
- */
-void usb_hcd_pci_shutdown(struct pci_dev *dev)
+static int hcd_pci_resume(struct device *dev)
{
- struct usb_hcd *hcd;
-
- hcd = pci_get_drvdata(dev);
- if (!hcd)
- return;
+ return resume_common(dev, false);
+}
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
+static int hcd_pci_restore(struct device *dev)
+{
+ return resume_common(dev, true);
}
-EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
+struct dev_pm_ops usb_hcd_pci_pm_ops = {
+ .suspend = hcd_pci_suspend,
+ .suspend_noirq = hcd_pci_suspend_noirq,
+ .resume_noirq = hcd_pci_resume_noirq,
+ .resume = hcd_pci_resume,
+ .freeze = check_root_hub_suspended,
+ .freeze_noirq = check_root_hub_suspended,
+ .thaw_noirq = NULL,
+ .thaw = NULL,
+ .poweroff = hcd_pci_suspend,
+ .poweroff_noirq = hcd_pci_suspend_noirq,
+ .restore_noirq = hcd_pci_resume_noirq,
+ .restore = hcd_pci_restore,
+};
+EXPORT_SYMBOL_GPL(usb_hcd_pci_pm_ops);
+
+#endif /* CONFIG_PM_SLEEP */
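
The per-PCI-driver suspend/resume entry points are gone; bus glue now points its struct pci_driver at the shared dev_pm_ops table, so freeze/thaw/poweroff/restore get distinct handling without extra code in each driver. A sketch of what EHCI/OHCI/xHCI style glue would declare (not part of the patch; the foo_* names are placeholders):

#include <linux/pci.h>
#include <linux/usb.h>
#include "hcd.h"

static const struct pci_device_id foo_pci_ids[] = {
        /* PCI_DEVICE() entries; usb_hcd_pci_probe() expects the matching
         * hc_driver in .driver_data */
        { }     /* terminator */
};

static struct pci_driver foo_hcd_pci_driver = {
        .name           = "foo-hcd",
        .id_table       = foo_pci_ids,
        .probe          = usb_hcd_pci_probe,
        .remove         = usb_hcd_pci_remove,
        .shutdown       = usb_hcd_pci_shutdown,
#ifdef CONFIG_PM_SLEEP
        .driver         = {
                .pm     = &usb_hcd_pci_pm_ops,
        },
#endif
};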
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 42b93da1085..ce3f453f02e 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -128,6 +128,27 @@ static inline int is_root_hub(struct usb_device *udev)
#define KERNEL_REL ((LINUX_VERSION_CODE >> 16) & 0x0ff)
#define KERNEL_VER ((LINUX_VERSION_CODE >> 8) & 0x0ff)
+/* usb 3.0 root hub device descriptor */
+static const u8 usb3_rh_dev_descriptor[18] = {
+ 0x12, /* __u8 bLength; */
+ 0x01, /* __u8 bDescriptorType; Device */
+ 0x00, 0x03, /* __le16 bcdUSB; v3.0 */
+
+ 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
+ 0x00, /* __u8 bDeviceSubClass; */
+ 0x03, /* __u8 bDeviceProtocol; USB 3.0 hub */
+ 0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */
+
+ 0x6b, 0x1d, /* __le16 idVendor; Linux Foundation */
+ 0x02, 0x00, /* __le16 idProduct; device 0x0002 */
+ KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
+
+ 0x03, /* __u8 iManufacturer; */
+ 0x02, /* __u8 iProduct; */
+ 0x01, /* __u8 iSerialNumber; */
+ 0x01 /* __u8 bNumConfigurations; */
+};
+
/* usb 2.0 root hub device descriptor */
static const u8 usb2_rh_dev_descriptor [18] = {
0x12, /* __u8 bLength; */
@@ -273,6 +294,47 @@ static const u8 hs_rh_config_descriptor [] = {
0x0c /* __u8 ep_bInterval; (256ms -- usb 2.0 spec) */
};
+static const u8 ss_rh_config_descriptor[] = {
+ /* one configuration */
+ 0x09, /* __u8 bLength; */
+ 0x02, /* __u8 bDescriptorType; Configuration */
+ 0x19, 0x00, /* __le16 wTotalLength; FIXME */
+ 0x01, /* __u8 bNumInterfaces; (1) */
+ 0x01, /* __u8 bConfigurationValue; */
+ 0x00, /* __u8 iConfiguration; */
+ 0xc0, /* __u8 bmAttributes;
+ Bit 7: must be set,
+ 6: Self-powered,
+ 5: Remote wakeup,
+ 4..0: resvd */
+ 0x00, /* __u8 MaxPower; */
+
+ /* one interface */
+ 0x09, /* __u8 if_bLength; */
+ 0x04, /* __u8 if_bDescriptorType; Interface */
+ 0x00, /* __u8 if_bInterfaceNumber; */
+ 0x00, /* __u8 if_bAlternateSetting; */
+ 0x01, /* __u8 if_bNumEndpoints; */
+ 0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */
+ 0x00, /* __u8 if_bInterfaceSubClass; */
+ 0x00, /* __u8 if_bInterfaceProtocol; */
+ 0x00, /* __u8 if_iInterface; */
+
+ /* one endpoint (status change endpoint) */
+ 0x07, /* __u8 ep_bLength; */
+ 0x05, /* __u8 ep_bDescriptorType; Endpoint */
+ 0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
+ 0x03, /* __u8 ep_bmAttributes; Interrupt */
+ /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
+ * see hub.c:hub_configure() for details. */
+ (USB_MAXCHILDREN + 1 + 7) / 8, 0x00,
+ 0x0c /* __u8 ep_bInterval; (256ms -- usb 2.0 spec) */
+ /*
+ * All 3.0 hubs should have an endpoint companion descriptor,
+ * but we're ignoring that for now. FIXME?
+ */
+};
+
/*-------------------------------------------------------------------------*/
/*
@@ -426,23 +488,39 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
switch (wValue & 0xff00) {
case USB_DT_DEVICE << 8:
- if (hcd->driver->flags & HCD_USB2)
+ switch (hcd->driver->flags & HCD_MASK) {
+ case HCD_USB3:
+ bufp = usb3_rh_dev_descriptor;
+ break;
+ case HCD_USB2:
bufp = usb2_rh_dev_descriptor;
- else if (hcd->driver->flags & HCD_USB11)
+ break;
+ case HCD_USB11:
bufp = usb11_rh_dev_descriptor;
- else
+ break;
+ default:
goto error;
+ }
len = 18;
if (hcd->has_tt)
patch_protocol = 1;
break;
case USB_DT_CONFIG << 8:
- if (hcd->driver->flags & HCD_USB2) {
+ switch (hcd->driver->flags & HCD_MASK) {
+ case HCD_USB3:
+ bufp = ss_rh_config_descriptor;
+ len = sizeof ss_rh_config_descriptor;
+ break;
+ case HCD_USB2:
bufp = hs_rh_config_descriptor;
len = sizeof hs_rh_config_descriptor;
- } else {
+ break;
+ case HCD_USB11:
bufp = fs_rh_config_descriptor;
len = sizeof fs_rh_config_descriptor;
+ break;
+ default:
+ goto error;
}
if (device_can_wakeup(&hcd->self.root_hub->dev))
patch_wakeup = 1;
@@ -755,23 +833,6 @@ static struct attribute_group usb_bus_attr_group = {
/*-------------------------------------------------------------------------*/
-static struct class *usb_host_class;
-
-int usb_host_init(void)
-{
- int retval = 0;
-
- usb_host_class = class_create(THIS_MODULE, "usb_host");
- if (IS_ERR(usb_host_class))
- retval = PTR_ERR(usb_host_class);
- return retval;
-}
-
-void usb_host_cleanup(void)
-{
- class_destroy(usb_host_class);
-}
-
/**
* usb_bus_init - shared initialization code
* @bus: the bus structure being initialized
@@ -818,12 +879,6 @@ static int usb_register_bus(struct usb_bus *bus)
set_bit (busnum, busmap.busmap);
bus->busnum = busnum;
- bus->dev = device_create(usb_host_class, bus->controller, MKDEV(0, 0),
- bus, "usb_host%d", busnum);
- result = PTR_ERR(bus->dev);
- if (IS_ERR(bus->dev))
- goto error_create_class_dev;
-
/* Add it to the local list of buses */
list_add (&bus->bus_list, &usb_bus_list);
mutex_unlock(&usb_bus_list_lock);
@@ -834,8 +889,6 @@ static int usb_register_bus(struct usb_bus *bus)
"number %d\n", bus->busnum);
return 0;
-error_create_class_dev:
- clear_bit(busnum, busmap.busmap);
error_find_busnum:
mutex_unlock(&usb_bus_list_lock);
return result;
@@ -865,8 +918,6 @@ static void usb_deregister_bus (struct usb_bus *bus)
usb_notify_remove_bus(bus);
clear_bit (bus->busnum, busmap.busmap);
-
- device_unregister(bus->dev);
}
/**
@@ -1199,7 +1250,8 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
/* Map the URB's buffers for DMA access.
* Lower level HCD code should use *_dma exclusively,
- * unless it uses pio or talks to another transport.
+ * unless it uses pio or talks to another transport,
+ * or uses the provided scatter gather list for bulk.
*/
if (is_root_hub(urb->dev))
return 0;
@@ -1520,6 +1572,92 @@ rescan:
}
}
+/* Check whether a new configuration or alt setting for an interface
+ * will exceed the bandwidth for the bus (or the host controller resources).
+ * Only pass in a non-NULL config or interface, not both!
+ * Passing NULL for both new_config and new_intf means the device will be
+ * de-configured by issuing a set configuration 0 command.
+ */
+int usb_hcd_check_bandwidth(struct usb_device *udev,
+ struct usb_host_config *new_config,
+ struct usb_interface *new_intf)
+{
+ int num_intfs, i, j;
+ struct usb_interface_cache *intf_cache;
+ struct usb_host_interface *alt = 0;
+ int ret = 0;
+ struct usb_hcd *hcd;
+ struct usb_host_endpoint *ep;
+
+ hcd = bus_to_hcd(udev->bus);
+ if (!hcd->driver->check_bandwidth)
+ return 0;
+
+ /* Configuration is being removed - set configuration 0 */
+ if (!new_config && !new_intf) {
+ for (i = 1; i < 16; ++i) {
+ ep = udev->ep_out[i];
+ if (ep)
+ hcd->driver->drop_endpoint(hcd, udev, ep);
+ ep = udev->ep_in[i];
+ if (ep)
+ hcd->driver->drop_endpoint(hcd, udev, ep);
+ }
+ hcd->driver->check_bandwidth(hcd, udev);
+ return 0;
+ }
+ /* Check if the HCD says there's enough bandwidth. Enable all endpoints
+ * in each interface's alt setting 0 and ask the HCD to check the bandwidth
REPLACED_BY_GR_REPLACE
+ * of the bus. There will always be bandwidth for endpoint 0, so it's
+ * ok to exclude it.
+ */
+ if (new_config) {
+ num_intfs = new_config->desc.bNumInterfaces;
+ /* Remove endpoints (except endpoint 0, which is always on the
+ * schedule) in the old config from the schedule
+ */
+ for (i = 1; i < 16; ++i) {
+ ep = udev->ep_out[i];
+ if (ep) {
+ ret = hcd->driver->drop_endpoint(hcd, udev, ep);
+ if (ret < 0)
+ goto reset;
+ }
+ ep = udev->ep_in[i];
+ if (ep) {
+ ret = hcd->driver->drop_endpoint(hcd, udev, ep);
+ if (ret < 0)
+ goto reset;
+ }
+ }
+ for (i = 0; i < num_intfs; ++i) {
+
+ /* Dig the endpoints for alt setting 0 out of the
+ * interface cache for this interface
+ */
+ intf_cache = new_config->intf_cache[i];
+ for (j = 0; j < intf_cache->num_altsetting; j++) {
+ if (intf_cache->altsetting[j].desc.bAlternateSetting == 0)
+ alt = &intf_cache->altsetting[j];
+ }
+ if (!alt) {
+ printk(KERN_DEBUG "Did not find alt setting 0 for intf %d\n", i);
+ continue;
+ }
+ for (j = 0; j < alt->desc.bNumEndpoints; j++) {
+ ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
+ if (ret < 0)
+ goto reset;
+ }
+ }
+ }
+ ret = hcd->driver->check_bandwidth(hcd, udev);
+reset:
+ if (ret < 0)
+ hcd->driver->reset_bandwidth(hcd, udev);
+ return ret;
+}
+
/* Disables the endpoint: synchronizes with the hcd to make sure all
* endpoint state is gone from hardware. usb_hcd_flush_endpoint() must
* have been called previously. Use for set_configuration, set_interface,
@@ -1897,8 +2035,20 @@ int usb_add_hcd(struct usb_hcd *hcd,
retval = -ENOMEM;
goto err_allocate_root_hub;
}
- rhdev->speed = (hcd->driver->flags & HCD_USB2) ? USB_SPEED_HIGH :
- USB_SPEED_FULL;
+
+ switch (hcd->driver->flags & HCD_MASK) {
+ case HCD_USB11:
+ rhdev->speed = USB_SPEED_FULL;
+ break;
+ case HCD_USB2:
+ rhdev->speed = USB_SPEED_HIGH;
+ break;
+ case HCD_USB3:
+ rhdev->speed = USB_SPEED_SUPER;
+ break;
+ default:
+ goto err_allocate_root_hub;
+ }
hcd->self.root_hub = rhdev;
/* wakeup flag init defaults to "everything works" for root hubs,
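
usb_hcd_check_bandwidth() is the core-side entry point for the new hooks: callers hand it either a whole configuration or a single interface, or neither to deconfigure, and treat a negative return as "the host controller cannot fit this", with the device state left untouched. A sketch of the calling pattern usb_set_configuration() adopts later in this diff (not part of the patch):

#include <linux/usb.h>
#include "hcd.h"

/* Illustrative only; mirrors the message.c hunk below. */
static int try_new_config(struct usb_device *udev, struct usb_host_config *cp)
{
        int ret;

        /* cp == NULL deconfigures: every non-zero endpoint is dropped. */
        ret = usb_hcd_check_bandwidth(udev, cp, NULL);
        if (ret < 0)
                return ret;     /* not enough bandwidth or HC resources */

        /* ...issue the SET_CONFIGURATION control request here... */
        return 0;
}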
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index e7d4479de41..d397ecfd5b1 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -173,6 +173,8 @@ struct hc_driver {
#define HCD_LOCAL_MEM 0x0002 /* HC needs local memory */
#define HCD_USB11 0x0010 /* USB 1.1 */
#define HCD_USB2 0x0020 /* USB 2.0 */
+#define HCD_USB3 0x0040 /* USB 3.0 */
+#define HCD_MASK 0x0070
/* called to init HCD and root hub */
int (*reset) (struct usb_hcd *hcd);
@@ -182,10 +184,10 @@ struct hc_driver {
* a whole, not just the root hub; they're for PCI bus glue.
*/
/* called after suspending the hub, before entering D3 etc */
- int (*pci_suspend) (struct usb_hcd *hcd, pm_message_t message);
+ int (*pci_suspend)(struct usb_hcd *hcd);
/* called after entering D0 (etc), before resuming the hub */
- int (*pci_resume) (struct usb_hcd *hcd);
+ int (*pci_resume)(struct usb_hcd *hcd, bool hibernated);
/* cleanly make HCD stop writing memory and doing I/O */
void (*stop) (struct usb_hcd *hcd);
@@ -224,6 +226,43 @@ struct hc_driver {
void (*relinquish_port)(struct usb_hcd *, int);
/* has a port been handed over to a companion? */
int (*port_handed_over)(struct usb_hcd *, int);
+
+ /* xHCI specific functions */
+ /* Called by usb_alloc_dev to alloc HC device structures */
+ int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
+ /* Called by usb_release_dev to free HC device structures */
+ void (*free_dev)(struct usb_hcd *, struct usb_device *);
+
+ /* Bandwidth computation functions */
+ /* Note that add_endpoint() can only be called once per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * drop_endpoint() can only be called once per endpoint also.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ */
+ /* Allocate endpoint resources and add them to a new schedule */
+ int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
+ /* Drop an endpoint from a new schedule */
+ int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
+ /* Check that a new hardware configuration, set using
+ * endpoint_enable and endpoint_disable, does not exceed bus
+ * bandwidth. This must be called before any set configuration
+ * or set interface requests are sent to the device.
+ */
+ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
+ /* Reset the device schedule to the last known good schedule,
+ * which was set from a previous successful call to
+ * check_bandwidth(). This reverts any add_endpoint() and
+ * drop_endpoint() calls since that last successful call.
+ * Used for when a check_bandwidth() call fails due to resource
+ * or bandwidth constraints.
+ */
+ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
+ /* Returns the hardware-chosen device address */
+ int (*address_device)(struct usb_hcd *, struct usb_device *udev);
};
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -242,6 +281,9 @@ extern void usb_hcd_disable_endpoint(struct usb_device *udev,
extern void usb_hcd_reset_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep);
extern void usb_hcd_synchronize_unlinks(struct usb_device *udev);
+extern int usb_hcd_check_bandwidth(struct usb_device *udev,
+ struct usb_host_config *new_config,
+ struct usb_interface *new_intf);
extern int usb_hcd_get_frame_number(struct usb_device *udev);
extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
@@ -261,14 +303,11 @@ struct pci_device_id;
extern int usb_hcd_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id);
extern void usb_hcd_pci_remove(struct pci_dev *dev);
-
-#ifdef CONFIG_PM
-extern int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t msg);
-extern int usb_hcd_pci_resume(struct pci_dev *dev);
-#endif /* CONFIG_PM */
-
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+#ifdef CONFIG_PM_SLEEP
+extern struct dev_pm_ops usb_hcd_pci_pm_ops;
+#endif
#endif /* CONFIG_PCI */
/* pci-ish (pdev null is ok) buffer alloc/mapping support */
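
Host controller drivers advertise the new bus generation with HCD_USB3 in hc_driver.flags and, for xHCI, fill in the device-slot and bandwidth hooks declared above. A minimal sketch of such a declaration (not part of the patch; the xhci_sketch_* names are placeholders with the signatures required above, and HCD_MEMORY is the pre-existing memory-mapped-registers flag):

#include <linux/usb.h>
#include "hcd.h"

static int  xhci_sketch_alloc_dev(struct usb_hcd *, struct usb_device *);
static void xhci_sketch_free_dev(struct usb_hcd *, struct usb_device *);
static int  xhci_sketch_add_ep(struct usb_hcd *, struct usb_device *,
                               struct usb_host_endpoint *);
static int  xhci_sketch_drop_ep(struct usb_hcd *, struct usb_device *,
                                struct usb_host_endpoint *);
static int  xhci_sketch_check_bw(struct usb_hcd *, struct usb_device *);
static void xhci_sketch_reset_bw(struct usb_hcd *, struct usb_device *);
static int  xhci_sketch_address_dev(struct usb_hcd *, struct usb_device *);

static const struct hc_driver xhci_sketch_hc_driver = {
        .description     = "xhci-hcd",
        .flags           = HCD_MEMORY | HCD_USB3,

        .alloc_dev       = xhci_sketch_alloc_dev,
        .free_dev        = xhci_sketch_free_dev,
        .add_endpoint    = xhci_sketch_add_ep,
        .drop_endpoint   = xhci_sketch_drop_ep,
        .check_bandwidth = xhci_sketch_check_bw,
        .reset_bandwidth = xhci_sketch_reset_bw,
        .address_device  = xhci_sketch_address_dev,
        /* ...plus the usual urb_enqueue/dequeue, start/stop, hub ops... */
};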
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index be86ae3f408..2af3b4f0605 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -155,6 +155,8 @@ static inline char *portspeed(int portstatus)
return "480 Mb/s";
else if (portstatus & (1 << USB_PORT_FEAT_LOWSPEED))
return "1.5 Mb/s";
+ else if (portstatus & (1 << USB_PORT_FEAT_SUPERSPEED))
+ return "5.0 Gb/s";
else
return "12 Mb/s";
}
@@ -457,13 +459,13 @@ static void hub_tt_kevent (struct work_struct *work)
spin_lock_irqsave (&hub->tt.lock, flags);
while (--limit && !list_empty (&hub->tt.clear_list)) {
- struct list_head *temp;
+ struct list_head *next;
struct usb_tt_clear *clear;
struct usb_device *hdev = hub->hdev;
int status;
- temp = hub->tt.clear_list.next;
- clear = list_entry (temp, struct usb_tt_clear, clear_list);
+ next = hub->tt.clear_list.next;
+ clear = list_entry (next, struct usb_tt_clear, clear_list);
list_del (&clear->clear_list);
/* drop lock so HCD can concurrently report other TT errors */
@@ -951,6 +953,9 @@ static int hub_configure(struct usb_hub *hub,
ret);
hub->tt.hub = hdev;
break;
+ case 3:
+ /* USB 3.0 hubs don't have a TT */
+ break;
default:
dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
hdev->descriptor.bDeviceProtocol);
@@ -1323,6 +1328,11 @@ EXPORT_SYMBOL_GPL(usb_set_device_state);
* 0 is reserved by USB for default address; (b) Linux's USB stack
* uses always #1 for the root hub of the controller. So USB stack's
* port #1, which is wusb virtual-port #0 has address #2.
+ *
+ * Devices connected under xHCI are not as simple. The host controller
+ * supports virtualization, so the hardware assigns device addresses and
+ * the HCD must set up data structures before issuing a set address
+ * command to the hardware.
*/
static void choose_address(struct usb_device *udev)
{
@@ -1642,6 +1652,9 @@ int usb_new_device(struct usb_device *udev)
err = usb_configure_device(udev); /* detect & probe dev/intfs */
if (err < 0)
goto fail;
+ dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
+ udev->devnum, udev->bus->busnum,
+ (((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
/* export the usbdev device-node for libusb */
udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
(((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
@@ -2395,19 +2408,29 @@ EXPORT_SYMBOL_GPL(usb_ep0_reinit);
static int hub_set_address(struct usb_device *udev, int devnum)
{
int retval;
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
- if (devnum <= 1)
+ /*
+ * The host controller will choose the device address,
+ * instead of the core having chosen it earlier
+ */
+ if (!hcd->driver->address_device && devnum <= 1)
return -EINVAL;
if (udev->state == USB_STATE_ADDRESS)
return 0;
if (udev->state != USB_STATE_DEFAULT)
return -EINVAL;
- retval = usb_control_msg(udev, usb_sndaddr0pipe(),
- USB_REQ_SET_ADDRESS, 0, devnum, 0,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (hcd->driver->address_device) {
+ retval = hcd->driver->address_device(hcd, udev);
+ } else {
+ retval = usb_control_msg(udev, usb_sndaddr0pipe(),
+ USB_REQ_SET_ADDRESS, 0, devnum, 0,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (retval == 0)
+ update_address(udev, devnum);
+ }
if (retval == 0) {
/* Device now using proper address. */
- update_address(udev, devnum);
usb_set_device_state(udev, USB_STATE_ADDRESS);
usb_ep0_reinit(udev);
}
@@ -2430,6 +2453,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
static DEFINE_MUTEX(usb_address0_mutex);
struct usb_device *hdev = hub->hdev;
+ struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
int i, j, retval;
unsigned delay = HUB_SHORT_RESET_TIME;
enum usb_device_speed oldspeed = udev->speed;
@@ -2452,11 +2476,24 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
mutex_lock(&usb_address0_mutex);
- /* Reset the device; full speed may morph to high speed */
- retval = hub_port_reset(hub, port1, udev, delay);
- if (retval < 0) /* error or disconnect */
+ if ((hcd->driver->flags & HCD_USB3) && udev->config) {
+ /* FIXME this will need special handling by the xHCI driver. */
+ dev_dbg(&udev->dev,
+ "xHCI reset of configured device "
+ "not supported yet.\n");
+ retval = -EINVAL;
goto fail;
- /* success, speed is known */
+ } else if (!udev->config && oldspeed == USB_SPEED_SUPER) {
+ /* Don't reset USB 3.0 devices during an initial setup */
+ usb_set_device_state(udev, USB_STATE_DEFAULT);
+ } else {
+ /* Reset the device; full speed may morph to high speed */
+ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+ retval = hub_port_reset(hub, port1, udev, delay);
+ if (retval < 0) /* error or disconnect */
+ goto fail;
+ /* success, speed is known */
+ }
retval = -ENODEV;
if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
@@ -2471,6 +2508,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
* reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
*/
switch (udev->speed) {
+ case USB_SPEED_SUPER:
case USB_SPEED_VARIABLE: /* fixed at 512 */
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
break;
@@ -2496,16 +2534,20 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
case USB_SPEED_LOW: speed = "low"; break;
case USB_SPEED_FULL: speed = "full"; break;
case USB_SPEED_HIGH: speed = "high"; break;
+ case USB_SPEED_SUPER:
+ speed = "super";
+ break;
case USB_SPEED_VARIABLE:
speed = "variable";
type = "Wireless ";
break;
default: speed = "?"; break;
}
- dev_info (&udev->dev,
- "%s %s speed %sUSB device using %s and address %d\n",
- (udev->config) ? "reset" : "new", speed, type,
- udev->bus->controller->driver->name, devnum);
+ if (udev->speed != USB_SPEED_SUPER)
+ dev_info(&udev->dev,
+ "%s %s speed %sUSB device using %s and address %d\n",
+ (udev->config) ? "reset" : "new", speed, type,
+ udev->bus->controller->driver->name, devnum);
/* Set up TT records, if needed */
if (hdev->tt) {
@@ -2530,7 +2572,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
* value.
*/
for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
- if (USE_NEW_SCHEME(retry_counter)) {
+ /*
+ * An xHCI controller cannot send any packets to a device until
+ * a set address command successfully completes.
+ */
+ if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
struct usb_device_descriptor *buf;
int r = 0;
@@ -2596,7 +2642,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
* unauthorized address in the Connect Ack sequence;
* authorization will assign the final address.
*/
- if (udev->wusb == 0) {
+ if (udev->wusb == 0) {
for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
retval = hub_set_address(udev, devnum);
if (retval >= 0)
@@ -2609,13 +2655,20 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
devnum, retval);
goto fail;
}
+ if (udev->speed == USB_SPEED_SUPER) {
+ devnum = udev->devnum;
+ dev_info(&udev->dev,
+ "%s SuperSpeed USB device using %s and address %d\n",
+ (udev->config) ? "reset" : "new",
+ udev->bus->controller->driver->name, devnum);
+ }
/* cope with hardware quirkiness:
* - let SET_ADDRESS settle, some device hardware wants it
* - read ep0 maxpacket even for high and low speed,
*/
msleep(10);
- if (USE_NEW_SCHEME(retry_counter))
+ if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3))
break;
}
@@ -2634,8 +2687,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
if (retval)
goto fail;
- i = udev->descriptor.bMaxPacketSize0 == 0xff? /* wusb device? */
- 512 : udev->descriptor.bMaxPacketSize0;
+ if (udev->descriptor.bMaxPacketSize0 == 0xff ||
+ udev->speed == USB_SPEED_SUPER)
+ i = 512;
+ else
+ i = udev->descriptor.bMaxPacketSize0;
if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) {
if (udev->speed != USB_SPEED_FULL ||
!(i == 8 || i == 16 || i == 32 || i == 64)) {
@@ -2847,19 +2903,41 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
}
usb_set_device_state(udev, USB_STATE_POWERED);
- udev->speed = USB_SPEED_UNKNOWN;
udev->bus_mA = hub->mA_per_port;
udev->level = hdev->level + 1;
udev->wusb = hub_is_wusb(hub);
- /* set the address */
- choose_address(udev);
- if (udev->devnum <= 0) {
- status = -ENOTCONN; /* Don't retry */
- goto loop;
+ /*
+ * USB 3.0 devices are reset automatically before the connect
+ * port status change appears, and the root hub port status
+ * shows the correct speed. We also get port change
+ * notifications for USB 3.0 devices from the USB 3.0 portion of
+ * an external USB 3.0 hub, but this isn't handled correctly yet
+ * FIXME.
+ */
+
+ if (!(hcd->driver->flags & HCD_USB3))
+ udev->speed = USB_SPEED_UNKNOWN;
+ else if ((hdev->parent == NULL) &&
+ (portstatus & (1 << USB_PORT_FEAT_SUPERSPEED)))
+ udev->speed = USB_SPEED_SUPER;
+ else
+ udev->speed = USB_SPEED_UNKNOWN;
+
+ /*
+ * xHCI needs to issue an address device command later
+ * in the hub_port_init sequence for SS/HS/FS/LS devices.
+ */
+ if (!(hcd->driver->flags & HCD_USB3)) {
+ /* set the address */
+ choose_address(udev);
+ if (udev->devnum <= 0) {
+ status = -ENOTCONN; /* Don't retry */
+ goto loop;
+ }
}
- /* reset and get descriptor */
+ /* reset (non-USB 3.0 devices) and get descriptor */
status = hub_port_init(hub, udev, port1, i);
if (status < 0)
goto loop;
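
For SuperSpeed devices bMaxPacketSize0 is no longer a byte count: USB 3.0 stores an exponent (the root-hub descriptor above encodes 9, i.e. 2^9 = 512 bytes), which is why hub_port_init() now pins ep0 to 512 for SuperSpeed as well as for wireless USB. A sketch of that decode, assuming the USB_SPEED_SUPER enum value introduced by this series (not part of the patch):

#include <linux/usb.h>

/* Illustrative decode of the default-endpoint max packet size. */
static int ep0_maxpacket(const struct usb_device *udev)
{
        if (udev->speed == USB_SPEED_SUPER)
                return 512;     /* descriptor stores the exponent 9 */
        if (udev->descriptor.bMaxPacketSize0 == 0xff)
                return 512;     /* wireless USB convention */
        return udev->descriptor.bMaxPacketSize0;        /* 8/16/32/64 */
}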
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 2a116ce53c9..889c0f32a40 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -47,7 +47,10 @@
#define USB_PORT_FEAT_L1 5 /* L1 suspend */
#define USB_PORT_FEAT_POWER 8
#define USB_PORT_FEAT_LOWSPEED 9
+/* This value was never in Table 11-17 */
#define USB_PORT_FEAT_HIGHSPEED 10
+/* This value is also fake */
+#define USB_PORT_FEAT_SUPERSPEED 11
#define USB_PORT_FEAT_C_CONNECTION 16
#define USB_PORT_FEAT_C_ENABLE 17
#define USB_PORT_FEAT_C_SUSPEND 18
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index dff5760a37f..ffe75e83787 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -39,6 +39,7 @@
#include <linux/parser.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
+#include <linux/smp_lock.h>
#include <asm/byteorder.h>
#include "usb.h"
#include "hcd.h"
@@ -265,9 +266,13 @@ static int remount(struct super_block *sb, int *flags, char *data)
return -EINVAL;
}
+ lock_kernel();
+
if (usbfs_mount && usbfs_mount->mnt_sb)
update_sb(usbfs_mount->mnt_sb);
+ unlock_kernel();
+
return 0;
}
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index b6262837765..2bed83caacb 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/ctype.h>
+#include <linux/nls.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/usb/quirks.h>
@@ -364,6 +365,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
int i;
int urb_flags;
int dma;
+ int use_sg;
if (!io || !dev || !sg
|| usb_pipecontrol(pipe)
@@ -391,7 +393,19 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
if (io->entries <= 0)
return io->entries;
- io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
+ /* If we're running on an xHCI host controller, queue the whole scatter
+ * gather list with one call to urb_enqueue(). This is only for bulk,
+ * as that endpoint type does not care how the data gets broken up
+ * across frames.
+ */
+ if (usb_pipebulk(pipe) &&
+ bus_to_hcd(dev->bus)->driver->flags & HCD_USB3) {
+ io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
+ use_sg = true;
+ } else {
+ io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
+ use_sg = false;
+ }
if (!io->urbs)
goto nomem;
@@ -401,62 +415,92 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
if (usb_pipein(pipe))
urb_flags |= URB_SHORT_NOT_OK;
- for_each_sg(sg, sg, io->entries, i) {
- unsigned len;
-
- io->urbs[i] = usb_alloc_urb(0, mem_flags);
- if (!io->urbs[i]) {
- io->entries = i;
+ if (use_sg) {
+ io->urbs[0] = usb_alloc_urb(0, mem_flags);
+ if (!io->urbs[0]) {
+ io->entries = 0;
goto nomem;
}
- io->urbs[i]->dev = NULL;
- io->urbs[i]->pipe = pipe;
- io->urbs[i]->interval = period;
- io->urbs[i]->transfer_flags = urb_flags;
-
- io->urbs[i]->complete = sg_complete;
- io->urbs[i]->context = io;
-
- /*
- * Some systems need to revert to PIO when DMA is temporarily
- * unavailable. For their sakes, both transfer_buffer and
- * transfer_dma are set when possible. However this can only
- * work on systems without:
- *
- * - HIGHMEM, since DMA buffers located in high memory are
- * not directly addressable by the CPU for PIO;
- *
- * - IOMMU, since dma_map_sg() is allowed to use an IOMMU to
- * make virtually discontiguous buffers be "dma-contiguous"
- * so that PIO and DMA need diferent numbers of URBs.
- *
- * So when HIGHMEM or IOMMU are in use, transfer_buffer is NULL
- * to prevent stale pointers and to help spot bugs.
- */
- if (dma) {
- io->urbs[i]->transfer_dma = sg_dma_address(sg);
- len = sg_dma_len(sg);
+ io->urbs[0]->dev = NULL;
+ io->urbs[0]->pipe = pipe;
+ io->urbs[0]->interval = period;
+ io->urbs[0]->transfer_flags = urb_flags;
+
+ io->urbs[0]->complete = sg_complete;
+ io->urbs[0]->context = io;
+ /* A length of zero means transfer the whole sg list */
+ io->urbs[0]->transfer_buffer_length = length;
+ if (length == 0) {
+ for_each_sg(sg, sg, io->entries, i) {
+ io->urbs[0]->transfer_buffer_length +=
+ sg_dma_len(sg);
+ }
+ }
+ io->urbs[0]->sg = io;
+ io->urbs[0]->num_sgs = io->entries;
+ io->entries = 1;
+ } else {
+ for_each_sg(sg, sg, io->entries, i) {
+ unsigned len;
+
+ io->urbs[i] = usb_alloc_urb(0, mem_flags);
+ if (!io->urbs[i]) {
+ io->entries = i;
+ goto nomem;
+ }
+
+ io->urbs[i]->dev = NULL;
+ io->urbs[i]->pipe = pipe;
+ io->urbs[i]->interval = period;
+ io->urbs[i]->transfer_flags = urb_flags;
+
+ io->urbs[i]->complete = sg_complete;
+ io->urbs[i]->context = io;
+
+ /*
+ * Some systems need to revert to PIO when DMA is
+ * temporarily unavailable. For their sakes, both
+ * transfer_buffer and transfer_dma are set when
+ * possible. However this can only work on systems
+ * without:
+ *
+ * - HIGHMEM, since DMA buffers located in high memory
+ * are not directly addressable by the CPU for PIO;
+ *
+ * - IOMMU, since dma_map_sg() is allowed to use an
+ * IOMMU to make virtually discontiguous buffers be
+ * "dma-contiguous" so that PIO and DMA need diferent
+ * numbers of URBs.
+ *
+ * So when HIGHMEM or IOMMU are in use, transfer_buffer
+ * is NULL to prevent stale pointers and to help spot
+ * bugs.
+ */
+ if (dma) {
+ io->urbs[i]->transfer_dma = sg_dma_address(sg);
+ len = sg_dma_len(sg);
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_GART_IOMMU)
- io->urbs[i]->transfer_buffer = NULL;
+ io->urbs[i]->transfer_buffer = NULL;
#else
- io->urbs[i]->transfer_buffer = sg_virt(sg);
+ io->urbs[i]->transfer_buffer = sg_virt(sg);
#endif
- } else {
- /* hc may use _only_ transfer_buffer */
- io->urbs[i]->transfer_buffer = sg_virt(sg);
- len = sg->length;
- }
+ } else {
+ /* hc may use _only_ transfer_buffer */
+ io->urbs[i]->transfer_buffer = sg_virt(sg);
+ len = sg->length;
+ }
- if (length) {
- len = min_t(unsigned, len, length);
- length -= len;
- if (length == 0)
- io->entries = i + 1;
+ if (length) {
+ len = min_t(unsigned, len, length);
+ length -= len;
+ if (length == 0)
+ io->entries = i + 1;
+ }
+ io->urbs[i]->transfer_buffer_length = len;
}
- io->urbs[i]->transfer_buffer_length = len;
+ io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
}
- io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
/* transaction state */
io->count = io->entries;
@@ -509,6 +553,10 @@ EXPORT_SYMBOL_GPL(usb_sg_init);
* could be transferred. That capability is less useful for low or full
* speed interrupt endpoints, which allow at most one packet per millisecond,
* of at most 8 or 64 bytes (respectively).
+ *
+ * It is not necessary to call this function to reserve bandwidth for devices
+ * under an xHCI host controller, as the bandwidth is reserved when the
+ * configuration or interface alt setting is selected.
*/
void usb_sg_wait(struct usb_sg_request *io)
{
@@ -759,7 +807,7 @@ static int usb_string_sub(struct usb_device *dev, unsigned int langid,
}
/**
- * usb_string - returns ISO 8859-1 version of a string descriptor
+ * usb_string - returns UTF-8 version of a string descriptor
* @dev: the device whose string descriptor is being retrieved
* @index: the number of the descriptor
* @buf: where to put the string
@@ -767,17 +815,10 @@ static int usb_string_sub(struct usb_device *dev, unsigned int langid,
* Context: !in_interrupt ()
*
* This converts the UTF-16LE encoded strings returned by devices, from
- * usb_get_string_descriptor(), to null-terminated ISO-8859-1 encoded ones
- * that are more usable in most kernel contexts. Note that all characters
- * in the chosen descriptor that can't be encoded using ISO-8859-1
- * are converted to the question mark ("?") character, and this function
+ * usb_get_string_descriptor(), to null-terminated UTF-8 encoded ones
+ * that are more usable in most kernel contexts. Note that this function
* chooses strings in the first language supported by the device.
*
- * The ASCII (or, redundantly, "US-ASCII") character set is the seven-bit
- * subset of ISO 8859-1. ISO-8859-1 is the eight-bit subset of Unicode,
- * and is appropriate for use many uses of English and several other
- * Western European languages. (But it doesn't include the "Euro" symbol.)
- *
* This call is synchronous, and may not be used in an interrupt context.
*
* Returns length of the string (>= 0) or usb_control_msg status (< 0).
@@ -786,7 +827,6 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
{
unsigned char *tbuf;
int err;
- unsigned int u, idx;
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
@@ -821,16 +861,9 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
goto errout;
size--; /* leave room for trailing NULL char in output buffer */
- for (idx = 0, u = 2; u < err; u += 2) {
- if (idx >= size)
- break;
- if (tbuf[u+1]) /* high byte */
- buf[idx++] = '?'; /* non ISO-8859-1 character */
- else
- buf[idx++] = tbuf[u];
- }
- buf[idx] = 0;
- err = idx;
+ err = utf16s_to_utf8s((wchar_t *) &tbuf[2], (err - 2) / 2,
+ UTF16_LITTLE_ENDIAN, buf, size);
+ buf[err] = 0;
if (tbuf[1] != USB_DT_STRING)
dev_dbg(&dev->dev,
@@ -843,6 +876,9 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
}
EXPORT_SYMBOL_GPL(usb_string);
+/* one UTF-8-encoded 16-bit character has at most three bytes */
+#define MAX_USB_STRING_SIZE (127 * 3 + 1)
+
/**
* usb_cache_string - read a string descriptor and cache it for later use
* @udev: the device whose string descriptor is being read
@@ -860,9 +896,9 @@ char *usb_cache_string(struct usb_device *udev, int index)
if (index <= 0)
return NULL;
- buf = kmalloc(256, GFP_KERNEL);
+ buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL);
if (buf) {
- len = usb_string(udev, index, buf, 256);
+ len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
if (len > 0) {
smallbuf = kmalloc(++len, GFP_KERNEL);
if (!smallbuf)
@@ -1664,6 +1700,21 @@ free_interfaces:
if (ret)
goto free_interfaces;
+ /* Make sure we have bandwidth (and available HCD resources) for this
+ * configuration. Remove endpoints from the schedule if we're dropping
+ * this configuration to set configuration 0. After this point, the
+ * host controller will not allow submissions to dropped endpoints. If
+ * this call fails, the device state is unchanged.
+ */
+ if (cp)
+ ret = usb_hcd_check_bandwidth(dev, cp, NULL);
+ else
+ ret = usb_hcd_check_bandwidth(dev, NULL, NULL);
+ if (ret < 0) {
+ usb_autosuspend_device(dev);
+ goto free_interfaces;
+ }
+
/* if it's already configured, clear out old state first.
* getting rid of old interfaces means unbinding their drivers.
*/
@@ -1686,6 +1737,7 @@ free_interfaces:
dev->actconfig = cp;
if (!cp) {
usb_set_device_state(dev, USB_STATE_ADDRESS);
+ usb_hcd_check_bandwidth(dev, NULL, NULL);
usb_autosuspend_device(dev);
goto free_interfaces;
}
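
With this change usb_string() goes through the NLS helper utf16s_to_utf8s() and fills the caller's buffer with NUL-terminated UTF-8 instead of ISO 8859-1, so callers should size buffers for up to three bytes per UTF-16 code unit (MAX_USB_STRING_SIZE above is local to message.c). A sketch of a typical caller (not part of the patch; buffer size is illustrative):

#include <linux/usb.h>
#include <linux/device.h>

static void log_product_string(struct usb_device *udev)
{
        char buf[128];          /* receives UTF-8, NUL-terminated */
        int len;

        len = usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf));
        if (len > 0)
                dev_info(&udev->dev, "product: %s\n", buf);
}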
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index c6678919792..b5c72e45894 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -552,8 +552,8 @@ static struct attribute *dev_string_attrs[] = {
static mode_t dev_string_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
- struct usb_device *udev = to_usb_device(
- container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct usb_device *udev = to_usb_device(dev);
if (a == &dev_attr_manufacturer.attr) {
if (udev->manufacturer == NULL)
@@ -585,8 +585,8 @@ static ssize_t
read_descriptors(struct kobject *kobj, struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
- struct usb_device *udev = to_usb_device(
- container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct usb_device *udev = to_usb_device(dev);
size_t nleft = count;
size_t srclen, n;
int cfgno;
@@ -786,8 +786,8 @@ static struct attribute *intf_assoc_attrs[] = {
static mode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
- struct usb_interface *intf = to_usb_interface(
- container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct usb_interface *intf = to_usb_interface(dev);
if (intf->intf_assoc == NULL)
return 0;
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 3376055f36e..0885d4abdc6 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -241,6 +241,12 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
* If the USB subsystem can't allocate sufficient bandwidth to perform
* the periodic request, submitting such a periodic request should fail.
*
+ * For devices under xHCI, the bandwidth is reserved at configuration time, or
+ * when the alt setting is selected. If there is not enough bus bandwidth, the
+ * configuration/alt setting request will fail. Therefore, submissions to
+ * periodic endpoints on devices under xHCI should never fail due to bandwidth
+ * constraints.
+ *
* Device drivers must explicitly request that repetition, by ensuring that
* some URB is always on the endpoint's queue (except possibly for short
* periods during completion callbacks). When there is no longer an urb
@@ -351,6 +357,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
if (xfertype == USB_ENDPOINT_XFER_ISOC) {
int n, len;
+ /* FIXME SuperSpeed isoc endpoints have up to 16 bursts */
/* "high bandwidth" mode, 1-3 packets/uframe? */
if (dev->speed == USB_SPEED_HIGH) {
int mult = 1 + ((max >> 11) & 0x03);
@@ -426,6 +433,11 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
return -EINVAL;
/* too big? */
switch (dev->speed) {
+ case USB_SPEED_SUPER: /* units are 125us */
+ /* Handle up to 2^(16-1) microframes */
+ if (urb->interval > (1 << 15))
+ return -EINVAL;
+ max = 1 << 15;
case USB_SPEED_HIGH: /* units are microframes */
/* NOTE usb handles 2^15 */
if (urb->interval > (1024 * 8))
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 7eee400d3e3..a26f73880c3 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -34,6 +34,7 @@
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/debugfs.h>
#include <asm/io.h>
#include <linux/scatterlist.h>
@@ -139,8 +140,7 @@ static int __find_interface(struct device *dev, void *data)
struct find_interface_arg *arg = data;
struct usb_interface *intf;
- /* can't look at usb devices, only interfaces */
- if (is_usb_device(dev))
+ if (!is_usb_interface(dev))
return 0;
intf = to_usb_interface(dev);
@@ -184,11 +184,16 @@ EXPORT_SYMBOL_GPL(usb_find_interface);
static void usb_release_dev(struct device *dev)
{
struct usb_device *udev;
+ struct usb_hcd *hcd;
udev = to_usb_device(dev);
+ hcd = bus_to_hcd(udev->bus);
usb_destroy_configuration(udev);
- usb_put_hcd(bus_to_hcd(udev->bus));
+ /* Root hubs aren't real devices, so don't free HCD resources */
+ if (hcd->driver->free_dev && udev->parent)
+ hcd->driver->free_dev(hcd, udev);
+ usb_put_hcd(hcd);
kfree(udev->product);
kfree(udev->manufacturer);
kfree(udev->serial);
@@ -305,10 +310,21 @@ static struct dev_pm_ops usb_device_pm_ops = {
#endif /* CONFIG_PM */
+
+static char *usb_nodename(struct device *dev)
+{
+ struct usb_device *usb_dev;
+
+ usb_dev = to_usb_device(dev);
+ return kasprintf(GFP_KERNEL, "bus/usb/%03d/%03d",
+ usb_dev->bus->busnum, usb_dev->devnum);
+}
+
struct device_type usb_device_type = {
.name = "usb_device",
.release = usb_release_dev,
.uevent = usb_dev_uevent,
+ .nodename = usb_nodename,
.pm = &usb_device_pm_ops,
};
@@ -348,6 +364,13 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
kfree(dev);
return NULL;
}
+ /* Root hubs aren't true devices, so don't allocate HCD resources */
+ if (usb_hcd->driver->alloc_dev && parent &&
+ !usb_hcd->driver->alloc_dev(usb_hcd, dev)) {
+ usb_put_hcd(bus_to_hcd(bus));
+ kfree(dev);
+ return NULL;
+ }
device_initialize(&dev->dev);
dev->dev.bus = &usb_bus_type;
@@ -375,18 +398,24 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
*/
if (unlikely(!parent)) {
dev->devpath[0] = '0';
+ dev->route = 0;
dev->dev.parent = bus->controller;
dev_set_name(&dev->dev, "usb%d", bus->busnum);
root_hub = 1;
} else {
/* match any labeling on the hubs; it's one-based */
- if (parent->devpath[0] == '0')
+ if (parent->devpath[0] == '0') {
snprintf(dev->devpath, sizeof dev->devpath,
"%d", port1);
- else
+ /* Root ports are not counted in route string */
+ dev->route = 0;
+ } else {
snprintf(dev->devpath, sizeof dev->devpath,
"%s.%d", parent->devpath, port1);
+ dev->route = parent->route +
+ (port1 << ((parent->level - 1)*4));
+ }
dev->dev.parent = &parent->dev;
dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);
@@ -799,12 +828,12 @@ void usb_buffer_dmasync(struct urb *urb)
return;
if (controller->dma_mask) {
- dma_sync_single(controller,
+ dma_sync_single_for_cpu(controller,
urb->transfer_dma, urb->transfer_buffer_length,
usb_pipein(urb->pipe)
? DMA_FROM_DEVICE : DMA_TO_DEVICE);
if (usb_pipecontrol(urb->pipe))
- dma_sync_single(controller,
+ dma_sync_single_for_cpu(controller,
urb->setup_dma,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
@@ -922,8 +951,8 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
|| !controller->dma_mask)
return;
- dma_sync_sg(controller, sg, n_hw_ents,
- is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ dma_sync_sg_for_cpu(controller, sg, n_hw_ents,
+ is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
#endif
@@ -1001,6 +1030,35 @@ static struct notifier_block usb_bus_nb = {
.notifier_call = usb_bus_notify,
};
+struct dentry *usb_debug_root;
+EXPORT_SYMBOL_GPL(usb_debug_root);
+
+struct dentry *usb_debug_devices;
+
+static int usb_debugfs_init(void)
+{
+ usb_debug_root = debugfs_create_dir("usb", NULL);
+ if (!usb_debug_root)
+ return -ENOENT;
+
+ usb_debug_devices = debugfs_create_file("devices", 0444,
+ usb_debug_root, NULL,
+ &usbfs_devices_fops);
+ if (!usb_debug_devices) {
+ debugfs_remove(usb_debug_root);
+ usb_debug_root = NULL;
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static void usb_debugfs_cleanup(void)
+{
+ debugfs_remove(usb_debug_devices);
+ debugfs_remove(usb_debug_root);
+}
+
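
For context (illustration only, not part of the patch): with debugfs mounted at its conventional location, the file registered above appears as /sys/kernel/debug/usb/devices. A small userspace sketch to dump it:

	/* Userspace sketch; assumes debugfs is mounted at /sys/kernel/debug. */
	#include <stdio.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/sys/kernel/debug/usb/devices", "r");

		if (!f) {
			perror("usb debugfs devices");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
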
/*
* Init
*/
@@ -1012,6 +1070,10 @@ static int __init usb_init(void)
return 0;
}
+ retval = usb_debugfs_init();
+ if (retval)
+ goto out;
+
retval = ksuspend_usb_init();
if (retval)
goto out;
@@ -1021,9 +1083,6 @@ static int __init usb_init(void)
retval = bus_register_notifier(&usb_bus_type, &usb_bus_nb);
if (retval)
goto bus_notifier_failed;
- retval = usb_host_init();
- if (retval)
- goto host_init_failed;
retval = usb_major_init();
if (retval)
goto major_init_failed;
@@ -1053,8 +1112,6 @@ usb_devio_init_failed:
driver_register_failed:
usb_major_cleanup();
major_init_failed:
- usb_host_cleanup();
-host_init_failed:
bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
bus_notifier_failed:
bus_unregister(&usb_bus_type);
@@ -1079,10 +1136,10 @@ static void __exit usb_exit(void)
usb_deregister(&usbfs_driver);
usb_devio_cleanup();
usb_hub_cleanup();
- usb_host_cleanup();
bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
bus_unregister(&usb_bus_type);
ksuspend_usb_cleanup();
+ usb_debugfs_cleanup();
}
subsys_initcall(usb_init);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 79d8a9ea559..e2a8cfaade1 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -41,8 +41,6 @@ extern int usb_hub_init(void);
extern void usb_hub_cleanup(void);
extern int usb_major_init(void);
extern void usb_major_cleanup(void);
-extern int usb_host_init(void);
-extern void usb_host_cleanup(void);
#ifdef CONFIG_PM
@@ -106,6 +104,7 @@ extern struct workqueue_struct *ksuspend_usb_wq;
extern struct bus_type usb_bus_type;
extern struct device_type usb_device_type;
extern struct device_type usb_if_device_type;
+extern struct device_type usb_ep_device_type;
extern struct usb_device_driver usb_generic_driver;
static inline int is_usb_device(const struct device *dev)
@@ -113,6 +112,16 @@ static inline int is_usb_device(const struct device *dev)
return dev->type == &usb_device_type;
}
+static inline int is_usb_interface(const struct device *dev)
+{
+ return dev->type == &usb_if_device_type;
+}
+
+static inline int is_usb_endpoint(const struct device *dev)
+{
+ return dev->type == &usb_ep_device_type;
+}
+
/* Do the same for device drivers and interface drivers. */
static inline int is_usb_device_driver(struct device_driver *drv)
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 080bb1e4b84..5d1ddf485d1 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -156,7 +156,7 @@ config USB_ATMEL_USBA
config USB_GADGET_FSL_USB2
boolean "Freescale Highspeed USB DR Peripheral Controller"
- depends on FSL_SOC
+ depends on FSL_SOC || ARCH_MXC
select USB_GADGET_DUALSPEED
help
Some of Freescale PowerPC processors have a High Speed
@@ -253,7 +253,7 @@ config USB_PXA25X_SMALL
config USB_GADGET_PXA27X
boolean "PXA 27x"
- depends on ARCH_PXA && PXA27x
+ depends on ARCH_PXA && (PXA27x || PXA3xx)
select USB_OTG_UTILS
help
Intel's PXA 27x series XScale ARM v5TE processors include
@@ -272,6 +272,20 @@ config USB_PXA27X
default USB_GADGET
select USB_GADGET_SELECTED
+config USB_GADGET_S3C_HSOTG
+ boolean "S3C HS/OtG USB Device controller"
+ depends on S3C_DEV_USB_HSOTG
+ select USB_GADGET_S3C_HSOTG_PIO
+ help
+ The Samsung S3C64XX USB2.0 high-speed gadget controller
+ integrated into the S3C64XX series SoC.
+
+config USB_S3C_HSOTG
+ tristate
+ depends on USB_GADGET_S3C_HSOTG
+ default USB_GADGET
+ select USB_GADGET_SELECTED
+
config USB_GADGET_S3C2410
boolean "S3C2410 USB Device Controller"
depends on ARCH_S3C2410
@@ -460,6 +474,27 @@ config USB_GOKU
default USB_GADGET
select USB_GADGET_SELECTED
+config USB_GADGET_LANGWELL
+ boolean "Intel Langwell USB Device Controller"
+ depends on PCI
+ select USB_GADGET_DUALSPEED
+ help
+ Intel Langwell USB Device Controller is a High-Speed USB
+ On-The-Go device controller.
+
+	  The number of programmable endpoints differs between
+	  controller revisions.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "langwell_udc" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_LANGWELL
+ tristate
+ depends on USB_GADGET_LANGWELL
+ default USB_GADGET
+ select USB_GADGET_SELECTED
+
#
# LAST -- dummy/emulated controller
@@ -566,6 +601,20 @@ config USB_ZERO_HNPTEST
the "B-Peripheral" role, that device will use HNP to let this
one serve as the USB host instead (in the "B-Host" role).
+config USB_AUDIO
+ tristate "Audio Gadget (EXPERIMENTAL)"
+ depends on SND
+ help
+ Gadget Audio is compatible with USB Audio Class specification 1.0.
+	  It will include at least one AudioControl interface, zero or more
+	  AudioStreaming interfaces and zero or more MIDIStreaming interfaces.
+
+	  Gadget Audio will use the on-board ALSA (CONFIG_SND) audio card to
+	  play back or capture the audio stream.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "g_audio".
+
config USB_ETH
tristate "Ethernet Gadget (with CDC Ethernet support)"
depends on NET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 39a51d746cb..e6017e6bf6d 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -18,14 +18,21 @@ obj-$(CONFIG_USB_S3C2410) += s3c2410_udc.o
obj-$(CONFIG_USB_AT91) += at91_udc.o
obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
+fsl_usb2_udc-objs := fsl_udc_core.o
+ifeq ($(CONFIG_ARCH_MXC),y)
+fsl_usb2_udc-objs += fsl_mx3_udc.o
+endif
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
obj-$(CONFIG_USB_CI13XXX) += ci13xxx_udc.o
+obj-$(CONFIG_USB_S3C_HSOTG) += s3c-hsotg.o
+obj-$(CONFIG_USB_LANGWELL) += langwell_udc.o
#
# USB gadget drivers
#
g_zero-objs := zero.o
+g_audio-objs := audio.o
g_ether-objs := ether.o
g_serial-objs := serial.o
g_midi-objs := gmidi.o
@@ -35,6 +42,7 @@ g_printer-objs := printer.o
g_cdc-objs := cdc2.o
obj-$(CONFIG_USB_ZERO) += g_zero.o
+obj-$(CONFIG_USB_AUDIO) += g_audio.o
obj-$(CONFIG_USB_ETH) += g_ether.o
obj-$(CONFIG_USB_GADGETFS) += gadgetfs.o
obj-$(CONFIG_USB_FILE_STORAGE) += g_file_storage.o
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 0b2bb8f0706..72bae8f39d8 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -485,7 +485,7 @@ static int at91_ep_enable(struct usb_ep *_ep,
return -ESHUTDOWN;
}
- tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ tmp = usb_endpoint_type(desc);
switch (tmp) {
case USB_ENDPOINT_XFER_CONTROL:
DBG("only one control endpoint\n");
@@ -517,7 +517,7 @@ ok:
local_irq_save(flags);
/* initialize endpoint to match this descriptor */
- ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
+ ep->is_in = usb_endpoint_dir_in(desc);
ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
ep->stopped = 0;
if (ep->is_in)
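
Aside (illustration only): the usb_endpoint_*() helpers adopted above are thin wrappers over the same mask tests the old code open-coded, roughly as sketched below; see include/linux/usb/ch9.h for the real definitions.

	/* Hedged sketch of what the ch9.h helpers reduce to. */
	#include <linux/usb/ch9.h>

	static inline int ep_xfer_type(const struct usb_endpoint_descriptor *d)
	{
		return d->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;	/* usb_endpoint_type() */
	}

	static inline int ep_dir_is_in(const struct usb_endpoint_descriptor *d)
	{
		return (d->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN;	/* usb_endpoint_dir_in() */
	}
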
@@ -1574,7 +1574,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
udc->driver = driver;
udc->gadget.dev.driver = &driver->driver;
- udc->gadget.dev.driver_data = &driver->driver;
+ dev_set_drvdata(&udc->gadget.dev, &driver->driver);
udc->enabled = 1;
udc->selfpowered = 1;
@@ -1583,7 +1583,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
DBG("driver->bind() returned %d\n", retval);
udc->driver = NULL;
udc->gadget.dev.driver = NULL;
- udc->gadget.dev.driver_data = NULL;
+ dev_set_drvdata(&udc->gadget.dev, NULL);
udc->enabled = 0;
udc->selfpowered = 0;
return retval;
@@ -1613,7 +1613,7 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
driver->unbind(&udc->gadget);
udc->gadget.dev.driver = NULL;
- udc->gadget.dev.driver_data = NULL;
+ dev_set_drvdata(&udc->gadget.dev, NULL);
udc->driver = NULL;
DBG("unbound from %s\n", driver->driver.name);
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 563d5727544..4e970cf0e29 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -326,13 +326,7 @@ static int vbus_is_present(struct usba_udc *udc)
return 1;
}
-#if defined(CONFIG_AVR32)
-
-static void toggle_bias(int is_on)
-{
-}
-
-#elif defined(CONFIG_ARCH_AT91)
+#if defined(CONFIG_ARCH_AT91SAM9RL)
#include <mach/at91_pmc.h>
@@ -346,7 +340,13 @@ static void toggle_bias(int is_on)
at91_sys_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
}
-#endif /* CONFIG_ARCH_AT91 */
+#else
+
+static void toggle_bias(int is_on)
+{
+}
+
+#endif /* CONFIG_ARCH_AT91SAM9RL */
static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
{
@@ -550,12 +550,12 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
ep->ep.name, ept_cfg, maxpacket);
- if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) {
+ if (usb_endpoint_dir_in(desc)) {
ep->is_in = 1;
ept_cfg |= USBA_EPT_DIR_IN;
}
- switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_CONTROL:
ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
@@ -794,7 +794,8 @@ usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
if (ep->desc) {
list_add_tail(&req->queue, &ep->queue);
- if (ep->is_in || (ep_is_control(ep)
+ if ((!ep_is_control(ep) && ep->is_in) ||
+ (ep_is_control(ep)
&& (ep->state == DATA_STAGE_IN
|| ep->state == STATUS_STAGE_IN)))
usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
@@ -1940,7 +1941,7 @@ static int __init usba_udc_probe(struct platform_device *pdev)
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
clk_disable(pclk);
- usba_ep = kmalloc(sizeof(struct usba_ep) * pdata->num_ep,
+ usba_ep = kzalloc(sizeof(struct usba_ep) * pdata->num_ep,
GFP_KERNEL);
if (!usba_ep)
goto err_alloc_ep;
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
new file mode 100644
index 00000000000..94de7e86461
--- /dev/null
+++ b/drivers/usb/gadget/audio.c
@@ -0,0 +1,302 @@
+/*
+ * audio.c -- Audio gadget driver
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+
+#include "u_audio.h"
+
+#define DRIVER_DESC "Linux USB Audio Gadget"
+#define DRIVER_VERSION "Dec 18, 2008"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "u_audio.c"
+#include "f_audio.c"
+
+/*-------------------------------------------------------------------------*/
+
+/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
+ * Instead: allocate your own, using normal USB-IF procedures.
+ */
+
+/* Thanks to NetChip Technologies for donating this product ID. */
+#define AUDIO_VENDOR_NUM 0x0525 /* NetChip */
+#define AUDIO_PRODUCT_NUM 0xa4a1 /* Linux-USB Audio Gadget */
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = sizeof device_desc,
+ .bDescriptorType = USB_DT_DEVICE,
+
+ .bcdUSB = __constant_cpu_to_le16(0x200),
+
+ .bDeviceClass = USB_CLASS_PER_INTERFACE,
+ .bDeviceSubClass = 0,
+ .bDeviceProtocol = 0,
+ /* .bMaxPacketSize0 = f(hardware) */
+
+ /* Vendor and product id defaults change according to what configs
+ * we support. (As does bNumConfigurations.) These values can
+ * also be overridden by module parameters.
+ */
+ .idVendor = __constant_cpu_to_le16(AUDIO_VENDOR_NUM),
+ .idProduct = __constant_cpu_to_le16(AUDIO_PRODUCT_NUM),
+ /* .bcdDevice = f(hardware) */
+ /* .iManufacturer = DYNAMIC */
+ /* .iProduct = DYNAMIC */
+ /* NO SERIAL NUMBER */
+ .bNumConfigurations = 1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+ .bLength = sizeof otg_descriptor,
+ .bDescriptorType = USB_DT_OTG,
+
+ /* REVISIT SRP-only hardware is possible, although
+ * it would not be called "OTG" ...
+ */
+ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+ (struct usb_descriptor_header *) &otg_descriptor,
+ NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * Handle USB audio endpoint set/get command in setup class request
+ */
+
+static int audio_set_endpoint_req(struct usb_configuration *c,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ int value = -EOPNOTSUPP;
+ u16 ep = le16_to_cpu(ctrl->wIndex);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ switch (ctrl->bRequest) {
+ case SET_CUR:
+ value = 0;
+ break;
+
+ case SET_MIN:
+ break;
+
+ case SET_MAX:
+ break;
+
+ case SET_RES:
+ break;
+
+ case SET_MEM:
+ break;
+
+ default:
+ break;
+ }
+
+ return value;
+}
+
+static int audio_get_endpoint_req(struct usb_configuration *c,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ int value = -EOPNOTSUPP;
+ u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ switch (ctrl->bRequest) {
+ case GET_CUR:
+ case GET_MIN:
+ case GET_MAX:
+ case GET_RES:
+ value = 3;
+ break;
+ case GET_MEM:
+ break;
+ default:
+ break;
+ }
+
+ return value;
+}
+
+static int
+audio_setup(struct usb_configuration *c, const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* composite driver infrastructure handles everything except
+ * Audio class messages; interface activation uses set_alt().
+ */
+ switch (ctrl->bRequestType) {
+ case USB_AUDIO_SET_ENDPOINT:
+ value = audio_set_endpoint_req(c, ctrl);
+ break;
+
+ case USB_AUDIO_GET_ENDPOINT:
+ value = audio_get_endpoint_req(c, ctrl);
+ break;
+
+ default:
+ ERROR(cdev, "Invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ DBG(cdev, "Audio req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ ERROR(cdev, "Audio response on err %d\n", value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int __init audio_do_config(struct usb_configuration *c)
+{
+ /* FIXME alloc iConfiguration string, set it in c->strings */
+
+ if (gadget_is_otg(c->cdev->gadget)) {
+ c->descriptors = otg_desc;
+ c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ }
+
+ audio_bind_config(c);
+
+ return 0;
+}
+
+static struct usb_configuration audio_config_driver = {
+ .label = DRIVER_DESC,
+ .bind = audio_do_config,
+ .setup = audio_setup,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init audio_bind(struct usb_composite_dev *cdev)
+{
+ int gcnum;
+ int status;
+
+ gcnum = usb_gadget_controller_number(cdev->gadget);
+ if (gcnum >= 0)
+ device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+ else {
+ ERROR(cdev, "controller '%s' not recognized; trying %s\n",
+ cdev->gadget->name,
+ audio_config_driver.label);
+ device_desc.bcdDevice =
+ __constant_cpu_to_le16(0x0300 | 0x0099);
+ }
+
+ /* device descriptor strings: manufacturer, product */
+ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+ init_utsname()->sysname, init_utsname()->release,
+ cdev->gadget->name);
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto fail;
+ strings_dev[STRING_MANUFACTURER_IDX].id = status;
+ device_desc.iManufacturer = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto fail;
+ strings_dev[STRING_PRODUCT_IDX].id = status;
+ device_desc.iProduct = status;
+
+ status = usb_add_config(cdev, &audio_config_driver);
+ if (status < 0)
+ goto fail;
+
+ INFO(cdev, "%s, version: %s\n", DRIVER_DESC, DRIVER_VERSION);
+ return 0;
+
+fail:
+ return status;
+}
+
+static int __exit audio_unbind(struct usb_composite_dev *cdev)
+{
+ return 0;
+}
+
+static struct usb_composite_driver audio_driver = {
+ .name = "g_audio",
+ .dev = &device_desc,
+ .strings = audio_strings,
+ .bind = audio_bind,
+ .unbind = __exit_p(audio_unbind),
+};
+
+static int __init init(void)
+{
+ return usb_composite_register(&audio_driver);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+ usb_composite_unregister(&audio_driver);
+}
+module_exit(cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Bryan Wu <cooloney@kernel.org>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 38e531ecae4..c7cb87a6fee 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -1977,9 +1977,9 @@ static int ep_enable(struct usb_ep *ep,
if (!list_empty(&mEp->qh[mEp->dir].queue))
warn("enabling a non-empty endpoint!");
- mEp->dir = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? TX : RX;
- mEp->num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
- mEp->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
+ mEp->num = usb_endpoint_num(desc);
+ mEp->type = usb_endpoint_type(desc);
mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
new file mode 100644
index 00000000000..66527ba2d2e
--- /dev/null
+++ b/drivers/usb/gadget/f_audio.c
@@ -0,0 +1,707 @@
+/*
+ * f_audio.c -- USB Audio class function driver
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <asm/atomic.h>
+
+#include "u_audio.h"
+
+#define OUT_EP_MAX_PACKET_SIZE 200
+static int req_buf_size = OUT_EP_MAX_PACKET_SIZE;
+module_param(req_buf_size, int, S_IRUGO);
+MODULE_PARM_DESC(req_buf_size, "ISO OUT endpoint request buffer size");
+
+static int req_count = 256;
+module_param(req_count, int, S_IRUGO);
+MODULE_PARM_DESC(req_count, "ISO OUT endpoint request count");
+
+static int audio_buf_size = 48000;
+module_param(audio_buf_size, int, S_IRUGO);
+MODULE_PARM_DESC(audio_buf_size, "Audio buffer size");
+
+/*
+ * DESCRIPTORS ... most are static, but strings and full
+ * configuration descriptors are built on demand.
+ */
+
+/*
+ * We have two interfaces - AudioControl and AudioStreaming
+ * TODO: only supports playback currently
+ */
+#define F_AUDIO_AC_INTERFACE 0
+#define F_AUDIO_AS_INTERFACE 1
+#define F_AUDIO_NUM_INTERFACES 2
+
+/* B.3.1 Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc __initdata = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_USB_AC_HEADER_DESCRIPTOR(2);
+
+#define USB_DT_AC_HEADER_LENGH USB_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
+/* B.3.2 Class-Specific AC Interface Descriptor */
+static struct usb_ac_header_descriptor_2 ac_header_desc = {
+ .bLength = USB_DT_AC_HEADER_LENGH,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = HEADER,
+ .bcdADC = __constant_cpu_to_le16(0x0100),
+ .wTotalLength = __constant_cpu_to_le16(USB_DT_AC_HEADER_LENGH),
+ .bInCollection = F_AUDIO_NUM_INTERFACES,
+ .baInterfaceNr = {
+ [0] = F_AUDIO_AC_INTERFACE,
+ [1] = F_AUDIO_AS_INTERFACE,
+ }
+};
+
+#define INPUT_TERMINAL_ID 1
+static struct usb_input_terminal_descriptor input_terminal_desc = {
+ .bLength = USB_DT_AC_INPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = INPUT_TERMINAL,
+ .bTerminalID = INPUT_TERMINAL_ID,
+ .wTerminalType = USB_AC_TERMINAL_STREAMING,
+ .bAssocTerminal = 0,
+ .wChannelConfig = 0x3,
+};
+
+DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID 2
+static struct usb_ac_feature_unit_descriptor_0 feature_unit_desc = {
+ .bLength = USB_DT_AC_FEATURE_UNIT_SIZE(0),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = FEATURE_UNIT,
+ .bUnitID = FEATURE_UNIT_ID,
+ .bSourceID = INPUT_TERMINAL_ID,
+ .bControlSize = 2,
+ .bmaControls[0] = (FU_MUTE | FU_VOLUME),
+};
+
+static struct usb_audio_control mute_control = {
+ .list = LIST_HEAD_INIT(mute_control.list),
+ .name = "Mute Control",
+ .type = MUTE_CONTROL,
+ /* Todo: add real Mute control code */
+ .set = generic_set_cmd,
+ .get = generic_get_cmd,
+};
+
+static struct usb_audio_control volume_control = {
+ .list = LIST_HEAD_INIT(volume_control.list),
+ .name = "Volume Control",
+ .type = VOLUME_CONTROL,
+ /* Todo: add real Volume control code */
+ .set = generic_set_cmd,
+ .get = generic_get_cmd,
+};
+
+static struct usb_audio_control_selector feature_unit = {
+ .list = LIST_HEAD_INIT(feature_unit.list),
+ .id = FEATURE_UNIT_ID,
+ .name = "Mute & Volume Control",
+ .type = FEATURE_UNIT,
+ .desc = (struct usb_descriptor_header *)&feature_unit_desc,
+};
+
+#define OUTPUT_TERMINAL_ID 3
+static struct usb_output_terminal_descriptor output_terminal_desc = {
+ .bLength = USB_DT_AC_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = OUTPUT_TERMINAL,
+ .bTerminalID = OUTPUT_TERMINAL_ID,
+ .wTerminalType = USB_AC_OUTPUT_TERMINAL_SPEAKER,
+ .bAssocTerminal = FEATURE_UNIT_ID,
+ .bSourceID = FEATURE_UNIT_ID,
+};
+
+/* B.4.1 Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2 Class-Specific AS Interface Descriptor */
+static struct usb_as_header_descriptor as_header_desc = {
+ .bLength = USB_DT_AS_HEADER_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = AS_GENERAL,
+ .bTerminalLink = INPUT_TERMINAL_ID,
+ .bDelay = 1,
+ .wFormatTag = USB_AS_AUDIO_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct usb_as_formate_type_i_discrete_descriptor_1 as_type_i_desc = {
+ .bLength = USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = FORMAT_TYPE,
+ .bFormatType = USB_AS_FORMAT_TYPE_I,
+ .bSubframeSize = 2,
+ .bBitResolution = 16,
+ .bSamFreqType = 1,
+};
+
+/* Standard ISO OUT Endpoint Descriptor */
+static struct usb_endpoint_descriptor as_out_ep_desc __initdata = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_AS_ENDPOINT_ADAPTIVE
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = __constant_cpu_to_le16(OUT_EP_MAX_PACKET_SIZE),
+ .bInterval = 4,
+};
+
+/* Class-specific AS ISO OUT Endpoint Descriptor */
+static struct usb_as_iso_endpoint_descriptor as_iso_out_desc __initdata = {
+ .bLength = USB_AS_ISO_ENDPOINT_DESC_SIZE,
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubtype = EP_GENERAL,
+ .bmAttributes = 1,
+ .bLockDelayUnits = 1,
+ .wLockDelay = __constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *f_audio_desc[] __initdata = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&input_terminal_desc,
+ (struct usb_descriptor_header *)&output_terminal_desc,
+ (struct usb_descriptor_header *)&feature_unit_desc,
+
+ (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_header_desc,
+
+ (struct usb_descriptor_header *)&as_type_i_desc,
+
+ (struct usb_descriptor_header *)&as_out_ep_desc,
+ (struct usb_descriptor_header *)&as_iso_out_desc,
+ NULL,
+};
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer,
+ [STRING_PRODUCT_IDX].s = DRIVER_DESC,
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *audio_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+/*
+ * This function follows USB Audio Class Spec 1.0 and plays the host's
+ * audio stream through an on-board ALSA sound card.
+ */
+
+/*-------------------------------------------------------------------------*/
+struct f_audio_buf {
+ u8 *buf;
+ int actual;
+ struct list_head list;
+};
+
+static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
+{
+ struct f_audio_buf *copy_buf;
+
+ copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
+ if (!copy_buf)
+ return (struct f_audio_buf *)-ENOMEM;
+
+ copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
+ if (!copy_buf->buf) {
+ kfree(copy_buf);
+ return (struct f_audio_buf *)-ENOMEM;
+ }
+
+ return copy_buf;
+}
+
+static void f_audio_buffer_free(struct f_audio_buf *audio_buf)
+{
+ kfree(audio_buf->buf);
+ kfree(audio_buf);
+}
+/*-------------------------------------------------------------------------*/
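
Editorial aside (not part of the patch): f_audio_buffer_alloc() above signals failure by casting -ENOMEM to a pointer, and its caller later tests the pointer with "< 0". The conventional kernel idiom for this is ERR_PTR()/IS_ERR(); a hypothetical variant is sketched below for comparison only.

	/* Hedged sketch of the ERR_PTR()/IS_ERR() idiom; audio_buf_alloc_sketch()
	 * is not code from this patch, and struct f_audio_buf is the one above. */
	#include <linux/err.h>
	#include <linux/slab.h>

	static struct f_audio_buf *audio_buf_alloc_sketch(int buf_size)
	{
		struct f_audio_buf *b = kzalloc(sizeof(*b), GFP_ATOMIC);

		if (!b)
			return ERR_PTR(-ENOMEM);
		b->buf = kzalloc(buf_size, GFP_ATOMIC);
		if (!b->buf) {
			kfree(b);
			return ERR_PTR(-ENOMEM);
		}
		return b;	/* callers then check with IS_ERR() instead of '< 0' */
	}
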
+
+struct f_audio {
+ struct gaudio card;
+
+ /* endpoints handle full and/or high speeds */
+ struct usb_ep *out_ep;
+ struct usb_endpoint_descriptor *out_desc;
+
+ spinlock_t lock;
+ struct f_audio_buf *copy_buf;
+ struct work_struct playback_work;
+ struct list_head play_queue;
+
+ /* Control Set command */
+ struct list_head cs;
+ u8 set_cmd;
+ struct usb_audio_control *set_con;
+};
+
+static inline struct f_audio *func_to_audio(struct usb_function *f)
+{
+ return container_of(f, struct f_audio, card.func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_playback_work(struct work_struct *data)
+{
+ struct f_audio *audio = container_of(data, struct f_audio,
+ playback_work);
+ struct f_audio_buf *play_buf;
+
+ spin_lock_irq(&audio->lock);
+ if (list_empty(&audio->play_queue)) {
+ spin_unlock_irq(&audio->lock);
+ return;
+ }
+ play_buf = list_first_entry(&audio->play_queue,
+ struct f_audio_buf, list);
+ list_del(&play_buf->list);
+ spin_unlock_irq(&audio->lock);
+
+ u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
+ f_audio_buffer_free(play_buf);
+
+ return;
+}
+
+static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_audio *audio = req->context;
+ struct usb_composite_dev *cdev = audio->card.func.config->cdev;
+ struct f_audio_buf *copy_buf = audio->copy_buf;
+ int err;
+
+ if (!copy_buf)
+ return -EINVAL;
+
+ /* Copy buffer is full, add it to the play_queue */
+ if (audio_buf_size - copy_buf->actual < req->actual) {
+ list_add_tail(&copy_buf->list, &audio->play_queue);
+ schedule_work(&audio->playback_work);
+ copy_buf = f_audio_buffer_alloc(audio_buf_size);
+ if (copy_buf < 0)
+ return -ENOMEM;
+ }
+
+ memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
+ copy_buf->actual += req->actual;
+ audio->copy_buf = copy_buf;
+
+ err = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (err)
+ ERROR(cdev, "%s queue req: %d\n", ep->name, err);
+
+ return 0;
+
+}
+
+static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_audio *audio = req->context;
+ int status = req->status;
+ u32 data = 0;
+ struct usb_ep *out_ep = audio->out_ep;
+
+ switch (status) {
+
+ case 0: /* normal completion? */
+ if (ep == out_ep)
+ f_audio_out_ep_complete(ep, req);
+ else if (audio->set_con) {
+ memcpy(&data, req->buf, req->length);
+ audio->set_con->set(audio->set_con, audio->set_cmd,
+ le16_to_cpu(data));
+ audio->set_con = NULL;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int audio_set_intf_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u8 con_sel = (w_value >> 8) & 0xFF;
+ u8 cmd = (ctrl->bRequest & 0x0F);
+ struct usb_audio_control_selector *cs;
+ struct usb_audio_control *con;
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+ ctrl->bRequest, w_value, len, id);
+
+ list_for_each_entry(cs, &audio->cs, list) {
+ if (cs->id == id) {
+ list_for_each_entry(con, &cs->control, list) {
+ if (con->type == con_sel) {
+ audio->set_con = con;
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ audio->set_cmd = cmd;
+ req->context = audio;
+ req->complete = f_audio_complete;
+
+ return len;
+}
+
+static int audio_get_intf_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u8 con_sel = (w_value >> 8) & 0xFF;
+ u8 cmd = (ctrl->bRequest & 0x0F);
+ struct usb_audio_control_selector *cs;
+ struct usb_audio_control *con;
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+ ctrl->bRequest, w_value, len, id);
+
+ list_for_each_entry(cs, &audio->cs, list) {
+ if (cs->id == id) {
+ list_for_each_entry(con, &cs->control, list) {
+ if (con->type == con_sel && con->get) {
+ value = con->get(con, cmd);
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ req->context = audio;
+ req->complete = f_audio_complete;
+ memcpy(req->buf, &value, len);
+
+ return len;
+}
+
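
For readers unfamiliar with the UAC 1.0 request layout used in the two handlers above (illustration only): the entity ID rides in the high byte of wIndex and the control selector in the high byte of wValue. A small decoding sketch:

	/* Hedged sketch: unpacking a UAC 1.0 AudioControl entity request. */
	#include <linux/usb/ch9.h>
	#include <asm/byteorder.h>

	static void decode_ac_request(const struct usb_ctrlrequest *ctrl,
				      u8 *entity_id, u8 *control_sel, u8 *channel)
	{
		u16 w_index = le16_to_cpu(ctrl->wIndex);
		u16 w_value = le16_to_cpu(ctrl->wValue);

		*entity_id   = w_index >> 8;	/* unit/terminal ID (low byte: interface) */
		*control_sel = w_value >> 8;	/* e.g. MUTE_CONTROL, VOLUME_CONTROL */
		*channel     = w_value & 0xff;	/* 0 means the master channel */
	}
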
+static int
+f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* composite driver infrastructure handles everything except
+ * Audio class messages; interface activation uses set_alt().
+ */
+ switch (ctrl->bRequestType) {
+ case USB_AUDIO_SET_INTF:
+ value = audio_set_intf_req(f, ctrl);
+ break;
+
+ case USB_AUDIO_GET_INTF:
+ value = audio_get_intf_req(f, ctrl);
+ break;
+
+ default:
+ ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ DBG(cdev, "audio req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ ERROR(cdev, "audio response on err %d\n", value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_audio *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_ep *out_ep = audio->out_ep;
+ struct usb_request *req;
+ int i = 0, err = 0;
+
+ DBG(cdev, "intf %d, alt %d\n", intf, alt);
+
+ if (intf == 1) {
+ if (alt == 1) {
+ usb_ep_enable(out_ep, audio->out_desc);
+ out_ep->driver_data = audio;
+ audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
+
+ /*
+ * allocate a bunch of read buffers
+ * and queue them all at once.
+ */
+ for (i = 0; i < req_count && err == 0; i++) {
+ req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
+ if (req) {
+ req->buf = kzalloc(req_buf_size,
+ GFP_ATOMIC);
+ if (req->buf) {
+ req->length = req_buf_size;
+ req->context = audio;
+ req->complete =
+ f_audio_complete;
+ err = usb_ep_queue(out_ep,
+ req, GFP_ATOMIC);
+ if (err)
+ ERROR(cdev,
+ "%s queue req: %d\n",
+ out_ep->name, err);
+ } else
+ err = -ENOMEM;
+ } else
+ err = -ENOMEM;
+ }
+
+ } else {
+ struct f_audio_buf *copy_buf = audio->copy_buf;
+ if (copy_buf) {
+ list_add_tail(&copy_buf->list,
+ &audio->play_queue);
+ schedule_work(&audio->playback_work);
+ }
+ }
+ }
+
+ return err;
+}
+
+static void f_audio_disable(struct usb_function *f)
+{
+ return;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_build_desc(struct f_audio *audio)
+{
+ struct gaudio *card = &audio->card;
+ u8 *sam_freq;
+ int rate;
+
+ /* Set channel numbers */
+ input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card);
+ as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card);
+
+ /* Set sample rates */
+ rate = u_audio_get_playback_rate(card);
+ sam_freq = as_type_i_desc.tSamFreq[0];
+ memcpy(sam_freq, &rate, 3);
+
+ /* Todo: Set Sample bits and other parameters */
+
+ return;
+}
+
+/* audio function driver setup/binding */
+static int __init
+f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_audio *audio = func_to_audio(f);
+ int status;
+ struct usb_ep *ep;
+
+ f_audio_build_desc(audio);
+
+ /* allocate instance-specific interface IDs, and patch descriptors */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ ac_interface_desc.bInterfaceNumber = status;
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ as_interface_alt_0_desc.bInterfaceNumber = status;
+ as_interface_alt_1_desc.bInterfaceNumber = status;
+
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc);
+ if (!ep)
+ goto fail;
+ audio->out_ep = ep;
+ ep->driver_data = cdev; /* claim */
+
+ status = -ENOMEM;
+
+	/* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+
+ /* copy descriptors, and track endpoint copies */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ c->highspeed = true;
+ f->hs_descriptors = usb_copy_descriptors(f_audio_desc);
+ } else
+ f->descriptors = usb_copy_descriptors(f_audio_desc);
+
+ return 0;
+
+fail:
+
+ return status;
+}
+
+static void
+f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_audio *audio = func_to_audio(f);
+
+ usb_free_descriptors(f->descriptors);
+ kfree(audio);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Todo: add more control selectors dynamically */
+int __init control_selector_init(struct f_audio *audio)
+{
+ INIT_LIST_HEAD(&audio->cs);
+ list_add(&feature_unit.list, &audio->cs);
+
+ INIT_LIST_HEAD(&feature_unit.control);
+ list_add(&mute_control.list, &feature_unit.control);
+ list_add(&volume_control.list, &feature_unit.control);
+
+ volume_control.data[_CUR] = 0xffc0;
+ volume_control.data[_MIN] = 0xe3a0;
+ volume_control.data[_MAX] = 0xfff0;
+ volume_control.data[_RES] = 0x0030;
+
+ return 0;
+}
+
+/**
+ * audio_bind_config - add USB audio function to a configuration
+ * @c: the configuration to support the USB audio function
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ */
+int __init audio_bind_config(struct usb_configuration *c)
+{
+ struct f_audio *audio;
+ int status;
+
+ /* allocate and initialize one new instance */
+ audio = kzalloc(sizeof *audio, GFP_KERNEL);
+ if (!audio)
+ return -ENOMEM;
+
+ audio->card.func.name = "g_audio";
+ audio->card.gadget = c->cdev->gadget;
+
+ INIT_LIST_HEAD(&audio->play_queue);
+ spin_lock_init(&audio->lock);
+
+	/* set up ALSA audio devices */
+ status = gaudio_setup(&audio->card);
+ if (status < 0)
+ goto setup_fail;
+
+ audio->card.func.strings = audio_strings;
+ audio->card.func.bind = f_audio_bind;
+ audio->card.func.unbind = f_audio_unbind;
+ audio->card.func.set_alt = f_audio_set_alt;
+ audio->card.func.setup = f_audio_setup;
+ audio->card.func.disable = f_audio_disable;
+ audio->out_desc = &as_out_ep_desc;
+
+ control_selector_init(audio);
+
+ INIT_WORK(&audio->playback_work, f_audio_playback_work);
+
+ status = usb_add_function(c, &audio->card.func);
+ if (status)
+ goto add_fail;
+
+ INFO(c->cdev, "audio_buf_size %d, req_buf_size %d, req_count %d\n",
+ audio_buf_size, req_buf_size, req_count);
+
+ return status;
+
+add_fail:
+ gaudio_cleanup(&audio->card);
+setup_fail:
+ kfree(audio);
+ return status;
+}
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index c1abeb89b41..96fb118355b 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -188,8 +188,7 @@ static struct usb_descriptor_header *hs_pn_function[] = {
static int pn_net_open(struct net_device *dev)
{
- if (netif_carrier_ok(dev))
- netif_wake_queue(dev);
+ netif_wake_queue(dev);
return 0;
}
@@ -219,8 +218,7 @@ static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req)
}
dev_kfree_skb_any(skb);
- if (netif_carrier_ok(dev))
- netif_wake_queue(dev);
+ netif_wake_queue(dev);
}
static int pn_net_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -255,7 +253,7 @@ out_unlock:
spin_unlock_irqrestore(&port->lock, flags);
out:
if (unlikely(skb)) {
- dev_kfree_skb_any(skb);
+ dev_kfree_skb(skb);
dev->stats.tx_dropped++;
}
return 0;
@@ -383,7 +381,6 @@ static void __pn_reset(struct usb_function *f)
struct phonet_port *port = netdev_priv(dev);
netif_carrier_off(dev);
- netif_stop_queue(dev);
port->usb = NULL;
usb_ep_disable(fp->out_ep);
@@ -427,8 +424,6 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
fp->in_ep->driver_data = fp;
netif_carrier_on(dev);
- if (netif_running(dev))
- netif_wake_queue(dev);
for (i = 0; i < phonet_rxq_size; i++)
pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
}
@@ -574,9 +569,10 @@ static struct net_device *dev;
int __init phonet_bind_config(struct usb_configuration *c)
{
struct f_phonet *fp;
- int err;
+ int err, size;
- fp = kzalloc(sizeof(*fp), GFP_KERNEL);
+ size = sizeof(*fp) + (phonet_rxq_size * sizeof(struct usb_request *));
+ fp = kzalloc(size, GFP_KERNEL);
if (!fp)
return -ENOMEM;
@@ -601,16 +597,13 @@ int __init gphonet_setup(struct usb_gadget *gadget)
/* Create net device */
BUG_ON(dev);
- dev = alloc_netdev(sizeof(*port)
- + (phonet_rxq_size * sizeof(struct usb_request *)),
- "upnlink%d", pn_net_setup);
+ dev = alloc_netdev(sizeof(*port), "upnlink%d", pn_net_setup);
if (!dev)
return -ENOMEM;
port = netdev_priv(dev);
spin_lock_init(&port->lock);
netif_carrier_off(dev);
- netif_stop_queue(dev);
SET_NETDEV_DEV(dev, &gadget->dev);
err = register_netdev(dev);
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 3279a472604..424a37c5773 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -475,7 +475,9 @@ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (rndis->port.in_ep->driver_data) {
DBG(cdev, "reset rndis\n");
gether_disconnect(&rndis->port);
- } else {
+ }
+
+ if (!rndis->port.in) {
DBG(cdev, "init rndis\n");
rndis->port.in = ep_choose(cdev->gadget,
rndis->hs.in, rndis->fs.in);
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 381a53b3e11..1e6aa504d58 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -248,6 +248,8 @@
#include <linux/freezer.h>
#include <linux/utsname.h>
+#include <asm/unaligned.h>
+
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -799,29 +801,9 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
/* Routines for unaligned data access */
-static u16 get_be16(u8 *buf)
-{
- return ((u16) buf[0] << 8) | ((u16) buf[1]);
-}
-
-static u32 get_be32(u8 *buf)
-{
- return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) |
- ((u32) buf[2] << 8) | ((u32) buf[3]);
-}
-
-static void put_be16(u8 *buf, u16 val)
-{
- buf[0] = val >> 8;
- buf[1] = val;
-}
-
-static void put_be32(u8 *buf, u32 val)
+static u32 get_unaligned_be24(u8 *buf)
{
- buf[0] = val >> 24;
- buf[1] = val >> 16;
- buf[2] = val >> 8;
- buf[3] = val & 0xff;
+ return 0xffffff & (u32) get_unaligned_be32(buf - 1);
}
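
Note (illustration only): get_unaligned_be24() above deliberately reads one byte before buf (here always the SCSI opcode byte) and masks it off. An equivalent, self-contained 24-bit big-endian read looks like this:

	/* Hedged sketch: straightforward 24-bit big-endian extraction. */
	#include <linux/types.h>

	static u32 be24_sketch(const u8 *buf)
	{
		return ((u32)buf[0] << 16) | ((u32)buf[1] << 8) | buf[2];
	}
	/* Example: {0x01, 0x02, 0x03} -> 0x010203, i.e. the READ(6)/WRITE(6) LBA. */
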
@@ -1582,9 +1564,9 @@ static int do_read(struct fsg_dev *fsg)
/* Get the starting Logical Block Address and check that it's
* not too big */
if (fsg->cmnd[0] == SC_READ_6)
- lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
+ lba = get_unaligned_be24(&fsg->cmnd[1]);
else {
- lba = get_be32(&fsg->cmnd[2]);
+ lba = get_unaligned_be32(&fsg->cmnd[2]);
/* We allow DPO (Disable Page Out = don't save data in the
* cache) and FUA (Force Unit Access = don't read from the
@@ -1717,9 +1699,9 @@ static int do_write(struct fsg_dev *fsg)
/* Get the starting Logical Block Address and check that it's
* not too big */
if (fsg->cmnd[0] == SC_WRITE_6)
- lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
+ lba = get_unaligned_be24(&fsg->cmnd[1]);
else {
- lba = get_be32(&fsg->cmnd[2]);
+ lba = get_unaligned_be32(&fsg->cmnd[2]);
/* We allow DPO (Disable Page Out = don't save data in the
* cache) and FUA (Force Unit Access = write directly to the
@@ -1940,7 +1922,7 @@ static int do_verify(struct fsg_dev *fsg)
/* Get the starting Logical Block Address and check that it's
* not too big */
- lba = get_be32(&fsg->cmnd[2]);
+ lba = get_unaligned_be32(&fsg->cmnd[2]);
if (lba >= curlun->num_sectors) {
curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
return -EINVAL;
@@ -1953,7 +1935,7 @@ static int do_verify(struct fsg_dev *fsg)
return -EINVAL;
}
- verification_length = get_be16(&fsg->cmnd[7]);
+ verification_length = get_unaligned_be16(&fsg->cmnd[7]);
if (unlikely(verification_length == 0))
return -EIO; // No default reply
@@ -2103,7 +2085,7 @@ static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
memset(buf, 0, 18);
buf[0] = valid | 0x70; // Valid, current error
buf[2] = SK(sd);
- put_be32(&buf[3], sdinfo); // Sense information
+ put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
buf[7] = 18 - 8; // Additional sense length
buf[12] = ASC(sd);
buf[13] = ASCQ(sd);
@@ -2114,7 +2096,7 @@ static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
struct lun *curlun = fsg->curlun;
- u32 lba = get_be32(&fsg->cmnd[2]);
+ u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
int pmi = fsg->cmnd[8];
u8 *buf = (u8 *) bh->buf;
@@ -2124,8 +2106,9 @@ static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
return -EINVAL;
}
- put_be32(&buf[0], curlun->num_sectors - 1); // Max logical block
- put_be32(&buf[4], 512); // Block length
+ put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
+ /* Max logical block */
+ put_unaligned_be32(512, &buf[4]); /* Block length */
return 8;
}
@@ -2144,7 +2127,7 @@ static void store_cdrom_address(u8 *dest, int msf, u32 addr)
dest[0] = 0; /* Reserved */
} else {
/* Absolute sector */
- put_be32(dest, addr);
+ put_unaligned_be32(addr, dest);
}
}
@@ -2152,7 +2135,7 @@ static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
struct lun *curlun = fsg->curlun;
int msf = fsg->cmnd[1] & 0x02;
- u32 lba = get_be32(&fsg->cmnd[2]);
+ u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
u8 *buf = (u8 *) bh->buf;
if ((fsg->cmnd[1] & ~0x02) != 0) { /* Mask away MSF */
@@ -2252,10 +2235,13 @@ static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
buf[2] = 0x04; // Write cache enable,
// Read cache not disabled
// No cache retention priorities
- put_be16(&buf[4], 0xffff); // Don't disable prefetch
- // Minimum prefetch = 0
- put_be16(&buf[8], 0xffff); // Maximum prefetch
- put_be16(&buf[10], 0xffff); // Maximum prefetch ceiling
+ put_unaligned_be16(0xffff, &buf[4]);
+ /* Don't disable prefetch */
+ /* Minimum prefetch = 0 */
+ put_unaligned_be16(0xffff, &buf[8]);
+ /* Maximum prefetch */
+ put_unaligned_be16(0xffff, &buf[10]);
+ /* Maximum prefetch ceiling */
}
buf += 12;
}
@@ -2272,7 +2258,7 @@ static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
if (mscmnd == SC_MODE_SENSE_6)
buf0[0] = len - 1;
else
- put_be16(buf0, len - 2);
+ put_unaligned_be16(len - 2, buf0);
return len;
}
@@ -2360,9 +2346,10 @@ static int do_read_format_capacities(struct fsg_dev *fsg,
buf[3] = 8; // Only the Current/Maximum Capacity Descriptor
buf += 4;
- put_be32(&buf[0], curlun->num_sectors); // Number of blocks
- put_be32(&buf[4], 512); // Block length
- buf[4] = 0x02; // Current capacity
+ put_unaligned_be32(curlun->num_sectors, &buf[0]);
+ /* Number of blocks */
+ put_unaligned_be32(512, &buf[4]); /* Block length */
+ buf[4] = 0x02; /* Current capacity */
return 12;
}
@@ -2882,7 +2869,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
break;
case SC_MODE_SELECT_10:
- fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+ fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
(1<<1) | (3<<7), 0,
"MODE SELECT(10)")) == 0)
@@ -2898,7 +2885,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
break;
case SC_MODE_SENSE_10:
- fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+ fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
(1<<1) | (1<<2) | (3<<7), 0,
"MODE SENSE(10)")) == 0)
@@ -2923,7 +2910,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
break;
case SC_READ_10:
- fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+ fsg->data_size_from_cmnd =
+ get_unaligned_be16(&fsg->cmnd[7]) << 9;
if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"READ(10)")) == 0)
@@ -2931,7 +2919,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
break;
case SC_READ_12:
- fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+ fsg->data_size_from_cmnd =
+ get_unaligned_be32(&fsg->cmnd[6]) << 9;
if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"READ(12)")) == 0)
@@ -2949,7 +2938,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
case SC_READ_HEADER:
if (!mod_data.cdrom)
goto unknown_cmnd;
- fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+ fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
(3<<7) | (0x1f<<1), 1,
"READ HEADER")) == 0)
@@ -2959,7 +2948,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
case SC_READ_TOC:
if (!mod_data.cdrom)
goto unknown_cmnd;
- fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+ fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
(7<<6) | (1<<1), 1,
"READ TOC")) == 0)
@@ -2967,7 +2956,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
break;
case SC_READ_FORMAT_CAPACITIES:
- fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+ fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
(3<<7), 1,
"READ FORMAT CAPACITIES")) == 0)
@@ -3025,7 +3014,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
break;
case SC_WRITE_10:
- fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+ fsg->data_size_from_cmnd =
+ get_unaligned_be16(&fsg->cmnd[7]) << 9;
if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"WRITE(10)")) == 0)
@@ -3033,7 +3023,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
break;
case SC_WRITE_12:
- fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+ fsg->data_size_from_cmnd =
+ get_unaligned_be32(&fsg->cmnd[6]) << 9;
if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"WRITE(12)")) == 0)
diff --git a/drivers/usb/gadget/fsl_mx3_udc.c b/drivers/usb/gadget/fsl_mx3_udc.c
new file mode 100644
index 00000000000..4bc2bf3d602
--- /dev/null
+++ b/drivers/usb/gadget/fsl_mx3_udc.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2009
+ * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
+ *
+ * Description:
+ * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
+ * driver to function correctly on these systems.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fsl_devices.h>
+#include <linux/platform_device.h>
+
+static struct clk *mxc_ahb_clk;
+static struct clk *mxc_usb_clk;
+
+int fsl_udc_clk_init(struct platform_device *pdev)
+{
+ struct fsl_usb2_platform_data *pdata;
+ unsigned long freq;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+
+ mxc_ahb_clk = clk_get(&pdev->dev, "usb_ahb");
+ if (IS_ERR(mxc_ahb_clk))
+ return PTR_ERR(mxc_ahb_clk);
+
+ ret = clk_enable(mxc_ahb_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_enable(\"usb_ahb\") failed\n");
+ goto eenahb;
+ }
+
+ /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
+ mxc_usb_clk = clk_get(&pdev->dev, "usb");
+ if (IS_ERR(mxc_usb_clk)) {
+ dev_err(&pdev->dev, "clk_get(\"usb\") failed\n");
+ ret = PTR_ERR(mxc_usb_clk);
+ goto egusb;
+ }
+
+ freq = clk_get_rate(mxc_usb_clk);
+ if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
+ (freq < 59999000 || freq > 60001000)) {
+		dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
+		ret = -EINVAL;
+		goto eclkrate;
+ }
+
+ ret = clk_enable(mxc_usb_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_enable(\"usb_clk\") failed\n");
+ goto eenusb;
+ }
+
+ return 0;
+
+eenusb:
+eclkrate:
+ clk_put(mxc_usb_clk);
+ mxc_usb_clk = NULL;
+egusb:
+ clk_disable(mxc_ahb_clk);
+eenahb:
+ clk_put(mxc_ahb_clk);
+ return ret;
+}
+
+void fsl_udc_clk_finalize(struct platform_device *pdev)
+{
+ struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+
+ /* ULPI transceivers don't need usbpll */
+ if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
+ clk_disable(mxc_usb_clk);
+ clk_put(mxc_usb_clk);
+ mxc_usb_clk = NULL;
+ }
+}
+
+void fsl_udc_clk_release(void)
+{
+ if (mxc_usb_clk) {
+ clk_disable(mxc_usb_clk);
+ clk_put(mxc_usb_clk);
+ }
+ clk_disable(mxc_ahb_clk);
+ clk_put(mxc_ahb_clk);
+}
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_udc_core.c
index 9d7b95d4e3d..42a74b8a0bb 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -38,6 +38,7 @@
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/dmapool.h>
+#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -57,7 +58,9 @@ static const char driver_name[] = "fsl-usb2-udc";
static const char driver_desc[] = DRIVER_DESC;
static struct usb_dr_device *dr_regs;
+#ifndef CONFIG_ARCH_MXC
static struct usb_sys_interface *usb_sys_regs;
+#endif
/* it is initialized in probe() */
static struct fsl_udc *udc_controller = NULL;
@@ -174,10 +177,34 @@ static void nuke(struct fsl_ep *ep, int status)
static int dr_controller_setup(struct fsl_udc *udc)
{
- unsigned int tmp = 0, portctrl = 0, ctrl = 0;
+ unsigned int tmp, portctrl;
+#ifndef CONFIG_ARCH_MXC
+ unsigned int ctrl;
+#endif
unsigned long timeout;
#define FSL_UDC_RESET_TIMEOUT 1000
+ /* Config PHY interface */
+ portctrl = fsl_readl(&dr_regs->portsc1);
+ portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
+ switch (udc->phy_mode) {
+ case FSL_USB2_PHY_ULPI:
+ portctrl |= PORTSCX_PTS_ULPI;
+ break;
+ case FSL_USB2_PHY_UTMI_WIDE:
+ portctrl |= PORTSCX_PTW_16BIT;
+ /* fall through */
+ case FSL_USB2_PHY_UTMI:
+ portctrl |= PORTSCX_PTS_UTMI;
+ break;
+ case FSL_USB2_PHY_SERIAL:
+ portctrl |= PORTSCX_PTS_FSLS;
+ break;
+ default:
+ return -EINVAL;
+ }
+ fsl_writel(portctrl, &dr_regs->portsc1);
+
/* Stop and reset the usb controller */
tmp = fsl_readl(&dr_regs->usbcmd);
tmp &= ~USB_CMD_RUN_STOP;
@@ -215,31 +242,12 @@ static int dr_controller_setup(struct fsl_udc *udc)
udc->ep_qh, (int)tmp,
fsl_readl(&dr_regs->endpointlistaddr));
- /* Config PHY interface */
- portctrl = fsl_readl(&dr_regs->portsc1);
- portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
- switch (udc->phy_mode) {
- case FSL_USB2_PHY_ULPI:
- portctrl |= PORTSCX_PTS_ULPI;
- break;
- case FSL_USB2_PHY_UTMI_WIDE:
- portctrl |= PORTSCX_PTW_16BIT;
- /* fall through */
- case FSL_USB2_PHY_UTMI:
- portctrl |= PORTSCX_PTS_UTMI;
- break;
- case FSL_USB2_PHY_SERIAL:
- portctrl |= PORTSCX_PTS_FSLS;
- break;
- default:
- return -EINVAL;
- }
- fsl_writel(portctrl, &dr_regs->portsc1);
-
/* Config control enable i/o output, cpu endian register */
+#ifndef CONFIG_ARCH_MXC
ctrl = __raw_readl(&usb_sys_regs->control);
ctrl |= USB_CTRL_IOENB;
__raw_writel(ctrl, &usb_sys_regs->control);
+#endif
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
/* Turn on cache snooping hardware, since some PowerPC platforms
@@ -2043,6 +2051,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
size -= t;
next += t;
+#ifndef CONFIG_ARCH_MXC
tmp_reg = usb_sys_regs->snoop1;
t = scnprintf(next, size, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
size -= t;
@@ -2053,6 +2062,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
tmp_reg);
size -= t;
next += t;
+#endif
/* ------fsl_udc, fsl_ep, fsl_request structure information ----- */
ep = &udc->eps[0];
@@ -2263,14 +2273,21 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
goto err_kfree;
}
- dr_regs = ioremap(res->start, res->end - res->start + 1);
+ dr_regs = ioremap(res->start, resource_size(res));
if (!dr_regs) {
ret = -ENOMEM;
goto err_release_mem_region;
}
+#ifndef CONFIG_ARCH_MXC
usb_sys_regs = (struct usb_sys_interface *)
((u32)dr_regs + USB_DR_SYS_OFFSET);
+#endif
+
+ /* Initialize USB clocks */
+ ret = fsl_udc_clk_init(pdev);
+ if (ret < 0)
+ goto err_iounmap_noclk;
/* Read Device Controller Capability Parameters register */
dccparams = fsl_readl(&dr_regs->dccparams);
@@ -2308,6 +2325,8 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
* leave usbintr reg untouched */
dr_controller_setup(udc_controller);
+ fsl_udc_clk_finalize(pdev);
+
/* Setup gadget structure */
udc_controller->gadget.ops = &fsl_gadget_ops;
udc_controller->gadget.is_dualspeed = 1;
@@ -2362,6 +2381,8 @@ err_unregister:
err_free_irq:
free_irq(udc_controller->irq, udc_controller);
err_iounmap:
+ fsl_udc_clk_release();
+err_iounmap_noclk:
iounmap(dr_regs);
err_release_mem_region:
release_mem_region(res->start, res->end - res->start + 1);
@@ -2384,6 +2405,8 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
return -ENODEV;
udc_controller->done = &done;
+ fsl_udc_clk_release();
+
/* DR has been stopped in usb_gadget_unregister_driver() */
remove_proc_file();
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index e63ef12645f..20aeceed48c 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -563,4 +563,22 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
* 2 + ((windex & USB_DIR_IN) ? 1 : 0))
#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
+struct platform_device;
+#ifdef CONFIG_ARCH_MXC
+int fsl_udc_clk_init(struct platform_device *pdev);
+void fsl_udc_clk_finalize(struct platform_device *pdev);
+void fsl_udc_clk_release(void);
+#else
+static inline int fsl_udc_clk_init(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline void fsl_udc_clk_finalize(struct platform_device *pdev)
+{
+}
+static inline void fsl_udc_clk_release(void)
+{
+}
+#endif
+
#endif
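/*
 * Illustrative sketch (not from this patch): the clock hooks declared above
 * compile to no-op inline stubs when CONFIG_ARCH_MXC is unset, so the shared
 * probe path can call them unconditionally.  Error unwinding mirrors the
 * init order -- failures after fsl_udc_clk_init() must still release the
 * clocks, earlier failures must not.  example_later_setup() is a hypothetical
 * stand-in for the remaining probe steps.
 */
static int example_later_setup(struct platform_device *pdev)
{
	return 0;	/* ioremap, request_irq, ... in the real driver */
}

static int example_probe_order(struct platform_device *pdev)
{
	int ret;

	ret = fsl_udc_clk_init(pdev);	/* no-op stub on non-MXC builds */
	if (ret < 0)
		return ret;		/* nothing to undo yet */

	ret = example_later_setup(pdev);
	if (ret < 0)
		goto err_clk;		/* later failures must undo the clocks */

	fsl_udc_clk_finalize(pdev);
	return 0;

err_clk:
	fsl_udc_clk_release();
	return ret;
}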
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index ec6d439a2aa..8e0e9a0b736 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -137,6 +137,12 @@
#define gadget_is_musbhdrc(g) 0
#endif
+#ifdef CONFIG_USB_GADGET_LANGWELL
+#define gadget_is_langwell(g) (!strcmp("langwell_udc", (g)->name))
+#else
+#define gadget_is_langwell(g) 0
+#endif
+
/* from Montavista kernel (?) */
#ifdef CONFIG_USB_GADGET_MPC8272
#define gadget_is_mpc8272(g) !strcmp("mpc8272_udc", (g)->name)
@@ -231,6 +237,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
return 0x22;
else if (gadget_is_ci13xxx(gadget))
return 0x23;
+ else if (gadget_is_langwell(gadget))
+ return 0x24;
return -ENOENT;
}
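/*
 * Illustrative sketch (not from this patch): gadget drivers typically use
 * these gadget_is_*() helpers and usb_gadget_controller_number() in their
 * bind() path to fold the controller identity into the device descriptor.
 * "my_device_desc" is a hypothetical descriptor owned by the gadget driver.
 */
static void example_set_bcd_device(struct usb_gadget *gadget,
		struct usb_device_descriptor *my_device_desc)
{
	int num = usb_gadget_controller_number(gadget);

	if (gadget_is_langwell(gadget))
		pr_debug("running on the Langwell UDC\n");

	if (num >= 0)
		my_device_desc->bcdDevice = cpu_to_le16(0x0200 + num);
}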
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index de010c939db..112bb40a427 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -110,10 +110,10 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
return -EINVAL;
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- if (ep->num != (desc->bEndpointAddress & 0x0f))
+ if (ep->num != usb_endpoint_num(desc))
return -EINVAL;
- switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
break;
@@ -142,7 +142,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
/* ep1/ep2 dma direction is chosen early; it works in the other
* direction, with pio. be cautious with out-dma.
*/
- ep->is_in = (USB_DIR_IN & desc->bEndpointAddress) != 0;
+ ep->is_in = usb_endpoint_dir_in(desc);
if (ep->is_in) {
mode |= 1;
ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index 168658b4b4e..c52a681f376 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -415,6 +415,13 @@ static int write_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
u8 *buf;
int length, count, temp;
+ if (unlikely(__raw_readl(imx_ep->imx_usb->base +
+ USB_EP_STAT(EP_NO(imx_ep))) & EPSTAT_ZLPS)) {
+ D_TRX(imx_ep->imx_usb->dev, "<%s> zlp still queued in EP %s\n",
+ __func__, imx_ep->ep.name);
+ return -1;
+ }
+
buf = req->req.buf + req->req.actual;
prefetch(buf);
@@ -734,9 +741,12 @@ static struct usb_request *imx_ep_alloc_request
{
struct imx_request *req;
+ if (!usb_ep)
+ return NULL;
+
req = kzalloc(sizeof *req, gfp_flags);
- if (!req || !usb_ep)
- return 0;
+ if (!req)
+ return NULL;
INIT_LIST_HEAD(&req->queue);
req->in_use = 0;
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index d20937f28a1..7d33f50b587 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -384,9 +384,8 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
return value;
/* halt any endpoint by doing a "wrong direction" i/o call */
- if (data->desc.bEndpointAddress & USB_DIR_IN) {
- if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_ISOC)
+ if (usb_endpoint_dir_in(&data->desc)) {
+ if (usb_endpoint_xfer_isoc(&data->desc))
return -EINVAL;
DBG (data->dev, "%s halt\n", data->name);
spin_lock_irq (&data->dev->lock);
@@ -428,9 +427,8 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
return value;
/* halt any endpoint by doing a "wrong direction" i/o call */
- if (!(data->desc.bEndpointAddress & USB_DIR_IN)) {
- if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_ISOC)
+ if (!usb_endpoint_dir_in(&data->desc)) {
+ if (usb_endpoint_xfer_isoc(&data->desc))
return -EINVAL;
DBG (data->dev, "%s halt\n", data->name);
spin_lock_irq (&data->dev->lock);
@@ -691,7 +689,7 @@ ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
struct ep_data *epdata = iocb->ki_filp->private_data;
char *buf;
- if (unlikely(epdata->desc.bEndpointAddress & USB_DIR_IN))
+ if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
return -EINVAL;
buf = kmalloc(iocb->ki_left, GFP_KERNEL);
@@ -711,7 +709,7 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
size_t len = 0;
int i = 0;
- if (unlikely(!(epdata->desc.bEndpointAddress & USB_DIR_IN)))
+ if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
return -EINVAL;
buf = kmalloc(iocb->ki_left, GFP_KERNEL);
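/*
 * Illustrative sketch (not from this patch): the goku_udc and inode.c
 * conversions above rely on the descriptor helpers from <linux/usb/ch9.h>,
 * which wrap the same bitmask tests that were previously open-coded
 * (bEndpointAddress direction/number, bmAttributes transfer type).
 */
static bool example_is_in_isoc(const struct usb_endpoint_descriptor *d)
{
	/* equivalent to testing USB_DIR_IN and USB_ENDPOINT_XFERTYPE_MASK */
	return usb_endpoint_dir_in(d) && usb_endpoint_xfer_isoc(d);
}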
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
new file mode 100644
index 00000000000..6829d596135
--- /dev/null
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -0,0 +1,3373 @@
+/*
+ * Intel Langwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+
+/* #undef DEBUG */
+/* #undef VERBOSE */
+
+#if defined(CONFIG_USB_LANGWELL_OTG)
+#define OTG_TRANSCEIVER
+#endif
+
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include "langwell_udc.h"
+
+
+#define DRIVER_DESC "Intel Langwell USB Device Controller driver"
+#define DRIVER_VERSION "16 May 2009"
+
+static const char driver_name[] = "langwell_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+
+/* controller device global variable */
+static struct langwell_udc *the_controller;
+
+/* for endpoint 0 operations */
+static const struct usb_endpoint_descriptor
+langwell_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 0,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = EP0_MAX_PKT_SIZE,
+};
+
+
+/*-------------------------------------------------------------------------*/
+/* debugging */
+
+#ifdef DEBUG
+#define DBG(dev, fmt, args...) \
+ pr_debug("%s %s: " fmt , driver_name, \
+ pci_name(dev->pdev), ## args)
+#else
+#define DBG(dev, fmt, args...) \
+ do { } while (0)
+#endif /* DEBUG */
+
+
+#ifdef VERBOSE
+#define VDBG DBG
+#else
+#define VDBG(dev, fmt, args...) \
+ do { } while (0)
+#endif /* VERBOSE */
+
+
+#define ERROR(dev, fmt, args...) \
+ pr_err("%s %s: " fmt , driver_name, \
+ pci_name(dev->pdev), ## args)
+
+#define WARNING(dev, fmt, args...) \
+ pr_warning("%s %s: " fmt , driver_name, \
+ pci_name(dev->pdev), ## args)
+
+#define INFO(dev, fmt, args...) \
+ pr_info("%s %s: " fmt , driver_name, \
+ pci_name(dev->pdev), ## args)
+
+
+#ifdef VERBOSE
+static inline void print_all_registers(struct langwell_udc *dev)
+{
+ int i;
+
+ /* Capability Registers */
+ printk(KERN_DEBUG "Capability Registers (offset: "
+ "0x%04x, length: 0x%08x)\n",
+ CAP_REG_OFFSET,
+ (u32)sizeof(struct langwell_cap_regs));
+ printk(KERN_DEBUG "caplength=0x%02x\n",
+ readb(&dev->cap_regs->caplength));
+ printk(KERN_DEBUG "hciversion=0x%04x\n",
+ readw(&dev->cap_regs->hciversion));
+ printk(KERN_DEBUG "hcsparams=0x%08x\n",
+ readl(&dev->cap_regs->hcsparams));
+ printk(KERN_DEBUG "hccparams=0x%08x\n",
+ readl(&dev->cap_regs->hccparams));
+ printk(KERN_DEBUG "dciversion=0x%04x\n",
+ readw(&dev->cap_regs->dciversion));
+ printk(KERN_DEBUG "dccparams=0x%08x\n",
+ readl(&dev->cap_regs->dccparams));
+
+ /* Operational Registers */
+ printk(KERN_DEBUG "Operational Registers (offset: "
+ "0x%04x, length: 0x%08x)\n",
+ OP_REG_OFFSET,
+ (u32)sizeof(struct langwell_op_regs));
+ printk(KERN_DEBUG "extsts=0x%08x\n",
+ readl(&dev->op_regs->extsts));
+ printk(KERN_DEBUG "extintr=0x%08x\n",
+ readl(&dev->op_regs->extintr));
+ printk(KERN_DEBUG "usbcmd=0x%08x\n",
+ readl(&dev->op_regs->usbcmd));
+ printk(KERN_DEBUG "usbsts=0x%08x\n",
+ readl(&dev->op_regs->usbsts));
+ printk(KERN_DEBUG "usbintr=0x%08x\n",
+ readl(&dev->op_regs->usbintr));
+ printk(KERN_DEBUG "frindex=0x%08x\n",
+ readl(&dev->op_regs->frindex));
+ printk(KERN_DEBUG "ctrldssegment=0x%08x\n",
+ readl(&dev->op_regs->ctrldssegment));
+ printk(KERN_DEBUG "deviceaddr=0x%08x\n",
+ readl(&dev->op_regs->deviceaddr));
+ printk(KERN_DEBUG "endpointlistaddr=0x%08x\n",
+ readl(&dev->op_regs->endpointlistaddr));
+ printk(KERN_DEBUG "ttctrl=0x%08x\n",
+ readl(&dev->op_regs->ttctrl));
+ printk(KERN_DEBUG "burstsize=0x%08x\n",
+ readl(&dev->op_regs->burstsize));
+ printk(KERN_DEBUG "txfilltuning=0x%08x\n",
+ readl(&dev->op_regs->txfilltuning));
+ printk(KERN_DEBUG "txttfilltuning=0x%08x\n",
+ readl(&dev->op_regs->txttfilltuning));
+ printk(KERN_DEBUG "ic_usb=0x%08x\n",
+ readl(&dev->op_regs->ic_usb));
+ printk(KERN_DEBUG "ulpi_viewport=0x%08x\n",
+ readl(&dev->op_regs->ulpi_viewport));
+ printk(KERN_DEBUG "configflag=0x%08x\n",
+ readl(&dev->op_regs->configflag));
+ printk(KERN_DEBUG "portsc1=0x%08x\n",
+ readl(&dev->op_regs->portsc1));
+ printk(KERN_DEBUG "devlc=0x%08x\n",
+ readl(&dev->op_regs->devlc));
+ printk(KERN_DEBUG "otgsc=0x%08x\n",
+ readl(&dev->op_regs->otgsc));
+ printk(KERN_DEBUG "usbmode=0x%08x\n",
+ readl(&dev->op_regs->usbmode));
+ printk(KERN_DEBUG "endptnak=0x%08x\n",
+ readl(&dev->op_regs->endptnak));
+ printk(KERN_DEBUG "endptnaken=0x%08x\n",
+ readl(&dev->op_regs->endptnaken));
+ printk(KERN_DEBUG "endptsetupstat=0x%08x\n",
+ readl(&dev->op_regs->endptsetupstat));
+ printk(KERN_DEBUG "endptprime=0x%08x\n",
+ readl(&dev->op_regs->endptprime));
+ printk(KERN_DEBUG "endptflush=0x%08x\n",
+ readl(&dev->op_regs->endptflush));
+ printk(KERN_DEBUG "endptstat=0x%08x\n",
+ readl(&dev->op_regs->endptstat));
+ printk(KERN_DEBUG "endptcomplete=0x%08x\n",
+ readl(&dev->op_regs->endptcomplete));
+
+ for (i = 0; i < dev->ep_max / 2; i++) {
+ printk(KERN_DEBUG "endptctrl[%d]=0x%08x\n",
+ i, readl(&dev->op_regs->endptctrl[i]));
+ }
+}
+#endif /* VERBOSE */
+
+
+/*-------------------------------------------------------------------------*/
+
+#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
+
+#define is_in(ep) (((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \
+ USB_DIR_IN) : ((ep)->desc->bEndpointAddress \
+ & USB_DIR_IN) == USB_DIR_IN)
+
+
+#ifdef DEBUG
+static char *type_string(u8 bmAttributes)
+{
+ switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_BULK:
+ return "bulk";
+ case USB_ENDPOINT_XFER_ISOC:
+ return "iso";
+ case USB_ENDPOINT_XFER_INT:
+ return "int";
+ }
+
+ return "control";
+}
+#endif
+
+
+/* configure endpoint control registers */
+static void ep_reset(struct langwell_ep *ep, unsigned char ep_num,
+ unsigned char is_in, unsigned char ep_type)
+{
+ struct langwell_udc *dev;
+ u32 endptctrl;
+
+ dev = ep->dev;
+ VDBG(dev, "---> %s()\n", __func__);
+
+ endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+ if (is_in) { /* TX */
+ if (ep_num)
+ endptctrl |= EPCTRL_TXR;
+ endptctrl |= EPCTRL_TXE;
+ endptctrl |= ep_type << EPCTRL_TXT_SHIFT;
+ } else { /* RX */
+ if (ep_num)
+ endptctrl |= EPCTRL_RXR;
+ endptctrl |= EPCTRL_RXE;
+ endptctrl |= ep_type << EPCTRL_RXT_SHIFT;
+ }
+
+ writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+ VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* reset ep0 dQH and endptctrl */
+static void ep0_reset(struct langwell_udc *dev)
+{
+ struct langwell_ep *ep;
+ int i;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ /* ep0 in and out */
+ for (i = 0; i < 2; i++) {
+ ep = &dev->ep[i];
+ ep->dev = dev;
+
+ /* ep0 dQH */
+ ep->dqh = &dev->ep_dqh[i];
+
+ /* configure ep0 endpoint capabilities in dQH */
+ ep->dqh->dqh_ios = 1;
+ ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE;
+
+ /* FIXME: enable ep0-in HW zero length termination select */
+ if (is_in(ep))
+ ep->dqh->dqh_zlt = 0;
+ ep->dqh->dqh_mult = 0;
+
+ /* configure ep0 control registers */
+ ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL);
+ }
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoints operations */
+
+/* configure endpoint, making it usable */
+static int langwell_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct langwell_udc *dev;
+ struct langwell_ep *ep;
+ u16 max = 0;
+ unsigned long flags;
+ int retval = 0;
+ unsigned char zlt, ios = 0, mult = 0;
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if (!_ep || !desc || ep->desc
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ max = le16_to_cpu(desc->wMaxPacketSize);
+
+ /*
+ * disable HW zero length termination select
+ * the driver handles zero-length packets through req->req.zero
+ */
+ zlt = 1;
+
+ /*
+ * sanity check type, direction, address, and then
+ * initialize the endpoint capabilities fields in dQH
+ */
+ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ ios = 1;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if ((dev->gadget.speed == USB_SPEED_HIGH
+ && max != 512)
+ || (dev->gadget.speed == USB_SPEED_FULL
+ && max > 64)) {
+ goto done;
+ }
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ if (strstr(ep->ep.name, "-iso")) /* a "-bulk" named ep is ok for int */
+ goto done;
+
+ switch (dev->gadget.speed) {
+ case USB_SPEED_HIGH:
+ if (max <= 1024)
+ break;
+ case USB_SPEED_FULL:
+ if (max <= 64)
+ break;
+ default:
+ if (max <= 8)
+ break;
+ goto done;
+ }
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (strstr(ep->ep.name, "-bulk")
+ || strstr(ep->ep.name, "-int"))
+ goto done;
+
+ switch (dev->gadget.speed) {
+ case USB_SPEED_HIGH:
+ if (max <= 1024)
+ break;
+ case USB_SPEED_FULL:
+ if (max <= 1023)
+ break;
+ default:
+ goto done;
+ }
+ /*
+ * FIXME:
+ * calculate transactions needed for high bandwidth iso
+ */
+ mult = (unsigned char)(1 + ((max >> 11) & 0x03));
+ max = max & 0x7ff; /* bits 10:0 */
+ /* 3 transactions at most */
+ if (mult > 3)
+ goto done;
+ break;
+ default:
+ goto done;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* configure endpoint capabilities in dQH */
+ ep->dqh->dqh_ios = ios;
+ ep->dqh->dqh_mpl = cpu_to_le16(max);
+ ep->dqh->dqh_zlt = zlt;
+ ep->dqh->dqh_mult = mult;
+
+ ep->ep.maxpacket = max;
+ ep->desc = desc;
+ ep->stopped = 0;
+ ep->ep_num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+
+ /* ep_type */
+ ep->ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ /* configure endpoint control registers */
+ ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type);
+
+ DBG(dev, "enabled %s (ep%d%s-%s), max %04x\n",
+ _ep->name,
+ ep->ep_num,
+ DIR_STRING(desc->bEndpointAddress),
+ type_string(desc->bmAttributes),
+ max);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+done:
+ VDBG(dev, "<--- %s()\n", __func__);
+ return retval;
+}
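/*
 * Illustrative sketch (not from this patch): decoding a high-bandwidth
 * isochronous wMaxPacketSize the way langwell_ep_enable() does above.
 * Bits 12:11 encode the additional transactions per microframe and bits
 * 10:0 the packet size, so 0x1400 means 3 transactions of 1024 bytes.
 */
static void example_decode_iso_maxpacket(u16 wmaxpacketsize)
{
	unsigned mult = 1 + ((wmaxpacketsize >> 11) & 0x03);
	unsigned max  = wmaxpacketsize & 0x7ff;

	pr_debug("iso ep: %u transaction(s) of %u bytes per microframe\n",
			mult, max);
}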
+
+
+/*-------------------------------------------------------------------------*/
+
+/* retire a request */
+static void done(struct langwell_ep *ep, struct langwell_request *req,
+ int status)
+{
+ struct langwell_udc *dev = ep->dev;
+ unsigned stopped = ep->stopped;
+ struct langwell_dtd *curr_dtd, *next_dtd;
+ int i;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ /* remove the req from ep->queue */
+ list_del_init(&req->queue);
+
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ /* free dTD for the request */
+ next_dtd = req->head;
+ for (i = 0; i < req->dtd_count; i++) {
+ curr_dtd = next_dtd;
+ if (i != req->dtd_count - 1)
+ next_dtd = curr_dtd->next_dtd_virt;
+ dma_pool_free(dev->dtd_pool, curr_dtd, curr_dtd->dtd_dma);
+ }
+
+ if (req->mapped) {
+ dma_unmap_single(&dev->pdev->dev, req->req.dma, req->req.length,
+ is_in(ep) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+ req->req.dma = DMA_ADDR_INVALID;
+ req->mapped = 0;
+ } else
+ dma_sync_single_for_cpu(&dev->pdev->dev, req->req.dma,
+ req->req.length,
+ is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (status != -ESHUTDOWN)
+ DBG(dev, "complete %s, req %p, stat %d, len %u/%u\n",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
+
+ /* don't modify queue heads during completion callback */
+ ep->stopped = 1;
+
+ spin_unlock(&dev->lock);
+ /* complete routine from gadget driver */
+ if (req->req.complete)
+ req->req.complete(&ep->ep, &req->req);
+
+ spin_lock(&dev->lock);
+ ep->stopped = stopped;
+
+ VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+static void langwell_ep_fifo_flush(struct usb_ep *_ep);
+
+/* delete all endpoint requests, called with spinlock held */
+static void nuke(struct langwell_ep *ep, int status)
+{
+ /* called with spinlock held */
+ ep->stopped = 1;
+
+ /* endpoint fifo flush */
+ if (ep->desc)
+ langwell_ep_fifo_flush(&ep->ep);
+
+ while (!list_empty(&ep->queue)) {
+ struct langwell_request *req = NULL;
+ req = list_entry(ep->queue.next, struct langwell_request,
+ queue);
+ done(ep, req, status);
+ }
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoint is no longer usable */
+static int langwell_ep_disable(struct usb_ep *_ep)
+{
+ struct langwell_ep *ep;
+ unsigned long flags;
+ struct langwell_udc *dev;
+ int ep_num;
+ u32 endptctrl;
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if (!_ep || !ep->desc)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* disable endpoint control register */
+ ep_num = ep->ep_num;
+ endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+ if (is_in(ep))
+ endptctrl &= ~EPCTRL_TXE;
+ else
+ endptctrl &= ~EPCTRL_RXE;
+ writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+ /* nuke all pending requests (does flush) */
+ nuke(ep, -ESHUTDOWN);
+
+ ep->desc = NULL;
+ ep->stopped = 1;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ DBG(dev, "disabled %s\n", _ep->name);
+ VDBG(dev, "<--- %s()\n", __func__);
+
+ return 0;
+}
+
+
+/* allocate a request object to use with this endpoint */
+static struct usb_request *langwell_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct langwell_ep *ep;
+ struct langwell_udc *dev;
+ struct langwell_request *req = NULL;
+
+ if (!_ep)
+ return NULL;
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+ VDBG(dev, "---> %s()\n", __func__);
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ req->req.dma = DMA_ADDR_INVALID;
+ INIT_LIST_HEAD(&req->queue);
+
+ VDBG(dev, "alloc request for %s\n", _ep->name);
+ VDBG(dev, "<--- %s()\n", __func__);
+ return &req->req;
+}
+
+
+/* free a request object */
+static void langwell_free_request(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct langwell_ep *ep;
+ struct langwell_udc *dev;
+ struct langwell_request *req = NULL;
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if (!_ep || !_req)
+ return;
+
+ req = container_of(_req, struct langwell_request, req);
+ WARN_ON(!list_empty(&req->queue));
+
+ kfree(req);
+
+ VDBG(dev, "free request for %s\n", _ep->name);
+ VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* queue dTD and PRIME endpoint */
+static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
+{
+ u32 bit_mask, usbcmd, endptstat, dtd_dma;
+ u8 dtd_status;
+ int i;
+ struct langwell_dqh *dqh;
+ struct langwell_udc *dev;
+
+ dev = ep->dev;
+ VDBG(dev, "---> %s()\n", __func__);
+
+ i = ep->ep_num * 2 + is_in(ep);
+ dqh = &dev->ep_dqh[i];
+
+ if (ep->ep_num)
+ VDBG(dev, "%s\n", ep->name);
+ else
+ /* ep0 */
+ VDBG(dev, "%s-%s\n", ep->name, is_in(ep) ? "in" : "out");
+
+ VDBG(dev, "ep_dqh[%d] addr: 0x%08x\n", i, (u32)&(dev->ep_dqh[i]));
+
+ bit_mask = is_in(ep) ?
+ (1 << (ep->ep_num + 16)) : (1 << (ep->ep_num));
+
+ VDBG(dev, "bit_mask = 0x%08x\n", bit_mask);
+
+ /* check if the pipe is empty */
+ if (!(list_empty(&ep->queue))) {
+ /* add dTD to the end of linked list */
+ struct langwell_request *lastreq;
+ lastreq = list_entry(ep->queue.prev,
+ struct langwell_request, queue);
+
+ lastreq->tail->dtd_next =
+ cpu_to_le32(req->head->dtd_dma & DTD_NEXT_MASK);
+
+ /* read prime bit, if 1 goto out */
+ if (readl(&dev->op_regs->endptprime) & bit_mask)
+ goto out;
+
+ do {
+ /* set ATDTW bit in USBCMD */
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ writel(usbcmd | CMD_ATDTW, &dev->op_regs->usbcmd);
+
+ /* read correct status bit */
+ endptstat = readl(&dev->op_regs->endptstat) & bit_mask;
+
+ } while (!(readl(&dev->op_regs->usbcmd) & CMD_ATDTW));
+
+ /* write ATDTW bit to 0 */
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ writel(usbcmd & ~CMD_ATDTW, &dev->op_regs->usbcmd);
+
+ if (endptstat)
+ goto out;
+ }
+
+ /* write dQH next pointer and terminate bit to 0 */
+ dtd_dma = req->head->dtd_dma & DTD_NEXT_MASK;
+ dqh->dtd_next = cpu_to_le32(dtd_dma);
+
+ /* clear active and halt bit */
+ dtd_status = (u8) ~(DTD_STS_ACTIVE | DTD_STS_HALTED);
+ dqh->dtd_status &= dtd_status;
+ VDBG(dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);
+
+ /* write 1 to endptprime register to PRIME endpoint */
+ bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num);
+ VDBG(dev, "endprime bit_mask = 0x%08x\n", bit_mask);
+ writel(bit_mask, &dev->op_regs->endptprime);
+out:
+ VDBG(dev, "<--- %s()\n", __func__);
+ return 0;
+}
+
+
+/* fill in the dTD structure to build a transfer descriptor */
+static struct langwell_dtd *build_dtd(struct langwell_request *req,
+ unsigned *length, dma_addr_t *dma, int *is_last)
+{
+ u32 buf_ptr;
+ struct langwell_dtd *dtd;
+ struct langwell_udc *dev;
+ int i;
+
+ dev = req->ep->dev;
+ VDBG(dev, "---> %s()\n", __func__);
+
+ /* the maximum transfer length, up to 16k bytes */
+ *length = min(req->req.length - req->req.actual,
+ (unsigned)DTD_MAX_TRANSFER_LENGTH);
+
+ /* create dTD dma_pool resource */
+ dtd = dma_pool_alloc(dev->dtd_pool, GFP_KERNEL, dma);
+ if (dtd == NULL)
+ return dtd;
+ dtd->dtd_dma = *dma;
+
+ /* initialize buffer page pointers */
+ buf_ptr = (u32)(req->req.dma + req->req.actual);
+ for (i = 0; i < 5; i++)
+ dtd->dtd_buf[i] = cpu_to_le32(buf_ptr + i * PAGE_SIZE);
+
+ req->req.actual += *length;
+
+ /* fill in total bytes with transfer size */
+ dtd->dtd_total = cpu_to_le16(*length);
+ VDBG(dev, "dtd->dtd_total = %d\n", dtd->dtd_total);
+
+ /* set is_last flag if req->req.zero is set or not */
+ if (req->req.zero) {
+ if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+ *is_last = 1;
+ else
+ *is_last = 0;
+ } else if (req->req.length == req->req.actual) {
+ *is_last = 1;
+ } else
+ *is_last = 0;
+
+ if (*is_last == 0)
+ VDBG(dev, "multi-dtd request!\n");
+
+ /* set interrupt on complete bit for the last dTD */
+ if (*is_last && !req->req.no_interrupt)
+ dtd->dtd_ioc = 1;
+
+ /* set multiplier override 0 for non-ISO and non-TX endpoint */
+ dtd->dtd_multo = 0;
+
+ /* set the active bit of status field to 1 */
+ dtd->dtd_status = DTD_STS_ACTIVE;
+ VDBG(dev, "dtd->dtd_status = 0x%02x\n", dtd->dtd_status);
+
+ VDBG(dev, "length = %d, dma addr= 0x%08x\n", *length, (int)*dma);
+ VDBG(dev, "<--- %s()\n", __func__);
+ return dtd;
+}
+
+
+/* generate dTD linked list for a request */
+static int req_to_dtd(struct langwell_request *req)
+{
+ unsigned count;
+ int is_last, is_first = 1;
+ struct langwell_dtd *dtd, *last_dtd = NULL;
+ struct langwell_udc *dev;
+ dma_addr_t dma;
+
+ dev = req->ep->dev;
+ VDBG(dev, "---> %s()\n", __func__);
+ do {
+ dtd = build_dtd(req, &count, &dma, &is_last);
+ if (dtd == NULL)
+ return -ENOMEM;
+
+ if (is_first) {
+ is_first = 0;
+ req->head = dtd;
+ } else {
+ last_dtd->dtd_next = cpu_to_le32(dma);
+ last_dtd->next_dtd_virt = dtd;
+ }
+ last_dtd = dtd;
+ req->dtd_count++;
+ } while (!is_last);
+
+ /* set terminate bit to 1 for the last dTD */
+ dtd->dtd_next = DTD_TERM;
+
+ req->tail = dtd;
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return 0;
+}
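/*
 * Illustrative sketch (not from this patch): req_to_dtd() above chops a
 * request into dTDs of at most DTD_MAX_TRANSFER_LENGTH bytes each (16 KB,
 * per the comment in build_dtd()), linked through dtd_next with the
 * terminate bit set only on the last one.  A 40 KB transfer therefore
 * becomes three dTDs of 16 KB, 16 KB and 8 KB.
 */
static unsigned example_dtd_count(unsigned length)
{
	/* a zero-length request still consumes one dTD */
	if (length == 0)
		return 1;
	return DIV_ROUND_UP(length, DTD_MAX_TRANSFER_LENGTH);
}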
+
+/*-------------------------------------------------------------------------*/
+
+/* queue (submit) an I/O request to an endpoint */
+static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct langwell_request *req;
+ struct langwell_ep *ep;
+ struct langwell_udc *dev;
+ unsigned long flags;
+ int is_iso = 0, zlflag = 0;
+
+ /* always require a cpu-view buffer */
+ req = container_of(_req, struct langwell_request, req);
+ ep = container_of(_ep, struct langwell_ep, ep);
+
+ if (!_req || !_req->complete || !_req->buf
+ || !list_empty(&req->queue)) {
+ return -EINVAL;
+ }
+
+ if (unlikely(!_ep || !ep->desc))
+ return -EINVAL;
+
+ dev = ep->dev;
+ req->ep = ep;
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if ((ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_ISOC) {
+ if (req->req.length > ep->ep.maxpacket)
+ return -EMSGSIZE;
+ is_iso = 1;
+ }
+
+ if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
+ return -ESHUTDOWN;
+
+ /* set up dma mapping in case the caller didn't */
+ if (_req->dma == DMA_ADDR_INVALID) {
+ /* WORKAROUND: WARN_ON(size == 0) */
+ if (_req->length == 0) {
+ VDBG(dev, "req->length: 0->1\n");
+ zlflag = 1;
+ _req->length++;
+ }
+
+ _req->dma = dma_map_single(&dev->pdev->dev,
+ _req->buf, _req->length,
+ is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (zlflag && (_req->length == 1)) {
+ VDBG(dev, "req->length: 1->0\n");
+ zlflag = 0;
+ _req->length = 0;
+ }
+
+ req->mapped = 1;
+ VDBG(dev, "req->mapped = 1\n");
+ } else {
+ dma_sync_single_for_device(&dev->pdev->dev,
+ _req->dma, _req->length,
+ is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 0;
+ VDBG(dev, "req->mapped = 0\n");
+ }
+
+ DBG(dev, "%s queue req %p, len %u, buf %p, dma 0x%08x\n",
+ _ep->name,
+ _req, _req->length, _req->buf, _req->dma);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+ req->dtd_count = 0;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* build and put dTDs to endpoint queue */
+ if (!req_to_dtd(req)) {
+ queue_dtd(ep, req);
+ } else {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -ENOMEM;
+ }
+
+ /* update ep0 state */
+ if (ep->ep_num == 0)
+ dev->ep0_state = DATA_STATE_XMIT;
+
+ if (likely(req != NULL)) {
+ list_add_tail(&req->queue, &ep->queue);
+ VDBG(dev, "list_add_tail() \n");
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return 0;
+}
+
+
+/* dequeue (cancels, unlinks) an I/O request from an endpoint */
+static int langwell_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct langwell_ep *ep;
+ struct langwell_udc *dev;
+ struct langwell_request *req;
+ unsigned long flags;
+ int stopped, ep_num, retval = 0;
+ u32 endptctrl;
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if (!_ep || !ep->desc || !_req)
+ return -EINVAL;
+
+ if (!dev->driver)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ stopped = ep->stopped;
+
+ /* quiesce dma while we patch the queue */
+ ep->stopped = 1;
+ ep_num = ep->ep_num;
+
+ /* disable endpoint control register */
+ endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+ if (is_in(ep))
+ endptctrl &= ~EPCTRL_TXE;
+ else
+ endptctrl &= ~EPCTRL_RXE;
+ writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+ /* make sure it's still queued on this endpoint */
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->req == _req)
+ break;
+ }
+
+ if (&req->req != _req) {
+ retval = -EINVAL;
+ goto done;
+ }
+
+ /* queue head may be partially complete. */
+ if (ep->queue.next == &req->queue) {
+ DBG(dev, "unlink (%s) dma\n", _ep->name);
+ _req->status = -ECONNRESET;
+ langwell_ep_fifo_flush(&ep->ep);
+
+ /* not the last request in endpoint queue */
+ if (likely(ep->queue.next == &req->queue)) {
+ struct langwell_dqh *dqh;
+ struct langwell_request *next_req;
+
+ dqh = ep->dqh;
+ next_req = list_entry(req->queue.next,
+ struct langwell_request, queue);
+
+ /* point the dQH to the first dTD of next request */
+ writel((u32) next_req->head, &dqh->dqh_current);
+ }
+ } else {
+ struct langwell_request *prev_req;
+
+ prev_req = list_entry(req->queue.prev,
+ struct langwell_request, queue);
+ writel(readl(&req->tail->dtd_next),
+ &prev_req->tail->dtd_next);
+ }
+
+ done(ep, req, -ECONNRESET);
+
+done:
+ /* enable endpoint again */
+ endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+ if (is_in(ep))
+ endptctrl |= EPCTRL_TXE;
+ else
+ endptctrl |= EPCTRL_RXE;
+ writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+ ep->stopped = stopped;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return retval;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoint set/clear halt */
+static void ep_set_halt(struct langwell_ep *ep, int value)
+{
+ u32 endptctrl = 0;
+ int ep_num;
+ struct langwell_udc *dev = ep->dev;
+ VDBG(dev, "---> %s()\n", __func__);
+
+ ep_num = ep->ep_num;
+ endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+
+ /* value: 1 - set halt, 0 - clear halt */
+ if (value) {
+ /* set the stall bit */
+ if (is_in(ep))
+ endptctrl |= EPCTRL_TXS;
+ else
+ endptctrl |= EPCTRL_RXS;
+ } else {
+ /* clear the stall bit and reset data toggle */
+ if (is_in(ep)) {
+ endptctrl &= ~EPCTRL_TXS;
+ endptctrl |= EPCTRL_TXR;
+ } else {
+ endptctrl &= ~EPCTRL_RXS;
+ endptctrl |= EPCTRL_RXR;
+ }
+ }
+
+ writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+ VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* set the endpoint halt feature */
+static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ struct langwell_ep *ep;
+ struct langwell_udc *dev;
+ unsigned long flags;
+ int retval = 0;
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if (!_ep || !ep->desc)
+ return -EINVAL;
+
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ if (ep->desc && (ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_ISOC)
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /*
+ * an attempt to halt an IN ep will fail if any transfer requests
+ * are still queued
+ */
+ if (!list_empty(&ep->queue) && is_in(ep) && value) {
+ /* IN endpoint FIFO holds bytes */
+ DBG(dev, "%s FIFO holds bytes\n", _ep->name);
+ retval = -EAGAIN;
+ goto done;
+ }
+
+ /* endpoint set/clear halt */
+ if (ep->ep_num) {
+ ep_set_halt(ep, value);
+ } else { /* endpoint 0 */
+ dev->ep0_state = WAIT_FOR_SETUP;
+ dev->ep0_dir = USB_DIR_OUT;
+ }
+done:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ DBG(dev, "%s %s halt\n", _ep->name, value ? "set" : "clear");
+ VDBG(dev, "<--- %s()\n", __func__);
+ return retval;
+}
+
+
+/* set the halt feature and ignores clear requests */
+static int langwell_ep_set_wedge(struct usb_ep *_ep)
+{
+ struct langwell_ep *ep;
+ struct langwell_udc *dev;
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if (!_ep || !ep->desc)
+ return -EINVAL;
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return usb_ep_set_halt(_ep);
+}
+
+
+/* flush contents of a fifo */
+static void langwell_ep_fifo_flush(struct usb_ep *_ep)
+{
+ struct langwell_ep *ep;
+ struct langwell_udc *dev;
+ u32 flush_bit;
+ unsigned long timeout;
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+ dev = ep->dev;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if (!_ep || !ep->desc) {
+ VDBG(dev, "ep or ep->desc is NULL\n");
+ VDBG(dev, "<--- %s()\n", __func__);
+ return;
+ }
+
+ VDBG(dev, "%s-%s fifo flush\n", _ep->name, is_in(ep) ? "in" : "out");
+
+ /* flush endpoint buffer */
+ if (ep->ep_num == 0)
+ flush_bit = (1 << 16) | 1;
+ else if (is_in(ep))
+ flush_bit = 1 << (ep->ep_num + 16); /* TX */
+ else
+ flush_bit = 1 << ep->ep_num; /* RX */
+
+ /* wait until flush complete */
+ timeout = jiffies + FLUSH_TIMEOUT;
+ do {
+ writel(flush_bit, &dev->op_regs->endptflush);
+ while (readl(&dev->op_regs->endptflush)) {
+ if (time_after(jiffies, timeout)) {
+ ERROR(dev, "ep flush timeout\n");
+ goto done;
+ }
+ cpu_relax();
+ }
+ } while (readl(&dev->op_regs->endptstat) & flush_bit);
+done:
+ VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* endpoints operations structure */
+static const struct usb_ep_ops langwell_ep_ops = {
+
+ /* configure endpoint, making it usable */
+ .enable = langwell_ep_enable,
+
+ /* endpoint is no longer usable */
+ .disable = langwell_ep_disable,
+
+ /* allocate a request object to use with this endpoint */
+ .alloc_request = langwell_alloc_request,
+
+ /* free a request object */
+ .free_request = langwell_free_request,
+
+ /* queue (submit) an I/O request to an endpoint */
+ .queue = langwell_ep_queue,
+
+ /* dequeue (cancels, unlinks) an I/O request from an endpoint */
+ .dequeue = langwell_ep_dequeue,
+
+ /* set the endpoint halt feature */
+ .set_halt = langwell_ep_set_halt,
+
+ /* set the halt feature and ignores clear requests */
+ .set_wedge = langwell_ep_set_wedge,
+
+ /* flush contents of a fifo */
+ .fifo_flush = langwell_ep_fifo_flush,
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+/* device controller usb_gadget_ops structure */
+
+/* returns the current frame number */
+static int langwell_get_frame(struct usb_gadget *_gadget)
+{
+ struct langwell_udc *dev;
+ u16 retval;
+
+ if (!_gadget)
+ return -ENODEV;
+
+ dev = container_of(_gadget, struct langwell_udc, gadget);
+ VDBG(dev, "---> %s()\n", __func__);
+
+ retval = readl(&dev->op_regs->frindex) & FRINDEX_MASK;
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return retval;
+}
+
+
+/* tries to wake up the host connected to this gadget */
+static int langwell_wakeup(struct usb_gadget *_gadget)
+{
+ struct langwell_udc *dev;
+ u32 portsc1, devlc;
+ unsigned long flags;
+
+ if (!_gadget)
+ return 0;
+
+ dev = container_of(_gadget, struct langwell_udc, gadget);
+ VDBG(dev, "---> %s()\n", __func__);
+
+ /* Remote Wakeup feature not enabled by host */
+ if (!dev->remote_wakeup)
+ return -ENOTSUPP;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ portsc1 = readl(&dev->op_regs->portsc1);
+ if (!(portsc1 & PORTS_SUSP)) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+ }
+
+ /* LPM L1 to L0, remote wakeup */
+ if (dev->lpm && dev->lpm_state == LPM_L1) {
+ portsc1 |= PORTS_SLP;
+ writel(portsc1, &dev->op_regs->portsc1);
+ }
+
+ /* force port resume */
+ if (dev->usb_state == USB_STATE_SUSPENDED) {
+ portsc1 |= PORTS_FPR;
+ writel(portsc1, &dev->op_regs->portsc1);
+ }
+
+ /* exit PHY low power suspend */
+ devlc = readl(&dev->op_regs->devlc);
+ VDBG(dev, "devlc = 0x%08x\n", devlc);
+ devlc &= ~LPM_PHCD;
+ writel(devlc, &dev->op_regs->devlc);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return 0;
+}
+
+
+/* notify controller that VBUS is powered or not */
+static int langwell_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+ struct langwell_udc *dev;
+ unsigned long flags;
+ u32 usbcmd;
+
+ if (!_gadget)
+ return -ENODEV;
+
+ dev = container_of(_gadget, struct langwell_udc, gadget);
+ VDBG(dev, "---> %s()\n", __func__);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ VDBG(dev, "VBUS status: %s\n", is_active ? "on" : "off");
+
+ dev->vbus_active = (is_active != 0);
+ if (dev->driver && dev->softconnected && dev->vbus_active) {
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ usbcmd |= CMD_RUNSTOP;
+ writel(usbcmd, &dev->op_regs->usbcmd);
+ } else {
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ usbcmd &= ~CMD_RUNSTOP;
+ writel(usbcmd, &dev->op_regs->usbcmd);
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return 0;
+}
+
+
+/* constrain controller's VBUS power usage */
+static int langwell_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+ struct langwell_udc *dev;
+
+ if (!_gadget)
+ return -ENODEV;
+
+ dev = container_of(_gadget, struct langwell_udc, gadget);
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if (dev->transceiver) {
+ VDBG(dev, "otg_set_power\n");
+ VDBG(dev, "<--- %s()\n", __func__);
+ return otg_set_power(dev->transceiver, mA);
+ }
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return -ENOTSUPP;
+}
+
+
+/* D+ pullup, software-controlled connect/disconnect to USB host */
+static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
+{
+ struct langwell_udc *dev;
+ u32 usbcmd;
+ unsigned long flags;
+
+ if (!_gadget)
+ return -ENODEV;
+
+ dev = container_of(_gadget, struct langwell_udc, gadget);
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->softconnected = (is_on != 0);
+
+ if (dev->driver && dev->softconnected && dev->vbus_active) {
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ usbcmd |= CMD_RUNSTOP;
+ writel(usbcmd, &dev->op_regs->usbcmd);
+ } else {
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ usbcmd &= ~CMD_RUNSTOP;
+ writel(usbcmd, &dev->op_regs->usbcmd);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return 0;
+}
+
+
+/* device controller usb_gadget_ops structure */
+static const struct usb_gadget_ops langwell_ops = {
+
+ /* returns the current frame number */
+ .get_frame = langwell_get_frame,
+
+ /* tries to wake up the host connected to this gadget */
+ .wakeup = langwell_wakeup,
+
+ /* set the device selfpowered feature, always selfpowered */
+ /* .set_selfpowered = langwell_set_selfpowered, */
+
+ /* notify controller that VBUS is powered or not */
+ .vbus_session = langwell_vbus_session,
+
+ /* constrain controller's VBUS power usage */
+ .vbus_draw = langwell_vbus_draw,
+
+ /* D+ pullup, software-controlled connect/disconnect to USB host */
+ .pullup = langwell_pullup,
+};
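/*
 * Illustrative sketch (not from this patch): gadget drivers reach the ops
 * above through the inline wrappers in <linux/usb/gadget.h>; the comments
 * note which langwell op each wrapper ends up calling.
 */
static void example_use_gadget_ops(struct usb_gadget *gadget)
{
	usb_gadget_vbus_draw(gadget, 100);	/* langwell_vbus_draw() */
	usb_gadget_wakeup(gadget);		/* langwell_wakeup() */
	usb_gadget_disconnect(gadget);		/* langwell_pullup(g, 0) */
	usb_gadget_connect(gadget);		/* langwell_pullup(g, 1) */
}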
+
+
+/*-------------------------------------------------------------------------*/
+
+/* device controller operations */
+
+/* reset device controller */
+static int langwell_udc_reset(struct langwell_udc *dev)
+{
+ u32 usbcmd, usbmode, devlc, endpointlistaddr;
+ unsigned long timeout;
+
+ if (!dev)
+ return -EINVAL;
+
+ DBG(dev, "---> %s()\n", __func__);
+
+ /* set controller to stop state */
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ usbcmd &= ~CMD_RUNSTOP;
+ writel(usbcmd, &dev->op_regs->usbcmd);
+
+ /* reset device controller */
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ usbcmd |= CMD_RST;
+ writel(usbcmd, &dev->op_regs->usbcmd);
+
+ /* wait for reset to complete */
+ timeout = jiffies + RESET_TIMEOUT;
+ while (readl(&dev->op_regs->usbcmd) & CMD_RST) {
+ if (time_after(jiffies, timeout)) {
+ ERROR(dev, "device reset timeout\n");
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+
+ /* set controller to device mode */
+ usbmode = readl(&dev->op_regs->usbmode);
+ usbmode |= MODE_DEVICE;
+
+ /* turn setup lockout off, require setup tripwire in usbcmd */
+ usbmode |= MODE_SLOM;
+
+ writel(usbmode, &dev->op_regs->usbmode);
+ usbmode = readl(&dev->op_regs->usbmode);
+ VDBG(dev, "usbmode=0x%08x\n", usbmode);
+
+ /* Write-Clear setup status */
+ writel(0, &dev->op_regs->usbsts);
+
+ /* if USB LPM is supported, ACK all LPM tokens */
+ if (dev->lpm) {
+ devlc = readl(&dev->op_regs->devlc);
+ devlc &= ~LPM_STL; /* don't STALL LPM token */
+ devlc &= ~LPM_NYT_ACK; /* ACK LPM token */
+ writel(devlc, &dev->op_regs->devlc);
+ }
+
+ /* fill endpointlistaddr register */
+ endpointlistaddr = dev->ep_dqh_dma;
+ endpointlistaddr &= ENDPOINTLISTADDR_MASK;
+ writel(endpointlistaddr, &dev->op_regs->endpointlistaddr);
+
+ VDBG(dev, "dQH base (vir: %p, phy: 0x%08x), endpointlistaddr=0x%08x\n",
+ dev->ep_dqh, endpointlistaddr,
+ readl(&dev->op_regs->endpointlistaddr));
+ DBG(dev, "<--- %s()\n", __func__);
+ return 0;
+}
+
+
+/* reinitialize device controller endpoints */
+static int eps_reinit(struct langwell_udc *dev)
+{
+ struct langwell_ep *ep;
+ char name[14];
+ int i;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ /* initialize ep0 */
+ ep = &dev->ep[0];
+ ep->dev = dev;
+ strncpy(ep->name, "ep0", sizeof(ep->name));
+ ep->ep.name = ep->name;
+ ep->ep.ops = &langwell_ep_ops;
+ ep->stopped = 0;
+ ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
+ ep->ep_num = 0;
+ ep->desc = &langwell_ep0_desc;
+ INIT_LIST_HEAD(&ep->queue);
+
+ ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
+
+ /* initialize other endpoints */
+ for (i = 2; i < dev->ep_max; i++) {
+ ep = &dev->ep[i];
+ if (i % 2)
+ snprintf(name, sizeof(name), "ep%din", i / 2);
+ else
+ snprintf(name, sizeof(name), "ep%dout", i / 2);
+ ep->dev = dev;
+ strncpy(ep->name, name, sizeof(ep->name));
+ ep->ep.name = ep->name;
+
+ ep->ep.ops = &langwell_ep_ops;
+ ep->stopped = 0;
+ ep->ep.maxpacket = (unsigned short) ~0;
+ ep->ep_num = i / 2;
+
+ INIT_LIST_HEAD(&ep->queue);
+ list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+
+ ep->dqh = &dev->ep_dqh[i];
+ }
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return 0;
+}
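/*
 * Illustrative sketch (not from this patch): dev->ep[] and the dQH array
 * are laid out in OUT/IN pairs -- index = ep_num * 2 + direction, OUT in
 * the even slot and IN in the odd slot -- which is why eps_reinit() names
 * even entries "ep%dout" and odd entries "ep%din", and why queue_dtd()
 * computes i = ep->ep_num * 2 + is_in(ep).
 */
static unsigned example_dqh_index(unsigned ep_num, bool is_in)
{
	return ep_num * 2 + (is_in ? 1 : 0);
}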
+
+
+/* enable interrupt and set controller to run state */
+static void langwell_udc_start(struct langwell_udc *dev)
+{
+ u32 usbintr, usbcmd;
+ DBG(dev, "---> %s()\n", __func__);
+
+ /* enable interrupts */
+ usbintr = INTR_ULPIE /* ULPI */
+ | INTR_SLE /* suspend */
+ /* | INTR_SRE SOF received */
+ | INTR_URE /* USB reset */
+ | INTR_AAE /* async advance */
+ | INTR_SEE /* system error */
+ | INTR_FRE /* frame list rollover */
+ | INTR_PCE /* port change detect */
+ | INTR_UEE /* USB error interrupt */
+ | INTR_UE; /* USB interrupt */
+ writel(usbintr, &dev->op_regs->usbintr);
+
+ /* clear stopped bit */
+ dev->stopped = 0;
+
+ /* set controller to run */
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ usbcmd |= CMD_RUNSTOP;
+ writel(usbcmd, &dev->op_regs->usbcmd);
+
+ DBG(dev, "<--- %s()\n", __func__);
+ return;
+}
+
+
+/* disable interrupt and set controller to stop state */
+static void langwell_udc_stop(struct langwell_udc *dev)
+{
+ u32 usbcmd;
+
+ DBG(dev, "---> %s()\n", __func__);
+
+ /* disable all interrupts */
+ writel(0, &dev->op_regs->usbintr);
+
+ /* set stopped bit */
+ dev->stopped = 1;
+
+ /* set controller to stop state */
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ usbcmd &= ~CMD_RUNSTOP;
+ writel(usbcmd, &dev->op_regs->usbcmd);
+
+ DBG(dev, "<--- %s()\n", __func__);
+ return;
+}
+
+
+/* stop all USB activities */
+static void stop_activity(struct langwell_udc *dev,
+ struct usb_gadget_driver *driver)
+{
+ struct langwell_ep *ep;
+ DBG(dev, "---> %s()\n", __func__);
+
+ nuke(&dev->ep[0], -ESHUTDOWN);
+
+ list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+ nuke(ep, -ESHUTDOWN);
+ }
+
+ /* report disconnect; the driver is already quiesced */
+ if (driver) {
+ spin_unlock(&dev->lock);
+ driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+
+ DBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* device "function" sysfs attribute file */
+static ssize_t show_function(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct langwell_udc *dev = the_controller;
+
+ if (!dev->driver || !dev->driver->function
+ || strlen(dev->driver->function) > PAGE_SIZE)
+ return 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
+}
+static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
+
+
+/* device "langwell_udc" sysfs attribute file */
+static ssize_t show_langwell_udc(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct langwell_udc *dev = the_controller;
+ struct langwell_request *req;
+ struct langwell_ep *ep = NULL;
+ char *next;
+ unsigned size;
+ unsigned t;
+ unsigned i;
+ unsigned long flags;
+ u32 tmp_reg;
+
+ next = buf;
+ size = PAGE_SIZE;
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* driver basic information */
+ t = scnprintf(next, size,
+ DRIVER_DESC "\n"
+ "%s version: %s\n"
+ "Gadget driver: %s\n\n",
+ driver_name, DRIVER_VERSION,
+ dev->driver ? dev->driver->driver.name : "(none)");
+ size -= t;
+ next += t;
+
+ /* device registers */
+ tmp_reg = readl(&dev->op_regs->usbcmd);
+ t = scnprintf(next, size,
+ "USBCMD reg:\n"
+ "SetupTW: %d\n"
+ "Run/Stop: %s\n\n",
+ (tmp_reg & CMD_SUTW) ? 1 : 0,
+ (tmp_reg & CMD_RUNSTOP) ? "Run" : "Stop");
+ size -= t;
+ next += t;
+
+ tmp_reg = readl(&dev->op_regs->usbsts);
+ t = scnprintf(next, size,
+ "USB Status Reg:\n"
+ "Device Suspend: %d\n"
+ "Reset Received: %d\n"
+ "System Error: %s\n"
+ "USB Error Interrupt: %s\n\n",
+ (tmp_reg & STS_SLI) ? 1 : 0,
+ (tmp_reg & STS_URI) ? 1 : 0,
+ (tmp_reg & STS_SEI) ? "Error" : "No error",
+ (tmp_reg & STS_UEI) ? "Error detected" : "No error");
+ size -= t;
+ next += t;
+
+ tmp_reg = readl(&dev->op_regs->usbintr);
+ t = scnprintf(next, size,
+ "USB Intrrupt Enable Reg:\n"
+ "Sleep Enable: %d\n"
+ "SOF Received Enable: %d\n"
+ "Reset Enable: %d\n"
+ "System Error Enable: %d\n"
+ "Port Change Dectected Enable: %d\n"
+ "USB Error Intr Enable: %d\n"
+ "USB Intr Enable: %d\n\n",
+ (tmp_reg & INTR_SLE) ? 1 : 0,
+ (tmp_reg & INTR_SRE) ? 1 : 0,
+ (tmp_reg & INTR_URE) ? 1 : 0,
+ (tmp_reg & INTR_SEE) ? 1 : 0,
+ (tmp_reg & INTR_PCE) ? 1 : 0,
+ (tmp_reg & INTR_UEE) ? 1 : 0,
+ (tmp_reg & INTR_UE) ? 1 : 0);
+ size -= t;
+ next += t;
+
+ tmp_reg = readl(&dev->op_regs->frindex);
+ t = scnprintf(next, size,
+ "USB Frame Index Reg:\n"
+ "Frame Number is 0x%08x\n\n",
+ (tmp_reg & FRINDEX_MASK));
+ size -= t;
+ next += t;
+
+ tmp_reg = readl(&dev->op_regs->deviceaddr);
+ t = scnprintf(next, size,
+ "USB Device Address Reg:\n"
+ "Device Addr is 0x%x\n\n",
+ USBADR(tmp_reg));
+ size -= t;
+ next += t;
+
+ tmp_reg = readl(&dev->op_regs->endpointlistaddr);
+ t = scnprintf(next, size,
+ "USB Endpoint List Address Reg:\n"
+ "Endpoint List Pointer is 0x%x\n\n",
+ EPBASE(tmp_reg));
+ size -= t;
+ next += t;
+
+ tmp_reg = readl(&dev->op_regs->portsc1);
+ t = scnprintf(next, size,
+ "USB Port Status & Control Reg:\n"
+ "Port Reset: %s\n"
+ "Port Suspend Mode: %s\n"
+ "Over-current Change: %s\n"
+ "Port Enable/Disable Change: %s\n"
+ "Port Enabled/Disabled: %s\n"
+ "Current Connect Status: %s\n\n",
+ (tmp_reg & PORTS_PR) ? "Reset" : "Not Reset",
+ (tmp_reg & PORTS_SUSP) ? "Suspended" : "Not Suspended",
+ (tmp_reg & PORTS_OCC) ? "Detected" : "No",
+ (tmp_reg & PORTS_PEC) ? "Changed" : "Not Changed",
+ (tmp_reg & PORTS_PE) ? "Enabled" : "Disabled",
+ (tmp_reg & PORTS_CCS) ? "Attached" : "Not Attached");
+ size -= t;
+ next += t;
+
+ tmp_reg = readl(&dev->op_regs->devlc);
+ t = scnprintf(next, size,
+ "Device LPM Control Reg:\n"
+ "Parallel Transceiver : %d\n"
+ "Serial Transceiver : %d\n"
+ "Port Speed: %s\n"
+ "Port Force Full Speed Connenct: %s\n"
+ "PHY Low Power Suspend Clock Disable: %s\n"
+ "BmAttributes: %d\n\n",
+ LPM_PTS(tmp_reg),
+ (tmp_reg & LPM_STS) ? 1 : 0,
+ ({
+ char *s;
+ switch (LPM_PSPD(tmp_reg)) {
+ case LPM_SPEED_FULL:
+ s = "Full Speed"; break;
+ case LPM_SPEED_LOW:
+ s = "Low Speed"; break;
+ case LPM_SPEED_HIGH:
+ s = "High Speed"; break;
+ default:
+ s = "Unknown Speed"; break;
+ }
+ s;
+ }),
+ (tmp_reg & LPM_PFSC) ? "Force Full Speed" : "Not Force",
+ (tmp_reg & LPM_PHCD) ? "Disabled" : "Enabled",
+ LPM_BA(tmp_reg));
+ size -= t;
+ next += t;
+
+ tmp_reg = readl(&dev->op_regs->usbmode);
+ t = scnprintf(next, size,
+ "USB Mode Reg:\n"
+ "Controller Mode is : %s\n\n", ({
+ char *s;
+ switch (MODE_CM(tmp_reg)) {
+ case MODE_IDLE:
+ s = "Idle"; break;
+ case MODE_DEVICE:
+ s = "Device Controller"; break;
+ case MODE_HOST:
+ s = "Host Controller"; break;
+ default:
+ s = "None"; break;
+ }
+ s;
+ }));
+ size -= t;
+ next += t;
+
+ tmp_reg = readl(&dev->op_regs->endptsetupstat);
+ t = scnprintf(next, size,
+ "Endpoint Setup Status Reg:\n"
+ "SETUP on ep 0x%04x\n\n",
+ tmp_reg & SETUPSTAT_MASK);
+ size -= t;
+ next += t;
+
+ for (i = 0; i < dev->ep_max / 2; i++) {
+ tmp_reg = readl(&dev->op_regs->endptctrl[i]);
+ t = scnprintf(next, size, "EP Ctrl Reg [%d]: 0x%08x\n",
+ i, tmp_reg);
+ size -= t;
+ next += t;
+ }
+ tmp_reg = readl(&dev->op_regs->endptprime);
+ t = scnprintf(next, size, "EP Prime Reg: 0x%08x\n\n", tmp_reg);
+ size -= t;
+ next += t;
+
+ /* langwell_udc, langwell_ep, langwell_request structure information */
+ ep = &dev->ep[0];
+ t = scnprintf(next, size, "%s MaxPacketSize: 0x%x, ep_num: %d\n",
+ ep->ep.name, ep->ep.maxpacket, ep->ep_num);
+ size -= t;
+ next += t;
+
+ if (list_empty(&ep->queue)) {
+ t = scnprintf(next, size, "its req queue is empty\n\n");
+ size -= t;
+ next += t;
+ } else {
+ list_for_each_entry(req, &ep->queue, queue) {
+ t = scnprintf(next, size,
+ "req %p actual 0x%x length 0x%x buf %p\n",
+ &req->req, req->req.actual,
+ req->req.length, req->req.buf);
+ size -= t;
+ next += t;
+ }
+ }
+ /* other gadget->eplist ep */
+ list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+ if (ep->desc) {
+ t = scnprintf(next, size,
+ "\n%s MaxPacketSize: 0x%x, "
+ "ep_num: %d\n",
+ ep->ep.name, ep->ep.maxpacket,
+ ep->ep_num);
+ size -= t;
+ next += t;
+
+ if (list_empty(&ep->queue)) {
+ t = scnprintf(next, size,
+ "its req queue is empty\n\n");
+ size -= t;
+ next += t;
+ } else {
+ list_for_each_entry(req, &ep->queue, queue) {
+ t = scnprintf(next, size,
+ "req %p actual 0x%x length "
+ "0x%x buf %p\n",
+ &req->req, req->req.actual,
+ req->req.length, req->req.buf);
+ size -= t;
+ next += t;
+ }
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests. then usb traffic follows until a
+ * disconnect is reported. then a host may connect again, or
+ * the driver might get unbound.
+ */
+
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+ struct langwell_udc *dev = the_controller;
+ unsigned long flags;
+ int retval;
+
+ if (!dev)
+ return -ENODEV;
+
+ DBG(dev, "---> %s()\n", __func__);
+
+ if (dev->driver)
+ return -EBUSY;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* hook up the driver ... */
+ driver->driver.bus = NULL;
+ dev->driver = driver;
+ dev->gadget.dev.driver = &driver->driver;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ retval = driver->bind(&dev->gadget);
+ if (retval) {
+ DBG(dev, "bind to driver %s --> %d\n",
+ driver->driver.name, retval);
+ dev->driver = NULL;
+ dev->gadget.dev.driver = NULL;
+ return retval;
+ }
+
+ retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
+ if (retval)
+ goto err_unbind;
+
+ dev->usb_state = USB_STATE_ATTACHED;
+ dev->ep0_state = WAIT_FOR_SETUP;
+ dev->ep0_dir = USB_DIR_OUT;
+
+ /* enable interrupt and set controller to run state */
+ if (dev->got_irq)
+ langwell_udc_start(dev);
+
+ VDBG(dev, "After langwell_udc_start(), print all registers:\n");
+#ifdef VERBOSE
+ print_all_registers(dev);
+#endif
+
+ INFO(dev, "register driver: %s\n", driver->driver.name);
+ VDBG(dev, "<--- %s()\n", __func__);
+ return 0;
+
+err_unbind:
+ driver->unbind(&dev->gadget);
+ dev->gadget.dev.driver = NULL;
+ dev->driver = NULL;
+
+ DBG(dev, "<--- %s()\n", __func__);
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
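/*
 * Illustrative sketch (not from this patch): a minimal gadget driver of this
 * era registers itself through usb_gadget_register_driver(); the UDC then
 * calls bind() once and routes class/vendor control requests to setup().
 * The hypothetical callbacks below only show the registration flow.
 */
static int example_bind(struct usb_gadget *gadget)
{
	return 0;	/* allocate configurations, claim endpoints, ... */
}

static void example_unbind(struct usb_gadget *gadget)
{
}

static int example_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	return -EOPNOTSUPP;	/* delegate class/vendor requests here */
}

static void example_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver example_gadget_driver = {
	.function	= "example",
	.speed		= USB_SPEED_HIGH,
	.bind		= example_bind,
	.unbind		= example_unbind,
	.setup		= example_setup,
	.disconnect	= example_disconnect,
	.driver		= {
		.name	= "example_gadget",
	},
};

/* typically called from the gadget module's init routine: */
/* retval = usb_gadget_register_driver(&example_gadget_driver); */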
+
+
+/* unregister gadget driver */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ struct langwell_udc *dev = the_controller;
+ unsigned long flags;
+
+ if (!dev)
+ return -ENODEV;
+
+ DBG(dev, "---> %s()\n", __func__);
+
+ if (unlikely(!driver || !driver->bind || !driver->unbind))
+ return -EINVAL;
+
+ /* unbind OTG transceiver */
+ if (dev->transceiver)
+ (void)otg_set_peripheral(dev->transceiver, 0);
+
+ /* disable interrupt and set controller to stop state */
+ langwell_udc_stop(dev);
+
+ dev->usb_state = USB_STATE_ATTACHED;
+ dev->ep0_state = WAIT_FOR_SETUP;
+ dev->ep0_dir = USB_DIR_OUT;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* stop all usb activities */
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+ stop_activity(dev, driver);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* unbind gadget driver */
+ driver->unbind(&dev->gadget);
+ dev->gadget.dev.driver = NULL;
+ dev->driver = NULL;
+
+ device_remove_file(&dev->pdev->dev, &dev_attr_function);
+
+ INFO(dev, "unregistered driver '%s'\n", driver->driver.name);
+ DBG(dev, "<--- %s()\n", __func__);
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * setup tripwire is used as a semaphore to ensure that the setup data
+ * payload is extracted from a dQH without being corrupted
+ */
+static void setup_tripwire(struct langwell_udc *dev)
+{
+ u32 usbcmd,
+ endptsetupstat;
+ unsigned long timeout;
+ struct langwell_dqh *dqh;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ /* ep0 OUT dQH */
+ dqh = &dev->ep_dqh[EP_DIR_OUT];
+
+ /* Write-Clear endptsetupstat */
+ endptsetupstat = readl(&dev->op_regs->endptsetupstat);
+ writel(endptsetupstat, &dev->op_regs->endptsetupstat);
+
+ /* wait until endptsetupstat is cleared */
+ timeout = jiffies + SETUPSTAT_TIMEOUT;
+ while (readl(&dev->op_regs->endptsetupstat)) {
+ if (time_after(jiffies, timeout)) {
+ ERROR(dev, "setup_tripwire timeout\n");
+ break;
+ }
+ cpu_relax();
+ }
+
+ /* repeat while a hazard exists: a new setup packet may arrive mid-copy */
+ do {
+ /* set setup tripwire bit */
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ writel(usbcmd | CMD_SUTW, &dev->op_regs->usbcmd);
+
+ /* copy the setup packet to local buffer */
+ memcpy(&dev->local_setup_buff, &dqh->dqh_setup, 8);
+ } while (!(readl(&dev->op_regs->usbcmd) & CMD_SUTW));
+
+ /* Write-Clear setup tripwire bit */
+ usbcmd = readl(&dev->op_regs->usbcmd);
+ writel(usbcmd & ~CMD_SUTW, &dev->op_regs->usbcmd);
+
+ VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* protocol ep0 stall, will automatically be cleared on new transaction */
+static void ep0_stall(struct langwell_udc *dev)
+{
+ u32 endptctrl;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ /* set TX and RX to stall */
+ endptctrl = readl(&dev->op_regs->endptctrl[0]);
+ endptctrl |= EPCTRL_TXS | EPCTRL_RXS;
+ writel(endptctrl, &dev->op_regs->endptctrl[0]);
+
+ /* update ep0 state */
+ dev->ep0_state = WAIT_FOR_SETUP;
+ dev->ep0_dir = USB_DIR_OUT;
+
+ VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* PRIME a status phase for ep0 */
+static int prime_status_phase(struct langwell_udc *dev, int dir)
+{
+ struct langwell_request *req;
+ struct langwell_ep *ep;
+ int status = 0;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if (dir == EP_DIR_IN)
+ dev->ep0_dir = USB_DIR_IN;
+ else
+ dev->ep0_dir = USB_DIR_OUT;
+
+ ep = &dev->ep[0];
+ dev->ep0_state = WAIT_FOR_OUT_STATUS;
+
+ req = dev->status_req;
+
+ req->ep = ep;
+ req->req.length = 0;
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ req->req.complete = NULL;
+ req->dtd_count = 0;
+
+ if (!req_to_dtd(req))
+ status = queue_dtd(ep, req);
+ else
+ return -ENOMEM;
+
+ if (status)
+ ERROR(dev, "can't queue ep0 status request\n");
+
+ list_add_tail(&req->queue, &ep->queue);
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return status;
+}
+
+
+/* SET_ADDRESS request routine */
+static void set_address(struct langwell_udc *dev, u16 value,
+ u16 index, u16 length)
+{
+ VDBG(dev, "---> %s()\n", __func__);
+
+ /* save the new address to device struct */
+ dev->dev_addr = (u8) value;
+ VDBG(dev, "dev->dev_addr = %d\n", dev->dev_addr);
+
+ /* update usb state */
+ dev->usb_state = USB_STATE_ADDRESS;
+
+ /* STATUS phase */
+ if (prime_status_phase(dev, EP_DIR_IN))
+ ep0_stall(dev);
+
+ VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* return endpoint by windex */
+static struct langwell_ep *get_ep_by_windex(struct langwell_udc *dev,
+ u16 wIndex)
+{
+ struct langwell_ep *ep;
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+ return &dev->ep[0];
+
+ list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+ u8 bEndpointAddress;
+ if (!ep->desc)
+ continue;
+
+ bEndpointAddress = ep->desc->bEndpointAddress;
+ if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
+ continue;
+
+ if ((wIndex & USB_ENDPOINT_NUMBER_MASK)
+ == (bEndpointAddress & USB_ENDPOINT_NUMBER_MASK))
+ return ep;
+ }
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return NULL;
+}
+
+
+/* return whether endpoint is stalled, 0: not stalled; 1: stalled */
+static int ep_is_stall(struct langwell_ep *ep)
+{
+ struct langwell_udc *dev = ep->dev;
+ u32 endptctrl;
+ int retval;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ endptctrl = readl(&dev->op_regs->endptctrl[ep->ep_num]);
+ if (is_in(ep))
+ retval = endptctrl & EPCTRL_TXS ? 1 : 0;
+ else
+ retval = endptctrl & EPCTRL_RXS ? 1 : 0;
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return retval;
+}
+
+
+/* GET_STATUS request routine */
+static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
+ u16 index, u16 length)
+{
+ struct langwell_request *req;
+ struct langwell_ep *ep;
+	u16	status_data = 0;	/* 16-bit status data in CPU byte order */
+ int status = 0;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ ep = &dev->ep[0];
+
+ if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+ /* get device status */
+ status_data = 1 << USB_DEVICE_SELF_POWERED;
+ status_data |= dev->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+ } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
+ /* get interface status */
+ status_data = 0;
+ } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
+ /* get endpoint status */
+ struct langwell_ep *epn;
+ epn = get_ep_by_windex(dev, index);
+ /* stall if endpoint doesn't exist */
+ if (!epn)
+ goto stall;
+
+ status_data = ep_is_stall(epn) << USB_ENDPOINT_HALT;
+ }
+
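+	/* the GET_STATUS reply is a 2-byte, little-endian word sent on ep0 IN */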
+ dev->ep0_dir = USB_DIR_IN;
+
+ /* borrow the per device status_req */
+ req = dev->status_req;
+
+	/* fill in the request structure */
+ *((u16 *) req->req.buf) = cpu_to_le16(status_data);
+ req->ep = ep;
+ req->req.length = 2;
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ req->req.complete = NULL;
+ req->dtd_count = 0;
+
+ /* prime the data phase */
+ if (!req_to_dtd(req))
+ status = queue_dtd(ep, req);
+ else /* no mem */
+ goto stall;
+
+ if (status) {
+ ERROR(dev, "response error on GET_STATUS request\n");
+ goto stall;
+ }
+
+ list_add_tail(&req->queue, &ep->queue);
+ dev->ep0_state = DATA_STATE_XMIT;
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return;
+stall:
+ ep0_stall(dev);
+ VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* setup packet interrupt handler */
+static void handle_setup_packet(struct langwell_udc *dev,
+ struct usb_ctrlrequest *setup)
+{
+ u16 wValue = le16_to_cpu(setup->wValue);
+ u16 wIndex = le16_to_cpu(setup->wIndex);
+ u16 wLength = le16_to_cpu(setup->wLength);
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ /* ep0 fifo flush */
+ nuke(&dev->ep[0], -ESHUTDOWN);
+
+ DBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+ setup->bRequestType, setup->bRequest,
+ wValue, wIndex, wLength);
+
+ /* RNDIS gadget delegate */
+ if ((setup->bRequestType == 0x21) && (setup->bRequest == 0x00)) {
+ /* USB_CDC_SEND_ENCAPSULATED_COMMAND */
+ goto delegate;
+ }
+
+ /* USB_CDC_GET_ENCAPSULATED_RESPONSE */
+ if ((setup->bRequestType == 0xa1) && (setup->bRequest == 0x01)) {
+ /* USB_CDC_GET_ENCAPSULATED_RESPONSE */
+ goto delegate;
+ }
+
+	/* We process some standard setup requests here */
+ switch (setup->bRequest) {
+ case USB_REQ_GET_STATUS:
+ DBG(dev, "SETUP: USB_REQ_GET_STATUS\n");
+ /* get status, DATA and STATUS phase */
+ if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+ != (USB_DIR_IN | USB_TYPE_STANDARD))
+ break;
+ get_status(dev, setup->bRequestType, wValue, wIndex, wLength);
+ goto end;
+
+ case USB_REQ_SET_ADDRESS:
+ DBG(dev, "SETUP: USB_REQ_SET_ADDRESS\n");
+ /* STATUS phase */
+ if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
+ | USB_RECIP_DEVICE))
+ break;
+ set_address(dev, wValue, wIndex, wLength);
+ goto end;
+
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ /* STATUS phase */
+ {
+ int rc = -EOPNOTSUPP;
+ if (setup->bRequest == USB_REQ_SET_FEATURE)
+ DBG(dev, "SETUP: USB_REQ_SET_FEATURE\n");
+ else if (setup->bRequest == USB_REQ_CLEAR_FEATURE)
+ DBG(dev, "SETUP: USB_REQ_CLEAR_FEATURE\n");
+
+ if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
+ == (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
+ struct langwell_ep *epn;
+ epn = get_ep_by_windex(dev, wIndex);
+ /* stall if endpoint doesn't exist */
+ if (!epn) {
+ ep0_stall(dev);
+ goto end;
+ }
+
+ if (wValue != 0 || wLength != 0
+ || epn->ep_num > dev->ep_max)
+ break;
+
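+			/* temporarily release dev->lock around the ep_set_halt() call */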
+ spin_unlock(&dev->lock);
+ rc = langwell_ep_set_halt(&epn->ep,
+ (setup->bRequest == USB_REQ_SET_FEATURE)
+ ? 1 : 0);
+ spin_lock(&dev->lock);
+
+ } else if ((setup->bRequestType & (USB_RECIP_MASK
+ | USB_TYPE_MASK)) == (USB_RECIP_DEVICE
+ | USB_TYPE_STANDARD)) {
+ if (!gadget_is_otg(&dev->gadget))
+ break;
+ else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) {
+ dev->gadget.b_hnp_enable = 1;
+#ifdef OTG_TRANSCEIVER
+ if (!dev->lotg->otg.default_a)
+ dev->lotg->hsm.b_hnp_enable = 1;
+#endif
+ } else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
+ dev->gadget.a_hnp_support = 1;
+ else if (setup->bRequest ==
+ USB_DEVICE_A_ALT_HNP_SUPPORT)
+ dev->gadget.a_alt_hnp_support = 1;
+ else
+ break;
+ rc = 0;
+ } else
+ break;
+
+ if (rc == 0) {
+ if (prime_status_phase(dev, EP_DIR_IN))
+ ep0_stall(dev);
+ }
+ goto end;
+ }
+
+ case USB_REQ_GET_DESCRIPTOR:
+ DBG(dev, "SETUP: USB_REQ_GET_DESCRIPTOR\n");
+ goto delegate;
+
+ case USB_REQ_SET_DESCRIPTOR:
+ DBG(dev, "SETUP: USB_REQ_SET_DESCRIPTOR unsupported\n");
+ goto delegate;
+
+ case USB_REQ_GET_CONFIGURATION:
+ DBG(dev, "SETUP: USB_REQ_GET_CONFIGURATION\n");
+ goto delegate;
+
+ case USB_REQ_SET_CONFIGURATION:
+ DBG(dev, "SETUP: USB_REQ_SET_CONFIGURATION\n");
+ goto delegate;
+
+ case USB_REQ_GET_INTERFACE:
+ DBG(dev, "SETUP: USB_REQ_GET_INTERFACE\n");
+ goto delegate;
+
+ case USB_REQ_SET_INTERFACE:
+ DBG(dev, "SETUP: USB_REQ_SET_INTERFACE\n");
+ goto delegate;
+
+ case USB_REQ_SYNCH_FRAME:
+ DBG(dev, "SETUP: USB_REQ_SYNCH_FRAME unsupported\n");
+ goto delegate;
+
+ default:
+ /* delegate USB standard requests to the gadget driver */
+ goto delegate;
+delegate:
+ /* USB requests handled by gadget */
+ if (wLength) {
+ /* DATA phase from gadget, STATUS phase from udc */
+ dev->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+ ? USB_DIR_IN : USB_DIR_OUT;
+ VDBG(dev, "dev->ep0_dir = 0x%x, wLength = %d\n",
+ dev->ep0_dir, wLength);
+ spin_unlock(&dev->lock);
+ if (dev->driver->setup(&dev->gadget,
+ &dev->local_setup_buff) < 0)
+ ep0_stall(dev);
+ spin_lock(&dev->lock);
+ dev->ep0_state = (setup->bRequestType & USB_DIR_IN)
+ ? DATA_STATE_XMIT : DATA_STATE_RECV;
+ } else {
+ /* no DATA phase, IN STATUS phase from gadget */
+ dev->ep0_dir = USB_DIR_IN;
+ VDBG(dev, "dev->ep0_dir = 0x%x, wLength = %d\n",
+ dev->ep0_dir, wLength);
+ spin_unlock(&dev->lock);
+ if (dev->driver->setup(&dev->gadget,
+ &dev->local_setup_buff) < 0)
+ ep0_stall(dev);
+ spin_lock(&dev->lock);
+ dev->ep0_state = WAIT_FOR_OUT_STATUS;
+ }
+ break;
+ }
+end:
+ VDBG(dev, "<--- %s()\n", __func__);
+ return;
+}
+
+
+/* transfer completion, process endpoint request and free the completed dTDs
+ * for this request
+ */
+static int process_ep_req(struct langwell_udc *dev, int index,
+ struct langwell_request *curr_req)
+{
+ struct langwell_dtd *curr_dtd;
+ struct langwell_dqh *curr_dqh;
+ int td_complete, actual, remaining_length;
+ int i, dir;
+ u8 dtd_status = 0;
+ int retval = 0;
+
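+	/* the dQH index encodes endpoint and direction: even = OUT (RX), odd = IN (TX) */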
+ curr_dqh = &dev->ep_dqh[index];
+ dir = index % 2;
+
+ curr_dtd = curr_req->head;
+ td_complete = 0;
+ actual = curr_req->req.length;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ for (i = 0; i < curr_req->dtd_count; i++) {
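+		/* dtd_total holds the bytes left untransferred; subtracting it
+		 * from the requested length accumulates the bytes actually moved */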
+ remaining_length = le16_to_cpu(curr_dtd->dtd_total);
+ actual -= remaining_length;
+
+		/* per-dTD execution status reported by the controller */
+ dtd_status = curr_dtd->dtd_status;
+
+ if (!dtd_status) {
+ /* transfers completed successfully */
+ if (!remaining_length) {
+ td_complete++;
+ VDBG(dev, "dTD transmitted successfully\n");
+ } else {
+ if (dir) {
+					VDBG(dev, "TX dTD has remaining data\n");
+ retval = -EPROTO;
+ break;
+
+ } else {
+ td_complete++;
+ break;
+ }
+ }
+ } else {
+ /* transfers completed with errors */
+ if (dtd_status & DTD_STS_ACTIVE) {
+ DBG(dev, "request not completed\n");
+ retval = 1;
+ return retval;
+ } else if (dtd_status & DTD_STS_HALTED) {
+ ERROR(dev, "dTD error %08x dQH[%d]\n",
+ dtd_status, index);
+ /* clear the errors and halt condition */
+ curr_dqh->dtd_status = 0;
+ retval = -EPIPE;
+ break;
+ } else if (dtd_status & DTD_STS_DBE) {
+ DBG(dev, "data buffer (overflow) error\n");
+ retval = -EPROTO;
+ break;
+ } else if (dtd_status & DTD_STS_TRE) {
+ DBG(dev, "transaction(ISO) error\n");
+ retval = -EILSEQ;
+ break;
+ } else
+ ERROR(dev, "unknown error (0x%x)!\n",
+ dtd_status);
+ }
+
+ if (i != curr_req->dtd_count - 1)
+ curr_dtd = (struct langwell_dtd *)
+ curr_dtd->next_dtd_virt;
+ }
+
+ if (retval)
+ return retval;
+
+ curr_req->req.actual = actual;
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return 0;
+}
+
+
+/* complete the DATA or STATUS phase of ep0; prime the status phase if needed */
+static void ep0_req_complete(struct langwell_udc *dev,
+ struct langwell_ep *ep0, struct langwell_request *req)
+{
+ u32 new_addr;
+ VDBG(dev, "---> %s()\n", __func__);
+
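+	/* the address from SET_ADDRESS only takes effect after its status
+	 * stage completes, so the hardware address register is written here */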
+ if (dev->usb_state == USB_STATE_ADDRESS) {
+ /* set the new address */
+ new_addr = (u32)dev->dev_addr;
+ writel(new_addr << USBADR_SHIFT, &dev->op_regs->deviceaddr);
+
+ new_addr = USBADR(readl(&dev->op_regs->deviceaddr));
+ VDBG(dev, "new_addr = %d\n", new_addr);
+ }
+
+ done(ep0, req, 0);
+
+ switch (dev->ep0_state) {
+ case DATA_STATE_XMIT:
+ /* receive status phase */
+ if (prime_status_phase(dev, EP_DIR_OUT))
+ ep0_stall(dev);
+ break;
+ case DATA_STATE_RECV:
+ /* send status phase */
+ if (prime_status_phase(dev, EP_DIR_IN))
+ ep0_stall(dev);
+ break;
+ case WAIT_FOR_OUT_STATUS:
+ dev->ep0_state = WAIT_FOR_SETUP;
+ break;
+ case WAIT_FOR_SETUP:
+		ERROR(dev, "unexpected ep0 packet\n");
+ break;
+ default:
+ ep0_stall(dev);
+ break;
+ }
+
+ VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB transfer completion interrupt */
+static void handle_trans_complete(struct langwell_udc *dev)
+{
+ u32 complete_bits;
+ int i, ep_num, dir, bit_mask, status;
+ struct langwell_ep *epn;
+ struct langwell_request *curr_req, *temp_req;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ complete_bits = readl(&dev->op_regs->endptcomplete);
+ VDBG(dev, "endptcomplete register: 0x%08x\n", complete_bits);
+
+ /* Write-Clear the bits in endptcomplete register */
+ writel(complete_bits, &dev->op_regs->endptcomplete);
+
+ if (!complete_bits) {
+ DBG(dev, "complete_bits = 0\n");
+ goto done;
+ }
+
+ for (i = 0; i < dev->ep_max; i++) {
+ ep_num = i / 2;
+ dir = i % 2;
+
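+		/* endptcomplete: OUT (RX) completions in bits 15:0, IN (TX) in bits 31:16 */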
+ bit_mask = 1 << (ep_num + 16 * dir);
+
+ if (!(complete_bits & bit_mask))
+ continue;
+
+		/* indexes 0 and 1 are ep0 OUT and IN; both map onto dev->ep[0] */
+ if (i == 1)
+ epn = &dev->ep[0];
+ else
+ epn = &dev->ep[i];
+
+ if (epn->name == NULL) {
+ WARNING(dev, "invalid endpoint\n");
+ continue;
+ }
+
+ if (i < 2)
+ /* ep0 in and out */
+ DBG(dev, "%s-%s transfer completed\n",
+ epn->name,
+ is_in(epn) ? "in" : "out");
+ else
+ DBG(dev, "%s transfer completed\n", epn->name);
+
+		/* process the request queue until an incomplete request is found */
+ list_for_each_entry_safe(curr_req, temp_req,
+ &epn->queue, queue) {
+ status = process_ep_req(dev, i, curr_req);
+ VDBG(dev, "%s req status: %d\n", epn->name, status);
+
+ if (status)
+ break;
+
+ /* write back status to req */
+ curr_req->req.status = status;
+
+ /* ep0 request completion */
+ if (ep_num == 0) {
+ ep0_req_complete(dev, epn, curr_req);
+ break;
+ } else {
+ done(epn, curr_req, status);
+ }
+ }
+ }
+done:
+ VDBG(dev, "<--- %s()\n", __func__);
+ return;
+}
+
+
+/* port change detect interrupt handler */
+static void handle_port_change(struct langwell_udc *dev)
+{
+ u32 portsc1, devlc;
+ u32 speed;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if (dev->bus_reset)
+ dev->bus_reset = 0;
+
+ portsc1 = readl(&dev->op_regs->portsc1);
+ devlc = readl(&dev->op_regs->devlc);
+ VDBG(dev, "portsc1 = 0x%08x, devlc = 0x%08x\n",
+ portsc1, devlc);
+
+ /* bus reset is finished */
+ if (!(portsc1 & PORTS_PR)) {
+ /* get the speed */
+ speed = LPM_PSPD(devlc);
+ switch (speed) {
+ case LPM_SPEED_HIGH:
+ dev->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case LPM_SPEED_FULL:
+ dev->gadget.speed = USB_SPEED_FULL;
+ break;
+ case LPM_SPEED_LOW:
+ dev->gadget.speed = USB_SPEED_LOW;
+ break;
+ default:
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+ VDBG(dev, "speed = %d, dev->gadget.speed = %d\n",
+ speed, dev->gadget.speed);
+ }
+
+ /* LPM L0 to L1 */
+ if (dev->lpm && dev->lpm_state == LPM_L0)
+ if (portsc1 & PORTS_SUSP && portsc1 & PORTS_SLP) {
+ INFO(dev, "LPM L0 to L1\n");
+ dev->lpm_state = LPM_L1;
+ }
+
+ /* LPM L1 to L0, force resume or remote wakeup finished */
+ if (dev->lpm && dev->lpm_state == LPM_L1)
+ if (!(portsc1 & PORTS_SUSP)) {
+ if (portsc1 & PORTS_SLP)
+ INFO(dev, "LPM L1 to L0, force resume\n");
+ else
+ INFO(dev, "LPM L1 to L0, remote wakeup\n");
+
+ dev->lpm_state = LPM_L0;
+ }
+
+ /* update USB state */
+ if (!dev->resume_state)
+ dev->usb_state = USB_STATE_DEFAULT;
+
+ VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB reset interrupt handler */
+static void handle_usb_reset(struct langwell_udc *dev)
+{
+ u32 deviceaddr,
+ endptsetupstat,
+ endptcomplete;
+ unsigned long timeout;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ /* Write-Clear the device address */
+ deviceaddr = readl(&dev->op_regs->deviceaddr);
+ writel(deviceaddr & ~USBADR_MASK, &dev->op_regs->deviceaddr);
+
+ dev->dev_addr = 0;
+
+ /* clear usb state */
+ dev->resume_state = 0;
+
+ /* LPM L1 to L0, reset */
+ if (dev->lpm)
+ dev->lpm_state = LPM_L0;
+
+ dev->ep0_dir = USB_DIR_OUT;
+ dev->ep0_state = WAIT_FOR_SETUP;
+ dev->remote_wakeup = 0; /* default to 0 on reset */
+ dev->gadget.b_hnp_enable = 0;
+ dev->gadget.a_hnp_support = 0;
+ dev->gadget.a_alt_hnp_support = 0;
+
+ /* Write-Clear all the setup token semaphores */
+ endptsetupstat = readl(&dev->op_regs->endptsetupstat);
+ writel(endptsetupstat, &dev->op_regs->endptsetupstat);
+
+ /* Write-Clear all the endpoint complete status bits */
+ endptcomplete = readl(&dev->op_regs->endptcomplete);
+ writel(endptcomplete, &dev->op_regs->endptcomplete);
+
+ /* wait until all endptprime bits cleared */
+ timeout = jiffies + PRIME_TIMEOUT;
+ while (readl(&dev->op_regs->endptprime)) {
+ if (time_after(jiffies, timeout)) {
+ ERROR(dev, "USB reset timeout\n");
+ break;
+ }
+ cpu_relax();
+ }
+
+ /* write 1s to endptflush register to clear any primed buffers */
+ writel((u32) ~0, &dev->op_regs->endptflush);
+
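+	/* if PR is still asserted the bus reset is in progress; otherwise
+	 * re-initialize the controller and restart it */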
+ if (readl(&dev->op_regs->portsc1) & PORTS_PR) {
+ VDBG(dev, "USB bus reset\n");
+		/* bus is resetting */
+ dev->bus_reset = 1;
+
+ /* reset all the queues, stop all USB activities */
+ stop_activity(dev, dev->driver);
+ dev->usb_state = USB_STATE_DEFAULT;
+ } else {
+ VDBG(dev, "device controller reset\n");
+ /* controller reset */
+ langwell_udc_reset(dev);
+
+ /* reset all the queues, stop all USB activities */
+ stop_activity(dev, dev->driver);
+
+ /* reset ep0 dQH and endptctrl */
+ ep0_reset(dev);
+
+ /* enable interrupt and set controller to run state */
+ langwell_udc_start(dev);
+
+ dev->usb_state = USB_STATE_ATTACHED;
+ }
+
+#ifdef OTG_TRANSCEIVER
+	/* refer to USB OTG 6.6.2.3: b_hnp_en is cleared */
+ if (!dev->lotg->otg.default_a)
+ dev->lotg->hsm.b_hnp_enable = 0;
+#endif
+
+ VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB bus suspend/resume interrupt */
+static void handle_bus_suspend(struct langwell_udc *dev)
+{
+ u32 devlc;
+ DBG(dev, "---> %s()\n", __func__);
+
+ dev->resume_state = dev->usb_state;
+ dev->usb_state = USB_STATE_SUSPENDED;
+
+#ifdef OTG_TRANSCEIVER
+ if (dev->lotg->otg.default_a) {
+ if (dev->lotg->hsm.b_bus_suspend_vld == 1) {
+ dev->lotg->hsm.b_bus_suspend = 1;
+ /* notify transceiver the state changes */
+ if (spin_trylock(&dev->lotg->wq_lock)) {
+ langwell_update_transceiver();
+ spin_unlock(&dev->lotg->wq_lock);
+ }
+ }
+ dev->lotg->hsm.b_bus_suspend_vld++;
+ } else {
+ if (!dev->lotg->hsm.a_bus_suspend) {
+ dev->lotg->hsm.a_bus_suspend = 1;
+ /* notify transceiver the state changes */
+ if (spin_trylock(&dev->lotg->wq_lock)) {
+ langwell_update_transceiver();
+ spin_unlock(&dev->lotg->wq_lock);
+ }
+ }
+ }
+#endif
+
+ /* report suspend to the driver */
+ if (dev->driver) {
+ if (dev->driver->suspend) {
+ spin_unlock(&dev->lock);
+ dev->driver->suspend(&dev->gadget);
+ spin_lock(&dev->lock);
+ DBG(dev, "suspend %s\n", dev->driver->driver.name);
+ }
+ }
+
+ /* enter PHY low power suspend */
+ devlc = readl(&dev->op_regs->devlc);
+ VDBG(dev, "devlc = 0x%08x\n", devlc);
+ devlc |= LPM_PHCD;
+ writel(devlc, &dev->op_regs->devlc);
+
+ DBG(dev, "<--- %s()\n", __func__);
+}
+
+
+static void handle_bus_resume(struct langwell_udc *dev)
+{
+ u32 devlc;
+ DBG(dev, "---> %s()\n", __func__);
+
+ dev->usb_state = dev->resume_state;
+ dev->resume_state = 0;
+
+ /* exit PHY low power suspend */
+ devlc = readl(&dev->op_regs->devlc);
+ VDBG(dev, "devlc = 0x%08x\n", devlc);
+ devlc &= ~LPM_PHCD;
+ writel(devlc, &dev->op_regs->devlc);
+
+#ifdef OTG_TRANSCEIVER
+ if (dev->lotg->otg.default_a == 0)
+ dev->lotg->hsm.a_bus_suspend = 0;
+#endif
+
+ /* report resume to the driver */
+ if (dev->driver) {
+ if (dev->driver->resume) {
+ spin_unlock(&dev->lock);
+ dev->driver->resume(&dev->gadget);
+ spin_lock(&dev->lock);
+ DBG(dev, "resume %s\n", dev->driver->driver.name);
+ }
+ }
+
+ DBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB device controller interrupt handler */
+static irqreturn_t langwell_irq(int irq, void *_dev)
+{
+ struct langwell_udc *dev = _dev;
+ u32 usbsts,
+ usbintr,
+ irq_sts,
+ portsc1;
+
+ VDBG(dev, "---> %s()\n", __func__);
+
+ if (dev->stopped) {
+ VDBG(dev, "handle IRQ_NONE\n");
+ VDBG(dev, "<--- %s()\n", __func__);
+ return IRQ_NONE;
+ }
+
+ spin_lock(&dev->lock);
+
+ /* USB status */
+ usbsts = readl(&dev->op_regs->usbsts);
+
+ /* USB interrupt enable */
+ usbintr = readl(&dev->op_regs->usbintr);
+
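+	/* only service status bits whose interrupts are currently enabled */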
+ irq_sts = usbsts & usbintr;
+ VDBG(dev, "usbsts = 0x%08x, usbintr = 0x%08x, irq_sts = 0x%08x\n",
+ usbsts, usbintr, irq_sts);
+
+ if (!irq_sts) {
+ VDBG(dev, "handle IRQ_NONE\n");
+ VDBG(dev, "<--- %s()\n", __func__);
+ spin_unlock(&dev->lock);
+ return IRQ_NONE;
+ }
+
+ /* Write-Clear interrupt status bits */
+ writel(irq_sts, &dev->op_regs->usbsts);
+
+ /* resume from suspend */
+ portsc1 = readl(&dev->op_regs->portsc1);
+ if (dev->usb_state == USB_STATE_SUSPENDED)
+ if (!(portsc1 & PORTS_SUSP))
+ handle_bus_resume(dev);
+
+ /* USB interrupt */
+ if (irq_sts & STS_UI) {
+ VDBG(dev, "USB interrupt\n");
+
+ /* setup packet received from ep0 */
+ if (readl(&dev->op_regs->endptsetupstat)
+ & EP0SETUPSTAT_MASK) {
+ VDBG(dev, "USB SETUP packet received interrupt\n");
+			/* setup tripwire semaphore */
+ setup_tripwire(dev);
+ handle_setup_packet(dev, &dev->local_setup_buff);
+ }
+
+ /* USB transfer completion */
+ if (readl(&dev->op_regs->endptcomplete)) {
+ VDBG(dev, "USB transfer completion interrupt\n");
+ handle_trans_complete(dev);
+ }
+ }
+
+ /* SOF received interrupt (for ISO transfer) */
+ if (irq_sts & STS_SRI) {
+ /* FIXME */
+ /* VDBG(dev, "SOF received interrupt\n"); */
+ }
+
+ /* port change detect interrupt */
+ if (irq_sts & STS_PCI) {
+ VDBG(dev, "port change detect interrupt\n");
+ handle_port_change(dev);
+ }
+
+	/* suspend interrupt */
+ if (irq_sts & STS_SLI) {
+ VDBG(dev, "suspend interrupt\n");
+ handle_bus_suspend(dev);
+ }
+
+ /* USB reset interrupt */
+ if (irq_sts & STS_URI) {
+ VDBG(dev, "USB reset interrupt\n");
+ handle_usb_reset(dev);
+ }
+
+ /* USB error or system error interrupt */
+ if (irq_sts & (STS_UEI | STS_SEI)) {
+ /* FIXME */
+ WARNING(dev, "error IRQ, irq_sts: %x\n", irq_sts);
+ }
+
+ spin_unlock(&dev->lock);
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return IRQ_HANDLED;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* release device structure */
+static void gadget_release(struct device *_dev)
+{
+ struct langwell_udc *dev = the_controller;
+
+ DBG(dev, "---> %s()\n", __func__);
+
+ complete(dev->done);
+
+ DBG(dev, "<--- %s()\n", __func__);
+ kfree(dev);
+}
+
+
+/* tear down the binding between this driver and the pci device */
+static void langwell_udc_remove(struct pci_dev *pdev)
+{
+ struct langwell_udc *dev = the_controller;
+
+ DECLARE_COMPLETION(done);
+
+ BUG_ON(dev->driver);
+ DBG(dev, "---> %s()\n", __func__);
+
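+	/* gadget_release() completes this once the last reference is dropped,
+	 * letting the wait_for_completion() at the end of remove finish */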
+ dev->done = &done;
+
+ /* free memory allocated in probe */
+ if (dev->dtd_pool)
+ dma_pool_destroy(dev->dtd_pool);
+
+ if (dev->status_req) {
+ kfree(dev->status_req->req.buf);
+ kfree(dev->status_req);
+ }
+
+ if (dev->ep_dqh)
+ dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
+ dev->ep_dqh, dev->ep_dqh_dma);
+
+ kfree(dev->ep);
+
+	/* disable IRQ handler */
+ if (dev->got_irq)
+ free_irq(pdev->irq, dev);
+
+#ifndef OTG_TRANSCEIVER
+ if (dev->cap_regs)
+ iounmap(dev->cap_regs);
+
+ if (dev->region)
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+
+ if (dev->enabled)
+ pci_disable_device(pdev);
+#else
+ if (dev->transceiver) {
+ otg_put_transceiver(dev->transceiver);
+ dev->transceiver = NULL;
+ dev->lotg = NULL;
+ }
+#endif
+
+ dev->cap_regs = NULL;
+
+ INFO(dev, "unbind\n");
+ DBG(dev, "<--- %s()\n", __func__);
+
+ device_unregister(&dev->gadget.dev);
+ device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
+
+#ifndef OTG_TRANSCEIVER
+ pci_set_drvdata(pdev, NULL);
+#endif
+
+ /* free dev, wait for the release() finished */
+ wait_for_completion(&done);
+
+ the_controller = NULL;
+}
+
+
+/*
+ * wrap this driver around the specified device, but
+ * don't respond over USB until a gadget driver binds to us.
+ */
+static int langwell_udc_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct langwell_udc *dev;
+#ifndef OTG_TRANSCEIVER
+ unsigned long resource, len;
+#endif
+ void __iomem *base = NULL;
+ size_t size;
+ int retval;
+
+ if (the_controller) {
+ dev_warn(&pdev->dev, "ignoring\n");
+ return -EBUSY;
+ }
+
+ /* alloc, and start init */
+ dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ if (dev == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ /* initialize device spinlock */
+ spin_lock_init(&dev->lock);
+
+ dev->pdev = pdev;
+ DBG(dev, "---> %s()\n", __func__);
+
+#ifdef OTG_TRANSCEIVER
+ /* PCI device is already enabled by otg_transceiver driver */
+ dev->enabled = 1;
+
+ /* mem region and register base */
+ dev->region = 1;
+ dev->transceiver = otg_get_transceiver();
+ dev->lotg = otg_to_langwell(dev->transceiver);
+ base = dev->lotg->regs;
+#else
+ pci_set_drvdata(pdev, dev);
+
+ /* now all the pci goodies ... */
+ if (pci_enable_device(pdev) < 0) {
+ retval = -ENODEV;
+ goto error;
+ }
+ dev->enabled = 1;
+
+ /* control register: BAR 0 */
+ resource = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+ if (!request_mem_region(resource, len, driver_name)) {
+ ERROR(dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto error;
+ }
+ dev->region = 1;
+
+ base = ioremap_nocache(resource, len);
+#endif
+ if (base == NULL) {
+ ERROR(dev, "can't map memory\n");
+ retval = -EFAULT;
+ goto error;
+ }
+
+ dev->cap_regs = (struct langwell_cap_regs __iomem *) base;
+ VDBG(dev, "dev->cap_regs: %p\n", dev->cap_regs);
+ dev->op_regs = (struct langwell_op_regs __iomem *)
+ (base + OP_REG_OFFSET);
+ VDBG(dev, "dev->op_regs: %p\n", dev->op_regs);
+
+ /* irq setup after old hardware is cleaned up */
+ if (!pdev->irq) {
+ ERROR(dev, "No IRQ. Check PCI setup!\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
+#ifndef OTG_TRANSCEIVER
+ INFO(dev, "irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n",
+ pdev->irq, resource, len, base);
+ /* enables bus-mastering for device dev */
+ pci_set_master(pdev);
+
+ if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
+ driver_name, dev) != 0) {
+ ERROR(dev, "request interrupt %d failed\n", pdev->irq);
+ retval = -EBUSY;
+ goto error;
+ }
+ dev->got_irq = 1;
+#endif
+
+ /* set stopped bit */
+ dev->stopped = 1;
+
+ /* capabilities and endpoint number */
+ dev->lpm = (readl(&dev->cap_regs->hccparams) & HCC_LEN) ? 1 : 0;
+ dev->dciversion = readw(&dev->cap_regs->dciversion);
+ dev->devcap = (readl(&dev->cap_regs->dccparams) & DEVCAP) ? 1 : 0;
+ VDBG(dev, "dev->lpm: %d\n", dev->lpm);
+ VDBG(dev, "dev->dciversion: 0x%04x\n", dev->dciversion);
+ VDBG(dev, "dccparams: 0x%08x\n", readl(&dev->cap_regs->dccparams));
+ VDBG(dev, "dev->devcap: %d\n", dev->devcap);
+ if (!dev->devcap) {
+ ERROR(dev, "can't support device mode\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
+ /* a pair of endpoints (out/in) for each address */
+ dev->ep_max = DEN(readl(&dev->cap_regs->dccparams)) * 2;
+ VDBG(dev, "dev->ep_max: %d\n", dev->ep_max);
+
+ /* allocate endpoints memory */
+ dev->ep = kzalloc(sizeof(struct langwell_ep) * dev->ep_max,
+ GFP_KERNEL);
+ if (!dev->ep) {
+ ERROR(dev, "allocate endpoints memory failed\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ /* allocate device dQH memory */
+ size = dev->ep_max * sizeof(struct langwell_dqh);
+ VDBG(dev, "orig size = %d\n", size);
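+	/* round the dQH area up to a whole multiple of DQH_ALIGNMENT */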
+ if (size < DQH_ALIGNMENT)
+ size = DQH_ALIGNMENT;
+ else if ((size % DQH_ALIGNMENT) != 0) {
+ size += DQH_ALIGNMENT + 1;
+ size &= ~(DQH_ALIGNMENT - 1);
+ }
+ dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
+ &dev->ep_dqh_dma, GFP_KERNEL);
+ if (!dev->ep_dqh) {
+ ERROR(dev, "allocate dQH memory failed\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ dev->ep_dqh_size = size;
+ VDBG(dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
+
+ /* initialize ep0 status request structure */
+ dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL);
+ if (!dev->status_req) {
+ ERROR(dev, "allocate status_req memory failed\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ INIT_LIST_HEAD(&dev->status_req->queue);
+
+	/* allocate a small amount of memory to get a valid buffer address */
+ dev->status_req->req.buf = kmalloc(8, GFP_KERNEL);
+ dev->status_req->req.dma = virt_to_phys(dev->status_req->req.buf);
+
+ dev->resume_state = USB_STATE_NOTATTACHED;
+ dev->usb_state = USB_STATE_POWERED;
+ dev->ep0_dir = USB_DIR_OUT;
+ dev->remote_wakeup = 0; /* default to 0 on reset */
+
+#ifndef OTG_TRANSCEIVER
+ /* reset device controller */
+ langwell_udc_reset(dev);
+#endif
+
+ /* initialize gadget structure */
+ dev->gadget.ops = &langwell_ops; /* usb_gadget_ops */
+ dev->gadget.ep0 = &dev->ep[0].ep; /* gadget ep0 */
+ INIT_LIST_HEAD(&dev->gadget.ep_list); /* ep_list */
+ dev->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
+ dev->gadget.is_dualspeed = 1; /* support dual speed */
+#ifdef OTG_TRANSCEIVER
+ dev->gadget.is_otg = 1; /* support otg mode */
+#endif
+
+ /* the "gadget" abstracts/virtualizes the controller */
+ dev_set_name(&dev->gadget.dev, "gadget");
+ dev->gadget.dev.parent = &pdev->dev;
+ dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+ dev->gadget.dev.release = gadget_release;
+ dev->gadget.name = driver_name; /* gadget name */
+
+ /* controller endpoints reinit */
+ eps_reinit(dev);
+
+#ifndef OTG_TRANSCEIVER
+ /* reset ep0 dQH and endptctrl */
+ ep0_reset(dev);
+#endif
+
+ /* create dTD dma_pool resource */
+ dev->dtd_pool = dma_pool_create("langwell_dtd",
+ &dev->pdev->dev,
+ sizeof(struct langwell_dtd),
+ DTD_ALIGNMENT,
+ DMA_BOUNDARY);
+
+ if (!dev->dtd_pool) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ /* done */
+ INFO(dev, "%s\n", driver_desc);
+ INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
+ INFO(dev, "Driver version: " DRIVER_VERSION "\n");
+ INFO(dev, "Support (max) %d endpoints\n", dev->ep_max);
+ INFO(dev, "Device interface version: 0x%04x\n", dev->dciversion);
+ INFO(dev, "Controller mode: %s\n", dev->devcap ? "Device" : "Host");
+ INFO(dev, "Support USB LPM: %s\n", dev->lpm ? "Yes" : "No");
+
+ VDBG(dev, "After langwell_udc_probe(), print all registers:\n");
+#ifdef VERBOSE
+ print_all_registers(dev);
+#endif
+
+ the_controller = dev;
+
+ retval = device_register(&dev->gadget.dev);
+ if (retval)
+ goto error;
+
+ retval = device_create_file(&pdev->dev, &dev_attr_langwell_udc);
+ if (retval)
+ goto error;
+
+ VDBG(dev, "<--- %s()\n", __func__);
+ return 0;
+
+error:
+ if (dev) {
+ DBG(dev, "<--- %s()\n", __func__);
+ langwell_udc_remove(pdev);
+ }
+
+ return retval;
+}
+
+
+/* device controller suspend */
+static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct langwell_udc *dev = the_controller;
+ u32 devlc;
+
+ DBG(dev, "---> %s()\n", __func__);
+
+ /* disable interrupt and set controller to stop state */
+ langwell_udc_stop(dev);
+
+	/* disable IRQ handler */
+ if (dev->got_irq)
+ free_irq(pdev->irq, dev);
+ dev->got_irq = 0;
+
+
+ /* save PCI state */
+ pci_save_state(pdev);
+
+ /* set device power state */
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ /* enter PHY low power suspend */
+ devlc = readl(&dev->op_regs->devlc);
+ VDBG(dev, "devlc = 0x%08x\n", devlc);
+ devlc |= LPM_PHCD;
+ writel(devlc, &dev->op_regs->devlc);
+
+ DBG(dev, "<--- %s()\n", __func__);
+ return 0;
+}
+
+
+/* device controller resume */
+static int langwell_udc_resume(struct pci_dev *pdev)
+{
+ struct langwell_udc *dev = the_controller;
+ u32 devlc;
+
+ DBG(dev, "---> %s()\n", __func__);
+
+ /* exit PHY low power suspend */
+ devlc = readl(&dev->op_regs->devlc);
+ VDBG(dev, "devlc = 0x%08x\n", devlc);
+ devlc &= ~LPM_PHCD;
+ writel(devlc, &dev->op_regs->devlc);
+
+ /* set device D0 power state */
+ pci_set_power_state(pdev, PCI_D0);
+
+ /* restore PCI state */
+ pci_restore_state(pdev);
+
+ /* enable IRQ handler */
+ if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED, driver_name, dev)
+ != 0) {
+ ERROR(dev, "request interrupt %d failed\n", pdev->irq);
+ return -1;
+ }
+ dev->got_irq = 1;
+
+ /* reset and start controller to run state */
+ if (dev->stopped) {
+ /* reset device controller */
+ langwell_udc_reset(dev);
+
+ /* reset ep0 dQH and endptctrl */
+ ep0_reset(dev);
+
+ /* start device if gadget is loaded */
+ if (dev->driver)
+ langwell_udc_start(dev);
+ }
+
+ /* reset USB status */
+ dev->usb_state = USB_STATE_ATTACHED;
+ dev->ep0_state = WAIT_FOR_SETUP;
+ dev->ep0_dir = USB_DIR_OUT;
+
+ DBG(dev, "<--- %s()\n", __func__);
+ return 0;
+}
+
+
+/* pci driver shutdown */
+static void langwell_udc_shutdown(struct pci_dev *pdev)
+{
+ struct langwell_udc *dev = the_controller;
+ u32 usbmode;
+
+ DBG(dev, "---> %s()\n", __func__);
+
+ /* reset controller mode to IDLE */
+ usbmode = readl(&dev->op_regs->usbmode);
+ DBG(dev, "usbmode = 0x%08x\n", usbmode);
+ usbmode &= (~3 | MODE_IDLE);
+ writel(usbmode, &dev->op_regs->usbmode);
+
+ DBG(dev, "<--- %s()\n", __func__);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct pci_device_id pci_ids[] = { {
+ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+ .class_mask = ~0,
+ .vendor = 0x8086,
+ .device = 0x0811,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+}, { /* end: all zeroes */ }
+};
+
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+
+static struct pci_driver langwell_pci_driver = {
+ .name = (char *) driver_name,
+ .id_table = pci_ids,
+
+ .probe = langwell_udc_probe,
+ .remove = langwell_udc_remove,
+
+ /* device controller suspend/resume */
+ .suspend = langwell_udc_suspend,
+ .resume = langwell_udc_resume,
+
+ .shutdown = langwell_udc_shutdown,
+};
+
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+
+static int __init init(void)
+{
+#ifdef OTG_TRANSCEIVER
+ return langwell_register_peripheral(&langwell_pci_driver);
+#else
+ return pci_register_driver(&langwell_pci_driver);
+#endif
+}
+module_init(init);
+
+
+static void __exit cleanup(void)
+{
+#ifdef OTG_TRANSCEIVER
+ return langwell_unregister_peripheral(&langwell_pci_driver);
+#else
+ pci_unregister_driver(&langwell_pci_driver);
+#endif
+}
+module_exit(cleanup);
+
diff --git a/drivers/usb/gadget/langwell_udc.h b/drivers/usb/gadget/langwell_udc.h
new file mode 100644
index 00000000000..9719934e1c0
--- /dev/null
+++ b/drivers/usb/gadget/langwell_udc.h
@@ -0,0 +1,228 @@
+/*
+ * Intel Langwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/usb/langwell_udc.h>
+
+#if defined(CONFIG_USB_LANGWELL_OTG)
+#include <linux/usb/langwell_otg.h>
+#endif
+
+
+/*-------------------------------------------------------------------------*/
+
+/* driver data structures and utilities */
+
+/*
+ * dTD: Device Endpoint Transfer Descriptor
+ * describes to the device controller the location and quantity of
+ * data to be sent/received for a given transfer
+ */
+struct langwell_dtd {
+ u32 dtd_next;
+/* bits 31:5, next transfer element pointer */
+#define DTD_NEXT(d) (((d)>>5)&0x7ffffff)
+#define DTD_NEXT_MASK (0x7ffffff << 5)
+/* terminate */
+#define DTD_TERM BIT(0)
+ /* bits 7:0, execution back states */
+ u32 dtd_status:8;
+#define DTD_STATUS(d) (((d)>>0)&0xff)
+#define DTD_STS_ACTIVE BIT(7) /* active */
+#define DTD_STS_HALTED BIT(6) /* halted */
+#define DTD_STS_DBE BIT(5) /* data buffer error */
+#define DTD_STS_TRE BIT(3) /* transaction error */
+ /* bits 9:8 */
+ u32 dtd_res0:2;
+	/* bits 11:10, multiplier override */
+ u32 dtd_multo:2;
+#define DTD_MULTO (BIT(11) | BIT(10))
+ /* bits 14:12 */
+ u32 dtd_res1:3;
+ /* bit 15, interrupt on complete */
+ u32 dtd_ioc:1;
+#define DTD_IOC BIT(15)
+ /* bits 30:16, total bytes */
+ u32 dtd_total:15;
+#define DTD_TOTAL(d) (((d)>>16)&0x7fff)
+#define DTD_MAX_TRANSFER_LENGTH 0x4000
+ /* bit 31 */
+ u32 dtd_res2:1;
+ /* dTD buffer pointer page 0 to 4 */
+ u32 dtd_buf[5];
+#define DTD_OFFSET_MASK 0xfff
+/* bits 31:12, buffer pointer */
+#define DTD_BUFFER(d) (((d)>>12)&0x3ff)
+/* bits 11:0, current offset */
+#define DTD_C_OFFSET(d) (((d)>>0)&0xfff)
+/* bits 10:0, frame number */
+#define DTD_FRAME(d) (((d)>>0)&0x7ff)
+
+ /* driver-private parts */
+
+ /* dtd dma address */
+ dma_addr_t dtd_dma;
+ /* next dtd virtual address */
+ struct langwell_dtd *next_dtd_virt;
+};
+
+
+/*
+ * dQH: Device Endpoint Queue Head
+ * describes where all transfers are managed
+ * 48-byte data structure, aligned on 64-byte boundary
+ *
+ * These are associated with dTD structure
+ */
+struct langwell_dqh {
+ /* endpoint capabilities and characteristics */
+ u32 dqh_res0:15; /* bits 14:0 */
+ u32 dqh_ios:1; /* bit 15, interrupt on setup */
+#define DQH_IOS BIT(15)
+ u32 dqh_mpl:11; /* bits 26:16, maximum packet length */
+#define DQH_MPL (0x7ff << 16)
+ u32 dqh_res1:2; /* bits 28:27 */
+ u32 dqh_zlt:1; /* bit 29, zero length termination */
+#define DQH_ZLT BIT(29)
+ u32 dqh_mult:2; /* bits 31:30 */
+#define DQH_MULT (BIT(30) | BIT(31))
+
+ /* current dTD pointer */
+ u32 dqh_current; /* locate the transfer in progress */
+#define DQH_C_DTD(e) \
+ (((e)>>5)&0x7ffffff) /* bits 31:5, current dTD pointer */
+
+ /* transfer overlay, hardware parts of a struct langwell_dtd */
+ u32 dtd_next;
+ u32 dtd_status:8; /* bits 7:0, execution back states */
+ u32 dtd_res0:2; /* bits 9:8 */
+	u32	dtd_multo:2;	/* bits 11:10, multiplier override */
+ u32 dtd_res1:3; /* bits 14:12 */
+ u32 dtd_ioc:1; /* bit 15, interrupt on complete */
+ u32 dtd_total:15; /* bits 30:16, total bytes */
+ u32 dtd_res2:1; /* bit 31 */
+ u32 dtd_buf[5]; /* dTD buffer pointer page 0 to 4 */
+
+ u32 dqh_res2;
+ struct usb_ctrlrequest dqh_setup; /* setup packet buffer */
+} __attribute__ ((aligned(64)));
+
+
+/* endpoint data structure */
+struct langwell_ep {
+ struct usb_ep ep;
+ dma_addr_t dma;
+ struct langwell_udc *dev;
+ unsigned long irqs;
+ struct list_head queue;
+ struct langwell_dqh *dqh;
+ const struct usb_endpoint_descriptor *desc;
+ char name[14];
+ unsigned stopped:1,
+ ep_type:2,
+ ep_num:8;
+};
+
+
+/* request data structure */
+struct langwell_request {
+ struct usb_request req;
+ struct langwell_dtd *dtd, *head, *tail;
+ struct langwell_ep *ep;
+ dma_addr_t dtd_dma;
+ struct list_head queue;
+ unsigned dtd_count;
+ unsigned mapped:1;
+};
+
+
+/* ep0 transfer state */
+enum ep0_state {
+ WAIT_FOR_SETUP,
+ DATA_STATE_XMIT,
+ DATA_STATE_NEED_ZLP,
+ WAIT_FOR_OUT_STATUS,
+ DATA_STATE_RECV,
+};
+
+
+/* device suspend state */
+enum lpm_state {
+ LPM_L0, /* on */
+ LPM_L1, /* LPM L1 sleep */
+ LPM_L2, /* suspend */
+ LPM_L3, /* off */
+};
+
+
+/* device data structure */
+struct langwell_udc {
+ /* each pci device provides one gadget, several endpoints */
+ struct usb_gadget gadget;
+ spinlock_t lock; /* device lock */
+ struct langwell_ep *ep;
+ struct usb_gadget_driver *driver;
+ struct otg_transceiver *transceiver;
+ u8 dev_addr;
+ u32 usb_state;
+ u32 resume_state;
+ u32 bus_reset;
+ enum lpm_state lpm_state;
+ enum ep0_state ep0_state;
+ u32 ep0_dir;
+ u16 dciversion;
+ unsigned ep_max;
+ unsigned devcap:1,
+ enabled:1,
+ region:1,
+ got_irq:1,
+ powered:1,
+ remote_wakeup:1,
+ rate:1,
+ is_reset:1,
+ softconnected:1,
+ vbus_active:1,
+ suspended:1,
+ stopped:1,
+ lpm:1; /* LPM capability */
+
+ /* pci state used to access those endpoints */
+ struct pci_dev *pdev;
+
+ /* Langwell otg transceiver */
+ struct langwell_otg *lotg;
+
+ /* control registers */
+ struct langwell_cap_regs __iomem *cap_regs;
+ struct langwell_op_regs __iomem *op_regs;
+
+ struct usb_ctrlrequest local_setup_buff;
+ struct langwell_dqh *ep_dqh;
+ size_t ep_dqh_size;
+ dma_addr_t ep_dqh_dma;
+
+ /* ep0 status request */
+ struct langwell_request *status_req;
+
+ /* dma pool */
+ struct dma_pool *dtd_pool;
+
+ /* make sure release() is done */
+ struct completion *done;
+};
+
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 8cc676ecbb2..1937d8c7b43 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -38,7 +38,6 @@
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
-#include <mach/pxa2xx-regs.h> /* FIXME: for PSSR */
#include <mach/udc.h>
#include "pxa27x_udc.h"
@@ -474,6 +473,23 @@ static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask)
}
/**
+ * ep_write_UDCCSR - set bits in UDCCSR
+ * @ep: udc endpoint
+ * @mask: bits to set in UDCCSR
+ *
+ * Sets bits in UDCCSR (UDCCSR0 and UDCCSRx).
+ *
+ * A specific case applies to ep0: the ACM bit is always set to 1, for
+ * SET_INTERFACE and SET_CONFIGURATION.
+ */
+static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask)
+{
+ if (is_ep0(ep))
+ mask |= UDCCSR0_ACM;
+ udc_ep_writel(ep, UDCCSR, mask);
+}
+
+/**
* ep_count_bytes_remain - get how many bytes in udc endpoint
* @ep: udc endpoint
*
@@ -861,7 +877,7 @@ static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req)
*buf++ = udc_ep_readl(ep, UDCDR);
req->req.actual += count;
- udc_ep_writel(ep, UDCCSR, UDCCSR_PC);
+ ep_write_UDCCSR(ep, UDCCSR_PC);
return count;
}
@@ -969,12 +985,12 @@ static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
if (udccsr & UDCCSR_PC) {
ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
udccsr);
- udc_ep_writel(ep, UDCCSR, UDCCSR_PC);
+ ep_write_UDCCSR(ep, UDCCSR_PC);
}
if (udccsr & UDCCSR_TRN) {
ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n",
udccsr);
- udc_ep_writel(ep, UDCCSR, UDCCSR_TRN);
+ ep_write_UDCCSR(ep, UDCCSR_TRN);
}
count = write_packet(ep, req, max);
@@ -996,7 +1012,7 @@ static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
}
if (is_short)
- udc_ep_writel(ep, UDCCSR, UDCCSR_SP);
+ ep_write_UDCCSR(ep, UDCCSR_SP);
/* requests complete when all IN data is in the FIFO */
if (is_last) {
@@ -1029,7 +1045,7 @@ static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
while (epout_has_pkt(ep)) {
count = read_packet(ep, req);
- udc_ep_writel(ep, UDCCSR, UDCCSR0_OPC);
+ ep_write_UDCCSR(ep, UDCCSR0_OPC);
inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
is_short = (count < ep->fifo_size);
@@ -1074,7 +1090,7 @@ static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
/* Sends either a short packet or a 0 length packet */
if (unlikely(is_short))
- udc_ep_writel(ep, UDCCSR, UDCCSR0_IPR);
+ ep_write_UDCCSR(ep, UDCCSR0_IPR);
ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n",
count, is_short ? "/S" : "", is_last ? "/L" : "",
@@ -1277,7 +1293,7 @@ static int pxa_ep_set_halt(struct usb_ep *_ep, int value)
/* FST, FEF bits are the same for control and non control endpoints */
rc = 0;
- udc_ep_writel(ep, UDCCSR, UDCCSR_FST | UDCCSR_FEF);
+ ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF);
if (is_ep0(ep))
set_ep0state(ep->dev, STALL);
@@ -1343,7 +1359,7 @@ static void pxa_ep_fifo_flush(struct usb_ep *_ep)
udc_ep_readl(ep, UDCDR);
} else {
/* most IN status is the same, but ISO can't stall */
- udc_ep_writel(ep, UDCCSR,
+ ep_write_UDCCSR(ep,
UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN
| (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST));
}
@@ -1728,6 +1744,7 @@ static void udc_enable(struct pxa_udc *udc)
memset(&udc->stats, 0, sizeof(udc->stats));
udc_set_mask_UDCCR(udc, UDCCR_UDE);
+ ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM);
udelay(2);
if (udc_readl(udc, UDCCR) & UDCCR_EMCE)
dev_err(udc->dev, "Configuration errors, udc disabled\n");
@@ -1893,6 +1910,15 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
nuke(ep, -EPROTO);
+ /*
+	 * The PXA320 manual, in the section on back-to-back setup packets,
+	 * describes this situation: set OPC to get rid of the stale status
+	 * packet, then continue with the setup packet. The same handling is
+	 * applied to pxa27x CPUs.
+ */
+ if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0))
+ ep_write_UDCCSR(ep, UDCCSR0_OPC);
+
/* read SETUP packet */
for (i = 0; i < 2; i++) {
if (unlikely(ep_is_empty(ep)))
@@ -1919,7 +1945,7 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
set_ep0state(udc, OUT_DATA_STAGE);
/* Tell UDC to enter Data Stage */
- udc_ep_writel(ep, UDCCSR, UDCCSR0_SA | UDCCSR0_OPC);
+ ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
i = udc->driver->setup(&udc->gadget, &u.r);
if (i < 0)
@@ -1929,7 +1955,7 @@ out:
stall:
ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
udc_ep_readl(ep, UDCCSR), i);
- udc_ep_writel(ep, UDCCSR, UDCCSR0_FST | UDCCSR0_FTF);
+ ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF);
set_ep0state(udc, STALL);
goto out;
}
@@ -1966,6 +1992,8 @@ stall:
* cleared by software.
* - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
* before reading ep0.
+ * This is true only for PXA27x. It no longer holds for the PXA3xx family
+ * (see the Back-to-Back setup packet section in the developers guide).
* - irq can be called on a "packet complete" event (opc_irq=1), while
* UDCCSR0_OPC is not yet raised (delta can be as big as 100ms
* from experimentation).
@@ -1998,7 +2026,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
if (udccsr0 & UDCCSR0_SST) {
ep_dbg(ep, "clearing stall status\n");
nuke(ep, -EPIPE);
- udc_ep_writel(ep, UDCCSR, UDCCSR0_SST);
+ ep_write_UDCCSR(ep, UDCCSR0_SST);
ep0_idle(udc);
}
@@ -2023,7 +2051,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
break;
case IN_DATA_STAGE: /* GET_DESCRIPTOR */
if (epout_has_pkt(ep))
- udc_ep_writel(ep, UDCCSR, UDCCSR0_OPC);
+ ep_write_UDCCSR(ep, UDCCSR0_OPC);
if (req && !ep_is_full(ep))
completed = write_ep0_fifo(ep, req);
if (completed)
@@ -2036,7 +2064,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
ep0_end_out_req(ep, req);
break;
case STALL:
- udc_ep_writel(ep, UDCCSR, UDCCSR0_FST);
+ ep_write_UDCCSR(ep, UDCCSR0_FST);
break;
case IN_STATUS_STAGE:
/*
@@ -2131,6 +2159,7 @@ static void pxa27x_change_configuration(struct pxa_udc *udc, int config)
set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
udc->driver->setup(&udc->gadget, &req);
+ ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
}
/**
@@ -2159,6 +2188,7 @@ static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt)
set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
udc->driver->setup(&udc->gadget, &req);
+ ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
}
/*
@@ -2280,7 +2310,7 @@ static void irq_udc_reset(struct pxa_udc *udc)
memset(&udc->stats, 0, sizeof udc->stats);
nuke(ep, -EPROTO);
- udc_ep_writel(ep, UDCCSR, UDCCSR0_FTF | UDCCSR0_OPC);
+ ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC);
ep0_idle(udc);
}
@@ -2479,6 +2509,12 @@ static void pxa_udc_shutdown(struct platform_device *_dev)
udc_disable(udc);
}
+#ifdef CONFIG_CPU_PXA27x
+extern void pxa27x_clear_otgph(void);
+#else
+#define pxa27x_clear_otgph() do {} while (0)
+#endif
+
#ifdef CONFIG_PM
/**
* pxa_udc_suspend - Suspend udc device
@@ -2546,8 +2582,7 @@ static int pxa_udc_resume(struct platform_device *_dev)
* Software must configure the USB OTG pad, UDC, and UHC
* to the state they were in before entering sleep mode.
*/
- if (cpu_is_pxa27x())
- PSSR |= PSSR_OTGPH;
+ pxa27x_clear_otgph();
return 0;
}
@@ -2571,7 +2606,7 @@ static struct platform_driver udc_driver = {
static int __init udc_init(void)
{
- if (!cpu_is_pxa27x())
+ if (!cpu_is_pxa27x() && !cpu_is_pxa3xx())
return -ENODEV;
printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index db58125331d..e25225e2658 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -130,6 +130,8 @@
#define UP2OCR_HXOE (1 << 17) /* Transceiver Output Enable */
#define UP2OCR_SEOS (1 << 24) /* Single-Ended Output Select */
+#define UDCCSR0_ACM (1 << 9) /* Ack Control Mode */
+#define UDCCSR0_AREN (1 << 8) /* Ack Response Enable */
#define UDCCSR0_SA (1 << 7) /* Setup Active */
#define UDCCSR0_RNE (1 << 6) /* Receive FIFO Not Empty */
#define UDCCSR0_FST (1 << 5) /* Force Stall */
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
new file mode 100644
index 00000000000..50c71aae2cc
--- /dev/null
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -0,0 +1,3269 @@
+/* linux/drivers/usb/gadget/s3c-hsotg.c
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * S3C USB2.0 High-speed / OtG driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <mach/map.h>
+
+#include <plat/regs-usb-hsotg-phy.h>
+#include <plat/regs-usb-hsotg.h>
+#include <plat/regs-sys.h>
+#include <plat/udc-hs.h>
+
+#define DMA_ADDR_INVALID (~((dma_addr_t)0))
+
+/* EP0_MPS_LIMIT
+ *
+ * Unfortunately there seems to be a limit on the amount of data that can
+ * be transferred by IN transactions on EP0. This is either 127 bytes or 3
+ * packets (which practically means 1 packet and 63 bytes of data) when the
+ * MPS is set to 64.
+ *
+ * This means if we are wanting to move >127 bytes of data, we need to
+ * split the transactions up, but just doing one packet at a time does
+ * not work (this may be an implicit DATA0 PID on first packet of the
+ * transaction) and doing 2 packets is outside the controller's limits.
+ *
+ * If we try to lower the MPS size for EP0, then no transfers work properly
+ * for EP0, and the system will fail basic enumeration. As no cause for this
+ * has currently been found, we cannot support any large IN transfers for
+ * EP0.
+ */
+#define EP0_MPS_LIMIT 64
+
+struct s3c_hsotg;
+struct s3c_hsotg_req;
+
+/**
+ * struct s3c_hsotg_ep - driver endpoint definition.
+ * @ep: The gadget layer representation of the endpoint.
+ * @name: The driver generated name for the endpoint.
+ * @queue: Queue of requests for this endpoint.
+ * @parent: Reference back to the parent device structure.
+ * @req: The current request that the endpoint is processing. This is
+ *	used to indicate a request has been loaded onto the endpoint
+ *	and has yet to be completed (maybe due to data move, or simply
+ *	awaiting an ack from the core that all the data has been completed).
+ * @debugfs: File entry for debugfs file for this endpoint.
+ * @lock: State lock to protect contents of endpoint.
+ * @dir_in: Set to true if this endpoint is of the IN direction, which
+ * means that it is sending data to the Host.
+ * @index: The index for the endpoint registers.
+ * @name: The name array passed to the USB core.
+ * @halted: Set if the endpoint has been halted.
+ * @periodic: Set if this is a periodic ep, such as Interrupt
+ * @sent_zlp: Set if we've sent a zero-length packet.
+ * @total_data: The total number of data bytes done.
+ * @fifo_size: The size of the FIFO (for periodic IN endpoints)
+ * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
+ * @last_load: The offset of data for the last start of request.
+ * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
+ *
+ * This is the driver's state for each registered endpoint, allowing it
+ * to keep track of transactions that need doing. Each endpoint has a
+ * lock to protect the state, to try and avoid using an overall lock
+ * for the host controller as much as possible.
+ *
+ * For periodic IN endpoints, we have fifo_size and fifo_load to try
+ * and keep track of the amount of data in the periodic FIFO for each
+ * of these as we don't have a status register that tells us how much
+ * is in each of them.
+ */
+struct s3c_hsotg_ep {
+ struct usb_ep ep;
+ struct list_head queue;
+ struct s3c_hsotg *parent;
+ struct s3c_hsotg_req *req;
+ struct dentry *debugfs;
+
+ spinlock_t lock;
+
+ unsigned long total_data;
+ unsigned int size_loaded;
+ unsigned int last_load;
+ unsigned int fifo_load;
+ unsigned short fifo_size;
+
+ unsigned char dir_in;
+ unsigned char index;
+
+ unsigned int halted:1;
+ unsigned int periodic:1;
+ unsigned int sent_zlp:1;
+
+ char name[10];
+};
+
+#define S3C_HSOTG_EPS (8+1) /* limit to 9 for the moment */
+
+/**
+ * struct s3c_hsotg - driver state.
+ * @dev: The parent device supplied to the probe function
+ * @driver: USB gadget driver
+ * @plat: The platform specific configuration data.
+ * @regs: The memory area mapped for accessing registers.
+ * @regs_res: The resource that was allocated when claiming register space.
+ * @irq: The IRQ number we are using
+ * @debug_root: root directory for debugfs.
+ * @debug_file: main status file for debugfs.
+ * @debug_fifo: FIFO status file for debugfs.
+ * @ep0_reply: Request used for ep0 reply.
+ * @ep0_buff: Buffer for EP0 reply data, if needed.
+ * @ctrl_buff: Buffer for EP0 control requests.
+ * @ctrl_req: Request for EP0 control packets.
+ * @eps: The endpoints being supplied to the gadget framework
+ */
+struct s3c_hsotg {
+ struct device *dev;
+ struct usb_gadget_driver *driver;
+ struct s3c_hsotg_plat *plat;
+
+ void __iomem *regs;
+ struct resource *regs_res;
+ int irq;
+
+ struct dentry *debug_root;
+ struct dentry *debug_file;
+ struct dentry *debug_fifo;
+
+ struct usb_request *ep0_reply;
+ struct usb_request *ctrl_req;
+ u8 ep0_buff[8];
+ u8 ctrl_buff[8];
+
+ struct usb_gadget gadget;
+ struct s3c_hsotg_ep eps[];
+};
+
+/**
+ * struct s3c_hsotg_req - data transfer request
+ * @req: The USB gadget request
+ * @queue: The list of requests for the endpoint this is queued for.
+ * @in_progress: Has already had size/packets written to core
+ * @mapped: DMA buffer for this request has been mapped via dma_map_single().
+ */
+struct s3c_hsotg_req {
+ struct usb_request req;
+ struct list_head queue;
+ unsigned char in_progress;
+ unsigned char mapped;
+};
+
+/* conversion functions */
+static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
+{
+ return container_of(req, struct s3c_hsotg_req, req);
+}
+
+static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct s3c_hsotg_ep, ep);
+}
+
+static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct s3c_hsotg, gadget);
+}
+
+static inline void __orr32(void __iomem *ptr, u32 val)
+{
+ writel(readl(ptr) | val, ptr);
+}
+
+static inline void __bic32(void __iomem *ptr, u32 val)
+{
+ writel(readl(ptr) & ~val, ptr);
+}
+
+/* forward declaration of functions */
+static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);
+
+/**
+ * using_dma - return the DMA status of the driver.
+ * @hsotg: The driver state.
+ *
+ * Return true if we're using DMA.
+ *
+ * Currently, we have the DMA support code worked into everywhere
+ * that needs it, but the AMBA DMA implementation in the hardware can
+ * only DMA from 32bit aligned addresses. This means that gadgets such
+ * as the CDC Ethernet cannot work as they often pass packets which are
+ * not 32bit aligned.
+ *
+ * Unfortunately the choice to use DMA or not is global to the controller
+ * and seems to be only settable when the controller is being put through
+ * a core reset. This means we either need to fix the gadgets to take
+ * account of DMA alignment, or add bounce buffers (yuerk).
+ *
+ * Until this issue is sorted out, we always return 'false'.
+ */
+static inline bool using_dma(struct s3c_hsotg *hsotg)
+{
+ return false; /* support is not complete */
+}
+
+/**
+ * s3c_hsotg_en_gsint - enable one or more of the general interrupt
+ * @hsotg: The device state
+ * @ints: A bitmask of the interrupts to enable
+ */
+static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)
+{
+ u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);
+ u32 new_gsintmsk;
+
+ new_gsintmsk = gsintmsk | ints;
+
+ if (new_gsintmsk != gsintmsk) {
+ dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
+ writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);
+ }
+}
+
+/**
+ * s3c_hsotg_disable_gsint - disable one or more of the general interrupt
+ * @hsotg: The device state
+ * @ints: A bitmask of the interrupts to enable
+ */
+static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)
+{
+ u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);
+ u32 new_gsintmsk;
+
+ new_gsintmsk = gsintmsk & ~ints;
+
+ if (new_gsintmsk != gsintmsk)
+ writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);
+}
+
+/**
+ * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq
+ * @hsotg: The device state
+ * @ep: The endpoint index
+ * @dir_in: True if direction is in.
+ * @en: The enable value, true to enable
+ *
+ * Set or clear the mask for an individual endpoint's interrupt
+ * request.
+ */
+static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,
+ unsigned int ep, unsigned int dir_in,
+ unsigned int en)
+{
+ unsigned long flags;
+ u32 bit = 1 << ep;
+ u32 daint;
+
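+	/* OUT endpoint interrupt bits occupy the upper 16 bits of DAINTMSK */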
+ if (!dir_in)
+ bit <<= 16;
+
+ local_irq_save(flags);
+ daint = readl(hsotg->regs + S3C_DAINTMSK);
+ if (en)
+ daint |= bit;
+ else
+ daint &= ~bit;
+ writel(daint, hsotg->regs + S3C_DAINTMSK);
+ local_irq_restore(flags);
+}
+
+/**
+ * s3c_hsotg_init_fifo - initialise non-periodic FIFOs
+ * @hsotg: The device instance.
+ */
+static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
+{
+	/* the ryu 2.6.24 release has
+ writel(0x1C0, hsotg->regs + S3C_GRXFSIZ);
+ writel(S3C_GNPTXFSIZ_NPTxFStAddr(0x200) |
+ S3C_GNPTXFSIZ_NPTxFDep(0x1C0),
+ hsotg->regs + S3C_GNPTXFSIZ);
+ */
+
+ /* set FIFO sizes to 2048/0x1C0 */
+
+ writel(2048, hsotg->regs + S3C_GRXFSIZ);
+ writel(S3C_GNPTXFSIZ_NPTxFStAddr(2048) |
+ S3C_GNPTXFSIZ_NPTxFDep(0x1C0),
+ hsotg->regs + S3C_GNPTXFSIZ);
+}
+
+/**
+ * s3c_hsotg_ep_alloc_request - allocate a request for an endpoint
+ * @ep: USB endpoint to allocate request for.
+ * @flags: Allocation flags
+ *
+ * Allocate a new USB request structure appropriate for the specified endpoint
+ */
+struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, gfp_t flags)
+{
+ struct s3c_hsotg_req *req;
+
+ req = kzalloc(sizeof(struct s3c_hsotg_req), flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ req->req.dma = DMA_ADDR_INVALID;
+ return &req->req;
+}
+
+/**
+ * is_ep_periodic - return true if the endpoint is in periodic mode.
+ * @hs_ep: The endpoint to query.
+ *
+ * Returns true if the endpoint is in periodic mode, meaning it is being
+ * used for an Interrupt or ISO transfer.
+ */
+static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
+{
+ return hs_ep->periodic;
+}
+
+/**
+ * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint for the request
+ * @hs_req: The request being processed.
+ *
+ * This is the reverse of s3c_hsotg_map_dma(), called for the completion
+ * of a request to ensure the buffer is ready for access by the caller.
+*/
+static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
+ struct s3c_hsotg_ep *hs_ep,
+ struct s3c_hsotg_req *hs_req)
+{
+ struct usb_request *req = &hs_req->req;
+ enum dma_data_direction dir;
+
+ dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ /* ignore this if we're not moving any data */
+ if (hs_req->req.length == 0)
+ return;
+
+ if (hs_req->mapped) {
+ /* we mapped this, so unmap and remove the dma */
+
+ dma_unmap_single(hsotg->dev, req->dma, req->length, dir);
+
+ req->dma = DMA_ADDR_INVALID;
+ hs_req->mapped = 0;
+ } else {
+ dma_sync_single(hsotg->dev, req->dma, req->length, dir);
+ }
+}
+
+/**
+ * s3c_hsotg_write_fifo - write packet Data to the TxFIFO
+ * @hsotg: The controller state.
+ * @hs_ep: The endpoint we're going to write for.
+ * @hs_req: The request to write data for.
+ *
+ * This is called when the TxFIFO has some space in it to hold a new
+ * transmission and we have something to give it. The actual setup of
+ * the data size is done elsewhere, so all we have to do is to actually
+ * write the data.
+ *
+ * The return value is zero if there is more space (or nothing was done)
+ * otherwise -ENOSPC is returned if the FIFO space was used up.
+ *
+ * This routine is only needed for PIO
+*/
+static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
+ struct s3c_hsotg_ep *hs_ep,
+ struct s3c_hsotg_req *hs_req)
+{
+ bool periodic = is_ep_periodic(hs_ep);
+ u32 gnptxsts = readl(hsotg->regs + S3C_GNPTXSTS);
+ int buf_pos = hs_req->req.actual;
+ int to_write = hs_ep->size_loaded;
+ void *data;
+ int can_write;
+ int pkt_round;
+
+ to_write -= (buf_pos - hs_ep->last_load);
+
+ /* if there's nothing to write, get out early */
+ if (to_write == 0)
+ return 0;
+
+ if (periodic) {
+ u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
+ int size_left;
+ int size_done;
+
+ /* work out how much data was loaded so we can calculate
+ * how much data is left in the fifo. */
+
+ size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+
+ dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
+ __func__, size_left,
+ hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
+
+ /* how much of the data has moved */
+ size_done = hs_ep->size_loaded - size_left;
+
+ /* how much data is left in the fifo */
+ can_write = hs_ep->fifo_load - size_done;
+ dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
+ __func__, can_write);
+
+ can_write = hs_ep->fifo_size - can_write;
+ dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
+ __func__, can_write);
+
+ if (can_write <= 0) {
+ s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
+ return -ENOSPC;
+ }
+ } else {
+ if (S3C_GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {
+ dev_dbg(hsotg->dev,
+ "%s: no queue slots available (0x%08x)\n",
+ __func__, gnptxsts);
+
+ s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
+ return -ENOSPC;
+ }
+
+ can_write = S3C_GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);
+ }
+
+ dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",
+ __func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket);
+
+ /* limit to 512 bytes of data, it seems at least on the non-periodic
+ * FIFO, requests of >512 cause the endpoint to get stuck with a
+ * fragment of the end of the transfer in it.
+ */
+ if (can_write > 512)
+ can_write = 512;
+
+ /* see if we can write data */
+
+ if (to_write > can_write) {
+ to_write = can_write;
+ pkt_round = to_write % hs_ep->ep.maxpacket;
+
+ /* Not sure, but we probably shouldn't be writing partial
+ * packets into the FIFO, so round the write down to an
+ * exact number of packets.
+ *
+ * Note, we do not currently check to see if we can ever
+ * write a full packet or not to the FIFO.
+ */
+
+ if (pkt_round)
+ to_write -= pkt_round;
+
+ /* enable correct FIFO interrupt to alert us when there
+ * is more room left. */
+
+ s3c_hsotg_en_gsint(hsotg,
+ periodic ? S3C_GINTSTS_PTxFEmp :
+ S3C_GINTSTS_NPTxFEmp);
+ }
+
+ dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
+ to_write, hs_req->req.length, can_write, buf_pos);
+
+ if (to_write <= 0)
+ return -ENOSPC;
+
+ hs_req->req.actual = buf_pos + to_write;
+ hs_ep->total_data += to_write;
+
+ if (periodic)
+ hs_ep->fifo_load += to_write;
+
+ to_write = DIV_ROUND_UP(to_write, 4);
+ data = hs_req->req.buf + buf_pos;
+
+ writesl(hsotg->regs + S3C_EPFIFO(hs_ep->index), data, to_write);
+
+ return (to_write >= can_write) ? -ENOSPC : 0;
+}
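+
+ /* Worked example of the periodic space calculation above, with
+ * illustrative numbers: fifo_size=1024, fifo_load=512, size_loaded=512
+ * and an XferSize read-back of 256 gives size_done=256, so 256 bytes
+ * are still sitting in the FIFO and can_write becomes 1024-256=768,
+ * before the 512 byte clamp and the round-down to whole packets.
+ */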
+
+/**
+ * get_ep_limit - get the maximum data length for this endpoint
+ * @hs_ep: The endpoint
+ *
+ * Return the maximum data that can be queued in one go on a given endpoint
+ * so that transfers that are too long can be split.
+ */
+static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
+{
+ int index = hs_ep->index;
+ unsigned maxsize;
+ unsigned maxpkt;
+
+ if (index != 0) {
+ maxsize = S3C_DxEPTSIZ_XferSize_LIMIT + 1;
+ maxpkt = S3C_DxEPTSIZ_PktCnt_LIMIT + 1;
+ } else {
+ if (hs_ep->dir_in) {
+ /* maxsize = S3C_DIEPTSIZ0_XferSize_LIMIT + 1; */
+ maxsize = 64+64+1;
+ maxpkt = S3C_DIEPTSIZ0_PktCnt_LIMIT + 1;
+ } else {
+ maxsize = 0x3f;
+ maxpkt = 2;
+ }
+ }
+
+ /* we made the constant loading easier above by using +1 */
+ maxpkt--;
+ maxsize--;
+
+ /* constrain by packet count if maxpkts*pktsize is greater
+ * than the length register size. */
+
+ if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
+ maxsize = maxpkt * hs_ep->ep.maxpacket;
+
+ return maxsize;
+}
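+
+ /* For example, on a bulk endpoint with a 512 byte maxpacket the
+ * transfer is limited to S3C_DxEPTSIZ_XferSize_LIMIT bytes unless
+ * S3C_DxEPTSIZ_PktCnt_LIMIT * 512 is smaller, in which case the
+ * packet count field is the binding constraint. EP0 is far more
+ * restricted, as the special-case values above show.
+ */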
+
+/**
+ * s3c_hsotg_start_req - start a USB request from an endpoint's queue
+ * @hsotg: The controller state.
+ * @hs_ep: The endpoint to process a request for
+ * @hs_req: The request to start.
+ * @continuing: True if we are doing more for the current request.
+ *
+ * Start the given request running by setting the endpoint registers
+ * appropriately, and writing any data to the FIFOs.
+ */
+static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
+ struct s3c_hsotg_ep *hs_ep,
+ struct s3c_hsotg_req *hs_req,
+ bool continuing)
+{
+ struct usb_request *ureq = &hs_req->req;
+ int index = hs_ep->index;
+ int dir_in = hs_ep->dir_in;
+ u32 epctrl_reg;
+ u32 epsize_reg;
+ u32 epsize;
+ u32 ctrl;
+ unsigned length;
+ unsigned packets;
+ unsigned maxreq;
+
+ if (index != 0) {
+ if (hs_ep->req && !continuing) {
+ dev_err(hsotg->dev, "%s: active request\n", __func__);
+ WARN_ON(1);
+ return;
+ } else if (hs_ep->req != hs_req && continuing) {
+ dev_err(hsotg->dev,
+ "%s: continue different req\n", __func__);
+ WARN_ON(1);
+ return;
+ }
+ }
+
+ epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
+ epsize_reg = dir_in ? S3C_DIEPTSIZ(index) : S3C_DOEPTSIZ(index);
+
+ dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
+ __func__, readl(hsotg->regs + epctrl_reg), index,
+ hs_ep->dir_in ? "in" : "out");
+
+ length = ureq->length - ureq->actual;
+
+ if (0)
+ dev_dbg(hsotg->dev,
+ "REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
+ ureq->buf, length, ureq->dma,
+ ureq->no_interrupt, ureq->zero, ureq->short_not_ok);
+
+ maxreq = get_ep_limit(hs_ep);
+ if (length > maxreq) {
+ int round = maxreq % hs_ep->ep.maxpacket;
+
+ dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
+ __func__, length, maxreq, round);
+
+ /* round down to multiple of packets */
+ if (round)
+ maxreq -= round;
+
+ length = maxreq;
+ }
+
+ if (length)
+ packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
+ else
+ packets = 1; /* send one packet if length is zero. */
+
+ if (dir_in && index != 0)
+ epsize = S3C_DxEPTSIZ_MC(1);
+ else
+ epsize = 0;
+
+ if (index != 0 && ureq->zero) {
+ /* test for the packets being exactly right for the
+ * transfer */
+
+ if (length == (packets * hs_ep->ep.maxpacket))
+ packets++;
+ }
+
+ epsize |= S3C_DxEPTSIZ_PktCnt(packets);
+ epsize |= S3C_DxEPTSIZ_XferSize(length);
+
+ dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
+ __func__, packets, length, ureq->length, epsize, epsize_reg);
+
+ /* store the request as the current one we're doing */
+ hs_ep->req = hs_req;
+
+ /* write size / packets */
+ writel(epsize, hsotg->regs + epsize_reg);
+
+ ctrl = readl(hsotg->regs + epctrl_reg);
+
+ if (ctrl & S3C_DxEPCTL_Stall) {
+ dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
+
+ /* not sure what we can do here, if it is EP0 then we should
+ * get this cleared once the endpoint has transmitted the
+ * STALL packet, otherwise it needs to be cleared by the
+ * host.
+ */
+ }
+
+ if (using_dma(hsotg)) {
+ unsigned int dma_reg;
+
+ /* write DMA address to control register, buffer already
+ * synced by s3c_hsotg_ep_queue(). */
+
+ dma_reg = dir_in ? S3C_DIEPDMA(index) : S3C_DOEPDMA(index);
+ writel(ureq->dma, hsotg->regs + dma_reg);
+
+ dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
+ __func__, ureq->dma, dma_reg);
+ }
+
+ ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */
+ ctrl |= S3C_DxEPCTL_USBActEp;
+ ctrl |= S3C_DxEPCTL_CNAK; /* clear NAK set by core */
+
+ dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
+ writel(ctrl, hsotg->regs + epctrl_reg);
+
+ /* set these, it seems that DMA support increments past the end
+ * of the packet buffer so we need to calculate the length from
+ * this information. */
+ hs_ep->size_loaded = length;
+ hs_ep->last_load = ureq->actual;
+
+ if (dir_in && !using_dma(hsotg)) {
+ /* set these anyway, we may need them for non-periodic in */
+ hs_ep->fifo_load = 0;
+
+ s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
+ }
+
+ /* clear the INTknTXFEmpMsk when we start a request, more as an aid
+ * to debugging to see what is going on. */
+ if (dir_in)
+ writel(S3C_DIEPMSK_INTknTXFEmpMsk,
+ hsotg->regs + S3C_DIEPINT(index));
+
+ /* Note, trying to clear the NAK here causes problems with transmit
+ * on the S3C6400 ending up with the TXFIFO becoming full. */
+
+ /* check ep is enabled */
+ if (!(readl(hsotg->regs + epctrl_reg) & S3C_DxEPCTL_EPEna))
+ dev_warn(hsotg->dev,
+ "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n",
+ index, readl(hsotg->regs + epctrl_reg));
+
+ dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",
+ __func__, readl(hsotg->regs + epctrl_reg));
+}
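+
+ /* For example (illustrative values): a 1024 byte IN request on an
+ * endpoint with a 512 byte maxpacket gives packets = 2; if req->zero
+ * is set the length is an exact multiple of the packet size, so a
+ * third (zero length) packet is added and the size register ends up
+ * with a PktCnt of 3 and an XferSize of 1024.
+ */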
+
+/**
+ * s3c_hsotg_map_dma - map the DMA memory being used for the request
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request is on.
+ * @req: The request being processed.
+ *
+ * We've been asked to queue a request, so ensure that the memory buffer
+ * is correctly setup for DMA. If we've been passed an extant DMA address
+ * then ensure the buffer has been synced to memory. If our buffer has no
+ * DMA memory, then we map the memory and mark our request to allow us to
+ * cleanup on completion.
+*/
+static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
+ struct s3c_hsotg_ep *hs_ep,
+ struct usb_request *req)
+{
+ enum dma_data_direction dir;
+ struct s3c_hsotg_req *hs_req = our_req(req);
+
+ dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ /* if the length is zero, ignore the DMA data */
+ if (hs_req->req.length == 0)
+ return 0;
+
+ if (req->dma == DMA_ADDR_INVALID) {
+ dma_addr_t dma;
+
+ dma = dma_map_single(hsotg->dev, req->buf, req->length, dir);
+
+ if (unlikely(dma_mapping_error(hsotg->dev, dma)))
+ goto dma_error;
+
+ if (dma & 3) {
+ dev_err(hsotg->dev, "%s: unaligned dma buffer\n",
+ __func__);
+
+ dma_unmap_single(hsotg->dev, dma, req->length, dir);
+ return -EINVAL;
+ }
+
+ hs_req->mapped = 1;
+ req->dma = dma;
+ } else {
+ dma_sync_single(hsotg->dev, req->dma, req->length, dir);
+ hs_req->mapped = 0;
+ }
+
+ return 0;
+
+dma_error:
+ dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
+ __func__, req->buf, req->length);
+
+ return -EIO;
+}
+
+static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
+ gfp_t gfp_flags)
+{
+ struct s3c_hsotg_req *hs_req = our_req(req);
+ struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+ struct s3c_hsotg *hs = hs_ep->parent;
+ unsigned long irqflags;
+ bool first;
+
+ dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
+ ep->name, req, req->length, req->buf, req->no_interrupt,
+ req->zero, req->short_not_ok);
+
+ /* initialise status of the request */
+ INIT_LIST_HEAD(&hs_req->queue);
+ req->actual = 0;
+ req->status = -EINPROGRESS;
+
+ /* if we're using DMA, sync the buffers as necessary */
+ if (using_dma(hs)) {
+ int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irqsave(&hs_ep->lock, irqflags);
+
+ first = list_empty(&hs_ep->queue);
+ list_add_tail(&hs_req->queue, &hs_ep->queue);
+
+ if (first)
+ s3c_hsotg_start_req(hs, hs_ep, hs_req, false);
+
+ spin_unlock_irqrestore(&hs_ep->lock, irqflags);
+
+ return 0;
+}
+
+static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct s3c_hsotg_req *hs_req = our_req(req);
+
+ kfree(hs_req);
+}
+
+/**
+ * s3c_hsotg_complete_oursetup - setup completion callback
+ * @ep: The endpoint the request was on.
+ * @req: The request completed.
+ *
+ * Called on completion of any requests the driver itself
+ * submitted that need cleaning up.
+ */
+static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+ struct s3c_hsotg *hsotg = hs_ep->parent;
+
+ dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
+
+ s3c_hsotg_ep_free_request(ep, req);
+}
+
+/**
+ * ep_from_windex - convert control wIndex value to endpoint
+ * @hsotg: The driver state.
+ * @windex: The control request wIndex field (in host order).
+ *
+ * Convert the given wIndex into a pointer to a driver endpoint
+ * structure, or return NULL if it is not a valid endpoint.
+*/
+static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,
+ u32 windex)
+{
+ struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
+ int dir = (windex & USB_DIR_IN) ? 1 : 0;
+ int idx = windex & 0x7F;
+
+ if (windex >= 0x100)
+ return NULL;
+
+ if (idx > S3C_HSOTG_EPS)
+ return NULL;
+
+ if (idx && ep->dir_in != dir)
+ return NULL;
+
+ return ep;
+}
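+
+ /* For example, a wIndex of 0x0081 selects index 1 with USB_DIR_IN
+ * set, and is only accepted if eps[1] is configured as an IN
+ * endpoint; 0x0001 is the equivalent lookup for the OUT direction.
+ */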
+
+/**
+ * s3c_hsotg_send_reply - send reply to control request
+ * @hsotg: The device state
+ * @ep: Endpoint 0
+ * @buff: Buffer for request
+ * @length: Length of reply.
+ *
+ * Create a request and queue it on the given endpoint. This is useful as
+ * an internal method of sending replies to certain control requests, etc.
+ */
+static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,
+ struct s3c_hsotg_ep *ep,
+ void *buff,
+ int length)
+{
+ struct usb_request *req;
+ int ret;
+
+ dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
+
+ req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
+ hsotg->ep0_reply = req;
+ if (!req) {
+ dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
+ return -ENOMEM;
+ }
+
+ req->buf = hsotg->ep0_buff;
+ req->length = length;
+ req->zero = 1; /* always do zero-length final transfer */
+ req->complete = s3c_hsotg_complete_oursetup;
+
+ if (length)
+ memcpy(req->buf, buff, length);
+ else
+ ep->sent_zlp = 1;
+
+ ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
+ if (ret) {
+ dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * s3c_hsotg_process_req_status - process request GET_STATUS
+ * @hsotg: The device state
+ * @ctrl: USB control request
+ */
+static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+ struct s3c_hsotg_ep *ep;
+ __le16 reply;
+ int ret;
+
+ dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
+
+ if (!ep0->dir_in) {
+ dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ reply = cpu_to_le16(0); /* bit 0 => self powered,
+ * bit 1 => remote wakeup */
+ break;
+
+ case USB_RECIP_INTERFACE:
+ /* currently, the data result should be zero */
+ reply = cpu_to_le16(0);
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
+ if (!ep)
+ return -ENOENT;
+
+ reply = cpu_to_le16(ep->halted ? 1 : 0);
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (le16_to_cpu(ctrl->wLength) != 2)
+ return -EINVAL;
+
+ ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
+ return ret;
+ }
+
+ return 1;
+}
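+
+ /* For example, a GET_STATUS aimed at a halted endpoint produces the
+ * two byte little-endian reply 0x0001, whereas the device and
+ * interface recipients currently always report zero.
+ */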
+
+static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);
+
+/**
+ * s3c_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
+ * @hsotg: The device state
+ * @ctrl: USB control request
+ */
+static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
+ struct usb_ctrlrequest *ctrl)
+{
+ bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
+ struct s3c_hsotg_ep *ep;
+
+ dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
+ __func__, set ? "SET" : "CLEAR");
+
+ if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
+ ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
+ if (!ep) {
+ dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
+ __func__, le16_to_cpu(ctrl->wIndex));
+ return -ENOENT;
+ }
+
+ switch (le16_to_cpu(ctrl->wValue)) {
+ case USB_ENDPOINT_HALT:
+ s3c_hsotg_ep_sethalt(&ep->ep, set);
+ break;
+
+ default:
+ return -ENOENT;
+ }
+ } else
+ return -ENOENT; /* currently only deal with endpoint */
+
+ return 1;
+}
+
+/**
+ * s3c_hsotg_process_control - process a control request
+ * @hsotg: The device state
+ * @ctrl: The control request received
+ *
+ * The controller has received the SETUP phase of a control request, and
+ * needs to work out what to do next (and whether to pass it on to the
+ * gadget driver).
+ */
+static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+ int ret = 0;
+ u32 dcfg;
+
+ ep0->sent_zlp = 0;
+
+ dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
+ ctrl->bRequest, ctrl->bRequestType,
+ ctrl->wValue, ctrl->wLength);
+
+ /* record the direction of the request, for later use when enqueuing
+ * packets onto EP0. */
+
+ ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;
+ dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);
+
+ /* if we've no data with this request, then the last part of the
+ * transaction is going to implicitly be IN. */
+ if (ctrl->wLength == 0)
+ ep0->dir_in = 1;
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (ctrl->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ dcfg = readl(hsotg->regs + S3C_DCFG);
+ dcfg &= ~S3C_DCFG_DevAddr_MASK;
+ dcfg |= ctrl->wValue << S3C_DCFG_DevAddr_SHIFT;
+ writel(dcfg, hsotg->regs + S3C_DCFG);
+
+ dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
+
+ ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
+ return;
+
+ case USB_REQ_GET_STATUS:
+ ret = s3c_hsotg_process_req_status(hsotg, ctrl);
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ ret = s3c_hsotg_process_req_feature(hsotg, ctrl);
+ break;
+ }
+ }
+
+ /* as a fallback, try delivering it to the driver to deal with */
+
+ if (ret == 0 && hsotg->driver) {
+ ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
+ if (ret < 0)
+ dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
+ }
+
+ if (ret > 0) {
+ if (!ep0->dir_in) {
+ /* need to generate zlp in reply or take data */
+ /* todo - deal with any data we might be sent? */
+ ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
+ }
+ }
+
+ /* the request either cannot be handled, or is not formatted correctly,
+ * so respond with a STALL for the status stage to indicate failure.
+ */
+
+ if (ret < 0) {
+ u32 reg;
+ u32 ctrl;
+
+ dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
+ reg = (ep0->dir_in) ? S3C_DIEPCTL0 : S3C_DOEPCTL0;
+
+ /* S3C_DxEPCTL_Stall will be cleared by EP once it has
+ * taken effect, so no need to clear later. */
+
+ ctrl = readl(hsotg->regs + reg);
+ ctrl |= S3C_DxEPCTL_Stall;
+ ctrl |= S3C_DxEPCTL_CNAK;
+ writel(ctrl, hsotg->regs + reg);
+
+ dev_dbg(hsotg->dev,
+ "writen DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
+ ctrl, reg, readl(hsotg->regs + reg));
+
+ /* don't believe we need to do anything more to get the EP
+ * to reply with a STALL packet */
+ }
+}
+
+static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
+
+/**
+ * s3c_hsotg_complete_setup - completion of a setup transfer
+ * @ep: The endpoint the request was on.
+ * @req: The request completed.
+ *
+ * Called on completion of any requests the driver itself submitted for
+ * EP0 setup packets
+ */
+static void s3c_hsotg_complete_setup(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+ struct s3c_hsotg *hsotg = hs_ep->parent;
+
+ if (req->status < 0) {
+ dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
+ return;
+ }
+
+ if (req->actual == 0)
+ s3c_hsotg_enqueue_setup(hsotg);
+ else
+ s3c_hsotg_process_control(hsotg, req->buf);
+}
+
+/**
+ * s3c_hsotg_enqueue_setup - start a request for EP0 packets
+ * @hsotg: The device state.
+ *
+ * Enqueue a request on EP0 if necessary to receive any SETUP packets
+ * from the host.
+ */
+static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
+{
+ struct usb_request *req = hsotg->ctrl_req;
+ struct s3c_hsotg_req *hs_req = our_req(req);
+ int ret;
+
+ dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
+
+ req->zero = 0;
+ req->length = 8;
+ req->buf = hsotg->ctrl_buff;
+ req->complete = s3c_hsotg_complete_setup;
+
+ if (!list_empty(&hs_req->queue)) {
+ dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
+ return;
+ }
+
+ hsotg->eps[0].dir_in = 0;
+
+ ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
+ /* Don't think there's much we can do other than watch the
+ * driver fail. */
+ }
+}
+
+/**
+ * get_ep_head - return the first request on the endpoint
+ * @hs_ep: The controller endpoint to get
+ *
+ * Get the first request on the endpoint.
+*/
+static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
+{
+ if (list_empty(&hs_ep->queue))
+ return NULL;
+
+ return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);
+}
+
+/**
+ * s3c_hsotg_complete_request - complete a request given to us
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request was on.
+ * @hs_req: The request to complete.
+ * @result: The result code (0 => Ok, otherwise errno)
+ *
+ * The given request has finished, so call the necessary completion
+ * if it has one and then look to see if we can start a new request
+ * on the endpoint.
+ *
+ * Note, expects the ep to already be locked as appropriate.
+*/
+static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
+ struct s3c_hsotg_ep *hs_ep,
+ struct s3c_hsotg_req *hs_req,
+ int result)
+{
+ bool restart;
+
+ if (!hs_req) {
+ dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
+ return;
+ }
+
+ dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
+ hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
+
+ /* only replace the status if we've not already set an error
+ * from a previous transaction */
+
+ if (hs_req->req.status == -EINPROGRESS)
+ hs_req->req.status = result;
+
+ hs_ep->req = NULL;
+ list_del_init(&hs_req->queue);
+
+ if (using_dma(hsotg))
+ s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
+
+ /* call the complete request with the locks off, just in case the
+ * request tries to queue more work for this endpoint. */
+
+ if (hs_req->req.complete) {
+ spin_unlock(&hs_ep->lock);
+ hs_req->req.complete(&hs_ep->ep, &hs_req->req);
+ spin_lock(&hs_ep->lock);
+ }
+
+ /* Look to see if there is anything else to do. Note, the completion
+ * of the previous request may have caused a new request to be started
+ * so be careful when doing this. */
+
+ if (!hs_ep->req && result >= 0) {
+ restart = !list_empty(&hs_ep->queue);
+ if (restart) {
+ hs_req = get_ep_head(hs_ep);
+ s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
+ }
+ }
+}
+
+/**
+ * s3c_hsotg_complete_request_lock - complete a request given to us (locked)
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request was on.
+ * @hs_req: The request to complete.
+ * @result: The result code (0 => Ok, otherwise errno)
+ *
+ * See s3c_hsotg_complete_request(), but called with the endpoint's
+ * lock held.
+*/
+static void s3c_hsotg_complete_request_lock(struct s3c_hsotg *hsotg,
+ struct s3c_hsotg_ep *hs_ep,
+ struct s3c_hsotg_req *hs_req,
+ int result)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hs_ep->lock, flags);
+ s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
+ spin_unlock_irqrestore(&hs_ep->lock, flags);
+}
+
+/**
+ * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
+ * @hsotg: The device state.
+ * @ep_idx: The endpoint index for the data
+ * @size: The size of data in the fifo, in bytes
+ *
+ * The FIFO status shows there is data to read from the FIFO for a given
+ * endpoint, so sort out whether we need to read the data into a request
+ * that has been made for that endpoint.
+ */
+static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
+{
+ struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
+ struct s3c_hsotg_req *hs_req = hs_ep->req;
+ void __iomem *fifo = hsotg->regs + S3C_EPFIFO(ep_idx);
+ int to_read;
+ int max_req;
+ int read_ptr;
+
+ if (!hs_req) {
+ u32 epctl = readl(hsotg->regs + S3C_DOEPCTL(ep_idx));
+ int ptr;
+
+ dev_warn(hsotg->dev,
+ "%s: FIFO %d bytes on ep%d but no req (DxEPCTl=0x%08x)\n",
+ __func__, size, ep_idx, epctl);
+
+ /* dump the data from the FIFO, we've nothing we can do */
+ for (ptr = 0; ptr < size; ptr += 4)
+ (void)readl(fifo);
+
+ return;
+ }
+
+ spin_lock(&hs_ep->lock);
+
+ to_read = size;
+ read_ptr = hs_req->req.actual;
+ max_req = hs_req->req.length - read_ptr;
+
+ if (to_read > max_req) {
+ /* more data appeared than we were willing
+ * to deal with in this request.
+ */
+
+ /* currently we don't deal with this */
+ WARN_ON_ONCE(1);
+ }
+
+ dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
+ __func__, to_read, max_req, read_ptr, hs_req->req.length);
+
+ hs_ep->total_data += to_read;
+ hs_req->req.actual += to_read;
+ to_read = DIV_ROUND_UP(to_read, 4);
+
+ /* note, we might over-write the buffer end by 3 bytes depending on
+ * alignment of the data. */
+ readsl(fifo, hs_req->req.buf + read_ptr, to_read);
+
+ spin_unlock(&hs_ep->lock);
+}
+
+/**
+ * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
+ * @hsotg: The device instance
+ * @req: The request currently on this endpoint
+ *
+ * Generate a zero-length IN packet request for terminating a SETUP
+ * transaction.
+ *
+ * Note, since we don't write any data to the TxFIFO, it is
+ * currently believed that we do not need to wait for any space in
+ * the TxFIFO.
+ */
+static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
+ struct s3c_hsotg_req *req)
+{
+ u32 ctrl;
+
+ if (!req) {
+ dev_warn(hsotg->dev, "%s: no request?\n", __func__);
+ return;
+ }
+
+ if (req->req.length == 0) {
+ hsotg->eps[0].sent_zlp = 1;
+ s3c_hsotg_enqueue_setup(hsotg);
+ return;
+ }
+
+ hsotg->eps[0].dir_in = 1;
+ hsotg->eps[0].sent_zlp = 1;
+
+ dev_dbg(hsotg->dev, "sending zero-length packet\n");
+
+ /* issue a zero-sized packet to terminate this */
+ writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
+ S3C_DxEPTSIZ_XferSize(0), hsotg->regs + S3C_DIEPTSIZ(0));
+
+ ctrl = readl(hsotg->regs + S3C_DIEPCTL0);
+ ctrl |= S3C_DxEPCTL_CNAK; /* clear NAK set by core */
+ ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */
+ ctrl |= S3C_DxEPCTL_USBActEp;
+ writel(ctrl, hsotg->regs + S3C_DIEPCTL0);
+}
+
+/**
+ * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
+ * @hsotg: The device instance
+ * @epnum: The endpoint received from
+ * @was_setup: Set if processing a SetupDone event.
+ *
+ * The RXFIFO has delivered an OutDone event, which means that the data
+ * transfer for an OUT endpoint has been completed, either by a short
+ * packet or by the finish of a transfer.
+*/
+static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
+ int epnum, bool was_setup)
+{
+ struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
+ struct s3c_hsotg_req *hs_req = hs_ep->req;
+ struct usb_request *req = &hs_req->req;
+ int result = 0;
+
+ if (!hs_req) {
+ dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
+ return;
+ }
+
+ if (using_dma(hsotg)) {
+ u32 epsize = readl(hsotg->regs + S3C_DOEPTSIZ(epnum));
+ unsigned size_done;
+ unsigned size_left;
+
+ /* Calculate the size of the transfer by checking how much
+ * is left in the endpoint size register and then working it
+ * out from the amount we loaded for the transfer.
+ *
+ * We need to do this as DMA pointers are always 32bit aligned
+ * so may overshoot/undershoot the transfer.
+ */
+
+ size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+
+ size_done = hs_ep->size_loaded - size_left;
+ size_done += hs_ep->last_load;
+
+ req->actual = size_done;
+ }
+
+ if (req->actual < req->length && req->short_not_ok) {
+ dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
+ __func__, req->actual, req->length);
+
+ /* todo - what should we return here? there's no one else
+ * even bothering to check the status. */
+ }
+
+ if (epnum == 0) {
+ if (!was_setup && req->complete != s3c_hsotg_complete_setup)
+ s3c_hsotg_send_zlp(hsotg, hs_req);
+ }
+
+ s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, result);
+}
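+
+ /* Worked example (illustrative values): with size_loaded=512,
+ * last_load=0 and an XferSize read-back of 112, the transfer actually
+ * moved 512 - 112 = 400 bytes, so req->actual is set to 400.
+ */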
+
+/**
+ * s3c_hsotg_read_frameno - read current frame number
+ * @hsotg: The device instance
+ *
+ * Return the current frame number
+*/
+static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
+{
+ u32 dsts;
+
+ dsts = readl(hsotg->regs + S3C_DSTS);
+ dsts &= S3C_DSTS_SOFFN_MASK;
+ dsts >>= S3C_DSTS_SOFFN_SHIFT;
+
+ return dsts;
+}
+
+/**
+ * s3c_hsotg_handle_rx - RX FIFO has data
+ * @hsotg: The device instance
+ *
+ * The IRQ handler has detected that the RX FIFO has some data in it
+ * that requires processing, so find out what is in there and do the
+ * appropriate read.
+ *
+ * The RXFIFO is a true FIFO, the packets coming out are still in packet
+ * chunks, so if you have x packets received on an endpoint you'll get x
+ * FIFO events delivered, each with a packet's worth of data in it.
+ *
+ * When using DMA, we should not be processing events from the RXFIFO
+ * as the actual data should be sent to the memory directly and we turn
+ * on the completion interrupts to get notifications of transfer completion.
+ */
+void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
+{
+ u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);
+ u32 epnum, status, size;
+
+ WARN_ON(using_dma(hsotg));
+
+ epnum = grxstsr & S3C_GRXSTS_EPNum_MASK;
+ status = grxstsr & S3C_GRXSTS_PktSts_MASK;
+
+ size = grxstsr & S3C_GRXSTS_ByteCnt_MASK;
+ size >>= S3C_GRXSTS_ByteCnt_SHIFT;
+
+ if (1)
+ dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
+ __func__, grxstsr, size, epnum);
+
+#define __status(x) ((x) >> S3C_GRXSTS_PktSts_SHIFT)
+
+ switch (status >> S3C_GRXSTS_PktSts_SHIFT) {
+ case __status(S3C_GRXSTS_PktSts_GlobalOutNAK):
+ dev_dbg(hsotg->dev, "GlobalOutNAK\n");
+ break;
+
+ case __status(S3C_GRXSTS_PktSts_OutDone):
+ dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
+ s3c_hsotg_read_frameno(hsotg));
+
+ if (!using_dma(hsotg))
+ s3c_hsotg_handle_outdone(hsotg, epnum, false);
+ break;
+
+ case __status(S3C_GRXSTS_PktSts_SetupDone):
+ dev_dbg(hsotg->dev,
+ "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
+ s3c_hsotg_read_frameno(hsotg),
+ readl(hsotg->regs + S3C_DOEPCTL(0)));
+
+ s3c_hsotg_handle_outdone(hsotg, epnum, true);
+ break;
+
+ case __status(S3C_GRXSTS_PktSts_OutRX):
+ s3c_hsotg_rx_data(hsotg, epnum, size);
+ break;
+
+ case __status(S3C_GRXSTS_PktSts_SetupRX):
+ dev_dbg(hsotg->dev,
+ "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
+ s3c_hsotg_read_frameno(hsotg),
+ readl(hsotg->regs + S3C_DOEPCTL(0)));
+
+ s3c_hsotg_rx_data(hsotg, epnum, size);
+ break;
+
+ default:
+ dev_warn(hsotg->dev, "%s: unknown status %08x\n",
+ __func__, grxstsr);
+
+ s3c_hsotg_dump(hsotg);
+ break;
+ }
+}
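+
+ /* For example, a GRXSTSP value whose PktSts field decodes to OutRX
+ * with EPNum=1 and ByteCnt=64 indicates a single 64 byte packet
+ * waiting for ep1, which s3c_hsotg_rx_data() then reads out of the
+ * FIFO word by word.
+ */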
+
+/**
+ * s3c_hsotg_ep0_mps - turn max packet size into register setting
+ * @mps: The maximum packet size in bytes.
+*/
+static u32 s3c_hsotg_ep0_mps(unsigned int mps)
+{
+ switch (mps) {
+ case 64:
+ return S3C_D0EPCTL_MPS_64;
+ case 32:
+ return S3C_D0EPCTL_MPS_32;
+ case 16:
+ return S3C_D0EPCTL_MPS_16;
+ case 8:
+ return S3C_D0EPCTL_MPS_8;
+ }
+
+ /* bad max packet size, warn and return invalid result */
+ WARN_ON(1);
+ return (u32)-1;
+}
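+
+ /* For example, s3c_hsotg_ep0_mps(64) returns S3C_D0EPCTL_MPS_64;
+ * sizes other than 8, 16, 32 or 64 fall through to the WARN_ON and
+ * the (u32)-1 result is then rejected by the mpsval > 3 check in
+ * s3c_hsotg_set_ep_maxpacket().
+ */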
+
+/**
+ * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
+ * @hsotg: The driver state.
+ * @ep: The index number of the endpoint
+ * @mps: The maximum packet size in bytes
+ *
+ * Configure the maximum packet size for the given endpoint, updating
+ * the hardware control registers to reflect this.
+ */
+static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
+ unsigned int ep, unsigned int mps)
+{
+ struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
+ void __iomem *regs = hsotg->regs;
+ u32 mpsval;
+ u32 reg;
+
+ if (ep == 0) {
+ /* EP0 is a special case */
+ mpsval = s3c_hsotg_ep0_mps(mps);
+ if (mpsval > 3)
+ goto bad_mps;
+ } else {
+ if (mps >= S3C_DxEPCTL_MPS_LIMIT+1)
+ goto bad_mps;
+
+ mpsval = mps;
+ }
+
+ hs_ep->ep.maxpacket = mps;
+
+ /* update both the in and out endpoint control registers, even
+ * if one of the directions may not be in use. */
+
+ reg = readl(regs + S3C_DIEPCTL(ep));
+ reg &= ~S3C_DxEPCTL_MPS_MASK;
+ reg |= mpsval;
+ writel(reg, regs + S3C_DIEPCTL(ep));
+
+ reg = readl(regs + S3C_DOEPCTL(ep));
+ reg &= ~S3C_DxEPCTL_MPS_MASK;
+ reg |= mpsval;
+ writel(reg, regs + S3C_DOEPCTL(ep));
+
+ return;
+
+bad_mps:
+ dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
+}
+
+
+/**
+ * s3c_hsotg_trytx - check to see if anything needs transmitting
+ * @hsotg: The driver state
+ * @hs_ep: The driver endpoint to check.
+ *
+ * Check to see if there is a request that has data to send, and if so
+ * make an attempt to write data into the FIFO.
+ */
+static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
+ struct s3c_hsotg_ep *hs_ep)
+{
+ struct s3c_hsotg_req *hs_req = hs_ep->req;
+
+ if (!hs_ep->dir_in || !hs_req)
+ return 0;
+
+ if (hs_req->req.actual < hs_req->req.length) {
+ dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
+ hs_ep->index);
+ return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
+ }
+
+ return 0;
+}
+
+/**
+ * s3c_hsotg_complete_in - complete IN transfer
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint that has just completed.
+ *
+ * An IN transfer has been completed, update the transfer's state and then
+ * call the relevant completion routines.
+ */
+static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
+ struct s3c_hsotg_ep *hs_ep)
+{
+ struct s3c_hsotg_req *hs_req = hs_ep->req;
+ u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
+ int size_left, size_done;
+
+ if (!hs_req) {
+ dev_dbg(hsotg->dev, "XferCompl but no req\n");
+ return;
+ }
+
+ /* Calculate the size of the transfer by checking how much is left
+ * in the endpoint size register and then working it out from
+ * the amount we loaded for the transfer.
+ *
+ * We do this even for DMA, as the transfer may have incremented
+ * past the end of the buffer (DMA transfers are always 32bit
+ * aligned).
+ */
+
+ size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+
+ size_done = hs_ep->size_loaded - size_left;
+ size_done += hs_ep->last_load;
+
+ if (hs_req->req.actual != size_done)
+ dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
+ __func__, hs_req->req.actual, size_done);
+
+ hs_req->req.actual = size_done;
+
+ /* if we did all of the transfer, and there is more data left
+ * around, then try restarting the rest of the request */
+
+ if (!size_left && hs_req->req.actual < hs_req->req.length) {
+ dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
+ s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
+ } else
+ s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);
+}
+
+/**
+ * s3c_hsotg_epint - handle an in/out endpoint interrupt
+ * @hsotg: The driver state
+ * @idx: The index for the endpoint (0..15)
+ * @dir_in: Set if this is an IN endpoint
+ *
+ * Process and clear any interrupt pending for an individual endpoint
+*/
+static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
+ int dir_in)
+{
+ struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
+ u32 epint_reg = dir_in ? S3C_DIEPINT(idx) : S3C_DOEPINT(idx);
+ u32 epctl_reg = dir_in ? S3C_DIEPCTL(idx) : S3C_DOEPCTL(idx);
+ u32 epsiz_reg = dir_in ? S3C_DIEPTSIZ(idx) : S3C_DOEPTSIZ(idx);
+ u32 ints;
+ u32 clear = 0;
+
+ ints = readl(hsotg->regs + epint_reg);
+
+ dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
+ __func__, idx, dir_in ? "in" : "out", ints);
+
+ if (ints & S3C_DxEPINT_XferCompl) {
+ dev_dbg(hsotg->dev,
+ "%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
+ __func__, readl(hsotg->regs + epctl_reg),
+ readl(hsotg->regs + epsiz_reg));
+
+ /* we get OutDone from the FIFO, so we only need to look
+ * at completing IN requests here */
+ if (dir_in) {
+ s3c_hsotg_complete_in(hsotg, hs_ep);
+
+ if (idx == 0)
+ s3c_hsotg_enqueue_setup(hsotg);
+ } else if (using_dma(hsotg)) {
+ /* We're using DMA, we need to fire an OutDone here
+ * as we ignore the RXFIFO. */
+
+ s3c_hsotg_handle_outdone(hsotg, idx, false);
+ }
+
+ clear |= S3C_DxEPINT_XferCompl;
+ }
+
+ if (ints & S3C_DxEPINT_EPDisbld) {
+ dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
+ clear |= S3C_DxEPINT_EPDisbld;
+ }
+
+ if (ints & S3C_DxEPINT_AHBErr) {
+ dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
+ clear |= S3C_DxEPINT_AHBErr;
+ }
+
+ if (ints & S3C_DxEPINT_Setup) { /* Setup or Timeout */
+ dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);
+
+ if (using_dma(hsotg) && idx == 0) {
+ /* this is the notification we've received a
+ * setup packet. In non-DMA mode we'd get this
+ * from the RXFIFO, instead we need to process
+ * the setup here. */
+
+ if (dir_in)
+ WARN_ON_ONCE(1);
+ else
+ s3c_hsotg_handle_outdone(hsotg, 0, true);
+ }
+
+ clear |= S3C_DxEPINT_Setup;
+ }
+
+ if (ints & S3C_DxEPINT_Back2BackSetup) {
+ dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
+ clear |= S3C_DxEPINT_Back2BackSetup;
+ }
+
+ if (dir_in) {
+ /* not sure if this is important, but we'll clear it anyway
+ */
+ if (ints & S3C_DIEPMSK_INTknTXFEmpMsk) {
+ dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
+ __func__, idx);
+ clear |= S3C_DIEPMSK_INTknTXFEmpMsk;
+ }
+
+ /* this probably means something bad is happening */
+ if (ints & S3C_DIEPMSK_INTknEPMisMsk) {
+ dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
+ __func__, idx);
+ clear |= S3C_DIEPMSK_INTknEPMisMsk;
+ }
+ }
+
+ writel(clear, hsotg->regs + epint_reg);
+}
+
+/**
+ * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
+ * @hsotg: The device state.
+ *
+ * Handle updating the device settings after the enumeration phase has
+ * been completed.
+*/
+static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
+{
+ u32 dsts = readl(hsotg->regs + S3C_DSTS);
+ int ep0_mps = 0, ep_mps;
+
+ /* This should signal the finish of the enumeration phase
+ * of the USB handshaking, so we should now know what rate
+ * we connected at. */
+
+ dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
+
+ /* note, since we're limited by the size of transfer on EP0, and
+ * it seems IN transfers must be an even number of packets, we do
+ * not advertise a 64-byte MPS on EP0. */
+
+ /* catch both EnumSpd_FS and EnumSpd_FS48 */
+ switch (dsts & S3C_DSTS_EnumSpd_MASK) {
+ case S3C_DSTS_EnumSpd_FS:
+ case S3C_DSTS_EnumSpd_FS48:
+ hsotg->gadget.speed = USB_SPEED_FULL;
+ dev_info(hsotg->dev, "new device is full-speed\n");
+
+ ep0_mps = EP0_MPS_LIMIT;
+ ep_mps = 64;
+ break;
+
+ case S3C_DSTS_EnumSpd_HS:
+ dev_info(hsotg->dev, "new device is high-speed\n");
+ hsotg->gadget.speed = USB_SPEED_HIGH;
+
+ ep0_mps = EP0_MPS_LIMIT;
+ ep_mps = 512;
+ break;
+
+ case S3C_DSTS_EnumSpd_LS:
+ hsotg->gadget.speed = USB_SPEED_LOW;
+ dev_info(hsotg->dev, "new device is low-speed\n");
+
+ /* note, we don't actually support LS in this driver at the
+ * moment, and the documentation seems to imply that it isn't
+ * supported by the PHYs on some of the devices.
+ */
+ break;
+ }
+
+ /* we should now know the maximum packet size for an
+ * endpoint, so set the endpoints to a default value. */
+
+ if (ep0_mps) {
+ int i;
+ s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
+ for (i = 1; i < S3C_HSOTG_EPS; i++)
+ s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
+ }
+
+ /* ensure after enumeration our EP0 is active */
+
+ s3c_hsotg_enqueue_setup(hsotg);
+
+ dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+ readl(hsotg->regs + S3C_DIEPCTL0),
+ readl(hsotg->regs + S3C_DOEPCTL0));
+}
+
+/**
+ * kill_all_requests - remove all requests from the endpoint's queue
+ * @hsotg: The device state.
+ * @ep: The endpoint the requests may be on.
+ * @result: The result code to use.
+ * @force: Force removal of any current requests
+ *
+ * Go through the requests on the given endpoint and mark them
+ * completed with the given result code.
+ */
+static void kill_all_requests(struct s3c_hsotg *hsotg,
+ struct s3c_hsotg_ep *ep,
+ int result, bool force)
+{
+ struct s3c_hsotg_req *req, *treq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ list_for_each_entry_safe(req, treq, &ep->queue, queue) {
+ /* currently, we can't do much about an already
+ * running request on an in endpoint */
+
+ if (ep->req == req && ep->dir_in && !force)
+ continue;
+
+ s3c_hsotg_complete_request(hsotg, ep, req,
+ result);
+ }
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+}
+
+#define call_gadget(_hs, _entry) \
+ if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \
+ (_hs)->driver && (_hs)->driver->_entry) \
+ (_hs)->driver->_entry(&(_hs)->gadget);
+
+/**
+ * s3c_hsotg_disconnect_irq - disconnect irq service
+ * @hsotg: The device state.
+ *
+ * A disconnect IRQ has been received, meaning that the host has
+ * lost contact with the bus. Remove all current transactions
+ * and signal the gadget driver that this has happened.
+*/
+static void s3c_hsotg_disconnect_irq(struct s3c_hsotg *hsotg)
+{
+ unsigned ep;
+
+ for (ep = 0; ep < S3C_HSOTG_EPS; ep++)
+ kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);
+
+ call_gadget(hsotg, disconnect);
+}
+
+/**
+ * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
+ * @hsotg: The device state.
+ * @periodic: True if this is a periodic FIFO interrupt
+ */
+static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
+{
+ struct s3c_hsotg_ep *ep;
+ int epno, ret;
+
+ /* look through for any more data to transmit */
+
+ for (epno = 0; epno < S3C_HSOTG_EPS; epno++) {
+ ep = &hsotg->eps[epno];
+
+ if (!ep->dir_in)
+ continue;
+
+ if ((periodic && !ep->periodic) ||
+ (!periodic && ep->periodic))
+ continue;
+
+ ret = s3c_hsotg_trytx(hsotg, ep);
+ if (ret < 0)
+ break;
+ }
+}
+
+static struct s3c_hsotg *our_hsotg;
+
+/* IRQ flags which will trigger a retry around the IRQ loop */
+#define IRQ_RETRY_MASK (S3C_GINTSTS_NPTxFEmp | \
+ S3C_GINTSTS_PTxFEmp | \
+ S3C_GINTSTS_RxFLvl)
+
+/**
+ * s3c_hsotg_irq - handle device interrupt
+ * @irq: The IRQ number triggered
+ * @pw: The pw value supplied when the handler was registered.
+ */
+static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
+{
+ struct s3c_hsotg *hsotg = pw;
+ int retry_count = 8;
+ u32 gintsts;
+ u32 gintmsk;
+
+irq_retry:
+ gintsts = readl(hsotg->regs + S3C_GINTSTS);
+ gintmsk = readl(hsotg->regs + S3C_GINTMSK);
+
+ dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
+ __func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
+
+ gintsts &= gintmsk;
+
+ if (gintsts & S3C_GINTSTS_OTGInt) {
+ u32 otgint = readl(hsotg->regs + S3C_GOTGINT);
+
+ dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);
+
+ writel(otgint, hsotg->regs + S3C_GOTGINT);
+ writel(S3C_GINTSTS_OTGInt, hsotg->regs + S3C_GINTSTS);
+ }
+
+ if (gintsts & S3C_GINTSTS_DisconnInt) {
+ dev_dbg(hsotg->dev, "%s: DisconnInt\n", __func__);
+ writel(S3C_GINTSTS_DisconnInt, hsotg->regs + S3C_GINTSTS);
+
+ s3c_hsotg_disconnect_irq(hsotg);
+ }
+
+ if (gintsts & S3C_GINTSTS_SessReqInt) {
+ dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
+ writel(S3C_GINTSTS_SessReqInt, hsotg->regs + S3C_GINTSTS);
+ }
+
+ if (gintsts & S3C_GINTSTS_EnumDone) {
+ s3c_hsotg_irq_enumdone(hsotg);
+ writel(S3C_GINTSTS_EnumDone, hsotg->regs + S3C_GINTSTS);
+ }
+
+ if (gintsts & S3C_GINTSTS_ConIDStsChng) {
+ dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
+ readl(hsotg->regs + S3C_DSTS),
+ readl(hsotg->regs + S3C_GOTGCTL));
+
+ writel(S3C_GINTSTS_ConIDStsChng, hsotg->regs + S3C_GINTSTS);
+ }
+
+ if (gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt)) {
+ u32 daint = readl(hsotg->regs + S3C_DAINT);
+ u32 daint_out = daint >> S3C_DAINT_OutEP_SHIFT;
+ u32 daint_in = daint & ~(daint_out << S3C_DAINT_OutEP_SHIFT);
+ int ep;
+
+ dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
+
+ for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
+ if (daint_out & 1)
+ s3c_hsotg_epint(hsotg, ep, 0);
+ }
+
+ for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
+ if (daint_in & 1)
+ s3c_hsotg_epint(hsotg, ep, 1);
+ }
+
+ writel(daint, hsotg->regs + S3C_DAINT);
+ writel(gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt),
+ hsotg->regs + S3C_GINTSTS);
+ }
+
+ if (gintsts & S3C_GINTSTS_USBRst) {
+ dev_info(hsotg->dev, "%s: USBRst\n", __func__);
+ dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
+ readl(hsotg->regs + S3C_GNPTXSTS));
+
+ kill_all_requests(hsotg, &hsotg->eps[0], -ECONNRESET, true);
+
+ /* it seems after a reset we can end up with a situation
+ * where the TXFIFO still has data in it... try flushing
+ * it to remove anything that may still be in it.
+ */
+
+ if (1) {
+ writel(S3C_GRSTCTL_TxFNum(0) | S3C_GRSTCTL_TxFFlsh,
+ hsotg->regs + S3C_GRSTCTL);
+
+ dev_info(hsotg->dev, "GNPTXSTS=%08x\n",
+ readl(hsotg->regs + S3C_GNPTXSTS));
+ }
+
+ s3c_hsotg_enqueue_setup(hsotg);
+
+ writel(S3C_GINTSTS_USBRst, hsotg->regs + S3C_GINTSTS);
+ }
+
+ /* check both FIFOs */
+
+ if (gintsts & S3C_GINTSTS_NPTxFEmp) {
+ dev_dbg(hsotg->dev, "NPTxFEmp\n");
+
+ /* Disable the interrupt to stop it happening again
+ * unless one of these endpoint routines decides that
+ * it needs re-enabling */
+
+ s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
+ s3c_hsotg_irq_fifoempty(hsotg, false);
+
+ writel(S3C_GINTSTS_NPTxFEmp, hsotg->regs + S3C_GINTSTS);
+ }
+
+ if (gintsts & S3C_GINTSTS_PTxFEmp) {
+ dev_dbg(hsotg->dev, "PTxFEmp\n");
+
+ /* See note in S3C_GINTSTS_NPTxFEmp */
+
+ s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
+ s3c_hsotg_irq_fifoempty(hsotg, true);
+
+ writel(S3C_GINTSTS_PTxFEmp, hsotg->regs + S3C_GINTSTS);
+ }
+
+ if (gintsts & S3C_GINTSTS_RxFLvl) {
+ /* note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
+ * we need to retry s3c_hsotg_handle_rx if this is still
+ * set. */
+
+ s3c_hsotg_handle_rx(hsotg);
+ writel(S3C_GINTSTS_RxFLvl, hsotg->regs + S3C_GINTSTS);
+ }
+
+ if (gintsts & S3C_GINTSTS_ModeMis) {
+ dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
+ writel(S3C_GINTSTS_ModeMis, hsotg->regs + S3C_GINTSTS);
+ }
+
+ if (gintsts & S3C_GINTSTS_USBSusp) {
+ dev_info(hsotg->dev, "S3C_GINTSTS_USBSusp\n");
+ writel(S3C_GINTSTS_USBSusp, hsotg->regs + S3C_GINTSTS);
+
+ call_gadget(hsotg, suspend);
+ }
+
+ if (gintsts & S3C_GINTSTS_WkUpInt) {
+ dev_info(hsotg->dev, "S3C_GINTSTS_WkUpIn\n");
+ writel(S3C_GINTSTS_WkUpInt, hsotg->regs + S3C_GINTSTS);
+
+ call_gadget(hsotg, resume);
+ }
+
+ if (gintsts & S3C_GINTSTS_ErlySusp) {
+ dev_dbg(hsotg->dev, "S3C_GINTSTS_ErlySusp\n");
+ writel(S3C_GINTSTS_ErlySusp, hsotg->regs + S3C_GINTSTS);
+ }
+
+ /* these next two seem to crop up occasionally, causing the core
+ * to shut down the USB transfer, so try clearing them and logging
+ * the occurrence. */
+
+ if (gintsts & S3C_GINTSTS_GOUTNakEff) {
+ dev_info(hsotg->dev, "GOUTNakEff triggered\n");
+
+ s3c_hsotg_dump(hsotg);
+
+ writel(S3C_DCTL_CGOUTNak, hsotg->regs + S3C_DCTL);
+ writel(S3C_GINTSTS_GOUTNakEff, hsotg->regs + S3C_GINTSTS);
+ }
+
+ if (gintsts & S3C_GINTSTS_GINNakEff) {
+ dev_info(hsotg->dev, "GINNakEff triggered\n");
+
+ s3c_hsotg_dump(hsotg);
+
+ writel(S3C_DCTL_CGNPInNAK, hsotg->regs + S3C_DCTL);
+ writel(S3C_GINTSTS_GINNakEff, hsotg->regs + S3C_GINTSTS);
+ }
+
+ /* if we've had fifo events, we should try and go around the
+ * loop again to see if there's any point in returning yet. */
+
+ if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
+ goto irq_retry;
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * s3c_hsotg_ep_enable - enable the given endpoint
+ * @ep: The USB endpoint to configure
+ * @desc: The USB endpoint descriptor to configure with.
+ *
+ * This is called from the USB gadget code's usb_ep_enable().
+*/
+static int s3c_hsotg_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+ struct s3c_hsotg *hsotg = hs_ep->parent;
+ unsigned long flags;
+ int index = hs_ep->index;
+ u32 epctrl_reg;
+ u32 epctrl;
+ u32 mps;
+ int dir_in;
+
+ dev_dbg(hsotg->dev,
+ "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
+ __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
+ desc->wMaxPacketSize, desc->bInterval);
+
+ /* not to be called for EP0 */
+ WARN_ON(index == 0);
+
+ dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
+ if (dir_in != hs_ep->dir_in) {
+ dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
+ return -EINVAL;
+ }
+
+ mps = le16_to_cpu(desc->wMaxPacketSize);
+
+ /* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */
+
+ epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
+ epctrl = readl(hsotg->regs + epctrl_reg);
+
+ dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
+ __func__, epctrl, epctrl_reg);
+
+ spin_lock_irqsave(&hs_ep->lock, flags);
+
+ epctrl &= ~(S3C_DxEPCTL_EPType_MASK | S3C_DxEPCTL_MPS_MASK);
+ epctrl |= S3C_DxEPCTL_MPS(mps);
+
+ /* mark the endpoint as active, otherwise the core may ignore
+ * transactions entirely for this endpoint */
+ epctrl |= S3C_DxEPCTL_USBActEp;
+
+ /* set the NAK status on the endpoint, otherwise we might try and
+ * do something with data that we've not yet got a request to process,
+ * since the RXFIFO will take data for an endpoint even if the
+ * size register hasn't been set.
+ */
+
+ epctrl |= S3C_DxEPCTL_SNAK;
+
+ /* update the endpoint state */
+ hs_ep->ep.maxpacket = mps;
+
+ /* default, set to non-periodic */
+ hs_ep->periodic = 0;
+
+ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_ISOC:
+ dev_err(hsotg->dev, "no current ISOC support\n");
+ return -EINVAL;
+
+ case USB_ENDPOINT_XFER_BULK:
+ epctrl |= S3C_DxEPCTL_EPType_Bulk;
+ break;
+
+ case USB_ENDPOINT_XFER_INT:
+ if (dir_in) {
+ /* Allocate our TxFNum by simply using the index
+ * of the endpoint for the moment. We could do
+ * something better if the host indicates how
+ * many FIFOs we are expecting to use. */
+
+ hs_ep->periodic = 1;
+ epctrl |= S3C_DxEPCTL_TxFNum(index);
+ }
+
+ epctrl |= S3C_DxEPCTL_EPType_Intterupt;
+ break;
+
+ case USB_ENDPOINT_XFER_CONTROL:
+ epctrl |= S3C_DxEPCTL_EPType_Control;
+ break;
+ }
+
+ /* for non control endpoints, set PID to D0 */
+ if (index)
+ epctrl |= S3C_DxEPCTL_SetD0PID;
+
+ dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
+ __func__, epctrl);
+
+ writel(epctrl, hsotg->regs + epctrl_reg);
+ dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
+ __func__, readl(hsotg->regs + epctrl_reg));
+
+ /* enable the endpoint interrupt */
+ s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
+
+ spin_unlock_irqrestore(&hs_ep->lock, flags);
+ return 0;
+}
+
+static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+{
+ struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+ struct s3c_hsotg *hsotg = hs_ep->parent;
+ int dir_in = hs_ep->dir_in;
+ int index = hs_ep->index;
+ unsigned long flags;
+ u32 epctrl_reg;
+ u32 ctrl;
+
+ dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);
+
+ if (ep == &hsotg->eps[0].ep) {
+ dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
+ return -EINVAL;
+ }
+
+ epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
+
+ /* terminate all requests with shutdown */
+ kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
+
+ spin_lock_irqsave(&hs_ep->lock, flags);
+
+ ctrl = readl(hsotg->regs + epctrl_reg);
+ ctrl &= ~S3C_DxEPCTL_EPEna;
+ ctrl &= ~S3C_DxEPCTL_USBActEp;
+ ctrl |= S3C_DxEPCTL_SNAK;
+
+ dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
+ writel(ctrl, hsotg->regs + epctrl_reg);
+
+ /* disable endpoint interrupts */
+ s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
+
+ spin_unlock_irqrestore(&hs_ep->lock, flags);
+ return 0;
+}
+
+/**
+ * on_list - check request is on the given endpoint
+ * @ep: The endpoint to check.
+ * @test: The request to test if it is on the endpoint.
+*/
+static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
+{
+ struct s3c_hsotg_req *req, *treq;
+
+ list_for_each_entry_safe(req, treq, &ep->queue, queue) {
+ if (req == test)
+ return true;
+ }
+
+ return false;
+}
+
+static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ struct s3c_hsotg_req *hs_req = our_req(req);
+ struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+ struct s3c_hsotg *hs = hs_ep->parent;
+ unsigned long flags;
+
+ dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
+
+ if (hs_req == hs_ep->req) {
+ dev_dbg(hs->dev, "%s: already in progress\n", __func__);
+ return -EINPROGRESS;
+ }
+
+ spin_lock_irqsave(&hs_ep->lock, flags);
+
+ if (!on_list(hs_ep, hs_req)) {
+ spin_unlock_irqrestore(&hs_ep->lock, flags);
+ return -EINVAL;
+ }
+
+ s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
+ spin_unlock_irqrestore(&hs_ep->lock, flags);
+
+ return 0;
+}
+
+static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
+{
+ struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+ struct s3c_hsotg *hs = hs_ep->parent;
+ int index = hs_ep->index;
+ unsigned long irqflags;
+ u32 epreg;
+ u32 epctl;
+
+ dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
+
+ spin_lock_irqsave(&hs_ep->lock, irqflags);
+
+ /* write both IN and OUT control registers */
+
+ epreg = S3C_DIEPCTL(index);
+ epctl = readl(hs->regs + epreg);
+
+ if (value)
+ epctl |= S3C_DxEPCTL_Stall;
+ else
+ epctl &= ~S3C_DxEPCTL_Stall;
+
+ writel(epctl, hs->regs + epreg);
+
+ epreg = S3C_DOEPCTL(index);
+ epctl = readl(hs->regs + epreg);
+
+ if (value)
+ epctl |= S3C_DxEPCTL_Stall;
+ else
+ epctl &= ~S3C_DxEPCTL_Stall;
+
+ writel(epctl, hs->regs + epreg);
+
+ spin_unlock_irqrestore(&hs_ep->lock, irqflags);
+
+ return 0;
+}
+
+static struct usb_ep_ops s3c_hsotg_ep_ops = {
+ .enable = s3c_hsotg_ep_enable,
+ .disable = s3c_hsotg_ep_disable,
+ .alloc_request = s3c_hsotg_ep_alloc_request,
+ .free_request = s3c_hsotg_ep_free_request,
+ .queue = s3c_hsotg_ep_queue,
+ .dequeue = s3c_hsotg_ep_dequeue,
+ .set_halt = s3c_hsotg_ep_sethalt,
+ /* note, don't believe we have any call for the fifo routines */
+};
+
+/**
+ * s3c_hsotg_corereset - issue softreset to the core
+ * @hsotg: The device state
+ *
+ * Issue a soft reset to the core, and await the core finishing it.
+*/
+static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
+{
+ int timeout;
+ u32 grstctl;
+
+ dev_dbg(hsotg->dev, "resetting core\n");
+
+ /* issue soft reset */
+ writel(S3C_GRSTCTL_CSftRst, hsotg->regs + S3C_GRSTCTL);
+
+ timeout = 1000;
+ do {
+ grstctl = readl(hsotg->regs + S3C_GRSTCTL);
+ } while (!(grstctl & S3C_GRSTCTL_CSftRst) && timeout-- > 0);
+
+ if (!grstctl & S3C_GRSTCTL_CSftRst) {
+ dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
+ return -EINVAL;
+ }
+
+ timeout = 1000;
+
+ while (1) {
+ u32 grstctl = readl(hsotg->regs + S3C_GRSTCTL);
+
+ if (timeout-- < 0) {
+ dev_info(hsotg->dev,
+ "%s: reset failed, GRSTCTL=%08x\n",
+ __func__, grstctl);
+ return -ETIMEDOUT;
+ }
+
+ if (grstctl & S3C_GRSTCTL_CSftRst)
+ continue;
+
+ if (!(grstctl & S3C_GRSTCTL_AHBIdle))
+ continue;
+
+ break; /* reset done */
+ }
+
+ dev_dbg(hsotg->dev, "reset successful\n");
+ return 0;
+}
+
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+ struct s3c_hsotg *hsotg = our_hsotg;
+ int ret;
+
+ if (!hsotg) {
+ printk(KERN_ERR "%s: called with no device\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!driver) {
+ dev_err(hsotg->dev, "%s: no driver\n", __func__);
+ return -EINVAL;
+ }
+
+ if (driver->speed != USB_SPEED_HIGH &&
+ driver->speed != USB_SPEED_FULL) {
+ dev_err(hsotg->dev, "%s: bad speed\n", __func__);
+ }
+
+ if (!driver->bind || !driver->setup) {
+ dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
+ return -EINVAL;
+ }
+
+ WARN_ON(hsotg->driver);
+
+ driver->driver.bus = NULL;
+ hsotg->driver = driver;
+ hsotg->gadget.dev.driver = &driver->driver;
+ hsotg->gadget.dev.dma_mask = hsotg->dev->dma_mask;
+ hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+
+ ret = device_add(&hsotg->gadget.dev);
+ if (ret) {
+ dev_err(hsotg->dev, "failed to register gadget device\n");
+ goto err;
+ }
+
+ ret = driver->bind(&hsotg->gadget);
+ if (ret) {
+ dev_err(hsotg->dev, "failed bind %s\n", driver->driver.name);
+
+ hsotg->gadget.dev.driver = NULL;
+ hsotg->driver = NULL;
+ goto err;
+ }
+
+ /* we must now enable ep0 ready for host detection and then
+ * set configuration. */
+
+ s3c_hsotg_corereset(hsotg);
+
+ /* set the PLL on, remove the HNP/SRP and set the PHY */
+ writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) |
+ (0x5 << 10), hsotg->regs + S3C_GUSBCFG);
+
+ /* looks like soft-reset changes state of FIFOs */
+ s3c_hsotg_init_fifo(hsotg);
+
+ __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
+
+ writel(1 << 18 | S3C_DCFG_DevSpd_HS, hsotg->regs + S3C_DCFG);
+
+ writel(S3C_GINTSTS_DisconnInt | S3C_GINTSTS_SessReqInt |
+ S3C_GINTSTS_ConIDStsChng | S3C_GINTSTS_USBRst |
+ S3C_GINTSTS_EnumDone | S3C_GINTSTS_OTGInt |
+ S3C_GINTSTS_USBSusp | S3C_GINTSTS_WkUpInt |
+ S3C_GINTSTS_GOUTNakEff | S3C_GINTSTS_GINNakEff |
+ S3C_GINTSTS_ErlySusp,
+ hsotg->regs + S3C_GINTMSK);
+
+ if (using_dma(hsotg))
+ writel(S3C_GAHBCFG_GlblIntrEn | S3C_GAHBCFG_DMAEn |
+ S3C_GAHBCFG_HBstLen_Incr4,
+ hsotg->regs + S3C_GAHBCFG);
+ else
+ writel(S3C_GAHBCFG_GlblIntrEn, hsotg->regs + S3C_GAHBCFG);
+
+ /* Enabling INTknTXFEmpMsk here seems to be a big mistake, we end
+ * up being flooded with interrupts if the host is polling the
+ * endpoint to try and read data. */
+
+ writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
+ S3C_DIEPMSK_INTknEPMisMsk |
+ S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,
+ hsotg->regs + S3C_DIEPMSK);
+
+ /* don't need XferCompl, we get that from RXFIFO in slave mode. In
+ * DMA mode we may need this. */
+ writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
+ S3C_DOEPMSK_EPDisbldMsk |
+ (using_dma(hsotg) ? (S3C_DIEPMSK_XferComplMsk |
+ S3C_DIEPMSK_TimeOUTMsk) : 0),
+ hsotg->regs + S3C_DOEPMSK);
+
+ writel(0, hsotg->regs + S3C_DAINTMSK);
+
+ dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+ readl(hsotg->regs + S3C_DIEPCTL0),
+ readl(hsotg->regs + S3C_DOEPCTL0));
+
+ /* enable in and out endpoint interrupts */
+ s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt);
+
+ /* Enable the RXFIFO when in slave mode, as this is how we collect
+ * the data. In DMA mode, we get events from the FIFO but also
+ * things we cannot process, so do not use it. */
+ if (!using_dma(hsotg))
+ s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_RxFLvl);
+
+ /* Enable interrupts for EP0 in and out */
+ s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
+ s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);
+
+ __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
+ udelay(10); /* see openiboot */
+ __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
+
+ dev_info(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL));
+
+ /* the manual says S3C_DxEPCTL_USBActEp is read-only, but it seems
+ * to be set by writing to the EPCTL register. */
+
+ /* set to read 1 8byte packet */
+ writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
+ S3C_DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0);
+
+ writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
+ S3C_DxEPCTL_CNAK | S3C_DxEPCTL_EPEna |
+ S3C_DxEPCTL_USBActEp,
+ hsotg->regs + S3C_DOEPCTL0);
+
+ /* enable, but don't activate EP0in */
+ writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
+ S3C_DxEPCTL_USBActEp, hsotg->regs + S3C_DIEPCTL0);
+
+ s3c_hsotg_enqueue_setup(hsotg);
+
+ dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+ readl(hsotg->regs + S3C_DIEPCTL0),
+ readl(hsotg->regs + S3C_DOEPCTL0));
+
+ /* clear global NAKs */
+ writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK,
+ hsotg->regs + S3C_DCTL);
+
+ /* remove the soft-disconnect and let's go */
+ __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
+
+ /* report to the user, and return */
+
+ dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
+ return 0;
+
+err:
+ hsotg->driver = NULL;
+ hsotg->gadget.dev.driver = NULL;
+ return ret;
+}
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ struct s3c_hsotg *hsotg = our_hsotg;
+ int ep;
+
+ if (!hsotg)
+ return -ENODEV;
+
+ if (!driver || driver != hsotg->driver || !driver->unbind)
+ return -EINVAL;
+
+ /* all endpoints should be shutdown */
+ for (ep = 0; ep < S3C_HSOTG_EPS; ep++)
+ s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
+
+ call_gadget(hsotg, disconnect);
+
+ driver->unbind(&hsotg->gadget);
+ hsotg->driver = NULL;
+ hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+
+ device_del(&hsotg->gadget.dev);
+
+ dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
+ driver->driver.name);
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
+{
+ return s3c_hsotg_read_frameno(to_hsotg(gadget));
+}
+
+static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
+ .get_frame = s3c_hsotg_gadget_getframe,
+};
+
+/**
+ * s3c_hsotg_initep - initialise a single endpoint
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint to be initialised.
+ * @epnum: The endpoint number
+ *
+ * Initialise the given endpoint (as part of the probe and device state
+ * creation) to give to the gadget driver. Setup the endpoint name, any
+ * direction information and other state that may be required.
+ */
+static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,
+ struct s3c_hsotg_ep *hs_ep,
+ int epnum)
+{
+ u32 ptxfifo;
+ char *dir;
+
+ if (epnum == 0)
+ dir = "";
+ else if ((epnum % 2) == 0) {
+ dir = "out";
+ } else {
+ dir = "in";
+ hs_ep->dir_in = 1;
+ }
+
+ hs_ep->index = epnum;
+
+ snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
+
+ INIT_LIST_HEAD(&hs_ep->queue);
+ INIT_LIST_HEAD(&hs_ep->ep.ep_list);
+
+ spin_lock_init(&hs_ep->lock);
+
+ /* add to the list of endpoints known by the gadget driver */
+ if (epnum)
+ list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
+
+ hs_ep->parent = hsotg;
+ hs_ep->ep.name = hs_ep->name;
+ hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT;
+ hs_ep->ep.ops = &s3c_hsotg_ep_ops;
+
+ /* Read the FIFO size for the Periodic TX FIFO, even for an OUT
+ * endpoint; it may be useful if the code is later changed to make
+ * each endpoint's direction configurable.
+ */
+
+ ptxfifo = readl(hsotg->regs + S3C_DPTXFSIZn(epnum));
+ hs_ep->fifo_size = S3C_DPTXFSIZn_DPTxFSize_GET(ptxfifo);
+
+ /* if we're using dma, we need to set the next-endpoint pointer
+ * to be something valid.
+ */
+
+ if (using_dma(hsotg)) {
+ u32 next = S3C_DxEPCTL_NextEp((epnum + 1) % 15);
+ writel(next, hsotg->regs + S3C_DIEPCTL(epnum));
+ writel(next, hsotg->regs + S3C_DOEPCTL(epnum));
+ }
+}
+
+/**
+ * s3c_hsotg_otgreset - reset the OtG phy block
+ * @hsotg: The device state.
+ *
+ * Power up the phy, set the basic configuration and start the PHY.
+ */
+static void s3c_hsotg_otgreset(struct s3c_hsotg *hsotg)
+{
+ u32 osc;
+
+ writel(0, S3C_PHYPWR);
+ mdelay(1);
+
+ osc = hsotg->plat->is_osc ? S3C_PHYCLK_EXT_OSC : 0;
+
+ writel(osc | 0x10, S3C_PHYCLK);
+
+ /* issue a full set of resets to the otg and core */
+
+ writel(S3C_RSTCON_PHY, S3C_RSTCON);
+ udelay(20); /* at least 10us */
+ writel(0, S3C_RSTCON);
+}
+
+
+static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
+{
+ /* unmask subset of endpoint interrupts */
+
+ writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
+ S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,
+ hsotg->regs + S3C_DIEPMSK);
+
+ writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
+ S3C_DOEPMSK_EPDisbldMsk | S3C_DOEPMSK_XferComplMsk,
+ hsotg->regs + S3C_DOEPMSK);
+
+ writel(0, hsotg->regs + S3C_DAINTMSK);
+
+ if (0) {
+ /* post global nak until we're ready */
+ writel(S3C_DCTL_SGNPInNAK | S3C_DCTL_SGOUTNak,
+ hsotg->regs + S3C_DCTL);
+ }
+
+ /* setup fifos */
+
+ dev_info(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
+ readl(hsotg->regs + S3C_GRXFSIZ),
+ readl(hsotg->regs + S3C_GNPTXFSIZ));
+
+ s3c_hsotg_init_fifo(hsotg);
+
+ /* set the PLL on, remove the HNP/SRP and set the PHY */
+ writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) | (0x5 << 10),
+ hsotg->regs + S3C_GUSBCFG);
+
+ writel(using_dma(hsotg) ? S3C_GAHBCFG_DMAEn : 0x0,
+ hsotg->regs + S3C_GAHBCFG);
+}
+
+static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
+{
+ struct device *dev = hsotg->dev;
+ void __iomem *regs = hsotg->regs;
+ u32 val;
+ int idx;
+
+ dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
+ readl(regs + S3C_DCFG), readl(regs + S3C_DCTL),
+ readl(regs + S3C_DIEPMSK));
+
+ dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
+ readl(regs + S3C_GAHBCFG), readl(regs + 0x44));
+
+ dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
+ readl(regs + S3C_GRXFSIZ), readl(regs + S3C_GNPTXFSIZ));
+
+ /* show periodic fifo settings */
+
+ for (idx = 1; idx <= 15; idx++) {
+ val = readl(regs + S3C_DPTXFSIZn(idx));
+ dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
+ val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
+ val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
+ }
+
+ for (idx = 0; idx < 15; idx++) {
+ dev_info(dev,
+ "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
+ readl(regs + S3C_DIEPCTL(idx)),
+ readl(regs + S3C_DIEPTSIZ(idx)),
+ readl(regs + S3C_DIEPDMA(idx)));
+
+ val = readl(regs + S3C_DOEPCTL(idx));
+ dev_info(dev,
+ "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
+ idx, readl(regs + S3C_DOEPCTL(idx)),
+ readl(regs + S3C_DOEPTSIZ(idx)),
+ readl(regs + S3C_DOEPDMA(idx)));
+
+ }
+
+ dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
+ readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE));
+}
+
+
+/**
+ * state_show - debugfs: show overall driver and device state.
+ * @seq: The seq file to write to.
+ * @v: Unused parameter.
+ *
+ * This debugfs entry shows the overall state of the hardware and
+ * some general information about each of the endpoints available
+ * to the system.
+ */
+static int state_show(struct seq_file *seq, void *v)
+{
+ struct s3c_hsotg *hsotg = seq->private;
+ void __iomem *regs = hsotg->regs;
+ int idx;
+
+ seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
+ readl(regs + S3C_DCFG),
+ readl(regs + S3C_DCTL),
+ readl(regs + S3C_DSTS));
+
+ seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
+ readl(regs + S3C_DIEPMSK), readl(regs + S3C_DOEPMSK));
+
+ seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
+ readl(regs + S3C_GINTMSK),
+ readl(regs + S3C_GINTSTS));
+
+ seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
+ readl(regs + S3C_DAINTMSK),
+ readl(regs + S3C_DAINT));
+
+ seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
+ readl(regs + S3C_GNPTXSTS),
+ readl(regs + S3C_GRXSTSR));
+
+ seq_printf(seq, "\nEndpoint status:\n");
+
+ for (idx = 0; idx < 15; idx++) {
+ u32 in, out;
+
+ in = readl(regs + S3C_DIEPCTL(idx));
+ out = readl(regs + S3C_DOEPCTL(idx));
+
+ seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
+ idx, in, out);
+
+ in = readl(regs + S3C_DIEPTSIZ(idx));
+ out = readl(regs + S3C_DOEPTSIZ(idx));
+
+ seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
+ in, out);
+
+ seq_printf(seq, "\n");
+ }
+
+ return 0;
+}
+
+static int state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, state_show, inode->i_private);
+}
+
+static const struct file_operations state_fops = {
+ .owner = THIS_MODULE,
+ .open = state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * fifo_show - debugfs: show the fifo information
+ * @seq: The seq_file to write data to.
+ * @v: Unused parameter.
+ *
+ * Show the FIFO information for the overall fifo and all the
+ * periodic transmission FIFOs.
+*/
+static int fifo_show(struct seq_file *seq, void *v)
+{
+ struct s3c_hsotg *hsotg = seq->private;
+ void __iomem *regs = hsotg->regs;
+ u32 val;
+ int idx;
+
+ seq_printf(seq, "Non-periodic FIFOs:\n");
+ seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + S3C_GRXFSIZ));
+
+ val = readl(regs + S3C_GNPTXFSIZ);
+ seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
+ val >> S3C_GNPTXFSIZ_NPTxFDep_SHIFT,
+ val & S3C_GNPTXFSIZ_NPTxFStAddr_MASK);
+
+ seq_printf(seq, "\nPeriodic TXFIFOs:\n");
+
+ for (idx = 1; idx <= 15; idx++) {
+ val = readl(regs + S3C_DPTXFSIZn(idx));
+
+ seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
+ val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
+ val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
+ }
+
+ return 0;
+}
+
+static int fifo_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fifo_show, inode->i_private);
+}
+
+static const struct file_operations fifo_fops = {
+ .owner = THIS_MODULE,
+ .open = fifo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+static const char *decode_direction(int is_in)
+{
+ return is_in ? "in" : "out";
+}
+
+/**
+ * ep_show - debugfs: show the state of an endpoint.
+ * @seq: The seq_file to write data to.
+ * @v: Unused parameter.
+ *
+ * This debugfs entry shows the state of the given endpoint (one is
+ * registered for each available).
+*/
+static int ep_show(struct seq_file *seq, void *v)
+{
+ struct s3c_hsotg_ep *ep = seq->private;
+ struct s3c_hsotg *hsotg = ep->parent;
+ struct s3c_hsotg_req *req;
+ void __iomem *regs = hsotg->regs;
+ int index = ep->index;
+ int show_limit = 15;
+ unsigned long flags;
+
+ seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
+ ep->index, ep->ep.name, decode_direction(ep->dir_in));
+
+ /* first show the register state */
+
+ seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
+ readl(regs + S3C_DIEPCTL(index)),
+ readl(regs + S3C_DOEPCTL(index)));
+
+ seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
+ readl(regs + S3C_DIEPDMA(index)),
+ readl(regs + S3C_DOEPDMA(index)));
+
+ seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
+ readl(regs + S3C_DIEPINT(index)),
+ readl(regs + S3C_DOEPINT(index)));
+
+ seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
+ readl(regs + S3C_DIEPTSIZ(index)),
+ readl(regs + S3C_DOEPTSIZ(index)));
+
+ seq_printf(seq, "\n");
+ seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
+ seq_printf(seq, "total_data=%ld\n", ep->total_data);
+
+ seq_printf(seq, "request list (%p,%p):\n",
+ ep->queue.next, ep->queue.prev);
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (--show_limit < 0) {
+ seq_printf(seq, "not showing more requests...\n");
+ break;
+ }
+
+ seq_printf(seq, "%c req %p: %d bytes @%p, ",
+ req == ep->req ? '*' : ' ',
+ req, req->req.length, req->req.buf);
+ seq_printf(seq, "%d done, res %d\n",
+ req->req.actual, req->req.status);
+ }
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ return 0;
+}
+
+static int ep_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ep_show, inode->i_private);
+}
+
+static const struct file_operations ep_fops = {
+ .owner = THIS_MODULE,
+ .open = ep_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * s3c_hsotg_create_debug - create debugfs directory and files
+ * @hsotg: The driver state
+ *
+ * Create the debugfs files to allow the user to get information
+ * about the state of the system. The directory name is created
+ * with the same name as the device itself, in case we end up
+ * with multiple blocks in future systems.
+*/
+static void __devinit s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
+{
+ struct dentry *root;
+ unsigned epidx;
+
+ root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
+ hsotg->debug_root = root;
+ if (IS_ERR(root)) {
+ dev_err(hsotg->dev, "cannot create debug root\n");
+ return;
+ }
+
+ /* create general state file */
+
+ hsotg->debug_file = debugfs_create_file("state", 0444, root,
+ hsotg, &state_fops);
+
+ if (IS_ERR(hsotg->debug_file))
+ dev_err(hsotg->dev, "%s: failed to create state\n", __func__);
+
+ hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
+ hsotg, &fifo_fops);
+
+ if (IS_ERR(hsotg->debug_fifo))
+ dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);
+
+ /* create one file for each endpoint */
+
+ for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
+ struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
+
+ ep->debugfs = debugfs_create_file(ep->name, 0444,
+ root, ep, &ep_fops);
+
+ if (IS_ERR(ep->debugfs))
+ dev_err(hsotg->dev, "failed to create %s debug file\n",
+ ep->name);
+ }
+}
+
+/**
+ * s3c_hsotg_delete_debug - cleanup debugfs entries
+ * @hsotg: The driver state
+ *
+ * Clean up (remove) the debugfs files on module exit.
+*/
+static void __devexit s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
+{
+ unsigned epidx;
+
+ for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
+ struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
+ debugfs_remove(ep->debugfs);
+ }
+
+ debugfs_remove(hsotg->debug_file);
+ debugfs_remove(hsotg->debug_fifo);
+ debugfs_remove(hsotg->debug_root);
+}
+
+/**
+ * s3c_hsotg_gate - set the hardware gate for the block
+ * @pdev: The device we bound to
+ * @on: On or off.
+ *
+ * Set the hardware gate setting into the block. If we end up on
+ * something other than an S3C64XX, then we might need to change this
+ * to using a platform data callback, or some other mechanism.
+ */
+static void s3c_hsotg_gate(struct platform_device *pdev, bool on)
+{
+ unsigned long flags;
+ u32 others;
+
+ local_irq_save(flags);
+
+ others = __raw_readl(S3C64XX_OTHERS);
+ if (on)
+ others |= S3C64XX_OTHERS_USBMASK;
+ else
+ others &= ~S3C64XX_OTHERS_USBMASK;
+ __raw_writel(others, S3C64XX_OTHERS);
+
+ local_irq_restore(flags);
+}
+
+struct s3c_hsotg_plat s3c_hsotg_default_pdata;
+
+static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
+{
+ struct s3c_hsotg_plat *plat = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct s3c_hsotg *hsotg;
+ struct resource *res;
+ int epnum;
+ int ret;
+
+ if (!plat)
+ plat = &s3c_hsotg_default_pdata;
+
+ hsotg = kzalloc(sizeof(struct s3c_hsotg) +
+ sizeof(struct s3c_hsotg_ep) * S3C_HSOTG_EPS,
+ GFP_KERNEL);
+ if (!hsotg) {
+ dev_err(dev, "cannot get memory\n");
+ return -ENOMEM;
+ }
+
+ hsotg->dev = dev;
+ hsotg->plat = plat;
+
+ platform_set_drvdata(pdev, hsotg);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "cannot find register resource 0\n");
+ ret = -EINVAL;
+ goto err_mem;
+ }
+
+ hsotg->regs_res = request_mem_region(res->start, resource_size(res),
+ dev_name(dev));
+ if (!hsotg->regs_res) {
+ dev_err(dev, "cannot reserve registers\n");
+ ret = -ENOENT;
+ goto err_mem;
+ }
+
+ hsotg->regs = ioremap(res->start, resource_size(res));
+ if (!hsotg->regs) {
+ dev_err(dev, "cannot map registers\n");
+ ret = -ENXIO;
+ goto err_regs_res;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "cannot find IRQ\n");
+ goto err_regs;
+ }
+
+ hsotg->irq = ret;
+
+ ret = request_irq(ret, s3c_hsotg_irq, 0, dev_name(dev), hsotg);
+ if (ret < 0) {
+ dev_err(dev, "cannot claim IRQ\n");
+ goto err_regs;
+ }
+
+ dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
+
+ device_initialize(&hsotg->gadget.dev);
+
+ dev_set_name(&hsotg->gadget.dev, "gadget");
+
+ hsotg->gadget.is_dualspeed = 1;
+ hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
+ hsotg->gadget.name = dev_name(dev);
+
+ hsotg->gadget.dev.parent = dev;
+ hsotg->gadget.dev.dma_mask = dev->dma_mask;
+
+ /* setup endpoint information */
+
+ INIT_LIST_HEAD(&hsotg->gadget.ep_list);
+ hsotg->gadget.ep0 = &hsotg->eps[0].ep;
+
+ /* allocate EP0 request */
+
+ hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
+ GFP_KERNEL);
+ if (!hsotg->ctrl_req) {
+ dev_err(dev, "failed to allocate ctrl req\n");
+ ret = -ENOMEM;
+ goto err_regs;
+ }
+
+ /* reset the system */
+
+ s3c_hsotg_gate(pdev, true);
+
+ s3c_hsotg_otgreset(hsotg);
+ s3c_hsotg_corereset(hsotg);
+ s3c_hsotg_init(hsotg);
+
+ /* initialise the endpoints now the core has been initialised */
+ for (epnum = 0; epnum < S3C_HSOTG_EPS; epnum++)
+ s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
+
+ s3c_hsotg_create_debug(hsotg);
+
+ s3c_hsotg_dump(hsotg);
+
+ our_hsotg = hsotg;
+ return 0;
+
+err_regs:
+ iounmap(hsotg->regs);
+
+err_regs_res:
+ release_resource(hsotg->regs_res);
+ kfree(hsotg->regs_res);
+
+err_mem:
+ kfree(hsotg);
+ return ret;
+}
+
+static int __devexit s3c_hsotg_remove(struct platform_device *pdev)
+{
+ struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
+
+ s3c_hsotg_delete_debug(hsotg);
+
+ usb_gadget_unregister_driver(hsotg->driver);
+
+ free_irq(hsotg->irq, hsotg);
+ iounmap(hsotg->regs);
+
+ release_resource(hsotg->regs_res);
+ kfree(hsotg->regs_res);
+
+ s3c_hsotg_gate(pdev, false);
+
+ kfree(hsotg);
+ return 0;
+}
+
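+/* power management is not implemented yet, so the callbacks are stubbed out */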
+#if 1
+#define s3c_hsotg_suspend NULL
+#define s3c_hsotg_resume NULL
+#endif
+
+static struct platform_driver s3c_hsotg_driver = {
+ .driver = {
+ .name = "s3c-hsotg",
+ .owner = THIS_MODULE,
+ },
+ .probe = s3c_hsotg_probe,
+ .remove = __devexit_p(s3c_hsotg_remove),
+ .suspend = s3c_hsotg_suspend,
+ .resume = s3c_hsotg_resume,
+};
+
+static int __init s3c_hsotg_modinit(void)
+{
+ return platform_driver_register(&s3c_hsotg_driver);
+}
+
+static void __exit s3c_hsotg_modexit(void)
+{
+ platform_driver_unregister(&s3c_hsotg_driver);
+}
+
+module_init(s3c_hsotg_modinit);
+module_exit(s3c_hsotg_modexit);
+
+MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c-hsotg");
diff --git a/drivers/usb/gadget/u_audio.c b/drivers/usb/gadget/u_audio.c
new file mode 100644
index 00000000000..0f3d22fc030
--- /dev/null
+++ b/drivers/usb/gadget/u_audio.c
@@ -0,0 +1,319 @@
+/*
+ * u_audio.c -- ALSA audio utilities for Gadget stack
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/random.h>
+#include <linux/syscalls.h>
+
+#include "u_audio.h"
+
+/*
+ * This component encapsulates the ALSA devices for USB audio gadget
+ */
+
+#define FILE_PCM_PLAYBACK "/dev/snd/pcmC0D0p"
+#define FILE_PCM_CAPTURE "/dev/snd/pcmC0D0c"
+#define FILE_CONTROL "/dev/snd/controlC0"
+
+static char *fn_play = FILE_PCM_PLAYBACK;
+module_param(fn_play, charp, S_IRUGO);
+MODULE_PARM_DESC(fn_play, "Playback PCM device file name");
+
+static char *fn_cap = FILE_PCM_CAPTURE;
+module_param(fn_cap, charp, S_IRUGO);
+MODULE_PARM_DESC(fn_cap, "Capture PCM device file name");
+
+static char *fn_cntl = FILE_CONTROL;
+module_param(fn_cntl, charp, S_IRUGO);
+MODULE_PARM_DESC(fn_cntl, "Control device file name");
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * Some ALSA internal helper functions
+ */
+static int snd_interval_refine_set(struct snd_interval *i, unsigned int val)
+{
+ struct snd_interval t;
+ t.empty = 0;
+ t.min = t.max = val;
+ t.openmin = t.openmax = 0;
+ t.integer = 1;
+ return snd_interval_refine(i, &t);
+}
+
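+/*
+ * Constrain a single hw_params field to one value: for mask parameters
+ * (access, format) refine the mask, for interval parameters (channels,
+ * rate) refine the interval; dir selects open bounds around val.
+ */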
+static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params,
+ snd_pcm_hw_param_t var, unsigned int val,
+ int dir)
+{
+ int changed;
+ if (hw_is_mask(var)) {
+ struct snd_mask *m = hw_param_mask(params, var);
+ if (val == 0 && dir < 0) {
+ changed = -EINVAL;
+ snd_mask_none(m);
+ } else {
+ if (dir > 0)
+ val++;
+ else if (dir < 0)
+ val--;
+ changed = snd_mask_refine_set(
+ hw_param_mask(params, var), val);
+ }
+ } else if (hw_is_interval(var)) {
+ struct snd_interval *i = hw_param_interval(params, var);
+ if (val == 0 && dir < 0) {
+ changed = -EINVAL;
+ snd_interval_none(i);
+ } else if (dir == 0)
+ changed = snd_interval_refine_set(i, val);
+ else {
+ struct snd_interval t;
+ t.openmin = 1;
+ t.openmax = 1;
+ t.empty = 0;
+ t.integer = 0;
+ if (dir < 0) {
+ t.min = val - 1;
+ t.max = val;
+ } else {
+ t.min = val;
+ t.max = val+1;
+ }
+ changed = snd_interval_refine(i, &t);
+ }
+ } else
+ return -EINVAL;
+ if (changed) {
+ params->cmask |= 1 << var;
+ params->rmask |= 1 << var;
+ }
+ return changed;
+}
+/*-------------------------------------------------------------------------*/
+
+/**
+ * Set default hardware params
+ */
+static int playback_default_hw_params(struct gaudio_snd_dev *snd)
+{
+ struct snd_pcm_substream *substream = snd->substream;
+ struct snd_pcm_hw_params *params;
+ snd_pcm_sframes_t result;
+
+ /*
+ * SNDRV_PCM_ACCESS_RW_INTERLEAVED,
+ * SNDRV_PCM_FORMAT_S16_LE
+ * CHANNELS: 2
+ * RATE: 48000
+ */
+ snd->access = SNDRV_PCM_ACCESS_RW_INTERLEAVED;
+ snd->format = SNDRV_PCM_FORMAT_S16_LE;
+ snd->channels = 2;
+ snd->rate = 48000;
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ _snd_pcm_hw_params_any(params);
+ _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_ACCESS,
+ snd->access, 0);
+ _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ snd->format, 0);
+ _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_CHANNELS,
+ snd->channels, 0);
+ _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_RATE,
+ snd->rate, 0);
+
+ snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+ snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, params);
+
+ result = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
+ if (result < 0) {
+ ERROR(snd->card,
+ "Preparing sound card failed: %d\n", (int)result);
+ kfree(params);
+ return result;
+ }
+
+ /* Store the hardware parameters */
+ snd->access = params_access(params);
+ snd->format = params_format(params);
+ snd->channels = params_channels(params);
+ snd->rate = params_rate(params);
+
+ kfree(params);
+
+ INFO(snd->card,
+ "Hardware params: access %x, format %x, channels %d, rate %d\n",
+ snd->access, snd->format, snd->channels, snd->rate);
+
+ return 0;
+}
+
+/**
+ * Play back audio buffer data via the ALSA PCM device
+ */
+static size_t u_audio_playback(struct gaudio *card, void *buf, size_t count)
+{
+ struct gaudio_snd_dev *snd = &card->playback;
+ struct snd_pcm_substream *substream = snd->substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ mm_segment_t old_fs;
+ ssize_t result;
+ snd_pcm_sframes_t frames;
+
+try_again:
+ if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
+ runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
+ result = snd_pcm_kernel_ioctl(substream,
+ SNDRV_PCM_IOCTL_PREPARE, NULL);
+ if (result < 0) {
+ ERROR(card, "Preparing sound card failed: %d\n",
+ (int)result);
+ return result;
+ }
+ }
+
+ frames = bytes_to_frames(runtime, count);
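+ /* snd_pcm_lib_write() expects a user-space pointer, so widen the
+ * address limit while we hand it our kernel buffer */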
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ result = snd_pcm_lib_write(snd->substream, buf, frames);
+ if (result != frames) {
+ ERROR(card, "Playback error: %d\n", (int)result);
+ set_fs(old_fs);
+ goto try_again;
+ }
+ set_fs(old_fs);
+
+ return 0;
+}
+
+static int u_audio_get_playback_channels(struct gaudio *card)
+{
+ return card->playback.channels;
+}
+
+static int u_audio_get_playback_rate(struct gaudio *card)
+{
+ return card->playback.rate;
+}
+
+/**
+ * Open ALSA PCM and control device files
+ * Initialise the PCM and control devices
+ */
+static int gaudio_open_snd_dev(struct gaudio *card)
+{
+ struct snd_pcm_file *pcm_file;
+ struct gaudio_snd_dev *snd;
+
+ if (!card)
+ return -ENODEV;
+
+ /* Open control device */
+ snd = &card->control;
+ snd->filp = filp_open(fn_cntl, O_RDWR, 0);
+ if (IS_ERR(snd->filp)) {
+ int ret = PTR_ERR(snd->filp);
+ ERROR(card, "unable to open sound control device file: %s\n",
+ fn_cntl);
+ snd->filp = NULL;
+ return ret;
+ }
+ snd->card = card;
+
+ /* Open PCM playback device and setup substream */
+ snd = &card->playback;
+ snd->filp = filp_open(fn_play, O_WRONLY, 0);
+ if (IS_ERR(snd->filp)) {
+ int ret = PTR_ERR(snd->filp);
+ ERROR(card, "No such PCM playback device: %s\n", fn_play);
+ snd->filp = NULL;
+ return ret;
+ }
+ pcm_file = snd->filp->private_data;
+ snd->substream = pcm_file->substream;
+ snd->card = card;
+ playback_default_hw_params(snd);
+
+ /* Open PCM capture device and setup substream */
+ snd = &card->capture;
+ snd->filp = filp_open(fn_cap, O_RDONLY, 0);
+ if (IS_ERR(snd->filp)) {
+ int ret = PTR_ERR(snd->filp);
+ ERROR(card, "No such PCM capture device: %s\n", fn_cap);
+ snd->filp = NULL;
+ return ret;
+ }
+ pcm_file = snd->filp->private_data;
+ snd->substream = pcm_file->substream;
+ snd->card = card;
+
+ return 0;
+}
+
+/**
+ * Close ALSA PCM and control device files
+ */
+static int gaudio_close_snd_dev(struct gaudio *gau)
+{
+ struct gaudio_snd_dev *snd;
+
+ /* Close control device */
+ snd = &gau->control;
+ if (!IS_ERR(snd->filp))
+ filp_close(snd->filp, current->files);
+
+ /* Close PCM playback device */
+ snd = &gau->playback;
+ if (!IS_ERR(snd->filp))
+ filp_close(snd->filp, current->files);
+
+ /* Close PCM capture device */
+ snd = &gau->capture;
+ if (!IS_ERR(snd->filp))
+ filp_close(snd->filp, current->files);
+
+ return 0;
+}
+
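+/*
+ * Usage sketch (assumed caller): the USB audio function driver calls
+ * gaudio_setup() at bind time to open the ALSA control/playback/capture
+ * devices, and gaudio_cleanup() at unbind to release them.
+ */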
+/**
+ * gaudio_setup - set up the ALSA interface and prepare for USB transfer
+ *
+ * This sets up the PCM, mixer or MIDI ALSA devices for use by the USB gadget.
+ *
+ * Returns negative errno, or zero on success
+ */
+int __init gaudio_setup(struct gaudio *card)
+{
+ int ret;
+
+ ret = gaudio_open_snd_dev(card);
+ if (ret)
+ ERROR(card, "we need at least one control device\n");
+
+ return ret;
+
+}
+
+/**
+ * gaudio_cleanup - remove ALSA device interface
+ *
+ * This is called to free all resources allocated by gaudio_setup().
+ */
+void gaudio_cleanup(struct gaudio *card)
+{
+ if (card)
+ gaudio_close_snd_dev(card);
+}
+
diff --git a/drivers/usb/gadget/u_audio.h b/drivers/usb/gadget/u_audio.h
new file mode 100644
index 00000000000..cc8d159c648
--- /dev/null
+++ b/drivers/usb/gadget/u_audio.h
@@ -0,0 +1,56 @@
+/*
+ * u_audio.h -- interface to USB gadget "ALSA AUDIO" utilities
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __U_AUDIO_H
+#define __U_AUDIO_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/composite.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "gadget_chips.h"
+
+/*
+ * This represents the USB side of an audio card device, managed by a USB
+ * function which provides control and stream interfaces.
+ */
+
+struct gaudio_snd_dev {
+ struct gaudio *card;
+ struct file *filp;
+ struct snd_pcm_substream *substream;
+ int access;
+ int format;
+ int channels;
+ int rate;
+};
+
+struct gaudio {
+ struct usb_function func;
+ struct usb_gadget *gadget;
+
+ /* ALSA sound device interfaces */
+ struct gaudio_snd_dev control;
+ struct gaudio_snd_dev playback;
+ struct gaudio_snd_dev capture;
+
+ /* TODO */
+};
+
+int gaudio_setup(struct gaudio *card);
+void gaudio_cleanup(struct gaudio *card);
+
+#endif /* __U_AUDIO_H */
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 4007770f7ed..016f63b3902 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -520,7 +520,7 @@ static int eth_start_xmit(struct sk_buff *skb, struct net_device *net)
*/
if (list_empty(&dev->tx_reqs)) {
spin_unlock_irqrestore(&dev->req_lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
req = container_of(dev->tx_reqs.next, struct usb_request, list);
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 0a4d99ab40d..fc6e709f45b 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -371,6 +371,7 @@ __acquires(&port->port_lock)
req->length = len;
list_del(&req->list);
+ req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
port->port_num, len, *((u8 *)req->buf),
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 845479f7c70..1576a0520ad 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -17,6 +17,26 @@ config USB_C67X00_HCD
To compile this driver as a module, choose M here: the
module will be called c67x00.
+config USB_XHCI_HCD
+ tristate "xHCI HCD (USB 3.0) support (EXPERIMENTAL)"
+ depends on USB && PCI && EXPERIMENTAL
+ ---help---
+ The eXtensible Host Controller Interface (xHCI) is the standard for USB 3.0
+ "SuperSpeed" host controller hardware.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xhci-hcd.
+
+config USB_XHCI_HCD_DEBUGGING
+ bool "Debugging for the xHCI host controller"
+ depends on USB_XHCI_HCD
+ ---help---
+ Say 'Y' to turn on debugging for the xHCI host controller driver.
+ This will spew debugging output, even in interrupt context.
+ This should only be used for debugging xHCI driver bugs.
+
+ If unsure, say N.
+
config USB_EHCI_HCD
tristate "EHCI HCD (USB 2.0) support"
depends on USB && USB_ARCH_HAS_EHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f163571e33d..289d748bb41 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -12,6 +12,7 @@ fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \
ifeq ($(CONFIG_FHCI_DEBUG),y)
fhci-objs += fhci-dbg.o
endif
+xhci-objs := xhci-hcd.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o
obj-$(CONFIG_USB_WHCI_HCD) += whci/
@@ -23,6 +24,7 @@ obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
+obj-$(CONFIG_USB_XHCI_HCD) += xhci.o
obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index bf69f473910..c3a778bd359 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -97,6 +97,7 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 01c3da34f67..bf86809c512 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -309,6 +309,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index c637207a1c8..2b72473544d 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1024,6 +1024,51 @@ done:
return;
}
+static void
+ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct ehci_qh *qh;
+ int eptype = usb_endpoint_type(&ep->desc);
+
+ if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+ return;
+
+ rescan:
+ spin_lock_irq(&ehci->lock);
+ qh = ep->hcpriv;
+
+ /* For Bulk and Interrupt endpoints we maintain the toggle state
+ * in the hardware; the toggle bits in udev aren't used at all.
+ * When an endpoint is reset by usb_clear_halt() we must reset
+ * the toggle bit in the QH.
+ */
+ if (qh) {
+ if (!list_empty(&qh->qtd_list)) {
+ WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+ } else if (qh->qh_state == QH_STATE_IDLE) {
+ qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
+ } else {
+ /* It's not safe to write into the overlay area
+ * while the QH is active. Unlink it first and
+ * wait for the unlink to complete.
+ */
+ if (qh->qh_state == QH_STATE_LINKED) {
+ if (eptype == USB_ENDPOINT_XFER_BULK) {
+ unlink_async(ehci, qh);
+ } else {
+ intr_deschedule(ehci, qh);
+ (void) qh_schedule(ehci, qh);
+ }
+ }
+ spin_unlock_irq(&ehci->lock);
+ schedule_timeout_uninterruptible(1);
+ goto rescan;
+ }
+ }
+ spin_unlock_irq(&ehci->lock);
+}
+
static int ehci_get_frame (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
@@ -1097,7 +1142,7 @@ static int __init ehci_hcd_init(void)
sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
#ifdef DEBUG
- ehci_debug_root = debugfs_create_dir("ehci", NULL);
+ ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
if (!ehci_debug_root) {
retval = -ENOENT;
goto err_debug;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 97a53a48a3d..f46ad27c9a9 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -391,7 +391,7 @@ static inline void create_companion_file(struct ehci_hcd *ehci)
/* with integrated TT there is no companion! */
if (!ehci_is_TDI(ehci))
- i = device_create_file(ehci_to_hcd(ehci)->self.dev,
+ i = device_create_file(ehci_to_hcd(ehci)->self.controller,
&dev_attr_companion);
}
@@ -399,7 +399,7 @@ static inline void remove_companion_file(struct ehci_hcd *ehci)
{
/* with integrated TT there is no companion! */
if (!ehci_is_TDI(ehci))
- device_remove_file(ehci_to_hcd(ehci)->self.dev,
+ device_remove_file(ehci_to_hcd(ehci)->self.controller,
&dev_attr_companion);
}
diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
index 9c32063a0c2..a44bb4a9495 100644
--- a/drivers/usb/host/ehci-ixp4xx.c
+++ b/drivers/usb/host/ehci-ixp4xx.c
@@ -51,6 +51,7 @@ static const struct hc_driver ixp4xx_ehci_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
.get_frame_number = ehci_get_frame,
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 9d487908012..770dd9aba62 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -149,6 +149,7 @@ static const struct hc_driver ehci_orion_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
@@ -187,7 +188,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
}
}
-static int __init ehci_orion_drv_probe(struct platform_device *pdev)
+static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
{
struct orion_ehci_data *pd = pdev->dev.platform_data;
struct resource *res;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 5aa8bce90e1..f3683e1da16 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -268,7 +268,7 @@ done:
* Also they depend on separate root hub suspend/resume.
*/
-static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
+static int ehci_pci_suspend(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
unsigned long flags;
@@ -293,12 +293,6 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
- /* make sure snapshot being resumed re-enumerates everything */
- if (message.event == PM_EVENT_PRETHAW) {
- ehci_halt(ehci);
- ehci_reset(ehci);
- }
-
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
bail:
spin_unlock_irqrestore (&ehci->lock, flags);
@@ -309,7 +303,7 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
return rc;
}
-static int ehci_pci_resume(struct usb_hcd *hcd)
+static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
@@ -322,10 +316,12 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
/* Mark hardware accessible again as we are out of D3 state by now */
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- /* If CF is still set, we maintained PCI Vaux power.
+ /* If CF is still set and we aren't resuming from hibernation
+ * then we maintained PCI Vaux power.
* Just undo the effect of ehci_pci_suspend().
*/
- if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
+ if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
+ !hibernated) {
int mask = INTR_MASK;
if (!hcd->self.root_hub->do_remote_wakeup)
@@ -335,7 +331,6 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
return 0;
}
- ehci_dbg(ehci, "lost power, restarting\n");
usb_root_hub_lost_power(hcd->self.root_hub);
/* Else reset, to cope with power loss or flush-to-storage
@@ -393,6 +388,7 @@ static const struct hc_driver ehci_pci_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
@@ -429,10 +425,11 @@ static struct pci_driver ehci_pci_driver = {
.probe = usb_hcd_pci_probe,
.remove = usb_hcd_pci_remove,
+ .shutdown = usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM
- .suspend = usb_hcd_pci_suspend,
- .resume = usb_hcd_pci_resume,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
#endif
- .shutdown = usb_hcd_pci_shutdown,
};
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index ef732b704f5..fbd272288fc 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -61,6 +61,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 1ba9f9a8c30..eecd2a0680a 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -65,6 +65,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
.get_frame_number = ehci_get_frame,
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
@@ -162,7 +163,7 @@ static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__,
(unsigned long)virq);
- ps3_system_bus_set_driver_data(dev, hcd);
+ ps3_system_bus_set_drvdata(dev, hcd);
result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
@@ -195,8 +196,7 @@ fail_start:
static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
{
unsigned int tmp;
- struct usb_hcd *hcd =
- (struct usb_hcd *)ps3_system_bus_get_driver_data(dev);
+ struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
BUG_ON(!hcd);
@@ -208,7 +208,7 @@ static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
ehci_shutdown(hcd);
usb_remove_hcd(hcd);
- ps3_system_bus_set_driver_data(dev, NULL);
+ ps3_system_bus_set_drvdata(dev, NULL);
BUG_ON(!hcd->regs);
iounmap(hcd->regs);
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 1976b1b3778..3192f683f80 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -93,22 +93,6 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
qh->hw_alt_next = EHCI_LIST_END(ehci);
- /* Except for control endpoints, we make hardware maintain data
- * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
- * and set the pseudo-toggle in udev. Only usb_clear_halt() will
- * ever clear it.
- */
- if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
- unsigned is_out, epnum;
-
- is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
- epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
- if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
- qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
- usb_settoggle (qh->dev, epnum, is_out, 1);
- }
- }
-
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb ();
qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
@@ -850,7 +834,6 @@ done:
qh->qh_state = QH_STATE_IDLE;
qh->hw_info1 = cpu_to_hc32(ehci, info1);
qh->hw_info2 = cpu_to_hc32(ehci, info2);
- usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
qh_refresh (ehci, qh);
return qh;
}
@@ -881,7 +864,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
}
}
- /* clear halt and/or toggle; and maybe recover from silicon quirk */
+ /* clear halt and maybe recover from silicon quirk */
if (qh->qh_state == QH_STATE_IDLE)
qh_refresh (ehci, qh);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 556d0ec0c1f..9d1babc7ff6 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -760,8 +760,10 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
if (status) {
/* "normal" case, uframing flexible except with splits */
if (qh->period) {
- frame = qh->period - 1;
- do {
+ int i;
+
+ for (i = qh->period; status && i > 0; --i) {
+ frame = ++ehci->random_frame % qh->period;
for (uframe = 0; uframe < 8; uframe++) {
status = check_intr_schedule (ehci,
frame, uframe, qh,
@@ -769,7 +771,7 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
if (status == 0)
break;
}
- } while (status && frame--);
+ }
/* qh->period == 0 means every uframe */
} else {
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 6cff195e1a3..90ad3395bb2 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -116,6 +116,7 @@ struct ehci_hcd { /* one per controller */
struct timer_list watchdog;
unsigned long actions;
unsigned stamp;
+ unsigned random_frame;
unsigned long next_statechange;
u32 command;
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
index ea8a4255c5d..e799f86dab1 100644
--- a/drivers/usb/host/fhci-dbg.c
+++ b/drivers/usb/host/fhci-dbg.c
@@ -108,7 +108,7 @@ void fhci_dfs_create(struct fhci_hcd *fhci)
{
struct device *dev = fhci_to_hcd(fhci)->self.controller;
- fhci->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+ fhci->dfs_root = debugfs_create_dir(dev_name(dev), usb_debug_root);
if (!fhci->dfs_root) {
WARN_ON(1);
return;
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index cbf30e515f2..88b03214622 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -172,25 +172,6 @@ error_cluster_id_get:
}
-static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__,
- usb_hcd, hwahc, *(unsigned long *) &msg);
- return -ENOSYS;
-}
-
-static int hwahc_op_resume(struct usb_hcd *usb_hcd)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-
- dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
- usb_hcd, hwahc);
- return -ENOSYS;
-}
-
/*
* No need to abort pipes, as when this is called, all the children
* has been disconnected and that has done it [through
@@ -598,8 +579,6 @@ static struct hc_driver hwahc_hc_driver = {
.flags = HCD_USB2, /* FIXME */
.reset = hwahc_op_reset,
.start = hwahc_op_start,
- .pci_suspend = hwahc_op_suspend,
- .pci_resume = hwahc_op_resume,
.stop = hwahc_op_stop,
.get_frame_number = hwahc_op_get_frame_number,
.urb_enqueue = hwahc_op_urb_enqueue,
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index cd07ea3f0c6..15438469f21 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1658,6 +1658,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
u32 reg_base, or_reg, skip_reg;
unsigned long flags;
struct ptd ptd;
+ packet_enqueue *pe;
switch (usb_pipetype(urb->pipe)) {
case PIPE_ISOCHRONOUS:
@@ -1669,6 +1670,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
reg_base = INT_REGS_OFFSET;
or_reg = HC_INT_IRQ_MASK_OR_REG;
skip_reg = HC_INT_PTD_SKIPMAP_REG;
+ pe = enqueue_an_INT_packet;
break;
default:
@@ -1676,6 +1678,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
reg_base = ATL_REGS_OFFSET;
or_reg = HC_ATL_IRQ_MASK_OR_REG;
skip_reg = HC_ATL_PTD_SKIPMAP_REG;
+ pe = enqueue_an_ATL_packet;
break;
}
@@ -1687,6 +1690,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
u32 skip_map;
u32 or_map;
struct isp1760_qtd *qtd;
+ struct isp1760_qh *qh = ints->qh;
skip_map = isp1760_readl(hcd->regs + skip_reg);
skip_map |= 1 << i;
@@ -1699,8 +1703,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base
+ i * sizeof(ptd), sizeof(ptd));
qtd = ints->qtd;
-
- clean_up_qtdlist(qtd);
+ qtd = clean_up_qtdlist(qtd);
free_mem(priv, ints->payload);
@@ -1711,7 +1714,24 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
ints->payload = 0;
isp1760_urb_done(priv, urb, status);
+ if (qtd)
+ pe(hcd, qh, qtd);
break;
+
+ } else if (ints->qtd) {
+ struct isp1760_qtd *qtd, *prev_qtd = ints->qtd;
+
+ for (qtd = ints->qtd->hw_next; qtd; qtd = qtd->hw_next) {
+ if (qtd->urb == urb) {
+ prev_qtd->hw_next = clean_up_qtdlist(qtd);
+ isp1760_urb_done(priv, urb, status);
+ break;
+ }
+ prev_qtd = qtd;
+ }
+ /* we found the urb before the end of the list */
+ if (qtd)
+ break;
}
ints++;
}
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index d3269656aa4..811f5dfdc58 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -431,7 +431,7 @@ static struct dentry *ohci_debug_root;
struct debug_buffer {
ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
- struct device *dev;
+ struct ohci_hcd *ohci;
struct mutex mutex; /* protect filling of buffer */
size_t count; /* number of characters filled into buffer */
char *page;
@@ -505,15 +505,11 @@ show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
static ssize_t fill_async_buffer(struct debug_buffer *buf)
{
- struct usb_bus *bus;
- struct usb_hcd *hcd;
struct ohci_hcd *ohci;
size_t temp;
unsigned long flags;
- bus = dev_get_drvdata(buf->dev);
- hcd = bus_to_hcd(bus);
- ohci = hcd_to_ohci(hcd);
+ ohci = buf->ohci;
/* display control and bulk lists together, for simplicity */
spin_lock_irqsave (&ohci->lock, flags);
@@ -529,8 +525,6 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
{
- struct usb_bus *bus;
- struct usb_hcd *hcd;
struct ohci_hcd *ohci;
struct ed **seen, *ed;
unsigned long flags;
@@ -542,9 +536,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
return 0;
seen_count = 0;
- bus = (struct usb_bus *)dev_get_drvdata(buf->dev);
- hcd = bus_to_hcd(bus);
- ohci = hcd_to_ohci(hcd);
+ ohci = buf->ohci;
next = buf->page;
size = PAGE_SIZE;
@@ -626,7 +618,6 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
static ssize_t fill_registers_buffer(struct debug_buffer *buf)
{
- struct usb_bus *bus;
struct usb_hcd *hcd;
struct ohci_hcd *ohci;
struct ohci_regs __iomem *regs;
@@ -635,9 +626,8 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
char *next;
u32 rdata;
- bus = (struct usb_bus *)dev_get_drvdata(buf->dev);
- hcd = bus_to_hcd(bus);
- ohci = hcd_to_ohci(hcd);
+ ohci = buf->ohci;
+ hcd = ohci_to_hcd(ohci);
regs = ohci->regs;
next = buf->page;
size = PAGE_SIZE;
@@ -710,7 +700,7 @@ done:
return PAGE_SIZE - size;
}
-static struct debug_buffer *alloc_buffer(struct device *dev,
+static struct debug_buffer *alloc_buffer(struct ohci_hcd *ohci,
ssize_t (*fill_func)(struct debug_buffer *))
{
struct debug_buffer *buf;
@@ -718,7 +708,7 @@ static struct debug_buffer *alloc_buffer(struct device *dev,
buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
if (buf) {
- buf->dev = dev;
+ buf->ohci = ohci;
buf->fill_func = fill_func;
mutex_init(&buf->mutex);
}
@@ -810,26 +800,25 @@ static int debug_registers_open(struct inode *inode, struct file *file)
static inline void create_debug_files (struct ohci_hcd *ohci)
{
struct usb_bus *bus = &ohci_to_hcd(ohci)->self;
- struct device *dev = bus->dev;
ohci->debug_dir = debugfs_create_dir(bus->bus_name, ohci_debug_root);
if (!ohci->debug_dir)
goto dir_error;
ohci->debug_async = debugfs_create_file("async", S_IRUGO,
- ohci->debug_dir, dev,
+ ohci->debug_dir, ohci,
&debug_async_fops);
if (!ohci->debug_async)
goto async_error;
ohci->debug_periodic = debugfs_create_file("periodic", S_IRUGO,
- ohci->debug_dir, dev,
+ ohci->debug_dir, ohci,
&debug_periodic_fops);
if (!ohci->debug_periodic)
goto periodic_error;
ohci->debug_registers = debugfs_create_file("registers", S_IRUGO,
- ohci->debug_dir, dev,
+ ohci->debug_dir, ohci,
&debug_registers_fops);
if (!ohci->debug_registers)
goto registers_error;
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index 7cf74f8c2db..b0dbf4157d2 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -47,7 +47,7 @@ static int usb_hcd_ep93xx_probe(const struct hc_driver *driver,
struct usb_hcd *hcd;
if (pdev->resource[1].flags != IORESOURCE_IRQ) {
- pr_debug("resource[1] is not IORESOURCE_IRQ");
+ dbg("resource[1] is not IORESOURCE_IRQ");
return -ENOMEM;
}
@@ -65,12 +65,18 @@ static int usb_hcd_ep93xx_probe(const struct hc_driver *driver,
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (hcd->regs == NULL) {
- pr_debug("ioremap failed");
+ dbg("ioremap failed");
retval = -ENOMEM;
goto err2;
}
- usb_host_clock = clk_get(&pdev->dev, "usb_host");
+ usb_host_clock = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(usb_host_clock)) {
+ dbg("clk_get failed");
+ retval = PTR_ERR(usb_host_clock);
+ goto err3;
+ }
+
ep93xx_start_hc(&pdev->dev);
ohci_hcd_init(hcd_to_ohci(hcd));
@@ -80,6 +86,7 @@ static int usb_hcd_ep93xx_probe(const struct hc_driver *driver,
return retval;
ep93xx_stop_hc(&pdev->dev);
+err3:
iounmap(hcd->regs);
err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 25db704f3a2..58151687d35 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -571,7 +571,7 @@ static int ohci_init (struct ohci_hcd *ohci)
*/
static int ohci_run (struct ohci_hcd *ohci)
{
- u32 mask, temp;
+ u32 mask, val;
int first = ohci->fminterval == 0;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
@@ -580,8 +580,8 @@ static int ohci_run (struct ohci_hcd *ohci)
/* boot firmware should have set this up (5.1.1.3.1) */
if (first) {
- temp = ohci_readl (ohci, &ohci->regs->fminterval);
- ohci->fminterval = temp & 0x3fff;
+ val = ohci_readl (ohci, &ohci->regs->fminterval);
+ ohci->fminterval = val & 0x3fff;
if (ohci->fminterval != FI)
ohci_dbg (ohci, "fminterval delta %d\n",
ohci->fminterval - FI);
@@ -600,25 +600,25 @@ static int ohci_run (struct ohci_hcd *ohci)
switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_OPER:
- temp = 0;
+ val = 0;
break;
case OHCI_USB_SUSPEND:
case OHCI_USB_RESUME:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_USB_RESUME;
- temp = 10 /* msec wait */;
+ val = 10 /* msec wait */;
break;
// case OHCI_USB_RESET:
default:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_USB_RESET;
- temp = 50 /* msec wait */;
+ val = 50 /* msec wait */;
break;
}
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
// flush the writes
(void) ohci_readl (ohci, &ohci->regs->control);
- msleep(temp);
+ msleep(val);
memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
@@ -628,9 +628,9 @@ static int ohci_run (struct ohci_hcd *ohci)
retry:
/* HC Reset requires max 10 us delay */
ohci_writel (ohci, OHCI_HCR, &ohci->regs->cmdstatus);
- temp = 30; /* ... allow extra time */
+ val = 30; /* ... allow extra time */
while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
- if (--temp == 0) {
+ if (--val == 0) {
spin_unlock_irq (&ohci->lock);
ohci_err (ohci, "USB HC reset timed out!\n");
return -1;
@@ -699,23 +699,23 @@ retry:
ohci_writel (ohci, mask, &ohci->regs->intrenable);
/* handle root hub init quirks ... */
- temp = roothub_a (ohci);
- temp &= ~(RH_A_PSM | RH_A_OCPM);
+ val = roothub_a (ohci);
+ val &= ~(RH_A_PSM | RH_A_OCPM);
if (ohci->flags & OHCI_QUIRK_SUPERIO) {
/* NSC 87560 and maybe others */
- temp |= RH_A_NOCP;
- temp &= ~(RH_A_POTPGT | RH_A_NPS);
- ohci_writel (ohci, temp, &ohci->regs->roothub.a);
+ val |= RH_A_NOCP;
+ val &= ~(RH_A_POTPGT | RH_A_NPS);
+ ohci_writel (ohci, val, &ohci->regs->roothub.a);
} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
/* hub power always on; required for AMD-756 and some
* Mac platforms. ganged overcurrent reporting, if any.
*/
- temp |= RH_A_NPS;
- ohci_writel (ohci, temp, &ohci->regs->roothub.a);
+ val |= RH_A_NPS;
+ ohci_writel (ohci, val, &ohci->regs->roothub.a);
}
ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
- ohci_writel (ohci, (temp & RH_A_NPS) ? 0 : RH_B_PPCM,
+ ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
&ohci->regs->roothub.b);
// flush those writes
(void) ohci_readl (ohci, &ohci->regs->control);
@@ -724,7 +724,7 @@ retry:
spin_unlock_irq (&ohci->lock);
// POTPGT delay is bits 24-31, in 2 ms units.
- mdelay ((temp >> 23) & 0x1fe);
+ mdelay ((val >> 23) & 0x1fe);
hcd->state = HC_STATE_RUNNING;
if (quirk_zfmicro(ohci)) {
@@ -1105,7 +1105,7 @@ static int __init ohci_hcd_mod_init(void)
set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
#ifdef DEBUG
- ohci_debug_root = debugfs_create_dir("ohci", NULL);
+ ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
if (!ohci_debug_root) {
retval = -ENOENT;
goto error_debug;
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index f9961b4c0da..d2ba04dd785 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -372,7 +372,7 @@ static int __devinit ohci_pci_start (struct usb_hcd *hcd)
#ifdef CONFIG_PM
-static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
+static int ohci_pci_suspend(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
unsigned long flags;
@@ -394,10 +394,6 @@ static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
(void)ohci_readl(ohci, &ohci->regs->intrdisable);
- /* make sure snapshot being resumed re-enumerates everything */
- if (message.event == PM_EVENT_PRETHAW)
- ohci_usb_reset(ohci);
-
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
bail:
spin_unlock_irqrestore (&ohci->lock, flags);
@@ -406,9 +402,14 @@ static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
}
-static int ohci_pci_resume (struct usb_hcd *hcd)
+static int ohci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ /* Make sure resume from hibernation re-enumerates everything */
+ if (hibernated)
+ ohci_usb_reset(hcd_to_ohci(hcd));
+
ohci_finish_controller_resume(hcd);
return 0;
}
@@ -484,12 +485,11 @@ static struct pci_driver ohci_pci_driver = {
.probe = usb_hcd_pci_probe,
.remove = usb_hcd_pci_remove,
+ .shutdown = usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM
- .suspend = usb_hcd_pci_suspend,
- .resume = usb_hcd_pci_resume,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
#endif
-
- .shutdown = usb_hcd_pci_shutdown,
};
-
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 3d191031732..1d56259c5db 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -162,7 +162,7 @@ static int ps3_ohci_probe(struct ps3_system_bus_device *dev)
dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__,
(unsigned long)virq);
- ps3_system_bus_set_driver_data(dev, hcd);
+ ps3_system_bus_set_drvdata(dev, hcd);
result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
@@ -195,8 +195,7 @@ fail_start:
static int ps3_ohci_remove(struct ps3_system_bus_device *dev)
{
unsigned int tmp;
- struct usb_hcd *hcd =
- (struct usb_hcd *)ps3_system_bus_get_driver_data(dev);
+ struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
BUG_ON(!hcd);
@@ -208,7 +207,7 @@ static int ps3_ohci_remove(struct ps3_system_bus_device *dev)
ohci_shutdown(hcd);
usb_remove_hcd(hcd);
- ps3_system_bus_set_driver_data(dev, NULL);
+ ps3_system_bus_set_drvdata(dev, NULL);
BUG_ON(!hcd->regs);
iounmap(hcd->regs);
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 033c2846ce5..83b5f9cea85 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/acpi.h>
#include "pci-quirks.h"
+#include "xhci-ext-caps.h"
#define UHCI_USBLEGSUP 0xc0 /* legacy support */
@@ -341,7 +342,127 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
return;
}
+/*
+ * handshake - spin reading a register until handshake completes
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @wait_usec: timeout in microseconds
+ * @delay_usec: delay in microseconds to wait between polling
+ *
+ * Polls a register every delay_usec microseconds.
+ * Returns 0 when the mask bits have the value done.
+ * Returns -ETIMEDOUT if this condition is not true after
+ * wait_usec microseconds have passed.
+ */
+static int handshake(void __iomem *ptr, u32 mask, u32 done,
+ int wait_usec, int delay_usec)
+{
+ u32 result;
+
+ do {
+ result = readl(ptr);
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay(delay_usec);
+ wait_usec -= delay_usec;
+ } while (wait_usec > 0);
+ return -ETIMEDOUT;
+}
+
+/**
+ * PCI Quirks for xHCI.
+ *
+ * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
+ * It signals to the BIOS that the OS wants control of the host controller,
+ * and then waits 5 seconds for the BIOS to hand over control.
+ * If we time out, assume the BIOS is broken and take control anyway.
+ */
+static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+{
+ void __iomem *base;
+ int ext_cap_offset;
+ void __iomem *op_reg_base;
+ u32 val;
+ int timeout;
+
+ if (!mmio_resource_enabled(pdev, 0))
+ return;
+
+ base = ioremap_nocache(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (base == NULL)
+ return;
+ /*
+ * Find the Legacy Support Capability register -
+ * this is optional for xHCI host controllers.
+ */
+ ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
+ do {
+ if (!ext_cap_offset)
+ /* We've reached the end of the extended capabilities */
+ goto hc_init;
+ val = readl(base + ext_cap_offset);
+ if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
+ break;
+ ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
+ } while (1);
+
+ /* If the BIOS owns the HC, signal that the OS wants it, and wait */
+ if (val & XHCI_HC_BIOS_OWNED) {
+ writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
+
+ /* Wait for 5 seconds with 10 microsecond polling interval */
+ timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
+ 0, 5000, 10);
+
+ /* Assume a buggy BIOS and take HC ownership anyway */
+ if (timeout) {
+ dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
+ " (BIOS bug ?) %08x\n", val);
+ writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
+ }
+ }
+
+ /* Disable any BIOS SMIs */
+ writel(XHCI_LEGACY_DISABLE_SMI,
+ base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+
+hc_init:
+ op_reg_base = base + XHCI_HC_LENGTH(readl(base));
+
+ /* Wait for the host controller to be ready before writing any
+ * operational or runtime registers. Wait 5 seconds and no more.
+ */
+ timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
+ 5000, 10);
+ /* Assume a buggy HC and start HC initialization anyway */
+ if (timeout) {
+ val = readl(op_reg_base + XHCI_STS_OFFSET);
+ dev_warn(&pdev->dev,
+ "xHCI HW not ready after 5 sec (HC bug?) "
+ "status = 0x%x\n", val);
+ }
+
+ /* Send the halt and disable interrupts command */
+ val = readl(op_reg_base + XHCI_CMD_OFFSET);
+ val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
+ writel(val, op_reg_base + XHCI_CMD_OFFSET);
+
+ /* Wait for the HC to halt - poll every 125 usec (one microframe). */
+ timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
+ XHCI_MAX_HALT_USEC, 125);
+ if (timeout) {
+ val = readl(op_reg_base + XHCI_STS_OFFSET);
+ dev_warn(&pdev->dev,
+ "xHCI HW did not halt within %d usec "
+ "status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
+ }
+
+ iounmap(base);
+}
static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
{
@@ -351,5 +472,7 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
quirk_usb_handoff_ohci(pdev);
else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
quirk_usb_disable_ehci(pdev);
+ else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
+ quirk_usb_handoff_xhci(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index f1626e58c14..56976cc0352 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -46,31 +46,10 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:r8a66597_hcd");
-#define DRIVER_VERSION "10 Apr 2008"
+#define DRIVER_VERSION "2009-05-26"
static const char hcd_name[] = "r8a66597_hcd";
-/* module parameters */
-#if !defined(CONFIG_SUPERH_ON_CHIP_R8A66597)
-static unsigned short clock = XTAL12;
-module_param(clock, ushort, 0644);
-MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 "
- "(default=0)");
-#endif
-
-static unsigned short vif = LDRV;
-module_param(vif, ushort, 0644);
-MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)");
-
-static unsigned short endian;
-module_param(endian, ushort, 0644);
-MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)");
-
-static unsigned short irq_sense = 0xff;
-module_param(irq_sense, ushort, 0644);
-MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=32, falling edge=0 "
- "(default=32)");
-
static void packet_write(struct r8a66597 *r8a66597, u16 pipenum);
static int r8a66597_get_frame(struct usb_hcd *hcd);
@@ -136,7 +115,8 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597)
}
} while ((tmp & USBE) != USBE);
r8a66597_bclr(r8a66597, USBE, SYSCFG0);
- r8a66597_mdfy(r8a66597, clock, XTAL, SYSCFG0);
+ r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata), XTAL,
+ SYSCFG0);
i = 0;
r8a66597_bset(r8a66597, XCKE, SYSCFG0);
@@ -203,6 +183,9 @@ static void r8a66597_disable_port(struct r8a66597 *r8a66597, int port)
static int enable_controller(struct r8a66597 *r8a66597)
{
int ret, port;
+ u16 vif = r8a66597->pdata->vif ? LDRV : 0;
+ u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
+ u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
ret = r8a66597_clock_enable(r8a66597);
if (ret < 0)
@@ -2373,7 +2356,7 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev)
return 0;
}
-static int __init r8a66597_probe(struct platform_device *pdev)
+static int __devinit r8a66597_probe(struct platform_device *pdev)
{
#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
char clk_name[8];
@@ -2418,6 +2401,12 @@ static int __init r8a66597_probe(struct platform_device *pdev)
goto clean_up;
}
+ if (pdev->dev.platform_data == NULL) {
+ dev_err(&pdev->dev, "no platform data\n");
+ ret = -ENODEV;
+ goto clean_up;
+ }
+
/* initialize hcd */
hcd = usb_create_hcd(&r8a66597_hc_driver, &pdev->dev, (char *)hcd_name);
if (!hcd) {
@@ -2428,6 +2417,8 @@ static int __init r8a66597_probe(struct platform_device *pdev)
r8a66597 = hcd_to_r8a66597(hcd);
memset(r8a66597, 0, sizeof(struct r8a66597));
dev_set_drvdata(&pdev->dev, r8a66597);
+ r8a66597->pdata = pdev->dev.platform_data;
+ r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
@@ -2458,29 +2449,6 @@ static int __init r8a66597_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
- /* irq_sense setting on cmdline takes precedence over resource
- * settings, so the introduction of irqflags in IRQ resourse
- * won't disturb existing setups */
- switch (irq_sense) {
- case INTL:
- irq_trigger = IRQF_TRIGGER_LOW;
- break;
- case 0:
- irq_trigger = IRQF_TRIGGER_FALLING;
- break;
- case 0xff:
- if (irq_trigger)
- irq_sense = (irq_trigger & IRQF_TRIGGER_LOW) ?
- INTL : 0;
- else {
- irq_sense = INTL;
- irq_trigger = IRQF_TRIGGER_LOW;
- }
- break;
- default:
- dev_err(&pdev->dev, "Unknown irq_sense value.\n");
- }
-
ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to add hcd\n");
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index f49208f1bb7..d72680b433f 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -30,6 +30,8 @@
#include <linux/clk.h>
#endif
+#include <linux/usb/r8a66597.h>
+
#define SYSCFG0 0x00
#define SYSCFG1 0x02
#define SYSSTS0 0x04
@@ -488,6 +490,7 @@ struct r8a66597 {
#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
struct clk *clk;
#endif
+ struct r8a66597_platdata *pdata;
struct r8a66597_device device0;
struct r8a66597_root_hub root_hub[R8A66597_MAX_ROOT_HUB];
struct list_head pipe_queue[R8A66597_MAX_NUM_PIPE];
@@ -506,6 +509,7 @@ struct r8a66597 {
unsigned long child_connect_map[4];
unsigned bus_suspended:1;
+ unsigned irq_sense_low:1;
};
static inline struct r8a66597 *hcd_to_r8a66597(struct usb_hcd *hcd)
@@ -660,10 +664,36 @@ static inline void r8a66597_port_power(struct r8a66597 *r8a66597, int port,
{
unsigned long dvstctr_reg = get_dvstctr_reg(port);
- if (power)
- r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
- else
- r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
+ if (r8a66597->pdata->port_power) {
+ r8a66597->pdata->port_power(port, power);
+ } else {
+ if (power)
+ r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
+ else
+ r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
+ }
+}
+
+static inline u16 get_xtal_from_pdata(struct r8a66597_platdata *pdata)
+{
+ u16 clock = 0;
+
+ switch (pdata->xtal) {
+ case R8A66597_PLATDATA_XTAL_12MHZ:
+ clock = XTAL12;
+ break;
+ case R8A66597_PLATDATA_XTAL_24MHZ:
+ clock = XTAL24;
+ break;
+ case R8A66597_PLATDATA_XTAL_48MHZ:
+ clock = XTAL48;
+ break;
+ default:
+ printk(KERN_ERR "r8a66597: platdata clock is wrong.\n");
+ break;
+ }
+
+ return clock;
}
#define get_pipectr_addr(pipenum) (PIPE1CTR + (pipenum - 1) * 2)
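[Editor's illustration, not part of the patch: the hunks above replace the old clock/vif/endian/irq_sense module parameters with fields read from struct r8a66597_platdata (xtal, vif, endian, port_power, all taken from this diff). A minimal sketch of how a board file might supply that data; the device name matches the driver's platform alias, while the id, chosen values and omitted MMIO/IRQ resources are assumptions.]

static struct r8a66597_platdata usb_host_pdata = {
	.xtal	= R8A66597_PLATDATA_XTAL_12MHZ,	/* replaces the old "clock" module parameter */
	.vif	= 1,				/* 3.3V VIF, replaces "vif" */
	.endian	= 0,				/* little endian, replaces "endian" */
	/* .port_power = <board VBUS hook>, optional, used by r8a66597_port_power() above */
};

static struct platform_device usb_host_device = {
	.name	= "r8a66597_hcd",
	.id	= 0,
	.dev	= {
		.platform_data = &usb_host_pdata,
	},
	/* MMIO and IRQ resources (IRQ declared with IRQF_TRIGGER_LOW for a low-level
	 * interrupt, which the probe routine now turns into irq_sense_low) omitted */
};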
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index cf5e4cf7ea4..274751b4409 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -769,7 +769,7 @@ static int uhci_rh_resume(struct usb_hcd *hcd)
return rc;
}
-static int uhci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
+static int uhci_pci_suspend(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
int rc = 0;
@@ -795,10 +795,6 @@ static int uhci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
/* FIXME: Enable non-PME# remote wakeup? */
- /* make sure snapshot being resumed re-enumerates everything */
- if (message.event == PM_EVENT_PRETHAW)
- uhci_hc_died(uhci);
-
done_okay:
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
done:
@@ -806,7 +802,7 @@ done:
return rc;
}
-static int uhci_pci_resume(struct usb_hcd *hcd)
+static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
@@ -820,6 +816,10 @@ static int uhci_pci_resume(struct usb_hcd *hcd)
spin_lock_irq(&uhci->lock);
+ /* Make sure resume from hibernation re-enumerates everything */
+ if (hibernated)
+ uhci_hc_died(uhci);
+
/* FIXME: Disable non-PME# remote wakeup? */
/* The firmware or a boot kernel may have changed the controller
@@ -940,10 +940,11 @@ static struct pci_driver uhci_pci_driver = {
.remove = usb_hcd_pci_remove,
.shutdown = uhci_shutdown,
-#ifdef CONFIG_PM
- .suspend = usb_hcd_pci_suspend,
- .resume = usb_hcd_pci_resume,
-#endif /* PM */
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
+#endif
};
static int __init uhci_hcd_init(void)
@@ -961,7 +962,7 @@ static int __init uhci_hcd_init(void)
errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
if (!errbuf)
goto errbuf_failed;
- uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
+ uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
if (!uhci_debugfs_root)
goto debug_failed;
}
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 3e5807d14ff..64e57bfe236 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -260,7 +260,7 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
INIT_LIST_HEAD(&qh->node);
if (udev) { /* Normal QH */
- qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ qh->type = usb_endpoint_type(&hep->desc);
if (qh->type != USB_ENDPOINT_XFER_ISOC) {
qh->dummy_td = uhci_alloc_td(uhci);
if (!qh->dummy_td) {
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
new file mode 100644
index 00000000000..2501c571f85
--- /dev/null
+++ b/drivers/usb/host/xhci-dbg.c
@@ -0,0 +1,485 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "xhci.h"
+
+#define XHCI_INIT_VALUE 0x0
+
+/* Add verbose debugging later, just print everything for now */
+
+void xhci_dbg_regs(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
+ xhci->cap_regs);
+ temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
+ &xhci->cap_regs->hc_capbase, temp);
+ xhci_dbg(xhci, "// CAPLENGTH: 0x%x\n",
+ (unsigned int) HC_LENGTH(temp));
+#if 0
+ xhci_dbg(xhci, "// HCIVERSION: 0x%x\n",
+ (unsigned int) HC_VERSION(temp));
+#endif
+
+ xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+ xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
+ &xhci->cap_regs->run_regs_off,
+ (unsigned int) temp & RTSOFF_MASK);
+ xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
+ xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
+ xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
+}
+
+static void xhci_print_cap_regs(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
+ (unsigned int) temp);
+ xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
+ (unsigned int) HC_LENGTH(temp));
+ xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
+ (unsigned int) HC_VERSION(temp));
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+ xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
+ (unsigned int) temp);
+ xhci_dbg(xhci, " Max device slots: %u\n",
+ (unsigned int) HCS_MAX_SLOTS(temp));
+ xhci_dbg(xhci, " Max interrupters: %u\n",
+ (unsigned int) HCS_MAX_INTRS(temp));
+ xhci_dbg(xhci, " Max ports: %u\n",
+ (unsigned int) HCS_MAX_PORTS(temp));
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+ xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
+ (unsigned int) temp);
+ xhci_dbg(xhci, " Isoc scheduling threshold: %u\n",
+ (unsigned int) HCS_IST(temp));
+ xhci_dbg(xhci, " Maximum allowed segments in event ring: %u\n",
+ (unsigned int) HCS_ERST_MAX(temp));
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
+ (unsigned int) temp);
+ xhci_dbg(xhci, " Worst case U1 device exit latency: %u\n",
+ (unsigned int) HCS_U1_LATENCY(temp));
+ xhci_dbg(xhci, " Worst case U2 device exit latency: %u\n",
+ (unsigned int) HCS_U2_LATENCY(temp));
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
+ xhci_dbg(xhci, " HC generates %s bit addresses\n",
+ HCC_64BIT_ADDR(temp) ? "64" : "32");
+ /* FIXME */
+ xhci_dbg(xhci, " FIXME: more HCCPARAMS debugging\n");
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+ xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
+}
+
+static void xhci_print_command_reg(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
+ xhci_dbg(xhci, " HC is %s\n",
+ (temp & CMD_RUN) ? "running" : "being stopped");
+ xhci_dbg(xhci, " HC has %sfinished hard reset\n",
+ (temp & CMD_RESET) ? "not " : "");
+ xhci_dbg(xhci, " Event Interrupts %s\n",
+ (temp & CMD_EIE) ? "enabled " : "disabled");
+ xhci_dbg(xhci, " Host System Error Interrupts %s\n",
+ (temp & CMD_EIE) ? "enabled " : "disabled");
+ xhci_dbg(xhci, " HC has %sfinished light reset\n",
+ (temp & CMD_LRESET) ? "not " : "");
+}
+
+static void xhci_print_status(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
+ xhci_dbg(xhci, " Event ring is %sempty\n",
+ (temp & STS_EINT) ? "not " : "");
+ xhci_dbg(xhci, " %sHost System Error\n",
+ (temp & STS_FATAL) ? "WARNING: " : "No ");
+ xhci_dbg(xhci, " HC is %s\n",
+ (temp & STS_HALT) ? "halted" : "running");
+}
+
+static void xhci_print_op_regs(struct xhci_hcd *xhci)
+{
+ xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
+ xhci_print_command_reg(xhci);
+ xhci_print_status(xhci);
+}
+
+static void xhci_print_ports(struct xhci_hcd *xhci)
+{
+ u32 __iomem *addr;
+ int i, j;
+ int ports;
+ char *names[NUM_PORT_REGS] = {
+ "status",
+ "power",
+ "link",
+ "reserved",
+ };
+
+ ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ addr = &xhci->op_regs->port_status_base;
+ for (i = 0; i < ports; i++) {
+ for (j = 0; j < NUM_PORT_REGS; ++j) {
+ xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
+ addr, names[j],
+ (unsigned int) xhci_readl(xhci, addr));
+ addr++;
+ }
+ }
+}
+
+void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+{
+ void *addr;
+ u32 temp;
+
+ addr = &ir_set->irq_pending;
+ temp = xhci_readl(xhci, addr);
+ if (temp == XHCI_INIT_VALUE)
+ return;
+
+ xhci_dbg(xhci, " %p: ir_set[%i]\n", ir_set, set_num);
+
+ xhci_dbg(xhci, " %p: ir_set.pending = 0x%x\n", addr,
+ (unsigned int)temp);
+
+ addr = &ir_set->irq_control;
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.control = 0x%x\n", addr,
+ (unsigned int)temp);
+
+ addr = &ir_set->erst_size;
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_size = 0x%x\n", addr,
+ (unsigned int)temp);
+
+ addr = &ir_set->rsvd;
+ temp = xhci_readl(xhci, addr);
+ if (temp != XHCI_INIT_VALUE)
+ xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
+ addr, (unsigned int)temp);
+
+ addr = &ir_set->erst_base[0];
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n",
+ addr, (unsigned int) temp);
+
+ addr = &ir_set->erst_base[1];
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n",
+ addr, (unsigned int) temp);
+
+ addr = &ir_set->erst_dequeue[0];
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n",
+ addr, (unsigned int) temp);
+
+ addr = &ir_set->erst_dequeue[1];
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n",
+ addr, (unsigned int) temp);
+}
+
+void xhci_print_run_regs(struct xhci_hcd *xhci)
+{
+ u32 temp;
+ int i;
+
+ xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
+ temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+ xhci_dbg(xhci, " %p: Microframe index = 0x%x\n",
+ &xhci->run_regs->microframe_index,
+ (unsigned int) temp);
+ for (i = 0; i < 7; ++i) {
+ temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
+ if (temp != XHCI_INIT_VALUE)
+ xhci_dbg(xhci, " WARN: %p: Rsvd[%i] = 0x%x\n",
+ &xhci->run_regs->rsvd[i],
+ i, (unsigned int) temp);
+ }
+}
+
+void xhci_print_registers(struct xhci_hcd *xhci)
+{
+ xhci_print_cap_regs(xhci);
+ xhci_print_op_regs(xhci);
+ xhci_print_ports(xhci);
+}
+
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+ int i;
+ for (i = 0; i < 4; ++i)
+ xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
+ i*4, trb->generic.field[i]);
+}
+
+/**
+ * Debug a transfer request block (TRB).
+ */
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+ u64 address;
+ u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
+
+ switch (type) {
+ case TRB_TYPE(TRB_LINK):
+ xhci_dbg(xhci, "Link TRB:\n");
+ xhci_print_trb_offsets(xhci, trb);
+
+ address = trb->link.segment_ptr[0] +
+ (((u64) trb->link.segment_ptr[1]) << 32);
+ xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
+
+ xhci_dbg(xhci, "Interrupter target = 0x%x\n",
+ GET_INTR_TARGET(trb->link.intr_target));
+ xhci_dbg(xhci, "Cycle bit = %u\n",
+ (unsigned int) (trb->link.control & TRB_CYCLE));
+ xhci_dbg(xhci, "Toggle cycle bit = %u\n",
+ (unsigned int) (trb->link.control & LINK_TOGGLE));
+ xhci_dbg(xhci, "No Snoop bit = %u\n",
+ (unsigned int) (trb->link.control & TRB_NO_SNOOP));
+ break;
+ case TRB_TYPE(TRB_TRANSFER):
+ address = trb->trans_event.buffer[0] +
+ (((u64) trb->trans_event.buffer[1]) << 32);
+ /*
+ * FIXME: look at flags to figure out if it's an address or if
+ * the data is directly in the buffer field.
+ */
+ xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
+ break;
+ case TRB_TYPE(TRB_COMPLETION):
+ address = trb->event_cmd.cmd_trb[0] +
+ (((u64) trb->event_cmd.cmd_trb[1]) << 32);
+ xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
+ xhci_dbg(xhci, "Completion status = %u\n",
+ (unsigned int) GET_COMP_CODE(trb->event_cmd.status));
+ xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
+ break;
+ default:
+ xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
+ (unsigned int) type>>10);
+ xhci_print_trb_offsets(xhci, trb);
+ break;
+ }
+}
+
+/**
+ * Debug a segment with an xHCI ring.
+ *
+ * @return The Link TRB of the segment, or NULL if there is no Link TRB
+ * (which is a bug, since all segments must have a Link TRB).
+ *
+ * Prints out all TRBs in the segment, even those after the Link TRB.
+ *
+ * XXX: should we print out TRBs that the HC owns? As long as we don't
+ * write, that should be fine... We shouldn't expect that the memory pointed to
+ * by the TRB is valid at all. Do we care about ones the HC owns? Probably,
+ * for HC debugging.
+ */
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+ int i;
+ u32 addr = (u32) seg->dma;
+ union xhci_trb *trb = seg->trbs;
+
+ for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
+ trb = &seg->trbs[i];
+ xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
+ (unsigned int) trb->link.segment_ptr[0],
+ (unsigned int) trb->link.segment_ptr[1],
+ (unsigned int) trb->link.intr_target,
+ (unsigned int) trb->link.control);
+ addr += sizeof(*trb);
+ }
+}
+
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
+ ring->dequeue,
+ (unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
+ ring->dequeue));
+ xhci_dbg(xhci, "Ring deq updated %u times\n",
+ ring->deq_updates);
+ xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
+ ring->enqueue,
+ (unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
+ ring->enqueue));
+ xhci_dbg(xhci, "Ring enq updated %u times\n",
+ ring->enq_updates);
+}
+
+/**
+ * Debugging for an xHCI ring, which is a queue broken into multiple segments.
+ *
+ * Print out each segment in the ring. Check that the DMA address in
+ * each link segment actually matches the segment's stored DMA address.
+ * Check that the link end bit is only set at the end of the ring.
+ * Check that the dequeue and enqueue pointers point to real data in this ring
+ * (not some other ring).
+ */
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ /* FIXME: Throw an error if any segment doesn't have a Link TRB */
+ struct xhci_segment *seg;
+ struct xhci_segment *first_seg = ring->first_seg;
+ xhci_debug_segment(xhci, first_seg);
+
+ if (!ring->enq_updates && !ring->deq_updates) {
+ xhci_dbg(xhci, " Ring has not been updated\n");
+ return;
+ }
+ for (seg = first_seg->next; seg != first_seg; seg = seg->next)
+ xhci_debug_segment(xhci, seg);
+}
+
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+{
+ u32 addr = (u32) erst->erst_dma_addr;
+ int i;
+ struct xhci_erst_entry *entry;
+
+ for (i = 0; i < erst->num_entries; ++i) {
+ entry = &erst->entries[i];
+ xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
+ (unsigned int) addr,
+ (unsigned int) entry->seg_addr[0],
+ (unsigned int) entry->seg_addr[1],
+ (unsigned int) entry->seg_size,
+ (unsigned int) entry->rsvd);
+ addr += sizeof(*entry);
+ }
+}
+
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
+{
+ u32 val;
+
+ val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+ xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
+ val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
+ xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
+}
+
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
+{
+ int i, j;
+ int last_ep_ctx = 31;
+ /* Fields are 32 bits wide, DMA addresses are in bytes */
+ int field_size = 32 / 8;
+
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+ &ctx->drop_flags, (unsigned long long)dma,
+ ctx->drop_flags);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+ &ctx->add_flags, (unsigned long long)dma,
+ ctx->add_flags);
+ dma += field_size;
+ for (i = 0; i < 6; ++i) {
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+ &ctx->rsvd[i], (unsigned long long)dma,
+ ctx->rsvd[i], i);
+ dma += field_size;
+ }
+
+ xhci_dbg(xhci, "Slot Context:\n");
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
+ &ctx->slot.dev_info,
+ (unsigned long long)dma, ctx->slot.dev_info);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
+ &ctx->slot.dev_info2,
+ (unsigned long long)dma, ctx->slot.dev_info2);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
+ &ctx->slot.tt_info,
+ (unsigned long long)dma, ctx->slot.tt_info);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
+ &ctx->slot.dev_state,
+ (unsigned long long)dma, ctx->slot.dev_state);
+ dma += field_size;
+ for (i = 0; i < 4; ++i) {
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+ &ctx->slot.reserved[i], (unsigned long long)dma,
+ ctx->slot.reserved[i], i);
+ dma += field_size;
+ }
+
+ if (last_ep < 31)
+ last_ep_ctx = last_ep + 1;
+ for (i = 0; i < last_ep_ctx; ++i) {
+ xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
+ &ctx->ep[i].ep_info,
+ (unsigned long long)dma, ctx->ep[i].ep_info);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
+ &ctx->ep[i].ep_info2,
+ (unsigned long long)dma, ctx->ep[i].ep_info2);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
+ &ctx->ep[i].deq[0],
+ (unsigned long long)dma, ctx->ep[i].deq[0]);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
+ &ctx->ep[i].deq[1],
+ (unsigned long long)dma, ctx->ep[i].deq[1]);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
+ &ctx->ep[i].tx_info,
+ (unsigned long long)dma, ctx->ep[i].tx_info);
+ dma += field_size;
+ for (j = 0; j < 3; ++j) {
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+ &ctx->ep[i].reserved[j],
+ (unsigned long long)dma,
+ ctx->ep[i].reserved[j], j);
+ dma += field_size;
+ }
+ }
+}
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
new file mode 100644
index 00000000000..ecc131c3fe3
--- /dev/null
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -0,0 +1,145 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+/* Up to 16 microframes to halt an HC - one microframe is 125 microseconds */
+#define XHCI_MAX_HALT_USEC (16*125)
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define XHCI_STS_HALT (1<<0)
+
+/* HCCPARAMS offset from PCI base address */
+#define XHCI_HCC_PARAMS_OFFSET 0x10
+/* HCCPARAMS contains the first extended capability pointer */
+#define XHCI_HCC_EXT_CAPS(p) (((p)>>16)&0xffff)
+
+/* Command and Status registers offset from the Operational Registers address */
+#define XHCI_CMD_OFFSET 0x00
+#define XHCI_STS_OFFSET 0x04
+
+#define XHCI_MAX_EXT_CAPS 50
+
+/* Capability Register */
+/* bits 7:0 - how long is the Capabilities register */
+#define XHCI_HC_LENGTH(p) (((p)>>00)&0x00ff)
+
+/* Extended capability register fields */
+#define XHCI_EXT_CAPS_ID(p) (((p)>>0)&0xff)
+#define XHCI_EXT_CAPS_NEXT(p) (((p)>>8)&0xff)
+#define XHCI_EXT_CAPS_VAL(p) ((p)>>16)
+/* Extended capability IDs - ID 0 reserved */
+#define XHCI_EXT_CAPS_LEGACY 1
+#define XHCI_EXT_CAPS_PROTOCOL 2
+#define XHCI_EXT_CAPS_PM 3
+#define XHCI_EXT_CAPS_VIRT 4
+#define XHCI_EXT_CAPS_ROUTE 5
+/* IDs 6-9 reserved */
+#define XHCI_EXT_CAPS_DEBUG 10
+/* USB Legacy Support Capability - section 7.1.1 */
+#define XHCI_HC_BIOS_OWNED (1 << 16)
+#define XHCI_HC_OS_OWNED (1 << 24)
+
+/* USB Legacy Support Capability - section 7.1.1 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_SUPPORT_OFFSET (0x00)
+
+/* USB Legacy Support Control and Status Register - section 7.1.2 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_CONTROL_OFFSET (0x04)
+/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
+#define XHCI_LEGACY_DISABLE_SMI ((0x3 << 1) + (0xff << 5) + (0x7 << 17))
+
+/* command register values to disable interrupts and halt the HC */
+/* start/stop HC execution - do not write unless HC is halted */
+#define XHCI_CMD_RUN (1 << 0)
+/* Event Interrupt Enable - get irq when EINT bit is set in USBSTS register */
+#define XHCI_CMD_EIE (1 << 2)
+/* Host System Error Interrupt Enable - get irq when HSEIE bit set in USBSTS */
+#define XHCI_CMD_HSEIE (1 << 3)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define XHCI_CMD_EWE (1 << 10)
+
+#define XHCI_IRQS (XHCI_CMD_EIE | XHCI_CMD_HSEIE | XHCI_CMD_EWE)
+
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define XHCI_STS_CNR (1 << 11)
+
+#include <linux/io.h>
+
+/**
+ * Return the next extended capability pointer register.
+ *
+ * @base PCI register base address.
+ *
+ * @ext_offset Offset of the 32-bit register that contains the extended
+ * capabilities pointer. If searching for the first extended capability, pass
+ * in XHCI_HCC_PARAMS_OFFSET. If searching for the next extended capability,
+ * pass in the offset of the current extended capability register.
+ *
+ * Returns 0 if there is no next extended capability register or returns the register offset
+ * from the PCI registers base address.
+ */
+static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)
+{
+ u32 next;
+
+ next = readl(base + ext_offset);
+
+ if (ext_offset == XHCI_HCC_PARAMS_OFFSET)
+ /* Find the first extended capability */
+ next = XHCI_HCC_EXT_CAPS(next);
+ else
+ /* Find the next extended capability */
+ next = XHCI_EXT_CAPS_NEXT(next);
+ if (!next)
+ return 0;
+ /*
+ * Address calculation from offset of extended capabilities
+ * (or HCCPARAMS) register - see section 5.3.6 and section 7.
+ */
+ return ext_offset + (next << 2);
+}
+
+/**
+ * Find the offset of the extended capabilities with capability ID id.
+ *
+ * @base PCI MMIO registers base address.
+ * @ext_offset Offset from base of the first extended capability to look at,
+ * or the address of HCCPARAMS.
+ * @id Extended capability ID to search for.
+ *
+ * This uses an arbitrary limit of XHCI_MAX_EXT_CAPS extended capabilities
+ * to make sure that the list doesn't contain a loop.
+ */
+static inline int xhci_find_ext_cap_by_id(void __iomem *base, int ext_offset, int id)
+{
+ u32 val;
+ int limit = XHCI_MAX_EXT_CAPS;
+
+ while (ext_offset && limit > 0) {
+ val = readl(base + ext_offset);
+ if (XHCI_EXT_CAPS_ID(val) == id)
+ break;
+ ext_offset = xhci_find_next_cap_offset(base, ext_offset);
+ limit--;
+ }
+ if (limit > 0)
+ return ext_offset;
+ return 0;
+}
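[Editor's illustration, not part of the patch: a sketch of how the helper just defined could be used to locate the capability that quirk_usb_handoff_xhci() earlier in this diff walks to by hand. It assumes `base` is the ioremapped MMIO register base; the wrapper name find_legacy_cap is hypothetical.]

/* Illustrative only: return the offset of the USB Legacy Support capability,
 * or 0 if the controller does not advertise one.
 */
static int find_legacy_cap(void __iomem *base)
{
	/* First hop: HCCPARAMS gives the offset of the first extended capability */
	int off = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);

	/* Then search the capability list (bounded by XHCI_MAX_EXT_CAPS) by ID */
	return xhci_find_ext_cap_by_id(base, off, XHCI_EXT_CAPS_LEGACY);
}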
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
new file mode 100644
index 00000000000..dba3e07ccd0
--- /dev/null
+++ b/drivers/usb/host/xhci-hcd.c
@@ -0,0 +1,1274 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include "xhci.h"
+
+#define DRIVER_AUTHOR "Sarah Sharp"
+#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
+
+/* TODO: copied from ehci-hcd.c - can this be refactored? */
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done). There are two failure modes: "usec" microseconds have passed (major
+ * hardware flakeout), or the register reads as all-ones (hardware removed).
+ */
+static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+{
+ u32 result;
+
+ do {
+ result = xhci_readl(xhci, ptr);
+ if (result == ~(u32)0) /* card removed */
+ return -ENODEV;
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay(1);
+ usec--;
+ } while (usec > 0);
+ return -ETIMEDOUT;
+}
+
+/*
+ * Force HC into halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * HC will complete any current and actively pipelined transactions, and
+ * should halt within 16 microframes of the run/stop bit being cleared.
+ * Read HC Halted bit in the status register to see when the HC is finished.
+ * XXX: shouldn't we set HC_STATE_HALT here somewhere?
+ */
+int xhci_halt(struct xhci_hcd *xhci)
+{
+ u32 halted;
+ u32 cmd;
+ u32 mask;
+
+ xhci_dbg(xhci, "// Halt the HC\n");
+ /* Disable all interrupts from the host controller */
+ mask = ~(XHCI_IRQS);
+ halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
+ if (!halted)
+ mask &= ~CMD_RUN;
+
+ cmd = xhci_readl(xhci, &xhci->op_regs->command);
+ cmd &= mask;
+ xhci_writel(xhci, cmd, &xhci->op_regs->command);
+
+ return handshake(xhci, &xhci->op_regs->status,
+ STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+}
+
+/*
+ * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
+ *
+ * This resets pipelines, timers, counters, state machines, etc.
+ * Transactions will be terminated immediately, and operational registers
+ * will be set to their defaults.
+ */
+int xhci_reset(struct xhci_hcd *xhci)
+{
+ u32 command;
+ u32 state;
+
+ state = xhci_readl(xhci, &xhci->op_regs->status);
+ BUG_ON((state & STS_HALT) == 0);
+
+ xhci_dbg(xhci, "// Reset the HC\n");
+ command = xhci_readl(xhci, &xhci->op_regs->command);
+ command |= CMD_RESET;
+ xhci_writel(xhci, command, &xhci->op_regs->command);
+ /* XXX: Why does EHCI set this here? Shouldn't other code do this? */
+ xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+
+ return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
+}
+
+/*
+ * Stop the HC from processing the endpoint queues.
+ */
+static void xhci_quiesce(struct xhci_hcd *xhci)
+{
+ /*
+ * Queues are per endpoint, so we need to disable an endpoint or slot.
+ *
+ * To disable a slot, we need to insert a disable slot command on the
+ * command ring and ring the doorbell. This will also free any internal
+ * resources associated with the slot (which might not be what we want).
+ *
+ * A Release Endpoint command sounds better - doesn't free internal HC
+ * memory, but removes the endpoints from the schedule and releases the
+ * bandwidth, disables the doorbells, and clears the endpoint enable
+ * flag. Usually used prior to a set interface command.
+ *
+ * TODO: Implement after command ring code is done.
+ */
+ BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
+ xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
+}
+
+#if 0
+/* Set up MSI-X table for entry 0 (may claim other entries later) */
+static int xhci_setup_msix(struct xhci_hcd *xhci)
+{
+ int ret;
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+ xhci->msix_count = 0;
+ /* XXX: did I do this right? ixgbe does kcalloc for more than one */
+ xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
+ if (!xhci->msix_entries) {
+ xhci_err(xhci, "Failed to allocate MSI-X entries\n");
+ return -ENOMEM;
+ }
+ xhci->msix_entries[0].entry = 0;
+
+ ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
+ if (ret) {
+ xhci_err(xhci, "Failed to enable MSI-X\n");
+ goto free_entries;
+ }
+
+ /*
+ * Pass the xhci pointer value as the request_irq "cookie".
+ * If more irqs are added, this will need to be unique for each one.
+ */
+ ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
+ "xHCI", xhci_to_hcd(xhci));
+ if (ret) {
+ xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
+ goto disable_msix;
+ }
+ xhci_dbg(xhci, "Finished setting up MSI-X\n");
+ return 0;
+
+disable_msix:
+ pci_disable_msix(pdev);
+free_entries:
+ kfree(xhci->msix_entries);
+ xhci->msix_entries = NULL;
+ return ret;
+}
+
+/* XXX: code duplication; can xhci_setup_msix call this? */
+/* Free any IRQs and disable MSI-X */
+static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+{
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ if (!xhci->msix_entries)
+ return;
+
+ free_irq(xhci->msix_entries[0].vector, xhci);
+ pci_disable_msix(pdev);
+ kfree(xhci->msix_entries);
+ xhci->msix_entries = NULL;
+ xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
+}
+#endif
+
+/*
+ * Initialize memory for HCD and xHC (one-time init).
+ *
+ * Program the PAGESIZE register, initialize the device context array, create
+ * device contexts (?), set up a command ring segment (or two?), create event
+ * ring (one for now).
+ */
+int xhci_init(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int retval = 0;
+
+ xhci_dbg(xhci, "xhci_init\n");
+ spin_lock_init(&xhci->lock);
+ retval = xhci_mem_init(xhci, GFP_KERNEL);
+ xhci_dbg(xhci, "Finished xhci_init\n");
+
+ return retval;
+}
+
+/*
+ * Called in interrupt context when there might be work
+ * queued on the event ring
+ *
+ * xhci->lock must be held by caller.
+ */
+static void xhci_work(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ /*
+ * Clear the op reg interrupt status first,
+ * so we can receive interrupts from other MSI-X interrupters.
+ * Write 1 to clear the interrupt status.
+ */
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ temp |= STS_EINT;
+ xhci_writel(xhci, temp, &xhci->op_regs->status);
+ /* FIXME when MSI-X is supported and there are multiple vectors */
+ /* Clear the MSI-X event interrupt status */
+
+ /* Acknowledge the interrupt */
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ temp |= 0x3;
+ xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
+ /* Flush posted writes */
+ xhci_readl(xhci, &xhci->ir_set->irq_pending);
+
+ /* FIXME this should be a delayed service routine that clears the EHB */
+ xhci_handle_event(xhci);
+
+ /* Clear the event handler busy flag; the event ring should be empty. */
+ temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+ xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
+ /* Flush posted writes -- FIXME is this necessary? */
+ xhci_readl(xhci, &xhci->ir_set->irq_pending);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * xHCI spec says we can get an interrupt, and if the HC has an error condition,
+ * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
+ * indicators of an event TRB error, but we check the status *first* to be safe.
+ */
+irqreturn_t xhci_irq(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ u32 temp, temp2;
+
+ spin_lock(&xhci->lock);
+ /* Check if the xHC generated the interrupt, or the irq is shared */
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
+ spin_unlock(&xhci->lock);
+ return IRQ_NONE;
+ }
+
+ if (temp & STS_FATAL) {
+ xhci_warn(xhci, "WARNING: Host System Error\n");
+ xhci_halt(xhci);
+ xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+ spin_unlock(&xhci->lock);
+ return -ESHUTDOWN;
+ }
+
+ xhci_work(xhci);
+ spin_unlock(&xhci->lock);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+void xhci_event_ring_work(unsigned long arg)
+{
+ unsigned long flags;
+ int temp;
+ struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
+ int i, j;
+
+ xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
+ xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
+ xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
+ xhci->error_bitmask = 0;
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
+ xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+ temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+ temp &= ERST_PTR_MASK;
+ xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+ xhci_dbg(xhci, "Command ring:\n");
+ xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
+ xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+ xhci_dbg_cmd_ptrs(xhci);
+ for (i = 0; i < MAX_HC_SLOTS; ++i) {
+ if (xhci->devs[i]) {
+ for (j = 0; j < 31; ++j) {
+ if (xhci->devs[i]->ep_rings[j]) {
+ xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
+ xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
+ }
+ }
+ }
+ }
+
+ if (xhci->noops_submitted != NUM_TEST_NOOPS)
+ if (xhci_setup_one_noop(xhci))
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ if (!xhci->zombie)
+ mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
+ else
+ xhci_dbg(xhci, "Quit polling the event ring.\n");
+}
+#endif
+
+/*
+ * Start the HC after it was halted.
+ *
+ * This function is called by the USB core when the HC driver is added.
+ * Its opposite is xhci_stop().
+ *
+ * xhci_init() must be called once before this function can be called.
+ * Reset the HC, enable device slot contexts, program DCBAAP, and
+ * set command ring pointer and event ring pointer.
+ *
+ * Setup MSI-X vectors and enable interrupts.
+ */
+int xhci_run(struct usb_hcd *hcd)
+{
+ u32 temp;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ void (*doorbell)(struct xhci_hcd *) = NULL;
+
+ hcd->uses_new_polling = 1;
+ hcd->poll_rh = 0;
+
+ xhci_dbg(xhci, "xhci_run\n");
+#if 0 /* FIXME: MSI not setup yet */
+ /* Do this at the very last minute */
+ ret = xhci_setup_msix(xhci);
+ if (!ret)
+ return ret;
+
+ return -ENOSYS;
+#endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+ init_timer(&xhci->event_ring_timer);
+ xhci->event_ring_timer.data = (unsigned long) xhci;
+ xhci->event_ring_timer.function = xhci_event_ring_work;
+ /* Poll the event ring */
+ xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
+ xhci->zombie = 0;
+ xhci_dbg(xhci, "Setting event ring polling timer\n");
+ add_timer(&xhci->event_ring_timer);
+#endif
+
+ xhci_dbg(xhci, "// Set the interrupt modulation register\n");
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
+ temp &= ~ER_IRQ_INTERVAL_MASK;
+ temp |= (u32) 160;
+ xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
+
+ /* Set the HCD state before we enable the irqs */
+ hcd->state = HC_STATE_RUNNING;
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp |= (CMD_EIE);
+ xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
+ temp);
+ xhci_writel(xhci, temp, &xhci->op_regs->command);
+
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
+ xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
+ xhci_writel(xhci, ER_IRQ_ENABLE(temp),
+ &xhci->ir_set->irq_pending);
+ xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+ if (NUM_TEST_NOOPS > 0)
+ doorbell = xhci_setup_one_noop(xhci);
+
+ xhci_dbg(xhci, "Command ring memory map follows:\n");
+ xhci_debug_ring(xhci, xhci->cmd_ring);
+ xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+ xhci_dbg_cmd_ptrs(xhci);
+
+ xhci_dbg(xhci, "ERST memory map follows:\n");
+ xhci_dbg_erst(xhci, &xhci->erst);
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_ring(xhci, xhci->event_ring);
+ xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+ temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+ temp &= ERST_PTR_MASK;
+ xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+ temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
+ xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
+
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp |= (CMD_RUN);
+ xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+ temp);
+ xhci_writel(xhci, temp, &xhci->op_regs->command);
+ /* Flush PCI posted writes */
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
+ if (doorbell)
+ (*doorbell)(xhci);
+
+ xhci_dbg(xhci, "Finished xhci_run\n");
+ return 0;
+}
+
+/*
+ * Stop xHCI driver.
+ *
+ * This function is called by the USB core when the HC driver is removed.
+ * Its opposite is xhci_run().
+ *
+ * Disable device contexts, disable IRQs, and quiesce the HC.
+ * Reset the HC, finish any completed transactions, and cleanup memory.
+ */
+void xhci_stop(struct usb_hcd *hcd)
+{
+ u32 temp;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ spin_lock_irq(&xhci->lock);
+ if (HC_IS_RUNNING(hcd->state))
+ xhci_quiesce(xhci);
+ xhci_halt(xhci);
+ xhci_reset(xhci);
+ spin_unlock_irq(&xhci->lock);
+
+#if 0 /* No MSI yet */
+ xhci_cleanup_msix(xhci);
+#endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+ /* Tell the event ring poll function not to reschedule */
+ xhci->zombie = 1;
+ del_timer_sync(&xhci->event_ring_timer);
+#endif
+
+ xhci_dbg(xhci, "// Disabling event ring interrupts\n");
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ xhci_writel(xhci, ER_IRQ_DISABLE(temp),
+ &xhci->ir_set->irq_pending);
+ xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+ xhci_dbg(xhci, "cleaning up memory\n");
+ xhci_mem_cleanup(xhci);
+ xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
+ xhci_readl(xhci, &xhci->op_regs->status));
+}
+
+/*
+ * Shutdown HC (not bus-specific)
+ *
+ * This is called when the machine is rebooting or halting. We assume that the
+ * machine will be powered off, and the HC's internal state will be reset.
+ * Don't bother to free memory.
+ */
+void xhci_shutdown(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ spin_lock_irq(&xhci->lock);
+ xhci_halt(xhci);
+ spin_unlock_irq(&xhci->lock);
+
+#if 0
+ xhci_cleanup_msix(xhci);
+#endif
+
+ xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
+ xhci_readl(xhci, &xhci->op_regs->status));
+}
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
+ * HCDs. Find the index for an endpoint given its descriptor. Use the return
+ * value to right shift 1 for the bitmask.
+ *
+ * Index = (epnum * 2) + direction - 1,
+ * where direction = 0 for OUT, 1 for IN.
+ * For control endpoints, the IN index is used (OUT index is unused), so
+ * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
+ */
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
+{
+ unsigned int index;
+ if (usb_endpoint_xfer_control(desc))
+ index = (unsigned int) (usb_endpoint_num(desc)*2);
+ else
+ index = (unsigned int) (usb_endpoint_num(desc)*2) +
+ (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
+ return index;
+}
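[Editor's note, illustrative only: a short worked example of the index formula above. Bulk IN endpoint 0x81 has epnum = 1 and direction = 1, so index = (1 * 2) + 1 - 1 = 2; bulk OUT endpoint 0x01 gives (1 * 2) + 0 - 1 = 1; the default control endpoint gives (0 * 2) = 0. The corresponding control-context flag from xhci_get_endpoint_flag() below is then 1 << (index + 1), e.g. 0x8 for endpoint 0x81.]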
+
+/* Find the flag for this endpoint (for use in the control context). Use the
+ * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
+{
+ return 1 << (xhci_get_endpoint_index(desc) + 1);
+}
+
+/* Compute the last valid endpoint context index. Basically, this is the
+ * endpoint index plus one. For slot contexts with more than one valid endpoint,
+ * we find the most significant bit set in the added contexts flags.
+ * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
+ * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
+ */
+static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
+{
+ return fls(added_ctxs) - 1;
+}
+
+/* Returns 1 if the arguments are OK;
+ * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
+ */
+int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep, int check_ep, const char *func) {
+ if (!hcd || (check_ep && !ep) || !udev) {
+ printk(KERN_DEBUG "xHCI %s called with invalid args\n",
+ func);
+ return -EINVAL;
+ }
+ if (!udev->parent) {
+ printk(KERN_DEBUG "xHCI %s called for root hub\n",
+ func);
+ return 0;
+ }
+ if (!udev->slot_id) {
+ printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
+ func);
+ return -EINVAL;
+ }
+ return 1;
+}
+
+/*
+ * non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ */
+int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ unsigned long flags;
+ int ret = 0;
+ unsigned int slot_id, ep_index;
+
+ if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
+ return -EINVAL;
+
+ slot_id = urb->dev->slot_id;
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!xhci->devs || !xhci->devs[slot_id]) {
+ if (!in_interrupt())
+ dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!in_interrupt())
+ xhci_dbg(xhci, "urb submitted during PCI suspend\n");
+ ret = -ESHUTDOWN;
+ goto exit;
+ }
+ if (usb_endpoint_xfer_control(&urb->ep->desc))
+ ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
+ slot_id, ep_index);
+ else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
+ ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
+ slot_id, ep_index);
+ else
+ ret = -EINVAL;
+exit:
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return ret;
+}
+
+/*
+ * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
+ * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
+ * should pick up where it left off in the TD, unless a Set Transfer Ring
+ * Dequeue Pointer is issued.
+ *
+ * The TRBs that make up the buffers for the canceled URB will be "removed" from
+ * the ring. Since the ring is a contiguous structure, they can't be physically
+ * removed. Instead, there are two options:
+ *
+ * 1) If the HC is in the middle of processing the URB to be canceled, we
+ * simply move the ring's dequeue pointer past those TRBs using the Set
+ * Transfer Ring Dequeue Pointer command. This will be the common case,
+ * when drivers timeout on the last submitted URB and attempt to cancel.
+ *
+ * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
+ * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
+ * HC will need to invalidate any TRBs it has cached after the stop
+ * endpoint command, as noted in the xHCI 0.95 errata.
+ *
+ * 3) The TD may have completed by the time the Stop Endpoint Command
+ * completes, so software needs to handle that case too.
+ *
+ * This function should protect against the TD enqueueing code ringing the
+ * doorbell while this code is waiting for a Stop Endpoint command to complete.
+ * It also needs to account for multiple cancellations happening at the same
+ * time for the same endpoint.
+ *
+ * Note that this function can be called in any context, or so says
+ * usb_hcd_unlink_urb().
+ */
+int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ unsigned long flags;
+ int ret;
+ struct xhci_hcd *xhci;
+ struct xhci_td *td;
+ unsigned int ep_index;
+ struct xhci_ring *ep_ring;
+
+ xhci = hcd_to_xhci(hcd);
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* Make sure the URB hasn't completed or been unlinked already */
+ ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (ret || !urb->hcpriv)
+ goto done;
+
+ xhci_dbg(xhci, "Cancel URB %p\n", urb);
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
+ td = (struct xhci_td *) urb->hcpriv;
+
+ ep_ring->cancels_pending++;
+ list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
+ /* Queue a stop endpoint command, but only if this is
+ * the first cancellation to be handled.
+ */
+ if (ep_ring->cancels_pending == 1) {
+ xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
+ xhci_ring_cmd_db(xhci);
+ }
+done:
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return ret;
+}
+
+/* Drop an endpoint from a new bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ *
+ * The USB core will not allow URBs to be queued to an endpoint that is being
+ * disabled, so there's no need for mutual exclusion to protect
+ * the xhci->devs[slot_id] structure.
+ */
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_device_control *in_ctx;
+ unsigned int last_ctx;
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 drop_flag;
+ u32 new_add_flags, new_drop_flags, new_slot_info;
+ int ret;
+
+ ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+
+ drop_flag = xhci_get_endpoint_flag(&ep->desc);
+ if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
+ xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
+ __func__, drop_flag);
+ return 0;
+ }
+
+ if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+ /* If the HC already knows the endpoint is disabled,
+ * or the HCD has noted it is disabled, ignore this request
+ */
+ if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
+ in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+ xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+ __func__, ep);
+ return 0;
+ }
+
+ in_ctx->drop_flags |= drop_flag;
+ new_drop_flags = in_ctx->drop_flags;
+
+ in_ctx->add_flags &= ~drop_flag;
+ new_add_flags = in_ctx->add_flags;
+
+ last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
+ /* Update the last valid endpoint context, if we deleted the last one */
+ if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
+ in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+ in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+ }
+ new_slot_info = in_ctx->slot.dev_info;
+
+ xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
+
+ xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+ (unsigned int) ep->desc.bEndpointAddress,
+ udev->slot_id,
+ (unsigned int) new_drop_flags,
+ (unsigned int) new_add_flags,
+ (unsigned int) new_slot_info);
+ return 0;
+}
+
+/* Add an endpoint to a new possible bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ *
+ * The USB core will not allow URBs to be queued to an endpoint until the
+ * configuration or alt setting is installed in the device, so there's no need
+ * for mutual exclusion to protect the xhci->devs[slot_id] structure.
+ */
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_device_control *in_ctx;
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 added_ctxs;
+ unsigned int last_ctx;
+ u32 new_add_flags, new_drop_flags, new_slot_info;
+ int ret = 0;
+
+ ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+
+ added_ctxs = xhci_get_endpoint_flag(&ep->desc);
+ last_ctx = xhci_last_valid_endpoint(added_ctxs);
+ if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
+ /* FIXME when we have to issue an evaluate endpoint command to
+ * deal with ep0 max packet size changing once we get the
+ * descriptors
+ */
+ xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
+ __func__, added_ctxs);
+ return 0;
+ }
+
+ if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+ /* If the HCD has already noted the endpoint is enabled,
+ * ignore this request.
+ */
+ if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+ xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+ __func__, ep);
+ return 0;
+ }
+
+ /*
+ * Configuration and alternate setting changes must be done in
+ * process context, not interrupt context (or so the documentation
+ * for usb_set_interface() and usb_set_configuration() claims).
+ */
+ if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
+ udev, ep, GFP_KERNEL) < 0) {
+ dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
+ __func__, ep->desc.bEndpointAddress);
+ return -ENOMEM;
+ }
+
+ in_ctx->add_flags |= added_ctxs;
+ new_add_flags = in_ctx->add_flags;
+
+ /* If xhci_endpoint_disable() was called for this endpoint, but the
+ * xHC hasn't been notified yet through the check_bandwidth() call,
+ * this re-adds a new state for the endpoint from the new endpoint
+ * descriptors. We must drop and re-add this endpoint, so we leave the
+ * drop flags alone.
+ */
+ new_drop_flags = in_ctx->drop_flags;
+
+ /* Update the last valid endpoint context, if we just added one past the old last valid one */
+ if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+ in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+ in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+ }
+ new_slot_info = in_ctx->slot.dev_info;
+
+ xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+ (unsigned int) ep->desc.bEndpointAddress,
+ udev->slot_id,
+ (unsigned int) new_drop_flags,
+ (unsigned int) new_add_flags,
+ (unsigned int) new_slot_info);
+ return 0;
+}
+
+static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
+{
+ struct xhci_ep_ctx *ep_ctx;
+ int i;
+
+ /* When a device's add flag and drop flag are zero, any subsequent
+ * configure endpoint command will leave that endpoint's state
+ * untouched. Make sure we don't leave any old state in the input
+ * endpoint contexts.
+ */
+ virt_dev->in_ctx->drop_flags = 0;
+ virt_dev->in_ctx->add_flags = 0;
+ virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+ /* Endpoint 0 is always valid */
+ virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+ for (i = 1; i < 31; ++i) {
+ ep_ctx = &virt_dev->in_ctx->ep[i];
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = 0;
+ ep_ctx->deq[0] = 0;
+ ep_ctx->deq[1] = 0;
+ ep_ctx->tx_info = 0;
+ }
+}
+
+/* Called after one or more calls to xhci_add_endpoint() or
+ * xhci_drop_endpoint(). If this call fails, the USB core is expected
+ * to call xhci_reset_bandwidth().
+ *
+ * Since we are in the middle of changing either configuration or
+ * installing a new alt setting, the USB core won't allow URBs to be
+ * enqueued for any endpoint on the old config or interface. Nothing
+ * else should be touching the xhci->devs[slot_id] structure, so we
+ * don't need to take the xhci->lock for manipulating that.
+ */
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ int i;
+ int ret = 0;
+ int timeleft;
+ unsigned long flags;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+
+ ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+
+ if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ return -EINVAL;
+ }
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ virt_dev = xhci->devs[udev->slot_id];
+
+ /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
+ virt_dev->in_ctx->add_flags |= SLOT_FLAG;
+ virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
+ virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
+ virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
+ xhci_dbg(xhci, "New Input Control Context:\n");
+ xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
+ LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
+ udev->slot_id);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
+ return -ENOMEM;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Wait for the configure endpoint command to complete */
+ timeleft = wait_for_completion_interruptible_timeout(
+ &virt_dev->cmd_completion,
+ USB_CTRL_SET_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+ /* FIXME cancel the configure endpoint command */
+ return -ETIME;
+ }
+
+ switch (virt_dev->cmd_status) {
+ case COMP_ENOMEM:
+ dev_warn(&udev->dev, "Not enough host controller resources "
+ "for new device state.\n");
+ ret = -ENOMEM;
+ /* FIXME: can we allocate more resources for the HC? */
+ break;
+ case COMP_BW_ERR:
+ dev_warn(&udev->dev, "Not enough bandwidth "
+ "for new device state.\n");
+ ret = -ENOSPC;
+ /* FIXME: can we go back to the old state? */
+ break;
+ case COMP_TRB_ERR:
+ /* the HCD set up something wrong */
+ dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
+ "and endpoint is not disabled.\n");
+ ret = -EINVAL;
+ break;
+ case COMP_SUCCESS:
+ dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
+ break;
+ default:
+ xhci_err(xhci, "ERROR: unexpected command completion "
+ "code 0x%x.\n", virt_dev->cmd_status);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret) {
+ /* Caller should call reset_bandwidth() */
+ return ret;
+ }
+
+ xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
+ LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+
+ xhci_zero_in_ctx(virt_dev);
+ /* Free any old rings */
+ for (i = 1; i < 31; ++i) {
+ if (virt_dev->new_ep_rings[i]) {
+ xhci_ring_free(xhci, virt_dev->ep_rings[i]);
+ virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
+ virt_dev->new_ep_rings[i] = NULL;
+ }
+ }
+
+ return ret;
+}
+
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+ int i, ret;
+
+ ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+ if (ret <= 0)
+ return;
+ xhci = hcd_to_xhci(hcd);
+
+ if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ return;
+ }
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ virt_dev = xhci->devs[udev->slot_id];
+ /* Free any rings allocated for added endpoints */
+ for (i = 0; i < 31; ++i) {
+ if (virt_dev->new_ep_rings[i]) {
+ xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
+ virt_dev->new_ep_rings[i] = NULL;
+ }
+ }
+ xhci_zero_in_ctx(virt_dev);
+}
+
+/*
+ * At this point, the struct usb_device is about to go away, the device has
+ * disconnected, and all traffic has been stopped and the endpoints have been
+ * disabled. Free any HC data structures associated with that device.
+ */
+void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ unsigned long flags;
+
+ if (udev->slot_id == 0)
+ return;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ return;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ /*
+ * Event command completion handler will free any data structures
+ * associated with the slot. XXX Can free sleep?
+ */
+}
+
+/*
+ * Returns 0 if the xHC ran out of device slots, the Enable Slot command
+ * timed out, or allocating memory failed. Returns 1 on success.
+ */
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ unsigned long flags;
+ int timeleft;
+ int ret;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ return 0;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* XXX: how much time for xHC slot assignment? */
+ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+ USB_CTRL_SET_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for a slot\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+ /* FIXME cancel the enable slot request */
+ return 0;
+ }
+
+ if (!xhci->slot_id) {
+ xhci_err(xhci, "Error while assigning device slot ID\n");
+ return 0;
+ }
+ /* xhci_alloc_virt_device() does not touch rings; no need to lock */
+ if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
+ /* Disable slot, if we can do it without mem alloc */
+ xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return 0;
+ }
+ udev->slot_id = xhci->slot_id;
+ /* Is this a LS or FS device under a HS hub? */
+ /* Hub or peripheral? */
+ return 1;
+}
+
+/*
+ * Issue an Address Device command (which will issue a SetAddress request to
+ * the device).
+ * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
+ * we should only issue and wait on one address command at the same time.
+ *
+ * We add one to the device address issued by the hardware because the USB core
+ * uses address 1 for the root hubs (even though they're not really devices).
+ */
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ unsigned long flags;
+ int timeleft;
+ struct xhci_virt_device *virt_dev;
+ int ret = 0;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ u32 temp;
+
+ if (!udev->slot_id) {
+ xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
+ return -EINVAL;
+ }
+
+ virt_dev = xhci->devs[udev->slot_id];
+
+ /* If this is a Set Address to an unconfigured device, setup ep 0 */
+ if (!udev->config)
+ xhci_setup_addressable_virt_dev(xhci, udev);
+ /* Otherwise, assume the core has the device configured how it wants */
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
+ udev->slot_id);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ return ret;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
+ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+ USB_CTRL_SET_TIMEOUT);
+ /* FIXME: From section 4.3.4: "Software shall be responsible for timing
+ * the SetAddress() 'recovery interval' required by USB and aborting the
+ * command on a timeout."
+ */
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for a slot\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+ /* FIXME cancel the address device command */
+ return -ETIME;
+ }
+
+ switch (virt_dev->cmd_status) {
+ case COMP_CTX_STATE:
+ case COMP_EBADSLT:
+ xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
+ udev->slot_id);
+ ret = -EINVAL;
+ break;
+ case COMP_TX_ERR:
+ dev_warn(&udev->dev, "Device not responding to set address.\n");
+ ret = -EPROTO;
+ break;
+ case COMP_SUCCESS:
+ xhci_dbg(xhci, "Successful Address Device command\n");
+ break;
+ default:
+ xhci_err(xhci, "ERROR: unexpected command completion "
+ "code 0x%x.\n", virt_dev->cmd_status);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret) {
+ return ret;
+ }
+ temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
+ xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
+ temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
+ xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
+ xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
+ udev->slot_id,
+ &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
+ xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
+ xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
+ udev->slot_id,
+ &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
+ xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
+ xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
+ (unsigned long long)virt_dev->out_ctx_dma);
+ xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
+ xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
+ xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
+ /*
+ * USB core uses address 1 for the roothubs, so we add one to the
+ * address given back to us by the HC.
+ */
+ udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
+ /* Zero the input context control for later use */
+ virt_dev->in_ctx->add_flags = 0;
+ virt_dev->in_ctx->drop_flags = 0;
+ /* Mirror flags in the output context for future ep enable/disable */
+ virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+ virt_dev->out_ctx->drop_flags = 0;
+
+ xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
+ /* XXX Meh, not sure if anyone else but choose_address uses this. */
+ set_bit(udev->devnum, udev->bus->devmap.devicemap);
+
+ return 0;
+}
+
+int xhci_get_frame(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ /* EHCI mods by the periodic size. Why? */
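+ /* The microframe index register counts 125 us microframes; shifting right
+ * by three (dividing by 8) turns it into a 1 ms frame number.
+ */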
+ return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
+}
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static int __init xhci_hcd_init(void)
+{
+#ifdef CONFIG_PCI
+ int retval = 0;
+
+ retval = xhci_register_pci();
+
+ if (retval < 0) {
+ printk(KERN_DEBUG "Problem registering PCI driver.");
+ return retval;
+ }
+#endif
+ /*
+ * Check the compiler generated sizes of structures that must be laid
+ * out in specific ways for hardware access.
+ */
+ BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
+ /* xhci_device_control has eight fields, and also
+ * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
+ */
+ BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
+ BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
+ /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
+ BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
+ return 0;
+}
+module_init(xhci_hcd_init);
+
+static void __exit xhci_hcd_cleanup(void)
+{
+#ifdef CONFIG_PCI
+ xhci_unregister_pci();
+#endif
+}
+module_exit(xhci_hcd_cleanup);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
new file mode 100644
index 00000000000..eac5b53aa9e
--- /dev/null
+++ b/drivers/usb/host/xhci-hub.c
@@ -0,0 +1,308 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/unaligned.h>
+
+#include "xhci.h"
+
+static void xhci_hub_descriptor(struct xhci_hcd *xhci,
+ struct usb_hub_descriptor *desc)
+{
+ int ports;
+ u16 temp;
+
+ ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+ /* USB 3.0 hubs have a different descriptor, but we fake this for now */
+ desc->bDescriptorType = 0x29;
+ desc->bPwrOn2PwrGood = 10; /* xhci section 5.4.9 says 20ms max */
+ desc->bHubContrCurrent = 0;
+
+ desc->bNbrPorts = ports;
+ temp = 1 + (ports / 8);
+ desc->bDescLength = 7 + 2 * temp;
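+ /* e.g. a root hub with 4 ports gets temp = 1 and bDescLength = 9; more
+ * than 7 ports needs a second DeviceRemovable byte, giving bDescLength = 11.
+ */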
+
+ /* Why does core/hcd.h define bitmap? It's just confusing. */
+ memset(&desc->DeviceRemovable[0], 0, temp);
+ memset(&desc->DeviceRemovable[temp], 0xff, temp);
+
+ /* Ugh, these should be #defines, FIXME */
+ /* Using table 11-13 in USB 2.0 spec. */
+ temp = 0;
+ /* Bits 1:0 - support port power switching, or power always on */
+ if (HCC_PPC(xhci->hcc_params))
+ temp |= 0x0001;
+ else
+ temp |= 0x0002;
+ /* Bit 2 - root hubs are not part of a compound device */
+ /* Bits 4:3 - individual port over current protection */
+ temp |= 0x0008;
+ /* Bits 6:5 - no TTs in root ports */
+ /* Bit 7 - no port indicators */
+ desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp);
+}
+
+static unsigned int xhci_port_speed(unsigned int port_status)
+{
+ if (DEV_LOWSPEED(port_status))
+ return 1 << USB_PORT_FEAT_LOWSPEED;
+ if (DEV_HIGHSPEED(port_status))
+ return 1 << USB_PORT_FEAT_HIGHSPEED;
+ if (DEV_SUPERSPEED(port_status))
+ return 1 << USB_PORT_FEAT_SUPERSPEED;
+ /*
+ * FIXME: Yes, we should check for full speed, but the core uses that as
+ * a default in portspeed() in usb/core/hub.c (which is the only place
+ * USB_PORT_FEAT_*SPEED is used).
+ */
+ return 0;
+}
+
+/*
+ * These bits are Read Only (RO) and should be saved and written to the
+ * registers: 0, 3, 10:13, 30
+ * connect status, over-current status, port speed, and device removable.
+ * connect status and port speed are also sticky - meaning they're in
+ * the AUX well and they aren't changed by a hot, warm, or cold reset.
+ */
+#define XHCI_PORT_RO ((1<<0) | (1<<3) | (0xf<<10) | (1<<30))
+/*
+ * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
+ * bits 5:8, 9, 14:15, 25:27
+ * link state, port power, port indicator state, "wake on" enable state
+ */
+#define XHCI_PORT_RWS ((0xf<<5) | (1<<9) | (0x3<<14) | (0x7<<25))
+/*
+ * These bits are RW; writing a 1 sets the bit, writing a 0 has no effect:
+ * bit 4 (port reset)
+ */
+#define XHCI_PORT_RW1S ((1<<4))
+/*
+ * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
+ * bits 1, 17, 18, 19, 20, 21, 22, 23
+ * port enable/disable, and
+ * change bits: connect, PED, warm port reset changed (reserved zero for USB 2.0 ports),
+ * over-current, reset, link state, and L1 change
+ */
+#define XHCI_PORT_RW1CS ((1<<1) | (0x7f<<17))
+/*
+ * Bit 16 is RW, and writing a '1' to it causes the link state control to be
+ * latched in
+ */
+#define XHCI_PORT_RW ((1<<16))
+/*
+ * These bits are Reserved Zero (RsvdZ) and zero should be written to them:
+ * bits 2, 24, 28:31
+ */
+#define XHCI_PORT_RZ ((1<<2) | (1<<24) | (0xf<<28))
+
+/*
+ * Given a port state, this function returns a value that would result in the
+ * port being in the same state, if the value was written to the port status
+ * control register.
+ * Save Read Only (RO) bits and save read/write bits where
+ * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
+ * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
+ */
+static u32 xhci_port_state_to_neutral(u32 state)
+{
+ /* Save read-only status and port state */
+ return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
+}
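+
+/* Writing the "neutral" value back is safe: change bits such as connect
+ * status change (bit 17) are write-1-to-clear, so masking them to zero here
+ * keeps a later register write from clearing them as a side effect.
+ */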
+
+int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ports;
+ unsigned long flags;
+ u32 temp, status;
+ int retval = 0;
+ u32 __iomem *addr;
+ char *port_change_bit;
+
+ ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ switch (typeReq) {
+ case GetHubStatus:
+ /* No power source, over-current reported per port */
+ memset(buf, 0, 4);
+ break;
+ case GetHubDescriptor:
+ xhci_hub_descriptor(xhci, (struct usb_hub_descriptor *) buf);
+ break;
+ case GetPortStatus:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ status = 0;
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp);
+
+ /* wPortChange bits */
+ if (temp & PORT_CSC)
+ status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ if (temp & PORT_PEC)
+ status |= 1 << USB_PORT_FEAT_C_ENABLE;
+ if ((temp & PORT_OCC))
+ status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+ /*
+ * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
+ * changes
+ */
+ if (temp & PORT_CONNECT) {
+ status |= 1 << USB_PORT_FEAT_CONNECTION;
+ status |= xhci_port_speed(temp);
+ }
+ if (temp & PORT_PE)
+ status |= 1 << USB_PORT_FEAT_ENABLE;
+ if (temp & PORT_OC)
+ status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+ if (temp & PORT_RESET)
+ status |= 1 << USB_PORT_FEAT_RESET;
+ if (temp & PORT_POWER)
+ status |= 1 << USB_PORT_FEAT_POWER;
+ xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
+ put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+ break;
+ case SetPortFeature:
+ wIndex &= 0xff;
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ /*
+ * Turn on ports, even if there isn't per-port switching.
+ * HC will report connect events even before this is set.
+ * However, khubd will ignore the roothub events until
+ * the roothub is registered.
+ */
+ xhci_writel(xhci, temp | PORT_POWER, addr);
+
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n", wIndex, temp);
+ break;
+ case USB_PORT_FEAT_RESET:
+ temp = (temp | PORT_RESET);
+ xhci_writel(xhci, temp, addr);
+
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp);
+ break;
+ default:
+ goto error;
+ }
+ temp = xhci_readl(xhci, addr); /* unblock any posted writes */
+ break;
+ case ClearPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ addr = &xhci->op_regs->port_status_base +
+ NUM_PORT_REGS*(wIndex & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ switch (wValue) {
+ case USB_PORT_FEAT_C_RESET:
+ status = PORT_RC;
+ port_change_bit = "reset";
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ status = PORT_CSC;
+ port_change_bit = "connect";
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ status = PORT_OCC;
+ port_change_bit = "over-current";
+ break;
+ default:
+ goto error;
+ }
+ /* Change bits are all write 1 to clear */
+ xhci_writel(xhci, temp | status, addr);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
+ port_change_bit, wIndex, temp);
+ temp = xhci_readl(xhci, addr); /* unblock any posted writes */
+ break;
+ default:
+error:
+ /* "stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return retval;
+}
+
+/*
+ * Returns 0 if the status hasn't changed, or the number of bytes in buf.
+ * Ports are 0-indexed from the HCD point of view,
+ * and 1-indexed from the USB core point of view.
+ * xHCI instances can have up to 127 ports, so FIXME if you see more than 15.
+ *
+ * Note that the status change bits will be cleared as soon as a port status
+ * change event is generated, so we use the saved status from that event.
+ */
+int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ unsigned long flags;
+ u32 temp, status;
+ int i, retval;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ports;
+ u32 __iomem *addr;
+
+ ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+ /* Initial status is no changes */
+ buf[0] = 0;
+ status = 0;
+ if (ports > 7) {
+ buf[1] = 0;
+ retval = 2;
+ } else {
+ retval = 1;
+ }
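+ /* buf is a hub status-change bitmap: bit 0 is reserved for the hub itself
+ * and bit (port + 1) flags a change on that port, so up to 15 ports fit in
+ * the two bytes handled here.
+ */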
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* For each port, did anything change? If so, set that bit in buf. */
+ for (i = 0; i < ports; i++) {
+ addr = &xhci->op_regs->port_status_base +
+ NUM_PORT_REGS*i;
+ temp = xhci_readl(xhci, addr);
+ if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) {
+ if (i < 7)
+ buf[0] |= 1 << (i + 1);
+ else
+ buf[1] |= 1 << (i - 7);
+ status = 1;
+ }
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return status ? retval : 0;
+}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
new file mode 100644
index 00000000000..c8a72de1c50
--- /dev/null
+++ b/drivers/usb/host/xhci-mem.c
@@ -0,0 +1,769 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/usb.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+
+#include "xhci.h"
+
+/*
+ * Allocates a generic ring segment from the ring pool, sets the dma address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * Section 4.11.1.1:
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
+{
+ struct xhci_segment *seg;
+ dma_addr_t dma;
+
+ seg = kzalloc(sizeof *seg, flags);
+ if (!seg)
+ return NULL;
+ xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
+
+ seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
+ if (!seg->trbs) {
+ kfree(seg);
+ return NULL;
+ }
+ xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
+ seg->trbs, (unsigned long long)dma);
+
+ memset(seg->trbs, 0, SEGMENT_SIZE);
+ seg->dma = dma;
+ seg->next = NULL;
+
+ return seg;
+}
+
+static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+ if (!seg)
+ return;
+ if (seg->trbs) {
+ xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
+ seg->trbs, (unsigned long long)seg->dma);
+ dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
+ seg->trbs = NULL;
+ }
+ xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
+ kfree(seg);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment. The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
+ */
+static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
+ struct xhci_segment *next, bool link_trbs)
+{
+ u32 val;
+
+ if (!prev || !next)
+ return;
+ prev->next = next;
+ if (link_trbs) {
+ prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+
+ /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
+ val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
+ val &= ~TRB_TYPE_BITMASK;
+ val |= TRB_TYPE(TRB_LINK);
+ prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
+ }
+ xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
+ (unsigned long long)prev->dma,
+ (unsigned long long)next->dma);
+}
+
+/* XXX: Do we need the hcd structure in all these functions? */
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ struct xhci_segment *seg;
+ struct xhci_segment *first_seg;
+
+ if (!ring || !ring->first_seg)
+ return;
+ first_seg = ring->first_seg;
+ seg = first_seg->next;
+ xhci_dbg(xhci, "Freeing ring at %p\n", ring);
+ while (seg != first_seg) {
+ struct xhci_segment *next = seg->next;
+ xhci_segment_free(xhci, seg);
+ seg = next;
+ }
+ xhci_segment_free(xhci, first_seg);
+ ring->first_seg = NULL;
+ kfree(ring);
+}
+
+/**
+ * Create a new ring with zero or more segments.
+ *
+ * Link each segment together into a ring.
+ * Set the end flag and the cycle toggle bit on the last segment.
+ * See section 4.9.1 and figures 15 and 16.
+ */
+static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+ unsigned int num_segs, bool link_trbs, gfp_t flags)
+{
+ struct xhci_ring *ring;
+ struct xhci_segment *prev;
+
+ ring = kzalloc(sizeof *(ring), flags);
+ xhci_dbg(xhci, "Allocating ring at %p\n", ring);
+ if (!ring)
+ return NULL;
+
+ INIT_LIST_HEAD(&ring->td_list);
+ INIT_LIST_HEAD(&ring->cancelled_td_list);
+ if (num_segs == 0)
+ return ring;
+
+ ring->first_seg = xhci_segment_alloc(xhci, flags);
+ if (!ring->first_seg)
+ goto fail;
+ num_segs--;
+
+ prev = ring->first_seg;
+ while (num_segs > 0) {
+ struct xhci_segment *next;
+
+ next = xhci_segment_alloc(xhci, flags);
+ if (!next)
+ goto fail;
+ xhci_link_segments(xhci, prev, next, link_trbs);
+
+ prev = next;
+ num_segs--;
+ }
+ xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+
+ if (link_trbs) {
+ /* See section 4.9.2.1 and 6.4.4.1 */
+ prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
+ xhci_dbg(xhci, "Wrote link toggle flag to"
+ " segment %p (virtual), 0x%llx (DMA)\n",
+ prev, (unsigned long long)prev->dma);
+ }
+ /* The ring is empty, so the enqueue pointer == dequeue pointer */
+ ring->enqueue = ring->first_seg->trbs;
+ ring->enq_seg = ring->first_seg;
+ ring->dequeue = ring->enqueue;
+ ring->deq_seg = ring->first_seg;
+ /* The ring is initialized to 0. The producer must write 1 to the cycle
+ * bit to hand over ownership of the TRB, so PCS = 1. The consumer must
+ * compare CCS to the cycle bit to check ownership, so CCS = 1.
+ */
+ ring->cycle_state = 1;
+
+ return ring;
+
+fail:
+ xhci_ring_free(xhci, ring);
+ return NULL;
+}
+
+/* All the xhci_tds in the ring's TD list should be freed at this point */
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+{
+ struct xhci_virt_device *dev;
+ int i;
+
+ /* Slot ID 0 is reserved */
+ if (slot_id == 0 || !xhci->devs[slot_id])
+ return;
+
+ dev = xhci->devs[slot_id];
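+ /* Each DCBAA entry is a 64-bit pointer stored as two consecutive 32-bit
+ * words, hence the 2 * slot_id indexing below.
+ */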
+ xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
+ xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+ if (!dev)
+ return;
+
+ for (i = 0; i < 31; ++i)
+ if (dev->ep_rings[i])
+ xhci_ring_free(xhci, dev->ep_rings[i]);
+
+ if (dev->in_ctx)
+ dma_pool_free(xhci->device_pool,
+ dev->in_ctx, dev->in_ctx_dma);
+ if (dev->out_ctx)
+ dma_pool_free(xhci->device_pool,
+ dev->out_ctx, dev->out_ctx_dma);
+ kfree(xhci->devs[slot_id]);
+ xhci->devs[slot_id] = NULL;
+}
+
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+ struct usb_device *udev, gfp_t flags)
+{
+ dma_addr_t dma;
+ struct xhci_virt_device *dev;
+
+ /* Slot ID 0 is reserved */
+ if (slot_id == 0 || xhci->devs[slot_id]) {
+ xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
+ return 0;
+ }
+
+ xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
+ if (!xhci->devs[slot_id])
+ return 0;
+ dev = xhci->devs[slot_id];
+
+ /* Allocate the (output) device context that will be used in the HC */
+ dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+ if (!dev->out_ctx)
+ goto fail;
+ dev->out_ctx_dma = dma;
+ xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
+ (unsigned long long)dma);
+ memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
+
+ /* Allocate the (input) device context for address device command */
+ dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+ if (!dev->in_ctx)
+ goto fail;
+ dev->in_ctx_dma = dma;
+ xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
+ (unsigned long long)dma);
+ memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
+
+ /* Allocate endpoint 0 ring */
+ dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
+ if (!dev->ep_rings[0])
+ goto fail;
+
+ init_completion(&dev->cmd_completion);
+
+ /*
+ * Point to output device context in dcbaa; skip the output control
+ * context, which is eight 32 bit fields (or 32 bytes long)
+ */
+ xhci->dcbaa->dev_context_ptrs[2*slot_id] =
+ (u32) dev->out_ctx_dma + (32);
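+ /* The 32-byte offset skips those eight control fields so the DCBAA entry
+ * points at the slot context of the output device context.
+ */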
+ xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
+ slot_id,
+ &xhci->dcbaa->dev_context_ptrs[2*slot_id],
+ (unsigned long long)dev->out_ctx_dma);
+ xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+
+ return 1;
+fail:
+ xhci_free_virt_device(xhci, slot_id);
+ return 0;
+}
+
+/* Setup an xHCI virtual device for a Set Address command */
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
+{
+ struct xhci_virt_device *dev;
+ struct xhci_ep_ctx *ep0_ctx;
+ struct usb_device *top_dev;
+
+ dev = xhci->devs[udev->slot_id];
+ /* Slot ID 0 is reserved */
+ if (udev->slot_id == 0 || !dev) {
+ xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
+ udev->slot_id);
+ return -EINVAL;
+ }
+ ep0_ctx = &dev->in_ctx->ep[0];
+
+ /* 2) New slot context and endpoint 0 context are valid */
+ dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+
+ /* 3) Only the control endpoint is valid - one endpoint context */
+ dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+ dev->in_ctx->slot.dev_info |= (u32) udev->route;
+ dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
+ break;
+ case USB_SPEED_HIGH:
+ dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
+ break;
+ case USB_SPEED_FULL:
+ dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
+ break;
+ case USB_SPEED_LOW:
+ dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
+ break;
+ case USB_SPEED_VARIABLE:
+ xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
+ return -EINVAL;
+ break;
+ default:
+ /* Speed was set earlier, this shouldn't happen. */
+ BUG();
+ }
+ /* Find the root hub port this device is under */
+ for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
+ top_dev = top_dev->parent)
+ /* Found device below root hub */;
+ dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
+ xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
+
+ /* Is this a LS/FS device under a HS hub? */
+ /*
+ * FIXME: I don't think this is right, where does the TT info for the
+ * roothub or parent hub come from?
+ */
+ if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
+ udev->tt) {
+ dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
+ dev->in_ctx->slot.tt_info |= udev->ttport << 8;
+ }
+ xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
+ xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
+
+ /* Step 4 - ring already allocated */
+ /* Step 5 */
+ ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
+ /*
+ * See section 4.3 bullet 6:
+ * The default Max Packet size for ep0 is "8 bytes for a USB2
+ * LS/FS/HS device or 512 bytes for a USB3 SS device"
+ * XXX: Not sure about wireless USB devices.
+ */
+ if (udev->speed == USB_SPEED_SUPER)
+ ep0_ctx->ep_info2 |= MAX_PACKET(512);
+ else
+ ep0_ctx->ep_info2 |= MAX_PACKET(8);
+ /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
+ ep0_ctx->ep_info2 |= MAX_BURST(0);
+ ep0_ctx->ep_info2 |= ERROR_COUNT(3);
+
+ ep0_ctx->deq[0] =
+ dev->ep_rings[0]->first_seg->dma;
+ ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
+ ep0_ctx->deq[1] = 0;
+
+ /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
+
+ return 0;
+}
+
+/* Return the polling or NAK interval.
+ *
+ * The polling interval is expressed in "microframes". If xHCI's Interval field
+ * is set to N, the HC will service the endpoint every 2^N * 125us.
+ *
+ * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
+ * is set to 0.
+ */
+static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int interval = 0;
+
+ switch (udev->speed) {
+ case USB_SPEED_HIGH:
+ /* Max NAK rate */
+ if (usb_endpoint_xfer_control(&ep->desc) ||
+ usb_endpoint_xfer_bulk(&ep->desc))
+ interval = ep->desc.bInterval;
+ /* Fall through - SS and HS isoc/int have same decoding */
+ case USB_SPEED_SUPER:
+ if (usb_endpoint_xfer_int(&ep->desc) ||
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ if (ep->desc.bInterval == 0)
+ interval = 0;
+ else
+ interval = ep->desc.bInterval - 1;
+ if (interval > 15)
+ interval = 15;
+ if (interval != ep->desc.bInterval - 1)
+ dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+ ep->desc.bEndpointAddress, 1 << interval);
+ }
+ break;
+ /* Convert bInterval (in 1-255 frames) to microframes and round down to
+ * nearest power of 2.
+ */
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ if (usb_endpoint_xfer_int(&ep->desc) ||
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ interval = fls(8*ep->desc.bInterval) - 1;
+ if (interval > 10)
+ interval = 10;
+ if (interval < 3)
+ interval = 3;
+ if ((1 << interval) != 8*ep->desc.bInterval)
+ dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+ ep->desc.bEndpointAddress, 1 << interval);
+ }
+ break;
+ default:
+ BUG();
+ }
+ return EP_INTERVAL(interval);
+}
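+
+/* For example (illustrative values): a high-speed interrupt endpoint with
+ * bInterval = 4 gets an Interval field of 3, i.e. service every
+ * 2^3 * 125us = 1 ms; a full-speed interrupt endpoint with bInterval = 10
+ * frames gets fls(80) - 1 = 6, i.e. every 2^6 * 125us = 8 ms.
+ */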
+
+static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ int in;
+ u32 type;
+
+ in = usb_endpoint_dir_in(&ep->desc);
+ if (usb_endpoint_xfer_control(&ep->desc)) {
+ type = EP_TYPE(CTRL_EP);
+ } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
+ if (in)
+ type = EP_TYPE(BULK_IN_EP);
+ else
+ type = EP_TYPE(BULK_OUT_EP);
+ } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
+ if (in)
+ type = EP_TYPE(ISOC_IN_EP);
+ else
+ type = EP_TYPE(ISOC_OUT_EP);
+ } else if (usb_endpoint_xfer_int(&ep->desc)) {
+ if (in)
+ type = EP_TYPE(INT_IN_EP);
+ else
+ type = EP_TYPE(INT_OUT_EP);
+ } else {
+ BUG();
+ }
+ return type;
+}
+
+int xhci_endpoint_init(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct usb_device *udev,
+ struct usb_host_endpoint *ep,
+ gfp_t mem_flags)
+{
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_ring *ep_ring;
+ unsigned int max_packet;
+ unsigned int max_burst;
+
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+
+ /* Set up the endpoint ring */
+ virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
+ if (!virt_dev->new_ep_rings[ep_index])
+ return -ENOMEM;
+ ep_ring = virt_dev->new_ep_rings[ep_index];
+ ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
+ ep_ctx->deq[1] = 0;
+
+ ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
+
+ /* FIXME dig Mult and streams info out of ep companion desc */
+
+ /* Allow 3 retries for everything but isoc */
+ if (!usb_endpoint_xfer_isoc(&ep->desc))
+ ep_ctx->ep_info2 = ERROR_COUNT(3);
+ else
+ ep_ctx->ep_info2 = ERROR_COUNT(0);
+
+ ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
+
+ /* Set the max packet size and max burst */
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+ max_packet = ep->desc.wMaxPacketSize;
+ ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+ /* dig out max burst from ep companion desc */
+ max_packet = ep->ss_ep_comp->desc.bMaxBurst;
+ ep_ctx->ep_info2 |= MAX_BURST(max_packet);
+ break;
+ case USB_SPEED_HIGH:
+ /* bits 11:12 specify the number of additional transaction
+ * opportunities per microframe (USB 2.0, section 9.6.6)
+ */
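+ /* e.g. wMaxPacketSize 0x0b40 has bits 12:11 = 01b (one additional
+ * transaction per microframe), so Max Burst is programmed to 1.
+ */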
+ if (usb_endpoint_xfer_isoc(&ep->desc) ||
+ usb_endpoint_xfer_int(&ep->desc)) {
+ max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+ ep_ctx->ep_info2 |= MAX_BURST(max_burst);
+ }
+ /* Fall through */
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ max_packet = ep->desc.wMaxPacketSize & 0x3ff;
+ ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+ break;
+ default:
+ BUG();
+ }
+ /* FIXME Debug endpoint context */
+ return 0;
+}
+
+void xhci_endpoint_zero(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = 0;
+ ep_ctx->deq[0] = 0;
+ ep_ctx->deq[1] = 0;
+ ep_ctx->tx_info = 0;
+ /* Don't free the endpoint ring until the set interface or configuration
+ * request succeeds.
+ */
+}
+
+void xhci_mem_cleanup(struct xhci_hcd *xhci)
+{
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ int size;
+ int i;
+
+ /* Free the Event Ring Segment Table and the actual Event Ring */
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+ size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
+ if (xhci->erst.entries)
+ pci_free_consistent(pdev, size,
+ xhci->erst.entries, xhci->erst.erst_dma_addr);
+ xhci->erst.entries = NULL;
+ xhci_dbg(xhci, "Freed ERST\n");
+ if (xhci->event_ring)
+ xhci_ring_free(xhci, xhci->event_ring);
+ xhci->event_ring = NULL;
+ xhci_dbg(xhci, "Freed event ring\n");
+
+ xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
+ xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+ if (xhci->cmd_ring)
+ xhci_ring_free(xhci, xhci->cmd_ring);
+ xhci->cmd_ring = NULL;
+ xhci_dbg(xhci, "Freed command ring\n");
+
+ for (i = 1; i < MAX_HC_SLOTS; ++i)
+ xhci_free_virt_device(xhci, i);
+
+ if (xhci->segment_pool)
+ dma_pool_destroy(xhci->segment_pool);
+ xhci->segment_pool = NULL;
+ xhci_dbg(xhci, "Freed segment pool\n");
+
+ if (xhci->device_pool)
+ dma_pool_destroy(xhci->device_pool);
+ xhci->device_pool = NULL;
+ xhci_dbg(xhci, "Freed device context pool\n");
+
+ xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
+ xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
+ if (xhci->dcbaa)
+ pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
+ xhci->dcbaa, xhci->dcbaa->dma);
+ xhci->dcbaa = NULL;
+
+ xhci->page_size = 0;
+ xhci->page_shift = 0;
+}
+
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+ dma_addr_t dma;
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ unsigned int val, val2;
+ struct xhci_segment *seg;
+ u32 page_size;
+ int i;
+
+ page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
+ xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+ for (i = 0; i < 16; i++) {
+ if ((0x1 & page_size) != 0)
+ break;
+ page_size = page_size >> 1;
+ }
+ if (i < 16)
+ xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
+ else
+ xhci_warn(xhci, "WARN: no supported page size\n");
+ /* Use 4K pages, since that's common and the minimum the HC supports */
+ xhci->page_shift = 12;
+ xhci->page_size = 1 << xhci->page_shift;
+ xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
+
+ /*
+ * Program the Number of Device Slots Enabled field in the CONFIG
+ * register with the max value of slots the HC can handle.
+ */
+ val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
+ xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
+ (unsigned int) val);
+ val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
+ val |= (val2 & ~HCS_SLOTS_MASK);
+ xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
+ (unsigned int) val);
+ xhci_writel(xhci, val, &xhci->op_regs->config_reg);
+
+ /*
+ * The Device Context Base Address Array must be
+ * "physically contiguous and 64-byte (cache line) aligned".
+ */
+ xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
+ sizeof(*xhci->dcbaa), &dma);
+ if (!xhci->dcbaa)
+ goto fail;
+ memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
+ xhci->dcbaa->dma = dma;
+ xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+ (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
+ xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
+ xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
+
+ /*
+ * Initialize the ring segment pool. The ring must be a contiguous
+ * structure comprised of TRBs. The TRBs must be 16 byte aligned,
+ * however, the command ring segment needs 64-byte aligned segments,
+ * so we pick the greater alignment need.
+ */
+ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+ SEGMENT_SIZE, 64, xhci->page_size);
+ /* See Table 46 and Note on Figure 55 */
+ /* FIXME support 64-byte contexts */
+ xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
+ sizeof(struct xhci_device_control),
+ 64, xhci->page_size);
+ if (!xhci->segment_pool || !xhci->device_pool)
+ goto fail;
+
+ /* Set up the command ring to have one segment for now. */
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+ if (!xhci->cmd_ring)
+ goto fail;
+ xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
+ xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+ (unsigned long long)xhci->cmd_ring->first_seg->dma);
+
+ /* Set the address in the Command Ring Control register */
+ val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+ val = (val & ~CMD_RING_ADDR_MASK) |
+ (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+ xhci->cmd_ring->cycle_state;
+ xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
+ xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
+ xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
+ xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+ xhci_dbg_cmd_ptrs(xhci);
+
+ val = xhci_readl(xhci, &xhci->cap_regs->db_off);
+ val &= DBOFF_MASK;
+ xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
+ " from cap regs base addr\n", val);
+ xhci->dba = (void *) xhci->cap_regs + val;
+ xhci_dbg_regs(xhci);
+ xhci_print_run_regs(xhci);
+ /* Set ir_set to interrupt register set 0 */
+ xhci->ir_set = (void *) xhci->run_regs->ir_set;
+
+ /*
+ * Event ring setup: Allocate a normal ring, but also setup
+ * the event ring segment table (ERST). Section 4.9.3.
+ */
+ xhci_dbg(xhci, "// Allocating event ring\n");
+ xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+ if (!xhci->event_ring)
+ goto fail;
+
+ xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
+ sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
+ if (!xhci->erst.entries)
+ goto fail;
+ xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+ (unsigned long long)dma);
+
+ memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+ xhci->erst.num_entries = ERST_NUM_SEGS;
+ xhci->erst.erst_dma_addr = dma;
+ xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
+ xhci->erst.num_entries,
+ xhci->erst.entries,
+ (unsigned long long)xhci->erst.erst_dma_addr);
+
+ /* set ring base address and size for each segment table entry */
+ for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
+ struct xhci_erst_entry *entry = &xhci->erst.entries[val];
+ entry->seg_addr[0] = seg->dma;
+ entry->seg_addr[1] = 0;
+ entry->seg_size = TRBS_PER_SEGMENT;
+ entry->rsvd = 0;
+ seg = seg->next;
+ }
+
+ /* set ERST count with the number of entries in the segment table */
+ val = xhci_readl(xhci, &xhci->ir_set->erst_size);
+ val &= ERST_SIZE_MASK;
+ val |= ERST_NUM_SEGS;
+ xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+ val);
+ xhci_writel(xhci, val, &xhci->ir_set->erst_size);
+
+ xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+ /* set the segment table base address */
+ xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+ (unsigned long long)xhci->erst.erst_dma_addr);
+ val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
+ val &= ERST_PTR_MASK;
+ val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
+ xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+
+ /* Set the event ring dequeue address */
+ xhci_set_hc_event_deq(xhci);
+ xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+ xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+ /*
+ * XXX: Might need to set the Interrupter Moderation Register to
+ * something other than the default (~1ms minimum between interrupts).
+ * See section 5.5.1.2.
+ */
+ init_completion(&xhci->addr_dev);
+ for (i = 0; i < MAX_HC_SLOTS; ++i)
+ xhci->devs[i] = NULL;
+
+ return 0;
+fail:
+ xhci_warn(xhci, "Couldn't initialize memory\n");
+ xhci_mem_cleanup(xhci);
+ return -ENOMEM;
+}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
new file mode 100644
index 00000000000..1462709e26c
--- /dev/null
+++ b/drivers/usb/host/xhci-pci.c
@@ -0,0 +1,166 @@
+/*
+ * xHCI host controller driver PCI Bus Glue.
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/pci.h>
+
+#include "xhci.h"
+
+static const char hcd_name[] = "xhci_hcd";
+
+/* called after powerup, by probe or system-pm "wakeup" */
+static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
+{
+ /*
+ * TODO: Implement finding debug ports later.
+ * TODO: see if there are any quirks that need to be added to handle
+ * new extended capabilities.
+ */
+
+ /* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
+ if (!pci_set_mwi(pdev))
+ xhci_dbg(xhci, "MWI active\n");
+
+ xhci_dbg(xhci, "Finished xhci_pci_reinit\n");
+ return 0;
+}
+
+/* called during probe() after chip reset completes */
+static int xhci_pci_setup(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ int retval;
+
+ xhci->cap_regs = hcd->regs;
+ xhci->op_regs = hcd->regs +
+ HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
+ xhci->run_regs = hcd->regs +
+ (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
+ /* Cache read-only capability registers */
+ xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+ xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+ xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ xhci_print_registers(xhci);
+
+ /* Make sure the HC is halted. */
+ retval = xhci_halt(xhci);
+ if (retval)
+ return retval;
+
+ xhci_dbg(xhci, "Resetting HCD\n");
+ /* Reset the internal HC memory state and registers. */
+ retval = xhci_reset(xhci);
+ if (retval)
+ return retval;
+ xhci_dbg(xhci, "Reset complete\n");
+
+ xhci_dbg(xhci, "Calling HCD init\n");
+ /* Initialize HCD and host controller data structures. */
+ retval = xhci_init(hcd);
+ if (retval)
+ return retval;
+ xhci_dbg(xhci, "Called HCD init\n");
+
+ pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
+ xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
+
+ /* Find any debug ports */
+ return xhci_pci_reinit(xhci, pdev);
+}
+
+static const struct hc_driver xhci_pci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "xHCI Host Controller",
+ .hcd_priv_size = sizeof(struct xhci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = xhci_irq,
+ .flags = HCD_MEMORY | HCD_USB3,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = xhci_pci_setup,
+ .start = xhci_run,
+ /* suspend and resume implemented later */
+ .stop = xhci_stop,
+ .shutdown = xhci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = xhci_urb_enqueue,
+ .urb_dequeue = xhci_urb_dequeue,
+ .alloc_dev = xhci_alloc_dev,
+ .free_dev = xhci_free_dev,
+ .add_endpoint = xhci_add_endpoint,
+ .drop_endpoint = xhci_drop_endpoint,
+ .check_bandwidth = xhci_check_bandwidth,
+ .reset_bandwidth = xhci_reset_bandwidth,
+ .address_device = xhci_address_device,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = xhci_get_frame,
+
+ /* Root hub support */
+ .hub_control = xhci_hub_control,
+ .hub_status_data = xhci_hub_status_data,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* PCI driver selection metadata; PCI hotplugging uses this */
+static const struct pci_device_id pci_ids[] = { {
+ /* handle any USB 3.0 xHCI controller */
+ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
+ .driver_data = (unsigned long) &xhci_pci_hc_driver,
+ },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/* pci driver glue; this is a "new style" PCI driver module */
+static struct pci_driver xhci_pci_driver = {
+ .name = (char *) hcd_name,
+ .id_table = pci_ids,
+
+ .probe = usb_hcd_pci_probe,
+ .remove = usb_hcd_pci_remove,
+ /* suspend and resume implemented later */
+
+ .shutdown = usb_hcd_pci_shutdown,
+};
+
+int xhci_register_pci(void)
+{
+ return pci_register_driver(&xhci_pci_driver);
+}
+
+void xhci_unregister_pci(void)
+{
+ pci_unregister_driver(&xhci_pci_driver);
+}
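+
+/*
+ * Note (assumption; the real wiring is in xhci.c, which is not part of this
+ * hunk): these two helpers are meant to be called from the driver's module
+ * init/exit path, roughly:
+ *
+ *	static int __init xhci_hcd_init(void)
+ *	{
+ *		return xhci_register_pci();
+ *	}
+ *	module_init(xhci_hcd_init);
+ *
+ *	static void __exit xhci_hcd_cleanup(void)
+ *	{
+ *		xhci_unregister_pci();
+ *	}
+ *	module_exit(xhci_hcd_cleanup);
+ */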
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
new file mode 100644
index 00000000000..02d81985c45
--- /dev/null
+++ b/drivers/usb/host/xhci-ring.c
@@ -0,0 +1,1648 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Ring initialization rules:
+ * 1. Each segment is initialized to zero, except for link TRBs.
+ * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
+ * Consumer Cycle State (CCS), depending on ring function.
+ * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
+ *
+ * Ring behavior rules:
+ * 1. A ring is empty if enqueue == dequeue. This means there will always be at
+ * least one free TRB in the ring. This is useful if you want to turn that
+ * into a link TRB and expand the ring.
+ * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
+ * link TRB, then load the pointer with the address in the link TRB. If the
+ * link TRB had its toggle bit set, you may need to update the ring cycle
+ * state (see cycle bit rules). You may have to do this multiple times
+ * until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ * equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ * in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ * in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ * Update enqueue pointer between each write (which may update the ring
+ * cycle state).
+ * 3. Notify the consumer. If SW is the producer, it rings the doorbell for
+ *    command and endpoint rings. If the HC is the producer for the event ring,
+ *    it generates an interrupt according to the interrupt moderation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
+ * the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ * continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer. SW is the consumer for the event ring, and it
+ *    updates the event ring dequeue pointer. HC is the consumer for the command
+ *    and endpoint rings; it generates events on the event ring for these.
+ */
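+
+/*
+ * Illustrative sketch only (not used by the driver code below): a consumer
+ * loop that follows the cycle bit rules above. The real logic lives in
+ * inc_deq() and xhci_handle_event() further down; handle_one_trb() is a
+ * hypothetical placeholder.
+ *
+ *	while ((ring->dequeue->event_cmd.flags & TRB_CYCLE) ==
+ *			ring->cycle_state) {
+ *		handle_one_trb(ring->dequeue);
+ *		inc_deq(xhci, ring, true);
+ *	}
+ *	xhci_set_hc_event_deq(xhci);
+ */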
+
+#include <linux/scatterlist.h>
+#include "xhci.h"
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
+ union xhci_trb *trb)
+{
+ unsigned long segment_offset;
+
+ if (!seg || !trb || trb < seg->trbs)
+ return 0;
+ /* offset in TRBs */
+ segment_offset = trb - seg->trbs;
+ if (segment_offset >= TRBS_PER_SEGMENT)
+ return 0;
+ return seg->dma + (segment_offset * sizeof(*trb));
+}
+
+/* Does this link TRB point to the first segment in a ring,
+ * or was the previous TRB the last TRB on the last segment in the ERST?
+ */
+static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ struct xhci_segment *seg, union xhci_trb *trb)
+{
+ if (ring == xhci->event_ring)
+ return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
+ (seg->next == xhci->event_ring->first_seg);
+ else
+ return trb->link.control & LINK_TOGGLE;
+}
+
+/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
+ * segment? I.e. would the updated event TRB pointer step off the end of the
+ * event seg?
+ */
+static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ struct xhci_segment *seg, union xhci_trb *trb)
+{
+ if (ring == xhci->event_ring)
+ return trb == &seg->trbs[TRBS_PER_SEGMENT];
+ else
+ return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
+}
+
+/* Updates trb to point to the next TRB in the ring, and updates seg if the next
+ * TRB is in a new segment. This does not skip over link TRBs, and it does not
+ * affect the ring dequeue or enqueue pointers.
+ */
+static void next_trb(struct xhci_hcd *xhci,
+ struct xhci_ring *ring,
+ struct xhci_segment **seg,
+ union xhci_trb **trb)
+{
+ if (last_trb(xhci, ring, *seg, *trb)) {
+ *seg = (*seg)->next;
+ *trb = ((*seg)->trbs);
+ } else {
+ (*trb)++;
+ }
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ */
+static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+ union xhci_trb *next = ++(ring->dequeue);
+
+ ring->deq_updates++;
+ /* Update the dequeue pointer further if that was a link TRB or we're at
+ * the end of an event ring segment (which doesn't have link TRBs)
+ */
+ while (last_trb(xhci, ring, ring->deq_seg, next)) {
+ if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+ ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ if (!in_interrupt())
+ xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+ ring,
+ (unsigned int) ring->cycle_state);
+ }
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ next = ring->dequeue;
+ }
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
+ * set, but other sections talk about dealing with the chain bit set.
+ * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
+ */
+static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+ u32 chain;
+ union xhci_trb *next;
+
+ chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
+ next = ++(ring->enqueue);
+
+ ring->enq_updates++;
+ /* Update the enqueue pointer further if that was a link TRB or we're at
+ * the end of an event ring segment (which doesn't have link TRBs)
+ */
+ while (last_trb(xhci, ring, ring->enq_seg, next)) {
+ if (!consumer) {
+ if (ring != xhci->event_ring) {
+ next->link.control &= ~TRB_CHAIN;
+ next->link.control |= chain;
+ /* Give this link TRB to the hardware */
+ wmb();
+ if (next->link.control & TRB_CYCLE)
+ next->link.control &= (u32) ~TRB_CYCLE;
+ else
+ next->link.control |= (u32) TRB_CYCLE;
+ }
+ /* Toggle the cycle bit after the last ring segment. */
+ if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
+ ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ if (!in_interrupt())
+ xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+ ring,
+ (unsigned int) ring->cycle_state);
+ }
+ }
+ ring->enq_seg = ring->enq_seg->next;
+ ring->enqueue = ring->enq_seg->trbs;
+ next = ring->enqueue;
+ }
+}
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring. See rules
+ * above.
+ * FIXME: this would be simpler and faster if we just kept track of the number
+ * of free TRBs in a ring.
+ */
+static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ unsigned int num_trbs)
+{
+ int i;
+ union xhci_trb *enq = ring->enqueue;
+ struct xhci_segment *enq_seg = ring->enq_seg;
+
+ /* Check if ring is empty */
+ if (enq == ring->dequeue)
+ return 1;
+ /* Make sure there's an extra empty TRB available */
+ for (i = 0; i <= num_trbs; ++i) {
+ if (enq == ring->dequeue)
+ return 0;
+ enq++;
+ while (last_trb(xhci, ring, enq_seg, enq)) {
+ enq_seg = enq_seg->next;
+ enq = enq_seg->trbs;
+ }
+ }
+ return 1;
+}
+
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+{
+ u32 temp;
+ dma_addr_t deq;
+
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue);
+ if (deq == 0 && !in_interrupt())
+ xhci_warn(xhci, "WARN something wrong with SW event ring "
+ "dequeue ptr.\n");
+ /* Update HC event ring dequeue pointer */
+ temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+ temp &= ERST_PTR_MASK;
+ if (!in_interrupt())
+ xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+ xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
+ &xhci->ir_set->erst_dequeue[0]);
+}
+
+/* Ring the host controller doorbell after placing a command on the ring */
+void xhci_ring_cmd_db(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ xhci_dbg(xhci, "// Ding dong!\n");
+ temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
+ xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+ /* Flush PCI posted writes */
+ xhci_readl(xhci, &xhci->dba->doorbell[0]);
+}
+
+static void ring_ep_doorbell(struct xhci_hcd *xhci,
+ unsigned int slot_id,
+ unsigned int ep_index)
+{
+ struct xhci_ring *ep_ring;
+ u32 field;
+ __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+
+ ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+ /* Don't ring the doorbell for this endpoint if there are pending
+ * cancellations because we don't want to interrupt processing.
+ */
+ if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
+ field = xhci_readl(xhci, db_addr) & DB_MASK;
+ xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
+ /* Flush PCI posted writes - FIXME Matthew Wilcox says this
+ * isn't time-critical and we shouldn't make the CPU wait for
+ * the flush.
+ */
+ xhci_readl(xhci, db_addr);
+ }
+}
+
+/*
+ * Find the segment that trb is in. Start searching in start_seg.
+ * If we must move past a segment that has a link TRB with a toggle cycle state
+ * bit set, then we will toggle the value pointed at by cycle_state.
+ */
+static struct xhci_segment *find_trb_seg(
+ struct xhci_segment *start_seg,
+ union xhci_trb *trb, int *cycle_state)
+{
+ struct xhci_segment *cur_seg = start_seg;
+ struct xhci_generic_trb *generic_trb;
+
+ while (cur_seg->trbs > trb ||
+ &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
+ generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
+ if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
+ (generic_trb->field[3] & LINK_TOGGLE))
+ *cycle_state = ~(*cycle_state) & 0x1;
+ cur_seg = cur_seg->next;
+ if (cur_seg == start_seg)
+ /* Looped over the entire list. Oops! */
+ return 0;
+ }
+ return cur_seg;
+}
+
+struct dequeue_state {
+ struct xhci_segment *new_deq_seg;
+ union xhci_trb *new_deq_ptr;
+ int new_cycle_state;
+};
+
+/*
+ * Move the xHC's endpoint ring dequeue pointer past cur_td.
+ * Record the new state of the xHC's endpoint ring dequeue segment,
+ * dequeue pointer, and new consumer cycle state in state.
+ * Update our internal representation of the ring's dequeue pointer.
+ *
+ * We do this in three jumps:
+ * - First we update our new ring state to be the same as when the xHC stopped.
+ * - Then we traverse the ring to find the segment that contains
+ * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
+ * any link TRBs with the toggle cycle bit set.
+ * - Finally we move the dequeue state one TRB further, toggling the cycle bit
+ * if we've moved it past a link TRB with the toggle cycle bit set.
+ */
+static void find_new_dequeue_state(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_td *cur_td, struct dequeue_state *state)
+{
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
+ struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
+ struct xhci_generic_trb *trb;
+
+ state->new_cycle_state = 0;
+ state->new_deq_seg = find_trb_seg(cur_td->start_seg,
+ ep_ring->stopped_trb,
+ &state->new_cycle_state);
+ if (!state->new_deq_seg)
+ BUG();
+ /* Dig out the cycle state saved by the xHC during the stop ep cmd */
+ state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];
+
+ state->new_deq_ptr = cur_td->last_trb;
+ state->new_deq_seg = find_trb_seg(state->new_deq_seg,
+ state->new_deq_ptr,
+ &state->new_cycle_state);
+ if (!state->new_deq_seg)
+ BUG();
+
+ trb = &state->new_deq_ptr->generic;
+ if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
+ (trb->field[3] & LINK_TOGGLE))
+ state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
+ next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
+
+ /* Don't update the ring cycle state for the producer (us). */
+ ep_ring->dequeue = state->new_deq_ptr;
+ ep_ring->deq_seg = state->new_deq_seg;
+}
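+
+/*
+ * Example of the three jumps above (TRB numbers are made up): if the xHC
+ * stopped on TRB 5 of a TD spanning TRBs 3..8 of segment A, the first
+ * find_trb_seg() call locates the stopped TRB's segment, the second walks
+ * forward to the segment holding the TD's last TRB (8), and next_trb() then
+ * advances one more, so the new dequeue pointer is TRB 9, the first TRB
+ * after the cancelled TD, with the cycle state toggled if a toggle-bit
+ * link TRB was crossed on the way.
+ */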
+
+static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+ struct xhci_td *cur_td)
+{
+ struct xhci_segment *cur_seg;
+ union xhci_trb *cur_trb;
+
+ for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
+ true;
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+ if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
+ TRB_TYPE(TRB_LINK)) {
+ /* Unchain any chained Link TRBs, but
+ * leave the pointers intact.
+ */
+ cur_trb->generic.field[3] &= ~TRB_CHAIN;
+ xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
+ xhci_dbg(xhci, "Address = %p (0x%llx dma); "
+ "in seg %p (0x%llx dma)\n",
+ cur_trb,
+ (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
+ cur_seg,
+ (unsigned long long)cur_seg->dma);
+ } else {
+ cur_trb->generic.field[0] = 0;
+ cur_trb->generic.field[1] = 0;
+ cur_trb->generic.field[2] = 0;
+ /* Preserve only the cycle bit of this TRB */
+ cur_trb->generic.field[3] &= TRB_CYCLE;
+ cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
+ xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
+ "in seg %p (0x%llx dma)\n",
+ cur_trb,
+ (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
+ cur_seg,
+ (unsigned long long)cur_seg->dma);
+ }
+ if (cur_trb == cur_td->last_trb)
+ break;
+ }
+}
+
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index, struct xhci_segment *deq_seg,
+ union xhci_trb *deq_ptr, u32 cycle_state);
+
+/*
+ * When we get a command completion for a Stop Endpoint Command, we need to
+ * unlink any cancelled TDs from the ring. There are two ways to do that:
+ *
+ * 1. If the HW was in the middle of processing the TD that needs to be
+ * cancelled, then we must move the ring's dequeue pointer past the last TRB
+ * in the TD with a Set Dequeue Pointer Command.
+ * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
+ * bit cleared) so that the HW will skip over them.
+ */
+static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+ union xhci_trb *trb)
+{
+ unsigned int slot_id;
+ unsigned int ep_index;
+ struct xhci_ring *ep_ring;
+ struct list_head *entry;
+ struct xhci_td *cur_td = 0;
+ struct xhci_td *last_unlinked_td;
+
+ struct dequeue_state deq_state;
+#ifdef CONFIG_USB_HCD_STAT
+ ktime_t stop_time = ktime_get();
+#endif
+
+ memset(&deq_state, 0, sizeof(deq_state));
+ slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+ ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+ ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+ if (list_empty(&ep_ring->cancelled_td_list))
+ return;
+
+ /* Fix up the ep ring first, so HW stops executing cancelled TDs.
+ * We have the xHCI lock, so nothing can modify this list until we drop
+ * it. We're also in the event handler, so we can't get re-interrupted
+ * if another Stop Endpoint command completes
+ */
+ list_for_each(entry, &ep_ring->cancelled_td_list) {
+ cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
+ xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
+ cur_td->first_trb,
+ (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+ /*
+ * If we stopped on the TD we need to cancel, then we have to
+ * move the xHC endpoint ring dequeue pointer past this TD.
+ */
+ if (cur_td == ep_ring->stopped_td)
+ find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
+ &deq_state);
+ else
+ td_to_noop(xhci, ep_ring, cur_td);
+ /*
+ * The event handler won't see a completion for this TD anymore,
+ * so remove it from the endpoint ring's TD list. Keep it in
+ * the cancelled TD list for URB completion later.
+ */
+ list_del(&cur_td->td_list);
+ ep_ring->cancels_pending--;
+ }
+ last_unlinked_td = cur_td;
+
+ /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
+ if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
+ xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+ "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+ deq_state.new_deq_seg,
+ (unsigned long long)deq_state.new_deq_seg->dma,
+ deq_state.new_deq_ptr,
+ (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
+ deq_state.new_cycle_state);
+ queue_set_tr_deq(xhci, slot_id, ep_index,
+ deq_state.new_deq_seg,
+ deq_state.new_deq_ptr,
+ (u32) deq_state.new_cycle_state);
+ /* Stop the TD queueing code from ringing the doorbell until
+ * this command completes. The HC won't set the dequeue pointer
+ * if the ring is running, and ringing the doorbell starts the
+ * ring running.
+ */
+ ep_ring->state |= SET_DEQ_PENDING;
+ xhci_ring_cmd_db(xhci);
+ } else {
+ /* Otherwise just ring the doorbell to restart the ring */
+ ring_ep_doorbell(xhci, slot_id, ep_index);
+ }
+
+ /*
+ * Drop the lock and complete the URBs in the cancelled TD list.
+ * New TDs to be cancelled might be added to the end of the list before
+ * we can complete all the URBs for the TDs we already unlinked.
+ * So stop when we've completed the URB for the last TD we unlinked.
+ */
+ do {
+ cur_td = list_entry(ep_ring->cancelled_td_list.next,
+ struct xhci_td, cancelled_td_list);
+ list_del(&cur_td->cancelled_td_list);
+
+ /* Clean up the cancelled URB */
+#ifdef CONFIG_USB_HCD_STAT
+ hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
+ ktime_sub(stop_time, cur_td->start_time));
+#endif
+ cur_td->urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
+
+ xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
+ spin_unlock(&xhci->lock);
+ /* Doesn't matter what we pass for status, since the core will
+ * just overwrite it (because the URB has been unlinked).
+ */
+ usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
+ kfree(cur_td);
+
+ spin_lock(&xhci->lock);
+ } while (cur_td != last_unlinked_td);
+
+ /* Return to the event handler with xhci->lock re-acquired */
+}
+
+/*
+ * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
+ * we need to clear the set deq pending flag in the endpoint ring state, so that
+ * the TD queueing code can ring the doorbell again. We also need to ring the
+ * endpoint doorbell to restart the ring, but only if there aren't more
+ * cancellations pending.
+ */
+static void handle_set_deq_completion(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event,
+ union xhci_trb *trb)
+{
+ unsigned int slot_id;
+ unsigned int ep_index;
+ struct xhci_ring *ep_ring;
+ struct xhci_virt_device *dev;
+
+ slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+ ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+ dev = xhci->devs[slot_id];
+ ep_ring = dev->ep_rings[ep_index];
+
+ if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
+ unsigned int ep_state;
+ unsigned int slot_state;
+
+ switch (GET_COMP_CODE(event->status)) {
+ case COMP_TRB_ERR:
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
+ "of stream ID configuration\n");
+ break;
+ case COMP_CTX_STATE:
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
+ "to incorrect slot or ep state.\n");
+ ep_state = dev->out_ctx->ep[ep_index].ep_info;
+ ep_state &= EP_STATE_MASK;
+ slot_state = dev->out_ctx->slot.dev_state;
+ slot_state = GET_SLOT_STATE(slot_state);
+ xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
+ slot_state, ep_state);
+ break;
+ case COMP_EBADSLT:
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
+ "slot %u was not enabled.\n", slot_id);
+ break;
+ default:
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
+ "completion code of %u.\n",
+ GET_COMP_CODE(event->status));
+ break;
+ }
+ /* OK what do we do now? The endpoint state is hosed, and we
+ * should never get to this point if the synchronization between
+ * queueing and endpoint state is correct. This might happen
+ * if the device gets disconnected after we've finished
+ * cancelling URBs, which might not be an error...
+ */
+ } else {
+ xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
+ "deq[1] = 0x%x.\n",
+ dev->out_ctx->ep[ep_index].deq[0],
+ dev->out_ctx->ep[ep_index].deq[1]);
+ }
+
+ ep_ring->state &= ~SET_DEQ_PENDING;
+ ring_ep_doorbell(xhci, slot_id, ep_index);
+}
+
+
+static void handle_cmd_completion(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event)
+{
+ int slot_id = TRB_TO_SLOT_ID(event->flags);
+ u64 cmd_dma;
+ dma_addr_t cmd_dequeue_dma;
+
+ cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
+ cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+ xhci->cmd_ring->dequeue);
+ /* Is the command ring deq ptr out of sync with the deq seg ptr? */
+ if (cmd_dequeue_dma == 0) {
+ xhci->error_bitmask |= 1 << 4;
+ return;
+ }
+ /* Does the DMA address match our internal dequeue pointer address? */
+ if (cmd_dma != (u64) cmd_dequeue_dma) {
+ xhci->error_bitmask |= 1 << 5;
+ return;
+ }
+ switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
+ case TRB_TYPE(TRB_ENABLE_SLOT):
+ if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
+ xhci->slot_id = slot_id;
+ else
+ xhci->slot_id = 0;
+ complete(&xhci->addr_dev);
+ break;
+ case TRB_TYPE(TRB_DISABLE_SLOT):
+ if (xhci->devs[slot_id])
+ xhci_free_virt_device(xhci, slot_id);
+ break;
+ case TRB_TYPE(TRB_CONFIG_EP):
+ xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+ complete(&xhci->devs[slot_id]->cmd_completion);
+ break;
+ case TRB_TYPE(TRB_ADDR_DEV):
+ xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+ complete(&xhci->addr_dev);
+ break;
+ case TRB_TYPE(TRB_STOP_RING):
+ handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
+ break;
+ case TRB_TYPE(TRB_SET_DEQ):
+ handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
+ break;
+ case TRB_TYPE(TRB_CMD_NOOP):
+ ++xhci->noops_handled;
+ break;
+ default:
+ /* Skip over unknown commands on the event ring */
+ xhci->error_bitmask |= 1 << 6;
+ break;
+ }
+ inc_deq(xhci, xhci->cmd_ring, false);
+}
+
+static void handle_port_status(struct xhci_hcd *xhci,
+ union xhci_trb *event)
+{
+ u32 port_id;
+
+ /* Port status change events always have a successful completion code */
+ if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
+ xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
+ xhci->error_bitmask |= 1 << 8;
+ }
+ /* FIXME: core doesn't care about all port link state changes yet */
+ port_id = GET_PORT_ID(event->generic.field[0]);
+ xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
+
+ /* Update event ring dequeue pointer before dropping the lock */
+ inc_deq(xhci, xhci->event_ring, true);
+ xhci_set_hc_event_deq(xhci);
+
+ spin_unlock(&xhci->lock);
+ /* Pass this up to the core */
+ usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
+ spin_lock(&xhci->lock);
+}
+
+/*
+ * This TD is defined by the TRBs starting at start_trb in start_seg and ending
+ * at end_trb, which may be in another segment. If the suspect DMA address is a
+ * TRB in this TD, this function returns that TRB's segment. Otherwise it
+ * returns 0.
+ */
+static struct xhci_segment *trb_in_td(
+ struct xhci_segment *start_seg,
+ union xhci_trb *start_trb,
+ union xhci_trb *end_trb,
+ dma_addr_t suspect_dma)
+{
+ dma_addr_t start_dma;
+ dma_addr_t end_seg_dma;
+ dma_addr_t end_trb_dma;
+ struct xhci_segment *cur_seg;
+
+ start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
+ cur_seg = start_seg;
+
+ do {
+ /* We may get an event for a Link TRB in the middle of a TD */
+ end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
+ &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
+ /* If the end TRB isn't in this segment, this is set to 0 */
+ end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
+
+ if (end_trb_dma > 0) {
+ /* The end TRB is in this segment, so suspect should be here */
+ if (start_dma <= end_trb_dma) {
+ if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
+ return cur_seg;
+ } else {
+ /* Case for one segment with
+ * a TD wrapped around to the top
+ */
+ if ((suspect_dma >= start_dma &&
+ suspect_dma <= end_seg_dma) ||
+ (suspect_dma >= cur_seg->dma &&
+ suspect_dma <= end_trb_dma))
+ return cur_seg;
+ }
+ return 0;
+ } else {
+ /* Might still be somewhere in this segment */
+ if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+ return cur_seg;
+ }
+ cur_seg = cur_seg->next;
+ start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+ } while (1);
+
+}
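+
+/*
+ * Worked example of the wrap-around case above (addresses are made up):
+ * in a one-segment ring where a TD starts at seg->dma + 0xf00 and its last
+ * TRB sits back near the top at seg->dma + 0x040, a suspect DMA address of
+ * seg->dma + 0x010 fails the (start_dma <= end_trb_dma) ordering test but
+ * matches the second range check (cur_seg->dma .. end_trb_dma), so the
+ * segment is still returned.
+ */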
+
+/*
+ * If this function returns an error condition, it means it got a Transfer
+ * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
+ * At this point, the host controller is probably hosed and should be reset.
+ */
+static int handle_tx_event(struct xhci_hcd *xhci,
+ struct xhci_transfer_event *event)
+{
+ struct xhci_virt_device *xdev;
+ struct xhci_ring *ep_ring;
+ int ep_index;
+ struct xhci_td *td = 0;
+ dma_addr_t event_dma;
+ struct xhci_segment *event_seg;
+ union xhci_trb *event_trb;
+ struct urb *urb = 0;
+ int status = -EINPROGRESS;
+
+ xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
+ if (!xdev) {
+ xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
+ return -ENODEV;
+ }
+
+ /* Endpoint ID is 1 based, our index is zero based */
+ ep_index = TRB_TO_EP_ID(event->flags) - 1;
+ ep_ring = xdev->ep_rings[ep_index];
+ if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+ xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
+ return -ENODEV;
+ }
+
+ event_dma = event->buffer[0];
+ if (event->buffer[1] != 0)
+ xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
+
+ /* This TRB should be in the TD at the head of this ring's TD list */
+ if (list_empty(&ep_ring->td_list)) {
+ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+ TRB_TO_SLOT_ID(event->flags), ep_index);
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+ (unsigned int) (event->flags & TRB_TYPE_BITMASK) >> 10);
+ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ urb = NULL;
+ goto cleanup;
+ }
+ td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+
+ /* Is this a TRB in the currently executing TD? */
+ event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
+ td->last_trb, event_dma);
+ if (!event_seg) {
+ /* HC is busted, give up! */
+ xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
+ return -ESHUTDOWN;
+ }
+ event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+ (unsigned int) (event->flags & TRB_TYPE_BITMASK) >> 10);
+ xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
+ (unsigned int) event->buffer[0]);
+ xhci_dbg(xhci, "Offset 0x04 (buffer[1]) = 0x%x\n",
+ (unsigned int) event->buffer[1]);
+ xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
+ (unsigned int) event->transfer_len);
+ xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
+ (unsigned int) event->flags);
+
+ /* Look for common error cases */
+ switch (GET_COMP_CODE(event->transfer_len)) {
+ /* Skip codes that require special handling depending on
+ * transfer type
+ */
+ case COMP_SUCCESS:
+ case COMP_SHORT_TX:
+ break;
+ case COMP_STOP:
+ xhci_dbg(xhci, "Stopped on Transfer TRB\n");
+ break;
+ case COMP_STOP_INVAL:
+ xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
+ break;
+ case COMP_STALL:
+ xhci_warn(xhci, "WARN: Stalled endpoint\n");
+ status = -EPIPE;
+ break;
+ case COMP_TRB_ERR:
+ xhci_warn(xhci, "WARN: TRB error on endpoint\n");
+ status = -EILSEQ;
+ break;
+ case COMP_TX_ERR:
+ xhci_warn(xhci, "WARN: transfer error on endpoint\n");
+ status = -EPROTO;
+ break;
+ case COMP_DB_ERR:
+ xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
+ status = -ENOSR;
+ break;
+ default:
+ xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
+ urb = NULL;
+ goto cleanup;
+ }
+ /* Now update the urb's actual_length and give back to the core */
+ /* Was this a control transfer? */
+ if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
+ xhci_debug_trb(xhci, xhci->event_ring->dequeue);
+ switch (GET_COMP_CODE(event->transfer_len)) {
+ case COMP_SUCCESS:
+ if (event_trb == ep_ring->dequeue) {
+ xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
+ status = -ESHUTDOWN;
+ } else if (event_trb != td->last_trb) {
+ xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
+ status = -ESHUTDOWN;
+ } else {
+ xhci_dbg(xhci, "Successful control transfer!\n");
+ status = 0;
+ }
+ break;
+ case COMP_SHORT_TX:
+ xhci_warn(xhci, "WARN: short transfer on control ep\n");
+ status = -EREMOTEIO;
+ break;
+ default:
+ /* Others already handled above */
+ break;
+ }
+ /*
+ * Did we transfer any data, despite the errors that might have
+ * happened? I.e. did we get past the setup stage?
+ */
+ if (event_trb != ep_ring->dequeue) {
+ /* The event was for the status stage */
+ if (event_trb == td->last_trb) {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length;
+ } else {
+ /* Maybe the event was for the data stage? */
+ if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+ /* We didn't stop on a link TRB in the middle */
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ TRB_LEN(event->transfer_len);
+ }
+ }
+ } else {
+ switch (GET_COMP_CODE(event->transfer_len)) {
+ case COMP_SUCCESS:
+ /* Double check that the HW transferred everything. */
+ if (event_trb != td->last_trb) {
+ xhci_warn(xhci, "WARN Successful completion "
+ "on short TX\n");
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ status = -EREMOTEIO;
+ else
+ status = 0;
+ } else {
+ xhci_dbg(xhci, "Successful bulk transfer!\n");
+ status = 0;
+ }
+ break;
+ case COMP_SHORT_TX:
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ status = -EREMOTEIO;
+ else
+ status = 0;
+ break;
+ default:
+ /* Others already handled above */
+ break;
+ }
+ dev_dbg(&td->urb->dev->dev,
+ "ep %#x - asked for %d bytes, "
+ "%d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ td->urb->transfer_buffer_length,
+ TRB_LEN(event->transfer_len));
+ /* Fast path - was this the last TRB in the TD for this URB? */
+ if (event_trb == td->last_trb) {
+ if (TRB_LEN(event->transfer_len) != 0) {
+ /* urb->actual_length is unsigned, so catch a bogus
+ * residue before doing the subtraction.
+ */
+ if (TRB_LEN(event->transfer_len) >
+ td->urb->transfer_buffer_length) {
+ xhci_warn(xhci, "HC gave bad length "
+ "of %d bytes left\n",
+ TRB_LEN(event->transfer_len));
+ td->urb->actual_length = 0;
+ } else {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ TRB_LEN(event->transfer_len);
+ }
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ status = -EREMOTEIO;
+ else
+ status = 0;
+ } else {
+ td->urb->actual_length = td->urb->transfer_buffer_length;
+ /* Ignore a short packet completion if the
+ * untransferred length was zero.
+ */
+ status = 0;
+ }
+ } else {
+ /* Slow path - walk the list, starting from the dequeue
+ * pointer, to get the actual length transferred.
+ */
+ union xhci_trb *cur_trb;
+ struct xhci_segment *cur_seg;
+
+ td->urb->actual_length = 0;
+ for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
+ cur_trb != event_trb;
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+ if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
+ TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
+ td->urb->actual_length +=
+ TRB_LEN(cur_trb->generic.field[2]);
+ }
+ /* If the ring didn't stop on a Link or No-op TRB, add
+ * in the actual bytes transferred from the Normal TRB
+ */
+ if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+ td->urb->actual_length +=
+ TRB_LEN(cur_trb->generic.field[2]) -
+ TRB_LEN(event->transfer_len);
+ }
+ }
+ /* The Endpoint Stop Command completion will take care of
+ * any stopped TDs. A stopped TD may be restarted, so don't update the
+ * ring dequeue pointer or take this TD off any lists yet.
+ */
+ if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
+ GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
+ ep_ring->stopped_td = td;
+ ep_ring->stopped_trb = event_trb;
+ } else {
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring, false);
+
+ /* Clean up the endpoint's TD list */
+ urb = td->urb;
+ list_del(&td->td_list);
+ /* Was this TD slated to be cancelled but completed anyway? */
+ if (!list_empty(&td->cancelled_td_list)) {
+ list_del(&td->cancelled_td_list);
+ ep_ring->cancels_pending--;
+ }
+ kfree(td);
+ urb->hcpriv = NULL;
+ }
+cleanup:
+ inc_deq(xhci, xhci->event_ring, true);
+ xhci_set_hc_event_deq(xhci);
+
+ /* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */
+ if (urb) {
+ usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+ spin_unlock(&xhci->lock);
+ usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
+ spin_lock(&xhci->lock);
+ }
+ return 0;
+}
+
+/*
+ * This function handles all OS-owned events on the event ring. It may drop
+ * xhci->lock between event processing (e.g. to pass up port status changes).
+ */
+void xhci_handle_event(struct xhci_hcd *xhci)
+{
+ union xhci_trb *event;
+ int update_ptrs = 1;
+ int ret;
+
+ if (!xhci->event_ring || !xhci->event_ring->dequeue) {
+ xhci->error_bitmask |= 1 << 1;
+ return;
+ }
+
+ event = xhci->event_ring->dequeue;
+ /* Does the HC or OS own the TRB? */
+ if ((event->event_cmd.flags & TRB_CYCLE) !=
+ xhci->event_ring->cycle_state) {
+ xhci->error_bitmask |= 1 << 2;
+ return;
+ }
+
+ /* FIXME: Handle more event types. */
+ switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
+ case TRB_TYPE(TRB_COMPLETION):
+ handle_cmd_completion(xhci, &event->event_cmd);
+ break;
+ case TRB_TYPE(TRB_PORT_STATUS):
+ handle_port_status(xhci, event);
+ update_ptrs = 0;
+ break;
+ case TRB_TYPE(TRB_TRANSFER):
+ ret = handle_tx_event(xhci, &event->trans_event);
+ if (ret < 0)
+ xhci->error_bitmask |= 1 << 9;
+ else
+ update_ptrs = 0;
+ break;
+ default:
+ xhci->error_bitmask |= 1 << 3;
+ }
+
+ if (update_ptrs) {
+ /* Update SW and HC event ring dequeue pointer */
+ inc_deq(xhci, xhci->event_ring, true);
+ xhci_set_hc_event_deq(xhci);
+ }
+ /* Are there more items on the event ring? */
+ xhci_handle_event(xhci);
+}
+
+/**** Endpoint Ring Operations ****/
+
+/*
+ * Generic function for queueing a TRB on a ring.
+ * The caller must have checked to make sure there's room on the ring.
+ */
+static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ bool consumer,
+ u32 field1, u32 field2, u32 field3, u32 field4)
+{
+ struct xhci_generic_trb *trb;
+
+ trb = &ring->enqueue->generic;
+ trb->field[0] = field1;
+ trb->field[1] = field2;
+ trb->field[2] = field3;
+ trb->field[3] = field4;
+ inc_enq(xhci, ring, consumer);
+}
+
+/*
+ * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
+ * FIXME allocate segments if the ring is full.
+ */
+static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+ u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+{
+ /* Make sure the endpoint has been added to xHC schedule */
+ xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
+ switch (ep_state) {
+ case EP_STATE_DISABLED:
+ /*
+ * USB core changed config/interfaces without notifying us,
+ * or hardware is reporting the wrong state.
+ */
+ xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
+ return -ENOENT;
+ case EP_STATE_HALTED:
+ case EP_STATE_ERROR:
+ xhci_warn(xhci, "WARN waiting for halt or error on ep "
+ "to be cleared\n");
+ /* FIXME event handling code for error needs to clear it */
+ /* XXX not sure if this should be -ENOENT or not */
+ return -EINVAL;
+ case EP_STATE_STOPPED:
+ case EP_STATE_RUNNING:
+ break;
+ default:
+ xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
+ /*
+ * FIXME issue Configure Endpoint command to try to get the HC
+ * back into a known state.
+ */
+ return -EINVAL;
+ }
+ if (!room_on_ring(xhci, ep_ring, num_trbs)) {
+ /* FIXME allocate more room */
+ xhci_err(xhci, "ERROR no room on ep ring\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int prepare_transfer(struct xhci_hcd *xhci,
+ struct xhci_virt_device *xdev,
+ unsigned int ep_index,
+ unsigned int num_trbs,
+ struct urb *urb,
+ struct xhci_td **td,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
+ xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
+ num_trbs, mem_flags);
+ if (ret)
+ return ret;
+ *td = kzalloc(sizeof(struct xhci_td), mem_flags);
+ if (!*td)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&(*td)->td_list);
+ INIT_LIST_HEAD(&(*td)->cancelled_td_list);
+
+ ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+ if (unlikely(ret)) {
+ kfree(*td);
+ return ret;
+ }
+
+ (*td)->urb = urb;
+ urb->hcpriv = (void *) (*td);
+ /* Add this TD to the tail of the endpoint ring's TD list */
+ list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
+ (*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
+ (*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;
+
+ return 0;
+}
+
+static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
+{
+ int num_sgs, num_trbs, running_total, temp, i;
+ struct scatterlist *sg;
+
+ sg = NULL;
+ num_sgs = urb->num_sgs;
+ temp = urb->transfer_buffer_length;
+
+ xhci_dbg(xhci, "count sg list trbs: \n");
+ num_trbs = 0;
+ for_each_sg(urb->sg->sg, sg, num_sgs, i) {
+ unsigned int previous_total_trbs = num_trbs;
+ unsigned int len = sg_dma_len(sg);
+
+ /* Scatter gather list entries may cross 64KB boundaries */
+ running_total = TRB_MAX_BUFF_SIZE -
+ (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ if (running_total != 0)
+ num_trbs++;
+
+ /* How many more 64KB chunks to transfer, how many more TRBs? */
+ while (running_total < sg_dma_len(sg)) {
+ num_trbs++;
+ running_total += TRB_MAX_BUFF_SIZE;
+ }
+ xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
+ i, (unsigned long long)sg_dma_address(sg),
+ len, len, num_trbs - previous_total_trbs);
+
+ len = min_t(int, len, temp);
+ temp -= len;
+ if (temp == 0)
+ break;
+ }
+ xhci_dbg(xhci, "\n");
+ if (!in_interrupt())
+ dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
+ urb->ep->desc.bEndpointAddress,
+ urb->transfer_buffer_length,
+ num_trbs);
+ return num_trbs;
+}
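+
+/*
+ * Worked example for the count above (numbers made up, assuming the usual
+ * 64KB TRB_MAX_BUFF_SIZE): an sg entry of 0x18000 bytes whose DMA address
+ * ends in 0xf000 has only 0x1000 bytes left before the first 64KB boundary,
+ * so it needs three TRBs (4KB + 64KB + 28KB). An entry that does not cross
+ * a 64KB boundary needs just one TRB.
+ */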
+
+static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
+{
+ if (num_trbs != 0)
+ dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ "TRBs, %d left\n", __func__,
+ urb->ep->desc.bEndpointAddress, num_trbs);
+ if (running_total != urb->transfer_buffer_length)
+ dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ "queued %#x (%d), asked for %#x (%d)\n",
+ __func__,
+ urb->ep->desc.bEndpointAddress,
+ running_total, running_total,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer_length);
+}
+
+static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index, int start_cycle,
+ struct xhci_generic_trb *start_trb, struct xhci_td *td)
+{
+ /*
+ * Pass all the TRBs to the hardware at once and make sure this write
+ * isn't reordered.
+ */
+ wmb();
+ start_trb->field[3] |= start_cycle;
+ ring_ep_doorbell(xhci, slot_id, ep_index);
+}
+
+static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+{
+ struct xhci_ring *ep_ring;
+ unsigned int num_trbs;
+ struct xhci_td *td;
+ struct scatterlist *sg;
+ int num_sgs;
+ int trb_buff_len, this_sg_len, running_total;
+ bool first_trb;
+ u64 addr;
+
+ struct xhci_generic_trb *start_trb;
+ int start_cycle;
+
+ ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+ num_trbs = count_sg_trbs_needed(xhci, urb);
+ num_sgs = urb->num_sgs;
+
+ trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, num_trbs, urb, &td, mem_flags);
+ if (trb_buff_len < 0)
+ return trb_buff_len;
+ /*
+ * Don't give the first TRB to the hardware (by toggling the cycle bit)
+ * until we've finished creating all the other TRBs. The ring's cycle
+ * state may change as we enqueue the other TRBs, so save it too.
+ */
+ start_trb = &ep_ring->enqueue->generic;
+ start_cycle = ep_ring->cycle_state;
+
+ running_total = 0;
+ /*
+ * How much data is in the first TRB?
+ *
+ * There are three forces at work for TRB buffer pointers and lengths:
+ * 1. We don't want to walk off the end of this sg-list entry buffer.
+ * 2. The transfer length that the driver requested may be smaller than
+ * the amount of memory allocated for this scatter-gather list.
+ * 3. TRB buffers can't cross 64KB boundaries.
+ */
+ sg = urb->sg->sg;
+ addr = (u64) sg_dma_address(sg);
+ this_sg_len = sg_dma_len(sg);
+ trb_buff_len = TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
+ if (trb_buff_len > urb->transfer_buffer_length)
+ trb_buff_len = urb->transfer_buffer_length;
+ xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
+ trb_buff_len);
+
+ first_trb = true;
+ /* Queue the first TRB, even if it's zero-length */
+ do {
+ u32 field = 0;
+
+ /* Don't change the cycle bit of the first TRB until later */
+ if (first_trb)
+ first_trb = false;
+ else
+ field |= ep_ring->cycle_state;
+
+ /* Chain all the TRBs together; clear the chain bit in the last
+ * TRB to indicate it's the last TRB in the chain.
+ */
+ if (num_trbs > 1) {
+ field |= TRB_CHAIN;
+ } else {
+ /* FIXME - add check for ZERO_PACKET flag before this */
+ td->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
+ }
+ xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
+ "64KB boundary at %#x, end dma = %#x\n",
+ (unsigned int) addr, trb_buff_len, trb_buff_len,
+ (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
+ (unsigned int) addr + trb_buff_len);
+ if (TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+ xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
+ xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
+ (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
+ (unsigned int) addr + trb_buff_len);
+ }
+ queue_trb(xhci, ep_ring, false,
+ (u32) addr,
+ (u32) ((u64) addr >> 32),
+ TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+ /* We always want to know if the TRB was short,
+ * or we won't get an event when it completes.
+ * (Unless we use event data TRBs, which are a
+ * waste of space and HC resources.)
+ */
+ field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+ --num_trbs;
+ running_total += trb_buff_len;
+
+ /* Calculate length for next transfer --
+ * Are we done queueing all the TRBs for this sg entry?
+ */
+ this_sg_len -= trb_buff_len;
+ if (this_sg_len == 0) {
+ --num_sgs;
+ if (num_sgs == 0)
+ break;
+ sg = sg_next(sg);
+ addr = (u64) sg_dma_address(sg);
+ this_sg_len = sg_dma_len(sg);
+ } else {
+ addr += trb_buff_len;
+ }
+
+ trb_buff_len = TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
+ if (running_total + trb_buff_len > urb->transfer_buffer_length)
+ trb_buff_len =
+ urb->transfer_buffer_length - running_total;
+ } while (running_total < urb->transfer_buffer_length);
+
+ check_trb_math(urb, num_trbs, running_total);
+ giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+ return 0;
+}
+
+/* This is very similar to what ehci-q.c qtd_fill() does */
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+{
+ struct xhci_ring *ep_ring;
+ struct xhci_td *td;
+ int num_trbs;
+ struct xhci_generic_trb *start_trb;
+ bool first_trb;
+ int start_cycle;
+ u32 field;
+
+ int running_total, trb_buff_len, ret;
+ u64 addr;
+
+ if (urb->sg)
+ return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
+
+ ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+ num_trbs = 0;
+ /* How much data is (potentially) left before the 64KB boundary? */
+ running_total = TRB_MAX_BUFF_SIZE -
+ (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+
+ /* If there's some data on this 64KB chunk, or we have to send a
+ * zero-length transfer, we need at least one TRB
+ */
+ if (running_total != 0 || urb->transfer_buffer_length == 0)
+ num_trbs++;
+ /* How many more 64KB chunks to transfer, how many more TRBs? */
+ while (running_total < urb->transfer_buffer_length) {
+ num_trbs++;
+ running_total += TRB_MAX_BUFF_SIZE;
+ }
+ /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+
+ if (!in_interrupt())
+ dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
+ urb->ep->desc.bEndpointAddress,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer_length,
+ (unsigned long long)urb->transfer_dma,
+ num_trbs);
+
+ ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+ num_trbs, urb, &td, mem_flags);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Don't give the first TRB to the hardware (by toggling the cycle bit)
+ * until we've finished creating all the other TRBs. The ring's cycle
+ * state may change as we enqueue the other TRBs, so save it too.
+ */
+ start_trb = &ep_ring->enqueue->generic;
+ start_cycle = ep_ring->cycle_state;
+
+ running_total = 0;
+ /* How much data is in the first TRB? */
+ addr = (u64) urb->transfer_dma;
+ trb_buff_len = TRB_MAX_BUFF_SIZE -
+ (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ if (urb->transfer_buffer_length < trb_buff_len)
+ trb_buff_len = urb->transfer_buffer_length;
+
+ first_trb = true;
+
+ /* Queue the first TRB, even if it's zero-length */
+ do {
+ field = 0;
+
+ /* Don't change the cycle bit of the first TRB until later */
+ if (first_trb)
+ first_trb = false;
+ else
+ field |= ep_ring->cycle_state;
+
+ /* Chain all the TRBs together; clear the chain bit in the last
+ * TRB to indicate it's the last TRB in the chain.
+ */
+ if (num_trbs > 1) {
+ field |= TRB_CHAIN;
+ } else {
+ /* FIXME - add check for ZERO_PACKET flag before this */
+ td->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
+ }
+ queue_trb(xhci, ep_ring, false,
+ (u32) addr,
+ (u32) ((u64) addr >> 32),
+ TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+ /* We always want to know if the TRB was short,
+ * or we won't get an event when it completes.
+ * (Unless we use event data TRBs, which are a
+ * waste of space and HC resources.)
+ */
+ field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+ --num_trbs;
+ running_total += trb_buff_len;
+
+ /* Calculate length for next transfer */
+ addr += trb_buff_len;
+ trb_buff_len = urb->transfer_buffer_length - running_total;
+ if (trb_buff_len > TRB_MAX_BUFF_SIZE)
+ trb_buff_len = TRB_MAX_BUFF_SIZE;
+ } while (running_total < urb->transfer_buffer_length);
+
+ check_trb_math(urb, num_trbs, running_total);
+ giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+ return 0;
+}
+
+/* Caller must have locked xhci->lock */
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+{
+ struct xhci_ring *ep_ring;
+ int num_trbs;
+ int ret;
+ struct usb_ctrlrequest *setup;
+ struct xhci_generic_trb *start_trb;
+ int start_cycle;
+ u32 field;
+ struct xhci_td *td;
+
+ ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+ /*
+ * Need to copy setup packet into setup TRB, so we can't use the setup
+ * DMA address.
+ */
+ if (!urb->setup_packet)
+ return -EINVAL;
+
+ if (!in_interrupt())
+ xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
+ slot_id, ep_index);
+ /* 1 TRB for setup, 1 for status */
+ num_trbs = 2;
+ /*
+ * Don't need to check if we need additional event data and normal TRBs,
+ * since data in control transfers will never get bigger than 16MB
+ * XXX: can we get a buffer that crosses 64KB boundaries?
+ */
+ if (urb->transfer_buffer_length > 0)
+ num_trbs++;
+ ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
+ urb, &td, mem_flags);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Don't give the first TRB to the hardware (by toggling the cycle bit)
+ * until we've finished creating all the other TRBs. The ring's cycle
+ * state may change as we enqueue the other TRBs, so save it too.
+ */
+ start_trb = &ep_ring->enqueue->generic;
+ start_cycle = ep_ring->cycle_state;
+
+ /* Queue setup TRB - see section 6.4.1.2.1 */
+ /* FIXME better way to translate setup_packet into two u32 fields? */
+ setup = (struct usb_ctrlrequest *) urb->setup_packet;
+ queue_trb(xhci, ep_ring, false,
+ /* FIXME endianness is probably going to bite my ass here. */
+ setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
+ setup->wIndex | setup->wLength << 16,
+ TRB_LEN(8) | TRB_INTR_TARGET(0),
+ /* Immediate data in pointer */
+ TRB_IDT | TRB_TYPE(TRB_SETUP));
+
+ /* If there's data, queue data TRBs */
+ field = 0;
+ if (urb->transfer_buffer_length > 0) {
+ if (setup->bRequestType & USB_DIR_IN)
+ field |= TRB_DIR_IN;
+ queue_trb(xhci, ep_ring, false,
+ lower_32_bits(urb->transfer_dma),
+ upper_32_bits(urb->transfer_dma),
+ TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
+ /* Event on short tx */
+ field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
+ }
+
+ /* Save the DMA address of the last TRB in the TD */
+ td->last_trb = ep_ring->enqueue;
+
+ /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
+ /* If the device sent data, the status stage is an OUT transfer */
+ if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
+ field = 0;
+ else
+ field = TRB_DIR_IN;
+ queue_trb(xhci, ep_ring, false,
+ 0,
+ 0,
+ TRB_INTR_TARGET(0),
+ /* Event on completion */
+ field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
+
+ giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+ return 0;
+}
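+
+/*
+ * For reference, the control TD queued above lays out as (cycle bits and
+ * doorbell handling omitted):
+ *
+ *	Setup TRB:  TRB_IDT, the 8 setup bytes as immediate data
+ *	Data TRB:   optional, TRB_ISP, TRB_DIR_IN for device-to-host data
+ *	Status TRB: TRB_IOC, OUT if the data stage was IN, otherwise IN
+ *
+ * and giveback_first_trb() then hands the whole TD to the hardware with a
+ * single doorbell ring.
+ */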
+
+/**** Command Ring Operations ****/
+
+/* Generic function for queueing a command TRB on the command ring */
+static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
+{
+ if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
+ if (!in_interrupt())
+ xhci_err(xhci, "ERR: No room for command on command ring\n");
+ return -ENOMEM;
+ }
+ queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+ field4 | xhci->cmd_ring->cycle_state);
+ return 0;
+}
+
+/* Queue a no-op command on the command ring */
+static int queue_cmd_noop(struct xhci_hcd *xhci)
+{
+ return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
+}
+
+/*
+ * Place a no-op command on the command ring to test the command and
+ * event ring.
+ */
+void *xhci_setup_one_noop(struct xhci_hcd *xhci)
+{
+ if (queue_cmd_noop(xhci) < 0)
+ return NULL;
+ xhci->noops_submitted++;
+ return xhci_ring_cmd_db;
+}
+
+/* Queue a slot enable or disable request on the command ring */
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+{
+ return queue_command(xhci, 0, 0, 0,
+ TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
+}
+
+/* Queue an address device command TRB */
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id)
+{
+ return queue_command(xhci, in_ctx_ptr, 0, 0,
+ TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
+}
+
+/* Queue a configure endpoint command TRB */
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id)
+{
+ return queue_command(xhci, in_ctx_ptr, 0, 0,
+ TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
+}
+
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index)
+{
+ u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+ u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ u32 type = TRB_TYPE(TRB_STOP_RING);
+
+ return queue_command(xhci, 0, 0, 0,
+ trb_slot_id | trb_ep_index | type);
+}
+
+/* Set Transfer Ring Dequeue Pointer command.
+ * This should not be used for endpoints that have streams enabled.
+ */
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index, struct xhci_segment *deq_seg,
+ union xhci_trb *deq_ptr, u32 cycle_state)
+{
+ dma_addr_t addr;
+ u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+ u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ u32 type = TRB_TYPE(TRB_SET_DEQ);
+
+ addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
+ if (addr == 0) {
+ xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
+ xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
+ deq_seg, deq_ptr);
+ return 0;
+ }
+ return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
+ trb_slot_id | trb_ep_index | type);
+}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
new file mode 100644
index 00000000000..8936eeb5588
--- /dev/null
+++ b/drivers/usb/host/xhci.h
@@ -0,0 +1,1157 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_XHCI_HCD_H
+#define __LINUX_XHCI_HCD_H
+
+#include <linux/usb.h>
+#include <linux/timer.h>
+
+#include "../core/hcd.h"
+/* Code sharing between pci-quirks and xhci hcd */
+#include "xhci-ext-caps.h"
+
+/* xHCI PCI Configuration Registers */
+#define XHCI_SBRN_OFFSET (0x60)
+
+/* Max number of USB devices for any host controller - limit in section 6.1 */
+#define MAX_HC_SLOTS 256
+/* Section 5.3.3 - MaxPorts */
+#define MAX_HC_PORTS 127
+
+/*
+ * xHCI register interface.
+ * This corresponds to the eXtensible Host Controller Interface (xHCI)
+ * Revision 0.95 specification
+ *
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers. Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
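
A minimal illustration of the dword-ordering rule above, not part of the patch: it assumes the xhci_writel() helper defined near the end of this header and the lower_32_bits()/upper_32_bits() helpers from <linux/kernel.h>.

	/* Editorial sketch: write a 64-bit xHCI register as two dword accesses,
	 * low dword first, as the comment above requires. */
	static inline void xhci_write_64_sketch(struct xhci_hcd *xhci,
						u64 val, __u32 __iomem *regs)
	{
		xhci_writel(xhci, lower_32_bits(val), regs);		/* ptr[0] */
		xhci_writel(xhci, upper_32_bits(val), regs + 1);	/* ptr[1] */
	}
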
+
+/**
+ * struct xhci_cap_regs - xHCI Host Controller Capability Registers.
+ * @hc_capbase: length of the capabilities register and HC version number
+ * @hcs_params1: HCSPARAMS1 - Structural Parameters 1
+ * @hcs_params2: HCSPARAMS2 - Structural Parameters 2
+ * @hcs_params3: HCSPARAMS3 - Structural Parameters 3
+ * @hcc_params: HCCPARAMS - Capability Parameters
+ * @db_off: DBOFF - Doorbell array offset
+ * @run_regs_off: RTSOFF - Runtime register space offset
+ */
+struct xhci_cap_regs {
+ u32 hc_capbase;
+ u32 hcs_params1;
+ u32 hcs_params2;
+ u32 hcs_params3;
+ u32 hcc_params;
+ u32 db_off;
+ u32 run_regs_off;
+ /* Reserved up to (CAPLENGTH - 0x1C) */
+};
+
+/* hc_capbase bitmasks */
+/* bits 7:0 - how long is the Capabilities register */
+#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
+/* bits 31:16 */
+#define HC_VERSION(p) (((p) >> 16) & 0xffff)
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Slots */
+#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
+#define HCS_SLOTS_MASK 0xff
+/* bits 8:18, Max Interrupters */
+#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
+/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
+#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
+
+/* HCSPARAMS2 - hcs_params2 - bitmasks */
+/* bits 0:3, frames or uframes that SW needs to queue transactions
+ * ahead of the HW to meet periodic deadlines */
+#define HCS_IST(p) (((p) >> 0) & 0xf)
+/* bits 4:7, max number of Event Ring segments */
+#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
+/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
+
+/* HCSPARAMS3 - hcs_params3 - bitmasks */
+/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
+/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* true: HC can use 64-bit address pointers */
+#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
+/* true: HC can do bandwidth negotiation */
+#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
+/* true: HC uses 64-byte Device Context structures
+ * FIXME 64-byte context structures aren't supported yet.
+ */
+#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
+/* true: HC has port power switches */
+#define HCC_PPC(p) ((p) & (1 << 3))
+/* true: HC has port indicators */
+#define HCS_INDICATOR(p) ((p) & (1 << 4))
+/* true: HC has Light HC Reset Capability */
+#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
+/* true: HC supports latency tolerance messaging */
+#define HCC_LTC(p) ((p) & (1 << 6))
+/* true: no secondary Stream ID Support */
+#define HCC_NSS(p) ((p) & (1 << 7))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
+#define HCC_MAX_PSA (1 << ((((p) >> 12) & 0xf) + 1))
+/* Extended Capabilities pointer from PCI base - section 5.3.6 */
+#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
+
+/* db_off bitmask - bits 0:1 reserved */
+#define DBOFF_MASK (~0x3)
+
+/* run_regs_off bitmask - bits 0:4 reserved */
+#define RTSOFF_MASK (~0x1f)
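
For orientation only, a sketch of how the accessors above are typically applied to the cached register copies kept in struct xhci_hcd (defined later in this header); the function name is hypothetical and the debug print is illustrative.

	static void xhci_decode_caps_sketch(struct xhci_hcd *xhci)
	{
		unsigned int max_slots = HCS_MAX_SLOTS(xhci->hcs_params1);
		unsigned int max_intrs = HCS_MAX_INTRS(xhci->hcs_params1);
		unsigned int max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
		int has_64bit_ptrs = HCC_64BIT_ADDR(xhci->hcc_params) ? 1 : 0;

		xhci_dbg(xhci, "slots %u intrs %u ports %u 64-bit %d\n",
				max_slots, max_intrs, max_ports, has_64bit_ptrs);
	}
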
+
+
+/* Number of registers per port */
+#define NUM_PORT_REGS 4
+
+/**
+ * struct xhci_op_regs - xHCI Host Controller Operational Registers.
+ * @command: USBCMD - xHC command register
+ * @status: USBSTS - xHC status register
+ * @page_size: This indicates the page size that the host controller
+ * supports. If bit n is set, the HC supports a page size
+ * of 2^(n+12), up to a 128MB page size.
+ * 4K is the minimum page size.
+ * @cmd_ring: CRP - 64-bit Command Ring Pointer
+ * @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer
+ * @config_reg: CONFIG - Configure Register
+ * @port_status_base: PORTSCn - base address for Port Status and Control
+ * Each port has a Port Status and Control register,
+ * followed by a Port Power Management Status and Control
+ * register, a Port Link Info register, and a reserved
+ * register.
+ * @port_power_base: PORTPMSCn - base address for
+ * Port Power Management Status and Control
+ * @port_link_base: PORTLIn - base address for Port Link Info (current
+ * Link PM state and control) for USB 2.1 and USB 3.0
+ * devices.
+ */
+struct xhci_op_regs {
+ u32 command;
+ u32 status;
+ u32 page_size;
+ u32 reserved1;
+ u32 reserved2;
+ u32 dev_notification;
+ u32 cmd_ring[2];
+ /* rsvd: offset 0x20-2F */
+ u32 reserved3[4];
+ u32 dcbaa_ptr[2];
+ u32 config_reg;
+ /* rsvd: offset 0x3C-3FF */
+ u32 reserved4[241];
+ /* port 1 registers, which serve as a base address for other ports */
+ u32 port_status_base;
+ u32 port_power_base;
+ u32 port_link_base;
+ u32 reserved5;
+ /* registers for ports 2-255 */
+ u32 reserved6[NUM_PORT_REGS*254];
+};
+
+/* USBCMD - USB command - command bitmasks */
+/* start/stop HC execution - do not write unless HC is halted*/
+#define CMD_RUN XHCI_CMD_RUN
+/* Reset HC - resets internal HC state machine and all registers (except
+ * PCI config regs). HC does NOT drive a USB reset on the downstream ports.
+ * The xHCI driver must reinitialize the xHC after setting this bit.
+ */
+#define CMD_RESET (1 << 1)
+/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
+#define CMD_EIE XHCI_CMD_EIE
+/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
+#define CMD_HSEIE XHCI_CMD_HSEIE
+/* bits 4:6 are reserved (and should be preserved on writes). */
+/* light reset (port status stays unchanged) - reset completed when this is 0 */
+#define CMD_LRESET (1 << 7)
+/* FIXME: ignoring host controller save/restore state for now. */
+#define CMD_CSS (1 << 8)
+#define CMD_CRS (1 << 9)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define CMD_EWE XHCI_CMD_EWE
+/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
+ * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
+ * '0' means the xHC can power it off if all ports are in the disconnect,
+ * disabled, or powered-off state.
+ */
+#define CMD_PM_INDEX (1 << 11)
+/* bits 12:31 are reserved (and should be preserved on writes). */
+
+/* USBSTS - USB status - status bitmasks */
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define STS_HALT XHCI_STS_HALT
+/* serious error, e.g. PCI parity error. The HC will clear the run/stop bit. */
+#define STS_FATAL (1 << 2)
+/* event interrupt - clear this prior to clearing any IP flags in IR set*/
+#define STS_EINT (1 << 3)
+/* port change detect */
+#define STS_PORT (1 << 4)
+/* bits 5:7 reserved and zeroed */
+/* save state status - '1' means xHC is saving state */
+#define STS_SAVE (1 << 8)
+/* restore state status - '1' means xHC is restoring state */
+#define STS_RESTORE (1 << 9)
+/* true: save or restore error */
+#define STS_SRE (1 << 10)
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define STS_CNR XHCI_STS_CNR
+/* true: internal Host Controller Error - SW needs to reset and reinitialize */
+#define STS_HCE (1 << 12)
+/* bits 13:31 reserved and should be preserved */
+
+/*
+ * DNCTRL - Device Notification Control Register - dev_notification bitmasks
+ * Generate a device notification event when the HC sees a transaction with a
+ * notification type that matches a bit set in this bit field.
+ */
+#define DEV_NOTE_MASK (0xffff)
+#define ENABLE_DEV_NOTE(x) (1 << x)
+/* Most of the device notification types should only be used for debug.
+ * SW does need to pay attention to function wake notifications.
+ */
+#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1)
+
+/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
+/* bit 0 is the command ring cycle state */
+/* stop ring operation after completion of the currently executing command */
+#define CMD_RING_PAUSE (1 << 1)
+/* stop ring immediately - abort the currently executing command */
+#define CMD_RING_ABORT (1 << 2)
+/* true: command ring is running */
+#define CMD_RING_RUNNING (1 << 3)
+/* bits 4:5 reserved and should be preserved */
+/* Command Ring pointer - bit mask for the lower 32 bits. */
+#define CMD_RING_ADDR_MASK (0xffffffc0)
+
+/* CONFIG - Configure Register - config_reg bitmasks */
+/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
+#define MAX_DEVS(p) ((p) & 0xff)
+/* bits 8:31 - reserved and should be preserved */
+
+/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
+/* true: device connected */
+#define PORT_CONNECT (1 << 0)
+/* true: port enabled */
+#define PORT_PE (1 << 1)
+/* bit 2 reserved and zeroed */
+/* true: port has an over-current condition */
+#define PORT_OC (1 << 3)
+/* true: port reset signaling asserted */
+#define PORT_RESET (1 << 4)
+/* Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe set sets the link state.
+ */
+/* true: port has power (see HCC_PPC) */
+#define PORT_POWER (1 << 9)
+/* bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't been initialized by a reset yet
+ * 1 - full speed
+ * 2 - low speed
+ * 3 - high speed
+ * 4 - super speed
+ * 5-15 reserved
+ */
+#define DEV_SPEED_MASK (0xf << 10)
+#define XDEV_FS (0x1 << 10)
+#define XDEV_LS (0x2 << 10)
+#define XDEV_HS (0x3 << 10)
+#define XDEV_SS (0x4 << 10)
+#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
+#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
+#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
+/* Bits 20:23 in the Slot Context are the speed for the device */
+#define SLOT_SPEED_FS (XDEV_FS << 10)
+#define SLOT_SPEED_LS (XDEV_LS << 10)
+#define SLOT_SPEED_HS (XDEV_HS << 10)
+#define SLOT_SPEED_SS (XDEV_SS << 10)
+/* Port Indicator Control */
+#define PORT_LED_OFF (0 << 14)
+#define PORT_LED_AMBER (1 << 14)
+#define PORT_LED_GREEN (2 << 14)
+#define PORT_LED_MASK (3 << 14)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE (1 << 16)
+/* true: connect status change */
+#define PORT_CSC (1 << 17)
+/* true: port enable change */
+#define PORT_PEC (1 << 18)
+/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
+ * into an enabled state, and the device into the default state. A "warm" reset
+ * also resets the link, forcing the device through the link training sequence.
+ * SW can also look at the Port Reset register to see when warm reset is done.
+ */
+#define PORT_WRC (1 << 19)
+/* true: over-current change */
+#define PORT_OCC (1 << 20)
+/* true: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC (1 << 21)
+/* port link status change - set on some port link state transitions:
+ * Transition Reason
+ * ------------------------------------------------------------------------------
+ * - U3 to Resume Wakeup signaling from a device
+ * - Resume to Recovery to U0 USB 3.0 device resume
+ * - Resume to U0 USB 2.0 device resume
+ * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
+ * - U3 to U0 Software resume of USB 2.0 device complete
+ * - U2 to U0 L1 resume of USB 2.1 device complete
+ * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
+ * - U0 to disabled L1 entry error with USB 2.1 device
+ * - Any state to inactive Error on USB 3.0 port
+ */
+#define PORT_PLC (1 << 22)
+/* port configure error change - port failed to configure its link partner */
+#define PORT_CEC (1 << 23)
+/* bit 24 reserved */
+/* wake on connect (enable) */
+#define PORT_WKCONN_E (1 << 25)
+/* wake on disconnect (enable) */
+#define PORT_WKDISC_E (1 << 26)
+/* wake on over-current (enable) */
+#define PORT_WKOC_E (1 << 27)
+/* bits 28:29 reserved */
+/* true: device is removable - for USB 3.0 roothub emulation */
+#define PORT_DEV_REMOVE (1 << 30)
+/* Initiate a warm port reset - complete when PORT_WRC is '1' */
+#define PORT_WR (1 << 31)
+
+/* Port Power Management Status and Control - port_power_base bitmasks */
+/* Inactivity timer value for transitions into U1, in microseconds.
+ * Timeout can be up to 127us. 0xFF means an infinite timeout.
+ */
+#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
+/* Inactivity timer value for transitions into U2 */
+#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
+/* Bits 24:31 for port testing */
+
+
+/**
+ * struct xhci_intr_reg - Interrupt Register Set
+ * @irq_pending: IMAN - Interrupt Management Register. Used to enable
+ * interrupts and check for pending interrupts.
+ * @irq_control: IMOD - Interrupt Moderation Register.
+ * Used to throttle interrupts.
+ * @erst_size: Number of segments in the Event Ring Segment Table (ERST).
+ * @erst_base: ERST base address.
+ * @erst_dequeue: Event ring dequeue pointer.
+ *
+ * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
+ * Ring Segment Table (ERST) associated with it. The event ring is comprised of
+ * multiple segments of the same size. The HC places events on the ring and
+ * "updates the Cycle bit in the TRBs to indicate to software the current
+ * position of the Enqueue Pointer." The HCD (Linux) processes those events and
+ * updates the dequeue pointer.
+ */
+struct xhci_intr_reg {
+ u32 irq_pending;
+ u32 irq_control;
+ u32 erst_size;
+ u32 rsvd;
+ u32 erst_base[2];
+ u32 erst_dequeue[2];
+};
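
A rough consumer-side sketch of the scheme described in the kernel-doc above; the function name is invented and, for brevity, it ignores segment wrap.

	/* Count event TRBs the HC has produced but software has not consumed:
	 * a TRB belongs to software while its cycle bit matches the ring's
	 * cycle state. */
	static unsigned int xhci_count_pending_events_sketch(struct xhci_hcd *xhci)
	{
		struct xhci_ring *ring = xhci->event_ring;
		union xhci_trb *trb = ring->dequeue;
		unsigned int n = 0;

		while ((trb->event_cmd.flags & TRB_CYCLE) == ring->cycle_state) {
			n++;
			trb++;	/* simplification: does not follow link TRBs */
		}
		return n;
	}
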
+
+/* irq_pending bitmasks */
+#define ER_IRQ_PENDING(p) ((p) & 0x1)
+/* bits 2:31 need to be preserved */
+/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
+#define ER_IRQ_CLEAR(p) ((p) & 0xfffffffe)
+#define ER_IRQ_ENABLE(p) ((ER_IRQ_CLEAR(p)) | 0x2)
+#define ER_IRQ_DISABLE(p) ((ER_IRQ_CLEAR(p)) & ~(0x2))
+
+/* irq_control bitmasks */
+/* Minimum interval between interrupts (in 250ns intervals). The interval
+ * between interrupts will be longer if there are no events on the event ring.
+ * Default is 4000 (1 ms).
+ */
+#define ER_IRQ_INTERVAL_MASK (0xffff)
+/* Counter used to count down the time to the next interrupt - HW use only */
+#define ER_IRQ_COUNTER_MASK (0xffff << 16)
+
+/* erst_size bitmasks */
+/* Preserve bits 16:31 of erst_size */
+#define ERST_SIZE_MASK (0xffff << 16)
+
+/* erst_dequeue bitmasks */
+/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
+ * where the current dequeue pointer lies. This is an optional HW hint.
+ */
+#define ERST_DESI_MASK (0x7)
+/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
+ * a work queue (or delayed service routine)?
+ */
+#define ERST_EHB (1 << 3)
+#define ERST_PTR_MASK (0xf)
+
+/**
+ * struct xhci_run_regs
+ * @microframe_index:
+ * MFINDEX - current microframe number
+ *
+ * Section 5.5 Host Controller Runtime Registers:
+ * "Software should read and write these registers using only Dword (32 bit)
+ * or larger accesses"
+ */
+struct xhci_run_regs {
+ u32 microframe_index;
+ u32 rsvd[7];
+ struct xhci_intr_reg ir_set[128];
+};
+
+/**
+ * struct doorbell_array
+ *
+ * Section 5.6
+ */
+struct xhci_doorbell_array {
+ u32 doorbell[256];
+};
+
+#define DB_TARGET_MASK 0xFFFFFF00
+#define DB_STREAM_ID_MASK 0x0000FFFF
+#define DB_TARGET_HOST 0x0
+#define DB_STREAM_ID_HOST 0x0
+#define DB_MASK (0xff << 8)
+
+/* Endpoint Target - bits 0:7 */
+#define EPI_TO_DB(p) (((p) + 1) & 0xff)
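
A sketch of how these doorbell fields could be combined to ring an endpoint doorbell; the helper name is invented and the read-modify-write pattern is only illustrative.

	static void xhci_ring_ep_db_sketch(struct xhci_hcd *xhci,
					   unsigned int slot_id,
					   unsigned int ep_index)
	{
		__u32 __iomem *db = &xhci->dba->doorbell[slot_id];
		u32 field = xhci_readl(xhci, db) & DB_MASK;

		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db);
	}
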
+
+
+/**
+ * struct xhci_slot_ctx
+ * @dev_info: Route string, device speed, hub info, and last valid endpoint
+ * @dev_info2: Max exit latency for device number, root hub port number
+ * @tt_info: tt_info is used to construct split transaction tokens
+ * @dev_state: slot state and device address
+ *
+ * Slot Context - section 6.2.1.1. This assumes the HC uses 32-byte context
+ * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
+ * reserved at the end of the slot context for HC internal use.
+ */
+struct xhci_slot_ctx {
+ u32 dev_info;
+ u32 dev_info2;
+ u32 tt_info;
+ u32 dev_state;
+ /* offset 0x10 to 0x1f reserved for HC internal use */
+ u32 reserved[4];
+};
+
+/* dev_info bitmasks */
+/* Route String - 0:19 */
+#define ROUTE_STRING_MASK (0xfffff)
+/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
+#define DEV_SPEED (0xf << 20)
+/* bit 24 reserved */
+/* Is this LS/FS device connected through a HS hub? - bit 25 */
+#define DEV_MTT (0x1 << 25)
+/* Set if the device is a hub - bit 26 */
+#define DEV_HUB (0x1 << 26)
+/* Index of the last valid endpoint context in this device context - 27:31 */
+#define LAST_CTX_MASK (0x1f << 27)
+#define LAST_CTX(p) ((p) << 27)
+#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1)
+#define SLOT_FLAG (1 << 0)
+#define EP0_FLAG (1 << 1)
+
+/* dev_info2 bitmasks */
+/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
+#define MAX_EXIT (0xffff)
+/* Root hub port number that is needed to access the USB device */
+#define ROOT_HUB_PORT(p) (((p) & 0xff) << 16)
+
+/* tt_info bitmasks */
+/*
+ * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
+ * The Slot ID of the hub that isolates the high speed signaling from
+ * this low or full-speed device. '0' if attached to root hub port.
+ */
+#define TT_SLOT (0xff)
+/*
+ * The number of the downstream facing port of the high-speed hub
+ * '0' if the device is not low or full speed.
+ */
+#define TT_PORT (0xff << 8)
+
+/* dev_state bitmasks */
+/* USB device address - assigned by the HC */
+#define DEV_ADDR_MASK (0xff)
+/* bits 8:26 reserved */
+/* Slot state */
+#define SLOT_STATE (0x1f << 27)
+#define GET_SLOT_STATE(p) (((p) & (0x1f << 27)) >> 27)
+
+
+/**
+ * struct xhci_ep_ctx
+ * @ep_info: endpoint state, streams, mult, and interval information.
+ * @ep_info2: information on endpoint type, max packet size, max burst size,
+ * error count, and whether the HC will force an event for all
+ * transactions.
+ * @deq: 64-bit ring dequeue pointer address. If the endpoint only
+ * defines one stream, this points to the endpoint transfer ring.
+ * Otherwise, it points to a stream context array, which has a
+ * ring pointer for each flow.
+ * @tx_info:
+ * Average TRB lengths for the endpoint ring and
+ * max payload within an Endpoint Service Interval Time (ESIT).
+ *
+ * Endpoint Context - section 6.2.1.2. This assumes the HC uses 32-byte context
+ * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
+ * reserved at the end of the endpoint context for HC internal use.
+ */
+struct xhci_ep_ctx {
+ u32 ep_info;
+ u32 ep_info2;
+ u32 deq[2];
+ u32 tx_info;
+ /* offset 0x14 - 0x1f reserved for HC internal use */
+ u32 reserved[3];
+};
+
+/* ep_info bitmasks */
+/*
+ * Endpoint State - bits 0:2
+ * 0 - disabled
+ * 1 - running
+ * 2 - halted due to halt condition - ok to manipulate endpoint ring
+ * 3 - stopped
+ * 4 - TRB error
+ * 5-7 - reserved
+ */
+#define EP_STATE_MASK (0xf)
+#define EP_STATE_DISABLED 0
+#define EP_STATE_RUNNING 1
+#define EP_STATE_HALTED 2
+#define EP_STATE_STOPPED 3
+#define EP_STATE_ERROR 4
+/* Mult - Max number of bursts within an interval, in EP companion desc. */
+#define EP_MULT(p) ((p & 0x3) << 8)
+/* bits 10:14 are Max Primary Streams */
+/* bit 15 is Linear Stream Array */
+/* Interval - period between requests to an endpoint - 125us increments. */
+#define EP_INTERVAL(p) ((p & 0xff) << 16)
+
+/* ep_info2 bitmasks */
+/*
+ * Force Event - generate transfer events for all TRBs for this endpoint
+ * This will tell the HC to ignore the IOC and ISP flags (for debugging only).
+ */
+#define FORCE_EVENT (0x1)
+#define ERROR_COUNT(p) (((p) & 0x3) << 1)
+#define EP_TYPE(p) ((p) << 3)
+#define ISOC_OUT_EP 1
+#define BULK_OUT_EP 2
+#define INT_OUT_EP 3
+#define CTRL_EP 4
+#define ISOC_IN_EP 5
+#define BULK_IN_EP 6
+#define INT_IN_EP 7
+/* bit 6 reserved */
+/* bit 7 is Host Initiate Disable - for disabling stream selection */
+#define MAX_BURST(p) (((p)&0xff) << 8)
+#define MAX_PACKET(p) (((p)&0xffff) << 16)
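
Illustrative composition of ep_info2 for, say, a 512-byte bulk IN endpoint with a three-error retry budget; the function name and values are examples, not taken from this patch.

	/* A real driver derives these from the endpoint descriptor and its
	 * SuperSpeed companion descriptor. */
	static u32 xhci_bulk_in_ep_info2_sketch(void)
	{
		return EP_TYPE(BULK_IN_EP) | MAX_PACKET(512) |
		       MAX_BURST(0) | ERROR_COUNT(3);
	}
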
+
+
+/**
+ * struct xhci_device_control
+ * Input/Output context; see section 6.2.5.
+ *
+ * @drop_flags:	set the bit of the endpoint context you want to disable
+ * @add_flags:	set the bit of the endpoint context you want to enable
+ */
+struct xhci_device_control {
+ u32 drop_flags;
+ u32 add_flags;
+ u32 rsvd[6];
+ struct xhci_slot_ctx slot;
+ struct xhci_ep_ctx ep[31];
+};
+
+/* drop context bitmasks */
+#define DROP_EP(x) (0x1 << x)
+/* add context bitmasks */
+#define ADD_EP(x) (0x1 << x)
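
A sketch of how the input control context flags above might be set when enabling one endpoint; the function and its ep_ctx_index parameter are assumptions for illustration.

	static void xhci_input_ctx_add_ep_sketch(struct xhci_device_control *ctrl,
						 unsigned int ep_ctx_index)
	{
		/* ep_ctx_index is the endpoint's position in the device
		 * context: slot context = bit 0, EP0 = bit 1, further
		 * endpoints follow in order. */
		ctrl->add_flags |= SLOT_FLAG | ADD_EP(ep_ctx_index);
		ctrl->drop_flags &= ~DROP_EP(ep_ctx_index);
	}
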
+
+
+struct xhci_virt_device {
+ /*
+ * Commands to the hardware are passed an "input context" that
+ * tells the hardware what to change in its data structures.
+ * The hardware will return changes in an "output context" that
+ * software must allocate for the hardware. We need to keep
+ * track of input and output contexts separately because
+ * these commands might fail and we don't trust the hardware.
+ */
+ struct xhci_device_control *out_ctx;
+ dma_addr_t out_ctx_dma;
+ /* Used for addressing devices and configuration changes */
+ struct xhci_device_control *in_ctx;
+ dma_addr_t in_ctx_dma;
+ /* FIXME when stream support is added */
+ struct xhci_ring *ep_rings[31];
+ /* Temporary storage in case the configure endpoint command fails and we
+ * have to restore the device state to the previous state
+ */
+ struct xhci_ring *new_ep_rings[31];
+ struct completion cmd_completion;
+ /* Status of the last command issued for this device */
+ u32 cmd_status;
+};
+
+
+/**
+ * struct xhci_device_context_array
+ * @dev_context_ptrs: array of 64-bit DMA addresses for device contexts
+ */
+struct xhci_device_context_array {
+ /* 64-bit device addresses; we only write 32-bit addresses */
+ u32 dev_context_ptrs[2*MAX_HC_SLOTS];
+ /* private xHCD pointers */
+ dma_addr_t dma;
+};
+/* TODO: write function to set the 64-bit device DMA address */
+/*
+ * TODO: change this to be dynamically sized at HC mem init time since the HC
+ * might not be able to handle the maximum number of devices possible.
+ */
+
+
+struct xhci_stream_ctx {
+ /* 64-bit stream ring address, cycle state, and stream type */
+ u32 stream_ring[2];
+	/* offset 0x8 - 0xf reserved for HC internal use */
+ u32 reserved[2];
+};
+
+
+struct xhci_transfer_event {
+ /* 64-bit buffer address, or immediate data */
+ u32 buffer[2];
+ u32 transfer_len;
+ /* This field is interpreted differently based on the type of TRB */
+ u32 flags;
+};
+
+/** Transfer Event bit fields **/
+#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
+
+/* Completion Code - only applicable for some types of TRBs */
+#define COMP_CODE_MASK (0xff << 24)
+#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24)
+#define COMP_SUCCESS 1
+/* Data Buffer Error */
+#define COMP_DB_ERR 2
+/* Babble Detected Error */
+#define COMP_BABBLE 3
+/* USB Transaction Error */
+#define COMP_TX_ERR 4
+/* TRB Error - some TRB field is invalid */
+#define COMP_TRB_ERR 5
+/* Stall Error - USB device is stalled */
+#define COMP_STALL 6
+/* Resource Error - HC doesn't have memory for that device configuration */
+#define COMP_ENOMEM 7
+/* Bandwidth Error - not enough room in schedule for this dev config */
+#define COMP_BW_ERR 8
+/* No Slots Available Error - HC ran out of device slots */
+#define COMP_ENOSLOTS 9
+/* Invalid Stream Type Error */
+#define COMP_STREAM_ERR 10
+/* Slot Not Enabled Error - doorbell rung for disabled device slot */
+#define COMP_EBADSLT 11
+/* Endpoint Not Enabled Error */
+#define COMP_EBADEP 12
+/* Short Packet */
+#define COMP_SHORT_TX 13
+/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */
+#define COMP_UNDERRUN 14
+/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */
+#define COMP_OVERRUN 15
+/* Virtual Function Event Ring Full Error */
+#define COMP_VF_FULL 16
+/* Parameter Error - Context parameter is invalid */
+#define COMP_EINVAL 17
+/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */
+#define COMP_BW_OVER 18
+/* Context State Error - illegal context state transition requested */
+#define COMP_CTX_STATE 19
+/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */
+#define COMP_PING_ERR 20
+/* Event Ring is full */
+#define COMP_ER_FULL 21
+/* Missed Service Error - HC couldn't service an isoc ep within interval */
+#define COMP_MISSED_INT 23
+/* Successfully stopped command ring */
+#define COMP_CMD_STOP 24
+/* Successfully aborted current command and stopped command ring */
+#define COMP_CMD_ABORT 25
+/* Stopped - transfer was terminated by a stop endpoint command */
+#define COMP_STOP 26
+/* Same as COMP_STOP, but the transferred length in the event is invalid */
+#define COMP_STOP_INVAL 27
+/* Control Abort Error - Debug Capability - control pipe aborted */
+#define COMP_DBG_ABORT 28
+/* Completion codes 29 and 30 reserved */
+/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
+#define COMP_BUFF_OVER 31
+/* Event Lost Error - xHC has an "internal event overrun condition" */
+#define COMP_ISSUES 32
+/* Undefined Error - reported when other error codes don't apply */
+#define COMP_UNKNOWN 33
+/* Invalid Stream ID Error */
+#define COMP_STRID_ERR 34
+/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
+/* FIXME - check for this */
+#define COMP_2ND_BW_ERR 35
+/* Split Transaction Error */
+#define COMP_SPLIT_ERR 36
+
+struct xhci_link_trb {
+ /* 64-bit segment pointer*/
+ u32 segment_ptr[2];
+ u32 intr_target;
+ u32 control;
+};
+
+/* control bitfields */
+#define LINK_TOGGLE (0x1<<1)
+
+/* Command completion event TRB */
+struct xhci_event_cmd {
+ /* Pointer to command TRB, or the value passed by the event data trb */
+ u32 cmd_trb[2];
+ u32 status;
+ u32 flags;
+};
+
+/* flags bitmasks */
+/* bits 16:23 are the virtual function ID */
+/* bits 24:31 are the slot ID */
+#define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24)
+#define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24)
+
+/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
+#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1)
+#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16)
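
A small sketch showing the masks above applied to a command completion event; the helper name is invented and the debug print is only illustrative.

	static void xhci_dump_cmd_event_sketch(struct xhci_hcd *xhci,
					       struct xhci_event_cmd *event)
	{
		int slot_id = TRB_TO_SLOT_ID(event->flags);
		int code = GET_COMP_CODE(event->status);

		xhci_dbg(xhci, "cmd completion: slot %d code %d (%s)\n",
				slot_id, code,
				code == COMP_SUCCESS ? "success" : "error");
	}
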
+
+
+/* Port Status Change Event TRB fields */
+/* Port ID - bits 31:24 */
+#define GET_PORT_ID(p) (((p) & (0xff << 24)) >> 24)
+
+/* Normal TRB fields */
+/* transfer_len bitmasks - bits 0:16 */
+#define TRB_LEN(p) ((p) & 0x1ffff)
+/* TD size - number of bytes remaining in the TD (including this TRB):
+ * bits 17 - 21. Shift the number of bytes by 10. */
+#define TD_REMAINDER(p) ((((p) >> 10) & 0x1f) << 17)
+/* Interrupter Target - which MSI-X vector to target the completion event at */
+#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
+#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
+
+/* Cycle bit - indicates TRB ownership by HC or HCD */
+#define TRB_CYCLE (1<<0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT (1<<1)
+/* Interrupt on short packet */
+#define TRB_ISP (1<<2)
+/* Set PCIe no snoop attribute */
+#define TRB_NO_SNOOP (1<<3)
+/* Chain multiple TRBs into a TD */
+#define TRB_CHAIN (1<<4)
+/* Interrupt on completion */
+#define TRB_IOC (1<<5)
+/* The buffer pointer contains immediate data */
+#define TRB_IDT (1<<6)
+
+
+/* Control transfer TRB specific fields */
+#define TRB_DIR_IN (1<<16)
+
+struct xhci_generic_trb {
+ u32 field[4];
+};
+
+union xhci_trb {
+ struct xhci_link_trb link;
+ struct xhci_transfer_event trans_event;
+ struct xhci_event_cmd event_cmd;
+ struct xhci_generic_trb generic;
+};
+
+/* TRB bit mask */
+#define TRB_TYPE_BITMASK (0xfc00)
+#define TRB_TYPE(p) ((p) << 10)
+/* TRB type IDs */
+/* bulk, interrupt, isoc scatter/gather, and control data stage */
+#define TRB_NORMAL 1
+/* setup stage for control transfers */
+#define TRB_SETUP 2
+/* data stage for control transfers */
+#define TRB_DATA 3
+/* status stage for control transfers */
+#define TRB_STATUS 4
+/* isoc transfers */
+#define TRB_ISOC 5
+/* TRB for linking ring segments */
+#define TRB_LINK 6
+#define TRB_EVENT_DATA 7
+/* Transfer Ring No-op (not for the command ring) */
+#define TRB_TR_NOOP 8
+/* Command TRBs */
+/* Enable Slot Command */
+#define TRB_ENABLE_SLOT 9
+/* Disable Slot Command */
+#define TRB_DISABLE_SLOT 10
+/* Address Device Command */
+#define TRB_ADDR_DEV 11
+/* Configure Endpoint Command */
+#define TRB_CONFIG_EP 12
+/* Evaluate Context Command */
+#define TRB_EVAL_CONTEXT 13
+/* Reset Transfer Ring Command */
+#define TRB_RESET_RING 14
+/* Stop Transfer Ring Command */
+#define TRB_STOP_RING 15
+/* Set Transfer Ring Dequeue Pointer Command */
+#define TRB_SET_DEQ 16
+/* Reset Device Command */
+#define TRB_RESET_DEV 17
+/* Force Event Command (opt) */
+#define TRB_FORCE_EVENT 18
+/* Negotiate Bandwidth Command (opt) */
+#define TRB_NEG_BANDWIDTH 19
+/* Set Latency Tolerance Value Command (opt) */
+#define TRB_SET_LT 20
+/* Get port bandwidth Command */
+#define TRB_GET_BW 21
+/* Force Header Command - generate a transaction or link management packet */
+#define TRB_FORCE_HEADER 22
+/* No-op Command - not for transfer rings */
+#define TRB_CMD_NOOP 23
+/* TRB IDs 24-31 reserved */
+/* Event TRBS */
+/* Transfer Event */
+#define TRB_TRANSFER 32
+/* Command Completion Event */
+#define TRB_COMPLETION 33
+/* Port Status Change Event */
+#define TRB_PORT_STATUS 34
+/* Bandwidth Request Event (opt) */
+#define TRB_BANDWIDTH_EVENT 35
+/* Doorbell Event (opt) */
+#define TRB_DOORBELL 36
+/* Host Controller Event */
+#define TRB_HC_EVENT 37
+/* Device Notification Event - device sent function wake notification */
+#define TRB_DEV_NOTE 38
+/* MFINDEX Wrap Event - microframe counter wrapped */
+#define TRB_MFINDEX_WRAP 39
+/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
+
+/*
+ * TRBS_PER_SEGMENT must be a multiple of 4,
+ * since the command ring is 64-byte aligned.
+ * It must also be greater than 16.
+ */
+#define TRBS_PER_SEGMENT 64
+#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
+/* TRB buffer pointers can't cross 64KB boundaries */
+#define TRB_MAX_BUFF_SHIFT 16
+#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
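
A worked sketch of the 64KB rule above: counting how many TRBs a contiguous buffer needs so that no single TRB's buffer crosses a 64KB boundary (the helper name is invented).

	static unsigned int xhci_count_trbs_sketch(u64 addr, unsigned int len)
	{
		unsigned int num_trbs = 1;
		/* bytes left before the first 64KB boundary after addr */
		unsigned int run = TRB_MAX_BUFF_SIZE -
				(addr & (TRB_MAX_BUFF_SIZE - 1));

		while (run < len) {
			num_trbs++;
			run += TRB_MAX_BUFF_SIZE;
		}
		return num_trbs;
	}
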
+
+struct xhci_segment {
+ union xhci_trb *trbs;
+ /* private to HCD */
+ struct xhci_segment *next;
+ dma_addr_t dma;
+};
+
+struct xhci_td {
+ struct list_head td_list;
+ struct list_head cancelled_td_list;
+ struct urb *urb;
+ struct xhci_segment *start_seg;
+ union xhci_trb *first_trb;
+ union xhci_trb *last_trb;
+};
+
+struct xhci_ring {
+ struct xhci_segment *first_seg;
+ union xhci_trb *enqueue;
+ struct xhci_segment *enq_seg;
+ unsigned int enq_updates;
+ union xhci_trb *dequeue;
+ struct xhci_segment *deq_seg;
+ unsigned int deq_updates;
+ struct list_head td_list;
+ /* ---- Related to URB cancellation ---- */
+ struct list_head cancelled_td_list;
+ unsigned int cancels_pending;
+ unsigned int state;
+#define SET_DEQ_PENDING (1 << 0)
+ /* The TRB that was last reported in a stopped endpoint ring */
+ union xhci_trb *stopped_trb;
+ struct xhci_td *stopped_td;
+ /*
+ * Write the cycle state into the TRB cycle field to give ownership of
+ * the TRB to the host controller (if we are the producer), or to check
+ * if we own the TRB (if we are the consumer). See section 4.9.1.
+ */
+ u32 cycle_state;
+};
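
A one-line sketch of the ownership test described in the cycle_state comment above; the helper name is invented.

	/* As the consumer, software owns a TRB when its cycle bit matches the
	 * ring's current cycle state. */
	static inline bool xhci_sw_owns_trb_sketch(struct xhci_ring *ring,
						   union xhci_trb *trb)
	{
		return (trb->generic.field[3] & TRB_CYCLE) == ring->cycle_state;
	}
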
+
+struct xhci_erst_entry {
+ /* 64-bit event ring segment address */
+ u32 seg_addr[2];
+ u32 seg_size;
+ /* Set to zero */
+ u32 rsvd;
+};
+
+struct xhci_erst {
+ struct xhci_erst_entry *entries;
+ unsigned int num_entries;
+ /* xhci->event_ring keeps track of segment dma addresses */
+ dma_addr_t erst_dma_addr;
+ /* Num entries the ERST can contain */
+ unsigned int erst_size;
+};
+
+/*
+ * Each segment table entry is 4*32bits long. 1K seems like an ok size:
+ * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table,
+ * meaning 64 ring segments.
+ * Initial allocated size of the ERST, in number of entries */
+#define ERST_NUM_SEGS 1
+/* Initial allocated size of the ERST, in number of entries */
+#define ERST_SIZE 64
+/* Initial number of event segment rings allocated */
+#define ERST_ENTRIES 1
+/* Poll every 60 seconds */
+#define POLL_TIMEOUT 60
+/* XXX: Make these module parameters */
+
+
+/* There is one xhci_hcd structure per controller */
+struct xhci_hcd {
+ /* glue to PCI and HCD framework */
+ struct xhci_cap_regs __iomem *cap_regs;
+ struct xhci_op_regs __iomem *op_regs;
+ struct xhci_run_regs __iomem *run_regs;
+ struct xhci_doorbell_array __iomem *dba;
+ /* Our HCD's current interrupter register set */
+ struct xhci_intr_reg __iomem *ir_set;
+
+ /* Cached register copies of read-only HC data */
+ __u32 hcs_params1;
+ __u32 hcs_params2;
+ __u32 hcs_params3;
+ __u32 hcc_params;
+
+ spinlock_t lock;
+
+ /* packed release number */
+ u8 sbrn;
+ u16 hci_version;
+ u8 max_slots;
+ u8 max_interrupters;
+ u8 max_ports;
+ u8 isoc_threshold;
+ int event_ring_max;
+ int addr_64;
+ /* 4KB min, 128MB max */
+ int page_size;
+ /* Valid values are 12 to 20, inclusive */
+ int page_shift;
+ /* only one MSI vector for now, but might need more later */
+ int msix_count;
+ struct msix_entry *msix_entries;
+ /* data structures */
+ struct xhci_device_context_array *dcbaa;
+ struct xhci_ring *cmd_ring;
+ struct xhci_ring *event_ring;
+ struct xhci_erst erst;
+ /* slot enabling and address device helpers */
+ struct completion addr_dev;
+ int slot_id;
+ /* Internal mirror of the HW's dcbaa */
+ struct xhci_virt_device *devs[MAX_HC_SLOTS];
+
+ /* DMA pools */
+ struct dma_pool *device_pool;
+ struct dma_pool *segment_pool;
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+ /* Poll the rings - for debugging */
+ struct timer_list event_ring_timer;
+ int zombie;
+#endif
+ /* Statistics */
+ int noops_submitted;
+ int noops_handled;
+ int error_bitmask;
+};
+
+/* For testing purposes */
+#define NUM_TEST_NOOPS 0
+
+/* convert between an HCD pointer and the corresponding xhci_hcd */
+static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
+{
+ return (struct xhci_hcd *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
+{
+ return container_of((void *) xhci, struct usb_hcd, hcd_priv);
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+#define XHCI_DEBUG 1
+#else
+#define XHCI_DEBUG 0
+#endif
+
+#define xhci_dbg(xhci, fmt, args...) \
+ do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+#define xhci_info(xhci, fmt, args...) \
+ do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+#define xhci_err(xhci, fmt, args...) \
+ dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#define xhci_warn(xhci, fmt, args...) \
+ dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+
+/* TODO: copied from ehci.h - can be refactored? */
+/* xHCI spec says all registers are little endian */
+static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
+ __u32 __iomem *regs)
+{
+ return readl(regs);
+}
+static inline void xhci_writel(struct xhci_hcd *xhci,
+ const unsigned int val, __u32 __iomem *regs)
+{
+ if (!in_interrupt())
+ xhci_dbg(xhci,
+ "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
+ regs, val);
+ writel(val, regs);
+}
+
+/* xHCI debugging */
+void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_registers(struct xhci_hcd *xhci);
+void xhci_dbg_regs(struct xhci_hcd *xhci);
+void xhci_print_run_regs(struct xhci_hcd *xhci);
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb);
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb);
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep);
+
+/* xHCI memory management */
+void xhci_mem_cleanup(struct xhci_hcd *xhci);
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
+void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
+int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
+ struct usb_device *udev, struct usb_host_endpoint *ep,
+ gfp_t mem_flags);
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+
+#ifdef CONFIG_PCI
+/* xHCI PCI glue */
+int xhci_register_pci(void);
+void xhci_unregister_pci(void);
+#endif
+
+/* xHCI host controller glue */
+int xhci_halt(struct xhci_hcd *xhci);
+int xhci_reset(struct xhci_hcd *xhci);
+int xhci_init(struct usb_hcd *hcd);
+int xhci_run(struct usb_hcd *hcd);
+void xhci_stop(struct usb_hcd *hcd);
+void xhci_shutdown(struct usb_hcd *hcd);
+int xhci_get_frame(struct usb_hcd *hcd);
+irqreturn_t xhci_irq(struct usb_hcd *hcd);
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
+int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+
+/* xHCI ring, segment, TRB, and TD functions */
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+void xhci_ring_cmd_db(struct xhci_hcd *xhci);
+void *xhci_setup_one_noop(struct xhci_hcd *xhci);
+void xhci_handle_event(struct xhci_hcd *xhci);
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci);
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id);
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index);
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+ int slot_id, unsigned int ep_index);
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+ int slot_id, unsigned int ep_index);
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id);
+
+/* xHCI roothub code */
+int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
+ char *buf, u16 wLength);
+int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+
+#endif /* __LINUX_XHCI_HCD_H */
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index a4ef77ef917..3c5fe5cee05 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -726,12 +726,18 @@ static const struct file_operations iowarrior_fops = {
.poll = iowarrior_poll,
};
+static char *iowarrior_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
/*
* usb class driver info in order to get a minor number from the usb core,
* and to have the device registered with devfs and the driver core
*/
static struct usb_class_driver iowarrior_class = {
.name = "iowarrior%d",
+ .nodename = iowarrior_nodename,
.fops = &iowarrior_fops,
.minor_base = IOWARRIOR_MINOR_BASE,
};
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index ab0f3226158..c1e2433f640 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -266,12 +266,18 @@ static const struct file_operations tower_fops = {
.llseek = tower_llseek,
};
+static char *legousbtower_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
/*
* usb class driver info in order to get a minor number from the usb core,
* and to have the device registered with the driver core
*/
static struct usb_class_driver tower_class = {
.name = "legousbtower%d",
+ .nodename = legousbtower_nodename,
.fops = &tower_fops,
.minor_base = LEGO_USB_TOWER_MINOR_BASE,
};
diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig
index 7603cbe0865..30ea7ca6846 100644
--- a/drivers/usb/misc/sisusbvga/Kconfig
+++ b/drivers/usb/misc/sisusbvga/Kconfig
@@ -1,7 +1,7 @@
config USB_SISUSBVGA
tristate "USB 2.0 SVGA dongle support (Net2280/SiS315)"
- depends on USB && USB_EHCI_HCD
+ depends on USB && (USB_MUSB_HDRC || USB_EHCI_HCD)
---help---
Say Y here if you intend to attach a USB2VGA dongle based on a
Net2280 and a SiS315 chip.
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 5f1a19d1497..a9f06d76960 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1072,23 +1072,34 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async)
*/
msleep (jiffies % (2 * INTERRUPT_RATE));
if (async) {
-retry:
- retval = usb_unlink_urb (urb);
- if (retval == -EBUSY || retval == -EIDRM) {
- /* we can't unlink urbs while they're completing.
- * or if they've completed, and we haven't resubmitted.
- * "normal" drivers would prevent resubmission, but
- * since we're testing unlink paths, we can't.
- */
- ERROR(dev, "unlink retry\n");
- goto retry;
+ while (!completion_done(&completion)) {
+ retval = usb_unlink_urb(urb);
+
+ switch (retval) {
+ case -EBUSY:
+ case -EIDRM:
+ /* we can't unlink urbs while they're completing
+ * or if they've completed, and we haven't
+ * resubmitted. "normal" drivers would prevent
+ * resubmission, but since we're testing unlink
+ * paths, we can't.
+ */
+ ERROR(dev, "unlink retry\n");
+ continue;
+ case 0:
+ case -EINPROGRESS:
+ break;
+
+ default:
+ dev_err(&dev->intf->dev,
+ "unlink fail %d\n", retval);
+ return retval;
+ }
+
+ break;
}
} else
usb_kill_urb (urb);
- if (!(retval == 0 || retval == -EINPROGRESS)) {
- dev_err(&dev->intf->dev, "unlink fail %d\n", retval);
- return retval;
- }
wait_for_completion (&completion);
retval = urb->status;
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 1f715436d6d..a7eb4c99342 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -733,7 +733,7 @@ int __init mon_text_init(void)
{
struct dentry *mondir;
- mondir = debugfs_create_dir("usbmon", NULL);
+ mondir = debugfs_create_dir("usbmon", usb_debug_root);
if (IS_ERR(mondir)) {
printk(KERN_NOTICE TAG ": debugfs is not available\n");
return -ENODEV;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index b66e8544d8b..70073b157f0 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -10,6 +10,7 @@ comment "Enable Host or Gadget support to see Inventra options"
config USB_MUSB_HDRC
depends on (USB || USB_GADGET) && HAVE_CLK
depends on !SUPERH
+ select NOP_USB_XCEIV if ARCH_DAVINCI
select TWL4030_USB if MACH_OMAP_3430SDP
select USB_OTG_UTILS
tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
@@ -55,6 +56,7 @@ comment "Blackfin high speed USB Support"
config USB_TUSB6010
boolean "TUSB 6010 support"
depends on USB_MUSB_HDRC && !USB_MUSB_SOC
+ select NOP_USB_XCEIV
default y
help
The TUSB 6010 chip, from Texas Instruments, connects a discrete
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 78613485209..f2f66ebc736 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -143,7 +143,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
u16 val;
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_IDLE:
case OTG_STATE_A_WAIT_BCON:
/* Start a new session */
@@ -154,7 +154,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
val = musb_readw(musb->mregs, MUSB_DEVCTL);
if (!(val & MUSB_DEVCTL_BDEVICE)) {
gpio_set_value(musb->config->gpio_vrsel, 1);
- musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
} else {
gpio_set_value(musb->config->gpio_vrsel, 0);
@@ -247,6 +247,11 @@ int __init musb_platform_init(struct musb *musb)
}
gpio_direction_output(musb->config->gpio_vrsel, 0);
+ usb_nop_xceiv_register();
+ musb->xceiv = otg_get_transceiver();
+ if (!musb->xceiv)
+ return -ENODEV;
+
if (ANOMALY_05000346) {
bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
SSYNC();
@@ -291,7 +296,7 @@ int __init musb_platform_init(struct musb *musb)
musb_conn_timer_handler, (unsigned long) musb);
}
if (is_peripheral_enabled(musb))
- musb->xceiv.set_power = bfin_set_power;
+ musb->xceiv->set_power = bfin_set_power;
musb->isr = blackfin_interrupt;
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 1976e9b4180..c3577bbbae6 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -6,6 +6,7 @@
* The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
*/
+#include <linux/platform_device.h>
#include <linux/usb.h>
#include "musb_core.h"
@@ -1145,17 +1146,27 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
return completed;
}
-void cppi_completion(struct musb *musb, u32 rx, u32 tx)
+irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
- void __iomem *tibase;
- int i, index;
+ struct musb *musb = dev_id;
struct cppi *cppi;
+ void __iomem *tibase;
struct musb_hw_ep *hw_ep = NULL;
+ u32 rx, tx;
+ int i, index;
cppi = container_of(musb->dma_controller, struct cppi, controller);
tibase = musb->ctrl_base;
+ tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
+ rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
+
+ if (!tx && !rx)
+ return IRQ_NONE;
+
+ DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
+
/* process TX channels */
for (index = 0; tx; tx = tx >> 1, index++) {
struct cppi_channel *tx_ch;
@@ -1273,6 +1284,8 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
/* write to CPPI EOI register to re-enable interrupts */
musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
+
+ return IRQ_HANDLED;
}
/* Instantiate a software object representing a DMA controller. */
@@ -1280,6 +1293,9 @@ struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
struct cppi *controller;
+ struct device *dev = musb->controller;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq = platform_get_irq(pdev, 1);
controller = kzalloc(sizeof *controller, GFP_KERNEL);
if (!controller)
@@ -1310,6 +1326,15 @@ dma_controller_create(struct musb *musb, void __iomem *mregs)
return NULL;
}
+ if (irq > 0) {
+ if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
+ dev_err(dev, "request_irq %d failed!\n", irq);
+ dma_controller_destroy(&controller->controller);
+ return NULL;
+ }
+ controller->irq = irq;
+ }
+
return &controller->controller;
}
@@ -1322,6 +1347,9 @@ void dma_controller_destroy(struct dma_controller *c)
cppi = container_of(c, struct cppi, controller);
+ if (cppi->irq)
+ free_irq(cppi->irq, cppi->musb);
+
/* assert: caller stopped the controller first */
dma_pool_destroy(cppi->pool);
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
index 729b4071787..8a39de3e6e4 100644
--- a/drivers/usb/musb/cppi_dma.h
+++ b/drivers/usb/musb/cppi_dma.h
@@ -119,6 +119,8 @@ struct cppi {
void __iomem *mregs; /* Mentor regs */
void __iomem *tibase; /* TI/CPPI regs */
+ int irq;
+
struct cppi_channel tx[4];
struct cppi_channel rx[4];
@@ -127,7 +129,7 @@ struct cppi {
struct list_head tx_complete;
};
-/* irq handling hook */
-extern void cppi_completion(struct musb *, u32 rx, u32 tx);
+/* CPPI IRQ handler */
+extern irqreturn_t cppi_interrupt(int, void *);
#endif /* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 10d11ab113a..180d7daa409 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -215,7 +215,7 @@ static void otg_timer(unsigned long _musb)
DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_WAIT_VFALL:
/* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
* seems to mis-handle session "start" otherwise (or in our
@@ -226,7 +226,7 @@ static void otg_timer(unsigned long _musb)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
break;
}
- musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
break;
@@ -251,7 +251,7 @@ static void otg_timer(unsigned long _musb)
if (devctl & MUSB_DEVCTL_BDEVICE)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
else
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
break;
default:
break;
@@ -265,6 +265,7 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
irqreturn_t retval = IRQ_NONE;
struct musb *musb = __hci;
void __iomem *tibase = musb->ctrl_base;
+ struct cppi *cppi;
u32 tmp;
spin_lock_irqsave(&musb->lock, flags);
@@ -281,16 +282,9 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
/* CPPI interrupts share the same IRQ line, but have their own
* mask, state, "vector", and EOI registers.
*/
- if (is_cppi_enabled()) {
- u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
- u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
-
- if (cppi_tx || cppi_rx) {
- DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
- cppi_completion(musb, cppi_rx, cppi_tx);
- retval = IRQ_HANDLED;
- }
- }
+ cppi = container_of(musb->dma_controller, struct cppi, controller);
+ if (is_cppi_enabled() && musb->dma_controller && !cppi->irq)
+ retval = cppi_interrupt(irq, __hci);
/* ack and handle non-CPPI interrupts */
tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
@@ -331,21 +325,21 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
* to stop registering in devctl.
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
- musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (is_host_enabled(musb) && drvvbus) {
musb->is_active = 1;
MUSB_HST_MODE(musb);
- musb->xceiv.default_a = 1;
- musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
del_timer(&otg_workaround);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
- musb->xceiv.default_a = 0;
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
}
@@ -367,17 +361,12 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
/* poll for ID change */
if (is_otg_enabled(musb)
- && musb->xceiv.state == OTG_STATE_B_IDLE)
+ && musb->xceiv->state == OTG_STATE_B_IDLE)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
spin_unlock_irqrestore(&musb->lock, flags);
- /* REVISIT we sometimes get unhandled IRQs
- * (e.g. ep0). not clear why...
- */
- if (retval != IRQ_HANDLED)
- DBG(5, "unhandled? %08x\n", tmp);
- return IRQ_HANDLED;
+ return retval;
}
int musb_platform_set_mode(struct musb *musb, u8 mode)
@@ -391,6 +380,11 @@ int __init musb_platform_init(struct musb *musb)
void __iomem *tibase = musb->ctrl_base;
u32 revision;
+ usb_nop_xceiv_register();
+ musb->xceiv = otg_get_transceiver();
+ if (!musb->xceiv)
+ return -ENODEV;
+
musb->mregs += DAVINCI_BASE_OFFSET;
clk_enable(musb->clock);
@@ -398,7 +392,7 @@ int __init musb_platform_init(struct musb *musb)
/* returns zero if e.g. not clocked */
revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
if (revision == 0)
- return -ENODEV;
+ goto fail;
if (is_host_enabled(musb))
setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
@@ -432,6 +426,10 @@ int __init musb_platform_init(struct musb *musb)
musb->isr = davinci_interrupt;
return 0;
+
+fail:
+ usb_nop_xceiv_unregister();
+ return -ENODEV;
}
int musb_platform_exit(struct musb *musb)
@@ -442,7 +440,7 @@ int musb_platform_exit(struct musb *musb)
davinci_source_power(musb, 0 /*off*/, 1);
/* delay, to avoid problems with module reload */
- if (is_host_enabled(musb) && musb->xceiv.default_a) {
+ if (is_host_enabled(musb) && musb->xceiv->default_a) {
int maxdelay = 30;
u8 devctl, warn = 0;
@@ -471,5 +469,7 @@ int musb_platform_exit(struct musb *musb)
clk_disable(musb->clock);
+ usb_nop_xceiv_unregister();
+
return 0;
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 4000cf6d1e8..554a414f65d 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -112,6 +112,7 @@
#include "davinci.h"
#endif
+#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
unsigned musb_debug;
@@ -267,7 +268,7 @@ void musb_load_testpacket(struct musb *musb)
const char *otg_state_string(struct musb *musb)
{
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_IDLE: return "a_idle";
case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise";
case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon";
@@ -288,12 +289,6 @@ const char *otg_state_string(struct musb *musb)
#ifdef CONFIG_USB_MUSB_OTG
/*
- * See also USB_OTG_1-3.pdf 6.6.5 Timers
- * REVISIT: Are the other timers done in the hardware?
- */
-#define TB_ASE0_BRST 100 /* Min 3.125 ms */
-
-/*
* Handles OTG hnp timeouts, such as b_ase0_brst
*/
void musb_otg_timer_func(unsigned long data)
@@ -302,16 +297,18 @@ void musb_otg_timer_func(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_B_WAIT_ACON:
DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n");
musb_g_disconnect(musb);
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
musb->is_active = 0;
break;
+ case OTG_STATE_A_SUSPEND:
case OTG_STATE_A_WAIT_BCON:
- DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n");
- musb_hnp_stop(musb);
+ DBG(1, "HNP: %s timeout\n", otg_state_string(musb));
+ musb_set_vbus(musb, 0);
+ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
break;
default:
DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb));
@@ -320,10 +317,8 @@ void musb_otg_timer_func(unsigned long data)
spin_unlock_irqrestore(&musb->lock, flags);
}
-static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0);
-
/*
- * Stops the B-device HNP state. Caller must take care of locking.
+ * Stops the HNP transition. Caller must take care of locking.
*/
void musb_hnp_stop(struct musb *musb)
{
@@ -331,20 +326,17 @@ void musb_hnp_stop(struct musb *musb)
void __iomem *mbase = musb->mregs;
u8 reg;
- switch (musb->xceiv.state) {
+ DBG(1, "HNP: stop from %s\n", otg_state_string(musb));
+
+ switch (musb->xceiv->state) {
case OTG_STATE_A_PERIPHERAL:
- case OTG_STATE_A_WAIT_VFALL:
- case OTG_STATE_A_WAIT_BCON:
- DBG(1, "HNP: Switching back to A-host\n");
musb_g_disconnect(musb);
- musb->xceiv.state = OTG_STATE_A_IDLE;
- MUSB_HST_MODE(musb);
- musb->is_active = 0;
+ DBG(1, "HNP: back to %s\n", otg_state_string(musb));
break;
case OTG_STATE_B_HOST:
DBG(1, "HNP: Disabling HR\n");
hcd->self.is_b_host = 0;
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
MUSB_DEV_MODE(musb);
reg = musb_readb(mbase, MUSB_POWER);
reg |= MUSB_POWER_SUSPENDM;
@@ -402,7 +394,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
if (devctl & MUSB_DEVCTL_HM) {
#ifdef CONFIG_USB_MUSB_HDRC_HCD
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_SUSPEND:
/* remote wakeup? later, GetPortStatus
* will stop RESUME signaling
@@ -425,12 +417,12 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb->rh_timer = jiffies
+ msecs_to_jiffies(20);
- musb->xceiv.state = OTG_STATE_A_HOST;
+ musb->xceiv->state = OTG_STATE_A_HOST;
musb->is_active = 1;
usb_hcd_resume_root_hub(musb_to_hcd(musb));
break;
case OTG_STATE_B_WAIT_ACON:
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
musb->is_active = 1;
MUSB_DEV_MODE(musb);
break;
@@ -441,11 +433,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
}
#endif
} else {
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
#ifdef CONFIG_USB_MUSB_HDRC_HCD
case OTG_STATE_A_SUSPEND:
/* possibly DISCONNECT is upcoming */
- musb->xceiv.state = OTG_STATE_A_HOST;
+ musb->xceiv->state = OTG_STATE_A_HOST;
usb_hcd_resume_root_hub(musb_to_hcd(musb));
break;
#endif
@@ -490,7 +482,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
*/
musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
musb->ep0_stage = MUSB_EP0_START;
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
musb_set_vbus(musb, 1);
@@ -516,7 +508,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
* REVISIT: do delays from lots of DEBUG_KERNEL checks
* make trouble here, keeping VBUS < 4.4V ?
*/
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_HOST:
/* recovery is dicey once we've gotten past the
* initial stages of enumeration, but if VBUS
@@ -594,37 +586,40 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
if (devctl & MUSB_DEVCTL_LSDEV)
musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
- if (hcd->status_urb)
- usb_hcd_poll_rh_status(hcd);
- else
- usb_hcd_resume_root_hub(hcd);
-
- MUSB_HST_MODE(musb);
-
/* indicate new connection to OTG machine */
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_B_PERIPHERAL:
if (int_usb & MUSB_INTR_SUSPEND) {
DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
- musb->xceiv.state = OTG_STATE_B_HOST;
- hcd->self.is_b_host = 1;
int_usb &= ~MUSB_INTR_SUSPEND;
+ goto b_host;
} else
DBG(1, "CONNECT as b_peripheral???\n");
break;
case OTG_STATE_B_WAIT_ACON:
- DBG(1, "HNP: Waiting to switch to b_host state\n");
- musb->xceiv.state = OTG_STATE_B_HOST;
+ DBG(1, "HNP: CONNECT, now b_host\n");
+b_host:
+ musb->xceiv->state = OTG_STATE_B_HOST;
hcd->self.is_b_host = 1;
+ musb->ignore_disconnect = 0;
+ del_timer(&musb->otg_timer);
break;
default:
if ((devctl & MUSB_DEVCTL_VBUS)
== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
- musb->xceiv.state = OTG_STATE_A_HOST;
+ musb->xceiv->state = OTG_STATE_A_HOST;
hcd->self.is_b_host = 0;
}
break;
}
+
+ /* poke the root hub */
+ MUSB_HST_MODE(musb);
+ if (hcd->status_urb)
+ usb_hcd_poll_rh_status(hcd);
+ else
+ usb_hcd_resume_root_hub(hcd);
+
DBG(1, "CONNECT (%s) devctl %02x\n",
otg_state_string(musb), devctl);
}
@@ -650,7 +645,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
}
} else if (is_peripheral_capable()) {
DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
#ifdef CONFIG_USB_OTG
case OTG_STATE_A_SUSPEND:
/* We need to ignore disconnect on suspend
@@ -661,24 +656,27 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb_g_reset(musb);
/* FALLTHROUGH */
case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
- DBG(1, "HNP: Setting timer as %s\n",
- otg_state_string(musb));
- musb_otg_timer.data = (unsigned long)musb;
- mod_timer(&musb_otg_timer, jiffies
- + msecs_to_jiffies(100));
+ /* never use invalid T(a_wait_bcon) */
+ DBG(1, "HNP: in %s, %d msec timeout\n",
+ otg_state_string(musb),
+ TA_WAIT_BCON(musb));
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(TA_WAIT_BCON(musb)));
break;
case OTG_STATE_A_PERIPHERAL:
- musb_hnp_stop(musb);
+ musb->ignore_disconnect = 0;
+ del_timer(&musb->otg_timer);
+ musb_g_reset(musb);
break;
case OTG_STATE_B_WAIT_ACON:
DBG(1, "HNP: RESET (%s), to b_peripheral\n",
otg_state_string(musb));
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
musb_g_reset(musb);
break;
#endif
case OTG_STATE_B_IDLE:
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
/* FALLTHROUGH */
case OTG_STATE_B_PERIPHERAL:
musb_g_reset(musb);
@@ -763,7 +761,7 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
MUSB_MODE(musb), devctl);
handled = IRQ_HANDLED;
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
#ifdef CONFIG_USB_MUSB_HDRC_HCD
case OTG_STATE_A_HOST:
case OTG_STATE_A_SUSPEND:
@@ -776,7 +774,16 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
#endif /* HOST */
#ifdef CONFIG_USB_MUSB_OTG
case OTG_STATE_B_HOST:
- musb_hnp_stop(musb);
+ /* REVISIT this behaves for "real disconnect"
+ * cases; make sure the other transitions from
+ * B_HOST act right too. The B_HOST code
+ * in hnp_stop() is currently not used...
+ */
+ musb_root_disconnect(musb);
+ musb_to_hcd(musb)->self.is_b_host = 0;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ MUSB_DEV_MODE(musb);
+ musb_g_disconnect(musb);
break;
case OTG_STATE_A_PERIPHERAL:
musb_hnp_stop(musb);
@@ -805,26 +812,35 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
otg_state_string(musb), devctl, power);
handled = IRQ_HANDLED;
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
#ifdef CONFIG_USB_MUSB_OTG
case OTG_STATE_A_PERIPHERAL:
- /*
- * We cannot stop HNP here, devctl BDEVICE might be
- * still set.
+ /* We also come here if the cable is removed, since
+ * this silicon doesn't report ID-no-longer-grounded.
+ *
+ * We depend on T(a_wait_bcon) to shut us down, and
+ * hope users don't do anything dicey during this
+ * undesired detour through A_WAIT_BCON.
*/
+ musb_hnp_stop(musb);
+ usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ musb_root_disconnect(musb);
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon
+ ? : OTG_TIME_A_WAIT_BCON));
break;
#endif
case OTG_STATE_B_PERIPHERAL:
musb_g_suspend(musb);
musb->is_active = is_otg_enabled(musb)
- && musb->xceiv.gadget->b_hnp_enable;
+ && musb->xceiv->gadget->b_hnp_enable;
if (musb->is_active) {
#ifdef CONFIG_USB_MUSB_OTG
- musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+ musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
DBG(1, "HNP: Setting timer for b_ase0_brst\n");
- musb_otg_timer.data = (unsigned long)musb;
- mod_timer(&musb_otg_timer, jiffies
- + msecs_to_jiffies(TB_ASE0_BRST));
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(
+ OTG_TIME_B_ASE0_BRST));
#endif
}
break;
@@ -834,9 +850,9 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
+ msecs_to_jiffies(musb->a_wait_bcon));
break;
case OTG_STATE_A_HOST:
- musb->xceiv.state = OTG_STATE_A_SUSPEND;
+ musb->xceiv->state = OTG_STATE_A_SUSPEND;
musb->is_active = is_otg_enabled(musb)
- && musb->xceiv.host->b_hnp_enable;
+ && musb->xceiv->host->b_hnp_enable;
break;
case OTG_STATE_B_HOST:
/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
@@ -1068,14 +1084,13 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
-{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, },
-{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, },
-{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, },
-{ .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 256, },
+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 64, },
+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 256, },
+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 64, },
+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 256, },
+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 64, },
+{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
@@ -1335,11 +1350,11 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
}
if (reg & MUSB_CONFIGDATA_HBRXE) {
strcat(aInfo, ", HB-ISO Rx");
- strcat(aInfo, " (X)"); /* no driver support */
+ musb->hb_iso_rx = true;
}
if (reg & MUSB_CONFIGDATA_HBTXE) {
strcat(aInfo, ", HB-ISO Tx");
- strcat(aInfo, " (X)"); /* no driver support */
+ musb->hb_iso_tx = true;
}
if (reg & MUSB_CONFIGDATA_SOFTCONE)
strcat(aInfo, ", SoftConn");
@@ -1481,13 +1496,7 @@ static irqreturn_t generic_interrupt(int irq, void *__hci)
spin_unlock_irqrestore(&musb->lock, flags);
- /* REVISIT we sometimes get spurious IRQs on g_ep0
- * not clear why...
- */
- if (retval != IRQ_HANDLED)
- DBG(5, "spurious?\n");
-
- return IRQ_HANDLED;
+ return retval;
}
#else
@@ -1687,8 +1696,9 @@ musb_vbus_store(struct device *dev, struct device_attribute *attr,
}
spin_lock_irqsave(&musb->lock, flags);
- musb->a_wait_bcon = val;
- if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON)
+ /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
+ musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ;
+ if (musb->xceiv->state == OTG_STATE_A_WAIT_BCON)
musb->is_active = 0;
musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
spin_unlock_irqrestore(&musb->lock, flags);
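As a side note on the clamp introduced above: zero keeps the "no timeout" meaning, while any nonzero write is raised to at least OTG_TIME_A_WAIT_BCON. A minimal standalone sketch of that rule (values hypothetical, not part of the patch):

/* Standalone sketch of the T(a_wait_bcon) clamp applied in musb_vbus_store() above. */
#include <stdio.h>

#define OTG_TIME_A_WAIT_BCON	1100	/* msec; defined later in musb_core.h */

static unsigned long clamp_a_wait_bcon(unsigned long val)
{
	/* 0 means "no timeout"; any other value must be a valid OTG timeout */
	if (!val)
		return 0;
	return val < OTG_TIME_A_WAIT_BCON ? OTG_TIME_A_WAIT_BCON : val;
}

int main(void)
{
	printf("%lu %lu %lu\n",
		clamp_a_wait_bcon(0),		/* 0: unlimited */
		clamp_a_wait_bcon(300),		/* 1100: raised to the minimum */
		clamp_a_wait_bcon(5000));	/* 5000: already valid */
	return 0;
}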
@@ -1706,10 +1716,13 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
spin_lock_irqsave(&musb->lock, flags);
val = musb->a_wait_bcon;
+ /* FIXME get_vbus_status() is normally #defined as false...
+ * and is effectively TUSB-specific.
+ */
vbus = musb_platform_get_vbus_status(musb);
spin_unlock_irqrestore(&musb->lock, flags);
- return sprintf(buf, "Vbus %s, timeout %lu\n",
+ return sprintf(buf, "Vbus %s, timeout %lu msec\n",
vbus ? "on" : "off", val);
}
static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
@@ -1749,8 +1762,8 @@ static void musb_irq_work(struct work_struct *data)
struct musb *musb = container_of(data, struct musb, irq_work);
static int old_state;
- if (musb->xceiv.state != old_state) {
- old_state = musb->xceiv.state;
+ if (musb->xceiv->state != old_state) {
+ old_state = musb->xceiv->state;
sysfs_notify(&musb->controller->kobj, NULL, "mode");
}
}
@@ -1782,6 +1795,7 @@ allocate_instance(struct device *dev,
hcd->uses_new_polling = 1;
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+ musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
#else
musb = kzalloc(sizeof *musb, GFP_KERNEL);
if (!musb)
@@ -1847,7 +1861,7 @@ static void musb_free(struct musb *musb)
}
#ifdef CONFIG_USB_MUSB_OTG
- put_device(musb->xceiv.dev);
+ put_device(musb->xceiv->dev);
#endif
#ifdef CONFIG_USB_MUSB_HDRC_HCD
@@ -1928,10 +1942,18 @@ bad_config:
}
}
- /* assume vbus is off */
-
- /* platform adjusts musb->mregs and musb->isr if needed,
- * and activates clocks
+ /* The musb_platform_init() call:
+ * - adjusts musb->mregs and musb->isr if needed,
+ * - may initialize an integrated transceiver
+ * - initializes musb->xceiv, usually by otg_get_transceiver()
+ * - activates clocks.
+ * - stops powering VBUS
+ * - assigns musb->board_set_vbus if host mode is enabled
+ *
+ * There are various transceiver configurations. Blackfin,
+ * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses
+ * external/discrete ones in various flavors (twl4030 family,
+ * isp1504, non-OTG, etc) mostly hooking up through ULPI.
*/
musb->isr = generic_interrupt;
status = musb_platform_init(musb);
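For illustration, a minimal glue-layer sketch that follows the contract listed in the comment above, modelled on the DaVinci and TUSB6010 hunks elsewhere in this patch. It assumes the usual musb glue headers; my_board_set_vbus and my_board_irq are hypothetical placeholders.

/* Hypothetical glue layer honoring the musb_platform_init() contract above. */
int __init musb_platform_init(struct musb *musb)
{
	/* provide a transceiver when the SoC has no dedicated OTG PHY driver */
	usb_nop_xceiv_register();
	musb->xceiv = otg_get_transceiver();
	if (!musb->xceiv)
		return -ENODEV;

	/* activate clocks; adjust musb->mregs here if the core is offset */
	clk_enable(musb->clock);

	/* leave VBUS off for now; host mode switches it on later */
	if (is_host_enabled(musb))
		musb->board_set_vbus = my_board_set_vbus;	/* hypothetical */

	musb->isr = my_board_irq;	/* hypothetical board interrupt handler */
	return 0;
}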
@@ -1968,6 +1990,10 @@ bad_config:
if (status < 0)
goto fail2;
+#ifdef CONFIG_USB_OTG
+ setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
+#endif
+
/* Init IRQ workqueue before request_irq */
INIT_WORK(&musb->irq_work, musb_irq_work);
@@ -1999,17 +2025,17 @@ bad_config:
? "DMA" : "PIO",
musb->nIrq);
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- /* host side needs more setup, except for no-host modes */
- if (musb->board_mode != MUSB_PERIPHERAL) {
+ /* host side needs more setup */
+ if (is_host_enabled(musb)) {
struct usb_hcd *hcd = musb_to_hcd(musb);
- if (musb->board_mode == MUSB_OTG)
+ otg_set_host(musb->xceiv, &hcd->self);
+
+ if (is_otg_enabled(musb))
hcd->self.otg_port = 1;
- musb->xceiv.host = &hcd->self;
+ musb->xceiv->host = &hcd->self;
hcd->power_budget = 2 * (plat->power ? : 250);
}
-#endif /* CONFIG_USB_MUSB_HDRC_HCD */
/* For the host-only role, we can activate right away.
* (We expect the ID pin to be forcibly grounded!!)
@@ -2017,8 +2043,8 @@ bad_config:
*/
if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
MUSB_HST_MODE(musb);
- musb->xceiv.default_a = 1;
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ musb->xceiv->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
if (status)
@@ -2033,8 +2059,8 @@ bad_config:
} else /* peripheral is enabled */ {
MUSB_DEV_MODE(musb);
- musb->xceiv.default_a = 0;
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
status = musb_gadget_setup(musb);
if (status)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index efb39b5e55b..f3772ca3b2c 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -40,6 +40,7 @@
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
+#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
@@ -171,7 +172,8 @@ enum musb_h_ep0_state {
/* peripheral side ep0 states */
enum musb_g_ep0_state {
- MUSB_EP0_STAGE_SETUP, /* idle, waiting for setup */
+ MUSB_EP0_STAGE_IDLE, /* idle, waiting for SETUP */
+ MUSB_EP0_STAGE_SETUP, /* received SETUP */
MUSB_EP0_STAGE_TX, /* IN data */
MUSB_EP0_STAGE_RX, /* OUT data */
MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */
@@ -179,10 +181,15 @@ enum musb_g_ep0_state {
MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */
} __attribute__ ((packed));
-/* OTG protocol constants */
+/*
+ * OTG protocol constants. See USB OTG 1.3 spec,
+ * sections 5.5 "Device Timings" and 6.6.5 "Timers".
+ */
#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */
-#define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */
-#define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */
+#define OTG_TIME_A_WAIT_BCON 1100 /* min 1 second */
+#define OTG_TIME_A_AIDL_BDIS 200 /* min 200 msec */
+#define OTG_TIME_B_ASE0_BRST 100 /* min 3.125 ms */
+
/*************************** REGISTER ACCESS ********************************/
@@ -331,6 +338,8 @@ struct musb {
struct list_head control; /* of musb_qh */
struct list_head in_bulk; /* of musb_qh */
struct list_head out_bulk; /* of musb_qh */
+
+ struct timer_list otg_timer;
#endif
/* called with IRQs blocked; ON/nonzero implies starting a session,
@@ -355,7 +364,7 @@ struct musb {
u16 int_rx;
u16 int_tx;
- struct otg_transceiver xceiv;
+ struct otg_transceiver *xceiv;
int nIrq;
unsigned irq_wake:1;
@@ -386,6 +395,9 @@ struct musb {
unsigned is_multipoint:1;
unsigned ignore_disconnect:1; /* during bus resets */
+ unsigned hb_iso_rx:1; /* high bandwidth iso rx? */
+ unsigned hb_iso_tx:1; /* high bandwidth iso tx? */
+
#ifdef C_MP_TX
unsigned bulk_split:1;
#define can_bulk_split(musb,type) \
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f79440cdfe7..8b3c4e2ed7b 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -310,7 +310,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
/* setup DMA, then program endpoint CSR */
request_size = min(request->length,
musb_ep->dma->max_len);
- if (request_size <= musb_ep->packet_sz)
+ if (request_size < musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
else
musb_ep->dma->desired_mode = 1;
@@ -349,7 +349,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
#elif defined(CONFIG_USB_TI_CPPI_DMA)
/* program endpoint CSR first, then setup DMA */
csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
- csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
+ csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
+ MUSB_TXCSR_MODE;
musb_writew(epio, MUSB_TXCSR,
(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
| csr);
@@ -1405,7 +1406,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_B_PERIPHERAL:
/* NOTE: OTG state machine doesn't include B_SUSPENDED;
* that's part of the standard usb 1.1 state machine, and
@@ -1507,9 +1508,9 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
struct musb *musb = gadget_to_musb(gadget);
- if (!musb->xceiv.set_power)
+ if (!musb->xceiv->set_power)
return -EOPNOTSUPP;
- return otg_set_power(&musb->xceiv, mA);
+ return otg_set_power(musb->xceiv, mA);
}
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
@@ -1732,11 +1733,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
spin_lock_irqsave(&musb->lock, flags);
- /* REVISIT always use otg_set_peripheral(), handling
- * issues including the root hub one below ...
- */
- musb->xceiv.gadget = &musb->g;
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ otg_set_peripheral(musb->xceiv, &musb->g);
musb->is_active = 1;
/* FIXME this ignores the softconnect flag. Drivers are
@@ -1748,6 +1745,8 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
if (!is_otg_enabled(musb))
musb_start(musb);
+ otg_set_peripheral(musb->xceiv, &musb->g);
+
spin_unlock_irqrestore(&musb->lock, flags);
if (is_otg_enabled(musb)) {
@@ -1761,8 +1760,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
if (retval < 0) {
DBG(1, "add_hcd failed, %d\n", retval);
spin_lock_irqsave(&musb->lock, flags);
- musb->xceiv.gadget = NULL;
- musb->xceiv.state = OTG_STATE_UNDEFINED;
+ otg_set_peripheral(musb->xceiv, NULL);
musb->gadget_driver = NULL;
musb->g.dev.driver = NULL;
spin_unlock_irqrestore(&musb->lock, flags);
@@ -1845,8 +1843,9 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
(void) musb_gadget_vbus_draw(&musb->g, 0);
- musb->xceiv.state = OTG_STATE_UNDEFINED;
+ musb->xceiv->state = OTG_STATE_UNDEFINED;
stop_activity(musb, driver);
+ otg_set_peripheral(musb->xceiv, NULL);
DBG(3, "unregistering driver %s\n", driver->function);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -1882,7 +1881,7 @@ EXPORT_SYMBOL(usb_gadget_unregister_driver);
void musb_g_resume(struct musb *musb)
{
musb->is_suspended = 0;
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_B_IDLE:
break;
case OTG_STATE_B_WAIT_ACON:
@@ -1908,10 +1907,10 @@ void musb_g_suspend(struct musb *musb)
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
DBG(3, "devctl %02x\n", devctl);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_B_IDLE:
if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
break;
case OTG_STATE_B_PERIPHERAL:
musb->is_suspended = 1;
@@ -1957,22 +1956,24 @@ void musb_g_disconnect(struct musb *musb)
spin_lock(&musb->lock);
}
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
default:
#ifdef CONFIG_USB_MUSB_OTG
DBG(2, "Unhandled disconnect %s, setting a_idle\n",
otg_state_string(musb));
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
break;
case OTG_STATE_A_PERIPHERAL:
- musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ MUSB_HST_MODE(musb);
break;
case OTG_STATE_B_WAIT_ACON:
case OTG_STATE_B_HOST:
#endif
case OTG_STATE_B_PERIPHERAL:
case OTG_STATE_B_IDLE:
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
break;
case OTG_STATE_B_SRP_INIT:
break;
@@ -2028,10 +2029,10 @@ __acquires(musb->lock)
* or else after HNP, as A-Device
*/
if (devctl & MUSB_DEVCTL_BDEVICE) {
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
musb->g.is_a_peripheral = 0;
} else if (is_otg_enabled(musb)) {
- musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
+ musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
musb->g.is_a_peripheral = 1;
} else
WARN_ON(1);
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 3f5e30ddfa2..40ed50ecedf 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -4,6 +4,7 @@
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -58,7 +59,8 @@
static char *decode_ep0stage(u8 stage)
{
switch (stage) {
- case MUSB_EP0_STAGE_SETUP: return "idle";
+ case MUSB_EP0_STAGE_IDLE: return "idle";
+ case MUSB_EP0_STAGE_SETUP: return "setup";
case MUSB_EP0_STAGE_TX: return "in";
case MUSB_EP0_STAGE_RX: return "out";
case MUSB_EP0_STAGE_ACKWAIT: return "wait";
@@ -628,7 +630,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
musb_writew(regs, MUSB_CSR0,
csr & ~MUSB_CSR0_P_SENTSTALL);
retval = IRQ_HANDLED;
- musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
csr = musb_readw(regs, MUSB_CSR0);
}
@@ -636,7 +638,18 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
if (csr & MUSB_CSR0_P_SETUPEND) {
musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
retval = IRQ_HANDLED;
- musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ /* Transition into the early status phase */
+ switch (musb->ep0_state) {
+ case MUSB_EP0_STAGE_TX:
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
+ break;
+ case MUSB_EP0_STAGE_RX:
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+ break;
+ default:
+ ERR("SetupEnd came in a wrong ep0stage %s",
+ decode_ep0stage(musb->ep0_state));
+ }
csr = musb_readw(regs, MUSB_CSR0);
/* NOTE: request may need completion */
}
@@ -697,11 +710,31 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
if (req)
musb_g_ep0_giveback(musb, req);
}
+
+ /*
+ * In case several interrupts got coalesced,
+ * check to see if we've already received a SETUP packet...
+ */
+ if (csr & MUSB_CSR0_RXPKTRDY)
+ goto setup;
+
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
+ break;
+
+ case MUSB_EP0_STAGE_IDLE:
+ /*
+ * This state is typically (but not always) indiscernible
+ * from the status states since the corresponding interrupts
+ * tend to happen within too short a period of time (with only
+ * a zero-length packet in between) and so get coalesced...
+ */
retval = IRQ_HANDLED;
musb->ep0_state = MUSB_EP0_STAGE_SETUP;
/* FALLTHROUGH */
case MUSB_EP0_STAGE_SETUP:
+setup:
if (csr & MUSB_CSR0_RXPKTRDY) {
struct usb_ctrlrequest setup;
int handled = 0;
@@ -783,7 +816,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
stall:
DBG(3, "stall (%d)\n", handled);
musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
- musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
finish:
musb_writew(regs, MUSB_CSR0,
musb->ackpend);
@@ -803,7 +836,7 @@ finish:
/* "can't happen" */
WARN_ON(1);
musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
- musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
break;
}
@@ -959,7 +992,7 @@ static int musb_g_ep0_halt(struct usb_ep *e, int value)
csr |= MUSB_CSR0_P_SENDSTALL;
musb_writew(regs, MUSB_CSR0, csr);
- musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
musb->ackpend = 0;
break;
default:
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index db1b57415ec..94a2a350a41 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -181,6 +181,19 @@ static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
+static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
+{
+ if (is_in != 0 || ep->is_shared_fifo)
+ ep->in_qh = qh;
+ if (is_in == 0 || ep->is_shared_fifo)
+ ep->out_qh = qh;
+}
+
+static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
+{
+ return is_in ? ep->in_qh : ep->out_qh;
+}
+
/*
* Start the URB at the front of an endpoint's queue
* end must be claimed from the caller.
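The two helpers added above encode the rule that a shared-FIFO endpoint owns both directions at once, while split FIFOs track in_qh and out_qh separately. A standalone sketch of that mapping (simplified structs, hypothetical values, not part of the patch):

/* Standalone sketch of the qh mapping rule in musb_ep_set_qh()/musb_ep_get_qh(). */
#include <stdio.h>
#include <stddef.h>

struct qh { int id; };
struct hw_ep {
	int is_shared_fifo;
	struct qh *in_qh, *out_qh;
};

static void ep_set_qh(struct hw_ep *ep, int is_in, struct qh *qh)
{
	if (is_in || ep->is_shared_fifo)
		ep->in_qh = qh;
	if (!is_in || ep->is_shared_fifo)
		ep->out_qh = qh;
}

int main(void)
{
	struct qh q = { 1 };
	struct hw_ep shared = { 1, NULL, NULL }, split = { 0, NULL, NULL };

	ep_set_qh(&shared, 0, &q);	/* OUT on a shared FIFO claims both slots */
	ep_set_qh(&split, 0, &q);	/* OUT on a split FIFO claims only out_qh */

	printf("shared: in=%p out=%p\n", (void *)shared.in_qh, (void *)shared.out_qh);
	printf("split:  in=%p out=%p\n", (void *)split.in_qh, (void *)split.out_qh);
	return 0;
}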
@@ -210,7 +223,6 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
case USB_ENDPOINT_XFER_CONTROL:
/* control transfers always start with SETUP */
is_in = 0;
- hw_ep->out_qh = qh;
musb->ep0_stage = MUSB_EP0_START;
buf = urb->setup_packet;
len = 8;
@@ -239,10 +251,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
epnum, buf + offset, len);
/* Configure endpoint */
- if (is_in || hw_ep->is_shared_fifo)
- hw_ep->in_qh = qh;
- else
- hw_ep->out_qh = qh;
+ musb_ep_set_qh(hw_ep, is_in, qh);
musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
/* transmit may have more work: start it when it is time */
@@ -286,9 +295,8 @@ start:
}
}
-/* caller owns controller lock, irqs are blocked */
-static void
-__musb_giveback(struct musb *musb, struct urb *urb, int status)
+/* Context: caller owns controller lock, IRQs are blocked */
+static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
@@ -321,60 +329,57 @@ __acquires(musb->lock)
spin_lock(&musb->lock);
}
-/* for bulk/interrupt endpoints only */
-static inline void
-musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+/* For bulk/interrupt endpoints only */
+static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
+ struct urb *urb)
{
- struct usb_device *udev = urb->dev;
+ void __iomem *epio = qh->hw_ep->regs;
u16 csr;
- void __iomem *epio = ep->regs;
- struct musb_qh *qh;
- /* FIXME: the current Mentor DMA code seems to have
+ /*
+ * FIXME: the current Mentor DMA code seems to have
* problems getting toggle correct.
*/
- if (is_in || ep->is_shared_fifo)
- qh = ep->in_qh;
+ if (is_in)
+ csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
else
- qh = ep->out_qh;
+ csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
- if (!is_in) {
- csr = musb_readw(epio, MUSB_TXCSR);
- usb_settoggle(udev, qh->epnum, 1,
- (csr & MUSB_TXCSR_H_DATATOGGLE)
- ? 1 : 0);
- } else {
- csr = musb_readw(epio, MUSB_RXCSR);
- usb_settoggle(udev, qh->epnum, 0,
- (csr & MUSB_RXCSR_H_DATATOGGLE)
- ? 1 : 0);
- }
+ usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
-/* caller owns controller lock, irqs are blocked */
-static struct musb_qh *
-musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+/*
+ * Advance this hardware endpoint's queue, completing the specified URB and
+ * advancing to either the next URB queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, IRQs are blocked
+ */
+static void musb_advance_schedule(struct musb *musb, struct urb *urb,
+ struct musb_hw_ep *hw_ep, int is_in)
{
+ struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
struct musb_hw_ep *ep = qh->hw_ep;
- struct musb *musb = ep->musb;
- int is_in = usb_pipein(urb->pipe);
int ready = qh->is_ready;
+ int status;
+
+ status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
/* save toggle eagerly, for paranoia */
switch (qh->type) {
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
- musb_save_toggle(ep, is_in, urb);
+ musb_save_toggle(qh, is_in, urb);
break;
case USB_ENDPOINT_XFER_ISOC:
- if (status == 0 && urb->error_count)
+ if (urb->error_count)
status = -EXDEV;
break;
}
qh->is_ready = 0;
- __musb_giveback(musb, urb, status);
+ musb_giveback(musb, urb, status);
qh->is_ready = ready;
/* reclaim resources (and bandwidth) ASAP; deschedule it, and
@@ -388,11 +393,8 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
else
ep->tx_reinit = 1;
- /* clobber old pointers to this qh */
- if (is_in || ep->is_shared_fifo)
- ep->in_qh = NULL;
- else
- ep->out_qh = NULL;
+ /* Clobber old pointers to this qh */
+ musb_ep_set_qh(ep, is_in, NULL);
qh->hep->hcpriv = NULL;
switch (qh->type) {
@@ -421,36 +423,10 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
break;
}
}
- return qh;
-}
-
-/*
- * Advance this hardware endpoint's queue, completing the specified urb and
- * advancing to either the next urb queued to that qh, or else invalidating
- * that qh and advancing to the next qh scheduled after the current one.
- *
- * Context: caller owns controller lock, irqs are blocked
- */
-static void
-musb_advance_schedule(struct musb *musb, struct urb *urb,
- struct musb_hw_ep *hw_ep, int is_in)
-{
- struct musb_qh *qh;
-
- if (is_in || hw_ep->is_shared_fifo)
- qh = hw_ep->in_qh;
- else
- qh = hw_ep->out_qh;
-
- if (urb->status == -EINPROGRESS)
- qh = musb_giveback(qh, urb, 0);
- else
- qh = musb_giveback(qh, urb, urb->status);
if (qh != NULL && qh->is_ready) {
DBG(4, "... next ep%d %cX urb %p\n",
- hw_ep->epnum, is_in ? 'R' : 'T',
- next_urb(qh));
+ hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
}
}
@@ -629,7 +605,8 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
/* NOTE: bulk combining rewrites high bits of maxpacket */
- musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
+ musb_writew(ep->regs, MUSB_RXMAXP,
+ qh->maxpacket | ((qh->hb_mult - 1) << 11));
ep->rx_reinit = 0;
}
@@ -651,9 +628,10 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
csr = musb_readw(epio, MUSB_TXCSR);
if (length > pkt_size) {
mode = 1;
- csr |= MUSB_TXCSR_AUTOSET
- | MUSB_TXCSR_DMAMODE
- | MUSB_TXCSR_DMAENAB;
+ csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
+ /* autoset shouldn't be set in high bandwidth */
+ if (qh->hb_mult == 1)
+ csr |= MUSB_TXCSR_AUTOSET;
} else {
mode = 0;
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
@@ -703,15 +681,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
void __iomem *mbase = musb->mregs;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
- struct musb_qh *qh;
- u16 packet_sz;
-
- if (!is_out || hw_ep->is_shared_fifo)
- qh = hw_ep->in_qh;
- else
- qh = hw_ep->out_qh;
-
- packet_sz = qh->maxpacket;
+ struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
+ u16 packet_sz = qh->maxpacket;
DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
"h_addr%02x h_port%02x bytes %d\n",
@@ -1129,17 +1100,14 @@ void musb_host_tx(struct musb *musb, u8 epnum)
u16 tx_csr;
size_t length = 0;
size_t offset = 0;
- struct urb *urb;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
- struct musb_qh *qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
- : hw_ep->out_qh;
+ struct musb_qh *qh = hw_ep->out_qh;
+ struct urb *urb = next_urb(qh);
u32 status = 0;
void __iomem *mbase = musb->mregs;
struct dma_channel *dma;
- urb = next_urb(qh);
-
musb_ep_select(mbase, epnum);
tx_csr = musb_readw(epio, MUSB_TXCSR);
@@ -1427,7 +1395,7 @@ static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
urb->actual_length += dma->actual_len;
dma->actual_len = 0L;
}
- musb_save_toggle(ep, 1, urb);
+ musb_save_toggle(cur_qh, 1, urb);
/* move cur_qh to end of queue */
list_move_tail(&cur_qh->ring, &musb->in_bulk);
@@ -1531,6 +1499,10 @@ void musb_host_rx(struct musb *musb, u8 epnum)
/* packet error reported later */
iso_err = true;
}
+ } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
+ DBG(3, "end %d high bandwidth incomplete ISO packet RX\n",
+ epnum);
+ status = -EPROTO;
}
/* faults abort the transfer */
@@ -1738,7 +1710,11 @@ void musb_host_rx(struct musb *musb, u8 epnum)
val &= ~MUSB_RXCSR_H_AUTOREQ;
else
val |= MUSB_RXCSR_H_AUTOREQ;
- val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
+ val |= MUSB_RXCSR_DMAENAB;
+
+ /* autoclear shouldn't be set in high bandwidth */
+ if (qh->hb_mult == 1)
+ val |= MUSB_RXCSR_AUTOCLEAR;
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | val);
@@ -1817,19 +1793,17 @@ static int musb_schedule(
epnum++, hw_ep++) {
int diff;
- if (is_in || hw_ep->is_shared_fifo) {
- if (hw_ep->in_qh != NULL)
- continue;
- } else if (hw_ep->out_qh != NULL)
+ if (musb_ep_get_qh(hw_ep, is_in) != NULL)
continue;
if (hw_ep == musb->bulk_ep)
continue;
if (is_in)
- diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
+ diff = hw_ep->max_packet_sz_rx;
else
- diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
+ diff = hw_ep->max_packet_sz_tx;
+ diff -= (qh->maxpacket * qh->hb_mult);
if (diff >= 0 && best_diff > diff) {
best_diff = diff;
@@ -1932,15 +1906,27 @@ static int musb_urb_enqueue(
qh->is_ready = 1;
qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+ qh->type = usb_endpoint_type(epd);
- /* no high bandwidth support yet */
- if (qh->maxpacket & ~0x7ff) {
- ret = -EMSGSIZE;
- goto done;
+ /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
+ * Some musb cores don't support high bandwidth ISO transfers; and
+ * we don't (yet!) support high bandwidth interrupt transfers.
+ */
+ qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
+ if (qh->hb_mult > 1) {
+ int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
+
+ if (ok)
+ ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
+ || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
+ if (!ok) {
+ ret = -EMSGSIZE;
+ goto done;
+ }
+ qh->maxpacket &= 0x7ff;
}
qh->epnum = usb_endpoint_num(epd);
- qh->type = usb_endpoint_type(epd);
/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
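The multiplier handling above is plain bit arithmetic on wMaxPacketSize: bits 10..0 carry the base packet size, bits 12..11 the extra transactions per microframe. A standalone sketch with a hypothetical descriptor value (not part of the patch):

/* Standalone sketch of the wMaxPacketSize decode used in musb_urb_enqueue() above. */
#include <stdio.h>

int main(void)
{
	unsigned short wMaxPacketSize = 0x1400;	/* hypothetical: 1024-byte packets, 2 extra transactions */
	unsigned int maxpacket = wMaxPacketSize & 0x7ff;
	unsigned int hb_mult = 1 + ((wMaxPacketSize >> 11) & 0x03);

	printf("maxpacket=%u hb_mult=%u bytes/uframe=%u\n",
	       maxpacket, hb_mult, maxpacket * hb_mult);
	return 0;
}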
@@ -2052,14 +2038,15 @@ done:
* called with controller locked, irqs blocked
* that hardware queue advances to the next transfer, unless prevented
*/
-static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
struct musb_hw_ep *ep = qh->hw_ep;
void __iomem *epio = ep->regs;
unsigned hw_end = ep->epnum;
void __iomem *regs = ep->musb->mregs;
- u16 csr;
+ int is_in = usb_pipein(urb->pipe);
int status = 0;
+ u16 csr;
musb_ep_select(regs, hw_end);
@@ -2112,14 +2099,14 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct musb *musb = hcd_to_musb(hcd);
struct musb_qh *qh;
- struct list_head *sched;
unsigned long flags;
+ int is_in = usb_pipein(urb->pipe);
int ret;
DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out");
+ is_in ? "in" : "out");
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
@@ -2130,47 +2117,25 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
if (!qh)
goto done;
- /* Any URB not actively programmed into endpoint hardware can be
+ /*
+ * Any URB not actively programmed into endpoint hardware can be
* immediately given back; that's any URB not at the head of an
* endpoint queue, unless someday we get real DMA queues. And even
* if it's at the head, it might not be known to the hardware...
*
- * Otherwise abort current transfer, pending dma, etc.; urb->status
+ * Otherwise abort current transfer, pending DMA, etc.; urb->status
* has already been updated. This is a synchronous abort; it'd be
* OK to hold off until after some IRQ, though.
+ *
+ * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
*/
- if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
- ret = -EINPROGRESS;
- else {
- switch (qh->type) {
- case USB_ENDPOINT_XFER_CONTROL:
- sched = &musb->control;
- break;
- case USB_ENDPOINT_XFER_BULK:
- if (qh->mux == 1) {
- if (usb_pipein(urb->pipe))
- sched = &musb->in_bulk;
- else
- sched = &musb->out_bulk;
- break;
- }
- default:
- /* REVISIT when we get a schedule tree, periodic
- * transfers won't always be at the head of a
- * singleton queue...
- */
- sched = NULL;
- break;
- }
- }
-
- /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
- if (ret < 0 || (sched && qh != first_qh(sched))) {
+ if (!qh->is_ready
+ || urb->urb_list.prev != &qh->hep->urb_list
+ || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
int ready = qh->is_ready;
- ret = 0;
qh->is_ready = 0;
- __musb_giveback(musb, urb, 0);
+ musb_giveback(musb, urb, 0);
qh->is_ready = ready;
/* If nothing else (usually musb_giveback) is using it
@@ -2182,7 +2147,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
kfree(qh);
}
} else
- ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+ ret = musb_cleanup_urb(urb, qh);
done:
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
@@ -2192,13 +2157,11 @@ done:
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
- u8 epnum = hep->desc.bEndpointAddress;
+ u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
unsigned long flags;
struct musb *musb = hcd_to_musb(hcd);
- u8 is_in = epnum & USB_DIR_IN;
struct musb_qh *qh;
struct urb *urb;
- struct list_head *sched;
spin_lock_irqsave(&musb->lock, flags);
@@ -2206,31 +2169,11 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
if (qh == NULL)
goto exit;
- switch (qh->type) {
- case USB_ENDPOINT_XFER_CONTROL:
- sched = &musb->control;
- break;
- case USB_ENDPOINT_XFER_BULK:
- if (qh->mux == 1) {
- if (is_in)
- sched = &musb->in_bulk;
- else
- sched = &musb->out_bulk;
- break;
- }
- default:
- /* REVISIT when we get a schedule tree, periodic transfers
- * won't always be at the head of a singleton queue...
- */
- sched = NULL;
- break;
- }
-
- /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
+ /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
- /* kick first urb off the hardware, if needed */
+ /* Kick the first URB off the hardware, if needed */
qh->is_ready = 0;
- if (!sched || qh == first_qh(sched)) {
+ if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
urb = next_urb(qh);
/* make software (then hardware) stop ASAP */
@@ -2238,7 +2181,7 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
urb->status = -ESHUTDOWN;
/* cleanup */
- musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+ musb_cleanup_urb(urb, qh);
/* Then nuke all the others ... and advance the
* queue on hw_ep (e.g. bulk ring) when we're done.
@@ -2254,7 +2197,7 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
* will activate any of these as it advances.
*/
while (!list_empty(&hep->urb_list))
- __musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
+ musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
hep->hcpriv = NULL;
list_del(&qh->ring);
@@ -2293,7 +2236,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
- if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
+ if (musb->xceiv->state == OTG_STATE_A_SUSPEND)
return 0;
if (is_host_active(musb) && musb->is_active) {
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 0b7fbcd2196..14b00776638 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -67,6 +67,7 @@ struct musb_qh {
u8 is_ready; /* safe to modify hw_ep */
u8 type; /* XFERTYPE_* */
u8 epnum;
+ u8 hb_mult; /* high bandwidth pkts per uf */
u16 maxpacket;
u16 frame; /* for periodic schedule */
unsigned iso_idx; /* in urb->iso_frame_desc[] */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index bf677acc83d..bfe5fe4ebfe 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -78,18 +78,22 @@ static void musb_port_suspend(struct musb *musb, bool do_suspend)
DBG(3, "Root port suspended, power %02x\n", power);
musb->port1_status |= USB_PORT_STAT_SUSPEND;
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_HOST:
- musb->xceiv.state = OTG_STATE_A_SUSPEND;
+ musb->xceiv->state = OTG_STATE_A_SUSPEND;
musb->is_active = is_otg_enabled(musb)
- && musb->xceiv.host->b_hnp_enable;
+ && musb->xceiv->host->b_hnp_enable;
+ if (musb->is_active)
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(
+ OTG_TIME_A_AIDL_BDIS));
musb_platform_try_idle(musb, 0);
break;
#ifdef CONFIG_USB_MUSB_OTG
case OTG_STATE_B_HOST:
- musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+ musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
musb->is_active = is_otg_enabled(musb)
- && musb->xceiv.host->b_hnp_enable;
+ && musb->xceiv->host->b_hnp_enable;
musb_platform_try_idle(musb, 0);
break;
#endif
@@ -116,7 +120,7 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
void __iomem *mbase = musb->mregs;
#ifdef CONFIG_USB_MUSB_OTG
- if (musb->xceiv.state == OTG_STATE_B_IDLE) {
+ if (musb->xceiv->state == OTG_STATE_B_IDLE) {
DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
musb->port1_status &= ~USB_PORT_STAT_RESET;
return;
@@ -186,14 +190,23 @@ void musb_root_disconnect(struct musb *musb)
usb_hcd_poll_rh_status(musb_to_hcd(musb));
musb->is_active = 0;
- switch (musb->xceiv.state) {
- case OTG_STATE_A_HOST:
+ switch (musb->xceiv->state) {
case OTG_STATE_A_SUSPEND:
- musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+#ifdef CONFIG_USB_MUSB_OTG
+ if (is_otg_enabled(musb)
+ && musb->xceiv->host->b_hnp_enable) {
+ musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
+ musb->g.is_a_peripheral = 1;
+ break;
+ }
+#endif
+ /* FALLTHROUGH */
+ case OTG_STATE_A_HOST:
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
musb->is_active = 0;
break;
case OTG_STATE_A_WAIT_VFALL:
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
break;
default:
DBG(1, "host disconnect (%s)\n", otg_state_string(musb));
@@ -332,7 +345,7 @@ int musb_hub_control(
musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
usb_hcd_poll_rh_status(musb_to_hcd(musb));
/* NOTE: it might really be A_WAIT_BCON ... */
- musb->xceiv.state = OTG_STATE_A_HOST;
+ musb->xceiv->state = OTG_STATE_A_HOST;
}
put_unaligned(cpu_to_le32(musb->port1_status
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 60924ce0849..34875201ee0 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -44,7 +44,6 @@
#define get_cpu_rev() 2
#endif
-#define MUSB_TIMEOUT_A_WAIT_BCON 1100
static struct timer_list musb_idle_timer;
@@ -61,17 +60,17 @@ static void musb_do_idle(unsigned long _musb)
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_WAIT_BCON:
devctl &= ~MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE) {
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
} else {
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
}
break;
@@ -89,7 +88,7 @@ static void musb_do_idle(unsigned long _musb)
musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
usb_hcd_poll_rh_status(musb_to_hcd(musb));
/* NOTE: it might really be A_WAIT_BCON ... */
- musb->xceiv.state = OTG_STATE_A_HOST;
+ musb->xceiv->state = OTG_STATE_A_HOST;
}
break;
#endif
@@ -97,9 +96,9 @@ static void musb_do_idle(unsigned long _musb)
case OTG_STATE_A_HOST:
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE)
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
else
- musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
#endif
default:
break;
@@ -118,7 +117,7 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
/* Never idle if active, or when VBUS timeout is not set as host */
if (musb->is_active || ((musb->a_wait_bcon == 0)
- && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+ && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
del_timer(&musb_idle_timer);
last_timer = jiffies;
@@ -163,8 +162,8 @@ static void omap_set_vbus(struct musb *musb, int is_on)
if (is_on) {
musb->is_active = 1;
- musb->xceiv.default_a = 1;
- musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
devctl |= MUSB_DEVCTL_SESSION;
MUSB_HST_MODE(musb);
@@ -175,8 +174,8 @@ static void omap_set_vbus(struct musb *musb, int is_on)
* jumping right to B_IDLE...
*/
- musb->xceiv.default_a = 0;
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
devctl &= ~MUSB_DEVCTL_SESSION;
MUSB_DEV_MODE(musb);
@@ -188,10 +187,6 @@ static void omap_set_vbus(struct musb *musb, int is_on)
otg_state_string(musb),
musb_readb(musb->mregs, MUSB_DEVCTL));
}
-static int omap_set_power(struct otg_transceiver *x, unsigned mA)
-{
- return 0;
-}
static int musb_platform_resume(struct musb *musb);
@@ -202,24 +197,6 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
devctl |= MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
- switch (musb_mode) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- case MUSB_HOST:
- otg_set_host(&musb->xceiv, musb->xceiv.host);
- break;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- case MUSB_PERIPHERAL:
- otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
- break;
-#endif
-#ifdef CONFIG_USB_MUSB_OTG
- case MUSB_OTG:
- break;
-#endif
- default:
- return -EINVAL;
- }
return 0;
}
@@ -231,6 +208,16 @@ int __init musb_platform_init(struct musb *musb)
omap_cfg_reg(AE5_2430_USB0HS_STP);
#endif
+ /* We require some kind of external transceiver, hooked
+ * up through ULPI. TWL4030-family PMICs include one,
+ * which needs a driver, though drivers aren't always needed.
+ */
+ musb->xceiv = otg_get_transceiver();
+ if (!musb->xceiv) {
+ pr_err("HS USB OTG: no transceiver configured\n");
+ return -ENODEV;
+ }
+
musb_platform_resume(musb);
l = omap_readl(OTG_SYSCONFIG);
@@ -240,7 +227,12 @@ int __init musb_platform_init(struct musb *musb)
l &= ~AUTOIDLE; /* disable auto idle */
l &= ~NOIDLE; /* remove possible noidle */
l |= SMARTIDLE; /* enable smart idle */
- l |= AUTOIDLE; /* enable auto idle */
+ /*
+ * MUSB AUTOIDLE doesn't work on 3430.
+ * Workaround by Richard Woodruff/TI
+ */
+ if (!cpu_is_omap3430())
+ l |= AUTOIDLE; /* enable auto idle */
omap_writel(l, OTG_SYSCONFIG);
l = omap_readl(OTG_INTERFSEL);
@@ -257,9 +249,6 @@ int __init musb_platform_init(struct musb *musb)
if (is_host_enabled(musb))
musb->board_set_vbus = omap_set_vbus;
- if (is_peripheral_enabled(musb))
- musb->xceiv.set_power = omap_set_power;
- musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;
setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
@@ -282,8 +271,7 @@ int musb_platform_suspend(struct musb *musb)
l |= ENABLEWAKEUP; /* enable wakeup */
omap_writel(l, OTG_SYSCONFIG);
- if (musb->xceiv.set_suspend)
- musb->xceiv.set_suspend(&musb->xceiv, 1);
+ otg_set_suspend(musb->xceiv, 1);
if (musb->set_clock)
musb->set_clock(musb->clock, 0);
@@ -300,8 +288,7 @@ static int musb_platform_resume(struct musb *musb)
if (!musb->clock)
return 0;
- if (musb->xceiv.set_suspend)
- musb->xceiv.set_suspend(&musb->xceiv, 0);
+ otg_set_suspend(musb->xceiv, 0);
if (musb->set_clock)
musb->set_clock(musb->clock, 1);
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 4ac1477d356..88b587c703e 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -259,6 +259,8 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
tusb_fifo_read_unaligned(fifo, buf, len);
}
+static struct musb *the_musb;
+
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
/* This is used by gadget drivers, and OTG transceiver logic, allowing
@@ -269,7 +271,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
*/
static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
{
- struct musb *musb = container_of(x, struct musb, xceiv);
+ struct musb *musb = the_musb;
void __iomem *tbase = musb->ctrl_base;
u32 reg;
@@ -419,7 +421,7 @@ static void musb_do_idle(unsigned long _musb)
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_WAIT_BCON:
if ((musb->a_wait_bcon != 0)
&& (musb->idle_timeout == 0
@@ -483,7 +485,7 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
/* Never idle if active, or when VBUS timeout is not set as host */
if (musb->is_active || ((musb->a_wait_bcon == 0)
- && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+ && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
del_timer(&musb_idle_timer);
last_timer = jiffies;
@@ -532,8 +534,8 @@ static void tusb_source_power(struct musb *musb, int is_on)
if (musb->set_clock)
musb->set_clock(musb->clock, 1);
timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
- musb->xceiv.default_a = 1;
- musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
devctl |= MUSB_DEVCTL_SESSION;
conf |= TUSB_DEV_CONF_USB_HOST_MODE;
@@ -546,24 +548,24 @@ static void tusb_source_power(struct musb *musb, int is_on)
/* If ID pin is grounded, we want to be a_idle */
otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_WAIT_VRISE:
case OTG_STATE_A_WAIT_BCON:
- musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
break;
case OTG_STATE_A_WAIT_VFALL:
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
break;
default:
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
}
musb->is_active = 0;
- musb->xceiv.default_a = 1;
+ musb->xceiv->default_a = 1;
MUSB_HST_MODE(musb);
} else {
musb->is_active = 0;
- musb->xceiv.default_a = 0;
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
}
@@ -674,7 +676,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
else
default_a = is_host_enabled(musb);
DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
- musb->xceiv.default_a = default_a;
+ musb->xceiv->default_a = default_a;
tusb_source_power(musb, default_a);
/* Don't allow idling immediately */
@@ -686,7 +688,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
/* B-dev state machine: no vbus ~= disconnect */
- if ((is_otg_enabled(musb) && !musb->xceiv.default_a)
+ if ((is_otg_enabled(musb) && !musb->xceiv->default_a)
|| !is_host_enabled(musb)) {
#ifdef CONFIG_USB_MUSB_HDRC_HCD
/* ? musb_root_disconnect(musb); */
@@ -701,9 +703,9 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
DBG(1, "Forcing disconnect (no interrupt)\n");
- if (musb->xceiv.state != OTG_STATE_B_IDLE) {
+ if (musb->xceiv->state != OTG_STATE_B_IDLE) {
/* INTR_DISCONNECT can hide... */
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
musb->int_usb |= MUSB_INTR_DISCONNECT;
}
musb->is_active = 0;
@@ -717,7 +719,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
DBG(2, "vbus change, %s, otg %03x\n",
otg_state_string(musb), otg_stat);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_IDLE:
DBG(2, "Got SRP, turning on VBUS\n");
musb_set_vbus(musb, 1);
@@ -765,7 +767,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_WAIT_VRISE:
/* VBUS has probably been valid for a while now,
* but may well have bounced out of range a bit
@@ -777,7 +779,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
DBG(2, "devctl %02x\n", devctl);
break;
}
- musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
musb->is_active = 0;
idle_timeout = jiffies
+ msecs_to_jiffies(musb->a_wait_bcon);
@@ -1093,9 +1095,14 @@ int __init musb_platform_init(struct musb *musb)
{
struct platform_device *pdev;
struct resource *mem;
- void __iomem *sync;
+ void __iomem *sync = NULL;
int ret;
+ usb_nop_xceiv_register();
+ musb->xceiv = otg_get_transceiver();
+ if (!musb->xceiv)
+ return -ENODEV;
+
pdev = to_platform_device(musb->controller);
/* dma address for async dma */
@@ -1106,14 +1113,16 @@ int __init musb_platform_init(struct musb *musb)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!mem) {
pr_debug("no sync dma resource?\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto done;
}
musb->sync = mem->start;
sync = ioremap(mem->start, mem->end - mem->start + 1);
if (!sync) {
pr_debug("ioremap for sync failed\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto done;
}
musb->sync_va = sync;
@@ -1126,28 +1135,37 @@ int __init musb_platform_init(struct musb *musb)
if (ret) {
printk(KERN_ERR "Could not start tusb6010 (%d)\n",
ret);
- return -ENODEV;
+ goto done;
}
musb->isr = tusb_interrupt;
if (is_host_enabled(musb))
musb->board_set_vbus = tusb_source_power;
- if (is_peripheral_enabled(musb))
- musb->xceiv.set_power = tusb_draw_power;
+ if (is_peripheral_enabled(musb)) {
+ musb->xceiv->set_power = tusb_draw_power;
+ the_musb = musb;
+ }
setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+done:
+ if (ret < 0) {
+ if (sync)
+ iounmap(sync);
+ usb_nop_xceiv_unregister();
+ }
return ret;
}
int musb_platform_exit(struct musb *musb)
{
del_timer_sync(&musb_idle_timer);
+ the_musb = NULL;
if (musb->board_set_power)
musb->board_set_power(0);
iounmap(musb->sync_va);
-
+ usb_nop_xceiv_unregister();
return 0;
}
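The init/exit changes above follow the usual single-exit cleanup idiom: jump to one label on failure and undo only what already succeeded. A standalone sketch of that pattern, with hypothetical stand-ins for ioremap()/iounmap() and the start step (not part of the patch):

/* Standalone sketch of the goto-based cleanup pattern used in musb_platform_init() above. */
#include <stdio.h>
#include <stdlib.h>

static void *grab_a(void) { return malloc(16); }	/* stands in for ioremap() */
static void release_a(void *p) { free(p); }		/* stands in for iounmap() */
static int grab_b(void) { return -1; }			/* stands in for a start step that fails */

static int init(void)
{
	void *a;
	int ret;

	a = grab_a();
	if (!a)
		return -1;	/* nothing acquired yet, nothing to undo */

	ret = grab_b();
	if (ret < 0)
		goto done;

	return 0;		/* success: keep what we acquired */

done:
	release_a(a);		/* undo only what succeeded */
	return ret;
}

int main(void)
{
	printf("init() = %d\n", init());
	return 0;
}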
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index aa884d072f0..69feeec1628 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -59,4 +59,18 @@ config NOP_USB_XCEIV
built-in with usb ip or which are autonomous and don't require any
phy programming such as ISP1x04 etc.
+config USB_LANGWELL_OTG
+ tristate "Intel Langwell USB OTG dual-role support"
+ depends on USB && MRST
+ select USB_OTG
+ select USB_OTG_UTILS
+ help
+ Say Y here if you want to build the Intel Langwell USB OTG
+ transceiver driver into the kernel. This driver implements role
+ switching between the EHCI host driver and the Langwell USB OTG
+ client driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called langwell_otg.
+
endif # USB || OTG
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 20816785652..6d1abdd3c0a 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_OTG_UTILS) += otg.o
obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
+obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o
obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG
diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c
new file mode 100644
index 00000000000..6f628d0e9f3
--- /dev/null
+++ b/drivers/usb/otg/langwell_otg.c
@@ -0,0 +1,1915 @@
+/*
+ * Intel Langwell USB OTG transceiver driver
+ * Copyright (C) 2008 - 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+/* This driver switches the Langwell OTG controller between host and
+ * peripheral roles. It works together with the EHCI host driver and the
+ * Langwell client controller driver.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/notifier.h>
+#include <asm/ipc_defs.h>
+#include <linux/delay.h>
+#include "../core/hcd.h"
+
+#include <linux/usb/langwell_otg.h>
+
+#define DRIVER_DESC "Intel Langwell USB OTG transceiver driver"
+#define DRIVER_VERSION "3.0.0.32L.0002"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+static const char driver_name[] = "langwell_otg";
+
+static int langwell_otg_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id);
+static void langwell_otg_remove(struct pci_dev *pdev);
+static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message);
+static int langwell_otg_resume(struct pci_dev *pdev);
+
+static int langwell_otg_set_host(struct otg_transceiver *otg,
+ struct usb_bus *host);
+static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
+ struct usb_gadget *gadget);
+static int langwell_otg_start_srp(struct otg_transceiver *otg);
+
+static const struct pci_device_id pci_ids[] = {{
+ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+ .class_mask = ~0,
+ .vendor = 0x8086,
+ .device = 0x0811,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+}, { /* end: all zeroes */ }
+};
+
+static struct pci_driver otg_pci_driver = {
+ .name = (char *) driver_name,
+ .id_table = pci_ids,
+
+ .probe = langwell_otg_probe,
+ .remove = langwell_otg_remove,
+
+ .suspend = langwell_otg_suspend,
+ .resume = langwell_otg_resume,
+};
+
+static const char *state_string(enum usb_otg_state state)
+{
+ switch (state) {
+ case OTG_STATE_A_IDLE:
+ return "a_idle";
+ case OTG_STATE_A_WAIT_VRISE:
+ return "a_wait_vrise";
+ case OTG_STATE_A_WAIT_BCON:
+ return "a_wait_bcon";
+ case OTG_STATE_A_HOST:
+ return "a_host";
+ case OTG_STATE_A_SUSPEND:
+ return "a_suspend";
+ case OTG_STATE_A_PERIPHERAL:
+ return "a_peripheral";
+ case OTG_STATE_A_WAIT_VFALL:
+ return "a_wait_vfall";
+ case OTG_STATE_A_VBUS_ERR:
+ return "a_vbus_err";
+ case OTG_STATE_B_IDLE:
+ return "b_idle";
+ case OTG_STATE_B_SRP_INIT:
+ return "b_srp_init";
+ case OTG_STATE_B_PERIPHERAL:
+ return "b_peripheral";
+ case OTG_STATE_B_WAIT_ACON:
+ return "b_wait_acon";
+ case OTG_STATE_B_HOST:
+ return "b_host";
+ default:
+ return "UNDEFINED";
+ }
+}
+
+/* HSM timers */
+static inline struct langwell_otg_timer *otg_timer_initializer
+(void (*function)(unsigned long), unsigned long expires, unsigned long data)
+{
+ struct langwell_otg_timer *timer;
+ timer = kmalloc(sizeof(struct langwell_otg_timer), GFP_KERNEL);
+ if (!timer)
+ return NULL;
+ timer->function = function;
+ timer->expires = expires;
+ timer->data = data;
+ return timer;
+}
+
+static struct langwell_otg_timer *a_wait_vrise_tmr, *a_wait_bcon_tmr,
+ *a_aidl_bdis_tmr, *b_ase0_brst_tmr, *b_se0_srp_tmr, *b_srp_res_tmr,
+ *b_bus_suspend_tmr;
+
+static struct list_head active_timers;
+
+static struct langwell_otg *the_transceiver;
+
+/* host/client driver notifies the transceiver when an event affects the HNP state */
+void langwell_update_transceiver(void)
+{
+ otg_dbg("transceiver driver is notified\n");
+ queue_work(the_transceiver->qwork, &the_transceiver->work);
+}
+EXPORT_SYMBOL(langwell_update_transceiver);
+
+static int langwell_otg_set_host(struct otg_transceiver *otg,
+ struct usb_bus *host)
+{
+ otg->host = host;
+
+ return 0;
+}
+
+static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
+ struct usb_gadget *gadget)
+{
+ otg->gadget = gadget;
+
+ return 0;
+}
+
+static int langwell_otg_set_power(struct otg_transceiver *otg,
+ unsigned mA)
+{
+ return 0;
+}
+
+/* A-device drives vbus, controlled through PMIC CHRGCNTL register*/
+static void langwell_otg_drv_vbus(int on)
+{
+ struct ipc_pmic_reg_data pmic_data = {0};
+ struct ipc_pmic_reg_data battery_data;
+
+ /* Check if battery is attached or not */
+ battery_data.pmic_reg_data[0].register_address = 0xd2;
+ battery_data.ioc = 0;
+ battery_data.num_entries = 1;
+ if (ipc_pmic_register_read(&battery_data)) {
+ otg_dbg("Failed to read PMIC register 0xd2.\n");
+ return;
+ }
+
+ if ((battery_data.pmic_reg_data[0].value & 0x20) == 0) {
+ otg_dbg("no battery attached\n");
+ return;
+ }
+
+ /* Workaround for battery attachment issue */
+ if (battery_data.pmic_reg_data[0].value == 0x34) {
+ otg_dbg("battery \n");
+ return;
+ }
+
+ otg_dbg("battery attached\n");
+
+ pmic_data.ioc = 0;
+ pmic_data.pmic_reg_data[0].register_address = 0xD4;
+ pmic_data.num_entries = 1;
+ if (on)
+ pmic_data.pmic_reg_data[0].value = 0x20;
+ else
+ pmic_data.pmic_reg_data[0].value = 0xc0;
+
+ if (ipc_pmic_register_write(&pmic_data, TRUE))
+ otg_dbg("Failed to write PMIC.\n");
+
+}
+
+/* charge vbus or discharge vbus through a resistor to ground */
+static void langwell_otg_chrg_vbus(int on)
+{
+
+ u32 val;
+
+ val = readl(the_transceiver->regs + CI_OTGSC);
+
+ if (on)
+ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VC,
+ the_transceiver->regs + CI_OTGSC);
+ else
+ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VD,
+ the_transceiver->regs + CI_OTGSC);
+
+}
+
+/* Start SRP */
+static int langwell_otg_start_srp(struct otg_transceiver *otg)
+{
+ u32 val;
+
+ otg_dbg("Start SRP ->\n");
+
+ val = readl(the_transceiver->regs + CI_OTGSC);
+
+ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
+ the_transceiver->regs + CI_OTGSC);
+
+ /* Check if the data plus is finished or not */
+ msleep(8);
+ val = readl(the_transceiver->regs + CI_OTGSC);
+ if (val & (OTGSC_HADP | OTGSC_DP))
+ otg_dbg("DataLine SRP Error\n");
+
+ /* FIXME: VBus SRP */
+
+ return 0;
+}
+
+
+/* stop SOF via bus_suspend */
+static void langwell_otg_loc_sof(int on)
+{
+ struct usb_hcd *hcd;
+ int err;
+
+ otg_dbg("loc_sof -> %d\n", on);
+
+ hcd = bus_to_hcd(the_transceiver->otg.host);
+ if (on)
+ err = hcd->driver->bus_resume(hcd);
+ else
+ err = hcd->driver->bus_suspend(hcd);
+
+ if (err)
+ otg_dbg("Failed to resume/suspend bus - %d\n", err);
+}
+
+static void langwell_otg_phy_low_power(int on)
+{
+ u32 val;
+
+ otg_dbg("phy low power mode-> %d\n", on);
+
+ val = readl(the_transceiver->regs + CI_HOSTPC1);
+ if (on)
+ writel(val | HOSTPC1_PHCD, the_transceiver->regs + CI_HOSTPC1);
+ else
+ writel(val & ~HOSTPC1_PHCD, the_transceiver->regs + CI_HOSTPC1);
+}
+
+/* Enable/Disable OTG interrupt */
+static void langwell_otg_intr(int on)
+{
+ u32 val;
+
+ otg_dbg("interrupt -> %d\n", on);
+
+ val = readl(the_transceiver->regs + CI_OTGSC);
+ if (on) {
+ val = val | (OTGSC_INTEN_MASK | OTGSC_IDPU);
+ writel(val, the_transceiver->regs + CI_OTGSC);
+ } else {
+ val = val & ~(OTGSC_INTEN_MASK | OTGSC_IDPU);
+ writel(val, the_transceiver->regs + CI_OTGSC);
+ }
+}
+
+/* set HAAR: Hardware Assist Auto-Reset */
+static void langwell_otg_HAAR(int on)
+{
+ u32 val;
+
+ otg_dbg("HAAR -> %d\n", on);
+
+ val = readl(the_transceiver->regs + CI_OTGSC);
+ if (on)
+ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
+ the_transceiver->regs + CI_OTGSC);
+ else
+ writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
+ the_transceiver->regs + CI_OTGSC);
+}
+
+/* set HABA: Hardware Assist B-Disconnect to A-Connect */
+static void langwell_otg_HABA(int on)
+{
+ u32 val;
+
+ otg_dbg("HABA -> %d\n", on);
+
+ val = readl(the_transceiver->regs + CI_OTGSC);
+ if (on)
+ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
+ the_transceiver->regs + CI_OTGSC);
+ else
+ writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
+ the_transceiver->regs + CI_OTGSC);
+}
+
+static int langwell_otg_check_se0_srp(int on)
+{
+ u32 val;
+
+ int delay_time = TB_SE0_SRP * 10; /* step is 100us */
+
+ otg_dbg("check_se0_srp -> \n");
+
+ do {
+ udelay(100);
+ if (!delay_time--)
+ break;
+ val = readl(the_transceiver->regs + CI_PORTSC1);
+ val &= PORTSC_LS;
+ } while (!val);
+
+ otg_dbg("check_se0_srp <- \n");
+ return val;
+}
+
+/* The timeout callback function to set time out bit */
+static void set_tmout(unsigned long indicator)
+{
+ *(int *)indicator = 1;
+}
+
+void langwell_otg_nsf_msg(unsigned long indicator)
+{
+ switch (indicator) {
+ case 2:
+ case 4:
+ case 6:
+ case 7:
+ printk(KERN_ERR "OTG:NSF-%lu - deivce not responding\n",
+ indicator);
+ break;
+ case 3:
+ printk(KERN_ERR "OTG:NSF-%lu - deivce not supported\n",
+ indicator);
+ break;
+ default:
+ printk(KERN_ERR "Do not have this kind of NSF\n");
+ break;
+ }
+}
+
+/* Initialize timers */
+static void langwell_otg_init_timers(struct otg_hsm *hsm)
+{
+ /* HSM used timers */
+ a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE,
+ (unsigned long)&hsm->a_wait_vrise_tmout);
+ a_wait_bcon_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_BCON,
+ (unsigned long)&hsm->a_wait_bcon_tmout);
+ a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS,
+ (unsigned long)&hsm->a_aidl_bdis_tmout);
+ b_ase0_brst_tmr = otg_timer_initializer(&set_tmout, TB_ASE0_BRST,
+ (unsigned long)&hsm->b_ase0_brst_tmout);
+ b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP,
+ (unsigned long)&hsm->b_se0_srp);
+ b_srp_res_tmr = otg_timer_initializer(&set_tmout, TB_SRP_RES,
+ (unsigned long)&hsm->b_srp_res_tmout);
+ b_bus_suspend_tmr = otg_timer_initializer(&set_tmout, TB_BUS_SUSPEND,
+ (unsigned long)&hsm->b_bus_suspend_tmout);
+}
+
+/* Free timers */
+static void langwell_otg_free_timers(void)
+{
+ kfree(a_wait_vrise_tmr);
+ kfree(a_wait_bcon_tmr);
+ kfree(a_aidl_bdis_tmr);
+ kfree(b_ase0_brst_tmr);
+ kfree(b_se0_srp_tmr);
+ kfree(b_srp_res_tmr);
+ kfree(b_bus_suspend_tmr);
+}
+
+/* Add timer to timer list */
+static void langwell_otg_add_timer(void *gtimer)
+{
+ struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
+ struct langwell_otg_timer *tmp_timer;
+ u32 val32;
+
+ /* Check if the timer is already in the active list,
+ * if so update timer count
+ */
+ list_for_each_entry(tmp_timer, &active_timers, list)
+ if (tmp_timer == timer) {
+ timer->count = timer->expires;
+ return;
+ }
+ timer->count = timer->expires;
+
+ if (list_empty(&active_timers)) {
+ val32 = readl(the_transceiver->regs + CI_OTGSC);
+ writel(val32 | OTGSC_1MSE, the_transceiver->regs + CI_OTGSC);
+ }
+
+ list_add_tail(&timer->list, &active_timers);
+}
+
+/* Remove timer from the timer list; clear timeout status */
+static void langwell_otg_del_timer(void *gtimer)
+{
+ struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
+ struct langwell_otg_timer *tmp_timer, *del_tmp;
+ u32 val32;
+
+ list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list)
+ if (tmp_timer == timer)
+ list_del(&timer->list);
+
+ if (list_empty(&active_timers)) {
+ val32 = readl(the_transceiver->regs + CI_OTGSC);
+ writel(val32 & ~OTGSC_1MSE, the_transceiver->regs + CI_OTGSC);
+ }
+}
+
+/* Reduce timer count by 1, and find timeout conditions.*/
+static int langwell_otg_tick_timer(u32 *int_sts)
+{
+ struct langwell_otg_timer *tmp_timer, *del_tmp;
+ int expired = 0;
+
+ list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) {
+ tmp_timer->count--;
+ /* check if timer expires */
+ if (!tmp_timer->count) {
+ list_del(&tmp_timer->list);
+ tmp_timer->function(tmp_timer->data);
+ expired = 1;
+ }
+ }
+
+ if (list_empty(&active_timers)) {
+ otg_dbg("tick timer: disable 1ms int\n");
+ *int_sts = *int_sts & ~OTGSC_1MSE;
+ }
+ return expired;
+}
+
+static void reset_otg(void)
+{
+ u32 val;
+ int delay_time = 1000;
+
+ otg_dbg("reseting OTG controller ...\n");
+ val = readl(the_transceiver->regs + CI_USBCMD);
+ writel(val | USBCMD_RST, the_transceiver->regs + CI_USBCMD);
+ do {
+ udelay(100);
+ if (!delay_time--)
+ otg_dbg("reset timeout\n");
+ val = readl(the_transceiver->regs + CI_USBCMD);
+ val &= USBCMD_RST;
+ } while (val != 0);
+ otg_dbg("reset done.\n");
+}
+
+static void set_host_mode(void)
+{
+ u32 val;
+
+ reset_otg();
+ val = readl(the_transceiver->regs + CI_USBMODE);
+ val = (val & (~USBMODE_CM)) | USBMODE_HOST;
+ writel(val, the_transceiver->regs + CI_USBMODE);
+}
+
+static void set_client_mode(void)
+{
+ u32 val;
+
+ reset_otg();
+ val = readl(the_transceiver->regs + CI_USBMODE);
+ val = (val & (~USBMODE_CM)) | USBMODE_DEVICE;
+ writel(val, the_transceiver->regs + CI_USBMODE);
+}
+
+static void init_hsm(void)
+{
+ struct langwell_otg *langwell = the_transceiver;
+ u32 val32;
+
+ /* read OTGSC after reset */
+ val32 = readl(langwell->regs + CI_OTGSC);
+ otg_dbg("%s: OTGSC init value = 0x%x\n", __func__, val32);
+
+ /* set init state */
+ if (val32 & OTGSC_ID) {
+ langwell->hsm.id = 1;
+ langwell->otg.default_a = 0;
+ set_client_mode();
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ langwell_otg_drv_vbus(0);
+ } else {
+ langwell->hsm.id = 0;
+ langwell->otg.default_a = 1;
+ set_host_mode();
+ langwell->otg.state = OTG_STATE_A_IDLE;
+ }
+
+ /* set session indicator */
+ if (val32 & OTGSC_BSE)
+ langwell->hsm.b_sess_end = 1;
+ if (val32 & OTGSC_BSV)
+ langwell->hsm.b_sess_vld = 1;
+ if (val32 & OTGSC_ASV)
+ langwell->hsm.a_sess_vld = 1;
+ if (val32 & OTGSC_AVV)
+ langwell->hsm.a_vbus_vld = 1;
+
+ /* by default, power the bus */
+ langwell->hsm.a_bus_req = 1;
+ langwell->hsm.a_bus_drop = 0;
+ /* by default, don't request the bus as a B-device */
+ langwell->hsm.b_bus_req = 0;
+ /* no system error */
+ langwell->hsm.a_clr_err = 0;
+}
+
+static irqreturn_t otg_dummy_irq(int irq, void *_dev)
+{
+ void __iomem *reg_base = _dev;
+ u32 val;
+ u32 int_mask = 0;
+
+ val = readl(reg_base + CI_USBMODE);
+ if ((val & USBMODE_CM) != USBMODE_DEVICE)
+ return IRQ_NONE;
+
+ val = readl(reg_base + CI_USBSTS);
+ int_mask = val & INTR_DUMMY_MASK;
+
+ if (int_mask == 0)
+ return IRQ_NONE;
+
+ /* clear hsm.b_conn here since the host driver can't detect it;
+ * otg_dummy_irq being called means a B-disconnect happened.
+ */
+ if (the_transceiver->hsm.b_conn) {
+ the_transceiver->hsm.b_conn = 0;
+ if (spin_trylock(&the_transceiver->wq_lock)) {
+ queue_work(the_transceiver->qwork,
+ &the_transceiver->work);
+ spin_unlock(&the_transceiver->wq_lock);
+ }
+ }
+ /* Clear interrupts */
+ writel(int_mask, reg_base + CI_USBSTS);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t otg_irq(int irq, void *_dev)
+{
+ struct langwell_otg *langwell = _dev;
+ u32 int_sts, int_en;
+ u32 int_mask = 0;
+ int flag = 0;
+
+ int_sts = readl(langwell->regs + CI_OTGSC);
+ int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
+ int_mask = int_sts & int_en;
+ if (int_mask == 0)
+ return IRQ_NONE;
+
+ if (int_mask & OTGSC_IDIS) {
+ otg_dbg("%s: id change int\n", __func__);
+ langwell->hsm.id = (int_sts & OTGSC_ID) ? 1 : 0;
+ flag = 1;
+ }
+ if (int_mask & OTGSC_DPIS) {
+ otg_dbg("%s: data pulse int\n", __func__);
+ langwell->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
+ flag = 1;
+ }
+ if (int_mask & OTGSC_BSEIS) {
+ otg_dbg("%s: b session end int\n", __func__);
+ langwell->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
+ flag = 1;
+ }
+ if (int_mask & OTGSC_BSVIS) {
+ otg_dbg("%s: b session valid int\n", __func__);
+ langwell->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
+ flag = 1;
+ }
+ if (int_mask & OTGSC_ASVIS) {
+ otg_dbg("%s: a session valid int\n", __func__);
+ langwell->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
+ flag = 1;
+ }
+ if (int_mask & OTGSC_AVVIS) {
+ otg_dbg("%s: a vbus valid int\n", __func__);
+ langwell->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
+ flag = 1;
+ }
+
+ if (int_mask & OTGSC_1MSS) {
+ /* need to schedule otg_work if any timer is expired */
+ if (langwell_otg_tick_timer(&int_sts))
+ flag = 1;
+ }
+
+ writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
+ langwell->regs + CI_OTGSC);
+ if (flag)
+ queue_work(langwell->qwork, &langwell->work);
+
+ return IRQ_HANDLED;
+}
+
+static void langwell_otg_work(struct work_struct *work)
+{
+ struct langwell_otg *langwell = container_of(work,
+ struct langwell_otg, work);
+ int retval;
+
+ otg_dbg("%s: old state = %s\n", __func__,
+ state_string(langwell->otg.state));
+
+ switch (langwell->otg.state) {
+ case OTG_STATE_UNDEFINED:
+ case OTG_STATE_B_IDLE:
+ if (!langwell->hsm.id) {
+ langwell_otg_del_timer(b_srp_res_tmr);
+ langwell->otg.default_a = 1;
+ langwell->hsm.a_srp_det = 0;
+
+ langwell_otg_chrg_vbus(0);
+ langwell_otg_drv_vbus(0);
+
+ set_host_mode();
+ langwell->otg.state = OTG_STATE_A_IDLE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (langwell->hsm.b_srp_res_tmout) {
+ langwell->hsm.b_srp_res_tmout = 0;
+ langwell->hsm.b_bus_req = 0;
+ langwell_otg_nsf_msg(6);
+ } else if (langwell->hsm.b_sess_vld) {
+ langwell_otg_del_timer(b_srp_res_tmr);
+ langwell->hsm.b_sess_end = 0;
+ langwell->hsm.a_bus_suspend = 0;
+
+ langwell_otg_chrg_vbus(0);
+ if (langwell->client_ops) {
+ langwell->client_ops->resume(langwell->pdev);
+ langwell->otg.state = OTG_STATE_B_PERIPHERAL;
+ } else
+ otg_dbg("client driver not loaded.\n");
+
+ } else if (langwell->hsm.b_bus_req &&
+ (langwell->hsm.b_sess_end)) {
+ /* workaround for b_se0_srp detection */
+ retval = langwell_otg_check_se0_srp(0);
+ if (retval) {
+ langwell->hsm.b_bus_req = 0;
+ otg_dbg("LS is not SE0, try again later\n");
+ } else {
+ /* Start SRP */
+ langwell_otg_start_srp(&langwell->otg);
+ langwell_otg_add_timer(b_srp_res_tmr);
+ }
+ }
+ break;
+ case OTG_STATE_B_SRP_INIT:
+ if (!langwell->hsm.id) {
+ langwell->otg.default_a = 1;
+ langwell->hsm.a_srp_det = 0;
+
+ langwell_otg_drv_vbus(0);
+ langwell_otg_chrg_vbus(0);
+
+ langwell->otg.state = OTG_STATE_A_IDLE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (langwell->hsm.b_sess_vld) {
+ langwell_otg_chrg_vbus(0);
+ if (langwell->client_ops) {
+ langwell->client_ops->resume(langwell->pdev);
+ langwell->otg.state = OTG_STATE_B_PERIPHERAL;
+ } else
+ otg_dbg("client driver not loaded.\n");
+ }
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ if (!langwell->hsm.id) {
+ langwell->otg.default_a = 1;
+ langwell->hsm.a_srp_det = 0;
+
+ langwell_otg_drv_vbus(0);
+ langwell_otg_chrg_vbus(0);
+ set_host_mode();
+
+ if (langwell->client_ops) {
+ langwell->client_ops->suspend(langwell->pdev,
+ PMSG_FREEZE);
+ } else
+ otg_dbg("client driver has been removed.\n");
+
+ langwell->otg.state = OTG_STATE_A_IDLE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (!langwell->hsm.b_sess_vld) {
+ langwell->hsm.b_hnp_enable = 0;
+
+ if (langwell->client_ops) {
+ langwell->client_ops->suspend(langwell->pdev,
+ PMSG_FREEZE);
+ } else
+ otg_dbg("client driver has been removed.\n");
+
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ } else if (langwell->hsm.b_bus_req && langwell->hsm.b_hnp_enable
+ && langwell->hsm.a_bus_suspend) {
+
+ if (langwell->client_ops) {
+ langwell->client_ops->suspend(langwell->pdev,
+ PMSG_FREEZE);
+ } else
+ otg_dbg("client driver has been removed.\n");
+
+ langwell_otg_HAAR(1);
+ langwell->hsm.a_conn = 0;
+
+ if (langwell->host_ops) {
+ langwell->host_ops->probe(langwell->pdev,
+ langwell->host_ops->id_table);
+ langwell->otg.state = OTG_STATE_B_WAIT_ACON;
+ } else
+ otg_dbg("host driver not loaded.\n");
+
+ langwell->hsm.a_bus_resume = 0;
+ langwell->hsm.b_ase0_brst_tmout = 0;
+ langwell_otg_add_timer(b_ase0_brst_tmr);
+ }
+ break;
+
+ case OTG_STATE_B_WAIT_ACON:
+ if (!langwell->hsm.id) {
+ langwell_otg_del_timer(b_ase0_brst_tmr);
+ langwell->otg.default_a = 1;
+ langwell->hsm.a_srp_det = 0;
+
+ langwell_otg_drv_vbus(0);
+ langwell_otg_chrg_vbus(0);
+ set_host_mode();
+
+ langwell_otg_HAAR(0);
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell->otg.state = OTG_STATE_A_IDLE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (!langwell->hsm.b_sess_vld) {
+ langwell_otg_del_timer(b_ase0_brst_tmr);
+ langwell->hsm.b_hnp_enable = 0;
+ langwell->hsm.b_bus_req = 0;
+ langwell_otg_chrg_vbus(0);
+ langwell_otg_HAAR(0);
+
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ } else if (langwell->hsm.a_conn) {
+ langwell_otg_del_timer(b_ase0_brst_tmr);
+ langwell_otg_HAAR(0);
+ langwell->otg.state = OTG_STATE_B_HOST;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (langwell->hsm.a_bus_resume ||
+ langwell->hsm.b_ase0_brst_tmout) {
+ langwell_otg_del_timer(b_ase0_brst_tmr);
+ langwell_otg_HAAR(0);
+ langwell_otg_nsf_msg(7);
+
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+
+ langwell->hsm.a_bus_suspend = 0;
+ langwell->hsm.b_bus_req = 0;
+
+ if (langwell->client_ops)
+ langwell->client_ops->resume(langwell->pdev);
+ else
+ otg_dbg("client driver not loaded.\n");
+
+ langwell->otg.state = OTG_STATE_B_PERIPHERAL;
+ }
+ break;
+
+ case OTG_STATE_B_HOST:
+ if (!langwell->hsm.id) {
+ langwell->otg.default_a = 1;
+ langwell->hsm.a_srp_det = 0;
+
+ langwell_otg_drv_vbus(0);
+ langwell_otg_chrg_vbus(0);
+ set_host_mode();
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell->otg.state = OTG_STATE_A_IDLE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (!langwell->hsm.b_sess_vld) {
+ langwell->hsm.b_hnp_enable = 0;
+ langwell->hsm.b_bus_req = 0;
+ langwell_otg_chrg_vbus(0);
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ } else if ((!langwell->hsm.b_bus_req) ||
+ (!langwell->hsm.a_conn)) {
+ langwell->hsm.b_bus_req = 0;
+ langwell_otg_loc_sof(0);
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+
+ langwell->hsm.a_bus_suspend = 0;
+
+ if (langwell->client_ops)
+ langwell->client_ops->resume(langwell->pdev);
+ else
+ otg_dbg("client driver not loaded.\n");
+
+ langwell->otg.state = OTG_STATE_B_PERIPHERAL;
+ }
+ break;
+
+ case OTG_STATE_A_IDLE:
+ langwell->otg.default_a = 1;
+ if (langwell->hsm.id) {
+ langwell->otg.default_a = 0;
+ langwell->hsm.b_bus_req = 0;
+ langwell_otg_drv_vbus(0);
+ langwell_otg_chrg_vbus(0);
+
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (langwell->hsm.a_sess_vld) {
+ langwell_otg_drv_vbus(1);
+ langwell->hsm.a_srp_det = 1;
+ langwell->hsm.a_wait_vrise_tmout = 0;
+ langwell_otg_add_timer(a_wait_vrise_tmr);
+ langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (!langwell->hsm.a_bus_drop &&
+ (langwell->hsm.a_srp_det || langwell->hsm.a_bus_req)) {
+ langwell_otg_drv_vbus(1);
+ langwell->hsm.a_wait_vrise_tmout = 0;
+ langwell_otg_add_timer(a_wait_vrise_tmr);
+ langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
+ queue_work(langwell->qwork, &langwell->work);
+ }
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ if (langwell->hsm.id) {
+ langwell_otg_del_timer(a_wait_vrise_tmr);
+ langwell->hsm.b_bus_req = 0;
+ langwell->otg.default_a = 0;
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ } else if (langwell->hsm.a_vbus_vld) {
+ langwell_otg_del_timer(a_wait_vrise_tmr);
+ if (langwell->host_ops)
+ langwell->host_ops->probe(langwell->pdev,
+ langwell->host_ops->id_table);
+ else
+ otg_dbg("host driver not loaded.\n");
+ langwell->hsm.b_conn = 0;
+ langwell->hsm.a_set_b_hnp_en = 0;
+ langwell->hsm.a_wait_bcon_tmout = 0;
+ langwell_otg_add_timer(a_wait_bcon_tmr);
+ langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+ } else if (langwell->hsm.a_wait_vrise_tmout) {
+ if (langwell->hsm.a_vbus_vld) {
+ if (langwell->host_ops)
+ langwell->host_ops->probe(
+ langwell->pdev,
+ langwell->host_ops->id_table);
+ else
+ otg_dbg("host driver not loaded.\n");
+ langwell->hsm.b_conn = 0;
+ langwell->hsm.a_set_b_hnp_en = 0;
+ langwell->hsm.a_wait_bcon_tmout = 0;
+ langwell_otg_add_timer(a_wait_bcon_tmr);
+ langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+ } else {
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+ }
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (langwell->hsm.id) {
+ langwell_otg_del_timer(a_wait_bcon_tmr);
+
+ langwell->otg.default_a = 0;
+ langwell->hsm.b_bus_req = 0;
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (!langwell->hsm.a_vbus_vld) {
+ langwell_otg_del_timer(a_wait_bcon_tmr);
+
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+ } else if (langwell->hsm.a_bus_drop ||
+ (langwell->hsm.a_wait_bcon_tmout &&
+ !langwell->hsm.a_bus_req)) {
+ langwell_otg_del_timer(a_wait_bcon_tmr);
+
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+ } else if (langwell->hsm.b_conn) {
+ langwell_otg_del_timer(a_wait_bcon_tmr);
+
+ langwell->hsm.a_suspend_req = 0;
+ langwell->otg.state = OTG_STATE_A_HOST;
+ if (!langwell->hsm.a_bus_req &&
+ langwell->hsm.a_set_b_hnp_en) {
+ /* It is not safe enough to do a fast
+ * transition from A_WAIT_BCON to
+ * A_SUSPEND */
+ msleep(10000);
+ if (langwell->hsm.a_bus_req)
+ break;
+
+ if (request_irq(langwell->pdev->irq,
+ otg_dummy_irq, IRQF_SHARED,
+ driver_name, langwell->regs) != 0) {
+ otg_dbg("request interrupt %d fail\n",
+ langwell->pdev->irq);
+ }
+
+ langwell_otg_HABA(1);
+ langwell->hsm.b_bus_resume = 0;
+ langwell->hsm.a_aidl_bdis_tmout = 0;
+ langwell_otg_add_timer(a_aidl_bdis_tmr);
+
+ langwell_otg_loc_sof(0);
+ langwell->otg.state = OTG_STATE_A_SUSPEND;
+ } else if (!langwell->hsm.a_bus_req &&
+ !langwell->hsm.a_set_b_hnp_en) {
+ struct pci_dev *pdev = langwell->pdev;
+ if (langwell->host_ops)
+ langwell->host_ops->remove(pdev);
+ else
+ otg_dbg("host driver removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+ }
+ }
+ break;
+ case OTG_STATE_A_HOST:
+ if (langwell->hsm.id) {
+ langwell->otg.default_a = 0;
+ langwell->hsm.b_bus_req = 0;
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (langwell->hsm.a_bus_drop ||
+ (!langwell->hsm.a_set_b_hnp_en && !langwell->hsm.a_bus_req)) {
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+ } else if (!langwell->hsm.a_vbus_vld) {
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+ } else if (langwell->hsm.a_set_b_hnp_en
+ && !langwell->hsm.a_bus_req) {
+ /* Set HABA to enable hardware assistance to signal
+ * A-connect after receiving B-disconnect. Hardware
+ * will then set client mode and enable URE, SLE and
+ * PCE after the assistance. otg_dummy_irq is used to
+ * clear these interrupts when the client driver is not resumed.
+ */
+ if (request_irq(langwell->pdev->irq,
+ otg_dummy_irq, IRQF_SHARED, driver_name,
+ langwell->regs) != 0) {
+ otg_dbg("request interrupt %d failed\n",
+ langwell->pdev->irq);
+ }
+
+ /* set HABA */
+ langwell_otg_HABA(1);
+ langwell->hsm.b_bus_resume = 0;
+ langwell->hsm.a_aidl_bdis_tmout = 0;
+ langwell_otg_add_timer(a_aidl_bdis_tmr);
+ langwell_otg_loc_sof(0);
+ langwell->otg.state = OTG_STATE_A_SUSPEND;
+ } else if (!langwell->hsm.b_conn || !langwell->hsm.a_bus_req) {
+ langwell->hsm.a_wait_bcon_tmout = 0;
+ langwell->hsm.a_set_b_hnp_en = 0;
+ langwell_otg_add_timer(a_wait_bcon_tmr);
+ langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+ }
+ break;
+ case OTG_STATE_A_SUSPEND:
+ if (langwell->hsm.id) {
+ langwell_otg_del_timer(a_aidl_bdis_tmr);
+ langwell_otg_HABA(0);
+ free_irq(langwell->pdev->irq, langwell->regs);
+ langwell->otg.default_a = 0;
+ langwell->hsm.b_bus_req = 0;
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (langwell->hsm.a_bus_req ||
+ langwell->hsm.b_bus_resume) {
+ langwell_otg_del_timer(a_aidl_bdis_tmr);
+ langwell_otg_HABA(0);
+ free_irq(langwell->pdev->irq, langwell->regs);
+ langwell->hsm.a_suspend_req = 0;
+ langwell_otg_loc_sof(1);
+ langwell->otg.state = OTG_STATE_A_HOST;
+ } else if (langwell->hsm.a_aidl_bdis_tmout ||
+ langwell->hsm.a_bus_drop) {
+ langwell_otg_del_timer(a_aidl_bdis_tmr);
+ langwell_otg_HABA(0);
+ free_irq(langwell->pdev->irq, langwell->regs);
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+ } else if (!langwell->hsm.b_conn &&
+ langwell->hsm.a_set_b_hnp_en) {
+ langwell_otg_del_timer(a_aidl_bdis_tmr);
+ langwell_otg_HABA(0);
+ free_irq(langwell->pdev->irq, langwell->regs);
+
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+
+ langwell->hsm.b_bus_suspend = 0;
+ langwell->hsm.b_bus_suspend_vld = 0;
+ langwell->hsm.b_bus_suspend_tmout = 0;
+
+ /* msleep(200); */
+ if (langwell->client_ops)
+ langwell->client_ops->resume(langwell->pdev);
+ else
+ otg_dbg("client driver not loaded.\n");
+
+ langwell_otg_add_timer(b_bus_suspend_tmr);
+ langwell->otg.state = OTG_STATE_A_PERIPHERAL;
+ break;
+ } else if (!langwell->hsm.a_vbus_vld) {
+ langwell_otg_del_timer(a_aidl_bdis_tmr);
+ langwell_otg_HABA(0);
+ free_irq(langwell->pdev->irq, langwell->regs);
+ if (langwell->host_ops)
+ langwell->host_ops->remove(langwell->pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+ }
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ if (langwell->hsm.id) {
+ langwell_otg_del_timer(b_bus_suspend_tmr);
+ langwell->otg.default_a = 0;
+ langwell->hsm.b_bus_req = 0;
+ if (langwell->client_ops)
+ langwell->client_ops->suspend(langwell->pdev,
+ PMSG_FREEZE);
+ else
+ otg_dbg("client driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (!langwell->hsm.a_vbus_vld) {
+ langwell_otg_del_timer(b_bus_suspend_tmr);
+ if (langwell->client_ops)
+ langwell->client_ops->suspend(langwell->pdev,
+ PMSG_FREEZE);
+ else
+ otg_dbg("client driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+ } else if (langwell->hsm.a_bus_drop) {
+ langwell_otg_del_timer(b_bus_suspend_tmr);
+ if (langwell->client_ops)
+ langwell->client_ops->suspend(langwell->pdev,
+ PMSG_FREEZE);
+ else
+ otg_dbg("client driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+ } else if (langwell->hsm.b_bus_suspend) {
+ langwell_otg_del_timer(b_bus_suspend_tmr);
+ if (langwell->client_ops)
+ langwell->client_ops->suspend(langwell->pdev,
+ PMSG_FREEZE);
+ else
+ otg_dbg("client driver has been removed.\n");
+
+ if (langwell->host_ops)
+ langwell->host_ops->probe(langwell->pdev,
+ langwell->host_ops->id_table);
+ else
+ otg_dbg("host driver not loaded.\n");
+ langwell->hsm.a_set_b_hnp_en = 0;
+ langwell->hsm.a_wait_bcon_tmout = 0;
+ langwell_otg_add_timer(a_wait_bcon_tmr);
+ langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+ } else if (langwell->hsm.b_bus_suspend_tmout) {
+ u32 val;
+ val = readl(langwell->regs + CI_PORTSC1);
+ if (!(val & PORTSC_SUSP))
+ break;
+ if (langwell->client_ops)
+ langwell->client_ops->suspend(langwell->pdev,
+ PMSG_FREEZE);
+ else
+ otg_dbg("client driver has been removed.\n");
+ if (langwell->host_ops)
+ langwell->host_ops->probe(langwell->pdev,
+ langwell->host_ops->id_table);
+ else
+ otg_dbg("host driver not loaded.\n");
+ langwell->hsm.a_set_b_hnp_en = 0;
+ langwell->hsm.a_wait_bcon_tmout = 0;
+ langwell_otg_add_timer(a_wait_bcon_tmr);
+ langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+ }
+ break;
+ case OTG_STATE_A_VBUS_ERR:
+ if (langwell->hsm.id) {
+ langwell->otg.default_a = 0;
+ langwell->hsm.a_clr_err = 0;
+ langwell->hsm.a_srp_det = 0;
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (langwell->hsm.a_clr_err) {
+ langwell->hsm.a_clr_err = 0;
+ langwell->hsm.a_srp_det = 0;
+ reset_otg();
+ init_hsm();
+ if (langwell->otg.state == OTG_STATE_A_IDLE)
+ queue_work(langwell->qwork, &langwell->work);
+ }
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ if (langwell->hsm.id) {
+ langwell->otg.default_a = 0;
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ queue_work(langwell->qwork, &langwell->work);
+ } else if (langwell->hsm.a_bus_req) {
+ langwell_otg_drv_vbus(1);
+ langwell->hsm.a_wait_vrise_tmout = 0;
+ langwell_otg_add_timer(a_wait_vrise_tmr);
+ langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
+ } else if (!langwell->hsm.a_sess_vld) {
+ langwell->hsm.a_srp_det = 0;
+ langwell_otg_drv_vbus(0);
+ set_host_mode();
+ langwell->otg.state = OTG_STATE_A_IDLE;
+ }
+ break;
+ default:
+ ;
+ }
+
+ otg_dbg("%s: new state = %s\n", __func__,
+ state_string(langwell->otg.state));
+}
+
+static ssize_t
+show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ struct langwell_otg *langwell;
+ char *next;
+ unsigned size;
+ unsigned t;
+
+ langwell = the_transceiver;
+ next = buf;
+ size = PAGE_SIZE;
+
+ t = scnprintf(next, size,
+ "\n"
+ "USBCMD = 0x%08x \n"
+ "USBSTS = 0x%08x \n"
+ "USBINTR = 0x%08x \n"
+ "ASYNCLISTADDR = 0x%08x \n"
+ "PORTSC1 = 0x%08x \n"
+ "HOSTPC1 = 0x%08x \n"
+ "OTGSC = 0x%08x \n"
+ "USBMODE = 0x%08x \n",
+ readl(langwell->regs + 0x30),
+ readl(langwell->regs + 0x34),
+ readl(langwell->regs + 0x38),
+ readl(langwell->regs + 0x48),
+ readl(langwell->regs + 0x74),
+ readl(langwell->regs + 0xb4),
+ readl(langwell->regs + 0xf4),
+ readl(langwell->regs + 0xf8)
+ );
+ size -= t;
+ next += t;
+
+ return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
+
+static ssize_t
+show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ struct langwell_otg *langwell;
+ char *next;
+ unsigned size;
+ unsigned t;
+
+ langwell = the_transceiver;
+ next = buf;
+ size = PAGE_SIZE;
+
+ t = scnprintf(next, size,
+ "\n"
+ "current state = %s\n"
+ "a_bus_resume = \t%d\n"
+ "a_bus_suspend = \t%d\n"
+ "a_conn = \t%d\n"
+ "a_sess_vld = \t%d\n"
+ "a_srp_det = \t%d\n"
+ "a_vbus_vld = \t%d\n"
+ "b_bus_resume = \t%d\n"
+ "b_bus_suspend = \t%d\n"
+ "b_conn = \t%d\n"
+ "b_se0_srp = \t%d\n"
+ "b_sess_end = \t%d\n"
+ "b_sess_vld = \t%d\n"
+ "id = \t%d\n"
+ "a_set_b_hnp_en = \t%d\n"
+ "b_srp_done = \t%d\n"
+ "b_hnp_enable = \t%d\n"
+ "a_wait_vrise_tmout = \t%d\n"
+ "a_wait_bcon_tmout = \t%d\n"
+ "a_aidl_bdis_tmout = \t%d\n"
+ "b_ase0_brst_tmout = \t%d\n"
+ "a_bus_drop = \t%d\n"
+ "a_bus_req = \t%d\n"
+ "a_clr_err = \t%d\n"
+ "a_suspend_req = \t%d\n"
+ "b_bus_req = \t%d\n"
+ "b_bus_suspend_tmout = \t%d\n"
+ "b_bus_suspend_vld = \t%d\n",
+ state_string(langwell->otg.state),
+ langwell->hsm.a_bus_resume,
+ langwell->hsm.a_bus_suspend,
+ langwell->hsm.a_conn,
+ langwell->hsm.a_sess_vld,
+ langwell->hsm.a_srp_det,
+ langwell->hsm.a_vbus_vld,
+ langwell->hsm.b_bus_resume,
+ langwell->hsm.b_bus_suspend,
+ langwell->hsm.b_conn,
+ langwell->hsm.b_se0_srp,
+ langwell->hsm.b_sess_end,
+ langwell->hsm.b_sess_vld,
+ langwell->hsm.id,
+ langwell->hsm.a_set_b_hnp_en,
+ langwell->hsm.b_srp_done,
+ langwell->hsm.b_hnp_enable,
+ langwell->hsm.a_wait_vrise_tmout,
+ langwell->hsm.a_wait_bcon_tmout,
+ langwell->hsm.a_aidl_bdis_tmout,
+ langwell->hsm.b_ase0_brst_tmout,
+ langwell->hsm.a_bus_drop,
+ langwell->hsm.a_bus_req,
+ langwell->hsm.a_clr_err,
+ langwell->hsm.a_suspend_req,
+ langwell->hsm.b_bus_req,
+ langwell->hsm.b_bus_suspend_tmout,
+ langwell->hsm.b_bus_suspend_vld
+ );
+ size -= t;
+ next += t;
+
+ return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
+
+static ssize_t
+get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct langwell_otg *langwell;
+ char *next;
+ unsigned size;
+ unsigned t;
+
+ langwell = the_transceiver;
+ next = buf;
+ size = PAGE_SIZE;
+
+ t = scnprintf(next, size, "%d", langwell->hsm.a_bus_req);
+ size -= t;
+ next += t;
+
+ return PAGE_SIZE - size;
+}
+
+static ssize_t
+set_a_bus_req(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct langwell_otg *langwell;
+ langwell = the_transceiver;
+ if (!langwell->otg.default_a)
+ return -1;
+ if (count > 2)
+ return -1;
+
+ if (buf[0] == '0') {
+ langwell->hsm.a_bus_req = 0;
+ otg_dbg("a_bus_req = 0\n");
+ } else if (buf[0] == '1') {
+ /* If a_bus_drop is TRUE, a_bus_req can't be set */
+ if (langwell->hsm.a_bus_drop)
+ return -1;
+ langwell->hsm.a_bus_req = 1;
+ otg_dbg("a_bus_req = 1\n");
+ }
+ if (spin_trylock(&langwell->wq_lock)) {
+ queue_work(langwell->qwork, &langwell->work);
+ spin_unlock(&langwell->wq_lock);
+ }
+ return count;
+}
+static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req);
+
+static ssize_t
+get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct langwell_otg *langwell;
+ char *next;
+ unsigned size;
+ unsigned t;
+
+ langwell = the_transceiver;
+ next = buf;
+ size = PAGE_SIZE;
+
+ t = scnprintf(next, size, "%d", langwell->hsm.a_bus_drop);
+ size -= t;
+ next += t;
+
+ return PAGE_SIZE - size;
+}
+
+static ssize_t
+set_a_bus_drop(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct langwell_otg *langwell;
+ langwell = the_transceiver;
+ if (!langwell->otg.default_a)
+ return -1;
+ if (count > 2)
+ return -1;
+
+ if (buf[0] == '0') {
+ langwell->hsm.a_bus_drop = 0;
+ otg_dbg("a_bus_drop = 0\n");
+ } else if (buf[0] == '1') {
+ langwell->hsm.a_bus_drop = 1;
+ langwell->hsm.a_bus_req = 0;
+ otg_dbg("a_bus_drop = 1, then a_bus_req = 0\n");
+ }
+ if (spin_trylock(&langwell->wq_lock)) {
+ queue_work(langwell->qwork, &langwell->work);
+ spin_unlock(&langwell->wq_lock);
+ }
+ return count;
+}
+static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO,
+ get_a_bus_drop, set_a_bus_drop);
+
+static ssize_t
+get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct langwell_otg *langwell;
+ char *next;
+ unsigned size;
+ unsigned t;
+
+ langwell = the_transceiver;
+ next = buf;
+ size = PAGE_SIZE;
+
+ t = scnprintf(next, size, "%d", langwell->hsm.b_bus_req);
+ size -= t;
+ next += t;
+
+ return PAGE_SIZE - size;
+}
+
+static ssize_t
+set_b_bus_req(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct langwell_otg *langwell;
+ langwell = the_transceiver;
+
+ if (langwell->otg.default_a)
+ return -1;
+
+ if (count > 2)
+ return -1;
+
+ if (buf[0] == '0') {
+ langwell->hsm.b_bus_req = 0;
+ otg_dbg("b_bus_req = 0\n");
+ } else if (buf[0] == '1') {
+ langwell->hsm.b_bus_req = 1;
+ otg_dbg("b_bus_req = 1\n");
+ }
+ if (spin_trylock(&langwell->wq_lock)) {
+ queue_work(langwell->qwork, &langwell->work);
+ spin_unlock(&langwell->wq_lock);
+ }
+ return count;
+}
+static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req);
+
+static ssize_t
+set_a_clr_err(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct langwell_otg *langwell;
+ langwell = the_transceiver;
+
+ if (!langwell->otg.default_a)
+ return -1;
+ if (count > 2)
+ return -1;
+
+ if (buf[0] == '1') {
+ langwell->hsm.a_clr_err = 1;
+ otg_dbg("a_clr_err = 1\n");
+ }
+ if (spin_trylock(&langwell->wq_lock)) {
+ queue_work(langwell->qwork, &langwell->work);
+ spin_unlock(&langwell->wq_lock);
+ }
+ return count;
+}
+static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err);
+
+static struct attribute *inputs_attrs[] = {
+ &dev_attr_a_bus_req.attr,
+ &dev_attr_a_bus_drop.attr,
+ &dev_attr_b_bus_req.attr,
+ &dev_attr_a_clr_err.attr,
+ NULL,
+};
+
+static struct attribute_group debug_dev_attr_group = {
+ .name = "inputs",
+ .attrs = inputs_attrs,
+};
+
+int langwell_register_host(struct pci_driver *host_driver)
+{
+ int ret = 0;
+
+ the_transceiver->host_ops = host_driver;
+ queue_work(the_transceiver->qwork, &the_transceiver->work);
+ otg_dbg("host controller driver is registered\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(langwell_register_host);
+
+void langwell_unregister_host(struct pci_driver *host_driver)
+{
+ if (the_transceiver->host_ops)
+ the_transceiver->host_ops->remove(the_transceiver->pdev);
+ the_transceiver->host_ops = NULL;
+ the_transceiver->hsm.a_bus_drop = 1;
+ queue_work(the_transceiver->qwork, &the_transceiver->work);
+ otg_dbg("host controller driver is unregistered\n");
+}
+EXPORT_SYMBOL(langwell_unregister_host);
+
+int langwell_register_peripheral(struct pci_driver *client_driver)
+{
+ int ret = 0;
+
+ if (client_driver)
+ ret = client_driver->probe(the_transceiver->pdev,
+ client_driver->id_table);
+ if (!ret) {
+ the_transceiver->client_ops = client_driver;
+ queue_work(the_transceiver->qwork, &the_transceiver->work);
+ otg_dbg("client controller driver is registered\n");
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(langwell_register_peripheral);
+
+void langwell_unregister_peripheral(struct pci_driver *client_driver)
+{
+ if (the_transceiver->client_ops)
+ the_transceiver->client_ops->remove(the_transceiver->pdev);
+ the_transceiver->client_ops = NULL;
+ the_transceiver->hsm.b_bus_req = 0;
+ queue_work(the_transceiver->qwork, &the_transceiver->work);
+ otg_dbg("client controller driver is unregistered\n");
+}
+EXPORT_SYMBOL(langwell_unregister_peripheral);
+
+static int langwell_otg_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ unsigned long resource, len;
+ void __iomem *base = NULL;
+ int retval;
+ u32 val32;
+ struct langwell_otg *langwell;
+ char qname[] = "langwell_otg_queue";
+
+ retval = 0;
+ otg_dbg("\notg controller is detected.\n");
+ if (pci_enable_device(pdev) < 0) {
+ retval = -ENODEV;
+ goto done;
+ }
+
+ langwell = kzalloc(sizeof *langwell, GFP_KERNEL);
+ if (langwell == NULL) {
+ retval = -ENOMEM;
+ goto done;
+ }
+ the_transceiver = langwell;
+
+ /* control register: BAR 0 */
+ resource = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+ if (!request_mem_region(resource, len, driver_name)) {
+ retval = -EBUSY;
+ goto err;
+ }
+ langwell->region = 1;
+
+ base = ioremap_nocache(resource, len);
+ if (base == NULL) {
+ retval = -EFAULT;
+ goto err;
+ }
+ langwell->regs = base;
+
+ if (!pdev->irq) {
+ otg_dbg("No IRQ.\n");
+ retval = -ENODEV;
+ goto err;
+ }
+
+ langwell->qwork = create_workqueue(qname);
+ if (!langwell->qwork) {
+ otg_dbg("cannot create workqueue %s\n", qname);
+ retval = -ENOMEM;
+ goto err;
+ }
+ INIT_WORK(&langwell->work, langwell_otg_work);
+
+ /* OTG common part */
+ langwell->pdev = pdev;
+ langwell->otg.dev = &pdev->dev;
+ langwell->otg.label = driver_name;
+ langwell->otg.set_host = langwell_otg_set_host;
+ langwell->otg.set_peripheral = langwell_otg_set_peripheral;
+ langwell->otg.set_power = langwell_otg_set_power;
+ langwell->otg.start_srp = langwell_otg_start_srp;
+ langwell->otg.state = OTG_STATE_UNDEFINED;
+ if (otg_set_transceiver(&langwell->otg)) {
+ otg_dbg("can't set transceiver\n");
+ retval = -EBUSY;
+ goto err;
+ }
+
+ reset_otg();
+ init_hsm();
+
+ spin_lock_init(&langwell->lock);
+ spin_lock_init(&langwell->wq_lock);
+ INIT_LIST_HEAD(&active_timers);
+ langwell_otg_init_timers(&langwell->hsm);
+
+ if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
+ driver_name, langwell) != 0) {
+ otg_dbg("request interrupt %d failed\n", pdev->irq);
+ retval = -EBUSY;
+ goto err;
+ }
+
+ /* enable OTGSC int */
+ val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
+ OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
+ writel(val32, langwell->regs + CI_OTGSC);
+
+ retval = device_create_file(&pdev->dev, &dev_attr_registers);
+ if (retval < 0) {
+ otg_dbg("Can't register sysfs attribute: %d\n", retval);
+ goto err;
+ }
+
+ retval = device_create_file(&pdev->dev, &dev_attr_hsm);
+ if (retval < 0) {
+ otg_dbg("Can't hsm sysfs attribute: %d\n", retval);
+ goto err;
+ }
+
+ retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
+ if (retval < 0) {
+ otg_dbg("Can't register sysfs attr group: %d\n", retval);
+ goto err;
+ }
+
+ if (langwell->otg.state == OTG_STATE_A_IDLE)
+ queue_work(langwell->qwork, &langwell->work);
+
+ return 0;
+
+err:
+ if (the_transceiver)
+ langwell_otg_remove(pdev);
+done:
+ return retval;
+}
+
+static void langwell_otg_remove(struct pci_dev *pdev)
+{
+ struct langwell_otg *langwell;
+
+ langwell = the_transceiver;
+
+ if (langwell->qwork) {
+ flush_workqueue(langwell->qwork);
+ destroy_workqueue(langwell->qwork);
+ }
+ langwell_otg_free_timers();
+
+ /* disable OTGSC interrupts; a controller reset does not clear OTGSC */
+ writel(0, langwell->regs + CI_OTGSC);
+
+ if (pdev->irq)
+ free_irq(pdev->irq, langwell);
+ if (langwell->regs)
+ iounmap(langwell->regs);
+ if (langwell->region)
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+
+ otg_set_transceiver(NULL);
+ pci_disable_device(pdev);
+ sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
+ device_remove_file(&pdev->dev, &dev_attr_hsm);
+ device_remove_file(&pdev->dev, &dev_attr_registers);
+ kfree(langwell);
+ langwell = NULL;
+}
+
+static void transceiver_suspend(struct pci_dev *pdev)
+{
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ langwell_otg_phy_low_power(1);
+}
+
+static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message)
+{
+ int ret = 0;
+ struct langwell_otg *langwell;
+
+ langwell = the_transceiver;
+
+ /* Disable OTG interrupts */
+ langwell_otg_intr(0);
+
+ if (pdev->irq)
+ free_irq(pdev->irq, langwell);
+
+ /* Prevent more otg_work */
+ flush_workqueue(langwell->qwork);
+ spin_lock(&langwell->wq_lock);
+
+ /* start actions */
+ switch (langwell->otg.state) {
+ case OTG_STATE_A_IDLE:
+ case OTG_STATE_B_IDLE:
+ case OTG_STATE_A_WAIT_VFALL:
+ case OTG_STATE_A_VBUS_ERR:
+ transceiver_suspend(pdev);
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ langwell_otg_del_timer(a_wait_vrise_tmr);
+ langwell->hsm.a_srp_det = 0;
+ langwell_otg_drv_vbus(0);
+ langwell->otg.state = OTG_STATE_A_IDLE;
+ transceiver_suspend(pdev);
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ langwell_otg_del_timer(a_wait_bcon_tmr);
+ if (langwell->host_ops)
+ ret = langwell->host_ops->suspend(pdev, message);
+ langwell_otg_drv_vbus(0);
+ break;
+ case OTG_STATE_A_HOST:
+ if (langwell->host_ops)
+ ret = langwell->host_ops->suspend(pdev, message);
+ langwell_otg_drv_vbus(0);
+ langwell_otg_phy_low_power(1);
+ break;
+ case OTG_STATE_A_SUSPEND:
+ langwell_otg_del_timer(a_aidl_bdis_tmr);
+ langwell_otg_HABA(0);
+ if (langwell->host_ops)
+ langwell->host_ops->remove(pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ transceiver_suspend(pdev);
+ langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ if (langwell->client_ops)
+ ret = langwell->client_ops->suspend(pdev, message);
+ else
+ otg_dbg("client driver has been removed.\n");
+ langwell_otg_drv_vbus(0);
+ transceiver_suspend(pdev);
+ langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ case OTG_STATE_B_HOST:
+ if (langwell->host_ops)
+ langwell->host_ops->remove(pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell->hsm.b_bus_req = 0;
+ transceiver_suspend(pdev);
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ if (langwell->client_ops)
+ ret = langwell->client_ops->suspend(pdev, message);
+ else
+ otg_dbg("client driver has been removed.\n");
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ langwell_otg_del_timer(b_ase0_brst_tmr);
+ langwell_otg_HAAR(0);
+ if (langwell->host_ops)
+ langwell->host_ops->remove(pdev);
+ else
+ otg_dbg("host driver has been removed.\n");
+ langwell->hsm.b_bus_req = 0;
+ langwell->otg.state = OTG_STATE_B_IDLE;
+ transceiver_suspend(pdev);
+ break;
+ default:
+ otg_dbg("error state before suspend\n ");
+ break;
+ }
+ spin_unlock(&langwell->wq_lock);
+
+ return ret;
+}
+
+static void transceiver_resume(struct pci_dev *pdev)
+{
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ langwell_otg_phy_low_power(0);
+}
+
+static int langwell_otg_resume(struct pci_dev *pdev)
+{
+ int ret = 0;
+ struct langwell_otg *langwell;
+
+ langwell = the_transceiver;
+
+ spin_lock(&langwell->wq_lock);
+
+ switch (langwell->otg.state) {
+ case OTG_STATE_A_IDLE:
+ case OTG_STATE_B_IDLE:
+ case OTG_STATE_A_WAIT_VFALL:
+ case OTG_STATE_A_VBUS_ERR:
+ transceiver_resume(pdev);
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ langwell_otg_add_timer(a_wait_bcon_tmr);
+ langwell_otg_drv_vbus(1);
+ if (langwell->host_ops)
+ ret = langwell->host_ops->resume(pdev);
+ break;
+ case OTG_STATE_A_HOST:
+ langwell_otg_drv_vbus(1);
+ langwell_otg_phy_low_power(0);
+ if (langwell->host_ops)
+ ret = langwell->host_ops->resume(pdev);
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ if (langwell->client_ops)
+ ret = langwell->client_ops->resume(pdev);
+ else
+ otg_dbg("client driver not loaded.\n");
+ break;
+ default:
+ otg_dbg("error state before suspend\n ");
+ break;
+ }
+
+ if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
+ driver_name, the_transceiver) != 0) {
+ otg_dbg("request interrupt %d failed\n", pdev->irq);
+ ret = -EBUSY;
+ }
+
+ /* enable OTG interrupts */
+ langwell_otg_intr(1);
+
+ spin_unlock(&langwell->wq_lock);
+
+ queue_work(langwell->qwork, &langwell->work);
+
+
+ return ret;
+}
+
+static int __init langwell_otg_init(void)
+{
+ return pci_register_driver(&otg_pci_driver);
+}
+module_init(langwell_otg_init);
+
+static void __exit langwell_otg_cleanup(void)
+{
+ pci_unregister_driver(&otg_pci_driver);
+}
+module_exit(langwell_otg_cleanup);
diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c
index c567168f89a..9ed5ea56867 100644
--- a/drivers/usb/otg/nop-usb-xceiv.c
+++ b/drivers/usb/otg/nop-usb-xceiv.c
@@ -22,8 +22,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Current status:
- * this is to add "nop" transceiver for all those phy which is
- * autonomous such as isp1504 etc.
+ * This provides a "nop" transceiver for PHYs which are
+ * autonomous such as isp1504, isp1707, etc.
*/
#include <linux/module.h>
@@ -36,30 +36,25 @@ struct nop_usb_xceiv {
struct device *dev;
};
-static u64 nop_xceiv_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device nop_xceiv_device = {
- .name = "nop_usb_xceiv",
- .id = -1,
- .dev = {
- .dma_mask = &nop_xceiv_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = NULL,
- },
-};
+static struct platform_device *pd;
void usb_nop_xceiv_register(void)
{
- if (platform_device_register(&nop_xceiv_device) < 0) {
+ if (pd)
+ return;
+ pd = platform_device_register_simple("nop_usb_xceiv", -1, NULL, 0);
+ if (IS_ERR(pd)) {
+ pd = NULL;
printk(KERN_ERR "Unable to register usb nop transceiver\n");
return;
}
}
+EXPORT_SYMBOL(usb_nop_xceiv_register);
void usb_nop_xceiv_unregister(void)
{
- platform_device_unregister(&nop_xceiv_device);
+ platform_device_unregister(pd);
}
+EXPORT_SYMBOL(usb_nop_xceiv_unregister);
static inline struct nop_usb_xceiv *xceiv_to_nop(struct otg_transceiver *x)
{
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index d9478d0e1c8..9e3e7a5c258 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -217,6 +217,7 @@
/* In module TWL4030_MODULE_PM_MASTER */
#define PROTECT_KEY 0x0E
+#define STS_HW_CONDITIONS 0x0F
/* In module TWL4030_MODULE_PM_RECEIVER */
#define VUSB_DEDICATED1 0x7D
@@ -351,15 +352,26 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
int status;
int linkstat = USB_LINK_UNKNOWN;
- /* STS_HW_CONDITIONS */
- status = twl4030_readb(twl, TWL4030_MODULE_PM_MASTER, 0x0f);
+ /*
+ * For ID/VBUS sensing, see manual section 15.4.8 ...
+ * except when using only battery backup power, two
+ * comparators produce VBUS_PRES and ID_PRES signals,
+ * which don't match docs elsewhere. But ... BIT(7)
+ * and BIT(2) of STS_HW_CONDITIONS, respectively, do
+ * seem to match up. If either is true the USB_PRES
+ * signal is active, the OTG module is activated, and
+ * its interrupt may be raised (may wake the system).
+ */
+ status = twl4030_readb(twl, TWL4030_MODULE_PM_MASTER,
+ STS_HW_CONDITIONS);
if (status < 0)
dev_err(twl->dev, "USB link status err %d\n", status);
- else if (status & BIT(7))
- linkstat = USB_LINK_VBUS;
- else if (status & BIT(2))
- linkstat = USB_LINK_ID;
- else
+ else if (status & (BIT(7) | BIT(2))) {
+ if (status & BIT(2))
+ linkstat = USB_LINK_ID;
+ else
+ linkstat = USB_LINK_VBUS;
+ } else
linkstat = USB_LINK_NONE;
dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
@@ -641,7 +653,7 @@ static int twl4030_set_host(struct otg_transceiver *x, struct usb_bus *host)
return 0;
}
-static int __init twl4030_usb_probe(struct platform_device *pdev)
+static int __devinit twl4030_usb_probe(struct platform_device *pdev)
{
struct twl4030_usb_data *pdata = pdev->dev.platform_data;
struct twl4030_usb *twl;
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 6d106e74265..2cbfab3716e 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -364,7 +364,7 @@ static int aircable_attach(struct usb_serial *serial)
return 0;
}
-static void aircable_shutdown(struct usb_serial *serial)
+static void aircable_release(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
@@ -375,7 +375,6 @@ static void aircable_shutdown(struct usb_serial *serial)
if (priv) {
serial_buf_free(priv->tx_buf);
serial_buf_free(priv->rx_buf);
- usb_set_serial_port_data(port, NULL);
kfree(priv);
}
}
@@ -601,7 +600,7 @@ static struct usb_serial_driver aircable_device = {
.num_ports = 1,
.attach = aircable_attach,
.probe = aircable_probe,
- .shutdown = aircable_shutdown,
+ .release = aircable_release,
.write = aircable_write,
.write_room = aircable_write_room,
.write_bulk_callback = aircable_write_bulk_callback,
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index b7eacad4d48..7033b031b44 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -90,11 +90,10 @@ static int debug;
/* function prototypes for a Belkin USB Serial Adapter F5U103 */
static int belkin_sa_startup(struct usb_serial *serial);
-static void belkin_sa_shutdown(struct usb_serial *serial);
+static void belkin_sa_release(struct usb_serial *serial);
static int belkin_sa_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void belkin_sa_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void belkin_sa_close(struct usb_serial_port *port);
static void belkin_sa_read_int_callback(struct urb *urb);
static void belkin_sa_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios * old);
@@ -143,7 +142,7 @@ static struct usb_serial_driver belkin_device = {
.tiocmget = belkin_sa_tiocmget,
.tiocmset = belkin_sa_tiocmset,
.attach = belkin_sa_startup,
- .shutdown = belkin_sa_shutdown,
+ .release = belkin_sa_release,
};
@@ -198,14 +197,13 @@ static int belkin_sa_startup(struct usb_serial *serial)
}
-static void belkin_sa_shutdown(struct usb_serial *serial)
+static void belkin_sa_release(struct usb_serial *serial)
{
struct belkin_sa_private *priv;
int i;
dbg("%s", __func__);
- /* stop reads and writes on all ports */
for (i = 0; i < serial->num_ports; ++i) {
/* My special items, the standard routines free my urbs */
priv = usb_get_serial_port_data(serial->port[i]);
@@ -244,8 +242,7 @@ exit:
} /* belkin_sa_open */
-static void belkin_sa_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void belkin_sa_close(struct usb_serial_port *port)
{
dbg("%s port %d", __func__, port->number);
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 83bbb5bca2e..ba555c528cc 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -59,23 +59,22 @@ static int usb_serial_device_probe(struct device *dev)
retval = -ENODEV;
goto exit;
}
+ if (port->dev_state != PORT_REGISTERING)
+ goto exit;
driver = port->serial->type;
if (driver->port_probe) {
- if (!try_module_get(driver->driver.owner)) {
- dev_err(dev, "module get failed, exiting\n");
- retval = -EIO;
- goto exit;
- }
retval = driver->port_probe(port);
- module_put(driver->driver.owner);
if (retval)
goto exit;
}
retval = device_create_file(dev, &dev_attr_port_number);
- if (retval)
+ if (retval) {
+ if (driver->port_remove)
+ retval = driver->port_remove(port);
goto exit;
+ }
minor = port->number;
tty_register_device(usb_serial_tty_driver, minor, dev);
@@ -98,19 +97,15 @@ static int usb_serial_device_remove(struct device *dev)
if (!port)
return -ENODEV;
+ if (port->dev_state != PORT_UNREGISTERING)
+ return retval;
+
device_remove_file(&port->dev, &dev_attr_port_number);
driver = port->serial->type;
- if (driver->port_remove) {
- if (!try_module_get(driver->driver.owner)) {
- dev_err(dev, "module get failed, exiting\n");
- retval = -EIO;
- goto exit;
- }
+ if (driver->port_remove)
retval = driver->port_remove(port);
- module_put(driver->driver.owner);
- }
-exit:
+
minor = port->number;
tty_unregister_device(usb_serial_tty_driver, minor);
dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index ab4cc277aa6..2830766f5b3 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -262,32 +262,40 @@ error: kfree(priv);
return r;
}
-static void ch341_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp)
+static int ch341_carrier_raised(struct usb_serial_port *port)
+{
+ struct ch341_private *priv = usb_get_serial_port_data(port);
+ if (priv->line_status & CH341_BIT_DCD)
+ return 1;
+ return 0;
+}
+
+static void ch341_dtr_rts(struct usb_serial_port *port, int on)
{
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- unsigned int c_cflag;
dbg("%s - port %d", __func__, port->number);
+ /* drop DTR and RTS */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (on)
+ priv->line_control |= CH341_BIT_RTS | CH341_BIT_DTR;
+ else
+ priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ch341_set_handshake(port->serial->dev, priv->line_control);
+ wake_up_interruptible(&priv->delta_msr_wait);
+}
+
+static void ch341_close(struct usb_serial_port *port)
+{
+ dbg("%s - port %d", __func__, port->number);
/* shutdown our urbs */
dbg("%s - shutting down urbs", __func__);
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
usb_kill_urb(port->interrupt_in_urb);
-
- if (tty) {
- c_cflag = tty->termios->c_cflag;
- if (c_cflag & HUPCL) {
- /* drop DTR and RTS */
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
- ch341_set_handshake(port->serial->dev, 0);
- }
- }
- wake_up_interruptible(&priv->delta_msr_wait);
}
@@ -302,7 +310,6 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port,
dbg("ch341_open()");
priv->baud_rate = DEFAULT_BAUD_RATE;
- priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
r = ch341_configure(serial->dev, priv);
if (r)
@@ -322,7 +329,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port,
if (r) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, r);
- ch341_close(tty, port, NULL);
+ ch341_close(port);
return -EPROTO;
}
@@ -343,9 +350,6 @@ static void ch341_set_termios(struct tty_struct *tty,
dbg("ch341_set_termios()");
- if (!tty || !tty->termios)
- return;
-
baud_rate = tty_get_baud_rate(tty);
priv->baud_rate = baud_rate;
@@ -568,6 +572,8 @@ static struct usb_serial_driver ch341_device = {
.usb_driver = &ch341_driver,
.num_ports = 1,
.open = ch341_open,
+ .dtr_rts = ch341_dtr_rts,
+ .carrier_raised = ch341_carrier_raised,
.close = ch341_close,
.ioctl = ch341_ioctl,
.set_termios = ch341_set_termios,
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 19e24045b13..247b61bfb7f 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -169,7 +169,9 @@ static int usb_console_setup(struct console *co, char *options)
kfree(tty);
}
}
-
+ /* So we know not to kill the hardware on a hangup on this
+ port. We have also bumped the use count by one so it won't go
+ idle */
port->console = 1;
retval = 0;
@@ -182,7 +184,7 @@ free_tty:
kfree(tty);
reset_open_count:
port->port.count = 0;
-goto out;
+ goto out;
}
static void usb_console_write(struct console *co,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index e8d5133ce9c..2b9eeda62bf 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1,5 +1,5 @@
/*
- * Silicon Laboratories CP2101/CP2102 USB to RS232 serial adaptor driver
+ * Silicon Laboratories CP210x USB to RS232 serial adaptor driver
*
* Copyright (C) 2005 Craig Shelley (craig@microtron.org.uk)
*
@@ -27,44 +27,46 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.08"
-#define DRIVER_DESC "Silicon Labs CP2101/CP2102 RS232 serial adaptor driver"
+#define DRIVER_VERSION "v0.09"
+#define DRIVER_DESC "Silicon Labs CP210x RS232 serial adaptor driver"
/*
* Function Prototypes
*/
-static int cp2101_open(struct tty_struct *, struct usb_serial_port *,
+static int cp210x_open(struct tty_struct *, struct usb_serial_port *,
struct file *);
-static void cp2101_cleanup(struct usb_serial_port *);
-static void cp2101_close(struct tty_struct *, struct usb_serial_port *,
- struct file*);
-static void cp2101_get_termios(struct tty_struct *,
+static void cp210x_cleanup(struct usb_serial_port *);
+static void cp210x_close(struct usb_serial_port *);
+static void cp210x_get_termios(struct tty_struct *,
struct usb_serial_port *port);
-static void cp2101_get_termios_port(struct usb_serial_port *port,
+static void cp210x_get_termios_port(struct usb_serial_port *port,
unsigned int *cflagp, unsigned int *baudp);
-static void cp2101_set_termios(struct tty_struct *, struct usb_serial_port *,
+static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
struct ktermios*);
-static int cp2101_tiocmget(struct tty_struct *, struct file *);
-static int cp2101_tiocmset(struct tty_struct *, struct file *,
+static int cp210x_tiocmget(struct tty_struct *, struct file *);
+static int cp210x_tiocmset(struct tty_struct *, struct file *,
unsigned int, unsigned int);
-static int cp2101_tiocmset_port(struct usb_serial_port *port, struct file *,
+static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
unsigned int, unsigned int);
-static void cp2101_break_ctl(struct tty_struct *, int);
-static int cp2101_startup(struct usb_serial *);
-static void cp2101_shutdown(struct usb_serial *);
+static void cp210x_break_ctl(struct tty_struct *, int);
+static int cp210x_startup(struct usb_serial *);
+static void cp210x_disconnect(struct usb_serial *);
static int debug;
static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
{ USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
{ USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
+ { USB_DEVICE(0x10C4, 0x0F91) }, /* Vstabi */
{ USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
{ USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
@@ -85,10 +87,12 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
+ { USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */
{ USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
{ USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demostration module */
{ USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
@@ -99,7 +103,9 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
+ { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
@@ -108,53 +114,70 @@ static struct usb_device_id id_table [] = {
MODULE_DEVICE_TABLE(usb, id_table);
-static struct usb_driver cp2101_driver = {
- .name = "cp2101",
+static struct usb_driver cp210x_driver = {
+ .name = "cp210x",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
.no_dynamic_id = 1,
};
-static struct usb_serial_driver cp2101_device = {
+static struct usb_serial_driver cp210x_device = {
.driver = {
.owner = THIS_MODULE,
- .name = "cp2101",
+ .name = "cp210x",
},
- .usb_driver = &cp2101_driver,
+ .usb_driver = &cp210x_driver,
.id_table = id_table,
.num_ports = 1,
- .open = cp2101_open,
- .close = cp2101_close,
- .break_ctl = cp2101_break_ctl,
- .set_termios = cp2101_set_termios,
- .tiocmget = cp2101_tiocmget,
- .tiocmset = cp2101_tiocmset,
- .attach = cp2101_startup,
- .shutdown = cp2101_shutdown,
+ .open = cp210x_open,
+ .close = cp210x_close,
+ .break_ctl = cp210x_break_ctl,
+ .set_termios = cp210x_set_termios,
+ .tiocmget = cp210x_tiocmget,
+ .tiocmset = cp210x_tiocmset,
+ .attach = cp210x_startup,
+ .disconnect = cp210x_disconnect,
};
/* Config request types */
#define REQTYPE_HOST_TO_DEVICE 0x41
#define REQTYPE_DEVICE_TO_HOST 0xc1
-/* Config SET requests. To GET, add 1 to the request number */
-#define CP2101_UART 0x00 /* Enable / Disable */
-#define CP2101_BAUDRATE 0x01 /* (BAUD_RATE_GEN_FREQ / baudrate) */
-#define CP2101_BITS 0x03 /* 0x(0)(databits)(parity)(stopbits) */
-#define CP2101_BREAK 0x05 /* On / Off */
-#define CP2101_CONTROL 0x07 /* Flow control line states */
-#define CP2101_MODEMCTL 0x13 /* Modem controls */
-#define CP2101_CONFIG_6 0x19 /* 6 bytes of config data ??? */
-
-/* CP2101_UART */
+/* Config request codes */
+#define CP210X_IFC_ENABLE 0x00
+#define CP210X_SET_BAUDDIV 0x01
+#define CP210X_GET_BAUDDIV 0x02
+#define CP210X_SET_LINE_CTL 0x03
+#define CP210X_GET_LINE_CTL 0x04
+#define CP210X_SET_BREAK 0x05
+#define CP210X_IMM_CHAR 0x06
+#define CP210X_SET_MHS 0x07
+#define CP210X_GET_MDMSTS 0x08
+#define CP210X_SET_XON 0x09
+#define CP210X_SET_XOFF 0x0A
+#define CP210X_SET_EVENTMASK 0x0B
+#define CP210X_GET_EVENTMASK 0x0C
+#define CP210X_SET_CHAR 0x0D
+#define CP210X_GET_CHARS 0x0E
+#define CP210X_GET_PROPS 0x0F
+#define CP210X_GET_COMM_STATUS 0x10
+#define CP210X_RESET 0x11
+#define CP210X_PURGE 0x12
+#define CP210X_SET_FLOW 0x13
+#define CP210X_GET_FLOW 0x14
+#define CP210X_EMBED_EVENTS 0x15
+#define CP210X_GET_EVENTSTATE 0x16
+#define CP210X_SET_CHARS 0x19
+
+/* CP210X_IFC_ENABLE */
#define UART_ENABLE 0x0001
#define UART_DISABLE 0x0000
-/* CP2101_BAUDRATE */
+/* CP210X_(SET|GET)_BAUDDIV */
#define BAUD_RATE_GEN_FREQ 0x384000
-/* CP2101_BITS */
+/* CP210X_(SET|GET)_LINE_CTL */
#define BITS_DATA_MASK 0X0f00
#define BITS_DATA_5 0X0500
#define BITS_DATA_6 0X0600
@@ -174,11 +197,11 @@ static struct usb_serial_driver cp2101_device = {
#define BITS_STOP_1_5 0x0001
#define BITS_STOP_2 0x0002
-/* CP2101_BREAK */
+/* CP210X_SET_BREAK */
#define BREAK_ON 0x0000
#define BREAK_OFF 0x0001
-/* CP2101_CONTROL */
+/* CP210X_(SET_MHS|GET_MDMSTS) */
#define CONTROL_DTR 0x0001
#define CONTROL_RTS 0x0002
#define CONTROL_CTS 0x0010
@@ -189,13 +212,13 @@ static struct usb_serial_driver cp2101_device = {
#define CONTROL_WRITE_RTS 0x0200
/*
- * cp2101_get_config
- * Reads from the CP2101 configuration registers
+ * cp210x_get_config
+ * Reads from the CP210x configuration registers
* 'size' is specified in bytes.
* 'data' is a pointer to a pre-allocated array of integers large
* enough to hold 'size' bytes (with 4 bytes to each integer)
*/
-static int cp2101_get_config(struct usb_serial_port *port, u8 request,
+static int cp210x_get_config(struct usb_serial_port *port, u8 request,
unsigned int *data, int size)
{
struct usb_serial *serial = port->serial;
@@ -211,9 +234,6 @@ static int cp2101_get_config(struct usb_serial_port *port, u8 request,
return -ENOMEM;
}
- /* For get requests, the request number must be incremented */
- request++;
-
/* Issue the request, attempting to read 'size' bytes */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
request, REQTYPE_DEVICE_TO_HOST, 0x0000,
@@ -236,12 +256,12 @@ static int cp2101_get_config(struct usb_serial_port *port, u8 request,
}
/*
- * cp2101_set_config
- * Writes to the CP2101 configuration registers
+ * cp210x_set_config
+ * Writes to the CP210x configuration registers
* Values less than 16 bits wide are sent directly
* 'size' is specified in bytes.
*/
-static int cp2101_set_config(struct usb_serial_port *port, u8 request,
+static int cp210x_set_config(struct usb_serial_port *port, u8 request,
unsigned int *data, int size)
{
struct usb_serial *serial = port->serial;
@@ -292,21 +312,21 @@ static int cp2101_set_config(struct usb_serial_port *port, u8 request,
}
/*
- * cp2101_set_config_single
- * Convenience function for calling cp2101_set_config on single data values
+ * cp210x_set_config_single
+ * Convenience function for calling cp210x_set_config on single data values
* without requiring an integer pointer
*/
-static inline int cp2101_set_config_single(struct usb_serial_port *port,
+static inline int cp210x_set_config_single(struct usb_serial_port *port,
u8 request, unsigned int data)
{
- return cp2101_set_config(port, request, &data, 2);
+ return cp210x_set_config(port, request, &data, 2);
}
/*
- * cp2101_quantise_baudrate
+ * cp210x_quantise_baudrate
* Quantises the baud rate as per AN205 Table 1
*/
-static unsigned int cp2101_quantise_baudrate(unsigned int baud) {
+static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
if (baud <= 56) baud = 0;
else if (baud <= 300) baud = 300;
else if (baud <= 600) baud = 600;
@@ -343,7 +363,7 @@ static unsigned int cp2101_quantise_baudrate(unsigned int baud) {
return baud;
}
-static int cp2101_open(struct tty_struct *tty, struct usb_serial_port *port,
+static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp)
{
struct usb_serial *serial = port->serial;
@@ -351,7 +371,7 @@ static int cp2101_open(struct tty_struct *tty, struct usb_serial_port *port,
dbg("%s - port %d", __func__, port->number);
- if (cp2101_set_config_single(port, CP2101_UART, UART_ENABLE)) {
+ if (cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_ENABLE)) {
dev_err(&port->dev, "%s - Unable to enable UART\n",
__func__);
return -EPROTO;
@@ -373,17 +393,17 @@ static int cp2101_open(struct tty_struct *tty, struct usb_serial_port *port,
}
/* Configure the termios structure */
- cp2101_get_termios(tty, port);
+ cp210x_get_termios(tty, port);
/* Set the DTR and RTS pins low */
- cp2101_tiocmset_port(tty ? (struct usb_serial_port *) tty->driver_data
+ cp210x_tiocmset_port(tty ? (struct usb_serial_port *) tty->driver_data
: port,
NULL, TIOCM_DTR | TIOCM_RTS, 0);
return 0;
}
-static void cp2101_cleanup(struct usb_serial_port *port)
+static void cp210x_cleanup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
@@ -398,8 +418,7 @@ static void cp2101_cleanup(struct usb_serial_port *port)
}
}
-static void cp2101_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp)
+static void cp210x_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
@@ -410,23 +429,23 @@ static void cp2101_close(struct tty_struct *tty, struct usb_serial_port *port,
mutex_lock(&port->serial->disc_mutex);
if (!port->serial->disconnected)
- cp2101_set_config_single(port, CP2101_UART, UART_DISABLE);
+ cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_DISABLE);
mutex_unlock(&port->serial->disc_mutex);
}
/*
- * cp2101_get_termios
+ * cp210x_get_termios
* Reads the baud rate, data bits, parity, stop bits and flow control mode
* from the device, corrects any unsupported values, and configures the
* termios structure to reflect the state of the device
*/
-static void cp2101_get_termios(struct tty_struct *tty,
+static void cp210x_get_termios(struct tty_struct *tty,
struct usb_serial_port *port)
{
unsigned int baud;
if (tty) {
- cp2101_get_termios_port(tty->driver_data,
+ cp210x_get_termios_port(tty->driver_data,
&tty->termios->c_cflag, &baud);
tty_encode_baud_rate(tty, baud, baud);
}
@@ -434,15 +453,15 @@ static void cp2101_get_termios(struct tty_struct *tty,
else {
unsigned int cflag;
cflag = 0;
- cp2101_get_termios_port(port, &cflag, &baud);
+ cp210x_get_termios_port(port, &cflag, &baud);
}
}
/*
- * cp2101_get_termios_port
- * This is the heart of cp2101_get_termios which always uses a &usb_serial_port.
+ * cp210x_get_termios_port
+ * This is the heart of cp210x_get_termios which always uses a &usb_serial_port.
*/
-static void cp2101_get_termios_port(struct usb_serial_port *port,
+static void cp210x_get_termios_port(struct usb_serial_port *port,
unsigned int *cflagp, unsigned int *baudp)
{
unsigned int cflag, modem_ctl[4];
@@ -451,17 +470,17 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
dbg("%s - port %d", __func__, port->number);
- cp2101_get_config(port, CP2101_BAUDRATE, &baud, 2);
+ cp210x_get_config(port, CP210X_GET_BAUDDIV, &baud, 2);
/* Convert to baudrate */
if (baud)
- baud = cp2101_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud);
+ baud = cp210x_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud);
dbg("%s - baud rate = %d", __func__, baud);
*baudp = baud;
cflag = *cflagp;
- cp2101_get_config(port, CP2101_BITS, &bits, 2);
+ cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
cflag &= ~CSIZE;
switch (bits & BITS_DATA_MASK) {
case BITS_DATA_5:
@@ -486,14 +505,14 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
cflag |= CS8;
bits &= ~BITS_DATA_MASK;
bits |= BITS_DATA_8;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
default:
dbg("%s - Unknown number of data bits, using 8", __func__);
cflag |= CS8;
bits &= ~BITS_DATA_MASK;
bits |= BITS_DATA_8;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
}
@@ -516,20 +535,20 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
__func__);
cflag &= ~PARENB;
bits &= ~BITS_PARITY_MASK;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
case BITS_PARITY_SPACE:
dbg("%s - parity = SPACE (not supported, disabling parity)",
__func__);
cflag &= ~PARENB;
bits &= ~BITS_PARITY_MASK;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
default:
dbg("%s - Unknown parity mode, disabling parity", __func__);
cflag &= ~PARENB;
bits &= ~BITS_PARITY_MASK;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
}
@@ -542,7 +561,7 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
dbg("%s - stop bits = 1.5 (not supported, using 1 stop bit)",
__func__);
bits &= ~BITS_STOP_MASK;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
case BITS_STOP_2:
dbg("%s - stop bits = 2", __func__);
@@ -552,11 +571,11 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
dbg("%s - Unknown number of stop bits, using 1 stop bit",
__func__);
bits &= ~BITS_STOP_MASK;
- cp2101_set_config(port, CP2101_BITS, &bits, 2);
+ cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
break;
}
- cp2101_get_config(port, CP2101_MODEMCTL, modem_ctl, 16);
+ cp210x_get_config(port, CP210X_GET_FLOW, modem_ctl, 16);
if (modem_ctl[0] & 0x0008) {
dbg("%s - flow control = CRTSCTS", __func__);
cflag |= CRTSCTS;
@@ -568,7 +587,7 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
*cflagp = cflag;
}
-static void cp2101_set_termios(struct tty_struct *tty,
+static void cp210x_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
unsigned int cflag, old_cflag;
@@ -583,13 +602,13 @@ static void cp2101_set_termios(struct tty_struct *tty,
tty->termios->c_cflag &= ~CMSPAR;
cflag = tty->termios->c_cflag;
old_cflag = old_termios->c_cflag;
- baud = cp2101_quantise_baudrate(tty_get_baud_rate(tty));
+ baud = cp210x_quantise_baudrate(tty_get_baud_rate(tty));
/* If the baud rate is to be updated*/
if (baud != tty_termios_baud_rate(old_termios) && baud != 0) {
dbg("%s - Setting baud rate to %d baud", __func__,
baud);
- if (cp2101_set_config_single(port, CP2101_BAUDRATE,
+ if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
dbg("Baud rate requested not supported by device\n");
baud = tty_termios_baud_rate(old_termios);
@@ -600,7 +619,7 @@ static void cp2101_set_termios(struct tty_struct *tty,
/* If the number of data bits is to be updated */
if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
- cp2101_get_config(port, CP2101_BITS, &bits, 2);
+ cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
bits &= ~BITS_DATA_MASK;
switch (cflag & CSIZE) {
case CS5:
@@ -624,19 +643,19 @@ static void cp2101_set_termios(struct tty_struct *tty,
dbg("%s - data bits = 9", __func__);
break;*/
default:
- dbg("cp2101 driver does not "
+ dbg("cp210x driver does not "
"support the number of bits requested,"
" using 8 bit mode\n");
bits |= BITS_DATA_8;
break;
}
- if (cp2101_set_config(port, CP2101_BITS, &bits, 2))
+ if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
dbg("Number of data bits requested "
"not supported by device\n");
}
if ((cflag & (PARENB|PARODD)) != (old_cflag & (PARENB|PARODD))) {
- cp2101_get_config(port, CP2101_BITS, &bits, 2);
+ cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
bits &= ~BITS_PARITY_MASK;
if (cflag & PARENB) {
if (cflag & PARODD) {
@@ -647,13 +666,13 @@ static void cp2101_set_termios(struct tty_struct *tty,
dbg("%s - parity = EVEN", __func__);
}
}
- if (cp2101_set_config(port, CP2101_BITS, &bits, 2))
+ if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
dbg("Parity mode not supported "
"by device\n");
}
if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) {
- cp2101_get_config(port, CP2101_BITS, &bits, 2);
+ cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
bits &= ~BITS_STOP_MASK;
if (cflag & CSTOPB) {
bits |= BITS_STOP_2;
@@ -662,13 +681,13 @@ static void cp2101_set_termios(struct tty_struct *tty,
bits |= BITS_STOP_1;
dbg("%s - stop bits = 1", __func__);
}
- if (cp2101_set_config(port, CP2101_BITS, &bits, 2))
+ if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
dbg("Number of stop bits requested "
"not supported by device\n");
}
if ((cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
- cp2101_get_config(port, CP2101_MODEMCTL, modem_ctl, 16);
+ cp210x_get_config(port, CP210X_GET_FLOW, modem_ctl, 16);
dbg("%s - read modem controls = 0x%.4x 0x%.4x 0x%.4x 0x%.4x",
__func__, modem_ctl[0], modem_ctl[1],
modem_ctl[2], modem_ctl[3]);
@@ -688,19 +707,19 @@ static void cp2101_set_termios(struct tty_struct *tty,
dbg("%s - write modem controls = 0x%.4x 0x%.4x 0x%.4x 0x%.4x",
__func__, modem_ctl[0], modem_ctl[1],
modem_ctl[2], modem_ctl[3]);
- cp2101_set_config(port, CP2101_MODEMCTL, modem_ctl, 16);
+ cp210x_set_config(port, CP210X_SET_FLOW, modem_ctl, 16);
}
}
-static int cp2101_tiocmset (struct tty_struct *tty, struct file *file,
+static int cp210x_tiocmset (struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
- return cp2101_tiocmset_port(port, file, set, clear);
+ return cp210x_tiocmset_port(port, file, set, clear);
}
-static int cp2101_tiocmset_port(struct usb_serial_port *port, struct file *file,
+static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *file,
unsigned int set, unsigned int clear)
{
unsigned int control = 0;
@@ -726,10 +745,10 @@ static int cp2101_tiocmset_port(struct usb_serial_port *port, struct file *file,
dbg("%s - control = 0x%.4x", __func__, control);
- return cp2101_set_config(port, CP2101_CONTROL, &control, 2);
+ return cp210x_set_config(port, CP210X_SET_MHS, &control, 2);
}
-static int cp2101_tiocmget (struct tty_struct *tty, struct file *file)
+static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int control;
@@ -737,7 +756,7 @@ static int cp2101_tiocmget (struct tty_struct *tty, struct file *file)
dbg("%s - port %d", __func__, port->number);
- cp2101_get_config(port, CP2101_CONTROL, &control, 1);
+ cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
|((control & CONTROL_RTS) ? TIOCM_RTS : 0)
@@ -751,7 +770,7 @@ static int cp2101_tiocmget (struct tty_struct *tty, struct file *file)
return result;
}
-static void cp2101_break_ctl (struct tty_struct *tty, int break_state)
+static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
unsigned int state;
@@ -763,17 +782,17 @@ static void cp2101_break_ctl (struct tty_struct *tty, int break_state)
state = BREAK_ON;
dbg("%s - turning break %s", __func__,
state == BREAK_OFF ? "off" : "on");
- cp2101_set_config(port, CP2101_BREAK, &state, 2);
+ cp210x_set_config(port, CP210X_SET_BREAK, &state, 2);
}
-static int cp2101_startup(struct usb_serial *serial)
+static int cp210x_startup(struct usb_serial *serial)
{
- /* CP2101 buffers behave strangely unless device is reset */
+ /* cp210x buffers behave strangely unless device is reset */
usb_reset_device(serial->dev);
return 0;
}
-static void cp2101_shutdown(struct usb_serial *serial)
+static void cp210x_disconnect(struct usb_serial *serial)
{
int i;
@@ -781,21 +800,21 @@ static void cp2101_shutdown(struct usb_serial *serial)
/* Stop reads and writes on all ports */
for (i = 0; i < serial->num_ports; ++i)
- cp2101_cleanup(serial->port[i]);
+ cp210x_cleanup(serial->port[i]);
}
-static int __init cp2101_init(void)
+static int __init cp210x_init(void)
{
int retval;
- retval = usb_serial_register(&cp2101_device);
+ retval = usb_serial_register(&cp210x_device);
if (retval)
return retval; /* Failed to register */
- retval = usb_register(&cp2101_driver);
+ retval = usb_register(&cp210x_driver);
if (retval) {
/* Failed to register */
- usb_serial_deregister(&cp2101_device);
+ usb_serial_deregister(&cp210x_device);
return retval;
}
@@ -805,14 +824,14 @@ static int __init cp2101_init(void)
return 0;
}
-static void __exit cp2101_exit(void)
+static void __exit cp210x_exit(void)
{
- usb_deregister(&cp2101_driver);
- usb_serial_deregister(&cp2101_device);
+ usb_deregister(&cp210x_driver);
+ usb_serial_deregister(&cp210x_device);
}
-module_init(cp2101_init);
-module_exit(cp2101_exit);
+module_init(cp210x_init);
+module_exit(cp210x_exit);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index dd501bb63ed..336523fd736 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -58,11 +58,11 @@ static int debug;
/* Function prototypes */
static int cyberjack_startup(struct usb_serial *serial);
-static void cyberjack_shutdown(struct usb_serial *serial);
+static void cyberjack_disconnect(struct usb_serial *serial);
+static void cyberjack_release(struct usb_serial *serial);
static int cyberjack_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void cyberjack_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void cyberjack_close(struct usb_serial_port *port);
static int cyberjack_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count);
static int cyberjack_write_room(struct tty_struct *tty);
@@ -95,7 +95,8 @@ static struct usb_serial_driver cyberjack_device = {
.id_table = id_table,
.num_ports = 1,
.attach = cyberjack_startup,
- .shutdown = cyberjack_shutdown,
+ .disconnect = cyberjack_disconnect,
+ .release = cyberjack_release,
.open = cyberjack_open,
.close = cyberjack_close,
.write = cyberjack_write,
@@ -149,17 +150,25 @@ static int cyberjack_startup(struct usb_serial *serial)
return 0;
}
-static void cyberjack_shutdown(struct usb_serial *serial)
+static void cyberjack_disconnect(struct usb_serial *serial)
{
int i;
dbg("%s", __func__);
- for (i = 0; i < serial->num_ports; ++i) {
+ for (i = 0; i < serial->num_ports; ++i)
usb_kill_urb(serial->port[i]->interrupt_in_urb);
+}
+
+static void cyberjack_release(struct usb_serial *serial)
+{
+ int i;
+
+ dbg("%s", __func__);
+
+ for (i = 0; i < serial->num_ports; ++i) {
/* My special items, the standard routines free my urbs */
kfree(usb_get_serial_port_data(serial->port[i]));
- usb_set_serial_port_data(serial->port[i], NULL);
}
}
@@ -185,8 +194,7 @@ static int cyberjack_open(struct tty_struct *tty,
return result;
}
-static void cyberjack_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void cyberjack_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index e568710b263..9734085fd2f 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -171,11 +171,11 @@ struct cypress_buf {
static int cypress_earthmate_startup(struct usb_serial *serial);
static int cypress_hidcom_startup(struct usb_serial *serial);
static int cypress_ca42v2_startup(struct usb_serial *serial);
-static void cypress_shutdown(struct usb_serial *serial);
+static void cypress_release(struct usb_serial *serial);
static int cypress_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void cypress_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void cypress_close(struct usb_serial_port *port);
+static void cypress_dtr_rts(struct usb_serial_port *port, int on);
static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static void cypress_send(struct usb_serial_port *port);
@@ -215,9 +215,10 @@ static struct usb_serial_driver cypress_earthmate_device = {
.id_table = id_table_earthmate,
.num_ports = 1,
.attach = cypress_earthmate_startup,
- .shutdown = cypress_shutdown,
+ .release = cypress_release,
.open = cypress_open,
.close = cypress_close,
+ .dtr_rts = cypress_dtr_rts,
.write = cypress_write,
.write_room = cypress_write_room,
.ioctl = cypress_ioctl,
@@ -241,9 +242,10 @@ static struct usb_serial_driver cypress_hidcom_device = {
.id_table = id_table_cyphidcomrs232,
.num_ports = 1,
.attach = cypress_hidcom_startup,
- .shutdown = cypress_shutdown,
+ .release = cypress_release,
.open = cypress_open,
.close = cypress_close,
+ .dtr_rts = cypress_dtr_rts,
.write = cypress_write,
.write_room = cypress_write_room,
.ioctl = cypress_ioctl,
@@ -267,9 +269,10 @@ static struct usb_serial_driver cypress_ca42v2_device = {
.id_table = id_table_nokiaca42v2,
.num_ports = 1,
.attach = cypress_ca42v2_startup,
- .shutdown = cypress_shutdown,
+ .release = cypress_release,
.open = cypress_open,
.close = cypress_close,
+ .dtr_rts = cypress_dtr_rts,
.write = cypress_write,
.write_room = cypress_write_room,
.ioctl = cypress_ioctl,
@@ -613,7 +616,7 @@ static int cypress_ca42v2_startup(struct usb_serial *serial)
} /* cypress_ca42v2_startup */
-static void cypress_shutdown(struct usb_serial *serial)
+static void cypress_release(struct usb_serial *serial)
{
struct cypress_private *priv;
@@ -626,7 +629,6 @@ static void cypress_shutdown(struct usb_serial *serial)
if (priv) {
cypress_buf_free(priv->buf);
kfree(priv);
- usb_set_serial_port_data(serial->port[0], NULL);
}
}
@@ -656,11 +658,7 @@ static int cypress_open(struct tty_struct *tty,
priv->rx_flags = 0;
spin_unlock_irqrestore(&priv->lock, flags);
- /* raise both lines and set termios */
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control = CONTROL_DTR | CONTROL_RTS;
- priv->cmd_ctrl = 1;
- spin_unlock_irqrestore(&priv->lock, flags);
+ /* Set termios */
result = cypress_write(tty, port, NULL, 0);
if (result) {
@@ -694,76 +692,42 @@ static int cypress_open(struct tty_struct *tty,
__func__, result);
cypress_set_dead(port);
}
-
+ port->port.drain_delay = 256;
return result;
} /* cypress_open */
+static void cypress_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct cypress_private *priv = usb_get_serial_port_data(port);
+ /* drop dtr and rts */
+ priv = usb_get_serial_port_data(port);
+ spin_lock_irq(&priv->lock);
+ if (on == 0)
+ priv->line_control = 0;
+ else
+ priv->line_control = CONTROL_DTR | CONTROL_RTS;
+ priv->cmd_ctrl = 1;
+ spin_unlock_irq(&priv->lock);
+ cypress_write(NULL, port, NULL, 0);
+}
-static void cypress_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void cypress_close(struct usb_serial_port *port)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
- unsigned int c_cflag;
- int bps;
- long timeout;
- wait_queue_t wait;
dbg("%s - port %d", __func__, port->number);
- /* wait for data to drain from buffer */
- spin_lock_irq(&priv->lock);
- timeout = CYPRESS_CLOSING_WAIT;
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&tty->write_wait, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (cypress_buf_data_avail(priv->buf) == 0
- || timeout == 0 || signal_pending(current)
- /* without mutex, allowed due to harmless failure mode */
- || port->serial->disconnected)
- break;
- spin_unlock_irq(&priv->lock);
- timeout = schedule_timeout(timeout);
- spin_lock_irq(&priv->lock);
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&tty->write_wait, &wait);
- /* clear out any remaining data in the buffer */
- cypress_buf_clear(priv->buf);
- spin_unlock_irq(&priv->lock);
-
/* writing is potentially harmful, lock must be taken */
mutex_lock(&port->serial->disc_mutex);
if (port->serial->disconnected) {
mutex_unlock(&port->serial->disc_mutex);
return;
}
- /* wait for characters to drain from device */
- if (tty) {
- bps = tty_get_baud_rate(tty);
- if (bps > 1200)
- timeout = max((HZ * 2560) / bps, HZ / 10);
- else
- timeout = 2 * HZ;
- schedule_timeout_interruptible(timeout);
- }
-
+ cypress_buf_clear(priv->buf);
dbg("%s - stopping urbs", __func__);
usb_kill_urb(port->interrupt_in_urb);
usb_kill_urb(port->interrupt_out_urb);
- if (tty) {
- c_cflag = tty->termios->c_cflag;
- if (c_cflag & HUPCL) {
- /* drop dtr and rts */
- priv = usb_get_serial_port_data(port);
- spin_lock_irq(&priv->lock);
- priv->line_control = 0;
- priv->cmd_ctrl = 1;
- spin_unlock_irq(&priv->lock);
- cypress_write(tty, port, NULL, 0);
- }
- }
if (stats)
dev_info(&port->dev, "Statistics: %d Bytes In | %d Bytes Out | %d Commands Issued\n",
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 38ba4ea8b6b..f4808091c47 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -422,7 +422,6 @@ struct digi_port {
int dp_throttled;
int dp_throttle_restart;
wait_queue_head_t dp_flush_wait;
- int dp_in_close; /* close in progress */
wait_queue_head_t dp_close_wait; /* wait queue for close */
struct work_struct dp_wakeup_work;
struct usb_serial_port *dp_port;
@@ -456,11 +455,13 @@ static int digi_write_room(struct tty_struct *tty);
static int digi_chars_in_buffer(struct tty_struct *tty);
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp);
-static void digi_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void digi_close(struct usb_serial_port *port);
+static int digi_carrier_raised(struct usb_serial_port *port);
+static void digi_dtr_rts(struct usb_serial_port *port, int on);
static int digi_startup_device(struct usb_serial *serial);
static int digi_startup(struct usb_serial *serial);
-static void digi_shutdown(struct usb_serial *serial);
+static void digi_disconnect(struct usb_serial *serial);
+static void digi_release(struct usb_serial *serial);
static void digi_read_bulk_callback(struct urb *urb);
static int digi_read_inb_callback(struct urb *urb);
static int digi_read_oob_callback(struct urb *urb);
@@ -510,6 +511,8 @@ static struct usb_serial_driver digi_acceleport_2_device = {
.num_ports = 3,
.open = digi_open,
.close = digi_close,
+ .dtr_rts = digi_dtr_rts,
+ .carrier_raised = digi_carrier_raised,
.write = digi_write,
.write_room = digi_write_room,
.write_bulk_callback = digi_write_bulk_callback,
@@ -522,7 +525,8 @@ static struct usb_serial_driver digi_acceleport_2_device = {
.tiocmget = digi_tiocmget,
.tiocmset = digi_tiocmset,
.attach = digi_startup,
- .shutdown = digi_shutdown,
+ .disconnect = digi_disconnect,
+ .release = digi_release,
};
static struct usb_serial_driver digi_acceleport_4_device = {
@@ -548,7 +552,8 @@ static struct usb_serial_driver digi_acceleport_4_device = {
.tiocmget = digi_tiocmget,
.tiocmset = digi_tiocmset,
.attach = digi_startup,
- .shutdown = digi_shutdown,
+ .disconnect = digi_disconnect,
+ .release = digi_release,
};
@@ -1328,6 +1333,19 @@ static int digi_chars_in_buffer(struct tty_struct *tty)
}
+static void digi_dtr_rts(struct usb_serial_port *port, int on)
+{
+ /* Adjust DTR and RTS */
+ digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
+}
+
+static int digi_carrier_raised(struct usb_serial_port *port)
+{
+ struct digi_port *priv = usb_get_serial_port_data(port);
+ if (priv->dp_modem_signals & TIOCM_CD)
+ return 1;
+ return 0;
+}
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp)
@@ -1336,7 +1354,6 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
unsigned char buf[32];
struct digi_port *priv = usb_get_serial_port_data(port);
struct ktermios not_termios;
- unsigned long flags = 0;
dbg("digi_open: TOP: port=%d, open_count=%d",
priv->dp_port_num, port->port.count);
@@ -1345,26 +1362,6 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
if (digi_startup_device(port->serial) != 0)
return -ENXIO;
- spin_lock_irqsave(&priv->dp_port_lock, flags);
-
- /* don't wait on a close in progress for non-blocking opens */
- if (priv->dp_in_close && (filp->f_flags&(O_NDELAY|O_NONBLOCK)) == 0) {
- spin_unlock_irqrestore(&priv->dp_port_lock, flags);
- return -EAGAIN;
- }
-
- /* wait for a close in progress to finish */
- while (priv->dp_in_close) {
- cond_wait_interruptible_timeout_irqrestore(
- &priv->dp_close_wait, DIGI_RETRY_TIMEOUT,
- &priv->dp_port_lock, flags);
- if (signal_pending(current))
- return -EINTR;
- spin_lock_irqsave(&priv->dp_port_lock, flags);
- }
-
- spin_unlock_irqrestore(&priv->dp_port_lock, flags);
-
/* read modem signals automatically whenever they change */
buf[0] = DIGI_CMD_READ_INPUT_SIGNALS;
buf[1] = priv->dp_port_num;
@@ -1387,16 +1384,11 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
not_termios.c_iflag = ~tty->termios->c_iflag;
digi_set_termios(tty, port, &not_termios);
}
-
- /* set DTR and RTS */
- digi_set_modem_signals(port, TIOCM_DTR|TIOCM_RTS, 1);
-
return 0;
}
-static void digi_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp)
+static void digi_close(struct usb_serial_port *port)
{
DEFINE_WAIT(wait);
int ret;
@@ -1411,28 +1403,9 @@ static void digi_close(struct tty_struct *tty, struct usb_serial_port *port,
if (port->serial->disconnected)
goto exit;
- /* do cleanup only after final close on this port */
- spin_lock_irq(&priv->dp_port_lock);
- priv->dp_in_close = 1;
- spin_unlock_irq(&priv->dp_port_lock);
-
- /* tell line discipline to process only XON/XOFF */
- tty->closing = 1;
-
- /* wait for output to drain */
- if ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) == 0)
- tty_wait_until_sent(tty, DIGI_CLOSE_TIMEOUT);
-
- /* flush driver and line discipline buffers */
- tty_driver_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
if (port->serial->dev) {
- /* wait for transmit idle */
- if ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) == 0)
- digi_transmit_idle(port, DIGI_CLOSE_TIMEOUT);
- /* drop DTR and RTS */
- digi_set_modem_signals(port, 0, 0);
+ /* FIXME: Transmit idle belongs in the wait_unti_sent path */
+ digi_transmit_idle(port, DIGI_CLOSE_TIMEOUT);
/* disable input flow control */
buf[0] = DIGI_CMD_SET_INPUT_FLOW_CONTROL;
@@ -1477,11 +1450,9 @@ static void digi_close(struct tty_struct *tty, struct usb_serial_port *port,
/* shutdown any outstanding bulk writes */
usb_kill_urb(port->write_urb);
}
- tty->closing = 0;
exit:
spin_lock_irq(&priv->dp_port_lock);
priv->dp_write_urb_in_use = 0;
- priv->dp_in_close = 0;
wake_up_interruptible(&priv->dp_close_wait);
spin_unlock_irq(&priv->dp_port_lock);
mutex_unlock(&port->serial->disc_mutex);
@@ -1560,7 +1531,6 @@ static int digi_startup(struct usb_serial *serial)
priv->dp_throttled = 0;
priv->dp_throttle_restart = 0;
init_waitqueue_head(&priv->dp_flush_wait);
- priv->dp_in_close = 0;
init_waitqueue_head(&priv->dp_close_wait);
INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
priv->dp_port = serial->port[i];
@@ -1589,16 +1559,23 @@ static int digi_startup(struct usb_serial *serial)
}
-static void digi_shutdown(struct usb_serial *serial)
+static void digi_disconnect(struct usb_serial *serial)
{
int i;
- dbg("digi_shutdown: TOP, in_interrupt()=%ld", in_interrupt());
+ dbg("digi_disconnect: TOP, in_interrupt()=%ld", in_interrupt());
/* stop reads and writes on all ports */
for (i = 0; i < serial->type->num_ports + 1; i++) {
usb_kill_urb(serial->port[i]->read_urb);
usb_kill_urb(serial->port[i]->write_urb);
}
+}
+
+
+static void digi_release(struct usb_serial *serial)
+{
+ int i;
+ dbg("digi_release: TOP, in_interrupt()=%ld", in_interrupt());
/* free the private data structures for all ports */
/* number of regular ports + 1 for the out-of-band port */
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index c709ec474a8..80cb3471adb 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -81,8 +81,7 @@ static int debug;
/* function prototypes for an empeg-car player */
static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp);
-static void empeg_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void empeg_close(struct usb_serial_port *port);
static int empeg_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf,
int count);
@@ -91,7 +90,6 @@ static int empeg_chars_in_buffer(struct tty_struct *tty);
static void empeg_throttle(struct tty_struct *tty);
static void empeg_unthrottle(struct tty_struct *tty);
static int empeg_startup(struct usb_serial *serial);
-static void empeg_shutdown(struct usb_serial *serial);
static void empeg_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios);
static void empeg_write_bulk_callback(struct urb *urb);
@@ -125,7 +123,6 @@ static struct usb_serial_driver empeg_device = {
.throttle = empeg_throttle,
.unthrottle = empeg_unthrottle,
.attach = empeg_startup,
- .shutdown = empeg_shutdown,
.set_termios = empeg_set_termios,
.write = empeg_write,
.write_room = empeg_write_room,
@@ -181,8 +178,7 @@ static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port,
}
-static void empeg_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp)
+static void empeg_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
@@ -429,12 +425,6 @@ static int empeg_startup(struct usb_serial *serial)
}
-static void empeg_shutdown(struct usb_serial *serial)
-{
- dbg("%s", __func__);
-}
-
-
static void empeg_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0ab8474b00c..3dc3768ca71 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -47,7 +47,7 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.4.3"
+#define DRIVER_VERSION "v1.5.0"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>"
#define DRIVER_DESC "USB FTDI Serial Converters Driver"
@@ -82,17 +82,20 @@ struct ftdi_private {
int rx_processed;
unsigned long rx_bytes;
- __u16 interface; /* FT2232C port interface (0 for FT232/245) */
+ __u16 interface; /* FT2232C, FT2232H or FT4232H port interface
+ (0 for FT232/245) */
speed_t force_baud; /* if non-zero, force the baud rate to
this value */
int force_rtscts; /* if non-zero, force RTS-CTS to always
be enabled */
+ unsigned int latency; /* latency setting in use */
spinlock_t tx_lock; /* spinlock for transmit state */
unsigned long tx_bytes;
unsigned long tx_outstanding_bytes;
unsigned long tx_outstanding_urbs;
+ unsigned short max_packet_size;
};
/* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -163,6 +166,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -672,6 +676,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -692,12 +697,13 @@ static const char *ftdi_chip_name[] = {
[FT232BM] = "FT232BM",
[FT2232C] = "FT2232C",
[FT232RL] = "FT232RL",
+ [FT2232H] = "FT2232H",
+ [FT4232H] = "FT4232H"
};
/* Constants for read urb and write urb */
#define BUFSZ 512
-#define PKTSZ 64
/* rx_flags */
#define THROTTLED 0x01
@@ -714,13 +720,12 @@ static const char *ftdi_chip_name[] = {
/* function prototypes for a FTDI serial converter */
static int ftdi_sio_probe(struct usb_serial *serial,
const struct usb_device_id *id);
-static void ftdi_shutdown(struct usb_serial *serial);
static int ftdi_sio_port_probe(struct usb_serial_port *port);
static int ftdi_sio_port_remove(struct usb_serial_port *port);
static int ftdi_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void ftdi_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void ftdi_close(struct usb_serial_port *port);
+static void ftdi_dtr_rts(struct usb_serial_port *port, int on);
static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int ftdi_write_room(struct tty_struct *tty);
@@ -743,6 +748,8 @@ static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
static unsigned short int ftdi_232am_baud_to_divisor(int baud);
static __u32 ftdi_232bm_baud_base_to_divisor(int baud, int base);
static __u32 ftdi_232bm_baud_to_divisor(int baud);
+static __u32 ftdi_2232h_baud_base_to_divisor(int baud, int base);
+static __u32 ftdi_2232h_baud_to_divisor(int baud);
static struct usb_serial_driver ftdi_sio_device = {
.driver = {
@@ -758,6 +765,7 @@ static struct usb_serial_driver ftdi_sio_device = {
.port_remove = ftdi_sio_port_remove,
.open = ftdi_open,
.close = ftdi_close,
+ .dtr_rts = ftdi_dtr_rts,
.throttle = ftdi_throttle,
.unthrottle = ftdi_unthrottle,
.write = ftdi_write,
@@ -770,7 +778,6 @@ static struct usb_serial_driver ftdi_sio_device = {
.ioctl = ftdi_ioctl,
.set_termios = ftdi_set_termios,
.break_ctl = ftdi_break_ctl,
- .shutdown = ftdi_shutdown,
};
@@ -836,6 +843,36 @@ static __u32 ftdi_232bm_baud_to_divisor(int baud)
return ftdi_232bm_baud_base_to_divisor(baud, 48000000);
}
+static __u32 ftdi_2232h_baud_base_to_divisor(int baud, int base)
+{
+ static const unsigned char divfrac[8] = { 0, 3, 2, 4, 1, 5, 6, 7 };
+ __u32 divisor;
+ int divisor3;
+
+ /* hi-speed baud rate is 10-bit sampling instead of 16-bit */
+ divisor3 = (base / 10 / baud) * 8;
+
+ divisor = divisor3 >> 3;
+ divisor |= (__u32)divfrac[divisor3 & 0x7] << 14;
+ /* Deal with special cases for highest baud rates. */
+ if (divisor == 1)
+ divisor = 0;
+ else if (divisor == 0x4001)
+ divisor = 1;
+ /*
+ * Set this bit to turn off a divide by 2.5 on baud rate generator
+ * This enables baud rates up to 12Mbaud but cannot reach below 1200
+ * baud with this bit set
+ */
+ divisor |= 0x00020000;
+ return divisor;
+}
+
+static __u32 ftdi_2232h_baud_to_divisor(int baud)
+{
+ return ftdi_2232h_baud_base_to_divisor(baud, 120000000);
+}
+
#define set_mctrl(port, set) update_mctrl((port), (set), 0)
#define clear_mctrl(port, clear) update_mctrl((port), 0, (clear))
@@ -994,6 +1031,19 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
baud = 9600;
}
break;
+ case FT2232H: /* FT2232H chip */
+ case FT4232H: /* FT4232H chip */
+ if ((baud <= 12000000) & (baud >= 1200)) {
+ div_value = ftdi_2232h_baud_to_divisor(baud);
+ } else if (baud < 1200) {
+ div_value = ftdi_232bm_baud_to_divisor(baud);
+ } else {
+ dbg("%s - Baud rate too high!", __func__);
+ div_value = ftdi_232bm_baud_to_divisor(9600);
+ div_okay = 0;
+ baud = 9600;
+ }
+ break;
} /* priv->chip_type */
if (div_okay) {
@@ -1037,7 +1087,54 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
return rv;
}
+static int write_latency_timer(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_device *udev = port->serial->dev;
+ char buf[1];
+ int rv = 0;
+ int l = priv->latency;
+
+ if (priv->flags & ASYNC_LOW_LATENCY)
+ l = 1;
+
+ dbg("%s: setting latency timer = %i", __func__, l);
+
+ rv = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
+ FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
+ l, priv->interface,
+ buf, 0, WDR_TIMEOUT);
+
+ if (rv < 0)
+ dev_err(&port->dev, "Unable to write latency timer: %i\n", rv);
+ return rv;
+}
+
+static int read_latency_timer(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_device *udev = port->serial->dev;
+ unsigned short latency = 0;
+ int rv = 0;
+
+
+ dbg("%s", __func__);
+
+ rv = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
+ FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
+ 0, priv->interface,
+ (char *) &latency, 1, WDR_TIMEOUT);
+ if (rv < 0) {
+ dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
+ return -EIO;
+ }
+ return latency;
+}
static int get_serial_info(struct usb_serial_port *port,
struct serial_struct __user *retinfo)
@@ -1097,6 +1194,7 @@ static int set_serial_info(struct tty_struct *tty,
priv->custom_divisor = new_serial.custom_divisor;
tty->low_latency = (priv->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ write_latency_timer(port);
check_and_exit:
if ((old_priv.flags & ASYNC_SPD_MASK) !=
@@ -1146,14 +1244,29 @@ static void ftdi_determine_type(struct usb_serial_port *port)
if (interfaces > 1) {
int inter;
- /* Multiple interfaces. Assume FT2232C. */
- priv->chip_type = FT2232C;
+ /* Multiple interfaces.*/
+ if (version == 0x0800) {
+ priv->chip_type = FT4232H;
+ /* Hi-speed - baud clock runs at 120MHz */
+ priv->baud_base = 120000000 / 2;
+ } else if (version == 0x0700) {
+ priv->chip_type = FT2232H;
+ /* Hi-speed - baud clock runs at 120MHz */
+ priv->baud_base = 120000000 / 2;
+ } else
+ priv->chip_type = FT2232C;
+
/* Determine interface code. */
inter = serial->interface->altsetting->desc.bInterfaceNumber;
- if (inter == 0)
- priv->interface = PIT_SIOA;
- else
- priv->interface = PIT_SIOB;
+ if (inter == 0) {
+ priv->interface = INTERFACE_A;
+ } else if (inter == 1) {
+ priv->interface = INTERFACE_B;
+ } else if (inter == 2) {
+ priv->interface = INTERFACE_C;
+ } else if (inter == 3) {
+ priv->interface = INTERFACE_D;
+ }
/* BM-type devices have a bug where bcdDevice gets set
* to 0x200 when iSerialNumber is 0. */
if (version < 0x500) {
@@ -1181,6 +1294,45 @@ static void ftdi_determine_type(struct usb_serial_port *port)
}
+/* Determine the maximum packet size for the device. This depends on the chip
+ * type and the USB host capabilities. The value should be obtained from the
+ * device descriptor as the chip will use the appropriate values for the host.*/
+static void ftdi_set_max_packet_size(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ struct usb_device *udev = serial->dev;
+
+ struct usb_interface *interface = serial->interface;
+ struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
+
+ unsigned num_endpoints;
+ int i = 0;
+
+ num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
+ dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
+
+ /* NOTE: some customers have programmed FT232R/FT245R devices
+ * with an endpoint size of 0 - not good. In this case, we
+ * want to override the endpoint descriptor setting and use a
+ * value of 64 for wMaxPacketSize */
+ for (i = 0; i < num_endpoints; i++) {
+ dev_info(&udev->dev, "Endpoint %d MaxPacketSize %d\n", i+1,
+ interface->cur_altsetting->endpoint[i].desc.wMaxPacketSize);
+ ep_desc = &interface->cur_altsetting->endpoint[i].desc;
+ if (ep_desc->wMaxPacketSize == 0) {
+ ep_desc->wMaxPacketSize = cpu_to_le16(0x40);
+ dev_info(&udev->dev, "Overriding wMaxPacketSize on endpoint %d\n", i);
+ }
+ }
+
+ /* set max packet size based on descriptor */
+ priv->max_packet_size = ep_desc->wMaxPacketSize;
+
+ dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
+}
+
+
/*
* ***************************************************************************
* Sysfs Attribute
@@ -1192,27 +1344,13 @@ static ssize_t show_latency_timer(struct device *dev,
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
- struct usb_device *udev = port->serial->dev;
- unsigned short latency = 0;
- int rv = 0;
-
-
- dbg("%s", __func__);
-
- rv = usb_control_msg(udev,
- usb_rcvctrlpipe(udev, 0),
- FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
- FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
- 0, priv->interface,
- (char *) &latency, 1, WDR_TIMEOUT);
-
- if (rv < 0) {
- dev_err(dev, "Unable to read latency timer: %i\n", rv);
- return -EIO;
- }
- return sprintf(buf, "%i\n", latency);
+ if (priv->flags & ASYNC_LOW_LATENCY)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "%i\n", priv->latency);
}
+
/* Write a new value of the latency timer, in units of milliseconds. */
static ssize_t store_latency_timer(struct device *dev,
struct device_attribute *attr, const char *valbuf,
@@ -1220,25 +1358,13 @@ static ssize_t store_latency_timer(struct device *dev,
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
- struct usb_device *udev = port->serial->dev;
- char buf[1];
int v = simple_strtoul(valbuf, NULL, 10);
int rv = 0;
- dbg("%s: setting latency timer = %i", __func__, v);
-
- rv = usb_control_msg(udev,
- usb_sndctrlpipe(udev, 0),
- FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
- FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
- v, priv->interface,
- buf, 0, WDR_TIMEOUT);
-
- if (rv < 0) {
- dev_err(dev, "Unable to write latency timer: %i\n", rv);
+ priv->latency = v;
+ rv = write_latency_timer(port);
+ if (rv < 0)
return -EIO;
- }
-
return count;
}
@@ -1290,7 +1416,9 @@ static int create_sysfs_attrs(struct usb_serial_port *port)
if ((!retval) &&
(priv->chip_type == FT232BM ||
priv->chip_type == FT2232C ||
- priv->chip_type == FT232RL)) {
+ priv->chip_type == FT232RL ||
+ priv->chip_type == FT2232H ||
+ priv->chip_type == FT4232H)) {
retval = device_create_file(&port->dev,
&dev_attr_latency_timer);
}
@@ -1309,7 +1437,9 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
device_remove_file(&port->dev, &dev_attr_event_char);
if (priv->chip_type == FT232BM ||
priv->chip_type == FT2232C ||
- priv->chip_type == FT232RL) {
+ priv->chip_type == FT232RL ||
+ priv->chip_type == FT2232H ||
+ priv->chip_type == FT4232H) {
device_remove_file(&port->dev, &dev_attr_latency_timer);
}
}
@@ -1392,6 +1522,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
usb_set_serial_port_data(port, priv);
ftdi_determine_type(port);
+ ftdi_set_max_packet_size(port);
+ read_latency_timer(port);
create_sysfs_attrs(port);
return 0;
}
@@ -1460,18 +1592,6 @@ static int ftdi_mtxorb_hack_setup(struct usb_serial *serial)
return 0;
}
-/* ftdi_shutdown is called from usbserial:usb_serial_disconnect
- * it is called when the usb device is disconnected
- *
- * usbserial:usb_serial_disconnect
- * calls __serial_close for each open of the port
- * shutdown is called then (ie ftdi_shutdown)
- */
-static void ftdi_shutdown(struct usb_serial *serial)
-{
- dbg("%s", __func__);
-}
-
static void ftdi_sio_priv_release(struct kref *k)
{
struct ftdi_private *priv = container_of(k, struct ftdi_private, kref);
@@ -1487,14 +1607,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
remove_sysfs_attrs(port);
- /* all open ports are closed at this point
- * (by usbserial.c:__serial_close, which calls ftdi_close)
- */
-
- if (priv) {
- usb_set_serial_port_data(port, NULL);
- kref_put(&priv->kref, ftdi_sio_priv_release);
- }
+ kref_put(&priv->kref, ftdi_sio_priv_release);
return 0;
}
@@ -1521,6 +1634,8 @@ static int ftdi_open(struct tty_struct *tty,
if (tty)
tty->low_latency = (priv->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ write_latency_timer(port);
+
/* No error checking for this (will get errors later anyway) */
/* See ftdi_sio.h for description of what is reset */
usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
@@ -1536,11 +1651,6 @@ static int ftdi_open(struct tty_struct *tty,
if (tty)
ftdi_set_termios(tty, port, tty->termios);
- /* FIXME: Flow control might be enabled, so it should be checked -
- we have no control of defaults! */
- /* Turn on RTS and DTR since we are not flow controlling by default */
- set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
-
/* Not throttled */
spin_lock_irqsave(&priv->rx_lock, flags);
priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
@@ -1565,6 +1675,30 @@ static int ftdi_open(struct tty_struct *tty,
} /* ftdi_open */
+static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ char buf[1];
+
+ mutex_lock(&port->serial->disc_mutex);
+ if (!port->serial->disconnected) {
+ /* Disable flow control */
+ if (!on && usb_control_msg(port->serial->dev,
+ usb_sndctrlpipe(port->serial->dev, 0),
+ FTDI_SIO_SET_FLOW_CTRL_REQUEST,
+ FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
+ 0, priv->interface, buf, 0,
+ WDR_TIMEOUT) < 0) {
+ dev_err(&port->dev, "error from flowcontrol urb\n");
+ }
+ /* raise or drop RTS and DTR */
+ if (on)
+ set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ else
+ clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ }
+ mutex_unlock(&port->serial->disc_mutex);
+}
/*
* usbserial:__serial_close only calls ftdi_close if the port is open
@@ -1574,31 +1708,12 @@ static int ftdi_open(struct tty_struct *tty,
*
*/
-static void ftdi_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void ftdi_close(struct usb_serial_port *port)
{ /* ftdi_close */
- unsigned int c_cflag = tty->termios->c_cflag;
struct ftdi_private *priv = usb_get_serial_port_data(port);
- char buf[1];
dbg("%s", __func__);
- mutex_lock(&port->serial->disc_mutex);
- if (c_cflag & HUPCL && !port->serial->disconnected) {
- /* Disable flow control */
- if (usb_control_msg(port->serial->dev,
- usb_sndctrlpipe(port->serial->dev, 0),
- FTDI_SIO_SET_FLOW_CTRL_REQUEST,
- FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
- 0, priv->interface, buf, 0,
- WDR_TIMEOUT) < 0) {
- dev_err(&port->dev, "error from flowcontrol urb\n");
- }
-
- /* drop RTS and DTR */
- clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
- } /* Note change no line if hupcl is off */
- mutex_unlock(&port->serial->disc_mutex);
/* cancel any scheduled reading */
cancel_delayed_work_sync(&priv->rx_work);
@@ -1651,8 +1766,8 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
if (data_offset > 0) {
/* Original sio needs control bytes too... */
transfer_size += (data_offset *
- ((count + (PKTSZ - 1 - data_offset)) /
- (PKTSZ - data_offset)));
+ ((count + (priv->max_packet_size - 1 - data_offset)) /
+ (priv->max_packet_size - data_offset)));
}
buffer = kmalloc(transfer_size, GFP_ATOMIC);
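The transfer-size expression in this hunk is just "payload plus one control byte per packet". A hedged standalone sketch with concrete numbers (function names are illustrative, not from the driver):

#include <stdio.h>

/* ceil(count / payload) for positive integers, mirroring the driver's
 * (count + (max - 1 - off)) / (max - off) expression. */
static unsigned packets_needed(unsigned count, unsigned max_packet, unsigned data_offset)
{
	unsigned payload = max_packet - data_offset;	/* user bytes per USB packet */
	return (count + payload - 1) / payload;
}

static unsigned transfer_size(unsigned count, unsigned max_packet, unsigned data_offset)
{
	if (data_offset == 0)
		return count;		/* FT232BM and later: no control byte */
	return count + data_offset * packets_needed(count, max_packet, data_offset);
}

int main(void)
{
	/* Original SIO: 1 control byte per packet, 64-byte packets.
	 * 100 user bytes need ceil(100/63) = 2 packets, so 102 bytes total. */
	printf("sio,   100 bytes -> %u on the wire\n", transfer_size(100, 64, 1));
	printf("232bm, 100 bytes -> %u on the wire\n", transfer_size(100, 64, 0));
	return 0;
}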
@@ -1674,7 +1789,7 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
if (data_offset > 0) {
/* Original sio requires control byte at start of
each packet. */
- int user_pktsz = PKTSZ - data_offset;
+ int user_pktsz = priv->max_packet_size - data_offset;
int todo = count;
unsigned char *first_byte = buffer;
const unsigned char *current_position = buf;
@@ -1755,11 +1870,6 @@ static void ftdi_write_bulk_callback(struct urb *urb)
dbg("%s - port %d", __func__, port->number);
- if (status) {
- dbg("nonzero write bulk status received: %d", status);
- return;
- }
-
priv = usb_get_serial_port_data(port);
if (!priv) {
dbg("%s - bad port private data pointer - exiting", __func__);
@@ -1770,13 +1880,18 @@ static void ftdi_write_bulk_callback(struct urb *urb)
data_offset = priv->write_offset;
if (data_offset > 0) {
/* Subtract the control bytes */
- countback -= (data_offset * DIV_ROUND_UP(countback, PKTSZ));
+ countback -= (data_offset * DIV_ROUND_UP(countback, priv->max_packet_size));
}
spin_lock_irqsave(&priv->tx_lock, flags);
--priv->tx_outstanding_urbs;
priv->tx_outstanding_bytes -= countback;
spin_unlock_irqrestore(&priv->tx_lock, flags);
+ if (status) {
+ dbg("nonzero write bulk status received: %d", status);
+ return;
+ }
+
usb_serial_port_softint(port);
} /* ftdi_write_bulk_callback */
@@ -1872,7 +1987,7 @@ static void ftdi_read_bulk_callback(struct urb *urb)
/* count data bytes, but not status bytes */
countread = urb->actual_length;
- countread -= 2 * DIV_ROUND_UP(countread, PKTSZ);
+ countread -= 2 * DIV_ROUND_UP(countread, priv->max_packet_size);
spin_lock_irqsave(&priv->rx_lock, flags);
priv->rx_bytes += countread;
spin_unlock_irqrestore(&priv->rx_lock, flags);
@@ -1945,7 +2060,7 @@ static void ftdi_process_read(struct work_struct *work)
need_flip = 0;
for (packet_offset = priv->rx_processed;
- packet_offset < urb->actual_length; packet_offset += PKTSZ) {
+ packet_offset < urb->actual_length; packet_offset += priv->max_packet_size) {
int length;
/* Compare new line status to the old one, signal if different/
@@ -1960,7 +2075,7 @@ static void ftdi_process_read(struct work_struct *work)
priv->prev_status = new_status;
}
- length = min_t(u32, PKTSZ, urb->actual_length-packet_offset)-2;
+ length = min_t(u32, priv->max_packet_size, urb->actual_length-packet_offset)-2;
if (length < 0) {
dev_err(&port->dev, "%s - bad packet length: %d\n",
__func__, length+2);
@@ -1991,6 +2106,7 @@ static void ftdi_process_read(struct work_struct *work)
if (data[packet_offset+1] & FTDI_RS_BI) {
error_flag = TTY_BREAK;
dbg("BREAK received");
+ usb_serial_handle_break(port);
}
if (data[packet_offset+1] & FTDI_RS_PE) {
error_flag = TTY_PARITY;
@@ -2005,8 +2121,11 @@ static void ftdi_process_read(struct work_struct *work)
/* Note that the error flag is duplicated for
every character received since we don't know
which character it applied to */
- tty_insert_flip_char(tty,
- data[packet_offset + i], error_flag);
+ if (!usb_serial_handle_sysrq_char(port,
+ data[packet_offset + i]))
+ tty_insert_flip_char(tty,
+ data[packet_offset + i],
+ error_flag);
}
need_flip = 1;
}
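Each max_packet_size chunk of a bulk-in transfer starts with two status bytes, which is why both the packet walk above and the earlier countread adjustment subtract 2 per chunk. A hedged sketch of the same bookkeeping (buffer contents and names are illustrative):

#include <stdio.h>
#include <string.h>

#define MAX_PACKET 64	/* would come from priv->max_packet_size */

/* Walk a bulk-in buffer the way the loop above does: every MAX_PACKET
 * chunk carries 2 status bytes followed by payload.  Returns the number
 * of payload bytes. */
static unsigned count_payload(const unsigned char *data, unsigned actual_length)
{
	unsigned payload = 0;

	for (unsigned off = 0; off < actual_length; off += MAX_PACKET) {
		unsigned chunk = actual_length - off;

		if (chunk > MAX_PACKET)
			chunk = MAX_PACKET;
		if (chunk < 2)
			break;			/* malformed short chunk */
		payload += chunk - 2;		/* skip the two status bytes */
	}
	return payload;
}

int main(void)
{
	unsigned char urb_buf[130];

	memset(urb_buf, 0, sizeof(urb_buf));
	/* 130 bytes = 64 + 64 + 2: two full packets and one status-only tail,
	 * so 62 + 62 + 0 = 124 payload bytes — the same result as
	 * countread - 2 * DIV_ROUND_UP(countread, max_packet_size). */
	printf("payload bytes: %u\n", count_payload(urb_buf, 130));
	return 0;
}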
@@ -2312,6 +2431,8 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file)
case FT232BM:
case FT2232C:
case FT232RL:
+ case FT2232H:
+ case FT4232H:
/* the 8U232AM returns a two byte value (the sio is a 1 byte
value) - in the same format as the data returned from the in
point */
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 12330fa1c09..f1d440a728a 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -10,7 +10,7 @@
* The device is based on the FTDI FT8U100AX chip. It has a DB25 on one side,
* USB on the other.
*
- * Thanx to FTDI (http://www.ftdi.co.uk) for so kindly providing details
+ * Thanx to FTDI (http://www.ftdichip.com) for so kindly providing details
* of the protocol required to talk to the device and ongoing assistance
* during development.
*
@@ -28,11 +28,15 @@
#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
+#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
#define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */
#define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */
#define FTDI_NF_RIC_PID 0x0001 /* Product Id */
#define FTDI_USBX_707_PID 0xF857 /* ADSTech IR Blaster USBX-707 */
+/* Larsen and Brusgaard AltiTrack/USBtrack */
+#define LARSENBRUSGAARD_VID 0x0FD8
+#define LB_ALTITRACK_PID 0x0001
/* www.canusb.com Lawicel CANUSB device */
#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
@@ -873,6 +877,11 @@
#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */
+/* Interface indices for FT2232, FT2232H and FT4232H devices */
+#define INTERFACE_A 1
+#define INTERFACE_B 2
+#define INTERFACE_C 3
+#define INTERFACE_D 4
/*
* FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3
@@ -1036,6 +1045,8 @@ typedef enum {
FT232BM = 3,
FT2232C = 4,
FT232RL = 5,
+ FT2232H = 6,
+ FT4232H = 7
} ftdi_chip_type_t;
typedef enum {
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 586d30ff450..8839f1c70b7 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1,7 +1,7 @@
/*
* Garmin GPS driver
*
- * Copyright (C) 2006,2007 Hermann Kneissel herkne@users.sourceforge.net
+ * Copyright (C) 2006-2009 Hermann Kneissel herkne@users.sourceforge.net
*
* The latest version of the driver can be found at
* http://sourceforge.net/projects/garmin-gps/
@@ -51,7 +51,7 @@ static int debug;
*/
#define VERSION_MAJOR 0
-#define VERSION_MINOR 31
+#define VERSION_MINOR 33
#define _STR(s) #s
#define _DRIVER_VERSION(a, b) "v" _STR(a) "." _STR(b)
@@ -129,7 +129,6 @@ struct garmin_data {
__u8 state;
__u16 flags;
__u8 mode;
- __u8 ignorePkts;
__u8 count;
__u8 pkt_id;
__u32 serial_num;
@@ -141,8 +140,6 @@ struct garmin_data {
__u8 inbuffer [GPS_IN_BUFSIZ]; /* tty -> usb */
__u8 outbuffer[GPS_OUT_BUFSIZ]; /* usb -> tty */
__u8 privpkt[4*6];
- atomic_t req_count;
- atomic_t resp_count;
spinlock_t lock;
struct list_head pktlist;
};
@@ -170,6 +167,8 @@ struct garmin_data {
#define FLAGS_BULK_IN_ACTIVE 0x0020
#define FLAGS_BULK_IN_RESTART 0x0010
#define FLAGS_THROTTLED 0x0008
+#define APP_REQ_SEEN 0x0004
+#define APP_RESP_SEEN 0x0002
#define CLEAR_HALT_REQUIRED 0x0001
#define FLAGS_QUEUING 0x0100
@@ -184,20 +183,16 @@ struct garmin_data {
/* function prototypes */
-static void gsp_next_packet(struct garmin_data *garmin_data_p);
-static int garmin_write_bulk(struct usb_serial_port *port,
+static int gsp_next_packet(struct garmin_data *garmin_data_p);
+static int garmin_write_bulk(struct usb_serial_port *port,
const unsigned char *buf, int count,
int dismiss_ack);
/* some special packets to be send or received */
static unsigned char const GARMIN_START_SESSION_REQ[]
= { 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0 };
-static unsigned char const GARMIN_START_SESSION_REQ2[]
- = { 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
static unsigned char const GARMIN_START_SESSION_REPLY[]
= { 0, 0, 0, 0, 6, 0, 0, 0, 4, 0, 0, 0 };
-static unsigned char const GARMIN_SESSION_ACTIVE_REPLY[]
- = { 0, 0, 0, 0, 17, 0, 0, 0, 4, 0, 0, 0, 0, 16, 0, 0 };
static unsigned char const GARMIN_BULK_IN_AVAIL_REPLY[]
= { 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 };
static unsigned char const GARMIN_APP_LAYER_REPLY[]
@@ -233,13 +228,6 @@ static struct usb_driver garmin_driver = {
};
-static inline int noResponseFromAppLayer(struct garmin_data *garmin_data_p)
-{
- return atomic_read(&garmin_data_p->req_count) ==
- atomic_read(&garmin_data_p->resp_count);
-}
-
-
static inline int getLayerId(const __u8 *usbPacket)
{
return __le32_to_cpup((__le32 *)(usbPacket));
@@ -325,8 +313,11 @@ static int pkt_add(struct garmin_data *garmin_data_p,
state = garmin_data_p->state;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+ dbg("%s - added: pkt: %d - %d bytes",
+ __func__, pkt->seq, data_length);
+
/* in serial mode, if someone is waiting for data from
- the device, iconvert and send the next packet to tty. */
+ the device, convert and send the next packet to tty. */
if (result && (state == STATE_GSP_WAIT_DATA))
gsp_next_packet(garmin_data_p);
}
@@ -411,7 +402,7 @@ static int gsp_send_ack(struct garmin_data *garmin_data_p, __u8 pkt_id)
/*
* called for a complete packet received from tty layer
*
- * the complete packet (pkzid ... cksum) is in garmin_data_p->inbuf starting
+ * the complete packet (pktid ... cksum) is in garmin_data_p->inbuf starting
* at GSP_INITIAL_OFFSET.
*
* count - number of bytes in the input buffer including space reserved for
@@ -501,7 +492,6 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
unsigned long flags;
int offs = 0;
int ack_or_nak_seen = 0;
- int i = 0;
__u8 *dest;
int size;
/* dleSeen: set if last byte read was a DLE */
@@ -519,8 +509,8 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
skip = garmin_data_p->flags & FLAGS_GSP_SKIP;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
- dbg("%s - dle=%d skip=%d size=%d count=%d",
- __func__, dleSeen, skip, size, count);
+ /* dbg("%s - dle=%d skip=%d size=%d count=%d",
+ __func__, dleSeen, skip, size, count); */
if (size == 0)
size = GSP_INITIAL_OFFSET;
@@ -568,7 +558,6 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
} else if (!skip) {
if (dleSeen) {
- dbg("non-masked DLE at %d - restarting", i);
size = GSP_INITIAL_OFFSET;
dleSeen = 0;
}
@@ -599,19 +588,19 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
else
garmin_data_p->flags &= ~FLAGS_GSP_DLESEEN;
- if (ack_or_nak_seen)
- garmin_data_p->state = STATE_GSP_WAIT_DATA;
-
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
- if (ack_or_nak_seen)
- gsp_next_packet(garmin_data_p);
+ if (ack_or_nak_seen) {
+ if (gsp_next_packet(garmin_data_p) > 0)
+ garmin_data_p->state = STATE_ACTIVE;
+ else
+ garmin_data_p->state = STATE_GSP_WAIT_DATA;
+ }
return count;
}
-
/*
* Sends a usb packet to the tty
*
@@ -733,29 +722,28 @@ static int gsp_send(struct garmin_data *garmin_data_p,
}
-
-
-
/*
* Process the next pending data packet - if there is one
*/
-static void gsp_next_packet(struct garmin_data *garmin_data_p)
+static int gsp_next_packet(struct garmin_data *garmin_data_p)
{
+ int result = 0;
struct garmin_packet *pkt = NULL;
while ((pkt = pkt_pop(garmin_data_p)) != NULL) {
dbg("%s - next pkt: %d", __func__, pkt->seq);
- if (gsp_send(garmin_data_p, pkt->data, pkt->size) > 0) {
+ result = gsp_send(garmin_data_p, pkt->data, pkt->size);
+ if (result > 0) {
kfree(pkt);
- return;
+ return result;
}
kfree(pkt);
}
+ return result;
}
-
/******************************************************************************
* garmin native mode
******************************************************************************/
@@ -888,14 +876,6 @@ static int garmin_clear(struct garmin_data *garmin_data_p)
unsigned long flags;
int status = 0;
- struct usb_serial_port *port = garmin_data_p->port;
-
- if (port != NULL && atomic_read(&garmin_data_p->resp_count)) {
- /* send a terminate command */
- status = garmin_write_bulk(port, GARMIN_STOP_TRANSFER_REQ,
- sizeof(GARMIN_STOP_TRANSFER_REQ), 1);
- }
-
/* flush all queued data */
pkt_clear(garmin_data_p);
@@ -908,16 +888,12 @@ static int garmin_clear(struct garmin_data *garmin_data_p)
}
-
-
-
-
static int garmin_init_session(struct usb_serial_port *port)
{
- unsigned long flags;
struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
int status = 0;
+ int i = 0;
if (status == 0) {
usb_kill_urb(port->interrupt_in_urb);
@@ -931,30 +907,25 @@ static int garmin_init_session(struct usb_serial_port *port)
__func__, status);
}
+ /*
+ * using the initialization method from gpsbabel. See comments in
+ * gpsbabel/jeeps/gpslibusb.c gusb_reset_toggles()
+ */
if (status == 0) {
dbg("%s - starting session ...", __func__);
garmin_data_p->state = STATE_ACTIVE;
- status = garmin_write_bulk(port, GARMIN_START_SESSION_REQ,
- sizeof(GARMIN_START_SESSION_REQ), 0);
- if (status >= 0) {
-
- spin_lock_irqsave(&garmin_data_p->lock, flags);
- garmin_data_p->ignorePkts++;
- spin_unlock_irqrestore(&garmin_data_p->lock, flags);
-
- /* not needed, but the win32 driver does it too ... */
+ for (i = 0; i < 3; i++) {
status = garmin_write_bulk(port,
- GARMIN_START_SESSION_REQ2,
- sizeof(GARMIN_START_SESSION_REQ2), 0);
- if (status >= 0) {
- status = 0;
- spin_lock_irqsave(&garmin_data_p->lock, flags);
- garmin_data_p->ignorePkts++;
- spin_unlock_irqrestore(&garmin_data_p->lock,
- flags);
- }
+ GARMIN_START_SESSION_REQ,
+ sizeof(GARMIN_START_SESSION_REQ), 0);
+
+ if (status < 0)
+ break;
}
+
+ if (status > 0)
+ status = 0;
}
return status;
@@ -962,8 +933,6 @@ static int garmin_init_session(struct usb_serial_port *port)
-
-
static int garmin_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp)
{
@@ -977,8 +946,6 @@ static int garmin_open(struct tty_struct *tty,
garmin_data_p->mode = initial_mode;
garmin_data_p->count = 0;
garmin_data_p->flags = 0;
- atomic_set(&garmin_data_p->req_count, 0);
- atomic_set(&garmin_data_p->resp_count, 0);
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
/* shutdown any bulk reads that might be going on */
@@ -993,8 +960,7 @@ static int garmin_open(struct tty_struct *tty,
}
-static void garmin_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void garmin_close(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
@@ -1007,6 +973,7 @@ static void garmin_close(struct tty_struct *tty,
return;
mutex_lock(&port->serial->disc_mutex);
+
if (!port->serial->disconnected)
garmin_clear(garmin_data_p);
@@ -1014,25 +981,17 @@ static void garmin_close(struct tty_struct *tty,
usb_kill_urb(port->read_urb);
usb_kill_urb(port->write_urb);
- if (!port->serial->disconnected) {
- if (noResponseFromAppLayer(garmin_data_p) ||
- ((garmin_data_p->flags & CLEAR_HALT_REQUIRED) != 0)) {
- process_resetdev_request(port);
- garmin_data_p->state = STATE_RESET;
- } else {
- garmin_data_p->state = STATE_DISCONNECTED;
- }
- } else {
+ /* keep reset state so we know that we must start a new session */
+ if (garmin_data_p->state != STATE_RESET)
garmin_data_p->state = STATE_DISCONNECTED;
- }
+
mutex_unlock(&port->serial->disc_mutex);
}
+
static void garmin_write_bulk_callback(struct urb *urb)
{
- unsigned long flags;
struct usb_serial_port *port = urb->context;
- int status = urb->status;
if (port) {
struct garmin_data *garmin_data_p =
@@ -1040,20 +999,13 @@ static void garmin_write_bulk_callback(struct urb *urb)
dbg("%s - port %d", __func__, port->number);
- if (GARMIN_LAYERID_APPL == getLayerId(urb->transfer_buffer)
- && (garmin_data_p->mode == MODE_GARMIN_SERIAL)) {
- gsp_send_ack(garmin_data_p,
- ((__u8 *)urb->transfer_buffer)[4]);
- }
+ if (GARMIN_LAYERID_APPL == getLayerId(urb->transfer_buffer)) {
- if (status) {
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
- spin_lock_irqsave(&garmin_data_p->lock, flags);
- garmin_data_p->flags |= CLEAR_HALT_REQUIRED;
- spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+ if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
+ gsp_send_ack(garmin_data_p,
+ ((__u8 *)urb->transfer_buffer)[4]);
+ }
}
-
usb_serial_port_softint(port);
}
@@ -1109,7 +1061,11 @@ static int garmin_write_bulk(struct usb_serial_port *port,
urb->transfer_flags |= URB_ZERO_PACKET;
if (GARMIN_LAYERID_APPL == getLayerId(buffer)) {
- atomic_inc(&garmin_data_p->req_count);
+
+ spin_lock_irqsave(&garmin_data_p->lock, flags);
+ garmin_data_p->flags |= APP_REQ_SEEN;
+ spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+
if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
pkt_clear(garmin_data_p);
garmin_data_p->state = STATE_GSP_WAIT_DATA;
@@ -1141,6 +1097,9 @@ static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
usb_serial_debug_data(debug, &port->dev, __func__, count, buf);
+ if (garmin_data_p->state == STATE_RESET)
+ return -EIO;
+
/* check for our private packets */
if (count >= GARMIN_PKTHDR_LENGTH) {
len = PRIVPKTSIZ;
@@ -1185,7 +1144,7 @@ static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
break;
case PRIV_PKTID_RESET_REQ:
- atomic_inc(&garmin_data_p->req_count);
+ process_resetdev_request(port);
break;
case PRIV_PKTID_SET_DEF_MODE:
@@ -1201,8 +1160,6 @@ static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
}
}
- garmin_data_p->ignorePkts = 0;
-
if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
return gsp_receive(garmin_data_p, buf, count);
} else { /* MODE_NATIVE */
@@ -1225,31 +1182,33 @@ static int garmin_write_room(struct tty_struct *tty)
static void garmin_read_process(struct garmin_data *garmin_data_p,
unsigned char *data, unsigned data_length)
{
+ unsigned long flags;
+
if (garmin_data_p->flags & FLAGS_DROP_DATA) {
/* abort-transfer cmd is active */
dbg("%s - pkt dropped", __func__);
} else if (garmin_data_p->state != STATE_DISCONNECTED &&
garmin_data_p->state != STATE_RESET) {
- /* remember any appl.layer packets, so we know
- if a reset is required or not when closing
- the device */
- if (0 == memcmp(data, GARMIN_APP_LAYER_REPLY,
- sizeof(GARMIN_APP_LAYER_REPLY))) {
- atomic_inc(&garmin_data_p->resp_count);
- }
-
/* if throttling is active or postprocessing is required
put the received data in the input queue, otherwise
send it directly to the tty port */
if (garmin_data_p->flags & FLAGS_QUEUING) {
pkt_add(garmin_data_p, data, data_length);
- } else if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
- if (getLayerId(data) == GARMIN_LAYERID_APPL)
+ } else if (getLayerId(data) == GARMIN_LAYERID_APPL) {
+
+ spin_lock_irqsave(&garmin_data_p->lock, flags);
+ garmin_data_p->flags |= APP_RESP_SEEN;
+ spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+
+ if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
pkt_add(garmin_data_p, data, data_length);
- } else {
- send_to_tty(garmin_data_p->port, data, data_length);
+ } else {
+ send_to_tty(garmin_data_p->port, data,
+ data_length);
+ }
}
+ /* ignore system layer packets ... */
}
}
@@ -1364,8 +1323,6 @@ static void garmin_read_int_callback(struct urb *urb)
} else {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_BULK_IN_ACTIVE;
- /* do not send this packet to the user */
- garmin_data_p->ignorePkts = 1;
spin_unlock_irqrestore(&garmin_data_p->lock,
flags);
}
@@ -1392,17 +1349,7 @@ static void garmin_read_int_callback(struct urb *urb)
__func__, garmin_data_p->serial_num);
}
- if (garmin_data_p->ignorePkts) {
- /* this reply belongs to a request generated by the driver,
- ignore it. */
- dbg("%s - pkt ignored (%d)",
- __func__, garmin_data_p->ignorePkts);
- spin_lock_irqsave(&garmin_data_p->lock, flags);
- garmin_data_p->ignorePkts--;
- spin_unlock_irqrestore(&garmin_data_p->lock, flags);
- } else {
- garmin_read_process(garmin_data_p, data, urb->actual_length);
- }
+ garmin_read_process(garmin_data_p, data, urb->actual_length);
port->interrupt_in_urb->dev = port->serial->dev;
retval = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1528,7 +1475,7 @@ static int garmin_attach(struct usb_serial *serial)
}
-static void garmin_shutdown(struct usb_serial *serial)
+static void garmin_disconnect(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
@@ -1537,8 +1484,17 @@ static void garmin_shutdown(struct usb_serial *serial)
usb_kill_urb(port->interrupt_in_urb);
del_timer_sync(&garmin_data_p->timer);
+}
+
+
+static void garmin_release(struct usb_serial *serial)
+{
+ struct usb_serial_port *port = serial->port[0];
+ struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
+
+ dbg("%s", __func__);
+
kfree(garmin_data_p);
- usb_set_serial_port_data(port, NULL);
}
@@ -1557,7 +1513,8 @@ static struct usb_serial_driver garmin_device = {
.throttle = garmin_throttle,
.unthrottle = garmin_unthrottle,
.attach = garmin_attach,
- .shutdown = garmin_shutdown,
+ .disconnect = garmin_disconnect,
+ .release = garmin_release,
.write = garmin_write,
.write_room = garmin_write_room,
.write_bulk_callback = garmin_write_bulk_callback,
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 4cec9906ccf..932d6241b78 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -63,7 +63,8 @@ struct usb_serial_driver usb_serial_generic_device = {
.id_table = generic_device_ids,
.usb_driver = &generic_driver,
.num_ports = 1,
- .shutdown = usb_serial_generic_shutdown,
+ .disconnect = usb_serial_generic_disconnect,
+ .release = usb_serial_generic_release,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.resume = usb_serial_generic_resume,
@@ -184,13 +185,94 @@ int usb_serial_generic_resume(struct usb_serial *serial)
}
EXPORT_SYMBOL_GPL(usb_serial_generic_resume);
-void usb_serial_generic_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+void usb_serial_generic_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
generic_cleanup(port);
}
+static int usb_serial_multi_urb_write(struct tty_struct *tty,
+ struct usb_serial_port *port, const unsigned char *buf, int count)
+{
+ unsigned long flags;
+ struct urb *urb;
+ unsigned char *buffer;
+ int status;
+ int towrite;
+ int bwrite = 0;
+
+ dbg("%s - port %d", __func__, port->number);
+
+ if (count == 0)
+ dbg("%s - write request of 0 bytes", __func__);
+
+ while (count > 0) {
+ towrite = (count > port->bulk_out_size) ?
+ port->bulk_out_size : count;
+ spin_lock_irqsave(&port->lock, flags);
+ if (port->urbs_in_flight >
+ port->serial->type->max_in_flight_urbs) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ dbg("%s - write limit hit\n", __func__);
+ return bwrite;
+ }
+ port->tx_bytes_flight += towrite;
+ port->urbs_in_flight++;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ buffer = kmalloc(towrite, GFP_ATOMIC);
+ if (!buffer) {
+ dev_err(&port->dev,
+ "%s ran out of kernel memory for urb ...\n", __func__);
+ goto error_no_buffer;
+ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ dev_err(&port->dev, "%s - no more free urbs\n",
+ __func__);
+ goto error_no_urb;
+ }
+
+ /* Copy data */
+ memcpy(buffer, buf + bwrite, towrite);
+ usb_serial_debug_data(debug, &port->dev, __func__,
+ towrite, buffer);
+ /* fill the buffer and send it */
+ usb_fill_bulk_urb(urb, port->serial->dev,
+ usb_sndbulkpipe(port->serial->dev,
+ port->bulk_out_endpointAddress),
+ buffer, towrite,
+ usb_serial_generic_write_bulk_callback, port);
+
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status) {
+ dev_err(&port->dev,
+ "%s - failed submitting write urb, error %d\n",
+ __func__, status);
+ goto error;
+ }
+
+ /* This urb is the responsibility of the host driver now */
+ usb_free_urb(urb);
+ dbg("%s write: %d", __func__, towrite);
+ count -= towrite;
+ bwrite += towrite;
+ }
+ return bwrite;
+
+error:
+ usb_free_urb(urb);
+error_no_urb:
+ kfree(buffer);
+error_no_buffer:
+ spin_lock_irqsave(&port->lock, flags);
+ port->urbs_in_flight--;
+ port->tx_bytes_flight -= towrite;
+ spin_unlock_irqrestore(&port->lock, flags);
+ return bwrite;
+}
+
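The new multi-URB path relies on two per-port counters, urbs_in_flight and tx_bytes_flight, which the write_room and chars_in_buffer hunks below also read. A hedged userspace model of that bookkeeping (the struct mirrors the driver's fields but the code is only a sketch):

#include <stdio.h>

/* Minimal model of the per-port counters the generic driver keeps when
 * max_in_flight_urbs is set. */
struct port_model {
	int max_in_flight_urbs;
	int bulk_out_size;
	int urbs_in_flight;
	int tx_bytes_flight;
};

static int write_room(const struct port_model *p)
{
	if (p->urbs_in_flight >= p->max_in_flight_urbs)
		return 0;	/* write limit hit, caller must retry later */
	return p->bulk_out_size * (p->max_in_flight_urbs - p->urbs_in_flight);
}

static void submit(struct port_model *p, int bytes)
{
	p->urbs_in_flight++;
	p->tx_bytes_flight += bytes;
}

static void complete(struct port_model *p, int bytes)
{
	p->urbs_in_flight--;
	p->tx_bytes_flight -= bytes;
	if (p->urbs_in_flight < 0)
		p->urbs_in_flight = 0;	/* same defensive clamp as the callback */
}

int main(void)
{
	struct port_model p = { .max_in_flight_urbs = 4, .bulk_out_size = 64 };

	submit(&p, 64);
	submit(&p, 32);
	printf("room=%d, chars buffered=%d\n", write_room(&p), p.tx_bytes_flight);
	complete(&p, 64);
	printf("room=%d, chars buffered=%d\n", write_room(&p), p.tx_bytes_flight);
	return 0;
}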
int usb_serial_generic_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
@@ -208,6 +290,11 @@ int usb_serial_generic_write(struct tty_struct *tty,
/* only do something if we have a bulk out endpoint */
if (serial->num_bulk_out) {
unsigned long flags;
+
+ if (serial->type->max_in_flight_urbs)
+ return usb_serial_multi_urb_write(tty, port,
+ buf, count);
+
spin_lock_irqsave(&port->lock, flags);
if (port->write_urb_busy) {
spin_unlock_irqrestore(&port->lock, flags);
@@ -253,20 +340,26 @@ int usb_serial_generic_write(struct tty_struct *tty,
/* no bulk out, so return 0 bytes written */
return 0;
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_write);
int usb_serial_generic_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
+ unsigned long flags;
int room = 0;
dbg("%s - port %d", __func__, port->number);
-
- /* FIXME: Locking */
- if (serial->num_bulk_out) {
- if (!(port->write_urb_busy))
- room = port->bulk_out_size;
+ spin_lock_irqsave(&port->lock, flags);
+ if (serial->type->max_in_flight_urbs) {
+ if (port->urbs_in_flight < serial->type->max_in_flight_urbs)
+ room = port->bulk_out_size *
+ (serial->type->max_in_flight_urbs -
+ port->urbs_in_flight);
+ } else if (serial->num_bulk_out && !(port->write_urb_busy)) {
+ room = port->bulk_out_size;
}
+ spin_unlock_irqrestore(&port->lock, flags);
dbg("%s - returns %d", __func__, room);
return room;
@@ -277,11 +370,16 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = port->serial;
int chars = 0;
+ unsigned long flags;
dbg("%s - port %d", __func__, port->number);
- /* FIXME: Locking */
- if (serial->num_bulk_out) {
+ if (serial->type->max_in_flight_urbs) {
+ spin_lock_irqsave(&port->lock, flags);
+ chars = port->tx_bytes_flight;
+ spin_unlock_irqrestore(&port->lock, flags);
+ } else if (serial->num_bulk_out) {
+ /* FIXME: Locking */
if (port->write_urb_busy)
chars = port->write_urb->transfer_buffer_length;
}
@@ -291,7 +389,8 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
}
-static void resubmit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
+void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
+ gfp_t mem_flags)
{
struct urb *urb = port->read_urb;
struct usb_serial *serial = port->serial;
@@ -312,25 +411,28 @@ static void resubmit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
"%s - failed resubmitting read urb, error %d\n",
__func__, result);
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_resubmit_read_urb);
/* Push data to tty layer and resubmit the bulk read URB */
static void flush_and_resubmit_read_urb(struct usb_serial_port *port)
{
struct urb *urb = port->read_urb;
struct tty_struct *tty = tty_port_tty_get(&port->port);
- int room;
+ char *ch = (char *)urb->transfer_buffer;
+ int i;
+
+ if (!tty)
+ goto done;
/* Push data to tty */
- if (tty && urb->actual_length) {
- room = tty_buffer_request_room(tty, urb->actual_length);
- if (room) {
- tty_insert_flip_string(tty, urb->transfer_buffer, room);
- tty_flip_buffer_push(tty);
- }
+ for (i = 0; i < urb->actual_length; i++, ch++) {
+ if (!usb_serial_handle_sysrq_char(port, *ch))
+ tty_insert_flip_char(tty, *ch, TTY_NORMAL);
}
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
-
- resubmit_read_urb(port, GFP_ATOMIC);
+done:
+ usb_serial_generic_resubmit_read_urb(port, GFP_ATOMIC);
}
void usb_serial_generic_read_bulk_callback(struct urb *urb)
@@ -364,12 +466,24 @@ EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback);
void usb_serial_generic_write_bulk_callback(struct urb *urb)
{
+ unsigned long flags;
struct usb_serial_port *port = urb->context;
int status = urb->status;
dbg("%s - port %d", __func__, port->number);
- port->write_urb_busy = 0;
+ if (port->serial->type->max_in_flight_urbs) {
+ spin_lock_irqsave(&port->lock, flags);
+ --port->urbs_in_flight;
+ port->tx_bytes_flight -= urb->transfer_buffer_length;
+ if (port->urbs_in_flight < 0)
+ port->urbs_in_flight = 0;
+ spin_unlock_irqrestore(&port->lock, flags);
+ } else {
+ /* Handle the case for single urb mode */
+ port->write_urb_busy = 0;
+ }
+
if (status) {
dbg("%s - nonzero write bulk status received: %d",
__func__, status);
@@ -409,11 +523,36 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
if (was_throttled) {
/* Resume reading from device */
- resubmit_read_urb(port, GFP_KERNEL);
+ usb_serial_generic_resubmit_read_urb(port, GFP_KERNEL);
+ }
+}
+
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
+{
+ if (port->sysrq && port->console) {
+ if (ch && time_before(jiffies, port->sysrq)) {
+ handle_sysrq(ch, tty_port_tty_get(&port->port));
+ port->sysrq = 0;
+ return 1;
+ }
+ port->sysrq = 0;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_serial_handle_sysrq_char);
+
+int usb_serial_handle_break(struct usb_serial_port *port)
+{
+ if (!port->sysrq) {
+ port->sysrq = jiffies + HZ*5;
+ return 1;
}
+ port->sysrq = 0;
+ return 0;
}
+EXPORT_SYMBOL_GPL(usb_serial_handle_break);
-void usb_serial_generic_shutdown(struct usb_serial *serial)
+void usb_serial_generic_disconnect(struct usb_serial *serial)
{
int i;
@@ -424,3 +563,7 @@ void usb_serial_generic_shutdown(struct usb_serial *serial)
generic_cleanup(serial->port[i]);
}
+void usb_serial_generic_release(struct usb_serial *serial)
+{
+ dbg("%s", __func__);
+}
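The sysrq helpers added above implement a timed window: a BREAK arms the port for five seconds (port->sysrq = jiffies + HZ*5), and the next non-NUL character inside that window is consumed as a SysRq key instead of being pushed to the tty. A hedged sketch of that state machine, with a plain integer clock standing in for jiffies and the console check omitted:

#include <stdio.h>

#define HZ 100

/* 0 means "not armed"; otherwise the deadline in ticks. */
static unsigned long sysrq_deadline;

static int handle_break(unsigned long now)
{
	if (!sysrq_deadline) {
		sysrq_deadline = now + 5 * HZ;	/* arm a 5 second window */
		return 1;			/* break consumed */
	}
	sysrq_deadline = 0;			/* second break cancels */
	return 0;
}

static int handle_char(unsigned long now, int ch)
{
	if (sysrq_deadline && ch && now < sysrq_deadline) {
		sysrq_deadline = 0;
		printf("sysrq '%c' at t=%lu\n", ch, now);
		return 1;			/* swallowed, not passed to the tty */
	}
	sysrq_deadline = 0;
	return 0;
}

int main(void)
{
	handle_break(0);			/* BREAK received at t=0 */
	handle_char(120, 'b');			/* 1.2 s later: acts as SysRq-b */
	handle_char(130, 'x');			/* window already consumed: normal data */
	return 0;
}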
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index fb4a73d090f..0191693625d 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -207,8 +207,7 @@ static void edge_bulk_out_cmd_callback(struct urb *urb);
/* function prototypes for the usbserial callbacks */
static int edge_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp);
-static void edge_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void edge_close(struct usb_serial_port *port);
static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int edge_write_room(struct tty_struct *tty);
@@ -225,7 +224,8 @@ static int edge_tiocmget(struct tty_struct *tty, struct file *file);
static int edge_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
static int edge_startup(struct usb_serial *serial);
-static void edge_shutdown(struct usb_serial *serial);
+static void edge_disconnect(struct usb_serial *serial);
+static void edge_release(struct usb_serial *serial);
#include "io_tables.h" /* all of the devices that this driver supports */
@@ -965,7 +965,7 @@ static int edge_open(struct tty_struct *tty,
if (!edge_port->txfifo.fifo) {
dbg("%s - no memory", __func__);
- edge_close(tty, port, filp);
+ edge_close(port);
return -ENOMEM;
}
@@ -975,7 +975,7 @@ static int edge_open(struct tty_struct *tty,
if (!edge_port->write_urb) {
dbg("%s - no memory", __func__);
- edge_close(tty, port, filp);
+ edge_close(port);
return -ENOMEM;
}
@@ -1099,8 +1099,7 @@ static void block_until_tx_empty(struct edgeport_port *edge_port)
* edge_close
* this function is called by the tty driver when a port is closed
*****************************************************************************/
-static void edge_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void edge_close(struct usb_serial_port *port)
{
struct edgeport_serial *edge_serial;
struct edgeport_port *edge_port;
@@ -3195,21 +3194,16 @@ static int edge_startup(struct usb_serial *serial)
/****************************************************************************
- * edge_shutdown
+ * edge_disconnect
* This function is called whenever the device is removed from the usb bus.
****************************************************************************/
-static void edge_shutdown(struct usb_serial *serial)
+static void edge_disconnect(struct usb_serial *serial)
{
struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
- int i;
dbg("%s", __func__);
/* stop reads and writes on all ports */
- for (i = 0; i < serial->num_ports; ++i) {
- kfree(usb_get_serial_port_data(serial->port[i]));
- usb_set_serial_port_data(serial->port[i], NULL);
- }
/* free up our endpoint stuff */
if (edge_serial->is_epic) {
usb_kill_urb(edge_serial->interrupt_read_urb);
@@ -3220,9 +3214,24 @@ static void edge_shutdown(struct usb_serial *serial)
usb_free_urb(edge_serial->read_urb);
kfree(edge_serial->bulk_in_buffer);
}
+}
+
+
+/****************************************************************************
+ * edge_release
+ * This function is called when the device structure is deallocated.
+ ****************************************************************************/
+static void edge_release(struct usb_serial *serial)
+{
+ struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+ int i;
+
+ dbg("%s", __func__);
+
+ for (i = 0; i < serial->num_ports; ++i)
+ kfree(usb_get_serial_port_data(serial->port[i]));
kfree(edge_serial);
- usb_set_serial_data(serial, NULL);
}
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 7eb9d67b81b..9241d314751 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -117,7 +117,8 @@ static struct usb_serial_driver edgeport_2port_device = {
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
- .shutdown = edge_shutdown,
+ .disconnect = edge_disconnect,
+ .release = edge_release,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
@@ -145,7 +146,8 @@ static struct usb_serial_driver edgeport_4port_device = {
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
- .shutdown = edge_shutdown,
+ .disconnect = edge_disconnect,
+ .release = edge_release,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
@@ -173,7 +175,8 @@ static struct usb_serial_driver edgeport_8port_device = {
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
- .shutdown = edge_shutdown,
+ .disconnect = edge_disconnect,
+ .release = edge_release,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
@@ -200,7 +203,8 @@ static struct usb_serial_driver epic_device = {
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
- .shutdown = edge_shutdown,
+ .disconnect = edge_disconnect,
+ .release = edge_release,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 513b25e044c..e8bc42f92e7 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -102,7 +102,7 @@ struct edgeport_port {
__u8 shadow_mcr;
__u8 shadow_lsr;
__u8 lsr_mask;
- __u32 ump_read_timeout; /* Number of miliseconds the UMP will
+ __u32 ump_read_timeout; /* Number of milliseconds the UMP will
wait without data before completing
a read short */
int baud_rate;
@@ -2009,8 +2009,7 @@ release_es_lock:
return status;
}
-static void edge_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void edge_close(struct usb_serial_port *port)
{
struct edgeport_serial *edge_serial;
struct edgeport_port *edge_port;
@@ -2664,7 +2663,7 @@ cleanup:
return -ENOMEM;
}
-static void edge_shutdown(struct usb_serial *serial)
+static void edge_disconnect(struct usb_serial *serial)
{
int i;
struct edgeport_port *edge_port;
@@ -2674,12 +2673,22 @@ static void edge_shutdown(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
edge_port = usb_get_serial_port_data(serial->port[i]);
edge_remove_sysfs_attrs(edge_port->port);
+ }
+}
+
+static void edge_release(struct usb_serial *serial)
+{
+ int i;
+ struct edgeport_port *edge_port;
+
+ dbg("%s", __func__);
+
+ for (i = 0; i < serial->num_ports; ++i) {
+ edge_port = usb_get_serial_port_data(serial->port[i]);
edge_buf_free(edge_port->ep_out_buf);
kfree(edge_port);
- usb_set_serial_port_data(serial->port[i], NULL);
}
kfree(usb_get_serial_data(serial));
- usb_set_serial_data(serial, NULL);
}
@@ -2916,7 +2925,8 @@ static struct usb_serial_driver edgeport_1port_device = {
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
- .shutdown = edge_shutdown,
+ .disconnect = edge_disconnect,
+ .release = edge_release,
.port_probe = edge_create_sysfs_attrs,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
@@ -2945,7 +2955,8 @@ static struct usb_serial_driver edgeport_2port_device = {
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
- .shutdown = edge_shutdown,
+ .disconnect = edge_disconnect,
+ .release = edge_release,
.port_probe = edge_create_sysfs_attrs,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index cd62825a9ac..2545d45ce16 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -76,11 +76,9 @@ static int initial_wait;
/* Function prototypes for an ipaq */
static int ipaq_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void ipaq_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void ipaq_close(struct usb_serial_port *port);
static int ipaq_calc_num_ports(struct usb_serial *serial);
static int ipaq_startup(struct usb_serial *serial);
-static void ipaq_shutdown(struct usb_serial *serial);
static int ipaq_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int ipaq_write_bulk(struct usb_serial_port *port,
@@ -577,7 +575,6 @@ static struct usb_serial_driver ipaq_device = {
.close = ipaq_close,
.attach = ipaq_startup,
.calc_num_ports = ipaq_calc_num_ports,
- .shutdown = ipaq_shutdown,
.write = ipaq_write,
.write_room = ipaq_write_room,
.chars_in_buffer = ipaq_chars_in_buffer,
@@ -714,8 +711,7 @@ error:
}
-static void ipaq_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void ipaq_close(struct usb_serial_port *port)
{
struct ipaq_private *priv = usb_get_serial_port_data(port);
@@ -992,11 +988,6 @@ static int ipaq_startup(struct usb_serial *serial)
return usb_reset_configuration(serial->dev);
}
-static void ipaq_shutdown(struct usb_serial *serial)
-{
- dbg("%s", __func__);
-}
-
static int __init ipaq_init(void)
{
int retval;
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index da2a2b46644..29ad038b9c8 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -302,23 +302,17 @@ static int ipw_open(struct tty_struct *tty,
return 0;
}
-static void ipw_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void ipw_dtr_rts(struct usb_serial_port *port, int on)
{
struct usb_device *dev = port->serial->dev;
int result;
- if (tty_hung_up_p(filp)) {
- dbg("%s: tty_hung_up_p ...", __func__);
- return;
- }
-
/*--1: drop the dtr */
dbg("%s:dropping dtr", __func__);
result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
IPW_SIO_SET_PIN,
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
- IPW_PIN_CLRDTR,
+ on ? IPW_PIN_SETDTR : IPW_PIN_CLRDTR,
0,
NULL,
0,
@@ -332,7 +326,7 @@ static void ipw_close(struct tty_struct *tty,
result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
IPW_SIO_SET_PIN, USB_TYPE_VENDOR |
USB_RECIP_INTERFACE | USB_DIR_OUT,
- IPW_PIN_CLRRTS,
+ on ? IPW_PIN_SETRTS : IPW_PIN_CLRRTS,
0,
NULL,
0,
@@ -340,7 +334,12 @@ static void ipw_close(struct tty_struct *tty,
if (result < 0)
dev_err(&port->dev,
"dropping rts failed (error = %d)\n", result);
+}
+static void ipw_close(struct usb_serial_port *port)
+{
+ struct usb_device *dev = port->serial->dev;
+ int result;
/*--3: purge */
dbg("%s:sending purge", __func__);
@@ -461,6 +460,7 @@ static struct usb_serial_driver ipw_device = {
.num_ports = 1,
.open = ipw_open,
.close = ipw_close,
+ .dtr_rts = ipw_dtr_rts,
.port_probe = ipw_probe,
.port_remove = ipw_disconnect,
.write = ipw_write,
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 4e2cda93da5..66009b6b763 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -88,8 +88,7 @@ static int xbof = -1;
static int ir_startup (struct usb_serial *serial);
static int ir_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filep);
-static void ir_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filep);
+static void ir_close(struct usb_serial_port *port);
static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static void ir_write_bulk_callback (struct urb *urb);
@@ -346,8 +345,7 @@ static int ir_open(struct tty_struct *tty,
return result;
}
-static void ir_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file * filp)
+static void ir_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 4473d442b2a..96873a7a32b 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -40,7 +40,7 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.10"
#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver"
static struct usb_device_id id_table[] = {
@@ -70,7 +70,6 @@ static void read_rxcmd_callback(struct urb *urb);
struct iuu_private {
spinlock_t lock; /* store irq state */
wait_queue_head_t delta_msr_wait;
- u8 line_control;
u8 line_status;
u8 termios_initialized;
int tiostatus; /* store IUART SIGNAL for tiocmget call */
@@ -122,8 +121,8 @@ static int iuu_startup(struct usb_serial *serial)
return 0;
}
-/* Shutdown function */
-static void iuu_shutdown(struct usb_serial *serial)
+/* Release function */
+static void iuu_release(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
struct iuu_private *priv = usb_get_serial_port_data(port);
@@ -651,32 +650,33 @@ static int iuu_bulk_write(struct usb_serial_port *port)
unsigned long flags;
int result;
int i;
+ int buf_len;
char *buf_ptr = port->write_urb->transfer_buffer;
dbg("%s - enter", __func__);
+ spin_lock_irqsave(&priv->lock, flags);
*buf_ptr++ = IUU_UART_ESC;
*buf_ptr++ = IUU_UART_TX;
*buf_ptr++ = priv->writelen;
- memcpy(buf_ptr, priv->writebuf,
- priv->writelen);
+ memcpy(buf_ptr, priv->writebuf, priv->writelen);
+ buf_len = priv->writelen;
+ priv->writelen = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
if (debug == 1) {
- for (i = 0; i < priv->writelen; i++)
+ for (i = 0; i < buf_len; i++)
sprintf(priv->dbgbuf + i*2 ,
"%02X", priv->writebuf[i]);
- priv->dbgbuf[priv->writelen+i*2] = 0;
+ priv->dbgbuf[buf_len+i*2] = 0;
dbg("%s - writing %i chars : %s", __func__,
- priv->writelen, priv->dbgbuf);
+ buf_len, priv->dbgbuf);
}
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, priv->writelen + 3,
+ port->write_urb->transfer_buffer, buf_len + 3,
iuu_rxcmd, port);
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- spin_lock_irqsave(&priv->lock, flags);
- priv->writelen = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
usb_serial_port_softint(port);
return result;
}
@@ -770,14 +770,10 @@ static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port,
return -ENOMEM;
spin_lock_irqsave(&priv->lock, flags);
- if (priv->writelen > 0) {
- /* buffer already filled but not commited */
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
- }
+
/* fill the buffer */
- memcpy(priv->writebuf, buf, count);
- priv->writelen = count;
+ memcpy(priv->writebuf + priv->writelen, buf, count);
+ priv->writelen += count;
spin_unlock_irqrestore(&priv->lock, flags);
return count;
@@ -819,7 +815,7 @@ static int iuu_uart_on(struct usb_serial_port *port)
buf[0] = IUU_UART_ENABLE;
buf[1] = (u8) ((IUU_BAUD_9600 >> 8) & 0x00FF);
buf[2] = (u8) (0x00FF & IUU_BAUD_9600);
- buf[3] = (u8) (0x0F0 & IUU_TWO_STOP_BITS) | (0x07 & IUU_PARITY_EVEN);
+ buf[3] = (u8) (0x0F0 & IUU_ONE_STOP_BIT) | (0x07 & IUU_PARITY_EVEN);
status = bulk_immediate(port, buf, 4);
if (status != IUU_OPERATION_OK) {
@@ -946,19 +942,59 @@ static int iuu_uart_baud(struct usb_serial_port *port, u32 baud,
return status;
}
-static int set_control_lines(struct usb_device *dev, u8 value)
+static void iuu_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port, struct ktermios *old_termios)
{
- return 0;
+ const u32 supported_mask = CMSPAR|PARENB|PARODD;
+
+ unsigned int cflag = tty->termios->c_cflag;
+ int status;
+ u32 actual;
+ u32 parity;
+ int csize = CS7;
+ int baud = 9600; /* Fixed for the moment */
+ u32 newval = cflag & supported_mask;
+
+ /* compute the parity parameter */
+ parity = 0;
+ if (cflag & CMSPAR) { /* Using mark space */
+ if (cflag & PARODD)
+ parity |= IUU_PARITY_SPACE;
+ else
+ parity |= IUU_PARITY_MARK;
+ } else if (!(cflag & PARENB)) {
+ parity |= IUU_PARITY_NONE;
+ csize = CS8;
+ } else if (cflag & PARODD)
+ parity |= IUU_PARITY_ODD;
+ else
+ parity |= IUU_PARITY_EVEN;
+
+ parity |= (cflag & CSTOPB ? IUU_TWO_STOP_BITS : IUU_ONE_STOP_BIT);
+
+ /* set it */
+ status = iuu_uart_baud(port,
+ (clockmode == 2) ? 16457 : 9600 * boost / 100,
+ &actual, parity);
+
+ /* set the termios value to the real one, so the user knows what has
+ * changed. We support few fields so it's easiest to copy the old hw
+ * settings back over and then adjust them
+ */
+ if (old_termios)
+ tty_termios_copy_hw(tty->termios, old_termios);
+ if (status != 0) /* Set failed - return old bits */
+ return;
+ /* Re-encode speed, parity and csize */
+ tty_encode_baud_rate(tty, baud, baud);
+ tty->termios->c_cflag &= ~(supported_mask|CSIZE);
+ tty->termios->c_cflag |= newval | csize;
}
-static void iuu_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void iuu_close(struct usb_serial_port *port)
{
/* iuu_led (port,255,0,0,0); */
struct usb_serial *serial;
- struct iuu_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
- unsigned int c_cflag;
serial = port->serial;
if (!serial)
@@ -968,17 +1004,6 @@ static void iuu_close(struct tty_struct *tty,
iuu_uart_off(port);
if (serial->dev) {
- if (tty) {
- c_cflag = tty->termios->c_cflag;
- if (c_cflag & HUPCL) {
- /* drop DTR and RTS */
- priv = usb_get_serial_port_data(port);
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
- set_control_lines(port->serial->dev, 0);
- }
- }
/* free writebuf */
/* shutdown our urbs */
dbg("%s - shutting down urbs", __func__);
@@ -1154,7 +1179,7 @@ static int iuu_open(struct tty_struct *tty,
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb,"
" error %d\n", __func__, result);
- iuu_close(tty, port, NULL);
+ iuu_close(port);
return -EPROTO;
} else {
dbg("%s - rxcmd OK", __func__);
@@ -1175,8 +1200,9 @@ static struct usb_serial_driver iuu_device = {
.read_bulk_callback = iuu_uart_read_callback,
.tiocmget = iuu_tiocmget,
.tiocmset = iuu_tiocmset,
+ .set_termios = iuu_set_termios,
.attach = iuu_startup,
- .shutdown = iuu_shutdown,
+ .release = iuu_release,
};
static int __init iuu_init(void)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 00daa8f7759..2594b8743d3 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1298,8 +1298,16 @@ static inline void stop_urb(struct urb *urb)
usb_kill_urb(urb);
}
-static void keyspan_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void keyspan_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);
+
+ p_priv->rts_state = on;
+ p_priv->dtr_state = on;
+ keyspan_send_setup(port, 0);
+}
+
+static void keyspan_close(struct usb_serial_port *port)
{
int i;
struct usb_serial *serial = port->serial;
@@ -1336,7 +1344,6 @@ static void keyspan_close(struct tty_struct *tty,
stop_urb(p_priv->out_urbs[i]);
}
}
- tty_port_tty_set(&port->port, NULL);
}
/* download the firmware to a pre-renumeration device */
@@ -2682,7 +2689,7 @@ static int keyspan_startup(struct usb_serial *serial)
return 0;
}
-static void keyspan_shutdown(struct usb_serial *serial)
+static void keyspan_disconnect(struct usb_serial *serial)
{
int i, j;
struct usb_serial_port *port;
@@ -2722,6 +2729,17 @@ static void keyspan_shutdown(struct usb_serial *serial)
usb_free_urb(p_priv->out_urbs[j]);
}
}
+}
+
+static void keyspan_release(struct usb_serial *serial)
+{
+ int i;
+ struct usb_serial_port *port;
+ struct keyspan_serial_private *s_priv;
+
+ dbg("%s", __func__);
+
+ s_priv = usb_get_serial_data(serial);
/* dbg("Freeing serial->private."); */
kfree(s_priv);
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 38b4582e073..3107ed15af6 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -38,11 +38,11 @@
static int keyspan_open (struct tty_struct *tty,
struct usb_serial_port *port,
struct file *filp);
-static void keyspan_close (struct tty_struct *tty,
- struct usb_serial_port *port,
- struct file *filp);
+static void keyspan_close (struct usb_serial_port *port);
+static void keyspan_dtr_rts (struct usb_serial_port *port, int on);
static int keyspan_startup (struct usb_serial *serial);
-static void keyspan_shutdown (struct usb_serial *serial);
+static void keyspan_disconnect (struct usb_serial *serial);
+static void keyspan_release (struct usb_serial *serial);
static int keyspan_write_room (struct tty_struct *tty);
static int keyspan_write (struct tty_struct *tty,
@@ -562,6 +562,7 @@ static struct usb_serial_driver keyspan_1port_device = {
.num_ports = 1,
.open = keyspan_open,
.close = keyspan_close,
+ .dtr_rts = keyspan_dtr_rts,
.write = keyspan_write,
.write_room = keyspan_write_room,
.set_termios = keyspan_set_termios,
@@ -569,7 +570,8 @@ static struct usb_serial_driver keyspan_1port_device = {
.tiocmget = keyspan_tiocmget,
.tiocmset = keyspan_tiocmset,
.attach = keyspan_startup,
- .shutdown = keyspan_shutdown,
+ .disconnect = keyspan_disconnect,
+ .release = keyspan_release,
};
static struct usb_serial_driver keyspan_2port_device = {
@@ -582,6 +584,7 @@ static struct usb_serial_driver keyspan_2port_device = {
.num_ports = 2,
.open = keyspan_open,
.close = keyspan_close,
+ .dtr_rts = keyspan_dtr_rts,
.write = keyspan_write,
.write_room = keyspan_write_room,
.set_termios = keyspan_set_termios,
@@ -589,7 +592,8 @@ static struct usb_serial_driver keyspan_2port_device = {
.tiocmget = keyspan_tiocmget,
.tiocmset = keyspan_tiocmset,
.attach = keyspan_startup,
- .shutdown = keyspan_shutdown,
+ .disconnect = keyspan_disconnect,
+ .release = keyspan_release,
};
static struct usb_serial_driver keyspan_4port_device = {
@@ -602,6 +606,7 @@ static struct usb_serial_driver keyspan_4port_device = {
.num_ports = 4,
.open = keyspan_open,
.close = keyspan_close,
+ .dtr_rts = keyspan_dtr_rts,
.write = keyspan_write,
.write_room = keyspan_write_room,
.set_termios = keyspan_set_termios,
@@ -609,7 +614,8 @@ static struct usb_serial_driver keyspan_4port_device = {
.tiocmget = keyspan_tiocmget,
.tiocmset = keyspan_tiocmset,
.attach = keyspan_startup,
- .shutdown = keyspan_shutdown,
+ .disconnect = keyspan_disconnect,
+ .release = keyspan_release,
};
#endif
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index bf1ae247da6..d0b12e40c2b 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -651,6 +651,35 @@ static int keyspan_pda_chars_in_buffer(struct tty_struct *tty)
}
+static void keyspan_pda_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct usb_serial *serial = port->serial;
+
+ if (serial->dev) {
+ if (on)
+ keyspan_pda_set_modem_info(serial, (1<<7) | (1<< 2));
+ else
+ keyspan_pda_set_modem_info(serial, 0);
+ }
+}
+
+static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
+{
+ struct usb_serial *serial = port->serial;
+ unsigned char modembits;
+
+ /* If we can read the modem status and the DCD is low then
+ carrier is not raised yet */
+ if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
+ if (!(modembits & (1<<6)))
+ return 0;
+ }
+ /* Carrier raised, or we failed (eg disconnected) so
+ progress accordingly */
+ return 1;
+}
+
+
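keyspan_pda_carrier_raised only needs the DCD bit of the modem-status byte; assuming the layout used elsewhere in this driver (DCD in bit 6, hence the 1<<6 mask), a hedged sketch of that check:

#include <stdio.h>

/* Modem-status bit used above: DCD is assumed to live in bit 6 of the
 * byte returned by keyspan_pda_get_modem_info(). */
#define MSR_DCD (1 << 6)

static int carrier_raised(int have_status, unsigned char modembits)
{
	if (have_status && !(modembits & MSR_DCD))
		return 0;	/* status readable and DCD low: no carrier yet */
	return 1;		/* DCD high, or we could not ask (e.g. disconnected) */
}

int main(void)
{
	printf("DCD low : %d\n", carrier_raised(1, 0x00));
	printf("DCD high: %d\n", carrier_raised(1, MSR_DCD));
	printf("no info : %d\n", carrier_raised(0, 0x00));
	return 0;
}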
static int keyspan_pda_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp)
{
@@ -682,13 +711,6 @@ static int keyspan_pda_open(struct tty_struct *tty,
priv->tx_room = room;
priv->tx_throttled = room ? 0 : 1;
- /* the normal serial device seems to always turn on DTR and RTS here,
- so do the same */
- if (tty && (tty->termios->c_cflag & CBAUD))
- keyspan_pda_set_modem_info(serial, (1<<7) | (1<<2));
- else
- keyspan_pda_set_modem_info(serial, 0);
-
/*Start reading from the device*/
port->interrupt_in_urb->dev = serial->dev;
rc = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
@@ -700,19 +722,11 @@ static int keyspan_pda_open(struct tty_struct *tty,
error:
return rc;
}
-
-
-static void keyspan_pda_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void keyspan_pda_close(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
if (serial->dev) {
- /* the normal serial device seems to always shut
- off DTR and RTS now */
- if (tty->termios->c_cflag & HUPCL)
- keyspan_pda_set_modem_info(serial, 0);
-
/* shutdown our bulk reads and writes */
usb_kill_urb(port->write_urb);
usb_kill_urb(port->interrupt_in_urb);
@@ -795,7 +809,7 @@ static int keyspan_pda_startup(struct usb_serial *serial)
return 0;
}
-static void keyspan_pda_shutdown(struct usb_serial *serial)
+static void keyspan_pda_release(struct usb_serial *serial)
{
dbg("%s", __func__);
@@ -839,6 +853,8 @@ static struct usb_serial_driver keyspan_pda_device = {
.usb_driver = &keyspan_pda_driver,
.id_table = id_table_std,
.num_ports = 1,
+ .dtr_rts = keyspan_pda_dtr_rts,
+ .carrier_raised = keyspan_pda_carrier_raised,
.open = keyspan_pda_open,
.close = keyspan_pda_close,
.write = keyspan_pda_write,
@@ -853,7 +869,7 @@ static struct usb_serial_driver keyspan_pda_device = {
.tiocmget = keyspan_pda_tiocmget,
.tiocmset = keyspan_pda_tiocmset,
.attach = keyspan_pda_startup,
- .shutdown = keyspan_pda_shutdown,
+ .release = keyspan_pda_release,
};
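The keyspan_pda conversion moves all DTR/RTS handling out of open()/close() and into the new dtr_rts() and carrier_raised() hooks. For readability, the modem-status bits those helpers test can be written as named masks; the DTR and RTS positions follow keyspan_pda_dtr_rts() above, while the DCD position (bit 6) is an assumption based on the driver's tiocmget mapping, which is not part of this hunk:

	/* Illustrative only; the KPDA_* names and the DCD position are not from the patch. */
	#define KPDA_DTR	(1 << 7)
	#define KPDA_DCD	(1 << 6)	/* assumed: carrier detect */
	#define KPDA_RTS	(1 << 2)

	static int kpda_carrier_present(unsigned char modembits)
	{
		return (modembits & KPDA_DCD) != 0;
	}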
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index fcd9082f3e7..0f44bb8e8d4 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -73,11 +73,11 @@ static int debug;
* Function prototypes
*/
static int klsi_105_startup(struct usb_serial *serial);
-static void klsi_105_shutdown(struct usb_serial *serial);
+static void klsi_105_disconnect(struct usb_serial *serial);
+static void klsi_105_release(struct usb_serial *serial);
static int klsi_105_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void klsi_105_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void klsi_105_close(struct usb_serial_port *port);
static int klsi_105_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count);
static void klsi_105_write_bulk_callback(struct urb *urb);
@@ -132,7 +132,8 @@ static struct usb_serial_driver kl5kusb105d_device = {
.tiocmget = klsi_105_tiocmget,
.tiocmset = klsi_105_tiocmset,
.attach = klsi_105_startup,
- .shutdown = klsi_105_shutdown,
+ .disconnect = klsi_105_disconnect,
+ .release = klsi_105_release,
.throttle = klsi_105_throttle,
.unthrottle = klsi_105_unthrottle,
};
@@ -316,7 +317,7 @@ err_cleanup:
} /* klsi_105_startup */
-static void klsi_105_shutdown(struct usb_serial *serial)
+static void klsi_105_disconnect(struct usb_serial *serial)
{
int i;
@@ -326,33 +327,36 @@ static void klsi_105_shutdown(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
struct klsi_105_private *priv =
usb_get_serial_port_data(serial->port[i]);
- unsigned long flags;
if (priv) {
/* kill our write urb pool */
int j;
struct urb **write_urbs = priv->write_urb_pool;
- spin_lock_irqsave(&priv->lock, flags);
for (j = 0; j < NUM_URBS; j++) {
if (write_urbs[j]) {
- /* FIXME - uncomment the following
- * usb_kill_urb call when the host
- * controllers get fixed to set
- * urb->dev = NULL after the urb is
- * finished. Otherwise this call
- * oopses. */
- /* usb_kill_urb(write_urbs[j]); */
- kfree(write_urbs[j]->transfer_buffer);
+ usb_kill_urb(write_urbs[j]);
usb_free_urb(write_urbs[j]);
}
}
- spin_unlock_irqrestore(&priv->lock, flags);
- kfree(priv);
- usb_set_serial_port_data(serial->port[i], NULL);
}
}
-} /* klsi_105_shutdown */
+} /* klsi_105_disconnect */
+
+
+static void klsi_105_release(struct usb_serial *serial)
+{
+ int i;
+
+ dbg("%s", __func__);
+
+ for (i = 0; i < serial->num_ports; ++i) {
+ struct klsi_105_private *priv =
+ usb_get_serial_port_data(serial->port[i]);
+
+ kfree(priv);
+ }
+} /* klsi_105_release */
static int klsi_105_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp)
@@ -447,8 +451,7 @@ exit:
} /* klsi_105_open */
-static void klsi_105_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void klsi_105_close(struct usb_serial_port *port)
{
struct klsi_105_private *priv = usb_get_serial_port_data(port);
int rc;
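The klsi_105 hunks show the intended split between the two new callbacks: disconnect() runs when the USB device goes away and should only quiesce outstanding I/O, while release() runs when the last reference to the usb_serial is dropped and is the right place to free memory. A generic sketch of that division for a driver with per-port private data (hypothetical example_* names, not code from this patch):

	#include <linux/slab.h>
	#include <linux/usb.h>
	#include <linux/usb/serial.h>

	struct example_port_private {
		struct urb *write_urb;
	};

	static void example_disconnect(struct usb_serial *serial)
	{
		int i;

		for (i = 0; i < serial->num_ports; ++i) {
			struct example_port_private *priv =
				usb_get_serial_port_data(serial->port[i]);

			if (priv)
				usb_kill_urb(priv->write_urb);	/* stop I/O, free nothing yet */
		}
	}

	static void example_release(struct usb_serial *serial)
	{
		int i;

		for (i = 0; i < serial->num_ports; ++i) {
			struct example_port_private *priv =
				usb_get_serial_port_data(serial->port[i]);

			if (priv) {
				usb_free_urb(priv->write_urb);
				kfree(priv);	/* safe: no further callers */
			}
		}
	}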
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index c148544953b..6db0e561f68 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -69,11 +69,10 @@ static int debug;
/* Function prototypes */
static int kobil_startup(struct usb_serial *serial);
-static void kobil_shutdown(struct usb_serial *serial);
+static void kobil_release(struct usb_serial *serial);
static int kobil_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void kobil_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void kobil_close(struct usb_serial_port *port);
static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int kobil_write_room(struct tty_struct *tty);
@@ -118,7 +117,7 @@ static struct usb_serial_driver kobil_device = {
.id_table = id_table,
.num_ports = 1,
.attach = kobil_startup,
- .shutdown = kobil_shutdown,
+ .release = kobil_release,
.ioctl = kobil_ioctl,
.set_termios = kobil_set_termios,
.tiocmget = kobil_tiocmget,
@@ -202,17 +201,13 @@ static int kobil_startup(struct usb_serial *serial)
}
-static void kobil_shutdown(struct usb_serial *serial)
+static void kobil_release(struct usb_serial *serial)
{
int i;
dbg("%s - port %d", __func__, serial->port[0]->number);
- for (i = 0; i < serial->num_ports; ++i) {
- while (serial->port[i]->port.count > 0)
- kobil_close(NULL, serial->port[i], NULL);
+ for (i = 0; i < serial->num_ports; ++i)
kfree(usb_get_serial_port_data(serial->port[i]));
- usb_set_serial_port_data(serial->port[i], NULL);
- }
}
@@ -346,11 +341,11 @@ static int kobil_open(struct tty_struct *tty,
}
-static void kobil_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void kobil_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
+ /* FIXME: Add rts/dtr methods */
if (port->write_urb) {
usb_kill_urb(port->write_urb);
usb_free_urb(port->write_urb);
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 82930a7d509..d8825e159aa 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -92,11 +92,11 @@ static int debug;
* Function prototypes
*/
static int mct_u232_startup(struct usb_serial *serial);
-static void mct_u232_shutdown(struct usb_serial *serial);
+static void mct_u232_release(struct usb_serial *serial);
static int mct_u232_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void mct_u232_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void mct_u232_close(struct usb_serial_port *port);
+static void mct_u232_dtr_rts(struct usb_serial_port *port, int on);
static void mct_u232_read_int_callback(struct urb *urb);
static void mct_u232_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
@@ -140,6 +140,7 @@ static struct usb_serial_driver mct_u232_device = {
.num_ports = 1,
.open = mct_u232_open,
.close = mct_u232_close,
+ .dtr_rts = mct_u232_dtr_rts,
.throttle = mct_u232_throttle,
.unthrottle = mct_u232_unthrottle,
.read_int_callback = mct_u232_read_int_callback,
@@ -148,7 +149,7 @@ static struct usb_serial_driver mct_u232_device = {
.tiocmget = mct_u232_tiocmget,
.tiocmset = mct_u232_tiocmset,
.attach = mct_u232_startup,
- .shutdown = mct_u232_shutdown,
+ .release = mct_u232_release,
};
@@ -406,7 +407,7 @@ static int mct_u232_startup(struct usb_serial *serial)
} /* mct_u232_startup */
-static void mct_u232_shutdown(struct usb_serial *serial)
+static void mct_u232_release(struct usb_serial *serial)
{
struct mct_u232_private *priv;
int i;
@@ -416,12 +417,9 @@ static void mct_u232_shutdown(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
/* My special items, the standard routines free my urbs */
priv = usb_get_serial_port_data(serial->port[i]);
- if (priv) {
- usb_set_serial_port_data(serial->port[i], NULL);
- kfree(priv);
- }
+ kfree(priv);
}
-} /* mct_u232_shutdown */
+} /* mct_u232_release */
static int mct_u232_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp)
@@ -496,29 +494,29 @@ error:
return retval;
} /* mct_u232_open */
-
-static void mct_u232_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
{
- unsigned int c_cflag;
unsigned int control_state;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
- dbg("%s port %d", __func__, port->number);
- if (tty) {
- c_cflag = tty->termios->c_cflag;
- mutex_lock(&port->serial->disc_mutex);
- if (c_cflag & HUPCL && !port->serial->disconnected) {
- /* drop DTR and RTS */
- spin_lock_irq(&priv->lock);
+ mutex_lock(&port->serial->disc_mutex);
+ if (!port->serial->disconnected) {
+ /* drop DTR and RTS */
+ spin_lock_irq(&priv->lock);
+ if (on)
+ priv->control_state |= TIOCM_DTR | TIOCM_RTS;
+ else
priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
- control_state = priv->control_state;
- spin_unlock_irq(&priv->lock);
- mct_u232_set_modem_ctrl(port->serial, control_state);
- }
- mutex_unlock(&port->serial->disc_mutex);
+ control_state = priv->control_state;
+ spin_unlock_irq(&priv->lock);
+ mct_u232_set_modem_ctrl(port->serial, control_state);
}
+ mutex_unlock(&port->serial->disc_mutex);
+}
+static void mct_u232_close(struct usb_serial_port *port)
+{
+ dbg("%s port %d", __func__, port->number);
if (port->serial->dev) {
/* shutdown our urbs */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 24e3b5d4b4d..bfc5ce000ef 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -533,8 +533,7 @@ static int mos7720_chars_in_buffer(struct tty_struct *tty)
return chars;
}
-static void mos7720_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void mos7720_close(struct usb_serial_port *port)
{
struct usb_serial *serial;
struct moschip_port *mos7720_port;
@@ -1522,19 +1521,16 @@ static int mos7720_startup(struct usb_serial *serial)
return 0;
}
-static void mos7720_shutdown(struct usb_serial *serial)
+static void mos7720_release(struct usb_serial *serial)
{
int i;
/* free private structure allocated for serial port */
- for (i = 0; i < serial->num_ports; ++i) {
+ for (i = 0; i < serial->num_ports; ++i)
kfree(usb_get_serial_port_data(serial->port[i]));
- usb_set_serial_port_data(serial->port[i], NULL);
- }
/* free private structure allocated for serial device */
kfree(usb_get_serial_data(serial));
- usb_set_serial_data(serial, NULL);
}
static struct usb_driver usb_driver = {
@@ -1559,7 +1555,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
.throttle = mos7720_throttle,
.unthrottle = mos7720_unthrottle,
.attach = mos7720_startup,
- .shutdown = mos7720_shutdown,
+ .release = mos7720_release,
.ioctl = mos7720_ioctl,
.set_termios = mos7720_set_termios,
.write = mos7720_write,
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 84fb1dcd30d..c40f95c1951 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -238,7 +238,7 @@ static int mos7840_set_reg_sync(struct usb_serial_port *port, __u16 reg,
{
struct usb_device *dev = port->serial->dev;
val = val & 0x00ff;
- dbg("mos7840_set_reg_sync offset is %x, value %x\n", reg, val);
+ dbg("mos7840_set_reg_sync offset is %x, value %x", reg, val);
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
MCS_WR_RTYPE, val, reg, NULL, 0,
@@ -260,7 +260,7 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
- dbg("mos7840_get_reg_sync offset is %x, return val %x\n", reg, *val);
+ dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);
*val = (*val) & 0x00ff;
return ret;
}
@@ -282,18 +282,18 @@ static int mos7840_set_uart_reg(struct usb_serial_port *port, __u16 reg,
if (port->serial->num_ports == 4) {
val |= (((__u16) port->number -
(__u16) (port->serial->minor)) + 1) << 8;
- dbg("mos7840_set_uart_reg application number is %x\n", val);
+ dbg("mos7840_set_uart_reg application number is %x", val);
} else {
if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) {
val |= (((__u16) port->number -
(__u16) (port->serial->minor)) + 1) << 8;
- dbg("mos7840_set_uart_reg application number is %x\n",
+ dbg("mos7840_set_uart_reg application number is %x",
val);
} else {
val |=
(((__u16) port->number -
(__u16) (port->serial->minor)) + 2) << 8;
- dbg("mos7840_set_uart_reg application number is %x\n",
+ dbg("mos7840_set_uart_reg application number is %x",
val);
}
}
@@ -315,24 +315,24 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
int ret = 0;
__u16 Wval;
- /* dbg("application number is %4x \n",
+ /* dbg("application number is %4x",
(((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
/* Wval is same as application number */
if (port->serial->num_ports == 4) {
Wval =
(((__u16) port->number - (__u16) (port->serial->minor)) +
1) << 8;
- dbg("mos7840_get_uart_reg application number is %x\n", Wval);
+ dbg("mos7840_get_uart_reg application number is %x", Wval);
} else {
if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) {
Wval = (((__u16) port->number -
(__u16) (port->serial->minor)) + 1) << 8;
- dbg("mos7840_get_uart_reg application number is %x\n",
+ dbg("mos7840_get_uart_reg application number is %x",
Wval);
} else {
Wval = (((__u16) port->number -
(__u16) (port->serial->minor)) + 2) << 8;
- dbg("mos7840_get_uart_reg application number is %x\n",
+ dbg("mos7840_get_uart_reg application number is %x",
Wval);
}
}
@@ -346,11 +346,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
static void mos7840_dump_serial_port(struct moschip_port *mos7840_port)
{
- dbg("***************************************\n");
- dbg("SpRegOffset is %2x\n", mos7840_port->SpRegOffset);
- dbg("ControlRegOffset is %2x \n", mos7840_port->ControlRegOffset);
- dbg("DCRRegOffset is %2x \n", mos7840_port->DcrRegOffset);
- dbg("***************************************\n");
+ dbg("***************************************");
+ dbg("SpRegOffset is %2x", mos7840_port->SpRegOffset);
+ dbg("ControlRegOffset is %2x", mos7840_port->ControlRegOffset);
+ dbg("DCRRegOffset is %2x", mos7840_port->DcrRegOffset);
+ dbg("***************************************");
}
@@ -474,12 +474,12 @@ static void mos7840_control_callback(struct urb *urb)
goto exit;
}
- dbg("%s urb buffer size is %d\n", __func__, urb->actual_length);
- dbg("%s mos7840_port->MsrLsr is %d port %d\n", __func__,
+ dbg("%s urb buffer size is %d", __func__, urb->actual_length);
+ dbg("%s mos7840_port->MsrLsr is %d port %d", __func__,
mos7840_port->MsrLsr, mos7840_port->port_num);
data = urb->transfer_buffer;
regval = (__u8) data[0];
- dbg("%s data is %x\n", __func__, regval);
+ dbg("%s data is %x", __func__, regval);
if (mos7840_port->MsrLsr == 0)
mos7840_handle_new_msr(mos7840_port, regval);
else if (mos7840_port->MsrLsr == 1)
@@ -538,7 +538,7 @@ static void mos7840_interrupt_callback(struct urb *urb)
__u16 wval, wreg = 0;
int status = urb->status;
- dbg("%s", " : Entering\n");
+ dbg("%s", " : Entering");
switch (status) {
case 0:
@@ -570,7 +570,7 @@ static void mos7840_interrupt_callback(struct urb *urb)
* Byte 5 FIFO status for both */
if (length && length > 5) {
- dbg("%s \n", "Wrong data !!!");
+ dbg("%s", "Wrong data !!!");
return;
}
@@ -587,17 +587,17 @@ static void mos7840_interrupt_callback(struct urb *urb)
(__u16) (serial->minor)) + 1) << 8;
if (mos7840_port->open) {
if (sp[i] & 0x01) {
- dbg("SP%d No Interrupt !!!\n", i);
+ dbg("SP%d No Interrupt !!!", i);
} else {
switch (sp[i] & 0x0f) {
case SERIAL_IIR_RLS:
dbg("Serial Port %d: Receiver status error or ", i);
- dbg("address bit detected in 9-bit mode\n");
+ dbg("address bit detected in 9-bit mode");
mos7840_port->MsrLsr = 1;
wreg = LINE_STATUS_REGISTER;
break;
case SERIAL_IIR_MS:
- dbg("Serial Port %d: Modem status change\n", i);
+ dbg("Serial Port %d: Modem status change", i);
mos7840_port->MsrLsr = 0;
wreg = MODEM_STATUS_REGISTER;
break;
@@ -689,7 +689,7 @@ static void mos7840_bulk_in_callback(struct urb *urb)
mos7840_port = urb->context;
if (!mos7840_port) {
- dbg("%s", "NULL mos7840_port pointer \n");
+ dbg("%s", "NULL mos7840_port pointer");
mos7840_port->read_urb_busy = false;
return;
}
@@ -702,41 +702,41 @@ static void mos7840_bulk_in_callback(struct urb *urb)
port = (struct usb_serial_port *)mos7840_port->port;
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Port Paranoia failed \n");
+ dbg("%s", "Port Paranoia failed");
mos7840_port->read_urb_busy = false;
return;
}
serial = mos7840_get_usb_serial(port, __func__);
if (!serial) {
- dbg("%s\n", "Bad serial pointer ");
+ dbg("%s", "Bad serial pointer");
mos7840_port->read_urb_busy = false;
return;
}
- dbg("%s\n", "Entering... \n");
+ dbg("%s", "Entering... ");
data = urb->transfer_buffer;
- dbg("%s", "Entering ........... \n");
+ dbg("%s", "Entering ...........");
if (urb->actual_length) {
tty = tty_port_tty_get(&mos7840_port->port->port);
if (tty) {
tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
- dbg(" %s \n", data);
+ dbg(" %s ", data);
tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
mos7840_port->icount.rx += urb->actual_length;
smp_wmb();
- dbg("mos7840_port->icount.rx is %d:\n",
+ dbg("mos7840_port->icount.rx is %d:",
mos7840_port->icount.rx);
}
if (!mos7840_port->read_urb) {
- dbg("%s", "URB KILLED !!!\n");
+ dbg("%s", "URB KILLED !!!");
mos7840_port->read_urb_busy = false;
return;
}
@@ -777,16 +777,16 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
spin_unlock(&mos7840_port->pool_lock);
if (status) {
- dbg("nonzero write bulk status received:%d\n", status);
+ dbg("nonzero write bulk status received:%d", status);
return;
}
if (mos7840_port_paranoia_check(mos7840_port->port, __func__)) {
- dbg("%s", "Port Paranoia failed \n");
+ dbg("%s", "Port Paranoia failed");
return;
}
- dbg("%s \n", "Entering .........");
+ dbg("%s", "Entering .........");
tty = tty_port_tty_get(&mos7840_port->port->port);
if (tty && mos7840_port->open)
@@ -830,15 +830,17 @@ static int mos7840_open(struct tty_struct *tty,
struct moschip_port *mos7840_port;
struct moschip_port *port0;
+ dbg("%s enter", __func__);
+
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Port Paranoia failed \n");
+ dbg("%s", "Port Paranoia failed");
return -ENODEV;
}
serial = port->serial;
if (mos7840_serial_paranoia_check(serial, __func__)) {
- dbg("%s", "Serial Paranoia failed \n");
+ dbg("%s", "Serial Paranoia failed");
return -ENODEV;
}
@@ -891,20 +893,20 @@ static int mos7840_open(struct tty_struct *tty,
Data = 0x0;
status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
if (status < 0) {
- dbg("Reading Spreg failed\n");
+ dbg("Reading Spreg failed");
return -1;
}
Data |= 0x80;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
if (status < 0) {
- dbg("writing Spreg failed\n");
+ dbg("writing Spreg failed");
return -1;
}
Data &= ~0x80;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
if (status < 0) {
- dbg("writing Spreg failed\n");
+ dbg("writing Spreg failed");
return -1;
}
/* End of block to be checked */
@@ -913,7 +915,7 @@ static int mos7840_open(struct tty_struct *tty,
status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
&Data);
if (status < 0) {
- dbg("Reading Controlreg failed\n");
+ dbg("Reading Controlreg failed");
return -1;
}
Data |= 0x08; /* Driver done bit */
@@ -921,7 +923,7 @@ static int mos7840_open(struct tty_struct *tty,
status = mos7840_set_reg_sync(port,
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
- dbg("writing Controlreg failed\n");
+ dbg("writing Controlreg failed");
return -1;
}
/* do register settings here */
@@ -932,21 +934,21 @@ static int mos7840_open(struct tty_struct *tty,
Data = 0x00;
status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
if (status < 0) {
- dbg("disableing interrupts failed\n");
+ dbg("disabling interrupts failed");
return -1;
}
/* Set FIFO_CONTROL_REGISTER to the default value */
Data = 0x00;
status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
if (status < 0) {
- dbg("Writing FIFO_CONTROL_REGISTER failed\n");
+ dbg("Writing FIFO_CONTROL_REGISTER failed");
return -1;
}
Data = 0xcf;
status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
if (status < 0) {
- dbg("Writing FIFO_CONTROL_REGISTER failed\n");
+ dbg("Writing FIFO_CONTROL_REGISTER failed");
return -1;
}
@@ -1043,12 +1045,12 @@ static int mos7840_open(struct tty_struct *tty,
* (can't set it up in mos7840_startup as the *
* structures were not set up at that time.) */
- dbg("port number is %d \n", port->number);
- dbg("serial number is %d \n", port->serial->minor);
- dbg("Bulkin endpoint is %d \n", port->bulk_in_endpointAddress);
- dbg("BulkOut endpoint is %d \n", port->bulk_out_endpointAddress);
- dbg("Interrupt endpoint is %d \n", port->interrupt_in_endpointAddress);
- dbg("port's number in the device is %d\n", mos7840_port->port_num);
+ dbg("port number is %d", port->number);
+ dbg("serial number is %d", port->serial->minor);
+ dbg("Bulkin endpoint is %d", port->bulk_in_endpointAddress);
+ dbg("BulkOut endpoint is %d", port->bulk_out_endpointAddress);
+ dbg("Interrupt endpoint is %d", port->interrupt_in_endpointAddress);
+ dbg("port's number in the device is %d", mos7840_port->port_num);
mos7840_port->read_urb = port->read_urb;
/* set up our bulk in urb */
@@ -1061,7 +1063,7 @@ static int mos7840_open(struct tty_struct *tty,
mos7840_port->read_urb->transfer_buffer_length,
mos7840_bulk_in_callback, mos7840_port);
- dbg("mos7840_open: bulkin endpoint is %d\n",
+ dbg("mos7840_open: bulkin endpoint is %d",
port->bulk_in_endpointAddress);
mos7840_port->read_urb_busy = true;
response = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
@@ -1087,9 +1089,11 @@ static int mos7840_open(struct tty_struct *tty,
mos7840_port->icount.tx = 0;
mos7840_port->icount.rx = 0;
- dbg("\n\nusb_serial serial:%p mos7840_port:%p\n usb_serial_port port:%p\n\n",
+ dbg("usb_serial serial:%p mos7840_port:%p\n usb_serial_port port:%p",
serial, mos7840_port, port);
+ dbg("%s leave", __func__);
+
return 0;
}
@@ -1112,16 +1116,16 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
unsigned long flags;
struct moschip_port *mos7840_port;
- dbg("%s \n", " mos7840_chars_in_buffer:entering ...........");
+ dbg("%s", " mos7840_chars_in_buffer:entering ...........");
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Invalid port \n");
+ dbg("%s", "Invalid port");
return 0;
}
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port == NULL) {
- dbg("%s \n", "mos7840_break:leaving ...........");
+ dbg("%s", "mos7840_break:leaving ...........");
return 0;
}
@@ -1135,54 +1139,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
}
-/************************************************************************
- *
- * mos7840_block_until_tx_empty
- *
- * This function will block the close until one of the following:
- * 1. TX count are 0
- * 2. The mos7840 has stopped
- * 3. A timeout of 3 seconds without activity has expired
- *
- ************************************************************************/
-static void mos7840_block_until_tx_empty(struct tty_struct *tty,
- struct moschip_port *mos7840_port)
-{
- int timeout = HZ / 10;
- int wait = 30;
- int count;
-
- while (1) {
-
- count = mos7840_chars_in_buffer(tty);
-
- /* Check for Buffer status */
- if (count <= 0)
- return;
-
- /* Block the thread for a while */
- interruptible_sleep_on_timeout(&mos7840_port->wait_chase,
- timeout);
-
- /* No activity.. count down section */
- wait--;
- if (wait == 0) {
- dbg("%s - TIMEOUT", __func__);
- return;
- } else {
- /* Reset timeout value back to seconds */
- wait = 30;
- }
- }
-}
-
/*****************************************************************************
* mos7840_close
* this function is called by the tty driver when a port is closed
*****************************************************************************/
-static void mos7840_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void mos7840_close(struct usb_serial_port *port)
{
struct usb_serial *serial;
struct moschip_port *mos7840_port;
@@ -1190,16 +1152,16 @@ static void mos7840_close(struct tty_struct *tty,
int j;
__u16 Data;
- dbg("%s\n", "mos7840_close:entering...");
+ dbg("%s", "mos7840_close:entering...");
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Port Paranoia failed \n");
+ dbg("%s", "Port Paranoia failed");
return;
}
serial = mos7840_get_usb_serial(port, __func__);
if (!serial) {
- dbg("%s", "Serial Paranoia failed \n");
+ dbg("%s", "Serial Paranoia failed");
return;
}
@@ -1223,35 +1185,31 @@ static void mos7840_close(struct tty_struct *tty,
}
}
- if (serial->dev)
- /* flush and block until tx is empty */
- mos7840_block_until_tx_empty(tty, mos7840_port);
-
/* While closing port, shutdown all bulk read, write *
* and interrupt read if they exists */
if (serial->dev) {
if (mos7840_port->write_urb) {
- dbg("%s", "Shutdown bulk write\n");
+ dbg("%s", "Shutdown bulk write");
usb_kill_urb(mos7840_port->write_urb);
}
if (mos7840_port->read_urb) {
- dbg("%s", "Shutdown bulk read\n");
+ dbg("%s", "Shutdown bulk read");
usb_kill_urb(mos7840_port->read_urb);
mos7840_port->read_urb_busy = false;
}
if ((&mos7840_port->control_urb)) {
- dbg("%s", "Shutdown control read\n");
+ dbg("%s", "Shutdown control read");
/*/ usb_kill_urb (mos7840_port->control_urb); */
}
}
/* if(mos7840_port->ctrl_buf != NULL) */
/* kfree(mos7840_port->ctrl_buf); */
port0->open_ports--;
- dbg("mos7840_num_open_ports in close%d:in port%d\n",
+ dbg("mos7840_num_open_ports in close%d:in port%d",
port0->open_ports, port->number);
if (port0->open_ports == 0) {
if (serial->port[0]->interrupt_in_urb) {
- dbg("%s", "Shutdown interrupt_in_urb\n");
+ dbg("%s", "Shutdown interrupt_in_urb");
usb_kill_urb(serial->port[0]->interrupt_in_urb);
}
}
@@ -1271,7 +1229,7 @@ static void mos7840_close(struct tty_struct *tty,
mos7840_port->open = 0;
- dbg("%s \n", "Leaving ............");
+ dbg("%s", "Leaving ............");
}
/************************************************************************
@@ -1326,17 +1284,17 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
struct usb_serial *serial;
struct moschip_port *mos7840_port;
- dbg("%s \n", "Entering ...........");
- dbg("mos7840_break: Start\n");
+ dbg("%s", "Entering ...........");
+ dbg("mos7840_break: Start");
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Port Paranoia failed \n");
+ dbg("%s", "Port Paranoia failed");
return;
}
serial = mos7840_get_usb_serial(port, __func__);
if (!serial) {
- dbg("%s", "Serial Paranoia failed \n");
+ dbg("%s", "Serial Paranoia failed");
return;
}
@@ -1356,7 +1314,7 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
/* FIXME: no locking on shadowLCR anywhere in driver */
mos7840_port->shadowLCR = data;
- dbg("mcs7840_break mos7840_port->shadowLCR is %x\n",
+ dbg("mcs7840_break mos7840_port->shadowLCR is %x",
mos7840_port->shadowLCR);
mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER,
mos7840_port->shadowLCR);
@@ -1380,17 +1338,17 @@ static int mos7840_write_room(struct tty_struct *tty)
unsigned long flags;
struct moschip_port *mos7840_port;
- dbg("%s \n", " mos7840_write_room:entering ...........");
+ dbg("%s", " mos7840_write_room:entering ...........");
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Invalid port \n");
- dbg("%s \n", " mos7840_write_room:leaving ...........");
+ dbg("%s", "Invalid port");
+ dbg("%s", " mos7840_write_room:leaving ...........");
return -1;
}
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port == NULL) {
- dbg("%s \n", "mos7840_break:leaving ...........");
+ dbg("%s", "mos7840_break:leaving ...........");
return -1;
}
@@ -1430,16 +1388,16 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
/* __u16 Data; */
const unsigned char *current_position = data;
unsigned char *data1;
- dbg("%s \n", "entering ...........");
- /* dbg("mos7840_write: mos7840_port->shadowLCR is %x\n",
+ dbg("%s", "entering ...........");
+ /* dbg("mos7840_write: mos7840_port->shadowLCR is %x",
mos7840_port->shadowLCR); */
#ifdef NOTMOS7840
Data = 0x00;
status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
mos7840_port->shadowLCR = Data;
- dbg("mos7840_write: LINE_CONTROL_REGISTER is %x\n", Data);
- dbg("mos7840_write: mos7840_port->shadowLCR is %x\n",
+ dbg("mos7840_write: LINE_CONTROL_REGISTER is %x", Data);
+ dbg("mos7840_write: mos7840_port->shadowLCR is %x",
mos7840_port->shadowLCR);
/* Data = 0x03; */
@@ -1453,32 +1411,32 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
/* status = mos7840_set_uart_reg(port,DIVISOR_LATCH_LSB,Data); */
Data = 0x00;
status = mos7840_get_uart_reg(port, DIVISOR_LATCH_LSB, &Data);
- dbg("mos7840_write:DLL value is %x\n", Data);
+ dbg("mos7840_write:DLL value is %x", Data);
Data = 0x0;
status = mos7840_get_uart_reg(port, DIVISOR_LATCH_MSB, &Data);
- dbg("mos7840_write:DLM value is %x\n", Data);
+ dbg("mos7840_write:DLM value is %x", Data);
Data = Data & ~SERIAL_LCR_DLAB;
- dbg("mos7840_write: mos7840_port->shadowLCR is %x\n",
+ dbg("mos7840_write: mos7840_port->shadowLCR is %x",
mos7840_port->shadowLCR);
status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
#endif
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Port Paranoia failed \n");
+ dbg("%s", "Port Paranoia failed");
return -1;
}
serial = port->serial;
if (mos7840_serial_paranoia_check(serial, __func__)) {
- dbg("%s", "Serial Paranoia failed \n");
+ dbg("%s", "Serial Paranoia failed");
return -1;
}
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port == NULL) {
- dbg("%s", "mos7840_port is NULL\n");
+ dbg("%s", "mos7840_port is NULL");
return -1;
}
@@ -1490,7 +1448,7 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
if (!mos7840_port->busy[i]) {
mos7840_port->busy[i] = 1;
urb = mos7840_port->write_urb_pool[i];
- dbg("\nURB:%d", i);
+ dbg("URB:%d", i);
break;
}
}
@@ -1525,7 +1483,7 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
mos7840_bulk_out_data_callback, mos7840_port);
data1 = urb->transfer_buffer;
- dbg("\nbulkout endpoint is %d", port->bulk_out_endpointAddress);
+ dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress);
/* send it down the pipe */
status = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1540,7 +1498,7 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
bytes_sent = transfer_size;
mos7840_port->icount.tx += transfer_size;
smp_wmb();
- dbg("mos7840_port->icount.tx is %d:\n", mos7840_port->icount.tx);
+ dbg("mos7840_port->icount.tx is %d:", mos7840_port->icount.tx);
exit:
return bytes_sent;
@@ -1559,11 +1517,11 @@ static void mos7840_throttle(struct tty_struct *tty)
int status;
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Invalid port \n");
+ dbg("%s", "Invalid port");
return;
}
- dbg("- port %d\n", port->number);
+ dbg("- port %d", port->number);
mos7840_port = mos7840_get_port_private(port);
@@ -1571,11 +1529,11 @@ static void mos7840_throttle(struct tty_struct *tty)
return;
if (!mos7840_port->open) {
- dbg("%s\n", "port not opened");
+ dbg("%s", "port not opened");
return;
}
- dbg("%s", "Entering .......... \n");
+ dbg("%s", "Entering ..........");
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
@@ -1609,7 +1567,7 @@ static void mos7840_unthrottle(struct tty_struct *tty)
struct moschip_port *mos7840_port = mos7840_get_port_private(port);
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Invalid port \n");
+ dbg("%s", "Invalid port");
return;
}
@@ -1621,7 +1579,7 @@ static void mos7840_unthrottle(struct tty_struct *tty)
return;
}
- dbg("%s", "Entering .......... \n");
+ dbg("%s", "Entering ..........");
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
@@ -1706,7 +1664,7 @@ static int mos7840_tiocmset(struct tty_struct *tty, struct file *file,
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr);
if (status < 0) {
- dbg("setting MODEM_CONTROL_REGISTER Failed\n");
+ dbg("setting MODEM_CONTROL_REGISTER Failed");
return status;
}
@@ -1775,11 +1733,11 @@ static int mos7840_calc_baud_rate_divisor(int baudRate, int *divisor,
custom++;
*divisor = custom;
- dbg(" Baud %d = %d\n", baudrate, custom);
+ dbg(" Baud %d = %d", baudrate, custom);
return 0;
}
- dbg("%s\n", " Baud calculation Failed...");
+ dbg("%s", " Baud calculation Failed...");
return -1;
#endif
}
@@ -1805,16 +1763,16 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
port = (struct usb_serial_port *)mos7840_port->port;
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Invalid port \n");
+ dbg("%s", "Invalid port");
return -1;
}
if (mos7840_serial_paranoia_check(port->serial, __func__)) {
- dbg("%s", "Invalid Serial \n");
+ dbg("%s", "Invalid Serial");
return -1;
}
- dbg("%s", "Entering .......... \n");
+ dbg("%s", "Entering ..........");
number = mos7840_port->port->number - mos7840_port->port->serial->minor;
@@ -1830,7 +1788,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
Data);
if (status < 0) {
- dbg("Writing spreg failed in set_serial_baud\n");
+ dbg("Writing spreg failed in set_serial_baud");
return -1;
}
#endif
@@ -1843,7 +1801,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
Data);
if (status < 0) {
- dbg("Writing spreg failed in set_serial_baud\n");
+ dbg("Writing spreg failed in set_serial_baud");
return -1;
}
#endif
@@ -1858,14 +1816,14 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset,
&Data);
if (status < 0) {
- dbg("reading spreg failed in set_serial_baud\n");
+ dbg("reading spreg failed in set_serial_baud");
return -1;
}
Data = (Data & 0x8f) | clk_sel_val;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset,
Data);
if (status < 0) {
- dbg("Writing spreg failed in set_serial_baud\n");
+ dbg("Writing spreg failed in set_serial_baud");
return -1;
}
/* Calculate the Divisor */
@@ -1881,11 +1839,11 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
/* Write the divisor */
Data = (unsigned char)(divisor & 0xff);
- dbg("set_serial_baud Value to write DLL is %x\n", Data);
+ dbg("set_serial_baud Value to write DLL is %x", Data);
mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data);
Data = (unsigned char)((divisor & 0xff00) >> 8);
- dbg("set_serial_baud Value to write DLM is %x\n", Data);
+ dbg("set_serial_baud Value to write DLM is %x", Data);
mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data);
/* Disable access to divisor latch */
@@ -1923,12 +1881,12 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
port = (struct usb_serial_port *)mos7840_port->port;
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Invalid port \n");
+ dbg("%s", "Invalid port");
return;
}
if (mos7840_serial_paranoia_check(port->serial, __func__)) {
- dbg("%s", "Invalid Serial \n");
+ dbg("%s", "Invalid Serial");
return;
}
@@ -1941,7 +1899,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
return;
}
- dbg("%s", "Entering .......... \n");
+ dbg("%s", "Entering ..........");
lData = LCR_BITS_8;
lStop = LCR_STOP_1;
@@ -2001,7 +1959,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
mos7840_port->shadowLCR |= (lData | lParity | lStop);
- dbg("mos7840_change_port_settings mos7840_port->shadowLCR is %x\n",
+ dbg("mos7840_change_port_settings mos7840_port->shadowLCR is %x",
mos7840_port->shadowLCR);
/* Disable Interrupts */
Data = 0x00;
@@ -2043,7 +2001,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
if (!baud) {
/* pick a default, any default... */
- dbg("%s\n", "Picked default baud...");
+ dbg("%s", "Picked default baud...");
baud = 9600;
}
@@ -2066,7 +2024,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
}
wake_up(&mos7840_port->delta_msr_wait);
mos7840_port->delta_msr_cond = 1;
- dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x\n",
+ dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x",
mos7840_port->shadowLCR);
return;
@@ -2086,16 +2044,16 @@ static void mos7840_set_termios(struct tty_struct *tty,
unsigned int cflag;
struct usb_serial *serial;
struct moschip_port *mos7840_port;
- dbg("mos7840_set_termios: START\n");
+ dbg("mos7840_set_termios: START");
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Invalid port \n");
+ dbg("%s", "Invalid port");
return;
}
serial = port->serial;
if (mos7840_serial_paranoia_check(serial, __func__)) {
- dbg("%s", "Invalid Serial \n");
+ dbg("%s", "Invalid Serial");
return;
}
@@ -2109,7 +2067,7 @@ static void mos7840_set_termios(struct tty_struct *tty,
return;
}
- dbg("%s\n", "setting termios - ");
+ dbg("%s", "setting termios - ");
cflag = tty->termios->c_cflag;
@@ -2124,7 +2082,7 @@ static void mos7840_set_termios(struct tty_struct *tty,
mos7840_change_port_settings(tty, mos7840_port, old_termios);
if (!mos7840_port->read_urb) {
- dbg("%s", "URB KILLED !!!!!\n");
+ dbg("%s", "URB KILLED !!!!!");
return;
}
@@ -2190,7 +2148,7 @@ static int mos7840_set_modem_info(struct moschip_port *mos7840_port,
port = (struct usb_serial_port *)mos7840_port->port;
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Invalid port \n");
+ dbg("%s", "Invalid port");
return -1;
}
@@ -2235,7 +2193,7 @@ static int mos7840_set_modem_info(struct moschip_port *mos7840_port,
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
unlock_kernel();
if (status < 0) {
- dbg("setting MODEM_CONTROL_REGISTER Failed\n");
+ dbg("setting MODEM_CONTROL_REGISTER Failed");
return -1;
}
@@ -2320,7 +2278,7 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
int mosret = 0;
if (mos7840_port_paranoia_check(port, __func__)) {
- dbg("%s", "Invalid port \n");
+ dbg("%s", "Invalid port");
return -1;
}
@@ -2420,9 +2378,8 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
{
int mos7840_num_ports = 0;
- dbg("numberofendpoints: %d \n",
- (int)serial->interface->cur_altsetting->desc.bNumEndpoints);
- dbg("numberofendpoints: %d \n",
+ dbg("numberofendpoints: cur %d, alt %d",
+ (int)serial->interface->cur_altsetting->desc.bNumEndpoints,
(int)serial->interface->altsetting->desc.bNumEndpoints);
if (serial->interface->cur_altsetting->desc.bNumEndpoints == 5) {
mos7840_num_ports = serial->num_ports = 2;
@@ -2431,7 +2388,7 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
serial->num_bulk_out = 4;
mos7840_num_ports = serial->num_ports = 4;
}
-
+ dbg ("mos7840_num_ports = %d", mos7840_num_ports);
return mos7840_num_ports;
}
@@ -2446,22 +2403,24 @@ static int mos7840_startup(struct usb_serial *serial)
int i, status;
__u16 Data;
- dbg("%s \n", " mos7840_startup :entering..........");
+ dbg("%s", "mos7840_startup :Entering..........");
if (!serial) {
- dbg("%s\n", "Invalid Handler");
+ dbg("%s", "Invalid Handler");
return -1;
}
dev = serial->dev;
- dbg("%s\n", "Entering...");
+ dbg("%s", "Entering...");
+ dbg ("mos7840_startup: serial = %p", serial);
/* we set up the pointers to the endpoints in the mos7840_open *
* function, as the structures aren't created yet. */
/* set up port private structures */
for (i = 0; i < serial->num_ports; ++i) {
+ dbg ("mos7840_startup: configuring port %d............", i);
mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
if (mos7840_port == NULL) {
dev_err(&dev->dev, "%s - Out of memory\n", __func__);
@@ -2519,10 +2478,10 @@ static int mos7840_startup(struct usb_serial *serial)
status = mos7840_get_reg_sync(serial->port[i],
mos7840_port->ControlRegOffset, &Data);
if (status < 0) {
- dbg("Reading ControlReg failed status-0x%x\n", status);
+ dbg("Reading ControlReg failed status-0x%x", status);
break;
} else
- dbg("ControlReg Reading success val is %x, status%d\n",
+ dbg("ControlReg Reading success val is %x, status%d",
Data, status);
Data |= 0x08; /* setting driver done bit */
Data |= 0x04; /* sp1_bit to have cts change reflect in
@@ -2532,10 +2491,10 @@ static int mos7840_startup(struct usb_serial *serial)
status = mos7840_set_reg_sync(serial->port[i],
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
- dbg("Writing ControlReg failed(rx_disable) status-0x%x\n", status);
+ dbg("Writing ControlReg failed(rx_disable) status-0x%x", status);
break;
} else
- dbg("ControlReg Writing success(rx_disable) status%d\n",
+ dbg("ControlReg Writing success(rx_disable) status%d",
status);
/* Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2
@@ -2544,48 +2503,48 @@ static int mos7840_startup(struct usb_serial *serial)
status = mos7840_set_reg_sync(serial->port[i],
(__u16) (mos7840_port->DcrRegOffset + 0), Data);
if (status < 0) {
- dbg("Writing DCR0 failed status-0x%x\n", status);
+ dbg("Writing DCR0 failed status-0x%x", status);
break;
} else
- dbg("DCR0 Writing success status%d\n", status);
+ dbg("DCR0 Writing success status%d", status);
Data = 0x05;
status = mos7840_set_reg_sync(serial->port[i],
(__u16) (mos7840_port->DcrRegOffset + 1), Data);
if (status < 0) {
- dbg("Writing DCR1 failed status-0x%x\n", status);
+ dbg("Writing DCR1 failed status-0x%x", status);
break;
} else
- dbg("DCR1 Writing success status%d\n", status);
+ dbg("DCR1 Writing success status%d", status);
Data = 0x24;
status = mos7840_set_reg_sync(serial->port[i],
(__u16) (mos7840_port->DcrRegOffset + 2), Data);
if (status < 0) {
- dbg("Writing DCR2 failed status-0x%x\n", status);
+ dbg("Writing DCR2 failed status-0x%x", status);
break;
} else
- dbg("DCR2 Writing success status%d\n", status);
+ dbg("DCR2 Writing success status%d", status);
/* write values in clkstart0x0 and clkmulti 0x20 */
Data = 0x0;
status = mos7840_set_reg_sync(serial->port[i],
CLK_START_VALUE_REGISTER, Data);
if (status < 0) {
- dbg("Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status);
+ dbg("Writing CLK_START_VALUE_REGISTER failed status-0x%x", status);
break;
} else
- dbg("CLK_START_VALUE_REGISTER Writing success status%d\n", status);
+ dbg("CLK_START_VALUE_REGISTER Writing success status%d", status);
Data = 0x20;
status = mos7840_set_reg_sync(serial->port[i],
CLK_MULTI_REGISTER, Data);
if (status < 0) {
- dbg("Writing CLK_MULTI_REGISTER failed status-0x%x\n",
+ dbg("Writing CLK_MULTI_REGISTER failed status-0x%x",
status);
goto error;
} else
- dbg("CLK_MULTI_REGISTER Writing success status%d\n",
+ dbg("CLK_MULTI_REGISTER Writing success status%d",
status);
/* write value 0x0 to scratchpad register */
@@ -2593,11 +2552,11 @@ static int mos7840_startup(struct usb_serial *serial)
status = mos7840_set_uart_reg(serial->port[i],
SCRATCH_PAD_REGISTER, Data);
if (status < 0) {
- dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x\n",
+ dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x",
status);
break;
} else
- dbg("SCRATCH_PAD_REGISTER Writing success status%d\n",
+ dbg("SCRATCH_PAD_REGISTER Writing success status%d",
status);
/* Zero Length flag register */
@@ -2608,30 +2567,30 @@ static int mos7840_startup(struct usb_serial *serial)
status = mos7840_set_reg_sync(serial->port[i],
(__u16) (ZLP_REG1 +
((__u16)mos7840_port->port_num)), Data);
- dbg("ZLIP offset%x\n",
+ dbg("ZLIP offset %x",
(__u16) (ZLP_REG1 +
((__u16) mos7840_port->port_num)));
if (status < 0) {
- dbg("Writing ZLP_REG%d failed status-0x%x\n",
+ dbg("Writing ZLP_REG%d failed status-0x%x",
i + 2, status);
break;
} else
- dbg("ZLP_REG%d Writing success status%d\n",
+ dbg("ZLP_REG%d Writing success status%d",
i + 2, status);
} else {
Data = 0xff;
status = mos7840_set_reg_sync(serial->port[i],
(__u16) (ZLP_REG1 +
((__u16)mos7840_port->port_num) - 0x1), Data);
- dbg("ZLIP offset%x\n",
+ dbg("ZLIP offset %x",
(__u16) (ZLP_REG1 +
((__u16) mos7840_port->port_num) - 0x1));
if (status < 0) {
- dbg("Writing ZLP_REG%d failed status-0x%x\n",
+ dbg("Writing ZLP_REG%d failed status-0x%x",
i + 1, status);
break;
} else
- dbg("ZLP_REG%d Writing success status%d\n",
+ dbg("ZLP_REG%d Writing success status%d",
i + 1, status);
}
@@ -2645,15 +2604,16 @@ static int mos7840_startup(struct usb_serial *serial)
goto error;
}
}
+ dbg ("mos7840_startup: all ports configured...........");
/* Zero Length flag enable */
Data = 0x0f;
status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
if (status < 0) {
- dbg("Writing ZLP_REG5 failed status-0x%x\n", status);
+ dbg("Writing ZLP_REG5 failed status-0x%x", status);
goto error;
} else
- dbg("ZLP_REG5 Writing success status%d\n", status);
+ dbg("ZLP_REG5 Writing success status%d", status);
/* setting configuration feature to one */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
@@ -2673,19 +2633,19 @@ error:
}
/****************************************************************************
- * mos7840_shutdown
+ * mos7840_disconnect
* This function is called whenever the device is removed from the usb bus.
****************************************************************************/
-static void mos7840_shutdown(struct usb_serial *serial)
+static void mos7840_disconnect(struct usb_serial *serial)
{
int i;
unsigned long flags;
struct moschip_port *mos7840_port;
- dbg("%s \n", " shutdown :entering..........");
+ dbg("%s", " disconnect :entering..........");
if (!serial) {
- dbg("%s", "Invalid Handler \n");
+ dbg("%s", "Invalid Handler");
return;
}
@@ -2702,14 +2662,45 @@ static void mos7840_shutdown(struct usb_serial *serial)
mos7840_port->zombie = 1;
spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
usb_kill_urb(mos7840_port->control_urb);
+ }
+ }
+
+ dbg("%s", "Thank u :: ");
+
+}
+
+/****************************************************************************
+ * mos7840_release
+ * This function is called when the usb_serial structure is freed.
+ ****************************************************************************/
+
+static void mos7840_release(struct usb_serial *serial)
+{
+ int i;
+ struct moschip_port *mos7840_port;
+ dbg("%s", " release :entering..........");
+
+ if (!serial) {
+ dbg("%s", "Invalid Handler");
+ return;
+ }
+
+ /* check for the ports to be closed,close the ports and disconnect */
+
+ /* free private structure allocated for serial port *
+ * stop reads and writes on all ports */
+
+ for (i = 0; i < serial->num_ports; ++i) {
+ mos7840_port = mos7840_get_port_private(serial->port[i]);
+ dbg("mos7840_port %d = %p", i, mos7840_port);
+ if (mos7840_port) {
kfree(mos7840_port->ctrl_buf);
kfree(mos7840_port->dr);
kfree(mos7840_port);
}
- mos7840_set_port_private(serial->port[i], NULL);
}
- dbg("%s\n", "Thank u :: ");
+ dbg("%s", "Thank u :: ");
}
@@ -2747,7 +2738,8 @@ static struct usb_serial_driver moschip7840_4port_device = {
.tiocmget = mos7840_tiocmget,
.tiocmset = mos7840_tiocmset,
.attach = mos7840_startup,
- .shutdown = mos7840_shutdown,
+ .disconnect = mos7840_disconnect,
+ .release = mos7840_release,
.read_bulk_callback = mos7840_bulk_in_callback,
.read_int_callback = mos7840_interrupt_callback,
};
@@ -2760,7 +2752,7 @@ static int __init moschip7840_init(void)
{
int retval;
- dbg("%s \n", " mos7840_init :entering..........");
+ dbg("%s", " mos7840_init :entering..........");
/* Register with the usb serial */
retval = usb_serial_register(&moschip7840_4port_device);
@@ -2768,14 +2760,14 @@ static int __init moschip7840_init(void)
if (retval)
goto failed_port_device_register;
- dbg("%s\n", "Entring...");
+ dbg("%s", "Entering...");
printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
DRIVER_DESC "\n");
/* Register with the usb */
retval = usb_register(&io_driver);
if (retval == 0) {
- dbg("%s\n", "Leaving...");
+ dbg("%s", "Leaving...");
return 0;
}
usb_serial_deregister(&moschip7840_4port_device);
@@ -2790,13 +2782,13 @@ failed_port_device_register:
static void __exit moschip7840_exit(void)
{
- dbg("%s \n", " mos7840_exit :entering..........");
+ dbg("%s", " mos7840_exit :entering..........");
usb_deregister(&io_driver);
usb_serial_deregister(&moschip7840_4port_device);
- dbg("%s\n", "Entring...");
+ dbg("%s", "Entering...");
}
module_init(moschip7840_init);
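Most of the mos7840 churn above is removal of trailing "\n" from dbg() strings. The reason is that the USB dbg() helper already appends a newline when it expands to printk(), so embedding another one produced blank lines in the log. Paraphrased from include/linux/usb.h in this tree (debug builds), roughly:

	#define dbg(format, arg...) \
		printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg)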
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index bcdcbb82270..f5f3751a888 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -98,8 +98,7 @@ static int navman_open(struct tty_struct *tty,
return result;
}
-static void navman_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void navman_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index df653971272..56857ddbd70 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -66,14 +66,14 @@ static int debug;
/* function prototypes */
static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp);
-static void omninet_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void omninet_close(struct usb_serial_port *port);
static void omninet_read_bulk_callback(struct urb *urb);
static void omninet_write_bulk_callback(struct urb *urb);
static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int omninet_write_room(struct tty_struct *tty);
-static void omninet_shutdown(struct usb_serial *serial);
+static void omninet_disconnect(struct usb_serial *serial);
+static void omninet_release(struct usb_serial *serial);
static int omninet_attach(struct usb_serial *serial);
static struct usb_device_id id_table[] = {
@@ -109,7 +109,8 @@ static struct usb_serial_driver zyxel_omninet_device = {
.write_room = omninet_write_room,
.read_bulk_callback = omninet_read_bulk_callback,
.write_bulk_callback = omninet_write_bulk_callback,
- .shutdown = omninet_shutdown,
+ .disconnect = omninet_disconnect,
+ .release = omninet_release,
};
@@ -189,8 +190,7 @@ static int omninet_open(struct tty_struct *tty,
return result;
}
-static void omninet_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void omninet_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
usb_kill_urb(port->read_urb);
@@ -347,13 +347,22 @@ static void omninet_write_bulk_callback(struct urb *urb)
}
-static void omninet_shutdown(struct usb_serial *serial)
+static void omninet_disconnect(struct usb_serial *serial)
{
struct usb_serial_port *wport = serial->port[1];
- struct usb_serial_port *port = serial->port[0];
+
dbg("%s", __func__);
usb_kill_urb(wport->write_urb);
+}
+
+
+static void omninet_release(struct usb_serial *serial)
+{
+ struct usb_serial_port *port = serial->port[0];
+
+ dbg("%s", __func__);
+
kfree(usb_get_serial_port_data(port));
}
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index b500ad10b75..336bba79ad3 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -173,8 +173,7 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port,
return result;
}
-static void opticon_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp)
+static void opticon_close(struct usb_serial_port *port)
{
struct opticon_private *priv = usb_get_serial_data(port->serial);
@@ -464,7 +463,7 @@ error:
return retval;
}
-static void opticon_shutdown(struct usb_serial *serial)
+static void opticon_disconnect(struct usb_serial *serial)
{
struct opticon_private *priv = usb_get_serial_data(serial);
@@ -472,9 +471,16 @@ static void opticon_shutdown(struct usb_serial *serial)
usb_kill_urb(priv->bulk_read_urb);
usb_free_urb(priv->bulk_read_urb);
+}
+
+static void opticon_release(struct usb_serial *serial)
+{
+ struct opticon_private *priv = usb_get_serial_data(serial);
+
+ dbg("%s", __func__);
+
kfree(priv->bulk_in_buffer);
kfree(priv);
- usb_set_serial_data(serial, NULL);
}
static int opticon_suspend(struct usb_interface *intf, pm_message_t message)
@@ -525,7 +531,8 @@ static struct usb_serial_driver opticon_device = {
.close = opticon_close,
.write = opticon_write,
.write_room = opticon_write_room,
- .shutdown = opticon_shutdown,
+ .disconnect = opticon_disconnect,
+ .release = opticon_release,
.throttle = opticon_throttle,
.unthrottle = opticon_unthrottle,
.ioctl = opticon_ioctl,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7817b82889c..575816e6ba3 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -43,12 +43,16 @@
#include <linux/usb/serial.h>
/* Function prototypes */
+static int option_probe(struct usb_serial *serial,
+ const struct usb_device_id *id);
static int option_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp);
-static void option_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void option_close(struct usb_serial_port *port);
+static void option_dtr_rts(struct usb_serial_port *port, int on);
+
static int option_startup(struct usb_serial *serial);
-static void option_shutdown(struct usb_serial *serial);
+static void option_disconnect(struct usb_serial *serial);
+static void option_release(struct usb_serial *serial);
static int option_write_room(struct tty_struct *tty);
static void option_instat_callback(struct urb *urb);
@@ -61,7 +65,7 @@ static void option_set_termios(struct tty_struct *tty,
static int option_tiocmget(struct tty_struct *tty, struct file *file);
static int option_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
-static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *port);
+static int option_send_setup(struct usb_serial_port *port);
static int option_suspend(struct usb_serial *serial, pm_message_t message);
static int option_resume(struct usb_serial *serial);
@@ -201,9 +205,9 @@ static int option_resume(struct usb_serial *serial);
#define NOVATELWIRELESS_PRODUCT_MC727 0x4100
#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
#define NOVATELWIRELESS_PRODUCT_U727 0x5010
+#define NOVATELWIRELESS_PRODUCT_MC760 0x6000
/* FUTURE NOVATEL PRODUCTS */
-#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0X6000
#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001
#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000
#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001
@@ -304,6 +308,10 @@ static int option_resume(struct usb_serial *serial);
#define DLINK_PRODUCT_DWM_652 0x3e04
+/* TOSHIBA PRODUCTS */
+#define TOSHIBA_VENDOR_ID 0x0930
+#define TOSHIBA_PRODUCT_HSDPA_MINICARD 0x1302
+
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -421,7 +429,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, /* Novatel EVDO product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
@@ -522,6 +530,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
{ USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */
+ { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -549,8 +558,10 @@ static struct usb_serial_driver option_1port_device = {
.usb_driver = &option_driver,
.id_table = option_ids,
.num_ports = 1,
+ .probe = option_probe,
.open = option_open,
.close = option_close,
+ .dtr_rts = option_dtr_rts,
.write = option_write,
.write_room = option_write_room,
.chars_in_buffer = option_chars_in_buffer,
@@ -558,7 +569,8 @@ static struct usb_serial_driver option_1port_device = {
.tiocmget = option_tiocmget,
.tiocmset = option_tiocmset,
.attach = option_startup,
- .shutdown = option_shutdown,
+ .disconnect = option_disconnect,
+ .release = option_release,
.read_int_callback = option_instat_callback,
.suspend = option_suspend,
.resume = option_resume,
@@ -624,13 +636,25 @@ static void __exit option_exit(void)
module_init(option_init);
module_exit(option_exit);
+static int option_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
+ if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
+ serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
+ serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8)
+ return -ENODEV;
+
+ return 0;
+}
+
static void option_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
dbg("%s", __func__);
/* Doesn't support option setting */
tty_termios_copy_hw(tty->termios, old_termios);
- option_send_setup(tty, port);
+ option_send_setup(port);
}
static int option_tiocmget(struct tty_struct *tty, struct file *file)
@@ -669,7 +693,7 @@ static int option_tiocmset(struct tty_struct *tty, struct file *file,
portdata->rts_state = 0;
if (clear & TIOCM_DTR)
portdata->dtr_state = 0;
- return option_send_setup(tty, port);
+ return option_send_setup(port);
}
/* Write */
@@ -897,10 +921,6 @@ static int option_open(struct tty_struct *tty,
dbg("%s", __func__);
- /* Set some sane defaults */
- portdata->rts_state = 1;
- portdata->dtr_state = 1;
-
/* Reset low level data toggle and start reading from endpoints */
for (i = 0; i < N_IN_URB; i++) {
urb = portdata->in_urbs[i];
@@ -936,37 +956,43 @@ static int option_open(struct tty_struct *tty,
usb_pipeout(urb->pipe), 0); */
}
- option_send_setup(tty, port);
+ option_send_setup(port);
return 0;
}
-static void option_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void option_dtr_rts(struct usb_serial_port *port, int on)
{
- int i;
struct usb_serial *serial = port->serial;
struct option_port_private *portdata;
dbg("%s", __func__);
portdata = usb_get_serial_port_data(port);
+ mutex_lock(&serial->disc_mutex);
+ portdata->rts_state = on;
+ portdata->dtr_state = on;
+ if (serial->dev)
+ option_send_setup(port);
+ mutex_unlock(&serial->disc_mutex);
+}
- portdata->rts_state = 0;
- portdata->dtr_state = 0;
- if (serial->dev) {
- mutex_lock(&serial->disc_mutex);
- if (!serial->disconnected)
- option_send_setup(tty, port);
- mutex_unlock(&serial->disc_mutex);
+static void option_close(struct usb_serial_port *port)
+{
+ int i;
+ struct usb_serial *serial = port->serial;
+ struct option_port_private *portdata;
+
+ dbg("%s", __func__);
+ portdata = usb_get_serial_port_data(port);
+ if (serial->dev) {
/* Stop reading/writing urbs */
for (i = 0; i < N_IN_URB; i++)
usb_kill_urb(portdata->in_urbs[i]);
for (i = 0; i < N_OUT_URB; i++)
usb_kill_urb(portdata->out_urbs[i]);
}
- tty_port_tty_set(&port->port, NULL);
}
/* Helper functions used by option_setup_urbs */
@@ -1032,28 +1058,24 @@ static void option_setup_urbs(struct usb_serial *serial)
* This is exactly the same as SET_CONTROL_LINE_STATE from the PSTN
* CDC.
*/
-static int option_send_setup(struct tty_struct *tty,
- struct usb_serial_port *port)
+static int option_send_setup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct option_port_private *portdata;
int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+ int val = 0;
dbg("%s", __func__);
portdata = usb_get_serial_port_data(port);
- if (tty) {
- int val = 0;
- if (portdata->dtr_state)
- val |= 0x01;
- if (portdata->rts_state)
- val |= 0x02;
+ if (portdata->dtr_state)
+ val |= 0x01;
+ if (portdata->rts_state)
+ val |= 0x02;
- return usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
- 0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT);
- }
- return 0;
+ return usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ 0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT);
}
static int option_startup(struct usb_serial *serial)
@@ -1129,7 +1151,14 @@ static void stop_read_write_urbs(struct usb_serial *serial)
}
}
-static void option_shutdown(struct usb_serial *serial)
+static void option_disconnect(struct usb_serial *serial)
+{
+ dbg("%s", __func__);
+
+ stop_read_write_urbs(serial);
+}
+
+static void option_release(struct usb_serial *serial)
{
int i, j;
struct usb_serial_port *port;
@@ -1137,8 +1166,6 @@ static void option_shutdown(struct usb_serial *serial)
dbg("%s", __func__);
- stop_read_write_urbs(serial);
-
/* Now free them */
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
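Taken together, the option.c hunks above show the conversion pattern this series applies to every driver in the directory: the single .shutdown callback is split into .disconnect (quiesce I/O while the device is going away) and .release (free memory once the last reference drops), and DTR/RTS handling moves out of close() into a .dtr_rts hook. A minimal sketch of that shape for a hypothetical driver follows; the example_* names are invented for illustration and are not part of the patch.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

struct example_port_private {
	int rts_state;
	int dtr_state;
};

/* Raising/dropping the modem lines is now a separate hook the tty
 * layer calls around open/close, instead of close() doing it. */
static void example_dtr_rts(struct usb_serial_port *port, int on)
{
	struct example_port_private *portdata = usb_get_serial_port_data(port);

	portdata->dtr_state = on;
	portdata->rts_state = on;
	/* a real driver would send its control request to the device here */
}

/* disconnect(): the device is unplugged; only stop traffic. */
static void example_disconnect(struct usb_serial *serial)
{
	int i;

	for (i = 0; i < serial->num_ports; ++i) {
		usb_kill_urb(serial->port[i]->read_urb);
		usb_kill_urb(serial->port[i]->write_urb);
	}
}

/* release(): called when the last reference is dropped; free memory here. */
static void example_release(struct usb_serial *serial)
{
	int i;

	for (i = 0; i < serial->num_ports; ++i)
		kfree(usb_get_serial_port_data(serial->port[i]));
}

static struct usb_serial_driver example_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "example",
	},
	.dtr_rts	= example_dtr_rts,
	.disconnect	= example_disconnect,
	.release	= example_release,
};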
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index ba551f00f16..3cece27325e 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -143,8 +143,7 @@ struct oti6858_control_pkt {
/* function prototypes */
static int oti6858_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void oti6858_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void oti6858_close(struct usb_serial_port *port);
static void oti6858_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static int oti6858_ioctl(struct tty_struct *tty, struct file *file,
@@ -160,7 +159,7 @@ static int oti6858_tiocmget(struct tty_struct *tty, struct file *file);
static int oti6858_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
static int oti6858_startup(struct usb_serial *serial);
-static void oti6858_shutdown(struct usb_serial *serial);
+static void oti6858_release(struct usb_serial *serial);
/* functions operating on buffers */
static struct oti6858_buf *oti6858_buf_alloc(unsigned int size);
@@ -195,7 +194,7 @@ static struct usb_serial_driver oti6858_device = {
.write_room = oti6858_write_room,
.chars_in_buffer = oti6858_chars_in_buffer,
.attach = oti6858_startup,
- .shutdown = oti6858_shutdown,
+ .release = oti6858_release,
};
struct oti6858_private {
@@ -622,67 +621,30 @@ static int oti6858_open(struct tty_struct *tty,
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
" with error %d\n", __func__, result);
- oti6858_close(tty, port, NULL);
+ oti6858_close(port);
return -EPROTO;
}
/* setup termios */
if (tty)
oti6858_set_termios(tty, port, &tmp_termios);
-
+ port->port.drain_delay = 256; /* FIXME: check the FIFO length */
return 0;
}
-static void oti6858_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void oti6858_close(struct usb_serial_port *port)
{
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- long timeout;
- wait_queue_t wait;
dbg("%s(port = %d)", __func__, port->number);
- /* wait for data to drain from the buffer */
spin_lock_irqsave(&priv->lock, flags);
- timeout = 30 * HZ; /* PL2303_CLOSING_WAIT */
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&tty->write_wait, &wait);
- dbg("%s(): entering wait loop", __func__);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (oti6858_buf_data_avail(priv->buf) == 0
- || timeout == 0 || signal_pending(current)
- || port->serial->disconnected)
- break;
- spin_unlock_irqrestore(&priv->lock, flags);
- timeout = schedule_timeout(timeout);
- spin_lock_irqsave(&priv->lock, flags);
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&tty->write_wait, &wait);
- dbg("%s(): after wait loop", __func__);
-
/* clear out any remaining data in the buffer */
oti6858_buf_clear(priv->buf);
spin_unlock_irqrestore(&priv->lock, flags);
- /* wait for characters to drain from the device */
- /* (this is long enough for the entire 256 byte */
- /* pl2303 hardware buffer to drain with no flow */
- /* control for data rates of 1200 bps or more, */
- /* for lower rates we should really know how much */
- /* data is in the buffer to compute a delay */
- /* that is not unnecessarily long) */
- /* FIXME
- bps = tty_get_baud_rate(tty);
- if (bps > 1200)
- timeout = max((HZ*2560)/bps,HZ/10);
- else
- */
- timeout = 2*HZ;
- schedule_timeout_interruptible(timeout);
- dbg("%s(): after schedule_timeout_interruptible()", __func__);
+ dbg("%s(): after buf_clear()", __func__);
/* cancel scheduled setup */
cancel_delayed_work(&priv->delayed_setup_work);
@@ -694,15 +656,6 @@ static void oti6858_close(struct tty_struct *tty,
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
usb_kill_urb(port->interrupt_in_urb);
-
- /*
- if (tty && (tty->termios->c_cflag) & HUPCL) {
- // drop DTR and RTS
- spin_lock_irqsave(&priv->lock, flags);
- priv->pending_setup.control &= ~CONTROL_MASK;
- spin_unlock_irqrestore(&priv->lock, flags);
- }
- */
}
static int oti6858_tiocmset(struct tty_struct *tty, struct file *file,
@@ -829,7 +782,7 @@ static int oti6858_ioctl(struct tty_struct *tty, struct file *file,
}
-static void oti6858_shutdown(struct usb_serial *serial)
+static void oti6858_release(struct usb_serial *serial)
{
struct oti6858_private *priv;
int i;
@@ -841,7 +794,6 @@ static void oti6858_shutdown(struct usb_serial *serial)
if (priv) {
oti6858_buf_free(priv->buf);
kfree(priv);
- usb_set_serial_port_data(serial->port[i], NULL);
}
}
}
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 751a533a434..ec6c132a25b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -652,69 +652,41 @@ static void pl2303_set_termios(struct tty_struct *tty,
kfree(buf);
}
-static void pl2303_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void pl2303_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ u8 control;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ /* Change DTR and RTS */
+ if (on)
+ priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
+ else
+ priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
+ control = priv->line_control;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ set_control_lines(port->serial->dev, control);
+}
+
+static void pl2303_close(struct usb_serial_port *port)
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- unsigned int c_cflag;
- int bps;
- long timeout;
- wait_queue_t wait;
dbg("%s - port %d", __func__, port->number);
- /* wait for data to drain from the buffer */
spin_lock_irqsave(&priv->lock, flags);
- timeout = PL2303_CLOSING_WAIT;
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&tty->write_wait, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (pl2303_buf_data_avail(priv->buf) == 0 ||
- timeout == 0 || signal_pending(current) ||
- port->serial->disconnected)
- break;
- spin_unlock_irqrestore(&priv->lock, flags);
- timeout = schedule_timeout(timeout);
- spin_lock_irqsave(&priv->lock, flags);
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&tty->write_wait, &wait);
/* clear out any remaining data in the buffer */
pl2303_buf_clear(priv->buf);
spin_unlock_irqrestore(&priv->lock, flags);
- /* wait for characters to drain from the device */
- /* (this is long enough for the entire 256 byte */
- /* pl2303 hardware buffer to drain with no flow */
- /* control for data rates of 1200 bps or more, */
- /* for lower rates we should really know how much */
- /* data is in the buffer to compute a delay */
- /* that is not unnecessarily long) */
- bps = tty_get_baud_rate(tty);
- if (bps > 1200)
- timeout = max((HZ*2560)/bps, HZ/10);
- else
- timeout = 2*HZ;
- schedule_timeout_interruptible(timeout);
-
/* shutdown our urbs */
dbg("%s - shutting down urbs", __func__);
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
usb_kill_urb(port->interrupt_in_urb);
- if (tty) {
- c_cflag = tty->termios->c_cflag;
- if (c_cflag & HUPCL) {
- /* drop DTR and RTS */
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
- set_control_lines(port->serial->dev, 0);
- }
- }
}
static int pl2303_open(struct tty_struct *tty,
@@ -748,7 +720,7 @@ static int pl2303_open(struct tty_struct *tty,
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb,"
" error %d\n", __func__, result);
- pl2303_close(tty, port, NULL);
+ pl2303_close(port);
return -EPROTO;
}
@@ -758,9 +730,10 @@ static int pl2303_open(struct tty_struct *tty,
if (result) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, result);
- pl2303_close(tty, port, NULL);
+ pl2303_close(port);
return -EPROTO;
}
+ port->port.drain_delay = 256;
return 0;
}
@@ -821,6 +794,14 @@ static int pl2303_tiocmget(struct tty_struct *tty, struct file *file)
return result;
}
+static int pl2303_carrier_raised(struct usb_serial_port *port)
+{
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
+ if (priv->line_status & UART_DCD)
+ return 1;
+ return 0;
+}
+
static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
@@ -897,7 +878,7 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
dbg("%s - error sending break = %d", __func__, result);
}
-static void pl2303_shutdown(struct usb_serial *serial)
+static void pl2303_release(struct usb_serial *serial)
{
int i;
struct pl2303_private *priv;
@@ -909,7 +890,6 @@ static void pl2303_shutdown(struct usb_serial *serial)
if (priv) {
pl2303_buf_free(priv->buf);
kfree(priv);
- usb_set_serial_port_data(serial->port[i], NULL);
}
}
}
@@ -946,6 +926,8 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
spin_lock_irqsave(&priv->lock, flags);
priv->line_status = data[status_idx];
spin_unlock_irqrestore(&priv->lock, flags);
+ if (priv->line_status & UART_BREAK_ERROR)
+ usb_serial_handle_break(port);
wake_up_interruptible(&priv->delta_msr_wait);
}
@@ -1056,7 +1038,8 @@ static void pl2303_read_bulk_callback(struct urb *urb)
if (line_status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
for (i = 0; i < urb->actual_length; ++i)
- tty_insert_flip_char(tty, data[i], tty_flag);
+ if (!usb_serial_handle_sysrq_char(port, data[i]))
+ tty_insert_flip_char(tty, data[i], tty_flag);
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
@@ -1125,6 +1108,8 @@ static struct usb_serial_driver pl2303_device = {
.num_ports = 1,
.open = pl2303_open,
.close = pl2303_close,
+ .dtr_rts = pl2303_dtr_rts,
+ .carrier_raised = pl2303_carrier_raised,
.write = pl2303_write,
.ioctl = pl2303_ioctl,
.break_ctl = pl2303_break_ctl,
@@ -1137,7 +1122,7 @@ static struct usb_serial_driver pl2303_device = {
.write_room = pl2303_write_room,
.chars_in_buffer = pl2303_chars_in_buffer,
.attach = pl2303_startup,
- .shutdown = pl2303_shutdown,
+ .release = pl2303_release,
};
static int __init pl2303_init(void)
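The pl2303.c conversion above deletes the driver's hand-rolled drain-and-HUPCL logic from close(): carrier state is reported through .carrier_raised, line control moves to .dtr_rts, and the close-time character drain is delegated to the tty layer via port->port.drain_delay. A short sketch of the same idea with an invented status field; the example_* names and the DCD bit layout are assumptions, not taken from the patch.

#include <linux/types.h>
#include <linux/usb/serial.h>

#define EXAMPLE_MSR_DCD	0x01	/* assumed bit layout of the private status byte */

struct example_private {
	u8 line_status;		/* updated from the interrupt-in completion */
};

/* tty_port_block_til_ready() blocks a non-CLOCAL, blocking open until
 * this returns non-zero, so the driver stops open-coding that wait. */
static int example_carrier_raised(struct usb_serial_port *port)
{
	struct example_private *priv = usb_get_serial_port_data(port);

	return (priv->line_status & EXAMPLE_MSR_DCD) ? 1 : 0;
}

static int example_open(struct tty_struct *tty, struct usb_serial_port *port,
			struct file *filp)
{
	/* let the tty layer sleep for the hardware FIFO on close
	 * instead of the driver scheduling its own timeouts in close() */
	port->port.drain_delay = 256;
	return 0;
}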
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 913225c6161..032f7aeb40a 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -1,7 +1,10 @@
/*
USB Driver for Sierra Wireless
- Copyright (C) 2006, 2007, 2008 Kevin Lloyd <klloyd@sierrawireless.com>
+ Copyright (C) 2006, 2007, 2008 Kevin Lloyd <klloyd@sierrawireless.com>,
+
+ Copyright (C) 2008, 2009 Elina Pasheva, Matthew Safar, Rory Filer
+ <linux@sierrawireless.com>
IMPORTANT DISCLAIMER: This driver is not commercially supported by
Sierra Wireless. Use at your own risk.
@@ -14,8 +17,8 @@
Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
*/
-#define DRIVER_VERSION "v.1.3.3"
-#define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>"
+#define DRIVER_VERSION "v.1.3.7"
+#define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer"
#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
#include <linux/kernel.h>
@@ -26,23 +29,32 @@
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
-#include <linux/usb/ch9.h>
#define SWIMS_USB_REQUEST_SetPower 0x00
#define SWIMS_USB_REQUEST_SetNmea 0x07
-/* per port private data */
-#define N_IN_URB 4
-#define N_OUT_URB 4
+#define N_IN_URB 8
+#define N_OUT_URB 64
#define IN_BUFLEN 4096
+#define MAX_TRANSFER (PAGE_SIZE - 512)
+/* MAX_TRANSFER is chosen so that the VM is not stressed by
+ allocations > PAGE_SIZE and the number of packets in a page
+ is an integer; 512 is the largest possible packet on EHCI */
+
static int debug;
static int nmea;
+/* Used in interface blacklisting */
+struct sierra_iface_info {
+ const u32 infolen; /* number of interface numbers on blacklist */
+ const u8 *ifaceinfo; /* pointer to the array holding the numbers */
+};
+
static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
{
int result;
- dev_dbg(&udev->dev, "%s", __func__);
+ dev_dbg(&udev->dev, "%s\n", __func__);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
SWIMS_USB_REQUEST_SetPower, /* __u8 request */
USB_TYPE_VENDOR, /* __u8 request type */
@@ -57,7 +69,7 @@ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
{
int result;
- dev_dbg(&udev->dev, "%s", __func__);
+ dev_dbg(&udev->dev, "%s\n", __func__);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
SWIMS_USB_REQUEST_SetNmea, /* __u8 request */
USB_TYPE_VENDOR, /* __u8 request type */
@@ -71,18 +83,39 @@ static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
static int sierra_calc_num_ports(struct usb_serial *serial)
{
- int result;
- int *num_ports = usb_get_serial_data(serial);
- dev_dbg(&serial->dev->dev, "%s", __func__);
+ int num_ports = 0;
+ u8 ifnum, numendpoints;
- result = *num_ports;
+ dev_dbg(&serial->dev->dev, "%s\n", __func__);
- if (result) {
- kfree(num_ports);
- usb_set_serial_data(serial, NULL);
- }
+ ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+ numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
- return result;
+ /* Dummy interface present on some SKUs should be ignored */
+ if (ifnum == 0x99)
+ num_ports = 0;
+ else if (numendpoints <= 3)
+ num_ports = 1;
+ else
+ num_ports = (numendpoints-1)/2;
+ return num_ports;
+}
+
+static int is_blacklisted(const u8 ifnum,
+ const struct sierra_iface_info *blacklist)
+{
+ const u8 *info;
+ int i;
+
+ if (blacklist) {
+ info = blacklist->ifaceinfo;
+
+ for (i = 0; i < blacklist->infolen; i++) {
+ if (info[i] == ifnum)
+ return 1;
+ }
+ }
+ return 0;
}
static int sierra_calc_interface(struct usb_serial *serial)
@@ -90,7 +123,7 @@ static int sierra_calc_interface(struct usb_serial *serial)
int interface;
struct usb_interface *p_interface;
struct usb_host_interface *p_host_interface;
- dev_dbg(&serial->dev->dev, "%s", __func__);
+ dev_dbg(&serial->dev->dev, "%s\n", __func__);
/* Get the interface structure pointer from the serial struct */
p_interface = serial->interface;
@@ -111,23 +144,12 @@ static int sierra_probe(struct usb_serial *serial,
{
int result = 0;
struct usb_device *udev;
- int *num_ports;
u8 ifnum;
- u8 numendpoints;
-
- dev_dbg(&serial->dev->dev, "%s", __func__);
-
- num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL);
- if (!num_ports)
- return -ENOMEM;
- ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
- numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
udev = serial->dev;
+ dev_dbg(&udev->dev, "%s\n", __func__);
- /* Figure out the interface number from the serial structure */
ifnum = sierra_calc_interface(serial);
-
/*
* If this interface supports more than 1 alternate
* select the 2nd one
@@ -139,23 +161,25 @@ static int sierra_probe(struct usb_serial *serial,
usb_set_interface(udev, ifnum, 1);
}
- /* Dummy interface present on some SKUs should be ignored */
- if (ifnum == 0x99)
- *num_ports = 0;
- else if (numendpoints <= 3)
- *num_ports = 1;
- else
- *num_ports = (numendpoints-1)/2;
+ /* ifnum could have changed - by calling usb_set_interface */
+ ifnum = sierra_calc_interface(serial);
- /*
- * save off our num_ports info so that we can use it in the
- * calc_num_ports callback
- */
- usb_set_serial_data(serial, (void *)num_ports);
+ if (is_blacklisted(ifnum,
+ (struct sierra_iface_info *)id->driver_info)) {
+ dev_dbg(&serial->dev->dev,
+ "Ignoring blacklisted interface #%d\n", ifnum);
+ return -ENODEV;
+ }
return result;
}
+static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 };
+static const struct sierra_iface_info direct_ip_interface_blacklist = {
+ .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces),
+ .ifaceinfo = direct_ip_non_serial_ifaces,
+};
+
static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
{ USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
@@ -188,9 +212,11 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
{ USB_DEVICE(0x1199, 0x683A) }, /* Sierra Wireless MC8785 */
{ USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */
- { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */
- { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */
- { USB_DEVICE(0x1199, 0x683E) }, /* Sierra Wireless MC8790 */
+ /* Sierra Wireless MC8790, MC8791, MC8792 Composite */
+ { USB_DEVICE(0x1199, 0x683C) },
+ { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8791 Composite */
+ /* Sierra Wireless MC8790, MC8791, MC8792 */
+ { USB_DEVICE(0x1199, 0x683E) },
{ USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */
{ USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */
{ USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */
@@ -211,6 +237,10 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
{ USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
+ { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
+
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -229,7 +259,6 @@ struct sierra_port_private {
/* Input endpoints and buffers for this port */
struct urb *in_urbs[N_IN_URB];
- char *in_buffer[N_IN_URB];
/* Settings for the port */
int rts_state; /* Handshaking pins (outputs) */
@@ -240,66 +269,59 @@ struct sierra_port_private {
int ri_state;
};
-static int sierra_send_setup(struct tty_struct *tty,
- struct usb_serial_port *port)
+static int sierra_send_setup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct sierra_port_private *portdata;
__u16 interface = 0;
+ int val = 0;
- dev_dbg(&port->dev, "%s", __func__);
+ dev_dbg(&port->dev, "%s\n", __func__);
portdata = usb_get_serial_port_data(port);
- if (tty) {
- int val = 0;
- if (portdata->dtr_state)
- val |= 0x01;
- if (portdata->rts_state)
- val |= 0x02;
-
- /* If composite device then properly report interface */
- if (serial->num_ports == 1) {
- interface = sierra_calc_interface(serial);
-
- /* Control message is sent only to interfaces with
- * interrupt_in endpoints
- */
- if (port->interrupt_in_urb) {
- /* send control message */
- return usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
- 0x22, 0x21, val, interface,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
- }
- }
-
- /* Otherwise the need to do non-composite mapping */
- else {
- if (port->bulk_out_endpointAddress == 2)
- interface = 0;
- else if (port->bulk_out_endpointAddress == 4)
- interface = 1;
- else if (port->bulk_out_endpointAddress == 5)
- interface = 2;
+ if (portdata->dtr_state)
+ val |= 0x01;
+ if (portdata->rts_state)
+ val |= 0x02;
+ /* If composite device then properly report interface */
+ if (serial->num_ports == 1) {
+ interface = sierra_calc_interface(serial);
+ /* Control message is sent only to interfaces with
+ * interrupt_in endpoints
+ */
+ if (port->interrupt_in_urb) {
+ /* send control message */
return usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
0x22, 0x21, val, interface,
NULL, 0, USB_CTRL_SET_TIMEOUT);
-
}
}
+ /* Otherwise the need to do non-composite mapping */
+ else {
+ if (port->bulk_out_endpointAddress == 2)
+ interface = 0;
+ else if (port->bulk_out_endpointAddress == 4)
+ interface = 1;
+ else if (port->bulk_out_endpointAddress == 5)
+ interface = 2;
+ return usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ 0x22, 0x21, val, interface,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+ }
return 0;
}
static void sierra_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
- dev_dbg(&port->dev, "%s", __func__);
+ dev_dbg(&port->dev, "%s\n", __func__);
tty_termios_copy_hw(tty->termios, old_termios);
- sierra_send_setup(tty, port);
+ sierra_send_setup(port);
}
static int sierra_tiocmget(struct tty_struct *tty, struct file *file)
@@ -308,7 +330,7 @@ static int sierra_tiocmget(struct tty_struct *tty, struct file *file)
unsigned int value;
struct sierra_port_private *portdata;
- dev_dbg(&port->dev, "%s", __func__);
+ dev_dbg(&port->dev, "%s\n", __func__);
portdata = usb_get_serial_port_data(port);
value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
@@ -338,7 +360,18 @@ static int sierra_tiocmset(struct tty_struct *tty, struct file *file,
portdata->rts_state = 0;
if (clear & TIOCM_DTR)
portdata->dtr_state = 0;
- return sierra_send_setup(tty, port);
+ return sierra_send_setup(port);
+}
+
+static void sierra_release_urb(struct urb *urb)
+{
+ struct usb_serial_port *port;
+ if (urb) {
+ port = urb->context;
+ dev_dbg(&port->dev, "%s: %p\n", __func__, urb);
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
}
static void sierra_outdat_callback(struct urb *urb)
@@ -348,14 +381,14 @@ static void sierra_outdat_callback(struct urb *urb)
int status = urb->status;
unsigned long flags;
- dev_dbg(&port->dev, "%s - port %d", __func__, port->number);
+ dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
/* free up the transfer buffer, as usb_free_urb() does not do this */
kfree(urb->transfer_buffer);
if (status)
dev_dbg(&port->dev, "%s - nonzero write bulk status "
- "received: %d", __func__, status);
+ "received: %d\n", __func__, status);
spin_lock_irqsave(&portdata->lock, flags);
--portdata->outstanding_urbs;
@@ -373,50 +406,61 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
unsigned long flags;
unsigned char *buffer;
struct urb *urb;
- int status;
+ size_t writesize = min((size_t)count, (size_t)MAX_TRANSFER);
+ int retval = 0;
+
+ /* verify that we actually have some data to write */
+ if (count == 0)
+ return 0;
portdata = usb_get_serial_port_data(port);
- dev_dbg(&port->dev, "%s: write (%d chars)", __func__, count);
+ dev_dbg(&port->dev, "%s: write (%zd bytes)\n", __func__, writesize);
spin_lock_irqsave(&portdata->lock, flags);
+ dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__,
+ portdata->outstanding_urbs);
if (portdata->outstanding_urbs > N_OUT_URB) {
spin_unlock_irqrestore(&portdata->lock, flags);
dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
return 0;
}
portdata->outstanding_urbs++;
+ dev_dbg(&port->dev, "%s - 1, outstanding_urbs: %d\n", __func__,
+ portdata->outstanding_urbs);
spin_unlock_irqrestore(&portdata->lock, flags);
- buffer = kmalloc(count, GFP_ATOMIC);
+ buffer = kmalloc(writesize, GFP_ATOMIC);
if (!buffer) {
dev_err(&port->dev, "out of memory\n");
- count = -ENOMEM;
+ retval = -ENOMEM;
goto error_no_buffer;
}
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
dev_err(&port->dev, "no more free urbs\n");
- count = -ENOMEM;
+ retval = -ENOMEM;
goto error_no_urb;
}
- memcpy(buffer, buf, count);
+ memcpy(buffer, buf, writesize);
- usb_serial_debug_data(debug, &port->dev, __func__, count, buffer);
+ usb_serial_debug_data(debug, &port->dev, __func__, writesize, buffer);
usb_fill_bulk_urb(urb, serial->dev,
usb_sndbulkpipe(serial->dev,
port->bulk_out_endpointAddress),
- buffer, count, sierra_outdat_callback, port);
+ buffer, writesize, sierra_outdat_callback, port);
+
+ /* Handle the need to send a zero length packet */
+ urb->transfer_flags |= URB_ZERO_PACKET;
/* send it down the pipe */
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval) {
dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed "
- "with status = %d\n", __func__, status);
- count = status;
+ "with status = %d\n", __func__, retval);
goto error;
}
@@ -424,7 +468,7 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
* really free it when it is finished with it */
usb_free_urb(urb);
- return count;
+ return writesize;
error:
usb_free_urb(urb);
error_no_urb:
@@ -432,8 +476,10 @@ error_no_urb:
error_no_buffer:
spin_lock_irqsave(&portdata->lock, flags);
--portdata->outstanding_urbs;
+ dev_dbg(&port->dev, "%s - 2. outstanding_urbs: %d\n", __func__,
+ portdata->outstanding_urbs);
spin_unlock_irqrestore(&portdata->lock, flags);
- return count;
+ return retval;
}
static void sierra_indat_callback(struct urb *urb)
@@ -445,33 +491,39 @@ static void sierra_indat_callback(struct urb *urb)
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
- dbg("%s: %p", __func__, urb);
-
endpoint = usb_pipeendpoint(urb->pipe);
- port = urb->context;
+ port = urb->context;
+
+ dev_dbg(&port->dev, "%s: %p\n", __func__, urb);
if (status) {
dev_dbg(&port->dev, "%s: nonzero status: %d on"
- " endpoint %02x.", __func__, status, endpoint);
+ " endpoint %02x\n", __func__, status, endpoint);
} else {
if (urb->actual_length) {
tty = tty_port_tty_get(&port->port);
+
tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
+
tty_kref_put(tty);
- } else
+ usb_serial_debug_data(debug, &port->dev, __func__,
+ urb->actual_length, data);
+ } else {
dev_dbg(&port->dev, "%s: empty read urb"
- " received", __func__);
-
- /* Resubmit urb so we continue receiving */
- if (port->port.count && status != -ESHUTDOWN) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
- dev_err(&port->dev, "resubmit read urb failed."
- "(%d)\n", err);
+ " received\n", __func__);
}
}
+
+ /* Resubmit urb so we continue receiving */
+ if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err)
+ dev_err(&port->dev, "resubmit read urb failed."
+ "(%d)\n", err);
+ }
+
return;
}
@@ -483,8 +535,7 @@ static void sierra_instat_callback(struct urb *urb)
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
- dev_dbg(&port->dev, "%s", __func__);
- dev_dbg(&port->dev, "%s: urb %p port %p has data %p", __func__,
+ dev_dbg(&port->dev, "%s: urb %p port %p has data %p\n", __func__,
urb, port, portdata);
if (status == 0) {
@@ -504,7 +555,7 @@ static void sierra_instat_callback(struct urb *urb)
sizeof(struct usb_ctrlrequest));
struct tty_struct *tty;
- dev_dbg(&port->dev, "%s: signal x%x", __func__,
+ dev_dbg(&port->dev, "%s: signal x%x\n", __func__,
signals);
old_dcd_state = portdata->dcd_state;
@@ -519,20 +570,20 @@ static void sierra_instat_callback(struct urb *urb)
tty_hangup(tty);
tty_kref_put(tty);
} else {
- dev_dbg(&port->dev, "%s: type %x req %x",
+ dev_dbg(&port->dev, "%s: type %x req %x\n",
__func__, req_pkt->bRequestType,
req_pkt->bRequest);
}
} else
- dev_dbg(&port->dev, "%s: error %d", __func__, status);
+ dev_dbg(&port->dev, "%s: error %d\n", __func__, status);
/* Resubmit urb so we continue receiving IRQ data */
- if (status != -ESHUTDOWN) {
+ if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) {
urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
- dev_dbg(&port->dev, "%s: resubmit intr urb "
- "failed. (%d)", __func__, err);
+ dev_err(&port->dev, "%s: resubmit intr urb "
+ "failed. (%d)\n", __func__, err);
}
}
@@ -542,7 +593,7 @@ static int sierra_write_room(struct tty_struct *tty)
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
unsigned long flags;
- dev_dbg(&port->dev, "%s - port %d", __func__, port->number);
+ dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
/* try to give a good number back based on if we have any free urbs at
* this point in time */
@@ -557,67 +608,99 @@ static int sierra_write_room(struct tty_struct *tty)
return 2048;
}
-static int sierra_open(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void sierra_stop_rx_urbs(struct usb_serial_port *port)
{
- struct sierra_port_private *portdata;
- struct usb_serial *serial = port->serial;
int i;
- struct urb *urb;
- int result;
+ struct sierra_port_private *portdata = usb_get_serial_port_data(port);
- portdata = usb_get_serial_port_data(port);
+ for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++)
+ usb_kill_urb(portdata->in_urbs[i]);
- dev_dbg(&port->dev, "%s", __func__);
+ usb_kill_urb(port->interrupt_in_urb);
+}
- /* Set some sane defaults */
- portdata->rts_state = 1;
- portdata->dtr_state = 1;
+static int sierra_submit_rx_urbs(struct usb_serial_port *port, gfp_t mem_flags)
+{
+ int ok_cnt;
+ int err = -EINVAL;
+ int i;
+ struct urb *urb;
+ struct sierra_port_private *portdata = usb_get_serial_port_data(port);
- /* Reset low level data toggle and start reading from endpoints */
- for (i = 0; i < N_IN_URB; i++) {
+ ok_cnt = 0;
+ for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) {
urb = portdata->in_urbs[i];
if (!urb)
continue;
- if (urb->dev != serial->dev) {
- dev_dbg(&port->dev, "%s: dev %p != %p",
- __func__, urb->dev, serial->dev);
- continue;
+ err = usb_submit_urb(urb, mem_flags);
+ if (err) {
+ dev_err(&port->dev, "%s: submit urb failed: %d\n",
+ __func__, err);
+ } else {
+ ok_cnt++;
}
+ }
- /*
- * make sure endpoint data toggle is synchronized with the
- * device
- */
- usb_clear_halt(urb->dev, urb->pipe);
-
- result = usb_submit_urb(urb, GFP_KERNEL);
- if (result) {
- dev_err(&port->dev, "submit urb %d failed (%d) %d\n",
- i, result, urb->transfer_buffer_length);
+ if (ok_cnt && port->interrupt_in_urb) {
+ err = usb_submit_urb(port->interrupt_in_urb, mem_flags);
+ if (err) {
+ dev_err(&port->dev, "%s: submit intr urb failed: %d\n",
+ __func__, err);
}
}
- sierra_send_setup(tty, port);
+ if (ok_cnt > 0) /* at least one rx urb submitted */
+ return 0;
+ else
+ return err;
+}
- /* start up the interrupt endpoint if we have one */
- if (port->interrupt_in_urb) {
- result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev, "submit irq_in urb failed %d\n",
- result);
+static struct urb *sierra_setup_urb(struct usb_serial *serial, int endpoint,
+ int dir, void *ctx, int len,
+ gfp_t mem_flags,
+ usb_complete_t callback)
+{
+ struct urb *urb;
+ u8 *buf;
+
+ if (endpoint == -1)
+ return NULL;
+
+ urb = usb_alloc_urb(0, mem_flags);
+ if (urb == NULL) {
+ dev_dbg(&serial->dev->dev, "%s: alloc for endpoint %d failed\n",
+ __func__, endpoint);
+ return NULL;
}
- return 0;
+
+ buf = kmalloc(len, mem_flags);
+ if (buf) {
+ /* Fill URB using supplied data */
+ usb_fill_bulk_urb(urb, serial->dev,
+ usb_sndbulkpipe(serial->dev, endpoint) | dir,
+ buf, len, callback, ctx);
+
+ /* debug */
+ dev_dbg(&serial->dev->dev, "%s %c u : %p d:%p\n", __func__,
+ dir == USB_DIR_IN ? 'i' : 'o', urb, buf);
+ } else {
+ dev_dbg(&serial->dev->dev, "%s %c u:%p d:%p\n", __func__,
+ dir == USB_DIR_IN ? 'i' : 'o', urb, buf);
+
+ sierra_release_urb(urb);
+ urb = NULL;
+ }
+
+ return urb;
}
-static void sierra_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void sierra_close(struct usb_serial_port *port)
{
int i;
struct usb_serial *serial = port->serial;
struct sierra_port_private *portdata;
- dev_dbg(&port->dev, "%s", __func__);
+ dev_dbg(&port->dev, "%s\n", __func__);
portdata = usb_get_serial_port_data(port);
portdata->rts_state = 0;
@@ -626,27 +709,85 @@ static void sierra_close(struct tty_struct *tty,
if (serial->dev) {
mutex_lock(&serial->disc_mutex);
if (!serial->disconnected)
- sierra_send_setup(tty, port);
+ sierra_send_setup(port);
mutex_unlock(&serial->disc_mutex);
- /* Stop reading/writing urbs */
- for (i = 0; i < N_IN_URB; i++)
- usb_kill_urb(portdata->in_urbs[i]);
+ /* Stop reading urbs */
+ sierra_stop_rx_urbs(port);
+ /* .. and release them */
+ for (i = 0; i < N_IN_URB; i++) {
+ sierra_release_urb(portdata->in_urbs[i]);
+ portdata->in_urbs[i] = NULL;
+ }
}
+}
- usb_kill_urb(port->interrupt_in_urb);
- tty_port_tty_set(&port->port, NULL);
+static int sierra_open(struct tty_struct *tty,
+ struct usb_serial_port *port, struct file *filp)
+{
+ struct sierra_port_private *portdata;
+ struct usb_serial *serial = port->serial;
+ int i;
+ int err;
+ int endpoint;
+ struct urb *urb;
+
+ portdata = usb_get_serial_port_data(port);
+
+ dev_dbg(&port->dev, "%s\n", __func__);
+
+ /* Set some sane defaults */
+ portdata->rts_state = 1;
+ portdata->dtr_state = 1;
+
+
+ endpoint = port->bulk_in_endpointAddress;
+ for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) {
+ urb = sierra_setup_urb(serial, endpoint, USB_DIR_IN, port,
+ IN_BUFLEN, GFP_KERNEL,
+ sierra_indat_callback);
+ portdata->in_urbs[i] = urb;
+ }
+ /* clear halt condition */
+ usb_clear_halt(serial->dev,
+ usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN);
+
+ err = sierra_submit_rx_urbs(port, GFP_KERNEL);
+ if (err) {
+ /* get rid of everything as in close */
+ sierra_close(port);
+ return err;
+ }
+ sierra_send_setup(port);
+
+ return 0;
+}
+
+
+static void sierra_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct usb_serial *serial = port->serial;
+ struct sierra_port_private *portdata;
+
+ portdata = usb_get_serial_port_data(port);
+ portdata->rts_state = on;
+ portdata->dtr_state = on;
+
+ if (serial->dev) {
+ mutex_lock(&serial->disc_mutex);
+ if (!serial->disconnected)
+ sierra_send_setup(port);
+ mutex_unlock(&serial->disc_mutex);
+ }
}
static int sierra_startup(struct usb_serial *serial)
{
struct usb_serial_port *port;
struct sierra_port_private *portdata;
- struct urb *urb;
int i;
- int j;
- dev_dbg(&serial->dev->dev, "%s", __func__);
+ dev_dbg(&serial->dev->dev, "%s\n", __func__);
/* Set Device mode to D0 */
sierra_set_power_state(serial->dev, 0x0000);
@@ -661,51 +802,25 @@ static int sierra_startup(struct usb_serial *serial)
portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
if (!portdata) {
dev_dbg(&port->dev, "%s: kmalloc for "
- "sierra_port_private (%d) failed!.",
+ "sierra_port_private (%d) failed!.\n",
__func__, i);
return -ENOMEM;
}
spin_lock_init(&portdata->lock);
- for (j = 0; j < N_IN_URB; j++) {
- portdata->in_buffer[j] = kmalloc(IN_BUFLEN, GFP_KERNEL);
- if (!portdata->in_buffer[j]) {
- for (--j; j >= 0; j--)
- kfree(portdata->in_buffer[j]);
- kfree(portdata);
- return -ENOMEM;
- }
- }
-
+ /* Set the port private data pointer */
usb_set_serial_port_data(port, portdata);
-
- /* initialize the in urbs */
- for (j = 0; j < N_IN_URB; ++j) {
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (urb == NULL) {
- dev_dbg(&port->dev, "%s: alloc for in "
- "port failed.", __func__);
- continue;
- }
- /* Fill URB using supplied data. */
- usb_fill_bulk_urb(urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- portdata->in_buffer[j], IN_BUFLEN,
- sierra_indat_callback, port);
- portdata->in_urbs[j] = urb;
- }
}
return 0;
}
-static void sierra_shutdown(struct usb_serial *serial)
+static void sierra_disconnect(struct usb_serial *serial)
{
- int i, j;
+ int i;
struct usb_serial_port *port;
struct sierra_port_private *portdata;
- dev_dbg(&serial->dev->dev, "%s", __func__);
+ dev_dbg(&serial->dev->dev, "%s\n", __func__);
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
@@ -714,12 +829,6 @@ static void sierra_shutdown(struct usb_serial *serial)
portdata = usb_get_serial_port_data(port);
if (!portdata)
continue;
-
- for (j = 0; j < N_IN_URB; j++) {
- usb_kill_urb(portdata->in_urbs[j]);
- usb_free_urb(portdata->in_urbs[j]);
- kfree(portdata->in_buffer[j]);
- }
kfree(portdata);
usb_set_serial_port_data(port, NULL);
}
@@ -737,13 +846,14 @@ static struct usb_serial_driver sierra_device = {
.probe = sierra_probe,
.open = sierra_open,
.close = sierra_close,
+ .dtr_rts = sierra_dtr_rts,
.write = sierra_write,
.write_room = sierra_write_room,
.set_termios = sierra_set_termios,
.tiocmget = sierra_tiocmget,
.tiocmset = sierra_tiocmset,
.attach = sierra_startup,
- .shutdown = sierra_shutdown,
+ .disconnect = sierra_disconnect,
.read_int_callback = sierra_instat_callback,
};
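The sierra.c write path above clamps each transfer to MAX_TRANSFER and sets URB_ZERO_PACKET so the host controller terminates a payload that is an exact multiple of the endpoint packet size with a zero-length packet. A stripped-down version of that submit path is sketched below; the outstanding-URB accounting is omitted, the names are illustrative, and the completion callback is expected to free the buffer, as sierra_outdat_callback does in the hunks above.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static int example_submit_out(struct usb_serial_port *port,
			      const unsigned char *buf, int count,
			      usb_complete_t callback)
{
	struct usb_serial *serial = port->serial;
	size_t len = min_t(size_t, count, PAGE_SIZE - 512);
	unsigned char *buffer;
	struct urb *urb;
	int retval;

	buffer = kmemdup(buf, len, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		kfree(buffer);
		return -ENOMEM;
	}

	usb_fill_bulk_urb(urb, serial->dev,
			  usb_sndbulkpipe(serial->dev,
					  port->bulk_out_endpointAddress),
			  buffer, len, callback, port);

	/* ask the HCD to end a max-packet-multiple transfer with a
	 * zero-length packet so the device sees the boundary */
	urb->transfer_flags |= URB_ZERO_PACKET;

	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		kfree(buffer);
		usb_free_urb(urb);
		return retval;
	}

	/* drop our reference; the completion callback frees the buffer */
	usb_free_urb(urb);
	return len;
}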
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 5e7528cc81a..3c249d8e8b8 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -356,7 +356,7 @@ cleanup:
}
/* call when the device plug out. free all the memory alloced by probe */
-static void spcp8x5_shutdown(struct usb_serial *serial)
+static void spcp8x5_release(struct usb_serial *serial)
{
int i;
struct spcp8x5_private *priv;
@@ -366,7 +366,6 @@ static void spcp8x5_shutdown(struct usb_serial *serial)
if (priv) {
free_ringbuf(priv->buf);
kfree(priv);
- usb_set_serial_port_data(serial->port[i] , NULL);
}
}
}
@@ -446,66 +445,47 @@ static void spcp8x5_set_workMode(struct usb_device *dev, u16 value,
"RTSCTS usb_control_msg(enable flowctrl) = %d\n", ret);
}
+static int spcp8x5_carrier_raised(struct usb_serial_port *port)
+{
+ struct spcp8x5_private *priv = usb_get_serial_port_data(port);
+ if (priv->line_status & MSR_STATUS_LINE_DCD)
+ return 1;
+ return 0;
+}
+
+static void spcp8x5_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct spcp8x5_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ u8 control;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (on)
+ priv->line_control = MCR_CONTROL_LINE_DTR
+ | MCR_CONTROL_LINE_RTS;
+ else
+ priv->line_control &= ~ (MCR_CONTROL_LINE_DTR
+ | MCR_CONTROL_LINE_RTS);
+ control = priv->line_control;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ spcp8x5_set_ctrlLine(port->serial->dev, control , priv->type);
+}
+
/* close the serial port. We should wait for data sending to device 1st and
* then kill all urb. */
-static void spcp8x5_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void spcp8x5_close(struct usb_serial_port *port)
{
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- unsigned int c_cflag;
- int bps;
- long timeout;
- wait_queue_t wait;
int result;
dbg("%s - port %d", __func__, port->number);
- /* wait for data to drain from the buffer */
spin_lock_irqsave(&priv->lock, flags);
- timeout = SPCP8x5_CLOSING_WAIT;
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&tty->write_wait, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (ringbuf_avail_data(priv->buf) == 0 ||
- timeout == 0 || signal_pending(current))
- break;
- spin_unlock_irqrestore(&priv->lock, flags);
- timeout = schedule_timeout(timeout);
- spin_lock_irqsave(&priv->lock, flags);
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&tty->write_wait, &wait);
-
/* clear out any remaining data in the buffer */
clear_ringbuf(priv->buf);
spin_unlock_irqrestore(&priv->lock, flags);
- /* wait for characters to drain from the device (this is long enough
- * for the entire all byte spcp8x5 hardware buffer to drain with no
- * flow control for data rates of 1200 bps or more, for lower rates we
- * should really know how much data is in the buffer to compute a delay
- * that is not unnecessarily long) */
- bps = tty_get_baud_rate(tty);
- if (bps > 1200)
- timeout = max((HZ*2560) / bps, HZ/10);
- else
- timeout = 2*HZ;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(timeout);
-
- /* clear control lines */
- if (tty) {
- c_cflag = tty->termios->c_cflag;
- if (c_cflag & HUPCL) {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
- spcp8x5_set_ctrlLine(port->serial->dev, 0 , priv->type);
- }
- }
-
/* kill urb */
if (port->write_urb != NULL) {
result = usb_unlink_urb(port->write_urb);
@@ -665,13 +645,6 @@ static int spcp8x5_open(struct tty_struct *tty,
if (ret)
return ret;
- spin_lock_irqsave(&priv->lock, flags);
- if (tty && (tty->termios->c_cflag & CBAUD))
- priv->line_control = MCR_DTR | MCR_RTS;
- else
- priv->line_control = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
-
spcp8x5_set_ctrlLine(serial->dev, priv->line_control , priv->type);
/* Setup termios */
@@ -691,9 +664,10 @@ static int spcp8x5_open(struct tty_struct *tty,
port->read_urb->dev = serial->dev;
ret = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (ret) {
- spcp8x5_close(tty, port, NULL);
+ spcp8x5_close(port);
return -EPROTO;
}
+ port->port.drain_delay = 256;
return 0;
}
@@ -1033,6 +1007,8 @@ static struct usb_serial_driver spcp8x5_device = {
.num_ports = 1,
.open = spcp8x5_open,
.close = spcp8x5_close,
+ .dtr_rts = spcp8x5_dtr_rts,
+ .carrier_raised = spcp8x5_carrier_raised,
.write = spcp8x5_write,
.set_termios = spcp8x5_set_termios,
.ioctl = spcp8x5_ioctl,
@@ -1043,7 +1019,7 @@ static struct usb_serial_driver spcp8x5_device = {
.write_bulk_callback = spcp8x5_write_bulk_callback,
.chars_in_buffer = spcp8x5_chars_in_buffer,
.attach = spcp8x5_startup,
- .shutdown = spcp8x5_shutdown,
+ .release = spcp8x5_release,
};
static int __init spcp8x5_init(void)
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 69879e43794..6157fac9366 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -152,8 +152,7 @@ static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port,
return result;
}
-static void symbol_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp)
+static void symbol_close(struct usb_serial_port *port)
{
struct symbol_private *priv = usb_get_serial_data(port->serial);
@@ -268,7 +267,7 @@ error:
return retval;
}
-static void symbol_shutdown(struct usb_serial *serial)
+static void symbol_disconnect(struct usb_serial *serial)
{
struct symbol_private *priv = usb_get_serial_data(serial);
@@ -276,9 +275,16 @@ static void symbol_shutdown(struct usb_serial *serial)
usb_kill_urb(priv->int_urb);
usb_free_urb(priv->int_urb);
+}
+
+static void symbol_release(struct usb_serial *serial)
+{
+ struct symbol_private *priv = usb_get_serial_data(serial);
+
+ dbg("%s", __func__);
+
kfree(priv->int_buffer);
kfree(priv);
- usb_set_serial_data(serial, NULL);
}
static struct usb_driver symbol_driver = {
@@ -300,7 +306,8 @@ static struct usb_serial_driver symbol_device = {
.attach = symbol_startup,
.open = symbol_open,
.close = symbol_close,
- .shutdown = symbol_shutdown,
+ .disconnect = symbol_disconnect,
+ .release = symbol_release,
.throttle = symbol_throttle,
.unthrottle = symbol_unthrottle,
};
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 0a64bac306e..991d8232e37 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -97,11 +97,10 @@ struct ti_device {
/* Function Declarations */
static int ti_startup(struct usb_serial *serial);
-static void ti_shutdown(struct usb_serial *serial);
+static void ti_release(struct usb_serial *serial);
static int ti_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *file);
-static void ti_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *file);
+static void ti_close(struct usb_serial_port *port);
static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count);
static int ti_write_room(struct tty_struct *tty);
@@ -231,7 +230,7 @@ static struct usb_serial_driver ti_1port_device = {
.id_table = ti_id_table_3410,
.num_ports = 1,
.attach = ti_startup,
- .shutdown = ti_shutdown,
+ .release = ti_release,
.open = ti_open,
.close = ti_close,
.write = ti_write,
@@ -259,7 +258,7 @@ static struct usb_serial_driver ti_2port_device = {
.id_table = ti_id_table_5052,
.num_ports = 2,
.attach = ti_startup,
- .shutdown = ti_shutdown,
+ .release = ti_release,
.open = ti_open,
.close = ti_close,
.write = ti_write,
@@ -474,7 +473,7 @@ free_tdev:
}
-static void ti_shutdown(struct usb_serial *serial)
+static void ti_release(struct usb_serial *serial)
{
int i;
struct ti_device *tdev = usb_get_serial_data(serial);
@@ -487,12 +486,10 @@ static void ti_shutdown(struct usb_serial *serial)
if (tport) {
ti_buf_free(tport->tp_write_buf);
kfree(tport);
- usb_set_serial_port_data(serial->port[i], NULL);
}
}
kfree(tdev);
- usb_set_serial_data(serial, NULL);
}
@@ -647,8 +644,7 @@ release_lock:
}
-static void ti_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *file)
+static void ti_close(struct usb_serial_port *port)
{
struct ti_device *tdev;
struct ti_port *tport;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 0a566eea49c..d595aa5586a 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -141,6 +141,14 @@ static void destroy_serial(struct kref *kref)
if (serial->minor != SERIAL_TTY_NO_MINOR)
return_serial(serial);
+ serial->type->release(serial);
+
+ for (i = 0; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ if (port)
+ put_device(&port->dev);
+ }
+
/* If this is a "fake" port, we have to clean it up here, as it will
* not get cleaned up in port_release() as it was never registered with
* the driver core */
@@ -148,9 +156,8 @@ static void destroy_serial(struct kref *kref)
for (i = serial->num_ports;
i < serial->num_port_pointers; ++i) {
port = serial->port[i];
- if (!port)
- continue;
- port_free(port);
+ if (port)
+ port_free(port);
}
}
@@ -238,9 +245,11 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
goto bailout_interface_put;
mutex_unlock(&serial->disc_mutex);
}
-
mutex_unlock(&port->mutex);
- return 0;
+ /* Now do the correct tty layer semantics */
+ retval = tty_port_block_til_ready(&port->port, tty, filp);
+ if (retval == 0)
+ return 0;
bailout_interface_put:
usb_autopm_put_interface(serial->interface);
@@ -259,64 +268,89 @@ bailout_serial_put:
return retval;
}
-static void serial_close(struct tty_struct *tty, struct file *filp)
+/**
+ * serial_do_down - shut down hardware
+ * @port: port to shut down
+ *
+ * Shut down a USB port unless it is the console. We never shut down the
+ * console hardware as it will always be in use.
+ *
+ * Don't free any resources at this point
+ */
+static void serial_do_down(struct usb_serial_port *port)
{
- struct usb_serial_port *port = tty->driver_data;
+ struct usb_serial_driver *drv = port->serial->type;
struct usb_serial *serial;
struct module *owner;
- int count;
- if (!port)
+ /* The console is magical, do not hang up the console hardware
+ or there will be tears */
+ if (port->console)
return;
- dbg("%s - port %d", __func__, port->number);
-
mutex_lock(&port->mutex);
serial = port->serial;
owner = serial->type->driver.owner;
- if (port->port.count == 0) {
- mutex_unlock(&port->mutex);
- return;
- }
+ if (drv->close)
+ drv->close(port);
- if (port->port.count == 1)
- /* only call the device specific close if this
- * port is being closed by the last owner. Ensure we do
- * this before we drop the port count. The call is protected
- * by the port mutex
- */
- serial->type->close(tty, port, filp);
-
- if (port->port.count == (port->console ? 2 : 1)) {
- struct tty_struct *tty = tty_port_tty_get(&port->port);
- if (tty) {
- /* We must do this before we drop the port count to
- zero. */
- if (tty->driver_data)
- tty->driver_data = NULL;
- tty_port_tty_set(&port->port, NULL);
- tty_kref_put(tty);
- }
- }
-
- --port->port.count;
- count = port->port.count;
mutex_unlock(&port->mutex);
- put_device(&port->dev);
+}
+
+/**
+ * serial_do_free - free resources post close/hangup
+ * @port: port to free up
+ *
+ * Do the resource freeing and refcount dropping for the port. We must
+ * be careful about ordering and we must avoid freeing up the console.
+ */
+
+static void serial_do_free(struct usb_serial_port *port)
+{
+ struct usb_serial *serial;
+ struct module *owner;
+
+ /* The console is magical, do not hang up the console hardware
+ or there will be tears */
+ if (port->console)
+ return;
+ serial = port->serial;
+ owner = serial->type->driver.owner;
+ put_device(&port->dev);
/* Mustn't dereference port any more */
- if (count == 0) {
- mutex_lock(&serial->disc_mutex);
- if (!serial->disconnected)
- usb_autopm_put_interface(serial->interface);
- mutex_unlock(&serial->disc_mutex);
- }
+ mutex_lock(&serial->disc_mutex);
+ if (!serial->disconnected)
+ usb_autopm_put_interface(serial->interface);
+ mutex_unlock(&serial->disc_mutex);
usb_serial_put(serial);
-
/* Mustn't dereference serial any more */
- if (count == 0)
- module_put(owner);
+ module_put(owner);
+}
+
+static void serial_close(struct tty_struct *tty, struct file *filp)
+{
+ struct usb_serial_port *port = tty->driver_data;
+
+ dbg("%s - port %d", __func__, port->number);
+
+
+ if (tty_port_close_start(&port->port, tty, filp) == 0)
+ return;
+
+ serial_do_down(port);
+ tty_port_close_end(&port->port, tty);
+ tty_port_tty_set(&port->port, NULL);
+ serial_do_free(port);
+}
+
+static void serial_hangup(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ serial_do_down(port);
+ tty_port_hangup(&port->port);
+ serial_do_free(port);
}
static int serial_write(struct tty_struct *tty, const unsigned char *buf,
@@ -648,6 +682,29 @@ static struct usb_serial_driver *search_serial_device(
return NULL;
}
+static int serial_carrier_raised(struct tty_port *port)
+{
+ struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
+ struct usb_serial_driver *drv = p->serial->type;
+ if (drv->carrier_raised)
+ return drv->carrier_raised(p);
+ /* No carrier control - don't block */
+ return 1;
+}
+
+static void serial_dtr_rts(struct tty_port *port, int on)
+{
+ struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
+ struct usb_serial_driver *drv = p->serial->type;
+ if (drv->dtr_rts)
+ drv->dtr_rts(p, on);
+}
+
+static const struct tty_port_operations serial_port_ops = {
+ .carrier_raised = serial_carrier_raised,
+ .dtr_rts = serial_dtr_rts,
+};
+
int usb_serial_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -841,6 +898,7 @@ int usb_serial_probe(struct usb_interface *interface,
if (!port)
goto probe_error;
tty_port_init(&port->port);
+ port->port.ops = &serial_port_ops;
port->serial = serial;
spin_lock_init(&port->lock);
mutex_init(&port->mutex);
@@ -974,6 +1032,7 @@ int usb_serial_probe(struct usb_interface *interface,
if (retval > 0) {
/* quietly accept this device, but don't bind to a
serial port as it's about to disappear */
+ serial->num_ports = 0;
goto exit;
}
}
@@ -994,10 +1053,15 @@ int usb_serial_probe(struct usb_interface *interface,
dev_set_name(&port->dev, "ttyUSB%d", port->number);
dbg ("%s - registering %s", __func__, dev_name(&port->dev));
+ port->dev_state = PORT_REGISTERING;
retval = device_register(&port->dev);
- if (retval)
+ if (retval) {
dev_err(&port->dev, "Error registering port device, "
"continuing\n");
+ port->dev_state = PORT_UNREGISTERED;
+ } else {
+ port->dev_state = PORT_REGISTERED;
+ }
}
usb_serial_console_init(debug, minor);
@@ -1061,31 +1125,38 @@ void usb_serial_disconnect(struct usb_interface *interface)
serial->disconnected = 1;
mutex_unlock(&serial->disc_mutex);
- /* Unfortunately, many of the sub-drivers expect the port structures
- * to exist when their shutdown method is called, so we have to go
- * through this awkward two-step unregistration procedure.
- */
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
if (port) {
struct tty_struct *tty = tty_port_tty_get(&port->port);
if (tty) {
+ /* The hangup will occur asynchronously but
+ the object refcounts will sort out all the
+ cleanup */
tty_hangup(tty);
tty_kref_put(tty);
}
kill_traffic(port);
cancel_work_sync(&port->work);
- device_del(&port->dev);
- }
- }
- serial->type->shutdown(serial);
- for (i = 0; i < serial->num_ports; ++i) {
- port = serial->port[i];
- if (port) {
- put_device(&port->dev);
- serial->port[i] = NULL;
+ if (port->dev_state == PORT_REGISTERED) {
+
+ /* Make sure the port is bound so that the
+ * driver's port_remove method is called.
+ */
+ if (!port->dev.driver) {
+ int rc;
+
+ port->dev.driver =
+ &serial->type->driver;
+ rc = device_bind_driver(&port->dev);
+ }
+ port->dev_state = PORT_UNREGISTERING;
+ device_del(&port->dev);
+ port->dev_state = PORT_UNREGISTERED;
+ }
}
}
+ serial->type->disconnect(serial);
/* let the last holder of this object
* cause it to be cleaned up */
@@ -1134,6 +1205,7 @@ static const struct tty_operations serial_ops = {
.open = serial_open,
.close = serial_close,
.write = serial_write,
+ .hangup = serial_hangup,
.write_room = serial_write_room,
.ioctl = serial_ioctl,
.set_termios = serial_set_termios,
@@ -1146,6 +1218,7 @@ static const struct tty_operations serial_ops = {
.proc_fops = &serial_proc_fops,
};
+
struct tty_driver *usb_serial_tty_driver;
static int __init usb_serial_init(void)
@@ -1261,7 +1334,8 @@ static void fixup_generic(struct usb_serial_driver *device)
set_to_generic_if_null(device, chars_in_buffer);
set_to_generic_if_null(device, read_bulk_callback);
set_to_generic_if_null(device, write_bulk_callback);
- set_to_generic_if_null(device, shutdown);
+ set_to_generic_if_null(device, disconnect);
+ set_to_generic_if_null(device, release);
}
int usb_serial_register(struct usb_serial_driver *driver)
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 6c9cbb59552..614800972dc 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -15,7 +15,19 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#define URB_DEBUG_MAX_IN_FLIGHT_URBS 4000
#define USB_DEBUG_MAX_PACKET_SIZE 8
+#define USB_DEBUG_BRK_SIZE 8
+static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
+ 0x00,
+ 0xff,
+ 0x01,
+ 0xfe,
+ 0x00,
+ 0xfe,
+ 0x01,
+ 0xff,
+};
static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x0525, 0x127a) },
@@ -38,6 +50,32 @@ static int usb_debug_open(struct tty_struct *tty, struct usb_serial_port *port,
return usb_serial_generic_open(tty, port, filp);
}
+/* This HW really does not support a serial break, so one will be
+ * emulated whenever the break state is set to true.
+ */
+static void usb_debug_break_ctl(struct tty_struct *tty, int break_state)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ if (!break_state)
+ return;
+ usb_serial_generic_write(tty, port, USB_DEBUG_BRK, USB_DEBUG_BRK_SIZE);
+}
+
+static void usb_debug_read_bulk_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+
+ if (urb->actual_length == USB_DEBUG_BRK_SIZE &&
+ memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
+ USB_DEBUG_BRK_SIZE) == 0) {
+ usb_serial_handle_break(port);
+ usb_serial_generic_resubmit_read_urb(port, GFP_ATOMIC);
+ return;
+ }
+
+ usb_serial_generic_read_bulk_callback(urb);
+}
+
static struct usb_serial_driver debug_device = {
.driver = {
.owner = THIS_MODULE,
@@ -46,6 +84,9 @@ static struct usb_serial_driver debug_device = {
.id_table = id_table,
.num_ports = 1,
.open = usb_debug_open,
+ .max_in_flight_urbs = URB_DEBUG_MAX_IN_FLIGHT_URBS,
+ .break_ctl = usb_debug_break_ctl,
+ .read_bulk_callback = usb_debug_read_bulk_callback,
};
static int __init debug_init(void)
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 5ac414bda71..f5d0f64dcc5 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -38,8 +38,7 @@
/* function prototypes for a handspring visor */
static int visor_open(struct tty_struct *tty, struct usb_serial_port *port,
struct file *filp);
-static void visor_close(struct tty_struct *tty, struct usb_serial_port *port,
- struct file *filp);
+static void visor_close(struct usb_serial_port *port);
static int visor_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int visor_write_room(struct tty_struct *tty);
@@ -48,7 +47,7 @@ static void visor_unthrottle(struct tty_struct *tty);
static int visor_probe(struct usb_serial *serial,
const struct usb_device_id *id);
static int visor_calc_num_ports(struct usb_serial *serial);
-static void visor_shutdown(struct usb_serial *serial);
+static void visor_release(struct usb_serial *serial);
static void visor_write_bulk_callback(struct urb *urb);
static void visor_read_bulk_callback(struct urb *urb);
static void visor_read_int_callback(struct urb *urb);
@@ -203,7 +202,7 @@ static struct usb_serial_driver handspring_device = {
.attach = treo_attach,
.probe = visor_probe,
.calc_num_ports = visor_calc_num_ports,
- .shutdown = visor_shutdown,
+ .release = visor_release,
.write = visor_write,
.write_room = visor_write_room,
.write_bulk_callback = visor_write_bulk_callback,
@@ -228,7 +227,7 @@ static struct usb_serial_driver clie_5_device = {
.attach = clie_5_attach,
.probe = visor_probe,
.calc_num_ports = visor_calc_num_ports,
- .shutdown = visor_shutdown,
+ .release = visor_release,
.write = visor_write,
.write_room = visor_write_room,
.write_bulk_callback = visor_write_bulk_callback,
@@ -324,8 +323,7 @@ exit:
}
-static void visor_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void visor_close(struct usb_serial_port *port)
{
struct visor_private *priv = usb_get_serial_port_data(port);
unsigned char *transfer_buffer;
@@ -920,7 +918,7 @@ static int clie_5_attach(struct usb_serial *serial)
return generic_startup(serial);
}
-static void visor_shutdown(struct usb_serial *serial)
+static void visor_release(struct usb_serial *serial)
{
struct visor_private *priv;
int i;
@@ -929,10 +927,7 @@ static void visor_shutdown(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; i++) {
priv = usb_get_serial_port_data(serial->port[i]);
- if (priv) {
- usb_set_serial_port_data(serial->port[i], NULL);
- kfree(priv);
- }
+ kfree(priv);
}
}
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5335d3211c0..8d126dd7a02 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -144,11 +144,10 @@ static int whiteheat_firmware_attach(struct usb_serial *serial);
/* function prototypes for the Connect Tech WhiteHEAT serial converter */
static int whiteheat_attach(struct usb_serial *serial);
-static void whiteheat_shutdown(struct usb_serial *serial);
+static void whiteheat_release(struct usb_serial *serial);
static int whiteheat_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
-static void whiteheat_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+static void whiteheat_close(struct usb_serial_port *port);
static int whiteheat_write(struct tty_struct *tty,
struct usb_serial_port *port,
const unsigned char *buf, int count);
@@ -190,7 +189,7 @@ static struct usb_serial_driver whiteheat_device = {
.id_table = id_table_std,
.num_ports = 4,
.attach = whiteheat_attach,
- .shutdown = whiteheat_shutdown,
+ .release = whiteheat_release,
.open = whiteheat_open,
.close = whiteheat_close,
.write = whiteheat_write,
@@ -618,7 +617,7 @@ no_command_buffer:
}
-static void whiteheat_shutdown(struct usb_serial *serial)
+static void whiteheat_release(struct usb_serial *serial)
{
struct usb_serial_port *command_port;
struct usb_serial_port *port;
@@ -712,8 +711,7 @@ exit:
}
-static void whiteheat_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp)
+static void whiteheat_close(struct usb_serial_port *port)
{
struct whiteheat_private *info = usb_get_serial_port_data(port);
struct whiteheat_urb_wrap *wrap;
@@ -723,31 +721,7 @@ static void whiteheat_close(struct tty_struct *tty,
dbg("%s - port %d", __func__, port->number);
- mutex_lock(&port->serial->disc_mutex);
- /* filp is NULL when called from usb_serial_disconnect */
- if ((filp && (tty_hung_up_p(filp))) || port->serial->disconnected) {
- mutex_unlock(&port->serial->disc_mutex);
- return;
- }
- mutex_unlock(&port->serial->disc_mutex);
-
- tty->closing = 1;
-
-/*
- * Not currently in use; tty_wait_until_sent() calls
- * serial_chars_in_buffer() which deadlocks on the second semaphore
- * acquisition. This should be fixed at some point. Greg's been
- * notified.
- if ((filp->f_flags & (O_NDELAY | O_NONBLOCK)) == 0) {
- tty_wait_until_sent(tty, CLOSING_DELAY);
- }
-*/
-
- tty_driver_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
firm_report_tx_done(port);
-
firm_close(port);
/* shutdown our bulk reads and writes */
@@ -775,10 +749,7 @@ static void whiteheat_close(struct tty_struct *tty,
}
spin_unlock_irq(&info->lock);
mutex_unlock(&info->deathwarrant);
-
stop_command_port(port->serial);
-
- tty->closing = 0;
}
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 2dd9bd4bff5..ec17c96371a 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -52,7 +52,7 @@ int usb_stor_euscsi_init(struct us_data *us)
us->iobuf[0] = 0x1;
result = usb_stor_control_msg(us, us->send_ctrl_pipe,
0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR,
- 0x01, 0x0, us->iobuf, 0x1, 5*HZ);
+ 0x01, 0x0, us->iobuf, 0x1, 5000);
US_DEBUGP("-- result is %d\n", result);
return 0;
@@ -80,14 +80,16 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
res = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb,
US_BULK_CB_WRAP_LEN, &partial);
- if(res)
- return res;
+ if (res)
+ return -EIO;
US_DEBUGP("Getting status packet...\n");
res = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
US_BULK_CS_WRAP_LEN, &partial);
+ if (res)
+ return -EIO;
- return (res ? -1 : 0);
+ return 0;
}
/* This places the HUAWEI E220 devices in multi-port mode */
@@ -99,6 +101,6 @@ int usb_stor_huawei_e220_init(struct us_data *us)
USB_REQ_SET_FEATURE,
USB_TYPE_STANDARD | USB_RECIP_DEVICE,
0x01, 0x0, NULL, 0x0, 1000);
- US_DEBUGP("usb_control_msg performing result is %d\n", result);
- return (result ? 0 : -1);
+ US_DEBUGP("Huawei mode set result is %d\n", result);
+ return (result ? 0 : -ENODEV);
}
diff --git a/drivers/usb/storage/option_ms.c b/drivers/usb/storage/option_ms.c
index 353f922939a..d41cc0a970f 100644
--- a/drivers/usb/storage/option_ms.c
+++ b/drivers/usb/storage/option_ms.c
@@ -37,7 +37,7 @@ MODULE_PARM_DESC(option_zero_cd, "ZeroCD mode (1=Force Modem (default),"
#define RESPONSE_LEN 1024
-static int option_rezero(struct us_data *us, int ep_in, int ep_out)
+static int option_rezero(struct us_data *us)
{
const unsigned char rezero_msg[] = {
0x55, 0x53, 0x42, 0x43, 0x78, 0x56, 0x34, 0x12,
@@ -54,10 +54,10 @@ static int option_rezero(struct us_data *us, int ep_in, int ep_out)
if (buffer == NULL)
return USB_STOR_TRANSPORT_ERROR;
- memcpy(buffer, rezero_msg, sizeof (rezero_msg));
+ memcpy(buffer, rezero_msg, sizeof(rezero_msg));
result = usb_stor_bulk_transfer_buf(us,
- usb_sndbulkpipe(us->pusb_dev, ep_out),
- buffer, sizeof (rezero_msg), NULL);
+ us->send_bulk_pipe,
+ buffer, sizeof(rezero_msg), NULL);
if (result != USB_STOR_XFER_GOOD) {
result = USB_STOR_XFER_ERROR;
goto out;
@@ -66,9 +66,15 @@ static int option_rezero(struct us_data *us, int ep_in, int ep_out)
/* Some of the devices need to be asked for a response, but we don't
* care what that response is.
*/
- result = usb_stor_bulk_transfer_buf(us,
- usb_sndbulkpipe(us->pusb_dev, ep_out),
+ usb_stor_bulk_transfer_buf(us,
+ us->recv_bulk_pipe,
buffer, RESPONSE_LEN, NULL);
+
+ /* Read the CSW */
+ usb_stor_bulk_transfer_buf(us,
+ us->recv_bulk_pipe,
+ buffer, 13, NULL);
+
result = USB_STOR_XFER_GOOD;
out:
@@ -76,63 +82,75 @@ out:
return result;
}
-int option_ms_init(struct us_data *us)
+static int option_inquiry(struct us_data *us)
{
- struct usb_device *udev;
- struct usb_interface *intf;
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint = NULL;
- u8 ep_in = 0, ep_out = 0;
- int ep_in_size = 0, ep_out_size = 0;
- int i, result;
-
- udev = us->pusb_dev;
- intf = us->pusb_intf;
-
- /* Ensure it's really a ZeroCD device; devices that are already
- * in modem mode return 0xFF for class, subclass, and protocol.
- */
- if (udev->descriptor.bDeviceClass != 0 ||
- udev->descriptor.bDeviceSubClass != 0 ||
- udev->descriptor.bDeviceProtocol != 0)
- return USB_STOR_TRANSPORT_GOOD;
+ const unsigned char inquiry_msg[] = {
+ 0x55, 0x53, 0x42, 0x43, 0x12, 0x34, 0x56, 0x78,
+ 0x24, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x12,
+ 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+ char *buffer;
+ int result;
- US_DEBUGP("Option MS: option_ms_init called\n");
+ US_DEBUGP("Option MS: %s", "device inquiry for vendor name\n");
- /* Find the right mass storage interface */
- iface_desc = intf->cur_altsetting;
- if (iface_desc->desc.bInterfaceClass != 0x8 ||
- iface_desc->desc.bInterfaceSubClass != 0x6 ||
- iface_desc->desc.bInterfaceProtocol != 0x50) {
- US_DEBUGP("Option MS: mass storage interface not found, no action "
- "required\n");
- return USB_STOR_TRANSPORT_GOOD;
- }
+ buffer = kzalloc(0x24, GFP_KERNEL);
+ if (buffer == NULL)
+ return USB_STOR_TRANSPORT_ERROR;
- /* Find the mass storage bulk endpoints */
- for (i = 0; i < iface_desc->desc.bNumEndpoints && (!ep_in_size || !ep_out_size); ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_bulk_in(endpoint)) {
- ep_in = usb_endpoint_num(endpoint);
- ep_in_size = le16_to_cpu(endpoint->wMaxPacketSize);
- } else if (usb_endpoint_is_bulk_out(endpoint)) {
- ep_out = usb_endpoint_num(endpoint);
- ep_out_size = le16_to_cpu(endpoint->wMaxPacketSize);
- }
+ memcpy(buffer, inquiry_msg, sizeof(inquiry_msg));
+ result = usb_stor_bulk_transfer_buf(us,
+ us->send_bulk_pipe,
+ buffer, sizeof(inquiry_msg), NULL);
+ if (result != USB_STOR_XFER_GOOD) {
+ result = USB_STOR_XFER_ERROR;
+ goto out;
}
- /* Can't find the mass storage endpoints */
- if (!ep_in_size || !ep_out_size) {
- US_DEBUGP("Option MS: mass storage endpoints not found, no action "
- "required\n");
- return USB_STOR_TRANSPORT_GOOD;
+ result = usb_stor_bulk_transfer_buf(us,
+ us->recv_bulk_pipe,
+ buffer, 0x24, NULL);
+ if (result != USB_STOR_XFER_GOOD) {
+ result = USB_STOR_XFER_ERROR;
+ goto out;
}
+ result = memcmp(buffer+8, "Option", 6);
+
+ /* Read the CSW */
+ usb_stor_bulk_transfer_buf(us,
+ us->recv_bulk_pipe,
+ buffer, 13, NULL);
+
+out:
+ kfree(buffer);
+ return result;
+}
+
+
+int option_ms_init(struct us_data *us)
+{
+ int result;
+
+ US_DEBUGP("Option MS: option_ms_init called\n");
+
+ /* Additional test for vendor information via INQUIRY,
+ * because some vendor/product IDs are ambiguous
+ */
+ result = option_inquiry(us);
+ if (result != 0) {
+ US_DEBUGP("Option MS: vendor is not Option or not determinable,"
+ " no action taken\n");
+ return 0;
+ } else
+ US_DEBUGP("Option MS: this is a genuine Option device,"
+ " proceeding\n");
+
/* Force Modem mode */
if (option_zero_cd == ZCD_FORCE_MODEM) {
US_DEBUGP("Option MS: %s", "Forcing Modem Mode\n");
- result = option_rezero(us, ep_in, ep_out);
+ result = option_rezero(us);
if (result != USB_STOR_XFER_GOOD)
US_DEBUGP("Option MS: Failed to switch to modem mode.\n");
return -EIO;
@@ -142,6 +160,6 @@ int option_ms_init(struct us_data *us)
" requests it\n");
}
- return USB_STOR_TRANSPORT_GOOD;
+ return 0;
}
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 4ca3b586064..cfa26d56ce6 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -132,7 +132,7 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_MAX_SECTORS_MIN)
max_sectors = PAGE_CACHE_SIZE >> 9;
- if (sdev->request_queue->max_sectors > max_sectors)
+ if (queue_max_sectors(sdev->request_queue) > max_sectors)
blk_queue_max_sectors(sdev->request_queue,
max_sectors);
} else if (sdev->type == TYPE_TAPE) {
@@ -483,7 +483,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sprintf(buf, "%u\n", sdev->request_queue->max_sectors);
+ return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
}
/* Input routine for the sysfs max_sectors file */
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index 4359a2cb42d..4395c4100ec 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -202,6 +202,6 @@ int sierra_ms_init(struct us_data *us)
complete:
result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
- return USB_STOR_TRANSPORT_GOOD;
+ return 0;
}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 4b8b69045fe..1b9c5dd0fb2 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1385,7 +1385,7 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
UNUSUAL_DEV( 0x1186, 0x3e04, 0x0000, 0x0000,
"D-Link",
"USB Mass Storage",
- US_SC_DEVICE, US_PR_DEVICE, option_ms_init, 0),
+ US_SC_DEVICE, US_PR_DEVICE, option_ms_init, US_FL_IGNORE_DEVICE),
/* Reported by Kevin Lloyd <linux@sierrawireless.com>
* Entry is needed for the initializer function override,
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 0048f1185a6..932ffdbf86d 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -397,7 +397,7 @@ config FB_SA1100
config FB_IMX
tristate "Motorola i.MX LCD support"
- depends on FB && (ARCH_IMX || ARCH_MX2)
+ depends on FB && (ARCH_MX1 || ARCH_MX2)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1759,6 +1759,16 @@ config FB_68328
Say Y here if you want to support the built-in frame buffer of
the Motorola 68328 CPU family.
+config FB_PXA168
+ tristate "PXA168/910 LCD framebuffer support"
+ depends on FB && (CPU_PXA168 || CPU_PXA910)
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ ---help---
+ Frame buffer driver for the built-in LCD controller in the Marvell
+ MMP processor.
+
config FB_PXA
tristate "PXA LCD framebuffer support"
depends on FB && ARCH_PXA
@@ -1996,7 +2006,7 @@ config FB_PS3_DEFAULT_SIZE_M
config FB_XILINX
tristate "Xilinx frame buffer support"
- depends on FB && XILINX_VIRTEX
+ depends on FB && (XILINX_VIRTEX || MICROBLAZE)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -2094,6 +2104,7 @@ config FB_MB862XX_LIME
bool "Lime GDC"
depends on FB_MB862XX
depends on OF && !FB_MB862XX_PCI_GDC
+ depends on PPC
select FB_FOREIGN_ENDIAN
select FB_LITTLE_ENDIAN
---help---
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index d8d0be5151e..01a819f4737 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -97,6 +97,7 @@ obj-$(CONFIG_FB_GBE) += gbefb.o
obj-$(CONFIG_FB_CIRRUS) += cirrusfb.o
obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o
obj-$(CONFIG_FB_PXA) += pxafb.o
+obj-$(CONFIG_FB_PXA168) += pxa168fb.o
obj-$(CONFIG_FB_W100) += w100fb.o
obj-$(CONFIG_FB_TMIO) += tmiofb.o
obj-$(CONFIG_FB_AU1100) += au1100fb.o
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 6995fe1e86d..0bcc59eb37f 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -859,43 +859,6 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
return 0;
}
-/*
- * Note that we are entered with the kernel locked.
- */
-static int
-acornfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- unsigned long off, start;
- u32 len;
-
- off = vma->vm_pgoff << PAGE_SHIFT;
-
- start = info->fix.smem_start;
- len = PAGE_ALIGN(start & ~PAGE_MASK) + info->fix.smem_len;
- start &= PAGE_MASK;
- if ((vma->vm_end - vma->vm_start + off) > len)
- return -EINVAL;
- off += start;
- vma->vm_pgoff = off >> PAGE_SHIFT;
-
- /* This is an IO map - tell maydump to skip this VMA */
- vma->vm_flags |= VM_IO;
-
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- /*
- * Don't alter the page protection flags; we want to keep the area
- * cached for better performance. This does mean that we may miss
- * some updates to the screen occasionally, but process switches
- * should cause the caches and buffers to be flushed often enough.
- */
- if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-
static struct fb_ops acornfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = acornfb_check_var,
@@ -905,7 +868,6 @@ static struct fb_ops acornfb_ops = {
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
- .fb_mmap = acornfb_mmap,
};
/*
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 61050ab1412..fb8163d181a 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -351,7 +351,7 @@ static int clcdfb_register(struct clcd_fb *fb)
}
fb->fb.fix.mmio_start = fb->dev->res.start;
- fb->fb.fix.mmio_len = 4096;
+ fb->fb.fix.mmio_len = resource_size(&fb->dev->res);
fb->regs = ioremap(fb->fb.fix.mmio_start, fb->fb.fix.mmio_len);
if (!fb->regs) {
@@ -437,7 +437,7 @@ static int clcdfb_register(struct clcd_fb *fb)
return ret;
}
-static int clcdfb_probe(struct amba_device *dev, void *id)
+static int clcdfb_probe(struct amba_device *dev, struct amba_id *id)
{
struct clcd_board *board = dev->dev.platform_data;
struct clcd_fb *fb;
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 9a577a800db..5afd64482f5 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -29,14 +29,8 @@
/* configurable parameters */
#define ATMEL_LCDC_CVAL_DEFAULT 0xc8
-#define ATMEL_LCDC_DMA_BURST_LEN 8
-
-#if defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91CAP9) || \
- defined(CONFIG_ARCH_AT91SAM9RL)
-#define ATMEL_LCDC_FIFO_SIZE 2048
-#else
-#define ATMEL_LCDC_FIFO_SIZE 512
-#endif
+#define ATMEL_LCDC_DMA_BURST_LEN 8 /* words */
+#define ATMEL_LCDC_FIFO_SIZE 512 /* words */
#if defined(CONFIG_ARCH_AT91)
#define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \
@@ -351,7 +345,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
dev_dbg(dev, " bpp: %u\n", var->bits_per_pixel);
dev_dbg(dev, " clk: %lu KHz\n", clk_value_khz);
- if ((PICOS2KHZ(var->pixclock) * var->bits_per_pixel / 8) > clk_value_khz) {
+ if (PICOS2KHZ(var->pixclock) > clk_value_khz) {
dev_err(dev, "%lu KHz pixel clock is too fast\n", PICOS2KHZ(var->pixclock));
return -EINVAL;
}
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 35e8eb02b9e..e4e4d433b00 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -354,7 +354,7 @@ static int default_crt_on __devinitdata = 0;
static int default_lcd_on __devinitdata = 1;
#ifdef CONFIG_MTRR
-static int mtrr = 1;
+static bool mtrr = true;
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 97a1f095f32..515cf1978d1 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -213,7 +213,6 @@ static void radeon_pm_disable_dynamic_mode(struct radeonfb_info *rinfo)
PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb |
PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb |
PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb |
- PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb |
PIXCLKS_CNTL__R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
OUTPLL(pllPIXCLKS_CNTL, tmp);
@@ -395,7 +394,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb |
PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb |
PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb |
- PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb);
+ PIXCLKS_CNTL__R300_P2G2CLK_DAC_ALWAYS_ONb);
OUTPLL(pllPIXCLKS_CNTL, tmp);
tmp = INPLL(pllMCLK_MISC);
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 37e60b1d2ed..e49ae5edcc0 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -323,7 +323,6 @@ static int bfin_bf54x_fb_release(struct fb_info *info, int user)
bfin_write_EPPI0_CONTROL(0);
SSYNC();
disable_dma(CH_EPPI0);
- memset(fbi->fb_buffer, 0, info->fix.smem_len);
}
spin_unlock(&fbi->lock);
@@ -530,7 +529,7 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init bfin_bf54x_probe(struct platform_device *pdev)
+static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
{
struct bfin_bf54xfb_info *info;
struct fb_info *fbinfo;
@@ -626,14 +625,12 @@ static int __init bfin_bf54x_probe(struct platform_device *pdev)
goto out3;
}
- memset(info->fb_buffer, 0, fbinfo->fix.smem_len);
-
fbinfo->screen_base = (void *)info->fb_buffer;
fbinfo->fix.smem_start = (int)info->fb_buffer;
fbinfo->fbops = &bfin_bf54x_fb_ops;
- fbinfo->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+ fbinfo->pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL);
if (!fbinfo->pseudo_palette) {
printk(KERN_ERR DRIVER_NAME
"Fail to allocate pseudo_palette\n");
@@ -642,8 +639,6 @@ static int __init bfin_bf54x_probe(struct platform_device *pdev)
goto out4;
}
- memset(fbinfo->pseudo_palette, 0, sizeof(u32) * 16);
-
if (fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0)
< 0) {
printk(KERN_ERR DRIVER_NAME
@@ -712,7 +707,7 @@ out1:
return ret;
}
-static int bfin_bf54x_remove(struct platform_device *pdev)
+static int __devexit bfin_bf54x_remove(struct platform_device *pdev)
{
struct fb_info *fbinfo = platform_get_drvdata(pdev);
@@ -781,7 +776,7 @@ static int bfin_bf54x_resume(struct platform_device *pdev)
static struct platform_driver bfin_bf54x_driver = {
.probe = bfin_bf54x_probe,
- .remove = bfin_bf54x_remove,
+ .remove = __devexit_p(bfin_bf54x_remove),
.suspend = bfin_bf54x_suspend,
.resume = bfin_bf54x_resume,
.driver = {
@@ -790,7 +785,7 @@ static struct platform_driver bfin_bf54x_driver = {
},
};
-static int __devinit bfin_bf54x_driver_init(void)
+static int __init bfin_bf54x_driver_init(void)
{
return platform_driver_register(&bfin_bf54x_driver);
}
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 90cfddabf1f..5cc36cfbf07 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -242,7 +242,6 @@ static int bfin_t350mcqb_fb_release(struct fb_info *info, int user)
SSYNC();
disable_dma(CH_PPI);
bfin_t350mcqb_stop_timers();
- memset(fbi->fb_buffer, 0, info->fix.smem_len);
}
spin_unlock(&fbi->lock);
@@ -527,8 +526,6 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
goto out3;
}
- memset(info->fb_buffer, 0, fbinfo->fix.smem_len);
-
fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
@@ -602,7 +599,7 @@ out1:
return ret;
}
-static int bfin_t350mcqb_remove(struct platform_device *pdev)
+static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
{
struct fb_info *fbinfo = platform_get_drvdata(pdev);
@@ -637,9 +634,6 @@ static int bfin_t350mcqb_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct fb_info *fbinfo = platform_get_drvdata(pdev);
- struct bfin_t350mcqbfb_info *info = fbinfo->par;
-
bfin_t350mcqb_disable_ppi();
disable_dma(CH_PPI);
bfin_write_PPI_STATUS(0xFFFF);
@@ -649,9 +643,6 @@ static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t stat
static int bfin_t350mcqb_resume(struct platform_device *pdev)
{
- struct fb_info *fbinfo = platform_get_drvdata(pdev);
- struct bfin_t350mcqbfb_info *info = fbinfo->par;
-
enable_dma(CH_PPI);
bfin_t350mcqb_enable_ppi();
@@ -664,7 +655,7 @@ static int bfin_t350mcqb_resume(struct platform_device *pdev)
static struct platform_driver bfin_t350mcqb_driver = {
.probe = bfin_t350mcqb_probe,
- .remove = bfin_t350mcqb_remove,
+ .remove = __devexit_p(bfin_t350mcqb_remove),
.suspend = bfin_t350mcqb_suspend,
.resume = bfin_t350mcqb_resume,
.driver = {
@@ -673,7 +664,7 @@ static struct platform_driver bfin_t350mcqb_driver = {
},
};
-static int __devinit bfin_t350mcqb_driver_init(void)
+static int __init bfin_t350mcqb_driver_init(void)
{
return platform_driver_register(&bfin_t350mcqb_driver);
}
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 1e35ba6f18e..b0b147cb4cb 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -111,9 +111,7 @@ struct bw2_par {
u32 flags;
#define BW2_FLAG_BLANKED 0x00000001
- unsigned long physbase;
unsigned long which_io;
- unsigned long fbsize;
};
/**
@@ -167,17 +165,15 @@ static int bw2_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct bw2_par *par = (struct bw2_par *)info->par;
return sbusfb_mmap_helper(bw2_mmap_map,
- par->physbase, par->fbsize,
+ info->fix.smem_start, info->fix.smem_len,
par->which_io,
vma);
}
static int bw2_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
- struct bw2_par *par = (struct bw2_par *) info->par;
-
return sbusfb_ioctl_helper(cmd, arg, info,
- FBTYPE_SUN2BW, 1, par->fbsize);
+ FBTYPE_SUN2BW, 1, info->fix.smem_len);
}
/*
@@ -294,7 +290,7 @@ static int __devinit bw2_probe(struct of_device *op, const struct of_device_id *
spin_lock_init(&par->lock);
- par->physbase = op->resource[0].start;
+ info->fix.smem_start = op->resource[0].start;
par->which_io = op->resource[0].flags & IORESOURCE_BITS;
sbusfb_fill_var(&info->var, dp, 1);
@@ -317,13 +313,13 @@ static int __devinit bw2_probe(struct of_device *op, const struct of_device_id *
goto out_unmap_regs;
}
- par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+ info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
info->flags = FBINFO_DEFAULT;
info->fbops = &bw2_ops;
info->screen_base = of_ioremap(&op->resource[0], 0,
- par->fbsize, "bw2 ram");
+ info->fix.smem_len, "bw2 ram");
if (!info->screen_base)
goto out_unmap_regs;
@@ -338,12 +334,12 @@ static int __devinit bw2_probe(struct of_device *op, const struct of_device_id *
dev_set_drvdata(&op->dev, info);
printk(KERN_INFO "%s: bwtwo at %lx:%lx\n",
- dp->full_name, par->which_io, par->physbase);
+ dp->full_name, par->which_io, info->fix.smem_start);
return 0;
out_unmap_screen:
- of_iounmap(&op->resource[0], info->screen_base, par->fbsize);
+ of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len);
out_unmap_regs:
of_iounmap(&op->resource[0], par->regs, sizeof(struct bw2_regs));
@@ -363,7 +359,7 @@ static int __devexit bw2_remove(struct of_device *op)
unregister_framebuffer(info);
of_iounmap(&op->resource[0], par->regs, sizeof(struct bw2_regs));
- of_iounmap(&op->resource[0], info->screen_base, par->fbsize);
+ of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len);
framebuffer_release(info);
diff --git a/drivers/video/carminefb.c b/drivers/video/carminefb.c
index c7ff3c1a266..0c02f8ec4bf 100644
--- a/drivers/video/carminefb.c
+++ b/drivers/video/carminefb.c
@@ -562,7 +562,7 @@ static int __devinit alloc_carmine_fb(void __iomem *regs, void __iomem *smem_bas
if (ret < 0)
goto err_free_fb;
- if (fb_mode > ARRAY_SIZE(carmine_modedb))
+ if (fb_mode >= ARRAY_SIZE(carmine_modedb))
fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE;
par->cur_mode = par->new_mode = ~0;
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index a2d1882791a..fe45a3b8d0e 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -196,9 +196,7 @@ struct cg14_par {
u32 flags;
#define CG14_FLAG_BLANKED 0x00000001
- unsigned long physbase;
unsigned long iospace;
- unsigned long fbsize;
struct sbus_mmap_map mmap_map[CG14_MMAP_ENTRIES];
@@ -271,7 +269,7 @@ static int cg14_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct cg14_par *par = (struct cg14_par *) info->par;
return sbusfb_mmap_helper(par->mmap_map,
- par->physbase, par->fbsize,
+ info->fix.smem_start, info->fix.smem_len,
par->iospace, vma);
}
@@ -343,7 +341,8 @@ static int cg14_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
default:
ret = sbusfb_ioctl_helper(cmd, arg, info,
- FBTYPE_MDICOLOR, 8, par->fbsize);
+ FBTYPE_MDICOLOR, 8,
+ info->fix.smem_len);
break;
};
@@ -462,7 +461,7 @@ static void cg14_unmap_regs(struct of_device *op, struct fb_info *info,
par->cursor, sizeof(struct cg14_cursor));
if (info->screen_base)
of_iounmap(&op->resource[1],
- info->screen_base, par->fbsize);
+ info->screen_base, info->fix.smem_len);
}
static int __devinit cg14_probe(struct of_device *op, const struct of_device_id *match)
@@ -488,14 +487,14 @@ static int __devinit cg14_probe(struct of_device *op, const struct of_device_id
linebytes = of_getintprop_default(dp, "linebytes",
info->var.xres);
- par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+ info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
if (!strcmp(dp->parent->name, "sbus") ||
!strcmp(dp->parent->name, "sbi")) {
- par->physbase = op->resource[0].start;
+ info->fix.smem_start = op->resource[0].start;
par->iospace = op->resource[0].flags & IORESOURCE_BITS;
} else {
- par->physbase = op->resource[1].start;
+ info->fix.smem_start = op->resource[1].start;
par->iospace = op->resource[0].flags & IORESOURCE_BITS;
}
@@ -507,7 +506,7 @@ static int __devinit cg14_probe(struct of_device *op, const struct of_device_id
sizeof(struct cg14_cursor), "cg14 cursor");
info->screen_base = of_ioremap(&op->resource[1], 0,
- par->fbsize, "cg14 ram");
+ info->fix.smem_len, "cg14 ram");
if (!par->regs || !par->clut || !par->cursor || !info->screen_base)
goto out_unmap_regs;
@@ -557,7 +556,7 @@ static int __devinit cg14_probe(struct of_device *op, const struct of_device_id
printk(KERN_INFO "%s: cgfourteen at %lx:%lx, %dMB\n",
dp->full_name,
- par->iospace, par->physbase,
+ par->iospace, info->fix.smem_start,
par->ramsize >> 20);
return 0;
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 99f87fb61d0..b2319fa7286 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -118,9 +118,7 @@ struct cg3_par {
#define CG3_FLAG_BLANKED 0x00000001
#define CG3_FLAG_RDI 0x00000002
- unsigned long physbase;
unsigned long which_io;
- unsigned long fbsize;
};
/**
@@ -231,17 +229,15 @@ static int cg3_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct cg3_par *par = (struct cg3_par *)info->par;
return sbusfb_mmap_helper(cg3_mmap_map,
- par->physbase, par->fbsize,
+ info->fix.smem_start, info->fix.smem_len,
par->which_io,
vma);
}
static int cg3_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
- struct cg3_par *par = (struct cg3_par *) info->par;
-
return sbusfb_ioctl_helper(cmd, arg, info,
- FBTYPE_SUN3COLOR, 8, par->fbsize);
+ FBTYPE_SUN3COLOR, 8, info->fix.smem_len);
}
/*
@@ -368,7 +364,7 @@ static int __devinit cg3_probe(struct of_device *op,
spin_lock_init(&par->lock);
- par->physbase = op->resource[0].start;
+ info->fix.smem_start = op->resource[0].start;
par->which_io = op->resource[0].flags & IORESOURCE_BITS;
sbusfb_fill_var(&info->var, dp, 8);
@@ -382,7 +378,7 @@ static int __devinit cg3_probe(struct of_device *op,
linebytes = of_getintprop_default(dp, "linebytes",
info->var.xres);
- par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+ info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
par->regs = of_ioremap(&op->resource[0], CG3_REGS_OFFSET,
sizeof(struct cg3_regs), "cg3 regs");
@@ -392,7 +388,7 @@ static int __devinit cg3_probe(struct of_device *op,
info->flags = FBINFO_DEFAULT;
info->fbops = &cg3_ops;
info->screen_base = of_ioremap(&op->resource[0], CG3_RAM_OFFSET,
- par->fbsize, "cg3 ram");
+ info->fix.smem_len, "cg3 ram");
if (!info->screen_base)
goto out_unmap_regs;
@@ -418,7 +414,7 @@ static int __devinit cg3_probe(struct of_device *op,
dev_set_drvdata(&op->dev, info);
printk(KERN_INFO "%s: cg3 at %lx:%lx\n",
- dp->full_name, par->which_io, par->physbase);
+ dp->full_name, par->which_io, info->fix.smem_start);
return 0;
@@ -426,7 +422,7 @@ out_dealloc_cmap:
fb_dealloc_cmap(&info->cmap);
out_unmap_screen:
- of_iounmap(&op->resource[0], info->screen_base, par->fbsize);
+ of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len);
out_unmap_regs:
of_iounmap(&op->resource[0], par->regs, sizeof(struct cg3_regs));
@@ -447,7 +443,7 @@ static int __devexit cg3_remove(struct of_device *op)
fb_dealloc_cmap(&info->cmap);
of_iounmap(&op->resource[0], par->regs, sizeof(struct cg3_regs));
- of_iounmap(&op->resource[0], info->screen_base, par->fbsize);
+ of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len);
framebuffer_release(info);
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 940ec04f0f1..0d47c6030e3 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -263,9 +263,7 @@ struct cg6_par {
u32 flags;
#define CG6_FLAG_BLANKED 0x00000001
- unsigned long physbase;
unsigned long which_io;
- unsigned long fbsize;
};
static int cg6_sync(struct fb_info *info)
@@ -596,16 +594,14 @@ static int cg6_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct cg6_par *par = (struct cg6_par *)info->par;
return sbusfb_mmap_helper(cg6_mmap_map,
- par->physbase, par->fbsize,
+ info->fix.smem_start, info->fix.smem_len,
par->which_io, vma);
}
static int cg6_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
- struct cg6_par *par = (struct cg6_par *)info->par;
-
return sbusfb_ioctl_helper(cmd, arg, info,
- FBTYPE_SUNFAST_COLOR, 8, par->fbsize);
+ FBTYPE_SUNFAST_COLOR, 8, info->fix.smem_len);
}
/*
@@ -631,12 +627,12 @@ static void __devinit cg6_init_fix(struct fb_info *info, int linebytes)
break;
};
if (((conf >> CG6_FHC_REV_SHIFT) & CG6_FHC_REV_MASK) >= 11) {
- if (par->fbsize <= 0x100000)
+ if (info->fix.smem_len <= 0x100000)
cg6_card_name = "TGX";
else
cg6_card_name = "TGX+";
} else {
- if (par->fbsize <= 0x100000)
+ if (info->fix.smem_len <= 0x100000)
cg6_card_name = "GX";
else
cg6_card_name = "GX+";
@@ -738,7 +734,8 @@ static void cg6_unmap_regs(struct of_device *op, struct fb_info *info,
of_iounmap(&op->resource[0], par->fhc, sizeof(u32));
if (info->screen_base)
- of_iounmap(&op->resource[0], info->screen_base, par->fbsize);
+ of_iounmap(&op->resource[0], info->screen_base,
+ info->fix.smem_len);
}
static int __devinit cg6_probe(struct of_device *op,
@@ -759,7 +756,7 @@ static int __devinit cg6_probe(struct of_device *op,
spin_lock_init(&par->lock);
- par->physbase = op->resource[0].start;
+ info->fix.smem_start = op->resource[0].start;
par->which_io = op->resource[0].flags & IORESOURCE_BITS;
sbusfb_fill_var(&info->var, dp, 8);
@@ -769,11 +766,11 @@ static int __devinit cg6_probe(struct of_device *op,
linebytes = of_getintprop_default(dp, "linebytes",
info->var.xres);
- par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+ info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
dblbuf = of_getintprop_default(dp, "dblbuf", 0);
if (dblbuf)
- par->fbsize *= 4;
+ info->fix.smem_len *= 4;
par->fbc = of_ioremap(&op->resource[0], CG6_FBC_OFFSET,
4096, "cgsix fbc");
@@ -792,7 +789,7 @@ static int __devinit cg6_probe(struct of_device *op,
info->fbops = &cg6_ops;
info->screen_base = of_ioremap(&op->resource[0], CG6_RAM_OFFSET,
- par->fbsize, "cgsix ram");
+ info->fix.smem_len, "cgsix ram");
if (!par->fbc || !par->tec || !par->thc ||
!par->bt || !par->fhc || !info->screen_base)
goto out_unmap_regs;
@@ -817,7 +814,7 @@ static int __devinit cg6_probe(struct of_device *op,
printk(KERN_INFO "%s: CGsix [%s] at %lx:%lx\n",
dp->full_name, info->fix.id,
- par->which_io, par->physbase);
+ par->which_io, info->fix.smem_start);
return 0;
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index 777389c4098..57b9d276497 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -414,7 +414,6 @@ chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
}
pci_set_drvdata(dp, p);
- p->device = &dp->dev;
init_chips(p, addr);
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 38e86b84dce..59d7d5ec17a 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -180,7 +180,7 @@ static inline void vga_set_mem_top(struct vc_data *c)
}
#ifdef CONFIG_VGACON_SOFT_SCROLLBACK
-#include <linux/bootmem.h>
+#include <linux/slab.h>
/* software scrollback */
static void *vgacon_scrollback;
static int vgacon_scrollback_tail;
@@ -210,8 +210,7 @@ static void vgacon_scrollback_init(int pitch)
*/
static void __init_refok vgacon_scrollback_startup(void)
{
- vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE
- * 1024);
+ vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT);
vgacon_scrollback_init(vga_video_num_columns * 2);
}
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 83c5cefc266..da7c01b39be 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1736,10 +1736,8 @@ static int __init cyber2000fb_init(void)
#ifdef CONFIG_ARCH_SHARK
err = cyberpro_vl_probe();
- if (!err) {
+ if (!err)
ret = 0;
- __module_get(THIS_MODULE);
- }
#endif
#ifdef CONFIG_PCI
err = pci_register_driver(&cyberpro_driver);
@@ -1749,14 +1747,15 @@ static int __init cyber2000fb_init(void)
return ret ? err : 0;
}
+module_init(cyber2000fb_init);
+#ifndef CONFIG_ARCH_SHARK
static void __exit cyberpro_exit(void)
{
pci_unregister_driver(&cyberpro_driver);
}
-
-module_init(cyber2000fb_init);
module_exit(cyberpro_exit);
+#endif
MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("CyberPro 2000, 2010 and 5000 framebuffer driver");
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 8dea2bc9270..eb12182b205 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -280,6 +280,9 @@ static int __init efifb_probe(struct platform_device *dev)
info->pseudo_palette = info->par;
info->par = NULL;
+ info->aperture_base = efifb_fix.smem_start;
+ info->aperture_size = size_total;
+
info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
if (!info->screen_base) {
printk(KERN_ERR "efifb: abort, cannot ioremap video memory "
@@ -337,7 +340,7 @@ static int __init efifb_probe(struct platform_device *dev)
info->fbops = &efifb_ops;
info->var = efifb_defined;
info->fix = efifb_fix;
- info->flags = FBINFO_FLAG_DEFAULT;
+ info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE;
if ((err = fb_alloc_cmap(&info->cmap, 256, 0)) < 0) {
printk(KERN_ERR "efifb: cannot allocate colormap\n");
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index d412a1ddc12..f8a09bf8d0c 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1462,6 +1462,16 @@ static int fb_check_foreignness(struct fb_info *fi)
return 0;
}
+static bool fb_do_apertures_overlap(struct fb_info *gen, struct fb_info *hw)
+{
+ /* is the generic aperture base the same as the HW one */
+ if (gen->aperture_base == hw->aperture_base)
+ return true;
+ /* is the generic aperture base inside the hw base->hw base+size */
+ if (gen->aperture_base > hw->aperture_base && gen->aperture_base <= hw->aperture_base + hw->aperture_size)
+ return true;
+ return false;
+}
/**
* register_framebuffer - registers a frame buffer device
* @fb_info: frame buffer info structure
@@ -1485,6 +1495,23 @@ register_framebuffer(struct fb_info *fb_info)
if (fb_check_foreignness(fb_info))
return -ENOSYS;
+ /* check all firmware fbs and kick off if the base addr overlaps */
+ for (i = 0 ; i < FB_MAX; i++) {
+ if (!registered_fb[i])
+ continue;
+
+ if (registered_fb[i]->flags & FBINFO_MISC_FIRMWARE) {
+ if (fb_do_apertures_overlap(registered_fb[i], fb_info)) {
+ printk(KERN_ERR "fb: conflicting fb hw usage "
+ "%s vs %s - removing generic driver\n",
+ fb_info->fix.id,
+ registered_fb[i]->fix.id);
+ unregister_framebuffer(registered_fb[i]);
+ break;
+ }
+ }
+ }
+
num_registered_fb++;
for (i = 0 ; i < FB_MAX; i++)
if (!registered_fb[i])
@@ -1586,6 +1613,10 @@ unregister_framebuffer(struct fb_info *fb_info)
device_destroy(fb_class, MKDEV(FB_MAJOR, i));
event.info = fb_info;
fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
+
+ /* this may free fb info */
+ if (fb_info->fbops->fb_destroy)
+ fb_info->fbops->fb_destroy(fb_info);
done:
return ret;
}
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index e6467cf9f19..020db7fc915 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -335,9 +335,9 @@ static int __init hitfb_probe(struct platform_device *dev)
if (fb_get_options("hitfb", NULL))
return -ENODEV;
- hitfb_fix.mmio_start = CONFIG_HD64461_IOBASE+0x1000;
+ hitfb_fix.mmio_start = HD64461_IO_OFFSET(0x1000);
hitfb_fix.mmio_len = 0x1000;
- hitfb_fix.smem_start = CONFIG_HD64461_IOBASE + 0x02000000;
+ hitfb_fix.smem_start = HD64461_IO_OFFSET(0x02000000);
hitfb_fix.smem_len = 512 * 1024;
lcdclor = fb_readw(HD64461_LCDCLOR);
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index 3a81060137a..15d20010944 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -395,17 +395,16 @@ int __init igafb_init(void)
/* We leak a reference here but as it cannot be unloaded this is
fine. If you write unload code remember to free it in unload */
- size = sizeof(struct fb_info) + sizeof(struct iga_par) + sizeof(u32)*16;
+ size = sizeof(struct iga_par) + sizeof(u32)*16;
- info = kzalloc(size, GFP_ATOMIC);
+ info = framebuffer_alloc(size, &pdev->dev);
if (!info) {
printk("igafb_init: can't alloc fb_info\n");
pci_dev_put(pdev);
return -ENOMEM;
}
- par = (struct iga_par *) (info + 1);
-
+ par = info->par;
if ((addr = pdev->resource[0].start) == 0) {
printk("igafb_init: no memory start\n");
@@ -526,7 +525,6 @@ int __init igafb_init(void)
info->var = default_var;
info->fix = igafb_fix;
info->pseudo_palette = (void *)(par + 1);
- info->device = &pdev->dev;
if (!iga_init(info, par)) {
iounmap((void *)par->io_base);
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index ace14fe02fc..0cafd642fbc 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -1365,6 +1365,11 @@ static int intelfb_set_par(struct fb_info *info)
DBG_MSG("intelfb_set_par (%dx%d-%d)\n", info->var.xres,
info->var.yres, info->var.bits_per_pixel);
+ /*
+ * Disable VCO prior to timing register change.
+ */
+ OUTREG(DPLL_A, INREG(DPLL_A) & ~DPLL_VCO_ENABLE);
+
intelfb_blank(FB_BLANK_POWERDOWN, info);
if (ACCEL(dinfo, info))
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index 7c7e8c2da9d..e145e2d16fe 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -191,9 +191,7 @@ struct leo_par {
u32 flags;
#define LEO_FLAG_BLANKED 0x00000001
- unsigned long physbase;
unsigned long which_io;
- unsigned long fbsize;
};
static void leo_wait(struct leo_lx_krn __iomem *lx_krn)
@@ -420,16 +418,14 @@ static int leo_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct leo_par *par = (struct leo_par *)info->par;
return sbusfb_mmap_helper(leo_mmap_map,
- par->physbase, par->fbsize,
+ info->fix.smem_start, info->fix.smem_len,
par->which_io, vma);
}
static int leo_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
- struct leo_par *par = (struct leo_par *) info->par;
-
return sbusfb_ioctl_helper(cmd, arg, info,
- FBTYPE_SUNLEO, 32, par->fbsize);
+ FBTYPE_SUNLEO, 32, info->fix.smem_len);
}
/*
@@ -569,7 +565,7 @@ static int __devinit leo_probe(struct of_device *op,
spin_lock_init(&par->lock);
- par->physbase = op->resource[0].start;
+ info->fix.smem_start = op->resource[0].start;
par->which_io = op->resource[0].flags & IORESOURCE_BITS;
sbusfb_fill_var(&info->var, dp, 32);
@@ -577,7 +573,7 @@ static int __devinit leo_probe(struct of_device *op,
linebytes = of_getintprop_default(dp, "linebytes",
info->var.xres);
- par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+ info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
par->lc_ss0_usr =
of_ioremap(&op->resource[0], LEO_OFF_LC_SS0_USR,
@@ -627,7 +623,7 @@ static int __devinit leo_probe(struct of_device *op,
printk(KERN_INFO "%s: leo at %lx:%lx\n",
dp->full_name,
- par->which_io, par->physbase);
+ par->which_io, info->fix.smem_start);
return 0;
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index b91251d1fe4..3b437813584 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -37,22 +37,24 @@ extra-y += $(call logo-cfiles,_clut224,ppm)
# Gray 256
extra-y += $(call logo-cfiles,_gray256,pgm)
+pnmtologo := scripts/pnmtologo
+
# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
quiet_cmd_logo = LOGO $@
- cmd_logo = scripts/pnmtologo \
+ cmd_logo = $(pnmtologo) \
-t $(patsubst $*_%,%,$(notdir $(basename $<))) \
-n $(notdir $(basename $<)) -o $@ $<
-$(obj)/%_mono.c: $(src)/%_mono.pbm FORCE
+$(obj)/%_mono.c: $(src)/%_mono.pbm $(pnmtologo) FORCE
$(call if_changed,logo)
-$(obj)/%_vga16.c: $(src)/%_vga16.ppm FORCE
+$(obj)/%_vga16.c: $(src)/%_vga16.ppm $(pnmtologo) FORCE
$(call if_changed,logo)
-$(obj)/%_clut224.c: $(src)/%_clut224.ppm FORCE
+$(obj)/%_clut224.c: $(src)/%_clut224.ppm $(pnmtologo) FORCE
$(call if_changed,logo)
-$(obj)/%_gray256.c: $(src)/%_gray256.pgm FORCE
+$(obj)/%_gray256.c: $(src)/%_gray256.pgm $(pnmtologo) FORCE
$(call if_changed,logo)
# Files generated that shall be removed upon make clean
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 2e85a2b52d0..ea7a8ccc830 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -21,21 +21,6 @@
#include <asm/bootinfo.h>
#endif
-extern const struct linux_logo logo_linux_mono;
-extern const struct linux_logo logo_linux_vga16;
-extern const struct linux_logo logo_linux_clut224;
-extern const struct linux_logo logo_blackfin_vga16;
-extern const struct linux_logo logo_blackfin_clut224;
-extern const struct linux_logo logo_dec_clut224;
-extern const struct linux_logo logo_mac_clut224;
-extern const struct linux_logo logo_parisc_clut224;
-extern const struct linux_logo logo_sgi_clut224;
-extern const struct linux_logo logo_sun_clut224;
-extern const struct linux_logo logo_superh_mono;
-extern const struct linux_logo logo_superh_vga16;
-extern const struct linux_logo logo_superh_clut224;
-extern const struct linux_logo logo_m32r_clut224;
-
static int nologo;
module_param(nologo, bool, 0);
MODULE_PARM_DESC(nologo, "Disables startup logo");
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index fb64234a382..a28e3cfbbf7 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -19,7 +19,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#if defined(CONFIG_PPC_OF)
+#if defined(CONFIG_OF)
#include <linux/of_platform.h>
#endif
#include "mb862xxfb.h"
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 16186240c5f..34e4e799516 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -264,6 +264,14 @@ static const struct fb_videomode modedb[] = {
/* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */
NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3,
0, FB_VMODE_NONINTERLACED
+ }, {
+ /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
+ NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5,
+ 0, FB_VMODE_INTERLACED
+ }, {
+ /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
+ NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5,
+ 0, FB_VMODE_INTERLACED
},
};
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 9894de1c9b9..b7af5256e88 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -706,7 +706,7 @@ static void mx3fb_dma_done(void *arg)
dev_dbg(mx3fb->dev, "irq %d callback\n", ichannel->eof_irq);
/* We only need one interrupt, it will be re-enabled as needed */
- disable_irq(ichannel->eof_irq);
+ disable_irq_nosync(ichannel->eof_irq);
complete(&mx3_fbi->flip_cmpl);
}
@@ -1366,7 +1366,7 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
mx3fb_blank(FB_BLANK_UNBLANK, fbi);
- dev_info(dev, "mx3fb: fb registered, using mode %s\n", fb_mode);
+ dev_info(dev, "registered, using mode %s\n", fb_mode);
ret = register_framebuffer(fbi);
if (ret < 0)
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index e1d9eeb1aea..4d8c54c23dd 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -378,7 +378,6 @@ static void __init offb_init_fb(const char *name, const char *full_name,
struct fb_fix_screeninfo *fix;
struct fb_var_screeninfo *var;
struct fb_info *info;
- int size;
if (!request_mem_region(res_start, res_size, "offb"))
return;
@@ -393,15 +392,12 @@ static void __init offb_init_fb(const char *name, const char *full_name,
return;
}
- size = sizeof(struct fb_info) + sizeof(u32) * 16;
-
- info = kmalloc(size, GFP_ATOMIC);
+ info = framebuffer_alloc(sizeof(u32) * 16, NULL);
if (info == 0) {
release_mem_region(res_start, res_size);
return;
}
- memset(info, 0, size);
fix = &info->fix;
var = &info->var;
@@ -497,7 +493,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
iounmap(par->cmap_adr);
par->cmap_adr = NULL;
iounmap(info->screen_base);
- kfree(info);
+ framebuffer_release(info);
release_mem_region(res_start, res_size);
return;
}
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index dfb72f5e4c9..148cbcc3960 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -880,20 +880,22 @@ static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
static int get_dss_clocks(void)
{
- if (IS_ERR((dispc.dss_ick = clk_get(dispc.fbdev->dev, "dss_ick")))) {
- dev_err(dispc.fbdev->dev, "can't get dss_ick\n");
+ dispc.dss_ick = clk_get(dispc.fbdev->dev, "ick");
+ if (IS_ERR(dispc.dss_ick)) {
+ dev_err(dispc.fbdev->dev, "can't get ick\n");
return PTR_ERR(dispc.dss_ick);
}
- if (IS_ERR((dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck")))) {
+ dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck");
+ if (IS_ERR(dispc.dss1_fck)) {
dev_err(dispc.fbdev->dev, "can't get dss1_fck\n");
clk_put(dispc.dss_ick);
return PTR_ERR(dispc.dss1_fck);
}
- if (IS_ERR((dispc.dss_54m_fck =
- clk_get(dispc.fbdev->dev, "dss_54m_fck")))) {
- dev_err(dispc.fbdev->dev, "can't get dss_54m_fck\n");
+ dispc.dss_54m_fck = clk_get(dispc.fbdev->dev, "tv_fck");
+ if (IS_ERR(dispc.dss_54m_fck)) {
+ dev_err(dispc.fbdev->dev, "can't get tv_fck\n");
clk_put(dispc.dss_ick);
clk_put(dispc.dss1_fck);
return PTR_ERR(dispc.dss_54m_fck);
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
index 8aa6e47202b..5d4f34887a2 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/omap/hwa742.c
@@ -133,8 +133,7 @@ struct {
struct lcd_ctrl_extif *extif;
struct lcd_ctrl *int_ctrl;
- void (*power_up)(struct device *dev);
- void (*power_down)(struct device *dev);
+ struct clk *sys_ck;
} hwa742;
struct lcd_ctrl hwa742_ctrl;
@@ -915,14 +914,13 @@ static void hwa742_suspend(void)
hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
/* Enable sleep mode */
hwa742_write_reg(HWA742_POWER_SAVE, 1 << 1);
- if (hwa742.power_down != NULL)
- hwa742.power_down(hwa742.fbdev->dev);
+ clk_disable(hwa742.sys_ck);
}
static void hwa742_resume(void)
{
- if (hwa742.power_up != NULL)
- hwa742.power_up(hwa742.fbdev->dev);
+ clk_enable(hwa742.sys_ck);
+
/* Disable sleep mode */
hwa742_write_reg(HWA742_POWER_SAVE, 0);
while (1) {
@@ -955,14 +953,13 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
omapfb_conf = fbdev->dev->platform_data;
ctrl_conf = omapfb_conf->ctrl_platform_data;
- if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) {
+ if (ctrl_conf == NULL) {
dev_err(fbdev->dev, "HWA742: missing platform data\n");
r = -ENOENT;
goto err1;
}
- hwa742.power_down = ctrl_conf->power_down;
- hwa742.power_up = ctrl_conf->power_up;
+ hwa742.sys_ck = clk_get(NULL, "hwa_sys_ck");
spin_lock_init(&hwa742.req_lock);
@@ -972,12 +969,11 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
if ((r = hwa742.extif->init(fbdev)) < 0)
goto err2;
- ext_clk = ctrl_conf->get_clock_rate(fbdev->dev);
+ ext_clk = clk_get_rate(hwa742.sys_ck);
if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0)
goto err3;
hwa742.extif->set_timings(&hwa742.reg_timings);
- if (hwa742.power_up != NULL)
- hwa742.power_up(fbdev->dev);
+ clk_enable(hwa742.sys_ck);
calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk);
if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0)
@@ -1040,8 +1036,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
return 0;
err4:
- if (hwa742.power_down != NULL)
- hwa742.power_down(fbdev->dev);
+ clk_disable(hwa742.sys_ck);
err3:
hwa742.extif->cleanup();
err2:
@@ -1055,8 +1050,7 @@ static void hwa742_cleanup(void)
hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
hwa742.extif->cleanup();
hwa742.int_ctrl->cleanup();
- if (hwa742.power_down != NULL)
- hwa742.power_down(hwa742.fbdev->dev);
+ clk_disable(hwa742.sys_ck);
}
struct lcd_ctrl hwa742_ctrl = {
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index a13c8dcad2a..9332d6ca645 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -83,12 +83,14 @@ static inline u32 rfbi_read_reg(int idx)
static int rfbi_get_clocks(void)
{
- if (IS_ERR((rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "dss_ick")))) {
- dev_err(rfbi.fbdev->dev, "can't get dss_ick\n");
+ rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "ick");
+ if (IS_ERR(rfbi.dss_ick)) {
+ dev_err(rfbi.fbdev->dev, "can't get ick\n");
return PTR_ERR(rfbi.dss_ick);
}
- if (IS_ERR((rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck")))) {
+ rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck");
+ if (IS_ERR(rfbi.dss1_fck)) {
dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n");
clk_put(rfbi.dss_ick);
return PTR_ERR(rfbi.dss1_fck);
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index 7000f2cd585..7fa4ab01b0d 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -134,9 +134,7 @@ struct p9100_par {
u32 flags;
#define P9100_FLAG_BLANKED 0x00000001
- unsigned long physbase;
unsigned long which_io;
- unsigned long fbsize;
};
/**
@@ -224,18 +222,16 @@ static int p9100_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct p9100_par *par = (struct p9100_par *)info->par;
return sbusfb_mmap_helper(p9100_mmap_map,
- par->physbase, par->fbsize,
+ info->fix.smem_start, info->fix.smem_len,
par->which_io, vma);
}
static int p9100_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
- struct p9100_par *par = (struct p9100_par *) info->par;
-
/* Make it look like a cg3. */
return sbusfb_ioctl_helper(cmd, arg, info,
- FBTYPE_SUN3COLOR, 8, par->fbsize);
+ FBTYPE_SUN3COLOR, 8, info->fix.smem_len);
}
/*
@@ -271,7 +267,7 @@ static int __devinit p9100_probe(struct of_device *op, const struct of_device_id
spin_lock_init(&par->lock);
/* This is the framebuffer and the only resource apps can mmap. */
- par->physbase = op->resource[2].start;
+ info->fix.smem_start = op->resource[2].start;
par->which_io = op->resource[2].flags & IORESOURCE_BITS;
sbusfb_fill_var(&info->var, dp, 8);
@@ -280,7 +276,7 @@ static int __devinit p9100_probe(struct of_device *op, const struct of_device_id
info->var.blue.length = 8;
linebytes = of_getintprop_default(dp, "linebytes", info->var.xres);
- par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+ info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
par->regs = of_ioremap(&op->resource[0], 0,
sizeof(struct p9100_regs), "p9100 regs");
@@ -290,7 +286,7 @@ static int __devinit p9100_probe(struct of_device *op, const struct of_device_id
info->flags = FBINFO_DEFAULT;
info->fbops = &p9100_ops;
info->screen_base = of_ioremap(&op->resource[2], 0,
- par->fbsize, "p9100 ram");
+ info->fix.smem_len, "p9100 ram");
if (!info->screen_base)
goto out_unmap_regs;
@@ -311,7 +307,7 @@ static int __devinit p9100_probe(struct of_device *op, const struct of_device_id
printk(KERN_INFO "%s: p9100 at %lx:%lx\n",
dp->full_name,
- par->which_io, par->physbase);
+ par->which_io, info->fix.smem_start);
return 0;
@@ -319,7 +315,7 @@ out_dealloc_cmap:
fb_dealloc_cmap(&info->cmap);
out_unmap_screen:
- of_iounmap(&op->resource[2], info->screen_base, par->fbsize);
+ of_iounmap(&op->resource[2], info->screen_base, info->fix.smem_len);
out_unmap_regs:
of_iounmap(&op->resource[0], par->regs, sizeof(struct p9100_regs));
@@ -340,7 +336,7 @@ static int __devexit p9100_remove(struct of_device *op)
fb_dealloc_cmap(&info->cmap);
of_iounmap(&op->resource[0], par->regs, sizeof(struct p9100_regs));
- of_iounmap(&op->resource[2], info->screen_base, par->fbsize);
+ of_iounmap(&op->resource[2], info->screen_base, info->fix.smem_len);
framebuffer_release(info);
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index c6dd924976a..36436ee6c1a 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -1748,7 +1748,7 @@ static void __devexit pm2fb_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
fb_dealloc_cmap(&info->cmap);
kfree(info->pixmap.addr);
- kfree(info);
+ framebuffer_release(info);
}
static struct pci_device_id pm2fb_id_table[] = {
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index e00c1dff55d..c0af638fe70 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -32,25 +32,16 @@
#include <linux/init.h>
#include <asm/abs_addr.h>
+#include <asm/iommu.h>
#include <asm/lv1call.h>
#include <asm/ps3av.h>
#include <asm/ps3fb.h>
#include <asm/ps3.h>
+#include <asm/ps3gpu.h>
#define DEVICE_NAME "ps3fb"
-#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC 0x101
-#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP 0x102
-#define L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP 0x600
-#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601
-#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT_SYNC 0x602
-
-#define L1GPU_FB_BLIT_WAIT_FOR_COMPLETION (1ULL << 32)
-
-#define L1GPU_DISPLAY_SYNC_HSYNC 1
-#define L1GPU_DISPLAY_SYNC_VSYNC 2
-
#define GPU_CMD_BUF_SIZE (2 * 1024 * 1024)
#define GPU_FB_START (64 * 1024)
#define GPU_IOIF (0x0d000000UL)
@@ -462,33 +453,27 @@ static void ps3fb_sync_image(struct device *dev, u64 frame_offset,
src_offset += GPU_FB_START;
mutex_lock(&ps3_gpu_mutex);
- status = lv1_gpu_context_attribute(ps3fb.context_handle,
- L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
- dst_offset, GPU_IOIF + src_offset,
- L1GPU_FB_BLIT_WAIT_FOR_COMPLETION |
- (width << 16) | height,
- line_length);
+ status = lv1_gpu_fb_blit(ps3fb.context_handle, dst_offset,
+ GPU_IOIF + src_offset,
+ L1GPU_FB_BLIT_WAIT_FOR_COMPLETION |
+ (width << 16) | height,
+ line_length);
mutex_unlock(&ps3_gpu_mutex);
if (status)
- dev_err(dev,
- "%s: lv1_gpu_context_attribute FB_BLIT failed: %d\n",
- __func__, status);
+ dev_err(dev, "%s: lv1_gpu_fb_blit failed: %d\n", __func__,
+ status);
#ifdef HEAD_A
- status = lv1_gpu_context_attribute(ps3fb.context_handle,
- L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP,
- 0, frame_offset, 0, 0);
+ status = lv1_gpu_display_flip(ps3fb.context_handle, 0, frame_offset);
if (status)
- dev_err(dev, "%s: lv1_gpu_context_attribute FLIP failed: %d\n",
- __func__, status);
+ dev_err(dev, "%s: lv1_gpu_display_flip failed: %d\n", __func__,
+ status);
#endif
#ifdef HEAD_B
- status = lv1_gpu_context_attribute(ps3fb.context_handle,
- L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP,
- 1, frame_offset, 0, 0);
+ status = lv1_gpu_display_flip(ps3fb.context_handle, 1, frame_offset);
if (status)
- dev_err(dev, "%s: lv1_gpu_context_attribute FLIP failed: %d\n",
- __func__, status);
+ dev_err(dev, "%s: lv1_gpu_display_flip failed: %d\n", __func__,
+ status);
#endif
}
@@ -956,73 +941,6 @@ static irqreturn_t ps3fb_vsync_interrupt(int irq, void *ptr)
}
-static int ps3fb_vsync_settings(struct gpu_driver_info *dinfo,
- struct device *dev)
-{
- int error;
-
- dev_dbg(dev, "version_driver:%x\n", dinfo->version_driver);
- dev_dbg(dev, "irq outlet:%x\n", dinfo->irq.irq_outlet);
- dev_dbg(dev,
- "version_gpu: %x memory_size: %x ch: %x core_freq: %d "
- "mem_freq:%d\n",
- dinfo->version_gpu, dinfo->memory_size, dinfo->hardware_channel,
- dinfo->nvcore_frequency/1000000, dinfo->memory_frequency/1000000);
-
- if (dinfo->version_driver != GPU_DRIVER_INFO_VERSION) {
- dev_err(dev, "%s: version_driver err:%x\n", __func__,
- dinfo->version_driver);
- return -EINVAL;
- }
-
- error = ps3_irq_plug_setup(PS3_BINDING_CPU_ANY, dinfo->irq.irq_outlet,
- &ps3fb.irq_no);
- if (error) {
- dev_err(dev, "%s: ps3_alloc_irq failed %d\n", __func__, error);
- return error;
- }
-
- error = request_irq(ps3fb.irq_no, ps3fb_vsync_interrupt, IRQF_DISABLED,
- DEVICE_NAME, dev);
- if (error) {
- dev_err(dev, "%s: request_irq failed %d\n", __func__, error);
- ps3_irq_plug_destroy(ps3fb.irq_no);
- return error;
- }
-
- dinfo->irq.mask = (1 << GPU_INTR_STATUS_VSYNC_1) |
- (1 << GPU_INTR_STATUS_FLIP_1);
- return 0;
-}
-
-static int ps3fb_xdr_settings(u64 xdr_lpar, struct device *dev)
-{
- int status;
-
- status = lv1_gpu_context_iomap(ps3fb.context_handle, GPU_IOIF,
- xdr_lpar, ps3fb_videomemory.size, 0);
- if (status) {
- dev_err(dev, "%s: lv1_gpu_context_iomap failed: %d\n",
- __func__, status);
- return -ENXIO;
- }
- dev_dbg(dev, "video:%p ioif:%lx lpar:%llx size:%lx\n",
- ps3fb_videomemory.address, GPU_IOIF, xdr_lpar,
- ps3fb_videomemory.size);
-
- status = lv1_gpu_context_attribute(ps3fb.context_handle,
- L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP,
- xdr_lpar, GPU_CMD_BUF_SIZE,
- GPU_IOIF, 0);
- if (status) {
- dev_err(dev,
- "%s: lv1_gpu_context_attribute FB_SETUP failed: %d\n",
- __func__, status);
- return -ENXIO;
- }
- return 0;
-}
-
static struct fb_ops ps3fb_ops = {
.fb_open = ps3fb_open,
.fb_release = ps3fb_release,
@@ -1048,49 +966,18 @@ static struct fb_fix_screeninfo ps3fb_fix __initdata = {
.accel = FB_ACCEL_NONE,
};
-static int ps3fb_set_sync(struct device *dev)
-{
- int status;
-
-#ifdef HEAD_A
- status = lv1_gpu_context_attribute(0x0,
- L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
- 0, L1GPU_DISPLAY_SYNC_VSYNC, 0, 0);
- if (status) {
- dev_err(dev,
- "%s: lv1_gpu_context_attribute DISPLAY_SYNC failed: "
- "%d\n",
- __func__, status);
- return -1;
- }
-#endif
-#ifdef HEAD_B
- status = lv1_gpu_context_attribute(0x0,
- L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
- 1, L1GPU_DISPLAY_SYNC_VSYNC, 0, 0);
-
- if (status) {
- dev_err(dev,
- "%s: lv1_gpu_context_attribute DISPLAY_SYNC failed: "
- "%d\n",
- __func__, status);
- return -1;
- }
-#endif
- return 0;
-}
-
static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
{
struct fb_info *info;
struct ps3fb_par *par;
- int retval = -ENOMEM;
+ int retval;
u64 ddr_lpar = 0;
u64 lpar_dma_control = 0;
u64 lpar_driver_info = 0;
u64 lpar_reports = 0;
u64 lpar_reports_size = 0;
u64 xdr_lpar;
+ struct gpu_driver_info *dinfo;
void *fb_start;
int status;
struct task_struct *task;
@@ -1101,8 +988,8 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
return -ENOMEM;
}
- status = ps3_open_hv_device(dev);
- if (status) {
+ retval = ps3_open_hv_device(dev);
+ if (retval) {
dev_err(&dev->core, "%s: ps3_open_hv_device failed\n",
__func__);
goto err;
@@ -1116,7 +1003,24 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
atomic_set(&ps3fb.ext_flip, 0); /* for flip with vsync */
init_waitqueue_head(&ps3fb.wait_vsync);
- ps3fb_set_sync(&dev->core);
+#ifdef HEAD_A
+ status = lv1_gpu_display_sync(0x0, 0, L1GPU_DISPLAY_SYNC_VSYNC);
+ if (status) {
+ dev_err(&dev->core, "%s: lv1_gpu_display_sync failed: %d\n",
+ __func__, status);
+ retval = -ENODEV;
+ goto err_close_device;
+ }
+#endif
+#ifdef HEAD_B
+ status = lv1_gpu_display_sync(0x0, 1, L1GPU_DISPLAY_SYNC_VSYNC);
+ if (status) {
+ dev_err(&dev->core, "%s: lv1_gpu_display_sync failed: %d\n",
+ __func__, status);
+ retval = -ENODEV;
+ goto err_close_device;
+ }
+#endif
max_ps3fb_size = _ALIGN_UP(GPU_IOIF, 256*1024*1024) - GPU_IOIF;
if (ps3fb_videomemory.size > max_ps3fb_size) {
@@ -1131,7 +1035,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
if (status) {
dev_err(&dev->core, "%s: lv1_gpu_memory_allocate failed: %d\n",
__func__, status);
- goto err;
+ goto err_close_device;
}
dev_dbg(&dev->core, "ddr:lpar:0x%llx\n", ddr_lpar);
@@ -1141,33 +1045,85 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
&lpar_reports, &lpar_reports_size);
if (status) {
dev_err(&dev->core,
- "%s: lv1_gpu_context_attribute failed: %d\n", __func__,
+ "%s: lv1_gpu_context_allocate failed: %d\n", __func__,
status);
goto err_gpu_memory_free;
}
/* vsync interrupt */
- ps3fb.dinfo = (void __force *)ioremap(lpar_driver_info, 128 * 1024);
- if (!ps3fb.dinfo) {
+ dinfo = (void __force *)ioremap(lpar_driver_info, 128 * 1024);
+ if (!dinfo) {
dev_err(&dev->core, "%s: ioremap failed\n", __func__);
goto err_gpu_context_free;
}
- retval = ps3fb_vsync_settings(ps3fb.dinfo, &dev->core);
- if (retval)
+ ps3fb.dinfo = dinfo;
+ dev_dbg(&dev->core, "version_driver:%x\n", dinfo->version_driver);
+ dev_dbg(&dev->core, "irq outlet:%x\n", dinfo->irq.irq_outlet);
+ dev_dbg(&dev->core, "version_gpu: %x memory_size: %x ch: %x "
+ "core_freq: %d mem_freq:%d\n", dinfo->version_gpu,
+ dinfo->memory_size, dinfo->hardware_channel,
+ dinfo->nvcore_frequency/1000000,
+ dinfo->memory_frequency/1000000);
+
+ if (dinfo->version_driver != GPU_DRIVER_INFO_VERSION) {
+ dev_err(&dev->core, "%s: version_driver err:%x\n", __func__,
+ dinfo->version_driver);
+ retval = -EINVAL;
+ goto err_iounmap_dinfo;
+ }
+
+ retval = ps3_irq_plug_setup(PS3_BINDING_CPU_ANY, dinfo->irq.irq_outlet,
+ &ps3fb.irq_no);
+ if (retval) {
+ dev_err(&dev->core, "%s: ps3_alloc_irq failed %d\n", __func__,
+ retval);
goto err_iounmap_dinfo;
+ }
+
+ retval = request_irq(ps3fb.irq_no, ps3fb_vsync_interrupt,
+ IRQF_DISABLED, DEVICE_NAME, &dev->core);
+ if (retval) {
+ dev_err(&dev->core, "%s: request_irq failed %d\n", __func__,
+ retval);
+ goto err_destroy_plug;
+ }
+
+ dinfo->irq.mask = (1 << GPU_INTR_STATUS_VSYNC_1) |
+ (1 << GPU_INTR_STATUS_FLIP_1);
/* Clear memory to prevent kernel info leakage into userspace */
memset(ps3fb_videomemory.address, 0, ps3fb_videomemory.size);
xdr_lpar = ps3_mm_phys_to_lpar(__pa(ps3fb_videomemory.address));
- retval = ps3fb_xdr_settings(xdr_lpar, &dev->core);
- if (retval)
+
+ status = lv1_gpu_context_iomap(ps3fb.context_handle, GPU_IOIF,
+ xdr_lpar, ps3fb_videomemory.size,
+ CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
+ CBE_IOPTE_M);
+ if (status) {
+ dev_err(&dev->core, "%s: lv1_gpu_context_iomap failed: %d\n",
+ __func__, status);
+ retval = -ENXIO;
goto err_free_irq;
+ }
+
+ dev_dbg(&dev->core, "video:%p ioif:%lx lpar:%llx size:%lx\n",
+ ps3fb_videomemory.address, GPU_IOIF, xdr_lpar,
+ ps3fb_videomemory.size);
+
+ status = lv1_gpu_fb_setup(ps3fb.context_handle, xdr_lpar,
+ GPU_CMD_BUF_SIZE, GPU_IOIF);
+ if (status) {
+ dev_err(&dev->core, "%s: lv1_gpu_fb_setup failed: %d\n",
+ __func__, status);
+ retval = -ENXIO;
+ goto err_context_unmap;
+ }
info = framebuffer_alloc(sizeof(struct ps3fb_par), &dev->core);
if (!info)
- goto err_free_irq;
+ goto err_context_fb_close;
par = info->par;
par->mode_id = ~ps3fb_mode; /* != ps3fb_mode, to trigger change */
@@ -1210,7 +1166,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
if (retval < 0)
goto err_fb_dealloc;
- dev->core.driver_data = info;
+ ps3_system_bus_set_drvdata(dev, info);
dev_info(info->device, "%s %s, using %u KiB of video memory\n",
dev_driver_string(info->dev), dev_name(info->dev),
@@ -1232,8 +1188,14 @@ err_fb_dealloc:
fb_dealloc_cmap(&info->cmap);
err_framebuffer_release:
framebuffer_release(info);
+err_context_fb_close:
+ lv1_gpu_fb_close(ps3fb.context_handle);
+err_context_unmap:
+ lv1_gpu_context_iomap(ps3fb.context_handle, GPU_IOIF, xdr_lpar,
+ ps3fb_videomemory.size, CBE_IOPTE_M);
err_free_irq:
free_irq(ps3fb.irq_no, &dev->core);
+err_destroy_plug:
ps3_irq_plug_destroy(ps3fb.irq_no);
err_iounmap_dinfo:
iounmap((u8 __force __iomem *)ps3fb.dinfo);
@@ -1241,14 +1203,16 @@ err_gpu_context_free:
lv1_gpu_context_free(ps3fb.context_handle);
err_gpu_memory_free:
lv1_gpu_memory_free(ps3fb.memory_handle);
+err_close_device:
+ ps3_close_hv_device(dev);
err:
return retval;
}
static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
{
- int status;
- struct fb_info *info = dev->core.driver_data;
+ struct fb_info *info = ps3_system_bus_get_drvdata(dev);
+ u64 xdr_lpar = ps3_mm_phys_to_lpar(__pa(ps3fb_videomemory.address));
dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
@@ -1268,20 +1232,14 @@ static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
- info = dev->core.driver_data = NULL;
+ ps3_system_bus_set_drvdata(dev, NULL);
}
iounmap((u8 __force __iomem *)ps3fb.dinfo);
-
- status = lv1_gpu_context_free(ps3fb.context_handle);
- if (status)
- dev_dbg(&dev->core, "lv1_gpu_context_free failed: %d\n",
- status);
-
- status = lv1_gpu_memory_free(ps3fb.memory_handle);
- if (status)
- dev_dbg(&dev->core, "lv1_gpu_memory_free failed: %d\n",
- status);
-
+ lv1_gpu_fb_close(ps3fb.context_handle);
+ lv1_gpu_context_iomap(ps3fb.context_handle, GPU_IOIF, xdr_lpar,
+ ps3fb_videomemory.size, CBE_IOPTE_M);
+ lv1_gpu_context_free(ps3fb.context_handle);
+ lv1_gpu_memory_free(ps3fb.memory_handle);
ps3_close_hv_device(dev);
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
new file mode 100644
index 00000000000..84d8327e47d
--- /dev/null
+++ b/drivers/video/pxa168fb.c
@@ -0,0 +1,803 @@
+/*
+ * linux/drivers/video/pxa168fb.c -- Marvell PXA168 LCD Controller
+ *
+ * Copyright (C) 2008 Marvell International Ltd.
+ * All rights reserved.
+ *
+ * 2009-02-16 adapted from original version for PXA168/910
+ * Jun Nie <njun@marvell.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <video/pxa168fb.h>
+
+#include "pxa168fb.h"
+
+#define DEFAULT_REFRESH 60 /* Hz */
+
+static int determine_best_pix_fmt(struct fb_var_screeninfo *var)
+{
+ /*
+ * Pseudocolor mode?
+ */
+ if (var->bits_per_pixel == 8)
+ return PIX_FMT_PSEUDOCOLOR;
+
+ /*
+ * Check for 565/1555.
+ */
+ if (var->bits_per_pixel == 16 && var->red.length <= 5 &&
+ var->green.length <= 6 && var->blue.length <= 5) {
+ if (var->transp.length == 0) {
+ if (var->red.offset >= var->blue.offset)
+ return PIX_FMT_RGB565;
+ else
+ return PIX_FMT_BGR565;
+ }
+
+ if (var->transp.length == 1 && var->green.length <= 5) {
+ if (var->red.offset >= var->blue.offset)
+ return PIX_FMT_RGB1555;
+ else
+ return PIX_FMT_BGR1555;
+ }
+
+ /* fall through */
+ }
+
+ /*
+ * Check for 888/A888.
+ */
+ if (var->bits_per_pixel <= 32 && var->red.length <= 8 &&
+ var->green.length <= 8 && var->blue.length <= 8) {
+ if (var->bits_per_pixel == 24 && var->transp.length == 0) {
+ if (var->red.offset >= var->blue.offset)
+ return PIX_FMT_RGB888PACK;
+ else
+ return PIX_FMT_BGR888PACK;
+ }
+
+ if (var->bits_per_pixel == 32 && var->transp.length == 8) {
+ if (var->red.offset >= var->blue.offset)
+ return PIX_FMT_RGBA888;
+ else
+ return PIX_FMT_BGRA888;
+ } else {
+ if (var->red.offset >= var->blue.offset)
+ return PIX_FMT_RGB888UNPACK;
+ else
+ return PIX_FMT_BGR888UNPACK;
+ }
+
+ /* fall through */
+ }
+
+ return -EINVAL;
+}
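+
+/*
+ * Illustrative example (not from the original patch): a 16 bpp var with
+ * red at offset 11/length 5, green at 5/6, blue at 0/5 and no transp
+ * field maps to PIX_FMT_RGB565 above; the same layout with red at
+ * offset 0 and blue at offset 11 maps to PIX_FMT_BGR565, and anything
+ * matching none of the checks falls through to -EINVAL.
+ */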
+
+static void set_pix_fmt(struct fb_var_screeninfo *var, int pix_fmt)
+{
+ switch (pix_fmt) {
+ case PIX_FMT_RGB565:
+ var->bits_per_pixel = 16;
+ var->red.offset = 11; var->red.length = 5;
+ var->green.offset = 5; var->green.length = 6;
+ var->blue.offset = 0; var->blue.length = 5;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIX_FMT_BGR565:
+ var->bits_per_pixel = 16;
+ var->red.offset = 0; var->red.length = 5;
+ var->green.offset = 5; var->green.length = 6;
+ var->blue.offset = 11; var->blue.length = 5;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIX_FMT_RGB1555:
+ var->bits_per_pixel = 16;
+ var->red.offset = 10; var->red.length = 5;
+ var->green.offset = 5; var->green.length = 5;
+ var->blue.offset = 0; var->blue.length = 5;
+ var->transp.offset = 15; var->transp.length = 1;
+ break;
+ case PIX_FMT_BGR1555:
+ var->bits_per_pixel = 16;
+ var->red.offset = 0; var->red.length = 5;
+ var->green.offset = 5; var->green.length = 5;
+ var->blue.offset = 10; var->blue.length = 5;
+ var->transp.offset = 15; var->transp.length = 1;
+ break;
+ case PIX_FMT_RGB888PACK:
+ var->bits_per_pixel = 24;
+ var->red.offset = 16; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIX_FMT_BGR888PACK:
+ var->bits_per_pixel = 24;
+ var->red.offset = 0; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 16; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ case PIX_FMT_RGBA888:
+ var->bits_per_pixel = 32;
+ var->red.offset = 16; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ var->transp.offset = 24; var->transp.length = 8;
+ break;
+ case PIX_FMT_BGRA888:
+ var->bits_per_pixel = 32;
+ var->red.offset = 0; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 16; var->blue.length = 8;
+ var->transp.offset = 24; var->transp.length = 8;
+ break;
+ case PIX_FMT_PSEUDOCOLOR:
+ var->bits_per_pixel = 8;
+ var->red.offset = 0; var->red.length = 8;
+ var->green.offset = 0; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ var->transp.offset = 0; var->transp.length = 0;
+ break;
+ }
+}
+
+static void set_mode(struct pxa168fb_info *fbi, struct fb_var_screeninfo *var,
+ struct fb_videomode *mode, int pix_fmt, int ystretch)
+{
+ struct fb_info *info = fbi->info;
+
+ set_pix_fmt(var, pix_fmt);
+
+ var->xres = mode->xres;
+ var->yres = mode->yres;
+ var->xres_virtual = max(var->xres, var->xres_virtual);
+ if (ystretch)
+ var->yres_virtual = info->fix.smem_len /
+ (var->xres_virtual * (var->bits_per_pixel >> 3));
+ else
+ var->yres_virtual = max(var->yres, var->yres_virtual);
+ var->grayscale = 0;
+ var->accel_flags = FB_ACCEL_NONE;
+ var->pixclock = mode->pixclock;
+ var->left_margin = mode->left_margin;
+ var->right_margin = mode->right_margin;
+ var->upper_margin = mode->upper_margin;
+ var->lower_margin = mode->lower_margin;
+ var->hsync_len = mode->hsync_len;
+ var->vsync_len = mode->vsync_len;
+ var->sync = mode->sync;
+ var->vmode = FB_VMODE_NONINTERLACED;
+ var->rotate = FB_ROTATE_UR;
+}
+
+static int pxa168fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct pxa168fb_info *fbi = info->par;
+ int pix_fmt;
+
+ /*
+ * Determine which pixel format we're going to use.
+ */
+ pix_fmt = determine_best_pix_fmt(var);
+ if (pix_fmt < 0)
+ return pix_fmt;
+ set_pix_fmt(var, pix_fmt);
+ fbi->pix_fmt = pix_fmt;
+
+ /*
+ * Basic geometry sanity checks.
+ */
+ if (var->xoffset + var->xres > var->xres_virtual)
+ return -EINVAL;
+ if (var->yoffset + var->yres > var->yres_virtual)
+ return -EINVAL;
+ if (var->xres + var->right_margin +
+ var->hsync_len + var->left_margin > 2048)
+ return -EINVAL;
+ if (var->yres + var->lower_margin +
+ var->vsync_len + var->upper_margin > 2048)
+ return -EINVAL;
+
+ /*
+ * Check size of framebuffer.
+ */
+ if (var->xres_virtual * var->yres_virtual *
+ (var->bits_per_pixel >> 3) > info->fix.smem_len)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * The hardware clock divider has an integer and a fractional
+ * stage:
+ *
+ * clk2 = clk_in / integer_divider
+ * clk_out = clk2 * (1 - (fractional_divider >> 12))
+ *
+ * Calculate integer and fractional divider for given clk_in
+ * and clk_out.
+ */
+ */
+static void set_clock_divider(struct pxa168fb_info *fbi,
+ const struct fb_videomode *m)
+{
+ int divider_int;
+ int needed_pixclk;
+ u64 div_result;
+ u32 x = 0;
+
+ /*
+ * Note: the pixclock field used by the Linux fb layer
+ * (struct fb_videomode and struct fb_var_screeninfo) is
+ * expressed in picoseconds per pixel.
+ */
+
+ /*
+ * Check input values.
+ */
+ if (!m || !m->pixclock || !m->refresh) {
+ dev_err(fbi->dev, "Input refresh or pixclock is wrong.\n");
+ return;
+ }
+
+ /*
+ * Using PLL/AXI clock.
+ */
+ x = 0x80000000;
+
+ /*
+ * Calc divider according to refresh rate.
+ */
+ div_result = 1000000000000ll;
+ do_div(div_result, m->pixclock);
+ needed_pixclk = (u32)div_result;
+
+ divider_int = clk_get_rate(fbi->clk) / needed_pixclk;
+
+ /* check whether divisor is too small. */
+ if (divider_int < 2) {
+ dev_warn(fbi->dev, "Warning: clock source is too slow. "
+ "Try a smaller resolution\n");
+ divider_int = 2;
+ }
+
+ /*
+ * Write the divider setting to the register.
+ */
+ x |= divider_int;
+ writel(x, fbi->reg_base + LCD_CFG_SCLK_DIV);
+}
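+
+/*
+ * Worked example for the math above (illustrative only; the 312 MHz
+ * source clock is an assumption, not taken from this patch): a mode
+ * with pixclock = 20000 ps needs 10^12 / 20000 = 50 MHz, so
+ * divider_int = 312000000 / 50000000 = 6 and the register is written
+ * as 0x80000000 | 6 (PLL/AXI source selected, integer divide by 6,
+ * fractional stage left at zero).
+ */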
+
+static void set_dma_control0(struct pxa168fb_info *fbi)
+{
+ u32 x;
+
+ /*
+ * Set bit to enable graphics DMA.
+ */
+ x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
+ x |= fbi->active ? 0x00000100 : 0;
+ fbi->active = 0;
+
+ /*
+ * If we are in a pseudo-color mode, we need to enable
+ * palette lookup.
+ */
+ if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR)
+ x |= 0x10000000;
+
+ /*
+ * Configure hardware pixel format.
+ */
+ x &= ~(0xF << 16);
+ x |= (fbi->pix_fmt >> 1) << 16;
+
+ /*
+ * Check red and blue pixel swap.
+ * 1. source data swap
+ * 2. panel output data swap
+ */
+ x &= ~(1 << 12);
+ x |= ((fbi->pix_fmt & 1) ^ (fbi->panel_rbswap)) << 12;
+
+ writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL0);
+}
+
+static void set_dma_control1(struct pxa168fb_info *fbi, int sync)
+{
+ u32 x;
+
+ /*
+ * Configure default bits: vsync triggers DMA, gated clock
+ * enable, power save enable, configure alpha registers to
+ * display 100% graphics, and set pixel command.
+ */
+ x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL1);
+ x |= 0x2032ff81;
+
+ /*
+ * We trigger DMA on the falling edge of vsync if vsync is
+ * active low, or on the rising edge if vsync is active high.
+ */
+ if (!(sync & FB_SYNC_VERT_HIGH_ACT))
+ x |= 0x08000000;
+
+ writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL1);
+}
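+
+/*
+ * For reference (decoded against the CFG_* masks in pxa168fb.h; the
+ * breakdown is not stated explicitly in this patch): 0x2032ff81 above
+ * selects vsync trigger mode 2 (CFG_VSYNC_TRIG), gated clock and
+ * power-down enable (CFG_GATED_ENA, CFG_PWRDN_ENA), alpha mode 2 with
+ * alpha 0xff (CFG_ALPHA_MODE, CFG_ALPHA) and pixel command 0x81
+ * (CFG_PXLCMD); the 0x08000000 OR'd in for active-low vsync is
+ * CFG_VSYNC_INV (bit 27).
+ */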
+
+static void set_graphics_start(struct fb_info *info, int xoffset, int yoffset)
+{
+ struct pxa168fb_info *fbi = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ int pixel_offset;
+ unsigned long addr;
+
+ pixel_offset = (yoffset * var->xres_virtual) + xoffset;
+
+ addr = fbi->fb_start_dma + (pixel_offset * (var->bits_per_pixel >> 3));
+ writel(addr, fbi->reg_base + LCD_CFG_GRA_START_ADDR0);
+}
+
+static void set_dumb_panel_control(struct fb_info *info)
+{
+ struct pxa168fb_info *fbi = info->par;
+ struct pxa168fb_mach_info *mi = fbi->dev->platform_data;
+ u32 x;
+
+ /*
+ * Preserve enable flag.
+ */
+ x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL) & 0x00000001;
+
+ x |= (fbi->is_blanked ? 0x7 : mi->dumb_mode) << 28;
+ x |= mi->gpio_output_data << 20;
+ x |= mi->gpio_output_mask << 12;
+ x |= mi->panel_rgb_reverse_lanes ? 0x00000080 : 0;
+ x |= mi->invert_composite_blank ? 0x00000040 : 0;
+ x |= (info->var.sync & FB_SYNC_COMP_HIGH_ACT) ? 0x00000020 : 0;
+ x |= mi->invert_pix_val_ena ? 0x00000010 : 0;
+ x |= (info->var.sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 0x00000008;
+ x |= (info->var.sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 0x00000004;
+ x |= mi->invert_pixclock ? 0x00000002 : 0;
+
+ writel(x, fbi->reg_base + LCD_SPU_DUMB_CTRL);
+}
+
+static void set_dumb_screen_dimensions(struct fb_info *info)
+{
+ struct pxa168fb_info *fbi = info->par;
+ struct fb_var_screeninfo *v = &info->var;
+ int x;
+ int y;
+
+ x = v->xres + v->right_margin + v->hsync_len + v->left_margin;
+ y = v->yres + v->lower_margin + v->vsync_len + v->upper_margin;
+
+ writel((y << 16) | x, fbi->reg_base + LCD_SPUT_V_H_TOTAL);
+}
+
+static int pxa168fb_set_par(struct fb_info *info)
+{
+ struct pxa168fb_info *fbi = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ struct fb_videomode mode;
+ u32 x;
+ struct pxa168fb_mach_info *mi;
+
+ mi = fbi->dev->platform_data;
+
+ /*
+ * Set additional mode info.
+ */
+ if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR)
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ else
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8;
+ info->fix.ypanstep = var->yres;
+
+ /*
+ * Disable panel output while we setup the display.
+ */
+ x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL);
+ writel(x & ~1, fbi->reg_base + LCD_SPU_DUMB_CTRL);
+
+ /*
+ * Configure global panel parameters.
+ */
+ writel((var->yres << 16) | var->xres,
+ fbi->reg_base + LCD_SPU_V_H_ACTIVE);
+
+ /*
+ * convert var to video mode
+ */
+ fb_var_to_videomode(&mode, &info->var);
+
+ /* Calculate clock divisor. */
+ set_clock_divider(fbi, &mode);
+
+ /* Configure dma ctrl regs. */
+ set_dma_control0(fbi);
+ set_dma_control1(fbi, info->var.sync);
+
+ /*
+ * Configure graphics DMA parameters.
+ */
+ x = readl(fbi->reg_base + LCD_CFG_GRA_PITCH);
+ x = (x & ~0xFFFF) | ((var->xres_virtual * var->bits_per_pixel) >> 3);
+ writel(x, fbi->reg_base + LCD_CFG_GRA_PITCH);
+ writel((var->yres << 16) | var->xres,
+ fbi->reg_base + LCD_SPU_GRA_HPXL_VLN);
+ writel((var->yres << 16) | var->xres,
+ fbi->reg_base + LCD_SPU_GZM_HPXL_VLN);
+
+ /*
+ * Configure dumb panel ctrl regs & timings.
+ */
+ set_dumb_panel_control(info);
+ set_dumb_screen_dimensions(info);
+
+ writel((var->left_margin << 16) | var->right_margin,
+ fbi->reg_base + LCD_SPU_H_PORCH);
+ writel((var->upper_margin << 16) | var->lower_margin,
+ fbi->reg_base + LCD_SPU_V_PORCH);
+
+ /*
+ * Re-enable panel output.
+ */
+ x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL);
+ writel(x | 1, fbi->reg_base + LCD_SPU_DUMB_CTRL);
+
+ return 0;
+}
+
+static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf)
+{
+ return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset;
+}
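+
+/*
+ * Example of the packing above (illustrative): for the 5-bit red field
+ * of RGB565 (offset 11), chan_to_field(0xffff, &var->red) is
+ * (0xffff >> (16 - 5)) << 11 = 0x1f << 11 = 0xf800.
+ */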
+
+static u32 to_rgb(u16 red, u16 green, u16 blue)
+{
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+
+ return (red << 16) | (green << 8) | blue;
+}
+
+static int
+pxa168fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
+ unsigned int blue, unsigned int trans, struct fb_info *info)
+{
+ struct pxa168fb_info *fbi = info->par;
+ u32 val;
+
+ if (info->var.grayscale)
+ red = green = blue = (19595 * red + 38470 * green +
+ 7471 * blue) >> 16;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16) {
+ val = chan_to_field(red, &info->var.red);
+ val |= chan_to_field(green, &info->var.green);
+ val |= chan_to_field(blue, &info->var.blue);
+ fbi->pseudo_palette[regno] = val;
+ }
+
+ if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR && regno < 256) {
+ val = to_rgb(red, green, blue);
+ writel(val, fbi->reg_base + LCD_SPU_SRAM_WRDAT);
+ writel(0x8300 | regno, fbi->reg_base + LCD_SPU_SRAM_CTRL);
+ }
+
+ return 0;
+}
+
+static int pxa168fb_blank(int blank, struct fb_info *info)
+{
+ struct pxa168fb_info *fbi = info->par;
+
+ fbi->is_blanked = (blank == FB_BLANK_UNBLANK) ? 0 : 1;
+ set_dumb_panel_control(info);
+
+ return 0;
+}
+
+static int pxa168fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ set_graphics_start(info, var->xoffset, var->yoffset);
+
+ return 0;
+}
+
+static irqreturn_t pxa168fb_handle_irq(int irq, void *dev_id)
+{
+ struct pxa168fb_info *fbi = dev_id;
+ u32 isr = readl(fbi->reg_base + SPU_IRQ_ISR);
+
+ if ((isr & GRA_FRAME_IRQ0_ENA_MASK)) {
+
+ writel(isr & (~GRA_FRAME_IRQ0_ENA_MASK),
+ fbi->reg_base + SPU_IRQ_ISR);
+
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static struct fb_ops pxa168fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = pxa168fb_check_var,
+ .fb_set_par = pxa168fb_set_par,
+ .fb_setcolreg = pxa168fb_setcolreg,
+ .fb_blank = pxa168fb_blank,
+ .fb_pan_display = pxa168fb_pan_display,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+static int __init pxa168fb_init_mode(struct fb_info *info,
+ struct pxa168fb_mach_info *mi)
+{
+ struct pxa168fb_info *fbi = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ int ret = 0;
+ u32 total_w, total_h, refresh;
+ u64 div_result;
+ const struct fb_videomode *m;
+
+ /*
+ * Set default value
+ */
+ refresh = DEFAULT_REFRESH;
+
+ /* try to find best video mode. */
+ m = fb_find_best_mode(&info->var, &info->modelist);
+ if (m)
+ fb_videomode_to_var(&info->var, m);
+
+ /* Init settings. */
+ var->xres_virtual = var->xres;
+ var->yres_virtual = info->fix.smem_len /
+ (var->xres_virtual * (var->bits_per_pixel >> 3));
+ dev_dbg(fbi->dev, "pxa168fb: find best mode: res = %dx%d\n",
+ var->xres, var->yres);
+
+ /* correct pixclock. */
+ total_w = var->xres + var->left_margin + var->right_margin +
+ var->hsync_len;
+ total_h = var->yres + var->upper_margin + var->lower_margin +
+ var->vsync_len;
+
+ div_result = 1000000000000ll;
+ do_div(div_result, total_w * total_h * refresh);
+ var->pixclock = (u32)div_result;
+
+ return ret;
+}
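+
+/*
+ * Worked example for the pixclock correction above (panel timings are
+ * assumed for illustration): an 800x480 mode with left/right margins
+ * 215/40, hsync_len 1, upper/lower margins 34/10 and vsync_len 1 at
+ * 60 Hz gives total_w = 1056 and total_h = 525, so pixclock =
+ * 10^12 / (1056 * 525 * 60) ~= 30062 ps (about a 33.26 MHz dot clock).
+ */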
+
+static int __init pxa168fb_probe(struct platform_device *pdev)
+{
+ struct pxa168fb_mach_info *mi;
+ struct fb_info *info = NULL;
+ struct pxa168fb_info *fbi = NULL;
+ struct resource *res;
+ struct clk *clk;
+ int irq, ret;
+
+ mi = pdev->dev.platform_data;
+ if (mi == NULL) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return -EINVAL;
+ }
+
+ clk = clk_get(&pdev->dev, "LCDCLK");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "unable to get LCDCLK");
+ return PTR_ERR(clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no IO memory defined\n");
+ return -ENOENT;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no IRQ defined\n");
+ return -ENOENT;
+ }
+
+ info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
+ if (info == NULL) {
+ clk_put(clk);
+ return -ENOMEM;
+ }
+
+ /* Initialize private data */
+ fbi = info->par;
+ fbi->info = info;
+ fbi->clk = clk;
+ fbi->dev = info->dev = &pdev->dev;
+ fbi->panel_rbswap = mi->panel_rbswap;
+ fbi->is_blanked = 0;
+ fbi->active = mi->active;
+
+ /*
+ * Initialise static fb parameters.
+ */
+ info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
+ FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
+ info->node = -1;
+ strlcpy(info->fix.id, mi->id, 16);
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 0;
+ info->fix.ypanstep = 0;
+ info->fix.ywrapstep = 0;
+ info->fix.mmio_start = res->start;
+ info->fix.mmio_len = res->end - res->start + 1;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->fbops = &pxa168fb_ops;
+ info->pseudo_palette = fbi->pseudo_palette;
+
+ /*
+ * Map LCD controller registers.
+ */
+ fbi->reg_base = ioremap_nocache(res->start, res->end - res->start);
+ if (fbi->reg_base == NULL) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ /*
+ * Allocate framebuffer memory.
+ */
+ info->fix.smem_len = PAGE_ALIGN(DEFAULT_FB_SIZE);
+
+ info->screen_base = dma_alloc_writecombine(fbi->dev, info->fix.smem_len,
+ &fbi->fb_start_dma, GFP_KERNEL);
+ if (info->screen_base == NULL) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ info->fix.smem_start = (unsigned long)fbi->fb_start_dma;
+
+ /*
+ * Set video mode according to platform data.
+ */
+ set_mode(fbi, &info->var, mi->modes, mi->pix_fmt, 1);
+
+ fb_videomode_to_modelist(mi->modes, mi->num_modes, &info->modelist);
+
+ /*
+ * init video mode data.
+ */
+ pxa168fb_init_mode(info, mi);
+
+ ret = pxa168fb_check_var(&info->var, info);
+ if (ret)
+ goto failed_free_fbmem;
+
+ /*
+ * Fill in sane defaults.
+ */
+ ret = pxa168fb_check_var(&info->var, info);
+ if (ret)
+ goto failed;
+
+ /*
+ * enable controller clock
+ */
+ clk_enable(fbi->clk);
+
+ pxa168fb_set_par(info);
+
+ /*
+ * Configure default register values.
+ */
+ writel(0, fbi->reg_base + LCD_SPU_BLANKCOLOR);
+ writel(mi->io_pin_allocation_mode, fbi->reg_base + SPU_IOPAD_CONTROL);
+ writel(0, fbi->reg_base + LCD_CFG_GRA_START_ADDR1);
+ writel(0, fbi->reg_base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+ writel(0, fbi->reg_base + LCD_SPU_SRAM_PARA0);
+ writel(CFG_CSB_256x32(0x1)|CFG_CSB_256x24(0x1)|CFG_CSB_256x8(0x1),
+ fbi->reg_base + LCD_SPU_SRAM_PARA1);
+
+ /*
+ * Allocate color map.
+ */
+ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
+ ret = -ENOMEM;
+ goto failed_free_clk;
+ }
+
+ /*
+ * Register irq handler.
+ */
+ ret = request_irq(irq, pxa168fb_handle_irq, IRQF_SHARED,
+ info->fix.id, fbi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to request IRQ\n");
+ ret = -ENXIO;
+ goto failed_free_cmap;
+ }
+
+ /*
+ * Enable GFX interrupt
+ */
+ writel(GRA_FRAME_IRQ0_ENA(0x1), fbi->reg_base + SPU_IRQ_ENA);
+
+ /*
+ * Register framebuffer.
+ */
+ ret = register_framebuffer(info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register pxa168-fb: %d\n", ret);
+ ret = -ENXIO;
+ goto failed_free_irq;
+ }
+
+ platform_set_drvdata(pdev, fbi);
+ return 0;
+
+failed_free_irq:
+ free_irq(irq, fbi);
+failed_free_cmap:
+ fb_dealloc_cmap(&info->cmap);
+failed_free_clk:
+ clk_disable(fbi->clk);
+failed_free_fbmem:
+ dma_free_coherent(fbi->dev, info->fix.smem_len,
+ info->screen_base, fbi->fb_start_dma);
+failed:
+ kfree(info);
+ clk_put(clk);
+
+ dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
+ return ret;
+}
+
+static struct platform_driver pxa168fb_driver = {
+ .driver = {
+ .name = "pxa168-fb",
+ .owner = THIS_MODULE,
+ },
+ .probe = pxa168fb_probe,
+};
+
+static int __devinit pxa168fb_init(void)
+{
+ return platform_driver_register(&pxa168fb_driver);
+}
+module_init(pxa168fb_init);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com> "
+ "Green Wan <gwan@marvell.com>");
+MODULE_DESCRIPTION("Framebuffer driver for PXA168/910");
+MODULE_LICENSE("GPL");
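For context, the driver above receives its panel description through platform data. The sketch below is illustrative only: the field names follow the pxa168fb_mach_info and fb_videomode members referenced in pxa168fb_probe() and set_dumb_panel_control(), while the timing values and the "example" identifiers are assumptions for a hypothetical 800x480 panel. A board file might supply something along these lines:

	static struct fb_videomode example_modes[] = {
		{
			.pixclock	= 30062,	/* ps */
			.refresh	= 60,
			.xres		= 800,
			.yres		= 480,
			.hsync_len	= 1,
			.left_margin	= 215,
			.right_margin	= 40,
			.vsync_len	= 1,
			.upper_margin	= 34,
			.lower_margin	= 10,
		},
	};

	static struct pxa168fb_mach_info example_lcd_info = {
		.id			= "GFX Layer",
		.modes			= example_modes,
		.num_modes		= ARRAY_SIZE(example_modes),
		.pix_fmt		= PIX_FMT_RGB565,
		.dumb_mode		= DUMB16_RGB565_0,
		.io_pin_allocation_mode	= IOPAD_DUMB16GPIO,
		.active			= 1,
	};

The structure would then be passed as the platform_data of a "pxa168-fb" platform_device registered by the board code.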
diff --git a/drivers/video/pxa168fb.h b/drivers/video/pxa168fb.h
new file mode 100644
index 00000000000..eee09279c52
--- /dev/null
+++ b/drivers/video/pxa168fb.h
@@ -0,0 +1,558 @@
+#ifndef __PXA168FB_H__
+#define __PXA168FB_H__
+
+/* ------------< LCD register >------------ */
+/* Video Frame 0&1 start address registers */
+#define LCD_SPU_DMA_START_ADDR_Y0 0x00C0
+#define LCD_SPU_DMA_START_ADDR_U0 0x00C4
+#define LCD_SPU_DMA_START_ADDR_V0 0x00C8
+#define LCD_CFG_DMA_START_ADDR_0 0x00CC /* Cmd address */
+#define LCD_SPU_DMA_START_ADDR_Y1 0x00D0
+#define LCD_SPU_DMA_START_ADDR_U1 0x00D4
+#define LCD_SPU_DMA_START_ADDR_V1 0x00D8
+#define LCD_CFG_DMA_START_ADDR_1 0x00DC /* Cmd address */
+
+/* YC & UV Pitch */
+#define LCD_SPU_DMA_PITCH_YC 0x00E0
+#define SPU_DMA_PITCH_C(c) ((c) << 16)
+#define SPU_DMA_PITCH_Y(y) (y)
+#define LCD_SPU_DMA_PITCH_UV 0x00E4
+#define SPU_DMA_PITCH_V(v) ((v) << 16)
+#define SPU_DMA_PITCH_U(u) (u)
+
+/* Video Starting Point on Screen Register */
+#define LCD_SPUT_DMA_OVSA_HPXL_VLN 0x00E8
+#define CFG_DMA_OVSA_VLN(y) ((y) << 16) /* 0~0xfff */
+#define CFG_DMA_OVSA_HPXL(x) (x) /* 0~0xfff */
+
+/* Video Size Register */
+#define LCD_SPU_DMA_HPXL_VLN 0x00EC
+#define CFG_DMA_VLN(y) ((y) << 16)
+#define CFG_DMA_HPXL(x) (x)
+
+/* Video Size After zooming Register */
+#define LCD_SPU_DZM_HPXL_VLN 0x00F0
+#define CFG_DZM_VLN(y) ((y) << 16)
+#define CFG_DZM_HPXL(x) (x)
+
+/* Graphic Frame 0&1 Starting Address Register */
+#define LCD_CFG_GRA_START_ADDR0 0x00F4
+#define LCD_CFG_GRA_START_ADDR1 0x00F8
+
+/* Graphic Frame Pitch */
+#define LCD_CFG_GRA_PITCH 0x00FC
+
+/* Graphic Starting Point on Screen Register */
+#define LCD_SPU_GRA_OVSA_HPXL_VLN 0x0100
+#define CFG_GRA_OVSA_VLN(y) ((y) << 16)
+#define CFG_GRA_OVSA_HPXL(x) (x)
+
+/* Graphic Size Register */
+#define LCD_SPU_GRA_HPXL_VLN 0x0104
+#define CFG_GRA_VLN(y) ((y) << 16)
+#define CFG_GRA_HPXL(x) (x)
+
+/* Graphic Size after Zooming Register */
+#define LCD_SPU_GZM_HPXL_VLN 0x0108
+#define CFG_GZM_VLN(y) ((y) << 16)
+#define CFG_GZM_HPXL(x) (x)
+
+/* HW Cursor Starting Point on Screen Register */
+#define LCD_SPU_HWC_OVSA_HPXL_VLN 0x010C
+#define CFG_HWC_OVSA_VLN(y) ((y) << 16)
+#define CFG_HWC_OVSA_HPXL(x) (x)
+
+/* HW Cursor Size */
+#define LCD_SPU_HWC_HPXL_VLN 0x0110
+#define CFG_HWC_VLN(y) ((y) << 16)
+#define CFG_HWC_HPXL(x) (x)
+
+/* Total Screen Size Register */
+#define LCD_SPUT_V_H_TOTAL 0x0114
+#define CFG_V_TOTAL(y) ((y) << 16)
+#define CFG_H_TOTAL(x) (x)
+
+/* Total Screen Active Size Register */
+#define LCD_SPU_V_H_ACTIVE 0x0118
+#define CFG_V_ACTIVE(y) ((y) << 16)
+#define CFG_H_ACTIVE(x) (x)
+
+/* Screen H&V Porch Register */
+#define LCD_SPU_H_PORCH 0x011C
+#define CFG_H_BACK_PORCH(b) ((b) << 16)
+#define CFG_H_FRONT_PORCH(f) (f)
+#define LCD_SPU_V_PORCH 0x0120
+#define CFG_V_BACK_PORCH(b) ((b) << 16)
+#define CFG_V_FRONT_PORCH(f) (f)
+
+/* Screen Blank Color Register */
+#define LCD_SPU_BLANKCOLOR 0x0124
+#define CFG_BLANKCOLOR_MASK 0x00FFFFFF
+#define CFG_BLANKCOLOR_R_MASK 0x000000FF
+#define CFG_BLANKCOLOR_G_MASK 0x0000FF00
+#define CFG_BLANKCOLOR_B_MASK 0x00FF0000
+
+/* HW Cursor Color 1&2 Register */
+#define LCD_SPU_ALPHA_COLOR1 0x0128
+#define CFG_HWC_COLOR1 0x00FFFFFF
+#define CFG_HWC_COLOR1_R(red) ((red) << 16)
+#define CFG_HWC_COLOR1_G(green) ((green) << 8)
+#define CFG_HWC_COLOR1_B(blue) (blue)
+#define CFG_HWC_COLOR1_R_MASK 0x000000FF
+#define CFG_HWC_COLOR1_G_MASK 0x0000FF00
+#define CFG_HWC_COLOR1_B_MASK 0x00FF0000
+#define LCD_SPU_ALPHA_COLOR2 0x012C
+#define CFG_HWC_COLOR2 0x00FFFFFF
+#define CFG_HWC_COLOR2_R_MASK 0x000000FF
+#define CFG_HWC_COLOR2_G_MASK 0x0000FF00
+#define CFG_HWC_COLOR2_B_MASK 0x00FF0000
+
+/* Video YUV Color Key Control */
+#define LCD_SPU_COLORKEY_Y 0x0130
+#define CFG_CKEY_Y2(y2) ((y2) << 24)
+#define CFG_CKEY_Y2_MASK 0xFF000000
+#define CFG_CKEY_Y1(y1) ((y1) << 16)
+#define CFG_CKEY_Y1_MASK 0x00FF0000
+#define CFG_CKEY_Y(y) ((y) << 8)
+#define CFG_CKEY_Y_MASK 0x0000FF00
+#define CFG_ALPHA_Y(y) (y)
+#define CFG_ALPHA_Y_MASK 0x000000FF
+#define LCD_SPU_COLORKEY_U 0x0134
+#define CFG_CKEY_U2(u2) ((u2) << 24)
+#define CFG_CKEY_U2_MASK 0xFF000000
+#define CFG_CKEY_U1(u1) ((u1) << 16)
+#define CFG_CKEY_U1_MASK 0x00FF0000
+#define CFG_CKEY_U(u) ((u) << 8)
+#define CFG_CKEY_U_MASK 0x0000FF00
+#define CFG_ALPHA_U(u) (u)
+#define CFG_ALPHA_U_MASK 0x000000FF
+#define LCD_SPU_COLORKEY_V 0x0138
+#define CFG_CKEY_V2(v2) ((v2) << 24)
+#define CFG_CKEY_V2_MASK 0xFF000000
+#define CFG_CKEY_V1(v1) ((v1) << 16)
+#define CFG_CKEY_V1_MASK 0x00FF0000
+#define CFG_CKEY_V(v) ((v) << 8)
+#define CFG_CKEY_V_MASK 0x0000FF00
+#define CFG_ALPHA_V(v) (v)
+#define CFG_ALPHA_V_MASK 0x000000FF
+
+/* SPI Read Data Register */
+#define LCD_SPU_SPI_RXDATA 0x0140
+
+/* Smart Panel Read Data Register */
+#define LCD_SPU_ISA_RSDATA 0x0144
+#define ISA_RXDATA_16BIT_1_DATA_MASK 0x000000FF
+#define ISA_RXDATA_16BIT_2_DATA_MASK 0x0000FF00
+#define ISA_RXDATA_16BIT_3_DATA_MASK 0x00FF0000
+#define ISA_RXDATA_16BIT_4_DATA_MASK 0xFF000000
+#define ISA_RXDATA_32BIT_1_DATA_MASK 0x00FFFFFF
+
+/* HWC SRAM Read Data Register */
+#define LCD_SPU_HWC_RDDAT 0x0158
+
+/* Gamma Table SRAM Read Data Register */
+#define LCD_SPU_GAMMA_RDDAT 0x015c
+#define CFG_GAMMA_RDDAT_MASK 0x000000FF
+
+/* Palette Table SRAM Read Data Register */
+#define LCD_SPU_PALETTE_RDDAT 0x0160
+#define CFG_PALETTE_RDDAT_MASK 0x00FFFFFF
+
+/* I/O Pads Input Read Only Register */
+#define LCD_SPU_IOPAD_IN 0x0178
+#define CFG_IOPAD_IN_MASK 0x0FFFFFFF
+
+/* Reserved Read Only Registers */
+#define LCD_CFG_RDREG5F 0x017C
+#define IRE_FRAME_CNT_MASK 0x000000C0
+#define IPE_FRAME_CNT_MASK 0x00000030
+#define GRA_FRAME_CNT_MASK 0x0000000C /* Graphic */
+#define DMA_FRAME_CNT_MASK 0x00000003 /* Video */
+
+/* SPI Control Register. */
+#define LCD_SPU_SPI_CTRL 0x0180
+#define CFG_SCLKCNT(div) ((div) << 24) /* 0xFF~0x2 */
+#define CFG_SCLKCNT_MASK 0xFF000000
+#define CFG_RXBITS(rx) ((rx) << 16) /* 0x1F~0x1 */
+#define CFG_RXBITS_MASK 0x00FF0000
+#define CFG_TXBITS(tx) ((tx) << 8) /* 0x1F~0x1 */
+#define CFG_TXBITS_MASK 0x0000FF00
+#define CFG_CLKINV(clk) ((clk) << 7)
+#define CFG_CLKINV_MASK 0x00000080
+#define CFG_KEEPXFER(transfer) ((transfer) << 6)
+#define CFG_KEEPXFER_MASK 0x00000040
+#define CFG_RXBITSTO0(rx) ((rx) << 5)
+#define CFG_RXBITSTO0_MASK 0x00000020
+#define CFG_TXBITSTO0(tx) ((tx) << 4)
+#define CFG_TXBITSTO0_MASK 0x00000010
+#define CFG_SPI_ENA(spi) ((spi) << 3)
+#define CFG_SPI_ENA_MASK 0x00000008
+#define CFG_SPI_SEL(spi) ((spi) << 2)
+#define CFG_SPI_SEL_MASK 0x00000004
+#define CFG_SPI_3W4WB(wire) ((wire) << 1)
+#define CFG_SPI_3W4WB_MASK 0x00000002
+#define CFG_SPI_START(start) (start)
+#define CFG_SPI_START_MASK 0x00000001
+
+/* SPI Tx Data Register */
+#define LCD_SPU_SPI_TXDATA 0x0184
+
+/*
+ 1. Smart Panel 8-bit Bus Control Register.
+ 2. AHB Slave Path Data Port Register
+*/
+#define LCD_SPU_SMPN_CTRL 0x0188
+
+/* DMA Control 0 Register */
+#define LCD_SPU_DMA_CTRL0 0x0190
+#define CFG_NOBLENDING(nb) ((nb) << 31)
+#define CFG_NOBLENDING_MASK 0x80000000
+#define CFG_GAMMA_ENA(gn) ((gn) << 30)
+#define CFG_GAMMA_ENA_MASK 0x40000000
+#define CFG_CBSH_ENA(cn) ((cn) << 29)
+#define CFG_CBSH_ENA_MASK 0x20000000
+#define CFG_PALETTE_ENA(pn) ((pn) << 28)
+#define CFG_PALETTE_ENA_MASK 0x10000000
+#define CFG_ARBFAST_ENA(an) ((an) << 27)
+#define CFG_ARBFAST_ENA_MASK 0x08000000
+#define CFG_HWC_1BITMOD(mode) ((mode) << 26)
+#define CFG_HWC_1BITMOD_MASK 0x04000000
+#define CFG_HWC_1BITENA(mn) ((mn) << 25)
+#define CFG_HWC_1BITENA_MASK 0x02000000
+#define CFG_HWC_ENA(cn) ((cn) << 24)
+#define CFG_HWC_ENA_MASK 0x01000000
+#define CFG_DMAFORMAT(dmaformat) ((dmaformat) << 20)
+#define CFG_DMAFORMAT_MASK 0x00F00000
+#define CFG_GRAFORMAT(graformat) ((graformat) << 16)
+#define CFG_GRAFORMAT_MASK 0x000F0000
+/* for graphic part */
+#define CFG_GRA_FTOGGLE(toggle) ((toggle) << 15)
+#define CFG_GRA_FTOGGLE_MASK 0x00008000
+#define CFG_GRA_HSMOOTH(smooth) ((smooth) << 14)
+#define CFG_GRA_HSMOOTH_MASK 0x00004000
+#define CFG_GRA_TSTMODE(test) ((test) << 13)
+#define CFG_GRA_TSTMODE_MASK 0x00002000
+#define CFG_GRA_SWAPRB(swap) ((swap) << 12)
+#define CFG_GRA_SWAPRB_MASK 0x00001000
+#define CFG_GRA_SWAPUV(swap) ((swap) << 11)
+#define CFG_GRA_SWAPUV_MASK 0x00000800
+#define CFG_GRA_SWAPYU(swap) ((swap) << 10)
+#define CFG_GRA_SWAPYU_MASK 0x00000400
+#define CFG_YUV2RGB_GRA(cvrt) ((cvrt) << 9)
+#define CFG_YUV2RGB_GRA_MASK 0x00000200
+#define CFG_GRA_ENA(gra) ((gra) << 8)
+#define CFG_GRA_ENA_MASK 0x00000100
+/* for video part */
+#define CFG_DMA_FTOGGLE(toggle) ((toggle) << 7)
+#define CFG_DMA_FTOGGLE_MASK 0x00000080
+#define CFG_DMA_HSMOOTH(smooth) ((smooth) << 6)
+#define CFG_DMA_HSMOOTH_MASK 0x00000040
+#define CFG_DMA_TSTMODE(test) ((test) << 5)
+#define CFG_DMA_TSTMODE_MASK 0x00000020
+#define CFG_DMA_SWAPRB(swap) ((swap) << 4)
+#define CFG_DMA_SWAPRB_MASK 0x00000010
+#define CFG_DMA_SWAPUV(swap) ((swap) << 3)
+#define CFG_DMA_SWAPUV_MASK 0x00000008
+#define CFG_DMA_SWAPYU(swap) ((swap) << 2)
+#define CFG_DMA_SWAPYU_MASK 0x00000004
+#define CFG_DMA_SWAP_MASK 0x0000001C
+#define CFG_YUV2RGB_DMA(cvrt) ((cvrt) << 1)
+#define CFG_YUV2RGB_DMA_MASK 0x00000002
+#define CFG_DMA_ENA(video) (video)
+#define CFG_DMA_ENA_MASK 0x00000001
+
+/* DMA Control 1 Register */
+#define LCD_SPU_DMA_CTRL1 0x0194
+#define CFG_FRAME_TRIG(trig) ((trig) << 31)
+#define CFG_FRAME_TRIG_MASK 0x80000000
+#define CFG_VSYNC_TRIG(trig) ((trig) << 28)
+#define CFG_VSYNC_TRIG_MASK 0x70000000
+#define CFG_VSYNC_INV(inv) ((inv) << 27)
+#define CFG_VSYNC_INV_MASK 0x08000000
+#define CFG_COLOR_KEY_MODE(cmode) ((cmode) << 24)
+#define CFG_COLOR_KEY_MASK 0x07000000
+#define CFG_CARRY(carry) ((carry) << 23)
+#define CFG_CARRY_MASK 0x00800000
+#define CFG_LNBUF_ENA(lnbuf) ((lnbuf) << 22)
+#define CFG_LNBUF_ENA_MASK 0x00400000
+#define CFG_GATED_ENA(gated) ((gated) << 21)
+#define CFG_GATED_ENA_MASK 0x00200000
+#define CFG_PWRDN_ENA(power) ((power) << 20)
+#define CFG_PWRDN_ENA_MASK 0x00100000
+#define CFG_DSCALE(dscale) ((dscale) << 18)
+#define CFG_DSCALE_MASK 0x000C0000
+#define CFG_ALPHA_MODE(amode) ((amode) << 16)
+#define CFG_ALPHA_MODE_MASK 0x00030000
+#define CFG_ALPHA(alpha) ((alpha) << 8)
+#define CFG_ALPHA_MASK 0x0000FF00
+#define CFG_PXLCMD(pxlcmd) (pxlcmd)
+#define CFG_PXLCMD_MASK 0x000000FF
+
+/* SRAM Control Register */
+#define LCD_SPU_SRAM_CTRL 0x0198
+#define CFG_SRAM_INIT_WR_RD(mode) ((mode) << 14)
+#define CFG_SRAM_INIT_WR_RD_MASK 0x0000C000
+#define CFG_SRAM_ADDR_LCDID(id) ((id) << 8)
+#define CFG_SRAM_ADDR_LCDID_MASK 0x00000F00
+#define CFG_SRAM_ADDR(addr) (addr)
+#define CFG_SRAM_ADDR_MASK 0x000000FF
+
+/* SRAM Write Data Register */
+#define LCD_SPU_SRAM_WRDAT 0x019C
+
+/* SRAM RTC/WTC Control Register */
+#define LCD_SPU_SRAM_PARA0 0x01A0
+
+/* SRAM Power Down Control Register */
+#define LCD_SPU_SRAM_PARA1 0x01A4
+#define CFG_CSB_256x32(hwc) ((hwc) << 15) /* HWC */
+#define CFG_CSB_256x32_MASK 0x00008000
+#define CFG_CSB_256x24(palette) ((palette) << 14) /* Palette */
+#define CFG_CSB_256x24_MASK 0x00004000
+#define CFG_CSB_256x8(gamma) ((gamma) << 13) /* Gamma */
+#define CFG_CSB_256x8_MASK 0x00002000
+#define CFG_PDWN256x32(pdwn) ((pdwn) << 7) /* HWC */
+#define CFG_PDWN256x32_MASK 0x00000080
+#define CFG_PDWN256x24(pdwn) ((pdwn) << 6) /* Palette */
+#define CFG_PDWN256x24_MASK 0x00000040
+#define CFG_PDWN256x8(pdwn) ((pdwn) << 5) /* Gamma */
+#define CFG_PDWN256x8_MASK 0x00000020
+#define CFG_PDWN32x32(pdwn) ((pdwn) << 3)
+#define CFG_PDWN32x32_MASK 0x00000008
+#define CFG_PDWN16x66(pdwn) ((pdwn) << 2)
+#define CFG_PDWN16x66_MASK 0x00000004
+#define CFG_PDWN32x66(pdwn) ((pdwn) << 1)
+#define CFG_PDWN32x66_MASK 0x00000002
+#define CFG_PDWN64x66(pdwn) (pdwn)
+#define CFG_PDWN64x66_MASK 0x00000001
+
+/* Smart or Dumb Panel Clock Divider */
+#define LCD_CFG_SCLK_DIV 0x01A8
+#define SCLK_SOURCE_SELECT(src) ((src) << 31)
+#define SCLK_SOURCE_SELECT_MASK 0x80000000
+#define CLK_FRACDIV(frac) ((frac) << 16)
+#define CLK_FRACDIV_MASK 0x0FFF0000
+#define CLK_INT_DIV(div) (div)
+#define CLK_INT_DIV_MASK 0x0000FFFF
+
+/* Video Contrast Register */
+#define LCD_SPU_CONTRAST 0x01AC
+#define CFG_BRIGHTNESS(bright) ((bright) << 16)
+#define CFG_BRIGHTNESS_MASK 0xFFFF0000
+#define CFG_CONTRAST(contrast) (contrast)
+#define CFG_CONTRAST_MASK 0x0000FFFF
+
+/* Video Saturation Register */
+#define LCD_SPU_SATURATION 0x01B0
+#define CFG_C_MULTS(mult) ((mult) << 16)
+#define CFG_C_MULTS_MASK 0xFFFF0000
+#define CFG_SATURATION(sat) (sat)
+#define CFG_SATURATION_MASK 0x0000FFFF
+
+/* Video Hue Adjust Register */
+#define LCD_SPU_CBSH_HUE 0x01B4
+#define CFG_SIN0(sin0) ((sin0) << 16)
+#define CFG_SIN0_MASK 0xFFFF0000
+#define CFG_COS0(con0) (con0)
+#define CFG_COS0_MASK 0x0000FFFF
+
+/* Dumb LCD Panel Control Register */
+#define LCD_SPU_DUMB_CTRL 0x01B8
+#define CFG_DUMBMODE(mode) ((mode) << 28)
+#define CFG_DUMBMODE_MASK 0xF0000000
+#define CFG_LCDGPIO_O(data) ((data) << 20)
+#define CFG_LCDGPIO_O_MASK 0x0FF00000
+#define CFG_LCDGPIO_ENA(gpio) ((gpio) << 12)
+#define CFG_LCDGPIO_ENA_MASK 0x000FF000
+#define CFG_BIAS_OUT(bias) ((bias) << 8)
+#define CFG_BIAS_OUT_MASK 0x00000100
+#define CFG_REVERSE_RGB(rRGB) ((rRGB) << 7)
+#define CFG_REVERSE_RGB_MASK 0x00000080
+#define CFG_INV_COMPBLANK(blank) ((blank) << 6)
+#define CFG_INV_COMPBLANK_MASK 0x00000040
+#define CFG_INV_COMPSYNC(sync) ((sync) << 5)
+#define CFG_INV_COMPSYNC_MASK 0x00000020
+#define CFG_INV_HENA(hena) ((hena) << 4)
+#define CFG_INV_HENA_MASK 0x00000010
+#define CFG_INV_VSYNC(vsync) ((vsync) << 3)
+#define CFG_INV_VSYNC_MASK 0x00000008
+#define CFG_INV_HSYNC(hsync) ((hsync) << 2)
+#define CFG_INV_HSYNC_MASK 0x00000004
+#define CFG_INV_PCLK(pclk) ((pclk) << 1)
+#define CFG_INV_PCLK_MASK 0x00000002
+#define CFG_DUMB_ENA(dumb) (dumb)
+#define CFG_DUMB_ENA_MASK 0x00000001
+
+/* LCD I/O Pads Control Register */
+#define SPU_IOPAD_CONTROL 0x01BC
+#define CFG_GRA_VM_ENA(vm) ((vm) << 15) /* gfx */
+#define CFG_GRA_VM_ENA_MASK 0x00008000
+#define CFG_DMA_VM_ENA(vm) ((vm) << 13) /* video */
+#define CFG_DMA_VM_ENA_MASK 0x00002000
+#define CFG_CMD_VM_ENA(vm) ((vm) << 13)
+#define CFG_CMD_VM_ENA_MASK 0x00000800
+#define CFG_CSC(csc) ((csc) << 8) /* csc */
+#define CFG_CSC_MASK 0x00000300
+#define CFG_AXICTRL(axi) ((axi) << 4)
+#define CFG_AXICTRL_MASK 0x000000F0
+#define CFG_IOPADMODE(iopad) (iopad)
+#define CFG_IOPADMODE_MASK 0x0000000F
+
+/* LCD Interrupt Control Register */
+#define SPU_IRQ_ENA 0x01C0
+#define DMA_FRAME_IRQ0_ENA(irq) ((irq) << 31)
+#define DMA_FRAME_IRQ0_ENA_MASK 0x80000000
+#define DMA_FRAME_IRQ1_ENA(irq) ((irq) << 30)
+#define DMA_FRAME_IRQ1_ENA_MASK 0x40000000
+#define DMA_FF_UNDERFLOW_ENA(ff) ((ff) << 29)
+#define DMA_FF_UNDERFLOW_ENA_MASK 0x20000000
+#define GRA_FRAME_IRQ0_ENA(irq) ((irq) << 27)
+#define GRA_FRAME_IRQ0_ENA_MASK 0x08000000
+#define GRA_FRAME_IRQ1_ENA(irq) ((irq) << 26)
+#define GRA_FRAME_IRQ1_ENA_MASK 0x04000000
+#define GRA_FF_UNDERFLOW_ENA(ff) ((ff) << 25)
+#define GRA_FF_UNDERFLOW_ENA_MASK 0x02000000
+#define VSYNC_IRQ_ENA(vsync_irq) ((vsync_irq) << 23)
+#define VSYNC_IRQ_ENA_MASK 0x00800000
+#define DUMB_FRAMEDONE_ENA(fdone) ((fdone) << 22)
+#define DUMB_FRAMEDONE_ENA_MASK 0x00400000
+#define TWC_FRAMEDONE_ENA(fdone) ((fdone) << 21)
+#define TWC_FRAMEDONE_ENA_MASK 0x00200000
+#define HWC_FRAMEDONE_ENA(fdone) ((fdone) << 20)
+#define HWC_FRAMEDONE_ENA_MASK 0x00100000
+#define SLV_IRQ_ENA(irq) ((irq) << 19)
+#define SLV_IRQ_ENA_MASK 0x00080000
+#define SPI_IRQ_ENA(irq) ((irq) << 18)
+#define SPI_IRQ_ENA_MASK 0x00040000
+#define PWRDN_IRQ_ENA(irq) ((irq) << 17)
+#define PWRDN_IRQ_ENA_MASK 0x00020000
+#define ERR_IRQ_ENA(irq) ((irq) << 16)
+#define ERR_IRQ_ENA_MASK 0x00010000
+#define CLEAN_SPU_IRQ_ISR(irq) (irq)
+#define CLEAN_SPU_IRQ_ISR_MASK 0x0000FFFF
+
+/* LCD Interrupt Status Register */
+#define SPU_IRQ_ISR 0x01C4
+#define DMA_FRAME_IRQ0(irq) ((irq) << 31)
+#define DMA_FRAME_IRQ0_MASK 0x80000000
+#define DMA_FRAME_IRQ1(irq) ((irq) << 30)
+#define DMA_FRAME_IRQ1_MASK 0x40000000
+#define DMA_FF_UNDERFLOW(ff) ((ff) << 29)
+#define DMA_FF_UNDERFLOW_MASK 0x20000000
+#define GRA_FRAME_IRQ0(irq) ((irq) << 27)
+#define GRA_FRAME_IRQ0_MASK 0x08000000
+#define GRA_FRAME_IRQ1(irq) ((irq) << 26)
+#define GRA_FRAME_IRQ1_MASK 0x04000000
+#define GRA_FF_UNDERFLOW(ff) ((ff) << 25)
+#define GRA_FF_UNDERFLOW_MASK 0x02000000
+#define VSYNC_IRQ(vsync_irq) ((vsync_irq) << 23)
+#define VSYNC_IRQ_MASK 0x00800000
+#define DUMB_FRAMEDONE(fdone) ((fdone) << 22)
+#define DUMB_FRAMEDONE_MASK 0x00400000
+#define TWC_FRAMEDONE(fdone) ((fdone) << 21)
+#define TWC_FRAMEDONE_MASK 0x00200000
+#define HWC_FRAMEDONE(fdone) ((fdone) << 20)
+#define HWC_FRAMEDONE_MASK 0x00100000
+#define SLV_IRQ(irq) ((irq) << 19)
+#define SLV_IRQ_MASK 0x00080000
+#define SPI_IRQ(irq) ((irq) << 18)
+#define SPI_IRQ_MASK 0x00040000
+#define PWRDN_IRQ(irq) ((irq) << 17)
+#define PWRDN_IRQ_MASK 0x00020000
+#define ERR_IRQ(irq) ((irq) << 16)
+#define ERR_IRQ_MASK 0x00010000
+/* read-only */
+#define DMA_FRAME_IRQ0_LEVEL_MASK 0x00008000
+#define DMA_FRAME_IRQ1_LEVEL_MASK 0x00004000
+#define DMA_FRAME_CNT_ISR_MASK 0x00003000
+#define GRA_FRAME_IRQ0_LEVEL_MASK 0x00000800
+#define GRA_FRAME_IRQ1_LEVEL_MASK 0x00000400
+#define GRA_FRAME_CNT_ISR_MASK 0x00000300
+#define VSYNC_IRQ_LEVEL_MASK 0x00000080
+#define DUMB_FRAMEDONE_LEVEL_MASK 0x00000040
+#define TWC_FRAMEDONE_LEVEL_MASK 0x00000020
+#define HWC_FRAMEDONE_LEVEL_MASK 0x00000010
+#define SLV_FF_EMPTY_MASK 0x00000008
+#define DMA_FF_ALLEMPTY_MASK 0x00000004
+#define GRA_FF_ALLEMPTY_MASK 0x00000002
+#define PWRDN_IRQ_LEVEL_MASK 0x00000001
+
+
+/*
+ * Video Memory Color formats defined for DMA control 0 register
+ * DMA0 bit[23:20]
+ */
+#define VMODE_RGB565 0x0
+#define VMODE_RGB1555 0x1
+#define VMODE_RGB888PACKED 0x2
+#define VMODE_RGB888UNPACKED 0x3
+#define VMODE_RGBA888 0x4
+#define VMODE_YUV422PACKED 0x5
+#define VMODE_YUV422PLANAR 0x6
+#define VMODE_YUV420PLANAR 0x7
+#define VMODE_SMPNCMD 0x8
+#define VMODE_PALETTE4BIT 0x9
+#define VMODE_PALETTE8BIT 0xa
+#define VMODE_RESERVED 0xb
+
+/*
+ * Graphic Memory Color formats defined for DMA control 0 register
+ * DMA0 bit[19:16]
+ */
+#define GMODE_RGB565 0x0
+#define GMODE_RGB1555 0x1
+#define GMODE_RGB888PACKED 0x2
+#define GMODE_RGB888UNPACKED 0x3
+#define GMODE_RGBA888 0x4
+#define GMODE_YUV422PACKED 0x5
+#define GMODE_YUV422PLANAR 0x6
+#define GMODE_YUV420PLANAR 0x7
+#define GMODE_SMPNCMD 0x8
+#define GMODE_PALETTE4BIT 0x9
+#define GMODE_PALETTE8BIT 0xa
+#define GMODE_RESERVED 0xb
+
+/*
+ * defines for the DMA control 1 register
+ */
+#define DMA1_FRAME_TRIG 31 /* bit location */
+#define DMA1_VSYNC_MODE 28
+#define DMA1_VSYNC_INV 27
+#define DMA1_CKEY 24
+#define DMA1_CARRY 23
+#define DMA1_LNBUF_ENA 22
+#define DMA1_GATED_ENA 21
+#define DMA1_PWRDN_ENA 20
+#define DMA1_DSCALE 18
+#define DMA1_ALPHA_MODE 16
+#define DMA1_ALPHA 8
+#define DMA1_PXLCMD 0
+
+/*
+ * defines for configuring Dumb Mode
+ * DUMB LCD Panel bit[31:28]
+ */
+#define DUMB16_RGB565_0 0x0
+#define DUMB16_RGB565_1 0x1
+#define DUMB18_RGB666_0 0x2
+#define DUMB18_RGB666_1 0x3
+#define DUMB12_RGB444_0 0x4
+#define DUMB12_RGB444_1 0x5
+#define DUMB24_RGB888_0 0x6
+#define DUMB_BLANK 0x7
+
+/*
+ * defines for configuring the I/O Pin Allocation Mode
+ * LCD I/O Pads control register bit[3:0]
+ */
+#define IOPAD_DUMB24 0x0
+#define IOPAD_DUMB18SPI 0x1
+#define IOPAD_DUMB18GPIO 0x2
+#define IOPAD_DUMB16SPI 0x3
+#define IOPAD_DUMB16GPIO 0x4
+#define IOPAD_DUMB12 0x5
+#define IOPAD_SMART18SPI 0x6
+#define IOPAD_SMART16SPI 0x7
+#define IOPAD_SMART8BOTH 0x8
+
+#endif /* __PXA168FB_H__ */
diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
index 0726aecf3b7..0deb0a8867b 100644
--- a/drivers/video/s1d13xxxfb.c
+++ b/drivers/video/s1d13xxxfb.c
@@ -2,6 +2,7 @@
*
* (c) 2004 Simtec Electronics
* (c) 2005 Thibaut VARENE <varenet@parisc-linux.org>
+ * (c) 2009 Kristoffer Ericson <kristoffer.ericson@gmail.com>
*
* Driver for Epson S1D13xxx series framebuffer chips
*
@@ -10,18 +11,10 @@
* linux/drivers/video/epson1355fb.c
* linux/drivers/video/epson/s1d13xxxfb.c (2.4 driver by Epson)
*
- * Note, currently only tested on S1D13806 with 16bit CRT.
- * As such, this driver might still contain some hardcoded bits relating to
- * S1D13806.
- * Making it work on other S1D13XXX chips should merely be a matter of adding
- * a few switch()s, some missing glue here and there maybe, and split header
- * files.
- *
* TODO: - handle dual screen display (CRT and LCD at the same time).
* - check_var(), mode change, etc.
- * - PM untested.
- * - Accelerated interfaces.
- * - Probably not SMP safe :)
+ * - probably not SMP safe :)
+ * - support all bitblt operations on all cards
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
@@ -31,19 +24,24 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/fb.h>
+#include <linux/spinlock_types.h>
+#include <linux/spinlock.h>
#include <asm/io.h>
#include <video/s1d13xxxfb.h>
-#define PFX "s1d13xxxfb: "
+#define PFX "s1d13xxxfb: "
+#define BLIT "s1d13xxxfb_bitblt: "
+/*
+ * set this to enable debugging on general functions
+ */
#if 0
#define dbg(fmt, args...) do { printk(KERN_INFO fmt, ## args); } while(0)
#else
@@ -51,7 +49,21 @@
#endif
/*
- * List of card production ids
+ * set this to enable debugging on 2D acceleration
+ */
+#if 0
+#define dbg_blit(fmt, args...) do { printk(KERN_INFO BLIT fmt, ## args); } while (0)
+#else
+#define dbg_blit(fmt, args...) do { } while (0)
+#endif
+
+/*
+ * ensure only one bitblt operation runs at a time
+ */
+static DEFINE_SPINLOCK(s1d13xxxfb_bitblt_lock);
+
+/*
+ * list of card production ids
*/
static const int s1d13xxxfb_prod_ids[] = {
S1D13505_PROD_ID,
@@ -69,7 +81,7 @@ static const char *s1d13xxxfb_prod_names[] = {
};
/*
- * Here we define the default struct fb_fix_screeninfo
+ * here we define the default struct fb_fix_screeninfo
*/
static struct fb_fix_screeninfo __devinitdata s1d13xxxfb_fix = {
.id = S1D_FBID,
@@ -145,8 +157,10 @@ crt_enable(struct s1d13xxxfb_par *par, int enable)
s1d13xxxfb_writereg(par, S1DREG_COM_DISP_MODE, mode);
}
-/* framebuffer control routines */
+/*************************************************************
+ framebuffer control functions
+ *************************************************************/
static inline void
s1d13xxxfb_setup_pseudocolour(struct fb_info *info)
{
@@ -242,13 +256,13 @@ s1d13xxxfb_set_par(struct fb_info *info)
}
/**
- * s1d13xxxfb_setcolreg - sets a color register.
- * @regno: Which register in the CLUT we are programming
- * @red: The red value which can be up to 16 bits wide
+ * s1d13xxxfb_setcolreg - sets a color register.
+ * @regno: Which register in the CLUT we are programming
+ * @red: The red value which can be up to 16 bits wide
* @green: The green value which can be up to 16 bits wide
* @blue: The blue value which can be up to 16 bits wide.
* @transp: If supported the alpha value which can be up to 16 bits wide.
- * @info: frame buffer info structure
+ * @info: frame buffer info structure
*
* Returns negative errno on error, or zero on success.
*/
@@ -351,15 +365,15 @@ s1d13xxxfb_blank(int blank_mode, struct fb_info *info)
}
/**
- * s1d13xxxfb_pan_display - Pans the display.
- * @var: frame buffer variable screen structure
- * @info: frame buffer structure that represents a single frame buffer
+ * s1d13xxxfb_pan_display - Pans the display.
+ * @var: frame buffer variable screen structure
+ * @info: frame buffer structure that represents a single frame buffer
*
* Pan (or wrap, depending on the `vmode' field) the display using the
- * `yoffset' field of the `var' structure (`xoffset' not yet supported).
- * If the values don't fit, return -EINVAL.
+ * `yoffset' field of the `var' structure (`xoffset' not yet supported).
+ * If the values don't fit, return -EINVAL.
*
- * Returns negative errno on error, or zero on success.
+ * Returns negative errno on error, or zero on success.
*/
static int
s1d13xxxfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -390,8 +404,259 @@ s1d13xxxfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
return 0;
}
-/* framebuffer information structures */
+/************************************************************
+ functions to handle bitblt acceleration
+ ************************************************************/
+
+/**
+ * bltbit_wait_bitset - waits for a bit to become set in BBLT_CTL0
+ * @info : framebuffer structure
+ * @bit : bit mask to wait for
+ * @timeout : number of 10us polling iterations before giving up
+ *
+ * polls until the given bit is set (or the timeout expires)
+ */
+static u8
+bltbit_wait_bitset(struct fb_info *info, u8 bit, int timeout)
+{
+ while (!(s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0) & bit)) {
+ udelay(10);
+ if (!--timeout) {
+ dbg_blit("wait_bitset timeout\n");
+ break;
+ }
+ }
+
+ return timeout;
+}
+
+/**
+ * bltbit_wait_bitclear - waits for a bit to become clear in BBLT_CTL0
+ * @info : framebuffer structure
+ * @bit : bit mask to wait for
+ * @timeout : number of 10us polling iterations before giving up
+ *
+ * polls until the given bit is cleared (or the timeout expires)
+ *
+ */
+static u8
+bltbit_wait_bitclear(struct fb_info *info, u8 bit, int timeout)
+{
+ while (s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0) & bit) {
+ udelay(10);
+ if (!--timeout) {
+ dbg_blit("wait_bitclear timeout\n");
+ break;
+ }
+ }
+
+ return timeout;
+}
+
+/**
+ * bltbit_fifo_status - checks the current status of the fifo
+ * @info : framebuffer structure
+ *
+ * returns how many words can safely be written to the FIFO (16, 1 or 0)
+ */
+static u8
+bltbit_fifo_status(struct fb_info *info)
+{
+ u8 status;
+ status = s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0);
+
+ /* it's empty, so there is room for 16 words */
+ if (status & BBLT_FIFO_EMPTY)
+ return 16;
+
+ /* it's full, so we don't want to add more */
+ if (status & BBLT_FIFO_FULL)
+ return 0;
+
+ /* it's at least half full, but we can still add at least one word */
+ if (status & BBLT_FIFO_NOT_FULL)
+ return 1;
+
+ return 0;
+}
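bltbit_fifo_status() lets a caller pace writes into the BitBLT data FIFO so it is never overrun. A standalone sketch of that pacing loop, with stubs standing in for the register accesses (fifo_free() mimics the 16/1/0 return values above, push_word() stands in for a data-register write):

#include <stdio.h>

/* stub for bltbit_fifo_status(): alternate between "full" and "empty" */
static unsigned int fifo_free(void)
{
	static unsigned int calls;
	return (calls++ & 1) ? 16 : 0;
}

/* stub for writing one word to the BitBLT data register */
static void push_word(void)
{
}

int main(void)
{
	unsigned int words_left = 40;

	while (words_left) {
		unsigned int room = fifo_free();   /* 16, 1 or 0, as above */

		while (room && words_left) {
			push_word();
			room--;
			words_left--;
		}
	}
	printf("all words written\n");
	return 0;
}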
+
+/*
+ * s1d13xxxfb_bitblt_copyarea - accelerated copyarea function
+ * @info : framebuffer structure
+ * @area : fb_copyarea structure
+ *
+ * supports (at least) the S1D13506
+ *
+ */
+static void
+s1d13xxxfb_bitblt_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ u32 dst, src;
+ u32 stride;
+ u16 reverse = 0;
+ u16 sx = area->sx, sy = area->sy;
+ u16 dx = area->dx, dy = area->dy;
+ u16 width = area->width, height = area->height;
+ u16 bpp;
+
+ spin_lock(&s1d13xxxfb_bitblt_lock);
+
+ /* bytes per xres line */
+ bpp = (info->var.bits_per_pixel >> 3);
+ stride = bpp * info->var.xres;
+
+ /* reverse, calculate the last pixel in rectangle */
+ if ((dy > sy) || ((dy == sy) && (dx >= sx))) {
+ dst = (((dy + height - 1) * stride) + (bpp * (dx + width - 1)));
+ src = (((sy + height - 1) * stride) + (bpp * (sx + width - 1)));
+ reverse = 1;
+ /* not reverse, calculate the first pixel in rectangle */
+ } else { /* (y * xres) + (bpp * x) */
+ dst = (dy * stride) + (bpp * dx);
+ src = (sy * stride) + (bpp * sx);
+ }
+
+ /* set source address */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START0, (src & 0xff));
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START1, (src >> 8) & 0x00ff);
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START2, (src >> 16) & 0x00ff);
+
+ /* set destination address */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START0, (dst & 0xff));
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START1, (dst >> 8) & 0x00ff);
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START2, (dst >> 16) & 0x00ff);
+
+ /* program height and width */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH0, (width & 0xff) - 1);
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH1, (width >> 8));
+
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT0, (height & 0xff) - 1);
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT1, (height >> 8));
+
+ /* negative direction ROP */
+ if (reverse == 1) {
+ dbg_blit("(copyarea) negative rop\n");
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, 0x03);
+ } else /* positive direction ROP */ {
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, 0x02);
+ dbg_blit("(copyarea) positive rop\n");
+ }
+
+ /* select rectangle mode (not linear) */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x0);
+
+ /* set up the bpp: 1 = 16bpp, 0 = 8bpp */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL1, (bpp >> 1));
+
+ /* set words per xres */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF0, (stride >> 1) & 0xff);
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF1, (stride >> 9));
+
+ dbg_blit("(copyarea) dx=%d, dy=%d\n", dx, dy);
+ dbg_blit("(copyarea) sx=%d, sy=%d\n", sx, sy);
+ dbg_blit("(copyarea) width=%d, height=%d\n", width - 1, height - 1);
+ dbg_blit("(copyarea) stride=%d\n", stride);
+ dbg_blit("(copyarea) bpp=%d=0x0%d, mem_offset1=%d, mem_offset2=%d\n", bpp, (bpp >> 1),
+ (stride >> 1) & 0xff, stride >> 9);
+
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CC_EXP, 0x0c);
+
+ /* initialize the engine */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x80);
+
+ /* wait to complete */
+ bltbit_wait_bitclear(info, 0x80, 8000);
+
+ spin_unlock(&s1d13xxxfb_bitblt_lock);
+}
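As a concrete check of the direction logic above: with 16bpp (bpp = 2) and xres = 320 (stride = 640), copying a 10x10 block from (0,0) to (5,5) has dy > sy, so the rectangles overlap in the downward direction and the engine must start from the last pixel of each rectangle and run in reverse. The same arithmetic in a small standalone program:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bpp = 2, stride = 640;            /* 16bpp, xres = 320 */
	uint32_t sx = 0, sy = 0, dx = 5, dy = 5;   /* overlapping copy */
	uint32_t width = 10, height = 10;
	uint32_t src, dst;
	int reverse = (dy > sy) || (dy == sy && dx >= sx);

	if (reverse) {
		/* last pixel of each rectangle, as in the driver */
		dst = (dy + height - 1) * stride + bpp * (dx + width - 1);
		src = (sy + height - 1) * stride + bpp * (sx + width - 1);
	} else {
		dst = dy * stride + bpp * dx;
		src = sy * stride + bpp * sx;
	}

	printf("reverse=%d src=%u dst=%u\n", reverse, src, dst);
	return 0;
}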
+
+/**
+ *
+ * s1d13xxxfb_bitblt_solidfill - accelerated solidfill function
+ * @info : framebuffer structure
+ * @rect : fb_fillrect structure
+ *
+ * supports (at least) the S1D13506
+ *
+ **/
+static void
+s1d13xxxfb_bitblt_solidfill(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ u32 screen_stride, dest;
+ u32 fg;
+ u16 bpp = (info->var.bits_per_pixel >> 3);
+
+ /* grab spinlock */
+ spin_lock(&s1d13xxxfb_bitblt_lock);
+
+ /* bytes per x width */
+ screen_stride = (bpp * info->var.xres);
+
+ /* bytes to starting point */
+ dest = ((rect->dy * screen_stride) + (bpp * rect->dx));
+
+ dbg_blit("(solidfill) dx=%d, dy=%d, stride=%d, dest=%d\n"
+ "(solidfill) : rect_width=%d, rect_height=%d\n",
+ rect->dx, rect->dy, screen_stride, dest,
+ rect->width - 1, rect->height - 1);
+
+ dbg_blit("(solidfill) : xres=%d, yres=%d, bpp=%d\n",
+ info->var.xres, info->var.yres,
+ info->var.bits_per_pixel);
+ dbg_blit("(solidfill) : rop=%d\n", rect->rop);
+
+ /* We split the destination into the three registers */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START0, (dest & 0x00ff));
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START1, ((dest >> 8) & 0x00ff));
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START2, ((dest >> 16) & 0x00ff));
+
+ /* program the rectangle width */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH0, ((rect->width) & 0x00ff) - 1);
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH1, (rect->width >> 8));
+
+ /* program the rectangle height */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT0, ((rect->height) & 0x00ff) - 1);
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT1, (rect->height >> 8));
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ fg = ((u32 *)info->pseudo_palette)[rect->color];
+ dbg_blit("(solidfill) truecolor/directcolor\n");
+ dbg_blit("(solidfill) pseudo_palette[%d] = %d\n", rect->color, fg);
+ } else {
+ fg = rect->color;
+ dbg_blit("(solidfill) color = %d\n", rect->color);
+ }
+
+ /* set foreground color */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_FGC0, (fg & 0xff));
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_FGC1, (fg >> 8) & 0xff);
+
+ /* select a rectangular region of memory (rectangle, not linear) */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x0);
+
+ /* set operation mode SOLID_FILL */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, BBLT_SOLID_FILL);
+
+ /* set bits per pixel (1 = 16bpp, 0 = 8bpp) */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL1, (info->var.bits_per_pixel >> 4));
+
+ /* set the memory offset for the bblt in word sizes */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF0, (screen_stride >> 1) & 0x00ff);
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF1, (screen_stride >> 9));
+
+ /* and away we go.... */
+ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x80);
+
+ /* wait until it's done */
+ bltbit_wait_bitclear(info, 0x80, 8000);
+
+ /* let others play */
+ spin_unlock(&s1d13xxxfb_bitblt_lock);
+}
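The foreground colour above is split byte-wise across the two 8-bit FGC registers (low byte to FGC0, high byte to FGC1). For a 16bpp RGB565 value that split looks like this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t fg = 0xf800;   /* pure red in RGB565 */

	printf("FGC0 = 0x%02x, FGC1 = 0x%02x\n", fg & 0xff, (fg >> 8) & 0xff);
	return 0;
}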
+
+/* framebuffer information structures */
static struct fb_ops s1d13xxxfb_fbops = {
.owner = THIS_MODULE,
.fb_set_par = s1d13xxxfb_set_par,
@@ -400,7 +665,7 @@ static struct fb_ops s1d13xxxfb_fbops = {
.fb_pan_display = s1d13xxxfb_pan_display,
- /* to be replaced by any acceleration we can */
+ /* gets replaced at chip detection time */
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -412,9 +677,9 @@ static int s1d13xxxfb_width_tab[2][4] __devinitdata = {
};
/**
- * s1d13xxxfb_fetch_hw_state - Configure the framebuffer according to
+ * s1d13xxxfb_fetch_hw_state - Configure the framebuffer according to
* hardware setup.
- * @info: frame buffer structure
+ * @info: frame buffer structure
*
* We setup the framebuffer structures according to the current
* hardware setup. On some machines, the BIOS will have filled
@@ -569,7 +834,6 @@ s1d13xxxfb_probe(struct platform_device *pdev)
if (pdata && pdata->platform_init_video)
pdata->platform_init_video();
-
if (pdev->num_resources != 2) {
dev_err(&pdev->dev, "invalid num_resources: %i\n",
pdev->num_resources);
@@ -655,16 +919,27 @@ s1d13xxxfb_probe(struct platform_device *pdev)
info->fix = s1d13xxxfb_fix;
info->fix.mmio_start = pdev->resource[1].start;
- info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start +1;
+ info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start + 1;
info->fix.smem_start = pdev->resource[0].start;
- info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start +1;
+ info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start + 1;
printk(KERN_INFO PFX "regs mapped at 0x%p, fb %d KiB mapped at 0x%p\n",
default_par->regs, info->fix.smem_len / 1024, info->screen_base);
info->par = default_par;
- info->fbops = &s1d13xxxfb_fbops;
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+ info->fbops = &s1d13xxxfb_fbops;
+
+ switch(prod_id) {
+ case S1D13506_PROD_ID: /* activate acceleration */
+ s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
+ s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
+ FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
+ break;
+ default:
+ break;
+ }
/* perform "manual" chip initialization, if needed */
if (pdata && pdata->initregs)
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 5e9c6302433..43680e54542 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -358,9 +358,16 @@ static int s3c_fb_set_par(struct fb_info *info)
writel(data, regs + VIDOSD_B(win_no));
data = var->xres * var->yres;
+
+ u32 osdc_data = 0;
+
+ osdc_data = VIDISD14C_ALPHA1_R(0xf) |
+ VIDISD14C_ALPHA1_G(0xf) |
+ VIDISD14C_ALPHA1_B(0xf);
+
if (s3c_fb_has_osd_d(win_no)) {
writel(data, regs + VIDOSD_D(win_no));
- writel(0, regs + VIDOSD_C(win_no));
+ writel(osdc_data, regs + VIDOSD_C(win_no));
} else
writel(data, regs + VIDOSD_C(win_no));
@@ -409,8 +416,12 @@ static int s3c_fb_set_par(struct fb_info *info)
data |= WINCON1_BPPMODE_19BPP_A1666;
else
data |= WINCON1_BPPMODE_18BPP_666;
- } else if (var->transp.length != 0)
- data |= WINCON1_BPPMODE_25BPP_A1888;
+ } else if (var->transp.length == 1)
+ data |= WINCON1_BPPMODE_25BPP_A1888
+ | WINCON1_BLD_PIX;
+ else if (var->transp.length == 4)
+ data |= WINCON1_BPPMODE_28BPP_A4888
+ | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
else
data |= WINCON0_BPPMODE_24BPP_888;
@@ -418,6 +429,20 @@ static int s3c_fb_set_par(struct fb_info *info)
break;
}
+ /* It has no color key control register for window0 */
+ if (win_no > 0) {
+ u32 keycon0_data = 0, keycon1_data = 0;
+
+ keycon0_data = ~(WxKEYCON0_KEYBL_EN |
+ WxKEYCON0_KEYEN_F |
+ WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
+
+ keycon1_data = WxKEYCON1_COLVAL(0xffffff);
+
+ writel(keycon0_data, regs + WxKEYCONy(win_no-1, 0));
+ writel(keycon1_data, regs + WxKEYCONy(win_no-1, 1));
+ }
+
writel(data, regs + WINCON(win_no));
writel(0x0, regs + WINxMAP(win_no));
@@ -700,9 +725,12 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
*/
static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win)
{
- fb_dealloc_cmap(&win->fbinfo->cmap);
- unregister_framebuffer(win->fbinfo);
- s3c_fb_free_memory(sfb, win);
+ if (win->fbinfo) {
+ unregister_framebuffer(win->fbinfo);
+ fb_dealloc_cmap(&win->fbinfo->cmap);
+ s3c_fb_free_memory(sfb, win);
+ framebuffer_release(win->fbinfo);
+ }
}
/**
@@ -753,7 +781,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
ret = s3c_fb_alloc_memory(sfb, win);
if (ret) {
dev_err(sfb->dev, "failed to allocate display memory\n");
- goto err_framebuffer;
+ return ret;
}
/* setup the r/b/g positions for the window's palette */
@@ -776,7 +804,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
ret = s3c_fb_check_var(&fbinfo->var, fbinfo);
if (ret < 0) {
dev_err(sfb->dev, "check_var failed on initial video params\n");
- goto err_alloc_mem;
+ return ret;
}
/* create initial colour map */
@@ -796,20 +824,13 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
ret = register_framebuffer(fbinfo);
if (ret < 0) {
dev_err(sfb->dev, "failed to register framebuffer\n");
- goto err_alloc_mem;
+ return ret;
}
*res = win;
dev_info(sfb->dev, "window %d: fb %s\n", win_no, fbinfo->fix.id);
return 0;
-
-err_alloc_mem:
- s3c_fb_free_memory(sfb, win);
-
-err_framebuffer:
- unregister_framebuffer(fbinfo);
- return ret;
}
/**
@@ -947,7 +968,8 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
int win;
for (win = 0; win <= S3C_FB_MAX_WIN; win++)
- s3c_fb_release_win(sfb, sfb->windows[win]);
+ if (sfb->windows[win])
+ s3c_fb_release_win(sfb, sfb->windows[win]);
iounmap(sfb->regs);
@@ -985,11 +1007,20 @@ static int s3c_fb_suspend(struct platform_device *pdev, pm_message_t state)
static int s3c_fb_resume(struct platform_device *pdev)
{
struct s3c_fb *sfb = platform_get_drvdata(pdev);
+ struct s3c_fb_platdata *pd = sfb->pdata;
struct s3c_fb_win *win;
int win_no;
clk_enable(sfb->bus_clk);
+ /* setup registers */
+ writel(pd->vidcon1, sfb->regs + VIDCON1);
+
+ /* zero all windows before we do anything */
+ for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++)
+ s3c_fb_clear_win(sfb, win_no);
+
+ /* restore framebuffers */
for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
win = sfb->windows[win_no];
if (!win)
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index b0b4513ba53..7da0027e240 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/cpufreq.h>
#include <asm/io.h>
#include <asm/div64.h>
@@ -89,7 +90,7 @@ static void s3c2410fb_set_lcdaddr(struct fb_info *info)
static unsigned int s3c2410fb_calc_pixclk(struct s3c2410fb_info *fbi,
unsigned long pixclk)
{
- unsigned long clk = clk_get_rate(fbi->clk);
+ unsigned long clk = fbi->clk_rate;
unsigned long long div;
/* pixclk is in picoseconds, our clock is in Hz
@@ -758,6 +759,57 @@ static irqreturn_t s3c2410fb_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+#ifdef CONFIG_CPU_FREQ
+
+static int s3c2410fb_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freqs = data;
+ struct s3c2410fb_info *info;
+ struct fb_info *fbinfo;
+ long delta_f;
+
+ info = container_of(nb, struct s3c2410fb_info, freq_transition);
+ fbinfo = platform_get_drvdata(to_platform_device(info->dev));
+
+ /* work out change, <0 for speed-up */
+ delta_f = info->clk_rate - clk_get_rate(info->clk);
+
+ if ((val == CPUFREQ_POSTCHANGE && delta_f > 0) ||
+ (val == CPUFREQ_PRECHANGE && delta_f < 0)) {
+ info->clk_rate = clk_get_rate(info->clk);
+ s3c2410fb_activate_var(fbinfo);
+ }
+
+ return 0;
+}
+
+static inline int s3c2410fb_cpufreq_register(struct s3c2410fb_info *info)
+{
+ info->freq_transition.notifier_call = s3c2410fb_cpufreq_transition;
+
+ return cpufreq_register_notifier(&info->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
+{
+ cpufreq_unregister_notifier(&info->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+#else
+static inline int s3c2410fb_cpufreq_register(struct s3c2410fb_info *info)
+{
+ return 0;
+}
+
+static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
+{
+}
+#endif
+
+
static char driver_name[] = "s3c2410fb";
static int __init s3c24xxfb_probe(struct platform_device *pdev,
@@ -875,6 +927,8 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev,
msleep(1);
+ info->clk_rate = clk_get_rate(info->clk);
+
/* find maximum required memory size for display */
for (i = 0; i < mach_info->num_displays; i++) {
unsigned long smem_len = mach_info->displays[i].xres;
@@ -904,11 +958,17 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev,
s3c2410fb_check_var(&fbinfo->var, fbinfo);
+ ret = s3c2410fb_cpufreq_register(info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register cpufreq\n");
+ goto free_video_memory;
+ }
+
ret = register_framebuffer(fbinfo);
if (ret < 0) {
printk(KERN_ERR "Failed to register framebuffer device: %d\n",
ret);
- goto free_video_memory;
+ goto free_cpufreq;
}
/* create device files */
@@ -922,6 +982,8 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev,
return 0;
+ free_cpufreq:
+ s3c2410fb_cpufreq_deregister(info);
free_video_memory:
s3c2410fb_unmap_video_memory(fbinfo);
release_clock:
@@ -961,6 +1023,7 @@ static int s3c2410fb_remove(struct platform_device *pdev)
int irq;
unregister_framebuffer(fbinfo);
+ s3c2410fb_cpufreq_deregister(info);
s3c2410fb_lcd_enable(info, 0);
msleep(1);
diff --git a/drivers/video/s3c2410fb.h b/drivers/video/s3c2410fb.h
index 9a6ba3e9d1b..47a17bd2301 100644
--- a/drivers/video/s3c2410fb.h
+++ b/drivers/video/s3c2410fb.h
@@ -29,8 +29,13 @@ struct s3c2410fb_info {
enum s3c_drv_type drv_type;
struct s3c2410fb_hw regs;
+ unsigned long clk_rate;
unsigned int palette_ready;
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+#endif
+
/* keep these registers in case we need to re-write palette */
u32 palette_buffer[256];
u32 pseudo_pal[16];
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 92ea0ab44ce..f10d2fbeda0 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -47,6 +47,7 @@ struct sh_mobile_lcdc_priv {
#endif
unsigned long lddckr;
struct sh_mobile_lcdc_chan ch[2];
+ int started;
};
/* shared registers */
@@ -451,6 +452,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
/* start the lcdc */
sh_mobile_lcdc_start_stop(priv, 1);
+ priv->started = 1;
/* tell the board code to enable the panel */
for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -493,7 +495,10 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
}
/* stop the lcdc */
- sh_mobile_lcdc_start_stop(priv, 0);
+ if (priv->started) {
+ sh_mobile_lcdc_start_stop(priv, 0);
+ priv->started = 0;
+ }
/* stop clocks */
for (k = 0; k < ARRAY_SIZE(priv->ch); k++)
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 7e17ee95a97..7072d19080d 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -5928,7 +5928,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if(pci_enable_device(pdev)) {
if(ivideo->nbridge) pci_dev_put(ivideo->nbridge);
pci_set_drvdata(pdev, NULL);
- kfree(sis_fb_info);
+ framebuffer_release(sis_fb_info);
return -EIO;
}
}
@@ -6134,7 +6134,7 @@ error_3: vfree(ivideo->bios_abase);
pci_set_drvdata(pdev, NULL);
if(!ivideo->sisvga_enabled)
pci_disable_device(pdev);
- kfree(sis_fb_info);
+ framebuffer_release(sis_fb_info);
return ret;
}
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index eabaad765ae..eec9dcb7f59 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -1380,7 +1380,7 @@ stifb_cleanup(void)
if (info->screen_base)
iounmap(info->screen_base);
fb_dealloc_cmap(&info->cmap);
- kfree(info);
+ framebuffer_release(info);
}
sti->info = NULL;
}
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index 643afbfe827..45b883598bf 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -116,17 +116,16 @@ struct tcx_par {
u32 flags;
#define TCX_FLAG_BLANKED 0x00000001
- unsigned long physbase;
unsigned long which_io;
- unsigned long fbsize;
struct sbus_mmap_map mmap_map[TCX_MMAP_ENTRIES];
int lowdepth;
};
/* Reset control plane so that WID is 8-bit plane. */
-static void __tcx_set_control_plane(struct tcx_par *par)
+static void __tcx_set_control_plane(struct fb_info *info)
{
+ struct tcx_par *par = info->par;
u32 __iomem *p, *pend;
if (par->lowdepth)
@@ -135,7 +134,7 @@ static void __tcx_set_control_plane(struct tcx_par *par)
p = par->cplane;
if (p == NULL)
return;
- for (pend = p + par->fbsize; p < pend; p++) {
+ for (pend = p + info->fix.smem_len; p < pend; p++) {
u32 tmp = sbus_readl(p);
tmp &= 0xffffff;
@@ -149,7 +148,7 @@ static void tcx_reset(struct fb_info *info)
unsigned long flags;
spin_lock_irqsave(&par->lock, flags);
- __tcx_set_control_plane(par);
+ __tcx_set_control_plane(info);
spin_unlock_irqrestore(&par->lock, flags);
}
@@ -304,7 +303,7 @@ static int tcx_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct tcx_par *par = (struct tcx_par *)info->par;
return sbusfb_mmap_helper(par->mmap_map,
- par->physbase, par->fbsize,
+ info->fix.smem_start, info->fix.smem_len,
par->which_io, vma);
}
@@ -316,7 +315,7 @@ static int tcx_ioctl(struct fb_info *info, unsigned int cmd,
return sbusfb_ioctl_helper(cmd, arg, info,
FBTYPE_TCXCOLOR,
(par->lowdepth ? 8 : 24),
- par->fbsize);
+ info->fix.smem_len);
}
/*
@@ -358,10 +357,10 @@ static void tcx_unmap_regs(struct of_device *op, struct fb_info *info,
par->bt, sizeof(struct bt_regs));
if (par->cplane)
of_iounmap(&op->resource[4],
- par->cplane, par->fbsize * sizeof(u32));
+ par->cplane, info->fix.smem_len * sizeof(u32));
if (info->screen_base)
of_iounmap(&op->resource[0],
- info->screen_base, par->fbsize);
+ info->screen_base, info->fix.smem_len);
}
static int __devinit tcx_probe(struct of_device *op,
@@ -391,7 +390,7 @@ static int __devinit tcx_probe(struct of_device *op,
linebytes = of_getintprop_default(dp, "linebytes",
info->var.xres);
- par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+ info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
par->tec = of_ioremap(&op->resource[7], 0,
sizeof(struct tcx_tec), "tcx tec");
@@ -400,7 +399,7 @@ static int __devinit tcx_probe(struct of_device *op,
par->bt = of_ioremap(&op->resource[8], 0,
sizeof(struct bt_regs), "tcx dac");
info->screen_base = of_ioremap(&op->resource[0], 0,
- par->fbsize, "tcx ram");
+ info->fix.smem_len, "tcx ram");
if (!par->tec || !par->thc ||
!par->bt || !info->screen_base)
goto out_unmap_regs;
@@ -408,7 +407,7 @@ static int __devinit tcx_probe(struct of_device *op,
memcpy(&par->mmap_map, &__tcx_mmap_map, sizeof(par->mmap_map));
if (!par->lowdepth) {
par->cplane = of_ioremap(&op->resource[4], 0,
- par->fbsize * sizeof(u32),
+ info->fix.smem_len * sizeof(u32),
"tcx cplane");
if (!par->cplane)
goto out_unmap_regs;
@@ -419,7 +418,7 @@ static int __devinit tcx_probe(struct of_device *op,
par->mmap_map[6].size = SBUS_MMAP_EMPTY;
}
- par->physbase = op->resource[0].start;
+ info->fix.smem_start = op->resource[0].start;
par->which_io = op->resource[0].flags & IORESOURCE_BITS;
for (i = 0; i < TCX_MMAP_ENTRIES; i++) {
@@ -473,7 +472,7 @@ static int __devinit tcx_probe(struct of_device *op,
printk(KERN_INFO "%s: TCX at %lx:%lx, %s\n",
dp->full_name,
par->which_io,
- par->physbase,
+ info->fix.smem_start,
par->lowdepth ? "8-bit only" : "24-bit depth");
return 0;
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 89f231dc443..ff43c888502 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -1315,7 +1315,6 @@ static int __devinit tdfxfb_setup_i2c_bus(struct tdfxfb_i2c_chan *chan,
strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
- chan->adapter.class = I2C_CLASS_TV_ANALOG;
chan->adapter.algo_data = &chan->algo;
chan->adapter.dev.parent = dev;
chan->algo.setsda = tdfxfb_i2c_setsda;
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 421770b5e6a..ca5b4643a40 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -45,7 +45,7 @@ static struct fb_fix_screeninfo uvesafb_fix __devinitdata = {
static int mtrr __devinitdata = 3; /* enable mtrr by default */
static int blank = 1; /* enable blanking by default */
static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
-static int pmi_setpal __devinitdata = 1; /* use PMI for palette changes */
+static bool pmi_setpal __devinitdata = true; /* use PMI for palette changes */
static int nocrtc __devinitdata; /* ignore CRTC settings */
static int noedid __devinitdata; /* don't try DDC transfers */
static int vram_remap __devinitdata; /* set amt. of memory to be used */
@@ -2002,11 +2002,7 @@ static void __devexit uvesafb_exit(void)
module_exit(uvesafb_exit);
-static int param_get_scroll(char *buffer, struct kernel_param *kp)
-{
- return 0;
-}
-
+#define param_get_scroll NULL
static int param_set_scroll(const char *val, struct kernel_param *kp)
{
ypan = 0;
@@ -2017,6 +2013,8 @@ static int param_set_scroll(const char *val, struct kernel_param *kp)
ypan = 1;
else if (!strcmp(val, "ywrap"))
ypan = 2;
+ else
+ return -EINVAL;
return 0;
}
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index d6856f43d24..bd37ee1f6a2 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -174,8 +174,17 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
return err;
}
+static void vesafb_destroy(struct fb_info *info)
+{
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ release_mem_region(info->aperture_base, info->aperture_size);
+ framebuffer_release(info);
+}
+
static struct fb_ops vesafb_ops = {
.owner = THIS_MODULE,
+ .fb_destroy = vesafb_destroy,
.fb_setcolreg = vesafb_setcolreg,
.fb_pan_display = vesafb_pan_display,
.fb_fillrect = cfb_fillrect,
@@ -286,6 +295,10 @@ static int __init vesafb_probe(struct platform_device *dev)
info->pseudo_palette = info->par;
info->par = NULL;
+ /* set vesafb aperture size for generic probing */
+ info->aperture_base = screen_info.lfb_base;
+ info->aperture_size = size_total;
+
info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len);
if (!info->screen_base) {
printk(KERN_ERR
@@ -437,7 +450,7 @@ static int __init vesafb_probe(struct platform_device *dev)
info->fbops = &vesafb_ops;
info->var = vesafb_defined;
info->fix = vesafb_fix;
- info->flags = FBINFO_FLAG_DEFAULT |
+ info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
(ypan ? FBINFO_HWACCEL_YPAN : 0);
if (!ypan)
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 2493f05e9f6..15502d5e364 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -384,7 +384,7 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
fb_size = XENFB_DEFAULT_FB_LEN;
}
- dev->dev.driver_data = info;
+ dev_set_drvdata(&dev->dev, info);
info->xbdev = dev;
info->irq = -1;
info->x1 = info->y1 = INT_MAX;
@@ -503,7 +503,7 @@ xenfb_make_preferred_console(void)
static int xenfb_resume(struct xenbus_device *dev)
{
- struct xenfb_info *info = dev->dev.driver_data;
+ struct xenfb_info *info = dev_get_drvdata(&dev->dev);
xenfb_disconnect_backend(info);
xenfb_init_shared_page(info, info->fb_info);
@@ -512,7 +512,7 @@ static int xenfb_resume(struct xenbus_device *dev)
static int xenfb_remove(struct xenbus_device *dev)
{
- struct xenfb_info *info = dev->dev.driver_data;
+ struct xenfb_info *info = dev_get_drvdata(&dev->dev);
xenfb_disconnect_backend(info);
if (info->fb_info) {
@@ -621,7 +621,7 @@ static void xenfb_disconnect_backend(struct xenfb_info *info)
static void xenfb_backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
- struct xenfb_info *info = dev->dev.driver_data;
+ struct xenfb_info *info = dev_get_drvdata(&dev->dev);
int val;
switch (backend_state) {
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 40a3a2afbfe..7a868bd16e0 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -1,13 +1,12 @@
/*
- * xilinxfb.c
- *
- * Xilinx TFT LCD frame buffer driver
+ * Xilinx TFT frame buffer driver
*
* Author: MontaVista Software, Inc.
* source@mvista.com
*
* 2002-2007 (c) MontaVista Software, Inc.
* 2007 (c) Secret Lab Technologies, Ltd.
+ * 2009 (c) Xilinx Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
@@ -24,33 +23,38 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/version.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#if defined(CONFIG_OF)
#include <linux/of_device.h>
#include <linux/of_platform.h>
-#endif
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/xilinxfb.h>
+#include <asm/dcr.h>
#define DRIVER_NAME "xilinxfb"
-#define DRIVER_DESCRIPTION "Xilinx TFT LCD frame buffer driver"
+
/*
* Xilinx calls it "PLB TFT LCD Controller" though it can also be used for
- * the VGA port on the Xilinx ML40x board. This is a hardware display controller
- * for a 640x480 resolution TFT or VGA screen.
+ * the VGA port on the Xilinx ML40x board. This is a hardware display
+ * controller for a 640x480 resolution TFT or VGA screen.
*
* The interface to the framebuffer is nice and simple. There are two
* control registers. The first tells the LCD interface where in memory
* the frame buffer is (only the 11 most significant bits are used, so
* don't start thinking about scrolling). The second allows the LCD to
* be turned on or off as well as rotated 180 degrees.
+ *
+ * With direct PLB access the second control register sits at a byte
+ * offset of 4, whereas with DCR access the register offset is simply 1
+ * (i.e. REG_CTRL). xilinx_fb_out_be32() takes care of this by shifting
+ * the register offset left by 2 when the direct PLB interface is used.
*/
#define NUM_REGS 2
#define REG_FB_ADDR 0
@@ -107,17 +111,28 @@ static struct fb_var_screeninfo xilinx_fb_var = {
.activate = FB_ACTIVATE_NOW
};
+
+#define PLB_ACCESS_FLAG 0x1 /* 1 = PLB, 0 = DCR */
+
struct xilinxfb_drvdata {
struct fb_info info; /* FB driver info record */
- u32 regs_phys; /* phys. address of the control registers */
- u32 __iomem *regs; /* virt. address of the control registers */
+ phys_addr_t regs_phys; /* phys. address of the control
+ registers */
+ void __iomem *regs; /* virt. address of the control
+ registers */
+
+ dcr_host_t dcr_host;
+ unsigned int dcr_start;
+ unsigned int dcr_len;
void *fb_virt; /* virt. address of the frame buffer */
dma_addr_t fb_phys; /* phys. address of the frame buffer */
int fb_alloced; /* Flag, was the fb memory alloced? */
+ u8 flags; /* features of the driver */
+
u32 reg_ctrl_default;
u32 pseudo_palette[PALETTE_ENTRIES_NO];
@@ -128,14 +143,19 @@ struct xilinxfb_drvdata {
container_of(_info, struct xilinxfb_drvdata, info)
/*
- * The LCD controller has DCR interface to its registers, but all
- * the boards and configurations the driver has been tested with
- * use opb2dcr bridge. So the registers are seen as memory mapped.
- * This macro is to make it simple to add the direct DCR access
- * when it's needed.
+ * The XPS TFT Controller can be accessed through PLB or DCR interface.
+ * To perform the read/write on the registers we need to check on
+ * which bus its connected and call the appropriate write API.
*/
-#define xilinx_fb_out_be32(driverdata, offset, val) \
- out_be32(driverdata->regs + offset, val)
+static void xilinx_fb_out_be32(struct xilinxfb_drvdata *drvdata, u32 offset,
+ u32 val)
+{
+ if (drvdata->flags & PLB_ACCESS_FLAG)
+ out_be32(drvdata->regs + (offset << 2), val);
+ else
+ dcr_write(drvdata->dcr_host, offset, val);
+
+}
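A standalone illustration of the offset handling above: the memory-mapped (PLB) path needs a byte offset, so register index N becomes N << 2 for 32-bit registers, while the DCR path uses the register index directly. REG_CTRL is assumed here to be register index 1, as the comment near the top of the file states:

#include <stdio.h>

#define REG_FB_ADDR 0   /* register indices as used by the driver */
#define REG_CTRL    1   /* assumed: "the offset is 1 i.e. REG_CTRL" */

int main(void)
{
	printf("PLB: REG_CTRL at byte offset %d\n", REG_CTRL << 2);
	printf("DCR: REG_CTRL at register offset %d\n", REG_CTRL);
	return 0;
}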
static int
xilinx_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
@@ -203,35 +223,34 @@ static struct fb_ops xilinxfb_ops =
* Bus independent setup/teardown
*/
-static int xilinxfb_assign(struct device *dev, unsigned long physaddr,
+static int xilinxfb_assign(struct device *dev,
+ struct xilinxfb_drvdata *drvdata,
+ unsigned long physaddr,
struct xilinxfb_platform_data *pdata)
{
- struct xilinxfb_drvdata *drvdata;
int rc;
int fbsize = pdata->xvirt * pdata->yvirt * BYTES_PER_PIXEL;
- /* Allocate the driver data region */
- drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata) {
- dev_err(dev, "Couldn't allocate device private record\n");
- return -ENOMEM;
- }
- dev_set_drvdata(dev, drvdata);
-
- /* Map the control registers in */
- if (!request_mem_region(physaddr, 8, DRIVER_NAME)) {
- dev_err(dev, "Couldn't lock memory region at 0x%08lX\n",
- physaddr);
- rc = -ENODEV;
- goto err_region;
- }
- drvdata->regs_phys = physaddr;
- drvdata->regs = ioremap(physaddr, 8);
- if (!drvdata->regs) {
- dev_err(dev, "Couldn't lock memory region at 0x%08lX\n",
- physaddr);
- rc = -ENODEV;
- goto err_map;
+ if (drvdata->flags & PLB_ACCESS_FLAG) {
+ /*
+ * Map the control registers in if the controller
+ * is on direct PLB interface.
+ */
+ if (!request_mem_region(physaddr, 8, DRIVER_NAME)) {
+ dev_err(dev, "Couldn't lock memory region at 0x%08lX\n",
+ physaddr);
+ rc = -ENODEV;
+ goto err_region;
+ }
+
+ drvdata->regs_phys = physaddr;
+ drvdata->regs = ioremap(physaddr, 8);
+ if (!drvdata->regs) {
+ dev_err(dev, "Couldn't lock memory region at 0x%08lX\n",
+ physaddr);
+ rc = -ENODEV;
+ goto err_map;
+ }
}
/* Allocate the framebuffer memory */
@@ -247,7 +266,10 @@ static int xilinxfb_assign(struct device *dev, unsigned long physaddr,
if (!drvdata->fb_virt) {
dev_err(dev, "Could not allocate frame buffer memory\n");
rc = -ENOMEM;
- goto err_fbmem;
+ if (drvdata->flags & PLB_ACCESS_FLAG)
+ goto err_fbmem;
+ else
+ goto err_region;
}
/* Clear (turn to black) the framebuffer */
@@ -260,7 +282,8 @@ static int xilinxfb_assign(struct device *dev, unsigned long physaddr,
drvdata->reg_ctrl_default = REG_CTRL_ENABLE;
if (pdata->rotate_screen)
drvdata->reg_ctrl_default |= REG_CTRL_ROTATE;
- xilinx_fb_out_be32(drvdata, REG_CTRL, drvdata->reg_ctrl_default);
+ xilinx_fb_out_be32(drvdata, REG_CTRL,
+ drvdata->reg_ctrl_default);
/* Fill struct fb_info */
drvdata->info.device = dev;
@@ -296,11 +319,14 @@ static int xilinxfb_assign(struct device *dev, unsigned long physaddr,
goto err_regfb;
}
+ if (drvdata->flags & PLB_ACCESS_FLAG) {
+ /* Put a banner in the log (for DEBUG) */
+ dev_dbg(dev, "regs: phys=%lx, virt=%p\n", physaddr,
+ drvdata->regs);
+ }
/* Put a banner in the log (for DEBUG) */
- dev_dbg(dev, "regs: phys=%lx, virt=%p\n", physaddr, drvdata->regs);
- dev_dbg(dev, "fb: phys=%llx, virt=%p, size=%x\n",
- (unsigned long long) drvdata->fb_phys, drvdata->fb_virt,
- fbsize);
+ dev_dbg(dev, "fb: phys=%p, virt=%p, size=%x\n",
+ (void *)drvdata->fb_phys, drvdata->fb_virt, fbsize);
return 0; /* success */
@@ -311,14 +337,19 @@ err_cmap:
if (drvdata->fb_alloced)
dma_free_coherent(dev, PAGE_ALIGN(fbsize), drvdata->fb_virt,
drvdata->fb_phys);
+ else
+ iounmap(drvdata->fb_virt);
+
/* Turn off the display */
xilinx_fb_out_be32(drvdata, REG_CTRL, 0);
err_fbmem:
- iounmap(drvdata->regs);
+ if (drvdata->flags & PLB_ACCESS_FLAG)
+ iounmap(drvdata->regs);
err_map:
- release_mem_region(physaddr, 8);
+ if (drvdata->flags & PLB_ACCESS_FLAG)
+ release_mem_region(physaddr, 8);
err_region:
kfree(drvdata);
@@ -342,12 +373,18 @@ static int xilinxfb_release(struct device *dev)
if (drvdata->fb_alloced)
dma_free_coherent(dev, PAGE_ALIGN(drvdata->info.fix.smem_len),
drvdata->fb_virt, drvdata->fb_phys);
+ else
+ iounmap(drvdata->fb_virt);
/* Turn off the display */
xilinx_fb_out_be32(drvdata, REG_CTRL, 0);
- iounmap(drvdata->regs);
- release_mem_region(drvdata->regs_phys, 8);
+ /* Release the resources, as allocated based on interface */
+ if (drvdata->flags & PLB_ACCESS_FLAG) {
+ iounmap(drvdata->regs);
+ release_mem_region(drvdata->regs_phys, 8);
+ } else
+ dcr_unmap(drvdata->dcr_host, drvdata->dcr_len);
kfree(drvdata);
dev_set_drvdata(dev, NULL);
@@ -356,77 +393,57 @@ static int xilinxfb_release(struct device *dev)
}
/* ---------------------------------------------------------------------
- * Platform bus binding
- */
-
-static int
-xilinxfb_platform_probe(struct platform_device *pdev)
-{
- struct xilinxfb_platform_data *pdata;
- struct resource *res;
-
- /* Find the registers address */
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!res) {
- dev_err(&pdev->dev, "Couldn't get registers resource\n");
- return -ENODEV;
- }
-
- /* If a pdata structure is provided, then extract the parameters */
- pdata = &xilinx_fb_default_pdata;
- if (pdev->dev.platform_data) {
- pdata = pdev->dev.platform_data;
- if (!pdata->xres)
- pdata->xres = xilinx_fb_default_pdata.xres;
- if (!pdata->yres)
- pdata->yres = xilinx_fb_default_pdata.yres;
- if (!pdata->xvirt)
- pdata->xvirt = xilinx_fb_default_pdata.xvirt;
- if (!pdata->yvirt)
- pdata->yvirt = xilinx_fb_default_pdata.yvirt;
- }
-
- return xilinxfb_assign(&pdev->dev, res->start, pdata);
-}
-
-static int
-xilinxfb_platform_remove(struct platform_device *pdev)
-{
- return xilinxfb_release(&pdev->dev);
-}
-
-
-static struct platform_driver xilinxfb_platform_driver = {
- .probe = xilinxfb_platform_probe,
- .remove = xilinxfb_platform_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- },
-};
-
-/* ---------------------------------------------------------------------
* OF bus binding
*/
-#if defined(CONFIG_OF)
static int __devinit
xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match)
{
- struct resource res;
const u32 *prop;
+ u32 *p;
+ u32 tft_access;
struct xilinxfb_platform_data pdata;
+ struct resource res;
int size, rc;
+ int start = 0, len = 0;
+ dcr_host_t dcr_host;
+ struct xilinxfb_drvdata *drvdata;
/* Copy with the default pdata (not a ptr reference!) */
pdata = xilinx_fb_default_pdata;
dev_dbg(&op->dev, "xilinxfb_of_probe(%p, %p)\n", op, match);
- rc = of_address_to_resource(op->node, 0, &res);
- if (rc) {
- dev_err(&op->dev, "invalid address\n");
- return rc;
+ /*
+ * To check whether the core is connected directly to DCR or PLB
+ * interface and initialize the tft_access accordingly.
+ */
+ p = (u32 *)of_get_property(op->node, "xlnx,dcr-splb-slave-if", NULL);
+
+ if (p)
+ tft_access = *p;
+ else
+ tft_access = 0; /* For backward compatibility */
+
+ /*
+ * Fill the resource structure if its direct PLB interface
+ * otherwise fill the dcr_host structure.
+ */
+ if (tft_access) {
+ rc = of_address_to_resource(op->node, 0, &res);
+ if (rc) {
+ dev_err(&op->dev, "invalid address\n");
+ return -ENODEV;
+ }
+
+ } else {
+ start = dcr_resource_start(op->node, 0);
+ len = dcr_resource_len(op->node, 0);
+ dcr_host = dcr_map(op->node, start, len);
+ if (!DCR_MAP_OK(dcr_host)) {
+ dev_err(&op->dev, "invalid address\n");
+ return -ENODEV;
+ }
}
prop = of_get_property(op->node, "phys-size", &size);
@@ -450,7 +467,26 @@ xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match)
if (of_find_property(op->node, "rotate-display", NULL))
pdata.rotate_screen = 1;
- return xilinxfb_assign(&op->dev, res.start, &pdata);
+ /* Allocate the driver data region */
+ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ dev_err(&op->dev, "Couldn't allocate device private record\n");
+ return -ENOMEM;
+ }
+ dev_set_drvdata(&op->dev, drvdata);
+
+ if (tft_access)
+ drvdata->flags |= PLB_ACCESS_FLAG;
+
+ /* Arguments are passed based on the interface */
+ if (drvdata->flags & PLB_ACCESS_FLAG) {
+ return xilinxfb_assign(&op->dev, drvdata, res.start, &pdata);
+ } else {
+ drvdata->dcr_start = start;
+ drvdata->dcr_len = len;
+ drvdata->dcr_host = dcr_host;
+ return xilinxfb_assign(&op->dev, drvdata, 0, &pdata);
+ }
}
static int __devexit xilinxfb_of_remove(struct of_device *op)
@@ -460,7 +496,9 @@ static int __devexit xilinxfb_of_remove(struct of_device *op)
/* Match table for of_platform binding */
static struct of_device_id xilinxfb_of_match[] __devinitdata = {
+ { .compatible = "xlnx,xps-tft-1.00.a", },
{ .compatible = "xlnx,plb-tft-cntlr-ref-1.00.a", },
+ { .compatible = "xlnx,plb-dvi-cntlr-ref-1.00.c", },
{},
};
MODULE_DEVICE_TABLE(of, xilinxfb_of_match);
@@ -476,22 +514,6 @@ static struct of_platform_driver xilinxfb_of_driver = {
},
};
-/* Registration helpers to keep the number of #ifdefs to a minimum */
-static inline int __init xilinxfb_of_register(void)
-{
- pr_debug("xilinxfb: calling of_register_platform_driver()\n");
- return of_register_platform_driver(&xilinxfb_of_driver);
-}
-
-static inline void __exit xilinxfb_of_unregister(void)
-{
- of_unregister_platform_driver(&xilinxfb_of_driver);
-}
-#else /* CONFIG_OF */
-/* CONFIG_OF not enabled; do nothing helpers */
-static inline int __init xilinxfb_of_register(void) { return 0; }
-static inline void __exit xilinxfb_of_unregister(void) { }
-#endif /* CONFIG_OF */
/* ---------------------------------------------------------------------
* Module setup and teardown
@@ -500,28 +522,18 @@ static inline void __exit xilinxfb_of_unregister(void) { }
static int __init
xilinxfb_init(void)
{
- int rc;
- rc = xilinxfb_of_register();
- if (rc)
- return rc;
-
- rc = platform_driver_register(&xilinxfb_platform_driver);
- if (rc)
- xilinxfb_of_unregister();
-
- return rc;
+ return of_register_platform_driver(&xilinxfb_of_driver);
}
static void __exit
xilinxfb_cleanup(void)
{
- platform_driver_unregister(&xilinxfb_platform_driver);
- xilinxfb_of_unregister();
+ of_unregister_platform_driver(&xilinxfb_of_driver);
}
module_init(xilinxfb_init);
module_exit(xilinxfb_cleanup);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
-MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_DESCRIPTION("Xilinx TFT frame buffer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 018c070a357..3a43ebf83a4 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -31,21 +31,37 @@ static ssize_t modalias_show(struct device *_d,
return sprintf(buf, "virtio:d%08Xv%08X\n",
dev->id.device, dev->id.vendor);
}
+static ssize_t features_show(struct device *_d,
+ struct device_attribute *attr, char *buf)
+{
+ struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
+ unsigned int i;
+ ssize_t len = 0;
+
+ /* We actually represent this as a bitstring, as it could be
+ * of arbitrary length in the future. */
+ for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++)
+ len += sprintf(buf+len, "%c",
+ test_bit(i, dev->features) ? '1' : '0');
+ len += sprintf(buf+len, "\n");
+ return len;
+}
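The new sysfs attribute above emits one '0'/'1' character per possible feature bit followed by a newline, so userspace sees a fixed-width bitstring. A standalone sketch of the same formatting over a single 32-bit word (the feature values here are made up):

#include <stdio.h>

int main(void)
{
	/* pretend features 0, 6 and 24 are offered by the device */
	unsigned long features = (1ul << 0) | (1ul << 6) | (1ul << 24);
	int i;

	for (i = 0; i < 32; i++)
		putchar(features & (1ul << i) ? '1' : '0');
	putchar('\n');
	return 0;
}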
static struct device_attribute virtio_dev_attrs[] = {
__ATTR_RO(device),
__ATTR_RO(vendor),
__ATTR_RO(status),
__ATTR_RO(modalias),
+ __ATTR_RO(features),
__ATTR_NULL
};
static inline int virtio_id_match(const struct virtio_device *dev,
const struct virtio_device_id *id)
{
- if (id->device != dev->id.device)
+ if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID)
return 0;
- return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor != dev->id.vendor;
+ return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor;
}
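The two fixes above change the matching semantics: a driver id table entry with device == VIRTIO_DEV_ANY_ID now matches any device, and the vendor test is corrected from != to ==. A standalone sketch of the corrected predicate (the struct below is a simplified stand-in for struct virtio_device_id, and the ANY_ID value is assumed):

#include <stdio.h>

#define VIRTIO_DEV_ANY_ID 0xffffffff   /* assumed wildcard value */

struct id { unsigned int device, vendor; };

/* same logic as the fixed virtio_id_match() above */
static int matches(struct id dev, struct id drv)
{
	if (drv.device != dev.device && drv.device != VIRTIO_DEV_ANY_ID)
		return 0;
	return drv.vendor == VIRTIO_DEV_ANY_ID || drv.vendor == dev.vendor;
}

int main(void)
{
	struct id dev = { .device = 1, .vendor = 0x1af4 };
	struct id exact = { 1, 0x1af4 };
	struct id anydev = { VIRTIO_DEV_ANY_ID, 0x1af4 };
	struct id wrong = { 1, 0x1234 };

	printf("%d %d %d\n", matches(dev, exact),   /* 1 */
			     matches(dev, anydev),  /* 1 */
			     matches(dev, wrong));  /* 0 */
	return 0;
}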
/* This looks through all the IDs a driver claims to support. If any of them
@@ -118,13 +134,14 @@ static int virtio_dev_probe(struct device *_d)
if (device_features & (1 << i))
set_bit(i, dev->features);
+ dev->config->finalize_features(dev);
+
err = drv->probe(dev);
if (err)
add_status(dev, VIRTIO_CONFIG_S_FAILED);
- else {
- dev->config->finalize_features(dev);
+ else
add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
- }
+
return err;
}
@@ -185,6 +202,8 @@ int register_virtio_device(struct virtio_device *dev)
/* Acknowledge that we've seen the device. */
add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+ INIT_LIST_HEAD(&dev->vqs);
+
/* device_register() causes the bus infrastructure to look for a
* matching driver. */
err = device_register(&dev->dev);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 9c76a061a04..26b27826479 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -204,6 +204,9 @@ static int balloon(void *_vballoon)
static int virtballoon_probe(struct virtio_device *vdev)
{
struct virtio_balloon *vb;
+ struct virtqueue *vqs[2];
+ vq_callback_t *callbacks[] = { balloon_ack, balloon_ack };
+ const char *names[] = { "inflate", "deflate" };
int err;
vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
@@ -218,22 +221,17 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->vdev = vdev;
/* We expect two virtqueues. */
- vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack);
- if (IS_ERR(vb->inflate_vq)) {
- err = PTR_ERR(vb->inflate_vq);
+ err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
+ if (err)
goto out_free_vb;
- }
- vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack);
- if (IS_ERR(vb->deflate_vq)) {
- err = PTR_ERR(vb->deflate_vq);
- goto out_del_inflate_vq;
- }
+ vb->inflate_vq = vqs[0];
+ vb->deflate_vq = vqs[1];
vb->thread = kthread_run(balloon, vb, "vballoon");
if (IS_ERR(vb->thread)) {
err = PTR_ERR(vb->thread);
- goto out_del_deflate_vq;
+ goto out_del_vqs;
}
vb->tell_host_first
@@ -241,10 +239,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
return 0;
-out_del_deflate_vq:
- vdev->config->del_vq(vb->deflate_vq);
-out_del_inflate_vq:
- vdev->config->del_vq(vb->inflate_vq);
+out_del_vqs:
+ vdev->config->del_vqs(vdev);
out_free_vb:
kfree(vb);
out:
@@ -264,8 +260,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
/* Now we reset the device so we can clean up the queues. */
vdev->config->reset(vdev);
- vdev->config->del_vq(vb->deflate_vq);
- vdev->config->del_vq(vb->inflate_vq);
+ vdev->config->del_vqs(vdev);
kfree(vb);
}
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 330aacbdec1..193c8f0e5cc 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -42,6 +42,26 @@ struct virtio_pci_device
/* a list of queues so we can dispatch IRQs */
spinlock_t lock;
struct list_head virtqueues;
+
+ /* MSI-X support */
+ int msix_enabled;
+ int intx_enabled;
+ struct msix_entry *msix_entries;
+ /* Name strings for interrupts. This size should be enough,
+ * and I'm too lazy to allocate each name separately. */
+ char (*msix_names)[256];
+ /* Number of available vectors */
+ unsigned msix_vectors;
+ /* Vectors allocated */
+ unsigned msix_used_vectors;
+};
+
+/* Constants for MSI-X */
+/* Use first vector for configuration changes, second and the rest for
+ * virtqueues. Thus, we need at least 2 vectors for MSI-X. */
+enum {
+ VP_MSIX_CONFIG_VECTOR = 0,
+ VP_MSIX_VQ_VECTOR = 1,
};
struct virtio_pci_vq_info
@@ -60,6 +80,9 @@ struct virtio_pci_vq_info
/* the list node for the virtqueues list */
struct list_head node;
+
+ /* MSI-X vector (or none) */
+ unsigned vector;
};
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
@@ -109,7 +132,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
+ void __iomem *ioaddr = vp_dev->ioaddr +
+ VIRTIO_PCI_CONFIG(vp_dev) + offset;
u8 *ptr = buf;
int i;
@@ -123,7 +147,8 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
+ void __iomem *ioaddr = vp_dev->ioaddr +
+ VIRTIO_PCI_CONFIG(vp_dev) + offset;
const u8 *ptr = buf;
int i;
@@ -164,6 +189,37 @@ static void vp_notify(struct virtqueue *vq)
iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
}
+/* Handle a configuration change: Tell driver if it wants to know. */
+static irqreturn_t vp_config_changed(int irq, void *opaque)
+{
+ struct virtio_pci_device *vp_dev = opaque;
+ struct virtio_driver *drv;
+ drv = container_of(vp_dev->vdev.dev.driver,
+ struct virtio_driver, driver);
+
+ if (drv && drv->config_changed)
+ drv->config_changed(&vp_dev->vdev);
+ return IRQ_HANDLED;
+}
+
+/* Notify all virtqueues on an interrupt. */
+static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
+{
+ struct virtio_pci_device *vp_dev = opaque;
+ struct virtio_pci_vq_info *info;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_for_each_entry(info, &vp_dev->virtqueues, node) {
+ if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+ return ret;
+}
+
/* A small wrapper to also acknowledge the interrupt when it's handled.
* I really need an EIO hook for the vring so I can ack the interrupt once we
* know that we'll be handling the IRQ but before we invoke the callback since
@@ -173,9 +229,6 @@ static void vp_notify(struct virtqueue *vq)
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
struct virtio_pci_device *vp_dev = opaque;
- struct virtio_pci_vq_info *info;
- irqreturn_t ret = IRQ_NONE;
- unsigned long flags;
u8 isr;
/* reading the ISR has the effect of also clearing it so it's very
@@ -187,34 +240,137 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
return IRQ_NONE;
/* Configuration change? Tell driver if it wants to know. */
- if (isr & VIRTIO_PCI_ISR_CONFIG) {
- struct virtio_driver *drv;
- drv = container_of(vp_dev->vdev.dev.driver,
- struct virtio_driver, driver);
+ if (isr & VIRTIO_PCI_ISR_CONFIG)
+ vp_config_changed(irq, opaque);
- if (drv && drv->config_changed)
- drv->config_changed(&vp_dev->vdev);
+ return vp_vring_interrupt(irq, opaque);
+}
+
+static void vp_free_vectors(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ int i;
+
+ if (vp_dev->intx_enabled) {
+ free_irq(vp_dev->pci_dev->irq, vp_dev);
+ vp_dev->intx_enabled = 0;
}
- spin_lock_irqsave(&vp_dev->lock, flags);
- list_for_each_entry(info, &vp_dev->virtqueues, node) {
- if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
- ret = IRQ_HANDLED;
+ for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+ free_irq(vp_dev->msix_entries[i].vector, vp_dev);
+ vp_dev->msix_used_vectors = 0;
+
+ if (vp_dev->msix_enabled) {
+ /* Disable the vector used for configuration */
+ iowrite16(VIRTIO_MSI_NO_VECTOR,
+ vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+ /* Flush the write out to device */
+ ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+
+ vp_dev->msix_enabled = 0;
+ pci_disable_msix(vp_dev->pci_dev);
}
- spin_unlock_irqrestore(&vp_dev->lock, flags);
+}
- return ret;
+static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
+ int *options, int noptions)
+{
+ int i;
+ for (i = 0; i < noptions; ++i)
+ if (!pci_enable_msix(dev, entries, options[i]))
+ return options[i];
+ return -EBUSY;
+}
+
+static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ const char *name = dev_name(&vp_dev->vdev.dev);
+ unsigned i, v;
+ int err = -ENOMEM;
+ /* We want at most one vector per queue and one for config changes.
+ * Fall back to separate vectors for config and a shared one for queues.
+ * Finally fall back to regular interrupts. */
+ int options[] = { max_vqs + 1, 2 };
+ int nvectors = max(options[0], options[1]);
+
+ vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
+ GFP_KERNEL);
+ if (!vp_dev->msix_entries)
+ goto error_entries;
+ vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
+ GFP_KERNEL);
+ if (!vp_dev->msix_names)
+ goto error_names;
+
+ for (i = 0; i < nvectors; ++i)
+ vp_dev->msix_entries[i].entry = i;
+
+ err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries,
+ options, ARRAY_SIZE(options));
+ if (err < 0) {
+ /* Can't allocate enough MSI-X vectors, use regular interrupt */
+ vp_dev->msix_vectors = 0;
+ err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
+ IRQF_SHARED, name, vp_dev);
+ if (err)
+ goto error_irq;
+ vp_dev->intx_enabled = 1;
+ } else {
+ vp_dev->msix_vectors = err;
+ vp_dev->msix_enabled = 1;
+
+ /* Set the vector used for configuration */
+ v = vp_dev->msix_used_vectors;
+ snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+ "%s-config", name);
+ err = request_irq(vp_dev->msix_entries[v].vector,
+ vp_config_changed, 0, vp_dev->msix_names[v],
+ vp_dev);
+ if (err)
+ goto error_irq;
+ ++vp_dev->msix_used_vectors;
+
+ iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+ /* Verify we had enough resources to assign the vector */
+ v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+ if (v == VIRTIO_MSI_NO_VECTOR) {
+ err = -EBUSY;
+ goto error_irq;
+ }
+ }
+
+ if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) {
+ /* Shared vector for all VQs */
+ v = vp_dev->msix_used_vectors;
+ snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+ "%s-virtqueues", name);
+ err = request_irq(vp_dev->msix_entries[v].vector,
+ vp_vring_interrupt, 0, vp_dev->msix_names[v],
+ vp_dev);
+ if (err)
+ goto error_irq;
+ ++vp_dev->msix_used_vectors;
+ }
+ return 0;
+error_irq:
+ vp_free_vectors(vdev);
+ kfree(vp_dev->msix_names);
+error_names:
+ kfree(vp_dev->msix_entries);
+error_entries:
+ return err;
}
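
A minimal standalone sketch of what that fallback policy means for a hypothetical device with three virtqueues (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
        int max_vqs = 3;
        int options[] = { max_vqs + 1, 2 };     /* same order vp_request_vectors() tries */

        /* options[0] granted: vector 0 = config changes, vectors 1..3 = one per VQ.
         * options[1] granted: vector 0 = config changes, vector 1 shared by all VQs.
         * MSI-X unavailable:  one shared INTx line handled by vp_interrupt(). */
        printf("try %d MSI-X vectors, then %d, then legacy INTx\n",
               options[0], options[1]);
        return 0;
}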
-/* the config->find_vq() implementation */
static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
- void (*callback)(struct virtqueue *vq))
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_vq_info *info;
struct virtqueue *vq;
unsigned long flags, size;
- u16 num;
+ u16 num, vector;
int err;
/* Select the queue we're interested in */
@@ -233,6 +389,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
info->queue_index = index;
info->num = num;
+ info->vector = VIRTIO_MSI_NO_VECTOR;
size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
@@ -247,7 +404,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
/* create the vring */
vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN,
- vdev, info->queue, vp_notify, callback);
+ vdev, info->queue, vp_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto out_activate_queue;
@@ -256,12 +413,43 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
vq->priv = info;
info->vq = vq;
+ /* allocate per-vq vector if available and necessary */
+ if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) {
+ vector = vp_dev->msix_used_vectors;
+ snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names,
+ "%s-%s", dev_name(&vp_dev->vdev.dev), name);
+ err = request_irq(vp_dev->msix_entries[vector].vector,
+ vring_interrupt, 0,
+ vp_dev->msix_names[vector], vq);
+ if (err)
+ goto out_request_irq;
+ info->vector = vector;
+ ++vp_dev->msix_used_vectors;
+ } else
+ vector = VP_MSIX_VQ_VECTOR;
+
+ if (callback && vp_dev->msix_enabled) {
+ iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+ vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+ if (vector == VIRTIO_MSI_NO_VECTOR) {
+ err = -EBUSY;
+ goto out_assign;
+ }
+ }
+
spin_lock_irqsave(&vp_dev->lock, flags);
list_add(&info->node, &vp_dev->virtqueues);
spin_unlock_irqrestore(&vp_dev->lock, flags);
return vq;
+out_assign:
+ if (info->vector != VIRTIO_MSI_NO_VECTOR) {
+ free_irq(vp_dev->msix_entries[info->vector].vector, vq);
+ --vp_dev->msix_used_vectors;
+ }
+out_request_irq:
+ vring_del_virtqueue(vq);
out_activate_queue:
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
free_pages_exact(info->queue, size);
@@ -270,21 +458,27 @@ out_info:
return ERR_PTR(err);
}
-/* the config->del_vq() implementation */
static void vp_del_vq(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_vq_info *info = vq->priv;
- unsigned long flags, size;
+ unsigned long size;
- spin_lock_irqsave(&vp_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vp_dev->lock, flags);
+ iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+
+ if (info->vector != VIRTIO_MSI_NO_VECTOR)
+ free_irq(vp_dev->msix_entries[info->vector].vector, vq);
+
+ if (vp_dev->msix_enabled) {
+ iowrite16(VIRTIO_MSI_NO_VECTOR,
+ vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+ /* Flush the write out to device */
+ ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
+ }
vring_del_virtqueue(vq);
/* Select and deactivate the queue */
- iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
@@ -292,14 +486,57 @@ static void vp_del_vq(struct virtqueue *vq)
kfree(info);
}
+/* the config->del_vqs() implementation */
+static void vp_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+ vp_del_vq(vq);
+
+ vp_free_vectors(vdev);
+}
+
+/* the config->find_vqs() implementation */
+static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ int vectors = 0;
+ int i, err;
+
+ /* How many vectors would we like? */
+ for (i = 0; i < nvqs; ++i)
+ if (callbacks[i])
+ ++vectors;
+
+ err = vp_request_vectors(vdev, vectors);
+ if (err)
+ goto error_request;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i]))
+ goto error_find;
+ }
+ return 0;
+
+error_find:
+ vp_del_vqs(vdev);
+
+error_request:
+ return PTR_ERR(vqs[i]);
+}
+
static struct virtio_config_ops virtio_pci_config_ops = {
.get = vp_get,
.set = vp_set,
.get_status = vp_get_status,
.set_status = vp_set_status,
.reset = vp_reset,
- .find_vq = vp_find_vq,
- .del_vq = vp_del_vq,
+ .find_vqs = vp_find_vqs,
+ .del_vqs = vp_del_vqs,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
};
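
With find_vq()/del_vq() replaced by the batched find_vqs()/del_vqs() operations above, a virtio driver now hands over all of its callbacks and queue names in one call. A minimal sketch of a caller, assuming two queues; the hyp_* names are invented for illustration and error handling is trimmed:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void hyp_rx_done(struct virtqueue *vq) { /* ... */ }
static void hyp_tx_done(struct virtqueue *vq) { /* ... */ }

static int hyp_setup_vqs(struct virtio_device *vdev)
{
        struct virtqueue *vqs[2];
        vq_callback_t *callbacks[] = { hyp_rx_done, hyp_tx_done };
        const char *names[] = { "rx", "tx" };
        int err;

        /* One call sets up both queues and the MSI-X vectors behind them */
        err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
        if (err)
                return err;

        /* ... use vqs[0] and vqs[1] ... */

        vdev->config->del_vqs(vdev);    /* tears down queues and vectors */
        return 0;
}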
@@ -310,7 +547,7 @@ static void virtio_pci_release_dev(struct device *_d)
struct virtio_pci_device *vp_dev = to_vp_device(dev);
struct pci_dev *pci_dev = vp_dev->pci_dev;
- free_irq(pci_dev->irq, vp_dev);
+ vp_del_vqs(dev);
pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
@@ -369,21 +606,13 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
vp_dev->vdev.id.device = pci_dev->subsystem_device;
- /* register a handler for the queue with the PCI device's interrupt */
- err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
- dev_name(&vp_dev->vdev.dev), vp_dev);
- if (err)
- goto out_set_drvdata;
-
/* finally register the virtio device */
err = register_virtio_device(&vp_dev->vdev);
if (err)
- goto out_req_irq;
+ goto out_set_drvdata;
return 0;
-out_req_irq:
- free_irq(pci_dev->irq, vp_dev);
out_set_drvdata:
pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5c52369ab9b..a882f260651 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -23,21 +23,30 @@
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
-#define BAD_RING(_vq, fmt...) \
- do { dev_err(&(_vq)->vq.vdev->dev, fmt); BUG(); } while(0)
+#define BAD_RING(_vq, fmt, args...) \
+ do { \
+ dev_err(&(_vq)->vq.vdev->dev, \
+ "%s:"fmt, (_vq)->vq.name, ##args); \
+ BUG(); \
+ } while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq) \
do { \
if ((_vq)->in_use) \
- panic("in_use = %i\n", (_vq)->in_use); \
+ panic("%s:in_use = %i\n", \
+ (_vq)->vq.name, (_vq)->in_use); \
(_vq)->in_use = __LINE__; \
mb(); \
- } while(0)
+ } while (0)
#define END_USE(_vq) \
do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
#else
-#define BAD_RING(_vq, fmt...) \
- do { dev_err(&_vq->vq.vdev->dev, fmt); (_vq)->broken = true; } while(0)
+#define BAD_RING(_vq, fmt, args...) \
+ do { \
+ dev_err(&_vq->vq.vdev->dev, \
+ "%s:"fmt, (_vq)->vq.name, ##args); \
+ (_vq)->broken = true; \
+ } while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
@@ -52,6 +61,9 @@ struct vring_virtqueue
/* Other side has made a mess, don't try any more. */
bool broken;
+ /* Host supports indirect buffers */
+ bool indirect;
+
/* Number of free buffers */
unsigned int num_free;
/* Head of free buffer list. */
@@ -76,6 +88,55 @@ struct vring_virtqueue
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+/* Set up an indirect table of descriptors and add it to the queue. */
+static int vring_add_indirect(struct vring_virtqueue *vq,
+ struct scatterlist sg[],
+ unsigned int out,
+ unsigned int in)
+{
+ struct vring_desc *desc;
+ unsigned head;
+ int i;
+
+ desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
+ if (!desc)
+ return vq->vring.num;
+
+ /* Transfer entries from the sg list into the indirect page */
+ for (i = 0; i < out; i++) {
+ desc[i].flags = VRING_DESC_F_NEXT;
+ desc[i].addr = sg_phys(sg);
+ desc[i].len = sg->length;
+ desc[i].next = i+1;
+ sg++;
+ }
+ for (; i < (out + in); i++) {
+ desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
+ desc[i].addr = sg_phys(sg);
+ desc[i].len = sg->length;
+ desc[i].next = i+1;
+ sg++;
+ }
+
+ /* Last one doesn't continue. */
+ desc[i-1].flags &= ~VRING_DESC_F_NEXT;
+ desc[i-1].next = 0;
+
+ /* We're about to use a buffer */
+ vq->num_free--;
+
+ /* Use a single buffer which doesn't continue */
+ head = vq->free_head;
+ vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vring.desc[head].addr = virt_to_phys(desc);
+ vq->vring.desc[head].len = i * sizeof(struct vring_desc);
+
+ /* Update free pointer */
+ vq->free_head = vq->vring.desc[head].next;
+
+ return head;
+}
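
The point of the indirect table built above is that a multi-element request costs only one slot in the ring proper. A standalone back-of-the-envelope sketch of the capacity difference (the numbers are illustrative):

#include <stdio.h>

int main(void)
{
        unsigned int ring_entries = 128;        /* example ring size */
        unsigned int sg_per_request = 8;        /* out + in entries per request */

        /* Without VIRTIO_RING_F_INDIRECT_DESC each request occupies
         * sg_per_request ring descriptors; with it, exactly one. */
        printf("direct:   %u requests in flight\n", ring_entries / sg_per_request);
        printf("indirect: %u requests in flight\n", ring_entries);
        return 0;
}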
+
static int vring_add_buf(struct virtqueue *_vq,
struct scatterlist sg[],
unsigned int out,
@@ -85,12 +146,21 @@ static int vring_add_buf(struct virtqueue *_vq,
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i, avail, head, uninitialized_var(prev);
+ START_USE(vq);
+
BUG_ON(data == NULL);
+
+ /* If the host supports indirect descriptor tables, and we have multiple
+ * buffers, then go indirect. FIXME: tune this threshold */
+ if (vq->indirect && (out + in) > 1 && vq->num_free) {
+ head = vring_add_indirect(vq, sg, out, in);
+ if (head != vq->vring.num)
+ goto add_head;
+ }
+
BUG_ON(out + in > vq->vring.num);
BUG_ON(out + in == 0);
- START_USE(vq);
-
if (vq->num_free < out + in) {
pr_debug("Can't add buf len %i - avail = %i\n",
out + in, vq->num_free);
@@ -127,6 +197,7 @@ static int vring_add_buf(struct virtqueue *_vq,
/* Update free pointer */
vq->free_head = i;
+add_head:
/* Set token. */
vq->data[head] = data;
@@ -170,6 +241,11 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
/* Put back on free list: find end */
i = head;
+
+ /* Free the indirect table */
+ if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
+ kfree(phys_to_virt(vq->vring.desc[i].addr));
+
while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
i = vq->vring.desc[i].next;
vq->num_free++;
@@ -284,7 +360,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
struct virtio_device *vdev,
void *pages,
void (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *))
+ void (*callback)(struct virtqueue *),
+ const char *name)
{
struct vring_virtqueue *vq;
unsigned int i;
@@ -303,14 +380,18 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.vq_ops = &vring_vq_ops;
+ vq->vq.name = name;
vq->notify = notify;
vq->broken = false;
vq->last_used_idx = 0;
vq->num_added = 0;
+ list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
#endif
+ vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
+
/* No callback? Tell other side not to bother us. */
if (!callback)
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
@@ -327,6 +408,7 @@ EXPORT_SYMBOL_GPL(vring_new_virtqueue);
void vring_del_virtqueue(struct virtqueue *vq)
{
+ list_del(&vq->list);
kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -338,6 +420,8 @@ void vring_transport_features(struct virtio_device *vdev)
for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
switch (i) {
+ case VIRTIO_RING_F_INDIRECT_DESC:
+ break;
default:
/* We don't understand this bit. */
clear_bit(i, vdev->features);
diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig
new file mode 100644
index 00000000000..f6542211db4
--- /dev/null
+++ b/drivers/vlynq/Kconfig
@@ -0,0 +1,20 @@
+menu "TI VLYNQ"
+
+config VLYNQ
+ bool "TI VLYNQ bus support"
+ depends on AR7 && EXPERIMENTAL
+ help
+ Support for the Texas Instruments(R) VLYNQ bus.
+ The VLYNQ bus is a high-speed, serial, packetized
+ data bus which allows external peripherals of a SoC
+ to appear in the system's main memory.
+
+ If unsure, say N.
+
+config VLYNQ_DEBUG
+ bool "VLYNQ bus debug"
+ depends on VLYNQ && DEBUG_KERNEL
+ help
+ Turn on VLYNQ bus debugging.
+
+endmenu
diff --git a/drivers/vlynq/Makefile b/drivers/vlynq/Makefile
new file mode 100644
index 00000000000..b3f61149b59
--- /dev/null
+++ b/drivers/vlynq/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for kernel vlynq drivers
+#
+
+obj-$(CONFIG_VLYNQ) += vlynq.o
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
new file mode 100644
index 00000000000..7335433b067
--- /dev/null
+++ b/drivers/vlynq/vlynq.c
@@ -0,0 +1,814 @@
+/*
+ * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Parts of the VLYNQ specification can be found here:
+ * http://www.ti.com/litv/pdf/sprue36a
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/vlynq.h>
+
+#define VLYNQ_CTRL_PM_ENABLE 0x80000000
+#define VLYNQ_CTRL_CLOCK_INT 0x00008000
+#define VLYNQ_CTRL_CLOCK_DIV(x) (((x) & 7) << 16)
+#define VLYNQ_CTRL_INT_LOCAL 0x00004000
+#define VLYNQ_CTRL_INT_ENABLE 0x00002000
+#define VLYNQ_CTRL_INT_VECTOR(x) (((x) & 0x1f) << 8)
+#define VLYNQ_CTRL_INT2CFG 0x00000080
+#define VLYNQ_CTRL_RESET 0x00000001
+
+#define VLYNQ_CTRL_CLOCK_MASK (0x7 << 16)
+
+#define VLYNQ_INT_OFFSET 0x00000014
+#define VLYNQ_REMOTE_OFFSET 0x00000080
+
+#define VLYNQ_STATUS_LINK 0x00000001
+#define VLYNQ_STATUS_LERROR 0x00000080
+#define VLYNQ_STATUS_RERROR 0x00000100
+
+#define VINT_ENABLE 0x00000100
+#define VINT_TYPE_EDGE 0x00000080
+#define VINT_LEVEL_LOW 0x00000040
+#define VINT_VECTOR(x) ((x) & 0x1f)
+#define VINT_OFFSET(irq) (8 * ((irq) % 4))
+
+#define VLYNQ_AUTONEGO_V2 0x00010000
+
+struct vlynq_regs {
+ u32 revision;
+ u32 control;
+ u32 status;
+ u32 int_prio;
+ u32 int_status;
+ u32 int_pending;
+ u32 int_ptr;
+ u32 tx_offset;
+ struct vlynq_mapping rx_mapping[4];
+ u32 chip;
+ u32 autonego;
+ u32 unused[6];
+ u32 int_device[8];
+};
+
+#ifdef VLYNQ_DEBUG
+static void vlynq_dump_regs(struct vlynq_device *dev)
+{
+ int i;
+
+ printk(KERN_DEBUG "VLYNQ local=%p remote=%p\n",
+ dev->local, dev->remote);
+ for (i = 0; i < 32; i++) {
+ printk(KERN_DEBUG "VLYNQ: local %d: %08x\n",
+ i + 1, ((u32 *)dev->local)[i]);
+ printk(KERN_DEBUG "VLYNQ: remote %d: %08x\n",
+ i + 1, ((u32 *)dev->remote)[i]);
+ }
+}
+
+static void vlynq_dump_mem(u32 *base, int count)
+{
+ int i;
+
+ for (i = 0; i < (count + 3) / 4; i++) {
+ if (i % 4 == 0)
+ printk(KERN_DEBUG "\nMEM[0x%04x]:", i * 4);
+ printk(KERN_DEBUG " 0x%08x", *(base + i));
+ }
+ printk(KERN_DEBUG "\n");
+}
+#endif
+
+/* Check the VLYNQ link status with a given device */
+static int vlynq_linked(struct vlynq_device *dev)
+{
+ int i;
+
+ for (i = 0; i < 100; i++)
+ if (readl(&dev->local->status) & VLYNQ_STATUS_LINK)
+ return 1;
+ else
+ cpu_relax();
+
+ return 0;
+}
+
+static void vlynq_reset(struct vlynq_device *dev)
+{
+ writel(readl(&dev->local->control) | VLYNQ_CTRL_RESET,
+ &dev->local->control);
+
+ /* Wait for the devices to finish resetting */
+ msleep(5);
+
+ /* Remove reset bit */
+ writel(readl(&dev->local->control) & ~VLYNQ_CTRL_RESET,
+ &dev->local->control);
+
+ /* Give some time for the devices to settle */
+ msleep(5);
+}
+
+static void vlynq_irq_unmask(unsigned int irq)
+{
+ u32 val;
+ struct vlynq_device *dev = get_irq_chip_data(irq);
+ int virq;
+
+ BUG_ON(!dev);
+ virq = irq - dev->irq_start;
+ val = readl(&dev->remote->int_device[virq >> 2]);
+ val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq);
+ writel(val, &dev->remote->int_device[virq >> 2]);
+}
+
+static void vlynq_irq_mask(unsigned int irq)
+{
+ u32 val;
+ struct vlynq_device *dev = get_irq_chip_data(irq);
+ int virq;
+
+ BUG_ON(!dev);
+ virq = irq - dev->irq_start;
+ val = readl(&dev->remote->int_device[virq >> 2]);
+ val &= ~(VINT_ENABLE << VINT_OFFSET(virq));
+ writel(val, &dev->remote->int_device[virq >> 2]);
+}
+
+static int vlynq_irq_type(unsigned int irq, unsigned int flow_type)
+{
+ u32 val;
+ struct vlynq_device *dev = get_irq_chip_data(irq);
+ int virq;
+
+ BUG_ON(!dev);
+ virq = irq - dev->irq_start;
+ val = readl(&dev->remote->int_device[virq >> 2]);
+ switch (flow_type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ val |= VINT_TYPE_EDGE << VINT_OFFSET(virq);
+ val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq));
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq));
+ val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq));
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq));
+ val |= VINT_LEVEL_LOW << VINT_OFFSET(virq);
+ break;
+ default:
+ return -EINVAL;
+ }
+ writel(val, &dev->remote->int_device[virq >> 2]);
+ return 0;
+}
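+
+The mask/unmask/set_type handlers above all index int_device[virq >> 2] and shift by VINT_OFFSET(virq) because each 32-bit int_device register packs four 8-bit per-interrupt fields. A small standalone sketch of that mapping (the HYP_ prefix is illustrative, not part of the patch):
+
+#include <stdio.h>
+
+#define HYP_VINT_OFFSET(virq)   (8 * ((virq) % 4))
+
+int main(void)
+{
+        int virq;
+
+        for (virq = 0; virq < 8; virq++)
+                printf("virq %d -> int_device[%d], bits %d..%d\n",
+                       virq, virq >> 2,
+                       HYP_VINT_OFFSET(virq), HYP_VINT_OFFSET(virq) + 7);
+        return 0;
+}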
+
+static void vlynq_local_ack(unsigned int irq)
+{
+ struct vlynq_device *dev = get_irq_chip_data(irq);
+
+ u32 status = readl(&dev->local->status);
+
+ pr_debug("%s: local status: 0x%08x\n",
+ dev_name(&dev->dev), status);
+ writel(status, &dev->local->status);
+}
+
+static void vlynq_remote_ack(unsigned int irq)
+{
+ struct vlynq_device *dev = get_irq_chip_data(irq);
+
+ u32 status = readl(&dev->remote->status);
+
+ pr_debug("%s: remote status: 0x%08x\n",
+ dev_name(&dev->dev), status);
+ writel(status, &dev->remote->status);
+}
+
+static irqreturn_t vlynq_irq(int irq, void *dev_id)
+{
+ struct vlynq_device *dev = dev_id;
+ u32 status;
+ int virq = 0;
+
+ status = readl(&dev->local->int_status);
+ writel(status, &dev->local->int_status);
+
+ if (unlikely(!status))
+ spurious_interrupt();
+
+ while (status) {
+ if (status & 1)
+ do_IRQ(dev->irq_start + virq);
+ status >>= 1;
+ virq++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct irq_chip vlynq_irq_chip = {
+ .name = "vlynq",
+ .unmask = vlynq_irq_unmask,
+ .mask = vlynq_irq_mask,
+ .set_type = vlynq_irq_type,
+};
+
+static struct irq_chip vlynq_local_chip = {
+ .name = "vlynq local error",
+ .unmask = vlynq_irq_unmask,
+ .mask = vlynq_irq_mask,
+ .ack = vlynq_local_ack,
+};
+
+static struct irq_chip vlynq_remote_chip = {
+ .name = "vlynq local error",
+ .unmask = vlynq_irq_unmask,
+ .mask = vlynq_irq_mask,
+ .ack = vlynq_remote_ack,
+};
+
+static int vlynq_setup_irq(struct vlynq_device *dev)
+{
+ u32 val;
+ int i, virq;
+
+ if (dev->local_irq == dev->remote_irq) {
+ printk(KERN_ERR
+ "%s: local vlynq irq should be different from remote\n",
+ dev_name(&dev->dev));
+ return -EINVAL;
+ }
+
+ /* Clear local and remote error bits */
+ writel(readl(&dev->local->status), &dev->local->status);
+ writel(readl(&dev->remote->status), &dev->remote->status);
+
+ /* Now setup interrupts */
+ val = VLYNQ_CTRL_INT_VECTOR(dev->local_irq);
+ val |= VLYNQ_CTRL_INT_ENABLE | VLYNQ_CTRL_INT_LOCAL |
+ VLYNQ_CTRL_INT2CFG;
+ val |= readl(&dev->local->control);
+ writel(VLYNQ_INT_OFFSET, &dev->local->int_ptr);
+ writel(val, &dev->local->control);
+
+ val = VLYNQ_CTRL_INT_VECTOR(dev->remote_irq);
+ val |= VLYNQ_CTRL_INT_ENABLE;
+ val |= readl(&dev->remote->control);
+ writel(VLYNQ_INT_OFFSET, &dev->remote->int_ptr);
+ writel(val, &dev->remote->int_ptr);
+ writel(val, &dev->remote->control);
+
+ for (i = dev->irq_start; i <= dev->irq_end; i++) {
+ virq = i - dev->irq_start;
+ if (virq == dev->local_irq) {
+ set_irq_chip_and_handler(i, &vlynq_local_chip,
+ handle_level_irq);
+ set_irq_chip_data(i, dev);
+ } else if (virq == dev->remote_irq) {
+ set_irq_chip_and_handler(i, &vlynq_remote_chip,
+ handle_level_irq);
+ set_irq_chip_data(i, dev);
+ } else {
+ set_irq_chip_and_handler(i, &vlynq_irq_chip,
+ handle_simple_irq);
+ set_irq_chip_data(i, dev);
+ writel(0, &dev->remote->int_device[virq >> 2]);
+ }
+ }
+
+ if (request_irq(dev->irq, vlynq_irq, IRQF_SHARED, "vlynq", dev)) {
+ printk(KERN_ERR "%s: request_irq failed\n",
+ dev_name(&dev->dev));
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void vlynq_device_release(struct device *dev)
+{
+ struct vlynq_device *vdev = to_vlynq_device(dev);
+ kfree(vdev);
+}
+
+static int vlynq_device_match(struct device *dev,
+ struct device_driver *drv)
+{
+ struct vlynq_device *vdev = to_vlynq_device(dev);
+ struct vlynq_driver *vdrv = to_vlynq_driver(drv);
+ struct vlynq_device_id *ids = vdrv->id_table;
+
+ while (ids->id) {
+ if (ids->id == vdev->dev_id) {
+ vdev->divisor = ids->divisor;
+ vlynq_set_drvdata(vdev, ids);
+ printk(KERN_INFO "Driver found for VLYNQ "
+ "device: %08x\n", vdev->dev_id);
+ return 1;
+ }
+ printk(KERN_DEBUG "Not using the %08x VLYNQ device's driver"
+ " for VLYNQ device: %08x\n", ids->id, vdev->dev_id);
+ ids++;
+ }
+ return 0;
+}
+
+static int vlynq_device_probe(struct device *dev)
+{
+ struct vlynq_device *vdev = to_vlynq_device(dev);
+ struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
+ struct vlynq_device_id *id = vlynq_get_drvdata(vdev);
+ int result = -ENODEV;
+
+ if (drv->probe)
+ result = drv->probe(vdev, id);
+ if (result)
+ put_device(dev);
+ return result;
+}
+
+static int vlynq_device_remove(struct device *dev)
+{
+ struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
+
+ if (drv->remove)
+ drv->remove(to_vlynq_device(dev));
+
+ return 0;
+}
+
+int __vlynq_register_driver(struct vlynq_driver *driver, struct module *owner)
+{
+ driver->driver.name = driver->name;
+ driver->driver.bus = &vlynq_bus_type;
+ return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL(__vlynq_register_driver);
+
+void vlynq_unregister_driver(struct vlynq_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL(vlynq_unregister_driver);
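+
+The register/unregister helpers above are all a bus client needs. A minimal sketch of a VLYNQ client driver, assuming the linux/vlynq.h structure layout implied by the match/probe code in this file; the hyp_* names and the device ID are invented:
+
+#include <linux/module.h>
+#include <linux/vlynq.h>
+
+static struct vlynq_device_id hyp_id_table[] = {
+        { .id = 0x00000041, .divisor = vlynq_div_auto },        /* made-up ID */
+        { },
+};
+
+static int hyp_probe(struct vlynq_device *vdev, struct vlynq_device_id *id)
+{
+        /* vlynq_enable_device(vdev) would bring the link up before use */
+        return 0;
+}
+
+static void hyp_remove(struct vlynq_device *vdev)
+{
+}
+
+static struct vlynq_driver hyp_driver = {
+        .name           = "hyp-vlynq-client",
+        .id_table       = hyp_id_table,
+        .probe          = hyp_probe,
+        .remove         = hyp_remove,
+};
+
+static int __init hyp_init(void)
+{
+        return __vlynq_register_driver(&hyp_driver, THIS_MODULE);
+}
+module_init(hyp_init);
+
+static void __exit hyp_exit(void)
+{
+        vlynq_unregister_driver(&hyp_driver);
+}
+module_exit(hyp_exit);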
+
+/*
+ * A VLYNQ remote device can clock the VLYNQ bus master
+ * using a dedicated clock line. In that case, both the
+ * remote device and the bus master should have the same
+ * serial clock dividers configured. Iterate through the
+ * 8 possible dividers until we actually link with the
+ * device.
+ */
+static int __vlynq_try_remote(struct vlynq_device *dev)
+{
+ int i;
+
+ vlynq_reset(dev);
+ for (i = dev->dev_id ? vlynq_rdiv2 : vlynq_rdiv8; dev->dev_id ?
+ i <= vlynq_rdiv8 : i >= vlynq_rdiv2;
+ dev->dev_id ? i++ : i--) {
+
+ if (!vlynq_linked(dev))
+ break;
+
+ writel((readl(&dev->remote->control) &
+ ~VLYNQ_CTRL_CLOCK_MASK) |
+ VLYNQ_CTRL_CLOCK_INT |
+ VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1),
+ &dev->remote->control);
+ writel((readl(&dev->local->control)
+ & ~(VLYNQ_CTRL_CLOCK_INT |
+ VLYNQ_CTRL_CLOCK_MASK)) |
+ VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1),
+ &dev->local->control);
+
+ if (vlynq_linked(dev)) {
+ printk(KERN_DEBUG
+ "%s: using remote clock divisor %d\n",
+ dev_name(&dev->dev), i - vlynq_rdiv1 + 1);
+ dev->divisor = i;
+ return 0;
+ } else {
+ vlynq_reset(dev);
+ }
+ }
+
+ return -ENODEV;
+}
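+
+The for-loop above packs both scan directions into ternary expressions: when the peer's device ID is already known it walks the remote dividers upward from vlynq_rdiv2 to vlynq_rdiv8, otherwise downward from vlynq_rdiv8 to vlynq_rdiv2. An equivalent, more explicit sketch (the loop body is elided; the hyp_ name is illustrative, not part of the patch):
+
+#include <linux/errno.h>
+#include <linux/vlynq.h>
+
+static int hyp_scan_remote_dividers(struct vlynq_device *dev)
+{
+        int start = dev->dev_id ? vlynq_rdiv2 : vlynq_rdiv8;
+        int end   = dev->dev_id ? vlynq_rdiv8 : vlynq_rdiv2;
+        int step  = dev->dev_id ? 1 : -1;
+        int i;
+
+        for (i = start; i != end + step; i += step) {
+                /* ... program VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1) on the
+                 * remote side and test vlynq_linked(dev), exactly as the
+                 * loop body of __vlynq_try_remote() does ... */
+        }
+        return -ENODEV;
+}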
+
+/*
+ * A VLYNQ remote device can be clocked by the VLYNQ bus
+ * master using a dedicated clock line. In that case, only
+ * the bus master configures the serial clock divider.
+ * Iterate through the 8 possible dividers until we
+ * actually get a link with the device.
+ */
+static int __vlynq_try_local(struct vlynq_device *dev)
+{
+ int i;
+
+ vlynq_reset(dev);
+
+ for (i = dev->dev_id ? vlynq_ldiv2 : vlynq_ldiv8; dev->dev_id ?
+ i <= vlynq_ldiv8 : i >= vlynq_ldiv2;
+ dev->dev_id ? i++ : i--) {
+
+ writel((readl(&dev->local->control) &
+ ~VLYNQ_CTRL_CLOCK_MASK) |
+ VLYNQ_CTRL_CLOCK_INT |
+ VLYNQ_CTRL_CLOCK_DIV(i - vlynq_ldiv1),
+ &dev->local->control);
+
+ if (vlynq_linked(dev)) {
+ printk(KERN_DEBUG
+ "%s: using local clock divisor %d\n",
+ dev_name(&dev->dev), i - vlynq_ldiv1 + 1);
+ dev->divisor = i;
+ return 0;
+ } else {
+ vlynq_reset(dev);
+ }
+ }
+
+ return -ENODEV;
+}
+
+/*
+ * When using the external clocking method, the serial clock
+ * is supplied by an external oscillator, therefore we
+ * should mask the local clock bit in the clock control
+ * register for both the bus master and the remote device.
+ */
+static int __vlynq_try_external(struct vlynq_device *dev)
+{
+ vlynq_reset(dev);
+ if (!vlynq_linked(dev))
+ return -ENODEV;
+
+ writel((readl(&dev->remote->control) &
+ ~VLYNQ_CTRL_CLOCK_INT),
+ &dev->remote->control);
+
+ writel((readl(&dev->local->control) &
+ ~VLYNQ_CTRL_CLOCK_INT),
+ &dev->local->control);
+
+ if (vlynq_linked(dev)) {
+ printk(KERN_DEBUG "%s: using external clock\n",
+ dev_name(&dev->dev));
+ dev->divisor = vlynq_div_external;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int __vlynq_enable_device(struct vlynq_device *dev)
+{
+ int result;
+ struct plat_vlynq_ops *ops = dev->dev.platform_data;
+
+ result = ops->on(dev);
+ if (result)
+ return result;
+
+ switch (dev->divisor) {
+ case vlynq_div_external:
+ case vlynq_div_auto:
+ /* When the device is brought out of reset it should have clock
+ * generation negotiated by hardware.
+ * Check which device is generating clocks and perform setup
+ * accordingly. */
+ if (vlynq_linked(dev) && readl(&dev->remote->control) &
+ VLYNQ_CTRL_CLOCK_INT) {
+ if (!__vlynq_try_remote(dev) ||
+ !__vlynq_try_local(dev) ||
+ !__vlynq_try_external(dev))
+ return 0;
+ } else {
+ if (!__vlynq_try_external(dev) ||
+ !__vlynq_try_local(dev) ||
+ !__vlynq_try_remote(dev))
+ return 0;
+ }
+ break;
+ case vlynq_ldiv1:
+ case vlynq_ldiv2:
+ case vlynq_ldiv3:
+ case vlynq_ldiv4:
+ case vlynq_ldiv5:
+ case vlynq_ldiv6:
+ case vlynq_ldiv7:
+ case vlynq_ldiv8:
+ writel(VLYNQ_CTRL_CLOCK_INT |
+ VLYNQ_CTRL_CLOCK_DIV(dev->divisor -
+ vlynq_ldiv1), &dev->local->control);
+ writel(0, &dev->remote->control);
+ if (vlynq_linked(dev)) {
+ printk(KERN_DEBUG
+ "%s: using local clock divisor %d\n",
+ dev_name(&dev->dev),
+ dev->divisor - vlynq_ldiv1 + 1);
+ return 0;
+ }
+ break;
+ case vlynq_rdiv1:
+ case vlynq_rdiv2:
+ case vlynq_rdiv3:
+ case vlynq_rdiv4:
+ case vlynq_rdiv5:
+ case vlynq_rdiv6:
+ case vlynq_rdiv7:
+ case vlynq_rdiv8:
+ writel(0, &dev->local->control);
+ writel(VLYNQ_CTRL_CLOCK_INT |
+ VLYNQ_CTRL_CLOCK_DIV(dev->divisor -
+ vlynq_rdiv1), &dev->remote->control);
+ if (vlynq_linked(dev)) {
+ printk(KERN_DEBUG
+ "%s: using remote clock divisor %d\n",
+ dev_name(&dev->dev),
+ dev->divisor - vlynq_rdiv1 + 1);
+ return 0;
+ }
+ break;
+ }
+
+ ops->off(dev);
+ return -ENODEV;
+}
+
+int vlynq_enable_device(struct vlynq_device *dev)
+{
+ struct plat_vlynq_ops *ops = dev->dev.platform_data;
+ int result = -ENODEV;
+
+ result = __vlynq_enable_device(dev);
+ if (result)
+ return result;
+
+ result = vlynq_setup_irq(dev);
+ if (result)
+ ops->off(dev);
+
+ dev->enabled = !result;
+ return result;
+}
+EXPORT_SYMBOL(vlynq_enable_device);
+
+
+void vlynq_disable_device(struct vlynq_device *dev)
+{
+ struct plat_vlynq_ops *ops = dev->dev.platform_data;
+
+ dev->enabled = 0;
+ free_irq(dev->irq, dev);
+ ops->off(dev);
+}
+EXPORT_SYMBOL(vlynq_disable_device);
+
+int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
+ struct vlynq_mapping *mapping)
+{
+ int i;
+
+ if (!dev->enabled)
+ return -ENXIO;
+
+ writel(tx_offset, &dev->local->tx_offset);
+ for (i = 0; i < 4; i++) {
+ writel(mapping[i].offset, &dev->local->rx_mapping[i].offset);
+ writel(mapping[i].size, &dev->local->rx_mapping[i].size);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(vlynq_set_local_mapping);
+
+int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
+ struct vlynq_mapping *mapping)
+{
+ int i;
+
+ if (!dev->enabled)
+ return -ENXIO;
+
+ writel(tx_offset, &dev->remote->tx_offset);
+ for (i = 0; i < 4; i++) {
+ writel(mapping[i].offset, &dev->remote->rx_mapping[i].offset);
+ writel(mapping[i].size, &dev->remote->rx_mapping[i].size);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(vlynq_set_remote_mapping);
+
+int vlynq_set_local_irq(struct vlynq_device *dev, int virq)
+{
+ int irq = dev->irq_start + virq;
+ if (dev->enabled)
+ return -EBUSY;
+
+ if ((irq < dev->irq_start) || (irq > dev->irq_end))
+ return -EINVAL;
+
+ if (virq == dev->remote_irq)
+ return -EINVAL;
+
+ dev->local_irq = virq;
+
+ return 0;
+}
+EXPORT_SYMBOL(vlynq_set_local_irq);
+
+int vlynq_set_remote_irq(struct vlynq_device *dev, int virq)
+{
+ int irq = dev->irq_start + virq;
+ if (dev->enabled)
+ return -EBUSY;
+
+ if ((irq < dev->irq_start) || (irq > dev->irq_end))
+ return -EINVAL;
+
+ if (virq == dev->local_irq)
+ return -EINVAL;
+
+ dev->remote_irq = virq;
+
+ return 0;
+}
+EXPORT_SYMBOL(vlynq_set_remote_irq);
+
+static int vlynq_probe(struct platform_device *pdev)
+{
+ struct vlynq_device *dev;
+ struct resource *regs_res, *mem_res, *irq_res;
+ int len, result;
+
+ regs_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!regs_res)
+ return -ENODEV;
+
+ mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
+ if (!mem_res)
+ return -ENODEV;
+
+ irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "devirq");
+ if (!irq_res)
+ return -ENODEV;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ printk(KERN_ERR
+ "vlynq: failed to allocate device structure\n");
+ return -ENOMEM;
+ }
+
+ dev->id = pdev->id;
+ dev->dev.bus = &vlynq_bus_type;
+ dev->dev.parent = &pdev->dev;
+ dev_set_name(&dev->dev, "vlynq%d", dev->id);
+ dev->dev.platform_data = pdev->dev.platform_data;
+ dev->dev.release = vlynq_device_release;
+
+ dev->regs_start = regs_res->start;
+ dev->regs_end = regs_res->end;
+ dev->mem_start = mem_res->start;
+ dev->mem_end = mem_res->end;
+
+ len = regs_res->end - regs_res->start;
+ if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) {
+ printk(KERN_ERR "%s: Can't request vlynq registers\n",
+ dev_name(&dev->dev));
+ result = -ENXIO;
+ goto fail_request;
+ }
+
+ dev->local = ioremap(regs_res->start, len);
+ if (!dev->local) {
+ printk(KERN_ERR "%s: Can't remap vlynq registers\n",
+ dev_name(&dev->dev));
+ result = -ENXIO;
+ goto fail_remap;
+ }
+
+ dev->remote = (struct vlynq_regs *)((void *)dev->local +
+ VLYNQ_REMOTE_OFFSET);
+
+ dev->irq = platform_get_irq_byname(pdev, "irq");
+ dev->irq_start = irq_res->start;
+ dev->irq_end = irq_res->end;
+ dev->local_irq = dev->irq_end - dev->irq_start;
+ dev->remote_irq = dev->local_irq - 1;
+
+ if (device_register(&dev->dev))
+ goto fail_register;
+ platform_set_drvdata(pdev, dev);
+
+ printk(KERN_INFO "%s: regs 0x%p, irq %d, mem 0x%p\n",
+ dev_name(&dev->dev), (void *)dev->regs_start, dev->irq,
+ (void *)dev->mem_start);
+
+ dev->dev_id = 0;
+ dev->divisor = vlynq_div_auto;
+ result = __vlynq_enable_device(dev);
+ if (result == 0) {
+ dev->dev_id = readl(&dev->remote->chip);
+ ((struct plat_vlynq_ops *)(dev->dev.platform_data))->off(dev);
+ }
+ if (dev->dev_id)
+ printk(KERN_INFO "Found a VLYNQ device: %08x\n", dev->dev_id);
+
+ return 0;
+
+fail_register:
+ iounmap(dev->local);
+fail_remap:
+fail_request:
+ release_mem_region(regs_res->start, len);
+ kfree(dev);
+ return result;
+}
+
+static int vlynq_remove(struct platform_device *pdev)
+{
+ struct vlynq_device *dev = platform_get_drvdata(pdev);
+
+ device_unregister(&dev->dev);
+ iounmap(dev->local);
+ release_mem_region(dev->regs_start, dev->regs_end - dev->regs_start);
+
+ kfree(dev);
+
+ return 0;
+}
+
+static struct platform_driver vlynq_platform_driver = {
+ .driver.name = "vlynq",
+ .probe = vlynq_probe,
+ .remove = __devexit_p(vlynq_remove),
+};
+
+struct bus_type vlynq_bus_type = {
+ .name = "vlynq",
+ .match = vlynq_device_match,
+ .probe = vlynq_device_probe,
+ .remove = vlynq_device_remove,
+};
+EXPORT_SYMBOL(vlynq_bus_type);
+
+static int __init vlynq_init(void)
+{
+ int res = 0;
+
+ res = bus_register(&vlynq_bus_type);
+ if (res)
+ goto fail_bus;
+
+ res = platform_driver_register(&vlynq_platform_driver);
+ if (res)
+ goto fail_platform;
+
+ return 0;
+
+fail_platform:
+ bus_unregister(&vlynq_bus_type);
+fail_bus:
+ return res;
+}
+
+static void __exit vlynq_exit(void)
+{
+ platform_driver_unregister(&vlynq_platform_driver);
+ bus_unregister(&vlynq_bus_type);
+}
+
+module_init(vlynq_init);
+module_exit(vlynq_exit);
diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig
index 9adbb4f9047..fd2c7bd9dfb 100644
--- a/drivers/w1/Kconfig
+++ b/drivers/w1/Kconfig
@@ -8,7 +8,7 @@ menuconfig W1
If you want W1 support, you should say Y here.
This W1 support can also be built as a module. If so, the module
- will be called wire.ko.
+ will be called wire.
if W1
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 96d2f8e4c27..3195fb8b7d9 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -12,7 +12,7 @@ config W1_MASTER_MATROX
using Matrox's G400 GPIO pins.
This support is also available as a module. If so, the module
- will be called matrox_w1.ko.
+ will be called matrox_w1.
config W1_MASTER_DS2490
tristate "DS2490 USB <-> W1 transport layer for 1-wire"
@@ -22,7 +22,7 @@ config W1_MASTER_DS2490
for example DS9490*.
This support is also available as a module. If so, the module
- will be called ds2490.ko.
+ will be called ds2490.
config W1_MASTER_DS2482
tristate "Maxim DS2482 I2C to 1-Wire bridge"
@@ -56,7 +56,7 @@ config W1_MASTER_GPIO
GPIO pins. This driver uses the GPIO API to control the wire.
This support is also available as a module. If so, the module
- will be called w1-gpio.ko.
+ will be called w1-gpio.
config HDQ_MASTER_OMAP
tristate "OMAP HDQ driver"
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 5eb8f21da82..b166f2852a6 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -231,14 +231,14 @@ config DAVINCI_WATCHDOG
NOTE: once enabled, this timer cannot be disabled.
Say N if you are unsure.
-config ORION5X_WATCHDOG
- tristate "Orion5x watchdog"
- depends on ARCH_ORION5X
+config ORION_WATCHDOG
+ tristate "Orion watchdog"
+ depends on ARCH_ORION5X || ARCH_KIRKWOOD
help
Say Y here to include support for the watchdog timer
- in the Orion5x ARM SoCs.
+ in the Marvell Orion5x and Kirkwood ARM SoCs.
To compile this driver as a module, choose M here: the
- module will be called orion5x_wdt.
+ module will be called orion_wdt.
# AVR32 Architecture
@@ -531,7 +531,7 @@ config SBC8360_WDT
Board Computer produced by Axiomtek Co., Ltd. (www.axiomtek.com).
To compile this driver as a module, choose M here: the
- module will be called sbc8360.ko.
+ module will be called sbc8360.
Most people will say N.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 7f8c56b14f5..c3afa14d5be 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -40,7 +40,7 @@ obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
-obj-$(CONFIG_ORION5X_WATCHDOG) += orion5x_wdt.o
+obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c
index 96eb2cbe587..0c905967669 100644
--- a/drivers/watchdog/iop_wdt.c
+++ b/drivers/watchdog/iop_wdt.c
@@ -192,7 +192,7 @@ static int iop_wdt_release(struct inode *inode, struct file *file)
if (test_bit(WDT_ENABLED, &wdt_status))
state = wdt_disable();
- /* if the timer is not disbaled reload and notify that we are still
+ /* if the timer is not disabled reload and notify that we are still
* going down
*/
if (state != 0) {
diff --git a/drivers/watchdog/orion5x_wdt.c b/drivers/watchdog/orion_wdt.c
index 2cde568e4fb..2d9fb96a9ee 100644
--- a/drivers/watchdog/orion5x_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -1,7 +1,7 @@
/*
- * drivers/watchdog/orion5x_wdt.c
+ * drivers/watchdog/orion_wdt.c
*
- * Watchdog driver for Orion5x processors
+ * Watchdog driver for Orion/Kirkwood processors
*
* Author: Sylver Bruneau <sylver.bruneau@googlemail.com>
*
@@ -23,7 +23,7 @@
#include <linux/io.h>
#include <linux/spinlock.h>
#include <mach/bridge-regs.h>
-#include <plat/orion5x_wdt.h>
+#include <plat/orion_wdt.h>
/*
* Watchdog timer block registers.
@@ -43,7 +43,7 @@ static unsigned int wdt_tclk;
static unsigned long wdt_status;
static spinlock_t wdt_lock;
-static void orion5x_wdt_ping(void)
+static void orion_wdt_ping(void)
{
spin_lock(&wdt_lock);
@@ -53,7 +53,7 @@ static void orion5x_wdt_ping(void)
spin_unlock(&wdt_lock);
}
-static void orion5x_wdt_enable(void)
+static void orion_wdt_enable(void)
{
u32 reg;
@@ -73,23 +73,23 @@ static void orion5x_wdt_enable(void)
writel(reg, TIMER_CTRL);
/* Enable reset on watchdog */
- reg = readl(CPU_RESET_MASK);
- reg |= WDT_RESET;
- writel(reg, CPU_RESET_MASK);
+ reg = readl(RSTOUTn_MASK);
+ reg |= WDT_RESET_OUT_EN;
+ writel(reg, RSTOUTn_MASK);
spin_unlock(&wdt_lock);
}
-static void orion5x_wdt_disable(void)
+static void orion_wdt_disable(void)
{
u32 reg;
spin_lock(&wdt_lock);
/* Disable reset on watchdog */
- reg = readl(CPU_RESET_MASK);
- reg &= ~WDT_RESET;
- writel(reg, CPU_RESET_MASK);
+ reg = readl(RSTOUTn_MASK);
+ reg &= ~WDT_RESET_OUT_EN;
+ writel(reg, RSTOUTn_MASK);
/* Disable watchdog timer */
reg = readl(TIMER_CTRL);
@@ -99,7 +99,7 @@ static void orion5x_wdt_disable(void)
spin_unlock(&wdt_lock);
}
-static int orion5x_wdt_get_timeleft(int *time_left)
+static int orion_wdt_get_timeleft(int *time_left)
{
spin_lock(&wdt_lock);
*time_left = readl(WDT_VAL) / wdt_tclk;
@@ -107,16 +107,16 @@ static int orion5x_wdt_get_timeleft(int *time_left)
return 0;
}
-static int orion5x_wdt_open(struct inode *inode, struct file *file)
+static int orion_wdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(WDT_IN_USE, &wdt_status))
return -EBUSY;
clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
- orion5x_wdt_enable();
+ orion_wdt_enable();
return nonseekable_open(inode, file);
}
-static ssize_t orion5x_wdt_write(struct file *file, const char *data,
+static ssize_t orion_wdt_write(struct file *file, const char *data,
size_t len, loff_t *ppos)
{
if (len) {
@@ -133,18 +133,18 @@ static ssize_t orion5x_wdt_write(struct file *file, const char *data,
set_bit(WDT_OK_TO_CLOSE, &wdt_status);
}
}
- orion5x_wdt_ping();
+ orion_wdt_ping();
}
return len;
}
-static int orion5x_wdt_settimeout(int new_time)
+static int orion_wdt_settimeout(int new_time)
{
if ((new_time <= 0) || (new_time > wdt_max_duration))
return -EINVAL;
/* Set new watchdog time to be used when
- * orion5x_wdt_enable() or orion5x_wdt_ping() is called. */
+ * orion_wdt_enable() or orion_wdt_ping() is called. */
heartbeat = new_time;
return 0;
}
@@ -152,10 +152,10 @@ static int orion5x_wdt_settimeout(int new_time)
static const struct watchdog_info ident = {
.options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING,
- .identity = "Orion5x Watchdog",
+ .identity = "Orion Watchdog",
};
-static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
+static long orion_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret = -ENOTTY;
@@ -173,7 +173,7 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
break;
case WDIOC_KEEPALIVE:
- orion5x_wdt_ping();
+ orion_wdt_ping();
ret = 0;
break;
@@ -182,11 +182,11 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
if (ret)
break;
- if (orion5x_wdt_settimeout(time)) {
+ if (orion_wdt_settimeout(time)) {
ret = -EINVAL;
break;
}
- orion5x_wdt_ping();
+ orion_wdt_ping();
/* Fall through */
case WDIOC_GETTIMEOUT:
@@ -194,7 +194,7 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
break;
case WDIOC_GETTIMELEFT:
- if (orion5x_wdt_get_timeleft(&time)) {
+ if (orion_wdt_get_timeleft(&time)) {
ret = -EINVAL;
break;
}
@@ -204,10 +204,10 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
return ret;
}
-static int orion5x_wdt_release(struct inode *inode, struct file *file)
+static int orion_wdt_release(struct inode *inode, struct file *file)
{
if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
- orion5x_wdt_disable();
+ orion_wdt_disable();
else
printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
"timer will not stop\n");
@@ -218,98 +218,98 @@ static int orion5x_wdt_release(struct inode *inode, struct file *file)
}
-static const struct file_operations orion5x_wdt_fops = {
+static const struct file_operations orion_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .write = orion5x_wdt_write,
- .unlocked_ioctl = orion5x_wdt_ioctl,
- .open = orion5x_wdt_open,
- .release = orion5x_wdt_release,
+ .write = orion_wdt_write,
+ .unlocked_ioctl = orion_wdt_ioctl,
+ .open = orion_wdt_open,
+ .release = orion_wdt_release,
};
-static struct miscdevice orion5x_wdt_miscdev = {
+static struct miscdevice orion_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
- .fops = &orion5x_wdt_fops,
+ .fops = &orion_wdt_fops,
};
-static int __devinit orion5x_wdt_probe(struct platform_device *pdev)
+static int __devinit orion_wdt_probe(struct platform_device *pdev)
{
- struct orion5x_wdt_platform_data *pdata = pdev->dev.platform_data;
+ struct orion_wdt_platform_data *pdata = pdev->dev.platform_data;
int ret;
if (pdata) {
wdt_tclk = pdata->tclk;
} else {
- printk(KERN_ERR "Orion5x Watchdog misses platform data\n");
+ printk(KERN_ERR "Orion Watchdog misses platform data\n");
return -ENODEV;
}
- if (orion5x_wdt_miscdev.parent)
+ if (orion_wdt_miscdev.parent)
return -EBUSY;
- orion5x_wdt_miscdev.parent = &pdev->dev;
+ orion_wdt_miscdev.parent = &pdev->dev;
wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk;
- if (orion5x_wdt_settimeout(heartbeat))
+ if (orion_wdt_settimeout(heartbeat))
heartbeat = wdt_max_duration;
- ret = misc_register(&orion5x_wdt_miscdev);
+ ret = misc_register(&orion_wdt_miscdev);
if (ret)
return ret;
- printk(KERN_INFO "Orion5x Watchdog Timer: Initial timeout %d sec%s\n",
+ printk(KERN_INFO "Orion Watchdog Timer: Initial timeout %d sec%s\n",
heartbeat, nowayout ? ", nowayout" : "");
return 0;
}
-static int __devexit orion5x_wdt_remove(struct platform_device *pdev)
+static int __devexit orion_wdt_remove(struct platform_device *pdev)
{
int ret;
if (test_bit(WDT_IN_USE, &wdt_status)) {
- orion5x_wdt_disable();
+ orion_wdt_disable();
clear_bit(WDT_IN_USE, &wdt_status);
}
- ret = misc_deregister(&orion5x_wdt_miscdev);
+ ret = misc_deregister(&orion_wdt_miscdev);
if (!ret)
- orion5x_wdt_miscdev.parent = NULL;
+ orion_wdt_miscdev.parent = NULL;
return ret;
}
-static void orion5x_wdt_shutdown(struct platform_device *pdev)
+static void orion_wdt_shutdown(struct platform_device *pdev)
{
if (test_bit(WDT_IN_USE, &wdt_status))
- orion5x_wdt_disable();
+ orion_wdt_disable();
}
-static struct platform_driver orion5x_wdt_driver = {
- .probe = orion5x_wdt_probe,
- .remove = __devexit_p(orion5x_wdt_remove),
- .shutdown = orion5x_wdt_shutdown,
+static struct platform_driver orion_wdt_driver = {
+ .probe = orion_wdt_probe,
+ .remove = __devexit_p(orion_wdt_remove),
+ .shutdown = orion_wdt_shutdown,
.driver = {
.owner = THIS_MODULE,
- .name = "orion5x_wdt",
+ .name = "orion_wdt",
},
};
-static int __init orion5x_wdt_init(void)
+static int __init orion_wdt_init(void)
{
spin_lock_init(&wdt_lock);
- return platform_driver_register(&orion5x_wdt_driver);
+ return platform_driver_register(&orion_wdt_driver);
}
-static void __exit orion5x_wdt_exit(void)
+static void __exit orion_wdt_exit(void)
{
- platform_driver_unregister(&orion5x_wdt_driver);
+ platform_driver_unregister(&orion_wdt_driver);
}
-module_init(orion5x_wdt_init);
-module_exit(orion5x_wdt_exit);
+module_init(orion_wdt_init);
+module_exit(orion_wdt_exit);
MODULE_AUTHOR("Sylver Bruneau <sylver.bruneau@googlemail.com>");
-MODULE_DESCRIPTION("Orion5x Processor Watchdog");
+MODULE_DESCRIPTION("Orion Processor Watchdog");
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Initial watchdog heartbeat in seconds");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 8ac9cddac57..cab100acf98 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -18,6 +18,16 @@ config XEN_SCRUB_PAGES
secure, but slightly less efficient.
If in doubt, say yes.
+config XEN_DEV_EVTCHN
+ tristate "Xen /dev/xen/evtchn device"
+ depends on XEN
+ default y
+ help
+ The evtchn driver allows a userspace process to trigger event
+ channels and to receive notification of an event channel
+ firing.
+ If in doubt, say yes.
+
config XENFS
tristate "Xen filesystem"
depends on XEN
@@ -41,3 +51,13 @@ config XEN_COMPAT_XENFS
a xen platform.
If in doubt, say yes.
+config XEN_SYS_HYPERVISOR
+ bool "Create xen entries under /sys/hypervisor"
+ depends on XEN && SYSFS
+ select SYS_HYPERVISOR
+ default y
+ help
+ Create entries under /sys/hypervisor describing the Xen
+ hypervisor environment. When running natively or in another
+ virtual environment, /sys/hypervisor will still be present,
+ but will have no xen contents. \ No newline at end of file
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index ff8accc9e10..ec2a39b1e26 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -4,4 +4,6 @@ obj-y += xenbus/
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += balloon.o
-obj-$(CONFIG_XENFS) += xenfs/ \ No newline at end of file
+obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o
+obj-$(CONFIG_XENFS) += xenfs/
+obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o \ No newline at end of file
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 30963af5dba..891d2e90753 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -151,6 +151,12 @@ static unsigned int evtchn_from_irq(unsigned irq)
return info_for_irq(irq)->evtchn;
}
+unsigned irq_from_evtchn(unsigned int evtchn)
+{
+ return evtchn_to_irq[evtchn];
+}
+EXPORT_SYMBOL_GPL(irq_from_evtchn);
+
static enum ipi_vector ipi_from_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
@@ -335,7 +341,7 @@ static int find_unbound_irq(void)
if (irq == nr_irqs)
panic("No available IRQ to bind to: increase nr_irqs!\n");
- desc = irq_to_desc_alloc_cpu(irq, 0);
+ desc = irq_to_desc_alloc_node(irq, 0);
if (WARN_ON(desc == NULL))
return -1;
@@ -688,13 +694,13 @@ void rebind_evtchn_irq(int evtchn, int irq)
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
-static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
if (!VALID_EVTCHN(evtchn))
- return;
+ return -1;
/* Send future instances of this interrupt to other vcpu. */
bind_vcpu.port = evtchn;
@@ -707,13 +713,15 @@ static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
*/
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
bind_evtchn_to_cpu(evtchn, tcpu);
-}
+ return 0;
+}
-static void set_affinity_irq(unsigned irq, const struct cpumask *dest)
+static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
unsigned tcpu = cpumask_first(dest);
- rebind_irq_to_cpu(irq, tcpu);
+
+ return rebind_irq_to_cpu(irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
new file mode 100644
index 00000000000..af031950f9b
--- /dev/null
+++ b/drivers/xen/evtchn.c
@@ -0,0 +1,507 @@
+/******************************************************************************
+ * evtchn.c
+ *
+ * Driver for receiving and demuxing event-channel signals.
+ *
+ * Copyright (c) 2004-2005, K A Fraser
+ * Multi-process extensions Copyright (c) 2004, Steven Smith
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/major.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/poll.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+#include <xen/events.h>
+#include <xen/evtchn.h>
+#include <asm/xen/hypervisor.h>
+
+struct per_user_data {
+ struct mutex bind_mutex; /* serialize bind/unbind operations */
+
+ /* Notification ring, accessed via /dev/xen/evtchn. */
+#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t))
+#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
+ evtchn_port_t *ring;
+ unsigned int ring_cons, ring_prod, ring_overflow;
+ struct mutex ring_cons_mutex; /* protect against concurrent readers */
+
+ /* Processes wait on this queue when ring is empty. */
+ wait_queue_head_t evtchn_wait;
+ struct fasync_struct *evtchn_async_queue;
+ const char *name;
+};
+
+/* Who's bound to each port? */
+static struct per_user_data *port_user[NR_EVENT_CHANNELS];
+static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */
+
+irqreturn_t evtchn_interrupt(int irq, void *data)
+{
+ unsigned int port = (unsigned long)data;
+ struct per_user_data *u;
+
+ spin_lock(&port_user_lock);
+
+ u = port_user[port];
+
+ disable_irq_nosync(irq);
+
+ if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
+ u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
+ wmb(); /* Ensure ring contents visible */
+ if (u->ring_cons == u->ring_prod++) {
+ wake_up_interruptible(&u->evtchn_wait);
+ kill_fasync(&u->evtchn_async_queue,
+ SIGIO, POLL_IN);
+ }
+ } else {
+ u->ring_overflow = 1;
+ }
+
+ spin_unlock(&port_user_lock);
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t evtchn_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int rc;
+ unsigned int c, p, bytes1 = 0, bytes2 = 0;
+ struct per_user_data *u = file->private_data;
+
+ /* Whole number of ports. */
+ count &= ~(sizeof(evtchn_port_t)-1);
+
+ if (count == 0)
+ return 0;
+
+ if (count > PAGE_SIZE)
+ count = PAGE_SIZE;
+
+ for (;;) {
+ mutex_lock(&u->ring_cons_mutex);
+
+ rc = -EFBIG;
+ if (u->ring_overflow)
+ goto unlock_out;
+
+ c = u->ring_cons;
+ p = u->ring_prod;
+ if (c != p)
+ break;
+
+ mutex_unlock(&u->ring_cons_mutex);
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ rc = wait_event_interruptible(u->evtchn_wait,
+ u->ring_cons != u->ring_prod);
+ if (rc)
+ return rc;
+ }
+
+ /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
+ if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
+ bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
+ sizeof(evtchn_port_t);
+ bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
+ } else {
+ bytes1 = (p - c) * sizeof(evtchn_port_t);
+ bytes2 = 0;
+ }
+
+ /* Truncate chunks according to caller's maximum byte count. */
+ if (bytes1 > count) {
+ bytes1 = count;
+ bytes2 = 0;
+ } else if ((bytes1 + bytes2) > count) {
+ bytes2 = count - bytes1;
+ }
+
+ rc = -EFAULT;
+ rmb(); /* Ensure that we see the port before we copy it. */
+ if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
+ ((bytes2 != 0) &&
+ copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
+ goto unlock_out;
+
+ u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
+ rc = bytes1 + bytes2;
+
+ unlock_out:
+ mutex_unlock(&u->ring_cons_mutex);
+ return rc;
+}
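+
+The chunk arithmetic above is the only subtle part of the read path: ring_cons and ring_prod are free-running counters, and the (c ^ p) & EVTCHN_RING_SIZE test detects whether the pending entries wrap past the end of the ring and therefore need two copy_to_user() calls. A standalone sketch with a worked example (the HYP_/hyp_ names and sizes are illustrative, not part of the patch):
+
+#include <stdio.h>
+
+#define HYP_RING_SIZE 1024                      /* entries, power of two */
+#define HYP_RING_MASK(i) ((i) & (HYP_RING_SIZE - 1))
+
+static void hyp_split(unsigned int c, unsigned int p,
+                      unsigned int *n1, unsigned int *n2)
+{
+        if ((c ^ p) & HYP_RING_SIZE) {          /* pending data wraps the ring end */
+                *n1 = HYP_RING_SIZE - HYP_RING_MASK(c);
+                *n2 = HYP_RING_MASK(p);
+        } else {                                /* one contiguous chunk */
+                *n1 = p - c;
+                *n2 = 0;
+        }
+}
+
+int main(void)
+{
+        unsigned int n1, n2;
+
+        hyp_split(1020, 1030, &n1, &n2);        /* cons = 1020, prod = 1030 */
+        printf("chunk1 = %u entries, chunk2 = %u entries\n", n1, n2);   /* 4 and 6 */
+        return 0;
+}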
+
+static ssize_t evtchn_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int rc, i;
+ evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
+ struct per_user_data *u = file->private_data;
+
+ if (kbuf == NULL)
+ return -ENOMEM;
+
+ /* Whole number of ports. */
+ count &= ~(sizeof(evtchn_port_t)-1);
+
+ rc = 0;
+ if (count == 0)
+ goto out;
+
+ if (count > PAGE_SIZE)
+ count = PAGE_SIZE;
+
+ rc = -EFAULT;
+ if (copy_from_user(kbuf, buf, count) != 0)
+ goto out;
+
+ spin_lock_irq(&port_user_lock);
+ for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
+ if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
+ enable_irq(irq_from_evtchn(kbuf[i]));
+ spin_unlock_irq(&port_user_lock);
+
+ rc = count;
+
+ out:
+ free_page((unsigned long)kbuf);
+ return rc;
+}
+
+static int evtchn_bind_to_user(struct per_user_data *u, int port)
+{
+ int rc = 0;
+
+ /*
+ * Ports are never reused, so every caller should pass in a
+ * unique port.
+ *
+ * (Locking not necessary because we haven't registered the
+ * interrupt handler yet, and our caller has already
+ * serialized bind operations.)
+ */
+ BUG_ON(port_user[port] != NULL);
+ port_user[port] = u;
+
+ rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
+ u->name, (void *)(unsigned long)port);
+ if (rc >= 0)
+ rc = 0;
+
+ return rc;
+}
+
+static void evtchn_unbind_from_user(struct per_user_data *u, int port)
+{
+ int irq = irq_from_evtchn(port);
+
+ unbind_from_irqhandler(irq, (void *)(unsigned long)port);
+
+ /* make sure we unbind the irq handler before clearing the port */
+ barrier();
+
+ port_user[port] = NULL;
+}
+
+static long evtchn_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc;
+ struct per_user_data *u = file->private_data;
+ void __user *uarg = (void __user *) arg;
+
+ /* Prevent bind from racing with unbind */
+ mutex_lock(&u->bind_mutex);
+
+ switch (cmd) {
+ case IOCTL_EVTCHN_BIND_VIRQ: {
+ struct ioctl_evtchn_bind_virq bind;
+ struct evtchn_bind_virq bind_virq;
+
+ rc = -EFAULT;
+ if (copy_from_user(&bind, uarg, sizeof(bind)))
+ break;
+
+ bind_virq.virq = bind.virq;
+ bind_virq.vcpu = 0;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ &bind_virq);
+ if (rc != 0)
+ break;
+
+ rc = evtchn_bind_to_user(u, bind_virq.port);
+ if (rc == 0)
+ rc = bind_virq.port;
+ break;
+ }
+
+ case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
+ struct ioctl_evtchn_bind_interdomain bind;
+ struct evtchn_bind_interdomain bind_interdomain;
+
+ rc = -EFAULT;
+ if (copy_from_user(&bind, uarg, sizeof(bind)))
+ break;
+
+ bind_interdomain.remote_dom = bind.remote_domain;
+ bind_interdomain.remote_port = bind.remote_port;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+ &bind_interdomain);
+ if (rc != 0)
+ break;
+
+ rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
+ if (rc == 0)
+ rc = bind_interdomain.local_port;
+ break;
+ }
+
+ case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
+ struct ioctl_evtchn_bind_unbound_port bind;
+ struct evtchn_alloc_unbound alloc_unbound;
+
+ rc = -EFAULT;
+ if (copy_from_user(&bind, uarg, sizeof(bind)))
+ break;
+
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = bind.remote_domain;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ &alloc_unbound);
+ if (rc != 0)
+ break;
+
+ rc = evtchn_bind_to_user(u, alloc_unbound.port);
+ if (rc == 0)
+ rc = alloc_unbound.port;
+ break;
+ }
+
+ case IOCTL_EVTCHN_UNBIND: {
+ struct ioctl_evtchn_unbind unbind;
+
+ rc = -EFAULT;
+ if (copy_from_user(&unbind, uarg, sizeof(unbind)))
+ break;
+
+ rc = -EINVAL;
+ if (unbind.port >= NR_EVENT_CHANNELS)
+ break;
+
+ spin_lock_irq(&port_user_lock);
+
+ rc = -ENOTCONN;
+ if (port_user[unbind.port] != u) {
+ spin_unlock_irq(&port_user_lock);
+ break;
+ }
+
+ evtchn_unbind_from_user(u, unbind.port);
+
+ spin_unlock_irq(&port_user_lock);
+
+ rc = 0;
+ break;
+ }
+
+ case IOCTL_EVTCHN_NOTIFY: {
+ struct ioctl_evtchn_notify notify;
+
+ rc = -EFAULT;
+ if (copy_from_user(&notify, uarg, sizeof(notify)))
+ break;
+
+ if (notify.port >= NR_EVENT_CHANNELS) {
+ rc = -EINVAL;
+ } else if (port_user[notify.port] != u) {
+ rc = -ENOTCONN;
+ } else {
+ notify_remote_via_evtchn(notify.port);
+ rc = 0;
+ }
+ break;
+ }
+
+ case IOCTL_EVTCHN_RESET: {
+ /* Initialise the ring to empty. Clear errors. */
+ mutex_lock(&u->ring_cons_mutex);
+ spin_lock_irq(&port_user_lock);
+ u->ring_cons = u->ring_prod = u->ring_overflow = 0;
+ spin_unlock_irq(&port_user_lock);
+ mutex_unlock(&u->ring_cons_mutex);
+ rc = 0;
+ break;
+ }
+
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ mutex_unlock(&u->bind_mutex);
+
+ return rc;
+}
+
+static unsigned int evtchn_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = POLLOUT | POLLWRNORM;
+ struct per_user_data *u = file->private_data;
+
+ poll_wait(file, &u->evtchn_wait, wait);
+ if (u->ring_cons != u->ring_prod)
+ mask |= POLLIN | POLLRDNORM;
+ if (u->ring_overflow)
+ mask = POLLERR;
+ return mask;
+}
+
+static int evtchn_fasync(int fd, struct file *filp, int on)
+{
+ struct per_user_data *u = filp->private_data;
+ return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
+}
+
+static int evtchn_open(struct inode *inode, struct file *filp)
+{
+ struct per_user_data *u;
+
+ u = kzalloc(sizeof(*u), GFP_KERNEL);
+ if (u == NULL)
+ return -ENOMEM;
+
+ u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
+ if (u->name == NULL) {
+ kfree(u);
+ return -ENOMEM;
+ }
+
+ init_waitqueue_head(&u->evtchn_wait);
+
+ u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
+ if (u->ring == NULL) {
+ kfree(u->name);
+ kfree(u);
+ return -ENOMEM;
+ }
+
+ mutex_init(&u->bind_mutex);
+ mutex_init(&u->ring_cons_mutex);
+
+ filp->private_data = u;
+
+ return 0;
+}
+
+static int evtchn_release(struct inode *inode, struct file *filp)
+{
+ int i;
+ struct per_user_data *u = filp->private_data;
+
+ spin_lock_irq(&port_user_lock);
+
+ free_page((unsigned long)u->ring);
+
+ for (i = 0; i < NR_EVENT_CHANNELS; i++) {
+ if (port_user[i] != u)
+ continue;
+
+ evtchn_unbind_from_user(port_user[i], i);
+ }
+
+ spin_unlock_irq(&port_user_lock);
+
+ kfree(u->name);
+ kfree(u);
+
+ return 0;
+}
+
+static const struct file_operations evtchn_fops = {
+ .owner = THIS_MODULE,
+ .read = evtchn_read,
+ .write = evtchn_write,
+ .unlocked_ioctl = evtchn_ioctl,
+ .poll = evtchn_poll,
+ .fasync = evtchn_fasync,
+ .open = evtchn_open,
+ .release = evtchn_release,
+};
+
+static struct miscdevice evtchn_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "evtchn",
+ .fops = &evtchn_fops,
+};
+
+static int __init evtchn_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ spin_lock_init(&port_user_lock);
+ memset(port_user, 0, sizeof(port_user));
+
+ /* Create '/dev/misc/evtchn'. */
+ err = misc_register(&evtchn_miscdev);
+ if (err != 0) {
+ printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
+ return err;
+ }
+
+ printk(KERN_INFO "Event-channel device installed.\n");
+
+ return 0;
+}
+
+static void __exit evtchn_cleanup(void)
+{
+ misc_deregister(&evtchn_miscdev);
+}
+
+module_init(evtchn_init);
+module_exit(evtchn_cleanup);
+
+MODULE_LICENSE("GPL");
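[Editor's note] A minimal userspace sketch (not part of the patch) of how the ioctl/read/write interface added above is meant to be driven: allocate an unbound port, wait for the peer with read(), re-arm the port with write(), notify back, then unbind. The header location (<xen/evtchn.h>), the evtchn_port_t typedef and the device node path are assumptions that depend on how the uapi header is installed and on the local udev rules; the ioctl names and struct fields are the ones defined by this file.

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <xen/evtchn.h>			/* assumed install location of the ioctl ABI */

int main(void)
{
	struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = 0 };
	struct ioctl_evtchn_notify notify;
	struct ioctl_evtchn_unbind unbind;
	evtchn_port_t port;
	int fd, rc;

	fd = open("/dev/xen/evtchn", O_RDWR);	/* path depends on udev setup */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* allocate a port that domain .remote_domain may later connect to */
	rc = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
	if (rc < 0) {
		perror("IOCTL_EVTCHN_BIND_UNBOUND_PORT");
		return 1;
	}
	printf("local port %d, waiting for peer...\n", rc);

	/* read() blocks until the peer signals the channel */
	if (read(fd, &port, sizeof(port)) == sizeof(port)) {
		printf("event on port %u\n", port);
		/* writing the port back re-enables its irq (see evtchn_write) */
		write(fd, &port, sizeof(port));
		/* ping the peer over the now-connected channel */
		notify.port = port;
		ioctl(fd, IOCTL_EVTCHN_NOTIFY, &notify);
	}

	unbind.port = rc;
	ioctl(fd, IOCTL_EVTCHN_UNBIND, &unbind);
	close(fd);
	return 0;
}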
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 4b5b84837ee..10d03d7931c 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -43,7 +43,7 @@ static int xen_suspend(void *data)
if (err) {
printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n",
err);
- device_power_up(PMSG_RESUME);
+ dpm_resume_noirq(PMSG_RESUME);
return err;
}
@@ -69,7 +69,7 @@ static int xen_suspend(void *data)
}
sysdev_resume();
- device_power_up(PMSG_RESUME);
+ dpm_resume_noirq(PMSG_RESUME);
return 0;
}
@@ -92,19 +92,18 @@ static void do_suspend(void)
}
#endif
- err = device_suspend(PMSG_SUSPEND);
+ err = dpm_suspend_start(PMSG_SUSPEND);
if (err) {
- printk(KERN_ERR "xen suspend: device_suspend %d\n", err);
+ printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
goto out;
}
- printk("suspending xenbus...\n");
- /* XXX use normal device tree? */
- xenbus_suspend();
+ printk(KERN_DEBUG "suspending xenstore...\n");
+ xs_suspend();
- err = device_power_down(PMSG_SUSPEND);
+ err = dpm_suspend_noirq(PMSG_SUSPEND);
if (err) {
- printk(KERN_ERR "device_power_down failed: %d\n", err);
+ printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
goto resume_devices;
}
@@ -116,14 +115,14 @@ static void do_suspend(void)
if (!cancelled) {
xen_arch_resume();
- xenbus_resume();
+ xs_resume();
} else
- xenbus_suspend_cancel();
+ xs_suspend_cancel();
- device_power_up(PMSG_RESUME);
+ dpm_resume_noirq(PMSG_RESUME);
resume_devices:
- device_resume(PMSG_RESUME);
+ dpm_resume_end(PMSG_RESUME);
/* Make sure timer events get retriggered on all CPUs */
clock_was_set();
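[Editor's note] The do_suspend() hunks above are mostly mechanical renames to the new PM-core entry points plus the switch from the removed xenbus_suspend()/resume() layer to the xenstore-level xs_* calls. A condensed sketch of the ordering they leave behind (error handling, the shutdown watch and the stop_machine() plumbing are omitted; the xs_* prototypes are assumed visible via <xen/xenbus.h>, as in manage.c):

#include <linux/pm.h>
#include <xen/xenbus.h>

static void do_suspend_outline(void)
{
	dpm_suspend_start(PMSG_SUSPEND);	/* was device_suspend()    */
	xs_suspend();				/* was xenbus_suspend()    */
	dpm_suspend_noirq(PMSG_SUSPEND);	/* was device_power_down() */

	/* ... stop_machine(xen_suspend, ...) runs here ... */

	xs_resume();				/* or xs_suspend_cancel()  */
	dpm_resume_noirq(PMSG_RESUME);		/* was device_power_up()   */
	dpm_resume_end(PMSG_RESUME);		/* was device_resume()     */
}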
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
new file mode 100644
index 00000000000..88a60e03ccf
--- /dev/null
+++ b/drivers/xen/sys-hypervisor.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (c) 2006 IBM Corporation
+ * Authored by: Mike D. Day <ncmike@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kobject.h>
+
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/xenbus.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/version.h>
+
+#define HYPERVISOR_ATTR_RO(_name) \
+static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name)
+
+#define HYPERVISOR_ATTR_RW(_name) \
+static struct hyp_sysfs_attr _name##_attr = \
+ __ATTR(_name, 0644, _name##_show, _name##_store)
+
+struct hyp_sysfs_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct hyp_sysfs_attr *, char *);
+ ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
+ void *hyp_attr_data;
+};
+
+static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ return sprintf(buffer, "xen\n");
+}
+
+HYPERVISOR_ATTR_RO(type);
+
+static int __init xen_sysfs_type_init(void)
+{
+ return sysfs_create_file(hypervisor_kobj, &type_attr.attr);
+}
+
+static void xen_sysfs_type_destroy(void)
+{
+ sysfs_remove_file(hypervisor_kobj, &type_attr.attr);
+}
+
+/* xen version attributes */
+static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int version = HYPERVISOR_xen_version(XENVER_version, NULL);
+ if (version)
+ return sprintf(buffer, "%d\n", version >> 16);
+ return -ENODEV;
+}
+
+HYPERVISOR_ATTR_RO(major);
+
+static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int version = HYPERVISOR_xen_version(XENVER_version, NULL);
+ if (version)
+ return sprintf(buffer, "%d\n", version & 0xff);
+ return -ENODEV;
+}
+
+HYPERVISOR_ATTR_RO(minor);
+
+static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ char *extra;
+
+ extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL);
+ if (extra) {
+ ret = HYPERVISOR_xen_version(XENVER_extraversion, extra);
+ if (!ret)
+ ret = sprintf(buffer, "%s\n", extra);
+ kfree(extra);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(extra);
+
+static struct attribute *version_attrs[] = {
+ &major_attr.attr,
+ &minor_attr.attr,
+ &extra_attr.attr,
+ NULL
+};
+
+static struct attribute_group version_group = {
+ .name = "version",
+ .attrs = version_attrs,
+};
+
+static int __init xen_sysfs_version_init(void)
+{
+ return sysfs_create_group(hypervisor_kobj, &version_group);
+}
+
+static void xen_sysfs_version_destroy(void)
+{
+ sysfs_remove_group(hypervisor_kobj, &version_group);
+}
+
+/* UUID */
+
+static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ char *vm, *val;
+ int ret;
+ extern int xenstored_ready;
+
+ if (!xenstored_ready)
+ return -EBUSY;
+
+ vm = xenbus_read(XBT_NIL, "vm", "", NULL);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
+ val = xenbus_read(XBT_NIL, vm, "uuid", NULL);
+ kfree(vm);
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+ ret = sprintf(buffer, "%s\n", val);
+ kfree(val);
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(uuid);
+
+static int __init xen_sysfs_uuid_init(void)
+{
+ return sysfs_create_file(hypervisor_kobj, &uuid_attr.attr);
+}
+
+static void xen_sysfs_uuid_destroy(void)
+{
+ sysfs_remove_file(hypervisor_kobj, &uuid_attr.attr);
+}
+
+/* xen compilation attributes */
+
+static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ struct xen_compile_info *info;
+
+ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
+ if (info) {
+ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
+ if (!ret)
+ ret = sprintf(buffer, "%s\n", info->compiler);
+ kfree(info);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(compiler);
+
+static ssize_t compiled_by_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ struct xen_compile_info *info;
+
+ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
+ if (info) {
+ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
+ if (!ret)
+ ret = sprintf(buffer, "%s\n", info->compile_by);
+ kfree(info);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(compiled_by);
+
+static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ struct xen_compile_info *info;
+
+ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
+ if (info) {
+ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
+ if (!ret)
+ ret = sprintf(buffer, "%s\n", info->compile_date);
+ kfree(info);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(compile_date);
+
+static struct attribute *xen_compile_attrs[] = {
+ &compiler_attr.attr,
+ &compiled_by_attr.attr,
+ &compile_date_attr.attr,
+ NULL
+};
+
+static struct attribute_group xen_compilation_group = {
+ .name = "compilation",
+ .attrs = xen_compile_attrs,
+};
+
+static int __init xen_compilation_init(void)
+{
+ return sysfs_create_group(hypervisor_kobj, &xen_compilation_group);
+}
+
+static void xen_compilation_destroy(void)
+{
+ sysfs_remove_group(hypervisor_kobj, &xen_compilation_group);
+}
+
+/* xen properties info */
+
+static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ char *caps;
+
+ caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL);
+ if (caps) {
+ ret = HYPERVISOR_xen_version(XENVER_capabilities, caps);
+ if (!ret)
+ ret = sprintf(buffer, "%s\n", caps);
+ kfree(caps);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(capabilities);
+
+static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ char *cset;
+
+ cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL);
+ if (cset) {
+ ret = HYPERVISOR_xen_version(XENVER_changeset, cset);
+ if (!ret)
+ ret = sprintf(buffer, "%s\n", cset);
+ kfree(cset);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(changeset);
+
+static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret = -ENOMEM;
+ struct xen_platform_parameters *parms;
+
+ parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL);
+ if (parms) {
+ ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
+ parms);
+ if (!ret)
+ ret = sprintf(buffer, "%lx\n", parms->virt_start);
+ kfree(parms);
+ }
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(virtual_start);
+
+static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret;
+
+ ret = HYPERVISOR_xen_version(XENVER_pagesize, NULL);
+ if (ret > 0)
+ ret = sprintf(buffer, "%x\n", ret);
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(pagesize);
+
+static ssize_t xen_feature_show(int index, char *buffer)
+{
+ ssize_t ret;
+ struct xen_feature_info info;
+
+ info.submap_idx = index;
+ ret = HYPERVISOR_xen_version(XENVER_get_features, &info);
+ if (!ret)
+ ret = sprintf(buffer, "%08x", info.submap);
+
+ return ret;
+}
+
+static ssize_t features_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ ssize_t len;
+ int i;
+
+ len = 0;
+ for (i = XENFEAT_NR_SUBMAPS-1; i >= 0; i--) {
+ int ret = xen_feature_show(i, buffer + len);
+ if (ret < 0) {
+ if (len == 0)
+ len = ret;
+ break;
+ }
+ len += ret;
+ }
+ if (len > 0)
+ buffer[len++] = '\n';
+
+ return len;
+}
+
+HYPERVISOR_ATTR_RO(features);
+
+static struct attribute *xen_properties_attrs[] = {
+ &capabilities_attr.attr,
+ &changeset_attr.attr,
+ &virtual_start_attr.attr,
+ &pagesize_attr.attr,
+ &features_attr.attr,
+ NULL
+};
+
+static struct attribute_group xen_properties_group = {
+ .name = "properties",
+ .attrs = xen_properties_attrs,
+};
+
+static int __init xen_properties_init(void)
+{
+ return sysfs_create_group(hypervisor_kobj, &xen_properties_group);
+}
+
+static void xen_properties_destroy(void)
+{
+ sysfs_remove_group(hypervisor_kobj, &xen_properties_group);
+}
+
+static int __init hyper_sysfs_init(void)
+{
+ int ret;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ ret = xen_sysfs_type_init();
+ if (ret)
+ goto out;
+ ret = xen_sysfs_version_init();
+ if (ret)
+ goto version_out;
+ ret = xen_compilation_init();
+ if (ret)
+ goto comp_out;
+ ret = xen_sysfs_uuid_init();
+ if (ret)
+ goto uuid_out;
+ ret = xen_properties_init();
+ if (ret)
+ goto prop_out;
+
+ goto out;
+
+prop_out:
+ xen_sysfs_uuid_destroy();
+uuid_out:
+ xen_compilation_destroy();
+comp_out:
+ xen_sysfs_version_destroy();
+version_out:
+ xen_sysfs_type_destroy();
+out:
+ return ret;
+}
+
+static void __exit hyper_sysfs_exit(void)
+{
+ xen_properties_destroy();
+ xen_compilation_destroy();
+ xen_sysfs_uuid_destroy();
+ xen_sysfs_version_destroy();
+ xen_sysfs_type_destroy();
+}
+
+module_init(hyper_sysfs_init);
+module_exit(hyper_sysfs_exit);
+
+static ssize_t hyp_sysfs_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buffer)
+{
+ struct hyp_sysfs_attr *hyp_attr;
+ hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
+ if (hyp_attr->show)
+ return hyp_attr->show(hyp_attr, buffer);
+ return 0;
+}
+
+static ssize_t hyp_sysfs_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer,
+ size_t len)
+{
+ struct hyp_sysfs_attr *hyp_attr;
+ hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
+ if (hyp_attr->store)
+ return hyp_attr->store(hyp_attr, buffer, len);
+ return 0;
+}
+
+static struct sysfs_ops hyp_sysfs_ops = {
+ .show = hyp_sysfs_show,
+ .store = hyp_sysfs_store,
+};
+
+static struct kobj_type hyp_sysfs_kobj_type = {
+ .sysfs_ops = &hyp_sysfs_ops,
+};
+
+static int __init hypervisor_subsys_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ hypervisor_kobj->ktype = &hyp_sysfs_kobj_type;
+ return 0;
+}
+device_initcall(hypervisor_subsys_init);
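[Editor's note] hypervisor_kobj is the kobject behind /sys/hypervisor, so the groups and attributes registered in hyper_sysfs_init() show up as plain sysfs files. A small userspace sketch (not part of the patch) that dumps them; the paths simply follow the group and attribute names in the file above:

#include <stdio.h>

static void show(const char *path)
{
	char buf[128];
	FILE *f = fopen(path, "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%-45s %s", path, buf);
	if (f)
		fclose(f);
}

int main(void)
{
	show("/sys/hypervisor/type");
	show("/sys/hypervisor/uuid");
	show("/sys/hypervisor/version/major");
	show("/sys/hypervisor/version/minor");
	show("/sys/hypervisor/version/extra");
	show("/sys/hypervisor/compilation/compiler");
	show("/sys/hypervisor/properties/capabilities");
	show("/sys/hypervisor/properties/features");
	return 0;
}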
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 773d1cf2328..d42e25d5968 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -71,6 +71,9 @@ static int xenbus_probe_frontend(const char *type, const char *name);
static void xenbus_dev_shutdown(struct device *_dev);
+static int xenbus_dev_suspend(struct device *dev, pm_message_t state);
+static int xenbus_dev_resume(struct device *dev);
+
/* If something in array of ids matches this device, return it. */
static const struct xenbus_device_id *
match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
@@ -188,6 +191,9 @@ static struct xen_bus_type xenbus_frontend = {
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
.dev_attrs = xenbus_dev_attrs,
+
+ .suspend = xenbus_dev_suspend,
+ .resume = xenbus_dev_resume,
},
};
@@ -654,6 +660,7 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
kfree(root);
}
+EXPORT_SYMBOL_GPL(xenbus_dev_changed);
static void frontend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len)
@@ -669,7 +676,7 @@ static struct xenbus_watch fe_watch = {
.callback = frontend_changed,
};
-static int suspend_dev(struct device *dev, void *data)
+static int xenbus_dev_suspend(struct device *dev, pm_message_t state)
{
int err = 0;
struct xenbus_driver *drv;
@@ -682,35 +689,14 @@ static int suspend_dev(struct device *dev, void *data)
drv = to_xenbus_driver(dev->driver);
xdev = container_of(dev, struct xenbus_device, dev);
if (drv->suspend)
- err = drv->suspend(xdev);
+ err = drv->suspend(xdev, state);
if (err)
printk(KERN_WARNING
"xenbus: suspend %s failed: %i\n", dev_name(dev), err);
return 0;
}
-static int suspend_cancel_dev(struct device *dev, void *data)
-{
- int err = 0;
- struct xenbus_driver *drv;
- struct xenbus_device *xdev;
-
- DPRINTK("");
-
- if (dev->driver == NULL)
- return 0;
- drv = to_xenbus_driver(dev->driver);
- xdev = container_of(dev, struct xenbus_device, dev);
- if (drv->suspend_cancel)
- err = drv->suspend_cancel(xdev);
- if (err)
- printk(KERN_WARNING
- "xenbus: suspend_cancel %s failed: %i\n",
- dev_name(dev), err);
- return 0;
-}
-
-static int resume_dev(struct device *dev, void *data)
+static int xenbus_dev_resume(struct device *dev)
{
int err;
struct xenbus_driver *drv;
@@ -755,33 +741,6 @@ static int resume_dev(struct device *dev, void *data)
return 0;
}
-void xenbus_suspend(void)
-{
- DPRINTK("");
-
- bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
- xenbus_backend_suspend(suspend_dev);
- xs_suspend();
-}
-EXPORT_SYMBOL_GPL(xenbus_suspend);
-
-void xenbus_resume(void)
-{
- xb_init_comms();
- xs_resume();
- bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
- xenbus_backend_resume(resume_dev);
-}
-EXPORT_SYMBOL_GPL(xenbus_resume);
-
-void xenbus_suspend_cancel(void)
-{
- xs_suspend_cancel();
- bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
- xenbus_backend_resume(suspend_cancel_dev);
-}
-EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
-
/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready = 0;
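[Editor's note] For frontend drivers, the change above means the per-device suspend hook is now invoked by the driver-model PM path with the pm_message_t the core passed to the bus (drv->suspend(xdev, state)), and the separate suspend_cancel hook disappears along with xenbus_suspend()/resume()/suspend_cancel(). A sketch of what a frontend driver provides after this change; the driver and function names here are hypothetical:

#include <linux/pm.h>
#include <xen/xenbus.h>

static int examplefront_suspend(struct xenbus_device *dev, pm_message_t state)
{
	/* quiesce the shared ring; remember anything needed for resume */
	return 0;
}

static int examplefront_resume(struct xenbus_device *dev)
{
	/* reconnect to the backend after the domain has been restored */
	return 0;
}

static struct xenbus_driver examplefront_driver = {
	.name = "examplefront",
	/* .ids, .probe, .otherend_changed, .remove as usual */
	.suspend = examplefront_suspend,	/* new two-argument form */
	.resume = examplefront_resume,
};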
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index e325eab4724..eab33f1dbdf 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -673,6 +673,8 @@ void xs_resume(void)
struct xenbus_watch *watch;
char token[sizeof(watch) * 2 + 1];
+ xb_init_comms();
+
mutex_unlock(&xs_state.response_mutex);
mutex_unlock(&xs_state.request_mutex);
up_write(&xs_state.transaction_mutex);
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 515741a8e6b..6559e0c752c 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -20,10 +20,27 @@
MODULE_DESCRIPTION("Xen filesystem");
MODULE_LICENSE("GPL");
+static ssize_t capabilities_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off)
+{
+ char *tmp = "";
+
+ if (xen_initial_domain())
+ tmp = "control_d\n";
+
+ return simple_read_from_buffer(buf, size, off, tmp, strlen(tmp));
+}
+
+static const struct file_operations capabilities_file_ops = {
+ .read = capabilities_read,
+};
+
static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
{
static struct tree_descr xenfs_files[] = {
- [2] = {"xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR},
+ [1] = {},
+ { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
+ { "capabilities", &capabilities_file_ops, S_IRUGO },
{""},
};